CombinedText stringlengths 4 3.42M |
|---|
package components
import (
cryptorand "crypto/rand"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"math/big"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
v12 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"kubevirt.io/kubevirt/pkg/certificates/bootstrap"
certutil "kubevirt.io/kubevirt/pkg/certificates/triple/cert"
)
var _ = Describe("Certificate Management", func() {
	Context("CA certificate bundle", func() {
		It("should drop expired CAs", func() {
			now := time.Now()
			current := NewSelfSignedCert(now, now.Add(1*time.Hour))
			// The only CA in the existing bundle is already expired, so the
			// merged bundle must contain just the current CA.
			given := []*tls.Certificate{
				NewSelfSignedCert(now.Add(-20*time.Minute), now.Add(-10*time.Minute)),
			}
			givenBundle := CACertsToBundle(given)
			expectBundle := CACertsToBundle([]*tls.Certificate{current})
			bundle, count, err := MergeCABundle(current, givenBundle, 2*time.Minute)
			Expect(err).ToNot(HaveOccurred())
			Expect(count).To(Equal(1))
			Expect(bundle).To(Equal(expectBundle))
		})
		It("should be properly appended when within the overlap period", func() {
			now := time.Now()
			// current became valid just now, i.e. within the 2 minute
			// overlap window, so the old CA must be kept alongside it.
			current := NewSelfSignedCert(now, now.Add(1*time.Hour))
			given := []*tls.Certificate{
				NewSelfSignedCert(now.Add(-20*time.Minute), now.Add(1*time.Hour)),
			}
			givenBundle := CACertsToBundle(given)
			expectBundle := CACertsToBundle([]*tls.Certificate{current, given[0]})
			bundle, count, err := MergeCABundle(current, givenBundle, 2*time.Minute)
			Expect(err).ToNot(HaveOccurred())
			Expect(count).To(Equal(2))
			Expect(bundle).To(Equal(expectBundle))
		})
		It("should kick out the first CA cert if it is outside of the overlap period", func() {
			now := time.Now()
			// current has been valid for 3 minutes, longer than the 2 minute
			// overlap window, so the older CA is no longer needed.
			current := NewSelfSignedCert(now.Add(-3*time.Minute), now.Add(1*time.Hour))
			given := []*tls.Certificate{
				NewSelfSignedCert(now.Add(-20*time.Minute), now.Add(1*time.Hour)),
			}
			givenBundle := CACertsToBundle(given)
			expectBundle := CACertsToBundle([]*tls.Certificate{current})
			bundle, count, err := MergeCABundle(current, givenBundle, 2*time.Minute)
			Expect(err).ToNot(HaveOccurred())
			Expect(count).To(Equal(1))
			Expect(bundle).To(Equal(expectBundle))
		})
		It("should kick out a CA cert which are outside of the overlap period", func() {
			now := time.Now()
			current := NewSelfSignedCert(now, now.Add(1*time.Hour))
			given := []*tls.Certificate{
				NewSelfSignedCert(now.Add(-20*time.Minute), now.Add(1*time.Hour)),
				NewSelfSignedCert(now.Add(-10*time.Minute), now.Add(1*time.Hour)),
			}
			givenBundle := CACertsToBundle(given)
			// NOTE(review): only the newer old CA (given[1]) is expected to
			// survive next to current — confirm this matches MergeCABundle's
			// pruning rule.
			expectBundle := CACertsToBundle([]*tls.Certificate{current, given[1]})
			bundle, count, err := MergeCABundle(current, givenBundle, 2*time.Minute)
			Expect(err).ToNot(HaveOccurred())
			Expect(count).To(Equal(2))
			Expect(bundle).To(Equal(expectBundle))
		})
		It("should kick out multiple CA certs which are outside of the overlap period", func() {
			now := time.Now()
			// current has been valid for 5 minutes, so all three older CAs
			// fall outside the 2 minute overlap window.
			current := NewSelfSignedCert(now.Add(-5*time.Minute), now.Add(-5*time.Minute).Add(1*time.Hour))
			given := []*tls.Certificate{
				NewSelfSignedCert(now.Add(-20*time.Minute), now.Add(1*time.Hour)),
				NewSelfSignedCert(now.Add(-10*time.Minute), now.Add(1*time.Hour)),
				NewSelfSignedCert(now.Add(-5*time.Minute), now.Add(1*time.Hour)),
			}
			givenBundle := CACertsToBundle(given)
			expectBundle := CACertsToBundle([]*tls.Certificate{current})
			bundle, count, err := MergeCABundle(current, givenBundle, 2*time.Minute)
			Expect(err).ToNot(HaveOccurred())
			Expect(count).To(Equal(1))
			Expect(bundle).To(Equal(expectBundle))
		})
		It("should ensure that the current CA is not added over and over again", func() {
			now := time.Now()
			current := NewSelfSignedCert(now, now.Add(1*time.Hour))
			// current appears twice in the incoming bundle; it must be
			// deduplicated in the merged result.
			given := []*tls.Certificate{
				NewSelfSignedCert(now.Add(-20*time.Minute), now.Add(1*time.Hour)),
				current,
				current,
			}
			givenBundle := CACertsToBundle(given)
			expectBundle := CACertsToBundle([]*tls.Certificate{current, given[0]})
			bundle, count, err := MergeCABundle(current, givenBundle, 2*time.Minute)
			Expect(err).ToNot(HaveOccurred())
			Expect(count).To(Equal(2))
			Expect(bundle).To(Equal(expectBundle))
		})
		It("should be protected against misuse by cropping big arrays", func() {
			now := time.Now()
			current := NewSelfSignedCert(now, now.Add(1*time.Hour))
			given := []*tls.Certificate{}
			// 19 still-valid CAs plus the current one; the merge is expected
			// to crop the result down to 11 entries.
			for i := 1; i < 20; i++ {
				given = append(given, NewSelfSignedCert(now.Add(-1*time.Minute), now.Add(1*time.Hour)))
			}
			givenBundle := CACertsToBundle(given)
			_, count, err := MergeCABundle(current, givenBundle, 2*time.Minute)
			Expect(err).ToNot(HaveOccurred())
			Expect(count).To(Equal(11))
		})
		It("should immediately suggest a rotation if the cert is not signed by the provided CA", func() {
			now := time.Now()
			// current and ca are unrelated self-signed certs, so current can
			// not have been signed by ca.
			current := NewSelfSignedCert(now, now.Add(1*time.Hour))
			ca := NewSelfSignedCert(now, now.Add(1*time.Hour))
			renewal := &v1.Duration{Duration: 4 * time.Hour}
			deadline := NextRotationDeadline(current, ca, renewal, nil)
			// A deadline in the past means "rotate immediately".
			Expect(deadline.Before(time.Now())).To(BeTrue())
		})
		It("should set notBefore on the certificate to notBefore value of the CA certificate ", func() {
			duration := &v1.Duration{Duration: 5 * time.Hour}
			caSecrets := NewCACertSecrets("test")
			var caSecret *v12.Secret
			for _, ca := range caSecrets {
				if ca.Name == KubeVirtCASecretName {
					caSecret = ca
				}
			}
			Expect(PopulateSecretWithCertificate(caSecret, nil, duration)).To(Succeed())
			caCrt, err := LoadCertificates(caSecret)
			Expect(err).NotTo(HaveOccurred())
			crtSecret := NewCertSecrets("test", "test")[0]
			Expect(PopulateSecretWithCertificate(crtSecret, caCrt, duration)).To(Succeed())
			crt, err := LoadCertificates(crtSecret)
			Expect(err).NotTo(HaveOccurred())
			Expect(crt.Leaf.NotBefore).To(Equal(caCrt.Leaf.NotBefore))
		})
		DescribeTable("should set the notAfter on the certificate according to the supplied duration", func(caDuration time.Duration) {
			crtDuration := &v1.Duration{Duration: 2 * time.Hour}
			caSecrets := NewCACertSecrets("test")
			var caSecret *v12.Secret
			for _, ca := range caSecrets {
				if ca.Name == KubeVirtCASecretName {
					caSecret = ca
				}
			}
			now := time.Now()
			Expect(PopulateSecretWithCertificate(caSecret, nil, &v1.Duration{Duration: caDuration})).To(Succeed())
			caCrt, err := LoadCertificates(caSecret)
			Expect(err).NotTo(HaveOccurred())
			crtSecret := NewCertSecrets("test", "test")[0]
			Expect(PopulateSecretWithCertificate(crtSecret, caCrt, crtDuration)).To(Succeed())
			crt, err := LoadCertificates(crtSecret)
			Expect(err).NotTo(HaveOccurred())
			// Allow a 10 second tolerance for slow certificate generation.
			Expect(crt.Leaf.NotAfter.Unix()).To(BeNumerically("==", now.Add(crtDuration.Duration).Unix(), 10))
		},
			Entry("with a long valid CA", 24*time.Hour),
			Entry("with a CA which expires before the certificate rotation", 1*time.Hour),
		)
		DescribeTable("should suggest a rotation on the certificate according to its expiration", func(caDuration time.Duration) {
			crtDuration := &v1.Duration{Duration: 2 * time.Hour}
			crtRenewBefore := &v1.Duration{Duration: 1 * time.Hour}
			caSecrets := NewCACertSecrets("test")
			var caSecret *v12.Secret
			for _, ca := range caSecrets {
				if ca.Name == KubeVirtCASecretName {
					caSecret = ca
				}
			}
			Expect(PopulateSecretWithCertificate(caSecret, nil, &v1.Duration{Duration: caDuration})).To(Succeed())
			caCrt, err := LoadCertificates(caSecret)
			now := time.Now()
			Expect(err).NotTo(HaveOccurred())
			crtSecret := NewCertSecrets("test", "test")[0]
			Expect(PopulateSecretWithCertificate(crtSecret, caCrt, crtDuration)).To(Succeed())
			crt, err := LoadCertificates(crtSecret)
			Expect(err).NotTo(HaveOccurred())
			// 2h lifetime minus 1h renew-before => rotation in one hour.
			deadline := now.Add(time.Hour)
			// Generating certificates may take a little bit of time to execute (entropy, ...). Since we can't
			// inject a fake time into the foreign code which generates the certificates, allow a generous diff of three
			// seconds.
			Expect(NextRotationDeadline(crt, caCrt, crtRenewBefore, nil).Unix()).To(BeNumerically("==", deadline.Unix(), 3))
		},
			Entry("with a long valid CA", 24*time.Hour),
			Entry("with a CA which expires before the certificate rotation", 1*time.Hour),
		)
		// NOTE(review): "scretName" is a typo for "secretName"; it is local
		// to this closure, so renaming it is a safe follow-up.
		DescribeTable("should successfully sign with the current CA the certificate for", func(scretName string) {
			duration := &v1.Duration{Duration: 5 * time.Hour}
			caSecrets := NewCACertSecrets("test")
			var caSecret *v12.Secret
			for _, ca := range caSecrets {
				if ca.Name == KubeVirtCASecretName {
					caSecret = ca
				}
			}
			Expect(PopulateSecretWithCertificate(caSecret, nil, duration)).To(Succeed())
			caCrt, err := LoadCertificates(caSecret)
			Expect(err).NotTo(HaveOccurred())
			var crtSecret *v12.Secret
			for _, s := range NewCertSecrets("test", "test") {
				if s.Name == scretName {
					crtSecret = s
					break
				}
			}
			Expect(crtSecret).ToNot(BeNil())
			Expect(PopulateSecretWithCertificate(crtSecret, caCrt, duration)).To(Succeed())
			crt, err := LoadCertificates(crtSecret)
			Expect(err).ToNot(HaveOccurred())
			Expect(crt).ToNot(BeNil())
		},
			Entry("virt-handler", VirtHandlerCertSecretName),
			Entry("virt-controller", VirtControllerCertSecretName),
			Entry("virt-api", VirtApiCertSecretName),
			Entry("virt-operator", VirtOperatorCertSecretName),
		)
		It("should suggest earlier rotation if CA expires before cert", func() {
			// The cert alone would rotate after 24h-18h=6h; the CA also
			// expires after 6h, which must cap the rotation deadline.
			caDuration := 6 * time.Hour
			crtDuration := &v1.Duration{Duration: 24 * time.Hour}
			crtRenewBefore := &v1.Duration{Duration: 18 * time.Hour}
			caSecrets := NewCACertSecrets("test")
			var caSecret *v12.Secret
			for _, ca := range caSecrets {
				if ca.Name == KubeVirtCASecretName {
					caSecret = ca
				}
			}
			Expect(PopulateSecretWithCertificate(caSecret, nil, &v1.Duration{Duration: caDuration})).To(Succeed())
			caCrt, err := LoadCertificates(caSecret)
			now := time.Now()
			Expect(err).NotTo(HaveOccurred())
			crtSecret := NewCertSecrets("test", "test")[0]
			Expect(PopulateSecretWithCertificate(crtSecret, caCrt, crtDuration)).To(Succeed())
			crt, err := LoadCertificates(crtSecret)
			Expect(err).NotTo(HaveOccurred())
			deadline := now.Add(6 * time.Hour)
			// Generating certificates may take a little bit of time to execute (entropy, ...). Since we can't
			// inject a fake time into the foreign code which generates the certificates, allow a generous diff of three
			// seconds.
			Expect(NextRotationDeadline(crt, caCrt, crtRenewBefore, nil).Unix()).To(BeNumerically("==", deadline.Unix(), 3))
		})
	})
	It("should set the right namespaces on the certificate secrets", func() {
		secrets := NewCertSecrets("install_namespace", "operator_namespace")
		// All secrets but the last live in the install namespace...
		for _, secret := range secrets[:len(secrets)-1] {
			Expect(secret.Namespace).To(Equal("install_namespace"))
			Expect(secret.Name).ToNot(Equal(VirtOperatorCertSecretName))
		}
		// ...while the virt-operator secret lives in the operator namespace.
		Expect(secrets[len(secrets)-1].Namespace).To(Equal("operator_namespace"))
	})
	It("should create the kubevirt-ca configmap for the right namespace", func() {
		configMaps := NewCAConfigMaps("namespace")
		var configMap *v12.ConfigMap
		for _, cm := range configMaps {
			if cm.Name == KubeVirtCASecretName {
				configMap = cm
			}
		}
		Expect(configMap.Namespace).To(Equal("namespace"))
	})
	It("should populate secrets with certificates", func() {
		secrets := NewCertSecrets("install_namespace", "operator_namespace")
		caSecrets := NewCACertSecrets("test")
		var caSecret *v12.Secret
		for _, ca := range caSecrets {
			if ca.Name == KubeVirtCASecretName {
				caSecret = ca
			}
		}
		Expect(PopulateSecretWithCertificate(caSecret, nil, &v1.Duration{Duration: 1 * time.Hour})).To(Succeed())
		Expect(caSecret.Data).To(HaveKey(bootstrap.CertBytesValue))
		Expect(caSecret.Data).To(HaveKey(bootstrap.KeyBytesValue))
		caCert, err := LoadCertificates(caSecret)
		Expect(err).ToNot(HaveOccurred())
		// Every service secret must be populated from the fresh CA and
		// round-trip through LoadCertificates.
		for _, secret := range secrets {
			Expect(PopulateSecretWithCertificate(secret, caCert, &v1.Duration{Duration: 1 * time.Hour})).To(Succeed())
			Expect(secret.Data).To(HaveKey(bootstrap.CertBytesValue))
			Expect(secret.Data).To(HaveKey(bootstrap.KeyBytesValue))
			_, err = LoadCertificates(secret)
			Expect(err).ToNot(HaveOccurred())
		}
	})
})
// NewSelfSignedCert creates a self-signed CA certificate (CN "who") that is
// valid in the [notBefore, notAfter] interval, for use in tests only.
// The returned certificate has Leaf populated so callers can inspect the
// parsed x509 certificate without re-parsing the DER bytes.
func NewSelfSignedCert(notBefore time.Time, notAfter time.Time) *tls.Certificate {
	key, err := certutil.NewPrivateKey()
	Expect(err).ToNot(HaveOccurred())
	tmpl := x509.Certificate{
		// The serial number is irrelevant here; the bundle tests compare
		// PEM bytes, not serials.
		SerialNumber: new(big.Int).SetInt64(0),
		Subject: pkix.Name{
			CommonName:   "who",
			Organization: []string{"cares"},
		},
		NotBefore:             notBefore,
		NotAfter:              notAfter,
		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
		IsCA:                  true,
	}
	// Template is used as both subject and issuer: self-signed.
	certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
	Expect(err).ToNot(HaveOccurred())
	leaf, err := x509.ParseCertificate(certDERBytes)
	Expect(err).ToNot(HaveOccurred())
	// EncodePrivateKeyPEM and EncodeCertPEM return no error; the previous
	// Expect(err) after the key encoding only re-checked the stale err from
	// ParseCertificate and has been removed.
	keyBytes := certutil.EncodePrivateKeyPEM(key)
	crtBytes := certutil.EncodeCertPEM(leaf)
	crt, err := tls.X509KeyPair(crtBytes, keyBytes)
	Expect(err).ToNot(HaveOccurred())
	crt.Leaf = leaf
	return &crt
}
// CACertsToBundle PEM-encodes the leaf certificate of each given certificate
// and concatenates the encodings into a single CA bundle.
// An empty or nil input yields a nil bundle.
func CACertsToBundle(crts []*tls.Certificate) []byte {
	var bundle []byte
	for i := range crts {
		bundle = append(bundle, certutil.EncodeCertPEM(crts[i].Leaf)...)
	}
	return bundle
}
Use `math/rand` instead of `crypto/rand` in tests to speed up execution
Signed-off-by: Janusz Marcinkiewicz <e16f9959b915ffc54861626b1c977e9e33c0f595@nvidia.com>
package components
import (
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"math/big"
"math/rand"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
v12 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"kubevirt.io/kubevirt/pkg/certificates/bootstrap"
certutil "kubevirt.io/kubevirt/pkg/certificates/triple/cert"
)
var _ = Describe("Certificate Management", func() {
	Context("CA certificate bundle", func() {
		It("should drop expired CAs", func() {
			now := time.Now()
			current := NewSelfSignedCert(now, now.Add(1*time.Hour))
			// The only CA in the existing bundle is already expired, so the
			// merged bundle must contain just the current CA.
			given := []*tls.Certificate{
				NewSelfSignedCert(now.Add(-20*time.Minute), now.Add(-10*time.Minute)),
			}
			givenBundle := CACertsToBundle(given)
			expectBundle := CACertsToBundle([]*tls.Certificate{current})
			bundle, count, err := MergeCABundle(current, givenBundle, 2*time.Minute)
			Expect(err).ToNot(HaveOccurred())
			Expect(count).To(Equal(1))
			Expect(bundle).To(Equal(expectBundle))
		})
		It("should be properly appended when within the overlap period", func() {
			now := time.Now()
			// current became valid just now, i.e. within the 2 minute
			// overlap window, so the old CA must be kept alongside it.
			current := NewSelfSignedCert(now, now.Add(1*time.Hour))
			given := []*tls.Certificate{
				NewSelfSignedCert(now.Add(-20*time.Minute), now.Add(1*time.Hour)),
			}
			givenBundle := CACertsToBundle(given)
			expectBundle := CACertsToBundle([]*tls.Certificate{current, given[0]})
			bundle, count, err := MergeCABundle(current, givenBundle, 2*time.Minute)
			Expect(err).ToNot(HaveOccurred())
			Expect(count).To(Equal(2))
			Expect(bundle).To(Equal(expectBundle))
		})
		It("should kick out the first CA cert if it is outside of the overlap period", func() {
			now := time.Now()
			// current has been valid for 3 minutes, longer than the 2 minute
			// overlap window, so the older CA is no longer needed.
			current := NewSelfSignedCert(now.Add(-3*time.Minute), now.Add(1*time.Hour))
			given := []*tls.Certificate{
				NewSelfSignedCert(now.Add(-20*time.Minute), now.Add(1*time.Hour)),
			}
			givenBundle := CACertsToBundle(given)
			expectBundle := CACertsToBundle([]*tls.Certificate{current})
			bundle, count, err := MergeCABundle(current, givenBundle, 2*time.Minute)
			Expect(err).ToNot(HaveOccurred())
			Expect(count).To(Equal(1))
			Expect(bundle).To(Equal(expectBundle))
		})
		It("should kick out a CA cert which are outside of the overlap period", func() {
			now := time.Now()
			current := NewSelfSignedCert(now, now.Add(1*time.Hour))
			given := []*tls.Certificate{
				NewSelfSignedCert(now.Add(-20*time.Minute), now.Add(1*time.Hour)),
				NewSelfSignedCert(now.Add(-10*time.Minute), now.Add(1*time.Hour)),
			}
			givenBundle := CACertsToBundle(given)
			// NOTE(review): only the newer old CA (given[1]) is expected to
			// survive next to current — confirm this matches MergeCABundle's
			// pruning rule.
			expectBundle := CACertsToBundle([]*tls.Certificate{current, given[1]})
			bundle, count, err := MergeCABundle(current, givenBundle, 2*time.Minute)
			Expect(err).ToNot(HaveOccurred())
			Expect(count).To(Equal(2))
			Expect(bundle).To(Equal(expectBundle))
		})
		It("should kick out multiple CA certs which are outside of the overlap period", func() {
			now := time.Now()
			// current has been valid for 5 minutes, so all three older CAs
			// fall outside the 2 minute overlap window.
			current := NewSelfSignedCert(now.Add(-5*time.Minute), now.Add(-5*time.Minute).Add(1*time.Hour))
			given := []*tls.Certificate{
				NewSelfSignedCert(now.Add(-20*time.Minute), now.Add(1*time.Hour)),
				NewSelfSignedCert(now.Add(-10*time.Minute), now.Add(1*time.Hour)),
				NewSelfSignedCert(now.Add(-5*time.Minute), now.Add(1*time.Hour)),
			}
			givenBundle := CACertsToBundle(given)
			expectBundle := CACertsToBundle([]*tls.Certificate{current})
			bundle, count, err := MergeCABundle(current, givenBundle, 2*time.Minute)
			Expect(err).ToNot(HaveOccurred())
			Expect(count).To(Equal(1))
			Expect(bundle).To(Equal(expectBundle))
		})
		It("should ensure that the current CA is not added over and over again", func() {
			now := time.Now()
			current := NewSelfSignedCert(now, now.Add(1*time.Hour))
			// current appears twice in the incoming bundle; it must be
			// deduplicated in the merged result.
			given := []*tls.Certificate{
				NewSelfSignedCert(now.Add(-20*time.Minute), now.Add(1*time.Hour)),
				current,
				current,
			}
			givenBundle := CACertsToBundle(given)
			expectBundle := CACertsToBundle([]*tls.Certificate{current, given[0]})
			bundle, count, err := MergeCABundle(current, givenBundle, 2*time.Minute)
			Expect(err).ToNot(HaveOccurred())
			Expect(count).To(Equal(2))
			Expect(bundle).To(Equal(expectBundle))
		})
		It("should be protected against misuse by cropping big arrays", func() {
			now := time.Now()
			current := NewSelfSignedCert(now, now.Add(1*time.Hour))
			given := []*tls.Certificate{}
			// 19 still-valid CAs plus the current one; the merge is expected
			// to crop the result down to 11 entries.
			for i := 1; i < 20; i++ {
				given = append(given, NewSelfSignedCert(now.Add(-1*time.Minute), now.Add(1*time.Hour)))
			}
			givenBundle := CACertsToBundle(given)
			_, count, err := MergeCABundle(current, givenBundle, 2*time.Minute)
			Expect(err).ToNot(HaveOccurred())
			Expect(count).To(Equal(11))
		})
		It("should immediately suggest a rotation if the cert is not signed by the provided CA", func() {
			now := time.Now()
			// current and ca are unrelated self-signed certs, so current can
			// not have been signed by ca.
			current := NewSelfSignedCert(now, now.Add(1*time.Hour))
			ca := NewSelfSignedCert(now, now.Add(1*time.Hour))
			renewal := &v1.Duration{Duration: 4 * time.Hour}
			deadline := NextRotationDeadline(current, ca, renewal, nil)
			// A deadline in the past means "rotate immediately".
			Expect(deadline.Before(time.Now())).To(BeTrue())
		})
		It("should set notBefore on the certificate to notBefore value of the CA certificate ", func() {
			duration := &v1.Duration{Duration: 5 * time.Hour}
			caSecrets := NewCACertSecrets("test")
			var caSecret *v12.Secret
			for _, ca := range caSecrets {
				if ca.Name == KubeVirtCASecretName {
					caSecret = ca
				}
			}
			Expect(PopulateSecretWithCertificate(caSecret, nil, duration)).To(Succeed())
			caCrt, err := LoadCertificates(caSecret)
			Expect(err).NotTo(HaveOccurred())
			crtSecret := NewCertSecrets("test", "test")[0]
			Expect(PopulateSecretWithCertificate(crtSecret, caCrt, duration)).To(Succeed())
			crt, err := LoadCertificates(crtSecret)
			Expect(err).NotTo(HaveOccurred())
			Expect(crt.Leaf.NotBefore).To(Equal(caCrt.Leaf.NotBefore))
		})
		DescribeTable("should set the notAfter on the certificate according to the supplied duration", func(caDuration time.Duration) {
			crtDuration := &v1.Duration{Duration: 2 * time.Hour}
			caSecrets := NewCACertSecrets("test")
			var caSecret *v12.Secret
			for _, ca := range caSecrets {
				if ca.Name == KubeVirtCASecretName {
					caSecret = ca
				}
			}
			now := time.Now()
			Expect(PopulateSecretWithCertificate(caSecret, nil, &v1.Duration{Duration: caDuration})).To(Succeed())
			caCrt, err := LoadCertificates(caSecret)
			Expect(err).NotTo(HaveOccurred())
			crtSecret := NewCertSecrets("test", "test")[0]
			Expect(PopulateSecretWithCertificate(crtSecret, caCrt, crtDuration)).To(Succeed())
			crt, err := LoadCertificates(crtSecret)
			Expect(err).NotTo(HaveOccurred())
			// Allow a 10 second tolerance for slow certificate generation.
			Expect(crt.Leaf.NotAfter.Unix()).To(BeNumerically("==", now.Add(crtDuration.Duration).Unix(), 10))
		},
			Entry("with a long valid CA", 24*time.Hour),
			Entry("with a CA which expires before the certificate rotation", 1*time.Hour),
		)
		DescribeTable("should suggest a rotation on the certificate according to its expiration", func(caDuration time.Duration) {
			crtDuration := &v1.Duration{Duration: 2 * time.Hour}
			crtRenewBefore := &v1.Duration{Duration: 1 * time.Hour}
			caSecrets := NewCACertSecrets("test")
			var caSecret *v12.Secret
			for _, ca := range caSecrets {
				if ca.Name == KubeVirtCASecretName {
					caSecret = ca
				}
			}
			Expect(PopulateSecretWithCertificate(caSecret, nil, &v1.Duration{Duration: caDuration})).To(Succeed())
			caCrt, err := LoadCertificates(caSecret)
			now := time.Now()
			Expect(err).NotTo(HaveOccurred())
			crtSecret := NewCertSecrets("test", "test")[0]
			Expect(PopulateSecretWithCertificate(crtSecret, caCrt, crtDuration)).To(Succeed())
			crt, err := LoadCertificates(crtSecret)
			Expect(err).NotTo(HaveOccurred())
			// 2h lifetime minus 1h renew-before => rotation in one hour.
			deadline := now.Add(time.Hour)
			// Generating certificates may take a little bit of time to execute (entropy, ...). Since we can't
			// inject a fake time into the foreign code which generates the certificates, allow a generous diff of three
			// seconds.
			Expect(NextRotationDeadline(crt, caCrt, crtRenewBefore, nil).Unix()).To(BeNumerically("==", deadline.Unix(), 3))
		},
			Entry("with a long valid CA", 24*time.Hour),
			Entry("with a CA which expires before the certificate rotation", 1*time.Hour),
		)
		// NOTE(review): "scretName" is a typo for "secretName"; it is local
		// to this closure, so renaming it is a safe follow-up.
		DescribeTable("should successfully sign with the current CA the certificate for", func(scretName string) {
			duration := &v1.Duration{Duration: 5 * time.Hour}
			caSecrets := NewCACertSecrets("test")
			var caSecret *v12.Secret
			for _, ca := range caSecrets {
				if ca.Name == KubeVirtCASecretName {
					caSecret = ca
				}
			}
			Expect(PopulateSecretWithCertificate(caSecret, nil, duration)).To(Succeed())
			caCrt, err := LoadCertificates(caSecret)
			Expect(err).NotTo(HaveOccurred())
			var crtSecret *v12.Secret
			for _, s := range NewCertSecrets("test", "test") {
				if s.Name == scretName {
					crtSecret = s
					break
				}
			}
			Expect(crtSecret).ToNot(BeNil())
			Expect(PopulateSecretWithCertificate(crtSecret, caCrt, duration)).To(Succeed())
			crt, err := LoadCertificates(crtSecret)
			Expect(err).ToNot(HaveOccurred())
			Expect(crt).ToNot(BeNil())
		},
			Entry("virt-handler", VirtHandlerCertSecretName),
			Entry("virt-controller", VirtControllerCertSecretName),
			Entry("virt-api", VirtApiCertSecretName),
			Entry("virt-operator", VirtOperatorCertSecretName),
		)
		It("should suggest earlier rotation if CA expires before cert", func() {
			// The cert alone would rotate after 24h-18h=6h; the CA also
			// expires after 6h, which must cap the rotation deadline.
			caDuration := 6 * time.Hour
			crtDuration := &v1.Duration{Duration: 24 * time.Hour}
			crtRenewBefore := &v1.Duration{Duration: 18 * time.Hour}
			caSecrets := NewCACertSecrets("test")
			var caSecret *v12.Secret
			for _, ca := range caSecrets {
				if ca.Name == KubeVirtCASecretName {
					caSecret = ca
				}
			}
			Expect(PopulateSecretWithCertificate(caSecret, nil, &v1.Duration{Duration: caDuration})).To(Succeed())
			caCrt, err := LoadCertificates(caSecret)
			now := time.Now()
			Expect(err).NotTo(HaveOccurred())
			crtSecret := NewCertSecrets("test", "test")[0]
			Expect(PopulateSecretWithCertificate(crtSecret, caCrt, crtDuration)).To(Succeed())
			crt, err := LoadCertificates(crtSecret)
			Expect(err).NotTo(HaveOccurred())
			deadline := now.Add(6 * time.Hour)
			// Generating certificates may take a little bit of time to execute (entropy, ...). Since we can't
			// inject a fake time into the foreign code which generates the certificates, allow a generous diff of three
			// seconds.
			Expect(NextRotationDeadline(crt, caCrt, crtRenewBefore, nil).Unix()).To(BeNumerically("==", deadline.Unix(), 3))
		})
	})
	It("should set the right namespaces on the certificate secrets", func() {
		secrets := NewCertSecrets("install_namespace", "operator_namespace")
		// All secrets but the last live in the install namespace...
		for _, secret := range secrets[:len(secrets)-1] {
			Expect(secret.Namespace).To(Equal("install_namespace"))
			Expect(secret.Name).ToNot(Equal(VirtOperatorCertSecretName))
		}
		// ...while the virt-operator secret lives in the operator namespace.
		Expect(secrets[len(secrets)-1].Namespace).To(Equal("operator_namespace"))
	})
	It("should create the kubevirt-ca configmap for the right namespace", func() {
		configMaps := NewCAConfigMaps("namespace")
		var configMap *v12.ConfigMap
		for _, cm := range configMaps {
			if cm.Name == KubeVirtCASecretName {
				configMap = cm
			}
		}
		Expect(configMap.Namespace).To(Equal("namespace"))
	})
	It("should populate secrets with certificates", func() {
		secrets := NewCertSecrets("install_namespace", "operator_namespace")
		caSecrets := NewCACertSecrets("test")
		var caSecret *v12.Secret
		for _, ca := range caSecrets {
			if ca.Name == KubeVirtCASecretName {
				caSecret = ca
			}
		}
		Expect(PopulateSecretWithCertificate(caSecret, nil, &v1.Duration{Duration: 1 * time.Hour})).To(Succeed())
		Expect(caSecret.Data).To(HaveKey(bootstrap.CertBytesValue))
		Expect(caSecret.Data).To(HaveKey(bootstrap.KeyBytesValue))
		caCert, err := LoadCertificates(caSecret)
		Expect(err).ToNot(HaveOccurred())
		// Every service secret must be populated from the fresh CA and
		// round-trip through LoadCertificates.
		for _, secret := range secrets {
			Expect(PopulateSecretWithCertificate(secret, caCert, &v1.Duration{Duration: 1 * time.Hour})).To(Succeed())
			Expect(secret.Data).To(HaveKey(bootstrap.CertBytesValue))
			Expect(secret.Data).To(HaveKey(bootstrap.KeyBytesValue))
			_, err = LoadCertificates(secret)
			Expect(err).ToNot(HaveOccurred())
		}
	})
})
// NewSelfSignedCert creates a self-signed CA certificate (CN "who") that is
// valid in the [notBefore, notAfter] interval, for use in tests only.
// The returned certificate has Leaf populated so callers can inspect the
// parsed x509 certificate without re-parsing the DER bytes.
//
// math/rand is used deliberately instead of crypto/rand to keep test
// execution fast; the certificates are never used for real security.
func NewSelfSignedCert(notBefore time.Time, notAfter time.Time) *tls.Certificate {
	key, err := certutil.NewPrivateKey()
	Expect(err).ToNot(HaveOccurred())
	tmpl := x509.Certificate{
		// The serial number is irrelevant here; the bundle tests compare
		// PEM bytes, not serials.
		SerialNumber: new(big.Int).SetInt64(0),
		Subject: pkix.Name{
			CommonName:   "who",
			Organization: []string{"cares"},
		},
		NotBefore:             notBefore,
		NotAfter:              notAfter,
		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
		IsCA:                  true,
	}
	// Seed with nanosecond resolution: a Unix-seconds seed gives every
	// certificate created within the same second an identical random stream.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	// Template is used as both subject and issuer: self-signed.
	certDERBytes, err := x509.CreateCertificate(r, &tmpl, &tmpl, key.Public(), key)
	Expect(err).ToNot(HaveOccurred())
	leaf, err := x509.ParseCertificate(certDERBytes)
	Expect(err).ToNot(HaveOccurred())
	// EncodePrivateKeyPEM and EncodeCertPEM return no error; the previous
	// Expect(err) after the key encoding only re-checked the stale err from
	// ParseCertificate and has been removed.
	keyBytes := certutil.EncodePrivateKeyPEM(key)
	crtBytes := certutil.EncodeCertPEM(leaf)
	crt, err := tls.X509KeyPair(crtBytes, keyBytes)
	Expect(err).ToNot(HaveOccurred())
	crt.Leaf = leaf
	return &crt
}
// CACertsToBundle concatenates the PEM encoding of every certificate's leaf
// into one CA bundle. A nil or empty input produces a nil bundle.
func CACertsToBundle(crts []*tls.Certificate) []byte {
	var bundle []byte
	for _, c := range crts {
		pemBytes := certutil.EncodeCertPEM(c.Leaf)
		bundle = append(bundle, pemBytes...)
	}
	return bundle
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package time provides functionality for measuring and displaying time.
//
// The calendrical calculations always assume a Gregorian calendar, with
// no leap seconds.
package time
import "errors"
// A Time represents an instant in time with nanosecond precision.
//
// Programs using times should typically store and pass them as values,
// not pointers. That is, time variables and struct fields should be of
// type time.Time, not *time.Time. A Time value can be used by
// multiple goroutines simultaneously.
//
// Time instants can be compared using the Before, After, and Equal methods.
// The Sub method subtracts two instants, producing a Duration.
// The Add method adds a Time and a Duration, producing a Time.
//
// The zero value of type Time is January 1, year 1, 00:00:00.000000000 UTC.
// As this time is unlikely to come up in practice, the IsZero method gives
// a simple way of detecting a time that has not been initialized explicitly.
//
// Each Time has associated with it a Location, consulted when computing the
// presentation form of the time, such as in the Format, Hour, and Year methods.
// The methods Local, UTC, and In return a Time with a specific location.
// Changing the location in this way changes only the presentation; it does not
// change the instant in time being denoted and therefore does not affect the
// computations described in earlier paragraphs.
//
// Note that the Go == operator compares not just the time instant but also the
// Location. Therefore, Time values should not be used as map or database keys
// without first guaranteeing that the identical Location has been set for all
// values, which can be achieved through use of the UTC or Local method.
//
type Time struct {
	// sec gives the number of seconds elapsed since
	// January 1, year 1 00:00:00 UTC.
	sec int64

	// nsec specifies a non-negative nanosecond
	// offset within the second named by sec.
	// It must be in the range [0, 999999999].
	nsec int32

	// loc specifies the Location that should be used to
	// determine the minute, hour, month, day, and year
	// that correspond to this Time.
	// The nil location means UTC.
	// All UTC times are represented with loc==nil, never loc==&utcLoc.
	loc *Location
}
// setLoc stores loc as t's Location, normalizing the canonical UTC location
// to nil so that all UTC times share a single representation.
func (t *Time) setLoc(loc *Location) {
	if loc == &utcLoc {
		t.loc = nil
		return
	}
	t.loc = loc
}
// After reports whether the time instant t is after u.
func (t Time) After(u Time) bool {
	if t.sec != u.sec {
		return t.sec > u.sec
	}
	return t.nsec > u.nsec
}
// Before reports whether the time instant t is before u.
func (t Time) Before(u Time) bool {
	if t.sec != u.sec {
		return t.sec < u.sec
	}
	return t.nsec < u.nsec
}
// Equal reports whether t and u represent the same time instant.
// Two times can be equal even if they are in different locations.
// For example, 6:00 +0200 CEST and 4:00 UTC are Equal.
// This comparison is different from using t == u, which also compares
// the locations.
func (t Time) Equal(u Time) bool {
	if t.sec != u.sec {
		return false
	}
	return t.nsec == u.nsec
}
// A Month specifies a month of the year (January = 1, ...).
type Month int

const (
	January Month = 1 + iota
	February
	March
	April
	May
	June
	July
	August
	September
	October
	November
	December
)

// months holds the English month names, indexed by Month-1.
var months = [...]string{
	"January",
	"February",
	"March",
	"April",
	"May",
	"June",
	"July",
	"August",
	"September",
	"October",
	"November",
	"December",
}
// String returns the English name of the month ("January", "February", ...).
func (m Month) String() string {
	return months[int(m)-1]
}
// A Weekday specifies a day of the week (Sunday = 0, ...).
type Weekday int

const (
	Sunday Weekday = iota
	Monday
	Tuesday
	Wednesday
	Thursday
	Friday
	Saturday
)

// days holds the English day names, indexed by Weekday.
var days = [...]string{
	"Sunday",
	"Monday",
	"Tuesday",
	"Wednesday",
	"Thursday",
	"Friday",
	"Saturday",
}
// String returns the English name of the day ("Sunday", "Monday", ...).
func (d Weekday) String() string {
	return days[int(d)]
}
// Computations on time.
//
// The zero value for a Time is defined to be
// January 1, year 1, 00:00:00.000000000 UTC
// which (1) looks like a zero, or as close as you can get in a date
// (1-1-1 00:00:00 UTC), (2) is unlikely enough to arise in practice to
// be a suitable "not set" sentinel, unlike Jan 1 1970, and (3) has a
// non-negative year even in time zones west of UTC, unlike 1-1-0
// 00:00:00 UTC, which would be 12-31-(-1) 19:00:00 in New York.
//
// The zero Time value does not force a specific epoch for the time
// representation. For example, to use the Unix epoch internally, we
// could define that to distinguish a zero value from Jan 1 1970, that
// time would be represented by sec=-1, nsec=1e9. However, it does
// suggest a representation, namely using 1-1-1 00:00:00 UTC as the
// epoch, and that's what we do.
//
// The Add and Sub computations are oblivious to the choice of epoch.
//
// The presentation computations - year, month, minute, and so on - all
// rely heavily on division and modulus by positive constants. For
// calendrical calculations we want these divisions to round down, even
// for negative values, so that the remainder is always positive, but
// Go's division (like most hardware division instructions) rounds to
// zero. We can still do those computations and then adjust the result
// for a negative numerator, but it's annoying to write the adjustment
// over and over. Instead, we can change to a different epoch so long
// ago that all the times we care about will be positive, and then round
// to zero and round down coincide. These presentation routines already
// have to add the zone offset, so adding the translation to the
// alternate epoch is cheap. For example, having a non-negative time t
// means that we can write
//
// sec = t % 60
//
// instead of
//
// sec = t % 60
// if sec < 0 {
// sec += 60
// }
//
// everywhere.
//
// The calendar runs on an exact 400 year cycle: a 400-year calendar
// printed for 1970-2469 will apply as well to 2370-2769. Even the days
// of the week match up. It simplifies the computations to choose the
// cycle boundaries so that the exceptional years are always delayed as
// long as possible. That means choosing a year equal to 1 mod 400, so
// that the first leap year is the 4th year, the first missed leap year
// is the 100th year, and the missed missed leap year is the 400th year.
// So we'd prefer instead to print a calendar for 2001-2400 and reuse it
// for 2401-2800.
//
// Finally, it's convenient if the delta between the Unix epoch and
// long-ago epoch is representable by an int64 constant.
//
// These three considerations—choose an epoch as early as possible, that
// uses a year equal to 1 mod 400, and that is no more than 2⁶³ seconds
// earlier than 1970—bring us to the year -292277022399. We refer to
// this year as the absolute zero year, and to times measured as a uint64
// seconds since this year as absolute times.
//
// Times measured as an int64 seconds since the year 1—the representation
// used for Time's sec field—are called internal times.
//
// Times measured as an int64 seconds since the year 1970 are called Unix
// times.
//
// It is tempting to just use the year 1 as the absolute epoch, defining
// that the routines are only valid for years >= 1. However, the
// routines would then be invalid when displaying the epoch in time zones
// west of UTC, since it is year 0. It doesn't seem tenable to say that
// printing the zero time correctly isn't supported in half the time
// zones. By comparison, it's reasonable to mishandle some times in
// the year -292277022399.
//
// All this is opaque to clients of the API and can be changed if a
// better implementation presents itself.
const (
	// The unsigned zero year for internal calculations.
	// Must be 1 mod 400, and times before it will not compute correctly,
	// but otherwise can be changed at will.
	absoluteZeroYear = -292277022399

	// The year of the zero Time.
	// Assumed by the unixToInternal computation below.
	internalYear = 1

	// Offsets to convert between internal and absolute or Unix times.
	// The multiplication by 365.2425 (the mean Gregorian year length)
	// is exact: untyped constant arithmetic in Go is arbitrary-precision
	// and the product happens to be integral.
	absoluteToInternal int64 = (absoluteZeroYear - internalYear) * 365.2425 * secondsPerDay
	internalToAbsolute       = -absoluteToInternal

	// Days from year 1 through year 1969, counting Julian-to-Gregorian
	// leap days, converted to seconds.
	unixToInternal int64 = (1969*365 + 1969/4 - 1969/100 + 1969/400) * secondsPerDay
	internalToUnix int64 = -unixToInternal
)
// IsZero reports whether t represents the zero time instant,
// January 1, year 1, 00:00:00 UTC.
func (t Time) IsZero() bool {
	return t.nsec == 0 && t.sec == 0
}
// abs returns the time t as an absolute time, adjusted by the zone offset.
// It is called when computing a presentation property like Month or Hour.
func (t Time) abs() uint64 {
	l := t.loc
	// Avoid function calls when possible.
	// A nil loc means UTC (see setLoc); the local sentinel must be
	// resolved through get. NOTE(review): get is assumed to handle a
	// nil receiver — confirm in Location.get.
	if l == nil || l == &localLoc {
		l = l.get()
	}
	sec := t.sec + internalToUnix
	if l != &utcLoc {
		// Fast path: reuse the cached zone when sec falls inside its
		// validity window [cacheStart, cacheEnd).
		if l.cacheZone != nil && l.cacheStart <= sec && sec < l.cacheEnd {
			sec += int64(l.cacheZone.offset)
		} else {
			_, offset, _, _, _ := l.lookup(sec)
			sec += int64(offset)
		}
	}
	// Shift from Unix seconds to seconds since the absolute zero year.
	return uint64(sec + (unixToInternal + internalToAbsolute))
}
// locabs is a combination of the Zone and abs methods,
// extracting both return values from a single zone lookup.
func (t Time) locabs() (name string, offset int, abs uint64) {
	l := t.loc
	// Resolve nil (UTC) and the local sentinel; see Time.abs.
	if l == nil || l == &localLoc {
		l = l.get()
	}
	// Avoid function call if we hit the local time cache.
	sec := t.sec + internalToUnix
	if l != &utcLoc {
		// Fast path: the cached zone covers [cacheStart, cacheEnd).
		if l.cacheZone != nil && l.cacheStart <= sec && sec < l.cacheEnd {
			name = l.cacheZone.name
			offset = l.cacheZone.offset
		} else {
			name, offset, _, _, _ = l.lookup(sec)
		}
		sec += int64(offset)
	} else {
		name = "UTC"
	}
	// Shift from Unix seconds to the absolute epoch used by absDate.
	abs = uint64(sec + (unixToInternal + internalToAbsolute))
	return
}
// Date returns the year, month, and day in which t occurs.
func (t Time) Date() (year int, month Month, day int) {
	year, month, day, _ = t.date(true)
	return year, month, day
}
// Year returns the year in which t occurs.
func (t Time) Year() int {
	y, _, _, _ := t.date(false)
	return y
}
// Month returns the month of the year specified by t.
func (t Time) Month() Month {
	_, m, _, _ := t.date(true)
	return m
}
// Day returns the day of the month specified by t.
func (t Time) Day() int {
	_, _, d, _ := t.date(true)
	return d
}
// Weekday returns the day of the week specified by t.
func (t Time) Weekday() Weekday {
	abs := t.abs()
	return absWeekday(abs)
}
// absWeekday is like Weekday but operates on an absolute time.
func absWeekday(abs uint64) Weekday {
	// January 1 of the absolute year (like January 1 of 2001) was a
	// Monday, so shift by Monday's ordinal before reducing mod one week.
	secs := (abs + uint64(Monday)*secondsPerDay) % secondsPerWeek
	return Weekday(int(secs) / secondsPerDay)
}
// ISOWeek returns the ISO 8601 year and week number in which t occurs.
// Week ranges from 1 to 53. Jan 01 to Jan 03 of year n might belong to
// week 52 or 53 of year n-1, and Dec 29 to Dec 31 might belong to week 1
// of year n+1.
func (t Time) ISOWeek() (year, week int) {
	year, month, day, yday := t.date(true)
	wday := int(t.Weekday()+6) % 7 // weekday but Monday = 0.
	const (
		Mon int = iota
		Tue
		Wed
		Thu
		Fri
		Sat
		Sun
	)
	// Calculate week as number of Mondays in year up to
	// and including today, plus 1 because the first week is week 0.
	// Putting the + 1 inside the numerator as a + 7 keeps the
	// numerator from being negative, which would cause it to
	// round incorrectly.
	week = (yday - wday + 7) / 7
	// The week number is now correct under the assumption
	// that the first Monday of the year is in week 1.
	// If Jan 1 is a Tuesday, Wednesday, or Thursday, the first Monday
	// is actually in week 2.
	jan1wday := (wday - yday + 7*53) % 7 // +7*53 keeps the numerator non-negative.
	if Tue <= jan1wday && jan1wday <= Thu {
		week++
	}
	// If the week number is still 0, we're in early January but in
	// the last week of last year.
	if week == 0 {
		year--
		week = 52
		// A year has 53 weeks when Jan 1 or Dec 31 is a Thursday,
		// meaning Jan 1 of the next year is a Friday
		// or it was a leap year and Jan 1 of the next year is a Saturday.
		if jan1wday == Fri || (jan1wday == Sat && isLeap(year)) {
			week++
		}
	}
	// December 29 to 31 are in week 1 of next year if
	// they are after the last Thursday of the year and
	// December 31 is a Monday, Tuesday, or Wednesday.
	if month == December && day >= 29 && wday < Thu {
		if dec31wday := (wday + 31 - day) % 7; Mon <= dec31wday && dec31wday <= Wed {
			year++
			week = 1
		}
	}
	return
}
// Clock returns the hour, minute, and second within the day specified by t.
func (t Time) Clock() (hour, min, sec int) {
	hour, min, sec = absClock(t.abs())
	return hour, min, sec
}
// absClock is like clock but operates on an absolute time.
func absClock(abs uint64) (hour, min, sec int) {
	// Peel off hours and minutes from the seconds within the day.
	rem := int(abs % secondsPerDay)
	hour = rem / secondsPerHour
	rem -= hour * secondsPerHour
	min = rem / secondsPerMinute
	sec = rem - min*secondsPerMinute
	return
}
// Hour returns the hour within the day specified by t, in the range [0, 23].
func (t Time) Hour() int {
	daySec := t.abs() % secondsPerDay
	return int(daySec) / secondsPerHour
}
// Minute returns the minute offset within the hour specified by t, in the range [0, 59].
func (t Time) Minute() int {
	hourSec := t.abs() % secondsPerHour
	return int(hourSec) / secondsPerMinute
}
// Second returns the second offset within the minute specified by t, in the range [0, 59].
func (t Time) Second() int {
	minSec := t.abs() % secondsPerMinute
	return int(minSec)
}
// Nanosecond returns the nanosecond offset within the second specified by t,
// in the range [0, 999999999].
func (t Time) Nanosecond() int {
	nsec := t.nsec
	return int(nsec)
}
// YearDay returns the day of the year specified by t, in the range [1,365] for non-leap years,
// and [1,366] in leap years.
func (t Time) YearDay() int {
	_, _, _, yday := t.date(false)
	// yday is zero-based internally; the exported value is one-based.
	return yday + 1
}
// A Duration represents the elapsed time between two instants
// as an int64 nanosecond count. The representation limits the
// largest representable duration to approximately 290 years.
type Duration int64

const (
	// minDuration and maxDuration are the saturation values
	// returned by Time.Sub when the difference overflows.
	minDuration Duration = -1 << 63
	maxDuration Duration = 1<<63 - 1
)
// Common durations. There is no definition for units of Day or larger
// to avoid confusion across daylight savings time zone transitions.
//
// To count the number of units in a Duration, divide:
//	second := time.Second
//	fmt.Print(int64(second/time.Millisecond)) // prints 1000
//
// To convert an integer number of units to a Duration, multiply:
//	seconds := 10
//	fmt.Print(time.Duration(seconds)*time.Second) // prints 10s
//
const (
	// Only Nanosecond needs the explicit Duration type; the others
	// inherit it from the constant arithmetic on Nanosecond.
	Nanosecond  Duration = 1
	Microsecond          = 1000 * Nanosecond
	Millisecond          = 1000 * Microsecond
	Second               = 1000 * Millisecond
	Minute               = 60 * Second
	Hour                 = 60 * Minute
)
// String returns a string representing the duration in the form "72h3m0.5s".
// Leading zero units are omitted. As a special case, durations less than one
// second format use a smaller unit (milli-, micro-, or nanoseconds) to ensure
// that the leading digit is non-zero. The zero duration formats as 0s.
func (d Duration) String() string {
	// Largest time is 2540400h10m10.000000000s
	// The buffer is filled from the end; w always indexes the first
	// written byte.
	var buf [32]byte
	w := len(buf)

	u := uint64(d)
	neg := d < 0
	if neg {
		// Unsigned negation handles minDuration (-1 << 63) without overflow.
		u = -u
	}

	if u < uint64(Second) {
		// Special case: if duration is smaller than a second,
		// use smaller units, like 1.2ms
		var prec int
		w--
		buf[w] = 's'
		w--
		switch {
		case u == 0:
			return "0s"
		case u < uint64(Microsecond):
			// print nanoseconds
			prec = 0
			buf[w] = 'n'
		case u < uint64(Millisecond):
			// print microseconds
			prec = 3
			// U+00B5 'µ' micro sign == 0xC2 0xB5
			w-- // Need room for two bytes.
			copy(buf[w:], "µ")
		default:
			// print milliseconds
			prec = 6
			buf[w] = 'm'
		}
		w, u = fmtFrac(buf[:w], u, prec)
		w = fmtInt(buf[:w], u)
	} else {
		w--
		buf[w] = 's'

		w, u = fmtFrac(buf[:w], u, 9)

		// u is now integer seconds
		w = fmtInt(buf[:w], u%60)
		u /= 60

		// u is now integer minutes
		if u > 0 {
			w--
			buf[w] = 'm'
			w = fmtInt(buf[:w], u%60)
			u /= 60

			// u is now integer hours
			// Stop at hours because days can be different lengths.
			if u > 0 {
				w--
				buf[w] = 'h'
				w = fmtInt(buf[:w], u)
			}
		}
	}

	if neg {
		w--
		buf[w] = '-'
	}

	return string(buf[w:])
}
// fmtFrac formats the fraction of v/10**prec (e.g., ".12345") into the
// tail of buf, omitting trailing zeros. It omits the decimal
// point too when the fraction is 0. It returns the index where the
// output bytes begin and the value v/10**prec.
func fmtFrac(buf []byte, v uint64, prec int) (nw int, nv uint64) {
	// Walk the fraction digits from least to most significant,
	// skipping (not emitting) trailing zeros.
	w := len(buf)
	emitting := false
	for i := 0; i < prec; i++ {
		digit := v % 10
		if digit != 0 {
			emitting = true
		}
		if emitting {
			w--
			buf[w] = byte(digit) + '0'
		}
		v /= 10
	}
	// Write the decimal point only when at least one digit was emitted.
	if emitting {
		w--
		buf[w] = '.'
	}
	return w, v
}
// fmtInt formats v into the tail of buf.
// It returns the index where the output begins.
func fmtInt(buf []byte, v uint64) int {
	w := len(buf)
	if v == 0 {
		w--
		buf[w] = '0'
		return w
	}
	// Emit decimal digits from least to most significant.
	for v > 0 {
		w--
		buf[w] = byte(v%10) + '0'
		v /= 10
	}
	return w
}
// Nanoseconds returns the duration as an integer nanosecond count.
func (d Duration) Nanoseconds() int64 {
	return int64(d)
}
// These methods return float64 because the dominant
// use case is for printing a floating point number like 1.5s, and
// a truncation to integer would make them not useful in those cases.
// Splitting the integer and fraction ourselves guarantees that
// converting the returned float64 to an integer rounds the same
// way that a pure integer conversion would have, even in cases
// where, say, float64(d.Nanoseconds())/1e9 would have rounded
// differently.
// Seconds returns the duration as a floating point number of seconds.
func (d Duration) Seconds() float64 {
	whole := d / Second
	frac := d % Second
	// Split integer and fractional parts before converting so the
	// result rounds the same way a pure integer conversion would.
	return float64(whole) + float64(frac)*1e-9
}
// Minutes returns the duration as a floating point number of minutes.
func (d Duration) Minutes() float64 {
	whole := d / Minute
	frac := d % Minute
	return float64(whole) + float64(frac)*(1e-9/60)
}
// Hours returns the duration as a floating point number of hours.
func (d Duration) Hours() float64 {
	whole := d / Hour
	frac := d % Hour
	return float64(whole) + float64(frac)*(1e-9/60/60)
}
// Add returns the time t+d.
func (t Time) Add(d Duration) Time {
	// Split d into whole seconds and nanoseconds, then renormalize
	// nsec into [0, 1e9) by carrying into (or borrowing from) sec.
	t.sec += int64(d / 1e9)
	nsec := t.nsec + int32(d%1e9)
	switch {
	case nsec >= 1e9:
		t.sec++
		nsec -= 1e9
	case nsec < 0:
		t.sec--
		nsec += 1e9
	}
	t.nsec = nsec
	return t
}
// Sub returns the duration t-u. If the result exceeds the maximum (or minimum)
// value that can be stored in a Duration, the maximum (or minimum) duration
// will be returned.
// To compute t-d for a duration d, use t.Add(-d).
func (t Time) Sub(u Time) Duration {
	d := Duration(t.sec-u.sec)*Second + Duration(t.nsec-u.nsec)
	// If adding d back to u reproduces t, the subtraction did not
	// overflow; otherwise saturate in the direction of the true sign.
	if u.Add(d).Equal(t) {
		return d
	}
	if t.Before(u) {
		return minDuration // t - u is negative out of range
	}
	return maxDuration // t - u is positive out of range
}
// Since returns the time elapsed since t.
// It is shorthand for time.Now().Sub(t).
func Since(t Time) Duration {
	now := Now()
	return now.Sub(t)
}
// Until returns the duration until t.
// It is shorthand for t.Sub(time.Now()).
func Until(t Time) Duration {
	now := Now()
	return t.Sub(now)
}
// AddDate returns the time corresponding to adding the
// given number of years, months, and days to t.
// For example, AddDate(-1, 2, 3) applied to January 1, 2011
// returns March 4, 2010.
//
// AddDate normalizes its result in the same way that Date does,
// so, for example, adding one month to October 31 yields
// December 1, the normalized form for November 31.
func (t Time) AddDate(years int, months int, days int) Time {
	y, m, d := t.Date()
	hh, mm, ss := t.Clock()
	// Date performs the normalization of the shifted calendar fields.
	return Date(y+years, m+Month(months), d+days, hh, mm, ss, int(t.nsec), t.Location())
}
const (
	secondsPerMinute = 60
	secondsPerHour   = 60 * 60
	secondsPerDay    = 24 * secondsPerHour
	secondsPerWeek   = 7 * secondsPerDay
	// Gregorian leap days: 97 per 400-year cycle, 24 per century
	// (the 100th year is skipped), 1 per 4-year cycle.
	daysPer400Years = 365*400 + 97
	daysPer100Years = 365*100 + 24
	daysPer4Years   = 365*4 + 1
)
// date computes the year, day of year, and when full=true,
// the month and day in which t occurs.
func (t Time) date(full bool) (year int, month Month, day int, yday int) {
	abs := t.abs()
	return absDate(abs, full)
}
// absDate is like date but operates on an absolute time.
//
// yday is zero-based within the year (YearDay adds the final 1);
// when full is true, month and day (one-based) are also computed.
func absDate(abs uint64, full bool) (year int, month Month, day int, yday int) {
	// Split into time and day.
	d := abs / secondsPerDay

	// Account for 400 year cycles.
	n := d / daysPer400Years
	y := 400 * n
	d -= daysPer400Years * n

	// Cut off 100-year cycles.
	// The last cycle has one extra leap year, so on the last day
	// of that year, day / daysPer100Years will be 4 instead of 3.
	// Cut it back down to 3 by subtracting n>>2.
	n = d / daysPer100Years
	n -= n >> 2
	y += 100 * n
	d -= daysPer100Years * n

	// Cut off 4-year cycles.
	// The last cycle has a missing leap year, which does not
	// affect the computation.
	n = d / daysPer4Years
	y += 4 * n
	d -= daysPer4Years * n

	// Cut off years within a 4-year cycle.
	// The last year is a leap year, so on the last day of that year,
	// day / 365 will be 4 instead of 3. Cut it back down to 3
	// by subtracting n>>2.
	n = d / 365
	n -= n >> 2
	y += n
	d -= 365 * n

	year = int(int64(y) + absoluteZeroYear)
	yday = int(d)

	if !full {
		return
	}

	day = yday
	if isLeap(year) {
		// Leap year
		switch {
		case day > 31+29-1:
			// After leap day; pretend it wasn't there.
			day--
		case day == 31+29-1:
			// Leap day.
			month = February
			day = 29
			return
		}
	}

	// Estimate month on assumption that every month has 31 days.
	// The estimate may be too low by at most one month, so adjust.
	month = Month(day / 31)
	end := int(daysBefore[month+1])
	var begin int
	if day >= end {
		month++
		begin = end
	} else {
		begin = int(daysBefore[month])
	}

	month++ // because January is 1
	day = day - begin + 1
	return
}
// daysBefore[m] counts the number of days in a non-leap year
// before month m begins. There is an entry for m=12, counting
// the number of days before January of next year (365).
// Used by absDate (month estimation) and daysIn (month lengths).
var daysBefore = [...]int32{
	0,
	31,
	31 + 28,
	31 + 28 + 31,
	31 + 28 + 31 + 30,
	31 + 28 + 31 + 30 + 31,
	31 + 28 + 31 + 30 + 31 + 30,
	31 + 28 + 31 + 30 + 31 + 30 + 31,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31,
}
// daysIn returns the number of days in month m of the given year.
func daysIn(m Month, year int) int {
	if isLeap(year) && m == February {
		return 29
	}
	// The difference of cumulative day counts gives the month length.
	return int(daysBefore[m] - daysBefore[m-1])
}
// Provided by package runtime.
// now reports the current wall clock as seconds since the Unix epoch
// plus a nanosecond offset within that second (see Now, which shifts
// sec by unixToInternal).
func now() (sec int64, nsec int32)
// Now returns the current local time.
func Now() Time {
	sec, nsec := now()
	// now() reports Unix time; shift to the internal year-1 epoch.
	return Time{sec: sec + unixToInternal, nsec: nsec, loc: Local}
}
// UTC returns t with the location set to UTC.
func (t Time) UTC() Time {
	u := t
	u.setLoc(&utcLoc)
	return u
}
// Local returns t with the location set to local time.
func (t Time) Local() Time {
	lt := t
	lt.setLoc(Local)
	return lt
}
// In returns t with the location information set to loc.
//
// In panics if loc is nil.
func (t Time) In(loc *Location) Time {
	if loc == nil {
		panic("time: missing Location in call to Time.In")
	}
	u := t
	u.setLoc(loc)
	return u
}
// Location returns the time zone information associated with t.
func (t Time) Location() *Location {
	if t.loc == nil {
		// A nil location means UTC (see setLoc).
		return UTC
	}
	return t.loc
}
// Zone computes the time zone in effect at time t, returning the abbreviated
// name of the zone (such as "CET") and its offset in seconds east of UTC.
func (t Time) Zone() (name string, offset int) {
	unix := t.sec + internalToUnix
	name, offset, _, _, _ = t.loc.lookup(unix)
	return name, offset
}
// Unix returns t as a Unix time, the number of seconds elapsed
// since January 1, 1970 UTC.
func (t Time) Unix() int64 {
	return internalToUnix + t.sec
}
// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
// since January 1, 1970 UTC. The result is undefined if the Unix time
// in nanoseconds cannot be represented by an int64 (a date before the year
// 1678 or after 2262). Note that this means the result of calling UnixNano
// on the zero Time is undefined.
func (t Time) UnixNano() int64 {
	unixSec := t.sec + internalToUnix
	return unixSec*1e9 + int64(t.nsec)
}
const timeBinaryVersion byte = 1
// MarshalBinary implements the encoding.BinaryMarshaler interface.
//
// The encoding is 15 bytes: a version byte, 8 bytes of seconds and
// 4 bytes of nanoseconds (both big-endian), then 2 bytes of zone
// offset in minutes east of UTC, where -1 marks UTC.
func (t Time) MarshalBinary() ([]byte, error) {
	var offsetMin int16 // minutes east of UTC. -1 is UTC.

	if t.Location() == UTC {
		offsetMin = -1
	} else {
		_, offset := t.Zone()
		if offset%60 != 0 {
			return nil, errors.New("Time.MarshalBinary: zone offset has fractional minute")
		}
		offset /= 60
		// -1 is reserved as the UTC marker, so a real -1 minute offset
		// cannot be encoded either.
		if offset < -32768 || offset == -1 || offset > 32767 {
			return nil, errors.New("Time.MarshalBinary: unexpected zone offset")
		}
		offsetMin = int16(offset)
	}

	enc := []byte{
		timeBinaryVersion, // byte 0 : version
		byte(t.sec >> 56), // bytes 1-8: seconds
		byte(t.sec >> 48),
		byte(t.sec >> 40),
		byte(t.sec >> 32),
		byte(t.sec >> 24),
		byte(t.sec >> 16),
		byte(t.sec >> 8),
		byte(t.sec),
		byte(t.nsec >> 24), // bytes 9-12: nanoseconds
		byte(t.nsec >> 16),
		byte(t.nsec >> 8),
		byte(t.nsec),
		byte(offsetMin >> 8), // bytes 13-14: zone offset in minutes
		byte(offsetMin),
	}

	return enc, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
// It decodes the 15-byte format produced by MarshalBinary.
func (t *Time) UnmarshalBinary(data []byte) error {
	buf := data
	if len(buf) == 0 {
		return errors.New("Time.UnmarshalBinary: no data")
	}

	if buf[0] != timeBinaryVersion {
		return errors.New("Time.UnmarshalBinary: unsupported version")
	}

	if len(buf) != /*version*/ 1+ /*sec*/ 8+ /*nsec*/ 4+ /*zone offset*/ 2 {
		return errors.New("Time.UnmarshalBinary: invalid length")
	}

	buf = buf[1:]
	// Big-endian 8-byte seconds.
	t.sec = int64(buf[7]) | int64(buf[6])<<8 | int64(buf[5])<<16 | int64(buf[4])<<24 |
		int64(buf[3])<<32 | int64(buf[2])<<40 | int64(buf[1])<<48 | int64(buf[0])<<56

	buf = buf[8:]
	// Big-endian 4-byte nanoseconds.
	t.nsec = int32(buf[3]) | int32(buf[2])<<8 | int32(buf[1])<<16 | int32(buf[0])<<24

	buf = buf[4:]
	// Zone offset was stored in minutes; -1 minute is the UTC marker.
	offset := int(int16(buf[1])|int16(buf[0])<<8) * 60

	if offset == -1*60 {
		t.setLoc(&utcLoc)
	} else if _, localoff, _, _, _ := Local.lookup(t.sec + internalToUnix); offset == localoff {
		// Prefer the rich Local zone when the offset matches it.
		t.setLoc(Local)
	} else {
		t.setLoc(FixedZone("", offset))
	}

	return nil
}
// TODO(rsc): Remove GobEncoder, GobDecoder, MarshalJSON, UnmarshalJSON in Go 2.
// The same semantics will be provided by the generic MarshalBinary, MarshalText,
// UnmarshalBinary, UnmarshalText.
// GobEncode implements the gob.GobEncoder interface.
// It delegates to MarshalBinary.
func (t Time) GobEncode() ([]byte, error) {
	b, err := t.MarshalBinary()
	return b, err
}
// GobDecode implements the gob.GobDecoder interface.
// It delegates to UnmarshalBinary.
func (t *Time) GobDecode(data []byte) error {
	err := t.UnmarshalBinary(data)
	return err
}
// MarshalJSON implements the json.Marshaler interface.
// The time is a quoted string in RFC 3339 format, with sub-second precision added if present.
func (t Time) MarshalJSON() ([]byte, error) {
	y := t.Year()
	if y < 0 || y >= 10000 {
		// RFC 3339 is clear that years are 4 digits exactly.
		// See golang.org/issue/4556#c15 for more discussion.
		return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]")
	}
	out := make([]byte, 0, len(RFC3339Nano)+2)
	out = append(out, '"')
	out = t.AppendFormat(out, RFC3339Nano)
	out = append(out, '"')
	return out, nil
}
// UnmarshalJSON implements the json.Unmarshaler interface.
// The time is expected to be a quoted string in RFC 3339 format.
func (t *Time) UnmarshalJSON(data []byte) error {
	// Ignore null, like in the main JSON package.
	if string(data) == "null" {
		return nil
	}
	// Fractional seconds are handled implicitly by Parse.
	parsed, err := Parse(`"`+RFC3339+`"`, string(data))
	*t = parsed
	return err
}
// MarshalText implements the encoding.TextMarshaler interface.
// The time is formatted in RFC 3339 format, with sub-second precision added if present.
func (t Time) MarshalText() ([]byte, error) {
	y := t.Year()
	if y < 0 || y >= 10000 {
		// RFC 3339 years are 4 digits exactly.
		return nil, errors.New("Time.MarshalText: year outside of range [0,9999]")
	}
	buf := make([]byte, 0, len(RFC3339Nano))
	return t.AppendFormat(buf, RFC3339Nano), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
// The time is expected to be in RFC 3339 format.
func (t *Time) UnmarshalText(data []byte) error {
	// Fractional seconds are handled implicitly by Parse.
	parsed, err := Parse(RFC3339, string(data))
	*t = parsed
	return err
}
// Unix returns the local Time corresponding to the given Unix time,
// sec seconds and nsec nanoseconds since January 1, 1970 UTC.
// It is valid to pass nsec outside the range [0, 999999999].
// Not all sec values have a corresponding time value. One such
// value is 1<<63-1 (the largest int64 value).
func Unix(sec int64, nsec int64) Time {
	if nsec < 0 || nsec >= 1e9 {
		// Normalize nsec into [0, 1e9), carrying whole seconds into sec.
		carry := nsec / 1e9
		sec += carry
		nsec -= carry * 1e9
		if nsec < 0 {
			nsec += 1e9
			sec--
		}
	}
	return Time{sec: sec + unixToInternal, nsec: int32(nsec), loc: Local}
}
// isLeap reports whether year is a leap year under Gregorian rules:
// divisible by 4, except centuries, except every fourth century.
func isLeap(year int) bool {
	if year%4 != 0 {
		return false
	}
	if year%100 != 0 {
		return true
	}
	return year%400 == 0
}
// norm returns nhi, nlo such that
//	hi * base + lo == nhi * base + nlo
//	0 <= nlo < base
func norm(hi, lo, base int) (nhi, nlo int) {
	// Borrow from hi while lo is negative.
	if lo < 0 {
		borrow := (-lo-1)/base + 1
		hi -= borrow
		lo += borrow * base
	}
	// Carry into hi while lo is too large.
	if lo >= base {
		carry := lo / base
		hi += carry
		lo -= carry * base
	}
	return hi, lo
}
// Date returns the Time corresponding to
//	yyyy-mm-dd hh:mm:ss + nsec nanoseconds
// in the appropriate zone for that time in the given location.
//
// The month, day, hour, min, sec, and nsec values may be outside
// their usual ranges and will be normalized during the conversion.
// For example, October 32 converts to November 1.
//
// A daylight savings time transition skips or repeats times.
// For example, in the United States, March 13, 2011 2:15am never occurred,
// while November 6, 2011 1:15am occurred twice. In such cases, the
// choice of time zone, and therefore the time, is not well-defined.
// Date returns a time that is correct in one of the two zones involved
// in the transition, but it does not guarantee which.
//
// Date panics if loc is nil.
func Date(year int, month Month, day, hour, min, sec, nsec int, loc *Location) Time {
	if loc == nil {
		panic("time: missing Location in call to Date")
	}

	// Normalize month, overflowing into year.
	m := int(month) - 1
	year, m = norm(year, m, 12)
	month = Month(m) + 1

	// Normalize nsec, sec, min, hour, overflowing into day.
	sec, nsec = norm(sec, nsec, 1e9)
	min, sec = norm(min, sec, 60)
	hour, min = norm(hour, min, 60)
	day, hour = norm(day, hour, 24)

	// y is the year relative to the absolute zero year; the day count
	// below mirrors the decomposition done in absDate, in reverse.
	y := uint64(int64(year) - absoluteZeroYear)

	// Compute days since the absolute epoch.

	// Add in days from 400-year cycles.
	n := y / 400
	y -= 400 * n
	d := daysPer400Years * n

	// Add in 100-year cycles.
	n = y / 100
	y -= 100 * n
	d += daysPer100Years * n

	// Add in 4-year cycles.
	n = y / 4
	y -= 4 * n
	d += daysPer4Years * n

	// Add in non-leap years.
	n = y
	d += 365 * n

	// Add in days before this month.
	d += uint64(daysBefore[month-1])
	if isLeap(year) && month >= March {
		d++ // February 29
	}

	// Add in days before today.
	d += uint64(day - 1)

	// Add in time elapsed today.
	abs := d * secondsPerDay
	abs += uint64(hour*secondsPerHour + min*secondsPerMinute + sec)

	unix := int64(abs) + (absoluteToInternal + internalToUnix)

	// Look for zone offset for t, so we can adjust to UTC.
	// The lookup function expects UTC, so we pass t in the
	// hope that it will not be too close to a zone transition,
	// and then adjust if it is.
	_, offset, _, start, end := loc.lookup(unix)
	if offset != 0 {
		switch utc := unix - int64(offset); {
		case utc < start:
			// The offset pushed us before this zone's start; redo the
			// lookup just inside the previous zone.
			_, offset, _, _, _ = loc.lookup(start - 1)
		case utc >= end:
			// The offset pushed us past this zone's end; redo the
			// lookup at the start of the next zone.
			_, offset, _, _, _ = loc.lookup(end)
		}
		unix -= int64(offset)
	}

	t := Time{unix + unixToInternal, int32(nsec), nil}
	t.setLoc(loc)
	return t
}
// Truncate returns the result of rounding t down to a multiple of d (since the zero time).
// If d <= 0, Truncate returns t unchanged.
//
// Truncate operates on the time as an absolute duration since the
// zero time; it does not operate on the presentation form of the
// time. Thus, Truncate(Hour) may return a time with a non-zero
// minute, depending on the time's Location.
func (t Time) Truncate(d Duration) Time {
	if d <= 0 {
		return t
	}
	// Drop the remainder of t modulo d.
	_, rem := div(t, d)
	return t.Add(-rem)
}
// Round returns the result of rounding t to the nearest multiple of d (since the zero time).
// The rounding behavior for halfway values is to round up.
// If d <= 0, Round returns t unchanged.
//
// Round operates on the time as an absolute duration since the
// zero time; it does not operate on the presentation form of the
// time. Thus, Round(Hour) may return a time with a non-zero
// minute, depending on the time's Location.
func (t Time) Round(d Duration) Time {
	if d <= 0 {
		return t
	}
	_, rem := div(t, d)
	// Round up when the remainder is at least half of d
	// (exact halves round up).
	if d-rem <= rem {
		return t.Add(d - rem)
	}
	return t.Add(-rem)
}
// div divides t by d and returns the quotient parity and remainder.
// We don't use the quotient parity anymore (round half up instead of round to even)
// but it's still here in case we change our minds.
func div(t Time, d Duration) (qmod2 int, r Duration) {
	neg := false
	nsec := t.nsec
	if t.sec < 0 {
		// Operate on absolute value.
		neg = true
		t.sec = -t.sec
		nsec = -nsec
		if nsec < 0 {
			nsec += 1e9
			t.sec-- // t.sec >= 1 before the -- so safe
		}
	}

	switch {
	// Special case: 2d divides 1 second.
	case d < Second && Second%(d+d) == 0:
		// Whole seconds contribute an even quotient count and zero
		// remainder, so only the nanosecond field matters.
		qmod2 = int(nsec/int32(d)) & 1
		r = Duration(nsec % int32(d))

	// Special case: d is a multiple of 1 second.
	case d%Second == 0:
		d1 := int64(d / Second)
		qmod2 = int(t.sec/d1) & 1
		r = Duration(t.sec%d1)*Second + Duration(nsec)

	// General case.
	// This could be faster if more cleverness were applied,
	// but it's really only here to avoid special case restrictions in the API.
	// No one will care about these cases.
	default:
		// Compute nanoseconds as 128-bit number.
		sec := uint64(t.sec)
		tmp := (sec >> 32) * 1e9
		u1 := tmp >> 32
		u0 := tmp << 32
		tmp = (sec & 0xFFFFFFFF) * 1e9
		u0x, u0 := u0, u0+tmp
		if u0 < u0x {
			u1++ // carry from the low word
		}
		u0x, u0 = u0, u0+uint64(nsec)
		if u0 < u0x {
			u1++ // carry from the low word
		}

		// Compute remainder by subtracting r<<k for decreasing k.
		// Quotient parity is whether we subtract on last round.
		// First left-justify d in d1 so it can be shifted back down
		// one bit per iteration.
		d1 := uint64(d)
		for d1>>63 != 1 {
			d1 <<= 1
		}
		d0 := uint64(0)
		for {
			qmod2 = 0
			if u1 > d1 || u1 == d1 && u0 >= d0 {
				// subtract
				qmod2 = 1
				u0x, u0 = u0, u0-d0
				if u0 > u0x {
					u1-- // borrow into the high word
				}
				u1 -= d1
			}
			// Stop once d1:d0 has been shifted back down to the original d.
			if d1 == 0 && d0 == uint64(d) {
				break
			}
			d0 >>= 1
			d0 |= (d1 & 1) << 63
			d1 >>= 1
		}
		r = Duration(u0)
	}

	if neg && r != 0 {
		// If input was negative and not an exact multiple of d, we computed q, r such that
		//	q*d + r = -t
		// But the right answers are given by -(q-1), d-r:
		//	q*d + r = -t
		//	-q*d - r = t
		//	-(q-1)*d + (d - r) = t
		qmod2 ^= 1
		r = d - r
	}
	return
}
time: clarify Equal docs
The docs used to imply that using == would compare Locations, but of
course it just compares Location pointers, which will have unpredictable
results depending on how the pointers are loaded.
Change-Id: I783c1309e476a9616a1c1c290eac713aba3b0b57
Reviewed-on: https://go-review.googlesource.com/32332
Reviewed-by: Brad Fitzpatrick <ae9783c0b0efc69cd85ab025ddd17aa44cdc4aa5@golang.org>
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package time provides functionality for measuring and displaying time.
//
// The calendrical calculations always assume a Gregorian calendar, with
// no leap seconds.
package time
import "errors"
// A Time represents an instant in time with nanosecond precision.
//
// Programs using times should typically store and pass them as values,
// not pointers. That is, time variables and struct fields should be of
// type time.Time, not *time.Time. A Time value can be used by
// multiple goroutines simultaneously.
//
// Time instants can be compared using the Before, After, and Equal methods.
// The Sub method subtracts two instants, producing a Duration.
// The Add method adds a Time and a Duration, producing a Time.
//
// The zero value of type Time is January 1, year 1, 00:00:00.000000000 UTC.
// As this time is unlikely to come up in practice, the IsZero method gives
// a simple way of detecting a time that has not been initialized explicitly.
//
// Each Time has associated with it a Location, consulted when computing the
// presentation form of the time, such as in the Format, Hour, and Year methods.
// The methods Local, UTC, and In return a Time with a specific location.
// Changing the location in this way changes only the presentation; it does not
// change the instant in time being denoted and therefore does not affect the
// computations described in earlier paragraphs.
//
// Note that the Go == operator compares not just the time instant but also the
// Location. Therefore, Time values should not be used as map or database keys
// without first guaranteeing that the identical Location has been set for all
// values, which can be achieved through use of the UTC or Local method.
//
type Time struct {
	// sec gives the number of seconds elapsed since
	// January 1, year 1 00:00:00 UTC.
	sec int64
	// nsec specifies a non-negative nanosecond
	// offset within the second named by sec.
	// It must be in the range [0, 999999999].
	nsec int32
	// loc specifies the Location that should be used to
	// determine the minute, hour, month, day, and year
	// that correspond to this Time.
	// The nil location means UTC.
	// All UTC times are represented with loc==nil, never loc==&utcLoc.
	loc *Location
}
// setLoc stores loc as t's location, canonicalizing the UTC sentinel
// to nil so all UTC times share the same representation.
func (t *Time) setLoc(loc *Location) {
	if loc == &utcLoc {
		t.loc = nil
		return
	}
	t.loc = loc
}
// After reports whether the time instant t is after u.
func (t Time) After(u Time) bool {
	if t.sec != u.sec {
		return t.sec > u.sec
	}
	return t.nsec > u.nsec
}
// Before reports whether the time instant t is before u.
func (t Time) Before(u Time) bool {
	if t.sec != u.sec {
		return t.sec < u.sec
	}
	return t.nsec < u.nsec
}
// Equal reports whether t and u represent the same time instant.
// Two times can be equal even if they are in different locations.
// For example, 6:00 +0200 CEST and 4:00 UTC are Equal.
// Note that using == with Time values produces unpredictable results.
func (t Time) Equal(u Time) bool {
	return t.nsec == u.nsec && t.sec == u.sec
}
// A Month specifies a month of the year (January = 1, ...).
// Note that Month(0) is not a valid month.
type Month int

const (
	January Month = 1 + iota
	February
	March
	April
	May
	June
	July
	August
	September
	October
	November
	December
)

// months holds the English month names, indexed by Month-1
// (see Month.String).
var months = [...]string{
	"January",
	"February",
	"March",
	"April",
	"May",
	"June",
	"July",
	"August",
	"September",
	"October",
	"November",
	"December",
}
// String returns the English name of the month ("January", "February", ...).
func (m Month) String() string {
	// months is indexed from zero while January == 1.
	return months[m-1]
}
// A Weekday specifies a day of the week (Sunday = 0, ...).
type Weekday int

const (
	Sunday Weekday = iota
	Monday
	Tuesday
	Wednesday
	Thursday
	Friday
	Saturday
)

// days holds the English day names, indexed directly by Weekday
// (Sunday == 0 matches the first entry).
var days = [...]string{
	"Sunday",
	"Monday",
	"Tuesday",
	"Wednesday",
	"Thursday",
	"Friday",
	"Saturday",
}
// String returns the English name of the day ("Sunday", "Monday", ...).
func (d Weekday) String() string {
	// days is indexed directly by Weekday (Sunday == 0).
	return days[d]
}
// Computations on time.
//
// The zero value for a Time is defined to be
// January 1, year 1, 00:00:00.000000000 UTC
// which (1) looks like a zero, or as close as you can get in a date
// (1-1-1 00:00:00 UTC), (2) is unlikely enough to arise in practice to
// be a suitable "not set" sentinel, unlike Jan 1 1970, and (3) has a
// non-negative year even in time zones west of UTC, unlike 1-1-0
// 00:00:00 UTC, which would be 12-31-(-1) 19:00:00 in New York.
//
// The zero Time value does not force a specific epoch for the time
// representation. For example, to use the Unix epoch internally, we
// could define that to distinguish a zero value from Jan 1 1970, that
// time would be represented by sec=-1, nsec=1e9. However, it does
// suggest a representation, namely using 1-1-1 00:00:00 UTC as the
// epoch, and that's what we do.
//
// The Add and Sub computations are oblivious to the choice of epoch.
//
// The presentation computations - year, month, minute, and so on - all
// rely heavily on division and modulus by positive constants. For
// calendrical calculations we want these divisions to round down, even
// for negative values, so that the remainder is always positive, but
// Go's division (like most hardware division instructions) rounds to
// zero. We can still do those computations and then adjust the result
// for a negative numerator, but it's annoying to write the adjustment
// over and over. Instead, we can change to a different epoch so long
// ago that all the times we care about will be positive, and then round
// to zero and round down coincide. These presentation routines already
// have to add the zone offset, so adding the translation to the
// alternate epoch is cheap. For example, having a non-negative time t
// means that we can write
//
// sec = t % 60
//
// instead of
//
// sec = t % 60
// if sec < 0 {
// sec += 60
// }
//
// everywhere.
//
// The calendar runs on an exact 400 year cycle: a 400-year calendar
// printed for 1970-2469 will apply as well to 2370-2769. Even the days
// of the week match up. It simplifies the computations to choose the
// cycle boundaries so that the exceptional years are always delayed as
// long as possible. That means choosing a year equal to 1 mod 400, so
// that the first leap year is the 4th year, the first missed leap year
// is the 100th year, and the missed missed leap year is the 400th year.
// So we'd prefer instead to print a calendar for 2001-2400 and reuse it
// for 2401-2800.
//
// Finally, it's convenient if the delta between the Unix epoch and
// long-ago epoch is representable by an int64 constant.
//
// These three considerations—choose an epoch as early as possible, that
// uses a year equal to 1 mod 400, and that is no more than 2⁶³ seconds
// earlier than 1970—bring us to the year -292277022399. We refer to
// this year as the absolute zero year, and to times measured as a uint64
// seconds since this year as absolute times.
//
// Times measured as an int64 seconds since the year 1—the representation
// used for Time's sec field—are called internal times.
//
// Times measured as an int64 seconds since the year 1970 are called Unix
// times.
//
// It is tempting to just use the year 1 as the absolute epoch, defining
// that the routines are only valid for years >= 1. However, the
// routines would then be invalid when displaying the epoch in time zones
// west of UTC, since it is year 0. It doesn't seem tenable to say that
// printing the zero time correctly isn't supported in half the time
// zones. By comparison, it's reasonable to mishandle some times in
// the year -292277022399.
//
// All this is opaque to clients of the API and can be changed if a
// better implementation presents itself.
const (
	// The unsigned zero year for internal calculations.
	// Must be 1 mod 400, and times before it will not compute correctly,
	// but otherwise can be changed at will.
	absoluteZeroYear = -292277022399

	// The year of the zero Time.
	// Assumed by the unixToInternal computation below.
	internalYear = 1

	// Offsets to convert between internal and absolute or Unix times.
	// 365.2425 is the mean Gregorian year length in days; the constant
	// expression is evaluated with exact arithmetic and truncated to int64.
	absoluteToInternal int64 = (absoluteZeroYear - internalYear) * 365.2425 * secondsPerDay
	internalToAbsolute = -absoluteToInternal

	// 1969 full years separate the internal epoch (year 1) from the Unix
	// epoch (1970); the /4, /100, /400 terms count the leap days among them.
	unixToInternal int64 = (1969*365 + 1969/4 - 1969/100 + 1969/400) * secondsPerDay
	internalToUnix int64 = -unixToInternal
)
// IsZero reports whether t represents the zero time instant,
// January 1, year 1, 00:00:00 UTC.
func (t Time) IsZero() bool {
	if t.sec != 0 {
		return false
	}
	return t.nsec == 0
}
// abs returns the time t as an absolute time, adjusted by the zone offset.
// It is called when computing a presentation property like Month or Hour.
func (t Time) abs() uint64 {
	l := t.loc
	// Avoid function calls when possible.
	// get() resolves nil (the canonical UTC representation) and the
	// lazily-initialized local location; other locations are used as-is.
	if l == nil || l == &localLoc {
		l = l.get()
	}
	// Convert internal seconds (epoch year 1) to Unix seconds for lookup.
	sec := t.sec + internalToUnix
	if l != &utcLoc {
		// Fast path: the location caches the zone in effect for a window
		// [cacheStart, cacheEnd) of Unix seconds.
		if l.cacheZone != nil && l.cacheStart <= sec && sec < l.cacheEnd {
			sec += int64(l.cacheZone.offset)
		} else {
			_, offset, _, _, _ := l.lookup(sec)
			sec += int64(offset)
		}
	}
	// Shift from Unix seconds to seconds since the absolute epoch.
	return uint64(sec + (unixToInternal + internalToAbsolute))
}
// locabs is a combination of the Zone and abs methods,
// extracting both return values from a single zone lookup.
func (t Time) locabs() (name string, offset int, abs uint64) {
	l := t.loc
	// Resolve nil (UTC) and the lazily-initialized local location.
	if l == nil || l == &localLoc {
		l = l.get()
	}
	// Avoid function call if we hit the local time cache.
	sec := t.sec + internalToUnix
	if l != &utcLoc {
		if l.cacheZone != nil && l.cacheStart <= sec && sec < l.cacheEnd {
			name = l.cacheZone.name
			offset = l.cacheZone.offset
		} else {
			name, offset, _, _, _ = l.lookup(sec)
		}
		sec += int64(offset)
	} else {
		// UTC never needs a lookup: zero offset, fixed name.
		name = "UTC"
	}
	abs = uint64(sec + (unixToInternal + internalToAbsolute))
	return
}
// Date returns the year, month, and day in which t occurs.
func (t Time) Date() (year int, month Month, day int) {
	y, m, d, _ := t.date(true)
	return y, m, d
}
// Year returns the year in which t occurs.
func (t Time) Year() int {
	// Month and day are not needed, so skip the full date computation.
	y, _, _, _ := t.date(false)
	return y
}
// Month returns the month of the year specified by t.
func (t Time) Month() Month {
	_, m, _, _ := t.date(true)
	return m
}
// Day returns the day of the month specified by t.
func (t Time) Day() int {
	_, _, d, _ := t.date(true)
	return d
}
// Weekday returns the day of the week specified by t.
func (t Time) Weekday() Weekday {
	abs := t.abs()
	return absWeekday(abs)
}
// absWeekday is like Weekday but operates on an absolute time.
func absWeekday(abs uint64) Weekday {
	// January 1 of the absolute year, like January 1 of 2001, was a Monday.
	// Count whole days since the absolute epoch, then shift so that day 0
	// maps to Monday before reducing modulo 7.
	days := abs / secondsPerDay
	return Weekday((days + uint64(Monday)) % 7)
}
// ISOWeek returns the ISO 8601 year and week number in which t occurs.
// Week ranges from 1 to 53. Jan 01 to Jan 03 of year n might belong to
// week 52 or 53 of year n-1, and Dec 29 to Dec 31 might belong to week 1
// of year n+1.
func (t Time) ISOWeek() (year, week int) {
	year, month, day, yday := t.date(true)
	wday := int(t.Weekday()+6) % 7 // weekday but Monday = 0.
	const (
		Mon int = iota
		Tue
		Wed
		Thu
		Fri
		Sat
		Sun
	)
	// Calculate week as number of Mondays in year up to
	// and including today, plus 1 because the first week is week 0.
	// Putting the + 1 inside the numerator as a + 7 keeps the
	// numerator from being negative, which would cause it to
	// round incorrectly.
	week = (yday - wday + 7) / 7
	// The week number is now correct under the assumption
	// that the first Monday of the year is in week 1.
	// If Jan 1 is a Tuesday, Wednesday, or Thursday, the first Monday
	// is actually in week 2.
	// The + 7*53 keeps the left operand of % non-negative
	// (yday is at most 365, well under 7*53).
	jan1wday := (wday - yday + 7*53) % 7
	if Tue <= jan1wday && jan1wday <= Thu {
		week++
	}
	// If the week number is still 0, we're in early January but in
	// the last week of last year.
	if week == 0 {
		year--
		week = 52
		// A year has 53 weeks when Jan 1 or Dec 31 is a Thursday,
		// meaning Jan 1 of the next year is a Friday
		// or it was a leap year and Jan 1 of the next year is a Saturday.
		if jan1wday == Fri || (jan1wday == Sat && isLeap(year)) {
			week++
		}
	}
	// December 29 to 31 are in week 1 of next year if
	// they are after the last Thursday of the year and
	// December 31 is a Monday, Tuesday, or Wednesday.
	if month == December && day >= 29 && wday < Thu {
		if dec31wday := (wday + 31 - day) % 7; Mon <= dec31wday && dec31wday <= Wed {
			year++
			week = 1
		}
	}
	return
}
// Clock returns the hour, minute, and second within the day specified by t.
func (t Time) Clock() (hour, min, sec int) {
	hour, min, sec = absClock(t.abs())
	return
}
// absClock is like clock but operates on an absolute time.
func absClock(abs uint64) (hour, min, sec int) {
	// Seconds elapsed since the start of the current day.
	daySec := int(abs % secondsPerDay)
	hour = daySec / secondsPerHour
	min = (daySec % secondsPerHour) / secondsPerMinute
	sec = daySec % secondsPerMinute
	return
}
// Hour returns the hour within the day specified by t, in the range [0, 23].
func (t Time) Hour() int {
	daySec := t.abs() % secondsPerDay
	return int(daySec / secondsPerHour)
}
// Minute returns the minute offset within the hour specified by t, in the range [0, 59].
func (t Time) Minute() int {
	hourSec := t.abs() % secondsPerHour
	return int(hourSec / secondsPerMinute)
}
// Second returns the second offset within the minute specified by t, in the range [0, 59].
func (t Time) Second() int {
	sec := t.abs() % secondsPerMinute
	return int(sec)
}
// Nanosecond returns the nanosecond offset within the second specified by t,
// in the range [0, 999999999].
// t.nsec is kept normalized to [0, 1e9) by the constructors (Unix, Date)
// and by Add, so no reduction is needed here.
func (t Time) Nanosecond() int {
	return int(t.nsec)
}
// YearDay returns the day of the year specified by t, in the range [1,365] for non-leap years,
// and [1,366] in leap years.
func (t Time) YearDay() int {
	// date reports a zero-based day-of-year; convert to one-based.
	_, _, _, d := t.date(false)
	return d + 1
}
// A Duration represents the elapsed time between two instants
// as an int64 nanosecond count. The representation limits the
// largest representable duration to approximately 290 years.
type Duration int64

const (
	// minDuration and maxDuration are the extreme int64 values, used by
	// Sub to saturate on overflow/underflow.
	minDuration Duration = -1 << 63
	maxDuration Duration = 1<<63 - 1
)
// Common durations. There is no definition for units of Day or larger
// to avoid confusion across daylight savings time zone transitions.
//
// To count the number of units in a Duration, divide:
// second := time.Second
// fmt.Print(int64(second/time.Millisecond)) // prints 1000
//
// To convert an integer number of units to a Duration, multiply:
// seconds := 10
// fmt.Print(time.Duration(seconds)*time.Second) // prints 10s
//
const (
	Nanosecond Duration = 1
	// Each unit is defined in terms of the previous one, so every value
	// is an exact integer count of nanoseconds.
	Microsecond = 1000 * Nanosecond
	Millisecond = 1000 * Microsecond
	Second      = 1000 * Millisecond
	Minute      = 60 * Second
	Hour        = 60 * Minute
)
// String returns a string representing the duration in the form "72h3m0.5s".
// Leading zero units are omitted. As a special case, durations less than one
// second format use a smaller unit (milli-, micro-, or nanoseconds) to ensure
// that the leading digit is non-zero. The zero duration formats as 0s.
func (d Duration) String() string {
	// Largest time is 2540400h10m10.000000000s
	// The string is built right-to-left into the tail of buf;
	// w is the index of the first written byte.
	var buf [32]byte
	w := len(buf)
	u := uint64(d)
	neg := d < 0
	if neg {
		// Unsigned negation yields the magnitude (also correct for the
		// most negative duration, which has no int64 counterpart).
		u = -u
	}
	if u < uint64(Second) {
		// Special case: if duration is smaller than a second,
		// use smaller units, like 1.2ms
		var prec int
		w--
		buf[w] = 's'
		w--
		switch {
		case u == 0:
			return "0s"
		case u < uint64(Microsecond):
			// print nanoseconds
			prec = 0
			buf[w] = 'n'
		case u < uint64(Millisecond):
			// print microseconds
			prec = 3
			// U+00B5 'µ' micro sign == 0xC2 0xB5
			w-- // Need room for two bytes.
			copy(buf[w:], "µ")
		default:
			// print milliseconds
			prec = 6
			buf[w] = 'm'
		}
		// Fractional digits first (right side), then the integer part.
		w, u = fmtFrac(buf[:w], u, prec)
		w = fmtInt(buf[:w], u)
	} else {
		w--
		buf[w] = 's'
		w, u = fmtFrac(buf[:w], u, 9)
		// u is now integer seconds
		w = fmtInt(buf[:w], u%60)
		u /= 60
		// u is now integer minutes
		if u > 0 {
			w--
			buf[w] = 'm'
			w = fmtInt(buf[:w], u%60)
			u /= 60
			// u is now integer hours
			// Stop at hours because days can be different lengths.
			if u > 0 {
				w--
				buf[w] = 'h'
				w = fmtInt(buf[:w], u)
			}
		}
	}
	if neg {
		w--
		buf[w] = '-'
	}
	return string(buf[w:])
}
// fmtFrac formats the fraction of v/10**prec (e.g., ".12345") into the
// tail of buf, omitting trailing zeros. It omits the decimal
// point too when the fraction is 0. It returns the index where the
// output bytes begin and the value v/10**prec.
func fmtFrac(buf []byte, v uint64, prec int) (nw int, nv uint64) {
	// Walk the digits least-significant first; start writing only once
	// the first non-zero digit appears, which drops trailing zeros.
	w := len(buf)
	wrote := false
	for i := 0; i < prec; i++ {
		digit := v % 10
		if digit != 0 {
			wrote = true
		}
		if wrote {
			w--
			buf[w] = byte(digit) + '0'
		}
		v /= 10
	}
	// Emit the decimal point only if any fractional digit was written.
	if wrote {
		w--
		buf[w] = '.'
	}
	return w, v
}
// fmtInt formats v into the tail of buf.
// It returns the index where the output begins.
func fmtInt(buf []byte, v uint64) int {
	w := len(buf)
	if v == 0 {
		// Zero still needs one digit.
		w--
		buf[w] = '0'
		return w
	}
	// Write digits least-significant first, moving leftward.
	for v > 0 {
		w--
		buf[w] = byte(v%10) + '0'
		v /= 10
	}
	return w
}
// Nanoseconds returns the duration as an integer nanosecond count.
func (d Duration) Nanoseconds() int64 {
	return int64(d)
}
// These methods return float64 because the dominant
// use case is for printing a floating point number like 1.5s, and
// a truncation to integer would make them not useful in those cases.
// Splitting the integer and fraction ourselves guarantees that
// converting the returned float64 to an integer rounds the same
// way that a pure integer conversion would have, even in cases
// where, say, float64(d.Nanoseconds())/1e9 would have rounded
// differently.
// Seconds returns the duration as a floating point number of seconds.
// Whole seconds and the sub-second remainder are converted separately so
// the float rounds the same way a pure integer conversion would.
func (d Duration) Seconds() float64 {
	return float64(d/Second) + float64(d%Second)*1e-9
}
// Minutes returns the duration as a floating point number of minutes.
// Whole minutes and the remainder are converted separately to preserve
// integer-conversion rounding behavior.
func (d Duration) Minutes() float64 {
	return float64(d/Minute) + float64(d%Minute)*(1e-9/60)
}
// Hours returns the duration as a floating point number of hours.
// Whole hours and the remainder are converted separately to preserve
// integer-conversion rounding behavior.
func (d Duration) Hours() float64 {
	return float64(d/Hour) + float64(d%Hour)*(1e-9/60/60)
}
// Add returns the time t+d.
func (t Time) Add(d Duration) Time {
	// Split d into whole seconds and nanoseconds, then renormalize
	// t.nsec into [0, 1e9) with a single carry or borrow.
	t.sec += int64(d / 1e9)
	nsec := t.nsec + int32(d%1e9)
	switch {
	case nsec >= 1e9:
		t.sec++
		nsec -= 1e9
	case nsec < 0:
		t.sec--
		nsec += 1e9
	}
	t.nsec = nsec
	return t
}
// Sub returns the duration t-u. If the result exceeds the maximum (or minimum)
// value that can be stored in a Duration, the maximum (or minimum) duration
// will be returned.
// To compute t-d for a duration d, use t.Add(-d).
func (t Time) Sub(u Time) Duration {
	d := Duration(t.sec-u.sec)*Second + Duration(t.nsec-u.nsec)
	// If adding d back to u reproduces t, the subtraction did not overflow.
	if u.Add(d).Equal(t) {
		return d
	}
	// Otherwise saturate in the direction of the true difference.
	if t.Before(u) {
		return minDuration // t - u is negative out of range
	}
	return maxDuration // t - u is positive out of range
}
// Since returns the time elapsed since t.
// It is shorthand for time.Now().Sub(t).
func Since(t Time) Duration {
	now := Now()
	return now.Sub(t)
}
// Until returns the duration until t.
// It is shorthand for t.Sub(time.Now()).
func Until(t Time) Duration {
	now := Now()
	return t.Sub(now)
}
// AddDate returns the time corresponding to adding the
// given number of years, months, and days to t.
// For example, AddDate(-1, 2, 3) applied to January 1, 2011
// returns March 4, 2010.
//
// AddDate normalizes its result in the same way that Date does,
// so, for example, adding one month to October 31 yields
// December 1, the normalized form for November 31.
func (t Time) AddDate(years int, months int, days int) Time {
	y, m, d := t.Date()
	hh, mm, ss := t.Clock()
	// Date normalizes any out-of-range components produced by the sums.
	return Date(y+years, m+Month(months), d+days, hh, mm, ss, int(t.nsec), t.Location())
}
// Seconds-per-unit values and Gregorian cycle lengths used by the
// calendar computations. The day counts include the leap days within
// each cycle: 97 per 400 years, 24 per 100 years, 1 per 4 years.
const (
	secondsPerMinute = 60
	secondsPerHour   = 60 * 60
	secondsPerDay    = 24 * secondsPerHour
	secondsPerWeek   = 7 * secondsPerDay
	daysPer400Years  = 365*400 + 97
	daysPer100Years  = 365*100 + 24
	daysPer4Years    = 365*4 + 1
)
// date computes the year, day of year, and when full=true,
// the month and day in which t occurs.
func (t Time) date(full bool) (year int, month Month, day int, yday int) {
	abs := t.abs()
	return absDate(abs, full)
}
// absDate is like date but operates on an absolute time.
// The returned yday is zero-based; Time.YearDay adds 1.
func absDate(abs uint64, full bool) (year int, month Month, day int, yday int) {
	// Split into time and day.
	d := abs / secondsPerDay

	// Account for 400 year cycles.
	n := d / daysPer400Years
	y := 400 * n
	d -= daysPer400Years * n

	// Cut off 100-year cycles.
	// The last cycle has one extra leap year, so on the last day
	// of that year, day / daysPer100Years will be 4 instead of 3.
	// Cut it back down to 3 by subtracting n>>2.
	n = d / daysPer100Years
	n -= n >> 2
	y += 100 * n
	d -= daysPer100Years * n

	// Cut off 4-year cycles.
	// The last cycle has a missing leap year, which does not
	// affect the computation.
	n = d / daysPer4Years
	y += 4 * n
	d -= daysPer4Years * n

	// Cut off years within a 4-year cycle.
	// The last year is a leap year, so on the last day of that year,
	// day / 365 will be 4 instead of 3. Cut it back down to 3
	// by subtracting n>>2.
	n = d / 365
	n -= n >> 2
	y += n
	d -= 365 * n

	year = int(int64(y) + absoluteZeroYear)
	yday = int(d)

	// Callers that only need year/yday stop here.
	if !full {
		return
	}

	day = yday
	if isLeap(year) {
		// Leap year
		switch {
		case day > 31+29-1:
			// After leap day; pretend it wasn't there.
			day--
		case day == 31+29-1:
			// Leap day.
			month = February
			day = 29
			return
		}
	}

	// Estimate month on assumption that every month has 31 days.
	// The estimate may be too low by at most one month, so adjust.
	month = Month(day / 31)
	end := int(daysBefore[month+1])
	var begin int
	if day >= end {
		month++
		begin = end
	} else {
		begin = int(daysBefore[month])
	}

	month++ // because January is 1
	day = day - begin + 1
	return
}
// daysBefore[m] counts the number of days in a non-leap year
// before month m begins. There is an entry for m=12, counting
// the number of days before January of next year (365).
// Each entry is the running sum of the non-leap month lengths,
// written out explicitly so each month's contribution is visible.
var daysBefore = [...]int32{
	0,
	31,
	31 + 28,
	31 + 28 + 31,
	31 + 28 + 31 + 30,
	31 + 28 + 31 + 30 + 31,
	31 + 28 + 31 + 30 + 31 + 30,
	31 + 28 + 31 + 30 + 31 + 30 + 31,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31,
}
// daysIn reports the number of days in month m of the given year,
// accounting for February in leap years.
func daysIn(m Month, year int) int {
	// Difference of cumulative day counts gives the non-leap length.
	n := int(daysBefore[m] - daysBefore[m-1])
	if m == February && isLeap(year) {
		n = 29
	}
	return n
}
// Provided by package runtime.
// now has no Go body here: the implementation is supplied by the runtime,
// returning the current wall-clock reading as Unix seconds and nanoseconds.
func now() (sec int64, nsec int32)
// Now returns the current local time.
func Now() Time {
	sec, nsec := now()
	// Shift the runtime's Unix-epoch seconds to the internal year-1 epoch.
	return Time{sec: sec + unixToInternal, nsec: nsec, loc: Local}
}
// UTC returns t with the location set to UTC.
// Only the location changes; the instant is unchanged, and the value
// receiver means the caller's Time is not modified.
func (t Time) UTC() Time {
	t.setLoc(&utcLoc)
	return t
}
// Local returns t with the location set to local time.
// Only the location changes; the instant is unchanged, and the value
// receiver means the caller's Time is not modified.
func (t Time) Local() Time {
	t.setLoc(Local)
	return t
}
// In returns t with the location information set to loc.
//
// In panics if loc is nil.
func (t Time) In(loc *Location) Time {
	// A nil location is a caller bug; fail loudly rather than store it
	// (nil has the internal meaning "UTC" — see setLoc).
	if loc == nil {
		panic("time: missing Location in call to Time.In")
	}
	t.setLoc(loc)
	return t
}
// Location returns the time zone information associated with t.
// A nil internal location is the canonical representation of UTC.
func (t Time) Location() *Location {
	if t.loc == nil {
		return UTC
	}
	return t.loc
}
// Zone computes the time zone in effect at time t, returning the abbreviated
// name of the zone (such as "CET") and its offset in seconds east of UTC.
func (t Time) Zone() (name string, offset int) {
	// lookup takes Unix-epoch seconds; only the first two of its five
	// results are needed here.
	name, offset, _, _, _ = t.loc.lookup(t.sec + internalToUnix)
	return
}
// Unix returns t as a Unix time, the number of seconds elapsed
// since January 1, 1970 UTC.
// The result depends only on the stored instant, not on t's location.
func (t Time) Unix() int64 {
	return t.sec + internalToUnix
}
// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
// since January 1, 1970 UTC. The result is undefined if the Unix time
// in nanoseconds cannot be represented by an int64 (a date before the year
// 1678 or after 2262). Note that this means the result of calling UnixNano
// on the zero Time is undefined.
func (t Time) UnixNano() int64 {
	sec := t.sec + internalToUnix
	return sec*1e9 + int64(t.nsec)
}
// timeBinaryVersion is the first byte of the MarshalBinary encoding;
// UnmarshalBinary rejects data whose first byte does not match it.
const timeBinaryVersion byte = 1
// MarshalBinary implements the encoding.BinaryMarshaler interface.
// The 15-byte big-endian encoding is: version (1), seconds (8),
// nanoseconds (4), zone offset in minutes (2), with offset -1 reserved
// as a sentinel meaning UTC.
func (t Time) MarshalBinary() ([]byte, error) {
	var offsetMin int16 // minutes east of UTC. -1 is UTC.

	if t.Location() == UTC {
		offsetMin = -1
	} else {
		_, offset := t.Zone()
		// Sub-minute offsets cannot be represented in the 2-byte field.
		if offset%60 != 0 {
			return nil, errors.New("Time.MarshalBinary: zone offset has fractional minute")
		}
		offset /= 60
		// -1 is excluded because it is the UTC sentinel above.
		if offset < -32768 || offset == -1 || offset > 32767 {
			return nil, errors.New("Time.MarshalBinary: unexpected zone offset")
		}
		offsetMin = int16(offset)
	}

	enc := []byte{
		timeBinaryVersion, // byte 0 : version
		byte(t.sec >> 56), // bytes 1-8: seconds
		byte(t.sec >> 48),
		byte(t.sec >> 40),
		byte(t.sec >> 32),
		byte(t.sec >> 24),
		byte(t.sec >> 16),
		byte(t.sec >> 8),
		byte(t.sec),
		byte(t.nsec >> 24), // bytes 9-12: nanoseconds
		byte(t.nsec >> 16),
		byte(t.nsec >> 8),
		byte(t.nsec),
		byte(offsetMin >> 8), // bytes 13-14: zone offset in minutes
		byte(offsetMin),
	}

	return enc, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
// It decodes the 15-byte big-endian format produced by MarshalBinary:
// version (1), seconds (8), nanoseconds (4), zone offset in minutes (2).
func (t *Time) UnmarshalBinary(data []byte) error {
	buf := data
	if len(buf) == 0 {
		return errors.New("Time.UnmarshalBinary: no data")
	}

	if buf[0] != timeBinaryVersion {
		return errors.New("Time.UnmarshalBinary: unsupported version")
	}

	if len(buf) != /*version*/ 1+ /*sec*/ 8+ /*nsec*/ 4+ /*zone offset*/ 2 {
		return errors.New("Time.UnmarshalBinary: invalid length")
	}

	buf = buf[1:]
	// Reassemble the big-endian seconds field.
	t.sec = int64(buf[7]) | int64(buf[6])<<8 | int64(buf[5])<<16 | int64(buf[4])<<24 |
		int64(buf[3])<<32 | int64(buf[2])<<40 | int64(buf[1])<<48 | int64(buf[0])<<56

	buf = buf[8:]
	t.nsec = int32(buf[3]) | int32(buf[2])<<8 | int32(buf[1])<<16 | int32(buf[0])<<24

	buf = buf[4:]
	// Offset is stored in minutes; -1 minute is the UTC sentinel.
	offset := int(int16(buf[1])|int16(buf[0])<<8) * 60

	if offset == -1*60 {
		t.setLoc(&utcLoc)
	} else if _, localoff, _, _, _ := Local.lookup(t.sec + internalToUnix); offset == localoff {
		// Prefer Local when the offsets agree so round-tripping a local
		// time restores the Local location rather than a fixed zone.
		t.setLoc(Local)
	} else {
		t.setLoc(FixedZone("", offset))
	}

	return nil
}
// TODO(rsc): Remove GobEncoder, GobDecoder, MarshalJSON, UnmarshalJSON in Go 2.
// The same semantics will be provided by the generic MarshalBinary, MarshalText,
// UnmarshalBinary, UnmarshalText.
// GobEncode implements the gob.GobEncoder interface.
// It delegates to MarshalBinary, so the gob and binary encodings are identical.
func (t Time) GobEncode() ([]byte, error) {
	return t.MarshalBinary()
}
// GobDecode implements the gob.GobDecoder interface.
// It delegates to UnmarshalBinary, mirroring GobEncode.
func (t *Time) GobDecode(data []byte) error {
	return t.UnmarshalBinary(data)
}
// MarshalJSON implements the json.Marshaler interface.
// The time is a quoted string in RFC 3339 format, with sub-second precision added if present.
func (t Time) MarshalJSON() ([]byte, error) {
	if y := t.Year(); y < 0 || y >= 10000 {
		// RFC 3339 is clear that years are 4 digits exactly.
		// See golang.org/issue/4556#c15 for more discussion.
		return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]")
	}

	// Reserve room for the formatted time plus the two quote characters.
	out := make([]byte, 0, len(RFC3339Nano)+2)
	out = append(out, '"')
	out = t.AppendFormat(out, RFC3339Nano)
	out = append(out, '"')
	return out, nil
}
// UnmarshalJSON implements the json.Unmarshaler interface.
// The time is expected to be a quoted string in RFC 3339 format.
func (t *Time) UnmarshalJSON(data []byte) error {
	// Ignore null, like in the main JSON package.
	if string(data) == "null" {
		return nil
	}
	// Fractional seconds are handled implicitly by Parse.
	// Assign unconditionally (zero Time on failure) to match the
	// original multi-assignment behavior.
	parsed, err := Parse(`"`+RFC3339+`"`, string(data))
	*t = parsed
	return err
}
// MarshalText implements the encoding.TextMarshaler interface.
// The time is formatted in RFC 3339 format, with sub-second precision added if present.
func (t Time) MarshalText() ([]byte, error) {
	// RFC 3339 requires exactly four year digits.
	if y := t.Year(); y < 0 || y >= 10000 {
		return nil, errors.New("Time.MarshalText: year outside of range [0,9999]")
	}

	out := make([]byte, 0, len(RFC3339Nano))
	return t.AppendFormat(out, RFC3339Nano), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
// The time is expected to be in RFC 3339 format.
func (t *Time) UnmarshalText(data []byte) error {
	// Fractional seconds are handled implicitly by Parse.
	// Assign unconditionally (zero Time on failure) to match the
	// original multi-assignment behavior.
	parsed, err := Parse(RFC3339, string(data))
	*t = parsed
	return err
}
// Unix returns the local Time corresponding to the given Unix time,
// sec seconds and nsec nanoseconds since January 1, 1970 UTC.
// It is valid to pass nsec outside the range [0, 999999999].
// Not all sec values have a corresponding time value. One such
// value is 1<<63-1 (the largest int64 value).
func Unix(sec int64, nsec int64) Time {
	// Normalize nsec into [0, 1e9), carrying whole seconds into sec.
	if nsec < 0 || nsec >= 1e9 {
		carry := nsec / 1e9
		sec += carry
		nsec -= carry * 1e9
		if nsec < 0 {
			// Integer division truncates toward zero, so a negative
			// remainder needs one extra borrow to land in [0, 1e9).
			nsec += 1e9
			sec--
		}
	}
	return Time{sec: sec + unixToInternal, nsec: int32(nsec), loc: Local}
}
// isLeap reports whether year is a leap year in the Gregorian calendar:
// divisible by 4, except century years, unless divisible by 400.
func isLeap(year int) bool {
	if year%4 != 0 {
		return false
	}
	if year%100 != 0 {
		return true
	}
	return year%400 == 0
}
// norm returns nhi, nlo such that
//	hi * base + lo == nhi * base + nlo
//	0 <= nlo < base
func norm(hi, lo, base int) (nhi, nlo int) {
	if lo < 0 {
		// Borrow enough whole units from hi to make lo non-negative.
		borrow := (-lo-1)/base + 1
		hi -= borrow
		lo += borrow * base
	}
	if lo >= base {
		// Carry the overflow in lo up into hi.
		carry := lo / base
		hi += carry
		lo -= carry * base
	}
	return hi, lo
}
// Date returns the Time corresponding to
//	yyyy-mm-dd hh:mm:ss + nsec nanoseconds
// in the appropriate zone for that time in the given location.
//
// The month, day, hour, min, sec, and nsec values may be outside
// their usual ranges and will be normalized during the conversion.
// For example, October 32 converts to November 1.
//
// A daylight savings time transition skips or repeats times.
// For example, in the United States, March 13, 2011 2:15am never occurred,
// while November 6, 2011 1:15am occurred twice. In such cases, the
// choice of time zone, and therefore the time, is not well-defined.
// Date returns a time that is correct in one of the two zones involved
// in the transition, but it does not guarantee which.
//
// Date panics if loc is nil.
func Date(year int, month Month, day, hour, min, sec, nsec int, loc *Location) Time {
	if loc == nil {
		panic("time: missing Location in call to Date")
	}

	// Normalize month, overflowing into year.
	m := int(month) - 1
	year, m = norm(year, m, 12)
	month = Month(m) + 1

	// Normalize nsec, sec, min, hour, overflowing into day.
	sec, nsec = norm(sec, nsec, 1e9)
	min, sec = norm(min, sec, 60)
	hour, min = norm(hour, min, 60)
	day, hour = norm(day, hour, 24)

	// y is the year count since the absolute epoch (always non-negative
	// for representable years), mirroring the decomposition in absDate.
	y := uint64(int64(year) - absoluteZeroYear)

	// Compute days since the absolute epoch.

	// Add in days from 400-year cycles.
	n := y / 400
	y -= 400 * n
	d := daysPer400Years * n

	// Add in 100-year cycles.
	n = y / 100
	y -= 100 * n
	d += daysPer100Years * n

	// Add in 4-year cycles.
	n = y / 4
	y -= 4 * n
	d += daysPer4Years * n

	// Add in non-leap years.
	n = y
	d += 365 * n

	// Add in days before this month.
	d += uint64(daysBefore[month-1])
	if isLeap(year) && month >= March {
		d++ // February 29
	}

	// Add in days before today.
	d += uint64(day - 1)

	// Add in time elapsed today.
	abs := d * secondsPerDay
	abs += uint64(hour*secondsPerHour + min*secondsPerMinute + sec)

	unix := int64(abs) + (absoluteToInternal + internalToUnix)

	// Look for zone offset for t, so we can adjust to UTC.
	// The lookup function expects UTC, so we pass t in the
	// hope that it will not be too close to a zone transition,
	// and then adjust if it is.
	_, offset, _, start, end := loc.lookup(unix)
	if offset != 0 {
		// If subtracting the offset moved us out of the zone window the
		// lookup reported, redo the lookup just inside the correct window.
		switch utc := unix - int64(offset); {
		case utc < start:
			_, offset, _, _, _ = loc.lookup(start - 1)
		case utc >= end:
			_, offset, _, _, _ = loc.lookup(end)
		}
		unix -= int64(offset)
	}

	t := Time{unix + unixToInternal, int32(nsec), nil}
	t.setLoc(loc)
	return t
}
// Truncate returns the result of rounding t down to a multiple of d (since the zero time).
// If d <= 0, Truncate returns t unchanged.
//
// Truncate operates on the time as an absolute duration since the
// zero time; it does not operate on the presentation form of the
// time. Thus, Truncate(Hour) may return a time with a non-zero
// minute, depending on the time's Location.
func (t Time) Truncate(d Duration) Time {
	if d <= 0 {
		return t
	}
	// Subtracting the remainder of t/d rounds down to the multiple.
	_, rem := div(t, d)
	return t.Add(-rem)
}
// Round returns the result of rounding t to the nearest multiple of d (since the zero time).
// The rounding behavior for halfway values is to round up.
// If d <= 0, Round returns t unchanged.
//
// Round operates on the time as an absolute duration since the
// zero time; it does not operate on the presentation form of the
// time. Thus, Round(Hour) may return a time with a non-zero
// minute, depending on the time's Location.
func (t Time) Round(d Duration) Time {
	if d <= 0 {
		return t
	}
	_, rem := div(t, d)
	// rem+rem < d avoids computing d/2; halfway values round up.
	if rem+rem < d {
		return t.Add(-rem)
	}
	return t.Add(d - rem)
}
// div divides t by d and returns the quotient parity and remainder.
// We don't use the quotient parity anymore (round half up instead of round to even)
// but it's still here in case we change our minds.
func div(t Time, d Duration) (qmod2 int, r Duration) {
	neg := false
	nsec := t.nsec
	if t.sec < 0 {
		// Operate on absolute value.
		neg = true
		t.sec = -t.sec
		nsec = -nsec
		if nsec < 0 {
			nsec += 1e9
			t.sec-- // t.sec >= 1 before the -- so safe
		}
	}

	switch {
	// Special case: 2d divides 1 second.
	// Then only the nanosecond field matters for quotient and remainder.
	case d < Second && Second%(d+d) == 0:
		qmod2 = int(nsec/int32(d)) & 1
		r = Duration(nsec % int32(d))

	// Special case: d is a multiple of 1 second.
	// Then only the second field matters for the quotient; the
	// nanoseconds carry straight into the remainder.
	case d%Second == 0:
		d1 := int64(d / Second)
		qmod2 = int(t.sec/d1) & 1
		r = Duration(t.sec%d1)*Second + Duration(nsec)

	// General case.
	// This could be faster if more cleverness were applied,
	// but it's really only here to avoid special case restrictions in the API.
	// No one will care about these cases.
	default:
		// Compute nanoseconds as 128-bit number.
		// u1:u0 holds t.sec*1e9 + nsec as two 64-bit halves.
		sec := uint64(t.sec)
		tmp := (sec >> 32) * 1e9
		u1 := tmp >> 32
		u0 := tmp << 32
		tmp = (sec & 0xFFFFFFFF) * 1e9
		u0x, u0 := u0, u0+tmp
		if u0 < u0x {
			u1++
		}
		u0x, u0 = u0, u0+uint64(nsec)
		if u0 < u0x {
			u1++
		}

		// Compute remainder by subtracting r<<k for decreasing k.
		// Quotient parity is whether we subtract on last round.
		// First shift d left until its top bit is set, forming d1:d0.
		d1 := uint64(d)
		for d1>>63 != 1 {
			d1 <<= 1
		}
		d0 := uint64(0)
		for {
			qmod2 = 0
			if u1 > d1 || u1 == d1 && u0 >= d0 {
				// subtract
				qmod2 = 1
				u0x, u0 = u0, u0-d0
				if u0 > u0x {
					u1--
				}
				u1 -= d1
			}
			// Stop once d1:d0 has shifted back down to the original d.
			if d1 == 0 && d0 == uint64(d) {
				break
			}
			d0 >>= 1
			d0 |= (d1 & 1) << 63
			d1 >>= 1
		}
		r = Duration(u0)
	}

	if neg && r != 0 {
		// If input was negative and not an exact multiple of d, we computed q, r such that
		//	q*d + r = -t
		// But the right answers are given by -(q-1), d-r:
		//	q*d + r = -t
		//	-q*d - r = t
		//	-(q-1)*d + (d - r) = t
		qmod2 ^= 1
		r = d - r
	}
	return
}
|
package toolbox
import (
"context"
"testing"
)
// TestJokeSources exercises both upstream joke APIs directly.
// Failures report both the error and the (possibly empty) response text;
// the old t.Fatal(err) printed a nil error when the text was merely empty.
func TestJokeSources(t *testing.T) {
	text, err := getDadJoke(context.Background(), 10)
	if err != nil || text == "" {
		t.Fatalf("getDadJoke: err=%v text=%q", err, text)
	}
	text, err = getChuckNorrisJoke(context.Background(), 10)
	if err != nil || text == "" {
		t.Fatalf("getChuckNorrisJoke: err=%v text=%q", err, text)
	}
}
// TestJoke verifies the Joke feature end to end: configuration check,
// initialisation, self test, and execution. A genuine joke can be quite
// short, so only a minimal output length (10 bytes) is required — the
// previous threshold of 30 failed intermittently on short jokes.
func TestJoke(t *testing.T) {
	joke := Joke{}
	if !joke.IsConfigured() {
		t.Fatal("should be configured")
	}
	if err := joke.Initialise(); err != nil {
		t.Fatal(err)
	}
	if err := joke.SelfTest(); err != nil {
		t.Fatal(err)
	}
	if result := joke.Execute(context.Background(), Command{TimeoutSec: 10}); result.Error != nil || len(result.Output) < 10 {
		t.Fatal(result)
	} else {
		t.Log(result)
	}
}
toolbox: fix unlikely error in TestJoke
package toolbox
import (
"context"
"testing"
)
// TestJokeSources exercises both upstream joke APIs directly.
// Failures report both the error and the (possibly empty) response text;
// the old t.Fatal(err) printed a nil error when the text was merely empty.
func TestJokeSources(t *testing.T) {
	text, err := getDadJoke(context.Background(), 10)
	if err != nil || text == "" {
		t.Fatalf("getDadJoke: err=%v text=%q", err, text)
	}
	text, err = getChuckNorrisJoke(context.Background(), 10)
	if err != nil || text == "" {
		t.Fatalf("getChuckNorrisJoke: err=%v text=%q", err, text)
	}
}
// TestJoke runs the Joke feature through its full lifecycle:
// configuration check, initialisation, self test, and execution.
func TestJoke(t *testing.T) {
	j := Joke{}
	if !j.IsConfigured() {
		t.Fatal("should be configured")
	}
	if err := j.Initialise(); err != nil {
		t.Fatal(err)
	}
	if err := j.SelfTest(); err != nil {
		t.Fatal(err)
	}
	result := j.Execute(context.Background(), Command{TimeoutSec: 10})
	if result.Error != nil || len(result.Output) < 10 {
		t.Fatal(result)
	}
	t.Log(result)
}
|
package storage
import (
"strings"
"time"
log "github.com/Sirupsen/logrus"
"github.com/akutz/gofig"
"github.com/akutz/goof"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
awsefs "github.com/aws/aws-sdk-go/service/efs"
"github.com/emccode/libstorage/api/context"
"github.com/emccode/libstorage/api/registry"
"github.com/emccode/libstorage/api/types"
"github.com/emccode/libstorage/drivers/storage/efs"
)
const (
	// tagDelimiter separates the configured driver tag from resource
	// names. NOTE(review): not referenced in this chunk — presumably used
	// when encoding tags into EFS object names; confirm elsewhere in the driver.
	tagDelimiter = "/"
)
// driver represents a EFS driver implementation of StorageDriver.
// It holds the driver configuration and the AWS credential chain that
// Init constructs.
type driver struct {
	config   gofig.Config
	awsCreds *credentials.Credentials
}
// init registers this driver with the libstorage registry under the EFS
// driver name so it can be instantiated by name.
func init() {
	registry.RegisterStorageDriver(efs.Name, newDriver)
}
// newDriver is the factory registered with the registry; it returns a
// fresh, uninitialized driver (configuration is supplied later via Init).
func newDriver() types.StorageDriver {
	return &driver{}
}
// Name returns the name of the driver
func (d *driver) Name() string {
return efs.Name
}
// Init initializes the driver.
func (d *driver) Init(ctx types.Context, config gofig.Config) error {
	d.config = config

	// Never put credential material into the log; only indicate whether
	// each credential is present.
	mask := func(s string) string {
		if s == "" {
			return ""
		}
		return "******"
	}
	fields := log.Fields{
		"accessKey": mask(d.accessKey()),
		"secretKey": mask(d.secretKey()),
		"region":    d.region(),
		"tag":       d.tag(),
	}

	// Resolve AWS credentials in priority order: static config values,
	// environment variables, the shared credentials file, and finally
	// the EC2 instance role.
	d.awsCreds = credentials.NewChainCredentials(
		[]credentials.Provider{
			&credentials.StaticProvider{Value: credentials.Value{AccessKeyID: d.accessKey(), SecretAccessKey: d.secretKey()}},
			&credentials.EnvProvider{},
			&credentials.SharedCredentialsProvider{},
			&ec2rolecreds.EC2RoleProvider{
				Client: ec2metadata.New(session.New()),
			},
		})

	ctx.WithFields(fields).Info("storage driver initialized")
	return nil
}
// InstanceInspect returns an instance.
//
// The instance identity is the AWS subnet the caller lives in: either
// already present in the context's instance ID, or carried in that ID's
// metadata and decoded here.
func (d *driver) InstanceInspect(
	ctx types.Context,
	opts types.Store) (*types.Instance, error) {
	iid := context.MustInstanceID(ctx)
	// Fast path: the ID field is already populated.
	if iid.ID != "" {
		return &types.Instance{InstanceID: iid}, nil
	}
	// Otherwise the subnet ID travels in the instance-ID metadata.
	var awsSubnetID string
	if err := iid.UnmarshalMetadata(&awsSubnetID); err != nil {
		return nil, err
	}
	instanceID := &types.InstanceID{ID: awsSubnetID, Driver: d.Name()}
	return &types.Instance{InstanceID: instanceID}, nil
}
// Type returns the type of storage a driver provides
func (d *driver) Type(ctx types.Context) (types.StorageType, error) {
return types.NAS, nil
}
// NextDeviceInfo returns the information about the driver's next available
// device workflow.
func (d *driver) NextDeviceInfo(
ctx types.Context) (*types.NextDeviceInfo, error) {
return nil, nil
}
// Volumes returns all volumes or a filtered list of volumes.
//
// Only EFS filesystems that (a) carry a Name tag, (b) are prefixed with
// this driver's tag + delimiter, and (c) are in the "available" life
// cycle state are reported. Attachments are resolved only when
// opts.Attachments is set, as that costs one extra API call per volume.
func (d *driver) Volumes(
	ctx types.Context,
	opts *types.VolumesOpts) ([]*types.Volume, error) {
	fileSystems, err := d.getAllFileSystems()
	if err != nil {
		return nil, err
	}
	var volumesSD []*types.Volume
	for _, fileSystem := range fileSystems {
		// Make sure that name is populated; the Name tag is optional in EFS.
		if fileSystem.Name == nil {
			ctx.WithFields(log.Fields{
				"filesystemid": *fileSystem.FileSystemId,
			}).Warn("missing EFS filesystem name")
			continue
		}
		// Only volumes with partition prefix (i.e. owned by this tag).
		if !strings.HasPrefix(*fileSystem.Name, d.tag()+tagDelimiter) {
			continue
		}
		// Only volumes in "available" state
		if *fileSystem.LifeCycleState != awsefs.LifeCycleStateAvailable {
			continue
		}
		// NOTE(review): assumes SizeInBytes and SizeInBytes.Value are
		// non-nil for available filesystems — confirm against SDK docs.
		volumeSD := &types.Volume{
			Name:        d.getPrintableName(*fileSystem.Name),
			ID:          *fileSystem.FileSystemId,
			Size:        *fileSystem.SizeInBytes.Value,
			Attachments: nil,
		}
		var atts []*types.VolumeAttachment
		if opts.Attachments {
			atts, err = d.getVolumeAttachments(ctx, *fileSystem.FileSystemId)
			if err != nil {
				return nil, err
			}
		}
		if len(atts) > 0 {
			volumeSD.Attachments = atts
		}
		volumesSD = append(volumesSD, volumeSD)
	}
	return volumesSD, nil
}
// VolumeInspect inspects a single volume.
func (d *driver) VolumeInspect(
ctx types.Context,
volumeID string,
opts *types.VolumeInspectOpts) (*types.Volume, error) {
resp, err := d.efsClient().DescribeFileSystems(&awsefs.DescribeFileSystemsInput{
FileSystemId: aws.String(volumeID),
})
if err != nil {
return nil, err
}
if len(resp.FileSystems) > 0 {
fileSystem := resp.FileSystems[0]
// Only volumes in "available" state
if *fileSystem.LifeCycleState != awsefs.LifeCycleStateAvailable {
return nil, nil
}
// Name is optional via tag so make sure it exists
var fileSystemName string
if fileSystem.Name != nil {
fileSystemName = *fileSystem.Name
} else {
ctx.WithFields(log.Fields{
"filesystemid": *fileSystem.FileSystemId,
}).Warn("missing EFS filesystem name")
}
volume := &types.Volume{
Name: d.getPrintableName(fileSystemName),
ID: *fileSystem.FileSystemId,
Size: *fileSystem.SizeInBytes.Value,
Attachments: nil,
}
var atts []*types.VolumeAttachment
if opts.Attachments {
atts, err = d.getVolumeAttachments(ctx, *fileSystem.FileSystemId)
if err != nil {
return nil, err
}
}
if len(atts) > 0 {
volume.Attachments = atts
}
return volume, nil
}
return nil, types.ErrNotFound{}
}
// VolumeCreate creates a new volume.
//
// The EFS CreationToken must be unique per filesystem and is limited to
// 64 ASCII characters. Using the raw name (as this code previously did)
// meant the same volume name under two different tags collided, and a
// long name could exceed the token limit — so the token is derived by
// hashing the full tag-qualified name instead.
func (d *driver) VolumeCreate(
	ctx types.Context,
	name string,
	opts *types.VolumeCreateOpts) (*types.Volume, error) {
	// Token is limited to 64 ASCII characters so just create MD5 hash from
	// the full tag/name identifier.
	creationToken := fmt.Sprintf("%x", md5.Sum([]byte(d.getFullVolumeName(name))))
	request := &awsefs.CreateFileSystemInput{
		CreationToken:   aws.String(creationToken),
		PerformanceMode: aws.String(awsefs.PerformanceModeGeneralPurpose),
	}
	// "maxio" opts into the higher-throughput EFS performance mode.
	if opts.Type != nil && strings.ToLower(*opts.Type) == "maxio" {
		request.PerformanceMode = aws.String(awsefs.PerformanceModeMaxIo)
	}
	fileSystem, err := d.efsClient().CreateFileSystem(request)
	if err != nil {
		return nil, err
	}
	_, err = d.efsClient().CreateTags(&awsefs.CreateTagsInput{
		FileSystemId: fileSystem.FileSystemId,
		Tags: []*awsefs.Tag{
			{
				Key:   aws.String("Name"),
				Value: aws.String(d.getFullVolumeName(name)),
			},
		},
	})
	if err != nil {
		// To not leak the EFS instances remove the filesystem that couldn't
		// be tagged with correct name before returning error response.
		_, deleteErr := d.efsClient().DeleteFileSystem(
			&awsefs.DeleteFileSystemInput{
				FileSystemId: fileSystem.FileSystemId,
			})
		if deleteErr != nil {
			ctx.WithFields(log.Fields{
				"filesystemid": *fileSystem.FileSystemId,
			}).Error("failed to delete EFS")
		}
		return nil, err
	}
	// Wait until FS is in "available" state.
	// NOTE(review): no timeout/ctx cancellation — a filesystem stuck in
	// "creating" blocks forever; confirm whether a bound should be added.
	for {
		state, err := d.getFileSystemLifeCycleState(*fileSystem.FileSystemId)
		if err == nil {
			if state != awsefs.LifeCycleStateCreating {
				break
			}
			ctx.WithFields(log.Fields{
				"state":        state,
				"filesystemid": *fileSystem.FileSystemId,
			}).Info("EFS not ready")
		} else {
			ctx.WithFields(log.Fields{
				"error":        err,
				"filesystemid": *fileSystem.FileSystemId,
			}).Error("failed to retrieve EFS state")
		}
		// Wait for 2 seconds
		<-time.After(2 * time.Second)
	}
	return d.VolumeInspect(ctx, *fileSystem.FileSystemId,
		&types.VolumeInspectOpts{Attachments: false})
}
// VolumeRemove removes a volume.
//
// EFS deletion is ordered: every mount target must be deleted — and be
// fully gone, not merely "deleting" — before the filesystem itself may
// be deleted. This method deletes the mount targets, polls until none
// remain, deletes the filesystem, then polls until AWS reports it gone.
func (d *driver) VolumeRemove(
	ctx types.Context,
	volumeID string,
	opts types.Store) error {
	// Remove MountTarget(s)
	resp, err := d.efsClient().DescribeMountTargets(
		&awsefs.DescribeMountTargetsInput{
			FileSystemId: aws.String(volumeID),
		})
	if err != nil {
		return err
	}
	for _, mountTarget := range resp.MountTargets {
		_, err = d.efsClient().DeleteMountTarget(
			&awsefs.DeleteMountTargetInput{
				MountTargetId: aws.String(*mountTarget.MountTargetId),
			})
		if err != nil {
			return err
		}
	}
	// FileSystem can be deleted only after all mountpoints are deleted (
	// just in "deleting" life cycle state). Here code will wait until all
	// mountpoints are deleted.
	// NOTE(review): these polling loops have no timeout or context
	// cancellation; a stuck deletion blocks forever — confirm intent.
	for {
		resp, err := d.efsClient().DescribeMountTargets(
			&awsefs.DescribeMountTargetsInput{
				FileSystemId: aws.String(volumeID),
			})
		if err != nil {
			return err
		}
		if len(resp.MountTargets) == 0 {
			break
		} else {
			ctx.WithFields(log.Fields{
				"mounttargets": resp.MountTargets,
				"filesystemid": volumeID,
			}).Info("waiting for MountTargets deletion")
		}
		<-time.After(2 * time.Second)
	}
	// Remove FileSystem
	_, err = d.efsClient().DeleteFileSystem(
		&awsefs.DeleteFileSystemInput{
			FileSystemId: aws.String(volumeID),
		})
	if err != nil {
		return err
	}
	for {
		ctx.WithFields(log.Fields{
			"filesystemid": volumeID,
		}).Info("waiting for FileSystem deletion")
		_, err := d.efsClient().DescribeFileSystems(
			&awsefs.DescribeFileSystemsInput{
				FileSystemId: aws.String(volumeID),
			})
		if err != nil {
			if awsErr, ok := err.(awserr.Error); ok {
				// "FileSystemNotFound" is the success signal here:
				// deletion has completed.
				if awsErr.Code() == "FileSystemNotFound" {
					break
				} else {
					return err
				}
			} else {
				return err
			}
		}
		<-time.After(2 * time.Second)
	}
	return nil
}
// VolumeAttach attaches a volume and provides a token clients can use
// to validate that device has appeared locally.
//
// For EFS, "attach" means ensuring a mount target exists in the
// caller's subnet; the attachment token is always empty.
func (d *driver) VolumeAttach(
	ctx types.Context,
	volumeID string,
	opts *types.VolumeAttachOpts) (*types.Volume, string, error) {
	vol, err := d.VolumeInspect(ctx, volumeID,
		&types.VolumeInspectOpts{Attachments: true})
	if err != nil {
		return nil, "", err
	}
	inst, err := d.InstanceInspect(ctx, nil)
	if err != nil {
		return nil, "", err
	}
	// Look for an existing mount target in this instance's subnet.
	var ma *types.VolumeAttachment
	for _, att := range vol.Attachments {
		if att.InstanceID.ID == inst.InstanceID.ID {
			ma = att
			break
		}
	}
	// No mount targets were found
	if ma == nil {
		request := &awsefs.CreateMountTargetInput{
			FileSystemId: aws.String(vol.ID),
			SubnetId:     aws.String(inst.InstanceID.ID),
		}
		if len(d.securityGroups()) > 0 {
			request.SecurityGroups = aws.StringSlice(d.securityGroups())
		}
		// TODO(mhrabovcin): Should we block here until MountTarget is in "available"
		// LifeCycleState? Otherwise mount could fail until creation is completed.
		_, err = d.efsClient().CreateMountTarget(request)
		// Failed to create mount target
		if err != nil {
			return nil, "", err
		}
	}
	return vol, "", err
}
// VolumeDetach detaches a volume.
func (d *driver) VolumeDetach(
ctx types.Context,
volumeID string,
opts *types.VolumeDetachOpts) (*types.Volume, error) {
// TODO(kasisnu): Think about what to do here?
// It is safe to remove the mount target
// when it is no longer being used anywhere
return nil, nil
}
// VolumeCreateFromSnapshot (not implemented).
func (d *driver) VolumeCreateFromSnapshot(
	ctx types.Context,
	snapshotID, volumeName string,
	opts *types.VolumeCreateOpts) (*types.Volume, error) {
	return nil, types.ErrNotImplemented
}

// VolumeCopy copies an existing volume (not implemented).
func (d *driver) VolumeCopy(
	ctx types.Context,
	volumeID, volumeName string,
	opts types.Store) (*types.Volume, error) {
	return nil, types.ErrNotImplemented
}

// VolumeSnapshot snapshots a volume (not implemented).
func (d *driver) VolumeSnapshot(
	ctx types.Context,
	volumeID, snapshotName string,
	opts types.Store) (*types.Snapshot, error) {
	return nil, types.ErrNotImplemented
}

// Snapshots returns no snapshots. Unlike the mutating snapshot methods
// above it returns nil rather than ErrNotImplemented — presumably so
// generic listing clients keep working; confirm before changing.
func (d *driver) Snapshots(
	ctx types.Context,
	opts types.Store) ([]*types.Snapshot, error) {
	return nil, nil
}

// SnapshotInspect returns no snapshot and no error (no-op).
func (d *driver) SnapshotInspect(
	ctx types.Context,
	snapshotID string,
	opts types.Store) (*types.Snapshot, error) {
	return nil, nil
}

// SnapshotCopy returns no snapshot and no error (no-op).
func (d *driver) SnapshotCopy(
	ctx types.Context,
	snapshotID, snapshotName, destinationID string,
	opts types.Store) (*types.Snapshot, error) {
	return nil, nil
}

// SnapshotRemove is a no-op that always reports success.
func (d *driver) SnapshotRemove(
	ctx types.Context,
	snapshotID string,
	opts types.Store) error {
	return nil
}
// Retrieve all filesystems with tags from AWS API. This is very expensive
// operation as it issues AWS SDK call per filesystem to retrieve tags.
func (d *driver) getAllFileSystems() ([]*awsefs.FileSystemDescription, error) {
	var all []*awsefs.FileSystemDescription
	input := &awsefs.DescribeFileSystemsInput{}
	// Follow the pagination marker until AWS reports no further pages.
	for {
		resp, err := d.efsClient().DescribeFileSystems(input)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.FileSystems...)
		if resp.NextMarker == nil {
			return all, nil
		}
		input = &awsefs.DescribeFileSystemsInput{Marker: resp.NextMarker}
	}
}
// getFileSystemLifeCycleState returns the life-cycle state string of
// the given filesystem (e.g. "creating", "available", "deleting").
func (d *driver) getFileSystemLifeCycleState(fileSystemID string) (string, error) {
	resp, err := d.efsClient().DescribeFileSystems(&awsefs.DescribeFileSystemsInput{
		FileSystemId: aws.String(fileSystemID),
	})
	if err != nil {
		return "", err
	}
	// Guard the result set: indexing [0] unconditionally panics when the
	// filesystem has disappeared between calls.
	if len(resp.FileSystems) == 0 {
		return "", types.ErrNotFound{}
	}
	return *resp.FileSystems[0].LifeCycleState, nil
}
// getPrintableName strips the driver's "<tag>/" ownership prefix from a
// filesystem name, yielding the user-visible volume name.
func (d *driver) getPrintableName(name string) string {
	prefix := d.tag() + tagDelimiter
	return strings.TrimPrefix(name, prefix)
}
// getFullVolumeName qualifies a user-visible volume name with the
// driver's tag, producing the "<tag>/<name>" form stored in EFS.
func (d *driver) getFullVolumeName(name string) string {
	return strings.Join([]string{d.tag(), name}, tagDelimiter)
}
// getVolumeAttachments maps a filesystem's mount targets onto
// libstorage volume attachments. Each mount target is reported as
// attached to the subnet hosting it; when local-device data is present
// in the context, the status also reflects whether the NFS export is
// actually mounted on this host.
func (d *driver) getVolumeAttachments(ctx types.Context, volumeID string) (
	[]*types.VolumeAttachment, error) {
	if volumeID == "" {
		return nil, goof.New("missing volume ID")
	}
	resp, err := d.efsClient().DescribeMountTargets(
		&awsefs.DescribeMountTargetsInput{
			FileSystemId: aws.String(volumeID),
		})
	if err != nil {
		return nil, err
	}
	ld, ldOK := context.LocalDevices(ctx)
	var atts []*types.VolumeAttachment
	for _, mountTarget := range resp.MountTargets {
		var dev string
		var status string
		if ldOK {
			// TODO(kasisnu): Check lifecycle state and build the path better
			// NFS device path is "<mount-target-ip>:/".
			dev = *mountTarget.IpAddress + ":" + "/"
			if _, ok := ld.DeviceMap[dev]; ok {
				status = "Exported and Mounted"
			} else {
				status = "Exported and Unmounted"
			}
		} else {
			status = "Exported"
		}
		attachmentSD := &types.VolumeAttachment{
			VolumeID:   *mountTarget.FileSystemId,
			InstanceID: &types.InstanceID{ID: *mountTarget.SubnetId, Driver: d.Name()},
			DeviceName: dev,
			Status:     status,
		}
		atts = append(atts, attachmentSD)
	}
	return atts, nil
}
// efsClient builds an EFS API client from the chained credentials and
// configured region. A fresh session and client are constructed on
// every call; NOTE(review): consider caching if this shows up hot.
func (d *driver) efsClient() *awsefs.EFS {
	config := aws.NewConfig().
		WithCredentials(d.awsCreds).
		WithRegion(d.region())
	if types.Debug {
		// Mirror AWS SDK wire traffic into our logger when debugging.
		config = config.
			WithLogger(newAwsLogger()).
			WithLogLevel(aws.LogDebug)
	}
	return awsefs.New(session.New(), config)
}
func (d *driver) accessKey() string {
return d.config.GetString("efs.accessKey")
}
func (d *driver) secretKey() string {
return d.config.GetString("efs.secretKey")
}
// securityGroups returns the security group IDs configured for new
// mount targets, or nil when none are configured.
func (d *driver) securityGroups() []string {
	// strings.Split("", ",") returns [""], so an unset config value would
	// make len() == 1 and inject one empty (invalid) security group into
	// CreateMountTarget. Return nil for the unset case instead.
	raw := d.config.GetString("efs.securityGroups")
	if raw == "" {
		return nil
	}
	return strings.Split(raw, ",")
}
func (d *driver) region() string {
return d.config.GetString("efs.region")
}
func (d *driver) tag() string {
return d.config.GetString("efs.tag")
}
// Simple logrus adapter for AWS Logger interface
type awsLogger struct {
logger *log.Logger
}
func newAwsLogger() *awsLogger {
return &awsLogger{
logger: log.StandardLogger(),
}
}
func (l *awsLogger) Log(args ...interface{}) {
l.logger.Println(args...)
}
EFS creation can fail if same volume name is used with different tag
package storage
import (
"crypto/md5"
"fmt"
"strings"
"time"
log "github.com/Sirupsen/logrus"
"github.com/akutz/gofig"
"github.com/akutz/goof"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
awsefs "github.com/aws/aws-sdk-go/service/efs"
"github.com/emccode/libstorage/api/context"
"github.com/emccode/libstorage/api/registry"
"github.com/emccode/libstorage/api/types"
"github.com/emccode/libstorage/drivers/storage/efs"
)
const (
tagDelimiter = "/"
)
// Driver represents a EFS driver implementation of StorageDriver
type driver struct {
config gofig.Config
awsCreds *credentials.Credentials
}
func init() {
registry.RegisterStorageDriver(efs.Name, newDriver)
}
func newDriver() types.StorageDriver {
return &driver{}
}
// Name returns the name of the driver
func (d *driver) Name() string {
return efs.Name
}
// Init initializes the driver.
func (d *driver) Init(ctx types.Context, config gofig.Config) error {
d.config = config
fields := log.Fields{
"accessKey": d.accessKey(),
"secretKey": d.secretKey(),
"region": d.region(),
"tag": d.tag(),
}
if d.accessKey() == "" {
fields["accessKey"] = ""
} else {
fields["accessKey"] = "******"
}
if d.secretKey() == "" {
fields["secretKey"] = ""
} else {
fields["secretKey"] = "******"
}
d.awsCreds = credentials.NewChainCredentials(
[]credentials.Provider{
&credentials.StaticProvider{Value: credentials.Value{AccessKeyID: d.accessKey(), SecretAccessKey: d.secretKey()}},
&credentials.EnvProvider{},
&credentials.SharedCredentialsProvider{},
&ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.New(session.New()),
},
})
ctx.WithFields(fields).Info("storage driver initialized")
return nil
}
// InstanceInspect returns an instance.
func (d *driver) InstanceInspect(
ctx types.Context,
opts types.Store) (*types.Instance, error) {
iid := context.MustInstanceID(ctx)
if iid.ID != "" {
return &types.Instance{InstanceID: iid}, nil
}
var awsSubnetID string
if err := iid.UnmarshalMetadata(&awsSubnetID); err != nil {
return nil, err
}
instanceID := &types.InstanceID{ID: awsSubnetID, Driver: d.Name()}
return &types.Instance{InstanceID: instanceID}, nil
}
// Type returns the type of storage a driver provides
func (d *driver) Type(ctx types.Context) (types.StorageType, error) {
return types.NAS, nil
}
// NextDeviceInfo returns the information about the driver's next available
// device workflow.
func (d *driver) NextDeviceInfo(
ctx types.Context) (*types.NextDeviceInfo, error) {
return nil, nil
}
// Volumes returns all volumes or a filtered list of volumes.
func (d *driver) Volumes(
ctx types.Context,
opts *types.VolumesOpts) ([]*types.Volume, error) {
fileSystems, err := d.getAllFileSystems()
if err != nil {
return nil, err
}
var volumesSD []*types.Volume
for _, fileSystem := range fileSystems {
// Make sure that name is popullated
if fileSystem.Name == nil {
ctx.WithFields(log.Fields{
"filesystemid": *fileSystem.FileSystemId,
}).Warn("missing EFS filesystem name")
continue
}
// Only volumes with partition prefix
if !strings.HasPrefix(*fileSystem.Name, d.tag()+tagDelimiter) {
continue
}
// Only volumes in "available" state
if *fileSystem.LifeCycleState != awsefs.LifeCycleStateAvailable {
continue
}
volumeSD := &types.Volume{
Name: d.getPrintableName(*fileSystem.Name),
ID: *fileSystem.FileSystemId,
Size: *fileSystem.SizeInBytes.Value,
Attachments: nil,
}
var atts []*types.VolumeAttachment
if opts.Attachments {
atts, err = d.getVolumeAttachments(ctx, *fileSystem.FileSystemId)
if err != nil {
return nil, err
}
}
if len(atts) > 0 {
volumeSD.Attachments = atts
}
volumesSD = append(volumesSD, volumeSD)
}
return volumesSD, nil
}
// VolumeInspect inspects a single volume.
func (d *driver) VolumeInspect(
ctx types.Context,
volumeID string,
opts *types.VolumeInspectOpts) (*types.Volume, error) {
resp, err := d.efsClient().DescribeFileSystems(&awsefs.DescribeFileSystemsInput{
FileSystemId: aws.String(volumeID),
})
if err != nil {
return nil, err
}
if len(resp.FileSystems) > 0 {
fileSystem := resp.FileSystems[0]
// Only volumes in "available" state
if *fileSystem.LifeCycleState != awsefs.LifeCycleStateAvailable {
return nil, nil
}
// Name is optional via tag so make sure it exists
var fileSystemName string
if fileSystem.Name != nil {
fileSystemName = *fileSystem.Name
} else {
ctx.WithFields(log.Fields{
"filesystemid": *fileSystem.FileSystemId,
}).Warn("missing EFS filesystem name")
}
volume := &types.Volume{
Name: d.getPrintableName(fileSystemName),
ID: *fileSystem.FileSystemId,
Size: *fileSystem.SizeInBytes.Value,
Attachments: nil,
}
var atts []*types.VolumeAttachment
if opts.Attachments {
atts, err = d.getVolumeAttachments(ctx, *fileSystem.FileSystemId)
if err != nil {
return nil, err
}
}
if len(atts) > 0 {
volume.Attachments = atts
}
return volume, nil
}
return nil, types.ErrNotFound{}
}
// VolumeCreate creates a new volume.
func (d *driver) VolumeCreate(
ctx types.Context,
name string,
opts *types.VolumeCreateOpts) (*types.Volume, error) {
// Token is limited to 64 ASCII characters so just create MD5 hash from full
// tag/name identifier
creationToken := fmt.Sprintf("%x", md5.Sum([]byte(d.getFullVolumeName(name))))
request := &awsefs.CreateFileSystemInput{
CreationToken: aws.String(creationToken),
PerformanceMode: aws.String(awsefs.PerformanceModeGeneralPurpose),
}
if opts.Type != nil && strings.ToLower(*opts.Type) == "maxio" {
request.PerformanceMode = aws.String(awsefs.PerformanceModeMaxIo)
}
fileSystem, err := d.efsClient().CreateFileSystem(request)
if err != nil {
return nil, err
}
_, err = d.efsClient().CreateTags(&awsefs.CreateTagsInput{
FileSystemId: fileSystem.FileSystemId,
Tags: []*awsefs.Tag{
{
Key: aws.String("Name"),
Value: aws.String(d.getFullVolumeName(name)),
},
},
})
if err != nil {
// To not leak the EFS instances remove the filesystem that couldn't
// be tagged with correct name before returning error response.
_, deleteErr := d.efsClient().DeleteFileSystem(
&awsefs.DeleteFileSystemInput{
FileSystemId: fileSystem.FileSystemId,
})
if deleteErr != nil {
ctx.WithFields(log.Fields{
"filesystemid": *fileSystem.FileSystemId,
}).Error("failed to delete EFS")
}
return nil, err
}
// Wait until FS is in "available" state
for {
state, err := d.getFileSystemLifeCycleState(*fileSystem.FileSystemId)
if err == nil {
if state != awsefs.LifeCycleStateCreating {
break
}
ctx.WithFields(log.Fields{
"state": state,
"filesystemid": *fileSystem.FileSystemId,
}).Info("EFS not ready")
} else {
ctx.WithFields(log.Fields{
"error": err,
"filesystemid": *fileSystem.FileSystemId,
}).Error("failed to retrieve EFS state")
}
// Wait for 2 seconds
<-time.After(2 * time.Second)
}
return d.VolumeInspect(ctx, *fileSystem.FileSystemId,
&types.VolumeInspectOpts{Attachments: false})
}
// VolumeRemove removes a volume.
func (d *driver) VolumeRemove(
ctx types.Context,
volumeID string,
opts types.Store) error {
// Remove MountTarget(s)
resp, err := d.efsClient().DescribeMountTargets(
&awsefs.DescribeMountTargetsInput{
FileSystemId: aws.String(volumeID),
})
if err != nil {
return err
}
for _, mountTarget := range resp.MountTargets {
_, err = d.efsClient().DeleteMountTarget(
&awsefs.DeleteMountTargetInput{
MountTargetId: aws.String(*mountTarget.MountTargetId),
})
if err != nil {
return err
}
}
// FileSystem can be deleted only after all mountpoints are deleted (
// just in "deleting" life cycle state). Here code will wait until all
// mountpoints are deleted.
for {
resp, err := d.efsClient().DescribeMountTargets(
&awsefs.DescribeMountTargetsInput{
FileSystemId: aws.String(volumeID),
})
if err != nil {
return err
}
if len(resp.MountTargets) == 0 {
break
} else {
ctx.WithFields(log.Fields{
"mounttargets": resp.MountTargets,
"filesystemid": volumeID,
}).Info("waiting for MountTargets deletion")
}
<-time.After(2 * time.Second)
}
// Remove FileSystem
_, err = d.efsClient().DeleteFileSystem(
&awsefs.DeleteFileSystemInput{
FileSystemId: aws.String(volumeID),
})
if err != nil {
return err
}
for {
ctx.WithFields(log.Fields{
"filesystemid": volumeID,
}).Info("waiting for FileSystem deletion")
_, err := d.efsClient().DescribeFileSystems(
&awsefs.DescribeFileSystemsInput{
FileSystemId: aws.String(volumeID),
})
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == "FileSystemNotFound" {
break
} else {
return err
}
} else {
return err
}
}
<-time.After(2 * time.Second)
}
return nil
}
// VolumeAttach attaches a volume and provides a token clients can use
// to validate that device has appeared locally.
func (d *driver) VolumeAttach(
ctx types.Context,
volumeID string,
opts *types.VolumeAttachOpts) (*types.Volume, string, error) {
vol, err := d.VolumeInspect(ctx, volumeID,
&types.VolumeInspectOpts{Attachments: true})
if err != nil {
return nil, "", err
}
inst, err := d.InstanceInspect(ctx, nil)
if err != nil {
return nil, "", err
}
var ma *types.VolumeAttachment
for _, att := range vol.Attachments {
if att.InstanceID.ID == inst.InstanceID.ID {
ma = att
break
}
}
// No mount targets were found
if ma == nil {
request := &awsefs.CreateMountTargetInput{
FileSystemId: aws.String(vol.ID),
SubnetId: aws.String(inst.InstanceID.ID),
}
if len(d.securityGroups()) > 0 {
request.SecurityGroups = aws.StringSlice(d.securityGroups())
}
// TODO(mhrabovcin): Should we block here until MountTarget is in "available"
// LifeCycleState? Otherwise mount could fail until creation is completed.
_, err = d.efsClient().CreateMountTarget(request)
// Failed to create mount target
if err != nil {
return nil, "", err
}
}
return vol, "", err
}
// VolumeDetach detaches a volume.
func (d *driver) VolumeDetach(
ctx types.Context,
volumeID string,
opts *types.VolumeDetachOpts) (*types.Volume, error) {
// TODO(kasisnu): Think about what to do here?
// It is safe to remove the mount target
// when it is no longer being used anywhere
return nil, nil
}
// VolumeCreateFromSnapshot (not implemented).
func (d *driver) VolumeCreateFromSnapshot(
ctx types.Context,
snapshotID, volumeName string,
opts *types.VolumeCreateOpts) (*types.Volume, error) {
return nil, types.ErrNotImplemented
}
// VolumeCopy copies an existing volume (not implemented)
func (d *driver) VolumeCopy(
ctx types.Context,
volumeID, volumeName string,
opts types.Store) (*types.Volume, error) {
return nil, types.ErrNotImplemented
}
// VolumeSnapshot snapshots a volume (not implemented)
func (d *driver) VolumeSnapshot(
ctx types.Context,
volumeID, snapshotName string,
opts types.Store) (*types.Snapshot, error) {
return nil, types.ErrNotImplemented
}
func (d *driver) Snapshots(
ctx types.Context,
opts types.Store) ([]*types.Snapshot, error) {
return nil, nil
}
func (d *driver) SnapshotInspect(
ctx types.Context,
snapshotID string,
opts types.Store) (*types.Snapshot, error) {
return nil, nil
}
func (d *driver) SnapshotCopy(
ctx types.Context,
snapshotID, snapshotName, destinationID string,
opts types.Store) (*types.Snapshot, error) {
return nil, nil
}
func (d *driver) SnapshotRemove(
ctx types.Context,
snapshotID string,
opts types.Store) error {
return nil
}
// Retrieve all filesystems with tags from AWS API. This is very expensive
// operation as it issues AWS SDK call per filesystem to retrieve tags.
func (d *driver) getAllFileSystems() (filesystems []*awsefs.FileSystemDescription, err error) {
resp, err := d.efsClient().DescribeFileSystems(&awsefs.DescribeFileSystemsInput{})
if err != nil {
return nil, err
}
filesystems = append(filesystems, resp.FileSystems...)
for resp.NextMarker != nil {
resp, err = d.efsClient().DescribeFileSystems(&awsefs.DescribeFileSystemsInput{
Marker: resp.NextMarker,
})
if err != nil {
return nil, err
}
filesystems = append(filesystems, resp.FileSystems...)
}
return filesystems, nil
}
// getFileSystemLifeCycleState returns the life-cycle state string of
// the given filesystem (e.g. "creating", "available", "deleting").
func (d *driver) getFileSystemLifeCycleState(fileSystemID string) (string, error) {
	resp, err := d.efsClient().DescribeFileSystems(&awsefs.DescribeFileSystemsInput{
		FileSystemId: aws.String(fileSystemID),
	})
	if err != nil {
		return "", err
	}
	// Guard the result set: indexing [0] unconditionally panics when the
	// filesystem has disappeared between calls.
	if len(resp.FileSystems) == 0 {
		return "", types.ErrNotFound{}
	}
	return *resp.FileSystems[0].LifeCycleState, nil
}
func (d *driver) getPrintableName(name string) string {
return strings.TrimPrefix(name, d.tag()+tagDelimiter)
}
func (d *driver) getFullVolumeName(name string) string {
return d.tag() + tagDelimiter + name
}
func (d *driver) getVolumeAttachments(ctx types.Context, volumeID string) (
[]*types.VolumeAttachment, error) {
if volumeID == "" {
return nil, goof.New("missing volume ID")
}
resp, err := d.efsClient().DescribeMountTargets(
&awsefs.DescribeMountTargetsInput{
FileSystemId: aws.String(volumeID),
})
if err != nil {
return nil, err
}
ld, ldOK := context.LocalDevices(ctx)
var atts []*types.VolumeAttachment
for _, mountTarget := range resp.MountTargets {
var dev string
var status string
if ldOK {
// TODO(kasisnu): Check lifecycle state and build the path better
dev = *mountTarget.IpAddress + ":" + "/"
if _, ok := ld.DeviceMap[dev]; ok {
status = "Exported and Mounted"
} else {
status = "Exported and Unmounted"
}
} else {
status = "Exported"
}
attachmentSD := &types.VolumeAttachment{
VolumeID: *mountTarget.FileSystemId,
InstanceID: &types.InstanceID{ID: *mountTarget.SubnetId, Driver: d.Name()},
DeviceName: dev,
Status: status,
}
atts = append(atts, attachmentSD)
}
return atts, nil
}
func (d *driver) efsClient() *awsefs.EFS {
config := aws.NewConfig().
WithCredentials(d.awsCreds).
WithRegion(d.region())
if types.Debug {
config = config.
WithLogger(newAwsLogger()).
WithLogLevel(aws.LogDebug)
}
return awsefs.New(session.New(), config)
}
func (d *driver) accessKey() string {
return d.config.GetString("efs.accessKey")
}
func (d *driver) secretKey() string {
return d.config.GetString("efs.secretKey")
}
// securityGroups returns the security group IDs configured for new
// mount targets, or nil when none are configured.
func (d *driver) securityGroups() []string {
	// strings.Split("", ",") returns [""], so an unset config value would
	// make len() == 1 and inject one empty (invalid) security group into
	// CreateMountTarget. Return nil for the unset case instead.
	raw := d.config.GetString("efs.securityGroups")
	if raw == "" {
		return nil
	}
	return strings.Split(raw, ",")
}
func (d *driver) region() string {
return d.config.GetString("efs.region")
}
func (d *driver) tag() string {
return d.config.GetString("efs.tag")
}
// Simple logrus adapter for AWS Logger interface
type awsLogger struct {
logger *log.Logger
}
func newAwsLogger() *awsLogger {
return &awsLogger{
logger: log.StandardLogger(),
}
}
func (l *awsLogger) Log(args ...interface{}) {
l.logger.Println(args...)
}
|
// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// DO NOT EDIT
// This file was machine generated by github.com/tensorflow/tensorflow/tensorflow/go/genop/internal
//
// WARNING: This generation of wrapper function for TensorFlow ops is in an
// experimental state. The generated API can change without notice.
package op
import tf "github.com/tensorflow/tensorflow/tensorflow/go"
// optionalAttr is an intentionally un-exported type to hide
// details of how optional attributes to operations are implemented.
type optionalAttr map[string]interface{}
func makeOutputList(op *tf.Operation, start int, output string) ([]tf.Output, int, error) {
size, err := op.OutputListSize(output)
if err!= nil {
return nil, start, err
}
list := make([]tf.Output, size)
for i := 0; i < size; i++ {
list[i] = op.Output(start + i)
}
return list, start + size, nil
}
// XlaSpmdShardToFullShapeAttr is an optional argument to XlaSpmdShardToFullShape.
type XlaSpmdShardToFullShapeAttr func(optionalAttr)
// XlaSpmdShardToFullShapeDim sets the optional dim attribute to value.
// If not specified, defaults to -1
func XlaSpmdShardToFullShapeDim(value int64) XlaSpmdShardToFullShapeAttr {
return func(m optionalAttr) {
m["dim"] = value
}
}
// An op used by XLA SPMD partitioner to switch from manual partitioning to
//
// automatic partitioning. It converts the shard-shaped, manually partitioned input
// into full-shaped tensor to be partitioned automatically with the same sharding
// used by manual partitioning. The conversion can happen partially in subgroups,
// by specifying the dim attribute, where only that dim will be converted.
func XlaSpmdShardToFullShape(scope *Scope, input tf.Output, manual_sharding string, full_shape tf.Shape, optional...XlaSpmdShardToFullShapeAttr) (output tf.Output) {
if scope.Err()!= nil {
return
}
attrs := map[string]interface{}{"manual_sharding": manual_sharding, "full_shape": full_shape}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "XlaSpmdShardToFullShape",
Input: []tf.Input{
input,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
// Wraps the XLA Sort operator, documented at
//
// https://www.tensorflow.org/performance/xla/operation_semantics#sort
//.
//
// Sorts a tensor. Currently only sorts in ascending order are supported.
//
// Arguments:
// input: A `Tensor` of type T.
//
// Returns A `Tensor` of type T.
func XlaSort(scope *Scope, input tf.Output) (output tf.Output) {
if scope.Err()!= nil {
return
}
opspec := tf.OpSpec{
Type: "XlaSort",
Input: []tf.Input{
input,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
// Receives the named tensor from another XLA computation. Wraps the XLA Recv
//
// operator documented at
// https://www.tensorflow.org/performance/xla/operation_semantics#recv.
//
// Arguments:
// dtype: The type of the tensor.
// tensor_name: A string key that identifies the channel.
// shape: The shape of the tensor.
//
// Returns The tensor to receive.
func XlaRecv(scope *Scope, dtype tf.DataType, tensor_name string, shape tf.Shape) (tensor tf.Output) {
if scope.Err()!= nil {
return
}
attrs := map[string]interface{}{"dtype": dtype, "tensor_name": tensor_name, "shape": shape}
opspec := tf.OpSpec{
Type: "XlaRecv",
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
// XlaDynamicSlice wraps the XLA DynamicSlice operator, documented at
//
// https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice
//
// DynamicSlice extracts a sub-array from the input array at dynamic
// start_indices. The size of the slice in each dimension is passed in
// size_indices, which specify the end point of exclusive slice intervals in each
// dimension -- [start, start + size). The shape of start_indices must have rank 1,
// with dimension size equal to the rank of operand.
//
// Arguments:
//	input: A `Tensor` of type T.
//	start_indices: Rank-1 tensor of start indices, one per dimension of `input`.
//	size_indices: List of N integers containing the slice size for each
// dimension. Each value must be strictly greater than zero, and start + size
// must be less than or equal to the size of the dimension to avoid
// implementation defined behavior.
func XlaDynamicSlice(scope *Scope, input tf.Output, start_indices tf.Output, size_indices tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	spec := tf.OpSpec{
		Type:  "XlaDynamicSlice",
		Input: []tf.Input{input, start_indices, size_indices},
	}
	return scope.AddOperation(spec).Output(0)
}
// XlaSetBound sets a bound for the given input value as a hint to the XLA compiler,
//
// returns the same value.
func XlaSetBound(scope *Scope, input tf.Output, bound tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	spec := tf.OpSpec{
		Type:  "XlaSetBound",
		Input: []tf.Input{input, bound},
	}
	return scope.AddOperation(spec).Output(0)
}
// XlaDot wraps the XLA DotGeneral operator, documented at
//
// https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral
//
// Arguments:
//	lhs: the LHS tensor
//	rhs: the RHS tensor
//	dimension_numbers: a serialized xla::DotDimensionNumbers proto.
//	precision_config: a serialized xla::PrecisionConfig proto.
func XlaDot(scope *Scope, lhs tf.Output, rhs tf.Output, dimension_numbers string, precision_config string) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	spec := tf.OpSpec{
		Type:  "XlaDot",
		Input: []tf.Input{lhs, rhs},
		Attrs: map[string]interface{}{
			"dimension_numbers": dimension_numbers,
			"precision_config":  precision_config,
		},
	}
	return scope.AddOperation(spec).Output(0)
}
// Fact outputs a fact about factorials.
func Fact(scope *Scope) (fact tf.Output) {
	if scope.Err() != nil {
		return
	}
	spec := tf.OpSpec{
		Type: "Fact",
	}
	return scope.AddOperation(spec).Output(0)
}
// FakeQuantWithMinMaxVarsGradientAttr is an optional argument to FakeQuantWithMinMaxVarsGradient.
// Each value is a functional option that writes one attribute into the op's attribute map.
type FakeQuantWithMinMaxVarsGradientAttr func(optionalAttr)
// FakeQuantWithMinMaxVarsGradientNumBits sets the optional num_bits attribute to value.
//
// value: The bitwidth of the quantization; between 2 and 8, inclusive.
// If not specified, defaults to 8
func FakeQuantWithMinMaxVarsGradientNumBits(value int64) FakeQuantWithMinMaxVarsGradientAttr {
	return func(attrs optionalAttr) {
		attrs["num_bits"] = value
	}
}
// FakeQuantWithMinMaxVarsGradientNarrowRange sets the optional narrow_range attribute to value.
//
// value: Whether to quantize into 2^num_bits - 1 distinct values.
// If not specified, defaults to false
func FakeQuantWithMinMaxVarsGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsGradientAttr {
	return func(attrs optionalAttr) {
		attrs["narrow_range"] = value
	}
}
// FakeQuantWithMinMaxVarsGradient computes gradients for a FakeQuantWithMinMaxVars operation.
//
// Arguments:
//	gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
//	inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation.
//	min, max: Quantization interval, scalar floats.
//
// Returns:
//	backprops_wrt_input: Backpropagated gradients w.r.t. inputs:
// `gradients * (inputs >= min && inputs <= max)`.
//	backprop_wrt_min: Backpropagated gradients w.r.t. min parameter:
// `sum(gradients * (inputs < min))`.
//	backprop_wrt_max: Backpropagated gradients w.r.t. max parameter:
// `sum(gradients * (inputs > max))`.
func FakeQuantWithMinMaxVarsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsGradientAttr) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, backprop_wrt_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, apply := range optional {
		apply(attrs)
	}
	spec := tf.OpSpec{
		Type:  "FakeQuantWithMinMaxVarsGradient",
		Input: []tf.Input{gradients, inputs, min, max},
		Attrs: attrs,
	}
	result := scope.AddOperation(spec)
	return result.Output(0), result.Output(1), result.Output(2)
}
// XlaSvd computes the eigen decomposition of a batch of self-adjoint matrices
//
// (Note: Only real inputs are supported).
//
// Computes the eigenvalues and eigenvectors of the innermost M-by-N matrices in
// tensor such that tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * Transpose(v[...,:,:]).
//
// Arguments:
//	a: the input tensor.
//	max_iter: maximum number of sweep update, i.e., the whole lower triangular
// part or upper triangular part based on parameter lower. Heuristically, it has
// been argued that approximately log(min (M, N)) sweeps are needed in practice
// (Ref: Golub & van Loan "Matrix Computation").
//	epsilon: the tolerance ratio.
//	precision_config: a serialized xla::PrecisionConfig proto.
//
// Returns:
//	s: Singular values. The values are sorted in reverse order of magnitude, so
// s[..., 0] is the largest value, s[..., 1] is the second largest, etc.
//	u: Left singular vectors.
//	v: Right singular vectors.
func XlaSvd(scope *Scope, a tf.Output, max_iter int64, epsilon float32, precision_config string) (s tf.Output, u tf.Output, v tf.Output) {
	if scope.Err() != nil {
		return
	}
	spec := tf.OpSpec{
		Type:  "XlaSvd",
		Input: []tf.Input{a},
		Attrs: map[string]interface{}{
			"max_iter":         max_iter,
			"epsilon":          epsilon,
			"precision_config": precision_config,
		},
	}
	result := scope.AddOperation(spec)
	return result.Output(0), result.Output(1), result.Output(2)
}
// FakeQuantWithMinMaxArgsGradientAttr is an optional argument to FakeQuantWithMinMaxArgsGradient.
// Each value is a functional option that writes one attribute into the op's attribute map.
type FakeQuantWithMinMaxArgsGradientAttr func(optionalAttr)
// FakeQuantWithMinMaxArgsGradientMin sets the optional min attribute to value.
// If not specified, defaults to -6
func FakeQuantWithMinMaxArgsGradientMin(value float32) FakeQuantWithMinMaxArgsGradientAttr {
	return func(attrs optionalAttr) {
		attrs["min"] = value
	}
}
// FakeQuantWithMinMaxArgsGradientMax sets the optional max attribute to value.
// If not specified, defaults to 6
func FakeQuantWithMinMaxArgsGradientMax(value float32) FakeQuantWithMinMaxArgsGradientAttr {
	return func(attrs optionalAttr) {
		attrs["max"] = value
	}
}
// FakeQuantWithMinMaxArgsGradientNumBits sets the optional num_bits attribute to value.
// If not specified, defaults to 8
func FakeQuantWithMinMaxArgsGradientNumBits(value int64) FakeQuantWithMinMaxArgsGradientAttr {
	return func(attrs optionalAttr) {
		attrs["num_bits"] = value
	}
}
// FakeQuantWithMinMaxArgsGradientNarrowRange sets the optional narrow_range attribute to value.
// If not specified, defaults to false
func FakeQuantWithMinMaxArgsGradientNarrowRange(value bool) FakeQuantWithMinMaxArgsGradientAttr {
	return func(attrs optionalAttr) {
		attrs["narrow_range"] = value
	}
}
// FakeQuantWithMinMaxArgsGradient computes gradients for a FakeQuantWithMinMaxArgs operation.
//
// Arguments:
//	gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
//	inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.
//
// Returns Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
// `gradients * (inputs >= min && inputs <= max)`.
func FakeQuantWithMinMaxArgsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsGradientAttr) (backprops tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, apply := range optional {
		apply(attrs)
	}
	spec := tf.OpSpec{
		Type:  "FakeQuantWithMinMaxArgsGradient",
		Input: []tf.Input{gradients, inputs},
		Attrs: attrs,
	}
	return scope.AddOperation(spec).Output(0)
}
// XlaBroadcastHelper is a helper operator for performing XLA-style broadcasts
//
// Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to
// whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules
// for binary operators.
//
// Arguments:
//	lhs: the LHS input tensor
//	rhs: the RHS input tensor
//	broadcast_dims: an XLA-style broadcast dimension specification
//
// Returns:
//	lhs_output: the broadcasted LHS tensor
//	rhs_output: the broadcasted RHS tensor
func XlaBroadcastHelper(scope *Scope, lhs tf.Output, rhs tf.Output, broadcast_dims tf.Output) (lhs_output tf.Output, rhs_output tf.Output) {
	if scope.Err() != nil {
		return
	}
	spec := tf.OpSpec{
		Type:  "XlaBroadcastHelper",
		Input: []tf.Input{lhs, rhs, broadcast_dims},
	}
	result := scope.AddOperation(spec)
	return result.Output(0), result.Output(1)
}
// TensorScatterSub subtracts sparse `updates` from an existing tensor according to `indices`.
//
// This operation creates a new tensor by subtracting sparse `updates` from the
// passed in `tensor`.
// This operation is very similar to `tf.scatter_nd_sub`, except that the updates
// are subtracted from an existing tensor (as opposed to a variable). If the memory
// for the existing tensor cannot be re-used, a copy is made and updated.
//
// `indices` is an integer tensor containing indices into a new tensor of shape
// `shape`. The last dimension of `indices` can be at most the rank of `shape`:
//
// indices.shape[-1] <= shape.rank
//
// The last dimension of `indices` corresponds to indices into elements
// (if `indices.shape[-1] = shape.rank`) or slices
// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
// `shape`. `updates` is a tensor with shape
//
// indices.shape[:-1] + shape[indices.shape[-1]:]
//
// The simplest form of tensor_scatter_sub is to subtract individual elements
// from a tensor by index. For example, say we want to insert 4 scattered elements
// in a rank-1 tensor with 8 elements.
//
// In Python, this scatter subtract operation would look like this:
//
// ```python
// indices = tf.constant([[4], [3], [1], [7]])
// updates = tf.constant([9, 10, 11, 12])
// tensor = tf.ones([8], dtype=tf.int32)
// updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
// print(updated)
// ```
//
// The resulting tensor would look like this:
//
// [1, -10, 1, -9, -8, 1, 1, -11]
//
// We can also, insert entire slices of a higher rank tensor all at once. For
// example, if we wanted to insert two slices in the first dimension of a
// rank-3 tensor with two matrices of new values.
//
// In Python, this scatter add operation would look like this:
//
// ```python
// indices = tf.constant([[0], [2]])
// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
// [7, 7, 7, 7], [8, 8, 8, 8]],
// [[5, 5, 5, 5], [6, 6, 6, 6],
// [7, 7, 7, 7], [8, 8, 8, 8]]])
// tensor = tf.ones([4, 4, 4],dtype=tf.int32)
// updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
// print(updated)
// ```
//
// The resulting tensor would look like this:
//
// [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
// [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
//
// Note that on CPU, if an out of bound index is found, an error is returned.
// On GPU, if an out of bound index is found, the index is ignored.
//
// Arguments:
//	tensor: Tensor to copy/update.
//	indices: Index tensor.
//	updates: Updates to scatter into output.
//
// Returns A new tensor copied from tensor and updates subtracted according to the indices.
func TensorScatterSub(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	spec := tf.OpSpec{
		Type:  "TensorScatterSub",
		Input: []tf.Input{tensor, indices, updates},
	}
	return scope.AddOperation(spec).Output(0)
}
// TensorScatterAdd adds sparse `updates` to an existing tensor according to `indices`.
//
// This operation creates a new tensor by adding sparse `updates` to the passed
// in `tensor`.
// This operation is very similar to `tf.compat.v1.scatter_nd_add`, except that the updates
// are added onto an existing tensor (as opposed to a variable). If the memory
// for the existing tensor cannot be re-used, a copy is made and updated.
//
// `indices` is an integer tensor containing indices into a new tensor of shape
// `tensor.shape`. The last dimension of `indices` can be at most the rank of
// `tensor.shape`:
//
// indices.shape[-1] <= tensor.shape.rank
//
// The last dimension of `indices` corresponds to indices into elements
// (if `indices.shape[-1] = tensor.shape.rank`) or slices
// (if `indices.shape[-1] < tensor.shape.rank`) along dimension
// `indices.shape[-1]` of `tensor.shape`. `updates` is a tensor with shape
//
// indices.shape[:-1] + tensor.shape[indices.shape[-1]:]
//
// The simplest form of tensor_scatter_add is to add individual elements to a
// tensor by index. For example, say we want to add 4 elements in a rank-1
// tensor with 8 elements.
//
// In Python, this scatter add operation would look like this:
//
// ```python
// indices = tf.constant([[4], [3], [1], [7]])
// updates = tf.constant([9, 10, 11, 12])
// tensor = tf.ones([8], dtype=tf.int32)
// updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
// print(updated)
// ```
//
// The resulting tensor would look like this:
//
// [1, 12, 1, 11, 10, 1, 1, 13]
//
// We can also, insert entire slices of a higher rank tensor all at once. For
// example, if we wanted to insert two slices in the first dimension of a
// rank-3 tensor with two matrices of new values.
//
// In Python, this scatter add operation would look like this:
//
// ```python
// indices = tf.constant([[0], [2]])
// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
// [7, 7, 7, 7], [8, 8, 8, 8]],
// [[5, 5, 5, 5], [6, 6, 6, 6],
// [7, 7, 7, 7], [8, 8, 8, 8]]])
// tensor = tf.ones([4, 4, 4],dtype=tf.int32)
// updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
// print(updated)
// ```
//
// The resulting tensor would look like this:
//
// [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
// [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
//
// Note that on CPU, if an out of bound index is found, an error is returned.
// On GPU, if an out of bound index is found, the index is ignored.
//
// Arguments:
//	tensor: Tensor to copy/update.
//	indices: Index tensor.
//	updates: Updates to scatter into output.
//
// Returns A new tensor copied from tensor and updates added according to the indices.
func TensorScatterAdd(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	spec := tf.OpSpec{
		Type:  "TensorScatterAdd",
		Input: []tf.Input{tensor, indices, updates},
	}
	return scope.AddOperation(spec).Output(0)
}
// QuantizedReshape reshapes a quantized tensor as per the Reshape op.
//
// Arguments:
//	tensor: The quantized tensor to reshape.
//	shape: Defines the shape of the output tensor.
//	input_min: The minimum value of the input.
//	input_max: The maximum value of the input.
//
// Returns:
//	output
//	output_min: This value is copied from input_min.
//	output_max: This value is copied from input_max.
func QuantizedReshape(scope *Scope, tensor tf.Output, shape tf.Output, input_min tf.Output, input_max tf.Output) (output tf.Output, output_min tf.Output, output_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	spec := tf.OpSpec{
		Type:  "QuantizedReshape",
		Input: []tf.Input{tensor, shape, input_min, input_max},
	}
	result := scope.AddOperation(spec)
	return result.Output(0), result.Output(1), result.Output(2)
}
// QuantizeAndDequantizeV4GradAttr is an optional argument to QuantizeAndDequantizeV4Grad.
// Each value is a functional option that writes one attribute into the op's attribute map.
type QuantizeAndDequantizeV4GradAttr func(optionalAttr)
// QuantizeAndDequantizeV4GradAxis sets the optional axis attribute to value.
// If not specified, defaults to -1
func QuantizeAndDequantizeV4GradAxis(value int64) QuantizeAndDequantizeV4GradAttr {
	return func(attrs optionalAttr) {
		attrs["axis"] = value
	}
}
// QuantizeAndDequantizeV4Grad returns the gradient of `QuantizeAndDequantizeV4`.
//
// Returns a gradient of 1 for inputs that are within the quantization range,
// or 0 otherwise.
func QuantizeAndDequantizeV4Grad(scope *Scope, gradients tf.Output, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV4GradAttr) (input_backprop tf.Output, input_min_backprop tf.Output, input_max_backprop tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, apply := range optional {
		apply(attrs)
	}
	spec := tf.OpSpec{
		Type:  "QuantizeAndDequantizeV4Grad",
		Input: []tf.Input{gradients, input, input_min, input_max},
		Attrs: attrs,
	}
	result := scope.AddOperation(spec)
	return result.Output(0), result.Output(1), result.Output(2)
}
// QuantizeAndDequantizeV2Attr is an optional argument to QuantizeAndDequantizeV2.
// Each value is a functional option that writes one attribute into the op's attribute map.
type QuantizeAndDequantizeV2Attr func(optionalAttr)
// QuantizeAndDequantizeV2SignedInput sets the optional signed_input attribute to value.
//
// value: Whether the quantization is signed or unsigned. (actually this parameter should
// have been called <b>`signed_output`</b>)
// If not specified, defaults to true
func QuantizeAndDequantizeV2SignedInput(value bool) QuantizeAndDequantizeV2Attr {
	return func(attrs optionalAttr) {
		attrs["signed_input"] = value
	}
}
// QuantizeAndDequantizeV2NumBits sets the optional num_bits attribute to value.
//
// value: The bitwidth of the quantization.
// If not specified, defaults to 8
func QuantizeAndDequantizeV2NumBits(value int64) QuantizeAndDequantizeV2Attr {
	return func(attrs optionalAttr) {
		attrs["num_bits"] = value
	}
}
// QuantizeAndDequantizeV2RangeGiven sets the optional range_given attribute to value.
//
// value: Whether the range is given or should be determined from the `input` tensor.
// If not specified, defaults to false
func QuantizeAndDequantizeV2RangeGiven(value bool) QuantizeAndDequantizeV2Attr {
	return func(attrs optionalAttr) {
		attrs["range_given"] = value
	}
}
// QuantizeAndDequantizeV2RoundMode sets the optional round_mode attribute to value.
//
// value: The 'round_mode' attribute controls which rounding tie-breaking algorithm is
// used when rounding float values to their quantized equivalents. The following
// rounding modes are currently supported:
//
// * HALF_TO_EVEN: this is the default round_mode.
// * HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5
// rounds up to -7.
//
// If not specified, defaults to "HALF_TO_EVEN"
func QuantizeAndDequantizeV2RoundMode(value string) QuantizeAndDequantizeV2Attr {
	return func(attrs optionalAttr) {
		attrs["round_mode"] = value
	}
}
// QuantizeAndDequantizeV2NarrowRange sets the optional narrow_range attribute to value.
//
// value: If True, then the absolute value of the quantized minimum value is the same as
// the quantized maximum value, instead of 1 greater.
// i.e. for 8 bit quantization, the minimum value is -127 instead of -128.
// If not specified, defaults to false
func QuantizeAndDequantizeV2NarrowRange(value bool) QuantizeAndDequantizeV2Attr {
	return func(attrs optionalAttr) {
		attrs["narrow_range"] = value
	}
}
// QuantizeAndDequantizeV2Axis sets the optional axis attribute to value.
//
// value: If specified, this axis is treated as a channel or slice axis, and a separate
// quantization range is used for each channel or slice along this axis.
// If not specified, defaults to -1
func QuantizeAndDequantizeV2Axis(value int64) QuantizeAndDequantizeV2Attr {
	return func(attrs optionalAttr) {
		attrs["axis"] = value
	}
}
// QuantizeAndDequantizeV2 quantizes then dequantizes a tensor.
//
// This op simulates the precision loss from the quantized forward pass by:
//
// 1. Quantizing the tensor to fixed point numbers, which should match the target
// quantization method when it is used in inference.
// 2. Dequantizing it back to floating point numbers for the following ops, most
// likely matmul.
//
// There are different ways to quantize. This version uses only scaling, so 0.0
// maps to 0.
//
// From the specified 'num_bits' in the quantized output type, it determines
// minimum and maximum representable quantized values.
//
// e.g.
//
// * [-128, 127] for signed, num_bits = 8, or
// * [0, 255] for unsigned, num_bits = 8.
//
// If range_given == False, the initial input_min, input_max will be determined
// automatically as the minimum and maximum values in the input tensor, otherwise
// the specified values of input_min, input_max are used.
//
// Note: If the input_min, input_max are specified, they do not need to equal the
// actual minimum and maximum values in the tensor. e.g. in some cases it may be
// beneficial to specify these values such that the low probability extremes of the
// input distribution are clipped.
//
// This op determines the maximum scale_factor that would map the initial
// [input_min, input_max] range to a range that lies within the representable
// quantized range.
//
// It determines the scale from one of input_min and input_max, then updates the
// other one to maximize the representable range.
//
// e.g.
//
// * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
// 5.0]: it would use a scale_factor of -128 / -10.0 = 12.8 In this case, it
// would update input_max to be 127 / 12.8 = 9.921875
// * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
// 10.0]: it would use a scale_factor of 127 / 10.0 = 12.7 In this case, it
// would update input_min to be 128.0 / 12.7 = -10.07874
// * if the output is unsigned, input_min is forced to be 0, and only the
// specified input_max is used.
//
// After determining the scale_factor and updating the input range, it applies the
// following to each value in the 'input' tensor.
//
// output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.
//
// The above round function rounds the value based on the given round_mode.
//
// Arguments:
//	input: Tensor to quantize and then dequantize.
//	input_min: If `range_given == True`, this specifies the minimum input value that needs to
// be represented, otherwise it is determined from the min value of the `input`
// tensor.
//	input_max: If `range_given == True`, this specifies the maximum input value that needs to
// be represented, otherwise it is determined from the max value of the `input`
// tensor.
func QuantizeAndDequantizeV2(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, apply := range optional {
		apply(attrs)
	}
	spec := tf.OpSpec{
		Type:  "QuantizeAndDequantizeV2",
		Input: []tf.Input{input, input_min, input_max},
		Attrs: attrs,
	}
	return scope.AddOperation(spec).Output(0)
}
// QuantizeAndDequantizeAttr is an optional argument to QuantizeAndDequantize.
// Each value is a functional option that writes one attribute into the op's attribute map.
type QuantizeAndDequantizeAttr func(optionalAttr)
// QuantizeAndDequantizeSignedInput sets the optional signed_input attribute to value.
// If not specified, defaults to true
func QuantizeAndDequantizeSignedInput(value bool) QuantizeAndDequantizeAttr {
	return func(attrs optionalAttr) {
		attrs["signed_input"] = value
	}
}
// QuantizeAndDequantizeNumBits sets the optional num_bits attribute to value.
// If not specified, defaults to 8
func QuantizeAndDequantizeNumBits(value int64) QuantizeAndDequantizeAttr {
	return func(attrs optionalAttr) {
		attrs["num_bits"] = value
	}
}
// QuantizeAndDequantizeRangeGiven sets the optional range_given attribute to value.
// If not specified, defaults to false
func QuantizeAndDequantizeRangeGiven(value bool) QuantizeAndDequantizeAttr {
	return func(attrs optionalAttr) {
		attrs["range_given"] = value
	}
}
// QuantizeAndDequantizeInputMin sets the optional input_min attribute to value.
// If not specified, defaults to 0
func QuantizeAndDequantizeInputMin(value float32) QuantizeAndDequantizeAttr {
	return func(attrs optionalAttr) {
		attrs["input_min"] = value
	}
}
// QuantizeAndDequantizeInputMax sets the optional input_max attribute to value.
// If not specified, defaults to 0
func QuantizeAndDequantizeInputMax(value float32) QuantizeAndDequantizeAttr {
	return func(attrs optionalAttr) {
		attrs["input_max"] = value
	}
}
// QuantizeAndDequantize quantizes then dequantizes a tensor.
//
// Deprecated: use QuantizeAndDequantizeV2 instead.
//
// DEPRECATED at GraphDef version 22: Replaced by QuantizeAndDequantizeV2
func QuantizeAndDequantize(scope *Scope, input tf.Output, optional ...QuantizeAndDequantizeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, apply := range optional {
		apply(attrs)
	}
	spec := tf.OpSpec{
		Type:  "QuantizeAndDequantize",
		Input: []tf.Input{input},
		Attrs: attrs,
	}
	return scope.AddOperation(spec).Output(0)
}
// OneHotAttr is an optional argument to OneHot.
// Each value is a functional option that writes one attribute into the op's attribute map.
type OneHotAttr func(optionalAttr)
// OneHotAxis sets the optional axis attribute to value.
//
// value: The axis to fill (default: -1, a new inner-most axis).
// If not specified, defaults to -1
func OneHotAxis(value int64) OneHotAttr {
	return func(attrs optionalAttr) {
		attrs["axis"] = value
	}
}
// OneHot returns a one-hot tensor.
//
// The locations represented by indices in `indices` take value `on_value`,
// while all other locations take value `off_value`.
//
// If the input `indices` is rank `N`, the output will have rank `N+1`,
// The new axis is created at dimension `axis` (default: the new axis is
// appended at the end).
//
// If `indices` is a scalar the output shape will be a vector of length `depth`.
//
// If `indices` is a vector of length `features`, the output shape will be:
// ```
// features x depth if axis == -1
// depth x features if axis == 0
// ```
//
// If `indices` is a matrix (batch) with shape `[batch, features]`,
// the output shape will be:
// ```
// batch x features x depth if axis == -1
// batch x depth x features if axis == 1
// depth x batch x features if axis == 0
// ```
//
//
// Examples
// =========
//
// Suppose that
// ```
// indices = [0, 2, -1, 1]
// depth = 3
// on_value = 5.0
// off_value = 0.0
// axis = -1
// ```
//
// Then output is `[4 x 3]`:
// ```
// output =
// [5.0 0.0 0.0] // one_hot(0)
// [0.0 0.0 5.0] // one_hot(2)
// [0.0 0.0 0.0] // one_hot(-1)
// [0.0 5.0 0.0] // one_hot(1)
// ```
//
// Suppose that
// ```
// indices = [0, 2, -1, 1]
// depth = 3
// on_value = 0.0
// off_value = 3.0
// axis = 0
// ```
//
// Then output is `[3 x 4]`:
// ```
// output =
// [0.0 3.0 3.0 3.0]
// [3.0 3.0 3.0 0.0]
// [3.0 3.0 3.0 3.0]
// [3.0 0.0 3.0 3.0]
// // ^ one_hot(0)
// // ^ one_hot(2)
// // ^ one_hot(-1)
// // ^ one_hot(1)
// ```
//
// Suppose that
// ```
// indices = [[0, 2], [1, -1]]
// depth = 3
// on_value = 1.0
// off_value = 0.0
// axis = -1
// ```
//
// Then output is `[2 x 2 x 3]`:
// ```
// output =
// [
// [1.0, 0.0, 0.0] // one_hot(0)
// [0.0, 0.0, 1.0] // one_hot(2)
// ][
// [0.0, 1.0, 0.0] // one_hot(1)
// [0.0, 0.0, 0.0] // one_hot(-1)
// ]
// ```
//
// Arguments:
//	indices: A tensor of indices.
//	depth: A scalar defining the depth of the one hot dimension.
//	on_value: A scalar defining the value to fill in output when `indices[j] = i`.
//	off_value: A scalar defining the value to fill in output when `indices[j] != i`.
//
// Returns The one-hot tensor.
func OneHot(scope *Scope, indices tf.Output, depth tf.Output, on_value tf.Output, off_value tf.Output, optional ...OneHotAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, apply := range optional {
		apply(attrs)
	}
	spec := tf.OpSpec{
		Type:  "OneHot",
		Input: []tf.Input{indices, depth, on_value, off_value},
		Attrs: attrs,
	}
	return scope.AddOperation(spec).Output(0)
}
// ExtractVolumePatches extracts `patches` from `input` and puts them in the `"depth"` output dimension. 3D extension of `extract_image_patches`.
//
// Arguments:
//	input: 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`.
//	ksizes: The size of the sliding window for each dimension of `input`.
//	strides: 1-D of length 5. How far the centers of two consecutive patches are in
// `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`.
//	padding: The type of padding algorithm to use.
//
// The size-related attributes are specified as follows:
//
// ```python
// ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]
// strides = [1, stride_planes, strides_rows, strides_cols, 1]
// ```
//
// Returns 5-D Tensor with shape `[batch, out_planes, out_rows, out_cols,
// ksize_planes * ksize_rows * ksize_cols * depth]` containing patches
// with size `ksize_planes x ksize_rows x ksize_cols x depth` vectorized
// in the "depth" dimension. Note `out_planes`, `out_rows` and `out_cols`
// are the dimensions of the output patches.
func ExtractVolumePatches(scope *Scope, input tf.Output, ksizes []int64, strides []int64, padding string) (patches tf.Output) {
	if scope.Err() != nil {
		return
	}
	spec := tf.OpSpec{
		Type:  "ExtractVolumePatches",
		Input: []tf.Input{input},
		Attrs: map[string]interface{}{
			"ksizes":  ksizes,
			"strides": strides,
			"padding": padding,
		},
	}
	return scope.AddOperation(spec).Output(0)
}
// DepthToSpaceAttr is an optional argument to DepthToSpace.
// Each value is a functional option that writes one attribute into the op's attribute map.
type DepthToSpaceAttr func(optionalAttr)
// DepthToSpaceDataFormat sets the optional data_format attribute to value.
// If not specified, defaults to "NHWC"
func DepthToSpaceDataFormat(value string) DepthToSpaceAttr {
	return func(attrs optionalAttr) {
		attrs["data_format"] = value
	}
}
// DepthToSpace for tensors of type T.
//
// Rearranges data from depth into blocks of spatial data.
// This is the reverse transformation of SpaceToDepth. More specifically,
// this op outputs a copy of the input tensor where values from the `depth`
// dimension are moved in spatial blocks to the `height` and `width` dimensions.
// The attr `block_size` indicates the input block size and how the data is moved.
//
// * Chunks of data of size `block_size * block_size` from depth are rearranged
// into non-overlapping blocks of size `block_size x block_size`
// * The width the output tensor is `input_depth * block_size`, whereas the
// height is `input_height * block_size`.
// * The Y, X coordinates within each block of the output image are determined
// by the high order component of the input channel index.
// * The depth of the input tensor must be divisible by
// `block_size * block_size`.
//
// The `data_format` attr specifies the layout of the input and output tensors
// with the following options:
// "NHWC": `[ batch, height, width, channels ]`
// "NCHW": `[ batch, channels, height, width ]`
// "NCHW_VECT_C":
// `qint8 [ batch, channels / 4, height, width, 4 ]`
//
// It is useful to consider the operation as transforming a 6-D Tensor.
// e.g. for data_format = NHWC,
// Each element in the input tensor can be specified via 6 coordinates,
// ordered by decreasing memory layout significance as:
// n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates
// within the input image, bX, bY means coordinates
// within the output block, oC means output channels).
// The output would be the input transposed to the following layout:
// n,iY,bY,iX,bX,oC
//
// This operation is useful for resizing the activations between convolutions
// (but keeping all data), e.g. instead of pooling. It is also useful for training
// purely convolutional models.
//
// For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
// block_size = 2:
//
// ```
// x = [[[[1, 2, 3, 4]]]]
//
// ```
//
// This operation will output a tensor of shape `[1, 2, 2, 1]`:
//
// ```
// [[[[1], [2]],
// [[3], [4]]]]
// ```
//
// Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
// the corresponding output will have 2x2 elements and will have a depth of
// 1 channel (1 = `4 / (block_size * block_size)`).
// The output element shape is `[2, 2, 1]`.
//
// For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
//
// ```
// x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
// ```
//
// This operation, for block size of 2, will return the following tensor of shape
// `[1, 2, 2, 3]`
//
// ```
// [[[[1, 2, 3], [4, 5, 6]],
// [[7, 8, 9], [10, 11, 12]]]]
//
// ```
//
// Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:
//
// ```
// x = [[[[1, 2, 3, 4],
// [5, 6, 7, 8]],
// [[9, 10, 11, 12],
// [13, 14, 15, 16]]]]
// ```
//
// the operator will return the following tensor of shape `[1 4 4 1]`:
//
// ```
// x = [[[ [1], [2], [5], [6]],
// [ [3], [4], [7], [8]],
// [ [9], [10], [13], [14]],
// [ [11], [12], [15], [16]]]]
//
// ```
//
// Arguments:
//
// block_size: The size of the spatial block, same as in Space2Depth.
func DepthToSpace(scope *Scope, input tf.Output, block_size int64, optional ...DepthToSpaceAttr) (output tf.Output) {
	if scope.Err() != nil {
		// The scope already recorded an error; do not add further ops.
		return
	}
	// Required attribute, then apply any caller-supplied optional attributes.
	attrs := map[string]interface{}{"block_size": block_size}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DepthToSpace",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
// SpaceToDepthAttr is an optional argument to SpaceToDepth.
type SpaceToDepthAttr func(optionalAttr)

// SpaceToDepthDataFormat sets the optional data_format attribute to value.
// If not specified, defaults to "NHWC"
func SpaceToDepthDataFormat(value string) SpaceToDepthAttr {
	return func(attrs optionalAttr) {
		attrs["data_format"] = value
	}
}
// SpaceToDepth for tensors of type T.
//
// Rearranges blocks of spatial data, into depth. More specifically,
// this op outputs a copy of the input tensor where values from the `height`
// and `width` dimensions are moved to the `depth` dimension.
// The attr `block_size` indicates the input block size.
//
// * Non-overlapping blocks of size `block_size x block size` are rearranged
// into depth at each location.
// * The depth of the output tensor is `block_size * block_size * input_depth`.
// * The Y, X coordinates within each block of the input become the high order
// component of the output channel index.
// * The input tensor's height and width must be divisible by block_size.
//
// The `data_format` attr specifies the layout of the input and output tensors
// with the following options:
// "NHWC": `[ batch, height, width, channels ]`
// "NCHW": `[ batch, channels, height, width ]`
// "NCHW_VECT_C":
// `qint8 [ batch, channels / 4, height, width, 4 ]`
//
// It is useful to consider the operation as transforming a 6-D Tensor.
// e.g. for data_format = NHWC,
// Each element in the input tensor can be specified via 6 coordinates,
// ordered by decreasing memory layout significance as:
// n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates
// within the output image, bX, bY means coordinates
// within the input block, iC means input channels).
// The output would be a transpose to the following layout:
// n,oY,oX,bY,bX,iC
//
// This operation is useful for resizing the activations between convolutions
// (but keeping all data), e.g. instead of pooling. It is also useful for training
// purely convolutional models.
//
// For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and
// block_size = 2:
//
// ```
// x = [[[[1], [2]],
// [[3], [4]]]]
// ```
//
// This operation will output a tensor of shape `[1, 1, 1, 4]`:
//
// ```
// [[[[1, 2, 3, 4]]]]
// ```
//
// Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,
// the corresponding output will have a single element (i.e. width and height are
// both 1) and will have a depth of 4 channels (1 * block_size * block_size).
// The output element shape is `[1, 1, 4]`.
//
// For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.
//
// ```
// x = [[[[1, 2, 3], [4, 5, 6]],
// [[7, 8, 9], [10, 11, 12]]]]
// ```
//
// This operation, for block_size of 2, will return the following tensor of shape
// `[1, 1, 1, 12]`
//
// ```
// [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
// ```
//
// Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:
//
// ```
// x = [[[[1], [2], [5], [6]],
// [[3], [4], [7], [8]],
// [[9], [10], [13], [14]],
// [[11], [12], [15], [16]]]]
// ```
//
// the operator will return the following tensor of shape `[1 2 2 4]`:
//
// ```
// x = [[[[1, 2, 3, 4],
// [5, 6, 7, 8]],
// [[9, 10, 11, 12],
// [13, 14, 15, 16]]]]
// ```
//
// Arguments:
//
// block_size: The size of the spatial block.
func SpaceToDepth(scope *Scope, input tf.Output, block_size int64, optional ...SpaceToDepthAttr) (output tf.Output) {
	if scope.Err() != nil {
		// The scope already recorded an error; do not add further ops.
		return
	}
	// Required attribute, then apply any caller-supplied optional attributes.
	attrs := map[string]interface{}{"block_size": block_size}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SpaceToDepth",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
// BatchToSpace for 4-D tensors of type T.
//
// This is a legacy version of the more general BatchToSpaceND.
//
// Rearranges (permutes) data from batch into blocks of spatial data, followed by
// cropping. This is the reverse transformation of SpaceToBatch. More specifically,
// this op outputs a copy of the input tensor where values from the `batch`
// dimension are moved in spatial blocks to the `height` and `width` dimensions,
// followed by cropping along the `height` and `width` dimensions.
//
// Arguments:
// input: 4-D tensor with shape
// `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
// depth]`. Note that the batch size of the input tensor must be divisible by
// `block_size * block_size`.
// crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
// how many elements to crop from the intermediate result across the spatial
// dimensions as follows:
//
// crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
//
//
// Returns 4-D with shape `[batch, height, width, depth]`, where:
//
// height = height_pad - crop_top - crop_bottom
// width = width_pad - crop_left - crop_right
//
// The attr `block_size` must be greater than one. It indicates the block size.
//
// Some examples:
//
// (1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:
//
// ```
// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
// ```
//
// The output tensor has shape `[1, 2, 2, 1]` and value:
//
// ```
// x = [[[[1], [2]], [[3], [4]]]]
// ```
//
// (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:
//
// ```
// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
// ```
//
// The output tensor has shape `[1, 2, 2, 3]` and value:
//
// ```
// x = [[[[1, 2, 3], [4, 5, 6]],
// [[7, 8, 9], [10, 11, 12]]]]
// ```
//
// (3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:
//
// ```
// x = [[[[1], [3]], [[9], [11]]],
// [[[2], [4]], [[10], [12]]],
// [[[5], [7]], [[13], [15]]],
// [[[6], [8]], [[14], [16]]]]
// ```
//
// The output tensor has shape `[1, 4, 4, 1]` and value:
//
// ```
// x = [[[[1], [2], [3], [4]],
// [[5], [6], [7], [8]],
// [[9], [10], [11], [12]],
// [[13], [14], [15], [16]]]]
// ```
//
// (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:
//
// ```
// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
// [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
// ```
//
// The output tensor has shape `[2, 2, 4, 1]` and value:
//
// ```
// x = [[[[1], [3]], [[5], [7]]],
// [[[2], [4]], [[10], [12]]],
// [[[5], [7]], [[13], [15]]],
// [[[6], [8]], [[14], [16]]]]
// ```
func BatchToSpace(scope *Scope, input tf.Output, crops tf.Output, block_size int64) (output tf.Output) {
	if scope.Err() != nil {
		// The scope already recorded an error; do not add further ops.
		return
	}
	attrs := map[string]interface{}{"block_size": block_size}
	opspec := tf.OpSpec{
		Type: "BatchToSpace",
		Input: []tf.Input{
			input, crops,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
// SpaceToBatch for 4-D tensors of type T.
//
// This is a legacy version of the more general SpaceToBatchND.
//
// Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
// More specifically, this op outputs a copy of the input tensor where values from
// the `height` and `width` dimensions are moved to the `batch` dimension. After
// the zero-padding, both `height` and `width` of the input must be divisible by the
// block size.
//
// The attr `block_size` must be greater than one. It indicates the block size.
//
// * Non-overlapping blocks of size `block_size x block size` in the height and
// width dimensions are rearranged into the batch dimension at each location.
// * The batch of the output tensor is `batch * block_size * block_size`.
// * Both height_pad and width_pad must be divisible by block_size.
//
// The shape of the output will be:
//
// [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
// depth]
//
// Some examples:
//
// (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
//
// ```
// x = [[[[1], [2]], [[3], [4]]]]
// ```
//
// The output tensor has shape `[4, 1, 1, 1]` and value:
//
// ```
// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
// ```
//
// (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
//
// ```
// x = [[[[1, 2, 3], [4, 5, 6]],
// [[7, 8, 9], [10, 11, 12]]]]
// ```
//
// The output tensor has shape `[4, 1, 1, 3]` and value:
//
// ```
// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
// ```
//
// (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
//
// ```
// x = [[[[1], [2], [3], [4]],
// [[5], [6], [7], [8]],
// [[9], [10], [11], [12]],
// [[13], [14], [15], [16]]]]
// ```
//
// The output tensor has shape `[4, 2, 2, 1]` and value:
//
// ```
// x = [[[[1], [3]], [[9], [11]]],
// [[[2], [4]], [[10], [12]]],
// [[[5], [7]], [[13], [15]]],
// [[[6], [8]], [[14], [16]]]]
// ```
//
// (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:
//
// ```
// x = [[[[1], [2], [3], [4]],
// [[5], [6], [7], [8]]],
// [[[9], [10], [11], [12]],
// [[13], [14], [15], [16]]]]
// ```
//
// The output tensor has shape `[8, 1, 2, 1]` and value:
//
// ```
// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
// [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
// ```
//
// Among others, this operation is useful for reducing atrous convolution into
// regular convolution.
//
// Arguments:
// input: 4-D with shape `[batch, height, width, depth]`.
// paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
// the padding of the input with zeros across the spatial dimensions as follows:
//
// paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
//
// The effective spatial dimensions of the zero-padded input tensor will be:
//
// height_pad = pad_top + height + pad_bottom
// width_pad = pad_left + width + pad_right
//
func SpaceToBatch(scope *Scope, input tf.Output, paddings tf.Output, block_size int64) (output tf.Output) {
	if scope.Err() != nil {
		// The scope already recorded an error; do not add further ops.
		return
	}
	attrs := map[string]interface{}{"block_size": block_size}
	opspec := tf.OpSpec{
		Type: "SpaceToBatch",
		Input: []tf.Input{
			input, paddings,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
// SqueezeAttr is an optional argument to Squeeze.
type SqueezeAttr func(optionalAttr)

// SqueezeAxis sets the optional axis attribute to value.
//
// value: If specified, only squeezes the dimensions listed. The dimension
// index starts at 0. It is an error to squeeze a dimension that is not 1. Must
// be in the range `[-rank(input), rank(input))`.
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func SqueezeAxis(value []int64) SqueezeAttr {
	return func(attrs optionalAttr) {
		attrs["squeeze_dims"] = value
	}
}
// Removes dimensions of size 1 from the shape of a tensor.
//
// Given a tensor `input`, this operation returns a tensor of the same type with
// all dimensions of size 1 removed. If you don't want to remove all size 1
// dimensions, you can remove specific size 1 dimensions by specifying
// `axis`.
//
// For example:
//
// ```
// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
// shape(squeeze(t)) ==> [2, 3]
// ```
//
// Or, to remove specific size 1 dimensions:
//
// ```
// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
// shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
// ```
//
// Arguments:
// input: The `input` to squeeze.
//
// Returns Contains the same data as `input`, but has one or more dimensions of
// size 1 removed.
func Squeeze(scope *Scope, input tf.Output, optional ...SqueezeAttr) (output tf.Output) {
	if scope.Err() != nil {
		// The scope already recorded an error; do not add further ops.
		return
	}
	// All attributes are optional for this op.
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Squeeze",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
// A placeholder op that passes through `input` when its output is not fed.
//
// Arguments:
// input: The default value to produce when `output` is not fed.
// shape: The (possibly partial) shape of the tensor.
//
// Returns A placeholder tensor that defaults to `input` if it is not fed.
func PlaceholderWithDefault(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output) {
	if scope.Err() != nil {
		// The scope already recorded an error; do not add further ops.
		return
	}
	attrs := map[string]interface{}{"shape": shape}
	opspec := tf.OpSpec{
		Type: "PlaceholderWithDefault",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
// PlaceholderAttr is an optional argument to Placeholder.
type PlaceholderAttr func(optionalAttr)

// PlaceholderShape sets the optional shape attribute to value.
//
// value: (Optional) The shape of the tensor. If the shape has 0 dimensions, the
// shape is unconstrained.
// If not specified, defaults to {unknown_rank:true}
func PlaceholderShape(value tf.Shape) PlaceholderAttr {
	return func(attrs optionalAttr) {
		attrs["shape"] = value
	}
}
// A placeholder op for a value that will be fed into the computation.
//
// N.B. This operation will fail with an error if it is executed. It is
// intended as a way to represent a value that will always be fed, and to
// provide attrs that enable the fed value to be checked at runtime.
//
// Arguments:
// dtype: The type of elements in the tensor.
//
// Returns A placeholder tensor that must be replaced using the feed mechanism.
func Placeholder(scope *Scope, dtype tf.DataType, optional ...PlaceholderAttr) (output tf.Output) {
	if scope.Err() != nil {
		// The scope already recorded an error; do not add further ops.
		return
	}
	// Required attribute, then apply any caller-supplied optional attributes.
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	// Placeholder takes no inputs; the value arrives via the feed mechanism.
	opspec := tf.OpSpec{
		Type:  "Placeholder",
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
// Return the reduction indices for computing gradients of s0 op s1 with broadcast.
//
// This is typically used by gradient computations for a broadcasting operation.
func BroadcastGradientArgs(scope *Scope, s0 tf.Output, s1 tf.Output) (r0 tf.Output, r1 tf.Output) {
	if scope.Err() != nil {
		// The scope already recorded an error; do not add further ops.
		return
	}
	opspec := tf.OpSpec{
		Type: "BroadcastGradientArgs",
		Input: []tf.Input{
			s0, s1,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
// Return the shape of s0 op s1 with broadcast.
//
// Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
// broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
func BroadcastArgs(scope *Scope, s0 tf.Output, s1 tf.Output) (r0 tf.Output) {
	if scope.Err() != nil {
		// The scope already recorded an error; do not add further ops.
		return
	}
	opspec := tf.OpSpec{
		Type: "BroadcastArgs",
		Input: []tf.Input{
			s0, s1,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
// TensorStridedSliceUpdateAttr is an optional argument to TensorStridedSliceUpdate.
type TensorStridedSliceUpdateAttr func(optionalAttr)

// TensorStridedSliceUpdateBeginMask sets the optional begin_mask attribute to value.
// If not specified, defaults to 0
func TensorStridedSliceUpdateBeginMask(value int64) TensorStridedSliceUpdateAttr {
	return func(attrs optionalAttr) {
		attrs["begin_mask"] = value
	}
}

// TensorStridedSliceUpdateEndMask sets the optional end_mask attribute to value.
// If not specified, defaults to 0
func TensorStridedSliceUpdateEndMask(value int64) TensorStridedSliceUpdateAttr {
	return func(attrs optionalAttr) {
		attrs["end_mask"] = value
	}
}

// TensorStridedSliceUpdateEllipsisMask sets the optional ellipsis_mask attribute to value.
// If not specified, defaults to 0
func TensorStridedSliceUpdateEllipsisMask(value int64) TensorStridedSliceUpdateAttr {
	return func(attrs optionalAttr) {
		attrs["ellipsis_mask"] = value
	}
}

// TensorStridedSliceUpdateNewAxisMask sets the optional new_axis_mask attribute to value.
// If not specified, defaults to 0
func TensorStridedSliceUpdateNewAxisMask(value int64) TensorStridedSliceUpdateAttr {
	return func(attrs optionalAttr) {
		attrs["new_axis_mask"] = value
	}
}

// TensorStridedSliceUpdateShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
// If not specified, defaults to 0
func TensorStridedSliceUpdateShrinkAxisMask(value int64) TensorStridedSliceUpdateAttr {
	return func(attrs optionalAttr) {
		attrs["shrink_axis_mask"] = value
	}
}
// Assign `value` to the sliced l-value reference of `input`.
//
// The values of `value` are assigned to the positions in the tensor `input` that
// are selected by the slice parameters. The slice parameters `begin` `end`
// `strides` etc. work exactly as in `StridedSlice`.
//
// NOTE this op currently does not support broadcasting and so `value`'s shape
// must be exactly the shape produced by the slice of `input`.
func TensorStridedSliceUpdate(scope *Scope, input tf.Output, begin tf.Output, end tf.Output, strides tf.Output, value tf.Output, optional ...TensorStridedSliceUpdateAttr) (output tf.Output) {
	if scope.Err() != nil {
		// The scope already recorded an error; do not add further ops.
		return
	}
	// All attributes are optional for this op.
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TensorStridedSliceUpdate",
		Input: []tf.Input{
			input, begin, end, strides, value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
// StridedSliceAttr is an optional argument to StridedSlice.
type StridedSliceAttr func(optionalAttr)

// StridedSliceBeginMask sets the optional begin_mask attribute to value.
//
// value: a bitmask where a bit i being 1 means to ignore the begin
// value and instead use the largest interval possible. At runtime
// begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or
// `[-1, n-1]` if `stride[i] < 0`
// If not specified, defaults to 0
func StridedSliceBeginMask(value int64) StridedSliceAttr {
	return func(attrs optionalAttr) {
		attrs["begin_mask"] = value
	}
}

// StridedSliceEndMask sets the optional end_mask attribute to value.
//
// value: analogous to `begin_mask`
// If not specified, defaults to 0
func StridedSliceEndMask(value int64) StridedSliceAttr {
	return func(attrs optionalAttr) {
		attrs["end_mask"] = value
	}
}

// StridedSliceEllipsisMask sets the optional ellipsis_mask attribute to value.
//
// value: a bitmask where bit `i` being 1 means the `i`th
// position is actually an ellipsis. One bit at most can be 1.
// If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`
// is provided. This means that `foo[3:5] == foo[3:5,...]`. An ellipsis
// implicitly creates as many range specifications as necessary to fully
// specify the sliced range for every dimension. For example for a 4-dimensional
// tensor `foo` the slice `foo[2,..., 5:8]` implies `foo[2, :, :, 5:8]`.
// If not specified, defaults to 0
func StridedSliceEllipsisMask(value int64) StridedSliceAttr {
	return func(attrs optionalAttr) {
		attrs["ellipsis_mask"] = value
	}
}

// StridedSliceNewAxisMask sets the optional new_axis_mask attribute to value.
//
// value: a bitmask where bit `i` being 1 means the `i`th
// specification creates a new shape 1 dimension. For example
// `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
// If not specified, defaults to 0
func StridedSliceNewAxisMask(value int64) StridedSliceAttr {
	return func(attrs optionalAttr) {
		attrs["new_axis_mask"] = value
	}
}

// StridedSliceShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
//
// value: a bitmask where bit `i` implies that the `i`th
// specification should shrink the dimensionality. begin and end
// must imply a slice of size 1 in the dimension. For example in
// python one might do `foo[:, 3, :]` which would result in
// `shrink_axis_mask` being 2.
// If not specified, defaults to 0
func StridedSliceShrinkAxisMask(value int64) StridedSliceAttr {
	return func(attrs optionalAttr) {
		attrs["shrink_axis_mask"] = value
	}
}
// Return a strided slice from `input`.
//
// Note, most python users will want to use the Python `Tensor.__getitem__`
// or `Variable.__getitem__` rather than this op directly.
//
// The goal of this op is to produce a new tensor with a subset of
// the elements from the `n` dimensional `input` tensor. The subset is chosen using
// a sequence of `m` sparse range specifications encoded into the arguments
// of this function. Note, in some cases
// `m` could be equal to `n`, but this need not be the case. Each
// range specification entry can be one of the following:
//
// - An ellipsis (...). Ellipses are used to imply zero or more
// dimensions of full-dimension selection and are produced using
// `ellipsis_mask`. For example, `foo[...]` is the identity slice.
//
// - A new axis. This is used to insert a new shape=1 dimension and is
// produced using `new_axis_mask`. For example, `foo[:,...]` where
// `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
//
//
// - A range `begin:end:stride`. This is used to specify how much to choose from
// a given dimension. `stride` can be any integer but 0. `begin` is an integer
// which represents the index of the first value to select while `end` represents
// the index of the last value to select. The number of values selected in each
// dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
// `begin` and `end` can be negative where `-1` is the last element, `-2` is
// the second to last. `begin_mask` controls whether to replace the explicitly
// given `begin` with an implicit effective value of `0` if `stride > 0` and
// `-1` if `stride < 0`. `end_mask` is analogous but produces the number
// required to create the largest open interval. For example, given a shape
// `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
// not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
// and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the
// first dimension of a tensor while dropping the last two (in the original
// order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`.
//
// - A single index. This is used to keep only elements that have a given
// index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a
// shape `(6,)` tensor. This is encoded in `begin` and `end` and
// `shrink_axis_mask`.
//
// Each conceptual range specification is encoded in the op's argument. This
// encoding is best understand by considering a non-trivial example. In
// particular,
// `foo[1, 2:4, None,..., :-3:-1, :]` will be encoded as
//
// ```
// begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
// end = [2, 4, x, x, -3, x]
// strides = [1, 1, x, x, -1, 1]
// begin_mask = 1<<4 | 1<<5 = 48
// end_mask = 1<<5 = 32
// ellipsis_mask = 1<<3 = 8
// new_axis_mask = 1<<2 = 4
// shrink_axis_mask = 1<<0 = 1
// ```
//
// In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
// the slice becomes (2, 1, 5, 5, 2, 5).
// Let us walk step by step through each argument specification.
//
// 1. The first argument in the example slice is turned into `begin = 1` and
// `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
// also set the appropriate bit in `shrink_axis_mask`.
//
// 2. `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have
// zero bits contributed.
//
// 3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1
// dimension in the final shape. Dummy values are contributed to begin,
// end and stride, while the new_axis_mask bit is set.
//
// 4. `...` grab the full ranges from as many dimensions as needed to
// fully specify a slice for every dimension of the input shape.
//
// 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
// with a dimension that has shape `s` is converted to a positive index
// `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
// is done internally so begin, end and strides receive x, -3, and -1.
// The appropriate begin_mask bit is set to indicate the start range is the
// full range (ignoring the x).
//
// 6. `:` indicates that the entire contents of the corresponding dimension
// is selected. This is equivalent to `::` or `0::1`. begin, end, and strides
// receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
// `end_mask` are also set.
//
// *Requirements*:
// `0!= strides[i] for i in [0, m)`
// `ellipsis_mask must be a power of two (only one ellipsis)`
//
// Arguments:
//
// begin: `begin[k]` specifies the offset into the `k`th range specification.
// The exact dimension this corresponds to will be determined by context.
// Out-of-bounds values will be silently clamped. If the `k`th bit of
// `begin_mask` then `begin[k]` is ignored and the full range of the
// appropriate dimension is used instead. Negative values causes indexing
// to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`.
// end: `end[i]` is like `begin` with the exception that `end_mask` is
// used to determine full ranges.
// strides: `strides[i]` specifies the increment in the `i`th specification
// after extracting a given element. Negative indices will reverse
// the original order. Out or range values are
// clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`
func StridedSlice(scope *Scope, input tf.Output, begin tf.Output, end tf.Output, strides tf.Output, optional ...StridedSliceAttr) (output tf.Output) {
	if scope.Err() != nil {
		// The scope already recorded an error; do not add further ops.
		return
	}
	// All attributes are optional for this op.
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StridedSlice",
		Input: []tf.Input{
			input, begin, end, strides,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
// Computes the eigen decomposition of a batch of self-adjoint matrices
//
// (Note: Only real inputs are supported).
//
// Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in
// tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i], for
// i=0...N-1.
//
// Arguments:
// a: the input tensor.
// lower: a boolean specifies whether the calculation is done with the lower
// triangular part or the upper triangular part.
// max_iter: maximum number of sweep update, i.e., the whole lower triangular
// part or upper triangular part based on parameter lower. Heuristically, it has
// been argued that approximately logN sweeps are needed in practice (Ref: Golub &
// van Loan "Matrix Computation").
// epsilon: the tolerance ratio.
//
// Returns:
// w: The eigenvalues in ascending order, each repeated according to its
// multiplicity.
// v: The column v[..., :, i] is the normalized eigenvector corresponding to the
// eigenvalue w[..., i].
func XlaSelfAdjointEig(scope *Scope, a tf.Output, lower bool, max_iter int64, epsilon float32) (w tf.Output, v tf.Output) {
	if scope.Err() != nil {
		// The scope already recorded an error; do not add further ops.
		return
	}
	attrs := map[string]interface{}{"lower": lower, "max_iter": max_iter, "epsilon": epsilon}
	opspec := tf.OpSpec{
		Type: "XlaSelfAdjointEig",
		Input: []tf.Input{
			a,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
// Ensures that the tensor's shape matches the expected shape.
//
// Raises an error if the input tensor's shape does not match the specified shape.
// Returns the input tensor otherwise.
//
// Arguments:
// input: A tensor, whose shape is to be validated.
// shape: The expected (possibly partially specified) shape of the input tensor.
//
// Returns A tensor with the same shape and contents as the input tensor or value.
func EnsureShape(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output) {
	if scope.Err() != nil {
		// The scope already recorded an error; do not add further ops.
		return
	}
	attrs := map[string]interface{}{"shape": shape}
	opspec := tf.OpSpec{
		Type: "EnsureShape",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
// ShapeAttr is an optional argument to Shape.
type ShapeAttr func(optionalAttr)

// ShapeOutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_INT32
func ShapeOutType(value tf.DataType) ShapeAttr {
	return func(attrs optionalAttr) {
		attrs["out_type"] = value
	}
}
// Returns the shape of a tensor.
//
// This operation returns a 1-D integer tensor representing the shape of `input`.
//
// For example:
//
// ```
// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
// shape(t) ==> [2, 2, 3]
// ```
func Shape(scope *Scope, input tf.Output, optional ...ShapeAttr) (output tf.Output) {
	if scope.Err() != nil {
		// The scope already recorded an error; do not add further ops.
		return
	}
	// All attributes are optional for this op.
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Shape",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
// UniqueWithCountsV2Attr is an optional argument to UniqueWithCountsV2.
type UniqueWithCountsV2Attr func(optionalAttr)

// UniqueWithCountsV2OutIdx sets the optional out_idx attribute to value.
// If not specified, defaults to DT_INT32
func UniqueWithCountsV2OutIdx(value tf.DataType) UniqueWithCountsV2Attr {
	return func(attrs optionalAttr) {
		attrs["out_idx"] = value
	}
}
// Finds unique elements along an axis of a tensor.
//
// This operation either returns a tensor `y` containing unique elements
// along the `axis` of a tensor. The returned unique elements is sorted
// in the same order as they occur along `axis` in `x`.
// This operation also returns a tensor `idx` and a tensor `count`
// that are the same size as the number of the elements in `x` along the
// `axis` dimension. The `idx` contains the index in the unique output `y`
// and the `count` contains the count in the unique output `y`.
// In other words, for an `1-D` tensor `x` with `axis = None:
//
// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
//
// For example:
//
// ```
// x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
// y, idx, count = UniqueWithCountsV2(x, axis = [0])
// y ==> [1, 2, 4, 7, 8]
// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
// count ==> [2, 1, 3, 1, 2]
// ```
//
// For a `2-D` tensor `x` with `axis = 0`:
//
// ```
// x = tf.constant([[1, 0, 0],
// [1, 0, 0],
// [2, 0, 0]])
// y, idx, count = UniqueWithCountsV2(x, axis=[0])
// y ==> [[1, 0, 0],
// [2, 0, 0]]
// idx ==> [0, 0, 1]
// count ==> [2, 1]
// ```
//
// For a `2-D` tensor `x` with `axis = 1`:
//
// ```
// x = tf.constant([[1, 0, 0],
// [1, 0, 0],
// [2, 0, 0]])
// y, idx, count = UniqueWithCountsV2(x, axis=[1])
// y ==> [[1, 0],
// [1, 0],
// [2, 0]]
// idx ==> [0, 1, 1]
// count ==> [1, 2]
// ```
//
// Arguments:
// x: A `Tensor`.
// axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor to
// find the unique elements.
//
// Returns:
// y: A `Tensor`. Unique elements along the `axis` of `Tensor` x.
// idx: A 1-D Tensor. Has the same type as x that contains the index of each
// value of x in the output y.
// count: A 1-D Tensor. The count of each value of x in the output y.
func UniqueWithCountsV2(scope *Scope, x tf.Output, axis tf.Output, optional ...UniqueWithCountsV2Attr) (y tf.Output, idx tf.Output, count tf.Output) {
	if scope.Err() != nil {
		// The scope already recorded an error; do not add further ops.
		return
	}
	// All attributes are optional for this op.
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "UniqueWithCountsV2",
		Input: []tf.Input{
			x, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
// Shuffle dimensions of x according to a permutation and conjugate the result.
//
// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
// `y.shape[i] == x.shape[perm[i]] for i in [0, 1,..., rank(x) - 1]`
// `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`
func ConjugateTranspose(scope *Scope, x tf.Output, perm tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		// The scope already recorded an error; do not add further ops.
		return
	}
	opspec := tf.OpSpec{
		Type: "ConjugateTranspose",
		Input: []tf.Input{
			x, perm,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
// Computes the inverse permutation of a tensor.
//
// This operation computes the inverse of an index permutation. It takes a 1-D
// integer tensor `x`, which represents the indices of a zero-based array, and
// swaps each value with its index position. In other words, for an output tensor
// `y` and an input tensor `x`, this operation computes the following:
//
// `y[x[i]] = i for i in [0, 1,..., len(x) - 1]`
//
// The values must include 0. There can be no duplicate values or negative values.
//
// For example:
//
// ```
// # tensor `x` is [3, 4, 0, 2, 1]
// invert_permutation(x) ==> [2, 4, 3, 0, 1]
// ```
//
// Arguments:
// x: 1-D.
//
// Returns 1-D.
func InvertPermutation(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "InvertPermutation",
		Input: []tf.Input{x},
	})
	return op.Output(0)
}
// PreventGradientAttr is an optional argument to PreventGradient.
type PreventGradientAttr func(optionalAttr)

// PreventGradientMessage sets the optional message attribute to value.
//
// value: Will be printed in the error when anyone tries to differentiate
// this operation.
// If not specified, defaults to ""
func PreventGradientMessage(value string) PreventGradientAttr {
	return func(attrs optionalAttr) {
		attrs["message"] = value
	}
}
// An identity op that triggers an error if a gradient is requested.
//
// When executed in a graph, this op outputs its input tensor as-is.
//
// When building ops to compute gradients, the TensorFlow gradient system
// will return an error when trying to lookup the gradient of this op,
// because no gradient must ever be registered for this function. This
// op exists to prevent subtle bugs from silently returning unimplemented
// gradients in some corner cases.
//
// Arguments:
// input: any tensor.
//
// Returns the same input tensor.
func PreventGradient(scope *Scope, input tf.Output, optional ...PreventGradientAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	// Collect optional attributes before constructing the op.
	attrs := map[string]interface{}{}
	for _, opt := range optional {
		opt(attrs)
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "PreventGradient",
		Input: []tf.Input{input},
		Attrs: attrs,
	})
	return op.Output(0)
}
// Stops gradient computation.
//
// When executed in a graph, this op outputs its input tensor as-is.
//
// When building ops to compute gradients, this op prevents the contribution of
// its inputs to be taken into account. Normally, the gradient generator adds ops
// to a graph to compute the derivatives of a specified 'loss' by recursively
// finding out inputs that contributed to its computation. If you insert this op
// in the graph it inputs are masked from the gradient generator. They are not
// taken into account for computing gradients.
//
// This is useful any time you want to compute a value with TensorFlow but need
// to pretend that the value was a constant. For example, the softmax function
// for a vector x can be written as
//
// ```python
//
// def softmax(x):
// numerator = tf.exp(x)
// denominator = tf.reduce_sum(numerator)
// return numerator / denominator
// ```
//
// This however is susceptible to overflow if the values in x are large. An
// alternative more stable way is to subtract the maximum of x from each of the
// values.
//
// ```python
//
// def stable_softmax(x):
// z = x - tf.reduce_max(x)
// numerator = tf.exp(z)
// denominator = tf.reduce_sum(numerator)
// return numerator / denominator
// ```
//
// However, when we backprop through the softmax to x, we dont want to backprop
// through the `tf.reduce_max(x)` (if the max values are not unique then the
// gradient could flow to the wrong input) calculation and treat that as a
// constant. Therefore, we should write this out as
//
// ```python
//
// def stable_softmax(x):
// z = x - tf.stop_gradient(tf.reduce_max(x))
// numerator = tf.exp(z)
// denominator = tf.reduce_sum(numerator)
// return numerator / denominator
// ```
//
// Some other examples include:
//
// * The *EM* algorithm where the *M-step* should not involve backpropagation
// through the output of the *E-step*.
// * Contrastive divergence training of Boltzmann machines where, when
// differentiating the energy function, the training must not backpropagate
// through the graph that generated the samples from the model.
// * Adversarial training, where no backprop should happen through the adversarial
// example generation process.
func StopGradient(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "StopGradient",
		Input: []tf.Input{input},
	})
	return op.Output(0)
}
// Identity op for gradient debugging.
//
// This op is hidden from public in Python. It is used by TensorFlow Debugger to
// register gradient tensors for gradient debugging.
// This op operates on non-reference-type tensors.
func DebugGradientIdentity(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "DebugGradientIdentity",
		Input: []tf.Input{input},
	})
	return op.Output(0)
}
// Gather slices from `params` into a Tensor with shape specified by `indices`.
//
// `indices` is a K-dimensional integer tensor, best thought of as a
// (K-1)-dimensional tensor of indices into `params`, where each element defines a
// slice of `params`:
//
// output[\\(i_0,..., i_{K-2}\\)] = params[indices[\\(i_0,..., i_{K-2}\\)]]
//
// Whereas in `tf.gather` `indices` defines slices into the `axis`
// dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
// first `N` dimensions of `params`, where `N = indices.shape[-1]`.
//
// The last dimension of `indices` can be at most the rank of
// `params`:
//
// indices.shape[-1] <= params.rank
//
// The last dimension of `indices` corresponds to elements
// (if `indices.shape[-1] == params.rank`) or slices
// (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
// of `params`. The output tensor has shape
//
// indices.shape[:-1] + params.shape[indices.shape[-1]:]
//
// Note that on CPU, if an out of bound index is found, an error is returned.
// On GPU, if an out of bound index is found, a 0 is stored in the
// corresponding output value.
//
// Some examples below.
//
// Simple indexing into a matrix:
//
// ```python
// indices = [[0, 0], [1, 1]]
// params = [['a', 'b'], ['c', 'd']]
// output = ['a', 'd']
// ```
//
// Slice indexing into a matrix:
//
// ```python
// indices = [[1], [0]]
// params = [['a', 'b'], ['c', 'd']]
// output = [['c', 'd'], ['a', 'b']]
// ```
//
// Indexing into a 3-tensor:
//
// ```python
// indices = [[1]]
// params = [[['a0', 'b0'], ['c0', 'd0']],
// [['a1', 'b1'], ['c1', 'd1']]]
// output = [[['a1', 'b1'], ['c1', 'd1']]]
//
//
// indices = [[0, 1], [1, 0]]
// params = [[['a0', 'b0'], ['c0', 'd0']],
// [['a1', 'b1'], ['c1', 'd1']]]
// output = [['c0', 'd0'], ['a1', 'b1']]
//
//
// indices = [[0, 0, 1], [1, 0, 1]]
// params = [[['a0', 'b0'], ['c0', 'd0']],
// [['a1', 'b1'], ['c1', 'd1']]]
// output = ['b0', 'b1']
// ```
//
// Batched indexing into a matrix:
//
// ```python
// indices = [[[0, 0]], [[0, 1]]]
// params = [['a', 'b'], ['c', 'd']]
// output = [['a'], ['b']]
// ```
//
// Batched slice indexing into a matrix:
//
// ```python
// indices = [[[1]], [[0]]]
// params = [['a', 'b'], ['c', 'd']]
// output = [[['c', 'd']], [['a', 'b']]]
// ```
//
// Batched indexing into a 3-tensor:
//
// ```python
// indices = [[[1]], [[0]]]
// params = [[['a0', 'b0'], ['c0', 'd0']],
// [['a1', 'b1'], ['c1', 'd1']]]
// output = [[[['a1', 'b1'], ['c1', 'd1']]],
// [[['a0', 'b0'], ['c0', 'd0']]]]
//
// indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
// params = [[['a0', 'b0'], ['c0', 'd0']],
// [['a1', 'b1'], ['c1', 'd1']]]
// output = [[['c0', 'd0'], ['a1', 'b1']],
// [['a0', 'b0'], ['c1', 'd1']]]
//
//
// indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
// params = [[['a0', 'b0'], ['c0', 'd0']],
// [['a1', 'b1'], ['c1', 'd1']]]
// output = [['b0', 'b1'], ['d0', 'c1']]
// ```
//
// See also `tf.gather` and `tf.batch_gather`.
//
// Arguments:
// params: The tensor from which to gather values.
// indices: Index tensor.
//
// Returns Values from `params` gathered from indices given by `indices`, with
// shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.
func GatherNd(scope *Scope, params tf.Output, indices tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "GatherNd",
		Input: []tf.Input{params, indices},
	})
	return op.Output(0)
}
// GatherV2Attr is an optional argument to GatherV2.
type GatherV2Attr func(optionalAttr)

// GatherV2BatchDims sets the optional batch_dims attribute to value.
// If not specified, defaults to 0
func GatherV2BatchDims(value int64) GatherV2Attr {
	return func(attrs optionalAttr) {
		attrs["batch_dims"] = value
	}
}
// Gather slices from `params` axis `axis` according to `indices`.
//
// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
// Produces an output tensor with shape `params.shape[:axis] +
// indices.shape[batch_dims:] + params.shape[axis + 1:]` where:
//
// ```python
// # Scalar indices (output is rank(params) - 1).
// output[a_0,..., a_n, b_0,..., b_n] =
// params[a_0,..., a_n, indices, b_0,..., b_n]
//
// # Vector indices (output is rank(params)).
// output[a_0,..., a_n, i, b_0,..., b_n] =
// params[a_0,..., a_n, indices[i], b_0,..., b_n]
//
// # Higher rank indices (output is rank(params) + rank(indices) - 1).
// output[a_0,..., a_n, i,..., j, b_0,... b_n] =
// params[a_0,..., a_n, indices[i,..., j], b_0,..., b_n]
// ```
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
// </div>
//
// Note that on CPU, if an out of bound index is found, an error is returned.
// On GPU, if an out of bound index is found, a 0 is stored in the
// corresponding output value.
//
// See also `tf.batch_gather` and `tf.gather_nd`.
//
// Arguments:
// params: The tensor from which to gather values. Must be at least rank
// `axis + 1`.
// indices: Index tensor. Must be in range `[0, params.shape[axis])`.
// axis: The axis in `params` to gather `indices` from. Defaults to the first
// dimension. Supports negative indexes.
//
// Returns Values from `params` gathered from indices given by `indices`, with
// shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.
func GatherV2(scope *Scope, params tf.Output, indices tf.Output, axis tf.Output, optional ...GatherV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	// Apply each optional attribute setter to the attribute map.
	attrs := map[string]interface{}{}
	for _, opt := range optional {
		opt(attrs)
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "GatherV2",
		Input: []tf.Input{params, indices, axis},
		Attrs: attrs,
	})
	return op.Output(0)
}
// Reverses specific dimensions of a tensor.
//
// Given a `tensor`, and a `int32` tensor `axis` representing the set of
// dimensions of `tensor` to reverse. This operation reverses each dimension
// `i` for which there exists `j` s.t. `axis[j] == i`.
//
// `tensor` can have up to 8 dimensions. The number of dimensions specified
// in `axis` may be 0 or more entries. If an index is specified more than
// once, a InvalidArgument error is raised.
//
// For example:
//
// ```
// # tensor 't' is [[[[ 0, 1, 2, 3],
// # [ 4, 5, 6, 7],
// # [ 8, 9, 10, 11]],
// # [[12, 13, 14, 15],
// # [16, 17, 18, 19],
// # [20, 21, 22, 23]]]]
// # tensor 't' shape is [1, 2, 3, 4]
//
// # 'dims' is [3] or 'dims' is [-1]
// reverse(t, dims) ==> [[[[ 3, 2, 1, 0],
// [ 7, 6, 5, 4],
// [ 11, 10, 9, 8]],
// [[15, 14, 13, 12],
// [19, 18, 17, 16],
// [23, 22, 21, 20]]]]
//
// # 'dims' is '[1]' (or 'dims' is '[-3]')
// reverse(t, dims) ==> [[[[12, 13, 14, 15],
// [16, 17, 18, 19],
// [20, 21, 22, 23]
// [[ 0, 1, 2, 3],
// [ 4, 5, 6, 7],
// [ 8, 9, 10, 11]]]]
//
// # 'dims' is '[2]' (or 'dims' is '[-2]')
// reverse(t, dims) ==> [[[[8, 9, 10, 11],
// [4, 5, 6, 7],
// [0, 1, 2, 3]]
// [[20, 21, 22, 23],
// [16, 17, 18, 19],
// [12, 13, 14, 15]]]]
// ```
//
// Arguments:
// tensor: Up to 8-D.
// axis: 1-D. The indices of the dimensions to reverse. Must be in the range
// `[-rank(tensor), rank(tensor))`.
//
// Returns The same shape as `tensor`.
func ReverseV2(scope *Scope, tensor tf.Output, axis tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "ReverseV2",
		Input: []tf.Input{tensor, axis},
	})
	return op.Output(0)
}
// Returns the batched diagonal part of a batched tensor.
//
// This operation returns a tensor with the `diagonal` part
// of the batched `input`. The `diagonal` part is computed as follows:
//
// Assume `input` has `k` dimensions `[I, J, K,..., M, N]`, then the output is a
// tensor of rank `k - 1` with dimensions `[I, J, K,..., min(M, N)]` where:
//
// `diagonal[i, j, k,..., n] = input[i, j, k,..., n, n]`.
//
// The input must be at least a matrix.
//
// For example:
//
// ```
// # 'input' is [[[1, 0, 0, 0]
// [0, 2, 0, 0]
// [0, 0, 3, 0]
// [0, 0, 0, 4]],
// [[5, 0, 0, 0]
// [0, 6, 0, 0]
// [0, 0, 7, 0]
// [0, 0, 0, 8]]]
//
// and input.shape = (2, 4, 4)
//
// tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
//
// which has shape (2, 4)
// ```
//
// Arguments:
// input: Rank `k` tensor where `k >= 2`.
//
// Returns The extracted diagonal(s) having shape
// `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.
func MatrixDiagPart(scope *Scope, input tf.Output) (diagonal tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "MatrixDiagPart",
		Input: []tf.Input{input},
	})
	return op.Output(0)
}
// MatrixSetDiagV3Attr is an optional argument to MatrixSetDiagV3.
type MatrixSetDiagV3Attr func(optionalAttr)

// MatrixSetDiagV3Align sets the optional align attribute to value.
//
// value: Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is
// a string specifying how superdiagonals and subdiagonals should be aligned,
// respectively. There are four possible alignments: "RIGHT_LEFT" (default),
// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals
// to the right (left-pads the row) and subdiagonals to the left (right-pads the
// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is
// the opposite alignment.
// If not specified, defaults to "RIGHT_LEFT"
func MatrixSetDiagV3Align(value string) MatrixSetDiagV3Attr {
	return func(attrs optionalAttr) {
		attrs["align"] = value
	}
}
// Returns a batched matrix tensor with new batched diagonal values.
//
// Given `input` and `diagonal`, this operation returns a tensor with the
// same shape and values as `input`, except for the specified diagonals of the
// innermost matrices. These will be overwritten by the values in `diagonal`.
//
// `input` has `r+1` dimensions `[I, J,..., L, M, N]`. When `k` is scalar or
// `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J,..., L, max_diag_len]`.
// Otherwise, it has `r+1` dimensions `[I, J,..., L, num_diags, max_diag_len]`.
// `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
// `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
//
// The output is a tensor of rank `r+1` with dimensions `[I, J,..., L, M, N]`.
// If `k` is scalar or `k[0] == k[1]`:
//
// ```
// output[i, j,..., l, m, n]
// = diagonal[i, j,..., l, n-max(k[1], 0)] ; if n - m == k[1]
// input[i, j,..., l, m, n] ; otherwise
// ```
//
// Otherwise,
//
// ```
// output[i, j,..., l, m, n]
// = diagonal[i, j,..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
// input[i, j,..., l, m, n] ; otherwise
// ```
// where `d = n - m`, `diag_index = k[1] - d`, and
// `index_in_diag = n - max(d, 0) + offset`.
//
// `offset` is zero except when the alignment of the diagonal is to the right.
// ```
// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
// and `d >= 0`) or
// (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
// and `d <= 0`)
// 0 ; otherwise
// ```
// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
//
// For example:
//
// ```
// # The main diagonal.
// input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4)
// [7, 7, 7, 7],
// [7, 7, 7, 7]],
// [[7, 7, 7, 7],
// [7, 7, 7, 7],
// [7, 7, 7, 7]]])
// diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3)
// [4, 5, 6]])
// tf.matrix_set_diag(input, diagonal)
// ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)
// [7, 2, 7, 7],
// [7, 7, 3, 7]],
// [[4, 7, 7, 7],
// [7, 5, 7, 7],
// [7, 7, 6, 7]]]
//
// # A superdiagonal (per batch).
// tf.matrix_set_diag(input, diagonal, k = 1)
// ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4)
// [7, 7, 2, 7],
// [7, 7, 7, 3]],
// [[7, 4, 7, 7],
// [7, 7, 5, 7],
// [7, 7, 7, 6]]]
//
// # A band of diagonals.
// diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3)
// [6, 5, 8],
// [1, 2, 3],
// [4, 5, 0]],
// [[0, 1, 2],
// [5, 6, 4],
// [6, 1, 2],
// [3, 4, 0]]])
// tf.matrix_set_diag(input, diagonals, k = (-1, 2))
// ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4)
// [4, 2, 5, 1],
// [7, 5, 3, 8]],
// [[6, 5, 1, 7],
// [3, 1, 6, 2],
// [7, 4, 2, 4]]]
//
// # LEFT_RIGHT alignment.
// diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3)
// [6, 5, 8],
// [1, 2, 3],
// [0, 4, 5]],
// [[1, 2, 0],
// [5, 6, 4],
// [6, 1, 2],
// [0, 3, 4]]])
// tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT")
// ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4)
// [4, 2, 5, 1],
// [7, 5, 3, 8]],
// [[6, 5, 1, 7],
// [3, 1, 6, 2],
// [7, 4, 2, 4]]]
//
// ```
//
// Arguments:
// input: Rank `r+1`, where `r >= 1`.
// diagonal: Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
// `k >= 1`.
// k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
// diagonal, and negative value means subdiagonals. `k` can be a single integer
// (for a single diagonal) or a pair of integers specifying the low and high ends
// of a matrix band. `k[0]` must not be larger than `k[1]`.
//
// Returns Rank `r+1`, with `output.shape = input.shape`.
func MatrixSetDiagV3(scope *Scope, input tf.Output, diagonal tf.Output, k tf.Output, optional ...MatrixSetDiagV3Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	// Gather optional attributes (e.g. align) into a single map.
	attrs := map[string]interface{}{}
	for _, opt := range optional {
		opt(attrs)
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "MatrixSetDiagV3",
		Input: []tf.Input{input, diagonal, k},
		Attrs: attrs,
	})
	return op.Output(0)
}
// Returns a batched matrix tensor with new batched diagonal values.
//
// Given `input` and `diagonal`, this operation returns a tensor with the
// same shape and values as `input`, except for the specified diagonals of the
// innermost matrices. These will be overwritten by the values in `diagonal`.
//
// `input` has `r+1` dimensions `[I, J,..., L, M, N]`. When `k` is scalar or
// `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J,..., L, max_diag_len]`.
// Otherwise, it has `r+1` dimensions `[I, J,..., L, num_diags, max_diag_len]`.
// `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
// `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
//
// The output is a tensor of rank `r+1` with dimensions `[I, J,..., L, M, N]`.
// If `k` is scalar or `k[0] == k[1]`:
//
// ```
// output[i, j,..., l, m, n]
// = diagonal[i, j,..., l, n-max(k[1], 0)] ; if n - m == k[1]
// input[i, j,..., l, m, n] ; otherwise
// ```
//
// Otherwise,
//
// ```
// output[i, j,..., l, m, n]
// = diagonal[i, j,..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
// input[i, j,..., l, m, n] ; otherwise
// ```
// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.
//
// For example:
//
// ```
// # The main diagonal.
// input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4)
// [7, 7, 7, 7],
// [7, 7, 7, 7]],
// [[7, 7, 7, 7],
// [7, 7, 7, 7],
// [7, 7, 7, 7]]])
// diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3)
// [4, 5, 6]])
// tf.matrix_set_diag(diagonal) ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)
// [7, 2, 7, 7],
// [7, 7, 3, 7]],
// [[4, 7, 7, 7],
// [7, 5, 7, 7],
// [7, 7, 6, 7]]]
//
// # A superdiagonal (per batch).
// tf.matrix_set_diag(diagonal, k = 1)
// ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4)
// [7, 7, 2, 7],
// [7, 7, 7, 3]],
// [[7, 4, 7, 7],
// [7, 7, 5, 7],
// [7, 7, 7, 6]]]
//
// # A band of diagonals.
// diagonals = np.array([[[1, 2, 3], # Diagonal shape: (2, 2, 3)
// [4, 5, 0]],
// [[6, 1, 2],
// [3, 4, 0]]])
// tf.matrix_set_diag(diagonals, k = (-1, 0))
// ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)
// [4, 2, 7, 7],
// [0, 5, 3, 7]],
// [[6, 7, 7, 7],
// [3, 1, 7, 7],
// [7, 4, 2, 7]]]
//
// ```
//
// Arguments:
// input: Rank `r+1`, where `r >= 1`.
// diagonal: Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
// `k >= 1`.
// k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
// diagonal, and negative value means subdiagonals. `k` can be a single integer
// (for a single diagonal) or a pair of integers specifying the low and high ends
// of a matrix band. `k[0]` must not be larger than `k[1]`.
//
// Returns Rank `r+1`, with `output.shape = input.shape`.
func MatrixSetDiagV2(scope *Scope, input tf.Output, diagonal tf.Output, k tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "MatrixSetDiagV2",
		Input: []tf.Input{input, diagonal, k},
	})
	return op.Output(0)
}
// Returns a diagonal tensor with a given diagonal values.
//
// Given a `diagonal`, this operation returns a tensor with the `diagonal` and
// everything else padded with zeros. The diagonal is computed as follows:
//
// Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
// rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
//
// `output[i1,..., ik, i1,..., ik] = diagonal[i1,..., ik]` and 0 everywhere else.
//
// For example:
//
// ```
// # 'diagonal' is [1, 2, 3, 4]
// tf.diag(diagonal) ==> [[1, 0, 0, 0]
// [0, 2, 0, 0]
// [0, 0, 3, 0]
// [0, 0, 0, 4]]
// ```
//
// Arguments:
// diagonal: Rank k tensor where k is at most 1.
func Diag(scope *Scope, diagonal tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "Diag",
		Input: []tf.Input{diagonal},
	})
	return op.Output(0)
}
// Returns a tensor of ones with the same shape and type as x.
//
// Arguments:
// x: a tensor of type T.
//
// Returns a tensor of the same shape and type as x but filled with ones.
func OnesLike(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "OnesLike",
		Input: []tf.Input{x},
	})
	return op.Output(0)
}
// Returns a constant tensor on the host. Only for writing C++ tests.
//
// Arguments:
// value: Attr `value` is the tensor to return.
//
func HostConst(scope *Scope, value tf.Tensor, dtype tf.DataType) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	// This op takes no inputs; the tensor is carried entirely via attributes.
	op := scope.AddOperation(tf.OpSpec{
		Type:  "HostConst",
		Attrs: map[string]interface{}{"value": value, "dtype": dtype},
	})
	return op.Output(0)
}
// Splits a tensor into `num_split` tensors along one dimension.
//
// Arguments:
// axis: 0-D. The dimension along which to split. Must be in the range
// `[-rank(value), rank(value))`.
// value: The tensor to split.
// num_split: The number of ways to split. Must evenly divide
// `value.shape[split_dim]`.
//
// Returns They are identically shaped tensors, whose shape matches that of `value`
// except along `axis`, where their sizes are
// `values.shape[split_dim] / num_split`.
func Split(scope *Scope, axis tf.Output, value tf.Output, num_split int64) (output []tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "Split",
		Input: []tf.Input{axis, value},
		Attrs: map[string]interface{}{"num_split": num_split},
	})
	// AddOperation may have recorded an error on the scope; bail before
	// touching the op's outputs.
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
		scope.UpdateErr("Split", err)
		return
	}
	return output
}
// Computes offsets of concat inputs within its output.
//
// For example:
//
// ```
// # 'x' is [2, 2, 7]
// # 'y' is [2, 3, 7]
// # 'z' is [2, 5, 7]
// concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
// ```
//
// This is typically used by gradient computations for a concat operation.
//
// Arguments:
// concat_dim: The dimension along which to concatenate.
// shape: The `N` int32 vectors representing shape of tensors being concatenated.
//
// Returns The `N` int32 vectors representing the starting offset
// of input tensors within the concatenated output.
func ConcatOffset(scope *Scope, concat_dim tf.Output, shape []tf.Output) (offset []tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "ConcatOffset",
		Input: []tf.Input{concat_dim, tf.OutputList(shape)},
	})
	// Re-check: constructing the op can itself fail and record on the scope.
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if offset, idx, err = makeOutputList(op, idx, "offset"); err != nil {
		scope.UpdateErr("ConcatOffset", err)
		return
	}
	return offset
}
// Converts an array of flat indices into a tuple of coordinate arrays.
//
//
// Example:
//
// ```
// y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3])
// # 'dims' represent a hypothetical (3, 3) tensor of indices:
// # [[0, 1, *2*],
// # [3, 4, *5*],
// # [6, *7*, 8]]
// # For each entry from 'indices', this operation returns
// # its coordinates (marked with '*'), such as
// # 2 ==> (0, 2)
// # 5 ==> (1, 2)
// # 7 ==> (2, 1)
// y ==> [[0, 1, 2], [2, 2, 1]]
// ```
//
// @compatibility(numpy)
// Equivalent to np.unravel_index
// @end_compatibility
//
// Arguments:
// indices: An 0-D or 1-D `int` Tensor whose elements are indices into the
// flattened version of an array of dimensions dims.
// dims: An 1-D `int` Tensor. The shape of the array to use for unraveling
// indices.
//
// Returns An 2-D (or 1-D if indices is 0-D) tensor where each row has the
// same shape as the indices array.
func UnravelIndex(scope *Scope, indices tf.Output, dims tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "UnravelIndex",
		Input: []tf.Input{indices, dims},
	})
	return op.Output(0)
}
// EmptyAttr is an optional argument to Empty.
type EmptyAttr func(optionalAttr)

// EmptyInit sets the optional init attribute to value.
//
// value: If True, initialize the returned tensor with the default value of dtype. Otherwise, the implementation is free not to initialize the tensor's content.
// If not specified, defaults to false
func EmptyInit(value bool) EmptyAttr {
	return func(attrs optionalAttr) {
		attrs["init"] = value
	}
}
// Creates a tensor with the given shape.
//
// This operation creates a tensor of `shape` and `dtype`.
//
// Arguments:
// shape: 1-D. Represents the shape of the output tensor.
//
//
// Returns A `Tensor` of type `T`.
func Empty(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...EmptyAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	// dtype is mandatory; optional setters (e.g. EmptyInit) layer on top of it.
	attrs := map[string]interface{}{"dtype": dtype}
	for _, opt := range optional {
		opt(attrs)
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "Empty",
		Input: []tf.Input{shape},
		Attrs: attrs,
	})
	return op.Output(0)
}
// Subtracts `v` into specified rows of `x`.
//
// Computes y = x; y[i, :] -= v; return y.
//
// Arguments:
// x: A `Tensor` of type T.
// i: A vector. Indices into the left-most dimension of `x`.
// v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
//
// Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
func InplaceSub(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "InplaceSub",
		Input: []tf.Input{x, i, v},
	})
	return op.Output(0)
}
// PackAttr is an optional argument to Pack.
type PackAttr func(optionalAttr)

// PackAxis sets the optional axis attribute to value.
//
// value: Dimension along which to pack. Negative values wrap around, so the
// valid range is `[-(R+1), R+1)`.
// If not specified, defaults to 0
func PackAxis(value int64) PackAttr {
	return func(attrs optionalAttr) {
		attrs["axis"] = value
	}
}
// Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
//
// Packs the `N` tensors in `values` into a tensor with rank one higher than each
// tensor in `values`, by packing them along the `axis` dimension.
// Given a list of tensors of shape `(A, B, C)`;
//
// if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
// if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
// Etc.
//
// For example:
//
// ```
// # 'x' is [1, 4]
// # 'y' is [2, 5]
// # 'z' is [3, 6]
// pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
// pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
// ```
//
// This is the opposite of `unpack`.
//
// Arguments:
// values: Must be of same shape and type.
//
// Returns The packed tensor.
func Pack(scope *Scope, values []tf.Output, optional ...PackAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	// Merge any optional attributes (e.g. PackAxis) into one map.
	attrs := map[string]interface{}{}
	for _, opt := range optional {
		opt(attrs)
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "Pack",
		Input: []tf.Input{tf.OutputList(values)},
		Attrs: attrs,
	})
	return op.Output(0)
}
// MfccAttr is an optional argument to Mfcc.
type MfccAttr func(optionalAttr)

// MfccUpperFrequencyLimit sets the optional upper_frequency_limit attribute to value.
//
// value: The highest frequency to use when calculating the
// ceptstrum.
// If not specified, defaults to 4000
func MfccUpperFrequencyLimit(value float32) MfccAttr {
	return func(attrs optionalAttr) {
		attrs["upper_frequency_limit"] = value
	}
}

// MfccLowerFrequencyLimit sets the optional lower_frequency_limit attribute to value.
//
// value: The lowest frequency to use when calculating the
// ceptstrum.
// If not specified, defaults to 20
func MfccLowerFrequencyLimit(value float32) MfccAttr {
	return func(attrs optionalAttr) {
		attrs["lower_frequency_limit"] = value
	}
}

// MfccFilterbankChannelCount sets the optional filterbank_channel_count attribute to value.
//
// value: Resolution of the Mel bank used internally.
// If not specified, defaults to 40
func MfccFilterbankChannelCount(value int64) MfccAttr {
	return func(attrs optionalAttr) {
		attrs["filterbank_channel_count"] = value
	}
}

// MfccDctCoefficientCount sets the optional dct_coefficient_count attribute to value.
//
// value: How many output channels to produce per time slice.
// If not specified, defaults to 13
func MfccDctCoefficientCount(value int64) MfccAttr {
	return func(attrs optionalAttr) {
		attrs["dct_coefficient_count"] = value
	}
}
// Transforms a spectrogram into a form that's useful for speech recognition.
//
// Mel Frequency Cepstral Coefficients are a way of representing audio data that's
// been effective as an input feature for machine learning. They are created by
// taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
// higher frequencies that are less significant to the human ear. They have a long
// history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
// is a good resource to learn more.
//
// Arguments:
// spectrogram: Typically produced by the Spectrogram op, with magnitude_squared
// set to true.
// sample_rate: How many samples per second the source audio used.
func Mfcc(scope *Scope, spectrogram tf.Output, sample_rate tf.Output, optional ...MfccAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	// Apply the optional frequency/channel attribute setters.
	attrs := map[string]interface{}{}
	for _, opt := range optional {
		opt(attrs)
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "Mfcc",
		Input: []tf.Input{spectrogram, sample_rate},
		Attrs: attrs,
	})
	return op.Output(0)
}
// AudioSpectrogramAttr is an optional argument to AudioSpectrogram.
type AudioSpectrogramAttr func(optionalAttr)
// AudioSpectrogramMagnitudeSquared sets the optional magnitude_squared attribute to value.
//
// value: Whether to return the squared magnitude or just the
// magnitude. Using squared magnitude can avoid extra calculations.
// If not specified, defaults to false
func AudioSpectrogramMagnitudeSquared(value bool) AudioSpectrogramAttr {
	return func(attrs optionalAttr) {
		attrs["magnitude_squared"] = value
	}
}
// AudioSpectrogram produces a visualization of audio data over time.
//
// Spectrograms are a standard way of representing audio information as a series of
// slices of frequency information, one slice for each window of time. By joining
// these together into a sequence, they form a distinctive fingerprint of the sound
// over time.
//
// This op expects to receive audio data as an input, stored as floats in the range
// -1 to 1, together with a window width in samples, and a stride specifying how
// far to move the window between slices. From this it generates a three
// dimensional output. The first dimension is for the channels in the input, so a
// stereo audio input would have two here for example. The second dimension is time,
// with successive frequency slices. The third dimension has an amplitude value for
// each frequency during that time slice.
//
// This means the layout when converted and saved as an image is rotated 90 degrees
// clockwise from a typical spectrogram. Time is descending down the Y axis, and
// the frequency decreases from left to right.
//
// Each value in the result represents the square root of the sum of the real and
// imaginary parts of an FFT on the current window of samples. In this way, the
// lowest dimension represents the power of each frequency in the current window,
// and adjacent windows are concatenated in the next dimension.
//
// To get a more intuitive and visual look at what this operation does, you can run
// tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the
// resulting spectrogram as a PNG image.
//
// Arguments:
//	input: Float representation of audio data.
//	window_size: How wide the input window is in samples. For the highest efficiency
// this should be a power of two, but other values are accepted.
//	stride: How widely apart the center of adjacent sample windows should be.
//
// Returns 3D representation of the audio frequencies as an image.
func AudioSpectrogram(scope *Scope, input tf.Output, window_size int64, stride int64, optional ...AudioSpectrogramAttr) (spectrogram tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"window_size": window_size, "stride": stride}
	for _, setAttr := range optional {
		setAttr(attrs)
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "AudioSpectrogram",
		Input: []tf.Input{input},
		Attrs: attrs,
	})
	return op.Output(0)
}
// DecodeWavAttr is an optional argument to DecodeWav.
// Each value is a setter that mutates the op's optional-attribute map
// before the operation is added to the graph.
type DecodeWavAttr func(optionalAttr)
// DecodeWavDesiredChannels sets the optional desired_channels attribute to value.
//
// value: Number of sample channels wanted.
// If not specified, defaults to -1
func DecodeWavDesiredChannels(value int64) DecodeWavAttr {
	return func(attrs optionalAttr) {
		attrs["desired_channels"] = value
	}
}
// DecodeWavDesiredSamples sets the optional desired_samples attribute to value.
//
// value: Length of audio requested.
// If not specified, defaults to -1
func DecodeWavDesiredSamples(value int64) DecodeWavAttr {
	return func(attrs optionalAttr) {
		attrs["desired_samples"] = value
	}
}
// DecodeWav decodes a 16-bit PCM WAV file to a float tensor.
//
// The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.
//
// When desired_channels is set, if the input contains fewer channels than this
// then the last channel will be duplicated to give the requested number, else if
// the input has more channels than requested then the additional channels will be
// ignored.
//
// If desired_samples is set, then the audio will be cropped or padded with zeroes
// to the requested length.
//
// The first output contains a Tensor with the content of the audio samples. The
// lowest dimension will be the number of channels, and the second will be the
// number of samples. For example, a ten-sample-long stereo WAV file should give an
// output shape of [10, 2].
//
// Arguments:
//	contents: The WAV-encoded audio, usually from a file.
//
// Returns:
//	audio: 2-D with shape `[length, channels]`.
//	sample_rate: Scalar holding the sample rate found in the WAV header.
func DecodeWav(scope *Scope, contents tf.Output, optional ...DecodeWavAttr) (audio tf.Output, sample_rate tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, setAttr := range optional {
		setAttr(attrs)
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "DecodeWav",
		Input: []tf.Input{contents},
		Attrs: attrs,
	})
	return op.Output(0), op.Output(1)
}
// UnbatchGradAttr is an optional argument to UnbatchGrad.
// Each value is a setter that mutates the op's optional-attribute map
// before the operation is added to the graph.
type UnbatchGradAttr func(optionalAttr)
// UnbatchGradContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func UnbatchGradContainer(value string) UnbatchGradAttr {
	return func(attrs optionalAttr) {
		attrs["container"] = value
	}
}
// UnbatchGradSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func UnbatchGradSharedName(value string) UnbatchGradAttr {
	return func(attrs optionalAttr) {
		attrs["shared_name"] = value
	}
}
// UnbatchGrad computes the gradient of Unbatch.
//
// Acts like Batch but using the given batch_index index of batching things as they
// become available. This ensures that the gradients are propagated back in the
// same session which did the forward pass.
//
// original_input: The input to the Unbatch operation this is the gradient of.
// batch_index: The batch_index given to the Unbatch operation this is the gradient
// of.
// grad: The downstream gradient.
// id: The id scalar emitted by Batch.
// batched_grad: The return value, either an empty tensor or the batched gradient.
// container: Container to control resource sharing.
// shared_name: Instances of UnbatchGrad with the same container and shared_name
// are assumed to possibly belong to the same batch. If left empty, the op name
// will be used as the shared name.
func UnbatchGrad(scope *Scope, original_input tf.Output, batch_index tf.Output, grad tf.Output, id tf.Output, optional ...UnbatchGradAttr) (batched_grad tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, setAttr := range optional {
		setAttr(attrs)
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "UnbatchGrad",
		Input: []tf.Input{original_input, batch_index, grad, id},
		Attrs: attrs,
	})
	return op.Output(0)
}
// PopulationCount computes element-wise population count (a.k.a. popcount, bitsum, bitcount).
//
// For each entry in `x`, calculates the number of `1` (on) bits in the binary
// representation of that entry.
//
// **NOTE**: It is more efficient to first `tf.bitcast` your tensors into
// `int32` or `int64` and perform the bitcount on the result, than to feed in
// 8- or 16-bit inputs and then aggregate the resulting counts.
func PopulationCount(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "PopulationCount",
		Input: []tf.Input{x},
	})
	return op.Output(0)
}
// BoostedTreesBucketize buckets each feature based on bucket boundaries.
//
// An op that returns a list of float tensors, where each tensor represents the
// bucketized values for a single feature.
//
// Arguments:
//	float_values: float; List of Rank 1 Tensor each containing float values for a single feature.
//	bucket_boundaries: float; List of Rank 1 Tensors each containing the bucket boundaries for a single
// feature.
//
// Returns int; List of Rank 1 Tensors each containing the bucketized values for a single feature.
func BoostedTreesBucketize(scope *Scope, float_values []tf.Output, bucket_boundaries []tf.Output) (buckets []tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "BoostedTreesBucketize",
		Input: []tf.Input{tf.OutputList(float_values), tf.OutputList(bucket_boundaries)},
	})
	if scope.Err() != nil {
		return
	}
	var err error
	if buckets, _, err = makeOutputList(op, 0, "buckets"); err != nil {
		scope.UpdateErr("BoostedTreesBucketize", err)
		return
	}
	return buckets
}
// BoostedTreesCreateQuantileStreamResourceAttr is an optional argument to BoostedTreesCreateQuantileStreamResource.
// Each value is a setter that mutates the op's optional-attribute map
// before the operation is added to the graph.
type BoostedTreesCreateQuantileStreamResourceAttr func(optionalAttr)
// BoostedTreesCreateQuantileStreamResourceMaxElements sets the optional max_elements attribute to value.
//
// value: int; The maximum number of data points that can be fed to the stream.
// If not specified, defaults to 1099511627776
func BoostedTreesCreateQuantileStreamResourceMaxElements(value int64) BoostedTreesCreateQuantileStreamResourceAttr {
	return func(attrs optionalAttr) {
		attrs["max_elements"] = value
	}
}
// BoostedTreesCreateQuantileStreamResource creates the Resource for Quantile Streams.
//
// Arguments:
//	quantile_stream_resource_handle: resource; Handle to quantile stream resource.
//	epsilon: float; The required approximation error of the stream resource.
//	num_streams: int; The number of streams managed by the resource that shares the same epsilon.
//
// Returns the created operation.
func BoostedTreesCreateQuantileStreamResource(scope *Scope, quantile_stream_resource_handle tf.Output, epsilon tf.Output, num_streams tf.Output, optional ...BoostedTreesCreateQuantileStreamResourceAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, setAttr := range optional {
		setAttr(attrs)
	}
	return scope.AddOperation(tf.OpSpec{
		Type:  "BoostedTreesCreateQuantileStreamResource",
		Input: []tf.Input{quantile_stream_resource_handle, epsilon, num_streams},
		Attrs: attrs,
	})
}
// BoostedTreesUpdateEnsemble updates the tree ensemble by either adding a layer to
// the last tree being grown or by starting a new tree.
//
// Arguments:
//	tree_ensemble_handle: Handle to the ensemble variable.
//	feature_ids: Rank 1 tensor with ids for each feature. This is the real id of
// the feature that will be used in the split.
//	node_ids: List of rank 1 tensors representing the nodes for which this feature
// has a split.
//	gains: List of rank 1 tensors representing the gains for each of the feature's
// split.
//	thresholds: List of rank 1 tensors representing the thesholds for each of the
// feature's split.
//	left_node_contribs: List of rank 2 tensors with left leaf contribs for each of
// the feature's splits. Will be added to the previous node values to constitute
// the values of the left nodes.
//	right_node_contribs: List of rank 2 tensors with right leaf contribs for each
// of the feature's splits. Will be added to the previous node values to constitute
// the values of the right nodes.
//	max_depth: Max depth of the tree to build.
//	learning_rate: shrinkage const for each new tree.
//	pruning_mode: 0-No pruning, 1-Pre-pruning, 2-Post-pruning.
//
// Returns the created operation.
func BoostedTreesUpdateEnsemble(scope *Scope, tree_ensemble_handle tf.Output, feature_ids tf.Output, node_ids []tf.Output, gains []tf.Output, thresholds []tf.Output, left_node_contribs []tf.Output, right_node_contribs []tf.Output, max_depth tf.Output, learning_rate tf.Output, pruning_mode int64) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	return scope.AddOperation(tf.OpSpec{
		Type: "BoostedTreesUpdateEnsemble",
		Input: []tf.Input{
			tree_ensemble_handle, feature_ids, tf.OutputList(node_ids), tf.OutputList(gains),
			tf.OutputList(thresholds), tf.OutputList(left_node_contribs), tf.OutputList(right_node_contribs),
			max_depth, learning_rate,
		},
		Attrs: map[string]interface{}{"pruning_mode": pruning_mode},
	})
}
// BoostedTreesTrainingPredict runs multiple additive regression ensemble predictors
// on input instances and computes the update to cached logits. It is designed to be
// used during training. It traverses the trees starting from cached tree id and
// cached node id and calculates the updates to be pushed to the cache.
//
// Arguments:
//	cached_tree_ids: Rank 1 Tensor containing cached tree ids which is the starting
// tree of prediction.
//	cached_node_ids: Rank 1 Tensor containing cached node id which is the starting
// node of prediction.
//	bucketized_features: A list of rank 1 Tensors containing bucket id for each
// feature.
//	logits_dimension: scalar, dimension of the logits, to be used for partial logits
// shape.
//
// Returns:
//	partial_logits: Rank 2 Tensor containing logits update (with respect to cached
// values stored) for each example.
//	tree_ids: Rank 1 Tensor containing new tree ids for each example.
//	node_ids: Rank 1 Tensor containing new node ids in the new tree_ids.
func BoostedTreesTrainingPredict(scope *Scope, tree_ensemble_handle tf.Output, cached_tree_ids tf.Output, cached_node_ids tf.Output, bucketized_features []tf.Output, logits_dimension int64) (partial_logits tf.Output, tree_ids tf.Output, node_ids tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type: "BoostedTreesTrainingPredict",
		Input: []tf.Input{
			tree_ensemble_handle, cached_tree_ids, cached_node_ids, tf.OutputList(bucketized_features),
		},
		Attrs: map[string]interface{}{"logits_dimension": logits_dimension},
	})
	return op.Output(0), op.Output(1), op.Output(2)
}
// BoostedTreesAggregateStats aggregates the summary of accumulated stats for the batch.
//
// The summary stats contains gradients and hessians accumulated for each node, feature dimension id and bucket.
//
// Arguments:
//	node_ids: int32; Rank 1 Tensor containing node ids for each example, shape [batch_size].
//	gradients: float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example.
//	hessians: float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example.
//	feature: int32; Rank 2 feature Tensors (shape=[batch_size, feature_dimension]).
//	max_splits: int; the maximum number of splits possible in the whole tree.
//	num_buckets: int; equals to the maximum possible value of bucketized feature.
//
// Returns output Rank 4 Tensor (shape=[splits, feature_dimension, buckets, logits_dimension + hessian_dimension])
// containing accumulated stats for each node, feature dimension and bucket.
func BoostedTreesAggregateStats(scope *Scope, node_ids tf.Output, gradients tf.Output, hessians tf.Output, feature tf.Output, max_splits int64, num_buckets int64) (stats_summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "BoostedTreesAggregateStats",
		Input: []tf.Input{node_ids, gradients, hessians, feature},
		Attrs: map[string]interface{}{"max_splits": max_splits, "num_buckets": num_buckets},
	})
	return op.Output(0)
}
// BoostedTreesMakeStatsSummary makes the summary of accumulated stats for the batch.
//
// The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each example.
//
// Arguments:
//	node_ids: int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer.
//	gradients: float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients.
//	hessians: float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians.
//	bucketized_features_list: int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column).
//	max_splits: int; the maximum number of splits possible in the whole tree.
//	num_buckets: int; equals to the maximum possible value of bucketized feature.
//
// Returns output Rank 4 Tensor (shape=[#features, #splits, #buckets, 2]) containing accumulated stats put into the corresponding node and bucket. The first index of 4th dimension refers to gradients, and the second to hessians.
func BoostedTreesMakeStatsSummary(scope *Scope, node_ids tf.Output, gradients tf.Output, hessians tf.Output, bucketized_features_list []tf.Output, max_splits int64, num_buckets int64) (stats_summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "BoostedTreesMakeStatsSummary",
		Input: []tf.Input{node_ids, gradients, hessians, tf.OutputList(bucketized_features_list)},
		Attrs: map[string]interface{}{"max_splits": max_splits, "num_buckets": num_buckets},
	})
	return op.Output(0)
}
// BoostedTreesDeserializeEnsemble deserializes a serialized tree ensemble config
// and replaces the current tree ensemble.
//
// Arguments:
//	tree_ensemble_handle: Handle to the tree ensemble.
//	stamp_token: Token to use as the new value of the resource stamp.
//	tree_ensemble_serialized: Serialized proto of the ensemble.
//
// Returns the created operation.
func BoostedTreesDeserializeEnsemble(scope *Scope, tree_ensemble_handle tf.Output, stamp_token tf.Output, tree_ensemble_serialized tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	return scope.AddOperation(tf.OpSpec{
		Type:  "BoostedTreesDeserializeEnsemble",
		Input: []tf.Input{tree_ensemble_handle, stamp_token, tree_ensemble_serialized},
	})
}
// BoostedTreesFlushQuantileSummaries flushes the quantile summaries from each
// quantile stream resource.
//
// An op that outputs a list of quantile summaries of a quantile stream resource.
// Each summary Tensor is rank 2, containing summaries (value, weight, min_rank,
// max_rank) for a single feature.
//
// Arguments:
//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
func BoostedTreesFlushQuantileSummaries(scope *Scope, quantile_stream_resource_handle tf.Output, num_features int64) (summaries []tf.Output) {
	if scope.Err() != nil {
		return
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "BoostedTreesFlushQuantileSummaries",
		Input: []tf.Input{quantile_stream_resource_handle},
		Attrs: map[string]interface{}{"num_features": num_features},
	})
	if scope.Err() != nil {
		return
	}
	var err error
	if summaries, _, err = makeOutputList(op, 0, "summaries"); err != nil {
		scope.UpdateErr("BoostedTreesFlushQuantileSummaries", err)
		return
	}
	return summaries
}
// BoostedTreesSparseCalculateBestFeatureSplitAttr is an optional argument to BoostedTreesSparseCalculateBestFeatureSplit.
// Each value is a setter that mutates the op's optional-attribute map
// before the operation is added to the graph.
type BoostedTreesSparseCalculateBestFeatureSplitAttr func(optionalAttr)
// BoostedTreesSparseCalculateBestFeatureSplitSplitType sets the optional split_type attribute to value.
//
// value: A string indicating if this Op should perform inequality split or equality split.
// If not specified, defaults to "inequality"
func BoostedTreesSparseCalculateBestFeatureSplitSplitType(value string) BoostedTreesSparseCalculateBestFeatureSplitAttr {
	return func(attrs optionalAttr) {
		attrs["split_type"] = value
	}
}
// BoostedTreesSparseCalculateBestFeatureSplit calculates gains for each feature and
// returns the best possible split information for the feature.
//
// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
//
// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
//
// In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
//
// The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature.
//
// Arguments:
//	node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).
//	stats_summary_indices: A Rank 2 int64 tensor of dense shape [N, 4] (N specifies the number of non-zero values) for accumulated stats summary (gradient/hessian) per node per bucket for each feature. The second dimension contains node id, feature dimension, bucket id, and stats dim.
// stats dim is the sum of logits dimension and hessian dimension, hessian dimension can either be logits dimension if diagonal hessian is used, or logits dimension^2 if full hessian is used.
//	stats_summary_values: A Rank 1 float tensor of dense shape [N] (N specifies the number of non-zero values), which supplies the values for each element in summary_indices.
//	stats_summary_shape: A Rank 1 float tensor of dense shape [4], which specifies the dense shape of the sparse tensor, which is [num tree nodes, feature dimensions, num buckets, stats dim].
//	l1: l1 regularization factor on leaf weights, per instance based.
//	l2: l2 regularization factor on leaf weights, per instance based.
//	tree_complexity: adjustment to the gain, per leaf based.
//	min_node_weight: minimum avg of hessians in a node before required for the node to be considered for splitting.
//	logits_dimension: The dimension of logit, i.e., number of classes.
//
// Returns:
//	node_ids: A Rank 1 tensor indicating possible node ids that can be split.
//	gains: A Rank 1 tensor indicating the best gains to split each node.
//	feature_dimensions: A Rank 1 tensor indicating the best feature dimension for each feature to split for each node.
//	thresholds: A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for split in each node.
//	left_node_contribs: A Rank 2 tensor indicating the contribution of the left nodes when branching from parent nodes to the left direction by the given threshold for each feature.
// This value will be used to make the left node value by adding to the parent node value. Second dimension size is logits dimension.
//	right_node_contribs: A Rank 2 tensor, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.
//	split_with_default_directions: A Rank 1 tensor indicating which direction to go if data is missing.
// Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.
func BoostedTreesSparseCalculateBestFeatureSplit(scope *Scope, node_id_range tf.Output, stats_summary_indices tf.Output, stats_summary_values tf.Output, stats_summary_shape tf.Output, l1 tf.Output, l2 tf.Output, tree_complexity tf.Output, min_node_weight tf.Output, logits_dimension int64, optional ...BoostedTreesSparseCalculateBestFeatureSplitAttr) (node_ids tf.Output, gains tf.Output, feature_dimensions tf.Output, thresholds tf.Output, left_node_contribs tf.Output, right_node_contribs tf.Output, split_with_default_directions tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
	for _, setAttr := range optional {
		setAttr(attrs)
	}
	op := scope.AddOperation(tf.OpSpec{
		Type: "BoostedTreesSparseCalculateBestFeatureSplit",
		Input: []tf.Input{
			node_id_range, stats_summary_indices, stats_summary_values, stats_summary_shape,
			l1, l2, tree_complexity, min_node_weight,
		},
		Attrs: attrs,
	})
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
}
// Calculates gains for each feature and returns the best possible split information for each node. However, if no split is found, then no split information is returned for that node.
//
// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
//
// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
//
// In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
//
// The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature.
//
// Arguments:
// node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).
// stats_summaries_list: A list of Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per buckets for each feature.
// The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
// split_types: A Rank 1 tensor indicating if this Op should perform inequality split or equality split per feature.
// candidate_feature_ids: Rank 1 tensor with ids for each feature. This is the real id of the feature.
// l1: l1 regularization factor on leaf weights, per instance based.
// l2: l2 regularization factor on leaf weights, per instance based.
// tree_complexity: adjustment to the gain, per leaf based.
// min_node_weight: minimum avg of hessians in a node before required for the node to be considered for splitting.
// logits_dimension: The dimension of logit, i.e., number of classes.
//
// Returns:
// node_ids: A Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes.
// gains: A Rank 1 tensor indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
// feature_ids: A Rank 1 tensors indicating the best feature id for each node. See above for details like shapes and sizes.
// feature_dimensions: A Rank 1 tensors indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimension. See above for details like shapes and sizes.
// thresholds: A Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.
// left_node_contribs: A Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
// right_node |
import sys
import errno
import signal
from eventlet import patcher
# Use the real (unpatched) select and time modules; the hub itself must not
# go through eventlet's green replacements.
select = patcher.original('select')
time = patcher.original('time')
sleep = time.sleep
from eventlet.support import get_errno, clear_sys_exc_info
from eventlet.hubs.hub import BaseHub, READ, WRITE, noop, alarm_handler
# poll() event masks: error/hangup, readable (normal or priority), writable.
EXC_MASK = select.POLLERR | select.POLLHUP
READ_MASK = select.POLLIN | select.POLLPRI
WRITE_MASK = select.POLLOUT
class Hub(BaseHub):
    """Event hub backed by select.poll().

    Mirrors the READ/WRITE listener tables maintained by BaseHub into a
    poll object and dispatches listener callbacks as filenos become ready.
    """

    def __init__(self, clock=time.time):
        super(Hub, self).__init__(clock)
        self.poll = select.poll()
        # poll.modify is new to 2.6
        try:
            self.modify = self.poll.modify
        except AttributeError:
            self.modify = self.poll.register

    def add(self, evtype, fileno, cb):
        # Register with the base hub first, then reflect the new interest
        # set into the poll object.
        listener = super(Hub, self).add(evtype, fileno, cb)
        self.register(fileno, new=True)
        return listener

    def remove(self, listener):
        super(Hub, self).remove(listener)
        # Recompute the poll mask for this fileno (may unregister it).
        self.register(listener.fileno)

    def register(self, fileno, new=False):
        # Build the poll mask from the current READ/WRITE listener tables
        # and apply it; an empty mask means the fileno is unregistered.
        mask = 0
        if self.listeners[READ].get(fileno):
            mask |= READ_MASK | EXC_MASK
        if self.listeners[WRITE].get(fileno):
            mask |= WRITE_MASK | EXC_MASK
        try:
            if mask:
                if new:
                    self.poll.register(fileno, mask)
                else:
                    # Not known to be new: try modify first, fall back to
                    # register if the fd was not actually registered.
                    try:
                        self.modify(fileno, mask)
                    except (IOError, OSError):
                        self.poll.register(fileno, mask)
            else:
                try:
                    self.poll.unregister(fileno)
                except (KeyError, IOError, OSError):
                    # raised if we try to remove a fileno that was
                    # already removed/invalid
                    pass
        except ValueError:
            # fileno is bad, issue 74
            self.remove_descriptor(fileno)
            raise

    def remove_descriptor(self, fileno):
        super(Hub, self).remove_descriptor(fileno)
        try:
            self.poll.unregister(fileno)
        except (KeyError, ValueError, IOError, OSError):
            # raised if we try to remove a fileno that was
            # already removed/invalid
            pass

    def do_poll(self, seconds):
        # poll.poll expects integral milliseconds
        return self.poll.poll(int(seconds * 1000.0))

    def wait(self, seconds=None):
        readers = self.listeners[READ]
        writers = self.listeners[WRITE]
        if not readers and not writers:
            # Nothing to wait on: just sleep out the timeout.
            if seconds:
                sleep(seconds)
            return
        try:
            presult = self.do_poll(seconds)
        except (IOError, select.error), e:
            # NOTE: Python 2-only except syntax; interrupted poll is benign.
            if get_errno(e) == errno.EINTR:
                return
            raise
        SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS
        if self.debug_blocking:
            self.block_detect_pre()
        for fileno, event in presult:
            try:
                if event & READ_MASK:
                    readers.get(fileno, noop).cb(fileno)
                if event & WRITE_MASK:
                    writers.get(fileno, noop).cb(fileno)
                if event & select.POLLNVAL:
                    self.remove_descriptor(fileno)
                    continue
                if event & EXC_MASK:
                    # NOTE(review): the descriptor is NOT removed here, so an
                    # fd reporting only POLLERR/POLLHUP stays registered and
                    # poll() returns immediately forever (busy-spin, HUE-627);
                    # the patched variant below removes it.
                    readers.get(fileno, noop).cb(fileno)
                    writers.get(fileno, noop).cb(fileno)
            except SYSTEM_EXCEPTIONS:
                raise
            except:
                self.squelch_exception(fileno, sys.exc_info())
                clear_sys_exc_info()
        if self.debug_blocking:
            self.block_detect_post()
HUE-627 Spawning server pegs CPU at 100%
Patching eventlet 0.9.14
import sys
import errno
import signal
from eventlet import patcher
# Use the real (unpatched) select and time modules; the hub itself must not
# go through eventlet's green replacements.
select = patcher.original('select')
time = patcher.original('time')
sleep = time.sleep
from eventlet.support import get_errno, clear_sys_exc_info
from eventlet.hubs.hub import BaseHub, READ, WRITE, noop, alarm_handler
# poll() event masks: error/hangup, readable (normal or priority), writable.
EXC_MASK = select.POLLERR | select.POLLHUP
READ_MASK = select.POLLIN | select.POLLPRI
WRITE_MASK = select.POLLOUT
class Hub(BaseHub):
    """Event hub backed by select.poll() (patched variant, HUE-627).

    Identical to the unpatched hub above except that a descriptor raising
    POLLERR/POLLHUP is removed after its callbacks run, so a dead fd cannot
    keep poll() returning immediately and spin the CPU.
    """

    def __init__(self, clock=time.time):
        super(Hub, self).__init__(clock)
        self.poll = select.poll()
        # poll.modify is new to 2.6
        try:
            self.modify = self.poll.modify
        except AttributeError:
            self.modify = self.poll.register

    def add(self, evtype, fileno, cb):
        # Register with the base hub first, then reflect the new interest
        # set into the poll object.
        listener = super(Hub, self).add(evtype, fileno, cb)
        self.register(fileno, new=True)
        return listener

    def remove(self, listener):
        super(Hub, self).remove(listener)
        # Recompute the poll mask for this fileno (may unregister it).
        self.register(listener.fileno)

    def register(self, fileno, new=False):
        # Build the poll mask from the current READ/WRITE listener tables
        # and apply it; an empty mask means the fileno is unregistered.
        mask = 0
        if self.listeners[READ].get(fileno):
            mask |= READ_MASK | EXC_MASK
        if self.listeners[WRITE].get(fileno):
            mask |= WRITE_MASK | EXC_MASK
        try:
            if mask:
                if new:
                    self.poll.register(fileno, mask)
                else:
                    # Not known to be new: try modify first, fall back to
                    # register if the fd was not actually registered.
                    try:
                        self.modify(fileno, mask)
                    except (IOError, OSError):
                        self.poll.register(fileno, mask)
            else:
                try:
                    self.poll.unregister(fileno)
                except (KeyError, IOError, OSError):
                    # raised if we try to remove a fileno that was
                    # already removed/invalid
                    pass
        except ValueError:
            # fileno is bad, issue 74
            self.remove_descriptor(fileno)
            raise

    def remove_descriptor(self, fileno):
        super(Hub, self).remove_descriptor(fileno)
        try:
            self.poll.unregister(fileno)
        except (KeyError, ValueError, IOError, OSError):
            # raised if we try to remove a fileno that was
            # already removed/invalid
            pass

    def do_poll(self, seconds):
        # poll.poll expects integral milliseconds
        return self.poll.poll(int(seconds * 1000.0))

    def wait(self, seconds=None):
        readers = self.listeners[READ]
        writers = self.listeners[WRITE]
        if not readers and not writers:
            # Nothing to wait on: just sleep out the timeout.
            if seconds:
                sleep(seconds)
            return
        try:
            presult = self.do_poll(seconds)
        except (IOError, select.error), e:
            # NOTE: Python 2-only except syntax; interrupted poll is benign.
            if get_errno(e) == errno.EINTR:
                return
            raise
        SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS
        if self.debug_blocking:
            self.block_detect_pre()
        for fileno, event in presult:
            try:
                if event & READ_MASK:
                    readers.get(fileno, noop).cb(fileno)
                if event & WRITE_MASK:
                    writers.get(fileno, noop).cb(fileno)
                if event & select.POLLNVAL:
                    self.remove_descriptor(fileno)
                    continue
                if event & EXC_MASK:
                    readers.get(fileno, noop).cb(fileno)
                    writers.get(fileno, noop).cb(fileno)
                    # The HUE-627 fix: drop the errored/hung-up descriptor so
                    # it cannot make poll() return immediately forever.
                    self.remove_descriptor(fileno)
            except SYSTEM_EXCEPTIONS:
                raise
            except:
                self.squelch_exception(fileno, sys.exc_info())
                clear_sys_exc_info()
        if self.debug_blocking:
            self.block_detect_post()
|
import sys
import json
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from PIL import Image, ImageFont, ImageDraw
from tkinter import Tk
from tkinter import filedialog
from fpdf import FPDF
from datetime import datetime
"""
Default constructor for parsing yaml objects
Parses them generically
"""
def default_ctor(loader, tag_suffix, node):
return loader.construct_mapping(node, deep=True)
"""
Load a YAML config from a file
Returns a dict representing the RobotBuilder config
"""
def _load_yaml(path):
yaml.add_multi_constructor('', default_ctor, Loader=Loader)
data = None
with open(path, 'r') as f:
yaml_elements = yaml.load_all(f, Loader=Loader)
for x in yaml_elements:
data = x
return data
"""
Get controllers from a RobotBuilder config
Returns a list of controllers (OI devices)
"""
def _get_controllers(rb_conf):
# Find the OI section of the RobotBuilder config
oi_section = [x for x in rb_conf['Children'] if x['Base'] == 'OI'][0]
# List of all of the controllers
controllers = oi_section['Children']
return controllers
"""
Get button bindings on a RobotBuilder controller
Returns a list of buttons
"""
def _get_bindings(controller):
return controller['Children']
class ControllerAnnotation:
    """Generates annotated controller-mapping images from a RobotBuilder
    config plus per-controller map files, and bundles them into a PDF.

    config_file_path -- optional YAML config for the annotator itself
    gui -- when True, missing paths are requested via tkinter file dialogs
    """

    def __init__(self, config_file_path=None, gui=False):
        self.gui = gui
        self.config = None
        if config_file_path is not None:
            self.config = _load_yaml(config_file_path)
        # Optional mapping of controller name -> controller map file path.
        self.config_map_files = self.__read_config_val('mapFiles')

    def __read_config_val(self, key):
        """Read a value from the config.

        Returns None if not defined or if config doesn't exist.
        """
        if self.config is None:
            return None
        return self.config.get(key, None)
        # BUG FIX: removed an unreachable `return data` that followed the
        # return above (leftover from a copy-paste of _load_yaml).

    def __get_controller_config_path(self, controller_name):
        """Get the path to the controller config.

        If this returns None, the controller is skipped.
        """
        map_file = None
        # Try to get the map file from the config
        if self.config_map_files is not None:
            map_file = self.config_map_files.get(controller_name, None)
        # If we couldn't get it from the config, ask the user for the map file
        if map_file is None and self.gui:
            map_file = filedialog.askopenfilename(
                initialdir="controllers",
                title="Choose a controller file for " + controller_name,
                filetypes=(("Controller Map", "*.yaml"), ("all files", "*.*")))
        return map_file

    def __get_controller_config(self, controller_name):
        """Get the controller config for a controller.

        Specifies the x/y of the buttons, the font size, the controller
        image, etc. If this returns None, the controller is skipped.
        """
        path = self.__get_controller_config_path(controller_name)
        if path is None or len(path) == 0:
            return None
        return _load_yaml(path)

    def __draw_mapping_img(self, controller):
        """Draw a mapping image for a RobotBuilder controller.

        Saves to out/<controller_name>.jpg and returns that filename,
        or None if the controller doesn't have a config.
        """
        controller_name = controller['Name']  # ex. "Driver Left"
        controller_map = self.__get_controller_config(controller_name)
        if controller_map is None:  # No controller map available, skip
            return None
        # Get the buttons from the controller map
        controller_buttons = controller_map['buttons']
        # Open the base image for the controller and create a pillow drawing context
        img = Image.open(controller_map['image'])
        draw = ImageDraw.Draw(img)
        # Setup the font for Pillow
        font_size = controller_map['fontSize']
        font = ImageFont.truetype('LiberationSans-Regular.ttf', font_size)
        # Write the name of the controller on the upper-left of the image
        draw.text((0, 0), controller_name, (128, 128, 128), font=font)
        # Write the date and time under the controller name
        cur_datetime = datetime.utcnow().strftime('%Y-%m-%d %H:%M')
        draw.text((0, font_size), 'As of ' + cur_datetime + ' UTC', (128, 128, 128), font=font)
        # Positions already holding text, so stacked bindings are shifted down
        # instead of drawn on top of each other.
        taken_positions = {}
        for binding in _get_bindings(controller):
            # Skip bindings that aren't to buttons
            if binding.get('Base', '') != 'Joystick Button':
                continue
            # The ID of the button on the controller
            btn_id = binding['Properties']['Button']['value']
            # The name of the button binding
            btn_name = binding.get('Name', 'No name specified')
            # Find the button on the controller map
            matching_btns = [cbtn for cbtn in controller_buttons if str(cbtn['id']) == btn_id]
            # If the button was not specified in the controller map, give a warning
            if len(matching_btns) == 0:
                print('Warning: No entry in button map', self.__get_controller_config_path(controller_name), 'for button', btn_id, '(' + btn_name + ')')
                continue
            btn = matching_btns[0]
            pos = (btn['x'], btn['y'])
            # Move down one text line at a time until we find a free spot.
            while taken_positions.get(pos, False):
                pos = (pos[0], pos[1] + font_size)
            # Draw the binding name in the area specified by the controller map
            draw.text(pos, btn_name, (0, 0, 0), font=font)
            taken_positions[pos] = True
        # Save the finished image to a file
        output_path = 'out/' + controller_name + '.jpg'
        img.save(output_path)
        return output_path

    def __get_rb_config_path(self):
        """Get the RobotBuilder config path, either from the annotator
        config or via a file chooser (gui mode)."""
        robotbuilder_config_path = self.__read_config_val('robotbuilderConfig')
        # Ask the user to pick the robotbuilder config file if it wasn't in the config
        if robotbuilder_config_path is None and self.gui:
            robotbuilder_config_path = filedialog.askopenfilename(
                title="Choose a RobotBuilder config",
                filetypes=(("RobotBuilder Config", "*.yaml"), ("all files", "*.*")))
        return robotbuilder_config_path

    def create_mapping_files(self):
        """Create all of the controller mapping images and the final
        controller mapping pdf (out/out.pdf)."""
        # Load the RobotBuilder config
        rb_conf = _load_yaml(self.__get_rb_config_path())
        pdf = FPDF()
        # Draw all of the images and merge to PDF
        for controller in _get_controllers(rb_conf):
            img_path = self.__draw_mapping_img(controller)
            if img_path is None:  # Skipping controller
                continue
            pdf.add_page()
            pdf.image(img_path, w=200)
        pdf.output('out/out.pdf')
# If we're not a library, then execute the annotator
if __name__ == '__main__':
    # Optional first CLI argument: path to the annotator's own config file.
    cli_config_path = sys.argv[1] if len(sys.argv) > 1 else None
    ControllerAnnotation(cli_config_path, True).create_mapping_files()
# Only import tk if gui
import sys
import json
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from PIL import Image, ImageFont, ImageDraw
from fpdf import FPDF
from datetime import datetime
"""
Default constructor for parsing yaml objects
Parses them generically
"""
def default_ctor(loader, tag_suffix, node):
return loader.construct_mapping(node, deep=True)
"""
Load a YAML config from a file
Returns a dict representing the RobotBuilder config
"""
def _load_yaml(path):
yaml.add_multi_constructor('', default_ctor, Loader=Loader)
data = None
with open(path, 'r') as f:
yaml_elements = yaml.load_all(f, Loader=Loader)
for x in yaml_elements:
data = x
return data
"""
Get controllers from a RobotBuilder config
Returns a list of controllers (OI devices)
"""
def _get_controllers(rb_conf):
# Find the OI section of the RobotBuilder config
oi_section = [x for x in rb_conf['Children'] if x['Base'] == 'OI'][0]
# List of all of the controllers
controllers = oi_section['Children']
return controllers
"""
Get button bindings on a RobotBuilder controller
Returns a list of buttons
"""
def _get_bindings(controller):
return controller['Children']
class ControllerAnnotation:
    """Generates annotated controller-mapping images from a RobotBuilder
    config plus per-controller map files, and bundles them into a PDF.

    config_file_path -- optional YAML config for the annotator itself
    gui -- when True, missing paths are requested via tkinter file dialogs
    (tkinter is imported lazily, only when a dialog is actually needed)
    """

    def __init__(self, config_file_path=None, gui=False):
        self.gui = gui
        self.config = None
        if config_file_path is not None:
            self.config = _load_yaml(config_file_path)
        # Optional mapping of controller name -> controller map file path.
        self.config_map_files = self.__read_config_val('mapFiles')

    def __read_config_val(self, key):
        """Read a value from the config.

        Returns None if not defined or if config doesn't exist.
        """
        if self.config is None:
            return None
        return self.config.get(key, None)
        # BUG FIX: removed an unreachable `return data` that followed the
        # return above (leftover from a copy-paste of _load_yaml).

    def __get_controller_config_path(self, controller_name):
        """Get the path to the controller config.

        If this returns None, the controller is skipped.
        """
        map_file = None
        # Try to get the map file from the config
        if self.config_map_files is not None:
            map_file = self.config_map_files.get(controller_name, None)
        # If we couldn't get it from the config, ask the user for the map file
        if map_file is None and self.gui:
            # Only import tk when the gui path is actually taken.
            from tkinter import filedialog
            map_file = filedialog.askopenfilename(
                initialdir="controllers",
                title="Choose a controller file for " + controller_name,
                filetypes=(("Controller Map", "*.yaml"), ("all files", "*.*")))
        return map_file

    def __get_controller_config(self, controller_name):
        """Get the controller config for a controller.

        Specifies the x/y of the buttons, the font size, the controller
        image, etc. If this returns None, the controller is skipped.
        """
        path = self.__get_controller_config_path(controller_name)
        if path is None or len(path) == 0:
            return None
        return _load_yaml(path)

    def __draw_mapping_img(self, controller):
        """Draw a mapping image for a RobotBuilder controller.

        Saves to out/<controller_name>.jpg and returns that filename,
        or None if the controller doesn't have a config.
        """
        controller_name = controller['Name']  # ex. "Driver Left"
        controller_map = self.__get_controller_config(controller_name)
        if controller_map is None:  # No controller map available, skip
            return None
        # Get the buttons from the controller map
        controller_buttons = controller_map['buttons']
        # Open the base image for the controller and create a pillow drawing context
        img = Image.open(controller_map['image'])
        draw = ImageDraw.Draw(img)
        # Setup the font for Pillow
        font_size = controller_map['fontSize']
        font = ImageFont.truetype('LiberationSans-Regular.ttf', font_size)
        # Write the name of the controller on the upper-left of the image
        draw.text((0, 0), controller_name, (128, 128, 128), font=font)
        # Write the date and time under the controller name
        cur_datetime = datetime.utcnow().strftime('%Y-%m-%d %H:%M')
        draw.text((0, font_size), 'As of ' + cur_datetime + ' UTC', (128, 128, 128), font=font)
        # Positions already holding text, so stacked bindings are shifted down
        # instead of drawn on top of each other.
        taken_positions = {}
        for binding in _get_bindings(controller):
            # Skip bindings that aren't to buttons
            if binding.get('Base', '') != 'Joystick Button':
                continue
            # The ID of the button on the controller
            btn_id = binding['Properties']['Button']['value']
            # The name of the button binding
            btn_name = binding.get('Name', 'No name specified')
            # Find the button on the controller map
            matching_btns = [cbtn for cbtn in controller_buttons if str(cbtn['id']) == btn_id]
            # If the button was not specified in the controller map, give a warning
            if len(matching_btns) == 0:
                print('Warning: No entry in button map', self.__get_controller_config_path(controller_name), 'for button', btn_id, '(' + btn_name + ')')
                continue
            btn = matching_btns[0]
            pos = (btn['x'], btn['y'])
            # Move down one text line at a time until we find a free spot.
            while taken_positions.get(pos, False):
                pos = (pos[0], pos[1] + font_size)
            # Draw the binding name in the area specified by the controller map
            draw.text(pos, btn_name, (0, 0, 0), font=font)
            taken_positions[pos] = True
        # Save the finished image to a file
        output_path = 'out/' + controller_name + '.jpg'
        img.save(output_path)
        return output_path

    def __get_rb_config_path(self):
        """Get the RobotBuilder config path, either from the annotator
        config or via a file chooser (gui mode)."""
        robotbuilder_config_path = self.__read_config_val('robotbuilderConfig')
        # Ask the user to pick the robotbuilder config file if it wasn't in the config
        if robotbuilder_config_path is None and self.gui:
            # Only import tk when the gui path is actually taken.
            from tkinter import filedialog
            robotbuilder_config_path = filedialog.askopenfilename(
                title="Choose a RobotBuilder config",
                filetypes=(("RobotBuilder Config", "*.yaml"), ("all files", "*.*")))
        return robotbuilder_config_path

    def create_mapping_files(self):
        """Create all of the controller mapping images and the final
        controller mapping pdf (out/out.pdf)."""
        # Load the RobotBuilder config
        rb_conf = _load_yaml(self.__get_rb_config_path())
        pdf = FPDF()
        # Draw all of the images and merge to PDF
        for controller in _get_controllers(rb_conf):
            img_path = self.__draw_mapping_img(controller)
            if img_path is None:  # Skipping controller
                continue
            pdf.add_page()
            pdf.image(img_path, w=200)
        pdf.output('out/out.pdf')
# If we're not a library, then execute the annotator
if __name__ == '__main__':
    # First CLI argument (optional) selects the annotator config file.
    path = None
    if len(sys.argv) >= 2:
        path = sys.argv[1]
    ControllerAnnotation(path, True).create_mapping_files()
|
import time
from django.shortcuts import render, render_to_response
from django.views.generic import DetailView, ListView, CreateView, UpdateView, TemplateView, FormView
from django.views.generic.edit import ProcessFormView
from django.views.generic.detail import SingleObjectMixin
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from core.models import operator
from .models import growth, sample, readings, serial_number, recipe_layer, source
from .filters import growth_filter, RelationalFilterView
from .forms import growth_form, sample_form, p_form, split_form, readings_form, comments_form
from .forms import prerun_checklist_form, start_growth_form, prerun_growth_form, prerun_sources_form, postrun_checklist_form
import afm.models
import hall.models
from core.views import SessionHistoryMixin
class growth_list(SessionHistoryMixin, RelationalFilterView):
    """Filterable list of growths; filter fields live in growth_filter."""
    filterset_class = growth_filter
    template_name = 'growths/growth_filter.html'
class afm_compare(ListView):
    """Side-by-side view of the AFM scans selected via ?afm=<id> params."""
    template_name = 'growths/afm_compare.html'

    def get_queryset(self):
        # Collect the requested scan ids from the query string.
        selected_ids = [int(raw_id) for raw_id in self.request.GET.getlist('afm')]
        return afm.models.afm.objects.filter(id__in=selected_ids)
class GrowthDetailView(SessionHistoryMixin, DetailView):
    """Detail page for one growth, looked up by its growth_number slug."""
    model = growth
    template_name = 'growths/growth_detail.html'
    slug_field = 'growth_number'
    context_object_name = 'growth'

    def get_context_data(self, **kwargs):
        # Add the growth's samples plus related AFM and Hall characterization.
        context = super(GrowthDetailView, self).get_context_data(**kwargs)
        context['samples'] = sample.objects.filter(growth=context['object']).order_by('pocket')
        context['char_afm'] = afm.models.afm.objects.filter(growth=context['object']).order_by('sample__pocket', 'sample__piece', 'location', 'scan_number')
        context['char_hall'] = hall.models.hall.objects.filter(growth=context['object']).order_by('sample__pocket', 'sample__piece', 'date')
        return context
class SampleDetailView(SessionHistoryMixin, DetailView):
    """Detail page for one sample: ancestry, siblings, children, characterization."""
    model = sample
    template_name = 'growths/sample_detail.html'
    context_object_name = 'sample'

    def get_context_data(self, **kwargs):
        context = super(SampleDetailView, self).get_context_data(**kwargs)
        parents = []
        obj = context['sample']
        # Walk up the parent chain; the loop terminates because a root
        # sample is its own parent (obj.parent == obj).
        while obj.parent != obj:
            parents.append(obj.parent)
            obj = obj.parent
        # Reset obj to the displayed sample for the queries below
        # (the loop above left it pointing at the root ancestor).
        obj = context['sample']
        context['parents'] = parents[::-1]  # show in reverse order
        context['siblings'] = sample.objects.filter(growth=obj.growth).exclude(pk=obj.id).order_by('-growth__growth_number', 'pocket', 'piece')
        context['children'] = sample.objects.filter(parent=obj).exclude(pk=obj.id).order_by('-growth__growth_number', 'pocket', 'piece')
        context['char_afm'] = afm.models.afm.objects.filter(sample=context['object']).order_by('sample__pocket', 'sample__piece', 'location', 'scan_number')
        context['char_hall'] = hall.models.hall.objects.filter(sample=context['object']).order_by('sample__pocket', 'sample__piece', 'date')
        return context
class SampleFamilyDetailView(ListView):
    """All samples sharing one pocket of one growth (a sample 'family')."""
    model = sample
    template_name = 'growths/sample_family_detail.html'
    context_object_name = 'samples'

    def get_context_data(self, **kwargs):
        # growth number and pocket arrive as URLconf kwargs.
        growth_number = self.kwargs.get('growth', None)
        pocket = self.kwargs.get('pocket', None)
        context = super(SampleFamilyDetailView, self).get_context_data(**kwargs)
        context['samples'] = sample.objects.filter(growth__growth_number=growth_number, pocket=pocket).order_by('pocket', 'piece')
        context['growth'] = growth.get_growth(growth_number)
        context['pocket'] = pocket
        return context
def create_growth(request):
    """Create a growth plus up to six samples (one per platter pocket).

    GET: render unbound forms pre-filled with the next growth number,
    today's date, and the next available ``wbg_`` substrate serials.
    POST: validate; save the growth and the samples whose pocket form
    was touched, record any new in-house serial numbers, then redirect
    to the growth detail page. Invalid POSTs re-render the bound forms.
    """
    if request.method == "POST":
        gform = growth_form(request.POST, instance=growth())
        pforms = [p_form(request.POST, prefix='pf_{0}'.format(i)) for i in range(1, 7)]
        sforms = [sample_form(request.POST, instance=sample(), prefix='sform_{0}'.format(i))
                  for i in range(1, 7)]
        # A pocket is considered in use when its p_form changed.
        sforms_list = [sf for pf, sf in zip(pforms, sforms) if pf.has_changed()]
        if gform.is_valid() and all(sf.is_valid() for sf in sforms_list):
            new_g = gform.save()
            for pocket, sf in enumerate(sforms_list, start=1):
                new_s = sf.save(growthid=new_g, pocketnum=pocket)
                new_s.save()
                # In-house substrates ("wbg_<number>") get a serial_number row.
                # BUG FIX: the original ended with `sn.save` (missing
                # parentheses), a silent no-op; objects.create() already
                # persists the row, so no extra save call is needed.
                if new_s.substrate_serial.startswith('wbg_'):
                    serial_number.objects.create(
                        serial_number=int(new_s.substrate_serial[4:]))
            return HttpResponseRedirect(reverse('growth_detail', args=[new_g.growth_number]))
        # Invalid POST: fall through and re-render the bound forms below.
    else:
        # GET: suggest the next growth number, e.g. "g1234" -> "g1235".
        query = growth.objects.all()
        last = str(query[len(query) - 1])
        # Growth numbers look like "g<4 digits>"; bump the numeric part.
        next_number = 'g' + str(int(last[1:5]) + 1)
        currenttime = time.strftime("%Y-%m-%d")
        gform = growth_form(instance=growth(),
                            initial={'growth_number': next_number, 'date': currenttime})
        # Suggest consecutive substrate serials after the latest one issued.
        nextserial = serial_number.objects.latest('id').serial_number + 1
        pforms = [p_form(prefix='pf_{0}'.format(i)) for i in range(1, 7)]
        sforms = [sample_form(instance=sample(), prefix='sform_{0}'.format(i),
                              initial={'substrate_serial': 'wbg_' + str(nextserial + i - 1)})
                  for i in range(1, 7)]
    context = {'gform': gform}
    for i in range(6):
        context['pf_{0}'.format(i + 1)] = pforms[i]
        context['sform_{0}'.format(i + 1)] = sforms[i]
    return render(request, 'growths/create_growth.html', context)
class SplitSampleView(FormView):
    """Split a sample into N pieces by cloning the parent row once per new piece."""
    form_class = split_form
    template_name = 'growths/split_sample.html'

    def get_form_kwargs(self):
        # Pre-select the parent sample when passed as ?sample=... on GET.
        kwargs = super(SplitSampleView, self).get_form_kwargs()
        if self.request.method == 'GET' and 'sample' in self.request.GET:
            kwargs.update({
                'initial': {'parent': self.request.GET.get('sample')},
            })
        return kwargs

    def form_valid(self, form):
        num_pieces = form.cleaned_data['pieces']
        parent = form.cleaned_data['parent']
        # Existing pieces of this sample, highest letter first.
        piece_siblings = sample.get_piece_siblings(parent).order_by('-piece')
        if piece_siblings:
            last_letter = piece_siblings.first().piece
        else:
            last_letter = 'a'
        # The original sample itself becomes piece 'a'.
        parent.piece = 'a'
        parent.save()
        for i in range(num_pieces - 1):
            # Advance to the next piece letter (Python 2 unichr).
            last_letter = unichr(ord(last_letter) + 1)
            # Clearing pk makes save() INSERT a fresh clone of the parent row.
            parent.pk = None
            parent.piece = last_letter
            parent.save()
        return HttpResponseRedirect(reverse('sample_family_detail', args=(parent.growth.growth_number, parent.pocket)))
class readings_detail(SessionHistoryMixin, DetailView):
    """Read-only view of all instrument readings recorded for one growth."""
    model = growth
    template_name = 'growths/readings_detail.html'
    slug_field = 'growth_number'
    context_object_name = 'growth'

    def get_context_data(self, **kwargs):
        # The context mixin expects self.object to exist before super() runs.
        self.object = None
        context = super(readings_detail, self).get_context_data(**kwargs)
        context["growth"] = self.get_object()
        context["readingslist"] = readings.objects.filter(growth=self.get_object())
        return context
class update_readings(SingleObjectMixin, TemplateView):
    """Edit page for the per-layer instrument readings of one growth.

    GET builds one prefixed readings_form per stored reading; POST
    re-reads those forms and writes valid changes back with a queryset
    update() keyed on (growth, layer).
    """
    context_object_name = 'growth'
    queryset = growth.objects.all()
    slug_field = 'growth_number'
    template_name = 'growths/update_readings.html'

    # All readings-model fields shown on the form. Shared by
    # get_context_data (initial values) and post (cleaned_data keys),
    # replacing ~90 hand-copied assignments in the original.
    READING_FIELDS = [
        'layer', 'layer_desc', 'pyro_out', 'pyro_in', 'ecp_temp', 'tc_out',
        'tc_in', 'motor_rpm', 'gc_pressure', 'gc_position', 'voltage_in',
        'voltage_out', 'current_in', 'current_out', 'top_vp_flow',
        'hydride_inner', 'hydride_outer', 'alkyl_flow_inner',
        'alkyl_push_inner', 'alkyl_flow_middle', 'alkyl_push_middle',
        'alkyl_flow_outer', 'alkyl_push_outer', 'n2_flow', 'h2_flow',
        'nh3_flow', 'hydride_pressure', 'tmga1_flow', 'tmga1_pressure',
        'tmga2_flow', 'tmga2_pressure', 'tega2_flow', 'tega2_pressure',
        'tmin1_flow', 'tmin1_pressure', 'tmal1_flow', 'tmal1_pressure',
        'cp2mg_flow', 'cp2mg_pressure', 'cp2mg_dilution', 'silane_flow',
        'silane_dilution', 'silane_mix', 'silane_pressure',
    ]

    def get_context_data(self, **kwargs):
        # The context mixin expects self.object to exist before super() runs.
        self.object = None
        context = super(update_readings, self).get_context_data(**kwargs)
        growth_obj = self.get_object()
        context["growth"] = growth_obj
        allreadings = readings.objects.filter(growth=growth_obj)
        context["readings"] = allreadings
        formlist = []
        # One prefixed form per stored reading: "reading1", "reading2", ...
        for index, reading in enumerate(allreadings, start=1):
            initial = {'growth': reading.growth}
            for field in self.READING_FIELDS:
                initial[field] = getattr(reading, field)
            formlist.append(readings_form(instance=readings(),
                                          prefix='reading' + str(index),
                                          initial=initial))
        context["readingslist"] = formlist
        return context

    def post(self, request, **kwargs):
        growth_obj = self.get_object()
        numberofreadings = readings.objects.filter(growth=growth_obj).count()
        for x in range(numberofreadings):
            rform = readings_form(request.POST, prefix='reading' + str(x + 1))
            # Invalid forms are skipped; valid ones overwrite the stored row.
            if rform.is_valid():
                new_values = dict((field, rform.cleaned_data[field])
                                  for field in self.READING_FIELDS)
                # BUG FIX: the original wrote `newgrowth = growth=self.get_object()`,
                # shadowing the module-level `growth` model inside this method.
                readings.objects.filter(growth=growth_obj,
                                        layer=new_values['layer']).update(growth=growth_obj,
                                                                          **new_values)
        return HttpResponseRedirect(reverse('update_readings', args=[growth_obj]))
class recipe_detail(SessionHistoryMixin, DetailView):
    """Read-only view of the recipe layers recorded for one growth."""
    model = growth
    template_name = 'growths/recipe_detail.html'
    slug_field = 'growth_number'
    context_object_name = 'growth'

    def get_context_data(self, **kwargs):
        context = super(recipe_detail, self).get_context_data(**kwargs)
        context["recipes"] = recipe_layer.objects.filter(growth=self.get_object())
        return context
class CreateGrowthStartView(TemplateView):
    """Step 1 of the growth-creation wizard: growth number, date, reactor,
    operator, and initial run comments."""
    template_name = 'growths/create_growth_start.html'

    def post(self, request, *args, **kwargs):
        cgsform = start_growth_form(request.POST, prefix='cgsform')
        commentsform = comments_form(request.POST, prefix='commentsform')
        if cgsform.is_valid() and commentsform.is_valid():
            comments = commentsform.cleaned_data['comment_field']
            cgsform.save(runcomments=comments)
            return HttpResponseRedirect(reverse('create_growth_prerun'))
        else:
            # Re-render with the bound forms so validation errors show.
            return render(request, self.template_name,
                          {'cgsform': cgsform, 'commentsform': commentsform})

    def get_context_data(self, **kwargs):
        context = super(CreateGrowthStartView, self).get_context_data(**kwargs)
        try:
            # Suggest the next growth number, e.g. "g1234" -> "g1235".
            last_growth = growth.objects.latest('growth_number').growth_number
            next_growth = 'g{0}'.format(int(last_growth[1:]) + 1)
        # BUG FIX: was a bare `except:` that swallowed every error
        # (including programming mistakes). Only the two expected
        # failures are handled: no growths yet, or a malformed number.
        except (growth.DoesNotExist, ValueError):
            next_growth = 'g1000'
        currenttime = time.strftime('%Y-%m-%d')
        context['cgsform'] = start_growth_form(prefix='cgsform',
                                               initial={
                                                   'growth_number': next_growth,
                                                   'date': currenttime,
                                                   'reactor': 'd180',
                                                   'operator': operator.objects.get(user=self.request.user),
                                               })
        context['commentsform'] = comments_form(prefix='commentsform')
        return context
class CreateGrowthPrerunView(TemplateView):
    """Step 2 of the growth-creation wizard: pre-run checklist, growth
    details, source levels, and the samples loaded in each platter pocket."""
    template_name = 'growths/create_growth_prerun.html'

    def post(self, request, *args, **kwargs):
        lastgrowth = growth.objects.latest('growth_number')
        pcform = prerun_checklist_form(request.POST, prefix='pcform')
        pgform = prerun_growth_form(request.POST, prefix='pgform', instance=lastgrowth)
        sourceform = prerun_sources_form(request.POST, prefix="sourceform")
        commentsform = comments_form(request.POST, prefix='commentsform')
        saved_forms = {}
        sample_forms = []
        for i in range(1, 7):
            pf = p_form(request.POST, prefix='pf_{0}'.format(i))
            saved_forms['pf_{0}'.format(i)] = pf
            sf = sample_form(request.POST, instance=sample(), prefix='sform_{0}'.format(i))
            saved_forms['sform_{0}'.format(i)] = sf
            # Only pockets whose p_form was touched get a sample saved.
            if pf.has_changed():
                sample_forms.append(sf)
        if sample_forms and pcform.is_valid() and pgform.is_valid() and sourceform.is_valid() and all(sf.is_valid() for sf in sample_forms) and commentsform.is_valid():
            lastgrowth = pgform.save()
            for i, sform in enumerate(sample_forms):
                # Pockets are numbered from 1 in loading order.
                sform.save(growth=lastgrowth, pocket=i + 1)
            return HttpResponseRedirect(reverse('create_growth_readings'))
        else:  # at least one form failed validation: re-render with errors
            saved_forms.update({
                'pcform': pcform,
                'pgform': pgform,
                'sourceform': sourceform,
                'commentsform': commentsform
            })
            return render(request, self.template_name, saved_forms)

    def get_context_data(self, **kwargs):
        context = super(CreateGrowthPrerunView, self).get_context_data(**kwargs)
        last_growth = growth.objects.latest('growth_number')
        context['pcform'] = prerun_checklist_form(prefix='pcform')
        # Carry project/platter/reactor defaults over from the previous growth.
        context['pgform'] = prerun_growth_form(prefix='pgform',
                                               initial={
                                                   'project': last_growth.project,
                                                   'investigation': last_growth.investigation,
                                                   'platter': last_growth.platter,
                                                   'reactor': last_growth.reactor,
                                               })
        try:
            last_sources = source.objects.latest('date_time')
            context['sourceform'] = prerun_sources_form(instance=last_sources, prefix='sourceform')
        # BUG FIX: was a bare `except:`; only "no source readings yet"
        # should fall back to a blank form.
        except source.DoesNotExist:
            context['sourceform'] = prerun_sources_form(prefix='sourceform')
        context['commentsform'] = comments_form(prefix='commentsform',
                                                initial={'comment_field': last_growth.run_comments})
        for i in range(1, 7):
            context['pf_{0}'.format(i)] = p_form(prefix='pf_{0}'.format(i))
            context['sform_{0}'.format(i)] = sample_form(prefix='sform_{0}'.format(i), instance=sample(),
                                                         initial={'substrate_serial': 'wbg_{0}'.format(serial_number.generate_serial()),
                                                                  'location': 'Lab'})
        return context
class create_growth_readings(SingleObjectMixin, TemplateView):
    """Wizard step for entering per-layer readings of the current growth.

    BUG FIX: GET looked up the growth via ``latest('growth_number')`` while
    POST used ``latest('id')`` — when ids and growth numbers disagree the
    readings were saved against a different growth than the one displayed.
    Both paths now use ``latest('growth_number')``.
    """
    context_object_name = 'growth'
    queryset = growth.objects.all()
    template_name = 'growths/create_growth_readings.html'

    # Per-layer reading fields shuttled between model instances and forms.
    _READING_FIELDS = (
        'layer', 'layer_desc', 'pyro_out', 'pyro_in', 'ecp_temp', 'tc_out',
        'tc_in', 'motor_rpm', 'gc_pressure', 'gc_position', 'voltage_in',
        'voltage_out', 'current_in', 'current_out', 'top_vp_flow',
        'hydride_inner', 'hydride_outer', 'alkyl_flow_inner',
        'alkyl_push_inner', 'alkyl_flow_middle', 'alkyl_push_middle',
        'alkyl_flow_outer', 'alkyl_push_outer', 'n2_flow', 'h2_flow',
        'nh3_flow', 'hydride_pressure', 'tmga1_flow', 'tmga1_pressure',
        'tmga2_flow', 'tmga2_pressure', 'tega2_flow', 'tega2_pressure',
        'tmin1_flow', 'tmin1_pressure', 'tmal1_flow', 'tmal1_pressure',
        'cp2mg_flow', 'cp2mg_pressure', 'cp2mg_dilution', 'silane_flow',
        'silane_dilution', 'silane_mix', 'silane_pressure')

    def get_context_data(self, **kwargs):
        """One pre-filled readings form per existing reading, plus the
        comments form, for the most recent growth."""
        self.object = None
        context = super(create_growth_readings, self).get_context_data(**kwargs)
        lastgrowth = growth.objects.latest('growth_number')
        context["commentscontext"] = comments_form(prefix='commentsform',
                                                   initial={'comment_field': lastgrowth.run_comments})
        context["growth"] = lastgrowth
        allreadings = readings.objects.filter(growth=lastgrowth)
        context["readings"] = allreadings
        formlist = []
        for i, reading in enumerate(allreadings, start=1):
            initial = dict((f, getattr(reading, f)) for f in self._READING_FIELDS)
            initial['growth'] = reading.growth
            formlist.append(readings_form(instance=readings(),
                                          prefix='reading' + str(i),
                                          initial=initial))
        context["readingslist"] = formlist
        return context

    def post(self, request, **kwargs):
        """Update the comments and every valid readings form, then redirect
        back to this step."""
        # Same lookup as get_context_data so GET and POST agree on the growth.
        lastgrowth = growth.objects.latest('growth_number')
        commentsform = comments_form(request.POST, prefix='commentsform')
        if commentsform.is_valid():
            # Set the field on the instance and save (instances have no .update()).
            lastgrowth.run_comments = commentsform.cleaned_data['comment_field']
            lastgrowth.save()
        numberofreadings = readings.objects.filter(growth=lastgrowth).count()
        for x in range(numberofreadings):
            rform = readings_form(request.POST, prefix='reading' + str(x + 1))
            if rform.is_valid():
                updates = dict((f, rform.cleaned_data[f]) for f in self._READING_FIELDS)
                # Readings are keyed by (growth, layer) within a growth.
                readings.objects.filter(growth=lastgrowth,
                                        layer=updates['layer']).update(**updates)
        return HttpResponseRedirect(reverse('create_growth_readings'))
def create_growth_postrun(request):
    """Final wizard step: post-run checklist, updated source levels, and
    run comments for the most recent growth.

    BUG FIX: the original called ``lastgrowth.update(...)`` on a model
    *instance* — ``update()`` is a QuerySet method, so that line raised
    AttributeError. The field is now assigned and saved explicitly.
    """
    if request.method == "POST":
        prcform = postrun_checklist_form(request.POST, prefix='prcform')
        prsform = prerun_sources_form(request.POST, prefix='prsform')
        commentsform = comments_form(request.POST, prefix='commentsform')
        if prcform.is_valid() and prsform.is_valid() and commentsform.is_valid():
            lastgrowth = growth.objects.latest('growth_number')
            lastgrowth.run_comments = commentsform.cleaned_data['comment_field']
            lastgrowth.save()
            prsform.save()
            return HttpResponseRedirect(reverse('growth_detail', args=[lastgrowth]))
        # Invalid POST: fall through and re-render the bound forms below.
    else:
        lastgrowth = growth.objects.latest('growth_number')
        prcform = postrun_checklist_form(prefix='prcform')
        commentsform = comments_form(prefix='commentsform', initial={'comment_field': lastgrowth.run_comments})
        try:
            last_sources = source.objects.latest('date_time')
            prsform = prerun_sources_form(instance=last_sources, prefix='prsform')
        except source.DoesNotExist:
            # No sources recorded yet: present a blank form.
            prsform = prerun_sources_form(prefix='prsform')
    return render(request, 'growths/create_growth_postrun.html', {'prcform': prcform, 'prsform': prsform, 'commentsform': commentsform})
Fix growth-selection bug in the create_growth_readings view

GET and POST selected different growths (GET used the latest
growth_number while POST used the latest id), so readings could be
saved against the wrong growth.
import time
from django.shortcuts import render, render_to_response
from django.views.generic import DetailView, ListView, CreateView, UpdateView, TemplateView, FormView
from django.views.generic.edit import ProcessFormView
from django.views.generic.detail import SingleObjectMixin
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from core.models import operator
from .models import growth, sample, readings, serial_number, recipe_layer, source
from .filters import growth_filter, RelationalFilterView
from .forms import growth_form, sample_form, p_form, split_form, readings_form, comments_form
from .forms import prerun_checklist_form, start_growth_form, prerun_growth_form, prerun_sources_form, postrun_checklist_form
import afm.models
import hall.models
from core.views import SessionHistoryMixin
class growth_list(SessionHistoryMixin, RelationalFilterView):
    """Filterable list of all growths.

    SessionHistoryMixin records the visit in the user's session history;
    filtering behavior is supplied entirely by growth_filter.
    """
    filterset_class = growth_filter
    template_name = 'growths/growth_filter.html'
class afm_compare(ListView):
    """Side-by-side comparison of AFM scans selected via repeated
    ``?afm=<id>`` query parameters."""
    template_name = 'growths/afm_compare.html'

    def get_queryset(self):
        # Each repeated 'afm' query value is the primary key of one scan.
        requested_ids = [int(raw_id) for raw_id in self.request.GET.getlist('afm')]
        return afm.models.afm.objects.filter(id__in=requested_ids)
class GrowthDetailView(SessionHistoryMixin, DetailView):
    """Detail page for one growth: its samples plus the AFM and Hall
    characterization data collected on them."""
    model = growth
    template_name = 'growths/growth_detail.html'
    slug_field = 'growth_number'
    context_object_name = 'growth'

    def get_context_data(self, **kwargs):
        context = super(GrowthDetailView, self).get_context_data(**kwargs)
        this_growth = context['object']
        context['samples'] = sample.objects.filter(growth=this_growth).order_by('pocket')
        context['char_afm'] = afm.models.afm.objects.filter(growth=this_growth).order_by('sample__pocket', 'sample__piece', 'location', 'scan_number')
        context['char_hall'] = hall.models.hall.objects.filter(growth=this_growth).order_by('sample__pocket', 'sample__piece', 'date')
        return context
class SampleDetailView(SessionHistoryMixin, DetailView):
    """Detail page for one sample: its ancestry, siblings, children, and
    characterization data."""
    model = sample
    template_name = 'growths/sample_detail.html'
    context_object_name = 'sample'

    def get_context_data(self, **kwargs):
        context = super(SampleDetailView, self).get_context_data(**kwargs)
        this_sample = context['sample']
        # Walk up the parent chain; a root sample is its own parent.
        ancestry = []
        node = this_sample
        while node.parent != node:
            ancestry.append(node.parent)
            node = node.parent
        context['parents'] = list(reversed(ancestry))  # oldest ancestor first
        context['siblings'] = sample.objects.filter(growth=this_sample.growth).exclude(pk=this_sample.id).order_by('-growth__growth_number', 'pocket', 'piece')
        context['children'] = sample.objects.filter(parent=this_sample).exclude(pk=this_sample.id).order_by('-growth__growth_number', 'pocket', 'piece')
        context['char_afm'] = afm.models.afm.objects.filter(sample=context['object']).order_by('sample__pocket', 'sample__piece', 'location', 'scan_number')
        context['char_hall'] = hall.models.hall.objects.filter(sample=context['object']).order_by('sample__pocket', 'sample__piece', 'date')
        return context
class SampleFamilyDetailView(ListView):
    """All pieces of the sample grown in one pocket of one growth."""
    model = sample
    template_name = 'growths/sample_family_detail.html'
    context_object_name = 'samples'

    def get_context_data(self, **kwargs):
        context = super(SampleFamilyDetailView, self).get_context_data(**kwargs)
        growth_number = self.kwargs.get('growth', None)
        pocket_number = self.kwargs.get('pocket', None)
        context['samples'] = sample.objects.filter(growth__growth_number=growth_number, pocket=pocket_number).order_by('pocket', 'piece')
        context['growth'] = growth.get_growth(growth_number)
        context['pocket'] = pocket_number
        return context
def create_growth(request):
    """Legacy single-page growth creation view.

    GET renders the growth form plus six pocket/sample form pairs with
    suggested growth number, date, and serial numbers. POST saves the
    growth and every sample whose pocket form was filled in, recording
    auto-generated ('wbg_'-prefixed) serial numbers as it goes.

    BUG FIX: the original wrote ``sn.save`` (no parentheses) after
    ``serial_number.objects.create(...)`` — a dead attribute access.
    ``create()`` already persists the row, so the line is simply dropped.
    """
    if request.method == "POST":
        gform = growth_form(request.POST, instance=growth())
        pforms = [p_form(request.POST, prefix='pf_{0}'.format(i)) for i in range(1, 7)]
        sforms = [sample_form(request.POST, instance=sample(), prefix='sform_{0}'.format(i))
                  for i in range(1, 7)]
        # Only consider sample forms whose pocket form was actually touched.
        sforms_list = [sf for pf, sf in zip(pforms, sforms) if pf.has_changed()]
        if gform.is_valid() and all(sf.is_valid() for sf in sforms_list):
            new_g = gform.save()
            for pocket, sf in enumerate(sforms_list, start=1):
                new_s = sf.save(growthid=new_g, pocketnum=pocket)
                new_s.save()
                if new_s.substrate_serial.startswith('wbg_'):
                    # Record the numeric tail of auto-generated serials so the
                    # next GET can suggest fresh ones.
                    serial_number.objects.create(serial_number=int(new_s.substrate_serial[4:]))
            return HttpResponseRedirect(reverse('growth_detail', args=[new_g.growth_number]))
        # NOTE(review): an invalid POST falls off the end and returns None,
        # as in the original — consider re-rendering the bound forms instead.
    else:
        query = growth.objects.all()
        last = str(query[len(query) - 1])
        # NOTE(review): assumes growth numbers look like 'gNNNN' (four digits) —
        # same assumption as the original character-by-character parse.
        last = 'g' + str(int(last[1:5]) + 1)
        currenttime = time.strftime("%Y-%m-%d")
        gform = growth_form(instance=growth(), initial={'growth_number': last, 'date': currenttime})
        nextserial = serial_number.objects.latest('id').serial_number + 1
        context = {'gform': gform}
        for i in range(1, 7):
            context['pf_{0}'.format(i)] = p_form(prefix='pf_{0}'.format(i))
            context['sform_{0}'.format(i)] = sample_form(
                instance=sample(), prefix='sform_{0}'.format(i),
                initial={'substrate_serial': 'wbg_' + str(nextserial + i - 1)})
        return render(request, 'growths/create_growth.html', context)
class SplitSampleView(FormView):
    """Split a sample into multiple lettered pieces ('a', 'b', ...)."""
    form_class = split_form
    template_name = 'growths/split_sample.html'
    def get_form_kwargs(self):
        # Pre-fill the parent field when the URL carries a ?sample= parameter.
        kwargs = super(SplitSampleView, self).get_form_kwargs()
        if self.request.method == 'GET' and 'sample' in self.request.GET:
            kwargs.update({
                'initial': {'parent': self.request.GET.get('sample')},
            })
        return kwargs
    def form_valid(self, form):
        num_pieces = form.cleaned_data['pieces']
        parent = form.cleaned_data['parent']
        # Continue lettering after the highest existing piece letter, if any.
        # NOTE(review): relies on get_piece_siblings() returning a queryset
        # ordered here so .first() is the lexicographically last piece —
        # semantics of that helper are not visible in this file.
        piece_siblings = sample.get_piece_siblings(parent).order_by('-piece')
        if piece_siblings:
            last_letter = piece_siblings.first().piece
        else:
            last_letter = 'a'
        # The original sample becomes piece 'a'.
        # NOTE(review): this overwrites parent.piece even when the parent
        # already carried a different letter — confirm that is intended.
        parent.piece = 'a'
        parent.save()
        for i in range(num_pieces - 1):
            last_letter = unichr(ord(last_letter) + 1)
            # Clearing pk makes save() INSERT a new row that is otherwise a
            # copy of the parent, differing only in its piece letter.
            parent.pk = None
            parent.piece = last_letter
            parent.save()
        return HttpResponseRedirect(reverse('sample_family_detail', args=(parent.growth.growth_number, parent.pocket)))
class readings_detail(SessionHistoryMixin, DetailView):
    """Read-only display of all readings recorded for a growth."""
    model = growth
    template_name = 'growths/readings_detail.html'
    slug_field = 'growth_number'
    context_object_name = 'growth'

    def get_context_data(self, **kwargs):
        self.object = None
        context = super(readings_detail, self).get_context_data(**kwargs)
        this_growth = self.get_object()
        context["growth"] = this_growth
        context["readingslist"] = readings.objects.filter(growth=this_growth)
        return context
class update_readings(SingleObjectMixin, TemplateView):
    """Edit the per-layer readings of a specific growth (looked up by its
    growth_number slug).

    BUG FIX: the original POST contained ``newgrowth = growth=self.get_object()``,
    a chained assignment that rebound the name ``growth`` — shadowing the
    module-level growth model inside the method. The 44-line field-by-field
    copy is also collapsed into a field list.
    """
    context_object_name = 'growth'
    queryset = growth.objects.all()
    slug_field = 'growth_number'
    template_name = 'growths/update_readings.html'

    # Per-layer reading fields shuttled between model instances and forms.
    _READING_FIELDS = (
        'layer', 'layer_desc', 'pyro_out', 'pyro_in', 'ecp_temp', 'tc_out',
        'tc_in', 'motor_rpm', 'gc_pressure', 'gc_position', 'voltage_in',
        'voltage_out', 'current_in', 'current_out', 'top_vp_flow',
        'hydride_inner', 'hydride_outer', 'alkyl_flow_inner',
        'alkyl_push_inner', 'alkyl_flow_middle', 'alkyl_push_middle',
        'alkyl_flow_outer', 'alkyl_push_outer', 'n2_flow', 'h2_flow',
        'nh3_flow', 'hydride_pressure', 'tmga1_flow', 'tmga1_pressure',
        'tmga2_flow', 'tmga2_pressure', 'tega2_flow', 'tega2_pressure',
        'tmin1_flow', 'tmin1_pressure', 'tmal1_flow', 'tmal1_pressure',
        'cp2mg_flow', 'cp2mg_pressure', 'cp2mg_dilution', 'silane_flow',
        'silane_dilution', 'silane_mix', 'silane_pressure')

    def get_context_data(self, **kwargs):
        """One pre-filled readings form per existing reading of this growth."""
        self.object = None
        context = super(update_readings, self).get_context_data(**kwargs)
        this_growth = self.get_object()
        context["growth"] = this_growth
        allreadings = readings.objects.filter(growth=this_growth)
        context["readings"] = allreadings
        formlist = []
        for i, reading in enumerate(allreadings, start=1):
            initial = dict((f, getattr(reading, f)) for f in self._READING_FIELDS)
            initial['growth'] = reading.growth
            formlist.append(readings_form(instance=readings(),
                                          prefix='reading' + str(i),
                                          initial=initial))
        context["readingslist"] = formlist
        return context

    def post(self, request, **kwargs):
        """Apply every valid readings form to the matching (growth, layer) row."""
        this_growth = self.get_object()
        numberofreadings = readings.objects.filter(growth=this_growth).count()
        for x in range(numberofreadings):
            rform = readings_form(request.POST, prefix='reading' + str(x + 1))
            if rform.is_valid():
                updates = dict((f, rform.cleaned_data[f]) for f in self._READING_FIELDS)
                readings.objects.filter(growth=this_growth,
                                        layer=updates['layer']).update(growth=this_growth, **updates)
        return HttpResponseRedirect(reverse('update_readings', args=[this_growth]))
class recipe_detail(SessionHistoryMixin, DetailView):
    """Show the recipe layers recorded for one growth."""
    model = growth
    template_name = 'growths/recipe_detail.html'
    slug_field = 'growth_number'
    context_object_name = 'growth'

    def get_context_data(self, **kwargs):
        context = super(recipe_detail, self).get_context_data(**kwargs)
        this_growth = self.get_object()
        context["recipes"] = recipe_layer.objects.filter(growth=this_growth)
        return context
class CreateGrowthStartView(TemplateView):
    """First wizard step: create the growth record with number, date,
    reactor, operator, and initial comments."""
    template_name = 'growths/create_growth_start.html'

    def post(self, request, *args, **kwargs):
        cgsform = start_growth_form(request.POST, prefix='cgsform')
        commentsform = comments_form(request.POST, prefix='commentsform')
        if cgsform.is_valid() and commentsform.is_valid():
            comments = commentsform.cleaned_data['comment_field']
            cgsform.save(runcomments=comments)
            return HttpResponseRedirect(reverse('create_growth_prerun'))
        # Invalid: re-render the bound forms so errors are shown.
        return render(request, self.template_name,
                      {'cgsform': cgsform, 'commentsform': commentsform})

    def get_context_data(self, **kwargs):
        context = super(CreateGrowthStartView, self).get_context_data(**kwargs)
        try:
            last_growth = growth.objects.latest('growth_number').growth_number
            next_growth = 'g{0}'.format(int(last_growth[1:]) + 1)
        except (growth.DoesNotExist, ValueError):
            # No growths yet, or an unparsable growth number: start at g1000.
            # (Was a bare ``except:``, which also hid real database errors.)
            next_growth = 'g1000'
        currenttime = time.strftime('%Y-%m-%d')
        context['cgsform'] = start_growth_form(prefix='cgsform',
                                               initial={
                                                   'growth_number': next_growth,
                                                   'date': currenttime,
                                                   'reactor': 'd180',
                                                   'operator': operator.objects.get(user=self.request.user),
                                               })
        context['commentsform'] = comments_form(prefix='commentsform')
        return context
class CreateGrowthPrerunView(TemplateView):
    """Second wizard step: pre-run checklist, growth details, source
    levels, comments, and up to six pocket/sample form pairs."""
    template_name = 'growths/create_growth_prerun.html'

    def post(self, request, *args, **kwargs):
        """Save the growth details and all filled-in samples, then advance
        to the readings step; on any validation failure re-render with the
        bound forms."""
        lastgrowth = growth.objects.latest('growth_number')
        pcform = prerun_checklist_form(request.POST, prefix='pcform')
        pgform = prerun_growth_form(request.POST, prefix='pgform', instance=lastgrowth)
        sourceform = prerun_sources_form(request.POST, prefix="sourceform")
        commentsform = comments_form(request.POST, prefix='commentsform')
        saved_forms = {}
        sample_forms = []
        # Only the sample forms whose pocket form was actually touched are
        # validated and saved.
        for i in range(1, 7):
            pf = p_form(request.POST, prefix='pf_{0}'.format(i))
            saved_forms['pf_{0}'.format(i)] = pf
            sf = sample_form(request.POST, instance=sample(), prefix='sform_{0}'.format(i))
            saved_forms['sform_{0}'.format(i)] = sf
            if pf.has_changed():
                sample_forms.append(sf)
        if (sample_forms and pcform.is_valid() and pgform.is_valid()
                and sourceform.is_valid()
                and all(sf.is_valid() for sf in sample_forms)
                and commentsform.is_valid()):
            # NOTE(review): sourceform and commentsform are validated but
            # never saved here — confirm whether that is intentional.
            lastgrowth = pgform.save()
            for i, sform in enumerate(sample_forms):
                sform.save(growth=lastgrowth, pocket=i + 1)
            return HttpResponseRedirect(reverse('create_growth_readings'))
        # Form did not validate: re-render every bound form.
        saved_forms.update({
            'pcform': pcform,
            'pgform': pgform,
            'sourceform': sourceform,
            'commentsform': commentsform
        })
        return render(request, self.template_name, saved_forms)

    def get_context_data(self, **kwargs):
        """Build the pre-run page context with forms pre-filled from the
        most recent growth and sources."""
        context = super(CreateGrowthPrerunView, self).get_context_data(**kwargs)
        last_growth = growth.objects.latest('growth_number')
        context['pcform'] = prerun_checklist_form(prefix='pcform')
        context['pgform'] = prerun_growth_form(prefix='pgform',
                                               initial={
                                                   'project': last_growth.project,
                                                   'investigation': last_growth.investigation,
                                                   'platter': last_growth.platter,
                                                   'reactor': last_growth.reactor,
                                               })
        try:
            last_sources = source.objects.latest('date_time')
            context['sourceform'] = prerun_sources_form(instance=last_sources, prefix='sourceform')
        except source.DoesNotExist:
            # No sources recorded yet: fall back to a blank form.
            # (Was a bare ``except:``, which also hid real database errors.)
            context['sourceform'] = prerun_sources_form(prefix='sourceform')
        context['commentsform'] = comments_form(prefix='commentsform',
                                                initial={'comment_field': last_growth.run_comments})
        for i in range(1, 7):
            context['pf_{0}'.format(i)] = p_form(prefix='pf_{0}'.format(i))
            context['sform_{0}'.format(i)] = sample_form(prefix='sform_{0}'.format(i), instance=sample(),
                                                         initial={'substrate_serial': 'wbg_{0}'.format(serial_number.generate_serial()),
                                                                  'location': 'Lab'})
        return context
class create_growth_readings(SingleObjectMixin, TemplateView):
    """Wizard step for entering per-layer readings of the current growth.

    BUG FIX: after the GET/POST growth-selection fix, ``lastgrowth`` is a
    model *instance* (from ``latest()``), yet the code still called
    ``lastgrowth.update(...)`` (a QuerySet-only method) and then
    ``lastgrowth[0]`` — both crash at runtime. The comments are now saved
    via attribute assignment + ``save()`` and the stray indexing removed.
    """
    context_object_name = 'growth'
    queryset = growth.objects.all()
    template_name = 'growths/create_growth_readings.html'

    # Per-layer reading fields shuttled between model instances and forms.
    _READING_FIELDS = (
        'layer', 'layer_desc', 'pyro_out', 'pyro_in', 'ecp_temp', 'tc_out',
        'tc_in', 'motor_rpm', 'gc_pressure', 'gc_position', 'voltage_in',
        'voltage_out', 'current_in', 'current_out', 'top_vp_flow',
        'hydride_inner', 'hydride_outer', 'alkyl_flow_inner',
        'alkyl_push_inner', 'alkyl_flow_middle', 'alkyl_push_middle',
        'alkyl_flow_outer', 'alkyl_push_outer', 'n2_flow', 'h2_flow',
        'nh3_flow', 'hydride_pressure', 'tmga1_flow', 'tmga1_pressure',
        'tmga2_flow', 'tmga2_pressure', 'tega2_flow', 'tega2_pressure',
        'tmin1_flow', 'tmin1_pressure', 'tmal1_flow', 'tmal1_pressure',
        'cp2mg_flow', 'cp2mg_pressure', 'cp2mg_dilution', 'silane_flow',
        'silane_dilution', 'silane_mix', 'silane_pressure')

    def get_context_data(self, **kwargs):
        """One pre-filled readings form per existing reading, plus the
        comments form, for the most recent growth."""
        self.object = None
        context = super(create_growth_readings, self).get_context_data(**kwargs)
        lastgrowth = growth.objects.latest('growth_number')
        context["commentscontext"] = comments_form(prefix='commentsform',
                                                   initial={'comment_field': lastgrowth.run_comments})
        context["growth"] = lastgrowth
        allreadings = readings.objects.filter(growth=lastgrowth)
        context["readings"] = allreadings
        formlist = []
        for i, reading in enumerate(allreadings, start=1):
            initial = dict((f, getattr(reading, f)) for f in self._READING_FIELDS)
            initial['growth'] = reading.growth
            formlist.append(readings_form(instance=readings(),
                                          prefix='reading' + str(i),
                                          initial=initial))
        context["readingslist"] = formlist
        return context

    def post(self, request, **kwargs):
        """Update the comments and every valid readings form, then redirect
        back to this step."""
        lastgrowth = growth.objects.latest('growth_number')
        commentsform = comments_form(request.POST, prefix='commentsform')
        if commentsform.is_valid():
            # Instance, not queryset: assign and save.
            lastgrowth.run_comments = commentsform.cleaned_data['comment_field']
            lastgrowth.save()
        numberofreadings = readings.objects.filter(growth=lastgrowth).count()
        for x in range(numberofreadings):
            rform = readings_form(request.POST, prefix='reading' + str(x + 1))
            if rform.is_valid():
                updates = dict((f, rform.cleaned_data[f]) for f in self._READING_FIELDS)
                # Readings are keyed by (growth, layer) within a growth.
                readings.objects.filter(growth=lastgrowth,
                                        layer=updates['layer']).update(**updates)
        return HttpResponseRedirect(reverse('create_growth_readings'))
def create_growth_postrun(request):
    """Handle the post-run checklist page for the most recent growth.

    On a valid POST: store the run comments on the latest growth, save the
    sources form, and redirect to the growth detail page.  On GET (or an
    invalid POST) the forms are (re)rendered.

    BUG FIX: the original called ``lastgrowth.update(...)`` -- Django model
    instances have no ``update`` method (that is a QuerySet method), so every
    valid POST raised AttributeError.  Assign the field and ``save()`` instead.
    """
    if request.method == "POST":
        prcform = postrun_checklist_form(request.POST, prefix='prcform')
        prsform = prerun_sources_form(request.POST, prefix='prsform')
        commentsform = comments_form(request.POST, prefix='commentsform')
        if prcform.is_valid() and prsform.is_valid() and commentsform.is_valid():
            print ("successful validation. Now let's do something.")
            lastgrowth = growth.objects.latest('growth_number')
            lastgrowth.run_comments = commentsform.cleaned_data['comment_field']
            lastgrowth.save()
            prsform.save()
            return HttpResponseRedirect(reverse('growth_detail', args=[lastgrowth]))
        # Invalid POST: fall through and re-render the bound forms so the user
        # sees the validation errors (the original returned None here, which
        # Django reports as a server error).
    else:
        lastgrowth = growth.objects.latest('growth_number')
        prcform = postrun_checklist_form(prefix='prcform')
        commentsform = comments_form(prefix='commentsform', initial={'comment_field': lastgrowth.run_comments})
        try:
            # Pre-fill the sources form with the most recent source record.
            last_sources = source.objects.latest('date_time')
            prsform = prerun_sources_form(instance=last_sources, prefix='prsform')
        except source.DoesNotExist:
            # No source rows exist yet -- start from an empty form.  (The
            # original bare ``except:`` also hid unrelated errors.)
            prsform = prerun_sources_form(prefix='prsform')
    return render(request, 'growths/create_growth_postrun.html', {'prcform': prcform, 'prsform': prsform, 'commentsform': commentsform})
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (C) USC Information Sciences Institute
# Author: Vladimir M. Zaytsev <zaytsev@usc.edu>
# URL: <http://nlg.isi.edu/>
# For more information, see README.md
# For license information, see LICENSE
# combine.py – a simple tool for combining many text file into bigger one.
# Usage example:
# ./combine.py --dir datasets/russian/untagged/ \
# --ofile combined.txt \
# -p "*.rus" \
# -ie "utf-16" \
# -oe "utf-8"
import os
import sys
import argparse
import collections
from fnmatch import fnmatch
def accumulate_table(pattern, dir_name, files, result_table):
    """Fold the rows of every file in *files* matching *pattern* into
    *result_table* (a collections.Counter).

    Each row looks like ``key1, key2, ..., freq``; the trailing field is the
    count and everything before it (re-joined with ", ") is the counter key.
    """
    matching = (name for name in files if fnmatch(name, pattern))
    for name in matching:
        path = os.path.join(dir_name, name)
        with open(path, "r") as handle:
            for line in handle:
                parts = line.split(", ")
                count = int(parts[-1])
                key = ", ".join(parts[:-1])
                result_table[key] += count
def write_table(result_table, ofile):
    """Write *result_table* to *ofile* as ``key, freq`` lines, most frequent
    entries first (Counter.most_common order)."""
    for r_key, freq in result_table.most_common():
        ofile.write("%s, %d\n" % (r_key, freq))
if __name__ == "__main__":
    # Combine many "key, ..., freq" csv files into one aggregated file.
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--dir", default=None, type=str, help="A path to the directory containing the csv "
                                                                   "files which should be reduced.")
    parser.add_argument("-o", "--ofile", default=None, type=str, help="A path to the result file")
    parser.add_argument("-p", "--pattern", default="*", type=str, help="File name pattern which should be applied to "
                                                                      "filter the files which should be reduced.")
    # BUG FIX: type=int so the (0, 1) choices check can ever succeed --
    # argparse compares the parsed value against the choices, and the default
    # str type never equals the ints 0/1.
    parser.add_argument("-r", "--recursive", default=0, type=int, choices=(0, 1),
                        help="Recursively traverse sub-dirs of the input directory.")
    args = parser.parse_args()
    # BUG FIX: the Python 2 builtins file() and os.path.walk() no longer
    # exist; use open() and os.walk() instead.
    out_file = open(args.ofile, "w") if args.ofile is not None else sys.stdout
    result_table = collections.Counter()
    # NOTE(review): the -r flag was never honoured by the original
    # os.path.walk() call (it always recursed); os.walk() preserves that
    # always-recursive behaviour.
    for dir_name, _subdirs, files in os.walk(args.dir):
        accumulate_table(args.pattern, dir_name, files, result_table)
    write_table(result_table, out_file)
    # Only close a stream we opened ourselves; never close sys.stdout.
    if out_file is not sys.stdout:
        out_file.close()
fixed comment in reduce.py
#!/usr/bin/env python
# coding: utf-8
# Copyright (C) USC Information Sciences Institute
# Author: Vladimir M. Zaytsev <zaytsev@usc.edu>
# URL: <http://nlg.isi.edu/>
# For more information, see README.md
# For license information, see LICENSE
import os
import sys
import argparse
import collections
from fnmatch import fnmatch
def accumulate_table(pattern, dir_name, files, result_table):
    """Accumulate counts from every file in *files* whose name matches
    *pattern*, into the Counter *result_table*.

    Rows are ``key1, key2, ..., freq``: the last comma-separated field is an
    integer count, the rest (re-joined with ", ") is the key.
    """
    for name in files:
        if not fnmatch(name, pattern):
            continue
        with open(os.path.join(dir_name, name), "r") as handle:
            for line in handle:
                parts = line.split(", ")
                result_table[", ".join(parts[:-1])] += int(parts[-1])
def write_table(result_table, ofile):
    """Dump *result_table* to *ofile*, one ``key, freq`` line per entry,
    ordered by descending frequency."""
    for entry in result_table.most_common():
        ofile.write("{0}, {1}\n".format(*entry))
if __name__ == "__main__":
    # Reduce many "key, ..., freq" csv files into a single aggregated file.
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--dir", default=None, type=str, help="A path to the directory containing the csv "
                                                                   "files which should be reduced.")
    parser.add_argument("-o", "--ofile", default=None, type=str, help="A path to the result file")
    parser.add_argument("-p", "--pattern", default="*", type=str, help="File name pattern which should be applied to "
                                                                      "filter the files which should be reduced.")
    # BUG FIX: without type=int the parsed value is a str and can never match
    # the int choices (0, 1), so "-r 1" always errored out.
    parser.add_argument("-r", "--recursive", default=0, type=int, choices=(0, 1),
                        help="Recursively traverse sub-dirs of the input directory.")
    args = parser.parse_args()
    # BUG FIX: file() and os.path.walk() are Python-2-only; replaced with
    # open() and os.walk().
    out_file = open(args.ofile, "w") if args.ofile is not None else sys.stdout
    result_table = collections.Counter()
    # NOTE(review): os.path.walk() always recursed and ignored -r; os.walk()
    # keeps that always-recursive behaviour.
    for dir_name, _subdirs, files in os.walk(args.dir):
        accumulate_table(args.pattern, dir_name, files, result_table)
    write_table(result_table, out_file)
    # Do not close sys.stdout -- only a file we opened.
    if out_file is not sys.stdout:
        out_file.close()
"""Retrieve Account Credential from Gate API."""
import logging
import murl
import requests
from ..consts import API_URL
LOG = logging.getLogger(__name__)
def get_env_credential(env='dev'):
    """Look up the Spinnaker account credential for *env*.

    Args:
        env (str): Environment name to find credentials for.

    Returns:
        dict: The credential record returned by Gate's
        ``/credentials/{env}`` endpoint -- a mapping with keys such as
        ``accountId``, ``accountType``, ``assumeRole``, ``cloudProvider``,
        ``regions`` (list of region dicts with ``availabilityZones`` and
        ``preferredZones``), ``sessionName`` and ``type``.

    Raises:
        AssertionError: Gate did not answer with a success status.
    """
    gate_url = murl.Url(API_URL)
    gate_url.path = '/'.join(['credentials', env])
    response = requests.get(gate_url.url)
    assert response.ok, 'Could not get credentials from Spinnaker.'
    credential = response.json()
    LOG.debug('Credentials found:\n%s', credential)
    return credential
docs: Fix argument style
"""Retrieve Account Credential from Gate API."""
import logging
import murl
import requests
from ..consts import API_URL
LOG = logging.getLogger(__name__)
def get_env_credential(env='dev'):
    """Fetch the Spinnaker account credential for *env* from the Gate API.

    Args:
        env (str): Environment name to find credentials for.

    Returns:
        dict: Complete credential as served by ``/credentials/{env}``,
        including ``accountId``, ``accountType``, ``assumeRole``,
        ``cloudProvider``, ``defaultKeyPair``, ``regions``,
        ``requiredGroupMembership``, ``sessionName`` and ``type``.

    Raises:
        AssertionError: The credentials request did not succeed.
    """
    endpoint = murl.Url(API_URL)
    endpoint.path = 'credentials/{0}'.format(env)
    reply = requests.get(endpoint.url)
    assert reply.ok, 'Could not get credentials from Spinnaker.'
    found = reply.json()
    LOG.debug('Credentials found:\n%s', found)
    return found
|
import os
import re
import sys
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.font_manager import FontProperties
from matplotlib.offsetbox import HPacker, TextArea, AnnotationBbox
from matplotlib.patches import FancyArrowPatch, ArrowStyle, Polygon
from matplotlib.ticker import NullFormatter, NullLocator, MaxNLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.stats import scoreatpercentile
from fluff.color import create_colormap
from fluff.config import FONTSIZE
from fluff.fluffio import load_annotation
from fluff.track import Track
# Default track colors (red, green, blue from the ColorBrewer Set1 palette).
DEFAULT_COLORS = ["#e41a1c", "#4daf4a", "#377eb8"]
# Arrow style used for gene-direction arrows in annotation tracks.
# BUG FIX: the original first assigned the string "->" and immediately
# overwrote it with the ArrowStyle below; the dead assignment is removed.
GENE_ARROW = ArrowStyle._Curve(beginarrow=False, endarrow=True, head_length=.4, head_width=.4)
def colortext(x, y, texts, colors, **kwargs):
    """Draw a row of text fragments at axes-fraction position (x, y), each
    fragment in its own color, packed horizontally into one annotation.

    texts and colors are parallel sequences (zipped pairwise).  Recognised
    kwargs: ax (required), horizontalalignment / verticalalignment
    ("left"/"center"/"right", "top"/"center"/"bottom"), clip_on,
    fontproperties.
    """
    # Map alignment keywords to the fractional box_alignment values that
    # AnnotationBbox expects.
    pos = {
        "right": 1,
        "center": 0.5,
        "left": 0,
        "top": 0,
        "bottom": 1
    }
    ax = kwargs.get("ax")
    verticalalignment = pos[kwargs.get("verticalalignment", "center")]
    horizontalalignment = pos[kwargs.get("horizontalalignment", "center")]
    annotation_clip = kwargs.get("clip_on", False)
    fontproperties = kwargs.get("fontproperties", None)
    textprops = {"fontproperties":fontproperties}
    # NOTE(review): `transform` is read but never used (only referenced in
    # the commented-out bboxprops line below).
    transform = kwargs.get("transform", None)
    areas = []
    for t,c in zip(texts, colors):
        textprops["color"] = c
        text = TextArea(t, textprops=textprops)
        areas.append(text)
    # Pack all fragments side by side with no padding between them.
    txt = HPacker(children=areas,
                  align="baseline",
                  pad=0, sep=0)
    bbox = AnnotationBbox(txt, xy=(x, y),
                          xycoords='data',
                          annotation_clip=annotation_clip,
                          frameon=False,
                          boxcoords=("axes fraction"),
                          box_alignment=(
                              horizontalalignment,
                              verticalalignment),  # alignment center, center
                          #bboxprops={"bbox_transmuter":transform},
                          )
    ax.add_artist(bbox)
def hide_axes(ax):
    """Strip *ax* bare: no tick labels, no tick marks, invisible spines."""
    for x in [ax.xaxis, ax.yaxis]:
        x.set_major_formatter(NullFormatter())
        x.set_major_locator(NullLocator())
    # BUG FIX: dict.iteritems() does not exist on Python 3; iterate values().
    for spine in ax.spines.values():
        spine.set_color('none')
def heatmap_plot(data, ind, outfile, tracks, titles, colors, bgcolors, scale, tscale, labels, fontsize):
    """Render one heatmap per track side by side and save to *outfile*.

    data      -- mapping track -> 2D array; rows are reordered by *ind*
    ind       -- row ordering (index array) applied to every track
    tracks    -- keys into *data*, one subplot each
    titles    -- subplot titles, parallel to *tracks*
    colors/bgcolors -- fore/background colors cycled per track
    scale, tscale   -- vmax of track i is scale * tscale[i]
    labels    -- optional per-row cluster ids; when given (and matching
                 len(ind)) an extra narrow column with cluster numbers is drawn
    fontsize  -- base font size

    Falls back to ".png" when *outfile* has an unknown extension.
    """
    font = FontProperties(size=fontsize / 1.25, family=["Nimbus Sans L", "Helvetica", "sans-serif"])
    label_ratio = 4.0
    # space between heatmaps
    btw_space = 0.02
    plot_width = 1.75 * len(tracks) + btw_space * len(tracks)
    plot_height = 6
    width_ratios = [label_ratio] * len(tracks)
    numplots = len(tracks)
    if labels is not None and len(labels) == len(ind):
        # Reserve a narrow extra column for the cluster labels.
        plot_width += 1 / label_ratio
        numplots += 1
        width_ratios += [1]
    # Create figure
    fig = plt.figure(figsize=(plot_width, plot_height))
    # Create subplot layout
    gs = gridspec.GridSpec(1, numplots, width_ratios=width_ratios, )
    axes = []
    for i, track in enumerate(tracks):
        c = create_colormap(bgcolors[i % len(bgcolors)], colors[i % len(colors)])
        ax = fig.add_subplot(gs[i])
        ax.set_title(titles[i], fontproperties=font, y=1)
        axes.append(ax)
        cax_mat = ax.pcolormesh(data[track][ind], cmap=c, vmin=0, vmax=scale * tscale[i])
        hide_axes(ax)
        ylim = ax.get_ylim()
        #fig.colorbar(cax_mat, orientation="horizontal", pad=0.05)
        # Put a thin horizontal colorbar directly below each heatmap.
        divider = make_axes_locatable(ax)
        ax_cb = divider.new_vertical(size="2%", pad=0.05, pack_start=True)
        fig.add_axes(ax_cb)
        fig.colorbar(cax_mat, cax=ax_cb, orientation="horizontal")
        #ax_cb.get_xaxis().set_major_locator(MaxNLocator(integer=True))
    if labels is not None and len(labels) == len(ind):
        # Extra column: one number per cluster, separated by grey lines.
        axcluster = fig.add_subplot(gs[len(tracks)])
        axcluster.axis('off')
        divider = make_axes_locatable(axcluster)
        ax_cb = divider.new_vertical(size="2%", pad=0.05, pack_start=True)
        axbl = fig.add_axes(ax_cb)
        axbl.axis('off')
        min_y, max_y = ylim
        s = 0
        axcluster.hlines(y=0, xmin=0, xmax=1, color="grey",
                         linewidth=0.5, alpha=0.5, linestyle='solid')
        labels = np.array(labels)
        # Smaller cluster on the top ([::-1])
        for i in range(max(labels) + 1)[::-1]:
            prev = s
            s += sum(labels == i)
            axcluster.hlines(y=s + 1 - 1, xmin=0, xmax=1, color="grey",
                             linewidth=0.5, alpha=0.5, linestyle='solid')
            axcluster.text(0.5, (prev + s) / 2,
                           str(i + 1),
                           verticalalignment="center",
                           horizontalalignment="center",
                           fontproperties=font)
        axcluster.set_ylim(ylim)
    fig.subplots_adjust(wspace=btw_space, hspace=0.02)
    ext = outfile.split(".")[-1]
    if ext not in ["png", "svg", "ps", "eps", "pdf"]:
        outfile += ".png"
    sys.stderr.write("Saving figure\n")
    if outfile.endswith("png"):
        plt.savefig(outfile, dpi=600, bbox_inches='tight')
    else:
        plt.savefig(outfile)
def coverage_plot(ax, x, data, color="red", percs=None):
    """Draw a median coverage line with shaded central-percentile bands.

    ax    -- matplotlib axes instance to draw on
    x     -- x-axis coordinates
    data  -- profile data; assumed one row per sample, one column per
             position (median taken along axis 0) -- TODO confirm
    color -- band color, in any form matplotlib accepts
    percs -- central band widths in percent (default [50, 90])
    """
    # Might change this into an argument for the function
    if percs is None:
        percs = [50, 90]
    # Convert each central band width to its lower-tail percentile, e.g.
    # 90 -> 5 (the band covers percentiles 5..95).
    percs = [(100 - float(p)) / 2 for p in percs[::-1]]
    alphas = [0.1, 0.4]
    # Convert to numpy array
    vals = np.array(data)
    # Get the median
    m = np.median(vals, axis=0)
    # Draw the minimum percentiles
    lines = [np.array([scoreatpercentile(vals[:, i], perc) for i in range(len(vals[0]))]) for perc in percs] + [m]
    for (line_min, line_max), alpha in zip([(lines[i], lines[i + 1]) for i in range(len(percs))], alphas):
        ax.fill_between(x, line_min, line_max, facecolor=color, alpha=alpha, edgecolor='face')
    # Draw the maximum percentiles
    lines = [m] + [np.array([scoreatpercentile(vals[:, i], 100 - perc) for i in range(len(vals[0]))]) for perc in
                   percs[::-1]]
    for (line_min, line_max), alpha in zip([(lines[i], lines[i + 1]) for i in range(len(percs))], alphas[::-1]):
        ax.fill_between(x, line_min, line_max, facecolor=color, alpha=alpha, edgecolor='face')
    # Draw the median
    ax.plot(x, m, color="black", alpha=0.95, linewidth=0.8)
    # ax.plot(x, mean(vals, axis = 0), color = "purple", alpha = 0.95, linewidth = 0.8)
def create_grid_figure(nrows, ncolumns, plotwidth=2.0, plotheight=2.0, pad=0.1, padleft=0.1, padright=0.1, padtop=0.1,
                       padbottom=0.1, clean=True):
    """Create a figure holding an nrows x ncolumns grid of equally sized
    subplots with absolute (inch) padding.

    Returns (fig, axes) where axes[row][col] is the subplot at that grid
    position.  When *clean* is true, tick labels and locators are removed.
    """
    # Overall figure size in inches: plots + inter-plot pads + outer margins.
    wsize = padleft + (ncolumns * plotwidth) + (pad * (ncolumns - 1)) + padright
    hsize = padtop + (nrows * plotheight) + (pad * (nrows - 1)) + padbottom
    fig = plt.figure(figsize=(wsize, hsize))
    # Convert the absolute sizes to figure fractions for set_position below.
    wpadfraction = pad / wsize
    hpadfraction = pad / hsize
    wplotsize = plotwidth / wsize
    hplotsize = plotheight / hsize
    axes = {}
    # Create all the subplots
    for row in range(nrows):
        axes[row] = {}
        for col in range(ncolumns):
            axes[row][col] = plt.subplot(nrows, ncolumns, row * ncolumns + col + 1)
            # No labels, ticks, etc.
            if clean:
                for ax in [axes[row][col].xaxis, axes[row][col].yaxis]:
                    ax.set_major_formatter(NullFormatter())
                    ax.set_major_locator(NullLocator())
    # Resize all the subplots
    for row in range(nrows):
        for col in range(ncolumns):
            # Position as [left, bottom, width, height] in figure fractions;
            # row 0 ends up at the top of the figure.
            x0 = (padleft / wsize) + (wplotsize + wpadfraction) * col
            x1 = wplotsize
            y0 = (padbottom / hsize) + (nrows - row - 1) * (hplotsize + hpadfraction)
            y1 = hplotsize
            coords = [x0, y0, x1, y1]
            axes[row][col].set_position(coords)
            for s in axes[row][col].spines.values():
                s.set_linewidth(0.8)
    return fig, axes
def profile_screenshot(fname, interval, tracks, fontsize=None, colors=None, scalegroups=None, scale=None, show_scale=True, annotation=None, bgmode="color", fragmentsize=200, dpi=600, rmdup=False, rmrepeats=False, reverse=False, adjscale=False):
    """
    Plot a genome browser like profile and save it to *fname*.

    Parameters
    ----------
    fname: string
        output file name

    interval: string
        interval to plot in "chrom:start-end" format

    tracks: list
        list of groups of BAM filenames; tracks within one group are drawn
        overlaid in the same panel

    Other arguments tune colors, per-track y-scales (scale/scalegroups),
    the optional gene annotation track, background mode, read processing
    (fragmentsize, rmdup, rmrepeats) and orientation (reverse).
    """
    if scalegroups is None:
        scalegroups = []
    if not fontsize:
        fontsize = FONTSIZE
    if not colors:
        colors = DEFAULT_COLORS
    # Plot size and padding definition
    plotwidth = 6
    plotheight = 0.3
    pad = {
        "left": 1.5,
        "right": 0.05,
        "top": 0.05,
        "bottom": 0.05,
        "row": 0,
        "column": 3,
    }
    # adjust width for track names if they are to long
    # kind of a quick hack
    max_len = 0
    for group in tracks:
        names = [os.path.splitext(os.path.basename(t))[0].strip() for t in group]
        l = sum([len(name) for name in names])
        if l > max_len:
            max_len = l
    if max_len > 27:
        pad["left"] = 3
    # Genomic scale
    scale_height = 0.1
    # Annotation track height
    annotation_height = 0.01
    # Accepts both "chrom:start-end" and "chrom-start-end" style separators.
    chrom, start, end = re.split(r'[-:]', interval)
    start, end = int(start), int(end)
    if annotation:
        ann = load_annotation([chrom,start,end], annotation)
        if ann:
            # Height grows with the number of annotation rows.
            annotation_height = 0.2 * len(ann.keys())
        else:
            # No features in this interval: drop the annotation track.
            annotation = False
    nrows = len(tracks)
    wsize = pad["left"] + plotwidth + pad["right"]
    hsize = pad["top"] + (nrows * plotheight) + (pad["row"] * (nrows - 1)) + pad["bottom"]
    hsize += scale_height + pad["row"] + annotation_height + pad["row"]
    # initialize figure
    fig = plt.figure(figsize=(wsize, hsize))
    # initialize profile figure
    pfig = ProfileFigure(fig=fig, fontsize=fontsize, pad=pad)
    # add the genomic scale
    pfig.add_panel(ScalePanel())
    # Normalise scale to a list so it can be cycled per track group.
    if type(scale) != type([]):
        scale = [scale]
    # add the signal tracks
    c = 0
    for group in tracks:
        for i,track in enumerate(group):
            # Tracks after the first in a group are overlaid on its panel.
            panel = pfig.add_panel(
                BamProfilePanel(track,
                                color = colors[c % len(colors)],
                                bgmode = bgmode,
                                name = os.path.splitext(os.path.split(track)[-1])[0],
                                fragmentsize = fragmentsize,
                                rmrepeats = rmrepeats,
                                rmdup = rmdup,
                                adjscale = adjscale,
                                show_scale = show_scale,
                                ),
                overlay= i != 0
            )
            panel.ymax = scale[c % len(scale)]
            c += 1
    # add the annotation panel
    if annotation:
        pfig.add_panel(AnnotationPanel(annotation))
    pfig.plot([chrom, start, end], scalegroups=scalegroups, reverse=reverse)
    plt.savefig(fname, dpi=dpi)
class ProfileFigure(object):
    """Container that stacks ProfilePanel instances vertically into one
    genome-browser-style figure.

    Panels are stored as a list of lists: each inner list is one drawing row,
    where entries after the first are overlays sharing the row's axes.
    """

    def __init__(self, fig=None, gs=None, fontsize=FONTSIZE, pad=None):
        """Set up the figure, outer GridSpec and font.

        pad -- optional dict with absolute margins in inches
               ("left"/"right"/"top"/"bottom"); converted here to figure
               fractions.
        """
        self._panels = []
        if not fig:
            fig = plt.figure()
        self.fig = fig
        self.pad = {}
        if pad:
            self.pad.update(pad)
        relpad = {}
        # Inch margins -> fraction of figure width/height.
        for k in ["left", "right"]:
            relpad[k] = float(self.pad.get(k,0)) / fig.get_figwidth()
        for k in ["top", "bottom"]:
            relpad[k] = float(self.pad.get(k,0)) / fig.get_figheight()
        if gs:
            self.gs = gs
        else:
            gs = gridspec.GridSpec(1, 1)
            gs.update(
                left=relpad["left"],
                right=1 - relpad["right"],
                top=1 - relpad["top"],
                bottom=relpad["bottom"],
                wspace=0,
                hspace=0
            )
            self.gs = gs[0]
        self.font = FontProperties(size=fontsize / 1.25, family=["Nimbus Sans L", "Helvetica", "sans-serif"])

    def _plot_panel_names(self, ax, panels):
        """Draw the track name(s) of one row left of its axes; overlaid
        panels get comma-separated names in their own colors."""
        names = [p.name for p in panels]
        colors = ["black"]
        if len(names) > 1:
            # Build alternating ["= ", "<name>, ", ...] fragments so each
            # name is prefixed by an "=" in the panel's color.
            tmp_names = []
            colors = []
            for name,color in zip(names, [p.color for p in panels]):
                tmp_names.append("= ")
                tmp_names.append(name + ", ")
                colors += [color,"black"]
            names = tmp_names
            names[-1] = names[-1].strip(", ")
        colortext(-0.01, 0.5,
                  names,
                  colors,
                  ax=ax,
                  horizontalalignment='right',
                  verticalalignment="center",
                  # transform=ax.transAxes,
                  clip_on=False,
                  fontproperties=self.font)

    def plot(self, interval, scalegroups=None, reverse=False, **kwargs):
        """Load every panel's data for *interval*, harmonise y-scales, then
        draw all rows top to bottom.

        scalegroups -- lists of row indices that must share a common ymax.
        """
        if scalegroups is None:
            scalegroups = []
        for panels in self._panels:
            for panel in panels:
                panel._load_data(interval)
        # One sub-row per panel group, sized by the tallest panel in it.
        gs0 = gridspec.GridSpecFromSubplotSpec(
            len(self._panels),
            1,
            subplot_spec=self.gs,
            height_ratios=[max([p.height for p in panels]) for panels in self._panels]
        )
        # Overlaid BAM panels in one row share the row's maximum ymax.
        for panels in self._panels:
            if isinstance(panels[-1], BamProfilePanel):
                ymax = max([p.ymax for p in panels])
                for panel in panels:
                    panel.ymax = ymax
        if scalegroups and len(scalegroups) > 0:
            for group in scalegroups:
                ymax = max([self._panels[g][-1].ymax for g in group])
                for g in group:
                    for panel in self._panels[g]:
                        panel.ymax = ymax
        # These are quick hacks to to get the track groups to work
        for panels in self._panels:
            if len(panels) > 1:
                # Set the alpha for overlapping tracks
                for panel in panels:
                    panel.alpha = 0.5
        for i, panels in enumerate(self._panels):
            ax = plt.Subplot(self.fig, gs0[i])
            plt.subplots_adjust(bottom=0, top=1, left=0, right=1, hspace=0)
            # add track labels
            self._plot_panel_names(ax, panels)
            for panel in panels:
                panel._plot(ax, interval, fig=self.fig, reverse=reverse, odd=i % 2, font=self.font, **kwargs)
            self.fig.add_subplot(ax)

    def add_panel(self, panel, overlay=False):
        """Append *panel* as a new row, or overlay it on the last row when
        *overlay* is true.  Returns the panel for chaining."""
        if overlay and len(self._panels) > 0:
            self._panels[-1].append(panel)
        else:
            self._panels.append([panel])
        return panel
class ProfilePanel(object):
    """Base class for the panels drawn by ProfileFigure.

    Provides the default empty panel name and a helper that blanks an axes
    completely (no tick labels, no ticks, no visible spines).
    """
    name = ""

    def hide_axes(self, axes):
        """Remove all tick formatting, tick locators and spine colors."""
        for axis in (axes.xaxis, axes.yaxis):
            axis.set_major_formatter(NullFormatter())
            axis.set_minor_formatter(NullFormatter())
            axis.set_major_locator(NullLocator())
            axis.set_minor_locator(NullLocator())
        for spine in axes.spines.values():
            spine.set_color('none')
class BamProfilePanel(ProfilePanel):
    """Panel that draws the read-coverage profile of one BAM file as a
    filled area plot."""

    def __init__(self, bamfile, height=1, color=None, bgmode=None, alpha=None, fragmentsize=200, rmdup=True,
                 rmrepeats=True, **kwargs):
        """Load the track and store drawing options.

        Recognised kwargs: adjscale (scale per million reads), show_scale
        (print the ymax in the panel corner), name (panel label).
        """
        self.height = height
        self.track = Track.load(bamfile, fragmentsize=fragmentsize, rmdup=rmdup, rmrepeats=rmrepeats)
        # ymax is filled in by _load_data unless a caller sets it explicitly.
        self.ymax = None
        self.bgmode = bgmode
        self.scalepm = kwargs.get("adjscale", False)
        self.show_scale = kwargs.get("show_scale", True)
        if color:
            self.color = color
        else:
            self.color = "#a7004b"
        if alpha:
            self.alpha = alpha
        else:
            self.alpha = 1
        self.fragmentsize = fragmentsize
        self.rmdup = rmdup
        self.rmrepeats = rmrepeats
        self.name = kwargs.get('name')

    def _load_data(self, interval):
        """Fetch the coverage profile for *interval*; auto-scale ymax to
        110% of the profile maximum when not preset."""
        self.profile = self.track.get_profile(interval,
                                              scalepm=self.scalepm)
        if not self.ymax:
            self.ymax = np.nanmax(self.profile) * 1.10

    def _plot(self, ax, interval, reverse=False, fig=None, odd=False, font=None, **kwargs):
        """Draw the stored profile on *ax*; *odd* alternates the stripe
        background, *reverse* flips the profile left-to-right."""
        # Background of profile
        if self.bgmode == "stripes":
            bgcol = {0: "white", 1: (0.95, 0.95, 0.95)}[int(odd)]
            ax.set_facecolor(bgcol)
        elif self.bgmode == "color":
            ax.set_facecolor(self.color)
            ax.patch.set_alpha(0.07)
        # get interval
        chrom, start, end = interval
        profile = self.profile
        if reverse:
            profile = profile[::-1]
        # plot data
        ax.fill_between(
            range(start, end),
            np.zeros(len(profile)),
            profile,
            edgecolor='face',
            facecolor=self.color,
            linewidth=0.5,
            alpha=self.alpha)
        # set the y-limit
        ax.set_ylim(0, self.ymax)
        # add y-limit label (rounded to the nearest integer)
        if self.show_scale:
            ax.text(0.005, 0.90,
                    int(ax.get_ylim()[-1] + 0.5),
                    horizontalalignment='left',
                    verticalalignment="top",
                    transform=ax.transAxes,
                    clip_on=False,
                    fontproperties=font)
        ax.set_xlim(start, end)
        self.hide_axes(ax)
class AnnotationPanel(ProfilePanel):
    """Panel that draws gene models (BED entries) under the signal tracks:
    a thin line per gene, boxes for exons, direction arrows for BED12
    entries, and the gene name left of its start."""

    def __init__(self, annofile, height=0.3, vis="stack", color="black"):
        # annofile -- BED file path; vis -- layout mode passed to
        # load_annotation ("stack" packs genes into rows).
        self.annofile = annofile
        self.height = height
        self.vis = vis
        self.color = color

    def _load_data(self, interval):
        """Load features overlapping *interval*; one track per stacking row.
        The panel height grows with the number of rows."""
        self.gene_track = load_annotation(interval, self.annofile, vis=self.vis)
        self.max_tracks = len(self.gene_track.keys())
        self.height *= self.max_tracks

    def _plot(self, ax, interval, reverse=False, fig=None, odd=False, font=None, **kwargs):
        """Draw all loaded genes on *ax*; rows are stacked downward from 0."""
        chrom, start, end = interval
        ax.set_ylim(- 1 * self.max_tracks, 0)
        for track_id, genes in self.gene_track.items():
            for gene in genes:
                # Vertical center of this stacking row.
                h_gene = -1 * track_id - 0.5
                genestart = gene[1]
                geneend = gene[2]
                genename = gene[3]
                if len(gene) >= 6:
                    genestrand = gene[5]
                else:
                    genestrand = "+"
                # BED12 format
                if len(gene) == 12:
                    exonstarts = [int(x) for x in gene[11].split(",") if x]
                    exonsizes = [int(x) for x in gene[10].split(",") if x]
                else:
                    # No block information: treat the whole gene as one exon.
                    exonstarts = [0]
                    exonsizes = [geneend - genestart]
                x1 = (genestart - start)
                x2 = (geneend - start)
                if reverse:
                    x1 = end - genestart
                    x2 = end - geneend
                # Gene start/end as fractions of the plotted interval.
                gstart = x1 / float(end - start)
                gend = x2 / float(end - start)
                # Horizontal line for complete gene
                ax.axhline(h_gene,
                           gstart,
                           gend,
                           color=self.color,
                           solid_capstyle="butt",
                           )
                # Exons
                for exonstart, exonsize in zip(exonstarts, exonsizes):
                    estart = (genestart + exonstart - start)
                    eend = (genestart + exonstart + exonsize - start)
                    if reverse:
                        estart = end - (genestart + exonstart)
                        eend = end - (genestart + exonstart + exonsize)
                    ax.axhspan(
                        h_gene - 0.35,
                        h_gene + 0.35,
                        estart / float(end - start),
                        eend / float(end - start),
                        linewidth=0.1,
                        color=self.color)
                # Only draw arrows for BED12 entries
                if len(gene) == 12:
                    bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
                    figwidth, figheight = bbox.width, bbox.height
                    # Scale with absolute width of figure
                    step = 0.04 / figwidth
                    if reverse:
                        step = -step
                    # One small arrow every `step` along the gene body,
                    # pointing with the strand.
                    for i in np.arange(gstart + step, gend - step, step):
                        if genestrand == "-":
                            astart = (i + step, h_gene)
                            aend = (i, h_gene)
                        else:
                            astart = (i, h_gene)
                            aend = (i + step, h_gene)
                        arr = FancyArrowPatch(
                            astart,
                            aend,
                            arrowstyle=GENE_ARROW,
                            mutation_scale=(figheight * fig.dpi) / 8 / self.max_tracks * 1.5,
                            linewidth=0.5,
                            color=self.color,
                        )
                        ax.add_patch(arr)
                # Name only fits when the gene does not start at the left edge.
                if gstart > 0:
                    ax.text(gstart - 0.01, h_gene, genename,
                            horizontalalignment="right",
                            verticalalignment="center",
                            fontproperties=font)
        self.hide_axes(ax)
class ScalePanel(ProfilePanel):
    """Panel showing the genomic coordinate scale: the chromosome name at
    the left edge and tick positions as plain numbers."""

    def __init__(self, height=0.3, color=None, alpha=None):
        self.height = height
        if color:
            self.color = color
        else:
            self.color = "black"
        if alpha:
            self.alpha = alpha
        else:
            self.alpha = 1

    def _load_data(self, interval):
        # Nothing to load; the scale is derived from the interval at plot time.
        pass

    def _plot(self, ax, interval, reverse=False, fig=None, odd=False, font=None, **kwargs):
        """Draw chromosome name and tick coordinates as text on *ax*."""
        chrom, start, end = interval
        # Formatting: hide spines and tick labels but keep the x tick
        # locator, whose positions are reused as label coordinates below.
        for s in ax.spines.values():
            s.set_color('none')
        ax.yaxis.set_major_formatter(NullFormatter())
        ax.xaxis.set_major_formatter(NullFormatter())
        ax.yaxis.set_major_locator(NullLocator())
        ax.set_xlim(start, end)
        # ax.set_ylim(0,1)
        # Set font
        # Plot the numbers
        ticks = [s for s in ax.xaxis.get_ticklocs()[:-1] if s > start and s < end]
        # Label x positions as axes fractions, nudged slightly right.
        xcoords = [(s - start) / (end - start) + 0.01 for s in ticks]
        if reverse:
            # NOTE(review): only the tick values are mirrored here, not the
            # xcoords -- verify this renders as intended for reverse=True.
            ticks = ticks[::-1]
        for s, x in zip(ticks[:-1], xcoords[:-1]):
            ax.text(
                x,
                0.5,
                str(int(s)),
                horizontalalignment='left',
                verticalalignment='center',
                transform=ax.transAxes,
                fontproperties=font,
                color=self.color)
        ax.text(
            0,
            0.5,
            chrom,
            horizontalalignment='left',
            verticalalignment='center',
            transform=ax.transAxes,
            fontproperties=font,
            color=self.color)
class ConservationPanel(ProfilePanel):
    """Panel that draws shaded polygons connecting aligned regions between
    the plotted interval (top edge) and a target interval (bottom edge)."""

    def __init__(self, track, target, height=1):
        # track  -- path to a tab-separated file with paired coordinates:
        #           chrom1 start1 end1 chrom2 start2 end2 per line
        # target -- (chrom, start, end) of the second interval
        self.track = track
        self.height = height
        self.data = []
        self.target = target

    def _load_data(self, ival1):
        """Read all coordinate pairs from the track file (the interval
        argument is unused; the file is read whole)."""
        # NOTE(review): the file handle is never closed explicitly.
        for line in open(self.track):
            vals = line.strip().split("\t")
            # Columns 1,2 and 4,5 are start/end positions.
            for i in [1, 2, 4, 5]:
                vals[i] = int(vals[i])
            self.data.append(vals)

    def _plot(self, ax, interval, reverse=False, fig=None, odd=False, font=None, **kwargs):
        """Draw one polygon per coordinate pair; *reverse* flips the top
        interval and kwargs['reverse_self'] flips the bottom one."""
        reverse_other = reverse
        reverse_self = kwargs.get("reverse_self", False)
        chrom, start, end = interval
        c2, s2, e2 = self.target
        span1 = float(end - start)
        span2 = float(e2 - s2)
        for [chrom1, start1, end1, chrom2, start2, end2] in self.data:
            # Four (x, y) corners in fractional coordinates: the top pair at
            # y=1 comes from interval 1, the bottom pair at y=0 from the
            # target; each side is mirrored when its reverse flag is set.
            if reverse_self:
                if reverse_other:
                    coords = [
                        [1 - (end1 - start) / span1, 1],
                        [1 - (end2 - s2) / span2, 0],
                        [1 - (start2 - s2) / span2, 0],
                        [1 - (start1 - start) / span1, 1]
                    ]
                else:
                    coords = [
                        [1 - (end1 - start) / span1, 1],
                        [(start2 - s2) / span2, 0],
                        [(end2 - s2) / span2, 0],
                        [1 - (start1 - start) / span1, 1]
                    ]
            else:
                if reverse_other:
                    coords = [
                        [(start1 - start) / span1, 1],
                        [1 - (end2 - s2) / span2, 0],
                        [1 - (start2 - s2) / span2, 0],
                        [(end1 - start) / span1, 1]
                    ]
                else:
                    coords = [
                        [(start1 - start) / span1, 1],
                        [(start2 - s2) / span2, 0],
                        [(end2 - s2) / span2, 0],
                        [(end1 - start) / span1, 1]
                    ]
            poly = Polygon(coords,
                           facecolor="black",
                           edgecolor='none',
                           alpha=0.2,
                           )
            ax.add_patch(poly)
        self.hide_axes(ax)
colorbar overlap fixed
import os
import re
import sys
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.font_manager import FontProperties
from matplotlib.offsetbox import HPacker, TextArea, AnnotationBbox
from matplotlib.patches import FancyArrowPatch, ArrowStyle, Polygon
from matplotlib.ticker import NullFormatter, NullLocator, MaxNLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.stats import scoreatpercentile
from fluff.color import create_colormap
from fluff.config import FONTSIZE
from fluff.fluffio import load_annotation
from fluff.track import Track
# Default track colors (red, green, blue from the ColorBrewer Set1 palette).
DEFAULT_COLORS = ["#e41a1c", "#4daf4a", "#377eb8"]
# Arrow style for gene-direction arrows.  BUG FIX: the preceding
# GENE_ARROW = "->" assignment was dead code (immediately overwritten) and
# has been removed.
GENE_ARROW = ArrowStyle._Curve(beginarrow=False, endarrow=True, head_length=.4, head_width=.4)
def colortext(x, y, texts, colors, **kwargs):
    """Place a horizontally packed row of text fragments at axes-fraction
    (x, y), one color per fragment (texts and colors zipped pairwise).

    Recognised kwargs: ax (required), horizontalalignment /
    verticalalignment keywords, clip_on, fontproperties.
    """
    # Alignment keyword -> fractional box_alignment value.
    pos = {
        "right": 1,
        "center": 0.5,
        "left": 0,
        "top": 0,
        "bottom": 1
    }
    ax = kwargs.get("ax")
    verticalalignment = pos[kwargs.get("verticalalignment", "center")]
    horizontalalignment = pos[kwargs.get("horizontalalignment", "center")]
    annotation_clip = kwargs.get("clip_on", False)
    fontproperties = kwargs.get("fontproperties", None)
    textprops = {"fontproperties":fontproperties}
    # NOTE(review): `transform` is unused apart from the commented-out
    # bboxprops line below.
    transform = kwargs.get("transform", None)
    areas = []
    for t,c in zip(texts, colors):
        textprops["color"] = c
        text = TextArea(t, textprops=textprops)
        areas.append(text)
    txt = HPacker(children=areas,
                  align="baseline",
                  pad=0, sep=0)
    bbox = AnnotationBbox(txt, xy=(x, y),
                          xycoords='data',
                          annotation_clip=annotation_clip,
                          frameon=False,
                          boxcoords=("axes fraction"),
                          box_alignment=(
                              horizontalalignment,
                              verticalalignment),  # alignment center, center
                          #bboxprops={"bbox_transmuter":transform},
                          )
    ax.add_artist(bbox)
def hide_axes(ax):
    """Strip *ax* of tick labels, tick marks and visible spines."""
    for x in [ax.xaxis, ax.yaxis]:
        x.set_major_formatter(NullFormatter())
        x.set_major_locator(NullLocator())
    # BUG FIX: dict.iteritems() is Python-2-only and raises AttributeError
    # on Python 3; iterate over values() instead.
    for spine in ax.spines.values():
        spine.set_color('none')
def heatmap_plot(data, ind, outfile, tracks, titles, colors, bgcolors, scale, tscale, labels, fontsize):
    """Draw one heatmap per track in a row and save the figure to *outfile*.

    data -- mapping track -> 2D array, rows reordered by *ind*; track i uses
    colormap bgcolors[i]->colors[i] with vmax = scale * tscale[i].  When
    *labels* matches len(ind), an extra column prints cluster numbers.
    Colorbar edge labels are nudged inward to avoid overlap between
    neighbouring subplots.  Unknown extensions fall back to ".png".
    """
    font = FontProperties(size=fontsize / 1.25, family=["Nimbus Sans L", "Helvetica", "sans-serif"])
    label_ratio = 4.0
    # space between heatmaps
    btw_space = 0.02
    plot_width = 1.75 * len(tracks) + btw_space * len(tracks)
    plot_height = 6
    width_ratios = [label_ratio] * len(tracks)
    numplots = len(tracks)
    if labels is not None and len(labels) == len(ind):
        # Extra narrow column for cluster labels.
        plot_width += 1 / label_ratio
        numplots += 1
        width_ratios += [1]
    # Create figure
    fig = plt.figure(figsize=(plot_width, plot_height))
    # Create subplot layout
    gs = gridspec.GridSpec(1, numplots, width_ratios=width_ratios, )
    axes = []
    for i, track in enumerate(tracks):
        c = create_colormap(bgcolors[i % len(bgcolors)], colors[i % len(colors)])
        ax = fig.add_subplot(gs[i])
        ax.set_title(titles[i], fontproperties=font, y=1)
        axes.append(ax)
        cax_mat = ax.pcolormesh(data[track][ind], cmap=c, vmin=0, vmax=scale * tscale[i])
        hide_axes(ax)
        ylim = ax.get_ylim()
        #fig.colorbar(cax_mat, orientation="horizontal", pad=0.05)
        # Thin horizontal colorbar directly below each heatmap, with at most
        # a few ticks to keep neighbouring bars readable.
        divider = make_axes_locatable(ax)
        ax_cb = divider.new_vertical(size="2%", pad=0.05, pack_start=True)
        fig.add_axes(ax_cb)
        tick_locator = MaxNLocator(nbins=3)
        cbar = fig.colorbar(cax_mat, cax=ax_cb, orientation="horizontal", ticks=tick_locator)
        cbar_labels = cbar.ax.get_xticklabels()
        cbar_ticks = cbar.ax.get_xticks()
        if cbar_ticks[0] == 0:
            # if the label is at the start of the colorbar
            # move it a bit inside to avoid overlapping
            # with other labels
            cbar_labels[0].set_horizontalalignment('left')
        if cbar_ticks[-1] == 1:
            # if the label is at the end of the colorbar
            # move it a bit inside to avoid overlapping
            # with other labels
            cbar_labels[-1].set_horizontalalignment('right')
        # cbar.ax.set_xticklabels(labels, rotation=90)
    if labels is not None and len(labels) == len(ind):
        # Cluster-number column, separated by grey lines.
        axcluster = fig.add_subplot(gs[len(tracks)])
        axcluster.axis('off')
        divider = make_axes_locatable(axcluster)
        ax_cb = divider.new_vertical(size="2%", pad=0.05, pack_start=True)
        axbl = fig.add_axes(ax_cb)
        axbl.axis('off')
        min_y, max_y = ylim
        s = 0
        axcluster.hlines(y=0, xmin=0, xmax=1, color="grey",
                         linewidth=0.5, alpha=0.5, linestyle='solid')
        labels = np.array(labels)
        # Smaller cluster on the top ([::-1])
        for i in range(max(labels) + 1)[::-1]:
            prev = s
            s += sum(labels == i)
            axcluster.hlines(y=s + 1 - 1, xmin=0, xmax=1, color="grey",
                             linewidth=0.5, alpha=0.5, linestyle='solid')
            axcluster.text(0.5, (prev + s) / 2,
                           str(i + 1),
                           verticalalignment="center",
                           horizontalalignment="center",
                           fontproperties=font)
        axcluster.set_ylim(ylim)
    fig.subplots_adjust(wspace=btw_space, hspace=0.01)
    ext = outfile.split(".")[-1]
    if ext not in ["png", "svg", "ps", "eps", "pdf"]:
        outfile += ".png"
    sys.stderr.write("Saving figure\n")
    if outfile.endswith("png"):
        plt.savefig(outfile, dpi=600, bbox_inches='tight')
    else:
        plt.savefig(outfile)
def coverage_plot(ax, x, data, color="red", percs=None):
    """Plot a median line over *data* with shaded central-percentile bands.

    ax    -- matplotlib axes instance to draw on
    x     -- x-axis coordinates
    data  -- profile data; median is taken along axis 0 (rows = samples,
             columns = positions -- TODO confirm with callers)
    color -- band color, in any form matplotlib accepts
    percs -- central band widths in percent (default [50, 90])
    """
    # Might change this into an argument for the function
    if percs is None:
        percs = [50, 90]
    # Band width -> lower-tail percentile (90 -> 5, so the band is 5..95).
    percs = [(100 - float(p)) / 2 for p in percs[::-1]]
    alphas = [0.1, 0.4]
    # Convert to numpy array
    vals = np.array(data)
    # Get the median
    m = np.median(vals, axis=0)
    # Draw the minimum percentiles
    lines = [np.array([scoreatpercentile(vals[:, i], perc) for i in range(len(vals[0]))]) for perc in percs] + [m]
    for (line_min, line_max), alpha in zip([(lines[i], lines[i + 1]) for i in range(len(percs))], alphas):
        ax.fill_between(x, line_min, line_max, facecolor=color, alpha=alpha, edgecolor='face')
    # Draw the maximum percentiles
    lines = [m] + [np.array([scoreatpercentile(vals[:, i], 100 - perc) for i in range(len(vals[0]))]) for perc in
                   percs[::-1]]
    for (line_min, line_max), alpha in zip([(lines[i], lines[i + 1]) for i in range(len(percs))], alphas[::-1]):
        ax.fill_between(x, line_min, line_max, facecolor=color, alpha=alpha, edgecolor='face')
    # Draw the median
    ax.plot(x, m, color="black", alpha=0.95, linewidth=0.8)
    # ax.plot(x, mean(vals, axis = 0), color = "purple", alpha = 0.95, linewidth = 0.8)
def create_grid_figure(nrows, ncolumns, plotwidth=2.0, plotheight=2.0, pad=0.1, padleft=0.1, padright=0.1, padtop=0.1,
                       padbottom=0.1, clean=True):
    """Create a figure with an nrows x ncolumns grid of equally sized axes.

    All sizes are in inches; the pad* arguments control outer margins and the
    spacing between plots. With clean=True all ticks and tick labels are
    stripped. Returns (figure, axes) where axes[row][col] is the subplot in
    that grid cell.
    """
    # Total canvas size in inches: margins + plots + inter-plot padding.
    total_w = padleft + (ncolumns * plotwidth) + (pad * (ncolumns - 1)) + padright
    total_h = padtop + (nrows * plotheight) + (pad * (nrows - 1)) + padbottom
    fig = plt.figure(figsize=(total_w, total_h))
    # Everything below works in figure-relative (0..1) coordinates.
    frac_pad_w = pad / total_w
    frac_pad_h = pad / total_h
    frac_plot_w = plotwidth / total_w
    frac_plot_h = plotheight / total_h
    grid = {}
    # First pass: instantiate every subplot (optionally stripping decorations).
    for row in range(nrows):
        grid[row] = {}
        for col in range(ncolumns):
            axis = plt.subplot(nrows, ncolumns, row * ncolumns + col + 1)
            grid[row][col] = axis
            if clean:
                for side in (axis.xaxis, axis.yaxis):
                    side.set_major_formatter(NullFormatter())
                    side.set_major_locator(NullLocator())
    # Second pass: pin each subplot to its exact fractional position
    # (row 0 goes at the top of the canvas).
    for row in range(nrows):
        for col in range(ncolumns):
            left = (padleft / total_w) + (frac_plot_w + frac_pad_w) * col
            bottom = (padbottom / total_h) + (nrows - row - 1) * (frac_plot_h + frac_pad_h)
            grid[row][col].set_position([left, bottom, frac_plot_w, frac_plot_h])
            for spine in grid[row][col].spines.values():
                spine.set_linewidth(0.8)
    return fig, grid
def profile_screenshot(fname, interval, tracks, fontsize=None, colors=None, scalegroups=None, scale=None, show_scale=True, annotation=None, bgmode="color", fragmentsize=200, dpi=600, rmdup=False, rmrepeats=False, reverse=False, adjscale=False):
    """
    Plot a genome browser like profile

    Parameters
    ----------
    fname: string
        output file name
    interval: string
        interval to plot in "chrom:start-end" format
    tracks: list
        list of groups of BAM filenames; tracks within one group are
        overlaid on the same panel
    """
    if scalegroups is None:
        scalegroups = []
    if not fontsize:
        fontsize = FONTSIZE
    if not colors:
        colors = DEFAULT_COLORS
    # Plot size and padding definition
    plotwidth = 6
    plotheight = 0.3
    pad = {
        "left": 1.5,
        "right": 0.05,
        "top": 0.05,
        "bottom": 0.05,
        "row": 0,
        "column": 3,
    }
    # adjust width for track names if they are too long
    # kind of a quick hack
    max_len = 0
    for group in tracks:
        names = [os.path.splitext(os.path.basename(t))[0].strip() for t in group]
        # idiom: generator expression, and a readable name instead of `l`
        name_len = sum(len(name) for name in names)
        if name_len > max_len:
            max_len = name_len
    if max_len > 27:
        pad["left"] = 3
    # Genomic scale
    scale_height = 0.1
    # Annotation track height
    annotation_height = 0.01
    chrom, start, end = re.split(r'[-:]', interval)
    start, end = int(start), int(end)
    if annotation:
        ann = load_annotation([chrom, start, end], annotation)
        if ann:
            annotation_height = 0.2 * len(ann.keys())
        else:
            # nothing to draw in this interval; drop the annotation panel
            annotation = False
    nrows = len(tracks)
    wsize = pad["left"] + plotwidth + pad["right"]
    hsize = pad["top"] + (nrows * plotheight) + (pad["row"] * (nrows - 1)) + pad["bottom"]
    hsize += scale_height + pad["row"] + annotation_height + pad["row"]
    # initialize figure
    fig = plt.figure(figsize=(wsize, hsize))
    # initialize profile figure
    pfig = ProfileFigure(fig=fig, fontsize=fontsize, pad=pad)
    # add the genomic scale
    pfig.add_panel(ScalePanel())
    # idiom: isinstance instead of comparing type() objects;
    # a single scale value is recycled over all panels below
    if not isinstance(scale, list):
        scale = [scale]
    # add the signal tracks
    c = 0
    for group in tracks:
        for i, track in enumerate(group):
            # every track after the first in a group is overlaid on the panel
            panel = pfig.add_panel(
                BamProfilePanel(track,
                                color=colors[c % len(colors)],
                                bgmode=bgmode,
                                name=os.path.splitext(os.path.split(track)[-1])[0],
                                fragmentsize=fragmentsize,
                                rmrepeats=rmrepeats,
                                rmdup=rmdup,
                                adjscale=adjscale,
                                show_scale=show_scale,
                                ),
                overlay=i != 0
            )
            panel.ymax = scale[c % len(scale)]
            c += 1
    # add the annotation panel
    if annotation:
        pfig.add_panel(AnnotationPanel(annotation))
    pfig.plot([chrom, start, end], scalegroups=scalegroups, reverse=reverse)
    plt.savefig(fname, dpi=dpi)
class ProfileFigure(object):
    """Container that stacks ProfilePanel instances vertically in one figure.

    Panels are grouped: each entry in self._panels is a list of panels that
    share one subplot (overlays). plot() lays the groups out with a
    GridSpec whose row heights follow the panels' `height` attributes.
    """
    def __init__(self, fig=None, gs=None, fontsize=FONTSIZE, pad=None):
        # list of panel groups; each group is a list drawn on one subplot
        self._panels = []
        if not fig:
            fig = plt.figure()
        self.fig = fig
        # padding in inches, converted to figure fractions below
        self.pad = {}
        if pad:
            self.pad.update(pad)
        relpad = {}
        for k in ["left", "right"]:
            relpad[k] = float(self.pad.get(k, 0)) / fig.get_figwidth()
        for k in ["top", "bottom"]:
            relpad[k] = float(self.pad.get(k, 0)) / fig.get_figheight()
        if gs:
            # caller supplied a (sub)gridspec cell to draw into
            self.gs = gs
        else:
            # single-cell gridspec covering the padded drawing area
            gs = gridspec.GridSpec(1, 1)
            gs.update(
                left=relpad["left"],
                right=1 - relpad["right"],
                top=1 - relpad["top"],
                bottom=relpad["bottom"],
                wspace=0,
                hspace=0
            )
            self.gs = gs[0]
        self.font = FontProperties(size=fontsize / 1.25, family=["Nimbus Sans L", "Helvetica", "sans-serif"])

    def _plot_panel_names(self, ax, panels):
        """Write the panel name(s) left of `ax`; overlaid panels get
        color-coded "name, = " legend fragments via colortext()."""
        names = [p.name for p in panels]
        colors = ["black"]
        if len(names) > 1:
            # build an interleaved [marker, "name, ", ...] list so each name
            # is followed by an "=" swatch in that panel's color
            tmp_names = []
            colors = []
            for name, color in zip(names, [p.color for p in panels]):
                tmp_names.append("= ")
                tmp_names.append(name + ", ")
                colors += [color, "black"]
            names = tmp_names
            names[-1] = names[-1].strip(", ")
        colortext(-0.01, 0.5,
                  names,
                  colors,
                  ax=ax,
                  horizontalalignment='right',
                  verticalalignment="center",
                  # transform=ax.transAxes,
                  clip_on=False,
                  fontproperties=self.font)

    def plot(self, interval, scalegroups=None, reverse=False, **kwargs):
        """Load data for all panels and render them stacked in the figure.

        scalegroups: lists of group indices that must share one y-scale.
        reverse: mirror the x-axis (minus-strand view).
        """
        if scalegroups is None:
            scalegroups = []
        for panels in self._panels:
            for panel in panels:
                panel._load_data(interval)
        # one gridspec row per panel group, sized by the tallest panel in it
        gs0 = gridspec.GridSpecFromSubplotSpec(
            len(self._panels),
            1,
            subplot_spec=self.gs,
            height_ratios=[max([p.height for p in panels]) for panels in self._panels]
        )
        # overlaid BAM panels within one group share the group's max y-scale
        for panels in self._panels:
            if isinstance(panels[-1], BamProfilePanel):
                ymax = max([p.ymax for p in panels])
                for panel in panels:
                    panel.ymax = ymax
        # scalegroups force a common y-scale across whole groups
        if scalegroups and len(scalegroups) > 0:
            for group in scalegroups:
                ymax = max([self._panels[g][-1].ymax for g in group])
                for g in group:
                    for panel in self._panels[g]:
                        panel.ymax = ymax
        # These are quick hacks to to get the track groups to work
        for panels in self._panels:
            if len(panels) > 1:
                # Set the alpha for overlapping tracks
                for panel in panels:
                    panel.alpha = 0.5
        for i, panels in enumerate(self._panels):
            ax = plt.Subplot(self.fig, gs0[i])
            plt.subplots_adjust(bottom=0, top=1, left=0, right=1, hspace=0)
            # add track labels
            self._plot_panel_names(ax, panels)
            for panel in panels:
                # odd=i % 2 lets panels alternate stripe backgrounds
                panel._plot(ax, interval, fig=self.fig, reverse=reverse, odd=i % 2, font=self.font, **kwargs)
            self.fig.add_subplot(ax)

    def add_panel(self, panel, overlay=False):
        """Append `panel`; with overlay=True it joins the previous group
        (drawn on the same subplot). Returns the panel for chaining."""
        if overlay and len(self._panels) > 0:
            self._panels[-1].append(panel)
        else:
            self._panels.append([panel])
        return panel
class ProfilePanel(object):
    """Abstract base for the panels composed by ProfileFigure."""
    name = ""

    def hide_axes(self, axes):
        """Remove every tick, tick label and spine color from `axes`."""
        for axis in (axes.xaxis, axes.yaxis):
            axis.set_major_formatter(NullFormatter())
            axis.set_minor_formatter(NullFormatter())
            axis.set_major_locator(NullLocator())
            axis.set_minor_locator(NullLocator())
        for spine in axes.spines.values():
            spine.set_color('none')
class BamProfilePanel(ProfilePanel):
    """Panel that draws a read-coverage profile from a BAM file."""
    def __init__(self, bamfile, height=1, color=None, bgmode=None, alpha=None, fragmentsize=200, rmdup=True,
                 rmrepeats=True, **kwargs):
        # relative panel height used by ProfileFigure's height_ratios
        self.height = height
        self.track = Track.load(bamfile, fragmentsize=fragmentsize, rmdup=rmdup, rmrepeats=rmrepeats)
        # ymax is derived in _load_data() unless the caller sets it first
        self.ymax = None
        # background style: "stripes", "color" or None (see _plot)
        self.bgmode = bgmode
        self.scalepm = kwargs.get("adjscale", False)
        self.show_scale = kwargs.get("show_scale", True)
        if color:
            self.color = color
        else:
            self.color = "#a7004b"
        if alpha:
            self.alpha = alpha
        else:
            self.alpha = 1
        self.fragmentsize = fragmentsize
        self.rmdup = rmdup
        self.rmrepeats = rmrepeats
        self.name = kwargs.get('name')

    def _load_data(self, interval):
        """Fetch the coverage profile for `interval`; derive a default y-max."""
        self.profile = self.track.get_profile(interval,
                                              scalepm=self.scalepm)
        if not self.ymax:
            # 10% headroom above the highest observed value
            self.ymax = np.nanmax(self.profile) * 1.10

    def _plot(self, ax, interval, reverse=False, fig=None, odd=False, font=None, **kwargs):
        """Draw the loaded profile on `ax`; `odd` alternates stripe colors."""
        # Background of profile
        if self.bgmode == "stripes":
            bgcol = {0: "white", 1: (0.95, 0.95, 0.95)}[int(odd)]
            ax.set_facecolor(bgcol)
        elif self.bgmode == "color":
            # faint wash of the track's own color
            ax.set_facecolor(self.color)
            ax.patch.set_alpha(0.07)
        # get interval
        chrom, start, end = interval
        profile = self.profile
        if reverse:
            # mirror the profile for minus-strand display
            profile = profile[::-1]
        # plot data
        ax.fill_between(
            range(start, end),
            np.zeros(len(profile)),
            profile,
            edgecolor='face',
            facecolor=self.color,
            linewidth=0.5,
            alpha=self.alpha)
        # set the y-limit
        ax.set_ylim(0, self.ymax)
        # add y-limit label
        if self.show_scale:
            # rounded y-max printed in the top-left corner of the panel
            ax.text(0.005, 0.90,
                    int(ax.get_ylim()[-1] + 0.5),
                    horizontalalignment='left',
                    verticalalignment="top",
                    transform=ax.transAxes,
                    clip_on=False,
                    fontproperties=font)
        ax.set_xlim(start, end)
        self.hide_axes(ax)
class AnnotationPanel(ProfilePanel):
    """Panel that draws gene models (BED/BED12) below the signal tracks."""
    def __init__(self, annofile, height=0.3, vis="stack", color="black"):
        self.annofile = annofile
        # base height; scaled by the number of stacked annotation rows
        self.height = height
        self.vis = vis
        self.color = color

    def _load_data(self, interval):
        """Load gene annotations overlapping `interval`, grouped by stack row."""
        self.gene_track = load_annotation(interval, self.annofile, vis=self.vis)
        self.max_tracks = len(self.gene_track.keys())
        # grow the panel with the number of stacked rows
        self.height *= self.max_tracks

    def _plot(self, ax, interval, reverse=False, fig=None, odd=False, font=None, **kwargs):
        """Render each gene as a line with exon boxes, strand arrows and a name."""
        chrom, start, end = interval
        # one unit of y per stacked row; rows are drawn at negative y
        ax.set_ylim(- 1 * self.max_tracks, 0)
        for track_id, genes in self.gene_track.items():
            for gene in genes:
                # vertical center of this gene's row
                h_gene = -1 * track_id - 0.5
                genestart = gene[1]
                geneend = gene[2]
                genename = gene[3]
                if len(gene) >= 6:
                    genestrand = gene[5]
                else:
                    genestrand = "+"
                # BED12 format
                if len(gene) == 12:
                    exonstarts = [int(x) for x in gene[11].split(",") if x]
                    exonsizes = [int(x) for x in gene[10].split(",") if x]
                else:
                    # plain BED: treat the whole gene as one exon
                    exonstarts = [0]
                    exonsizes = [geneend - genestart]
                x1 = (genestart - start)
                x2 = (geneend - start)
                if reverse:
                    x1 = end - genestart
                    x2 = end - geneend
                # axis-fraction coordinates of the gene body
                gstart = x1 / float(end - start)
                gend = x2 / float(end - start)
                # Horizontal line for complete gene
                ax.axhline(h_gene,
                           gstart,
                           gend,
                           color=self.color,
                           solid_capstyle="butt",
                           )
                # Exons
                for exonstart, exonsize in zip(exonstarts, exonsizes):
                    estart = (genestart + exonstart - start)
                    eend = (genestart + exonstart + exonsize - start)
                    if reverse:
                        # NOTE(review): in the reversed case estart > eend, so
                        # axhspan gets xmin > xmax -- confirm matplotlib renders
                        # this span as intended for minus-strand views.
                        estart = end - (genestart + exonstart)
                        eend = end - (genestart + exonstart + exonsize)
                    ax.axhspan(
                        h_gene - 0.35,
                        h_gene + 0.35,
                        estart / float(end - start),
                        eend / float(end - start),
                        linewidth=0.1,
                        color=self.color)
                # Only draw arrows for BED12 entries
                if len(gene) == 12:
                    bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
                    figwidth, figheight = bbox.width, bbox.height
                    # Scale with absolute width of figure
                    step = 0.04 / figwidth
                    if reverse:
                        step = -step
                    # evenly spaced strand-direction arrows along the gene body
                    for i in np.arange(gstart + step, gend - step, step):
                        if genestrand == "-":
                            astart = (i + step, h_gene)
                            aend = (i, h_gene)
                        else:
                            astart = (i, h_gene)
                            aend = (i + step, h_gene)
                        arr = FancyArrowPatch(
                            astart,
                            aend,
                            arrowstyle=GENE_ARROW,
                            mutation_scale=(figheight * fig.dpi) / 8 / self.max_tracks * 1.5,
                            linewidth=0.5,
                            color=self.color,
                        )
                        ax.add_patch(arr)
                # gene name left of the gene body, only if it fits in view
                if gstart > 0:
                    ax.text(gstart - 0.01, h_gene, genename,
                            horizontalalignment="right",
                            verticalalignment="center",
                            fontproperties=font)
        self.hide_axes(ax)
class ScalePanel(ProfilePanel):
    """Panel that prints the chromosome name and genomic tick coordinates."""
    def __init__(self, height=0.3, color=None, alpha=None):
        self.height = height
        if color:
            self.color = color
        else:
            self.color = "black"
        if alpha:
            self.alpha = alpha
        else:
            self.alpha = 1

    def _load_data(self, interval):
        # nothing to load; the scale is derived from the interval at plot time
        pass

    def _plot(self, ax, interval, reverse=False, fig=None, odd=False, font=None, **kwargs):
        """Write tick coordinates across the panel and the chromosome at x=0."""
        chrom, start, end = interval
        # Formatting
        for s in ax.spines.values():
            s.set_color('none')
        ax.yaxis.set_major_formatter(NullFormatter())
        ax.xaxis.set_major_formatter(NullFormatter())
        ax.yaxis.set_major_locator(NullLocator())
        ax.set_xlim(start, end)
        # ax.set_ylim(0,1)
        # Set font
        # Plot the numbers
        # use matplotlib's auto-chosen tick locations, restricted to the view
        ticks = [s for s in ax.xaxis.get_ticklocs()[:-1] if s > start and s < end]
        xcoords = [(s - start) / (end - start) + 0.01 for s in ticks]
        if reverse:
            # mirror the label values while keeping label positions fixed
            ticks = ticks[::-1]
        for s, x in zip(ticks[:-1], xcoords[:-1]):
            ax.text(
                x,
                0.5,
                str(int(s)),
                horizontalalignment='left',
                verticalalignment='center',
                transform=ax.transAxes,
                fontproperties=font,
                color=self.color)
        # chromosome name at the far left
        ax.text(
            0,
            0.5,
            chrom,
            horizontalalignment='left',
            verticalalignment='center',
            transform=ax.transAxes,
            fontproperties=font,
            color=self.color)
class ConservationPanel(ProfilePanel):
    """Panel that draws alignment polygons linking two genomic intervals.

    The track file is tab-separated with six columns per line:
    chrom1, start1, end1, chrom2, start2, end2.
    """
    def __init__(self, track, target, height=1):
        # path to the tab-separated alignment file
        self.track = track
        self.height = height
        self.data = []
        # (chrom, start, end) of the second (bottom) interval
        self.target = target

    def _load_data(self, ival1):
        """Parse the alignment file; coordinate columns become ints."""
        # BUGFIX: use a context manager so the file handle is always closed
        # (the original `for line in open(...)` leaked the descriptor).
        with open(self.track) as track_file:
            for line in track_file:
                vals = line.strip().split("\t")
                for i in [1, 2, 4, 5]:
                    vals[i] = int(vals[i])
                self.data.append(vals)

    def _plot(self, ax, interval, reverse=False, fig=None, odd=False, font=None, **kwargs):
        """Draw one translucent quadrilateral per alignment block.

        `reverse` mirrors the top interval; kwargs["reverse_self"] mirrors
        the bottom one. Coordinates are fractions of each interval's span
        (top edge y=1, bottom edge y=0).
        """
        reverse_other = reverse
        reverse_self = kwargs.get("reverse_self", False)
        chrom, start, end = interval
        c2, s2, e2 = self.target
        span1 = float(end - start)
        span2 = float(e2 - s2)
        for [chrom1, start1, end1, chrom2, start2, end2] in self.data:
            # four (x, y) corners: top-left, bottom-left, bottom-right, top-right,
            # each x mirrored independently per reverse flag
            if reverse_self:
                if reverse_other:
                    coords = [
                        [1 - (end1 - start) / span1, 1],
                        [1 - (end2 - s2) / span2, 0],
                        [1 - (start2 - s2) / span2, 0],
                        [1 - (start1 - start) / span1, 1]
                    ]
                else:
                    coords = [
                        [1 - (end1 - start) / span1, 1],
                        [(start2 - s2) / span2, 0],
                        [(end2 - s2) / span2, 0],
                        [1 - (start1 - start) / span1, 1]
                    ]
            else:
                if reverse_other:
                    coords = [
                        [(start1 - start) / span1, 1],
                        [1 - (end2 - s2) / span2, 0],
                        [1 - (start2 - s2) / span2, 0],
                        [(end1 - start) / span1, 1]
                    ]
                else:
                    coords = [
                        [(start1 - start) / span1, 1],
                        [(start2 - s2) / span2, 0],
                        [(end2 - s2) / span2, 0],
                        [(end1 - start) / span1, 1]
                    ]
            poly = Polygon(coords,
                           facecolor="black",
                           edgecolor='none',
                           alpha=0.2,
                           )
            ax.add_patch(poly)
        self.hide_axes(ax)
|
"""
================================================================================
pyFluidSynth
Python bindings for FluidSynth
Copyright 2008, Nathan Whitehead <nwhitehe@gmail.com>
Released under the LGPL
This module contains python bindings for FluidSynth. FluidSynth is a
software synthesizer for generating music. It works like a MIDI
synthesizer. You load patches, set parameters, then send NOTEON and
NOTEOFF events to play notes. Instruments are defined in SoundFonts,
generally files with the extension SF2. FluidSynth can either be used
to play audio itself, or you can call a function that returns chunks
of audio data and output the data to the soundcard yourself.
FluidSynth works on all major platforms, so pyFluidSynth should also.
================================================================================
"""
from ctypes import *
from ctypes.util import find_library
import os
# A short circuited or expression to find the FluidSynth library
# (mostly needed for Windows distributions of libfluidsynth supplied with QSynth)
# DLL search method changed in Python 3.8
# https://docs.python.org/3/library/os.html#os.add_dll_directory
if hasattr(os, 'add_dll_directory'):
    # Python 3.8+ on Windows no longer searches the working directory for
    # DLLs by default; add it back so a libfluidsynth DLL placed next to the
    # application is still found.
    os.add_dll_directory(os.getcwd())
# Try common library names in order; find_library returns None on a miss.
lib = find_library('fluidsynth') or \
    find_library('libfluidsynth') or \
    find_library('libfluidsynth-2') or \
    find_library('libfluidsynth-1')
if lib is None:
    raise ImportError("Couldn't find the FluidSynth library.")
# Dynamically link the FluidSynth library
# Architecture (32-/64-bit) must match your Python version
_fl = CDLL(lib)
# Helper function for declaring function prototypes
def cfunc(name, result, *args):
    """Build and apply a ctypes prototype complete with parameter flags.

    Each entry of `args` is (param_name, ctype, direction_flag[, default]).
    Returns None when the symbol is absent from the loaded library.
    """
    if not hasattr(_fl, name):
        # Handle FluidSynth 1.x / 2.x API differences: missing symbols map
        # to None so callers can feature-detect.
        return None
    argtypes = [spec[1] for spec in args]
    paramflags = tuple((spec[2], spec[0]) + spec[3:] for spec in args)
    return CFUNCTYPE(result, *argtypes)((name, _fl), paramflags)
# Bump this up when changing the interface for users
api_version = '1.3.0'

# Function prototypes for C versions of functions

FLUID_OK = 0
FLUID_FAILED = -1

fluid_version = cfunc('fluid_version', c_void_p,
                      ('major', POINTER(c_int), 1),
                      ('minor', POINTER(c_int), 1),
                      ('micro', POINTER(c_int), 1))

# Probe the runtime major version to adapt to API differences below.
majver = c_int()
fluid_version(majver, c_int(), c_int())
if majver.value > 1:
    # FluidSynth 2.x: the settings getters return FLUID_OK (0) on success.
    FLUIDSETTING_EXISTS = FLUID_OK
else:
    # FluidSynth 1.x: the settings getters return 1 on success.
    FLUIDSETTING_EXISTS = 1
# fluid settings
# Prototypes for the fluid_settings_* family: create/destroy a settings
# object and get/set values by dotted setting name.
new_fluid_settings = cfunc('new_fluid_settings', c_void_p)

fluid_settings_setstr = cfunc('fluid_settings_setstr', c_int,
                              ('settings', c_void_p, 1),
                              ('name', c_char_p, 1),
                              ('str', c_char_p, 1))

fluid_settings_setnum = cfunc('fluid_settings_setnum', c_int,
                              ('settings', c_void_p, 1),
                              ('name', c_char_p, 1),
                              ('val', c_double, 1))

fluid_settings_setint = cfunc('fluid_settings_setint', c_int,
                              ('settings', c_void_p, 1),
                              ('name', c_char_p, 1),
                              ('val', c_int, 1))

fluid_settings_copystr = cfunc('fluid_settings_copystr', c_int,
                               ('settings', c_void_p, 1),
                               ('name', c_char_p, 1),
                               ('str', c_char_p, 1),
                               ('len', c_int, 1))

fluid_settings_getnum = cfunc('fluid_settings_getnum', c_int,
                              ('settings', c_void_p, 1),
                              ('name', c_char_p, 1),
                              ('val', POINTER(c_double), 1))

fluid_settings_getint = cfunc('fluid_settings_getint', c_int,
                              ('settings', c_void_p, 1),
                              ('name', c_char_p, 1),
                              ('val', POINTER(c_int), 1))

delete_fluid_settings = cfunc('delete_fluid_settings', None,
                              ('settings', c_void_p, 1))
# fluid synth
# Core synthesizer lifecycle, SoundFont loading, MIDI channel events and
# raw sample generation.
new_fluid_synth = cfunc('new_fluid_synth', c_void_p,
                        ('settings', c_void_p, 1))

delete_fluid_synth = cfunc('delete_fluid_synth', None,
                           ('synth', c_void_p, 1))

fluid_synth_sfload = cfunc('fluid_synth_sfload', c_int,
                           ('synth', c_void_p, 1),
                           ('filename', c_char_p, 1),
                           ('update_midi_presets', c_int, 1))

fluid_synth_sfunload = cfunc('fluid_synth_sfunload', c_int,
                             ('synth', c_void_p, 1),
                             ('sfid', c_int, 1),
                             ('update_midi_presets', c_int, 1))

fluid_synth_program_select = cfunc('fluid_synth_program_select', c_int,
                                   ('synth', c_void_p, 1),
                                   ('chan', c_int, 1),
                                   ('sfid', c_int, 1),
                                   ('bank', c_int, 1),
                                   ('preset', c_int, 1))

fluid_synth_noteon = cfunc('fluid_synth_noteon', c_int,
                           ('synth', c_void_p, 1),
                           ('chan', c_int, 1),
                           ('key', c_int, 1),
                           ('vel', c_int, 1))

fluid_synth_noteoff = cfunc('fluid_synth_noteoff', c_int,
                            ('synth', c_void_p, 1),
                            ('chan', c_int, 1),
                            ('key', c_int, 1))

fluid_synth_pitch_bend = cfunc('fluid_synth_pitch_bend', c_int,
                               ('synth', c_void_p, 1),
                               ('chan', c_int, 1),
                               ('val', c_int, 1))

fluid_synth_cc = cfunc('fluid_synth_cc', c_int,
                       ('synth', c_void_p, 1),
                       ('chan', c_int, 1),
                       ('ctrl', c_int, 1),
                       ('val', c_int, 1))

fluid_synth_get_cc = cfunc('fluid_synth_get_cc', c_int,
                           ('synth', c_void_p, 1),
                           ('chan', c_int, 1),
                           ('num', c_int, 1),
                           ('pval', POINTER(c_int), 1))

fluid_synth_program_change = cfunc('fluid_synth_program_change', c_int,
                                   ('synth', c_void_p, 1),
                                   ('chan', c_int, 1),
                                   ('prg', c_int, 1))

fluid_synth_unset_program = cfunc('fluid_synth_unset_program', c_int,
                                  ('synth', c_void_p, 1),
                                  ('chan', c_int, 1))

fluid_synth_get_program = cfunc('fluid_synth_get_program', c_int,
                                ('synth', c_void_p, 1),
                                ('chan', c_int, 1),
                                ('sfont_id', POINTER(c_int), 1),
                                ('bank_num', POINTER(c_int), 1),
                                ('preset_num', POINTER(c_int), 1))

fluid_synth_bank_select = cfunc('fluid_synth_bank_select', c_int,
                                ('synth', c_void_p, 1),
                                ('chan', c_int, 1),
                                ('bank', c_int, 1))

fluid_synth_sfont_select = cfunc('fluid_synth_sfont_select', c_int,
                                 ('synth', c_void_p, 1),
                                 ('chan', c_int, 1),
                                 ('sfid', c_int, 1))

fluid_synth_program_reset = cfunc('fluid_synth_program_reset', c_int,
                                  ('synth', c_void_p, 1))

fluid_synth_system_reset = cfunc('fluid_synth_system_reset', c_int,
                                 ('synth', c_void_p, 1))

# Renders `len` frames of interleaved 16-bit stereo audio into caller buffers.
fluid_synth_write_s16 = cfunc('fluid_synth_write_s16', c_void_p,
                              ('synth', c_void_p, 1),
                              ('len', c_int, 1),
                              ('lbuf', c_void_p, 1),
                              ('loff', c_int, 1),
                              ('lincr', c_int, 1),
                              ('rbuf', c_void_p, 1),
                              ('roff', c_int, 1),
                              ('rincr', c_int, 1))
class fluid_synth_channel_info_t(Structure):
    """ctypes mirror of FluidSynth 1.x's fluid_synth_channel_info_t struct."""
    _fields_ = [
        ('assigned', c_int),
        ('sfont_id', c_int),
        ('bank', c_int),
        ('program', c_int),
        ('name', c_char*32),
        ('reserved', c_char*32)]
# Channel info (FluidSynth 1.x only; None on 2.x) plus reverb/chorus control.
fluid_synth_get_channel_info = cfunc('fluid_synth_get_channel_info', c_int,
                                     ('synth', c_void_p, 1),
                                     ('chan', c_int, 1),
                                     ('info', POINTER(fluid_synth_channel_info_t), 1))

fluid_synth_set_reverb_full = cfunc('fluid_synth_set_reverb_full', c_int,
                                    ('synth', c_void_p, 1),
                                    ('set', c_int, 1),
                                    ('roomsize', c_double, 1),
                                    ('damping', c_double, 1),
                                    ('width', c_double, 1),
                                    ('level', c_double, 1))

fluid_synth_set_chorus_full = cfunc('fluid_synth_set_chorus_full', c_int,
                                    ('synth', c_void_p, 1),
                                    ('set', c_int, 1),
                                    ('nr', c_int, 1),
                                    ('level', c_double, 1),
                                    ('speed', c_double, 1),
                                    ('depth_ms', c_double, 1),
                                    ('type', c_int, 1))

# NOTE(review): fluid_synth_set_reverb and fluid_synth_set_chorus are bound
# again, identically, in the "fluidsynth 2.x" section further down.
fluid_synth_set_reverb = cfunc('fluid_synth_set_reverb', c_int,
                               ('synth', c_void_p, 1),
                               ('roomsize', c_double, 1),
                               ('damping', c_double, 1),
                               ('width', c_double, 1),
                               ('level', c_double, 1))

fluid_synth_set_chorus = cfunc('fluid_synth_set_chorus', c_int,
                               ('synth', c_void_p, 1),
                               ('nr', c_int, 1),
                               ('level', c_double, 1),
                               ('speed', c_double, 1),
                               ('depth_ms', c_double, 1),
                               ('type', c_int, 1))

fluid_synth_set_reverb_roomsize = cfunc('fluid_synth_set_reverb_roomsize', c_int,
                                        ('synth', c_void_p, 1),
                                        ('roomsize', c_double, 1))

fluid_synth_set_reverb_damp = cfunc('fluid_synth_set_reverb_damp', c_int,
                                    ('synth', c_void_p, 1),
                                    ('damping', c_double, 1))

fluid_synth_set_reverb_level = cfunc('fluid_synth_set_reverb_level', c_int,
                                     ('synth', c_void_p, 1),
                                     ('level', c_double, 1))

fluid_synth_set_reverb_width = cfunc('fluid_synth_set_reverb_width', c_int,
                                     ('synth', c_void_p, 1),
                                     ('width', c_double, 1))

fluid_synth_set_chorus_nr = cfunc('fluid_synth_set_chorus_nr', c_int,
                                  ('synth', c_void_p, 1),
                                  ('nr', c_int, 1))

fluid_synth_set_chorus_level = cfunc('fluid_synth_set_chorus_level', c_int,
                                     ('synth', c_void_p, 1),
                                     ('level', c_double, 1))

fluid_synth_set_chorus_type = cfunc('fluid_synth_set_chorus_type', c_int,
                                    ('synth', c_void_p, 1),
                                    ('type', c_int, 1))

fluid_synth_get_reverb_roomsize = cfunc('fluid_synth_get_reverb_roomsize', c_double,
                                        ('synth', c_void_p, 1))

fluid_synth_get_reverb_damp = cfunc('fluid_synth_get_reverb_damp', c_double,
                                    ('synth', c_void_p, 1))

fluid_synth_get_reverb_level = cfunc('fluid_synth_get_reverb_level', c_double,
                                     ('synth', c_void_p, 1))

fluid_synth_get_reverb_width = cfunc('fluid_synth_get_reverb_width', c_double,
                                     ('synth', c_void_p, 1))

fluid_synth_get_chorus_nr = cfunc('fluid_synth_get_chorus_nr', c_int,
                                  ('synth', c_void_p, 1))

fluid_synth_get_chorus_level = cfunc('fluid_synth_get_chorus_level', c_double,
                                     ('synth', c_void_p, 1))

fluid_synth_get_chorus_speed_Hz = cfunc('fluid_synth_get_chorus_speed_Hz', c_double,
                                        ('synth', c_void_p, 1))

fluid_synth_get_chorus_depth_ms = cfunc('fluid_synth_get_chorus_depth_ms', c_double,
                                        ('synth', c_void_p, 1))

fluid_synth_get_chorus_type = cfunc('fluid_synth_get_chorus_type', c_int,
                                    ('synth', c_void_p, 1))

fluid_synth_set_midi_router = cfunc('fluid_synth_set_midi_router', None,
                                    ('synth', c_void_p, 1),
                                    ('router', c_void_p, 1))

fluid_synth_handle_midi_event = cfunc('fluid_synth_handle_midi_event', POINTER(c_int),
                                      ('data', c_void_p, 1),
                                      ('event', c_void_p, 1))
# fluid sequencer
# Sequencer, event, audio-driver and MIDI-driver prototypes.
new_fluid_sequencer2 = cfunc('new_fluid_sequencer2', c_void_p,
                             ('use_system_timer', c_int, 1))

fluid_sequencer_process = cfunc('fluid_sequencer_process', None,
                                ('seq', c_void_p, 1),
                                ('msec', c_uint, 1))

fluid_sequencer_register_fluidsynth = cfunc('fluid_sequencer_register_fluidsynth', c_short,
                                            ('seq', c_void_p, 1),
                                            ('synth', c_void_p, 1))

fluid_sequencer_register_client = cfunc('fluid_sequencer_register_client', c_short,
                                        ('seq', c_void_p, 1),
                                        ('name', c_char_p, 1),
                                        ('callback', CFUNCTYPE(None, c_uint, c_void_p, c_void_p, c_void_p), 1),
                                        ('data', c_void_p, 1))

fluid_sequencer_get_tick = cfunc('fluid_sequencer_get_tick', c_uint,
                                 ('seq', c_void_p, 1))

fluid_sequencer_set_time_scale = cfunc('fluid_sequencer_set_time_scale', None,
                                       ('seq', c_void_p, 1),
                                       ('scale', c_double, 1))

fluid_sequencer_get_time_scale = cfunc('fluid_sequencer_get_time_scale', c_double,
                                       ('seq', c_void_p, 1))

fluid_sequencer_send_at = cfunc('fluid_sequencer_send_at', c_int,
                                ('seq', c_void_p, 1),
                                ('evt', c_void_p, 1),
                                ('time', c_uint, 1),
                                ('absolute', c_int, 1))

delete_fluid_sequencer = cfunc('delete_fluid_sequencer', None,
                               ('seq', c_void_p, 1))

# fluid event
new_fluid_event = cfunc('new_fluid_event', c_void_p)

fluid_event_set_source = cfunc('fluid_event_set_source', None,
                               ('evt', c_void_p, 1),
                               ('src', c_void_p, 1))

fluid_event_set_dest = cfunc('fluid_event_set_dest', None,
                             ('evt', c_void_p, 1),
                             ('dest', c_void_p, 1))

fluid_event_timer = cfunc('fluid_event_timer', None,
                          ('evt', c_void_p, 1),
                          ('data', c_void_p, 1))

fluid_event_note = cfunc('fluid_event_note', None,
                         ('evt', c_void_p, 1),
                         ('channel', c_int, 1),
                         ('key', c_short, 1),
                         ('vel', c_short, 1),
                         ('duration', c_uint, 1))

fluid_event_noteon = cfunc('fluid_event_noteon', None,
                           ('evt', c_void_p, 1),
                           ('channel', c_int, 1),
                           ('key', c_short, 1),
                           ('vel', c_short, 1))

fluid_event_noteoff = cfunc('fluid_event_noteoff', None,
                            ('evt', c_void_p, 1),
                            ('channel', c_int, 1),
                            ('key', c_short, 1))

delete_fluid_event = cfunc('delete_fluid_event', None,
                           ('evt', c_void_p, 1))

# fluid audio driver
new_fluid_audio_driver = cfunc('new_fluid_audio_driver', c_void_p,
                               ('settings', c_void_p, 1),
                               ('synth', c_void_p, 1))

delete_fluid_audio_driver = cfunc('delete_fluid_audio_driver', None,
                                  ('driver', c_void_p, 1))

# fluid midi driver
new_fluid_midi_driver = cfunc('new_fluid_midi_driver', c_void_p,
                              ('settings', c_void_p, 1),
                              ('handler', CFUNCTYPE(POINTER(c_int), c_void_p, c_void_p), 1),
                              ('event_handler_data', c_void_p, 1))

# fluid midi router rule
class fluid_midi_router_t(Structure):
    """ctypes mirror of FluidSynth's fluid_midi_router_t struct layout."""
    _fields_ = [
        ('synth', c_void_p),
        ('rules_mutex', c_void_p),
        ('rules', c_void_p*6),
        ('free_rules', c_void_p),
        ('event_handler', c_void_p),
        ('event_handler_data', c_void_p),
        ('nr_midi_channels', c_int),
        ('cmd_rule', c_void_p),
        ('cmd_rule_type', POINTER(c_int))]
# MIDI router rule creation/parameterization and router management.
delete_fluid_midi_router_rule = cfunc('delete_fluid_midi_router_rule', c_int,
                                      ('rule', c_void_p, 1))

new_fluid_midi_router_rule = cfunc('new_fluid_midi_router_rule', c_void_p)

fluid_midi_router_rule_set_chan = cfunc('fluid_midi_router_rule_set_chan', None,
                                        ('rule', c_void_p, 1),
                                        ('min', c_int, 1),
                                        ('max', c_int, 1),
                                        ('mul', c_float, 1),
                                        ('add', c_int, 1))

fluid_midi_router_rule_set_param1 = cfunc('fluid_midi_router_rule_set_param1', None,
                                          ('rule', c_void_p, 1),
                                          ('min', c_int, 1),
                                          ('max', c_int, 1),
                                          ('mul', c_float, 1),
                                          ('add', c_int, 1))

fluid_midi_router_rule_set_param2 = cfunc('fluid_midi_router_rule_set_param2', None,
                                          ('rule', c_void_p, 1),
                                          ('min', c_int, 1),
                                          ('max', c_int, 1),
                                          ('mul', c_float, 1),
                                          ('add', c_int, 1))

# fluid midi router
new_fluid_midi_router = cfunc('new_fluid_midi_router', POINTER(fluid_midi_router_t),
                              ('settings', c_void_p, 1),
                              ('handler', CFUNCTYPE(POINTER(c_int), c_void_p, c_void_p), 1),
                              ('event_handler_data', c_void_p, 1))

fluid_midi_router_handle_midi_event = cfunc('fluid_midi_router_handle_midi_event', POINTER(c_int),
                                            ('data', c_void_p, 1),
                                            ('event', c_void_p, 1))

fluid_midi_router_clear_rules = cfunc('fluid_midi_router_clear_rules', c_int,
                                      ('router', POINTER(fluid_midi_router_t), 1))

fluid_midi_router_set_default_rules = cfunc('fluid_midi_router_set_default_rules', c_int,
                                            ('router', POINTER(fluid_midi_router_t), 1))

fluid_midi_router_add_rule = cfunc('fluid_midi_router_add_rule', c_int,
                                   ('router', POINTER(fluid_midi_router_t), 1),
                                   ('rule', c_void_p, 1),
                                   ('type', c_int, 1))
# fluidsynth 2.x
# Symbols that only exist on FluidSynth 2.x; each is None on 1.x, which the
# Synth methods use for feature detection.
new_fluid_cmd_handler=cfunc('new_fluid_cmd_handler', c_void_p,
                            ('synth', c_void_p, 1),
                            ('router', c_void_p, 1))

fluid_synth_get_sfont_by_id = cfunc('fluid_synth_get_sfont_by_id', c_void_p,
                                    ('synth', c_void_p, 1),
                                    ('id', c_int, 1))

fluid_sfont_get_preset = cfunc('fluid_sfont_get_preset', c_void_p,
                               ('sfont', c_void_p, 1),
                               ('banknum', c_int, 1),
                               ('prenum', c_int, 1))

fluid_preset_get_name = cfunc('fluid_preset_get_name', c_char_p,
                              ('preset', c_void_p, 1))

# NOTE(review): the next two re-bind names already defined above with
# identical prototypes; the earlier bindings are shadowed.
fluid_synth_set_reverb = cfunc('fluid_synth_set_reverb', c_int,
                               ('synth', c_void_p, 1),
                               ('roomsize', c_double, 1),
                               ('damping', c_double, 1),
                               ('width', c_double, 1),
                               ('level', c_double, 1))

fluid_synth_set_chorus = cfunc('fluid_synth_set_chorus', c_int,
                               ('synth', c_void_p, 1),
                               ('nr', c_int, 1),
                               ('level', c_double, 1),
                               ('speed', c_double, 1),
                               ('depth_ms', c_double, 1),
                               ('type', c_int, 1))

fluid_synth_get_chorus_speed = cfunc('fluid_synth_get_chorus_speed', c_double,
                                     ('synth', c_void_p, 1))

fluid_synth_get_chorus_depth = cfunc('fluid_synth_get_chorus_depth', c_double,
                                     ('synth', c_void_p, 1))
def fluid_synth_write_s16_stereo(synth, len):
    """Return generated samples in stereo 16-bit format
    Return value is a Numpy array of samples.
    """
    # NOTE(review): the parameter name `len` shadows the builtin inside this
    # function; kept as-is because renaming would break keyword callers.
    import numpy
    # 4 bytes per frame: two channels x 16-bit samples, interleaved L/R in
    # one buffer (left offset 0, right offset 1, both with stride 2).
    buf = create_string_buffer(len * 4)
    fluid_synth_write_s16(synth, len, buf, 0, 2, buf, 1, 2)
    return numpy.frombuffer(buf[:], dtype=numpy.int16)
# Object-oriented interface, simplifies access to functions
class Synth:
"""Synth represents a FluidSynth synthesizer"""
def __init__(self, gain=0.2, samplerate=44100, channels=256, **kwargs):
"""Create new synthesizer object to control sound generation
Optional keyword arguments:
gain : scale factor for audio output, default is 0.2
lower values are quieter, allow more simultaneous notes
samplerate : output samplerate in Hz, default is 44100 Hz
added capability for passing arbitrary fluid settings using args
"""
st = new_fluid_settings()
fluid_settings_setnum(st, b'synth.gain', gain)
fluid_settings_setnum(st, b'synth.sample-rate', samplerate)
fluid_settings_setint(st, b'synth.midi-channels', channels)
for opt,val in kwargs.items():
self.setting(opt, val)
self.settings = st
self.synth = new_fluid_synth(st)
self.audio_driver = None
self.midi_driver = None
self.router = None
def setting(self, opt, val):
"""change an arbitrary synth setting, type-smart"""
if isinstance(val, (str, bytes)):
fluid_settings_setstr(self.settings, opt.encode(), val.encode())
elif isinstance(val, int):
fluid_settings_setint(self.settings, opt.encode(), val)
elif isinstance(val, float):
fluid_settings_setnum(self.settings, opt.encode(), c_double(val))
def get_setting(self, opt):
"""get current value of an arbitrary synth setting"""
val = c_int()
if fluid_settings_getint(self.settings, opt.encode(), byref(val)) == FLUIDSETTING_EXISTS:
return val.value
strval = create_string_buffer(32)
if fluid_settings_copystr(self.settings, opt.encode(), strval, 32) == FLUIDSETTING_EXISTS:
return strval.value.decode()
num = c_double()
if fluid_settings_getnum(self.settings, opt.encode(), byref(num)) == FLUIDSETTING_EXISTS:
return round(num.value, 6)
return None
    def start(self, driver=None, device=None, midi_driver=None):
        """Start audio output driver in separate background thread
        Call this function any time after creating the Synth object.
        If you don't call this function, use get_samples() to generate
        samples.
        Optional keyword argument:
        driver : which audio driver to use for output
        device : the device to use for audio output
        midi_driver : the midi driver to use for communicating with midi devices
        see http://www.fluidsynth.org/api/fluidsettings.xml for allowed values and defaults by platform
        """
        # Fall back to whatever is already configured in the settings object.
        driver = driver or self.get_setting('audio.driver')
        device = device or self.get_setting('audio.%s.device' % driver)
        midi_driver = midi_driver or self.get_setting('midi.driver')
        self.setting('audio.driver', driver)
        self.setting('audio.%s.device' % driver, device)
        self.audio_driver = new_fluid_audio_driver(self.settings, self.synth)
        self.setting('midi.driver', midi_driver)
        # Route incoming MIDI through a router so router_* rules apply.
        self.router = new_fluid_midi_router(self.settings, fluid_synth_handle_midi_event, self.synth)
        if new_fluid_cmd_handler:
            # FluidSynth 2.x: the command handler wires synth and router together.
            new_fluid_cmd_handler(self.synth, self.router)
        else:
            # FluidSynth 1.x: attach the router to the synth directly.
            fluid_synth_set_midi_router(self.synth, self.router)
        self.midi_driver = new_fluid_midi_driver(self.settings, fluid_midi_router_handle_midi_event, self.router)
    def delete(self):
        # Tear down in reverse creation order: the audio driver must be
        # destroyed before the synth it pulls samples from.
        if self.audio_driver:
            delete_fluid_audio_driver(self.audio_driver)
        delete_fluid_synth(self.synth)
        delete_fluid_settings(self.settings)
        # NOTE(review): self.midi_driver and self.router are not freed here;
        # confirm whether they should be deleted as well.
    def sfload(self, filename, update_midi_preset=0):
        """Load SoundFont and return its ID (FLUID_FAILED, i.e. -1, on error)"""
        return fluid_synth_sfload(self.synth, filename.encode(), update_midi_preset)
    def sfunload(self, sfid, update_midi_preset=0):
        """Unload a SoundFont and free memory it used"""
        return fluid_synth_sfunload(self.synth, sfid, update_midi_preset)
    def program_select(self, chan, sfid, bank, preset):
        """Select a program: bind (soundfont, bank, preset) to a MIDI channel"""
        return fluid_synth_program_select(self.synth, chan, sfid, bank, preset)
    def program_unset(self, chan):
        """Set the preset of a MIDI channel to an unassigned state"""
        return fluid_synth_unset_program(self.synth, chan)
    def channel_info(self, chan):
        """get soundfont, bank, prog, preset name of channel"""
        if fluid_synth_get_channel_info is not None:
            # FluidSynth 1.x path: one struct-filling call.
            info=fluid_synth_channel_info_t()
            fluid_synth_get_channel_info(self.synth, chan, byref(info))
            return (info.sfont_id, info.bank, info.program, info.name)
        else:
            # FluidSynth 2.x removed the call; compose from two 2.x APIs.
            (sfontid, banknum, presetnum) = self.program_info(chan)
            presetname = self.sfpreset_name(sfontid, banknum, presetnum)
            return (sfontid, banknum, presetnum, presetname)
def program_info(self, chan):
    """get active soundfont, bank, prog on a channel

    Returns a (sfont_id, bank, program) tuple. Prefers the 2.x
    fluid_synth_get_program symbol; falls back to channel_info() on 1.x.
    """
    if fluid_synth_get_program is not None:
        sfontid = c_int()
        banknum = c_int()
        presetnum = c_int()
        fluid_synth_get_program(self.synth, chan, byref(sfontid), byref(banknum), byref(presetnum))
        return (sfontid.value, banknum.value, presetnum.value)
    else:
        (sfontid, banknum, prognum, presetname) = self.channel_info(chan)
        return (sfontid, banknum, prognum)
def sfpreset_name(self, sfid, bank, prenum):
    """Return the name of a soundfont preset, or None if it does not exist.

    Requires the FluidSynth 2.x API (fluid_synth_get_sfont_by_id).
    """
    if fluid_synth_get_sfont_by_id is not None:
        sfont = fluid_synth_get_sfont_by_id(self.synth, sfid)
        preset = fluid_sfont_get_preset(sfont, bank, prenum)
        if not preset:
            return None
        return fluid_preset_get_name(preset).decode('ascii')
    # BUG FIX: the old fallback referenced an undefined name `chan`
    # (NameError) and would mutually recurse with channel_info(). The 1.x
    # API has no (sfid, bank, prenum) -> name lookup, so fail explicitly.
    raise NotImplementedError(
        "sfpreset_name requires FluidSynth 2.x; use channel_info() on 1.x")
def router_clear(self):
    """Remove all rules from the MIDI router (no-op before start())."""
    if self.router is not None:
        fluid_midi_router_clear_rules(self.router)
def router_default(self):
    """Reinstall the default router rules (no-op before start())."""
    if self.router is not None:
        fluid_midi_router_set_default_rules(self.router)
def router_begin(self, type):
    """Start building a new router rule of the given kind.

    types are [note|cc|prog|pbend|cpress|kpress]; an unknown string leaves
    cmd_rule_type unchanged (matching the historical behavior). Finish the
    rule with router_end().
    """
    if self.router is not None:
        if type == 'note':
            self.router.cmd_rule_type = 0
        elif type == 'cc':
            self.router.cmd_rule_type = 1
        elif type == 'prog':
            self.router.cmd_rule_type = 2
        elif type == 'pbend':
            self.router.cmd_rule_type = 3
        elif type == 'cpress':
            self.router.cmd_rule_type = 4
        elif type == 'kpress':
            self.router.cmd_rule_type = 5
        # BUG FIX: the original tested the literal string
        # 'self.router.cmd_rule' against globals(), which is always False,
        # so an unfinished previous rule was never freed (memory leak).
        if getattr(self.router, 'cmd_rule', None) is not None:
            delete_fluid_midi_router_rule(self.router.cmd_rule)
        self.router.cmd_rule = new_fluid_midi_router_rule()
def router_end(self):
    """Finalize the rule begun with router_begin() and hand it to the router.

    On success the router owns the rule; on failure it is freed here.
    Either way cmd_rule is reset to None.
    """
    if self.router is not None:
        if self.router.cmd_rule is None:
            return
        if fluid_midi_router_add_rule(self.router, self.router.cmd_rule, self.router.cmd_rule_type) < 0:
            delete_fluid_midi_router_rule(self.router.cmd_rule)
        self.router.cmd_rule = None
def router_chan(self, min, max, mul, add):
    """Set the channel-match range and mapping of the rule under construction."""
    if self.router is not None:
        fluid_midi_router_rule_set_chan(self.router.cmd_rule, min, max, mul, add)
def router_par1(self, min, max, mul, add):
    """Set param1 (e.g. note number / CC number) matching of the current rule."""
    if self.router is not None:
        fluid_midi_router_rule_set_param1(self.router.cmd_rule, min, max, mul, add)
def router_par2(self, min, max, mul, add):
    """Set param2 (e.g. velocity / CC value) matching of the current rule."""
    if self.router is not None:
        fluid_midi_router_rule_set_param2(self.router.cmd_rule, min, max, mul, add)
def set_reverb(self, roomsize=-1.0, damping=-1.0, width=-1.0, level=-1.0):
    """
    roomsize Reverb room size value (0.0-1.0)
    damping Reverb damping value (0.0-1.0)
    width Reverb width value (0.0-100.0)
    level Reverb level value (0.0-1.0)

    Negative values mean "leave this parameter unchanged" (1.x path only).
    """
    if fluid_synth_set_reverb is not None:
        return fluid_synth_set_reverb(self.synth, roomsize, damping, width, level)
    # FluidSynth 1.x fallback: build the bitmask naming which parameters
    # to apply. Local renamed from 'set', which shadowed the builtin.
    flags = 0
    if roomsize >= 0:
        flags |= 0b0001
    if damping >= 0:
        flags |= 0b0010
    if width >= 0:
        flags |= 0b0100
    if level >= 0:
        flags |= 0b1000
    return fluid_synth_set_reverb_full(self.synth, flags, roomsize, damping, width, level)
def set_chorus(self, nr=-1, level=-1.0, speed=-1.0, depth=-1.0, type=-1):
    """
    nr Chorus voice count (0-99, CPU time consumption proportional to this value)
    level Chorus level (0.0-10.0)
    speed Chorus speed in Hz (0.29-5.0)
    depth Chorus depth (max value depends on synth sample rate, 0.0-21.0 is safe for sample rate values up to 96KHz)
    type Chorus waveform type (0=sine, 1=triangle)

    Negative values mean "leave this parameter unchanged" (1.x path only).
    """
    if fluid_synth_set_chorus is not None:
        return fluid_synth_set_chorus(self.synth, nr, level, speed, depth, type)
    # FluidSynth 1.x fallback: build the bitmask naming which parameters
    # to apply. Local renamed from 'set', which shadowed the builtin.
    flags = 0
    if nr >= 0:
        flags |= 0b00001
    if level >= 0:
        flags |= 0b00010
    if speed >= 0:
        flags |= 0b00100
    if depth >= 0:
        flags |= 0b01000
    if type >= 0:
        flags |= 0b10000
    return fluid_synth_set_chorus_full(self.synth, flags, nr, level, speed, depth, type)
def set_reverb_roomsize(self, roomsize):
    """Set only the reverb room size (falls back to set_reverb on 1.x)."""
    if fluid_synth_set_reverb_roomsize is not None:
        return fluid_synth_set_reverb_roomsize(self.synth, roomsize)
    else:
        return self.set_reverb(roomsize=roomsize)
def set_reverb_damp(self, damping):
    """Set only the reverb damping (falls back to set_reverb on 1.x)."""
    if fluid_synth_set_reverb_damp is not None:
        return fluid_synth_set_reverb_damp(self.synth, damping)
    else:
        return self.set_reverb(damping=damping)
def set_reverb_level(self, level):
    """Set only the reverb level (falls back to set_reverb on 1.x)."""
    if fluid_synth_set_reverb_level is not None:
        return fluid_synth_set_reverb_level(self.synth, level)
    else:
        return self.set_reverb(level=level)
def set_reverb_width(self, width):
    """Set only the reverb width (falls back to set_reverb on 1.x)."""
    if fluid_synth_set_reverb_width is not None:
        return fluid_synth_set_reverb_width(self.synth, width)
    else:
        return self.set_reverb(width=width)
def set_chorus_nr(self, nr):
    """Set only the chorus voice count (falls back to set_chorus on 1.x)."""
    if fluid_synth_set_chorus_nr is not None:
        return fluid_synth_set_chorus_nr(self.synth, nr)
    else:
        return self.set_chorus(nr=nr)
def set_chorus_level(self, level):
    """Set only the chorus level (falls back to set_chorus on 1.x)."""
    if fluid_synth_set_chorus_level is not None:
        return fluid_synth_set_chorus_level(self.synth, level)
    else:
        # BUG FIX: keyword was misspelled 'leve', raising TypeError on the
        # fallback path.
        return self.set_chorus(level=level)
def set_chorus_speed(self, speed):
    """Set only the chorus speed (falls back to set_chorus on 1.x)."""
    if fluid_synth_set_chorus_speed is not None:
        return fluid_synth_set_chorus_speed(self.synth, speed)
    else:
        return self.set_chorus(speed=speed)
def set_chorus_depth(self, depth):
    """Set only the chorus depth (falls back to set_chorus on 1.x)."""
    if fluid_synth_set_chorus_depth is not None:
        return fluid_synth_set_chorus_depth(self.synth, depth)
    else:
        return self.set_chorus(depth=depth)
def set_chorus_type(self, type):
    """Set only the chorus waveform type (falls back to set_chorus on 1.x)."""
    if fluid_synth_set_chorus_type is not None:
        return fluid_synth_set_chorus_type(self.synth, type)
    else:
        return self.set_chorus(type=type)
def get_reverb_roomsize(self):
    """Return the current reverb room size (float)."""
    return fluid_synth_get_reverb_roomsize(self.synth)
def get_reverb_damp(self):
    """Return the current reverb damping (float)."""
    return fluid_synth_get_reverb_damp(self.synth)
def get_reverb_level(self):
    """Return the current reverb level (float)."""
    return fluid_synth_get_reverb_level(self.synth)
def get_reverb_width(self):
    """Return the current reverb width (float)."""
    return fluid_synth_get_reverb_width(self.synth)
def get_chorus_nr(self):
    """Return the current chorus voice count (int)."""
    return fluid_synth_get_chorus_nr(self.synth)
def get_chorus_level(self):
    """Return the current chorus level (float).

    BUG FIX: previously called fluid_synth_get_reverb_level, so it
    returned the reverb level instead of the chorus level.
    """
    return fluid_synth_get_chorus_level(self.synth)
def get_chorus_speed(self):
    """Return the chorus speed in Hz (2.x API, with the 1.x *_Hz fallback)."""
    if fluid_synth_get_chorus_speed is not None:
        return fluid_synth_get_chorus_speed(self.synth)
    else:
        return fluid_synth_get_chorus_speed_Hz(self.synth)
def get_chorus_depth(self):
    """Return the chorus depth (2.x API, with the 1.x *_ms fallback)."""
    if fluid_synth_get_chorus_depth is not None:
        return fluid_synth_get_chorus_depth(self.synth)
    else:
        return fluid_synth_get_chorus_depth_ms(self.synth)
def get_chorus_type(self):
    """Return the chorus waveform type (int)."""
    return fluid_synth_get_chorus_type(self.synth)
def noteon(self, chan, key, vel):
    """Play a note.

    Returns False without touching the synth when chan is negative or
    key/vel fall outside 0..128 (matching the original range checks).
    """
    in_range = (0 <= key <= 128) and (chan >= 0) and (0 <= vel <= 128)
    if not in_range:
        return False
    return fluid_synth_noteon(self.synth, chan, key, vel)
def noteoff(self, chan, key):
    """Stop a note.

    Returns False without touching the synth when chan is negative or
    key falls outside 0..128 (matching the original range checks).
    """
    if not (0 <= key <= 128) or chan < 0:
        return False
    return fluid_synth_noteoff(self.synth, chan, key)
def pitch_bend(self, chan, val):
    """Adjust pitch of a playing channel by small amounts
    A pitch bend value of 0 is no pitch change from default.
    A value of -2048 is 1 semitone down.
    A value of 2048 is 1 semitone up.
    Maximum values are -8192 to +8192 (transposing by 4 semitones).
    """
    # The C API takes an absolute bend value; shift so that 0 = center.
    return fluid_synth_pitch_bend(self.synth, chan, val + 8192)
def cc(self, chan, ctrl, val):
    """Send control change value
    The controls that are recognized are dependent on the
    SoundFont. Values are always 0 to 127. Typical controls
    include:
    1 : vibrato
    7 : volume
    10 : pan (left to right)
    11 : expression (soft to loud)
    64 : sustain
    91 : reverb
    93 : chorus
    """
    return fluid_synth_cc(self.synth, chan, ctrl, val)
def get_cc(self, chan, num):
    """Return the current value of controller `num` on channel `chan`."""
    i = c_int()  # out-parameter filled by the C call
    fluid_synth_get_cc(self.synth, chan, num, byref(i))
    return i.value
def program_change(self, chan, prg):
    """Change the program (preset number) on a MIDI channel"""
    return fluid_synth_program_change(self.synth, chan, prg)
def bank_select(self, chan, bank):
    """Choose a bank on a MIDI channel"""
    return fluid_synth_bank_select(self.synth, chan, bank)
def sfont_select(self, chan, sfid):
    """Choose a SoundFont (by ID) for a MIDI channel"""
    return fluid_synth_sfont_select(self.synth, chan, sfid)
def program_reset(self):
    """Reset the programs on all channels"""
    return fluid_synth_program_reset(self.synth)
def system_reset(self):
    """Stop all notes and reset all programs"""
    return fluid_synth_system_reset(self.synth)
def get_samples(self, len=1024):
    """Generate audio samples
    The return value will be a NumPy array containing the given
    length of audio samples. If the synth is set to stereo output
    (the default) the array will be size 2 * len.
    """
    # Delegates to the module-level helper that interleaves L/R channels.
    return fluid_synth_write_s16_stereo(self.synth, len)
class Sequencer:
    def __init__(self, time_scale=1000, use_system_timer=True):
        """Create new sequencer object to control and schedule timing of midi events
        Optional keyword arguments:
        time_scale: ticks per second, defaults to 1000
        use_system_timer: whether the sequencer should advance by itself
        """
        self.client_callbacks = []
        self.sequencer = new_fluid_sequencer2(use_system_timer)
        fluid_sequencer_set_time_scale(self.sequencer, time_scale)
    def register_fluidsynth(self, synth):
        """Attach a Synth instance as an event destination; returns its client id."""
        response = fluid_sequencer_register_fluidsynth(self.sequencer, synth.synth)
        if response == FLUID_FAILED:
            # NOTE(review): `Error` is not defined in this module — confirm;
            # as written this failure path raises NameError instead.
            raise Error("Registering fluid synth failed")
        return response
    def register_client(self, name, callback, data=None):
        """Register a Python callback as a sequencer client; returns its id."""
        c_callback = CFUNCTYPE(None, c_uint, c_void_p, c_void_p, c_void_p)(callback)
        response = fluid_sequencer_register_client(self.sequencer, name.encode(), c_callback, data)
        if response == FLUID_FAILED:
            raise Error("Registering client failed")
        # store in a list to prevent garbage collection
        self.client_callbacks.append(c_callback)
        return response
    def note(self, time, channel, key, velocity, duration, source=-1, dest=-1, absolute=True):
        """Schedule a complete note (on + off after `duration`)."""
        evt = self._create_event(source, dest)
        fluid_event_note(evt, channel, key, velocity, duration)
        self._schedule_event(evt, time, absolute)
        delete_fluid_event(evt)
    def note_on(self, time, channel, key, velocity=127, source=-1, dest=-1, absolute=True):
        """Schedule a note-on event."""
        evt = self._create_event(source, dest)
        fluid_event_noteon(evt, channel, key, velocity)
        self._schedule_event(evt, time, absolute)
        delete_fluid_event(evt)
    def note_off(self, time, channel, key, source=-1, dest=-1, absolute=True):
        """Schedule a note-off event."""
        evt = self._create_event(source, dest)
        fluid_event_noteoff(evt, channel, key)
        self._schedule_event(evt, time, absolute)
        delete_fluid_event(evt)
    def timer(self, time, data=None, source=-1, dest=-1, absolute=True):
        """Schedule a timer event (fires registered client callbacks)."""
        evt = self._create_event(source, dest)
        fluid_event_timer(evt, data)
        self._schedule_event(evt, time, absolute)
        delete_fluid_event(evt)
    def _create_event(self, source=-1, dest=-1):
        # Allocate a fresh event and stamp source/destination client ids.
        evt = new_fluid_event()
        fluid_event_set_source(evt, source)
        fluid_event_set_dest(evt, dest)
        return evt
    def _schedule_event(self, evt, time, absolute):
        # `absolute` selects absolute vs. relative tick time.
        response = fluid_sequencer_send_at(self.sequencer, evt, time, absolute)
        if response == FLUID_FAILED:
            raise Error("Scheduling event failed")
    def get_tick(self):
        """Return the sequencer's current tick."""
        return fluid_sequencer_get_tick(self.sequencer)
    def process(self, msec):
        """Advance the sequencer by `msec` (for use_system_timer=False)."""
        fluid_sequencer_process(self.sequencer, msec)
    def delete(self):
        """Free the native sequencer object."""
        delete_fluid_sequencer(self.sequencer)
def raw_audio_string(data):
    """Return a string of bytes to send to soundcard
    Input is a numpy array of samples. Default output format
    is 16-bit signed (other formats not currently supported).
    """
    import numpy
    # BUG FIX: ndarray.tostring() was deprecated in NumPy 1.19 and removed
    # in NumPy 2.0; tobytes() is the byte-identical replacement.
    return (data.astype(numpy.int16)).tobytes()
# fix new_fluid_midi_driver typing
# first argument should be pointer to void, not int
"""
================================================================================
pyFluidSynth
Python bindings for FluidSynth
Copyright 2008, Nathan Whitehead <nwhitehe@gmail.com>
Released under the LGPL
This module contains python bindings for FluidSynth. FluidSynth is a
software synthesizer for generating music. It works like a MIDI
synthesizer. You load patches, set parameters, then send NOTEON and
NOTEOFF events to play notes. Instruments are defined in SoundFonts,
generally files with the extension SF2. FluidSynth can either be used
to play audio itself, or you can call a function that returns chunks
of audio data and output the data to the soundcard yourself.
FluidSynth works on all major platforms, so pyFluidSynth should also.
================================================================================
"""
from ctypes import *
from ctypes.util import find_library
import os
# A short circuited or expression to find the FluidSynth library
# (mostly needed for Windows distributions of libfluidsynth supplied with QSynth)
# DLL search method changed in Python 3.8
# https://docs.python.org/3/library/os.html#os.add_dll_directory
if hasattr(os, 'add_dll_directory'):
    # Allow the DLL to be found next to the current working directory.
    os.add_dll_directory(os.getcwd())
# Try the various names libfluidsynth ships under across platforms/versions.
lib = find_library('fluidsynth') or \
    find_library('libfluidsynth') or \
    find_library('libfluidsynth-2') or \
    find_library('libfluidsynth-1')
if lib is None:
    raise ImportError("Couldn't find the FluidSynth library.")
# Dynamically link the FluidSynth library
# Architecture (32-/64-bit) must match your Python version
_fl = CDLL(lib)
# Helper function for declaring function prototypes
def cfunc(name, result, *args):
    """Build and apply a ctypes prototype complete with parameter flags

    name: C symbol to look up in the loaded fluidsynth library
    result: ctypes return type
    args: (argname, argtype, paramflag[, default]) tuples

    Returns None when the symbol is absent from the loaded library, so
    callers guard with `is not None` to handle 1.x/2.x API differences.
    """
    if hasattr(_fl, name):
        atypes = []
        aflags = []
        for arg in args:
            atypes.append(arg[1])
            # (paramflag, name[, default]) per the ctypes paramflags spec
            aflags.append((arg[2], arg[0]) + arg[3:])
        return CFUNCTYPE(result, *atypes)((name, _fl), tuple(aflags))
    else:  # Handle Fluidsynth 1.x, 2.x, etc. API differences
        return None
# Bump this up when changing the interface for users
api_version = '1.3.0'
# Function prototypes for C versions of functions
FLUID_OK = 0
FLUID_FAILED = -1
fluid_version = cfunc('fluid_version', c_void_p,
('major', POINTER(c_int), 1),
('minor', POINTER(c_int), 1),
('micro', POINTER(c_int), 1))
majver = c_int()
fluid_version(majver, c_int(), c_int())
if majver.value > 1:
FLUIDSETTING_EXISTS = FLUID_OK
else:
FLUIDSETTING_EXISTS = 1
# fluid settings
new_fluid_settings = cfunc('new_fluid_settings', c_void_p)
fluid_settings_setstr = cfunc('fluid_settings_setstr', c_int,
('settings', c_void_p, 1),
('name', c_char_p, 1),
('str', c_char_p, 1))
fluid_settings_setnum = cfunc('fluid_settings_setnum', c_int,
('settings', c_void_p, 1),
('name', c_char_p, 1),
('val', c_double, 1))
fluid_settings_setint = cfunc('fluid_settings_setint', c_int,
('settings', c_void_p, 1),
('name', c_char_p, 1),
('val', c_int, 1))
fluid_settings_copystr = cfunc('fluid_settings_copystr', c_int,
('settings', c_void_p, 1),
('name', c_char_p, 1),
('str', c_char_p, 1),
('len', c_int, 1))
fluid_settings_getnum = cfunc('fluid_settings_getnum', c_int,
('settings', c_void_p, 1),
('name', c_char_p, 1),
('val', POINTER(c_double), 1))
fluid_settings_getint = cfunc('fluid_settings_getint', c_int,
('settings', c_void_p, 1),
('name', c_char_p, 1),
('val', POINTER(c_int), 1))
delete_fluid_settings = cfunc('delete_fluid_settings', None,
('settings', c_void_p, 1))
# fluid synth
new_fluid_synth = cfunc('new_fluid_synth', c_void_p,
('settings', c_void_p, 1))
delete_fluid_synth = cfunc('delete_fluid_synth', None,
('synth', c_void_p, 1))
fluid_synth_sfload = cfunc('fluid_synth_sfload', c_int,
('synth', c_void_p, 1),
('filename', c_char_p, 1),
('update_midi_presets', c_int, 1))
fluid_synth_sfunload = cfunc('fluid_synth_sfunload', c_int,
('synth', c_void_p, 1),
('sfid', c_int, 1),
('update_midi_presets', c_int, 1))
fluid_synth_program_select = cfunc('fluid_synth_program_select', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('sfid', c_int, 1),
('bank', c_int, 1),
('preset', c_int, 1))
fluid_synth_noteon = cfunc('fluid_synth_noteon', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('key', c_int, 1),
('vel', c_int, 1))
fluid_synth_noteoff = cfunc('fluid_synth_noteoff', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('key', c_int, 1))
fluid_synth_pitch_bend = cfunc('fluid_synth_pitch_bend', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('val', c_int, 1))
fluid_synth_cc = cfunc('fluid_synth_cc', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('ctrl', c_int, 1),
('val', c_int, 1))
fluid_synth_get_cc = cfunc('fluid_synth_get_cc', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('num', c_int, 1),
('pval', POINTER(c_int), 1))
fluid_synth_program_change = cfunc('fluid_synth_program_change', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('prg', c_int, 1))
fluid_synth_unset_program = cfunc('fluid_synth_unset_program', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1))
fluid_synth_get_program = cfunc('fluid_synth_get_program', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('sfont_id', POINTER(c_int), 1),
('bank_num', POINTER(c_int), 1),
('preset_num', POINTER(c_int), 1))
fluid_synth_bank_select = cfunc('fluid_synth_bank_select', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('bank', c_int, 1))
fluid_synth_sfont_select = cfunc('fluid_synth_sfont_select', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('sfid', c_int, 1))
fluid_synth_program_reset = cfunc('fluid_synth_program_reset', c_int,
('synth', c_void_p, 1))
fluid_synth_system_reset = cfunc('fluid_synth_system_reset', c_int,
('synth', c_void_p, 1))
fluid_synth_write_s16 = cfunc('fluid_synth_write_s16', c_void_p,
('synth', c_void_p, 1),
('len', c_int, 1),
('lbuf', c_void_p, 1),
('loff', c_int, 1),
('lincr', c_int, 1),
('rbuf', c_void_p, 1),
('roff', c_int, 1),
('rincr', c_int, 1))
class fluid_synth_channel_info_t(Structure):
    # Mirrors the FluidSynth 1.x C struct of the same name; filled by
    # fluid_synth_get_channel_info via byref().
    _fields_ = [
        ('assigned', c_int),
        ('sfont_id', c_int),
        ('bank', c_int),
        ('program', c_int),
        ('name', c_char * 32),
        ('reserved', c_char * 32)]
fluid_synth_get_channel_info = cfunc('fluid_synth_get_channel_info', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('info', POINTER(fluid_synth_channel_info_t), 1))
fluid_synth_set_reverb_full = cfunc('fluid_synth_set_reverb_full', c_int,
('synth', c_void_p, 1),
('set', c_int, 1),
('roomsize', c_double, 1),
('damping', c_double, 1),
('width', c_double, 1),
('level', c_double, 1))
fluid_synth_set_chorus_full = cfunc('fluid_synth_set_chorus_full', c_int,
('synth', c_void_p, 1),
('set', c_int, 1),
('nr', c_int, 1),
('level', c_double, 1),
('speed', c_double, 1),
('depth_ms', c_double, 1),
('type', c_int, 1))
fluid_synth_set_reverb = cfunc('fluid_synth_set_reverb', c_int,
('synth', c_void_p, 1),
('roomsize', c_double, 1),
('damping', c_double, 1),
('width', c_double, 1),
('level', c_double, 1))
fluid_synth_set_chorus = cfunc('fluid_synth_set_chorus', c_int,
('synth', c_void_p, 1),
('nr', c_int, 1),
('level', c_double, 1),
('speed', c_double, 1),
('depth_ms', c_double, 1),
('type', c_int, 1))
fluid_synth_set_reverb_roomsize = cfunc('fluid_synth_set_reverb_roomsize', c_int,
('synth', c_void_p, 1),
('roomsize', c_double, 1))
fluid_synth_set_reverb_damp = cfunc('fluid_synth_set_reverb_damp', c_int,
('synth', c_void_p, 1),
('damping', c_double, 1))
fluid_synth_set_reverb_level = cfunc('fluid_synth_set_reverb_level', c_int,
('synth', c_void_p, 1),
('level', c_double, 1))
fluid_synth_set_reverb_width = cfunc('fluid_synth_set_reverb_width', c_int,
('synth', c_void_p, 1),
('width', c_double, 1))
fluid_synth_set_chorus_nr = cfunc('fluid_synth_set_chorus_nr', c_int,
('synth', c_void_p, 1),
('nr', c_int, 1))
fluid_synth_set_chorus_level = cfunc('fluid_synth_set_chorus_level', c_int,
('synth', c_void_p, 1),
('level', c_double, 1))
fluid_synth_set_chorus_type = cfunc('fluid_synth_set_chorus_type', c_int,
('synth', c_void_p, 1),
('type', c_int, 1))
fluid_synth_get_reverb_roomsize = cfunc('fluid_synth_get_reverb_roomsize', c_double,
('synth', c_void_p, 1))
fluid_synth_get_reverb_damp = cfunc('fluid_synth_get_reverb_damp', c_double,
('synth', c_void_p, 1))
fluid_synth_get_reverb_level = cfunc('fluid_synth_get_reverb_level', c_double,
('synth', c_void_p, 1))
fluid_synth_get_reverb_width = cfunc('fluid_synth_get_reverb_width', c_double,
('synth', c_void_p, 1))
fluid_synth_get_chorus_nr = cfunc('fluid_synth_get_chorus_nr', c_int,
('synth', c_void_p, 1))
fluid_synth_get_chorus_level = cfunc('fluid_synth_get_chorus_level', c_double,
('synth', c_void_p, 1))
fluid_synth_get_chorus_speed_Hz = cfunc('fluid_synth_get_chorus_speed_Hz', c_double,
('synth', c_void_p, 1))
fluid_synth_get_chorus_depth_ms = cfunc('fluid_synth_get_chorus_depth_ms', c_double,
('synth', c_void_p, 1))
fluid_synth_get_chorus_type = cfunc('fluid_synth_get_chorus_type', c_int,
('synth', c_void_p, 1))
fluid_synth_set_midi_router = cfunc('fluid_synth_set_midi_router', None,
('synth', c_void_p, 1),
('router', c_void_p, 1))
fluid_synth_handle_midi_event = cfunc('fluid_synth_handle_midi_event', POINTER(c_int),
('data', c_void_p, 1),
('event', c_void_p, 1))
# fluid sequencer
new_fluid_sequencer2 = cfunc('new_fluid_sequencer2', c_void_p,
('use_system_timer', c_int, 1))
fluid_sequencer_process = cfunc('fluid_sequencer_process', None,
('seq', c_void_p, 1),
('msec', c_uint, 1))
fluid_sequencer_register_fluidsynth = cfunc('fluid_sequencer_register_fluidsynth', c_short,
('seq', c_void_p, 1),
('synth', c_void_p, 1))
fluid_sequencer_register_client = cfunc('fluid_sequencer_register_client', c_short,
('seq', c_void_p, 1),
('name', c_char_p, 1),
('callback', CFUNCTYPE(None, c_uint, c_void_p, c_void_p, c_void_p), 1),
('data', c_void_p, 1))
fluid_sequencer_get_tick = cfunc('fluid_sequencer_get_tick', c_uint,
('seq', c_void_p, 1))
fluid_sequencer_set_time_scale = cfunc('fluid_sequencer_set_time_scale', None,
('seq', c_void_p, 1),
('scale', c_double, 1))
fluid_sequencer_get_time_scale = cfunc('fluid_sequencer_get_time_scale', c_double,
('seq', c_void_p, 1))
fluid_sequencer_send_at = cfunc('fluid_sequencer_send_at', c_int,
('seq', c_void_p, 1),
('evt', c_void_p, 1),
('time', c_uint, 1),
('absolute', c_int, 1))
delete_fluid_sequencer = cfunc('delete_fluid_sequencer', None,
('seq', c_void_p, 1))
# fluid event
new_fluid_event = cfunc('new_fluid_event', c_void_p)
fluid_event_set_source = cfunc('fluid_event_set_source', None,
('evt', c_void_p, 1),
('src', c_void_p, 1))
fluid_event_set_dest = cfunc('fluid_event_set_dest', None,
('evt', c_void_p, 1),
('dest', c_void_p, 1))
fluid_event_timer = cfunc('fluid_event_timer', None,
('evt', c_void_p, 1),
('data', c_void_p, 1))
fluid_event_note = cfunc('fluid_event_note', None,
('evt', c_void_p, 1),
('channel', c_int, 1),
('key', c_short, 1),
('vel', c_short, 1),
('duration', c_uint, 1))
fluid_event_noteon = cfunc('fluid_event_noteon', None,
('evt', c_void_p, 1),
('channel', c_int, 1),
('key', c_short, 1),
('vel', c_short, 1))
fluid_event_noteoff = cfunc('fluid_event_noteoff', None,
('evt', c_void_p, 1),
('channel', c_int, 1),
('key', c_short, 1))
delete_fluid_event = cfunc('delete_fluid_event', None,
('evt', c_void_p, 1))
# fluid audio driver
new_fluid_audio_driver = cfunc('new_fluid_audio_driver', c_void_p,
('settings', c_void_p, 1),
('synth', c_void_p, 1))
delete_fluid_audio_driver = cfunc('delete_fluid_audio_driver', None,
('driver', c_void_p, 1))
# fluid midi driver
new_fluid_midi_driver = cfunc('new_fluid_midi_driver', c_void_p,
('settings', c_void_p, 1),
('handler', CFUNCTYPE(c_void_p, c_void_p, c_void_p), 1),
('event_handler_data', c_void_p, 1))
# fluid midi router rule
class fluid_midi_router_t(Structure):
    # Mirrors the FluidSynth 1.x router C struct; new_fluid_midi_router
    # returns a POINTER to this. NOTE(review): layout must match the
    # installed libfluidsynth version — confirm against its headers.
    _fields_ = [
        ('synth', c_void_p),
        ('rules_mutex', c_void_p),
        ('rules', c_void_p * 6),
        ('free_rules', c_void_p),
        ('event_handler', c_void_p),
        ('event_handler_data', c_void_p),
        ('nr_midi_channels', c_int),
        ('cmd_rule', c_void_p),
        ('cmd_rule_type', POINTER(c_int))]
delete_fluid_midi_router_rule = cfunc('delete_fluid_midi_router_rule', c_int,
('rule', c_void_p, 1))
new_fluid_midi_router_rule = cfunc('new_fluid_midi_router_rule', c_void_p)
fluid_midi_router_rule_set_chan = cfunc('fluid_midi_router_rule_set_chan', None,
('rule', c_void_p, 1),
('min', c_int, 1),
('max', c_int, 1),
('mul', c_float, 1),
('add', c_int, 1))
fluid_midi_router_rule_set_param1 = cfunc('fluid_midi_router_rule_set_param1', None,
('rule', c_void_p, 1),
('min', c_int, 1),
('max', c_int, 1),
('mul', c_float, 1),
('add', c_int, 1))
fluid_midi_router_rule_set_param2 = cfunc('fluid_midi_router_rule_set_param2', None,
('rule', c_void_p, 1),
('min', c_int, 1),
('max', c_int, 1),
('mul', c_float, 1),
('add', c_int, 1))
# fluid midi router
new_fluid_midi_router = cfunc('new_fluid_midi_router', POINTER(fluid_midi_router_t),
('settings', c_void_p, 1),
('handler', CFUNCTYPE(POINTER(c_int), c_void_p, c_void_p), 1),
('event_handler_data', c_void_p, 1))
fluid_midi_router_handle_midi_event = cfunc('fluid_midi_router_handle_midi_event', POINTER(c_int),
('data', c_void_p, 1),
('event', c_void_p, 1))
fluid_midi_router_clear_rules = cfunc('fluid_midi_router_clear_rules', c_int,
('router', POINTER(fluid_midi_router_t), 1))
fluid_midi_router_set_default_rules = cfunc('fluid_midi_router_set_default_rules', c_int,
('router', POINTER(fluid_midi_router_t), 1))
fluid_midi_router_add_rule = cfunc('fluid_midi_router_add_rule', c_int,
('router', POINTER(fluid_midi_router_t), 1),
('rule', c_void_p, 1),
('type', c_int, 1))
# fluidsynth 2.x
new_fluid_cmd_handler=cfunc('new_fluid_cmd_handler', c_void_p,
('synth', c_void_p, 1),
('router', c_void_p, 1))
fluid_synth_get_sfont_by_id = cfunc('fluid_synth_get_sfont_by_id', c_void_p,
('synth', c_void_p, 1),
('id', c_int, 1))
fluid_sfont_get_preset = cfunc('fluid_sfont_get_preset', c_void_p,
('sfont', c_void_p, 1),
('banknum', c_int, 1),
('prenum', c_int, 1))
fluid_preset_get_name = cfunc('fluid_preset_get_name', c_char_p,
('preset', c_void_p, 1))
fluid_synth_set_reverb = cfunc('fluid_synth_set_reverb', c_int,
('synth', c_void_p, 1),
('roomsize', c_double, 1),
('damping', c_double, 1),
('width', c_double, 1),
('level', c_double, 1))
fluid_synth_set_chorus = cfunc('fluid_synth_set_chorus', c_int,
('synth', c_void_p, 1),
('nr', c_int, 1),
('level', c_double, 1),
('speed', c_double, 1),
('depth_ms', c_double, 1),
('type', c_int, 1))
fluid_synth_get_chorus_speed = cfunc('fluid_synth_get_chorus_speed', c_double,
('synth', c_void_p, 1))
fluid_synth_get_chorus_depth = cfunc('fluid_synth_get_chorus_depth', c_double,
('synth', c_void_p, 1))
def fluid_synth_write_s16_stereo(synth, len):
    """Return generated samples in stereo 16-bit format
    Return value is a Numpy array of samples.
    """
    import numpy
    buf = create_string_buffer(len * 4)  # 2 channels * 2 bytes per sample
    # Write L into even and R into odd 16-bit slots of the same buffer
    # (offset 0/1, stride 2), producing interleaved stereo.
    fluid_synth_write_s16(synth, len, buf, 0, 2, buf, 1, 2)
    return numpy.frombuffer(buf[:], dtype=numpy.int16)
# Object-oriented interface, simplifies access to functions
class Synth:
"""Synth represents a FluidSynth synthesizer"""
def __init__(self, gain=0.2, samplerate=44100, channels=256, **kwargs):
    """Create new synthesizer object to control sound generation
    Optional keyword arguments:
    gain : scale factor for audio output, default is 0.2
    lower values are quieter, allow more simultaneous notes
    samplerate : output samplerate in Hz, default is 44100 Hz
    added capability for passing arbitrary fluid settings using args
    """
    st = new_fluid_settings()
    fluid_settings_setnum(st, b'synth.gain', gain)
    fluid_settings_setnum(st, b'synth.sample-rate', samplerate)
    fluid_settings_setint(st, b'synth.midi-channels', channels)
    # BUG FIX: self.settings must be assigned BEFORE self.setting() is
    # called; the original looped over kwargs first, so passing any
    # keyword argument raised AttributeError on self.settings.
    self.settings = st
    for opt, val in kwargs.items():
        self.setting(opt, val)
    self.synth = new_fluid_synth(st)
    self.audio_driver = None
    self.midi_driver = None
    self.router = None
def setting(self, opt, val):
    """change an arbitrary synth setting, type-smart

    Dispatches on the Python type of val: str/bytes -> setstr,
    int -> setint, float -> setnum. Other types are silently ignored.
    """
    if isinstance(val, (str, bytes)):
        fluid_settings_setstr(self.settings, opt.encode(), val.encode())
    elif isinstance(val, int):
        fluid_settings_setint(self.settings, opt.encode(), val)
    elif isinstance(val, float):
        fluid_settings_setnum(self.settings, opt.encode(), c_double(val))
def get_setting(self, opt):
    """get current value of an arbitrary synth setting

    Tries int, then string (up to 32 bytes), then float; returns None
    when the setting does not exist under any of those types.
    """
    val = c_int()
    if fluid_settings_getint(self.settings, opt.encode(), byref(val)) == FLUIDSETTING_EXISTS:
        return val.value
    strval = create_string_buffer(32)
    if fluid_settings_copystr(self.settings, opt.encode(), strval, 32) == FLUIDSETTING_EXISTS:
        return strval.value.decode()
    num = c_double()
    if fluid_settings_getnum(self.settings, opt.encode(), byref(num)) == FLUIDSETTING_EXISTS:
        # rounded to avoid exposing float noise from the C double
        return round(num.value, 6)
    return None
def start(self, driver=None, device=None, midi_driver=None):
    """Start audio output driver in separate background thread
    Call this function any time after creating the Synth object.
    If you don't call this function, use get_samples() to generate
    samples.
    Optional keyword argument:
    driver : which audio driver to use for output
    device : the device to use for audio output
    midi_driver : the midi driver to use for communicating with midi devices
    see http://www.fluidsynth.org/api/fluidsettings.xml for allowed values and defaults by platform
    """
    # Fall back to whatever is already configured in the settings object.
    driver = driver or self.get_setting('audio.driver')
    device = device or self.get_setting('audio.%s.device' % driver)
    midi_driver = midi_driver or self.get_setting('midi.driver')
    self.setting('audio.driver', driver)
    self.setting('audio.%s.device' % driver, device)
    self.audio_driver = new_fluid_audio_driver(self.settings, self.synth)
    self.setting('midi.driver', midi_driver)
    # Route incoming MIDI through the router into the synth.
    self.router = new_fluid_midi_router(self.settings, fluid_synth_handle_midi_event, self.synth)
    if new_fluid_cmd_handler:
        # FluidSynth 2.x: the command handler wires synth and router.
        new_fluid_cmd_handler(self.synth, self.router)
    else:
        fluid_synth_set_midi_router(self.synth, self.router)
    self.midi_driver = new_fluid_midi_driver(self.settings, fluid_midi_router_handle_midi_event, self.router)
def delete(self):
    """Free the native audio driver, synth and settings objects.

    NOTE(review): the midi driver and router created by start() are not
    freed here — confirm whether libfluidsynth reclaims them.
    """
    if self.audio_driver:
        delete_fluid_audio_driver(self.audio_driver)
    delete_fluid_synth(self.synth)
    delete_fluid_settings(self.settings)
def sfload(self, filename, update_midi_preset=0):
    """Load SoundFont and return its ID"""
    # filename is encoded to bytes for the C API
    return fluid_synth_sfload(self.synth, filename.encode(), update_midi_preset)
def sfunload(self, sfid, update_midi_preset=0):
    """Unload a SoundFont (by the ID returned from sfload) and free memory it used"""
    return fluid_synth_sfunload(self.synth, sfid, update_midi_preset)
def program_select(self, chan, sfid, bank, preset):
    """Select a program: bind (soundfont, bank, preset) to a MIDI channel"""
    return fluid_synth_program_select(self.synth, chan, sfid, bank, preset)
def program_unset(self, chan):
    """Set the preset of a MIDI channel to an unassigned state"""
    return fluid_synth_unset_program(self.synth, chan)
def channel_info(self, chan):
    """get soundfont, bank, prog, preset name of channel

    Returns a (sfont_id, bank, program, name) tuple; on FluidSynth 2.x it
    is reassembled from program_info() + sfpreset_name().
    """
    if fluid_synth_get_channel_info is not None:
        info = fluid_synth_channel_info_t()
        fluid_synth_get_channel_info(self.synth, chan, byref(info))
        return (info.sfont_id, info.bank, info.program, info.name)
    else:
        (sfontid, banknum, presetnum) = self.program_info(chan)
        presetname = self.sfpreset_name(sfontid, banknum, presetnum)
        return (sfontid, banknum, presetnum, presetname)
def program_info(self, chan):
    """get active soundfont, bank, prog on a channel

    Returns a (sfont_id, bank, program) tuple; falls back to
    channel_info() when the 2.x symbol is unavailable.
    """
    if fluid_synth_get_program is not None:
        sfontid = c_int()
        banknum = c_int()
        presetnum = c_int()
        fluid_synth_get_program(self.synth, chan, byref(sfontid), byref(banknum), byref(presetnum))
        return (sfontid.value, banknum.value, presetnum.value)
    else:
        (sfontid, banknum, prognum, presetname) = self.channel_info(chan)
        return (sfontid, banknum, prognum)
def sfpreset_name(self, sfid, bank, prenum):
    """Return the name of a soundfont preset, or None if it does not exist.

    Requires the FluidSynth 2.x API (fluid_synth_get_sfont_by_id).
    """
    if fluid_synth_get_sfont_by_id is not None:
        sfont = fluid_synth_get_sfont_by_id(self.synth, sfid)
        preset = fluid_sfont_get_preset(sfont, bank, prenum)
        if not preset:
            return None
        return fluid_preset_get_name(preset).decode('ascii')
    # BUG FIX: the old fallback referenced an undefined name `chan`
    # (NameError) and would mutually recurse with channel_info(). The 1.x
    # API has no (sfid, bank, prenum) -> name lookup, so fail explicitly.
    raise NotImplementedError(
        "sfpreset_name requires FluidSynth 2.x; use channel_info() on 1.x")
def router_clear(self):
    """Remove all rules from the MIDI router (no-op before start())."""
    if self.router is not None:
        fluid_midi_router_clear_rules(self.router)
def router_default(self):
    """Reinstall the default router rules (no-op before start())."""
    if self.router is not None:
        fluid_midi_router_set_default_rules(self.router)
def router_begin(self, type):
    """Start building a new router rule of the given kind.

    types are [note|cc|prog|pbend|cpress|kpress]; an unknown string leaves
    cmd_rule_type unchanged (matching the historical behavior). Finish the
    rule with router_end().
    """
    if self.router is not None:
        if type == 'note':
            self.router.cmd_rule_type = 0
        elif type == 'cc':
            self.router.cmd_rule_type = 1
        elif type == 'prog':
            self.router.cmd_rule_type = 2
        elif type == 'pbend':
            self.router.cmd_rule_type = 3
        elif type == 'cpress':
            self.router.cmd_rule_type = 4
        elif type == 'kpress':
            self.router.cmd_rule_type = 5
        # BUG FIX: the original tested the literal string
        # 'self.router.cmd_rule' against globals(), which is always False,
        # so an unfinished previous rule was never freed (memory leak).
        if getattr(self.router, 'cmd_rule', None) is not None:
            delete_fluid_midi_router_rule(self.router.cmd_rule)
        self.router.cmd_rule = new_fluid_midi_router_rule()
def router_end(self):
    """Finalize the rule begun with router_begin() and hand it to the router.

    On success the router owns the rule; on failure it is freed here.
    Either way cmd_rule is reset to None.
    """
    if self.router is not None:
        if self.router.cmd_rule is None:
            return
        if fluid_midi_router_add_rule(self.router, self.router.cmd_rule, self.router.cmd_rule_type) < 0:
            delete_fluid_midi_router_rule(self.router.cmd_rule)
        self.router.cmd_rule = None
def router_chan(self, min, max, mul, add):
    """Set the channel-match range and mapping of the rule under construction."""
    if self.router is not None:
        fluid_midi_router_rule_set_chan(self.router.cmd_rule, min, max, mul, add)
def router_par1(self, min, max, mul, add):
    """Set param1 (e.g. note number / CC number) matching of the current rule."""
    if self.router is not None:
        fluid_midi_router_rule_set_param1(self.router.cmd_rule, min, max, mul, add)
def router_par2(self, min, max, mul, add):
if self.router is not None:
fluid_midi_router_rule_set_param2(self.router.cmd_rule, min, max, mul, add)
def set_reverb(self, roomsize=-1.0, damping=-1.0, width=-1.0, level=-1.0):
"""
roomsize Reverb room size value (0.0-1.0)
damping Reverb damping value (0.0-1.0)
width Reverb width value (0.0-100.0)
level Reverb level value (0.0-1.0)
"""
if fluid_synth_set_reverb is not None:
return fluid_synth_set_reverb(self.synth, roomsize, damping, width, level)
else:
set=0
if roomsize>=0:
set+=0b0001
if damping>=0:
set+=0b0010
if width>=0:
set+=0b0100
if level>=0:
set+=0b1000
return fluid_synth_set_reverb_full(self.synth, set, roomsize, damping, width, level)
def set_chorus(self, nr=-1, level=-1.0, speed=-1.0, depth=-1.0, type=-1):
"""
nr Chorus voice count (0-99, CPU time consumption proportional to this value)
level Chorus level (0.0-10.0)
speed Chorus speed in Hz (0.29-5.0)
depth_ms Chorus depth (max value depends on synth sample rate, 0.0-21.0 is safe for sample rate values up to 96KHz)
type Chorus waveform type (0=sine, 1=triangle)
"""
if fluid_synth_set_chorus is not None:
return fluid_synth_set_chorus(self.synth, nr, level, speed, depth, type)
else:
set=0
if nr>=0:
set+=0b00001
if level>=0:
set+=0b00010
if speed>=0:
set+=0b00100
if depth>=0:
set+=0b01000
if type>=0:
set+=0b10000
return fluid_synth_set_chorus_full(self.synth, set, nr, level, speed, depth, type)
def set_reverb_roomsize(self, roomsize):
if fluid_synth_set_reverb_roomsize is not None:
return fluid_synth_set_reverb_roomsize(self.synth, roomsize)
else:
return self.set_reverb(roomsize=roomsize)
def set_reverb_damp(self, damping):
if fluid_synth_set_reverb_damp is not None:
return fluid_synth_set_reverb_damp(self.synth, damping)
else:
return self.set_reverb(damping=damping)
def set_reverb_level(self, level):
if fluid_synth_set_reverb_level is not None:
return fluid_synth_set_reverb_level(self.synth, level)
else:
return self.set_reverb(level=level)
def set_reverb_width(self, width):
if fluid_synth_set_reverb_width is not None:
return fluid_synth_set_reverb_width(self.synth, width)
else:
return self.set_reverb(width=width)
def set_chorus_nr(self, nr):
if fluid_synth_set_chorus_nr is not None:
return fluid_synth_set_chorus_nr(self.synth, nr)
else:
return self.set_chorus(nr=nr)
def set_chorus_level(self, level):
if fluid_synth_set_chorus_level is not None:
return fluid_synth_set_chorus_level(self.synth, level)
else:
return self.set_chorus(leve=level)
def set_chorus_speed(self, speed):
if fluid_synth_set_chorus_speed is not None:
return fluid_synth_set_chorus_speed(self.synth, speed)
else:
return self.set_chorus(speed=speed)
def set_chorus_depth(self, depth):
if fluid_synth_set_chorus_depth is not None:
return fluid_synth_set_chorus_depth(self.synth, depth)
else:
return self.set_chorus(depth=depth)
def set_chorus_type(self, type):
if fluid_synth_set_chorus_type is not None:
return fluid_synth_set_chorus_type(self.synth, type)
else:
return self.set_chorus(type=type)
    def get_reverb_roomsize(self):
        """Return the current reverb room size (0.0-1.0)."""
        return fluid_synth_get_reverb_roomsize(self.synth)
    def get_reverb_damp(self):
        """Return the current reverb damping (0.0-1.0)."""
        return fluid_synth_get_reverb_damp(self.synth)
    def get_reverb_level(self):
        """Return the current reverb level (0.0-1.0)."""
        return fluid_synth_get_reverb_level(self.synth)
    def get_reverb_width(self):
        """Return the current reverb width (0.0-100.0)."""
        return fluid_synth_get_reverb_width(self.synth)
def get_chorus_nr(self):
return fluid_synth_get_chorus_nr(self.synth)
def get_chorus_level(self):
return fluid_synth_get_reverb_level(self.synth)
def get_chorus_speed(self):
if fluid_synth_get_chorus_speed is not None:
return fluid_synth_get_chorus_speed(self.synth)
else:
return fluid_synth_get_chorus_speed_Hz(self.synth)
def get_chorus_depth(self):
if fluid_synth_get_chorus_depth is not None:
return fluid_synth_get_chorus_depth(self.synth)
else:
return fluid_synth_get_chorus_depth_ms(self.synth)
def get_chorus_type(self):
return fluid_synth_get_chorus_type(self.synth)
def noteon(self, chan, key, vel):
"""Play a note"""
if key < 0 or key > 128:
return False
if chan < 0:
return False
if vel < 0 or vel > 128:
return False
return fluid_synth_noteon(self.synth, chan, key, vel)
def noteoff(self, chan, key):
"""Stop a note"""
if key < 0 or key > 128:
return False
if chan < 0:
return False
return fluid_synth_noteoff(self.synth, chan, key)
def pitch_bend(self, chan, val):
"""Adjust pitch of a playing channel by small amounts
A pitch bend value of 0 is no pitch change from default.
A value of -2048 is 1 semitone down.
A value of 2048 is 1 semitone up.
Maximum values are -8192 to +8192 (transposing by 4 semitones).
"""
return fluid_synth_pitch_bend(self.synth, chan, val + 8192)
    def cc(self, chan, ctrl, val):
        """Send control change value
        The controls that are recognized are dependent on the
        SoundFont. Values are always 0 to 127. Typical controls
        include:
        1 : vibrato
        7 : volume
        10 : pan (left to right)
        11 : expression (soft to loud)
        64 : sustain
        91 : reverb
        93 : chorus

        Returns the result code of fluid_synth_cc.
        """
        return fluid_synth_cc(self.synth, chan, ctrl, val)
def get_cc(self, chan, num):
i=c_int()
fluid_synth_get_cc(self.synth, chan, num, byref(i))
return i.value
    def program_change(self, chan, prg):
        """Change the program (preset) on a MIDI channel; returns the result code."""
        return fluid_synth_program_change(self.synth, chan, prg)
    def bank_select(self, chan, bank):
        """Choose a bank on a MIDI channel; returns the result code."""
        return fluid_synth_bank_select(self.synth, chan, bank)
    def sfont_select(self, chan, sfid):
        """Choose a SoundFont (by id) for a MIDI channel; returns the result code."""
        return fluid_synth_sfont_select(self.synth, chan, sfid)
    def program_reset(self):
        """Reset the programs on all channels; returns the result code."""
        return fluid_synth_program_reset(self.synth)
    def system_reset(self):
        """Stop all notes and reset all programs; returns the result code."""
        return fluid_synth_system_reset(self.synth)
    def get_samples(self, len=1024):
        """Generate audio samples
        The return value will be a NumPy array containing the given
        length of audio samples. If the synth is set to stereo output
        (the default) the array will be size 2 * len.
        """
        # NOTE(review): `len` shadows the builtin; kept for API compatibility.
        return fluid_synth_write_s16_stereo(self.synth, len)
class Sequencer:
    def __init__(self, time_scale=1000, use_system_timer=True):
        """Create a new sequencer for scheduling timed MIDI events.

        Optional keyword arguments:
        time_scale: ticks per second, defaults to 1000
        use_system_timer: whether the sequencer should advance by itself
        """
        self.client_callbacks = []
        self.sequencer = new_fluid_sequencer2(use_system_timer)
        fluid_sequencer_set_time_scale(self.sequencer, time_scale)

    def register_fluidsynth(self, synth):
        """Attach a Synth instance as an event destination; return its client id."""
        client_id = fluid_sequencer_register_fluidsynth(self.sequencer, synth.synth)
        if client_id == FLUID_FAILED:
            raise Error("Registering fluid synth failed")
        return client_id

    def register_client(self, name, callback, data=None):
        """Register a Python callback as a sequencer client; return its client id."""
        wrapped = CFUNCTYPE(None, c_uint, c_void_p, c_void_p, c_void_p)(callback)
        client_id = fluid_sequencer_register_client(self.sequencer, name.encode(), wrapped, data)
        if client_id == FLUID_FAILED:
            raise Error("Registering client failed")
        # Keep a reference so the ctypes trampoline is not garbage collected.
        self.client_callbacks.append(wrapped)
        return client_id

    def note(self, time, channel, key, velocity, duration, source=-1, dest=-1, absolute=True):
        """Schedule a complete note (note-on plus note-off after `duration`)."""
        event = self._create_event(source, dest)
        fluid_event_note(event, channel, key, velocity, duration)
        self._schedule_event(event, time, absolute)
        delete_fluid_event(event)

    def note_on(self, time, channel, key, velocity=127, source=-1, dest=-1, absolute=True):
        """Schedule a note-on event."""
        event = self._create_event(source, dest)
        fluid_event_noteon(event, channel, key, velocity)
        self._schedule_event(event, time, absolute)
        delete_fluid_event(event)

    def note_off(self, time, channel, key, source=-1, dest=-1, absolute=True):
        """Schedule a note-off event."""
        event = self._create_event(source, dest)
        fluid_event_noteoff(event, channel, key)
        self._schedule_event(event, time, absolute)
        delete_fluid_event(event)

    def timer(self, time, data=None, source=-1, dest=-1, absolute=True):
        """Schedule a timer event, delivered to registered clients."""
        event = self._create_event(source, dest)
        fluid_event_timer(event, data)
        self._schedule_event(event, time, absolute)
        delete_fluid_event(event)

    def _create_event(self, source=-1, dest=-1):
        # Allocate a fluid event and stamp its routing information.
        event = new_fluid_event()
        fluid_event_set_source(event, source)
        fluid_event_set_dest(event, dest)
        return event

    def _schedule_event(self, evt, time, absolute=True):
        # Hand the event to the sequencer at the given (absolute or
        # relative) tick; the caller remains responsible for freeing it.
        if fluid_sequencer_send_at(self.sequencer, evt, time, absolute) == FLUID_FAILED:
            raise Error("Scheduling event failed")

    def get_tick(self):
        """Return the current tick of the sequencer."""
        return fluid_sequencer_get_tick(self.sequencer)

    def process(self, msec):
        """Advance a non-system-timer sequencer by `msec` milliseconds."""
        fluid_sequencer_process(self.sequencer, msec)

    def delete(self):
        delete_fluid_sequencer(self.sequencer)
def raw_audio_string(data):
    """Return a bytes object to send to the soundcard.

    Input is a numpy array of samples. Default output format
    is 16-bit signed (other formats not currently supported).
    """
    import numpy
    # BUG FIX: ndarray.tostring() was deprecated in NumPy 1.19 and removed
    # in NumPy 1.23; tobytes() returns exactly the same bytes.
    return (data.astype(numpy.int16)).tobytes()
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import numpy as np
from .... import units as u
from ... import ICRS, FK5
from ....time import Time
from ....table import Table
from ...angle_utilities import angular_separation
TOLERANCE = 0.03 # arcseconds; maximum allowed angular separation in the assertions below
ROOT = os.path.dirname(os.path.abspath(__file__))  # directory holding the reference CSV data
def test_icrs_no_e_fk5():
    """Compare ICRS <-> FK5 conversions against reference values in icrs_fk5.csv."""
    t = Table.read(os.path.join(ROOT, 'icrs_fk5.csv'), format='ascii')
    for i in range(len(t)):
        # Extract row
        r = t[i]
        # ICRS to FK5 (old comment said "FK4 to FK5", which was wrong)
        c1 = ICRS(r['ra_in'], r['dec_in'],
                  unit=(u.degree, u.degree),
                  obstime=Time(r['obstime'], scale='utc'))
        c2 = c1.transform_to(FK5).precess_to(Time(r['equinox_fk5'], scale='utc'))
        # Find difference
        diff = angular_separation(c2.ra.radian, c2.dec.radian,
                                  np.radians(r['ra_fk5']), np.radians(r['dec_fk5']))
        assert np.degrees(diff) * 3600. < TOLERANCE
        # FK5 to ICRS (old comment said "FK5 to FK4", which was wrong)
        c1 = FK5(r['ra_in'], r['dec_in'],
                 unit=(u.degree, u.degree),
                 obstime=Time(r['obstime'], scale='utc'),
                 equinox=Time(r['equinox_fk5'], scale='utc'))
        c2 = c1.transform_to(ICRS)
        # Find difference
        diff = angular_separation(c2.ra.radian, c2.dec.radian,
                                  np.radians(r['ra_icrs']), np.radians(r['dec_icrs']))
        assert np.degrees(diff) * 3600. < TOLERANCE
Passing: ICRS to FK5 conversion tests.
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import numpy as np
from .... import units as u
from ...builtin_frames import ICRS, FK5
from ....time import Time
from ....table import Table
from ...angle_utilities import angular_separation
TOLERANCE = 0.03 # arcseconds; maximum allowed angular separation in the assertions below
ROOT = os.path.dirname(os.path.abspath(__file__))  # directory holding the reference CSV data
def test_icrs_no_e_fk5():
    """Check ICRS <-> FK5 round trips against reference values in icrs_fk5.csv."""
    table = Table.read(os.path.join(ROOT, 'icrs_fk5.csv'), format='ascii')
    for row in table:
        # ICRS to FK5
        coord_in = ICRS(ra=row['ra_in']*u.deg, dec=row['dec_in']*u.deg)
        coord_out = coord_in.transform_to(FK5(equinox=Time(row['equinox_fk5'], scale='utc')))
        sep = angular_separation(coord_out.ra.radian, coord_out.dec.radian,
                                 np.radians(row['ra_fk5']),
                                 np.radians(row['dec_fk5']))
        assert np.degrees(sep) * 3600. < TOLERANCE
        # FK5 to ICRS
        coord_in = FK5(ra=row['ra_in']*u.deg, dec=row['dec_in']*u.deg,
                       equinox=Time(row['equinox_fk5'], scale='utc'))
        coord_out = coord_in.transform_to(ICRS)
        sep = angular_separation(coord_out.ra.radian, coord_out.dec.radian,
                                 np.radians(row['ra_icrs']),
                                 np.radians(row['dec_icrs']))
        assert np.degrees(sep) * 3600. < TOLERANCE
|
import os
import tee
import pkg_resources
from os.path import abspath, join, isfile
from python import Python
from process import Process
from chdir import chdir
from exit import err_exit
class Setuptools(object):
"""Interface to setuptools functions."""
def __init__(self, process=None):
self.python = Python()
self.process = process or Process(env=self.get_env())
def get_env(self):
# Make sure setuptools and its extensions are found if mkrelease
# has been installed with zc.buildout
path = []
for name in ('setuptools', 'setuptools-hg', 'setuptools-git',
'setuptools-subversion'):
try:
dist = pkg_resources.get_distribution(name)
except pkg_resources.DistributionNotFound:
continue
path.append(dist.location)
env = os.environ.copy()
env['PYTHONPATH'] = ':'.join(path)
env['HG_SETUPTOOLS_FORCE_CMD'] = '1'
return env
def is_valid_package(self, dir):
return isfile(join(dir, 'setup.py'))
def check_valid_package(self, dir):
if not self.is_valid_package(dir):
err_exit('No setup.py found in %(dir)s' % locals())
@chdir
def get_package_info(self, dir):
python = self.python
rc, lines = self.process.popen(
'"%(python)s" setup.py --name --version' % locals(), echo=False)
if rc == 0 and len(lines) > 1:
return lines[0], lines[1]
err_exit('Bad setup.py')
@chdir
def run_egg_info(self, dir, infoflags, scmtype='', quiet=False):
if not self.process.quiet:
print 'running egg_info'
echo = tee.After('running egg_info')
if quiet:
echo = tee.And(echo, tee.StartsWith('running'))
rc, lines = self._run_setup_py(
['egg_info'] + infoflags,
echo=echo,
scmtype=scmtype)
if rc == 0:
filename = self._parse_egg_info_results(lines)
if filename and isfile(filename):
return abspath(filename)
err_exit('egg_info failed')
@chdir
def run_dist(self, dir, infoflags, distcmd, distflags, scmtype='', quiet=False):
if not self.process.quiet:
print 'running', distcmd
echo = tee.After('running %(distcmd)s' % locals())
if quiet:
echo = tee.And(echo, tee.StartsWith('running'))
rc, lines = self._run_setup_py(
['egg_info'] + infoflags + [distcmd] + distflags,
echo=echo,
scmtype=scmtype)
if rc == 0:
filename = self._parse_dist_results(lines)
if filename and isfile(filename):
return abspath(filename)
err_exit('%(distcmd)s failed')
@chdir
def run_register(self, dir, infoflags, location, scmtype='', quiet=False):
if not self.process.quiet:
print 'running register'
echo = tee.After('running register')
if quiet:
echo = tee.And(echo, tee.Not(tee.StartsWith('Registering')))
serverflags = ['--repository="%s"' % location]
rc, lines = self._run_setup_py(
['egg_info'] + infoflags + ['register'] + serverflags,
echo=echo,
scmtype=scmtype)
if rc == 0:
if self._parse_register_results(lines):
return rc
err_exit('register failed')
@chdir
def run_upload(self, dir, infoflags, distcmd, distflags, location, uploadflags, scmtype='', quiet=False):
if not self.process.quiet:
print 'running upload'
echo = tee.After('running upload')
if quiet:
echo = tee.And(echo, tee.Not(tee.StartsWith('Submitting')))
serverflags = ['--repository="%s"' % location]
rc, lines = self._run_setup_py(
['egg_info'] + infoflags + [distcmd] + distflags +
['upload'] + serverflags + uploadflags,
echo=echo,
scmtype=scmtype)
if rc == 0:
if self._parse_upload_results(lines):
return rc
err_exit('upload failed')
def _run_setup_py(self, args, echo=True, echo2=True, scmtype=''):
"""Run setup.py with monkey-patched setuptools.
The patch forces setuptools to only use file-finders for the
selected 'scmtype'.
'args' contains the *list* of arguments that should be passed
to setup.py.
If 'scmtype' is the empty string, the patch is not applied.
"""
python = self.python
if scmtype:
patched = SCM_CHOOSER % locals()
setup_py = '-c"%(patched)s"' % locals()
else:
setup_py = 'setup.py %s' % ' '.join(args)
rc, lines = self.process.popen(
'"%(python)s" %(setup_py)s' % locals(), echo=echo, echo2=echo2)
# Remove setup.pyc turd
if isfile('setup.pyc'):
os.remove('setup.pyc')
return rc, lines
def _parse_egg_info_results(self, lines):
for line in lines:
if line.startswith("writing manifest file '"):
return line.split("'")[1]
return ''
def _parse_dist_results(self, lines):
# This relies on --formats=zip or --formats=egg
for line in lines:
if line.startswith("creating '") and "' and adding '" in line:
return line.split("'")[1]
return ''
def _parse_register_results(self, lines):
current, expect = None, 'running register'
for line in lines:
if line == expect:
if line != 'Server response (200): OK':
current, expect = expect, 'Server response (200): OK'
else:
if current == 'running register':
return True
return False
def _parse_upload_results(self, lines):
current, expect = None, 'running upload'
for line in lines:
if line == expect:
if line != 'Server response (200): OK':
current, expect = expect, 'Server response (200): OK'
else:
if current == 'running upload':
return True
return False
SCM_CHOOSER = """\
import sys
import distutils
import pkg_resources
from os.path import basename
def walk_revctrl(dirname=''):
finder = False
items = []
for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
if %(scmtype)r in ep.name:
finder = True
finder_items = []
distutils.log.info('using %%s file-finder', ep.name)
for item in ep.load()(dirname):
if not basename(item).startswith(('.svn', '.hg', '.git')):
finder_items.append(item)
distutils.log.info('%%d files found', len(finder_items))
items.extend(finder_items)
if not finder:
print >>sys.stderr, 'No %(scmtype)s file-finder ' \
'(setuptools-%(scmtype)s extension missing?)'
sys.exit(1)
if not items:
sys.exit(1)
return items
import setuptools.command.egg_info
setuptools.command.egg_info.walk_revctrl = walk_revctrl
sys.argv = ['setup.py'] + %(args)r
import setup
"""
Fix missing string expansion: interpolate %(distcmd)s with locals() in the run_dist failure message.
import os
import tee
import pkg_resources
from os.path import abspath, join, isfile
from python import Python
from process import Process
from chdir import chdir
from exit import err_exit
class Setuptools(object):
    """Interface to setuptools functions."""
    def __init__(self, process=None):
        # Commands run with an environment in which setuptools and its
        # SCM extensions are importable (see get_env).
        self.python = Python()
        self.process = process or Process(env=self.get_env())
    def get_env(self):
        """Return a copy of os.environ suitable for running setup.py."""
        # Make sure setuptools and its extensions are found if mkrelease
        # has been installed with zc.buildout
        path = []
        for name in ('setuptools', 'setuptools-hg', 'setuptools-git',
                     'setuptools-subversion'):
            try:
                dist = pkg_resources.get_distribution(name)
            except pkg_resources.DistributionNotFound:
                continue
            path.append(dist.location)
        env = os.environ.copy()
        env['PYTHONPATH'] = ':'.join(path)
        env['HG_SETUPTOOLS_FORCE_CMD'] = '1'
        return env
    def is_valid_package(self, dir):
        """Return True if `dir` contains a setup.py."""
        return isfile(join(dir, 'setup.py'))
    def check_valid_package(self, dir):
        """Exit with an error if `dir` does not contain a setup.py."""
        if not self.is_valid_package(dir):
            err_exit('No setup.py found in %(dir)s' % locals())
    @chdir
    def get_package_info(self, dir):
        """Return the (name, version) reported by setup.py in `dir`."""
        python = self.python
        rc, lines = self.process.popen(
            '"%(python)s" setup.py --name --version' % locals(), echo=False)
        if rc == 0 and len(lines) > 1:
            return lines[0], lines[1]
        err_exit('Bad setup.py')
    @chdir
    def run_egg_info(self, dir, infoflags, scmtype='', quiet=False):
        """Run `setup.py egg_info` in `dir`; return the absolute manifest path."""
        if not self.process.quiet:
            print 'running egg_info'
        echo = tee.After('running egg_info')
        if quiet:
            echo = tee.And(echo, tee.StartsWith('running'))
        rc, lines = self._run_setup_py(
            ['egg_info'] + infoflags,
            echo=echo,
            scmtype=scmtype)
        if rc == 0:
            filename = self._parse_egg_info_results(lines)
            if filename and isfile(filename):
                return abspath(filename)
        err_exit('egg_info failed')
    @chdir
    def run_dist(self, dir, infoflags, distcmd, distflags, scmtype='', quiet=False):
        """Run a dist command (e.g. sdist) in `dir`; return the archive path."""
        if not self.process.quiet:
            print 'running', distcmd
        echo = tee.After('running %(distcmd)s' % locals())
        if quiet:
            echo = tee.And(echo, tee.StartsWith('running'))
        rc, lines = self._run_setup_py(
            ['egg_info'] + infoflags + [distcmd] + distflags,
            echo=echo,
            scmtype=scmtype)
        if rc == 0:
            filename = self._parse_dist_results(lines)
            if filename and isfile(filename):
                return abspath(filename)
        err_exit('%(distcmd)s failed' % locals())
    @chdir
    def run_register(self, dir, infoflags, location, scmtype='', quiet=False):
        """Register the package with the index server at `location`."""
        if not self.process.quiet:
            print 'running register'
        echo = tee.After('running register')
        if quiet:
            echo = tee.And(echo, tee.Not(tee.StartsWith('Registering')))
        serverflags = ['--repository="%s"' % location]
        rc, lines = self._run_setup_py(
            ['egg_info'] + infoflags + ['register'] + serverflags,
            echo=echo,
            scmtype=scmtype)
        if rc == 0:
            if self._parse_register_results(lines):
                return rc
        err_exit('register failed')
    @chdir
    def run_upload(self, dir, infoflags, distcmd, distflags, location, uploadflags, scmtype='', quiet=False):
        """Build and upload a distribution to the server at `location`."""
        if not self.process.quiet:
            print 'running upload'
        echo = tee.After('running upload')
        if quiet:
            echo = tee.And(echo, tee.Not(tee.StartsWith('Submitting')))
        serverflags = ['--repository="%s"' % location]
        rc, lines = self._run_setup_py(
            ['egg_info'] + infoflags + [distcmd] + distflags +
            ['upload'] + serverflags + uploadflags,
            echo=echo,
            scmtype=scmtype)
        if rc == 0:
            if self._parse_upload_results(lines):
                return rc
        err_exit('upload failed')
    def _run_setup_py(self, args, echo=True, echo2=True, scmtype=''):
        """Run setup.py with monkey-patched setuptools.
        The patch forces setuptools to only use file-finders for the
        selected 'scmtype'.
        'args' contains the *list* of arguments that should be passed
        to setup.py.
        If 'scmtype' is the empty string, the patch is not applied.
        """
        python = self.python
        if scmtype:
            patched = SCM_CHOOSER % locals()
            setup_py = '-c"%(patched)s"' % locals()
        else:
            setup_py = 'setup.py %s' % ' '.join(args)
        rc, lines = self.process.popen(
            '"%(python)s" %(setup_py)s' % locals(), echo=echo, echo2=echo2)
        # Remove setup.pyc turd
        if isfile('setup.pyc'):
            os.remove('setup.pyc')
        return rc, lines
    def _parse_egg_info_results(self, lines):
        """Extract the manifest filename from egg_info output ('' if absent)."""
        for line in lines:
            if line.startswith("writing manifest file '"):
                return line.split("'")[1]
        return ''
    def _parse_dist_results(self, lines):
        """Extract the archive filename from dist output ('' if absent)."""
        # This relies on --formats=zip or --formats=egg
        for line in lines:
            if line.startswith("creating '") and "' and adding '" in line:
                return line.split("'")[1]
        return ''
    def _parse_register_results(self, lines):
        """Return True if the server acknowledged 'register' with 200 OK."""
        current, expect = None, 'running register'
        for line in lines:
            if line == expect:
                if line != 'Server response (200): OK':
                    current, expect = expect, 'Server response (200): OK'
                else:
                    if current == 'running register':
                        return True
        return False
    def _parse_upload_results(self, lines):
        """Return True if the server acknowledged 'upload' with 200 OK."""
        current, expect = None, 'running upload'
        for line in lines:
            if line == expect:
                if line != 'Server response (200): OK':
                    current, expect = expect, 'Server response (200): OK'
                else:
                    if current == 'running upload':
                        return True
        return False
SCM_CHOOSER = """\
import sys
import distutils
import pkg_resources
from os.path import basename
def walk_revctrl(dirname=''):
finder = False
items = []
for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
if %(scmtype)r in ep.name:
finder = True
finder_items = []
distutils.log.info('using %%s file-finder', ep.name)
for item in ep.load()(dirname):
if not basename(item).startswith(('.svn', '.hg', '.git')):
finder_items.append(item)
distutils.log.info('%%d files found', len(finder_items))
items.extend(finder_items)
if not finder:
print >>sys.stderr, 'No %(scmtype)s file-finder ' \
'(setuptools-%(scmtype)s extension missing?)'
sys.exit(1)
if not items:
sys.exit(1)
return items
import setuptools.command.egg_info
setuptools.command.egg_info.walk_revctrl = walk_revctrl
sys.argv = ['setup.py'] + %(args)r
import setup
"""
|
###########################################################
#
# Copyright (c) 2005-2008, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
from pyasm.common import Environment, jsonloads, jsondumps, TacticException, Common
from pyasm.web import DivWdg, Table
from pyasm.widget import IconWdg, TextWdg, SelectWdg, CheckboxWdg, RadioWdg, TextAreaWdg, HiddenWdg
from pyasm.command import Command
from pyasm.search import SearchType, Search
from pyasm.biz import File, Project, FileGroup, FileRange, Snapshot
from tactic.ui.common import BaseRefreshWdg
from tactic.ui.container import DialogWdg
from tactic.ui.widget import IconButtonWdg
from tactic.ui.input import UploadButtonWdg, TextInputWdg
from tactic.ui.widget import ActionButtonWdg
from tactic_client_lib import TacticServerStub
import os
import os.path
import re
import shutil
# Public widget/command classes exported by this module.
__all__ = ['IngestUploadWdg', 'IngestCheckCmd', 'IngestUploadCmd']
class IngestUploadWdg(BaseRefreshWdg):
ARGS_KEYS = {
'base_dir': 'Base directory to check into',
'search_type': 'Search Type to ingest into',
'parent_key': 'Parent search key to relate create sobject to',
'process': 'The default process to ingest into',
'context': 'Fixed context to ingest into',
'ingest_data_view': 'Specify a ingest data view, defaults to edit',
'extra_data': 'Extra data (JSON) to be added to created sobjects',
'oncomplete_script_path': 'Script to be run on a finished ingest',
'update_mode': 'Takes values "true" or "false". When true, uploaded files will update existing file iff exactly one file exists already with the same name.',
'context_mode': 'Set or remove context case sensitivity.',
'hidden_options': 'Comma separated list of hidden settings i.e. "process,context_mode"',
'title': 'The title to display at the top',
'library_mode': 'Mode to determine if Ingest should handle huge amounts of files',
'dated_dirs': 'Determines update functionality, marked true if relative_dir is timestamped',
'update_process': 'Determines the update process for snapshots when the update_mode is set to true and one sobject is found',
'ignore_path_keywords': 'Comma separated string of path keywords to be hidden',
'project_code': 'Publish to another project',
}
    def get_display(my):
        """Build and return the top-level ingest widget.

        Reads configuration from my.kwargs (see ARGS_KEYS) and lays out the
        upload/content area on the left with an optional settings panel on
        the right.
        """
        my.sobjects = my.kwargs.get("sobjects")
        # if search_keys are passed in, then these are used to copy
        search_keys = my.kwargs.get("search_keys")
        # add a project to copy to. Check that it is permitted
        my.project_code = my.kwargs.get("project_code")
        if search_keys:
            my.sobjects = Search.get_by_search_keys(search_keys)
            # Only allow publishing into projects the user can access.
            projects = Project.get_user_projects()
            project_codes = [x.get_code() for x in projects]
            if my.project_code not in project_codes:
                my.project_code = None
        # An explicit base_dir must live under the central asset root;
        # it is stored relative to that root.
        asset_dir = Environment.get_asset_dir()
        base_dir = my.kwargs.get("base_dir")
        if base_dir:
            if not base_dir.startswith(asset_dir):
                raise Exception("Path needs to be in asset root")
            else:
                relative_dir = base_dir.replace(asset_dir, "")
                relative_dir = relative_dir.strip("/")
        else:
            relative_dir = my.kwargs.get("relative_dir")
        my.relative_dir = relative_dir
        # This is used to check into a search key (not create a new sobject)
        my.orig_sobject = None
        my.search_key = my.kwargs.get("search_key") or ""
        if my.search_key:
            my.sobject = Search.get_by_search_key(my.search_key)
            # use_parent redirects the check-in to the sobject's parent.
            if my.kwargs.get("use_parent") in [True, 'true']:
                my.orig_sobject = my.sobject
                my.sobject = my.sobject.get_parent()
                my.search_key = my.sobject.get_search_key()
            my.search_type = my.sobject.get_search_type()
            # Settings default to hidden when a search_key is given.
            my.show_settings = my.kwargs.get("show_settings")
            if not my.show_settings:
                my.show_settings = False
        else:
            my.search_type = my.kwargs.get("search_type")
            my.sobject = None
            my.search_key = None
            # Settings default to visible when ingesting into a search type.
            my.show_settings = my.kwargs.get("show_settings")
            if my.show_settings == None:
                my.show_settings = True
        top = my.top
        top.add_class("spt_ingest_top")
        # Hidden input carrying the parent search key for the client side.
        hidden = HiddenWdg(name="parent_key")
        #hidden = TextWdg(name="parent_key")
        top.add(hidden)
        hidden.add_class("spt_parent_key")
        if my.search_key:
            hidden.set_value(my.search_key)
        # Two-column layout: content on the left, settings on the right.
        table = Table()
        top.add(table)
        table.add_row()
        left = table.add_cell()
        left.add_style("vertical-align: top")
        left.add( my.get_content_wdg() )
        if not my.search_key or my.show_settings:
            if my.show_settings:
                # Thin vertical divider between the two columns.
                middle = table.add_cell()
                middle.add_style("height: 10") # not sure why we need this height
                middle.add_style("padding: 30px 20px")
                line = DivWdg()
                middle.add(line)
                line.add_style("height: 100%")
                line.add_style("border-style: solid")
                line.add_style("border-width: 0px 0px 0px 1px")
                line.add_style("border-color: #DDD")
                line.add(" ")
            right = table.add_cell()
            right.add_class("spt_right_content")
            right.add_style("vertical-align: top")
            right.add( my.get_settings_wdg() )
            if my.show_settings in [False, 'false']:
                right.add_style("display: none")
        else:
            # Checking into a specific search key: carry the parent's
            # process along in a hidden input instead of showing settings.
            if my.orig_sobject and my.orig_sobject.column_exists("process"):
                hidden = HiddenWdg(name="process")
                #hidden = TextWdg(name="process")
                top.add(hidden)
                hidden.add_class("spt_process")
                process = my.orig_sobject.get_value("process")
                hidden.set_value(process)
        return top
def get_file_wdg(my, sobject=None):
# template for each file item
file_template = DivWdg()
if not sobject:
file_template.add_class("spt_upload_file_template")
file_template.add_style("display: none")
else:
file_template.add_class("spt_upload_file")
file_template.add_style("margin-bottom: 3px")
file_template.add_style("padding: 3px")
file_template.add_style("height: 40px")
thumb_div = DivWdg()
file_template.add(thumb_div)
thumb_div.add_style("float: left")
thumb_div.add_style("width: 60");
thumb_div.add_style("height: 40");
thumb_div.add_style("overflow: hidden");
thumb_div.add_style("margin: 3 10 3 0");
thumb_div.add_class("spt_thumb")
info_div = DivWdg()
file_template.add(info_div)
info_div.add_style("float: left")
name_div = DivWdg()
name_div.add_class("spt_name")
info_div.add(name_div)
name_div.add_style("width: 225px")
name_div.add_style("overflow-x: hidden")
name_div.add_style("text-overflow: ellipsis")
date_div = DivWdg()
date_div.add_class("spt_date_label")
info_div.add(date_div)
date_div.add("")
date_div.add_style("opacity: 0.5")
date_div.add_style("font-size: 0.8em")
date_div.add_style("font-style: italic")
date_div.add_style("margin-top: 3px")
hidden_date_div = HiddenWdg("date")
hidden_date_div.add_class("spt_date")
info_div.add(date_div)
size_div = DivWdg()
size_div.add_class("spt_size")
file_template.add(size_div)
size_div.add_style("float: left")
size_div.add_style("width: 150px")
size_div.add_style("text-align: right")
remove_div = DivWdg()
remove_div.add_class("spt_remove")
file_template.add(remove_div)
icon = IconButtonWdg(title="Remove", icon="BS_REMOVE")
icon.add_style("float: right")
remove_div.add(icon)
#remove_div.add_style("text-align: right")
if sobject:
from pyasm.common import FormatValue
from tactic.ui.panel import ThumbWdg2
thumb = ThumbWdg2()
thumb_div.add(thumb)
thumb.set_sobject(sobject)
lib_path = thumb.get_lib_path()
name = os.path.basename(lib_path)
name = re.sub(r"_v\d+", "", name)
if sobject.get_base_search_type() == "sthpw/snapshot":
if sobject.get("snapshot_type") == "sequence":
paths = sobject.get_expanded_lib_paths()
file_range = sobject.get_file_range()
size = 0
for path in paths:
size += os.path.getsize(path)
name = "%s (%s)" % (name, file_range.get_display())
else:
size = os.path.getsize(lib_path)
else:
size = os.path.getsize(lib_path)
name_div.add( name )
size = FormatValue().get_format_value(size, "KB")
size_div.add(size)
file_template.add_attr("spt_search_key", sobject.get_search_key())
else:
# example data
size_div.add("433Mb")
name_div.add("image001.jpg")
return file_template
def get_settings_wdg(my):
    """Build the "Ingest Settings" side panel: a process selector, an
    optional metadata edit form, the file-to-item mapping options and a
    hidden extra_data payload.  Individual sections can be suppressed
    via the comma-separated "hidden_options" kwarg.

    Returns a DivWdg containing the whole settings panel."""
    div = DivWdg()
    div.add_style("width: 400px")
    div.add_style("padding: 20px")

    title_wdg = DivWdg()
    div.add(title_wdg)
    title_wdg.add("Ingest Settings")
    title_wdg.add_style("font-size: 25px")

    # Build list of process names from every pipeline that applies to this
    # search type (or only the sobject's own pipeline when one is given)
    process_names = set()

    from pyasm.biz import Pipeline
    from pyasm.widget import SelectWdg
    search_type_obj = SearchType.get(my.search_type)
    base_type = search_type_obj.get_base_key()

    pipeline_search = Search("sthpw/pipeline")
    if my.sobject:
        pipeline_code = my.sobject.get_value("pipeline_code")
        if pipeline_code:
            pipeline_search.add_filter("code", pipeline_code)
        else:
            # sobject has no pipeline assigned: match no pipelines at all
            pipeline_search.set_null_filter()
    pipeline_search.add_project_filter()
    pipeline_search.add_filter("search_type", base_type)
    pipelines = pipeline_search.get_sobjects()
    for pipeline in pipelines:
        process_names.update(pipeline.get_process_names())

    selected_process = my.kwargs.get("process")
    if selected_process:
        process_names.add(selected_process)

    if process_names:
        process_names = list(process_names)
        process_names.sort()
    else:
        process_names = []

    # always offer the built-in publish/icon contexts; "---" is just a
    # visual separator shown when pipeline processes precede them
    if process_names:
        process_names.append("---")
    process_names.append("publish")
    process_names.append("icon")

    # BUG FIX: my.kwargs.get("hidden_options") returns None when the kwarg
    # is not passed, and None.split(',') raised AttributeError.  Default to
    # an empty string so that no sections are hidden in that case.
    hidden_options = (my.kwargs.get("hidden_options") or "").split(',')

    process_wdg = DivWdg()
    div.add(process_wdg)
    title_wdg = DivWdg()
    process_wdg.add(title_wdg)
    title_wdg.add("Process")
    title_wdg.add_style("margin-top: 20px")
    title_wdg.add_style("font-size: 16px")
    process_wdg.add("<br/>")

    select = SelectWdg("process")
    process_wdg.add(select)
    select.set_option("values", process_names)
    select.add_empty_option("- Select Process to ingest to-")
    if selected_process:
        select.set_option("default", selected_process)
    process_wdg.add("<br/>")

    if "process" in hidden_options:
        process_wdg.set_style("display: none")

    # Metadata
    #hidden_options.append("metadata")
    if "metadata" not in hidden_options:
        process_wdg.add("<hr/>")

        title_wdg = DivWdg()
        div.add(title_wdg)
        title_wdg.add("Metadata")
        title_wdg.add_style("margin-top: 20px")
        title_wdg.add_style("font-size: 16px")
        title_wdg.add_style("margin-bottom: 5px")

        desc_wdg = DivWdg("The following metadata will be added to the ingested files.")
        desc_wdg.add_style("margin-bottom: 10px")
        div.add(desc_wdg)

        from tactic.ui.panel import EditWdg

        ingest_data_view = my.kwargs.get('metadata_view')
        if not ingest_data_view:
            ingest_data_view = my.kwargs.get('ingest_data_view')

        # when ingesting into an existing item, the metadata is attached to
        # the snapshot; otherwise to a new sobject of the ingest search type
        if my.search_key:
            sobject = SearchType.create("sthpw/snapshot")
        else:
            sobject = SearchType.create(my.search_type)

        if my.show_settings:
            # NOTE(review): "metadata_element_names" is not defined anywhere
            # in this method -- presumably a module- or class-level value.
            # Verify it is in scope whenever my.show_settings is true.
            edit = EditWdg(
                search_key=sobject.get_search_key(),
                mode='view',
                view=ingest_data_view,
                element_names=metadata_element_names,
                show_header=False,
                width="100%",
                display_mode="single_cell",
                extra_data=my.kwargs.get("extra_data"),
                default=my.kwargs.get("default"),
            )
            div.add(edit)
        div.add("<br/>")

    # options
    # update mode
    map_div = DivWdg()
    div.add(map_div)
    map_div.add("<hr/>")
    title_wdg = DivWdg()
    map_div.add(title_wdg)
    title_wdg.add("Mapping Files to Items")
    title_wdg.add_style("margin-top: 20px")
    title_wdg.add_style("font-size: 16px")

    if "map_option" in hidden_options:
        map_div.add_style("display: none")

    if "update_option" not in hidden_options:
        label_div = DivWdg()
        label_div.add("Determine how the file maps to a particular item")
        map_div.add(label_div)
        label_div.add_style("margin-top: 10px")
        label_div.add_style("margin-bottom: 8px")

        update_mode_option = my.kwargs.get("update_mode")
        if not update_mode_option:
            update_mode_option = "true"
        update_mode = SelectWdg(name="update mode")
        update_mode.add_class("spt_update_mode_select")
        update_mode.set_option("values", ["false", "true", "sequence"])
        update_mode.set_option("labels", ["Always insert a new item", "Update duplicate items", "Update groups as sequences"])
        update_mode.set_option("default", update_mode_option)
        update_mode.add_style("margin-top: -3px")
        update_mode.add_style("margin-right: 5px")
        map_div.add(update_mode)

        # other widgets can switch the update mode by firing this event
        # (e.g. the sequence-detection confirm dialog in the drop handler)
        update_mode.add_behavior( {
            "type": "listen",
            "event_name": "set_ingest_update_mode",
            "cbjs_action": '''
            var value = bvr.firing_data.value;
            bvr.src_el.value = value;
            '''
        } )

    if not my.search_key and "ext_option" not in hidden_options:
        label_div = DivWdg()
        label_div.add("Ignore File Extension")
        map_div.add(label_div)
        label_div.add_style("margin-top: 10px")
        label_div.add_style("margin-bottom: 8px")

        ignore_ext_option = my.kwargs.get("ignore_ext")
        if not ignore_ext_option:
            ignore_ext_option = "false"
        # NOTE(review): input name "update mode" is duplicated from the
        # select above (copy/paste); the client JS reads this select via the
        # spt_ignore_ext_select class, so it still works -- confirm nothing
        # reads the form value by name before renaming.
        ignore_ext = SelectWdg(name="update mode")
        ignore_ext.add_class("spt_ignore_ext_select")
        ignore_ext.set_option("values", ["true", "false"])
        ignore_ext.set_option("labels", ["Yes", "No"])
        ignore_ext.set_option("default", ignore_ext_option)
        ignore_ext.add_style("margin-top: -3px")
        ignore_ext.add_style("margin-right: 5px")
        map_div.add(ignore_ext)

    if not my.search_key and "column_option" not in hidden_options:
        label_div = DivWdg()
        label_div.add("Map file name to column")
        map_div.add(label_div)
        label_div.add_style("margin-top: 10px")
        label_div.add_style("margin-bottom: 8px")

        column_option = my.kwargs.get("column")
        if not column_option:
            column_option = "name"
        # NOTE(review): same duplicated input name "update mode" as above;
        # read client-side via the spt_column_select class.
        column_select = SelectWdg(name="update mode")
        column_select.add_class("spt_column_select")
        column_select.set_option("values", ["name", "code"])
        column_select.set_option("labels", ["Name", "Code"])
        column_select.set_option("default", column_option)
        column_select.add_style("margin-top: -3px")
        column_select.add_style("margin-right: 5px")
        map_div.add(column_select)

    if "zip_mode" not in hidden_options:
        label_div = DivWdg()
        label_div.add("When checking in zipped files:")
        map_div.add(label_div)
        label_div.add_style("margin-top: 10px")
        label_div.add_style("margin-bottom: 8px")

        column_option = my.kwargs.get("column")
        if not column_option:
            column_option = "name"
        column_select = SelectWdg(name="zip mode")
        column_select.add_class("spt_zip_mode_select")
        column_select.set_option("values", ["single", "unzip"])
        column_select.set_option("labels", ["Check-in as a single zipped file", "Unzip and check-in each file"])
        column_select.set_option("default", "single")
        column_select.add_style("margin-top: -3px")
        column_select.add_style("margin-right: 5px")
        map_div.add(column_select)

    if not my.search_key and "context_mode" not in hidden_options:
        map_div.add("<br/>")
        map_div.add("<hr/>")
        title_wdg = DivWdg()
        map_div.add(title_wdg)
        title_wdg.add("Context Mode")
        title_wdg.add_style("font-size: 16px")
        map_div.add("<br/>")

        context_mode_option = my.kwargs.get("context_mode")
        if not context_mode_option:
            context_mode_option = "case_sensitive"
        context_mode = SelectWdg(name="context_mode")
        context_mode.add_class("spt_context_mode_select")
        context_mode.set_option("values", "case_insensitive|case_sensitive")
        context_mode.set_option("labels", "Case Insensitive|Case Sensitive")
        context_mode.set_option("default", context_mode_option)
        context_mode.add_style("margin-top: -3px")
        context_mode.add_style("margin-right: 5px")
        map_div.add(context_mode)

    # carry extra_data through the form as JSON
    extra_data = my.kwargs.get("extra_data")
    if not isinstance(extra_data, basestring):
        extra_data = jsondumps(extra_data)
    if extra_data and extra_data != "null":
        # it needs a TextArea instead of Hidden because of JSON data
        text = TextAreaWdg(name="extra_data")
        text.add_style('display: none')
        text.set_value(extra_data)
        div.add(text)

    return div
def get_content_wdg(my):
    # Builds the main (left) ingest area: header with title/help, the
    # action shelf (add/clear/ingest buttons plus the progress bar) and
    # the drag-and-drop file queue with its client-side behaviors.
    #
    # Fixes in this revision:
    #   * typo in the user-facing message: "specfied" -> "specified"
    #   * removed the unused local "show_help" (the condition below
    #     re-reads my.kwargs.get("show_help") directly)
    """
    asset_dir = Environment.get_asset_dir()
    base_dir = my.kwargs.get("base_dir")
    if base_dir:
        if not base_dir.startswith(asset_dir):
            raise Exception("Path needs to be in asset root")
        else:
            relative_dir = base_dir.replace(asset_dir, "")
            relative_dir = relative_dir.strip("/")
    else:
        relative_dir = my.kwargs.get("relative_dir")
    my.relative_dir = relative_dir
    """
    div = DivWdg()
    div.add_style("width: auto")
    div.add_style("min-width: 600px")
    div.add_style("padding: 20px")
    div.add_color("background", "background")

    header_div = DivWdg()
    div.add(header_div)

    # hamburger button that toggles the settings panel on the right
    if my.show_settings:
        button_div = DivWdg()
        header_div.add(button_div)
        button = IconButtonWdg(title="Expand Options", icon="BS_MENU_HAMBURGER")
        button_div.add(button)
        button_div.add_style("float: right")
        button.add_behavior( {
            'type': 'click_up',
            'cbjs_action': '''
            var top = bvr.src_el.getParent(".spt_ingest_top");
            var right = top.getElement(".spt_right_content");
            spt.toggle_show_hide(right);
            '''
        } )

    title = my.kwargs.get("title")
    if not title:
        if my.project_code:
            # cross-project ingest: present it as a copy operation
            project_title = Project.get_by_code(my.project_code).get_value("title")
            title = "Copy files to '%s'" % project_title
            title_description = "These will be copied to the asset library"
        else:
            title = "Ingest Files"
            title_description = "Either drag files into the queue box or click 'Add Files to Queue'"
    else:
        title_description = "Either drag files into the queue box or click 'Add Files to Queue'"

    title_wdg = DivWdg()
    header_div.add(title_wdg)
    title_wdg.add("<span style='font-size: 25px'>%s</span>" % title)
    title_wdg.add("<br/>")
    title_wdg.add(title_description)
    title_wdg.add_style("display", "inline-block")

    # create the help button
    is_admin_site = Project.get().is_admin()
    if my.kwargs.get("show_help") not in ['false', False] and is_admin_site:
        help_button_wdg = DivWdg()
        header_div.add(help_button_wdg)
        help_button_wdg.add_styles("float: right; margin-top: 11px;")
        help_button = ActionButtonWdg(title="?", tip="Ingestion Widget Help", size='s')
        help_button_wdg.add(help_button)
        help_button.add_behavior( {
            'type': 'click_up',
            'cbjs_action': '''spt.help.load_alias("ingestion_widget")'''
        } )

    div.add("<hr style='margin-right: 4px'/>")

    shelf_div = DivWdg()
    div.add(shelf_div)
    shelf_div.add_style("margin-bottom: 10px")

    # search_key of the item being ingested into (may be empty)
    if my.search_key:
        div.add("<input class='spt_input' type='hidden' name='search_key' value='%s'/>" % my.search_key)
    else:
        div.add("<input class='spt_input' type='hidden' name='search_key' value=''/>")

    if not my.search_type:
        div.add("No search type specified")
        return div

    if my.relative_dir:
        folder_div = DivWdg()
        shelf_div.add(folder_div)
        folder_div.add("Folder: %s" % my.relative_dir)
        folder_div.add_style("opacity: 0.5")
        folder_div.add_style("font-style: italic")
        folder_div.add_style("margin-bottom: 10px")

    # update_process
    my.update_process = my.kwargs.get("update_process") or ""
    # ignore_path_keywords
    my.ignore_path_keywords = my.kwargs.get("ignore_path_keywords") or ""

    from tactic.ui.input import Html5UploadWdg
    upload = Html5UploadWdg(multiple=True)
    shelf_div.add(upload)

    button = ActionButtonWdg(title="Add Files to Queue", width=150, color="warning")
    #button.add_style("float: right")
    button.add_style("display: inline-block")
    button.add_style("margin-top: -3px")
    shelf_div.add(button)

    button.add_behavior( {
        'type': 'click',
        'normal_ext': File.NORMAL_EXT,
        'cbjs_action': '''
        var top = bvr.src_el.getParent(".spt_ingest_top");
        var files_el = top.getElement(".spt_to_ingest_files");
        var regex = new RegExp('(' + bvr.normal_ext.join('|') + ')$', 'i');

        //clear upload progress
        var upload_bar = top.getElement('.spt_upload_progress');
        if (upload_bar) {
            upload_bar.setStyle('width','0%');
            upload_bar.innerHTML = '';
        }
        var upload_button = top.getElement(".spt_upload_files_top");

        var onchange = function (evt) {
            var files = spt.html5upload.get_files();
            var delay = 0;
            for (var i = 0; i < files.length; i++) {
                var size = files[i].size;
                var file_name = files[i].name;
                var is_normal = regex.test(file_name);
                if (size >= 10*1024*1024 || is_normal) {
                    spt.drag.show_file(files[i], files_el, 0, false);
                }
                else {
                    spt.drag.show_file(files[i], files_el, delay, true);

                    if (size < 100*1024) delay += 50;
                    else if (size < 1024*1024) delay += 500;
                    else if (size < 10*1024*1024) delay += 1000;
                }
            }
            upload_button.setStyle("display", "");
        }

        spt.html5upload.clear();
        spt.html5upload.set_form( top );
        spt.html5upload.select_file( onchange );
        '''
    } )

    button = ActionButtonWdg(title="Clear")
    #button.add_style("float: right")
    button.add_style("display: inline-block")
    button.add_style("margin-top: -3px")
    shelf_div.add(button)
    button.add_behavior( {
        'type': 'click_up',
        'cbjs_action': '''
        var top = bvr.src_el.getParent(".spt_ingest_top");
        var file_els = top.getElements(".spt_upload_file");
        for ( var i = 0; i < file_els.length; i++) {
            spt.behavior.destroy( file_els[i] );
        };
        var background = top.getElement(".spt_files_background");
        background.setStyle("display", "");

        var button = top.getElement(".spt_upload_file_button");
        button.setStyle("display", "none");

        //clear upload progress
        var upload_bar = top.getElement('.spt_upload_progress');
        if (upload_bar) {
            upload_bar.setStyle('width','0%');
            upload_bar.innerHTML = '';
            upload_bar.setStyle("visibility", "hidden");
            var info_el = top.getElement(".spt_upload_info");
            info_el.innerHTML = "";
        }
        '''
    } )

    ingest = my.get_ingest_button()
    shelf_div.add(ingest)
    ingest.add_style("float: right")

    shelf_div.add("<br clear='all'/>")

    progress_wdg = my.get_progress_div()
    shelf_div.add(progress_wdg)

    border_color_light = div.get_color("background2", 8)
    border_color_dark = div.get_color("background2", -15)
    background_mouseout = div.get_color("background", 10)
    background_mouseenter = div.get_color("background", 8)

    # the drop target / queue of files to ingest
    files_div = DivWdg()
    files_div.add_style("position: relative")
    files_div.add_class("spt_to_ingest_files")
    div.add(files_div)
    files_div.add_style("max-height: 400px")
    files_div.add_style("height: 400px")
    files_div.add_style("overflow-y: auto")
    files_div.add_style("padding: 3px")
    files_div.add_color("background", background_mouseout)
    files_div.add_style("border: 3px dashed %s" % border_color_light)
    #files_div.add_style("border-radius: 20px 20px 20px 20px")
    files_div.add_style("z-index: 1")
    files_div.add_style("width", "586px")
    #files_div.add_style("display: none")

    bgcolor = div.get_color("background")
    bgcolor2 = div.get_color("background", -3)

    #style_text = "text-align: center; margin-top: 100px; color: #A0A0A0; font-size: 3.0em; z-index: 10;"
    # "Drag Files Here" placeholder, hidden once files are queued
    background = DivWdg()
    background.add_class("spt_files_background")
    files_div.add(background)
    if my.sobjects:
        background.add_style("display: none")
    background.add_style("text-align: center")
    background.add_style("margin-top: 75px")
    background.add_style("font-size: 3.0em")
    background.add_style("z-index: 10")
    background.add_color("color", "color", 70)
    icon = "<i class='fa fa-cloud-upload' style='font-size: 150px'> </i>"
    background.add(icon)
    background_text = DivWdg("<p>Drag Files Here</p>")
    background.add(background_text)

    files_div.add_behavior( {
        'type': 'mouseover',
        'cbjs_action': '''
        bvr.src_el.setStyle("border","3px dashed %s")
        bvr.src_el.setStyle("background","%s")
        ''' % (border_color_dark, background_mouseenter)
    } )
    files_div.add_behavior( {
        'type': 'mouseout',
        'cbjs_action': '''
        bvr.src_el.setStyle("border", "3px dashed %s")
        bvr.src_el.setStyle("background","%s")
        ''' % (border_color_light, background_mouseout)
    } )

    background.add( my.get_select_files_button() )

    # Test drag and drop files
    files_div.add_attr("ondragenter", "return false")
    files_div.add_attr("ondragover", "return false")
    files_div.add_attr("ondrop", "spt.drag.noop(event, this)")

    # defines spt.drag.show_file (clone the row template for a file) and
    # spt.drag.noop (the actual drop handler, incl. sequence detection)
    files_div.add_behavior( {
        'type': 'load',
        'normal_ext': File.NORMAL_EXT,
        'cbjs_action': '''
        spt.drag = {}
        var background;

        spt.drag.show_file = function(file, top, delay, icon) {

            var background = top.getElement(".spt_files_background");
            background.setStyle("display", "none");

            var template = top.getElement(".spt_upload_file_template");
            var clone = spt.behavior.clone(template);

            clone.removeClass("spt_upload_file_template");
            clone.addClass("spt_upload_file");
            clone.setStyle("display", "");

            if (typeof(delay) == 'undefined') {
                delay = 0;
            }

            // remember the file handle
            clone.file = file;

            var name = file.name;
            var size = parseInt(file.size / 1024 * 10) / 10;

            var thumb_el = clone.getElement(".spt_thumb");
            var date_label_el = clone.getElement(".spt_date_label");
            var date_el = clone.getElement(".spt_date");

            //var loadingImage = loadImage(
            setTimeout( function() {
                var draw_empty_icon = function() {
                    var img = $(document.createElement("div"));
                    img.setStyle("width", "58");
                    img.setStyle("height", "34");
                    //img.innerHTML = "MP4";
                    img.setStyle("border", "1px dotted #222")
                    thumb_el.appendChild(img);
                };
                if (icon) {
                    var loadingImage = loadImage(
                        file,
                        function (img) {
                            if (img.width)
                                thumb_el.appendChild(img);
                            else
                                draw_empty_icon();
                        },
                        {maxWidth: 80, maxHeight: 60, canvas: true, contain: true}
                    );
                }
                else {
                    draw_empty_icon();
                }

                loadImage.parseMetaData(
                    file,
                    function(data) {
                        if (data.exif) {
                            var date = data.exif.get('DateTimeOriginal');
                            if (date) {
                                date_label_el.innerHTML = date;
                                if (date_el) {
                                    date_el.value = date;
                                }
                            }
                        }
                    }
                );
            }, delay );

            /*
            var reader = new FileReader();
            reader.thumb_el = thumb_el;
            reader.onload = function(e) {
                this.thumb_el.innerHTML = [
                    '<img class="thumb" src="',
                    e.target.result,
                    '" title="', escape(name),
                    '" width="60px"',
                    '" padding="5px"',
                    '"/>'
                ].join('');
            }
            reader.readAsDataURL(file);
            */

            clone.getElement(".spt_name").innerHTML = file.name;
            clone.getElement(".spt_size").innerHTML = size + " KB";
            clone.inject(top);
        }

        spt.drag.noop = function(evt, el) {
            var top = $(el).getParent(".spt_ingest_top");
            var files_el = top.getElement(".spt_to_ingest_files");
            evt.stopPropagation();
            evt.preventDefault();
            evt.dataTransfer.dropEffect = 'copy';
            var files = evt.dataTransfer.files;

            var delay = 0;
            var skip = false;
            var regex = new RegExp('(' + bvr.normal_ext.join('|') + ')$', 'i');
            for (var i = 0; i < files.length; i++) {
                var size = files[i].size;
                var file_name = files[i].name;
                var is_normal = regex.test(file_name);
                if (size >= 10*1024*1024 || is_normal) {
                    spt.drag.show_file(files[i], files_el, 0, false);
                }
                else {
                    spt.drag.show_file(files[i], files_el, delay, true);

                    if (size < 100*1024) delay += 50;
                    else if (size < 1024*1024) delay += 500;
                    else if (size < 10*1024*1024) delay += 1000;
                }
            }

            // get all of the current filenames
            var filenames = []
            var items = top.getElements(".spt_upload_file");
            for (var i = 0; i < items.length; i++) {
                var file = items[i].file;
                filenames.push(file.name);
            }

            // check if this is a sequence or zip
            var server = TacticServerStub.get();
            var cmd = 'tactic.ui.tools.IngestCheckCmd';
            var kwargs = {
                file_names: filenames
            };
            var ret_val = server.execute_cmd(cmd, kwargs);
            var info = ret_val.info;

            var num_sequences = 0;
            for (var i = 0; i < info.length; i++) {
                if (info[i].is_sequence) {
                    num_sequences += 1;
                }
            }

            var ok = function() {
                var upload_button = top.getElement(".spt_upload_files_top");
                upload_button.setStyle("display", "");
            }

            if (num_sequences > 0) {
                spt.confirm(num_sequences + " Sequences detected. Do you wish to group these files as sequences?", function() {
                    spt.named_events.fire_event("set_ingest_update_mode", {
                        options: {
                            value: 'sequence'
                        }
                    } );
                });
            }
            ok();
        }
        '''
    } )

    # create a template that will be filled in for each file
    files_div.add_relay_behavior( {
        'type': 'mouseenter',
        'color': files_div.get_color("background3", -5),
        'bvr_match_class': 'spt_upload_file',
        'cbjs_action': '''
        bvr.src_el.setStyle("background", bvr.color);
        '''
    } )
    files_div.add_relay_behavior( {
        'type': 'mouseleave',
        'bvr_match_class': 'spt_upload_file',
        'cbjs_action': '''
        bvr.src_el.setStyle("background", "");
        '''
    } )

    # remove a queued file; restore the placeholder when queue empties
    files_div.add_relay_behavior( {
        'type': 'mouseup',
        'bvr_match_class': 'spt_remove',
        'cbjs_action': '''
        var top = bvr.src_el.getParent(".spt_ingest_top");
        var el = bvr.src_el.getParent(".spt_upload_file");
        spt.behavior.destroy_element(el);
        var els = top.getElements(".spt_upload_file");
        if (els.length == 0) {
            var background = top.getElement(".spt_files_background");
            background.setStyle("display", "");
            var upload_button = top.getElement(".spt_upload_files_top");
            upload_button.setStyle("display", "none");
        }
        '''
    } )

    """
    metadata_view = "test/wizard/metadata"
    files_div.add_relay_behavior( {
        'type': 'mouseup',
        'view': metadata_view,
        'bvr_match_class': 'spt_upload_file',
        'cbjs_action': '''
        var class_name = 'tactic.ui.panel.CustomLayoutWdg';
        var kwargs = {
            view: bvr.view
        }
        spt.app_busy.show("Loading Metadata");
        spt.panel.load_popup("Metadata", class_name, kwargs);
        spt.app_busy.hide();
        '''
    } )
    """

    # add the passed in sobject files
    for sobject in my.sobjects:
        files_div.add( my.get_file_wdg(sobject) )

    # add the template
    files_div.add( my.get_file_wdg() )

    div.add("<br/>")

    #upload_wdg = my.get_ingest_button()
    #div.add(upload_wdg)

    return div
def get_ingest_button(my):
    # Builds the "Upload Files" / "Copy Files" button together with the
    # client-side upload lifecycle callbacks (init, progress, completion)
    # that ultimately execute the server-side action handler command
    # (IngestUploadCmd by default).  The three JS fragments built below
    # are spliced into the button behavior via '%s' substitution at the
    # bottom of this method.
    div = DivWdg()

    library_mode = my.kwargs.get("library_mode") or False
    dated_dirs = my.kwargs.get("dated_dirs") or False

    # NOTE: files variable is passed in automatically
    # JS run once when the HTML5 upload starts: show the progress bar
    # and open a server transaction covering the whole check-in.
    upload_init = '''
    var info_el = top.getElement(".spt_upload_info");
    info_el.innerHTML = "Uploading ...";

    // start the upload
    var progress_el = top.getElement(".spt_upload_progress");
    var progress_top = top.getElement(".spt_upload_progress_top");
    setTimeout( function() {
        progress_el.setStyle("visibility", "visible");
        progress_top.setStyle("margin-top", "0px");
    }, 0);

    server.start( {description: "Upload and check-in of ["+files.length+"] files"} );
    '''

    # JS run on each HTML5 upload progress event: grow the bar.
    upload_progress = '''
    var top = bvr.src_el.getParent(".spt_ingest_top");
    progress_el = top.getElement(".spt_upload_progress");
    var percent = Math.round(evt.loaded * 100 / evt.total);
    progress_el.setStyle("width", percent + "%");
    progress_el.innerHTML = String(percent) + "%";
    progress_el.setStyle("background", "#f0ad4e");
    '''

    # JS run after the server command finishes: clear the queue, close
    # the transaction and hide the progress bar.  May be extended or
    # replaced through kwargs below.
    oncomplete_script = '''
    spt.notify.show_message("Ingest Completed");
    server.finish();

    var file_els = top.getElements(".spt_upload_file");
    for ( var i = 0; i < file_els.length; i++) {
        spt.behavior.destroy( file_els[i] );
    };
    var background = top.getElement(".spt_files_background");
    background.setStyle("display", "");

    spt.message.stop_interval(message_key);

    var info_el = top.getElement(".spt_upload_info");
    info_el.innerHTML = '';

    var progress_el = top.getElement(".spt_upload_progress");
    var progress_top = top.getElement(".spt_upload_progress_top");
    setTimeout( function() {
        progress_el.setStyle("visibility", "hidden");
        progress_top.setStyle("margin-top", "-30px");
    }, 0);

    spt.panel.refresh(top);
    '''

    script_found = True

    # optionally append a stored custom_script ("folder/title") to the
    # completion script; if the script cannot be found, flag it so the
    # click handler aborts with an alert instead of silently ingesting
    oncomplete_script_path = my.kwargs.get("oncomplete_script_path")
    if oncomplete_script_path:
        script_folder, script_title = oncomplete_script_path.split("/")
        oncomplete_script_expr = "@GET(config/custom_script['folder','%s']['title','%s'].script)" %(script_folder,script_title)
        server = TacticServerStub.get()
        oncomplete_script_ret = server.eval(oncomplete_script_expr, single=True)
        if oncomplete_script_ret:
            oncomplete_script = oncomplete_script + oncomplete_script_ret
        else:
            script_found = False
            oncomplete_script = "alert('Error: oncomplete script not found');"

    # explicit kwargs override the stored script entirely
    if my.kwargs.get("oncomplete_script"):
        oncomplete_script = my.kwargs.get("oncomplete_script")
    if my.kwargs.get("on_complete"):
        oncomplete_script = my.kwargs.get("on_complete")

    # JS run when the raw file upload completes: collect every setting
    # from the form (update mode, column, zip/context mode, metadata,
    # extra_data), then execute the action handler command while polling
    # progress messages keyed by message_key.
    on_complete = '''
    var top = bvr.src_el.getParent(".spt_ingest_top");
    var update_data_top = top.getElement(".spt_edit_top");
    var progress_el = top.getElement(".spt_upload_progress");
    progress_el.innerHTML = "100%";
    progress_el.setStyle("width", "100%");
    progress_el.setStyle("background", "#337ab7");

    var info_el = top.getElement(".spt_upload_info");

    var search_type = bvr.kwargs.search_type;
    var relative_dir = bvr.kwargs.relative_dir;
    var context = bvr.kwargs.context;
    var update_process = bvr.kwargs.update_process;
    var ignore_path_keywords = bvr.kwargs.ignore_path_keywords;
    var library_mode = bvr.kwargs.library_mode;
    var dated_dirs = bvr.kwargs.dated_dirs;
    var project_code = bvr.kwargs.project_code;
    if (!project_code) {
        project_code = null;
    }

    // Data comes from Ingest Settings
    var context_mode_select = top.getElement(".spt_context_mode_select");
    var context_mode = context_mode_select ? context_mode_select.value : bvr.kwargs.context_mode;

    // settings
    var update_mode = null;
    var ignore_ext = null;
    var column = null;
    var zip_mode = null;

    var update_mode_select = top.getElement(".spt_update_mode_select");
    if (update_mode_select)
        update_mode = update_mode_select.value;
    var ignore_ext_select = top.getElement(".spt_ignore_ext_select");
    if (ignore_ext_select)
        ignore_ext = ignore_ext_select.value;
    var column_select = top.getElement(".spt_column_select");
    if (column_select)
        column = column_select ? column_select.value : bvr.kwargs.column;
    var zip_mode_select = top.getElement(".spt_zip_mode_select");
    if (zip_mode_select)
        zip_mode = zip_mode_select.value;

    var filenames = [];
    for (var i = 0; i != files.length;i++) {
        var name = files[i].name;
        if (name) {
            filenames.push(name);
        }
        else {
            filenames.push(files[i]);
        }
    }

    var values = spt.api.get_input_values(top);
    //var category = values.category[0];

    var keywords = values["edit|user_keywords"];
    if (keywords) {
        keywords = keywords[0];
    }
    else {
        keywords = "";
    }

    var extra_data = values.extra_data ? values.extra_data[0]: {};
    var parent_key = values.parent_key[0];

    var search_key = values.search_key[0];

    var convert_el = top.getElement(".spt_image_convert")
    if (convert_el) {
        convert = spt.api.get_input_values(convert_el);
    }
    else {
        convert = null;
    }

    var processes = values.process;
    if (processes) {
        process = processes[0];
        if (!process) {
            process = null;
        }
    }
    else {
        process = null;
    }

    var return_array = false;
    // non-existent when my.show_settings is False
    var update_data = update_data_top ? spt.api.get_input_values(update_data_top, null, return_array): {};

    var message_key = spt.message.generate_key();
    message_key = "IngestUploadCmd|" + search_key + "|" + message_key;

    var kwargs = {
        search_key: search_key,
        search_type: search_type,
        relative_dir: relative_dir,
        filenames: filenames,
        message_key: message_key,
        parent_key: parent_key,
        //category: category,
        keywords: keywords,
        update_process: update_process,
        ignore_path_keywords: ignore_path_keywords,
        extra_data: extra_data,
        update_data: update_data,
        process: process,
        context: context,
        convert: convert,
        update_mode: update_mode,
        ignore_ext: ignore_ext,
        column: column,
        library_mode: library_mode,
        dated_dirs: dated_dirs,
        context_mode: context_mode,
        zip_mode: zip_mode,
        project_code: project_code,
    }
    on_complete = function(rtn_data) {
    ''' + oncomplete_script + '''
    };

    var class_name = bvr.action_handler;
    // TODO: make the async_callback return throw an e so we can run
    // server.abort
    server.execute_cmd(class_name, kwargs, {}, {on_complete:on_complete});

    on_progress = function(message) {
        msg = JSON.parse(message.message);
        var percent = msg.progress;
        var description = msg.description;
        var error = msg.error;
        info_el.innerHTML = description;

        progress_el.setStyle("width", percent+"%");
        progress_el.innerHTML = percent + "%";
        if (error) {
            progress_el.setStyle("background", "#F00");
            spt.message.stop_interval(message_key);
        }
    }
    spt.message.set_interval(message_key, on_progress, 500, bvr.src_el);
    '''

    # container for the button; hidden until files are queued unless
    # explicit search_keys were passed in
    upload_div = DivWdg()
    search_keys = my.kwargs.get("search_keys")
    if not search_keys:
        upload_div.add_style("display: none")
    upload_div.add_class("spt_upload_files_top")
    div.add(upload_div)

    # label differs for the cross-project copy case (sobjects given)
    if my.sobjects:
        button = ActionButtonWdg(title="Copy Files", width=200, color="primary")
    else:
        button = ActionButtonWdg(title="Upload Files", width=200, color="primary")
    upload_div.add(button)
    #button.add_style("float: right")
    #upload_div.add_style("margin-bottom: 20px")
    upload_div.add("<br clear='all'/>")

    action_handler = my.kwargs.get("action_handler")
    if not action_handler:
        action_handler = 'tactic.ui.tools.IngestUploadCmd';

    context = my.kwargs.get("context")
    context_mode = my.kwargs.get("context_mode")

    # the click handler gathers the queued file handles (real File
    # objects or "search_key:<key>" placeholders), wires the three JS
    # fragments above into the HTML5 upload callbacks and starts it
    button.add_behavior( {
        'type': 'click_up',
        'action_handler': action_handler,
        'kwargs': {
            'search_type': my.search_type,
            'relative_dir': my.relative_dir,
            'script_found': script_found,
            'context': context,
            'library_mode': library_mode,
            'dated_dirs' : dated_dirs,
            'context_mode': context_mode,
            'update_process': my.update_process,
            'ignore_path_keywords': my.ignore_path_keywords,
            'project_code': my.project_code
        },
        'cbjs_action': '''

        if (bvr.kwargs.script_found != true)
        {
            spt.alert("Error: provided on_complete script not found");
            return;
        }

        var top = bvr.src_el.getParent(".spt_ingest_top");
        var file_els = top.getElements(".spt_upload_file");
        var num_files = file_els.length;
        var files_top = top.getElement(".spt_to_ingest_files")

        spt.notify.show_message("Ingesting "+num_files+" Files");

        // get the server that will be used in the callbacks
        var server = TacticServerStub.get();

        // retrieved the stored file handles
        var files = [];
        for (var i = 0; i < file_els.length; i++) {
            if (file_els[i].file) {
                files.push( file_els[i].file );
            }
            else {
                var search_key = file_els[i].getAttribute("spt_search_key");
                files.push("search_key:"+search_key);
            }
        }
        if (files.length == 0) {
            spt.alert("Either click 'Add' or drag some files over to ingest.");
            return;
        }

        // defined the callbacks
        var upload_start = function(evt) {
        }

        var upload_progress = function(evt) {
        %s;
        }

        var upload_complete = function(evt) {
        %s;
        }

        var upload_file_kwargs = {
            files: files,
            upload_start: upload_start,
            upload_complete: upload_complete,
            upload_progress: upload_progress
        };
        if (bvr.ticket)
            upload_file_kwargs['ticket'] = bvr.ticket;

        %s;

        spt.html5upload.set_form( top );
        spt.html5upload.upload_file(upload_file_kwargs);

        ''' % (upload_progress, on_complete, upload_init)
    } )

    return div
def get_progress_div(my):
    """Build the (initially hidden) upload progress bar widget.

    The bar slides into view when an upload starts: the container is
    parked at margin-top -30px and the client-side upload callbacks
    animate it to 0px and fill the spt_upload_progress element."""
    top = DivWdg()
    top.add_style("overflow-y: hidden")

    # sliding container -- stays off-screen until an upload begins
    slide_div = DivWdg()
    top.add(slide_div)
    slide_div.add_class("spt_upload_progress_top")
    slide_div.add_style("margin-top: -30px")

    # free-text status line ("Uploading ...", per-file descriptions)
    info_div = DivWdg()
    slide_div.add(info_div)
    info_div.add_class("spt_upload_info")

    # outline that the progress bar fills
    bar_outline = DivWdg()
    bar_outline.add_class("spt_upload_progress_top")
    slide_div.add(bar_outline)
    bar_outline.add_style("width: 595px")
    bar_outline.add_style("height: 15px")
    bar_outline.add_style("margin-bottom: 10px")
    bar_outline.add_border()

    # the bar itself; width is driven from 0% to 100% by the JS callbacks
    bar = DivWdg()
    bar_outline.add(bar)
    bar.add_class("spt_upload_progress")
    bar.add_style("width: 0px")
    bar.add_style("visibility: hidden")
    bar.add_style("height: 100%")
    bar.add_gradient("background", "background3", -10)
    bar.add_style("text-align: right")
    bar.add_style("overflow: hidden")
    bar.add_style("padding-right: 3px")

    # hook up the message polling used for server-side progress updates
    from tactic.ui.app import MessageWdg
    bar.add_behavior( {
        'type': 'load',
        'cbjs_action': MessageWdg.get_onload_js()
    } )

    return top
"""
def get_data_wdg(my):
div = DivWdg()
from pyasm.biz import Pipeline
from pyasm.widget import SelectWdg
search_type_obj = SearchType.get(my.search_type)
base_type = search_type_obj.get_base_key()
search = Search("sthpw/pipeline")
search.add_filter("search_type", base_type)
pipelines = search.get_sobjects()
if pipelines:
pipeline = pipelines[0]
process_names = pipeline.get_process_names()
if process_names:
table = Table()
div.add(table)
table.add_row()
table.add_cell("Process: ")
select = SelectWdg("process")
table.add_cell(select)
process_names.append("---")
process_names.append("publish")
process_names.append("icon")
select.set_option("values", process_names)
####
buttons = Table()
div.add(buttons)
buttons.add_row()
#button = IconButtonWdg(title="Fill in Data", icon=IconWdg.EDIT)
button = ActionButtonWdg(title="Metadata")
button.add_style("float: left")
button.add_style("margin-top: -3px")
buttons.add_cell(button)
select_label = DivWdg("Update mode");
select_label.add_style("float: left")
select_label.add_style("margin-top: -3px")
select_label.add_style("margin-left: 20px")
buttons.add_cell(select_label)
update_mode_option = my.kwargs.get("update_mode")
if not update_mode_option:
update_mode_option = "true"
update_mode = SelectWdg(name="update mode")
update_mode.add_class("spt_update_mode_select")
update_mode.set_option("values", ["false", "true", "sequence"])
update_mode.set_option("labels", ["Off", "On", "Sequence"])
update_mode.set_option("default", update_mode_option)
update_mode.add_style("float: left")
update_mode.add_style("margin-top: -3px")
update_mode.add_style("margin-left: 5px")
update_mode.add_style("margin-right: 5px")
buttons.add_cell(update_mode)
update_info = DivWdg()
update_info.add_class("glyphicon")
update_info.add_class("glyphicon-info-sign")
update_info.add_style("float: left")
update_info.add_style("margin-top: -3px")
update_info.add_style("margin-left: 10px")
update_info.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
spt.info("When update mode is on, if a file shares the name of one other file in the asset library, the file will update on ingest. If more than one file shares the name of an ingested asset, a new asset is created.<br> If sequence mode is selected, the system will update the sobject on ingest if a file sequence sharing the same name already exists.", {type: 'html'});
'''
} )
buttons.add_cell(update_info);
dialog = DialogWdg(display="false", show_title=False)
div.add(dialog)
dialog.set_as_activator(button, offset={'x':-10,'y':10})
dialog_data_div = DivWdg()
dialog_data_div.add_color("background", "background")
dialog_data_div.add_style("padding", "20px")
dialog.add(dialog_data_div)
# Order folders by date
name_div = DivWdg()
dialog_data_div.add(name_div)
name_div.add_style("margin: 15px 0px")
if SearchType.column_exists(my.search_type, "relative_dir"):
category_div = DivWdg()
name_div.add(category_div)
checkbox = RadioWdg("category")
checkbox.set_option("value", "none")
category_div.add(checkbox)
category_div.add(" No categories")
category_div.add_style("margin-bottom: 5px")
checkbox.set_option("checked", "true")
category_div = DivWdg()
name_div.add(category_div)
checkbox = RadioWdg("category")
checkbox.set_option("value", "by_day")
category_div.add(checkbox)
category_div.add(" Categorize files by Day")
category_div.add_style("margin-bottom: 5px")
category_div = DivWdg()
name_div.add(category_div)
checkbox = RadioWdg("category")
checkbox.set_option("value", "by_week")
category_div.add(checkbox)
category_div.add(" Categorize files by Week")
category_div.add_style("margin-bottom: 5px")
category_div = DivWdg()
name_div.add(category_div)
checkbox = RadioWdg("category")
checkbox.set_option("value", "by_year")
category_div.add(checkbox)
category_div.add(" Categorize files by Year")
category_div.add_style("margin-bottom: 5px")
name_div.add("<br/>")
# edit
from tactic.ui.panel import EditWdg
ingest_data_view = my.kwargs.get('ingest_data_view')
sobject = SearchType.create(my.search_type)
edit = EditWdg(search_key =sobject.get_search_key(), mode='view', view=ingest_data_view )
dialog_data_div.add(edit)
hidden = HiddenWdg(name="parent_key")
dialog_data_div.add(hidden)
hidden.add_class("spt_parent_key")
parent_key = my.kwargs.get("parent_key") or ""
if parent_key:
hidden.set_value(parent_key)
extra_data = my.kwargs.get("extra_data")
if not isinstance(extra_data, basestring):
extra_data = jsondumps(extra_data)
if extra_data and extra_data != "null":
# it needs a TextArea instead of Hidden because of JSON data
text = TextAreaWdg(name="extra_data")
text.add_style('display: none')
text.set_value(extra_data)
dialog_data_div.add(text)
return div
"""
    def get_select_files_button(my):
        """Return the "Add Files to Queue" action button wired to the HTML5
        file picker; picked files are rendered into the .spt_to_ingest_files
        list inside the enclosing .spt_ingest_top widget.
        """
        button = ActionButtonWdg(title="Add Files to Queue", width=150, color="warning")

        from tactic.ui.input import Html5UploadWdg
        # hidden <input type=file> driven by the spt.html5upload JS API below
        upload = Html5UploadWdg(multiple=True)
        button.add(upload)

        button.add_style("margin: 30px auto")

        button.add_behavior( {
            'type': 'click_up',
            # File.NORMAL_EXT: extensions checked in as-is (no icon/preview pipeline)
            'normal_ext': File.NORMAL_EXT,
            'cbjs_action': '''
            var top = bvr.src_el.getParent(".spt_ingest_top");
            var files_el = top.getElement(".spt_to_ingest_files");
            var regex = new RegExp('(' + bvr.normal_ext.join('|') + ')$', 'i');

            // clear upload progress
            var upload_bar = top.getElement('.spt_upload_progress');
            if (upload_bar) {
                upload_bar.setStyle('width','0%');
                upload_bar.innerHTML = '';
            }
            var upload_button = top.getElement(".spt_upload_files_top");

            var onchange = function (evt) {
                var files = spt.html5upload.get_files();
                // stagger client-side previews for small files; large or
                // "normal" files are listed immediately without a preview
                var delay = 0;
                for (var i = 0; i < files.length; i++) {
                    var size = files[i].size;
                    var file_name = files[i].name;
                    var is_normal = regex.test(file_name);
                    if (size >= 10*1024*1024 || is_normal) {
                        spt.drag.show_file(files[i], files_el, 0, false);
                    }
                    else {
                        spt.drag.show_file(files[i], files_el, delay, true);

                        if (size < 100*1024) delay += 50;
                        else if (size < 1024*1024) delay += 500;
                        else if (size < 10*1024*1024) delay += 1000;
                    }
                }
                upload_button.setStyle("display", "");
            }

            spt.html5upload.clear();
            spt.html5upload.set_form( top );
            spt.html5upload.select_file( onchange );
            '''
        } )
        return button
class IngestCheckCmd(Command):
    """Pre-ingest check command: group the supplied file names into
    sequences.  The resulting sequence info is stored on my.info for the
    caller (the ingest UI) to retrieve.
    """

    def execute(my):
        from pyasm.biz import FileRange
        names = my.kwargs.get("file_names")
        # delegate sequence detection to FileRange and expose the result
        my.info = FileRange.get_sequences(names)
class IngestUploadCmd(Command):
# FOLDER_LIMIT can be adjusted as desired.
FOLDER_LIMIT = 500
def get_server(my):
if not my.server:
project_code = my.kwargs.get("project_code")
if not project_code:
my.server = TacticServerStub.get()
else:
my.server = TacticServerStub(protocol="local")
my.server.set_project(project_code)
return my.server
def execute(my):
my.server = None
my.message_key = my.kwargs.get("message_key")
try:
return my._execute()
except Exception, e:
if my.message_key:
msg = {
'progress': 100,
'error': '%s' % e,
'description': 'Error: %s' % e
}
server = my.get_server()
server.log_message(my.message_key, msg, status="in progress")
raise
def _execute(my):
library_mode = my.kwargs.get("library_mode")
current_folder = 0
dated_dirs = my.kwargs.get("dated_dirs")
filenames = my.kwargs.get("filenames")
relative_dir = my.kwargs.get("relative_dir")
base_dir = my.kwargs.get("base_dir")
if not base_dir:
upload_dir = Environment.get_upload_dir()
base_dir = upload_dir
context_mode = my.kwargs.get("context_mode")
if not context_mode:
context_mode = "case_sensitive"
update_mode = my.kwargs.get("update_mode")
ignore_ext = my.kwargs.get("ignore_ext")
column = my.kwargs.get("column")
if not column:
column = "name"
search_key = my.kwargs.get("search_key")
if search_key:
my.sobject = Search.get_by_search_key(search_key)
search_type = my.sobject.get_base_search_type()
else:
search_type = my.kwargs.get("search_type")
my.sobject = None
if not relative_dir:
project_code = Project.get_project_code()
search_type_obj = SearchType.get(search_type)
table = search_type_obj.get_table()
relative_dir = "%s/%s" % (project_code, table)
server = my.get_server()
parent_key = my.kwargs.get("parent_key")
category = my.kwargs.get("category")
keywords = my.kwargs.get("keywords")
update_process = my.kwargs.get("update_process")
ignore_path_keywords = my.kwargs.get("ignore_path_keywords")
if ignore_path_keywords:
ignore_path_keywords = ignore_path_keywords.split(",")
ignore_path_keywords = [x.strip() for x in ignore_path_keywords]
update_data = my.kwargs.get("update_data")
extra_data = my.kwargs.get("extra_data")
if extra_data:
extra_data = jsonloads(extra_data)
else:
extra_data = {}
update_sobject_found = False
# TODO: use this to generate a category
category_script_path = my.kwargs.get("category_script_path")
"""
ie:
from pyasm.checkin import ExifMetadataParser
parser = ExifMetadataParser(path=file_path)
tags = parser.get_metadata()
date = tags.get("EXIF DateTimeOriginal")
return date.split(" ")[0]
"""
# remap the filenames for seuqences
if update_mode == "sequence":
sequences = FileRange.get_sequences(filenames)
filenames = []
for sequence in sequences:
print "sequence: ", sequence
if sequence.get('is_sequence'):
filename = sequence.get("template")
else:
filename = sequence.get("filenames")[0]
filenames.append(filename)
input_prefix = update_data.get('input_prefix')
non_seq_filenames = []
if library_mode:
relative_dir = "%s/001" % relative_dir
snapshots = []
for count, filename in enumerate(filenames):
# Check if files should be updated.
# If so, attempt to find one to update.
# If more than one is found, do not update.
if filename.endswith("/"):
# this is a folder:
continue
if filename.startswith("search_key:"):
mode = "search_key"
tmp, search_key = filename.split("search_key:")
snapshot = Search.get_by_search_key(search_key)
if snapshot.get_search_type() == "sthpw/snapshot":
lib_path = snapshot.get_lib_path_by_type()
filename = os.path.basename(lib_path)
new_filename = re.sub(r"_v\d+", "", filename)
else:
snapshot = Snapshot.get_latest_by_sobject(snapshot, process="publish")
lib_path = snapshot.get_lib_path_by_type()
filename = os.path.basename(lib_path)
new_filename = re.sub(r"_v\d+", "", filename)
if not snapshot:
raise Exception("Must pass in snapshot search_key")
else:
mode = "multi"
new_filename = filename
if library_mode:
# get count of number of files in the current asset ingest dir
import glob
abs_path = Environment.get_asset_dir() + "/" + relative_dir + "/*"
if len(glob.glob(abs_path)) > my.FOLDER_LIMIT:
current_folder = current_folder + 1
relative_dir = "%s/%03d" % (relative_dir[:-4], current_folder)
unzip = my.kwargs.get("unzip")
zip_mode = my.kwargs.get("zip_mode")
if zip_mode in ['unzip'] or unzip in ["true", True] and filename.endswith(".zip"):
from pyasm.common import ZipUtil
unzip_dir = Environment.get_upload_dir()
if not os.path.exists(unzip_dir):
os.makedirs(unzip_dir)
zip_path = "%s/%s" % (base_dir, filename)
ZipUtil.extract(zip_path, base_dir=unzip_dir)
paths = ZipUtil.get_file_paths(zip_path)
new_kwargs = my.kwargs.copy()
new_kwargs['filenames'] = paths
new_kwargs['base_dir'] = unzip_dir
new_kwargs['zip_mode'] = "single"
ingest = IngestUploadCmd(**new_kwargs)
ingest.execute()
continue
if my.sobject:
sobject = my.sobject
elif update_mode in ["true", True, "update"]:
# first see if this sobjects still exists
search = Search(search_type)
# ingested files into search type applies filename without i.e. _v001 suffix
search.add_filter(column, new_filename)
if relative_dir and search.column_exists("relative_dir"):
if not dated_dirs:
search.add_filter("relative_dir", relative_dir)
sobjects = search.get_sobjects()
if len(sobjects) > 1:
sobject = None
elif len(sobjects) == 1:
sobject = sobjects[0]
update_sobject_found = True
else:
sobject = None
elif update_mode == "sequence":
# This check is not needed anymore as the sequence analyzer
# can handle a mix of sequence and non sequences
#if not FileGroup.is_sequence(filename):
# raise TacticException('Please modify sequence naming to have at least three digits [%s].' % filename)
search = Search(search_type)
search.add_filter(column, filename)
if relative_dir and search.column_exists("relative_dir"):
if not dated_dirs:
search.add_filter("relative_dir", relative_dir)
sobjects = search.get_sobjects()
if sobjects:
sobject = sobjects[0]
else:
sobject = None
else:
sobject = None
# Create a new entry
if not sobject:
if update_mode not in ['true', True, "update"]:
sobjects = []
my.check_existing_file(search_type, new_filename, relative_dir, update_mode, sobjects)
sobject = SearchType.create(search_type)
if ignore_ext in ['true', True]:
name, ext = os.path.splitext(new_filename)
else:
name = new_filename
# if the name contains a path, the only take basename
name = os.path.basename(name)
sobject.set_value(column, name)
if relative_dir and sobject.column_exists("relative_dir"):
sobject.set_value("relative_dir", relative_dir)
if mode == "search_key":
path = lib_path
elif relative_dir:
path = "%s/%s" % (relative_dir, filename)
else:
path = filename
# Don't want the keywords being extracted from lib_path, extract the relative dir path instead
# Using new_filename because it is the filename without version numbers
if relative_dir:
path_for_keywords = "%s/%s" % (relative_dir, new_filename)
else:
path_for_keywords = new_filename
file_keywords = Common.extract_keywords_from_path(path_for_keywords)
# Extract keywords from the path to be added to keywords_data,
# if ignore_path_keywords is found, remove the specified keywords
# from the path keywords
if ignore_path_keywords:
for ignore_path_keyword in ignore_path_keywords:
if ignore_path_keyword in file_keywords:
file_keywords.remove(ignore_path_keyword)
file_keywords.append(filename.lower())
file_keywords = " ".join(file_keywords)
new_file_keywords = ""
if SearchType.column_exists(search_type, "keywords"):
if keywords:
new_file_keywords = "%s %s" % (keywords, file_keywords)
else:
new_file_keywords = file_keywords
sobject.set_value("keywords", new_file_keywords)
if SearchType.column_exists(search_type, "user_keywords"):
if keywords:
sobject.set_value("user_keywords", keywords)
if SearchType.column_exists(search_type, "keywords_data"):
data = sobject.get_json_value("keywords_data", {})
data['user'] = keywords
data['path'] = file_keywords
sobject.set_json_value("keywords_data", data)
# extract metadata
#file_path = "%s/%s" % (base_dir, File.get_filesystem_name(filename))
if update_mode == "sequence":
sequence = sequences[count]
file_path = "%s/%s" % (base_dir, sequence.get("filenames")[0])
elif mode == "search_key":
file_path = path
else:
file_path = "%s/%s" % (base_dir, filename)
"""
# TEST: convert on upload
try:
convert = my.kwargs.get("convert")
if convert:
message_key = "IngestConvert001"
cmd = ConvertCbk(**convert)
cmd.execute()
except Exception, e:
print "WARNING: ", e
"""
# check if the file exists
if mode != "search_key" and not os.path.exists(file_path):
raise Exception("Path [%s] does not exist" % file_path)
# get the metadata from this image
if SearchType.column_exists(search_type, "relative_dir"):
if category and category not in ['none', None]:
from pyasm.checkin import ExifMetadataParser
parser = ExifMetadataParser(path=file_path)
tags = parser.get_metadata()
date = tags.get("EXIF DateTimeOriginal")
if not date:
date_str = "No-Date"
else:
date_str = str(date)
# this can't be parsed correctly by dateutils
parts = date_str.split(" ")
date_str = parts[0].replace(":", "-")
date_str = "%s %s" % (date_str, parts[1])
from dateutil import parser
orig_date = parser.parse(date_str)
if category == "by_day":
date_str = orig_date.strftime("%Y/%Y-%m-%d")
elif category == "by_month":
date_str = orig_date.strftime("%Y-%m")
elif category == "by_week":
date_str = orig_date.strftime("%Y/Week-%U")
full_relative_dir = "%s/%s" % (relative_dir, date_str)
sobject.set_value("relative_dir", full_relative_dir)
# Add parent sObject
if parent_key:
parent = Search.get_by_search_key(parent_key)
if parent:
try:
sobject.set_sobject_value(parent)
except:
pass
# for some unknown reason, this input prefix is ignored
if update_data.has_key("input_prefix"):
del(update_data['input_prefix'])
new_data = {}
for name, value in update_data.items():
if name == "input_prefix":
continue
name = name.replace("%s|"%input_prefix, "")
new_data[name] = value
if new_data:
from tactic.ui.panel import EditCmd
cmd = EditCmd(
view="edit",
sobject=sobject,
data=new_data,
commit="false",
)
cmd.execute()
for key, value in extra_data.items():
if SearchType.column_exists(search_type, key):
sobject.set_value(key, value)
"""
if category:
if SearchType.column_exists(search_type, "category"):
sobject.set_value("category", category)
if SearchType.column_exists(search_type, "relative_dir"):
full_relative_dir = "%s/%s" % (relative_dir, category)
sobject.set_value("relative_dir", category)
"""
sobject.commit()
search_key = sobject.get_search_key()
status = sobject.get_value("status", no_exception=True)
is_verified = status in ['Verified']
# use API to check in file
process = my.kwargs.get("process")
if not process:
process = "publish"
context = my.kwargs.get("context")
if not context:
context = process
if process == "icon":
context = "icon"
else:
context = "%s/%s" % (context, filename)
if context_mode == "case_insensitive":
context = context.lower()
version = None
if not is_verified and update_process and update_sobject_found:
process = update_process
# find what the version number should be
search = Search("sthpw/snapshot")
search.add_parent_filter(sobject)
search.add_filter("context", context)
search.add_order_by("version desc")
max_snapshot = search.get_sobject()
version = max_snapshot.get_value("version")
if not version:
version = 1
else:
version += 1
if update_mode == "sequence":
file_range = sequence.get("range")
if file_range == "":
raise Exception("Error: %s" % sequence.get("error"))
if sequence.get("is_sequence"):
file_path = "%s/%s" % (base_dir, sequence.get("template"))
snapshot = server.group_checkin(search_key, context, file_path, file_range, mode='move', version=version)
else:
file_path = "%s/%s" % (base_dir, sequence.get("filenames")[0])
snapshot = server.simple_checkin(search_key, context, file_path, mode='uploaded', version=version)
elif mode == "search_key":
if lib_path.find("##") != -1:
file_range = snapshot.get_file_range().get_display
file_path = lib_path
snapshot = server.group_checkin(search_key, context, file_path, file_range, mode='copy')
else:
# copy the file to a temporary location
tmp_dir = Environment.get_tmp_dir()
tmp_path = "%s/%s" % (tmp_dir, new_filename)
shutil.copy(file_path, tmp_path)
# auto create icon
snapshot = server.simple_checkin(search_key, context, tmp_path, process=process, mode='move')
elif my.kwargs.get("base_dir"):
# auto create icon
snapshot = server.simple_checkin(search_key, context, file_path, process=process, mode='move', version=version)
else:
snapshot = server.simple_checkin(search_key, context, filename, process=process, mode='uploaded', version=version)
snapshots.append(snapshot)
#server.update(snapshot, {"user_keywords": "abc 123"} )
percent = int((float(count)+1) / len(filenames)*100)
if my.message_key:
msg = {
'progress': percent,
'description': 'Checking in file [%s]' % filename,
}
server.log_message(my.message_key, msg, status="in progress")
if my.message_key:
msg = {
'progress': '100',
'description': 'Check-ins complete'
}
server.log_message(my.message_key, msg, status="complete")
return
def check_existing_file(my, search_type, new_filename, relative_dir, update_mode, sobjects):
project_code = Project.get_project_code()
file_search_type = SearchType.build_search_type(search_type, project_code)
search_name, search_ext = os.path.splitext(new_filename)
search_name = "%s.%%" % search_name
search_file = Search("sthpw/file")
search_file.add_filter("search_type", file_search_type)
search_file.add_filter("relative_dir", relative_dir)
search_file.add_filter("file_name", search_name, op='like')
file_sobjects = search_file.get_sobjects()
if file_sobjects and update_mode in ['true', True] and len(sobjects) > 1:
raise TacticException('Multiple files with the same name as "%s" already exist. Uncertain as to which file to update. Please individually update each file.' % new_filename)
elif file_sobjects:
raise TacticException('A file with the same name as "%s" already exists in the file table with path "%s". Please rename the file and ingest again.' % (new_filename, relative_dir))
def natural_sort(my,l):
'''
natural sort will makesure a list of names passed in is
sorted in an order of 1000 to be after 999 instead of right after 101
'''
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
"""
def find_sequences(my, filenames):
'''
Parse a list of filenames into a dictionary of sequences. Filenames not
part of a sequence are returned in the None key
:param filenames | [<str>, ..]
:return {<str> sequence: [<str> filename, ..], ..}
'''
local_filenames = filenames[:]
sequence_patterns = {}
sequences = {None: []}
# sort the files (by natural order) so we always generate a pattern
# based on the first potential file in a sequence
local_filenames = my.natural_sort(local_filenames)
for filename in local_filenames:
count = re.findall('\d+', filename)
if not count:
raise TacticException("Please ingest sequences only.")
base, file_ext = os.path.splitext(filename)
if file_ext:
file_ext = file_ext[1:]
# if last set of digits is not a file extension, and is less than 3 digits
# because common.get_dir_info only works with 3 of more digits
if len(count[-1]) <= 1 and file_ext.isalpha():
raise TacticException('Please modify sequence naming to have at least three digits.')
# if file extension found, and contains a number in the extension (but also not completely numbers)
# grab the second last set of digits
# ie. .mp3, .mp4, .23p
if file_ext and not file_ext.isalpha() and not file_ext.isdigit():
seq_digit_length = len(count[-2])
else:
seq_digit_length = len(count[-1])
# if file_ext is empty, or if file_ext[1] is all numbers, use expression below
# abc0001, abc.0001 ...etc
if not file_ext or file_ext.isdigit():
try:
pattern_expr = re.compile('^(.*)(\d{%d})([^\d]*)$'%seq_digit_length)
except:
sequences[None].append(filename)
continue
# then for regular filenames, try grabbing filenames by looking at the digits before the last dot
# for files with extensions:
# abc.0001.png, abc.0001.mp3, abc0001.mp3,
else:
try:
pattern_expr = re.compile('^(.*)(\d{%d})(\..*)$'%seq_digit_length)
except:
sequences[None].append(filename)
continue
pound_length = seq_digit_length
pounds = "#" * pound_length
# first, check to see if this filename matches a sequence
found = False
for key, pattern in sequence_patterns.items():
match = pattern.match(filename)
if not match:
continue
sequences[key].append(filename)
found = True
break
# if we've already been matched, then continue on
if found:
continue
# next, see if this filename should start a new sequence
basename = os.path.basename(filename)
pattern_match = pattern_expr.match(basename)
if pattern_match:
opts = (pattern_match.group(1), pattern_match.group(3))
key = '%s%s%s' % (opts[0], pounds, opts[1])
# create a new pattern based on the filename
sequence_pattern = re.compile('^%s\d+%s$' % opts)
sequence_patterns[key] = sequence_pattern
sequences[key] = [filename]
continue
# otherwise, add it to the list of non-sequences
sequences[None].append(filename)
# now that we have grouped everything, we'll merge back filenames
# that were potential sequences, but only contain a single file to the
# non-sequential list
for key, filenames in sequences.items():
if ( key is None or len(filenames) > 1 ):
continue
sequences.pop(key)
sequences[None] += filenames
return sequences, seq_digit_length
"""
BRANCH 4.6: added missing variable definition
###########################################################
#
# Copyright (c) 2005-2008, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
from pyasm.common import Environment, jsonloads, jsondumps, TacticException, Common
from pyasm.web import DivWdg, Table
from pyasm.widget import IconWdg, TextWdg, SelectWdg, CheckboxWdg, RadioWdg, TextAreaWdg, HiddenWdg
from pyasm.command import Command
from pyasm.search import SearchType, Search
from pyasm.biz import File, Project, FileGroup, FileRange, Snapshot
from tactic.ui.common import BaseRefreshWdg
from tactic.ui.container import DialogWdg
from tactic.ui.widget import IconButtonWdg
from tactic.ui.input import UploadButtonWdg, TextInputWdg
from tactic.ui.widget import ActionButtonWdg
from tactic_client_lib import TacticServerStub
import os
import os.path
import re
import shutil
__all__ = ['IngestUploadWdg', 'IngestCheckCmd', 'IngestUploadCmd']
class IngestUploadWdg(BaseRefreshWdg):
ARGS_KEYS = {
'base_dir': 'Base directory to check into',
'search_type': 'Search Type to ingest into',
'parent_key': 'Parent search key to relate create sobject to',
'process': 'The default process to ingest into',
'context': 'Fixed context to ingest into',
'ingest_data_view': 'Specify a ingest data view, defaults to edit',
'extra_data': 'Extra data (JSON) to be added to created sobjects',
'oncomplete_script_path': 'Script to be run on a finished ingest',
'update_mode': 'Takes values "true" or "false". When true, uploaded files will update existing file iff exactly one file exists already with the same name.',
'context_mode': 'Set or remove context case sensitivity.',
'hidden_options': 'Comma separated list of hidden settings i.e. "process,context_mode"',
'title': 'The title to display at the top',
'library_mode': 'Mode to determine if Ingest should handle huge amounts of files',
'dated_dirs': 'Determines update functionality, marked true if relative_dir is timestamped',
'update_process': 'Determines the update process for snapshots when the update_mode is set to true and one sobject is found',
'ignore_path_keywords': 'Comma separated string of path keywords to be hidden',
'project_code': 'Publish to another project',
}
    def get_display(my):
        """Build the top-level ingest UI: a left column holding the file
        queue and (optionally) a right column with the ingest settings panel.
        """
        my.sobjects = my.kwargs.get("sobjects")

        # if search_keys are passed in, then these are used to copy
        search_keys = my.kwargs.get("search_keys")

        # add a project to copy to. Check that it is permitted
        my.project_code = my.kwargs.get("project_code")

        if search_keys:
            my.sobjects = Search.get_by_search_keys(search_keys)
            projects = Project.get_user_projects()
            project_codes = [x.get_code() for x in projects]
            # silently drop a project the current user has no access to
            if my.project_code not in project_codes:
                my.project_code = None

        asset_dir = Environment.get_asset_dir()

        base_dir = my.kwargs.get("base_dir")
        if base_dir:
            # an explicit base_dir must live under the asset root and is
            # converted to a relative_dir
            if not base_dir.startswith(asset_dir):
                raise Exception("Path needs to be in asset root")
            else:
                relative_dir = base_dir.replace(asset_dir, "")
                relative_dir = relative_dir.strip("/")
        else:
            relative_dir = my.kwargs.get("relative_dir")
        my.relative_dir = relative_dir

        # This is used to check into a search key (not create a new sobject)
        my.orig_sobject = None
        my.search_key = my.kwargs.get("search_key") or ""
        if my.search_key:
            my.sobject = Search.get_by_search_key(my.search_key)
            # use_parent redirects the check-in to the sobject's parent,
            # keeping the original around (my.orig_sobject) for its process
            if my.kwargs.get("use_parent") in [True, 'true']:
                my.orig_sobject = my.sobject
                my.sobject = my.sobject.get_parent()
                my.search_key = my.sobject.get_search_key()

            my.search_type = my.sobject.get_search_type()

            # settings default to hidden when targeting a specific sobject
            my.show_settings = my.kwargs.get("show_settings")
            if not my.show_settings:
                my.show_settings = False
        else:
            my.search_type = my.kwargs.get("search_type")
            my.sobject = None
            my.search_key = None
            # settings default to shown otherwise
            my.show_settings = my.kwargs.get("show_settings")
            if my.show_settings == None:
                my.show_settings = True

        top = my.top
        top.add_class("spt_ingest_top")

        # hidden input carrying the target search key for the ingest command
        hidden = HiddenWdg(name="parent_key")
        #hidden = TextWdg(name="parent_key")
        top.add(hidden)
        hidden.add_class("spt_parent_key")
        if my.search_key:
            hidden.set_value(my.search_key)

        table = Table()
        top.add(table)
        table.add_row()

        left = table.add_cell()
        left.add_style("vertical-align: top")
        left.add( my.get_content_wdg() )

        if not my.search_key or my.show_settings:
            if my.show_settings:
                # thin vertical divider between the queue and the settings
                middle = table.add_cell()
                middle.add_style("height: 10") # not sure why we need this height
                middle.add_style("padding: 30px 20px")
                line = DivWdg()
                middle.add(line)
                line.add_style("height: 100%")
                line.add_style("border-style: solid")
                line.add_style("border-width: 0px 0px 0px 1px")
                line.add_style("border-color: #DDD")
                line.add(" ")

            # the settings cell is always created here but hidden from view
            # when show_settings is off
            right = table.add_cell()
            right.add_class("spt_right_content")
            right.add_style("vertical-align: top")
            right.add( my.get_settings_wdg() )
            if my.show_settings in [False, 'false']:
                right.add_style("display: none")
        else:
            # no settings panel: forward the original sobject's process
            # through a hidden input instead
            if my.orig_sobject and my.orig_sobject.column_exists("process"):
                hidden = HiddenWdg(name="process")
                #hidden = TextWdg(name="process")
                top.add(hidden)
                hidden.add_class("spt_process")
                process = my.orig_sobject.get_value("process")
                hidden.set_value(process)

        return top
def get_file_wdg(my, sobject=None):
# template for each file item
file_template = DivWdg()
if not sobject:
file_template.add_class("spt_upload_file_template")
file_template.add_style("display: none")
else:
file_template.add_class("spt_upload_file")
file_template.add_style("margin-bottom: 3px")
file_template.add_style("padding: 3px")
file_template.add_style("height: 40px")
thumb_div = DivWdg()
file_template.add(thumb_div)
thumb_div.add_style("float: left")
thumb_div.add_style("width: 60");
thumb_div.add_style("height: 40");
thumb_div.add_style("overflow: hidden");
thumb_div.add_style("margin: 3 10 3 0");
thumb_div.add_class("spt_thumb")
info_div = DivWdg()
file_template.add(info_div)
info_div.add_style("float: left")
name_div = DivWdg()
name_div.add_class("spt_name")
info_div.add(name_div)
name_div.add_style("width: 225px")
name_div.add_style("overflow-x: hidden")
name_div.add_style("text-overflow: ellipsis")
date_div = DivWdg()
date_div.add_class("spt_date_label")
info_div.add(date_div)
date_div.add("")
date_div.add_style("opacity: 0.5")
date_div.add_style("font-size: 0.8em")
date_div.add_style("font-style: italic")
date_div.add_style("margin-top: 3px")
hidden_date_div = HiddenWdg("date")
hidden_date_div.add_class("spt_date")
info_div.add(date_div)
size_div = DivWdg()
size_div.add_class("spt_size")
file_template.add(size_div)
size_div.add_style("float: left")
size_div.add_style("width: 150px")
size_div.add_style("text-align: right")
remove_div = DivWdg()
remove_div.add_class("spt_remove")
file_template.add(remove_div)
icon = IconButtonWdg(title="Remove", icon="BS_REMOVE")
icon.add_style("float: right")
remove_div.add(icon)
#remove_div.add_style("text-align: right")
if sobject:
from pyasm.common import FormatValue
from tactic.ui.panel import ThumbWdg2
thumb = ThumbWdg2()
thumb_div.add(thumb)
thumb.set_sobject(sobject)
lib_path = thumb.get_lib_path()
name = os.path.basename(lib_path)
name = re.sub(r"_v\d+", "", name)
if sobject.get_base_search_type() == "sthpw/snapshot":
if sobject.get("snapshot_type") == "sequence":
paths = sobject.get_expanded_lib_paths()
file_range = sobject.get_file_range()
size = 0
for path in paths:
size += os.path.getsize(path)
name = "%s (%s)" % (name, file_range.get_display())
else:
size = os.path.getsize(lib_path)
else:
size = os.path.getsize(lib_path)
name_div.add( name )
size = FormatValue().get_format_value(size, "KB")
size_div.add(size)
file_template.add_attr("spt_search_key", sobject.get_search_key())
else:
# example data
size_div.add("433Mb")
name_div.add("image001.jpg")
return file_template
def get_settings_wdg(my):
div = DivWdg()
div.add_style("width: 400px")
div.add_style("padding: 20px")
title_wdg = DivWdg()
div.add(title_wdg)
title_wdg.add("Ingest Settings")
title_wdg.add_style("font-size: 25px")
# Build list of process names
process_names = set()
from pyasm.biz import Pipeline
from pyasm.widget import SelectWdg
search_type_obj = SearchType.get(my.search_type)
base_type = search_type_obj.get_base_key()
pipeline_search = Search("sthpw/pipeline")
if my.sobject:
pipeline_code = my.sobject.get_value("pipeline_code")
if pipeline_code:
pipeline_search.add_filter("code", pipeline_code)
else:
pipeline_search.set_null_filter()
pipeline_search.add_project_filter()
pipeline_search.add_filter("search_type", base_type)
pipelines = pipeline_search.get_sobjects()
for pipeline in pipelines:
process_names.update(pipeline.get_process_names())
selected_process = my.kwargs.get("process")
if selected_process:
process_names.add(selected_process)
if process_names:
process_names = list(process_names)
process_names.sort()
else:
process_names = []
if process_names:
process_names.append("---")
process_names.append("publish")
process_names.append("icon")
hidden_options = my.kwargs.get("hidden_options").split(',')
process_wdg = DivWdg()
div.add(process_wdg)
title_wdg = DivWdg()
process_wdg.add(title_wdg)
title_wdg.add("Process")
title_wdg.add_style("margin-top: 20px")
title_wdg.add_style("font-size: 16px")
process_wdg.add("<br/>")
select = SelectWdg("process")
process_wdg.add(select)
select.set_option("values", process_names)
select.add_empty_option("- Select Process to ingest to-")
if selected_process:
select.set_option("default", selected_process)
process_wdg.add("<br/>")
if "process" in hidden_options:
process_wdg.set_style("display: none")
# Metadata
#hidden_options.append("metadata")
if "metadata" not in hidden_options:
process_wdg.add("<hr/>")
title_wdg = DivWdg()
div.add(title_wdg)
title_wdg.add("Metadata")
title_wdg.add_style("margin-top: 20px")
title_wdg.add_style("font-size: 16px")
title_wdg.add_style("margin-bottom: 5px")
desc_wdg = DivWdg("The following metadata will be added to the ingested files.")
desc_wdg.add_style("margin-bottom: 10px")
div.add(desc_wdg)
from tactic.ui.panel import EditWdg
ingest_data_view = my.kwargs.get('metadata_view')
if not ingest_data_view:
ingest_data_view = my.kwargs.get('ingest_data_view')
if my.search_key:
sobject = SearchType.create("sthpw/snapshot")
else:
sobject = SearchType.create(my.search_type)
metadata_element_names = my.kwargs.get("metadata_element_names")
if my.show_settings:
edit = EditWdg(
search_key=sobject.get_search_key(),
mode='view',
view=ingest_data_view,
element_names=metadata_element_names,
show_header=False,
width="100%",
display_mode="single_cell",
extra_data=my.kwargs.get("extra_data"),
default=my.kwargs.get("default"),
)
div.add(edit)
div.add("<br/>")
# options
# update mode
map_div = DivWdg()
div.add(map_div)
map_div.add("<hr/>")
title_wdg = DivWdg()
map_div.add(title_wdg)
title_wdg.add("Mapping Files to Items")
title_wdg.add_style("margin-top: 20px")
title_wdg.add_style("font-size: 16px")
if "map_option" in hidden_options:
map_div.add_style("display: none")
if "update_option" not in hidden_options:
label_div = DivWdg()
label_div.add("Determine how the file maps to a particular item")
map_div.add(label_div)
label_div.add_style("margin-top: 10px")
label_div.add_style("margin-bottom: 8px")
update_mode_option = my.kwargs.get("update_mode")
if not update_mode_option:
update_mode_option = "true"
update_mode = SelectWdg(name="update mode")
update_mode.add_class("spt_update_mode_select")
update_mode.set_option("values", ["false", "true", "sequence"])
update_mode.set_option("labels", ["Always insert a new item", "Update duplicate items", "Update groups as sequences"])
update_mode.set_option("default", update_mode_option)
update_mode.add_style("margin-top: -3px")
update_mode.add_style("margin-right: 5px")
map_div.add(update_mode)
update_mode.add_behavior( {
"type": "listen",
"event_name": "set_ingest_update_mode",
"cbjs_action": '''
var value = bvr.firing_data.value;
bvr.src_el.value = value;
'''
} )
if not my.search_key and "ext_option" not in hidden_options:
label_div = DivWdg()
label_div.add("Ignore File Extension")
map_div.add(label_div)
label_div.add_style("margin-top: 10px")
label_div.add_style("margin-bottom: 8px")
ignore_ext_option = my.kwargs.get("ignore_ext")
if not ignore_ext_option:
ignore_ext_option = "false"
ignore_ext = SelectWdg(name="update mode")
ignore_ext.add_class("spt_ignore_ext_select")
ignore_ext.set_option("values", ["true", "false"])
ignore_ext.set_option("labels", ["Yes", "No"])
ignore_ext.set_option("default", ignore_ext_option)
ignore_ext.add_style("margin-top: -3px")
ignore_ext.add_style("margin-right: 5px")
map_div.add(ignore_ext)
if not my.search_key and "column_option" not in hidden_options:
label_div = DivWdg()
label_div.add("Map file name to column")
map_div.add(label_div)
label_div.add_style("margin-top: 10px")
label_div.add_style("margin-bottom: 8px")
column_option = my.kwargs.get("column")
if not column_option:
column_option = "name"
column_select = SelectWdg(name="update mode")
column_select.add_class("spt_column_select")
column_select.set_option("values", ["name", "code"])
column_select.set_option("labels", ["Name", "Code"])
column_select.set_option("default", column_option)
column_select.add_style("margin-top: -3px")
column_select.add_style("margin-right: 5px")
map_div.add(column_select)
if "zip_mode" not in hidden_options:
label_div = DivWdg()
label_div.add("When checking in zipped files:")
map_div.add(label_div)
label_div.add_style("margin-top: 10px")
label_div.add_style("margin-bottom: 8px")
column_option = my.kwargs.get("column")
if not column_option:
column_option = "name"
column_select = SelectWdg(name="zip mode")
column_select.add_class("spt_zip_mode_select")
column_select.set_option("values", ["single", "unzip"])
column_select.set_option("labels", ["Check-in as a single zipped file", "Unzip and check-in each file"])
column_select.set_option("default", "single")
column_select.add_style("margin-top: -3px")
column_select.add_style("margin-right: 5px")
map_div.add(column_select)
if not my.search_key and "context_mode" not in hidden_options:
map_div.add("<br/>")
map_div.add("<hr/>")
title_wdg = DivWdg()
map_div.add(title_wdg)
title_wdg.add("Context Mode")
title_wdg.add_style("font-size: 16px")
map_div.add("<br/>")
context_mode_option = my.kwargs.get("context_mode")
if not context_mode_option:
context_mode_option = "case_sensitive"
context_mode = SelectWdg(name="context_mode")
context_mode.add_class("spt_context_mode_select")
context_mode.set_option("values", "case_insensitive|case_sensitive")
context_mode.set_option("labels", "Case Insensitive|Case Sensitive")
context_mode.set_option("default", context_mode_option)
context_mode.add_style("margin-top: -3px")
context_mode.add_style("margin-right: 5px")
map_div.add(context_mode)
extra_data = my.kwargs.get("extra_data")
if not isinstance(extra_data, basestring):
extra_data = jsondumps(extra_data)
if extra_data and extra_data != "null":
# it needs a TextArea instead of Hidden because of JSON data
text = TextAreaWdg(name="extra_data")
text.add_style('display: none')
text.set_value(extra_data)
div.add(text)
return div
def get_content_wdg(my):
    """Build the main ingest content area: title header, action shelf
    (Add Files / Clear / Ingest buttons), upload progress bar and the
    drag-and-drop file queue.  Returns the assembled DivWdg."""
    # NOTE(review): disabled legacy base_dir -> relative_dir validation,
    # kept below inside a string for reference.
    """
    asset_dir = Environment.get_asset_dir()
    base_dir = my.kwargs.get("base_dir")
    if base_dir:
        if not base_dir.startswith(asset_dir):
            raise Exception("Path needs to be in asset root")
        else:
            relative_dir = base_dir.replace(asset_dir, "")
            relative_dir = relative_dir.strip("/")
    else:
        relative_dir = my.kwargs.get("relative_dir")
    my.relative_dir = relative_dir
    """

    div = DivWdg()
    div.add_style("width: auto")
    div.add_style("min-width: 600px")
    div.add_style("padding: 20px")
    div.add_color("background", "background")

    header_div = DivWdg()
    div.add(header_div)

    if my.show_settings:
        # Hamburger button that toggles the settings panel on the right
        button_div = DivWdg()
        header_div.add(button_div)
        button = IconButtonWdg(title="Expand Options", icon="BS_MENU_HAMBURGER")
        button_div.add(button)
        button_div.add_style("float: right")
        button.add_behavior( {
            'type': 'click_up',
            'cbjs_action': '''
            var top = bvr.src_el.getParent(".spt_ingest_top");
            var right = top.getElement(".spt_right_content");
            spt.toggle_show_hide(right);
            '''
        } )

    # Title: either passed in via kwargs, or derived from the project
    title = my.kwargs.get("title")
    if not title:
        if my.project_code:
            project_title = Project.get_by_code(my.project_code).get_value("title")
            title = "Copy files to '%s'" % project_title
            title_description = "These will be copied to the asset library"
        else:
            title = "Ingest Files"
            title_description = "Either drag files into the queue box or click 'Add Files to Queue'"
    else:
        title_description = "Either drag files into the queue box or click 'Add Files to Queue'"

    title_wdg = DivWdg()
    header_div.add(title_wdg)
    title_wdg.add("<span style='font-size: 25px'>%s</span>" % title)
    title_wdg.add("<br/>")
    title_wdg.add(title_description)
    title_wdg.add_style("display", "inline-block")

    # create the help button
    is_admin_site = Project.get().is_admin()
    # NOTE(review): show_help is computed but never used -- "or True"
    # always yields True for falsy kwargs; the real check is the
    # condition on the next line.
    show_help = my.kwargs.get("show_help") or True
    if my.kwargs.get("show_help") not in ['false', False] and is_admin_site:
        help_button_wdg = DivWdg()
        header_div.add(help_button_wdg)
        help_button_wdg.add_styles("float: right; margin-top: 11px;")
        help_button = ActionButtonWdg(title="?", tip="Ingestion Widget Help", size='s')
        help_button_wdg.add(help_button)
        help_button.add_behavior( {
            'type': 'click_up',
            'cbjs_action': '''spt.help.load_alias("ingestion_widget")'''
        } )

    div.add("<hr style='margin-right: 4px'/>")

    shelf_div = DivWdg()
    div.add(shelf_div)
    shelf_div.add_style("margin-bottom: 10px")

    # Carry the search_key through as a hidden form input so the
    # client-side ingest handler can read it back
    if my.search_key:
        div.add("<input class='spt_input' type='hidden' name='search_key' value='%s'/>" % my.search_key)
    else:
        div.add("<input class='spt_input' type='hidden' name='search_key' value=''/>")

    if not my.search_type:
        # NOTE(review): typo in user-facing message ("specfied") -- fixing
        # it is a behavior change, flagged here for a separate change.
        div.add("No search type specfied")
        return div

    if my.relative_dir:
        folder_div = DivWdg()
        shelf_div.add(folder_div)
        folder_div.add("Folder: %s" % my.relative_dir)
        folder_div.add_style("opacity: 0.5")
        folder_div.add_style("font-style: italic")
        folder_div.add_style("margin-bottom: 10px")

    # update_process
    my.update_process = my.kwargs.get("update_process") or ""

    # ignore_path_keywords
    my.ignore_path_keywords = my.kwargs.get("ignore_path_keywords") or ""

    from tactic.ui.input import Html5UploadWdg
    upload = Html5UploadWdg(multiple=True)
    shelf_div.add(upload)

    # "Add Files to Queue": opens the browser file picker and previews
    # each selected file in the queue.  Files >= 10MB or with a "normal"
    # (non-image) extension skip the thumbnail pass; smaller files are
    # staggered with increasing delays to keep the UI responsive.
    button = ActionButtonWdg(title="Add Files to Queue", width=150, color="warning")
    #button.add_style("float: right")
    button.add_style("display: inline-block")
    button.add_style("margin-top: -3px")
    shelf_div.add(button)
    button.add_behavior( {
        'type': 'click',
        'normal_ext': File.NORMAL_EXT,
        'cbjs_action': '''
        var top = bvr.src_el.getParent(".spt_ingest_top");
        var files_el = top.getElement(".spt_to_ingest_files");
        var regex = new RegExp('(' + bvr.normal_ext.join('|') + ')$', 'i');

        //clear upload progress
        var upload_bar = top.getElement('.spt_upload_progress');
        if (upload_bar) {
            upload_bar.setStyle('width','0%');
            upload_bar.innerHTML = '';
        }
        var upload_button = top.getElement(".spt_upload_files_top");

        var onchange = function (evt) {
            var files = spt.html5upload.get_files();
            var delay = 0;
            for (var i = 0; i < files.length; i++) {
                var size = files[i].size;
                var file_name = files[i].name;
                var is_normal = regex.test(file_name);
                if (size >= 10*1024*1024 || is_normal) {
                    spt.drag.show_file(files[i], files_el, 0, false);
                }
                else {
                    spt.drag.show_file(files[i], files_el, delay, true);
                    if (size < 100*1024) delay += 50;
                    else if (size < 1024*1024) delay += 500;
                    else if (size < 10*1024*1024) delay += 1000;
                }
            }
            upload_button.setStyle("display", "");
        }

        spt.html5upload.clear();
        spt.html5upload.set_form( top );
        spt.html5upload.select_file( onchange );
        '''
    } )

    # "Clear": destroy all queued file rows and reset the progress bar
    button = ActionButtonWdg(title="Clear")
    #button.add_style("float: right")
    button.add_style("display: inline-block")
    button.add_style("margin-top: -3px")
    shelf_div.add(button)
    button.add_behavior( {
        'type': 'click_up',
        'cbjs_action': '''
        var top = bvr.src_el.getParent(".spt_ingest_top");
        var file_els = top.getElements(".spt_upload_file");
        for ( var i = 0; i < file_els.length; i++) {
            spt.behavior.destroy( file_els[i] );
        };
        var background = top.getElement(".spt_files_background");
        background.setStyle("display", "");
        var button = top.getElement(".spt_upload_file_button");
        button.setStyle("display", "none");

        //clear upload progress
        var upload_bar = top.getElement('.spt_upload_progress');
        if (upload_bar) {
            upload_bar.setStyle('width','0%');
            upload_bar.innerHTML = '';
            upload_bar.setStyle("visibility", "hidden");
            var info_el = top.getElement(".spt_upload_info");
            info_el.innerHTML = "";
        }
        '''
    } )

    ingest = my.get_ingest_button()
    shelf_div.add(ingest)
    ingest.add_style("float: right")

    shelf_div.add("<br clear='all'/>")

    progress_wdg = my.get_progress_div()
    shelf_div.add(progress_wdg)

    border_color_light = div.get_color("background2", 8)
    border_color_dark = div.get_color("background2", -15)
    background_mouseout = div.get_color("background", 10)
    background_mouseenter = div.get_color("background", 8)

    # The drop-target queue area
    files_div = DivWdg()
    files_div.add_style("position: relative")
    files_div.add_class("spt_to_ingest_files")
    div.add(files_div)
    files_div.add_style("max-height: 400px")
    files_div.add_style("height: 400px")
    files_div.add_style("overflow-y: auto")
    files_div.add_style("padding: 3px")
    files_div.add_color("background", background_mouseout)
    files_div.add_style("border: 3px dashed %s" % border_color_light)
    #files_div.add_style("border-radius: 20px 20px 20px 20px")
    files_div.add_style("z-index: 1")
    files_div.add_style("width", "586px")
    #files_div.add_style("display: none")

    bgcolor = div.get_color("background")
    bgcolor2 = div.get_color("background", -3)
    #style_text = "text-align: center; margin-top: 100px; color: #A0A0A0; font-size: 3.0em; z-index: 10;"

    # Placeholder ("Drag Files Here") shown while the queue is empty
    background = DivWdg()
    background.add_class("spt_files_background")
    files_div.add(background)
    if my.sobjects:
        background.add_style("display: none")
    background.add_style("text-align: center")
    background.add_style("margin-top: 75px")
    background.add_style("font-size: 3.0em")
    background.add_style("z-index: 10")
    background.add_color("color", "color", 70)
    icon = "<i class='fa fa-cloud-upload' style='font-size: 150px'> </i>"
    background.add(icon)
    background_text = DivWdg("<p>Drag Files Here</p>")
    background.add(background_text)

    # Highlight the drop target while the mouse is over it
    files_div.add_behavior( {
        'type': 'mouseover',
        'cbjs_action': '''
        bvr.src_el.setStyle("border","3px dashed %s")
        bvr.src_el.setStyle("background","%s")
        ''' % (border_color_dark, background_mouseenter)
    } )
    files_div.add_behavior( {
        'type': 'mouseout',
        'cbjs_action': '''
        bvr.src_el.setStyle("border", "3px dashed %s")
        bvr.src_el.setStyle("background","%s")
        ''' % (border_color_light, background_mouseout)
    } )

    background.add( my.get_select_files_button() )

    # Test drag and drop files
    files_div.add_attr("ondragenter", "return false")
    files_div.add_attr("ondragover", "return false")
    files_div.add_attr("ondrop", "spt.drag.noop(event, this)")
    # Defines the client-side spt.drag helpers used by the drop handler
    # above and by the "Add Files to Queue" buttons: show_file() renders
    # a queue row (with optional thumbnail + EXIF date via loadImage),
    # noop() handles the actual drop event and asks the server whether
    # the dropped names form sequences (IngestCheckCmd).
    files_div.add_behavior( {
        'type': 'load',
        'normal_ext': File.NORMAL_EXT,
        'cbjs_action': '''

        spt.drag = {}
        var background;

        spt.drag.show_file = function(file, top, delay, icon) {

            var background = top.getElement(".spt_files_background");
            background.setStyle("display", "none");

            var template = top.getElement(".spt_upload_file_template");
            var clone = spt.behavior.clone(template);

            clone.removeClass("spt_upload_file_template");
            clone.addClass("spt_upload_file");
            clone.setStyle("display", "");

            if (typeof(delay) == 'undefined') {
                delay = 0;
            }

            // remember the file handle
            clone.file = file;

            var name = file.name;
            var size = parseInt(file.size / 1024 * 10) / 10;

            var thumb_el = clone.getElement(".spt_thumb");
            var date_label_el = clone.getElement(".spt_date_label");
            var date_el = clone.getElement(".spt_date");

            //var loadingImage = loadImage(
            setTimeout( function() {
                var draw_empty_icon = function() {
                    var img = $(document.createElement("div"));
                    img.setStyle("width", "58");
                    img.setStyle("height", "34");
                    //img.innerHTML = "MP4";
                    img.setStyle("border", "1px dotted #222")
                    thumb_el.appendChild(img);
                };
                if (icon) {
                    var loadingImage = loadImage(
                        file,
                        function (img) {
                            if (img.width)
                                thumb_el.appendChild(img);
                            else
                                draw_empty_icon();
                        },
                        {maxWidth: 80, maxHeight: 60, canvas: true, contain: true}
                    );
                }
                else {
                    draw_empty_icon();
                }

                loadImage.parseMetaData(
                    file,
                    function(data) {
                        if (data.exif) {
                            var date = data.exif.get('DateTimeOriginal');
                            if (date) {
                                date_label_el.innerHTML = date;
                                if (date_el) {
                                    date_el.value = date;
                                }
                            }
                        }
                    }
                );
            }, delay );

            /*
            var reader = new FileReader();
            reader.thumb_el = thumb_el;
            reader.onload = function(e) {
                this.thumb_el.innerHTML = [
                    '<img class="thumb" src="',
                    e.target.result,
                    '" title="', escape(name),
                    '" width="60px"',
                    '" padding="5px"',
                    '"/>'
                ].join('');
            }
            reader.readAsDataURL(file);
            */

            clone.getElement(".spt_name").innerHTML = file.name;
            clone.getElement(".spt_size").innerHTML = size + " KB";
            clone.inject(top);
        }

        spt.drag.noop = function(evt, el) {
            var top = $(el).getParent(".spt_ingest_top");
            var files_el = top.getElement(".spt_to_ingest_files");
            evt.stopPropagation();
            evt.preventDefault();
            evt.dataTransfer.dropEffect = 'copy';
            var files = evt.dataTransfer.files;

            var delay = 0;
            var skip = false;
            var regex = new RegExp('(' + bvr.normal_ext.join('|') + ')$', 'i');
            for (var i = 0; i < files.length; i++) {
                var size = files[i].size;
                var file_name = files[i].name;
                var is_normal = regex.test(file_name);
                if (size >= 10*1024*1024 || is_normal) {
                    spt.drag.show_file(files[i], files_el, 0, false);
                }
                else {
                    spt.drag.show_file(files[i], files_el, delay, true);
                    if (size < 100*1024) delay += 50;
                    else if (size < 1024*1024) delay += 500;
                    else if (size < 10*1024*1024) delay += 1000;
                }
            }

            // get all of the current filenames
            var filenames = []
            var items = top.getElements(".spt_upload_file");
            for (var i = 0; i < items.length; i++) {
                var file = items[i].file;
                filenames.push(file.name);
            }

            // check if this is a sequence or zip
            var server = TacticServerStub.get();
            var cmd = 'tactic.ui.tools.IngestCheckCmd';
            var kwargs = {
                file_names: filenames
            };
            var ret_val = server.execute_cmd(cmd, kwargs);
            var info = ret_val.info;
            var num_sequences = 0;
            for (var i = 0; i < info.length; i++) {
                if (info[i].is_sequence) {
                    num_sequences += 1;
                }
            }

            var ok = function() {
                var upload_button = top.getElement(".spt_upload_files_top");
                upload_button.setStyle("display", "");
            }

            if (num_sequences > 0) {
                spt.confirm(num_sequences + " Sequences detected. Do you wish to group these files as sequences?", function() {
                    spt.named_events.fire_event("set_ingest_update_mode", {
                        options: {
                            value: 'sequence'
                        }
                    } );
                });
            }
            ok();
        }
        '''
    } )

    # create a template that will be filled in for each file
    files_div.add_relay_behavior( {
        'type': 'mouseenter',
        'color': files_div.get_color("background3", -5),
        'bvr_match_class': 'spt_upload_file',
        'cbjs_action': '''
        bvr.src_el.setStyle("background", bvr.color);
        '''
    } )
    files_div.add_relay_behavior( {
        'type': 'mouseleave',
        'bvr_match_class': 'spt_upload_file',
        'cbjs_action': '''
        bvr.src_el.setStyle("background", "");
        '''
    } )

    # Remove a single queued row; restore the placeholder when empty
    files_div.add_relay_behavior( {
        'type': 'mouseup',
        'bvr_match_class': 'spt_remove',
        'cbjs_action': '''
        var top = bvr.src_el.getParent(".spt_ingest_top");
        var el = bvr.src_el.getParent(".spt_upload_file");
        spt.behavior.destroy_element(el);

        var els = top.getElements(".spt_upload_file");
        if (els.length == 0) {
            var background = top.getElement(".spt_files_background");
            background.setStyle("display", "");
            var upload_button = top.getElement(".spt_upload_files_top");
            upload_button.setStyle("display", "none");
        }
        '''
    } )

    # NOTE(review): disabled per-file metadata popup, kept as a string
    # for reference.
    """
    metadata_view = "test/wizard/metadata"
    files_div.add_relay_behavior( {
        'type': 'mouseup',
        'view': metadata_view,
        'bvr_match_class': 'spt_upload_file',
        'cbjs_action': '''
        var class_name = 'tactic.ui.panel.CustomLayoutWdg';
        var kwargs = {
            view: bvr.view
        }
        spt.app_busy.show("Loading Metadata");
        spt.panel.load_popup("Metadata", class_name, kwargs);
        spt.app_busy.hide();
        '''
    } )
    """

    # add the passed in sobject files
    for sobject in my.sobjects:
        files_div.add( my.get_file_wdg(sobject) )

    # add the template
    files_div.add( my.get_file_wdg() )

    div.add("<br/>")

    #upload_wdg = my.get_ingest_button()
    #div.add(upload_wdg)

    return div
def get_ingest_button(my):
    """Build the Upload/Copy Files button together with its client-side
    upload pipeline: init, per-chunk progress, and an on_complete handler
    that gathers the UI settings and runs the server-side ingest command
    (bvr.action_handler, default tactic.ui.tools.IngestUploadCmd).
    Returns a DivWdg containing the button."""
    div = DivWdg()

    library_mode = my.kwargs.get("library_mode") or False
    dated_dirs = my.kwargs.get("dated_dirs") or False

    # NOTE: files variable is passed in automatically
    # JS fragment run once before the upload starts: reveal the progress
    # bar and open a server transaction covering the whole check-in.
    upload_init = '''
    var info_el = top.getElement(".spt_upload_info");
    info_el.innerHTML = "Uploading ...";

    // start the upload
    var progress_el = top.getElement(".spt_upload_progress");
    var progress_top = top.getElement(".spt_upload_progress_top");
    setTimeout( function() {
        progress_el.setStyle("visibility", "visible");
        progress_top.setStyle("margin-top", "0px");
    }, 0);
    server.start( {description: "Upload and check-in of ["+files.length+"] files"} );
    '''

    # JS fragment run on each HTML5 upload progress event.
    upload_progress = '''
    var top = bvr.src_el.getParent(".spt_ingest_top");
    progress_el = top.getElement(".spt_upload_progress");
    var percent = Math.round(evt.loaded * 100 / evt.total);
    progress_el.setStyle("width", percent + "%");
    progress_el.innerHTML = String(percent) + "%";
    progress_el.setStyle("background", "#f0ad4e");
    '''

    # Default JS run after the server-side ingest command completes:
    # commit the transaction, clear the queue and hide the progress bar.
    # May be extended/replaced by kwargs (see below).
    oncomplete_script = '''
    spt.notify.show_message("Ingest Completed");
    server.finish();

    var file_els = top.getElements(".spt_upload_file");
    for ( var i = 0; i < file_els.length; i++) {
        spt.behavior.destroy( file_els[i] );
    };
    var background = top.getElement(".spt_files_background");
    background.setStyle("display", "");
    spt.message.stop_interval(message_key);

    var info_el = top.getElement(".spt_upload_info");
    info_el.innerHTML = '';

    var progress_el = top.getElement(".spt_upload_progress");
    var progress_top = top.getElement(".spt_upload_progress_top");
    setTimeout( function() {
        progress_el.setStyle("visibility", "hidden");
        progress_top.setStyle("margin-top", "-30px");
    }, 0);

    spt.panel.refresh(top);
    '''

    # Optionally append a custom script stored as a config/custom_script
    # sobject, addressed as "folder/title".
    script_found = True
    oncomplete_script_path = my.kwargs.get("oncomplete_script_path")
    if oncomplete_script_path:
        script_folder, script_title = oncomplete_script_path.split("/")
        oncomplete_script_expr = "@GET(config/custom_script['folder','%s']['title','%s'].script)" %(script_folder,script_title)
        server = TacticServerStub.get()
        oncomplete_script_ret = server.eval(oncomplete_script_expr, single=True)
        if oncomplete_script_ret:
            oncomplete_script = oncomplete_script + oncomplete_script_ret
        else:
            script_found = False
            oncomplete_script = "alert('Error: oncomplete script not found');"

    # Explicit kwargs override the default/appended script entirely;
    # "on_complete" wins over "oncomplete_script" when both are given.
    if my.kwargs.get("oncomplete_script"):
        oncomplete_script = my.kwargs.get("oncomplete_script")
    if my.kwargs.get("on_complete"):
        oncomplete_script = my.kwargs.get("on_complete")

    # JS run once all files are uploaded: read the settings selects and
    # form inputs, then execute the ingest command asynchronously while
    # polling progress messages keyed by message_key.
    on_complete = '''
    var top = bvr.src_el.getParent(".spt_ingest_top");
    var update_data_top = top.getElement(".spt_edit_top");
    var progress_el = top.getElement(".spt_upload_progress");
    progress_el.innerHTML = "100%";
    progress_el.setStyle("width", "100%");
    progress_el.setStyle("background", "#337ab7");
    var info_el = top.getElement(".spt_upload_info");

    var search_type = bvr.kwargs.search_type;
    var relative_dir = bvr.kwargs.relative_dir;
    var context = bvr.kwargs.context;
    var update_process = bvr.kwargs.update_process;
    var ignore_path_keywords = bvr.kwargs.ignore_path_keywords;
    var library_mode = bvr.kwargs.library_mode;
    var dated_dirs = bvr.kwargs.dated_dirs;
    var project_code = bvr.kwargs.project_code;
    if (!project_code) {
        project_code = null;
    }

    // Data comes from Ingest Settings
    var context_mode_select = top.getElement(".spt_context_mode_select");
    var context_mode = context_mode_select ? context_mode_select.value : bvr.kwargs.context_mode;

    // settings
    var update_mode = null;
    var ignore_ext = null;
    var column = null;
    var zip_mode = null;

    var update_mode_select = top.getElement(".spt_update_mode_select");
    if (update_mode_select)
        update_mode = update_mode_select.value;
    var ignore_ext_select = top.getElement(".spt_ignore_ext_select");
    if (ignore_ext_select)
        ignore_ext = ignore_ext_select.value;
    var column_select = top.getElement(".spt_column_select");
    if (column_select)
        column = column_select ? column_select.value : bvr.kwargs.column;
    var zip_mode_select = top.getElement(".spt_zip_mode_select");
    if (zip_mode_select)
        zip_mode = zip_mode_select.value;

    var filenames = [];
    for (var i = 0; i != files.length;i++) {
        var name = files[i].name;
        if (name) {
            filenames.push(name);
        }
        else {
            filenames.push(files[i]);
        }
    }

    var values = spt.api.get_input_values(top);

    //var category = values.category[0];
    var keywords = values["edit|user_keywords"];
    if (keywords) {
        keywords = keywords[0];
    }
    else {
        keywords = "";
    }

    var extra_data = values.extra_data ? values.extra_data[0]: {};
    var parent_key = values.parent_key[0];
    var search_key = values.search_key[0];

    var convert_el = top.getElement(".spt_image_convert")
    if (convert_el) {
        convert = spt.api.get_input_values(convert_el);
    }
    else {
        convert = null;
    }

    var processes = values.process;
    if (processes) {
        process = processes[0];
        if (!process) {
            process = null;
        }
    }
    else {
        process = null;
    }

    var return_array = false;
    // non-existent when my.show_settings is False
    var update_data = update_data_top ? spt.api.get_input_values(update_data_top, null, return_array): {};

    var message_key = spt.message.generate_key();
    message_key = "IngestUploadCmd|" + search_key + "|" + message_key;

    var kwargs = {
        search_key: search_key,
        search_type: search_type,
        relative_dir: relative_dir,
        filenames: filenames,
        message_key: message_key,
        parent_key: parent_key,
        //category: category,
        keywords: keywords,
        update_process: update_process,
        ignore_path_keywords: ignore_path_keywords,
        extra_data: extra_data,
        update_data: update_data,
        process: process,
        context: context,
        convert: convert,
        update_mode: update_mode,
        ignore_ext: ignore_ext,
        column: column,
        library_mode: library_mode,
        dated_dirs: dated_dirs,
        context_mode: context_mode,
        zip_mode: zip_mode,
        project_code: project_code,
    }
    on_complete = function(rtn_data) {
    ''' + oncomplete_script + '''
    };

    var class_name = bvr.action_handler;
    // TODO: make the async_callback return throw an e so we can run
    // server.abort
    server.execute_cmd(class_name, kwargs, {}, {on_complete:on_complete});

    on_progress = function(message) {
        msg = JSON.parse(message.message);
        var percent = msg.progress;
        var description = msg.description;
        var error = msg.error;
        info_el.innerHTML = description;

        progress_el.setStyle("width", percent+"%");
        progress_el.innerHTML = percent + "%";
        if (error) {
            progress_el.setStyle("background", "#F00");
            spt.message.stop_interval(message_key);
        }
    }
    spt.message.set_interval(message_key, on_progress, 500, bvr.src_el);
    '''

    # The button itself, hidden until files are queued (unless
    # search_keys were passed in).
    upload_div = DivWdg()
    search_keys = my.kwargs.get("search_keys")
    if not search_keys:
        upload_div.add_style("display: none")
    upload_div.add_class("spt_upload_files_top")
    div.add(upload_div)
    if my.sobjects:
        button = ActionButtonWdg(title="Copy Files", width=200, color="primary")
    else:
        button = ActionButtonWdg(title="Upload Files", width=200, color="primary")
    upload_div.add(button)
    #button.add_style("float: right")
    #upload_div.add_style("margin-bottom: 20px")
    upload_div.add("<br clear='all'/>")

    action_handler = my.kwargs.get("action_handler")
    if not action_handler:
        action_handler = 'tactic.ui.tools.IngestUploadCmd';

    context = my.kwargs.get("context")
    context_mode = my.kwargs.get("context_mode")

    # Click handler: collect the queued file handles (or search_keys for
    # pre-existing sobjects) and kick off the HTML5 upload with the three
    # JS fragments above spliced in via %s.
    button.add_behavior( {
        'type': 'click_up',
        'action_handler': action_handler,
        'kwargs': {
            'search_type': my.search_type,
            'relative_dir': my.relative_dir,
            'script_found': script_found,
            'context': context,
            'library_mode': library_mode,
            'dated_dirs' : dated_dirs,
            'context_mode': context_mode,
            'update_process': my.update_process,
            'ignore_path_keywords': my.ignore_path_keywords,
            'project_code': my.project_code
        },
        'cbjs_action': '''

        if (bvr.kwargs.script_found != true)
        {
            spt.alert("Error: provided on_complete script not found");
            return;
        }
        var top = bvr.src_el.getParent(".spt_ingest_top");
        var file_els = top.getElements(".spt_upload_file");
        var num_files = file_els.length;
        var files_top = top.getElement(".spt_to_ingest_files")

        spt.notify.show_message("Ingesting "+num_files+" Files");

        // get the server that will be used in the callbacks
        var server = TacticServerStub.get();

        // retrieved the stored file handles
        var files = [];
        for (var i = 0; i < file_els.length; i++) {
            if (file_els[i].file) {
                files.push( file_els[i].file );
            }
            else {
                var search_key = file_els[i].getAttribute("spt_search_key");
                files.push("search_key:"+search_key);
            }
        }
        if (files.length == 0) {
            spt.alert("Either click 'Add' or drag some files over to ingest.");
            return;
        }

        // defined the callbacks
        var upload_start = function(evt) {
        }

        var upload_progress = function(evt) {
            %s;
        }

        var upload_complete = function(evt) {
            %s;
        }

        var upload_file_kwargs = {
            files: files,
            upload_start: upload_start,
            upload_complete: upload_complete,
            upload_progress: upload_progress
        };
        if (bvr.ticket)
            upload_file_kwargs['ticket'] = bvr.ticket;

        %s;

        spt.html5upload.set_form( top );
        spt.html5upload.upload_file(upload_file_kwargs);

        ''' % (upload_progress, on_complete, upload_init)
    } )

    return div
def get_progress_div(my):
    """Build the (initially hidden) upload progress bar widget.

    Layout: an overflow-hidden outer div holds a wrapper that slides in
    from -30px, containing an info line plus a bordered track with the
    actual progress bar inside.  Returns the outer DivWdg.
    """
    outer = DivWdg()
    outer.add_style("overflow-y: hidden")

    # Sliding wrapper; margin-top is animated to 0px when uploading starts
    wrapper = DivWdg()
    wrapper.add_class("spt_upload_progress_top")
    wrapper.add_style("margin-top: -30px")
    outer.add(wrapper)

    # Status text line updated by the upload callbacks
    info_wdg = DivWdg()
    info_wdg.add_class("spt_upload_info")
    wrapper.add(info_wdg)

    # The fixed-size track that frames the progress bar
    track = DivWdg()
    track.add_class("spt_upload_progress_top")
    track.add_style("width: 595px")
    track.add_style("height: 15px")
    track.add_style("margin-bottom: 10px")
    track.add_border()
    #track.add_style("display: none")
    wrapper.add(track)

    # The bar itself: grows from 0px width as percent updates arrive
    bar = DivWdg()
    bar.add_class("spt_upload_progress")
    bar.add_style("width: 0px")
    bar.add_style("visibility: hidden")
    bar.add_style("height: 100%")
    bar.add_gradient("background", "background3", -10)
    bar.add_style("text-align: right")
    bar.add_style("overflow: hidden")
    bar.add_style("padding-right: 3px")
    track.add(bar)

    # Hook up the client-side messaging helpers used for progress polling
    from tactic.ui.app import MessageWdg
    bar.add_behavior( {
        'type': 'load',
        'cbjs_action': MessageWdg.get_onload_js()
    } )

    return outer
"""
def get_data_wdg(my):
div = DivWdg()
from pyasm.biz import Pipeline
from pyasm.widget import SelectWdg
search_type_obj = SearchType.get(my.search_type)
base_type = search_type_obj.get_base_key()
search = Search("sthpw/pipeline")
search.add_filter("search_type", base_type)
pipelines = search.get_sobjects()
if pipelines:
pipeline = pipelines[0]
process_names = pipeline.get_process_names()
if process_names:
table = Table()
div.add(table)
table.add_row()
table.add_cell("Process: ")
select = SelectWdg("process")
table.add_cell(select)
process_names.append("---")
process_names.append("publish")
process_names.append("icon")
select.set_option("values", process_names)
####
buttons = Table()
div.add(buttons)
buttons.add_row()
#button = IconButtonWdg(title="Fill in Data", icon=IconWdg.EDIT)
button = ActionButtonWdg(title="Metadata")
button.add_style("float: left")
button.add_style("margin-top: -3px")
buttons.add_cell(button)
select_label = DivWdg("Update mode");
select_label.add_style("float: left")
select_label.add_style("margin-top: -3px")
select_label.add_style("margin-left: 20px")
buttons.add_cell(select_label)
update_mode_option = my.kwargs.get("update_mode")
if not update_mode_option:
update_mode_option = "true"
update_mode = SelectWdg(name="update mode")
update_mode.add_class("spt_update_mode_select")
update_mode.set_option("values", ["false", "true", "sequence"])
update_mode.set_option("labels", ["Off", "On", "Sequence"])
update_mode.set_option("default", update_mode_option)
update_mode.add_style("float: left")
update_mode.add_style("margin-top: -3px")
update_mode.add_style("margin-left: 5px")
update_mode.add_style("margin-right: 5px")
buttons.add_cell(update_mode)
update_info = DivWdg()
update_info.add_class("glyphicon")
update_info.add_class("glyphicon-info-sign")
update_info.add_style("float: left")
update_info.add_style("margin-top: -3px")
update_info.add_style("margin-left: 10px")
update_info.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
spt.info("When update mode is on, if a file shares the name of one other file in the asset library, the file will update on ingest. If more than one file shares the name of an ingested asset, a new asset is created.<br> If sequence mode is selected, the system will update the sobject on ingest if a file sequence sharing the same name already exists.", {type: 'html'});
'''
} )
buttons.add_cell(update_info);
dialog = DialogWdg(display="false", show_title=False)
div.add(dialog)
dialog.set_as_activator(button, offset={'x':-10,'y':10})
dialog_data_div = DivWdg()
dialog_data_div.add_color("background", "background")
dialog_data_div.add_style("padding", "20px")
dialog.add(dialog_data_div)
# Order folders by date
name_div = DivWdg()
dialog_data_div.add(name_div)
name_div.add_style("margin: 15px 0px")
if SearchType.column_exists(my.search_type, "relative_dir"):
category_div = DivWdg()
name_div.add(category_div)
checkbox = RadioWdg("category")
checkbox.set_option("value", "none")
category_div.add(checkbox)
category_div.add(" No categories")
category_div.add_style("margin-bottom: 5px")
checkbox.set_option("checked", "true")
category_div = DivWdg()
name_div.add(category_div)
checkbox = RadioWdg("category")
checkbox.set_option("value", "by_day")
category_div.add(checkbox)
category_div.add(" Categorize files by Day")
category_div.add_style("margin-bottom: 5px")
category_div = DivWdg()
name_div.add(category_div)
checkbox = RadioWdg("category")
checkbox.set_option("value", "by_week")
category_div.add(checkbox)
category_div.add(" Categorize files by Week")
category_div.add_style("margin-bottom: 5px")
category_div = DivWdg()
name_div.add(category_div)
checkbox = RadioWdg("category")
checkbox.set_option("value", "by_year")
category_div.add(checkbox)
category_div.add(" Categorize files by Year")
category_div.add_style("margin-bottom: 5px")
name_div.add("<br/>")
# edit
from tactic.ui.panel import EditWdg
ingest_data_view = my.kwargs.get('ingest_data_view')
sobject = SearchType.create(my.search_type)
edit = EditWdg(search_key =sobject.get_search_key(), mode='view', view=ingest_data_view )
dialog_data_div.add(edit)
hidden = HiddenWdg(name="parent_key")
dialog_data_div.add(hidden)
hidden.add_class("spt_parent_key")
parent_key = my.kwargs.get("parent_key") or ""
if parent_key:
hidden.set_value(parent_key)
extra_data = my.kwargs.get("extra_data")
if not isinstance(extra_data, basestring):
extra_data = jsondumps(extra_data)
if extra_data and extra_data != "null":
# it needs a TextArea instead of Hidden because of JSON data
text = TextAreaWdg(name="extra_data")
text.add_style('display: none')
text.set_value(extra_data)
dialog_data_div.add(text)
return div
"""
def get_select_files_button(my):
    """Build the "Add Files to Queue" button shown inside the empty-queue
    placeholder.  Clicking it opens the HTML5 file picker and previews
    each selected file in the queue (same staggering/thumbnail rules as
    the shelf button in get_content_wdg).  Returns the ActionButtonWdg."""
    button = ActionButtonWdg(title="Add Files to Queue", width=150, color="warning")
    from tactic.ui.input import Html5UploadWdg
    upload = Html5UploadWdg(multiple=True)
    button.add(upload)

    button.add_style("margin: 30px auto")

    button.add_behavior( {
        'type': 'click_up',
        'normal_ext': File.NORMAL_EXT,
        'cbjs_action': '''
        var top = bvr.src_el.getParent(".spt_ingest_top");
        var files_el = top.getElement(".spt_to_ingest_files");
        var regex = new RegExp('(' + bvr.normal_ext.join('|') + ')$', 'i');

        // clear upload progress
        var upload_bar = top.getElement('.spt_upload_progress');
        if (upload_bar) {
            upload_bar.setStyle('width','0%');
            upload_bar.innerHTML = '';
        }
        var upload_button = top.getElement(".spt_upload_files_top");

        var onchange = function (evt) {
            var files = spt.html5upload.get_files();
            var delay = 0;
            for (var i = 0; i < files.length; i++) {
                var size = files[i].size;
                var file_name = files[i].name;
                var is_normal = regex.test(file_name);
                if (size >= 10*1024*1024 || is_normal) {
                    spt.drag.show_file(files[i], files_el, 0, false);
                }
                else {
                    spt.drag.show_file(files[i], files_el, delay, true);
                    if (size < 100*1024) delay += 50;
                    else if (size < 1024*1024) delay += 500;
                    else if (size < 10*1024*1024) delay += 1000;
                }
            }
            upload_button.setStyle("display", "");
        }

        spt.html5upload.clear();
        spt.html5upload.set_form( top );
        spt.html5upload.select_file( onchange );
        '''
    } )

    return button
class IngestCheckCmd(Command):
    """Command that analyzes a list of file names for frame sequences."""

    def execute(my):
        # Deferred import: pyasm.biz is only needed when the command runs.
        from pyasm.biz import FileRange

        # Group the incoming names into sequence descriptors and expose
        # the result on the command instance for the caller to read.
        #info = FileRange.check(file_names)
        requested = my.kwargs.get("file_names")
        my.info = FileRange.get_sequences(requested)
class IngestUploadCmd(Command):
    """Server-side command that ingests uploaded files: creates/updates
    one sobject per file and checks each file in through the server stub.
    """
    # FOLDER_LIMIT can be adjusted as desired.
    # Maximum number of files per numbered sub-folder in library mode.
    FOLDER_LIMIT = 500
def get_server(my):
    """Lazily create and cache the TACTIC server stub used for check-ins."""
    if my.server:
        return my.server

    code = my.kwargs.get("project_code")
    if code:
        # An explicit project was requested: use a local-protocol stub
        # pointed at that project.
        my.server = TacticServerStub(protocol="local")
        my.server.set_project(code)
    else:
        # Fall back to the stub for the current session/project.
        my.server = TacticServerStub.get()
    return my.server
def execute(my):
    """Entry point: delegate to _execute() and, on failure, report the
    error over the message key (when one was supplied) before re-raising.
    """
    my.server = None
    my.message_key = my.kwargs.get("message_key")
    try:
        return my._execute()
    # fixed: use the `as` form, valid on Python 2.6+ AND Python 3,
    # instead of the Python-2-only comma form
    except Exception as e:
        if my.message_key:
            msg = {
                'progress': 100,
                'error': '%s' % e,
                'description': 'Error: %s' % e
            }
            server = my.get_server()
            server.log_message(my.message_key, msg, status="in progress")
        raise
def _execute(my):
    """Ingest every entry of kwargs['filenames']: create or update one
    sobject per file, then check the file in through the server stub.
    Progress is reported over kwargs['message_key'] when set.
    NOTE(review): nesting below was reconstructed from a whitespace-
    stripped source - verify against the original file.
    """
    library_mode = my.kwargs.get("library_mode")
    current_folder = 0
    dated_dirs = my.kwargs.get("dated_dirs")
    filenames = my.kwargs.get("filenames")
    relative_dir = my.kwargs.get("relative_dir")
    base_dir = my.kwargs.get("base_dir")
    if not base_dir:
        # default to the server-side upload area
        upload_dir = Environment.get_upload_dir()
        base_dir = upload_dir
    context_mode = my.kwargs.get("context_mode")
    if not context_mode:
        context_mode = "case_sensitive"
    update_mode = my.kwargs.get("update_mode")
    ignore_ext = my.kwargs.get("ignore_ext")
    column = my.kwargs.get("column")
    if not column:
        column = "name"
    search_key = my.kwargs.get("search_key")
    if search_key:
        # ingest into one specific, pre-existing sobject
        my.sobject = Search.get_by_search_key(search_key)
        search_type = my.sobject.get_base_search_type()
    else:
        search_type = my.kwargs.get("search_type")
        my.sobject = None
    if not relative_dir:
        # default repository folder: <project>/<table>
        project_code = Project.get_project_code()
        search_type_obj = SearchType.get(search_type)
        table = search_type_obj.get_table()
        relative_dir = "%s/%s" % (project_code, table)
    server = my.get_server()
    parent_key = my.kwargs.get("parent_key")
    category = my.kwargs.get("category")
    keywords = my.kwargs.get("keywords")
    update_process = my.kwargs.get("update_process")
    ignore_path_keywords = my.kwargs.get("ignore_path_keywords")
    if ignore_path_keywords:
        # comma-separated string -> stripped list of keywords to suppress
        ignore_path_keywords = ignore_path_keywords.split(",")
        ignore_path_keywords = [x.strip() for x in ignore_path_keywords]
    update_data = my.kwargs.get("update_data")
    extra_data = my.kwargs.get("extra_data")
    if extra_data:
        extra_data = jsonloads(extra_data)
    else:
        extra_data = {}
    update_sobject_found = False
    # TODO: use this to generate a category
    category_script_path = my.kwargs.get("category_script_path")
    """
    ie:
    from pyasm.checkin import ExifMetadataParser
    parser = ExifMetadataParser(path=file_path)
    tags = parser.get_metadata()
    date = tags.get("EXIF DateTimeOriginal")
    return date.split(" ")[0]
    """
    # remap the filenames for seuqences
    if update_mode == "sequence":
        sequences = FileRange.get_sequences(filenames)
        filenames = []
        for sequence in sequences:
            print "sequence: ", sequence
            if sequence.get('is_sequence'):
                filename = sequence.get("template")
            else:
                filename = sequence.get("filenames")[0]
            filenames.append(filename)
    input_prefix = update_data.get('input_prefix')
    # NOTE(review): non_seq_filenames is never used below
    non_seq_filenames = []
    if library_mode:
        # library mode spreads files over numbered sub-folders (001, ...)
        relative_dir = "%s/001" % relative_dir
    snapshots = []
    for count, filename in enumerate(filenames):
        # Check if files should be updated.
        # If so, attempt to find one to update.
        # If more than one is found, do not update.
        if filename.endswith("/"):
            # this is a folder:
            continue
        if filename.startswith("search_key:"):
            # "search_key:" entries re-ingest an existing snapshot's file
            mode = "search_key"
            tmp, search_key = filename.split("search_key:")
            snapshot = Search.get_by_search_key(search_key)
            if snapshot.get_search_type() == "sthpw/snapshot":
                lib_path = snapshot.get_lib_path_by_type()
                filename = os.path.basename(lib_path)
                # strip the version suffix (e.g. "_v003") for the new name
                new_filename = re.sub(r"_v\d+", "", filename)
            else:
                # a non-snapshot was passed in: use its latest publish snapshot
                snapshot = Snapshot.get_latest_by_sobject(snapshot, process="publish")
                lib_path = snapshot.get_lib_path_by_type()
                filename = os.path.basename(lib_path)
                new_filename = re.sub(r"_v\d+", "", filename)
            # NOTE(review): this guard runs only after snapshot methods were
            # already called above; a missing snapshot would raise earlier
            if not snapshot:
                raise Exception("Must pass in snapshot search_key")
        else:
            mode = "multi"
            new_filename = filename
        if library_mode:
            # get count of number of files in the current asset ingest dir
            import glob
            abs_path = Environment.get_asset_dir() + "/" + relative_dir + "/*"
            if len(glob.glob(abs_path)) > my.FOLDER_LIMIT:
                # roll over into the next numbered folder
                current_folder = current_folder + 1
                relative_dir = "%s/%03d" % (relative_dir[:-4], current_folder)
        unzip = my.kwargs.get("unzip")
        zip_mode = my.kwargs.get("zip_mode")
        # NOTE(review): `and` binds tighter than `or`, so when
        # zip_mode == 'unzip' the ".zip" extension is NOT checked -
        # confirm whether parentheses were intended here
        if zip_mode in ['unzip'] or unzip in ["true", True] and filename.endswith(".zip"):
            from pyasm.common import ZipUtil
            unzip_dir = Environment.get_upload_dir()
            if not os.path.exists(unzip_dir):
                os.makedirs(unzip_dir)
            zip_path = "%s/%s" % (base_dir, filename)
            ZipUtil.extract(zip_path, base_dir=unzip_dir)
            paths = ZipUtil.get_file_paths(zip_path)
            # recursively ingest the extracted contents
            new_kwargs = my.kwargs.copy()
            new_kwargs['filenames'] = paths
            new_kwargs['base_dir'] = unzip_dir
            new_kwargs['zip_mode'] = "single"
            ingest = IngestUploadCmd(**new_kwargs)
            ingest.execute()
            continue
        if my.sobject:
            sobject = my.sobject
        elif update_mode in ["true", True, "update"]:
            # first see if this sobjects still exists
            search = Search(search_type)
            # ingested files into search type applies filename without i.e. _v001 suffix
            search.add_filter(column, new_filename)
            if relative_dir and search.column_exists("relative_dir"):
                if not dated_dirs:
                    search.add_filter("relative_dir", relative_dir)
            sobjects = search.get_sobjects()
            if len(sobjects) > 1:
                # ambiguous match: do not update, create a new entry below
                sobject = None
            elif len(sobjects) == 1:
                sobject = sobjects[0]
                update_sobject_found = True
            else:
                sobject = None
        elif update_mode == "sequence":
            # This check is not needed anymore as the sequence analyzer
            # can handle a mix of sequence and non sequences
            #if not FileGroup.is_sequence(filename):
            #    raise TacticException('Please modify sequence naming to have at least three digits [%s].' % filename)
            search = Search(search_type)
            search.add_filter(column, filename)
            if relative_dir and search.column_exists("relative_dir"):
                if not dated_dirs:
                    search.add_filter("relative_dir", relative_dir)
            sobjects = search.get_sobjects()
            if sobjects:
                sobject = sobjects[0]
            else:
                sobject = None
        else:
            sobject = None
        # Create a new entry
        if not sobject:
            if update_mode not in ['true', True, "update"]:
                # refuse to silently shadow an existing same-named file
                sobjects = []
                my.check_existing_file(search_type, new_filename, relative_dir, update_mode, sobjects)
            sobject = SearchType.create(search_type)
            if ignore_ext in ['true', True]:
                name, ext = os.path.splitext(new_filename)
            else:
                name = new_filename
            # if the name contains a path, the only take basename
            name = os.path.basename(name)
            sobject.set_value(column, name)
            if relative_dir and sobject.column_exists("relative_dir"):
                sobject.set_value("relative_dir", relative_dir)
        if mode == "search_key":
            path = lib_path
        elif relative_dir:
            path = "%s/%s" % (relative_dir, filename)
        else:
            path = filename
        # Don't want the keywords being extracted from lib_path, extract the relative dir path instead
        # Using new_filename because it is the filename without version numbers
        if relative_dir:
            path_for_keywords = "%s/%s" % (relative_dir, new_filename)
        else:
            path_for_keywords = new_filename
        file_keywords = Common.extract_keywords_from_path(path_for_keywords)
        # Extract keywords from the path to be added to keywords_data,
        # if ignore_path_keywords is found, remove the specified keywords
        # from the path keywords
        if ignore_path_keywords:
            for ignore_path_keyword in ignore_path_keywords:
                if ignore_path_keyword in file_keywords:
                    file_keywords.remove(ignore_path_keyword)
        file_keywords.append(filename.lower())
        file_keywords = " ".join(file_keywords)
        new_file_keywords = ""
        if SearchType.column_exists(search_type, "keywords"):
            if keywords:
                new_file_keywords = "%s %s" % (keywords, file_keywords)
            else:
                new_file_keywords = file_keywords
            sobject.set_value("keywords", new_file_keywords)
        if SearchType.column_exists(search_type, "user_keywords"):
            if keywords:
                sobject.set_value("user_keywords", keywords)
        if SearchType.column_exists(search_type, "keywords_data"):
            # keep user-supplied and path-derived keywords separately
            data = sobject.get_json_value("keywords_data", {})
            data['user'] = keywords
            data['path'] = file_keywords
            sobject.set_json_value("keywords_data", data)
        # extract metadata
        #file_path = "%s/%s" % (base_dir, File.get_filesystem_name(filename))
        if update_mode == "sequence":
            sequence = sequences[count]
            file_path = "%s/%s" % (base_dir, sequence.get("filenames")[0])
        elif mode == "search_key":
            file_path = path
        else:
            file_path = "%s/%s" % (base_dir, filename)
        """
        # TEST: convert on upload
        try:
            convert = my.kwargs.get("convert")
            if convert:
                message_key = "IngestConvert001"
                cmd = ConvertCbk(**convert)
                cmd.execute()
        except Exception, e:
            print "WARNING: ", e
        """
        # check if the file exists
        if mode != "search_key" and not os.path.exists(file_path):
            raise Exception("Path [%s] does not exist" % file_path)
        # get the metadata from this image
        if SearchType.column_exists(search_type, "relative_dir"):
            if category and category not in ['none', None]:
                from pyasm.checkin import ExifMetadataParser
                parser = ExifMetadataParser(path=file_path)
                tags = parser.get_metadata()
                date = tags.get("EXIF DateTimeOriginal")
                if not date:
                    date_str = "No-Date"
                else:
                    date_str = str(date)
                    # this can't be parsed correctly by dateutils
                    parts = date_str.split(" ")
                    date_str = parts[0].replace(":", "-")
                    date_str = "%s %s" % (date_str, parts[1])
                    from dateutil import parser
                    orig_date = parser.parse(date_str)
                    if category == "by_day":
                        date_str = orig_date.strftime("%Y/%Y-%m-%d")
                    elif category == "by_month":
                        date_str = orig_date.strftime("%Y-%m")
                    elif category == "by_week":
                        date_str = orig_date.strftime("%Y/Week-%U")
                full_relative_dir = "%s/%s" % (relative_dir, date_str)
                sobject.set_value("relative_dir", full_relative_dir)
        # Add parent sObject
        if parent_key:
            parent = Search.get_by_search_key(parent_key)
            if parent:
                try:
                    sobject.set_sobject_value(parent)
                except:
                    # best effort: parent linking is optional
                    pass
        # for some unknown reason, this input prefix is ignored
        if update_data.has_key("input_prefix"):
            del(update_data['input_prefix'])
        new_data = {}
        for name, value in update_data.items():
            if name == "input_prefix":
                continue
            # strip the edit-widget prefix from the column name
            name = name.replace("%s|"%input_prefix, "")
            new_data[name] = value
        if new_data:
            from tactic.ui.panel import EditCmd
            cmd = EditCmd(
                view="edit",
                sobject=sobject,
                data=new_data,
                commit="false",
            )
            cmd.execute()
        for key, value in extra_data.items():
            if SearchType.column_exists(search_type, key):
                sobject.set_value(key, value)
        """
        if category:
            if SearchType.column_exists(search_type, "category"):
                sobject.set_value("category", category)
            if SearchType.column_exists(search_type, "relative_dir"):
                full_relative_dir = "%s/%s" % (relative_dir, category)
                sobject.set_value("relative_dir", category)
        """
        sobject.commit()
        search_key = sobject.get_search_key()
        status = sobject.get_value("status", no_exception=True)
        is_verified = status in ['Verified']
        # use API to check in file
        process = my.kwargs.get("process")
        if not process:
            process = "publish"
        context = my.kwargs.get("context")
        if not context:
            context = process
        if process == "icon":
            context = "icon"
        else:
            # per-file context, e.g. "publish/<filename>"
            context = "%s/%s" % (context, filename)
        if context_mode == "case_insensitive":
            context = context.lower()
        version = None
        if not is_verified and update_process and update_sobject_found:
            process = update_process
            # find what the version number should be
            search = Search("sthpw/snapshot")
            search.add_parent_filter(sobject)
            search.add_filter("context", context)
            search.add_order_by("version desc")
            max_snapshot = search.get_sobject()
            version = max_snapshot.get_value("version")
            if not version:
                version = 1
            else:
                version += 1
        if update_mode == "sequence":
            file_range = sequence.get("range")
            if file_range == "":
                raise Exception("Error: %s" % sequence.get("error"))
            if sequence.get("is_sequence"):
                file_path = "%s/%s" % (base_dir, sequence.get("template"))
                snapshot = server.group_checkin(search_key, context, file_path, file_range, mode='move', version=version)
            else:
                file_path = "%s/%s" % (base_dir, sequence.get("filenames")[0])
                snapshot = server.simple_checkin(search_key, context, file_path, mode='uploaded', version=version)
        elif mode == "search_key":
            if lib_path.find("##") != -1:
                # NOTE(review): get_display is referenced, not called - the
                # bound method itself is passed as the range; confirm intent
                file_range = snapshot.get_file_range().get_display
                file_path = lib_path
                snapshot = server.group_checkin(search_key, context, file_path, file_range, mode='copy')
            else:
                # copy the file to a temporary location
                tmp_dir = Environment.get_tmp_dir()
                tmp_path = "%s/%s" % (tmp_dir, new_filename)
                shutil.copy(file_path, tmp_path)
                # auto create icon
                snapshot = server.simple_checkin(search_key, context, tmp_path, process=process, mode='move')
        elif my.kwargs.get("base_dir"):
            # auto create icon
            snapshot = server.simple_checkin(search_key, context, file_path, process=process, mode='move', version=version)
        else:
            snapshot = server.simple_checkin(search_key, context, filename, process=process, mode='uploaded', version=version)
        snapshots.append(snapshot)
        #server.update(snapshot, {"user_keywords": "abc 123"} )
        percent = int((float(count)+1) / len(filenames)*100)
        if my.message_key:
            msg = {
                'progress': percent,
                'description': 'Checking in file [%s]' % filename,
            }
            server.log_message(my.message_key, msg, status="in progress")
    if my.message_key:
        msg = {
            'progress': '100',
            'description': 'Check-ins complete'
        }
        server.log_message(my.message_key, msg, status="complete")
    return
def check_existing_file(my, search_type, new_filename, relative_dir, update_mode, sobjects):
    """Raise a TacticException when a file with the same base name already
    exists in the sthpw/file table for this search type and directory.
    """
    base, ext = os.path.splitext(new_filename)
    # match any extension: "<base>.%" with a SQL LIKE filter
    pattern = "%s.%%" % base
    full_stype = SearchType.build_search_type(search_type, Project.get_project_code())

    query = Search("sthpw/file")
    query.add_filter("search_type", full_stype)
    query.add_filter("relative_dir", relative_dir)
    query.add_filter("file_name", pattern, op='like')
    matches = query.get_sobjects()
    if not matches:
        return
    if update_mode in ['true', True] and len(sobjects) > 1:
        raise TacticException('Multiple files with the same name as "%s" already exist. Uncertain as to which file to update. Please individually update each file.' % new_filename)
    raise TacticException('A file with the same name as "%s" already exists in the file table with path "%s". Please rename the file and ingest again.' % (new_filename, relative_dir))
def natural_sort(my, l):
    '''
    natural sort will makesure a list of names passed in is
    sorted in an order of 1000 to be after 999 instead of right after 101
    '''
    def token(text):
        # numeric runs compare as integers, the rest case-insensitively
        return int(text) if text.isdigit() else text.lower()

    def split_key(name):
        return [token(part) for part in re.split('([0-9]+)', name)]

    return sorted(l, key=split_key)
"""
def find_sequences(my, filenames):
'''
Parse a list of filenames into a dictionary of sequences. Filenames not
part of a sequence are returned in the None key
:param filenames | [<str>, ..]
:return {<str> sequence: [<str> filename, ..], ..}
'''
local_filenames = filenames[:]
sequence_patterns = {}
sequences = {None: []}
# sort the files (by natural order) so we always generate a pattern
# based on the first potential file in a sequence
local_filenames = my.natural_sort(local_filenames)
for filename in local_filenames:
count = re.findall('\d+', filename)
if not count:
raise TacticException("Please ingest sequences only.")
base, file_ext = os.path.splitext(filename)
if file_ext:
file_ext = file_ext[1:]
# if last set of digits is not a file extension, and is less than 3 digits
# because common.get_dir_info only works with 3 of more digits
if len(count[-1]) <= 1 and file_ext.isalpha():
raise TacticException('Please modify sequence naming to have at least three digits.')
# if file extension found, and contains a number in the extension (but also not completely numbers)
# grab the second last set of digits
# ie. .mp3, .mp4, .23p
if file_ext and not file_ext.isalpha() and not file_ext.isdigit():
seq_digit_length = len(count[-2])
else:
seq_digit_length = len(count[-1])
# if file_ext is empty, or if file_ext[1] is all numbers, use expression below
# abc0001, abc.0001 ...etc
if not file_ext or file_ext.isdigit():
try:
pattern_expr = re.compile('^(.*)(\d{%d})([^\d]*)$'%seq_digit_length)
except:
sequences[None].append(filename)
continue
# then for regular filenames, try grabbing filenames by looking at the digits before the last dot
# for files with extensions:
# abc.0001.png, abc.0001.mp3, abc0001.mp3,
else:
try:
pattern_expr = re.compile('^(.*)(\d{%d})(\..*)$'%seq_digit_length)
except:
sequences[None].append(filename)
continue
pound_length = seq_digit_length
pounds = "#" * pound_length
# first, check to see if this filename matches a sequence
found = False
for key, pattern in sequence_patterns.items():
match = pattern.match(filename)
if not match:
continue
sequences[key].append(filename)
found = True
break
# if we've already been matched, then continue on
if found:
continue
# next, see if this filename should start a new sequence
basename = os.path.basename(filename)
pattern_match = pattern_expr.match(basename)
if pattern_match:
opts = (pattern_match.group(1), pattern_match.group(3))
key = '%s%s%s' % (opts[0], pounds, opts[1])
# create a new pattern based on the filename
sequence_pattern = re.compile('^%s\d+%s$' % opts)
sequence_patterns[key] = sequence_pattern
sequences[key] = [filename]
continue
# otherwise, add it to the list of non-sequences
sequences[None].append(filename)
# now that we have grouped everything, we'll merge back filenames
# that were potential sequences, but only contain a single file to the
# non-sequential list
for key, filenames in sequences.items():
if ( key is None or len(filenames) > 1 ):
continue
sequences.pop(key)
sequences[None] += filenames
return sequences, seq_digit_length
"""
|
#!/usr/bin/python
"""Brown Dwarf Flux ratio calculator:
Inputs :
Star name:
Mass of companion in Jupiter masses
Age of star: """
from __future__ import division, print_function
import numpy as np
def _parser():
"""Take care of all the argparse stuff.
:returns: the args
"""
parser = argparse.ArgumentParser(description='Wavelength Calibrate Spectra')
parser.add_argument('star_name', help='Input fits file to calibrate')
parser.add_argument('companion_mass', help='Mass of companion')
parser.add_argument('age', help='Star age')
args = parser.parse_args()
return args
def main(star_name, companion_mass, stellar_age):
    """Compute flux ratio of star to companion """
    # NOTE(review): stub in this revision - the body is only a docstring
    # and the function returns None; the next revision fills it in.
def calculate_flux_ratios(star_params, companion_params):
    """ Flux ratios for the different bands """
    # 2.512 ~ 100**(1/5): flux factor for one magnitude step
    f = 2.512
    Flux_ratios = dict()
    for band in ("J", "H", "K"):
        mag_key = band + "mag"
        Flux_ratios[band] = f ** (companion_params[mag_key] - star_params[mag_key])
    return Flux_ratios
def get_stellar_params(star_name):
    """ Astro query search """
    # NOTE(review): stub - intended to query a catalogue service for the
    # star's magnitudes/parallax/temperature; currently returns None
    #return Magnitudes, parralax, Temp
    pass
def calculate_stellar_radius(star_params):
    """Estimate the stellar radius in solar radii.

    Based on R/Rs = (Ts/T)^2(L/Ls)^(1/2) equation.

    :param star_params: dict with "Bmag", "Vmag", "AbsVmag" and
        optionally "Teff" (effective temperature in K)
    :returns: radius of the star in solar radii
    """
    BminusV = star_params["Bmag"] - star_params["Vmag"]
    # fixed: original line had an unterminated string literal
    # (`if "Teff in ...`) which was a SyntaxError, and a bare `teff_star`
    # placeholder statement
    if "Teff" in star_params:
        teff_star = star_params["Teff"]
    else:
        # Interpolate from B-V (linear interpolation between table points)
        teff_star = (4200-5100)/(1.16-0.85) * (BminusV-0.85) + 5100
    Ts_T = 5800. / teff_star            # Temperature ratio (Sun / star)
    Dm = 4.83 - star_params["AbsVmag"]  # Difference of absolute magnitude
    L_Ls = 2.51 ** Dm                   # Luminosity ratio
    R_Rs = (Ts_T)**2*(L_Ls)**0.5        # Radius of star in solar radii
    return R_Rs  # R star in solar radii
    #BD_R = BD_Radius / R_Rs  # Radius_bd / Radius_star
    #BD_area_ratio = BD_R**2
def get_brown_dwarf_information(star_name, companion_mass, age):
    """ baraffe 2003 table search
    Need the tables in a file somewhere"""
    print("Interpolated companion parameters from barraffe 2003 tables.")
    # BUG(review): BD_parameters is never assigned, so this return raises
    # NameError; TODO load/interpolate the Baraffe 2003 tables here
    return BD_parameters # as a dictionary
if __name__ == '__main__':
    # Script entry point: unpack parsed CLI args and run main().
    args = vars(_parser())
    star_name = args.pop('star_name')
    companion_mass = args.pop('companion_mass')
    # fixed: the argparse destination is 'age' (lowercase); popping 'Age'
    # raised KeyError, and the call below referenced an undefined `age`
    age = args.pop('age')
    opts = {k: args[k] for k in args}
    main(star_name, companion_mass, age, **opts)
Order main structure.
#!/usr/bin/python
"""Brown Dwarf Flux ratio calculator:
Inputs :
Star name:
Mass of companion in Jupiter masses
Age of star: """
from __future__ import division, print_function
import numpy as np
def _parser():
"""Take care of all the argparse stuff.
:returns: the args
"""
parser = argparse.ArgumentParser(description='Wavelength Calibrate Spectra')
parser.add_argument('star_name', help='Input fits file to calibrate')
parser.add_argument('companion_mass', help='Mass of companion')
parser.add_argument('age', help='Star age')
args = parser.parse_args()
return args
def main(star_name, companion_mass, stellar_age):
    """Compute flux ratio of star to companion.

    :param star_name: catalogue name of the host star
    :param companion_mass: companion mass (Jupiter masses)
    :param stellar_age: age of the system
    """
    # Obtain Stellar parameters from astroquery
    # fixed: a stray trailing ':' after this call was a SyntaxError
    star_params = get_stellar_params(star_name)
    # Get parameters for this mass and age
    companion = get_brown_dwarf_information(companion_mass, stellar_age)
    Flux_ratios = calculate_flux_ratios(star_params, companion)
    # Print flux ratios per band
    # fixed: the original pseudo-code generator ("Flux_ratios.(dic unpack
    # method)") was a SyntaxError; iterate the dict items instead
    for key, val in Flux_ratios.items():
        print("{} band companion/star Flux ratio = {} ".format(key, val))
    # Compare to area ratio
    Rstar = calculate_stellar_radius(star_params)
    Rcomp_Rstar = companion["Radius"] / Rstar
    print("Radius Ratio of companion/star = {} ".format(Rcomp_Rstar))
    print("Area Ratio of companion/star = {} ".format(Rcomp_Rstar**2))
def calculate_flux_ratios(star_params, companion_params):
    """ Flux ratios for the different bands """
    f = 2.512  # ~100**(1/5): flux factor for one magnitude step
    return {band: f ** (companion_params[band + "mag"] - star_params[band + "mag"])
            for band in ("J", "H", "K")}
def get_stellar_params(star_name):
    """ Astro query search """
    # NOTE(review): stub - intended to query a catalogue service for the
    # star's magnitudes/parallax/temperature; currently returns None
    #return Magnitudes, parralax, Temp
    pass
def calculate_stellar_radius(star_params):
    """Estimate the stellar radius in solar radii.

    Based on R/Rs = (Ts/T)^2(L/Ls)^(1/2) equation.

    :param star_params: dict with "Bmag", "Vmag", "AbsVmag" and
        optionally "Teff" (effective temperature in K)
    :returns: radius of the star in solar radii
    """
    BminusV = star_params["Bmag"] - star_params["Vmag"]
    # fixed: original line had an unterminated string literal
    # (`if "Teff in ...`) which was a SyntaxError, and a bare `teff_star`
    # placeholder statement
    if "Teff" in star_params:
        teff_star = star_params["Teff"]
    else:
        # Interpolate from B-V (linear interpolation between table points)
        teff_star = (4200-5100)/(1.16-0.85) * (BminusV-0.85) + 5100
    Ts_T = 5800. / teff_star            # Temperature ratio (Sun / star)
    Dm = 4.83 - star_params["AbsVmag"]  # Difference of absolute magnitude
    L_Ls = 2.51 ** Dm                   # Luminosity ratio
    R_Rs = (Ts_T)**2*(L_Ls)**0.5        # Radius of star in solar radii
    return R_Rs  # R star in solar radii
    #BD_R = BD_Radius / R_Rs  # Radius_bd / Radius_star
    #BD_area_ratio = BD_R**2
def get_brown_dwarf_information(star_name, companion_mass, age):
    """ baraffe 2003 table search
    Need the tables in a file somewhere"""
    print("Interpolated companion parameters from barraffe 2003 tables.")
    # BUG(review): BD_parameters is never assigned, so this return raises
    # NameError; also note main() calls this with two arguments while the
    # signature takes three - TODO reconcile when implementing
    return BD_parameters # as a dictionary
if __name__ == '__main__':
    # Script entry point: unpack parsed CLI args and run main().
    args = vars(_parser())
    star_name = args.pop('star_name')
    companion_mass = args.pop('companion_mass')
    # fixed: the argparse destination is 'age' (lowercase); popping 'Age'
    # raised KeyError, and the call below referenced an undefined `age`
    age = args.pop('age')
    opts = {k: args[k] for k in args}
    main(star_name, companion_mass, age, **opts)
import json
import grequests
from django.core.management.base import BaseCommand
from programs.models import Program, ProgramModules, LearningPlan
from disciplines.models import Discipline, Semester, TrainingTerms
from modules.models import Module
class Command(BaseCommand):
    """Download UrFU program data per OKSO code and dump it to a fixture."""
    help = "Create Django objects from raw&ugly UrFU data."
    requires_system_checks = True
    requires_migrations_checks = True

    def add_arguments(self, parser):
        pass

    def handle(self, *args, **options):
        class GetPrograms:
            # collected program payloads are written to this fixture file
            pr_filename = 'uni_fixtures/programs.json'

            def __init__(self):
                oksos = []
                with open('uni_fixtures/specialities.json', encoding='utf-8') as specialities:
                    specialities_json = json.load(specialities)
                    for speciality in specialities_json:
                        oksos.append(speciality["okso"])
                oksos = list(set(oksos))
                # debug override: restrict to a single speciality code
                oksos = ["38.03.02", ]
                print("Всего ОКСО: ", len(oksos))
                # truncate the output file before appending fresh data
                open(self.pr_filename, 'w').close()
                self.urls = [f"http://its.urfu.ru/api/programs?okso={okso}" for okso in oksos]

            def exception(self, request, exception):
                # grequests exception handler: just report the failed URL
                print(f"Problem: {request.url}: {exception}")

            def fetch(self):
                # renamed from `async`: that identifier is a reserved
                # keyword since Python 3.7 and made this module a
                # SyntaxError (the file already requires 3.6+ f-strings)
                results = grequests.map((grequests.get(u) for u in self.urls), exception_handler=self.exception, size=10)
                with open(self.pr_filename, 'a') as pr:
                    print("Загружаем программы из ИТС")
                    # fixed: `results is not []` compared identity against a
                    # fresh list and was always True; test truthiness instead
                    if results:
                        print([r.content for r in results])
                        json.dump([r.content.decode('utf-8') for r in results], pr)

        get_programs = GetPrograms()
        get_programs.fetch()
its#22 TESTING
import json
import grequests
from django.core.management.base import BaseCommand
from programs.models import Program, ProgramModules, LearningPlan
from disciplines.models import Discipline, Semester, TrainingTerms
from modules.models import Module
class Command(BaseCommand):
    """Download UrFU program data per OKSO code and dump it to a fixture."""
    help = "Create Django objects from raw&ugly UrFU data."
    requires_system_checks = True
    requires_migrations_checks = True

    def add_arguments(self, parser):
        pass

    def handle(self, *args, **options):
        class GetPrograms:
            # collected program payloads are written to this fixture file
            pr_filename = 'uni_fixtures/programs.json'

            def __init__(self):
                oksos = []
                with open('uni_fixtures/specialities.json', encoding='utf-8') as specialities:
                    specialities_json = json.load(specialities)
                    for speciality in specialities_json:
                        oksos.append(speciality["okso"])
                oksos = list(set(oksos))
                # debug override: restrict to a single speciality code
                oksos = ["38.03.02", ]
                print("Всего ОКСО: ", len(oksos))
                # truncate the output file before appending fresh data
                open(self.pr_filename, 'w').close()
                self.urls = [f"http://its.urfu.ru/api/programs?okso={okso}" for okso in oksos]

            def exception(self, request, exception):
                # grequests exception handler: just report the failed URL
                print(f"Problem: {request.url}: {exception}")

            def fetch(self):
                # renamed from `async`: that identifier is a reserved
                # keyword since Python 3.7 and made this module a
                # SyntaxError (the file already requires 3.6+ f-strings)
                results = grequests.map((grequests.get(u) for u in self.urls), exception_handler=self.exception, size=10)
                with open(self.pr_filename, 'a') as pr:
                    print("Загружаем программы из ИТС")
                    # fixed: file.write() requires a string, but a list of
                    # parsed JSON payloads was passed (TypeError);
                    # serialize it properly instead
                    json.dump([r.json() for r in results], pr)

        get_programs = GetPrograms()
        get_programs.fetch()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Oct 24, 2016
@author: stefan
'''
import json
import requests
class SKBRESTClient():
    """Thin REST client for the SKB web service (translation and
    keyword-annotation endpoints)."""

    TRANSLATION_PATH = '1.0/skb/translation?'
    TITLE_TRANSLATION_PATH = '1.0/skb/title_translation?'
    KEYWORD_PATH = '1.0/skb/keyword_annotation'

    def __init__(self, url):
        '''
        :param url: URL of the SKB web service
        '''
        self.url = url

    def translate(self, **kwargs):
        # GET the translation endpoint; echo the target language back
        endpoint = '%s/%s' % (self.url, self.TRANSLATION_PATH)
        reply = requests.get(endpoint, params=kwargs)
        return (reply.text, kwargs['target'])

    def title_translate(self, **kwargs):
        # same as translate(), against the title-translation endpoint
        endpoint = '%s/%s' % (self.url, self.TITLE_TRANSLATION_PATH)
        reply = requests.get(endpoint, params=kwargs)
        return (reply.text, kwargs['target'])

    def save_doc_kw_skb(self, kwargs):
        # POST the annotation payload as JSON and return the raw body
        endpoint = '%s/%s' % (self.url, self.KEYWORD_PATH)
        reply = requests.post(endpoint,
                              data=json.dumps(kwargs),
                              headers={'Content-Type': 'application/json'})
        return (reply.text)
new: Added client for SKB sentiment dictionary
#! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Oct 24, 2016
@author: stefan
'''
import json
import requests
class SKBRESTClient(object):
    """REST client for the SKB web service.

    All methods return None when the service answers with an HTTP error
    (status code >= 400).
    """

    TRANSLATION_PATH = '1.0/skb/translation?'
    TITLE_TRANSLATION_PATH = '1.0/skb/title_translation?'
    KEYWORD_PATH = '1.0/skb/keyword_annotation'

    def __init__(self, url):
        '''
        :param url: URL of the SKB web service
        '''
        self.url = url

    def _get_translation(self, path, kwargs):
        # Shared GET helper for the two translation endpoints; the two
        # public methods previously duplicated this code verbatim.
        response = requests.get('%s/%s' % (self.url, path), params=kwargs)
        if response.status_code < 400:
            return (response.text, kwargs['target'])
        else:
            return None

    def translate(self, **kwargs):
        """Translate text; returns (response_text, target_lang) or None."""
        return self._get_translation(self.TRANSLATION_PATH, kwargs)

    def title_translate(self, **kwargs):
        """Translate a title; returns (response_text, target_lang) or None."""
        return self._get_translation(self.TITLE_TRANSLATION_PATH, kwargs)

    def save_doc_kw_skb(self, kwargs):
        """POST a keyword annotation; returns the response body or None."""
        response = requests.post('%s/%s' % (self.url, self.KEYWORD_PATH),
                                 data=json.dumps(kwargs),
                                 headers={'Content-Type': 'application/json'})
        if response.status_code < 400:
            return response.text
        else:
            return None
class SKBSentimentDictionary(dict):
    """Dictionary of (term, pos) -> (value, definition) loaded from the
    SKB sentiment service at construction time.  Initialized empty when
    the service answers with an HTTP error (status >= 400).
    """

    SENTIMENT_PATH = '1.0/skb/sentiment_dict'

    def __init__(self, url, language, emotion='polarity'):
        self.url = '{}/{}'.format(url, self.SENTIMENT_PATH)
        res = requests.get(self.url,
                           params={'lang': language,
                                   'emotion': emotion})
        entries = {}
        if res.status_code < 400:
            for document in json.loads(res.text):
                key = (document['term'], document['pos'])
                entries[key] = (document['value'], document['definition'])
        dict.__init__(self, entries)
|
from bs4 import BeautifulSoup
import re
import json
import time
from django.core.management.base import BaseCommand
from programs.models import Program, ProgramModules, LearningPlan
from disciplines.models import Discipline, Semester, TrainingTerms
from modules.models import Module
class Command(BaseCommand):
    """
    Example: ./manage.py parse_new "/home/developer/КТОМ 4.html" uni_fixtures/modules.json ./get_programs.html "Конструкторско-технологическое обеспечение машиностроительных производств"
    """
    help = "Create Django objects from raw&ugly UrFU data."
    requires_system_checks = True
    requires_migrations_checks = True

    class bcolors:
        # ANSI escape sequences used to colour console output below.
        HEADER = '\033[95m'
        OKBLUE = '\033[96m'
        OKGREEN = '\033[92m'
        WARNING = '\033[93m'
        FAIL = '\033[91m'
        ENDC = '\033[0m'
        BOLD = '\033[1m'
        UNDERLINE = '\033[4m'
def decompose(self, soup, tag, classname):
    """Remove every `tag` element carrying CSS class `classname` from the soup."""
    # fixed idiom: a list comprehension built a throwaway list purely for
    # its side effects; a plain loop states the intent
    for el in soup.find_all(tag, {'class': classname}):
        el.decompose()
def add_arguments(self, parser):
    """Register the four positional CLI arguments, in order."""
    for arg_name in ('html_path', 'uni_modules_path', 'programs_path', 'program_title'):
        parser.add_argument(arg_name, nargs=1)
def handle(self, *args, **options):
    """Parse saved UrFU HTML pages and create/refresh Program,
    LearningPlan and Module objects.
    NOTE(review): nesting below was reconstructed from a whitespace-
    stripped source - verify against the original file.
    """
    # NOTE(review): start_time is never read afterwards
    start_time = time.time()
    html_path = options["html_path"][0]
    uni_modules_path = options["uni_modules_path"][0]
    program_title = options["program_title"][0]
    programs_path = options["programs_path"][0]
    try:
        with open(html_path, encoding='utf-8') as html_file:
            raw_html = '\n'.join(html_file.readlines())
    except:
        raise FileNotFoundError
    try:
        with open(uni_modules_path, encoding='utf-8') as modules_file:
            modules_json = json.load(modules_file)
    except:
        raise FileNotFoundError
    try:
        with open(programs_path, encoding='utf-8') as programs_file:
            raw_programs = '\n'.join(programs_file.readlines())
    except:
        raise FileNotFoundError
    if raw_programs:
        programs_soup = BeautifulSoup(raw_programs, 'lxml')
        rows = []
        for row in programs_soup.find_all('tr', {"class": "main-info"}):
            rows.append([val.text.strip() for val in row.find_all('td')])
        for row in rows:
            try:
                program = Program.objects.get(title=row[1])
            except:
                # program does not exist yet: create it
                def level(x):
                    # boolean-keyed dict trick: yields "m"/"s"/"b" for the
                    # matching degree name; raises KeyError if none match
                    return {
                        'Магистр'.lower() in str(x).lower(): "m",
                        'Специалист'.lower() in str(x).lower(): "s",
                        'Бакалавр'.lower() in str(x).lower(): "b",
                    }[True]
                program = Program(title=row[1],
                                  training_direction=row[2],
                                  level=level(row[4]),
                                  )
                program.save()
                print(f"{self.bcolors.BOLD}Создана программа программа \"{row[1]}\"?{self.bcolors.ENDC}")
        try:
            # mark the requested program as the one currently being parsed
            program = Program.objects.filter(title=program_title).first()
            program.status = "p"
            program.save()
        except:
            raise NotImplementedError
    if raw_html:
        soup = BeautifulSoup(raw_html, 'lxml')
        # strip scripts, styles and navigation chrome before storing HTML
        [s.extract() for s in soup('script')]
        [s.extract() for s in soup('style')]
        self.decompose(soup, "table", "menu_table")
        self.decompose(soup, "td", "navpath")
        self.decompose(soup, "div", "buttons")
        soup.find('td', id="nav_td").decompose()
        # each field falls back to a default when the cell is missing
        try:
            stage = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.stage").text.strip().lower() == "утверждено"
        except:
            stage = False
        try:
            displayableTitle = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.displayableTitle").text.strip()
        except:
            displayableTitle = ""
        try:
            number = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.number").text.strip()
        except:
            number = ""
        try:
            active = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.active").text.strip()
        except:
            active = "нет"
        try:
            title = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.title").text.strip()
        except:
            title = ""
        try:
            loadTimeType = soup.find("td", id="EduVersionPlanTab.EduVersionPlan.loadTimeType").text.strip()
        except:
            loadTimeType = "часов в неделю"
        html = soup.find("table", {"class": "basic"}).prettify()
        # refresh matching pending learning plans, or create a new one
        lps = LearningPlan.objects.filter(uni_number=number, status="p")
        if len(lps) > 0:
            for lp in lps:
                lp.uni_displayableTitle = displayableTitle
                lp.uni_number = number
                lp.uni_active = active
                lp.uni_title = title
                lp.uni_stage = stage
                lp.uni_loadTimeType = loadTimeType
                lp.uni_html = html
                lp.save()
                if lp not in program.learning_plans.all():
                    program.learning_plans.add(lp)
                    program.save()
        else:
            lp = LearningPlan(uni_displayableTitle=displayableTitle,
                              uni_number=number,
                              uni_active=active,
                              uni_title=title,
                              uni_stage=stage,
                              uni_loadTimeType=loadTimeType,
                              uni_html=html,
                              status="p"
                              )
            lp.save()
            program.learning_plans.add(lp)
            program.save()
        table = soup.find('table', id="EduVersionPlanTab.EduDisciplineList")
        headers = [header.text.strip() for header in table.find_all('th')]
        # NOTE(review): find_row_index appears unused; only the id-based
        # variant below is called
        def find_row_index(row_text):
            headers = table.find_all('th')
            return headers.index(table.find('th', text=row_text))
        def find_row_index_id(id):
            headers = table.find_all('th')
            return headers.index(table.find('th', id=id))
        rows = []
        for row in table.find_all('tr'):
            rows.append([val.text.strip() for val in row.find_all('td')])
        # Ищем модули
        modules = []
        # locate the column holding module/discipline numbers
        # NOTE(review): module_numbers_col stays unbound if the header is
        # absent, making the loop below raise NameError
        for header in headers:
            if "Номер модуля, дисциплины".lower() == header.lower():
                module_numbers_col = headers.index(header)
        for row in rows:
            if row:
                m = re.search('\d\d+', row[module_numbers_col])
                if m and "М" in row[1]:
                    for module in modules_json:
                        if str(module["number"]) == str(m.group(0)):
                            print(str(module["number"]), str(m.group(0)), str(module["number"]) == str(m.group(0)))
                            module["row"] = row
                            modules.append(module)
        program_modules = ProgramModules.objects.filter(program=program)
        for module in modules:
            print(" ", module['title'])
            if program_modules.filter(module__uni_uuid=module["uuid"]):
                print(f"Модуль есть: {module['title']}")
        # plans whose number lacks "зао" (заочный) are treated as full-time
        fulltime = False
        if 'зао' not in number:
            fulltime = True
        print("fulltime: ", fulltime)
        if fulltime:
            # NOTE(review): term is computed but never used afterwards
            term = TrainingTerms.objects.filter(title="4 года").first()
        for module in [m for m in modules if m["disciplines"]]:
            module_obj, semester = self.create_module(find_row_index_id, module, program)
def create_module(self, find_row_index_id, module, program):
    """
    Find or create a Module for one parsed plan row, then ensure a
    ProgramModules link exists between it and ``program``.

    Parameters:
        find_row_index_id: callable mapping a header-cell id to its column
            index in the plan table (raises when the column is missing).
        module: dict describing the module (from modules.json) with the
            matched table row attached under the "row" key.
        program: the Program this module belongs to.

    Returns:
        (module_obj, semester) where semester is 1-10, or the sentinel 99
        when no term column with a positive value could be found.
    """
    print(f"Ищем или создаём модуль: {module['title']}")
    # Sentinel default.  Previously `semester` was assigned only inside the
    # loop, so a row whose term cells were all present but non-positive left
    # it unbound and the `if semester == 99` check below raised NameError.
    semester = 99
    # Scan terms 10..1; the last term cell examined with a positive value wins.
    for i in range(10, 0, -1):
        try:
            ze = module["row"][find_row_index_id(f"EduVersionPlanTab.EduDisciplineList.__term{i}.__term{i}headerCell")]
            try:
                if int(ze) > 0:
                    semester = i
            except (TypeError, ValueError):
                # Cell is empty or not a number -- skip this term.
                pass
        except Exception:
            # Term column missing for this i.
            # NOTE(review): this also resets a semester found at a higher i,
            # matching the original behaviour -- confirm that is intended.
            semester = 99
    if semester == 99:
        print(f"Семестр: {self.bcolors.FAIL}{semester}{self.bcolors.ENDC}")
    else:
        print(f"Семестр: {semester}")
    try:
        # Find-or-create: when no Module with this title exists, .first()
        # returns None and the attribute assignment below raises, which
        # sends us into the creation branch in `except`.
        module_obj = Module.objects.filter(title=module["title"]).first()
        module_obj.uni_uuid = module["uuid"]
        module_obj.uni_number = module["number"]
        module_obj.uni_coordinator = module["coordinator"]
        module_obj.uni_type = module["type"]
        module_obj.uni_title = module["title"]
        module_obj.uni_competence = module["competence"]
        module_obj.uni_testUnits = module["testUnits"]
        module_obj.uni_priority = module["priority"]
        module_obj.uni_state = module["state"]
        module_obj.uni_approvedDate = module["approvedDate"]
        module_obj.uni_comment = module["comment"]
        module_obj.uni_file = module["file"]
        module_obj.uni_specialities = module["specialities"]
        module_obj.program = program
        module_obj.semester = semester
        module_obj.status = 'p'
        module_obj.save()
        print(f"{self.bcolors.OKBLUE}Модуль найден: {module['title']}{self.bcolors.ENDC}")
    except Exception:
        # No existing Module (or it was unusable): create a fresh one.
        print(f"{self.bcolors.BOLD}Модуль создан: {module['title']}{self.bcolors.ENDC}")
        module_obj = Module(title=module["title"],
                            uni_uuid=module["uuid"],
                            uni_number=module["number"],
                            uni_coordinator=module["coordinator"],
                            uni_type=module["type"],
                            uni_title=module["title"],
                            uni_competence=module["competence"],
                            uni_testUnits=module["testUnits"],
                            uni_priority=module["priority"],
                            uni_state=module["state"],
                            uni_approvedDate=module["approvedDate"],
                            uni_comment=module["comment"],
                            uni_file=module["file"],
                            uni_specialities=module["specialities"],
                            program=program,
                            semester=semester,
                            status='p',
                            )
        module_obj.save()
    # Link the module to the program unless such a link already exists.
    program_module = ProgramModules.objects.filter(program=program, module=module_obj)
    if not program_module:
        program_module = ProgramModules(program=program, module=module_obj, semester=module_obj.semester, status="p")
        program_module.save()
    return module_obj, semester
new parser#23
from bs4 import BeautifulSoup
import re
import json
import time
from django.core.management.base import BaseCommand
from programs.models import Program, ProgramModules, LearningPlan
from disciplines.models import Discipline, Semester, TrainingTerms
from modules.models import Module
class Command(BaseCommand):
    """
    Management command that builds Django objects (Program, LearningPlan,
    Module, ProgramModules) from saved UrFU HTML pages and a modules.json
    fixture.

    Example: ./manage.py parse_new "/home/developer/КТОМ 4.html" uni_fixtures/modules.json ./get_programs.html "Конструкторско-технологическое обеспечение машиностроительных производств"
    """
    help = "Create Django objects from raw&ugly UrFU data."
    requires_system_checks = True
    requires_migrations_checks = True

    class bcolors:
        # ANSI escape codes used to colorize console output.
        HEADER = '\033[95m'
        OKBLUE = '\033[96m'
        OKGREEN = '\033[92m'
        WARNING = '\033[93m'
        FAIL = '\033[91m'
        ENDC = '\033[0m'
        BOLD = '\033[1m'
        UNDERLINE = '\033[4m'

    def decompose(self, soup, tag, classname):
        # Remove every `tag` element carrying the given CSS class from the soup.
        [el.decompose() for el in soup.find_all(tag, {'class': classname})]

    def add_arguments(self, parser):
        # Positional arguments: plan HTML path, modules JSON fixture path,
        # programs HTML path, and the target programme title.
        parser.add_argument('html_path', nargs=1)
        parser.add_argument('uni_modules_path', nargs=1)
        parser.add_argument('programs_path', nargs=1)
        parser.add_argument('program_title', nargs=1)

    def handle(self, *args, **options):
        """
        Entry point: read the three input files, sync Program records from
        the programs page, then parse the learning-plan page into
        LearningPlan and Module records for the selected programme.
        """
        start_time = time.time()  # NOTE(review): never read again in this method
        html_path = options["html_path"][0]
        uni_modules_path = options["uni_modules_path"][0]
        program_title = options["program_title"][0]
        programs_path = options["programs_path"][0]
        # Any read/parse failure is surfaced as FileNotFoundError
        # (NOTE(review): bare except also masks JSON/encoding errors).
        try:
            with open(html_path, encoding='utf-8') as html_file:
                raw_html = '\n'.join(html_file.readlines())
        except:
            raise FileNotFoundError
        try:
            with open(uni_modules_path, encoding='utf-8') as modules_file:
                modules_json = json.load(modules_file)
        except:
            raise FileNotFoundError
        try:
            with open(programs_path, encoding='utf-8') as programs_file:
                raw_programs = '\n'.join(programs_file.readlines())
        except:
            raise FileNotFoundError
        if raw_programs:
            # One row per programme on the listing page; create any Program
            # that does not exist yet.
            programs_soup = BeautifulSoup(raw_programs, 'lxml')
            rows = []
            for row in programs_soup.find_all('tr', {"class": "main-info"}):
                rows.append([val.text.strip() for val in row.find_all('td')])
            for row in rows:
                try:
                    program = Program.objects.get(title=row[1])
                except:
                    # Map the degree-level cell to a single-letter code.
                    def level(x):
                        return {
                            'Магистр'.lower() in str(x).lower(): "m",
                            'Специалист'.lower() in str(x).lower(): "s",
                            'Бакалавр'.lower() in str(x).lower(): "b",
                        }[True]
                    program = Program(title=row[1],
                                      training_direction=row[2],
                                      level=level(row[4]),
                                      )
                    program.save()
                    print(f"{self.bcolors.BOLD}Создана программа программа \"{row[1]}\"?{self.bcolors.ENDC}")
        # Select the target programme and mark it published.
        try:
            program = Program.objects.filter(title=program_title).first()
            program.status = "p"
            program.save()
        except:
            raise NotImplementedError
        if raw_html:
            soup = BeautifulSoup(raw_html, 'lxml')
            # Strip scripts, styles and site chrome so only the plan remains.
            [s.extract() for s in soup('script')]
            [s.extract() for s in soup('style')]
            self.decompose(soup, "table", "menu_table")
            self.decompose(soup, "td", "navpath")
            self.decompose(soup, "div", "buttons")
            soup.find('td', id="nav_td").decompose()
            # Each metadata field below falls back to a default when the
            # corresponding cell is missing from the page.
            try:
                stage = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.stage").text.strip().lower() == "утверждено"
            except:
                stage = False
            try:
                displayableTitle = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.displayableTitle").text.strip()
            except:
                displayableTitle = ""
            try:
                number = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.number").text.strip()
            except:
                number = ""
            try:
                active = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.active").text.strip()
            except:
                active = "нет"
            try:
                title = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.title").text.strip()
            except:
                title = ""
            try:
                loadTimeType = soup.find("td", id="EduVersionPlanTab.EduVersionPlan.loadTimeType").text.strip()
            except:
                loadTimeType = "часов в неделю"
            html = soup.find("table", {"class": "basic"}).prettify()
            # Update every published LearningPlan with this plan number, or
            # create one if none exists yet.
            lps = LearningPlan.objects.filter(uni_number=number, status="p")
            if len(lps) > 0:
                for lp in lps:
                    lp.uni_displayableTitle = displayableTitle
                    lp.uni_number = number
                    lp.uni_active = active
                    lp.uni_title = title
                    lp.uni_stage = stage
                    lp.uni_loadTimeType = loadTimeType
                    lp.uni_html = html
                    lp.save()
                    if lp not in program.learning_plans.all():
                        program.learning_plans.add(lp)
                        program.save()
            else:
                lp = LearningPlan(uni_displayableTitle=displayableTitle,
                                  uni_number=number,
                                  uni_active=active,
                                  uni_title=title,
                                  uni_stage=stage,
                                  uni_loadTimeType=loadTimeType,
                                  uni_html=html,
                                  status="p"
                                  )
                lp.save()
                program.learning_plans.add(lp)
                program.save()
            table = soup.find('table', id="EduVersionPlanTab.EduDisciplineList")
            headers = [header.text.strip() for header in table.find_all('th')]

            # NOTE(review): find_row_index is defined but unused in this file.
            def find_row_index(row_text):
                headers = table.find_all('th')
                return headers.index(table.find('th', text=row_text))

            # Column index of the header cell with the given DOM id.
            def find_row_index_id(id):
                headers = table.find_all('th')
                return headers.index(table.find('th', id=id))
            rows = []
            for row in table.find_all('tr'):
                rows.append([val.text.strip() for val in row.find_all('td')])
            # Look for module rows: the "Номер модуля, дисциплины" column
            # holds the module number; rows whose second cell contains "М"
            # are module headers.  Matched fixture entries get the table row
            # attached under "row".
            modules = []
            for header in headers:
                if "Номер модуля, дисциплины".lower() == header.lower():
                    module_numbers_col = headers.index(header)
                    for row in rows:
                        if row:
                            m = re.search('\d\d+', row[module_numbers_col])
                            if m and "М" in row[1]:
                                for module in modules_json:
                                    if str(module["number"]) == str(m.group(0)):
                                        module["row"] = row
                                        modules.append(module)
            program_modules = ProgramModules.objects.filter(program=program)
            for module in modules:
                print(" ", module['title'])
                if program_modules.filter(module__uni_uuid=module["uuid"]):
                    print(f"Модуль есть: {module['title']}")
            # A plan number containing "зао" denotes extramural ("заочное")
            # study; anything else is treated as full-time.
            fulltime = False
            if 'зао' not in number:
                fulltime = True
            print("fulltime: ", fulltime)
            if fulltime:
                term = TrainingTerms.objects.filter(title="4 года").first()  # NOTE(review): `term` unused here
            for module in [m for m in modules if m["disciplines"]]:
                module_obj, semester = self.create_module(find_row_index_id, module, program)

    def create_module(self, find_row_index_id, module, program):
        """
        Find or create a Module for one parsed plan row and link it to
        ``program`` through ProgramModules.  Returns (module_obj, semester).
        """
        print(f"Ищем или создаём модуль: {module['title']}")
        # Scan terms 10..1 for a positive test-unit cell.
        for i in range(10, 0, -1):
            try:
                ze = module["row"][find_row_index_id(f"EduVersionPlanTab.EduDisciplineList.__term{i}.__term{i}headerCell")]
                try:
                    if int(ze) > 0:
                        semester = i
                except:
                    pass
            except:
                semester = 99
        # NOTE(review): if every term column above resolved but no cell was a
        # positive number, `semester` was never assigned and the next line
        # raises NameError.
        if semester == 99:
            print(f"Семестр: {self.bcolors.FAIL}{semester}{self.bcolors.ENDC}")
        else:
            print(f"Семестр: {semester}")
        try:
            # Find-or-create: .first() returning None makes the attribute
            # assignment raise, falling through to the creation branch.
            module_obj = Module.objects.filter(title=module["title"]).first()
            module_obj.uni_uuid = module["uuid"]
            module_obj.uni_number = module["number"]
            module_obj.uni_coordinator = module["coordinator"]
            module_obj.uni_type = module["type"]
            module_obj.uni_title = module["title"]
            module_obj.uni_competence = module["competence"]
            module_obj.uni_testUnits = module["testUnits"]
            module_obj.uni_priority = module["priority"]
            module_obj.uni_state = module["state"]
            module_obj.uni_approvedDate = module["approvedDate"]
            module_obj.uni_comment = module["comment"]
            module_obj.uni_file = module["file"]
            module_obj.uni_specialities = module["specialities"]
            module_obj.program = program
            module_obj.semester = semester
            module_obj.status = 'p'
            module_obj.save()
            print(f"{self.bcolors.OKBLUE}Модуль найден: {module['title']}{self.bcolors.ENDC}")
        except:
            print(f"{self.bcolors.BOLD}Модуль создан: {module['title']}{self.bcolors.ENDC}")
            module_obj = Module(title=module["title"],
                                uni_uuid=module["uuid"],
                                uni_number=module["number"],
                                uni_coordinator=module["coordinator"],
                                uni_type=module["type"],
                                uni_title=module["title"],
                                uni_competence=module["competence"],
                                uni_testUnits=module["testUnits"],
                                uni_priority=module["priority"],
                                uni_state=module["state"],
                                uni_approvedDate=module["approvedDate"],
                                uni_comment=module["comment"],
                                uni_file=module["file"],
                                uni_specialities=module["specialities"],
                                program=program,
                                semester=semester,
                                status='p',
                                )
            module_obj.save()
        # Link the module to the program unless such a link already exists.
        program_module = ProgramModules.objects.filter(program=program, module=module_obj)
        if not program_module:
            print(f"{self.bcolors.WARNING}Модуль программы не найден, создаём: {module['title']} / {program.title}{self.bcolors.ENDC}")
            program_module = ProgramModules(program=program, module=module_obj, semester=module_obj.semester, status="p")
            program_module.save()
        return module_obj, semester
"""
ConnectionField and associated classes.
This module defines some basic classes of objects used to create
simulations of cortical sheets that take input through connection
fields that project from other cortical sheets (or laterally from
themselves).
ConnectionField: Holds a single connection field within a
CFProjection.
CFProjection: A set of ConnectionFields mapping from a Sheet into a
ProjectionSheet.
CFSheet: A subclass of ProjectionSheet that provides an interface to
the underlying ConnectionFields in any projection of type
CFProjection.
$Id$
"""
__version__ = '$Revision$'
from copy import copy
from numpy import abs,array,zeros,where
from numpy.oldnumeric import Float,Float32
import param
import patterngenerator
from patterngenerator import PatternGenerator
from functionfamily import TransferFn,IdentityTF
from functionfamily import LearningFn,Hebbian,IdentityLF
from functionfamily import ResponseFn,DotProduct
from functionfamily import CoordinateMapperFn,IdentityMF
from projection import Projection,ProjectionSheet, SheetMask
from sheetcoords import Slice
from sheetview import UnitView
from boundingregion import BoundingBox,BoundingRegionParameter
# CEBALERT: shouldn't be necessary, and depends on the implementation
# of numpy.vectorize
def simple_vectorize(fn,num_outputs=1,output_type=object,doc=''):
    """
    Simplify creation of numpy.vectorize(fn) objects where all outputs
    have the same typecode.

    Parameters:
        fn: the scalar function (or callable object) to vectorize.
        num_outputs: number of values fn returns.
        output_type: numpy scalar type shared by all outputs.
        doc: docstring for the resulting vectorized function.
    """
    from numpy import vectorize,sctype2char

    # This function exists because I cannot figure out how I am
    # supposed to stop vectorize() calling fn one extra time at the
    # start. (It's supposed to call an extra time at the start to
    # determine the output types UNLESS the output types are
    # specified.)

    vfn = vectorize(fn,doc=doc)
    # stop vectorize calling fn an extra time at the start
    # (works for our current numpy (1.1.1))
    # NOTE(review): nout/otypes/lastcallargs are numpy-internal attributes;
    # this is tied to the numpy version noted above.
    vfn.nout=num_outputs # number of outputs of fn
    output_typecode = sctype2char(output_type)
    vfn.otypes=output_typecode*num_outputs # typecodes of outputs of fn

    import inspect
    # func_code is the Python-2 spelling; objects are probed first as plain
    # functions, then as callables via __call__.
    try:
        fn_code = fn.func_code if hasattr(fn,'func_code') else fn.__call__.func_code
    except:
        raise TypeError("Couldn't find code of %s"%fn)
    fn_args = inspect.getargs(fn_code)[0]
    extra = 1 if fn_args[0]=='self' else 0
    vfn.lastcallargs=len(fn_args)-extra # num args of fn
    return vfn
# Specified explicitly when creating weights matrix - required
# for optimized C functions.
weight_type = Float32
class NullCFError(ValueError):
    """
    Error thrown when trying to create an empty CF.
    """
    def __init__(self, x, y, input, rows, cols):
        # Format the diagnostic message, then delegate to ValueError.
        details = (x, y, input, rows, cols)
        message = "ConnectionField at (%s,%s) (input_sheet=%s) has a zero-sized weights matrix (%s,%s); you may need to supply a larger bounds_template or increase the density of the sheet." % details
        super(NullCFError, self).__init__(message)
class ConnectionField(object):
    """
    A set of weights on one input Sheet.

    Each ConnectionField contributes to the activity of one unit on
    the output sheet, and is normally used as part of a Projection
    including many other ConnectionFields.
    """
    # __slots__ keeps per-CF memory low; a projection holds one CF per
    # destination unit.
    __slots__ = ['weights','input_sheet_slice','mask',
                 '_has_norm_total','_norm_total']

    def __get_norm_total(self):
        """
        Return the stored norm_value, if any, or else the current sum of the weights.

        See the norm_total property for more details.
        """
        # The actual value is cached in _norm_total.
        if self._has_norm_total:
            return self._norm_total
        else:
            return abs(self.weights).sum()

    def __set_norm_total(self,new_norm_total):
        """
        Set an explicit value to be returned by norm_total.

        See the norm_total property for more details.
        """
        self._has_norm_total = True
        self._norm_total = new_norm_total

    def __del_norm_total(self):
        """
        Delete any cached norm_total that may have been set.

        See the norm_total property for more details.
        """
        self._has_norm_total = False

    # CB: Accessing norm_total as a property from the C code takes
    # about 2% of run time for 90 iterations of lissom_oo_or. (As of
    # r8139, using floating-point simulation time.)
    norm_total = property(__get_norm_total,__set_norm_total,__del_norm_total,
        """
        The norm_total property returns a value useful in computing
        a sum-based weight normalization.

        By default, the value returned is simply the current sum of
        the connection weights. However, another value can be
        substituted by setting norm_total explicitly, and this cached
        value will then be returned instead.

        This mechanism has two main purposes. First, it allows a
        learning function to cache the sum value for an output
        function to use later without computation, which can result in
        significant time savings. Second, the extra level of
        indirection allows the sum value to be manipulated before it
        is used, to implement operations like joint normalization
        across corresponding CFs in multiple Projections.

        Apart from such cases, norm_total can be ignored.

        Note that every person who uses a class that sets or gets
        norm_total must be very careful to ensure that stale values
        will never be accessed. A good way to do this is to make sure
        that the value is only set just before it will be used, and
        deleted as soon as it has been accessed.

        WARNING: Any c-optimized code can bypass this property and
        access directly _has_norm_total, _norm_total
        """)

    def get_bounds(self,input_sheet):
        # Bounds of this CF on the given input sheet, derived from its slice.
        return self.input_sheet_slice.compute_bounds(input_sheet)

    # CEBALERT:
    # template and mask: usually created ONCE by CFProjection and
    # specified as a Slice and array (respectively). Otherwise,
    # can be specified as BoundingBox and patterngenerator.
    # Note that BoundingBox() is ok for a default even though it's
    # mutable because we copy it inside init. Constant() is ok too
    # because mask and weights_generator are not modified.
    def __init__(self,input_sheet,x=0.0,y=0.0,template=BoundingBox(radius=0.1),
                 weights_generator=patterngenerator.Constant(),
                 mask=patterngenerator.Constant(),
                 output_fns=None,min_matrix_radius=1):
        """
        Create weights at the specified (x,y) location on the
        specified input_sheet.

        The supplied template (if a BoundingRegion) is converted to a
        Slice, moved to the specified (x,y) location, and then the
        weights pattern is drawn inside by the weights_generator.

        Note that if the appropriate template Slice is already known,
        then it can be passed in instead of a BoundingRegion template.
        This slice will then be used directly, instead of converting
        the template into a Slice.

        The supplied template object itself will not be modified (it
        is copied before use).

        The mask allows the weights to be limited to being non-zero in
        a subset of the rectangular weights area. The actual mask
        used is a view of the given mask created by cropping to the
        boundaries of the input_sheet, so that the weights all
        correspond to actual locations in the input sheet. For
        instance, if a circular pattern of weights is desired, the
        mask should have a disk-shaped pattern of elements with value
        1, surrounded by elements with the value 0. If the CF extends
        over the edge of the input sheet then the weights will
        actually be half-moon (or similar) rather than circular.
        """
        #print "Create CF",input_sheet.name,x,y,"template=",template,"wg=",weights_generator,"m=",mask,"ofs=",output_fns,"min r=",min_matrix_radius
        template = copy(template)

        if not isinstance(template,Slice):
            template = Slice(template,input_sheet,force_odd=True,
                             min_matrix_radius=min_matrix_radius)

        # Note: if passed in, mask is shared between CFs (but not if created here)
        if not hasattr(mask,'view'):
            mask = _create_mask(mask,template.compute_bounds(input_sheet),
                                # CEBALERT: it's not really worth adding more ALERTs on this
                                # topic, but...there's no way for the CF to control autosize
                                # and threshold.
                                input_sheet,True,0.5)

        # CB: has to be set for C code. Can't be initialized at the
        # class level, or it would become a read-only class attribute
        # (because it's a slot:
        # http://docs.python.org/reference/datamodel.html). Can we
        # somehow avoid having to think about _has_norm_total in the
        # python code? Could the C code initialize this value?
        self._has_norm_total=False

        if output_fns is None:
            output_fns = []

        # CEBALERT: now even more confusing; weights_slice is
        # different from input_sheet_slice. At least need to rename.
        weights_slice = self._create_input_sheet_slice(input_sheet,x,y,template,min_matrix_radius)

        # CBNOTE: this would be clearer (but not perfect, and probably slower)
        # m = mask_template[self.weights_slice()]
        self.mask = weights_slice.submatrix(mask)  # view of original mask
        self.mask = array(self.mask,copy=1)  # CEBALERT: why is this necessary?
        # (without it, optimized learning function creates artifacts in CFs at
        # left and right edges of sheet, at some densities)

        # CBENHANCEMENT: might want to do something about a size
        # that's specified (right now the size is assumed to be that
        # of the bounds)

        # shouldn't be extra computation of boundingbox because it's gone from Slice.__init__; could avoid extra lookups by getting straight from slice
        w = weights_generator(x=x,y=y,bounds=self.get_bounds(input_sheet),
                              xdensity=input_sheet.xdensity,
                              ydensity=input_sheet.ydensity,
                              mask=self.mask)

        # CEBALERT: unnecessary copy! Pass type to PG & have it draw
        # in that. (Should be simple, except making it work for all
        # the PG subclasses that override array creation in various
        # ways (producing or using inconsistent types) turned out to
        # be too painful.)
        self.weights = w.astype(weight_type)

        # CEBHACKALERT: the system of masking through multiplication
        # by 0 works for now, while the output_fns are all
        # multiplicative. But in the long run we need a better way to
        # apply the mask. The same applies anywhere the mask is used,
        # including in learningfn/. We should investigate masked
        # arrays (from numpy).
        for of in output_fns:
            of(self.weights)

    # CB: can this be renamed to something better?
    def _create_input_sheet_slice(self,input_sheet,x,y,template,min_matrix_radius):
        """
        Create the input_sheet_slice, which provides the appropriate
        Slice for this CF on the input_sheet (as well as providing
        this CF's exact bounds).

        Also creates the weights_slice, which provides the Slice for
        this weights matrix (in case it must be cropped at an edge).
        """
        # copy required because the template gets modified here but
        # needs to be used again
        input_sheet_slice = copy(template)
        input_sheet_slice.positionedcrop(x,y,input_sheet)
        input_sheet_slice.crop_to_sheet(input_sheet)

        # weights matrix cannot have a zero-sized dimension (could
        # happen at this stage because of cropping)
        nrows,ncols = input_sheet_slice.shape_on_sheet()
        if nrows<1 or ncols<1:
            raise NullCFError(x,y,input_sheet,nrows,ncols)

        self.input_sheet_slice = input_sheet_slice

        # not copied because we don't use again
        template.positionlesscrop(x,y,input_sheet)
        return template

    # CEBALERT: unnecessary method; can use something like
    # activity[cf.input_sheet_slice()]
    def get_input_matrix(self, activity):
        # CBNOTE: again, this might be clearer (but probably slower):
        # activity[self.input_sheet_slice()]
        return self.input_sheet_slice.submatrix(activity)
class CFPResponseFn(param.Parameterized):
    """
    Map an input activity matrix into an output matrix using the CFs
    in a CFProjection.

    Objects in this hierarchy of callable function objects compute a
    response matrix when given an input pattern and a set of
    ConnectionField objects. Typically used as part of the activation
    function for a neuron, computing activation for one Projection.

    Objects in this class must support being called as a function with
    the arguments specified below, and are assumed to modify the
    activity matrix in place.
    """
    __abstract=True

    def __call__(self, iterator, input_activity, activity, strength, **params):
        # Abstract: subclasses implement the actual response computation.
        raise NotImplementedError
class CFPRF_Plugin(CFPResponseFn):
    """
    Generic large-scale response function based on a simple single-CF function.

    Applies the single_cf_fn to each CF in turn. For the default
    single_cf_fn of DotProduct(), does a basic dot product of each CF with the
    corresponding slice of the input array. This function is likely
    to be slow to run, but it is easy to extend with any arbitrary
    single-CF response function.

    The single_cf_fn must be a function f(X,W) that takes two
    identically shaped matrices X (the input) and W (the
    ConnectionField weights) and computes a scalar activation value
    based on those weights.
    """
    single_cf_fn = param.ClassSelector(ResponseFn,default=DotProduct(),
        doc="Accepts a ResponseFn that will be applied to each CF individually.")

    def __call__(self, iterator, input_activity, activity, strength):
        # Bind the fn locally to avoid an attribute lookup per CF.
        single_cf_fn = self.single_cf_fn
        for cf,i in iterator():
            # X is the input slice that this CF's weights cover.
            X = cf.input_sheet_slice.submatrix(input_activity)
            activity.flat[i] = single_cf_fn(X,cf.weights)
        # Scale the whole response by the projection strength, in place.
        activity *= strength
class CFPLearningFn(param.Parameterized):
    """
    Compute new CFs for a CFProjection based on input and output activity values.

    Objects in this hierarchy of callable function objects compute a
    new set of CFs when given input and output patterns and a set of
    ConnectionField objects. Used for updating the weights of one
    CFProjection.

    Objects in this class must support being called as a function with
    the arguments specified below.
    """
    __abstract = True

    def constant_sum_connection_rate(self,proj,learning_rate):
        """
        Return the learning rate for a single connection assuming that
        the total rate is to be divided evenly among all the units in
        the connection field.
        """
        return float(learning_rate)/proj.n_units()

    # JABALERT: Should the learning_rate be a parameter of this object instead of an argument?
    def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
        """
        Apply this learning function to the given set of ConnectionFields,
        and input and output activities, using the given learning_rate.
        """
        raise NotImplementedError
class CFPLF_Identity(CFPLearningFn):
    """CFLearningFunction performing no learning."""
    single_cf_fn = param.ClassSelector(LearningFn,default=IdentityLF(),constant=True)

    def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
        # Deliberately a no-op: weights are left untouched.
        pass
class CFPLF_Plugin(CFPLearningFn):
    """CFPLearningFunction applying the specified single_cf_fn to each CF."""
    single_cf_fn = param.ClassSelector(LearningFn,default=Hebbian(),
        doc="Accepts a LearningFn that will be applied to each CF individually.")

    def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
        """Apply the specified single_cf_fn to every CF."""
        # Per-connection rate: total rate divided evenly over CF units.
        single_connection_learning_rate = self.constant_sum_connection_rate(iterator.proj,learning_rate)
        # avoid evaluating these references each time in the loop
        single_cf_fn = self.single_cf_fn
        for cf,i in iterator():
            single_cf_fn(cf.get_input_matrix(input_activity),
                         output_activity.flat[i], cf.weights,
                         single_connection_learning_rate)
            # Re-apply the CF shape mask by zeroing masked-out weights.
            cf.weights *= cf.mask
class CFPOutputFn(param.Parameterized):
    """
    Type for an object that applies some operation (typically something
    like normalization) to all CFs in a CFProjection for which the specified
    mask (typically the activity at the destination of this projection)
    is nonzero.
    """
    __abstract = True

    def __call__(self, iterator, **params):
        """Operate on each CF for which the mask is nonzero."""
        raise NotImplementedError
class CFPOF_Plugin(CFPOutputFn):
    """
    Applies the specified single_cf_fn to each CF in the CFProjection
    for which the mask is nonzero.
    """
    single_cf_fn = param.ClassSelector(TransferFn,default=IdentityTF(),
        doc="Accepts a TransferFn that will be applied to each CF individually.")

    def __call__(self, iterator, **params):
        # Skip all work when the transfer function is the identity.
        if type(self.single_cf_fn) is not IdentityTF:
            single_cf_fn = self.single_cf_fn
            for cf,i in iterator():
                single_cf_fn(cf.weights)
                # Invalidate any cached norm_total; the weights just changed.
                del cf.norm_total
class CFPOF_Identity(CFPOutputFn):
    """
    CFPOutputFn that leaves the CFs unchanged.

    Must never be changed or subclassed, because it might never
    be called. (I.e., it could simply be tested for and skipped.)
    """
    single_cf_fn = param.ClassSelector(TransferFn,default=IdentityTF(),constant=True)

    def __call__(self, iterator, **params):
        # Deliberately a no-op; callers may skip invoking it entirely.
        pass
# CB: need to make usage of 'src' and 'input_sheet' consistent between
# ConnectionField and CFProjection (i.e. pick one of them).
class CFProjection(Projection):
"""
A projection composed of ConnectionFields from a Sheet into a ProjectionSheet.
CFProjection computes its activity using a response_fn of type
CFPResponseFn (typically a CF-aware version of mdot) and output_fns
(typically none). The initial contents of the
ConnectionFields mapping from the input Sheet into the target
ProjectionSheet are controlled by the weights_generator, cf_shape,
and weights_output_fn parameters, while the location of the
ConnectionField is controlled by the coord_mapper parameter.
Any subclass has to implement the interface
activate(self,input_activity) that computes the response from the
input and stores it in the activity array.
"""
response_fn = param.ClassSelector(CFPResponseFn,
default=CFPRF_Plugin(),
doc='Function for computing the Projection response to an input pattern.')
cf_type = param.Parameter(default=ConnectionField,constant=True,
doc="Type of ConnectionField to use when creating individual CFs.")
# JPHACKALERT: Not all support for null CFs has been implemented.
# CF plotting and C-optimized CFPxF_ functions need
# to be fixed to support null CFs without crashing.
allow_null_cfs = param.Boolean(default=False,
doc="Whether or not the projection can have entirely empty CFs")
nominal_bounds_template = BoundingRegionParameter(
default=BoundingBox(radius=0.1),doc="""
Bounds defining the Sheet area covered by a prototypical ConnectionField.
The true bounds will differ depending on the density (see create_slice_template()).""")
weights_generator = param.ClassSelector(PatternGenerator,
default=patterngenerator.Constant(),constant=True,
doc="Generate initial weights values.")
cf_shape = param.ClassSelector(PatternGenerator,
default=patterngenerator.Constant(),constant=True,
doc="Mask pattern to define the shape of the connection fields.")
same_cf_shape_for_all_cfs = param.Boolean(default=True,doc="""
Whether or not to share a single cf_shape mask for all CFs.
If True, the cf_shape is evaluated only once and shared for
all CFs, which saves computation time and memory. If False,
the cf_shape is evaluated once for each CF, allowing each to
have its own shape.""")
learning_fn = param.ClassSelector(CFPLearningFn,
default=CFPLF_Plugin(),
doc='Function for computing changes to the weights based on one activation step.')
# JABALERT: Shouldn't learning_rate be owned by the learning_fn?
learning_rate = param.Number(default=0.0,softbounds=(0,100),doc="""
Amount of learning at each step for this projection, specified
in units that are independent of the density of each Sheet.""")
weights_output_fns = param.HookList(default=[CFPOF_Plugin()],
class_=CFPOutputFn,
doc='Functions applied to each CF after learning.')
strength = param.Number(default=1.0,doc="""
Global multiplicative scaling applied to the Activity of this Sheet.""")
coord_mapper = param.ClassSelector(CoordinateMapperFn,
default=IdentityMF(),
doc='Function to map a projected coordinate into the target sheet.')
# CEBALERT: this is temporary (allows c++ matching in certain
# cases). We will allow the user to override the mask size, but
# by offering a scaling parameter.
autosize_mask = param.Boolean(
default=True,constant=True,precedence=-1,doc="""
Topographica sets the mask size so that it is the same as the connection field's
size, unless this parameter is False - in which case the user-specified size of
the cf_shape is used. In normal usage of Topographica, this parameter should
remain True.""")
mask_threshold = param.Number(default=0.5,constant=True,doc="""
If a unit is above this value in the cf_shape mask, it is
included; otherwise it is excluded from the mask.""")
apply_output_fns_init=param.Boolean(default=True,doc="""
Whether to apply the output function to connection fields (e.g. for
normalization) when the CFs are first created.""")
min_matrix_radius = param.Integer(default=1,bounds=(0,None),doc="""
Enforced minimum for radius of weights matrix.
The default of 1 gives a minimum matrix of 3x3. 0 would
allow a 1x1 matrix.""")
precedence = param.Number(default=0.8)
def __init__(self,initialize_cfs=True,**params):
    """
    Initialize the Projection with a set of cf_type objects
    (typically ConnectionFields), each located at the location
    in the source sheet corresponding to the unit in the target
    sheet. The cf_type objects are stored in the 'cfs' array.

    The nominal_bounds_template specified may be altered: the
    bounds must be fitted to the Sheet's matrix, and the weights
    matrix must have odd dimensions. These altered bounds are
    passed to the individual connection fields.

    A mask for the weights matrix is constructed. The shape is
    specified by cf_shape; the size defaults to the size
    of the nominal_bounds_template.

    If initialize_cfs is False, no ConnectionFields are created
    (the caller is then responsible for calling _create_cfs()).
    """
    super(CFProjection,self).__init__(**params)

    # Freeze any dynamic time dependence in the weights generator(s),
    # so that CF creation is not time-varying.
    self.weights_generator.set_dynamic_time_fn(None,sublistattr='generators')
    # get the actual bounds_template by adjusting a copy of the
    # nominal_bounds_template to ensure an odd slice, and to be
    # cropped to sheet if necessary
    self._slice_template = Slice(copy(self.nominal_bounds_template),
                                 self.src,force_odd=True,
                                 min_matrix_radius=self.min_matrix_radius)

    self.bounds_template = self._slice_template.compute_bounds(self.src)

    # Shared weights mask; reused by every CF when
    # same_cf_shape_for_all_cfs is True (see _create_cf()).
    self.mask_template = _create_mask(self.cf_shape,self.bounds_template,
                                      self.src,self.autosize_mask,
                                      self.mask_threshold)

    if initialize_cfs:
        self._create_cfs()

    ### JCALERT! We might want to change the default value of the
    ### input value to self.src.activity; but it fails, raising a
    ### type error. It probably has to be clarified why this is
    ### happening
    # input_buffer stays None until the first activate(); learn() is a
    # no-op until then.
    self.input_buffer = None
    self.activity = array(self.dest.activity)
def _generate_coords(self):
    """Return the src-sheet (x,y) coordinate grids for every dest unit,
    after passing them through the coord_mapper."""
    X,Y = self.dest.sheetcoords_of_idx_grid()
    # Vectorize the scalar coordinate mapper so it can be applied to
    # the whole grid at once.
    mapper = simple_vectorize(self.coord_mapper,
                              num_outputs=2,
                              # CB: could switch to float32?
                              output_type=float)
    return mapper(X,Y)
# CB: should be _initialize_cfs() since we already have 'initialize_cfs' flag?
def _create_cfs(self):
    """Build the 2D array of ConnectionFields (cfs) and its flat list view (flatcfs)."""
    make_cf = simple_vectorize(self._create_cf)
    self.cfs = make_cf(*self._generate_coords())
    self.flatcfs = list(self.cfs.flat)
def _create_cf(self,x,y):
"""
Create a ConnectionField at x,y in the src sheet.
"""
# (to restore would need to have an r,c counter)
# self.debug("Creating CF(%d,%d) from src (%.3f,%.3f) to dest (%.3f,%.3f)"%(r,c,x_cf,y_cf,x,y))
try:
if self.apply_output_fns_init:
ofs = [wof.single_cf_fn for wof in self.weights_output_fns]
else:
ofs = []
if self.same_cf_shape_for_all_cfs:
mask_template = self.mask_template
else:
mask_template = _create_mask(self.cf_shape,self.bounds_template,
self.src,self.autosize_mask,
self.mask_threshold)
CF = self.cf_type(self.src,x=x,y=y,
template=self._slice_template,
weights_generator=self.weights_generator,
mask=mask_template,
output_fns=ofs,
min_matrix_radius=self.min_matrix_radius)
except NullCFError:
if self.allow_null_cfs:
CF = None
else:
raise
return CF
def n_units(self):
    """Return the number of unmasked units in a typical ConnectionField."""
    ### JCALERT! Right now, we take the number of units at the
    ### center of the cfs matrix. It would be more reliable to
    ### calculate it directly from the target sheet density and
    ### the weight_bounds. Example:
    #center_r,center_c = sheet2matrixidx(0,0,bounds,xdensity,ydensity)
    # Use floor division so the indices are ints under both Python 2
    # and Python 3 (plain / would produce a float index in Py3).
    rows,cols=self.cfs.shape
    cf = self.cfs[rows//2,cols//2]
    # Count the nonzero entries of the CF's mask.
    return len(cf.mask.ravel().nonzero()[0]) # CB: newer numpy array has .flatnonzero()
def cf(self,r,c):
    """Return the specified ConnectionField"""
    # Deprecated accessor kept for backward compatibility; callers
    # should index cfs directly.
    # CB: should we offer convenience cf(x,y) (i.e. sheetcoords) method instead?
    self.warning("CFProjection.cf(r,c) is deprecated: use cfs[r,c] instead")
    connection_field = self.cfs[r,c]
    return connection_field
def cf_bounds(self,r,c):
    """Return the bounds of the specified ConnectionField."""
    target_cf = self.cfs[r,c]
    return target_cf.get_bounds(self.src)
def get_view(self, sheet_x, sheet_y, timestamp):
    """
    Return a single connection field UnitView, for the unit
    located nearest to sheet coordinate (sheet_x,sheet_y).
    """
    # Locate the dest unit nearest the requested sheet coordinate.
    r,c = self.dest.sheet2matrixidx(sheet_x,sheet_y)
    unit_cf = self.cfs[r,c]

    # Paste the CF's weights into a src-sheet-sized array of zeros.
    view_data = zeros(self.src.activity.shape,Float)
    r1,r2,c1,c2 = unit_cf.input_sheet_slice
    view_data[r1:r2,c1:c2] = unit_cf.weights

    # CB: the following would be equivalent with Slice __call__
    # cf = self.cf(self.dest.sheet2matrixidx(sheet_x,sheet_y))
    # matrix_data = numpy.zeros(self.src.activity.shape,Numeric.Float)
    # matrix_data[cf.input_sheet_slice()]=cf.weights

    return UnitView((view_data,self.src.bounds),sheet_x,sheet_y,self,timestamp)
def activate(self,input_activity):
    """Activate using the specified response_fn and output_fn."""
    # Remember the input so learn() can use it later.
    self.input_buffer = input_activity
    # Clear in place, preserving the identity of the activity array
    # (other objects may hold references to it).
    self.activity *= 0.0
    self.response_fn(MaskedCFIter(self), input_activity, self.activity, self.strength)
    for output_fn in self.output_fns:
        output_fn(self.activity)
# CEBALERT: should add active_units_mask to match
# apply_learn_output_fns.
def learn(self):
    """
    For a CFProjection, learn consists of calling the learning_fn.
    """
    # Learning is performed only if the input_buffer has already been set,
    # i.e. there is an input to the Projection.  Use an identity check:
    # input_buffer is normally a numpy array, and `!= None` would trigger
    # an elementwise comparison (ambiguous truth value / error on newer
    # numpy) instead of the intended None test.
    if self.input_buffer is not None:
        self.learning_fn(MaskedCFIter(self),self.input_buffer,self.dest.activity,self.learning_rate)
# CEBALERT: called 'learn' output fns here, but called 'weights' output fns
# elsewhere (mostly). Change all to 'learn'?
def apply_learn_output_fns(self,active_units_mask=True):
    """
    Apply the weights_output_fns to each unit.

    If active_units_mask is True, inactive units will be skipped.
    """
    # The iterator is stateless between calls, so one instance can be
    # handed to every output function.
    iterator = MaskedCFIter(self,active_units_mask=active_units_mask)
    for output_fn in self.weights_output_fns:
        output_fn(iterator)
# CEBALERT: see gc alert in simulation.__new__
def _cleanup(self):
for cf in self.cfs.flat:
# cf could be None or maybe something else
if hasattr(cf,'input_sheet'):
cf.input_sheet=None
if hasattr(cf,'input_sheet_slice'):
cf.input_sheet_slice=None
if hasattr(cf,'weights_slice'):
cf.weights_slice=None
def n_bytes(self):
    """Return an estimate of this projection's memory usage, in bytes."""
    # Could also count the input_sheet_slice
    # (Removed unused rows,cols locals; generator expression avoids
    # building an intermediate list.)
    return super(CFProjection,self).n_bytes() + \
        sum(cf.weights.nbytes + cf.mask.nbytes
            for cf,i in CFIter(self,ignore_sheet_mask=True)())
def n_conns(self):
    """Return the number of connections in this projection.

    Counts non-masked values, if mask is available; otherwise counts
    weights as connections if nonzero.
    """
    # (Removed unused rows,cols locals; generator expression avoids
    # building an intermediate list.)
    return sum(len((cf.mask if cf.mask is not None else cf.weights).ravel().nonzero()[0])
               for cf,i in MaskedCFIter(self)())
# CEB: have not yet decided proper location for this method
# JAB: should it be in PatternGenerator?
def _create_mask(shape,bounds_template,sheet,autosize=True,threshold=0.5):
    """
    Create the mask (see ConnectionField.__init__()).
    """
    # When the pattern has a size parameter and autosizing is on, make
    # the mask exactly cover the weights matrix bounds.
    # NOTE: this mutates the shared `shape` object.
    if autosize and hasattr(shape, 'size'):
        left,bottom,right,top = bounds_template.lbrt()
        shape.size = top-bottom
        shape.aspect_ratio = (right-left)/shape.size

    # Draw the pattern centered on the unit nearest sheet coordinate (0,0).
    center_r,center_c = sheet.sheet2matrixidx(0,0)
    center_x,center_y = sheet.matrixidx2sheet(center_r,center_c)
    mask = shape(x=center_x,y=center_y,
                 bounds=bounds_template,
                 xdensity=sheet.xdensity,
                 ydensity=sheet.ydensity)

    # Zero out every entry below the threshold; above-threshold entries
    # keep their original value.
    mask = where(mask>=threshold,mask,0.0)

    # CB: unnecessary copy (same as for weights)
    return mask.astype(weight_type)
import numpy
class CFIter(object):
    """
    Iterator to walk through all ConnectionFields of all neurons in
    the destination Sheet of the given CFProjection. Each iteration
    yields the tuple (cf,i) where cf is the ConnectionField at
    position i in the projection's flatcfs list.

    If active_units_mask is True, inactive units will be skipped. If
    ignore_sheet_mask is True, even units excluded by the sheet mask
    will be included.
    """
    # CB: as noted elsewhere, rename active_units_mask (to e.g.
    # ignore_inactive_units).
    def __init__(self,cfprojection,active_units_mask=False,ignore_sheet_mask=False):
        self.proj = cfprojection
        self.active_units_mask = active_units_mask
        self.ignore_sheet_mask = ignore_sheet_mask

    # CB: if there were a method on proj to access "dest.activity",
    # then for MultiprocessorCFProjection, that method would be
    # overridden to return the appropriate array. Similarly for
    # "dest.mask".
    def __nomask(self):
        # All-ones array: every unit is to be processed.  Matches the
        # activity dtype for the benefit of the optimized C functions;
        # could just be flat.
        return numpy.ones(self.proj.dest.shape,dtype=self.proj.dest.activity.dtype)

    # CEBALERT: make _
    def get_sheet_mask(self):
        if self.ignore_sheet_mask:
            return self.__nomask()
        return self.proj.dest.mask.data

    # CEBALERT: make _ (and probably drop '_mask').
    def get_active_units_mask(self):
        if self.active_units_mask and self.proj.dest.allow_skip_non_responding_units:
            return self.proj.dest.activity
        return self.__nomask()

    # CEBALERT: rename to something like
    def get_overall_mask(self):
        """
        Return an array indicating whether or not each unit should be
        processed.
        """
        # JPHACKALERT: should really check for the existence of the
        # mask rather than its type (see original notes about
        # higher-order projections whose dest is a CF, not a sheet).
        #
        # CB: note that it's faster for our optimized C functions to
        # combine the masks themselves, rather than using this method.
        return numpy.logical_and(self.get_sheet_mask(),
                                 self.get_active_units_mask())

    # CB: should probably remove this; I don't know if I actually used
    # it.
    def get_shape(self):
        return self.proj.dest.shape

    def __call__(self):
        process = self.get_overall_mask()
        for i,cf in enumerate(self.proj.flatcfs):
            if cf is None:
                continue
            if process.flat[i]:
                yield cf,i
# CEBALERT: remove this once MaskedCFIter has been replaced elsewhere.
# Backwards-compatible alias: MaskedCFIter's behavior was folded into CFIter
# (pass active_units_mask=True to get the old masked behavior).
MaskedCFIter = CFIter
### We don't really need this class; its methods could probably be
### moved up to ProjectionSheet, because they may in fact be valid for
### all ProjectionSheets. But we're leaving it here, because it is
### likely to be useful in the future.
class CFSheet(ProjectionSheet):
    """
    A ProjectionSheet providing access to the ConnectionFields in its CFProjections.

    CFSheet is a Sheet built from units indexed by Sheet coordinates
    (x,y). Each unit can have one or more ConnectionFields on another
    Sheet (via this sheet's CFProjections). Thus CFSheet is a more
    concrete version of a ProjectionSheet; a ProjectionSheet does not
    require that there be units or weights of any kind. Unless you
    need access to the underlying ConnectionFields for visualization
    or analysis, CFSheet and ProjectionSheet are interchangeable.
    """

    measure_maps = param.Boolean(True,doc="""
        Whether to include this Sheet when measuring various maps to create SheetViews.""")

    precedence = param.Number(0.5)

    def update_unit_view(self,x,y,proj_name=''):
        """
        Creates the list of UnitView objects for a particular unit in this CFSheet.
        (There is one UnitView for each Projection to this CFSheet).

        Each UnitView is then added to the sheet_views of its source sheet.
        It returns the list of all UnitViews for the given unit.
        """
        # Consider every incoming projection; restrict to proj_name if given.
        for p in self.in_connections:
            if not isinstance(p,CFProjection):
                self.debug("Skipping non-CFProjection "+p.name)
            elif proj_name == '' or p.name==proj_name:
                v = p.get_view(x,y,self.simulation.time())
                src = v.projection.src
                # Views are stored on the SOURCE sheet, keyed by
                # ('Weights', dest name, projection name, x, y).
                key = ('Weights',v.projection.dest.name,v.projection.name,x,y)
                v.proj_src_name = v.projection.src.name
                src.sheet_views[key] = v

    ### JCALERT! This should probably be deleted...
    def release_unit_view(self,x,y):
        # NOTE(review): this 3-element key ('Weights',x,y) does not match
        # the 5-element key written by update_unit_view above -- looks
        # stale; confirm against release_sheet_view's expectations.
        self.release_sheet_view(('Weights',x,y))
class ResizableCFProjection(CFProjection):
    """
    A CFProjection with resizable weights.
    """
    # Less efficient memory usage than CFProjection because it stores
    # the (x,y) position of each ConnectionField.
    def _generate_coords(self):
        # same as super's, but also stores the coords (needed later by
        # change_bounds() to recompute each CF's position).
        # CB: this is storing redundant info because generate_coords()
        # returns output from mgrid. Might be better to store the 1d x
        # and y coords, and generate the grids when needed?
        self.X_cf,self.Y_cf = super(ResizableCFProjection,self)._generate_coords()
        return self.X_cf,self.Y_cf

    ### This could be changed into a special __set__ method for
    ### bounds_template, instead of being a separate function, but
    ### having it be explicit like this might be clearer.
    ###
    ### This implementation is fairly slow, and for some algorithms
    ### that rely on changing the bounds frequently, it may be worth
    ### re-implementing it in C.
    def change_bounds(self, nominal_bounds_template):
        """
        Change the bounding box for all of the ConnectionFields in this Projection.

        Calls change_bounds() on each ConnectionField.

        Currently only allows reducing the size, but should be
        extended to allow increasing as well.
        """
        # Fit the requested bounds to the source sheet's grid, exactly
        # as CFProjection.__init__ does for the initial bounds.
        slice_template = Slice(copy(nominal_bounds_template),
                               self.src,force_odd=True,
                               min_matrix_radius=self.min_matrix_radius)

        bounds_template = slice_template.compute_bounds(self.src)

        # Only shrinking is supported: bail out unless the new bounds
        # are strictly inside the current ones.
        if not self.bounds_template.containsbb_exclusive(bounds_template):
            if self.bounds_template.containsbb_inclusive(bounds_template):
                self.debug('Initial and final bounds are the same.')
            else:
                self.warning('Unable to change_bounds; currently allows reducing only.')
            return

        # it's ok so we can store the bounds and resize the weights
        mask_template = _create_mask(self.cf_shape,bounds_template,self.src,
                                     self.autosize_mask,self.mask_threshold)

        self.nominal_bounds_template = nominal_bounds_template
        self.bounds_template = bounds_template
        self._slice_template = slice_template

        cfs = self.cfs
        rows,cols = cfs.shape
        output_fns = [wof.single_cf_fn for wof in self.weights_output_fns]

        # Resize every CF in place, using the stored per-unit coords.
        for r in xrange(rows):
            for c in xrange(cols):
                xcf,ycf = self.X_cf[0,c],self.Y_cf[r,0]
                # CB: listhack - loop is candidate for replacement by numpy fn
                self._change_cf_bounds(cfs[r,c],input_sheet=self.src,
                                       x=xcf,y=ycf,
                                       template=slice_template,
                                       mask=mask_template,
                                       output_fns=output_fns,
                                       min_matrix_radius=self.min_matrix_radius)

    def change_density(self, new_wt_density):
        """
        Rescales the weight matrix in place, interpolating or resampling as needed.

        Not yet implemented.
        """
        raise NotImplementedError

    def _change_cf_bounds(self,cf,input_sheet,x,y,template,mask,output_fns=None,min_matrix_radius=1):
        """
        Change the bounding box for this ConnectionField.

        Discards weights or adds new (zero) weights as necessary,
        preserving existing values where possible.

        Currently only supports reducing the size, not increasing, but
        should be extended to support increasing as well.

        Note that the supplied template will be modified, so if you're
        also using them elsewhere you should pass copies.
        """
        if output_fns is None:
            output_fns = []

        # CEBALERT: re-write to allow arbitrary resizing
        # Remember the old slice before recomputing it for the new bounds.
        or1,or2,oc1,oc2 = cf.input_sheet_slice

        weights_slice = cf._create_input_sheet_slice(input_sheet,x,y,copy(template),min_matrix_radius)
        r1,r2,c1,c2 = cf.input_sheet_slice

        # Only do work if the slice actually changed.
        if not (r1 == or1 and r2 == or2 and c1 == oc1 and c2 == oc2):
            # CB: note that it's faster to copy (i.e. replacing copy=1 with copy=0
            # below slows down change_bounds().
            cf.weights = array(cf.weights[r1-or1:r2-or1,c1-oc1:c2-oc1],copy=1)
            # (so the obvious choice,
            # cf.weights=cf.weights[r1-or1:r2-or1,c1-oc1:c2-oc1],
            # is also slower).
            cf.mask = weights_slice.submatrix(mask)
            cf.mask = array(cf.mask,copy=1) # CB: why's this necessary?
            # (see ALERT in __init__)

            cf.weights *= cf.mask
            for of in output_fns:
                of(cf.weights)
            # Weights changed, so any cached norm_total is stale.
            del cf.norm_total
    
# Made CFIter store activity, flatcfs, and mask (in preparation for it no longer storing the projection).
"""
ConnectionField and associated classes.
This module defines some basic classes of objects used to create
simulations of cortical sheets that take input through connection
fields that project from other cortical sheets (or laterally from
themselves).
ConnectionField: Holds a single connection field within a
CFProjection.
CFProjection: A set of ConnectionFields mapping from a Sheet into a
ProjectionSheet.
CFSheet: A subclass of ProjectionSheet that provides an interface to
the underlying ConnectionFields in any projection of type
CFProjection.
$Id$
"""
__version__ = '$Revision$'
from copy import copy
from numpy import abs,array,zeros,where
from numpy.oldnumeric import Float,Float32
import param
import patterngenerator
from patterngenerator import PatternGenerator
from functionfamily import TransferFn,IdentityTF
from functionfamily import LearningFn,Hebbian,IdentityLF
from functionfamily import ResponseFn,DotProduct
from functionfamily import CoordinateMapperFn,IdentityMF
from projection import Projection,ProjectionSheet, SheetMask
from sheetcoords import Slice
from sheetview import UnitView
from boundingregion import BoundingBox,BoundingRegionParameter
# CEBALERT: shouldn't be necessary, and depends on the implementation
# of numpy.vectorize
def simple_vectorize(fn,num_outputs=1,output_type=object,doc=''):
    """
    Simplify creation of numpy.vectorize(fn) objects where all outputs
    have the same typecode.

    fn may be a plain function or a callable object (its __call__ is
    then inspected).  Raises TypeError if no code object can be found.
    """
    from numpy import vectorize,dtype
    import inspect

    # This function exists because I cannot figure out how I am
    # supposed to stop vectorize() calling fn one extra time at the
    # start. (It's supposed to call an extra time at the start to
    # determine the output types UNLESS the output types are
    # specified.)
    vfn = vectorize(fn,doc=doc)

    # stop vectorize calling fn an extra time at the start
    # (works for our current numpy (1.1.1))
    vfn.nout=num_outputs # number of outputs of fn
    # dtype(...).char works on old and new numpy alike
    # (numpy.sctype2char was removed in numpy 2.0).
    output_typecode = dtype(output_type).char
    vfn.otypes=output_typecode*num_outputs # typecodes of outputs of fn

    # Locate fn's code object, supporting both Python 2 (func_code)
    # and Python 3 (__code__), and callable objects via __call__.
    # Use explicit getattr lookups instead of a bare except:, which
    # would have swallowed unrelated errors (even KeyboardInterrupt).
    fn_code = getattr(fn,'func_code',None) or getattr(fn,'__code__',None)
    if fn_code is None:
        call = getattr(fn,'__call__',None)
        fn_code = getattr(call,'func_code',None) or getattr(call,'__code__',None)
    if fn_code is None:
        raise TypeError("Couldn't find code of %s"%fn)

    fn_args = inspect.getargs(fn_code)[0]
    extra = 1 if fn_args[0]=='self' else 0
    vfn.lastcallargs=len(fn_args)-extra # num args of fn

    return vfn
# Specified explicitly when creating weights matrix - required
# for optimized C functions.
# (Single-precision float; every CF's weights and mask use this dtype.)
weight_type = Float32
class NullCFError(ValueError):
    """
    Error thrown when trying to create an empty CF.
    """
    def __init__(self,x,y,input,rows,cols):
        # Keep the message format identical; build it first for clarity.
        message = "ConnectionField at (%s,%s) (input_sheet=%s) has a zero-sized weights matrix (%s,%s); you may need to supply a larger bounds_template or increase the density of the sheet."%(x,y,input,rows,cols)
        ValueError.__init__(self,message)
class ConnectionField(object):
    """
    A set of weights on one input Sheet.

    Each ConnectionField contributes to the activity of one unit on
    the output sheet, and is normally used as part of a Projection
    including many other ConnectionFields.
    """
    # __slots__ keeps per-instance memory low; projections can contain
    # very large numbers of CFs.
    __slots__ = ['weights','input_sheet_slice','mask',
                 '_has_norm_total','_norm_total']

    def __get_norm_total(self):
        """
        Return the stored norm_value, if any, or else the current sum of the weights.

        See the norm_total property for more details.
        """
        # The actual value is cached in _norm_total.
        if self._has_norm_total:
            return self._norm_total
        else:
            return abs(self.weights).sum()

    def __set_norm_total(self,new_norm_total):
        """
        Set an explicit value to be returned by norm_total.

        See the norm_total property for more details.
        """
        self._has_norm_total = True
        self._norm_total = new_norm_total

    def __del_norm_total(self):
        """
        Delete any cached norm_total that may have been set.

        See the norm_total property for more details.
        """
        # Only clears the flag; _norm_total itself is left in place.
        self._has_norm_total = False

    # CB: Accessing norm_total as a property from the C code takes
    # about 2% of run time for 90 iterations of lissom_oo_or. (As of
    # r8139, using floating-point simulation time.)
    norm_total = property(__get_norm_total,__set_norm_total,__del_norm_total,
        """
        The norm_total property returns a value useful in computing
        a sum-based weight normalization.

        By default, the value returned is simply the current sum of
        the connection weights. However, another value can be
        substituted by setting norm_total explicitly, and this cached
        value will then be returned instead.

        This mechanism has two main purposes. First, it allows a
        learning function to cache the sum value for an output
        function to use later without computation, which can result in
        significant time savings. Second, the extra level of
        indirection allows the sum value to be manipulated before it
        is used, to implement operations like joint normalization
        across corresponding CFs in multiple Projections.

        Apart from such cases, norm_total can be ignored.

        Note that every person who uses a class that sets or gets
        norm_total must be very careful to ensure that stale values
        will never be accessed. A good way to do this is to make sure
        that the value is only set just before it will be used, and
        deleted as soon as it has been accessed.

        WARNING: Any c-optimized code can bypass this property and
        access directly _has_norm_total, _norm_total
        """)

    def get_bounds(self,input_sheet):
        # The bounds of this CF expressed on the given input sheet.
        return self.input_sheet_slice.compute_bounds(input_sheet)

    # CEBALERT:
    # template and mask: usually created ONCE by CFProjection and
    # specified as a Slice and array (respectively). Otherwise,
    # can be specified as BoundingBox and patterngenerator.
    # Note that BoundingBox() is ok for a default even though it's
    # mutable because we copy it inside init. Constant() is ok too
    # because mask and weights_generator are not modified.
    def __init__(self,input_sheet,x=0.0,y=0.0,template=BoundingBox(radius=0.1),
                 weights_generator=patterngenerator.Constant(),
                 mask=patterngenerator.Constant(),
                 output_fns=None,min_matrix_radius=1):
        """
        Create weights at the specified (x,y) location on the
        specified input_sheet.

        The supplied template (if a BoundingRegion) is converted to a
        Slice, moved to the specified (x,y) location, and then the
        weights pattern is drawn inside by the weights_generator.

        Note that if the appropriate template Slice is already known,
        then it can be passed in instead of a BoundingRegion template.
        This slice will then be used directly, instead of converting
        the template into a Slice.

        The supplied template object itself will not be modified (it
        is copied before use).

        The mask allows the weights to be limited to being non-zero in
        a subset of the rectangular weights area. The actual mask
        used is a view of the given mask created by cropping to the
        boundaries of the input_sheet, so that the weights all
        correspond to actual locations in the input sheet. For
        instance, if a circular pattern of weights is desired, the
        mask should have a disk-shaped pattern of elements with value
        1, surrounded by elements with the value 0. If the CF extends
        over the edge of the input sheet then the weights will
        actually be half-moon (or similar) rather than circular.
        """
        #print "Create CF",input_sheet.name,x,y,"template=",template,"wg=",weights_generator,"m=",mask,"ofs=",output_fns,"min r=",min_matrix_radius

        # Copy so the caller's template is never modified.
        template = copy(template)

        if not isinstance(template,Slice):
            template = Slice(template,input_sheet,force_odd=True,
                             min_matrix_radius=min_matrix_radius)

        # Note: if passed in, mask is shared between CFs (but not if created here)
        if not hasattr(mask,'view'):
            mask = _create_mask(mask,template.compute_bounds(input_sheet),
                                # CEBALERT: it's not really worth adding more ALERTs on this
                                # topic, but...there's no way for the CF to control autosize
                                # and threshold.
                                input_sheet,True,0.5)

        # CB: has to be set for C code. Can't be initialized at the
        # class level, or it would become a read-only class attribute
        # (because it's a slot:
        # http://docs.python.org/reference/datamodel.html). Can we
        # somehow avoid having to think about _has_norm_total in the
        # python code? Could the C code initialize this value?
        self._has_norm_total=False

        if output_fns is None:
            output_fns = []

        # CEBALERT: now even more confusing; weights_slice is
        # different from input_sheet_slice. At least need to rename.
        weights_slice = self._create_input_sheet_slice(input_sheet,x,y,template,min_matrix_radius)

        # CBNOTE: this would be clearer (but not perfect, and probably slower)
        # m = mask_template[self.weights_slice()]
        self.mask = weights_slice.submatrix(mask)  # view of original mask
        self.mask = array(self.mask,copy=1)  # CEBALERT: why is this necessary?
        # (without it, optimized learning function creates artifacts in CFs at
        # left and right edges of sheet, at some densities)

        # CBENHANCEMENT: might want to do something about a size
        # that's specified (right now the size is assumed to be that
        # of the bounds)
        # shouldn't be extra computation of boundingbox because it's gone from Slice.__init__; could avoid extra lookups by getting straight from slice
        w = weights_generator(x=x,y=y,bounds=self.get_bounds(input_sheet),
                              xdensity=input_sheet.xdensity,
                              ydensity=input_sheet.ydensity,
                              mask=self.mask)

        # CEBALERT: unnecessary copy! Pass type to PG & have it draw
        # in that. (Should be simple, except making it work for all
        # the PG subclasses that override array creation in various
        # ways (producing or using inconsistent types) turned out to
        # be too painful.)
        self.weights = w.astype(weight_type)

        # CEBHACKALERT: the system of masking through multiplication
        # by 0 works for now, while the output_fns are all
        # multiplicative. But in the long run we need a better way to
        # apply the mask. The same applies anywhere the mask is used,
        # including in learningfn/. We should investigate masked
        # arrays (from numpy).
        for of in output_fns:
            of(self.weights)

    # CB: can this be renamed to something better?
    def _create_input_sheet_slice(self,input_sheet,x,y,template,min_matrix_radius):
        """
        Create the input_sheet_slice, which provides the appropriate
        Slice for this CF on the input_sheet (as well as providing
        this CF's exact bounds).

        Also creates the weights_slice, which provides the Slice for
        this weights matrix (in case it must be cropped at an edge).
        """
        # copy required because the template gets modified here but
        # needs to be used again
        input_sheet_slice = copy(template)
        input_sheet_slice.positionedcrop(x,y,input_sheet)
        input_sheet_slice.crop_to_sheet(input_sheet)

        # weights matrix cannot have a zero-sized dimension (could
        # happen at this stage because of cropping)
        nrows,ncols = input_sheet_slice.shape_on_sheet()
        if nrows<1 or ncols<1:
            raise NullCFError(x,y,input_sheet,nrows,ncols)

        self.input_sheet_slice = input_sheet_slice

        # not copied because we don't use again
        template.positionlesscrop(x,y,input_sheet)
        return template

    # CEBALERT: unnecessary method; can use something like
    # activity[cf.input_sheet_slice()]
    def get_input_matrix(self, activity):
        # CBNOTE: again, this might be clearer (but probably slower):
        # activity[self.input_sheet_slice()]
        return self.input_sheet_slice.submatrix(activity)
class CFPResponseFn(param.Parameterized):
    """
    Map an input activity matrix into an output matrix using the CFs
    in a CFProjection.

    Subclasses are callable function objects: given an input pattern
    and a set of ConnectionField objects, they compute a response
    matrix, typically as part of the activation function for a neuron
    (computing activation for one Projection).  They must support the
    call signature below and are assumed to modify the activity
    matrix in place.
    """
    __abstract=True

    def __call__(self, iterator, input_activity, activity, strength, **params):
        # Abstract: concrete subclasses must implement the response.
        raise NotImplementedError
class CFPRF_Plugin(CFPResponseFn):
    """
    Generic large-scale response function based on a simple single-CF function.

    Applies the single_cf_fn to each CF in turn. For the default
    single_cf_fn of DotProduct(), does a basic dot product of each CF with the
    corresponding slice of the input array. Easy to extend with any
    arbitrary single-CF response function, but likely to be slow to run.

    The single_cf_fn must be a function f(X,W) that takes two
    identically shaped matrices X (the input) and W (the
    ConnectionField weights) and computes a scalar activation value
    based on those weights.
    """
    single_cf_fn = param.ClassSelector(ResponseFn,default=DotProduct(),
        doc="Accepts a ResponseFn that will be applied to each CF individually.")

    def __call__(self, iterator, input_activity, activity, strength):
        # Bind locally to avoid repeated attribute lookups in the loop.
        respond = self.single_cf_fn
        for cf,i in iterator():
            input_patch = cf.input_sheet_slice.submatrix(input_activity)
            activity.flat[i] = respond(input_patch,cf.weights)
        activity *= strength
class CFPLearningFn(param.Parameterized):
    """
    Compute new CFs for a CFProjection based on input and output activity values.

    Subclasses are callable function objects: given input and output
    patterns and a set of ConnectionField objects, they compute a new
    set of CFs.  Used for updating the weights of one CFProjection.
    They must support the call signature below.
    """
    __abstract = True

    def constant_sum_connection_rate(self,proj,learning_rate):
        """
        Return the learning rate for a single connection assuming that
        the total rate is to be divided evenly among all the units in
        the connection field.
        """
        units = proj.n_units()
        return float(learning_rate)/units

    # JABALERT: Should the learning_rate be a parameter of this object instead of an argument?
    def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
        """
        Apply this learning function to the given set of ConnectionFields,
        and input and output activities, using the given learning_rate.
        """
        # Abstract: concrete subclasses must implement the update.
        raise NotImplementedError
class CFPLF_Identity(CFPLearningFn):
    """CFLearningFunction performing no learning."""
    single_cf_fn = param.ClassSelector(LearningFn,default=IdentityLF(),constant=True)

    def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
        # Deliberate no-op: weights are left untouched.
        return None
class CFPLF_Plugin(CFPLearningFn):
    """CFPLearningFunction applying the specified single_cf_fn to each CF."""
    single_cf_fn = param.ClassSelector(LearningFn,default=Hebbian(),
        doc="Accepts a LearningFn that will be applied to each CF individually.")

    def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
        """Apply the specified single_cf_fn to every CF."""
        # Per-connection rate: total rate split evenly across CF units.
        rate = self.constant_sum_connection_rate(iterator.proj,learning_rate)
        # Bind locally to avoid repeated attribute lookups in the loop.
        learn_one = self.single_cf_fn
        for cf,i in iterator():
            learn_one(cf.get_input_matrix(input_activity),
                      output_activity.flat[i], cf.weights,
                      rate)
            # Re-apply the CF's mask so masked-out weights stay zero.
            cf.weights *= cf.mask
class CFPOutputFn(param.Parameterized):
    """
    Type for an object that applies some operation (typically something
    like normalization) to all CFs in a CFProjection for which the
    specified mask (typically the activity at the destination of this
    projection) is nonzero.
    """
    __abstract = True

    def __call__(self, iterator, **params):
        """Operate on each CF for which the mask is nonzero."""
        # Abstract: concrete subclasses must implement the operation.
        raise NotImplementedError
class CFPOF_Plugin(CFPOutputFn):
    """
    Applies the specified single_cf_fn to each CF in the CFProjection
    for which the mask is nonzero.
    """
    single_cf_fn = param.ClassSelector(TransferFn,default=IdentityTF(),
        doc="Accepts a TransferFn that will be applied to each CF individually.")

    def __call__(self, iterator, **params):
        # Skip all work when the fn is exactly IdentityTF; note the
        # deliberate exact-type check, so subclasses are still applied.
        if type(self.single_cf_fn) is IdentityTF:
            return
        transform = self.single_cf_fn
        for cf,i in iterator():
            transform(cf.weights)
            # Weights changed, so any cached norm_total is stale.
            del cf.norm_total
class CFPOF_Identity(CFPOutputFn):
    """
    CFPOutputFn that leaves the CFs unchanged.

    Must never be changed or subclassed, because it might never
    be called. (I.e., it could simply be tested for and skipped.)
    """
    single_cf_fn = param.ClassSelector(TransferFn,default=IdentityTF(),constant=True)

    def __call__(self, iterator, **params):
        # Deliberate no-op; see class docstring.
        return None
# CB: need to make usage of 'src' and 'input_sheet' consistent between
# ConnectionField and CFProjection (i.e. pick one of them).
class CFProjection(Projection):
    """
    A projection composed of ConnectionFields from a Sheet into a ProjectionSheet.

    CFProjection computes its activity using a response_fn of type
    CFPResponseFn (typically a CF-aware version of mdot) and output_fns
    (typically none). The initial contents of the
    ConnectionFields mapping from the input Sheet into the target
    ProjectionSheet are controlled by the weights_generator, cf_shape,
    and weights_output_fn parameters, while the location of the
    ConnectionField is controlled by the coord_mapper parameter.

    Any subclass has to implement the interface
    activate(self,input_activity) that computes the response from the
    input and stores it in the activity array.
    """

    response_fn = param.ClassSelector(CFPResponseFn,
        default=CFPRF_Plugin(),
        doc='Function for computing the Projection response to an input pattern.')

    cf_type = param.Parameter(default=ConnectionField,constant=True,
        doc="Type of ConnectionField to use when creating individual CFs.")

    # JPHACKALERT: Not all support for null CFs has been implemented.
    # CF plotting and C-optimized CFPxF_ functions need
    # to be fixed to support null CFs without crashing.
    allow_null_cfs = param.Boolean(default=False,
        doc="Whether or not the projection can have entirely empty CFs")

    nominal_bounds_template = BoundingRegionParameter(
        default=BoundingBox(radius=0.1),doc="""
        Bounds defining the Sheet area covered by a prototypical ConnectionField.
        The true bounds will differ depending on the density (see create_slice_template()).""")

    weights_generator = param.ClassSelector(PatternGenerator,
        default=patterngenerator.Constant(),constant=True,
        doc="Generate initial weights values.")

    cf_shape = param.ClassSelector(PatternGenerator,
        default=patterngenerator.Constant(),constant=True,
        doc="Mask pattern to define the shape of the connection fields.")

    same_cf_shape_for_all_cfs = param.Boolean(default=True,doc="""
        Whether or not to share a single cf_shape mask for all CFs.

        If True, the cf_shape is evaluated only once and shared for
        all CFs, which saves computation time and memory. If False,
        the cf_shape is evaluated once for each CF, allowing each to
        have its own shape.""")

    learning_fn = param.ClassSelector(CFPLearningFn,
        default=CFPLF_Plugin(),
        doc='Function for computing changes to the weights based on one activation step.')

    # JABALERT: Shouldn't learning_rate be owned by the learning_fn?
    learning_rate = param.Number(default=0.0,softbounds=(0,100),doc="""
        Amount of learning at each step for this projection, specified
        in units that are independent of the density of each Sheet.""")

    weights_output_fns = param.HookList(default=[CFPOF_Plugin()],
        class_=CFPOutputFn,
        doc='Functions applied to each CF after learning.')

    strength = param.Number(default=1.0,doc="""
        Global multiplicative scaling applied to the Activity of this Sheet.""")

    coord_mapper = param.ClassSelector(CoordinateMapperFn,
        default=IdentityMF(),
        doc='Function to map a projected coordinate into the target sheet.')

    # CEBALERT: this is temporary (allows c++ matching in certain
    # cases). We will allow the user to override the mask size, but
    # by offering a scaling parameter.
    autosize_mask = param.Boolean(
        default=True,constant=True,precedence=-1,doc="""
        Topographica sets the mask size so that it is the same as the connection field's
        size, unless this parameter is False - in which case the user-specified size of
        the cf_shape is used. In normal usage of Topographica, this parameter should
        remain True.""")

    mask_threshold = param.Number(default=0.5,constant=True,doc="""
        If a unit is above this value in the cf_shape mask, it is
        included; otherwise it is excluded from the mask.""")

    apply_output_fns_init=param.Boolean(default=True,doc="""
        Whether to apply the output function to connection fields (e.g. for
        normalization) when the CFs are first created.""")

    min_matrix_radius = param.Integer(default=1,bounds=(0,None),doc="""
        Enforced minimum for radius of weights matrix.

        The default of 1 gives a minimum matrix of 3x3. 0 would
        allow a 1x1 matrix.""")

    precedence = param.Number(default=0.8)


    def __init__(self,initialize_cfs=True,**params):
        """
        Initialize the Projection with a set of cf_type objects
        (typically ConnectionFields), each located at the location
        in the source sheet corresponding to the unit in the target
        sheet. The cf_type objects are stored in the 'cfs' array.

        The nominal_bounds_template specified may be altered: the
        bounds must be fitted to the Sheet's matrix, and the weights
        matrix must have odd dimensions. These altered bounds are
        passed to the individual connection fields.

        A mask for the weights matrix is constructed. The shape is
        specified by cf_shape; the size defaults to the size
        of the nominal_bounds_template.
        """
        super(CFProjection,self).__init__(**params)

        # NOTE(review): presumably pins the weights generator to a fixed
        # time so initial weights are not time-varying — confirm against
        # param's Dynamic time_fn docs.
        self.weights_generator.set_dynamic_time_fn(None,sublistattr='generators')
        # get the actual bounds_template by adjusting a copy of the
        # nominal_bounds_template to ensure an odd slice, and to be
        # cropped to sheet if necessary
        self._slice_template = Slice(copy(self.nominal_bounds_template),
                                     self.src,force_odd=True,
                                     min_matrix_radius=self.min_matrix_radius)

        self.bounds_template = self._slice_template.compute_bounds(self.src)

        self.mask_template = _create_mask(self.cf_shape,self.bounds_template,
                                          self.src,self.autosize_mask,
                                          self.mask_threshold)

        if initialize_cfs:
            self._create_cfs()

        ### JCALERT! We might want to change the default value of the
        ### input value to self.src.activity; but it fails, raising a
        ### type error. It probably has to be clarified why this is
        ### happening
        self.input_buffer = None
        self.activity = array(self.dest.activity)


    def _generate_coords(self):
        # Build the grid of destination-sheet coordinates and map each
        # through coord_mapper to obtain every CF's center in the source.
        X,Y = self.dest.sheetcoords_of_idx_grid()
        vectorized_coord_mapper = simple_vectorize(self.coord_mapper,
                                                   num_outputs=2,
                                                   # CB: could switch to float32?
                                                   output_type=float)
        return vectorized_coord_mapper(X,Y)


    # CB: should be _initialize_cfs() since we already have 'initialize_cfs' flag?
    def _create_cfs(self):
        # One ConnectionField per destination unit; flatcfs is the
        # flattened list used by the CF iterators.
        vectorized_create_cf = simple_vectorize(self._create_cf)
        self.cfs = vectorized_create_cf(*self._generate_coords())
        self.flatcfs = list(self.cfs.flat)


    def _create_cf(self,x,y):
        """
        Create a ConnectionField at x,y in the src sheet.

        Returns None for empty CFs when allow_null_cfs is True;
        otherwise NullCFError propagates to the caller.
        """
        # (to restore would need to have an r,c counter)
        # self.debug("Creating CF(%d,%d) from src (%.3f,%.3f) to dest (%.3f,%.3f)"%(r,c,x_cf,y_cf,x,y))

        try:
            if self.apply_output_fns_init:
                ofs = [wof.single_cf_fn for wof in self.weights_output_fns]
            else:
                ofs = []

            # Either reuse the shared mask or evaluate a fresh one per CF.
            if self.same_cf_shape_for_all_cfs:
                mask_template = self.mask_template
            else:
                mask_template = _create_mask(self.cf_shape,self.bounds_template,
                                             self.src,self.autosize_mask,
                                             self.mask_threshold)

            CF = self.cf_type(self.src,x=x,y=y,
                              template=self._slice_template,
                              weights_generator=self.weights_generator,
                              mask=mask_template,
                              output_fns=ofs,
                              min_matrix_radius=self.min_matrix_radius)
        except NullCFError:
            if self.allow_null_cfs:
                CF = None
            else:
                raise

        return CF


    def n_units(self):
        """Return the number of unmasked units in a typical ConnectionField."""
        ### JCALERT! Right now, we take the number of units at the
        ### center of the cfs matrix. It would be more reliable to
        ### calculate it directly from the target sheet density and
        ### the weight_bounds. Example:
        #center_r,center_c = sheet2matrixidx(0,0,bounds,xdensity,ydensity)
        rows,cols=self.cfs.shape
        # NOTE(review): rows/2 relies on Python 2 integer division;
        # under Python 3 this would be a float index (TypeError).
        cf = self.cfs[rows/2,cols/2]
        return len(cf.mask.ravel().nonzero()[0]) # CB: newer numpy array has .flatnonzero()


    def cf(self,r,c):
        """Return the specified ConnectionField"""
        # CB: should we offer convenience cf(x,y) (i.e. sheetcoords) method instead?
        self.warning("CFProjection.cf(r,c) is deprecated: use cfs[r,c] instead")
        return self.cfs[r,c]


    def cf_bounds(self,r,c):
        """Return the bounds of the specified ConnectionField."""
        return self.cfs[r,c].get_bounds(self.src)


    def get_view(self, sheet_x, sheet_y, timestamp):
        """
        Return a single connection field UnitView, for the unit
        located nearest to sheet coordinate (sheet_x,sheet_y).
        """
        # Embed the CF's weights at their slice position within a
        # source-sheet-sized matrix of zeros.
        matrix_data = zeros(self.src.activity.shape,Float)
        (r,c) = self.dest.sheet2matrixidx(sheet_x,sheet_y)
        r1,r2,c1,c2 = self.cfs[r,c].input_sheet_slice
        matrix_data[r1:r2,c1:c2] = self.cfs[r,c].weights

        # CB: the following would be equivalent with Slice __call__
        # cf = self.cf(self.dest.sheet2matrixidx(sheet_x,sheet_y))
        # matrix_data = numpy.zeros(self.src.activity.shape,Numeric.Float)
        # matrix_data[cf.input_sheet_slice()]=cf.weights

        return UnitView((matrix_data,self.src.bounds),sheet_x,sheet_y,self,timestamp)


    def activate(self,input_activity):
        """Activate using the specified response_fn and output_fn."""
        self.input_buffer = input_activity
        # Zero the activity in place, preserving the array object.
        self.activity *=0.0
        self.response_fn(MaskedCFIter(self), input_activity, self.activity, self.strength)
        for of in self.output_fns:
            of(self.activity)


    # CEBALERT: should add active_units_mask to match
    # apply_learn_output_fns.
    def learn(self):
        """
        For a CFProjection, learn consists of calling the learning_fn.
        """
        # Learning is performed if the input_buffer has already been set,
        # i.e. there is an input to the Projection.
        # NOTE(review): `!= None` on a numpy array performs elementwise
        # comparison in newer numpy; `is not None` would be safer.
        if self.input_buffer != None:
            self.learning_fn(MaskedCFIter(self),self.input_buffer,self.dest.activity,self.learning_rate)


    # CEBALERT: called 'learn' output fns here, but called 'weights' output fns
    # elsewhere (mostly). Change all to 'learn'?
    def apply_learn_output_fns(self,active_units_mask=True):
        """
        Apply the weights_output_fns to each unit.

        If active_units_mask is True, inactive units will be skipped.
        """
        for of in self.weights_output_fns:
            of(MaskedCFIter(self,active_units_mask=active_units_mask))


    # CEBALERT: see gc alert in simulation.__new__
    def _cleanup(self):
        # Break reference cycles so the projection can be garbage collected.
        for cf in self.cfs.flat:
            # cf could be None or maybe something else
            if hasattr(cf,'input_sheet'):
                cf.input_sheet=None
            if hasattr(cf,'input_sheet_slice'):
                cf.input_sheet_slice=None
            if hasattr(cf,'weights_slice'):
                cf.weights_slice=None


    def n_bytes(self):
        """Estimate of this projection's memory footprint in bytes."""
        # Could also count the input_sheet_slice
        rows,cols=self.cfs.shape
        return super(CFProjection,self).n_bytes() + \
               sum([cf.weights.nbytes +
                    cf.mask.nbytes
                    for cf,i in CFIter(self,ignore_sheet_mask=True)()])


    def n_conns(self):
        """Number of connections, over all non-null, unmasked CFs."""
        # Counts non-masked values, if mask is available; otherwise counts
        # weights as connections if nonzero
        rows,cols=self.cfs.shape
        return sum([len((cf.mask if cf.mask is not None else cf.weights).ravel().nonzero()[0])
                    for cf,i in MaskedCFIter(self)()])
# CEB: have not yet decided proper location for this method
# JAB: should it be in PatternGenerator?
def _create_mask(shape,bounds_template,sheet,autosize=True,threshold=0.5):
    """
    Create the mask (see ConnectionField.__init__()).

    Evaluates the `shape` PatternGenerator over `bounds_template` on the
    given sheet and thresholds the result.  WARNING: when `autosize` is
    True and `shape` has a `size` parameter, this function mutates the
    passed-in `shape` object (its size and aspect_ratio) in place.
    """
    # Calculate the size & aspect_ratio of the mask if appropriate;
    # mask size set to be that of the weights matrix
    if hasattr(shape, 'size') and autosize:
        l,b,r,t = bounds_template.lbrt()
        shape.size = t-b
        shape.aspect_ratio = (r-l)/shape.size

    # Center mask to matrixidx center
    center_r,center_c = sheet.sheet2matrixidx(0,0)
    center_x,center_y = sheet.matrixidx2sheet(center_r,center_c)

    mask = shape(x=center_x,y=center_y,
                 bounds=bounds_template,
                 xdensity=sheet.xdensity,
                 ydensity=sheet.ydensity)

    # Zero out everything below the threshold; values >= threshold keep
    # their pattern value (the mask is not binarized).
    mask = where(mask>=threshold,mask,0.0)

    # CB: unnecessary copy (same as for weights)
    return mask.astype(weight_type)
import numpy
class CFIter(object):
    """
    Iterator to walk through all ConnectionFields of all neurons in
    the destination Sheet of the given CFProjection. Each iteration
    yields the tuple (cf,i) where cf is the ConnectionField at
    position i in the projection's flatcfs list.

    If active_units_mask is True, inactive units will be skipped. If
    ignore_sheet_mask is True, even units excluded by the sheet mask
    will be included.
    """
    def get_proj(self):
        # print "access iterator.proj"
        return self._proj

    def set_proj(self,proj):
        self._proj = proj

    # Exposed as a property so subclasses can intercept projection access.
    proj = property(get_proj,set_proj)

    # CB: as noted elsewhere, rename active_units_mask (to e.g.
    # ignore_inactive_units).
    def __init__(self,cfprojection,active_units_mask=False,ignore_sheet_mask=False):
        """
        :param cfprojection: the CFProjection whose CFs to iterate over
        :param active_units_mask: if True, skip units whose destination
            activity is zero (subject to allow_skip_non_responding_units)
        :param ignore_sheet_mask: if True, include units the destination
            sheet mask would exclude
        """
        self.proj = cfprojection
        # Cache frequently accessed projection attributes locally.
        self.flatcfs = cfprojection.flatcfs
        self.activity = cfprojection.dest.activity
        self.mask = cfprojection.dest.mask
        self.allow_skip_non_responding_units = cfprojection.dest.allow_skip_non_responding_units
        self.active_units_mask = active_units_mask
        self.ignore_sheet_mask = ignore_sheet_mask

    # CB: if there were a method on proj to access "dest.activity",
    # then for MultiprocessorCFProjection, that method would be
    # overridden to return the appropriate array. Similarly, a method
    # on proj to access "dest.mask" would be overridden to return the
    # appropriate array (the mask having been distributed).

    def __nomask(self):
        # return an array indicating all units should be processed
        # dtype for C functions.
        # could just be flat.
        return numpy.ones(self.activity.shape,dtype=self.activity.dtype)

    # CEBALERT: make _
    def get_sheet_mask(self):
        # Destination sheet mask, unless it is being ignored.
        if not self.ignore_sheet_mask:
            return self.mask.data
        else:
            return self.__nomask()

    # CEBALERT: make _ (and probably drop '_mask').
    def get_active_units_mask(self):
        # Use the destination activity itself as the mask when skipping
        # non-responding units is both allowed and requested.
        if self.allow_skip_non_responding_units and self.active_units_mask:
            return self.activity
        else:
            return self.__nomask()

    # CEBALERT: rename to something like
    def get_overall_mask(self):
        """
        Return an array indicating whether or not each unit should be
        processed.
        """
        # JPHACKALERT: Should really check for the existence of the
        # mask, rather than checking its type. This is a hack to
        # support higher-order projections whose dest is a CF, instead
        # of a sheet. The right thing to do is refactor so that CF
        # masks and SheetMasks are subclasses of an abstract Mask
        # type so that they support the same interfaces.
        #
        # CEBALERT: put back when supporting neighborhood masking
        # (though preferably do what Jeff suggests instead)
        # if isinstance(self.proj.dest.mask,SheetMask):
        #     return get_active_units_mask()
        # else:
        # CB: note that it's faster for our optimized C functions to
        # combine the masks themselves, rather than using this method.
        sheet_mask = self.get_sheet_mask()
        active_units_mask = self.get_active_units_mask()
        return numpy.logical_and(sheet_mask,active_units_mask)

    def __call__(self):
        """Generate (cf, flat_index) pairs for every unit to be processed."""
        mask = self.get_overall_mask()
        for i,cf in enumerate(self.flatcfs):
            if cf is not None:   # skip null CFs (see allow_null_cfs)
                if mask.flat[i]:
                    yield cf,i


# CEBALERT: remove this once MaskedCFIter has been replaced elsewhere.
MaskedCFIter = CFIter
### We don't really need this class; its methods could probably be
### moved up to ProjectionSheet, because they may in fact be valid for
### all ProjectionSheets. But we're leaving it here, because it is
### likely to be useful in the future.
class CFSheet(ProjectionSheet):
    """
    A ProjectionSheet providing access to the ConnectionFields in its CFProjections.

    CFSheet is a Sheet built from units indexed by Sheet coordinates
    (x,y). Each unit can have one or more ConnectionFields on another
    Sheet (via this sheet's CFProjections). Thus CFSheet is a more
    concrete version of a ProjectionSheet; a ProjectionSheet does not
    require that there be units or weights of any kind. Unless you
    need access to the underlying ConnectionFields for visualization
    or analysis, CFSheet and ProjectionSheet are interchangeable.
    """

    measure_maps = param.Boolean(True,doc="""
        Whether to include this Sheet when measuring various maps to create SheetViews.""")

    precedence = param.Number(0.5)

    def update_unit_view(self,x,y,proj_name=''):
        """
        Creates the list of UnitView objects for a particular unit in this CFSheet.
        (There is one UnitView for each Projection to this CFSheet).

        Each UnitView is then added to the sheet_views of its source sheet.
        It returns the list of all UnitViews for the given unit.
        """
        # NOTE(review): despite the docstring, nothing is actually
        # returned; views are only stored on the source sheets.
        for p in self.in_connections:
            if not isinstance(p,CFProjection):
                self.debug("Skipping non-CFProjection "+p.name)
            elif proj_name == '' or p.name==proj_name:
                v = p.get_view(x,y,self.simulation.time())
                src = v.projection.src
                # Views are keyed by type, destination, projection and unit.
                key = ('Weights',v.projection.dest.name,v.projection.name,x,y)
                v.proj_src_name = v.projection.src.name
                src.sheet_views[key] = v

    ### JCALERT! This should probably be deleted...
    def release_unit_view(self,x,y):
        self.release_sheet_view(('Weights',x,y))
class ResizableCFProjection(CFProjection):
    """
    A CFProjection with resizable weights.
    """
    # Less efficient memory usage than CFProjection because it stores
    # the (x,y) position of each ConnectionField.
    def _generate_coords(self):
        # same as super's, but also stores the coords.
        # CB: this is storing redundant info because generate_coords()
        # returns output from mgrid. Might be better to store the 1d x
        # and y coords, and generate the grids when needed?
        self.X_cf,self.Y_cf = super(ResizableCFProjection,self)._generate_coords()
        return self.X_cf,self.Y_cf

    ### This could be changed into a special __set__ method for
    ### bounds_template, instead of being a separate function, but
    ### having it be explicit like this might be clearer.
    ###
    ### This implementation is fairly slow, and for some algorithms
    ### that rely on changing the bounds frequently, it may be worth
    ### re-implementing it in C.
    def change_bounds(self, nominal_bounds_template):
        """
        Change the bounding box for all of the ConnectionFields in this Projection.

        Calls change_bounds() on each ConnectionField.

        Currently only allows reducing the size, but should be
        extended to allow increasing as well.
        """
        # Fit the requested bounds to the source sheet, forcing an odd
        # weights matrix (mirrors CFProjection.__init__).
        slice_template = Slice(copy(nominal_bounds_template),
                               self.src,force_odd=True,
                               min_matrix_radius=self.min_matrix_radius)

        bounds_template = slice_template.compute_bounds(self.src)

        # Only shrinking is supported: the new bounds must fit inside
        # the current ones.
        if not self.bounds_template.containsbb_exclusive(bounds_template):
            if self.bounds_template.containsbb_inclusive(bounds_template):
                self.debug('Initial and final bounds are the same.')
            else:
                self.warning('Unable to change_bounds; currently allows reducing only.')
            return

        # it's ok so we can store the bounds and resize the weights
        mask_template = _create_mask(self.cf_shape,bounds_template,self.src,
                                     self.autosize_mask,self.mask_threshold)

        self.nominal_bounds_template = nominal_bounds_template
        self.bounds_template = bounds_template
        self._slice_template = slice_template

        cfs = self.cfs
        rows,cols = cfs.shape
        output_fns = [wof.single_cf_fn for wof in self.weights_output_fns]
        # NOTE(review): xrange implies this module targets Python 2.
        for r in xrange(rows):
            for c in xrange(cols):
                xcf,ycf = self.X_cf[0,c],self.Y_cf[r,0]
                # CB: listhack - loop is candidate for replacement by numpy fn
                self._change_cf_bounds(cfs[r,c],input_sheet=self.src,
                                       x=xcf,y=ycf,
                                       template=slice_template,
                                       mask=mask_template,
                                       output_fns=output_fns,
                                       min_matrix_radius=self.min_matrix_radius)

    def change_density(self, new_wt_density):
        """
        Rescales the weight matrix in place, interpolating or resampling as needed.

        Not yet implemented.
        """
        raise NotImplementedError

    def _change_cf_bounds(self,cf,input_sheet,x,y,template,mask,output_fns=None,min_matrix_radius=1):
        """
        Change the bounding box for this ConnectionField.

        Discards weights or adds new (zero) weights as necessary,
        preserving existing values where possible.

        Currently only supports reducing the size, not increasing, but
        should be extended to support increasing as well.

        Note that the supplied template will be modified, so if you're
        also using them elsewhere you should pass copies.
        """
        if output_fns is None:
            output_fns = []

        # CEBALERT: re-write to allow arbitrary resizing
        # Remember the CF's old slice before recomputing it.
        or1,or2,oc1,oc2 = cf.input_sheet_slice

        weights_slice = cf._create_input_sheet_slice(input_sheet,x,y,copy(template),min_matrix_radius)

        r1,r2,c1,c2 = cf.input_sheet_slice

        # Only rebuild weights/mask when the slice actually changed.
        if not (r1 == or1 and r2 == or2 and c1 == oc1 and c2 == oc2):
            # CB: note that it's faster to copy (i.e. replacing copy=1 with copy=0
            # below slows down change_bounds().
            cf.weights = array(cf.weights[r1-or1:r2-or1,c1-oc1:c2-oc1],copy=1)
            # (so the obvious choice,
            # cf.weights=cf.weights[r1-or1:r2-or1,c1-oc1:c2-oc1],
            # is also slower).
            cf.mask = weights_slice.submatrix(mask)
            cf.mask = array(cf.mask,copy=1) # CB: why's this necessary?
                                            # (see ALERT in __init__)
            cf.weights *= cf.mask
            for of in output_fns:
                of(cf.weights)
            # Invalidate cached norm_total so it reflects the new weights.
            del cf.norm_total
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 20.01.2014
@author: heinz-peterlang
The source files for the dictionaries (of format *.csv and *.txt) are being
copied by Jenkins from the opinion-mining-lexicon repository to
services.weblyzard.com.
'''
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import object
import os
import urllib.parse
from datetime import datetime, timedelta
from socket import gethostbyname, gaierror
from weblyzard_api.util.http import Retrieve
LOCAL_DIR = '/opt/weblyzard/dictionaries/'
SERVER_URL = 'https://services.weblyzard.com/repo/resources/'
MAX_AGE_HOURS = 24
class WeblyzardDictionaries(object):
def __init__(self, user, password,
local_dir=LOCAL_DIR,
server_url=SERVER_URL,
max_age_hours=MAX_AGE_HOURS):
if not os.path.exists(local_dir):
os.makedirs(local_dir)
self.max_file_age = datetime.now() - timedelta(hours=max_age_hours)
self.local_dir = local_dir
self.server_url = server_url
self.retrieve = Retrieve(__file__)
self.user = user
self.password = password
@staticmethod
def is_online(server_url):
'''
Checks, whether the given url is online.
:param server_url: \
the url to check.
:returns:
True, if the dictionary server is online/reachable.
'''
hostname = urllib.parse.urlsplit(server_url).netloc
try:
gethostbyname(hostname)
return True
except gaierror:
return False
def get_dictionary(self, dictionary_uri):
''' tries to load the dictionary from the file-system. If the function
cannot find the file or if the file is too old (see MAX_AGE_HOURS),
the function will load the dictionary from the server.
:param dictionary_uri: URI for the dictionary, e.g. people/de/titles/all.txt
:returns: full file name of the dictionary
'''
if dictionary_uri.startswith('/'):
dictionary_uri = dictionary_uri[1:]
full_path = os.path.join(self.local_dir, dictionary_uri)
# skip retrieval, if the server is not available
if not self.is_online(SERVER_URL):
return full_path
fetch_file = True
if os.path.isfile(full_path):
last_mod = datetime.fromtimestamp(os.path.getmtime(full_path))
if last_mod < self.max_file_age:
last_mod_server = self.get_last_mod_date(dictionary_uri)
if last_mod_server < last_mod:
fetch_file = False
else:
fetch_file = False
if fetch_file:
self.get_from_server(dictionary_uri, full_path)
return full_path
def get_last_mod_date(self, dictionary_uri):
''' Requests the URL with a HEAD request to retrieve the last_modified
date of the file
:param dictionary_uri: URI for the dictionary, e.g. people/de/titles/all.txt
'''
full_url = urllib.parse.urljoin(self.server_url, dictionary_uri)
response = self.retrieve.open(full_url,
user=self.user,
pwd=self.password,
accept_gzip=False,
head_only=True)
last_modified = response.headers.get('Last-Modified')
if last_modified:
return datetime.strptime(last_modified, '%a, %d %b %Y %H:%M:%S %Z')
def get_from_server(self, dictionary_uri, target_path):
''' Fetches a dictionary from the server and stores it on the local FS.
:param dictionary_uri: URI for the dictionary, e.g. people/de/titles/all.txt
:param target_path: destination on local FS to store the file
:returns: target_path if the file was saved
'''
full_url = urllib.parse.urljoin(self.server_url, dictionary_uri)
response = self.retrieve.open(full_url,
user=self.user,
pwd=self.password)
if response:
target_directory = os.path.dirname(target_path)
if not os.path.exists(target_directory):
os.makedirs(target_directory)
content = response.read()
if isinstance(content, bytes):
try:
content = content.decode('utf-8')
with open(target_path, 'w', encoding='utf-8') as f:
f.write(content)
except Exception as e:
with open(target_path, 'wb') as f:
f.write(content)
return target_path
def test_is_online():
    # A host that cannot be resolved must be reported as offline.
    unreachable = "http://not-existinet-url-123.de/myservice"
    assert not WeblyzardDictionaries.is_online(unreachable)
chg: update the last-accessed timestamp on local sentiment lexicons to avoid overeager reloading
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 20.01.2014
@author: heinz-peterlang
The source files for the dictionaries (of format *.csv and *.txt) are being
copied by Jenkins from the opinion-mining-lexicon repository to
services.weblyzard.com.
'''
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import object
import os
import urllib.parse
from datetime import datetime, timedelta
from socket import gethostbyname, gaierror
from weblyzard_api.util.http import Retrieve
# Default cache directory, server base URL and cache freshness window.
LOCAL_DIR = '/opt/weblyzard/dictionaries/'
SERVER_URL = 'https://services.weblyzard.com/repo/resources/'
MAX_AGE_HOURS = 24


class WeblyzardDictionaries(object):
    """
    Downloads dictionary files from the weblyzard resource server and
    caches them on the local file system, re-fetching a cached copy only
    when it is older than ``max_age_hours`` AND the server has a newer
    version.
    """

    def __init__(self, user, password,
                 local_dir=LOCAL_DIR,
                 server_url=SERVER_URL,
                 max_age_hours=MAX_AGE_HOURS):
        # :param user: user name for HTTP authentication
        # :param password: password for HTTP authentication
        # :param local_dir: directory used to cache dictionaries locally
        # :param server_url: base URL of the dictionary server
        # :param max_age_hours: files modified earlier than this many hours
        #     ago are re-checked against the server
        if not os.path.exists(local_dir):
            os.makedirs(local_dir)
        # Cutoff timestamp: files modified before this point count as stale.
        self.max_file_age = datetime.now() - timedelta(hours=max_age_hours)
        self.local_dir = local_dir
        self.server_url = server_url
        self.retrieve = Retrieve(__file__)
        self.user = user
        self.password = password

    @staticmethod
    def is_online(server_url):
        '''
        Checks, whether the given url is online.

        :param server_url: the url to check.
        :returns: True, if the dictionary server is online/reachable.
        '''
        hostname = urllib.parse.urlsplit(server_url).netloc
        try:
            gethostbyname(hostname)
            return True
        except gaierror:
            return False

    def get_dictionary(self, dictionary_uri):
        ''' tries to load the dictionary from the file-system. If the function
        cannot find the file or if the file is too old (see MAX_AGE_HOURS),
        the function will load the dictionary from the server.

        :param dictionary_uri: URI for the dictionary, e.g. people/de/titles/all.txt
        :returns: full file name of the dictionary
        '''
        if dictionary_uri.startswith('/'):
            dictionary_uri = dictionary_uri[1:]
        full_path = os.path.join(self.local_dir, dictionary_uri)
        # skip retrieval, if the server is not available
        # NOTE(review): checks the module constant SERVER_URL instead of
        # self.server_url — a custom server_url is ignored here; confirm
        # and fix.
        if not self.is_online(SERVER_URL):
            return full_path
        fetch_file = True
        if os.path.isfile(full_path):
            last_mod = datetime.fromtimestamp(os.path.getmtime(full_path))
            if last_mod < self.max_file_age:
                # NOTE(review): get_last_mod_date() returns None when the
                # server sends no Last-Modified header, which would make
                # this comparison raise TypeError.
                last_mod_server = self.get_last_mod_date(dictionary_uri)
                if last_mod_server < last_mod:
                    fetch_file = False
                    # Touch the cached file so the freshness window restarts
                    # and we do not HEAD the server again on every access
                    # within max_age_hours.
                    timestamp = datetime.now().timestamp()
                    os.utime(full_path, (timestamp, timestamp))
            else:
                # Local copy is fresh enough; no server round-trip needed.
                fetch_file = False
        if fetch_file:
            self.get_from_server(dictionary_uri, full_path)
        return full_path

    def get_last_mod_date(self, dictionary_uri):
        ''' Requests the URL with a HEAD request to retrieve the last_modified
        date of the file.

        :param dictionary_uri: URI for the dictionary, e.g. people/de/titles/all.txt
        :returns: the parsed Last-Modified date, or None (implicitly) when
            the server did not send a Last-Modified header
        '''
        full_url = urllib.parse.urljoin(self.server_url, dictionary_uri)
        response = self.retrieve.open(full_url,
                                      user=self.user,
                                      pwd=self.password,
                                      accept_gzip=False,
                                      head_only=True)
        last_modified = response.headers.get('Last-Modified')
        if last_modified:
            return datetime.strptime(last_modified, '%a, %d %b %Y %H:%M:%S %Z')

    def get_from_server(self, dictionary_uri, target_path):
        ''' Fetches a dictionary from the server and stores it on the local FS.

        :param dictionary_uri: URI for the dictionary, e.g. people/de/titles/all.txt
        :param target_path: destination on local FS to store the file
        :returns: target_path if the file was saved
        '''
        full_url = urllib.parse.urljoin(self.server_url, dictionary_uri)
        response = self.retrieve.open(full_url,
                                      user=self.user,
                                      pwd=self.password)
        if response:
            target_directory = os.path.dirname(target_path)
            if not os.path.exists(target_directory):
                os.makedirs(target_directory)
            content = response.read()
            # Prefer storing text as UTF-8; fall back to raw bytes when the
            # payload is not decodable.
            # NOTE(review): the broad `except Exception` also masks I/O
            # errors, and non-bytes content is never written — confirm.
            if isinstance(content, bytes):
                try:
                    content = content.decode('utf-8')
                    with open(target_path, 'w', encoding='utf-8') as f:
                        f.write(content)
                except Exception as e:
                    with open(target_path, 'wb') as f:
                        f.write(content)
            return target_path
def test_is_online():
    # A host that cannot be resolved must be reported as offline.
    unreachable = "http://not-existinet-url-123.de/myservice"
    assert not WeblyzardDictionaries.is_online(unreachable)
|
from bs4 import BeautifulSoup
import re
import json
import time
from django.core.management.base import BaseCommand
from django.db.models import Q
from programs.models import Program, ProgramModules, LearningPlan
from disciplines.models import Discipline, Semester, TrainingTerms
from modules.models import Module
class Command(BaseCommand):
"""
Example: ./manage.py parse_new "/home/developer/КТОМ 4.html" uni_fixtures/modules.json ./get_programs.html "Конструкторско-технологическое обеспечение машиностроительных производств"
"""
help = "Create Django objects from raw&ugly UrFU data."
requires_system_checks = True
requires_migrations_checks = True
    class bcolors:
        # ANSI terminal escape codes used to colorize console output
        # printed by this command; ENDC resets the style.
        HEADER = '\033[95m'
        OKBLUE = '\033[96m'
        OKGREEN = '\033[92m'
        WARNING = '\033[93m'
        FAIL = '\033[91m'
        ENDC = '\033[0m'
        BOLD = '\033[1m'
        UNDERLINE = '\033[4m'
def decompose(self, soup, tag, classname):
[el.decompose() for el in soup.find_all(tag, {'class': classname})]
def add_arguments(self, parser):
parser.add_argument('html_path', nargs=1)
parser.add_argument('uni_modules_path', nargs=1)
parser.add_argument('programs_path', nargs=1)
parser.add_argument('program_title', nargs=1)
    def handle(self, *args, **options):
        """Entry point: parse the exported HTML files and create/update the
        corresponding Program, LearningPlan and module records."""
        start_time = time.time()
        # Positional arguments were registered with nargs=1, hence the [0].
        html_path = options["html_path"][0]
        uni_modules_path = options["uni_modules_path"][0]
        program_title = options["program_title"][0]
        programs_path = options["programs_path"][0]

        # Load the three input files.
        # NOTE(review): the bare `except` turns every error (including
        # encoding problems) into FileNotFoundError.
        try:
            with open(html_path, encoding='utf-8') as html_file:
                raw_html = '\n'.join(html_file.readlines())
        except:
            raise FileNotFoundError
        try:
            with open(uni_modules_path, encoding='utf-8') as modules_file:
                modules_json = json.load(modules_file)
        except:
            raise FileNotFoundError
        try:
            with open(programs_path, encoding='utf-8') as programs_file:
                raw_programs = '\n'.join(programs_file.readlines())
        except:
            raise FileNotFoundError

        if raw_programs:
            # Collect one row of cell texts per program table row.
            programs_soup = BeautifulSoup(raw_programs, 'lxml')
            rows = []
            for row in programs_soup.find_all('tr', {"class": "main-info"}):
                rows.append([val.text.strip() for val in row.find_all('td')])
            for row in rows:
                try:
                    program = Program.objects.get(title=row[1])
                except:
                    # Program does not exist yet: derive its level and create it.
                    def level(x):
                        # Maps the qualification text to a level code; duplicate
                        # True keys collapse, so the LAST matching entry wins.
                        return {
                            'Магистр'.lower() in str(x).lower(): "m",
                            'Специалист'.lower() in str(x).lower(): "s",
                            'Бакалавр'.lower() in str(x).lower(): "b",
                        }[True]
                    program = Program(title=row[1],
                                      training_direction=row[2],
                                      level=level(row[4]),
                                      )
                    program.save()
                    # NOTE(review): duplicated word "программа" in this message.
                    print(f"{self.bcolors.BOLD}Создана программа программа \"{row[1]}\"?{self.bcolors.ENDC}")

        # Mark the requested program as published ("p").
        try:
            program = Program.objects.filter(title=program_title).first()
            program.status = "p"
            program.save()
        except:
            raise NotImplementedError

        if raw_html:
            soup = BeautifulSoup(raw_html, 'lxml')
            # Strip scripts, styles and navigation chrome from the export.
            [s.extract() for s in soup('script')]
            [s.extract() for s in soup('style')]
            self.decompose(soup, "table", "menu_table")
            self.decompose(soup, "td", "navpath")
            self.decompose(soup, "div", "buttons")
            soup.find('td', id="nav_td").decompose()

            # Extract plan metadata; each field falls back to a default
            # when the corresponding cell is missing.
            try:
                stage = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.stage").text.strip().lower() == "утверждено"
            except:
                stage = False
            try:
                displayableTitle = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.displayableTitle").text.strip()
            except:
                displayableTitle = ""
            try:
                number = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.number").text.strip()
            except:
                number = ""
            try:
                active = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.active").text.strip()
            except:
                active = "нет"
            try:
                title = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.title").text.strip()
            except:
                title = ""
            try:
                loadTimeType = soup.find("td", id="EduVersionPlanTab.EduVersionPlan.loadTimeType").text.strip()
            except:
                loadTimeType = "часов в неделю"
            html = soup.find("table", {"class": "basic"}).prettify()

            # Update existing published learning plans with this number,
            # or create a new one.
            lps = LearningPlan.objects.filter(uni_number=number, status="p")
            if len(lps) > 0:
                for lp in lps:
                    lp.uni_displayableTitle = displayableTitle
                    lp.uni_number = number
                    lp.uni_active = active
                    lp.uni_title = title
                    lp.uni_stage = stage
                    lp.uni_loadTimeType = loadTimeType
                    lp.uni_html = html
                    lp.save()
                    if lp not in program.learning_plans.all():
                        program.learning_plans.add(lp)
                        program.save()
            else:
                lp = LearningPlan(uni_displayableTitle=displayableTitle,
                                  uni_number=number,
                                  uni_active=active,
                                  uni_title=title,
                                  uni_stage=stage,
                                  uni_loadTimeType=loadTimeType,
                                  uni_html=html,
                                  status="p"
                                  )
                lp.save()
                program.learning_plans.add(lp)
                program.save()

            table = soup.find('table', id="EduVersionPlanTab.EduDisciplineList")
            headers = [header.text.strip() for header in table.find_all('th')]

            # Helpers to locate a column by header text or header id.
            def find_row_index(row_text):
                headers = table.find_all('th')
                return headers.index(table.find('th', text=row_text))

            def find_row_index_id(id):
                headers = table.find_all('th')
                return headers.index(table.find('th', id=id))

            rows = []
            for row in table.find_all('tr'):
                rows.append([val.text.strip() for val in row.find_all('td')])

            # Find the modules
            modules = []
            for header in headers:
                if "Номер модуля, дисциплины".lower() == header.lower():
                    module_numbers_col = headers.index(header)
            for row in rows:
                if row:
                    # A module row carries a multi-digit number and an "М" marker.
                    m = re.search('\d\d+', row[module_numbers_col])
                    if m and "М" in row[1]:
                        for module in modules_json:
                            if str(module["number"]) == str(m.group(0)):
                                module["row"] = row
                                modules.append(module)

            program_modules = ProgramModules.objects.filter(program=program)
            for module in modules:
                print("  ", module['title'])
                if program_modules.filter(module__uni_uuid=module["uuid"]):
                    print(f"Модуль есть: {module['title']}")

            # Full-time plans have numbers without the "зао" (заочный) marker.
            fulltime = False
            if 'зао' not in number:
                fulltime = True
            print("fulltime: ", fulltime)
            if fulltime:
                term = TrainingTerms.objects.filter(title="4 года").first()
            for module in [m for m in modules if m["disciplines"]]:
                module_obj, semester = self.create_module(find_row_index_id, module, program)

            # Abort when a module title appears more than once for this program.
            # NOTE(review): typo "доблирование" (should be "дублирование") in
            # the user-facing message below.
            if len(ProgramModules.objects.filter(Q(program=program))) != len(set([pm.module.title for pm in ProgramModules.objects.filter(Q(program=program))])):
                print(f"{self.bcolors.FAIL}Найдено доблирование модулей программы. Поправьте в интерфейсе администратора.{self.bcolors.ENDC}")
                import sys
                sys.exit(1)

            # Offer to delete program-module links that were not (re)created
            # in this run.
            program_modules_fail = ProgramModules.objects.filter(~Q(id__in=[o.id for o in program_modules]), Q(program=program))
            print(program_modules_fail)
            for pmf in program_modules_fail:
                remove = input(f"{self.bcolors.WARNING}Неверный модуль программы: {pmf.module.title}. Удалить?{self.bcolors.ENDC}")
                if remove.lower() in ("y", "да", "ok", "ок"):
                    pmf.delete()
                    print(f"{self.bcolors.OKGREEN}Удалено.{self.bcolors.ENDC}")
def create_module(self, find_row_index_id, module, program):
print(f"{self.bcolors.HEADER}Ищем или создаём модуль: {module['title']}{self.bcolors.ENDC}")
for i in range(10, 0, -1):
try:
ze = module["row"][find_row_index_id(f"EduVersionPlanTab.EduDisciplineList.__term{i}.__term{i}headerCell")]
try:
if int(ze) > 0:
semester = i
except:
pass
except:
semester = 99
if semester == 99:
print(f"Семестр: {self.bcolors.FAIL}{semester}{self.bcolors.ENDC}")
else:
print(f"Семестр: {semester}")
try:
module_obj = Module.objects.filter(title=module["title"], uni_number=module["number"]).first()
module_obj.uni_uuid = module["uuid"]
module_obj.uni_number = module["number"]
module_obj.uni_coordinator = module["coordinator"]
module_obj.uni_type = module["type"]
module_obj.uni_title = module["title"]
module_obj.uni_competence = module["competence"]
module_obj.uni_testUnits = module["testUnits"]
module_obj.uni_priority = module["priority"]
module_obj.uni_state = module["state"]
module_obj.uni_approvedDate = module["approvedDate"]
module_obj.uni_comment = module["comment"]
module_obj.uni_file = module["file"]
module_obj.uni_specialities = module["specialities"]
module_obj.program = program
module_obj.semester = semester
module_obj.status = 'p'
module_obj.save()
print(f"{self.bcolors.OKBLUE}Модуль найден: {module['title']}{self.bcolors.ENDC}")
except:
print(f"{self.bcolors.BOLD}Модуль создан: {module['title']}{self.bcolors.ENDC}")
module_obj = Module(title=module["title"],
uni_uuid=module["uuid"],
uni_number=module["number"],
uni_coordinator=module["coordinator"],
uni_type=module["type"],
uni_title=module["title"],
uni_competence=module["competence"],
uni_testUnits=module["testUnits"],
uni_priority=module["priority"],
uni_state=module["state"],
uni_approvedDate=module["approvedDate"],
uni_comment=module["comment"],
uni_file=module["file"],
uni_specialities=module["specialities"],
program=program,
semester=semester,
status='p',
)
module_obj.save()
program_modules = []
program_module = ProgramModules.objects.filter(program=program, module=module_obj)
if not program_module:
print(f"{self.bcolors.WARNING}Модуль программы не найден, создаём: {module['title']} / {program.title}{self.bcolors.ENDC}")
program_module = ProgramModules(program=program, module=module_obj, semester=module_obj.semester, status="p")
program_module.save()
else:
print(
f"{self.bcolors.OKBLUE}Модуль программы найден {module['title']} / {program.title}{self.bcolors.ENDC}")
program_modules.append(program_module)
print("")
return module_obj, semester
new parser#36
from bs4 import BeautifulSoup
import re
import json
import time
from django.core.management.base import BaseCommand
from django.db.models import Q
from programs.models import Program, ProgramModules, LearningPlan
from disciplines.models import Discipline, Semester, TrainingTerms
from modules.models import Module
class Command(BaseCommand):
    """
    Import a UrFU learning-plan HTML dump into the local Django models.

    Reads three input files (a learning-plan HTML page, a JSON fixture with
    module metadata and an HTML page listing programs) and creates/updates
    the corresponding Program, LearningPlan, Module and ProgramModules rows.
    Interactive: may prompt on stdin and terminate via sys.exit(1).

    Example: ./manage.py parse_new "/home/developer/КТОМ 4.html" uni_fixtures/modules.json ./get_programs.html "Конструкторско-технологическое обеспечение машиностроительных производств"
    """
    help = "Create Django objects from raw&ugly UrFU data."
    # Run Django's system checks and warn about unapplied migrations first.
    requires_system_checks = True
    requires_migrations_checks = True

    class bcolors:
        # ANSI escape sequences used to colour the console output.
        HEADER = '\033[95m'
        OKBLUE = '\033[96m'
        OKGREEN = '\033[92m'
        WARNING = '\033[93m'
        FAIL = '\033[91m'
        ENDC = '\033[0m'
        BOLD = '\033[1m'
        UNDERLINE = '\033[4m'

    def decompose(self, soup, tag, classname):
        # Strip every <tag class="classname"> element from the soup in place.
        [el.decompose() for el in soup.find_all(tag, {'class': classname})]

    def add_arguments(self, parser):
        # Four positional arguments; nargs=1 makes each a one-element list.
        parser.add_argument('html_path', nargs=1)
        parser.add_argument('uni_modules_path', nargs=1)
        parser.add_argument('programs_path', nargs=1)
        parser.add_argument('program_title', nargs=1)

    def handle(self, *args, **options):
        # Entry point: load the three inputs, synchronise programs and
        # learning plans, then create/update the requested program's modules.
        start_time = time.time()
        # Unwrap the one-element lists produced by nargs=1.
        html_path = options["html_path"][0]
        uni_modules_path = options["uni_modules_path"][0]
        program_title = options["program_title"][0]
        programs_path = options["programs_path"][0]
        # Any read/parse problem is reported uniformly as FileNotFoundError.
        try:
            with open(html_path, encoding='utf-8') as html_file:
                raw_html = '\n'.join(html_file.readlines())
        except:
            raise FileNotFoundError
        try:
            with open(uni_modules_path, encoding='utf-8') as modules_file:
                modules_json = json.load(modules_file)
        except:
            raise FileNotFoundError
        try:
            with open(programs_path, encoding='utf-8') as programs_file:
                raw_programs = '\n'.join(programs_file.readlines())
        except:
            raise FileNotFoundError
        if raw_programs:
            # Create a Program row for every table line not yet in the DB.
            programs_soup = BeautifulSoup(raw_programs, 'lxml')
            rows = []
            for row in programs_soup.find_all('tr', {"class": "main-info"}):
                rows.append([val.text.strip() for val in row.find_all('td')])
            for row in rows:
                try:
                    program = Program.objects.get(title=row[1])
                except:
                    # Map the textual qualification level to its model code
                    # ("m"/"s"/"b"); exactly one dict key evaluates to True.
                    def level(x):
                        return {
                            'Магистр'.lower() in str(x).lower(): "m",
                            'Специалист'.lower() in str(x).lower(): "s",
                            'Бакалавр'.lower() in str(x).lower(): "b",
                        }[True]
                    program = Program(title=row[1],
                                      training_direction=row[2],
                                      level=level(row[4]),
                                      )
                    program.save()
                    print(f"{self.bcolors.BOLD}Создана программа программа \"{row[1]}\"?{self.bcolors.ENDC}")
        # Publish the requested program; any lookup failure (e.g. no program
        # with that title, so .first() returns None) surfaces as
        # NotImplementedError.
        try:
            program = Program.objects.filter(title=program_title).first()
            program.status = "p"
            program.save()
        except:
            raise NotImplementedError
        if raw_html:
            soup = BeautifulSoup(raw_html, 'lxml')
            # Drop scripts/styles and navigation chrome before parsing.
            [s.extract() for s in soup('script')]
            [s.extract() for s in soup('style')]
            self.decompose(soup, "table", "menu_table")
            self.decompose(soup, "td", "navpath")
            self.decompose(soup, "div", "buttons")
            soup.find('td', id="nav_td").decompose()
            # Each plan attribute falls back to a default when its cell is
            # missing from the HTML (soup.find returns None -> AttributeError).
            try:
                stage = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.stage").text.strip().lower() == "утверждено"
            except:
                stage = False
            try:
                displayableTitle = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.displayableTitle").text.strip()
            except:
                displayableTitle = ""
            try:
                number = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.number").text.strip()
            except:
                number = ""
            try:
                active = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.active").text.strip()
            except:
                active = "нет"
            try:
                title = soup.find('td', id="EduVersionPlanTab.EduVersionPlan.title").text.strip()
            except:
                title = ""
            try:
                loadTimeType = soup.find("td", id="EduVersionPlanTab.EduVersionPlan.loadTimeType").text.strip()
            except:
                loadTimeType = "часов в неделю"
            html = soup.find("table", {"class": "basic"}).prettify()
            # Update every published plan with this number, or create one.
            lps = LearningPlan.objects.filter(uni_number=number, status="p")
            if len(lps) > 0:
                for lp in lps:
                    lp.uni_displayableTitle = displayableTitle
                    lp.uni_number = number
                    lp.uni_active = active
                    lp.uni_title = title
                    lp.uni_stage = stage
                    lp.uni_loadTimeType = loadTimeType
                    lp.uni_html = html
                    lp.save()
                    if lp not in program.learning_plans.all():
                        program.learning_plans.add(lp)
                        program.save()
            else:
                lp = LearningPlan(uni_displayableTitle=displayableTitle,
                                  uni_number=number,
                                  uni_active=active,
                                  uni_title=title,
                                  uni_stage=stage,
                                  uni_loadTimeType=loadTimeType,
                                  uni_html=html,
                                  status="p"
                                  )
                lp.save()
                program.learning_plans.add(lp)
                program.save()
            table = soup.find('table', id="EduVersionPlanTab.EduDisciplineList")
            headers = [header.text.strip() for header in table.find_all('th')]
            # NOTE(review): find_row_index is never called below -- confirm
            # whether it can be removed.
            def find_row_index(row_text):
                headers = table.find_all('th')
                return headers.index(table.find('th', text=row_text))
            # Column index of a header cell by its HTML id (shadows builtin id).
            def find_row_index_id(id):
                headers = table.find_all('th')
                return headers.index(table.find('th', id=id))
            rows = []
            for row in table.find_all('tr'):
                rows.append([val.text.strip() for val in row.find_all('td')])
            # Look up the modules referenced by the plan table.
            modules = []
            for header in headers:
                if "Номер модуля, дисциплины".lower() == header.lower():
                    module_numbers_col = headers.index(header)
            # Match each table row carrying a module marker ("М") against the
            # modules JSON by number.
            for row in rows:
                if row:
                    m = re.search('\d\d+', row[module_numbers_col])
                    if m and "М" in row[1]:
                        for module in modules_json:
                            if str(module["number"]) == str(m.group(0)):
                                module["row"] = row
                                modules.append(module)
            program_modules = ProgramModules.objects.filter(program=program)
            for module in modules:
                print(" ", module['title'])
                if program_modules.filter(module__uni_uuid=module["uuid"]):
                    print(f"Модуль есть: {module['title']}")
            # Plans whose number contains 'зао' are extramural (заочные).
            fulltime = False
            if 'зао' not in number:
                fulltime = True
            print("fulltime: ", fulltime)
            # NOTE(review): `term` is assigned but never used afterwards.
            if fulltime:
                term = TrainingTerms.objects.filter(title="4 года").first()
            # Only modules that actually carry disciplines are imported.
            for module in [m for m in modules if m["disciplines"]]:
                module_obj, semester = self.create_module(find_row_index_id, module, program)
            # Abort when two ProgramModules rows share a module title.
            if len(ProgramModules.objects.filter(Q(program=program))) != len(set([pm.module.title for pm in ProgramModules.objects.filter(Q(program=program))])):
                print(f"{self.bcolors.FAIL}Найдено дублирование модулей программы. Поправьте в интерфейсе администратора.{self.bcolors.ENDC}")
                import sys
                sys.exit(1)
            # Interactively offer to delete links created outside this run.
            program_modules_fail = ProgramModules.objects.filter(~Q(id__in=[o.id for o in program_modules]), Q(program=program))
            print(program_modules_fail)
            for pmf in program_modules_fail:
                remove = input(f"{self.bcolors.WARNING}Неверный модуль программы: {pmf.module.title}. Удалить?{self.bcolors.ENDC}")
                if remove.lower() in ("y", "да", "ok", "ок"):
                    pmf.delete()
                    print(f"{self.bcolors.OKGREEN}Удалено.{self.bcolors.ENDC}")

    def create_module(self, find_row_index_id, module, program):
        # Find or create the Module for `module` (dict from the modules JSON,
        # with a "row" entry added) and link it to `program`.
        # Returns (module_obj, semester); semester is 99 when undetermined.
        print(f"{self.bcolors.HEADER}Ищем или создаём модуль: {module['title']}{self.bcolors.ENDC}")
        # NOTE(review): `semester` is only bound inside the loop; if every
        # column lookup succeeds but no cell holds a positive int, the check
        # below raises NameError -- confirm intended.
        for i in range(10, 0, -1):
            try:
                ze = module["row"][find_row_index_id(f"EduVersionPlanTab.EduDisciplineList.__term{i}.__term{i}headerCell")]
                try:
                    if int(ze) > 0:
                        semester = i
                except:
                    pass
            except:
                semester = 99
        if semester == 99:
            print(f"Семестр: {self.bcolors.FAIL}{semester}{self.bcolors.ENDC}")
        else:
            print(f"Семестр: {semester}")
        try:
            # .first() may return None; the attribute assignment then raises
            # and routes into the creation branch (find-or-create pattern).
            module_obj = Module.objects.filter(title=module["title"], uni_number=module["number"]).first()
            module_obj.uni_uuid = module["uuid"]
            module_obj.uni_number = module["number"]
            module_obj.uni_coordinator = module["coordinator"]
            module_obj.uni_type = module["type"]
            module_obj.uni_title = module["title"]
            module_obj.uni_competence = module["competence"]
            module_obj.uni_testUnits = module["testUnits"]
            module_obj.uni_priority = module["priority"]
            module_obj.uni_state = module["state"]
            module_obj.uni_approvedDate = module["approvedDate"]
            module_obj.uni_comment = module["comment"]
            module_obj.uni_file = module["file"]
            module_obj.uni_specialities = module["specialities"]
            module_obj.program = program
            module_obj.semester = semester
            module_obj.status = 'p'
            module_obj.save()
            print(f"{self.bcolors.OKBLUE}Модуль найден: {module['title']}{self.bcolors.ENDC}")
        except:
            print(f"{self.bcolors.BOLD}Модуль создан: {module['title']}{self.bcolors.ENDC}")
            module_obj = Module(title=module["title"],
                                uni_uuid=module["uuid"],
                                uni_number=module["number"],
                                uni_coordinator=module["coordinator"],
                                uni_type=module["type"],
                                uni_title=module["title"],
                                uni_competence=module["competence"],
                                uni_testUnits=module["testUnits"],
                                uni_priority=module["priority"],
                                uni_state=module["state"],
                                uni_approvedDate=module["approvedDate"],
                                uni_comment=module["comment"],
                                uni_file=module["file"],
                                uni_specialities=module["specialities"],
                                program=program,
                                semester=semester,
                                status='p',
                                )
            module_obj.save()
        program_modules = []
        # Ensure the Program<->Module link row exists.
        program_module = ProgramModules.objects.filter(program=program, module=module_obj)
        if not program_module:
            print(f"{self.bcolors.WARNING}Модуль программы не найден, создаём: {module['title']} / {program.title}{self.bcolors.ENDC}")
            program_module = ProgramModules(program=program, module=module_obj, semester=module_obj.semester, status="p")
            program_module.save()
        else:
            print(
                f"{self.bcolors.OKBLUE}Модуль программы найден {module['title']} / {program.title}{self.bcolors.ENDC}")
        program_modules.append(program_module)
        print("")
        return module_obj, semester
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 07.04.2014
@author: heinz-peterlang
'''
from __future__ import print_function
import json
import logging
import hashlib
import unicodedata
from lxml import etree
from datetime import date, datetime
from weblyzard_api.model.exceptions import (MalformedJSONException,
UnexpectedFieldException,
MissingFieldException,
UnsupportedValueException)
logger = logging.getLogger('weblyzard_api.parsers')
class DatesToStrings(json.JSONEncoder):
    """JSON encoder that serialises ``datetime``/``date`` values as ISO-8601
    strings before delegating to the standard encoder.

    The previous implementation only converted datetimes appearing as keys or
    values of (nested) dicts; a datetime at the top level or inside a list
    made ``encode`` raise ``TypeError``, and ``date`` objects were never
    handled at all. ``_encode`` now walks dicts, lists and tuples alike.
    """

    def _encode(self, obj):
        # Recursively replace date-like objects wherever they occur.
        if isinstance(obj, (datetime, date)):
            return obj.isoformat()
        if isinstance(obj, dict):
            return {self._encode(k): self._encode(v) for k, v in obj.items()}
        if isinstance(obj, (list, tuple)):
            return [self._encode(item) for item in obj]
        return obj

    def encode(self, obj):
        # Pre-process the structure, then use the stock JSON encoder.
        return super(DatesToStrings, self).encode(self._encode(obj))
class JSONParserBase(object):
    '''
    Common machinery for JSON document parsers: deserialisation plus
    structural validation against per-subclass field lists.
    '''
    #: Override this constant in the subclasses based on requirements.
    FIELDS_REQUIRED = []
    #: Override this constant in the subclasses based on requirements.
    FIELDS_OPTIONAL = []
    #: Override this constant in the subclasses based on requirements.
    API_VERSION = None

    @classmethod
    def from_json_string(cls, json_string):
        '''
        Deserialise ``json_string`` and hand the result to
        :meth:`from_api_dict`.

        :param json_string: The JSON to parse
        :type json_string: str
        :returns: The parsed object.
        :rtype: :py:class:`weblyzard_api.model.xml_content.XMLContent` or \
                :py:class:`wl_core.document.Document` or \
                :py:class:`weblyzard_api.model.xml_content.Sentence` or\
                dict.
        '''
        try:
            parsed = json.loads(json_string)
        except Exception:
            raise MalformedJSONException('JSON could not be parsed')
        return cls.from_api_dict(parsed)

    @classmethod
    def from_api_dict(cls, api_dict):
        # Abstract hook: subclasses turn the parsed dict into their model.
        raise NotImplementedError

    @classmethod
    def _missing_fields(cls, api_dict):
        '''
        Determine which required fields are absent or empty in ``api_dict``.

        :param api_dict: The document to check as dict.
        :type api_dict: dict
        :returns: The list of missing fields, None if all present.
        :rtype: list
        '''
        missing = [key for key in cls.FIELDS_REQUIRED
                   if key not in api_dict
                   or api_dict[key] is None
                   or api_dict[key] == '']
        # Historic contract: an empty result is reported as None.
        return missing or None

    @classmethod
    def _unexpected_fields(cls, api_dict):
        '''
        Determine which keys of ``api_dict`` are neither required nor
        optional.

        :param api_dict: The document to check as dict.
        :type api_dict: dict
        :returns: The list of unexpected fields, None if all accepted.
        :rtype: list
        '''
        allowed = cls.FIELDS_REQUIRED + cls.FIELDS_OPTIONAL
        unexpected = [key for key in api_dict if key not in allowed]
        return unexpected or None

    @classmethod
    def _check_document_format(cls, api_dict, strict=True):
        '''
        Validate that ``api_dict`` carries every required field and, in
        strict mode, no unknown ones.

        :param api_dict: The dict to check.
        :type api_dict: dict
        :param strict: If set to true, an UnexpectedFieldException is raised \
            if an unexpected key is contained in the dict.
        :type strict: bool
        '''
        missing = cls._missing_fields(api_dict)
        if missing is not None:
            raise MissingFieldException("Missing field(s) %s" %
                                        ', '.join(missing))
        if not strict:
            return
        unexpected = cls._unexpected_fields(api_dict)
        if unexpected is not None:
            raise UnexpectedFieldException("Got unexpected field(s): %s" %
                                           ', '.join(unexpected))

    @classmethod
    def _validate_document(cls, json_document, strict=True):
        '''
        Run the format check plus the content/content_type/sentences and
        published_date consistency rules.
        '''
        cls._check_document_format(json_document, strict)
        has_content = 'content' in json_document
        has_type = 'content_type' in json_document
        has_sentences = 'sentences' in json_document
        if has_content and not has_type:
            raise MissingFieldException(
                "When field 'content' is set, 'content_type' must be set, too.")
        if has_type and not has_content:
            raise MissingFieldException(
                "When field 'content_type' is set, 'content' must be set, too.")
        if not has_content and not has_type and not has_sentences:
            raise MissingFieldException(
                "Either 'sentences' or 'content' and 'content_type' must be set.")
        if has_content and has_sentences:
            raise MalformedJSONException(
                "If 'sentences' is set, 'content' must not be set.")
        if has_type and json_document['content_type'] not in cls.SUPPORTED_CONTENT_TYPES:
            raise UnsupportedValueException("content_type %s is not supported. Supported are %s" %
                                            (json_document['content_type'],
                                             cls.SUPPORTED_CONTENT_TYPES))
        meta_data = json_document.get('meta_data', {})
        if 'published_date' in meta_data:
            valid_from = None
            try:
                from dateutil.parser import parse
                valid_from = parse(meta_data['published_date'])
            except Exception:
                raise MissingFieldException(
                    "Could not process published_date: %s" % meta_data['published_date'])
            if not isinstance(valid_from, datetime):
                raise UnsupportedValueException(
                    'Field published_date set but not parseable')
class XMLParser(object):
    # Base class for webLyzard XML (de)serialisation. Subclasses supply the
    # class constants below (namespace URIs and attribute-name mappings) for
    # one concrete schema version.
    # NOTE(review): this class uses Python-2-only constructs (`unicode`,
    # dict.iteritems, str.decode) -- it will not run unchanged on Python 3.
    VERSION = None
    SUPPORTED_NAMESPACE = None
    DOCUMENT_NAMESPACES = None
    ATTR_MAPPING = None
    SENTENCE_MAPPING = None
    ANNOTATION_MAPPING = None
    FEATURE_MAPPING = None
    RELATION_MAPPING = None
    DEFAULT_NAMESPACE = 'wl'

    @classmethod
    def get_default_ns(cls):
        # The namespace URI this parser accepts/emits.
        return cls.SUPPORTED_NAMESPACE

    @classmethod
    def remove_control_characters(cls, value):
        # Strip all Unicode control characters (category 'C*') from value.
        return ''.join(ch for ch in value if unicodedata.category(ch)[0] != 'C')

    @classmethod
    def encode_value(cls, value):
        # Convert a Python value to an XML-safe text value: text is cleaned
        # of control characters, dates become ISO strings, everything else is
        # JSON-serialised (None on failure, logged).
        if isinstance(value, unicode):
            return XMLParser.remove_control_characters(value)
        elif isinstance(value, str):
            return XMLParser.remove_control_characters(value.decode('utf-8'))
        elif isinstance(value, date):
            return value.isoformat()
        elif isinstance(value, datetime):
            return value.isoformat()
        else:
            try:
                return json.dumps(value)
            except Exception as e:
                logger.error('could not encode {}: {}'.format(value, e))
                return

    @classmethod
    def decode_value(cls, value):
        # Inverse of encode_value for scalar attribute text: try JSON first,
        # fall back to the raw string. inf/-inf/nan are rejected so they come
        # back as their textual form rather than floats.
        try:
            decoded = json.loads(value)
            if decoded in (float('inf'), float('-inf'), float('nan')):
                raise ValueError('deserializing of invalid json values')
            else:
                return decoded
        except ValueError:
            # ignore silently (expected behaviour)
            return value

    @classmethod
    def cast_item(cls, item):
        # Best-effort cast of element text: bool -> int -> float -> JSON ->
        # original string, first success wins.
        if item.lower() == 'true':
            return True
        elif item.lower() == 'false':
            return False
        try:
            return int(item)
        except Exception:
            pass
        try:
            return float(item)
        except Exception:
            pass
        try:
            return json.loads(item)
        except Exception:
            pass
        return item

    @classmethod
    def get_xml_value(cls, value):
        # Render a value as text for an XML node; containers are JSON-encoded
        # with dates converted via DatesToStrings.
        try:
            if isinstance(value, int) or isinstance(value, float) or \
                    isinstance(value, datetime):
                value = str(value)
            elif isinstance(value, list) or isinstance(value, dict):
                value = json.dumps(value, cls=DatesToStrings)
        except Exception as e:
            logger.error('could not encode {}: {}'.format(value, e))
            value = str(value)
        return value

    @classmethod
    def is_supported(cls, xml_content):
        # Cheap textual sniff: does the document declare our namespace?
        return 'xmlns:wl="{}"'.format(cls.SUPPORTED_NAMESPACE) in xml_content

    @classmethod
    def invert_mapping(cls, mapping):
        # Turn an {api_name: (xml_name, ns_prefix)} mapping into
        # {qualified_xml_name: api_name} for loading; tuple values get their
        # namespace expanded to Clark notation '{uri}name'.
        result = {}
        if mapping == None:
            return result
        invert_mapping = dict(zip(mapping.values(),
                                  mapping.keys()))
        for key, value in invert_mapping.iteritems():
            if isinstance(key, tuple):
                key, namespace = key
                if namespace is not None:
                    key = '{%s}%s' % (cls.DOCUMENT_NAMESPACES[namespace], key)
            result[key] = value
        return result

    @classmethod
    def parse(cls, xml_content, remove_duplicates=True):
        # Parse a webLyzard XML document into plain Python structures.
        # Returns (attributes, sentences, title_annotations, body_annotations,
        # features, relations). Annotations whose md5sum matches a title
        # sentence are split off into title_annotations.
        parser = etree.XMLParser(recover=True, strip_cdata=False)
        # The encoding declaration is dropped because lxml rejects unicode
        # strings that carry one.
        root = etree.fromstring(xml_content.replace('encoding="UTF-8"', ''),
                                parser=parser)
        try:
            invert_mapping = cls.invert_mapping(cls.ATTR_MAPPING)
            attributes = cls.load_attributes(root.attrib,
                                             mapping=invert_mapping)
        except Exception as e:
            logger.warn('could not process mapping {}: {}'.format(
                cls.ATTR_MAPPING, e))
            attributes = {}
        sentences = cls.load_sentences(
            root, remove_duplicates=remove_duplicates)
        title_sentence_ids = [sentence['md5sum'] for sentence in sentences
                              if 'is_title' in sentence and sentence['is_title']]
        title_annotations = []
        body_annotations = []
        for annotation in cls.load_annotations(root):
            if 'md5sum' in annotation and annotation['md5sum'] in title_sentence_ids:
                title_annotations.append(annotation)
            else:
                body_annotations.append(annotation)
        features = cls.load_features(root)
        relations = cls.load_relations(root)
        return attributes, sentences, title_annotations, body_annotations, features, relations

    @classmethod
    def load_attributes(cls, attributes, mapping):
        # Rename XML attribute keys via `mapping` and JSON-decode their
        # values; literal 'None' values are dropped.
        new_attributes = {}
        for key, value in attributes.iteritems():
            if mapping and key in mapping:
                key = mapping.get(key, key)
            value = cls.decode_value(value)
            if not value == 'None':
                new_attributes[key] = value
        return new_attributes

    @classmethod
    def load_annotations(cls, root):
        # Collect all <wl:annotation> elements as attribute dicts.
        annotations = []
        annotation_mapping = cls.invert_mapping(cls.ANNOTATION_MAPPING)
        for annotation_element in root.iterfind('{%s}annotation' % cls.get_default_ns(),
                                                namespaces=cls.DOCUMENT_NAMESPACES):
            annotations.append(cls.load_attributes(annotation_element.attrib,
                                                   mapping=annotation_mapping))
        return annotations

    @classmethod
    def load_sentences(cls, root, remove_duplicates=True):
        # Collect all <wl:sentence> elements. Each sentence dict gets a
        # 'md5sum' id: taken from the attributes, migrated from a legacy
        # 'id' attribute, or computed from the sentence text.
        sentences = []
        seen_sentences = []
        sentence_mapping = cls.invert_mapping(cls.SENTENCE_MAPPING)
        for sent_element in root.iterfind('{%s}sentence' % cls.get_default_ns(),
                                          namespaces=cls.DOCUMENT_NAMESPACES):
            sent_attributes = cls.load_attributes(sent_element.attrib,
                                                  mapping=sentence_mapping)
            sent_attributes['value'] = sent_element.text.strip()
            if 'md5sum' in sent_attributes:
                sent_id = sent_attributes['md5sum']
            elif 'id' in sent_attributes:
                sent_id = sent_attributes['id']
                sent_attributes['md5sum'] = sent_id
                del sent_attributes['id']
            else:
                sent_id = hashlib.md5(
                    sent_element.text.encode('utf-8')).hexdigest()
                sent_attributes['md5sum'] = sent_id
            # When deduplicating, only the first sentence per id is kept.
            if not sent_id in seen_sentences:
                sentences.append(sent_attributes)
                if remove_duplicates:
                    seen_sentences.append(sent_id)
        return sentences

    @classmethod
    def load_features(cls, root):
        # Collect <wl:feature> elements into {key: value-or-list}; repeated
        # keys are promoted from scalar to list.
        features = {}
        # inverse feature mapping for loading
        feature_mapping = cls.invert_mapping(cls.FEATURE_MAPPING)
        for feat_element in root.iterfind('{%s}feature' % cls.get_default_ns(),
                                          namespaces=cls.DOCUMENT_NAMESPACES):
            feat_attributes = cls.load_attributes(feat_element.attrib,
                                                  mapping=feature_mapping)
            if 'key' in feat_attributes and feat_attributes['key'] in features:
                if not isinstance(features[feat_attributes['key']], list):
                    features[feat_attributes['key']] = [
                        features[feat_attributes['key']]]
                if feat_element.text is not None:
                    features[feat_attributes['key']].append(
                        cls.cast_item(feat_element.text.strip()))
            elif feat_element.text is not None:
                features[feat_attributes['key']] = cls.cast_item(
                    feat_element.text.strip())
        return features

    @classmethod
    def load_relations(cls, root):
        # Collect <wl:relation> elements; same key/list promotion scheme as
        # load_features.
        relations = {}
        # inverse relation mapping for loading
        relation_mapping = cls.invert_mapping(cls.RELATION_MAPPING)
        for rel_element in root.iterfind('{%s}relation' % cls.get_default_ns(),
                                         namespaces=cls.DOCUMENT_NAMESPACES):
            rel_attributes = cls.load_attributes(rel_element.attrib,
                                                 mapping=relation_mapping)
            if 'key' in rel_attributes and rel_attributes['key'] in relations:
                if not isinstance(relations[rel_attributes['key']], list):
                    relations[rel_attributes['key']] = [
                        relations[rel_attributes['key']]]
                if rel_element.text is not None:
                    relations[rel_attributes['key']].append(
                        cls.cast_item(rel_element.text.strip()))
            elif rel_element.text is not None:
                relations[rel_attributes['key']] = cls.cast_item(
                    rel_element.text.strip())
        return relations

    @classmethod
    def dump_xml_attributes(cls, attributes, mapping):
        # Rename dict keys via `mapping` (expanding (name, ns) tuples to
        # Clark notation) and encode values for XML; unmapped namespaced
        # keys and empty/placeholder values are dropped.
        new_attributes = {}
        for key, value in attributes.iteritems():
            if mapping and key in mapping:
                key = mapping[key]
            elif ':' in key:
                continue
            if isinstance(key, tuple):
                key, namespace = key
                if namespace is not None:
                    key = '{%s}%s' % (
                        cls.DOCUMENT_NAMESPACES[namespace], key)
            if value and value not in ('None', 'null', '0.0'):
                new_attributes[key] = cls.encode_value(value)
        return new_attributes

    @classmethod
    def clean_attributes(cls, attributes):
        # Remove entries with None keys/values or dict values (lxml cannot
        # serialise them as attribute text).
        result = {}
        for key, val in attributes.iteritems():
            if key is None or val is None or isinstance(val, dict):
                continue
            result[key] = val
        return result

    @classmethod
    def map_by_annotationtype(cls, itemlist):
        # Group a flat annotation list into {annotationType: [items]}.
        result = {}
        for item in itemlist:
            if not item['annotationType'] in result:
                result[item['annotationType']] = []
            result[item['annotationType']].append(item)
        return result

    @classmethod
    def get_required_namespaces(cls, attributes):
        # Derive the prefix->URI nsmap needed to serialise `attributes`,
        # consulting each mapping in turn. 'wl' is always included.
        # NOTE(review): the URI lookup uses ATTR_MAPPING[att] even when the
        # prefix came from another mapping -- confirm this is intended; any
        # error silently aborts the scan.
        result = {}
        try:
            for att in attributes:
                ns_prefix = None
                if att in cls.ATTR_MAPPING:
                    _, ns_prefix = cls.ATTR_MAPPING[att]
                elif att in cls.SENTENCE_MAPPING:
                    _, ns_prefix = cls.SENTENCE_MAPPING[att]
                elif cls.ANNOTATION_MAPPING and att in cls.ANNOTATION_MAPPING:
                    _, ns_prefix = cls.ANNOTATION_MAPPING[att]
                elif cls.FEATURE_MAPPING and att in cls.FEATURE_MAPPING:
                    _, ns_prefix = cls.FEATURE_MAPPING[att]
                elif cls.RELATION_MAPPING and att in cls.RELATION_MAPPING:
                    _, ns_prefix = cls.RELATION_MAPPING[att]
                elif not att in cls.ATTR_MAPPING:
                    continue  # skip unknown attributes
                if ns_prefix is not None and ns_prefix in cls.DOCUMENT_NAMESPACES:
                    namespace = cls.DOCUMENT_NAMESPACES[cls.ATTR_MAPPING[att][1]]
                    result[ns_prefix] = namespace
        except Exception as e:
            pass
        if not 'wl' in result:
            result['wl'] = cls.DOCUMENT_NAMESPACES['wl']
        return result

    @classmethod
    def dump_xml(cls, titles, attributes, sentences, annotations=[],
                 features={}, relations={}):
        ''' returns a webLyzard XML document '''
        # NOTE(review): mutable default arguments -- safe only as long as
        # they are never mutated in place, which holds for the code below.
        required_namespaces = cls.get_required_namespaces(attributes)
        attributes, sentences = cls.pre_xml_dump(titles=titles,
                                                 attributes=attributes,
                                                 sentences=sentences)
        if attributes:
            assert isinstance(attributes, dict), 'dict required'
            attributes = cls.dump_xml_attributes(attributes=attributes,
                                                 mapping=cls.ATTR_MAPPING)
            try:
                attributes = cls.clean_attributes(attributes)
            except Exception as e:
                logger.warn(e)
        root = etree.Element('{%s}page' % cls.get_default_ns(),
                             attrib=attributes,
                             nsmap=required_namespaces)
        # Sentences: one <wl:sentence> per entry, text wrapped in CDATA.
        for sent in sentences:
            sent = sent.as_dict()
            assert isinstance(sent, dict), 'dict required'
            value = sent['value']
            del sent['value']
            if not value:
                continue
            value = cls.get_xml_value(value)
            sent_attributes = cls.dump_xml_attributes(sent,
                                                      mapping=cls.SENTENCE_MAPPING)
            sent_elem = etree.SubElement(root,
                                         '{%s}sentence' % cls.get_default_ns(),
                                         attrib=sent_attributes,
                                         nsmap={})
            try:
                sent_elem.text = etree.CDATA(value)
            except Exception as e:
                print('Skipping bad cdata: %s (%s)' % (value, e))
                continue
        if annotations:
            if isinstance(annotations, list):
                annotations = cls.map_by_annotationtype(annotations)
            # add all annotations as body annotations
            for a_type, a_items in annotations.iteritems():
                if a_items is None or len(a_items) == 0:
                    continue
                for annotation in a_items:
                    if not isinstance(annotation, dict):
                        continue
                    assert isinstance(annotation, dict), 'dict required'
                    if 'entities' in annotation:
                        # Each entity becomes its own <wl:annotation>,
                        # inheriting key/type from the parent annotation.
                        for entity in annotation['entities']:
                            entity = entity.copy()
                            entity['annotation_type'] = a_type
                            entity['key'] = annotation['key']
                            preferred_name = annotation['preferredName']
                            if not isinstance(preferred_name, unicode):
                                preferred_name = preferred_name.decode('utf-8')
                            entity['preferredName'] = preferred_name
                            annotation_attributes = cls.dump_xml_attributes(
                                entity, mapping=cls.ANNOTATION_MAPPING)
                            try:
                                etree.SubElement(root,
                                                 '{%s}annotation' % cls.get_default_ns(),
                                                 attrib=annotation_attributes,
                                                 nsmap={})
                            except Exception as e:
                                continue
        # feature mappings if specified
        if cls.FEATURE_MAPPING and len(cls.FEATURE_MAPPING):
            for key, items in features.iteritems():
                feature_attributes = cls.dump_xml_attributes({'key': key},
                                                             mapping=cls.FEATURE_MAPPING)
                if not isinstance(items, list):
                    items = [items]
                for value in items:
                    try:
                        value = cls.get_xml_value(value)
                        feat_elem = etree.SubElement(root,
                                                     '{%s}feature' % cls.get_default_ns(
                                                     ),
                                                     attrib=feature_attributes,
                                                     nsmap={})
                        feat_elem.text = etree.CDATA(value)
                    except Exception as e:
                        print('Skipping bad cdata: %s (%s)' % (value, e))
                        continue
        # relation mappings, if specified
        if cls.RELATION_MAPPING and len(cls.RELATION_MAPPING):
            for key, items in relations.iteritems():
                rel_attributes = cls.dump_xml_attributes({'key': key},
                                                         mapping=cls.RELATION_MAPPING)
                if not isinstance(items, list):
                    items = [items]
                for value in items:
                    try:
                        value = cls.get_xml_value(value)
                        rel_elem = etree.SubElement(root,
                                                    '{%s}relation' % cls.get_default_ns(
                                                    ),
                                                    attrib=rel_attributes,
                                                    nsmap={})
                        rel_elem.text = etree.CDATA(value)
                    except Exception as e:
                        print('Skipping bad cdata: %s (%s)' % (value, e))
                        continue
        return etree.tostring(root, encoding='UTF-8', pretty_print=True)

    @classmethod
    def pre_xml_dump(cls, titles, attributes, sentences):
        ''' overriding this functions allows to perform custom cleanup tasks'''
        return attributes, sentences
new: support for resolve_namespaces
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 07.04.2014
@author: heinz-peterlang
'''
from __future__ import print_function
import json
import logging
import hashlib
import unicodedata
from lxml import etree
from datetime import date, datetime
from weblyzard_api.model.exceptions import (MalformedJSONException,
UnexpectedFieldException,
MissingFieldException,
UnsupportedValueException)
logger = logging.getLogger('weblyzard_api.parsers')
class DatesToStrings(json.JSONEncoder):
    """JSON encoder that serialises ``datetime``/``date`` values as ISO-8601
    strings before delegating to the standard encoder.

    The previous implementation only converted datetimes appearing as keys or
    values of (nested) dicts; a datetime at the top level or inside a list
    made ``encode`` raise ``TypeError``, and ``date`` objects were never
    handled at all. ``_encode`` now walks dicts, lists and tuples alike.
    """

    def _encode(self, obj):
        # Recursively replace date-like objects wherever they occur.
        if isinstance(obj, (datetime, date)):
            return obj.isoformat()
        if isinstance(obj, dict):
            return {self._encode(k): self._encode(v) for k, v in obj.items()}
        if isinstance(obj, (list, tuple)):
            return [self._encode(item) for item in obj]
        return obj

    def encode(self, obj):
        # Pre-process the structure, then use the stock JSON encoder.
        return super(DatesToStrings, self).encode(self._encode(obj))
class JSONParserBase(object):
'''
JSON Parser base class.
'''
#: Override this constant in the subclasses based on requirements.
FIELDS_REQUIRED = []
#: Override this constant in the subclasses based on requirements.
FIELDS_OPTIONAL = []
#: Override this constant in the subclasses based on requirements.
API_VERSION = None
@classmethod
def from_json_string(cls, json_string):
'''
Parses a JSON string.
:param json_string: The JSON to parse
:type json_string: str
:returns: The parsed object.
:rtype: :py:class:`weblyzard_api.model.xml_content.XMLContent` or \
:py:class:`wl_core.document.Document` or \
:py:class:`weblyzard_api.model.xml_content.Sentence` or\
dict.
'''
try:
api_dict = json.loads(json_string)
except Exception:
raise MalformedJSONException('JSON could not be parsed')
return cls.from_api_dict(api_dict)
    @classmethod
    def from_api_dict(cls, api_dict):
        # Abstract hook: subclasses convert the parsed API dict into their
        # document/model object; the base class cannot do this itself.
        raise NotImplementedError
@classmethod
def _missing_fields(cls, api_dict):
'''
Checks if the given API dict misses a required field.
:param api_dict: The document to check as dict.
:type api_dict: dict
:returns: The list of missing fields, None if all present.
:rtype: list
'''
missing_fields = []
for key in cls.FIELDS_REQUIRED:
if key in api_dict:
# check if the fields contain non-null values
if api_dict[key] is None or api_dict[key] == '':
missing_fields.append(key)
else:
missing_fields.append(key)
if len(missing_fields) > 0:
return missing_fields
else:
return None
@classmethod
def _unexpected_fields(cls, api_dict):
'''
Checks if the given API dict contains an unexpected field.
:param api_dict: The document to check as dict.
:type api_dict: dict
:returns: The list of unexpected fields, None if all accepted.
:rtype: list
'''
allowed_fields = cls.FIELDS_REQUIRED + cls.FIELDS_OPTIONAL
unexpected_fields = []
for key in api_dict:
if key not in allowed_fields:
unexpected_fields.append(key)
if len(unexpected_fields) > 0:
return unexpected_fields
else:
return None
@classmethod
def _check_document_format(cls, api_dict, strict=True):
'''
Checks if the api_dict has all required fields and if there
are unexpected and unallowed keys.
:param api_dict: The dict to check.
:type api_dict: dict
:param strict: If set to true, an UnexpectedFieldException is raised \
if an unexpected key is contained in the dict.
:type strict: bool
'''
missing_fields = cls._missing_fields(api_dict)
if missing_fields is not None:
raise MissingFieldException("Missing field(s) %s" %
', '.join(missing_fields))
if strict:
unexpected_fields = cls._unexpected_fields(api_dict)
if unexpected_fields is not None:
raise UnexpectedFieldException("Got unexpected field(s): %s" %
', '.join(unexpected_fields))
@classmethod
def _validate_document(cls, json_document, strict=True):
''' '''
cls._check_document_format(json_document, strict)
if 'content' in json_document and 'content_type' not in json_document:
raise MissingFieldException(
"When field 'content' is set, 'content_type' must be set, too.")
elif 'content_type' in json_document and 'content' not in json_document:
raise MissingFieldException(
"When field 'content_type' is set, 'content' must be set, too.")
elif 'content' not in json_document and 'content_type' not in json_document and\
'sentences' not in json_document:
raise MissingFieldException(
"Either 'sentences' or 'content' and 'content_type' must be set.")
if 'content' in json_document and 'sentences' in json_document:
raise MalformedJSONException(
"If 'sentences' is set, 'content' must not be set.")
if 'content_type' in json_document and not json_document['content_type'] in cls.SUPPORTED_CONTENT_TYPES:
raise UnsupportedValueException("content_type %s is not supported. Supported are %s" %
(json_document['content_type'],
cls.SUPPORTED_CONTENT_TYPES))
meta_data = json_document.get('meta_data', {})
valid_from = None
if 'published_date' in meta_data:
try:
from dateutil.parser import parse
valid_from = parse(meta_data['published_date'])
except Exception as e:
raise MissingFieldException(
"Could not process published_date: %s" % meta_data['published_date'])
if not isinstance(valid_from, datetime):
raise UnsupportedValueException(
'Field published_date set but not parseable')
class XMLParser(object):
    '''
    Base class for parsing and serialising webLyzard XML documents.

    Subclasses configure the concrete XML dialect via the class constants
    below: the supported namespace URI, the prefix->URI table, and the
    attribute/sentence/annotation/feature/relation name mappings.
    '''
    #: version string of the XML dialect (set by subclasses)
    VERSION = None
    #: namespace URI this parser accepts (see is_supported())
    SUPPORTED_NAMESPACE = None
    #: prefix -> namespace URI table used to resolve element names
    DOCUMENT_NAMESPACES = None
    #: maps internal attribute names to (xml_name, namespace_prefix)
    ATTR_MAPPING = None
    #: maps internal sentence attribute names to XML names
    SENTENCE_MAPPING = None
    #: maps internal annotation attribute names to XML names
    ANNOTATION_MAPPING = None
    #: maps internal feature attribute names to XML names
    FEATURE_MAPPING = None
    #: maps internal relation attribute names to XML names
    RELATION_MAPPING = None
    #: default namespace prefix for webLyzard elements
    DEFAULT_NAMESPACE = 'wl'

    @classmethod
    def get_default_ns(cls):
        '''Return the namespace URI used for all webLyzard elements.'''
        return cls.SUPPORTED_NAMESPACE

    @classmethod
    def remove_control_characters(cls, value):
        '''Strip Unicode control characters (category "C*") from ``value``.'''
        return ''.join(ch for ch in value if unicodedata.category(ch)[0] != 'C')

    @classmethod
    def encode_value(cls, value):
        '''Serialise ``value`` for use as an XML attribute value.

        Unicode/byte strings are cleaned of control characters, dates and
        datetimes are ISO-formatted, everything else is JSON-encoded.
        Returns None if JSON encoding fails.
        '''
        if isinstance(value, unicode):
            return XMLParser.remove_control_characters(value)
        elif isinstance(value, str):
            # Python 2: bytestrings are decoded to unicode before cleanup
            return XMLParser.remove_control_characters(value.decode('utf-8'))
        elif isinstance(value, date):
            return value.isoformat()
        elif isinstance(value, datetime):
            return value.isoformat()
        else:
            try:
                return json.dumps(value)
            except Exception as e:
                logger.error('could not encode {}: {}'.format(value, e))
                return

    @classmethod
    def decode_value(cls, value):
        '''Deserialise an attribute value via JSON, falling back to the
        raw string when it is not valid JSON.'''
        try:
            decoded = json.loads(value)
            # NOTE(review): NaN never compares equal to anything, so the
            # float('nan') entry below can never match; only +/-inf are
            # actually rejected — verify whether NaN should be too.
            if decoded in (float('inf'), float('-inf'), float('nan')):
                raise ValueError('deserializing of invalid json values')
            else:
                return decoded
        except ValueError:
            # ignore silently (expected behaviour)
            return value

    @classmethod
    def cast_item(cls, item):
        '''Best-effort cast of a string to bool, int, float or JSON;
        returns the original string when nothing matches.'''
        if item.lower() == 'true':
            return True
        elif item.lower() == 'false':
            return False
        try:
            return int(item)
        except Exception:
            pass
        try:
            return float(item)
        except Exception:
            pass
        try:
            return json.loads(item)
        except Exception:
            pass
        return item

    @classmethod
    def get_xml_value(cls, value):
        '''Convert ``value`` to a string suitable for XML text content
        (numbers/datetimes via str(), containers via JSON).'''
        try:
            if isinstance(value, int) or isinstance(value, float) or \
                    isinstance(value, datetime):
                value = str(value)
            elif isinstance(value, list) or isinstance(value, dict):
                value = json.dumps(value, cls=DatesToStrings)
        except Exception as e:
            logger.error('could not encode {}: {}'.format(value, e))
            value = str(value)
        return value

    @classmethod
    def is_supported(cls, xml_content):
        '''Return True if ``xml_content`` declares the supported wl namespace.'''
        return 'xmlns:wl="{}"'.format(cls.SUPPORTED_NAMESPACE) in xml_content

    @classmethod
    def invert_mapping(cls, mapping):
        '''Invert a name mapping (xml name -> internal name), expanding
        (name, prefix) tuples to fully qualified "{uri}name" keys.'''
        result = {}
        if mapping == None:
            return result
        invert_mapping = dict(zip(mapping.values(),
                                  mapping.keys()))
        for key, value in invert_mapping.iteritems():
            if isinstance(key, tuple):
                key, namespace = key
                if namespace is not None:
                    key = '{%s}%s' % (cls.DOCUMENT_NAMESPACES[namespace], key)
            result[key] = value
        return result

    @classmethod
    def parse(cls, xml_content, remove_duplicates=True):
        '''Parse a webLyzard XML document.

        :returns: tuple (attributes, sentences, title_annotations,
            body_annotations, features, relations)
        '''
        parser = etree.XMLParser(recover=True, strip_cdata=False)
        # the encoding declaration is stripped before parsing — presumably
        # because lxml rejects unicode input carrying one; TODO confirm
        root = etree.fromstring(xml_content.replace('encoding="UTF-8"', ''),
                                parser=parser)
        try:
            invert_mapping = cls.invert_mapping(cls.ATTR_MAPPING)
            attributes = cls.load_attributes(root.attrib,
                                             mapping=invert_mapping)
        except Exception as e:
            logger.warn('could not process mapping {}: {}'.format(
                cls.ATTR_MAPPING, e))
            attributes = {}
        sentences = cls.load_sentences(
            root, remove_duplicates=remove_duplicates)
        # annotations on title sentences are reported separately below
        title_sentence_ids = [sentence['md5sum'] for sentence in sentences
                              if 'is_title' in sentence and sentence['is_title']]
        title_annotations = []
        body_annotations = []
        for annotation in cls.load_annotations(root):
            if 'md5sum' in annotation and annotation['md5sum'] in title_sentence_ids:
                title_annotations.append(annotation)
            else:
                body_annotations.append(annotation)
        features = cls.load_features(root)
        relations = cls.load_relations(root)
        return attributes, sentences, title_annotations, body_annotations, features, relations

    @classmethod
    def load_attributes(cls, attributes, mapping):
        '''Map an element's attribute names via ``mapping`` and JSON-decode
        the values, dropping values equal to the string 'None'.'''
        new_attributes = {}
        for key, value in attributes.iteritems():
            if mapping and key in mapping:
                key = mapping.get(key, key)
            value = cls.decode_value(value)
            if not value == 'None':
                new_attributes[key] = value
        return new_attributes

    @classmethod
    def load_annotations(cls, root):
        '''Extract one attribute dict per <wl:annotation> element.'''
        annotations = []
        annotation_mapping = cls.invert_mapping(cls.ANNOTATION_MAPPING)
        for annotation_element in root.iterfind('{%s}annotation' % cls.get_default_ns(),
                                                namespaces=cls.DOCUMENT_NAMESPACES):
            annotations.append(cls.load_attributes(annotation_element.attrib,
                                                   mapping=annotation_mapping))
        return annotations

    @classmethod
    def load_sentences(cls, root, remove_duplicates=True):
        '''Extract sentence dicts from <wl:sentence> elements.

        Ensures every sentence carries an 'md5sum' (taken from a legacy
        'id' attribute or computed from the text); when
        ``remove_duplicates`` is set, later sentences with an already-seen
        id are skipped.
        '''
        sentences = []
        seen_sentences = []
        sentence_mapping = cls.invert_mapping(cls.SENTENCE_MAPPING)
        for sent_element in root.iterfind('{%s}sentence' % cls.get_default_ns(),
                                          namespaces=cls.DOCUMENT_NAMESPACES):
            sent_attributes = cls.load_attributes(sent_element.attrib,
                                                  mapping=sentence_mapping)
            sent_attributes['value'] = sent_element.text.strip()
            if 'md5sum' in sent_attributes:
                sent_id = sent_attributes['md5sum']
            elif 'id' in sent_attributes:
                # legacy documents use 'id'; normalise to 'md5sum'
                sent_id = sent_attributes['id']
                sent_attributes['md5sum'] = sent_id
                del sent_attributes['id']
            else:
                sent_id = hashlib.md5(
                    sent_element.text.encode('utf-8')).hexdigest()
                sent_attributes['md5sum'] = sent_id
            if not sent_id in seen_sentences:
                sentences.append(sent_attributes)
                if remove_duplicates:
                    seen_sentences.append(sent_id)
        return sentences

    @classmethod
    def load_features(cls, root):
        '''Extract a key -> value (or list of values) dict from
        <wl:feature> elements; repeated keys are collected into lists.'''
        features = {}
        # inverse feature mapping for loading
        feature_mapping = cls.invert_mapping(cls.FEATURE_MAPPING)
        for feat_element in root.iterfind('{%s}feature' % cls.get_default_ns(),
                                          namespaces=cls.DOCUMENT_NAMESPACES):
            feat_attributes = cls.load_attributes(feat_element.attrib,
                                                  mapping=feature_mapping)
            if 'key' in feat_attributes and feat_attributes['key'] in features:
                # repeated key: promote the existing value to a list
                if not isinstance(features[feat_attributes['key']], list):
                    features[feat_attributes['key']] = [
                        features[feat_attributes['key']]]
                if feat_element.text is not None:
                    features[feat_attributes['key']].append(
                        cls.cast_item(feat_element.text.strip()))
            elif feat_element.text is not None:
                features[feat_attributes['key']] = cls.cast_item(
                    feat_element.text.strip())
        return features

    @classmethod
    def load_relations(cls, root):
        '''Extract a key -> value (or list of values) dict from
        <wl:relation> elements; repeated keys are collected into lists.'''
        relations = {}
        # inverse relation mapping for loading
        relation_mapping = cls.invert_mapping(cls.RELATION_MAPPING)
        for rel_element in root.iterfind('{%s}relation' % cls.get_default_ns(),
                                         namespaces=cls.DOCUMENT_NAMESPACES):
            rel_attributes = cls.load_attributes(rel_element.attrib,
                                                 mapping=relation_mapping)
            if 'key' in rel_attributes and rel_attributes['key'] in relations:
                # repeated key: promote the existing value to a list
                if not isinstance(relations[rel_attributes['key']], list):
                    relations[rel_attributes['key']] = [
                        relations[rel_attributes['key']]]
                if rel_element.text is not None:
                    relations[rel_attributes['key']].append(
                        cls.cast_item(rel_element.text.strip()))
            elif rel_element.text is not None:
                relations[rel_attributes['key']] = cls.cast_item(
                    rel_element.text.strip())
        return relations

    @classmethod
    def dump_xml_attributes(cls, attributes, mapping, resolve_namespaces=True):
        '''Map internal attribute names to XML names and encode the values.

        :param resolve_namespaces: if True, namespaced names are expanded \
            to Clark notation '{uri}name'; otherwise 'prefix:name' is used.
        '''
        new_attributes = {}
        for key, value in attributes.iteritems():
            if mapping and key in mapping:
                key = mapping[key]
            elif ':' in key:
                # unmapped namespaced attributes are skipped entirely
                continue
            if isinstance(key, tuple):
                key, namespace = key
                if namespace is not None:
                    if resolve_namespaces:
                        key = '{%s}%s' % (
                            cls.DOCUMENT_NAMESPACES[namespace], key)
                    else:
                        key = '%s:%s' % (namespace, key)
            # falsy values and a few sentinel strings are not serialised
            if value and value not in ('None', 'null', '0.0'):
                new_attributes[key] = cls.encode_value(value)
        return new_attributes

    @classmethod
    def clean_attributes(cls, attributes):
        '''Drop entries with None keys/values or dict values (which lxml
        cannot serialise as attributes).'''
        result = {}
        for key, val in attributes.iteritems():
            if key is None or val is None or isinstance(val, dict):
                continue
            result[key] = val
        return result

    @classmethod
    def map_by_annotationtype(cls, itemlist):
        '''Group a flat annotation list into {annotationType: [items]}.'''
        result = {}
        for item in itemlist:
            if not item['annotationType'] in result:
                result[item['annotationType']] = []
            result[item['annotationType']].append(item)
        return result

    @classmethod
    def get_required_namespaces(cls, attributes):
        '''Collect the prefix -> URI namespaces required to serialise
        ``attributes``; always includes the 'wl' namespace.'''
        result = {}
        try:
            for att in attributes:
                ns_prefix = None
                if att in cls.ATTR_MAPPING:
                    _, ns_prefix = cls.ATTR_MAPPING[att]
                elif att in cls.SENTENCE_MAPPING:
                    _, ns_prefix = cls.SENTENCE_MAPPING[att]
                elif cls.ANNOTATION_MAPPING and att in cls.ANNOTATION_MAPPING:
                    _, ns_prefix = cls.ANNOTATION_MAPPING[att]
                elif cls.FEATURE_MAPPING and att in cls.FEATURE_MAPPING:
                    _, ns_prefix = cls.FEATURE_MAPPING[att]
                elif cls.RELATION_MAPPING and att in cls.RELATION_MAPPING:
                    _, ns_prefix = cls.RELATION_MAPPING[att]
                elif not att in cls.ATTR_MAPPING:
                    continue  # skip unknown attributes
                # NOTE(review): the lookup below always goes through
                # ATTR_MAPPING even when ns_prefix came from one of the
                # other mappings; for such attributes this raises KeyError
                # and the bare except aborts the whole loop — verify intent.
                if ns_prefix is not None and ns_prefix in cls.DOCUMENT_NAMESPACES:
                    namespace = cls.DOCUMENT_NAMESPACES[cls.ATTR_MAPPING[att][1]]
                    result[ns_prefix] = namespace
        except Exception as e:
            pass
        if not 'wl' in result:
            result['wl'] = cls.DOCUMENT_NAMESPACES['wl']
        return result

    @classmethod
    def dump_xml(cls, titles, attributes, sentences, annotations=[],
                 features={}, relations={}):
        ''' returns a webLyzard XML document '''
        # NOTE(review): mutable default arguments are shared between calls;
        # safe only as long as they are never mutated here — verify.
        required_namespaces = cls.get_required_namespaces(attributes)
        attributes, sentences = cls.pre_xml_dump(titles=titles,
                                                 attributes=attributes,
                                                 sentences=sentences)
        if attributes:
            assert isinstance(attributes, dict), 'dict required'
            attributes = cls.dump_xml_attributes(attributes=attributes,
                                                 mapping=cls.ATTR_MAPPING)
            try:
                attributes = cls.clean_attributes(attributes)
            except Exception as e:
                logger.warn(e)
        root = etree.Element('{%s}page' % cls.get_default_ns(),
                             attrib=attributes,
                             nsmap=required_namespaces)
        # serialise the sentences as CDATA children
        for sent in sentences:
            sent = sent.as_dict()
            assert isinstance(sent, dict), 'dict required'
            value = sent['value']
            del sent['value']
            if not value:
                continue  # empty sentences are not serialised
            value = cls.get_xml_value(value)
            sent_attributes = cls.dump_xml_attributes(sent,
                                                      mapping=cls.SENTENCE_MAPPING)
            sent_elem = etree.SubElement(root,
                                         '{%s}sentence' % cls.get_default_ns(),
                                         attrib=sent_attributes,
                                         nsmap={})
            try:
                sent_elem.text = etree.CDATA(value)
            except Exception as e:
                print('Skipping bad cdata: %s (%s)' % (value, e))
                continue
        if annotations:
            if isinstance(annotations, list):
                annotations = cls.map_by_annotationtype(annotations)
            # add all annotations as body annotations
            for a_type, a_items in annotations.iteritems():
                if a_items is None or len(a_items) == 0:
                    continue
                for annotation in a_items:
                    if not isinstance(annotation, dict):
                        continue
                    assert isinstance(annotation, dict), 'dict required'
                    if 'entities' in annotation:
                        # one <wl:annotation> element per entity occurrence
                        for entity in annotation['entities']:
                            entity = entity.copy()
                            entity['annotation_type'] = a_type
                            entity['key'] = annotation['key']
                            preferred_name = annotation['preferredName']
                            if not isinstance(preferred_name, unicode):
                                preferred_name = preferred_name.decode('utf-8')
                            entity['preferredName'] = preferred_name
                            annotation_attributes = cls.dump_xml_attributes(
                                entity, mapping=cls.ANNOTATION_MAPPING)
                            try:
                                etree.SubElement(root,
                                                 '{%s}annotation' % cls.get_default_ns(),
                                                 attrib=annotation_attributes,
                                                 nsmap={})
                            except Exception as e:
                                # NOTE(review): serialisation failures are
                                # dropped silently here — verify intent.
                                continue
        # feature mappings if specified
        if cls.FEATURE_MAPPING and len(cls.FEATURE_MAPPING):
            for key, items in features.iteritems():
                feature_attributes = cls.dump_xml_attributes({'key': key},
                                                             mapping=cls.FEATURE_MAPPING)
                if not isinstance(items, list):
                    items = [items]
                for value in items:
                    try:
                        value = cls.get_xml_value(value)
                        feat_elem = etree.SubElement(root,
                                                     '{%s}feature' % cls.get_default_ns(
                                                     ),
                                                     attrib=feature_attributes,
                                                     nsmap={})
                        feat_elem.text = etree.CDATA(value)
                    except Exception as e:
                        print('Skipping bad cdata: %s (%s)' % (value, e))
                        continue
        # relation mappings, if specified
        if cls.RELATION_MAPPING and len(cls.RELATION_MAPPING):
            for key, items in relations.iteritems():
                rel_attributes = cls.dump_xml_attributes({'key': key},
                                                         mapping=cls.RELATION_MAPPING)
                if not isinstance(items, list):
                    items = [items]
                for value in items:
                    try:
                        value = cls.get_xml_value(value)
                        rel_elem = etree.SubElement(root,
                                                    '{%s}relation' % cls.get_default_ns(
                                                    ),
                                                    attrib=rel_attributes,
                                                    nsmap={})
                        rel_elem.text = etree.CDATA(value)
                    except Exception as e:
                        print('Skipping bad cdata: %s (%s)' % (value, e))
                        continue
        return etree.tostring(root, encoding='UTF-8', pretty_print=True)

    @classmethod
    def pre_xml_dump(cls, titles, attributes, sentences):
        '''Hook allowing subclasses to perform custom cleanup tasks before
        dumping; the default implementation is a no-op.'''
        return attributes, sentences
# --------------------------------------------------------------------------
# Copyright (c) 2010-2013, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Core calculator functionality for computing stochastic event sets and ground
motion fields using the 'event-based' method.
Stochastic events sets (which can be thought of as collections of ruptures) are
computed given a set of seismic sources and an investigation time span (in years).
For more information on computing stochastic event sets, see
:mod:`openquake.hazardlib.calc.stochastic`.
One can optionally compute a ground motion field (GMF) given a rupture, a site
collection (which is a collection of geographical points with associated soil
parameters), and a ground shaking intensity model (GSIM).
For more information on computing ground motion fields, see
:mod:`openquake.hazardlib.calc.gmf`.
"""
import random
import collections
import numpy.random
from django.db import transaction
from openquake.hazardlib.calc import filters
from openquake.hazardlib.calc import gmf
from openquake.hazardlib.imt import from_string
from openquake.engine import writer
from openquake.engine.calculators.hazard import general
from openquake.engine.calculators.hazard.classical import (
post_processing as cls_post_proc)
from openquake.engine.calculators.hazard.event_based import post_processing
from openquake.engine.db import models
from openquake.engine.utils import tasks
from openquake.engine.performance import EnginePerformanceMonitor, LightMonitor
#: Always 1 for the computation of ground motion fields in the event-based
#: hazard calculator.
DEFAULT_GMF_REALIZATIONS = 1
# NB: beware of large caches
# bulk inserter for GmfData rows, flushing every 1000 cached records
inserter = writer.CacheInserter(models.GmfData, 1000)
@tasks.oqtask
def compute_ses_and_gmfs(job_id, src_seeds, gsims_by_rlz, task_no):
    """
    Celery task for the stochastic event set calculator.

    Samples logic trees and calls the stochastic event set calculator.
    Once stochastic event sets are calculated, results will be saved to the
    database. See :class:`openquake.engine.db.models.SESCollection`.

    Optionally (specified in the job configuration using the
    `ground_motion_fields` parameter), GMFs can be computed from each rupture
    in each stochastic event set. GMFs are also saved to the database.

    :param int job_id:
        ID of the currently running job.
    :param src_seeds:
        List of pairs (source, seed)
    :param gsims_by_rlz:
        dictionary of GSIM
    :param task_no:
        an ordinal so that GMV can be collected in a reproducible order
    """
    # look up the container records created by the calculator's pre_execute
    rlz_ids = [r.id for r in gsims_by_rlz]
    ses_coll = models.SESCollection.objects.get(lt_realization_ids=rlz_ids)
    hc = models.HazardCalculation.objects.get(oqjob=job_id)
    all_ses = models.SES.objects.filter(ses_collection=ses_coll)
    imts = map(from_string, hc.intensity_measure_types)
    params = dict(
        correl_model=general.get_correl_model(hc),
        truncation_level=hc.truncation_level,
        maximum_distance=hc.maximum_distance,
        num_sites=len(hc.site_collection))
    collector = GmfCollector(
        [s.id for s in hc.site_collection], params, imts, gsims_by_rlz)
    # lightweight monitors attributing the task's time to its phases
    mon1 = LightMonitor('filtering sites', job_id, compute_ses_and_gmfs)
    mon2 = LightMonitor('generating ruptures', job_id, compute_ses_and_gmfs)
    mon3 = LightMonitor('filtering ruptures', job_id, compute_ses_and_gmfs)
    mon4 = LightMonitor('saving ses', job_id, compute_ses_and_gmfs)
    mon5 = LightMonitor('computing gmfs', job_id, compute_ses_and_gmfs)
    # Compute and save stochastic event sets
    rnd = random.Random()
    for src, seed in src_seeds:
        # reseed per source so the sampling is reproducible per (src, seed)
        rnd.seed(seed)
        with mon1:  # filtering sources
            s_sites = src.filter_sites_by_distance_to_source(
                hc.maximum_distance, hc.site_collection
            ) if hc.maximum_distance else hc.site_collection
            if s_sites is None:
                continue
        # NB: the number of occurrences is very low, << 1, so it is
        # more efficient to filter only the ruptures that occur
        # and not to compute the occurrencies of the filtered ruptures
        # the dictionary `ses_num_occ` contains [(ses, num_occurrences)]
        # for each occurring rupture for each ses in the ses collection
        ses_num_occ = collections.defaultdict(list)
        with mon2:  # generating ruptures
            for rup in src.iter_ruptures():
                for ses in all_ses:
                    numpy.random.seed(rnd.randint(0, models.MAX_SINT_32))
                    num_occurrences = rup.sample_number_of_occurrences()
                    if num_occurrences:
                        ses_num_occ[rup].append((ses, num_occurrences))
        for rup in ses_num_occ:
            with mon3:  # filtering ruptures
                r_sites = rup.source_typology.\
                    filter_sites_by_distance_to_rupture(
                        rup, hc.maximum_distance, s_sites
                    ) if hc.maximum_distance else s_sites
                if r_sites is None:
                    continue
            # saving ses and generating gmf
            for ses, num_occurrences in ses_num_occ[rup]:
                for occ in range(1, num_occurrences + 1):
                    with mon4:  # saving ruptures
                        rup_id = models.SESRupture.objects.create(
                            ses=ses,
                            rupture=rup,
                            tag='smlt=%02d|ses=%04d|src=%s|occ=%02d'
                            % (ses_coll.ordinal, ses.ordinal,
                               src.source_id, occ),
                            hypocenter=rup.hypocenter.wkt2d,
                            magnitude=rup.mag).id
                    if hc.ground_motion_fields:
                        with mon5:  # computing GMFs
                            rup_seed = rnd.randint(0, models.MAX_SINT_32)
                            collector.calc_gmf(r_sites, rup, rup_id, rup_seed)
    mon1.flush()
    mon2.flush()
    mon3.flush()
    mon4.flush()
    mon5.flush()
    # GMVs were only accumulated in memory so far; persist them in one go
    if hc.ground_motion_fields:
        with EnginePerformanceMonitor(
                'saving gmfs', job_id, compute_ses_and_gmfs):
            collector.save_gmfs(task_no)
class GmfCollector(object):
    """
    Collects ground motion values per (realization, IMT, site) and
    persists them to the database in bulk.
    """

    def __init__(self, site_ids, params, imts, gsims_by_rlz):
        self.site_ids = site_ids
        self.params = params
        self.imts = imts
        self.gsims_by_rlz = gsims_by_rlz
        # (rlz, imt, site_id) -> list of collected ground motion values
        self.gmvs_per_site = collections.defaultdict(list)
        # (rlz, imt, site_id) -> ids of the ruptures producing those values
        self.ruptures_per_site = collections.defaultdict(list)

    def calc_gmf(self, r_sites, rupture, rupture_id, rupture_seed):
        """
        Compute the GMF generated by the given rupture on the given
        sites and accumulate the nonzero values in the dictionaries
        .gmvs_per_site and .ruptures_per_site.
        """
        rupture_triples = [(rupture, rupture_id, rupture_seed)]
        for rlz, gsims in self.gsims_by_rlz.items():
            gmf_data = _compute_gmf(
                self.params, self.imts, gsims, r_sites, rupture_triples)
            for imt, idx, gmv, rup_id in gmf_data:
                if not gmv:
                    continue  # keep only nonzero contributions
                key = (rlz, imt, self.site_ids[idx])
                self.gmvs_per_site[key].append(gmv)
                self.ruptures_per_site[key].append(rup_id)

    @transaction.commit_on_success(using='job_init')
    def save_gmfs(self, task_no):
        """
        Persist the collected GMF data to the database and reset the
        accumulators so the collector can be reused.

        :param task_no:
            The ordinal of the task which generated the current GMFs to save
        """
        for key in self.gmvs_per_site:
            rlz, imt, site_id = key
            imt_name, sa_period, sa_damping = imt
            inserter.add(models.GmfData(
                gmf=models.Gmf.objects.get(lt_realization=rlz),
                task_no=task_no,
                imt=imt_name,
                sa_period=sa_period,
                sa_damping=sa_damping,
                site_id=site_id,
                gmvs=self.gmvs_per_site[key],
                rupture_ids=self.ruptures_per_site[key]))
        inserter.flush()
        self.gmvs_per_site.clear()
        self.ruptures_per_site.clear()
# NB: I tried to return a single dictionary {site_id: [(gmv, rupt_id),...]}
# but it takes a lot more memory (MS)
def _compute_gmf(params, imts, gsims, site_coll, rupture_id_seed_triples):
    """
    Generator computing a ground motion field value for each rupture, for
    all the points affected by that rupture, for each intensity measure
    type. Yields tuples ``(imt, site_index, gmv, rupture_id)`` — one per
    site per IMT per rupture; zero values are yielded too and filtered by
    the caller.

    :param params:
        a dictionary containing the keys
        correl_model, truncation_level, maximum_distance
    :param imts:
        a list of hazardlib intensity measure types
    :param gsims:
        a dictionary {tectonic region type -> GSIM instance}
    :param site_coll:
        a SiteCollection instance
    :param rupture_id_seed_triples:
        a list of triples with types
        (:class:`openquake.hazardlib.source.rupture.Rupture`, int, int)
    """
    # Compute and save ground motion fields
    for rupture, rup_id, rup_seed in rupture_id_seed_triples:
        gmf_calc_kwargs = {
            'rupture': rupture,
            'sites': site_coll,
            'imts': imts,
            'gsim': gsims[rupture.tectonic_region_type],
            'truncation_level': params['truncation_level'],
            'realizations': DEFAULT_GMF_REALIZATIONS,
            'correlation_model': params['correl_model'],
            'num_sites': params['num_sites'],
        }
        # seeding numpy here makes the sampled field reproducible per rupture
        numpy.random.seed(rup_seed)
        gmf_dict = gmf.ground_motion_fields(**gmf_calc_kwargs)
        for imt, gmf_1_realiz in gmf_dict.iteritems():
            # since DEFAULT_GMF_REALIZATIONS is 1, gmf_1_realiz is a matrix
            # with n_sites rows and 1 column
            for idx, gmv in enumerate(gmf_1_realiz):
                # convert a 1x1 matrix into a float
                yield imt, idx, float(gmv), rup_id
class EventBasedHazardCalculator(general.BaseHazardCalculator):
    """
    Probabilistic Event-Based hazard calculator. Computes stochastic event sets
    and (optionally) ground motion fields.
    """
    # Celery task executed for each argument tuple yielded by task_arg_gen
    core_calc_task = compute_ses_and_gmfs

    def task_arg_gen(self, _block_size=None):
        """
        Loop through realizations and sources to generate a sequence of
        task arg tuples. Each tuple of args applies to a single task.
        Yielded results are tuples of the form job_id, sources, ses, seeds
        (seeds will be used to seed numpy for temporal occurence sampling).

        :param _block_size: unused; kept for interface compatibility.
        """
        hc = self.hc
        # a single Random seeded with the calculation seed makes the
        # per-source seeds reproducible across runs
        rnd = random.Random()
        rnd.seed(hc.random_seed)
        task_no = 0
        for job_id, block, gsims_by_rlz in super(
                EventBasedHazardCalculator, self).task_arg_gen():
            ss = [(src, rnd.randint(0, models.MAX_SINT_32))
                  for src in block]  # source, seed pairs
            yield job_id, ss, gsims_by_rlz, task_no
            task_no += 1
        # now the source_blocks_per_ltpath dictionary can be cleared
        self.source_blocks_per_ltpath.clear()

    def initialize_ses_db_records(self, ordinal, rlzs):
        """
        Create :class:`~openquake.engine.db.models.Output`,
        :class:`~openquake.engine.db.models.SESCollection` and
        :class:`~openquake.engine.db.models.SES` "container" records for
        a single realization.

        Stochastic event set ruptures computed for this realization will be
        associated to these containers.

        NOTE: Many tasks can contribute ruptures to the same SES.

        :returns: the list of created SES records.
        """
        rlz_ids = [r.id for r in rlzs]
        output = models.Output.objects.create(
            oq_job=self.job,
            display_name='SES Collection smlt-%d-rlz-%s' % (
                ordinal, ','.join(map(str, rlz_ids))),
            output_type='ses')
        ses_coll = models.SESCollection.objects.create(
            output=output, lt_realization_ids=rlz_ids, ordinal=ordinal)
        # one GMF output container per realization, if GMFs are requested
        for rlz in rlzs:
            if self.job.hazard_calculation.ground_motion_fields:
                output = models.Output.objects.create(
                    oq_job=self.job,
                    display_name='GMF rlz-%s' % rlz.id,
                    output_type='gmf')
                models.Gmf.objects.create(output=output, lt_realization=rlz)
        # one SES record per stochastic event set of the logic tree path
        all_ses = []
        for i in xrange(1, self.hc.ses_per_logic_tree_path + 1):
            all_ses.append(
                models.SES.objects.create(
                    ses_collection=ses_coll,
                    investigation_time=self.hc.investigation_time,
                    ordinal=i))
        return all_ses

    def pre_execute(self):
        """
        Do pre-execution work. At the moment, this work entails:
        parsing and initializing sources, parsing and initializing the
        site model (if there is one), parsing vulnerability and
        exposure files, and generating logic tree realizations. (The
        latter piece basically defines the work to be done in the
        `execute` phase.)
        """
        super(EventBasedHazardCalculator, self).pre_execute()
        # one set of SES container records per logic tree path
        for i, rlzs in enumerate(self.rlzs_per_ltpath.itervalues()):
            self.initialize_ses_db_records(i, rlzs)

    def post_process(self):
        """
        If requested, perform additional processing of GMFs to produce hazard
        curves.
        """
        if self.hc.hazard_curves_from_gmfs:
            with EnginePerformanceMonitor('generating hazard curves',
                                          self.job.id):
                self.parallelize(
                    post_processing.gmf_to_hazard_curve_task,
                    post_processing.gmf_to_hazard_curve_arg_gen(self.job),
                    self.log_percent)
            # If `mean_hazard_curves` is True and/or `quantile_hazard_curves`
            # has some value (not an empty list), do this additional
            # post-processing.
            if self.hc.mean_hazard_curves or self.hc.quantile_hazard_curves:
                with EnginePerformanceMonitor(
                        'generating mean/quantile curves', self.job.id):
                    self.do_aggregate_post_proc()
            if self.hc.hazard_maps:
                with EnginePerformanceMonitor(
                        'generating hazard maps', self.job.id):
                    self.parallelize(
                        cls_post_proc.hazard_curves_to_hazard_map_task,
                        cls_post_proc.hazard_curves_to_hazard_map_task_arg_gen(
                            self.job),
                        self.log_percent)
# Changelog: added logging of per-source computation times and rupture counts.
# Copyright (c) 2010-2013, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Core calculator functionality for computing stochastic event sets and ground
motion fields using the 'event-based' method.
Stochastic events sets (which can be thought of as collections of ruptures) are
computed given a set of seismic sources and an investigation time span (in years).
For more information on computing stochastic event sets, see
:mod:`openquake.hazardlib.calc.stochastic`.
One can optionally compute a ground motion field (GMF) given a rupture, a site
collection (which is a collection of geographical points with associated soil
parameters), and a ground shaking intensity model (GSIM).
For more information on computing ground motion fields, see
:mod:`openquake.hazardlib.calc.gmf`.
"""
import time
import random
import collections
import numpy.random
from django.db import transaction
from openquake.hazardlib.calc import gmf
from openquake.hazardlib.imt import from_string
from openquake.engine import logs, writer
from openquake.engine.calculators.hazard import general
from openquake.engine.calculators.hazard.classical import (
post_processing as cls_post_proc)
from openquake.engine.calculators.hazard.event_based import post_processing
from openquake.engine.db import models
from openquake.engine.utils import tasks
from openquake.engine.performance import EnginePerformanceMonitor, LightMonitor
#: Always 1 for the computation of ground motion fields in the event-based
#: hazard calculator.
DEFAULT_GMF_REALIZATIONS = 1
# NB: beware of large caches
# bulk inserter for GmfData rows, flushing every 1000 cached records
inserter = writer.CacheInserter(models.GmfData, 1000)
@tasks.oqtask
def compute_ses_and_gmfs(job_id, src_seeds, gsims_by_rlz, task_no):
    """
    Celery task for the stochastic event set calculator.

    Samples logic trees and calls the stochastic event set calculator.
    Once stochastic event sets are calculated, results will be saved to the
    database. See :class:`openquake.engine.db.models.SESCollection`.

    Optionally (specified in the job configuration using the
    `ground_motion_fields` parameter), GMFs can be computed from each rupture
    in each stochastic event set. GMFs are also saved to the database.

    :param int job_id:
        ID of the currently running job.
    :param src_seeds:
        List of pairs (source, seed)
    :param gsims_by_rlz:
        dictionary of GSIM
    :param task_no:
        an ordinal so that GMV can be collected in a reproducible order
    """
    # look up the container records created by the calculator's pre_execute
    rlz_ids = [r.id for r in gsims_by_rlz]
    ses_coll = models.SESCollection.objects.get(lt_realization_ids=rlz_ids)
    hc = models.HazardCalculation.objects.get(oqjob=job_id)
    all_ses = models.SES.objects.filter(ses_collection=ses_coll)
    imts = map(from_string, hc.intensity_measure_types)
    params = dict(
        correl_model=general.get_correl_model(hc),
        truncation_level=hc.truncation_level,
        maximum_distance=hc.maximum_distance,
        num_sites=len(hc.site_collection))
    collector = GmfCollector(
        [s.id for s in hc.site_collection], params, imts, gsims_by_rlz)
    # lightweight monitors attributing the task's time to its phases
    mon1 = LightMonitor('filtering sites', job_id, compute_ses_and_gmfs)
    mon2 = LightMonitor('generating ruptures', job_id, compute_ses_and_gmfs)
    mon3 = LightMonitor('filtering ruptures', job_id, compute_ses_and_gmfs)
    mon4 = LightMonitor('saving ses', job_id, compute_ses_and_gmfs)
    mon5 = LightMonitor('computing gmfs', job_id, compute_ses_and_gmfs)
    # Compute and save stochastic event sets
    rnd = random.Random()
    for src, seed in src_seeds:
        t0 = time.time()  # per-source wall-clock start, logged below
        rnd.seed(seed)
        with mon1:  # filtering sources
            s_sites = src.filter_sites_by_distance_to_source(
                hc.maximum_distance, hc.site_collection
            ) if hc.maximum_distance else hc.site_collection
            if s_sites is None:
                continue
        # NB: the number of occurrences is very low, << 1, so it is
        # more efficient to filter only the ruptures that occur
        # and not to compute the occurrencies of the filtered ruptures
        # the dictionary `ses_num_occ` contains [(ses, num_occurrences)]
        # for each occurring rupture for each ses in the ses collection
        ses_num_occ = collections.defaultdict(list)
        with mon2:  # generating ruptures
            for rup in src.iter_ruptures():
                for ses in all_ses:
                    numpy.random.seed(rnd.randint(0, models.MAX_SINT_32))
                    num_occurrences = rup.sample_number_of_occurrences()
                    if num_occurrences:
                        ses_num_occ[rup].append((ses, num_occurrences))
        for rup in ses_num_occ:
            with mon3:  # filtering ruptures
                r_sites = rup.source_typology.\
                    filter_sites_by_distance_to_rupture(
                        rup, hc.maximum_distance, s_sites
                    ) if hc.maximum_distance else s_sites
                if r_sites is None:
                    continue
            # saving ses and generating gmf
            for ses, num_occurrences in ses_num_occ[rup]:
                for occ in range(1, num_occurrences + 1):
                    with mon4:  # saving ruptures
                        rup_id = models.SESRupture.objects.create(
                            ses=ses,
                            rupture=rup,
                            tag='smlt=%02d|ses=%04d|src=%s|occ=%02d'
                            % (ses_coll.ordinal, ses.ordinal,
                               src.source_id, occ),
                            hypocenter=rup.hypocenter.wkt2d,
                            magnitude=rup.mag).id
                    if hc.ground_motion_fields:
                        with mon5:  # computing GMFs
                            rup_seed = rnd.randint(0, models.MAX_SINT_32)
                            collector.calc_gmf(r_sites, rup, rup_id, rup_seed)
        # BUG FIX: the generator clauses were previously in the wrong order
        # ("for ses, occ in ses_num_occ[rup] for rup in ses_num_occ"),
        # which read the leaked loop variable `rup` (a NameError when no
        # rupture occurred) and multiplied the occurrences of a single
        # rupture by the number of ruptures. Sum the occurrences over all
        # occurring ruptures instead.
        num_ruptures = sum(num for occ_pairs in ses_num_occ.itervalues()
                           for _, num in occ_pairs)
        logs.LOG.info('job=%d, src=%s:%s, num_ruptures=%d, calc_time=%fs',
                      job_id, src.source_id, src.__class__.__name__,
                      num_ruptures, time.time() - t0)
    mon1.flush()
    mon2.flush()
    mon3.flush()
    mon4.flush()
    mon5.flush()
    # GMVs were only accumulated in memory so far; persist them in one go
    if hc.ground_motion_fields:
        with EnginePerformanceMonitor(
                'saving gmfs', job_id, compute_ses_and_gmfs):
            collector.save_gmfs(task_no)
class GmfCollector(object):
    """
    A class to compute and save ground motion fields.

    Accumulates in memory the nonzero ground motion values produced by
    :func:`_compute_gmf`, keyed by (realization, IMT, site id), and
    flushes them to the ``GmfData`` table on demand via :meth:`save_gmfs`.
    """

    def __init__(self, site_ids, params, imts, gsims_by_rlz):
        # ordered site ids; the indices yielded by _compute_gmf are
        # positions into this sequence
        self.site_ids = site_ids
        # dictionary with keys correl_model, truncation_level,
        # maximum_distance (see _compute_gmf)
        self.params = params
        # list of hazardlib intensity measure types
        self.imts = imts
        # {realization -> {tectonic region type -> GSIM instance}}
        self.gsims_by_rlz = gsims_by_rlz
        # (rlz, imt, site_id) -> list of ground motion values
        self.gmvs_per_site = collections.defaultdict(list)
        # (rlz, imt, site_id) -> ids of the ruptures producing those values
        self.ruptures_per_site = collections.defaultdict(list)

    def calc_gmf(self, r_sites, rupture, rupture_id, rupture_seed):
        """
        Compute the GMF generated by the given rupture on the given
        sites and collect the values in the dictionaries
        .gmvs_per_site and .ruptures_per_site.

        :param r_sites: the sites affected by the rupture (site collection,
            filtered by the caller — TODO confirm against caller)
        :param rupture: a hazardlib rupture object
        :param int rupture_id: database id of the saved SESRupture
        :param int rupture_seed: seed used to make the GMF reproducible
        """
        triples = [(rupture, rupture_id, rupture_seed)]
        for rlz, gsims in self.gsims_by_rlz.items():
            for imt, idx, gmv, rup_id in _compute_gmf(
                    self.params, self.imts, gsims, r_sites, triples):
                if gmv:  # keep only nonzero contributions
                    site_id = self.site_ids[idx]
                    self.gmvs_per_site[rlz, imt, site_id].append(gmv)
                    self.ruptures_per_site[rlz, imt, site_id].append(rup_id)

    @transaction.commit_on_success(using='job_init')
    def save_gmfs(self, task_no):
        """
        Helper method to save the computed GMF data to the database.

        Runs inside a single transaction on the 'job_init' database; the
        collected dictionaries are emptied afterwards so the collector can
        be reused for the next batch.

        :param task_no:
            The ordinal of the task which generated the current GMFs to save
        """
        for rlz, imt, site_id in self.gmvs_per_site:
            imt_name, sa_period, sa_damping = imt
            inserter.add(models.GmfData(
                gmf=models.Gmf.objects.get(lt_realization=rlz),
                task_no=task_no,
                imt=imt_name,
                sa_period=sa_period,
                sa_damping=sa_damping,
                site_id=site_id,
                gmvs=self.gmvs_per_site[rlz, imt, site_id],
                rupture_ids=self.ruptures_per_site[rlz, imt, site_id]))
        inserter.flush()
        # release the memory used by the collected values
        self.gmvs_per_site.clear()
        self.ruptures_per_site.clear()
# NB: I tried to return a single dictionary {site_id: [(gmv, rupt_id),...]}
# but it takes a lot more memory (MS)
def _compute_gmf(params, imts, gsims, site_coll, rupture_id_seed_triples):
    """
    Compute a ground motion field value for each rupture, for all the
    points affected by that rupture, for the given IMTs.

    This is a generator yielding tuples ``(imt, idx, gmv, rup_id)`` where
    ``idx`` is the index of the site inside ``site_coll``. Zero GMVs are
    yielded as well; the caller is expected to filter them out.

    :param params:
        a dictionary containing the keys
        correl_model, truncation_level, maximum_distance
        (NOTE(review): the key 'num_sites' is also read below — confirm
        the caller provides it)
    :param imts:
        a list of hazardlib intensity measure types
    :param gsims:
        a dictionary {tectonic region type -> GSIM instance}
    :param site_coll:
        a SiteCollection instance
    :param rupture_id_seed_triples:
        a list of triples with types
        (:class:`openquake.hazardlib.source.rupture.Rupture`, int, int)
    """
    # Compute and yield ground motion fields, one rupture at a time
    for rupture, rup_id, rup_seed in rupture_id_seed_triples:
        gmf_calc_kwargs = {
            'rupture': rupture,
            'sites': site_coll,
            'imts': imts,
            'gsim': gsims[rupture.tectonic_region_type],
            'truncation_level': params['truncation_level'],
            'realizations': DEFAULT_GMF_REALIZATIONS,
            'correlation_model': params['correl_model'],
            'num_sites': params['num_sites'],
        }
        # reseed numpy per rupture so each field is reproducible from
        # its own seed, independently of processing order
        numpy.random.seed(rup_seed)
        gmf_dict = gmf.ground_motion_fields(**gmf_calc_kwargs)
        for imt, gmf_1_realiz in gmf_dict.iteritems():
            # since DEFAULT_GMF_REALIZATIONS is 1, gmf_1_realiz is a matrix
            # with n_sites rows and 1 column
            for idx, gmv in enumerate(gmf_1_realiz):
                # convert a 1x1 matrix into a float
                yield imt, idx, float(gmv), rup_id
class EventBasedHazardCalculator(general.BaseHazardCalculator):
    """
    Probabilistic Event-Based hazard calculator. Computes stochastic event sets
    and (optionally) ground motion fields.
    """
    # celery task executed for each block of sources
    core_calc_task = compute_ses_and_gmfs

    def task_arg_gen(self, _block_size=None):
        """
        Loop through realizations and sources to generate a sequence of
        task arg tuples. Each tuple of args applies to a single task.

        Yielded results are tuples of the form job_id, sources, ses, seeds
        (seeds will be used to seed numpy for temporal occurence sampling).

        :param _block_size: unused; kept for interface compatibility with
            the parent class.
        """
        hc = self.hc
        # deterministic per-job random stream: every source gets its own
        # seed derived from the calculation's random_seed
        rnd = random.Random()
        rnd.seed(hc.random_seed)
        task_no = 0
        for job_id, block, gsims_by_rlz in super(
                EventBasedHazardCalculator, self).task_arg_gen():
            ss = [(src, rnd.randint(0, models.MAX_SINT_32))
                  for src in block]  # source, seed pairs
            yield job_id, ss, gsims_by_rlz, task_no
            task_no += 1
        # now the source_blocks_per_ltpath dictionary can be cleared
        self.source_blocks_per_ltpath.clear()

    def initialize_ses_db_records(self, ordinal, rlzs):
        """
        Create :class:`~openquake.engine.db.models.Output`,
        :class:`~openquake.engine.db.models.SESCollection` and
        :class:`~openquake.engine.db.models.SES` "container" records for
        a single realization.

        Stochastic event set ruptures computed for this realization will be
        associated to these containers.

        NOTE: Many tasks can contribute ruptures to the same SES.

        :returns: the list of created SES records, one per
            ses_per_logic_tree_path.
        """
        rlz_ids = [r.id for r in rlzs]
        # one Output/SESCollection pair for the whole group of realizations
        output = models.Output.objects.create(
            oq_job=self.job,
            display_name='SES Collection smlt-%d-rlz-%s' % (
                ordinal, ','.join(map(str, rlz_ids))),
            output_type='ses')
        ses_coll = models.SESCollection.objects.create(
            output=output, lt_realization_ids=rlz_ids, ordinal=ordinal)
        # one GMF output per realization, but only if GMFs were requested
        for rlz in rlzs:
            if self.job.hazard_calculation.ground_motion_fields:
                output = models.Output.objects.create(
                    oq_job=self.job,
                    display_name='GMF rlz-%s' % rlz.id,
                    output_type='gmf')
                models.Gmf.objects.create(output=output, lt_realization=rlz)
        # SES ordinals are 1-based
        all_ses = []
        for i in xrange(1, self.hc.ses_per_logic_tree_path + 1):
            all_ses.append(
                models.SES.objects.create(
                    ses_collection=ses_coll,
                    investigation_time=self.hc.investigation_time,
                    ordinal=i))
        return all_ses

    def pre_execute(self):
        """
        Do pre-execution work. At the moment, this work entails:
        parsing and initializing sources, parsing and initializing the
        site model (if there is one), parsing vulnerability and
        exposure files, and generating logic tree realizations. (The
        latter piece basically defines the work to be done in the
        `execute` phase.)
        """
        super(EventBasedHazardCalculator, self).pre_execute()
        # one group of SES containers per logic-tree path
        for i, rlzs in enumerate(self.rlzs_per_ltpath.itervalues()):
            self.initialize_ses_db_records(i, rlzs)

    def post_process(self):
        """
        If requested, perform additional processing of GMFs to produce hazard
        curves, then (optionally) mean/quantile curves and hazard maps.
        """
        if self.hc.hazard_curves_from_gmfs:
            with EnginePerformanceMonitor('generating hazard curves',
                                          self.job.id):
                self.parallelize(
                    post_processing.gmf_to_hazard_curve_task,
                    post_processing.gmf_to_hazard_curve_arg_gen(self.job),
                    self.log_percent)

            # If `mean_hazard_curves` is True and/or `quantile_hazard_curves`
            # has some value (not an empty list), do this additional
            # post-processing.
            if self.hc.mean_hazard_curves or self.hc.quantile_hazard_curves:
                with EnginePerformanceMonitor(
                        'generating mean/quantile curves', self.job.id):
                    self.do_aggregate_post_proc()

            if self.hc.hazard_maps:
                with EnginePerformanceMonitor(
                        'generating hazard maps', self.job.id):
                    self.parallelize(
                        cls_post_proc.hazard_curves_to_hazard_map_task,
                        cls_post_proc.hazard_curves_to_hazard_map_task_arg_gen(
                            self.job),
                        self.log_percent)
|
#!/usr/bin/env python
"""A static execution plugin for the MTMS pattern
"""
__author__ = "Vivek Balasubramanian <vivek.balasubramanian@rutgers.edu>"
__copyright__ = "Copyright 2015, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import sys
import saga
import time
import traceback
import pickle
import datetime
import radical.pilot
from radical.ensemblemd.exceptions import NotImplementedError, EnsemblemdError
from radical.ensemblemd.exec_plugins.plugin_base import PluginBase
# ------------------------------------------------------------------------------
#
_PLUGIN_INFO = {
"name": "mtms.static.default",
"pattern": "MTMS",
"context_type": "Static"
}
_PLUGIN_OPTIONS = []
# ------------------------------------------------------------------------------
#
def resolve_placeholder_vars(working_dirs, stage, task, path):
    """Expand a ``$STAGE_<n>`` placeholder inside a staging directive.

    ``path`` is either a plain path or a "source > target" pair; the
    placeholder is replaced with the recorded working directory of the
    referenced stage for the given task. Paths without a '$' marker are
    returned untouched; a '$' placeholder that is not ``$STAGE_`` raises.
    """
    # Fast path: nothing to substitute.
    if '$' not in path:
        return path

    # Locate the path component holding the placeholder. With a '>'
    # separator the placeholder may sit on either side; prefer the side
    # whose first character is '$'.
    halves = path.split('>')
    if len(halves) == 1:
        placeholder = path.split('/')[0]
    elif halves[0].strip().startswith('$'):
        placeholder = halves[0].strip().split('/')[0]
    else:
        placeholder = halves[1].strip().split('/')[0]

    if not placeholder.startswith("$STAGE_"):
        raise Exception("placeholder $STAGE_ used in invalid context.")

    stage = placeholder.split("$STAGE_")[1]
    work_dir = working_dirs['stage_{0}'.format(stage)]['task_{0}'.format(task)]
    return path.replace(placeholder, work_dir)
class Plugin(PluginBase):
    """Static execution plugin for the MTMS (many-task, many-stage) pattern.

    Stage 1 of every task is submitted up front; each later stage of a task
    is submitted from a unit-state callback as soon as that task's previous
    stage reaches DONE.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self):
        super(Plugin, self).__init__(_PLUGIN_INFO, _PLUGIN_OPTIONS)
        # number of compute units that reached DONE; the main thread polls
        # this counter to detect overall completion
        self.tot_fin_tasks=0
        # working_dirs['stage_<s>']['task_<t>'] -> unit working directory;
        # consumed by resolve_placeholder_vars for $STAGE_ placeholders
        self.working_dirs = {}

    # --------------------------------------------------------------------------
    #
    def verify_pattern(self, pattern, resource):
        # No structural validation is performed here; only a log line.
        self.get_logger().info("Verifying pattern...")

    # --------------------------------------------------------------------------
    #
    def execute_pattern(self, pattern, resource):
        """Run every stage of every task of ``pattern`` on ``resource``.

        Blocks until num_stages * num_tasks units have finished.
        """
        #-----------------------------------------------------------------------
        # Get details of the Bag of Pipes
        num_tasks = pattern.tasks
        num_stages = pattern.stages
        #-----------------------------------------------------------------------

        #-----------------------------------------------------------------------
        # Use callback to trigger next stage.
        # NOTE(review): stage-1 units are submitted before
        # create_next_stage_cud / launch_next_stage are defined further down;
        # a unit finishing extremely early would raise a NameError here.
        def unit_state_cb (unit, state):

            if state == radical.pilot.DONE:
                # unit names follow the "stage-<s>-task-<t>" convention
                cur_stage = int(unit.name.split('-')[1])
                cur_task = int(unit.name.split('-')[3])
                self.get_logger().info('Task {0} of stage {1} has finished'.format(cur_task,cur_stage))

                #---------------------------------------------------------------
                # Record the unit working directory for placeholder resolution
                if 'stage_{0}'.format(cur_stage) not in self.working_dirs:
                    self.working_dirs['stage_{0}'.format(cur_stage)] = {}
                self.working_dirs['stage_{0}'.format(cur_stage)]['task_{0}'.format(cur_task)] = unit.working_directory
                #print self.working_dirs['stage_{0}'.format(cur_stage)]['task_{0}'.format(cur_task)]
                #---------------------------------------------------------------

                # Chain the next stage of this task, if one remains.
                cud = create_next_stage_cud(unit)
                if cud is not None:
                    launch_next_stage(cud)
        #-----------------------------------------------------------------------

        self.get_logger().info("Executing {0} pipeline instances of {1} stages on {2} allocated core(s) on '{3}'".format(num_tasks, num_stages,
            resource._cores, resource._resource_key))

        #-----------------------------------------------------------------------
        # Wait for Pilot to go Active
        resource._pmgr.wait_pilots(resource._pilot.uid,u'Active')
        #-----------------------------------------------------------------------

        #-----------------------------------------------------------------------
        # Register CB
        resource._umgr.register_callback(unit_state_cb)
        #-----------------------------------------------------------------------

        #-----------------------------------------------------------------------
        # Get input data for the kernel
        def get_input_data(kernel,stage,task):
            """Assemble the input-staging directive list for one kernel.

            Each directive is "source" or "source > target"; $STAGE_
            placeholders are resolved against self.working_dirs.
            """
            # INPUT DATA:
            ip_list = []

            # upload_input_data: plain transfers (no 'action' key)
            data_in = []
            if kernel._kernel._upload_input_data is not None:
                # normalise a bare string into a one-element list
                if isinstance(kernel._kernel._upload_input_data,list):
                    pass
                else:
                    kernel._kernel._upload_input_data = [kernel._kernel._upload_input_data]
                for i in range(0,len(kernel._kernel._upload_input_data)):
                    var=resolve_placeholder_vars(self.working_dirs, stage, task, kernel._kernel._upload_input_data[i])
                    if len(var.split('>')) > 1:
                        # explicit "source > target" mapping
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': var.split('>')[1].strip()
                        }
                    else:
                        # target defaults to the basename of the source
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': os.path.basename(var.split('>')[0].strip())
                        }
                    data_in.append(temp)
                # NOTE(review): ip_list is initialised to [] above, so it is
                # never None and the += branch is always taken (here and in
                # the sections below).
                if ip_list is None:
                    ip_list = data_in
                else:
                    ip_list += data_in

            # link_input_data: symlinked into the unit sandbox
            data_in = []
            if kernel._kernel._link_input_data is not None:
                if isinstance(kernel._kernel._link_input_data,list):
                    pass
                else:
                    kernel._kernel._link_input_data = [kernel._kernel._link_input_data]
                for i in range(0,len(kernel._kernel._link_input_data)):
                    var=resolve_placeholder_vars(self.working_dirs, stage, task, kernel._kernel._link_input_data[i])
                    if len(var.split('>')) > 1:
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': var.split('>')[1].strip(),
                            'action': radical.pilot.LINK
                        }
                    else:
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': os.path.basename(var.split('>')[0].strip()),
                            'action': radical.pilot.LINK
                        }
                    data_in.append(temp)
                if ip_list is None:
                    ip_list = data_in
                else:
                    ip_list += data_in

            # copy_input_data: copied into the unit sandbox
            data_in = []
            if kernel._kernel._copy_input_data is not None:
                if isinstance(kernel._kernel._copy_input_data,list):
                    pass
                else:
                    kernel._kernel._copy_input_data = [kernel._kernel._copy_input_data]
                for i in range(0,len(kernel._kernel._copy_input_data)):
                    var=resolve_placeholder_vars(self.working_dirs, stage, task, kernel._kernel._copy_input_data[i])
                    if len(var.split('>')) > 1:
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': var.split('>')[1].strip(),
                            'action': radical.pilot.COPY
                        }
                    else:
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': os.path.basename(var.split('>')[0].strip()),
                            'action': radical.pilot.COPY
                        }
                    data_in.append(temp)
                if ip_list is None:
                    ip_list = data_in
                else:
                    ip_list += data_in

            # download input data: passed through verbatim (no placeholder
            # resolution; note this reads kernel.download_input_data,
            # not kernel._kernel — presumably intentional, verify)
            if kernel.download_input_data is not None:
                data_in = kernel.download_input_data
                if ip_list is None:
                    ip_list = data_in
                else:
                    ip_list += data_in

            return ip_list
        #-----------------------------------------------------------------------

        #-----------------------------------------------------------------------
        # Get output data for the kernel
        def get_output_data(kernel,stage,task):
            """Assemble the output-staging directive list for one kernel."""
            # OUTPUT DATA:
            # copy_output_data: copied out of the unit sandbox
            op_list = []
            data_out = []
            if kernel._kernel._copy_output_data is not None:
                if isinstance(kernel._kernel._copy_output_data,list):
                    pass
                else:
                    kernel._kernel._copy_output_data = [kernel._kernel._copy_output_data]
                for i in range(0,len(kernel._kernel._copy_output_data)):
                    var=resolve_placeholder_vars(self.working_dirs, stage, task, kernel._kernel._copy_output_data[i])
                    if len(var.split('>')) > 1:
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': var.split('>')[1].strip(),
                            'action': radical.pilot.COPY
                        }
                    else:
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': os.path.basename(var.split('>')[0].strip()),
                            'action': radical.pilot.COPY
                        }
                    data_out.append(temp)
                # NOTE(review): op_list starts as [], so the += branch is
                # always taken (here and below).
                if op_list is None:
                    op_list = data_out
                else:
                    op_list += data_out

            # download_output_data: transferred back (no 'action' key)
            data_out = []
            if kernel._kernel._download_output_data is not None:
                if isinstance(kernel._kernel._download_output_data,list):
                    pass
                else:
                    kernel._kernel._download_output_data = [kernel._kernel._download_output_data]
                for i in range(0,len(kernel._kernel._download_output_data)):
                    var=resolve_placeholder_vars(self.working_dirs, stage, task, kernel._kernel._download_output_data[i])
                    if len(var.split('>')) > 1:
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': var.split('>')[1].strip()
                        }
                    else:
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': os.path.basename(var.split('>')[0].strip())
                        }
                    data_out.append(temp)
                if op_list is None:
                    op_list = data_out
                else:
                    op_list += data_out

            return op_list
        #-----------------------------------------------------------------------

        #-----------------------------------------------------------------------
        # Launch first stage of all tasks
        task_method = getattr(pattern, 'stage_1')
        task_units_desc = []
        for task_instance in range(1, num_tasks+1):
            kernel = task_method(task_instance)
            kernel._bind_to_resource(resource._resource_key)
            cud = radical.pilot.ComputeUnitDescription()
            cud.name = "stage-1-task-{0}".format(task_instance)
            cud.pre_exec = kernel._cu_def_pre_exec
            cud.executable = kernel._cu_def_executable
            cud.arguments = kernel.arguments
            cud.mpi = kernel.uses_mpi
            cud.input_staging = get_input_data(kernel,1,task_instance)
            cud.output_staging = get_output_data(kernel,1,task_instance)
            task_units_desc.append(cud)

        # task_units is currently unused; completion is tracked through the
        # callback counter instead
        task_units = resource._umgr.submit_units(task_units_desc)
        self.get_logger().info('Submitted all tasks of stage 1')
        #-----------------------------------------------------------------------

        #-----------------------------------------------------------------------
        # Create next CU at the end of each CU
        def create_next_stage_cud(unit):
            """Build the CU description for the next stage of unit's task.

            Also increments the finished-unit counter; returns None when
            the task has no further stage.
            """
            cur_stage = int(unit.name.split('-')[1])+1
            cur_task = int(unit.name.split('-')[3])
            self.tot_fin_tasks+=1
            if cur_stage <= num_stages:
                self.get_logger().info('Submitting task {0} of stage {1}'.format(cur_task,cur_stage))
                task_method = getattr(pattern, 'stage_{0}'.format(cur_stage))
                kernel = task_method(cur_task)
                kernel._bind_to_resource(resource._resource_key)
                cud = radical.pilot.ComputeUnitDescription()
                cud.name = "stage-{0}-task-{1}".format(cur_stage,cur_task)
                cud.pre_exec = kernel._cu_def_pre_exec
                cud.executable = kernel._cu_def_executable
                cud.arguments = kernel.arguments
                cud.mpi = kernel.uses_mpi
                cud.input_staging = get_input_data(kernel,cur_stage,cur_task)
                cud.output_staging = get_output_data(kernel,cur_stage,cur_task)
                return cud
            else:
                return None
        #-----------------------------------------------------------------------

        #-----------------------------------------------------------------------
        # Launch the CU of the next stage
        def launch_next_stage(cud):
            resource._umgr.submit_units(cud)
            return None
        #-----------------------------------------------------------------------

        #-----------------------------------------------------------------------
        # Wait for all tasks to finish (wait_units returns whenever the
        # current set of units settles, hence the loop on the counter)
        while(self.tot_fin_tasks<(num_stages*num_tasks)):
            resource._umgr.wait_units()
        #-----------------------------------------------------------------------
# some more logging
#!/usr/bin/env python
"""A static execution plugin for the MTMS pattern
"""
__author__ = "Vivek Balasubramanian <vivek.balasubramanian@rutgers.edu>"
__copyright__ = "Copyright 2015, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import sys
import saga
import time
import traceback
import pickle
import datetime
import radical.pilot
from radical.ensemblemd.exceptions import NotImplementedError, EnsemblemdError
from radical.ensemblemd.exec_plugins.plugin_base import PluginBase
# ------------------------------------------------------------------------------
#
_PLUGIN_INFO = {
"name": "mtms.static.default",
"pattern": "MTMS",
"context_type": "Static"
}
_PLUGIN_OPTIONS = []
# ------------------------------------------------------------------------------
#
def resolve_placeholder_vars(working_dirs, stage, task, path):
    """Substitute a ``$STAGE_<n>`` placeholder in a staging directive.

    The directive is a bare path or a "source > target" pair. The
    placeholder expands to the recorded working directory of the
    referenced stage for ``task``; paths without '$' pass through, and a
    non-``$STAGE_`` placeholder raises an Exception.
    """
    if '$' not in path:
        # No placeholder marker, hand the path back unchanged.
        return path

    # Identify the component that carries the placeholder: with a '>'
    # separator, whichever side begins with '$'.
    parts = path.split('>')
    if len(parts) == 1:
        token = path.split('/')[0]
    else:
        lhs = parts[0].strip()
        token = (lhs if lhs.startswith('$') else parts[1].strip()).split('/')[0]

    if token.startswith("$STAGE_"):
        stage = token.split("$STAGE_")[1]
        return path.replace(
            token,
            working_dirs['stage_{0}'.format(stage)]['task_{0}'.format(task)])
    raise Exception("placeholder $STAGE_ used in invalid context.")
class Plugin(PluginBase):
    """Static execution plugin for the MTMS (many-task, many-stage) pattern.

    Stage 1 of every task is submitted up front; each later stage of a task
    is submitted from a unit-state callback as soon as that task's previous
    stage reaches DONE.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self):
        super(Plugin, self).__init__(_PLUGIN_INFO, _PLUGIN_OPTIONS)
        # number of compute units that reached DONE; polled by the main
        # thread to detect overall completion
        self.tot_fin_tasks=0
        # working_dirs['stage_<s>']['task_<t>'] -> unit working directory;
        # consumed by resolve_placeholder_vars for $STAGE_ placeholders
        self.working_dirs = {}

    # --------------------------------------------------------------------------
    #
    def verify_pattern(self, pattern, resource):
        # No structural validation is performed here; only a log line.
        self.get_logger().info("Verifying pattern...")

    # --------------------------------------------------------------------------
    #
    def execute_pattern(self, pattern, resource):
        """Run every stage of every task of ``pattern`` on ``resource``.

        Blocks until num_stages * num_tasks units have finished.
        """
        #-----------------------------------------------------------------------
        # Get details of the Bag of Pipes
        num_tasks = pattern.tasks
        num_stages = pattern.stages
        #-----------------------------------------------------------------------

        #-----------------------------------------------------------------------
        # Use callback to trigger next stage.
        # NOTE(review): stage-1 units are submitted before
        # create_next_stage_cud / launch_next_stage are defined further down;
        # a unit finishing extremely early would raise a NameError here.
        def unit_state_cb (unit, state):

            if state == radical.pilot.DONE:
                # unit names follow the "stage-<s>-task-<t>" convention
                cur_stage = int(unit.name.split('-')[1])
                cur_task = int(unit.name.split('-')[3])
                self.get_logger().info('Task {0} of stage {1} has finished'.format(cur_task,cur_stage))

                #---------------------------------------------------------------
                # Record the unit working directory for placeholder resolution
                if 'stage_{0}'.format(cur_stage) not in self.working_dirs:
                    self.working_dirs['stage_{0}'.format(cur_stage)] = {}
                self.working_dirs['stage_{0}'.format(cur_stage)]['task_{0}'.format(cur_task)] = unit.working_directory
                #---------------------------------------------------------------

                # Chain the next stage of this task, if one remains.
                cud = create_next_stage_cud(unit)
                if cud is not None:
                    launch_next_stage(cud)
        #-----------------------------------------------------------------------

        self.get_logger().info("Executing {0} pipeline instances of {1} stages on {2} allocated core(s) on '{3}'".format(num_tasks, num_stages,
            resource._cores, resource._resource_key))

        #-----------------------------------------------------------------------
        # Wait for Pilot to go Active
        resource._pmgr.wait_pilots(resource._pilot.uid,u'Active')
        #-----------------------------------------------------------------------

        #-----------------------------------------------------------------------
        # Register CB
        resource._umgr.register_callback(unit_state_cb)
        #-----------------------------------------------------------------------

        #-----------------------------------------------------------------------
        # Get input data for the kernel
        def get_input_data(kernel,stage,task):
            """Assemble the input-staging directive list for one kernel.

            Each directive is "source" or "source > target"; $STAGE_
            placeholders are resolved against self.working_dirs.
            """
            # INPUT DATA:
            ip_list = []

            # upload_input_data: plain transfers (no 'action' key)
            data_in = []
            if kernel._kernel._upload_input_data is not None:
                # normalise a bare string into a one-element list
                if isinstance(kernel._kernel._upload_input_data,list):
                    pass
                else:
                    kernel._kernel._upload_input_data = [kernel._kernel._upload_input_data]
                for i in range(0,len(kernel._kernel._upload_input_data)):
                    var=resolve_placeholder_vars(self.working_dirs, stage, task, kernel._kernel._upload_input_data[i])
                    if len(var.split('>')) > 1:
                        # explicit "source > target" mapping
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': var.split('>')[1].strip()
                        }
                    else:
                        # target defaults to the basename of the source
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': os.path.basename(var.split('>')[0].strip())
                        }
                    data_in.append(temp)
                # NOTE(review): ip_list is initialised to [] above, so it is
                # never None and the += branch is always taken (here and in
                # the sections below).
                if ip_list is None:
                    ip_list = data_in
                else:
                    ip_list += data_in

            # link_input_data: symlinked into the unit sandbox
            data_in = []
            if kernel._kernel._link_input_data is not None:
                if isinstance(kernel._kernel._link_input_data,list):
                    pass
                else:
                    kernel._kernel._link_input_data = [kernel._kernel._link_input_data]
                for i in range(0,len(kernel._kernel._link_input_data)):
                    var=resolve_placeholder_vars(self.working_dirs, stage, task, kernel._kernel._link_input_data[i])
                    if len(var.split('>')) > 1:
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': var.split('>')[1].strip(),
                            'action': radical.pilot.LINK
                        }
                    else:
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': os.path.basename(var.split('>')[0].strip()),
                            'action': radical.pilot.LINK
                        }
                    data_in.append(temp)
                if ip_list is None:
                    ip_list = data_in
                else:
                    ip_list += data_in

            # copy_input_data: copied into the unit sandbox
            data_in = []
            if kernel._kernel._copy_input_data is not None:
                if isinstance(kernel._kernel._copy_input_data,list):
                    pass
                else:
                    kernel._kernel._copy_input_data = [kernel._kernel._copy_input_data]
                for i in range(0,len(kernel._kernel._copy_input_data)):
                    var=resolve_placeholder_vars(self.working_dirs, stage, task, kernel._kernel._copy_input_data[i])
                    if len(var.split('>')) > 1:
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': var.split('>')[1].strip(),
                            'action': radical.pilot.COPY
                        }
                    else:
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': os.path.basename(var.split('>')[0].strip()),
                            'action': radical.pilot.COPY
                        }
                    data_in.append(temp)
                if ip_list is None:
                    ip_list = data_in
                else:
                    ip_list += data_in

            # download input data: passed through verbatim (no placeholder
            # resolution; note this reads kernel.download_input_data,
            # not kernel._kernel — presumably intentional, verify)
            if kernel.download_input_data is not None:
                data_in = kernel.download_input_data
                if ip_list is None:
                    ip_list = data_in
                else:
                    ip_list += data_in

            return ip_list
        #-----------------------------------------------------------------------

        #-----------------------------------------------------------------------
        # Get output data for the kernel
        def get_output_data(kernel,stage,task):
            """Assemble the output-staging directive list for one kernel."""
            # OUTPUT DATA:
            # copy_output_data: copied out of the unit sandbox
            op_list = []
            data_out = []
            if kernel._kernel._copy_output_data is not None:
                if isinstance(kernel._kernel._copy_output_data,list):
                    pass
                else:
                    kernel._kernel._copy_output_data = [kernel._kernel._copy_output_data]
                for i in range(0,len(kernel._kernel._copy_output_data)):
                    var=resolve_placeholder_vars(self.working_dirs, stage, task, kernel._kernel._copy_output_data[i])
                    if len(var.split('>')) > 1:
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': var.split('>')[1].strip(),
                            'action': radical.pilot.COPY
                        }
                    else:
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': os.path.basename(var.split('>')[0].strip()),
                            'action': radical.pilot.COPY
                        }
                    data_out.append(temp)
                # NOTE(review): op_list starts as [], so the += branch is
                # always taken (here and below).
                if op_list is None:
                    op_list = data_out
                else:
                    op_list += data_out

            # download_output_data: transferred back (no 'action' key)
            data_out = []
            if kernel._kernel._download_output_data is not None:
                if isinstance(kernel._kernel._download_output_data,list):
                    pass
                else:
                    kernel._kernel._download_output_data = [kernel._kernel._download_output_data]
                for i in range(0,len(kernel._kernel._download_output_data)):
                    var=resolve_placeholder_vars(self.working_dirs, stage, task, kernel._kernel._download_output_data[i])
                    if len(var.split('>')) > 1:
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': var.split('>')[1].strip()
                        }
                    else:
                        temp = {
                            'source': var.split('>')[0].strip(),
                            'target': os.path.basename(var.split('>')[0].strip())
                        }
                    data_out.append(temp)
                if op_list is None:
                    op_list = data_out
                else:
                    op_list += data_out

            return op_list
        #-----------------------------------------------------------------------

        #-----------------------------------------------------------------------
        # Launch first stage of all tasks
        task_method = getattr(pattern, 'stage_1')
        task_units_desc = []
        for task_instance in range(1, num_tasks+1):
            kernel = task_method(task_instance)
            kernel._bind_to_resource(resource._resource_key)
            self.get_logger().debug('Creating task {0} of stage 1'.format(task_instance))
            cud = radical.pilot.ComputeUnitDescription()
            cud.name = "stage-1-task-{0}".format(task_instance)
            cud.pre_exec = kernel._cu_def_pre_exec
            cud.executable = kernel._cu_def_executable
            cud.arguments = kernel.arguments
            cud.mpi = kernel.uses_mpi
            cud.input_staging = get_input_data(kernel,1,task_instance)
            cud.output_staging = get_output_data(kernel,1,task_instance)
            task_units_desc.append(cud)

        # task_units is currently unused; completion is tracked through the
        # callback counter instead
        task_units = resource._umgr.submit_units(task_units_desc)
        self.get_logger().info('Submitted all tasks of stage 1')
        #-----------------------------------------------------------------------

        #-----------------------------------------------------------------------
        # Create next CU at the end of each CU
        def create_next_stage_cud(unit):
            """Build the CU description for the next stage of unit's task.

            Also increments the finished-unit counter; returns None when
            the task has no further stage.
            """
            cur_stage = int(unit.name.split('-')[1])+1
            cur_task = int(unit.name.split('-')[3])
            self.tot_fin_tasks+=1
            if cur_stage <= num_stages:
                self.get_logger().debug('Creating task {0} of stage {1}'.format(cur_task,cur_stage))
                task_method = getattr(pattern, 'stage_{0}'.format(cur_stage))
                kernel = task_method(cur_task)
                kernel._bind_to_resource(resource._resource_key)
                cud = radical.pilot.ComputeUnitDescription()
                cud.name = "stage-{0}-task-{1}".format(cur_stage,cur_task)
                cud.pre_exec = kernel._cu_def_pre_exec
                cud.executable = kernel._cu_def_executable
                cud.arguments = kernel.arguments
                cud.mpi = kernel.uses_mpi
                cud.input_staging = get_input_data(kernel,cur_stage,cur_task)
                cud.output_staging = get_output_data(kernel,cur_stage,cur_task)
                return cud
            else:
                return None
        #-----------------------------------------------------------------------

        #-----------------------------------------------------------------------
        # Launch the CU of the next stage
        def launch_next_stage(cud):
            cur_stage = int(cud.name.split('-')[1])
            cur_task = int(cud.name.split('-')[3])
            self.get_logger().info('Submitting task {0} of stage {1}'.format(cur_task,cur_stage))
            resource._umgr.submit_units(cud)
        #-----------------------------------------------------------------------

        #-----------------------------------------------------------------------
        # Wait for all tasks to finish (wait_units returns whenever the
        # current set of units settles, hence the loop on the counter)
        while(self.tot_fin_tasks<(num_stages*num_tasks)):
            resource._umgr.wait_units()
        #-----------------------------------------------------------------------
|
# Copyright (c) 2010-2014, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Core calculator functionality for computing stochastic event sets and ground
motion fields using the 'event-based' method.
Stochastic event sets (which can be thought of as collections of ruptures) are
computed given a set of seismic sources and investigation time span (in years).
For more information on computing stochastic event sets, see
:mod:`openquake.hazardlib.calc.stochastic`.
One can optionally compute a ground motion field (GMF) given a rupture, a site
collection (which is a collection of geographical points with associated soil
parameters), and a ground shaking intensity model (GSIM).
For more information on computing ground motion fields, see
:mod:`openquake.hazardlib.calc.gmf`.
"""
import time
import random
import collections
import numpy.random
from django.db import transaction
from openquake.hazardlib.calc import gmf, filters
from openquake.hazardlib.imt import from_string
from openquake.engine import logs, writer
from openquake.engine.calculators.hazard import general
from openquake.engine.calculators.hazard.classical import (
post_processing as cls_post_proc)
from openquake.engine.calculators.hazard.event_based import post_processing
from openquake.engine.db import models
from openquake.engine.utils import tasks
from openquake.engine.performance import EnginePerformanceMonitor, LightMonitor
# NB: beware of large caches
# Module-level buffered writer: collects GmfData rows and bulk-inserts them
# in batches of 1000 to limit memory usage and database round trips.
inserter = writer.CacheInserter(models.GmfData, 1000)
@tasks.oqtask
def compute_ses_and_gmfs(
        job_id, sitecol, src_seeds, trt_model_id, gsims, task_no):
    """
    Celery task for the stochastic event set calculator.
    Samples logic trees and calls the stochastic event set calculator.
    Once stochastic event sets are calculated, results will be saved to the
    database. See :class:`openquake.engine.db.models.SESCollection`.
    Optionally (specified in the job configuration using the
    `ground_motion_fields` parameter), GMFs can be computed from each rupture
    in each stochastic event set. GMFs are also saved to the database.
    :param int job_id:
        ID of the currently running job.
    :param sitecol:
        a :class:`openquake.hazardlib.site.SiteCollection` instance
    :param src_seeds:
        List of pairs (source, seed)
    :param int trt_model_id:
        ID of a :class:`openquake.engine.db.models.TrtModel` instance
    :param gsims:
        list of distinct GSIM instances
    :param task_no:
        an ordinal so that GMV can be collected in a reproducible order
    """
    # NB: all realizations in gsims correspond to the same source model
    trt_model = models.TrtModel.objects.get(pk=trt_model_id)
    ses_coll = models.SESCollection.objects.get(lt_model=trt_model.lt_model)
    hc = models.HazardCalculation.objects.get(oqjob=job_id)
    all_ses = list(ses_coll)
    imts = map(from_string, hc.intensity_measure_types)
    params = dict(
        correl_model=general.get_correl_model(hc),
        truncation_level=hc.truncation_level,
        maximum_distance=hc.maximum_distance)
    # accumulates the ground motion values computed for every rupture
    gmfcollector = GmfCollector(
        params, imts, gsims, trt_model.id)
    # LightMonitors accumulate the time spent in each phase; they are
    # flushed to the performance tables at the end of the task
    filter_sites_mon = LightMonitor(
        'filtering sites', job_id, compute_ses_and_gmfs)
    generate_ruptures_mon = LightMonitor(
        'generating ruptures', job_id, compute_ses_and_gmfs)
    filter_ruptures_mon = LightMonitor(
        'filtering ruptures', job_id, compute_ses_and_gmfs)
    save_ruptures_mon = LightMonitor(
        'saving ruptures', job_id, compute_ses_and_gmfs)
    compute_gmfs_mon = LightMonitor(
        'computing gmfs', job_id, compute_ses_and_gmfs)
    # Compute and save stochastic event sets
    rnd = random.Random()
    num_distinct_ruptures = 0
    total_ruptures = 0
    for src, seed in src_seeds:
        t0 = time.time()
        # re-seed per source so sampling is reproducible regardless of how
        # the sources are grouped into tasks
        rnd.seed(seed)
        with filter_sites_mon:  # filtering sources
            s_sites = src.filter_sites_by_distance_to_source(
                hc.maximum_distance, sitecol
            ) if hc.maximum_distance else sitecol
        if s_sites is None:
            # all sites are beyond the integration distance of this source
            continue
        # the dictionary `ses_num_occ` contains [(ses, num_occurrences)]
        # for each occurring rupture for each ses in the ses collection
        ses_num_occ = collections.defaultdict(list)
        with generate_ruptures_mon:  # generating ruptures for the given source
            for rup_no, rup in enumerate(src.iter_ruptures(), 1):
                rup.rup_no = rup_no
                for ses in all_ses:
                    numpy.random.seed(rnd.randint(0, models.MAX_SINT_32))
                    num_occurrences = rup.sample_number_of_occurrences()
                    if num_occurrences:
                        ses_num_occ[rup].append((ses, num_occurrences))
                        total_ruptures += num_occurrences
        # NB: the number of occurrences is very low, << 1, so it is
        # more efficient to filter only the ruptures that occur, i.e.
        # to call sample_number_of_occurrences() *before* the filtering
        for rup in ses_num_occ.keys():
            # NB: iterating on .keys() (a list in Python 2) is what makes
            # the `del ses_num_occ[rup]` below safe during the iteration
            with filter_ruptures_mon:  # filtering ruptures
                r_sites = filters.filter_sites_by_distance_to_rupture(
                    rup, hc.maximum_distance, s_sites
                ) if hc.maximum_distance else s_sites
            if r_sites is None:
                # ignore ruptures which are far away
                del ses_num_occ[rup]  # save memory
                continue
            ses_ruptures = []
            with save_ruptures_mon:  # saving ses_ruptures
                # using a django transaction make the saving faster
                with transaction.commit_on_success(using='job_init'):
                    indices = r_sites.indices if len(r_sites) < len(sitecol) \
                        else None  # None means that nothing was filtered
                    prob_rup = models.ProbabilisticRupture.create(
                        rup, ses_coll, indices)
                    for ses, num_occurrences in ses_num_occ[rup]:
                        for occ_no in range(1, num_occurrences + 1):
                            rup_seed = rnd.randint(0, models.MAX_SINT_32)
                            ses_rup = models.SESRupture.create(
                                prob_rup, ses, src.source_id,
                                rup.rup_no, occ_no, rup_seed)
                            ses_ruptures.append(ses_rup)
            with compute_gmfs_mon:  # computing GMFs
                if hc.ground_motion_fields:
                    for ses_rup in ses_ruptures:
                        gmfcollector.calc_gmf(
                            r_sites, rup, ses_rup.id, ses_rup.seed)
        # log calc_time per distinct rupture
        if ses_num_occ:
            num_ruptures = len(ses_num_occ)
            tot_ruptures = sum(num for rup in ses_num_occ
                               for ses, num in ses_num_occ[rup])
            logs.LOG.info(
                'job=%d, src=%s:%s, num_ruptures=%d, tot_ruptures=%d, '
                'num_sites=%d, calc_time=%fs', job_id, src.source_id,
                src.__class__.__name__, num_ruptures, tot_ruptures,
                len(s_sites), time.time() - t0)
            num_distinct_ruptures += num_ruptures
    if num_distinct_ruptures:
        logs.LOG.info('job=%d, task %d generated %d/%d ruptures',
                      job_id, task_no, num_distinct_ruptures, total_ruptures)
    filter_sites_mon.flush()
    generate_ruptures_mon.flush()
    filter_ruptures_mon.flush()
    save_ruptures_mon.flush()
    compute_gmfs_mon.flush()
    if hc.ground_motion_fields:
        # a single bulk save at the end is faster than saving per rupture
        with EnginePerformanceMonitor(
                'saving gmfs', job_id, compute_ses_and_gmfs):
            gmfcollector.save_gmfs(task_no)
class GmfCollector(object):
    """
    A class to compute and save ground motion fields.
    """
    def __init__(self, params, imts, gsims, trt_model_id):
        """
        :param params:
            a dictionary of parameters with keys
            correl_model, truncation_level, maximum_distance
        :param imts:
            a list of hazardlib intensity measure types
        :param gsims:
            a list of distinct GSIM instances
        :param int trt_model_id:
            the ID of a TRTModel instance
        """
        self.params = params
        self.imts = imts
        self.gsims = gsims
        self.trt_model_id = trt_model_id
        # accumulators filled by calc_gmf, keyed by (gsim_name, imt, site_id)
        # NB: I tried to use a single dictionary
        # {site_id: [(gmv, rupt_id),...]} but it took a lot more memory (MS)
        self.gmvs_per_site = collections.defaultdict(list)
        self.ruptures_per_site = collections.defaultdict(list)
    def calc_gmf(self, r_sites, rupture, rupture_id, rupture_seed):
        """
        Compute the GMF generated by the given rupture on the given
        sites and collect the values in the dictionaries
        .gmvs_per_site and .ruptures_per_site.
        :param r_sites:
            the collection of sites affected by the rupture
        :param rupture_id:
            the id of an `openquake.engine.db.models.SESRupture` instance
        :param rupture:
            an `openquake.hazardlib.source.rupture.
            ParametricProbabilisticRupture` instance
        :param rupture_seed:
            an integer to be used as stochastic seed
        """
        # one GmfComputer per GSIM; each computes the field for all IMTs
        for gsim in self.gsims:
            gsim_name = gsim.__class__.__name__
            computer = gmf.GmfComputer(rupture, r_sites, self.imts, gsim,
                                       self.params['truncation_level'],
                                       self.params['correl_model'])
            gmf_dict = computer.compute(rupture_seed)
            # NB: .iteritems() is a Python 2 idiom
            for imt, gmvs in gmf_dict.iteritems():
                for site_id, gmv in zip(r_sites.sids, gmvs):
                    # convert a 1x1 matrix into a float
                    gmv = float(gmv)
                    if gmv:
                        # only nonzero values are stored, to save space
                        self.gmvs_per_site[
                            gsim_name, imt, site_id].append(gmv)
                        self.ruptures_per_site[
                            gsim_name, imt, site_id].append(rupture_id)
    # run the whole save inside a single transaction for speed
    @transaction.commit_on_success(using='job_init')
    def save_gmfs(self, task_no):
        """
        Helper method to save the computed GMF data to the database.
        :param task_no:
            The ordinal of the task which generated the current GMFs to save
        """
        rlzs = models.TrtModel.objects.get(
            pk=self.trt_model_id).get_rlzs_by_gsim()
        for gsim_name, imt, site_id in self.gmvs_per_site:
            # the same GMVs are saved once per realization sharing the GSIM
            for rlz in rlzs[gsim_name]:
                imt_name, sa_period, sa_damping = imt
                inserter.add(models.GmfData(
                    gmf=models.Gmf.objects.get(lt_realization=rlz),
                    task_no=task_no,
                    imt=imt_name,
                    sa_period=sa_period,
                    sa_damping=sa_damping,
                    site_id=site_id,
                    gmvs=self.gmvs_per_site[gsim_name, imt, site_id],
                    rupture_ids=self.ruptures_per_site[gsim_name, imt, site_id]
                ))
        inserter.flush()
        # free memory for the next batch
        self.gmvs_per_site.clear()
        self.ruptures_per_site.clear()
class EventBasedHazardCalculator(general.BaseHazardCalculator):
    """
    Probabilistic Event-Based hazard calculator. Computes stochastic event sets
    and (optionally) ground motion fields.
    """
    core_calc_task = compute_ses_and_gmfs
    def task_arg_gen(self, _block_size=None):
        """
        Loop through realizations and sources to generate a sequence of
        task arg tuples. Each tuple of args applies to a single task.
        Yielded results are tuples of the form job_id, sources, ses, seeds
        (seeds will be used to seed numpy for temporal occurence sampling).
        :param _block_size:
            ignored; kept for signature compatibility with the parent class
        """
        hc = self.hc
        rnd = random.Random()
        # the master seed makes the whole seed sequence reproducible
        rnd.seed(hc.random_seed)
        for job_id, sitecol, block, lt_model, gsims, task_no in \
                super(EventBasedHazardCalculator, self).task_arg_gen():
            ss = [(src, rnd.randint(0, models.MAX_SINT_32))
                  for src in block]  # source, seed pairs
            yield job_id, sitecol, ss, lt_model, gsims, task_no
        # now the source_blocks_per_ltpath dictionary can be cleared
        self.source_blocks_per_ltpath.clear()
    def initialize_ses_db_records(self, lt_model):
        """
        Create :class:`~openquake.engine.db.models.Output`,
        :class:`~openquake.engine.db.models.SESCollection` and
        :class:`~openquake.engine.db.models.SES` "container" records for
        a single realization.
        Stochastic event set ruptures computed for this realization will be
        associated to these containers.
        NOTE: Many tasks can contribute ruptures to the same SES.
        :param lt_model:
            a source model logic tree realization record
        :returns:
            the newly created SESCollection instance
        """
        output = models.Output.objects.create(
            oq_job=self.job,
            display_name='SES Collection smlt-%d' % lt_model.ordinal,
            output_type='ses')
        ses_coll = models.SESCollection.objects.create(
            output=output, lt_model=lt_model, ordinal=lt_model.ordinal)
        # one GMF output per realization, only when GMFs were requested
        for rlz in lt_model:
            if self.job.hazard_calculation.ground_motion_fields:
                output = models.Output.objects.create(
                    oq_job=self.job,
                    display_name='GMF rlz-%s' % rlz.id,
                    output_type='gmf')
                models.Gmf.objects.create(output=output, lt_realization=rlz)
        return ses_coll
    def pre_execute(self):
        """
        Do pre-execution work. At the moment, this work entails:
        parsing and initializing sources, parsing and initializing the
        site model (if there is one), parsing vulnerability and
        exposure files, and generating logic tree realizations. (The
        latter piece basically defines the work to be done in the
        `execute` phase.)
        """
        super(EventBasedHazardCalculator, self).pre_execute()
        # create the SES containers, one per source model realization
        for lt_model in models.LtSourceModel.objects.filter(
                hazard_calculation=self.hc):
            self.initialize_ses_db_records(lt_model)
    def post_process(self):
        """
        If requested, perform additional processing of GMFs to produce hazard
        curves.
        """
        if self.hc.hazard_curves_from_gmfs:
            with EnginePerformanceMonitor('generating hazard curves',
                                          self.job.id):
                self.parallelize(
                    post_processing.gmf_to_hazard_curve_task,
                    post_processing.gmf_to_hazard_curve_arg_gen(self.job),
                    lambda res: None)
            # If `mean_hazard_curves` is True and/or `quantile_hazard_curves`
            # has some value (not an empty list), do this additional
            # post-processing.
            if self.hc.mean_hazard_curves or self.hc.quantile_hazard_curves:
                with EnginePerformanceMonitor(
                        'generating mean/quantile curves', self.job.id):
                    self.do_aggregate_post_proc()
            if self.hc.hazard_maps:
                with EnginePerformanceMonitor(
                        'generating hazard maps', self.job.id):
                    self.parallelize(
                        cls_post_proc.hazard_curves_to_hazard_map_task,
                        cls_post_proc.hazard_curves_to_hazard_map_task_arg_gen(
                            self.job),
                        lambda res: None)
Revolutionary move/desperate attempt: now the GMFs are computed and saved in the controller node
Former-commit-id: 58136bf684c1c5d7dd5037945828a8e7f0381ce8
# Copyright (c) 2010-2014, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Core calculator functionality for computing stochastic event sets and ground
motion fields using the 'event-based' method.
Stochastic event sets (which can be thought of as collections of ruptures) are
computed given a set of seismic sources and an investigation time span (in years).
For more information on computing stochastic event sets, see
:mod:`openquake.hazardlib.calc.stochastic`.
One can optionally compute a ground motion field (GMF) given a rupture, a site
collection (which is a collection of geographical points with associated soil
parameters), and a ground shaking intensity model (GSIM).
For more information on computing ground motion fields, see
:mod:`openquake.hazardlib.calc.gmf`.
"""
import time
import random
import collections
import numpy.random
from django.db import transaction
from openquake.hazardlib.calc import gmf, filters
from openquake.hazardlib.imt import from_string
from openquake.engine import logs, writer
from openquake.engine.calculators.hazard import general
from openquake.engine.calculators.hazard.classical import (
post_processing as cls_post_proc)
from openquake.engine.calculators.hazard.event_based import post_processing
from openquake.engine.db import models
from openquake.engine.utils import tasks
from openquake.engine.performance import EnginePerformanceMonitor, LightMonitor
# NB: beware of large caches
# Module-level buffered writer: collects GmfData rows and bulk-inserts them
# in batches of 1000 to limit memory usage and database round trips.
inserter = writer.CacheInserter(models.GmfData, 1000)
@tasks.oqtask
def compute_ruptures(
        job_id, sitecol, src_seeds, trt_model_id, gsims, task_no):
    """
    Celery task for the stochastic event set calculator.
    Samples logic trees and calls the stochastic event set calculator.
    Once stochastic event sets are calculated, results will be saved to the
    database. See :class:`openquake.engine.db.models.SESCollection`.
    If the `ground_motion_fields` parameter is set in the job configuration,
    the data needed to compute the GMFs are collected in the returned
    :class:`RuptureCollector`; the GMFs themselves are computed and saved
    later (see `EventBasedHazardCalculator.task_completed`).
    :param int job_id:
        ID of the currently running job.
    :param sitecol:
        a :class:`openquake.hazardlib.site.SiteCollection` instance
    :param src_seeds:
        List of pairs (source, seed)
    :param int trt_model_id:
        ID of a :class:`openquake.engine.db.models.TrtModel` instance
    :param gsims:
        list of distinct GSIM instances
    :param task_no:
        an ordinal so that GMV can be collected in a reproducible order
    :returns:
        a :class:`RuptureCollector` with the collected rupture data
    """
    # NB: all realizations in gsims correspond to the same source model
    trt_model = models.TrtModel.objects.get(pk=trt_model_id)
    ses_coll = models.SESCollection.objects.get(lt_model=trt_model.lt_model)
    hc = models.HazardCalculation.objects.get(oqjob=job_id)
    all_ses = list(ses_coll)
    imts = map(from_string, hc.intensity_measure_types)
    params = dict(
        correl_model=general.get_correl_model(hc),
        truncation_level=hc.truncation_level,
        maximum_distance=hc.maximum_distance)
    rupturecollector = RuptureCollector(
        params, imts, gsims, trt_model.id, task_no)
    # LightMonitors accumulate the time spent in each phase; they are
    # flushed to the performance tables at the end of the task
    filter_sites_mon = LightMonitor(
        'filtering sites', job_id, compute_ruptures)
    generate_ruptures_mon = LightMonitor(
        'generating ruptures', job_id, compute_ruptures)
    filter_ruptures_mon = LightMonitor(
        'filtering ruptures', job_id, compute_ruptures)
    save_ruptures_mon = LightMonitor(
        'saving ruptures', job_id, compute_ruptures)
    # Compute and save stochastic event sets
    rnd = random.Random()
    num_distinct_ruptures = 0
    total_ruptures = 0
    for src, seed in src_seeds:
        t0 = time.time()
        # re-seed per source so sampling is reproducible regardless of how
        # the sources are grouped into tasks
        rnd.seed(seed)
        with filter_sites_mon:  # filtering sources
            s_sites = src.filter_sites_by_distance_to_source(
                hc.maximum_distance, sitecol
            ) if hc.maximum_distance else sitecol
        if s_sites is None:
            # all sites are beyond the integration distance of this source
            continue
        # the dictionary `ses_num_occ` contains [(ses, num_occurrences)]
        # for each occurring rupture for each ses in the ses collection
        ses_num_occ = collections.defaultdict(list)
        with generate_ruptures_mon:  # generating ruptures for the given source
            for rup_no, rup in enumerate(src.iter_ruptures(), 1):
                rup.rup_no = rup_no
                for ses in all_ses:
                    numpy.random.seed(rnd.randint(0, models.MAX_SINT_32))
                    num_occurrences = rup.sample_number_of_occurrences()
                    if num_occurrences:
                        ses_num_occ[rup].append((ses, num_occurrences))
                        total_ruptures += num_occurrences
        # NB: the number of occurrences is very low, << 1, so it is
        # more efficient to filter only the ruptures that occur, i.e.
        # to call sample_number_of_occurrences() *before* the filtering
        for rup in ses_num_occ.keys():
            # NB: iterating on .keys() (a list in Python 2) is what makes
            # the `del ses_num_occ[rup]` below safe during the iteration
            with filter_ruptures_mon:  # filtering ruptures
                r_sites = filters.filter_sites_by_distance_to_rupture(
                    rup, hc.maximum_distance, s_sites
                ) if hc.maximum_distance else s_sites
            if r_sites is None:
                # ignore ruptures which are far away
                del ses_num_occ[rup]  # save memory
                continue
            ses_ruptures = []
            with save_ruptures_mon:  # saving ses_ruptures
                # using a django transaction make the saving faster
                with transaction.commit_on_success(using='job_init'):
                    indices = r_sites.indices if len(r_sites) < len(sitecol) \
                        else None  # None means that nothing was filtered
                    prob_rup = models.ProbabilisticRupture.create(
                        rup, ses_coll, indices)
                    for ses, num_occurrences in ses_num_occ[rup]:
                        for occ_no in range(1, num_occurrences + 1):
                            rup_seed = rnd.randint(0, models.MAX_SINT_32)
                            ses_rup = models.SESRupture.create(
                                prob_rup, ses, src.source_id,
                                rup.rup_no, occ_no, rup_seed)
                            ses_ruptures.append(ses_rup)
            if hc.ground_motion_fields:
                # defer the GMF computation: only record the inputs
                for ses_rup in ses_ruptures:
                    rupturecollector.collect(
                        r_sites, rup, ses_rup.id, ses_rup.seed)
        # log calc_time per distinct rupture
        if ses_num_occ:
            num_ruptures = len(ses_num_occ)
            tot_ruptures = sum(num for rup in ses_num_occ
                               for ses, num in ses_num_occ[rup])
            logs.LOG.info(
                'job=%d, src=%s:%s, num_ruptures=%d, tot_ruptures=%d, '
                'num_sites=%d, calc_time=%fs', job_id, src.source_id,
                src.__class__.__name__, num_ruptures, tot_ruptures,
                len(s_sites), time.time() - t0)
            num_distinct_ruptures += num_ruptures
    if num_distinct_ruptures:
        logs.LOG.info('job=%d, task %d generated %d/%d ruptures',
                      job_id, task_no, num_distinct_ruptures, total_ruptures)
    filter_sites_mon.flush()
    generate_ruptures_mon.flush()
    filter_ruptures_mon.flush()
    save_ruptures_mon.flush()
    return rupturecollector
class RuptureCollector(object):
    """
    A class to store ruptures and then compute and save ground motion fields.
    """
    def __init__(self, params, imts, gsims, trt_model_id, task_no=0):
        """
        :param params:
            a dictionary of parameters with keys
            correl_model, truncation_level, maximum_distance
        :param imts:
            a list of hazardlib intensity measure types
        :param gsims:
            a list of distinct GSIM instances
        :param int trt_model_id:
            the ID of a TRTModel instance
        :param int task_no:
            the ordinal of the task which created this collector
        """
        self.params = params
        self.imts = imts
        self.gsims = gsims
        self.trt_model_id = trt_model_id
        self.task_no = task_no
        # accumulators filled by calc_gmf, keyed by (gsim_name, imt, site_id)
        # NB: I tried to use a single dictionary
        # {site_id: [(gmv, rupt_id),...]} but it took a lot more memory (MS)
        self.gmvs_per_site = collections.defaultdict(list)
        self.ruptures_per_site = collections.defaultdict(list)
        # raw (r_sites, rupture, rupture_id, rupture_seed) tuples
        self.rupture_data = []
    def collect(self, r_sites, rupture, rupture_id, rupture_seed):
        """
        Collect rupture data.
        The arguments are the same accepted by `calc_gmf`, which will be
        applied to them at a later stage.
        """
        self.rupture_data.append((r_sites, rupture, rupture_id, rupture_seed))
    def calc_gmf(self, r_sites, rupture, rupture_id, rupture_seed):
        """
        Compute the GMF generated by the given rupture on the given
        sites and collect the values in the dictionaries
        .gmvs_per_site and .ruptures_per_site.
        :param r_sites:
            the collection of sites affected by the rupture
        :param rupture:
            an `openquake.hazardlib.source.rupture.
            ParametricProbabilisticRupture` instance
        :param rupture_id:
            the id of an `openquake.engine.db.models.SESRupture` instance
        :param rupture_seed:
            an integer to be used as stochastic seed
        """
        # one GmfComputer per GSIM; each computes the field for all IMTs
        for gsim in self.gsims:
            gsim_name = gsim.__class__.__name__
            computer = gmf.GmfComputer(rupture, r_sites, self.imts, gsim,
                                       self.params['truncation_level'],
                                       self.params['correl_model'])
            gmf_dict = computer.compute(rupture_seed)
            # NB: .iteritems() is a Python 2 idiom
            for imt, gmvs in gmf_dict.iteritems():
                for site_id, gmv in zip(r_sites.sids, gmvs):
                    # convert a 1x1 matrix into a float
                    gmv = float(gmv)
                    if gmv:
                        # only nonzero values are stored, to save space
                        self.gmvs_per_site[
                            gsim_name, imt, site_id].append(gmv)
                        self.ruptures_per_site[
                            gsim_name, imt, site_id].append(rupture_id)
    def save_gmfs(self):
        """
        Helper method to save the computed GMF data to the database,
        then reset the internal accumulators to free memory.
        """
        rlzs = models.TrtModel.objects.get(
            pk=self.trt_model_id).get_rlzs_by_gsim()
        for gsim_name, imt, site_id in self.gmvs_per_site:
            # the same GMVs are saved once per realization sharing the GSIM
            for rlz in rlzs[gsim_name]:
                imt_name, sa_period, sa_damping = imt
                inserter.add(models.GmfData(
                    gmf=models.Gmf.objects.get(lt_realization=rlz),
                    task_no=self.task_no,
                    imt=imt_name,
                    sa_period=sa_period,
                    sa_damping=sa_damping,
                    site_id=site_id,
                    gmvs=self.gmvs_per_site[gsim_name, imt, site_id],
                    rupture_ids=self.ruptures_per_site[gsim_name, imt, site_id]
                ))
        inserter.flush()
        # free memory for the next batch
        self.rupture_data[:] = []
        self.gmvs_per_site.clear()
        self.ruptures_per_site.clear()
class EventBasedHazardCalculator(general.BaseHazardCalculator):
    """
    Probabilistic Event-Based hazard calculator. Computes stochastic event sets
    and (optionally) ground motion fields.
    """
    core_calc_task = compute_ruptures
    def task_arg_gen(self, _block_size=None):
        """
        Loop through realizations and sources to generate a sequence of
        task arg tuples. Each tuple of args applies to a single task.
        Yielded results are tuples of the form job_id, sources, ses, seeds
        (seeds will be used to seed numpy for temporal occurence sampling).
        :param _block_size:
            ignored; kept for signature compatibility with the parent class
        """
        hc = self.hc
        rnd = random.Random()
        # the master seed makes the whole seed sequence reproducible
        rnd.seed(hc.random_seed)
        for job_id, sitecol, block, lt_model, gsims, task_no in \
                super(EventBasedHazardCalculator, self).task_arg_gen():
            ss = [(src, rnd.randint(0, models.MAX_SINT_32))
                  for src in block]  # source, seed pairs
            yield job_id, sitecol, ss, lt_model, gsims, task_no
        # now the source_blocks_per_ltpath dictionary can be cleared
        self.source_blocks_per_ltpath.clear()
    def task_completed(self, rupturecollector):
        """
        If the parameter `ground_motion_fields` is set, compute and save
        the GMFs from the ruptures generated by the given task and stored
        in the `rupturecollector`.
        NOTE(review): this callback appears to run outside the worker task
        (per the commit intent, on the controller node) — confirm.
        """
        if not self.hc.ground_motion_fields:
            return  # do nothing
        # rebuild the IMT objects from their string representation;
        # presumably the originals do not survive the trip through the
        # task queue — verify
        rupturecollector.imts = map(
            from_string, self.hc.intensity_measure_types)
        with self.monitor('computing gmfs'):
            for rupture_data in rupturecollector.rupture_data:
                rupturecollector.calc_gmf(*rupture_data)
        with self.monitor('saving gmfs'):
            rupturecollector.save_gmfs()
    def initialize_ses_db_records(self, lt_model):
        """
        Create :class:`~openquake.engine.db.models.Output`,
        :class:`~openquake.engine.db.models.SESCollection` and
        :class:`~openquake.engine.db.models.SES` "container" records for
        a single realization.
        Stochastic event set ruptures computed for this realization will be
        associated to these containers.
        NOTE: Many tasks can contribute ruptures to the same SES.
        :param lt_model:
            a source model logic tree realization record
        :returns:
            the newly created SESCollection instance
        """
        output = models.Output.objects.create(
            oq_job=self.job,
            display_name='SES Collection smlt-%d' % lt_model.ordinal,
            output_type='ses')
        ses_coll = models.SESCollection.objects.create(
            output=output, lt_model=lt_model, ordinal=lt_model.ordinal)
        # one GMF output per realization, only when GMFs were requested
        for rlz in lt_model:
            if self.job.hazard_calculation.ground_motion_fields:
                output = models.Output.objects.create(
                    oq_job=self.job,
                    display_name='GMF rlz-%s' % rlz.id,
                    output_type='gmf')
                models.Gmf.objects.create(output=output, lt_realization=rlz)
        return ses_coll
    def pre_execute(self):
        """
        Do pre-execution work. At the moment, this work entails:
        parsing and initializing sources, parsing and initializing the
        site model (if there is one), parsing vulnerability and
        exposure files, and generating logic tree realizations. (The
        latter piece basically defines the work to be done in the
        `execute` phase.)
        """
        super(EventBasedHazardCalculator, self).pre_execute()
        # create the SES containers, one per source model realization
        for lt_model in models.LtSourceModel.objects.filter(
                hazard_calculation=self.hc):
            self.initialize_ses_db_records(lt_model)
    def post_process(self):
        """
        If requested, perform additional processing of GMFs to produce hazard
        curves.
        """
        if self.hc.hazard_curves_from_gmfs:
            with EnginePerformanceMonitor('generating hazard curves',
                                          self.job.id):
                self.parallelize(
                    post_processing.gmf_to_hazard_curve_task,
                    post_processing.gmf_to_hazard_curve_arg_gen(self.job),
                    lambda res: None)
            # If `mean_hazard_curves` is True and/or `quantile_hazard_curves`
            # has some value (not an empty list), do this additional
            # post-processing.
            if self.hc.mean_hazard_curves or self.hc.quantile_hazard_curves:
                with EnginePerformanceMonitor(
                        'generating mean/quantile curves', self.job.id):
                    self.do_aggregate_post_proc()
            if self.hc.hazard_maps:
                with EnginePerformanceMonitor(
                        'generating hazard maps', self.job.id):
                    self.parallelize(
                        cls_post_proc.hazard_curves_to_hazard_map_task,
                        cls_post_proc.hazard_curves_to_hazard_map_task_arg_gen(
                            self.job),
                        lambda res: None)
|
# Copyright (c) 2010-2014, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Core calculator functionality for computing stochastic event sets and ground
motion fields using the 'event-based' method.
Stochastic event sets (which can be thought of as collections of ruptures) are
computed given a set of seismic sources and an investigation time span (in years).
For more information on computing stochastic event sets, see
:mod:`openquake.hazardlib.calc.stochastic`.
One can optionally compute a ground motion field (GMF) given a rupture, a site
collection (which is a collection of geographical points with associated soil
parameters), and a ground shaking intensity model (GSIM).
For more information on computing ground motion fields, see
:mod:`openquake.hazardlib.calc.gmf`.
"""
import time
import random
import collections
import numpy.random
from django.db import transaction
from openquake.hazardlib.calc import gmf, filters
from openquake.hazardlib.imt import from_string
from openquake.engine import logs, writer
from openquake.engine.calculators.hazard import general
from openquake.engine.calculators.hazard.classical import (
post_processing as cls_post_proc)
from openquake.engine.calculators.hazard.event_based import post_processing
from openquake.engine.db import models
from openquake.engine.utils import tasks
from openquake.engine.performance import EnginePerformanceMonitor, LightMonitor
# NB: beware of large caches
# Module-level buffered writer: collects GmfData rows and bulk-inserts them
# in batches of 1000 to limit memory usage and database round trips.
inserter = writer.CacheInserter(models.GmfData, 1000)
@tasks.oqtask
def compute_ruptures(
        job_id, sitecol, src_seeds, trt_model_id, gsims, task_no):
    """
    Celery task for the stochastic event set calculator.
    Samples logic trees and calls the stochastic event set calculator.
    Once stochastic event sets are calculated, results will be saved to the
    database. See :class:`openquake.engine.db.models.SESCollection`.
    The data needed to compute the GMFs are collected in the returned
    :class:`RuptureCollector`; the GMFs themselves are computed and saved
    later, by the `compute_and_save_gmfs` task.
    :param int job_id:
        ID of the currently running job.
    :param sitecol:
        a :class:`openquake.hazardlib.site.SiteCollection` instance
    :param src_seeds:
        List of pairs (source, seed)
    :param int trt_model_id:
        ID of a :class:`openquake.engine.db.models.TrtModel` instance
    :param gsims:
        list of distinct GSIM instances
    :param task_no:
        an ordinal so that GMV can be collected in a reproducible order
    :returns:
        a :class:`RuptureCollector` with the collected rupture data
    """
    # NB: all realizations in gsims correspond to the same source model
    trt_model = models.TrtModel.objects.get(pk=trt_model_id)
    ses_coll = models.SESCollection.objects.get(lt_model=trt_model.lt_model)
    hc = models.HazardCalculation.objects.get(oqjob=job_id)
    all_ses = list(ses_coll)
    imts = map(from_string, hc.intensity_measure_types)
    params = dict(
        correl_model=general.get_correl_model(hc),
        truncation_level=hc.truncation_level,
        maximum_distance=hc.maximum_distance)
    rupturecollector = RuptureCollector(
        params, imts, gsims, trt_model.id, task_no)
    # LightMonitors accumulate the time spent in each phase; they are
    # flushed to the performance tables at the end of the task
    filter_sites_mon = LightMonitor(
        'filtering sites', job_id, compute_ruptures)
    generate_ruptures_mon = LightMonitor(
        'generating ruptures', job_id, compute_ruptures)
    filter_ruptures_mon = LightMonitor(
        'filtering ruptures', job_id, compute_ruptures)
    save_ruptures_mon = LightMonitor(
        'saving ruptures', job_id, compute_ruptures)
    # Compute and save stochastic event sets
    rnd = random.Random()
    num_distinct_ruptures = 0
    total_ruptures = 0
    for src, seed in src_seeds:
        t0 = time.time()
        # re-seed per source so sampling is reproducible regardless of how
        # the sources are grouped into tasks
        rnd.seed(seed)
        with filter_sites_mon:  # filtering sources
            s_sites = src.filter_sites_by_distance_to_source(
                hc.maximum_distance, sitecol
            ) if hc.maximum_distance else sitecol
        if s_sites is None:
            # all sites are beyond the integration distance of this source
            continue
        # the dictionary `ses_num_occ` contains [(ses, num_occurrences)]
        # for each occurring rupture for each ses in the ses collection
        ses_num_occ = collections.defaultdict(list)
        with generate_ruptures_mon:  # generating ruptures for the given source
            for rup_no, rup in enumerate(src.iter_ruptures(), 1):
                rup.rup_no = rup_no
                for ses in all_ses:
                    numpy.random.seed(rnd.randint(0, models.MAX_SINT_32))
                    num_occurrences = rup.sample_number_of_occurrences()
                    if num_occurrences:
                        ses_num_occ[rup].append((ses, num_occurrences))
                        total_ruptures += num_occurrences
        # NB: the number of occurrences is very low, << 1, so it is
        # more efficient to filter only the ruptures that occur, i.e.
        # to call sample_number_of_occurrences() *before* the filtering
        for rup in ses_num_occ.keys():
            # NB: iterating on .keys() (a list in Python 2) is what makes
            # the `del ses_num_occ[rup]` below safe during the iteration
            with filter_ruptures_mon:  # filtering ruptures
                r_sites = filters.filter_sites_by_distance_to_rupture(
                    rup, hc.maximum_distance, s_sites
                ) if hc.maximum_distance else s_sites
            if r_sites is None:
                # ignore ruptures which are far away
                del ses_num_occ[rup]  # save memory
                continue
            # saving ses_ruptures
            ses_ruptures = []
            with save_ruptures_mon:
                # using a django transaction make the saving faster
                with transaction.commit_on_success(using='job_init'):
                    indices = r_sites.indices if len(r_sites) < len(sitecol) \
                        else None  # None means that nothing was filtered
                    prob_rup = models.ProbabilisticRupture.create(
                        rup, ses_coll, indices)
                    for ses, num_occurrences in ses_num_occ[rup]:
                        for occ_no in range(1, num_occurrences + 1):
                            rup_seed = rnd.randint(0, models.MAX_SINT_32)
                            ses_rup = models.SESRupture.create(
                                prob_rup, ses, src.source_id,
                                rup.rup_no, occ_no, rup_seed)
                            ses_ruptures.append(ses_rup)
            # collecting ses_ruptures
            # NB: collection happens unconditionally here (no
            # ground_motion_fields guard, unlike earlier versions);
            # `trts` is a set, so the repeated .add is harmless
            for ses_rup in ses_ruptures:
                rupturecollector.trts.add(src.tectonic_region_type)
                rupturecollector.rupture_data.append(
                    (r_sites, rup, ses_rup.id, ses_rup.seed))
        # log calc_time per distinct rupture
        if ses_num_occ:
            num_ruptures = len(ses_num_occ)
            tot_ruptures = sum(num for rup in ses_num_occ
                               for ses, num in ses_num_occ[rup])
            logs.LOG.info(
                'job=%d, src=%s:%s, num_ruptures=%d, tot_ruptures=%d, '
                'num_sites=%d, calc_time=%fs', job_id, src.source_id,
                src.__class__.__name__, num_ruptures, tot_ruptures,
                len(s_sites), time.time() - t0)
            num_distinct_ruptures += num_ruptures
    if num_distinct_ruptures:
        logs.LOG.info('job=%d, task %d generated %d/%d ruptures',
                      job_id, task_no, num_distinct_ruptures, total_ruptures)
    filter_sites_mon.flush()
    generate_ruptures_mon.flush()
    filter_ruptures_mon.flush()
    save_ruptures_mon.flush()
    return rupturecollector
@tasks.oqtask
def compute_and_save_gmfs(job_id, sids, rupt_collector):
    """
    Celery task computing the GMFs from the rupture data stored in the
    given collector and saving them to the database. If the job parameter
    `hazard_curves_from_gmfs` is set, hazard curves are also built from
    the GMFs and returned.
    :param int job_id:
        ID of the currently running job
    :param sids:
        numpy array of site IDs
    :param rupt_collector:
        an instance of `openquake.engine.calculators.hazard.event_based.core.RuptureCollector`
    :returns:
        a triple (curves_by_gsim, trt_model_id, []) when
        `hazard_curves_from_gmfs` is set, otherwise None
    """
    hc = models.HazardCalculation.objects.get(oqjob=job_id)
    with EnginePerformanceMonitor(
            'computing gmfs', job_id, compute_and_save_gmfs):
        for rupture_data in rupt_collector.rupture_data:
            rupt_collector.calc_gmf(*rupture_data)
    with EnginePerformanceMonitor(
            'saving gmfs', job_id, compute_and_save_gmfs):
        rupt_collector.save_gmfs()
    if hc.hazard_curves_from_gmfs:
        with EnginePerformanceMonitor(
                'hazard curves from gmfs', job_id, compute_and_save_gmfs):
            # NOTE(review): to_haz_curves is defined on RuptureCollector
            # outside this excerpt
            curves_by_gsim = rupt_collector.to_haz_curves(
                sids, hc.investigation_time, hc.ses_per_logic_tree_path)
        return curves_by_gsim, rupt_collector.trt_model_id, []
class RuptureCollector(object):
    """
    A class to store ruptures and then compute and save ground motion fields.
    """
    def __init__(self, params, imts, gsims, trt_model_id, task_no):
        """
        :param params:
            a dictionary of parameters with keys
            correl_model, truncation_level, maximum_distance
        :param imts:
            a list of hazardlib intensity measure types
        :param gsims:
            a list of distinct GSIM instances
        :param int trt_model_id:
            the ID of a TRTModel instance
        :param int task_no:
            ordinal of the task which generated the ruptures, so that
            the GMVs can be saved in a reproducible order
        """
        self.params = params
        self.imts = imts
        self.gsims = gsims
        self.trt_model_id = trt_model_id
        self.task_no = task_no
        # NB: I tried to use a single dictionary
        # {site_id: [(gmv, rupt_id),...]} but it took a lot more memory (MS)
        self.gmvs_per_site = collections.defaultdict(list)
        self.ruptures_per_site = collections.defaultdict(list)
        self.trts = set()  # tectonic region types of the collected sources
        self.rupture_data = []  # (r_sites, rupture, rupture_id, seed) tuples

    def calc_gmf(self, r_sites, rupture, rupture_id, rupture_seed):
        """
        Compute the GMF generated by the given rupture on the given
        sites and collect the values in the dictionaries
        .gmvs_per_site and .ruptures_per_site.

        :param r_sites:
            the collection of sites affected by the rupture
        :param rupture:
            an `openquake.hazardlib.source.rupture.
            ParametricProbabilisticRupture` instance
        :param rupture_id:
            the id of an `openquake.engine.db.models.SESRupture` instance
        :param rupture_seed:
            an integer to be used as stochastic seed
        """
        computer = gmf.GmfComputer(rupture, r_sites, self.imts, self.gsims,
                                   self.params['truncation_level'],
                                   self.params['correl_model'])
        gmf_dict = computer.compute(rupture_seed)
        for gsim_name, imt in gmf_dict:
            gmvs = gmf_dict[gsim_name, imt]
            for site_id, gmv in zip(r_sites.sids, gmvs):
                # convert a 1x1 matrix into a float
                gmv = float(gmv)
                if gmv:  # store only nonzero values, to save memory
                    self.gmvs_per_site[
                        gsim_name, imt, site_id].append(gmv)
                    self.ruptures_per_site[
                        gsim_name, imt, site_id].append(rupture_id)

    def save_gmfs(self):
        """
        Helper method to save the computed GMF data to the database,
        then clear the internal dictionaries to free memory.
        """
        rlzs = models.TrtModel.objects.get(
            pk=self.trt_model_id).get_rlzs_by_gsim()
        for gsim_name, imt, site_id in self.gmvs_per_site:
            if not rlzs[gsim_name]:
                logs.LOG.warn('No realizations for TrtModel=%d, GSIM=%s',
                              self.trt_model_id, gsim_name)
            for rlz in rlzs[gsim_name]:
                imt_name, sa_period, sa_damping = imt
                inserter.add(models.GmfData(
                    gmf=models.Gmf.objects.get(lt_realization=rlz),
                    task_no=self.task_no,
                    imt=imt_name,
                    sa_period=sa_period,
                    sa_damping=sa_damping,
                    site_id=site_id,
                    gmvs=self.gmvs_per_site[gsim_name, imt, site_id],
                    rupture_ids=self.ruptures_per_site[gsim_name, imt, site_id]
                ))
        inserter.flush()
        # free memory: the collector may be kept alive by the calculator
        self.rupture_data[:] = []
        self.gmvs_per_site.clear()
        self.ruptures_per_site.clear()

    def to_haz_curves(self, sids, imls, invest_time, num_ses):
        """
        Convert the collected GMVs into hazard curves (by gsim and imt).

        :param sids: array of site IDs
        :param imls: intensity measure levels
        :param invest_time: investigation time in years
        :param num_ses: number of stochastic event sets
        """
        # NB: named `curves_by_key` (not `gmf`, as it once was) to avoid
        # shadowing the `openquake.hazardlib.calc.gmf` module used above
        curves_by_key = collections.defaultdict(dict)
        for (gsim, imt, site_id), gmvs in self.gmvs_per_site.iteritems():
            curves_by_key[gsim, imt][site_id] = \
                post_processing.gmvs_to_haz_curve(
                    gmvs, imls, invest_time, num_ses * invest_time)
        curves_by_gsim = []
        for gsim in self.gsims:
            curves_by_imt = []
            for imt in self.imts:
                # sites without any GMV get a flat zero curve
                curves_by_imt.append(
                    numpy.array([curves_by_key[gsim, imt].get(site_id, 0)
                                 for site_id in sids]))
            curves_by_gsim.append((gsim, curves_by_imt))
        return curves_by_gsim
class EventBasedHazardCalculator(general.BaseHazardCalculator):
    """
    Probabilistic Event-Based hazard calculator. Computes stochastic event sets
    and (optionally) ground motion fields.
    """
    core_calc_task = compute_ruptures

    def task_arg_gen(self, _block_size=None):
        """
        Loop through realizations and sources to generate a sequence of
        task arg tuples. Each tuple of args applies to a single task.

        Yielded results are tuples of the form job_id, sources, ses, seeds
        (seeds will be used to seed numpy for temporal occurrence sampling).
        """
        hc = self.hc
        rnd = random.Random()
        # seed once from the calculation seed so the per-source seeds
        # below are reproducible across runs
        rnd.seed(hc.random_seed)
        for job_id, sitecol, block, lt_model, gsims, task_no in \
                super(EventBasedHazardCalculator, self).task_arg_gen():
            ss = [(src, rnd.randint(0, models.MAX_SINT_32))
                  for src in block]  # source, seed pairs
            yield job_id, sitecol, ss, lt_model, gsims, task_no
        # now the source_blocks_per_ltpath dictionary can be cleared
        self.source_blocks_per_ltpath.clear()

    def task_completed(self, rupturecollector):
        """
        :param rupturecollector:
            an instance of `openquake.engine.calculators.hazard.event_based.core.RuptureCollector`

        If the parameter `ground_motion_fields` is set, compute and save
        the GMFs from the ruptures generated by the given task and stored
        in the `rupturecollector`.
        """
        if not self.hc.ground_motion_fields:
            return  # do nothing
        # keep the collector around: the GMFs are computed later, in
        # post_execute, once all ruptures have been generated
        self.rupt_collectors.append(rupturecollector)
        self.num_ruptures[rupturecollector.trt_model_id] += \
            len(rupturecollector.rupture_data)

    @EnginePerformanceMonitor.monitor
    def post_execute(self):
        """
        Save the number of collected ruptures per tectonic region type
        model; then, if `ground_motion_fields` is set, create one Gmf
        output per realization and spawn `compute_and_save_gmfs` tasks
        for the collected ruptures.
        """
        for trt_id, num_ruptures in self.num_ruptures.iteritems():
            trt = models.TrtModel.objects.get(pk=trt_id)
            trt.num_ruptures = num_ruptures
            trt.save()
        super(EventBasedHazardCalculator, self).post_execute()
        if not self.hc.ground_motion_fields:
            return  # do nothing
        # create a Gmf output for each realization
        for rlz in self._get_realizations():
            output = models.Output.objects.create(
                oq_job=self.job,
                display_name='GMF rlz-%s' % rlz.id,
                output_type='gmf')
            models.Gmf.objects.create(output=output, lt_realization=rlz)
        # one compute_and_save_gmfs task per rupture collector
        otm = tasks.OqTaskManager(compute_and_save_gmfs, logs.LOG.progress)
        sids = self.hc.site_collection.sids
        for rupt_collector in self.rupt_collectors:
            otm.submit(self.job.id, sids, rupt_collector)
        otm.aggregate_results(lambda acc, x: None, None)

    def initialize_ses_db_records(self, lt_model):
        """
        Create :class:`~openquake.engine.db.models.Output`,
        :class:`~openquake.engine.db.models.SESCollection` and
        :class:`~openquake.engine.db.models.SES` "container" records for
        a single realization.

        Stochastic event set ruptures computed for this realization will be
        associated to these containers.

        NOTE: Many tasks can contribute ruptures to the same SES.

        :param lt_model:
            a :class:`~openquake.engine.db.models.LtSourceModel` instance
        :returns:
            the newly created SESCollection instance
        """
        output = models.Output.objects.create(
            oq_job=self.job,
            display_name='SES Collection smlt-%d' % lt_model.ordinal,
            output_type='ses')
        ses_coll = models.SESCollection.objects.create(
            output=output, lt_model=lt_model, ordinal=lt_model.ordinal)
        return ses_coll

    def pre_execute(self):
        """
        Do pre-execution work. At the moment, this work entails:
        parsing and initializing sources, parsing and initializing the
        site model (if there is one), parsing vulnerability and
        exposure files, and generating logic tree realizations. (The
        latter piece basically defines the work to be done in the
        `execute` phase.)
        """
        super(EventBasedHazardCalculator, self).pre_execute()
        # one SES collection per source model in the logic tree
        for lt_model in models.LtSourceModel.objects.filter(
                hazard_calculation=self.hc):
            self.initialize_ses_db_records(lt_model)

    def post_process(self):
        """
        If requested, perform additional processing of GMFs to produce hazard
        curves.
        """
        if self.hc.hazard_curves_from_gmfs:
            with self.monitor('generating hazard curves'):
                self.parallelize(
                    post_processing.gmf_to_hazard_curve_task,
                    post_processing.gmf_to_hazard_curve_arg_gen(self.job),
                    lambda res: None)

            # If `mean_hazard_curves` is True and/or `quantile_hazard_curves`
            # has some value (not an empty list), do this additional
            # post-processing.
            if self.hc.mean_hazard_curves or self.hc.quantile_hazard_curves:
                with self.monitor('generating mean/quantile curves'):
                    self.do_aggregate_post_proc()

            if self.hc.hazard_maps:
                with self.monitor('generating hazard maps'):
                    self.parallelize(
                        cls_post_proc.hazard_curves_to_hazard_map_task,
                        cls_post_proc.hazard_curves_to_hazard_map_task_arg_gen(
                            self.job),
                        lambda res: None)
Removed a confusing monitor
Former-commit-id: 182e5de60818c2af8f7c164d3d3cbc88cb8ba688 [formerly b82c8587fd1f649c9f5f84a711bb125866165105]
Former-commit-id: e14f286bc66d6fe968ce706c301b6246a0eded66
# Copyright (c) 2010-2014, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Core calculator functionality for computing stochastic event sets and ground
motion fields using the 'event-based' method.
Stochastic events sets (which can be thought of as collections of ruptures) are
computed given a set of seismic sources and investigation time span (in years).
For more information on computing stochastic event sets, see
:mod:`openquake.hazardlib.calc.stochastic`.
One can optionally compute a ground motion field (GMF) given a rupture, a site
collection (which is a collection of geographical points with associated soil
parameters), and a ground shaking intensity model (GSIM).
For more information on computing ground motion fields, see
:mod:`openquake.hazardlib.calc.gmf`.
"""
import time
import random
import collections
import numpy.random
from django.db import transaction
from openquake.hazardlib.calc import gmf, filters
from openquake.hazardlib.imt import from_string
from openquake.engine import logs, writer
from openquake.engine.calculators.hazard import general
from openquake.engine.calculators.hazard.classical import (
post_processing as cls_post_proc)
from openquake.engine.calculators.hazard.event_based import post_processing
from openquake.engine.db import models
from openquake.engine.utils import tasks
from openquake.engine.performance import EnginePerformanceMonitor, LightMonitor
# NB: beware of large caches
inserter = writer.CacheInserter(models.GmfData, 1000)
@tasks.oqtask
def compute_ruptures(
        job_id, sitecol, src_seeds, trt_model_id, gsims, task_no):
    """
    Celery task for the stochastic event set calculator.

    Samples logic trees and calls the stochastic event set calculator.

    Once stochastic event sets are calculated, results will be saved to the
    database. See :class:`openquake.engine.db.models.SESCollection`.

    Optionally (specified in the job configuration using the
    `ground_motion_fields` parameter), GMFs can be computed from each rupture
    in each stochastic event set. GMFs are also saved to the database.

    :param int job_id:
        ID of the currently running job.
    :param sitecol:
        a :class:`openquake.hazardlib.site.SiteCollection` instance
    :param src_seeds:
        List of pairs (source, seed)
    :param int trt_model_id:
        the ID of a :class:`openquake.engine.db.models.TrtModel` instance
    :param gsims:
        list of distinct GSIM instances
    :param task_no:
        an ordinal so that GMV can be collected in a reproducible order
    :returns:
        a :class:`RuptureCollector` with the generated ruptures
    """
    # NB: all realizations in gsims correspond to the same source model
    trt_model = models.TrtModel.objects.get(pk=trt_model_id)
    ses_coll = models.SESCollection.objects.get(lt_model=trt_model.lt_model)
    hc = models.HazardCalculation.objects.get(oqjob=job_id)
    all_ses = list(ses_coll)
    imts = map(from_string, hc.intensity_measure_types)
    params = dict(
        correl_model=general.get_correl_model(hc),
        truncation_level=hc.truncation_level,
        maximum_distance=hc.maximum_distance)
    rupturecollector = RuptureCollector(
        params, imts, gsims, trt_model.id, task_no)

    # lightweight monitors measuring the hot spots of the task; they are
    # flushed (i.e. saved to the database) once, at the end of the task
    filter_sites_mon = LightMonitor(
        'filtering sites', job_id, compute_ruptures)
    generate_ruptures_mon = LightMonitor(
        'generating ruptures', job_id, compute_ruptures)
    filter_ruptures_mon = LightMonitor(
        'filtering ruptures', job_id, compute_ruptures)
    save_ruptures_mon = LightMonitor(
        'saving ruptures', job_id, compute_ruptures)

    # Compute and save stochastic event sets
    rnd = random.Random()
    num_distinct_ruptures = 0
    total_ruptures = 0

    for src, seed in src_seeds:
        t0 = time.time()
        # reseed per source, so that the sampling below is reproducible
        # regardless of which task the source ends up in
        rnd.seed(seed)

        with filter_sites_mon:  # filtering sources
            s_sites = src.filter_sites_by_distance_to_source(
                hc.maximum_distance, sitecol
            ) if hc.maximum_distance else sitecol
            if s_sites is None:
                continue

        # the dictionary `ses_num_occ` contains [(ses, num_occurrences)]
        # for each occurring rupture for each ses in the ses collection
        ses_num_occ = collections.defaultdict(list)
        with generate_ruptures_mon:  # generating ruptures for the given source
            for rup_no, rup in enumerate(src.iter_ruptures(), 1):
                rup.rup_no = rup_no
                for ses in all_ses:
                    numpy.random.seed(rnd.randint(0, models.MAX_SINT_32))
                    num_occurrences = rup.sample_number_of_occurrences()
                    if num_occurrences:
                        ses_num_occ[rup].append((ses, num_occurrences))
                        total_ruptures += num_occurrences

        # NB: the number of occurrences is very low, << 1, so it is
        # more efficient to filter only the ruptures that occur, i.e.
        # to call sample_number_of_occurrences() *before* the filtering
        for rup in ses_num_occ.keys():
            with filter_ruptures_mon:  # filtering ruptures
                r_sites = filters.filter_sites_by_distance_to_rupture(
                    rup, hc.maximum_distance, s_sites
                ) if hc.maximum_distance else s_sites
                if r_sites is None:
                    # ignore ruptures which are far away
                    del ses_num_occ[rup]  # save memory
                    continue

            # saving ses_ruptures
            ses_ruptures = []
            with save_ruptures_mon:
                # using a django transaction make the saving faster
                with transaction.commit_on_success(using='job_init'):
                    indices = r_sites.indices if len(r_sites) < len(sitecol) \
                        else None  # None means that nothing was filtered
                    prob_rup = models.ProbabilisticRupture.create(
                        rup, ses_coll, indices)
                    for ses, num_occurrences in ses_num_occ[rup]:
                        for occ_no in range(1, num_occurrences + 1):
                            rup_seed = rnd.randint(0, models.MAX_SINT_32)
                            ses_rup = models.SESRupture.create(
                                prob_rup, ses, src.source_id,
                                rup.rup_no, occ_no, rup_seed)
                            ses_ruptures.append(ses_rup)

            # collecting ses_ruptures
            for ses_rup in ses_ruptures:
                rupturecollector.trts.add(src.tectonic_region_type)
                rupturecollector.rupture_data.append(
                    (r_sites, rup, ses_rup.id, ses_rup.seed))

        # log calc_time per distinct rupture
        if ses_num_occ:
            num_ruptures = len(ses_num_occ)
            tot_ruptures = sum(num for rup in ses_num_occ
                               for ses, num in ses_num_occ[rup])
            logs.LOG.info(
                'job=%d, src=%s:%s, num_ruptures=%d, tot_ruptures=%d, '
                'num_sites=%d, calc_time=%fs', job_id, src.source_id,
                src.__class__.__name__, num_ruptures, tot_ruptures,
                len(s_sites), time.time() - t0)
            num_distinct_ruptures += num_ruptures

    if num_distinct_ruptures:
        logs.LOG.info('job=%d, task %d generated %d/%d ruptures',
                      job_id, task_no, num_distinct_ruptures, total_ruptures)
    filter_sites_mon.flush()
    generate_ruptures_mon.flush()
    filter_ruptures_mon.flush()
    save_ruptures_mon.flush()

    return rupturecollector
@tasks.oqtask
def compute_and_save_gmfs(job_id, sids, rupt_collector):
    """
    Compute the ground motion fields for the ruptures previously collected
    by a `compute_ruptures` task, save them to the database and, if
    `hazard_curves_from_gmfs` is set, also build hazard curves from them.

    :param int job_id:
        ID of the currently running job
    :param sids:
        numpy array of site IDs
    :param rupt_collector:
        an instance of `openquake.engine.calculators.hazard.event_based.core.RuptureCollector`
    """
    hc = models.HazardCalculation.objects.get(oqjob=job_id)
    with EnginePerformanceMonitor(
            'computing gmfs', job_id, compute_and_save_gmfs):
        # each rupture_data tuple is (r_sites, rupture, rupture_id, seed)
        for rupture_data in rupt_collector.rupture_data:
            rupt_collector.calc_gmf(*rupture_data)
    with EnginePerformanceMonitor(
            'saving gmfs', job_id, compute_and_save_gmfs):
        # NOTE(review): save_gmfs() clears .gmvs_per_site at the end, so
        # the to_haz_curves call below appears to operate on empty data --
        # confirm the intended ordering of saving vs. curve building.
        rupt_collector.save_gmfs()
    if hc.hazard_curves_from_gmfs:
        with EnginePerformanceMonitor(
                'hazard curves from gmfs', job_id, compute_and_save_gmfs):
            # NOTE(review): to_haz_curves is declared as
            # to_haz_curves(sids, imls, invest_time, num_ses) but only three
            # positional arguments are passed here (no `imls`); as written
            # this call would raise a TypeError -- verify against the
            # intended signature before relying on hazard_curves_from_gmfs.
            curves_by_gsim = rupt_collector.to_haz_curves(
                sids, hc.investigation_time, hc.ses_per_logic_tree_path)
        return curves_by_gsim, rupt_collector.trt_model_id, []
class RuptureCollector(object):
    """
    A class to store ruptures and then compute and save ground motion fields.
    """
    def __init__(self, params, imts, gsims, trt_model_id, task_no):
        """
        :param params:
            a dictionary of parameters with keys
            correl_model, truncation_level, maximum_distance
        :param imts:
            a list of hazardlib intensity measure types
        :param gsims:
            a list of distinct GSIM instances
        :param int trt_model_id:
            the ID of a TRTModel instance
        :param int task_no:
            ordinal of the task which generated the ruptures, so that
            the GMVs can be saved in a reproducible order
        """
        self.params = params
        self.imts = imts
        self.gsims = gsims
        self.trt_model_id = trt_model_id
        self.task_no = task_no
        # NB: I tried to use a single dictionary
        # {site_id: [(gmv, rupt_id),...]} but it took a lot more memory (MS)
        self.gmvs_per_site = collections.defaultdict(list)
        self.ruptures_per_site = collections.defaultdict(list)
        self.trts = set()  # tectonic region types of the collected sources
        self.rupture_data = []  # (r_sites, rupture, rupture_id, seed) tuples

    def calc_gmf(self, r_sites, rupture, rupture_id, rupture_seed):
        """
        Compute the GMF generated by the given rupture on the given
        sites and collect the values in the dictionaries
        .gmvs_per_site and .ruptures_per_site.

        :param r_sites:
            the collection of sites affected by the rupture
        :param rupture:
            an `openquake.hazardlib.source.rupture.
            ParametricProbabilisticRupture` instance
        :param rupture_id:
            the id of an `openquake.engine.db.models.SESRupture` instance
        :param rupture_seed:
            an integer to be used as stochastic seed
        """
        computer = gmf.GmfComputer(rupture, r_sites, self.imts, self.gsims,
                                   self.params['truncation_level'],
                                   self.params['correl_model'])
        gmf_dict = computer.compute(rupture_seed)
        for gsim_name, imt in gmf_dict:
            gmvs = gmf_dict[gsim_name, imt]
            for site_id, gmv in zip(r_sites.sids, gmvs):
                # convert a 1x1 matrix into a float
                gmv = float(gmv)
                if gmv:  # store only nonzero values, to save memory
                    self.gmvs_per_site[
                        gsim_name, imt, site_id].append(gmv)
                    self.ruptures_per_site[
                        gsim_name, imt, site_id].append(rupture_id)

    def save_gmfs(self):
        """
        Helper method to save the computed GMF data to the database,
        then clear the internal dictionaries to free memory.
        """
        rlzs = models.TrtModel.objects.get(
            pk=self.trt_model_id).get_rlzs_by_gsim()
        for gsim_name, imt, site_id in self.gmvs_per_site:
            if not rlzs[gsim_name]:
                logs.LOG.warn('No realizations for TrtModel=%d, GSIM=%s',
                              self.trt_model_id, gsim_name)
            for rlz in rlzs[gsim_name]:
                imt_name, sa_period, sa_damping = imt
                inserter.add(models.GmfData(
                    gmf=models.Gmf.objects.get(lt_realization=rlz),
                    task_no=self.task_no,
                    imt=imt_name,
                    sa_period=sa_period,
                    sa_damping=sa_damping,
                    site_id=site_id,
                    gmvs=self.gmvs_per_site[gsim_name, imt, site_id],
                    rupture_ids=self.ruptures_per_site[gsim_name, imt, site_id]
                ))
        inserter.flush()
        # free memory: the collector may be kept alive by the calculator
        self.rupture_data[:] = []
        self.gmvs_per_site.clear()
        self.ruptures_per_site.clear()

    def to_haz_curves(self, sids, imls, invest_time, num_ses):
        """
        Convert the collected GMVs into hazard curves (by gsim and imt).

        :param sids: array of site IDs
        :param imls: intensity measure levels
        :param invest_time: investigation time in years
        :param num_ses: number of stochastic event sets
        """
        # NB: named `curves_by_key` (not `gmf`, as it once was) to avoid
        # shadowing the `openquake.hazardlib.calc.gmf` module used above
        curves_by_key = collections.defaultdict(dict)
        for (gsim, imt, site_id), gmvs in self.gmvs_per_site.iteritems():
            curves_by_key[gsim, imt][site_id] = \
                post_processing.gmvs_to_haz_curve(
                    gmvs, imls, invest_time, num_ses * invest_time)
        curves_by_gsim = []
        for gsim in self.gsims:
            curves_by_imt = []
            for imt in self.imts:
                # sites without any GMV get a flat zero curve
                curves_by_imt.append(
                    numpy.array([curves_by_key[gsim, imt].get(site_id, 0)
                                 for site_id in sids]))
            curves_by_gsim.append((gsim, curves_by_imt))
        return curves_by_gsim
class EventBasedHazardCalculator(general.BaseHazardCalculator):
    """
    Probabilistic Event-Based hazard calculator. Computes stochastic event sets
    and (optionally) ground motion fields.
    """
    core_calc_task = compute_ruptures

    def task_arg_gen(self, _block_size=None):
        """
        Loop through realizations and sources to generate a sequence of
        task arg tuples. Each tuple of args applies to a single task.

        Yielded results are tuples of the form job_id, sources, ses, seeds
        (seeds will be used to seed numpy for temporal occurrence sampling).
        """
        hc = self.hc
        rnd = random.Random()
        # seed once from the calculation seed so the per-source seeds
        # below are reproducible across runs
        rnd.seed(hc.random_seed)
        for job_id, sitecol, block, lt_model, gsims, task_no in \
                super(EventBasedHazardCalculator, self).task_arg_gen():
            ss = [(src, rnd.randint(0, models.MAX_SINT_32))
                  for src in block]  # source, seed pairs
            yield job_id, sitecol, ss, lt_model, gsims, task_no
        # now the source_blocks_per_ltpath dictionary can be cleared
        self.source_blocks_per_ltpath.clear()

    def task_completed(self, rupturecollector):
        """
        :param rupturecollector:
            an instance of `openquake.engine.calculators.hazard.event_based.core.RuptureCollector`

        If the parameter `ground_motion_fields` is set, compute and save
        the GMFs from the ruptures generated by the given task and stored
        in the `rupturecollector`.
        """
        if not self.hc.ground_motion_fields:
            return  # do nothing
        # keep the collector around: the GMFs are computed later, in
        # post_execute, once all ruptures have been generated
        self.rupt_collectors.append(rupturecollector)
        self.num_ruptures[rupturecollector.trt_model_id] += \
            len(rupturecollector.rupture_data)

    def post_execute(self):
        """
        Save the number of collected ruptures per tectonic region type
        model; then, if `ground_motion_fields` is set, create one Gmf
        output per realization and spawn `compute_and_save_gmfs` tasks
        for the collected ruptures.
        """
        for trt_id, num_ruptures in self.num_ruptures.iteritems():
            trt = models.TrtModel.objects.get(pk=trt_id)
            trt.num_ruptures = num_ruptures
            trt.save()
        super(EventBasedHazardCalculator, self).post_execute()
        if not self.hc.ground_motion_fields:
            return  # do nothing
        # create a Gmf output for each realization
        for rlz in self._get_realizations():
            output = models.Output.objects.create(
                oq_job=self.job,
                display_name='GMF rlz-%s' % rlz.id,
                output_type='gmf')
            models.Gmf.objects.create(output=output, lt_realization=rlz)
        # one compute_and_save_gmfs task per rupture collector
        otm = tasks.OqTaskManager(compute_and_save_gmfs, logs.LOG.progress)
        sids = self.hc.site_collection.sids
        for rupt_collector in self.rupt_collectors:
            otm.submit(self.job.id, sids, rupt_collector)
        otm.aggregate_results(lambda acc, x: None, None)

    def initialize_ses_db_records(self, lt_model):
        """
        Create :class:`~openquake.engine.db.models.Output`,
        :class:`~openquake.engine.db.models.SESCollection` and
        :class:`~openquake.engine.db.models.SES` "container" records for
        a single realization.

        Stochastic event set ruptures computed for this realization will be
        associated to these containers.

        NOTE: Many tasks can contribute ruptures to the same SES.

        :param lt_model:
            a :class:`~openquake.engine.db.models.LtSourceModel` instance
        :returns:
            the newly created SESCollection instance
        """
        output = models.Output.objects.create(
            oq_job=self.job,
            display_name='SES Collection smlt-%d' % lt_model.ordinal,
            output_type='ses')
        ses_coll = models.SESCollection.objects.create(
            output=output, lt_model=lt_model, ordinal=lt_model.ordinal)
        return ses_coll

    def pre_execute(self):
        """
        Do pre-execution work. At the moment, this work entails:
        parsing and initializing sources, parsing and initializing the
        site model (if there is one), parsing vulnerability and
        exposure files, and generating logic tree realizations. (The
        latter piece basically defines the work to be done in the
        `execute` phase.)
        """
        super(EventBasedHazardCalculator, self).pre_execute()
        # one SES collection per source model in the logic tree
        for lt_model in models.LtSourceModel.objects.filter(
                hazard_calculation=self.hc):
            self.initialize_ses_db_records(lt_model)

    def post_process(self):
        """
        If requested, perform additional processing of GMFs to produce hazard
        curves.
        """
        if self.hc.hazard_curves_from_gmfs:
            with self.monitor('generating hazard curves'):
                self.parallelize(
                    post_processing.gmf_to_hazard_curve_task,
                    post_processing.gmf_to_hazard_curve_arg_gen(self.job),
                    lambda res: None)

            # If `mean_hazard_curves` is True and/or `quantile_hazard_curves`
            # has some value (not an empty list), do this additional
            # post-processing.
            if self.hc.mean_hazard_curves or self.hc.quantile_hazard_curves:
                with self.monitor('generating mean/quantile curves'):
                    self.do_aggregate_post_proc()

            if self.hc.hazard_maps:
                with self.monitor('generating hazard maps'):
                    self.parallelize(
                        cls_post_proc.hazard_curves_to_hazard_map_task,
                        cls_post_proc.hazard_curves_to_hazard_map_task_arg_gen(
                            self.job),
                        lambda res: None)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 07.04.2014
@author: heinz-peterlang
'''
from weblyzard_api.xml_content.parsers import XMLParser
class XML2013(XMLParser):
    """Mapping configuration for the 2013 weblyzard XML document format."""

    SUPPORTED_NAMESPACE = 'http://www.weblyzard.com/wl/2013#'
    DOCUMENT_NAMESPACES = {
        'wl': SUPPORTED_NAMESPACE,
        'dc': 'http://purl.org/dc/elements/1.1/',
        'xml': 'http://www.w3.org/XML/1998/namespace',
        'sioc': 'http://rdfs.org/sioc/ns#',
        'skos': 'http://www.w3.org/2004/02/skos/core#',
        'foaf': 'http://xmlns.com/foaf/0.1/'}
    VERSION = 2013

    # Clark-notation prefixes, built once so the mappings below stay short.
    _WL = '{%s}' % DOCUMENT_NAMESPACES['wl']
    _DC = '{%s}' % DOCUMENT_NAMESPACES['dc']
    _XML = '{%s}' % DOCUMENT_NAMESPACES['xml']
    _SIOC = '{%s}' % DOCUMENT_NAMESPACES['sioc']
    _FOAF = '{%s}' % DOCUMENT_NAMESPACES['foaf']

    ATTR_MAPPING = {
        _WL + 'nilsimsa': 'nilsimsa',
        _WL + 'id': 'content_id',
        _WL + 'jonas_type': 'jonas_type',
        _XML + 'lang': 'lang',  # kept for legacy
        _DC + 'format': 'content_type',
        _DC + 'language': 'language',
        _DC + 'source': 'source',
        _DC + 'identifier': 'url',
        _DC + 'license': 'license',
        _DC + 'creator': 'creator',
        _DC + 'publisher': 'publisher',
        _FOAF + 'accountName': 'user_name',
        _SIOC + 'num_views': 'view_count',
        _SIOC + 'num_replies': 'comment_count'}
    SENTENCE_MAPPING = {
        _WL + 'token': 'token',
        _WL + 'sem_orient': 'sem_orient',
        _WL + 'significance': 'significance',
        _WL + 'id': 'md5sum',
        _WL + 'pos': 'pos',
        _WL + 'is_title': 'is_title',
        _WL + 'dependency': 'dependency'}
    ANNOTATION_MAPPING = {
        _WL + 'key': 'key',
        _WL + 'surfaceForm': 'surfaceForm',
        _WL + 'start': 'start',
        _WL + 'end': 'end',
        _WL + 'annotationType': 'annotation_type',
        _WL + 'preferredName': 'preferredName',
        _WL + 'md5sum': 'md5sum'}
    FEATURE_MAPPING = {
        _WL + 'key': 'key',
        _WL + 'context': 'context'}
    RELATION_MAPPING = {_WL + 'key': 'key'}

    # remove the helper prefixes so the class namespace matches the
    # original attribute set exactly
    del _WL, _DC, _XML, _SIOC, _FOAF

    @classmethod
    def pre_xml_dump(cls, titles, attributes, sentences):
        """Return the attributes unchanged together with the titles
        prepended to the sentence list, as expected by the serializer."""
        merged = titles + sentences
        return attributes, merged
chg: xml mapping 2013
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 07.04.2014
@author: heinz-peterlang
'''
from weblyzard_api.xml_content.parsers import XMLParser
class XML2013(XMLParser):
    # Mapping configuration for the 2013 weblyzard XML document format.

    SUPPORTED_NAMESPACE = 'http://www.weblyzard.com/wl/2013#'
    DOCUMENT_NAMESPACES = {'wl': SUPPORTED_NAMESPACE,
                           'dc': 'http://purl.org/dc/elements/1.1/',
                           'xml': 'http://www.w3.org/XML/1998/namespace',
                           # 'sioc': 'http://rdfs.org/sioc/ns#',
                           # 'skos': 'http://www.w3.org/2004/02/skos/core#',
                           # 'foaf': 'http://xmlns.com/foaf/0.1/',
                           'ma': 'http://www.w3.org/ns/ma-ont#'}
    VERSION = 2013

    # NOTE(review): this dict literal contains many duplicate keys (e.g.
    # wl:thumbnail x4, wl:rating x2, wl:num_views x2, wl:user_id x2,
    # wl:user_thumbnail x2, wl:user_rating x5). In a Python dict the last
    # occurrence silently wins, so only the final mapping per key survives
    # (e.g. wl:thumbnail maps only to 'group_picture'). If the intent is to
    # map one XML attribute to several internal names, the structure must be
    # inverted or hold lists of values -- confirm against XMLParser's usage.
    ATTR_MAPPING = {'{%s}nilsimsa' % DOCUMENT_NAMESPACES['wl']: 'nilsimsa',
                    '{%s}id' % DOCUMENT_NAMESPACES['wl']: 'content_id',
                    '{%s}jonas_type' % DOCUMENT_NAMESPACES['wl']: 'jonas_type',
                    '{%s}lang' % DOCUMENT_NAMESPACES['xml']: 'lang', #kept for legacy
                    '{%s}format' % DOCUMENT_NAMESPACES['dc']: 'content_type',
                    '{%s}language' % DOCUMENT_NAMESPACES['dc']: 'language',
                    '{%s}source' % DOCUMENT_NAMESPACES['dc']: 'source',
                    '{%s}identifier' % DOCUMENT_NAMESPACES['dc']: 'url',
                    '{%s}license' % DOCUMENT_NAMESPACES['dc']: 'license',
                    '{%s}creator' % DOCUMENT_NAMESPACES['dc']: 'creator',
                    '{%s}publisher' % DOCUMENT_NAMESPACES['dc']: 'publisher',
                    '{%s}subject' % DOCUMENT_NAMESPACES['dc']: 'keywords',
                    '{%s}title' % DOCUMENT_NAMESPACES['dc']: 'title',
                    # NOTE(review): duplicate key -- only the last wl:thumbnail
                    # entry ('group_picture') survives
                    '{%s}thumbnail' % DOCUMENT_NAMESPACES['wl']: 'thumbnail',
                    '{%s}thumbnail' % DOCUMENT_NAMESPACES['wl']: 'picture', #FB, YT
                    '{%s}thumbnail' % DOCUMENT_NAMESPACES['wl']: 'org_picture', #FB
                    '{%s}thumbnail' % DOCUMENT_NAMESPACES['wl']: 'group_picture', #FB
                    '{%s}post_type' % DOCUMENT_NAMESPACES['wl']: 'fbType', #FB
                    '{%s}location' % DOCUMENT_NAMESPACES['wl']: 'location',
                    '{%s}duration' % DOCUMENT_NAMESPACES['wl']: 'duration', #YT, vimeo/daily
                    '{%s}mediacriticism' % DOCUMENT_NAMESPACES['wl']: 'mediacriticism', #to be migrated to features, eventually
                    '{%s}article_content_id' % DOCUMENT_NAMESPACES['wl']: 'article_content_id', #to be migrated to relations eventually
                    #INVID
                    '{%s}locator' % DOCUMENT_NAMESPACES['ma']: 'media_url',
                    '{%s}format' % DOCUMENT_NAMESPACES['ma']: 'media_type',
                    '{%s}createdIn' % DOCUMENT_NAMESPACES['ma']: 'media_recordingLocation',
                    '{%s}creationDate' % DOCUMENT_NAMESPACES['ma']: 'media_recordingDate',
                    '{%s}hasPolicy' % DOCUMENT_NAMESPACES['ma']: 'media_license',
                    #SM_METRICS
                    '{%s}user_mentions' % DOCUMENT_NAMESPACES['wl']: 'user_mentions',
                    # NOTE(review): duplicate keys below (rating, num_views)
                    '{%s}rating' % DOCUMENT_NAMESPACES['wl']: 'rating',
                    '{%s}rating' % DOCUMENT_NAMESPACES['wl']: 'rating_average', #YT
                    '{%s}num_views' % DOCUMENT_NAMESPACES['wl']: 'viewcount', #vimeo/daily
                    '{%s}num_views' % DOCUMENT_NAMESPACES['wl']: 'statistics_viewcount', #youtube
                    '{%s}num_replies' % DOCUMENT_NAMESPACES['wl']: 'comment_count',
                    '{%s}num_reshares' % DOCUMENT_NAMESPACES['wl']: 'reshares', #g+, twitter
                    #USER MAPPTINGS
                    # NOTE(review): duplicate keys below (user_id,
                    # user_thumbnail, user_rating x5)
                    '{%s}user_id' % DOCUMENT_NAMESPACES['wl']: 'user_id', #FB, G+
                    '{%s}user_id' % DOCUMENT_NAMESPACES['wl']: 'user_url', #YT
                    '{%s}user_name' % DOCUMENT_NAMESPACES['wl']: 'user_name',
                    '{%s}user_type' % DOCUMENT_NAMESPACES['wl']: 'user_type',
                    '{%s}user_status' % DOCUMENT_NAMESPACES['wl']: 'current_status', #twitter
                    '{%s}user_screen_name' % DOCUMENT_NAMESPACES['wl']: 'screen_name',
                    '{%s}user_location' % DOCUMENT_NAMESPACES['wl']: 'user_location', #twitter
                    '{%s}user_timezone' % DOCUMENT_NAMESPACES['wl']: 'user_timezone', #twitter
                    '{%s}user_thumbnail' % DOCUMENT_NAMESPACES['wl']: 'user_thumbnail',
                    '{%s}user_thumbnail' % DOCUMENT_NAMESPACES['wl']: 'user_img_url', #twitter
                    #USER METRICS
                    '{%s}user_rating' % DOCUMENT_NAMESPACES['wl']: 'likes_count', #FB
                    '{%s}user_rating' % DOCUMENT_NAMESPACES['wl']: 'org_likes_count', #FB
                    '{%s}user_rating' % DOCUMENT_NAMESPACES['wl']: 'group_likes_count', #FB
                    '{%s}user_rating' % DOCUMENT_NAMESPACES['wl']: 'num_tweets', #twitter
                    '{%s}user_rating' % DOCUMENT_NAMESPACES['wl']: 'plusoners', #G+
                    '{%s}user_outdegree' % DOCUMENT_NAMESPACES['wl']: 'following', #twitter
                    '{%s}user_indegree' % DOCUMENT_NAMESPACES['wl']: 'followers', #twitter
                    }
    SENTENCE_MAPPING = {'{%s}token' % DOCUMENT_NAMESPACES['wl']: 'token',
                        '{%s}sem_orient' % DOCUMENT_NAMESPACES['wl']: 'sem_orient',
                        '{%s}significance' % DOCUMENT_NAMESPACES['wl']: 'significance',
                        '{%s}id' % DOCUMENT_NAMESPACES['wl']: 'md5sum',
                        '{%s}pos' % DOCUMENT_NAMESPACES['wl']: 'pos',
                        '{%s}is_title' % DOCUMENT_NAMESPACES['wl']: 'is_title',
                        '{%s}dependency' % DOCUMENT_NAMESPACES['wl']: 'dependency'}
    ANNOTATION_MAPPING = {'{%s}key' % DOCUMENT_NAMESPACES['wl']: 'key',
                          '{%s}surfaceForm' % DOCUMENT_NAMESPACES['wl']: 'surfaceForm',
                          '{%s}start' % DOCUMENT_NAMESPACES['wl']: 'start',
                          '{%s}end' % DOCUMENT_NAMESPACES['wl']: 'end',
                          '{%s}annotationType' % DOCUMENT_NAMESPACES['wl']: 'annotation_type',
                          '{%s}preferredName' % DOCUMENT_NAMESPACES['wl']: 'preferredName',
                          '{%s}md5sum' % DOCUMENT_NAMESPACES['wl']: 'md5sum'}
    FEATURE_MAPPING = {'{%s}key' % DOCUMENT_NAMESPACES['wl']: 'key',
                       '{%s}context' % DOCUMENT_NAMESPACES['wl']: 'context'}
    RELATION_MAPPING = {'{%s}key' % DOCUMENT_NAMESPACES['wl']: 'key'}

    @classmethod
    def pre_xml_dump(cls, titles, attributes, sentences):
        # Return the attributes unchanged and prepend the titles to the
        # sentence list before serialization.
        return attributes, titles + sentences
#!/usr/bin/env python
"""
.. module:: radical.pilot.agent
:platform: Unix
:synopsis: The agent for RADICAL-Pilot.
The agent gets CUs by means of the MongoDB.
The execution of CUs by the Agent is (primarily) configured by the
triplet (LRMS, LAUNCH_METHOD(s), SCHEDULER):
- The LRMS detects and structures the information about the resources
available to agent.
- The Scheduler maps the execution requests of the LaunchMethods to a
subset of the resources available to the Agent.
It does not deal with the "presentation" of this subset.
- The LaunchMethods configure how to execute (regular and MPI) tasks,
and know about the specific format to specify the subset of resources.
Structure:
----------
This represents the planned architecture, which is not fully represented in
code, yet.
- class Agent
- represents the whole thing
- has a set of StageinWorkers (threads or procs)
- has a set of StageoutWorkers (threads or procs)
- has a set of ExecWorkers (threads or procs)
- has a set of UpdateWorkers (threads or procs)
- has a HeartbeatMonitor (threads or procs)
- has a inputstaging queue
- has a outputstaging queue
- has a execution queue
- has a update queue
- loops forever
- in each iteration
- pulls CU bulks from DB
- pushes CUs into inputstaging queue or execution queue (based on
obvious metric)
class StageinWorker
- competes for CU input staging requests from inputstaging queue
- for each received CU
- performs staging
- pushes CU into execution queue
- pushes stage change notification request into update queue
class StageoutWorker
- competes for CU output staging requests from outputstaging queue
- for each received CU
- performs staging
- pushes stage change notification request into update queue
class ExecWorker
- manages a partition of the allocated cores
(partition size == max cu size)
      - competes for CU execution requests from execute queue
- for each CU
- prepares execution command
- pushes command to ExecutionEnvironment
- pushes stage change notification request into update queue
class Spawner
- executes CUs according to ExecWorker instruction
- monitors CU execution (for completion)
- gets CU execution requests from ExecWorker
- for each CU
- executes CU command
- monitors CU execution
- on CU completion
- pushes CU to outputstaging queue (if staging is needed)
- pushes stage change notification request into update queue
class UpdateWorker
- competes for CU state update requests from update queue
- for each CU
- pushes state update (collected into bulks if possible)
- cleans CU workdir if CU is final and cleanup is requested
Agent
|
+--------------------------------------------------------
| | | | |
| | | | |
V V V V V
ExecWorker* StageinWorker* StageoutWorker* UpdateWorker* HeartbeatMonitor
|
+-------------------------------------------------
| | | | |
| | | | |
V V V V V
LRMS MPILaunchMethod TaskLaunchMethod Scheduler Spawner
NOTE:
-----
- Units are progressing through the different worker threads, where, in
general, the unit changes state when transitioning to the next thread.
The unit ownership thus *defines* the unit state (its owned by the
InputStagingWorker, it is in StagingInput state, etc), and the state
update notifications to the DB are merely informational (and can thus be
asynchronous). The updates need to be ordered though, to reflect valid and
correct state transition history.
TODO:
-----
- add option to scheduler to ignore core 0 (which hosts the agent process)
- add LRMS.partition (n) to return a set of partitioned LRMS for partial
ExecWorkers
- publish pilot slot history once on shutdown? Or once in a while when
idle? Or push continuously?
- Schedulers, LRMSs, LaunchMethods, etc need to be made threadsafe, for the
case where more than one execution worker threads are running.
- move util functions to rp.utils or r.utils, and pull them from there
- split the agent into logical components (classes?), and install along with
RP.
- add state asserts after `queue.get ()`
- move mkdir etc from ingest thread to where its used (input staging or
execution)
- the structure of the base scheduler should be suitable for both, UMGR
scheduling and Agent scheduling. The algs will be different though,
mostly because the pilots (as targets of the umgr scheduler) have a wait
queue, but the cores (targets of the agent scheduler) have not. Is it
worthwhile to re-use the structure anyway?
- all stop() method calls need to be replaced with commands which travel
through the queues. To deliver commands timely though we either need
command prioritization (difficult), or need separate command queues...
"""
__copyright__ = "Copyright 2014, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import copy
import math
import stat
import sys
import time
import errno
import Queue
import signal
import shutil
import optparse
import logging
import hostlist
import traceback
import threading
import subprocess
import multiprocessing
import saga as rs
import radical.utils as ru
import radical.pilot as rp
import radical.pilot.utils as rpu
# ------------------------------------------------------------------------------
#
# http://stackoverflow.com/questions/9539052/python-dynamically-changing-base-classes-at-runtime-how-to
#
# Depending on agent architecture (which is specific to the resource type it
# runs on) can switch between different component types: using threaded (when
# running on the same node), multiprocessing (also for running on the same node,
# but avoiding python's threading problems, for the prices of slower queues),
# and remote processes (for running components on different nodes, using zeromq
# queues for communication).
#
# We do some trickery to keep the actual components independent from the actual
# schema:
#
# - we wrap the different queue types into a rpu.Queue object
# - we change the base class of the component dynamically to the respective type
#
# This requires components to adhere to the following restrictions:
#
# - *only* communicate over queues -- no shared data with other components or
# component instances. Note that this also holds for example for the
# scheduler!
# - no shared data between the component class and it's run() method. That
# includes no sharing of queues.
# - components inherit from base_component, and the constructor needs to
# register all required component-internal and -external queues with that
# base class -- the run() method can then transparently retrieve them from
# there.
#
# FIXME: static switch between thread and process rendering of exec worker.
#
# Components can be rendered either as threads (cheap queues, GIL-bound) or
# as processes (slower multiprocessing queues, but no GIL contention); see
# the discussion in the comment block above.
AGENT_THREADS   = 'threading'
AGENT_PROCESSES = 'multiprocessing'

AGENT_MODE      = AGENT_THREADS

if AGENT_MODE == AGENT_THREADS :
    COMPONENT_MODE = threading
    COMPONENT_TYPE = threading.Thread
    # NOTE(review): threading mode also uses multiprocessing.Queue rather
    # than Queue.Queue -- presumably so queue semantics stay identical
    # across both modes, but TODO confirm this is intentional.
    QUEUE_TYPE     = multiprocessing.Queue
elif AGENT_MODE == AGENT_PROCESSES :
    COMPONENT_MODE = multiprocessing
    COMPONENT_TYPE = multiprocessing.Process
    QUEUE_TYPE     = multiprocessing.Queue
else:
    raise Exception('Unknown Agent Mode')
# this needs git attribute 'ident' set for this file
git_ident = "$Id$"

# ------------------------------------------------------------------------------
#
# DEBUGGING CONSTANTS -- only change when you know what you are doing.  It is
# almost guaranteed that any changes will make the agent non-functional (if
# functionality is defined as executing a set of given CUs).

# component IDs -- used as keys into the config tables below, and as labels
# in profiling / queue-routing calls throughout the agent.
AGENT           = 'Agent'
STAGEIN_QUEUE   = 'stagein_queue'
STAGEIN_WORKER  = 'StageinWorker'
SCHEDULE_QUEUE  = 'schedule_queue'
SCHEDULER       = 'Scheduler'
EXECUTION_QUEUE = 'execution_queue'
EXEC_WORKER     = 'ExecWorker'
WATCH_QUEUE     = 'watch_queue'
WATCHER         = 'ExecWatcher'
STAGEOUT_QUEUE  = 'stageout_queue'
STAGEOUT_WORKER = 'StageoutWorker'
UPDATE_QUEUE    = 'update_queue'
UPDATE_WORKER   = 'UpdateWorker'

# Number of worker threads per worker type
NUMBER_OF_WORKERS = {
    STAGEIN_WORKER  : 1,
    EXEC_WORKER     : 1,
    STAGEOUT_WORKER : 1,
    UPDATE_WORKER   : 1
}

# factor by which the number of units are increased at a certain step.  Value of
# '1' will leave the units unchanged.  Any blowup will leave one unit as the
# original, and will then create clones with a changed unit ID (see blowup()).
BLOWUP_FACTOR = {
    AGENT           : 1,
    STAGEIN_QUEUE   : 1,
    STAGEIN_WORKER  : 1,
    SCHEDULE_QUEUE  : 1,
    SCHEDULER       : 1,
    EXECUTION_QUEUE : 1,
    EXEC_WORKER     : 1,
    WATCH_QUEUE     : 1,
    WATCHER         : 1,
    STAGEOUT_QUEUE  : 1,
    STAGEOUT_WORKER : 1,
    UPDATE_QUEUE    : 1,
    UPDATE_WORKER   : 1
}

# flag to drop all blown-up units at some point in the pipeline.  The units
# with the original IDs will again be left untouched, but all other units are
# silently discarded.
# 0: drop nothing
# 1: drop clones
# 2: drop everything
DROP_CLONES = {
    AGENT           : 1,
    STAGEIN_QUEUE   : 1,
    STAGEIN_WORKER  : 1,
    SCHEDULE_QUEUE  : 1,
    SCHEDULER       : 1,
    EXECUTION_QUEUE : 1,
    EXEC_WORKER     : 1,
    WATCH_QUEUE     : 1,
    WATCHER         : 1,
    STAGEOUT_QUEUE  : 1,
    STAGEOUT_WORKER : 1,
    UPDATE_QUEUE    : 1,
    UPDATE_WORKER   : 1
}
#
# ------------------------------------------------------------------------------

# ------------------------------------------------------------------------------
# CONSTANTS
#
# 'enum' for unit launch method types
LAUNCH_METHOD_APRUN         = 'APRUN'
LAUNCH_METHOD_CCMRUN        = 'CCMRUN'
LAUNCH_METHOD_DPLACE        = 'DPLACE'
LAUNCH_METHOD_FORK          = 'FORK'
LAUNCH_METHOD_IBRUN         = 'IBRUN'
LAUNCH_METHOD_MPIEXEC       = 'MPIEXEC'
LAUNCH_METHOD_MPIRUN_CCMRUN = 'MPIRUN_CCMRUN'
LAUNCH_METHOD_MPIRUN_DPLACE = 'MPIRUN_DPLACE'
LAUNCH_METHOD_MPIRUN        = 'MPIRUN'
LAUNCH_METHOD_MPIRUN_RSH    = 'MPIRUN_RSH'
LAUNCH_METHOD_ORTE          = 'ORTE'
LAUNCH_METHOD_POE           = 'POE'
LAUNCH_METHOD_RUNJOB        = 'RUNJOB'
LAUNCH_METHOD_SSH           = 'SSH'

# 'enum' for local resource manager types
LRMS_NAME_CCM         = 'CCM'
LRMS_NAME_FORK        = 'FORK'
LRMS_NAME_LOADLEVELER = 'LOADL'
LRMS_NAME_LSF         = 'LSF'
LRMS_NAME_PBSPRO      = 'PBSPRO'
LRMS_NAME_SGE         = 'SGE'
LRMS_NAME_SLURM       = 'SLURM'
LRMS_NAME_TORQUE      = 'TORQUE'

# 'enum' for pilot's unit scheduler types
SCHEDULER_NAME_CONTINUOUS = "CONTINUOUS"
SCHEDULER_NAME_SCATTERED  = "SCATTERED"
SCHEDULER_NAME_TORUS      = "TORUS"

# 'enum' for pilot's unit spawner types
SPAWNER_NAME_POPEN = "POPEN"
SPAWNER_NAME_SHELL = "SHELL"

# defines for pilot commands
COMMAND_CANCEL_PILOT        = "Cancel_Pilot"
COMMAND_CANCEL_COMPUTE_UNIT = "Cancel_Compute_Unit"
COMMAND_KEEP_ALIVE          = "Keep_Alive"
COMMAND_FIELD               = "commands"
COMMAND_TYPE                = "type"
COMMAND_ARG                 = "arg"
COMMAND_RESCHEDULE          = "Reschedule"
COMMAND_CANCEL              = "Cancel"

# 'enum' for staging action operators
COPY     = 'Copy'     # local cp
LINK     = 'Link'     # local ln -s
MOVE     = 'Move'     # local mv
TRANSFER = 'Transfer' # saga remote transfer
                      # TODO: This might just be a special case of copy

# tri-state for unit spawn retval
OK    = 'OK'
FAIL  = 'FAIL'
RETRY = 'RETRY'

# two-state for slot occupation.
FREE = 'Free'
BUSY = 'Busy'

# default agent configuration -- tunables for staging, DB interaction and
# internal polling behavior
agent_config = {
    # directory for staging files inside the agent sandbox
    'staging_area'         : 'staging_area',
    # url scheme to indicate the use of staging_area
    'staging_scheme'       : 'staging',
    # max number of cu out/err chars to push to db
    'max_io_loglength'     : 1*1024,
    # max time period to collect db requests into bulks (seconds)
    'bulk_collection_time' : 1.0,
    # time to sleep between queue polls (seconds)
    'queue_poll_sleeptime' : 0.1,
    # time to sleep between database polls (seconds)
    'db_poll_sleeptime'    : 0.1,
    # time between checks of internal state and commands from mothership (seconds)
    'heartbeat_interval'   : 10,
}

# fold the debugging tables from above into the config
agent_config['blowup_factor']     = BLOWUP_FACTOR
agent_config['drop_clones']       = DROP_CLONES
agent_config['number_of_workers'] = NUMBER_OF_WORKERS
# ----------------------------------------------------------------------------------
#
def rec_makedir(target):
    """Recursively create directory `target` (like `mkdir -p`), ignoring
    the error if `target` already exists as a directory.

    Raises OSError if creation fails for any other reason -- including the
    case where `target` exists but is a regular file.
    """
    try:
        os.makedirs(target)
    except OSError as e:
        # EEXIST is only benign if `target` itself is a directory.  The old
        # check inspected os.path.dirname(target), which masked the case
        # where `target` exists as a file.
        if e.errno == errno.EEXIST and os.path.isdir(target):
            pass
        else:
            raise
# ------------------------------------------------------------------------------
#
def pilot_FAILED(mongo_p, pilot_uid, logger, message):
    """Mark the pilot as FAILED in the database.

    Logs `message`, snapshots the agent's stdout/stderr/log files
    (best effort), and pushes state, state history, log tail and finish
    timestamp to the pilot document.  Without a DB handle the failure is
    only logged locally.
    """
    logger.error(message)

    now = rpu.timestamp()
    out = None
    err = None
    log = None

    # best-effort collection of the agent's stdio files -- any of them may
    # be missing, which is fine.  Use context managers so file handles are
    # closed deterministically (they were previously leaked), and catch
    # Exception instead of a bare except (which also swallowed
    # SystemExit/KeyboardInterrupt).
    try:
        with open('./agent.out', 'r') as f:
            out = f.read()
    except Exception:
        pass
    try:
        with open('./agent.err', 'r') as f:
            err = f.read()
    except Exception:
        pass
    try:
        with open('./agent.log', 'r') as f:
            log = f.read()
    except Exception:
        pass

    msg = [{"message": message,          "timestamp": now},
           {"message": rpu.get_rusage(), "timestamp": now}]

    if mongo_p:
        mongo_p.update({"_id": pilot_uid},
                       {"$pushAll": {"log"         : msg},
                        "$push"   : {"statehistory": {"state"    : rp.FAILED,
                                                      "timestamp": now}},
                        "$set"    : {"state"       : rp.FAILED,
                                     "stdout"      : rpu.tail(out),
                                     "stderr"      : rpu.tail(err),
                                     "logfile"     : rpu.tail(log),
                                     "finished"    : now}
                       })
    else:
        logger.error("cannot log error state in database!")
# ------------------------------------------------------------------------------
#
def pilot_CANCELED(mongo_p, pilot_uid, logger, message):
    """Mark the pilot as CANCELED in the database.

    Logs `message` as a warning, snapshots the agent's stdio/log files
    (best effort), and pushes state, state history, log tail and finish
    timestamp to the pilot document.
    """
    logger.warning(message)

    now = rpu.timestamp()
    out = None
    err = None
    log = None

    # best-effort collection of the agent's stdio files -- use context
    # managers so the handles are closed (they were previously leaked).
    try:
        with open('./agent.out', 'r') as f:
            out = f.read()
    except Exception:
        pass
    try:
        with open('./agent.err', 'r') as f:
            err = f.read()
    except Exception:
        pass
    try:
        with open('./agent.log', 'r') as f:
            log = f.read()
    except Exception:
        pass

    msg = [{"message": message,          "timestamp": now},
           {"message": rpu.get_rusage(), "timestamp": now}]

    # guard against a missing DB handle, consistent with pilot_FAILED()
    # (previously this raised AttributeError on mongo_p == None)
    if mongo_p:
        mongo_p.update({"_id": pilot_uid},
                       {"$pushAll": {"log"         : msg},
                        "$push"   : {"statehistory": {"state"    : rp.CANCELED,
                                                      "timestamp": now}},
                        "$set"    : {"state"       : rp.CANCELED,
                                     "stdout"      : rpu.tail(out),
                                     "stderr"      : rpu.tail(err),
                                     "logfile"     : rpu.tail(log),
                                     "finished"    : now}
                       })
    else:
        logger.error("cannot log cancel state in database!")
# ------------------------------------------------------------------------------
#
def pilot_DONE(mongo_p, pilot_uid):
    """Mark the pilot as DONE in the database.

    Snapshots the agent's stdout/stderr/log files (best effort) and pushes
    state, state history, log tail and finish timestamp to the pilot
    document.
    """
    now = rpu.timestamp()
    out = None
    err = None
    log = None

    # best-effort collection of the agent's stdio files -- use context
    # managers so the handles are closed (they were previously leaked).
    try:
        with open('./agent.out', 'r') as f:
            out = f.read()
    except Exception:
        pass
    try:
        with open('./agent.err', 'r') as f:
            err = f.read()
    except Exception:
        pass
    try:
        with open('./agent.log', 'r') as f:
            log = f.read()
    except Exception:
        pass

    msg = [{"message": "pilot done",     "timestamp": now},
           {"message": rpu.get_rusage(), "timestamp": now}]

    # guard against a missing DB handle, consistent with pilot_FAILED()
    # (no logger is available here, so the update is silently skipped)
    if mongo_p:
        mongo_p.update({"_id": pilot_uid},
                       {"$pushAll": {"log"         : msg},
                        "$push"   : {"statehistory": {"state"    : rp.DONE,
                                                      "timestamp": now}},
                        "$set"    : {"state"       : rp.DONE,
                                     "stdout"      : rpu.tail(out),
                                     "stderr"      : rpu.tail(err),
                                     "logfile"     : rpu.tail(log),
                                     "finished"    : now}
                       })
# ==============================================================================
#
# Schedulers
#
# ==============================================================================
#
class Scheduler(threading.Thread):
    """
    Base class for agent-side unit schedulers, running as its own thread.

    The scheduler pulls requests from `schedule_queue`: either a CU to be
    placed, or the COMMAND_RESCHEDULE string signalling that cores were
    freed.  CUs which can be placed (via the subclass' `_allocate_slot()`)
    are forwarded to `execution_queue`; CUs which cannot are parked in
    `self._wait_pool` until `unschedule()` triggers a reschedule.

    Subclasses implement the resource bookkeeping: `_configure()`,
    `slot_status()`, `_allocate_slot()` and `_release_slot()`.
    """
    # FIXME: clarify what can be overloaded by Scheduler classes

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, lrms, schedule_queue, execution_queue,
                 update_queue):

        threading.Thread.__init__(self)

        self.name             = name
        self._config          = config
        self._log             = logger
        self._lrms            = lrms
        self._schedule_queue  = schedule_queue
        self._execution_queue = execution_queue
        self._update_queue    = update_queue

        self._terminate       = threading.Event()
        # `_lock` guards slot allocation/release (which happen in different
        # threads); `_wait_queue_lock` guards the wait pool.
        self._lock            = threading.RLock()
        self._wait_pool       = list()
        self._wait_queue_lock = threading.RLock()

        rpu.prof('start')

        self._configure()

    # --------------------------------------------------------------------------
    #
    # This class-method creates the appropriate sub-class for the Launch Method.
    #
    @classmethod
    def create(cls, name, config, logger, lrms, schedule_queue, execution_queue,
               update_queue):

        # Make sure that we are the base-class!
        if cls != Scheduler:
            raise TypeError("Scheduler Factory only available to base class!")

        try:
            implementation = {
                SCHEDULER_NAME_CONTINUOUS : SchedulerContinuous,
                SCHEDULER_NAME_SCATTERED  : SchedulerScattered,
                SCHEDULER_NAME_TORUS      : SchedulerTorus
            }[name]
            # the scheduler thread is started immediately -- the caller
            # receives a running instance
            impl = implementation(name, config, logger, lrms, schedule_queue,
                                  execution_queue, update_queue)
            impl.start()
            return impl
        except KeyError:
            raise ValueError("Scheduler '%s' unknown!" % name)

    # --------------------------------------------------------------------------
    #
    def stop(self):
        # request thread termination; run() exits after handling the
        # current queue request
        rpu.prof ('stop request')
        self._terminate.set()

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        raise NotImplementedError("_configure() not implemented for Scheduler '%s'." % self.name)

    # --------------------------------------------------------------------------
    #
    def slot_status(self):
        raise NotImplementedError("slot_status() not implemented for Scheduler '%s'." % self.name)

    # --------------------------------------------------------------------------
    #
    def _allocate_slot(self, cores_requested):
        raise NotImplementedError("_allocate_slot() not implemented for Scheduler '%s'." % self.name)

    # --------------------------------------------------------------------------
    #
    def _release_slot(self, opaque_slot):
        raise NotImplementedError("_release_slot() not implemented for Scheduler '%s'." % self.name)

    # --------------------------------------------------------------------------
    #
    def _try_allocation(self, cu):
        """
        Attempt to allocate cores for a specific CU. If it succeeds, send the
        CU off to the ExecutionWorker.
        """
        # needs to be locked as we try to acquire slots, but slots are freed
        # in a different thread. But we keep the lock duration short...
        with self._lock :
            # schedule this unit, and receive an opaque handle that has meaning to
            # the LRMS, Scheduler and LaunchMethod.
            cu['opaque_slot'] = self._allocate_slot(cu['description']['cores'])

        if not cu['opaque_slot']:
            # signal the CU remains unhandled
            return False

        # got an allocation, go off and launch the process
        rpu.prof('schedule', msg="allocated", uid=cu['_id'], logger=self._log.warn)
        self._log.info (self.slot_status())

        cu_list = rpu.blowup(self._config, cu, EXECUTION_QUEUE)
        for _cu in cu_list :
            rpu.prof('put', msg="Scheduler to execution_queue (%s)" % _cu['state'], uid=_cu['_id'])
            self._execution_queue.put(_cu)

        return True

    # --------------------------------------------------------------------------
    #
    def _reschedule(self):
        # retry allocation for all waiting CUs -- called when cores were
        # released

        rpu.prof('reschedule')
        self._log.info("slot status before reschedule: %s" % self.slot_status())

        # cycle through wait queue, and see if we get anything running now. We
        # cycle over a copy of the list, so that we can modify the list on the
        # fly
        for cu in self._wait_pool[:]:
            if self._try_allocation(cu):
                # NOTE: this is final, remove it from the wait queue
                with self._wait_queue_lock :
                    self._wait_pool.remove(cu)
                    rpu.prof('unqueue', msg="re-allocation done", uid=cu['_id'])

        self._log.info("slot status after reschedule: %s" % self.slot_status ())
        rpu.prof('reschedule done')

    # --------------------------------------------------------------------------
    #
    def unschedule(self, cus):
        """Release all slots allocated to the given CU(s) (for whatever
        reason) and, if any slots were freed, trigger a reschedule."""
        # needs to be locked as we try to release slots, but slots are acquired
        # in a different thread....
        with self._lock :
            rpu.prof('unschedule')
            self._log.info("slot status before unschedule: %s" % self.slot_status ())

            slots_released = False

            if not isinstance(cus, list):
                cus = [cus]

            for cu in cus:
                if cu['opaque_slot']:
                    self._release_slot(cu['opaque_slot'])
                    slots_released = True

            # notify the scheduling thread of released slots
            if slots_released:
                rpu.prof('put_cmd', msg="Scheduler to schedule_queue (%s)" % COMMAND_RESCHEDULE)
                self._schedule_queue.put(COMMAND_RESCHEDULE)

            self._log.info("slot status after unschedule: %s" % self.slot_status ())
            rpu.prof('unschedule done - reschedule')

    # --------------------------------------------------------------------------
    #
    def run(self):
        # main scheduler loop: serve CU placement requests and reschedule
        # triggers until stop() is called

        rpu.prof('run')
        while not self._terminate.is_set():

            try:
                request = self._schedule_queue.get()

                # shutdown signal
                if not request:
                    rpu.prof('get_cmd', msg="schedule_queue to Scheduler (wakeup)")
                    continue

                # we either get a new scheduled CU, or get a trigger that cores were
                # freed, and we can try to reschedule waiting CUs
                if isinstance(request, basestring):
                    command = request
                    rpu.prof('get_cmd', msg="schedule_queue to Scheduler (%s)" % command)

                    if command == COMMAND_RESCHEDULE:
                        self._reschedule()
                    else:
                        self._log.error("Unknown scheduler command: %s (ignored)", command)

                else:
                    cu = request
                    cu['state'] = rp.ALLOCATING
                    rpu.prof('get', msg="schedule_queue to Scheduler (%s)" % cu['state'], uid=cu['_id'])

                    cu_list = rpu.blowup(self._config, cu, SCHEDULER)
                    for _cu in cu_list:
                        # we got a new unit to schedule. Either we can place
                        # it straight away and move it to execution, or we have
                        # to put it on the wait queue.
                        if not self._try_allocation(_cu):
                            # No resources available, put in wait queue
                            with self._wait_queue_lock :
                                self._wait_pool.append(_cu)
                            rpu.prof('schedule', msg="allocation failed", uid=_cu['_id'])

            except Exception as e:
                self._log.exception('Error in scheduler loop: %s', e)
                raise

            finally:
                # NOTE(review): as reconstructed from the (whitespace-stripped)
                # source, this profiling call fires on every loop iteration,
                # not only on thread exit -- TODO confirm intended placement.
                rpu.prof ('stop')
# ==============================================================================
#
class SchedulerContinuous(Scheduler):
    """
    Scheduler which places each CU on a continuous range of cores, within
    one node when the request fits, or spanning consecutive nodes
    otherwise.  Slots are tracked as a list of per-node core-state lists
    (FREE/BUSY); task slots are encoded as 'node:core' strings.
    """
    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, lrms, scheduler_queue,
                 execution_queue, update_queue):

        # initialized by _configure(), which the base constructor calls
        self.slots = None

        Scheduler.__init__(self, name, config, logger, lrms, scheduler_queue,
                           execution_queue, update_queue)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # build the slot table from the LRMS' node list; both attributes
        # must have been set by the LRMS beforehand
        if not self._lrms.node_list:
            raise RuntimeError("LRMS %s didn't _configure node_list." % self._lrms.name)
        if not self._lrms.cores_per_node:
            raise RuntimeError("LRMS %s didn't _configure cores_per_node." % self._lrms.name)

        # Slots represents the internal process management structure.
        # The structure is as follows:
        # [
        #    {'node': 'node1', 'cores': [p_1, p_2, p_3, ... , p_cores_per_node]},
        #    {'node': 'node2', 'cores': [p_1, p_2, p_3. ... , p_cores_per_node]
        # ]
        #
        # We put it in a list because we care about (and make use of) the order.
        #
        self.slots = []
        for node in self._lrms.node_list:
            self.slots.append({
                'node': node,
                # TODO: Maybe use the real core numbers in the case of
                # non-exclusive host reservations?
                'cores': [FREE for _ in range(0, self._lrms.cores_per_node)]
            })

    # --------------------------------------------------------------------------
    #
    # Convert a set of slots into an index into the global slots list
    #
    def slots2offset(self, task_slots):
        # TODO: This assumes all hosts have the same number of cores

        first_slot = task_slots[0]
        # Get the host and the core part
        [first_slot_host, first_slot_core] = first_slot.split(':')
        # Find the entry in the the all_slots list based on the host
        # (generator .next() is Python 2 syntax)
        slot_entry = (slot for slot in self.slots if slot["node"] == first_slot_host).next()
        # Transform it into an index in to the all_slots list
        all_slots_slot_index = self.slots.index(slot_entry)

        return all_slots_slot_index * self._lrms.cores_per_node + int(first_slot_core)

    # --------------------------------------------------------------------------
    #
    def slot_status(self):
        """Returns a multi-line string corresponding to slot status.
        """
        slot_matrix = ""
        for slot in self.slots:
            slot_matrix += "|"
            for core in slot['cores']:
                if core == FREE:
                    slot_matrix += "-"
                else:
                    slot_matrix += "+"
            slot_matrix += "|"
        return {'timestamp' : rpu.timestamp(),
                'slotstate' : slot_matrix}

    # --------------------------------------------------------------------------
    #
    # (Temporary?) wrapper for acquire_slots
    #
    def _allocate_slot(self, cores_requested):

        # TODO: single_node should be enforced for e.g. non-message passing
        #       tasks, but we don't have that info here.
        # NOTE AM: why should non-messaging tasks be confined to one node?
        if cores_requested < self._lrms.cores_per_node:
            single_node = True
        else:
            single_node = False

        # Given that we are the continuous scheduler, this is fixed.
        # TODO: Argument can be removed altogether?
        continuous = True

        # TODO: Now we rely on "None", maybe throw an exception?
        return self._acquire_slots(cores_requested, single_node=single_node,
                                   continuous=continuous)

    # --------------------------------------------------------------------------
    #
    def _release_slot(self, (task_slots)):
        # mark all cores of the given task slots FREE again
        self._change_slot_states(task_slots, FREE)

    # --------------------------------------------------------------------------
    #
    def _acquire_slots(self, cores_requested, single_node, continuous):
        # find (and mark BUSY) a matching set of cores, or return None
        #
        # Switch between searching for continuous or scattered slots
        #
        # Switch between searching for single or multi-node
        if single_node:
            if continuous:
                task_slots = self._find_slots_single_cont(cores_requested)
            else:
                raise NotImplementedError('No scattered single node scheduler implemented yet.')
        else:
            if continuous:
                task_slots = self._find_slots_multi_cont(cores_requested)
            else:
                raise NotImplementedError('No scattered multi node scheduler implemented yet.')

        if task_slots is not None:
            self._change_slot_states(task_slots, BUSY)

        return task_slots

    # --------------------------------------------------------------------------
    #
    # Find a needle (continuous sub-list) in a haystack (list)
    #
    def _find_sublist(self, haystack, needle):
        n = len(needle)
        # Find all matches (returns list of False and True for every position)
        hits = [(needle == haystack[i:i+n]) for i in xrange(len(haystack)-n+1)]
        try:
            # Grab the first occurrence
            index = hits.index(True)
        except ValueError:
            index = None

        return index

    # --------------------------------------------------------------------------
    #
    # Transform the number of cores into a continuous list of "status"es,
    # and use that to find a sub-list.
    #
    def _find_cores_cont(self, slot_cores, cores_requested, status):
        return self._find_sublist(slot_cores, [status for _ in range(cores_requested)])

    # --------------------------------------------------------------------------
    #
    # Find an available continuous slot within node boundaries.
    #
    def _find_slots_single_cont(self, cores_requested):

        for slot in self.slots:
            slot_node = slot['node']
            slot_cores = slot['cores']

            slot_cores_offset = self._find_cores_cont(slot_cores, cores_requested, FREE)

            if slot_cores_offset is not None:
                self._log.info('Node %s satisfies %d cores at offset %d',
                               slot_node, cores_requested, slot_cores_offset)
                return ['%s:%d' % (slot_node, core) for core in
                        range(slot_cores_offset, slot_cores_offset + cores_requested)]

        return None

    # --------------------------------------------------------------------------
    #
    # Find an available continuous slot across node boundaries.
    #
    def _find_slots_multi_cont(self, cores_requested):
        # NOTE: the offset arithmetic below relies on Python 2 integer
        # division ('/' truncates for ints)

        # Convenience aliases
        cores_per_node = self._lrms.cores_per_node
        all_slots = self.slots

        # Glue all slot core lists together
        all_slot_cores = [core for node in [node['cores'] for node in all_slots] for core in node]
        # self._log.debug("all_slot_cores: %s", all_slot_cores)

        # Find the start of the first available region
        all_slots_first_core_offset = self._find_cores_cont(all_slot_cores, cores_requested, FREE)
        self._log.debug("all_slots_first_core_offset: %s", all_slots_first_core_offset)
        if all_slots_first_core_offset is None:
            return None

        # Determine the first slot in the slot list
        first_slot_index = all_slots_first_core_offset / cores_per_node
        self._log.debug("first_slot_index: %s", first_slot_index)
        # And the core offset within that node
        first_slot_core_offset = all_slots_first_core_offset % cores_per_node
        self._log.debug("first_slot_core_offset: %s", first_slot_core_offset)

        # Note: We subtract one here, because counting starts at zero;
        #       Imagine a zero offset and a count of 1, the only core used
        #       would be core 0.
        #       TODO: Verify this claim :-)
        all_slots_last_core_offset = (first_slot_index * cores_per_node) +\
                                     first_slot_core_offset + cores_requested - 1
        self._log.debug("all_slots_last_core_offset: %s", all_slots_last_core_offset)
        last_slot_index = (all_slots_last_core_offset) / cores_per_node
        self._log.debug("last_slot_index: %s", last_slot_index)
        last_slot_core_offset = all_slots_last_core_offset % cores_per_node
        self._log.debug("last_slot_core_offset: %s", last_slot_core_offset)

        # Convenience aliases
        last_slot = self.slots[last_slot_index]
        self._log.debug("last_slot: %s", last_slot)
        last_node = last_slot['node']
        self._log.debug("last_node: %s", last_node)
        first_slot = self.slots[first_slot_index]
        self._log.debug("first_slot: %s", first_slot)
        first_node = first_slot['node']
        self._log.debug("first_node: %s", first_node)

        # Collect all node:core slots here
        task_slots = []

        # Add cores from first slot for this unit
        # As this is a multi-node search, we can safely assume that we go
        # from the offset all the way to the last core.
        task_slots.extend(['%s:%d' % (first_node, core) for core in
                           range(first_slot_core_offset, cores_per_node)])

        # Add all cores from "middle" slots
        for slot_index in range(first_slot_index+1, last_slot_index):
            slot_node = all_slots[slot_index]['node']
            task_slots.extend(['%s:%d' % (slot_node, core) for core in range(0, cores_per_node)])

        # Add the cores of the last slot
        task_slots.extend(['%s:%d' % (last_node, core) for core in range(0, last_slot_core_offset+1)])

        return task_slots

    # --------------------------------------------------------------------------
    #
    # Change the reserved state of slots (FREE or BUSY)
    #
    def _change_slot_states(self, task_slots, new_state):

        # Convenience alias
        all_slots = self.slots

        # logger.debug("change_slot_states: unit slots: %s", task_slots)

        for slot in task_slots:
            # logger.debug("change_slot_states: slot content: %s", slot)
            # Get the node and the core part
            [slot_node, slot_core] = slot.split(':')
            # Find the entry in the the all_slots list
            slot_entry = (slot for slot in all_slots if slot["node"] == slot_node).next()
            # Change the state of the slot
            slot_entry['cores'][int(slot_core)] = new_state
# ==============================================================================
#
class SchedulerScattered(Scheduler):
    """Placeholder for a scheduler which may place a unit's cores on
    non-contiguous slots.  Selectable via SCHEDULER_NAME_SCATTERED, but
    not implemented yet.
    """
    # FIXME: implement
    pass
# ==============================================================================
#
class SchedulerTorus(Scheduler):
# TODO: Ultimately all BG/Q specifics should move out of the scheduler
# --------------------------------------------------------------------------
#
# Offsets into block structure
#
TORUS_BLOCK_INDEX = 0
TORUS_BLOCK_COOR = 1
TORUS_BLOCK_NAME = 2
TORUS_BLOCK_STATUS = 3
# --------------------------------------------------------------------------
def __init__(self, name, config, logger, lrms, scheduler_queue,
execution_queue, update_queue):
self.slots = None
self._cores_per_node = None
Scheduler.__init__(self, name, config, logger, lrms, scheduler_queue,
execution_queue, update_queue)
# --------------------------------------------------------------------------
#
def _configure(self):
if not self._lrms.cores_per_node:
raise RuntimeError("LRMS %s didn't _configure cores_per_node." % self._lrms.name)
self._cores_per_node = self._lrms.cores_per_node
# TODO: get rid of field below
self.slots = 'bogus'
# --------------------------------------------------------------------------
#
def slot_status(self):
"""Returns a multi-line string corresponding to slot status.
"""
slot_matrix = ""
for slot in self._lrms.torus_block:
slot_matrix += "|"
if slot[self.TORUS_BLOCK_STATUS] == FREE:
slot_matrix += "-" * self._lrms.cores_per_node
else:
slot_matrix += "+" * self._lrms.cores_per_node
slot_matrix += "|"
return {'timestamp': rpu.timestamp(),
'slotstate': slot_matrix}
# --------------------------------------------------------------------------
#
# Allocate a number of cores
#
# Currently only implements full-node allocation, so core count must
# be a multiple of cores_per_node.
#
def _allocate_slot(self, cores_requested):
block = self._lrms.torus_block
sub_block_shape_table = self._lrms.shape_table
self._log.info("Trying to allocate %d core(s).", cores_requested)
if cores_requested % self._lrms.cores_per_node:
num_cores = int(math.ceil(cores_requested / float(self._lrms.cores_per_node))) \
* self._lrms.cores_per_node
self._log.error('Core not multiple of %d, increasing to %d!',
self._lrms.cores_per_node, num_cores)
num_nodes = cores_requested / self._lrms.cores_per_node
offset = self._alloc_sub_block(block, num_nodes)
if offset is None:
self._log.warning('No allocation made.')
return
# TODO: return something else than corner location? Corner index?
corner = block[offset][self.TORUS_BLOCK_COOR]
sub_block_shape = sub_block_shape_table[num_nodes]
end = self.get_last_node(corner, sub_block_shape)
self._log.debug('Allocating sub-block of %d node(s) with dimensions %s'
' at offset %d with corner %s and end %s.',
num_nodes, self._lrms.shape2str(sub_block_shape), offset,
self._lrms.loc2str(corner), self._lrms.loc2str(end))
return corner, sub_block_shape
# --------------------------------------------------------------------------
#
# Allocate a sub-block within a block
# Currently only works with offset that are exactly the sub-block size
#
def _alloc_sub_block(self, block, num_nodes):
offset = 0
# Iterate through all nodes with offset a multiple of the sub-block size
while True:
# Verify the assumption (needs to be an assert?)
if offset % num_nodes != 0:
msg = 'Sub-block needs to start at correct offset!'
self._log.exception(msg)
raise ValueError(msg)
# TODO: If we want to workaround this, the coordinates need to overflow
not_free = False
# Check if all nodes from offset till offset+size are FREE
for peek in range(num_nodes):
try:
if block[offset+peek][self.TORUS_BLOCK_STATUS] == BUSY:
# Once we find the first BUSY node we can discard this attempt
not_free = True
break
except IndexError:
self._log.exception('Block out of bound. Num_nodes: %d, offset: %d, peek: %d.',
num_nodes, offset, peek)
if not_free == True:
# No success at this offset
self._log.info("No free nodes found at this offset: %d.", offset)
# If we weren't the last attempt, then increase the offset and iterate again.
if offset + num_nodes < self._block2num_nodes(block):
offset += num_nodes
continue
else:
return
else:
# At this stage we have found a free spot!
self._log.info("Free nodes found at this offset: %d.", offset)
# Then mark the nodes busy
for peek in range(num_nodes):
block[offset+peek][self.TORUS_BLOCK_STATUS] = BUSY
return offset
# --------------------------------------------------------------------------
#
# Return the number of nodes in a block
#
def _block2num_nodes(self, block):
return len(block)
# --------------------------------------------------------------------------
#
def _release_slot(self, (corner, shape)):
self._free_cores(self._lrms.torus_block, corner, shape)
# --------------------------------------------------------------------------
#
# Free up an allocation
#
def _free_cores(self, block, corner, shape):
# Number of nodes to free
num_nodes = self._shape2num_nodes(shape)
# Location of where to start freeing
offset = self.corner2offset(block, corner)
self._log.info("Freeing %d nodes starting at %d.", num_nodes, offset)
for peek in range(num_nodes):
assert block[offset+peek][self.TORUS_BLOCK_STATUS] == BUSY, \
'Block %d not Free!' % block[offset+peek]
block[offset+peek][self.TORUS_BLOCK_STATUS] = FREE
# --------------------------------------------------------------------------
#
# Follow coordinates to get the last node
#
def get_last_node(self, origin, shape):
ret = {}
for dim in self._lrms.torus_dimension_labels:
ret[dim] = origin[dim] + shape[dim] -1
return ret
# --------------------------------------------------------------------------
#
# Return the number of nodes for the given block shape
#
def _shape2num_nodes(self, shape):
nodes = 1
for dim in self._lrms.torus_dimension_labels:
nodes *= shape[dim]
return nodes
# --------------------------------------------------------------------------
#
# Return the offset into the node list from a corner
#
# TODO: Can this be determined instead of searched?
#
def corner2offset(self, block, corner):
offset = 0
for e in block:
if corner == e[self.TORUS_BLOCK_COOR]:
return offset
offset += 1
return offset
# ==============================================================================
#
# Launch Methods
#
# ==============================================================================
#
class LaunchMethod(object):
    """Abstract base class for all launch methods.

    Subclasses implement _configure() (which must set
    self.launch_command) and construct_command().
    """

    # List of environment variables that designated Launch Methods should export
    EXPORT_ENV_VARIABLES = [
        'LD_LIBRARY_PATH',
        'PATH',
        'PYTHONPATH',
        'PYTHON_DIR',
    ]

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        """Store the common state and discover the launch command."""
        self.name           = name
        self._config        = config
        self._log           = logger
        self._scheduler     = scheduler
        self.launch_command = None

        self._configure()
        # TODO: This doesn't make too much sense for LM's that use multiple
        #       commands; perhaps this needs to move to per-LM __init__.
        if self.launch_command is None:
            raise RuntimeError("Launch command not found for LaunchMethod '%s'" % name)
        logger.info("Discovered launch command: '%s'.", self.launch_command)

    # --------------------------------------------------------------------------
    #
    # This class-method creates the appropriate sub-class for the Launch Method.
    #
    @classmethod
    def create(cls, name, config, logger, scheduler):
        """Factory: instantiate the LaunchMethod subclass matching 'name';
        returns None (after logging) when the name is unknown or the
        launch method cannot be used on this machine."""
        # Make sure that we are the base-class!
        if cls != LaunchMethod:
            raise TypeError("LaunchMethod factory only available to base class!")

        try:
            implementation = {
                LAUNCH_METHOD_APRUN         : LaunchMethodAPRUN,
                LAUNCH_METHOD_CCMRUN        : LaunchMethodCCMRUN,
                LAUNCH_METHOD_DPLACE        : LaunchMethodDPLACE,
                LAUNCH_METHOD_FORK          : LaunchMethodFORK,
                LAUNCH_METHOD_IBRUN         : LaunchMethodIBRUN,
                LAUNCH_METHOD_MPIEXEC       : LaunchMethodMPIEXEC,
                LAUNCH_METHOD_MPIRUN_CCMRUN : LaunchMethodMPIRUNCCMRUN,
                LAUNCH_METHOD_MPIRUN_DPLACE : LaunchMethodMPIRUNDPLACE,
                LAUNCH_METHOD_MPIRUN        : LaunchMethodMPIRUN,
                LAUNCH_METHOD_MPIRUN_RSH    : LaunchMethodMPIRUNRSH,
                LAUNCH_METHOD_ORTE          : LaunchMethodORTE,
                LAUNCH_METHOD_POE           : LaunchMethodPOE,
                LAUNCH_METHOD_RUNJOB        : LaunchMethodRUNJOB,
                LAUNCH_METHOD_SSH           : LaunchMethodSSH
            }[name]
            return implementation(name, config, logger, scheduler)
        except KeyError:
            logger.exception("LaunchMethod '%s' unknown!" % name)
        except Exception as e:
            logger.exception("LaunchMethod cannot be used: %s!" % e)

        return None

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        """To be implemented by subclasses: set self.launch_command."""
        raise NotImplementedError("_configure() not implemented for LaunchMethod: %s." % self.name)

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, opaque_slot):
        """To be implemented by subclasses: return (command, hop_command)."""
        raise NotImplementedError("construct_command() not implemented for LaunchMethod: %s." % self.name)

    # --------------------------------------------------------------------------
    #
    def _find_executable(self, names):
        """Take a (list of) name(s) and return the first one found as an
        executable in the path; None if none is found."""
        if not isinstance(names, list):
            names = [names]

        for candidate in names:
            found = self._which(candidate)
            if found is not None:
                return found

        return None

    # --------------------------------------------------------------------------
    #
    def _which(self, program):
        """Find the location of an executable, like the shell's 'which'.

        Taken from:
        http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
        """
        def is_exe(fpath):
            return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

        head, _ = os.path.split(program)
        if head:
            # Program given with a path component: check it directly.
            if is_exe(program):
                return program
            return None

        for directory in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(directory, program)
            if is_exe(candidate):
                return candidate

        return None
# ==============================================================================
#
class LaunchMethodFORK(LaunchMethod):
    """Run the task directly on the local node, without any launcher."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # "Regular" tasks: no launcher executable is needed.
        self.launch_command = ''

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, opaque_slot):
        """Return (command, None): the bare executable plus its arguments."""
        if task_args:
            command = "%s %s" % (task_exec, task_args)
        else:
            command = task_exec

        return command, None
# ==============================================================================
#
class LaunchMethodMPIRUN(LaunchMethod):
    """Launch MPI tasks via a generic 'mpirun'."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        self.launch_command = self._find_executable([
            'mpirun',            # General case
            'mpirun_rsh',        # Gordon @ SDSC
            'mpirun-mpich-mp',   # Mac OSX MacPorts
            'mpirun-openmpi-mp'  # Mac OSX MacPorts
        ])

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Build the mpirun command line for the given slots."""
        task_command = " ".join([task_exec, task_args]) if task_args else task_exec

        # Host list: strip the core part from each 'host:core' slot entry.
        hosts_string = ",".join(slot.split(':')[0] for slot in task_slots)

        # Forward selected environment variables into the MPI processes.
        export_vars = ' '.join('-x ' + var for var in self.EXPORT_ENV_VARIABLES
                               if var in os.environ)

        mpirun_command = "%s %s -np %s -host %s %s" % (
            self.launch_command, export_vars, task_numcores, hosts_string, task_command)

        return mpirun_command, None
# ==============================================================================
#
class LaunchMethodSSH(LaunchMethod):
    """Launch the task by executing a launch script on a remote node via
    ssh (or rsh, where "ssh" is really a link to rsh)."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        """Locate ssh and set self.launch_command."""
        # Find ssh command
        command = self._which('ssh')

        if command is not None:

            # Some MPI environments (e.g. SGE) put a link to rsh as "ssh" into
            # the path.  We try to detect that and then use different arguments.
            if os.path.islink(command):

                target = os.path.realpath(command)

                if os.path.basename(target) == 'rsh':
                    self._log.info('Detected that "ssh" is a link to "rsh".')
                    # BUGFIX: the original code did 'return target', which
                    # discarded the result and left launch_command unset,
                    # making the constructor raise.  Store it instead.
                    self.launch_command = target
                    return

            # Disable interactive host-key prompts for non-interactive use.
            command = '%s -o StrictHostKeyChecking=no' % command
            self.launch_command = command

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Return (task_command, ssh_hop_cmd): the task command to run on
        the remote host, and the ssh command that hops there first."""
        if not launch_script_hop :
            raise ValueError ("LaunchMethodSSH.construct_command needs launch_script_hop!")

        # Get the host of the first entry in the acquired slot
        host = task_slots[0].split(':')[0]

        if task_args:
            task_command = " ".join([task_exec, task_args])
        else:
            task_command = task_exec

        # Command line to execute launch script via ssh on host
        ssh_hop_cmd = "%s %s %s" % (self.launch_command, host, launch_script_hop)

        # Special case, return a tuple that overrides the default command line.
        return task_command, ssh_hop_cmd
# ==============================================================================
#
class LaunchMethodMPIEXEC(LaunchMethod):
    """Launch MPI tasks via 'mpiexec' (e.g. on SuperMUC)."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # mpiexec (e.g. on SuperMUC)
        self.launch_command = self._find_executable([
            'mpiexec',            # General case
            'mpiexec-mpich-mp',   # Mac OSX MacPorts
            'mpiexec-openmpi-mp'  # Mac OSX MacPorts
        ])

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Build the mpiexec command line for the given slots."""
        # One host entry per slot ('host:core' -> 'host').
        hosts_string = ",".join(slot.split(':')[0] for slot in task_slots)

        # Construct the executable and arguments
        task_command = " ".join([task_exec, task_args]) if task_args else task_exec

        mpiexec_command = "%s -n %s -host %s %s" % (
            self.launch_command, task_numcores, hosts_string, task_command)

        return mpiexec_command, None
# ==============================================================================
#
class LaunchMethodAPRUN(LaunchMethod):
    """Launch tasks via 'aprun', the job launcher on Cray systems."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # aprun: job launcher for Cray systems
        self.launch_command = self._which('aprun')

        # TODO: ensure that only one concurrent aprun per node is executed!

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, opaque_slot):
        """Build the aprun command line."""
        task_command = " ".join([task_exec, task_args]) if task_args else task_exec

        aprun_command = "%s -n %d %s" % (
            self.launch_command, task_numcores, task_command)

        return aprun_command, None
# ==============================================================================
#
class LaunchMethodCCMRUN(LaunchMethod):
    """Launch tasks via 'ccmrun', the Cluster Compatibility Mode (CCM)
    job launcher for Cray systems."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # ccmrun: Cluster Compatibility Mode (CCM) job launcher for Cray systems
        self.launch_command = self._which('ccmrun')

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, opaque_slot):
        """Build the ccmrun command line."""
        task_command = " ".join([task_exec, task_args]) if task_args else task_exec

        ccmrun_command = "%s -n %d %s" % (
            self.launch_command, task_numcores, task_command)

        return ccmrun_command, None
# ==============================================================================
#
class LaunchMethodMPIRUNCCMRUN(LaunchMethod):
    """Launch MPI tasks through 'ccmrun mpirun ...' on Cray CCM systems."""
    # TODO: This needs both mpirun and ccmrun

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # ccmrun: Cluster Compatibility Mode job launcher for Cray systems
        self.launch_command = self._which('ccmrun')

        self.mpirun_command = self._which('mpirun')
        if not self.mpirun_command:
            raise RuntimeError("mpirun not found!")

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Build the combined ccmrun/mpirun command line."""
        task_command = " ".join([task_exec, task_args]) if task_args else task_exec

        # Construct the hosts_string
        # TODO: is there any use in using $HOME/.crayccm/ccm_nodelist.$JOBID?
        hosts_string = ",".join(slot.split(':')[0] for slot in task_slots)

        # Environment variables to forward into the MPI processes.
        export_vars = ' '.join('-x ' + var for var in self.EXPORT_ENV_VARIABLES
                               if var in os.environ)

        mpirun_ccmrun_command = "%s %s %s -np %d -host %s %s" % (
            self.launch_command, self.mpirun_command, export_vars,
            task_numcores, hosts_string, task_command)

        return mpirun_ccmrun_command, None
# ==============================================================================
#
class LaunchMethodRUNJOB(LaunchMethod):
    """Launch tasks via 'runjob' on IBM BG/Q systems (e.g. Joule)."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # runjob: job launcher for IBM BG/Q systems, e.g. Joule
        self.launch_command = self._which('runjob')

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, opaque_slot):
        """Build the runjob command line for the given torus slot.

        The opaque slot is the (corner, sub_block_shape) tuple produced by
        the torus scheduler.  Explicit unpacking replaces the Python-2-only
        tuple parameter so that the module also parses under Python 3;
        callers are unaffected.
        """
        corner, sub_block_shape = opaque_slot

        if task_numcores % self._scheduler._lrms.cores_per_node:
            msg = "Num cores (%d) is not a multiple of %d!" % (
                task_numcores, self._scheduler._lrms.cores_per_node)
            # BUGFIX: log with error(); exception() is only meaningful from
            # within an 'except' block.
            self._log.error(msg)
            raise ValueError(msg)

        # Runjob it is!
        runjob_command = self.launch_command

        # Set the number of tasks/ranks per node
        # TODO: Currently hardcoded, this should be configurable,
        #       but I don't see how, this would be a leaky abstraction.
        runjob_command += ' --ranks-per-node %d' % min(self._scheduler._lrms.cores_per_node, task_numcores)

        # Run this subjob in the block communicated by LoadLeveler
        runjob_command += ' --block %s' % self._scheduler._lrms.loadl_bg_block

        # Translate the corner coordinates into the corner node's name.
        corner_offset = self._scheduler.corner2offset(self._scheduler._lrms.torus_block, corner)
        corner_node = self._scheduler._lrms.torus_block[corner_offset][self._scheduler.TORUS_BLOCK_NAME]
        runjob_command += ' --corner %s' % corner_node

        # convert the shape
        runjob_command += ' --shape %s' % self._scheduler._lrms.shape2str(sub_block_shape)

        # runjob needs the full path to the executable
        if os.path.basename(task_exec) == task_exec:
            # Use `which` with back-ticks as the executable,
            # will be expanded in the shell script.
            task_exec = '`which %s`' % task_exec
            # Note: We can't use the expansion from here,
            #       as the pre-execs of the CU aren't run yet!!

        # And finally add the executable and the arguments
        # usage: runjob <runjob flags> : /bin/hostname -f
        runjob_command += ' : %s' % task_exec
        if task_args:
            runjob_command += ' %s' % task_args

        return runjob_command, None
# ==============================================================================
#
class LaunchMethodDPLACE(LaunchMethod):
    """Launch tasks via 'dplace' on SGI systems (e.g. on Blacklight)."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # dplace: job launcher for SGI systems (e.g. on Blacklight)
        self.launch_command = self._which('dplace')

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Build the dplace command line pinning the task to its cores."""
        task_command = " ".join([task_exec, task_args]) if task_args else task_exec

        # First core of the allocation as an absolute core index.
        first_core = self._scheduler.slots2offset(task_slots)
        last_core  = first_core + task_numcores - 1

        dplace_command = "%s -c %d-%d %s" % (
            self.launch_command, first_core, last_core, task_command)

        return dplace_command, None
# ==============================================================================
#
class LaunchMethodMPIRUNRSH(LaunchMethod):
    """Launch MPI tasks via 'mpirun_rsh' (e.g. on Gordon@SDSC,
    Stampede@TACC)."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # mpirun_rsh (e.g. on Gordon@SDSC, Stampede@TACC)
        if not self._which('mpirun_rsh'):
            raise Exception("mpirun_rsh could not be found")

        # We don't use the full pathname as the user might load a different
        # compiler / MPI library suite from his CU pre_exec that requires
        # the launcher from that version, as experienced on stampede in #572.
        self.launch_command = 'mpirun_rsh'

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Build the mpirun_rsh command line for the given slots."""
        task_command = " ".join([task_exec, task_args]) if task_args else task_exec

        # Construct the hosts_string ('h1 h2 .. hN')
        hosts_string = " ".join(slot.split(':')[0] for slot in task_slots)

        # mpirun_rsh takes 'VAR=$VAR' pairs on the command line.
        export_vars = ' '.join(var + "=$" + var for var in self.EXPORT_ENV_VARIABLES
                               if var in os.environ)

        mpirun_rsh_command = "%s -np %s %s %s %s" % (
            self.launch_command, task_numcores, hosts_string, export_vars, task_command)

        return mpirun_rsh_command, None
# ==============================================================================
#
class LaunchMethodMPIRUNDPLACE(LaunchMethod):
    """Launch MPI tasks via 'mpirun' pinned with 'dplace' (SGI systems,
    e.g. on Blacklight)."""
    # TODO: This needs both mpirun and dplace

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # dplace: job launcher for SGI systems (e.g. on Blacklight)
        self.launch_command = self._which('dplace')
        self.mpirun_command = self._which('mpirun')
        # CONSISTENCY FIX: fail early like LaunchMethodMPIRUNCCMRUN does;
        # otherwise a missing mpirun only surfaces later as a broken
        # command line containing 'None'.
        if not self.mpirun_command:
            raise RuntimeError("mpirun not found!")

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Build the 'mpirun ... dplace ...' command line."""
        if task_args:
            task_command = " ".join([task_exec, task_args])
        else:
            task_command = task_exec

        # First core of the allocation as an absolute core index.
        dplace_offset = self._scheduler.slots2offset(task_slots)

        mpirun_dplace_command = "%s -np %d %s -c %d-%d %s" % \
            (self.mpirun_command, task_numcores, self.launch_command,
             dplace_offset, dplace_offset+task_numcores-1, task_command)

        return mpirun_dplace_command, None
# ==============================================================================
#
class LaunchMethodIBRUN(LaunchMethod):
    """Launch tasks via 'ibrun', TACC's wrapper for mpirun."""
    # NOTE: Don't think that with IBRUN it is possible to have
    #       processes != cores ...

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # ibrun: wrapper for mpirun at TACC
        self.launch_command = self._which('ibrun')

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Build the ibrun command line with the proper core offset."""
        task_command = " ".join([task_exec, task_args]) if task_args else task_exec

        ibrun_offset = self._scheduler.slots2offset(task_slots)

        ibrun_command = "%s -n %s -o %d %s" % (
            self.launch_command, task_numcores, ibrun_offset, task_command)

        return ibrun_command, None
# ==============================================================================
#
# NOTE: This requires a development version of Open MPI available.
#
class LaunchMethodORTE(LaunchMethod):
    """Launch tasks via 'orte-submit' against a persistent ORTE DVM.

    NOTE: This requires a development version of Open MPI available.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        """Locate orte-submit, start the ORTE DVM and record its URI."""
        self.launch_command = self._which('orte-submit')

        dvm_command = self._which('orte-dvm')
        if not dvm_command:
            raise Exception("Couldn't find orte-dvm")

        # Use (g)stdbuf to disable buffering.
        # We need this to get the "DVM ready",
        # without waiting for orte-dvm to complete.
        # The command seems to be generally available on our Cray's,
        # if not, we can code some home-coooked pty stuff.
        stdbuf_cmd = self._find_executable(['stdbuf', 'gstdbuf'])
        if not stdbuf_cmd:
            raise Exception("Couldn't find (g)stdbuf")
        stdbuf_arg = "-oL"

        self._log.info("Starting ORTE DVM ...")
        self._dvm_process = subprocess.Popen(
            [stdbuf_cmd, stdbuf_arg, dvm_command],
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        )

        self._dvmuri = None
        while True:

            line = self._dvm_process.stdout.readline().strip()

            if line.startswith('VMURI:'):
                if len(line.split(' ')) != 2:
                    raise Exception("Unknown VMURI format: %s" % line)
                label, self._dvmuri = line.split(' ', 1)
                if label != 'VMURI:':
                    raise Exception("Unknown VMURI format: %s" % line)
                self._log.info("ORTE DVM URI: %s" % self._dvmuri)

            elif line == 'DVM ready':
                if not self._dvmuri:
                    raise Exception("VMURI not found!")
                self._log.info("ORTE DVM startup successful!")
                break

            else:
                # Check if the process is still around,
                # and log output in debug mode.
                # BUGFIX: Popen.poll() returns None while the process is
                # alive; the original 'if not poll()' treated a clean exit
                # (returncode 0) as "still running" and would loop forever
                # reading an empty pipe.
                if self._dvm_process.poll() is None:
                    self._log.debug("ORTE: %s" % line)
                else:
                    # Process is gone: fatal!
                    raise Exception("ORTE DVM process disappeared")

        # TODO: Create teardown() function for LaunchMethod's (in this case to terminate the dvm)
        #subprocess.Popen([self.launch_command, "--hnp", orte_vm_uri_filename, "--terminate"])

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Build the orte-submit command line against the running DVM."""
        if task_args:
            task_command = " ".join([task_exec, task_args])
        else:
            task_command = task_exec

        # Construct the hosts_string
        hosts_string = ",".join([slot.split(':')[0] for slot in task_slots])

        # Forward selected environment variables into the MPI processes.
        export_vars = ' '.join(['-x ' + var for var in self.EXPORT_ENV_VARIABLES if var in os.environ])

        orte_command = '%s --hnp "%s" %s -np %s -host %s %s' % (
            self.launch_command, self._dvmuri, export_vars, task_numcores, hosts_string, task_command)

        return orte_command, None
# ==============================================================================
#
class LaunchMethodPOE(LaunchMethod):
    """Launch MPI tasks via 'poe', the LSF-specific MPI wrapper
    (e.g. yellowstone)."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # poe: LSF specific wrapper for MPI (e.g. yellowstone)
        self.launch_command = self._which('poe')

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Build the poe command line, overriding LSB_MCPU_HOSTS."""
        # Count slots per host in provided slots description.
        hosts = {}
        for slot in task_slots:
            host = slot.split(':')[0]
            hosts[host] = hosts.get(host, 0) + 1

        # Create a string of the format: "host1 N1 host2 N2 ... "
        hosts_string = ''
        for host in hosts:
            hosts_string += '%s %d ' % (host, hosts[host])

        task_command = " ".join([task_exec, task_args]) if task_args else task_exec

        # Override the LSB_MCPU_HOSTS env variable as this is set by
        # default to the size of the whole pilot.
        poe_command = 'LSB_MCPU_HOSTS="%s" %s %s' % (
            hosts_string, self.launch_command, task_command)

        return poe_command, None
# ==============================================================================
#
# Base class for LRMS implementations.
#
# ==============================================================================
#
class LRMS(object):
    """Base class for LRMS (batch system) implementations.

    Subclasses implement _configure(), which must fill in node_list and
    cores_per_node from the respective batch system's environment.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, requested_cores):
        self.name            = name
        self._config         = config
        self._log            = logger
        self.requested_cores = requested_cores

        self._log.info("Configuring LRMS %s.", self.name)

        self.slot_list      = []
        self.node_list      = []
        self.cores_per_node = None

        self._configure()

        logger.info("Discovered execution environment: %s", self.node_list)

        # For now assume that all nodes have equal amount of cores
        cores_avail = len(self.node_list) * self.cores_per_node
        if 'RADICAL_PILOT_PROFILE' not in os.environ:
            if cores_avail < int(requested_cores):
                raise ValueError("Not enough cores available (%s) to satisfy allocation request (%s)." \
                                % (str(cores_avail), str(requested_cores)))

    # --------------------------------------------------------------------------
    #
    # This class-method creates the appropriate sub-class for the LRMS.
    #
    @classmethod
    def create(cls, name, config, logger, requested_cores):
        """Factory: instantiate the LRMS subclass matching 'name'."""
        # TODO: Core counts dont have to be the same number for all hosts.
        # TODO: We might not have reserved the whole node.
        # TODO: Given that the Agent can determine the real core count, in
        #       principle we could just ignore the config and use as many as we
        #       have to our availability (taken into account that we might not
        #       have the full node reserved of course)
        #       Answer: at least on Yellowstone this doesnt work for MPI,
        #       as you can't spawn more tasks then the number of slots.

        # Make sure that we are the base-class!
        if cls != LRMS:
            raise TypeError("LRMS Factory only available to base class!")

        try:
            implementation = {
                LRMS_NAME_CCM         : CCMLRMS,
                LRMS_NAME_FORK        : ForkLRMS,
                LRMS_NAME_LOADLEVELER : LoadLevelerLRMS,
                LRMS_NAME_LSF         : LSFLRMS,
                LRMS_NAME_PBSPRO      : PBSProLRMS,
                LRMS_NAME_SGE         : SGELRMS,
                LRMS_NAME_SLURM       : SLURMLRMS,
                LRMS_NAME_TORQUE      : TORQUELRMS
            }[name]
            return implementation(name, config, logger, requested_cores)
        except KeyError:
            raise RuntimeError("LRMS type '%s' unknown!" % name)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        """To be implemented by subclasses."""
        raise NotImplementedError("_Configure not implemented for LRMS type: %s." % self.name)
# ==============================================================================
#
class CCMLRMS(LRMS):
    """LRMS for Cray CCM: reads the most recent ~/.crayccm nodefile."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, requested_cores):
        LRMS.__init__(self, name, config, logger, requested_cores)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        """Derive node_list and cores_per_node from the latest CCM nodefile."""
        self._log.info("Configured to run on system with %s.", self.name)

        CCM_NODEFILE_DIR = os.path.expanduser('~/.crayccm')

        # COMPAT FIX: materialize the filter() result -- under Python 3 a
        # lazy filter object is always truthy and can only be consumed
        # once; under Python 2 this is a no-op.
        ccm_nodefile_list = list(filter(lambda x: x.startswith('ccm_nodelist'),
                                        os.listdir(CCM_NODEFILE_DIR)))
        if not ccm_nodefile_list:
            raise Exception("No CCM nodefiles found in: %s." % CCM_NODEFILE_DIR)

        # Pick the most recently modified nodefile.
        ccm_nodefile_name = max(ccm_nodefile_list, key=lambda x:
                                os.stat(os.path.join(CCM_NODEFILE_DIR, x)).st_mtime)
        ccm_nodefile = os.path.join(CCM_NODEFILE_DIR, ccm_nodefile_name)

        # Sanity check: this host must appear in the nodefile we picked.
        hostname = os.uname()[1]
        if not hostname in open(ccm_nodefile).read():
            raise RuntimeError("Using the most recent CCM nodefile (%s),"
                               " but I (%s) am not in it!" % (ccm_nodefile, hostname))

        # Parse the CCM nodefile
        ccm_nodes = [line.strip() for line in open(ccm_nodefile)]
        self._log.info("Found CCM nodefile: %s.", ccm_nodefile)

        # Get the number of raw entries
        ccm_nodes_length = len(ccm_nodes)

        # Unique nodes
        ccm_node_list = list(set(ccm_nodes))
        ccm_node_list_length = len(ccm_node_list)

        # Some simple arithmetic: entries per unique node == cores per node.
        # COMPAT FIX: '//' keeps the integer semantics of Python-2 '/'.
        self.cores_per_node = ccm_nodes_length // ccm_node_list_length

        self.node_list = ccm_node_list
# ==============================================================================
#
class TORQUELRMS(LRMS):
    """LRMS for Torque/PBS: derives node_list and cores_per_node from
    $PBS_NODEFILE and the $PBS_* environment variables."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, requested_cores):
        LRMS.__init__(self, name, config, logger, requested_cores)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        """Parse $PBS_NODEFILE and related variables into node_list and
        cores_per_node; raises RuntimeError if $PBS_NODEFILE is missing."""
        self._log.info("Configured to run on system with %s.", self.name)

        torque_nodefile = os.environ.get('PBS_NODEFILE')
        if torque_nodefile is None:
            msg = "$PBS_NODEFILE not set!"
            self._log.error(msg)
            raise RuntimeError(msg)

        # Parse PBS the nodefile
        torque_nodes = [line.strip() for line in open(torque_nodefile)]
        self._log.info("Found Torque PBS_NODEFILE %s: %s", torque_nodefile, torque_nodes)

        # Number of cpus involved in allocation
        val = os.environ.get('PBS_NCPUS')
        if val:
            torque_num_cpus = int(val)
        else:
            msg = "$PBS_NCPUS not set! (new Torque version?)"
            torque_num_cpus = None
            self._log.warning(msg)

        # Number of nodes involved in allocation
        val = os.environ.get('PBS_NUM_NODES')
        if val:
            torque_num_nodes = int(val)
        else:
            msg = "$PBS_NUM_NODES not set! (old Torque version?)"
            torque_num_nodes = None
            self._log.warning(msg)

        # Number of cores (processors) per node
        val = os.environ.get('PBS_NUM_PPN')
        if val:
            torque_cores_per_node = int(val)
        else:
            msg = "$PBS_NUM_PPN is not set!"
            torque_cores_per_node = None
            self._log.warning(msg)

        if torque_cores_per_node in [None, 1]:
            # lets see if SAGA has been forthcoming with some information
            self._log.warning("fall back to $SAGA_PPN : %s", os.environ.get('SAGA_PPN', None))
            # BUGFIX: the original int(os.environ.get('SAGA_PPN',
            # torque_cores_per_node)) raised TypeError (int(None)) when
            # neither $PBS_NUM_PPN nor $SAGA_PPN was set.  Only convert
            # when $SAGA_PPN actually carries a value.
            saga_ppn = os.environ.get('SAGA_PPN')
            if saga_ppn:
                torque_cores_per_node = int(saga_ppn)

        # Number of entries in nodefile should be PBS_NUM_NODES * PBS_NUM_PPN
        torque_nodes_length = len(torque_nodes)
        torque_node_list    = list(set(torque_nodes))

        # if torque_num_nodes and torque_cores_per_node and \
        #     torque_nodes_length < torque_num_nodes * torque_cores_per_node:
        #     msg = "Number of entries in $PBS_NODEFILE (%s) does not match with $PBS_NUM_NODES*$PBS_NUM_PPN (%s*%s)" % \
        #           (torque_nodes_length, torque_num_nodes, torque_cores_per_node)
        #     raise RuntimeError(msg)

        # only unique node names
        torque_node_list_length = len(torque_node_list)
        self._log.debug("Node list: %s(%d)", torque_node_list, torque_node_list_length)

        if torque_num_nodes and torque_cores_per_node:
            # Modern style Torque
            self.cores_per_node = torque_cores_per_node
        elif torque_num_cpus:
            # Blacklight style (TORQUE-2.3.13)
            self.cores_per_node = torque_num_cpus
        else:
            # Old style Torque (Should we just use this for all versions?)
            # COMPAT FIX: '//' keeps the integer semantics of Python-2 '/'.
            self.cores_per_node = torque_nodes_length // torque_node_list_length
        self.node_list = torque_node_list
# ==============================================================================
#
class PBSProLRMS(LRMS):
    """LRMS for PBS Pro.

    Node and core counts are derived from $PBS_NODEFILE, $NUM_PPN,
    $NODE_COUNT and $NUM_PES, and the actual node names from parsing the
    'exec_vnode' field of 'qstat -f' output.
    """
    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, requested_cores):
        LRMS.__init__(self, name, config, logger, requested_cores)
    # --------------------------------------------------------------------------
    #
    def _configure(self):
        """Populate self.node_list and self.cores_per_node from the PBSPro
        environment; raises RuntimeError when a required variable is missing."""
        # TODO: $NCPUS?!?! = 1 on archer
        pbspro_nodefile = os.environ.get('PBS_NODEFILE')
        if pbspro_nodefile is None:
            msg = "$PBS_NODEFILE not set!"
            self._log.error(msg)
            raise RuntimeError(msg)
        self._log.info("Found PBSPro $PBS_NODEFILE %s." % pbspro_nodefile)
        # Dont need to parse the content of nodefile for PBSPRO, only the length
        # is interesting, as there are only duplicate entries in it.
        pbspro_nodes_length = len([line.strip() for line in open(pbspro_nodefile)])
        # Number of Processors per Node
        val = os.environ.get('NUM_PPN')
        if val:
            pbspro_num_ppn = int(val)
        else:
            msg = "$NUM_PPN not set!"
            self._log.error(msg)
            raise RuntimeError(msg)
        # Number of Nodes allocated
        val = os.environ.get('NODE_COUNT')
        if val:
            pbspro_node_count = int(val)
        else:
            msg = "$NODE_COUNT not set!"
            self._log.error(msg)
            raise RuntimeError(msg)
        # Number of Parallel Environments
        val = os.environ.get('NUM_PES')
        if val:
            pbspro_num_pes = int(val)
        else:
            msg = "$NUM_PES not set!"
            self._log.error(msg)
            raise RuntimeError(msg)
        pbspro_vnodes = self._parse_pbspro_vnodes()
        # Verify that $NUM_PES == $NODE_COUNT * $NUM_PPN == len($PBS_NODEFILE)
        # (inconsistency is only warned about, not fatal)
        if not (pbspro_node_count * pbspro_num_ppn == pbspro_num_pes == pbspro_nodes_length):
            self._log.warning("NUM_PES != NODE_COUNT * NUM_PPN != len($PBS_NODEFILE)")
        self.cores_per_node = pbspro_num_ppn
        self.node_list = pbspro_vnodes
    # --------------------------------------------------------------------------
    #
    def _parse_pbspro_vnodes(self):
        """Return the list of unique node names for this job, parsed from the
        (possibly multi-line) 'exec_vnode' entry of 'qstat -f $PBS_JOBID'."""
        # PBS Job ID
        val = os.environ.get('PBS_JOBID')
        if val:
            pbspro_jobid = val
        else:
            msg = "$PBS_JOBID not set!"
            self._log.error(msg)
            raise RuntimeError(msg)
        # Get the output of qstat -f for this job
        output = subprocess.check_output(["qstat", "-f", pbspro_jobid])
        # Get the (multiline) 'exec_vnode' entry
        vnodes_str = ''
        for line in output.splitlines():
            # Detect start of entry
            if 'exec_vnode = ' in line:
                vnodes_str += line.strip()
            elif vnodes_str:
                # Find continuing lines (qstat wraps long values; a wrapped
                # line has no ' = ' separator of its own)
                if " = " not in line:
                    vnodes_str += line.strip()
                else:
                    break
        # Get the RHS of the entry
        rhs = vnodes_str.split('=',1)[1].strip()
        self._log.debug("input: %s", rhs)
        nodes_list = []
        # Break up the individual node partitions into vnode slices.
        # NOTE(review): when ')+(' is absent, find() returns -1, so
        # rhs[1:idx] == rhs[1:-1] strips the surrounding parentheses of the
        # last (or only) slice, and the loop then terminates.
        while True:
            idx = rhs.find(')+(')
            node_str = rhs[1:idx]
            nodes_list.append(node_str)
            rhs = rhs[idx+2:]
            if idx < 0:
                break
        vnodes_list = []
        cpus_list = []
        # Split out the slices into vnode name and cpu count
        # (each slice looks like 'vnode:ncpus=N', joined by '+')
        for node_str in nodes_list:
            slices = node_str.split('+')
            for _slice in slices:
                vnode, cpus = _slice.split(':')
                cpus = int(cpus.split('=')[1])
                self._log.debug("vnode: %s cpus: %s", vnode, cpus)
                vnodes_list.append(vnode)
                cpus_list.append(cpus)
        self._log.debug("vnodes: %s", vnodes_list)
        self._log.debug("cpus: %s", cpus_list)
        cpus_list = list(set(cpus_list))
        min_cpus = int(min(cpus_list))
        if len(cpus_list) > 1:
            self._log.debug("Detected vnodes of different sizes: %s, the minimal is: %d.", cpus_list, min_cpus)
        node_list = []
        for vnode in vnodes_list:
            # strip the last _0 of the vnodes to get the node name
            node_list.append(vnode.rsplit('_', 1)[0])
        # only unique node names
        node_list = list(set(node_list))
        self._log.debug("Node list: %s", node_list)
        # Return the list of node names
        return node_list
# ==============================================================================
#
class SLURMLRMS(LRMS):
    """LRMS backend for SLURM: derives the node list and the per-node core
    count from the SLURM batch environment."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, requested_cores):
        LRMS.__init__(self, name, config, logger, requested_cores)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        """Read $SLURM_NODELIST / $SLURM_NPROCS / $SLURM_NNODES /
        $SLURM_CPUS_ON_NODE and set self.node_list and self.cores_per_node.

        Raises RuntimeError when a required variable is unset.
        """
        slurm_nodelist = os.environ.get('SLURM_NODELIST')
        if slurm_nodelist is None:
            msg = "$SLURM_NODELIST not set!"
            self._log.error(msg)
            raise RuntimeError(msg)

        # Expand SLURM's compressed hostname-range syntax into a full list.
        slurm_nodes = hostlist.expand_hostlist(slurm_nodelist)
        self._log.info("Found SLURM_NODELIST %s. Expanded to: %s", slurm_nodelist, slurm_nodes)

        # $SLURM_NPROCS = Total number of cores allocated for the current job
        slurm_nprocs_str = os.environ.get('SLURM_NPROCS')
        if slurm_nprocs_str is None:
            msg = "$SLURM_NPROCS not set!"
            self._log.error(msg)
            raise RuntimeError(msg)
        slurm_nprocs = int(slurm_nprocs_str)

        # $SLURM_NNODES = Total number of (partial) nodes in the job's resource allocation
        slurm_nnodes_str = os.environ.get('SLURM_NNODES')
        if slurm_nnodes_str is None:
            msg = "$SLURM_NNODES not set!"
            self._log.error(msg)
            raise RuntimeError(msg)
        slurm_nnodes = int(slurm_nnodes_str)

        # $SLURM_CPUS_ON_NODE = Number of cores per node (physically)
        slurm_cpus_on_node_str = os.environ.get('SLURM_CPUS_ON_NODE')
        if slurm_cpus_on_node_str is None:
            msg = "$SLURM_CPUS_ON_NODE not set!"
            self._log.error(msg)
            raise RuntimeError(msg)
        slurm_cpus_on_node = int(slurm_cpus_on_node_str)

        # Sanity check: the total core count cannot exceed nodes * cores/node.
        # BUGFIX: the warning used to print '<=', i.e. the condition that
        # holds in the *healthy* case, which made the log message misleading.
        if slurm_nprocs > slurm_nnodes * slurm_cpus_on_node:
            self._log.warning("$SLURM_NPROCS(%d) > $SLURM_NNODES(%d) * $SLURM_CPUS_ON_NODE(%d)",
                              slurm_nprocs, slurm_nnodes, slurm_cpus_on_node)

        # Sanity check: node count must match the expanded node list length.
        if slurm_nnodes != len(slurm_nodes):
            self._log.error("$SLURM_NNODES(%d) != len($SLURM_NODELIST)(%d)",
                            slurm_nnodes, len(slurm_nodes))

        # Report the physical number of cores or the total number of cores
        # in case of a single partial node allocation.
        self.cores_per_node = min(slurm_cpus_on_node, slurm_nprocs)
        self.node_list = slurm_nodes
# ==============================================================================
#
class SGELRMS(LRMS):
    """LRMS backend for (Sun/Univa) Grid Engine, driven by $PE_HOSTFILE."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, requested_cores):
        LRMS.__init__(self, name, config, logger, requested_cores)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        """Parse $PE_HOSTFILE ('<host> <slots> ...' per line) and set
        self.node_list and self.cores_per_node.

        Raises RuntimeError when $PE_HOSTFILE is unset.
        """
        sge_hostfile = os.environ.get('PE_HOSTFILE')
        if sge_hostfile is None:
            msg = "$PE_HOSTFILE not set!"
            self._log.error(msg)
            raise RuntimeError(msg)

        # SGE core configuration might be different than what multiprocessing
        # announces
        # Alternative: "qconf -sq all.q|awk '/^slots *[0-9]+$/{print $2}'"

        # BUGFIX: read the hostfile exactly once and close it properly
        # (it used to be opened twice and the handles were never closed).
        with open(sge_hostfile) as f:
            entries = [line.split() for line in f]

        # Keep only unique node names
        sge_nodes = list(set([entry[0] for entry in entries]))
        self._log.info("Found PE_HOSTFILE %s. Expanded to: %s", sge_hostfile, sge_nodes)

        # Use the smallest slot count seen as the per-node core count
        sge_core_counts = list(set([int(entry[1]) for entry in entries]))
        sge_cores_per_node = min(sge_core_counts)
        self._log.info("Found unique core counts: %s Using: %d", sge_core_counts, sge_cores_per_node)

        self.node_list = sge_nodes
        self.cores_per_node = sge_cores_per_node
# ==============================================================================
#
class LSFLRMS(LRMS):
    """LRMS backend for IBM LSF, driven by $LSB_DJOB_HOSTFILE and
    $LSB_MCPU_HOSTS."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, requested_cores):
        LRMS.__init__(self, name, config, logger, requested_cores)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        """Derive node list and cores-per-node from the LSF environment.

        Raises RuntimeError when a required variable is unset.
        """
        lsf_hostfile = os.environ.get('LSB_DJOB_HOSTFILE')
        if lsf_hostfile is None:
            msg = "$LSB_DJOB_HOSTFILE not set!"
            self._log.error(msg)
            raise RuntimeError(msg)

        lsb_mcpu_hosts = os.environ.get('LSB_MCPU_HOSTS')
        if lsb_mcpu_hosts is None:
            msg = "$LSB_MCPU_HOSTS not set!"
            self._log.error(msg)
            raise RuntimeError(msg)

        # parse LSF hostfile
        # format:
        # <hostnameX>
        # <hostnameX>
        # <hostnameY>
        # <hostnameY>
        #
        # There are in total "-n" entries (number of tasks)
        # and "-R" entries per host (tasks per host).
        # (That results in "-n" / "-R" unique hosts)
        #
        # BUGFIX: close the hostfile properly (handle used to leak).
        with open(lsf_hostfile) as f:
            lsf_nodes = [line.strip() for line in f]
        self._log.info("Found LSB_DJOB_HOSTFILE %s. Expanded to: %s",
                       lsf_hostfile, lsf_nodes)
        lsf_node_list = list(set(lsf_nodes))

        # Grab the core (slot) count from the environment
        # Format: hostX N hostY N hostZ N
        # (list comprehension instead of map() keeps this a real list on
        # both Python 2 and 3)
        lsf_cores_count_list = [int(x) for x in lsb_mcpu_hosts.split()[1::2]]
        lsf_core_counts = list(set(lsf_cores_count_list))
        lsf_cores_per_node = min(lsf_core_counts)
        self._log.info("Found unique core counts: %s Using: %d",
                       lsf_core_counts, lsf_cores_per_node)

        self.node_list = lsf_node_list
        self.cores_per_node = lsf_cores_per_node
# ==============================================================================
#
class LoadLevelerLRMS(LRMS):
    """LRMS backend for IBM LoadLeveler, including Blue Gene/Q support.

    On BG/Q the allocation is a 5-dimensional torus (dimensions A-E);
    the tables below encode the static wiring of that torus so node names
    can be derived by walking the topology rather than by querying it.
    """

    # --------------------------------------------------------------------------
    #
    # BG/Q Topology of Nodes within a Board
    #
    # For each node (0..31): the neighbouring node reached by one hop in
    # each of the five torus dimensions A-E.
    #
    BGQ_BOARD_TOPO = {
        0: {'A': 29, 'B': 3, 'C': 1, 'D': 12, 'E': 7},
        1: {'A': 28, 'B': 2, 'C': 0, 'D': 13, 'E': 6},
        2: {'A': 31, 'B': 1, 'C': 3, 'D': 14, 'E': 5},
        3: {'A': 30, 'B': 0, 'C': 2, 'D': 15, 'E': 4},
        4: {'A': 25, 'B': 7, 'C': 5, 'D': 8, 'E': 3},
        5: {'A': 24, 'B': 6, 'C': 4, 'D': 9, 'E': 2},
        6: {'A': 27, 'B': 5, 'C': 7, 'D': 10, 'E': 1},
        7: {'A': 26, 'B': 4, 'C': 6, 'D': 11, 'E': 0},
        8: {'A': 21, 'B': 11, 'C': 9, 'D': 4, 'E': 15},
        9: {'A': 20, 'B': 10, 'C': 8, 'D': 5, 'E': 14},
        10: {'A': 23, 'B': 9, 'C': 11, 'D': 6, 'E': 13},
        11: {'A': 22, 'B': 8, 'C': 10, 'D': 7, 'E': 12},
        12: {'A': 17, 'B': 15, 'C': 13, 'D': 0, 'E': 11},
        13: {'A': 16, 'B': 14, 'C': 12, 'D': 1, 'E': 10},
        14: {'A': 19, 'B': 13, 'C': 15, 'D': 2, 'E': 9},
        15: {'A': 18, 'B': 12, 'C': 14, 'D': 3, 'E': 8},
        16: {'A': 13, 'B': 19, 'C': 17, 'D': 28, 'E': 23},
        17: {'A': 12, 'B': 18, 'C': 16, 'D': 29, 'E': 22},
        18: {'A': 15, 'B': 17, 'C': 19, 'D': 30, 'E': 21},
        19: {'A': 14, 'B': 16, 'C': 18, 'D': 31, 'E': 20},
        20: {'A': 9, 'B': 23, 'C': 21, 'D': 24, 'E': 19},
        21: {'A': 8, 'B': 22, 'C': 20, 'D': 25, 'E': 18},
        22: {'A': 11, 'B': 21, 'C': 23, 'D': 26, 'E': 17},
        23: {'A': 10, 'B': 20, 'C': 22, 'D': 27, 'E': 16},
        24: {'A': 5, 'B': 27, 'C': 25, 'D': 20, 'E': 31},
        25: {'A': 4, 'B': 26, 'C': 24, 'D': 21, 'E': 30},
        26: {'A': 7, 'B': 25, 'C': 27, 'D': 22, 'E': 29},
        27: {'A': 6, 'B': 24, 'C': 26, 'D': 23, 'E': 28},
        28: {'A': 1, 'B': 31, 'C': 29, 'D': 16, 'E': 27},
        29: {'A': 0, 'B': 30, 'C': 28, 'D': 17, 'E': 26},
        30: {'A': 3, 'B': 29, 'C': 31, 'D': 18, 'E': 25},
        31: {'A': 2, 'B': 28, 'C': 30, 'D': 19, 'E': 24},
    }

    # --------------------------------------------------------------------------
    #
    # BG/Q Config
    #
    BGQ_CORES_PER_NODE = 16
    BGQ_NODES_PER_BOARD = 32  # NODE == Compute Card == Chip module
    BGQ_BOARDS_PER_MIDPLANE = 16  # NODE BOARD == NODE CARD
    BGQ_MIDPLANES_PER_RACK = 2

    # --------------------------------------------------------------------------
    #
    # Default mapping = "ABCDE(T)"
    #
    # http://www.redbooks.ibm.com/redbooks/SG247948/wwhelp/wwhimpl/js/html/wwhelp.htm
    #
    BGQ_MAPPING = "ABCDE"

    # --------------------------------------------------------------------------
    #
    # Board labels (Rack, Midplane, Node)
    #
    BGQ_BOARD_LABELS = ['R', 'M', 'N']

    # --------------------------------------------------------------------------
    #
    # Dimensions of a (sub-)block
    #
    BGQ_DIMENSION_LABELS = ['A', 'B', 'C', 'D', 'E']

    # --------------------------------------------------------------------------
    #
    # Supported sub-block sizes (number of nodes).
    # This influences the effectiveness of mixed-size allocations
    # (and might even be a hard requirement from a topology standpoint).
    #
    # TODO: Do we actually need to restrict our sub-block sizes to this set?
    #
    BGQ_SUPPORTED_SUB_BLOCK_SIZES = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]

    # --------------------------------------------------------------------------
    #
    # Mapping of starting corners.
    #
    # "board" -> "node"
    #
    # Ordering: ['E', 'D', 'DE', etc.]
    #
    # TODO: Is this independent of the mapping?
    #
    BGQ_BLOCK_STARTING_CORNERS = {
        0: 0,
        4: 29,
        8: 4,
        12: 25
    }

    # --------------------------------------------------------------------------
    #
    # BG/Q Topology of Boards within a Midplane
    #
    # For each board (0..15): the neighbouring board reached by one hop
    # in each of the four inter-board dimensions A-D.
    #
    BGQ_MIDPLANE_TOPO = {
        0: {'A': 4, 'B': 8, 'C': 1, 'D': 2},
        1: {'A': 5, 'B': 9, 'C': 0, 'D': 3},
        2: {'A': 6, 'B': 10, 'C': 3, 'D': 0},
        3: {'A': 7, 'B': 11, 'C': 2, 'D': 1},
        4: {'A': 0, 'B': 12, 'C': 5, 'D': 6},
        5: {'A': 1, 'B': 13, 'C': 4, 'D': 7},
        6: {'A': 2, 'B': 14, 'C': 7, 'D': 4},
        7: {'A': 3, 'B': 15, 'C': 6, 'D': 5},
        8: {'A': 12, 'B': 0, 'C': 9, 'D': 10},
        9: {'A': 13, 'B': 1, 'C': 8, 'D': 11},
        10: {'A': 14, 'B': 2, 'C': 11, 'D': 8},
        11: {'A': 15, 'B': 3, 'C': 10, 'D': 9},
        12: {'A': 8, 'B': 4, 'C': 13, 'D': 14},
        13: {'A': 9, 'B': 5, 'C': 12, 'D': 15},
        14: {'A': 10, 'B': 6, 'C': 15, 'D': 12},
        15: {'A': 11, 'B': 7, 'C': 14, 'D': 13},
    }

    # --------------------------------------------------------------------------
    #
    # Shape of whole BG/Q Midplane
    #
    BGQ_MIDPLANE_SHAPE = {'A': 4, 'B': 4, 'C': 4, 'D': 4, 'E': 2}  # '4x4x4x4x2'
# --------------------------------------------------------------------------
#
    def __init__(self, name, config, logger, requested_cores):
        # Torus-scheduler state; filled in by _configure() for BG/Q runs,
        # stays None for plain (hostfile based) LoadLeveler runs.
        # Must be initialized *before* LRMS.__init__, which calls _configure().
        self.torus_block = None
        self.loadl_bg_block = None
        self.shape_table = None
        self.torus_dimension_labels = None

        LRMS.__init__(self, name, config, logger, requested_cores)
# --------------------------------------------------------------------------
#
    def _configure(self):
        """Configure from either a LoadLeveler hostfile (classic clusters)
        or the BG/Q block environment (Blue Gene/Q).

        Sets self.node_list and self.cores_per_node; on BG/Q additionally
        fills self.torus_block, self.shape_table and
        self.torus_dimension_labels for the torus scheduler.

        Raises RuntimeError on missing environment or unparsable llq output.
        """
        loadl_node_list = None
        loadl_cpus_per_node = None

        # Determine method for determining hosts,
        # either through hostfile or BG/Q environment.
        loadl_hostfile = os.environ.get('LOADL_HOSTFILE')
        self.loadl_bg_block = os.environ.get('LOADL_BG_BLOCK')
        if loadl_hostfile is None and self.loadl_bg_block is None:
            msg = "Neither $LOADL_HOSTFILE or $LOADL_BG_BLOCK set!"
            self._log.error(msg)
            raise RuntimeError(msg)

        # Determine the size of the pilot allocation
        if loadl_hostfile is not None:
            # Non Blue Gene Load Leveler installation.

            loadl_total_tasks_str = os.environ.get('LOADL_TOTAL_TASKS')
            if loadl_total_tasks_str is None:
                msg = "$LOADL_TOTAL_TASKS not set!"
                self._log.error(msg)
                raise RuntimeError(msg)
            else:
                loadl_total_tasks = int(loadl_total_tasks_str)

            # Construct the host list
            # NOTE(review): the file handle is never closed explicitly.
            loadl_nodes = [line.strip() for line in open(loadl_hostfile)]
            self._log.info("Found LOADL_HOSTFILE %s. Expanded to: %s",
                           loadl_hostfile, loadl_nodes)
            loadl_node_list = list(set(loadl_nodes))

            # Verify that $LLOAD_TOTAL_TASKS == len($LOADL_HOSTFILE)
            if loadl_total_tasks != len(loadl_nodes):
                self._log.error("$LLOAD_TOTAL_TASKS(%d) != len($LOADL_HOSTFILE)(%d)",
                                loadl_total_tasks, len(loadl_nodes))

            # Determine the number of cpus per node. Assume:
            # cores_per_node = lenght(nodefile) / len(unique_nodes_in_nodefile)
            # NOTE(review): integer division under Python 2.
            loadl_cpus_per_node = len(loadl_nodes) / len(loadl_node_list)

        elif self.loadl_bg_block is not None:
            # Blue Gene specific.
            loadl_bg_midplane_list_str = None
            loadl_bg_block_size_str = None

            loadl_job_name = os.environ.get('LOADL_JOB_NAME')
            if loadl_job_name is None:
                msg = "$LOADL_JOB_NAME not set!"
                self._log.error(msg)
                raise RuntimeError(msg)

            # Get the board list and block shape from 'llq -l' output
            output = subprocess.check_output(["llq", "-l", loadl_job_name])
            loadl_bg_board_list_str = None
            loadl_bg_block_shape_str = None
            for line in output.splitlines():
                # Detect BG board list
                if "BG Node Board List: " in line:
                    loadl_bg_board_list_str = line.split(':')[1].strip()
                elif "BG Midplane List: " in line:
                    loadl_bg_midplane_list_str = line.split(':')[1].strip()
                elif "BG Shape Allocated: " in line:
                    loadl_bg_block_shape_str = line.split(':')[1].strip()
                elif "BG Size Allocated: " in line:
                    loadl_bg_block_size_str = line.split(':')[1].strip()
            if not loadl_bg_board_list_str:
                msg = "No board list found in llq output!"
                self._log.error(msg)
                raise RuntimeError(msg)
            self._log.debug("BG Node Board List: %s" % loadl_bg_board_list_str)
            if not loadl_bg_midplane_list_str:
                msg = "No midplane list found in llq output!"
                self._log.error(msg)
                raise RuntimeError(msg)
            self._log.debug("BG Midplane List: %s" % loadl_bg_midplane_list_str)
            if not loadl_bg_block_shape_str:
                msg = "No board shape found in llq output!"
                self._log.error(msg)
                raise RuntimeError(msg)
            self._log.debug("BG Shape Allocated: %s" % loadl_bg_block_shape_str)
            if not loadl_bg_block_size_str:
                msg = "No board size found in llq output!"
                self._log.error(msg)
                raise RuntimeError(msg)
            loadl_bg_block_size = int(loadl_bg_block_size_str)
            self._log.debug("BG Size Allocated: %d" % loadl_bg_block_size)

            # Build nodes data structure to be handled by Torus Scheduler
            try:
                self.torus_block = self._bgq_construct_block(
                    loadl_bg_block_shape_str, loadl_bg_board_list_str,
                    loadl_bg_block_size, loadl_bg_midplane_list_str)
            except Exception as e:
                # NOTE(review): e.message is a Python-2-only attribute.
                msg = "Couldn't construct block: %s" % e.message
                self._log.error(msg)
                raise RuntimeError(msg)
            self._log.debug("Torus block constructed:")
            for e in self.torus_block:
                self._log.debug("%s %s %s %s" %
                                (e[0], [e[1][key] for key in sorted(e[1])], e[2], e[3]))

            try:
                loadl_node_list = [entry[SchedulerTorus.TORUS_BLOCK_NAME] for entry in self.torus_block]
            except Exception as e:
                msg = "Couldn't construct node list."
                self._log.error(msg)
                raise RuntimeError(msg)
            #self._log.debug("Node list constructed: %s" % loadl_node_list)

            # Construct sub-block table
            try:
                self.shape_table = self._bgq_create_sub_block_shape_table(loadl_bg_block_shape_str)
            except Exception as e:
                msg = "Couldn't construct shape table: %s" % e.message
                self._log.error(msg)
                raise RuntimeError(msg)
            self._log.debug("Shape table constructed: ")
            for (size, dim) in [(key, self.shape_table[key]) for key in sorted(self.shape_table)]:
                self._log.debug("%s %s" % (size, [dim[key] for key in sorted(dim)]))

            # Determine the number of cpus per node
            loadl_cpus_per_node = self.BGQ_CORES_PER_NODE

            # BGQ Specific Torus labels
            self.torus_dimension_labels = self.BGQ_DIMENSION_LABELS

        self.node_list = loadl_node_list
        self.cores_per_node = loadl_cpus_per_node

        # NOTE(review): delay tied to issue #473 -- presumably a startup
        # race with other components; confirm before removing.
        self._log.debug("Sleeping for #473 ...")
        time.sleep(5)
        self._log.debug("Configure done")
# --------------------------------------------------------------------------
#
# Walk the block and return the node name for the given location
#
    def _bgq_nodename_by_loc(self, midplanes, board, location):
        """Walk the block topology and return the BG/Q node name
        ('R..-M.-N..-J..') for the given torus location.

        :param midplanes: list of {'R': rack, 'M': midplane} dicts
        :param board:     starting board index within the midplane
        :param location:  {'A'..'E': coordinate} dict
        """
        self._log.debug("Starting nodebyname - midplanes:%s, board:%d" % (midplanes, board))

        # Starting node within the starting board.
        node = self.BGQ_BLOCK_STARTING_CORNERS[board]

        # TODO: Does the order of walking matter?
        #       It might because of the starting blocks ...
        for dim in self.BGQ_DIMENSION_LABELS: # [::-1]:
            max_length = location[dim]
            self._log.debug("Within dim loop dim:%s, max_length: %d" % (dim, max_length))
            cur_length = 0
            # Loop while we are not at the final depth
            while cur_length < max_length:
                self._log.debug("beginning of while loop, cur_length: %d" % cur_length)

                # Even hops stay within the board, odd hops cross boards.
                if cur_length % 2 == 0:
                    # Stay within the board
                    node = self.BGQ_BOARD_TOPO[node][dim]
                else:
                    # We jump to another board.
                    self._log.debug("jumping to new board from board: %d, dim: %s)" % (board, dim))
                    board = self.BGQ_MIDPLANE_TOPO[board][dim]
                    self._log.debug("board is now: %d" % board)

                    # If we switch boards in the B dimension,
                    # we seem to "land" at the opposite E dimension.
                    if dim == 'B':
                        node = self.BGQ_BOARD_TOPO[node]['E']

                self._log.debug("node is now: %d" % node)

                # Increase the length for the next iteration
                cur_length += 1

            self._log.debug("Wrapping inside dim loop dim:%s" % (dim))

        # TODO: This will work for midplane expansion in one dimension only
        # NOTE(review): integer division under Python 2; would be a float
        # (and fail as an index) under Python 3.
        midplane_idx = max(location.values()) / 4
        rack = midplanes[midplane_idx]['R']
        midplane = midplanes[midplane_idx]['M']

        nodename = 'R%.2d-M%.1d-N%.2d-J%.2d' % (rack, midplane, board, node)
        self._log.debug("from location %s constructed node name: %s, left at board: %d" % (self.loc2str(location), nodename, board))
        return nodename
# --------------------------------------------------------------------------
#
# Convert the board string as given by llq into a board structure
#
# E.g. 'R00-M1-N08,R00-M1-N09,R00-M1-N10,R00-M0-N11' =>
# [{'R': 0, 'M': 1, 'N': 8}, {'R': 0, 'M': 1, 'N': 9},
# {'R': 0, 'M': 1, 'N': 10}, {'R': 0, 'M': 0, 'N': 11}]
#
def _bgq_str2boards(self, boards_str):
boards = boards_str.split(',')
board_dict_list = []
for board in boards:
elements = board.split('-')
board_dict = {}
for l, e in zip(self.BGQ_BOARD_LABELS, elements):
board_dict[l] = int(e.split(l)[1])
board_dict_list.append(board_dict)
return board_dict_list
# --------------------------------------------------------------------------
#
# Convert the midplane string as given by llq into a midplane structure
#
# E.g. 'R04-M0,R04-M1' =>
# [{'R': 4, 'M': 0}, {'R': 4, 'M': 1}]
#
#
def _bgq_str2midplanes(self, midplane_str):
midplanes = midplane_str.split(',')
midplane_dict_list = []
for midplane in midplanes:
elements = midplane.split('-')
midplane_dict = {}
# Take the first two labels
for l, e in zip(self.BGQ_BOARD_LABELS[:2], elements):
midplane_dict[l] = int(e.split(l)[1])
midplane_dict_list.append(midplane_dict)
return midplane_dict_list
# --------------------------------------------------------------------------
#
# Convert the string as given by llq into a block shape structure:
#
# E.g. '1x2x3x4x5' => {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5}
#
def _bgq_str2shape(self, shape_str):
# Get the lengths of the shape
shape_lengths = shape_str.split('x', 4)
shape_dict = {}
for dim, length in zip(self.BGQ_DIMENSION_LABELS, shape_lengths):
shape_dict[dim] = int(length)
return shape_dict
# --------------------------------------------------------------------------
#
# Multiply two shapes
#
def _multiply_shapes(self, shape1, shape2):
result = {}
for dim in self.BGQ_DIMENSION_LABELS:
try:
val1 = shape1[dim]
except KeyError:
val1 = 1
try:
val2 = shape2[dim]
except KeyError:
val2 = 1
result[dim] = val1 * val2
return result
# --------------------------------------------------------------------------
#
# Convert location dict into a tuple string
# E.g. {'A': 1, 'C': 4, 'B': 1, 'E': 2, 'D': 4} => '(1,4,1,2,4)'
#
def loc2str(self, loc):
return str(tuple(loc[dim] for dim in self.BGQ_DIMENSION_LABELS))
# --------------------------------------------------------------------------
#
# Convert a shape dict into string format
#
# E.g. {'A': 1, 'C': 4, 'B': 1, 'E': 2, 'D': 4} => '1x4x1x2x4'
#
def shape2str(self, shape):
shape_str = ''
for l in self.BGQ_DIMENSION_LABELS:
# Get the corresponding count
shape_str += str(shape[l])
# Add an 'x' behind all but the last label
if l in self.BGQ_DIMENSION_LABELS[:-1]:
shape_str += 'x'
return shape_str
# --------------------------------------------------------------------------
#
# Return list of nodes that make up the block
#
# Format: [(index, location, nodename, status), (i, c, n, s), ...]
#
# TODO: This function and _bgq_nodename_by_loc should be changed so that we
# only walk the torus once?
#
def _bgq_get_block(self, midplanes, board, shape):
self._log.debug("Shape: %s", shape)
nodes = []
index = 0
for a in range(shape['A']):
for b in range(shape['B']):
for c in range(shape['C']):
for d in range(shape['D']):
for e in range(shape['E']):
location = {'A': a, 'B': b, 'C': c, 'D': d, 'E': e}
nodename = self._bgq_nodename_by_loc(midplanes, board, location)
nodes.append([index, location, nodename, FREE])
index += 1
return nodes
# --------------------------------------------------------------------------
#
# Use block shape and board list to construct block structure
#
# The 5 dimensions are denoted by the letters A, B, C, D, and E, T for the core (0-15).
# The latest dimension E is always 2, and is contained entirely within a midplane.
# For any compute block, compute nodes (as well midplanes for large blocks) are combined in 4 dimensions,
# only 4 dimensions need to be considered.
#
# 128 nodes: BG Shape Allocated: 2x2x4x4x2
# 256 nodes: BG Shape Allocated: 4x2x4x4x2
# 512 nodes: BG Shape Allocated: 1x1x1x1
# 1024 nodes: BG Shape Allocated: 1x1x1x2
#
    def _bgq_construct_block(self, block_shape_str, boards_str,
                             block_size, midplane_list_str):
        """Use the llq-reported block shape and board/midplane lists to
        construct the torus block structure for the scheduler.

        Three cases by block size: multiple midplanes (>= 1024 nodes),
        exactly one midplane (512), or a sub-midplane board set (< 512).
        """
        llq_shape = self._bgq_str2shape(block_shape_str)

        # TODO: Could check this, but currently _shape2num is part of the other class
        #if self._shape2num_nodes(llq_shape) != block_size:
        #    self._log.error("Block Size doesn't match Block Shape")

        # If the block is equal to or greater than a Midplane,
        # then there is no board list provided.
        # But because at that size, we have only full midplanes,
        # we can construct it.
        if block_size >= 1024:
            #raise NotImplementedError("Currently multiple midplanes are not yet supported.")

            # BG Size: 1024, BG Shape: 1x1x1x2, BG Midplane List: R04-M0,R04-M1
            midplanes = self._bgq_str2midplanes(midplane_list_str)

            # Start of at the "lowest" available rack/midplane/board
            # TODO: No other explanation than that this seems to be the convention?
            # TODO: Can we safely assume that they are sorted?
            #rack = midplane_dict_list[0]['R']
            #midplane = midplane_dict_list[0]['M']
            board = 0

            # block_shape = llq_shape * BGQ_MIDPLANE_SHAPE
            block_shape = self._multiply_shapes(self.BGQ_MIDPLANE_SHAPE, llq_shape)
            self._log.debug("Resulting shape after multiply: %s" % block_shape)

        elif block_size == 512:
            # Full midplane
            # BG Size: 1024, BG Shape: 1x1x1x2, BG Midplane List: R04-M0,R04-M1
            midplanes = self._bgq_str2midplanes(midplane_list_str)

            # Start of at the "lowest" available rack/midplane/board
            # TODO: No other explanation than that this seems to be the convention?
            #rack = midplane_dict_list[0]['R'] # Assume they are all equal
            #midplane = min([entry['M'] for entry in midplane_dict_list])
            board = 0

            block_shape = self.BGQ_MIDPLANE_SHAPE

        else:
            # Within single midplane, < 512 nodes
            board_dict_list = self._bgq_str2boards(boards_str)
            self._log.debug("Board dict list:\n%s", '\n'.join([str(x) for x in board_dict_list]))

            # All boards share one rack/midplane here; take it from the first.
            midplanes = [{'R': board_dict_list[0]['R'],
                          'M': board_dict_list[0]['M']}]

            # Start of at the "lowest" available board.
            # TODO: No other explanation than that this seems to be the convention?
            board = min([entry['N'] for entry in board_dict_list])

            block_shape = llq_shape

        # From here its all equal (assuming our walker does the walk and not just the talk!)
        block = self._bgq_get_block(midplanes, board, block_shape)

        # TODO: Check returned block:
        #       - Length
        #       - No duplicates
        return block
# --------------------------------------------------------------------------
#
# Construction of sub-block shapes based on overall block allocation.
#
# Depending on the size of the total allocated block, the maximum size
# of a subblock can be 512 nodes.
#
#
def _bgq_create_sub_block_shape_table(self, shape_str):
# Convert the shape string into dict structure
#
# For < 512 nodes: the dimensions within a midplane (AxBxCxDxE)
# For >= 512 nodes: the dimensions between the midplanes (AxBxCxD)
#
if len(shape_str.split('x')) == 5:
block_shape = self._bgq_str2shape(shape_str)
elif len(shape_str.split('x')) == 4:
block_shape = self.BGQ_MIDPLANE_SHAPE
else:
raise ValueError('Invalid shape string: %s' % shape_str)
# Dict to store the results
table = {}
# Create a sub-block dict with shape 1x1x1x1x1
sub_block_shape = {}
for l in self.BGQ_DIMENSION_LABELS:
sub_block_shape[l] = 1
# Look over all the dimensions starting at the most right
for dim in self.BGQ_MAPPING[::-1]:
while True:
# Calculate the number of nodes for the current shape
from operator import mul
num_nodes = reduce(mul, filter(lambda length: length != 0, sub_block_shape.values()))
if num_nodes in self.BGQ_SUPPORTED_SUB_BLOCK_SIZES:
table[num_nodes] = copy.copy(sub_block_shape)
else:
self._log.warning("Non supported sub-block size: %d.", num_nodes)
# Done with iterating this dimension
if sub_block_shape[dim] >= block_shape[dim]:
break
# Increase the length in this dimension for the next iteration.
if sub_block_shape[dim] == 1:
sub_block_shape[dim] = 2
elif sub_block_shape[dim] == 2:
sub_block_shape[dim] = 4
return table
# ==============================================================================
#
class ForkLRMS(LRMS):
    """LRMS backend that runs everything on localhost via fork."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, requested_cores):
        LRMS.__init__(self, name, config, logger, requested_cores)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        """Use localhost with the requested core count, sanity-checked
        against the physically detected cores (unless profiling)."""
        self._log.info("Using fork on localhost.")

        selected_cpus = self.requested_cores

        # When we profile the agent we want to fake any number of cores, so
        # skip the sanity checks in that case.
        # BUGFIX: the checks used to run *only* when RADICAL_PILOT_PROFILE
        # was set, which inverted the documented intent.
        if 'RADICAL_PILOT_PROFILE' not in os.environ:
            detected_cpus = multiprocessing.cpu_count()
            if detected_cpus < selected_cpus:
                self._log.warn("insufficient cores: using %d instead of requested %d.",
                               detected_cpus, selected_cpus)
                selected_cpus = detected_cpus
            elif detected_cpus > selected_cpus:
                self._log.warn("more cores available: using requested %d instead of available %d.",
                               selected_cpus, detected_cpus)

        self.node_list = ["localhost"]
        self.cores_per_node = selected_cpus
# ==============================================================================
#
# Worker Classes
#
# ==============================================================================
#
class ExecWorker(COMPONENT_TYPE):
    """
    Manage the creation of CU processes, and watch them until they are completed
    (one way or the other). The spawner thus moves the unit from
    PendingExecution to Executing, and then to a final state (or PendingStageOut
    of course).
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, agent, lrms, scheduler,
                 task_launcher, mpi_launcher, command_queue,
                 execution_queue, stageout_queue, update_queue,
                 schedule_queue, pilot_id, session_id):
        """Store the shared handles/queues and run the subclass'
        configure() hook."""
        rpu.prof('ExecWorker init')

        COMPONENT_TYPE.__init__(self)
        self._terminate = COMPONENT_MODE.Event()

        self.name = name
        self._config = config
        self._log = logger
        self._agent = agent
        self._lrms = lrms
        self._scheduler = scheduler
        self._task_launcher = task_launcher
        self._mpi_launcher = mpi_launcher
        self._command_queue = command_queue
        self._execution_queue = execution_queue
        self._stageout_queue = stageout_queue
        self._update_queue = update_queue
        self._schedule_queue = schedule_queue
        self._pilot_id = pilot_id
        self._session_id = session_id

        self.configure()

    # --------------------------------------------------------------------------
    #
    # This class-method creates the appropriate sub-class for the Launch Method.
    #
    @classmethod
    def create(cls, name, config, logger, spawner, agent, lrms, scheduler,
               task_launcher, mpi_launcher, command_queue,
               execution_queue, update_queue, schedule_queue,
               stageout_queue, pilot_id, session_id):
        """Factory: instantiate and start the ExecWorker subclass selected
        by 'spawner'.

        Raises TypeError when invoked on a subclass, ValueError for an
        unknown spawner.
        """
        # Make sure that we are the base-class!
        if cls != ExecWorker:
            raise TypeError("ExecWorker Factory only available to base class!")

        try:
            implementation = {
                SPAWNER_NAME_POPEN : ExecWorker_POPEN,
                SPAWNER_NAME_SHELL : ExecWorker_SHELL
            }[spawner]
            impl = implementation(name, config, logger, agent, lrms, scheduler,
                                  task_launcher, mpi_launcher, command_queue,
                                  execution_queue, stageout_queue, update_queue,
                                  schedule_queue, pilot_id, session_id)
            impl.start()
            return impl
        except KeyError:
            # BUGFIX: report the unknown *spawner* -- the old message printed
            # the worker name, which is not what failed the lookup.
            raise ValueError("ExecWorker spawner '%s' unknown!" % spawner)

    # --------------------------------------------------------------------------
    #
    def __del__(self):
        self.close()

    # --------------------------------------------------------------------------
    #
    def stop(self):
        """Signal the worker loop (and any watcher thread) to terminate."""
        rpu.prof('stop request')
        self._terminate.set()

    # --------------------------------------------------------------------------
    #
    def configure(self):
        # hook for subclass initialization
        pass

    # --------------------------------------------------------------------------
    #
    def close(self):
        # hook for subclass shutdown
        pass

    # --------------------------------------------------------------------------
    #
    def spawn(self, launcher, cu):
        """Launch one unit with the given launcher; subclasses must override."""
        raise NotImplementedError("spawn() not implemented for ExecWorker '%s'." % self.name)
# ==============================================================================
#
class ExecWorker_POPEN (ExecWorker) :
# --------------------------------------------------------------------------
#
    def __init__(self, name, config, logger, agent, lrms, scheduler,
                 task_launcher, mpi_launcher, command_queue,
                 execution_queue, stageout_queue, update_queue,
                 schedule_queue, pilot_id, session_id):
        # Popen based spawner: units run as subprocesses, observed by a
        # separate watcher thread.
        rpu.prof('ExecWorker init')

        # State shared with the watcher thread; must exist before the base
        # __init__ (which calls configure()) and before the thread starts.
        self._cus_to_watch = list()
        self._cus_to_cancel = list()
        self._watch_queue = QUEUE_TYPE ()
        self._cu_environment = self._populate_cu_environment()

        ExecWorker.__init__ (self, name, config, logger, agent, lrms, scheduler,
                             task_launcher, mpi_launcher, command_queue,
                             execution_queue, stageout_queue, update_queue,
                             schedule_queue, pilot_id, session_id)

        # run watcher thread (after full initialization)
        watcher_name = self.name.replace ('ExecWorker', 'ExecWatcher')
        self._watcher = threading.Thread(target = self._watch,
                                         name = watcher_name)
        self._watcher.start ()
# --------------------------------------------------------------------------
#
    def close(self):
        # shut down the watcher thread: raise the shared terminate flag,
        # then wait for the thread to drain and exit
        rpu.prof ('stop request')
        self._terminate.set()
        self._watcher.join()
# --------------------------------------------------------------------------
#
def _populate_cu_environment(self):
"""Derive the environment for the cu's from our own environment."""
# Get the environment of the agent
new_env = copy.deepcopy(os.environ)
#
# Mimic what virtualenv's "deactivate" would do
#
old_path = new_env.pop('_OLD_VIRTUAL_PATH', None)
if old_path:
new_env['PATH'] = old_path
old_home = new_env.pop('_OLD_VIRTUAL_PYTHONHOME', None)
if old_home:
new_env['PYTHON_HOME'] = old_home
old_ps = new_env.pop('_OLD_VIRTUAL_PS1', None)
if old_ps:
new_env['PS1'] = old_ps
new_env.pop('VIRTUAL_ENV', None)
return new_env
# --------------------------------------------------------------------------
#
def run(self):
rpu.prof('run')
try:
# report initial slot status
# TODO: Where does this abstraction belong? Scheduler!
self._log.debug(self._scheduler.slot_status())
while not self._terminate.is_set():
cu = self._execution_queue.get()
if not cu :
rpu.prof('get_cmd', msg="execution_queue to ExecWorker (wakeup)")
# 'None' is the wakeup signal
continue
cu['state'] = rp.EXECUTING
rpu.prof('get', msg="executing_queue to ExecutionWorker (%s)" % cu['state'], uid=cu['_id'])
try:
cu_list = rpu.blowup(self._config, cu, EXEC_WORKER)
for _cu in cu_list:
if _cu['description']['mpi']:
launcher = self._mpi_launcher
else :
launcher = self._task_launcher
if not launcher:
_cu['state'] = rp.FAILED
self._agent.update_unit_state(src = 'ExecWorker',
uid = _cu['_id'],
state = rp.FAILED,
msg = "no launcher (mpi=%s)" % _cu['description']['mpi'],
logger = self._log.error)
self._log.debug("Launching unit with %s (%s).", launcher.name, launcher.launch_command)
assert(_cu['opaque_slot']) # FIXME: no assert, but check
rpu.prof('ExecWorker unit launch', uid=_cu['_id'])
# Start a new subprocess to launch the unit
# TODO: This is scheduler specific
self.spawn(launcher=launcher, cu=_cu)
except Exception as e:
# append the startup error to the units stderr. This is
# not completely correct (as this text is not produced
# by the unit), but it seems the most intuitive way to
# communicate that error to the application/user.
cu['stderr'] += "\nPilot cannot start compute unit:\n%s\n%s" \
% (str(e), traceback.format_exc())
cu['state'] = rp.FAILED
cu['stderr'] += "\nPilot cannot start compute unit: '%s'" % e
# Free the Slots, Flee the Flots, Ree the Frots!
if cu['opaque_slot']:
self._scheduler.unschedule(cu)
cu['state'] = rp.FAILED
self._agent.update_unit_state(src = 'ExecWorker',
uid = cu['_id'],
state = rp.FAILED,
msg = "unit execution failed",
logger = self._log.exception)
except Exception as e:
self._log.exception("Error in ExecWorker loop (%s)" % e)
rpu.prof ('stop')
# --------------------------------------------------------------------------
#
def spawn(self, launcher, cu):
rpu.prof('ExecWorker spawn', uid=cu['_id'])
launch_script_name = '%s/radical_pilot_cu_launch_script.sh' % cu['workdir']
self._log.debug("Created launch_script: %s", launch_script_name)
with open(launch_script_name, "w") as launch_script:
launch_script.write('#!/bin/bash -l\n')
launch_script.write('\n# Change to working directory for unit\ncd %s\n' % cu['workdir'])
# Before the Big Bang there was nothing
if cu['description']['pre_exec']:
pre_exec_string = ''
if isinstance(cu['description']['pre_exec'], list):
for elem in cu['description']['pre_exec']:
pre_exec_string += "%s\n" % elem
else:
pre_exec_string += "%s\n" % cu['description']['pre_exec']
launch_script.write('# Pre-exec commands\n%s' % pre_exec_string)
# Create string for environment variable setting
if cu['description']['environment'] and \
cu['description']['environment'].keys():
env_string = 'export'
for key,val in cu['description']['environment'].iteritems():
env_string += ' %s=%s' % (key, val)
launch_script.write('# Environment variables\n%s\n' % env_string)
# unit Arguments (if any)
task_args_string = ''
if cu['description']['arguments']:
for arg in cu['description']['arguments']:
if not arg:
# ignore empty args
continue
arg = arg.replace('"', '\\"') # Escape all double quotes
if arg[0] == arg[-1] == "'" : # If a string is between outer single quotes,
task_args_string += '%s ' % arg # ... pass it as is.
else:
task_args_string += '"%s" ' % arg # Otherwise return between double quotes.
launch_script_hop = "/usr/bin/env RP_SPAWNER_HOP=TRUE %s" % launch_script_name
# The actual command line, constructed per launch-method
try:
launch_command, hop_cmd = \
launcher.construct_command(cu['description']['executable'],
task_args_string,
cu['description']['cores'],
launch_script_hop,
cu['opaque_slot'])
if hop_cmd : cmdline = hop_cmd
else : cmdline = launch_script_name
rpu.prof('launch script constructed', uid=cu['_id'])
except Exception as e:
msg = "Error in spawner (%s)" % e
self._log.exception(msg)
raise RuntimeError(msg)
launch_script.write('# The command to run\n%s\n' % launch_command)
# After the universe dies the infrared death, there will be nothing
if cu['description']['post_exec']:
post_exec_string = ''
if isinstance(cu['description']['post_exec'], list):
for elem in cu['description']['post_exec']:
post_exec_string += "%s\n" % elem
else:
post_exec_string += "%s\n" % cu['description']['post_exec']
launch_script.write('%s\n' % post_exec_string)
# done writing to launch script, get it ready for execution.
st = os.stat(launch_script_name)
os.chmod(launch_script_name, st.st_mode | stat.S_IEXEC)
_stdout_file_h = open(cu['stdout_file'], "w")
_stderr_file_h = open(cu['stderr_file'], "w")
self._log.info("Launching unit %s via %s in %s", cu['_id'], cmdline, cu['workdir'])
rpu.prof('spawning pass to popen', uid=cu['_id'])
proc = subprocess.Popen(args = cmdline,
bufsize = 0,
executable = None,
stdin = None,
stdout = _stdout_file_h,
stderr = _stderr_file_h,
preexec_fn = None,
close_fds = True,
shell = True,
cwd = cu['workdir'],
env = self._cu_environment,
universal_newlines = False,
startupinfo = None,
creationflags = 0)
rpu.prof('spawning passed to popen', uid=cu['_id'])
cu['started'] = rpu.timestamp()
cu['state'] = rp.EXECUTING
cu['proc'] = proc
# register for state update and watching
cu['state'] = rp.EXECUTING
self._agent.update_unit_state(src = 'ExecWorker',
uid = cu['_id'],
state = rp.EXECUTING,
msg = "unit execution start")
cu_list = rpu.blowup(self._config, cu, WATCH_QUEUE)
for _cu in cu_list :
rpu.prof('put', msg="ExecWorker to watcher (%s)" % _cu['state'], uid=_cu['_id'])
self._watch_queue.put(_cu)
# --------------------------------------------------------------------------
#
def _watch(self):
rpu.prof('run')
try:
while not self._terminate.is_set():
cus = list()
# See if there are cancel requests, or new units to watch
try:
command = self._command_queue.get_nowait()
rpu.prof('get_cmd', msg="command_queue to ExecWatcher (%s)" % command[COMMAND_TYPE])
if command[COMMAND_TYPE] == COMMAND_CANCEL_COMPUTE_UNIT:
self._cus_to_cancel.append(command[COMMAND_ARG])
else:
raise RuntimeError("Command %s not applicable in this context." %
command[COMMAND_TYPE])
except Queue.Empty:
# do nothing if we don't have any queued commands
pass
try:
# we don't want to only wait for one CU -- then we would
# pull CU state too frequently. OTOH, we also don't want to
# learn about CUs until all slots are filled, because then
# we may not be able to catch finishing CUs in time -- so
# there is a fine balance here. Balance means 100 (FIXME).
# rpu.prof('ExecWorker popen watcher pull cu from queue')
MAX_QUEUE_BULKSIZE = 100
while len(cus) < MAX_QUEUE_BULKSIZE :
cus.append (self._watch_queue.get_nowait())
except Queue.Empty:
# nothing found -- no problem, see if any CUs finshed
pass
# add all cus we found to the watchlist
for cu in cus :
rpu.prof('get', msg="ExecWatcher picked up unit", uid=cu['_id'])
cu_list = rpu.blowup(self._config, cu, WATCHER)
for _cu in cu_list :
self._cus_to_watch.append (_cu)
# check on the known cus.
action = self._check_running()
if not action and not cus :
# nothing happend at all! Zzz for a bit.
time.sleep(self._config['queue_poll_sleeptime'])
except Exception as e:
self._log.exception("Error in ExecWorker watch loop (%s)" % e)
rpu.prof ('stop')
# --------------------------------------------------------------------------
# Iterate over all running tasks, check their status, and decide on the
# next step. Also check for a requested cancellation for the tasks.
def _check_running(self):
action = 0
for cu in self._cus_to_watch:
# poll subprocess object
exit_code = cu['proc'].poll()
now = rpu.timestamp()
if exit_code is None:
# Process is still running
if cu['_id'] in self._cus_to_cancel:
# FIXME: there is a race condition between the state poll
# above and the kill command below. We probably should pull
# state after kill again?
# We got a request to cancel this cu
action += 1
cu['proc'].kill()
self._cus_to_cancel.remove(cu['_id'])
self._scheduler.unschedule(cu)
cu['state'] = rp.CANCELED
self._agent.update_unit_state(src = 'ExecWatcher',
uid = cu['_id'],
state = rp.CANCELED,
msg = "unit execution canceled")
rpu.prof('final', msg="execution canceled", uid=cu['_id'])
# NOTE: this is final, cu will not be touched anymore
cu = None
else:
rpu.prof('execution complete', uid=cu['_id'])
# we have a valid return code -- unit is final
action += 1
self._log.info("Unit %s has return code %s.", cu['_id'], exit_code)
cu['exit_code'] = exit_code
cu['finished'] = now
# Free the Slots, Flee the Flots, Ree the Frots!
self._cus_to_watch.remove(cu)
self._scheduler.unschedule(cu)
if exit_code != 0:
# The unit failed, no need to deal with its output data.
cu['state'] = rp.FAILED
self._agent.update_unit_state(src = 'ExecWatcher',
uid = cu['_id'],
state = rp.FAILED,
msg = "unit execution failed")
rpu.prof('final', msg="execution failed", uid=cu['_id'])
# NOTE: this is final, cu will not be touched anymore
cu = None
else:
# The unit finished cleanly, see if we need to deal with
# output data. We always move to stageout, even if there are no
# directives -- at the very least, we'll upload stdout/stderr
cu['state'] = rp.STAGING_OUTPUT
self._agent.update_unit_state(src = 'ExecWatcher',
uid = cu['_id'],
state = rp.STAGING_OUTPUT,
msg = "unit execution completed")
cu_list = rpu.blowup(self._config, cu, STAGEOUT_QUEUE)
for _cu in cu_list :
rpu.prof('put', msg="ExecWatcher to stageout_queue (%s)" % _cu['state'], uid=_cu['_id'])
self._stageout_queue.put(_cu)
return action
# ==============================================================================
#
class ExecWorker_SHELL(ExecWorker):
    """
    Runs compute units through a pair of persistent PTY shells
    (saga.utils.pty_shell): one shell spawns the units via a wrapper script,
    the other monitors their state and reports events which a watcher thread
    translates into unit state updates.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, agent, lrms, scheduler,
                 task_launcher, mpi_launcher, command_queue,
                 execution_queue, stageout_queue, update_queue,
                 schedule_queue, pilot_id, session_id):

        ExecWorker.__init__(self, name, config, logger, agent, lrms, scheduler,
                            task_launcher, mpi_launcher, command_queue,
                            execution_queue, stageout_queue, update_queue,
                            schedule_queue, pilot_id, session_id)

    # --------------------------------------------------------------------------
    #
    def run(self):
        """Bootstrap launcher/monitor shells, then pull and spawn units."""

        rpu.prof('run')

        # Mimic what virtualenv's "deactivate" would do
        self._deactivate = "# deactivate pilot virtualenv\n"

        old_path = os.environ.get('_OLD_VIRTUAL_PATH', None)
        old_home = os.environ.get('_OLD_VIRTUAL_PYTHONHOME', None)
        old_ps1  = os.environ.get('_OLD_VIRTUAL_PS1', None)

        if old_path: self._deactivate += 'export PATH="%s"\n' % old_path
        if old_home: self._deactivate += 'export PYTHON_HOME="%s"\n' % old_home
        if old_ps1:  self._deactivate += 'export PS1="%s"\n' % old_ps1

        self._deactivate += 'unset VIRTUAL_ENV\n\n'

        if old_path: os.environ['PATH'] = old_path
        if old_home: os.environ['PYTHON_HOME'] = old_home
        if old_ps1:  os.environ['PS1'] = old_ps1

        if 'VIRTUAL_ENV' in os.environ:
            del(os.environ['VIRTUAL_ENV'])

        # simplify shell startup / prompt detection
        os.environ['PS1'] = '$ '

        # the registry keeps track of units to watch, indexed by their shell
        # spawner process ID.  As the registry is shared between the spawner
        # and watcher thread, we use a lock while accessing it.
        self._registry      = dict()
        self._registry_lock = threading.RLock()

        self._cached_events = list()  # keep monitoring events for pid's which
                                      # are not yet known

        # get some threads going -- those will do all the work.
        import saga.utils.pty_shell as sups
        self.launcher_shell = sups.PTYShell("fork://localhost/")
        self.monitor_shell  = sups.PTYShell("fork://localhost/")

        # run the spawner on the shells
        self.workdir = "%s/spawner.%s" % (os.getcwd(), self.name)
        rec_makedir(self.workdir)

        ret, out, _ = self.launcher_shell.run_sync \
                          ("/bin/sh %s/agent/radical-pilot-spawner.sh %s" \
                          % (os.path.dirname(rp.__file__), self.workdir))
        if ret != 0:
            # FIX: use '%' interpolation -- RuntimeError does not fill in
            # printf-style args passed as separate arguments
            raise RuntimeError("failed to bootstrap launcher: (%s)(%s)" % (ret, out))

        ret, out, _ = self.monitor_shell.run_sync \
                          ("/bin/sh %s/agent/radical-pilot-spawner.sh %s" \
                          % (os.path.dirname(rp.__file__), self.workdir))
        if ret != 0:
            raise RuntimeError("failed to bootstrap monitor: (%s)(%s)" % (ret, out))

        # run watcher thread
        watcher_name  = self.name.replace('ExecWorker', 'ExecWatcher')
        self._watcher = threading.Thread(target = self._watch,
                                         name   = watcher_name)
        self._watcher.start()

        try:
            # report initial slot status
            # TODO: Where does this abstraction belong?  Scheduler!
            self._log.debug(self._scheduler.slot_status())

            while not self._terminate.is_set():

              # rpu.prof('ExecWorker pull cu from queue')
                cu = self._execution_queue.get()

                if not cu:
                    rpu.prof('get_cmd', msg="execution_queue to ExecWorker (wakeup)")
                    # 'None' is the wakeup signal
                    continue

                cu['state'] = rp.EXECUTING

                rpu.prof('get', msg="executing_queue to ExecutionWorker (%s)" % cu['state'], uid=cu['_id'])

                try:
                    cu_list = rpu.blowup(self._config, cu, EXEC_WORKER)

                    for _cu in cu_list:

                        if _cu['description']['mpi']:
                            launcher = self._mpi_launcher
                        else:
                            launcher = self._task_launcher

                        if not launcher:
                            # FIX: fail the unit and free its slots right here.
                            # The original fell through and dereferenced
                            # launcher (None) in the debug call below.
                            _cu['state'] = rp.FAILED
                            self._agent.update_unit_state(src    = 'ExecWorker',
                                                          uid    = _cu['_id'],
                                                          state  = rp.FAILED,
                                                          msg    = "no launcher (mpi=%s)" % _cu['description']['mpi'],
                                                          logger = self._log.error)
                            if _cu['opaque_slot']:
                                self._scheduler.unschedule(_cu)
                            continue

                        self._log.debug("Launching unit with %s (%s).", launcher.name, launcher.launch_command)

                        assert(_cu['opaque_slot'])  # FIXME: no assert, but check
                        rpu.prof('ExecWorker unit launch', uid=_cu['_id'])

                        # Start a new subprocess to launch the unit
                        # TODO: This is scheduler specific
                        self.spawn(launcher=launcher, cu=_cu)

                except Exception as e:
                    # append the startup error to the units stderr.  This is
                    # not completely correct (as this text is not produced
                    # by the unit), but it seems the most intuitive way to
                    # communicate that error to the application/user.
                    cu['stderr'] += "\nPilot cannot start compute unit:\n%s\n%s" \
                                    % (str(e), traceback.format_exc())
                    cu['state'] = rp.FAILED
                    cu['stderr'] += "\nPilot cannot start compute unit: '%s'" % e

                    # Free the Slots, Flee the Flots, Ree the Frots!
                    if cu['opaque_slot']:
                        self._scheduler.unschedule(cu)

                    cu['state'] = rp.FAILED
                    self._agent.update_unit_state(src    = 'ExecWorker',
                                                  uid    = cu['_id'],
                                                  state  = rp.FAILED,
                                                  msg    = "unit execution failed",
                                                  logger = self._log.exception)

        except Exception as e:
            self._log.exception("Error in ExecWorker loop (%s)" % e)

        rpu.prof('stop')

    # --------------------------------------------------------------------------
    #
    def _cu_to_cmd(self, cu, launcher):
        """
        Render a shell script which runs the unit: cd into its workdir, set
        up environment and pre_exec, run the launcher-constructed command
        with io redirection, then run post_exec.
        """

        # ----------------------------------------------------------------------
        def quote_args(args):
            ret = list()
            for arg in args:
                # if string is between outer single quotes,
                #    pass it as is.
                # if string is between outer double quotes,
                #    pass it as is.
                # otherwise (if string is not quoted)
                #    escape all double quotes
                if arg[0] == arg[-1] == "'":
                    ret.append(arg)
                elif arg[0] == arg[-1] == '"':
                    ret.append(arg)
                else:
                    arg = arg.replace('"', '\\"')
                    ret.append('"%s"' % arg)
            return ret
        # ----------------------------------------------------------------------

        args  = ""
        env   = self._deactivate
        cwd   = ""
        pre   = ""
        post  = ""
        io    = ""
        cmd   = ""
        descr = cu['description']

        if cu['workdir']:
            cwd += "# CU workdir\n"
            cwd += "mkdir -p %s\n" % cu['workdir']
            cwd += "cd %s\n" % cu['workdir']
            cwd += "\n"

        if descr['environment']:
            env += "# CU environment\n"
            for e in descr['environment']:
                env += "export %s=%s\n" % (e, descr['environment'][e])
            env += "\n"

        if descr['pre_exec']:
            pre += "# CU pre-exec\n"
            pre += '\n'.join(descr['pre_exec'])
            pre += "\n\n"

        if descr['post_exec']:
            post += "# CU post-exec\n"
            post += '\n'.join(descr['post_exec'])
            post += "\n\n"

        if descr['arguments']:
            args = ' '.join(quote_args(descr['arguments']))

      # if descr['stdin'] : io += "<%s " % descr['stdin']
      # else              : io += "<%s " % '/dev/null'
        if descr['stdout']:
            io += "1>%s " % descr['stdout']
        else:
            io += "1>%s " % 'STDOUT'
        if descr['stderr']:
            io += "2>%s " % descr['stderr']
        else:
            io += "2>%s " % 'STDERR'

        cmd, hop_cmd = launcher.construct_command(descr['executable'], args,
                                                  descr['cores'],
                                                  '/usr/bin/env RP_SPAWNER_HOP=TRUE "$0"',
                                                  cu['opaque_slot'])

        script = ""
        if hop_cmd:
            # the script will itself contain a remote callout which calls again
            # the script for the invokation of the real workload (cmd) -- we
            # thus introduce a guard for the first execution.  The hop_cmd MUST
            # set RP_SPAWNER_HOP to some value for the startup to work
            script += "# ------------------------------------------------------\n"
            script += '# perform one hop for the actual command launch\n'
            script += 'if test -z "$RP_SPAWNER_HOP"\n'
            script += 'then\n'
            script += ' %s\n' % hop_cmd
            script += ' exit\n'
            script += 'fi\n\n'

        script += "# ------------------------------------------------------\n"
        script += "%s" % cwd
        script += "%s" % env
        script += "%s" % pre
        script += "# CU execution\n"
        script += "%s %s\n\n" % (cmd, io)
        script += "%s" % post
        script += "# ------------------------------------------------------\n\n"

      # self._log.debug ("execution script:\n%s\n" % script)
        return script

    # --------------------------------------------------------------------------
    #
    def spawn(self, launcher, cu):
        """
        Run the unit script through the launcher shell (wrapper BULK/LRUN
        mode) and register the returned pid for monitoring.
        """

        uid = cu['_id']
        rpu.prof('ExecWorker spawn', uid=uid)

        # we got an allocation: go off and launch the process.  we get
        # a multiline command, so use the wrapper's BULK/LRUN mode.
        cmd     = self._cu_to_cmd(cu, launcher)
        run_cmd = "BULK\nLRUN\n%s\nLRUN_EOT\nBULK_RUN\n" % cmd

        rpu.prof('launch script constructed', uid=cu['_id'])

      # TODO: Remove this commented out block?
      # if self.lrms.target_is_macos :
      #     run_cmd = run_cmd.replace ("\\", "\\\\\\\\")  # hello MacOS

        ret, out, _ = self.launcher_shell.run_sync(run_cmd)

        if ret != 0:
            # FIX: the original passed a single tuple to the logger (the
            # placeholders were never filled) and then returned an undefined
            # 'FAIL' marker, silently dropping the unit.  Log properly and
            # raise instead -- the caller's error path fails and unschedules
            # the unit.
            self._log.error("failed to run unit '%s': (%s)(%s)", run_cmd, ret, out)
            raise RuntimeError("failed to run unit '%s': (%s)(%s)"
                              % (run_cmd, ret, out))

        lines = filter(None, out.split("\n"))

        self._log.debug(lines)

        if len(lines) < 2:
            # FIX: '%' interpolation -- RuntimeError does not fill in args
            raise RuntimeError("Failed to run unit (%s)" % lines)

        if lines[-2] != "OK":
            raise RuntimeError("Failed to run unit (%s)" % lines)

        # FIXME: verify format of returned pid (\d+)!
        pid           = lines[-1].strip()
        cu['pid']     = pid
        cu['started'] = rpu.timestamp()

        # before we return, we need to clean the
        # 'BULK COMPLETED message from lrun
        ret, out = self.launcher_shell.find_prompt()
        if ret != 0:
            with self._registry_lock:
                # FIX: the unit was never registered under 'uid' (the registry
                # is keyed by pid, which is only inserted below) -- the
                # original 'del' raised a KeyError masking the real error.
                self._registry.pop(uid, None)
            raise RuntimeError("failed to run unit '%s': (%s)(%s)" \
                              % (run_cmd, ret, out))

        rpu.prof('spawning passed to pty', uid=uid)

        # FIXME: this is too late, there is already a race with the monitoring
        # thread for this CU execution.  We need to communicate the PIDs/CUs
        # via a queue again!
        rpu.prof('put', msg="ExecWorker to watcher (%s)" % cu['state'], uid=cu['_id'])
        with self._registry_lock:
            self._registry[pid] = cu

        cu['state'] = rp.EXECUTING
        self._agent.update_unit_state(src   = 'ExecWorker',
                                      uid   = cu['_id'],
                                      state = rp.EXECUTING,
                                      msg   = "unit execution started")

    # --------------------------------------------------------------------------
    #
    def _watch(self):
        """
        Watcher thread: read state events from the monitor shell, match them
        against the registry, and handle (or cache) them.
        """

        MONITOR_READ_TIMEOUT = 1.0   # check for stop signal now and then
        static_cnt           = 0

        rpu.prof('run')
        try:
            self.monitor_shell.run_async("MONITOR")

            while not self._terminate.is_set():

                _, out = self.monitor_shell.find(['\n'], timeout=MONITOR_READ_TIMEOUT)
                line   = out.strip()
              # self._log.debug ('monitor line: %s' % line)

                if not line:

                    # just a read timeout, i.e. an opportunity to check for
                    # termination signals...
                    if self._terminate.is_set():
                        self._log.debug("stop monitoring")
                        return

                    # ... and for health issues ...
                    if not self.monitor_shell.alive():
                        self._log.warn("monitoring channel died")
                        return

                    # ... and to handle cached events.
                    if not self._cached_events:
                        static_cnt += 1

                    else:
                        self._log.info("monitoring channel checks cache (%d)", len(self._cached_events))
                        static_cnt += 1

                        if static_cnt == 10:
                            # 10 times cache to check, dump it for debugging
                          # print "cache state"
                          # import pprint
                          # pprint.pprint (self._cached_events)
                          # pprint.pprint (self._registry)
                            static_cnt = 0

                        cache_copy          = self._cached_events[:]
                        self._cached_events = list()
                        events_to_handle    = list()

                        with self._registry_lock:

                            for pid, state, data in cache_copy:
                                cu = self._registry.get(pid, None)

                                if cu: events_to_handle.append([cu, pid, state, data])
                                else : self._cached_events.append([pid, state, data])

                        # FIXME: measure if using many locks in the loop below
                        # is really better than doing all ops in the locked
                        # loop above
                        for cu, pid, state, data in events_to_handle:
                            self._handle_event(cu, pid, state, data)

                    # all is well...
                  # self._log.info ("monitoring channel finish idle loop")
                    continue

                elif line == 'EXIT' or line == "Killed":
                    self._log.error("monitoring channel failed (%s)", line)
                    self._terminate.set()
                    return

                elif not ':' in line:
                    self._log.warn("monitoring channel noise: %s", line)

                else:
                    pid, state, data = line.split(':', 2)

                    # we are not interested in non-final state information, at
                    # the moment
                    if state in ['RUNNING']:
                        continue

                    self._log.info("monitoring channel event: %s", line)

                    cu = None
                    with self._registry_lock:
                        cu = self._registry.get(pid, None)

                    if cu:
                        rpu.prof('get', msg="ExecWatcher picked up unit", uid=cu['_id'])
                        self._handle_event(cu, pid, state, data)
                    else:
                        # event arrived before the spawner registered the pid:
                        # cache it for a later idle-loop pass
                        self._cached_events.append([pid, state, data])

        except Exception as e:
            self._log.error("Exception in job monitoring thread: %s", e)
            self._terminate.set()

        rpu.prof('stop')

    # --------------------------------------------------------------------------
    #
    def _handle_event(self, cu, pid, state, data):
        """Translate one final monitor event into a unit state transition."""

        # got an explicit event to handle
        self._log.info("monitoring handles event for %s: %s:%s:%s", cu['_id'], pid, state, data)

        rp_state = {'DONE'     : rp.DONE,
                    'FAILED'   : rp.FAILED,
                    'CANCELED' : rp.CANCELED}.get(state, rp.UNKNOWN)

        if rp_state not in [rp.DONE, rp.FAILED, rp.CANCELED]:
            # non-final state
            self._log.debug("ignore shell level state transition (%s:%s:%s)",
                            pid, state, data)
            return

        # record timestamp, exit code on final states
        cu['finished'] = rpu.timestamp()

        # NOTE(review): assumes 'data' is either empty or a decimal exit code
        # string -- a non-numeric payload would raise here; confirm wrapper
        # output format.
        if data: cu['exit_code'] = int(data)
        else   : cu['exit_code'] = None

        if rp_state in [rp.FAILED, rp.CANCELED]:
            # final state - no further state transition needed
            self._scheduler.unschedule(cu)
            cu['state'] = rp_state
            self._agent.update_unit_state(src   = 'ExecWatcher',
                                          uid   = cu['_id'],
                                          state = rp_state,
                                          msg   = "unit execution finished")

        elif rp_state in [rp.DONE]:
            rpu.prof('execution complete', uid=cu['_id'])
            # advance the unit state
            self._scheduler.unschedule(cu)
            cu['state'] = rp.STAGING_OUTPUT
            self._agent.update_unit_state(src   = 'ExecWatcher',
                                          uid   = cu['_id'],
                                          state = rp.STAGING_OUTPUT,
                                          msg   = "unit execution completed")

            cu_list = rpu.blowup(self._config, cu, STAGEOUT_QUEUE)
            for _cu in cu_list:
                rpu.prof('put', msg="ExecWatcher to stageout_queue (%s)" % _cu['state'], uid=_cu['_id'])
                self._stageout_queue.put(_cu)

        # we don't need the cu in the registry anymore
        with self._registry_lock:
            if pid in self._registry:  # why wouldn't it be in there though?
                del(self._registry[pid])
# ==============================================================================
#
class UpdateWorker(threading.Thread):
    """
    An UpdateWorker pushes CU and Pilot state updates to mongodb.  Its
    instances compete for update requests on the update_queue.  Those
    requests will be triplets of collection name, query dict, and update
    dict.  Update requests will be collected into bulks over some time
    (BULK_COLLECTION_TIME), to reduce number of roundtrips.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, agent, session_id,
                 update_queue, mongodb_url, mongodb_name, mongodb_auth):

        threading.Thread.__init__(self)

        self.name          = name
        self._config       = config
        self._log          = logger
        self._agent        = agent
        self._session_id   = session_id
        self._update_queue = update_queue
        self._terminate    = threading.Event()

        self._mongo_db = rpu.get_mongodb(mongodb_url, mongodb_name, mongodb_auth)
        self._cinfo    = dict()  # collection cache

        # run worker thread
        self.start()

    # --------------------------------------------------------------------------
    #
    def stop(self):
        # signal the run() loop to terminate
        rpu.prof('stop request')
        self._terminate.set()

    # --------------------------------------------------------------------------
    #
    def run(self):
        """Drain the update queue into per-collection mongodb bulk ops."""

        rpu.prof('run')
        while not self._terminate.is_set():

            # ------------------------------------------------------------------
            def timed_bulk_execute(cinfo):
                # push the collection's pending bulk if it is older than
                # bulk_collection_time; returns number of bulks pushed (0 or 1)
                if not cinfo['bulk']:
                    return 0

                now = time.time()
                age = now - cinfo['last']

                if cinfo['bulk'] and age > self._config['bulk_collection_time']:

                    res = cinfo['bulk'].execute()
                    self._log.debug("bulk update result: %s", res)

                    rpu.prof('unit update bulk pushed (%d)' % len(cinfo['uids']))
                    for entry in cinfo['uids']:
                        uid   = entry[0]
                        state = entry[1]
                        if state:
                            rpu.prof('unit update pushed (%s)' % state, uid=uid)
                        else:
                            rpu.prof('unit update pushed', uid=uid)

                    cinfo['last'] = now
                    cinfo['bulk'] = None
                    cinfo['uids'] = list()
                    return 1

                else:
                    return 0
            # ------------------------------------------------------------------

            try:
                try:
                    update_request = self._update_queue.get_nowait()
                except Queue.Empty:
                    # no new requests: push any pending bulks
                    action = 0
                    for cname in self._cinfo:
                        action += timed_bulk_execute(self._cinfo[cname])
                    if not action:
                        time.sleep(self._config['db_poll_sleeptime'])
                    continue

                # FIX: read uid/state once, after the get succeeded -- the
                # original read them twice and discarded the first result.
                uid   = update_request.get('_id')
                state = update_request.get('state', None)

                if state:
                    rpu.prof('get', msg="update_queue to UpdateWorker (%s)" % state, uid=uid)
                else:
                    rpu.prof('get', msg="update_queue to UpdateWorker", uid=uid)

                update_request_list = rpu.blowup(self._config, update_request, UPDATE_WORKER)
                for _update_request in update_request_list:

                    # got a new request.  Add to bulk (create as needed),
                    # and push bulk if time is up.
                    uid         = _update_request.get('_id')
                    state       = _update_request.get('state', None)
                    cbase       = _update_request.get('cbase', '.cu')
                    query_dict  = _update_request.get('query', dict())
                    update_dict = _update_request.get('update', dict())

                    cname = self._session_id + cbase

                    if cname not in self._cinfo:
                        self._cinfo[cname] = {
                            'coll' : self._mongo_db[cname],
                            'bulk' : None,
                            'last' : time.time(),  # time of last push
                            'uids' : list()
                        }

                    cinfo = self._cinfo[cname]

                    if not cinfo['bulk']:
                        cinfo['bulk'] = cinfo['coll'].initialize_ordered_bulk_op()

                    cinfo['uids'].append([uid, state])
                    cinfo['bulk'].find(query_dict) \
                                 .update(update_dict)

                    timed_bulk_execute(cinfo)
                    rpu.prof('unit update bulked (%s)' % state, uid=uid)

            except Exception as e:
                self._log.exception("unit update failed (%s)", e)
                # FIXME: should we fail the pilot at this point?
                # FIXME: Are the strategies to recover?

        rpu.prof('stop')
# ==============================================================================
#
class StageinWorker(threading.Thread):
    """An StageinWorker performs the agent side staging directives.

    It pulls units from the stagein queue, acts on each PENDING input
    staging directive (LINK/COPY/MOVE into the unit sandbox), records the
    outcome in mongodb, and forwards units without FTW staging to the
    scheduler queue.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, agent, execution_queue, schedule_queue,
                 stagein_queue, update_queue, workdir):

        threading.Thread.__init__(self)

        self.name             = name
        self._config          = config
        self._log             = logger
        self._agent           = agent
        self._execution_queue = execution_queue
        self._schedule_queue  = schedule_queue
        self._stagein_queue   = stagein_queue
        self._update_queue    = update_queue
        self._workdir         = workdir
        self._terminate       = threading.Event()

        # run worker thread
        self.start()

    # --------------------------------------------------------------------------
    #
    def stop(self):
        # signal the run() loop to terminate
        rpu.prof ('stop request')
        self._terminate.set()

    # --------------------------------------------------------------------------
    #
    def run(self):
        # Main loop: handle one unit per iteration until terminated.
        rpu.prof('run')
        while not self._terminate.is_set():
            try:
                cu = self._stagein_queue.get()
                if not cu:
                    # 'None' is the wakeup signal
                    rpu.prof('get_cmd', msg="stagein_queue to StageinWorker (wakeup)")
                    continue

                cu['state'] = rp.STAGING_INPUT

                rpu.prof('get', msg="stagein_queue to StageinWorker (%s)" % cu['state'], uid=cu['_id'])

                cu_list = rpu.blowup(self._config, cu, STAGEIN_WORKER)
                for _cu in cu_list :

                    # per-unit sandbox, and the shared staging area
                    sandbox      = os.path.join(self._workdir, '%s' % _cu['_id'])
                    staging_area = os.path.join(self._workdir, self._config['staging_area'])

                    for directive in _cu['Agent_Input_Directives']:
                        rpu.prof('Agent input_staging queue', uid=_cu['_id'],
                                 msg="%s -> %s" % (str(directive['source']), str(directive['target'])))

                        if directive['state'] != rp.PENDING :
                            # we ignore directives which need no action
                            rpu.prof('Agent input_staging queue', uid=_cu['_id'], msg='ignored')
                            continue

                        # Perform input staging
                        self._log.info("unit input staging directives %s for cu: %s to %s",
                                       directive, _cu['_id'], sandbox)

                        # Convert the source_url into a SAGA Url object
                        source_url = rs.Url(directive['source'])

                        # Handle special 'staging' scheme
                        if source_url.scheme == self._config['staging_scheme']:
                            self._log.info('Operating from staging')
                            # Remove the leading slash to get a relative path from the staging area
                            rel2staging = source_url.path.split('/',1)[1]
                            source = os.path.join(staging_area, rel2staging)
                        else:
                            self._log.info('Operating from absolute path')
                            source = source_url.path

                        # Get the target from the directive and convert it to the location
                        # in the sandbox
                        target = directive['target']
                        abs_target = os.path.join(sandbox, target)

                        # Create output directory in case it doesn't exist yet
                        #
                        rec_makedir(os.path.dirname(abs_target))

                        try:
                            self._log.info("Going to '%s' %s to %s", directive['action'], source, abs_target)

                            # dispatch on the staging action kind
                            if   directive['action'] == LINK: os.symlink     (source, abs_target)
                            elif directive['action'] == COPY: shutil.copyfile(source, abs_target)
                            elif directive['action'] == MOVE: shutil.move    (source, abs_target)
                            else:
                                # FIXME: implement TRANSFER mode
                                raise NotImplementedError('Action %s not supported' % directive['action'])

                            log_message = "%s'ed %s to %s - success" % (directive['action'], source, abs_target)
                            self._log.info(log_message)

                            # If all went fine, update the state of this
                            # StagingDirective to DONE
                            # FIXME: is this update below really *needed*?
                            self._agent.update_unit(src    = 'StageinWorker',
                                                    uid    = _cu['_id'],
                                                    msg    = log_message,
                                                    query  = {
                                                        'Agent_Input_Status'            : rp.EXECUTING,
                                                        'Agent_Input_Directives.state'  : rp.PENDING,
                                                        'Agent_Input_Directives.source' : directive['source'],
                                                        'Agent_Input_Directives.target' : directive['target']
                                                    },
                                                    update = {
                                                        '$set'
                                                        : {'Agent_Input_Status'              : rp.DONE,
                                                           'Agent_Input_Directives.$.state' : rp.DONE}
                                                    })

                        except Exception as e:
                            # If we catch an exception, assume the staging failed
                            log_message = "%s'ed %s to %s - failure (%s)" % \
                                          (directive['action'], source, abs_target, e)
                            self._log.exception(log_message)

                            # If a staging directive fails, fail the CU also.
                            _cu['state'] = rp.FAILED
                            self._agent.update_unit_state(src    = 'StageinWorker',
                                                          uid    = _cu['_id'],
                                                          state  = rp.FAILED,
                                                          msg    = log_message,
                                                          query  = {
                                                              'Agent_Input_Status'            : rp.EXECUTING,
                                                              'Agent_Input_Directives.state'  : rp.PENDING,
                                                              'Agent_Input_Directives.source' : directive['source'],
                                                              'Agent_Input_Directives.target' : directive['target']
                                                          },
                                                          update = {
                                                              '$set' : {'Agent_Input_Directives.$.state' : rp.FAILED,
                                                                        'Agent_Input_Status'             : rp.FAILED}
                                                          })

                    # agent staging is all done, unit can go to execution if it has
                    # no FTW staging -- with FTP staging, we have to wait for the
                    # FTW stager to finish (or to pick up on the agent staging
                    # completion) to push the unit via mongodb to the agebnt again.
                    # Duh! (FIXME)
                    if not _cu["FTW_Input_Directives"] :
                        _cu['state'] = rp.ALLOCATING
                        self._agent.update_unit_state(src   = 'StageinWorker',
                                                      uid   = _cu['_id'],
                                                      state = rp.ALLOCATING,
                                                      msg   = 'agent input staging done')

                        _cu_list = rpu.blowup(self._config, _cu, SCHEDULE_QUEUE)
                        for __cu in _cu_list :
                            rpu.prof('put', msg="StageinWorker to schedule_queue (%s)" % __cu['state'], uid=__cu['_id'])
                            self._schedule_queue.put(__cu)

            except Exception as e:
                self._log.exception('worker died')
                # NOTE(review): sys.exit() inside a worker thread only raises
                # SystemExit in *this* thread; it does not terminate the agent
                # process -- confirm whether killing the whole agent was
                # intended here.
                sys.exit(1)

        rpu.prof ('stop')
# ==============================================================================
#
class StageoutWorker(threading.Thread):
    """
    A StageoutWorker performs the agent side staging directives.
    It competes for units on the stageout queue, and handles all relevant
    staging directives. It also takes care of uploading stdout/stderr (which
    can also be considered staging, really).

    Upon completion, the units are moved into the respective final state.

    Multiple StageoutWorker instances can co-exist -- this class needs to be
    threadsafe.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, agent, execution_queue,
                 stageout_queue, update_queue, workdir):
        # Store the component wiring (queues, agent handle, config) and start
        # the worker thread immediately -- the constructor does not block.
        threading.Thread.__init__(self)

        self.name             = name
        self._config          = config
        self._log             = logger
        self._agent           = agent
        self._execution_queue = execution_queue
        self._stageout_queue  = stageout_queue
        self._update_queue    = update_queue
        self._workdir         = workdir
        self._terminate       = threading.Event()

        # run worker thread
        self.start()

    # --------------------------------------------------------------------------
    #
    def stop(self):
        # Request termination of the run() loop; the flag is checked once per
        # loop iteration, so shutdown is cooperative, not immediate.
        rpu.prof('stop request')
        self._terminate.set()

    # --------------------------------------------------------------------------
    #
    def run(self):
        """
        Worker main loop: pull units from the stageout queue, collect the
        tails of their stdout/stderr files, execute all agent-side output
        staging directives (LINK/COPY/MOVE), and then either flag the unit
        for FTW (client side) output staging or move it into its final state.
        """
        rpu.prof('run')

        # all 'staging://' scheme targets are resolved relative to this dir
        staging_area = os.path.join(self._workdir, self._config['staging_area'])

        while not self._terminate.is_set():

            cu = None
            try:
                cu = self._stageout_queue.get()
                if not cu:
                    # 'None' is pushed as a wakeup signal on shutdown
                    rpu.prof('get_cmd', msg="stageout_queue to StageoutWorker (wakeup)")
                    continue

                cu['state'] = rp.STAGING_OUTPUT
                rpu.prof('get', msg="stageout_queue to StageoutWorker (%s)" % cu['state'], uid=cu['_id'])

                # blowup may clone the unit (scaling experiments); normally the
                # list contains just the original unit
                cu_list = rpu.blowup(self._config, cu, STAGEOUT_WORKER)
                for _cu in cu_list:

                    sandbox = os.path.join(self._workdir, '%s' % _cu['_id'])

                    ## parked from unit state checker: unit postprocessing
                    # read back stdout/stderr files (if present) and append a
                    # tail to the unit document; binary content is replaced by
                    # a hint text instead
                    if os.path.isfile(_cu['stdout_file']):
                        with open(_cu['stdout_file'], 'r') as stdout_f:
                            try:
                                txt = unicode(stdout_f.read(), "utf-8")
                            except UnicodeDecodeError:
                                txt = "unit stdout contains binary data -- use file staging directives"
                            _cu['stdout'] += rpu.tail(txt)

                    if os.path.isfile(_cu['stderr_file']):
                        with open(_cu['stderr_file'], 'r') as stderr_f:
                            try:
                                txt = unicode(stderr_f.read(), "utf-8")
                            except UnicodeDecodeError:
                                txt = "unit stderr contains binary data -- use file staging directives"
                            _cu['stderr'] += rpu.tail(txt)

                    for directive in _cu['Agent_Output_Directives']:

                        # Perform output staging
                        self._log.info("unit output staging directives %s for cu: %s to %s",
                                       directive, _cu['_id'], sandbox)

                        # Convert the target_url into a SAGA Url object
                        target_url = rs.Url(directive['target'])

                        # Handle special 'staging' scheme
                        if target_url.scheme == self._config['staging_scheme']:
                            self._log.info('Operating from staging')
                            # Remove the leading slash to get a relative path from
                            # the staging area
                            rel2staging = target_url.path.split('/', 1)[1]
                            target = os.path.join(staging_area, rel2staging)
                        else:
                            self._log.info('Operating from absolute path')
                            # FIXME: will this work for TRANSFER mode?
                            target = target_url.path

                        # Get the source from the directive and convert it to the location
                        # in the sandbox
                        source = str(directive['source'])
                        abs_source = os.path.join(sandbox, source)

                        # Create output directory in case it doesn't exist yet
                        # FIXME: will this work for TRANSFER mode?
                        rec_makedir(os.path.dirname(target))

                        try:
                            self._log.info("Going to '%s' %s to %s", directive['action'], abs_source, target)

                            if directive['action'] == LINK:
                                # This is probably not a brilliant idea, so at least give a warning
                                os.symlink(abs_source, target)
                            elif directive['action'] == COPY:
                                shutil.copyfile(abs_source, target)
                            elif directive['action'] == MOVE:
                                shutil.move(abs_source, target)
                            else:
                                # FIXME: implement TRANSFER mode
                                raise NotImplementedError('Action %s not supported' % directive['action'])

                            log_message = "%s'ed %s to %s - success" % (directive['action'], abs_source, target)
                            self._log.info(log_message)

                            # If all went fine, update the state of this
                            # StagingDirective to DONE
                            # FIXME: is this update below really *needed*?
                            self._agent.update_unit(
                                src    = 'StageoutWorker',
                                uid    = _cu['_id'],
                                msg    = log_message,
                                query  = {
                                    # TODO: We never set the status to EXECUTION anymore
                                    'Agent_Output_Status'           : rp.EXECUTING,
                                    'Agent_Output_Directives.state' : rp.PENDING,
                                    'Agent_Output_Directives.source': directive['source'],
                                    'Agent_Output_Directives.target': directive['target']
                                },
                                update = {
                                    '$set' : {'Agent_Output_Directives.$.state': rp.DONE}
                                })

                        except Exception as e:
                            # If we catch an exception, assume the staging failed
                            log_message = "%s'ed %s to %s - failure (%s)" % \
                                (directive['action'], abs_source, target, e)
                            self._log.exception(log_message)

                            # If a staging directive fails, fail the CU also.
                            _cu['state'] = rp.FAILED
                            self._agent.update_unit_state(
                                src    = 'StageoutWorker',
                                uid    = _cu['_id'],
                                state  = rp.FAILED,
                                msg    = log_message,
                                query  = {
                                    'Agent_Output_Status'            : rp.EXECUTING,
                                    'Agent_Output_Directives.state'  : rp.PENDING,
                                    'Agent_Output_Directives.source' : directive['source'],
                                    'Agent_Output_Directives.target' : directive['target']
                                },
                                update = {
                                    '$set' : {'Agent_Output_Directives.$.state' : rp.FAILED,
                                              'Agent_Output_Status'             : rp.FAILED}
                                })

                    # TODO: Update Agent_Output_Status here?

                    # local staging is done. Now check if there are Directives that
                    # need to be performed by the FTW.
                    # Obviously these are not executed here (by the Agent),
                    # but we need this code to set the state so that the FTW
                    # gets notified that it can start its work.
                    if _cu['FTW_Output_Directives']:
                        rpu.prof('ExecWorker unit needs FTW_O ', uid=_cu['_id'])
                        self._agent.update_unit(
                            src    = 'StageoutWorker',
                            uid    = _cu['_id'],
                            msg    = 'FTW output staging needed',
                            update = {
                                '$set': {
                                    'FTW_Output_Status' : rp.PENDING,
                                    'stdout'            : _cu['stdout'],
                                    'stderr'            : _cu['stderr'],
                                    'exit_code'         : _cu['exit_code'],
                                    'started'           : _cu['started'],
                                    'finished'          : _cu['finished'],
                                    'slots'             : _cu['opaque_slot'],
                                }
                            })
                        # NOTE: this is final for the agent scope -- further state
                        # transitions are done by the FTW.
                        _cu = None

                    else:
                        # no FTW staging is needed, local staging is done -- we can
                        # move the unit into final state.
                        rpu.prof('final', msg="stageout done", uid=_cu['_id'])
                        _cu['state'] = rp.DONE
                        self._agent.update_unit_state(
                            src    = 'StageoutWorker',
                            uid    = _cu['_id'],
                            state  = rp.DONE,
                            msg    = 'output staging completed',
                            update = {
                                '$set' : {
                                    'stdout'    : _cu['stdout'],
                                    'stderr'    : _cu['stderr'],
                                    'exit_code' : _cu['exit_code'],
                                    'started'   : _cu['started'],
                                    'finished'  : _cu['finished'],
                                    'slots'     : _cu['opaque_slot'],
                                }
                            })
                        # NOTE: this is final, the cu is not touched anymore
                        _cu = None

                # make sure the CU is not touched anymore (see except below)
                cu = None

            except Exception as e:
                self._log.exception("Error in StageoutWorker loop (%s)", e)

                # check if we have any cu in operation. If so, mark as final.
                # This check relies on the pushes to the update queue to be the
                # *last* actions of the loop above -- otherwise we may get
                # invalid state transitions...
                if cu:
                    rpu.prof('final', msg="stageout failed", uid=cu['_id'])
                    cu['state'] = rp.FAILED
                    self._agent.update_unit_state(
                        src    = 'StageoutWorker',
                        uid    = cu['_id'],
                        state  = rp.FAILED,
                        msg    = 'output staging failed',
                        update = {
                            '$set' : {
                                'stdout'    : cu['stdout'],
                                'stderr'    : cu['stderr'],
                                'exit_code' : cu['exit_code'],
                                'started'   : cu['started'],
                                'finished'  : cu['finished'],
                                'slots'     : cu['opaque_slot'],
                            }
                        })
                    # NOTE: this is final, the cu is not touched anymore
                    cu = None

                # forward the exception (kills this worker thread)
                raise

        rpu.prof('stop')
# ==============================================================================
#
class HeartbeatMonitor(threading.Thread):
    """
    The HeartbeatMonitor watches the command queue for heartbeat updates (and
    other commands).  It also watches the other worker threads for liveliness
    and enforces the agent runtime limit (see _check_state()).
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, agent, command_queue, p, pilot_id, starttime, runtime):
        # p         : pymongo collection holding the pilot document
        # starttime : wallclock time (time.time()) at agent start
        # runtime   : allowed runtime -- compared as minutes in _check_state()
        threading.Thread.__init__(self)

        self.name           = name
        self._config        = config
        self._log           = logger
        self._agent         = agent
        self._command_queue = command_queue
        self._p             = p
        self._pilot_id      = pilot_id
        self._starttime     = starttime
        self._runtime       = runtime
        self._terminate     = threading.Event()

        # run worker thread
        self.start()

    # --------------------------------------------------------------------------
    #
    def stop(self):
        # Stopping the heartbeat monitor also stops the agent main loop --
        # this is the only worker whose stop() cascades to the agent.
        rpu.prof('stop request')
        self._terminate.set()
        self._agent.stop()

    # --------------------------------------------------------------------------
    #
    def run(self):
        # Periodic loop: process pending commands, check worker health and
        # runtime limit, then sleep for the configured heartbeat interval.
        rpu.prof('run')

        while not self._terminate.is_set():
            try:
                rpu.prof('heartbeat', msg='Listen! Listen! Listen to the heartbeat!')
                self._check_commands()
                self._check_state()
                time.sleep(self._config['heartbeat_interval'])

            except Exception as e:
                # any error in the monitor is fatal for the whole agent
                self._log.exception('error in heartbeat monitor (%s)', e)
                self.stop()

        rpu.prof('stop')

    # --------------------------------------------------------------------------
    #
    def _check_commands(self):

        # Check if there's a command waiting.  find_and_modify atomically
        # fetches and wipes the command array, so each command is seen once.
        retdoc = self._p.find_and_modify(
            query  = {"_id": self._pilot_id},
            update = {"$set": {COMMAND_FIELD: []}},  # Wipe content of array
            fields = [COMMAND_FIELD, 'state']
        )

        if not retdoc:
            return

        commands = retdoc[COMMAND_FIELD]
        state    = retdoc['state']

        for command in commands:

            command_str = '%s:%s' % (command[COMMAND_TYPE], command[COMMAND_ARG])
            rpu.prof('ingest_cmd', msg="mongodb to HeartbeatMonitor (%s)" % command_str)

            if command[COMMAND_TYPE] == COMMAND_CANCEL_PILOT:
                self.stop()
                pilot_CANCELED(self._p, self._pilot_id, self._log, "CANCEL received. Terminating.")
                sys.exit(1)

            elif state == rp.CANCELING:
                # pilot document is already in CANCELING state -- treat like an
                # explicit cancel command
                self.stop()
                pilot_CANCELED(self._p, self._pilot_id, self._log, "CANCEL implied. Terminating.")
                sys.exit(1)

            elif command[COMMAND_TYPE] == COMMAND_CANCEL_COMPUTE_UNIT:
                self._log.info("Received Cancel Compute Unit command for: %s", command[COMMAND_ARG])
                # Put it on the command queue of the ExecWorker
                rpu.prof('put_cmd', msg="HeartbeatMonitor to command_queue (%s)" % command_str,
                         uid=command[COMMAND_ARG])
                self._command_queue.put(command)

            elif command[COMMAND_TYPE] == COMMAND_KEEP_ALIVE:
                self._log.info("Received KeepAlive command.")

            else:
                self._log.error("Received unknown command: %s with arg: %s.",
                                command[COMMAND_TYPE], command[COMMAND_ARG])

    # --------------------------------------------------------------------------
    #
    def _check_state(self):

        # Check the workers periodically. If they have died, we
        # exit as well. this can happen, e.g., if the worker
        # process has caught an exception
        for worker in self._agent.worker_list:
            if not worker.is_alive():
                self.stop()
                msg = 'worker %s died' % str(worker)
                pilot_FAILED(self._p, self._pilot_id, self._log, msg)

        # Make sure that we haven't exceeded the agent runtime. if
        # we have, terminate.  (runtime is interpreted as minutes)
        if time.time() >= self._starttime + (int(self._runtime) * 60):
            self._log.info("Agent has reached runtime limit of %s seconds.", self._runtime*60)
            self.stop()
            pilot_DONE(self._p, self._pilot_id)
# ==============================================================================
#
class Agent(object):
    """
    The Agent owns all component queues and worker threads (stagein, exec,
    stageout, update workers, plus scheduler and heartbeat monitor), pulls
    compute units from MongoDB, and feeds them into the component pipeline.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, lrms_name, requested_cores,
                 task_launch_method, mpi_launch_method, spawner,
                 scheduler_name, runtime,
                 mongodb_url, mongodb_name, mongodb_auth,
                 pilot_id, session_id):
        # Create all queues, connect to MongoDB, instantiate LRMS, scheduler,
        # launch methods, and start all worker threads.
        rpu.prof('Agent init')

        self.name          = name
        self._config       = config
        self._log          = logger
        self._debug_helper = ru.DebugHelper()
        self._pilot_id     = pilot_id
        self._runtime      = runtime
        self._terminate    = threading.Event()
        self._starttime    = time.time()
        self._workdir      = os.getcwd()
        self._session_id   = session_id
        # NOTE(review): _pilot_id is assigned twice (also a few lines above)
        self._pilot_id     = pilot_id

        self.worker_list   = list()

        # we want to own all queues -- that simplifies startup and shutdown
        self._schedule_queue  = QUEUE_TYPE()
        self._stagein_queue   = QUEUE_TYPE()
        self._execution_queue = QUEUE_TYPE()
        self._stageout_queue  = QUEUE_TYPE()
        self._update_queue    = QUEUE_TYPE()
        self._command_queue   = QUEUE_TYPE()

        # pilot ('.p') and compute-unit ('.cu') collections of this session
        mongo_db = rpu.get_mongodb(mongodb_url, mongodb_name, mongodb_auth)
        self._p  = mongo_db["%s.p"  % self._session_id]
        self._cu = mongo_db["%s.cu" % self._session_id]

        # resource detection / structuring
        self._lrms = LRMS.create(
            name            = lrms_name,
            config          = self._config,
            logger          = self._log,
            requested_cores = requested_cores)

        # scheduler maps units onto the LRMS resources
        self._scheduler = Scheduler.create(
            name            = scheduler_name,
            config          = self._config,
            logger          = self._log,
            lrms            = self._lrms,
            schedule_queue  = self._schedule_queue,
            execution_queue = self._execution_queue,
            update_queue    = self._update_queue)
        self.worker_list.append(self._scheduler)

        # launch methods for regular and MPI tasks
        self._task_launcher = LaunchMethod.create(
            name      = task_launch_method,
            config    = self._config,
            logger    = self._log,
            scheduler = self._scheduler)

        self._mpi_launcher = LaunchMethod.create(
            name      = mpi_launch_method,
            config    = self._config,
            logger    = self._log,
            scheduler = self._scheduler)

        # worker pools; pool sizes come from the agent config
        for n in range(self._config['number_of_workers'][STAGEIN_WORKER]):
            stagein_worker = StageinWorker(
                name            = "StageinWorker-%d" % n,
                config          = self._config,
                logger          = self._log,
                agent           = self,
                execution_queue = self._execution_queue,
                schedule_queue  = self._schedule_queue,
                stagein_queue   = self._stagein_queue,
                update_queue    = self._update_queue,
                workdir         = self._workdir
            )
            self.worker_list.append(stagein_worker)

        for n in range(self._config['number_of_workers'][EXEC_WORKER]):
            exec_worker = ExecWorker.create(
                name            = "ExecWorker-%d" % n,
                config          = self._config,
                spawner         = spawner,
                logger          = self._log,
                agent           = self,
                lrms            = self._lrms,
                scheduler       = self._scheduler,
                task_launcher   = self._task_launcher,
                mpi_launcher    = self._mpi_launcher,
                command_queue   = self._command_queue,
                execution_queue = self._execution_queue,
                stageout_queue  = self._stageout_queue,
                update_queue    = self._update_queue,
                schedule_queue  = self._schedule_queue,
                pilot_id        = self._pilot_id,
                session_id      = self._session_id
            )
            self.worker_list.append(exec_worker)

        for n in range(self._config['number_of_workers'][STAGEOUT_WORKER]):
            stageout_worker = StageoutWorker(
                name            = "StageoutWorker-%d" % n,
                config          = self._config,
                agent           = self,
                logger          = self._log,
                execution_queue = self._execution_queue,
                stageout_queue  = self._stageout_queue,
                update_queue    = self._update_queue,
                workdir         = self._workdir
            )
            self.worker_list.append(stageout_worker)

        for n in range(self._config['number_of_workers'][UPDATE_WORKER]):
            update_worker = UpdateWorker(
                name         = "UpdateWorker-%d" % n,
                config       = self._config,
                logger       = self._log,
                agent        = self,
                session_id   = self._session_id,
                update_queue = self._update_queue,
                mongodb_url  = mongodb_url,
                mongodb_name = mongodb_name,
                mongodb_auth = mongodb_auth
            )
            self.worker_list.append(update_worker)

        # heartbeat monitor watches commands, worker liveliness and runtime
        hbmon = HeartbeatMonitor(
            name          = "HeartbeatMonitor",
            config        = self._config,
            logger        = self._log,
            agent         = self,
            command_queue = self._command_queue,
            p             = self._p,
            starttime     = self._starttime,
            runtime       = self._runtime,
            pilot_id      = self._pilot_id)
        self.worker_list.append(hbmon)

        rpu.prof('Agent init done')

    # --------------------------------------------------------------------------
    #
    def stop(self):
        """
        Terminate the agent main loop. The workers will be pulled down once the
        main loop finishes (see run())
        """
        rpu.prof('stop request')
        self._terminate.set()

    # --------------------------------------------------------------------------
    #
    def update_unit(self, src, uid, state=None, msg=None, query=None, update=None):
        # Push a MongoDB update request for unit 'uid' onto the update queue.
        # 'src' is only used for profiling; 'msg' (if given) is appended to the
        # unit's log array; 'query'/'update' are MongoDB query/update specs.
        # NOTE: 'update' is mutated in place when msg is set (adds a '$push').

        if not query : query  = dict()
        if not update: update = dict()

        query_dict  = dict()
        update_dict = update

        query_dict['_id'] = uid

        for key, val in query.iteritems():
            query_dict[key] = val

        if msg:
            if not '$push' in update_dict:
                update_dict['$push'] = dict()

            update_dict['$push']['log'] = {'message'  : msg,
                                           'timestamp': rpu.timestamp()}

        if state:
            rpu.prof('put', msg="%s to update_queue (%s)" % (src, state), uid=query_dict['_id'])
        else:
            rpu.prof('put', msg="%s to update_queue" % src, uid=query_dict['_id'])

        # blowup may clone the request for scaling experiments
        query_list = rpu.blowup(self._config, query_dict, UPDATE_QUEUE)
        for _query_dict in query_list:
            self._update_queue.put({'_id'   : _query_dict['_id'],
                                    'state' : state,
                                    'cbase' : '.cu',
                                    'query' : _query_dict,
                                    'update': update_dict})

    # --------------------------------------------------------------------------
    #
    def update_unit_state(self, src, uid, state, msg=None, query=None, update=None,
                          logger=None):
        # Like update_unit(), but also sets the unit 'state' field and appends
        # a statehistory entry; caller-provided '$set'/'$push' entries are
        # merged on top of the state update.

        if not query : query  = dict()
        if not update: update = dict()

        if logger and msg:
            logger("unit '%s' state change (%s)" % (uid, msg))

        # we alter update, so rather use a copy of the dict...
        now = rpu.timestamp()
        update_dict = {
            '$set' : {
                'state': state
            },
            '$push': {
                'statehistory': {
                    'state'    : state,
                    'timestamp': now
                }
            }
        }

        if '$set' in update:
            for key, val in update['$set'].iteritems():
                update_dict['$set'][key] = val

        if '$push' in update:
            for key, val in update['$push'].iteritems():
                update_dict['$push'][key] = val

        self.update_unit(src    = src,
                         uid    = uid,
                         state  = state,
                         msg    = msg,
                         query  = query,
                         update = update_dict)

    # --------------------------------------------------------------------------
    #
    def run(self):
        # Agent main loop: mark the pilot ACTIVE, then poll MongoDB for new
        # units until stop() is called, and finally tear down all workers.
        rpu.prof('run')

        # first order of business: set the start time and state of the pilot
        self._log.info("Agent %s starting ...", self._pilot_id)
        now = rpu.timestamp()
        ret = self._p.update(
            {"_id": self._pilot_id},
            {"$set" : {"state"          : rp.ACTIVE,
                       # TODO: The two fields below are currently scheduler
                       # specific!
                       "nodes"          : self._lrms.node_list,
                       "cores_per_node" : self._lrms.cores_per_node,
                       "started"        : now},
             "$push": {"statehistory": {"state"    : rp.ACTIVE,
                                        "timestamp": now}}
            })
        # TODO: Check for return value, update should be true!
        self._log.info("Database updated: %s", ret)

        while not self._terminate.is_set():

            try:
                # check for new units
                action = self._check_units()

                # if no units have been seen, then wait for juuuust a little...
                # FIXME: use some mongodb notification mechanism to avoid busy
                # polling. Tailed cursors or whatever...
                if not action:
                    time.sleep(self._config['db_poll_sleeptime'])

            except Exception as e:
                # exception in the main loop is fatal
                self.stop()
                pilot_FAILED(self._p, self._pilot_id, self._log,
                             "ERROR in agent main loop: %s. %s" % (e, traceback.format_exc()))
                sys.exit(1)

        # main loop terminated, so self._terminate was set
        # we need to signal shut down to all workers
        for worker in self.worker_list:
            worker.stop()

        # to make sure that threads are not stuck waiting on a queue, we send
        # a signal on each queue
        self._schedule_queue.put (None)
        self._execution_queue.put(None)
        self._update_queue.put   (None)
        self._stagein_queue.put  (None)
        self._stageout_queue.put (None)

        # and wait for them to actually finish
        # FIXME: make sure this works when stop was initialized by heartbeat monitor
        for worker in self.worker_list:
            worker.join()

        # record cancelation state
        pilot_CANCELED(self._p, self._pilot_id, self._log,
                       "Terminated (_terminate set).")

        rpu.prof('stop')
        sys.exit(0)

    # --------------------------------------------------------------------------
    #
    def _check_units(self):
        # Check if there are compute units waiting for execution,
        # and log that we pulled it.
        #
        # FIXME: Unfortunately, 'find_and_modify' is not bulkable, so we have
        # to use 'find'. To avoid finding the same units over and over again,
        # we update the state *before* running the next find -- so we do it
        # right here... No idea how to avoid that roundtrip...
        # This also blocks us from using multiple ingest threads, or from doing
        # late binding by unit pull :/
        cu_cursor = self._cu.find(spec = {"pilot": self._pilot_id,
                                          "state": rp.PENDING_EXECUTION})
        if cu_cursor.count():
            # claim the units by bulk-updating their state to ALLOCATING
            cu_list = list(cu_cursor)
            cu_uids = [_cu['_id'] for _cu in cu_list]
            self._cu.update(multi    = True,
                            spec     = {"_id": {"$in": cu_uids}},
                            document = {"$set" : {"state": rp.ALLOCATING},
                                        "$push": {"statehistory":
                                            {
                                                "state"    : rp.ALLOCATING,
                                                "timestamp": rpu.timestamp()
                                            }
                                        }})
        else:
            # if we did not find any units which can be executed immediately, we
            # check if we have any units for which to do stage-in
            cu_cursor = self._cu.find(spec = {"pilot"             : self._pilot_id,
                                              'Agent_Input_Status': rp.PENDING})
            if cu_cursor.count():
                cu_list = list(cu_cursor)
                cu_uids = [_cu['_id'] for _cu in cu_list]
                self._cu.update(multi    = True,
                                spec     = {"_id": {"$in": cu_uids}},
                                document = {"$set" : {"state"             : rp.STAGING_INPUT,
                                                      "Agent_Input_Status": rp.EXECUTING},
                                            "$push": {"statehistory":
                                                {
                                                    "state"    : rp.STAGING_INPUT,
                                                    "timestamp": rpu.timestamp()
                                                }
                                            }})
            else:
                # no units whatsoever...
                return 0

        # now we really own the CUs, and can start working on them (ie. push
        # them into the pipeline)
        if cu_cursor.count():
            rpu.prof('Agent get units', msg="number of units: %d" % cu_cursor.count(),
                     logger=self._log.info)

        for cu in cu_list:
            rpu.prof('get', msg="MongoDB to Agent (%s)" % cu['state'], uid=cu['_id'], logger=self._log.info)

            _cu_list = rpu.blowup(self._config, cu, AGENT)
            for _cu in _cu_list:

                try:
                    # prepare per-unit metadata: sandbox path, stdout/stderr
                    # target files (defaults STDOUT/STDERR in the sandbox)
                    cud     = _cu['description']
                    workdir = "%s/%s" % (self._workdir, _cu['_id'])

                    _cu['workdir'] = workdir
                    _cu['stdout']  = ''
                    _cu['stderr']  = ''
                    # NOTE(review): 'opaque_clot' looks like a typo for
                    # 'opaque_slot' (which is read elsewhere) -- confirm
                    _cu['opaque_clot'] = None

                    stdout_file = cud.get('stdout')
                    if not stdout_file:
                        stdout_file = 'STDOUT'
                    _cu['stdout_file'] = os.path.join(workdir, stdout_file)

                    stderr_file = cud.get('stderr')
                    if not stderr_file:
                        stderr_file = 'STDERR'
                    _cu['stderr_file'] = os.path.join(workdir, stderr_file)

                    rpu.prof('Agent get unit meta', uid=_cu['_id'])
                    # create unit sandbox
                    rec_makedir(workdir)
                    rpu.prof('Agent get unit mkdir', uid=_cu['_id'])

                    # and send to staging / execution, respectively
                    if _cu['Agent_Input_Directives'] and \
                       _cu['Agent_Input_Status'] == rp.PENDING:

                        _cu['state'] = rp.STAGING_INPUT
                        self.update_unit_state(src   = 'Agent',
                                               uid   = _cu['_id'],
                                               state = rp.STAGING_INPUT,
                                               msg   = 'unit needs input staging')

                        _cu_list = rpu.blowup(self._config, _cu, STAGEIN_QUEUE)
                        for __cu in _cu_list:
                            rpu.prof('put', msg="Agent to stagein_queue (%s)" % __cu['state'], uid=__cu['_id'])
                            self._stagein_queue.put(__cu)

                    else:
                        _cu['state'] = rp.ALLOCATING
                        self.update_unit_state(src   = 'Agent',
                                               uid   = _cu['_id'],
                                               state = rp.ALLOCATING,
                                               msg   = 'unit needs no input staging')

                        _cu_list = rpu.blowup(self._config, _cu, SCHEDULE_QUEUE)
                        for __cu in _cu_list:
                            rpu.prof('put', msg="Agent to schedule_queue (%s)" % __cu['state'], uid=__cu['_id'])
                            self._schedule_queue.put(__cu)

                except Exception as e:
                    # if any unit sorting step failed, the unit did not end up in
                    # a queue (its always the last step). We set it to FAILED
                    msg = "could not sort unit (%s)" % e
                    rpu.prof('error', msg=msg, uid=_cu['_id'], logger=self._log.exception)
                    _cu['state'] = rp.FAILED
                    self.update_unit_state(src   = 'Agent',
                                           uid   = _cu['_id'],
                                           state = rp.FAILED,
                                           msg   = msg)
                    # NOTE: this is final, the unit will not be touched
                    # anymore.
                    _cu = None

        # indicate that we did some work (if we did...)
        return len(cu_uids)
# ==============================================================================
#
# Agent main code
#
# ==============================================================================
def main():
    """
    Agent entry point.

    Parses the command line, configures logging and signal handlers, merges
    the optional local agent config ($HOME/.radical/pilot/configs/agent.json)
    into the module-level agent_config, connects to MongoDB, and runs the
    Agent main loop.  Exits the process with a nonzero code on error.
    """

    mongo_p = None

    # parse command line options
    parser = optparse.OptionParser()
    parser.add_option('-a', dest='mongodb_auth')
    parser.add_option('-c', dest='cores', type='int')
    parser.add_option('-d', dest='debug_level', type='int')
    parser.add_option('-j', dest='task_launch_method')
    parser.add_option('-k', dest='mpi_launch_method')
    parser.add_option('-l', dest='lrms')
    parser.add_option('-m', dest='mongodb_url')
    parser.add_option('-n', dest='mongodb_name')
    parser.add_option('-o', dest='spawner')
    parser.add_option('-p', dest='pilot_id')
    parser.add_option('-q', dest='agent_scheduler')
    parser.add_option('-r', dest='runtime', type='int')
    parser.add_option('-s', dest='session_id')

    # parse the whole shebang
    (options, args) = parser.parse_args()

    if args: parser.error("Unused arguments '%s'" % args)

    # all options are mandatory
    if not options.cores              : parser.error("Missing number of cores (-c)")
    if not options.debug_level        : parser.error("Missing DEBUG level (-d)")
    if not options.task_launch_method : parser.error("Missing unit launch method (-j)")
    if not options.mpi_launch_method  : parser.error("Missing mpi launch method (-k)")
    if not options.lrms               : parser.error("Missing LRMS (-l)")
    if not options.mongodb_url        : parser.error("Missing MongoDB URL (-m)")
    if not options.mongodb_name       : parser.error("Missing database name (-n)")
    if not options.spawner            : parser.error("Missing agent spawner (-o)")
    if not options.pilot_id           : parser.error("Missing pilot id (-p)")
    if not options.agent_scheduler    : parser.error("Missing agent scheduler (-q)")
    if not options.runtime            : parser.error("Missing agent runtime (-r)")
    if not options.session_id         : parser.error("Missing session id (-s)")

    rpu.prof('start', uid=options.pilot_id)

    # configure the agent logger
    logger    = logging.getLogger('radical.pilot.agent')
    handle    = logging.FileHandler("agent.log")
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logger.setLevel(options.debug_level)
    handle.setFormatter(formatter)
    logger.addHandler(handle)

    # BUGFIX: the utils version was previously logged from rs (SAGA) --
    # report each package's own version.
    logger.info("Using RADICAL-Utils version %s", ru.version)
    logger.info("Using RADICAL-SAGA version %s", rs.version)
    logger.info("Using RADICAL-Pilot version %s (%s)", rp.version, git_ident)

    # --------------------------------------------------------------------------
    #
    def sigint_handler(signum, frame):
        msg = 'Caught SIGINT. EXITING. (%s: %s)' % (signum, frame)
        pilot_FAILED(mongo_p, options.pilot_id, logger, msg)
        sys.exit(2)
    signal.signal(signal.SIGINT, sigint_handler)

    # --------------------------------------------------------------------------
    #
    def sigalarm_handler(signum, frame):
        msg = 'Caught SIGALRM (Walltime limit reached?). EXITING (%s: %s)' \
            % (signum, frame)
        pilot_FAILED(mongo_p, options.pilot_id, logger, msg)
        sys.exit(3)
    signal.signal(signal.SIGALRM, sigalarm_handler)

    # --------------------------------------------------------------------------
    # load the local agent config, and overload the config dicts
    try:
        logger.info("load agent config")
        cfg_file = "%s/.radical/pilot/configs/agent.json" % os.environ['HOME']
        cfg      = ru.read_json_str(cfg_file)

        import pprint
        logger.debug("\n%s\n" % pprint.pformat(cfg.get('drop_clones', {})))
        logger.debug("\n%s\n" % pprint.pformat(agent_config['drop_clones']))

        ru.dict_merge(agent_config['number_of_workers'], cfg.get('number_of_workers', {}), policy='overwrite')
        ru.dict_merge(agent_config['blowup_factor'],     cfg.get('blowup_factor',     {}), policy='overwrite')
        ru.dict_merge(agent_config['drop_clones'],       cfg.get('drop_clones',       {}), policy='overwrite')

        logger.info("agent config merged")
        # BUGFIX: "\Agent" was an unintended (invalid) escape sequence
        logger.debug("\nAgent config:\n%s\n\n" % pprint.pformat(agent_config))

    except Exception as e:
        # a missing or broken local config is not fatal -- defaults are used
        logger.info("agent config not merged: %s", e)

    try:
        # ----------------------------------------------------------------------
        # Establish database connection
        rpu.prof('db setup')
        mongo_db = rpu.get_mongodb(options.mongodb_url, options.mongodb_name,
                                   options.mongodb_auth)
        mongo_p  = mongo_db["%s.p" % options.session_id]

        # ----------------------------------------------------------------------
        # Launch the agent thread
        rpu.prof('Agent create')
        agent = Agent(
            name               = 'Agent',
            config             = agent_config,
            logger             = logger,
            lrms_name          = options.lrms,
            requested_cores    = options.cores,
            task_launch_method = options.task_launch_method,
            mpi_launch_method  = options.mpi_launch_method,
            spawner            = options.spawner,
            scheduler_name     = options.agent_scheduler,
            runtime            = options.runtime,
            mongodb_url        = options.mongodb_url,
            mongodb_name       = options.mongodb_name,
            mongodb_auth       = options.mongodb_auth,
            pilot_id           = options.pilot_id,
            session_id         = options.session_id
        )

        agent.run()
        rpu.prof('Agent done')

    except SystemExit:
        # NOTE(review): this catches *any* SystemExit (incl. agent.run()'s
        # normal sys.exit(0)), not only keyboard interrupts -- confirm intent
        logger.error("Caught keyboard interrupt. EXITING")
        return 6

    except Exception as e:
        error_msg = "Error running agent: %s" % str(e)
        logger.exception(error_msg)
        pilot_FAILED(mongo_p, options.pilot_id, logger, error_msg)
        sys.exit(7)

    finally:
        # NOTE(review): sys.exit() in a finally clause overrides any earlier
        # return value or exit code -- the process always exits with 8.
        # Kept as-is since callers may rely on this; confirm before changing.
        rpu.prof('stop', msg='finally clause')
        sys.exit(8)
# ------------------------------------------------------------------------------
#
if __name__ == "__main__":

    # print an environment/version banner to stdout before handing control
    # to main() -- useful for debugging the bootstrapped agent environment
    print "---------------------------------------------------------------------"
    print
    print "PYTHONPATH: %s" % sys.path
    print "python: %s"     % sys.version
    print "utils : %-5s : %s" % (ru.version_detail, ru.__file__)
    print "saga  : %-5s : %s" % (rs.version_detail, rs.__file__)
    print "pilot : %-5s : %s" % (rp.version_detail, rp.__file__)
    print "        type  : multicore"
    print "        gitid : %s" % git_ident
    print
    print "---------------------------------------------------------------------"
    print

    sys.exit(main())

#
# ------------------------------------------------------------------------------
Fix off-by-one error.
This resolves #621 and #572.
My hunch is that there still is an underlying issue in the multinode scheduler,
will create a separate ticket for that.
Andre: To answer your (now removed) question: a non-communicating task
cannot have concurrency beyond a node boundary. This seems obvious, so I might
be missing the point of your question?
#!/usr/bin/env python
"""
.. module:: radical.pilot.agent
:platform: Unix
:synopsis: The agent for RADICAL-Pilot.
The agent gets CUs by means of the MongoDB.
The execution of CUs by the Agent is (primarily) configured by the
triplet (LRMS, LAUNCH_METHOD(s), SCHEDULER):
- The LRMS detects and structures the information about the resources
available to agent.
- The Scheduler maps the execution requests of the LaunchMethods to a
subset of the resources available to the Agent.
It does not deal with the "presentation" of this subset.
- The LaunchMethods configure how to execute (regular and MPI) tasks,
and know about the specific format to specify the subset of resources.
Structure:
----------
This represents the planned architecture, which is not fully represented in
code, yet.
- class Agent
- represents the whole thing
- has a set of StageinWorkers (threads or procs)
- has a set of StageoutWorkers (threads or procs)
- has a set of ExecWorkers (threads or procs)
- has a set of UpdateWorkers (threads or procs)
- has a HeartbeatMonitor (threads or procs)
- has an input-staging queue
- has an output-staging queue
- has an execution queue
- has an update queue
- loops forever
- in each iteration
- pulls CU bulks from DB
- pushes CUs into inputstaging queue or execution queue (based on
obvious metric)
class StageinWorker
- competes for CU input staging requests from inputstaging queue
- for each received CU
- performs staging
- pushes CU into execution queue
- pushes stage change notification request into update queue
class StageoutWorker
- competes for CU output staging requests from outputstaging queue
- for each received CU
- performs staging
- pushes stage change notification request into update queue
class ExecWorker
- manages a partition of the allocated cores
(partition size == max cu size)
- competes for CU execution requests from execute queue
- for each CU
- prepares execution command
- pushes command to ExecutionEnvironment
- pushes stage change notification request into update queue
class Spawner
- executes CUs according to ExecWorker instruction
- monitors CU execution (for completion)
- gets CU execution requests from ExecWorker
- for each CU
- executes CU command
- monitors CU execution
- on CU completion
- pushes CU to outputstaging queue (if staging is needed)
- pushes stage change notification request into update queue
class UpdateWorker
- competes for CU state update requests from update queue
- for each CU
- pushes state update (collected into bulks if possible)
- cleans CU workdir if CU is final and cleanup is requested
Agent
|
+--------------------------------------------------------
| | | | |
| | | | |
V V V V V
ExecWorker* StageinWorker* StageoutWorker* UpdateWorker* HeartbeatMonitor
|
+-------------------------------------------------
| | | | |
| | | | |
V V V V V
LRMS MPILaunchMethod TaskLaunchMethod Scheduler Spawner
NOTE:
-----
- Units are progressing through the different worker threads, where, in
general, the unit changes state when transitioning to the next thread.
The unit ownership thus *defines* the unit state (its owned by the
InputStagingWorker, it is in StagingInput state, etc), and the state
update notifications to the DB are merely informational (and can thus be
asynchronous). The updates need to be ordered though, to reflect valid and
correct state transition history.
TODO:
-----
- add option to scheduler to ignore core 0 (which hosts the agent process)
- add LRMS.partition (n) to return a set of partitioned LRMS for partial
ExecWorkers
- publish pilot slot history once on shutdown? Or once in a while when
idle? Or push continuously?
- Schedulers, LRMSs, LaunchMethods, etc need to be made threadsafe, for the
case where more than one execution worker threads are running.
- move util functions to rp.utils or ru.utils, and pull them from there
- split the agent into logical components (classes?), and install along with
RP.
- add state asserts after `queue.get ()`
- move mkdir etc from ingest thread to where its used (input staging or
execution)
- the structure of the base scheduler should be suitable for both, UMGR
scheduling and Agent scheduling. The algs will be different though,
mostly because the pilots (as targets of the umgr scheduler) have a wait
queue, but the cores (targets of the agent scheduler) have not. Is it
worthwhile to re-use the structure anyway?
- all stop() method calls need to be replaced with commands which travel
through the queues. To deliver commands timely though we either need
command prioritization (difficult), or need separate command queues...
"""
__copyright__ = "Copyright 2014, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import copy
import math
import stat
import sys
import time
import errno
import Queue
import signal
import shutil
import optparse
import logging
import hostlist
import traceback
import threading
import subprocess
import multiprocessing
import saga as rs
import radical.utils as ru
import radical.pilot as rp
import radical.pilot.utils as rpu
# ------------------------------------------------------------------------------
#
# http://stackoverflow.com/questions/9539052/python-dynamically-changing-base-classes-at-runtime-how-to
#
# Depending on agent architecture (which is specific to the resource type it
# runs on) can switch between different component types: using threaded (when
# running on the same node), multiprocessing (also for running on the same node,
# but avoiding python's threading problems, for the price of slower queues),
# and remote processes (for running components on different nodes, using zeromq
# queues for communication).
#
# We do some trickery to keep the actual components independent from the actual
# schema:
#
# - we wrap the different queue types into a rpu.Queue object
# - we change the base class of the component dynamically to the respective type
#
# This requires components to adhere to the following restrictions:
#
# - *only* communicate over queues -- no shared data with other components or
# component instances. Note that this also holds for example for the
# scheduler!
# - no shared data between the component class and its run() method. That
# includes no sharing of queues.
# - components inherit from base_component, and the constructor needs to
# register all required component-internal and -external queues with that
# base class -- the run() method can then transparently retrieve them from
# there.
#
# FIXME: static switch between thread and process rendering of exec worker.
AGENT_THREADS   = 'threading'
AGENT_PROCESSES = 'multiprocessing'

# selected rendering mode for the agent components (see comment block above)
AGENT_MODE = AGENT_THREADS

if AGENT_MODE == AGENT_THREADS :
    COMPONENT_MODE = threading
    COMPONENT_TYPE = threading.Thread
    # NOTE(review): threading mode also uses multiprocessing.Queue.  That works
    # across threads, but Queue.Queue would avoid the IPC overhead -- confirm
    # whether this is intentional.
    QUEUE_TYPE     = multiprocessing.Queue
elif AGENT_MODE == AGENT_PROCESSES :
    COMPONENT_MODE = multiprocessing
    COMPONENT_TYPE = multiprocessing.Process
    QUEUE_TYPE     = multiprocessing.Queue
else:
    raise Exception('Unknown Agent Mode')

# this needs git attribute 'ident' set for this file -- git then expands the
# marker below to the blob id on checkout
git_ident = "$Id$"
# ------------------------------------------------------------------------------
#
# DEBUGGING CONSTANTS -- only change when you know what you are doing. It is
# almost guaranteed that any changes will make the agent non-functional (if
# functionality is defined as executing a set of given CUs).
# component IDs -- these name the agent's sub-components and the queues
# connecting them, and serve as keys into the BLOWUP_FACTOR / DROP_CLONES
# tables below.
AGENT           = 'Agent'
STAGEIN_QUEUE   = 'stagein_queue'
STAGEIN_WORKER  = 'StageinWorker'
SCHEDULE_QUEUE  = 'schedule_queue'
SCHEDULER       = 'Scheduler'
EXECUTION_QUEUE = 'execution_queue'
EXEC_WORKER     = 'ExecWorker'
WATCH_QUEUE     = 'watch_queue'
WATCHER         = 'ExecWatcher'
STAGEOUT_QUEUE  = 'stageout_queue'
STAGEOUT_WORKER = 'StageoutWorker'
UPDATE_QUEUE    = 'update_queue'
UPDATE_WORKER   = 'UpdateWorker'

# Number of worker threads to start per worker component
NUMBER_OF_WORKERS = {
    STAGEIN_WORKER  : 1,
    EXEC_WORKER     : 1,
    STAGEOUT_WORKER : 1,
    UPDATE_WORKER   : 1
}
# factor by which the number of units are increased at a certain step.  Value of
# '1' will leave the units unchanged.  Any blowup will leave one unit as the
# original, and will then create clones with a changed unit ID (see blowup()).
BLOWUP_FACTOR = {
    AGENT           : 1,
    STAGEIN_QUEUE   : 1,
    STAGEIN_WORKER  : 1,
    SCHEDULE_QUEUE  : 1,
    SCHEDULER       : 1,
    EXECUTION_QUEUE : 1,
    EXEC_WORKER     : 1,
    WATCH_QUEUE     : 1,
    WATCHER         : 1,
    STAGEOUT_QUEUE  : 1,
    STAGEOUT_WORKER : 1,
    UPDATE_QUEUE    : 1,
    UPDATE_WORKER   : 1
}

# flag to drop all blown-up units at some point in the pipeline.  The units
# with the original IDs will again be left untouched, but all other units are
# silently discarded.
#   0: drop nothing
#   1: drop clones
#   2: drop everything
DROP_CLONES = {
    AGENT           : 1,
    STAGEIN_QUEUE   : 1,
    STAGEIN_WORKER  : 1,
    SCHEDULE_QUEUE  : 1,
    SCHEDULER       : 1,
    EXECUTION_QUEUE : 1,
    EXEC_WORKER     : 1,
    WATCH_QUEUE     : 1,
    WATCHER         : 1,
    STAGEOUT_QUEUE  : 1,
    STAGEOUT_WORKER : 1,
    UPDATE_QUEUE    : 1,
    UPDATE_WORKER   : 1
}
#
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# CONSTANTS
#
# 'enum' for unit launch method types
# 'enum' for unit launch method types
LAUNCH_METHOD_APRUN         = 'APRUN'
LAUNCH_METHOD_CCMRUN        = 'CCMRUN'
LAUNCH_METHOD_DPLACE        = 'DPLACE'
LAUNCH_METHOD_FORK          = 'FORK'
LAUNCH_METHOD_IBRUN         = 'IBRUN'
LAUNCH_METHOD_MPIEXEC       = 'MPIEXEC'
LAUNCH_METHOD_MPIRUN_CCMRUN = 'MPIRUN_CCMRUN'
LAUNCH_METHOD_MPIRUN_DPLACE = 'MPIRUN_DPLACE'
LAUNCH_METHOD_MPIRUN        = 'MPIRUN'
LAUNCH_METHOD_MPIRUN_RSH    = 'MPIRUN_RSH'
LAUNCH_METHOD_ORTE          = 'ORTE'
LAUNCH_METHOD_POE           = 'POE'
LAUNCH_METHOD_RUNJOB        = 'RUNJOB'
LAUNCH_METHOD_SSH           = 'SSH'

# 'enum' for local resource manager types
LRMS_NAME_CCM         = 'CCM'
LRMS_NAME_FORK        = 'FORK'
LRMS_NAME_LOADLEVELER = 'LOADL'
LRMS_NAME_LSF         = 'LSF'
LRMS_NAME_PBSPRO      = 'PBSPRO'
LRMS_NAME_SGE         = 'SGE'
LRMS_NAME_SLURM       = 'SLURM'
LRMS_NAME_TORQUE      = 'TORQUE'

# 'enum' for pilot's unit scheduler types
SCHEDULER_NAME_CONTINUOUS = "CONTINUOUS"
SCHEDULER_NAME_SCATTERED  = "SCATTERED"
SCHEDULER_NAME_TORUS      = "TORUS"

# 'enum' for pilot's unit spawner types
SPAWNER_NAME_POPEN = "POPEN"
SPAWNER_NAME_SHELL = "SHELL"

# defines for pilot commands (as stored in the pilot's db document)
COMMAND_CANCEL_PILOT        = "Cancel_Pilot"
COMMAND_CANCEL_COMPUTE_UNIT = "Cancel_Compute_Unit"
COMMAND_KEEP_ALIVE          = "Keep_Alive"
COMMAND_FIELD               = "commands"
COMMAND_TYPE                = "type"
COMMAND_ARG                 = "arg"
COMMAND_RESCHEDULE          = "Reschedule"
COMMAND_CANCEL              = "Cancel"

# 'enum' for staging action operators
COPY     = 'Copy'      # local cp
LINK     = 'Link'      # local ln -s
MOVE     = 'Move'      # local mv
TRANSFER = 'Transfer'  # saga remote transfer
                       # TODO: This might just be a special case of copy

# tri-state for unit spawn retval
OK    = 'OK'
FAIL  = 'FAIL'
RETRY = 'RETRY'

# two-state for slot occupation.
FREE = 'Free'
BUSY = 'Busy'

# default agent configuration -- extended below with the blowup/drop/worker
# tables defined above
agent_config = {
    # directory for staging files inside the agent sandbox
    'staging_area'         : 'staging_area',
    # url scheme to indicate the use of staging_area
    'staging_scheme'       : 'staging',
    # max number of cu out/err chars to push to db
    'max_io_loglength'     : 1*1024,
    # max time period to collect db requests into bulks (seconds)
    'bulk_collection_time' : 1.0,
    # time to sleep between queue polls (seconds)
    'queue_poll_sleeptime' : 0.1,
    # time to sleep between database polls (seconds)
    'db_poll_sleeptime'    : 0.1,
    # time between checks of internal state and commands from mothership (seconds)
    'heartbeat_interval'   : 10,
}
agent_config['blowup_factor']     = BLOWUP_FACTOR
agent_config['drop_clones']       = DROP_CLONES
agent_config['number_of_workers'] = NUMBER_OF_WORKERS
# ----------------------------------------------------------------------------------
#
def rec_makedir(target):
    """
    Recursively create the directory `target`, ignoring the error if it
    already exists as a directory.

    :param target: path of the directory to create
    :raises OSError: if creation fails for any other reason, or if `target`
                     exists but is not a directory (e.g. a plain file)
    """
    try:
        os.makedirs(target)
    except OSError as e:
        # Only swallow EEXIST when the *target itself* is a directory.
        # (Checking the parent directory here would silently accept a plain
        # file sitting at `target`.)
        if e.errno == errno.EEXIST and os.path.isdir(target):
            pass
        else:
            raise
# ------------------------------------------------------------------------------
#
def pilot_FAILED(mongo_p, pilot_uid, logger, message):
    """
    Mark the pilot as FAILED in the database: log `message`, append it (plus a
    resource-usage record) to the pilot's log, push a FAILED state-history
    entry, and store tails of the agent's stdout/stderr/log files.

    :param mongo_p:   pymongo collection holding the pilot document (may be
                      None, in which case only a local error is logged)
    :param pilot_uid: '_id' of the pilot document to update
    :param logger:    logger used to report the failure locally
    :param message:   human-readable failure description
    """
    logger.error(message)

    timestamp = rpu.timestamp()

    def _slurp(fname):
        # best-effort read of an agent output file; None if unreadable
        try:
            return open(fname, 'r').read()
        except:
            return None

    out = _slurp('./agent.out')
    err = _slurp('./agent.err')
    log = _slurp('./agent.log')

    log_entries = [{"message": message,          "timestamp": timestamp},
                   {"message": rpu.get_rusage(), "timestamp": timestamp}]

    if not mongo_p:
        logger.error("cannot log error state in database!")
        return

    mongo_p.update({"_id": pilot_uid},
                   {"$pushAll": {"log": log_entries},
                    "$push"   : {"statehistory": {"state"    : rp.FAILED,
                                                  "timestamp": timestamp}},
                    "$set"    : {"state"   : rp.FAILED,
                                 "stdout"  : rpu.tail(out),
                                 "stderr"  : rpu.tail(err),
                                 "logfile" : rpu.tail(log),
                                 "finished": timestamp}
                   })
# ------------------------------------------------------------------------------
#
def pilot_CANCELED(mongo_p, pilot_uid, logger, message):
    """
    Mark the pilot as CANCELED in the database: log `message`, append it (plus
    a resource-usage record) to the pilot's log, push a CANCELED state-history
    entry, and store tails of the agent's stdout/stderr/log files.

    :param mongo_p:   pymongo collection holding the pilot document (may be
                      None, in which case only a local warning is logged)
    :param pilot_uid: '_id' of the pilot document to update
    :param logger:    logger used to report the cancellation locally
    :param message:   human-readable cancellation description
    """
    logger.warning(message)

    now = rpu.timestamp()

    out = None
    err = None
    log = None

    # best-effort collection of the agent output files -- they may not exist
    try    : out = open('./agent.out', 'r').read()
    except : pass
    try    : err = open('./agent.err', 'r').read()
    except : pass
    try    : log = open('./agent.log', 'r').read()
    except : pass

    msg = [{"message": message,          "timestamp": now},
           {"message": rpu.get_rusage(), "timestamp": now}]

    # guard against a missing db connection -- consistent with pilot_FAILED
    # (previously this would raise AttributeError on mongo_p == None)
    if mongo_p:
        mongo_p.update({"_id": pilot_uid},
            {"$pushAll": {"log"         : msg},
             "$push"    : {"statehistory": {"state"    : rp.CANCELED,
                                            "timestamp": now}},
             "$set"     : {"state"      : rp.CANCELED,
                           "stdout"     : rpu.tail(out),
                           "stderr"     : rpu.tail(err),
                           "logfile"    : rpu.tail(log),
                           "finished"   : now}
            })
    else:
        logger.error("cannot log cancel state in database!")
# ------------------------------------------------------------------------------
#
def pilot_DONE(mongo_p, pilot_uid):
    """
    Mark the pilot as DONE in the database: append a 'pilot done' log entry
    (plus a resource-usage record), push a DONE state-history entry, and
    store tails of the agent's stdout/stderr/log files.

    :param mongo_p:   pymongo collection holding the pilot document (may be
                      None, in which case the update is silently skipped)
    :param pilot_uid: '_id' of the pilot document to update
    """
    now = rpu.timestamp()

    out = None
    err = None
    log = None

    # best-effort collection of the agent output files -- they may not exist
    try    : out = open('./agent.out', 'r').read()
    except : pass
    try    : err = open('./agent.err', 'r').read()
    except : pass
    try    : log = open('./agent.log', 'r').read()
    except : pass

    msg = [{"message": "pilot done",     "timestamp": now},
           {"message": rpu.get_rusage(), "timestamp": now}]

    # guard against a missing db connection -- consistent with pilot_FAILED
    # (previously this would raise AttributeError on mongo_p == None)
    if mongo_p:
        mongo_p.update({"_id": pilot_uid},
            {"$pushAll": {"log"         : msg},
             "$push"    : {"statehistory": {"state"    : rp.DONE,
                                            "timestamp": now}},
             "$set"     : {"state"      : rp.DONE,
                           "stdout"     : rpu.tail(out),
                           "stderr"     : rpu.tail(err),
                           "logfile"    : rpu.tail(log),
                           "finished"   : now}
            })
# ==============================================================================
#
# Schedulers
#
# ==============================================================================
#
class Scheduler(threading.Thread):
    """
    Base class for the agent's unit schedulers.

    A Scheduler is a thread serving the `schedule_queue`: it receives either a
    unit to place, or the COMMAND_RESCHEDULE trigger (sent by `unschedule()`
    once cores were freed).  Units which can be placed get an 'opaque_slot'
    and are forwarded to the `execution_queue`; units which cannot be placed
    yet are parked in an internal wait pool until a later reschedule succeeds.

    Subclasses implement the slot bookkeeping by overloading `_configure()`,
    `slot_status()`, `_allocate_slot()` and `_release_slot()`.  Instances are
    created (and started) via `Scheduler.create()`.
    """

    # FIXME: clarify what can be overloaded by Scheduler classes

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, lrms, schedule_queue, execution_queue,
                 update_queue):

        threading.Thread.__init__(self)

        self.name             = name
        self._config          = config
        self._log             = logger
        self._lrms            = lrms
        self._schedule_queue  = schedule_queue
        self._execution_queue = execution_queue
        self._update_queue    = update_queue

        self._terminate       = threading.Event()  # set by stop()
        self._lock            = threading.RLock()  # guards slot (de)allocation
        self._wait_pool       = list()             # units waiting for free cores
        self._wait_queue_lock = threading.RLock()  # guards _wait_pool

        rpu.prof('start')

        self._configure()

    # --------------------------------------------------------------------------
    #
    # This class-method creates the appropriate sub-class for the Scheduler.
    #
    @classmethod
    def create(cls, name, config, logger, lrms, schedule_queue, execution_queue,
               update_queue):
        """Factory: instantiate and *start* the scheduler thread for `name`."""

        # Make sure that we are the base-class!
        if cls != Scheduler:
            raise TypeError("Scheduler Factory only available to base class!")

        try:
            implementation = {
                SCHEDULER_NAME_CONTINUOUS : SchedulerContinuous,
                SCHEDULER_NAME_SCATTERED  : SchedulerScattered,
                SCHEDULER_NAME_TORUS      : SchedulerTorus
            }[name]

            impl = implementation(name, config, logger, lrms, schedule_queue,
                                  execution_queue, update_queue)
            impl.start()
            return impl

        except KeyError:
            raise ValueError("Scheduler '%s' unknown!" % name)

    # --------------------------------------------------------------------------
    #
    def stop(self):
        """Request thread termination (checked at the top of the run loop)."""
        rpu.prof ('stop request')
        self._terminate.set()

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # to be overloaded: set up subclass-specific slot bookkeeping
        raise NotImplementedError("_configure() not implemented for Scheduler '%s'." % self.name)

    # --------------------------------------------------------------------------
    #
    def slot_status(self):
        # to be overloaded: return a representation of current slot occupation
        raise NotImplementedError("slot_status() not implemented for Scheduler '%s'." % self.name)

    # --------------------------------------------------------------------------
    #
    def _allocate_slot(self, cores_requested):
        # to be overloaded: reserve cores, return an opaque slot handle or None
        raise NotImplementedError("_allocate_slot() not implemented for Scheduler '%s'." % self.name)

    # --------------------------------------------------------------------------
    #
    def _release_slot(self, opaque_slot):
        # to be overloaded: free the cores referenced by `opaque_slot`
        raise NotImplementedError("_release_slot() not implemented for Scheduler '%s'." % self.name)

    # --------------------------------------------------------------------------
    #
    def _try_allocation(self, cu):
        """
        Attempt to allocate cores for a specific CU.  If it succeeds, send the
        CU off to the ExecutionWorker.  Returns False if no allocation could
        be made (the CU remains unhandled by this call).
        """

        # needs to be locked as we try to acquire slots, but slots are freed
        # in a different thread.  But we keep the lock duration short...
        with self._lock :

            # schedule this unit, and receive an opaque handle that has meaning to
            # the LRMS, Scheduler and LaunchMethod.
            cu['opaque_slot'] = self._allocate_slot(cu['description']['cores'])

        if not cu['opaque_slot']:
            # signal the CU remains unhandled
            return False

        # got an allocation, go off and launch the process
        rpu.prof('schedule', msg="allocated", uid=cu['_id'], logger=self._log.warn)
        self._log.info (self.slot_status())

        cu_list = rpu.blowup(self._config, cu, EXECUTION_QUEUE)
        for _cu in cu_list :
            rpu.prof('put', msg="Scheduler to execution_queue (%s)" % _cu['state'], uid=_cu['_id'])
            self._execution_queue.put(_cu)

        return True

    # --------------------------------------------------------------------------
    #
    def _reschedule(self):
        """Retry allocation for all waiting units (after cores were freed)."""

        rpu.prof('reschedule')
        self._log.info("slot status before reschedule: %s" % self.slot_status())

        # cycle through wait queue, and see if we get anything running now.  We
        # cycle over a copy of the list, so that we can modify the list on the
        # fly
        for cu in self._wait_pool[:]:

            if self._try_allocation(cu):
                # NOTE: this is final, remove it from the wait queue
                with self._wait_queue_lock :
                    self._wait_pool.remove(cu)
                rpu.prof('unqueue', msg="re-allocation done", uid=cu['_id'])

        self._log.info("slot status after  reschedule: %s" % self.slot_status ())
        rpu.prof('reschedule done')

    # --------------------------------------------------------------------------
    #
    def unschedule(self, cus):
        """
        Release (for whatever reason) all slots allocated to the given CU(s),
        and trigger a reschedule attempt for waiting units.
        """

        # needs to be locked as we try to release slots, but slots are acquired
        # in a different thread....
        with self._lock :

            rpu.prof('unschedule')
            self._log.info("slot status before unschedule: %s" % self.slot_status ())

            slots_released = False

            if not isinstance(cus, list):
                cus = [cus]

            for cu in cus:
                if cu['opaque_slot']:
                    self._release_slot(cu['opaque_slot'])
                    slots_released = True

            # notify the scheduling thread of released slots
            if slots_released:
                rpu.prof('put_cmd', msg="Scheduler to schedule_queue (%s)" % COMMAND_RESCHEDULE)
                self._schedule_queue.put(COMMAND_RESCHEDULE)

            self._log.info("slot status after  unschedule: %s" % self.slot_status ())
            rpu.prof('unschedule done - reschedule')

    # --------------------------------------------------------------------------
    #
    def run(self):
        """Main loop: drain schedule_queue until stop() is called."""

        rpu.prof('run')
        while not self._terminate.is_set():

            try:

                request = self._schedule_queue.get()

                # shutdown signal
                if not request:
                    rpu.prof('get_cmd', msg="schedule_queue to Scheduler (wakeup)")
                    continue

                # we either get a new scheduled CU, or get a trigger that cores were
                # freed, and we can try to reschedule waiting CUs
                if isinstance(request, basestring):
                    command = request
                    rpu.prof('get_cmd', msg="schedule_queue to Scheduler (%s)" % command)

                    if command == COMMAND_RESCHEDULE:
                        self._reschedule()
                    else:
                        self._log.error("Unknown scheduler command: %s (ignored)", command)

                else:
                    cu = request
                    cu['state'] = rp.ALLOCATING
                    rpu.prof('get', msg="schedule_queue to Scheduler (%s)" % cu['state'], uid=cu['_id'])

                    cu_list = rpu.blowup(self._config, cu, SCHEDULER)
                    for _cu in cu_list:

                        # we got a new unit to schedule.  Either we can place
                        # it straight away and move it to execution, or we have
                        # to put it on the wait queue.
                        if not self._try_allocation(_cu):
                            # No resources available, put in wait queue
                            with self._wait_queue_lock :
                                self._wait_pool.append(_cu)
                            rpu.prof('schedule', msg="allocation failed", uid=_cu['_id'])

            except Exception as e:
                self._log.exception('Error in scheduler loop: %s', e)
                raise

            finally:
                # NOTE(review): this 'finally' is attached to the per-iteration
                # try block, so 'stop' is profiled once per processed request,
                # not once at thread exit -- confirm whether that is intended.
                rpu.prof ('stop')
# ==============================================================================
#
class SchedulerContinuous(Scheduler):
    """
    Scheduler handing out *continuous* ranges of cores, either within a single
    node or spanning node boundaries.  Slot state is kept in `self.slots`: a
    list of {'node': <name>, 'cores': [FREE|BUSY, ...]} dicts, one entry per
    node, in LRMS node-list order.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, lrms, scheduler_queue,
                 execution_queue, update_queue):

        self.slots = None  # slot table, built in _configure()

        Scheduler.__init__(self, name, config, logger, lrms, scheduler_queue,
                           execution_queue, update_queue)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        """Build the slot table from the LRMS node list (all cores FREE)."""

        if not self._lrms.node_list:
            raise RuntimeError("LRMS %s didn't _configure node_list." % self._lrms.name)

        if not self._lrms.cores_per_node:
            raise RuntimeError("LRMS %s didn't _configure cores_per_node." % self._lrms.name)

        # Slots represents the internal process management structure.
        # The structure is as follows:
        # [
        #    {'node': 'node1', 'cores': [p_1, p_2, p_3, ... , p_cores_per_node]},
        #    {'node': 'node2', 'cores': [p_1, p_2, p_3. ... , p_cores_per_node]
        # ]
        #
        # We put it in a list because we care about (and make use of) the order.
        #
        self.slots = []
        for node in self._lrms.node_list:
            self.slots.append({
                'node': node,
                # TODO: Maybe use the real core numbers in the case of
                # non-exclusive host reservations?
                'cores': [FREE for _ in range(0, self._lrms.cores_per_node)]
            })

    # --------------------------------------------------------------------------
    #
    # Convert a set of slots into an index into the global slots list
    #
    def slots2offset(self, task_slots):
        """Map task slots like ['node1:3', ...] to a global core index."""
        # TODO: This assumes all hosts have the same number of cores

        first_slot = task_slots[0]
        # Get the host and the core part
        [first_slot_host, first_slot_core] = first_slot.split(':')
        # Find the entry in the the all_slots list based on the host
        # (py2 generator .next() -- raises StopIteration for unknown hosts)
        slot_entry = (slot for slot in self.slots if slot["node"] == first_slot_host).next()
        # Transform it into an index in to the all_slots list
        all_slots_slot_index = self.slots.index(slot_entry)

        return all_slots_slot_index * self._lrms.cores_per_node + int(first_slot_core)

    # --------------------------------------------------------------------------
    #
    def slot_status(self):
        """Return a dict {'timestamp', 'slotstate'} with an ASCII rendering of
        slot occupation ('-' == FREE, '+' == BUSY, '|' == node boundary)."""

        slot_matrix = ""
        for slot in self.slots:
            slot_matrix += "|"
            for core in slot['cores']:
                if core == FREE:
                    slot_matrix += "-"
                else:
                    slot_matrix += "+"
        slot_matrix += "|"

        return {'timestamp' : rpu.timestamp(),
                'slotstate' : slot_matrix}

    # --------------------------------------------------------------------------
    #
    # (Temporary?) wrapper for acquire_slots
    #
    def _allocate_slot(self, cores_requested):

        # TODO: single_node should be enforced for e.g. non-message passing
        #       tasks, but we don't have that info here.
        if cores_requested <= self._lrms.cores_per_node:
            single_node = True
        else:
            single_node = False

        # Given that we are the continuous scheduler, this is fixed.
        # TODO: Argument can be removed altogether?
        continuous = True

        # TODO: Now we rely on "None", maybe throw an exception?
        return self._acquire_slots(cores_requested, single_node=single_node,
                                   continuous=continuous)

    # --------------------------------------------------------------------------
    #
    def _release_slot(self, (task_slots)):
        # return all cores of the given task slots to FREE
        self._change_slot_states(task_slots, FREE)

    # --------------------------------------------------------------------------
    #
    def _acquire_slots(self, cores_requested, single_node, continuous):
        """Find (and mark BUSY) a set of slots; returns None if none found."""

        #
        # Switch between searching for continuous or scattered slots
        #
        # Switch between searching for single or multi-node
        if single_node:
            if continuous:
                task_slots = self._find_slots_single_cont(cores_requested)
            else:
                raise NotImplementedError('No scattered single node scheduler implemented yet.')
        else:
            if continuous:
                task_slots = self._find_slots_multi_cont(cores_requested)
            else:
                raise NotImplementedError('No scattered multi node scheduler implemented yet.')

        if task_slots is not None:
            self._change_slot_states(task_slots, BUSY)

        return task_slots

    # --------------------------------------------------------------------------
    #
    # Find a needle (continuous sub-list) in a haystack (list)
    #
    def _find_sublist(self, haystack, needle):
        n = len(needle)
        # Find all matches (returns list of False and True for every position)
        hits = [(needle == haystack[i:i+n]) for i in xrange(len(haystack)-n+1)]
        try:
            # Grab the first occurrence
            index = hits.index(True)
        except ValueError:
            index = None

        return index

    # --------------------------------------------------------------------------
    #
    # Transform the number of cores into a continuous list of "status"es,
    # and use that to find a sub-list.
    #
    def _find_cores_cont(self, slot_cores, cores_requested, status):
        return self._find_sublist(slot_cores, [status for _ in range(cores_requested)])

    # --------------------------------------------------------------------------
    #
    # Find an available continuous slot within node boundaries.
    #
    def _find_slots_single_cont(self, cores_requested):
        """Return ['node:core', ...] for the first node with a free run of
        `cores_requested` cores, or None."""

        for slot in self.slots:
            slot_node = slot['node']
            slot_cores = slot['cores']

            slot_cores_offset = self._find_cores_cont(slot_cores, cores_requested, FREE)

            if slot_cores_offset is not None:
                self._log.info('Node %s satisfies %d cores at offset %d',
                               slot_node, cores_requested, slot_cores_offset)
                return ['%s:%d' % (slot_node, core) for core in
                        range(slot_cores_offset, slot_cores_offset + cores_requested)]

        return None

    # --------------------------------------------------------------------------
    #
    # Find an available continuous slot across node boundaries.
    #
    def _find_slots_multi_cont(self, cores_requested):
        """Return ['node:core', ...] spanning multiple nodes, or None."""

        # Convenience aliases
        cores_per_node = self._lrms.cores_per_node
        all_slots = self.slots

        # Glue all slot core lists together
        all_slot_cores = [core for node in [node['cores'] for node in all_slots] for core in node]
        # self._log.debug("all_slot_cores: %s", all_slot_cores)

        # Find the start of the first available region
        all_slots_first_core_offset = self._find_cores_cont(all_slot_cores, cores_requested, FREE)
        self._log.debug("all_slots_first_core_offset: %s", all_slots_first_core_offset)
        if all_slots_first_core_offset is None:
            return None

        # Determine the first slot in the slot list
        # (py2 integer division)
        first_slot_index = all_slots_first_core_offset / cores_per_node
        self._log.debug("first_slot_index: %s", first_slot_index)
        # And the core offset within that node
        first_slot_core_offset = all_slots_first_core_offset % cores_per_node
        self._log.debug("first_slot_core_offset: %s", first_slot_core_offset)

        # Note: We subtract one here, because counting starts at zero;
        #       Imagine a zero offset and a count of 1, the only core used
        #       would be core 0.
        #       TODO: Verify this claim :-)
        all_slots_last_core_offset = (first_slot_index * cores_per_node) +\
                                     first_slot_core_offset + cores_requested - 1
        self._log.debug("all_slots_last_core_offset: %s", all_slots_last_core_offset)
        last_slot_index = (all_slots_last_core_offset) / cores_per_node
        self._log.debug("last_slot_index: %s", last_slot_index)
        last_slot_core_offset = all_slots_last_core_offset % cores_per_node
        self._log.debug("last_slot_core_offset: %s", last_slot_core_offset)

        # Convenience aliases
        last_slot = self.slots[last_slot_index]
        self._log.debug("last_slot: %s", last_slot)
        last_node = last_slot['node']
        self._log.debug("last_node: %s", last_node)
        first_slot = self.slots[first_slot_index]
        self._log.debug("first_slot: %s", first_slot)
        first_node = first_slot['node']
        self._log.debug("first_node: %s", first_node)

        # Collect all node:core slots here
        task_slots = []

        # Add cores from first slot for this unit
        # As this is a multi-node search, we can safely assume that we go
        # from the offset all the way to the last core.
        task_slots.extend(['%s:%d' % (first_node, core) for core in
                           range(first_slot_core_offset, cores_per_node)])

        # Add all cores from "middle" slots
        for slot_index in range(first_slot_index+1, last_slot_index):
            slot_node = all_slots[slot_index]['node']
            task_slots.extend(['%s:%d' % (slot_node, core) for core in range(0, cores_per_node)])

        # Add the cores of the last slot
        task_slots.extend(['%s:%d' % (last_node, core) for core in range(0, last_slot_core_offset+1)])

        return task_slots

    # --------------------------------------------------------------------------
    #
    # Change the reserved state of slots (FREE or BUSY)
    #
    def _change_slot_states(self, task_slots, new_state):

        # Convenience alias
        all_slots = self.slots

        # logger.debug("change_slot_states: unit slots: %s", task_slots)

        for slot in task_slots:
            # logger.debug("change_slot_states: slot content: %s", slot)
            # Get the node and the core part
            [slot_node, slot_core] = slot.split(':')
            # Find the entry in the the all_slots list
            # (py2 generator .next() -- raises StopIteration for unknown nodes)
            slot_entry = (slot for slot in all_slots if slot["node"] == slot_node).next()
            # Change the state of the slot
            slot_entry['cores'][int(slot_core)] = new_state
# ==============================================================================
#
class SchedulerScattered(Scheduler):
    """
    Placeholder for a scheduler handing out scattered (non-continuous) core
    sets.  Not implemented yet: constructing it will fail in the base class'
    `_configure()` with NotImplementedError.
    """
    # FIXME: implement
    pass
# ==============================================================================
#
class SchedulerTorus(Scheduler):
    """
    Scheduler for torus-connected nodes (BlueGene-style): allocates
    rectangular sub-blocks from the LRMS-provided `torus_block`.  Only
    full-node allocations are supported -- core counts must be a multiple of
    cores_per_node.  The opaque slot handle is a (corner, sub_block_shape)
    tuple.
    """

    # TODO: Ultimately all BG/Q specifics should move out of the scheduler

    # --------------------------------------------------------------------------
    #
    # Offsets into block structure
    #
    TORUS_BLOCK_INDEX  = 0
    TORUS_BLOCK_COOR   = 1
    TORUS_BLOCK_NAME   = 2
    TORUS_BLOCK_STATUS = 3

    # --------------------------------------------------------------------------
    def __init__(self, name, config, logger, lrms, scheduler_queue,
                 execution_queue, update_queue):

        self.slots          = None  # unused placeholder (see _configure)
        self._cores_per_node = None

        Scheduler.__init__(self, name, config, logger, lrms, scheduler_queue,
                           execution_queue, update_queue)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        if not self._lrms.cores_per_node:
            raise RuntimeError("LRMS %s didn't _configure cores_per_node." % self._lrms.name)

        self._cores_per_node = self._lrms.cores_per_node

        # TODO: get rid of field below
        self.slots = 'bogus'

    # --------------------------------------------------------------------------
    #
    def slot_status(self):
        """Return a dict {'timestamp', 'slotstate'} with an ASCII rendering of
        node occupation in the torus block ('-' == FREE, '+' == BUSY)."""

        slot_matrix = ""
        for slot in self._lrms.torus_block:
            slot_matrix += "|"
            if slot[self.TORUS_BLOCK_STATUS] == FREE:
                slot_matrix += "-" * self._lrms.cores_per_node
            else:
                slot_matrix += "+" * self._lrms.cores_per_node
        slot_matrix += "|"

        return {'timestamp': rpu.timestamp(),
                'slotstate': slot_matrix}

    # --------------------------------------------------------------------------
    #
    # Allocate a number of cores
    #
    # Currently only implements full-node allocation, so core count must
    # be a multiple of cores_per_node.
    #
    def _allocate_slot(self, cores_requested):
        """Reserve a sub-block; returns (corner, shape) or None."""

        block = self._lrms.torus_block
        sub_block_shape_table = self._lrms.shape_table

        self._log.info("Trying to allocate %d core(s).", cores_requested)

        if cores_requested % self._lrms.cores_per_node:
            num_cores = int(math.ceil(cores_requested / float(self._lrms.cores_per_node))) \
                        * self._lrms.cores_per_node
            self._log.error('Core not multiple of %d, increasing to %d!',
                            self._lrms.cores_per_node, num_cores)
            # NOTE(review): num_cores is computed but never used -- the
            # num_nodes computation below still truncates cores_requested
            # (py2 integer division), so the log message above does not match
            # the actual allocation.  Confirm whether num_cores was meant here.

        num_nodes = cores_requested / self._lrms.cores_per_node

        offset = self._alloc_sub_block(block, num_nodes)

        if offset is None:
            self._log.warning('No allocation made.')
            return

        # TODO: return something else than corner location? Corner index?
        corner = block[offset][self.TORUS_BLOCK_COOR]
        sub_block_shape = sub_block_shape_table[num_nodes]

        end = self.get_last_node(corner, sub_block_shape)
        self._log.debug('Allocating sub-block of %d node(s) with dimensions %s'
                        ' at offset %d with corner %s and end %s.',
                        num_nodes, self._lrms.shape2str(sub_block_shape), offset,
                        self._lrms.loc2str(corner), self._lrms.loc2str(end))

        return corner, sub_block_shape

    # --------------------------------------------------------------------------
    #
    # Allocate a sub-block within a block
    # Currently only works with offset that are exactly the sub-block size
    #
    def _alloc_sub_block(self, block, num_nodes):
        """Find and mark BUSY `num_nodes` consecutive FREE nodes; returns the
        node offset within `block`, or None if no space is available."""

        offset = 0
        # Iterate through all nodes with offset a multiple of the sub-block size
        while True:

            # Verify the assumption (needs to be an assert?)
            if offset % num_nodes != 0:
                msg = 'Sub-block needs to start at correct offset!'
                self._log.exception(msg)
                raise ValueError(msg)
                # TODO: If we want to workaround this, the coordinates need to overflow

            not_free = False
            # Check if all nodes from offset till offset+size are FREE
            for peek in range(num_nodes):
                try:
                    if block[offset+peek][self.TORUS_BLOCK_STATUS] == BUSY:
                        # Once we find the first BUSY node we can discard this attempt
                        not_free = True
                        break
                except IndexError:
                    # NOTE(review): after logging, the scan continues without
                    # marking this attempt as failed -- confirm whether this
                    # out-of-bounds case should abort the attempt instead.
                    self._log.exception('Block out of bound. Num_nodes: %d, offset: %d, peek: %d.',
                                        num_nodes, offset, peek)

            if not_free == True:
                # No success at this offset
                self._log.info("No free nodes found at this offset: %d.", offset)

                # If we weren't the last attempt, then increase the offset and iterate again.
                if offset + num_nodes < self._block2num_nodes(block):
                    offset += num_nodes
                    continue
                else:
                    return

            else:
                # At this stage we have found a free spot!
                self._log.info("Free nodes found at this offset: %d.", offset)

                # Then mark the nodes busy
                for peek in range(num_nodes):
                    block[offset+peek][self.TORUS_BLOCK_STATUS] = BUSY

                return offset

    # --------------------------------------------------------------------------
    #
    # Return the number of nodes in a block
    #
    def _block2num_nodes(self, block):
        return len(block)

    # --------------------------------------------------------------------------
    #
    def _release_slot(self, (corner, shape)):
        # free the sub-block previously returned by _allocate_slot()
        self._free_cores(self._lrms.torus_block, corner, shape)

    # --------------------------------------------------------------------------
    #
    # Free up an allocation
    #
    def _free_cores(self, block, corner, shape):

        # Number of nodes to free
        num_nodes = self._shape2num_nodes(shape)

        # Location of where to start freeing
        offset = self.corner2offset(block, corner)

        self._log.info("Freeing %d nodes starting at %d.", num_nodes, offset)

        for peek in range(num_nodes):
            assert block[offset+peek][self.TORUS_BLOCK_STATUS] == BUSY, \
                'Block %d not Free!' % block[offset+peek]
            block[offset+peek][self.TORUS_BLOCK_STATUS] = FREE

    # --------------------------------------------------------------------------
    #
    # Follow coordinates to get the last node
    #
    def get_last_node(self, origin, shape):
        # per-dimension: last coordinate = origin + extent - 1
        ret = {}
        for dim in self._lrms.torus_dimension_labels:
            ret[dim] = origin[dim] + shape[dim] -1
        return ret

    # --------------------------------------------------------------------------
    #
    # Return the number of nodes for the given block shape
    #
    def _shape2num_nodes(self, shape):
        # product of the extents over all torus dimensions
        nodes = 1
        for dim in self._lrms.torus_dimension_labels:
            nodes *= shape[dim]
        return nodes

    # --------------------------------------------------------------------------
    #
    # Return the offset into the node list from a corner
    #
    # TODO: Can this be determined instead of searched?
    #
    def corner2offset(self, block, corner):
        # linear scan for the node whose coordinates match `corner`; note
        # that a miss returns len(block), not an error
        offset = 0
        for e in block:
            if corner == e[self.TORUS_BLOCK_COOR]:
                return offset
            offset += 1
        return offset
# ==============================================================================
#
# Launch Methods
#
# ==============================================================================
#
class LaunchMethod(object):
    """Base class for all launch methods.

    A launch method knows how to construct the command line that starts a
    (possibly multi-core) task on the node(s) assigned by the scheduler.
    Sub-classes must implement _configure() (which has to set
    self.launch_command) and construct_command().
    """

    # List of environment variables that designated Launch Methods should export
    EXPORT_ENV_VARIABLES = [
        'LD_LIBRARY_PATH',
        'PATH',
        'PYTHONPATH',
        'PYTHON_DIR',
    ]

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        """Store dependencies and run the sub-class' _configure().

        Raises RuntimeError if _configure() did not set a launch command.
        """
        self.name       = name
        self._config    = config
        self._log       = logger
        self._scheduler = scheduler

        self.launch_command = None
        self._configure()
        # TODO: This doesn't make too much sense for LM's that use multiple
        #       commands, perhaps this needs to move to per LM __init__.
        if self.launch_command is None:
            raise RuntimeError("Launch command not found for LaunchMethod '%s'" % name)

        logger.info("Discovered launch command: '%s'.", self.launch_command)

    # --------------------------------------------------------------------------
    #
    # This class-method creates the appropriate sub-class for the Launch Method.
    #
    @classmethod
    def create(cls, name, config, logger, scheduler):
        """Factory: return an instance of the launch method 'name', or None
        (with the problem logged) if the name is unknown or the method
        cannot be configured on this host.
        """
        # Make sure that we are the base-class!
        if cls != LaunchMethod:
            raise TypeError("LaunchMethod factory only available to base class!")

        # Resolve the implementation class first, so that a KeyError raised
        # by the constructor below is not misreported as an unknown launch
        # method (the original code caught both in a single try block).
        try:
            implementation = {
                LAUNCH_METHOD_APRUN         : LaunchMethodAPRUN,
                LAUNCH_METHOD_CCMRUN        : LaunchMethodCCMRUN,
                LAUNCH_METHOD_DPLACE        : LaunchMethodDPLACE,
                LAUNCH_METHOD_FORK          : LaunchMethodFORK,
                LAUNCH_METHOD_IBRUN         : LaunchMethodIBRUN,
                LAUNCH_METHOD_MPIEXEC       : LaunchMethodMPIEXEC,
                LAUNCH_METHOD_MPIRUN_CCMRUN : LaunchMethodMPIRUNCCMRUN,
                LAUNCH_METHOD_MPIRUN_DPLACE : LaunchMethodMPIRUNDPLACE,
                LAUNCH_METHOD_MPIRUN        : LaunchMethodMPIRUN,
                LAUNCH_METHOD_MPIRUN_RSH    : LaunchMethodMPIRUNRSH,
                LAUNCH_METHOD_ORTE          : LaunchMethodORTE,
                LAUNCH_METHOD_POE           : LaunchMethodPOE,
                LAUNCH_METHOD_RUNJOB        : LaunchMethodRUNJOB,
                LAUNCH_METHOD_SSH           : LaunchMethodSSH
            }[name]
        except KeyError:
            logger.exception("LaunchMethod '%s' unknown!" % name)
            return None

        try:
            return implementation(name, config, logger, scheduler)
        except Exception as e:
            logger.exception("LaunchMethod cannot be used: %s!" % e)
            return None

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        """Sub-classes must set self.launch_command here."""
        raise NotImplementedError("_configure() not implemented for LaunchMethod: %s." % self.name)

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, opaque_slot):
        """Sub-classes must return a (command, hop_command) tuple."""
        raise NotImplementedError("construct_command() not implemented for LaunchMethod: %s." % self.name)

    # --------------------------------------------------------------------------
    #
    def _find_executable(self, names):
        """Takes a (list of) name(s) and looks for an executable in the path.

        Returns the first match, or None if none of the names is found.
        """
        if not isinstance(names, list):
            names = [names]

        for name in names:
            ret = self._which(name)
            if ret is not None:
                return ret

        return None

    # --------------------------------------------------------------------------
    #
    def _which(self, program):
        """Finds the location of an executable.

        If 'program' contains a path separator it is checked directly,
        otherwise each entry of $PATH is searched.  Returns None if no
        executable file is found.

        Taken from:
        http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
        """
        # ----------------------------------------------------------------------
        #
        def is_exe(fpath):
            # An executable must be a regular file with the execute bit set.
            return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

        fpath, _ = os.path.split(program)
        if fpath:
            if is_exe(program):
                return program
        else:
            for path in os.environ["PATH"].split(os.pathsep):
                exe_file = os.path.join(path, program)
                if is_exe(exe_file):
                    return exe_file
        return None
# ==============================================================================
#
class LaunchMethodFORK(LaunchMethod):
    """Launch method for plain local (forked) execution.

    No launcher executable is involved: the task command line is used as-is.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # "Regular" tasks need no launcher prefix at all.
        self.launch_command = ''

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, opaque_slot):
        """Return (command, hop): the executable plus its arguments, no hop."""
        command = task_exec if not task_args else " ".join([task_exec, task_args])
        return command, None
# ==============================================================================
#
class LaunchMethodMPIRUN(LaunchMethod):
    """Launch method using a generic 'mpirun' (or a site specific variant)."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # Try the generic name first, then site / OS specific variants.
        self.launch_command = self._find_executable([
            'mpirun',            # General case
            'mpirun_rsh',        # Gordon @ SDSC
            'mpirun-mpich-mp',   # Mac OSX MacPorts
            'mpirun-openmpi-mp'  # Mac OSX MacPorts
        ])

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Return (command, hop) running the task under mpirun on the slots.

        Note: the Python2-only parenthesized parameter '(task_slots)' was
        replaced by a plain parameter (PEP 3113); call semantics unchanged.
        """
        if task_args:
            task_command = " ".join([task_exec, task_args])
        else:
            task_command = task_exec

        # Construct the hosts_string from the 'host:core' slot entries.
        hosts_string = ",".join([slot.split(':')[0] for slot in task_slots])

        # Forward selected environment variables to the remote ranks.
        export_vars = ' '.join(['-x ' + var for var in self.EXPORT_ENV_VARIABLES
                                if var in os.environ])

        mpirun_command = "%s %s -np %s -host %s %s" % (
            self.launch_command, export_vars, task_numcores, hosts_string, task_command)

        return mpirun_command, None
# ==============================================================================
#
class LaunchMethodSSH(LaunchMethod):
    """Launch method which executes the launch script on a remote node via ssh."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # Find ssh command
        command = self._which('ssh')

        if command is not None:
            # Some MPI environments (e.g. SGE) put a link to rsh as "ssh" into
            # the path. We try to detect that and then use different arguments.
            if os.path.islink(command):
                target = os.path.realpath(command)
                if os.path.basename(target) == 'rsh':
                    self._log.info('Detected that "ssh" is a link to "rsh".')
                    # Bug fix: the original code *returned* 'target' here, but
                    # __init__ ignores _configure()'s return value, so
                    # launch_command stayed None and __init__ raised.  Assign
                    # the rsh binary instead (rsh does not understand the
                    # ssh-only option added below).
                    self.launch_command = target
                    return

            command = '%s -o StrictHostKeyChecking=no' % command

        self.launch_command = command

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Return (task_command, ssh_hop_cmd).

        The hop command runs the launch script on the first slot's host;
        the task command is executed by that script.  The Python2-only
        parenthesized parameter '(task_slots)' was replaced (PEP 3113).
        """
        if not launch_script_hop :
            raise ValueError ("LaunchMethodSSH.construct_command needs launch_script_hop!")

        # Get the host of the first entry in the acquired slot
        host = task_slots[0].split(':')[0]

        if task_args:
            task_command = " ".join([task_exec, task_args])
        else:
            task_command = task_exec

        # Command line to execute launch script via ssh on host
        ssh_hop_cmd = "%s %s %s" % (self.launch_command, host, launch_script_hop)

        # Special case, return a tuple that overrides the default command line.
        return task_command, ssh_hop_cmd
# ==============================================================================
#
class LaunchMethodMPIEXEC(LaunchMethod):
    """Launch method using 'mpiexec' (e.g. on SuperMUC)."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # mpiexec (e.g. on SuperMUC)
        self.launch_command = self._find_executable([
            'mpiexec',            # General case
            'mpiexec-mpich-mp',   # Mac OSX MacPorts
            'mpiexec-openmpi-mp'  # Mac OSX MacPorts
        ])

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Return (command, hop) running the task under mpiexec.

        Note: the Python2-only parenthesized parameter '(task_slots)' was
        replaced by a plain parameter (PEP 3113); call semantics unchanged.
        """
        # Construct the hosts_string from the 'host:core' slot entries.
        hosts_string = ",".join([slot.split(':')[0] for slot in task_slots])

        # Construct the executable and arguments
        if task_args:
            task_command = " ".join([task_exec, task_args])
        else:
            task_command = task_exec

        mpiexec_command = "%s -n %s -host %s %s" % (
            self.launch_command, task_numcores, hosts_string, task_command)

        return mpiexec_command, None
# ==============================================================================
#
class LaunchMethodAPRUN(LaunchMethod):
    """Launch method for 'aprun', the ALPS application launcher on Cray systems."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # aprun: job launcher for Cray systems
        self.launch_command = self._which('aprun')
        # TODO: ensure that only one concurrent aprun per node is executed!

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, opaque_slot):
        """Return (command, hop) running the task under 'aprun -n <cores>'."""
        task_command = task_exec if not task_args else " ".join([task_exec, task_args])
        aprun_command = "%s -n %d %s" % (self.launch_command, task_numcores, task_command)
        return aprun_command, None
# ==============================================================================
#
class LaunchMethodCCMRUN(LaunchMethod):
    """Launch method for 'ccmrun', Cray's Cluster Compatibility Mode launcher."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # ccmrun: Cluster Compatibility Mode (CCM) job launcher for Cray systems
        self.launch_command = self._which('ccmrun')

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, opaque_slot):
        """Return (command, hop) running the task under 'ccmrun -n <cores>'."""
        task_command = task_exec if not task_args else " ".join([task_exec, task_args])
        ccmrun_command = "%s -n %d %s" % (self.launch_command, task_numcores, task_command)
        return ccmrun_command, None
# ==============================================================================
#
class LaunchMethodMPIRUNCCMRUN(LaunchMethod):
    """Launch method chaining 'ccmrun' and 'mpirun' on Cray CCM systems."""
    # TODO: This needs both mpirun and ccmrun

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # ccmrun: Cluster Compatibility Mode job launcher for Cray systems
        self.launch_command= self._which('ccmrun')

        self.mpirun_command = self._which('mpirun')
        if not self.mpirun_command:
            raise RuntimeError("mpirun not found!")

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Return (command, hop) running 'ccmrun mpirun ...' on the slots.

        Note: the Python2-only parenthesized parameter '(task_slots)' was
        replaced by a plain parameter (PEP 3113); call semantics unchanged.
        """
        if task_args:
            task_command = " ".join([task_exec, task_args])
        else:
            task_command = task_exec

        # Construct the hosts_string
        # TODO: is there any use in using $HOME/.crayccm/ccm_nodelist.$JOBID?
        hosts_string = ",".join([slot.split(':')[0] for slot in task_slots])

        # Forward selected environment variables to the remote ranks.
        export_vars = ' '.join(['-x ' + var for var in self.EXPORT_ENV_VARIABLES
                                if var in os.environ])

        mpirun_ccmrun_command = "%s %s %s -np %d -host %s %s" % (
            self.launch_command, self.mpirun_command, export_vars,
            task_numcores, hosts_string, task_command)

        return mpirun_ccmrun_command, None
# ==============================================================================
#
class LaunchMethodRUNJOB(LaunchMethod):
    """Launch method for 'runjob', the job launcher on IBM BG/Q systems (e.g. Joule)."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # runjob: job launcher for IBM BG/Q systems, e.g. Joule
        self.launch_command = self._which('runjob')

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, opaque_slot):
        """Return (command, hop) running the task on a BG/Q sub-block.

        'opaque_slot' is the (corner, sub_block_shape) tuple produced by the
        scheduler.  The Python2-only tuple parameter '(corner,
        sub_block_shape)' was replaced by explicit unpacking (PEP 3113);
        callers still pass the same single positional argument.
        """
        corner, sub_block_shape = opaque_slot

        if task_numcores % self._scheduler._lrms.cores_per_node:
            msg = "Num cores (%d) is not a multiple of %d!" % (
                task_numcores, self._scheduler._lrms.cores_per_node)
            self._log.exception(msg)
            raise ValueError(msg)

        # Runjob it is!
        runjob_command = self.launch_command

        # Set the number of tasks/ranks per node
        # TODO: Currently hardcoded, this should be configurable,
        #       but I don't see how, this would be a leaky abstraction.
        runjob_command += ' --ranks-per-node %d' % min(self._scheduler._lrms.cores_per_node, task_numcores)

        # Run this subjob in the block communicated by LoadLeveler
        runjob_command += ' --block %s' % self._scheduler._lrms.loadl_bg_block

        # Translate the corner coordinate into the corresponding node name.
        corner_offset = self._scheduler.corner2offset(self._scheduler._lrms.torus_block, corner)
        corner_node = self._scheduler._lrms.torus_block[corner_offset][self._scheduler.TORUS_BLOCK_NAME]
        runjob_command += ' --corner %s' % corner_node

        # convert the shape
        runjob_command += ' --shape %s' % self._scheduler._lrms.shape2str(sub_block_shape)

        # runjob needs the full path to the executable
        if os.path.basename(task_exec) == task_exec:
            # Use `which` with back-ticks as the executable,
            # will be expanded in the shell script.
            task_exec = '`which %s`' % task_exec
            # Note: We can't use the expansion from here,
            #       as the pre-execs of the CU aren't run yet!!

        # And finally add the executable and the arguments
        # usage: runjob <runjob flags> : /bin/hostname -f
        runjob_command += ' : %s' % task_exec
        if task_args:
            runjob_command += ' %s' % task_args

        return runjob_command, None
# ==============================================================================
#
class LaunchMethodDPLACE(LaunchMethod):
    """Launch method for 'dplace', the process placement tool on SGI systems."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # dplace: job launcher for SGI systems (e.g. on Blacklight)
        self.launch_command = self._which('dplace')

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Return (command, hop) pinning the task to a contiguous core range.

        Note: the Python2-only parenthesized parameter '(task_slots)' was
        replaced by a plain parameter (PEP 3113); call semantics unchanged.
        """
        if task_args:
            task_command = " ".join([task_exec, task_args])
        else:
            task_command = task_exec

        # The scheduler translates the slots into a global core offset.
        dplace_offset = self._scheduler.slots2offset(task_slots)

        dplace_command = "%s -c %d-%d %s" % (
            self.launch_command, dplace_offset,
            dplace_offset+task_numcores-1, task_command)

        return dplace_command, None
# ==============================================================================
#
class LaunchMethodMPIRUNRSH(LaunchMethod):
    """Launch method for 'mpirun_rsh' (MVAPICH2, e.g. Gordon@SDSC, Stampede@TACC)."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # mpirun_rsh (e.g. on Gordon@SDSC, Stampede@TACC)
        if not self._which('mpirun_rsh'):
            raise Exception("mpirun_rsh could not be found")

        # We don't use the full pathname as the user might load a different
        # compiler / MPI library suite from his CU pre_exec that requires
        # the launcher from that version, as experienced on stampede in #572.
        self.launch_command = 'mpirun_rsh'

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Return (command, hop) running the task under mpirun_rsh.

        Note: the Python2-only parenthesized parameter '(task_slots)' was
        replaced by a plain parameter (PEP 3113); call semantics unchanged.
        """
        if task_args:
            task_command = " ".join([task_exec, task_args])
        else:
            task_command = task_exec

        # Construct the hosts_string ('h1 h2 .. hN')
        hosts_string = " ".join([slot.split(':')[0] for slot in task_slots])

        # mpirun_rsh takes 'VAR=$VAR' style environment forwarding.
        export_vars = ' '.join([var+"=$"+var for var in self.EXPORT_ENV_VARIABLES
                                if var in os.environ])

        mpirun_rsh_command = "%s -np %s %s %s %s" % (
            self.launch_command, task_numcores, hosts_string, export_vars, task_command)

        return mpirun_rsh_command, None
# ==============================================================================
#
class LaunchMethodMPIRUNDPLACE(LaunchMethod):
    """Launch method chaining 'mpirun' and 'dplace' on SGI systems."""
    # TODO: This needs both mpirun and dplace

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # dplace: job launcher for SGI systems (e.g. on Blacklight)
        self.launch_command = self._which('dplace')
        self.mpirun_command = self._which('mpirun')
        # Fail fast if mpirun is missing (consistent with
        # LaunchMethodMPIRUNCCMRUN); otherwise the constructed command
        # line below would start with the string 'None'.
        if not self.mpirun_command:
            raise RuntimeError("mpirun not found!")

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Return (command, hop) running 'mpirun ... dplace ...' for the task.

        Note: the Python2-only parenthesized parameter '(task_slots)' was
        replaced by a plain parameter (PEP 3113); call semantics unchanged.
        """
        if task_args:
            task_command = " ".join([task_exec, task_args])
        else:
            task_command = task_exec

        # The scheduler translates the slots into a global core offset.
        dplace_offset = self._scheduler.slots2offset(task_slots)

        mpirun_dplace_command = "%s -np %d %s -c %d-%d %s" % \
            (self.mpirun_command, task_numcores, self.launch_command,
             dplace_offset, dplace_offset+task_numcores-1, task_command)

        return mpirun_dplace_command, None
# ==============================================================================
#
class LaunchMethodIBRUN(LaunchMethod):
    """Launch method for 'ibrun', TACC's wrapper around mpirun."""
    # NOTE: Don't think that with IBRUN it is possible to have
    # processes != cores ...

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # ibrun: wrapper for mpirun at TACC
        self.launch_command = self._which('ibrun')

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Return (command, hop) running the task under 'ibrun -n ... -o ...'.

        Note: the Python2-only parenthesized parameter '(task_slots)' was
        replaced by a plain parameter (PEP 3113); call semantics unchanged.
        """
        if task_args:
            task_command = " ".join([task_exec, task_args])
        else:
            task_command = task_exec

        # The scheduler translates the slots into a global core offset.
        ibrun_offset = self._scheduler.slots2offset(task_slots)

        ibrun_command = "%s -n %s -o %d %s" % \
            (self.launch_command, task_numcores,
             ibrun_offset, task_command)

        return ibrun_command, None
# ==============================================================================
#
# NOTE: This requires a development version of Open MPI available.
#
class LaunchMethodORTE(LaunchMethod):
    """Launch method submitting tasks to a persistent ORTE DVM via 'orte-submit'.

    _configure() starts the 'orte-dvm' daemon once and records its URI;
    construct_command() then submits tasks against that DVM.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        self.launch_command = self._which('orte-submit')

        dvm_command = self._which('orte-dvm')
        if not dvm_command:
            raise Exception("Couldn't find orte-dvm")

        # Use (g)stdbuf to disable buffering.
        # We need this to get the "DVM ready",
        # without waiting for orte-dvm to complete.
        # The command seems to be generally available on our Cray's,
        # if not, we can code some home-coooked pty stuff.
        stdbuf_cmd = self._find_executable(['stdbuf', 'gstdbuf'])
        if not stdbuf_cmd:
            raise Exception("Couldn't find (g)stdbuf")
        stdbuf_arg = "-oL"

        self._log.info("Starting ORTE DVM ...")
        self._dvm_process = subprocess.Popen(
            [stdbuf_cmd, stdbuf_arg, dvm_command],
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        )

        # Read the DVM's output until it reports its URI and readiness.
        self._dvmuri = None
        while True:
            line = self._dvm_process.stdout.readline().strip()

            if line.startswith('VMURI:'):
                if len(line.split(' ')) != 2:
                    raise Exception("Unknown VMURI format: %s" % line)
                label, self._dvmuri = line.split(' ', 1)
                if label != 'VMURI:':
                    raise Exception("Unknown VMURI format: %s" % line)
                self._log.info("ORTE DVM URI: %s" % self._dvmuri)

            elif line == 'DVM ready':
                if not self._dvmuri:
                    raise Exception("VMURI not found!")
                self._log.info("ORTE DVM startup successful!")
                break

            else:
                # Check if the process is still around,
                # and log output in debug mode.
                # Bug fix: Popen.poll() returns None while the process runs;
                # the original truthiness test ('not poll()') also treated a
                # clean exit (return code 0) as "still running".
                if self._dvm_process.poll() is None:
                    self._log.debug("ORTE: %s" % line)
                else:
                    # Process is gone: fatal!
                    raise Exception("ORTE DVM process disappeared")

        # TODO: Create teardown() function for LaunchMethod's (in this case to terminate the dvm)
        #subprocess.Popen([self.launch_command, "--hnp", orte_vm_uri_filename, "--terminate"])

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Return (command, hop) submitting the task to the running DVM.

        Note: the Python2-only parenthesized parameter '(task_slots)' was
        replaced by a plain parameter (PEP 3113); call semantics unchanged.
        """
        if task_args:
            task_command = " ".join([task_exec, task_args])
        else:
            task_command = task_exec

        # Construct the hosts_string from the 'host:core' slot entries.
        hosts_string = ",".join([slot.split(':')[0] for slot in task_slots])

        # Forward selected environment variables to the remote ranks.
        export_vars = ' '.join(['-x ' + var for var in self.EXPORT_ENV_VARIABLES
                                if var in os.environ])

        orte_command = '%s --hnp "%s" %s -np %s -host %s %s' % (
            self.launch_command, self._dvmuri, export_vars, task_numcores, hosts_string, task_command)

        return orte_command, None
# ==============================================================================
#
class LaunchMethodPOE(LaunchMethod):
    """Launch method for 'poe', the LSF-specific MPI wrapper (e.g. Yellowstone)."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, scheduler):
        LaunchMethod.__init__(self, name, config, logger, scheduler)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # poe: LSF specific wrapper for MPI (e.g. yellowstone)
        self.launch_command = self._which('poe')

    # --------------------------------------------------------------------------
    #
    def construct_command(self, task_exec, task_args, task_numcores,
                          launch_script_hop, task_slots):
        """Return (command, hop) running the task under poe.

        Note: the Python2-only parenthesized parameter '(task_slots)' was
        replaced by a plain parameter (PEP 3113); call semantics unchanged.
        """
        # Count slots per host in provided slots description.
        hosts = {}
        for slot in task_slots:
            host = slot.split(':')[0]
            if host not in hosts:
                hosts[host] = 1
            else:
                hosts[host] += 1

        # Create a string with format: "hostA nA hostB nB ... "
        hosts_string = ''
        for host in hosts:
            hosts_string += '%s %d ' % (host, hosts[host])

        if task_args:
            task_command = " ".join([task_exec, task_args])
        else:
            task_command = task_exec

        # Override the LSB_MCPU_HOSTS env variable as this is set by
        # default to the size of the whole pilot.
        poe_command = 'LSB_MCPU_HOSTS="%s" %s %s' % (
            hosts_string, self.launch_command, task_command)

        return poe_command, None
# ==============================================================================
#
# Base class for LRMS implementations.
#
# ==============================================================================
#
class LRMS(object):
    """Base class for LRMS (batch system) implementations.

    Sub-classes must implement _configure(), which has to populate
    self.node_list and self.cores_per_node from the batch system
    environment.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, requested_cores):
        """Configure the LRMS and verify that the allocation is large enough.

        Raises ValueError if fewer cores are available than requested
        (unless RADICAL_PILOT_PROFILE is set).
        """
        self.name            = name
        self._config         = config
        self._log            = logger
        self.requested_cores = requested_cores

        self._log.info("Configuring LRMS %s.", self.name)

        self.slot_list      = []
        self.node_list      = []
        self.cores_per_node = None

        self._configure()

        logger.info("Discovered execution environment: %s", self.node_list)

        # For now assume that all nodes have equal amount of cores
        cores_avail = len(self.node_list) * self.cores_per_node
        if 'RADICAL_PILOT_PROFILE' not in os.environ:
            if cores_avail < int(requested_cores):
                raise ValueError("Not enough cores available (%s) to satisfy allocation request (%s)." \
                                % (str(cores_avail), str(requested_cores)))

    # --------------------------------------------------------------------------
    #
    # This class-method creates the appropriate sub-class for the LRMS.
    #
    @classmethod
    def create(cls, name, config, logger, requested_cores):
        """Factory: return an instance of the LRMS implementation 'name'."""
        # TODO: Core counts dont have to be the same number for all hosts.
        # TODO: We might not have reserved the whole node.
        # TODO: Given that the Agent can determine the real core count, in
        #       principle we could just ignore the config and use as many as we
        #       have to our availability (taken into account that we might not
        #       have the full node reserved of course)
        #       Answer: at least on Yellowstone this doesnt work for MPI,
        #       as you can't spawn more tasks then the number of slots.

        # Make sure that we are the base-class!
        if cls != LRMS:
            raise TypeError("LRMS Factory only available to base class!")

        # Resolve the implementation class first, so that a KeyError raised
        # by the constructor below is not misreported as an unknown LRMS
        # type (the original code caught both in a single try block).
        try:
            implementation = {
                LRMS_NAME_CCM         : CCMLRMS,
                LRMS_NAME_FORK        : ForkLRMS,
                LRMS_NAME_LOADLEVELER : LoadLevelerLRMS,
                LRMS_NAME_LSF         : LSFLRMS,
                LRMS_NAME_PBSPRO      : PBSProLRMS,
                LRMS_NAME_SGE         : SGELRMS,
                LRMS_NAME_SLURM       : SLURMLRMS,
                LRMS_NAME_TORQUE      : TORQUELRMS
            }[name]
        except KeyError:
            raise RuntimeError("LRMS type '%s' unknown!" % name)

        return implementation(name, config, logger, requested_cores)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        """Sub-classes must set self.node_list and self.cores_per_node here."""
        raise NotImplementedError("_Configure not implemented for LRMS type: %s." % self.name)
# ==============================================================================
#
class CCMLRMS(LRMS):
    """LRMS for Cray's Cluster Compatibility Mode (CCM).

    Node list and cores-per-node are derived from the most recently
    written nodefile in ~/.crayccm.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, requested_cores):
        LRMS.__init__(self, name, config, logger, requested_cores)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        self._log.info("Configured to run on system with %s.", self.name)

        CCM_NODEFILE_DIR = os.path.expanduser('~/.crayccm')

        # Use a list comprehension instead of filter(): under Python 3
        # filter() returns a lazy object whose truth value is always True,
        # which would break the emptiness check below.  Behavior under
        # Python 2 is identical.
        ccm_nodefile_list = [x for x in os.listdir(CCM_NODEFILE_DIR)
                             if x.startswith('ccm_nodelist')]
        if not ccm_nodefile_list:
            raise Exception("No CCM nodefiles found in: %s." % CCM_NODEFILE_DIR)

        # Pick the most recently modified nodefile.
        ccm_nodefile_name = max(ccm_nodefile_list, key=lambda x:
                                os.stat(os.path.join(CCM_NODEFILE_DIR, x)).st_mtime)
        ccm_nodefile = os.path.join(CCM_NODEFILE_DIR, ccm_nodefile_name)

        # Sanity check: this host must itself appear in the nodefile.
        hostname = os.uname()[1]
        if not hostname in open(ccm_nodefile).read():
            raise RuntimeError("Using the most recent CCM nodefile (%s),"
                               " but I (%s) am not in it!" % (ccm_nodefile, hostname))

        # Parse the CCM nodefile
        ccm_nodes = [line.strip() for line in open(ccm_nodefile)]
        self._log.info("Found CCM nodefile: %s.", ccm_nodefile)

        # Get the number of raw entries
        ccm_nodes_length = len(ccm_nodes)

        # Unique nodes
        ccm_node_list        = list(set(ccm_nodes))
        ccm_node_list_length = len(ccm_node_list)

        # Use floor division: under Python 3 '/' would make cores_per_node
        # a float; '//' behaves identically for ints under Python 2.
        self.cores_per_node = ccm_nodes_length // ccm_node_list_length
        self.node_list      = ccm_node_list
# ==============================================================================
#
class TORQUELRMS(LRMS):
    """LRMS for Torque/PBS clusters.

    Node list and cores-per-node are derived from $PBS_NODEFILE and,
    depending on the Torque version, from $PBS_NCPUS, $PBS_NUM_NODES and
    $PBS_NUM_PPN (with $SAGA_PPN as a last-resort fallback).
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, requested_cores):
        LRMS.__init__(self, name, config, logger, requested_cores)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        self._log.info("Configured to run on system with %s.", self.name)

        torque_nodefile = os.environ.get('PBS_NODEFILE')
        if torque_nodefile is None:
            msg = "$PBS_NODEFILE not set!"
            self._log.error(msg)
            raise RuntimeError(msg)

        # Parse PBS the nodefile
        torque_nodes = [line.strip() for line in open(torque_nodefile)]
        self._log.info("Found Torque PBS_NODEFILE %s: %s", torque_nodefile, torque_nodes)

        # Number of cpus involved in allocation
        val = os.environ.get('PBS_NCPUS')
        if val:
            torque_num_cpus = int(val)
        else:
            msg = "$PBS_NCPUS not set! (new Torque version?)"
            torque_num_cpus = None
            self._log.warning(msg)

        # Number of nodes involved in allocation
        val = os.environ.get('PBS_NUM_NODES')
        if val:
            torque_num_nodes = int(val)
        else:
            msg = "$PBS_NUM_NODES not set! (old Torque version?)"
            torque_num_nodes = None
            self._log.warning(msg)

        # Number of cores (processors) per node
        val = os.environ.get('PBS_NUM_PPN')
        if val:
            torque_cores_per_node = int(val)
        else:
            msg = "$PBS_NUM_PPN is not set!"
            torque_cores_per_node = None
            self._log.warning(msg)

        if torque_cores_per_node in [None, 1]:
            # lets see if SAGA has been forthcoming with some information
            self._log.warning("fall back to $SAGA_PPN : %s", os.environ.get('SAGA_PPN', None))
            # Bug fix: the original called int() on the fallback value even
            # when $SAGA_PPN was unset, raising TypeError when
            # torque_cores_per_node was None.  Only convert when a value
            # is actually present.
            saga_ppn = os.environ.get('SAGA_PPN')
            if saga_ppn:
                torque_cores_per_node = int(saga_ppn)

        # Number of entries in nodefile should be PBS_NUM_NODES * PBS_NUM_PPN
        torque_nodes_length = len(torque_nodes)
        torque_node_list    = list(set(torque_nodes))

        # if torque_num_nodes and torque_cores_per_node and \
        #     torque_nodes_length < torque_num_nodes * torque_cores_per_node:
        #     msg = "Number of entries in $PBS_NODEFILE (%s) does not match with $PBS_NUM_NODES*$PBS_NUM_PPN (%s*%s)" % \
        #         (torque_nodes_length, torque_num_nodes, torque_cores_per_node)
        #     raise RuntimeError(msg)

        # only unique node names
        torque_node_list_length = len(torque_node_list)
        self._log.debug("Node list: %s(%d)", torque_node_list, torque_node_list_length)

        if torque_num_nodes and torque_cores_per_node:
            # Modern style Torque
            self.cores_per_node = torque_cores_per_node
        elif torque_num_cpus:
            # Blacklight style (TORQUE-2.3.13)
            self.cores_per_node = torque_num_cpus
        else:
            # Old style Torque (Should we just use this for all versions?)
            # Floor division: behaves as before under Python 2, avoids a
            # float result under Python 3.
            self.cores_per_node = torque_nodes_length // torque_node_list_length
        self.node_list = torque_node_list
# ==============================================================================
#
class PBSProLRMS(LRMS):
    # LRMS for PBS Professional clusters.  Node list and cores-per-node are
    # derived from $PBS_NODEFILE, $NUM_PPN, $NODE_COUNT, $NUM_PES and from
    # the 'exec_vnode' entry of 'qstat -f'.
    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, requested_cores):
        # All setup (including _configure) happens in the base class.
        LRMS.__init__(self, name, config, logger, requested_cores)
    # --------------------------------------------------------------------------
    #
    def _configure(self):
        """Populate self.node_list and self.cores_per_node from the PBSPro
        environment; raises RuntimeError when a required variable is unset.
        """
        # TODO: $NCPUS?!?! = 1 on archer
        pbspro_nodefile = os.environ.get('PBS_NODEFILE')
        if pbspro_nodefile is None:
            msg = "$PBS_NODEFILE not set!"
            self._log.error(msg)
            raise RuntimeError(msg)
        self._log.info("Found PBSPro $PBS_NODEFILE %s." % pbspro_nodefile)
        # Dont need to parse the content of nodefile for PBSPRO, only the length
        # is interesting, as there are only duplicate entries in it.
        pbspro_nodes_length = len([line.strip() for line in open(pbspro_nodefile)])
        # Number of Processors per Node
        val = os.environ.get('NUM_PPN')
        if val:
            pbspro_num_ppn = int(val)
        else:
            msg = "$NUM_PPN not set!"
            self._log.error(msg)
            raise RuntimeError(msg)
        # Number of Nodes allocated
        val = os.environ.get('NODE_COUNT')
        if val:
            pbspro_node_count = int(val)
        else:
            msg = "$NODE_COUNT not set!"
            self._log.error(msg)
            raise RuntimeError(msg)
        # Number of Parallel Environments
        val = os.environ.get('NUM_PES')
        if val:
            pbspro_num_pes = int(val)
        else:
            msg = "$NUM_PES not set!"
            self._log.error(msg)
            raise RuntimeError(msg)
        # Physical node names come from 'qstat -f', not the nodefile.
        pbspro_vnodes = self._parse_pbspro_vnodes()
        # Verify that $NUM_PES == $NODE_COUNT * $NUM_PPN == len($PBS_NODEFILE)
        if not (pbspro_node_count * pbspro_num_ppn == pbspro_num_pes == pbspro_nodes_length):
            self._log.warning("NUM_PES != NODE_COUNT * NUM_PPN != len($PBS_NODEFILE)")
        self.cores_per_node = pbspro_num_ppn
        self.node_list = pbspro_vnodes
    # --------------------------------------------------------------------------
    #
    def _parse_pbspro_vnodes(self):
        """Return the list of unique physical node names for this job,
        extracted from the (possibly multi-line) 'exec_vnode' entry of
        'qstat -f <jobid>'.
        """
        # PBS Job ID
        val = os.environ.get('PBS_JOBID')
        if val:
            pbspro_jobid = val
        else:
            msg = "$PBS_JOBID not set!"
            self._log.error(msg)
            raise RuntimeError(msg)
        # Get the output of qstat -f for this job
        # NOTE(review): under Python 3 check_output() returns bytes; this
        # code assumes Python 2 str semantics for the string operations
        # below -- confirm before porting.
        output = subprocess.check_output(["qstat", "-f", pbspro_jobid])
        # Get the (multiline) 'exec_vnode' entry
        vnodes_str = ''
        for line in output.splitlines():
            # Detect start of entry
            if 'exec_vnode = ' in line:
                vnodes_str += line.strip()
            elif vnodes_str:
                # Find continuing lines
                if " = " not in line:
                    vnodes_str += line.strip()
                else:
                    break
        # Get the RHS of the entry
        rhs = vnodes_str.split('=',1)[1].strip()
        self._log.debug("input: %s", rhs)
        nodes_list = []
        # Break up the individual node partitions into vnode slices
        while True:
            # On the last partition find() yields -1, so rhs[1:idx] strips
            # the surrounding parentheses of that final '(...)' group and
            # the loop terminates via the idx < 0 check below.
            idx = rhs.find(')+(')
            node_str = rhs[1:idx]
            nodes_list.append(node_str)
            rhs = rhs[idx+2:]
            if idx < 0:
                break
        vnodes_list = []
        cpus_list = []
        # Split out the slices into vnode name and cpu count
        for node_str in nodes_list:
            slices = node_str.split('+')
            for _slice in slices:
                # Each slice looks like 'vnodename:ncpus=N'.
                vnode, cpus = _slice.split(':')
                cpus = int(cpus.split('=')[1])
                self._log.debug("vnode: %s cpus: %s", vnode, cpus)
                vnodes_list.append(vnode)
                cpus_list.append(cpus)
        self._log.debug("vnodes: %s", vnodes_list)
        self._log.debug("cpus: %s", cpus_list)
        cpus_list = list(set(cpus_list))
        min_cpus = int(min(cpus_list))
        if len(cpus_list) > 1:
            self._log.debug("Detected vnodes of different sizes: %s, the minimal is: %d.", cpus_list, min_cpus)
        node_list = []
        for vnode in vnodes_list:
            # strip the last _0 of the vnodes to get the node name
            node_list.append(vnode.rsplit('_', 1)[0])
        # only unique node names
        node_list = list(set(node_list))
        self._log.debug("Node list: %s", node_list)
        # Return the list of node names
        return node_list
# ==============================================================================
#
class SLURMLRMS(LRMS):
    """LRMS for SLURM-managed clusters, driven purely by SLURM_* env variables."""

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, requested_cores):
        LRMS.__init__(self, name, config, logger, requested_cores)

    # --------------------------------------------------------------------------
    #
    def _configure(self):
        """Derive node list and cores-per-node from the SLURM environment."""

        def require_env(var):
            # Fetch a mandatory environment variable; log and raise if unset.
            value = os.environ.get(var)
            if value is None:
                msg = "$%s not set!" % var
                self._log.error(msg)
                raise RuntimeError(msg)
            return value

        # Expand the (possibly compressed) SLURM node list notation.
        slurm_nodelist = require_env('SLURM_NODELIST')
        slurm_nodes = hostlist.expand_hostlist(slurm_nodelist)
        self._log.info("Found SLURM_NODELIST %s. Expanded to: %s", slurm_nodelist, slurm_nodes)

        # $SLURM_NPROCS = Total number of cores allocated for the current job
        slurm_nprocs = int(require_env('SLURM_NPROCS'))

        # $SLURM_NNODES = Total number of (partial) nodes in the job's resource allocation
        slurm_nnodes = int(require_env('SLURM_NNODES'))

        # $SLURM_CPUS_ON_NODE = Number of cores per node (physically)
        slurm_cpus_on_node = int(require_env('SLURM_CPUS_ON_NODE'))

        # Verify that $SLURM_NPROCS <= $SLURM_NNODES * $SLURM_CPUS_ON_NODE
        if not slurm_nprocs <= slurm_nnodes * slurm_cpus_on_node:
            self._log.warning("$SLURM_NPROCS(%d) <= $SLURM_NNODES(%d) * $SLURM_CPUS_ON_NODE(%d)",
                              slurm_nprocs, slurm_nnodes, slurm_cpus_on_node)

        # Verify that $SLURM_NNODES == len($SLURM_NODELIST)
        if slurm_nnodes != len(slurm_nodes):
            self._log.error("$SLURM_NNODES(%d) != len($SLURM_NODELIST)(%d)",
                            slurm_nnodes, len(slurm_nodes))

        # Report the physical number of cores or the total number of cores
        # in case of a single partial node allocation.
        self.cores_per_node = min(slurm_cpus_on_node, slurm_nprocs)
        self.node_list      = slurm_nodes
# ==============================================================================
#
class SGELRMS(LRMS):
    """LRMS implementation for SGE (Sun/Son of Grid Engine).

    Parses $PE_HOSTFILE, where each line has the form
    "<node> <slots> ...", to derive the unique node list and a uniform
    cores-per-node value (the minimum slot count over all nodes).
    Raises RuntimeError when $PE_HOSTFILE is not set.
    """
    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, requested_cores):
        LRMS.__init__(self, name, config, logger, requested_cores)
    # --------------------------------------------------------------------------
    #
    def _configure(self):
        sge_hostfile = os.environ.get('PE_HOSTFILE')
        if sge_hostfile is None:
            msg = "$PE_HOSTFILE not set!"
            self._log.error(msg)
            raise RuntimeError(msg)
        # SGE core configuration might be different than what multiprocessing
        # announces
        # Alternative: "qconf -sq all.q|awk '/^slots *[0-9]+$/{print $2}'"
        # Parse the hostfile only once and close it again (the previous code
        # opened it twice and leaked both file handles).  Blank lines are
        # skipped so a trailing newline does not raise an IndexError.
        with open(sge_hostfile) as hostfile:
            entries = [line.split() for line in hostfile if line.strip()]
        # Keep only unique nodes
        sge_node_list = [entry[0] for entry in entries]
        sge_nodes = list(set(sge_node_list))
        self._log.info("Found PE_HOSTFILE %s. Expanded to: %s", sge_hostfile, sge_nodes)
        # Second column is the slot (core) count per entry
        sge_cores_count_list = [int(entry[1]) for entry in entries]
        sge_core_counts = list(set(sge_cores_count_list))
        # Use the smallest per-node slot count as the uniform cores-per-node.
        sge_cores_per_node = min(sge_core_counts)
        self._log.info("Found unique core counts: %s Using: %d", sge_core_counts, sge_cores_per_node)
        self.node_list = sge_nodes
        self.cores_per_node = sge_cores_per_node
# ==============================================================================
#
class LSFLRMS(LRMS):
    """LRMS implementation for IBM LSF.

    Derives the node list from $LSB_DJOB_HOSTFILE (one hostname per task,
    so hosts repeat) and the per-node slot counts from $LSB_MCPU_HOSTS
    ("hostX N hostY N ...").  Raises RuntimeError when either variable is
    not set.
    """
    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, requested_cores):
        LRMS.__init__(self, name, config, logger, requested_cores)
    # --------------------------------------------------------------------------
    #
    def _configure(self):
        lsf_hostfile = os.environ.get('LSB_DJOB_HOSTFILE')
        if lsf_hostfile is None:
            msg = "$LSB_DJOB_HOSTFILE not set!"
            self._log.error(msg)
            raise RuntimeError(msg)
        lsb_mcpu_hosts = os.environ.get('LSB_MCPU_HOSTS')
        if lsb_mcpu_hosts is None:
            msg = "$LSB_MCPU_HOSTS not set!"
            self._log.error(msg)
            raise RuntimeError(msg)
        # parse LSF hostfile
        # format:
        # <hostnameX>
        # <hostnameX>
        # <hostnameY>
        # <hostnameY>
        #
        # There are in total "-n" entries (number of tasks)
        # and "-R" entries per host (tasks per host).
        # (That results in "-n" / "-R" unique hosts)
        #
        # Use a context manager so the hostfile handle is closed again
        # (the previous code leaked it).
        with open(lsf_hostfile) as hostfile:
            lsf_nodes = [line.strip() for line in hostfile]
        self._log.info("Found LSB_DJOB_HOSTFILE %s. Expanded to: %s",
                       lsf_hostfile, lsf_nodes)
        lsf_node_list = list(set(lsf_nodes))
        # Grab the core (slot) count from the environment
        # Format: hostX N hostY N hostZ N
        lsf_cores_count_list = [int(count) for count in lsb_mcpu_hosts.split()[1::2]]
        lsf_core_counts = list(set(lsf_cores_count_list))
        # Use the smallest slot count as the uniform cores-per-node value.
        lsf_cores_per_node = min(lsf_core_counts)
        self._log.info("Found unique core counts: %s Using: %d",
                       lsf_core_counts, lsf_cores_per_node)
        self.node_list = lsf_node_list
        self.cores_per_node = lsf_cores_per_node
# ==============================================================================
#
class LoadLevelerLRMS(LRMS):
    """LRMS implementation for IBM LoadLeveler, including Blue Gene/Q.

    For plain LoadLeveler installations the node list is read from
    $LOADL_HOSTFILE.  On BG/Q ($LOADL_BG_BLOCK set) the allocation is
    reconstructed from the block/board/midplane information reported by
    'llq -l', and a 5-dimensional torus block structure is built for the
    Torus scheduler.
    """
    # --------------------------------------------------------------------------
    #
    # BG/Q Topology of Nodes within a Board
    #
    # For each node (0-31), the neighbouring node reached by moving one
    # step in each torus dimension A-E while staying on the same board.
    #
    BGQ_BOARD_TOPO = {
        0: {'A': 29, 'B': 3, 'C': 1, 'D': 12, 'E': 7},
        1: {'A': 28, 'B': 2, 'C': 0, 'D': 13, 'E': 6},
        2: {'A': 31, 'B': 1, 'C': 3, 'D': 14, 'E': 5},
        3: {'A': 30, 'B': 0, 'C': 2, 'D': 15, 'E': 4},
        4: {'A': 25, 'B': 7, 'C': 5, 'D': 8, 'E': 3},
        5: {'A': 24, 'B': 6, 'C': 4, 'D': 9, 'E': 2},
        6: {'A': 27, 'B': 5, 'C': 7, 'D': 10, 'E': 1},
        7: {'A': 26, 'B': 4, 'C': 6, 'D': 11, 'E': 0},
        8: {'A': 21, 'B': 11, 'C': 9, 'D': 4, 'E': 15},
        9: {'A': 20, 'B': 10, 'C': 8, 'D': 5, 'E': 14},
        10: {'A': 23, 'B': 9, 'C': 11, 'D': 6, 'E': 13},
        11: {'A': 22, 'B': 8, 'C': 10, 'D': 7, 'E': 12},
        12: {'A': 17, 'B': 15, 'C': 13, 'D': 0, 'E': 11},
        13: {'A': 16, 'B': 14, 'C': 12, 'D': 1, 'E': 10},
        14: {'A': 19, 'B': 13, 'C': 15, 'D': 2, 'E': 9},
        15: {'A': 18, 'B': 12, 'C': 14, 'D': 3, 'E': 8},
        16: {'A': 13, 'B': 19, 'C': 17, 'D': 28, 'E': 23},
        17: {'A': 12, 'B': 18, 'C': 16, 'D': 29, 'E': 22},
        18: {'A': 15, 'B': 17, 'C': 19, 'D': 30, 'E': 21},
        19: {'A': 14, 'B': 16, 'C': 18, 'D': 31, 'E': 20},
        20: {'A': 9, 'B': 23, 'C': 21, 'D': 24, 'E': 19},
        21: {'A': 8, 'B': 22, 'C': 20, 'D': 25, 'E': 18},
        22: {'A': 11, 'B': 21, 'C': 23, 'D': 26, 'E': 17},
        23: {'A': 10, 'B': 20, 'C': 22, 'D': 27, 'E': 16},
        24: {'A': 5, 'B': 27, 'C': 25, 'D': 20, 'E': 31},
        25: {'A': 4, 'B': 26, 'C': 24, 'D': 21, 'E': 30},
        26: {'A': 7, 'B': 25, 'C': 27, 'D': 22, 'E': 29},
        27: {'A': 6, 'B': 24, 'C': 26, 'D': 23, 'E': 28},
        28: {'A': 1, 'B': 31, 'C': 29, 'D': 16, 'E': 27},
        29: {'A': 0, 'B': 30, 'C': 28, 'D': 17, 'E': 26},
        30: {'A': 3, 'B': 29, 'C': 31, 'D': 18, 'E': 25},
        31: {'A': 2, 'B': 28, 'C': 30, 'D': 19, 'E': 24},
        }
    # --------------------------------------------------------------------------
    #
    # BG/Q Config
    #
    BGQ_CORES_PER_NODE = 16
    BGQ_NODES_PER_BOARD = 32 # NODE == Compute Card == Chip module
    BGQ_BOARDS_PER_MIDPLANE = 16 # NODE BOARD == NODE CARD
    BGQ_MIDPLANES_PER_RACK = 2
    # --------------------------------------------------------------------------
    #
    # Default mapping = "ABCDE(T)"
    #
    # http://www.redbooks.ibm.com/redbooks/SG247948/wwhelp/wwhimpl/js/html/wwhelp.htm
    #
    BGQ_MAPPING = "ABCDE"
    # --------------------------------------------------------------------------
    #
    # Board labels (Rack, Midplane, Node)
    #
    BGQ_BOARD_LABELS = ['R', 'M', 'N']
    # --------------------------------------------------------------------------
    #
    # Dimensions of a (sub-)block
    #
    BGQ_DIMENSION_LABELS = ['A', 'B', 'C', 'D', 'E']
    # --------------------------------------------------------------------------
    #
    # Supported sub-block sizes (number of nodes).
    # This influences the effectiveness of mixed-size allocations
    # (and might even be a hard requirement from a topology standpoint).
    #
    # TODO: Do we actually need to restrict our sub-block sizes to this set?
    #
    BGQ_SUPPORTED_SUB_BLOCK_SIZES = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
    # --------------------------------------------------------------------------
    #
    # Mapping of starting corners.
    #
    # "board" -> "node"
    #
    # Ordering: ['E', 'D', 'DE', etc.]
    #
    # TODO: Is this independent of the mapping?
    #
    BGQ_BLOCK_STARTING_CORNERS = {
        0: 0,
        4: 29,
        8: 4,
        12: 25
        }
    # --------------------------------------------------------------------------
    #
    # BG/Q Topology of Boards within a Midplane
    #
    # For each board (0-15), the neighbouring board reached by moving one
    # step in dimensions A-D (E is contained within a single midplane).
    #
    BGQ_MIDPLANE_TOPO = {
        0: {'A': 4, 'B': 8, 'C': 1, 'D': 2},
        1: {'A': 5, 'B': 9, 'C': 0, 'D': 3},
        2: {'A': 6, 'B': 10, 'C': 3, 'D': 0},
        3: {'A': 7, 'B': 11, 'C': 2, 'D': 1},
        4: {'A': 0, 'B': 12, 'C': 5, 'D': 6},
        5: {'A': 1, 'B': 13, 'C': 4, 'D': 7},
        6: {'A': 2, 'B': 14, 'C': 7, 'D': 4},
        7: {'A': 3, 'B': 15, 'C': 6, 'D': 5},
        8: {'A': 12, 'B': 0, 'C': 9, 'D': 10},
        9: {'A': 13, 'B': 1, 'C': 8, 'D': 11},
        10: {'A': 14, 'B': 2, 'C': 11, 'D': 8},
        11: {'A': 15, 'B': 3, 'C': 10, 'D': 9},
        12: {'A': 8, 'B': 4, 'C': 13, 'D': 14},
        13: {'A': 9, 'B': 5, 'C': 12, 'D': 15},
        14: {'A': 10, 'B': 6, 'C': 15, 'D': 12},
        15: {'A': 11, 'B': 7, 'C': 14, 'D': 13},
        }
    # --------------------------------------------------------------------------
    #
    # Shape of whole BG/Q Midplane
    #
    BGQ_MIDPLANE_SHAPE = {'A': 4, 'B': 4, 'C': 4, 'D': 4, 'E': 2} # '4x4x4x4x2'
    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, requested_cores):
        # Populated by _configure(); consumed by the Torus scheduler.
        self.torus_block = None
        self.loadl_bg_block = None
        self.shape_table = None
        self.torus_dimension_labels = None
        LRMS.__init__(self, name, config, logger, requested_cores)
    # --------------------------------------------------------------------------
    #
    def _configure(self):
        """Derive node list and cores-per-node from the LoadLeveler
        environment; on BG/Q additionally build the torus block and the
        sub-block shape table.  Raises RuntimeError when required
        environment variables or llq output entries are missing."""
        loadl_node_list = None
        loadl_cpus_per_node = None
        # Determine method for determining hosts,
        # either through hostfile or BG/Q environment.
        loadl_hostfile = os.environ.get('LOADL_HOSTFILE')
        self.loadl_bg_block = os.environ.get('LOADL_BG_BLOCK')
        if loadl_hostfile is None and self.loadl_bg_block is None:
            msg = "Neither $LOADL_HOSTFILE or $LOADL_BG_BLOCK set!"
            self._log.error(msg)
            raise RuntimeError(msg)
        # Determine the size of the pilot allocation
        if loadl_hostfile is not None:
            # Non Blue Gene Load Leveler installation.
            loadl_total_tasks_str = os.environ.get('LOADL_TOTAL_TASKS')
            if loadl_total_tasks_str is None:
                msg = "$LOADL_TOTAL_TASKS not set!"
                self._log.error(msg)
                raise RuntimeError(msg)
            else:
                loadl_total_tasks = int(loadl_total_tasks_str)
            # Construct the host list
            loadl_nodes = [line.strip() for line in open(loadl_hostfile)]
            self._log.info("Found LOADL_HOSTFILE %s. Expanded to: %s",
                          loadl_hostfile, loadl_nodes)
            loadl_node_list = list(set(loadl_nodes))
            # Verify that $LLOAD_TOTAL_TASKS == len($LOADL_HOSTFILE)
            if loadl_total_tasks != len(loadl_nodes):
                self._log.error("$LLOAD_TOTAL_TASKS(%d) != len($LOADL_HOSTFILE)(%d)",
                               loadl_total_tasks, len(loadl_nodes))
            # Determine the number of cpus per node. Assume:
            # cores_per_node = lenght(nodefile) / len(unique_nodes_in_nodefile)
            # NOTE(review): this is Python-2 integer division — confirm if the
            # file is ever run under Python 3.
            loadl_cpus_per_node = len(loadl_nodes) / len(loadl_node_list)
        elif self.loadl_bg_block is not None:
            # Blue Gene specific.
            loadl_bg_midplane_list_str = None
            loadl_bg_block_size_str = None
            loadl_job_name = os.environ.get('LOADL_JOB_NAME')
            if loadl_job_name is None:
                msg = "$LOADL_JOB_NAME not set!"
                self._log.error(msg)
                raise RuntimeError(msg)
            # Get the board list and block shape from 'llq -l' output
            output = subprocess.check_output(["llq", "-l", loadl_job_name])
            loadl_bg_board_list_str = None
            loadl_bg_block_shape_str = None
            for line in output.splitlines():
                # Detect BG board list
                if "BG Node Board List: " in line:
                    loadl_bg_board_list_str = line.split(':')[1].strip()
                elif "BG Midplane List: " in line:
                    loadl_bg_midplane_list_str = line.split(':')[1].strip()
                elif "BG Shape Allocated: " in line:
                    loadl_bg_block_shape_str = line.split(':')[1].strip()
                elif "BG Size Allocated: " in line:
                    loadl_bg_block_size_str = line.split(':')[1].strip()
            if not loadl_bg_board_list_str:
                msg = "No board list found in llq output!"
                self._log.error(msg)
                raise RuntimeError(msg)
            self._log.debug("BG Node Board List: %s" % loadl_bg_board_list_str)
            if not loadl_bg_midplane_list_str:
                msg = "No midplane list found in llq output!"
                self._log.error(msg)
                raise RuntimeError(msg)
            self._log.debug("BG Midplane List: %s" % loadl_bg_midplane_list_str)
            if not loadl_bg_block_shape_str:
                msg = "No board shape found in llq output!"
                self._log.error(msg)
                raise RuntimeError(msg)
            self._log.debug("BG Shape Allocated: %s" % loadl_bg_block_shape_str)
            if not loadl_bg_block_size_str:
                msg = "No board size found in llq output!"
                self._log.error(msg)
                raise RuntimeError(msg)
            loadl_bg_block_size = int(loadl_bg_block_size_str)
            self._log.debug("BG Size Allocated: %d" % loadl_bg_block_size)
            # Build nodes data structure to be handled by Torus Scheduler
            try:
                self.torus_block = self._bgq_construct_block(
                    loadl_bg_block_shape_str, loadl_bg_board_list_str,
                    loadl_bg_block_size, loadl_bg_midplane_list_str)
            except Exception as e:
                # NOTE(review): e.message is Python-2 only.
                msg = "Couldn't construct block: %s" % e.message
                self._log.error(msg)
                raise RuntimeError(msg)
            self._log.debug("Torus block constructed:")
            for e in self.torus_block:
                self._log.debug("%s %s %s %s" %
                                (e[0], [e[1][key] for key in sorted(e[1])], e[2], e[3]))
            try:
                loadl_node_list = [entry[SchedulerTorus.TORUS_BLOCK_NAME] for entry in self.torus_block]
            except Exception as e:
                msg = "Couldn't construct node list."
                self._log.error(msg)
                raise RuntimeError(msg)
            #self._log.debug("Node list constructed: %s" % loadl_node_list)
            # Construct sub-block table
            try:
                self.shape_table = self._bgq_create_sub_block_shape_table(loadl_bg_block_shape_str)
            except Exception as e:
                msg = "Couldn't construct shape table: %s" % e.message
                self._log.error(msg)
                raise RuntimeError(msg)
            self._log.debug("Shape table constructed: ")
            for (size, dim) in [(key, self.shape_table[key]) for key in sorted(self.shape_table)]:
                self._log.debug("%s %s" % (size, [dim[key] for key in sorted(dim)]))
            # Determine the number of cpus per node
            loadl_cpus_per_node = self.BGQ_CORES_PER_NODE
            # BGQ Specific Torus labels
            self.torus_dimension_labels = self.BGQ_DIMENSION_LABELS
        self.node_list = loadl_node_list
        self.cores_per_node = loadl_cpus_per_node
        # Workaround for issue #473 — TODO confirm whether still needed.
        self._log.debug("Sleeping for #473 ...")
        time.sleep(5)
        self._log.debug("Configure done")
    # --------------------------------------------------------------------------
    #
    # Walk the block and return the node name for the given location
    #
    def _bgq_nodename_by_loc(self, midplanes, board, location):
        """Walk the torus from the block's starting corner to `location`
        (a dict of per-dimension offsets) and return the node name in
        'Rxx-Mx-Nxx-Jxx' format."""
        self._log.debug("Starting nodebyname - midplanes:%s, board:%d" % (midplanes, board))
        node = self.BGQ_BLOCK_STARTING_CORNERS[board]
        # TODO: Does the order of walking matter?
        #       It might because of the starting blocks ...
        for dim in self.BGQ_DIMENSION_LABELS: # [::-1]:
            max_length = location[dim]
            self._log.debug("Within dim loop dim:%s, max_length: %d" % (dim, max_length))
            cur_length = 0
            # Loop while we are not at the final depth
            while cur_length < max_length:
                self._log.debug("beginning of while loop, cur_length: %d" % cur_length)
                if cur_length % 2 == 0:
                    # Stay within the board
                    node = self.BGQ_BOARD_TOPO[node][dim]
                else:
                    # We jump to another board.
                    self._log.debug("jumping to new board from board: %d, dim: %s)" % (board, dim))
                    board = self.BGQ_MIDPLANE_TOPO[board][dim]
                    self._log.debug("board is now: %d" % board)
                    # If we switch boards in the B dimension,
                    # we seem to "land" at the opposite E dimension.
                    if dim == 'B':
                        node = self.BGQ_BOARD_TOPO[node]['E']
                self._log.debug("node is now: %d" % node)
                # Increase the length for the next iteration
                cur_length += 1
            self._log.debug("Wrapping inside dim loop dim:%s" % (dim))
        # TODO: This will work for midplane expansion in one dimension only
        # NOTE(review): Python-2 integer division intended here.
        midplane_idx = max(location.values()) / 4
        rack = midplanes[midplane_idx]['R']
        midplane = midplanes[midplane_idx]['M']
        nodename = 'R%.2d-M%.1d-N%.2d-J%.2d' % (rack, midplane, board, node)
        self._log.debug("from location %s constructed node name: %s, left at board: %d" % (self.loc2str(location), nodename, board))
        return nodename
    # --------------------------------------------------------------------------
    #
    # Convert the board string as given by llq into a board structure
    #
    # E.g. 'R00-M1-N08,R00-M1-N09,R00-M1-N10,R00-M0-N11' =>
    # [{'R': 0, 'M': 1, 'N': 8}, {'R': 0, 'M': 1, 'N': 9},
    # {'R': 0, 'M': 1, 'N': 10}, {'R': 0, 'M': 0, 'N': 11}]
    #
    def _bgq_str2boards(self, boards_str):
        boards = boards_str.split(',')
        board_dict_list = []
        for board in boards:
            elements = board.split('-')
            board_dict = {}
            for l, e in zip(self.BGQ_BOARD_LABELS, elements):
                # e.g. 'N08' split on 'N' -> int('08')
                board_dict[l] = int(e.split(l)[1])
            board_dict_list.append(board_dict)
        return board_dict_list
    # --------------------------------------------------------------------------
    #
    # Convert the midplane string as given by llq into a midplane structure
    #
    # E.g. 'R04-M0,R04-M1' =>
    # [{'R': 4, 'M': 0}, {'R': 4, 'M': 1}]
    #
    #
    def _bgq_str2midplanes(self, midplane_str):
        midplanes = midplane_str.split(',')
        midplane_dict_list = []
        for midplane in midplanes:
            elements = midplane.split('-')
            midplane_dict = {}
            # Take the first two labels
            for l, e in zip(self.BGQ_BOARD_LABELS[:2], elements):
                midplane_dict[l] = int(e.split(l)[1])
            midplane_dict_list.append(midplane_dict)
        return midplane_dict_list
    # --------------------------------------------------------------------------
    #
    # Convert the string as given by llq into a block shape structure:
    #
    # E.g. '1x2x3x4x5' => {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5}
    #
    def _bgq_str2shape(self, shape_str):
        # Get the lengths of the shape
        shape_lengths = shape_str.split('x', 4)
        shape_dict = {}
        for dim, length in zip(self.BGQ_DIMENSION_LABELS, shape_lengths):
            shape_dict[dim] = int(length)
        return shape_dict
    # --------------------------------------------------------------------------
    #
    # Multiply two shapes
    #
    def _multiply_shapes(self, shape1, shape2):
        """Element-wise product of two shape dicts; missing dimensions
        default to 1."""
        result = {}
        for dim in self.BGQ_DIMENSION_LABELS:
            try:
                val1 = shape1[dim]
            except KeyError:
                val1 = 1
            try:
                val2 = shape2[dim]
            except KeyError:
                val2 = 1
            result[dim] = val1 * val2
        return result
    # --------------------------------------------------------------------------
    #
    # Convert location dict into a tuple string
    # E.g. {'A': 1, 'C': 4, 'B': 1, 'E': 2, 'D': 4} => '(1,4,1,2,4)'
    #
    def loc2str(self, loc):
        return str(tuple(loc[dim] for dim in self.BGQ_DIMENSION_LABELS))
    # --------------------------------------------------------------------------
    #
    # Convert a shape dict into string format
    #
    # E.g. {'A': 1, 'C': 4, 'B': 1, 'E': 2, 'D': 4} => '1x4x1x2x4'
    #
    def shape2str(self, shape):
        shape_str = ''
        for l in self.BGQ_DIMENSION_LABELS:
            # Get the corresponding count
            shape_str += str(shape[l])
            # Add an 'x' behind all but the last label
            if l in self.BGQ_DIMENSION_LABELS[:-1]:
                shape_str += 'x'
        return shape_str
    # --------------------------------------------------------------------------
    #
    # Return list of nodes that make up the block
    #
    # Format: [(index, location, nodename, status), (i, c, n, s), ...]
    #
    # TODO: This function and _bgq_nodename_by_loc should be changed so that we
    # only walk the torus once?
    #
    def _bgq_get_block(self, midplanes, board, shape):
        self._log.debug("Shape: %s", shape)
        nodes = []
        index = 0
        # Enumerate every location in the 5-dimensional shape and resolve
        # its node name; all nodes start out FREE.
        for a in range(shape['A']):
            for b in range(shape['B']):
                for c in range(shape['C']):
                    for d in range(shape['D']):
                        for e in range(shape['E']):
                            location = {'A': a, 'B': b, 'C': c, 'D': d, 'E': e}
                            nodename = self._bgq_nodename_by_loc(midplanes, board, location)
                            nodes.append([index, location, nodename, FREE])
                            index += 1
        return nodes
    # --------------------------------------------------------------------------
    #
    # Use block shape and board list to construct block structure
    #
    # The 5 dimensions are denoted by the letters A, B, C, D, and E, T for the core (0-15).
    # The latest dimension E is always 2, and is contained entirely within a midplane.
    # For any compute block, compute nodes (as well midplanes for large blocks) are combined in 4 dimensions,
    # only 4 dimensions need to be considered.
    #
    # 128 nodes: BG Shape Allocated: 2x2x4x4x2
    # 256 nodes: BG Shape Allocated: 4x2x4x4x2
    # 512 nodes: BG Shape Allocated: 1x1x1x1
    # 1024 nodes: BG Shape Allocated: 1x1x1x2
    #
    def _bgq_construct_block(self, block_shape_str, boards_str,
                             block_size, midplane_list_str):
        llq_shape = self._bgq_str2shape(block_shape_str)
        # TODO: Could check this, but currently _shape2num is part of the other class
        #if self._shape2num_nodes(llq_shape) != block_size:
        # self._log.error("Block Size doesn't match Block Shape")
        # If the block is equal to or greater than a Midplane,
        # then there is no board list provided.
        # But because at that size, we have only full midplanes,
        # we can construct it.
        if block_size >= 1024:
            #raise NotImplementedError("Currently multiple midplanes are not yet supported.")
            # BG Size: 1024, BG Shape: 1x1x1x2, BG Midplane List: R04-M0,R04-M1
            midplanes = self._bgq_str2midplanes(midplane_list_str)
            # Start of at the "lowest" available rack/midplane/board
            # TODO: No other explanation than that this seems to be the convention?
            # TODO: Can we safely assume that they are sorted?
            #rack = midplane_dict_list[0]['R']
            #midplane = midplane_dict_list[0]['M']
            board = 0
            # block_shape = llq_shape * BGQ_MIDPLANE_SHAPE
            block_shape = self._multiply_shapes(self.BGQ_MIDPLANE_SHAPE, llq_shape)
            self._log.debug("Resulting shape after multiply: %s" % block_shape)
        elif block_size == 512:
            # Full midplane
            # BG Size: 1024, BG Shape: 1x1x1x2, BG Midplane List: R04-M0,R04-M1
            midplanes = self._bgq_str2midplanes(midplane_list_str)
            # Start of at the "lowest" available rack/midplane/board
            # TODO: No other explanation than that this seems to be the convention?
            #rack = midplane_dict_list[0]['R'] # Assume they are all equal
            #midplane = min([entry['M'] for entry in midplane_dict_list])
            board = 0
            block_shape = self.BGQ_MIDPLANE_SHAPE
        else:
            # Within single midplane, < 512 nodes
            board_dict_list = self._bgq_str2boards(boards_str)
            self._log.debug("Board dict list:\n%s", '\n'.join([str(x) for x in board_dict_list]))
            midplanes = [{'R': board_dict_list[0]['R'],
                          'M': board_dict_list[0]['M']}]
            # Start of at the "lowest" available board.
            # TODO: No other explanation than that this seems to be the convention?
            board = min([entry['N'] for entry in board_dict_list])
            block_shape = llq_shape
        # From here its all equal (assuming our walker does the walk and not just the talk!)
        block = self._bgq_get_block(midplanes, board, block_shape)
        # TODO: Check returned block:
        # - Length
        # - No duplicates
        return block
    # --------------------------------------------------------------------------
    #
    # Construction of sub-block shapes based on overall block allocation.
    #
    # Depending on the size of the total allocated block, the maximum size
    # of a subblock can be 512 nodes.
    #
    #
    def _bgq_create_sub_block_shape_table(self, shape_str):
        """Return a dict mapping supported sub-block sizes (node counts)
        to the shape dict that realises them within the allocated block."""
        # Convert the shape string into dict structure
        #
        # For < 512 nodes: the dimensions within a midplane (AxBxCxDxE)
        # For >= 512 nodes: the dimensions between the midplanes (AxBxCxD)
        #
        if len(shape_str.split('x')) == 5:
            block_shape = self._bgq_str2shape(shape_str)
        elif len(shape_str.split('x')) == 4:
            block_shape = self.BGQ_MIDPLANE_SHAPE
        else:
            raise ValueError('Invalid shape string: %s' % shape_str)
        # Dict to store the results
        table = {}
        # Create a sub-block dict with shape 1x1x1x1x1
        sub_block_shape = {}
        for l in self.BGQ_DIMENSION_LABELS:
            sub_block_shape[l] = 1
        # Look over all the dimensions starting at the most right
        for dim in self.BGQ_MAPPING[::-1]:
            while True:
                # Calculate the number of nodes for the current shape
                # NOTE(review): relies on the Python-2 builtin 'reduce'.
                from operator import mul
                num_nodes = reduce(mul, filter(lambda length: length != 0, sub_block_shape.values()))
                if num_nodes in self.BGQ_SUPPORTED_SUB_BLOCK_SIZES:
                    table[num_nodes] = copy.copy(sub_block_shape)
                else:
                    self._log.warning("Non supported sub-block size: %d.", num_nodes)
                # Done with iterating this dimension
                if sub_block_shape[dim] >= block_shape[dim]:
                    break
                # Increase the length in this dimension for the next iteration.
                if sub_block_shape[dim] == 1:
                    sub_block_shape[dim] = 2
                elif sub_block_shape[dim] == 2:
                    sub_block_shape[dim] = 4
        return table
# ==============================================================================
#
class ForkLRMS(LRMS):
    """LRMS implementation for running on localhost via fork/exec.

    Presents localhost as a single node whose core count is the number of
    requested cores, sanity-checked against the physically available
    cores unless the agent is being profiled.
    """
    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, requested_cores):
        LRMS.__init__(self, name, config, logger, requested_cores)
    # --------------------------------------------------------------------------
    #
    def _configure(self):
        self._log.info("Using fork on localhost.")
        selected_cpus = self.requested_cores
        # When we profile the agent, we fake any number of cores, so don't
        # perform any sanity checks in that case.
        # FIX: the previous condition used 'in', which ran the check *only*
        # while profiling and never otherwise — the opposite of the intent.
        if 'RADICAL_PILOT_PROFILE' not in os.environ:
            detected_cpus = multiprocessing.cpu_count()
            if detected_cpus < selected_cpus:
                self._log.warn("insufficient cores: using %d instead of requested %d.",
                               detected_cpus, selected_cpus)
                selected_cpus = detected_cpus
            elif detected_cpus > selected_cpus:
                self._log.warn("more cores available: using requested %d instead of available %d.",
                               selected_cpus, detected_cpus)
        self.node_list = ["localhost"]
        self.cores_per_node = selected_cpus
# ==============================================================================
#
# Worker Classes
#
# ==============================================================================
#
class ExecWorker(COMPONENT_TYPE):
    """
    Base class for CU spawners.  An ExecWorker receives units in state
    PendingExecution, launches them (moving them to Executing), and tracks
    them until they reach a final state (or PendingStageOut).

    Concrete spawners are obtained through the `create` factory method.
    """
    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, agent, lrms, scheduler,
                 task_launcher, mpi_launcher, command_queue,
                 execution_queue, stageout_queue, update_queue,
                 schedule_queue, pilot_id, session_id):
        rpu.prof('ExecWorker init')
        COMPONENT_TYPE.__init__(self)
        self._terminate = COMPONENT_MODE.Event()
        # identification
        self.name        = name
        self._pilot_id   = pilot_id
        self._session_id = session_id
        # collaborators
        self._config        = config
        self._log           = logger
        self._agent         = agent
        self._lrms          = lrms
        self._scheduler     = scheduler
        self._task_launcher = task_launcher
        self._mpi_launcher  = mpi_launcher
        # queues
        self._command_queue   = command_queue
        self._execution_queue = execution_queue
        self._stageout_queue  = stageout_queue
        self._update_queue    = update_queue
        self._schedule_queue  = schedule_queue
        # give the subclass a chance to set itself up
        self.configure()
    # --------------------------------------------------------------------------
    #
    # This class-method creates the appropriate sub-class for the Launch Method.
    #
    @classmethod
    def create(cls, name, config, logger, spawner, agent, lrms, scheduler,
               task_launcher, mpi_launcher, command_queue,
               execution_queue, update_queue, schedule_queue,
               stageout_queue, pilot_id, session_id):
        # the factory is only usable on the base class itself
        if cls != ExecWorker:
            raise TypeError("ExecWorker Factory only available to base class!")
        spawner_map = {
            SPAWNER_NAME_POPEN : ExecWorker_POPEN,
            SPAWNER_NAME_SHELL : ExecWorker_SHELL
        }
        try:
            worker = spawner_map[spawner](name, config, logger, agent, lrms,
                                          scheduler, task_launcher, mpi_launcher,
                                          command_queue, execution_queue,
                                          stageout_queue, update_queue,
                                          schedule_queue, pilot_id, session_id)
            worker.start()
            return worker
        except KeyError:
            raise ValueError("ExecWorker '%s' unknown!" % name)
    # --------------------------------------------------------------------------
    #
    def __del__(self):
        self.close()
    # --------------------------------------------------------------------------
    #
    def stop(self):
        """Request termination of the worker loop."""
        rpu.prof('stop request')
        self._terminate.set()
    # --------------------------------------------------------------------------
    #
    def configure(self):
        """Subclass initialization hook; default is a no-op."""
        pass
    # --------------------------------------------------------------------------
    #
    def close(self):
        """Subclass shutdown hook; default is a no-op."""
        pass
    # --------------------------------------------------------------------------
    #
    def spawn(self, launcher, cu):
        raise NotImplementedError("spawn() not implemented for ExecWorker '%s'." % self.name)
# ==============================================================================
#
class ExecWorker_POPEN (ExecWorker) :
# --------------------------------------------------------------------------
#
def __init__(self, name, config, logger, agent, lrms, scheduler,
task_launcher, mpi_launcher, command_queue,
execution_queue, stageout_queue, update_queue,
schedule_queue, pilot_id, session_id):
rpu.prof('ExecWorker init')
self._cus_to_watch = list()
self._cus_to_cancel = list()
self._watch_queue = QUEUE_TYPE ()
self._cu_environment = self._populate_cu_environment()
ExecWorker.__init__ (self, name, config, logger, agent, lrms, scheduler,
task_launcher, mpi_launcher, command_queue,
execution_queue, stageout_queue, update_queue,
schedule_queue, pilot_id, session_id)
# run watcher thread
watcher_name = self.name.replace ('ExecWorker', 'ExecWatcher')
self._watcher = threading.Thread(target = self._watch,
name = watcher_name)
self._watcher.start ()
# --------------------------------------------------------------------------
#
def close(self):
# shut down the watcher thread
rpu.prof ('stop request')
self._terminate.set()
self._watcher.join()
# --------------------------------------------------------------------------
#
def _populate_cu_environment(self):
"""Derive the environment for the cu's from our own environment."""
# Get the environment of the agent
new_env = copy.deepcopy(os.environ)
#
# Mimic what virtualenv's "deactivate" would do
#
old_path = new_env.pop('_OLD_VIRTUAL_PATH', None)
if old_path:
new_env['PATH'] = old_path
old_home = new_env.pop('_OLD_VIRTUAL_PYTHONHOME', None)
if old_home:
new_env['PYTHON_HOME'] = old_home
old_ps = new_env.pop('_OLD_VIRTUAL_PS1', None)
if old_ps:
new_env['PS1'] = old_ps
new_env.pop('VIRTUAL_ENV', None)
return new_env
# --------------------------------------------------------------------------
#
    def run(self):
        """Main loop: pull units from the execution queue, pick a launcher
        per unit (MPI or task), and spawn them.  Runs until stop()/close()
        sets the terminate event.  Spawn failures mark the unit FAILED and
        release its slots."""
        rpu.prof('run')
        try:
            # report initial slot status
            # TODO: Where does this abstraction belong? Scheduler!
            self._log.debug(self._scheduler.slot_status())
            while not self._terminate.is_set():
                # blocking get; a 'None' entry is used as wakeup signal
                cu = self._execution_queue.get()
                if not cu :
                    rpu.prof('get_cmd', msg="execution_queue to ExecWorker (wakeup)")
                    # 'None' is the wakeup signal
                    continue
                cu['state'] = rp.EXECUTING
                rpu.prof('get', msg="executing_queue to ExecutionWorker (%s)" % cu['state'], uid=cu['_id'])
                try:
                    # blowup may clone the unit for scale testing — presumably
                    # returns a list of unit dicts (TODO confirm in rpu)
                    cu_list = rpu.blowup(self._config, cu, EXEC_WORKER)
                    for _cu in cu_list:
                        # choose the launcher matching the unit type
                        if _cu['description']['mpi']:
                            launcher = self._mpi_launcher
                        else :
                            launcher = self._task_launcher
                        # NOTE(review): on a missing launcher the unit is marked
                        # FAILED but the loop body still continues to the debug/
                        # spawn lines below — confirm whether a 'continue' is
                        # intended here.
                        if not launcher:
                            _cu['state'] = rp.FAILED
                            self._agent.update_unit_state(src = 'ExecWorker',
                                         uid = _cu['_id'],
                                         state = rp.FAILED,
                                         msg = "no launcher (mpi=%s)" % _cu['description']['mpi'],
                                         logger = self._log.error)
                        self._log.debug("Launching unit with %s (%s).", launcher.name, launcher.launch_command)
                        assert(_cu['opaque_slot']) # FIXME: no assert, but check
                        rpu.prof('ExecWorker unit launch', uid=_cu['_id'])
                        # Start a new subprocess to launch the unit
                        # TODO: This is scheduler specific
                        self.spawn(launcher=launcher, cu=_cu)
                except Exception as e:
                    # append the startup error to the units stderr. This is
                    # not completely correct (as this text is not produced
                    # by the unit), but it seems the most intuitive way to
                    # communicate that error to the application/user.
                    cu['stderr'] += "\nPilot cannot start compute unit:\n%s\n%s" \
                                    % (str(e), traceback.format_exc())
                    cu['state'] = rp.FAILED
                    cu['stderr'] += "\nPilot cannot start compute unit: '%s'" % e
                    # Free the Slots, Flee the Flots, Ree the Frots!
                    if cu['opaque_slot']:
                        self._scheduler.unschedule(cu)
                    cu['state'] = rp.FAILED
                    self._agent.update_unit_state(src = 'ExecWorker',
                                 uid = cu['_id'],
                                 state = rp.FAILED,
                                 msg = "unit execution failed",
                                 logger = self._log.exception)
        except Exception as e:
            # any error here is fatal for the worker loop, not for a unit
            self._log.exception("Error in ExecWorker loop (%s)" % e)
        rpu.prof ('stop')
# --------------------------------------------------------------------------
#
def spawn(self, launcher, cu):
    """Write a launch script for `cu` and start it via subprocess.Popen.

    The generated bash script changes into the unit's workdir, runs any
    pre_exec commands, exports the requested environment, invokes the
    launcher-constructed command line, and finally runs post_exec
    commands.  stdout/stderr are redirected into the unit's stdout/stderr
    files.  On success the unit moves to EXECUTING and is handed to the
    watcher queue; a failure in command construction raises RuntimeError.
    """

    rpu.prof('ExecWorker spawn', uid=cu['_id'])

    launch_script_name = '%s/radical_pilot_cu_launch_script.sh' % cu['workdir']
    self._log.debug("Created launch_script: %s", launch_script_name)

    with open(launch_script_name, "w") as launch_script:
        launch_script.write('#!/bin/bash -l\n')
        launch_script.write('\n# Change to working directory for unit\ncd %s\n' % cu['workdir'])

        # Before the Big Bang there was nothing
        if cu['description']['pre_exec']:
            pre_exec_string = ''
            if isinstance(cu['description']['pre_exec'], list):
                for elem in cu['description']['pre_exec']:
                    pre_exec_string += "%s\n" % elem
            else:
                pre_exec_string += "%s\n" % cu['description']['pre_exec']
            launch_script.write('# Pre-exec commands\n%s' % pre_exec_string)

        # Create string for environment variable setting
        if cu['description']['environment'] and \
           cu['description']['environment'].keys():
            env_string = 'export'
            for key, val in cu['description']['environment'].iteritems():
                env_string += ' %s=%s' % (key, val)
            launch_script.write('# Environment variables\n%s\n' % env_string)

        # unit arguments (if any)
        task_args_string = ''
        if cu['description']['arguments']:
            for arg in cu['description']['arguments']:
                if not arg:
                    # ignore empty args
                    continue

                arg = arg.replace('"', '\\"')      # escape all double quotes
                if arg[0] == arg[-1] == "'":
                    # string between outer single quotes -- pass as is
                    task_args_string += '%s ' % arg
                else:
                    # otherwise pass between double quotes
                    task_args_string += '"%s" ' % arg

        launch_script_hop = "/usr/bin/env RP_SPAWNER_HOP=TRUE %s" % launch_script_name

        # The actual command line, constructed per launch-method
        try:
            launch_command, hop_cmd = \
                launcher.construct_command(cu['description']['executable'],
                                           task_args_string,
                                           cu['description']['cores'],
                                           launch_script_hop,
                                           cu['opaque_slot'])

            if hop_cmd: cmdline = hop_cmd
            else      : cmdline = launch_script_name

            rpu.prof('launch script constructed', uid=cu['_id'])

        except Exception as e:
            msg = "Error in spawner (%s)" % e
            self._log.exception(msg)
            raise RuntimeError(msg)

        launch_script.write('# The command to run\n%s\n' % launch_command)

        # After the universe dies the infrared death, there will be nothing
        if cu['description']['post_exec']:
            post_exec_string = ''
            if isinstance(cu['description']['post_exec'], list):
                for elem in cu['description']['post_exec']:
                    post_exec_string += "%s\n" % elem
            else:
                post_exec_string += "%s\n" % cu['description']['post_exec']
            launch_script.write('%s\n' % post_exec_string)

    # done writing to launch script, get it ready for execution.
    st = os.stat(launch_script_name)
    os.chmod(launch_script_name, st.st_mode | stat.S_IEXEC)

    _stdout_file_h = open(cu['stdout_file'], "w")
    _stderr_file_h = open(cu['stderr_file'], "w")

    self._log.info("Launching unit %s via %s in %s", cu['_id'], cmdline, cu['workdir'])
    rpu.prof('spawning pass to popen', uid=cu['_id'])

    try:
        proc = subprocess.Popen(args               = cmdline,
                                bufsize            = 0,
                                executable         = None,
                                stdin              = None,
                                stdout             = _stdout_file_h,
                                stderr             = _stderr_file_h,
                                preexec_fn         = None,
                                close_fds          = True,
                                shell              = True,
                                cwd                = cu['workdir'],
                                env                = self._cu_environment,
                                universal_newlines = False,
                                startupinfo        = None,
                                creationflags      = 0)
    finally:
        # FIX: the child holds its own duplicates of these descriptors --
        # close the parent's handles so we do not leak two fds per unit.
        _stdout_file_h.close()
        _stderr_file_h.close()

    rpu.prof('spawning passed to popen', uid=cu['_id'])

    cu['started'] = rpu.timestamp()
    cu['proc']    = proc

    # register for state update and watching
    # (FIX: the state was assigned twice before -- once is enough)
    cu['state'] = rp.EXECUTING
    self._agent.update_unit_state(src   = 'ExecWorker',
                                  uid   = cu['_id'],
                                  state = rp.EXECUTING,
                                  msg   = "unit execution start")

    cu_list = rpu.blowup(self._config, cu, WATCH_QUEUE)
    for _cu in cu_list:
        rpu.prof('put', msg="ExecWorker to watcher (%s)" % _cu['state'], uid=_cu['_id'])
        self._watch_queue.put(_cu)
# --------------------------------------------------------------------------
#
def _watch(self):
    """Watcher thread main loop for the Popen based spawner.

    Repeatedly: drain cancel commands from the command queue, pull newly
    spawned units from the watch queue in bulks, then poll all watched
    units via _check_running().  Sleeps briefly when nothing happened.
    """
    rpu.prof('run')
    try:
        while not self._terminate.is_set():
            cus = list()

            # See if there are cancel requests, or new units to watch
            try:
                command = self._command_queue.get_nowait()
                rpu.prof('get_cmd', msg="command_queue to ExecWatcher (%s)" % command[COMMAND_TYPE])

                if command[COMMAND_TYPE] == COMMAND_CANCEL_COMPUTE_UNIT:
                    self._cus_to_cancel.append(command[COMMAND_ARG])
                else:
                    raise RuntimeError("Command %s not applicable in this context." %
                                       command[COMMAND_TYPE])
            except Queue.Empty:
                # do nothing if we don't have any queued commands
                pass

            try:
                # we don't want to only wait for one CU -- then we would
                # pull CU state too frequently.  OTOH, we also don't want to
                # learn about CUs until all slots are filled, because then
                # we may not be able to catch finishing CUs in time -- so
                # there is a fine balance here.  Balance means 100 (FIXME).
              # rpu.prof('ExecWorker popen watcher pull cu from queue')
                MAX_QUEUE_BULKSIZE = 100
                while len(cus) < MAX_QUEUE_BULKSIZE :
                    # get_nowait raises Queue.Empty when drained -- that
                    # terminates the bulk collection below
                    cus.append (self._watch_queue.get_nowait())
            except Queue.Empty:
                # nothing found -- no problem, see if any CUs finished
                pass

            # add all cus we found to the watchlist
            for cu in cus :
                rpu.prof('get', msg="ExecWatcher picked up unit", uid=cu['_id'])
                cu_list = rpu.blowup(self._config, cu, WATCHER)
                for _cu in cu_list :
                    self._cus_to_watch.append (_cu)

            # check on the known cus.
            action = self._check_running()

            if not action and not cus :
                # nothing happend at all!  Zzz for a bit.
                # (poll interval comes from the agent configuration)
                time.sleep(self._config['queue_poll_sleeptime'])

    except Exception as e:
        self._log.exception("Error in ExecWorker watch loop (%s)" % e)

    rpu.prof ('stop')
# --------------------------------------------------------------------------
# Iterate over all running tasks, check their status, and decide on the
# next step. Also check for a requested cancellation for the tasks.
def _check_running(self):
    """Poll every watched unit's subprocess once and act on the result.

    Handles pending cancel requests and final (exited) processes, and
    returns the number of actions taken (cancellations plus completions)
    so the caller can decide whether to sleep.
    """
    action = 0

    # FIX: iterate over a snapshot -- the loop body removes entries from
    # self._cus_to_watch, and mutating a list while iterating over it
    # silently skips the element following each removal.
    for cu in self._cus_to_watch[:]:

        # poll subprocess object
        exit_code = cu['proc'].poll()
        now       = rpu.timestamp()

        if exit_code is None:
            # Process is still running

            if cu['_id'] in self._cus_to_cancel:

                # FIXME: there is a race condition between the state poll
                # above and the kill command below.  We probably should pull
                # state after kill again?

                # We got a request to cancel this cu
                action += 1
                cu['proc'].kill()
                self._cus_to_cancel.remove(cu['_id'])

                # FIX: also drop the canceled unit from the watchlist --
                # leaving it in would make the next poll observe the killed
                # process' exit code and report the unit as FAILED after it
                # was already CANCELED.
                self._cus_to_watch.remove(cu)
                self._scheduler.unschedule(cu)

                cu['state'] = rp.CANCELED
                self._agent.update_unit_state(src   = 'ExecWatcher',
                                              uid   = cu['_id'],
                                              state = rp.CANCELED,
                                              msg   = "unit execution canceled")
                rpu.prof('final', msg="execution canceled", uid=cu['_id'])
                # NOTE: this is final, cu will not be touched anymore
                cu = None
        else:
            rpu.prof('execution complete', uid=cu['_id'])

            # we have a valid return code -- unit is final
            action += 1
            self._log.info("Unit %s has return code %s.", cu['_id'], exit_code)

            cu['exit_code'] = exit_code
            cu['finished']  = now

            # Free the Slots, Flee the Flots, Ree the Frots!
            self._cus_to_watch.remove(cu)
            self._scheduler.unschedule(cu)

            if exit_code != 0:
                # The unit failed, no need to deal with its output data.
                cu['state'] = rp.FAILED
                self._agent.update_unit_state(src   = 'ExecWatcher',
                                              uid   = cu['_id'],
                                              state = rp.FAILED,
                                              msg   = "unit execution failed")
                rpu.prof('final', msg="execution failed", uid=cu['_id'])
                # NOTE: this is final, cu will not be touched anymore
                cu = None
            else:
                # The unit finished cleanly, see if we need to deal with
                # output data.  We always move to stageout, even if there are
                # no directives -- at the very least, we'll upload
                # stdout/stderr.
                cu['state'] = rp.STAGING_OUTPUT
                self._agent.update_unit_state(src   = 'ExecWatcher',
                                              uid   = cu['_id'],
                                              state = rp.STAGING_OUTPUT,
                                              msg   = "unit execution completed")
                cu_list = rpu.blowup(self._config, cu, STAGEOUT_QUEUE)
                for _cu in cu_list:
                    rpu.prof('put', msg="ExecWatcher to stageout_queue (%s)" % _cu['state'], uid=_cu['_id'])
                    self._stageout_queue.put(_cu)

    return action
# ==============================================================================
#
class ExecWorker_SHELL(ExecWorker):
# --------------------------------------------------------------------------
#
def __init__(self, name, config, logger, agent, lrms, scheduler,
             task_launcher, mpi_launcher, command_queue,
             execution_queue, stageout_queue, update_queue,
             schedule_queue, pilot_id, session_id):
    # Thin constructor: all setup lives in the shared ExecWorker base
    # class; this subclass only differs in how units are spawned (PTY
    # shell wrapper instead of subprocess.Popen).
    ExecWorker.__init__ (self, name, config, logger, agent, lrms, scheduler,
                         task_launcher, mpi_launcher, command_queue,
                         execution_queue, stageout_queue, update_queue,
                         schedule_queue, pilot_id, session_id)
# --------------------------------------------------------------------------
#
def run(self):
    """Main loop of the shell-based execution worker.

    Performs one-time setup (deactivate the pilot virtualenv, bootstrap
    the launcher and monitor PTY shells, start the watcher thread), then
    pulls units from the execution queue and spawns them until asked to
    terminate.
    """
    rpu.prof('run')

    # Mimic what virtualenv's "deactivate" would do
    self._deactivate = "# deactivate pilot virtualenv\n"

    old_path = os.environ.get('_OLD_VIRTUAL_PATH', None)
    old_home = os.environ.get('_OLD_VIRTUAL_PYTHONHOME', None)
    old_ps1  = os.environ.get('_OLD_VIRTUAL_PS1', None)

    if old_path: self._deactivate += 'export PATH="%s"\n' % old_path
    if old_home: self._deactivate += 'export PYTHON_HOME="%s"\n' % old_home
    if old_ps1:  self._deactivate += 'export PS1="%s"\n' % old_ps1

    self._deactivate += 'unset VIRTUAL_ENV\n\n'

    # also restore the pre-virtualenv values in our own environment, so
    # that spawned processes do not inherit the pilot's virtualenv
    if old_path: os.environ['PATH'] = old_path
    if old_home: os.environ['PYTHON_HOME'] = old_home
    if old_ps1:  os.environ['PS1'] = old_ps1

    if 'VIRTUAL_ENV' in os.environ :
        del(os.environ['VIRTUAL_ENV'])

    # simplify shell startup / prompt detection
    os.environ['PS1'] = '$ '

    # the registry keeps track of units to watch, indexed by their shell
    # spawner process ID.  As the registry is shared between the spawner
    # and watcher thread, we use a lock while accessing it.
    self._registry      = dict()
    self._registry_lock = threading.RLock()

    self._cached_events = list()  # keep monitoring events for pid's which
                                  # are not yet known

    # get some threads going -- those will do all the work.
    import saga.utils.pty_shell as sups

    self.launcher_shell = sups.PTYShell ("fork://localhost/")
    self.monitor_shell  = sups.PTYShell ("fork://localhost/")

    # run the spawner on the shells
    self.workdir = "%s/spawner.%s" % (os.getcwd(), self.name)
    rec_makedir(self.workdir)

    ret, out, _ = self.launcher_shell.run_sync \
                      ("/bin/sh %s/agent/radical-pilot-spawner.sh %s" \
                      % (os.path.dirname (rp.__file__), self.workdir))
    if ret != 0 :
        raise RuntimeError ("failed to bootstrap launcher: (%s)(%s)", ret, out)

    ret, out, _ = self.monitor_shell.run_sync \
                      ("/bin/sh %s/agent/radical-pilot-spawner.sh %s" \
                      % (os.path.dirname (rp.__file__), self.workdir))
    if ret != 0 :
        raise RuntimeError ("failed to bootstrap monitor: (%s)(%s)", ret, out)

    # run watcher thread
    watcher_name  = self.name.replace ('ExecWorker', 'ExecWatcher')
    self._watcher = threading.Thread(target = self._watch,
                                     name   = watcher_name)
    self._watcher.start ()

    try:
        # report initial slot status
        # TODO: Where does this abstraction belong?  Scheduler!
        self._log.debug(self._scheduler.slot_status())

        while not self._terminate.is_set():

          # rpu.prof('ExecWorker pull cu from queue')
            cu = self._execution_queue.get()

            if not cu :
                rpu.prof('get_cmd', msg="execution_queue to ExecWorker (wakeup)")
                # 'None' is the wakeup signal
                continue

            cu['state'] = rp.EXECUTING
            rpu.prof('get', msg="executing_queue to ExecutionWorker (%s)" % cu['state'], uid=cu['_id'])

            try:
                cu_list = rpu.blowup(self._config, cu, EXEC_WORKER)
                for _cu in cu_list :

                    # pick the launcher matching the unit's MPI flag
                    if _cu['description']['mpi']:
                        launcher = self._mpi_launcher
                    else :
                        launcher = self._task_launcher

                    if not launcher:
                        _cu['state'] = rp.FAILED
                        self._agent.update_unit_state(src    = 'ExecWorker',
                                                      uid    = _cu['_id'],
                                                      state  = rp.FAILED,
                                                      msg    = "no launcher (mpi=%s)" % _cu['description']['mpi'],
                                                      logger = self._log.error)
                        # NOTE(review): there is no `continue` here, so a
                        # unit without launcher falls through and
                        # dereferences `launcher.name` (None) below --
                        # looks like a missing `continue`; confirm.

                    self._log.debug("Launching unit with %s (%s).", launcher.name, launcher.launch_command)

                    assert(_cu['opaque_slot'])  # FIXME: no assert, but check
                    rpu.prof('ExecWorker unit launch', uid=_cu['_id'])

                    # Start a new subprocess to launch the unit
                    # TODO: This is scheduler specific
                    self.spawn(launcher=launcher, cu=_cu)

            except Exception as e:
                # append the startup error to the units stderr.  This is
                # not completely correct (as this text is not produced
                # by the unit), but it seems the most intuitive way to
                # communicate that error to the application/user.
                cu['stderr'] += "\nPilot cannot start compute unit:\n%s\n%s" \
                              % (str(e), traceback.format_exc())
                cu['state'] = rp.FAILED
                cu['stderr'] += "\nPilot cannot start compute unit: '%s'" % e

                # Free the Slots, Flee the Flots, Ree the Frots!
                if cu['opaque_slot']:
                    self._scheduler.unschedule(cu)

                cu['state'] = rp.FAILED
                self._agent.update_unit_state(src    = 'ExecWorker',
                                              uid    = cu['_id'],
                                              state  = rp.FAILED,
                                              msg    = "unit execution failed",
                                              logger = self._log.exception)

    except Exception as e:
        self._log.exception("Error in ExecWorker loop (%s)" % e)

    rpu.prof ('stop')
# --------------------------------------------------------------------------
#
def _cu_to_cmd (self, cu, launcher) :
    """Render a CU description into a self-contained shell script.

    The script creates/enters the workdir, applies the virtualenv
    deactivation preamble, exports the CU environment, runs pre_exec,
    executes the launcher-constructed command with stdout/stderr
    redirection, and runs post_exec.  If the launcher requires a hop
    command, the script guards its first invocation via RP_SPAWNER_HOP.
    Returns the script as a string.
    """

    # ----------------------------------------------------------------------
    def quote_args (args) :
        # Quote CU arguments for embedding in the shell script:
        #   - strings already in outer single or double quotes pass as is
        #   - anything else is wrapped in double quotes, with inner double
        #     quotes escaped
        ret = list()
        for arg in args :
            if not arg :
                # FIX: skip empty arguments -- `arg[0]` below would raise
                # IndexError (the Popen based spawner ignores empty args,
                # too, so this also makes the two spawners consistent)
                continue
            if arg[0] == arg[-1] == "'" :
                ret.append (arg)
            elif arg[0] == arg[-1] == '"' :
                ret.append (arg)
            else :
                arg = arg.replace ('"', '\\"')
                ret.append ('"%s"' % arg)
        return ret
    # ----------------------------------------------------------------------

    args  = ""
    env   = self._deactivate
    cwd   = ""
    pre   = ""
    post  = ""
    io    = ""
    cmd   = ""
    descr = cu['description']

    if cu['workdir'] :
        cwd += "# CU workdir\n"
        cwd += "mkdir -p %s\n" % cu['workdir']
        cwd += "cd %s\n" % cu['workdir']
        cwd += "\n"

    if descr['environment'] :
        env += "# CU environment\n"
        for e in descr['environment'] :
            env += "export %s=%s\n" % (e, descr['environment'][e])
        env += "\n"

    if descr['pre_exec'] :
        pre += "# CU pre-exec\n"
        pre += '\n'.join(descr['pre_exec' ])
        pre += "\n\n"

    if descr['post_exec'] :
        post += "# CU post-exec\n"
        post += '\n'.join(descr['post_exec' ])
        post += "\n\n"

    if descr['arguments'] :
        args = ' ' .join (quote_args (descr['arguments']))

    # stdin redirection is currently disabled
  # if descr['stdin'] : io += "<%s "  % descr['stdin']
  # else              : io += "<%s "  % '/dev/null'
    if descr['stdout'] : io += "1>%s " % descr['stdout']
    else               : io += "1>%s " % 'STDOUT'
    if descr['stderr'] : io += "2>%s " % descr['stderr']
    else               : io += "2>%s " % 'STDERR'

    cmd, hop_cmd = launcher.construct_command(descr['executable'], args,
                                              descr['cores'],
                                              '/usr/bin/env RP_SPAWNER_HOP=TRUE "$0"',
                                              cu['opaque_slot'])

    script = ""
    if hop_cmd :
        # the script will itself contain a remote callout which calls again
        # the script for the invokation of the real workload (cmd) -- we
        # thus introduce a guard for the first execution.  The hop_cmd MUST
        # set RP_SPAWNER_HOP to some value for the startup to work
        script += "# ------------------------------------------------------\n"
        script += '# perform one hop for the actual command launch\n'
        script += 'if test -z "$RP_SPAWNER_HOP"\n'
        script += 'then\n'
        script += ' %s\n' % hop_cmd
        script += ' exit\n'
        script += 'fi\n\n'

    script += "# ------------------------------------------------------\n"
    script += "%s" % cwd
    script += "%s" % env
    script += "%s" % pre
    script += "# CU execution\n"
    script += "%s %s\n\n" % (cmd, io)
    script += "%s" % post
    script += "# ------------------------------------------------------\n\n"

  # self._log.debug ("execution script:\n%s\n" % script)
    return script
# --------------------------------------------------------------------------
#
def spawn(self, launcher, cu):
    """Launch `cu` through the PTY shell wrapper (BULK/LRUN mode).

    Builds the execution script via _cu_to_cmd, submits it to the
    launcher shell, records the returned pid, registers the unit for
    monitoring, and advances it to EXECUTING.  Raises RuntimeError when
    the wrapper does not confirm the launch.
    """
    uid = cu['_id']
    rpu.prof('ExecWorker spawn', uid=uid)

    # we got an allocation: go off and launch the process.  we get
    # a multiline command, so use the wrapper's BULK/LRUN mode.
    cmd     = self._cu_to_cmd (cu, launcher)
    run_cmd = "BULK\nLRUN\n%s\nLRUN_EOT\nBULK_RUN\n" % cmd

    rpu.prof('launch script constructed', uid=cu['_id'])

  # TODO: Remove this commented out block?
  # if self.lrms.target_is_macos :
  #     run_cmd = run_cmd.replace ("\\", "\\\\\\\\")  # hello MacOS

    ret, out, _ = self.launcher_shell.run_sync (run_cmd)

    if ret != 0 :
        # FIX: the arguments were previously passed as a single tuple,
        # which broke the logger's %-substitution (3 placeholders, 1 arg).
        self._log.error ("failed to run unit '%s': (%s)(%s)",
                         run_cmd, ret, out)
        # NOTE(review): FAIL is presumably a module level constant defined
        # elsewhere in this file -- confirm.
        return FAIL

    lines = filter (None, out.split ("\n"))
    self._log.debug (lines)

    if len (lines) < 2 :
        # FIX: format the message -- RuntimeError was given a (msg, args)
        # tuple instead of a formatted string.
        raise RuntimeError ("Failed to run unit (%s)" % lines)

    if lines[-2] != "OK" :
        raise RuntimeError ("Failed to run unit (%s)" % lines)

    # FIXME: verify format of returned pid (\d+)!
    pid           = lines[-1].strip ()
    cu['pid']     = pid
    cu['started'] = rpu.timestamp()

    # before we return, we need to clean the
    # 'BULK COMPLETED message from lrun
    ret, out = self.launcher_shell.find_prompt ()
    if ret != 0 :
        # FIX: the registry is keyed by pid, and this unit was never
        # registered under `uid` -- the previous `del self._registry[uid]`
        # raised KeyError and masked the RuntimeError below.  Use a
        # tolerant pop instead.
        with self._registry_lock :
            self._registry.pop (uid, None)
        raise RuntimeError ("failed to run unit '%s': (%s)(%s)" \
                           % (run_cmd, ret, out))

    rpu.prof('spawning passed to pty', uid=uid)

    # FIXME: this is too late, there is already a race with the monitoring
    # thread for this CU execution.  We need to communicate the PIDs/CUs via
    # a queue again!
    rpu.prof('put', msg="ExecWorker to watcher (%s)" % cu['state'], uid=cu['_id'])
    with self._registry_lock :
        self._registry[pid] = cu

    cu['state'] = rp.EXECUTING
    self._agent.update_unit_state(src   = 'ExecWorker',
                                  uid   = cu['_id'],
                                  state = rp.EXECUTING,
                                  msg   = "unit execution started")
# --------------------------------------------------------------------------
#
def _watch (self) :
    """Monitor thread: consume events from the monitor PTY channel.

    Reads "pid:state:data" lines from the wrapper's MONITOR stream.
    Events for pids not yet present in the registry are cached and
    retried periodically; known pids are dispatched to _handle_event.
    Read timeouts are used to check for termination and channel health.
    """

    MONITOR_READ_TIMEOUT = 1.0   # check for stop signal now and then

    static_cnt = 0

    rpu.prof('run')
    try:

        self.monitor_shell.run_async ("MONITOR")

        while not self._terminate.is_set () :

            _, out = self.monitor_shell.find (['\n'], timeout=MONITOR_READ_TIMEOUT)

            line = out.strip ()
          # self._log.debug ('monitor line: %s' % line)

            if not line :

                # just a read timeout, i.e. an opportunity to check for
                # termination signals...
                if self._terminate.is_set() :
                    self._log.debug ("stop monitoring")
                    return

                # ... and for health issues ...
                if not self.monitor_shell.alive () :
                    self._log.warn ("monitoring channel died")
                    return

                # ... and to handle cached events.
                if not self._cached_events :
                    static_cnt += 1

                else :
                    self._log.info ("monitoring channel checks cache (%d)", len(self._cached_events))
                    static_cnt += 1

                    if static_cnt == 10 :
                        # 10 times cache to check, dump it for debugging
                      # print "cache state"
                      # import pprint
                      # pprint.pprint (self._cached_events)
                      # pprint.pprint (self._registry)
                        static_cnt = 0

                        # retry cached events against the (possibly grown)
                        # registry; keep those which still have no owner
                        cache_copy          = self._cached_events[:]
                        self._cached_events = list()
                        events_to_handle    = list()

                        with self._registry_lock :
                            for pid, state, data in cache_copy :
                                cu = self._registry.get (pid, None)
                                if cu : events_to_handle.append ([cu, pid, state, data])
                                else  : self._cached_events.append ([pid, state, data])

                        # FIXME: measure if using many locks in the loop below
                        # is really better than doing all ops in the locked loop
                        # above
                        for cu, pid, state, data in events_to_handle :
                            self._handle_event (cu, pid, state, data)

                # all is well...
              # self._log.info ("monitoring channel finish idle loop")
                continue

            elif line == 'EXIT' or line == "Killed" :
                self._log.error ("monitoring channel failed (%s)", line)
                self._terminate.set()
                return

            elif not ':' in line :
                self._log.warn ("monitoring channel noise: %s", line)

            else :
                pid, state, data = line.split (':', 2)

                # we are not interested in non-final state information, at
                # the moment
                if state in ['RUNNING'] :
                    continue

                self._log.info ("monitoring channel event: %s", line)

                cu = None
                with self._registry_lock :
                    cu = self._registry.get (pid, None)

                if cu:
                    rpu.prof('get', msg="ExecWatcher picked up unit", uid=cu['_id'])
                    self._handle_event (cu, pid, state, data)
                else:
                    # event arrived before spawn registered the pid --
                    # cache it for the periodic retry above
                    self._cached_events.append ([pid, state, data])

    except Exception as e:
        self._log.error ("Exception in job monitoring thread: %s", e)
        self._terminate.set()

    rpu.prof ('stop')
# --------------------------------------------------------------------------
#
def _handle_event (self, cu, pid, state, data):
    """Translate one shell-level monitor event into a unit state change.

    Only final shell states (DONE / FAILED / CANCELED) are acted upon;
    anything else is logged and ignored.  Final units are unscheduled,
    their timestamp and exit code recorded, and DONE units are forwarded
    to the stageout queue.  The pid is dropped from the registry.
    """

    # got an explicit event to handle
    self._log.info ("monitoring handles event for %s: %s:%s:%s", cu['_id'], pid, state, data)

    # translate the shell state name into an RP state constant
    if   state == 'DONE'     : rp_state = rp.DONE
    elif state == 'FAILED'   : rp_state = rp.FAILED
    elif state == 'CANCELED' : rp_state = rp.CANCELED
    else                     : rp_state = rp.UNKNOWN

    if rp_state not in [rp.DONE, rp.FAILED, rp.CANCELED] :
        # non-final state
        self._log.debug ("ignore shell level state transition (%s:%s:%s)",
                         pid, state, data)
        return

    # record timestamp, exit code on final states
    cu['finished']  = rpu.timestamp()
    cu['exit_code'] = int(data) if data else None

    if rp_state == rp.DONE :
        rpu.prof('execution complete', uid=cu['_id'])

        # advance the unit state towards output staging
        self._scheduler.unschedule(cu)
        cu['state'] = rp.STAGING_OUTPUT
        self._agent.update_unit_state(src   = 'ExecWatcher',
                                      uid   = cu['_id'],
                                      state = rp.STAGING_OUTPUT,
                                      msg   = "unit execution completed")

        for _cu in rpu.blowup(self._config, cu, STAGEOUT_QUEUE) :
            rpu.prof('put', msg="ExecWatcher to stageout_queue (%s)" % _cu['state'], uid=_cu['_id'])
            self._stageout_queue.put(_cu)

    else :
        # FAILED or CANCELED -- final state, no further transition needed
        self._scheduler.unschedule(cu)
        cu['state'] = rp_state
        self._agent.update_unit_state(src   = 'ExecWatcher',
                                      uid   = cu['_id'],
                                      state = rp_state,
                                      msg   = "unit execution finished")

    # we don't need the cu in the registry anymore
    with self._registry_lock :
        self._registry.pop (pid, None)
# ==============================================================================
#
class UpdateWorker(threading.Thread):
    """
    An UpdateWorker pushes CU and Pilot state updates to mongodb.  Its
    instances compete for update requests on the update_queue.  Those
    requests will be triplets of collection name, query dict, and update
    dict.  Update requests will be collected into bulks over some time
    (BULK_COLLECTION_TIME), to reduce number of roundtrips.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, agent, session_id,
                 update_queue, mongodb_url, mongodb_name, mongodb_auth):
        """Connect to mongodb and start the worker thread immediately."""

        threading.Thread.__init__(self)

        self.name          = name
        self._config       = config
        self._log          = logger
        self._agent        = agent
        self._session_id   = session_id
        self._update_queue = update_queue
        self._terminate    = threading.Event()

        self._mongo_db     = rpu.get_mongodb(mongodb_url, mongodb_name, mongodb_auth)
        self._cinfo        = dict()  # collection cache

        # run worker thread
        self.start()

    # --------------------------------------------------------------------------
    #
    def stop(self):
        """Ask the worker loop to terminate (checked once per iteration)."""
        rpu.prof ('stop request')
        self._terminate.set()

    # --------------------------------------------------------------------------
    #
    def run(self):
        """Main loop: collect update requests into per-collection bulks
        and push a bulk once it is older than `bulk_collection_time`."""
        rpu.prof('run')
        while not self._terminate.is_set():

            # ------------------------------------------------------------------
            def timed_bulk_execute(cinfo):
                # Execute `cinfo`'s pending bulk if it is old enough.
                # returns number of bulks pushed (0 or 1)
                if not cinfo['bulk']:
                    return 0

                now = time.time()
                age = now - cinfo['last']

                if cinfo['bulk'] and age > self._config['bulk_collection_time']:

                    res = cinfo['bulk'].execute()
                    self._log.debug("bulk update result: %s", res)

                    rpu.prof('unit update bulk pushed (%d)' % len(cinfo['uids']))
                    for entry in cinfo['uids']:
                        uid   = entry[0]
                        state = entry[1]
                        if state:
                            rpu.prof('unit update pushed (%s)' % state, uid=uid)
                        else:
                            rpu.prof('unit update pushed', uid=uid)

                    cinfo['last'] = now
                    cinfo['bulk'] = None
                    cinfo['uids'] = list()
                    return 1
                else:
                    return 0
            # ------------------------------------------------------------------

            try:
                try:
                    update_request = self._update_queue.get_nowait()
                    uid   = update_request.get('_id', None)
                    state = update_request.get('state', None)

                except Queue.Empty:
                    # no new requests: push any pending bulks
                    action = 0
                    for cname in self._cinfo:
                        action += timed_bulk_execute(self._cinfo[cname])
                    if not action:
                        time.sleep(self._config['db_poll_sleeptime'])
                    continue

                uid   = update_request.get('_id')
                state = update_request.get('state', None)

                if state :
                    rpu.prof('get', msg="update_queue to UpdateWorker (%s)" % state, uid=uid)
                else:
                    rpu.prof('get', msg="update_queue to UpdateWorker", uid=uid)

                update_request_list = rpu.blowup(self._config, update_request, UPDATE_WORKER)
                for _update_request in update_request_list :

                    # got a new request.  Add to bulk (create as needed),
                    # and push bulk if time is up.
                    uid         = _update_request.get('_id')
                    state       = _update_request.get('state', None)
                    cbase       = _update_request.get('cbase', '.cu')
                    query_dict  = _update_request.get('query', dict())
                    update_dict = _update_request.get('update',dict())

                    # collection name: session id plus collection suffix
                    cname = self._session_id + cbase

                    if not cname in self._cinfo:
                        self._cinfo[cname] = {
                            'coll' : self._mongo_db[cname],
                            'bulk' : None,
                            'last' : time.time(),  # time of last push
                            'uids' : list()
                        }

                    cinfo = self._cinfo[cname]

                    if not cinfo['bulk']:
                        cinfo['bulk'] = cinfo['coll'].initialize_ordered_bulk_op()

                    cinfo['uids'].append([uid, state])
                    cinfo['bulk'].find  (query_dict) \
                                 .update(update_dict)

                    timed_bulk_execute(cinfo)
                    rpu.prof('unit update bulked (%s)' % state, uid=uid)

            except Exception as e:
                self._log.exception("unit update failed (%s)", e)
                # FIXME: should we fail the pilot at this point?
                # FIXME: Are the strategies to recover?

        rpu.prof ('stop')
# ==============================================================================
#
class StageinWorker(threading.Thread):
    """An StageinWorker performs the agent side staging directives.

    Units are pulled from the stagein queue; each PENDING input directive
    is resolved (staging-area or absolute path) and executed as a LINK,
    COPY or MOVE into the unit sandbox.  Units without FTW input
    directives are then advanced to ALLOCATING and forwarded to the
    scheduler queue.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, agent, execution_queue, schedule_queue,
                 stagein_queue, update_queue, workdir):
        """Store collaborators and start the worker thread immediately."""

        threading.Thread.__init__(self)

        self.name             = name
        self._config          = config
        self._log             = logger
        self._agent           = agent
        self._execution_queue = execution_queue
        self._schedule_queue  = schedule_queue
        self._stagein_queue   = stagein_queue
        self._update_queue    = update_queue
        self._workdir         = workdir
        self._terminate       = threading.Event()

        # run worker thread
        self.start()

    # --------------------------------------------------------------------------
    #
    def stop(self):
        """Ask the worker loop to terminate (checked once per iteration)."""
        rpu.prof ('stop request')
        self._terminate.set()

    # --------------------------------------------------------------------------
    #
    def run(self):
        """Main loop: execute agent-side input staging for queued units."""
        rpu.prof('run')
        while not self._terminate.is_set():
            try:
                cu = self._stagein_queue.get()
                if not cu:
                    # 'None' is the wakeup signal
                    rpu.prof('get_cmd', msg="stagein_queue to StageinWorker (wakeup)")
                    continue

                cu['state'] = rp.STAGING_INPUT
                rpu.prof('get', msg="stagein_queue to StageinWorker (%s)" % cu['state'], uid=cu['_id'])

                cu_list = rpu.blowup(self._config, cu, STAGEIN_WORKER)
                for _cu in cu_list :

                    sandbox      = os.path.join(self._workdir, '%s' % _cu['_id'])
                    staging_area = os.path.join(self._workdir, self._config['staging_area'])

                    for directive in _cu['Agent_Input_Directives']:

                        rpu.prof('Agent input_staging queue', uid=_cu['_id'],
                                 msg="%s -> %s" % (str(directive['source']), str(directive['target'])))

                        if directive['state'] != rp.PENDING :
                            # we ignore directives which need no action
                            rpu.prof('Agent input_staging queue', uid=_cu['_id'], msg='ignored')
                            continue

                        # Perform input staging
                        self._log.info("unit input staging directives %s for cu: %s to %s",
                                       directive, _cu['_id'], sandbox)

                        # Convert the source_url into a SAGA Url object
                        source_url = rs.Url(directive['source'])

                        # Handle special 'staging' scheme
                        if source_url.scheme == self._config['staging_scheme']:
                            self._log.info('Operating from staging')
                            # Remove the leading slash to get a relative path
                            # from the staging area
                            rel2staging = source_url.path.split('/',1)[1]
                            source = os.path.join(staging_area, rel2staging)
                        else:
                            self._log.info('Operating from absolute path')
                            source = source_url.path

                        # Get the target from the directive and convert it to
                        # the location in the sandbox
                        target = directive['target']
                        abs_target = os.path.join(sandbox, target)

                        # Create output directory in case it doesn't exist yet
                        #
                        rec_makedir(os.path.dirname(abs_target))

                        try:
                            self._log.info("Going to '%s' %s to %s", directive['action'], source, abs_target)

                            if   directive['action'] == LINK: os.symlink     (source, abs_target)
                            elif directive['action'] == COPY: shutil.copyfile(source, abs_target)
                            elif directive['action'] == MOVE: shutil.move    (source, abs_target)
                            else:
                                # FIXME: implement TRANSFER mode
                                raise NotImplementedError('Action %s not supported' % directive['action'])

                            log_message = "%s'ed %s to %s - success" % (directive['action'], source, abs_target)
                            self._log.info(log_message)

                            # If all went fine, update the state of this
                            # StagingDirective to DONE
                            # FIXME: is this update below really *needed*?
                            self._agent.update_unit(src    = 'StageinWorker',
                                                    uid    = _cu['_id'],
                                                    msg    = log_message,
                                                    query  = {
                                                        'Agent_Input_Status'            : rp.EXECUTING,
                                                        'Agent_Input_Directives.state'  : rp.PENDING,
                                                        'Agent_Input_Directives.source' : directive['source'],
                                                        'Agent_Input_Directives.target' : directive['target']
                                                    },
                                                    update = {
                                                        '$set'
                                                        : {'Agent_Input_Status'             : rp.DONE,
                                                           'Agent_Input_Directives.$.state' : rp.DONE}
                                                    })

                        except Exception as e:
                            # If we catch an exception, assume the staging failed
                            log_message = "%s'ed %s to %s - failure (%s)" % \
                                          (directive['action'], source, abs_target, e)
                            self._log.exception(log_message)

                            # If a staging directive fails, fail the CU also.
                            _cu['state'] = rp.FAILED
                            self._agent.update_unit_state(src    = 'StageinWorker',
                                                          uid    = _cu['_id'],
                                                          state  = rp.FAILED,
                                                          msg    = log_message,
                                                          query  = {
                                                              'Agent_Input_Status'            : rp.EXECUTING,
                                                              'Agent_Input_Directives.state'  : rp.PENDING,
                                                              'Agent_Input_Directives.source' : directive['source'],
                                                              'Agent_Input_Directives.target' : directive['target']
                                                          },
                                                          update = {
                                                              '$set' : {'Agent_Input_Directives.$.state' : rp.FAILED,
                                                                        'Agent_Input_Status'             : rp.FAILED}
                                                          })

                    # agent staging is all done, unit can go to execution if it
                    # has no FTW staging -- with FTP staging, we have to wait
                    # for the FTW stager to finish (or to pick up on the agent
                    # staging completion) to push the unit via mongodb to the
                    # agebnt again.  Duh! (FIXME)
                    if not _cu["FTW_Input_Directives"] :
                        _cu['state'] = rp.ALLOCATING
                        self._agent.update_unit_state(src   = 'StageinWorker',
                                                      uid   = _cu['_id'],
                                                      state = rp.ALLOCATING,
                                                      msg   = 'agent input staging done')

                        _cu_list = rpu.blowup(self._config, _cu, SCHEDULE_QUEUE)
                        for __cu in _cu_list :
                            rpu.prof('put', msg="StageinWorker to schedule_queue (%s)" % __cu['state'], uid=__cu['_id'])
                            self._schedule_queue.put(__cu)

            except Exception as e:
                # NOTE(review): sys.exit in a thread only raises SystemExit in
                # this thread; it does not terminate the agent process --
                # confirm whether that is the intent.
                self._log.exception('worker died')
                sys.exit(1)

        rpu.prof ('stop')
# ==============================================================================
#
class StageoutWorker(threading.Thread):
"""
An StageoutWorker performs the agent side staging directives.
It competes for units on the stageout queue, and handles all relevant
staging directives. It also takes care of uploading stdout/stderr (which
can also be considered staging, really).
Upon completion, the units are moved into the respective final state.
Multiple StageoutWorker instances can co-exist -- this class needs to be
threadsafe.
"""
# --------------------------------------------------------------------------
#
def __init__(self, name, config, logger, agent, execution_queue,
             stageout_queue, update_queue, workdir):
    """Store the worker's collaborators and start the thread right away."""

    threading.Thread.__init__(self)

    # identity, configuration, logging, agent handle
    self.name    = name
    self._config = config
    self._log    = logger
    self._agent  = agent

    # queues this worker interacts with
    self._execution_queue = execution_queue
    self._stageout_queue  = stageout_queue
    self._update_queue    = update_queue

    # working directory and termination flag
    self._workdir   = workdir
    self._terminate = threading.Event()

    # run worker thread
    self.start()
# --------------------------------------------------------------------------
#
def stop(self):
    # Request asynchronous termination: the run() loop checks this event
    # once per iteration and exits when it is set.
    rpu.prof ('stop request')
    self._terminate.set()
# --------------------------------------------------------------------------
#
def run(self):
rpu.prof('run')
staging_area = os.path.join(self._workdir, self._config['staging_area'])
while not self._terminate.is_set():
cu = None
try:
cu = self._stageout_queue.get()
if not cu:
rpu.prof('get_cmd', msg="stageout_queue to StageoutWorker (wakeup)")
continue
cu['state'] = rp.STAGING_OUTPUT
rpu.prof('get', msg="stageout_queue to StageoutWorker (%s)" % cu['state'], uid=cu['_id'])
cu_list = rpu.blowup(self._config, cu, STAGEOUT_WORKER)
for _cu in cu_list :
sandbox = os.path.join(self._workdir, '%s' % _cu['_id'])
## parked from unit state checker: unit postprocessing
if os.path.isfile(_cu['stdout_file']):
with open(_cu['stdout_file'], 'r') as stdout_f:
try:
txt = unicode(stdout_f.read(), "utf-8")
except UnicodeDecodeError:
txt = "unit stdout contains binary data -- use file staging directives"
_cu['stdout'] += rpu.tail(txt)
if os.path.isfile(_cu['stderr_file']):
with open(_cu['stderr_file'], 'r') as stderr_f:
try:
txt = unicode(stderr_f.read(), "utf-8")
except UnicodeDecodeError:
txt = "unit stderr contains binary data -- use file staging directives"
_cu['stderr'] += rpu.tail(txt)
for directive in _cu['Agent_Output_Directives']:
# Perform output staging
self._log.info("unit output staging directives %s for cu: %s to %s",
directive, _cu['_id'], sandbox)
# Convert the target_url into a SAGA Url object
target_url = rs.Url(directive['target'])
# Handle special 'staging' scheme
if target_url.scheme == self._config['staging_scheme']:
self._log.info('Operating from staging')
# Remove the leading slash to get a relative path from
# the staging area
rel2staging = target_url.path.split('/',1)[1]
target = os.path.join(staging_area, rel2staging)
else:
self._log.info('Operating from absolute path')
# FIXME: will this work for TRANSFER mode?
target = target_url.path
# Get the source from the directive and convert it to the location
# in the sandbox
source = str(directive['source'])
abs_source = os.path.join(sandbox, source)
# Create output directory in case it doesn't exist yet
# FIXME: will this work for TRANSFER mode?
rec_makedir(os.path.dirname(target))
try:
self._log.info("Going to '%s' %s to %s", directive['action'], abs_source, target)
if directive['action'] == LINK:
# This is probably not a brilliant idea, so at least give a warning
os.symlink(abs_source, target)
elif directive['action'] == COPY:
shutil.copyfile(abs_source, target)
elif directive['action'] == MOVE:
shutil.move(abs_source, target)
else:
# FIXME: implement TRANSFER mode
raise NotImplementedError('Action %s not supported' % directive['action'])
log_message = "%s'ed %s to %s - success" %(directive['action'], abs_source, target)
self._log.info(log_message)
# If all went fine, update the state of this
# StagingDirective to DONE
# FIXME: is this update below really *needed*?
self._agent.update_unit(src = 'StageoutWorker',
uid = _cu['_id'],
msg = log_message,
query = {
# TODO: We never set the status to EXECUTION anymore
'Agent_Output_Status' : rp.EXECUTING,
'Agent_Output_Directives.state' : rp.PENDING,
'Agent_Output_Directives.source': directive['source'],
'Agent_Output_Directives.target': directive['target']
},
update = {
'$set' : {'Agent_Output_Directives.$.state': rp.DONE}
})
except Exception as e:
# If we catch an exception, assume the staging failed
log_message = "%s'ed %s to %s - failure (%s)" % \
(directive['action'], abs_source, target, e)
self._log.exception(log_message)
# If a staging directive fails, fail the CU also.
_cu['state'] = rp.FAILED
self._agent.update_unit_state(src = 'StageoutWorker',
uid = _cu['_id'],
state = rp.FAILED,
msg = log_message,
query = {
'Agent_Output_Status' : rp.EXECUTING,
'Agent_Output_Directives.state' : rp.PENDING,
'Agent_Output_Directives.source' : directive['source'],
'Agent_Output_Directives.target' : directive['target']
},
update = {
'$set' : {'Agent_Output_Directives.$.state' : rp.FAILED,
'Agent_Output_Status' : rp.FAILED}
})
# TODO: Update Agent_Output_Status here?
# local staging is done. Now check if there are Directives that
# need to be performed by the FTW.
# Obviously these are not executed here (by the Agent),
# but we need this code to set the state so that the FTW
# gets notified that it can start its work.
if _cu['FTW_Output_Directives']:
rpu.prof('ExecWorker unit needs FTW_O ', uid=_cu['_id'])
self._agent.update_unit(src = 'StageoutWorker',
uid = _cu['_id'],
msg = 'FTW output staging needed',
update = {
'$set': {
'FTW_Output_Status' : rp.PENDING,
'stdout' : _cu['stdout'],
'stderr' : _cu['stderr'],
'exit_code' : _cu['exit_code'],
'started' : _cu['started'],
'finished' : _cu['finished'],
'slots' : _cu['opaque_slot'],
}
})
# NOTE: this is final for the agent scope -- further state
# transitions are done by the FTW.
_cu = None
else:
# no FTW staging is needed, local staging is done -- we can
# move the unit into final state.
rpu.prof('final', msg="stageout done", uid=_cu['_id'])
_cu['state'] = rp.DONE
self._agent.update_unit_state(src = 'StageoutWorker',
uid = _cu['_id'],
state = rp.DONE,
msg = 'output staging completed',
update = {
'$set' : {
'stdout' : _cu['stdout'],
'stderr' : _cu['stderr'],
'exit_code' : _cu['exit_code'],
'started' : _cu['started'],
'finished' : _cu['finished'],
'slots' : _cu['opaque_slot'],
}
})
# NOTE: this is final, the cu is not touched anymore
_cu = None
# make sure the CU is not touched anymore (see except below)
cu = None
except Exception as e:
self._log.exception("Error in StageoutWorker loop (%s)", e)
# check if we have any cu in operation. If so, mark as final.
# This check relies on the pushes to the update queue to be the
# *last* actions of the loop above -- otherwise we may get
# invalid state transitions...
if cu:
rpu.prof('final', msg="stageout failed", uid=cu['_id'])
cu['state'] = rp.FAILED
self._agent.update_unit_state(src = 'StageoutWorker',
uid = cu['_id'],
state = rp.FAILED,
msg = 'output staging failed',
update = {
'$set' : {
'stdout' : cu['stdout'],
'stderr' : cu['stderr'],
'exit_code' : cu['exit_code'],
'started' : cu['started'],
'finished' : cu['finished'],
'slots' : cu['opaque_slot'],
}
})
# NOTE: this is final, the cu is not touched anymore
cu = None
# forward the exception
raise
rpu.prof ('stop')
# ==============================================================================
#
class HeartbeatMonitor(threading.Thread):
    """
    The HeartbeatMonitor watches the command queue for heartbeat updates (and
    other commands).
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, agent, command_queue, p, pilot_id, starttime, runtime):
        """Store the shared agent handles and immediately start the thread."""

        threading.Thread.__init__(self)

        self.name = name
        self._config = config
        self._log = logger
        self._agent = agent
        self._command_queue = command_queue
        # pilot collection handle in mongodb
        self._p = p
        self._pilot_id = pilot_id
        self._starttime = starttime
        # runtime limit -- interpreted as minutes in _check_state()
        self._runtime = runtime
        self._terminate = threading.Event()

        # run worker thread
        self.start()

    # --------------------------------------------------------------------------
    #
    def stop(self):
        """Request loop termination -- this also stops the owning agent."""
        rpu.prof ('stop request')
        self._terminate.set()
        self._agent.stop()

    # --------------------------------------------------------------------------
    #
    def run(self):
        """Main loop: poll commands and check agent health/runtime once per
        heartbeat interval."""
        rpu.prof('run')
        while not self._terminate.is_set():

            try:
                rpu.prof('heartbeat', msg='Listen! Listen! Listen to the heartbeat!')
                self._check_commands()
                self._check_state   ()
                time.sleep(self._config['heartbeat_interval'])

            except Exception as e:
                # any error in the monitor is fatal -- and, via stop(), it
                # takes the whole agent down
                self._log.exception('error in heartbeat monitor (%s)', e)
                self.stop()

        rpu.prof ('stop')

    # --------------------------------------------------------------------------
    #
    def _check_commands(self):
        """Atomically read-and-clear pending commands from the pilot document
        (via find_and_modify) and dispatch them."""

        # Check if there's a command waiting
        retdoc = self._p.find_and_modify(
                    query  = {"_id"  : self._pilot_id},
                    update = {"$set" : {COMMAND_FIELD: []}}, # Wipe content of array
                    fields = [COMMAND_FIELD, 'state']
                    )

        if not retdoc:
            return

        commands = retdoc[COMMAND_FIELD]
        state    = retdoc['state']

        for command in commands:

            command_str = '%s:%s' % (command[COMMAND_TYPE], command[COMMAND_ARG])

            rpu.prof('ingest_cmd', msg="mongodb to HeartbeatMonitor (%s)" % command_str)

            if command[COMMAND_TYPE] == COMMAND_CANCEL_PILOT:
                self.stop()
                pilot_CANCELED(self._p, self._pilot_id, self._log, "CANCEL received. Terminating.")
                sys.exit(1)

            elif state == rp.CANCELING:
                # pilot state flipped to CANCELING without an explicit
                # cancel command -- treat it the same way
                self.stop()
                pilot_CANCELED(self._p, self._pilot_id, self._log, "CANCEL implied. Terminating.")
                sys.exit(1)

            elif command[COMMAND_TYPE] == COMMAND_CANCEL_COMPUTE_UNIT:
                self._log.info("Received Cancel Compute Unit command for: %s", command[COMMAND_ARG])
                # Put it on the command queue of the ExecWorker
                rpu.prof('put_cmd', msg="HeartbeatMonitor to command_queue (%s)" % command_str,
                         uid=command[COMMAND_ARG])
                self._command_queue.put(command)

            elif command[COMMAND_TYPE] == COMMAND_KEEP_ALIVE:
                self._log.info("Received KeepAlive command.")

            else:
                self._log.error("Received unknown command: %s with arg: %s.",
                                command[COMMAND_TYPE], command[COMMAND_ARG])

    # --------------------------------------------------------------------------
    #
    def _check_state(self):
        """Fail the pilot if a worker thread died; finish the pilot once the
        runtime limit is exceeded."""

        # Check the workers periodically.  If they have died, we
        # exit as well.  this can happen, e.g., if the worker
        # process has caught an exception
        for worker in self._agent.worker_list:
            if not worker.is_alive():
                self.stop()
                msg = 'worker %s died' % str(worker)
                pilot_FAILED(self._p, self._pilot_id, self._log, msg)
                # NOTE(review): the loop keeps iterating after stop() -- every
                # additional dead worker triggers another pilot_FAILED update.

        # Make sure that we haven't exceeded the agent runtime.  if
        # we have, terminate.
        if time.time() >= self._starttime + (int(self._runtime) * 60):
            self._log.info("Agent has reached runtime limit of %s seconds.", self._runtime*60)
            self.stop()
            pilot_DONE(self._p, self._pilot_id)
# ==============================================================================
#
class Agent(object):
    """
    The Agent owns all queues and worker components of the pilot agent:
    it wires up LRMS, scheduler, launch methods and worker threads, ingests
    compute units from mongodb, and routes them into the staging/scheduling
    pipeline.
    """

    # --------------------------------------------------------------------------
    #
    def __init__(self, name, config, logger, lrms_name, requested_cores,
                 task_launch_method, mpi_launch_method, spawner,
                 scheduler_name, runtime,
                 mongodb_url, mongodb_name, mongodb_auth,
                 pilot_id, session_id):
        """Create all queues and components.  NOTE: the worker threads start
        running from within their constructors."""

        rpu.prof('Agent init')

        self.name = name
        self._config = config
        self._log = logger
        self._debug_helper = ru.DebugHelper()
        self._pilot_id = pilot_id
        self._runtime = runtime
        self._terminate = threading.Event()
        self._starttime = time.time()
        self._workdir = os.getcwd()
        self._session_id = session_id
        # NOTE(review): duplicate assignment -- _pilot_id was already set above
        self._pilot_id = pilot_id

        self.worker_list = list()

        # we want to own all queues -- that simplifies startup and shutdown
        self._schedule_queue = QUEUE_TYPE()
        self._stagein_queue = QUEUE_TYPE()
        self._execution_queue = QUEUE_TYPE()
        self._stageout_queue = QUEUE_TYPE()
        self._update_queue = QUEUE_TYPE()
        self._command_queue = QUEUE_TYPE()

        # mongodb collections: '<session>.p' (pilots), '<session>.cu' (units)
        mongo_db = rpu.get_mongodb(mongodb_url, mongodb_name, mongodb_auth)
        self._p  = mongo_db["%s.p"  % self._session_id]
        self._cu = mongo_db["%s.cu" % self._session_id]

        self._lrms = LRMS.create(
                name            = lrms_name,
                config          = self._config,
                logger          = self._log,
                requested_cores = requested_cores)

        self._scheduler = Scheduler.create(
                name            = scheduler_name,
                config          = self._config,
                logger          = self._log,
                lrms            = self._lrms,
                schedule_queue  = self._schedule_queue,
                execution_queue = self._execution_queue,
                update_queue    = self._update_queue)
        self.worker_list.append(self._scheduler)

        self._task_launcher = LaunchMethod.create(
                name            = task_launch_method,
                config          = self._config,
                logger          = self._log,
                scheduler       = self._scheduler)

        self._mpi_launcher = LaunchMethod.create(
                name            = mpi_launch_method,
                config          = self._config,
                logger          = self._log,
                scheduler       = self._scheduler)

        # worker counts per component are configurable
        for n in range(self._config['number_of_workers'][STAGEIN_WORKER]):
            stagein_worker = StageinWorker(
                name            = "StageinWorker-%d" % n,
                config          = self._config,
                logger          = self._log,
                agent           = self,
                execution_queue = self._execution_queue,
                schedule_queue  = self._schedule_queue,
                stagein_queue   = self._stagein_queue,
                update_queue    = self._update_queue,
                workdir         = self._workdir
            )
            self.worker_list.append(stagein_worker)

        for n in range(self._config['number_of_workers'][EXEC_WORKER]):
            exec_worker = ExecWorker.create(
                name            = "ExecWorker-%d" % n,
                config          = self._config,
                spawner         = spawner,
                logger          = self._log,
                agent           = self,
                lrms            = self._lrms,
                scheduler       = self._scheduler,
                task_launcher   = self._task_launcher,
                mpi_launcher    = self._mpi_launcher,
                command_queue   = self._command_queue,
                execution_queue = self._execution_queue,
                stageout_queue  = self._stageout_queue,
                update_queue    = self._update_queue,
                schedule_queue  = self._schedule_queue,
                pilot_id        = self._pilot_id,
                session_id      = self._session_id
            )
            self.worker_list.append(exec_worker)

        for n in range(self._config['number_of_workers'][STAGEOUT_WORKER]):
            stageout_worker = StageoutWorker(
                name            = "StageoutWorker-%d" % n,
                config          = self._config,
                agent           = self,
                logger          = self._log,
                execution_queue = self._execution_queue,
                stageout_queue  = self._stageout_queue,
                update_queue    = self._update_queue,
                workdir         = self._workdir
            )
            self.worker_list.append(stageout_worker)

        for n in range(self._config['number_of_workers'][UPDATE_WORKER]):
            update_worker = UpdateWorker(
                name            = "UpdateWorker-%d" % n,
                config          = self._config,
                logger          = self._log,
                agent           = self,
                session_id      = self._session_id,
                update_queue    = self._update_queue,
                mongodb_url     = mongodb_url,
                mongodb_name    = mongodb_name,
                mongodb_auth    = mongodb_auth
            )
            self.worker_list.append(update_worker)

        hbmon = HeartbeatMonitor(
                name          = "HeartbeatMonitor",
                config        = self._config,
                logger        = self._log,
                agent         = self,
                command_queue = self._command_queue,
                p             = self._p,
                starttime     = self._starttime,
                runtime       = self._runtime,
                pilot_id      = self._pilot_id)
        self.worker_list.append(hbmon)

        rpu.prof('Agent init done')

    # --------------------------------------------------------------------------
    #
    def stop(self):
        """
        Terminate the agent main loop.  The workers will be pulled down once the
        main loop finishes (see run())
        """
        rpu.prof ('stop request')
        self._terminate.set()

    # --------------------------------------------------------------------------
    #
    def update_unit(self, src, uid, state=None, msg=None, query=None, update=None):
        """Queue a mongodb update for unit 'uid'.

        src:    name of the component requesting the update (for profiling).
        state:  target state, recorded with the queue entry (may be None).
        msg:    optional log message, appended to the unit's 'log' array.
        query:  extra query constraints merged with {'_id': uid}.
        update: mongodb update document ('$set'/'$push' etc).
        """

        if not query : query = dict()
        if not update : update = dict()

        query_dict = dict()
        update_dict = update

        query_dict['_id'] = uid

        for key,val in query.iteritems():
            query_dict[key] = val

        if msg:
            if not '$push' in update_dict:
                update_dict['$push'] = dict()

            update_dict['$push']['log'] = {'message' : msg,
                                           'timestamp' : rpu.timestamp()}

        if state:
            rpu.prof('put', msg="%s to update_queue (%s)" % (src, state), uid=query_dict['_id'])
        else:
            rpu.prof('put', msg="%s to update_queue" % src, uid=query_dict['_id'])

        query_list = rpu.blowup(self._config, query_dict, UPDATE_QUEUE)

        for _query_dict in query_list :
            self._update_queue.put({'_id' : _query_dict['_id'],
                                    'state' : state,
                                    'cbase' : '.cu',
                                    'query' : _query_dict,
                                    'update' : update_dict})

    # --------------------------------------------------------------------------
    #
    def update_unit_state(self, src, uid, state, msg=None, query=None, update=None,
                          logger=None):
        """Like update_unit, but additionally sets the unit state and appends
        a statehistory entry.  Caller-provided '$set'/'$push' entries are
        merged into the generated update document."""

        if not query : query = dict()
        if not update : update = dict()

        if logger and msg:
            logger("unit '%s' state change (%s)" % (uid, msg))

        # we alter update, so rather use a copy of the dict...
        # (a fresh dict is built below and the caller's entries are merged in)
        now = rpu.timestamp()
        update_dict = {
            '$set' : {
                'state' : state
            },
            '$push': {
                'statehistory' : {
                    'state' : state,
                    'timestamp' : now
                }
            }
        }

        if '$set' in update:
            for key,val in update['$set'].iteritems():
                update_dict['$set'][key] = val

        if '$push' in update:
            for key,val in update['$push'].iteritems():
                update_dict['$push'][key] = val

        self.update_unit(src = src,
                         uid = uid,
                         state = state,
                         msg = msg,
                         query = query,
                         update = update_dict)

    # --------------------------------------------------------------------------
    #
    def run(self):
        """Main loop: mark the pilot ACTIVE, then poll mongodb for new units
        until termination; finally tear down all workers.

        NOTE: this method never returns -- it exits via sys.exit().
        """
        rpu.prof('run')

        # first order of business: set the start time and state of the pilot
        self._log.info("Agent %s starting ...", self._pilot_id)
        now = rpu.timestamp()
        ret = self._p.update(
            {"_id": self._pilot_id},
            {"$set": {"state" : rp.ACTIVE,
                      # TODO: The two fields below are currently scheduler
                      #       specific!
                      "nodes" : self._lrms.node_list,
                      "cores_per_node" : self._lrms.cores_per_node,
                      "started" : now},
             "$push": {"statehistory": {"state" : rp.ACTIVE,
                                        "timestamp": now}}
            })
        # TODO: Check for return value, update should be true!
        self._log.info("Database updated: %s", ret)

        while not self._terminate.is_set():

            try:
                # check for new units
                action = self._check_units()

                # if no units have been seen, then wait for juuuust a little...
                # FIXME: use some mongodb notification mechanism to avoid busy
                # polling.  Tailed cursors or whatever...
                if not action:
                    time.sleep(self._config['db_poll_sleeptime'])

            except Exception as e:
                # exception in the main loop is fatal
                self.stop()
                pilot_FAILED(self._p, self._pilot_id, self._log,
                    "ERROR in agent main loop: %s. %s" % (e, traceback.format_exc()))
                sys.exit(1)

        # main loop terminated, so self._terminate was set
        # we need to signal shut down to all workers
        for worker in self.worker_list:
            worker.stop()

        # to make sure that threads are not stuck waiting on a queue, we send
        # a signal on each queue
        self._schedule_queue.put (None)
        self._execution_queue.put(None)
        self._update_queue.put (None)
        self._stagein_queue.put (None)
        self._stageout_queue.put (None)

        # and wait for them to actually finish
        # FIXME: make sure this works when stop was initialized by heartbeat monitor
        for worker in self.worker_list:
            worker.join()

        # record cancelation state
        pilot_CANCELED(self._p, self._pilot_id, self._log,
                "Terminated (_terminate set).")

        rpu.prof ('stop')
        sys.exit(0)

    # --------------------------------------------------------------------------
    #
    def _check_units(self):
        """Pull new units from mongodb, prepare their sandboxes, and route
        them into the stage-in or scheduling pipeline.

        Returns the number of units ingested (0 if none were found).
        """

        # Check if there are compute units waiting for execution,
        # and log that we pulled it.
        #
        # FIXME: Unfortunately, 'find_and_modify' is not bulkable, so we have
        # to use 'find'.  To avoid finding the same units over and over again,
        # we update the state *before* running the next find -- so we do it
        # right here...  No idea how to avoid that roundtrip...
        # This also blocks us from using multiple ingest threads, or from doing
        # late binding by unit pull :/
        cu_cursor = self._cu.find(spec = {"pilot" : self._pilot_id,
                                          "state" : rp.PENDING_EXECUTION})
        if cu_cursor.count():

            cu_list = list(cu_cursor)
            cu_uids = [_cu['_id'] for _cu in cu_list]
            self._cu.update(multi = True,
                            spec = {"_id" : {"$in" : cu_uids}},
                            document = {"$set" : {"state" : rp.ALLOCATING},
                                        "$push" : {"statehistory":
                                            {
                                                "state" : rp.ALLOCATING,
                                                "timestamp" : rpu.timestamp()
                                            }
                                        }})

        else :
            # if we did not find any units which can be executed immediately, we
            # check if we have any units for which to do stage-in
            cu_cursor = self._cu.find(spec = {"pilot" : self._pilot_id,
                                              'Agent_Input_Status': rp.PENDING})
            if cu_cursor.count():

                cu_list = list(cu_cursor)
                cu_uids = [_cu['_id'] for _cu in cu_list]
                self._cu.update(multi = True,
                                spec = {"_id" : {"$in" : cu_uids}},
                                document = {"$set" : {"state" : rp.STAGING_INPUT,
                                                      "Agent_Input_Status": rp.EXECUTING},
                                            "$push" : {"statehistory":
                                                {
                                                    "state" : rp.STAGING_INPUT,
                                                    "timestamp" : rpu.timestamp()
                                                }
                                            }})

            else :
                # no units whatsoever...
                return 0

        # now we really own the CUs, and can start working on them (ie. push
        # them into the pipeline)
        if cu_cursor.count():
            rpu.prof('Agent get units', msg="number of units: %d" % cu_cursor.count(),
                     logger=self._log.info)

        for cu in cu_list:

            rpu.prof('get', msg="MongoDB to Agent (%s)" % cu['state'], uid=cu['_id'], logger=self._log.info)

            _cu_list = rpu.blowup(self._config, cu, AGENT)

            for _cu in _cu_list :

                try:
                    cud = _cu['description']
                    workdir = "%s/%s" % (self._workdir, _cu['_id'])

                    _cu['workdir'] = workdir
                    _cu['stdout'] = ''
                    _cu['stderr'] = ''
                    # NOTE(review): 'opaque_clot' looks like a typo for
                    # 'opaque_slot' (the key read elsewhere) -- confirm against
                    # the scheduler/stageout code before changing.
                    _cu['opaque_clot'] = None

                    stdout_file = cud.get('stdout')
                    if not stdout_file:
                        stdout_file = 'STDOUT'
                    _cu['stdout_file'] = os.path.join(workdir, stdout_file)

                    stderr_file = cud.get('stderr')
                    if not stderr_file:
                        stderr_file = 'STDERR'
                    _cu['stderr_file'] = os.path.join(workdir, stderr_file)

                    rpu.prof('Agent get unit meta', uid=_cu['_id'])

                    # create unit sandbox
                    rec_makedir(workdir)
                    rpu.prof('Agent get unit mkdir', uid=_cu['_id'])

                    # and send to staging / execution, respectively
                    if _cu['Agent_Input_Directives'] and \
                       _cu['Agent_Input_Status'] == rp.PENDING :

                        _cu['state'] = rp.STAGING_INPUT
                        self.update_unit_state(src = 'Agent',
                                               uid = _cu['_id'],
                                               state = rp.STAGING_INPUT,
                                               msg = 'unit needs input staging')

                        _cu_list = rpu.blowup(self._config, _cu, STAGEIN_QUEUE)
                        for __cu in _cu_list :
                            rpu.prof('put', msg="Agent to stagein_queue (%s)" % __cu['state'], uid=__cu['_id'])
                            self._stagein_queue.put(__cu)

                    else:
                        _cu['state'] = rp.ALLOCATING
                        self.update_unit_state(src = 'Agent',
                                               uid = _cu['_id'],
                                               state = rp.ALLOCATING,
                                               msg = 'unit needs no input staging')

                        _cu_list = rpu.blowup(self._config, _cu, SCHEDULE_QUEUE)
                        for __cu in _cu_list :
                            rpu.prof('put', msg="Agent to schedule_queue (%s)" % __cu['state'], uid=__cu['_id'])
                            self._schedule_queue.put(__cu)

                except Exception as e:
                    # if any unit sorting step failed, the unit did not end up in
                    # a queue (its always the last step).  We set it to FAILED
                    msg = "could not sort unit (%s)" % e
                    rpu.prof('error', msg=msg, uid=_cu['_id'], logger=self._log.exception)
                    _cu['state'] = rp.FAILED
                    self.update_unit_state(src = 'Agent',
                                           uid = _cu['_id'],
                                           state = rp.FAILED,
                                           msg = msg)

                    # NOTE: this is final, the unit will not be touched
                    # anymore.
                    _cu = None

        # indicate that we did some work (if we did...)
        return len(cu_uids)
# ==============================================================================
#
# Agent main code
#
# ==============================================================================
def main():
    """Agent process entry point.

    Parses command line options, configures logging and signal handlers,
    merges an optional local agent config, connects to mongodb, and runs the
    Agent until it terminates.

    NOTE(review): the 'finally' clause below unconditionally calls
    sys.exit(8), which overrides every earlier return/exit code (including
    the normal-termination sys.exit(0) raised inside Agent.run()) -- confirm
    whether that is intended before changing it.
    """
    mongo_p = None

    parser = optparse.OptionParser()
    parser.add_option('-a', dest='mongodb_auth')
    parser.add_option('-c', dest='cores', type='int')
    parser.add_option('-d', dest='debug_level', type='int')
    parser.add_option('-j', dest='task_launch_method')
    parser.add_option('-k', dest='mpi_launch_method')
    parser.add_option('-l', dest='lrms')
    parser.add_option('-m', dest='mongodb_url')
    parser.add_option('-n', dest='mongodb_name')
    parser.add_option('-o', dest='spawner')
    parser.add_option('-p', dest='pilot_id')
    parser.add_option('-q', dest='agent_scheduler')
    parser.add_option('-r', dest='runtime', type='int')
    parser.add_option('-s', dest='session_id')

    # parse the whole shebang
    (options, args) = parser.parse_args()

    if args : parser.error("Unused arguments '%s'" % args)

    if not options.cores : parser.error("Missing number of cores (-c)")
    if not options.debug_level : parser.error("Missing DEBUG level (-d)")
    if not options.task_launch_method : parser.error("Missing unit launch method (-j)")
    if not options.mpi_launch_method : parser.error("Missing mpi launch method (-k)")
    if not options.lrms : parser.error("Missing LRMS (-l)")
    if not options.mongodb_url : parser.error("Missing MongoDB URL (-m)")
    if not options.mongodb_name : parser.error("Missing database name (-n)")
    if not options.spawner : parser.error("Missing agent spawner (-o)")
    if not options.pilot_id : parser.error("Missing pilot id (-p)")
    if not options.agent_scheduler : parser.error("Missing agent scheduler (-q)")
    if not options.runtime : parser.error("Missing agent runtime (-r)")
    if not options.session_id : parser.error("Missing session id (-s)")

    rpu.prof('start', uid=options.pilot_id)

    # configure the agent logger
    logger = logging.getLogger ('radical.pilot.agent')
    handle = logging.FileHandler("agent.log")
    formatter = logging.Formatter ('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    logger.setLevel(options.debug_level)
    handle.setFormatter(formatter)
    logger.addHandler(handle)

    # FIX: report radical.utils' own version (this previously logged
    # rs.version, i.e. the SAGA version, twice)
    logger.info("Using RADICAL-Utils version %s", ru.version)
    logger.info("Using RADICAL-SAGA version %s", rs.version)
    logger.info("Using RADICAL-Pilot version %s (%s)", rp.version, git_ident)

    # --------------------------------------------------------------------------
    #
    def sigint_handler(signum, frame):
        msg = 'Caught SIGINT. EXITING. (%s: %s)' % (signum, frame)
        pilot_FAILED(mongo_p, options.pilot_id, logger, msg)
        sys.exit(2)
    signal.signal(signal.SIGINT, sigint_handler)

    # --------------------------------------------------------------------------
    #
    def sigalarm_handler(signum, frame):
        msg = 'Caught SIGALRM (Walltime limit reached?). EXITING (%s: %s)' \
            % (signum, frame)
        pilot_FAILED(mongo_p, options.pilot_id, logger, msg)
        sys.exit(3)
    signal.signal(signal.SIGALRM, sigalarm_handler)

    # --------------------------------------------------------------------------
    # load the local agent config, and overload the config dicts
    try :
        logger.info ("load agent config")
        cfg_file = "%s/.radical/pilot/configs/agent.json" % os.environ['HOME']
        cfg = ru.read_json_str (cfg_file)

        import pprint
        logger.debug("\n%s\n" % pprint.pformat(cfg.get('drop_clones', {})))
        logger.debug("\n%s\n" % pprint.pformat(agent_config['drop_clones']))

        ru.dict_merge (agent_config['number_of_workers'], cfg.get ('number_of_workers', {}), policy='overwrite')
        ru.dict_merge (agent_config['blowup_factor'], cfg.get ('blowup_factor', {}), policy='overwrite')
        ru.dict_merge (agent_config['drop_clones'], cfg.get ('drop_clones', {}), policy='overwrite')

        logger.info ("agent config merged")

        # FIX: "\Agent" contained an invalid (literal) escape sequence -- the
        # intent was a leading newline
        logger.debug("\nAgent config:\n%s\n\n" % pprint.pformat (agent_config))

    except Exception as e:
        # a missing/broken local config is not fatal -- defaults are used
        logger.info ("agent config not merged: %s", e)

    try:
        # ----------------------------------------------------------------------
        # Establish database connection
        rpu.prof('db setup')
        mongo_db = rpu.get_mongodb(options.mongodb_url, options.mongodb_name,
                                   options.mongodb_auth)
        mongo_p = mongo_db["%s.p" % options.session_id]

        # ----------------------------------------------------------------------
        # Launch the agent thread
        rpu.prof('Agent create')
        agent = Agent(
                name = 'Agent',
                config = agent_config,
                logger = logger,
                lrms_name = options.lrms,
                requested_cores = options.cores,
                task_launch_method = options.task_launch_method,
                mpi_launch_method = options.mpi_launch_method,
                spawner = options.spawner,
                scheduler_name = options.agent_scheduler,
                runtime = options.runtime,
                mongodb_url = options.mongodb_url,
                mongodb_name = options.mongodb_name,
                mongodb_auth = options.mongodb_auth,
                pilot_id = options.pilot_id,
                session_id = options.session_id
        )

        agent.run()
        rpu.prof('Agent done')

    except SystemExit:
        # Agent.run() exits via sys.exit(); the signal handlers above also
        # raise SystemExit -- both land here.
        logger.error("Caught keyboard interrupt. EXITING")
        return(6)

    except Exception as e:
        error_msg = "Error running agent: %s" % str(e)
        logger.exception(error_msg)
        pilot_FAILED(mongo_p, options.pilot_id, logger, error_msg)
        sys.exit(7)

    finally:
        rpu.prof('stop', msg='finally clause')
        sys.exit(8)
# ------------------------------------------------------------------------------
#
if __name__ == "__main__":

    # Report the exact runtime environment (interpreter, stack versions,
    # agent flavor) before handing control to main() -- this preamble is the
    # first thing inspected in the agent's captured stdout on failure.
    print "---------------------------------------------------------------------"
    print
    print "PYTHONPATH: %s" % sys.path
    print "python: %s" % sys.version
    print "utils : %-5s : %s" % (ru.version_detail, ru.__file__)
    print "saga : %-5s : %s" % (rs.version_detail, rs.__file__)
    print "pilot : %-5s : %s" % (rp.version_detail, rp.__file__)
    print " type : multicore"
    print " gitid : %s" % git_ident
    print
    print "---------------------------------------------------------------------"
    print

    sys.exit(main())
#
# ------------------------------------------------------------------------------
|
#
# Copyright 2012 WebFilings, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Core async task wrapper. This module contains the `Async` class, which is
used to create asynchronous jobs, and a `defaults` decorator you may use to
specify default settings for a particular async task. To use:
# Create a task.
work = Async(
target="function.to.run",
args=(args, for, function),
kwargs={'keyword': arguments, 'to': target},
task_args={"appengine": 1, "task": "kwargs"},
queue="yourqueue"
)
# Enqueue the task.
work.start()
*or*, set default arguments for a function:
@defaults(task_args={"appengine": 1, "task": "kwargs"}, queue="yourqueue")
def run_me(*args, **kwargs):
pass
# Create a task.
work = Async(
target=run_me,
args=(args, for, function),
kwargs={'keyword': arguments, 'to': target},
)
# Enqueue the task.
work.start()
You may also update options after instantiation:
# Create a task.
work = Async(
target="function.to.run",
args=(args, for, function),
kwargs={'keyword': arguments, 'to': target}
)
work.update_options(task_args={"appengine":1, "task": "kwargs"},
queue="yourqueue")
# Enqueue the task.
work.start()
The order of precedence is:
1) options specified when calling start.
2) options specified using update_options.
3) options specified in the constructor.
4) options specified by @defaults decorator.
"""
from functools import wraps
import json
from .job_utils import decode_callbacks
from .job_utils import encode_callbacks
from .job_utils import get_function_path_and_options
__all__ = ['ASYNC_DEFAULT_QUEUE', 'ASYNC_ENDPOINT', 'Async', 'defaults']
ASYNC_DEFAULT_QUEUE = 'default'
ASYNC_ENDPOINT = '/_ah/queue/async'
MAX_DEPTH = 100
class NotExecutedError(Exception):
    """Raised when the result of an Async is accessed before it has been
    executed."""
class NotExecutingError(Exception):
    """This Async is not currently executing."""
class AlreadyExecutedError(Exception):
    """This Async has already been executed."""
class AlreadyExecutingError(Exception):
    """This Async is currently executing."""
class Abort(Exception):
    """This Async needs to be aborted immediately.  Only an info level logging
    message will be output about the aborted job.
    """
class AbortAndRestart(Exception):
    """This Async needs to be aborted immediately and restarted."""
class Async(object):
def __init__(self, target, args=None, kwargs=None, **options):
    """Initialize the job definition.

    target:      callable (or dotted-path string) this Async will run.
    args/kwargs: positional and keyword arguments for the target.
    options:     additional job options, validated by _check_options.
    """
    self._options = {}

    # Make sure nothing is snuck in.
    _check_options(options)

    self._update_job(target, args, kwargs)
    self.update_options(**options)

    self._execution_context = None

    # execution lifecycle flags, driven by the executing/result setters
    self._executing = False
    self._executed = False

    self._result = None
@property
def executed(self):
    """True once a result has been recorded via the result setter."""
    return self._executed
@property
def executing(self):
    """True while the job is running (set via the executing setter)."""
    return self._executing
@executing.setter
def executing(self, value):
    """Flag the Async as executing, rejecting invalid state transitions."""
    if self._executed:
        raise AlreadyExecutedError(
            'You can not execute an executed job.')
    elif self._executing:
        raise AlreadyExecutingError(
            'Job is already executing, can not set executing.')
    self._executing = value
@property
def result(self):
    """Return the stored result; raises NotExecutedError if the job has not
    been executed yet."""
    if not self.executed:
        raise NotExecutedError(
            'You must execute this Async before getting its result.')

    return self._result
@result.setter
def result(self, result):
    """Record the job's result; only legal while executing.  As a side
    effect the job transitions to the executed state."""
    if not self._executing:
        raise NotExecutingError(
            'The Async must be executing to set its result.')

    self._result = result
    self._executing = False
    self._executed = True
@property
def _function_path(self):
    # 'job' is a (target_path, args, kwargs) tuple (see _update_job); the
    # first element is the dotted path of the target function.
    return self._options['job'][0]
def _update_job(self, target, args, kwargs):
    """Specify the function this async job is to execute when run."""
    target_path, options = get_function_path_and_options(target)

    assert isinstance(args, (tuple, list)) or args is None
    assert isinstance(kwargs, dict) or kwargs is None

    # options attached to the target (e.g. via the @defaults decorator) are
    # applied first, so explicitly passed options can still override them
    if options:
        self.update_options(**options)

    self._options['job'] = (target_path, args, kwargs)
def set_execution_context(self, execution_context):
    """Set the ExecutionContext this async is executing under.

    Raises AlreadyInContextError if a context was already assigned.
    """
    if self._execution_context:
        from .context import AlreadyInContextError
        raise AlreadyInContextError

    self._execution_context = execution_context
def get_options(self):
    """Return this async job's configuration options (the live dict, not a
    copy)."""
    return self._options
def update_options(self, **options):
    """Safely update this async job's configuration options."""
    # validation happens before the merge so a bad option leaves the
    # existing options untouched
    _check_options(options)

    self._options.update(options)
def get_callbacks(self):
    """Return this async job's callback map (empty dict if none set)."""
    return self._options.get('callbacks', {})
def get_headers(self):
    """Create and return task headers."""
    # TODO: Encode some options into a header here.
    return self._options.get('headers', {})
def get_queue(self):
    """Return the queue the task should run in (falls back to
    ASYNC_DEFAULT_QUEUE)."""
    return self._options.get('queue', ASYNC_DEFAULT_QUEUE)
def get_task_args(self):
    """Get user-specified taskqueue.Task keyword arguments."""
    return self._options.get('task_args', {})
def to_task(self):
    """Return a taskqueue.Task object representing this async job."""
    from google.appengine.api.taskqueue import Task

    url = "%s/%s" % (ASYNC_ENDPOINT, self._function_path)

    kwargs = {
        'url': url,
        'headers': self.get_headers().copy(),
        'payload': json.dumps(self.to_dict()),
    }
    # User-supplied task args (eta, countdown, name, ...) take precedence
    # over the defaults assembled above.
    kwargs.update(self.get_task_args())

    return Task(**kwargs)
def start(self):
    """Insert the task into the requested queue, 'default' if none given.

    If a TransientError is hit the task will re-insert the task.

    If a TaskAlreadyExistsError or TombstonedTaskError is hit the task will
    silently fail.
    """
    # Refuse to enqueue once the recursion depth budget is exhausted.
    if self._check_and_update_depth():
        import logging
        logging.warning('Async %s execution has reached max_depth and is '
                        'ceasing execution.' % self._function_path)
        return

    from google.appengine.api import taskqueue

    task = self.to_task()

    try:
        taskqueue.Queue(name=self.get_queue()).add(task)
    except taskqueue.TransientError:
        # One blind retry; a second TransientError propagates to the caller.
        taskqueue.Queue(name=self.get_queue()).add(task)
    except (taskqueue.TaskAlreadyExistsError,
            taskqueue.TombstonedTaskError):
        return

    # TODO: Return a "result" object.
def to_dict(self):
    """Return this async job as a dict suitable for json encoding."""
    import copy

    options = copy.deepcopy(self._options)

    # JSON don't like datetimes.
    eta = options.get('task_args', {}).get('eta')
    if eta:
        import time
        # Serialized as a POSIX timestamp; from_dict() reverses this.
        options['task_args']['eta'] = time.mktime(eta.timetuple())

    callbacks = self._options.get('callbacks')
    if callbacks:
        options['callbacks'] = encode_callbacks(callbacks)

    return options
@classmethod
def from_dict(cls, async_dict):
    """Return an async job from a dict output by Async.to_dict.

    Args:
        async_dict: dict produced by Async.to_dict().

    Returns:
        A new instance of cls reconstituted from the dict.
    """
    import copy

    # NOTE: the parameter was renamed from `async`, which became a
    # reserved keyword in Python 3.7 and made this method unparseable.
    async_options = copy.deepcopy(async_dict)

    # JSON don't like datetimes.
    eta = async_options.get('task_args', {}).get('eta')
    if eta:
        from datetime import datetime
        async_options['task_args']['eta'] = datetime.fromtimestamp(eta)

    target, args, kwargs = async_options.pop('job')

    # If there are callbacks, reconstitute them.
    callbacks = async_options.get('callbacks', {})
    if callbacks:
        async_options['callbacks'] = decode_callbacks(callbacks)

    return cls(target, args, kwargs, **async_options)
def _restart(self):
    """Restarts the executing Async.

    If the Async is executing, then it will reset the _executing flag, and
    restart this job. This means that the job will not necessarily execute
    immediately, or on the same machine, as it goes back into the queue.
    """
    if not self._executing:
        raise NotExecutingError("Must be executing to restart the job, "
                                "perhaps you want Async.start()")
    self._executing = False
    # Re-enqueue; start() re-checks the recursion depth budget.
    return self.start()
def _check_and_update_depth(self):
    """Check to see if we've exceeded the max recursion depth.

    Returns True if current depth > max depth.

    When that is False, it updates this Async with the incremented depth
    and the max depth.
    """
    from .context._local import get_local_context

    recursion_options = self.get_options().get('_recursion', {})
    current_depth = recursion_options.get('current', 0)
    max_depth = recursion_options.get('max', MAX_DEPTH)

    execution_context = get_local_context()._executing_async_context
    if execution_context:
        # We're in an existing Async chain; inherit the wrapping Async's
        # counters so chained jobs can not evade the depth budget.
        # NOTE(review): the `.async` attribute name is a reserved keyword
        # in Python 3.7+ and needs renaming together with its owner class.
        wrapping_options = execution_context.async.get_options()
        wrapping_recursion = wrapping_options.get('_recursion', {})
        current_depth = wrapping_recursion.get('current', current_depth)
        max_depth = wrapping_recursion.get('max', max_depth)

    if current_depth > max_depth:
        return True

    # Increment and store
    self.update_options(_recursion={'current': current_depth + 1,
                                    'max': max_depth})
    return False
def defaults(**options):
    """Set default Async options on the function decorated.

    Note: you must pass the decorated function by reference, not as a
    "path.string.to.function" for this to have any effect.
    """
    # Validate the option keys once, at decoration time.
    _check_options(options)

    def decorate(func):
        # Attach the defaults to the function object itself so they are
        # discovered when the function is wrapped in an Async.
        func._async_options = options

        @wraps(func)
        def delegate(*args, **kwargs):
            return func(*args, **kwargs)

        return delegate

    return decorate
def _check_options(options):
"""Make sure no one passes something not allowed in."""
if not options:
return
assert 'job' not in options
#assert 'callbacks' not in options
# Commit note (artifact): Change _update_and_check_depth to _check_recursion_level
#
# Copyright 2012 WebFilings, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Core async task wrapper. This module contains the `Async` class, which is
used to create asynchronous jobs, and a `defaults` decorator you may use to
specify default settings for a particular async task. To use,
# Create a task.
work = Async(
target="function.to.run",
args=(args, for, function),
kwargs={'keyword': arguments, 'to': target},
task_args={"appengine": 1, "task": "kwargs"},
queue="yourqueue"
)
# Enqueue the task.
work.start()
*or*, set default arguments for a function:
@defaults(task_args={"appengine": 1, "task": "kwargs"}, queue="yourqueue")
def run_me(*args, **kwargs):
pass
# Create a task.
work = Async(
target=run_me,
args=(args, for, function),
kwargs={'keyword': arguments, 'to': target},
)
# Enqueue the task.
work.start()
You may also update options after instantiation:
# Create a task.
work = Async(
target="function.to.run",
args=(args, for, function),
kwargs={'keyword': arguments, 'to': target}
)
work.update_options(task_args={"appengine":1, "task": "kwargs"},
queue="yourqueue")
# Enqueue the task.
work.start()
The order of precedence is:
1) options specified when calling start.
2) options specified using update_options.
3) options specified in the constructor.
4) options specified by @defaults decorator.
"""
from functools import wraps
import json
from .job_utils import decode_callbacks
from .job_utils import encode_callbacks
from .job_utils import get_function_path_and_options
__all__ = ['ASYNC_DEFAULT_QUEUE', 'ASYNC_ENDPOINT', 'Async', 'defaults']

# Queue used when an Async does not specify one, and the URL prefix that
# async tasks are POSTed to.
ASYNC_DEFAULT_QUEUE = 'default'
ASYNC_ENDPOINT = '/_ah/queue/async'

# Default cap on chained Async re-enqueues (see Async recursion checking).
MAX_DEPTH = 100
class NotExecutedError(Exception):
    """This Async has not yet been executed.

    Raised when Async.result is read before the job has executed.
    """
class NotExecutingError(Exception):
    """This Async is not currently executing.

    Raised when a result is set, or a restart is requested, while the job
    is not in the executing state.
    """
class AlreadyExecutedError(Exception):
    """This Async has already been executed.

    Raised when attempting to mark an already-executed job as executing.
    """
class AlreadyExecutingError(Exception):
    """This Async is currently executing.

    Raised when attempting to set the executing flag while the job is
    already in the executing state.
    """
class Abort(Exception):
    """This Async needs to be aborted immediately.

    Only an info level logging message will be output about the aborted
    job.
    """
class AbortAndRestart(Exception):
    """This Async needs to be aborted immediately and restarted.

    # NOTE(review): presumably handled by the async execution runner,
    # which is not visible in this module — confirm against the caller.
    """
class Async(object):
def __init__(self, target, args=None, kwargs=None, **options):
self._options = {}
# Make sure nothing is snuck in.
_check_options(options)
self._update_job(target, args, kwargs)
self.update_options(**options)
self._execution_context = None
self._executing = False
self._executed = False
self._result = None
@property
def executed(self):
return self._executed
@property
def executing(self):
return self._executing
@executing.setter
def executing(self, executing):
if self._executed:
raise AlreadyExecutedError(
'You can not execute an executed job.')
if self._executing:
raise AlreadyExecutingError(
'Job is already executing, can not set executing.')
self._executing = executing
@property
def result(self):
if not self.executed:
raise NotExecutedError(
'You must execute this Async before getting its result.')
return self._result
@result.setter
def result(self, result):
if not self._executing:
raise NotExecutingError(
'The Async must be executing to set its result.')
self._result = result
self._executing = False
self._executed = True
@property
def _function_path(self):
return self._options['job'][0]
def _update_job(self, target, args, kwargs):
"""Specify the function this async job is to execute when run."""
target_path, options = get_function_path_and_options(target)
assert isinstance(args, (tuple, list)) or args is None
assert isinstance(kwargs, dict) or kwargs is None
if options:
self.update_options(**options)
self._options['job'] = (target_path, args, kwargs)
def set_execution_context(self, execution_context):
"""Set the ExecutionContext this async is executing under."""
if self._execution_context:
from .context import AlreadyInContextError
raise AlreadyInContextError
self._execution_context = execution_context
def get_options(self):
"""Return this async job's configuration options."""
return self._options
def update_options(self, **options):
"""Safely update this async job's configuration options."""
_check_options(options)
self._options.update(options)
def get_callbacks(self):
"""Return this async job's callback map."""
return self._options.get('callbacks', {})
def get_headers(self):
"""Create and return task headers."""
# TODO: Encode some options into a header here.
return self._options.get('headers', {})
def get_queue(self):
"""Return the queue the task should run in."""
return self._options.get('queue', ASYNC_DEFAULT_QUEUE)
def get_task_args(self):
"""Get user-specified task kwargs."""
return self._options.get('task_args', {})
def to_task(self):
"""Return a task object representing this async job."""
from google.appengine.api.taskqueue import Task
url = "%s/%s" % (ASYNC_ENDPOINT, self._function_path)
kwargs = {
'url': url,
'headers': self.get_headers().copy(),
'payload': json.dumps(self.to_dict()),
}
kwargs.update(self.get_task_args())
return Task(**kwargs)
def start(self):
"""Insert the task into the requested queue, 'default' if non given.
If a TransientError is hit the task will re-insert the task.
If a TaskAlreadyExistsError or TombstonedTaskError is hit the task will
silently fail.
"""
if self._check_recursion_level():
import logging
logging.warning('Async %s execution has reached max_depth and is '
'ceasing execution.' % self._function_path)
return
from google.appengine.api import taskqueue
task = self.to_task()
try:
taskqueue.Queue(name=self.get_queue()).add(task)
except taskqueue.TransientError:
taskqueue.Queue(name=self.get_queue()).add(task)
except (taskqueue.TaskAlreadyExistsError,
taskqueue.TombstonedTaskError):
return
# TODO: Return a "result" object.
def to_dict(self):
"""Return this async job as a dict suitable for json encoding."""
import copy
options = copy.deepcopy(self._options)
# JSON don't like datetimes.
eta = options.get('task_args', {}).get('eta')
if eta:
import time
options['task_args']['eta'] = time.mktime(eta.timetuple())
callbacks = self._options.get('callbacks')
if callbacks:
options['callbacks'] = encode_callbacks(callbacks)
return options
@classmethod
def from_dict(cls, async):
"""Return an async job from a dict output by Async.to_dict."""
import copy
async_options = copy.deepcopy(async)
# JSON don't like datetimes.
eta = async_options.get('task_args', {}).get('eta')
if eta:
from datetime import datetime
async_options['task_args']['eta'] = datetime.fromtimestamp(eta)
target, args, kwargs = async_options.pop('job')
# If there are callbacks, reconstitute them.
callbacks = async_options.get('callbacks', {})
if callbacks:
async_options['callbacks'] = decode_callbacks(callbacks)
return cls(target, args, kwargs, **async_options)
def _restart(self):
"""Restarts the executing Async.
If the Async is executing, then it will reset the _executing flag, and
restart this job. This means that the job will not necessarily execute
immediately, or on the same machine, as it goes back into the queue.
"""
if not self._executing:
raise NotExecutingError("Must be executing to restart the job, "
"perhaps you want Async.start()")
self._executing = False
return self.start()
def _check_recursion_level(self):
"""Check to see if we've exceeded the max recursion depth.
Returns True if current depth > max depth.
When that is False, it updates this Async with the incremented depth
and the max depth.
"""
from .context._local import get_local_context
recursion_options = self.get_options().get('_recursion', {})
current_depth = recursion_options.get('current', 0)
max_depth = recursion_options.get('max', MAX_DEPTH)
execution_context = get_local_context()._executing_async_context
if execution_context:
# We're in an existing Async chain
wrapping_options = execution_context.async.get_options()
wrapping_recursion = wrapping_options.get('_recursion', {})
current_depth = wrapping_recursion.get('current', current_depth)
max_depth = wrapping_recursion.get('max', max_depth)
if current_depth > max_depth:
return True
# Increment and store
self.update_options(_recursion={'current': current_depth + 1,
'max': max_depth})
return False
def defaults(**options):
    """Set default Async options on the function decorated.

    Note: you must pass the decorated function by reference, not as a
    "path.string.to.function" for this to have any effect.
    """
    # Validate the option keys once, at decoration time.
    _check_options(options)

    def decorate(func):
        # Attach the defaults to the function object itself so they are
        # discovered when the function is wrapped in an Async.
        func._async_options = options

        @wraps(func)
        def delegate(*args, **kwargs):
            return func(*args, **kwargs)

        return delegate

    return decorate
def _check_options(options):
"""Make sure no one passes something not allowed in."""
if not options:
return
assert 'job' not in options
#assert 'callbacks' not in options
# ---------------------------------------------------------------------------
import ast
import inspect
import re
import sys
import traceback
from inspect import CO_VARARGS
from inspect import CO_VARKEYWORDS
from io import StringIO
from pathlib import Path
from traceback import format_exception_only
from types import CodeType
from types import FrameType
from types import TracebackType
from typing import Any
from typing import Callable
from typing import ClassVar
from typing import Dict
from typing import Generic
from typing import Iterable
from typing import List
from typing import Mapping
from typing import Optional
from typing import overload
from typing import Pattern
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from weakref import ref
import attr
import pluggy
import _pytest
from _pytest._code.source import findsource
from _pytest._code.source import getrawcode
from _pytest._code.source import getstatementrange_ast
from _pytest._code.source import Source
from _pytest._io import TerminalWriter
from _pytest._io.saferepr import safeformat
from _pytest._io.saferepr import saferepr
from _pytest.compat import final
from _pytest.compat import get_real_func
from _pytest.deprecated import check_ispytest
from _pytest.pathlib import absolutepath
from _pytest.pathlib import bestrelpath
if TYPE_CHECKING:
from typing_extensions import Literal
from typing_extensions import SupportsIndex
from weakref import ReferenceType
_TracebackStyle = Literal["long", "short", "line", "no", "native", "value", "auto"]
class Code:
    """Wrapper around Python code objects."""

    __slots__ = ("raw",)

    def __init__(self, obj: CodeType) -> None:
        self.raw = obj

    @classmethod
    def from_function(cls, obj: object) -> "Code":
        # Unwrap decorated/partial callables down to the raw code object.
        return cls(getrawcode(obj))

    def __eq__(self, other):
        return self.raw == other.raw

    # Ignore type because of https://github.com/python/mypy/issues/4266.
    __hash__ = None  # type: ignore

    @property
    def firstlineno(self) -> int:
        # Zero-based, unlike co_firstlineno.
        return self.raw.co_firstlineno - 1

    @property
    def name(self) -> str:
        return self.raw.co_name

    @property
    def path(self) -> Union[Path, str]:
        """Return a path object pointing to source code, or an ``str`` in
        case of ``OSError`` / non-existing file."""
        if not self.raw.co_filename:
            return ""
        try:
            p = absolutepath(self.raw.co_filename)
            # maybe don't try this checking
            if not p.exists():
                raise OSError("path check failed.")
            return p
        except OSError:
            # XXX maybe try harder like the weird logic
            # in the standard lib [linecache.updatecache] does?
            return self.raw.co_filename

    @property
    def fullsource(self) -> Optional["Source"]:
        """Return a _pytest._code.Source object for the full source file of the code."""
        full, _ = findsource(self.raw)
        return full

    def source(self) -> "Source":
        """Return a _pytest._code.Source object for the code object's source only."""
        # return source only for that part of code
        return Source(self.raw)

    def getargs(self, var: bool = False) -> Tuple[str, ...]:
        """Return a tuple with the argument names for the code object.

        If 'var' is set True also return the names of the variable and
        keyword arguments when present.
        """
        # Handy shortcut for getting args.
        raw = self.raw
        argcount = raw.co_argcount
        if var:
            # BUGFIX: previously the raw flag values (CO_VARARGS == 0x04,
            # CO_VARKEYWORDS == 0x08) were added to argcount, which made
            # the co_varnames slice below spill over into plain local
            # variable names. Each present *args/**kwargs slot contributes
            # exactly one extra name.
            if raw.co_flags & CO_VARARGS:
                argcount += 1
            if raw.co_flags & CO_VARKEYWORDS:
                argcount += 1
        return raw.co_varnames[:argcount]
class Frame:
    """Wrapper around a Python frame holding f_locals and f_globals
    in which expressions can be evaluated."""

    __slots__ = ("raw",)

    def __init__(self, frame: FrameType) -> None:
        self.raw = frame

    @property
    def lineno(self) -> int:
        # Zero-based, unlike f_lineno.
        return self.raw.f_lineno - 1

    @property
    def f_globals(self) -> Dict[str, Any]:
        return self.raw.f_globals

    @property
    def f_locals(self) -> Dict[str, Any]:
        return self.raw.f_locals

    @property
    def code(self) -> Code:
        return Code(self.raw.f_code)

    @property
    def statement(self) -> "Source":
        """Statement this frame is at."""
        if self.code.fullsource is None:
            return Source("")
        return self.code.fullsource.getstatement(self.lineno)

    def eval(self, code, **vars):
        """Evaluate 'code' in the frame.

        'vars' are optional additional local variables.

        Returns the result of the evaluation.
        """
        # Evaluate against a copy so the frame's real locals stay untouched.
        f_locals = self.f_locals.copy()
        f_locals.update(vars)
        return eval(code, self.f_globals, f_locals)

    def repr(self, object: object) -> str:
        """Return a 'safe' (non-recursive, one-line) string repr for 'object'."""
        return saferepr(object)

    def getargs(self, var: bool = False):
        """Return a list of tuples (name, value) for all arguments.

        If 'var' is set True, also include the variable and keyword arguments
        when present.
        """
        retval = []
        for arg in self.code.getargs(var):
            try:
                retval.append((arg, self.f_locals[arg]))
            except KeyError:
                pass  # this can occur when using Psyco
        return retval
class TracebackEntry:
    """A single entry in a Traceback."""

    __slots__ = ("_rawentry", "_excinfo", "_repr_style")

    def __init__(
        self,
        rawentry: TracebackType,
        excinfo: Optional["ReferenceType[ExceptionInfo[BaseException]]"] = None,
    ) -> None:
        # excinfo is a weakref (when given) so __tracebackhide__ callables
        # can inspect the owning ExceptionInfo without creating a cycle.
        self._rawentry = rawentry
        self._excinfo = excinfo
        self._repr_style: Optional['Literal["short", "long"]'] = None

    @property
    def lineno(self) -> int:
        # Zero-based, unlike tb_lineno.
        return self._rawentry.tb_lineno - 1

    def set_repr_style(self, mode: "Literal['short', 'long']") -> None:
        assert mode in ("short", "long")
        self._repr_style = mode

    @property
    def frame(self) -> Frame:
        return Frame(self._rawentry.tb_frame)

    @property
    def relline(self) -> int:
        # Line number relative to the first line of the code object.
        return self.lineno - self.frame.code.firstlineno

    def __repr__(self) -> str:
        return "<TracebackEntry %s:%d>" % (self.frame.code.path, self.lineno + 1)

    @property
    def statement(self) -> "Source":
        """_pytest._code.Source object for the current statement."""
        source = self.frame.code.fullsource
        assert source is not None
        return source.getstatement(self.lineno)

    @property
    def path(self) -> Union[Path, str]:
        """Path to the source code."""
        return self.frame.code.path

    @property
    def locals(self) -> Dict[str, Any]:
        """Locals of underlying frame."""
        return self.frame.f_locals

    def getfirstlinesource(self) -> int:
        return self.frame.code.firstlineno

    def getsource(
        self, astcache: Optional[Dict[Union[str, Path], ast.AST]] = None
    ) -> Optional["Source"]:
        """Return failing source code."""
        # we use the passed in astcache to not reparse asttrees
        # within exception info printing
        source = self.frame.code.fullsource
        if source is None:
            return None
        key = astnode = None
        if astcache is not None:
            key = self.frame.code.path
            if key is not None:
                astnode = astcache.get(key, None)
        start = self.getfirstlinesource()
        try:
            astnode, _, end = getstatementrange_ast(
                self.lineno, source, astnode=astnode
            )
        except SyntaxError:
            # Fall back to a single-line span when the statement range can
            # not be determined from the AST.
            end = self.lineno + 1
        else:
            if key is not None and astcache is not None:
                astcache[key] = astnode
        return source[start:end]

    source = property(getsource)

    def ishidden(self) -> bool:
        """Return True if the current frame has a var __tracebackhide__
        resolving to True.

        If __tracebackhide__ is a callable, it gets called with the
        ExceptionInfo instance and can decide whether to hide the traceback.

        Mostly for internal use.
        """
        tbh: Union[
            bool, Callable[[Optional[ExceptionInfo[BaseException]]], bool]
        ] = False
        for maybe_ns_dct in (self.frame.f_locals, self.frame.f_globals):
            # in normal cases, f_locals and f_globals are dictionaries
            # however via `exec(...)` / `eval(...)` they can be other types
            # (even incorrect types!).
            # as such, we suppress all exceptions while accessing __tracebackhide__
            try:
                tbh = maybe_ns_dct["__tracebackhide__"]
            except Exception:
                pass
            else:
                break
        if tbh and callable(tbh):
            # Dereference the weakref; it may already be dead (None).
            return tbh(None if self._excinfo is None else self._excinfo())
        return tbh

    def __str__(self) -> str:
        name = self.frame.code.name
        try:
            line = str(self.statement).lstrip()
        except KeyboardInterrupt:
            raise
        except BaseException:
            # Source may be unavailable (e.g. compiled strings).
            line = "???"
        # This output does not quite match Python's repr for traceback entries,
        # but changing it to do so would break certain plugins. See
        # https://github.com/pytest-dev/pytest/pull/7535/ for details.
        return " File %r:%d in %s\n %s\n" % (
            str(self.path),
            self.lineno + 1,
            name,
            line,
        )

    @property
    def name(self) -> str:
        """co_name of underlying code."""
        return self.frame.code.raw.co_name
class Traceback(List[TracebackEntry]):
    """Traceback objects encapsulate and offer higher level access to Traceback entries."""

    def __init__(
        self,
        tb: Union[TracebackType, Iterable[TracebackEntry]],
        excinfo: Optional["ReferenceType[ExceptionInfo[BaseException]]"] = None,
    ) -> None:
        """Initialize from given python traceback object and ExceptionInfo."""
        self._excinfo = excinfo
        if isinstance(tb, TracebackType):
            # Expand the raw tb_next chain into TracebackEntry items.
            def f(cur: TracebackType) -> Iterable[TracebackEntry]:
                cur_: Optional[TracebackType] = cur
                while cur_ is not None:
                    yield TracebackEntry(cur_, excinfo=excinfo)
                    cur_ = cur_.tb_next

            super().__init__(f(tb))
        else:
            super().__init__(tb)

    def cut(
        self,
        path: Optional[Union[Path, str]] = None,
        lineno: Optional[int] = None,
        firstlineno: Optional[int] = None,
        excludepath: Optional[Path] = None,
    ) -> "Traceback":
        """Return a Traceback instance wrapping part of this Traceback.

        By providing any combination of path, lineno and firstlineno, the
        first frame to start the to-be-returned traceback is determined.

        This allows cutting the first part of a Traceback instance e.g.
        for formatting reasons (removing some uninteresting bits that deal
        with handling of the exception/traceback).
        """
        # Return the tail that starts at the first entry matching all of
        # the given filters; unchanged (self) when nothing matches.
        for x in self:
            code = x.frame.code
            codepath = code.path
            if path is not None and codepath != path:
                continue
            if (
                excludepath is not None
                and isinstance(codepath, Path)
                and excludepath in codepath.parents
            ):
                continue
            if lineno is not None and x.lineno != lineno:
                continue
            if firstlineno is not None and x.frame.code.firstlineno != firstlineno:
                continue
            return Traceback(x._rawentry, self._excinfo)
        return self

    @overload
    def __getitem__(self, key: "SupportsIndex") -> TracebackEntry:
        ...

    @overload
    def __getitem__(self, key: slice) -> "Traceback":
        ...

    def __getitem__(
        self, key: Union["SupportsIndex", slice]
    ) -> Union[TracebackEntry, "Traceback"]:
        # Slicing preserves the Traceback type; indexing returns an entry.
        if isinstance(key, slice):
            return self.__class__(super().__getitem__(key))
        else:
            return super().__getitem__(key)

    def filter(
        self, fn: Callable[[TracebackEntry], bool] = lambda x: not x.ishidden()
    ) -> "Traceback":
        """Return a Traceback instance with certain items removed

        fn is a function that gets a single argument, a TracebackEntry
        instance, and should return True when the item should be added
        to the Traceback, False when not.

        By default this removes all the TracebackEntries which are hidden
        (see ishidden() above).
        """
        return Traceback(filter(fn, self), self._excinfo)

    def getcrashentry(self) -> TracebackEntry:
        """Return last non-hidden traceback entry that lead to the exception of a traceback."""
        # Walk from the innermost frame outwards; fall back to the last
        # entry when every entry is hidden.
        for i in range(-1, -len(self) - 1, -1):
            entry = self[i]
            if not entry.ishidden():
                return entry
        return self[-1]

    def recursionindex(self) -> Optional[int]:
        """Return the index of the frame/TracebackEntry where recursion originates if
        appropriate, None if no recursion occurred."""
        cache: Dict[Tuple[Any, int, int], List[Dict[str, Any]]] = {}
        for i, entry in enumerate(self):
            # id for the code.raw is needed to work around
            # the strange metaprogramming in the decorator lib from pypi
            # which generates code objects that have hash/value equality
            # XXX needs a test
            key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
            # print "checking for recursion at", key
            values = cache.setdefault(key, [])
            if values:
                # The same code location was seen before; identical locals
                # mean execution looped back to an earlier state.
                f = entry.frame
                loc = f.f_locals
                for otherloc in values:
                    if otherloc == loc:
                        return i
            values.append(entry.frame.f_locals)
        return None
# Exception type carried by an ExceptionInfo; covariant so that an
# ExceptionInfo[SubError] is usable where ExceptionInfo[BaseException] is.
E = TypeVar("E", bound=BaseException, covariant=True)
@final
@attr.s(repr=False, init=False, auto_attribs=True)
class ExceptionInfo(Generic[E]):
    """Wraps sys.exc_info() objects and offers help for navigating the traceback."""

    _assert_start_repr: ClassVar = "AssertionError('assert "

    _excinfo: Optional[Tuple[Type["E"], "E", TracebackType]]
    _striptext: str
    _traceback: Optional[Traceback]

    def __init__(
        self,
        excinfo: Optional[Tuple[Type["E"], "E", TracebackType]],
        striptext: str = "",
        traceback: Optional[Traceback] = None,
        *,
        _ispytest: bool = False,
    ) -> None:
        # Not public API: construct via from_exc_info/from_current/for_later.
        check_ispytest(_ispytest)
        self._excinfo = excinfo
        self._striptext = striptext
        self._traceback = traceback

    @classmethod
    def from_exc_info(
        cls,
        exc_info: Tuple[Type[E], E, TracebackType],
        exprinfo: Optional[str] = None,
    ) -> "ExceptionInfo[E]":
        """Return an ExceptionInfo for an existing exc_info tuple.

        .. warning::

            Experimental API

        :param exprinfo:
            A text string helping to determine if we should strip
            ``AssertionError`` from the output. Defaults to the exception
            message/``__str__()``.
        """
        _striptext = ""
        if exprinfo is None and isinstance(exc_info[1], AssertionError):
            exprinfo = getattr(exc_info[1], "msg", None)
            if exprinfo is None:
                exprinfo = saferepr(exc_info[1])
        if exprinfo and exprinfo.startswith(cls._assert_start_repr):
            # The repr matches a rewritten assert, so the redundant
            # "AssertionError: " prefix can be stripped from output later.
            _striptext = "AssertionError: "

        return cls(exc_info, _striptext, _ispytest=True)

    @classmethod
    def from_current(
        cls, exprinfo: Optional[str] = None
    ) -> "ExceptionInfo[BaseException]":
        """Return an ExceptionInfo matching the current traceback.

        .. warning::

            Experimental API

        :param exprinfo:
            A text string helping to determine if we should strip
            ``AssertionError`` from the output. Defaults to the exception
            message/``__str__()``.
        """
        tup = sys.exc_info()
        assert tup[0] is not None, "no current exception"
        assert tup[1] is not None, "no current exception"
        assert tup[2] is not None, "no current exception"
        exc_info = (tup[0], tup[1], tup[2])
        return ExceptionInfo.from_exc_info(exc_info, exprinfo)

    @classmethod
    def for_later(cls) -> "ExceptionInfo[E]":
        """Return an unfilled ExceptionInfo."""
        return cls(None, _ispytest=True)

    def fill_unfilled(self, exc_info: Tuple[Type[E], E, TracebackType]) -> None:
        """Fill an unfilled ExceptionInfo created with ``for_later()``."""
        assert self._excinfo is None, "ExceptionInfo was already filled"
        self._excinfo = exc_info

    @property
    def type(self) -> Type[E]:
        """The exception class."""
        assert (
            self._excinfo is not None
        ), ".type can only be used after the context manager exits"
        return self._excinfo[0]

    @property
    def value(self) -> E:
        """The exception value."""
        assert (
            self._excinfo is not None
        ), ".value can only be used after the context manager exits"
        return self._excinfo[1]

    @property
    def tb(self) -> TracebackType:
        """The exception raw traceback."""
        assert (
            self._excinfo is not None
        ), ".tb can only be used after the context manager exits"
        return self._excinfo[2]

    @property
    def typename(self) -> str:
        """The type name of the exception."""
        assert (
            self._excinfo is not None
        ), ".typename can only be used after the context manager exits"
        return self.type.__name__

    @property
    def traceback(self) -> Traceback:
        """The traceback."""
        # Built lazily; holds only a weakref back to this ExceptionInfo.
        if self._traceback is None:
            self._traceback = Traceback(self.tb, excinfo=ref(self))
        return self._traceback

    @traceback.setter
    def traceback(self, value: Traceback) -> None:
        self._traceback = value

    def __repr__(self) -> str:
        if self._excinfo is None:
            return "<ExceptionInfo for raises contextmanager>"
        return "<{} {} tblen={}>".format(
            self.__class__.__name__, saferepr(self._excinfo[1]), len(self.traceback)
        )

    def exconly(self, tryshort: bool = False) -> str:
        """Return the exception as a string.

        When 'tryshort' resolves to True, and the exception is an
        AssertionError, only the actual exception part of the exception
        representation is returned (so 'AssertionError: ' is removed from
        the beginning).
        """
        lines = format_exception_only(self.type, self.value)
        text = "".join(lines)
        text = text.rstrip()
        if tryshort:
            if text.startswith(self._striptext):
                text = text[len(self._striptext) :]
        return text

    def errisinstance(
        self, exc: Union[Type[BaseException], Tuple[Type[BaseException], ...]]
    ) -> bool:
        """Return True if the exception is an instance of exc.

        Consider using ``isinstance(excinfo.value, exc)`` instead.
        """
        return isinstance(self.value, exc)

    def _getreprcrash(self) -> "ReprFileLocation":
        # File location plus one-line message of the crash entry.
        exconly = self.exconly(tryshort=True)
        entry = self.traceback.getcrashentry()
        path, lineno = entry.frame.code.raw.co_filename, entry.lineno
        return ReprFileLocation(path, lineno + 1, exconly)

    def getrepr(
        self,
        showlocals: bool = False,
        style: "_TracebackStyle" = "long",
        abspath: bool = False,
        tbfilter: bool = True,
        funcargs: bool = False,
        truncate_locals: bool = True,
        chain: bool = True,
    ) -> Union["ReprExceptionInfo", "ExceptionChainRepr"]:
        """Return str()able representation of this exception info.

        :param bool showlocals:
            Show locals per traceback entry.
            Ignored if ``style=="native"``.

        :param str style:
            long|short|no|native|value traceback style.

        :param bool abspath:
            If paths should be changed to absolute or left unchanged.

        :param bool tbfilter:
            Hide entries that contain a local variable ``__tracebackhide__==True``.
            Ignored if ``style=="native"``.

        :param bool funcargs:
            Show fixtures ("funcargs" for legacy purposes) per traceback entry.

        :param bool truncate_locals:
            With ``showlocals==True``, make sure locals can be safely represented as strings.

        :param bool chain:
            If chained exceptions in Python 3 should be shown.

        .. versionchanged:: 3.9

            Added the ``chain`` parameter.
        """
        if style == "native":
            # Delegate to the stdlib traceback formatting for native style.
            return ReprExceptionInfo(
                ReprTracebackNative(
                    traceback.format_exception(
                        self.type, self.value, self.traceback[0]._rawentry
                    )
                ),
                self._getreprcrash(),
            )

        fmt = FormattedExcinfo(
            showlocals=showlocals,
            style=style,
            abspath=abspath,
            tbfilter=tbfilter,
            funcargs=funcargs,
            truncate_locals=truncate_locals,
            chain=chain,
        )
        return fmt.repr_excinfo(self)

    def match(self, regexp: Union[str, Pattern[str]]) -> "Literal[True]":
        """Check whether the regular expression `regexp` matches the string
        representation of the exception using :func:`python:re.search`.

        If it matches `True` is returned, otherwise an `AssertionError` is raised.
        """
        __tracebackhide__ = True
        msg = "Regex pattern {!r} does not match {!r}."
        if regexp == str(self.value):
            # Exact equality suggests the caller forgot to regex-escape.
            msg += " Did you mean to `re.escape()` the regex?"
        assert re.search(regexp, str(self.value)), msg.format(regexp, str(self.value))
        # Return True to allow for "assert excinfo.match()".
        return True
@attr.s(auto_attribs=True)
class FormattedExcinfo:
    """Presenting information about failing Functions and Generators."""

    # for traceback entries
    flow_marker: ClassVar = ">"
    fail_marker: ClassVar = "E"

    showlocals: bool = False
    style: "_TracebackStyle" = "long"
    abspath: bool = True
    tbfilter: bool = True
    funcargs: bool = False
    truncate_locals: bool = True
    chain: bool = True
    # Cache of parsed ASTs keyed by file path, shared across traceback
    # entries of one report to avoid reparsing; excluded from init/repr.
    astcache: Dict[Union[str, Path], ast.AST] = attr.ib(
        factory=dict, init=False, repr=False
    )
def _getindent(self, source: "Source") -> int:
# Figure out indent for the given source.
try:
s = str(source.getstatement(len(source) - 1))
except KeyboardInterrupt:
raise
except BaseException:
try:
s = str(source[-1])
except KeyboardInterrupt:
raise
except BaseException:
return 0
return 4 + (len(s) - len(s.lstrip()))
def _getentrysource(self, entry: TracebackEntry) -> Optional["Source"]:
source = entry.getsource(self.astcache)
if source is not None:
source = source.deindent()
return source
def repr_args(self, entry: TracebackEntry) -> Optional["ReprFuncArgs"]:
if self.funcargs:
args = []
for argname, argvalue in entry.frame.getargs(var=True):
args.append((argname, saferepr(argvalue)))
return ReprFuncArgs(args)
return None
def get_source(
self,
source: Optional["Source"],
line_index: int = -1,
excinfo: Optional[ExceptionInfo[BaseException]] = None,
short: bool = False,
) -> List[str]:
"""Return formatted and marked up source lines."""
lines = []
if source is None or line_index >= len(source.lines):
source = Source("???")
line_index = 0
if line_index < 0:
line_index += len(source)
space_prefix = " "
if short:
lines.append(space_prefix + source.lines[line_index].strip())
else:
for line in source.lines[:line_index]:
lines.append(space_prefix + line)
lines.append(self.flow_marker + " " + source.lines[line_index])
for line in source.lines[line_index + 1 :]:
lines.append(space_prefix + line)
if excinfo is not None:
indent = 4 if short else self._getindent(source)
lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
return lines
def get_exconly(
self,
excinfo: ExceptionInfo[BaseException],
indent: int = 4,
markall: bool = False,
) -> List[str]:
lines = []
indentstr = " " * indent
# Get the real exception information out.
exlines = excinfo.exconly(tryshort=True).split("\n")
failindent = self.fail_marker + indentstr[1:]
for line in exlines:
lines.append(failindent + line)
if not markall:
failindent = indentstr
return lines
def repr_locals(self, locals: Mapping[str, object]) -> Optional["ReprLocals"]:
if self.showlocals:
lines = []
keys = [loc for loc in locals if loc[0] != "@"]
keys.sort()
for name in keys:
value = locals[name]
if name == "__builtins__":
lines.append("__builtins__ = <builtins>")
else:
# This formatting could all be handled by the
# _repr() function, which is only reprlib.Repr in
# disguise, so is very configurable.
if self.truncate_locals:
str_repr = saferepr(value)
else:
str_repr = safeformat(value)
# if len(str_repr) < 70 or not isinstance(value, (list, tuple, dict)):
lines.append(f"{name:<10} = {str_repr}")
# else:
# self._line("%-10s =\\" % (name,))
# # XXX
# pprint.pprint(value, stream=self.excinfowriter)
return ReprLocals(lines)
return None
def repr_traceback_entry(
self,
entry: TracebackEntry,
excinfo: Optional[ExceptionInfo[BaseException]] = None,
) -> "ReprEntry":
lines: List[str] = []
style = entry._repr_style if entry._repr_style is not None else self.style
if style in ("short", "long"):
source = self._getentrysource(entry)
if source is None:
source = Source("???")
line_index = 0
else:
line_index = entry.lineno - entry.getfirstlinesource()
short = style == "short"
reprargs = self.repr_args(entry) if not short else None
s = self.get_source(source, line_index, excinfo, short=short)
lines.extend(s)
if short:
message = "in %s" % (entry.name)
else:
message = excinfo and excinfo.typename or ""
entry_path = entry.path
path = self._makepath(entry_path)
reprfileloc = ReprFileLocation(path, entry.lineno + 1, message)
localsrepr = self.repr_locals(entry.locals)
return ReprEntry(lines, reprargs, localsrepr, reprfileloc, style)
elif style == "value":
if excinfo:
lines.extend(str(excinfo.value).split("\n"))
return ReprEntry(lines, None, None, None, style)
else:
if excinfo:
lines.extend(self.get_exconly(excinfo, indent=4))
return ReprEntry(lines, None, None, None, style)
def _makepath(self, path: Union[Path, str]) -> str:
if not self.abspath and isinstance(path, Path):
try:
np = bestrelpath(Path.cwd(), path)
except OSError:
return str(path)
if len(np) < len(str(path)):
return np
return str(path)
def repr_traceback(self, excinfo: ExceptionInfo[BaseException]) -> "ReprTraceback":
traceback = excinfo.traceback
if self.tbfilter:
traceback = traceback.filter()
if isinstance(excinfo.value, RecursionError):
traceback, extraline = self._truncate_recursive_traceback(traceback)
else:
extraline = None
last = traceback[-1]
entries = []
if self.style == "value":
reprentry = self.repr_traceback_entry(last, excinfo)
entries.append(reprentry)
return ReprTraceback(entries, None, style=self.style)
for index, entry in enumerate(traceback):
einfo = (last == entry) and excinfo or None
reprentry = self.repr_traceback_entry(entry, einfo)
entries.append(reprentry)
return ReprTraceback(entries, extraline, style=self.style)
def _truncate_recursive_traceback(
self, traceback: Traceback
) -> Tuple[Traceback, Optional[str]]:
"""Truncate the given recursive traceback trying to find the starting
point of the recursion.
The detection is done by going through each traceback entry and
finding the point in which the locals of the frame are equal to the
locals of a previous frame (see ``recursionindex()``).
Handle the situation where the recursion process might raise an
exception (for example comparing numpy arrays using equality raises a
TypeError), in which case we do our best to warn the user of the
error and show a limited traceback.
"""
try:
recursionindex = traceback.recursionindex()
except Exception as e:
max_frames = 10
extraline: Optional[str] = (
"!!! Recursion error detected, but an error occurred locating the origin of recursion.\n"
" The following exception happened when comparing locals in the stack frame:\n"
" {exc_type}: {exc_msg}\n"
" Displaying first and last {max_frames} stack frames out of {total}."
).format(
exc_type=type(e).__name__,
exc_msg=str(e),
max_frames=max_frames,
total=len(traceback),
)
# Type ignored because adding two instaces of a List subtype
# currently incorrectly has type List instead of the subtype.
traceback = traceback[:max_frames] + traceback[-max_frames:] # type: ignore
else:
if recursionindex is not None:
extraline = "!!! Recursion detected (same locals & position)"
traceback = traceback[: recursionindex + 1]
else:
extraline = None
return traceback, extraline
def repr_excinfo(
self, excinfo: ExceptionInfo[BaseException]
) -> "ExceptionChainRepr":
repr_chain: List[
Tuple[ReprTraceback, Optional[ReprFileLocation], Optional[str]]
] = []
e: Optional[BaseException] = excinfo.value
excinfo_: Optional[ExceptionInfo[BaseException]] = excinfo
descr = None
seen: Set[int] = set()
while e is not None and id(e) not in seen:
seen.add(id(e))
if excinfo_:
reprtraceback = self.repr_traceback(excinfo_)
reprcrash: Optional[ReprFileLocation] = (
excinfo_._getreprcrash() if self.style != "value" else None
)
else:
# Fallback to native repr if the exception doesn't have a traceback:
# ExceptionInfo objects require a full traceback to work.
reprtraceback = ReprTracebackNative(
traceback.format_exception(type(e), e, None)
)
reprcrash = None
repr_chain += [(reprtraceback, reprcrash, descr)]
if e.__cause__ is not None and self.chain:
e = e.__cause__
excinfo_ = (
ExceptionInfo.from_exc_info((type(e), e, e.__traceback__))
if e.__traceback__
else None
)
descr = "The above exception was the direct cause of the following exception:"
elif (
e.__context__ is not None and not e.__suppress_context__ and self.chain
):
e = e.__context__
excinfo_ = (
ExceptionInfo.from_exc_info((type(e), e, e.__traceback__))
if e.__traceback__
else None
)
descr = "During handling of the above exception, another exception occurred:"
else:
e = None
repr_chain.reverse()
return ExceptionChainRepr(repr_chain)
@attr.s(eq=False, auto_attribs=True)
class TerminalRepr:
    """Base class for objects that know how to render themselves to a TerminalWriter."""

    def __str__(self) -> str:
        # FYI this is called from pytest-xdist's serialization of exception
        # information.
        buffer = StringIO()
        writer = TerminalWriter(file=buffer)
        self.toterminal(writer)
        return buffer.getvalue().strip()

    def __repr__(self) -> str:
        return "<{} instance at {:0x}>".format(self.__class__, id(self))

    def toterminal(self, tw: TerminalWriter) -> None:
        # Subclasses must render themselves onto ``tw``.
        raise NotImplementedError()
# This class is abstract -- only subclasses are instantiated.
@attr.s(eq=False)
class ExceptionRepr(TerminalRepr):
    """Base repr for an exception: a traceback, a crash location and extra sections."""

    # Provided by subclasses.
    reprcrash: Optional["ReprFileLocation"]
    reprtraceback: "ReprTraceback"

    def __attrs_post_init__(self) -> None:
        # Extra (name, content, separator) sections added via addsection().
        self.sections: List[Tuple[str, str, str]] = []

    def addsection(self, name: str, content: str, sep: str = "-") -> None:
        section = (name, content, sep)
        self.sections.append(section)

    def toterminal(self, tw: TerminalWriter) -> None:
        for section in self.sections:
            tw.sep(section[2], section[0])
            tw.line(section[1])
@attr.s(eq=False, auto_attribs=True)
class ExceptionChainRepr(ExceptionRepr):
    """Repr of a chain of exceptions (``__cause__``/``__context__`` links)."""

    chain: Sequence[Tuple["ReprTraceback", Optional["ReprFileLocation"], Optional[str]]]

    def __attrs_post_init__(self) -> None:
        super().__attrs_post_init__()
        # reprcrash and reprtraceback of the outermost (the newest) exception
        # in the chain.
        self.reprtraceback = self.chain[-1][0]
        self.reprcrash = self.chain[-1][1]

    def toterminal(self, tw: TerminalWriter) -> None:
        for reprtraceback, _, descr in self.chain:
            reprtraceback.toterminal(tw)
            if descr is not None:
                tw.line("")
                tw.line(descr, yellow=True)
        super().toterminal(tw)
@attr.s(eq=False, auto_attribs=True)
class ReprExceptionInfo(ExceptionRepr):
    """Repr of a single (non-chained) exception: its traceback plus crash location."""

    reprtraceback: "ReprTraceback"
    reprcrash: "ReprFileLocation"

    def toterminal(self, tw: TerminalWriter) -> None:
        self.reprtraceback.toterminal(tw)
        # Render any extra sections added via addsection().
        super().toterminal(tw)
@attr.s(eq=False, auto_attribs=True)
class ReprTraceback(TerminalRepr):
    """Repr of a whole traceback: a sequence of entries plus an optional extra line."""

    reprentries: Sequence[Union["ReprEntry", "ReprEntryNative"]]
    extraline: Optional[str]
    style: "_TracebackStyle"

    # Separator drawn between adjacent entries.
    entrysep: ClassVar = "_ "

    def toterminal(self, tw: TerminalWriter) -> None:
        # The entries might have different styles.
        for i, entry in enumerate(self.reprentries):
            if entry.style == "long":
                tw.line("")
            entry.toterminal(tw)
            if i < len(self.reprentries) - 1:
                next_entry = self.reprentries[i + 1]
                # NB: ``and`` binds tighter than ``or``, so a separator is
                # drawn after a "long" entry, or after a "short" entry that
                # is followed by a "long" one.
                if (
                    entry.style == "long"
                    or entry.style == "short"
                    and next_entry.style == "long"
                ):
                    tw.sep(self.entrysep)

        if self.extraline:
            tw.line(self.extraline)
class ReprTracebackNative(ReprTraceback):
    """ReprTraceback built from pre-formatted native (stdlib ``traceback``) lines."""

    def __init__(self, tblines: Sequence[str]) -> None:
        self.style = "native"
        self.reprentries = [ReprEntryNative(tblines)]
        self.extraline = None
@attr.s(eq=False, auto_attribs=True)
class ReprEntryNative(TerminalRepr):
    """Wraps already-formatted traceback lines and writes them through unchanged."""

    lines: Sequence[str]

    style: ClassVar["_TracebackStyle"] = "native"

    def toterminal(self, tw: TerminalWriter) -> None:
        tw.write("".join(self.lines))
@attr.s(eq=False, auto_attribs=True)
class ReprEntry(TerminalRepr):
    """Repr of a single traceback entry: source lines, args, locals and location."""

    lines: Sequence[str]
    reprfuncargs: Optional["ReprFuncArgs"]
    reprlocals: Optional["ReprLocals"]
    reprfileloc: Optional["ReprFileLocation"]
    style: "_TracebackStyle"

    def _write_entry_lines(self, tw: TerminalWriter) -> None:
        """Write the source code portions of a list of traceback entries with syntax highlighting.

        Usually entries are lines like these:

            "     x = 1"
            ">    assert x == 2"
            "E    assert 1 == 2"

        This function takes care of rendering the "source" portions of it (the lines without
        the "E" prefix) using syntax highlighting, taking care to not highlighting the ">"
        character, as doing so might break line continuations.
        """
        if not self.lines:
            return

        # separate indents and source lines that are not failures: we want to
        # highlight the code but not the indentation, which may contain markers
        # such as ">   assert 0"
        fail_marker = f"{FormattedExcinfo.fail_marker}   "
        indent_size = len(fail_marker)
        indents: List[str] = []
        source_lines: List[str] = []
        failure_lines: List[str] = []
        for index, line in enumerate(self.lines):
            is_failure_line = line.startswith(fail_marker)
            if is_failure_line:
                # from this point on all lines are considered part of the failure
                failure_lines.extend(self.lines[index:])
                break
            else:
                if self.style == "value":
                    # "value" style has no marker prefix to strip.
                    source_lines.append(line)
                else:
                    indents.append(line[:indent_size])
                    source_lines.append(line[indent_size:])

        tw._write_source(source_lines, indents)

        # failure lines are always completely red and bold
        for line in failure_lines:
            tw.line(line, bold=True, red=True)

    def toterminal(self, tw: TerminalWriter) -> None:
        if self.style == "short":
            # Short style: location line first, then the source line(s).
            assert self.reprfileloc is not None
            self.reprfileloc.toterminal(tw)
            self._write_entry_lines(tw)
            if self.reprlocals:
                self.reprlocals.toterminal(tw, indent=" " * 8)
            return

        if self.reprfuncargs:
            self.reprfuncargs.toterminal(tw)

        self._write_entry_lines(tw)

        if self.reprlocals:
            tw.line("")
            self.reprlocals.toterminal(tw)
        if self.reprfileloc:
            if self.lines:
                tw.line("")
            self.reprfileloc.toterminal(tw)

    def __str__(self) -> str:
        return "{}\n{}\n{}".format(
            "\n".join(self.lines), self.reprlocals, self.reprfileloc
        )
@attr.s(eq=False, auto_attribs=True)
class ReprFileLocation(TerminalRepr):
    """A ``path:lineno: message`` location line, as understood by most editors."""

    path: str = attr.ib(converter=str)
    lineno: int
    message: str

    def toterminal(self, tw: TerminalWriter) -> None:
        # Filename and lineno output for each entry, using an output format
        # that most editors understand.
        first_line = self.message.split("\n", 1)[0]
        tw.write(self.path, bold=True, red=True)
        tw.line(":{}: {}".format(self.lineno, first_line))
@attr.s(eq=False, auto_attribs=True)
class ReprLocals(TerminalRepr):
    """Repr of a frame's local variables, one pre-rendered line per local."""

    lines: Sequence[str]

    def toterminal(self, tw: TerminalWriter, indent="") -> None:
        for line in self.lines:
            tw.line(indent + line)
@attr.s(eq=False, auto_attribs=True)
class ReprFuncArgs(TerminalRepr):
    """Repr of a function's arguments, wrapped to the terminal width."""

    args: Sequence[Tuple[str, object]]

    def toterminal(self, tw: TerminalWriter) -> None:
        if not self.args:
            return
        pending = ""
        for name, value in self.args:
            rendered = f"{name} = {value}"
            if len(rendered) + len(pending) + 2 > tw.fullwidth:
                # Adding this arg would overflow the line: flush what we
                # have and start a new line with the current arg.
                if pending:
                    tw.line(pending)
                pending = rendered
            elif pending:
                pending += ", " + rendered
            else:
                pending = rendered
        if pending:
            tw.line(pending)
        tw.line("")
def getfslineno(obj: object) -> Tuple[Union[str, Path], int]:
    """Return source location (path, lineno) for the given object.

    If the source cannot be determined return ("", -1).

    The line number is 0-based.
    """
    # xxx let decorators etc specify a sane ordering
    # NOTE: this used to be done in _pytest.compat.getfslineno, initially added
    # in 6ec13a2b9. It ("place_as") appears to be something very custom.
    obj = get_real_func(obj)
    if hasattr(obj, "place_as"):
        obj = obj.place_as  # type: ignore[attr-defined]

    try:
        code = Code.from_function(obj)
    except TypeError:
        pass
    else:
        return code.path, code.firstlineno

    # Not a code-object carrier: fall back to the inspect module.
    try:
        fn = inspect.getsourcefile(obj) or inspect.getfile(obj)  # type: ignore[arg-type]
    except TypeError:
        return "", -1

    fspath = fn and absolutepath(fn) or ""
    lineno = -1
    if fspath:
        try:
            _, lineno = findsource(obj)
        except OSError:
            pass
    return fspath, lineno
# Relative paths that we use to filter traceback entries from appearing to the user;
# see filter_traceback.
# note: if we need to add more paths than what we have now we should probably use a list
# for better maintenance.

# rstrip("oc") maps a compiled-module path (".pyc"/".pyo") back to the ".py" file.
_PLUGGY_DIR = Path(pluggy.__file__.rstrip("oc"))
# pluggy is either a package or a single module depending on the version
if _PLUGGY_DIR.name == "__init__.py":
    _PLUGGY_DIR = _PLUGGY_DIR.parent
_PYTEST_DIR = Path(_pytest.__file__).parent
def filter_traceback(entry: TracebackEntry) -> bool:
    """Return True if a TracebackEntry instance should be included in tracebacks.

    We hide traceback entries of:

    * dynamically generated code (no code to show up for it);
    * internal traceback from pytest or its internal libraries, py and pluggy.
    """
    # entry.path might sometimes return a str object when the entry
    # points to dynamically generated code.
    # See https://bitbucket.org/pytest-dev/py/issues/71.
    raw_filename = entry.frame.code.raw.co_filename
    if "<" in raw_filename and ">" in raw_filename:
        # Dynamically generated code: nothing useful to display.
        return False

    # entry.path might point to a non-existing file, in which case it will
    # also return a str object. See #1133.
    parents = Path(entry.path).parents
    return _PLUGGY_DIR not in parents and _PYTEST_DIR not in parents
code: accept any `os.PathLike[str]` in `Traceback.cut`

Before 7.0.0rc1, the function accepted `Union[str, py.path.local]`, and
`py.path.local` compares equal to the string path, so a user was able to
pass the path as a string and it would work. In 7.0.0rc1 we changed
`py.path.local` to `Path`, which doesn't compare equal to the string
path, breaking compatibility (e.g. with the `sybil` package).

This restores compatibility for this function by accepting any
`os.PathLike[str]` and only comparing the string representations.
import ast
import inspect
import os
import re
import sys
import traceback
from inspect import CO_VARARGS
from inspect import CO_VARKEYWORDS
from io import StringIO
from pathlib import Path
from traceback import format_exception_only
from types import CodeType
from types import FrameType
from types import TracebackType
from typing import Any
from typing import Callable
from typing import ClassVar
from typing import Dict
from typing import Generic
from typing import Iterable
from typing import List
from typing import Mapping
from typing import Optional
from typing import overload
from typing import Pattern
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from weakref import ref
import attr
import pluggy
import _pytest
from _pytest._code.source import findsource
from _pytest._code.source import getrawcode
from _pytest._code.source import getstatementrange_ast
from _pytest._code.source import Source
from _pytest._io import TerminalWriter
from _pytest._io.saferepr import safeformat
from _pytest._io.saferepr import saferepr
from _pytest.compat import final
from _pytest.compat import get_real_func
from _pytest.deprecated import check_ispytest
from _pytest.pathlib import absolutepath
from _pytest.pathlib import bestrelpath
if TYPE_CHECKING:
from typing_extensions import Literal
from typing_extensions import SupportsIndex
from weakref import ReferenceType
_TracebackStyle = Literal["long", "short", "line", "no", "native", "value", "auto"]
class Code:
    """Wrapper around Python code objects."""

    __slots__ = ("raw",)

    def __init__(self, obj: CodeType) -> None:
        self.raw = obj

    @classmethod
    def from_function(cls, obj: object) -> "Code":
        """Build a Code from any object carrying a code object (function, lambda, ...)."""
        return cls(getrawcode(obj))

    def __eq__(self, other):
        # Comparing against a non-Code object used to raise AttributeError
        # (``other.raw``); return NotImplemented instead so Python can fall
        # back to its default comparison behavior.
        if not isinstance(other, Code):
            return NotImplemented
        return self.raw == other.raw

    # Ignore type because of https://github.com/python/mypy/issues/4266.
    __hash__ = None  # type: ignore

    @property
    def firstlineno(self) -> int:
        """First line number of the code object, 0-based."""
        return self.raw.co_firstlineno - 1

    @property
    def name(self) -> str:
        """Name of the code object (``co_name``)."""
        return self.raw.co_name

    @property
    def path(self) -> Union[Path, str]:
        """Return a path object pointing to source code, or an ``str`` in
        case of ``OSError`` / non-existing file."""
        if not self.raw.co_filename:
            return ""
        try:
            p = absolutepath(self.raw.co_filename)
            # maybe don't try this checking
            if not p.exists():
                raise OSError("path check failed.")
            return p
        except OSError:
            # XXX maybe try harder like the weird logic
            # in the standard lib [linecache.updatecache] does?
            return self.raw.co_filename

    @property
    def fullsource(self) -> Optional["Source"]:
        """Return a _pytest._code.Source object for the full source file of the code."""
        full, _ = findsource(self.raw)
        return full

    def source(self) -> "Source":
        """Return a _pytest._code.Source object for the code object's source only."""
        # return source only for that part of code
        return Source(self.raw)

    def getargs(self, var: bool = False) -> Tuple[str, ...]:
        """Return a tuple with the argument names for the code object.

        If 'var' is set True also return the names of the variable and
        keyword arguments when present.
        """
        # Handy shortcut for getting args.
        raw = self.raw
        argcount = raw.co_argcount
        if var:
            # ``co_flags & CO_VARARGS`` yields the flag's *value* (4/8),
            # not 0/1; adding it directly overshot the co_varnames slice
            # and leaked local-variable names into the result. Count each
            # present flag as exactly one extra name.
            argcount += bool(raw.co_flags & CO_VARARGS)
            argcount += bool(raw.co_flags & CO_VARKEYWORDS)
        return raw.co_varnames[:argcount]
class Frame:
"""Wrapper around a Python frame holding f_locals and f_globals
in which expressions can be evaluated."""
__slots__ = ("raw",)
def __init__(self, frame: FrameType) -> None:
self.raw = frame
@property
def lineno(self) -> int:
return self.raw.f_lineno - 1
@property
def f_globals(self) -> Dict[str, Any]:
return self.raw.f_globals
@property
def f_locals(self) -> Dict[str, Any]:
return self.raw.f_locals
@property
def code(self) -> Code:
return Code(self.raw.f_code)
@property
def statement(self) -> "Source":
"""Statement this frame is at."""
if self.code.fullsource is None:
return Source("")
return self.code.fullsource.getstatement(self.lineno)
def eval(self, code, **vars):
"""Evaluate 'code' in the frame.
'vars' are optional additional local variables.
Returns the result of the evaluation.
"""
f_locals = self.f_locals.copy()
f_locals.update(vars)
return eval(code, self.f_globals, f_locals)
def repr(self, object: object) -> str:
"""Return a 'safe' (non-recursive, one-line) string repr for 'object'."""
return saferepr(object)
def getargs(self, var: bool = False):
"""Return a list of tuples (name, value) for all arguments.
If 'var' is set True, also include the variable and keyword arguments
when present.
"""
retval = []
for arg in self.code.getargs(var):
try:
retval.append((arg, self.f_locals[arg]))
except KeyError:
pass # this can occur when using Psyco
return retval
class TracebackEntry:
    """A single entry in a Traceback."""

    __slots__ = ("_rawentry", "_excinfo", "_repr_style")

    def __init__(
        self,
        rawentry: TracebackType,
        excinfo: Optional["ReferenceType[ExceptionInfo[BaseException]]"] = None,
    ) -> None:
        # ``excinfo`` is a weak reference, avoiding a reference cycle
        # between the ExceptionInfo and its traceback entries.
        self._rawentry = rawentry
        self._excinfo = excinfo
        # Per-entry style override ("short"/"long"), set via set_repr_style().
        self._repr_style: Optional['Literal["short", "long"]'] = None

    @property
    def lineno(self) -> int:
        # 0-based line number of this entry.
        return self._rawentry.tb_lineno - 1

    def set_repr_style(self, mode: "Literal['short', 'long']") -> None:
        """Override the traceback style for this entry only."""
        assert mode in ("short", "long")
        self._repr_style = mode

    @property
    def frame(self) -> Frame:
        return Frame(self._rawentry.tb_frame)

    @property
    def relline(self) -> int:
        # Line number relative to the first line of the enclosing code object.
        return self.lineno - self.frame.code.firstlineno

    def __repr__(self) -> str:
        return "<TracebackEntry %s:%d>" % (self.frame.code.path, self.lineno + 1)

    @property
    def statement(self) -> "Source":
        """_pytest._code.Source object for the current statement."""
        source = self.frame.code.fullsource
        assert source is not None
        return source.getstatement(self.lineno)

    @property
    def path(self) -> Union[Path, str]:
        """Path to the source code."""
        return self.frame.code.path

    @property
    def locals(self) -> Dict[str, Any]:
        """Locals of underlying frame."""
        return self.frame.f_locals

    def getfirstlinesource(self) -> int:
        # 0-based first line of the enclosing code object.
        return self.frame.code.firstlineno

    def getsource(
        self, astcache: Optional[Dict[Union[str, Path], ast.AST]] = None
    ) -> Optional["Source"]:
        """Return failing source code."""
        # we use the passed in astcache to not reparse asttrees
        # within exception info printing
        source = self.frame.code.fullsource
        if source is None:
            return None
        key = astnode = None
        if astcache is not None:
            key = self.frame.code.path
            if key is not None:
                astnode = astcache.get(key, None)
        start = self.getfirstlinesource()
        try:
            astnode, _, end = getstatementrange_ast(
                self.lineno, source, astnode=astnode
            )
        except SyntaxError:
            # Could not determine the statement range: show a single line.
            end = self.lineno + 1
        else:
            if key is not None and astcache is not None:
                # Cache the parsed AST for reuse by subsequent entries.
                astcache[key] = astnode
        return source[start:end]

    source = property(getsource)

    def ishidden(self) -> bool:
        """Return True if the current frame has a var __tracebackhide__
        resolving to True.

        If __tracebackhide__ is a callable, it gets called with the
        ExceptionInfo instance and can decide whether to hide the traceback.

        Mostly for internal use.
        """
        tbh: Union[
            bool, Callable[[Optional[ExceptionInfo[BaseException]]], bool]
        ] = False
        for maybe_ns_dct in (self.frame.f_locals, self.frame.f_globals):
            # in normal cases, f_locals and f_globals are dictionaries
            # however via `exec(...)` / `eval(...)` they can be other types
            # (even incorrect types!).
            # as such, we suppress all exceptions while accessing __tracebackhide__
            try:
                tbh = maybe_ns_dct["__tracebackhide__"]
            except Exception:
                pass
            else:
                break
        if tbh and callable(tbh):
            # Callable form receives the ExceptionInfo (dereferenced from
            # the weak reference) and decides itself.
            return tbh(None if self._excinfo is None else self._excinfo())
        return tbh

    def __str__(self) -> str:
        name = self.frame.code.name
        try:
            line = str(self.statement).lstrip()
        except KeyboardInterrupt:
            raise
        except BaseException:
            line = "???"
        # This output does not quite match Python's repr for traceback entries,
        # but changing it to do so would break certain plugins.  See
        # https://github.com/pytest-dev/pytest/pull/7535/ for details.
        return "  File %r:%d in %s\n  %s\n" % (
            str(self.path),
            self.lineno + 1,
            name,
            line,
        )

    @property
    def name(self) -> str:
        """co_name of underlying code."""
        return self.frame.code.raw.co_name
class Traceback(List[TracebackEntry]):
    """Traceback objects encapsulate and offer higher level access to Traceback entries."""

    def __init__(
        self,
        tb: Union[TracebackType, Iterable[TracebackEntry]],
        excinfo: Optional["ReferenceType[ExceptionInfo[BaseException]]"] = None,
    ) -> None:
        """Initialize from given python traceback object and ExceptionInfo."""
        self._excinfo = excinfo
        if isinstance(tb, TracebackType):
            # Expand the raw tb_next linked list into TracebackEntry items.
            def f(cur: TracebackType) -> Iterable[TracebackEntry]:
                cur_: Optional[TracebackType] = cur
                while cur_ is not None:
                    yield TracebackEntry(cur_, excinfo=excinfo)
                    cur_ = cur_.tb_next

            super().__init__(f(tb))
        else:
            super().__init__(tb)

    def cut(
        self,
        path: Optional[Union["os.PathLike[str]", str]] = None,
        lineno: Optional[int] = None,
        firstlineno: Optional[int] = None,
        excludepath: Optional["os.PathLike[str]"] = None,
    ) -> "Traceback":
        """Return a Traceback instance wrapping part of this Traceback.

        By providing any combination of path, lineno and firstlineno, the
        first frame to start the to-be-returned traceback is determined.

        This allows cutting the first part of a Traceback instance e.g.
        for formatting reasons (removing some uninteresting bits that deal
        with handling of the exception/traceback).
        """
        # Compare via os.fspath/str so plain strings and os.PathLike
        # arguments both work (see changelog note about 7.0.0rc1).
        path_ = None if path is None else os.fspath(path)
        excludepath_ = None if excludepath is None else os.fspath(excludepath)
        for x in self:
            code = x.frame.code
            codepath = code.path
            if path is not None and str(codepath) != path_:
                continue
            if (
                excludepath is not None
                and isinstance(codepath, Path)
                and excludepath_ in (str(p) for p in codepath.parents)  # type: ignore[operator]
            ):
                continue
            if lineno is not None and x.lineno != lineno:
                continue
            if firstlineno is not None and x.frame.code.firstlineno != firstlineno:
                continue
            # First matching entry found: return the tail starting there.
            return Traceback(x._rawentry, self._excinfo)
        return self

    @overload
    def __getitem__(self, key: "SupportsIndex") -> TracebackEntry:
        ...

    @overload
    def __getitem__(self, key: slice) -> "Traceback":
        ...

    def __getitem__(
        self, key: Union["SupportsIndex", slice]
    ) -> Union[TracebackEntry, "Traceback"]:
        # Slicing returns a Traceback (not a plain list), preserving type.
        if isinstance(key, slice):
            return self.__class__(super().__getitem__(key))
        else:
            return super().__getitem__(key)

    def filter(
        self, fn: Callable[[TracebackEntry], bool] = lambda x: not x.ishidden()
    ) -> "Traceback":
        """Return a Traceback instance with certain items removed

        fn is a function that gets a single argument, a TracebackEntry
        instance, and should return True when the item should be added
        to the Traceback, False when not.

        By default this removes all the TracebackEntries which are hidden
        (see ishidden() above).
        """
        return Traceback(filter(fn, self), self._excinfo)

    def getcrashentry(self) -> TracebackEntry:
        """Return last non-hidden traceback entry that lead to the exception of a traceback."""
        for i in range(-1, -len(self) - 1, -1):
            entry = self[i]
            if not entry.ishidden():
                return entry
        # All entries are hidden: fall back to the very last one.
        return self[-1]

    def recursionindex(self) -> Optional[int]:
        """Return the index of the frame/TracebackEntry where recursion originates if
        appropriate, None if no recursion occurred."""
        # Cache maps (path, id(code), lineno) -> list of f_locals dicts
        # seen at that position; a repeated position with equal locals
        # marks the start of the recursion.
        cache: Dict[Tuple[Any, int, int], List[Dict[str, Any]]] = {}
        for i, entry in enumerate(self):
            # id for the code.raw is needed to work around
            # the strange metaprogramming in the decorator lib from pypi
            # which generates code objects that have hash/value equality
            # XXX needs a test
            key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
            # print "checking for recursion at", key
            values = cache.setdefault(key, [])
            if values:
                f = entry.frame
                loc = f.f_locals
                for otherloc in values:
                    if otherloc == loc:
                        return i
            values.append(entry.frame.f_locals)
        return None
# TypeVar for the wrapped exception; covariant so ExceptionInfo[SubError]
# is usable where ExceptionInfo[BaseException] is expected.
E = TypeVar("E", bound=BaseException, covariant=True)
@final
@attr.s(repr=False, init=False, auto_attribs=True)
class ExceptionInfo(Generic[E]):
"""Wraps sys.exc_info() objects and offers help for navigating the traceback."""
_assert_start_repr: ClassVar = "AssertionError('assert "
_excinfo: Optional[Tuple[Type["E"], "E", TracebackType]]
_striptext: str
_traceback: Optional[Traceback]
    def __init__(
        self,
        excinfo: Optional[Tuple[Type["E"], "E", TracebackType]],
        striptext: str = "",
        traceback: Optional[Traceback] = None,
        *,
        _ispytest: bool = False,
    ) -> None:
        # Private constructor: use the from_* classmethods or for_later()
        # instead (check_ispytest rejects direct external construction).
        check_ispytest(_ispytest)
        self._excinfo = excinfo
        self._striptext = striptext
        self._traceback = traceback
    @classmethod
    def from_exc_info(
        cls,
        exc_info: Tuple[Type[E], E, TracebackType],
        exprinfo: Optional[str] = None,
    ) -> "ExceptionInfo[E]":
        """Return an ExceptionInfo for an existing exc_info tuple.

        .. warning::

            Experimental API

        :param exprinfo:
            A text string helping to determine if we should strip
            ``AssertionError`` from the output. Defaults to the exception
            message/``__str__()``.
        """
        _striptext = ""
        if exprinfo is None and isinstance(exc_info[1], AssertionError):
            # Prefer the assertion-rewritten message when available.
            exprinfo = getattr(exc_info[1], "msg", None)
            if exprinfo is None:
                exprinfo = saferepr(exc_info[1])
        if exprinfo and exprinfo.startswith(cls._assert_start_repr):
            # The repr starts with "AssertionError('assert "; remember to
            # strip the redundant "AssertionError: " prefix when rendering.
            _striptext = "AssertionError: "

        return cls(exc_info, _striptext, _ispytest=True)
@classmethod
def from_current(
cls, exprinfo: Optional[str] = None
) -> "ExceptionInfo[BaseException]":
"""Return an ExceptionInfo matching the current traceback.
.. warning::
Experimental API
:param exprinfo:
A text string helping to determine if we should strip
``AssertionError`` from the output. Defaults to the exception
message/``__str__()``.
"""
tup = sys.exc_info()
assert tup[0] is not None, "no current exception"
assert tup[1] is not None, "no current exception"
assert tup[2] is not None, "no current exception"
exc_info = (tup[0], tup[1], tup[2])
return ExceptionInfo.from_exc_info(exc_info, exprinfo)
    @classmethod
    def for_later(cls) -> "ExceptionInfo[E]":
        """Return an unfilled ExceptionInfo."""
        # The instance must be completed later via fill_unfilled().
        return cls(None, _ispytest=True)
    def fill_unfilled(self, exc_info: Tuple[Type[E], E, TracebackType]) -> None:
        """Fill an unfilled ExceptionInfo created with ``for_later()``."""
        # Filling twice would silently discard the first exception.
        assert self._excinfo is None, "ExceptionInfo was already filled"
        self._excinfo = exc_info
    @property
    def type(self) -> Type[E]:
        """The exception class."""
        # Unfilled instances (for_later) have no exc_info until filled.
        assert (
            self._excinfo is not None
        ), ".type can only be used after the context manager exits"
        return self._excinfo[0]
    @property
    def value(self) -> E:
        """The exception value."""
        # Unfilled instances (for_later) have no exc_info until filled.
        assert (
            self._excinfo is not None
        ), ".value can only be used after the context manager exits"
        return self._excinfo[1]
    @property
    def tb(self) -> TracebackType:
        """The exception raw traceback."""
        # Unfilled instances (for_later) have no exc_info until filled.
        assert (
            self._excinfo is not None
        ), ".tb can only be used after the context manager exits"
        return self._excinfo[2]
    @property
    def typename(self) -> str:
        """The type name of the exception."""
        # Unfilled instances (for_later) have no exc_info until filled.
        assert (
            self._excinfo is not None
        ), ".typename can only be used after the context manager exits"
        return self.type.__name__
    @property
    def traceback(self) -> Traceback:
        """The traceback."""
        if self._traceback is None:
            # Built lazily; the weak reference avoids a cycle between the
            # ExceptionInfo and its traceback entries.
            self._traceback = Traceback(self.tb, excinfo=ref(self))
        return self._traceback

    @traceback.setter
    def traceback(self, value: Traceback) -> None:
        self._traceback = value
    def __repr__(self) -> str:
        # Unfilled instances (for_later) carry no exception yet.
        if self._excinfo is None:
            return "<ExceptionInfo for raises contextmanager>"
        return "<{} {} tblen={}>".format(
            self.__class__.__name__, saferepr(self._excinfo[1]), len(self.traceback)
        )
def exconly(self, tryshort: bool = False) -> str:
"""Return the exception as a string.
When 'tryshort' resolves to True, and the exception is an
AssertionError, only the actual exception part of the exception
representation is returned (so 'AssertionError: ' is removed from
the beginning).
"""
lines = format_exception_only(self.type, self.value)
text = "".join(lines)
text = text.rstrip()
if tryshort:
if text.startswith(self._striptext):
text = text[len(self._striptext) :]
return text
    def errisinstance(
        self, exc: Union[Type[BaseException], Tuple[Type[BaseException], ...]]
    ) -> bool:
        """Return True if the exception is an instance of exc.
        Consider using ``isinstance(excinfo.value, exc)`` instead.
        """
        return isinstance(self.value, exc)
def _getreprcrash(self) -> "ReprFileLocation":
exconly = self.exconly(tryshort=True)
entry = self.traceback.getcrashentry()
path, lineno = entry.frame.code.raw.co_filename, entry.lineno
return ReprFileLocation(path, lineno + 1, exconly)
    def getrepr(
        self,
        showlocals: bool = False,
        style: "_TracebackStyle" = "long",
        abspath: bool = False,
        tbfilter: bool = True,
        funcargs: bool = False,
        truncate_locals: bool = True,
        chain: bool = True,
    ) -> Union["ReprExceptionInfo", "ExceptionChainRepr"]:
        """Return str()able representation of this exception info.
        :param bool showlocals:
            Show locals per traceback entry.
            Ignored if ``style=="native"``.
        :param str style:
            long|short|no|native|value traceback style.
        :param bool abspath:
            If paths should be changed to absolute or left unchanged.
        :param bool tbfilter:
            Hide entries that contain a local variable ``__tracebackhide__==True``.
            Ignored if ``style=="native"``.
        :param bool funcargs:
            Show fixtures ("funcargs" for legacy purposes) per traceback entry.
        :param bool truncate_locals:
            With ``showlocals==True``, make sure locals can be safely represented as strings.
        :param bool chain:
            If chained exceptions in Python 3 should be shown.
        .. versionchanged:: 3.9
            Added the ``chain`` parameter.
        """
        if style == "native":
            # Native style delegates formatting entirely to the stdlib
            # ``traceback`` module; the remaining options are ignored.
            return ReprExceptionInfo(
                ReprTracebackNative(
                    traceback.format_exception(
                        self.type, self.value, self.traceback[0]._rawentry
                    )
                ),
                self._getreprcrash(),
            )
        # All other styles go through pytest's own formatter.
        fmt = FormattedExcinfo(
            showlocals=showlocals,
            style=style,
            abspath=abspath,
            tbfilter=tbfilter,
            funcargs=funcargs,
            truncate_locals=truncate_locals,
            chain=chain,
        )
        return fmt.repr_excinfo(self)
def match(self, regexp: Union[str, Pattern[str]]) -> "Literal[True]":
"""Check whether the regular expression `regexp` matches the string
representation of the exception using :func:`python:re.search`.
If it matches `True` is returned, otherwise an `AssertionError` is raised.
"""
__tracebackhide__ = True
msg = "Regex pattern {!r} does not match {!r}."
if regexp == str(self.value):
msg += " Did you mean to `re.escape()` the regex?"
assert re.search(regexp, str(self.value)), msg.format(regexp, str(self.value))
# Return True to allow for "assert excinfo.match()".
return True
@attr.s(auto_attribs=True)
class FormattedExcinfo:
    """Presenting information about failing Functions and Generators."""
    # for traceback entries
    flow_marker: ClassVar = ">"
    fail_marker: ClassVar = "E"
    showlocals: bool = False
    style: "_TracebackStyle" = "long"
    abspath: bool = True
    tbfilter: bool = True
    funcargs: bool = False
    truncate_locals: bool = True
    chain: bool = True
    # Cache of parsed ASTs keyed by source path, so repeated entries from the
    # same file are parsed only once.
    astcache: Dict[Union[str, Path], ast.AST] = attr.ib(
        factory=dict, init=False, repr=False
    )
    def _getindent(self, source: "Source") -> int:
        # Figure out indent for the given source.
        try:
            s = str(source.getstatement(len(source) - 1))
        except KeyboardInterrupt:
            raise
        except BaseException:
            # Fall back to the raw last line if statement parsing fails.
            try:
                s = str(source[-1])
            except KeyboardInterrupt:
                raise
            except BaseException:
                return 0
        return 4 + (len(s) - len(s.lstrip()))
    def _getentrysource(self, entry: TracebackEntry) -> Optional["Source"]:
        """Return the deindented source for a traceback entry, or None."""
        source = entry.getsource(self.astcache)
        if source is not None:
            source = source.deindent()
        return source
    def repr_args(self, entry: TracebackEntry) -> Optional["ReprFuncArgs"]:
        """Represent the entry frame's arguments, if ``funcargs`` is enabled."""
        if self.funcargs:
            args = []
            for argname, argvalue in entry.frame.getargs(var=True):
                args.append((argname, saferepr(argvalue)))
            return ReprFuncArgs(args)
        return None
    def get_source(
        self,
        source: Optional["Source"],
        line_index: int = -1,
        excinfo: Optional[ExceptionInfo[BaseException]] = None,
        short: bool = False,
    ) -> List[str]:
        """Return formatted and marked up source lines."""
        lines = []
        if source is None or line_index >= len(source.lines):
            # No usable source: show a placeholder instead.
            source = Source("???")
            line_index = 0
        if line_index < 0:
            line_index += len(source)
        space_prefix = " "
        if short:
            # Short style: only the failing line itself.
            lines.append(space_prefix + source.lines[line_index].strip())
        else:
            # Long style: all lines, with the failing one marked by flow_marker.
            for line in source.lines[:line_index]:
                lines.append(space_prefix + line)
            lines.append(self.flow_marker + " " + source.lines[line_index])
            for line in source.lines[line_index + 1 :]:
                lines.append(space_prefix + line)
        if excinfo is not None:
            indent = 4 if short else self._getindent(source)
            lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
        return lines
    def get_exconly(
        self,
        excinfo: ExceptionInfo[BaseException],
        indent: int = 4,
        markall: bool = False,
    ) -> List[str]:
        """Return the exception message lines prefixed with the fail marker.

        With ``markall`` every line carries the marker; otherwise only the
        first line does and the rest are plainly indented.
        """
        lines = []
        indentstr = " " * indent
        # Get the real exception information out.
        exlines = excinfo.exconly(tryshort=True).split("\n")
        failindent = self.fail_marker + indentstr[1:]
        for line in exlines:
            lines.append(failindent + line)
            if not markall:
                failindent = indentstr
        return lines
    def repr_locals(self, locals: Mapping[str, object]) -> Optional["ReprLocals"]:
        """Represent frame locals as ``name = value`` lines, if enabled."""
        if self.showlocals:
            lines = []
            # Skip internal "@"-prefixed names; sort for stable output.
            keys = [loc for loc in locals if loc[0] != "@"]
            keys.sort()
            for name in keys:
                value = locals[name]
                if name == "__builtins__":
                    lines.append("__builtins__ = <builtins>")
                else:
                    # This formatting could all be handled by the
                    # _repr() function, which is only reprlib.Repr in
                    # disguise, so is very configurable.
                    if self.truncate_locals:
                        str_repr = saferepr(value)
                    else:
                        str_repr = safeformat(value)
                    # if len(str_repr) < 70 or not isinstance(value, (list, tuple, dict)):
                    lines.append(f"{name:<10} = {str_repr}")
                    # else:
                    # self._line("%-10s =\\" % (name,))
                    # # XXX
                    # pprint.pprint(value, stream=self.excinfowriter)
            return ReprLocals(lines)
        return None
    def repr_traceback_entry(
        self,
        entry: TracebackEntry,
        excinfo: Optional[ExceptionInfo[BaseException]] = None,
    ) -> "ReprEntry":
        """Represent a single traceback entry according to the active style."""
        lines: List[str] = []
        # An entry may carry its own repr style, which overrides the global one.
        style = entry._repr_style if entry._repr_style is not None else self.style
        if style in ("short", "long"):
            source = self._getentrysource(entry)
            if source is None:
                source = Source("???")
                line_index = 0
            else:
                line_index = entry.lineno - entry.getfirstlinesource()
            short = style == "short"
            reprargs = self.repr_args(entry) if not short else None
            s = self.get_source(source, line_index, excinfo, short=short)
            lines.extend(s)
            if short:
                message = "in %s" % (entry.name)
            else:
                message = excinfo and excinfo.typename or ""
            entry_path = entry.path
            path = self._makepath(entry_path)
            reprfileloc = ReprFileLocation(path, entry.lineno + 1, message)
            localsrepr = self.repr_locals(entry.locals)
            return ReprEntry(lines, reprargs, localsrepr, reprfileloc, style)
        elif style == "value":
            # "value" style shows only the exception value itself.
            if excinfo:
                lines.extend(str(excinfo.value).split("\n"))
            return ReprEntry(lines, None, None, None, style)
        else:
            if excinfo:
                lines.extend(self.get_exconly(excinfo, indent=4))
            return ReprEntry(lines, None, None, None, style)
    def _makepath(self, path: Union[Path, str]) -> str:
        # Use a relative path when abspath is off and it is actually shorter.
        if not self.abspath and isinstance(path, Path):
            try:
                np = bestrelpath(Path.cwd(), path)
            except OSError:
                return str(path)
            if len(np) < len(str(path)):
                return np
        return str(path)
    def repr_traceback(self, excinfo: ExceptionInfo[BaseException]) -> "ReprTraceback":
        """Represent the (optionally filtered/truncated) traceback."""
        traceback = excinfo.traceback
        if self.tbfilter:
            traceback = traceback.filter()
        if isinstance(excinfo.value, RecursionError):
            traceback, extraline = self._truncate_recursive_traceback(traceback)
        else:
            extraline = None
        last = traceback[-1]
        entries = []
        if self.style == "value":
            # "value" style only renders the final entry.
            reprentry = self.repr_traceback_entry(last, excinfo)
            entries.append(reprentry)
            return ReprTraceback(entries, None, style=self.style)
        for index, entry in enumerate(traceback):
            # Only the last entry carries the exception info.
            einfo = (last == entry) and excinfo or None
            reprentry = self.repr_traceback_entry(entry, einfo)
            entries.append(reprentry)
        return ReprTraceback(entries, extraline, style=self.style)
    def _truncate_recursive_traceback(
        self, traceback: Traceback
    ) -> Tuple[Traceback, Optional[str]]:
        """Truncate the given recursive traceback trying to find the starting
        point of the recursion.
        The detection is done by going through each traceback entry and
        finding the point in which the locals of the frame are equal to the
        locals of a previous frame (see ``recursionindex()``).
        Handle the situation where the recursion process might raise an
        exception (for example comparing numpy arrays using equality raises a
        TypeError), in which case we do our best to warn the user of the
        error and show a limited traceback.
        """
        try:
            recursionindex = traceback.recursionindex()
        except Exception as e:
            # Locating the recursion origin itself failed; show an explanatory
            # note plus the first and last max_frames entries.
            max_frames = 10
            extraline: Optional[str] = (
                "!!! Recursion error detected, but an error occurred locating the origin of recursion.\n"
                " The following exception happened when comparing locals in the stack frame:\n"
                " {exc_type}: {exc_msg}\n"
                " Displaying first and last {max_frames} stack frames out of {total}."
            ).format(
                exc_type=type(e).__name__,
                exc_msg=str(e),
                max_frames=max_frames,
                total=len(traceback),
            )
            # Type ignored because adding two instances of a List subtype
            # currently incorrectly has type List instead of the subtype.
            traceback = traceback[:max_frames] + traceback[-max_frames:] # type: ignore
        else:
            if recursionindex is not None:
                extraline = "!!! Recursion detected (same locals & position)"
                traceback = traceback[: recursionindex + 1]
            else:
                extraline = None
        return traceback, extraline
    def repr_excinfo(
        self, excinfo: ExceptionInfo[BaseException]
    ) -> "ExceptionChainRepr":
        """Represent the full exception chain (``__cause__``/``__context__``)."""
        repr_chain: List[
            Tuple[ReprTraceback, Optional[ReprFileLocation], Optional[str]]
        ] = []
        e: Optional[BaseException] = excinfo.value
        excinfo_: Optional[ExceptionInfo[BaseException]] = excinfo
        descr = None
        # ``seen`` guards against cycles in the exception chain.
        seen: Set[int] = set()
        while e is not None and id(e) not in seen:
            seen.add(id(e))
            if excinfo_:
                reprtraceback = self.repr_traceback(excinfo_)
                reprcrash: Optional[ReprFileLocation] = (
                    excinfo_._getreprcrash() if self.style != "value" else None
                )
            else:
                # Fallback to native repr if the exception doesn't have a traceback:
                # ExceptionInfo objects require a full traceback to work.
                reprtraceback = ReprTracebackNative(
                    traceback.format_exception(type(e), e, None)
                )
                reprcrash = None
            repr_chain += [(reprtraceback, reprcrash, descr)]
            if e.__cause__ is not None and self.chain:
                e = e.__cause__
                excinfo_ = (
                    ExceptionInfo.from_exc_info((type(e), e, e.__traceback__))
                    if e.__traceback__
                    else None
                )
                descr = "The above exception was the direct cause of the following exception:"
            elif (
                e.__context__ is not None and not e.__suppress_context__ and self.chain
            ):
                e = e.__context__
                excinfo_ = (
                    ExceptionInfo.from_exc_info((type(e), e, e.__traceback__))
                    if e.__traceback__
                    else None
                )
                descr = "During handling of the above exception, another exception occurred:"
            else:
                e = None
        # Oldest exception first, matching Python's own chained output order.
        repr_chain.reverse()
        return ExceptionChainRepr(repr_chain)
@attr.s(eq=False, auto_attribs=True)
class TerminalRepr:
    """Base for objects that render themselves onto a TerminalWriter."""
    def __str__(self) -> str:
        # FYI this is called from pytest-xdist's serialization of exception
        # information.
        buf = StringIO()
        writer = TerminalWriter(file=buf)
        self.toterminal(writer)
        return buf.getvalue().strip()
    def __repr__(self) -> str:
        return f"<{self.__class__} instance at {id(self):0x}>"
    def toterminal(self, tw: TerminalWriter) -> None:
        raise NotImplementedError()
# This class is abstract -- only subclasses are instantiated.
@attr.s(eq=False)
class ExceptionRepr(TerminalRepr):
    """Base for exception representations that can carry extra report sections."""
    # Provided by subclasses.
    reprcrash: Optional["ReprFileLocation"]
    reprtraceback: "ReprTraceback"
    def __attrs_post_init__(self) -> None:
        self.sections: List[Tuple[str, str, str]] = []
    def addsection(self, name: str, content: str, sep: str = "-") -> None:
        self.sections.append((name, content, sep))
    def toterminal(self, tw: TerminalWriter) -> None:
        for section in self.sections:
            title, body, separator = section
            tw.sep(separator, title)
            tw.line(body)
@attr.s(eq=False, auto_attribs=True)
class ExceptionChainRepr(ExceptionRepr):
    """Representation of a chain of exceptions, oldest first."""
    chain: Sequence[Tuple["ReprTraceback", Optional["ReprFileLocation"], Optional[str]]]
    def __attrs_post_init__(self) -> None:
        super().__attrs_post_init__()
        # reprcrash and reprtraceback of the outermost (the newest) exception
        # in the chain.
        newest_traceback, newest_crash, _ = self.chain[-1]
        self.reprtraceback = newest_traceback
        self.reprcrash = newest_crash
    def toterminal(self, tw: TerminalWriter) -> None:
        for reprtb, _, descr in self.chain:
            reprtb.toterminal(tw)
            if descr is not None:
                tw.line("")
                tw.line(descr, yellow=True)
        super().toterminal(tw)
@attr.s(eq=False, auto_attribs=True)
class ReprExceptionInfo(ExceptionRepr):
    """Representation of a single exception: its traceback plus crash location."""
    reprtraceback: "ReprTraceback"
    reprcrash: "ReprFileLocation"
    def toterminal(self, tw: TerminalWriter) -> None:
        self.reprtraceback.toterminal(tw)
        super().toterminal(tw)
@attr.s(eq=False, auto_attribs=True)
class ReprTraceback(TerminalRepr):
    """Representation of a whole traceback as a sequence of entries."""
    reprentries: Sequence[Union["ReprEntry", "ReprEntryNative"]]
    extraline: Optional[str]
    style: "_TracebackStyle"
    entrysep: ClassVar = "_ "
    def toterminal(self, tw: TerminalWriter) -> None:
        # The entries might have different styles.
        last_index = len(self.reprentries) - 1
        for i, entry in enumerate(self.reprentries):
            if entry.style == "long":
                tw.line("")
            entry.toterminal(tw)
            if i < last_index:
                next_entry = self.reprentries[i + 1]
                # Separator after long entries, and between a short entry
                # and a following long one.
                wants_sep = entry.style == "long" or (
                    entry.style == "short" and next_entry.style == "long"
                )
                if wants_sep:
                    tw.sep(self.entrysep)
        if self.extraline:
            tw.line(self.extraline)
class ReprTracebackNative(ReprTraceback):
    """A ReprTraceback wrapping pre-formatted, stdlib-style traceback lines."""
    def __init__(self, tblines: Sequence[str]) -> None:
        self.style = "native"
        self.reprentries = [ReprEntryNative(tblines)]
        self.extraline = None
@attr.s(eq=False, auto_attribs=True)
class ReprEntryNative(TerminalRepr):
    """A single pre-formatted traceback entry, written out verbatim."""
    lines: Sequence[str]
    style: ClassVar["_TracebackStyle"] = "native"
    def toterminal(self, tw: TerminalWriter) -> None:
        tw.write("".join(self.lines))
@attr.s(eq=False, auto_attribs=True)
class ReprEntry(TerminalRepr):
    """Representation of one traceback entry (source, args, locals, location)."""
    lines: Sequence[str]
    reprfuncargs: Optional["ReprFuncArgs"]
    reprlocals: Optional["ReprLocals"]
    reprfileloc: Optional["ReprFileLocation"]
    style: "_TracebackStyle"
    def _write_entry_lines(self, tw: TerminalWriter) -> None:
        """Write the source code portions of a list of traceback entries with syntax highlighting.
        Usually entries are lines like these:
        " x = 1"
        "> assert x == 2"
        "E assert 1 == 2"
        This function takes care of rendering the "source" portions of it (the lines without
        the "E" prefix) using syntax highlighting, taking care to not highlighting the ">"
        character, as doing so might break line continuations.
        """
        if not self.lines:
            return
        # separate indents and source lines that are not failures: we want to
        # highlight the code but not the indentation, which may contain markers
        # such as "> assert 0"
        fail_marker = f"{FormattedExcinfo.fail_marker} "
        indent_size = len(fail_marker)
        indents: List[str] = []
        source_lines: List[str] = []
        failure_lines: List[str] = []
        for index, line in enumerate(self.lines):
            is_failure_line = line.startswith(fail_marker)
            if is_failure_line:
                # from this point on all lines are considered part of the failure
                failure_lines.extend(self.lines[index:])
                break
            else:
                if self.style == "value":
                    # "value" style has no marker column to strip.
                    source_lines.append(line)
                else:
                    # Split off the marker/indent prefix so only the code
                    # portion gets syntax-highlighted.
                    indents.append(line[:indent_size])
                    source_lines.append(line[indent_size:])
        tw._write_source(source_lines, indents)
        # failure lines are always completely red and bold
        for line in failure_lines:
            tw.line(line, bold=True, red=True)
    def toterminal(self, tw: TerminalWriter) -> None:
        """Render this entry; the layout depends on the entry's style."""
        if self.style == "short":
            # Short style: location first, then source, then locals.
            assert self.reprfileloc is not None
            self.reprfileloc.toterminal(tw)
            self._write_entry_lines(tw)
            if self.reprlocals:
                self.reprlocals.toterminal(tw, indent=" " * 8)
            return
        if self.reprfuncargs:
            self.reprfuncargs.toterminal(tw)
        self._write_entry_lines(tw)
        if self.reprlocals:
            tw.line("")
            self.reprlocals.toterminal(tw)
        if self.reprfileloc:
            if self.lines:
                tw.line("")
            self.reprfileloc.toterminal(tw)
    def __str__(self) -> str:
        return "{}\n{}\n{}".format(
            "\n".join(self.lines), self.reprlocals, self.reprfileloc
        )
@attr.s(eq=False, auto_attribs=True)
class ReprFileLocation(TerminalRepr):
    """A ``path:lineno: message`` location line."""
    path: str = attr.ib(converter=str)
    lineno: int
    message: str
    def toterminal(self, tw: TerminalWriter) -> None:
        # Filename and lineno output for each entry, using an output format
        # that most editors understand.
        first_line = self.message.split("\n", 1)[0]
        tw.write(self.path, bold=True, red=True)
        tw.line(f":{self.lineno}: {first_line}")
@attr.s(eq=False, auto_attribs=True)
class ReprLocals(TerminalRepr):
    """Pre-formatted ``name = value`` lines for a frame's locals."""
    lines: Sequence[str]
    def toterminal(self, tw: TerminalWriter, indent="") -> None:
        for entry in self.lines:
            tw.line(indent + entry)
@attr.s(eq=False, auto_attribs=True)
class ReprFuncArgs(TerminalRepr):
    """Representation of a function's arguments as ``name = value`` pairs."""
    args: Sequence[Tuple[str, object]]
    def toterminal(self, tw: TerminalWriter) -> None:
        # Greedily pack pairs onto lines no wider than the terminal; a pair
        # that would overflow starts a new line.
        if self.args:
            linesofar = ""
            for name, value in self.args:
                ns = f"{name} = {value}"
                # +2 accounts for the ", " joiner.
                if len(ns) + len(linesofar) + 2 > tw.fullwidth:
                    if linesofar:
                        tw.line(linesofar)
                    linesofar = ns
                else:
                    if linesofar:
                        linesofar += ", " + ns
                    else:
                        linesofar = ns
            if linesofar:
                tw.line(linesofar)
            tw.line("")
def getfslineno(obj: object) -> Tuple[Union[str, Path], int]:
    """Return source location (path, lineno) for the given object.
    If the source cannot be determined return ("", -1).
    The line number is 0-based.
    """
    # xxx let decorators etc specify a sane ordering
    # NOTE: this used to be done in _pytest.compat.getfslineno, initially added
    # in 6ec13a2b9. It ("place_as") appears to be something very custom.
    obj = get_real_func(obj)
    if hasattr(obj, "place_as"):
        obj = obj.place_as # type: ignore[attr-defined]
    try:
        code = Code.from_function(obj)
    except TypeError:
        # Not a plain function: fall back to inspect-based discovery.
        try:
            fn = inspect.getsourcefile(obj) or inspect.getfile(obj) # type: ignore[arg-type]
        except TypeError:
            return "", -1
        fspath = fn and absolutepath(fn) or ""
        lineno = -1
        if fspath:
            try:
                _, lineno = findsource(obj)
            except OSError:
                pass
        return fspath, lineno
    return code.path, code.firstlineno
# Relative paths that we use to filter traceback entries from appearing to the user;
# see filter_traceback.
# note: if we need to add more paths than what we have now we should probably use a list
# for better maintenance.
# rstrip("oc") maps a cached ".pyc"/".pyo" filename back to its ".py" source.
_PLUGGY_DIR = Path(pluggy.__file__.rstrip("oc"))
# pluggy is either a package or a single module depending on the version
if _PLUGGY_DIR.name == "__init__.py":
    _PLUGGY_DIR = _PLUGGY_DIR.parent
_PYTEST_DIR = Path(_pytest.__file__).parent
def filter_traceback(entry: TracebackEntry) -> bool:
    """Return True if a TracebackEntry instance should be included in tracebacks.
    We hide traceback entries of:
    * dynamically generated code (no code to show up for it);
    * internal traceback from pytest or its internal libraries, py and pluggy.
    """
    # entry.path might sometimes return a str object when the entry
    # points to dynamically generated code.
    # See https://bitbucket.org/pytest-dev/py/issues/71.
    raw_filename = entry.frame.code.raw.co_filename
    if "<" in raw_filename and ">" in raw_filename:
        return False
    # entry.path might point to a non-existing file, in which case it will
    # also return a str object. See #1133.
    parents = Path(entry.path).parents
    return _PLUGGY_DIR not in parents and _PYTEST_DIR not in parents
|
data = [0x0, 0x0, 0x0, 0x0, 0x0, 0x8, 0x8, 0x8, 0x0, 0x8, 0xa, 0x4a, 0x40, 0x0, 0x0, 0xa, 0x5f, 0xea, 0x5f, 0xea, 0xe, 0xd9, 0x2e, 0xd3, 0x6e, 0x19, 0x32, 0x44, 0x89, 0x33, 0xc, 0x92, 0x4c, 0x92, 0x4d, 0x8, 0x8, 0x0, 0x0, 0x0, 0x4, 0x88, 0x8, 0x8, 0x4, 0x8, 0x4, 0x84, 0x84, 0x88, 0x0, 0xa, 0x44, 0x8a, 0x40, 0x0, 0x4, 0x8e, 0xc4, 0x80, 0x0, 0x0, 0x0, 0x4, 0x88, 0x0, 0x0, 0xe, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x8, 0x0, 0x1, 0x22, 0x44, 0x88, 0x10, 0xc, 0x92, 0x52, 0x52, 0x4c, 0x4, 0x8c, 0x84, 0x84, 0x8e, 0x1c, 0x82, 0x4c, 0x90, 0x1e, 0x1e, 0xc2, 0x44, 0x92, 0x4c, 0x6, 0xca, 0x52, 0x5f, 0xe2, 0x1f, 0xf0, 0x1e, 0xc1, 0x3e, 0x2, 0x44, 0x8e, 0xd1, 0x2e, 0x1f, 0xe2, 0x44, 0x88, 0x10, 0xe, 0xd1, 0x2e, 0xd1, 0x2e, 0xe, 0xd1, 0x2e, 0xc4, 0x88, 0x0, 0x8, 0x0, 0x8, 0x0, 0x0, 0x4, 0x80, 0x4, 0x88, 0x2, 0x44, 0x88, 0x4, 0x82, 0x0, 0xe, 0xc0, 0xe, 0xc0, 0x8, 0x4, 0x82, 0x44, 0x88, 0xe, 0xd1, 0x26, 0xc0, 0x4, 0xe, 0xd1, 0x35, 0xb3, 0x6c, 0xc, 0x92, 0x5e, 0xd2, 0x52, 0x1c, 0x92, 0x5c, 0x92, 0x5c, 0xe, 0xd0, 0x10, 0x10, 0xe, 0x1c, 0x92, 0x52, 0x52, 0x5c, 0x1e, 0xd0, 0x1c, 0x90, 0x1e, 0x1e, 0xd0, 0x1c, 0x90, 0x10, 0xe, 0xd0, 0x13, 0x71, 0x2e, 0x12, 0x52, 0x5e, 0xd2, 0x52, 0x1c, 0x88, 0x8, 0x8, 0x1c, 0x1f, 0xe2, 0x42, 0x52, 0x4c, 0x12, 0x54, 0x98, 0x14, 0x92, 0x10, 0x10, 0x10, 0x10, 0x1e, 0x11, 0x3b, 0x75, 0xb1, 0x31, 0x11, 0x39, 0x35, 0xb3, 0x71, 0xc, 0x92, 0x52, 0x52, 0x4c, 0x1c, 0x92, 0x5c, 0x90, 0x10, 0xc, 0x92, 0x52, 0x4c, 0x86, 0x1c, 0x92, 0x5c, 0x92, 0x51, 0xe, 0xd0, 0xc, 0x82, 0x5c, 0x1f, 0xe4, 0x84, 0x84, 0x84, 0x12, 0x52, 0x52, 0x52, 0x4c, 0x11, 0x31, 0x31, 0x2a, 0x44, 0x11, 0x31, 0x35, 0xbb, 0x71, 0x12, 0x52, 0x4c, 0x92, 0x52, 0x11, 0x2a, 0x44, 0x84, 0x84, 0x1e, 0xc4, 0x88, 0x10, 0x1e, 0xe, 0xc8, 0x8, 0x8, 0xe, 0x10, 0x8, 0x4, 0x82, 0x41, 0xe, 0xc2, 0x42, 0x42, 0x4e, 0x4, 0x8a, 0x40, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1f, 0x8, 0x4, 0x80, 0x0, 0x0, 0x0, 0xe, 0xd2, 0x52, 0x4f, 0x10, 0x10, 0x1c, 0x92, 0x5c, 0x0, 0xe, 0xd0, 0x10, 0xe, 0x2, 0x42, 0x4e, 0xd2, 0x4e, 0xc, 0x92, 0x5c, 0x90, 
0xe, 0x6, 0xc8, 0x1c, 0x88, 0x8, 0xe, 0xd2, 0x4e, 0xc2, 0x4c, 0x10, 0x10, 0x1c, 0x92, 0x52, 0x8, 0x0, 0x8, 0x8, 0x8, 0x2, 0x40, 0x2, 0x42, 0x4c, 0x10, 0x14, 0x98, 0x14, 0x92, 0x8, 0x8, 0x8, 0x8, 0x6, 0x0, 0x1b, 0x75, 0xb1, 0x31, 0x0, 0x1c, 0x92, 0x52, 0x52, 0x0, 0xc, 0x92, 0x52, 0x4c, 0x0, 0x1c, 0x92, 0x5c, 0x90, 0x0, 0xe, 0xd2, 0x4e, 0xc2, 0x0, 0xe, 0xd0, 0x10, 0x10, 0x0, 0x6, 0xc8, 0x4, 0x98, 0x8, 0x8, 0xe, 0xc8, 0x7, 0x0, 0x12, 0x52, 0x52, 0x4f, 0x0, 0x11, 0x31, 0x2a, 0x44, 0x0, 0x11, 0x31, 0x35, 0xbb, 0x0, 0x12, 0x4c, 0x8c, 0x92, 0x0, 0x11, 0x2a, 0x44, 0x98, 0x0, 0x1e, 0xc4, 0x88, 0x1e, 0x6, 0xc4, 0x8c, 0x84, 0x86, 0x8, 0x8, 0x8, 0x8, 0x8, 0x18, 0x8, 0xc, 0x88, 0x18, 0x0, 0x0, 0xc, 0x83, 0x60]
# Emit the font as a Python dict literal mapping each printable ASCII char
# (codes 32..126) to a 5x5 grid of pixel values (0 = off, 9 = full brightness).
print("_microbit_font_pendolino3 = {")
for i in range(0, 128 - 33):
    # Each glyph is 5 consecutive bytes; the low 5 bits of each byte form one
    # row of the 5x5 glyph, most significant bit = leftmost pixel.
    rows = data[i * 5:(i + 1) * 5]
    finalrow = [[(row >> shift) & 1 for shift in (4, 3, 2, 1, 0)] for row in rows]
    # Map bit value 1 -> brightness 9 in the row data ONLY.  The previous code
    # ran .replace("1", "9") over the whole '"key": value' line, which also
    # rewrote the dict key for the character "1" into "9", producing a
    # duplicate "9" key and losing the "1" glyph.
    pixels = str(finalrow).replace(" ", "").replace("1", "9")
    print(" \"%s\": %s," % (chr(i + 32), pixels))
print("}")
Updated version of fontparser.py:
#!/usr/bin/env python3
# This script converts fonts from the DAL/MicroPython sources into lookup maps.
data = [0x0, 0x0, 0x0, 0x0, 0x0, 0x8, 0x8, 0x8, 0x0, 0x8, 0xa, 0x4a, 0x40, 0x0, 0x0, 0xa, 0x5f, 0xea, 0x5f, 0xea, 0xe, 0xd9, 0x2e, 0xd3, 0x6e, 0x19, 0x32, 0x44, 0x89, 0x33, 0xc, 0x92, 0x4c, 0x92, 0x4d, 0x8, 0x8, 0x0, 0x0, 0x0, 0x4, 0x88, 0x8, 0x8, 0x4, 0x8, 0x4, 0x84, 0x84, 0x88, 0x0, 0xa, 0x44, 0x8a, 0x40, 0x0, 0x4, 0x8e, 0xc4, 0x80, 0x0, 0x0, 0x0, 0x4, 0x88, 0x0, 0x0, 0xe, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x8, 0x0, 0x1, 0x22, 0x44, 0x88, 0x10, 0xc, 0x92, 0x52, 0x52, 0x4c, 0x4, 0x8c, 0x84, 0x84, 0x8e, 0x1c, 0x82, 0x4c, 0x90, 0x1e, 0x1e, 0xc2, 0x44, 0x92, 0x4c, 0x6, 0xca, 0x52, 0x5f, 0xe2, 0x1f, 0xf0, 0x1e, 0xc1, 0x3e, 0x2, 0x44, 0x8e, 0xd1, 0x2e, 0x1f, 0xe2, 0x44, 0x88, 0x10, 0xe, 0xd1, 0x2e, 0xd1, 0x2e, 0xe, 0xd1, 0x2e, 0xc4, 0x88, 0x0, 0x8, 0x0, 0x8, 0x0, 0x0, 0x4, 0x80, 0x4, 0x88, 0x2, 0x44, 0x88, 0x4, 0x82, 0x0, 0xe, 0xc0, 0xe, 0xc0, 0x8, 0x4, 0x82, 0x44, 0x88, 0xe, 0xd1, 0x26, 0xc0, 0x4, 0xe, 0xd1, 0x35, 0xb3, 0x6c, 0xc, 0x92, 0x5e, 0xd2, 0x52, 0x1c, 0x92, 0x5c, 0x92, 0x5c, 0xe, 0xd0, 0x10, 0x10, 0xe, 0x1c, 0x92, 0x52, 0x52, 0x5c, 0x1e, 0xd0, 0x1c, 0x90, 0x1e, 0x1e, 0xd0, 0x1c, 0x90, 0x10, 0xe, 0xd0, 0x13, 0x71, 0x2e, 0x12, 0x52, 0x5e, 0xd2, 0x52, 0x1c, 0x88, 0x8, 0x8, 0x1c, 0x1f, 0xe2, 0x42, 0x52, 0x4c, 0x12, 0x54, 0x98, 0x14, 0x92, 0x10, 0x10, 0x10, 0x10, 0x1e, 0x11, 0x3b, 0x75, 0xb1, 0x31, 0x11, 0x39, 0x35, 0xb3, 0x71, 0xc, 0x92, 0x52, 0x52, 0x4c, 0x1c, 0x92, 0x5c, 0x90, 0x10, 0xc, 0x92, 0x52, 0x4c, 0x86, 0x1c, 0x92, 0x5c, 0x92, 0x51, 0xe, 0xd0, 0xc, 0x82, 0x5c, 0x1f, 0xe4, 0x84, 0x84, 0x84, 0x12, 0x52, 0x52, 0x52, 0x4c, 0x11, 0x31, 0x31, 0x2a, 0x44, 0x11, 0x31, 0x35, 0xbb, 0x71, 0x12, 0x52, 0x4c, 0x92, 0x52, 0x11, 0x2a, 0x44, 0x84, 0x84, 0x1e, 0xc4, 0x88, 0x10, 0x1e, 0xe, 0xc8, 0x8, 0x8, 0xe, 0x10, 0x8, 0x4, 0x82, 0x41, 0xe, 0xc2, 0x42, 0x42, 0x4e, 0x4, 0x8a, 0x40, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1f, 0x8, 0x4, 0x80, 0x0, 0x0, 0x0, 0xe, 0xd2, 0x52, 0x4f, 0x10, 0x10, 0x1c, 0x92, 0x5c, 0x0, 0xe, 0xd0, 0x10, 0xe, 0x2, 0x42, 0x4e, 0xd2, 0x4e, 0xc, 0x92, 0x5c, 0x90, 
0xe, 0x6, 0xc8, 0x1c, 0x88, 0x8, 0xe, 0xd2, 0x4e, 0xc2, 0x4c, 0x10, 0x10, 0x1c, 0x92, 0x52, 0x8, 0x0, 0x8, 0x8, 0x8, 0x2, 0x40, 0x2, 0x42, 0x4c, 0x10, 0x14, 0x98, 0x14, 0x92, 0x8, 0x8, 0x8, 0x8, 0x6, 0x0, 0x1b, 0x75, 0xb1, 0x31, 0x0, 0x1c, 0x92, 0x52, 0x52, 0x0, 0xc, 0x92, 0x52, 0x4c, 0x0, 0x1c, 0x92, 0x5c, 0x90, 0x0, 0xe, 0xd2, 0x4e, 0xc2, 0x0, 0xe, 0xd0, 0x10, 0x10, 0x0, 0x6, 0xc8, 0x4, 0x98, 0x8, 0x8, 0xe, 0xc8, 0x7, 0x0, 0x12, 0x52, 0x52, 0x4f, 0x0, 0x11, 0x31, 0x2a, 0x44, 0x0, 0x11, 0x31, 0x35, 0xbb, 0x0, 0x12, 0x4c, 0x8c, 0x92, 0x0, 0x11, 0x2a, 0x44, 0x98, 0x0, 0x1e, 0xc4, 0x88, 0x1e, 0x6, 0xc4, 0x8c, 0x84, 0x86, 0x8, 0x8, 0x8, 0x8, 0x8, 0x18, 0x8, 0xc, 0x88, 0x18, 0x0, 0x0, 0xc, 0x83, 0x60]
# Emit the font as a Python dict literal mapping each printable ASCII char
# (codes 32..126) to a 5x5 grid of pixel values (0 = off, 9 = full brightness).
print("_microbit_font_pendolino3 = {")
for i in range(0, 128 - 33):
    # Each glyph is 5 consecutive bytes; the low 5 bits of each byte form one
    # row of the 5x5 glyph, most significant bit = leftmost pixel.
    rows = data[i * 5:(i + 1) * 5]
    finalrow = [[(row >> shift) & 1 for shift in (4, 3, 2, 1, 0)] for row in rows]
    # Map bit value 1 -> brightness 9 in the row data ONLY.  The previous code
    # ran .replace("1", "9") over the whole '"key": value' line, which also
    # rewrote the dict key for the character "1" into "9", producing a
    # duplicate "9" key and losing the "1" glyph.
    pixels = str(finalrow).replace(" ", "").replace("1", "9")
    print(" \"%s\": %s," % (chr(i + 32), pixels))
print("}")
|
from collections import OrderedDict
from datetime import date, datetime
from django.core.management import call_command
from django.test import TestCase
from mock import patch
from casexml.apps.case.const import ARCHIVED_CASE_OWNER_ID
from casexml.apps.case.sharedmodels import CommCareCaseIndex
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.tests.utils import run_with_all_backends
from custom.enikshay.nikshay_datamigration.models import Followup, Outcome, PatientDetail
from custom.enikshay.tests.utils import ENikshayLocationStructureMixin
class TestCreateEnikshayCases(ENikshayLocationStructureMixin, TestCase):
    def setUp(self):
        """Create the Nikshay source records the migration command consumes."""
        self.domain = "enikshay-test-domain"
        super(TestCreateEnikshayCases, self).setUp()
        # A single migratable patient; these field values are asserted
        # verbatim by the test methods below.
        self.patient_detail = PatientDetail.objects.create(
            PregId='MH-ABD-05-16-0001',
            Tbunitcode=1,
            pname='A B C',
            pgender='M',
            page=18,
            poccupation='4',
            paadharno=867386000000,
            paddress='Cambridge MA',
            pmob='5432109876',
            pregdate1=date(2016, 12, 13),
            cname='Secondary name',
            caddress='Secondary address',
            cmob='1234567890',
            dcpulmunory='Y',
            dotname='Bubble Bubbles',
            dotmob='9876543210',
            dotpType=1,
            PHI=2,
            atbtreatment='',
            Ptype=4,
            pcategory=4,
            cvisitedDate1='2016-12-25 00:00:00.000',
            InitiationDate1='2016-12-22 16:06:47.726',
            dotmosignDate1='2016-12-23 00:00:00.000',
        )
        self.outcome = Outcome.objects.create(
            PatientId=self.patient_detail,
            HIVStatus='negative',
            loginDate=datetime(2016, 1, 2),
        )
        # Household.objects.create(
        #     PatientID=patient_detail,
        # )
        # Five followups -> five expected "test" cases after migration.
        for i in range(5):
            Followup.objects.create(
                id=(i + 1),
                PatientID=self.patient_detail,
            )
        self.case_accessor = CaseAccessors(self.domain)
def tearDown(self):
Outcome.objects.all().delete()
Followup.objects.all().delete()
# Household.objects.all().delete()
PatientDetail.objects.all().delete()
super(TestCreateEnikshayCases, self).tearDown()
    @run_with_all_backends
    @patch('custom.enikshay.nikshay_datamigration.factory.datetime')
    def test_case_creation(self, mock_datetime):
        """A full migration run creates the person, occurrence, episode and
        test cases with the expected properties, owners and indices."""
        # Freeze "now" so generated values (e.g. occurrence_id) are deterministic.
        mock_datetime.utcnow.return_value = datetime(2016, 9, 8, 1, 2, 3, 4123)
        call_command('create_enikshay_cases', self.domain)
        # --- person case ---
        person_case_ids = self.case_accessor.get_case_ids_in_domain(type='person')
        self.assertEqual(1, len(person_case_ids))
        person_case = self.case_accessor.get_case(person_case_ids[0])
        self.assertEqual(
            OrderedDict([
                ('aadhaar_number', '867386000000'),
                ('age', '18'),
                ('age_entered', '18'),
                ('contact_phone_number', '5432109876'),
                ('current_address', 'Cambridge MA'),
                ('current_address_district_choice', self.dto.location_id),
                ('current_address_state_choice', self.sto.location_id),
                ('dob', '1998-07-01'),
                ('dob_known', 'no'),
                ('first_name', 'A B'),
                ('last_name', 'C'),
                ('migration_created_case', 'true'),
                ('nikshay_id', 'MH-ABD-05-16-0001'),
                ('person_id', 'FROM_NIKSHAY_MH-ABD-05-16-0001'),
                ('phi', 'PHI'),
                ('secondary_contact_name_address', 'Secondary name, Secondary address'),
                ('secondary_contact_phone_number', '1234567890'),
                ('sex', 'male'),
                ('tu_choice', 'TU'),
            ]),
            person_case.dynamic_case_properties()
        )
        self.assertEqual('MH-ABD-05-16-0001', person_case.external_id)
        self.assertEqual('A B C', person_case.name)
        self.assertEqual(self.phi.location_id, person_case.owner_id)
        # make sure the case is only created/modified by a single form
        self.assertEqual(1, len(person_case.xform_ids))
        # --- occurrence case (extension of the person case) ---
        occurrence_case_ids = self.case_accessor.get_case_ids_in_domain(type='occurrence')
        self.assertEqual(1, len(occurrence_case_ids))
        occurrence_case = self.case_accessor.get_case(occurrence_case_ids[0])
        self.assertEqual(
            OrderedDict([
                ('current_episode_type', 'confirmed_tb'),
                ('hiv_status', 'negative'),
                ('ihv_date', '2016-12-25'),
                ('initial_home_visit_status', 'completed'),
                ('migration_created_case', 'true'),
                ('occurrence_episode_count', '1'),
                ('occurrence_id', '20160908010203004'),
            ]),
            occurrence_case.dynamic_case_properties()
        )
        self.assertEqual('Occurrence #1', occurrence_case.name)
        self.assertEqual(len(occurrence_case.indices), 1)
        self._assertIndexEqual(
            CommCareCaseIndex(
                identifier='host',
                referenced_type='person',
                referenced_id=person_case.get_id,
                relationship='extension',
            ),
            occurrence_case.indices[0]
        )
        self.assertEqual('-', occurrence_case.owner_id)
        # make sure the case is only created/modified by a single form
        self.assertEqual(1, len(occurrence_case.xform_ids))
        # --- episode case (extension of the occurrence case) ---
        episode_case_ids = self.case_accessor.get_case_ids_in_domain(type='episode')
        self.assertEqual(1, len(episode_case_ids))
        episode_case = self.case_accessor.get_case(episode_case_ids[0])
        self.assertEqual(
            OrderedDict([
                ('date_of_mo_signature', '2016-12-23'),
                ('disease_classification', 'pulmonary'),
                ('dots_99_enabled', 'false'),
                ('episode_pending_registration', 'no'),
                ('episode_type', 'confirmed_tb'),
                ('migration_created_case', 'true'),
                ('occupation', 'physical_mathematical_and_engineering'),
                ('patient_type_choice', 'treatment_after_lfu'),
                ('treatment_initiation_date', '2016-12-22'),
                ('treatment_supporter_designation', 'health_worker'),
                ('treatment_supporter_first_name', 'Bubble'),
                ('treatment_supporter_last_name', 'Bubbles'),
                ('treatment_supporter_mobile_number', '9876543210'),
            ]),
            episode_case.dynamic_case_properties()
        )
        self.assertEqual('Episode #1: Confirmed TB (Patient)', episode_case.name)
        self.assertEqual(datetime(2016, 12, 13), episode_case.opened_on)
        self.assertEqual('-', episode_case.owner_id)
        self.assertEqual(len(episode_case.indices), 1)
        self._assertIndexEqual(
            CommCareCaseIndex(
                identifier='host',
                referenced_type='occurrence',
                referenced_id=occurrence_case.get_id,
                relationship='extension',
            ),
            episode_case.indices[0]
        )
        # make sure the case is only created/modified by a single form
        self.assertEqual(1, len(episode_case.xform_ids))
        # --- test cases: one per Followup created in setUp ---
        test_case_ids = set(self.case_accessor.get_case_ids_in_domain(type='test'))
        self.assertEqual(5, len(test_case_ids))
        test_cases = [
            self.case_accessor.get_case(test_case_id)
            for test_case_id in test_case_ids
        ]
        self.assertItemsEqual(
            [
                test_case.dynamic_case_properties()
                for test_case in test_cases
            ],
            [
                OrderedDict([
                    ('date_tested', ''),
                    ('migration_created_case', 'true'),
                    ('migration_followup_id', str(1)),
                ]),
                OrderedDict([
                    ('date_tested', ''),
                    ('migration_created_case', 'true'),
                    ('migration_followup_id', str(2)),
                ]),
                OrderedDict([
                    ('date_tested', ''),
                    ('migration_created_case', 'true'),
                    ('migration_followup_id', str(3)),
                ]),
                OrderedDict([
                    ('date_tested', ''),
                    ('migration_created_case', 'true'),
                    ('migration_followup_id', str(4)),
                ]),
                OrderedDict([
                    ('date_tested', ''),
                    ('migration_created_case', 'true'),
                    ('migration_followup_id', str(5)),
                ]),
            ]
        )
        # Every test case extends the occurrence case and is unowned ('-').
        for test_case in test_cases:
            self.assertEqual('-', test_case.owner_id)
            self.assertEqual(len(test_case.indices), 1)
            self._assertIndexEqual(
                CommCareCaseIndex(
                    identifier='host',
                    referenced_type='occurrence',
                    referenced_id=occurrence_case.get_id,
                    relationship='extension',
                ),
                test_case.indices[0]
            )
@run_with_all_backends
def test_case_update(self):
call_command('create_enikshay_cases', self.domain)
new_addhaar_number = 867386000001
self.patient_detail.paadharno = new_addhaar_number
self.patient_detail.dcpulmunory = 'N'
self.patient_detail.save()
self.outcome.HIVStatus = 'positive'
self.outcome.save()
call_command('create_enikshay_cases', self.domain)
person_case_ids = self.case_accessor.get_case_ids_in_domain(type='person')
self.assertEqual(1, len(person_case_ids))
person_case = self.case_accessor.get_case(person_case_ids[0])
self.assertEqual(person_case.dynamic_case_properties()['aadhaar_number'], str(new_addhaar_number))
occurrence_case_ids = self.case_accessor.get_case_ids_in_domain(type='occurrence')
self.assertEqual(1, len(occurrence_case_ids))
occurrence_case = self.case_accessor.get_case(occurrence_case_ids[0])
self.assertEqual(occurrence_case.dynamic_case_properties()['hiv_status'], 'positive')
episode_case_ids = self.case_accessor.get_case_ids_in_domain(type='episode')
self.assertEqual(1, len(episode_case_ids))
episode_case = self.case_accessor.get_case(episode_case_ids[0])
self.assertEqual(episode_case.dynamic_case_properties()['disease_classification'], 'extra_pulmonary')
@run_with_all_backends
def test_location_not_found(self):
self.phi.delete()
call_command('create_enikshay_cases', self.domain)
person_case_ids = self.case_accessor.get_case_ids_in_domain(type='person')
self.assertEqual(1, len(person_case_ids))
person_case = self.case_accessor.get_case(person_case_ids[0])
self.assertEqual(person_case.owner_id, ARCHIVED_CASE_OWNER_ID)
self.assertEqual(person_case.dynamic_case_properties()['archive_reason'], 'migration_location_not_found')
self.assertEqual(person_case.dynamic_case_properties()['migration_error'], 'location_not_found')
self.assertEqual(person_case.dynamic_case_properties()['migration_error_details'], 'MH-ABD-05-16')
def _assertIndexEqual(self, index_1, index_2):
self.assertEqual(index_1.identifier, index_2.identifier)
self.assertEqual(index_1.referenced_type, index_2.referenced_type)
self.assertEqual(index_1.referenced_id, index_2.referenced_id)
self.assertEqual(index_1.relationship, index_2.relationship)
update tests
from collections import OrderedDict
from datetime import date, datetime
from django.core.management import call_command
from django.test import TestCase
from mock import patch
from casexml.apps.case.const import ARCHIVED_CASE_OWNER_ID
from casexml.apps.case.sharedmodels import CommCareCaseIndex
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.tests.utils import run_with_all_backends
from custom.enikshay.nikshay_datamigration.models import Followup, Outcome, PatientDetail
from custom.enikshay.tests.utils import ENikshayLocationStructureMixin
class TestCreateEnikshayCases(ENikshayLocationStructureMixin, TestCase):
    """Tests for the ``create_enikshay_cases`` management command, which
    migrates Nikshay datamigration models (PatientDetail, Outcome, Followup)
    into CommCare person/occurrence/episode/test cases.
    """
    def setUp(self):
        """Create one patient with an outcome and five followups in the
        test domain; the mixin supplies the location hierarchy (sto/dto/phi).
        """
        self.domain = "enikshay-test-domain"
        super(TestCreateEnikshayCases, self).setUp()
        self.patient_detail = PatientDetail.objects.create(
            PregId='MH-ABD-05-16-0001',
            Tbunitcode=1,
            pname='A B C',
            pgender='M',
            page=18,
            poccupation='4',
            paadharno=867386000000,
            paddress='Cambridge MA',
            pmob='5432109876',
            pregdate1=date(2016, 12, 13),
            cname='Secondary name',
            caddress='Secondary address',
            cmob='1234567890',
            dcpulmunory='Y',
            dotname='Bubble Bubbles',
            dotmob='9876543210',
            dotpType=1,
            PHI=2,
            atbtreatment='',
            Ptype=4,
            pcategory=4,
            cvisitedDate1='2016-12-25 00:00:00.000',
            InitiationDate1='2016-12-22 16:06:47.726',
            dotmosignDate1='2016-12-23 00:00:00.000',
        )
        self.outcome = Outcome.objects.create(
            PatientId=self.patient_detail,
            HIVStatus='Neg',
            loginDate=datetime(2016, 1, 2),
        )
        # Household migration intentionally disabled for now:
        # Household.objects.create(
        #     PatientID=patient_detail,
        # )
        # Five followups -> five migrated 'test' cases.
        for i in range(5):
            Followup.objects.create(
                id=(i + 1),
                PatientID=self.patient_detail,
            )
        self.case_accessor = CaseAccessors(self.domain)
    def tearDown(self):
        """Remove the datamigration rows so runs are independent."""
        Outcome.objects.all().delete()
        Followup.objects.all().delete()
        # Household.objects.all().delete()
        PatientDetail.objects.all().delete()
        super(TestCreateEnikshayCases, self).tearDown()
    @run_with_all_backends
    @patch('custom.enikshay.nikshay_datamigration.factory.datetime')
    def test_case_creation(self, mock_datetime):
        """One migration run builds the full case structure with the
        expected properties, names, owners and extension indices.
        """
        # Pin utcnow so the generated occurrence_id below is deterministic
        # (presumably derived from the timestamp -- TODO confirm in factory).
        mock_datetime.utcnow.return_value = datetime(2016, 9, 8, 1, 2, 3, 4123)
        call_command('create_enikshay_cases', self.domain)
        person_case_ids = self.case_accessor.get_case_ids_in_domain(type='person')
        self.assertEqual(1, len(person_case_ids))
        person_case = self.case_accessor.get_case(person_case_ids[0])
        self.assertEqual(
            OrderedDict([
                ('aadhaar_number', '867386000000'),
                ('age', '18'),
                ('age_entered', '18'),
                ('contact_phone_number', '5432109876'),
                ('current_address', 'Cambridge MA'),
                ('current_address_district_choice', self.dto.location_id),
                ('current_address_state_choice', self.sto.location_id),
                ('dob', '1998-07-01'),
                ('dob_known', 'no'),
                ('first_name', 'A B'),
                ('last_name', 'C'),
                ('migration_created_case', 'true'),
                ('nikshay_id', 'MH-ABD-05-16-0001'),
                ('person_id', 'FROM_NIKSHAY_MH-ABD-05-16-0001'),
                ('phi', 'PHI'),
                ('secondary_contact_name_address', 'Secondary name, Secondary address'),
                ('secondary_contact_phone_number', '1234567890'),
                ('sex', 'male'),
                ('tu_choice', 'TU'),
            ]),
            person_case.dynamic_case_properties()
        )
        self.assertEqual('MH-ABD-05-16-0001', person_case.external_id)
        self.assertEqual('A B C', person_case.name)
        self.assertEqual(self.phi.location_id, person_case.owner_id)
        # make sure the case is only created/modified by a single form
        self.assertEqual(1, len(person_case.xform_ids))
        occurrence_case_ids = self.case_accessor.get_case_ids_in_domain(type='occurrence')
        self.assertEqual(1, len(occurrence_case_ids))
        occurrence_case = self.case_accessor.get_case(occurrence_case_ids[0])
        self.assertEqual(
            OrderedDict([
                ('current_episode_type', 'confirmed_tb'),
                ('hiv_status', 'non_reactive'),
                ('ihv_date', '2016-12-25'),
                ('initial_home_visit_status', 'completed'),
                ('migration_created_case', 'true'),
                ('occurrence_episode_count', '1'),
                ('occurrence_id', '20160908010203004'),
            ]),
            occurrence_case.dynamic_case_properties()
        )
        self.assertEqual('Occurrence #1', occurrence_case.name)
        self.assertEqual(len(occurrence_case.indices), 1)
        # Occurrence extends the person case.
        self._assertIndexEqual(
            CommCareCaseIndex(
                identifier='host',
                referenced_type='person',
                referenced_id=person_case.get_id,
                relationship='extension',
            ),
            occurrence_case.indices[0]
        )
        self.assertEqual('-', occurrence_case.owner_id)
        # make sure the case is only created/modified by a single form
        self.assertEqual(1, len(occurrence_case.xform_ids))
        episode_case_ids = self.case_accessor.get_case_ids_in_domain(type='episode')
        self.assertEqual(1, len(episode_case_ids))
        episode_case = self.case_accessor.get_case(episode_case_ids[0])
        self.assertEqual(
            OrderedDict([
                ('date_of_mo_signature', '2016-12-23'),
                ('disease_classification', 'pulmonary'),
                ('dots_99_enabled', 'false'),
                ('episode_pending_registration', 'no'),
                ('episode_type', 'confirmed_tb'),
                ('migration_created_case', 'true'),
                ('occupation', 'physical_mathematical_and_engineering'),
                ('patient_type_choice', 'treatment_after_lfu'),
                ('treatment_initiation_date', '2016-12-22'),
                ('treatment_supporter_designation', 'health_worker'),
                ('treatment_supporter_first_name', 'Bubble'),
                ('treatment_supporter_last_name', 'Bubbles'),
                ('treatment_supporter_mobile_number', '9876543210'),
            ]),
            episode_case.dynamic_case_properties()
        )
        self.assertEqual('Episode #1: Confirmed TB (Patient)', episode_case.name)
        self.assertEqual(datetime(2016, 12, 13), episode_case.opened_on)
        self.assertEqual('-', episode_case.owner_id)
        self.assertEqual(len(episode_case.indices), 1)
        # Episode extends the occurrence case.
        self._assertIndexEqual(
            CommCareCaseIndex(
                identifier='host',
                referenced_type='occurrence',
                referenced_id=occurrence_case.get_id,
                relationship='extension',
            ),
            episode_case.indices[0]
        )
        # make sure the case is only created/modified by a single form
        self.assertEqual(1, len(episode_case.xform_ids))
        test_case_ids = set(self.case_accessor.get_case_ids_in_domain(type='test'))
        self.assertEqual(5, len(test_case_ids))
        test_cases = [
            self.case_accessor.get_case(test_case_id)
            for test_case_id in test_case_ids
        ]
        # NOTE(review): assertItemsEqual is the Python 2 spelling of
        # assertCountEqual -- order-independent comparison of the five
        # migrated followup/test cases.
        self.assertItemsEqual(
            [
                test_case.dynamic_case_properties()
                for test_case in test_cases
            ],
            [
                OrderedDict([
                    ('date_tested', ''),
                    ('migration_created_case', 'true'),
                    ('migration_followup_id', str(1)),
                ]),
                OrderedDict([
                    ('date_tested', ''),
                    ('migration_created_case', 'true'),
                    ('migration_followup_id', str(2)),
                ]),
                OrderedDict([
                    ('date_tested', ''),
                    ('migration_created_case', 'true'),
                    ('migration_followup_id', str(3)),
                ]),
                OrderedDict([
                    ('date_tested', ''),
                    ('migration_created_case', 'true'),
                    ('migration_followup_id', str(4)),
                ]),
                OrderedDict([
                    ('date_tested', ''),
                    ('migration_created_case', 'true'),
                    ('migration_followup_id', str(5)),
                ]),
            ]
        )
        # Every test case is unowned and extends the occurrence case.
        for test_case in test_cases:
            self.assertEqual('-', test_case.owner_id)
            self.assertEqual(len(test_case.indices), 1)
            self._assertIndexEqual(
                CommCareCaseIndex(
                    identifier='host',
                    referenced_type='occurrence',
                    referenced_id=occurrence_case.get_id,
                    relationship='extension',
                ),
                test_case.indices[0]
            )
    @run_with_all_backends
    def test_case_update(self):
        """A second migration run updates existing cases instead of duplicating them."""
        call_command('create_enikshay_cases', self.domain)
        new_addhaar_number = 867386000001
        self.patient_detail.paadharno = new_addhaar_number
        self.patient_detail.dcpulmunory = 'N'
        self.patient_detail.save()
        self.outcome.HIVStatus = 'Pos'
        self.outcome.save()
        call_command('create_enikshay_cases', self.domain)
        person_case_ids = self.case_accessor.get_case_ids_in_domain(type='person')
        self.assertEqual(1, len(person_case_ids))
        person_case = self.case_accessor.get_case(person_case_ids[0])
        self.assertEqual(person_case.dynamic_case_properties()['aadhaar_number'], str(new_addhaar_number))
        occurrence_case_ids = self.case_accessor.get_case_ids_in_domain(type='occurrence')
        self.assertEqual(1, len(occurrence_case_ids))
        occurrence_case = self.case_accessor.get_case(occurrence_case_ids[0])
        # Nikshay 'Pos' maps to the CommCare value 'reactive'.
        self.assertEqual(occurrence_case.dynamic_case_properties()['hiv_status'], 'reactive')
        episode_case_ids = self.case_accessor.get_case_ids_in_domain(type='episode')
        self.assertEqual(1, len(episode_case_ids))
        episode_case = self.case_accessor.get_case(episode_case_ids[0])
        self.assertEqual(episode_case.dynamic_case_properties()['disease_classification'], 'extra_pulmonary')
    @run_with_all_backends
    def test_location_not_found(self):
        """If the patient's PHI location is missing, the person case is archived
        with machine-readable error properties.
        """
        self.phi.delete()
        call_command('create_enikshay_cases', self.domain)
        person_case_ids = self.case_accessor.get_case_ids_in_domain(type='person')
        self.assertEqual(1, len(person_case_ids))
        person_case = self.case_accessor.get_case(person_case_ids[0])
        self.assertEqual(person_case.owner_id, ARCHIVED_CASE_OWNER_ID)
        self.assertEqual(person_case.dynamic_case_properties()['archive_reason'], 'migration_location_not_found')
        self.assertEqual(person_case.dynamic_case_properties()['migration_error'], 'location_not_found')
        self.assertEqual(person_case.dynamic_case_properties()['migration_error_details'], 'MH-ABD-05-16')
    def _assertIndexEqual(self, index_1, index_2):
        # Compare case indices field by field (CommCareCaseIndex has no
        # usable __eq__ for this purpose here).
        self.assertEqual(index_1.identifier, index_2.identifier)
        self.assertEqual(index_1.referenced_type, index_2.referenced_type)
        self.assertEqual(index_1.referenced_id, index_2.referenced_id)
        self.assertEqual(index_1.relationship, index_2.relationship)
|
# -*- coding: utf-8 -*-
##
## $Id$
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints short title
"""
__revision__ = "$Id$"
def format(bfo, highlight="no", multilang='no'):
    """
    Prints a short title, suitable for brief format.

    @param highlight highlights the words corresponding to search query if set to 'yes'
    @param multilang if 'yes', use the French title field (246_1a) when the
                     current display language is French; otherwise 245__a
    """
    # Bug fix: 'multilang' was referenced below but never defined, so every
    # call raised NameError.  It is now an optional parameter (default 'no',
    # preserving the intended single-language behavior).
    if multilang == 'yes':
        if bfo.lang == 'fr':
            title = bfo.field('246_1a')
        else:
            title = bfo.field('245__a')
    else:
        title = bfo.field('245__a')
    title_remainder = bfo.field('245__b')
    edition_statement = bfo.field('250__a')
    out = title
    if len(title_remainder) > 0:
        out += " : " + title_remainder
    if len(edition_statement) > 0:
        out += " ; " + edition_statement
    # Try to display 'Conference' title if other titles were not found
    if out == '':
        out += bfo.field('111__a')
    if highlight == 'yes':
        from invenio import bibformat_utils
        # Bug fix: the closing tag must match the opening <span>
        # (previously '</style>').
        out = bibformat_utils.highlight(out, bfo.search_pattern,
                                        prefix_tag="<span style='font-weight: bolder'>",
                                        suffix_tag='</span>')
    return out
bugfix
# -*- coding: utf-8 -*-
##
## $Id$
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints short title
"""
__revision__ = "$Id$"
def format(bfo, highlight="no", multilang='no'):
    """
    Prints a short title, suitable for brief format.

    @param highlight highlights the words corresponding to search query if set to 'yes'
    @param multilang if 'yes', use the French title field (246_1a) when the
                     current display language is French; otherwise 245__a
    """
    if multilang == 'yes':
        if bfo.lang == 'fr':
            title = bfo.field('246_1a')
        else:
            title = bfo.field('245__a')
    else:
        title = bfo.field('245__a')
    title_remainder = bfo.field('245__b')
    edition_statement = bfo.field('250__a')
    out = title
    if len(title_remainder) > 0:
        out += " : " + title_remainder
    if len(edition_statement) > 0:
        out += " ; " + edition_statement
    # Try to display 'Conference' title if other titles were not found
    if out == '':
        out += bfo.field('111__a')
    if highlight == 'yes':
        from invenio import bibformat_utils
        # Bug fix: the closing tag must match the opening <span>
        # (previously '</style>', which is invalid HTML here).
        out = bibformat_utils.highlight(out, bfo.search_pattern,
                                        prefix_tag="<span style='font-weight: bolder'>",
                                        suffix_tag='</span>')
    return out
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
import requests
import json
import sys
import logging
from urllib.parse import urljoin
from urllib.parse import quote
class dict_diff(object):
    """
    Compare two dicts: report keys added, keys removed, shared keys whose
    values changed, and shared keys whose values are unchanged.  Each
    report method returns a set of keys, or None when there is nothing
    to report (a deliberate API convention preserved here).
    """
    def __init__(self, current_dict, past_dict):
        self.current_dict = current_dict
        self.past_dict = past_dict
        self.current_keys = set(current_dict.keys())
        self.past_keys = set(past_dict.keys())
        self.intersect = self.current_keys & self.past_keys
    def _or_none(self, keys):
        # Empty result is reported as None, not an empty set.
        return keys if keys else None
    def added(self):
        """Keys present now but absent from the past dict (or None)."""
        return self._or_none(self.current_keys - self.intersect)
    def removed(self):
        """Keys present in the past dict but absent now (or None)."""
        return self._or_none(self.past_keys - self.intersect)
    def changed(self):
        """Shared keys whose values differ (or None)."""
        return self._or_none(set(
            key for key in self.intersect
            if self.past_dict[key] != self.current_dict[key]
        ))
    def unchanged(self):
        """Shared keys whose values are equal (or None)."""
        return self._or_none(set(
            key for key in self.intersect
            if self.past_dict[key] == self.current_dict[key]
        ))
    def same(self):
        """True when both dicts hold exactly the same key/value pairs."""
        return self.added() is None and self.removed() is None and self.changed() is None
class ENC_Key:
    """Credentials for one named entry in a keypairs JSON file.

    The file maps key names to dicts with 'key', 'secret' and 'server';
    the server URL is normalized to end with a slash.
    """
    def __init__(self, keyfile, keyname):
        with open(keyfile, 'r') as keys_f:
            keys = json.loads(keys_f.read())
        key_dict = keys[keyname]
        self.authid = key_dict['key']
        self.authpw = key_dict['secret']
        self.server = key_dict['server']
        if not self.server.endswith("/"):
            self.server += "/"
class ENC_Connection(object):
    """Bundle of server URL, basic-auth tuple and JSON headers for requests."""
    def __init__(self, key):
        self.server = key.server
        self.auth = (key.authid, key.authpw)
        self.headers = {'content-type': 'application/json'}
class ENC_Collection(object):
    """A collection endpoint on an ENCODE server.

    Derives the collection name, search name and schema file name from
    ``supplied_name`` (plural, ``*.json`` schema file name, or singular),
    then fetches the schema and all items of the collection.
    """
    def __init__(self, connection, supplied_name, frame='object'):
        if supplied_name.endswith('s'):
            self.name = supplied_name.replace('_', '-')
            # Bug fix: rstrip('s') strips *every* trailing 's' character
            # (e.g. 'glass' -> 'gla'); drop exactly one to singularize.
            self.search_name = supplied_name[:-1].replace('-', '_')
            self.schema_name = self.search_name + '.json'
        elif supplied_name.endswith('.json'):
            # Bug fix: rstrip('.json') strips any run of the characters
            # '.', 'j', 's', 'o', 'n' (e.g. 'human.json' -> 'huma');
            # remove exactly the '.json' suffix instead.
            base = supplied_name[:-len('.json')]
            self.name = base.replace('_', '-')
            self.search_name = base.replace('-', '_')
            self.schema_name = supplied_name
        else:
            self.name = supplied_name.replace('_', '-') + 's'
            self.search_name = supplied_name.replace('-', '_')
            self.schema_name = supplied_name.replace('-', '_') + '.json'
        schema_uri = '/profiles/' + self.schema_name
        self.connection = connection
        self.server = connection.server
        self.schema = get_ENCODE(schema_uri, connection)
        self.frame = frame
        search_string = ('/search/?format=json&limit=all&type=%s&frame=%s'
                         % (self.search_name, frame))
        collection = get_ENCODE(search_string, connection)
        self.items = collection['@graph']
        # Lazily created elasticsearch connection (see query()).
        self.es_connection = None
    def query(self, query_dict, maxhits=10000):
        """Run an elasticsearch query against the server's 'encoded' index."""
        from pyelasticsearch import ElasticSearch
        if self.es_connection is None:
            es_server = self.server.rstrip('/') + ':9200'
            self.es_connection = ElasticSearch(es_server)
        results = self.es_connection.search(query_dict, index='encoded',
                                            doc_type=self.search_name,
                                            size=maxhits)
        return results
# Module-level cache of ENC_Schema objects, shared by ENC_Item.sync() so a
# schema is fetched from the server at most once per run.
# NOTE(review): a `global` statement at module scope is a no-op; the
# assignment below is what actually creates the list.
global schemas
schemas = []
class ENC_Schema(object):
    """JSON schema document fetched from a /profiles/ URI on the server."""
    def __init__(self, connection, uri):
        self.uri = uri
        self.connection = connection
        self.server = connection.server
        # Only the 'properties' map of the schema document is retained.
        self.properties = get_ENCODE(uri, connection)['properties']
class ENC_Item(object):
    """A single object on an ENCODE server.

    With ``id=None`` the item is a new, empty object that will be POSTed on
    the first ``sync()``; otherwise the object is fetched immediately at the
    requested ``frame``.
    """
    def __init__(self, connection, id, frame='object'):
        self.id = id
        self.connection = connection
        self.server = connection.server
        self.frame = frame
        if id is None:
            # New object: nothing to fetch yet.
            self.type = None
            self.properties = {}
        else:
            # Append frame= to whatever query string the id already carries.
            if id.rfind('?') == -1:
                get_string = id + '?'
            else:
                get_string = id + '&'
            get_string += 'frame=%s' % (frame)
            item = get_ENCODE(get_string, connection)
            # '@type' is a list; keep the first entry that is not 'item'.
            self.type = next(x for x in item['@type'] if x != 'item')
            self.properties = item
    def get(self, key):
        # Dict-style access that returns None for missing keys.
        try:
            return self.properties[key]
        except KeyError:
            return None
    def sync(self):
        """Push local properties to the server.

        POSTs a brand-new object (id is None), PUTs when keys were added or
        removed relative to the server copy, and PATCHes when only values
        changed.  Returns the server's response JSON.
        NOTE(review): when diff.same() is true, `new_object` is never bound
        and the final return raises UnboundLocalError -- confirm intended.
        """
        if self.id is None:  # There is no id, so this is a new object to POST
            excluded_from_post = ['schema_version']
            self.type = self.properties.pop('@type')
            schema_uri = 'profiles/%s.json' % (self.type)
            # Schemas are cached in the module-level `schemas` list.
            try:
                schema = next(x for x in schemas if x.uri == schema_uri)
            except StopIteration:
                schema = ENC_Schema(self.connection, schema_uri)
                schemas.append(schema)
            # Only send properties the schema knows about.
            post_payload = {}
            for prop in self.properties:
                if prop in schema.properties and prop not in excluded_from_post:
                    post_payload.update({prop: self.properties[prop]})
                else:
                    pass
            # should return the new object that comes back from the patch
            new_object = new_ENCODE(self.connection, self.type, post_payload)
        else:  # existing object to PATCH or PUT
            if self.id.rfind('?') == -1:
                get_string = self.id + '?'
            else:
                get_string = self.id + '&'
            get_string += 'frame=%s' % (self.frame)
            on_server = get_ENCODE(get_string, self.connection)
            diff = dict_diff(on_server, self.properties)
            if diff.same():
                logging.warning("%s: No changes to sync" % (self.id))
            elif diff.added() or diff.removed():  # PUT
                excluded_from_put = ['schema_version']
                schema_uri = '/profiles/%s.json' % (self.type)
                try:
                    schema = next(x for x in schemas if x.uri == schema_uri)
                except StopIteration:
                    schema = ENC_Schema(self.connection, schema_uri)
                    schemas.append(schema)
                # Only send properties the schema knows about.
                put_payload = {}
                for prop in self.properties:
                    if prop in schema.properties and prop not in excluded_from_put:
                        put_payload.update({prop: self.properties[prop]})
                    else:
                        pass
                # should return the new object that comes back from the patch
                new_object = replace_ENCODE(self.id, self.connection, put_payload)
            else:  # PATCH
                # Server-managed identifiers must never be patched.
                excluded_from_patch = ['schema_version', 'accession', 'uuid']
                patch_payload = {}
                for prop in diff.changed():
                    if prop not in excluded_from_patch:
                        patch_payload.update({prop: self.properties[prop]})
                # should probably return the new object that comes back from the patch
                new_object = patch_ENCODE(self.id, self.connection, patch_payload)
        return new_object
    def new_creds(self):
        # Upload credentials only exist for 'file' objects; POST to the
        # file's /upload/ endpoint and return the fresh credentials.
        if self.type == 'file':
            r = requests.post("%s/%s/upload/" % (self.connection.server, self.id),
                              auth=self.connection.auth,
                              headers=self.connection.headers,
                              data=json.dumps({}))
            return r.json()['@graph'][0]['upload_credentials']
        else:
            return None
def get_ENCODE(obj_id, connection, frame="object"):
    '''GET an ENCODE object as JSON and return as dict.

    ``obj_id`` may already carry a query string; limit=all and the frame
    are appended either way.  Non-200 responses are logged but the parsed
    body is still returned.
    '''
    if '?' in obj_id:
        url = urljoin(connection.server, obj_id+'&limit=all&frame='+frame)
    else:
        url = urljoin(connection.server, obj_id+'?limit=all&frame='+frame)
    logging.debug('GET %s' % (url))
    response = requests.get(url, auth=connection.auth, headers=connection.headers)
    logging.debug('GET RESPONSE code %s' % (response.status_code))
    try:
        if response.json():
            logging.debug('GET RESPONSE JSON: %s' % (json.dumps(response.json(), indent=4, separators=(',', ': '))))
    except ValueError:
        # Bug fix: was a bare `except:` that also swallowed
        # KeyboardInterrupt and programming errors.  A non-JSON body
        # (e.g. an HTML error page) raises ValueError from .json().
        logging.debug('GET RESPONSE text %s' % (response.text))
    if response.status_code != 200:
        logging.warning('GET failure. Response code = %s' % (response.text))
    return response.json()
def replace_ENCODE(obj_id, connection, put_input):
    '''PUT an existing ENCODE object and return the response JSON.

    ``put_input`` may be a dict (serialized here) or a pre-serialized
    JSON string.  Raises TypeError for any other payload type.
    '''
    if isinstance(put_input, dict):
        json_payload = json.dumps(put_input)
    elif isinstance(put_input, str):
        json_payload = put_input
    else:
        # Bug fix: previously this branch only logged a warning and then
        # crashed with UnboundLocalError on `json_payload`; fail fast with
        # a clear error instead.
        raise TypeError('Datatype to PUT is not string or dict.')
    url = urljoin(connection.server, obj_id)
    logging.debug('PUT URL : %s' % (url))
    logging.debug('PUT data: %s' % (json_payload))
    response = requests.put(url, auth=connection.auth, data=json_payload,
                            headers=connection.headers)
    logging.debug('PUT RESPONSE: %s' % (json.dumps(response.json(), indent=4,
                                        separators=(',', ': '))))
    if response.status_code != 200:
        logging.warning('PUT failure. Response = %s' % (response.text))
    return response.json()
def patch_ENCODE(obj_id, connection, patch_input):
    '''PATCH an existing ENCODE object and return the response JSON.

    ``patch_input`` may be a dict (serialized here) or a pre-serialized
    JSON string.  Raises TypeError for any other payload type.
    '''
    if isinstance(patch_input, dict):
        json_payload = json.dumps(patch_input)
    elif isinstance(patch_input, str):
        json_payload = patch_input
    else:
        # Bug fix: previously this branch only printed to stderr and then
        # crashed with UnboundLocalError on `json_payload`.
        raise TypeError('Datatype to PATCH is not string or dict.')
    url = urljoin(connection.server, obj_id)
    logging.debug('PATCH URL : %s' % (url))
    logging.debug('PATCH data: %s' % (json_payload))
    response = requests.patch(url, auth=connection.auth, data=json_payload,
                              headers=connection.headers)
    logging.debug('PATCH RESPONSE: %s' % (json.dumps(response.json(), indent=4,
                                          separators=(',', ': '))))
    if response.status_code != 200:
        logging.warning('PATCH failure. Response = %s' % (response.text))
    return response.json()
def new_ENCODE(connection, collection_name, post_input):
    '''POST an ENCODE object as JSON and return the response JSON.

    ``post_input`` may be a dict (serialized here) or a pre-serialized
    JSON string.  Raises TypeError for any other payload type.
    '''
    if isinstance(post_input, dict):
        json_payload = json.dumps(post_input)
    elif isinstance(post_input, str):
        json_payload = post_input
    else:
        # Bug fix: previously this branch only printed to stderr and then
        # continued with `json_payload` unbound.
        raise TypeError('Datatype to POST is not string or dict.')
    url = urljoin(connection.server, collection_name)
    logging.debug("POST URL : %s" % (url))
    logging.debug("POST data: %s" % (json.dumps(post_input,
                                     sort_keys=True, indent=4,
                                     separators=(',', ': '))))
    response = requests.post(url, auth=connection.auth,
                             headers=connection.headers, data=json_payload)
    logging.debug("POST RESPONSE: %s" % (json.dumps(response.json(),
                                         indent=4, separators=(',', ': '))))
    # 201 Created is the success status for a POST.
    if response.status_code != 201:
        logging.warning('POST failure. Response = %s' % (response.text))
    logging.debug("Return object: %s" % (json.dumps(response.json(),
                                         sort_keys=True, indent=4,
                                         separators=(',', ': '))))
    return response.json()
def flat_one(JSON_obj):
    """Reduce an embedded JSON object to a single identifying value.

    Returns the value of the first of 'accession', 'name', 'email',
    'title', 'uuid', 'href' present in the dict; if none is present, or
    the input is not a dict, returns the input unchanged.
    """
    # Bug fix: the original used a bare `except:` around a list-comprehension
    # indexing trick, which also swallowed KeyboardInterrupt and hid real
    # errors; handle the "no identifier / not a dict" cases explicitly.
    if isinstance(JSON_obj, dict):
        for identifier in ('accession', 'name', 'email', 'title', 'uuid', 'href'):
            if identifier in JSON_obj:
                return JSON_obj[identifier]
    return JSON_obj
def flat_ENCODE(JSON_obj):
    """Return a copy of JSON_obj with embedded objects collapsed to their
    single identifying value (via flat_one); other values pass through."""
    flat_obj = {}
    for key, value in JSON_obj.items():
        if isinstance(value, dict):
            flat_obj[key] = flat_one(value)
        elif isinstance(value, list) and value and isinstance(value[0], dict):
            # A non-empty list of embedded objects: flatten each element.
            flat_obj[key] = [flat_one(element) for element in value]
        else:
            flat_obj[key] = value
    return flat_obj
def pprint_ENCODE(JSON_obj):
    """Pretty-print an ENCODE object; schema documents ('type': 'object')
    print only their properties, other objects are flattened first."""
    if JSON_obj.get('type') == "object":
        printable = JSON_obj['properties']
    else:
        printable = flat_ENCODE(JSON_obj)
    print(json.dumps(printable,
          sort_keys=True, indent=4, separators=(',', ': ')))
def get_fields(args, connection):
    """Dump selected fields of a set of objects as TSV on stdout.

    Accessions come from ``args.query`` (a search, run against the server)
    or one-per-line from ``args.infile``; field names come one-per-line
    from ``args.multifield`` or singly from ``args.onefield``.  Non-string
    values get a ':int' or ':list' suffix appended to their column name.
    """
    import csv
    accessions = []
    if args.query:
        # Bare type names are turned into a search URL.
        if "search" not in args.query:
            args.query = "/search/?type=" + args.query
        temp = get_ENCODE(args.query, connection).get("@graph", [])
        for obj in temp:
            if obj.get("accession"):
                accessions.append(obj["accession"])
    else:
        accessions = [line.strip() for line in open(args.infile)]
    if args.multifield:
        fields = [line.strip() for line in open(args.multifield)]
    elif args.onefield:
        fields = [args.onefield]
    else:
        fields = []
    data = {}
    header = []
    if "accession" not in fields:
        header = ["accession"]
    #for x in fields:
    #    header.append(x)
    if any(accessions) and any(fields):
        for a in accessions:
            a = quote(a)
            result = get_ENCODE(a, connection)
            temp = {}
            for f in fields:
                if result.get(f):
                    name = f
                    print(type(result[f]))
                    # Annotate the column name with the value's type so the
                    # TSV can be round-tripped (see patch_set).
                    if type(result[f]) == int:
                        name = name + ":int"
                        print("name is", name)
                    elif type(result[f]) == list:
                        name = name + ":list"
                    # else this must be a string
                    temp[name] = result[f]
                    # NOTE(review): header gains one entry per object per
                    # field -- duplicates accumulate; confirm DictWriter
                    # output is still as intended.
                    header.append(name)
            if "accession" not in fields:
                temp["accession"] = a
            data[a] = temp
    else:
        print("Could not complete request one or more arugments were not supplied")
        return
    writer = csv.DictWriter(sys.stdout, delimiter='\t', fieldnames=header)
    writer.writeheader()
    for key in data.keys():
        writer.writerow(data.get(key))
def patch_set(args, connection):
    """PATCH (or, with --remove, PUT-without) properties on a set of objects.

    Rows come from a single --accession/--field/--data triple, from a TSV
    ``args.infile``, or from stdin.  Unless ``args.update`` is set the run
    is a dry run that only prints what would change.
    """
    import csv
    data = []
    if args.update:
        print("This is an UPDATE run, data will be patched")
        if args.remove:
            print("On this run data will be REMOVED")
    else:
        print("This is a test run, nothing will be changed")
    if args.accession:
        if args.field and args.data:
            if args.array:
                args.data = [args.data]
            data.append({"accession": args.accession, args.field: args.data})
        else:
            print("Missing information! Cannot PATCH object", args.accession)
            return
    elif args.infile:
        with open(args.infile, "r") as tsvfile:
            reader = csv.DictReader(tsvfile, delimiter='\t')
            for row in reader:
                data.append(row)
    else:
        # No file given: read TSV rows from stdin.
        reader = csv.DictReader(sys.stdin, delimiter='\t')
        for row in reader:
            data.append(row)
    for d in data:
        accession = d.get("accession")
        if not accession:
            print("Missing accession! Cannot PATCH data")
            return
        new_data = d
        new_data.pop("accession")
        for key in new_data.keys():
            # Values serialized like "['a', 'b']" are parsed back to lists.
            for c in ["[", "]"]:
                if c in new_data[key]:
                    l = new_data[key].strip("[]").split(", ")
                    l = [x.replace("'", "") for x in l]
                    new_data[key] = l
            # Keys containing 'number' are coerced to int.
            if "number" in key:
                new_data[key] = int(new_data[key])
        accession = quote(accession)
        # frame='edit' returns the editable representation of the object.
        full_data = get_ENCODE(accession, connection, frame="edit")
        old_data = {}
        for key in new_data.keys():
            old_data[key] = full_data.get(key)
        if args.remove:
            if args.update:
                # Remove the listed keys and PUT the remainder back.
                put_dict = full_data
                for key in new_data.keys():
                    put_dict.pop(key, None)
                replace_ENCODE(accession, connection, put_dict)
            print("OBJECT:", accession)
            print("Removing values", str(new_data.keys()))
        else:
            if args.update:
                patch_ENCODE(accession, connection, new_data)
            print("OBJECT:", accession)
            for key in new_data.keys():
                print("OLD DATA:", key, old_data[key])
                print("NEW DATA:", key, new_data[key])
def fastq_read(connection, uri=None, filename=None, reads=1):
    '''Generator yielding the first ``reads`` fastq records as
    (header, sequence, qual_header, quality) byte-string tuples, from
    either a server ``uri`` (streamed) or a local gzipped ``filename``.
    '''
    # https://github.com/detrout/encode3-curation/blob/master/validate_encode3_aliases.py#L290
    # originally written by Diane Trout
    import gzip
    from io import BytesIO
    # Reasonable power of 2 greater than 50 + 100 + 5 + 100,
    # which is roughly what a single fastq read is.
    BLOCK_SIZE = 512
    if uri:
        url = urljoin(connection.server, quote(uri))
        data = requests.get(url, auth=connection.auth, stream=True)
        block = BytesIO(next(data.iter_content(BLOCK_SIZE * reads)))
        compressed = gzip.GzipFile(None, 'r', fileobj=block)
    elif filename:
        compressed = gzip.GzipFile(filename, 'r')
    else:
        print("No url or filename provided! Cannot access file!")
        return
    for _ in range(reads):
        header = compressed.readline().rstrip()
        sequence = compressed.readline().rstrip()
        qual_header = compressed.readline().rstrip()
        quality = compressed.readline().rstrip()
        yield (header, sequence, qual_header, quality)
patchSet attempt and get_fields update
#!/usr/bin/env python
# -*- coding: latin-1 -*-
import requests
import json
import sys
import logging
from urllib.parse import urljoin
from urllib.parse import quote
class dict_diff(object):
    """
    Compare two dicts: report keys added, keys removed, shared keys whose
    values changed, and shared keys whose values are unchanged.  Each
    report method returns a set of keys, or None when there is nothing
    to report (a deliberate API convention preserved here).
    """
    def __init__(self, current_dict, past_dict):
        self.current_dict = current_dict
        self.past_dict = past_dict
        self.current_keys = set(current_dict.keys())
        self.past_keys = set(past_dict.keys())
        self.intersect = self.current_keys & self.past_keys
    def _or_none(self, keys):
        # Empty result is reported as None, not an empty set.
        return keys if keys else None
    def added(self):
        """Keys present now but absent from the past dict (or None)."""
        return self._or_none(self.current_keys - self.intersect)
    def removed(self):
        """Keys present in the past dict but absent now (or None)."""
        return self._or_none(self.past_keys - self.intersect)
    def changed(self):
        """Shared keys whose values differ (or None)."""
        return self._or_none(set(
            key for key in self.intersect
            if self.past_dict[key] != self.current_dict[key]
        ))
    def unchanged(self):
        """Shared keys whose values are equal (or None)."""
        return self._or_none(set(
            key for key in self.intersect
            if self.past_dict[key] == self.current_dict[key]
        ))
    def same(self):
        """True when both dicts hold exactly the same key/value pairs."""
        return self.added() is None and self.removed() is None and self.changed() is None
class ENC_Key:
    """Credentials for one named entry in a keypairs JSON file.

    The file maps key names to dicts with 'key', 'secret' and 'server';
    the server URL is normalized to end with a slash.
    """
    def __init__(self, keyfile, keyname):
        with open(keyfile, 'r') as keys_f:
            keys = json.loads(keys_f.read())
        key_dict = keys[keyname]
        self.authid = key_dict['key']
        self.authpw = key_dict['secret']
        self.server = key_dict['server']
        if not self.server.endswith("/"):
            self.server += "/"
class ENC_Connection(object):
    """Bundle of server URL, basic-auth tuple and JSON headers for requests."""
    def __init__(self, key):
        self.server = key.server
        self.auth = (key.authid, key.authpw)
        self.headers = {'content-type': 'application/json'}
class ENC_Collection(object):
    """A collection endpoint on an ENCODE server.

    Derives the collection name, search name and schema file name from
    ``supplied_name`` (plural, ``*.json`` schema file name, or singular),
    then fetches the schema and all items of the collection.
    """
    def __init__(self, connection, supplied_name, frame='object'):
        if supplied_name.endswith('s'):
            self.name = supplied_name.replace('_', '-')
            # Bug fix: rstrip('s') strips *every* trailing 's' character
            # (e.g. 'glass' -> 'gla'); drop exactly one to singularize.
            self.search_name = supplied_name[:-1].replace('-', '_')
            self.schema_name = self.search_name + '.json'
        elif supplied_name.endswith('.json'):
            # Bug fix: rstrip('.json') strips any run of the characters
            # '.', 'j', 's', 'o', 'n' (e.g. 'human.json' -> 'huma');
            # remove exactly the '.json' suffix instead.
            base = supplied_name[:-len('.json')]
            self.name = base.replace('_', '-')
            self.search_name = base.replace('-', '_')
            self.schema_name = supplied_name
        else:
            self.name = supplied_name.replace('_', '-') + 's'
            self.search_name = supplied_name.replace('-', '_')
            self.schema_name = supplied_name.replace('-', '_') + '.json'
        schema_uri = '/profiles/' + self.schema_name
        self.connection = connection
        self.server = connection.server
        self.schema = get_ENCODE(schema_uri, connection)
        self.frame = frame
        search_string = ('/search/?format=json&limit=all&type=%s&frame=%s'
                         % (self.search_name, frame))
        collection = get_ENCODE(search_string, connection)
        self.items = collection['@graph']
        # Lazily created elasticsearch connection (see query()).
        self.es_connection = None
    def query(self, query_dict, maxhits=10000):
        """Run an elasticsearch query against the server's 'encoded' index."""
        from pyelasticsearch import ElasticSearch
        if self.es_connection is None:
            es_server = self.server.rstrip('/') + ':9200'
            self.es_connection = ElasticSearch(es_server)
        results = self.es_connection.search(query_dict, index='encoded',
                                            doc_type=self.search_name,
                                            size=maxhits)
        return results
# Module-level cache of ENC_Schema objects, shared by ENC_Item.sync() so a
# schema is fetched from the server at most once per run.
# NOTE(review): a `global` statement at module scope is a no-op; the
# assignment below is what actually creates the list.
global schemas
schemas = []
class ENC_Schema(object):
    """JSON schema document fetched from a /profiles/ URI on the server."""
    def __init__(self, connection, uri):
        self.uri = uri
        self.connection = connection
        self.server = connection.server
        # Only the 'properties' map of the schema document is retained.
        self.properties = get_ENCODE(uri, connection)['properties']
class ENC_Item(object):
    """A single ENCODE object: fetched by identifier, or empty (id=None)
    and staged for POSTing via sync()."""
    def __init__(self, connection, id, frame='object'):
        self.id = id
        self.connection = connection
        self.server = connection.server
        self.frame = frame
        if id is None:
            # No identifier yet: a new, empty object to POST later.
            self.type = None
            self.properties = {}
        else:
            # Preserve any query string already present on the id.
            if id.rfind('?') == -1:
                get_string = id + '?'
            else:
                get_string = id + '&'
            get_string += 'frame=%s' % (frame)
            item = get_ENCODE(get_string, connection)
            # '@type' lists specific types plus the generic 'item'.
            self.type = next(x for x in item['@type'] if x != 'item')
            self.properties = item

    def get(self, key):
        """Return the property value, or None when the key is absent."""
        try:
            return self.properties[key]
        except KeyError:
            return None

    def sync(self):
        """Push local properties to the server.

        POSTs when there is no id; otherwise PUTs when keys were added or
        removed relative to the server copy, or PATCHes when only values
        changed.  Returns the server's response JSON, or None when there
        was nothing to sync.
        """
        # Bug fix: new_object was previously assigned only on the
        # POST/PUT/PATCH paths, so 'return new_object' raised
        # UnboundLocalError when diff.same() reported no changes.
        new_object = None
        if self.id is None:  # There is no id, so this is a new object to POST
            excluded_from_post = ['schema_version']
            self.type = self.properties.pop('@type')
            schema_uri = 'profiles/%s.json' % (self.type)
            try:
                schema = next(x for x in schemas if x.uri == schema_uri)
            except StopIteration:
                schema = ENC_Schema(self.connection, schema_uri)
                schemas.append(schema)
            # Only POST properties the schema knows about.
            post_payload = {}
            for prop in self.properties:
                if prop in schema.properties and prop not in excluded_from_post:
                    post_payload.update({prop: self.properties[prop]})
            new_object = new_ENCODE(self.connection, self.type, post_payload)
        else:  # existing object to PATCH or PUT
            if self.id.rfind('?') == -1:
                get_string = self.id + '?'
            else:
                get_string = self.id + '&'
            get_string += 'frame=%s' % (self.frame)
            on_server = get_ENCODE(get_string, self.connection)
            diff = dict_diff(on_server, self.properties)
            if diff.same():
                logging.warning("%s: No changes to sync" % (self.id))
            elif diff.added() or diff.removed():  # PUT
                excluded_from_put = ['schema_version']
                schema_uri = '/profiles/%s.json' % (self.type)
                try:
                    schema = next(x for x in schemas if x.uri == schema_uri)
                except StopIteration:
                    schema = ENC_Schema(self.connection, schema_uri)
                    schemas.append(schema)
                put_payload = {}
                for prop in self.properties:
                    if prop in schema.properties and prop not in excluded_from_put:
                        put_payload.update({prop: self.properties[prop]})
                new_object = replace_ENCODE(self.id, self.connection, put_payload)
            else:  # PATCH only the properties that changed
                excluded_from_patch = ['schema_version', 'accession', 'uuid']
                patch_payload = {}
                for prop in diff.changed():
                    if prop not in excluded_from_patch:
                        patch_payload.update({prop: self.properties[prop]})
                new_object = patch_ENCODE(self.id, self.connection, patch_payload)
        return new_object

    def new_creds(self):
        """For file objects, POST for fresh upload credentials; otherwise
        return None."""
        if self.type == 'file':
            r = requests.post("%s/%s/upload/" % (self.connection.server, self.id),
                              auth=self.connection.auth,
                              headers=self.connection.headers,
                              data=json.dumps({}))
            return r.json()['@graph'][0]['upload_credentials']
        else:
            return None
def get_ENCODE(obj_id, connection, frame="object"):
    '''GET an ENCODE object and return the decoded JSON as a dict.

    obj_id may already carry a query string; limit=all and the requested
    frame are appended either way.  Failures are logged but the (possibly
    error) response body is still decoded and returned, matching the
    original contract.
    '''
    sep = '&' if '?' in obj_id else '?'
    url = urljoin(connection.server, obj_id + sep + 'limit=all&frame=' + frame)
    logging.debug('GET %s' % (url))
    response = requests.get(url, auth=connection.auth, headers=connection.headers)
    logging.debug('GET RESPONSE code %s' % (response.status_code))
    try:
        # Decode once instead of three times; non-JSON bodies raise ValueError.
        payload = response.json()
        if payload:
            logging.debug('GET RESPONSE JSON: %s' % (json.dumps(payload, indent=4, separators=(',', ': '))))
    except ValueError:
        # Narrowed from a bare 'except': only the JSON decode can fail here,
        # and a bare except would also swallow KeyboardInterrupt/SystemExit.
        logging.debug('GET RESPONSE text %s' % (response.text))
    if response.status_code != 200:
        logging.warning('GET failure. Response code = %s' % (response.text))
    return response.json()
def replace_ENCODE(obj_id, connection, put_input):
    '''PUT an existing ENCODE object and return the response JSON.

    put_input may be a dict (serialized here) or a pre-serialized JSON
    string.  Any other type raises TypeError.
    '''
    if isinstance(put_input, dict):
        json_payload = json.dumps(put_input)
    elif isinstance(put_input, str):
        json_payload = put_input
    else:
        # Previously this only logged a warning and then crashed later with
        # a NameError on the unbound json_payload; fail fast instead.
        raise TypeError('Datatype to PUT is not string or dict.')
    url = urljoin(connection.server, obj_id)
    logging.debug('PUT URL : %s' % (url))
    logging.debug('PUT data: %s' % (json_payload))
    response = requests.put(url, auth=connection.auth, data=json_payload,
                            headers=connection.headers)
    logging.debug('PUT RESPONSE: %s' % (json.dumps(response.json(), indent=4,
                                                   separators=(',', ': '))))
    if response.status_code != 200:
        logging.warning('PUT failure. Response = %s' % (response.text))
    return response.json()
def patch_ENCODE(obj_id, connection, patch_input):
    '''PATCH an existing ENCODE object and return the response JSON.

    patch_input may be a dict (serialized here) or a pre-serialized JSON
    string.  Any other type raises TypeError.
    '''
    if isinstance(patch_input, dict):
        json_payload = json.dumps(patch_input)
    elif isinstance(patch_input, str):
        json_payload = patch_input
    else:
        # Previously printed to stderr and then crashed later with a
        # NameError on the unbound json_payload; fail fast instead (and
        # consistently with replace_ENCODE).
        raise TypeError('Datatype to PATCH is not string or dict.')
    url = urljoin(connection.server, obj_id)
    logging.debug('PATCH URL : %s' % (url))
    logging.debug('PATCH data: %s' % (json_payload))
    response = requests.patch(url, auth=connection.auth, data=json_payload,
                              headers=connection.headers)
    logging.debug('PATCH RESPONSE: %s' % (json.dumps(response.json(), indent=4,
                                                     separators=(',', ': '))))
    if response.status_code != 200:
        logging.warning('PATCH failure. Response = %s' % (response.text))
    return response.json()
def new_ENCODE(connection, collection_name, post_input):
    '''POST a new ENCODE object as JSON and return the response JSON.

    post_input may be a dict (serialized here) or a pre-serialized JSON
    string.  Any other type raises TypeError.
    '''
    if isinstance(post_input, dict):
        json_payload = json.dumps(post_input)
    elif isinstance(post_input, str):
        json_payload = post_input
    else:
        # Previously printed to stderr and then crashed later with a
        # NameError on the unbound json_payload; fail fast instead.
        raise TypeError('Datatype to POST is not string or dict.')
    url = urljoin(connection.server, collection_name)
    logging.debug("POST URL : %s" % (url))
    logging.debug("POST data: %s" % (json.dumps(post_input,
                                                sort_keys=True, indent=4,
                                                separators=(',', ': '))))
    response = requests.post(url, auth=connection.auth,
                             headers=connection.headers, data=json_payload)
    logging.debug("POST RESPONSE: %s" % (json.dumps(response.json(),
                                                    indent=4, separators=(',', ': '))))
    # ENCODE answers a successful POST with 201 Created, not 200.
    if response.status_code != 201:
        logging.warning('POST failure. Response = %s' % (response.text))
    logging.debug("Return object: %s" % (json.dumps(response.json(),
                                                    sort_keys=True, indent=4,
                                                    separators=(',', ': '))))
    return response.json()
def flat_one(JSON_obj):
    '''Collapse an embedded object to a short human identifier.

    Returns the first of accession/name/email/title/uuid/href present in
    JSON_obj; if none is present (IndexError) or JSON_obj is not dict-like
    (TypeError), returns JSON_obj unchanged.
    '''
    try:
        return [JSON_obj[identifier] for identifier in
                ['accession', 'name', 'email', 'title', 'uuid', 'href']
                if identifier in JSON_obj][0]
    except (IndexError, TypeError):
        # Narrowed from a bare 'except': only these two are expected, and a
        # bare except would also swallow KeyboardInterrupt/SystemExit.
        return JSON_obj
def flat_ENCODE(JSON_obj):
    '''Return a copy of JSON_obj with embedded objects (and lists of
    embedded objects) collapsed to short identifiers via flat_one.'''
    flattened = {}
    for key, value in JSON_obj.items():
        if isinstance(value, dict):
            flattened[key] = flat_one(value)
        elif isinstance(value, list) and value != [] and isinstance(value[0], dict):
            flattened[key] = [flat_one(entry) for entry in value]
        else:
            flattened[key] = value
    return flattened
def pprint_ENCODE(JSON_obj):
    '''Pretty-print an ENCODE object: schema documents show their
    properties map, anything else is flattened first.'''
    if ('type' in JSON_obj) and (JSON_obj['type'] == "object"):
        printable = JSON_obj['properties']
    else:
        printable = flat_ENCODE(JSON_obj)
    print(json.dumps(printable, sort_keys=True, indent=4, separators=(',', ': ')))
def get_fields(args, connection):
    '''Fetch requested field(s) for a set of objects and write a TSV report
    to stdout.

    Accessions come from args.query search results or one-per-line from
    args.infile; field names come from args.multifield (file) or
    args.onefield.  Column names are suffixed ':int'/':list'/':dict' by
    value type so that patch_set can convert values back later.
    '''
    import csv
    accessions = []
    if args.query:
        # A bare type name is turned into a search query.
        if "search" not in args.query:
            args.query = "/search/?type=" + args.query
        temp = get_ENCODE(args.query, connection).get("@graph", [])
        for obj in temp:
            if obj.get("accession"):
                accessions.append(obj["accession"])
    else:
        accessions = [line.strip() for line in open(args.infile)]
    if args.multifield:
        fields = [line.strip() for line in open(args.multifield)]
    elif args.onefield:
        fields = [args.onefield]
    else:
        fields = []
    data = {}
    header = []
    if "accession" not in fields:
        header = ["accession"]
    if any(accessions) and any(fields):
        for a in accessions:
            a = quote(a)
            result = get_ENCODE(a, connection)
            temp = {}
            for f in fields:
                # NOTE(review): result.get(f) is a truthiness test, so
                # fields whose value is 0, False, '' or [] are silently
                # omitted from the report -- confirm that is intended.
                if result.get(f):
                    name = f
                    if type(result[f]) == int:
                        name = name + ":int"
                        # NOTE(review): leftover debug print?
                        print("name is", name)
                    elif type(result[f]) == list:
                        name = name + ":list"
                    elif type(result[f]) == dict:
                        name = name + ":dict"
                    else:
                        # this must be a string
                        pass
                    # NOTE(review): header collects one entry per object per
                    # field, so repeated columns can appear -- verify
                    # DictWriter output with multiple accessions.
                    temp[name] = result[f]
                    header.append(name)
            if "accession" not in fields:
                temp["accession"] = a
            data[a] = temp
    else:
        print("Could not complete request one or more arugments were not supplied")
        return
    writer = csv.DictWriter(sys.stdout, delimiter='\t', fieldnames=header)
    writer.writeheader()
    for key in data.keys():
        writer.writerow(data.get(key))
def patch_set(args, connection):
    '''PATCH (or, with args.remove, PUT-without) fields on a set of objects.

    Rows come from --accession/--field/--data, a TSV infile, or stdin.
    Column names may carry a ':int'/':list' type suffix (as written by
    get_fields) and are converted back before sending.  Nothing is changed
    unless args.update is set.
    '''
    import csv
    data = []
    if args.update:
        print("This is an UPDATE run, data will be patched")
        if args.remove:
            print("On this run data will be REMOVED")
    else:
        print("This is a test run, nothing will be changed")
    if args.accession:
        if args.field and args.data:
            if args.array:
                args.data = args.data.split(",")
            data.append({"accession": args.accession, args.field: args.data})
        else:
            print("Missing field/data! Cannot PATCH object", args.accession)
            return
    elif args.infile:
        with open(args.infile, "r") as tsvfile:
            reader = csv.DictReader(tsvfile, delimiter='\t')
            for row in reader:
                data.append(row)
    else:
        reader = csv.DictReader(sys.stdin, delimiter='\t')
        for row in reader:
            data.append(row)
    for d in data:
        accession = d.get("accession")
        if not accession:
            print("Missing accession! Cannot PATCH data")
            return
        new_data = d
        new_data.pop("accession")
        # Bug fix: iterate over a snapshot of the keys.  Converting a
        # 'field:type' header adds a new key to new_data, and mutating a
        # dict while iterating its live keys() view raises RuntimeError on
        # Python 3.
        for key in list(new_data.keys()):
            k = key.split(":")
            if len(k) > 1:
                if k[1] == "int":
                    new_data[k[0]] = int(new_data[key])
                elif k[1] == "list":
                    values = new_data[key].strip("[]").split(",")
                    values = [x.replace(" ", "") for x in values]
                    new_data[k[0]] = values
                else:
                    new_data[k[0]] = new_data[key]
                # Bug fix: drop the typed 'field:type' key so the raw
                # string is not sent to the server alongside the converted
                # plain 'field' key.
                del new_data[key]
        accession = quote(accession)
        full_data = get_ENCODE(accession, connection, frame="edit")
        old_data = {}
        for key in new_data.keys():
            old_data[key] = full_data.get(key)
        if args.remove:
            if args.update:
                put_dict = full_data
                for key in new_data.keys():
                    put_dict.pop(key, None)
                replace_ENCODE(accession, connection, put_dict)
            print("OBJECT:", accession)
            print("Removing values", str(new_data.keys()))
        else:
            if args.update:
                patch_ENCODE(accession, connection, new_data)
            print("OBJECT:", accession)
            for key in new_data.keys():
                print("OLD DATA:", key, old_data[key])
                print("NEW DATA:", key, new_data[key])
def fastq_read(connection, uri=None, filename=None, reads=1):
    '''Read a few fastq records

    Yields up to `reads` tuples of byte strings
    (header, sequence, qual_header, quality), streamed either from the
    server (uri) or from a local gzipped file (filename).
    '''
    # https://github.com/detrout/encode3-curation/blob/master/validate_encode3_aliases.py#L290
    # originally written by Diane Trout
    import gzip
    from io import BytesIO
    # Reasonable power of 2 greater than 50 + 100 + 5 + 100
    # which is roughly what a single fastq read is.
    if uri:
        BLOCK_SIZE = 512
        url = urljoin(connection.server, quote(uri))
        data = requests.get(url, auth=connection.auth, stream=True)
        # Only the first chunk of the stream is downloaded.
        # NOTE(review): assumes BLOCK_SIZE * reads compressed bytes
        # decompress to at least `reads` full records -- confirm for long
        # reads or high compression.
        block = BytesIO(next(data.iter_content(BLOCK_SIZE * reads)))
        compressed = gzip.GzipFile(None, 'r', fileobj=block)
    elif filename:
        compressed = gzip.GzipFile(filename, 'r')
    else:
        print("No url or filename provided! Cannot access file!")
        return
    for i in range(reads):
        # A fastq record is exactly four lines.
        header = compressed.readline().rstrip()
        sequence = compressed.readline().rstrip()
        qual_header = compressed.readline().rstrip()
        quality = compressed.readline().rstrip()
        yield (header, sequence, qual_header, quality)
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
import requests, json, jsonschema
import sys, logging
class dict_diff(object):
    """
    Compare two dicts: keys only in the current dict (added), keys only in
    the past dict (removed), and shared keys whose values differ (changed)
    or match (unchanged).  Each report returns a set, or None when empty.
    """
    def __init__(self, current_dict, past_dict):
        self.current_dict, self.past_dict = current_dict, past_dict
        self.current_keys = set(current_dict.keys())
        self.past_keys = set(past_dict.keys())
        self.intersect = self.current_keys & self.past_keys
    def _none_if_empty(self, keys):
        # The public API reports an empty result as None rather than set().
        return keys if keys else None
    def added(self):
        return self._none_if_empty(self.current_keys - self.intersect)
    def removed(self):
        return self._none_if_empty(self.past_keys - self.intersect)
    def changed(self):
        return self._none_if_empty(set(
            key for key in self.intersect
            if self.current_dict[key] != self.past_dict[key]))
    def unchanged(self):
        return self._none_if_empty(set(
            key for key in self.intersect
            if self.current_dict[key] == self.past_dict[key]))
    def same(self):
        return self.added() == None and self.removed() == None and self.changed() == None
class ENC_Key:
    # Credentials (auth id/secret, server URL) read from one named entry in
    # a JSON keypairs file.
    def __init__(self, keyfile, keyname):
        # NOTE(review): the handle is not closed if json.loads or the
        # keyname lookup raises; a 'with open(...)' block would be safer.
        keys_f = open(keyfile,'r')
        keys_json_string = keys_f.read()
        keys_f.close()
        keys = json.loads(keys_json_string)
        key_dict = keys[keyname]
        self.authid = key_dict['key']
        self.authpw = key_dict['secret']
        self.server = key_dict['server']
        # Normalize so later string concatenation forms valid URLs.
        if not self.server.endswith("/"):
            self.server += "/"
class ENC_Connection(object):
    # Bundles what each request needs: JSON headers, server URL and the
    # basic-auth pair taken from an ENC_Key.
    def __init__(self, key):
        self.headers = {'content-type': 'application/json'}
        self.server = key.server
        self.auth = (key.authid, key.authpw)
class ENC_Collection(object):
    # All ENCODE objects of one type, fetched via a /search/ query.
    # supplied_name may be plural ('biosamples'), a schema file name
    # ('biosample.json') or singular; the other forms are derived.
    def __init__(self, connection, supplied_name, frame='object'):
        if supplied_name.endswith('s'):
            # NOTE(review): rstrip('s') strips every trailing 's'
            # ('glass' -> 'gla'), not just one -- confirm intended inputs.
            self.name = supplied_name.replace('_','-')
            self.search_name = supplied_name.rstrip('s').replace('-','_')
            self.schema_name = self.search_name + '.json'
        elif supplied_name.endswith('.json'):
            # NOTE(review): rstrip('.json') strips any trailing run of the
            # characters {. j s o n} (e.g. 'person.json' -> 'per'), not the
            # literal suffix -- likely a latent bug.
            self.name = supplied_name.replace('_','-').rstrip('.json')
            self.search_name = supplied_name.replace('-','_').rstrip('.json')
            self.schema_name = supplied_name
        else:
            self.name = supplied_name.replace('_','-') + 's'
            self.search_name = supplied_name.replace('-','_')
            self.schema_name = supplied_name.replace('-','_') + '.json'
        schema_uri = '/profiles/' + self.schema_name
        self.connection = connection
        self.server = connection.server
        self.schema = get_ENCODE(schema_uri, connection)
        self.frame = frame
        search_string = '/search/?format=json&limit=all&type=%s&frame=%s' %(self.search_name, frame)
        collection = get_ENCODE(search_string, connection)
        self.items = collection['@graph']
        # The Elasticsearch client is created lazily by query().
        self.es_connection = None
    def query(self, query_dict, maxhits=10000):
        # Direct Elasticsearch query against the 'encoded' index.
        from pyelasticsearch import ElasticSearch
        if self.es_connection == None:
            es_server = self.server.rstrip('/') + ':9200'
            self.es_connection = ElasticSearch(es_server)
        results = self.es_connection.search(query_dict, index='encoded', doc_type=self.search_name, size=maxhits)
        return results
# Module-level cache of fetched ENC_Schema objects (shared by
# ENC_Item.sync); 'global' is a no-op at module scope but documents intent.
global schemas
schemas = []
class ENC_Schema(object):
    # Wraps the JSON schema ('/profiles/...' document) for one object type.
    def __init__(self, connection, uri):
        self.uri = uri
        self.connection = connection
        self.server = connection.server
        # Keep only the schema's 'properties' map; sync() consults it to
        # filter POST/PUT payloads.
        response = get_ENCODE(uri, connection)
        self.properties = response['properties']
class ENC_Item(object):
    # A single ENCODE object: fetched by identifier, or empty (id=None)
    # and staged for POSTing via sync().
    def __init__(self, connection, id, frame='object'):
        self.id = id
        self.connection = connection
        self.server = connection.server
        self.frame = frame
        if id == None:
            # New, empty object to POST later.
            self.type = None
            self.properties = {}
        else:
            # Preserve any query string already present on the id.
            if id.rfind('?') == -1:
                get_string = id + '?'
            else:
                get_string = id + '&'
            get_string += 'frame=%s' %(frame)
            item = get_ENCODE(get_string, connection)
            # '@type' lists specific types plus the generic 'item'.
            self.type = next(x for x in item['@type'] if x != 'item')
            self.properties = item
    def get(self, key):
        # Return the property value, or None when absent.
        try:
            return self.properties[key]
        except KeyError:
            return None
    def sync(self):
        # POST when there is no id; otherwise PUT when keys were added or
        # removed versus the server copy, or PATCH when only values changed.
        # NOTE(review): when diff.same() is true, new_object is never
        # assigned and 'return new_object' raises UnboundLocalError.
        if self.id == None: #There is no id, so this is a new object to POST
            excluded_from_post = ['schema_version']
            self.type = self.properties.pop('@type')
            schema_uri = '/profiles/%s.json' %(self.type)
            try:
                schema = next(x for x in schemas if x.uri == schema_uri)
            except StopIteration:
                schema = ENC_Schema(self.connection, schema_uri)
                schemas.append(schema)
            # Only POST properties the schema knows about.
            post_payload = {}
            for prop in self.properties:
                if prop in schema.properties and prop not in excluded_from_post:
                    post_payload.update({prop : self.properties[prop]})
                else:
                    pass
            # should probably return the new object that comes back from the patch
            new_object = new_ENCODE(self.connection, self.type, post_payload)
        else: #existing object to PATCH or PUT
            if self.id.rfind('?') == -1:
                get_string = self.id + '?'
            else:
                get_string = self.id + '&'
            get_string += 'frame=%s' %(self.frame)
            on_server = get_ENCODE(get_string, self.connection)
            diff = dict_diff(on_server, self.properties)
            if diff.same():
                logging.warning("%s: No changes to sync" %(self.id))
            elif diff.added() or diff.removed(): #PUT
                excluded_from_put = ['schema_version']
                schema_uri = '/profiles/%s.json' %(self.type)
                try:
                    schema = next(x for x in schemas if x.uri == schema_uri)
                except StopIteration:
                    schema = ENC_Schema(self.connection, schema_uri)
                    schemas.append(schema)
                put_payload = {}
                for prop in self.properties:
                    if prop in schema.properties and prop not in excluded_from_put:
                        put_payload.update({prop : self.properties[prop]})
                    else:
                        pass
                # should probably return the new object that comes back from the patch
                new_object = replace_ENCODE(self.id, self.connection, put_payload)
            else: #PATCH
                # NOTE(review): excluded_from_patch is defined but never
                # applied here, so accession/uuid/schema_version can end up
                # in the PATCH payload.
                excluded_from_patch = ['schema_version', 'accession', 'uuid']
                patch_payload = {}
                for prop in diff.changed():
                    patch_payload.update({prop : self.properties[prop]})
                #should probably return the new object that comes back from the patch
                new_object = patch_ENCODE(self.id, self.connection, patch_payload)
        return new_object
def get_ENCODE(obj_id, connection):
    '''GET an ENCODE object as JSON and return as dict'''
    # Append limit=all, respecting any query string already on obj_id.
    if obj_id.rfind('?') == -1:
        url = connection.server+obj_id+'?limit=all'
    else:
        url = connection.server+obj_id+'&limit=all'
    logging.debug('GET %s' %(url))
    response = requests.get(url, auth=connection.auth, headers=connection.headers)
    logging.debug('GET RESPONSE code %s' %(response.status_code))
    try:
        # NOTE(review): bare except; only a JSON decode failure is expected
        # here, and response.json() is decoded again below.
        if response.json():
            logging.debug('GET RESPONSE JSON: %s' %(json.dumps(response.json(), indent=4, separators=(',', ': '))))
    except:
        logging.debug('GET RESPONSE text %s' %(response.text))
    # Non-200 responses are logged but the body is still returned.
    if not response.status_code == 200:
        logging.warning('GET failure. Response code = %s' %(response.text))
    return response.json()
def replace_ENCODE(obj_id, connection, put_input):
    '''PUT an existing ENCODE object and return the response JSON
    '''
    if isinstance(put_input, dict):
        json_payload = json.dumps(put_input)
    elif isinstance(put_input, basestring):
        # NOTE(review): basestring is Python 2 only.
        json_payload = put_input
    else:
        # NOTE(review): only warns -- json_payload is then unbound and the
        # requests.put call below raises NameError.
        logging.warning('Datatype to put is not string or dict.')
    url = connection.server + obj_id
    logging.debug('PUT URL : %s' %(url))
    logging.debug('PUT data: %s' %(json_payload))
    response = requests.put(url, auth=connection.auth, data=json_payload, headers=connection.headers)
    logging.debug('PUT RESPONSE: %s' %(json.dumps(response.json(), indent=4, separators=(',', ': '))))
    if not response.status_code == 200:
        logging.warning('PUT failure. Response = %s' %(response.text))
    return response.json()
def patch_ENCODE(obj_id, connection, patch_input):
    '''PATCH an existing ENCODE object and return the response JSON
    '''
    if isinstance(patch_input, dict):
        json_payload = json.dumps(patch_input)
    elif isinstance(patch_input, basestring):
        # NOTE(review): basestring and the 'print >>' statement below are
        # Python 2 only.
        json_payload = patch_input
    else:
        # NOTE(review): only warns -- json_payload is then unbound and the
        # requests.patch call below raises NameError.
        print >> sys.stderr, 'Datatype to patch is not string or dict.'
    url = connection.server + obj_id
    logging.debug('PATCH URL : %s' %(url))
    logging.debug('PATCH data: %s' %(json_payload))
    response = requests.patch(url, auth=connection.auth, data=json_payload, headers=connection.headers)
    logging.debug('PATCH RESPONSE: %s' %(json.dumps(response.json(), indent=4, separators=(',', ': '))))
    if not response.status_code == 200:
        logging.warning('PATCH failure. Response = %s' %(response.text))
    return response.json()
def new_ENCODE(connection, collection_name, post_input):
    '''POST an ENCODE object as JSON and return the response JSON
    '''
    if isinstance(post_input, dict):
        json_payload = json.dumps(post_input)
    elif isinstance(post_input, basestring):
        # NOTE(review): basestring and the 'print >>' statement below are
        # Python 2 only.
        json_payload = post_input
    else:
        # NOTE(review): only warns -- json_payload is then unbound and the
        # requests.post call below raises NameError.
        print >> sys.stderr, 'Datatype to post is not string or dict.'
    url = connection.server + collection_name
    logging.debug("POST URL : %s" %(url))
    logging.debug("POST data: %s" %(json.dumps(post_input, sort_keys=True, indent=4, separators=(',', ': '))))
    response = requests.post(url, auth=connection.auth, headers=connection.headers, data=json_payload)
    logging.debug("POST RESPONSE: %s" %(json.dumps(response.json(), indent=4, separators=(',', ': '))))
    # A successful POST answers 201 Created.
    if not response.status_code == 201:
        logging.warning('POST failure. Response = %s' %(response.text))
    logging.debug("Return object: %s" %(json.dumps(response.json(), sort_keys=True, indent=4, separators=(',', ': '))))
    return response.json()
def flat_one(JSON_obj):
    # Collapse an embedded object to the first short identifier present
    # (accession/name/email/title/uuid/href); on any failure (none present,
    # or JSON_obj not dict-like) return JSON_obj unchanged.
    try:
        return [JSON_obj[identifier] for identifier in \
                ['accession', 'name', 'email', 'title', 'uuid', 'href'] \
                if identifier in JSON_obj][0]
    except:
        # NOTE(review): bare except -- IndexError/TypeError are the
        # expected failures here.
        return JSON_obj
def flat_ENCODE(JSON_obj):
    # Return a copy of JSON_obj with embedded objects (and lists of
    # embedded objects) collapsed to short identifiers via flat_one.
    flat_obj = {}
    for key in JSON_obj:
        if isinstance(JSON_obj[key], dict):
            flat_obj.update({key:flat_one(JSON_obj[key])})
        elif isinstance(JSON_obj[key], list) and JSON_obj[key] != [] and isinstance(JSON_obj[key][0], dict):
            # Only the first element's type is checked for the whole list.
            newlist = []
            for obj in JSON_obj[key]:
                newlist.append(flat_one(obj))
            flat_obj.update({key:newlist})
        else:
            flat_obj.update({key:JSON_obj[key]})
    return flat_obj
def pprint_ENCODE(JSON_obj):
    # Pretty-print an object: schema documents show their properties map,
    # everything else is flattened first.  (Python 2 print statements.)
    if ('type' in JSON_obj) and (JSON_obj['type'] == "object"):
        print json.dumps(JSON_obj['properties'], sort_keys=True, indent=4, separators=(',', ': '))
    else:
        print json.dumps(flat_ENCODE(JSON_obj), sort_keys=True, indent=4, separators=(',', ': '))
Patch only patchable properties: exclude read-only fields (schema_version, accession, uuid) from PATCH payloads.
#!/usr/bin/env python
# -*- coding: latin-1 -*-
import requests, json, jsonschema
import sys, logging
class dict_diff(object):
    """
    Calculate items added, items removed, keys same in both but changed values, keys same in both and unchanged values
    """
    def __init__(self, current_dict, past_dict):
        self.current_dict, self.past_dict = current_dict, past_dict
        self.current_keys, self.past_keys = [
            set(d.keys()) for d in (current_dict, past_dict)
        ]
        self.intersect = self.current_keys.intersection(self.past_keys)
    def added(self):
        # Keys only in current_dict; None when there are none.
        diff = self.current_keys - self.intersect
        if diff == set():
            return None
        else:
            return diff
    def removed(self):
        # Keys only in past_dict; None when there are none.
        diff = self.past_keys - self.intersect
        if diff == set():
            return None
        else:
            return diff
    def changed(self):
        # Shared keys whose values differ; None when there are none.
        diff = set(o for o in self.intersect
                   if self.past_dict[o] != self.current_dict[o])
        if diff == set():
            return None
        else:
            return diff
    def unchanged(self):
        # Shared keys whose values match; None when there are none.
        diff = set(o for o in self.intersect
                   if self.past_dict[o] == self.current_dict[o])
        if diff == set():
            return None
        else:
            return diff
    def same(self):
        # True when the two dicts are identical in keys and values.
        return self.added() == None and self.removed() == None and self.changed() == None
class ENC_Key:
    # Credentials (auth id/secret, server URL) read from one named entry in
    # a JSON keypairs file.
    def __init__(self, keyfile, keyname):
        # NOTE(review): the handle is not closed if json.loads or the
        # keyname lookup raises; a 'with open(...)' block would be safer.
        keys_f = open(keyfile,'r')
        keys_json_string = keys_f.read()
        keys_f.close()
        keys = json.loads(keys_json_string)
        key_dict = keys[keyname]
        self.authid = key_dict['key']
        self.authpw = key_dict['secret']
        self.server = key_dict['server']
        # Normalize so later string concatenation forms valid URLs.
        if not self.server.endswith("/"):
            self.server += "/"
class ENC_Connection(object):
    # Bundles what each request needs: JSON headers, server URL and the
    # basic-auth pair taken from an ENC_Key.
    def __init__(self, key):
        self.headers = {'content-type': 'application/json'}
        self.server = key.server
        self.auth = (key.authid, key.authpw)
class ENC_Collection(object):
    # All ENCODE objects of one type, fetched via a /search/ query.
    # supplied_name may be plural ('biosamples'), a schema file name
    # ('biosample.json') or singular; the other forms are derived.
    def __init__(self, connection, supplied_name, frame='object'):
        if supplied_name.endswith('s'):
            # NOTE(review): rstrip('s') strips every trailing 's'
            # ('glass' -> 'gla'), not just one -- confirm intended inputs.
            self.name = supplied_name.replace('_','-')
            self.search_name = supplied_name.rstrip('s').replace('-','_')
            self.schema_name = self.search_name + '.json'
        elif supplied_name.endswith('.json'):
            # NOTE(review): rstrip('.json') strips any trailing run of the
            # characters {. j s o n} (e.g. 'person.json' -> 'per'), not the
            # literal suffix -- likely a latent bug.
            self.name = supplied_name.replace('_','-').rstrip('.json')
            self.search_name = supplied_name.replace('-','_').rstrip('.json')
            self.schema_name = supplied_name
        else:
            self.name = supplied_name.replace('_','-') + 's'
            self.search_name = supplied_name.replace('-','_')
            self.schema_name = supplied_name.replace('-','_') + '.json'
        schema_uri = '/profiles/' + self.schema_name
        self.connection = connection
        self.server = connection.server
        self.schema = get_ENCODE(schema_uri, connection)
        self.frame = frame
        search_string = '/search/?format=json&limit=all&type=%s&frame=%s' %(self.search_name, frame)
        collection = get_ENCODE(search_string, connection)
        self.items = collection['@graph']
        # The Elasticsearch client is created lazily by query().
        self.es_connection = None
    def query(self, query_dict, maxhits=10000):
        # Direct Elasticsearch query against the 'encoded' index.
        from pyelasticsearch import ElasticSearch
        if self.es_connection == None:
            es_server = self.server.rstrip('/') + ':9200'
            self.es_connection = ElasticSearch(es_server)
        results = self.es_connection.search(query_dict, index='encoded', doc_type=self.search_name, size=maxhits)
        return results
# Module-level cache of fetched ENC_Schema objects (shared by
# ENC_Item.sync); 'global' is a no-op at module scope but documents intent.
global schemas
schemas = []
class ENC_Schema(object):
    # Wraps the JSON schema ('/profiles/...' document) for one object type.
    def __init__(self, connection, uri):
        self.uri = uri
        self.connection = connection
        self.server = connection.server
        # Keep only the schema's 'properties' map; sync() consults it to
        # filter POST/PUT payloads.
        response = get_ENCODE(uri, connection)
        self.properties = response['properties']
class ENC_Item(object):
    """A single ENCODE object: fetched by identifier, or empty (id=None)
    and staged for POSTing via sync()."""
    def __init__(self, connection, id, frame='object'):
        self.id = id
        self.connection = connection
        self.server = connection.server
        self.frame = frame
        if id is None:
            # No identifier yet: a new, empty object to POST later.
            self.type = None
            self.properties = {}
        else:
            # Preserve any query string already present on the id.
            if id.rfind('?') == -1:
                get_string = id + '?'
            else:
                get_string = id + '&'
            get_string += 'frame=%s' % (frame)
            item = get_ENCODE(get_string, connection)
            # '@type' lists specific types plus the generic 'item'.
            self.type = next(x for x in item['@type'] if x != 'item')
            self.properties = item
    def get(self, key):
        """Return the property value, or None when the key is absent."""
        try:
            return self.properties[key]
        except KeyError:
            return None
    def sync(self):
        """Push local properties to the server: POST when there is no id,
        PUT when keys were added/removed versus the server copy, PATCH when
        only values changed.  Returns the server's response JSON, or None
        when there was nothing to sync."""
        # Bug fix: new_object was only assigned on the POST/PUT/PATCH paths,
        # so 'return new_object' raised NameError when there were no changes.
        new_object = None
        if self.id is None:  # There is no id, so this is a new object to POST
            excluded_from_post = ['schema_version']
            self.type = self.properties.pop('@type')
            schema_uri = '/profiles/%s.json' % (self.type)
            try:
                schema = next(x for x in schemas if x.uri == schema_uri)
            except StopIteration:
                schema = ENC_Schema(self.connection, schema_uri)
                schemas.append(schema)
            # Only POST properties the schema knows about.
            post_payload = {}
            for prop in self.properties:
                if prop in schema.properties and prop not in excluded_from_post:
                    post_payload.update({prop: self.properties[prop]})
            new_object = new_ENCODE(self.connection, self.type, post_payload)
        else:  # existing object to PATCH or PUT
            if self.id.rfind('?') == -1:
                get_string = self.id + '?'
            else:
                get_string = self.id + '&'
            get_string += 'frame=%s' % (self.frame)
            on_server = get_ENCODE(get_string, self.connection)
            diff = dict_diff(on_server, self.properties)
            if diff.same():
                logging.warning("%s: No changes to sync" % (self.id))
            elif diff.added() or diff.removed():  # PUT
                excluded_from_put = ['schema_version']
                schema_uri = '/profiles/%s.json' % (self.type)
                try:
                    schema = next(x for x in schemas if x.uri == schema_uri)
                except StopIteration:
                    schema = ENC_Schema(self.connection, schema_uri)
                    schemas.append(schema)
                put_payload = {}
                for prop in self.properties:
                    if prop in schema.properties and prop not in excluded_from_put:
                        put_payload.update({prop: self.properties[prop]})
                new_object = replace_ENCODE(self.id, self.connection, put_payload)
            else:  # PATCH only the patchable properties that changed
                excluded_from_patch = ['schema_version', 'accession', 'uuid']
                patch_payload = {}
                # Bug fix: the previous loop header combined the iterable
                # and the filter with 'and'
                # ('for prop in diff.changed() and prop not in excluded_from_patch:'),
                # which evaluates 'prop' before it is bound and raises
                # NameError.  Iterate the changed keys and filter the
                # non-patchable properties inside the loop instead.
                for prop in diff.changed():
                    if prop not in excluded_from_patch:
                        patch_payload.update({prop: self.properties[prop]})
                new_object = patch_ENCODE(self.id, self.connection, patch_payload)
        return new_object
def get_ENCODE(obj_id, connection):
    '''GET an ENCODE object as JSON and return as dict'''
    # Append limit=all, respecting any query string already on obj_id.
    if obj_id.rfind('?') == -1:
        url = connection.server+obj_id+'?limit=all'
    else:
        url = connection.server+obj_id+'&limit=all'
    logging.debug('GET %s' %(url))
    response = requests.get(url, auth=connection.auth, headers=connection.headers)
    logging.debug('GET RESPONSE code %s' %(response.status_code))
    try:
        # NOTE(review): bare except; only a JSON decode failure is expected
        # here, and response.json() is decoded again below.
        if response.json():
            logging.debug('GET RESPONSE JSON: %s' %(json.dumps(response.json(), indent=4, separators=(',', ': '))))
    except:
        logging.debug('GET RESPONSE text %s' %(response.text))
    # Non-200 responses are logged but the body is still returned.
    if not response.status_code == 200:
        logging.warning('GET failure. Response code = %s' %(response.text))
    return response.json()
def replace_ENCODE(obj_id, connection, put_input):
    '''PUT an existing ENCODE object and return the response JSON
    '''
    if isinstance(put_input, dict):
        json_payload = json.dumps(put_input)
    elif isinstance(put_input, basestring):
        # NOTE(review): basestring is Python 2 only.
        json_payload = put_input
    else:
        # NOTE(review): only warns -- json_payload is then unbound and the
        # requests.put call below raises NameError.
        logging.warning('Datatype to put is not string or dict.')
    url = connection.server + obj_id
    logging.debug('PUT URL : %s' %(url))
    logging.debug('PUT data: %s' %(json_payload))
    response = requests.put(url, auth=connection.auth, data=json_payload, headers=connection.headers)
    logging.debug('PUT RESPONSE: %s' %(json.dumps(response.json(), indent=4, separators=(',', ': '))))
    if not response.status_code == 200:
        logging.warning('PUT failure. Response = %s' %(response.text))
    return response.json()
def patch_ENCODE(obj_id, connection, patch_input):
    '''PATCH an existing ENCODE object and return the response JSON
    '''
    if isinstance(patch_input, dict):
        json_payload = json.dumps(patch_input)
    elif isinstance(patch_input, basestring):
        # NOTE(review): basestring and the 'print >>' statement below are
        # Python 2 only.
        json_payload = patch_input
    else:
        # NOTE(review): only warns -- json_payload is then unbound and the
        # requests.patch call below raises NameError.
        print >> sys.stderr, 'Datatype to patch is not string or dict.'
    url = connection.server + obj_id
    logging.debug('PATCH URL : %s' %(url))
    logging.debug('PATCH data: %s' %(json_payload))
    response = requests.patch(url, auth=connection.auth, data=json_payload, headers=connection.headers)
    logging.debug('PATCH RESPONSE: %s' %(json.dumps(response.json(), indent=4, separators=(',', ': '))))
    if not response.status_code == 200:
        logging.warning('PATCH failure. Response = %s' %(response.text))
    return response.json()
def new_ENCODE(connection, collection_name, post_input):
    '''POST an ENCODE object as JSON and return the response JSON
    '''
    if isinstance(post_input, dict):
        json_payload = json.dumps(post_input)
    elif isinstance(post_input, basestring):
        # NOTE(review): basestring and the 'print >>' statement below are
        # Python 2 only.
        json_payload = post_input
    else:
        # NOTE(review): only warns -- json_payload is then unbound and the
        # requests.post call below raises NameError.
        print >> sys.stderr, 'Datatype to post is not string or dict.'
    url = connection.server + collection_name
    logging.debug("POST URL : %s" %(url))
    logging.debug("POST data: %s" %(json.dumps(post_input, sort_keys=True, indent=4, separators=(',', ': '))))
    response = requests.post(url, auth=connection.auth, headers=connection.headers, data=json_payload)
    logging.debug("POST RESPONSE: %s" %(json.dumps(response.json(), indent=4, separators=(',', ': '))))
    # A successful POST answers 201 Created.
    if not response.status_code == 201:
        logging.warning('POST failure. Response = %s' %(response.text))
    logging.debug("Return object: %s" %(json.dumps(response.json(), sort_keys=True, indent=4, separators=(',', ': '))))
    return response.json()
def flat_one(JSON_obj):
    # Collapse an embedded object to the first short identifier present
    # (accession/name/email/title/uuid/href); on any failure (none present,
    # or JSON_obj not dict-like) return JSON_obj unchanged.
    try:
        return [JSON_obj[identifier] for identifier in \
                ['accession', 'name', 'email', 'title', 'uuid', 'href'] \
                if identifier in JSON_obj][0]
    except:
        # NOTE(review): bare except -- IndexError/TypeError are the
        # expected failures here.
        return JSON_obj
def flat_ENCODE(JSON_obj):
    # Return a copy of JSON_obj with embedded objects (and lists of
    # embedded objects) collapsed to short identifiers via flat_one.
    flat_obj = {}
    for key in JSON_obj:
        if isinstance(JSON_obj[key], dict):
            flat_obj.update({key:flat_one(JSON_obj[key])})
        elif isinstance(JSON_obj[key], list) and JSON_obj[key] != [] and isinstance(JSON_obj[key][0], dict):
            # Only the first element's type is checked for the whole list.
            newlist = []
            for obj in JSON_obj[key]:
                newlist.append(flat_one(obj))
            flat_obj.update({key:newlist})
        else:
            flat_obj.update({key:JSON_obj[key]})
    return flat_obj
def pprint_ENCODE(JSON_obj):
    # Pretty-print an object: schema documents show their properties map,
    # everything else is flattened first.  (Python 2 print statements.)
    if ('type' in JSON_obj) and (JSON_obj['type'] == "object"):
        print json.dumps(JSON_obj['properties'], sort_keys=True, indent=4, separators=(',', ': '))
    else:
        print json.dumps(flat_ENCODE(JSON_obj), sort_keys=True, indent=4, separators=(',', ': '))
|
# -*- coding: utf-8 -*-
import sys, os.path
path = os.path.expanduser('~/RandomMetroidSolver')
if os.path.exists(path) and path not in sys.path:
sys.path.append(path)
import datetime, os, hashlib, json, subprocess, tempfile, glob, random
from datetime import datetime, date
from collections import OrderedDict
# to solve the rom
from parameters import easy, medium, hard, harder, hardcore, mania
from parameters import Knows, Settings, Controller, isKnows, isButton
from solver import Conf
from parameters import diff2text, text2diff
from solver import StandardSolver, DifficultyDisplayer, InteractiveSolver
from utils import PresetLoader, removeChars
import db
from graph_access import vanillaTransitions, vanillaBossesTransitions
from utils import isStdPreset
from graph_locations import locations
from smboolmanager import SMBoolManager
from rom import RomPatches
# Give the web2py solver session cookie a one-month lifetime (seconds) so
# the session is kept between browser restarts.
response.cookies['session_id_solver']['expires'] = 31 * 24 * 3600
# Interpreter name matching the running Python (e.g. "python3.9");
# presumably used to launch helper scripts with the same version --
# confirm at the call sites.
pythonExec = "python{}.{}".format(sys.version_info.major, sys.version_info.minor)
def maxPresetsReach():
    # Cap community presets at 2048 files to prevent a spammer creating
    # presets in a loop and filling the filesystem.
    presetCount = len(os.listdir('community_presets'))
    return presetCount >= 2048
def getPresetDir(preset):
    # Standard presets ship with the site; everything else is user-created
    # and lives in the community directory.
    return 'standard_presets' if isStdPreset(preset) else 'community_presets'
def loadPreset():
    # Load the skill-preset parameters for the current request.
    # Priority: submitted form values > session-cached dict > preset file
    # named in the session. Returns the params dict from PresetLoader.
    # load conf from session if available
    loaded = False
    if request.vars.action is not None:
        # press solve, load or save button
        if request.vars.action in ['Update', 'Create']:
            # store the changes in case the form won't be accepted
            presetDict = genJsonFromParams(request.vars)
            session.presets['presetDict'] = presetDict
            params = PresetLoader.factory(presetDict).params
            loaded = True
        elif request.vars.action in ['Load']:
            # nothing to load, we'll load the new params file with the load form code
            pass
    else:
        # no forms button pressed
        if session.presets['presetDict'] is not None:
            params = PresetLoader.factory(session.presets['presetDict']).params
            loaded = True
    if not loaded:
        # fall back to the preset file named in the session
        params = PresetLoader.factory('{}/{}.json'.format(getPresetDir(session.presets['preset']), session.presets['preset'])).params
    return params
def completePreset(params):
    # Fill in any missing entries of a loaded preset in place, using the
    # defaults from parameters.py, so older preset files keep working.
    # missing knows: copy the default value from the Knows class
    for know in Knows.__dict__:
        if isKnows(know) and know not in params['Knows'].keys():
            params['Knows'][know] = Knows.__dict__[know]
    # missing settings (bosses, hellruns, hard rooms) all default to 'Default'
    defaultedSettings = ['Kraid', 'Phantoon', 'Draygon', 'Ridley', 'MotherBrain',
                         'Ice', 'MainUpperNorfair', 'LowerNorfair',
                         'X-Ray', 'Gauntlet']
    for setting in defaultedSettings:
        if setting not in params['Settings']:
            params['Settings'][setting] = 'Default'
    # missing controller buttons: copy the default mapping
    for button in Controller.__dict__:
        if isButton(button) and button not in params['Controller'].keys():
            params['Controller'][button] = Controller.__dict__[button]
def loadPresetsList():
    # Return (standard, tournament, community) preset name lists.
    # Community presets are one json file each; '.git' is the repo dir.
    files = sorted(os.listdir('community_presets'), key=lambda v: v.upper())
    comPresets = [os.path.splitext(fileName)[0] for fileName in files if fileName != '.git']
    stdPresets = ['noob', 'casual', 'regular', 'veteran', 'speedrunner', 'master']
    tourPresets = ['Season_Races', 'Playoff_Races', 'Playoff_Races_Chozo', 'SMRAT2020']
    return (stdPresets, tourPresets, comPresets)
def loadRandoPresetsList():
    # Return (regular, tournament) randomizer preset name lists; tournament
    # presets are excluded from the regular list even if present on disk.
    tourPresets = ['Season_Races', 'Season_Races_Chozo', 'Playoff_Races', 'Playoff_Races_Chozo', 'SMRAT2020']
    files = sorted(os.listdir('rando_presets'), key=lambda v: v.upper())
    randoPresets = [os.path.splitext(fileName)[0] for fileName in files
                    if os.path.splitext(fileName)[0] not in tourPresets]
    return (randoPresets, tourPresets)
def validatePresetsParams(action):
    # Validate the preset form submission (name, password, button mapping,
    # current tab). Returns (True, None) on success or (False, message).
    if action == 'Create':
        preset = request.vars.presetCreate
    else:
        preset = request.vars.preset
    # preset name: non-empty, alphanumeric, at most 32 chars
    if IS_NOT_EMPTY()(preset)[1] is not None:
        return (False, "Preset name is empty")
    if IS_ALPHANUMERIC()(preset)[1] is not None:
        return (False, "Preset name must be alphanumeric: {}".format(preset))
    if IS_LENGTH(32)(preset)[1] is not None:
        return (False, "Preset name must be max 32 chars: {}".format(preset))
    if action in ['Create', 'Update']:
        # password: non-empty, alphanumeric, at most 32 chars
        if IS_NOT_EMPTY()(request.vars.password)[1] is not None:
            return (False, "Password is empty")
        if IS_ALPHANUMERIC()(request.vars.password)[1] is not None:
            return (False, "Password must be alphanumeric")
        if IS_LENGTH(32)(request.vars.password)[1] is not None:
            return (False, "Password must be max 32 chars")
    # check that there's not two buttons for the same action
    # (renamed from 'map', which shadowed the builtin)
    usedButtons = {}
    for button in Controller.__dict__:
        if isButton(button):
            value = request.vars[button]
            if button == "Moonwalk":
                # Moonwalk is a checkbox: absent (None) or 'on'
                if value not in [None, 'on']:
                    return (False, "Invalid value for Moonwalk: {}".format(value))
            else:
                if value is None:
                    return (False, "Button {} not set".format(button))
                else:
                    if value in usedButtons:
                        return (False, "Action {} set for two buttons: {} and {}".format(value, button, usedButtons[value]))
                    usedButtons[value] = button
    if request.vars.currenttab not in ['Global', 'Techniques1', 'Techniques2', 'Techniques3', 'Techniques4', 'Techniques5', 'Techniques6', 'Techniques7', 'Mapping']:
        return (False, "Wrong value for current tab: [{}]".format(request.vars.currenttab))
    return (True, None)
def getSkillLevelBarData(preset):
    # Build the data for the skill-level bar view: the given preset's score
    # and stats, plus the scores of all standard presets for comparison.
    result = {'standards': {}}
    result['name'] = preset
    try:
        params = PresetLoader.factory('{}/{}.json'.format(getPresetDir(preset), preset)).params
        result['custom'] = (preset, params['score'])
        # add stats on the preset
        result['knowsKnown'] = len([know for know in params['Knows'] if params['Knows'][know][0] == True])
    except:
        # preset file missing/unreadable: show placeholders instead of failing
        result['custom'] = (preset, 'N/A')
        result['knowsKnown'] = 'N/A'
    # get score of standard presets
    # NOTE(review): this loop reuses the name 'preset', clobbering the
    # parameter; the original name survives in result['custom'][0].
    for preset in ['noob', 'casual', 'regular', 'veteran', 'speedrunner', 'master', 'samus']:
        score = PresetLoader.factory('{}/{}.json'.format(getPresetDir(preset), preset)).params['score']
        result['standards'][preset] = score
    # usage stats for the requested preset
    DB = db.DB()
    result['generatedSeeds'] = DB.getGeneratedSeeds(result['custom'][0])
    result['lastAction'] = DB.getPresetLastActionDate(result['custom'][0])
    DB.close()
    # TODO: normalize result (or not ?)
    return result
def initPresetsSession():
    # Seed the presets part of the session with defaults on first visit.
    if session.presets is not None:
        return
    session.presets = {'preset': 'regular',
                       'presetDict': None,
                       'currentTab': 'Global'}
def updatePresetsSession():
    # Remember which preset the user is working on across requests.
    if request.vars.action == 'Create':
        newPreset = request.vars.presetCreate
    else:
        newPreset = request.vars.preset
        if newPreset is None:
            newPreset = 'regular'
    session.presets['preset'] = newPreset
def computeGauntlet(sm, bomb, addVaria):
    # For each Gauntlet hard-room preset, find how many ETanks are required
    # at each named difficulty when entering with the given bomb type (and
    # optionally Varia). -1 means "not recorded at that difficulty".
    result = {}
    for key in Settings.hardRoomsPresets['Gauntlet']:
        # activate this preset's requirements globally for the check below
        Settings.hardRooms['Gauntlet'] = Settings.hardRoomsPresets['Gauntlet'][key]
        sm.resetItems()
        if addVaria == True:
            sm.addItem('Varia')
        sm.addItem(bomb)
        result[key] = {easy: -1, medium: -1, hard: -1, harder: -1, hardcore: -1, mania: -1}
        # add ETanks one at a time; record the count once the room check passes
        for i in range(18):
            ret = sm.energyReserveCountOkHardRoom('Gauntlet', 0.51 if bomb == 'Bomb' else 1.0)
            if ret.bool == True:
                nEtank = 0
                # used items are named like "<count>-ETank"
                for item in ret.items:
                    if item.find('ETank') != -1:
                        nEtank = int(item[0:item.find('-ETank')])
                        break
                result[key][ret.difficulty] = nEtank
            sm.addItem('ETank')
    return result
def computeXray(sm, addVaria):
    # For each X-Ray hard-room preset (skipping the 'Solution' entry), find
    # how many ETanks are required at each named difficulty, with or
    # without Varia. -1 means "not recorded at that difficulty".
    result = {}
    for key in Settings.hardRoomsPresets['X-Ray']:
        if key == 'Solution':
            continue
        # activate this preset's requirements globally for the check below
        Settings.hardRooms['X-Ray'] = Settings.hardRoomsPresets['X-Ray'][key]
        sm.resetItems()
        if addVaria == True:
            sm.addItem('Varia')
        result[key] = {easy: -1, medium: -1, hard: -1, harder: -1, hardcore: -1, mania: -1}
        # add ETanks one at a time; record the count once the room check passes
        for i in range(18):
            ret = sm.energyReserveCountOkHardRoom('X-Ray')
            if ret.bool == True:
                nEtank = 0
                # used items are named like "<count>-ETank"
                for item in ret.items:
                    if item.find('ETank') != -1:
                        nEtank = int(item[0:item.find('-ETank')])
                        break
                result[key][ret.difficulty] = nEtank
            sm.addItem('ETank')
    return result
def computeHardRooms(hardRooms):
    # Fill hardRooms with the ETank requirements of the X-Ray and Gauntlet
    # hard rooms, for every suit/bomb combination.
    # add gravity patch (as we add it by default in the randomizer)
    RomPatches.ActivePatches.append(RomPatches.NoGravityEnvProtection)
    sm = SMBoolManager()
    # X-Ray requirements, suitless then with Varia
    hardRooms['X-Ray'] = {
        'Suitless': computeXray(sm, False),
        'Varia': computeXray(sm, True),
    }
    # Gauntlet requirements for each bomb type, suitless then with Varia
    hardRooms['Gauntlet'] = {
        'SuitlessBomb': computeGauntlet(sm, 'Bomb', False),
        'SuitlessPowerBomb': computeGauntlet(sm, 'PowerBomb', False),
        'VariaBomb': computeGauntlet(sm, 'Bomb', True),
        'VariaPowerBomb': computeGauntlet(sm, 'PowerBomb', True),
    }
    return hardRooms
def addCF(sm, count):
    # Give the SMBoolManager the items needed for 'count' crystal flashes:
    # Morph + one Power Bomb, plus one ammo refill set per flash.
    sm.addItem('Morph')
    sm.addItem('PowerBomb')
    refill = ['Missile', 'Missile', 'Super', 'Super', 'PowerBomb', 'PowerBomb']
    for _ in range(count):
        for item in refill:
            sm.addItem(item)
def computeHellruns(hellRuns):
    # Fill hellRuns with the ETank requirements of each hellrun preset for
    # the Ice, MainUpperNorfair and LowerNorfair heated areas.
    sm = SMBoolManager()
    for hellRun in ['Ice', 'MainUpperNorfair']:
        hellRuns[hellRun] = {}
        for (actualHellRun, params) in Settings.hellRunsTable[hellRun].items():
            hellRuns[hellRun][actualHellRun] = {}
            for (key, difficulties) in Settings.hellRunPresets[hellRun].items():
                if key == 'Solution':
                    continue
                # activate this preset's multipliers globally for canHellRun()
                Settings.hellRuns[hellRun] = difficulties
                hellRuns[hellRun][actualHellRun][key] = {easy: -1, medium: -1, hard: -1, harder: -1, hardcore: -1, mania: -1}
                # a None difficulties table means the hellrun is disabled
                if difficulties == None:
                    continue
                sm.resetItems()
                # add ETanks one at a time; record the count once it passes
                for etank in range(19):
                    ret = sm.canHellRun(**params)
                    if ret.bool == True:
                        nEtank = 0
                        # used items are named like "<count>-ETank"
                        for item in ret.items:
                            if item.find('ETank') != -1:
                                nEtank = int(item[0:item.find('-ETank')])
                                break
                        hellRuns[hellRun][actualHellRun][key][ret.difficulty] = nEtank
                    sm.addItem('ETank')
    # Lower Norfair is special-cased: crystal flashes and Screw Attack matter
    hellRun = 'LowerNorfair'
    hellRuns[hellRun] = {}
    hellRuns[hellRun]["NoScrew"] = computeLNHellRun(sm, False)
    hellRuns[hellRun]["Screw"] = computeLNHellRun(sm, True)
def getNearestDifficulty(difficulty):
    # Map a raw difficulty score to the closest named difficulty bucket;
    # epsilon guards against float rounding right at a boundary.
    epsilon = 0.001
    buckets = [(medium, easy),
               (hard, medium),
               (harder, hard),
               (hardcore, harder),
               (mania, hardcore)]
    for upperBound, named in buckets:
        if difficulty < upperBound - epsilon:
            return named
    return mania
def computeLNHellRun(sm, addScrew):
    # Lower Norfair hellrun requirements: for each preset, record both the
    # ETank count and the number of crystal flashes (CF) needed at each
    # named difficulty, with or without Screw Attack.
    result = {}
    hellRun = 'LowerNorfair'
    for (actualHellRun, params) in Settings.hellRunsTable[hellRun].items():
        result[actualHellRun] = {}
        for (key, difficulties) in Settings.hellRunPresets[hellRun].items():
            if key == 'Solution':
                continue
            # activate this preset's multipliers globally for canHellRun()
            Settings.hellRuns[hellRun] = difficulties
            result[actualHellRun][key] = {'ETank': {easy: -1, medium: -1, hard: -1, harder: -1, hardcore: -1, mania: -1}, 'CF': {easy: -1, medium: -1, hard: -1, harder: -1, hardcore: -1, mania: -1}}
            # a None difficulties table means the hellrun is disabled
            if difficulties == None:
                continue
            # try with 3, 2 then 1 crystal flashes worth of ammo
            for cf in range(3, 0, -1):
                sm.resetItems()
                if addScrew == True:
                    sm.addItem('ScrewAttack')
                addCF(sm, cf)
                # add ETanks one at a time; record the count once it passes
                for etank in range(19):
                    ret = sm.canHellRun(**params)
                    if ret.bool == True:
                        nEtank = 0
                        # used items are named like "<count>-ETank"
                        for item in ret.items:
                            if item.find('ETank') != -1:
                                nEtank = int(item[0:item.find('-ETank')])
                                break
                        result[actualHellRun][key]['ETank'][getNearestDifficulty(ret.difficulty)] = nEtank
                        result[actualHellRun][key]['CF'][getNearestDifficulty(ret.difficulty)] = cf
                    sm.addItem('ETank')
    return result
def presets():
    # web2py controller for the presets page: handles Load/Update/Create
    # form actions (redirecting after each, PRG pattern) and renders the
    # presets editor with the hard rooms / hellruns reference data.
    initPresetsSession()
    # use web2py builtin cache to avoid recomputing the hardrooms requirements
    hardRooms = cache.ram('hardRooms', lambda:dict(), time_expire=None)
    if len(hardRooms) == 0:
        computeHardRooms(hardRooms)
    hellRuns = cache.ram('hellRuns', lambda:dict(), time_expire=None)
    if len(hellRuns) == 0:
        computeHellruns(hellRuns)
    if request.vars.action is not None:
        # a form button was pressed: validate before acting
        (ok, msg) = validatePresetsParams(request.vars.action)
        if not ok:
            session.flash = msg
            redirect(URL(r=request, f='presets'))
        else:
            session.presets['currentTab'] = request.vars.currenttab
        if request.vars.action == 'Create':
            preset = request.vars.presetCreate
        else:
            preset = request.vars.preset
        # in web2py.js, in disableElement, remove 'working...' to have action with correct value
        if request.vars.action == 'Load':
            # check that the presets file exists
            fullPath = '{}/{}.json'.format(getPresetDir(preset), preset)
            if os.path.isfile(fullPath):
                # load it
                try:
                    params = PresetLoader.factory(fullPath).params
                    updatePresetsSession()
                    session.presets["presetDict"] = None
                except Exception as e:
                    session.flash = "L:Error loading the preset {}: {}".format(preset, e)
            else:
                session.flash = "Presets file not found: {}".format(fullPath)
            redirect(URL(r=request, f='presets'))
        elif request.vars.action in ['Update', 'Create']:
            # check if the presets file already exists
            password = request.vars['password']
            password = password.encode('utf-8')
            passwordSHA256 = hashlib.sha256(password).hexdigest()
            fullPath = '{}/{}.json'.format(getPresetDir(preset), preset)
            if os.path.isfile(fullPath):
                # load it
                end = False
                try:
                    oldParams = PresetLoader.factory(fullPath).params
                except Exception as e:
                    session.flash = "UC:Error loading the preset {}: {}".format(preset, e)
                    end = True
                if end == True:
                    redirect(URL(r=request, f='presets'))
                # check if password match
                if 'password' in oldParams and passwordSHA256 == oldParams['password']:
                    # update the presets file
                    paramsDict = genJsonFromParams(request.vars)
                    paramsDict['password'] = passwordSHA256
                    try:
                        PresetLoader.factory(paramsDict).dump(fullPath)
                        DB = db.DB()
                        DB.addPresetAction(preset, 'update')
                        DB.close()
                        updatePresetsSession()
                        session.flash = "Preset {} updated".format(preset)
                    except Exception as e:
                        session.flash = "Error writing the preset {}: {}".format(preset, e)
                    redirect(URL(r=request, f='presets'))
                else:
                    session.flash = "Password mismatch with existing presets file {}".format(preset)
                    redirect(URL(r=request, f='presets'))
            else:
                # check that there's no more than 2K presets (there's less than 2K sm rando players in the world)
                if not maxPresetsReach():
                    # write the presets file
                    paramsDict = genJsonFromParams(request.vars)
                    paramsDict['password'] = passwordSHA256
                    try:
                        PresetLoader.factory(paramsDict).dump(fullPath)
                        DB = db.DB()
                        DB.addPresetAction(preset, 'create')
                        DB.close()
                        updatePresetsSession()
                        session.flash = "Preset {} created".format(preset)
                    except Exception as e:
                        session.flash = "Error writing the preset {}: {}".format(preset, e)
                    redirect(URL(r=request, f='presets'))
                else:
                    session.flash = "Sorry, there's already 2048 presets on the website, can't add more"
                    redirect(URL(r=request, f='presets'))
    # set title
    response.title = 'Super Metroid VARIA Presets'
    # load conf from session if available
    error = False
    try:
        params = loadPreset()
    except Exception as e:
        session.presets['preset'] = 'regular'
        session.flash = "S:Error loading the preset: {}".format(e)
        error = True
    if error == True:
        redirect(URL(r=request, f='presets'))
    # load presets list
    (stdPresets, tourPresets, comPresets) = loadPresetsList()
    # add missing knows/settings
    completePreset(params)
    # compute score for skill bar
    skillBarData = getSkillLevelBarData(session.presets['preset'])
    # send values to view
    return dict(desc=Knows.desc, difficulties=diff2text,
                categories=Knows.categories, settings=params['Settings'], knows=params['Knows'],
                easy=easy, medium=medium, hard=hard, harder=harder, hardcore=hardcore, mania=mania,
                controller=params['Controller'], stdPresets=stdPresets, tourPresets=tourPresets,
                comPresets=comPresets, skillBarData=skillBarData, hardRooms=hardRooms, hellRuns=hellRuns)
def initSolverSession():
    # Seed the solver part of the session with defaults on first visit.
    if session.solver is not None:
        return
    session.solver = {
        'preset': 'regular',
        'difficultyTarget': Conf.difficultyTarget,
        'pickupStrategy': Conf.itemsPickup,
        'itemsForbidden': [],
        'romFiles': [],
        'romFile': None,
        'result': None,
        'complexity': 'simple',
    }
def updateSolverSession():
    # Persist the submitted solver form values into the session.
    if session.solver is None:
        session.solver = {}
    formVars = request.vars
    session.solver['preset'] = formVars.preset
    session.solver['difficultyTarget'] = text2diff[formVars.difficultyTarget]
    session.solver['pickupStrategy'] = formVars.pickupStrategy
    session.solver['complexity'] = formVars.complexity
    # an item is forbidden when its checkbox was submitted (any non-None value)
    allItems = ['ETank', 'Missile', 'Super', 'PowerBomb', 'Bomb', 'Charge', 'Ice', 'HiJump', 'SpeedBooster', 'Wave', 'Spazer', 'SpringBall', 'Varia', 'Plasma', 'Grapple', 'Morph', 'Reserve', 'Gravity', 'XRayScope', 'SpaceJump', 'ScrewAttack']
    session.solver['itemsForbidden'] = [item for item in allItems
                                        if formVars[item+"_bool"] is not None]
def getROMsList():
    # Only list the ROMs uploaded during this session, not every ROM
    # present on the server.
    uploaded = session.solver['romFiles']
    if uploaded is None:
        session.solver['romFiles'] = []
        return []
    if len(uploaded) == 0:
        return []
    files = sorted(os.listdir('roms'))
    bases = [os.path.splitext(fileName)[0] for fileName in files]
    return ['{}.sfc'.format(base) for base in bases if base in uploaded]
def getLastSolvedROM():
    # Name of the last ROM solved in this session, or None if none yet.
    lastRom = session.solver['romFile']
    return '{}.sfc'.format(lastRom) if lastRom is not None else None
def genPathTable(locations, displayAPs=True):
    # Render the solver's visited-locations list as an HTML table string.
    # Each entry is (location, area, subarea, item, diff, techniques, items,
    # path, cssClass); a None/empty list yields None. When displayAPs is
    # True, a grey "Path" row shows the access points walked between picks.
    if locations is None or len(locations) == 0:
        return None
    lastAP = None
    pathTable = """
<table class="full">
  <colgroup>
    <col class="locName" /><col class="area" /><col class="subarea" /><col class="item" /><col class="difficulty" /><col class="knowsUsed" /><col class="itemsUsed" />
  </colgroup>
  <tr>
    <th>Location Name</th><th>Area</th><th>SubArea</th><th>Item</th><th>Difficulty</th><th>Techniques used</th><th>Items used</th>
  </tr>
"""
    for location, area, subarea, item, diff, techniques, items, path, _class in locations:
        if path is not None:
            lastAP = path[-1]
            # skip the path row when we did not move from the previous AP
            if displayAPs == True and not (len(path) == 1 and path[0] == lastAP):
                pathTable += """<tr class="grey"><td>Path</td><td colspan="6">{}</td></tr>\n""".format(" -> ".join(path))
        (name, room) = location
        # not picked up items start with an '-'
        if item[0] != '-':
            pathTable += """
<tr class="{}">
  <td>{}</td>
  <td>{}</td>
  <td>{}</td>
  <td>{}</td>
  <td>{}</td>
  <td>{}</td>
  <td>{}</td>
</tr>
""".format(item, getRoomLink(name, room), getAreaLink(area), subarea,
           getBossImg(name) if "Boss" in _class else getItemImg(item), diff,
           getTechniques(techniques), getItems(items))
        else:
            # skipped item: struck through, no techniques/items columns
            pathTable += """
<tr class="{}">
  <td>{}</td>
  <td>{}</td>
  <td>{}</td>
  <td><div class="linethrough">{}</div></td>
  <td>{}</td>
  <td></td>
  <td></td>
</tr>
""".format(item, getRoomLink(name, room), getAreaLink(area), subarea, item, diff)
    pathTable += "</table>"
    return pathTable
def getItems(items):
    # Render item names as <img> tags; stacked consumables are encoded as
    # "<count>-<item>" (e.g. "3-ETank") and rendered with their count.
    parts = []
    for item in items:
        if '0' <= item[0] <= '9':
            # for etanks and reserves
            sep = item.find('-')
            count, name = item[:sep], item[sep+1:]
            parts.append("<span>{}-{}</span>".format(count, getItemImg(name, True)))
        else:
            parts.append(getItemImg(item, True))
    return "".join(parts)
def getTechniques(techniques):
    # Render each technique name, linking to its wiki page when one is
    # registered in Knows.desc.
    rendered = []
    for tech in techniques:
        hasLink = tech in Knows.desc and Knows.desc[tech]['href'] is not None
        if hasLink:
            rendered.append(""" <a class="marginKnows" href="{}" target="_blank">{}</a>""".format(Knows.desc[tech]['href'], tech))
        else:
            rendered.append(""" {}""".format(tech))
    return "".join(rendered)
def getRoomLink(name, room):
    # Link the location name to the room's supermetroid.run wiki page,
    # with a hover thumbnail of the room image.
    wikiPage = room.replace(' ', '_').replace("'", '%27')
    imageName = room.replace(' ', '').replace('-', '').replace("'", '')
    return """<a target="_blank" href="https://wiki.supermetroid.run/{}" data-thumbnail-src="/solver/static/images/{}.png" class="room">{}</a>""".format(wikiPage, imageName, name)
def getAreaLink(name):
    # Link an area name to its metroid.fandom.com wiki page; a couple of
    # internal names map to different page names.
    wikiNames = {"WreckedShip": "Wrecked_Ship", "LowerNorfair": "Norfair"}
    url = wikiNames.get(name, name)
    return """<a target="_blank" href="https://metroid.fandom.com/wiki/{}" data-thumbnail-src="/solver/static/images/{}.png" class="area">{}</a>""".format(url, name, name)
def getBossImg(boss):
    # <img> tag for a boss icon.
    return """<img alt="{0}" class="imageBoss" src="/solver/static/images/{0}.png" title="{0}" />""".format(boss)
def getItemImg(item, small=False):
    # <img> tag for an item icon; 'small' selects the smaller CSS class
    # used inside the items-used column.
    cssClass = "imageItems" if small == True else "imageItem"
    return """<img alt="{0}" class="{1}" src="/solver/static/images/{0}.png" title="{0}" />""".format(item, cssClass)
def prepareResult():
    # Turn the raw solver result stored in the session into view-ready data
    # (result text + HTML path tables), then clear it so it shows only once.
    if session.solver['result'] is not None:
        result = session.solver['result']
        # utf8 files
        if sys.version_info.major == 2:
            result['randomizedRom'] = result['randomizedRom'].encode('utf8', 'replace')
        # difficulty -1 is the solver's "unsolvable" marker
        if result['difficulty'] == -1:
            result['resultText'] = "The ROM \"{}\" is not finishable with the known techniques".format(result['randomizedRom'])
        else:
            if result['itemsOk'] is False:
                result['resultText'] = "The ROM \"{}\" is finishable but not all the requested items can be picked up with the known techniques. Estimated difficulty is: ".format(result['randomizedRom'])
            else:
                result['resultText'] = "The ROM \"{}\" estimated difficulty is: ".format(result['randomizedRom'])
        # add generated path (spoiler !)
        result['pathTable'] = genPathTable(result['generatedPath'])
        result['pathremainTry'] = genPathTable(result['remainTry'])
        result['pathremainMajors'] = genPathTable(result['remainMajors'], False)
        result['pathremainMinors'] = genPathTable(result['remainMinors'], False)
        result['pathskippedMajors'] = genPathTable(result['skippedMajors'], False)
        result['pathunavailMajors'] = genPathTable(result['unavailMajors'], False)
        # display the result only once
        session.solver['result'] = None
    else:
        result = None
    return result
def validateSolverParams():
    # Validate the solver form submission; returns (True, None) on success
    # or (False, errorMessage).
    for param in ['difficultyTarget', 'pickupStrategy', 'complexity']:
        if request.vars[param] is None:
            return (False, "Missing parameter {}".format(param))
    if request.vars.preset == None:
        return (False, "Missing parameter preset")
    preset = request.vars.preset
    if IS_ALPHANUMERIC()(preset)[1] is not None:
        return (False, "Wrong value for preset, must be alphanumeric")
    if IS_LENGTH(maxsize=32, minsize=1)(preset)[1] is not None:
        return (False, "Wrong length for preset, name must be between 1 and 32 characters")
    # check that preset exists
    fullPath = '{}/{}.json'.format(getPresetDir(preset), preset)
    if not os.path.isfile(fullPath):
        return (False, "Unknown preset: {}".format(preset))
    difficultyTargetChoices = ["easy", "medium", "hard", "very hard", "hardcore", "mania"]
    if request.vars.difficultyTarget not in difficultyTargetChoices:
        return (False, "Wrong value for difficultyTarget: {}, authorized values: {}".format(request.vars.difficultyTarget, difficultyTargetChoices))
    pickupStrategyChoices = ["all", "minimal", "any"]
    if request.vars.pickupStrategy not in pickupStrategyChoices:
        # fix: was 'pickupStrategyChoice' (undefined name -> NameError on bad input)
        return (False, "Wrong value for pickupStrategy: {}, authorized values: {}".format(request.vars.pickupStrategy, pickupStrategyChoices))
    complexityChoices = ["simple", "advanced"]
    if request.vars.complexity not in complexityChoices:
        return (False, "Wrong value for complexity: {}, authorized values: {}".format(request.vars.complexity, complexityChoices))
    itemsForbidden = []
    for item in ['ETank', 'Missile', 'Super', 'PowerBomb', 'Bomb', 'Charge', 'Ice', 'HiJump', 'SpeedBooster', 'Wave', 'Spazer', 'SpringBall', 'Varia', 'Plasma', 'Grapple', 'Morph', 'Reserve', 'Gravity', 'XRayScope', 'SpaceJump', 'ScrewAttack']:
        boolvar = request.vars[item+"_bool"]
        if boolvar is not None:
            if boolvar != 'on':
                return (False, "Wrong value for {}: {}, authorized values: on/off".format(item, boolvar))
    if request.vars.romJson is None and request.vars.uploadFile is None and request.vars.romFile is None:
        return (False, "Missing ROM to solve")
    if request.vars.romFile is not None:
        if IS_LENGTH(maxsize=255, minsize=1)(request.vars.romFile)[1] is not None:
            # fix: message now matches the validator bounds (was "1 and 256")
            return (False, "Wrong length for romFile, name must be between 1 and 255 characters: {}".format(request.vars.romFile))
    if request.vars.romJson is not None and len(request.vars.romJson) > 0:
        try:
            json.loads(request.vars.romJson)
        except:
            return (False, "Wrong value for romJson, must be a JSON string: [{}]".format(request.vars.romJson))
    if request.vars.uploadFile is not None:
        if type(request.vars.uploadFile) == str:
            if IS_MATCH(r'[a-zA-Z0-9_\.() ,\-]*', strict=True)(request.vars.uploadFile)[1] is not None:
                return (False, "Wrong value for uploadFile, must be a valid file name: {}".format(request.vars.uploadFile))
            if IS_LENGTH(maxsize=256, minsize=1)(request.vars.uploadFile)[1] is not None:
                # fix: message now matches the validator bounds (was "1 and 255")
                return (False, "Wrong length for uploadFile, name must be between 1 and 256 characters")
    return (True, None)
def generateJsonROM(romJsonStr):
    # Persist an uploaded ROM (sent as a json string) under roms/ and
    # return (base name without extension, path of the written json file).
    romJson = json.loads(romJsonStr)
    # handle filename with utf8 characters in it (python2 needs an explicit encode)
    romFileName = romJson["romFileName"]
    if sys.version_info.major <= 2:
        romFileName = romFileName.encode('utf8', 'replace')
    (base, ext) = os.path.splitext(romFileName)
    jsonRomFileName = 'roms/{}.json'.format(base)
    # the file name itself is not part of the stored data
    del romJson["romFileName"]
    with open(jsonRomFileName, 'w') as jsonFile:
        json.dump(romJson, jsonFile)
    return (base, jsonRomFileName)
def solver():
    # web2py controller for the solver page: on 'Solve' it resolves the ROM
    # json (fresh upload or previously uploaded file), runs the solver and
    # redirects (PRG pattern); otherwise it renders the form and any result
    # stashed in the session.
    # init session
    initSolverSession()
    if request.vars.action == 'Solve':
        (ok, msg) = validateSolverParams()
        if not ok:
            session.flash = msg
            redirect(URL(r=request, f='solver'))
        updateSolverSession()
        preset = request.vars.preset
        # new uploaded rom ?
        error = False
        if request.vars['romJson'] != '':
            try:
                (base, jsonRomFileName) = generateJsonROM(request.vars['romJson'])
                session.solver['romFile'] = base
                if base not in session.solver['romFiles']:
                    session.solver['romFiles'].append(base)
            except Exception as e:
                print("Error loading the ROM file, exception: {}".format(e))
                session.flash = "Error loading the json ROM file"
                error = True
        elif request.vars['romFile'] is not None and len(request.vars['romFile']) != 0:
            # reuse a ROM uploaded earlier in this session
            session.solver['romFile'] = os.path.splitext(request.vars['romFile'])[0]
            jsonRomFileName = 'roms/' + session.solver['romFile'] + '.json'
        else:
            session.flash = "No rom file selected for upload"
            error = True
        if not error:
            # check that the json file exists
            if not os.path.isfile(jsonRomFileName):
                session.flash = "Missing json ROM file on the server"
            else:
                try:
                    (ok, result) = computeDifficulty(jsonRomFileName, preset)
                    if not ok:
                        session.flash = result
                        redirect(URL(r=request, f='solver'))
                    # stash the result; prepareResult() consumes it after redirect
                    session.solver['result'] = result
                except Exception as e:
                    print("Error loading the ROM file, exception: {}".format(e))
                    session.flash = "Error loading the ROM file"
                redirect(URL(r=request, f='solver'))
    # display result
    result = prepareResult()
    # set title
    response.title = 'Super Metroid VARIA Solver'
    ROMs = getROMsList()
    # last solved ROM
    lastRomFile = getLastSolvedROM()
    # load presets list
    (stdPresets, tourPresets, comPresets) = loadPresetsList()
    # send values to view
    return dict(desc=Knows.desc, stdPresets=stdPresets, tourPresets=tourPresets, comPresets=comPresets, roms=ROMs,
                lastRomFile=lastRomFile, difficulties=diff2text, categories=Knows.categories,
                result=result,
                easy=easy, medium=medium, hard=hard, harder=harder, hardcore=hardcore, mania=mania)
def genJsonFromParams(vars):
    # Build a preset params dict (Knows/Settings/Controller) from the raw
    # form vars. Relies on web2py vars returning None for missing keys.
    paramsDict = {'Knows': {}, 'Settings': {}, 'Controller': {}}
    # Knows: checkbox "<know>_bool" plus difficulty select "<know>_diff"
    for var in Knows.__dict__:
        if isKnows(var):
            boolVar = vars[var+"_bool"]
            if boolVar is None:
                paramsDict['Knows'][var] = [False, 0]
            else:
                diffVar = vars[var+"_diff"]
                if diffVar is not None:
                    paramsDict['Knows'][var] = [True, text2diff[diffVar]]
    # Settings
    for hellRun in ['Ice', 'MainUpperNorfair', 'LowerNorfair']:
        value = vars[hellRun]
        if value is not None:
            paramsDict['Settings'][hellRun] = value
    for boss in ['Kraid', 'Phantoon', 'Draygon', 'Ridley', 'MotherBrain']:
        value = vars[boss]
        if value is not None:
            paramsDict['Settings'][boss] = value
    for room in ['X-Ray', 'Gauntlet']:
        value = vars[room]
        if value is not None:
            paramsDict['Settings'][room] = value
    # Controller: fall back to defaults for unset buttons
    for button in Controller.__dict__:
        if isButton(button):
            value = vars[button]
            if value is None:
                paramsDict['Controller'][button] = Controller.__dict__[button]
            else:
                if button == "Moonwalk":
                    # Moonwalk is a checkbox, stored as a boolean
                    if value != None and value == "on":
                        paramsDict['Controller'][button] = True
                    else:
                        paramsDict['Controller'][button] = False
                else:
                    paramsDict['Controller'][button] = value
    return paramsDict
def computeDifficulty(jsonRomFileName, preset):
    # Run the solver as a subprocess on the given ROM json with the given
    # preset, record the run in the DB, and return (success, result) where
    # result is the parsed solver output or an error string.
    randomizedRom = os.path.basename(jsonRomFileName.replace('json', 'sfc'))
    presetFileName = "{}/{}.json".format(getPresetDir(preset), preset)
    # temp file receiving the solver's json output
    (fd, jsonFileName) = tempfile.mkstemp()
    DB = db.DB()
    id = DB.initSolver()
    params = [
        pythonExec, os.path.expanduser("~/RandomMetroidSolver/solver.py"),
        '-r', str(jsonRomFileName),
        '--preset', presetFileName,
        '--difficultyTarget', str(session.solver['difficultyTarget']),
        '--pickupStrategy', session.solver['pickupStrategy'],
        '--type', 'web',
        '--output', jsonFileName
    ]
    for item in session.solver['itemsForbidden']:
        params += ['--itemsForbidden', item]
    DB.addSolverParams(id, randomizedRom, preset, session.solver['difficultyTarget'],
                       session.solver['pickupStrategy'], session.solver['itemsForbidden'])
    print("before calling solver: {}".format(params))
    start = datetime.now()
    ret = subprocess.call(params)
    end = datetime.now()
    duration = (end - start).total_seconds()
    print("ret: {}, duration: {}s".format(ret, duration))
    if ret == 0:
        with open(jsonFileName) as jsonFile:
            result = json.load(jsonFile)
    else:
        result = "Solver: something wrong happened while solving the ROM"
    DB.addSolverResult(id, ret, duration, result)
    DB.close()
    # clean up the temp output file
    os.close(fd)
    os.remove(jsonFileName)
    return (ret == 0, result)
def infos():
    # Static information page: only the title needs setting.
    response.title = 'Super Metroid VARIA Randomizer and Solver'
    return {}
def initRandomizerSession():
    # Seed the randomizer form defaults in the session on first visit.
    if session.randomizer is not None:
        return
    session.randomizer = {
        'complexity': "simple",
        'preset': 'regular',
        'randoPreset': "",
        'majorsSplit': "Full",
        'maxDifficulty': 'hardcore',
        'progressionSpeed': "medium",
        'progressionDifficulty': 'normal',
        'morphPlacement': "early",
        'suitsRestriction': "on",
        'hideItems': "off",
        'strictMinors': "off",
        'missileQty': "3",
        'superQty': "2",
        'powerBombQty': "1",
        'minorQty': "100",
        'energyQty': "vanilla",
        'areaRandomization': "off",
        'areaLayout': "off",
        'bossRandomization': "off",
        'funCombat': "off",
        'funMovement': "off",
        'funSuits': "off",
        'layoutPatches': "on",
        'variaTweaks': "on",
        'gravityBehaviour': "Balanced",
        'nerfedCharge': "off",
        'itemsounds': "on",
        'elevators_doors_speed': "on",
        'spinjumprestart': "off",
        'rando_speed': "off",
        'startLocation': "Landing Site",
        'animals': "off",
        'No_Music': "off",
    }
def randomizer():
    # Render the randomizer page with all the available preset lists.
    response.title = 'Super Metroid VARIA Randomizer'
    initRandomizerSession()
    stdPresets, tourPresets, comPresets = loadPresetsList()
    randoPresets, tourRandoPresets = loadRandoPresetsList()
    # empty entry used as the default value (no rando preset selected)
    randoPresets.append("")
    viewData = {
        'stdPresets': stdPresets,
        'tourPresets': tourPresets,
        'comPresets': comPresets,
        'randoPresets': randoPresets,
        'tourRandoPresets': tourRandoPresets,
    }
    return viewData
def raiseHttp(code, msg, isJson=False):
    # Abort the request with the given HTTP status; the message is
    # json-encoded when the client expects a json payload.
    #print("raiseHttp: code {} msg {} isJson {}".format(code, msg, isJson))
    body = json.dumps(msg) if isJson is True else msg
    raise HTTP(code, body)
def getInt(param, isJson=False):
    # Read an integer request parameter; reply 400 when absent or invalid.
    raw = request.vars[param]
    try:
        return int(raw)
    except:
        raiseHttp(400, "Wrong value for {}: {}, must be an int".format(param, raw), isJson)
def getFloat(param, isJson=False):
    # Read a float request parameter; reply 400 when absent or invalid.
    raw = request.vars[param]
    try:
        return float(raw)
    except:
        raiseHttp(400, "Wrong value for {}: {}, must be a float".format(param, raw), isJson)
def validateWebServiceParams(switchs, quantities, others, isJson=False):
    """Validate the randomizer web-service parameters found in request.vars.

    switchs    -- parameter names restricted to on/off/random
    quantities -- numeric parameter names (1.0-9.0, or 'random')
    others     -- remaining expected parameter names, validated per-name below
    isJson     -- when True, error responses are JSON-encoded

    Aborts with HTTP 400 (via raiseHttp) on the first invalid parameter.
    """
    parameters = switchs + quantities + others
    # every expected parameter must be present
    for param in parameters:
        if request.vars[param] is None:
            raiseHttp(400, "Missing parameter: {}".format(param), isJson)
    for switch in switchs:
        if request.vars[switch] not in ['on', 'off', 'random']:
            raiseHttp(400, "Wrong value for {}: {}, authorized values: on/off".format(switch, request.vars[switch]), isJson)
    for qty in quantities:
        if request.vars[qty] == 'random':
            continue
        qtyFloat = getFloat(qty, isJson)
        if qtyFloat < 1.0 or qtyFloat > 9.0:
            # FIX: the message was wrapped in json.dumps() here, which
            # double-encoded it when isJson is True (raiseHttp already
            # encodes in that case)
            raiseHttp(400, "Wrong value for {}: {}, must be between 1 and 9".format(qty, request.vars[qty]), isJson)
    if 'complexity' in others:
        if request.vars['complexity'] not in ['simple', 'medium', 'advanced']:
            raiseHttp(400, "Wrong value for complexity: {}, authorized values simple/medium/advanced".format(request.vars['complexity']), isJson)
    if 'paramsFileTarget' in others:
        # the skill preset content must be valid JSON
        try:
            json.loads(request.vars.paramsFileTarget)
        except Exception:
            raiseHttp(400, "Wrong value for paramsFileTarget, must be a JSON string", isJson)
    if 'seed' in others:
        seedInt = getInt('seed', isJson)
        if seedInt < 0 or seedInt > 9999999:
            # FIX: was request.vars[seed] -> NameError, the local name `seed`
            # was never defined in this function
            raiseHttp(400, "Wrong value for seed: {}, must be between 0 and 9999999".format(request.vars.seed), isJson)
    # preset / randoPreset are validated whenever they are provided
    preset = request.vars.preset
    if preset != None:
        if IS_ALPHANUMERIC()(preset)[1] is not None:
            raiseHttp(400, "Wrong value for preset, must be alphanumeric", isJson)
        if IS_LENGTH(maxsize=32, minsize=1)(preset)[1] is not None:
            raiseHttp(400, "Wrong length for preset, name must be between 1 and 32 characters", isJson)
        # check that preset exists
        fullPath = '{}/{}.json'.format(getPresetDir(preset), preset)
        if not os.path.isfile(fullPath):
            raiseHttp(400, "Unknown preset: {}".format(preset), isJson)
    randoPreset = request.vars.randoPreset
    if randoPreset != None and len(randoPreset) > 0:
        if IS_ALPHANUMERIC()(randoPreset)[1] is not None:
            raiseHttp(400, "Wrong value for randoPreset, must be alphanumeric", isJson)
        if IS_LENGTH(maxsize=32, minsize=1)(randoPreset)[1] is not None:
            raiseHttp(400, "Wrong length for randoPreset, name must be between 1 and 32 characters", isJson)
        # check that randoPreset exists
        fullPath = 'rando_presets/{}.json'.format(randoPreset)
        if not os.path.isfile(fullPath):
            raiseHttp(400, "Unknown randoPreset: {}".format(randoPreset), isJson)
    # check race mode
    if 'raceMode' in request.vars:
        if request.vars.raceMode not in ['on', 'off']:
            raiseHttp(400, "Wrong value for race mode: {}, must on/off".format(request.vars.raceMode), isJson)
    if 'majorsSplit' in others:
        if request.vars['majorsSplit'] not in ['Full', 'Major', 'Chozo', 'random']:
            raiseHttp(400, "Wrong value for majorsSplit: {}, authorized values Full/Major/Chozo/random".format(request.vars['majorsSplit']), isJson)
    if request.vars['maxDifficulty'] is not None:
        if request.vars.maxDifficulty not in ['no difficulty cap', 'easy', 'medium', 'hard', 'harder', 'hardcore', 'mania', 'random']:
            raiseHttp(400, "Wrong value for difficulty_target, authorized values: no difficulty cap/easy/medium/hard/harder/hardcore/mania", isJson)
    if 'progressionSpeed' in others:
        # progressionSpeed can be a comma-separated list of speeds
        for progSpeed in request.vars['progressionSpeed'].split(','):
            if progSpeed not in ['slowest', 'slow', 'medium', 'fast', 'fastest', 'random', 'basic', 'VARIAble']:
                raiseHttp(400, "Wrong value for progressionSpeed: {}, authorized values slowest/slow/medium/fast/fastest/basic/VARIAble".format(progSpeed), isJson)
    if 'progressionDifficulty' in others:
        if request.vars['progressionDifficulty'] not in ['easier', 'normal', 'harder', 'random']:
            raiseHttp(400, "Wrong value for progressionDifficulty: {}, authorized values easier/normal/harder".format(request.vars['progressionDifficulty']), isJson)
    if 'morphPlacement' in others:
        if request.vars['morphPlacement'] not in ['early', 'late', 'normal', 'random']:
            raiseHttp(400, "Wrong value for morphPlacement: {}, authorized values early/late/normal".format(request.vars['morphPlacement']), isJson)
    if request.vars.minorQty not in ['random', None]:
        minorQtyInt = getInt('minorQty', isJson)
        if minorQtyInt < 7 or minorQtyInt > 100:
            raiseHttp(400, "Wrong value for minorQty, must be between 7 and 100", isJson)
    if 'energyQty' in others:
        if request.vars.energyQty not in ['sparse', 'medium', 'vanilla', 'random']:
            raiseHttp(400, "Wrong value for energyQty: authorized values: sparse/medium/vanilla", isJson)
    if 'gravityBehaviour' in others:
        if request.vars.gravityBehaviour not in ['Balanced', 'Progressive', 'Vanilla']:
            raiseHttp(400, "Wrong value for gravityBehaviour: {}".format(request.vars.gravityBehaviour), isJson)
    if 'startLocation' in others:
        if request.vars.startLocation not in ['Ceres', 'Landing Site']:
            raiseHttp(400, "Wrong value for startLocation: {}".format(request.vars.startLocation), isJson)
def sessionWebService():
    """Web service: persist all randomizer form parameters into the session."""
    switchs = ['suitsRestriction', 'hideItems', 'strictMinors',
               'areaRandomization', 'areaLayout', 'bossRandomization',
               'funCombat', 'funMovement', 'funSuits',
               'layoutPatches', 'variaTweaks', 'nerfedCharge',
               'itemsounds', 'elevators_doors_speed', 'spinjumprestart',
               'rando_speed', 'animals', 'No_Music']
    quantities = ['missileQty', 'superQty', 'powerBombQty']
    others = ['complexity', 'preset', 'randoPreset', 'majorsSplit',
              'maxDifficulty', 'progressionSpeed', 'progressionDifficulty',
              'morphPlacement', 'minorQty', 'energyQty',
              'gravityBehaviour', 'startLocation']
    validateWebServiceParams(switchs, quantities, others)
    if session.randomizer is None:
        session.randomizer = {}
    # copy every validated parameter verbatim into the session;
    # progressionSpeed is the one exception, stored as a list
    for key in switchs + quantities + others:
        if key == 'progressionSpeed':
            session.randomizer[key] = request.vars.progressionSpeed.split(',')
        else:
            session.randomizer[key] = request.vars[key]
    # to create a new rando preset, uncomment next lines
    #with open('rando_presets/new.json', 'w') as jsonFile:
    #    json.dump(session.randomizer, jsonFile)
def getCustomMapping(controlMapping):
    """Build the randomizer --controls string from a button->action mapping.

    Returns (False, None) when no mapping is defined, otherwise
    (True, comma-joined buttons in Shoot/Jump/Dash/Item Select/
    Item Cancel/Angle Up/Angle Down order).
    """
    if not controlMapping:
        return (False, None)
    # invert the mapping: action name -> button name
    action2button = {action: button for (button, action) in controlMapping.items()}
    order = ["Shoot", "Jump", "Dash", "Item Select", "Item Cancel", "Angle Up", "Angle Down"]
    return (True, ",".join(action2button[action] for action in order))
def randomizerWebService():
    """Web service: run the randomizer in a subprocess and return its JSON result.

    Validates all request parameters, builds the randomizer.py command line,
    records the run in the DB, and returns the generated locations/items as a
    JSON string. Aborts with HTTP 400 on any failure.
    """
    # web service to compute a new random (returns json string)
    print("randomizerWebService")
    session.forget(response)
    # set header to authorize cross domain AJAX
    response.headers['Access-Control-Allow-Origin'] = '*'
    # check validity of all parameters
    switchs = ['suitsRestriction', 'hideItems', 'strictMinors',
               'areaRandomization', 'areaLayout', 'bossRandomization',
               'funCombat', 'funMovement', 'funSuits',
               'layoutPatches', 'variaTweaks', 'nerfedCharge',
               'itemsounds', 'elevators_doors_speed', 'spinjumprestart',
               'rando_speed', 'animals', 'No_Music']
    quantities = ['missileQty', 'superQty', 'powerBombQty']
    others = ['complexity', 'paramsFileTarget', 'seed', 'preset', 'majorsSplit',
              'maxDifficulty', 'progressionSpeed', 'progressionDifficulty',
              'morphPlacement', 'minorQty', 'energyQty',
              'gravityBehaviour', 'startLocation']
    validateWebServiceParams(switchs, quantities, others, isJson=True)
    # randomize
    DB = db.DB()
    id = DB.initRando()
    # race mode
    useRace = False
    if request.vars.raceMode == 'on':
        magic = getMagic()
        useRace = True
    # temp files: the skill preset given to the randomizer and its JSON output
    (fd1, presetFileName) = tempfile.mkstemp()
    presetFileName += '.json'
    (fd2, jsonFileName) = tempfile.mkstemp()
    print("randomizerWebService, params validated")
    for var in request.vars:
        print("{}: {}".format(var, request.vars[var]))
    with open(presetFileName, 'w') as presetFile:
        presetFile.write(request.vars.paramsFileTarget)
    # seed '0' means "pick one at random"
    seed = request.vars.seed
    if seed == '0':
        seed = str(random.randint(0, 9999999))
    preset = request.vars.preset
    params = [pythonExec, os.path.expanduser("~/RandomMetroidSolver/randomizer.py"),
              '--runtime', '20',
              '--seed', seed,
              '--output', jsonFileName,
              '--param', presetFileName,
              '--preset', preset,
              '--progressionSpeed', request.vars.progressionSpeed,
              '--progressionDifficulty', request.vars.progressionDifficulty,
              '--morphPlacement', request.vars.morphPlacement,
              '--majorsSplit', request.vars.majorsSplit]
    # '0' stands for 'random' on the randomizer command line
    params += ['--missileQty', request.vars.missileQty if request.vars.missileQty != 'random' else '0',
               '--superQty', request.vars.superQty if request.vars.superQty != 'random' else '0',
               '--powerBombQty', request.vars.powerBombQty if request.vars.powerBombQty != 'random' else '0',
               '--minorQty', request.vars.minorQty if request.vars.minorQty != 'random' else '0',
               '--energyQty', request.vars.energyQty]
    if useRace == True:
        params += ['--race', str(magic)]
    if request.vars.nerfedCharge == 'on':
        params.append('--nerfedCharge')
    # optional patches
    if request.vars.itemsounds == 'on':
        params += ['-c', 'itemsounds.ips']
    if request.vars.elevators_doors_speed == 'on':
        params += ['-c', 'elevators_doors_speed.ips']
    if request.vars.spinjumprestart == 'on':
        params += ['-c', 'spinjumprestart.ips']
    if request.vars.rando_speed == 'on':
        params += ['-c', 'rando_speed.ips']
    if request.vars.No_Music == 'on':
        params += ['-c', 'No_Music']
    if request.vars.startLocation == "Ceres":
        params += ['-c', 'skip_intro.ips']
    else:
        params += ['-c', 'skip_ceres.ips']
    if request.vars.animals == 'on':
        params.append('--animals')
    if request.vars.areaLayout == 'off':
        params.append('--areaLayoutBase')
    if request.vars.variaTweaks == 'off':
        params.append('--novariatweaks')
    if request.vars.maxDifficulty != 'no difficulty cap':
        params.append('--maxDifficulty')
        params.append(request.vars.maxDifficulty)

    def addParamRandom(id, params):
        # switch flags accept an extra 'random' value on the command line
        if request.vars[id] in ['on', 'random']:
            params.append('--{}'.format(id))
        if request.vars[id] == 'random':
            params.append('random')
    addParamRandom('suitsRestriction', params)
    addParamRandom('hideItems', params)
    addParamRandom('strictMinors', params)

    def addSuperFun(id, params):
        # 'funCombat' -> superFun 'Combat', etc.
        fun = id[len('fun'):]
        if request.vars[id] == 'on':
            params += ['--superFun', fun]
        elif request.vars[id] == 'random':
            params += ['--superFun', "{}Random".format(fun)]
    addSuperFun('funCombat', params)
    addSuperFun('funMovement', params)
    addSuperFun('funSuits', params)

    if request.vars.layoutPatches == 'off':
        params.append('--nolayout')
    if request.vars.gravityBehaviour == 'Vanilla':
        params.append('--nogravheat')
    elif request.vars.gravityBehaviour == 'Progressive':
        params.append('--progressiveSuits')
    if request.vars.areaRandomization == 'on':
        params.append('--area')
    elif request.vars.areaRandomization == 'random':
        params += ['--area', 'random']
    if request.vars.bossRandomization == 'on':
        params.append('--bosses')
    elif request.vars.bossRandomization == 'random':
        params += ['--bosses', 'random']
    # load content of preset to get controller mapping
    try:
        controlMapping = PresetLoader.factory(presetFileName).params['Controller']
    except Exception as e:
        # FIX: the DB connection leaked on this error path (temp files were
        # cleaned up but DB.close() was never called)
        DB.close()
        os.close(fd1)
        os.remove(presetFileName)
        os.close(fd2)
        os.remove(jsonFileName)
        raise HTTP(400, json.dumps("randomizerWebService: can't load the preset"))
    (custom, controlParam) = getCustomMapping(controlMapping)
    if custom == True:
        params += ['--controls', controlParam]
        if "Moonwalk" in controlMapping and controlMapping["Moonwalk"] == True:
            params.append('--moonwalk')
    DB.addRandoParams(id, params + ['--complexity', request.vars.complexity])
    print("before calling: {}".format(params))
    start = datetime.now()
    ret = subprocess.call(params)
    end = datetime.now()
    duration = (end - start).total_seconds()
    print("ret: {}, duration: {}s".format(ret, duration))
    if ret == 0:
        with open(jsonFileName) as jsonFile:
            locsItems = json.load(jsonFile)
        # check if an info message has been returned
        msg = ''
        if len(locsItems['errorMsg']) > 0:
            msg = locsItems['errorMsg']
        DB.addRandoResult(id, ret, duration, msg)
        DB.close()
        os.close(fd1)
        os.remove(presetFileName)
        os.close(fd2)
        os.remove(jsonFileName)
        return json.dumps(locsItems)
    else:
        # extract error from json
        try:
            with open(jsonFileName) as jsonFile:
                msg = json.load(jsonFile)['errorMsg']
        except Exception:
            msg = "randomizerWebService: something wrong happened"
        DB.addRandoResult(id, ret, duration, msg)
        DB.close()
        os.close(fd1)
        os.remove(presetFileName)
        os.close(fd2)
        os.remove(jsonFileName)
        raise HTTP(400, json.dumps(msg))
def presetWebService():
    """Web service: return the JSON content of a skill preset file."""
    preset = request.vars.preset
    if preset == None:
        raiseHttp(400, "Missing parameter preset")
    if IS_ALPHANUMERIC()(preset)[1] is not None:
        raise HTTP(400, "Preset name must be alphanumeric")
    if IS_LENGTH(maxsize=32, minsize=1)(preset)[1] is not None:
        raise HTTP(400, "Preset name must be between 1 and 32 characters")
    print("presetWebService: preset={}".format(preset))
    fullPath = '{}/{}.json'.format(getPresetDir(preset), preset)
    # the preset file must exist on disk
    if not os.path.isfile(fullPath):
        raise HTTP(400, "Preset '{}' not found".format(fullPath))
    try:
        params = PresetLoader.factory(fullPath).params
    except Exception as e:
        raise HTTP(400, "Can't load the preset")
    return json.dumps(params)
def randoPresetWebService():
    """Web service: return the content of a rando preset file.

    When called from the randomizer page (origin == "randomizer") the preset
    values are also copied into session.randomizer.
    """
    # web service to get the content of the rando preset file
    if request.vars.randoPreset == None:
        raiseHttp(400, "Missing parameter rando preset")
    preset = request.vars.randoPreset
    if IS_ALPHANUMERIC()(preset)[1] is not None:
        raise HTTP(400, "Preset name must be alphanumeric")
    if IS_LENGTH(maxsize=32, minsize=1)(preset)[1] is not None:
        raise HTTP(400, "Preset name must be between 1 and 32 characters")
    if request.vars.origin not in ["extStats", "randomizer"]:
        raise HTTP(400, "Unknown origin")
    print("randoPresetWebService: preset={}".format(preset))
    fullPath = 'rando_presets/{}.json'.format(preset)
    # check that the presets file exists
    if os.path.isfile(fullPath):
        # load it
        try:
            updateSession = request.vars.origin == "randomizer"
            # FIX: when the randomizer session was never initialized,
            # subscripting None raised a TypeError that was masked by the
            # broad except below and reported as "Can't load"
            if updateSession and session.randomizer is None:
                session.randomizer = {}
            params = loadRandoPreset(fullPath, updateSession)
            if updateSession == True:
                session.randomizer['randoPreset'] = preset
            params = json.dumps(params)
            return params
        except Exception as e:
            raise HTTP(400, "Can't load the rando preset: {}".format(preset))
    else:
        raise HTTP(400, "Rando preset '{}' not found".format(fullPath))
def loadRandoPreset(presetFullPath, updateSession):
    """Load a rando preset JSON file and return it as a dict.

    When updateSession is True every preset key is also mirrored into
    session.randomizer.
    """
    with open(presetFullPath) as jsonFile:
        randoPreset = json.load(jsonFile)
    if updateSession == True:
        # copy the whole preset into the randomizer session
        for (key, value) in randoPreset.items():
            session.randomizer[key] = value
    return randoPreset
def home():
    """Landing page: only sets the page title."""
    response.title = 'Super Metroid VARIA Randomizer, Solver and Trackers'
    return {}
def getErrors():
    """Return web2py error-ticket file names sorted by mtime (oldest first).

    Returns an empty list when the errors directory does not exist.
    """
    errDir = os.path.expanduser("~/web2py/applications/solver/errors")
    if not os.path.isdir(errDir):
        return []
    # list error files, oldest first
    errFiles = sorted(glob.glob(os.path.join(errDir, "*")), key=os.path.getmtime)
    return [os.path.basename(f) for f in errFiles]
def getFsUsage():
    """Return ('OK'|'WARNING'|'CRITICAL', used-percent) for the /home filesystem."""
    fsData = os.statvfs('/home')
    percent = round(100 - (100.0 * fsData.f_bavail / fsData.f_blocks), 2)
    # thresholds: >= 95 critical, >= 80 warning, otherwise ok
    if percent >= 95:
        return ('CRITICAL', percent)
    if percent >= 80:
        return ('WARNING', percent)
    return ('OK', percent)
def randoParamsWebService():
    """Web service: return the randomizer parameters stored for a given seed."""
    # get a string of the randomizer parameters for a given seed
    if request.vars.seed == None:
        raiseHttp(400, "Missing parameter seed", False)
    seed = getInt('seed', False)
    if seed < 0 or seed > 9999999:
        # FIX: was request.vars[seed] (indexing with the int value), which
        # always produced None in the error message; use the named parameter
        raiseHttp(400, "Wrong value for seed: {}, must be between 0 and 9999999".format(request.vars.seed), False)
    DB = db.DB()
    params = DB.getRandomizerSeedParams(seed)
    DB.close()
    return params
def stats():
    """Render the usage statistics page (last week of data)."""
    response.title = 'Super Metroid VARIA Randomizer and Solver usage statistics'
    weeks = 1
    DB = db.DB()
    data = dict(solverPresets=DB.getSolverPresets(weeks),
                randomizerPresets=DB.getRandomizerPresets(weeks),
                solverDurations=DB.getSolverDurations(weeks),
                randomizerDurations=DB.getRandomizerDurations(weeks),
                solverData=DB.getSolverData(weeks),
                randomizerData=DB.getRandomizerData(weeks),
                isolver=DB.getISolver(weeks),
                isolverData=DB.getISolverData(weeks),
                errors=getErrors())
    DB.close()
    (fsStatus, fsPercent) = getFsUsage()
    data['fsStatus'] = fsStatus
    data['fsPercent'] = fsPercent
    return data
def transition2isolver(transition):
    """Convert an access-point name to its isolver id.

    Lowercases the first character and strips spaces, commas, parens
    and dashes from the rest.
    """
    name = str(transition)
    return name[0].lower() + removeChars(name[1:], " ,()-")
def tracker():
    """Render the areas/items tracker page, creating its session if needed."""
    response.title = 'Super Metroid VARIA Areas and Items Tracker'
    # create the tracker session on first visit
    if session.tracker is None:
        session.tracker = {
            "state": {},
            "preset": "regular",
            "seed": None,
            # set to False in tracker.html
            "firstTime": True
        }
    # load presets list
    (stdPresets, tourPresets, comPresets) = loadPresetsList()
    # flatten the transitions into isolver access point names
    vanillaAPs = [transition2isolver(ap)
                  for (src, dest) in vanillaTransitions for ap in (src, dest)]
    vanillaBossesAPs = [transition2isolver(ap)
                        for (src, dest) in vanillaBossesTransitions for ap in (src, dest)]
    return dict(stdPresets=stdPresets, tourPresets=tourPresets, comPresets=comPresets,
                vanillaAPs=vanillaAPs, vanillaBossesAPs=vanillaBossesAPs,
                curSession=session.tracker)
def plando():
    """Render the plandomizer page, creating its session if needed."""
    response.title = 'Super Metroid VARIA Areas and Items Plandomizer'
    # create the plando session on first visit
    if session.plando is None:
        session.plando = {
            "state": {},
            "preset": "regular",
            "seed": None,
            # rando params
            "rando": {},
            # set to False in plando.html
            "firstTime": True
        }
    # load presets list
    (stdPresets, tourPresets, comPresets) = loadPresetsList()
    # flatten the transitions into isolver access point names
    vanillaAPs = [transition2isolver(ap)
                  for (src, dest) in vanillaTransitions for ap in (src, dest)]
    vanillaBossesAPs = [transition2isolver(ap)
                        for (src, dest) in vanillaBossesTransitions for ap in (src, dest)]
    return dict(stdPresets=stdPresets, tourPresets=tourPresets, comPresets=comPresets,
                vanillaAPs=vanillaAPs, vanillaBossesAPs=vanillaBossesAPs,
                curSession=session.plando)
class WS(object):
    # Base class for the tracker/plando web services.
    #
    # Subclasses are named WS_<scope>_<action> (e.g. WS_area_add) and are
    # instantiated via the factory() below from the request's scope/action
    # parameters. Each subclass implements validate() and action().

    @staticmethod
    def factory():
        # Build the WS subclass matching request.vars scope/action, after
        # validating scope, action and mode. Aborts with HTTP 400 on error.
        scope = request.vars.scope
        if scope not in ["area", "item", "common"]:
            raiseHttp(400, "Unknown scope: {}, must be area/item/common".format(scope), True)
        action = request.vars.action
        if action not in ['add', 'remove', 'clear', 'init', 'get', 'save', 'replace', 'randomize']:
            raiseHttp(400, "Unknown action {}, must be add/remove/clear/init/get/save/randomize".format(action), True)
        mode = request.vars.mode
        if mode not in ["standard", "seedless", "plando"]:
            raiseHttp(400, "Unknown mode, must be standard/seedless/plando", True)
        try:
            # look up the concrete subclass by naming convention
            WSClass = globals()["WS_{}_{}".format(scope, action)]
            return WSClass(mode)
        except Exception as e:
            # HTTP exceptions carry a .body; surface whichever message we have
            raiseHttp(400, "{}".format(e.body if "body" in e.__dict__ else e).replace('"', ''), True)

    def __init__(self, mode):
        # Bind self.session to the plando or tracker session depending on mode.
        if mode == "plando":
            if session.plando is None:
                raiseHttp(400, "No session found for the Plandomizer Web service", True)
            self.session = session.plando
        else:
            if session.tracker is None:
                raiseHttp(400, "No session found for the Tracker Web service", True)
            self.session = session.tracker

    def validate(self):
        # Common validation shared by all subclasses; they call this via super().
        if self.session is None:
            raiseHttp(400, "No session found for the Tracker", True)
        if request.vars.action == None:
            raiseHttp(400, "Missing parameter action", True)
        action = request.vars.action
        if action not in ['init', 'add', 'remove', 'clear', 'get', 'save', 'replace', 'randomize']:
            raiseHttp(400, "Unknown action {}, must be init/add/remove/clear/get/save/randomize".format(action), True)

    def validatePoint(self, point):
        # Validate that request parameter `point` is a known access point id.
        if request.vars[point] == None:
            raiseHttp(400, "Missing parameter {}".format(point), True)
        pointValue = request.vars[point]
        if pointValue not in ['lowerMushroomsLeft', 'moatRight', 'greenPiratesShaftBottomRight',
                              'keyhunterRoomBottom', 'morphBallRoomLeft', 'greenBrinstarElevatorRight',
                              'greenHillZoneTopRight', 'noobBridgeRight', 'westOceanLeft', 'crabMazeLeft',
                              'lavaDiveRight', 'threeMuskateersRoomLeft', 'warehouseZeelaRoomLeft',
                              'warehouseEntranceLeft', 'warehouseEntranceRight', 'singleChamberTopRight',
                              'kronicBoostRoomBottomLeft', 'mainStreetBottom', 'crabHoleBottomLeft', 'leCoudeRight',
                              'redFishRoomLeft', 'redTowerTopLeft', 'caterpillarRoomTopRight', 'redBrinstarElevator',
                              'eastTunnelRight', 'eastTunnelTopRight', 'glassTunnelTop', 'statuesHallwayLeft',
                              'ridleyRoomOut', 'ridleyRoomIn', 'kraidRoomOut', 'kraidRoomIn',
                              'draygonRoomOut', 'draygonRoomIn', 'phantoonRoomOut', 'phantoonRoomIn']:
            raiseHttp(400, "Wrong value for {}: {}".format(point, pointValue), True)

    def action(self):
        # Overridden by subclasses; the base class does nothing.
        pass

    def locName4isolver(self, locName):
        # remove space and special characters
        # sed -e 's+ ++g' -e 's+,++g' -e 's+(++g' -e 's+)++g' -e 's+-++g'
        locName = str(locName)
        return locName[0].lower() + removeChars(locName[1:], " ,()-")

    def returnState(self):
        # Serialize the current solver state for the frontend as JSON.
        # Returns HTTP 200 "OK" when no state exists yet.
        if len(self.session["state"]) > 0:
            state = self.session["state"]
            #print("state returned to frontend: availWeb {}, visWeb {}".format(state["availableLocationsWeb"], state["visitedLocationsWeb"]))
            return json.dumps({
                # item tracker
                "availableLocations": state["availableLocationsWeb"],
                "visitedLocations": state["visitedLocationsWeb"],
                # compatibility with existing sessions
                "remainLocations": state["remainLocationsWeb"] if "remainLocationsWeb" in state else [],
                "lastLoc": self.locName4isolver(state["lastLoc"]),
                # area tracker
                "lines": state["linesWeb"],
                "linesSeq": state["linesSeqWeb"],
                "allTransitions": state["allTransitions"],
                # infos on seed
                "mode": state["mode"],
                "areaRando": state["areaRando"],
                "bossRando": state["bossRando"],
                "seed": state["seed"],
                "preset": os.path.basename(os.path.splitext(state["presetFileName"])[0]),
                "errorMsg": state["errorMsg"],
                "last": state["last"]
            })
        else:
            raiseHttp(200, "OK", True)

    def callSolverAction(self, scope, action, parameters):
        # Run the interactive solver in a subprocess for the given scope/action,
        # exchanging state through two temp JSON files (input state / output state).
        # check that we have a state in the session
        if "state" not in self.session:
            raiseHttp(400, "Missing Solver state in the session", True)
        mode = self.session["mode"]
        (fd1, jsonInFileName) = tempfile.mkstemp()
        (fd2, jsonOutFileName) = tempfile.mkstemp()
        params = [
            pythonExec, os.path.expanduser("~/RandomMetroidSolver/solver.py"),
            '--interactive',
            '--state', jsonInFileName,
            '--output', jsonOutFileName,
            '--action', action,
            '--mode', mode,
            '--scope', scope
        ]
        # map the action-specific parameters onto solver.py arguments
        if action in ['add', 'replace']:
            if scope == 'item':
                params += ['--loc', parameters["loc"]]
                # NOTE(review): the original indentation was lost; the
                # item/hide flags are assumed to apply only outside
                # standard mode (plando item placement) — confirm upstream
                if mode != 'standard':
                    params += ['--item', parameters["item"]]
                    if parameters['hide'] == True:
                        params.append('--hide')
            elif scope == 'area':
                params += ['--startPoint', parameters["startPoint"],
                           '--endPoint', parameters["endPoint"]]
        elif action == 'remove' and scope == 'item':
            params += ['--count', str(parameters["count"])]
        elif action == 'remove' and scope == 'area' and "startPoint" in parameters:
            params += ['--startPoint', parameters["startPoint"]]
        elif action == 'save' and scope == 'common':
            if parameters['lock'] == True:
                params.append('--lock')
        elif action == 'randomize':
            params += ['--progressionSpeed', parameters["progressionSpeed"],
                       '--minorQty', parameters["minorQty"],
                       '--energyQty', parameters["energyQty"]
                       ]
        if request.vars.debug != None:
            params.append('--vcr')
            params.append('--debug')
        # dump state as input
        with open(jsonInFileName, 'w') as jsonFile:
            json.dump(self.session["state"], jsonFile)
        print("before calling isolver: {}".format(params))
        start = datetime.now()
        ret = subprocess.call(params)
        end = datetime.now()
        duration = (end - start).total_seconds()
        print("ret: {}, duration: {}s".format(ret, duration))
        if ret == 0:
            with open(jsonOutFileName) as jsonFile:
                state = json.load(jsonFile)
            os.close(fd1)
            os.remove(jsonInFileName)
            os.close(fd2)
            os.remove(jsonOutFileName)
            if action == 'save':
                # 'save' returns the state to the caller instead of storing it
                return json.dumps(state)
            else:
                self.session["state"] = state
                return self.returnState()
        else:
            os.close(fd1)
            os.remove(jsonInFileName)
            msg = "Something wrong happened while iteratively solving the ROM"
            # best effort: try to extract a more precise error from the output file
            try:
                with open(jsonOutFileName, 'r') as jsonFile:
                    data = json.load(jsonFile)
                    if "errorMsg" in data:
                        msg = data["errorMsg"]
            except Exception as e:
                pass
            os.close(fd2)
            os.remove(jsonOutFileName)
            raiseHttp(400, msg, True)
class WS_common_init(WS):
    """Web service: initialize the interactive solver from a preset and ROM."""

    def validate(self):
        super(WS_common_init, self).validate()
        if request.vars.scope != 'common':
            raiseHttp(400, "Unknown scope, must be common", True)
        # preset
        preset = request.vars.preset
        # FIX: was `if request == None:` which can never be true (request is
        # the web2py global), leaving the missing-preset check dead
        if preset == None:
            raiseHttp(400, "Missing parameter preset", True)
        if IS_NOT_EMPTY()(preset)[1] is not None:
            raiseHttp(400, "Preset name is empty", True)
        if IS_ALPHANUMERIC()(preset)[1] is not None:
            raiseHttp(400, "Preset name must be alphanumeric: {}".format(preset), True)
        if IS_LENGTH(32)(preset)[1] is not None:
            raiseHttp(400, "Preset name must be max 32 chars: {}".format(preset), True)
        fullPath = '{}/{}.json'.format(getPresetDir(preset), preset)
        if not os.path.isfile(fullPath):
            raiseHttp(400, "Unknown preset: {}".format(preset), True)
        if request.vars.mode != 'seedless':
            # ROM (only through file API)
            if request.vars.romJson is None or len(request.vars.romJson) == 0:
                raiseHttp(400, "Missing ROM to solve", True)
            try:
                json.loads(request.vars.romJson)
            except Exception:
                # FIX: the isJson flag was missing here, making this the only
                # non-JSON error response of this JSON web service
                raiseHttp(400, "Wrong value for romJson, must be a JSON string: [{}]".format(request.vars.romJson), True)
            # ROM file name
            uploadFile = request.vars.fileName
            if uploadFile is None:
                raiseHttp(400, "Missing ROM file name", True)
            if IS_NOT_EMPTY()(uploadFile)[1] is not None:
                raiseHttp(400, "File name is empty", True)
            if IS_LENGTH(maxsize=255, minsize=1)(uploadFile)[1] is not None:
                raiseHttp(400, "Wrong length for ROM file name, name must be between 1 and 255 characters", True)

    def action(self):
        mode = request.vars.mode
        if mode != 'seedless':
            # materialize the uploaded ROM (JSON form) on disk
            try:
                (base, jsonRomFileName) = generateJsonROM(request.vars.romJson)
            except Exception as e:
                raiseHttp(400, "Can't load JSON ROM: {}".format(e), True)
            seed = base + '.sfc'
        else:
            seed = 'seedless'
            jsonRomFileName = None
        preset = request.vars.preset
        presetFileName = '{}/{}.json'.format(getPresetDir(preset), preset)
        self.session["seed"] = seed
        self.session["preset"] = preset
        self.session["mode"] = mode
        vcr = request.vars.debug != None
        fill = request.vars.fill == "true"
        return self.callSolverInit(jsonRomFileName, presetFileName, preset, seed, mode, vcr, fill)

    def callSolverInit(self, jsonRomFileName, presetFileName, preset, romFileName, mode, vcr, fill):
        # Run solver.py with the init action and store the resulting state.
        (fd, jsonOutFileName) = tempfile.mkstemp()
        params = [
            pythonExec, os.path.expanduser("~/RandomMetroidSolver/solver.py"),
            '--preset', presetFileName,
            '--output', jsonOutFileName,
            '--action', "init",
            '--interactive',
            '--mode', mode,
            '--scope', 'common',
        ]
        if mode != "seedless":
            params += ['-r', str(jsonRomFileName)]
        if vcr == True:
            params.append('--vcr')
        if fill == True:
            params.append('--fill')
        print("before calling isolver: {}".format(params))
        start = datetime.now()
        ret = subprocess.call(params)
        end = datetime.now()
        duration = (end - start).total_seconds()
        print("ret: {}, duration: {}s".format(ret, duration))
        if ret == 0:
            DB = db.DB()
            DB.addISolver(preset, romFileName)
            DB.close()
            with open(jsonOutFileName) as jsonFile:
                state = json.load(jsonFile)
            os.close(fd)
            os.remove(jsonOutFileName)
            self.session["state"] = state
            return self.returnState()
        else:
            os.close(fd)
            os.remove(jsonOutFileName)
            raiseHttp(400, "Something wrong happened while initializing the ISolver", True)
class WS_common_get(WS):
    # Web service: return the current solver state without modifying it.
    def validate(self):
        super(WS_common_get, self).validate()

    def action(self):
        return self.returnState()
class WS_common_save(WS):
    """Web service: export the current plando state, optionally locked."""

    def validate(self):
        super(WS_common_save, self).validate()
        if request.vars.lock == None:
            raiseHttp(400, "Missing parameter lock", True)
        if request.vars.lock not in ["save", "lock"]:
            raiseHttp(400, "Wrong value for lock: {}, authorized values: save/lock".format(request.vars.lock), True)

    def action(self):
        if self.session["mode"] != "plando":
            # FIX: grammar in the user-facing message ("be use" -> "be used")
            raiseHttp(400, "Save can only be used in plando mode", True)
        return self.callSolverAction("common", "save", {'lock': request.vars.lock == "lock"})
class WS_common_randomize(WS):
    """Web service: fill the remaining plando locations randomly."""

    def validate(self):
        super(WS_common_randomize, self).validate()
        if request.vars.progressionSpeed not in ["slowest", "slow", "medium", "fast", "fastest", "basic", "VARIAble"]:
            raiseHttp(400, "Wrong value for progressionSpeed: {}".format(request.vars.progressionSpeed), True)
        minorQtyInt = getInt('minorQty', True)
        if minorQtyInt < 7 or minorQtyInt > 100:
            raiseHttp(400, "Wrong value for minorQty, must be between 7 and 100", True)
        if request.vars.energyQty not in ["sparse", "medium", "vanilla"]:
            raiseHttp(400, "Wrong value for energyQty: {}".format(request.vars.energyQty), True)

    def action(self):
        if self.session["mode"] != "plando":
            # FIX: grammar in the user-facing message ("be use" -> "be used")
            raiseHttp(400, "Randomize can only be used in plando mode", True)
        params = {}
        for elem in ("progressionSpeed", "minorQty", "energyQty"):
            params[elem] = request.vars[elem]
        # remember the rando params in the session for the next randomize
        self.session["rando"] = params
        return self.callSolverAction("common", "randomize", params)
class WS_area_add(WS):
    """Web service: add a transition between two area access points."""

    def validate(self):
        super(WS_area_add, self).validate()
        # both endpoints of the transition are mandatory
        for point in ("startPoint", "endPoint"):
            self.validatePoint(point)
        if len(self.session["state"]) == 0:
            raiseHttp(400, "ISolver state is empty", True)

    def action(self):
        parameters = {"startPoint": request.vars.startPoint,
                      "endPoint": request.vars.endPoint}
        return self.callSolverAction("area", "add", parameters)
class WS_area_remove(WS):
    """Web service: remove the last transition, or the one at startPoint."""

    def validate(self):
        # startPoint is optional; when given, that specific transition is removed
        if request.vars["startPoint"] != None:
            self.validatePoint("startPoint")
        super(WS_area_remove, self).validate()

    def action(self):
        startPoint = request.vars["startPoint"]
        parameters = {} if startPoint == None else {"startPoint": request.vars.startPoint}
        return self.callSolverAction("area", "remove", parameters)
class WS_area_clear(WS):
    # Web service: clear all area transitions from the current state.
    def validate(self):
        super(WS_area_clear, self).validate()

    def action(self):
        # delegate to the interactive solver with no extra parameters
        return self.callSolverAction("area", "clear", {})
class WS_item_add(WS):
    # Web service placing an item at a location in the plando.
    def validate(self):
        super(WS_item_add, self).validate()
        # new location
        def name4isolver(locName):
            # remove space and special characters
            # sed -e 's+ ++g' -e 's+,++g' -e 's+(++g' -e 's+)++g' -e 's+-++g'
            return removeChars(locName, " ,()-")
        # normalize the submitted location name to the interactive solver's format
        locName = name4isolver(request.vars.locName)
        # whitelist of every normalized vanilla location name
        if locName not in ['EnergyTankGauntlet', 'Bomb', 'EnergyTankTerminator', 'ReserveTankBrinstar', 'ChargeBeam', 'MorphingBall', 'EnergyTankBrinstarCeiling', 'EnergyTankEtecoons', 'EnergyTankWaterway', 'EnergyTankBrinstarGate', 'XRayScope', 'Spazer', 'EnergyTankKraid', 'VariaSuit', 'IceBeam', 'EnergyTankCrocomire', 'HiJumpBoots', 'GrappleBeam', 'ReserveTankNorfair', 'SpeedBooster', 'WaveBeam', 'EnergyTankRidley', 'ScrewAttack', 'EnergyTankFirefleas', 'ReserveTankWreckedShip', 'EnergyTankWreckedShip', 'RightSuperWreckedShip', 'GravitySuit', 'EnergyTankMamaturtle', 'PlasmaBeam', 'ReserveTankMaridia', 'SpringBall', 'EnergyTankBotwoon', 'SpaceJump', 'PowerBombCrateriasurface', 'MissileoutsideWreckedShipbottom', 'MissileoutsideWreckedShiptop', 'MissileoutsideWreckedShipmiddle', 'MissileCrateriamoat', 'MissileCrateriabottom', 'MissileCrateriagauntletright', 'MissileCrateriagauntletleft', 'SuperMissileCrateria', 'MissileCrateriamiddle', 'PowerBombgreenBrinstarbottom', 'SuperMissilepinkBrinstar', 'MissilegreenBrinstarbelowsupermissile', 'SuperMissilegreenBrinstartop', 'MissilegreenBrinstarbehindmissile', 'MissilegreenBrinstarbehindreservetank', 'MissilepinkBrinstartop', 'MissilepinkBrinstarbottom', 'PowerBombpinkBrinstar', 'MissilegreenBrinstarpipe', 'PowerBombblueBrinstar', 'MissileblueBrinstarmiddle', 'SuperMissilegreenBrinstarbottom', 'MissileblueBrinstarbottom', 'MissileblueBrinstartop', 'MissileblueBrinstarbehindmissile', 'PowerBombredBrinstarsidehopperroom', 'PowerBombredBrinstarspikeroom', 'MissileredBrinstarspikeroom', 'MissileKraid', 'Missilelavaroom', 'MissilebelowIceBeam', 'MissileaboveCrocomire', 'MissileHiJumpBoots', 'EnergyTankHiJumpBoots', 'PowerBombCrocomire', 'MissilebelowCrocomire', 'MissileGrappleBeam', 'MissileNorfairReserveTank', 'MissilebubbleNorfairgreendoor', 'MissilebubbleNorfair', 'MissileSpeedBooster', 'MissileWaveBeam', 'MissileGoldTorizo', 'SuperMissileGoldTorizo', 'MissileMickeyMouseroom', 'MissilelowerNorfairabovefireflearoom', 
'PowerBomblowerNorfairabovefireflearoom', 'PowerBombPowerBombsofshame', 'MissilelowerNorfairnearWaveBeam', 'MissileWreckedShipmiddle', 'MissileGravitySuit', 'MissileWreckedShiptop', 'SuperMissileWreckedShipleft', 'MissilegreenMaridiashinespark', 'SuperMissilegreenMaridia', 'MissilegreenMaridiatatori', 'SuperMissileyellowMaridia', 'MissileyellowMaridiasupermissile', 'MissileyellowMaridiafalsewall', 'MissileleftMaridiasandpitroom', 'MissilerightMaridiasandpitroom', 'PowerBombrightMaridiasandpitroom', 'MissilepinkMaridia', 'SuperMissilepinkMaridia', 'MissileDraygon', 'Kraid', 'Ridley', 'Phantoon', 'Draygon', 'MotherBrain']:
            raiseHttp(400, "Unknown location name: {}".format(request.vars.locName), True)
        # store the normalized name back so action() uses it
        request.vars.locName = locName
        itemName = request.vars.itemName
        # NoEnergy is an alias for the Nothing item
        if itemName == "NoEnergy":
            itemName = "Nothing"
        if itemName not in [None, 'ETank', 'Missile', 'Super', 'PowerBomb', 'Bomb', 'Charge', 'Ice', 'HiJump', 'SpeedBooster', 'Wave', 'Spazer', 'SpringBall', 'Varia', 'Plasma', 'Grapple', 'Morph', 'Reserve', 'Gravity', 'XRayScope', 'SpaceJump', 'ScrewAttack', 'Nothing', 'NoEnergy', 'Boss']:
            raiseHttp(400, "Unknown item name: {}".format(request.vars.itemName), True)
    def action(self):
        item = request.vars.itemName
        # items used only in the randomizer that we get in vcr mode
        if item in ["Boss", "NoEnergy"]:
            item = 'Nothing'
        return self.callSolverAction("item", "add", {"loc": request.vars.locName, "item": item, "hide": request.vars.hide == "true"})
class WS_item_replace(WS_item_add):
    """Same validation as item add, but replaces the item at the location."""

    def validate(self):
        super(WS_item_replace, self).validate()

    def action(self):
        args = {"loc": request.vars.locName,
                "item": request.vars.itemName,
                "hide": request.vars.hide == "true"}
        return self.callSolverAction("item", "replace", args)
class WS_item_remove(WS):
    """Web service removing the last collected item(s)."""

    def validate(self):
        super(WS_item_remove, self).validate()

    def action(self):
        # optional 'count' parameter; defaults to removing a single item
        if request.vars.count == None:
            count = 1
        else:
            count = getInt("count", True)
            if not (1 <= count <= 105):
                raiseHttp(400, "Wrong value for count, must be in [1-105] ", True)
        return self.callSolverAction("item", "remove", {"count": count})
class WS_item_clear(WS):
    """Web service clearing every placed item."""

    def validate(self):
        super(WS_item_clear, self).validate()

    def action(self):
        # no extra parameters needed
        return self.callSolverAction("item", "clear", {})
def trackerWebService():
    """Unified web service entry point for the item/area trackers.

    Dispatches to the WS subclass matching the request, validates it,
    runs its action and returns the result (or a plain 200 OK).
    """
    print("trackerWebService called")
    ws = WS.factory()
    ws.validate()
    ret = ws.action()
    if ret != None:
        return ret
    # the action produced nothing: answer with a plain OK
    raiseHttp(200, "OK", True)
# race mode
def getMagic():
    """Return a random 16-bit magic number (1..0xffff), used for race mode."""
    return random.randint(1, 0xffff)
def initCustomizerSession():
    """Seed session.customizer with default options on first visit."""
    if session.customizer == None:
        session.customizer = {
            # palettes randomization options
            'colorsRandomization': "off",
            'suitsPalettes': "on",
            'beamsPalettes': "on",
            'tilesPalettes': "on",
            'enemiesPalettes': "on",
            'bossesPalettes': "on",
            'minDegree': -15,
            'maxDegree': 15,
            'invert': "on",
            'globalShift': "on",
            # sprite options
            'customSpriteEnable': "off",
            'customSprite': "samus",
            # gameplay/QoL patches
            'itemsounds': "off",
            'spinjumprestart': "off",
            'rando_speed': "off",
            'elevators_doors_speed': "off",
            'animals': "off",
            'No_Music': "off",
        }
# Available custom sprite sheets for the customizer page.
# index: display order in the UI; group: Samus variants vs. fully custom sprites.
customSprites = {
    'samus': {"index":0, "name": "Samus", "desc": "Samus, with a distinct animation for Screw Attack without Space Jump and a new Crystal Flash animation", "author": "Artheau and Feesh", "group": "Samus"},
    'hitbox_helper': {"index":1, "name": "Hitbox", "desc": "Samus, with her actual hitbox on top", "author": "Artheau and Komaru", "group": "Samus"},
    'hack_ancient_chozo': {"index":2, "name": "Chozo", "desc": "Samus, from Ancient Chozo hack", "author": "Albert V.", "group": "Samus"},
    'hack_ascent': {"index":3, "name": "Ascent", "desc": "Samus, from Ascent hack", "author": "Benox50", "group": "Samus"},
    'hack_decision': {"index":4, "name": "Decision", "desc": "Samus, from Decision hack", "author": "JoshShoeWah", "group": "Samus"},
    'hack_escape2': {"index":5, "name": "Escape II", "desc": "Samus, from Escape II hack", "author": "Hiroishi", "group": "Samus"},
    'hack_hyper': {"index":6, "name": "Hyper", "desc": "Samus, from Hyper Metroid hack", "author": "RealRed", "group": "Samus"},
    'hack_nature': {"index":7, "name": "Nature", "desc": "Samus, from Nature hack", "author": "Jefe962", "group": "Samus"},
    'hack_phazon': {"index":8, "name": "Phazon", "desc": "Samus, from Phazon hack", "author": "A_red_monk_called_Key", "group": "Samus"},
    'hack_redesign': {"index":9, "name": "Redesign", "desc": "Samus, from Redesign hack", "author": "Drewseph", "group": "Samus"},
    'hack_szm': {"index":10, "name": "SZM", "desc": "Samus, from Super Zero Mission hack", "author": "SBniconico", "group": "Samus"},
    'bailey': {"index":11, "name": "Bailey", "desc": "Justin Bailey, aka Samus in an 80s swimsuit", "author": "Auximines", "group": "Custom"},
    'alucard': {"index":12, "name": "Alucard", "desc": "Alucard from Castlevania Symphony Of The Night", "author": "Nintoaster", "group": "Custom"},
    'megaman': {"index":13, "name": "Megaman", "desc": "Megaman X!", "author": "Artheau", "group": "Custom"},
    'fed_trooper': {"index":14, "name": "GF Trooper", "desc": "A Galactic Federation trooper", "author": "Physix", "group": "Custom"},
    'super_controid': {"index":15, "name": "Contra", "desc": "Badass soldier from Contra III", "author": "Nintoaster", "group": "Custom"},
    'marga': {"index":16, "name": "Margatroid", "desc": "Alice Margatroid from the Touhou Project", "author": "Plan", "group": "Custom"},
    'win95_cursor': {"index":17, "name": "Win95 Cursor", "desc": "A classic Windows cursor...", "author": "PlaguedOne", "group": "Custom"}
}
def customizer():
    """Customizer landing page controller."""
    response.title = 'Super Metroid VARIA Seeds Customizer'
    initCustomizerSession()
    # the view renders the sprite selector from this dict
    return dict(customSprites=customSprites)
def customWebService():
    # Web service behind the customizer page: validates the submitted options,
    # builds a randomizer.py '--patchOnly' command line, runs it, and returns
    # the generated patch data as JSON (or raises HTTP 400 with the error).
    # check validity of all parameters
    patches = ['itemsounds', 'spinjumprestart', 'rando_speed', 'elevators_doors_speed', 'No_Music', 'animals']
    others = ['colorsRandomization', 'suitsPalettes', 'beamsPalettes', 'tilesPalettes', 'enemiesPalettes',
              'bossesPalettes', 'minDegree', 'maxDegree', 'invert']
    validateWebServiceParams(patches, [], others, isJson=True)
    if request.vars.customSpriteEnable == 'on':
        if request.vars.customSprite not in customSprites:
            raiseHttp(400, "Wrong value for customSprite", True)
    if session.customizer == None:
        session.customizer = {}
    # update session
    session.customizer['colorsRandomization'] = request.vars.colorsRandomization
    session.customizer['suitsPalettes'] = request.vars.suitsPalettes
    session.customizer['beamsPalettes'] = request.vars.beamsPalettes
    session.customizer['tilesPalettes'] = request.vars.tilesPalettes
    session.customizer['enemiesPalettes'] = request.vars.enemiesPalettes
    session.customizer['bossesPalettes'] = request.vars.bossesPalettes
    session.customizer['minDegree'] = request.vars.minDegree
    session.customizer['maxDegree'] = request.vars.maxDegree
    session.customizer['invert'] = request.vars.invert
    session.customizer['globalShift'] = request.vars.globalShift
    session.customizer['customSpriteEnable'] = request.vars.customSpriteEnable
    session.customizer['customSprite'] = request.vars.customSprite
    session.customizer['itemsounds'] = request.vars.itemsounds
    session.customizer['spinjumprestart'] = request.vars.spinjumprestart
    session.customizer['rando_speed'] = request.vars.rando_speed
    session.customizer['elevators_doors_speed'] = request.vars.elevators_doors_speed
    session.customizer['animals'] = request.vars.animals
    session.customizer['No_Music'] = request.vars.No_Music
    # call the randomizer
    # the randomizer writes its result (or error) into this temp json file
    (fd, jsonFileName) = tempfile.mkstemp()
    params = [pythonExec, os.path.expanduser("~/RandomMetroidSolver/randomizer.py"),
              '--output', jsonFileName, '--patchOnly']
    if request.vars.itemsounds == 'on':
        params += ['-c', 'itemsounds.ips']
    if request.vars.elevators_doors_speed == 'on':
        params += ['-c', 'elevators_doors_speed.ips']
    if request.vars.spinjumprestart == 'on':
        params += ['-c', 'spinjumprestart.ips']
    if request.vars.rando_speed == 'on':
        params += ['-c', 'rando_speed.ips']
    if request.vars.No_Music == 'on':
        # NOTE(review): unlike the other patches this one has no '.ips'
        # extension -- confirm this is intended on the randomizer side
        params += ['-c', 'No_Music']
    if request.vars.animals == 'on':
        params.append('--animals')
    if request.vars.colorsRandomization == 'on':
        # palette randomization: 'off' sub-options translate to --no_* flags
        params.append('--palette')
        if request.vars.suitsPalettes == 'off':
            params.append('--no_shift_suit_palettes')
        if request.vars.beamsPalettes == 'off':
            params.append('--no_shift_beam_palettes')
        if request.vars.tilesPalettes == 'off':
            params.append('--no_shift_tileset_palette')
        if request.vars.enemiesPalettes == 'off':
            params.append('--no_shift_enemy_palettes')
        if request.vars.bossesPalettes == 'off':
            params.append('--no_shift_boss_palettes')
        if request.vars.globalShift == 'off':
            params.append('--no_global_shift')
            params.append('--individual_suit_shift')
            params.append('--individual_tileset_shift')
            params.append('--no_match_ship_and_power')
        params += ['--min_degree', request.vars.minDegree, '--max_degree', request.vars.maxDegree]
        if request.vars.invert == 'on':
            params.append('--invert')
    if request.vars.customSpriteEnable == 'on':
        params += ['--sprite', "{}.ips".format(request.vars.customSprite)]
    print("before calling: {}".format(params))
    start = datetime.now()
    ret = subprocess.call(params)
    end = datetime.now()
    duration = (end - start).total_seconds()
    print("ret: {}, duration: {}s".format(ret, duration))
    if ret == 0:
        # success: forward the json produced by the randomizer
        with open(jsonFileName) as jsonFile:
            data = json.load(jsonFile)
        os.close(fd)
        os.remove(jsonFileName)
        return json.dumps(data)
    else:
        # extract error from json
        try:
            with open(jsonFileName) as jsonFile:
                msg = json.load(jsonFile)['errorMsg']
        except:
            msg = "customizerWebService: something wrong happened"
        os.close(fd)
        os.remove(jsonFileName)
        raise HTTP(400, json.dumps(msg))
def initExtStatsSession():
    """Seed session.extStats with default preset names on first visit.

    Uses 'is None' (not '== None') for consistency with the sibling
    updateExtStatsSession.
    """
    if session.extStats is None:
        session.extStats = {}
        session.extStats['preset'] = 'regular'
        session.extStats['randoPreset'] = 'default'
def updateExtStatsSession():
    """Store the submitted skill/rando preset names in the session."""
    if session.extStats is None:
        session.extStats = {}
    session.extStats.update({
        'preset': request.vars.preset,
        'randoPreset': request.vars.randoPreset,
    })
def validateExtStatsParams():
    """Validate the two preset parameters of the extended stats form.

    Returns (True, None) on success, (False, errorMessage) otherwise.
    """
    for (varName, directory) in [("preset", "standard_presets"), ("randoPreset", "rando_presets")]:
        presetName = request.vars[varName]
        if presetName == None:
            return (False, "Missing parameter preset")
        if IS_ALPHANUMERIC()(presetName)[1] is not None:
            return (False, "Wrong value for preset, must be alphanumeric")
        if IS_LENGTH(maxsize=32, minsize=1)(presetName)[1] is not None:
            return (False, "Wrong length for preset, name must be between 1 and 32 characters")
        # the preset must exist on disk
        if not os.path.isfile('{}/{}.json'.format(directory, presetName)):
            return (False, "Unknown preset: {}".format(presetName))
    return (True, None)
def extStats():
    """Extended statistics page controller.

    On a 'Load' action, fetches item/technique statistics from the database
    for the selected skill preset / rando preset combination; otherwise
    renders the page empty.
    """
    response.title = 'Super Metroid VARIA Randomizer statistics'
    initExtStatsSession()
    if request.vars.action == 'Load':
        (ok, msg) = validateExtStatsParams()
        if not ok:
            session.flash = msg
            redirect(URL(r=request, f='extStats'))
        updateExtStatsSession()
        skillPreset = request.vars.preset
        randoPreset = request.vars.randoPreset
        # load rando preset (a json dict of randomizer parameters)
        fullPath = 'rando_presets/{}.json'.format(randoPreset)
        try:
            with open(fullPath) as jsonFile:
                randoPreset = json.load(jsonFile)
        except Exception as e:
            raise HTTP(400, "Can't load the rando preset: {}: {}".format(randoPreset, e))
        # load skill preset and fill in any missing knows/settings
        fullPath = '{}/{}.json'.format(getPresetDir(skillPreset), skillPreset)
        try:
            skillPresetContent = PresetLoader.factory(fullPath).params
            completePreset(skillPresetContent)
        except Exception as e:
            raise HTTP(400, "Error loading the preset {}: {}".format(skillPreset, e))
        # NOTE: gravityBehaviour/nerfedCharge/maxDifficulty are assumed present
        # in every rando preset file (a missing key raises KeyError here).
        parameters = {
            'preset': skillPreset,
            'area': randoPreset.get('areaRandomization') == 'on',
            'boss': randoPreset.get('bossRandomization') == 'on',
            'gravityBehaviour': randoPreset['gravityBehaviour'],
            'nerfedCharge': randoPreset['nerfedCharge'] == 'on',
            'maxDifficulty': randoPreset['maxDifficulty'],
            # parameters which can be random:
            'majorsSplit': randoPreset.get('majorsSplit', 'Full'),
            'progSpeed': randoPreset.get('progressionSpeed', 'variable'),
            'morphPlacement': randoPreset.get('morphPlacement', 'early'),
            'suitsRestriction': randoPreset.get('suitsRestriction') == 'on',
            'progDiff': randoPreset.get('progressionDifficulty', 'normal'),
            'superFunMovement': randoPreset.get('funMovement') == 'on',
            'superFunCombat': randoPreset.get('funCombat') == 'on',
            'superFunSuit': randoPreset.get('funSuits') == 'on'
        }
        # a value of "random" overrides the boolean; use .get() so a preset
        # missing the key doesn't raise KeyError (the original indexed the
        # dict directly here although the keys are optional just above)
        if randoPreset.get('suitsRestriction') == "random":
            parameters["suitsRestriction"] = "random"
        if randoPreset.get('funMovement') == "random":
            parameters["superFunMovement"] = "random"
        if randoPreset.get('funCombat') == "random":
            parameters["superFunCombat"] = "random"
        if randoPreset.get('funSuits') == "random":
            parameters["superFunSuit"] = "random"
        DB = db.DB()
        (itemsStats, techniquesStats, difficulties) = DB.getExtStat(parameters)
        DB.close()
        # check that all items are present in the stats: insert zero-filled
        # rows for missing items so the view's table stays aligned
        nbItems = 19
        nbLocs = 105
        if itemsStats is not None and len(itemsStats) > 0 and len(itemsStats) != nbItems:
            for i, item in enumerate(['Bomb', 'Charge', 'Grapple', 'Gravity', 'HiJump', 'Ice', 'Missile', 'Morph',
                                      'Plasma', 'PowerBomb', 'ScrewAttack', 'SpaceJump', 'Spazer', 'SpeedBooster',
                                      'SpringBall', 'Super', 'Varia', 'Wave', 'XRayScope']):
                # also guard against running past the end of the stats list
                if i >= len(itemsStats) or itemsStats[i][1] != item:
                    itemsStats.insert(i, [itemsStats[0][0], item] + [0]*nbLocs)
    else:
        itemsStats = None
        techniquesStats = None
        difficulties = None
        skillPresetContent = None
        parameters = None
    (randoPresets, tourRandoPresets) = loadRandoPresetsList()
    # remove the fully-random presets whose statistics are useless; guard the
    # removal so a renamed/missing preset file doesn't raise ValueError
    for fullRandom in ("all_random", "quite_random"):
        if fullRandom in randoPresets:
            randoPresets.remove(fullRandom)
    (stdPresets, tourPresets, comPresets) = loadPresetsList()
    return dict(stdPresets=stdPresets, tourPresets=tourPresets,
                randoPresets=randoPresets, tourRandoPresets=tourRandoPresets,
                itemsStats=itemsStats, techniquesStats=techniquesStats,
                categories=Knows.categories, knowsDesc=Knows.desc, skillPresetContent=skillPresetContent,
                locations=locations, parameters=parameters, difficulties=difficulties)
solver: mother brain sprite was missing.
# -*- coding: utf-8 -*-
import sys, os.path
path = os.path.expanduser('~/RandomMetroidSolver')
if os.path.exists(path) and path not in sys.path:
sys.path.append(path)
import datetime, os, hashlib, json, subprocess, tempfile, glob, random
from datetime import datetime, date
from collections import OrderedDict
# to solve the rom
from parameters import easy, medium, hard, harder, hardcore, mania
from parameters import Knows, Settings, Controller, isKnows, isButton
from solver import Conf
from parameters import diff2text, text2diff
from solver import StandardSolver, DifficultyDisplayer, InteractiveSolver
from utils import PresetLoader, removeChars
import db
from graph_access import vanillaTransitions, vanillaBossesTransitions
from utils import isStdPreset
from graph_locations import locations
from smboolmanager import SMBoolManager
from rom import RomPatches
# put an expiration date to the default cookie to have it kept between browser restart
response.cookies['session_id_solver']['expires'] = 31 * 24 * 3600
# use the correct one
# (build the interpreter name from the running version, e.g. "python3.9",
#  so subprocess calls use the same python as the web app)
pythonExec = "python{}.{}".format(sys.version_info.major, sys.version_info.minor)
def maxPresetsReach():
    """True when the community presets folder already holds 2048 files.

    Prevents a spammer from creating presets in a loop and filling the fs.
    """
    return len(os.listdir('community_presets')) >= 2048
def getPresetDir(preset):
    """Directory holding the given preset: shipped standard or user-created."""
    return 'standard_presets' if isStdPreset(preset) else 'community_presets'
def loadPreset():
    # Load the skill parameters to display, in priority order: from the
    # submitted form (Update/Create), from the session-stored dict, or from
    # the preset file named in the session.
    # load conf from session if available
    loaded = False
    if request.vars.action is not None:
        # press solve, load or save button
        if request.vars.action in ['Update', 'Create']:
            # store the changes in case the form won't be accepted
            presetDict = genJsonFromParams(request.vars)
            session.presets['presetDict'] = presetDict
            params = PresetLoader.factory(presetDict).params
            loaded = True
        elif request.vars.action in ['Load']:
            # nothing to load, we'll load the new params file with the load form code
            pass
    else:
        # no forms button pressed
        if session.presets['presetDict'] is not None:
            params = PresetLoader.factory(session.presets['presetDict']).params
            loaded = True
    if not loaded:
        # fall back to the preset file referenced by the session
        params = PresetLoader.factory('{}/{}.json'.format(getPresetDir(session.presets['preset']), session.presets['preset'])).params
    return params
def completePreset(params):
    """Fill in missing knows/settings/controller entries of a loaded preset.

    Mutates params in place, adding default values for anything absent.
    """
    # add missing knows
    for know in Knows.__dict__:
        if isKnows(know) and know not in params['Knows']:
            params['Knows'][know] = Knows.__dict__[know]
    # add missing settings: bosses, hellruns and hard rooms all default to 'Default'
    for settingName in ['Kraid', 'Phantoon', 'Draygon', 'Ridley', 'MotherBrain',
                        'Ice', 'MainUpperNorfair', 'LowerNorfair',
                        'X-Ray', 'Gauntlet']:
        if settingName not in params['Settings']:
            params['Settings'][settingName] = 'Default'
    # add missing controller buttons
    for button in Controller.__dict__:
        if isButton(button) and button not in params['Controller']:
            params['Controller'][button] = Controller.__dict__[button]
def loadPresetsList():
    """Return (standard, tournament, community) skill preset name lists."""
    # community presets come from the filesystem, sorted case-insensitively
    entries = sorted(os.listdir('community_presets'), key=lambda v: v.upper())
    comPresets = [os.path.splitext(entry)[0] for entry in entries if entry != '.git']
    stdPresets = ['noob', 'casual', 'regular', 'veteran', 'speedrunner', 'master']
    tourPresets = ['Season_Races', 'Playoff_Races', 'Playoff_Races_Chozo', 'SMRAT2020']
    return (stdPresets, tourPresets, comPresets)
def loadRandoPresetsList():
    """Return (regular, tournament) rando preset name lists from disk."""
    tourPresets = ['Season_Races', 'Season_Races_Chozo', 'Playoff_Races', 'Playoff_Races_Chozo', 'SMRAT2020']
    entries = sorted(os.listdir('rando_presets'), key=lambda v: v.upper())
    # every file on disk except the tournament ones
    randoPresets = [name for name in (os.path.splitext(entry)[0] for entry in entries)
                    if name not in tourPresets]
    return (randoPresets, tourPresets)
def validatePresetsParams(action):
    """Validate the presets form parameters for the given action.

    Checks the preset name, the password (for Create/Update), the controller
    mapping and the current tab. Returns (True, None) on success,
    (False, errorMessage) otherwise.
    """
    if action == 'Create':
        preset = request.vars.presetCreate
    else:
        preset = request.vars.preset
    if IS_NOT_EMPTY()(preset)[1] is not None:
        return (False, "Preset name is empty")
    if IS_ALPHANUMERIC()(preset)[1] is not None:
        return (False, "Preset name must be alphanumeric: {}".format(preset))
    if IS_LENGTH(32)(preset)[1] is not None:
        return (False, "Preset name must be max 32 chars: {}".format(preset))
    if action in ['Create', 'Update']:
        if IS_NOT_EMPTY()(request.vars.password)[1] is not None:
            return (False, "Password is empty")
        if IS_ALPHANUMERIC()(request.vars.password)[1] is not None:
            return (False, "Password must be alphanumeric")
        if IS_LENGTH(32)(request.vars.password)[1] is not None:
            return (False, "Password must be max 32 chars")
    # check that there's not two buttons for the same action
    # (renamed from 'map' to avoid shadowing the builtin)
    usedButtons = {}
    for button in Controller.__dict__:
        if isButton(button):
            value = request.vars[button]
            if button == "Moonwalk":
                # Moonwalk is a checkbox, not a button mapping
                if value not in [None, 'on']:
                    return (False, "Invalid value for Moonwalk: {}".format(value))
            else:
                if value is None:
                    return (False, "Button {} not set".format(button))
                else:
                    if value in usedButtons:
                        return (False, "Action {} set for two buttons: {} and {}".format(value, button, usedButtons[value]))
                    usedButtons[value] = button
    if request.vars.currenttab not in ['Global', 'Techniques1', 'Techniques2', 'Techniques3', 'Techniques4', 'Techniques5', 'Techniques6', 'Techniques7', 'Mapping']:
        return (False, "Wrong value for current tab: [{}]".format(request.vars.currenttab))
    return (True, None)
def getSkillLevelBarData(preset):
    """Compute the data displayed by the skill-level bar for a preset.

    Returns a dict with the preset's own score, its number of enabled
    techniques, the scores of the standard presets, and DB usage info.
    """
    result = {'standards': {}}
    result['name'] = preset
    try:
        params = PresetLoader.factory('{}/{}.json'.format(getPresetDir(preset), preset)).params
        result['custom'] = (preset, params['score'])
        # add stats on the preset: number of enabled techniques
        result['knowsKnown'] = len([know for know in params['Knows'] if params['Knows'][know][0] == True])
    except Exception:
        # preset unreadable or malformed: show placeholders
        # (was a bare 'except:', which would also swallow SystemExit/KeyboardInterrupt)
        result['custom'] = (preset, 'N/A')
        result['knowsKnown'] = 'N/A'
    # get score of standard presets; use a distinct loop variable so the
    # 'preset' parameter isn't clobbered (the original shadowed it)
    for stdPreset in ['noob', 'casual', 'regular', 'veteran', 'speedrunner', 'master', 'samus']:
        score = PresetLoader.factory('{}/{}.json'.format(getPresetDir(stdPreset), stdPreset)).params['score']
        result['standards'][stdPreset] = score
    DB = db.DB()
    result['generatedSeeds'] = DB.getGeneratedSeeds(result['custom'][0])
    result['lastAction'] = DB.getPresetLastActionDate(result['custom'][0])
    DB.close()
    # TODO: normalize result (or not ?)
    return result
def initPresetsSession():
    """Seed session.presets with defaults on first visit."""
    if session.presets is None:
        session.presets = {
            'preset': 'regular',
            'presetDict': None,
            'currentTab': 'Global',
        }
def updatePresetsSession():
    """Remember in the session which preset the form acted on."""
    if request.vars.action == 'Create':
        session.presets['preset'] = request.vars.presetCreate
    else:
        # fall back to 'regular' when no preset was submitted
        submitted = request.vars.preset
        session.presets['preset'] = 'regular' if submitted == None else submitted
def computeGauntlet(sm, bomb, addVaria):
    # For each Gauntlet hard-room preset, probe how many E-Tanks are required
    # at each difficulty level (-1 means never passable within 18 E-Tanks).
    # sm: item/bool manager; bomb: 'Bomb' or 'PowerBomb'; addVaria: also give Varia.
    result = {}
    for key in Settings.hardRoomsPresets['Gauntlet']:
        # point the active settings at the preset being evaluated
        # (global mutation: the probe below reads Settings.hardRooms)
        Settings.hardRooms['Gauntlet'] = Settings.hardRoomsPresets['Gauntlet'][key]
        sm.resetItems()
        if addVaria == True:
            sm.addItem('Varia')
        sm.addItem(bomb)
        result[key] = {easy: -1, medium: -1, hard: -1, harder: -1, hardcore: -1, mania: -1}
        # add E-Tanks one at a time and record the count at the returned difficulty
        for i in range(18):
            ret = sm.energyReserveCountOkHardRoom('Gauntlet', 0.51 if bomb == 'Bomb' else 1.0)
            if ret.bool == True:
                nEtank = 0
                for item in ret.items:
                    if item.find('ETank') != -1:
                        # item is formatted like "<n>-ETank": extract the count
                        nEtank = int(item[0:item.find('-ETank')])
                        break
                result[key][ret.difficulty] = nEtank
            sm.addItem('ETank')
    return result
def computeXray(sm, addVaria):
    # For each X-Ray hard-room preset, probe how many E-Tanks are required
    # at each difficulty level (-1 means never passable within 18 E-Tanks).
    result = {}
    for key in Settings.hardRoomsPresets['X-Ray']:
        if key == 'Solution':
            # 'Solution' is not an actual preset entry
            continue
        # point the active settings at the preset being evaluated
        Settings.hardRooms['X-Ray'] = Settings.hardRoomsPresets['X-Ray'][key]
        sm.resetItems()
        if addVaria == True:
            sm.addItem('Varia')
        result[key] = {easy: -1, medium: -1, hard: -1, harder: -1, hardcore: -1, mania: -1}
        # add E-Tanks one at a time and record the count at the returned difficulty
        for i in range(18):
            ret = sm.energyReserveCountOkHardRoom('X-Ray')
            if ret.bool == True:
                nEtank = 0
                for item in ret.items:
                    if item.find('ETank') != -1:
                        # item is formatted like "<n>-ETank": extract the count
                        nEtank = int(item[0:item.find('-ETank')])
                        break
                result[key][ret.difficulty] = nEtank
            sm.addItem('ETank')
    return result
def computeHardRooms(hardRooms):
    """Precompute E-Tank requirements for the hard rooms (X-Ray, Gauntlet).

    Fills the given dict in place and returns it.
    """
    # add gravity patch (as we add it by default in the randomizer)
    RomPatches.ActivePatches.append(RomPatches.NoGravityEnvProtection)
    sm = SMBoolManager()
    # X-Ray room, suitless and with Varia
    hardRooms['X-Ray'] = {
        'Suitless': computeXray(sm, False),
        'Varia': computeXray(sm, True),
    }
    # gauntlet, per bomb type, suitless and with Varia
    hardRooms['Gauntlet'] = {
        'SuitlessBomb': computeGauntlet(sm, 'Bomb', False),
        'SuitlessPowerBomb': computeGauntlet(sm, 'PowerBomb', False),
        'VariaBomb': computeGauntlet(sm, 'Bomb', True),
        'VariaPowerBomb': computeGauntlet(sm, 'PowerBomb', True),
    }
    return hardRooms
def addCF(sm, count):
    """Give the manager Morph + PowerBomb, plus ammo for `count` Crystal Flashes."""
    sm.addItem('Morph')
    sm.addItem('PowerBomb')
    # per Crystal Flash: two Missile packs, two Super packs, two PowerBomb packs
    perCF = ('Missile', 'Missile', 'Super', 'Super', 'PowerBomb', 'PowerBomb')
    for _ in range(count):
        for ammo in perCF:
            sm.addItem(ammo)
def computeHellruns(hellRuns):
    # For each hellrun preset, compute the E-Tank count required at each
    # difficulty level (-1 = not passable within 19 E-Tanks). Fills the given
    # dict in place. Lower Norfair is handled separately as it also depends
    # on Crystal Flash count and Screw Attack (see computeLNHellRun).
    sm = SMBoolManager()
    for hellRun in ['Ice', 'MainUpperNorfair']:
        hellRuns[hellRun] = {}
        for (actualHellRun, params) in Settings.hellRunsTable[hellRun].items():
            hellRuns[hellRun][actualHellRun] = {}
            for (key, difficulties) in Settings.hellRunPresets[hellRun].items():
                if key == 'Solution':
                    # 'Solution' is not an actual preset entry
                    continue
                # global mutation: canHellRun below reads Settings.hellRuns
                Settings.hellRuns[hellRun] = difficulties
                hellRuns[hellRun][actualHellRun][key] = {easy: -1, medium: -1, hard: -1, harder: -1, hardcore: -1, mania: -1}
                if difficulties == None:
                    continue
                sm.resetItems()
                # add E-Tanks one at a time, recording counts per difficulty
                for etank in range(19):
                    ret = sm.canHellRun(**params)
                    if ret.bool == True:
                        nEtank = 0
                        for item in ret.items:
                            if item.find('ETank') != -1:
                                # item is formatted like "<n>-ETank": extract the count
                                nEtank = int(item[0:item.find('-ETank')])
                                break
                        hellRuns[hellRun][actualHellRun][key][ret.difficulty] = nEtank
                    sm.addItem('ETank')
    # Lower Norfair: with/without Screw Attack
    hellRun = 'LowerNorfair'
    hellRuns[hellRun] = {}
    hellRuns[hellRun]["NoScrew"] = computeLNHellRun(sm, False)
    hellRuns[hellRun]["Screw"] = computeLNHellRun(sm, True)
def getNearestDifficulty(difficulty):
    """Map a raw difficulty value onto the nearest-lower standard step."""
    epsilon = 0.001
    # thresholds ordered from easiest to hardest; first match wins
    steps = [(medium, easy), (hard, medium), (harder, hard),
             (hardcore, harder), (mania, hardcore)]
    for (ceiling, level) in steps:
        if difficulty < ceiling - epsilon:
            return level
    return mania
def computeLNHellRun(sm, addScrew):
    # Compute Lower Norfair hellrun requirements: for each preset, the number
    # of E-Tanks and Crystal Flashes needed at each difficulty level
    # (-1 = not passable). addScrew: also give Screw Attack.
    result = {}
    hellRun = 'LowerNorfair'
    for (actualHellRun, params) in Settings.hellRunsTable[hellRun].items():
        result[actualHellRun] = {}
        for (key, difficulties) in Settings.hellRunPresets[hellRun].items():
            if key == 'Solution':
                # 'Solution' is not an actual preset entry
                continue
            # global mutation: canHellRun below reads Settings.hellRuns
            Settings.hellRuns[hellRun] = difficulties
            result[actualHellRun][key] = {'ETank': {easy: -1, medium: -1, hard: -1, harder: -1, hardcore: -1, mania: -1}, 'CF': {easy: -1, medium: -1, hard: -1, harder: -1, hardcore: -1, mania: -1}}
            if difficulties == None:
                continue
            # try with 3, 2 then 1 Crystal Flashes worth of ammo
            for cf in range(3, 0, -1):
                sm.resetItems()
                if addScrew == True:
                    sm.addItem('ScrewAttack')
                addCF(sm, cf)
                # add E-Tanks one at a time, recording counts per difficulty
                for etank in range(19):
                    ret = sm.canHellRun(**params)
                    if ret.bool == True:
                        nEtank = 0
                        for item in ret.items:
                            if item.find('ETank') != -1:
                                # item is formatted like "<n>-ETank": extract the count
                                nEtank = int(item[0:item.find('-ETank')])
                                break
                        result[actualHellRun][key]['ETank'][getNearestDifficulty(ret.difficulty)] = nEtank
                        result[actualHellRun][key]['CF'][getNearestDifficulty(ret.difficulty)] = cf
                    sm.addItem('ETank')
    return result
def presets():
    # Presets (skill settings) page controller: displays a preset and handles
    # the Load/Update/Create form actions. Community presets are protected by
    # a sha256-hashed password stored in the preset file.
    initPresetsSession()
    # use web2py builtin cache to avoid recomputing the hardrooms requirements
    hardRooms = cache.ram('hardRooms', lambda:dict(), time_expire=None)
    if len(hardRooms) == 0:
        computeHardRooms(hardRooms)
    hellRuns = cache.ram('hellRuns', lambda:dict(), time_expire=None)
    if len(hellRuns) == 0:
        computeHellruns(hellRuns)
    if request.vars.action is not None:
        # a form button was pressed (Load/Update/Create)
        (ok, msg) = validatePresetsParams(request.vars.action)
        if not ok:
            session.flash = msg
            redirect(URL(r=request, f='presets'))
        else:
            session.presets['currentTab'] = request.vars.currenttab
        if request.vars.action == 'Create':
            preset = request.vars.presetCreate
        else:
            preset = request.vars.preset
        # in web2py.js, in disableElement, remove 'working...' to have action with correct value
        if request.vars.action == 'Load':
            # check that the presets file exists
            fullPath = '{}/{}.json'.format(getPresetDir(preset), preset)
            if os.path.isfile(fullPath):
                # load it
                try:
                    params = PresetLoader.factory(fullPath).params
                    updatePresetsSession()
                    session.presets["presetDict"] = None
                except Exception as e:
                    session.flash = "L:Error loading the preset {}: {}".format(preset, e)
            else:
                session.flash = "Presets file not found: {}".format(fullPath)
            redirect(URL(r=request, f='presets'))
        elif request.vars.action in ['Update', 'Create']:
            # check if the presets file already exists
            password = request.vars['password']
            password = password.encode('utf-8')
            passwordSHA256 = hashlib.sha256(password).hexdigest()
            fullPath = '{}/{}.json'.format(getPresetDir(preset), preset)
            if os.path.isfile(fullPath):
                # load it
                end = False
                try:
                    oldParams = PresetLoader.factory(fullPath).params
                except Exception as e:
                    session.flash = "UC:Error loading the preset {}: {}".format(preset, e)
                    end = True
                if end == True:
                    redirect(URL(r=request, f='presets'))
                # check if password match
                if 'password' in oldParams and passwordSHA256 == oldParams['password']:
                    # update the presets file
                    paramsDict = genJsonFromParams(request.vars)
                    paramsDict['password'] = passwordSHA256
                    try:
                        PresetLoader.factory(paramsDict).dump(fullPath)
                        # record the update in the DB for stats
                        DB = db.DB()
                        DB.addPresetAction(preset, 'update')
                        DB.close()
                        updatePresetsSession()
                        session.flash = "Preset {} updated".format(preset)
                    except Exception as e:
                        session.flash = "Error writing the preset {}: {}".format(preset, e)
                    redirect(URL(r=request, f='presets'))
                else:
                    session.flash = "Password mismatch with existing presets file {}".format(preset)
                    redirect(URL(r=request, f='presets'))
            else:
                # check that there's no more than 2K presets (there's less than 2K sm rando players in the world)
                if not maxPresetsReach():
                    # write the presets file
                    paramsDict = genJsonFromParams(request.vars)
                    paramsDict['password'] = passwordSHA256
                    try:
                        PresetLoader.factory(paramsDict).dump(fullPath)
                        # record the creation in the DB for stats
                        DB = db.DB()
                        DB.addPresetAction(preset, 'create')
                        DB.close()
                        updatePresetsSession()
                        session.flash = "Preset {} created".format(preset)
                    except Exception as e:
                        session.flash = "Error writing the preset {}: {}".format(preset, e)
                    redirect(URL(r=request, f='presets'))
                else:
                    session.flash = "Sorry, there's already 2048 presets on the website, can't add more"
                    redirect(URL(r=request, f='presets'))
    # set title
    response.title = 'Super Metroid VARIA Presets'
    # load conf from session if available
    error = False
    try:
        params = loadPreset()
    except Exception as e:
        session.presets['preset'] = 'regular'
        session.flash = "S:Error loading the preset: {}".format(e)
        error = True
    if error == True:
        redirect(URL(r=request, f='presets'))
    # load presets list
    (stdPresets, tourPresets, comPresets) = loadPresetsList()
    # add missing knows/settings
    completePreset(params)
    # compute score for skill bar
    skillBarData = getSkillLevelBarData(session.presets['preset'])
    # send values to view
    return dict(desc=Knows.desc, difficulties=diff2text,
                categories=Knows.categories, settings=params['Settings'], knows=params['Knows'],
                easy=easy, medium=medium, hard=hard, harder=harder, hardcore=hardcore, mania=mania,
                controller=params['Controller'], stdPresets=stdPresets, tourPresets=tourPresets,
                comPresets=comPresets, skillBarData=skillBarData, hardRooms=hardRooms, hellRuns=hellRuns)
def initSolverSession():
    """Create the solver session with its default settings on first access.

    Does nothing when session.solver already exists (web2py keeps it
    across requests).
    """
    if session.solver is None:
        session.solver = {
            'preset': 'regular',
            'difficultyTarget': Conf.difficultyTarget,
            'pickupStrategy': Conf.itemsPickup,
            'itemsForbidden': [],
            'romFiles': [],
            'romFile': None,
            'result': None,
            'complexity': 'simple'
        }
def updateSolverSession():
    """Refresh the solver session from the submitted form parameters."""
    if session.solver is None:
        session.solver = {}
    session.solver.update({
        'preset': request.vars.preset,
        'difficultyTarget': text2diff[request.vars.difficultyTarget],
        'pickupStrategy': request.vars.pickupStrategy,
        'complexity': request.vars.complexity
    })
    # an item is forbidden when its "<item>_bool" checkbox was submitted
    allItems = ['ETank', 'Missile', 'Super', 'PowerBomb', 'Bomb', 'Charge', 'Ice', 'HiJump', 'SpeedBooster', 'Wave', 'Spazer', 'SpringBall', 'Varia', 'Plasma', 'Grapple', 'Morph', 'Reserve', 'Gravity', 'XRayScope', 'SpaceJump', 'ScrewAttack']
    session.solver['itemsForbidden'] = [item for item in allItems
                                        if request.vars[item+"_bool"] is not None]
def getROMsList():
    """Return the .sfc names of the ROMs uploaded during this session.

    Only ROMs present both on disk (roms/) and in the session's romFiles
    list are returned, so users only see their own uploads.
    """
    sessionRoms = session.solver['romFiles']
    if sessionRoms is None:
        # repair a missing list while returning nothing
        session.solver['romFiles'] = []
        return []
    if len(sessionRoms) == 0:
        return []
    bases = [os.path.splitext(f)[0] for f in sorted(os.listdir('roms'))]
    return ['{}.sfc'.format(base) for base in bases if base in sessionRoms]
def getLastSolvedROM():
    """Return the file name of the last solved ROM, or None if none yet."""
    base = session.solver['romFile']
    return '{}.sfc'.format(base) if base is not None else None
def genPathTable(locations, displayAPs=True):
    """Render the solver's visited locations as an HTML table.

    locations: list of (location, area, subarea, item, diff, techniques,
               items, path, _class) tuples; location is a (name, room) pair.
    displayAPs: when True, insert a grey row showing the path walked
                between access points before each location row.
    Returns the HTML string, or None when there is nothing to display.
    """
    if locations is None or len(locations) == 0:
        return None
    lastAP = None
    pathTable = """
<table class="full">
  <colgroup>
    <col class="locName" /><col class="area" /><col class="subarea" /><col class="item" /><col class="difficulty" /><col class="knowsUsed" /><col class="itemsUsed" />
  </colgroup>
  <tr>
    <th>Location Name</th><th>Area</th><th>SubArea</th><th>Item</th><th>Difficulty</th><th>Techniques used</th><th>Items used</th>
  </tr>
"""
    for location, area, subarea, item, diff, techniques, items, path, _class in locations:
        if path is not None:
            lastAP = path[-1]
            # skip the path row when it is just the current access point
            if displayAPs == True and not (len(path) == 1 and path[0] == lastAP):
                pathTable += """<tr class="grey"><td>Path</td><td colspan="6">{}</td></tr>\n""".format(" -> ".join(path))
        (name, room) = location
        # not picked up items start with an '-'
        if item[0] != '-':
            pathTable += """
<tr class="{}">
  <td>{}</td>
  <td>{}</td>
  <td>{}</td>
  <td>{}</td>
  <td>{}</td>
  <td>{}</td>
  <td>{}</td>
</tr>
""".format(item, getRoomLink(name, room), getAreaLink(area), subarea,
           getBossImg(name) if "Boss" in _class else getItemImg(item), diff,
           getTechniques(techniques), getItems(items))
        else:
            # skipped item: strike the item name, no techniques/items columns
            pathTable += """
<tr class="{}">
  <td>{}</td>
  <td>{}</td>
  <td>{}</td>
  <td><div class="linethrough">{}</div></td>
  <td>{}</td>
  <td></td>
  <td></td>
</tr>
""".format(item, getRoomLink(name, room), getAreaLink(area), subarea, item, diff)
    pathTable += "</table>"
    return pathTable
def getItems(items):
    """Render a list of items as HTML images.

    Entries starting with a digit are "count-item" pairs (etanks/reserves)
    and keep their count in front of the image.
    """
    chunks = []
    for item in items:
        if '0' <= item[0] <= '9':
            # for etanks and reserves
            sep = item.find('-')
            chunks.append("<span>{}-{}</span>".format(item[:sep], getItemImg(item[sep+1:], True)))
        else:
            chunks.append(getItemImg(item, True))
    return "".join(chunks)
def getTechniques(techniques):
    """Render techniques as wiki links when a href is known, plain text otherwise."""
    chunks = []
    for tech in techniques:
        desc = Knows.desc.get(tech)
        if desc is not None and desc['href'] != None:
            chunks.append(""" <a class="marginKnows" href="{}" target="_blank">{}</a>""".format(desc['href'], tech))
        else:
            chunks.append(""" {}""".format(tech))
    return "".join(chunks)
def getRoomLink(name, room):
    """Build a supermetroid.run wiki link for a room, with a thumbnail preview.

    The wiki page uses underscores and %27 for quotes; the thumbnail image
    name strips spaces, dashes and quotes entirely.
    """
    wikiPage = room.replace(' ', '_').replace("'", '%27')
    imageName = room.replace(' ', '').replace('-', '').replace("'", '')
    template = """<a target="_blank" href="https://wiki.supermetroid.run/{}" data-thumbnail-src="/solver/static/images/{}.png" class="room">{}</a>"""
    return template.format(wikiPage, imageName, name)
def getAreaLink(name):
    """Build a fandom wiki link for an area.

    A couple of internal area names map to different wiki page names; the
    thumbnail and displayed text always use the internal name.
    """
    pageNames = {"WreckedShip": "Wrecked_Ship", "LowerNorfair": "Norfair"}
    url = pageNames.get(name, name)
    return """<a target="_blank" href="https://metroid.fandom.com/wiki/{}" data-thumbnail-src="/solver/static/images/{}.png" class="area">{}</a>""".format(url, name, name)
def getBossImg(boss):
    """Return an <img> tag for a boss picture (file name has spaces removed)."""
    return """<img alt="{0}" class="imageBoss" src="/solver/static/images/{1}.png" title="{0}" />""".format(boss, boss.replace(' ', ''))
def getItemImg(item, small=False):
    """Return an <img> tag for an item; small selects the compact CSS class."""
    cssClass = "imageItems" if small == True else "imageItem"
    return """<img alt="{0}" class="{1}" src="/solver/static/images/{0}.png" title="{0}" />""".format(item, cssClass)
def prepareResult():
    """Consume the solver result stored in the session and prepare it for the view.

    Adds a human readable 'resultText' and the HTML path tables, then
    clears session.solver['result'] so the result is only shown once.
    Returns the enriched result dict, or None when there is no pending result.
    """
    if session.solver['result'] is not None:
        result = session.solver['result']
        # utf8 files
        if sys.version_info.major == 2:
            result['randomizedRom'] = result['randomizedRom'].encode('utf8', 'replace')
        # difficulty == -1 means the solver could not finish the seed
        if result['difficulty'] == -1:
            result['resultText'] = "The ROM \"{}\" is not finishable with the known techniques".format(result['randomizedRom'])
        else:
            if result['itemsOk'] is False:
                result['resultText'] = "The ROM \"{}\" is finishable but not all the requested items can be picked up with the known techniques. Estimated difficulty is: ".format(result['randomizedRom'])
            else:
                result['resultText'] = "The ROM \"{}\" estimated difficulty is: ".format(result['randomizedRom'])
        # add generated path (spoiler !)
        result['pathTable'] = genPathTable(result['generatedPath'])
        result['pathremainTry'] = genPathTable(result['remainTry'])
        result['pathremainMajors'] = genPathTable(result['remainMajors'], False)
        result['pathremainMinors'] = genPathTable(result['remainMinors'], False)
        result['pathskippedMajors'] = genPathTable(result['skippedMajors'], False)
        result['pathunavailMajors'] = genPathTable(result['unavailMajors'], False)
        # display the result only once
        session.solver['result'] = None
    else:
        result = None
    return result
def validateSolverParams():
    """Validate all solver form parameters.

    Returns (True, None) when everything is valid, otherwise
    (False, error message) describing the first invalid parameter.
    """
    for param in ['difficultyTarget', 'pickupStrategy', 'complexity']:
        if request.vars[param] is None:
            return (False, "Missing parameter {}".format(param))
    if request.vars.preset == None:
        return (False, "Missing parameter preset")
    preset = request.vars.preset
    if IS_ALPHANUMERIC()(preset)[1] is not None:
        return (False, "Wrong value for preset, must be alphanumeric")
    if IS_LENGTH(maxsize=32, minsize=1)(preset)[1] is not None:
        return (False, "Wrong length for preset, name must be between 1 and 32 characters")
    # check that preset exists
    fullPath = '{}/{}.json'.format(getPresetDir(preset), preset)
    if not os.path.isfile(fullPath):
        return (False, "Unknown preset: {}".format(preset))
    difficultyTargetChoices = ["easy", "medium", "hard", "very hard", "hardcore", "mania"]
    if request.vars.difficultyTarget not in difficultyTargetChoices:
        return (False, "Wrong value for difficultyTarget: {}, authorized values: {}".format(request.vars.difficultyTarget, difficultyTargetChoices))
    pickupStrategyChoices = ["all", "minimal", "any"]
    if request.vars.pickupStrategy not in pickupStrategyChoices:
        # bug fix: was 'pickupStrategyChoice' (undefined name -> NameError on invalid input)
        return (False, "Wrong value for pickupStrategy: {}, authorized values: {}".format(request.vars.pickupStrategy, pickupStrategyChoices))
    complexityChoices = ["simple", "advanced"]
    if request.vars.complexity not in complexityChoices:
        return (False, "Wrong value for complexity: {}, authorized values: {}".format(request.vars.complexity, complexityChoices))
    itemsForbidden = []
    for item in ['ETank', 'Missile', 'Super', 'PowerBomb', 'Bomb', 'Charge', 'Ice', 'HiJump', 'SpeedBooster', 'Wave', 'Spazer', 'SpringBall', 'Varia', 'Plasma', 'Grapple', 'Morph', 'Reserve', 'Gravity', 'XRayScope', 'SpaceJump', 'ScrewAttack']:
        boolvar = request.vars[item+"_bool"]
        if boolvar is not None:
            if boolvar != 'on':
                return (False, "Wrong value for {}: {}, authorized values: on/off".format(item, boolvar))
    # one of the three ROM sources must be provided
    if request.vars.romJson is None and request.vars.uploadFile is None and request.vars.romFile is None:
        return (False, "Missing ROM to solve")
    if request.vars.romFile is not None:
        if IS_LENGTH(maxsize=255, minsize=1)(request.vars.romFile)[1] is not None:
            # message fixed to match the validator (maxsize=255)
            return (False, "Wrong length for romFile, name must be between 1 and 255 characters: {}".format(request.vars.romFile))
    if request.vars.romJson is not None and len(request.vars.romJson) > 0:
        try:
            json.loads(request.vars.romJson)
        except:
            return (False, "Wrong value for romJson, must be a JSON string: [{}]".format(request.vars.romJson))
    if request.vars.uploadFile is not None:
        if type(request.vars.uploadFile) == str:
            if IS_MATCH('[a-zA-Z0-9_\.() ,\-]*', strict=True)(request.vars.uploadFile)[1] is not None:
                return (False, "Wrong value for uploadFile, must be a valid file name: {}".format(request.vars.uploadFile))
            if IS_LENGTH(maxsize=256, minsize=1)(request.vars.uploadFile)[1] is not None:
                # message fixed to match the validator (maxsize=256)
                return (False, "Wrong length for uploadFile, name must be between 1 and 256 characters")
    return (True, None)
def generateJsonROM(romJsonStr):
    """Persist an uploaded ROM's json dump under roms/.

    romJsonStr: json string containing the ROM data plus a 'romFileName' key.
    Returns (base name without extension, path of the written json file).
    """
    romJson = json.loads(romJsonStr)
    # handle filename with utf8 characters in it
    romFileName = romJson["romFileName"]
    if sys.version_info.major == 2:
        romFileName = romFileName.encode('utf8', 'replace')
    base = os.path.splitext(romFileName)[0]
    jsonRomFileName = 'roms/{}.json'.format(base)
    # the file name is not part of the stored data
    del romJson["romFileName"]
    with open(jsonRomFileName, 'w') as jsonFile:
        json.dump(romJson, jsonFile)
    return (base, jsonRomFileName)
def solver():
    """Solver page controller.

    On a 'Solve' POST: validates the parameters, stores/locates the ROM's
    json dump, runs the solver and stashes the result in the session, then
    redirects (POST-redirect-GET). On GET: renders the page with any
    pending result.
    """
    # init session
    initSolverSession()
    if request.vars.action == 'Solve':
        (ok, msg) = validateSolverParams()
        if not ok:
            session.flash = msg
            redirect(URL(r=request, f='solver'))
        updateSolverSession()
        preset = request.vars.preset
        # new uploaded rom ?
        error = False
        if request.vars['romJson'] != '':
            try:
                (base, jsonRomFileName) = generateJsonROM(request.vars['romJson'])
                session.solver['romFile'] = base
                if base not in session.solver['romFiles']:
                    session.solver['romFiles'].append(base)
            except Exception as e:
                print("Error loading the ROM file, exception: {}".format(e))
                session.flash = "Error loading the json ROM file"
                error = True
        elif request.vars['romFile'] is not None and len(request.vars['romFile']) != 0:
            # reuse a previously uploaded ROM
            session.solver['romFile'] = os.path.splitext(request.vars['romFile'])[0]
            jsonRomFileName = 'roms/' + session.solver['romFile'] + '.json'
        else:
            session.flash = "No rom file selected for upload"
            error = True
        if not error:
            # check that the json file exists
            if not os.path.isfile(jsonRomFileName):
                session.flash = "Missing json ROM file on the server"
            else:
                try:
                    (ok, result) = computeDifficulty(jsonRomFileName, preset)
                    if not ok:
                        session.flash = result
                        redirect(URL(r=request, f='solver'))
                    session.solver['result'] = result
                except Exception as e:
                    print("Error loading the ROM file, exception: {}".format(e))
                    session.flash = "Error loading the ROM file"
        redirect(URL(r=request, f='solver'))
    # display result
    result = prepareResult()
    # set title
    response.title = 'Super Metroid VARIA Solver'
    ROMs = getROMsList()
    # last solved ROM
    lastRomFile = getLastSolvedROM()
    # load presets list
    (stdPresets, tourPresets, comPresets) = loadPresetsList()
    # send values to view
    return dict(desc=Knows.desc, stdPresets=stdPresets, tourPresets=tourPresets, comPresets=comPresets, roms=ROMs,
                lastRomFile=lastRomFile, difficulties=diff2text, categories=Knows.categories,
                result=result,
                easy=easy, medium=medium, hard=hard, harder=harder, hardcore=hardcore, mania=mania)
def genJsonFromParams(vars):
    """Build a preset parameters dict from the submitted form variables.

    vars: the request variables (web2py Storage).
    Returns a dict with 'Knows' (name -> [enabled, difficulty]),
    'Settings' (hellruns/bosses/rooms) and 'Controller' (button mapping)
    sections, ready to be dumped as a preset json file.
    """
    paramsDict = {'Knows': {}, 'Settings': {}, 'Controller': {}}
    # Knows
    for var in Knows.__dict__:
        if isKnows(var):
            boolVar = vars[var+"_bool"]
            if boolVar is None:
                # unchecked checkbox -> technique disabled
                paramsDict['Knows'][var] = [False, 0]
            else:
                diffVar = vars[var+"_diff"]
                if diffVar is not None:
                    paramsDict['Knows'][var] = [True, text2diff[diffVar]]
    # Settings
    for hellRun in ['Ice', 'MainUpperNorfair', 'LowerNorfair']:
        value = vars[hellRun]
        if value is not None:
            paramsDict['Settings'][hellRun] = value
    for boss in ['Kraid', 'Phantoon', 'Draygon', 'Ridley', 'MotherBrain']:
        value = vars[boss]
        if value is not None:
            paramsDict['Settings'][boss] = value
    for room in ['X-Ray', 'Gauntlet']:
        value = vars[room]
        if value is not None:
            paramsDict['Settings'][room] = value
    # Controller
    for button in Controller.__dict__:
        if isButton(button):
            value = vars[button]
            if value is None:
                # missing value -> keep the default mapping
                paramsDict['Controller'][button] = Controller.__dict__[button]
            else:
                if button == "Moonwalk":
                    # Moonwalk is a checkbox, not a button assignment
                    if value != None and value == "on":
                        paramsDict['Controller'][button] = True
                    else:
                        paramsDict['Controller'][button] = False
                else:
                    paramsDict['Controller'][button] = value
    return paramsDict
def computeDifficulty(jsonRomFileName, preset):
    """Run the external solver on a ROM json dump with the given preset.

    Records parameters, duration and result in the DB.
    Returns (True, result dict) on success, (False, error string) otherwise.
    """
    randomizedRom = os.path.basename(jsonRomFileName.replace('json', 'sfc'))
    presetFileName = "{}/{}.json".format(getPresetDir(preset), preset)
    # temp file to receive the solver's json output
    (fd, jsonFileName) = tempfile.mkstemp()
    DB = db.DB()
    id = DB.initSolver()
    params = [
        pythonExec, os.path.expanduser("~/RandomMetroidSolver/solver.py"),
        '-r', str(jsonRomFileName),
        '--preset', presetFileName,
        '--difficultyTarget', str(session.solver['difficultyTarget']),
        '--pickupStrategy', session.solver['pickupStrategy'],
        '--type', 'web',
        '--output', jsonFileName
    ]
    for item in session.solver['itemsForbidden']:
        params += ['--itemsForbidden', item]
    DB.addSolverParams(id, randomizedRom, preset, session.solver['difficultyTarget'],
                       session.solver['pickupStrategy'], session.solver['itemsForbidden'])
    print("before calling solver: {}".format(params))
    start = datetime.now()
    ret = subprocess.call(params)
    end = datetime.now()
    duration = (end - start).total_seconds()
    print("ret: {}, duration: {}s".format(ret, duration))
    if ret == 0:
        with open(jsonFileName) as jsonFile:
            result = json.load(jsonFile)
    else:
        result = "Solver: something wrong happened while solving the ROM"
    DB.addSolverResult(id, ret, duration, result)
    DB.close()
    # clean up the temp output file
    os.close(fd)
    os.remove(jsonFileName)
    return (ret == 0, result)
def infos():
    """Infos page controller: only the page title is needed by the view."""
    response.title = 'Super Metroid VARIA Randomizer and Solver'
    return {}
def initRandomizerSession():
    """Create the randomizer session with its default settings on first access."""
    if session.randomizer is None:
        session.randomizer = {
            'complexity': "simple",
            'preset': 'regular',
            'randoPreset': "",
            'majorsSplit': "Full",
            'maxDifficulty': 'hardcore',
            'progressionSpeed': "medium",
            'progressionDifficulty': 'normal',
            'morphPlacement': "early",
            'suitsRestriction': "on",
            'hideItems': "off",
            'strictMinors': "off",
            'missileQty': "3",
            'superQty': "2",
            'powerBombQty': "1",
            'minorQty': "100",
            'energyQty': "vanilla",
            'areaRandomization': "off",
            'areaLayout': "off",
            'bossRandomization': "off",
            'funCombat': "off",
            'funMovement': "off",
            'funSuits': "off",
            'layoutPatches': "on",
            'variaTweaks': "on",
            'gravityBehaviour': "Balanced",
            'nerfedCharge': "off",
            'itemsounds': "on",
            'elevators_doors_speed': "on",
            'spinjumprestart': "off",
            'rando_speed': "off",
            'startLocation': "Landing Site",
            'animals': "off",
            'No_Music': "off"
        }
def randomizer():
    """Randomizer page controller: load preset lists and render the form."""
    response.title = 'Super Metroid VARIA Randomizer'
    initRandomizerSession()
    std, tour, com = loadPresetsList()
    rando, tourRando = loadRandoPresetsList()
    # add empty entry for default value
    rando.append("")
    return dict(stdPresets=std, tourPresets=tour, comPresets=com,
                randoPresets=rando, tourRandoPresets=tourRando)
def raiseHttp(code, msg, isJson=False):
    """Abort the request with an HTTP error; json-encode msg when isJson."""
    payload = json.dumps(msg) if isJson is True else msg
    raise HTTP(code, payload)
def getInt(param, isJson=False):
    """Read an int request parameter; raise HTTP 400 on a bad value."""
    value = request.vars[param]
    try:
        return int(value)
    except:
        raiseHttp(400, "Wrong value for {}: {}, must be an int".format(param, value), isJson)
def getFloat(param, isJson=False):
    """Read a float request parameter; raise HTTP 400 on a bad value."""
    value = request.vars[param]
    try:
        return float(value)
    except:
        raiseHttp(400, "Wrong value for {}: {}, must be a float".format(param, value), isJson)
def validateWebServiceParams(switchs, quantities, others, isJson=False):
    """Validate randomizer web-service parameters; raise HTTP 400 on error.

    switchs: names of on/off/random parameters.
    quantities: names of numeric (1.0-9.0 or 'random') parameters.
    others: names of the remaining required parameters; their specific
            validations below only run when the name is listed.
    isJson: when True, error messages are json-encoded by raiseHttp.
    """
    parameters = switchs + quantities + others
    for param in parameters:
        if request.vars[param] is None:
            raiseHttp(400, "Missing parameter: {}".format(param), isJson)
    for switch in switchs:
        if request.vars[switch] not in ['on', 'off', 'random']:
            # message fixed to list all accepted values (random was accepted but undocumented)
            raiseHttp(400, "Wrong value for {}: {}, authorized values: on/off/random".format(switch, request.vars[switch]), isJson)
    for qty in quantities:
        if request.vars[qty] == 'random':
            continue
        qtyFloat = getFloat(qty, isJson)
        if qtyFloat < 1.0 or qtyFloat > 9.0:
            # bug fix: message was json.dumps'ed here AND in raiseHttp, double-encoding it
            raiseHttp(400, "Wrong value for {}: {}, must be between 1 and 9".format(qty, request.vars[qty]), isJson)
    if 'complexity' in others:
        if request.vars['complexity'] not in ['simple', 'medium', 'advanced']:
            raiseHttp(400, "Wrong value for complexity: {}, authorized values simple/medium/advanced".format(request.vars['complexity']), isJson)
    if 'paramsFileTarget' in others:
        try:
            json.loads(request.vars.paramsFileTarget)
        except:
            raiseHttp(400, "Wrong value for paramsFileTarget, must be a JSON string", isJson)
    if 'seed' in others:
        seedInt = getInt('seed', isJson)
        if seedInt < 0 or seedInt > 9999999:
            # bug fix: was request.vars[seed] -> NameError ('seed' undefined here)
            raiseHttp(400, "Wrong value for seed: {}, must be between 0 and 9999999".format(request.vars.seed), isJson)
    preset = request.vars.preset
    if preset != None:
        if IS_ALPHANUMERIC()(preset)[1] is not None:
            raiseHttp(400, "Wrong value for preset, must be alphanumeric", isJson)
        if IS_LENGTH(maxsize=32, minsize=1)(preset)[1] is not None:
            raiseHttp(400, "Wrong length for preset, name must be between 1 and 32 characters", isJson)
        # check that preset exists
        fullPath = '{}/{}.json'.format(getPresetDir(preset), preset)
        if not os.path.isfile(fullPath):
            raiseHttp(400, "Unknown preset: {}".format(preset), isJson)
    randoPreset = request.vars.randoPreset
    if randoPreset != None and len(randoPreset) > 0:
        if IS_ALPHANUMERIC()(randoPreset)[1] is not None:
            raiseHttp(400, "Wrong value for randoPreset, must be alphanumeric", isJson)
        if IS_LENGTH(maxsize=32, minsize=1)(randoPreset)[1] is not None:
            raiseHttp(400, "Wrong length for randoPreset, name must be between 1 and 32 characters", isJson)
        # check that randoPreset exists
        fullPath = 'rando_presets/{}.json'.format(randoPreset)
        if not os.path.isfile(fullPath):
            raiseHttp(400, "Unknown randoPreset: {}".format(randoPreset), isJson)
    # check race mode
    if 'raceMode' in request.vars:
        if request.vars.raceMode not in ['on', 'off']:
            raiseHttp(400, "Wrong value for race mode: {}, must on/off".format(request.vars.raceMode), isJson)
    if 'majorsSplit' in others:
        if request.vars['majorsSplit'] not in ['Full', 'Major', 'Chozo', 'random']:
            raiseHttp(400, "Wrong value for majorsSplit: {}, authorized values Full/Major/Chozo/random".format(request.vars['majorsSplit']), isJson)
    if request.vars['maxDifficulty'] is not None:
        if request.vars.maxDifficulty not in ['no difficulty cap', 'easy', 'medium', 'hard', 'harder', 'hardcore', 'mania', 'random']:
            raiseHttp(400, "Wrong value for difficulty_target, authorized values: no difficulty cap/easy/medium/hard/harder/hardcore/mania", isJson)
    if 'progressionSpeed' in others:
        # progressionSpeed can be a comma separated list of speeds
        for progSpeed in request.vars['progressionSpeed'].split(','):
            if progSpeed not in ['slowest', 'slow', 'medium', 'fast', 'fastest', 'random', 'basic', 'VARIAble']:
                raiseHttp(400, "Wrong value for progressionSpeed: {}, authorized values slowest/slow/medium/fast/fastest/basic/VARIAble".format(progSpeed), isJson)
    if 'progressionDifficulty' in others:
        if request.vars['progressionDifficulty'] not in ['easier', 'normal', 'harder', 'random']:
            raiseHttp(400, "Wrong value for progressionDifficulty: {}, authorized values easier/normal/harder".format(request.vars['progressionDifficulty']), isJson)
    if 'morphPlacement' in others:
        if request.vars['morphPlacement'] not in ['early', 'late', 'normal', 'random']:
            raiseHttp(400, "Wrong value for morphPlacement: {}, authorized values early/late/normal".format(request.vars['morphPlacement']), isJson)
    if request.vars.minorQty not in ['random', None]:
        minorQtyInt = getInt('minorQty', isJson)
        if minorQtyInt < 7 or minorQtyInt > 100:
            raiseHttp(400, "Wrong value for minorQty, must be between 7 and 100", isJson)
    if 'energyQty' in others:
        if request.vars.energyQty not in ['sparse', 'medium', 'vanilla', 'random']:
            raiseHttp(400, "Wrong value for energyQty: authorized values: sparse/medium/vanilla", isJson)
    if 'gravityBehaviour' in others:
        if request.vars.gravityBehaviour not in ['Balanced', 'Progressive', 'Vanilla']:
            raiseHttp(400, "Wrong value for gravityBehaviour: {}".format(request.vars.gravityBehaviour), isJson)
    if 'startLocation' in others:
        if request.vars.startLocation not in ['Ceres', 'Landing Site']:
            raiseHttp(400, "Wrong value for startLocation: {}".format(request.vars.startLocation), isJson)
def sessionWebService():
    """Web service to store the randomizer form values in the session.

    Validates every parameter, then copies them all into
    session.randomizer. Returns nothing; raises HTTP 400 on bad input.
    """
    # web service to update the session
    switchs = ['suitsRestriction', 'hideItems', 'strictMinors',
               'areaRandomization', 'areaLayout', 'bossRandomization',
               'funCombat', 'funMovement', 'funSuits',
               'layoutPatches', 'variaTweaks', 'nerfedCharge',
               'itemsounds', 'elevators_doors_speed', 'spinjumprestart',
               'rando_speed', 'animals', 'No_Music']
    quantities = ['missileQty', 'superQty', 'powerBombQty']
    others = ['complexity', 'preset', 'randoPreset', 'majorsSplit',
              'maxDifficulty', 'progressionSpeed', 'progressionDifficulty',
              'morphPlacement', 'minorQty', 'energyQty',
              'gravityBehaviour', 'startLocation']
    validateWebServiceParams(switchs, quantities, others)
    if session.randomizer is None:
        session.randomizer = {}
    session.randomizer['complexity'] = request.vars.complexity
    session.randomizer['preset'] = request.vars.preset
    session.randomizer['randoPreset'] = request.vars.randoPreset
    session.randomizer['majorsSplit'] = request.vars.majorsSplit
    session.randomizer['maxDifficulty'] = request.vars.maxDifficulty
    # progressionSpeed is stored as a list (comma separated in the form)
    session.randomizer['progressionSpeed'] = request.vars.progressionSpeed.split(',')
    session.randomizer['progressionDifficulty'] = request.vars.progressionDifficulty
    session.randomizer['morphPlacement'] = request.vars.morphPlacement
    session.randomizer['suitsRestriction'] = request.vars.suitsRestriction
    session.randomizer['hideItems'] = request.vars.hideItems
    session.randomizer['strictMinors'] = request.vars.strictMinors
    session.randomizer['missileQty'] = request.vars.missileQty
    session.randomizer['superQty'] = request.vars.superQty
    session.randomizer['powerBombQty'] = request.vars.powerBombQty
    session.randomizer['minorQty'] = request.vars.minorQty
    session.randomizer['energyQty'] = request.vars.energyQty
    session.randomizer['areaRandomization'] = request.vars.areaRandomization
    session.randomizer['areaLayout'] = request.vars.areaLayout
    session.randomizer['bossRandomization'] = request.vars.bossRandomization
    session.randomizer['funCombat'] = request.vars.funCombat
    session.randomizer['funMovement'] = request.vars.funMovement
    session.randomizer['funSuits'] = request.vars.funSuits
    session.randomizer['layoutPatches'] = request.vars.layoutPatches
    session.randomizer['variaTweaks'] = request.vars.variaTweaks
    session.randomizer['gravityBehaviour'] = request.vars.gravityBehaviour
    session.randomizer['nerfedCharge'] = request.vars.nerfedCharge
    session.randomizer['itemsounds'] = request.vars.itemsounds
    session.randomizer['elevators_doors_speed'] = request.vars.elevators_doors_speed
    session.randomizer['spinjumprestart'] = request.vars.spinjumprestart
    session.randomizer['rando_speed'] = request.vars.rando_speed
    session.randomizer['startLocation'] = request.vars.startLocation
    session.randomizer['animals'] = request.vars.animals
    session.randomizer['No_Music'] = request.vars.No_Music
    # to create a new rando preset, uncomment next lines
    #with open('rando_presets/new.json', 'w') as jsonFile:
    #    json.dump(session.randomizer, jsonFile)
def getCustomMapping(controlMapping):
    """Turn a button->action mapping into the randomizer's --controls string.

    Returns (False, None) for an empty mapping, otherwise (True, csv) where
    csv lists the buttons bound to the seven actions in a fixed order.
    """
    if len(controlMapping) == 0:
        return (False, None)
    # invert to action -> button
    actionToButton = {action: button for button, action in controlMapping.items()}
    order = ["Shoot", "Jump", "Dash", "Item Select", "Item Cancel", "Angle Up", "Angle Down"]
    return (True, ",".join(actionToButton[action] for action in order))
def randomizerWebService():
    """Web service computing a new randomized seed (returns a json string).

    Validates the parameters, builds the command line for the external
    randomizer script from the form values, runs it, records everything in
    the DB and returns the generated locations/items as json. Raises
    HTTP 400 with a json error message on failure.
    """
    # web service to compute a new random (returns json string)
    print("randomizerWebService")
    session.forget(response)
    # set header to authorize cross domain AJAX
    response.headers['Access-Control-Allow-Origin'] = '*'
    # check validity of all parameters
    switchs = ['suitsRestriction', 'hideItems', 'strictMinors',
               'areaRandomization', 'areaLayout', 'bossRandomization',
               'funCombat', 'funMovement', 'funSuits',
               'layoutPatches', 'variaTweaks', 'nerfedCharge',
               'itemsounds', 'elevators_doors_speed', 'spinjumprestart',
               'rando_speed', 'animals', 'No_Music']
    quantities = ['missileQty', 'superQty', 'powerBombQty']
    others = ['complexity', 'paramsFileTarget', 'seed', 'preset', 'majorsSplit',
              'maxDifficulty', 'progressionSpeed', 'progressionDifficulty',
              'morphPlacement', 'minorQty', 'energyQty',
              'gravityBehaviour', 'startLocation']
    validateWebServiceParams(switchs, quantities, others, isJson=True)
    # randomize
    DB = db.DB()
    id = DB.initRando()
    # race mode
    useRace = False
    if request.vars.raceMode == 'on':
        magic = getMagic()
        useRace = True
    # temp files: skill preset and randomizer json output
    (fd1, presetFileName) = tempfile.mkstemp()
    presetFileName += '.json'
    (fd2, jsonFileName) = tempfile.mkstemp()
    print("randomizerWebService, params validated")
    for var in request.vars:
        print("{}: {}".format(var, request.vars[var]))
    with open(presetFileName, 'w') as presetFile:
        presetFile.write(request.vars.paramsFileTarget)
    seed = request.vars.seed
    # seed '0' means "pick one at random"
    if seed == '0':
        seed = str(random.randint(0, 9999999))
    preset = request.vars.preset
    params = [pythonExec, os.path.expanduser("~/RandomMetroidSolver/randomizer.py"),
              '--runtime', '20',
              '--seed', seed,
              '--output', jsonFileName,
              '--param', presetFileName,
              '--preset', preset,
              '--progressionSpeed', request.vars.progressionSpeed,
              '--progressionDifficulty', request.vars.progressionDifficulty,
              '--morphPlacement', request.vars.morphPlacement,
              '--majorsSplit', request.vars.majorsSplit]
    # '0' means random quantity on the randomizer side
    params += ['--missileQty', request.vars.missileQty if request.vars.missileQty != 'random' else '0',
               '--superQty', request.vars.superQty if request.vars.superQty != 'random' else '0',
               '--powerBombQty', request.vars.powerBombQty if request.vars.powerBombQty != 'random' else '0',
               '--minorQty', request.vars.minorQty if request.vars.minorQty != 'random' else '0',
               '--energyQty', request.vars.energyQty]
    if useRace == True:
        params += ['--race', str(magic)]
    if request.vars.nerfedCharge == 'on':
        params.append('--nerfedCharge')
    if request.vars.itemsounds == 'on':
        params += ['-c', 'itemsounds.ips']
    if request.vars.elevators_doors_speed == 'on':
        params += ['-c', 'elevators_doors_speed.ips']
    if request.vars.spinjumprestart == 'on':
        params += ['-c', 'spinjumprestart.ips']
    if request.vars.rando_speed == 'on':
        params += ['-c', 'rando_speed.ips']
    if request.vars.No_Music == 'on':
        params += ['-c', 'No_Music']
    if request.vars.startLocation == "Ceres":
        params += ['-c', 'skip_intro.ips']
    else:
        params += ['-c', 'skip_ceres.ips']
    if request.vars.animals == 'on':
        params.append('--animals')
    if request.vars.areaLayout == 'off':
        params.append('--areaLayoutBase')
    if request.vars.variaTweaks == 'off':
        params.append('--novariatweaks')
    if request.vars.maxDifficulty != 'no difficulty cap':
        params.append('--maxDifficulty')
        params.append(request.vars.maxDifficulty)
    # helper: forward an on/random switch to the command line
    def addParamRandom(id, params):
        if request.vars[id] in ['on', 'random']:
            params.append('--{}'.format(id))
            if request.vars[id] == 'random':
                params.append('random')
    addParamRandom('suitsRestriction', params)
    addParamRandom('hideItems', params)
    addParamRandom('strictMinors', params)
    # helper: forward a superFun category ('funXxx' -> '--superFun Xxx')
    def addSuperFun(id, params):
        fun = id[len('fun'):]
        if request.vars[id] == 'on':
            params += ['--superFun', fun]
        elif request.vars[id] == 'random':
            params += ['--superFun', "{}Random".format(fun)]
    addSuperFun('funCombat', params)
    addSuperFun('funMovement', params)
    addSuperFun('funSuits', params)
    if request.vars.layoutPatches == 'off':
        params.append('--nolayout')
    if request.vars.gravityBehaviour == 'Vanilla':
        params.append('--nogravheat')
    elif request.vars.gravityBehaviour == 'Progressive':
        params.append('--progressiveSuits')
    if request.vars.areaRandomization == 'on':
        params.append('--area')
    elif request.vars.areaRandomization == 'random':
        params += ['--area', 'random']
    if request.vars.bossRandomization == 'on':
        params.append('--bosses')
    elif request.vars.bossRandomization == 'random':
        params += ['--bosses', 'random']
    # load content of preset to get controller mapping
    try:
        controlMapping = PresetLoader.factory(presetFileName).params['Controller']
    except Exception as e:
        # clean up the temp files before bailing out
        os.close(fd1)
        os.remove(presetFileName)
        os.close(fd2)
        os.remove(jsonFileName)
        raise HTTP(400, json.dumps("randomizerWebService: can't load the preset"))
    (custom, controlParam) = getCustomMapping(controlMapping)
    if custom == True:
        params += ['--controls', controlParam]
        if "Moonwalk" in controlMapping and controlMapping["Moonwalk"] == True:
            params.append('--moonwalk')
    DB.addRandoParams(id, params + ['--complexity', request.vars.complexity])
    print("before calling: {}".format(params))
    start = datetime.now()
    ret = subprocess.call(params)
    end = datetime.now()
    duration = (end - start).total_seconds()
    print("ret: {}, duration: {}s".format(ret, duration))
    if ret == 0:
        with open(jsonFileName) as jsonFile:
            locsItems = json.load(jsonFile)
        # check if an info message has been returned
        msg = ''
        if len(locsItems['errorMsg']) > 0:
            msg = locsItems['errorMsg']
        DB.addRandoResult(id, ret, duration, msg)
        DB.close()
        os.close(fd1)
        os.remove(presetFileName)
        os.close(fd2)
        os.remove(jsonFileName)
        return json.dumps(locsItems)
    else:
        # extract error from json
        try:
            with open(jsonFileName) as jsonFile:
                msg = json.load(jsonFile)['errorMsg']
        except:
            msg = "randomizerWebService: something wrong happened"
        DB.addRandoResult(id, ret, duration, msg)
        DB.close()
        os.close(fd1)
        os.remove(presetFileName)
        os.close(fd2)
        os.remove(jsonFileName)
        raise HTTP(400, json.dumps(msg))
def presetWebService():
    """Web service returning a skill preset's content as a json string.

    Expects request.vars.preset; raises HTTP 400 on a missing/invalid name
    or an unreadable preset file.
    """
    # web service to get the content of the preset file
    if request.vars.preset == None:
        raiseHttp(400, "Missing parameter preset")
    preset = request.vars.preset
    if IS_ALPHANUMERIC()(preset)[1] is not None:
        raise HTTP(400, "Preset name must be alphanumeric")
    if IS_LENGTH(maxsize=32, minsize=1)(preset)[1] is not None:
        raise HTTP(400, "Preset name must be between 1 and 32 characters")
    print("presetWebService: preset={}".format(preset))
    fullPath = '{}/{}.json'.format(getPresetDir(preset), preset)
    # check that the presets file exists
    if os.path.isfile(fullPath):
        # load it
        try:
            params = PresetLoader.factory(fullPath).params
        except Exception as e:
            raise HTTP(400, "Can't load the preset")
        params = json.dumps(params)
        return params
    else:
        raise HTTP(400, "Preset '{}' not found".format(fullPath))
def randoPresetWebService():
    """Web service returning a rando preset's content as a json string.

    Expects request.vars.randoPreset and request.vars.origin (extStats or
    randomizer); when called from the randomizer page the session is
    updated with the preset values. Raises HTTP 400 on bad input.
    """
    # web service to get the content of the rando preset file
    if request.vars.randoPreset == None:
        raiseHttp(400, "Missing parameter rando preset")
    preset = request.vars.randoPreset
    if IS_ALPHANUMERIC()(preset)[1] is not None:
        raise HTTP(400, "Preset name must be alphanumeric")
    if IS_LENGTH(maxsize=32, minsize=1)(preset)[1] is not None:
        raise HTTP(400, "Preset name must be between 1 and 32 characters")
    if request.vars.origin not in ["extStats", "randomizer"]:
        raise HTTP(400, "Unknown origin")
    print("randoPresetWebService: preset={}".format(preset))
    fullPath = 'rando_presets/{}.json'.format(preset)
    # check that the presets file exists
    if os.path.isfile(fullPath):
        # load it
        try:
            # only the randomizer page keeps the values in the session
            updateSession = request.vars.origin == "randomizer"
            params = loadRandoPreset(fullPath, updateSession)
            if updateSession == True:
                session.randomizer['randoPreset'] = preset
            params = json.dumps(params)
            return params
        except Exception as e:
            raise HTTP(400, "Can't load the rando preset: {}".format(preset))
    else:
        raise HTTP(400, "Rando preset '{}' not found".format(fullPath))
def loadRandoPreset(presetFullPath, updateSession):
    """Load a rando preset JSON file and return the parsed dict.

    When updateSession is true every preset key is mirrored into the
    randomizer session.
    """
    with open(presetFullPath) as jsonFile:
        preset = json.load(jsonFile)
    if updateSession:
        # copy key by key so existing session entries are preserved
        for key, value in preset.items():
            session.randomizer[key] = value
    return preset
def home():
    """Landing page controller: only sets the page title."""
    response.title = 'Super Metroid VARIA Randomizer, Solver and Trackers'
    return {}
def getErrors():
    """Return the solver error file base names, oldest first.

    Returns an empty list when the error directory does not exist.
    """
    errDir = os.path.expanduser("~/web2py/applications/solver/errors")
    if not os.path.isdir(errDir):
        return []
    # sort by modification time so the oldest errors come first
    errFiles = sorted(glob.glob(os.path.join(errDir, "*")), key=os.path.getmtime)
    return [os.path.basename(f) for f in errFiles]
def getFsUsage():
    """Return ('OK'|'WARNING'|'CRITICAL', percent) for /home disk usage.

    Thresholds: below 80% is OK, below 95% is WARNING, otherwise CRITICAL.
    """
    fsData = os.statvfs('/home')
    percent = round(100 - (100.0 * fsData.f_bavail / fsData.f_blocks), 2)
    # check the most severe level first
    if percent >= 95:
        return ('CRITICAL', percent)
    if percent >= 80:
        return ('WARNING', percent)
    return ('OK', percent)
def randoParamsWebService():
    """Web service returning the randomizer parameters stored for a seed.

    Expects request.vars.seed, an integer in [0, 9999999]; aborts with
    HTTP 400 on a missing or out-of-range value.
    """
    if request.vars.seed is None:
        raiseHttp(400, "Missing parameter seed", False)
    seed = getInt('seed', False)
    if seed < 0 or seed > 9999999:
        # bug fix: was request.vars[seed], which indexed vars with the int
        # value and broke the error message; report the value directly.
        raiseHttp(400, "Wrong value for seed: {}, must be between 0 and 9999999".format(seed), False)
    DB = db.DB()
    params = DB.getRandomizerSeedParams(seed)
    DB.close()
    return params
def stats():
    """Usage statistics page: gather one week of solver/randomizer data."""
    response.title = 'Super Metroid VARIA Randomizer and Solver usage statistics'
    weeks = 1

    DB = db.DB()
    data = dict(
        solverPresets=DB.getSolverPresets(weeks),
        randomizerPresets=DB.getRandomizerPresets(weeks),
        solverDurations=DB.getSolverDurations(weeks),
        randomizerDurations=DB.getRandomizerDurations(weeks),
        solverData=DB.getSolverData(weeks),
        randomizerData=DB.getRandomizerData(weeks),
        isolver=DB.getISolver(weeks),
        isolverData=DB.getISolverData(weeks),
        errors=getErrors(),
    )
    DB.close()

    # filesystem health indicator displayed on the page
    (fsStatus, fsPercent) = getFsUsage()
    data['fsStatus'] = fsStatus
    data['fsPercent'] = fsPercent
    return data
def transition2isolver(transition):
    """Convert a transition name to the isolver naming scheme:
    first letter lowercased, spaces and punctuation stripped."""
    name = str(transition)
    return name[0].lower() + removeChars(name[1:], " ,()-")
def tracker():
    """Item/area tracker page: init the tracker session and load presets."""
    response.title = 'Super Metroid VARIA Areas and Items Tracker'

    if session.tracker is None:
        # fresh session; "firstTime" is set back to False by tracker.html
        session.tracker = {
            "state": {},
            "preset": "regular",
            "seed": None,
            "firstTime": True
        }

    # load presets list
    (stdPresets, tourPresets, comPresets) = loadPresetsList()

    # flatten the vanilla transitions into isolver-style access point names
    vanillaAPs = []
    for (src, dest) in vanillaTransitions:
        vanillaAPs.extend([transition2isolver(src), transition2isolver(dest)])
    vanillaBossesAPs = []
    for (src, dest) in vanillaBossesTransitions:
        vanillaBossesAPs.extend([transition2isolver(src), transition2isolver(dest)])

    return dict(stdPresets=stdPresets, tourPresets=tourPresets, comPresets=comPresets,
                vanillaAPs=vanillaAPs, vanillaBossesAPs=vanillaBossesAPs,
                curSession=session.tracker)
def plando():
    """Plandomizer page: init the plando session and load presets."""
    response.title = 'Super Metroid VARIA Areas and Items Plandomizer'

    if session.plando is None:
        # fresh session; "firstTime" is set back to False by plando.html
        session.plando = {
            "state": {},
            "preset": "regular",
            "seed": None,
            # rando params
            "rando": {},
            "firstTime": True
        }

    # load presets list
    (stdPresets, tourPresets, comPresets) = loadPresetsList()

    # flatten the vanilla transitions into isolver-style access point names
    vanillaAPs = []
    for (src, dest) in vanillaTransitions:
        vanillaAPs.extend([transition2isolver(src), transition2isolver(dest)])
    vanillaBossesAPs = []
    for (src, dest) in vanillaBossesTransitions:
        vanillaBossesAPs.extend([transition2isolver(src), transition2isolver(dest)])

    return dict(stdPresets=stdPresets, tourPresets=tourPresets, comPresets=comPresets,
                vanillaAPs=vanillaAPs, vanillaBossesAPs=vanillaBossesAPs,
                curSession=session.plando)
class WS(object):
    """Base class of the tracker/plandomizer web services.

    Each concrete service is a WS_<scope>_<action> subclass; factory()
    instantiates the right one from the request parameters. Actions are
    executed by shelling out to solver.py in interactive mode, passing the
    session state in and out through temporary JSON files.

    NOTE(review): indentation was reconstructed from a flattened paste —
    confirm nesting against the original file.
    """
    @staticmethod
    def factory():
        # Validate scope/action/mode, then instantiate WS_<scope>_<action>.
        scope = request.vars.scope
        if scope not in ["area", "item", "common"]:
            raiseHttp(400, "Unknown scope: {}, must be area/item/common".format(scope), True)
        action = request.vars.action
        if action not in ['add', 'remove', 'clear', 'init', 'get', 'save', 'replace', 'randomize']:
            raiseHttp(400, "Unknown action {}, must be add/remove/clear/init/get/save/randomize".format(action), True)
        mode = request.vars.mode
        if mode not in ["standard", "seedless", "plando"]:
            raiseHttp(400, "Unknown mode, must be standard/seedless/plando", True)
        try:
            WSClass = globals()["WS_{}_{}".format(scope, action)]
            return WSClass(mode)
        except Exception as e:
            # e.body is set when the exception comes from web2py's HTTP
            raiseHttp(400, "{}".format(e.body if "body" in e.__dict__ else e).replace('"', ''), True)

    def __init__(self, mode):
        # Bind self.session to the plando or tracker web2py session slot.
        if mode == "plando":
            if session.plando is None:
                raiseHttp(400, "No session found for the Plandomizer Web service", True)
            self.session = session.plando
        else:
            if session.tracker is None:
                raiseHttp(400, "No session found for the Tracker Web service", True)
            self.session = session.tracker

    def validate(self):
        # Request validation common to every subclass; subclasses extend it.
        if self.session is None:
            raiseHttp(400, "No session found for the Tracker", True)
        if request.vars.action == None:
            raiseHttp(400, "Missing parameter action", True)
        action = request.vars.action
        if action not in ['init', 'add', 'remove', 'clear', 'get', 'save', 'replace', 'randomize']:
            raiseHttp(400, "Unknown action {}, must be init/add/remove/clear/get/save/randomize".format(action), True)

    def validatePoint(self, point):
        # Check that request parameter <point> is a known access point name.
        if request.vars[point] == None:
            raiseHttp(400, "Missing parameter {}".format(point), True)
        pointValue = request.vars[point]
        if pointValue not in ['lowerMushroomsLeft', 'moatRight', 'greenPiratesShaftBottomRight',
                              'keyhunterRoomBottom', 'morphBallRoomLeft', 'greenBrinstarElevatorRight',
                              'greenHillZoneTopRight', 'noobBridgeRight', 'westOceanLeft', 'crabMazeLeft',
                              'lavaDiveRight', 'threeMuskateersRoomLeft', 'warehouseZeelaRoomLeft',
                              'warehouseEntranceLeft', 'warehouseEntranceRight', 'singleChamberTopRight',
                              'kronicBoostRoomBottomLeft', 'mainStreetBottom', 'crabHoleBottomLeft', 'leCoudeRight',
                              'redFishRoomLeft', 'redTowerTopLeft', 'caterpillarRoomTopRight', 'redBrinstarElevator',
                              'eastTunnelRight', 'eastTunnelTopRight', 'glassTunnelTop', 'statuesHallwayLeft',
                              'ridleyRoomOut', 'ridleyRoomIn', 'kraidRoomOut', 'kraidRoomIn',
                              'draygonRoomOut', 'draygonRoomIn', 'phantoonRoomOut', 'phantoonRoomIn']:
            raiseHttp(400, "Wrong value for {}: {}".format(point, pointValue), True)

    def action(self):
        # Overridden by subclasses; performs the actual web service action.
        pass

    def locName4isolver(self, locName):
        # remove space and special characters
        # sed -e 's+ ++g' -e 's+,++g' -e 's+(++g' -e 's+)++g' -e 's+-++g'
        locName = str(locName)
        return locName[0].lower() + removeChars(locName[1:], " ,()-")

    def returnState(self):
        # Serialize the session solver state for the frontend; an empty
        # state is reported as a plain HTTP 200.
        if len(self.session["state"]) > 0:
            state = self.session["state"]
            #print("state returned to frontend: availWeb {}, visWeb {}".format(state["availableLocationsWeb"], state["visitedLocationsWeb"]))
            return json.dumps({
                # item tracker
                "availableLocations": state["availableLocationsWeb"],
                "visitedLocations": state["visitedLocationsWeb"],
                # compatibility with existing sessions
                "remainLocations": state["remainLocationsWeb"] if "remainLocationsWeb" in state else [],
                "lastLoc": self.locName4isolver(state["lastLoc"]),
                # area tracker
                "lines": state["linesWeb"],
                "linesSeq": state["linesSeqWeb"],
                "allTransitions": state["allTransitions"],
                # infos on seed
                "mode": state["mode"],
                "areaRando": state["areaRando"],
                "bossRando": state["bossRando"],
                "seed": state["seed"],
                "preset": os.path.basename(os.path.splitext(state["presetFileName"])[0]),
                "errorMsg": state["errorMsg"],
                "last": state["last"]
            })
        else:
            raiseHttp(200, "OK", True)

    def callSolverAction(self, scope, action, parameters):
        # Run solver.py in interactive mode for the given scope/action.
        # The session state goes in through one temp JSON file and the
        # updated state (or an error message) comes back through another.
        # check that we have a state in the session
        if "state" not in self.session:
            raiseHttp(400, "Missing Solver state in the session", True)
        mode = self.session["mode"]
        (fd1, jsonInFileName) = tempfile.mkstemp()
        (fd2, jsonOutFileName) = tempfile.mkstemp()
        params = [
            pythonExec, os.path.expanduser("~/RandomMetroidSolver/solver.py"),
            '--interactive',
            '--state', jsonInFileName,
            '--output', jsonOutFileName,
            '--action', action,
            '--mode', mode,
            '--scope', scope
        ]
        # translate the action parameters into solver.py command line flags
        if action in ['add', 'replace']:
            if scope == 'item':
                params += ['--loc', parameters["loc"]]
                # in standard mode the item is read from the ROM itself
                if mode != 'standard':
                    params += ['--item', parameters["item"]]
                if parameters['hide'] == True:
                    params.append('--hide')
            elif scope == 'area':
                params += ['--startPoint', parameters["startPoint"],
                           '--endPoint', parameters["endPoint"]]
        elif action == 'remove' and scope == 'item':
            params += ['--count', str(parameters["count"])]
        elif action == 'remove' and scope == 'area' and "startPoint" in parameters:
            params += ['--startPoint', parameters["startPoint"]]
        elif action == 'save' and scope == 'common':
            if parameters['lock'] == True:
                params.append('--lock')
        elif action == 'randomize':
            params += ['--progressionSpeed', parameters["progressionSpeed"],
                       '--minorQty', parameters["minorQty"],
                       '--energyQty', parameters["energyQty"]
                       ]
        if request.vars.debug != None:
            params.append('--vcr')
            params.append('--debug')

        # dump state as input
        with open(jsonInFileName, 'w') as jsonFile:
            json.dump(self.session["state"], jsonFile)

        print("before calling isolver: {}".format(params))
        start = datetime.now()
        ret = subprocess.call(params)
        end = datetime.now()
        duration = (end - start).total_seconds()
        print("ret: {}, duration: {}s".format(ret, duration))

        if ret == 0:
            # success: read back the updated state and clean the temp files
            with open(jsonOutFileName) as jsonFile:
                state = json.load(jsonFile)
            os.close(fd1)
            os.remove(jsonInFileName)
            os.close(fd2)
            os.remove(jsonOutFileName)
            if action == 'save':
                # save returns the serialized state instead of storing it
                return json.dumps(state)
            else:
                self.session["state"] = state
                return self.returnState()
        else:
            # failure: try to extract a solver error message from the output
            os.close(fd1)
            os.remove(jsonInFileName)
            msg = "Something wrong happened while iteratively solving the ROM"
            try:
                with open(jsonOutFileName, 'r') as jsonFile:
                    data = json.load(jsonFile)
                    if "errorMsg" in data:
                        msg = data["errorMsg"]
            except Exception as e:
                pass
            os.close(fd2)
            os.remove(jsonOutFileName)
            raiseHttp(400, msg, True)
class WS_common_init(WS):
    """Initialize the interactive solver session.

    Validates the skill preset and (unless mode is seedless) the uploaded
    ROM, then runs solver.py with --action init to build the initial state.
    """
    def validate(self):
        super(WS_common_init, self).validate()
        if request.vars.scope != 'common':
            raiseHttp(400, "Unknown scope, must be common", True)
        # preset
        preset = request.vars.preset
        # bug fix: used to test "request == None" (always false), so a
        # missing preset parameter was never reported
        if preset == None:
            raiseHttp(400, "Missing parameter preset", True)
        if IS_NOT_EMPTY()(preset)[1] is not None:
            raiseHttp(400, "Preset name is empty", True)
        if IS_ALPHANUMERIC()(preset)[1] is not None:
            raiseHttp(400, "Preset name must be alphanumeric: {}".format(preset), True)
        if IS_LENGTH(32)(preset)[1] is not None:
            raiseHttp(400, "Preset name must be max 32 chars: {}".format(preset), True)
        fullPath = '{}/{}.json'.format(getPresetDir(preset), preset)
        if not os.path.isfile(fullPath):
            raiseHttp(400, "Unknown preset: {}".format(preset), True)
        if request.vars.mode != 'seedless':
            # ROM (only through file API)
            if request.vars.romJson is None or len(request.vars.romJson) == 0:
                raiseHttp(400, "Missing ROM to solve", True)
            try:
                json.loads(request.vars.romJson)
            except:
                # consistency fix: pass the isJson flag like every other call
                raiseHttp(400, "Wrong value for romJson, must be a JSON string: [{}]".format(request.vars.romJson), True)
            # ROM file name
            uploadFile = request.vars.fileName
            if uploadFile is None:
                raiseHttp(400, "Missing ROM file name", True)
            if IS_NOT_EMPTY()(uploadFile)[1] is not None:
                raiseHttp(400, "File name is empty", True)
            if IS_LENGTH(maxsize=255, minsize=1)(uploadFile)[1] is not None:
                raiseHttp(400, "Wrong length for ROM file name, name must be between 1 and 255 characters", True)

    def action(self):
        # Build the JSON ROM (skipped for seedless) then init the solver.
        mode = request.vars.mode
        if mode != 'seedless':
            try:
                (base, jsonRomFileName) = generateJsonROM(request.vars.romJson)
            except Exception as e:
                raiseHttp(400, "Can't load JSON ROM: {}".format(e), True)
            seed = base + '.sfc'
        else:
            seed = 'seedless'
            jsonRomFileName = None
        preset = request.vars.preset
        presetFileName = '{}/{}.json'.format(getPresetDir(preset), preset)
        # remember the init parameters in the session
        self.session["seed"] = seed
        self.session["preset"] = preset
        self.session["mode"] = mode
        vcr = request.vars.debug != None
        fill = request.vars.fill == "true"
        return self.callSolverInit(jsonRomFileName, presetFileName, preset, seed, mode, vcr, fill)

    def callSolverInit(self, jsonRomFileName, presetFileName, preset, romFileName, mode, vcr, fill):
        # Run solver.py --action init and store the returned state in session.
        (fd, jsonOutFileName) = tempfile.mkstemp()
        params = [
            pythonExec, os.path.expanduser("~/RandomMetroidSolver/solver.py"),
            '--preset', presetFileName,
            '--output', jsonOutFileName,
            '--action', "init",
            '--interactive',
            '--mode', mode,
            '--scope', 'common',
        ]
        if mode != "seedless":
            params += ['-r', str(jsonRomFileName)]
        if vcr == True:
            params.append('--vcr')
        if fill == True:
            params.append('--fill')

        print("before calling isolver: {}".format(params))
        start = datetime.now()
        ret = subprocess.call(params)
        end = datetime.now()
        duration = (end - start).total_seconds()
        print("ret: {}, duration: {}s".format(ret, duration))

        if ret == 0:
            # log the init in the DB for usage statistics
            DB = db.DB()
            DB.addISolver(preset, romFileName)
            DB.close()
            with open(jsonOutFileName) as jsonFile:
                state = json.load(jsonFile)
            os.close(fd)
            os.remove(jsonOutFileName)
            self.session["state"] = state
            return self.returnState()
        else:
            os.close(fd)
            os.remove(jsonOutFileName)
            raiseHttp(400, "Something wrong happened while initializing the ISolver", True)
class WS_common_get(WS):
    """Return the current solver state to the frontend."""
    def validate(self):
        # no extra parameters beyond the common ones
        super(WS_common_get, self).validate()
    def action(self):
        return self.returnState()
class WS_common_save(WS):
    """Export the current plando state, optionally locked."""
    def validate(self):
        super(WS_common_save, self).validate()
        if request.vars.lock == None:
            raiseHttp(400, "Missing parameter lock", True)
        if request.vars.lock not in ["save", "lock"]:
            raiseHttp(400, "Wrong value for lock: {}, authorized values: save/lock".format(request.vars.lock), True)
    def action(self):
        if self.session["mode"] != "plando":
            # message fix: "use" -> "used"
            raiseHttp(400, "Save can only be used in plando mode", True)
        return self.callSolverAction("common", "save", {'lock': request.vars.lock == "lock"})
class WS_common_randomize(WS):
    """Fill the remaining locations using the randomizer (plando only)."""
    def validate(self):
        super(WS_common_randomize, self).validate()
        if request.vars.progressionSpeed not in ["slowest", "slow", "medium", "fast", "fastest", "basic", "VARIAble"]:
            raiseHttp(400, "Wrong value for progressionSpeed: {}".format(request.vars.progressionSpeed), True)
        minorQtyInt = getInt('minorQty', True)
        if minorQtyInt < 7 or minorQtyInt > 100:
            raiseHttp(400, "Wrong value for minorQty, must be between 7 and 100", True)
        if request.vars.energyQty not in ["sparse", "medium", "vanilla"]:
            raiseHttp(400, "Wrong value for energyQty: {}".format(request.vars.energyQty), True)
    def action(self):
        if self.session["mode"] != "plando":
            # message fix: "use" -> "used"
            raiseHttp(400, "Randomize can only be used in plando mode", True)
        # keep the chosen parameters in the session for the frontend
        params = {elem: request.vars[elem] for elem in ("progressionSpeed", "minorQty", "energyQty")}
        self.session["rando"] = params
        return self.callSolverAction("common", "randomize", params)
class WS_area_add(WS):
    """Add a transition between two access points."""
    def validate(self):
        super(WS_area_add, self).validate()
        # both end points must be valid access point names
        self.validatePoint("startPoint")
        self.validatePoint("endPoint")
        if len(self.session["state"]) == 0:
            raiseHttp(400, "ISolver state is empty", True)
    def action(self):
        points = {"startPoint": request.vars.startPoint,
                  "endPoint": request.vars.endPoint}
        return self.callSolverAction("area", "add", points)
class WS_area_remove(WS):
    """Remove the last transition, or the one starting at startPoint."""
    def validate(self):
        # startPoint is optional; validate it only when provided
        if request.vars["startPoint"] is not None:
            self.validatePoint("startPoint")
        super(WS_area_remove, self).validate()
    def action(self):
        parameters = {}
        if request.vars["startPoint"] is not None:
            parameters["startPoint"] = request.vars.startPoint
        return self.callSolverAction("area", "remove", parameters)
class WS_area_clear(WS):
    """Reset all area transitions."""
    def validate(self):
        # no extra parameters beyond the common ones
        super(WS_area_clear, self).validate()
    def action(self):
        return self.callSolverAction("area", "clear", {})
class WS_item_add(WS):
    """Record an item pickup at a location in the interactive solver."""
    def validate(self):
        super(WS_item_add, self).validate()
        # new location
        def name4isolver(locName):
            # remove space and special characters
            # sed -e 's+ ++g' -e 's+,++g' -e 's+(++g' -e 's+)++g' -e 's+-++g'
            return removeChars(locName, " ,()-")
        locName = name4isolver(request.vars.locName)
        # the normalized location name must be one of the known locations
        if locName not in ['EnergyTankGauntlet', 'Bomb', 'EnergyTankTerminator', 'ReserveTankBrinstar', 'ChargeBeam', 'MorphingBall', 'EnergyTankBrinstarCeiling', 'EnergyTankEtecoons', 'EnergyTankWaterway', 'EnergyTankBrinstarGate', 'XRayScope', 'Spazer', 'EnergyTankKraid', 'VariaSuit', 'IceBeam', 'EnergyTankCrocomire', 'HiJumpBoots', 'GrappleBeam', 'ReserveTankNorfair', 'SpeedBooster', 'WaveBeam', 'EnergyTankRidley', 'ScrewAttack', 'EnergyTankFirefleas', 'ReserveTankWreckedShip', 'EnergyTankWreckedShip', 'RightSuperWreckedShip', 'GravitySuit', 'EnergyTankMamaturtle', 'PlasmaBeam', 'ReserveTankMaridia', 'SpringBall', 'EnergyTankBotwoon', 'SpaceJump', 'PowerBombCrateriasurface', 'MissileoutsideWreckedShipbottom', 'MissileoutsideWreckedShiptop', 'MissileoutsideWreckedShipmiddle', 'MissileCrateriamoat', 'MissileCrateriabottom', 'MissileCrateriagauntletright', 'MissileCrateriagauntletleft', 'SuperMissileCrateria', 'MissileCrateriamiddle', 'PowerBombgreenBrinstarbottom', 'SuperMissilepinkBrinstar', 'MissilegreenBrinstarbelowsupermissile', 'SuperMissilegreenBrinstartop', 'MissilegreenBrinstarbehindmissile', 'MissilegreenBrinstarbehindreservetank', 'MissilepinkBrinstartop', 'MissilepinkBrinstarbottom', 'PowerBombpinkBrinstar', 'MissilegreenBrinstarpipe', 'PowerBombblueBrinstar', 'MissileblueBrinstarmiddle', 'SuperMissilegreenBrinstarbottom', 'MissileblueBrinstarbottom', 'MissileblueBrinstartop', 'MissileblueBrinstarbehindmissile', 'PowerBombredBrinstarsidehopperroom', 'PowerBombredBrinstarspikeroom', 'MissileredBrinstarspikeroom', 'MissileKraid', 'Missilelavaroom', 'MissilebelowIceBeam', 'MissileaboveCrocomire', 'MissileHiJumpBoots', 'EnergyTankHiJumpBoots', 'PowerBombCrocomire', 'MissilebelowCrocomire', 'MissileGrappleBeam', 'MissileNorfairReserveTank', 'MissilebubbleNorfairgreendoor', 'MissilebubbleNorfair', 'MissileSpeedBooster', 'MissileWaveBeam', 'MissileGoldTorizo', 'SuperMissileGoldTorizo', 'MissileMickeyMouseroom', 'MissilelowerNorfairabovefireflearoom',
                           'PowerBomblowerNorfairabovefireflearoom', 'PowerBombPowerBombsofshame', 'MissilelowerNorfairnearWaveBeam', 'MissileWreckedShipmiddle', 'MissileGravitySuit', 'MissileWreckedShiptop', 'SuperMissileWreckedShipleft', 'MissilegreenMaridiashinespark', 'SuperMissilegreenMaridia', 'MissilegreenMaridiatatori', 'SuperMissileyellowMaridia', 'MissileyellowMaridiasupermissile', 'MissileyellowMaridiafalsewall', 'MissileleftMaridiasandpitroom', 'MissilerightMaridiasandpitroom', 'PowerBombrightMaridiasandpitroom', 'MissilepinkMaridia', 'SuperMissilepinkMaridia', 'MissileDraygon', 'Kraid', 'Ridley', 'Phantoon', 'Draygon', 'MotherBrain']:
            raiseHttp(400, "Unknown location name: {}".format(request.vars.locName), True)
        request.vars.locName = locName
        itemName = request.vars.itemName
        # NoEnergy is an alias for Nothing
        if itemName == "NoEnergy":
            itemName = "Nothing"
        # item is optional (None); otherwise it must be a known item name
        if itemName not in [None, 'ETank', 'Missile', 'Super', 'PowerBomb', 'Bomb', 'Charge', 'Ice', 'HiJump', 'SpeedBooster', 'Wave', 'Spazer', 'SpringBall', 'Varia', 'Plasma', 'Grapple', 'Morph', 'Reserve', 'Gravity', 'XRayScope', 'SpaceJump', 'ScrewAttack', 'Nothing', 'NoEnergy', 'Boss']:
            raiseHttp(400, "Unknown item name: {}".format(request.vars.itemName), True)
    def action(self):
        item = request.vars.itemName
        # items used only in the randomizer that we get in vcr mode
        if item in ["Boss", "NoEnergy"]:
            item = 'Nothing'
        return self.callSolverAction("item", "add", {"loc": request.vars.locName, "item": item, "hide": request.vars.hide == "true"})
class WS_item_replace(WS_item_add):
    """Replace the item at an already-visited location."""
    def validate(self):
        # same parameter checks as adding an item
        super(WS_item_replace, self).validate()
    def action(self):
        parameters = {"loc": request.vars.locName,
                      "item": request.vars.itemName,
                      "hide": request.vars.hide == "true"}
        return self.callSolverAction("item", "replace", parameters)
class WS_item_remove(WS):
    """Undo the last visited location(s)."""
    def validate(self):
        super(WS_item_remove, self).validate()
    def action(self):
        # count is optional and defaults to 1
        if request.vars.count is None:
            count = 1
        else:
            count = getInt("count", True)
            if count > 105 or count < 1:
                raiseHttp(400, "Wrong value for count, must be in [1-105] ", True)
        return self.callSolverAction("item", "remove", {"count": count})
class WS_item_clear(WS):
    """Reset all visited item locations."""
    def validate(self):
        # no extra parameters beyond the common ones
        super(WS_item_clear, self).validate()
    def action(self):
        return self.callSolverAction("item", "clear", {})
def trackerWebService():
    """Unified web service entry point for the item/area trackers."""
    print("trackerWebService called")
    ws = WS.factory()
    ws.validate()
    ret = ws.action()
    if ret is None:
        # actions without a payload still need a response
        raiseHttp(200, "OK", True)
    return ret
# race mode
def getMagic():
    """Return a random magic number in [1, 0xffff] used for race mode."""
    return random.randint(1, 0xffff)
def initCustomizerSession():
    """Populate session.customizer with its default values on first visit."""
    if session.customizer is None:
        session.customizer = {
            # palette randomization settings
            'colorsRandomization': "off",
            'suitsPalettes': "on",
            'beamsPalettes': "on",
            'tilesPalettes': "on",
            'enemiesPalettes': "on",
            'bossesPalettes': "on",
            'minDegree': -15,
            'maxDegree': 15,
            'invert': "on",
            'globalShift': "on",
            # sprite replacement
            'customSpriteEnable': "off",
            'customSprite': "samus",
            # optional patches
            'itemsounds': "off",
            'spinjumprestart': "off",
            'rando_speed': "off",
            'elevators_doors_speed': "off",
            'animals': "off",
            'No_Music': "off"
        }
# Registry of the selectable custom sprite sheets for the customizer page.
# Keys are the .ips patch base names; "index" drives display order and
# "group" splits the UI between Samus variants and other characters.
customSprites = {
    'samus': {"index":0, "name": "Samus", "desc": "Samus, with a distinct animation for Screw Attack without Space Jump and a new Crystal Flash animation", "author": "Artheau and Feesh", "group": "Samus"},
    'hitbox_helper': {"index":1, "name": "Hitbox", "desc": "Samus, with her actual hitbox on top", "author": "Artheau and Komaru", "group": "Samus"},
    'hack_ancient_chozo': {"index":2, "name": "Chozo", "desc": "Samus, from Ancient Chozo hack", "author": "Albert V.", "group": "Samus"},
    'hack_ascent': {"index":3, "name": "Ascent", "desc": "Samus, from Ascent hack", "author": "Benox50", "group": "Samus"},
    'hack_decision': {"index":4, "name": "Decision", "desc": "Samus, from Decision hack", "author": "JoshShoeWah", "group": "Samus"},
    'hack_escape2': {"index":5, "name": "Escape II", "desc": "Samus, from Escape II hack", "author": "Hiroishi", "group": "Samus"},
    'hack_hyper': {"index":6, "name": "Hyper", "desc": "Samus, from Hyper Metroid hack", "author": "RealRed", "group": "Samus"},
    'hack_nature': {"index":7, "name": "Nature", "desc": "Samus, from Nature hack", "author": "Jefe962", "group": "Samus"},
    'hack_phazon': {"index":8, "name": "Phazon", "desc": "Samus, from Phazon hack", "author": "A_red_monk_called_Key", "group": "Samus"},
    'hack_redesign': {"index":9, "name": "Redesign", "desc": "Samus, from Redesign hack", "author": "Drewseph", "group": "Samus"},
    'hack_szm': {"index":10, "name": "SZM", "desc": "Samus, from Super Zero Mission hack", "author": "SBniconico", "group": "Samus"},
    'bailey': {"index":11, "name": "Bailey", "desc": "Justin Bailey, aka Samus in an 80s swimsuit", "author": "Auximines", "group": "Custom"},
    'alucard': {"index":12, "name": "Alucard", "desc": "Alucard from Castlevania Symphony Of The Night", "author": "Nintoaster", "group": "Custom"},
    'megaman': {"index":13, "name": "Megaman", "desc": "Megaman X!", "author": "Artheau", "group": "Custom"},
    'fed_trooper': {"index":14, "name": "GF Trooper", "desc": "A Galactic Federation trooper", "author": "Physix", "group": "Custom"},
    'super_controid': {"index":15, "name": "Contra", "desc": "Badass soldier from Contra III", "author": "Nintoaster", "group": "Custom"},
    'marga': {"index":16, "name": "Margatroid", "desc": "Alice Margatroid from the Touhou Project", "author": "Plan", "group": "Custom"},
    'win95_cursor': {"index":17, "name": "Win95 Cursor", "desc": "A classic Windows cursor...", "author": "PlaguedOne", "group": "Custom"}
}
def customizer():
    """Seed customizer page."""
    response.title = 'Super Metroid VARIA Seeds Customizer'
    initCustomizerSession()
    return dict(customSprites=customSprites)
def customWebService():
    """Web service backing the customizer page.

    Validates the submitted options, stores them in the session, then runs
    randomizer.py with --patchOnly and returns the resulting patch JSON.

    NOTE(review): indentation was reconstructed from a flattened paste; the
    palette options are assumed to be nested under colorsRandomization —
    confirm against the original file.
    """
    # check validity of all parameters
    patches = ['itemsounds', 'spinjumprestart', 'rando_speed', 'elevators_doors_speed', 'No_Music', 'animals']
    others = ['colorsRandomization', 'suitsPalettes', 'beamsPalettes', 'tilesPalettes', 'enemiesPalettes',
              'bossesPalettes', 'minDegree', 'maxDegree', 'invert']
    validateWebServiceParams(patches, [], others, isJson=True)
    if request.vars.customSpriteEnable == 'on':
        if request.vars.customSprite not in customSprites:
            raiseHttp(400, "Wrong value for customSprite", True)

    if session.customizer == None:
        session.customizer = {}

    # update session
    session.customizer['colorsRandomization'] = request.vars.colorsRandomization
    session.customizer['suitsPalettes'] = request.vars.suitsPalettes
    session.customizer['beamsPalettes'] = request.vars.beamsPalettes
    session.customizer['tilesPalettes'] = request.vars.tilesPalettes
    session.customizer['enemiesPalettes'] = request.vars.enemiesPalettes
    session.customizer['bossesPalettes'] = request.vars.bossesPalettes
    session.customizer['minDegree'] = request.vars.minDegree
    session.customizer['maxDegree'] = request.vars.maxDegree
    session.customizer['invert'] = request.vars.invert
    session.customizer['globalShift'] = request.vars.globalShift
    session.customizer['customSpriteEnable'] = request.vars.customSpriteEnable
    session.customizer['customSprite'] = request.vars.customSprite
    session.customizer['itemsounds'] = request.vars.itemsounds
    session.customizer['spinjumprestart'] = request.vars.spinjumprestart
    session.customizer['rando_speed'] = request.vars.rando_speed
    session.customizer['elevators_doors_speed'] = request.vars.elevators_doors_speed
    session.customizer['animals'] = request.vars.animals
    session.customizer['No_Music'] = request.vars.No_Music

    # call the randomizer
    (fd, jsonFileName) = tempfile.mkstemp()
    params = [pythonExec, os.path.expanduser("~/RandomMetroidSolver/randomizer.py"),
              '--output', jsonFileName, '--patchOnly']
    # optional gameplay patches
    if request.vars.itemsounds == 'on':
        params += ['-c', 'itemsounds.ips']
    if request.vars.elevators_doors_speed == 'on':
        params += ['-c', 'elevators_doors_speed.ips']
    if request.vars.spinjumprestart == 'on':
        params += ['-c', 'spinjumprestart.ips']
    if request.vars.rando_speed == 'on':
        params += ['-c', 'rando_speed.ips']
    if request.vars.No_Music == 'on':
        params += ['-c', 'No_Music']
    if request.vars.animals == 'on':
        params.append('--animals')
    # palette randomization options
    if request.vars.colorsRandomization == 'on':
        params.append('--palette')
        if request.vars.suitsPalettes == 'off':
            params.append('--no_shift_suit_palettes')
        if request.vars.beamsPalettes == 'off':
            params.append('--no_shift_beam_palettes')
        if request.vars.tilesPalettes == 'off':
            params.append('--no_shift_tileset_palette')
        if request.vars.enemiesPalettes == 'off':
            params.append('--no_shift_enemy_palettes')
        if request.vars.bossesPalettes == 'off':
            params.append('--no_shift_boss_palettes')
        if request.vars.globalShift == 'off':
            # shift each palette individually instead of one global shift
            params.append('--no_global_shift')
            params.append('--individual_suit_shift')
            params.append('--individual_tileset_shift')
            params.append('--no_match_ship_and_power')
        params += ['--min_degree', request.vars.minDegree, '--max_degree', request.vars.maxDegree]
        if request.vars.invert == 'on':
            params.append('--invert')
    if request.vars.customSpriteEnable == 'on':
        params += ['--sprite', "{}.ips".format(request.vars.customSprite)]

    print("before calling: {}".format(params))
    start = datetime.now()
    ret = subprocess.call(params)
    end = datetime.now()
    duration = (end - start).total_seconds()
    print("ret: {}, duration: {}s".format(ret, duration))

    if ret == 0:
        # success: return the generated patch JSON to the frontend
        with open(jsonFileName) as jsonFile:
            data = json.load(jsonFile)
        os.close(fd)
        os.remove(jsonFileName)
        return json.dumps(data)
    else:
        # extract error from json
        try:
            with open(jsonFileName) as jsonFile:
                msg = json.load(jsonFile)['errorMsg']
        except:
            msg = "customizerWebService: something wrong happened"
        os.close(fd)
        os.remove(jsonFileName)
        raise HTTP(400, json.dumps(msg))
def initExtStatsSession():
    """Populate session.extStats with its default values on first visit."""
    if session.extStats is None:
        session.extStats = {'preset': 'regular', 'randoPreset': 'default'}
def updateExtStatsSession():
    """Store the submitted skill/rando presets in the extStats session."""
    if session.extStats is None:
        session.extStats = {}
    for var in ('preset', 'randoPreset'):
        session.extStats[var] = request.vars[var]
def validateExtStatsParams():
    """Validate the skill and rando preset request parameters.

    Returns (True, None) when both are valid, else (False, errorMessage).
    """
    for (paramName, directory) in [("preset", "standard_presets"), ("randoPreset", "rando_presets")]:
        value = request.vars[paramName]
        if value is None:
            return (False, "Missing parameter preset")
        if IS_ALPHANUMERIC()(value)[1] is not None:
            return (False, "Wrong value for preset, must be alphanumeric")
        if IS_LENGTH(maxsize=32, minsize=1)(value)[1] is not None:
            return (False, "Wrong length for preset, name must be between 1 and 32 characters")
        # the preset must exist on disk
        fullPath = '{}/{}.json'.format(directory, value)
        if not os.path.isfile(fullPath):
            return (False, "Unknown preset: {}".format(value))
    return (True, None)
def extStats():
    """Extended randomizer statistics page.

    When the Load action is requested, loads the selected skill and rando
    presets, queries the DB for aggregated stats and renders them.
    """
    response.title = 'Super Metroid VARIA Randomizer statistics'
    initExtStatsSession()
    if request.vars.action == 'Load':
        (ok, msg) = validateExtStatsParams()
        if not ok:
            session.flash = msg
            redirect(URL(r=request, f='extStats'))
        updateExtStatsSession()
        skillPreset = request.vars.preset
        randoPreset = request.vars.randoPreset
        # load rando preset
        fullPath = 'rando_presets/{}.json'.format(randoPreset)
        try:
            with open(fullPath) as jsonFile:
                randoPreset = json.load(jsonFile)
        except Exception as e:
            raise HTTP(400, "Can't load the rando preset: {}: {}".format(randoPreset, e))
        # load skill preset
        fullPath = '{}/{}.json'.format(getPresetDir(skillPreset), skillPreset)
        try:
            skillPresetContent = PresetLoader.factory(fullPath).params
            completePreset(skillPresetContent)
        except Exception as e:
            raise HTTP(400, "Error loading the preset {}: {}".format(skillPreset, e))
        parameters = {
            'preset': skillPreset,
            'area': 'areaRandomization' in randoPreset and randoPreset['areaRandomization'] == 'on',
            'boss': 'bossRandomization' in randoPreset and randoPreset['bossRandomization'] == 'on',
            'gravityBehaviour': randoPreset['gravityBehaviour'],
            'nerfedCharge': randoPreset['nerfedCharge'] == 'on',
            'maxDifficulty': randoPreset['maxDifficulty'],
            # parameters which can be random:
            'majorsSplit': randoPreset['majorsSplit'] if 'majorsSplit' in randoPreset else 'Full',
            'progSpeed': randoPreset['progressionSpeed'] if 'progressionSpeed' in randoPreset else 'variable',
            'morphPlacement': randoPreset['morphPlacement'] if 'morphPlacement' in randoPreset else 'early',
            'suitsRestriction': 'suitsRestriction' in randoPreset and randoPreset['suitsRestriction'] == 'on',
            'progDiff': randoPreset['progressionDifficulty'] if 'progressionDifficulty' in randoPreset else 'normal',
            'superFunMovement': 'funMovement' in randoPreset and randoPreset['funMovement'] == 'on',
            'superFunCombat': 'funCombat' in randoPreset and randoPreset['funCombat'] == 'on',
            'superFunSuit': 'funSuits' in randoPreset and randoPreset['funSuits'] == 'on'
        }
        # bug fix: these keys are optional (the dict above guards them with
        # "in"); use .get() so a preset without them no longer raises KeyError
        if randoPreset.get('suitsRestriction') == "random":
            parameters["suitsRestriction"] = "random"
        if randoPreset.get('funMovement') == "random":
            parameters["superFunMovement"] = "random"
        if randoPreset.get('funCombat') == "random":
            parameters["superFunCombat"] = "random"
        if randoPreset.get('funSuits') == "random":
            parameters["superFunSuit"] = "random"
        DB = db.DB()
        (itemsStats, techniquesStats, difficulties) = DB.getExtStat(parameters)
        DB.close()
        # check that all items are present in the stats:
        nbItems = 19
        nbLocs = 105
        if itemsStats != None and len(itemsStats) > 0 and len(itemsStats) != nbItems:
            # insert zero-filled rows for items missing from the DB result
            for i, item in enumerate(['Bomb', 'Charge', 'Grapple', 'Gravity', 'HiJump', 'Ice', 'Missile', 'Morph',
                                      'Plasma', 'PowerBomb', 'ScrewAttack', 'SpaceJump', 'Spazer', 'SpeedBooster',
                                      'SpringBall', 'Super', 'Varia', 'Wave', 'XRayScope']):
                if itemsStats[i][1] != item:
                    itemsStats.insert(i, [itemsStats[0][0], item] + [0]*nbLocs)
    else:
        itemsStats = None
        techniquesStats = None
        difficulties = None
        skillPresetContent = None
        parameters = None
    (randoPresets, tourRandoPresets) = loadRandoPresetsList()
    # remove random presets those statistics are useless
    # robustness fix: don't crash if a preset was renamed or removed
    for name in ("all_random", "quite_random"):
        if name in randoPresets:
            randoPresets.remove(name)
    (stdPresets, tourPresets, comPresets) = loadPresetsList()
    return dict(stdPresets=stdPresets, tourPresets=tourPresets,
                randoPresets=randoPresets, tourRandoPresets=tourRandoPresets,
                itemsStats=itemsStats, techniquesStats=techniquesStats,
                categories=Knows.categories, knowsDesc=Knows.desc, skillPresetContent=skillPresetContent,
                locations=locations, parameters=parameters, difficulties=difficulties)
|
"""A separate Flask app that serves fake endpoints for demo purposes."""
# -*- coding: utf-8 -*-
import json
import os
from random import randrange as rr
from random import choice, random
import time
from flask import (
Flask,
abort,
)
from flask.ext.cors import CORS
from flask.ext.cors import cross_origin
app = Flask(__name__)
CORS(app)
app.config['SECRET_KEY'] = 'NOTSECURELOL'
app.debug = True
cwd = os.getcwd()
# FIX: @cross_origin() must sit *below* @app.route; in the original order
# app.route registered the bare view before cross_origin wrapped it, so the
# per-view CORS decorator had no effect.  The trailing ``return
# json.dumps({})`` after the ``with`` block was unreachable and is removed.
@app.route('/timeline/')
@cross_origin()
def timeline():
    """Fake endpoint: serve the canned timeline example JSON."""
    with open('{}/examples/timeline3.json'.format(cwd), 'r') as timelinejson:
        return timelinejson.read()
@app.route('/dtable', methods=['GET'])
def dtable():
    """Fake endpoint: serve the canned data-table example JSON."""
    # Use module-level ``cwd`` like the other handlers (os.getcwd() here was
    # inconsistent); the trailing ``return json.dumps({})`` was unreachable.
    with open('{}/examples/dtable.json'.format(cwd), 'r') as djson:
        return djson.read()
# FIX: decorator order — @cross_origin() must be below @app.route to wrap the
# view that actually gets registered.
@app.route('/deadend/')
@cross_origin()
def test_die():
    """Fake endpoint that ends in a random 50x error."""
    # Simulate slow connection
    time.sleep(random())
    abort(choice([500, 501, 502, 503, 504]))
# FIX: decorator order — @cross_origin() must be below @app.route.
@app.route('/test4/')
@cross_origin()
def test4():
    """Fake endpoint: hierarchical name/children/size payload."""
    # Simulate slow connection
    time.sleep(random())
    children = [{"name": '---foo---', "size": i} for i in range(0, 30)]
    return json.dumps({"name": "foo", "children": children})
# FIX: decorator order — @cross_origin() must be below @app.route.
@app.route('/test3/')
@cross_origin()
def test3():
    """Fake endpoint: hierarchical name/children/size payload."""
    # Simulate slow connection
    time.sleep(random())
    children = [{"name": '---foo---', "size": i} for i in range(0, 30)]
    return json.dumps({"name": "foo", "children": children})
# FIX: decorator order — @cross_origin() must be below @app.route.
@app.route('/test1/')
@cross_origin()
def test1():
    """Fake endpoint: four series of 12 random ints (c3.js row format)."""
    # Simulate slow connection
    time.sleep(random())
    return json.dumps([
        [name] + [rr(0, 100) for _ in range(12)]
        for name in ('data1', 'data2', 'data3', 'data4')
    ])
# FIX: decorator order — @cross_origin() must be below @app.route.
@app.route('/test2/')
@cross_origin()
def test2():
    """Fake endpoint: two series of 12 random ints (c3.js row format)."""
    # Simulate slow connection
    time.sleep(random())
    return json.dumps([
        [name] + [rr(0, 100) for _ in range(12)]
        for name in ('data3', 'data4')
    ])
@app.route('/sparklines', methods=['GET'])
def sparklines():
    """Fake endpoint: a flat list of 20 random integers in [0, 100)."""
    values = [rr(0, 100) for _ in range(20)]
    return json.dumps(values)
@app.route('/circlepack', methods=['GET'])
def circlepack():
    """Fake endpoint: serve the canned flare.json hierarchy."""
    # The trailing ``return json.dumps({})`` was unreachable; removed.
    with open('{}/examples/flare.json'.format(cwd), 'r') as djson:
        return djson.read()
@app.route('/treemap', methods=['GET'])
def treemap():
    """Fake endpoint: serve the canned flare.json hierarchy."""
    # The trailing ``return json.dumps({})`` was unreachable; removed.
    with open('{}/examples/flare.json'.format(cwd), 'r') as djson:
        return djson.read()
@app.route('/map', methods=['GET'])
def datamap():
    """Fake endpoint: serve the canned example map HTML."""
    # BUG FIX: 'r' was passed to str.format() — where extra arguments are
    # silently ignored — instead of to open() as the mode argument.
    with open('{}/examples/map.html'.format(cwd), 'r') as maphtml:
        return maphtml.read()
@app.route('/dendrogram', methods=['GET'])
def dendro():
    """Fake endpoint: serve the canned flare.json hierarchy."""
    # The trailing ``return json.dumps({})`` was unreachable; removed.
    with open('{}/examples/flare.json'.format(cwd), 'r') as djson:
        return djson.read()
if __name__ == '__main__':
    # Dev server on a non-default port so it can run beside the main app.
    app.run(debug=True, port=5004)
Clean up old endpoints, add support for venn.js example.
"""A separate Flask app that serves fake endpoints for demo purposes."""
# -*- coding: utf-8 -*-
import json
import os
from random import randrange as rr
from random import choice, random
import time
from flask import (
Flask,
abort,
)
from flask.ext.cors import CORS
from flask.ext.cors import cross_origin
app = Flask(__name__)
CORS(app)
app.config['SECRET_KEY'] = 'NOTSECURELOL'
app.debug = True
cwd = os.getcwd()
# FIX: @cross_origin() must sit *below* @app.route; in the original order the
# bare view was registered before cross_origin wrapped it.  The trailing
# ``return json.dumps({})`` was unreachable and is removed.
@app.route('/timeline/')
@cross_origin()
def timeline():
    """Fake endpoint: serve the canned timeline example JSON."""
    with open('{}/examples/timeline3.json'.format(cwd), 'r') as timelinejson:
        return timelinejson.read()
@app.route('/dtable', methods=['GET'])
def dtable():
    """Fake endpoint: serve the canned data-table example JSON."""
    # Use module-level ``cwd`` like the other handlers (os.getcwd() here was
    # inconsistent); the trailing ``return json.dumps({})`` was unreachable.
    with open('{}/examples/dtable.json'.format(cwd), 'r') as djson:
        return djson.read()
# FIX: decorator order — @cross_origin() must be below @app.route.
@app.route('/deadend/')
@cross_origin()
def test_die():
    """Fake endpoint that ends in a random 50x error."""
    # Simulate slow connection
    time.sleep(random())
    abort(choice([500, 501, 502, 503, 504]))
# FIX: decorator order — @cross_origin() must be below @app.route.
@app.route('/venn/')
@cross_origin()
def test_venn():
    """Fake endpoint: random set sizes for the venn.js example."""
    data = [
        {'sets': sets, 'size': rr(10, 100)}
        for sets in (['A'], ['B'], ['C'], ['A', 'B'], ['A', 'B', 'C'])
    ]
    return json.dumps(data)
# FIX: decorator order — @cross_origin() must be below @app.route.
@app.route('/test1/')
@cross_origin()
def test1():
    """Fake endpoint: four series of 12 random ints (c3.js row format)."""
    # Simulate slow connection
    time.sleep(random())
    return json.dumps([
        [name] + [rr(0, 100) for _ in range(12)]
        for name in ('data1', 'data2', 'data3', 'data4')
    ])
# FIX: decorator order — @cross_origin() must be below @app.route.
@app.route('/test2/')
@cross_origin()
def test2():
    """Fake endpoint: two series of 12 random ints (c3.js row format)."""
    # Simulate slow connection
    time.sleep(random())
    return json.dumps([
        [name] + [rr(0, 100) for _ in range(12)]
        for name in ('data3', 'data4')
    ])
@app.route('/sparklines', methods=['GET'])
def sparklines():
    """Fake endpoint: a flat list of 20 random integers in [0, 100)."""
    values = [rr(0, 100) for _ in range(20)]
    return json.dumps(values)
@app.route('/circlepack', methods=['GET'])
def circlepack():
    """Fake endpoint: serve the canned flare.json hierarchy."""
    # The trailing ``return json.dumps({})`` was unreachable; removed.
    with open('{}/examples/flare.json'.format(cwd), 'r') as djson:
        return djson.read()
@app.route('/treemap', methods=['GET'])
def treemap():
    """Fake endpoint: serve the canned flare.json hierarchy."""
    # The trailing ``return json.dumps({})`` was unreachable; removed.
    with open('{}/examples/flare.json'.format(cwd), 'r') as djson:
        return djson.read()
@app.route('/map', methods=['GET'])
def datamap():
    """Fake endpoint: serve the canned example map HTML."""
    # BUG FIX: 'r' was passed to str.format() — where extra arguments are
    # silently ignored — instead of to open() as the mode argument.
    with open('{}/examples/map.html'.format(cwd), 'r') as maphtml:
        return maphtml.read()
@app.route('/dendrogram', methods=['GET'])
def dendro():
    """Fake endpoint: serve the canned flare.json hierarchy."""
    # The trailing ``return json.dumps({})`` was unreachable; removed.
    with open('{}/examples/flare.json'.format(cwd), 'r') as djson:
        return djson.read()
if __name__ == '__main__':
    # Dev server on a non-default port so it can run beside the main app.
    app.run(debug=True, port=5004)
|
# -*- coding: utf-8 -*-
"""
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
- Use sentry for error logging
"""
from __future__ import absolute_import, unicode_literals
# noinspection PyUnresolvedReferences
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
import logging
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure",)
# raven sentry client
# See https://docs.getsentry.com/hosted/clients/python/integrations/django/
# INSTALLED_APPS += ('raven.contrib.django.raven_compat',)
SECURITY_MIDDLEWARE = ('djangosecure.middleware.SecurityMiddleware',)
# RAVEN_MIDDLEWARE = ('raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',
# 'raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware',)
# MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + RAVEN_MIDDLEWARE + MIDDLEWARE_CLASSES
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool("DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
# SECURITY FIX: production serves over HTTPS (SECURE_SSL_REDIRECT below), so
# the session cookie must carry the Secure flag; it was previously False,
# which allowed the cookie to be sent over plain HTTP.
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['annotation-tool.herokuapp.com', '.herokuapp.com'])
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn",)
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += ('storages',)
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL', default='annotation_tool <noreply@annotation-tool.herokuapp.com>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_ACCESS_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[annotation_tool] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader',
])
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "{0}/{1}".format(env.cache_url('REDIS_URL', default="redis://127.0.0.1:6379"), 0),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
#
# # Sentry Configuration
# SENTRY_DSN = env('DJANGO_SENTRY_DSN')
# SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': True,
# 'root': {
# 'level': 'WARNING',
# 'handlers': ['sentry'],
# },
# 'formatters': {
# 'verbose': {
# 'format': '%(levelname)s %(asctime)s %(module)s '
# '%(process)d %(thread)d %(message)s'
# },
# },
# 'handlers': {
# 'sentry': {
# 'level': 'ERROR',
# 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
# },
# 'console': {
# 'level': 'DEBUG',
# 'class': 'logging.StreamHandler',
# 'formatter': 'verbose'
# }
# },
# 'loggers': {
# 'django.db.backends': {
# 'level': 'ERROR',
# 'handlers': ['console'],
# 'propagate': False,
# },
# 'raven': {
# 'level': 'DEBUG',
# 'handlers': ['console'],
# 'propagate': False,
# },
# 'sentry.errors': {
# 'level': 'DEBUG',
# 'handlers': ['console'],
# 'propagate': False,
# },
# 'django.security.DisallowedHost': {
# 'level': 'ERROR',
# 'handlers': ['console', 'sentry'],
# 'propagate': False,
# },
# },
# }
# SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
# RAVEN_CONFIG = {
# 'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
# 'DSN': SENTRY_DSN
# }
# Your production stuff: Below this line define 3rd party library settings
heroku prep
# -*- coding: utf-8 -*-
"""
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
- Use sentry for error logging
"""
from __future__ import absolute_import, unicode_literals
# noinspection PyUnresolvedReferences
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
import logging
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure",)
# raven sentry client
# See https://docs.getsentry.com/hosted/clients/python/integrations/django/
# INSTALLED_APPS += ('raven.contrib.django.raven_compat',)
SECURITY_MIDDLEWARE = ('djangosecure.middleware.SecurityMiddleware',)
# RAVEN_MIDDLEWARE = ('raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',
# 'raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware',)
# MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + RAVEN_MIDDLEWARE + MIDDLEWARE_CLASSES
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool("DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
# SECURITY FIX: production serves over HTTPS (SECURE_SSL_REDIRECT below), so
# the session cookie must carry the Secure flag; it was previously False,
# which allowed the cookie to be sent over plain HTTP.
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['annotation-tool.herokuapp.com', '.herokuapp.com'])
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn",)
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += ('storages',)
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
# STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL', default='annotation_tool <noreply@annotation-tool.herokuapp.com>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_ACCESS_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[annotation_tool] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader',
])
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "{0}/{1}".format(env.cache_url('REDIS_URL', default="redis://127.0.0.1:6379"), 0),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
#
# # Sentry Configuration
# SENTRY_DSN = env('DJANGO_SENTRY_DSN')
# SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': True,
# 'root': {
# 'level': 'WARNING',
# 'handlers': ['sentry'],
# },
# 'formatters': {
# 'verbose': {
# 'format': '%(levelname)s %(asctime)s %(module)s '
# '%(process)d %(thread)d %(message)s'
# },
# },
# 'handlers': {
# 'sentry': {
# 'level': 'ERROR',
# 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
# },
# 'console': {
# 'level': 'DEBUG',
# 'class': 'logging.StreamHandler',
# 'formatter': 'verbose'
# }
# },
# 'loggers': {
# 'django.db.backends': {
# 'level': 'ERROR',
# 'handlers': ['console'],
# 'propagate': False,
# },
# 'raven': {
# 'level': 'DEBUG',
# 'handlers': ['console'],
# 'propagate': False,
# },
# 'sentry.errors': {
# 'level': 'DEBUG',
# 'handlers': ['console'],
# 'propagate': False,
# },
# 'django.security.DisallowedHost': {
# 'level': 'ERROR',
# 'handlers': ['console', 'sentry'],
# 'propagate': False,
# },
# },
# }
# SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
# RAVEN_CONFIG = {
# 'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
# 'DSN': SENTRY_DSN
# }
# Your production stuff: Below this line define 3rd party library settings
|
__version__ = '1.0.54'
1.0.55
__version__ = '1.0.55' |
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
import os
from configurations import Configuration, values
from django.urls import reverse_lazy
from unipath import Path
class Base(Configuration):
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
LOGIN_URL = reverse_lazy('auth_login')
LOGIN_REDIRECT_URL = reverse_lazy('api-root')
LOGOUT_REDIRECT_URL = reverse_lazy('auth_login')
ADMINS = (
)
MANAGERS = ADMINS
USE_I18N = True
USE_L10N = True
USE_TZ = True
ALLOWED_HOSTS = [
'*'
]
PROJECT_DIR = Path(__file__).ancestor(2)
LOCALE_PATH = PROJECT_DIR.child('locale')
STATIC_ROOT = PROJECT_DIR.child('static-compiled')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
PROJECT_DIR.child('static')
]
MEDIA_ROOT = PROJECT_DIR.child('media')
MEDIA_URL = '/media/'
DATABASES = values.DatabaseURLValue()
DATABASE_ROUTERS = ['impact.routers.APIRouter']
EMAIL = values.EmailURLValue()
SECRET_KEY = values.Value()
SITE_ID = 1
ROOT_URLCONF = 'impact.urls'
WSGI_APPLICATION = 'impact.wsgi.application'
INSTALLED_APPS = [
'accelerator.apps.AcceleratorConfig',
'corsheaders',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'embed_video',
'impact',
'oauth2_provider',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_swagger',
'rest_framework_tracking',
'simpleuser',
]
ACCELERATOR_MODELS_ARE_MANAGED = False
AUTH_USER_MODEL = 'simpleuser.User'
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.admindocs.middleware.XViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'oauth2_provider.middleware.OAuth2TokenMiddleware',
]
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': os.environ['DJANGO_HIREDIS_CACHE_LOCATION'],
'OPTIONS': {
'DB': 1,
'PARSER_CLASS': 'redis.connection.HiredisParser',
}
},
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [PROJECT_DIR.child('templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.csrf'
],
},
},
]
V0_SECURITY_KEY = bytes(
os.environ.get('IMPACT_API_V0_SECURITY_KEY', 'XXX'),
'utf-8')
V0_IMAGE_PASSWORD = bytes(
os.environ.get('IMPACT_API_V0_IMAGE_PASSWORD', 'XXX'),
'utf-8')
V0_SITE_NAME = bytes(os.environ.get(
'IMPACT_API_V0_SITE_NAME', 'masschallenge.org'), 'utf-8')
V0_API_GROUP = bytes(os.environ.get(
'IMPACT_API_V0_API_GROUP', 'v0_clients'), 'utf-8')
# This and the above should get generalized. See AC-4574.
V1_API_GROUP = bytes(os.environ.get(
'IMPACT_API_V1_API_GROUP', 'v1_clients'), 'utf-8')
V1_CONFIDENTIAL_API_GROUP = 'v1_confidential', 'utf-8'
OAUTH2_PROVIDER = {
# this is the list of available scopes
'SCOPES': {
'read': 'Read scope',
'write': 'Write scope',
'groups': 'Access to your groups'
}
}
CORS_ORIGIN_ALLOW_ALL = True
# settings.py
REST_PROXY = {
'HOST': bytes(
os.environ.get('ACCELERATE_SITE_URL',
'https://accelerate.masschallenge.org'), 'utf-8'),
'AUTH': {
'user': None,
'password': None,
# Or alternatively:
'token': None,
},
'VERIFY_SSL': False,
}
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': (
'rest_framework.pagination.LimitOffsetPagination'),
'PAGE_SIZE': 10,
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'oauth2_provider.contrib.rest_framework.OAuth2Authentication',
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
)
}
AUTHENTICATION_BACKENDS = (
'oauth2_provider.backends.OAuth2Backend',
'django.contrib.auth.backends.ModelBackend',
)
class Dev(Base):
DEBUG = True
Base.TEMPLATES[0]['OPTIONS']['debug'] = True
INTERNAL_IPS = (
'127.0.0.1',
)
ALLOWED_HOSTS = [
'*'
]
MIDDLEWARE_CLASSES = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
] + Base.MIDDLEWARE_CLASSES
INSTALLED_APPS = Base.INSTALLED_APPS + [
'debug_toolbar',
]
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': lambda x: True
}
class Test(Base):
MIGRATION_MODULES = {'django.contrib.auth': None, 'impact': None}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'HOST': '',
'NAME': 'test.db',
'USER': '',
'PASSWORD': ''
}
}
DATABASE_ROUTERS = []
DEBUG = False
TEST_RUNNER = 'impact.test_runner.UnManagedModelTestRunner'
LANGUAGE_CODE = 'en'
class Prod(Base):
ALLOWED_HOSTS = Base.ALLOWED_HOSTS + [
os.environ.get('DJANGO_ALLOWED_HOST', '*'),
]
[AC-4935] Fix definition of V1_CONFIDENTIAL_API_GROUP
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
import os
from configurations import Configuration, values
from django.urls import reverse_lazy
from unipath import Path
class Base(Configuration):
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
LOGIN_URL = reverse_lazy('auth_login')
LOGIN_REDIRECT_URL = reverse_lazy('api-root')
LOGOUT_REDIRECT_URL = reverse_lazy('auth_login')
ADMINS = (
)
MANAGERS = ADMINS
USE_I18N = True
USE_L10N = True
USE_TZ = True
ALLOWED_HOSTS = [
'*'
]
PROJECT_DIR = Path(__file__).ancestor(2)
LOCALE_PATH = PROJECT_DIR.child('locale')
STATIC_ROOT = PROJECT_DIR.child('static-compiled')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
PROJECT_DIR.child('static')
]
MEDIA_ROOT = PROJECT_DIR.child('media')
MEDIA_URL = '/media/'
DATABASES = values.DatabaseURLValue()
DATABASE_ROUTERS = ['impact.routers.APIRouter']
EMAIL = values.EmailURLValue()
SECRET_KEY = values.Value()
SITE_ID = 1
ROOT_URLCONF = 'impact.urls'
WSGI_APPLICATION = 'impact.wsgi.application'
INSTALLED_APPS = [
'accelerator.apps.AcceleratorConfig',
'corsheaders',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'embed_video',
'impact',
'oauth2_provider',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_swagger',
'rest_framework_tracking',
'simpleuser',
]
ACCELERATOR_MODELS_ARE_MANAGED = False
AUTH_USER_MODEL = 'simpleuser.User'
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.admindocs.middleware.XViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'oauth2_provider.middleware.OAuth2TokenMiddleware',
]
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': os.environ['DJANGO_HIREDIS_CACHE_LOCATION'],
'OPTIONS': {
'DB': 1,
'PARSER_CLASS': 'redis.connection.HiredisParser',
}
},
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [PROJECT_DIR.child('templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.csrf'
],
},
},
]
V0_SECURITY_KEY = bytes(
os.environ.get('IMPACT_API_V0_SECURITY_KEY', 'XXX'),
'utf-8')
V0_IMAGE_PASSWORD = bytes(
os.environ.get('IMPACT_API_V0_IMAGE_PASSWORD', 'XXX'),
'utf-8')
V0_SITE_NAME = bytes(os.environ.get(
'IMPACT_API_V0_SITE_NAME', 'masschallenge.org'), 'utf-8')
V0_API_GROUP = bytes(os.environ.get(
'IMPACT_API_V0_API_GROUP', 'v0_clients'), 'utf-8')
# This and the above should get generalized. See AC-4574.
V1_API_GROUP = bytes(os.environ.get(
'IMPACT_API_V1_API_GROUP', 'v1_clients'), 'utf-8')
V1_CONFIDENTIAL_API_GROUP = bytes('v1_confidential', 'utf-8')
OAUTH2_PROVIDER = {
# this is the list of available scopes
'SCOPES': {
'read': 'Read scope',
'write': 'Write scope',
'groups': 'Access to your groups'
}
}
CORS_ORIGIN_ALLOW_ALL = True
# settings.py
REST_PROXY = {
'HOST': bytes(
os.environ.get('ACCELERATE_SITE_URL',
'https://accelerate.masschallenge.org'), 'utf-8'),
'AUTH': {
'user': None,
'password': None,
# Or alternatively:
'token': None,
},
'VERIFY_SSL': False,
}
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': (
'rest_framework.pagination.LimitOffsetPagination'),
'PAGE_SIZE': 10,
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'oauth2_provider.contrib.rest_framework.OAuth2Authentication',
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
)
}
AUTHENTICATION_BACKENDS = (
'oauth2_provider.backends.OAuth2Backend',
'django.contrib.auth.backends.ModelBackend',
)
class Dev(Base):
DEBUG = True
Base.TEMPLATES[0]['OPTIONS']['debug'] = True
INTERNAL_IPS = (
'127.0.0.1',
)
ALLOWED_HOSTS = [
'*'
]
MIDDLEWARE_CLASSES = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
] + Base.MIDDLEWARE_CLASSES
INSTALLED_APPS = Base.INSTALLED_APPS + [
'debug_toolbar',
]
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': lambda x: True
}
class Test(Base):
MIGRATION_MODULES = {'django.contrib.auth': None, 'impact': None}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'HOST': '',
'NAME': 'test.db',
'USER': '',
'PASSWORD': ''
}
}
DATABASE_ROUTERS = []
DEBUG = False
TEST_RUNNER = 'impact.test_runner.UnManagedModelTestRunner'
LANGUAGE_CODE = 'en'
class Prod(Base):
ALLOWED_HOSTS = Base.ALLOWED_HOSTS + [
os.environ.get('DJANGO_ALLOWED_HOST', '*'),
]
|
test: Move google imports into try/except block
The datastore tests require google imports. The imports are only
required for the datastore backend and thus shouldn't cause a test
failure.
|
from urllib.error import URLError
import numpy
import smopy
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
import math
from gtfspy.gtfs import GTFS
from gtfspy.stats import get_spatial_bounds, get_percentile_stop_bounds, get_median_lat_lon_of_stops
from gtfspy.route_types import ROUTE_TYPE_TO_COLOR, ROUTE_TYPE_TO_ZORDER, ROUTE_TYPE_TO_SHORT_DESCRIPTION
import matplotlib as mpl
from matplotlib_scalebar.scalebar import ScaleBar
from gtfspy import util
"""
This module contains functions for plotting (static) visualizations of the public transport networks using matplotlib.
"""
from gtfspy.extended_route_types import ROUTE_TYPE_CONVERSION
# Carto basemap style identifiers accepted as the ``map_style`` argument of
# the plotting functions below (passed through when fetching map tiles).
MAP_STYLES = [
    "rastertiles/voyager",
    "rastertiles/voyager_nolabels",
    "rastertiles/voyager_only_labels",
    "rastertiles/voyager_labels_under",
    "light_all",
    "dark_all",
    "light_nolabels",
    "light_only_labels",
    "dark_nolabels",
    "dark_only_labels"
]
def _get_median_centered_plot_bounds(g):
    """Return (lon_min, lon_max, lat_min, lat_max) covering all stops of
    ``g`` while being symmetric around the median stop location."""
    lon_min, lon_max, lat_min, lat_max = get_spatial_bounds(g)
    lat_median, lon_median = get_median_lat_lon_of_stops(g)
    # Half-extents: the larger distance from the median to either bound.
    half_width = max(abs(lon_median - lon_min), abs(lon_median - lon_max))
    half_height = max(abs(lat_median - lat_min), abs(lat_median - lat_max))
    return (lon_median - half_width, lon_median + half_width,
            lat_median - half_height, lat_median + half_height)
def plot_route_network_from_gtfs(g, ax=None, spatial_bounds=None, map_alpha=0.8, scalebar=True, legend=True,
                                 return_smopy_map=False, map_style=None):
    """
    Plot the full route network of a GTFS feed on a background map.

    Parameters
    ----------
    g: A gtfspy.gtfs.GTFS object
        Where to get the data from?
    ax: matplotlib.Axes object, optional
        If None, a new figure and an axis is created
    spatial_bounds: dict, optional
        with str keys: lon_min, lon_max, lat_min, lat_max
    return_smopy_map: bool, optional
        defaulting to false
    Returns
    -------
    ax: matplotlib.axes.Axes
    """
    assert(isinstance(g, GTFS))
    route_shapes = g.get_all_route_shapes()
    if spatial_bounds is None:
        # Default to the bounding box of all stops in the feed.
        spatial_bounds = get_spatial_bounds(g, as_dict=True)
    if ax is not None:
        # Axes already exists: widen the bounds so the map fills the axes'
        # aspect ratio instead of being distorted.
        bbox = ax.get_window_extent().transformed(ax.figure.dpi_scale_trans.inverted())
        width, height = bbox.width, bbox.height
        spatial_bounds = _expand_spatial_bounds_to_fit_axes(spatial_bounds, width, height)
    # Delegate the actual drawing to plot_as_routes.
    return plot_as_routes(route_shapes,
                          ax=ax,
                          spatial_bounds=spatial_bounds,
                          map_alpha=map_alpha,
                          plot_scalebar=scalebar,
                          legend=legend,
                          return_smopy_map=return_smopy_map,
                          map_style=map_style)
def plot_as_routes(route_shapes, ax=None, spatial_bounds=None, map_alpha=0.8, plot_scalebar=True, legend=True,
                   return_smopy_map=False, line_width_attribute=None, line_width_scale=1.0, map_style=None):
    """
    Draw each route shape as a polyline, colored by route type, over a smopy map.

    Parameters
    ----------
    route_shapes: list of dicts that should have the following keys
        name, type, agency, lats, lons
        with types
        list, list, str, list, list
    ax: axis object
        if None, a new figure and axis are created
    spatial_bounds: dict
        required; str keys lon_min, lon_max, lat_min, lat_max
    map_alpha: float
        alpha of the background map image
    plot_scalebar: bool
    legend: bool
        if True, one legend entry per route type actually drawn
    return_smopy_map: bool
        if True, return (ax, smopy_map) instead of just ax
    line_width_attribute: str or None
        key in each shape dict whose value, multiplied by line_width_scale,
        gives the line width; width 1 is used when None
    line_width_scale: float
    map_style: str, optional
        one of MAP_STYLES

    Returns
    -------
    ax: matplotlib.axes object
    """
    lon_min = spatial_bounds['lon_min']
    lon_max = spatial_bounds['lon_max']
    lat_min = spatial_bounds['lat_min']
    lat_max = spatial_bounds['lat_max']
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max, map_style=map_style)
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=map_alpha)
    # Pixel coordinates of the bounding-box corners; reused for the axis
    # limits and the scalebar width.
    bound_pixel_xs, bound_pixel_ys = smopy_map.to_pixels(numpy.array([lat_min, lat_max]),
                                                         numpy.array([lon_min, lon_max]))
    # route type -> one representative Line2D artist (for the legend)
    route_types_to_lines = {}
    for shape in route_shapes:
        route_type = ROUTE_TYPE_CONVERSION[shape['type']]
        lats = numpy.array(shape['lats'])
        lons = numpy.array(shape['lons'])
        if line_width_attribute:
            line_width = line_width_scale * shape[line_width_attribute]
        else:
            line_width = 1
        xs, ys = smopy_map.to_pixels(lats, lons)
        line, = ax.plot(xs, ys, linewidth=line_width, color=ROUTE_TYPE_TO_COLOR[route_type], zorder=ROUTE_TYPE_TO_ZORDER[route_type])
        route_types_to_lines[route_type] = line
    if legend:
        lines = list(route_types_to_lines.values())
        labels = [ROUTE_TYPE_TO_SHORT_DESCRIPTION[route_type] for route_type in route_types_to_lines.keys()]
        ax.legend(lines, labels, loc="upper left")
    if plot_scalebar:
        _add_scale_bar(ax, lat_max, lon_min, lon_max, bound_pixel_xs.max() - bound_pixel_xs.min())
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlim(bound_pixel_xs.min(), bound_pixel_xs.max())
    # y-limits inverted on purpose: pixel y grows downwards.
    ax.set_ylim(bound_pixel_ys.max(), bound_pixel_ys.min())
    if return_smopy_map:
        return ax, smopy_map
    else:
        return ax
def plot_routes_as_stop_to_stop_network(from_lats, from_lons, to_lats, to_lons, attributes=None, color_attributes=None,
                                        zorders=None,
                                        line_labels=None,
                                        ax=None,
                                        spatial_bounds=None,
                                        alpha=1,
                                        map_alpha=0.8,
                                        scalebar=True,
                                        return_smopy_map=False,
                                        c=None, linewidth=None,
                                        linewidth_multiplier=1,
                                        use_log_scale=False):
    """
    Draw one straight segment per stop-to-stop link over a smopy background map.

    Parameters
    ----------
    from_lats, from_lons, to_lats, to_lons: sequences of float
        segment endpoints, matched by index
    attributes: sequence, optional
        per-segment widths (before linewidth_multiplier); falsy entries fall
        back to ``linewidth``
    color_attributes: sequence, optional
        per-segment route types (keys of ROUTE_TYPE_CONVERSION); entries
        determine color and zorder, and a route-type legend is drawn.
        When None, the plain color ``c`` is used for all segments.
    zorders: sequence of int, optional
    line_labels: sequence of str, optional
        text placed at each segment midpoint
    ax: matplotlib axis, optional
    spatial_bounds: dict, optional
        keys lon_min, lon_max, lat_min, lat_max; derived from the endpoints
        when None
    alpha: float
        line alpha
    map_alpha: float
        background map alpha
    scalebar: bool
    return_smopy_map: bool
    c: matplotlib color
        fallback color; required (asserted) even when color_attributes is given
    linewidth: number, optional
    linewidth_multiplier: number
    use_log_scale: bool
        if True, each width attribute is log10-scaled before plotting

    Returns
    -------
    ax, or (ax, smopy_map) when return_smopy_map is True
    """
    if attributes is None:
        attributes = len(list(from_lats))*[None]
    if not linewidth:
        linewidth = 1
    if color_attributes is None:
        color_attributes = len(list(from_lats))*[None]
        # c is only required in this branch (it is the fallback color).
        assert c is not None
    if zorders is None:
        zorders = len(list(from_lats))*[1]
    if line_labels is None:
        line_labels = len(list(from_lats))*[None]
    if spatial_bounds is None:
        lon_min = min(list(from_lons) + list(to_lons))
        lon_max = max(list(from_lons) + list(to_lons))
        lat_min = min(list(from_lats) + list(to_lats))
        lat_max = max(list(from_lats) + list(to_lats))
    else:
        lon_min = spatial_bounds['lon_min']
        lon_max = spatial_bounds['lon_max']
        lat_min = spatial_bounds['lat_min']
        lat_max = spatial_bounds['lat_max']
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=map_alpha)
    # Pixel coordinates of the bounding-box corners; used for limits/scalebar.
    bound_pixel_xs, bound_pixel_ys = smopy_map.to_pixels(numpy.array([lat_min, lat_max]),
                                                         numpy.array([lon_min, lon_max]))
    for from_lat, from_lon, to_lat, to_lon, attribute, color_attribute, zorder, line_label in zip(from_lats,
                                                                                                  from_lons,
                                                                                                  to_lats,
                                                                                                  to_lons,
                                                                                                  attributes,
                                                                                                  color_attributes,
                                                                                                  zorders,
                                                                                                  line_labels):
        if color_attribute is None:
            color = c
        else:
            # Route type determines both color and draw order.
            a = ROUTE_TYPE_CONVERSION[color_attribute]
            color = ROUTE_TYPE_TO_COLOR[a]
            zorder = ROUTE_TYPE_TO_ZORDER[a]
        if not attribute:
            attribute = linewidth
        if use_log_scale:
            attribute = math.log10(attribute)
        xs, ys = smopy_map.to_pixels(numpy.array([from_lat, to_lat]), numpy.array([from_lon, to_lon]))
        ax.plot(xs, ys, color=color, linewidth=attribute*linewidth_multiplier, zorder=zorder, alpha=alpha)
        if line_label:
            ax.text(xs.mean(), ys.mean(), line_label,
                    # verticalalignment='bottom', horizontalalignment='right',
                    color='green', fontsize=15)
    # Legend only makes sense when segments are colored by route type.
    legend = True if color_attributes[0] is not None else False
    import matplotlib.lines as mlines
    if legend:
        unique_types = set(color_attributes)
        lines = []
        for i in unique_types:
            line = mlines.Line2D([], [], color=ROUTE_TYPE_TO_COLOR[i], markersize=15,
                                 label=ROUTE_TYPE_TO_SHORT_DESCRIPTION[i])
            lines.append(line)
        handles = lines
        labels = [h.get_label() for h in handles]
        ax.legend(handles=handles, labels=labels, loc=4)
    if scalebar:
        _add_scale_bar(ax, lat_max, lon_min, lon_max, bound_pixel_xs.max() - bound_pixel_xs.min())
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlim(bound_pixel_xs.min(), bound_pixel_xs.max())
    # y-limits inverted on purpose: pixel y grows downwards.
    ax.set_ylim(bound_pixel_ys.max(), bound_pixel_ys.min())
    if return_smopy_map:
        return ax, smopy_map
    else:
        return ax
def _add_scale_bar(ax, lat, lon_min, lon_max, width_pixels):
    """Attach a ScaleBar artist scaled to meters-per-pixel at latitude ``lat``."""
    meters_across = util.wgs84_distance(lat, lon_min, lat, lon_max)
    meters_per_pixel = meters_across / width_pixels
    ax.add_artist(ScaleBar(meters_per_pixel))
def _expand_spatial_bounds_to_fit_axes(bounds, ax_width, ax_height):
    """
    Widen either the longitude or the latitude span of ``bounds`` so the
    geographic aspect ratio matches the axes' aspect ratio.

    Parameters
    ----------
    bounds: dict
        with keys lon_min, lon_max, lat_min, lat_max
    ax_width: float
    ax_height: float

    Returns
    -------
    spatial_bounds: dict
        same keys, with exactly one dimension symmetrically enlarged
    """
    lat_min, lat_max = bounds['lat_min'], bounds['lat_max']
    lon_min, lon_max = bounds['lon_min'], bounds['lon_max']
    height_m = util.wgs84_distance(lat_min, lon_min, lat_max, lon_min)
    width_m = util.wgs84_distance(lat_min, lon_min, lat_min, lon_max)
    meters_ratio = width_m / height_m
    axes_ratio = ax_width / ax_height
    if axes_ratio > meters_ratio:
        # Axes are relatively wider than the data: stretch the longitudes.
        new_width_m = height_m * axes_ratio
        lon_span = ((lon_max - lon_min) / width_m) * new_width_m
        lon_center = (lon_min + lon_max) / 2.
        return {
            "lon_min": lon_center - lon_span / 2.,
            "lon_max": lon_center + lon_span / 2.,
            "lat_min": lat_min,
            "lat_max": lat_max
        }
    # Axes are relatively taller than the data: stretch the latitudes.
    new_height_m = width_m / axes_ratio
    lat_span = ((lat_max - lat_min) / height_m) * new_height_m
    lat_center = (lat_min + lat_max) / 2.
    return {
        "lon_min": lon_min,
        "lon_max": lon_max,
        "lat_min": lat_center - lat_span / 2.,
        "lat_max": lat_center + lat_span / 2.
    }
def plot_route_network_thumbnail(g, map_style=None):
    """Render a small 512x300 px overview of the route network.

    Uses a fixed scale of 24 meters per pixel around the median stop, a fully
    opaque background map, and neither legend nor scalebar.
    """
    pixel_width, pixel_height = 512, 300
    meters_per_pixel = 24
    dpi = mpl.rcParams["figure.dpi"]
    center_lat, center_lon = get_median_lat_lon_of_stops(g)
    half_height_deg = util.wgs84_height(pixel_height * meters_per_pixel)
    half_width_deg = util.wgs84_width(pixel_width * meters_per_pixel, center_lat)
    bounds = {
        "lon_min": center_lon - half_width_deg,
        "lon_max": center_lon + half_width_deg,
        "lat_min": center_lat - half_height_deg,
        "lat_max": center_lat + half_height_deg
    }
    fig = plt.figure(figsize=(pixel_width / dpi, pixel_height / dpi))
    ax = fig.add_subplot(111)
    plt.subplots_adjust(bottom=0.0, left=0.0, right=1.0, top=1.0)
    return plot_route_network_from_gtfs(g, ax, bounds, map_alpha=1.0, scalebar=False, legend=False,
                                        map_style=map_style)
def plot_stops_with_categorical_attributes(lats_list, lons_list, attributes_list, s=0.5, spatial_bounds=None, colorbar=False, ax=None, cmap=None, norm=None, alpha=None):
    """
    Scatter several groups of stops, one BASE_COLORS color per group.

    Parameters
    ----------
    lats_list, lons_list: list of sequences of float
        one sequence per group
    attributes_list: list
        iterated alongside the groups but not otherwise used
        (kept for interface compatibility)
    s: float
        marker size
    spatial_bounds: dict, optional
        keys lon_min, lon_max, lat_min, lat_max; computed from the data when falsy
    colorbar, cmap, norm, alpha:
        currently unused (kept for interface compatibility)
    ax: matplotlib axis, optional

    Returns
    -------
    list of scatter artists, one per plotted group
    """
    if not spatial_bounds:
        lon_min = min([min(x) for x in lons_list])
        lon_max = max([max(x) for x in lons_list])
        lat_min = min([min(x) for x in lats_list])
        lat_max = max([max(x) for x in lats_list])
    else:
        lon_min = spatial_bounds['lon_min']
        lon_max = spatial_bounds['lon_max']
        lat_min = spatial_bounds['lat_min']
        lat_max = spatial_bounds['lat_max']
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    # Pixel-space bounding box of the four corner points.
    # Bug fix: the initialization test used to be "if not min_x", which
    # incorrectly treated a legitimate pixel coordinate of 0 as "unset";
    # test against None instead.
    min_x = max_x = min_y = max_y = None
    for lat in [lat_min, lat_max]:
        for lon in [lon_min, lon_max]:
            x, y = smopy_map.to_pixels(lat, lon)
            if min_x is None:
                min_x = max_x = x
                min_y = max_y = y
            else:
                max_x = max(max_x, x)
                max_y = max(max_y, y)
                min_y = min(min_y, y)
                min_x = min(min_x, x)
    ax.set_xlim(min_x, max_x)
    # y-limits inverted on purpose: pixel y grows downwards.
    ax.set_ylim(max_y, min_y)
    ax.set_xticks([])
    ax.set_yticks([])
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=0.8)
    scatters = []
    for lats, lons, attributes, color in zip(lats_list, lons_list, attributes_list, mcolors.BASE_COLORS):
        x, y = zip(*[smopy_map.to_pixels(lat, lon) for lat, lon in zip(lats, lons)])
        scatters.append(plt.scatter(x, y, s=s, c=color))
    return scatters
def plot_stops_with_attributes(lats, lons, attribute, s=0.5, spatial_bounds=None, colorbar=False, ax=None, cmap=None, norm=None, alpha=None):
    """
    Scatter stops colored by a numeric attribute over a smopy background map.

    Returns ax, or (ax, scatter_artist, smopy_map) when colorbar is True.
    """
    if spatial_bounds:
        lon_min = spatial_bounds['lon_min']
        lon_max = spatial_bounds['lon_max']
        lat_min = spatial_bounds['lat_min']
        lat_max = spatial_bounds['lat_max']
    else:
        lon_min, lon_max = min(lons), max(lons)
        lat_min, lat_max = min(lats), max(lats)
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)
    if ax is None:
        ax = plt.figure().add_subplot(111)
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=0.8)
    xs, ys = smopy_map.to_pixels(lats, lons)
    scatter_artist = ax.scatter(xs, ys, c=attribute, s=s, cmap=cmap, norm=norm, alpha=alpha)
    ax.set_xlim(min(xs), max(xs))
    # y-limits inverted on purpose: pixel y grows downwards.
    ax.set_ylim(max(ys), min(ys))
    if colorbar:
        return ax, scatter_artist, smopy_map
    return ax
def plot_all_stops(g, ax=None, scalebar=False):
    """
    Plot all stops of the feed as red dots over a smopy background map.

    Parameters
    ----------
    g: A gtfspy.gtfs.GTFS object
    ax: matplotlib.Axes object, optional
        If None, a new figure and an axis is created, otherwise results are plotted on the axis.
    scalebar: bool, optional
        Whether to include a scalebar to the plot.

    Returns
    -------
    ax: matplotlib.Axes
    """
    assert isinstance(g, GTFS)
    lon_min, lon_max, lat_min, lat_max = get_spatial_bounds(g)
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=0.8)
    stops = g.stops()
    lats = numpy.array(stops['lat'])
    lons = numpy.array(stops['lon'])
    xs, ys = smopy_map.to_pixels(lats, lons)
    ax.scatter(xs, ys, color="red", s=10)
    ax.set_xlim(min(xs), max(xs))
    # y-limits inverted on purpose: pixel y grows downwards.
    ax.set_ylim(max(ys), min(ys))
    if scalebar:
        # Fix: the scalebar parameter was previously accepted but ignored.
        bound_pixel_xs, _ = smopy_map.to_pixels(numpy.array([lat_min, lat_max]),
                                                numpy.array([lon_min, lon_max]))
        _add_scale_bar(ax, lat_max, lon_min, lon_max,
                       bound_pixel_xs.max() - bound_pixel_xs.min())
    return ax
def get_smopy_map(lon_min, lon_max, lat_min, lat_max, z=None, map_style=None):
    """
    Fetch (and memoize) a smopy background map covering the given bounds.

    Parameters
    ----------
    lon_min, lon_max, lat_min, lat_max: float
    z: int, optional
        force a specific zoom level (monkey-patches smopy's zoom check)
    map_style: str, optional
        one of MAP_STYLES (CartoDB); when None, smopy's current default
        tile server is used

    Returns
    -------
    smopy.Map

    Raises
    ------
    RuntimeError
        if the tile server cannot be reached
    """
    ORIG_TILE_SERVER = smopy.TILE_SERVER
    if map_style is not None:
        assert map_style in MAP_STYLES, map_style + \
            " (map_style parameter) is not a valid CartoDB mapping style. Options are " + \
            str(MAP_STYLES)
        smopy.TILE_SERVER = "http://1.basemaps.cartocdn.com/" + map_style + "/{z}/{x}/{y}.png"
    args = (lat_min, lat_max, lon_min, lon_max, map_style, z)
    if args not in get_smopy_map.maps:
        kwargs = {}
        if z is not None:  # this hack may not work
            smopy.Map.get_allowed_zoom = lambda self, z: z
            kwargs['z'] = z
        try:
            get_smopy_map.maps[args] = smopy.Map((lat_min, lon_min, lat_max, lon_max), **kwargs)
        except URLError:
            # Fix: corrected typo in user-facing message ("your are" -> "you are").
            raise RuntimeError("\n Could not load background map from the tile server: "
                               + smopy.TILE_SERVER +
                               "\n Please check that the tile server exists and "
                               "that you are connected to the internet.")
    # Restore the module-level tile server so other callers are unaffected.
    smopy.TILE_SERVER = ORIG_TILE_SERVER
    return get_smopy_map.maps[args]

# Memoization cache: (lat_min, lat_max, lon_min, lon_max, map_style, z) -> smopy.Map
get_smopy_map.maps = {}
gtfspy/mapviz.py: changed default tileserver to http://1.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png
from urllib.error import URLError
import numpy
import smopy
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
import math
from gtfspy.gtfs import GTFS
from gtfspy.stats import get_spatial_bounds, get_percentile_stop_bounds, get_median_lat_lon_of_stops
from gtfspy.route_types import ROUTE_TYPE_TO_COLOR, ROUTE_TYPE_TO_ZORDER, ROUTE_TYPE_TO_SHORT_DESCRIPTION
import matplotlib as mpl
from matplotlib_scalebar.scalebar import ScaleBar
from gtfspy import util
"""
This module contains functions for plotting (static) visualizations of the public transport networks using matplotlib.
"""
from gtfspy.extended_route_types import ROUTE_TYPE_CONVERSION
# Valid CartoDB basemap style identifiers accepted by the map_style
# parameter of get_smopy_map() and the plotting helpers; the value is
# interpolated verbatim into the tile-server URL.
MAP_STYLES = [
    "rastertiles/voyager",
    "rastertiles/voyager_nolabels",
    "rastertiles/voyager_only_labels",
    "rastertiles/voyager_labels_under",
    "light_all",
    "dark_all",
    "light_nolabels",
    "light_only_labels",
    "dark_nolabels",
    "dark_only_labels"
]
def _get_median_centered_plot_bounds(g):
    """Return (lon_min, lon_max, lat_min, lat_max) centered on the median stop.

    The box is symmetric around the median stop location and large enough to
    contain the full spatial extent of the feed.
    """
    min_lon, max_lon, min_lat, max_lat = get_spatial_bounds(g)
    median_lat, median_lon = get_median_lat_lon_of_stops(g)
    half_width = max(abs(median_lon - min_lon), abs(median_lon - max_lon))
    half_height = max(abs(median_lat - min_lat), abs(median_lat - max_lat))
    return (median_lon - half_width,
            median_lon + half_width,
            median_lat - half_height,
            median_lat + half_height)
def plot_route_network_from_gtfs(g, ax=None, spatial_bounds=None, map_alpha=0.8, scalebar=True, legend=True,
                                 return_smopy_map=False, map_style=None):
    """
    Plot the route network of a GTFS feed on top of a background map.

    Parameters
    ----------
    g: A gtfspy.gtfs.GTFS object
        Where to get the data from?
    ax: matplotlib.Axes object, optional
        If None, a new figure and an axis is created
    spatial_bounds: dict, optional
        with str keys: lon_min, lon_max, lat_min, lat_max
    map_alpha: float
    scalebar: bool
    legend: bool
    return_smopy_map: bool, optional
        defaulting to false
    map_style: str, optional
        one of MAP_STYLES

    Returns
    -------
    ax: matplotlib.axes.Axes
    """
    assert isinstance(g, GTFS)
    shapes = g.get_all_route_shapes()
    bounds = spatial_bounds if spatial_bounds is not None else get_spatial_bounds(g, as_dict=True)
    if ax is not None:
        # Grow the bounds so the map matches the axes' aspect ratio.
        extent = ax.get_window_extent().transformed(ax.figure.dpi_scale_trans.inverted())
        bounds = _expand_spatial_bounds_to_fit_axes(bounds, extent.width, extent.height)
    return plot_as_routes(shapes,
                          ax=ax,
                          spatial_bounds=bounds,
                          map_alpha=map_alpha,
                          plot_scalebar=scalebar,
                          legend=legend,
                          return_smopy_map=return_smopy_map,
                          map_style=map_style)
def plot_as_routes(route_shapes, ax=None, spatial_bounds=None, map_alpha=0.8, plot_scalebar=True, legend=True,
                   return_smopy_map=False, line_width_attribute=None, line_width_scale=1.0, map_style=None):
    """
    Draw each route shape as a polyline, colored by route type, over a smopy map.

    Parameters
    ----------
    route_shapes: list of dicts that should have the following keys
        name, type, agency, lats, lons
        with types
        list, list, str, list, list
    ax: axis object
        if None, a new figure and axis are created
    spatial_bounds: dict
        required; str keys lon_min, lon_max, lat_min, lat_max
    map_alpha: float
        alpha of the background map image
    plot_scalebar: bool
    legend: bool
        if True, one legend entry per route type actually drawn
    return_smopy_map: bool
        if True, return (ax, smopy_map) instead of just ax
    line_width_attribute: str or None
        key in each shape dict whose value, multiplied by line_width_scale,
        gives the line width; width 1 is used when None
    line_width_scale: float
    map_style: str, optional
        one of MAP_STYLES

    Returns
    -------
    ax: matplotlib.axes object
    """
    lon_min = spatial_bounds['lon_min']
    lon_max = spatial_bounds['lon_max']
    lat_min = spatial_bounds['lat_min']
    lat_max = spatial_bounds['lat_max']
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max, map_style=map_style)
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=map_alpha)
    # Pixel coordinates of the bounding-box corners; reused for the axis
    # limits and the scalebar width.
    bound_pixel_xs, bound_pixel_ys = smopy_map.to_pixels(numpy.array([lat_min, lat_max]),
                                                         numpy.array([lon_min, lon_max]))
    # route type -> one representative Line2D artist (for the legend)
    route_types_to_lines = {}
    for shape in route_shapes:
        route_type = ROUTE_TYPE_CONVERSION[shape['type']]
        lats = numpy.array(shape['lats'])
        lons = numpy.array(shape['lons'])
        if line_width_attribute:
            line_width = line_width_scale * shape[line_width_attribute]
        else:
            line_width = 1
        xs, ys = smopy_map.to_pixels(lats, lons)
        line, = ax.plot(xs, ys, linewidth=line_width, color=ROUTE_TYPE_TO_COLOR[route_type], zorder=ROUTE_TYPE_TO_ZORDER[route_type])
        route_types_to_lines[route_type] = line
    if legend:
        lines = list(route_types_to_lines.values())
        labels = [ROUTE_TYPE_TO_SHORT_DESCRIPTION[route_type] for route_type in route_types_to_lines.keys()]
        ax.legend(lines, labels, loc="upper left")
    if plot_scalebar:
        _add_scale_bar(ax, lat_max, lon_min, lon_max, bound_pixel_xs.max() - bound_pixel_xs.min())
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlim(bound_pixel_xs.min(), bound_pixel_xs.max())
    # y-limits inverted on purpose: pixel y grows downwards.
    ax.set_ylim(bound_pixel_ys.max(), bound_pixel_ys.min())
    if return_smopy_map:
        return ax, smopy_map
    else:
        return ax
def plot_routes_as_stop_to_stop_network(from_lats, from_lons, to_lats, to_lons, attributes=None, color_attributes=None,
                                        zorders=None,
                                        line_labels=None,
                                        ax=None,
                                        spatial_bounds=None,
                                        alpha=1,
                                        map_alpha=0.8,
                                        scalebar=True,
                                        return_smopy_map=False,
                                        c=None, linewidth=None,
                                        linewidth_multiplier=1,
                                        use_log_scale=False):
    """
    Draw one straight segment per stop-to-stop link over a smopy background map.

    Parameters
    ----------
    from_lats, from_lons, to_lats, to_lons: sequences of float
        segment endpoints, matched by index
    attributes: sequence, optional
        per-segment widths (before linewidth_multiplier); falsy entries fall
        back to ``linewidth``
    color_attributes: sequence, optional
        per-segment route types (keys of ROUTE_TYPE_CONVERSION); entries
        determine color and zorder, and a route-type legend is drawn.
        When None, the plain color ``c`` is used for all segments.
    zorders: sequence of int, optional
    line_labels: sequence of str, optional
        text placed at each segment midpoint
    ax: matplotlib axis, optional
    spatial_bounds: dict, optional
        keys lon_min, lon_max, lat_min, lat_max; derived from the endpoints
        when None
    alpha: float
        line alpha
    map_alpha: float
        background map alpha
    scalebar: bool
    return_smopy_map: bool
    c: matplotlib color
        fallback color; required (asserted) even when color_attributes is given
    linewidth: number, optional
    linewidth_multiplier: number
    use_log_scale: bool
        if True, each width attribute is log10-scaled before plotting

    Returns
    -------
    ax, or (ax, smopy_map) when return_smopy_map is True
    """
    if attributes is None:
        attributes = len(list(from_lats))*[None]
    if not linewidth:
        linewidth = 1
    if color_attributes is None:
        color_attributes = len(list(from_lats))*[None]
    assert c is not None
    if zorders is None:
        zorders = len(list(from_lats))*[1]
    if line_labels is None:
        line_labels = len(list(from_lats))*[None]
    if spatial_bounds is None:
        lon_min = min(list(from_lons) + list(to_lons))
        lon_max = max(list(from_lons) + list(to_lons))
        lat_min = min(list(from_lats) + list(to_lats))
        lat_max = max(list(from_lats) + list(to_lats))
    else:
        lon_min = spatial_bounds['lon_min']
        lon_max = spatial_bounds['lon_max']
        lat_min = spatial_bounds['lat_min']
        lat_max = spatial_bounds['lat_max']
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=map_alpha)
    # Pixel coordinates of the bounding-box corners; used for limits/scalebar.
    bound_pixel_xs, bound_pixel_ys = smopy_map.to_pixels(numpy.array([lat_min, lat_max]),
                                                         numpy.array([lon_min, lon_max]))
    for from_lat, from_lon, to_lat, to_lon, attribute, color_attribute, zorder, line_label in zip(from_lats,
                                                                                                  from_lons,
                                                                                                  to_lats,
                                                                                                  to_lons,
                                                                                                  attributes,
                                                                                                  color_attributes,
                                                                                                  zorders,
                                                                                                  line_labels):
        if color_attribute is None:
            color = c
        else:
            # Route type determines both color and draw order.
            a = ROUTE_TYPE_CONVERSION[color_attribute]
            color = ROUTE_TYPE_TO_COLOR[a]
            zorder = ROUTE_TYPE_TO_ZORDER[a]
        if not attribute:
            attribute = linewidth
        if use_log_scale:
            attribute = math.log10(attribute)
        xs, ys = smopy_map.to_pixels(numpy.array([from_lat, to_lat]), numpy.array([from_lon, to_lon]))
        ax.plot(xs, ys, color=color, linewidth=attribute*linewidth_multiplier, zorder=zorder, alpha=alpha)
        if line_label:
            ax.text(xs.mean(), ys.mean(), line_label,
                    # verticalalignment='bottom', horizontalalignment='right',
                    color='green', fontsize=15)
    # Legend only makes sense when segments are colored by route type.
    legend = True if color_attributes[0] is not None else False
    import matplotlib.lines as mlines
    if legend:
        unique_types = set(color_attributes)
        lines = []
        for i in unique_types:
            line = mlines.Line2D([], [], color=ROUTE_TYPE_TO_COLOR[i], markersize=15,
                                 label=ROUTE_TYPE_TO_SHORT_DESCRIPTION[i])
            lines.append(line)
        handles = lines
        labels = [h.get_label() for h in handles]
        ax.legend(handles=handles, labels=labels, loc=4)
    if scalebar:
        _add_scale_bar(ax, lat_max, lon_min, lon_max, bound_pixel_xs.max() - bound_pixel_xs.min())
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlim(bound_pixel_xs.min(), bound_pixel_xs.max())
    # y-limits inverted on purpose: pixel y grows downwards.
    ax.set_ylim(bound_pixel_ys.max(), bound_pixel_ys.min())
    if return_smopy_map:
        return ax, smopy_map
    else:
        return ax
def _add_scale_bar(ax, lat, lon_min, lon_max, width_pixels):
    """Attach a ScaleBar artist scaled to meters-per-pixel at latitude ``lat``."""
    meters_across = util.wgs84_distance(lat, lon_min, lat, lon_max)
    meters_per_pixel = meters_across / width_pixels
    ax.add_artist(ScaleBar(meters_per_pixel))
def _expand_spatial_bounds_to_fit_axes(bounds, ax_width, ax_height):
    """
    Widen either the longitude or the latitude span of ``bounds`` so the
    geographic aspect ratio matches the axes' aspect ratio.

    Parameters
    ----------
    bounds: dict
        with keys lon_min, lon_max, lat_min, lat_max
    ax_width: float
    ax_height: float

    Returns
    -------
    spatial_bounds: dict
        same keys, with exactly one dimension symmetrically enlarged
    """
    lat_min, lat_max = bounds['lat_min'], bounds['lat_max']
    lon_min, lon_max = bounds['lon_min'], bounds['lon_max']
    height_m = util.wgs84_distance(lat_min, lon_min, lat_max, lon_min)
    width_m = util.wgs84_distance(lat_min, lon_min, lat_min, lon_max)
    meters_ratio = width_m / height_m
    axes_ratio = ax_width / ax_height
    if axes_ratio > meters_ratio:
        # Axes are relatively wider than the data: stretch the longitudes.
        new_width_m = height_m * axes_ratio
        lon_span = ((lon_max - lon_min) / width_m) * new_width_m
        lon_center = (lon_min + lon_max) / 2.
        return {
            "lon_min": lon_center - lon_span / 2.,
            "lon_max": lon_center + lon_span / 2.,
            "lat_min": lat_min,
            "lat_max": lat_max
        }
    # Axes are relatively taller than the data: stretch the latitudes.
    new_height_m = width_m / axes_ratio
    lat_span = ((lat_max - lat_min) / height_m) * new_height_m
    lat_center = (lat_min + lat_max) / 2.
    return {
        "lon_min": lon_min,
        "lon_max": lon_max,
        "lat_min": lat_center - lat_span / 2.,
        "lat_max": lat_center + lat_span / 2.
    }
def plot_route_network_thumbnail(g, map_style=None):
    """Render a small 512x300 px overview of the route network.

    Uses a fixed scale of 24 meters per pixel around the median stop, a fully
    opaque background map, and neither legend nor scalebar.
    """
    pixel_width, pixel_height = 512, 300
    meters_per_pixel = 24
    dpi = mpl.rcParams["figure.dpi"]
    center_lat, center_lon = get_median_lat_lon_of_stops(g)
    half_height_deg = util.wgs84_height(pixel_height * meters_per_pixel)
    half_width_deg = util.wgs84_width(pixel_width * meters_per_pixel, center_lat)
    bounds = {
        "lon_min": center_lon - half_width_deg,
        "lon_max": center_lon + half_width_deg,
        "lat_min": center_lat - half_height_deg,
        "lat_max": center_lat + half_height_deg
    }
    fig = plt.figure(figsize=(pixel_width / dpi, pixel_height / dpi))
    ax = fig.add_subplot(111)
    plt.subplots_adjust(bottom=0.0, left=0.0, right=1.0, top=1.0)
    return plot_route_network_from_gtfs(g, ax, bounds, map_alpha=1.0, scalebar=False, legend=False,
                                        map_style=map_style)
def plot_stops_with_categorical_attributes(lats_list, lons_list, attributes_list, s=0.5, spatial_bounds=None, colorbar=False, ax=None, cmap=None, norm=None, alpha=None):
    """
    Scatter several groups of stops, one BASE_COLORS color per group.

    Parameters
    ----------
    lats_list, lons_list: list of sequences of float
        one sequence per group
    attributes_list: list
        iterated alongside the groups but not otherwise used
        (kept for interface compatibility)
    s: float
        marker size
    spatial_bounds: dict, optional
        keys lon_min, lon_max, lat_min, lat_max; computed from the data when falsy
    colorbar, cmap, norm, alpha:
        currently unused (kept for interface compatibility)
    ax: matplotlib axis, optional

    Returns
    -------
    list of scatter artists, one per plotted group
    """
    if not spatial_bounds:
        lon_min = min([min(x) for x in lons_list])
        lon_max = max([max(x) for x in lons_list])
        lat_min = min([min(x) for x in lats_list])
        lat_max = max([max(x) for x in lats_list])
    else:
        lon_min = spatial_bounds['lon_min']
        lon_max = spatial_bounds['lon_max']
        lat_min = spatial_bounds['lat_min']
        lat_max = spatial_bounds['lat_max']
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    # Pixel-space bounding box of the four corner points.
    # Bug fix: the initialization test used to be "if not min_x", which
    # incorrectly treated a legitimate pixel coordinate of 0 as "unset";
    # test against None instead.
    min_x = max_x = min_y = max_y = None
    for lat in [lat_min, lat_max]:
        for lon in [lon_min, lon_max]:
            x, y = smopy_map.to_pixels(lat, lon)
            if min_x is None:
                min_x = max_x = x
                min_y = max_y = y
            else:
                max_x = max(max_x, x)
                max_y = max(max_y, y)
                min_y = min(min_y, y)
                min_x = min(min_x, x)
    ax.set_xlim(min_x, max_x)
    # y-limits inverted on purpose: pixel y grows downwards.
    ax.set_ylim(max_y, min_y)
    ax.set_xticks([])
    ax.set_yticks([])
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=0.8)
    scatters = []
    for lats, lons, attributes, color in zip(lats_list, lons_list, attributes_list, mcolors.BASE_COLORS):
        x, y = zip(*[smopy_map.to_pixels(lat, lon) for lat, lon in zip(lats, lons)])
        scatters.append(plt.scatter(x, y, s=s, c=color))
    return scatters
def plot_stops_with_attributes(lats, lons, attribute, s=0.5, spatial_bounds=None, colorbar=False, ax=None, cmap=None, norm=None, alpha=None):
    """
    Scatter stops colored by a numeric attribute over a smopy background map.

    Returns ax, or (ax, scatter_artist, smopy_map) when colorbar is True.
    """
    if spatial_bounds:
        lon_min = spatial_bounds['lon_min']
        lon_max = spatial_bounds['lon_max']
        lat_min = spatial_bounds['lat_min']
        lat_max = spatial_bounds['lat_max']
    else:
        lon_min, lon_max = min(lons), max(lons)
        lat_min, lat_max = min(lats), max(lats)
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)
    if ax is None:
        ax = plt.figure().add_subplot(111)
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=0.8)
    xs, ys = smopy_map.to_pixels(lats, lons)
    scatter_artist = ax.scatter(xs, ys, c=attribute, s=s, cmap=cmap, norm=norm, alpha=alpha)
    ax.set_xlim(min(xs), max(xs))
    # y-limits inverted on purpose: pixel y grows downwards.
    ax.set_ylim(max(ys), min(ys))
    if colorbar:
        return ax, scatter_artist, smopy_map
    return ax
def plot_all_stops(g, ax=None, scalebar=False):
    """
    Plot all stops of the feed as red dots over a smopy background map.

    Parameters
    ----------
    g: A gtfspy.gtfs.GTFS object
    ax: matplotlib.Axes object, optional
        If None, a new figure and an axis is created, otherwise results are plotted on the axis.
    scalebar: bool, optional
        Whether to include a scalebar to the plot.

    Returns
    -------
    ax: matplotlib.Axes
    """
    assert isinstance(g, GTFS)
    lon_min, lon_max, lat_min, lat_max = get_spatial_bounds(g)
    smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=0.8)
    stops = g.stops()
    lats = numpy.array(stops['lat'])
    lons = numpy.array(stops['lon'])
    xs, ys = smopy_map.to_pixels(lats, lons)
    ax.scatter(xs, ys, color="red", s=10)
    ax.set_xlim(min(xs), max(xs))
    # y-limits inverted on purpose: pixel y grows downwards.
    ax.set_ylim(max(ys), min(ys))
    if scalebar:
        # Fix: the scalebar parameter was previously accepted but ignored.
        bound_pixel_xs, _ = smopy_map.to_pixels(numpy.array([lat_min, lat_max]),
                                                numpy.array([lon_min, lon_max]))
        _add_scale_bar(ax, lat_max, lon_min, lon_max,
                       bound_pixel_xs.max() - bound_pixel_xs.min())
    return ax
def get_smopy_map(lon_min, lon_max, lat_min, lat_max, z=None, map_style=None):
    """
    Fetch (and memoize) a smopy background map covering the given bounds.

    Parameters
    ----------
    lon_min, lon_max, lat_min, lat_max: float
    z: int, optional
        force a specific zoom level (monkey-patches smopy's zoom check)
    map_style: str, optional
        one of MAP_STYLES (CartoDB); when None, the CartoDB "light_all"
        style is used as the default tile server

    Returns
    -------
    smopy.Map

    Raises
    ------
    RuntimeError
        if the tile server cannot be reached
    """
    ORIG_TILE_SERVER = smopy.TILE_SERVER
    if map_style is not None:
        assert map_style in MAP_STYLES, map_style + \
            " (map_style parameter) is not a valid CartoDB mapping style. Options are " + \
            str(MAP_STYLES)
        smopy.TILE_SERVER = "http://1.basemaps.cartocdn.com/" + map_style + "/{z}/{x}/{y}.png"
    else:
        smopy.TILE_SERVER = "http://1.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png"
    args = (lat_min, lat_max, lon_min, lon_max, map_style, z)
    if args not in get_smopy_map.maps:
        kwargs = {}
        if z is not None:  # this hack may not work
            smopy.Map.get_allowed_zoom = lambda self, z: z
            kwargs['z'] = z
        try:
            get_smopy_map.maps[args] = smopy.Map((lat_min, lon_min, lat_max, lon_max), **kwargs)
        except URLError:
            # Fix: corrected typo in user-facing message ("your are" -> "you are").
            raise RuntimeError("\n Could not load background map from the tile server: "
                               + smopy.TILE_SERVER +
                               "\n Please check that the tile server exists and "
                               "that you are connected to the internet.")
    # Restore the module-level tile server so other callers are unaffected.
    smopy.TILE_SERVER = ORIG_TILE_SERVER
    return get_smopy_map.maps[args]

# Memoization cache: (lat_min, lat_max, lon_min, lon_max, map_style, z) -> smopy.Map
get_smopy_map.maps = {}
|
from helpers import *
from java.util.UUID import fromString as java_uuid
from org.bukkit.util import Vector
from traceback import format_exc as print_traceback
# Permission nodes: [0] = may use /forcefield, [1] = immune to forcefields.
ff_perms = ["utils.forcefield", "utils.forcefield.ignore"]
ff_prefix = "&8[&aFF&8]"  # chat prefix for all plugin messages
ff_users = []  # UUID strings of players whose forcefield is currently ON
whitelists = {} # {ff_owner_id: [white, listed, ids]}
fd = 6 # forcefield distance
speed_limiter = 100 # the higher, the lower the forcefield sensitivity.
Xv = 1.0 / speed_limiter # used in set_velocity_away(), this is more efficient.
Xve = (0.6 * speed_limiter) * Xv  # scaling divisor for the pushback velocity
# /ff admin is a future option I might implement
@hook.command("forcefield")
def on_forcefield_command(sender, args):
    """Dispatch /forcefield subcommands: toggle (default), whitelist, help.

    Fix: removed the unused local ``sender_id`` computed at the top of the
    original handler (each sub-handler derives it itself).
    """
    if not is_player(sender) or not sender.hasPermission(ff_perms[0]):
        noperm(sender)
        return True
    if not args or args[0].lower() == "toggle":  # Toggle
        forcefield_toggle(sender)
    elif args[0].lower() in ["whitelist", "wl", "wlist"]:  # Whitelist commands
        if not args[1:] or args[1].lower() == "list":
            whitelist_list(sender)
        elif args[1].lower() == "clear":
            whitelist_clear(sender)
        elif args[1].lower() in ["add", "+"]:
            whitelist_add(sender, True, args[2:])
        elif args[1].lower() in ["remove", "delete", "rem", "del", "-"]:
            whitelist_add(sender, False, args[2:])
        else:
            invalid_syntax(sender)
    elif args[0].lower() in ["help", "?"]:  # /forcefield help
        forcefield_help(sender)
    else:
        invalid_syntax(sender)
    return True
def whitelist_add(sender, add, players):
    """Add (add=True) or remove (add=False) the named players from the
    sender's forcefield whitelist, messaging both parties about the result.

    ``players`` is a list of player names from the command arguments.
    """
    if not players: msg(sender, "%s &cGive space-separated playernames." % ff_prefix)
    else:
        sender_id = str(sender.getUniqueId())
        # Lazily create the sender's whitelist entry on first use.
        whitelists[sender_id] = [] if sender_id not in whitelists else whitelists[sender_id]
        for name in players:
            player = server.getOfflinePlayer(name)
            if player:
                player_id = str(player.getUniqueId())
                pname = player.getName()
                sname = stripcolors(sender.getDisplayName())
                # NOTE(review): presumes an OfflinePlayer compares equal to its
                # online Player counterpart -- confirm against the server API.
                online = True if player in list(server.getOnlinePlayers()) else False
                if add == True and player_id not in whitelists[sender_id]:
                    if not sender == player:
                        whitelists[sender_id].append(player_id)
                        msg(sender, "%s &aAdded %s to your forcefield whitelist." % (ff_prefix, pname))
                        if online == True: msg(player, "%s &a%s &aadded you to his forcefield whitelist." % (ff_prefix, sname))
                    else: msg(sender, "%s &cYou can't whitelist yourself." % ff_prefix)
                elif add == False and player_id in whitelists[sender_id]:
                    whitelists[sender_id].remove(player_id)
                    msg(sender, "%s &cRemoved %s from your forcefield whitelist." % (ff_prefix, pname))
                    if online == True: msg(player, "%s &c%s &cremoved you from his forcefield whitelist." % (ff_prefix, sname))
                # Fall-through cases: the request was already satisfied.
                elif add == True: msg(sender, "%s &c%s &cwas already in your forcefield whitelist." % (ff_prefix, pname))
                else: msg(sender, "%s &c%s &cwas not in your forcefield whitelist." % (ff_prefix, pname))
            else: msg(sender, "%s &cplayer %s &cwas not found." % (ff_prefix, name))
def whitelist_list(sender):
    """Show the sender their forcefield whitelist, one numbered line per player."""
    owner_id = str(sender.getUniqueId())
    msg(sender, "%s &aForceField Whitelist:" % ff_prefix)
    try:
        entries = whitelists.get(owner_id, [])
        for index, player_id in enumerate(entries, 1):
            name = server.getOfflinePlayer(java_uuid(player_id)).getName()
            msg(sender, "&a %s. &f%s" % (index, name))
        if not entries:
            msg(sender, "&c Your whitelist has no entries.")
    except:
        # Best effort: UUID parsing / offline-player lookup may throw.
        log(print_traceback())
def whitelist_clear(sender):
    """Empty the sender's forcefield whitelist.

    Fix: the original indexed whitelists[sender_id] directly, raising
    KeyError for players who never created a whitelist; pop with a default
    handles both the missing and the empty case.
    """
    sender_id = str(sender.getUniqueId())
    removed = whitelists.pop(sender_id, None)
    if not removed:
        msg(sender, "%s &cYou had no players whitelisted." % ff_prefix)
    else:
        msg(sender, "%s &aForceField Whitelist cleared." % ff_prefix)
def forcefield_help(sender):
    """Send the /forcefield usage summary to the sender, line by line."""
    help_lines = (
        "%s &a&l/ForceField Help: \n&aYou can use the forcefield to keep players on distance." % ff_prefix,
        "&2Commands:",
        "&a1. &6/ff &ohelp &a: aliases: ?",
        "&a2. &6/ff &o(toggle)",
        "&a3. &6/ff &owhitelist (list) &a: aliases: wlist, wl",
        "&a4. &6/ff wl &oclear",
        "&a5. &6/ff wl &oadd <players> &a: aliases: &o+",
        "&a6. &6/ff wl &oremove <players> &a: aliases: &odelete, rem, del, -",
    )
    for line in help_lines:
        msg(sender, line)
def forcefield_toggle(sender):
    """Flip the sender's forcefield on/off and confirm the new state in chat."""
    uid = str(sender.getUniqueId())
    if uid not in ff_users:
        ff_users.append(uid)
        msg(sender, "%s &aForceField toggle: &2ON" % ff_prefix)
    else:
        ff_users.remove(uid)
        msg(sender, "%s &aForceField toggle: &cOFF" % ff_prefix)
def invalid_syntax(sender):
    """Point the sender at /ff ? after a malformed command."""
    warning = "%s &cInvalid syntax. Use &o/ff ? &cfor info." % ff_prefix
    msg(sender, warning)
#--------------------------------------------------------------------------------------------------------#
@hook.event("player.PlayerMoveEvent")
def on_move(event):
    """Push creative players away from active forcefields, in both directions
    (the mover owns a forcefield, or a nearby player does).

    Fix: the first branch previously evaluated
    ``not (str(entity.getUniqueId() in whitelist))`` -- i.e. str() of a
    boolean, which is always truthy -- so the condition was always False and
    the owner's forcefield never pushed anyone. The membership test is now
    done on the stringified UUID.
    """
    player = event.getPlayer()
    if not is_creative(player):
        return
    player_id = str(player.getUniqueId())
    if player_id in ff_users:  # player has forcefield, entity should be blocked
        for entity in player.getNearbyEntities(fd, fd, fd):
            if (is_player(entity) and is_creative(entity)
                    and not entity.hasPermission(ff_perms[1])
                    and str(entity.getUniqueId()) not in whitelists.get(player_id, [])):
                # whitelists.get(...) falls back to an empty list: never whitelisted.
                set_velocity_away(player, entity)
    if not player.hasPermission(ff_perms[1]):  # player should be blocked, entity has forcefield
        for entity in player.getNearbyEntities(fd, fd, fd):
            entity_id = str(entity.getUniqueId())
            if (is_player(entity) and is_creative(entity)
                    and entity_id in ff_users
                    and player_id not in whitelists.get(entity_id, [])):
                set_velocity_away(entity, player)  # other way around
def set_velocity_away(player, entity):
    """Add velocity to ``entity`` pointing away from ``player``.

    Per-axis deltas inside (-Xv, Xv) are replaced by Xv before scaling, so we
    never divide by zero and never exceed the configured maximum speed.
    """
    p = player.getLocation()
    e = entity.getLocation()

    def push(delta):
        # Clamp near-zero deltas to the minimum offset Xv, then scale.
        if -Xv < delta < Xv:
            delta = Xv
        return Xv / Xve * delta

    vx = push(e.getX() - p.getX())
    vy = push(e.getY() - p.getY())
    vz = push(e.getZ() - p.getZ())
    current = entity.getVelocity()
    entity.setVelocity(Vector(vx + current.getX(), vy + current.getY(), vz + current.getZ()))
#--------------------------------------------------------------------------------------------------------#
@hook.event("player.PlayerQuitEvent")
def on_quit(event):
    """Drop disconnecting players from the active-forcefield list."""
    uid = str(event.getPlayer().getUniqueId())
    if uid in ff_users:
        ff_users.remove(uid)
Cool stuff
from helpers import *
from java.util.UUID import fromString as java_uuid
from org.bukkit.util import Vector
from traceback import format_exc as print_traceback
# Permission nodes: [0] = may use /forcefield, [1] = immune to forcefields.
ff_perms = ["utils.forcefield", "utils.forcefield.ignore"]
ff_prefix = "&8[&aFF&8]"  # chat prefix for all plugin messages
ff_users = []  # UUID strings of players whose forcefield is currently ON
whitelists = {} # {ff_owner_id: [white, listed, ids]}
fd = 6 # forcefield distance
speed_limiter = 100 # the higher, the lower the forcefield sensitivity.
# NOTE(review): sphere_radius / safe_radius are not referenced anywhere in
# the visible code -- presumably reserved for a future distance check; confirm.
sphere_radius = (3*(fd**2))**0.5 # Distance from box center to box corner if box rib = 1/2 * fd
safe_radius = sphere_radius + 0.1 # Distance which is probably not going to throw errors and get people stuck
Xv = 1.0 / speed_limiter # used in set_velocity_away(), this is more efficient.
Xve = (0.6 * speed_limiter) * Xv  # scaling divisor for the pushback velocity
# /ff admin is a future option I might implement
@hook.command("forcefield")
def on_forcefield_command(sender, args):
    """Dispatch /forcefield subcommands: toggle (default), whitelist, help.

    Fix: removed the unused local ``sender_id`` computed at the top of the
    original handler (each sub-handler derives it itself).
    """
    if not is_player(sender) or not sender.hasPermission(ff_perms[0]):
        noperm(sender)
        return True
    if not args or args[0].lower() == "toggle":  # Toggle
        forcefield_toggle(sender)
    elif args[0].lower() in ["whitelist", "wl", "wlist"]:  # Whitelist commands
        if not args[1:] or args[1].lower() == "list":
            whitelist_list(sender)
        elif args[1].lower() == "clear":
            whitelist_clear(sender)
        elif args[1].lower() in ["add", "+"]:
            whitelist_add(sender, True, args[2:])
        elif args[1].lower() in ["remove", "delete", "rem", "del", "-"]:
            whitelist_add(sender, False, args[2:])
        else:
            invalid_syntax(sender)
    elif args[0].lower() in ["help", "?"]:  # /forcefield help
        forcefield_help(sender)
    else:
        invalid_syntax(sender)
    return True
def whitelist_add(sender, add, players):
    """Add (add=True) or remove (add=False) names from the sender's whitelist.

    `players` is a list of player names from the command line; each is looked
    up via server.getOfflinePlayer. Targets that are online also get notified.
    """
    if not players:
        msg(sender, "%s &cGive space-separated playernames." % ff_prefix)
        return
    sender_id = str(sender.getUniqueId())
    # Lazily create the sender's whitelist entry (idiomatic setdefault
    # replaces the original conditional reassignment).
    whitelist = whitelists.setdefault(sender_id, [])
    for name in players:
        player = server.getOfflinePlayer(name)
        if not player:
            msg(sender, "%s &cplayer %s &cwas not found." % (ff_prefix, name))
            continue
        player_id = str(player.getUniqueId())
        pname = player.getName()
        sname = stripcolors(sender.getDisplayName())
        online = player in list(server.getOnlinePlayers())
        if add and player_id not in whitelist:
            if sender == player:
                msg(sender, "%s &cYou can't whitelist yourself." % ff_prefix)
            else:
                whitelist.append(player_id)
                msg(sender, "%s &aAdded %s to your forcefield whitelist." % (ff_prefix, pname))
                if online:
                    msg(player, "%s &a%s &aadded you to his forcefield whitelist." % (ff_prefix, sname))
        elif not add and player_id in whitelist:
            whitelist.remove(player_id)
            msg(sender, "%s &cRemoved %s from your forcefield whitelist." % (ff_prefix, pname))
            if online:
                msg(player, "%s &c%s &cremoved you from his forcefield whitelist." % (ff_prefix, sname))
        elif add:
            msg(sender, "%s &c%s &cwas already in your forcefield whitelist." % (ff_prefix, pname))
        else:
            msg(sender, "%s &c%s &cwas not in your forcefield whitelist." % (ff_prefix, pname))
def whitelist_list(sender):
    """Show the sender their forcefield whitelist, one numbered line each."""
    sender_id = str(sender.getUniqueId())
    msg(sender, "%s &aForceField Whitelist:" % ff_prefix)
    try:
        entries = whitelists.get(sender_id, [])
        for position, player_id in enumerate(entries, 1):
            name = server.getOfflinePlayer(java_uuid(player_id)).getName()
            msg(sender, "&a %s. &f%s" % (position, name))
        if not entries:
            msg(sender, "&c Your whitelist has no entries.")
    except:
        log(print_traceback())
def whitelist_clear(sender):
    """Wipe the sender's entire forcefield whitelist."""
    sender_id = str(sender.getUniqueId())
    # BUG FIX: the original indexed whitelists[sender_id] directly, raising
    # KeyError for players who never whitelisted anyone. .get() treats a
    # missing entry the same as an empty one.
    if not whitelists.get(sender_id):
        msg(sender, "%s &cYou had no players whitelisted." % ff_prefix)
    else:
        whitelists.pop(sender_id)
        msg(sender, "%s &aForceField Whitelist cleared." % ff_prefix)
def forcefield_help(sender):
    """Send the /ff command reference to the player."""
    lines = [
        "%s &a&l/ForceField Help: \n&aYou can use the forcefield to keep players on distance." % ff_prefix,
        "&2Commands:",
        "&a1. &6/ff &ohelp &a: aliases: ?",
        "&a2. &6/ff &o(toggle)",
        "&a3. &6/ff &owhitelist (list) &a: aliases: wlist, wl",
        "&a4. &6/ff wl &oclear",
        "&a5. &6/ff wl &oadd <players> &a: aliases: &o+",
        "&a6. &6/ff wl &oremove <players> &a: aliases: &odelete, rem, del, -",
    ]
    for line in lines:
        msg(sender, line)
def forcefield_toggle(sender):
    """Flip the sender's forcefield on/off and confirm the new state."""
    sender_id = str(sender.getUniqueId())
    currently_on = sender_id in ff_users
    if currently_on:
        ff_users.remove(sender_id)
        msg(sender, "%s &aForceField toggle: &cOFF" % ff_prefix)
    else:
        ff_users.append(sender_id)
        msg(sender, "%s &aForceField toggle: &2ON" % ff_prefix)
def invalid_syntax(sender):
    """Point the player at /ff ? after a malformed command."""
    notice = "%s &cInvalid syntax. Use &o/ff ? &cfor info." % ff_prefix
    msg(sender, notice)
#--------------------------------------------------------------------------------------------------------#
@hook.event("player.PlayerMoveEvent")
def on_move(event):
    """Enforce forcefields whenever a creative-mode player moves.

    Two directions are checked:
    1. the moving player owns a forcefield -> push nearby players away;
    2. a nearby player owns a forcefield -> push (or block) the mover.
    """
    player = event.getPlayer()
    if not is_creative(player):
        return
    player_id = str(player.getUniqueId())
    if player_id in ff_users:  # player has forcefield, entity should be blocked
        for entity in player.getNearbyEntities(fd, fd, fd):
            # BUG FIX: the original wrapped the whole membership test in
            # str(...) - str(uuid in whitelist) yields "True"/"False", both
            # truthy, so `not (...)` was always False and this branch never
            # pushed anyone. The UUID must be stringified before the `in`.
            if (is_player(entity) and is_creative(entity)
                    and not entity.hasPermission(ff_perms[1])
                    and str(entity.getUniqueId()) not in whitelists.get(player_id, [])):
                set_velocity_away(player, entity)
    if not player.hasPermission(ff_perms[1]):  # player should be blocked, entity has forcefield
        for entity in player.getNearbyEntities(fd, fd, fd):
            entity_id = str(entity.getUniqueId())
            # Missing whitelist entry falls back to an empty list.
            if (is_player(entity) and is_creative(entity)
                    and entity_id in ff_users
                    and player_id not in whitelists.get(entity_id, [])):
                evloc = event.getFrom()
                enloc = entity.getLocation()
                dx = evloc.getX() - enloc.getX()
                if dx < -fd or dx > fd:
                    dy = evloc.getY() - enloc.getY()
                    if dy < -fd or dy > fd:
                        dz = evloc.getZ() - enloc.getZ()  # This is more efficient.
                        if dz < -fd or dz > fd:
                            event.setCancelled(True)
                            msg(player, "&cYou can't get closer than %sm to %s due to their forcefield." % (fd, stripcolors(entity.getDisplayName())))
                if not event.isCancelled():
                    set_velocity_away(entity, player)  # push the mover away from the owner
def set_velocity_away(player, entity):
    """Add a velocity impulse to `entity` pointing away from `player`.

    Per-axis deltas inside (-Xv, Xv) are replaced with Xv so the scale factor
    never collapses to zero and the resulting speed stays limited.
    """
    origin = player.getLocation()
    target = entity.getLocation()
    deltas = [target.getX() - origin.getX(),
              target.getY() - origin.getY(),
              target.getZ() - origin.getZ()]
    scaled = []
    for delta in deltas:
        if -Xv < delta < Xv:
            delta = Xv
        scaled.append(Xv / Xve * delta)
    velocity = entity.getVelocity()
    entity.setVelocity(Vector(scaled[0] + velocity.getX(),
                              scaled[1] + velocity.getY(),
                              scaled[2] + velocity.getZ()))
#We don't want to go above max_speed, and we dont want to divide by 0.
#--------------------------------------------------------------------------------------------------------#
@hook.event("player.PlayerQuitEvent")
def on_quit(event):
    """Forget a player's forcefield toggle when they disconnect."""
    leaving = str(event.getPlayer().getUniqueId())
    if leaving in ff_users:
        ff_users.remove(leaving)
"""
Based off of `capsule`, by Tristan Jehan and Jason Sundram.
Heavily modified by Peter Sobot for integration with forever.fm.
Again by Mike iLL and Rosuav for Infinite Glitch
"""
import os
import gc
from . import apikeys
import logging
import pickle
import base64
import traceback
import threading
import subprocess
import multiprocessing
import weakref
from .lame import Lame
from .timer import Timer
from . import database
from audiodata import AudioData
from .capsule_support import resample_features, \
timbre_whiten, LOUDNESS_THRESH
# removed: terminate, FADE_OUT, is_valid which we don't seem to be using.
from .transitions import managed_transition
log = logging.getLogger(__name__)
import sys
test = 'test' in sys.argv
##########################################
## Code lifted from psobot's pyechonest ##
##########################################
import hashlib
import time
from amen.echo_nest_converter import AudioAnalysis
# from pyechonest.util import EchoNestAPIError
class EchoNestAPIError(Exception): pass
# import pyechonest.util
import numpy
# from echonest.remix.support.ffmpeg import ffmpeg
# Probe the system and find which name is available
# Probe for a working decoder binary: try each candidate until one launches.
ffmpeg_command = None
for command in ("avconv", "ffmpeg", "en-ffmpeg"):
    try:
        # Launching with no args is enough to prove the binary exists;
        # OSError means it isn't on PATH.
        subprocess.Popen([command],stdout=subprocess.PIPE,stderr=subprocess.STDOUT).wait()
        ffmpeg_command = command
        break
    except OSError:
        # The command wasn't found. Move on to the next one.
        pass
if not ffmpeg_command:
    raise RuntimeError("No avconv/ffmpeg found, cannot continue")
log.info("Using %r for audio conversion.",ffmpeg_command)
class FFMPEGStreamHandler(threading.Thread):
    """Decode audio to raw PCM by piping it through ffmpeg/avconv.

    `infile` may be a filename (ffmpeg reads the file itself) or a file-like
    object (this thread pumps its bytes into ffmpeg's stdin). Decoded s16le
    PCM is pulled from the child's stdout via read()/feed().
    """

    def __init__(self, infile, numChannels=2, sampleRate=44100):
        command = [ffmpeg_command]
        self.filename = None
        if isinstance(infile, basestring):
            self.filename = infile
        command.extend(["-i", self.filename or "pipe:0"])
        if numChannels is not None:
            command.extend(["-ac", str(numChannels)])
        if sampleRate is not None:
            command.extend(["-ar", str(sampleRate)])
        command.extend(["-f", "s16le", "-acodec", "pcm_s16le", "pipe:1"])
        log.info("Calling ffmpeg: %s", ' '.join(command)) # May be an imperfect representation of the command, but close enough
        # On Windows, psobot had this not closing FDs, despite doing so on other platforms. (????)
        # There's no os.uname() on Windows, presumably, and this is considered to be a reliable test.
        close_fds = hasattr(os, 'uname')
        self.p = subprocess.Popen(
            command,
            stdin=(subprocess.PIPE if not self.filename else None),
            stdout=subprocess.PIPE,
            stderr=open(os.devnull, 'w'),
            close_fds=close_fds
        )
        # Only keep a handle on the input when we must feed it ourselves.
        self.infile = infile if not self.filename else None
        if not self.filename:
            self.infile.seek(0)
        threading.Thread.__init__(self)
        self.daemon = True
        self.start()

    def __del__(self):
        if hasattr(self, 'p'):
            self.finish()

    def run(self):
        # Feeder thread: push the file object's bytes into ffmpeg's stdin.
        # BUG FIX: when decoding from a filename there is no stdin pipe and
        # self.infile is None; the original unconditionally called
        # self.infile.read() / self.p.stdin.close(), so this thread died
        # with an uncaught AttributeError (only IOError was handled).
        if self.infile is None:
            return
        try:
            self.p.stdin.write(self.infile.read())
        except IOError:
            pass
        self.p.stdin.close()

    def finish(self):
        # Close whatever pipes are still open, then reap the child process.
        if self.filename:
            try:
                if self.p.stdin:
                    self.p.stdin.close()
            except (OSError, IOError):
                pass
        try:
            self.p.stdout.close()
        except (OSError, IOError):
            pass
        try:
            self.p.kill()
        except (OSError, IOError):
            pass
        self.p.wait()

    # TODO: Abstract me away from 44100Hz, 2ch 16 bit
    def read(self, samples=-1):
        """Read up to `samples` stereo frames; -1 drains the whole stream.

        Returns a numpy int16 array of shape (n, 2).
        """
        if samples > 0:
            samples *= 2  # two int16 values per stereo frame
        arr = numpy.fromfile(self.p.stdout,
                             dtype=numpy.int16,
                             count=samples)
        if samples < 0 or len(arr) < samples:
            # Stream exhausted - shut the decoder down.
            self.finish()
        arr = numpy.reshape(arr, (-1, 2))
        return arr

    def feed(self, samples):
        # Discard `samples` stereo frames (4 bytes each: 2ch x int16).
        self.p.stdout.read(samples * 4)
class AudioStream(object):
    """
    Very much like an AudioData, but vastly more memory efficient.
    However, AudioStream only supports sequential access - i.e.: one, un-seekable
    stream of PCM data directly being streamed from FFMPEG.
    """
    def __init__(self, fobj):
        # fobj: filename or file-like object; FFMPEGStreamHandler takes both.
        log.info("Audio Stream Init")
        print("Audio Stream Init")
        self.sampleRate = 44100  # fixed output rate requested from ffmpeg
        self.numChannels = 2     # fixed stereo output
        self.fobj = fobj
        self.stream = FFMPEGStreamHandler(self.fobj, self.numChannels, self.sampleRate)
        self.index = 0  # read cursor, in frames from the start of the stream
    def __getitem__(self, index):
        """
        Fetches a frame or slice. Returns an individual frame (if the index
        is a time offset float or an integer sample number) or a slice if
        the index is an `AudioQuantum` (or quacks like one). If the slice is
        "in the past" (i.e.: has been read already, or the current cursor is
        past the requested slice) then this will throw an exception.
        """
        # Float index: seconds -> sample number.
        if isinstance(index, float):
            index = int(index * self.sampleRate)
        # AudioQuantum-like (has start/duration): treat as a time slice.
        elif hasattr(index, "start") and hasattr(index, "duration"):
            index = slice(float(index.start), index.start + index.duration)
        if isinstance(index, slice):
            # Slice of AudioQuantum-like objects: span from the first
            # quantum's start to the end of the last one.
            if (hasattr(index.start, "start") and
                hasattr(index.stop, "duration") and
                hasattr(index.stop, "start")):
                index = slice(index.start.start, index.stop.start + index.stop.duration)
        if isinstance(index, slice):
            return self.getslice(index)
        else:
            return self.getsample(index)
    def getslice(self, index):
        "Help `__getitem__` return a new AudioData for a given slice"
        # Seconds -> frame counts.
        if isinstance(index.start, float):
            index = slice(int(index.start * self.sampleRate),
                          int(index.stop * self.sampleRate), index.step)
        if index.start < self.index:
            # Requested data is behind the cursor: restart the decoder from
            # the top of the input and rewind the bookkeeping.
            # NOTE(review): the class docstring says reading "in the past"
            # throws; this actually re-streams instead - confirm intent.
            self.stream.finish()
            self.stream = FFMPEGStreamHandler(self.fobj, self.numChannels, self.sampleRate)
            self.index = 0
        if index.start > self.index:
            # Skip forward, discarding frames we don't need.
            self.stream.feed(index.start - self.index)
        self.index = index.stop
        return AudioData(None, self.stream.read(index.stop - index.start),
                         sampleRate=self.sampleRate,
                         numChannels=self.numChannels, defer=False)
    def getsample(self, index):
        # TODO: Finish this properly
        raise NotImplementedError()
        # Everything below is an unreachable draft kept for a future fix.
        if isinstance(index, float):
            index = int(index * self.sampleRate)
        if index >= self.index:
            self.stream.feed(index.start - self.index)
            self.index += index
        else:
            raise ValueError("Cannot seek backwards in AudioStream")
    def render(self):
        # Drain everything remaining in the stream as one array.
        return self.stream.read()
    def finish(self):
        # Shut down the ffmpeg child process.
        self.stream.finish()
    def __del__(self):
        if hasattr(self, "stream"):
            self.stream.finish()
class LocalAudioStream(AudioStream):
    """
    Like a non-seekable LocalAudioFile with vastly better memory usage
    and performance. Takes a file-like object and supports slicing and
    rendering. Attempting to read from a part of the input file that
    has already been read will throw an exception.
    If analysis is provided, it is assumed to be a pickle of an
    AudioAnalysis, and will be used in preference to querying echonest.
    """
    def __init__(self, initializer, analysis=None):
        AudioStream.__init__(self, initializer)
        try:
            # Attempt to load up the existing analysis first.
            # Assume that a successful unpickling represents correct
            # data; there's no real guarantee of this, but if you
            # fiddle in the database, I won't stop you shooting
            # yourself in the foot.
            tempanalysis = pickle.loads(base64.b64decode(analysis))
        except (EOFError, TypeError, pickle.UnpicklingError):
            # If there's no saved analysis (including if the arg is
            # omitted; None will raise TypeError), load the file,
            # and send it off to echonest. We try the MD5 first, as
            # it means less uploading, and fall back on actually
            # sending the whole file out.
            start = time.time()
            if hasattr(initializer, 'seek'):
                fobj = initializer
                fobj.seek(0)
            else:
                fobj = open(initializer, 'r')
            # Hash in chunks - much lighter on memory than reading the
            # entire file in.
            md5 = hashlib.md5()
            while True:
                # BUG FIX: the original read 2 ^ 16 bytes per iteration;
                # `^` is XOR in Python (2 ^ 16 == 18), so the file was
                # hashed 18 bytes at a time. 2 ** 16 is the intended
                # 64 KiB chunk. The digest was still correct, just slow.
                data = fobj.read(2 ** 16)
                if not data:
                    break
                md5.update(data)
            if not hasattr(initializer, 'seek'):
                fobj.close()
            track_md5 = md5.hexdigest()
            log.info("Fetching analysis...")
            try:
                tempanalysis = AudioAnalysis(str(track_md5))
            except EchoNestAPIError:
                tempanalysis = AudioAnalysis(initializer, "mp3")
            log.info("Fetched analysis in %ss", time.time() - start)
        # By the time we get here, we ought to have a valid tempanalysis.
        # The very last attempt (passing the original initializer to
        # AudioAnalysis) will let any exceptions bubble all the way up,
        # so we don't have to deal with that here.
        self.analysis = tempanalysis
        # let's try adding this back in
        self.analysis.source = weakref.ref(self)

        class data(object):
            """
            Massive hack - certain operations are intrusive and check
            `.data.ndim`, so in this case, we fake it.
            """
            ndim = self.numChannels
        self.data = data
##############################################
## End code lifted from psobot's pyechonest ##
##############################################
def metadata_of(a):
    """Recursively pull track_details metadata off an action or track.

    Anything carrying `_metadata` yields its track_details; `.track`
    wrappers are unwrapped; transitions exposing t1/t2 yield a 2-tuple.
    Raises ValueError when nothing recognisable is attached.
    """
    if hasattr(a, '_metadata'):
        return a._metadata.track_details
    elif hasattr(a, 'track'):
        return metadata_of(a.track)
    elif hasattr(a, 't1') and hasattr(a, 't2'):
        return (metadata_of(a.t1), metadata_of(a.t2))
    raise ValueError("No metadata found!")
def generate_metadata(a):
    """Build the info-queue payload describing one rendered action.

    The dict holds the action's class name, duration and sample count plus
    one per-track entry (two entries for a two-track transition).
    """
    payload = {
        'action': a.__class__.__name__.split(".")[-1],
        'duration': a.duration,
        'samples': a.samples,
    }
    meta = metadata_of(a)
    if isinstance(meta, tuple):
        first, second = meta
        log.info("HERE: %r", dir(first))
        payload['tracks'] = [
            {"metadata": first, "start": a.s1, "end": a.e1},
            {"metadata": second, "start": a.s2, "end": a.e2},
        ]
    else:
        payload['tracks'] = [
            {"metadata": meta, "start": a.start, "end": a.start + a.duration},
        ]
    return payload
class Mixer(multiprocessing.Process):
    """Background process that analyzes queued tracks, builds transitions
    between them, and feeds the resulting PCM to a Lame encoder."""
    def __init__(self, oqueue, infoqueue):
        # oqueue: output queue handed to the Lame encoder.
        # infoqueue: queue for per-action metadata; None switches off the
        # "wait for more tracks forever" behaviour (see generate_tracks).
        self.infoqueue = infoqueue
        self.encoder = None
        self.oqueue = oqueue
        self.__track_lock = threading.Lock()
        self.__tracks = []
        # Use much shorter crossfades when running under test.
        self.transition_time = 30 if not test else 5
        self.__stop = False
        multiprocessing.Process.__init__(self)
    @property
    def tracks(self):
        # NOTE(review): this returns the underlying list object itself, so
        # the lock only guards the reference read; callers mutate unlocked.
        self.__track_lock.acquire()
        tracks = self.__tracks
        self.__track_lock.release()
        return tracks
    @tracks.setter
    def tracks(self, new_val):
        self.__track_lock.acquire()
        self.__tracks = new_val
        self.__track_lock.release()
    @property
    def current_track(self):
        # The head of the queue is the track currently being played/mixed.
        return self.tracks[0]
    def get_stream(self, x):
        # Locate the MP3 on disk, trying the bare filename then audio/.
        for fname in (x.filename, "audio/"+x.filename):
            if os.path.isfile(fname):
                return fname
        # TODO: Fetch the contents from the database and save to fname
        raise NotImplementedError
    def analyze(self, x):
        """Turn a track record (or list/tuple of records) into a processed
        LocalAudioStream, reusing a cached analysis when one exists."""
        if isinstance(x, list):
            return [self.analyze(y) for y in x]
        if isinstance(x, AudioData):
            return self.process(x)
        if isinstance(x, tuple):
            return self.analyze(*x)
        log.info("Grabbing stream [%r]...", x.id)
        saved = database.get_analysis(x.id)
        laf = LocalAudioStream(self.get_stream(x), saved)
        if not saved:
            # Cache the expensive analysis for the next run.
            database.save_analysis(x.id, base64.b64encode(pickle.dumps(laf.analysis,-1)))
        setattr(laf, "_metadata", x)
        return self.process(laf)
    def add_track(self, track):
        self.tracks.append(self.analyze(track))
    def process(self, track):
        """Attach resampled beat features and a gain factor to a track."""
        if not hasattr(track.analysis.pyechonest_track, "title"):
            setattr(track.analysis.pyechonest_track, "title", track._metadata.track_details['title'])
        log.info("Resampling features [%r]...", track._metadata.id)
        if len(track.analysis.beats):
            track.resampled = resample_features(track, rate='beats')
            track.resampled['matrix'] = timbre_whiten(track.resampled['matrix'])
        else:
            # Beatless track: keep an empty feature matrix so downstream
            # code can still index track.resampled.
            log.info("no beats returned for this track.")
            track.resampled = {"rate":'beats', "matrix": []}
        track.gain = self.__db_2_volume(track.analysis.loudness)
        log.info("Done processing [%r].", track._metadata.id)
        return track
    def __db_2_volume(self, loudness):
        # Map loudness (relative to LOUDNESS_THRESH) to a linear gain.
        return (1.0 - LOUDNESS_THRESH * (LOUDNESS_THRESH - loudness) / 100.0)
    def generate_tracks(self):
        """Yield a series of lists of track segments - helper for run()"""
        # We need at least two tracks before a transition can be built.
        while len(self.tracks) < 2:
            log.info("Waiting for a new track.")
            track = database.get_track_to_play()
            try:
                self.add_track(track)
                log.info("Got a new track.")
            except Exception: # TODO: Why?
                log.error("Exception while trying to add new track:\n%s",
                          traceback.format_exc())
        # Initial transition.
        # yield initialize(self.tracks[0], self.tracks[1])
        mixer_state = {}
        while not self.__stop:
            while len(self.tracks) > 1:
                tra = managed_transition(self.tracks[0],
                                         self.tracks[1], mixer_state)
                # The analysis is no longer needed once the transition is
                # built; drop it to reclaim memory before yielding.
                del self.tracks[0].analysis
                gc.collect()
                yield tra
                log.debug("Finishing track 0 [%r]",self.tracks[0])
                from datetime import datetime
                now = datetime.now().time()  # NOTE(review): unused - confirm before removing
                self.tracks[0].finish()
                del self.tracks[0]
                gc.collect()
            if self.infoqueue is None: break # Hack: If we're not in infinite mode, don't wait for more tracks.
            log.info("Waiting for a new track.")
            try:
                self.add_track(database.get_track_to_play())
                log.info("Got a new track.")
            except ValueError:
                log.warning("Track too short! Trying another.")
            except Exception:
                log.error("Got an Exception while trying to add new track:\n%s",
                          traceback.format_exc())
        log.error("Stopping!")
        # Last chunk. Should contain 1 instruction: fadeout.
        # CJA 20150227: Seems to be broken. Commenting this out may mean we ignore the
        # last track's transition info when building MajorGlitch.mp3, but this is not
        # serious. The track itself is correctly rendered; it will simply go on until
        # it reaches the end, and then stop, as per the King's advice.
        # yield terminate(self.tracks[-1], FADE_OUT)
    def run(self):
        """Process entry point: render transitions into the encoder."""
        database.reset_played()
        self.encoder = Lame(oqueue=self.oqueue)
        self.encoder.start()
        try:
            self.ctime = None
            for actions in self.generate_tracks():
                log.info("Rendering audio data for %d actions.", len(actions))
                for a in actions:
                    try:
                        with Timer() as t:
                            # TODO: Move the "multiple encoding" support into
                            # LAME itself - it should be able to multiplex the
                            # streams itself.
                            self.encoder.add_pcm(a)
                            if self.infoqueue: self.infoqueue.put(generate_metadata(a))
                        log.info("Rendered in %fs!", t.ms)
                    except Exception:
                        log.error("Could not render %s. Skipping.\n%s SEE???", a,
                                  traceback.format_exc())
                    gc.collect()
        except Exception:
            log.error("Something failed in mixer.run:\n%s",
                      traceback.format_exc())
        self.stop()
        return
    def stop(self):
        # Ask generate_tracks() to exit at its next loop check.
        self.__stop = True
    @property
    def stopped(self):
        return self.__stop
def build_entire_track(dest):
    """Build the entire-track file, saving to dest"""
    with open(dest,"wb") as outfile:
        encoder = Lame(ofile=outfile)
        print("Building...")
        encoder.start()
        # No queues: one-shot mode, so the mixer stops when tracks run out.
        mixer = Mixer(None, None)
        for index, track in enumerate(database.get_many_mp3(order_by="sequence,id")):
            print("Adding [%d]: ##%d %s (%r)"%(index,track.id,track.track_details["artist"],track.filename))
            mixer.add_track(track)
        for actions in mixer.generate_tracks():
            print("Encoder: Got %d actions"%len(actions))
            for action in actions:
                print("Encoder: Adding %r"%(action,))
                encoder.add_pcm(action)
        encoder.finish()
        print("Build complete.")
def rebuild_major_glitch():
    """Rebuild MajorGlitch.mp3, then move it into the static audio dir."""
    # Build into the working directory first so the served file is only
    # replaced once the build has finished.
    build_entire_track("MajorGlitch.mp3")
    os.rename("MajorGlitch.mp3", "static/single-audio-files/MajorGlitch.mp3")
# Allow running this module directly to regenerate the big file.
if __name__=="__main__":
    rebuild_major_glitch()
# Prefer FFMPEG
"""
Based off of `capsule`, by Tristan Jehan and Jason Sundram.
Heavily modified by Peter Sobot for integration with forever.fm.
Again by Mike iLL and Rosuav for Infinite Glitch
"""
import os
import gc
from . import apikeys
import logging
import pickle
import base64
import traceback
import threading
import subprocess
import multiprocessing
import weakref
from .lame import Lame
from .timer import Timer
from . import database
from audiodata import AudioData
from .capsule_support import resample_features, \
timbre_whiten, LOUDNESS_THRESH
# removed: terminate, FADE_OUT, is_valid which we don't seem to be using.
from .transitions import managed_transition
log = logging.getLogger(__name__)
import sys
test = 'test' in sys.argv
##########################################
## Code lifted from psobot's pyechonest ##
##########################################
import hashlib
import time
from amen.echo_nest_converter import AudioAnalysis
# from pyechonest.util import EchoNestAPIError
class EchoNestAPIError(Exception): pass
# import pyechonest.util
import numpy
# from echonest.remix.support.ffmpeg import ffmpeg
# Probe the system and find which name is available
# Probe for a working decoder binary: try each candidate until one launches.
ffmpeg_command = None
for command in ("ffmpeg", "avconv", "en-ffmpeg"):
    try:
        # Launching with no args is enough to prove the binary exists;
        # OSError means it isn't on PATH.
        subprocess.Popen([command],stdout=subprocess.PIPE,stderr=subprocess.STDOUT).wait()
        ffmpeg_command = command
        break
    except OSError:
        # The command wasn't found. Move on to the next one.
        pass
if not ffmpeg_command:
    raise RuntimeError("No avconv/ffmpeg found, cannot continue")
log.info("Using %r for audio conversion.",ffmpeg_command)
class FFMPEGStreamHandler(threading.Thread):
    """Decode audio to raw PCM by piping it through ffmpeg/avconv.

    `infile` may be a filename (ffmpeg reads the file itself) or a file-like
    object (this thread pumps its bytes into ffmpeg's stdin). Decoded s16le
    PCM is pulled from the child's stdout via read()/feed().
    """

    def __init__(self, infile, numChannels=2, sampleRate=44100):
        command = [ffmpeg_command]
        self.filename = None
        if isinstance(infile, basestring):
            self.filename = infile
        command.extend(["-i", self.filename or "pipe:0"])
        if numChannels is not None:
            command.extend(["-ac", str(numChannels)])
        if sampleRate is not None:
            command.extend(["-ar", str(sampleRate)])
        command.extend(["-f", "s16le", "-acodec", "pcm_s16le", "pipe:1"])
        log.info("Calling ffmpeg: %s", ' '.join(command)) # May be an imperfect representation of the command, but close enough
        # On Windows, psobot had this not closing FDs, despite doing so on other platforms. (????)
        # There's no os.uname() on Windows, presumably, and this is considered to be a reliable test.
        close_fds = hasattr(os, 'uname')
        self.p = subprocess.Popen(
            command,
            stdin=(subprocess.PIPE if not self.filename else None),
            stdout=subprocess.PIPE,
            stderr=open(os.devnull, 'w'),
            close_fds=close_fds
        )
        # Only keep a handle on the input when we must feed it ourselves.
        self.infile = infile if not self.filename else None
        if not self.filename:
            self.infile.seek(0)
        threading.Thread.__init__(self)
        self.daemon = True
        self.start()

    def __del__(self):
        if hasattr(self, 'p'):
            self.finish()

    def run(self):
        # Feeder thread: push the file object's bytes into ffmpeg's stdin.
        # BUG FIX: when decoding from a filename there is no stdin pipe and
        # self.infile is None; the original unconditionally called
        # self.infile.read() / self.p.stdin.close(), so this thread died
        # with an uncaught AttributeError (only IOError was handled).
        if self.infile is None:
            return
        try:
            self.p.stdin.write(self.infile.read())
        except IOError:
            pass
        self.p.stdin.close()

    def finish(self):
        # Close whatever pipes are still open, then reap the child process.
        if self.filename:
            try:
                if self.p.stdin:
                    self.p.stdin.close()
            except (OSError, IOError):
                pass
        try:
            self.p.stdout.close()
        except (OSError, IOError):
            pass
        try:
            self.p.kill()
        except (OSError, IOError):
            pass
        self.p.wait()

    # TODO: Abstract me away from 44100Hz, 2ch 16 bit
    def read(self, samples=-1):
        """Read up to `samples` stereo frames; -1 drains the whole stream.

        Returns a numpy int16 array of shape (n, 2).
        """
        if samples > 0:
            samples *= 2  # two int16 values per stereo frame
        arr = numpy.fromfile(self.p.stdout,
                             dtype=numpy.int16,
                             count=samples)
        if samples < 0 or len(arr) < samples:
            # Stream exhausted - shut the decoder down.
            self.finish()
        arr = numpy.reshape(arr, (-1, 2))
        return arr

    def feed(self, samples):
        # Discard `samples` stereo frames (4 bytes each: 2ch x int16).
        self.p.stdout.read(samples * 4)
class AudioStream(object):
    """
    Very much like an AudioData, but vastly more memory efficient.
    However, AudioStream only supports sequential access - i.e.: one, un-seekable
    stream of PCM data directly being streamed from FFMPEG.
    """
    def __init__(self, fobj):
        # fobj: filename or file-like object; FFMPEGStreamHandler takes both.
        log.info("Audio Stream Init")
        print("Audio Stream Init")
        self.sampleRate = 44100  # fixed output rate requested from ffmpeg
        self.numChannels = 2     # fixed stereo output
        self.fobj = fobj
        self.stream = FFMPEGStreamHandler(self.fobj, self.numChannels, self.sampleRate)
        self.index = 0  # read cursor, in frames from the start of the stream
    def __getitem__(self, index):
        """
        Fetches a frame or slice. Returns an individual frame (if the index
        is a time offset float or an integer sample number) or a slice if
        the index is an `AudioQuantum` (or quacks like one). If the slice is
        "in the past" (i.e.: has been read already, or the current cursor is
        past the requested slice) then this will throw an exception.
        """
        # Float index: seconds -> sample number.
        if isinstance(index, float):
            index = int(index * self.sampleRate)
        # AudioQuantum-like (has start/duration): treat as a time slice.
        elif hasattr(index, "start") and hasattr(index, "duration"):
            index = slice(float(index.start), index.start + index.duration)
        if isinstance(index, slice):
            # Slice of AudioQuantum-like objects: span from the first
            # quantum's start to the end of the last one.
            if (hasattr(index.start, "start") and
                hasattr(index.stop, "duration") and
                hasattr(index.stop, "start")):
                index = slice(index.start.start, index.stop.start + index.stop.duration)
        if isinstance(index, slice):
            return self.getslice(index)
        else:
            return self.getsample(index)
    def getslice(self, index):
        "Help `__getitem__` return a new AudioData for a given slice"
        # Seconds -> frame counts.
        if isinstance(index.start, float):
            index = slice(int(index.start * self.sampleRate),
                          int(index.stop * self.sampleRate), index.step)
        if index.start < self.index:
            # Requested data is behind the cursor: restart the decoder from
            # the top of the input and rewind the bookkeeping.
            # NOTE(review): the class docstring says reading "in the past"
            # throws; this actually re-streams instead - confirm intent.
            self.stream.finish()
            self.stream = FFMPEGStreamHandler(self.fobj, self.numChannels, self.sampleRate)
            self.index = 0
        if index.start > self.index:
            # Skip forward, discarding frames we don't need.
            self.stream.feed(index.start - self.index)
        self.index = index.stop
        return AudioData(None, self.stream.read(index.stop - index.start),
                         sampleRate=self.sampleRate,
                         numChannels=self.numChannels, defer=False)
    def getsample(self, index):
        # TODO: Finish this properly
        raise NotImplementedError()
        # Everything below is an unreachable draft kept for a future fix.
        if isinstance(index, float):
            index = int(index * self.sampleRate)
        if index >= self.index:
            self.stream.feed(index.start - self.index)
            self.index += index
        else:
            raise ValueError("Cannot seek backwards in AudioStream")
    def render(self):
        # Drain everything remaining in the stream as one array.
        return self.stream.read()
    def finish(self):
        # Shut down the ffmpeg child process.
        self.stream.finish()
    def __del__(self):
        if hasattr(self, "stream"):
            self.stream.finish()
class LocalAudioStream(AudioStream):
    """
    Like a non-seekable LocalAudioFile with vastly better memory usage
    and performance. Takes a file-like object and supports slicing and
    rendering. Attempting to read from a part of the input file that
    has already been read will throw an exception.
    If analysis is provided, it is assumed to be a pickle of an
    AudioAnalysis, and will be used in preference to querying echonest.
    """
    def __init__(self, initializer, analysis=None):
        AudioStream.__init__(self, initializer)
        try:
            # Attempt to load up the existing analysis first.
            # Assume that a successful unpickling represents correct
            # data; there's no real guarantee of this, but if you
            # fiddle in the database, I won't stop you shooting
            # yourself in the foot.
            tempanalysis = pickle.loads(base64.b64decode(analysis))
        except (EOFError, TypeError, pickle.UnpicklingError):
            # If there's no saved analysis (including if the arg is
            # omitted; None will raise TypeError), load the file,
            # and send it off to echonest. We try the MD5 first, as
            # it means less uploading, and fall back on actually
            # sending the whole file out.
            start = time.time()
            if hasattr(initializer, 'seek'):
                fobj = initializer
                fobj.seek(0)
            else:
                fobj = open(initializer, 'r')
            # Hash in chunks - much lighter on memory than reading the
            # entire file in.
            md5 = hashlib.md5()
            while True:
                # BUG FIX: the original read 2 ^ 16 bytes per iteration;
                # `^` is XOR in Python (2 ^ 16 == 18), so the file was
                # hashed 18 bytes at a time. 2 ** 16 is the intended
                # 64 KiB chunk. The digest was still correct, just slow.
                data = fobj.read(2 ** 16)
                if not data:
                    break
                md5.update(data)
            if not hasattr(initializer, 'seek'):
                fobj.close()
            track_md5 = md5.hexdigest()
            log.info("Fetching analysis...")
            try:
                tempanalysis = AudioAnalysis(str(track_md5))
            except EchoNestAPIError:
                tempanalysis = AudioAnalysis(initializer, "mp3")
            log.info("Fetched analysis in %ss", time.time() - start)
        # By the time we get here, we ought to have a valid tempanalysis.
        # The very last attempt (passing the original initializer to
        # AudioAnalysis) will let any exceptions bubble all the way up,
        # so we don't have to deal with that here.
        self.analysis = tempanalysis
        # let's try adding this back in
        self.analysis.source = weakref.ref(self)

        class data(object):
            """
            Massive hack - certain operations are intrusive and check
            `.data.ndim`, so in this case, we fake it.
            """
            ndim = self.numChannels
        self.data = data
##############################################
## End code lifted from psobot's pyechonest ##
##############################################
def metadata_of(a):
    """Recursively locate track_details metadata on an action or track.

    Objects carrying `_metadata` yield its track_details; `.track` wrappers
    are unwrapped; transitions exposing t1/t2 yield a 2-tuple of metadata.
    Raises ValueError when nothing recognisable is attached.
    """
    if hasattr(a, '_metadata'):
        return a._metadata.track_details
    elif hasattr(a, 'track'):
        return metadata_of(a.track)
    elif hasattr(a, 't1') and hasattr(a, 't2'):
        return (metadata_of(a.t1), metadata_of(a.t2))
    raise ValueError("No metadata found!")
def generate_metadata(a):
    """Build the info-queue payload describing one rendered action.

    The dict holds the action's class name, duration and sample count plus
    one per-track entry (two entries for a two-track transition).
    """
    payload = {
        'action': a.__class__.__name__.split(".")[-1],
        'duration': a.duration,
        'samples': a.samples,
    }
    meta = metadata_of(a)
    if isinstance(meta, tuple):
        first, second = meta
        log.info("HERE: %r", dir(first))
        payload['tracks'] = [
            {"metadata": first, "start": a.s1, "end": a.e1},
            {"metadata": second, "start": a.s2, "end": a.e2},
        ]
    else:
        payload['tracks'] = [
            {"metadata": meta, "start": a.start, "end": a.start + a.duration},
        ]
    return payload
class Mixer(multiprocessing.Process):
def __init__(self, oqueue, infoqueue):
self.infoqueue = infoqueue
self.encoder = None
self.oqueue = oqueue
self.__track_lock = threading.Lock()
self.__tracks = []
self.transition_time = 30 if not test else 5
self.__stop = False
multiprocessing.Process.__init__(self)
@property
def tracks(self):
self.__track_lock.acquire()
tracks = self.__tracks
self.__track_lock.release()
return tracks
@tracks.setter
def tracks(self, new_val):
self.__track_lock.acquire()
self.__tracks = new_val
self.__track_lock.release()
@property
def current_track(self):
return self.tracks[0]
def get_stream(self, x):
for fname in (x.filename, "audio/"+x.filename):
if os.path.isfile(fname):
return fname
# TODO: Fetch the contents from the database and save to fname
raise NotImplementedError
def analyze(self, x):
if isinstance(x, list):
return [self.analyze(y) for y in x]
if isinstance(x, AudioData):
return self.process(x)
if isinstance(x, tuple):
return self.analyze(*x)
log.info("Grabbing stream [%r]...", x.id)
saved = database.get_analysis(x.id)
laf = LocalAudioStream(self.get_stream(x), saved)
if not saved:
database.save_analysis(x.id, base64.b64encode(pickle.dumps(laf.analysis,-1)))
setattr(laf, "_metadata", x)
return self.process(laf)
def add_track(self, track):
self.tracks.append(self.analyze(track))
def process(self, track):
if not hasattr(track.analysis.pyechonest_track, "title"):
setattr(track.analysis.pyechonest_track, "title", track._metadata.track_details['title'])
log.info("Resampling features [%r]...", track._metadata.id)
if len(track.analysis.beats):
track.resampled = resample_features(track, rate='beats')
track.resampled['matrix'] = timbre_whiten(track.resampled['matrix'])
else:
log.info("no beats returned for this track.")
track.resampled = {"rate":'beats', "matrix": []}
track.gain = self.__db_2_volume(track.analysis.loudness)
log.info("Done processing [%r].", track._metadata.id)
return track
def __db_2_volume(self, loudness):
return (1.0 - LOUDNESS_THRESH * (LOUDNESS_THRESH - loudness) / 100.0)
def generate_tracks(self):
"""Yield a series of lists of track segments - helper for run()"""
while len(self.tracks) < 2:
log.info("Waiting for a new track.")
track = database.get_track_to_play()
try:
self.add_track(track)
log.info("Got a new track.")
except Exception: # TODO: Why?
log.error("Exception while trying to add new track:\n%s",
traceback.format_exc())
# Initial transition.
# yield initialize(self.tracks[0], self.tracks[1])
mixer_state = {}
while not self.__stop:
while len(self.tracks) > 1:
tra = managed_transition(self.tracks[0],
self.tracks[1], mixer_state)
del self.tracks[0].analysis
gc.collect()
yield tra
log.debug("Finishing track 0 [%r]",self.tracks[0])
from datetime import datetime
now = datetime.now().time()
self.tracks[0].finish()
del self.tracks[0]
gc.collect()
if self.infoqueue is None: break # Hack: If we're not in infinite mode, don't wait for more tracks.
log.info("Waiting for a new track.")
try:
self.add_track(database.get_track_to_play())
log.info("Got a new track.")
except ValueError:
log.warning("Track too short! Trying another.")
except Exception:
log.error("Got an Exception while trying to add new track:\n%s",
traceback.format_exc())
log.error("Stopping!")
# Last chunk. Should contain 1 instruction: fadeout.
# CJA 20150227: Seems to be broken. Commenting this out may mean we ignore the
# last track's transition info when building MajorGlitch.mp3, but this is not
# serious. The track itself is correctly rendered; it will simply go on until
# it reaches the end, and then stop, as per the King's advice.
# yield terminate(self.tracks[-1], FADE_OUT)
    def run(self):
        """Main loop: start the encoder, then render every action yielded
        by generate_tracks() into it, publishing metadata if configured."""
        database.reset_played()
        self.encoder = Lame(oqueue=self.oqueue)
        self.encoder.start()
        try:
            self.ctime = None
            for actions in self.generate_tracks():
                log.info("Rendering audio data for %d actions.", len(actions))
                for a in actions:
                    try:
                        with Timer() as t:
                            # TODO: Move the "multiple encoding" support into
                            # LAME itself - it should be able to multiplex the
                            # streams itself.
                            self.encoder.add_pcm(a)
                            # infoqueue is optional (None in finite/build mode).
                            if self.infoqueue: self.infoqueue.put(generate_metadata(a))
                        log.info("Rendered in %fs!", t.ms)
                    except Exception:
                        # A failed action is skipped rather than aborting the mix.
                        log.error("Could not render %s. Skipping.\n%s SEE???", a,
                                  traceback.format_exc())
                    gc.collect()
        except Exception:
            log.error("Something failed in mixer.run:\n%s",
                      traceback.format_exc())
        self.stop()
        return
    def stop(self):
        # Signal the generate_tracks() loop to exit.
        self.__stop = True
    @property
    def stopped(self):
        # True once stop() has been requested.
        return self.__stop
def build_entire_track(dest):
    """Build the entire-track file, saving to dest"""
    with open(dest,"wb") as f:
        encoder = Lame(ofile=f)
        print("Building...")
        encoder.start()
        # No input/info queues: the mixer runs in finite mode, draining only
        # the tracks queued below.
        mixer = Mixer(None, None)
        for idx,track in enumerate(database.get_many_mp3(order_by="sequence,id")):
            print("Adding [%d]: ##%d %s (%r)"%(idx,track.id,track.track_details["artist"],track.filename))
            mixer.add_track(track)
        for actions in mixer.generate_tracks():
            print("Encoder: Got %d actions"%len(actions))
            for a in actions:
                print("Encoder: Adding %r"%(a,))
                encoder.add_pcm(a)
        encoder.finish()
        print("Build complete.")
def rebuild_major_glitch():
    """Regenerate MajorGlitch.mp3 and move it into the web-served directory."""
    build_entire_track("MajorGlitch.mp3")
    # Build completes before the rename, so a half-built file is never
    # served (assumes both paths are on the same filesystem).
    os.rename("MajorGlitch.mp3", "static/single-audio-files/MajorGlitch.mp3")
if __name__=="__main__":
    # Script entry point: rebuild the combined audio file.
    rebuild_major_glitch()
|
# -*- coding: utf-8; -*-
from unittest.mock import MagicMock, patch
from tests.base import AppInitialized
GIT_HOOK_URL = '/hooks/update-content'
# OPTIONS is implicitly added by flask and always handled
# http://flask.pocoo.org/docs/0.10/api/#flask.Flask.add_url_rule → Parameters → options
_HTTP_METHODS = {'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'OPTIONS', 'CONNECT'}
HTTP_METHODS = _HTTP_METHODS - {'OPTIONS'}
class GitHookNoToken(AppInitialized):
    # App is initialized WITHOUT a GIT_UPDATE_HOOK_TOKEN, so the hook
    # endpoint should not be exposed at all.
    def test_git_hook_wrong_method(self):
        """Test that using wrong methods will cause a HTTP 405 return"""
        for method in HTTP_METHODS - {'POST'}:
            with self.subTest(method=method):
                response = self.client.open(GIT_HOOK_URL, method=method)
                self.assertEqual(response.status_code, 405)

    def test_git_hook_not_existent(self):
        """Test that HTTP 404 is returned if no token is configured"""
        response = self.client.post(GIT_HOOK_URL)
        self.assertEqual(response.status_code, 404)
class GitHookExistent(AppInitialized):
    # App is initialized WITH a GIT_UPDATE_HOOK_TOKEN, so the hook exists
    # and must enforce token authentication.
    def create_app(self):
        self.token = "SuperDUPERsecret!!1"
        return super().create_app(
            additional_config={'GIT_UPDATE_HOOK_TOKEN': self.token}
        )

    def test_no_token_auth_required(self):
        """Test that `PUT`ting the hook w/o giving a token returns HTTP 401"""
        response = self.client.post(GIT_HOOK_URL)
        self.assertEqual(response.status_code, 401)

    def test_empty_token_auth_required(self):
        # An empty ?token= must be treated the same as a missing token.
        response = self.client.post("{}?token=".format(GIT_HOOK_URL))
        self.assertEqual(response.status_code, 401)

    def test_wrong_token_permission_denied(self):
        """Test that using a wrong token gets you a HTTP 403"""
        response = self.client.post("{}?token={}".format(
            GIT_HOOK_URL,
            self.token + "wrong",
        ))
        self.assertEqual(response.status_code, 403)

    def test_correct_token_working(self):
        """Test that the hook returns HTTP 204 and calls `update_repo`"""
        # Patch out `update_repo` – we don't care about anything
        # git-related in this TestCase
        mock = MagicMock()
        with patch('sipa.blueprints.hooks.update_repo', mock):
            response = self.client.post("{}?token={}".format(
                GIT_HOOK_URL,
                self.token,
            ))
            self.assertEqual(response.status_code, 204)
            self.assertTrue(mock.called)
Git Hook tests refactor
Some independent minor refactoring of the git hook tests
# -*- coding: utf-8; -*-
from unittest.mock import MagicMock, patch
from tests.base import AppInitialized
GIT_HOOK_URL = '/hooks/update-content'
# OPTIONS is implicitly added by flask and always handled
# http://flask.pocoo.org/docs/0.10/api/#flask.Flask.add_url_rule → Parameters → options
_HTTP_METHODS = {'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'OPTIONS', 'CONNECT'}
HTTP_METHODS = _HTTP_METHODS - {'OPTIONS'}
class GitHookTestBase(AppInitialized):
    """Shared helper for exercising the git update-hook endpoint."""

    def assert_hook_status(self, status, token=None):
        """POST to the hook (optionally with ?token=...) and check the status."""
        if token is None:
            url = GIT_HOOK_URL
        else:
            url = "{}?token={}".format(GIT_HOOK_URL, token)
        response = self.client.post(url)
        self.assertEqual(response.status_code, status)
class GitHookNoToken(GitHookTestBase):
    # No GIT_UPDATE_HOOK_TOKEN configured: the endpoint should not exist.
    def test_git_hook_wrong_method(self):
        """Test that using wrong methods will cause a HTTP 405 return"""
        for method in HTTP_METHODS - {'POST'}:
            with self.subTest(method=method):
                response = self.client.open(GIT_HOOK_URL, method=method)
                self.assertEqual(response.status_code, 405)

    def test_git_hook_not_existent(self):
        """Test that HTTP 404 is returned if no token is configured"""
        self.assert_hook_status(404)
class GitHookExistent(GitHookTestBase):
    # A token is configured, so the hook exists and enforces authentication.
    def create_app(self):
        self.token = "SuperDUPERsecret!!1"
        return super().create_app(
            additional_config={'GIT_UPDATE_HOOK_TOKEN': self.token}
        )

    def test_no_token_auth_required(self):
        """Test that `PUT`ting the hook w/o giving a token returns HTTP 401"""
        self.assert_hook_status(401)

    def test_empty_token_auth_required(self):
        # An empty ?token= must behave like a missing token.
        self.assert_hook_status(401, token="")

    def test_wrong_token_permission_denied(self):
        """Test that using a wrong token gets you a HTTP 403"""
        self.assert_hook_status(403, token=self.token+"wrong")

    def test_correct_token_working(self):
        """Test that the hook returns HTTP 204 and calls `update_repo`"""
        # Patch out `update_repo` – we don't care about anything
        # git-related in this TestCase
        with patch('sipa.blueprints.hooks.update_repo') as mock:
            self.assert_hook_status(204, token=self.token)
            self.assertTrue(mock.called)
|
"""
Based off of `capsule`, by Tristan Jehan and Jason Sundram.
Heavily modified by Peter Sobot for integration with forever.fm.
Again by Mike iLL and Rosuav for Infinite Glitch
"""
import os
import gc
import apikeys
import logging
import urllib2
import traceback
import threading
import subprocess
import multiprocessing
from lame import Lame
from timer import Timer
import database
from audio import AudioData
from capsule_support import order_tracks, resample_features, \
timbre_whiten, initialize, make_transition, terminate, \
FADE_OUT, is_valid, LOUDNESS_THRESH
log = logging.getLogger(__name__)
import sys
test = 'test' in sys.argv
##########################################
## Code lifted from psobot's pyechonest ##
##########################################
import hashlib
import time
from echonest.remix.audio import AudioAnalysis
from pyechonest.util import EchoNestAPIError
import pyechonest.util
import weakref
import numpy
from echonest.remix.support.ffmpeg import ffmpeg
class FFMPEGStreamHandler(threading.Thread):
    """Streams s16le PCM out of an ``en-ffmpeg`` subprocess.

    ``infile`` may be a filename (ffmpeg reads it directly) or a seekable
    file-like object, in which case this thread feeds its bytes into
    ffmpeg's stdin while the caller reads decoded samples via read().
    """
    def __init__(self, infile, numChannels=2, sampleRate=44100):
        command = ["en-ffmpeg"]
        self.filename = None
        if isinstance(infile, basestring):
            self.filename = infile
        command.extend(["-i", self.filename or "pipe:0"])
        if numChannels is not None:
            command.extend(["-ac", str(numChannels)])
        if sampleRate is not None:
            command.extend(["-ar",str(sampleRate)])
        command.extend(["-f","s16le","-acodec","pcm_s16le","pipe:1"])
        log.info("Calling ffmpeg: %s", ' '.join(command)) # May be an imperfect representation of the command, but close enough
        # On Windows, psobot had this not closing FDs, despite doing so on other platforms. (????)
        # There's no os.uname() on Windows, presumably, and this is considered to be a reliable test.
        close_fds = hasattr(os, 'uname')
        self.p = subprocess.Popen(
            command,
            stdin=(subprocess.PIPE if not self.filename else None),
            stdout=subprocess.PIPE,
            stderr=open(os.devnull, 'w'),
            close_fds=close_fds
        )
        self.infile = infile if not self.filename else None
        if not self.filename:
            self.infile.seek(0)
        # BUGFIX: this called ExceptionThread.__init__, a class that no
        # longer exists (NameError on construction); initialize the plain
        # threading.Thread base instead.
        threading.Thread.__init__(self)
        self.daemon = True
        self.start()

    def __del__(self):
        if hasattr(self, 'p'):
            self.finish()

    def run(self):
        # BUGFIX: only meaningful in pipe mode - when ffmpeg reads a file
        # itself, self.p.stdin and self.infile are both None and the old
        # code crashed in this daemon thread.
        if self.filename:
            return
        try:
            self.p.stdin.write(self.infile.read())
        except IOError:
            pass
        self.p.stdin.close()

    def finish(self):
        # NOTE(review): this guard looks inverted - in filename mode stdin
        # is None, so the close is a no-op; confirm against pipe-mode use.
        if self.filename:
            try:
                if self.p.stdin:
                    self.p.stdin.close()
            except (OSError, IOError):
                pass
        try:
            self.p.stdout.close()
        except (OSError, IOError):
            pass
        try:
            self.p.kill()
        except (OSError, IOError):
            pass
        self.p.wait()

    # TODO: Abstract me away from 44100Hz, 2ch 16 bit
    def read(self, samples=-1):
        if samples > 0:
            samples *= 2  # two int16 values per stereo frame
        arr = numpy.fromfile(self.p.stdout,
                             dtype=numpy.int16,
                             count=samples)
        if samples < 0 or len(arr) < samples:
            self.finish()
        arr = numpy.reshape(arr, (-1, 2))
        return arr

    def feed(self, samples):
        # Skip `samples` frames (4 bytes each: 2 channels x int16).
        self.p.stdout.read(samples * 4)
class AudioStream(object):
    """
    Very much like an AudioData, but vastly more memory efficient.
    However, AudioStream only supports sequential access - i.e.: one, un-seekable
    stream of PCM data directly being streamed from FFMPEG.
    """
    def __init__(self, fobj):
        # Fixed output format matching FFMPEGStreamHandler's defaults.
        self.sampleRate = 44100
        self.numChannels = 2
        self.fobj = fobj
        self.stream = FFMPEGStreamHandler(self.fobj, self.numChannels, self.sampleRate)
        self.index = 0  # current read cursor, in samples

    def __getitem__(self, index):
        """
        Fetches a frame or slice. Returns an individual frame (if the index
        is a time offset float or an integer sample number) or a slice if
        the index is an `AudioQuantum` (or quacks like one). If the slice is
        "in the past" (i.e.: has been read already, or the current cursor is
        past the requested slice) then this will throw an exception.
        """
        # Seconds -> samples.
        if isinstance(index, float):
            index = int(index * self.sampleRate)
        elif hasattr(index, "start") and hasattr(index, "duration"):
            # AudioQuantum-like object: convert to a (start, stop) slice.
            index = slice(float(index.start), index.start + index.duration)
        if isinstance(index, slice):
            # Slice bounded by AudioQuantum-like objects: span from the
            # first's start to the end of the last.
            if (hasattr(index.start, "start") and
                hasattr(index.stop, "duration") and
                hasattr(index.stop, "start")):
                index = slice(index.start.start, index.stop.start + index.stop.duration)
        if isinstance(index, slice):
            return self.getslice(index)
        else:
            return self.getsample(index)

    def getslice(self, index):
        "Help `__getitem__` return a new AudioData for a given slice"
        if isinstance(index.start, float):
            index = slice(int(index.start * self.sampleRate),
                          int(index.stop * self.sampleRate), index.step)
        if index.start < self.index:
            # Requested data is behind the cursor: restart ffmpeg from the
            # beginning and skip forward below.
            self.stream.finish()
            self.stream = FFMPEGStreamHandler(self.fobj, self.numChannels, self.sampleRate)
            self.index = 0
        if index.start > self.index:
            self.stream.feed(index.start - self.index)
        self.index = index.stop
        return AudioData(None, self.stream.read(index.stop - index.start),
                         sampleRate=self.sampleRate,
                         numChannels=self.numChannels, defer=False)

    def getsample(self, index):
        # TODO: Finish this properly
        raise NotImplementedError()
        # NOTE(review): everything below is unreachable dead code.
        if isinstance(index, float):
            index = int(index * self.sampleRate)
        if index >= self.index:
            self.stream.feed(index.start - self.index)
            self.index += index
        else:
            raise ValueError("Cannot seek backwards in AudioStream")

    def render(self):
        # Drain the remainder of the stream as a single array.
        return self.stream.read()

    def finish(self):
        self.stream.finish()

    def __del__(self):
        if hasattr(self, "stream"):
            self.stream.finish()
class LocalAudioStream(AudioStream):
    """
    Like a non-seekable LocalAudioFile with vastly better memory usage
    and performance. Takes a file-like object (and its kind, assumed
    to be MP3) and supports slicing and rendering. Attempting to read
    from a part of the input file that has already been read will throw
    an exception.
    """
    def __init__(self, initializer, kind="mp3"):
        AudioStream.__init__(self, initializer)
        start = time.time()
        if hasattr(initializer, 'seek'):
            fobj = initializer
            fobj.seek(0)
        else:
            # BUGFIX: open in binary mode - we hash raw MP3 bytes, and text
            # mode can translate bytes on some platforms.
            fobj = open(initializer, 'rb')
        # This looks like a lot of work, but is much more lighter
        # on memory than reading the entire file in.
        md5 = hashlib.md5()
        while True:
            # BUGFIX: this read used `2 ^ 16`, which is XOR (== 18 bytes per
            # iteration); read 64 KiB chunks as intended.
            data = fobj.read(2 ** 16)
            if not data:
                break
            md5.update(data)
        if not hasattr(initializer, 'seek'):
            fobj.close()
        track_md5 = md5.hexdigest()
        logging.getLogger(__name__).info("Fetching analysis...")
        try:
            # Try the cached analysis keyed by content hash first...
            tempanalysis = AudioAnalysis(str(track_md5))
        except EchoNestAPIError:
            # ...and fall back to submitting the audio itself.
            tempanalysis = AudioAnalysis(initializer, kind)
        logging.getLogger(__name__).info("Fetched analysis in %ss",
                                         (time.time() - start))
        self.analysis = tempanalysis
        self.analysis.source = weakref.ref(self)

        class data(object):
            """
            Massive hack - certain operations are intrusive and check
            `.data.ndim`, so in this case, we fake it.
            """
            ndim = self.numChannels
        self.data = data
##############################################
## End code lifted from psobot's pyechonest ##
##############################################
def metadata_of(a):
    """Find the metadata object(s) attached to an action.

    Playback-style actions carry metadata directly (``_metadata.obj``) or
    via their ``track``; two-track transitions (with ``t1``/``t2``) yield
    a pair of metadata objects. Raises ValueError when none is found.
    """
    if hasattr(a, '_metadata'):
        return a._metadata.obj
    elif hasattr(a, 'track'):
        return metadata_of(a.track)
    elif hasattr(a, 't1') and hasattr(a, 't2'):
        return (metadata_of(a.t1), metadata_of(a.t2))
    else:
        raise ValueError("No metadata found!")
def generate_metadata(a):
    """Build a JSON-friendly dict describing action ``a`` for the info queue."""
    meta = metadata_of(a)
    if isinstance(meta, tuple):
        # Two-track transition: report both segments.
        first, second = meta
        tracks = [
            {"metadata": first, "start": a.s1, "end": a.e1},
            {"metadata": second, "start": a.s2, "end": a.e2},
        ]
    else:
        tracks = [
            {"metadata": meta, "start": a.start, "end": a.start + a.duration},
        ]
    return {
        'action': a.__class__.__name__.split(".")[-1],
        'duration': a.duration,
        'samples': a.samples,
        'tracks': tracks,
    }
class Mixer(multiprocessing.Process):
    """Separate process that analyzes queued tracks and streams transitions.

    Reads track objects from ``iqueue``, analyzes them, renders crossfaded
    transitions through one Lame encoder per (oqueue, settings) pair, and
    publishes per-action metadata on ``infoqueue``.
    """
    def __init__(self, iqueue, oqueues, infoqueue,
                 settings=({},), initial=None,
                 max_play_time=300, transition_time=30 if not test else 5,
                 samplerate=44100):
        self.iqueue = iqueue
        self.infoqueue = infoqueue
        self.encoders = []
        if len(oqueues) != len(settings):
            raise ValueError("Differing number of output queues and settings!")
        self.oqueues = oqueues
        self.settings = settings
        self.__track_lock = threading.Lock()
        self.__tracks = []
        self.max_play_time = max_play_time
        self.transition_time = transition_time
        # BUGFIX: the samplerate argument was silently ignored (hard-coded
        # 44100); honour it, keeping the same default.
        self.samplerate = samplerate
        self.__stop = False
        if isinstance(initial, list):
            self.add_tracks(initial)
        elif isinstance(initial, AudioData):
            self.add_track(initial)
        multiprocessing.Process.__init__(self)

    @property
    def tracks(self):
        # Read the shared list under the lock.
        self.__track_lock.acquire()
        tracks = self.__tracks
        self.__track_lock.release()
        return tracks

    @tracks.setter
    def tracks(self, new_val):
        self.__track_lock.acquire()
        self.__tracks = new_val
        self.__track_lock.release()

    @property
    def current_track(self):
        return self.tracks[0]

    def get_stream(self, x):
        """Return a local path for track ``x``'s audio file."""
        for fname in (x.filename, "audio/"+x.filename):
            if os.path.isfile(fname):
                return fname
        # TODO: Fetch the contents from the database and save to fname
        raise NotImplementedError

    def analyze(self, x):
        """Recursively turn ``x`` (list/tuple/AudioData/track) into processed tracks."""
        if isinstance(x, list):
            return [self.analyze(y) for y in x]
        if isinstance(x, AudioData):
            return self.process(x)
        if isinstance(x, tuple):
            return self.analyze(*x)
        log.info("Grabbing stream [%r]...", x.id)
        laf = LocalAudioStream(self.get_stream(x))
        setattr(laf, "_metadata", x)
        return self.process(laf)

    def add_track(self, track):
        self.tracks.append(self.analyze(track))

    def add_tracks(self, tracks):
        self.tracks += order_tracks(self.analyze(tracks))

    def process(self, track):
        """Resample beat-aligned features and compute gain.

        Raises ValueError when the track is too short to transition.
        """
        if not hasattr(track.analysis.pyechonest_track, "title"):
            setattr(track.analysis.pyechonest_track, "title", track._metadata.title)
        log.info("Resampling features [%r]...", track._metadata.id)
        track.resampled = resample_features(track, rate='beats')
        track.resampled['matrix'] = timbre_whiten(track.resampled['matrix'])
        if not is_valid(track, self.transition_time):
            raise ValueError("Track too short!")
        track.gain = self.__db_2_volume(track.analysis.loudness)
        log.info("Done processing [%r].", track._metadata.id)
        return track

    def __db_2_volume(self, loudness):
        # Map loudness (dB) to a linear volume multiplier.
        return (1.0 - LOUDNESS_THRESH * (LOUDNESS_THRESH - loudness) / 100.0)

    def loop(self):
        """Generator yielding lists of renderable actions (the playlist)."""
        while len(self.tracks) < 2:
            log.info("Waiting for a new track.")
            track = self.iqueue.get()
            try:
                self.add_track(track) # TODO: Extend to allow multiple tracks.
                log.info("Got a new track.")
            except Exception:
                log.error("Exception while trying to add new track:\n%s",
                          traceback.format_exc())
        # Initial transition. Should contain 2 instructions: fadein, and playback.
        inter = self.tracks[0].analysis.duration
        yield initialize(self.tracks[0], inter, self.transition_time, 10)
        while not self.__stop:
            while len(self.tracks) > 1:
                stay_time = max(self.tracks[0].analysis.duration,
                                self.tracks[1].analysis.duration)
                tra = make_transition(self.tracks[0],
                                      self.tracks[1],
                                      stay_time,
                                      self.transition_time)
                # Free the (large) analysis as soon as it is consumed.
                del self.tracks[0].analysis
                gc.collect()
                yield tra
                self.tracks[0].finish()
                del self.tracks[0]
                gc.collect()
                log.info("Waiting for a new track.")
                try:
                    self.add_track(self.iqueue.get()) # TODO: Allow multiple tracks.
                    log.info("Got a new track.")
                except ValueError:
                    log.warning("Track too short! Trying another.")
                except Exception:
                    log.error("Exception while trying to add new track:\n%s",
                              traceback.format_exc())
        log.error("Stopping!")
        # Last chunk. Should contain 1 instruction: fadeout.
        yield terminate(self.tracks[-1], FADE_OUT)

    def run(self):
        for oqueue, settings in zip(self.oqueues, self.settings):
            e = Lame(oqueue=oqueue, **settings)
            self.encoders.append(e)
            e.start()
        try:
            self.ctime = None
            # (The unused enumerate() index was dropped.)
            for actions in self.loop():
                log.info("Rendering audio data for %d actions.", len(actions))
                for a in actions:
                    try:
                        with Timer() as t:
                            # TODO: Move the "multiple encoding" support into
                            # LAME itself - it should be able to multiplex the
                            # streams itself.
                            self.encoders[0].add_pcm(a)
                            self.infoqueue.put(generate_metadata(a))
                        log.info("Rendered in %fs!", t.ms)
                    except Exception:
                        log.error("Could not render %s. Skipping.\n%s", a,
                                  traceback.format_exc())
                    gc.collect()
        except Exception:
            log.error("Something failed in mixer.run:\n%s",
                      traceback.format_exc())
        self.stop()
        return

    def stop(self):
        self.__stop = True

    @property
    def stopped(self):
        return self.__stop
Remove last trace of exceptionthread
"""
Based off of `capsule`, by Tristan Jehan and Jason Sundram.
Heavily modified by Peter Sobot for integration with forever.fm.
Again by Mike iLL and Rosuav for Infinite Glitch
"""
import os
import gc
import apikeys
import logging
import urllib2
import traceback
import threading
import subprocess
import multiprocessing
from lame import Lame
from timer import Timer
import database
from audio import AudioData
from capsule_support import order_tracks, resample_features, \
timbre_whiten, initialize, make_transition, terminate, \
FADE_OUT, is_valid, LOUDNESS_THRESH
log = logging.getLogger(__name__)
import sys
test = 'test' in sys.argv
##########################################
## Code lifted from psobot's pyechonest ##
##########################################
import hashlib
import time
from echonest.remix.audio import AudioAnalysis
from pyechonest.util import EchoNestAPIError
import pyechonest.util
import weakref
import numpy
from echonest.remix.support.ffmpeg import ffmpeg
class FFMPEGStreamHandler(threading.Thread):
    """Streams s16le PCM out of an ``en-ffmpeg`` subprocess.

    ``infile`` may be a filename (ffmpeg reads it directly) or a seekable
    file-like object, in which case this thread feeds its bytes into
    ffmpeg's stdin while the caller reads decoded samples via read().
    """
    def __init__(self, infile, numChannels=2, sampleRate=44100):
        command = ["en-ffmpeg"]
        self.filename = None
        if isinstance(infile, basestring):
            self.filename = infile
        command.extend(["-i", self.filename or "pipe:0"])
        if numChannels is not None:
            command.extend(["-ac", str(numChannels)])
        if sampleRate is not None:
            command.extend(["-ar",str(sampleRate)])
        command.extend(["-f","s16le","-acodec","pcm_s16le","pipe:1"])
        log.info("Calling ffmpeg: %s", ' '.join(command)) # May be an imperfect representation of the command, but close enough
        # On Windows, psobot had this not closing FDs, despite doing so on other platforms. (????)
        # There's no os.uname() on Windows, presumably, and this is considered to be a reliable test.
        close_fds = hasattr(os, 'uname')
        self.p = subprocess.Popen(
            command,
            stdin=(subprocess.PIPE if not self.filename else None),
            stdout=subprocess.PIPE,
            stderr=open(os.devnull, 'w'),
            close_fds=close_fds
        )
        self.infile = infile if not self.filename else None
        if not self.filename:
            self.infile.seek(0)
        threading.Thread.__init__(self)
        self.daemon = True
        self.start()

    def __del__(self):
        if hasattr(self, 'p'):
            self.finish()

    def run(self):
        # BUGFIX: only meaningful in pipe mode - when ffmpeg reads a file
        # itself, self.p.stdin and self.infile are both None and the old
        # code crashed in this daemon thread.
        if self.filename:
            return
        try:
            self.p.stdin.write(self.infile.read())
        except IOError:
            pass
        self.p.stdin.close()

    def finish(self):
        # NOTE(review): this guard looks inverted - in filename mode stdin
        # is None, so the close is a no-op; confirm against pipe-mode use.
        if self.filename:
            try:
                if self.p.stdin:
                    self.p.stdin.close()
            except (OSError, IOError):
                pass
        try:
            self.p.stdout.close()
        except (OSError, IOError):
            pass
        try:
            self.p.kill()
        except (OSError, IOError):
            pass
        self.p.wait()

    # TODO: Abstract me away from 44100Hz, 2ch 16 bit
    def read(self, samples=-1):
        if samples > 0:
            samples *= 2  # two int16 values per stereo frame
        arr = numpy.fromfile(self.p.stdout,
                             dtype=numpy.int16,
                             count=samples)
        if samples < 0 or len(arr) < samples:
            self.finish()
        arr = numpy.reshape(arr, (-1, 2))
        return arr

    def feed(self, samples):
        # Skip `samples` frames (4 bytes each: 2 channels x int16).
        self.p.stdout.read(samples * 4)
class AudioStream(object):
    """
    Very much like an AudioData, but vastly more memory efficient.
    However, AudioStream only supports sequential access - i.e.: one, un-seekable
    stream of PCM data directly being streamed from FFMPEG.
    """
    def __init__(self, fobj):
        # Fixed output format matching FFMPEGStreamHandler's defaults.
        self.sampleRate = 44100
        self.numChannels = 2
        self.fobj = fobj
        self.stream = FFMPEGStreamHandler(self.fobj, self.numChannels, self.sampleRate)
        self.index = 0  # current read cursor, in samples

    def __getitem__(self, index):
        """
        Fetches a frame or slice. Returns an individual frame (if the index
        is a time offset float or an integer sample number) or a slice if
        the index is an `AudioQuantum` (or quacks like one). If the slice is
        "in the past" (i.e.: has been read already, or the current cursor is
        past the requested slice) then this will throw an exception.
        """
        # Seconds -> samples.
        if isinstance(index, float):
            index = int(index * self.sampleRate)
        elif hasattr(index, "start") and hasattr(index, "duration"):
            # AudioQuantum-like object: convert to a (start, stop) slice.
            index = slice(float(index.start), index.start + index.duration)
        if isinstance(index, slice):
            # Slice bounded by AudioQuantum-like objects: span from the
            # first's start to the end of the last.
            if (hasattr(index.start, "start") and
                hasattr(index.stop, "duration") and
                hasattr(index.stop, "start")):
                index = slice(index.start.start, index.stop.start + index.stop.duration)
        if isinstance(index, slice):
            return self.getslice(index)
        else:
            return self.getsample(index)

    def getslice(self, index):
        "Help `__getitem__` return a new AudioData for a given slice"
        if isinstance(index.start, float):
            index = slice(int(index.start * self.sampleRate),
                          int(index.stop * self.sampleRate), index.step)
        if index.start < self.index:
            # Requested data is behind the cursor: restart ffmpeg from the
            # beginning and skip forward below.
            self.stream.finish()
            self.stream = FFMPEGStreamHandler(self.fobj, self.numChannels, self.sampleRate)
            self.index = 0
        if index.start > self.index:
            self.stream.feed(index.start - self.index)
        self.index = index.stop
        return AudioData(None, self.stream.read(index.stop - index.start),
                         sampleRate=self.sampleRate,
                         numChannels=self.numChannels, defer=False)

    def getsample(self, index):
        # TODO: Finish this properly
        raise NotImplementedError()
        # NOTE(review): everything below is unreachable dead code.
        if isinstance(index, float):
            index = int(index * self.sampleRate)
        if index >= self.index:
            self.stream.feed(index.start - self.index)
            self.index += index
        else:
            raise ValueError("Cannot seek backwards in AudioStream")

    def render(self):
        # Drain the remainder of the stream as a single array.
        return self.stream.read()

    def finish(self):
        self.stream.finish()

    def __del__(self):
        if hasattr(self, "stream"):
            self.stream.finish()
class LocalAudioStream(AudioStream):
    """
    Like a non-seekable LocalAudioFile with vastly better memory usage
    and performance. Takes a file-like object (and its kind, assumed
    to be MP3) and supports slicing and rendering. Attempting to read
    from a part of the input file that has already been read will throw
    an exception.
    """
    def __init__(self, initializer, kind="mp3"):
        AudioStream.__init__(self, initializer)
        start = time.time()
        if hasattr(initializer, 'seek'):
            fobj = initializer
            fobj.seek(0)
        else:
            # BUGFIX: open in binary mode - we hash raw MP3 bytes, and text
            # mode can translate bytes on some platforms.
            fobj = open(initializer, 'rb')
        # This looks like a lot of work, but is much more lighter
        # on memory than reading the entire file in.
        md5 = hashlib.md5()
        while True:
            # BUGFIX: this read used `2 ^ 16`, which is XOR (== 18 bytes per
            # iteration); read 64 KiB chunks as intended.
            data = fobj.read(2 ** 16)
            if not data:
                break
            md5.update(data)
        if not hasattr(initializer, 'seek'):
            fobj.close()
        track_md5 = md5.hexdigest()
        logging.getLogger(__name__).info("Fetching analysis...")
        try:
            # Try the cached analysis keyed by content hash first...
            tempanalysis = AudioAnalysis(str(track_md5))
        except EchoNestAPIError:
            # ...and fall back to submitting the audio itself.
            tempanalysis = AudioAnalysis(initializer, kind)
        logging.getLogger(__name__).info("Fetched analysis in %ss",
                                         (time.time() - start))
        self.analysis = tempanalysis
        self.analysis.source = weakref.ref(self)

        class data(object):
            """
            Massive hack - certain operations are intrusive and check
            `.data.ndim`, so in this case, we fake it.
            """
            ndim = self.numChannels
        self.data = data
##############################################
## End code lifted from psobot's pyechonest ##
##############################################
def metadata_of(a):
    """Resolve the metadata behind an action (a pair for transitions)."""
    if hasattr(a, '_metadata'):
        return a._metadata.obj
    if hasattr(a, 'track'):
        # Delegate to the wrapped track.
        return metadata_of(a.track)
    is_transition = hasattr(a, 't1') and hasattr(a, 't2')
    if is_transition:
        return (metadata_of(a.t1), metadata_of(a.t2))
    raise ValueError("No metadata found!")
def generate_metadata(a):
    """Build a JSON-friendly dict describing action ``a`` for the info queue."""
    meta = metadata_of(a)
    if isinstance(meta, tuple):
        # Two-track transition: report both segments.
        first, second = meta
        tracks = [
            {"metadata": first, "start": a.s1, "end": a.e1},
            {"metadata": second, "start": a.s2, "end": a.e2},
        ]
    else:
        tracks = [
            {"metadata": meta, "start": a.start, "end": a.start + a.duration},
        ]
    return {
        'action': a.__class__.__name__.split(".")[-1],
        'duration': a.duration,
        'samples': a.samples,
        'tracks': tracks,
    }
class Mixer(multiprocessing.Process):
    """Separate process that analyzes queued tracks and streams transitions.

    Reads track objects from ``iqueue``, analyzes them, renders crossfaded
    transitions through one Lame encoder per (oqueue, settings) pair, and
    publishes per-action metadata on ``infoqueue``.
    """
    def __init__(self, iqueue, oqueues, infoqueue,
                 settings=({},), initial=None,
                 max_play_time=300, transition_time=30 if not test else 5,
                 samplerate=44100):
        self.iqueue = iqueue
        self.infoqueue = infoqueue
        self.encoders = []
        if len(oqueues) != len(settings):
            raise ValueError("Differing number of output queues and settings!")
        self.oqueues = oqueues
        self.settings = settings
        self.__track_lock = threading.Lock()
        self.__tracks = []
        self.max_play_time = max_play_time
        self.transition_time = transition_time
        # BUGFIX: the samplerate argument was silently ignored (hard-coded
        # 44100); honour it, keeping the same default.
        self.samplerate = samplerate
        self.__stop = False
        if isinstance(initial, list):
            self.add_tracks(initial)
        elif isinstance(initial, AudioData):
            self.add_track(initial)
        multiprocessing.Process.__init__(self)

    @property
    def tracks(self):
        # Read the shared list under the lock.
        self.__track_lock.acquire()
        tracks = self.__tracks
        self.__track_lock.release()
        return tracks

    @tracks.setter
    def tracks(self, new_val):
        self.__track_lock.acquire()
        self.__tracks = new_val
        self.__track_lock.release()

    @property
    def current_track(self):
        return self.tracks[0]

    def get_stream(self, x):
        """Return a local path for track ``x``'s audio file."""
        for fname in (x.filename, "audio/"+x.filename):
            if os.path.isfile(fname):
                return fname
        # TODO: Fetch the contents from the database and save to fname
        raise NotImplementedError

    def analyze(self, x):
        """Recursively turn ``x`` (list/tuple/AudioData/track) into processed tracks."""
        if isinstance(x, list):
            return [self.analyze(y) for y in x]
        if isinstance(x, AudioData):
            return self.process(x)
        if isinstance(x, tuple):
            return self.analyze(*x)
        log.info("Grabbing stream [%r]...", x.id)
        laf = LocalAudioStream(self.get_stream(x))
        setattr(laf, "_metadata", x)
        return self.process(laf)

    def add_track(self, track):
        self.tracks.append(self.analyze(track))

    def add_tracks(self, tracks):
        self.tracks += order_tracks(self.analyze(tracks))

    def process(self, track):
        """Resample beat-aligned features and compute gain.

        Raises ValueError when the track is too short to transition.
        """
        if not hasattr(track.analysis.pyechonest_track, "title"):
            setattr(track.analysis.pyechonest_track, "title", track._metadata.title)
        log.info("Resampling features [%r]...", track._metadata.id)
        track.resampled = resample_features(track, rate='beats')
        track.resampled['matrix'] = timbre_whiten(track.resampled['matrix'])
        if not is_valid(track, self.transition_time):
            raise ValueError("Track too short!")
        track.gain = self.__db_2_volume(track.analysis.loudness)
        log.info("Done processing [%r].", track._metadata.id)
        return track

    def __db_2_volume(self, loudness):
        # Map loudness (dB) to a linear volume multiplier.
        return (1.0 - LOUDNESS_THRESH * (LOUDNESS_THRESH - loudness) / 100.0)

    def loop(self):
        """Generator yielding lists of renderable actions (the playlist)."""
        while len(self.tracks) < 2:
            log.info("Waiting for a new track.")
            track = self.iqueue.get()
            try:
                self.add_track(track) # TODO: Extend to allow multiple tracks.
                log.info("Got a new track.")
            except Exception:
                log.error("Exception while trying to add new track:\n%s",
                          traceback.format_exc())
        # Initial transition. Should contain 2 instructions: fadein, and playback.
        inter = self.tracks[0].analysis.duration
        yield initialize(self.tracks[0], inter, self.transition_time, 10)
        while not self.__stop:
            while len(self.tracks) > 1:
                stay_time = max(self.tracks[0].analysis.duration,
                                self.tracks[1].analysis.duration)
                tra = make_transition(self.tracks[0],
                                      self.tracks[1],
                                      stay_time,
                                      self.transition_time)
                # Free the (large) analysis as soon as it is consumed.
                del self.tracks[0].analysis
                gc.collect()
                yield tra
                self.tracks[0].finish()
                del self.tracks[0]
                gc.collect()
                log.info("Waiting for a new track.")
                try:
                    self.add_track(self.iqueue.get()) # TODO: Allow multiple tracks.
                    log.info("Got a new track.")
                except ValueError:
                    log.warning("Track too short! Trying another.")
                except Exception:
                    log.error("Exception while trying to add new track:\n%s",
                              traceback.format_exc())
        log.error("Stopping!")
        # Last chunk. Should contain 1 instruction: fadeout.
        yield terminate(self.tracks[-1], FADE_OUT)

    def run(self):
        for oqueue, settings in zip(self.oqueues, self.settings):
            e = Lame(oqueue=oqueue, **settings)
            self.encoders.append(e)
            e.start()
        try:
            self.ctime = None
            # (The unused enumerate() index was dropped.)
            for actions in self.loop():
                log.info("Rendering audio data for %d actions.", len(actions))
                for a in actions:
                    try:
                        with Timer() as t:
                            # TODO: Move the "multiple encoding" support into
                            # LAME itself - it should be able to multiplex the
                            # streams itself.
                            self.encoders[0].add_pcm(a)
                            self.infoqueue.put(generate_metadata(a))
                        log.info("Rendered in %fs!", t.ms)
                    except Exception:
                        log.error("Could not render %s. Skipping.\n%s", a,
                                  traceback.format_exc())
                    gc.collect()
        except Exception:
            log.error("Something failed in mixer.run:\n%s",
                      traceback.format_exc())
        self.stop()
        return

    def stop(self):
        self.__stop = True

    @property
    def stopped(self):
        return self.__stop
|
import flask
from pale.adapters import flask as pale_flask_adapter
from tests.example_app import api
def create_pale_flask_app():
    """Creates a flask app, and registers a blueprint bound to pale."""
    blueprint = flask.Blueprint('api', 'tests.example_app')
    # Wire the pale API's endpoints onto the blueprint.
    pale_flask_adapter.bind_blueprint(api, blueprint)
    app = flask.Flask(__name__)
    app.register_blueprint(blueprint, url_prefix='/api')
    return app
Add authenticator and context creator to example app
import flask
from pale.adapters import flask as pale_flask_adapter
from pale.config import authenticator, context_creator
from tests.example_app import api
# Registered via @authenticator so pale invokes it for every request.
@authenticator
def authenticate_pale_context(context):
    """Don't actually authenticate anything in this test."""
    return context
# Registered via @context_creator so pale builds a context per request.
@context_creator
def create_pale_context(endpoint, request):
    """Build the default flask-backed pale context for a request."""
    return pale_flask_adapter.DefaultFlaskContext(endpoint, request)
def create_pale_flask_app():
    """Creates a flask app, and registers a blueprint bound to pale."""
    blueprint = flask.Blueprint('api', 'tests.example_app')
    # Wire the pale API's endpoints onto the blueprint.
    pale_flask_adapter.bind_blueprint(api, blueprint)
    app = flask.Flask(__name__)
    app.register_blueprint(blueprint, url_prefix='/api')
    return app
|
#
# Copyright (c) 2004-2005 rPath, Inc.
#
# This program is distributed under the terms of the MIT License as found
# in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/mit-license.php.
#
# This program is distributed in the hope that it will be useful, but
# without any waranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the MIT License for full details.
""" Extended pdb """
import stackutil
import inspect
import pdb
import os
import re
try:
import erlcompleter
import readline
except ImportError:
hasReadline = False
else:
hasReadline = True
import socket
import string
import sys
import tempfile
import traceback
from pdb import _saferepr
class Epdb(pdb.Pdb):
# epdb will print to here instead of to sys.stdout,
# and restore stdout when done
__old_stdout = None
_displayList = {}
# used to track the number of times a set_trace has been seen
trace_counts = {'default' : [ True, 0 ]}
_historyPath = os.path.expanduser('~/.epdbhistory')
prompt = '(Epdb) '
multiline_prompt = '| '
def __init__(self):
self._exc_type = None
self._exc_msg = None
self._tb = None
self._config = {}
pdb.Pdb.__init__(self)
if hasReadline:
self._completer = erlcompleter.ECompleter()
self.prompt = '(Epdb) '
self._oldHistory = []
def store_old_history(self):
historyLen = readline.get_current_history_length()
oldHistory = [ readline.get_history_item(x) for x in xrange(historyLen)]
self._oldHistory = oldHistory
readline.clear_history()
def restore_old_history(self):
readline.clear_history()
for line in self._oldHistory:
if line is None:
continue
readline.add_history(line)
self._oldHistory = []
def read_history(self, storeOldHistory=False):
if hasReadline and self._historyPath:
if storeOldHistory:
self.store_old_history()
else:
readline.clear_history()
try:
readline.read_history_file(self._historyPath)
except:
pass
def save_history(self, restoreOldHistory=False):
if hasReadline and self._historyPath:
readline.set_history_length(1000)
try:
readline.write_history_file(self._historyPath)
except:
pass
if restoreOldHistory:
self.restore_old_history()
else:
readline.clear_history()
def do_savestack(self, path):
if 'stack' in self.__dict__:
# when we're saving we always
# start from the top
frame = self.stack[-1][0]
else:
frame = sys._getframe(1)
while frame.f_globals['__name__'] in ('epdb', 'pdb', 'bdb', 'cmd'):
frame = frame.f_back
if path == "":
(tbfd,path) = tempfile.mkstemp('', 'conary-stack-')
output = os.fdopen(tbfd, 'w')
else:
output = open(path, 'w')
stackutil.printStack(frame, output)
print "Stack saved to %s" % path
def do_mailstack(self, arg):
tolist = arg.split()
subject = '[Conary Stacktrace]'
if 'stack' in self.__dict__:
# when we're saving we always
# start from the top
frame = self.stack[-1][0]
else:
frame = sys._getframe(1)
while frame.f_globals['__name__'] in ('epdb', 'pdb', 'bdb', 'cmd'):
frame = frame.f_back
sender = os.environ['USER']
host = socket.getfqdn()
extracontent = None
if self._tb:
lines = traceback.format_exception(self._exc_type, self._exc_msg,
self._tb)
extracontent = string.joinfields(lines, "")
stackutil.mailStack(frame, tolist, sender + '@' + host, subject,
extracontent)
print "Mailed stack to %s" % tolist
def do_printstack(self, arg):
if 'stack' in self.__dict__:
# print only the stack up to our current depth
frame = self.stack[-1][0]
else:
frame = sys._getframe(1)
while frame.f_globals['__name__'] in ('epdb', 'pdb', 'bdb', 'cmd'):
frame = frame.f_back
stackutil.printStack(frame, sys.stderr)
def do_printframe(self, arg):
if not arg:
if 'stack' in self.__dict__:
depth = self.curindex
else:
depth = 0
else:
depth = int(arg)
if 'stack' in self.__dict__:
# start at -1 (top) and go down...
depth = 0 - (depth + 1)
if 'stack' in self.__dict__:
print "Depth = %d" % depth
frame = self.stack[depth][0]
else:
frame = sys._getframe(1)
while frame.f_globals['__name__'] in ('epdb', 'pdb', 'bdb', 'cmd'):
frame = frame.f_back
for i in xrange(0, depth):
frame = frame.f_back
stackutil.printFrame(frame, sys.stderr)
    def do_file(self, arg):
        # `file` (alias `f`) command: print filename:lineno of the
        # frame currently selected in the stack.
        frame, lineno = self.stack[self.curindex]
        filename = self.canonic(frame.f_code.co_filename)
        print "%s:%s" % (filename, lineno)
    do_f = do_file
    def do_until(self, arg):
        # `until <lineno>`: run until the given line of the current
        # file, implemented as a temporary breakpoint plus continue.
        try:
            int(arg)
        except ValueError:
            print "Error: only specify line numbers for until"
            return 0
        filename = self.canonic(self.curframe.f_code.co_filename)
        if self.checkline(filename, int(arg)):
            self.do_tbreak(arg)
            self.set_continue()
            # non-zero return tells cmdloop to stop reading commands
            return 1
        else:
            return 0
def do_set(self, arg):
if not arg:
keys = self._config.keys()
keys.sort()
for key in keys:
print "%s: %s" % (key, self._config[key])
else:
args = arg.split(None, 1)
if len(args) == 1:
key = args[0]
if key in self._config:
print "Removing %s: %s" % (key, self._config[key])
del self._config[key]
else:
print "%s: Not set" % (key)
else:
key, value = args
if(hasattr(self, '_set_' + key)):
fn = getattr(self, '_set_' + key)
fn(value)
else:
print "No such config value"
def do_trace_cond(self, args):
args = args.split(' ', 1)
if len(args) not in (1, 2):
print "trace_cond [marker] <cond>"
if len(args) == 1:
cond = args[0]
marker = 'default'
else:
marker, cond = args
if cond == 'None':
cond = None
self.set_trace_cond(marker, cond)
return
try:
cond = int(cond)
self.set_trace_cond(marker, cond)
return
except ValueError:
locals = self.curframe.f_locals
globals = self.curframe.f_globals
try:
cond = eval(cond + '\n', globals, locals)
# test to be sure that what we code is a
# function that can take one arg and return a bool
rv = (type(cond) == bool) or bool(cond(1))
self.set_trace_cond(marker, cond)
except:
print self._reprExc()
do_tc = do_trace_cond
def _set_path(self, paths):
paths = paths.split(' ')
for path in paths:
if path[0] != '/':
print "must give absolute path"
if not os.path.exists(path):
print "Path %s does not exist" % path
if path[-1] == '/':
path = path[:-1]
path = os.path.realpath(path)
if 'path' not in self._config:
self._config['path'] = []
self._config['path'].append(path)
print "Set path to %s" % self._config['path']
def do_list(self, arg):
rel = re.compile(r'^[-+] *[0-9]* *$')
if arg and arg == '.':
self.lineno = None
pdb.Pdb.do_list(self, '')
return
if rel.match(arg):
if arg == '-':
reldist = -7
else:
reldist = int(arg)
if self.lineno is None:
lineno = self.curframe.f_lineno
else:
lineno = self.lineno
lineno += reldist - 5
pdb.Pdb.do_list(self, str(lineno))
self.lastcmd = 'list ' + arg
else:
pdb.Pdb.do_list(self, arg)
do_l = do_list
def default(self, line):
if line[0] == '!': line = line[1:]
if self.handle_directive(line):
return
if line == '<<EOF':
return self.multiline()
if line.strip().endswith(':'):
return self.multiline(line)
if line.endswith('\\'):
return self.multiline(line)
origLine = line
line = _removeQuotes(line)
if line is None:
return self.multiline(origLine)
if line.count('(') > line.count(')'):
return self.multiline(origLine)
try:
self.save_history()
return pdb.Pdb.default(self, line)
finally:
self.read_history()
def multiline(self, firstline=''):
full_input = []
# keep a list of the entries that we've made in history
old_hist = []
if firstline:
print ' ' + firstline
full_input.append(firstline)
while True:
if hasReadline:
# add the current readline position
old_hist.append(readline.get_current_history_length())
if self.use_rawinput:
try:
line = raw_input(self.multiline_prompt)
except EOFError:
line = 'EOF'
else:
self.stdout.write(self.multiline_prompt)
self.stdout.flush()
line = self.stdin.readline()
if not len(line):
line = 'EOF'
else:
line = line[:-1] # chop \n
if line == 'EOF':
break
full_input.append(line)
# add the final readline history position
if hasReadline:
old_hist.append(readline.get_current_history_length())
cmd = '\n'.join(full_input) + '\n'
if hasReadline:
# remove the old, individual readline history entries.
# first remove any duplicate entries
old_hist = sorted(set(old_hist))
# Make sure you do this in reversed order so you move from
# the end of the history up.
for pos in reversed(old_hist):
# get_current_history_length returns pos + 1
readline.remove_history_item(pos - 1)
# now add the full line
readline.add_history(cmd)
locals = self.curframe.f_locals
globals = self.curframe.f_globals
print
self.save_history()
try:
try:
code = compile(cmd, '<stdin>', 'single')
exec code in globals, locals
except:
print self._reprExc()
finally:
self.read_history()
def handle_directive(self, line):
cmd = line.split('?', 1)
if len(cmd) == 1:
return False
cmd, directive = cmd
if directive and directive not in '?cdmpx':
return False
self.do_define(cmd)
if directive == '?':
self.do_doc(cmd)
if directive == 'c':
self.do_showclasses(cmd)
elif directive == 'd':
self.do_showdata(cmd)
elif directive == 'm':
self.do_showmethods(cmd)
elif directive == 'p':
pdb.Pdb.default(self, 'print ' + cmd)
elif directive == 'x':
pdb.Pdb.default(self, 'hex(%s)' % cmd)
return True
def do_p(self, arg):
cmd = arg.split('?', 1)
if len(cmd) == 1:
self.save_history()
pdb.Pdb.do_p(self, arg)
self.read_history()
else:
self.default(arg)
def _showmethods(self, obj):
methods = self._getMembersOfType(obj, 'm')
methods.sort()
for (methodName, method) in methods:
try:
self._define(method)
except:
if hasattr(obj, '__name__'):
prefix = obj.__name__
else:
prefix = obj.__class__.__name__
print prefix + '.' + methodName
def _showdata(self, obj):
data = self._getMembersOfType(obj, 'd')
data.sort()
print [ x[0] for x in data]
def _showclasses(self, obj):
classes = self._getMembersOfType(obj, 'c')
classes.sort()
for (className, class_) in classes:
self._define(class_)
print
    def _objtype(self, obj):
        # Classify obj for the ?c/?d/?m directives:
        # 'm' = method/function, 'c' = class, 'd' = plain data.
        if inspect.isroutine(obj) or type(obj).__name__ == 'method-wrapper':
            return 'm'
        elif inspect.isclass(obj):
            return 'c'
        else:
            return 'd'
def _eval(self, arg, fn=None, printExc=True):
locals = self.curframe.f_locals
globals = self.curframe.f_globals
try:
result = eval(arg + '\n', globals, locals)
if fn is None:
return True, result
return True, fn(result)
except:
if printExc:
exc = self._reprExc()
print exc
return False, exc
else:
return False, self._reprExc()
    def _reprExc(self):
        # Format the current exception as "*** Type: message", using
        # pdb's _saferepr so a broken __repr__ can't crash the debugger.
        t, v = sys.exc_info()[:2]
        # old-style string exceptions have no __name__
        if type(t) == type(''):
            exc_type_name = t
        else: exc_type_name = t.__name__
        return ' '.join(('***', exc_type_name + ':', _saferepr(str(v))))
    def _getMembersOfType(self, obj, objType):
        # Return (name, member) pairs of obj's attributes whose
        # _objtype() classification equals objType ('m', 'c' or 'd').
        names = dir(obj)
        members = []
        for n in names:
            member = getattr(obj, n)
            if self._objtype(member) == objType:
                members.append((n, member))
        return members
    def do_showmethods(self, arg):
        # `showmethods <expr>`: evaluate expr and list its methods.
        self._eval(arg, self._showmethods)
    def do_showclasses(self, arg):
        # `showclasses <expr>`: evaluate expr and list its nested classes.
        self._eval(arg, self._showclasses)
def do_display(self, arg):
if not arg:
self._displayItems()
else:
params = arg.split()
if params[0] == 'list' and not params[1:]:
self._listDisplayItems()
elif params[0] in ('enable','disable','delete'):
try:
nums = [int(x) for x in params[1:]]
except ValueError, msg:
print '***', ValueError, msg
return
missing = []
_nums = []
for num in nums:
if num in self._displayList:
_nums.append(num)
else:
missing.append(str(num))
if params[0] == 'enable':
for num in _nums:
self._displayList[num][0] = True
if params[0] == 'disable':
for num in _nums:
self._displayList[num][0] = False
if params[0] == 'delete':
for num in nums:
del self._displayList[num]
self._listDisplayItems()
if missing:
print "Warning: could not find display num(s) %s" \
% ','.join(missing)
else:
if self._displayList:
displayNum = max(self._displayList) + 1
else:
displayNum = 0
self._displayList[displayNum] = [True, arg]
self._listDisplayItems()
def _listDisplayItems(self):
displayedItem = False
for num in sorted(self._displayList.iterkeys()):
if not displayedItem:
displayedItem = True
print
print "Cmds to display:"
enabled, item = self._displayList[num]
if not enabled:
print "%d: %s (disabled)" % (num, item)
else:
print "%d: %s" % (num, item)
if displayedItem:
print
else:
print "*** No items set to display at each cmd"
def _displayItems(self):
displayedItem = False
for num in sorted(self._displayList.iterkeys()):
enabled, item = self._displayList[num]
if not enabled:
continue
if not displayedItem:
displayedItem = True
print
passed, result = self._eval(item, printExc = False)
print "%d: %s = %s" % (num, item, _saferepr(result))
if displayedItem:
print
def do_showdata(self, arg):
result = self._eval(item, self._showdata)
def _define(self, obj):
if inspect.isclass(obj):
bases = inspect.getmro(obj)
bases = [ x.__name__ for x in bases[1:] ]
if bases:
bases = ' -- Bases (' + ', '.join(bases) + ')'
else:
bases = ''
if hasattr(obj, '__init__') and inspect.isroutine(obj.__init__):
try:
initfn = obj.__init__.im_func
argspec = inspect.getargspec(initfn)
# get rid of self from arg list...
fnargs = argspec[0][1:]
newArgSpec = (fnargs, argspec[1], argspec[2], argspec[3])
argspec = inspect.formatargspec(*newArgSpec)
except TypeError:
argspec = '(?)'
else:
argspec = ''
print "Class " + obj.__name__ + argspec + bases
elif inspect.ismethod(obj) or type(obj).__name__ == 'method-wrapper':
m_class = obj.im_class
m_self = obj.im_self
m_func = obj.im_func
name = m_class.__name__ + '.' + m_func.__name__
#if m_self:
# name = "<Bound>" + name
argspec = inspect.formatargspec(*inspect.getargspec(m_func))
print "%s%s" % (name, argspec)
elif type(obj).__name__ == 'builtin_function_or_method':
print obj
elif inspect.isfunction(obj):
name = obj.__name__
argspec = inspect.formatargspec(*inspect.getargspec(obj))
print "%s%s" % (name, argspec)
else:
print type(obj)
def do_define(self, arg):
self._eval(arg, self._define)
def do_showdata(self, arg):
result = self._eval(arg, self._showdata)
def _define(self, obj):
if inspect.isclass(obj):
bases = inspect.getmro(obj)
bases = [ x.__name__ for x in bases[1:] ]
if bases:
bases = ' -- Bases (' + ', '.join(bases) + ')'
else:
bases = ''
if hasattr(obj, '__init__') and inspect.isroutine(obj.__init__):
try:
initfn = obj.__init__.im_func
argspec = inspect.getargspec(initfn)
# get rid of self from arg list...
fnargs = argspec[0][1:]
newArgSpec = (fnargs, argspec[1], argspec[2], argspec[3])
argspec = inspect.formatargspec(*newArgSpec)
except TypeError:
argspec = '(?)'
else:
argspec = ''
print "Class " + obj.__name__ + argspec + bases
elif inspect.ismethod(obj) or type(obj).__name__ == 'method-wrapper':
m_class = obj.im_class
m_self = obj.im_self
m_func = obj.im_func
name = m_class.__name__ + '.' + m_func.__name__
#if m_self:
# name = "<Bound>" + name
argspec = inspect.formatargspec(*inspect.getargspec(m_func))
print "%s%s" % (name, argspec)
elif type(obj).__name__ == 'builtin_function_or_method':
print obj
elif inspect.isfunction(obj):
name = obj.__name__
argspec = inspect.formatargspec(*inspect.getargspec(obj))
print "%s%s" % (name, argspec)
else:
print type(obj)
def do_define(self, arg):
self._eval(arg, self._define)
def do_doc(self, arg):
self._eval(arg, self._doc)
def _doc(self, result):
docloc = None
if hasattr(result, '__doc__'):
if result.__doc__ is not None:
docstr = result.__doc__
elif inspect.ismethod(result):
bases = inspect.getmro(result.im_class)
found = False
for base in bases:
if hasattr(base, result.__name__):
baseres = getattr(base, result.__name__)
if (hasattr(baseres, '__doc__')
and baseres.__doc__ is not None):
docloc = baseres
docstr = baseres.__doc__
found = True
break
if not found:
docstr = None
else:
docstr = None
print "\"\"\"%s\"\"\"" % docstr
if docloc:
print "(Found doc in %s)" % docloc
if inspect.isclass(result):
if hasattr(result, '__init__'):
self.do_define(arg + '.__init__')
if hasattr(result.__init__, '__doc__'):
print "\"\"\"%s\"\"\"" % result.__init__.__doc__
else:
print "No init function"
def interaction(self, frame, traceback):
self.read_history(storeOldHistory=True)
self.setup(frame, traceback)
self._displayItems()
self.print_stack_entry(self.stack[self.curindex])
self.cmdloop()
self.forget()
self.save_history(restoreOldHistory=True)
self.restore_input_output()
def switch_input_output(self):
self.switch_stdout()
self.switch_stdin()
def restore_input_output(self):
if not self.__old_stdout is None:
sys.stdout.flush()
# now we reset stdout to be the whatever it was before
sys.stdout = self.__old_stdout
if not self.__old_stdin is None:
sys.stdin = self.__old_stdin
def switch_stdout(self):
isatty = False
try:
fileno = sys.stdout.fileno()
isatty = os.isatty(fileno)
except AttributeError:
pass
# sys.stdout is not a regular file,
# go through some hoops
# (this is less desirable because it doesn't redirect
# low-level writes to 1)
if not isatty:
sys.stdout.flush()
self.__old_stdout = sys.stdout
# if this fails, we'll raise an IOError
stdout = open('/dev/tty', 'w')
sys.stdout = stdout
else:
self.__old_stdout = None
def switch_stdin(self):
isatty = False
try:
fileno = sys.stdin.fileno()
isatty = os.isatty(fileno)
except AttributeError:
pass
# sys.stdout is not a regular file,
# go through some hoops
# (this is less desirable because it doesn't redirect
# low-level writes to 1)
if not isatty:
sys.stdin.flush()
self.__old_stdin = sys.stdin
# if this fails, we'll raise an IOError
stdin = open('/dev/tty', 'r')
sys.stdin = stdin
else:
self.__old_stdin = None
# override for cases where we want to search a different
# path for the file
def canonic(self, filename):
canonic = self.fncache.get(filename)
if not canonic or not os.path.exists(canonic):
canonic = os.path.abspath(filename)
canonic = os.path.normcase(canonic)
if not os.path.exists(canonic):
if 'path' in self._config:
for path in self._config['path']:
pos = matchFileOnDirPath(path, canonic)
if pos:
canonic = pos
break
self.fncache[filename] = canonic
return canonic
def reset_trace_count(klass, marker='default'):
tc = klass.trace_counts
try:
tc[marker][1] = 0
except KeyError:
pass
reset_trace_count = classmethod(reset_trace_count)
def set_trace_cond(klass, marker='default', cond=None):
""" Sets a condition for set_trace statements that have the
specified marker. A condition can either callable, in
which case it should take one argument, which is the
number of times set_trace(marker) has been called,
or it can be a number, in which case the break will
only be called.
"""
tc = klass.trace_counts
try:
curVals = tc[marker]
except KeyError:
curVals = [ None, 0 ]
tc[marker] = (cond, 0)
set_trace_cond = classmethod(set_trace_cond)
def set_trace(self, marker='default', skip=0):
tc = Epdb.trace_counts
try:
(cond, curCount) = tc[marker]
curCount += 1
except KeyError:
(cond, curCount) = None, 1
if cond is True:
rv = True
elif cond is None or cond is False:
rv = False
else:
try:
rv = cond(curCount)
except TypeError:
# assume that if the condition
# is not callable, it is an
# integer above which we are
# supposed to break
rv = curCount >= cond
if rv:
if marker != 'default':
self.prompt = '(Epdb [%s]) ' % marker
self._set_trace(skip=skip+1)
tc[marker] = [cond, curCount]
    def do_debug(self, arg):
        # `debug <stmt>`: run the statement under a nested Epdb
        # instance (recursive debugger), like pdb's own `debug`.
        # Tracing is disabled while the child is set up and restored
        # when it finishes.
        sys.settrace(None)
        globals = self.curframe.f_globals
        locals = self.curframe.f_locals
        p = Epdb()
        # nest the prompt so the recursion depth is visible
        p.prompt = "(%s) " % self.prompt.strip()
        print "ENTERING RECURSIVE DEBUGGER"
        sys.call_tracing(p.run, (arg, globals, locals))
        print "LEAVING RECURSIVE DEBUGGER"
        sys.settrace(self.trace_dispatch)
        self.lastcmd = p.lastcmd
    def _set_trace(self, skip=0):
        """Start debugging from here."""
        # skip: number of extra stack frames to pop first, so wrapper
        # functions like the module-level set_trace() aren't debugged.
        frame = sys._getframe().f_back
        # go up the specified number of frames
        for i in range(0,skip):
            frame = frame.f_back
        self.reset()
        # install our dispatcher on every frame up the stack so
        # `up`/`return` keep working; remember the outermost frame
        while frame:
            frame.f_trace = self.trace_dispatch
            self.botframe = frame
            frame = frame.f_back
        self.set_step()
        sys.settrace(self.trace_dispatch)
# bdb hooks
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
if self.stop_here(frame):
self.switch_input_output()
pdb.Pdb.user_call(self, frame, argument_list)
def user_line(self, frame):
"""This function is called when we stop or break at this line."""
self.switch_input_output()
pdb.Pdb.user_line(self, frame)
def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
self.switch_input_output()
pdb.Pdb.user_return(self, frame, return_value)
def user_exception(self, frame, exc_info):
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
self.switch_input_output()
pdb.Pdb.user_exception(self, frame, exc_info)
def complete(self, text, state):
if hasReadline:
# from cmd.py, override completion to match on local variables
allvars = {}
globals = self.curframe.f_globals.copy()
locals = self.curframe.f_locals.copy()
allvars.update(globals)
allvars.update(locals)
self._completer.namespace = allvars
self._completer.use_main_ns = 0
matches = self._completer.complete(text, state)
return matches
else:
return pdb.Pdb.complete(self, text, state)
def beingTraced():
    """Return True if any frame on the current call stack has a trace hook."""
    caller = sys._getframe(0)
    while caller is not None:
        if caller.f_trace is not None:
            return True
        caller = caller.f_back
    return False
def set_trace_cond(*args, **kw):
    """ Sets a condition for set_trace statements that have the
        specified marker.  Positional arguments are marker names whose
        condition is set to True (always break); keyword arguments map
        a marker name to its condition.  A condition can either be
        callable, in which case it should take one argument, which is
        the number of times set_trace(marker) has been called,
        or it can be a number, in which case the break will
        only happen once that count is reached.
    """
    for key, val in kw.iteritems():
        Epdb.set_trace_cond(key, val)
    for arg in args:
        Epdb.set_trace_cond(arg, True)
# short alias
stc = set_trace_cond
def reset_trace_count(marker='default'):
    """ Resets the number of times a set_trace for a marker has been
        seen back to 0. """
    Epdb.reset_trace_count(marker)
def set_trace(marker='default'):
    """ Starts the debugger at the current location. Takes an
        optional argument 'marker' (default 'default'), that
        can be used with the set_trace_cond function to support
        turning on and off tracepoints based on conditionals
    """
    # skip=1 hides this wrapper frame so the debugger stops in the caller
    Epdb().set_trace(marker=marker, skip=1)
# short alias, mirroring pdb convention
st = set_trace
def post_mortem(t, exc_type=None, exc_msg=None):
    # Debug traceback `t` after an exception, like pdb.post_mortem().
    # exc_type/exc_msg are stashed on the debugger so do_mailstack can
    # include the formatted exception in its report.
    p = Epdb()
    p._exc_type = exc_type
    p._exc_msg = exc_msg
    p._tb = t
    p.reset()
    # walk to the innermost frame, where the exception was raised
    while t.tb_next is not None:
        t = t.tb_next
    p.switch_input_output()
    p.interaction(t.tb_frame, t)
def matchFileOnDirPath(curpath, pathdir):
    """Find match for a file by slicing away its directory elements
       from the front and replacing them with pathdir.  Assume that the
       end of curpath is right but that the beginning may contain
       some garbage (or it may be short).

       Overlaps are allowed:
       e.g. /tmp/fdjsklf/real/path/elements, /all/the/real/ =>
       /all/the/real/path/elements (assuming that this combined
       path exists)

       Returns the matched path, or None when nothing matches.
    """
    if os.path.exists(curpath):
        return curpath
    # assume absolute paths
    filedirs = curpath.split('/')[1:]
    filename = filedirs[-1]
    filedirs = filedirs[:-1]
    # tolerate a trailing slash; endswith() also avoids the IndexError
    # the original raised on an empty pathdir
    if pathdir.endswith('/'):
        pathdir = pathdir[:-1]
    pathdirs = pathdir.split('/')[1:]
    # Cut off matching file elements from the ends of the two paths
    for x in range(1, min(len(filedirs), len(pathdirs))):
        # XXX this will not work if you have
        # /usr/foo/foo/filename.py
        if filedirs[-1] == pathdirs[-x]:
            filedirs = filedirs[:-1]
        else:
            break
    # Now try cutting off incorrect initial elements of curpath
    while filedirs:
        tmppath = '/' + '/'.join(pathdirs + filedirs + [filename])
        if os.path.exists(tmppath):
            return tmppath
        filedirs = filedirs[1:]
    tmppath = '/' + '/'.join(pathdirs + [filename])
    if os.path.exists(tmppath):
        return tmppath
    return None
def _removeQuotes(line):
    # Strip balanced quoted regions from `line` so the caller can count
    # parentheses etc. without being confused by quoted text.  Returns
    # None when a triple-quoted string is unterminated (caller switches
    # to multiline input), or the original line when quoting looks
    # ambiguous or unbalanced.
    origLine = line
    # neutralize escaped backslashes and escaped quotes first so they
    # cannot pair up with real quote characters
    line = line.replace(r'\\', 'X')
    line = re.sub(r'\\\"|\\\'', 'X', line)
    line = _removeQuoteSet(line, '"""', "'''")
    if line is None: return None
    # NOTE(review): an empty-quote pair ("" or '') makes the result
    # ambiguous, so the line is returned unchanged — confirm intent
    if line != _removeQuoteSet(line, '""', "''"):
        return origLine
    line = _removeQuoteSet(line, '"', "'")
    if line is None:
        return origLine
    return line
def _removeQuoteSet(line, quote1, quote2):
ln = len(quote1)
while True:
a = line.find(quote1), quote1
b = line.find(quote2), quote2
if a[0] == -1 and b[0] == -1:
return line
if b[0] == -1 or (b[0] < a[0]):
firstPoint = a[0]
firstQuote = a[1]
else:
firstPoint = b[0]
firstQuote = b[1]
secondPoint = line[(firstPoint+ln):].find(firstQuote)
if secondPoint == -1:
return None
secondPoint += firstPoint
line = line[:firstPoint] + line[(secondPoint+2*ln):]
rearrange a bit, add fail_silently_on_ioerror flag to Epdb class.
#
# Copyright (c) 2004-2005 rPath, Inc.
#
# This program is distributed under the terms of the MIT License as found
# in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/mit-license.php.
#
# This program is distributed in the hope that it will be useful, but
# without any waranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the MIT License for full details.
""" Extended pdb """
import bdb
import stackutil
import inspect
import pdb
import os
import re
try:
import erlcompleter
import readline
except ImportError:
hasReadline = False
else:
hasReadline = True
import socket
import string
import sys
import tempfile
import traceback
from pdb import _saferepr
class Epdb(pdb.Pdb):
_historyPath = os.path.expanduser('~/.epdbhistory')
prompt = '(Epdb) '
multiline_prompt = '| '
fail_silently_on_ioerror = False # if set to True, ignore calls to epdb
# when there is no usable device
# epdb will print to here instead of to sys.stdout,
# and restore stdout when done
__old_stdin = None
__old_stdout = None
_displayList = {}
# used to track the number of times a set_trace has been seen
trace_counts = {'default' : [ True, 0 ]}
def __init__(self):
self._exc_type = None
self._exc_msg = None
self._tb = None
self._config = {}
pdb.Pdb.__init__(self)
if hasReadline:
self._completer = erlcompleter.ECompleter()
self.prompt = '(Epdb) '
self._oldHistory = []
def store_old_history(self):
historyLen = readline.get_current_history_length()
oldHistory = [ readline.get_history_item(x) for x in xrange(historyLen)]
self._oldHistory = oldHistory
readline.clear_history()
def restore_old_history(self):
readline.clear_history()
for line in self._oldHistory:
if line is None:
continue
readline.add_history(line)
self._oldHistory = []
def read_history(self, storeOldHistory=False):
if hasReadline and self._historyPath:
if storeOldHistory:
self.store_old_history()
else:
readline.clear_history()
try:
readline.read_history_file(self._historyPath)
except IOError:
pass
def save_history(self, restoreOldHistory=False):
if hasReadline and self._historyPath:
readline.set_history_length(1000)
try:
readline.write_history_file(self._historyPath)
except IOError:
pass
if restoreOldHistory:
self.restore_old_history()
else:
readline.clear_history()
def do_savestack(self, path):
if 'stack' in self.__dict__:
# when we're saving we always
# start from the top
frame = self.stack[-1][0]
else:
frame = sys._getframe(1)
while frame.f_globals['__name__'] in ('epdb', 'pdb', 'bdb', 'cmd'):
frame = frame.f_back
if path == "":
(tbfd,path) = tempfile.mkstemp('', 'conary-stack-')
output = os.fdopen(tbfd, 'w')
else:
output = open(path, 'w')
stackutil.printStack(frame, output)
print "Stack saved to %s" % path
def do_mailstack(self, arg):
tolist = arg.split()
subject = '[Conary Stacktrace]'
if 'stack' in self.__dict__:
# when we're saving we always
# start from the top
frame = self.stack[-1][0]
else:
frame = sys._getframe(1)
while frame.f_globals['__name__'] in ('epdb', 'pdb', 'bdb', 'cmd'):
frame = frame.f_back
sender = os.environ['USER']
host = socket.getfqdn()
extracontent = None
if self._tb:
lines = traceback.format_exception(self._exc_type, self._exc_msg,
self._tb)
extracontent = string.joinfields(lines, "")
stackutil.mailStack(frame, tolist, sender + '@' + host, subject,
extracontent)
print "Mailed stack to %s" % tolist
def do_printstack(self, arg):
if 'stack' in self.__dict__:
# print only the stack up to our current depth
frame = self.stack[-1][0]
else:
frame = sys._getframe(1)
while frame.f_globals['__name__'] in ('epdb', 'pdb', 'bdb', 'cmd'):
frame = frame.f_back
stackutil.printStack(frame, sys.stderr)
def do_printframe(self, arg):
if not arg:
if 'stack' in self.__dict__:
depth = self.curindex
else:
depth = 0
else:
depth = int(arg)
if 'stack' in self.__dict__:
# start at -1 (top) and go down...
depth = 0 - (depth + 1)
if 'stack' in self.__dict__:
print "Depth = %d" % depth
frame = self.stack[depth][0]
else:
frame = sys._getframe(1)
while frame.f_globals['__name__'] in ('epdb', 'pdb', 'bdb', 'cmd'):
frame = frame.f_back
for i in xrange(0, depth):
frame = frame.f_back
stackutil.printFrame(frame, sys.stderr)
def do_file(self, arg):
frame, lineno = self.stack[self.curindex]
filename = self.canonic(frame.f_code.co_filename)
print "%s:%s" % (filename, lineno)
do_f = do_file
def do_until(self, arg):
try:
int(arg)
except ValueError:
print "Error: only specify line numbers for until"
return 0
filename = self.canonic(self.curframe.f_code.co_filename)
if self.checkline(filename, int(arg)):
self.do_tbreak(arg)
self.set_continue()
return 1
else:
return 0
def do_set(self, arg):
if not arg:
keys = self._config.keys()
keys.sort()
for key in keys:
print "%s: %s" % (key, self._config[key])
else:
args = arg.split(None, 1)
if len(args) == 1:
key = args[0]
if key in self._config:
print "Removing %s: %s" % (key, self._config[key])
del self._config[key]
else:
print "%s: Not set" % (key)
else:
key, value = args
if(hasattr(self, '_set_' + key)):
fn = getattr(self, '_set_' + key)
fn(value)
else:
print "No such config value"
def do_trace_cond(self, args):
args = args.split(' ', 1)
if len(args) not in (1, 2):
print "trace_cond [marker] <cond>"
if len(args) == 1:
cond = args[0]
marker = 'default'
else:
marker, cond = args
if cond == 'None':
cond = None
self.set_trace_cond(marker, cond)
return
try:
cond = int(cond)
self.set_trace_cond(marker, cond)
return
except ValueError:
locals = self.curframe.f_locals
globals = self.curframe.f_globals
try:
cond = eval(cond + '\n', globals, locals)
# test to be sure that what we code is a
# function that can take one arg and return a bool
rv = (type(cond) == bool) or bool(cond(1))
self.set_trace_cond(marker, cond)
except:
print self._reprExc()
do_tc = do_trace_cond
def _set_path(self, paths):
paths = paths.split(' ')
for path in paths:
if path[0] != '/':
print "must give absolute path"
if not os.path.exists(path):
print "Path %s does not exist" % path
if path[-1] == '/':
path = path[:-1]
path = os.path.realpath(path)
if 'path' not in self._config:
self._config['path'] = []
self._config['path'].append(path)
print "Set path to %s" % self._config['path']
def do_list(self, arg):
rel = re.compile(r'^[-+] *[0-9]* *$')
if arg and arg == '.':
self.lineno = None
pdb.Pdb.do_list(self, '')
return
if rel.match(arg):
if arg == '-':
reldist = -7
else:
reldist = int(arg)
if self.lineno is None:
lineno = self.curframe.f_lineno
else:
lineno = self.lineno
lineno += reldist - 5
pdb.Pdb.do_list(self, str(lineno))
self.lastcmd = 'list ' + arg
else:
pdb.Pdb.do_list(self, arg)
do_l = do_list
def default(self, line):
if line[0] == '!': line = line[1:]
if self.handle_directive(line):
return
if line == '<<EOF':
return self.multiline()
if line.strip().endswith(':'):
return self.multiline(line)
if line.endswith('\\'):
return self.multiline(line)
origLine = line
line = _removeQuotes(line)
if line is None:
return self.multiline(origLine)
if line.count('(') > line.count(')'):
return self.multiline(origLine)
try:
self.save_history()
return pdb.Pdb.default(self, line)
finally:
self.read_history()
def multiline(self, firstline=''):
full_input = []
# keep a list of the entries that we've made in history
old_hist = []
if firstline:
print ' ' + firstline
full_input.append(firstline)
while True:
if hasReadline:
# add the current readline position
old_hist.append(readline.get_current_history_length())
if self.use_rawinput:
try:
line = raw_input(self.multiline_prompt)
except EOFError:
line = 'EOF'
else:
self.stdout.write(self.multiline_prompt)
self.stdout.flush()
line = self.stdin.readline()
if not len(line):
line = 'EOF'
else:
line = line[:-1] # chop \n
if line == 'EOF':
break
full_input.append(line)
# add the final readline history position
if hasReadline:
old_hist.append(readline.get_current_history_length())
cmd = '\n'.join(full_input) + '\n'
if hasReadline:
# remove the old, individual readline history entries.
# first remove any duplicate entries
old_hist = sorted(set(old_hist))
# Make sure you do this in reversed order so you move from
# the end of the history up.
for pos in reversed(old_hist):
# get_current_history_length returns pos + 1
readline.remove_history_item(pos - 1)
# now add the full line
readline.add_history(cmd)
locals = self.curframe.f_locals
globals = self.curframe.f_globals
print
self.save_history()
try:
try:
code = compile(cmd, '<stdin>', 'single')
exec code in globals, locals
except:
print self._reprExc()
finally:
self.read_history()
def handle_directive(self, line):
cmd = line.split('?', 1)
if len(cmd) == 1:
return False
cmd, directive = cmd
if directive and directive not in '?cdmpx':
return False
self.do_define(cmd)
if directive == '?':
self.do_doc(cmd)
if directive == 'c':
self.do_showclasses(cmd)
elif directive == 'd':
self.do_showdata(cmd)
elif directive == 'm':
self.do_showmethods(cmd)
elif directive == 'p':
pdb.Pdb.default(self, 'print ' + cmd)
elif directive == 'x':
pdb.Pdb.default(self, 'hex(%s)' % cmd)
return True
    def do_p(self, arg):
        """Print an expression; "expr?<directive>" falls through to default()."""
        cmd = arg.split('?', 1)
        if len(cmd) == 1:
            # plain print: keep the readline history in sync around the call
            self.save_history()
            pdb.Pdb.do_p(self, arg)
            self.read_history()
        else:
            self.default(arg)
    def _showmethods(self, obj):
        """Print a signature line for every method of obj.

        Falls back to printing "<prefix>.<name>" for methods whose
        signature cannot be derived.
        """
        methods = self._getMembersOfType(obj, 'm')
        methods.sort()
        for (methodName, method) in methods:
            try:
                self._define(method)
            except:
                if hasattr(obj, '__name__'):
                    prefix = obj.__name__
                else:
                    prefix = obj.__class__.__name__
                print prefix + '.' + methodName
    def _showdata(self, obj):
        """Print the sorted names of obj's data (non-callable) attributes."""
        data = self._getMembersOfType(obj, 'd')
        data.sort()
        print [ x[0] for x in data]
    def _showclasses(self, obj):
        """Print a definition line for every class attribute of obj."""
        classes = self._getMembersOfType(obj, 'c')
        classes.sort()
        for (className, class_) in classes:
            self._define(class_)
        print
def _objtype(self, obj):
if inspect.isroutine(obj) or type(obj).__name__ == 'method-wrapper':
return 'm'
elif inspect.isclass(obj):
return 'c'
else:
return 'd'
    def _eval(self, arg, fn=None, printExc=True):
        """Evaluate arg in the current frame, optionally post-processing
        the value with fn.

        Returns (True, value) on success or (False, error-repr) on any
        failure; when printExc is set the error repr is also printed.
        """
        locals = self.curframe.f_locals
        globals = self.curframe.f_globals
        try:
            result = eval(arg + '\n', globals, locals)
            if fn is None:
                return True, result
            return True, fn(result)
        except:
            if printExc:
                exc = self._reprExc()
                print exc
                return False, exc
            else:
                return False, self._reprExc()
    def _reprExc(self):
        """Return a one-line '*** Type: message' repr of the active exception."""
        t, v = sys.exc_info()[:2]
        # Python 2 old-style string exceptions have no __name__
        if type(t) == type(''):
            exc_type_name = t
        else: exc_type_name = t.__name__
        return ' '.join(('***', exc_type_name + ':', _saferepr(str(v))))
def _getMembersOfType(self, obj, objType):
names = dir(obj)
members = []
for n in names:
member = getattr(obj, n)
if self._objtype(member) == objType:
members.append((n, member))
return members
    def do_showmethods(self, arg):
        """Command: list the methods of the evaluated expression."""
        self._eval(arg, self._showmethods)
    def do_showclasses(self, arg):
        """Command: list the class attributes of the evaluated expression."""
        self._eval(arg, self._showclasses)
def do_display(self, arg):
if not arg:
self._displayItems()
else:
params = arg.split()
if params[0] == 'list' and not params[1:]:
self._listDisplayItems()
elif params[0] in ('enable','disable','delete'):
try:
nums = [int(x) for x in params[1:]]
except ValueError, msg:
print '***', ValueError, msg
return
missing = []
_nums = []
for num in nums:
if num in self._displayList:
_nums.append(num)
else:
missing.append(str(num))
if params[0] == 'enable':
for num in _nums:
self._displayList[num][0] = True
if params[0] == 'disable':
for num in _nums:
self._displayList[num][0] = False
if params[0] == 'delete':
for num in nums:
del self._displayList[num]
self._listDisplayItems()
if missing:
print "Warning: could not find display num(s) %s" \
% ','.join(missing)
else:
if self._displayList:
displayNum = max(self._displayList) + 1
else:
displayNum = 0
self._displayList[displayNum] = [True, arg]
self._listDisplayItems()
    def _listDisplayItems(self):
        """Print every display expression with its number and enabled state."""
        displayedItem = False
        for num in sorted(self._displayList.iterkeys()):
            if not displayedItem:
                # header printed once, and only when the list is non-empty
                displayedItem = True
                print
                print "Cmds to display:"
            enabled, item = self._displayList[num]
            if not enabled:
                print "%d: %s (disabled)" % (num, item)
            else:
                print "%d: %s" % (num, item)
        if displayedItem:
            print
        else:
            print "*** No items set to display at each cmd"
    def _displayItems(self):
        """Evaluate and print every *enabled* display expression."""
        displayedItem = False
        for num in sorted(self._displayList.iterkeys()):
            enabled, item = self._displayList[num]
            if not enabled:
                continue
            if not displayedItem:
                displayedItem = True
                print
            # the success flag from _eval is ignored; on error `result`
            # already holds the exception repr
            passed, result = self._eval(item, printExc = False)
            print "%d: %s = %s" % (num, item, _saferepr(result))
        if displayedItem:
            print
def do_showdata(self, arg):
result = self._eval(item, self._showdata)
    def _define(self, obj):
        """Print a one-line definition (signature) for obj.

        Handles classes (base list plus __init__ signature), bound
        methods, builtins and plain functions; anything else prints
        only its type.
        """
        if inspect.isclass(obj):
            bases = inspect.getmro(obj)
            bases = [ x.__name__ for x in bases[1:] ]
            if bases:
                bases = ' -- Bases (' + ', '.join(bases) + ')'
            else:
                bases = ''
            if hasattr(obj, '__init__') and inspect.isroutine(obj.__init__):
                try:
                    initfn = obj.__init__.im_func
                    argspec = inspect.getargspec(initfn)
                    # get rid of self from arg list...
                    fnargs = argspec[0][1:]
                    newArgSpec = (fnargs, argspec[1], argspec[2], argspec[3])
                    argspec = inspect.formatargspec(*newArgSpec)
                except TypeError:
                    argspec = '(?)'
            else:
                argspec = ''
            print "Class " + obj.__name__ + argspec + bases
        elif inspect.ismethod(obj) or type(obj).__name__ == 'method-wrapper':
            # Python 2 bound/unbound method introspection
            m_class = obj.im_class
            m_self = obj.im_self
            m_func = obj.im_func
            name = m_class.__name__ + '.' + m_func.__name__
            #if m_self:
            #    name = "<Bound>" + name
            argspec = inspect.formatargspec(*inspect.getargspec(m_func))
            print "%s%s" % (name, argspec)
        elif type(obj).__name__ == 'builtin_function_or_method':
            print obj
        elif inspect.isfunction(obj):
            name = obj.__name__
            argspec = inspect.formatargspec(*inspect.getargspec(obj))
            print "%s%s" % (name, argspec)
        else:
            print type(obj)
    def do_define(self, arg):
        """Command: print the definition of the evaluated expression."""
        self._eval(arg, self._define)
    def do_showdata(self, arg):
        """Command: show the data attributes of the evaluated expression.

        NOTE(review): this def and the two below duplicate earlier
        definitions in the same class body; being later, these are the
        bindings that take effect (this one also correctly uses `arg`
        where the earlier copy referenced an undefined `item`).
        """
        result = self._eval(arg, self._showdata)
    def _define(self, obj):
        """Print a one-line definition (signature) for obj.
        (Duplicate of the earlier _define; identical body.)
        """
        if inspect.isclass(obj):
            bases = inspect.getmro(obj)
            bases = [ x.__name__ for x in bases[1:] ]
            if bases:
                bases = ' -- Bases (' + ', '.join(bases) + ')'
            else:
                bases = ''
            if hasattr(obj, '__init__') and inspect.isroutine(obj.__init__):
                try:
                    initfn = obj.__init__.im_func
                    argspec = inspect.getargspec(initfn)
                    # get rid of self from arg list...
                    fnargs = argspec[0][1:]
                    newArgSpec = (fnargs, argspec[1], argspec[2], argspec[3])
                    argspec = inspect.formatargspec(*newArgSpec)
                except TypeError:
                    argspec = '(?)'
            else:
                argspec = ''
            print "Class " + obj.__name__ + argspec + bases
        elif inspect.ismethod(obj) or type(obj).__name__ == 'method-wrapper':
            m_class = obj.im_class
            m_self = obj.im_self
            m_func = obj.im_func
            name = m_class.__name__ + '.' + m_func.__name__
            #if m_self:
            #    name = "<Bound>" + name
            argspec = inspect.formatargspec(*inspect.getargspec(m_func))
            print "%s%s" % (name, argspec)
        elif type(obj).__name__ == 'builtin_function_or_method':
            print obj
        elif inspect.isfunction(obj):
            name = obj.__name__
            argspec = inspect.formatargspec(*inspect.getargspec(obj))
            print "%s%s" % (name, argspec)
        else:
            print type(obj)
    def do_define(self, arg):
        """Command: print the definition of the evaluated expression."""
        self._eval(arg, self._define)
    def do_doc(self, arg):
        """Command: print documentation for the evaluated expression."""
        self._eval(arg, self._doc)
def _doc(self, result):
docloc = None
if hasattr(result, '__doc__'):
if result.__doc__ is not None:
docstr = result.__doc__
elif inspect.ismethod(result):
bases = inspect.getmro(result.im_class)
found = False
for base in bases:
if hasattr(base, result.__name__):
baseres = getattr(base, result.__name__)
if (hasattr(baseres, '__doc__')
and baseres.__doc__ is not None):
docloc = baseres
docstr = baseres.__doc__
found = True
break
if not found:
docstr = None
else:
docstr = None
print "\"\"\"%s\"\"\"" % docstr
if docloc:
print "(Found doc in %s)" % docloc
if inspect.isclass(result):
if hasattr(result, '__init__'):
self.do_define(arg + '.__init__')
if hasattr(result.__init__, '__doc__'):
print "\"\"\"%s\"\"\"" % result.__init__.__doc__
else:
print "No init function"
    def interaction(self, frame, traceback):
        """Main bdb entry point: set up tty I/O and history, then run the
        command loop for the stopped frame."""
        try:
            self.switch_input_output()
        except IOError:
            # NOTE(review): `True or ...` makes this branch unconditional,
            # so the `raise` below is dead code -- looks like a debugging
            # override of fail_silently_on_ioerror; confirm intent
            if True or self.fail_silently_on_ioerror:
                # pretend like we never saw this breakpoint
                self.set_continue()
                return
            else:
                raise
        self.read_history(storeOldHistory=True)
        self.setup(frame, traceback)
        self._displayItems()
        self.print_stack_entry(self.stack[self.curindex])
        self.cmdloop()
        self.forget()
        self.save_history(restoreOldHistory=True)
        self.restore_input_output()
    def switch_input_output(self):
        """Point stdin/stdout at the controlling tty (may raise IOError)."""
        self.switch_stdout()
        self.switch_stdin()
    def restore_input_output(self):
        """Undo switch_input_output, restoring the previous stdin/stdout."""
        if not self.__old_stdout is None:
            # flush anything written to the tty before swapping back
            sys.stdout.flush()
            # now we reset stdout to be the whatever it was before
            sys.stdout = self.__old_stdout
        if not self.__old_stdin is None:
            sys.stdin = self.__old_stdin
    def switch_stdout(self):
        """Redirect sys.stdout to the controlling tty when it is not one.

        Raises IOError when /dev/tty cannot be opened.
        """
        isatty = False
        try:
            fileno = sys.stdout.fileno()
            isatty = os.isatty(fileno)
        except AttributeError:
            pass
            # sys.stdout is not a regular file,
            # go through some hoops
            # (this is less desirable because it doesn't redirect
            # low-level writes to 1)
        if not isatty:
            sys.stdout.flush()
            self.__old_stdout = sys.stdout
            # if this fails, we'll raise an IOError
            stdout = open('/dev/tty', 'w')
            sys.stdout = stdout
        else:
            self.__old_stdout = None
    def switch_stdin(self):
        """Redirect sys.stdin to the controlling tty when it is not one."""
        isatty = False
        try:
            fileno = sys.stdin.fileno()
            isatty = os.isatty(fileno)
        except AttributeError:
            pass
            # sys.stdin is not a regular file,
            # go through some hoops
            # (this is less desirable because it doesn't redirect
            # low-level writes to 1)
        if not isatty:
            # NOTE(review): flushing *stdin* is unusual -- likely copied
            # from switch_stdout; confirm it is intentional
            sys.stdin.flush()
            self.__old_stdin = sys.stdin
            # if this fails, we'll raise an IOError
            stdin = open('/dev/tty', 'r')
            sys.stdin = stdin
        else:
            self.__old_stdin = None
    # override for cases where we want to search a different
    # path for the file
    def canonic(self, filename):
        """Return a cached canonical (absolute, normalized) path for
        filename, consulting the configured search path when the file
        does not exist locally."""
        canonic = self.fncache.get(filename)
        if not canonic or not os.path.exists(canonic):
            canonic = os.path.abspath(filename)
            canonic = os.path.normcase(canonic)
            if not os.path.exists(canonic):
                if 'path' in self._config:
                    for path in self._config['path']:
                        # NOTE(review): matchFileOnDirPath(curpath, pathdir)
                        # takes the file path first and the directory second;
                        # this call looks like the arguments are swapped --
                        # confirm against that function's contract
                        pos = matchFileOnDirPath(path, canonic)
                        if pos:
                            canonic = pos
                            break
            self.fncache[filename] = canonic
        return canonic
    def reset_trace_count(klass, marker='default'):
        """Reset the hit count recorded for `marker` (no-op when unknown)."""
        tc = klass.trace_counts
        try:
            tc[marker][1] = 0
        except KeyError:
            pass
    reset_trace_count = classmethod(reset_trace_count)
def set_trace_cond(klass, marker='default', cond=None):
""" Sets a condition for set_trace statements that have the
specified marker. A condition can either callable, in
which case it should take one argument, which is the
number of times set_trace(marker) has been called,
or it can be a number, in which case the break will
only be called.
"""
tc = klass.trace_counts
try:
curVals = tc[marker]
except KeyError:
curVals = [ None, 0 ]
tc[marker] = (cond, 0)
set_trace_cond = classmethod(set_trace_cond)
    def set_trace(self, marker='default', skip=0):
        """Conditionally enter the debugger for `marker`.

        The per-marker hit count is incremented on every call; the stored
        condition (True/False/None, a callable taking the count, or a
        numeric threshold) decides whether to actually break.
        """
        tc = Epdb.trace_counts
        try:
            (cond, curCount) = tc[marker]
            curCount += 1
        except KeyError:
            (cond, curCount) = None, 1
        if cond is True:
            rv = True
        elif cond is None or cond is False:
            rv = False
        else:
            try:
                rv = cond(curCount)
            except TypeError:
                # assume that if the condition
                # is not callable, it is an
                # integer above which we are
                # supposed to break
                rv = curCount >= cond
        if rv:
            if marker != 'default':
                self.prompt = '(Epdb [%s]) ' % marker
            self._set_trace(skip=skip+1)
        tc[marker] = [cond, curCount]
    def do_debug(self, arg):
        """Run `arg` under a nested Epdb instance (recursive debugging)."""
        sys.settrace(None)
        globals = self.curframe.f_globals
        locals = self.curframe.f_locals
        p = Epdb()
        p.prompt = "(%s) " % self.prompt.strip()
        print "ENTERING RECURSIVE DEBUGGER"
        sys.call_tracing(p.run, (arg, globals, locals))
        print "LEAVING RECURSIVE DEBUGGER"
        sys.settrace(self.trace_dispatch)
        self.lastcmd = p.lastcmd
    def _set_trace(self, skip=0):
        """Start debugging from here."""
        frame = sys._getframe().f_back
        # go up the specified number of frames
        for i in range(0,skip):
            frame = frame.f_back
        self.reset()
        # install the dispatcher on every frame up the stack
        while frame:
            frame.f_trace = self.trace_dispatch
            self.botframe = frame
            frame = frame.f_back
        self.set_step()
        sys.settrace(self.trace_dispatch)
    # bdb hooks -- thin wrappers that delegate to pdb.Pdb
    def user_call(self, frame, argument_list):
        """This method is called when there is the remote possibility
        that we ever need to stop in this function."""
        if self.stop_here(frame):
            pdb.Pdb.user_call(self, frame, argument_list)
    def user_line(self, frame):
        """This function is called when we stop or break at this line."""
        pdb.Pdb.user_line(self, frame)
    def user_return(self, frame, return_value):
        """This function is called when a return trap is set here."""
        pdb.Pdb.user_return(self, frame, return_value)
    def user_exception(self, frame, exc_info):
        """This function is called if an exception occurs,
        but only if we are to stop at or just below this level."""
        pdb.Pdb.user_exception(self, frame, exc_info)
    def complete(self, text, state):
        """Tab-completion that also matches names in the current frame."""
        if hasReadline:
            # from cmd.py, override completion to match on local variables
            allvars = {}
            globals = self.curframe.f_globals.copy()
            locals = self.curframe.f_locals.copy()
            allvars.update(globals)
            # locals take precedence over globals on name clashes
            allvars.update(locals)
            self._completer.namespace = allvars
            self._completer.use_main_ns = 0
            matches = self._completer.complete(text, state)
            return matches
        else:
            return pdb.Pdb.complete(self, text, state)
def beingTraced():
frame = sys._getframe(0)
while frame:
if not frame.f_trace is None:
return True
frame = frame.f_back
return False
def set_trace_cond(*args, **kw):
    """ Sets a condition for set_trace statements that have the
        specified marker.  A condition can either be callable, in
        which case it should take one argument, which is the
        number of times set_trace(marker) has been called,
        or it can be a number, in which case the break will
        only happen once the count reaches that number.
    """
    for key, val in kw.iteritems():
        Epdb.set_trace_cond(key, val)
    for arg in args:
        # bare positional markers are unconditionally enabled
        Epdb.set_trace_cond(arg, True)
stc = set_trace_cond
def reset_trace_count(marker='default'):
    """ Resets the number a set_trace for a marker has been
        seen to 0. """
    Epdb.reset_trace_count(marker)
def set_trace(marker='default'):
    """ Starts the debugger at the current location. Takes an
        optional argument 'marker' (default 'default'), that
        can be used with the set_trace_cond function to support
        turning on and off tracepoints based on conditionals
    """
    Epdb().set_trace(marker=marker, skip=1)
st = set_trace
def post_mortem(t, exc_type=None, exc_msg=None):
    """Start the debugger at the innermost frame of traceback `t`."""
    p = Epdb()
    p._exc_type = exc_type
    p._exc_msg = exc_msg
    p._tb = t
    p.reset()
    # walk to the innermost (most recent) frame
    while t.tb_next is not None:
        t = t.tb_next
    p.interaction(t.tb_frame, t)
def matchFileOnDirPath(curpath, pathdir):
    """Locate an existing file matching curpath by grafting its trailing
    path elements onto pathdir.

    The tail of curpath is assumed correct while its leading directories
    may be wrong (or missing).  Elements shared between the end of
    pathdir and the end of curpath's directory list are collapsed, e.g.
    /tmp/x/real/path/elem + /all/the/real/ -> /all/the/real/path/elem
    (assuming that combined path exists).  Returns the match or None.
    """
    if os.path.exists(curpath):
        return curpath
    parts = curpath.split('/')[1:]
    fname = parts[-1]
    dirs = parts[:-1]
    if pathdir[-1] == '/':
        pathdir = pathdir[:-1]
    # assume absolute paths
    base = pathdir.split('/')[1:]
    # collapse directory elements overlapping the tail of `base`
    for idx in range(1, min(len(dirs), len(base))):
        # XXX this will not work if you have /usr/foo/foo/filename.py
        if dirs[-1] == base[-idx]:
            dirs = dirs[:-1]
        else:
            break
    # progressively drop (possibly wrong) leading elements of curpath
    while dirs:
        candidate = '/' + '/'.join(base + dirs + [fname])
        if os.path.exists(candidate):
            return candidate
        dirs = dirs[1:]
    candidate = '/' + '/'.join(base + [fname])
    if os.path.exists(candidate):
        return candidate
    return None
def _removeQuotes(line):
    """Strip quoted string literals from line.

    Returns None for an unterminated triple quote, the original line
    when empty-pair or unterminated single/double quotes are present,
    and otherwise the line with all quoted spans removed.
    """
    original = line
    # neutralize escaped backslashes and escaped quotes first so they
    # cannot be mistaken for delimiters
    stripped = line.replace(r'\\', 'X')
    stripped = re.sub(r'\\\"|\\\'', 'X', stripped)
    stripped = _removeQuoteSet(stripped, '"""', "'''")
    if stripped is None:
        return None
    if _removeQuoteSet(stripped, '""', "''") != stripped:
        return original
    stripped = _removeQuoteSet(stripped, '"', "'")
    if stripped is None:
        return original
    return stripped
def _removeQuoteSet(line, quote1, quote2):
ln = len(quote1)
while True:
a = line.find(quote1), quote1
b = line.find(quote2), quote2
if a[0] == -1 and b[0] == -1:
return line
if b[0] == -1 or (b[0] < a[0]):
firstPoint = a[0]
firstQuote = a[1]
else:
firstPoint = b[0]
firstQuote = b[1]
secondPoint = line[(firstPoint+ln):].find(firstQuote)
if secondPoint == -1:
return None
secondPoint += firstPoint
line = line[:firstPoint] + line[(secondPoint+2*ln):]
|
"""
Calculates ....
@copyright: The Broad Institute of MIT and Harvard 2015
"""
import argparse, sys, os
import numpy as np
import pandas as pd
from pandas.tools.pivot import pivot_table
from scipy.stats import fisher_exact
from scipy.stats import ttest_ind
from utils import load_dataframe, design_matrix
sys.path.append(os.path.abspath('./utils'))
from evaluate import design_matrix as design_matrix_full
def calculate(test_filename, def_thresh=None):
df = load_dataframe(test_filename)
X, y = design_matrix(df)
surival_scores = df[df["OUT"] == 0]["SCORE"]
death_scores = df[df["OUT"] == 1]["SCORE"]
# Test if the mean values of the scores among the recovered and deceased patients
# are significantly different:
# http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_ind.html
ttest_stat, ttest_pvalue = ttest_ind(surival_scores, death_scores, equal_var=False)
# Builds the 2x2 contingency table by defining the EPS prediction as:
# Survive: score <= mean(survival) + std(survival)
# Die: otherwise
if def_thresh: surv_thresh = def_thresh
else: surv_thresh = np.mean(surival_scores) + np.std(surival_scores)
df["PRED"] = df["SCORE"].map(lambda x: 0 if x <= surv_thresh else 1)
print df
df["VALUE"] = pd.Series(np.ones(df.shape[0]), index=df.index)
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.tools.pivot.pivot_table.html
counts = pivot_table(df, values="VALUE", index=["OUT"], columns=["PRED"], aggfunc=np.sum, fill_value=0)
# ...and performs a Fisher exact test on the 2x2 contingency table
# http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.fisher_exact.html
fisher_ratio, fisher_pvalue = fisher_exact(counts)
print "Mean,standard deviation of survival score:", np.mean(surival_scores),"",np.std(surival_scores)
print "Mean,standard deviation of death score :", np.mean(death_scores),"",np.std(death_scores)
print "P-value of T-test to for means of survival and death scores :",ttest_pvalue
print ""
print "Observed outcome/EPS prediction contingency table"
print counts
print "P-Value of Fisher exact test on the Outcome/Prediction table:",fisher_pvalue
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("test", nargs='?', default="./data/testing-data.csv",
help="test file")
args = parser.parse_args()
calculate(args.test)
added cutoff option
"""
Calculates a number of basic statistics for the EPS: T-test to check if the difference
between the mean score between surviving and deceased patients, and Fisher test for the
2x2 contingency table with outcome and EPS prediction.
@copyright: The Broad Institute of MIT and Harvard 2015
"""
import argparse, sys, os
import numpy as np
import pandas as pd
from pandas.tools.pivot import pivot_table
from scipy.stats import fisher_exact
from scipy.stats import ttest_ind
from utils import load_dataframe, design_matrix
sys.path.append(os.path.abspath('./utils'))
from evaluate import design_matrix as design_matrix_full
def calculate(test_filename, def_thresh=None):
df = load_dataframe(test_filename)
X, y = design_matrix(df)
surival_scores = df[df["OUT"] == 0]["SCORE"]
death_scores = df[df["OUT"] == 1]["SCORE"]
# Test if the mean values of the scores among the recovered and deceased patients
# are significantly different:
# http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_ind.html
ttest_stat, ttest_pvalue = ttest_ind(surival_scores, death_scores, equal_var=False)
# Builds the 2x2 contingency table by defining the EPS prediction as:
# Survive: score <= mean(survival) + std(survival)
# Die: otherwise
if not def_thresh == None: surv_thresh = def_thresh
else: surv_thresh = np.mean(surival_scores) + np.std(surival_scores)
df["PRED"] = df["SCORE"].map(lambda x: 0 if x <= surv_thresh else 1)
print df
df["VALUE"] = pd.Series(np.ones(df.shape[0]), index=df.index)
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.tools.pivot.pivot_table.html
counts = pivot_table(df, values="VALUE", index=["OUT"], columns=["PRED"], aggfunc=np.sum, fill_value=0)
# ...and performs a Fisher exact test on the 2x2 contingency table
# http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.fisher_exact.html
fisher_ratio, fisher_pvalue = fisher_exact(counts)
print "Mean,standard deviation of survival score:", np.mean(surival_scores),"",np.std(surival_scores)
print "Mean,standard deviation of death score :", np.mean(death_scores),"",np.std(death_scores)
print "P-value of T-test to for means of survival and death scores :",ttest_pvalue
print ""
print "Observed outcome/EPS prediction contingency table"
print counts
print "P-Value of Fisher exact test on the Outcome/Prediction table:",fisher_pvalue
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("test", nargs='?', default="./data/testing-data.csv",
help="test file")
parser.add_argument('-c', '--cutoff', nargs=1, type=int, default=[None],
help="Cutoff for prediction in EPS, score less than or equal to cutoff results in survival prediction")
args = parser.parse_args()
print args
calculate(args.test, args.cutoff[0])
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Therp BV (<http://therp.nl>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
from openerp.osv.orm import except_orm
from account_banking.parsers import models
from account_banking.parsers.convert import str2date
bt = models.mem_bank_transaction
class transaction(models.mem_bank_transaction):
    """Bank transaction populated from a dict of parsed attribute values."""
    def __init__(self, values, *args, **kwargs):
        super(transaction, self).__init__(*args, **kwargs)
        # copy every parsed value onto the instance as an attribute
        for attr in values:
            setattr(self, attr, values[attr])
    def is_valid(self):
        """A transaction is valid when no error message was recorded."""
        return not self.error_message
class parser(models.parser):
code = 'CAMT'
country_code = 'NL'
name = 'Generic CAMT Format'
doc = '''\
CAMT Format parser
'''
    def tag(self, node):
        """
        Return the tag of a node, stripped from its namespace
        """
        # self.ns is '{<namespace>}' including the braces (set in parse())
        return node.tag[len(self.ns):]
def assert_tag(self, node, expected):
"""
Get node's stripped tag and compare with expected
"""
assert self.tag(node) == expected, (
"Expected tag '%s', got '%s' instead" %
(self.tag(node), expected))
    def xpath(self, node, expr):
        """
        Wrap namespaces argument into call to Element.xpath():
            self.xpath(node, './ns:Acct/ns:Id')
        Returns the (possibly empty) list of matching elements.
        """
        # self.ns[1:-1] strips the surrounding braces for the namespace map
        return node.xpath(expr, namespaces={'ns': self.ns[1:-1]})
    def find(self, node, expr):
        """
        Like xpath(), but return only the first result.
        Returns None when there is no match, so callers must test with
        `is not None` (elements themselves can evaluate falsy).
        """
        result = node.xpath(expr, namespaces={'ns': self.ns[1:-1]})
        if result:
            return result[0]
        return None
    def get_balance_type_node(self, node, balance_type):
        """
        :param node: BkToCstmrStmt/Stmt node (its Bal children are searched)
        :param balance_type: one of 'OPBD', 'PRCD', 'ITBD', 'CLBD'
        :return: list of Bal nodes whose type code equals balance_type
        """
        # select the Cd node with the wanted text, then climb back up three
        # levels to the enclosing Bal node
        code_expr = './ns:Bal/ns:Tp/ns:CdOrPrtry/ns:Cd[text()="%s"]/../../..' % balance_type
        return self.xpath(node, code_expr)
def parse_amount(self, node):
"""
Parse an element that contains both Amount and CreditDebitIndicator
:return: signed amount
:returntype: float
"""
sign = -1 if node.find(self.ns + 'CdtDbtInd').text == 'CRDT' else 1
return sign * float(node.find(self.ns + 'Amt').text)
    def get_start_balance(self, node):
        """
        Find the (only) balance node with code OpeningBalance, or
        the only one with code 'PreviousClosingBalance'
        or the first balance node with code InterimBalance in
        the case of preceeding pagination.
        :param node: BkToCstmrStmt/Stmt/Bal node
        """
        # `or` falls through the codes in order of preference
        nodes = (
            self.get_balance_type_node(node, 'OPBD') or
            self.get_balance_type_node(node, 'PRCD') or
            self.get_balance_type_node(node, 'ITBD'))
        return self.parse_amount(nodes[0])
    def get_end_balance(self, node):
        """
        Find the (only) balance node with code ClosingBalance, or
        the second (and last) balance node with code InterimBalance in
        the case of continued pagination.
        :param node: BkToCstmrStmt/Stmt/Bal node
        """
        nodes = (
            self.get_balance_type_node(node, 'CLBD') or
            self.get_balance_type_node(node, 'ITBD'))
        return self.parse_amount(nodes[-1])
    def parse_Stmt(self, node):
        """
        Parse a single Stmt node into a mem_bank_statement.
        """
        statement = models.mem_bank_statement()
        # prefer the IBAN; fall back to the domestic ('Othr') account id
        statement.local_account = (
            self.xpath(node, './ns:Acct/ns:Id/ns:IBAN')[0].text
            if self.xpath(node, './ns:Acct/ns:Id/ns:IBAN')
            else self.xpath(node, './ns:Acct/ns:Id/ns:Othr/ns:Id')[0].text)
        statement.id = "%s-%s" % (
            statement.local_account,
            node.find(self.ns + 'Id').text)
        statement.local_currency = self.xpath(node, './ns:Acct/ns:Ccy')[0].text
        statement.start_balance = self.get_start_balance(node)
        statement.end_balance = self.get_end_balance(node)
        number = 0
        for Ntry in self.xpath(node, './ns:Ntry'):
            transaction_detail = self.parse_Ntry(Ntry)
            if number == 0:
                # Take the statement date from the first transaction
                statement.date = str2date(
                    transaction_detail['execution_date'], "%Y-%m-%d")
            number += 1
            # transactions are numbered '0001', '0002', ...
            transaction_detail['id'] = str(number).zfill(4)
            statement.transactions.append(
                transaction(transaction_detail))
        return statement
    def get_transfer_type(self, node):
        """
        Map entry descriptions to transfer types. To extend with
        proper mapping from BkTxCd/Domn/Cd/Fmly/Cd to transfer types
        if we can get our hands on real life samples.
        For now, leave as a hook for bank specific overrides to map
        properietary codes from BkTxCd/Prtry/Cd.
        :param node: Ntry node
        """
        # default: treat every entry as a generic order
        return bt.ORDER
    def parse_Ntry(self, node):
        """
        :param node: Ntry node
        :return: dict of transaction values for this entry
        """
        entry_details = {
            'execution_date': self.xpath(node, './ns:BookgDt/ns:Dt')[0].text,
            'effective_date': self.xpath(node, './ns:ValDt/ns:Dt')[0].text,
            'transfer_type': self.get_transfer_type(node),
            'transferred_amount': self.parse_amount(node)
        }
        TxDtls = self.xpath(node, './ns:NtryDtls/ns:TxDtls')
        # only enrich with detail data when the entry holds exactly one
        # transaction detail (a batch entry would be ambiguous)
        if len(TxDtls) == 1:
            vals = self.parse_TxDtls(TxDtls[0], entry_details)
        else:
            vals = entry_details
        return vals
    def get_party_values(self, TxDtls):
        """
        Determine to get either the debtor or creditor party node
        and extract the available data from it
        """
        vals = {}
        # for a credit the counterparty is the debtor, and vice versa
        party_type = self.find(
            TxDtls, '../../ns:CdtDbtInd').text == 'CRDT' and 'Dbtr' or 'Cdtr'
        party_node = self.find(TxDtls, './ns:RltdPties/ns:%s' % party_type)
        account_node = self.find(
            TxDtls, './ns:RltdPties/ns:%sAcct/ns:Id' % party_type)
        bic_node = self.find(
            TxDtls,
            './ns:RltdAgts/ns:%sAgt/ns:FinInstnId/ns:BIC' % party_type)
        if party_node is not None:
            # name, country and address are each optional; absent values
            # are recorded as False
            name_node = self.find(party_node, './ns:Nm')
            vals['remote_owner'] = (
                name_node.text if name_node is not None else False)
            country_node = self.find(party_node, './ns:PstlAdr/ns:Ctry')
            vals['remote_owner_country'] = (
                country_node.text if country_node is not None else False)
            address_node = self.find(party_node, './ns:PstlAdr/ns:AdrLine')
            if address_node is not None:
                vals['remote_owner_address'] = [address_node.text]
        if account_node is not None:
            # IBAN preferred; otherwise fall back to a domestic account id
            iban_node = self.find(account_node, './ns:IBAN')
            if iban_node is not None:
                vals['remote_account'] = iban_node.text
                if bic_node is not None:
                    vals['remote_bank_bic'] = bic_node.text
            else:
                domestic_node = self.find(account_node, './ns:Othr/ns:Id')
                vals['remote_account'] = (
                    domestic_node.text if domestic_node is not None else False)
        return vals
    def parse_TxDtls(self, TxDtls, entry_values):
        """
        Parse a single TxDtls node, merging its data into a copy of
        entry_values (the caller's dict is left untouched).
        """
        vals = dict(entry_values)
        unstructured = self.xpath(TxDtls, './ns:RmtInf/ns:Ustrd')
        if unstructured:
            vals['message'] = ' '.join([x.text for x in unstructured])
        structured = self.find(
            TxDtls, './ns:RmtInf/ns:Strd/ns:CdtrRefInf/ns:Ref')
        if structured is not None:
            vals['reference'] = structured.text
        else:
            # no structured creditor reference: reuse the free-form message
            if vals.get('message'):
                vals['reference'] = vals['message']
        vals.update(self.get_party_values(TxDtls))
        return vals
    def check_version(self):
        """
        Sanity check the document's namespace
        """
        # the first, broader check yields a clearer message for files that
        # are not CAMT at all; the second restricts to CAMT.053
        if not self.ns.startswith('{urn:iso:std:iso:20022:tech:xsd:camt.'):
            raise except_orm(
                "Error",
                "This does not seem to be a CAMT format bank statement.")
        if not self.ns.startswith('{urn:iso:std:iso:20022:tech:xsd:camt.053.'):
            raise except_orm(
                "Error",
                "Only CAMT.053 is supported at the moment.")
        return True
    def parse(self, cr, data):
        """
        Parse a CAMT053 XML file
        :return: list of mem_bank_statement objects
        """
        root = etree.fromstring(data)
        # remember the '{namespace}' prefix of this document for tag/xpath
        self.ns = root.tag[:root.tag.index("}") + 1]
        self.check_version()
        self.assert_tag(root[0][0], 'GrpHdr')
        statements = []
        # every sibling following GrpHdr is a statement node
        for node in root[0][1:]:
            statements.append(self.parse_Stmt(node))
        return statements
[FIX] As per spec, an increase is marked CRDT, a decrease DBIT
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Therp BV (<http://therp.nl>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
from openerp.osv.orm import except_orm
from account_banking.parsers import models
from account_banking.parsers.convert import str2date
bt = models.mem_bank_transaction
class transaction(models.mem_bank_transaction):
    """Bank transaction populated from a dict of parsed attribute values."""
    def __init__(self, values, *args, **kwargs):
        super(transaction, self).__init__(*args, **kwargs)
        # copy every parsed value onto the instance as an attribute
        for attr in values:
            setattr(self, attr, values[attr])
    def is_valid(self):
        """A transaction is valid when no error message was recorded."""
        return not self.error_message
class parser(models.parser):
code = 'CAMT'
country_code = 'NL'
name = 'Generic CAMT Format'
doc = '''\
CAMT Format parser
'''
def tag(self, node):
"""
Return the tag of a node, stripped from its namespace
"""
return node.tag[len(self.ns):]
def assert_tag(self, node, expected):
"""
Get node's stripped tag and compare with expected
"""
assert self.tag(node) == expected, (
"Expected tag '%s', got '%s' instead" %
(self.tag(node), expected))
def xpath(self, node, expr):
"""
Wrap namespaces argument into call to Element.xpath():
self.xpath(node, './ns:Acct/ns:Id')
"""
return node.xpath(expr, namespaces={'ns': self.ns[1:-1]})
def find(self, node, expr):
"""
Like xpath(), but return first result if any or else False
Return None to test nodes for being truesy
"""
result = node.xpath(expr, namespaces={'ns': self.ns[1:-1]})
if result:
return result[0]
return None
def get_balance_type_node(self, node, balance_type):
"""
:param node: BkToCstmrStmt/Stmt/Bal node
:param balance type: one of 'OPBD', 'PRCD', 'ITBD', 'CLBD'
"""
code_expr = './ns:Bal/ns:Tp/ns:CdOrPrtry/ns:Cd[text()="%s"]/../../..' % balance_type
return self.xpath(node, code_expr)
    def parse_amount(self, node):
        """
        Parse an element that contains both Amount and CreditDebitIndicator
        (per the CAMT spec an increase is CRDT and a decrease DBIT, so
        only debits are negative)
        :return: signed amount
        :returntype: float
        """
        sign = -1 if node.find(self.ns + 'CdtDbtInd').text == 'DBIT' else 1
        return sign * float(node.find(self.ns + 'Amt').text)
def get_start_balance(self, node):
"""
Find the (only) balance node with code OpeningBalance, or
the only one with code 'PreviousClosingBalance'
or the first balance node with code InterimBalance in
the case of preceeding pagination.
:param node: BkToCstmrStmt/Stmt/Bal node
"""
nodes = (
self.get_balance_type_node(node, 'OPBD') or
self.get_balance_type_node(node, 'PRCD') or
self.get_balance_type_node(node, 'ITBD'))
return self.parse_amount(nodes[0])
def get_end_balance(self, node):
"""
Find the (only) balance node with code ClosingBalance, or
the second (and last) balance node with code InterimBalance in
the case of continued pagination.
:param node: BkToCstmrStmt/Stmt/Bal node
"""
nodes = (
self.get_balance_type_node(node, 'CLBD') or
self.get_balance_type_node(node, 'ITBD'))
return self.parse_amount(nodes[-1])
def parse_Stmt(self, node):
    """
    Parse a single Stmt node into a mem_bank_statement model.

    Reads the local account (IBAN preferred, domestic id otherwise),
    currency, start/end balances and every Ntry (entry) child.

    :param node: BkToCstmrStmt/Stmt node
    :return: populated mem_bank_statement instance
    """
    statement = models.mem_bank_statement()
    # Prefer the IBAN; fall back to the domestic ('Othr') account id
    statement.local_account = (
        self.xpath(node, './ns:Acct/ns:Id/ns:IBAN')[0].text
        if self.xpath(node, './ns:Acct/ns:Id/ns:IBAN')
        else self.xpath(node, './ns:Acct/ns:Id/ns:Othr/ns:Id')[0].text)
    # Statement id is made unique by prefixing the account number
    statement.id = "%s-%s" % (
        statement.local_account,
        node.find(self.ns + 'Id').text)
    statement.local_currency = self.xpath(node, './ns:Acct/ns:Ccy')[0].text
    statement.start_balance = self.get_start_balance(node)
    statement.end_balance = self.get_end_balance(node)
    number = 0
    for Ntry in self.xpath(node, './ns:Ntry'):
        transaction_detail = self.parse_Ntry(Ntry)
        if number == 0:
            # Take the statement date from the first transaction
            statement.date = str2date(
                transaction_detail['execution_date'], "%Y-%m-%d")
        number += 1
        # Transaction ids are 1-based, zero-padded sequence numbers
        transaction_detail['id'] = str(number).zfill(4)
        statement.transactions.append(
            transaction(transaction_detail))
    return statement
def get_transfer_type(self, node):
    """
    Map entry descriptions to transfer types. To extend with
    proper mapping from BkTxCd/Domn/Cd/Fmly/Cd to transfer types
    if we can get our hands on real life samples.
    For now, leave as a hook for bank specific overrides to map
    proprietary codes from BkTxCd/Prtry/Cd.

    :param node: Ntry node (currently unused by this default hook)
    :return: the generic bt.ORDER transfer type
    """
    return bt.ORDER
def parse_Ntry(self, node):
    """
    Parse a single Ntry (entry) node into a dict of transaction values.

    When the entry holds exactly one TxDtls child, its details are
    merged in via parse_TxDtls(); otherwise only the entry-level
    values are returned.

    :param node: Ntry node
    """
    entry_details = {
        'execution_date': self.xpath(node, './ns:BookgDt/ns:Dt')[0].text,
        'effective_date': self.xpath(node, './ns:ValDt/ns:Dt')[0].text,
        'transfer_type': self.get_transfer_type(node),
        'transferred_amount': self.parse_amount(node),
    }
    details = self.xpath(node, './ns:NtryDtls/ns:TxDtls')
    if len(details) == 1:
        return self.parse_TxDtls(details[0], entry_details)
    return entry_details
def get_party_values(self, TxDtls):
    """
    Determine whether to take the debtor or creditor party node (the
    counterparty: debtor for a credit entry and vice versa) and
    extract the available data from it.

    Returned keys (when found): remote_owner, remote_owner_country,
    remote_owner_address, remote_account, remote_bank_bic. Missing
    optional values are recorded as False.
    """
    vals = {}
    # For a 'CRDT' entry the counterparty is the debtor ('Dbtr')
    party_type = self.find(
        TxDtls, '../../ns:CdtDbtInd').text == 'CRDT' and 'Dbtr' or 'Cdtr'
    party_node = self.find(TxDtls, './ns:RltdPties/ns:%s' % party_type)
    account_node = self.find(
        TxDtls, './ns:RltdPties/ns:%sAcct/ns:Id' % party_type)
    bic_node = self.find(
        TxDtls,
        './ns:RltdAgts/ns:%sAgt/ns:FinInstnId/ns:BIC' % party_type)
    if party_node is not None:
        name_node = self.find(party_node, './ns:Nm')
        vals['remote_owner'] = (
            name_node.text if name_node is not None else False)
        country_node = self.find(party_node, './ns:PstlAdr/ns:Ctry')
        vals['remote_owner_country'] = (
            country_node.text if country_node is not None else False)
        address_node = self.find(party_node, './ns:PstlAdr/ns:AdrLine')
        if address_node is not None:
            # Only the first address line is kept, as a one-element list
            vals['remote_owner_address'] = [address_node.text]
    if account_node is not None:
        # Prefer the IBAN; fall back to a domestic account number.
        # NOTE(review): the BIC is only recorded alongside an IBAN —
        # reconstructed nesting, confirm against upstream parser.
        iban_node = self.find(account_node, './ns:IBAN')
        if iban_node is not None:
            vals['remote_account'] = iban_node.text
            if bic_node is not None:
                vals['remote_bank_bic'] = bic_node.text
        else:
            domestic_node = self.find(account_node, './ns:Othr/ns:Id')
            vals['remote_account'] = (
                domestic_node.text if domestic_node is not None else False)
    return vals
def parse_TxDtls(self, TxDtls, entry_values):
    """
    Parse a single TxDtls node, merging its values into a copy of
    `entry_values` (the caller's dict is never mutated).
    """
    vals = dict(entry_values)
    unstructured = self.xpath(TxDtls, './ns:RmtInf/ns:Ustrd')
    if unstructured:
        vals['message'] = ' '.join(x.text for x in unstructured)
    structured = self.find(
        TxDtls, './ns:RmtInf/ns:Strd/ns:CdtrRefInf/ns:Ref')
    if structured is not None:
        vals['reference'] = structured.text
    elif vals.get('message'):
        # Fall back to the free-text message as the reference
        vals['reference'] = vals['message']
    vals.update(self.get_party_values(TxDtls))
    return vals
def check_version(self):
    """
    Sanity check the document's namespace: it must be a CAMT
    namespace, and specifically CAMT.053.

    :return: True when the namespace is acceptable
    :raises except_orm: for non-CAMT or non-053 documents
    """
    camt_prefix = '{urn:iso:std:iso:20022:tech:xsd:camt.'
    if not self.ns.startswith(camt_prefix):
        raise except_orm(
            "Error",
            "This does not seem to be a CAMT format bank statement.")
    if not self.ns.startswith(camt_prefix + '053.'):
        raise except_orm(
            "Error",
            "Only CAMT.053 is supported at the moment.")
    return True
def parse(self, cr, data):
    """
    Parse a CAMT053 XML file.

    :param cr: database cursor (unused in this method, kept for the
        parser API)
    :param data: raw XML string/bytes of the bank statement file
    :return: list of parsed statement objects, one per Stmt node
    """
    root = etree.fromstring(data)
    # The namespace, braces included, is taken from the root tag itself
    self.ns = root.tag[:root.tag.index("}") + 1]
    self.check_version()
    # The first child of the customer statement must be the group header
    self.assert_tag(root[0][0], 'GrpHdr')
    statements = []
    # Every sibling after GrpHdr is a Stmt node
    for node in root[0][1:]:
        statements.append(self.parse_Stmt(node))
    return statements
|
# -*- coding: utf-8 -*-
"""
formlayout
==========
Module creating Qt form dialogs/layouts to edit various type of parameters
formlayout License Agreement (MIT License)
------------------------------------------
Copyright (c) 2009-2015 Pierre Raybaut
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
__version__ = '2.0.0alpha'
__license__ = __doc__
DEBUG_FORMLAYOUT = False
import os
import sys
import datetime
STDERR = sys.stderr
# ---+- PyQt-PySide compatibility -+----
# The QT_API environment variable selects the binding; default to PyQt4.
_modname = os.environ.setdefault('QT_API', 'pyqt')
assert _modname in ('pyqt', 'pyqt5', 'pyside')
if os.environ['QT_API'].startswith('pyqt'):
    try:
        if os.environ['QT_API'] == 'pyqt5':
            import PyQt5  # analysis:ignore
        else:
            import PyQt4  # analysis:ignore
    except ImportError:
        # Switching to PySide
        os.environ['QT_API'] = _modname = 'pyside'
        # NOTE(review): indentation was lost in this file; the PySide
        # fallback import is assumed to be nested in this except block —
        # confirm against upstream formlayout.
        try:
            import PySide  # analysis:ignore
        except ImportError:
            raise ImportError("formlayout requires PyQt4, PyQt5 or PySide")
if os.environ['QT_API'] == 'pyqt':
    # PyQt4: old-style signal API available through SIGNAL
    try:
        from PyQt4.QtGui import QFormLayout
    except ImportError:
        raise ImportError("formlayout requires PyQt4, PyQt5 or PySide")
    from PyQt4.QtGui import *  # analysis:ignore
    from PyQt4.QtCore import *  # analysis:ignore
    from PyQt4.QtCore import pyqtSlot as Slot
    from PyQt4.QtCore import pyqtProperty as Property
if os.environ['QT_API'] == 'pyqt5':
    from PyQt5.QtWidgets import *  # analysis:ignore
    from PyQt5.QtGui import *  # analysis:ignore
    from PyQt5.QtCore import *  # analysis:ignore
    from PyQt5.QtCore import pyqtSignal as Signal  # analysis:ignore
    from PyQt5.QtCore import pyqtSlot as Slot  # analysis:ignore
    from PyQt5.QtCore import pyqtProperty as Property  # analysis:ignore
    # PyQt5 has no old-style signals: SIGNAL is None flags new-style use
    SIGNAL = None  # analysis:ignore
if os.environ['QT_API'] == 'pyside':
    from PySide.QtGui import *  # analysis:ignore
    from PySide.QtCore import *  # analysis:ignore
# ---+- Python 2-3 compatibility -+----
# Use sys.version_info rather than parsing sys.version: the string form
# compares a single character and would break for double-digit major
# versions, while version_info is the documented, robust check.
PY2 = sys.version_info[0] == 2
if PY2:
    # Python 2: build a unicode object, decoding escape sequences
    import codecs
    def u(obj):
        """Make unicode object"""
        return codecs.unicode_escape_decode(obj)[0]
else:
    # Python 3: all strings are already unicode
    def u(obj):
        """Return string as it is"""
        return obj
def is_text_string(obj):
    """Return True if `obj` is a text string, False if it is anything else,
    like binary data (Python 3) or QString (Python 2, PyQt API #1)"""
    if PY2:
        # Python 2: both str and unicode derive from basestring
        return isinstance(obj, basestring)
    # Python 3: str is the only text type
    return isinstance(obj, str)
def is_binary_string(obj):
    """Return True if `obj` is a binary string, False if it is anything else"""
    # The binary type is str on Python 2, bytes on Python 3
    binary_type = str if PY2 else bytes
    return isinstance(obj, binary_type)
def is_string(obj):
    """Return True if `obj` is a text or binary Python string object,
    False if it is anything else, like a QString (Python 2, PyQt API #1)"""
    return any((is_text_string(obj), is_binary_string(obj)))
def to_text_string(obj, encoding=None):
    """Convert `obj` to (unicode) text string, decoding binary data with
    `encoding` when given."""
    if PY2:
        # Python 2: build a unicode object
        if encoding is None:
            return unicode(obj)
        return unicode(obj, encoding)
    # Python 3
    if encoding is None:
        return str(obj)
    if isinstance(obj, str):
        # In case this function is not used properly, this could happen
        return obj
    return str(obj, encoding)
class ColorButton(QPushButton):
    """
    Color choosing push button

    Shows the current color as its icon and opens a QColorDialog on
    click; emits colorChanged(QColor) when the color is modified.
    """
    __pyqtSignals__ = ("colorChanged(QColor)",)
    # New-style signal for PyQt5/PySide (SIGNAL is None in that case)
    if SIGNAL is None:
        colorChanged = Signal("QColor")
    def __init__(self, parent=None):
        QPushButton.__init__(self, parent)
        self.setFixedSize(20, 20)
        self.setIconSize(QSize(12, 12))
        if SIGNAL is None:
            # New-style connection (PyQt5/PySide)
            self.clicked.connect(self.choose_color)
        else:
            # Old-style connection (PyQt4)
            self.connect(self, SIGNAL("clicked()"), self.choose_color)
        # Current color; starts out as an invalid QColor
        self._color = QColor()
    def choose_color(self):
        # Open the color dialog preset with the current color
        color = QColorDialog.getColor(self._color, self.parentWidget())
        if color.isValid():
            self.set_color(color)
    def get_color(self):
        return self._color
    @Slot(QColor)
    def set_color(self, color):
        if color != self._color:
            self._color = color
            if SIGNAL is None:
                self.colorChanged.emit(self._color)
            else:
                self.emit(SIGNAL("colorChanged(QColor)"), self._color)
            # Refresh the button icon to show the new color
            pixmap = QPixmap(self.iconSize())
            pixmap.fill(color)
            self.setIcon(QIcon(pixmap))
    # Qt property so the color can be accessed as .color
    color = Property("QColor", get_color, set_color)
def text_to_qcolor(text):
    """
    Create a QColor from specified string
    Avoid warning from Qt when an invalid QColor is instantiated

    Returns an invalid (default) QColor when `text` is neither a valid
    '#rrggbb' hex string nor a known Qt color name.
    """
    color = QColor()
    if not is_string(text):  # testing for QString (PyQt API#1)
        text = str(text)
    if not is_text_string(text):
        # Binary strings cannot name a color; return the invalid QColor
        return color
    if text.startswith('#') and len(text) == 7:
        # Validate the '#rrggbb' hex form character by character
        correct = '#0123456789abcdef'
        for char in text:
            if char.lower() not in correct:
                return color
    elif text not in list(QColor.colorNames()):
        # Not a known named color
        return color
    color.setNamedColor(text)
    return color
class ColorLayout(QHBoxLayout):
    """Color-specialized QLineEdit layout: a line edit holding the color
    name/hex kept in sync with a ColorButton swatch."""
    def __init__(self, color, parent=None):
        QHBoxLayout.__init__(self)
        assert isinstance(color, QColor)
        self.lineedit = QLineEdit(color.name(), parent)
        if SIGNAL is None:
            self.lineedit.textChanged.connect(self.update_color)
        else:
            self.connect(self.lineedit, SIGNAL("textChanged(QString)"),
                         self.update_color)
        self.addWidget(self.lineedit)
        self.colorbtn = ColorButton(parent)
        self.colorbtn.color = color
        if SIGNAL is None:
            self.colorbtn.colorChanged.connect(self.update_text)
        else:
            self.connect(self.colorbtn, SIGNAL("colorChanged(QColor)"),
                         self.update_text)
        self.addWidget(self.colorbtn)
    def update_color(self, text):
        # User edited the text -> update the button swatch if valid
        color = text_to_qcolor(text)
        if color.isValid():
            self.colorbtn.color = color
    def update_text(self, color):
        # Color picked on the button -> reflect its name in the line edit
        self.lineedit.setText(color.name())
    def text(self):
        return self.lineedit.text()
    def setStyleSheet(self, style):
        # Propagate the style sheet to both child widgets
        self.lineedit.setStyleSheet(style)
        self.colorbtn.setStyleSheet(style)
class FileLayout(QHBoxLayout):
    """File-specialized QLineEdit layout.

    `value` is 'dir' (choose a directory), 'file' (choose any file) or
    'file:<filter>' (choose a file matching the given name filter).
    """
    def __init__(self, value, parent=None):
        QHBoxLayout.__init__(self)
        self.value = value
        self.lineedit = QLineEdit('', parent)
        self.addWidget(self.lineedit)
        self.filebtn = QPushButton('Browse')
        self.filebtn.clicked.connect(self.getfile)
        self.addWidget(self.filebtn)
    def getfile(self):
        """Open the appropriate picker and copy the choice into the
        line edit."""
        # Bug fix: `name` was unbound when self.value matched neither
        # branch, raising UnboundLocalError on click.
        name = None
        if self.value.startswith('file'):
            name = QFileDialog.getOpenFileName(None, 'Select file',
                                               filter=self.value[5:])
            # Bug fix: PyQt5/PySide2 return a (filename, selected_filter)
            # tuple here; keep only the filename.
            if isinstance(name, tuple):
                name = name[0]
        elif self.value == 'dir':
            name = QFileDialog.getExistingDirectory(None, 'Select directory')
        if name:
            self.lineedit.setText(name)
    def text(self):
        return self.lineedit.text()
    def setStyleSheet(self, style):
        # Propagate the style sheet to both child widgets
        self.lineedit.setStyleSheet(style)
        self.filebtn.setStyleSheet(style)
class RadioLayout(QVBoxLayout):
    """Radio buttons layout with QButtonGroup

    `buttons` is a list of labels; `index` selects the initially
    checked one. currentIndex() mimics the QComboBox API so the two
    widgets are interchangeable in FormWidget.
    """
    def __init__(self, buttons, index, parent=None):
        QVBoxLayout.__init__(self)
        self.group = QButtonGroup()
        for i, button in enumerate(buttons):
            btn = QRadioButton(button)
            if i == index:
                btn.setChecked(True)
            self.addWidget(btn)
            # The list position doubles as the button's group id
            self.group.addButton(btn, i)
    def currentIndex(self):
        # Id of the checked button, or -1 when none is checked
        return self.group.checkedId()
    def setStyleSheet(self, style):
        for btn in self.group.buttons():
            btn.setStyleSheet(style)
def font_is_installed(font):
    """Check if font is installed

    Returns the (possibly empty) list of matching family names, so the
    result is truthy exactly when the font exists."""
    return [fam for fam in QFontDatabase().families()
            if to_text_string(fam) == font]
def tuple_to_qfont(tup):
    """
    Create a QFont from tuple:
        (family [string], size [int], italic [bool], bold [bool])
    Return None when `tup` does not have exactly that shape (used as a
    type probe by FormWidget).
    """
    # NOTE(review): on Python 2 the isinstance(tup[0], str) check would
    # reject unicode family names — confirm whether that is intended.
    if not isinstance(tup, tuple) or len(tup) != 4 \
       or not isinstance(tup[0], str) \
       or not isinstance(tup[1], int) \
       or not isinstance(tup[2], bool) \
       or not isinstance(tup[3], bool):
        return None
    font = QFont()
    family, size, italic, bold = tup
    font.setFamily(family)
    font.setPointSize(size)
    font.setItalic(italic)
    font.setBold(bold)
    return font
def qfont_to_tuple(font):
    """Inverse of tuple_to_qfont(): convert a QFont instance into a
    (family, size, italic, bold) tuple."""
    return (to_text_string(font.family()), int(font.pointSize()),
            font.italic(), font.bold())
class FontLayout(QGridLayout):
    """Font selection

    Grid of widgets (family combo, editable size combo, italic/bold
    checkboxes) initialized from a (family, size, italic, bold) tuple.
    """
    def __init__(self, value, parent=None):
        QGridLayout.__init__(self)
        if not font_is_installed(value[0]):
            print("Warning: Font `%s` is not installed" % value[0],
                  file=sys.stderr)
        font = tuple_to_qfont(value)
        assert font is not None
        # Font family
        self.family = QFontComboBox(parent)
        self.family.setCurrentFont(font)
        self.addWidget(self.family, 0, 0, 1, -1)
        # Font size
        self.size = QComboBox(parent)
        self.size.setEditable(True)
        # Common point sizes; make sure the current one is selectable
        sizelist = list(range(6, 12)) + list(range(12, 30, 2)) + [36, 48, 72]
        size = font.pointSize()
        if size not in sizelist:
            sizelist.append(size)
            sizelist.sort()
        self.size.addItems([str(s) for s in sizelist])
        self.size.setCurrentIndex(sizelist.index(size))
        self.addWidget(self.size, 1, 0)
        # Italic or not
        self.italic = QCheckBox(self.tr("Italic"), parent)
        self.italic.setChecked(font.italic())
        self.addWidget(self.italic, 1, 1)
        # Bold or not
        self.bold = QCheckBox(self.tr("Bold"), parent)
        self.bold.setChecked(font.bold())
        self.addWidget(self.bold, 1, 2)
    def get_font(self):
        """Return the current selection as a (family, size, italic,
        bold) tuple."""
        font = self.family.currentFont()
        font.setItalic(self.italic.isChecked())
        font.setBold(self.bold.isChecked())
        font.setPointSize(int(self.size.currentText()))
        return qfont_to_tuple(font)
    def setStyleSheet(self, style):
        # Propagate the style sheet to every child widget
        self.family.setStyleSheet(style)
        self.size.setStyleSheet(style)
        self.italic.setStyleSheet(style)
        self.bold.setStyleSheet(style)
def is_float_valid(edit):
    """Return True if `edit`'s current text is acceptable to its
    QDoubleValidator."""
    text = edit.text()
    # validate() returns a (state, ...) tuple; 0 is the cursor position
    state = edit.validator().validate(text, 0)[0]
    return state == QDoubleValidator.Acceptable
def is_required_valid(edit):
    """Return True if required widget `edit` holds a value; otherwise
    highlight it with an orange background and return False."""
    bgd_color = "background-color:rgb(255, 175, 90);"
    if isinstance(edit, (QLineEdit, FileLayout)):
        if edit.text():
            edit.setStyleSheet("")
            return True
        else:
            edit.setStyleSheet(bgd_color)
    elif isinstance(edit, (QComboBox, RadioLayout)):
        # -1 means nothing is selected
        if edit.currentIndex() != -1:
            edit.setStyleSheet("")
            return True
        else:
            edit.setStyleSheet(bgd_color)
    elif isinstance(edit, QTextEdit):
        if edit.toPlainText():
            edit.setStyleSheet("")
            return True
        else:
            edit.setStyleSheet(bgd_color)
    return False
class FormWidget(QWidget):
    """Widget building a QFormLayout from a (label, value) datalist.

    The type of each value determines the editor widget created for it
    (see setup()); get() reads every widget back into plain values and
    serializes them according to self.result.
    """
    def __init__(self, data, comment="", parent=None):
        QWidget.__init__(self, parent)
        from copy import deepcopy
        # Deep copy so widget edits never mutate the caller's data
        self.data = deepcopy(data)
        self.result = parent.result
        self.widgets = []
        self.formlayout = QFormLayout(self)
        if comment:
            self.formlayout.addRow(QLabel(comment))
            self.formlayout.addRow(QLabel(" "))
        if DEBUG_FORMLAYOUT:
            print("\n"+("*"*80))
            print("DATA:", self.data)
            print("*"*80)
            print("COMMENT:", comment)
            print("*"*80)
    def get_dialog(self):
        """Return FormDialog instance"""
        # Walk up the parent chain until the enclosing QDialog is found
        dialog = self.parent()
        while not isinstance(dialog, QDialog):
            dialog = dialog.parent()
        return dialog
    def setup(self):
        """Create one editor widget per (label, value) item."""
        for label, value in self.data:
            if DEBUG_FORMLAYOUT:
                print("value:", value)
            if label is None and value is None:
                # Separator: (None, None)
                separator = QFrame()
                separator.setFrameShape(QFrame.HLine)
                separator.setFrameShadow(QFrame.Sunken)
                self.formlayout.addRow(separator)
                self.widgets.append(None)
                continue
            if label is None:
                # No label: either an image (by file extension) or a comment
                img_fmt = tuple(['.'+str(bytes(ext).decode()) for ext
                                 in QImageReader.supportedImageFormats()])
                if value.endswith(img_fmt):
                    # Image
                    pixmap = QPixmap(value)
                    lab = QLabel()
                    lab.setPixmap(pixmap)
                    self.formlayout.addRow(lab)
                else:
                    # Comment
                    self.formlayout.addRow(QLabel(value))
                self.widgets.append(None)
                continue
            if tuple_to_qfont(value) is not None:
                # (family, size, italic, bold) tuple -> font selector
                field = FontLayout(value, self)
            elif text_to_qcolor(value).isValid():
                # Color name or '#rrggbb' string -> color selector
                field = ColorLayout(QColor(value), self)
            elif is_text_string(value):
                if value in ['file', 'dir'] or value.startswith('file:'):
                    field = FileLayout(value, self)
                elif value == 'password':
                    field = QLineEdit(self)
                    field.setEchoMode(QLineEdit.Password)
                elif '\n' in value:
                    # Multiline text -> QTextEdit; newlines are stored as
                    # paragraph separators (U+2029) inside the widget
                    if value == '\n':
                        value = ''
                    for linesep in (os.linesep, '\n'):
                        if linesep in value:
                            value = value.replace(linesep, u("\u2029"))
                    field = QTextEdit(value, self)
                else:
                    field = QLineEdit(value, self)
            elif isinstance(value, (list, tuple)):
                # Choice: first element is the selected index/value; a
                # list gives a combo box, a tuple gives radio buttons
                save_value = value
                value = list(value)  # always needed to protect self.data
                selindex = value.pop(0)
                if isinstance(selindex, int):
                    # Caller supplies a 1-based index
                    selindex = selindex - 1
                if isinstance(value[0], (list, tuple)):
                    # (key, value) couples: display values, return keys
                    keys = [ key for key, _val in value ]
                    value = [ val for _key, val in value ]
                else:
                    keys = value
                if selindex in value:
                    selindex = value.index(selindex)
                elif selindex in keys:
                    selindex = keys.index(selindex)
                elif not isinstance(selindex, int):
                    print("Warning: '%s' index is invalid (label: "\
                          "%s, value: %s)" % (selindex, label, value),
                          file=STDERR)
                    selindex = -1
                if isinstance(save_value, list):
                    field = QComboBox(self)
                    field.addItems(value)
                    field.setCurrentIndex(selindex)
                elif isinstance(save_value, tuple):
                    field = RadioLayout(value, selindex, self)
            elif isinstance(value, bool):
                field = QCheckBox(self)
                field.setCheckState(Qt.Checked if value else Qt.Unchecked)
            elif isinstance(value, float):
                # Locale-aware float editor with live validation
                field = QLineEdit(QLocale().toString(value), self)
                field.setValidator(QDoubleValidator(field))
                dialog = self.get_dialog()
                dialog.register_float_field(field)
                if SIGNAL is None:
                    field.textChanged.connect(dialog.float_valid)
                else:
                    self.connect(field, SIGNAL('textChanged(QString)'),
                                 dialog.float_valid)
            elif isinstance(value, int):
                field = QSpinBox(self)
                field.setRange(-1e9, 1e9)
                field.setValue(value)
            elif isinstance(value, datetime.datetime):
                field = QDateTimeEdit(self)
                field.setDateTime(value)
            elif isinstance(value, datetime.date):
                field = QDateEdit(self)
                field.setDate(value)
            elif isinstance(value, datetime.time):
                field = QTimeEdit(self)
                field.setTime(value)
            else:
                # Fallback: edit the repr() and eval() it back in get()
                field = QLineEdit(repr(value), self)
            # Eventually extracting tooltip from label and processing it
            index = label.find('::')
            if index != -1:
                label, tooltip = label[:index], label[index+2:]
                field.setToolTip(tooltip)
            # Eventually catching the 'required' feature and processing it
            if label.endswith(' *'):
                label = label[:-1] + '<font color="red">*</font>'
                if isinstance(field, (QLineEdit, QTextEdit, QComboBox,
                                      FileLayout, RadioLayout)):
                    dialog = self.get_dialog()
                    dialog.register_required_field(field)
                else:
                    print("Warning: '%s' doesn't support 'required' feature"\
                          % type(field), file=STDERR)
                # Revalidate the dialog whenever the field changes
                if isinstance(field, QLineEdit):
                    if SIGNAL is None:
                        field.textChanged.connect(dialog.required_valid)
                    else:
                        self.connect(field, SIGNAL('textChanged(QString)'),
                                     dialog.required_valid)
                elif isinstance(field, QTextEdit):
                    if SIGNAL is None:
                        field.textChanged.connect(dialog.required_valid)
                    else:
                        self.connect(field, SIGNAL('textChanged()'),
                                     dialog.required_valid)
                elif isinstance(field, QComboBox):
                    if SIGNAL is None:
                        field.currentIndexChanged.connect(\
                                                dialog.required_valid)
                    else:
                        self.connect(field,
                                     SIGNAL('currentIndexChanged(QString)'),
                                     dialog.required_valid)
                elif isinstance(field, FileLayout):
                    if SIGNAL is None:
                        field.lineedit.textChanged.connect(\
                                                dialog.required_valid)
                    else:
                        self.connect(field.lineedit,
                                     SIGNAL('textChanged(QString)'),
                                     dialog.required_valid)
                elif isinstance(field, RadioLayout):
                    if SIGNAL is None:
                        field.group.buttonClicked.connect(\
                                                dialog.required_valid)
                    else:
                        self.connect(field.group, SIGNAL('buttonClicked(int)'),
                                     dialog.required_valid)
            self.formlayout.addRow(label, field)
            self.widgets.append(field)
    def get(self):
        """Read every widget back and serialize per self.result
        ('list', 'dict', 'OrderedDict', 'JSON' or 'XML')."""
        valuelist = []
        for index, (label, value) in enumerate(self.data):
            field = self.widgets[index]
            if label is None:
                # Separator / Comment
                continue
            elif tuple_to_qfont(value) is not None:
                value = field.get_font()
            elif is_text_string(value):
                if isinstance(field, QTextEdit):
                    # Convert paragraph separators back to OS newlines
                    value = to_text_string(field.toPlainText()
                                           ).replace(u("\u2029"), os.linesep)
                else:
                    value = to_text_string(field.text())
            elif isinstance(value, (list, tuple)):
                index = int(field.currentIndex())
                if isinstance(value[0], int):
                    # Return an int index, if initialization was an int
                    value = index + 1
                else:
                    value = value[index+1]
                    if isinstance(value, (list, tuple)):
                        # (key, value) couple: return the key
                        value = value[0]
            elif isinstance(value, bool):
                value = field.checkState() == Qt.Checked
            elif isinstance(value, float):
                value = float(QLocale().toDouble(field.text())[0])
            elif isinstance(value, int):
                value = int(field.value())
            elif isinstance(value, datetime.datetime):
                value = field.dateTime()
                try:
                    value = value.toPyDateTime()  # PyQt
                except AttributeError:
                    value = value.toPython()  # PySide
            elif isinstance(value, datetime.date):
                value = field.date()
                try:
                    value = value.toPyDate()  # PyQt
                except AttributeError:
                    value = value.toPython()  # PySide
            elif isinstance(value, datetime.time):
                value = field.time()
                try:
                    value = value.toPyTime()  # PyQt
                except AttributeError:
                    value = value.toPython()  # PySide
            else:
                value = eval(str(field.text()))
            valuelist.append((label, value))
        if self.result == 'list':
            return [value for label, value in valuelist]
        elif self.result in ['dict', 'OrderedDict', 'JSON']:
            if self.result == 'dict':
                dic = {}
            else:
                dic = OrderedDict()
            for label, value in valuelist:
                if label in dic.keys():
                    print("Warning: '%s' is duplicate and '%s' doesn't "\
                          "handle it, you should use 'list' or 'XML' instead"\
                          % (label, self.result), file=STDERR)
                if isinstance(value, (datetime.date, datetime.time,
                                      datetime.datetime)) and self.result == 'JSON':
                    # datetimes are not JSON-serializable; stringify them
                    dic[label] = str(value)
                else:
                    dic[label] = value
            if self.result == 'JSON':
                return json.dumps(dic)
            else:
                return dic
        elif self.result == 'XML':
            form = ET.Element('Form')
            for label, value in valuelist:
                required = 'false'
                if label.endswith(' *'):
                    label = label[:-2]
                    required = 'true'
                child = ET.SubElement(form, label)
                if isinstance(value, datetime.datetime):
                    child.text = value.isoformat()
                else:
                    child.text = str(value)
                child.attrib['required'] = required
            return ET.tostring(form)
class FormComboWidget(QWidget):
    """A combo box selecting between several stacked FormWidget pages,
    one per (data, title, comment) item of `datalist`."""
    def __init__(self, datalist, comment="", parent=None):
        QWidget.__init__(self, parent)
        layout = QVBoxLayout()
        self.setLayout(layout)
        self.combobox = QComboBox()
        layout.addWidget(self.combobox)
        self.stackwidget = QStackedWidget(self)
        layout.addWidget(self.stackwidget)
        # Keep the visible page in sync with the combo selection
        if SIGNAL is None:
            self.combobox.currentIndexChanged.connect(
                                        self.stackwidget.setCurrentIndex)
        else:
            self.connect(self.combobox, SIGNAL("currentIndexChanged(int)"),
                         self.stackwidget, SLOT("setCurrentIndex(int)"))
        self.result = parent.result
        self.widgetlist = []
        for data, title, comment in datalist:
            self.combobox.addItem(title)
            widget = FormWidget(data, comment=comment, parent=self)
            self.stackwidget.addWidget(widget)
            self.widgetlist.append((title, widget))
    def setup(self):
        # Build the editor widgets of every page
        for title, widget in self.widgetlist:
            widget.setup()
    def get(self):
        """Collect every page's values, serialized per self.result."""
        if self.result == 'list':
            return [widget.get() for title, widget in self.widgetlist]
        elif self.result in ['dict', 'OrderedDict', 'JSON']:
            if self.result == 'dict':
                dic = {}
            else:
                dic = OrderedDict()
            for title, widget in self.widgetlist:
                if self.result == 'JSON':
                    # Re-parse the page's JSON so it nests correctly
                    dic[title] = json.loads(widget.get())
                else:
                    dic[title] = widget.get()
            if self.result == 'JSON':
                return json.dumps(dic)
            else:
                return dic
        elif self.result == 'XML':
            combos = ET.Element('Combos')
            for title, widget in self.widgetlist:
                combo = ET.SubElement(combos, 'Combo')
                combo.attrib['title'] = title
                child = ET.fromstring(widget.get())
                combo.append(child)
            return ET.tostring(combos)
class FormTabWidget(QWidget):
    """A tab widget with one FormWidget/FormComboWidget per
    (data, title, comment) item of `datalist`."""
    def __init__(self, datalist, comment="", parent=None):
        QWidget.__init__(self, parent)
        layout = QVBoxLayout()
        self.tabwidget = QTabWidget()
        layout.addWidget(self.tabwidget)
        self.setLayout(layout)
        self.result = parent.result
        self.widgetlist = []
        for data, title, comment in datalist:
            # A 3-element first item marks a nested datagroup -> combo pages
            if len(data[0])==3:
                widget = FormComboWidget(data, comment=comment, parent=self)
            else:
                widget = FormWidget(data, comment=comment, parent=self)
            index = self.tabwidget.addTab(widget, title)
            self.tabwidget.setTabToolTip(index, comment)
            self.widgetlist.append((title, widget))
    def setup(self):
        # Build the editor widgets of every tab
        for title, widget in self.widgetlist:
            widget.setup()
    def get(self):
        """Collect every tab's values, serialized per self.result."""
        if self.result == 'list':
            return [widget.get() for title, widget in self.widgetlist]
        elif self.result in ['dict', 'OrderedDict', 'JSON']:
            if self.result == 'dict':
                dic = {}
            else:
                dic = OrderedDict()
            for title, widget in self.widgetlist:
                if self.result == 'JSON':
                    # Re-parse the tab's JSON so it nests correctly
                    dic[title] = json.loads(widget.get())
                else:
                    dic[title] = widget.get()
            if self.result == 'JSON':
                return json.dumps(dic)
            else:
                return dic
        elif self.result == 'XML':
            tabs = ET.Element('Tabs')
            for title, widget in self.widgetlist:
                tab = ET.SubElement(tabs, 'Tab')
                tab.attrib['title'] = title
                child = ET.fromstring(widget.get())
                tab.append(child)
            return ET.tostring(tabs)
class FormDialog(QDialog):
    """Form Dialog

    Hosts the form widget plus OK/Cancel/Apply buttons, performs
    required/float validation, and snapshots the result in self.data
    before the Qt object is destroyed.
    """
    def __init__(self, data, title="", comment="", icon=None, parent=None,
                 apply=None, ok=None, cancel=None, result=None):
        QDialog.__init__(self, parent)
        # Destroying the C++ object right after closing the dialog box,
        # otherwise it may be garbage-collected in another QThread
        # (e.g. the editor's analysis thread in Spyder), thus leading to
        # a segmentation fault on UNIX or an application crash on Windows
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.title = title
        self.ok = ok
        self.cancel = cancel
        self.apply_ = None
        self.apply_callback = None
        if callable(apply):
            self.apply_callback = apply
        elif isinstance(apply, (list, tuple)):
            # ('Custom label', callback) pair
            self.apply_, self.apply_callback = apply
        elif apply is not None:
            raise AssertionError("`apply` argument must be either a function "\
                                 "or tuple ('Apply label', apply_callback)")
        self.result = result
        # Import serialization helpers lazily, only for the chosen format
        if self.result in ['OrderedDict', 'JSON']:
            global OrderedDict
            from collections import OrderedDict
            if self.result == 'JSON':
                global json
                import json
        elif self.result == 'XML':
            global ET
            import xml.etree.ElementTree as ET
        # Form
        if isinstance(data[0][0], (list, tuple)):
            self.formwidget = FormTabWidget(data, comment=comment,
                                            parent=self)
        elif len(data[0])==3:
            self.formwidget = FormComboWidget(data, comment=comment,
                                              parent=self)
        else:
            self.formwidget = FormWidget(data, comment=comment,
                                         parent=self)
        layout = QVBoxLayout()
        layout.addWidget(self.formwidget)
        self.float_fields = []
        self.required_fields = []
        self.formwidget.setup()
        # Button box
        self.bbox = bbox = QDialogButtonBox()
        if self.ok == True:
            bbox.addButton(QDialogButtonBox.Ok)
        elif self.ok:
            # A string gives the OK button a custom label
            ok_btn = QPushButton(self.ok)
            bbox.addButton(ok_btn, QDialogButtonBox.AcceptRole)
        if self.cancel == True:
            bbox.addButton(QDialogButtonBox.Cancel)
        elif self.cancel:
            cancel_btn = QPushButton(self.cancel)
            bbox.addButton(cancel_btn, QDialogButtonBox.RejectRole)
        if self.apply_callback is not None:
            if self.apply_:
                apply_btn = QPushButton(self.apply_)
                bbox.addButton(apply_btn, QDialogButtonBox.ApplyRole)
            else:
                apply_btn = bbox.addButton(QDialogButtonBox.Apply)
            if SIGNAL is None:
                apply_btn.clicked.connect(self.apply)
            else:
                self.connect(apply_btn, SIGNAL("clicked()"), self.apply)
        if SIGNAL is None:
            if self.ok:
                bbox.accepted.connect(self.accept)
            if self.cancel:
                bbox.rejected.connect(self.reject)
        else:
            if self.ok:
                self.connect(bbox, SIGNAL("accepted()"), SLOT("accept()"))
            if self.cancel:
                self.connect(bbox, SIGNAL("rejected()"), SLOT("reject()"))
        layout.addWidget(bbox)
        # Initial validation pass highlights empty required fields
        self.required_valid()
        self.setLayout(layout)
        self.setWindowTitle(self.title)
        if not isinstance(icon, QIcon):
            icon = QWidget().style().standardIcon(QStyle.SP_MessageBoxQuestion)
        self.setWindowIcon(icon)
    def register_float_field(self, field):
        self.float_fields.append(field)
    def register_required_field(self, field):
        self.required_fields.append(field)
    def float_valid(self):
        # Enable OK/Apply only while every float field parses
        valid = True
        for field in self.float_fields:
            if not is_float_valid(field):
                valid = False
        self.update_buttons(valid)
    def required_valid(self):
        # Enable OK/Apply only while every required field is filled
        valid = True
        for field in self.required_fields:
            if not is_required_valid(field):
                valid = False
        self.update_buttons(valid)
    def update_buttons(self, valid):
        # Toggle the confirming buttons (OK/Apply); Cancel stays enabled
        for btn in self.bbox.buttons():
            btn_role = self.bbox.buttonRole(btn)
            if btn_role in (QDialogButtonBox.AcceptRole,
                            QDialogButtonBox.ApplyRole):
                btn.setEnabled(valid)
    def accept(self):
        # Snapshot the form data now: the Qt object is destroyed on close
        if self.result == 'XML':
            app = ET.Element('App')
            app.attrib['title'] = self.title
            child = ET.fromstring(self.formwidget.get())
            app.append(child)
            self.data = ET.tostring(app)
        else:
            self.data = self.formwidget.get()
        QDialog.accept(self)
    def reject(self):
        self.data = None
        QDialog.reject(self)
    def apply(self):
        # Pass the current form state to the apply callback
        if self.result == 'XML':
            app = ET.Element('App')
            app.attrib['title'] = self.title
            child = ET.fromstring(self.formwidget.get())
            app.append(child)
            self.apply_callback(ET.tostring(app))
        else:
            self.apply_callback(self.formwidget.get())
    def get(self):
        """Return form result"""
        # It is import to avoid accessing Qt C++ object as it has probably
        # already been destroyed, due to the Qt.WA_DeleteOnClose attribute
        return self.data
def fedit(data, title="", comment="", icon=None, parent=None, apply=None,
          ok=True, cancel=True, result='list'):
    """
    Create form dialog and return result
    (if Cancel button is pressed, return None)

    :param tuple data: datalist, datagroup (see below)
    :param str title: form title
    :param str comment: header comment
    :param QIcon icon: dialog box icon
    :param QWidget parent: parent widget
    :param str ok: customized ok button label
    :param str cancel: customized cancel button label
    :param tuple apply: (label, function) customized button label and callback
    :param function apply: apply callback
    :param str result: result serialization ('list', 'dict', 'OrderedDict',
        'JSON' or 'XML')
    :return: Serialized result (data type depends on `result` parameter)

    datalist: list/tuple of (field_name, field_value)
    datagroup: list/tuple of (datalist *or* datagroup, title, comment)

    Tips:
      * one field for each member of a datalist
      * one tab for each member of a top-level datagroup
      * one page (of a multipage widget, each page can be selected with a
        combo box) for each member of a datagroup inside a datagroup

    Supported types for field_value:
      - int, float, str, unicode, bool
      - colors: in Qt-compatible text form, i.e. in hex format or name
        (red, ...) (automatically detected from a string)
      - list/tuple:
          * the first element will be the selected index (or value)
          * the other elements can be couples (key, value) or only values
    """
    # Create a QApplication instance if no instance currently exists
    # (e.g. if the module is used directly from the interpreter)
    test_travis = os.environ.get('TEST_CI_WIDGETS', None)
    if test_travis is not None:
        # CI mode: auto-quit the event loop after one second
        app = QApplication.instance()
        if app is None:
            app = QApplication([])
        timer = QTimer(app)
        timer.timeout.connect(app.quit)
        timer.start(1000)
    elif QApplication.startingUp():
        _app = QApplication([])
        # Install Qt's own translations for the standard buttons
        translator_qt = QTranslator()
        translator_qt.load('qt_' + QLocale.system().name(),
                           QLibraryInfo.location(QLibraryInfo.TranslationsPath))
        _app.installTranslator(translator_qt)
    dialog = FormDialog(data, title, comment, icon, parent,
                        apply, ok, cancel, result)
    # exec_() is falsy when the dialog is rejected -> implicit None return
    if dialog.exec_():
        return dialog.get()
if __name__ == "__main__":
    # Interactive demo of the three supported data layouts.
    def create_datalist_example():
        """Build a flat datalist exercising every supported field type."""
        # NOTE(review): the original indentation inside the multiline
        # string literal was lost during extraction; content approximated.
        return [('str', 'this is a string'),
                ('str', """this is a
MULTILINE
string"""),
                ('list', [0, '1', '3', '4']),
                ('list2', ['--', ('none', 'None'), ('--', 'Dashed'),
                           ('-.', 'DashDot'), ('-', 'Solid'),
                           ('steps', 'Steps'), (':', 'Dotted')]),
                ('float', 1.2),
                (None, 'Other:'),
                ('int', 12),
                ('font', ('Arial', 10, False, True)),
                ('color', '#123409'),
                ('bool', True),
                ('date', datetime.date(2010, 10, 10)),
                ('datetime', datetime.datetime(2010, 10, 10)),
                ]
    def create_datagroup_example():
        """Build a datagroup of three identical categories."""
        datalist = create_datalist_example()
        return ((datalist, "Category 1", "Category 1 comment"),
                (datalist, "Category 2", "Category 2 comment"),
                (datalist, "Category 3", "Category 3 comment"))
    #--------- datalist example
    datalist = create_datalist_example()
    def apply_test(data):
        # Callback invoked by the Apply button
        print("data:", data)
    print("result:", fedit(datalist, title="Example",
                           comment="This is just an <b>example</b>.",
                           apply=apply_test))
    #--------- datagroup example
    datagroup = create_datagroup_example()
    print("result:", fedit(datagroup, "Global title"))
    #--------- datagroup inside a datagroup example
    datalist = create_datalist_example()
    datagroup = create_datagroup_example()
    print("result:", fedit(((datagroup, "Title 1", "Tab 1 comment"),
                            (datalist, "Title 2", "Tab 2 comment"),
                            (datalist, "Title 3", "Tab 3 comment")),
                           "Global title"))
Remove the spacing between RadioButtons.
# -*- coding: utf-8 -*-
"""
formlayout
==========
Module creating Qt form dialogs/layouts to edit various type of parameters
formlayout License Agreement (MIT License)
------------------------------------------
Copyright (c) 2009-2015 Pierre Raybaut
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
__version__ = '2.0.0alpha'
__license__ = __doc__
DEBUG_FORMLAYOUT = False
import os
import sys
import datetime
STDERR = sys.stderr
# ---+- PyQt-PySide compatibility -+----
# Select the Qt binding from the QT_API environment variable (default:
# 'pyqt', i.e. PyQt4) and star-import its names into this module.
# After this section, SIGNAL is None under PyQt5 (new-style signals only);
# the rest of the module branches on `SIGNAL is None` for connections.
_modname = os.environ.setdefault('QT_API', 'pyqt')
assert _modname in ('pyqt', 'pyqt5', 'pyside')

if os.environ['QT_API'].startswith('pyqt'):
    try:
        if os.environ['QT_API'] == 'pyqt5':
            import PyQt5  # analysis:ignore
        else:
            import PyQt4  # analysis:ignore
    except ImportError:
        # Switching to PySide
        os.environ['QT_API'] = _modname = 'pyside'
        try:
            import PySide  # analysis:ignore
        except ImportError:
            raise ImportError("formlayout requires PyQt4, PyQt5 or PySide")

if os.environ['QT_API'] == 'pyqt':
    # Probe for QFormLayout to produce a clearer error on broken installs.
    try:
        from PyQt4.QtGui import QFormLayout
    except ImportError:
        raise ImportError("formlayout requires PyQt4, PyQt5 or PySide")
    from PyQt4.QtGui import *  # analysis:ignore
    from PyQt4.QtCore import *  # analysis:ignore
    from PyQt4.QtCore import pyqtSlot as Slot
    from PyQt4.QtCore import pyqtProperty as Property

if os.environ['QT_API'] == 'pyqt5':
    from PyQt5.QtWidgets import *  # analysis:ignore
    from PyQt5.QtGui import *  # analysis:ignore
    from PyQt5.QtCore import *  # analysis:ignore
    from PyQt5.QtCore import pyqtSignal as Signal  # analysis:ignore
    from PyQt5.QtCore import pyqtSlot as Slot  # analysis:ignore
    from PyQt5.QtCore import pyqtProperty as Property  # analysis:ignore
    # PyQt5 has no old-style SIGNAL(); used as a feature flag below.
    SIGNAL = None  # analysis:ignore

if os.environ['QT_API'] == 'pyside':
    from PySide.QtGui import *  # analysis:ignore
    from PySide.QtCore import *  # analysis:ignore
# ---+- Python 2-3 compatibility -+----
# True when running under Python 2 (checked via the version string).
PY2 = sys.version[0] == '2'

if PY2:
    # Python 2
    import codecs

    def u(obj):
        """Make unicode object (decoding escape sequences)."""
        return codecs.unicode_escape_decode(obj)[0]
else:
    # Python 3

    def u(obj):
        """Return string as it is (str is already unicode)."""
        return obj
def is_text_string(obj):
    """Return True if `obj` is a text string, False if it is anything else,
    like binary data (Python 3) or QString (Python 2, PyQt API #1)"""
    # `basestring` only exists (and is only evaluated) on Python 2.
    text_type = basestring if PY2 else str
    return isinstance(obj, text_type)
def is_binary_string(obj):
    """Return True if `obj` is a binary string, False if it is anything else"""
    # On Python 2 the binary type is `str`; on Python 3 it is `bytes`.
    binary_type = str if PY2 else bytes
    return isinstance(obj, binary_type)
def is_string(obj):
    """Return True if `obj` is a text or binary Python string object,
    False if it is anything else, like a QString (Python 2, PyQt API #1)"""
    if is_text_string(obj):
        return True
    return is_binary_string(obj)
def to_text_string(obj, encoding=None):
    """Convert `obj` to (unicode) text string"""
    if PY2:
        # Python 2: go through `unicode`, decoding when asked to.
        return unicode(obj) if encoding is None else unicode(obj, encoding)
    # Python 3
    if encoding is None:
        return str(obj)
    if isinstance(obj, str):
        # In case this function is not used properly, this could happen
        return obj
    return str(obj, encoding)
class ColorButton(QPushButton):
    """
    Color choosing push button
    """
    # Old-style signal declaration (PyQt4/PySide).
    __pyqtSignals__ = ("colorChanged(QColor)",)
    if SIGNAL is None:
        # New-style signal (PyQt5).
        colorChanged = Signal("QColor")

    def __init__(self, parent=None):
        QPushButton.__init__(self, parent)
        self.setFixedSize(20, 20)
        self.setIconSize(QSize(12, 12))
        if SIGNAL is None:
            self.clicked.connect(self.choose_color)
        else:
            self.connect(self, SIGNAL("clicked()"), self.choose_color)
        # Currently selected color (default QColor() is invalid).
        self._color = QColor()

    def choose_color(self):
        # Open the color dialog preset to the current color; ignore cancels.
        color = QColorDialog.getColor(self._color, self.parentWidget())
        if color.isValid():
            self.set_color(color)

    def get_color(self):
        return self._color

    @Slot(QColor)
    def set_color(self, color):
        # Store the new color, notify listeners and repaint the button icon.
        if color != self._color:
            self._color = color
            if SIGNAL is None:
                self.colorChanged.emit(self._color)
            else:
                self.emit(SIGNAL("colorChanged(QColor)"), self._color)
            pixmap = QPixmap(self.iconSize())
            pixmap.fill(color)
            self.setIcon(QIcon(pixmap))

    # Qt property so the color can be set/read via `btn.color`.
    color = Property("QColor", get_color, set_color)
def text_to_qcolor(text):
    """
    Create a QColor from specified string
    Avoid warning from Qt when an invalid QColor is instantiated

    Returns an *invalid* QColor for anything that is neither a '#rrggbb'
    hex string nor a known Qt color name.
    """
    color = QColor()
    if not is_string(text):  # testing for QString (PyQt API#1)
        text = str(text)
    if not is_text_string(text):
        # Binary strings are never valid color specifications.
        return color
    if text.startswith('#') and len(text)==7:
        # '#rrggbb': every character must be a hex digit (or the '#').
        correct = '#0123456789abcdef'
        for char in text:
            if char.lower() not in correct:
                return color
    elif text not in list(QColor.colorNames()):
        # Not a named color Qt knows about.
        return color
    color.setNamedColor(text)
    return color
class ColorLayout(QHBoxLayout):
    """Color-specialized QLineEdit layout"""
    # Pairs a text field (hex/name) with a ColorButton; edits in either
    # widget are mirrored into the other.

    def __init__(self, color, parent=None):
        QHBoxLayout.__init__(self)
        assert isinstance(color, QColor)
        self.lineedit = QLineEdit(color.name(), parent)
        if SIGNAL is None:
            self.lineedit.textChanged.connect(self.update_color)
        else:
            self.connect(self.lineedit, SIGNAL("textChanged(QString)"),
                         self.update_color)
        self.addWidget(self.lineedit)
        self.colorbtn = ColorButton(parent)
        self.colorbtn.color = color
        if SIGNAL is None:
            self.colorbtn.colorChanged.connect(self.update_text)
        else:
            self.connect(self.colorbtn, SIGNAL("colorChanged(QColor)"),
                         self.update_text)
        self.addWidget(self.colorbtn)

    def update_color(self, text):
        # Text edited -> push the (valid) color into the button.
        color = text_to_qcolor(text)
        if color.isValid():
            self.colorbtn.color = color

    def update_text(self, color):
        # Button color picked -> reflect its name in the text field.
        self.lineedit.setText(color.name())

    def text(self):
        return self.lineedit.text()

    def setStyleSheet(self, style):
        self.lineedit.setStyleSheet(style)
        self.colorbtn.setStyleSheet(style)
class FileLayout(QHBoxLayout):
    """File-specialized QLineEdit layout

    `value` is expected to be 'file', 'file:<name filters>' or 'dir'
    (see FormWidget.setup, which only builds this layout for those forms).
    """

    def __init__(self, value, parent=None):
        QHBoxLayout.__init__(self)
        self.value = value
        self.lineedit = QLineEdit('', parent)
        self.addWidget(self.lineedit)
        self.filebtn = QPushButton('Browse')
        self.filebtn.clicked.connect(self.getfile)
        self.addWidget(self.filebtn)

    def getfile(self):
        """Open a file/directory picker and copy the selection into the
        line edit."""
        # Fix: keep `name` bound even if `value` matches neither branch;
        # previously an unexpected `value` raised UnboundLocalError here.
        name = None
        if self.value.startswith('file'):
            # 'file:<filter>' -> everything after 'file:' is the name filter
            name = QFileDialog.getOpenFileName(None, 'Select file',
                                               filter=self.value[5:])
        elif self.value == 'dir':
            name = QFileDialog.getExistingDirectory(None, 'Select directory')
        if name:
            self.lineedit.setText(name)

    def text(self):
        return self.lineedit.text()

    def setStyleSheet(self, style):
        self.lineedit.setStyleSheet(style)
        self.filebtn.setStyleSheet(style)
class RadioLayout(QVBoxLayout):
    """Radio buttons layout with QButtonGroup"""

    def __init__(self, buttons, index, parent=None):
        QVBoxLayout.__init__(self)
        # No space between RadioButtons.
        self.setSpacing(0)
        self.group = QButtonGroup()
        for i, button in enumerate(buttons):
            btn = QRadioButton(button)
            if i == index:
                btn.setChecked(True)
            self.addWidget(btn)
            # Button id == position, so currentIndex() maps back directly.
            self.group.addButton(btn, i)

    def currentIndex(self):
        # -1 when no button is checked (mirrors QComboBox.currentIndex()).
        return self.group.checkedId()

    def setStyleSheet(self, style):
        for btn in self.group.buttons():
            btn.setStyleSheet(style)
def font_is_installed(font):
    """Check if font is installed

    Returns a (possibly empty) list of matching family names, so the
    result is truthy iff the font is installed.
    """
    return [fam for fam in QFontDatabase().families()
            if to_text_string(fam) == font]
def tuple_to_qfont(tup):
    """
    Create a QFont from tuple:
        (family [string], size [int], italic [bool], bold [bool])

    Returns None when `tup` does not match that exact shape — callers use
    this as a type probe (see FormWidget.setup/get).
    """
    if not isinstance(tup, tuple) or len(tup) != 4 \
       or not isinstance(tup[0], str) \
       or not isinstance(tup[1], int) \
       or not isinstance(tup[2], bool) \
       or not isinstance(tup[3], bool):
        return None
    font = QFont()
    family, size, italic, bold = tup
    font.setFamily(family)
    font.setPointSize(size)
    font.setItalic(italic)
    font.setBold(bold)
    return font
def qfont_to_tuple(font):
    # Inverse of tuple_to_qfont: (family, size, italic, bold).
    return (to_text_string(font.family()), int(font.pointSize()),
            font.italic(), font.bold())
class FontLayout(QGridLayout):
    """Font selection"""
    # Grid of family combo, size combo, italic and bold checkboxes,
    # initialized from a (family, size, italic, bold) tuple.

    def __init__(self, value, parent=None):
        QGridLayout.__init__(self)
        if not font_is_installed(value[0]):
            print("Warning: Font `%s` is not installed" % value[0],
                  file=sys.stderr)
        font = tuple_to_qfont(value)
        assert font is not None
        # Font family
        self.family = QFontComboBox(parent)
        self.family.setCurrentFont(font)
        self.addWidget(self.family, 0, 0, 1, -1)
        # Font size
        self.size = QComboBox(parent)
        self.size.setEditable(True)
        sizelist = list(range(6, 12)) + list(range(12, 30, 2)) + [36, 48, 72]
        size = font.pointSize()
        if size not in sizelist:
            # Keep the current size selectable even if non-standard.
            sizelist.append(size)
            sizelist.sort()
        self.size.addItems([str(s) for s in sizelist])
        self.size.setCurrentIndex(sizelist.index(size))
        self.addWidget(self.size, 1, 0)
        # Italic or not
        self.italic = QCheckBox(self.tr("Italic"), parent)
        self.italic.setChecked(font.italic())
        self.addWidget(self.italic, 1, 1)
        # Bold or not
        self.bold = QCheckBox(self.tr("Bold"), parent)
        self.bold.setChecked(font.bold())
        self.addWidget(self.bold, 1, 2)

    def get_font(self):
        # Read the widgets back into a (family, size, italic, bold) tuple.
        font = self.family.currentFont()
        font.setItalic(self.italic.isChecked())
        font.setBold(self.bold.isChecked())
        font.setPointSize(int(self.size.currentText()))
        return qfont_to_tuple(font)

    def setStyleSheet(self, style):
        self.family.setStyleSheet(style)
        self.size.setStyleSheet(style)
        self.italic.setStyleSheet(style)
        self.bold.setStyleSheet(style)
def is_float_valid(edit):
    # True when the line edit's text fully satisfies its QDoubleValidator.
    text = edit.text()
    state = edit.validator().validate(text, 0)[0]
    return state == QDoubleValidator.Acceptable
def is_required_valid(edit):
    """Return True if required field `edit` is filled in; otherwise
    highlight it (orange background) and return False."""
    bgd_color = "background-color:rgb(255, 175, 90);"
    if isinstance(edit, (QLineEdit, FileLayout)):
        if edit.text():
            edit.setStyleSheet("")
            return True
        else:
            edit.setStyleSheet(bgd_color)
    elif isinstance(edit, (QComboBox, RadioLayout)):
        # -1 means "no selection" for both widget kinds.
        if edit.currentIndex() != -1:
            edit.setStyleSheet("")
            return True
        else:
            edit.setStyleSheet(bgd_color)
    elif isinstance(edit, QTextEdit):
        if edit.toPlainText():
            edit.setStyleSheet("")
            return True
        else:
            edit.setStyleSheet(bgd_color)
    return False
class FormWidget(QWidget):
    """Form page: builds one editor widget per (label, value) pair of a
    datalist and serializes the edited values back via get()."""

    def __init__(self, data, comment="", parent=None):
        QWidget.__init__(self, parent)
        from copy import deepcopy
        # Deep copy so editing never mutates the caller's data structure.
        self.data = deepcopy(data)
        self.result = parent.result
        self.widgets = []
        self.formlayout = QFormLayout(self)
        if comment:
            self.formlayout.addRow(QLabel(comment))
            self.formlayout.addRow(QLabel(" "))
        if DEBUG_FORMLAYOUT:
            print("\n"+("*"*80))
            print("DATA:", self.data)
            print("*"*80)
            print("COMMENT:", comment)
            print("*"*80)

    def get_dialog(self):
        """Return FormDialog instance"""
        dialog = self.parent()
        while not isinstance(dialog, QDialog):
            dialog = dialog.parent()
        return dialog

    def setup(self):
        # Build one field widget per datalist entry; the chosen widget type
        # is inferred from the value (font tuple, color, string, list, bool,
        # float, int, datetime, date, time, or repr() fallback).
        for label, value in self.data:
            if DEBUG_FORMLAYOUT:
                print("value:", value)
            if label is None and value is None:
                # Separator: (None, None)
                separator = QFrame()
                separator.setFrameShape(QFrame.HLine)
                separator.setFrameShadow(QFrame.Sunken)
                self.formlayout.addRow(separator)
                self.widgets.append(None)
                continue
            if label is None:
                # (None, text): either an inline image or a comment row.
                img_fmt = tuple(['.'+str(bytes(ext).decode()) for ext
                                 in QImageReader.supportedImageFormats()])
                if value.endswith(img_fmt):
                    # Image
                    pixmap = QPixmap(value)
                    lab = QLabel()
                    lab.setPixmap(pixmap)
                    self.formlayout.addRow(lab)
                else:
                    # Comment
                    self.formlayout.addRow(QLabel(value))
                self.widgets.append(None)
                continue
            if tuple_to_qfont(value) is not None:
                field = FontLayout(value, self)
            elif text_to_qcolor(value).isValid():
                field = ColorLayout(QColor(value), self)
            elif is_text_string(value):
                if value in ['file', 'dir'] or value.startswith('file:'):
                    field = FileLayout(value, self)
                elif value == 'password':
                    field = QLineEdit(self)
                    field.setEchoMode(QLineEdit.Password)
                elif '\n' in value:
                    # Multiline text: normalize line separators to U+2029
                    # (Qt's paragraph separator) for the QTextEdit.
                    if value == '\n':
                        value = ''
                    for linesep in (os.linesep, '\n'):
                        if linesep in value:
                            value = value.replace(linesep, u("\u2029"))
                    field = QTextEdit(value, self)
                else:
                    field = QLineEdit(value, self)
            elif isinstance(value, (list, tuple)):
                # First element selects the initial entry; list -> combo box,
                # tuple -> radio buttons.
                save_value = value
                value = list(value)  # always needed to protect self.data
                selindex = value.pop(0)
                if isinstance(selindex, int):
                    # 1-based int index from the caller -> 0-based.
                    selindex = selindex - 1
                if isinstance(value[0], (list, tuple)):
                    # (key, value) couples: display values, match on keys too.
                    keys = [ key for key, _val in value ]
                    value = [ val for _key, val in value ]
                else:
                    keys = value
                if selindex in value:
                    selindex = value.index(selindex)
                elif selindex in keys:
                    selindex = keys.index(selindex)
                elif not isinstance(selindex, int):
                    print("Warning: '%s' index is invalid (label: "\
                          "%s, value: %s)" % (selindex, label, value),
                          file=STDERR)
                    selindex = -1
                if isinstance(save_value, list):
                    field = QComboBox(self)
                    field.addItems(value)
                    field.setCurrentIndex(selindex)
                elif isinstance(save_value, tuple):
                    field = RadioLayout(value, selindex, self)
            elif isinstance(value, bool):
                field = QCheckBox(self)
                field.setCheckState(Qt.Checked if value else Qt.Unchecked)
            elif isinstance(value, float):
                # Locale-aware float editing with live validation.
                field = QLineEdit(QLocale().toString(value), self)
                field.setValidator(QDoubleValidator(field))
                dialog = self.get_dialog()
                dialog.register_float_field(field)
                if SIGNAL is None:
                    field.textChanged.connect(dialog.float_valid)
                else:
                    self.connect(field, SIGNAL('textChanged(QString)'),
                                 dialog.float_valid)
            elif isinstance(value, int):
                field = QSpinBox(self)
                field.setRange(-1e9, 1e9)
                field.setValue(value)
            elif isinstance(value, datetime.datetime):
                field = QDateTimeEdit(self)
                field.setDateTime(value)
            elif isinstance(value, datetime.date):
                field = QDateEdit(self)
                field.setDate(value)
            elif isinstance(value, datetime.time):
                field = QTimeEdit(self)
                field.setTime(value)
            else:
                # Fallback: edit the repr(), evaluated back in get().
                field = QLineEdit(repr(value), self)
            # Eventually extracting tooltip from label and processing it
            index = label.find('::')
            if index != -1:
                label, tooltip = label[:index], label[index+2:]
                field.setToolTip(tooltip)
            # Eventually catching the 'required' feature and processing it
            if label.endswith(' *'):
                # Show a red star and hook the field into required-field
                # validation on the dialog.
                label = label[:-1] + '<font color="red">*</font>'
                if isinstance(field, (QLineEdit, QTextEdit, QComboBox,
                                      FileLayout, RadioLayout)):
                    dialog = self.get_dialog()
                    dialog.register_required_field(field)
                else:
                    print("Warning: '%s' doesn't support 'required' feature"\
                          % type(field), file=STDERR)
                if isinstance(field, QLineEdit):
                    if SIGNAL is None:
                        field.textChanged.connect(dialog.required_valid)
                    else:
                        self.connect(field, SIGNAL('textChanged(QString)'),
                                     dialog.required_valid)
                elif isinstance(field, QTextEdit):
                    if SIGNAL is None:
                        field.textChanged.connect(dialog.required_valid)
                    else:
                        self.connect(field, SIGNAL('textChanged()'),
                                     dialog.required_valid)
                elif isinstance(field, QComboBox):
                    if SIGNAL is None:
                        field.currentIndexChanged.connect(\
                                                dialog.required_valid)
                    else:
                        self.connect(field,
                                     SIGNAL('currentIndexChanged(QString)'),
                                     dialog.required_valid)
                elif isinstance(field, FileLayout):
                    if SIGNAL is None:
                        field.lineedit.textChanged.connect(\
                                                dialog.required_valid)
                    else:
                        self.connect(field.lineedit,
                                     SIGNAL('textChanged(QString)'),
                                     dialog.required_valid)
                elif isinstance(field, RadioLayout):
                    if SIGNAL is None:
                        field.group.buttonClicked.connect(\
                                                dialog.required_valid)
                    else:
                        self.connect(field.group, SIGNAL('buttonClicked(int)'),
                                     dialog.required_valid)
            self.formlayout.addRow(label, field)
            self.widgets.append(field)

    def get(self):
        # Read every widget back, converting to the original value's type,
        # then serialize according to self.result
        # ('list', 'dict', 'OrderedDict', 'JSON' or 'XML').
        valuelist = []
        for index, (label, value) in enumerate(self.data):
            field = self.widgets[index]
            if label is None:
                # Separator / Comment
                continue
            elif tuple_to_qfont(value) is not None:
                value = field.get_font()
            elif is_text_string(value):
                if isinstance(field, QTextEdit):
                    # Undo the U+2029 substitution done in setup().
                    value = to_text_string(field.toPlainText()
                                           ).replace(u("\u2029"), os.linesep)
                else:
                    value = to_text_string(field.text())
            elif isinstance(value, (list, tuple)):
                index = int(field.currentIndex())
                if isinstance(value[0], int):
                    # Return an int index, if initialization was an int
                    value = index + 1
                else:
                    value = value[index+1]
                    if isinstance(value, (list, tuple)):
                        value = value[0]
            elif isinstance(value, bool):
                value = field.checkState() == Qt.Checked
            elif isinstance(value, float):
                value = float(QLocale().toDouble(field.text())[0])
            elif isinstance(value, int):
                value = int(field.value())
            elif isinstance(value, datetime.datetime):
                value = field.dateTime()
                try:
                    value = value.toPyDateTime()  # PyQt
                except AttributeError:
                    value = value.toPython()  # PySide
            elif isinstance(value, datetime.date):
                value = field.date()
                try:
                    value = value.toPyDate()  # PyQt
                except AttributeError:
                    value = value.toPython()  # PySide
            elif isinstance(value, datetime.time):
                value = field.time()
                try:
                    value = value.toPyTime()  # PyQt
                except AttributeError:
                    value = value.toPython()  # PySide
            else:
                # Fallback widgets hold a repr(); evaluate it back.
                # NOTE(review): eval() of user-edited text — trusted-UI
                # assumption; do not feed untrusted forms through this.
                value = eval(str(field.text()))
            valuelist.append((label, value))
        if self.result == 'list':
            return [value for label, value in valuelist]
        elif self.result in ['dict', 'OrderedDict', 'JSON']:
            if self.result == 'dict':
                dic = {}
            else:
                dic = OrderedDict()
            for label, value in valuelist:
                if label in dic.keys():
                    # Duplicate labels silently overwrite in mappings.
                    print("Warning: '%s' is duplicate and '%s' doesn't "\
                          "handle it, you should use 'list' or 'XML' instead"\
                          % (label, self.result), file=STDERR)
                if isinstance(value, (datetime.date, datetime.time,
                                      datetime.datetime)) and self.result == 'JSON':
                    # Dates are not JSON-serializable; stringify them.
                    dic[label] = str(value)
                else:
                    dic[label] = value
            if self.result == 'JSON':
                return json.dumps(dic)
            else:
                return dic
        elif self.result == 'XML':
            form = ET.Element('Form')
            for label, value in valuelist:
                required = 'false'
                if label.endswith(' *'):
                    label = label[:-2]
                    required = 'true'
                child = ET.SubElement(form, label)
                if isinstance(value, datetime.datetime):
                    child.text = value.isoformat()
                else:
                    child.text = str(value)
                child.attrib['required'] = required
            return ET.tostring(form)
class FormComboWidget(QWidget):
    """Multipage form: one FormWidget per datagroup entry, with a combo box
    selecting which page of the stacked widget is shown."""

    def __init__(self, datalist, comment="", parent=None):
        QWidget.__init__(self, parent)
        layout = QVBoxLayout()
        self.setLayout(layout)
        self.combobox = QComboBox()
        layout.addWidget(self.combobox)
        self.stackwidget = QStackedWidget(self)
        layout.addWidget(self.stackwidget)
        # Combo selection drives which stacked page is visible.
        if SIGNAL is None:
            self.combobox.currentIndexChanged.connect(
                                        self.stackwidget.setCurrentIndex)
        else:
            self.connect(self.combobox, SIGNAL("currentIndexChanged(int)"),
                         self.stackwidget, SLOT("setCurrentIndex(int)"))
        self.result = parent.result
        self.widgetlist = []
        for data, title, comment in datalist:
            self.combobox.addItem(title)
            widget = FormWidget(data, comment=comment, parent=self)
            self.stackwidget.addWidget(widget)
            self.widgetlist.append((title, widget))

    def setup(self):
        for title, widget in self.widgetlist:
            widget.setup()

    def get(self):
        # Aggregate every page's result according to self.result.
        if self.result == 'list':
            return [widget.get() for title, widget in self.widgetlist]
        elif self.result in ['dict', 'OrderedDict', 'JSON']:
            if self.result == 'dict':
                dic = {}
            else:
                dic = OrderedDict()
            for title, widget in self.widgetlist:
                if self.result == 'JSON':
                    # Re-parse each page's JSON so the output nests cleanly.
                    dic[title] = json.loads(widget.get())
                else:
                    dic[title] = widget.get()
            if self.result == 'JSON':
                return json.dumps(dic)
            else:
                return dic
        elif self.result == 'XML':
            combos = ET.Element('Combos')
            for title, widget in self.widgetlist:
                combo = ET.SubElement(combos, 'Combo')
                combo.attrib['title'] = title
                child = ET.fromstring(widget.get())
                combo.append(child)
            return ET.tostring(combos)
class FormTabWidget(QWidget):
    """Tabbed form: one tab per datagroup entry, each tab holding either a
    nested FormComboWidget (datagroup) or a FormWidget (datalist)."""

    def __init__(self, datalist, comment="", parent=None):
        QWidget.__init__(self, parent)
        layout = QVBoxLayout()
        self.tabwidget = QTabWidget()
        layout.addWidget(self.tabwidget)
        self.setLayout(layout)
        self.result = parent.result
        self.widgetlist = []
        for data, title, comment in datalist:
            if len(data[0])==3:
                # 3-tuples inside -> a nested datagroup -> combo pages.
                widget = FormComboWidget(data, comment=comment, parent=self)
            else:
                widget = FormWidget(data, comment=comment, parent=self)
            index = self.tabwidget.addTab(widget, title)
            self.tabwidget.setTabToolTip(index, comment)
            self.widgetlist.append((title, widget))

    def setup(self):
        for title, widget in self.widgetlist:
            widget.setup()

    def get(self):
        # Aggregate every tab's result according to self.result.
        if self.result == 'list':
            return [widget.get() for title, widget in self.widgetlist]
        elif self.result in ['dict', 'OrderedDict', 'JSON']:
            if self.result == 'dict':
                dic = {}
            else:
                dic = OrderedDict()
            for title, widget in self.widgetlist:
                if self.result == 'JSON':
                    # Re-parse each tab's JSON so the output nests cleanly.
                    dic[title] = json.loads(widget.get())
                else:
                    dic[title] = widget.get()
            if self.result == 'JSON':
                return json.dumps(dic)
            else:
                return dic
        elif self.result == 'XML':
            tabs = ET.Element('Tabs')
            for title, widget in self.widgetlist:
                tab = ET.SubElement(tabs, 'Tab')
                tab.attrib['title'] = title
                child = ET.fromstring(widget.get())
                tab.append(child)
            return ET.tostring(tabs)
class FormDialog(QDialog):
    """Form Dialog

    Hosts the appropriate form widget (tabbed / combo / flat, chosen from
    the shape of `data`), plus an OK / Cancel / Apply button box, and
    serializes the result according to `result`.
    """

    def __init__(self, data, title="", comment="", icon=None, parent=None,
                 apply=None, ok=None, cancel=None, result=None):
        QDialog.__init__(self, parent)
        # Destroying the C++ object right after closing the dialog box,
        # otherwise it may be garbage-collected in another QThread
        # (e.g. the editor's analysis thread in Spyder), thus leading to
        # a segmentation fault on UNIX or an application crash on Windows
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.title = title
        self.ok = ok
        self.cancel = cancel
        # `apply` may be a bare callback or a ('label', callback) pair.
        self.apply_ = None
        self.apply_callback = None
        if callable(apply):
            self.apply_callback = apply
        elif isinstance(apply, (list, tuple)):
            self.apply_, self.apply_callback = apply
        elif apply is not None:
            raise AssertionError("`apply` argument must be either a function "\
                                 "or tuple ('Apply label', apply_callback)")
        self.result = result
        # Lazily import the serialization helpers the chosen result needs;
        # globals so FormWidget/FormComboWidget/FormTabWidget see them too.
        if self.result in ['OrderedDict', 'JSON']:
            global OrderedDict
            from collections import OrderedDict
            if self.result == 'JSON':
                global json
                import json
        elif self.result == 'XML':
            global ET
            import xml.etree.ElementTree as ET
        # Form
        if isinstance(data[0][0], (list, tuple)):
            # Nested sequences -> datagroup -> tabs.
            self.formwidget = FormTabWidget(data, comment=comment,
                                            parent=self)
        elif len(data[0])==3:
            # 3-tuples -> datagroup -> combo pages.
            self.formwidget = FormComboWidget(data, comment=comment,
                                              parent=self)
        else:
            self.formwidget = FormWidget(data, comment=comment,
                                         parent=self)
        layout = QVBoxLayout()
        layout.addWidget(self.formwidget)
        self.float_fields = []
        self.required_fields = []
        self.formwidget.setup()
        # Button box
        self.bbox = bbox = QDialogButtonBox()
        # True -> standard button; any other truthy value -> custom label.
        if self.ok == True:
            bbox.addButton(QDialogButtonBox.Ok)
        elif self.ok:
            ok_btn = QPushButton(self.ok)
            bbox.addButton(ok_btn, QDialogButtonBox.AcceptRole)
        if self.cancel == True:
            bbox.addButton(QDialogButtonBox.Cancel)
        elif self.cancel:
            cancel_btn = QPushButton(self.cancel)
            bbox.addButton(cancel_btn, QDialogButtonBox.RejectRole)
        if self.apply_callback is not None:
            if self.apply_:
                apply_btn = QPushButton(self.apply_)
                bbox.addButton(apply_btn, QDialogButtonBox.ApplyRole)
            else:
                apply_btn = bbox.addButton(QDialogButtonBox.Apply)
            if SIGNAL is None:
                apply_btn.clicked.connect(self.apply)
            else:
                self.connect(apply_btn, SIGNAL("clicked()"), self.apply)
        if SIGNAL is None:
            if self.ok:
                bbox.accepted.connect(self.accept)
            if self.cancel:
                bbox.rejected.connect(self.reject)
        else:
            if self.ok:
                self.connect(bbox, SIGNAL("accepted()"), SLOT("accept()"))
            if self.cancel:
                self.connect(bbox, SIGNAL("rejected()"), SLOT("reject()"))
        layout.addWidget(bbox)
        # Initial validation pass: disables OK/Apply if required fields
        # start out empty.
        self.required_valid()
        self.setLayout(layout)
        self.setWindowTitle(self.title)
        if not isinstance(icon, QIcon):
            icon = QWidget().style().standardIcon(QStyle.SP_MessageBoxQuestion)
        self.setWindowIcon(icon)

    def register_float_field(self, field):
        # Called by FormWidget.setup for each float line edit.
        self.float_fields.append(field)

    def register_required_field(self, field):
        # Called by FormWidget.setup for each '<label> *' field.
        self.required_fields.append(field)

    def float_valid(self):
        # Enable OK/Apply only while every float field parses.
        valid = True
        for field in self.float_fields:
            if not is_float_valid(field):
                valid = False
        self.update_buttons(valid)

    def required_valid(self):
        # Enable OK/Apply only while every required field is filled
        # (is_required_valid also highlights offenders).
        valid = True
        for field in self.required_fields:
            if not is_required_valid(field):
                valid = False
        self.update_buttons(valid)

    def update_buttons(self, valid):
        # Toggle the accept/apply buttons; Cancel stays enabled.
        for btn in self.bbox.buttons():
            btn_role = self.bbox.buttonRole(btn)
            if btn_role in (QDialogButtonBox.AcceptRole,
                            QDialogButtonBox.ApplyRole):
                btn.setEnabled(valid)

    def accept(self):
        # Snapshot the serialized form data before the dialog closes
        # (the C++ object is destroyed on close, see WA_DeleteOnClose).
        if self.result == 'XML':
            app = ET.Element('App')
            app.attrib['title'] = self.title
            child = ET.fromstring(self.formwidget.get())
            app.append(child)
            self.data = ET.tostring(app)
        else:
            self.data = self.formwidget.get()
        QDialog.accept(self)

    def reject(self):
        self.data = None
        QDialog.reject(self)

    def apply(self):
        # Feed the current (serialized) form state to the apply callback
        # without closing the dialog.
        if self.result == 'XML':
            app = ET.Element('App')
            app.attrib['title'] = self.title
            child = ET.fromstring(self.formwidget.get())
            app.append(child)
            self.apply_callback(ET.tostring(app))
        else:
            self.apply_callback(self.formwidget.get())

    def get(self):
        """Return form result"""
        # It is import to avoid accessing Qt C++ object as it has probably
        # already been destroyed, due to the Qt.WA_DeleteOnClose attribute
        return self.data
def fedit(data, title="", comment="", icon=None, parent=None, apply=None,
          ok=True, cancel=True, result='list'):
    """
    Create form dialog and return result
    (if Cancel button is pressed, return None)

    :param tuple data: datalist, datagroup (see below)
    :param str title: form title
    :param str comment: header comment
    :param QIcon icon: dialog box icon
    :param QWidget parent: parent widget
    :param str ok: customized ok button label
    :param str cancel: customized cancel button label
    :param tuple apply: (label, function) customized button label and callback
    :param function apply: apply callback
    :param str result: result serialization ('list', 'dict', 'OrderedDict',
        'JSON' or 'XML')
    :return: Serialized result (data type depends on `result` parameter)

    datalist: list/tuple of (field_name, field_value)
    datagroup: list/tuple of (datalist *or* datagroup, title, comment)

    Tips:
      * one field for each member of a datalist
      * one tab for each member of a top-level datagroup
      * one page (of a multipage widget, each page can be selected with a
        combo box) for each member of a datagroup inside a datagroup

    Supported types for field_value:
      - int, float, str, unicode, bool
      - colors: in Qt-compatible text form, i.e. in hex format or name
        (red, ...) (automatically detected from a string)
      - list/tuple:
          * the first element will be the selected index (or value)
          * the other elements can be couples (key, value) or only values
    """
    # Create a QApplication instance if no instance currently exists
    # (e.g. if the module is used directly from the interpreter)
    test_travis = os.environ.get('TEST_CI_WIDGETS', None)
    if test_travis is not None:
        # CI mode: reuse/create the app and auto-quit it after one second
        # so the dialog does not block the test run.
        app = QApplication.instance()
        if app is None:
            app = QApplication([])
        timer = QTimer(app)
        timer.timeout.connect(app.quit)
        timer.start(1000)
    elif QApplication.startingUp():
        _app = QApplication([])
        # Install Qt's own translations for the current locale.
        translator_qt = QTranslator()
        translator_qt.load('qt_' + QLocale.system().name(),
                           QLibraryInfo.location(QLibraryInfo.TranslationsPath))
        _app.installTranslator(translator_qt)
    dialog = FormDialog(data, title, comment, icon, parent,
                        apply, ok, cancel, result)
    # exec_() is falsy when the dialog was cancelled -> implicit None.
    if dialog.exec_():
        return dialog.get()
if __name__ == "__main__":
def create_datalist_example():
return [('str', 'this is a string'),
('str', """this is a
MULTILINE
string"""),
('list', [0, '1', '3', '4']),
('list2', ['--', ('none', 'None'), ('--', 'Dashed'),
('-.', 'DashDot'), ('-', 'Solid'),
('steps', 'Steps'), (':', 'Dotted')]),
('float', 1.2),
(None, 'Other:'),
('int', 12),
('font', ('Arial', 10, False, True)),
('color', '#123409'),
('bool', True),
('date', datetime.date(2010, 10, 10)),
('datetime', datetime.datetime(2010, 10, 10)),
]
def create_datagroup_example():
datalist = create_datalist_example()
return ((datalist, "Category 1", "Category 1 comment"),
(datalist, "Category 2", "Category 2 comment"),
(datalist, "Category 3", "Category 3 comment"))
#--------- datalist example
datalist = create_datalist_example()
def apply_test(data):
print("data:", data)
print("result:", fedit(datalist, title="Example",
comment="This is just an <b>example</b>.",
apply=apply_test))
#--------- datagroup example
datagroup = create_datagroup_example()
print("result:", fedit(datagroup, "Global title"))
#--------- datagroup inside a datagroup example
datalist = create_datalist_example()
datagroup = create_datagroup_example()
print("result:", fedit(((datagroup, "Title 1", "Tab 1 comment"),
(datalist, "Title 2", "Tab 2 comment"),
(datalist, "Title 3", "Tab 3 comment")),
"Global title"))
|
__version__ = '3.4.13'
Update version to 3.5.0
__version__ = '3.5.0'
|
__version__ = '3.5.0'
Revert to 3.4.14.
The recent changes are minor fixes to address Ansible 2.4 and make
no functional changes at all. As this version has not been tagged or
released, the reversion *shouldn't* have a wider impact.
__version__ = '3.4.14'
|
from dez.http.application import HTTPApplication
from dez.logging import get_logger
from ...scripts.util import log as syslog
from ..util import *
from routes import static, cb
DWEB = None
class Web(HTTPApplication):
    """dez-backed web server wiring up the static and callback routes
    declared in the `routes` module."""

    def __init__(self, bind_address, port):
        # NOTE(review): `log` is not defined in this module's visible imports
        # (the import is `log as syslog`); presumably supplied by
        # `from ..util import *` — verify, otherwise this raises NameError.
        HTTPApplication.__init__(self, bind_address, port,
                                 get_logger("dez webserver", log), "dez/cantools")
        for key, val in static.items():
            self.add_static_rule(key, val)
        for key, val in cb.items():
            self.add_cb_rule(key, val)
def run_dez_webserver(host="localhost", port=8080):
    """Create the module-level Web instance and start serving."""
    global DWEB
    DWEB = Web(host, port)
    DWEB.start()

def get_dez_webserver():
    """Return the running Web instance (None before run_dez_webserver())."""
    return DWEB

if __name__ == "__main__":
    run_dez_webserver()
dez_webserver: respond/do_respond handler registration; dynamic request routing; memcache stuff; logging
from dez.http.application import HTTPApplication
from dez.logging import get_logger_getter
from dez.memcache import get_memcache
from ...scripts.util import log as syslog
from ..util import *
from routes import static, cb
DWEB = None
class Web(HTTPApplication):
    """dez-backed web server with static/callback routing, lazily imported
    per-route handlers, and a memcache client."""

    def __init__(self, bind_address, port):
        logger_getter = get_logger_getter("dez webserver", syslog)
        self.logger = logger_getter("Web (HTTPApplication)")
        HTTPApplication.__init__(self, bind_address, port, logger_getter, "dez/cantools")
        self.memcache = get_memcache()
        # Route whose handler module is currently being imported.
        self.curpath = None
        # route -> zero-argument handler callable.
        self.handlers = {}
        for key, val in static.items():
            self.add_static_rule(key, val)
        for key, val in cb.items():
            self.add_cb_rule(key, self._handler(key, val))

    def register_handler(self, args, kwargs):
        # Invoked (via the module-level respond()) while a handler module is
        # imported; binds the response spec to the route being loaded.
        # NOTE(review): `do_response` is not defined in this module's visible
        # imports; presumably provided by `from ..util import *` — confirm.
        self.logger.info("register handler: %s"%(self.curpath,))
        self.handlers[self.curpath] = lambda : do_response(*args, **kwargs)

    def _handler(self, rule, target):
        # Build the request callback for `rule`. The `target` module is
        # imported on first hit, which triggers register_handler() above.
        self.logger.info("setting handler: %s %s"%(rule, target))
        def h(req):
            self.logger.info("invoking handler: %s %s"%(rule, req.url))
            # Point the request I/O helpers at this request object.
            set_read(lambda : req.body)
            set_send(req.write)
            set_close(req.close)
            self.curpath = rule
            if rule not in self.handlers:
                __import__(target)
            self.handlers[rule]()
        return h
def run_dez_webserver(host="localhost", port=8080):
    """Create the module-level Web instance and start serving."""
    global DWEB
    DWEB = Web(host, port)
    DWEB.start()

def get_dez_webserver():
    """Return the running Web instance (None before run_dez_webserver())."""
    return DWEB

def respond(*args, **kwargs):
    # Called by handler modules at import time: forwards the response spec
    # to the Web instance, which binds it to the route being loaded.
    DWEB.register_handler(args, kwargs)
# memcache stuff
# Thin module-level wrappers around the running server's memcache client.
def getmem(key, tojson=True):
    """Fetch `key` from the server's memcache (`tojson` is forwarded to
    the client's get())."""
    return DWEB.memcache.get(key, tojson)

def setmem(key, val, fromjson=True):
    """Store `val` under `key` in the server's memcache.

    Fix: forward the `fromjson` parameter; the previous code passed the
    undefined name `tojson`, raising NameError on every call.
    """
    DWEB.memcache.set(key, val, fromjson)

def delmem(key):
    """Remove `key` from the server's memcache."""
    DWEB.memcache.rm(key)

def clearmem():
    """Flush the server's entire memcache."""
    DWEB.memcache.clear()

if __name__ == "__main__":
    run_dez_webserver()
import random
from collections import deque
from ..learner import Learner
from ..utils import max_qvalue
class QLearning(Learner):
    '''Tabular Q-learning'''

    def __init__(self, env, policy, qfunction, learning_rate=0.1,
                 discount_factor=1.0, **kwargs):
        super(QLearning, self).__init__(env, **kwargs)
        # Behavior policy used to pick actions during an episode.
        self.policy = policy
        # Table-like mapping: (state, action) -> Q-value.
        self.qfunction = qfunction
        # Step size (alpha) of the TD update.
        self.learning_rate = learning_rate
        # Discount (gamma) applied to the bootstrapped next-state value.
        self.discount_factor = discount_factor

    def best_qvalue(self, state):
        # Max Q-value over the actions available in `state`.
        return max_qvalue(state, self.env.actions(state), self.qfunction)

    ###########
    # Learner #
    ###########
    def episode(self):
        # One episode of off-policy TD(0) control:
        #   Q(s,a) += alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
        while not self.env.is_terminal():
            state = self.env.cur_state()
            action = self.policy.action(state)
            reward, next_state = self.env.do_action(action)
            best_qvalue = self.best_qvalue(next_state)
            target = reward + (self.discount_factor * best_qvalue)
            td_error = target - self.qfunction[state, action]
            self.qfunction[state, action] += self.learning_rate * td_error
class ApproximateQLearning(Learner):
    '''Q-learning with a function approximator, optionally training from a
    replay buffer of past transitions instead of only the latest one.'''
    def __init__(self, env, policy, qfunction, discount_factor=1.0,
                 experience_replay=True, **kwargs):
        super(ApproximateQLearning, self).__init__(env, **kwargs)
        self.policy = policy
        self.qfunction = qfunction
        self.discount_factor = discount_factor
        self.experience_replay = experience_replay
        if self.experience_replay:
            # Bounded buffer of (state, action, reward, next_state) samples.
            self.memory = deque(maxlen=10000)
    def best_qvalue(self, state):
        # Greedy value estimate over the actions available in `state`.
        return max_qvalue(state, self.env.actions(state), self.qfunction)
    ###########
    # Learner #
    ###########
    def episode(self):
        while not self.env.is_terminal():
            s = self.env.cur_state()
            a = self.policy.action(s)
            r, s2 = self.env.do_action(a)
            if self.experience_replay:
                # Record the fresh transition, then train on a random one.
                self.memory.append((s, a, r, s2))
                s, _, r, s2 = random.choice(self.memory)
            td_target = r + self.discount_factor * self.best_qvalue(s2)
            self.qfunction.update(s, a, td_target)
Add selfplay to qlearning
import random
from collections import deque

from ..learner import Learner
from ..utils import max_qvalue, min_qvalue
class QLearning(Learner):
    '''Tabular Q-learning.

    Chooses actions with the supplied policy and updates a table-backed
    Q-function toward one-step TD(0) targets.
    '''
    def __init__(self, env, policy, qfunction, learning_rate=0.1,
                 discount_factor=1.0, **kwargs):
        super(QLearning, self).__init__(env, **kwargs)
        self.policy = policy
        self.qfunction = qfunction
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
    def best_qvalue(self, state):
        # Greedy value estimate over the actions available in `state`.
        actions = self.env.actions(state)
        return max_qvalue(state, actions, self.qfunction)
    ###########
    # Learner #
    ###########
    def episode(self):
        # One full episode of on-line TD(0) control.
        while not self.env.is_terminal():
            s = self.env.cur_state()
            a = self.policy.action(s)
            r, s2 = self.env.do_action(a)
            target = r + self.discount_factor * self.best_qvalue(s2)
            error = target - self.qfunction[s, a]
            self.qfunction[s, a] += self.learning_rate * error
class QLearningSelfPlay(QLearning):
    '''
    A specialization of the Q-learning algorithm that assumes
    a Game environment. In standard Q-learning the best action
    is the one that selects the maximum reward. In this version
    we maximize for the first player of the game, and minimize
    for the second player.
    '''
    def best_qvalue(self, state):
        # Minimax-style backup: player 0 maximizes, any other player
        # minimizes when bootstrapping the TD target.
        best_qvalue = max_qvalue if state.cur_player() == 0 else min_qvalue
        return best_qvalue(state, self.env.actions(state), self.qfunction)
class ApproximateQLearning(Learner):
    '''Q-learning with a function approximator.

    Optionally trains from a bounded replay buffer of past transitions
    instead of only the most recent one.
    '''
    def __init__(self, env, policy, qfunction, discount_factor=1.0,
                 experience_replay=True, **kwargs):
        super(ApproximateQLearning, self).__init__(env, **kwargs)
        self.policy = policy
        self.qfunction = qfunction
        self.discount_factor = discount_factor
        self.experience_replay = experience_replay
        if self.experience_replay:
            # Bounded buffer of (state, action, reward, next_state) samples.
            self.memory = deque(maxlen=10000)
    def best_qvalue(self, state):
        # FIX: this class previously declared best_qvalue twice; the first
        # (self-play) definition was dead code because this definition
        # immediately shadowed it, and it referenced min_qvalue which was
        # not imported. The self-play behavior lives in
        # ApproximateQLearningSelfPlay, so the duplicate is removed here.
        # Runtime behavior is unchanged.
        return max_qvalue(state, self.env.actions(state), self.qfunction)
    ###########
    # Learner #
    ###########
    def episode(self):
        while not self.env.is_terminal():
            state = self.env.cur_state()
            action = self.policy.action(state)
            reward, next_state = self.env.do_action(action)
            if self.experience_replay:
                # Record the fresh transition, then train on a random one.
                self.memory.append((state, action, reward, next_state))
                state, _, reward, next_state = random.choice(self.memory)
            best_qvalue = self.best_qvalue(next_state)
            update = reward + (self.discount_factor * best_qvalue)
            self.qfunction.update(state, action, update)
class ApproximateQLearningSelfPlay(ApproximateQLearning):
    # Self-play variant for Game environments: player 0 maximizes and any
    # other player minimizes when bootstrapping the TD target.
    def best_qvalue(self, state):
        best_qvalue = max_qvalue if state.cur_player() == 0 else min_qvalue
        return best_qvalue(state, self.env.actions(state), self.qfunction)
|
unit testing start
|
import shutil
import subprocess
import os
import tempfile
from django.utils import timezone
from rq import get_current_job
from osmaxx.conversion._settings import CONVERSION_SETTINGS, odb_license, copying_notice, creative_commons_license
from osmaxx.conversion.converters.utils import zip_folders_relative, recursive_getsize
# Bundled java command-line tools (splitter.jar, mkgmap.jar) live next to this module.
_path_to_commandline_utils = os.path.join(os.path.dirname(__file__), 'command_line_utils')
# Precompiled boundary/sea tiles consumed by mkgmap via --bounds / --precomp-sea.
_path_to_bounds_zip = os.path.join(CONVERSION_SETTINGS['SEA_AND_BOUNDS_ZIP_DIRECTORY'], 'bounds.zip')
_path_to_sea_zip = os.path.join(CONVERSION_SETTINGS['SEA_AND_BOUNDS_ZIP_DIRECTORY'], 'sea.zip')
# NOTE(review): despite the "_zip" name this points at the plain-text geonames
# dump (cities1000.txt) -- confirm splitter accepts the .txt directly.
_path_to_geonames_zip = os.path.join(os.path.dirname(__file__), 'additional_data', 'cities1000.txt')
class Garmin:
    # Converts an OSM planet extract (clipped by a polygon) into a Garmin
    # map and zips the result together with the license files.
    # Pipeline: splitter.jar tiles the planet file, then mkgmap.jar builds
    # the Garmin map from those tiles, then the output directory is zipped.
    def __init__(self, *, out_zip_file_path, area_name, polyfile_string):
        self._resulting_zip_file_path = out_zip_file_path
        self._map_description = area_name
        # splitter expects the clipping polygon as a file on disk, so the
        # polyfile string is persisted to a temporary .poly file; the file
        # must stay open (and thus exist) until conversion finishes.
        self._osmosis_polygon_file = tempfile.NamedTemporaryFile(suffix='.poly', mode='w')
        self._osmosis_polygon_file.write(polyfile_string)
        self._osmosis_polygon_file.flush()
        self._polyfile_path = self._osmosis_polygon_file.name
        self._start_time = None
        self._unzipped_result_size = None
    def create_garmin_export(self):
        # Public entry point. When executed inside an RQ worker, duration and
        # unzipped result size are recorded on the current job's meta.
        self._start_time = timezone.now()
        self._to_garmin()
        self._osmosis_polygon_file.close()
        job = get_current_job()
        if job:
            job.meta['duration'] = timezone.now() - self._start_time
            job.meta['unzipped_result_size'] = self._unzipped_result_size
            job.save()
    def _to_garmin(self):
        # Run the two-stage conversion in a scratch dir and zip the result.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tmp_out_dir = os.path.join(tmp_dir, 'garmin')
            config_file_path = self._split(tmp_dir)
            self._produce_garmin(config_file_path, tmp_out_dir)
            self._create_zip(tmp_out_dir)
    def _split(self, workdir):
        # Tile the planet file with splitter.jar, clipped to the polygon.
        # Returns the path of the generated template.args file, which mkgmap
        # later consumes via --read-config.
        memory_option = '-Xmx7000m'  # large JVM heap for planet-scale input
        _splitter_path = os.path.abspath(os.path.join(_path_to_commandline_utils, 'splitter', 'splitter.jar'))
        subprocess.check_call([
            'java',
            memory_option,
            '-jar', _splitter_path,
            '--output-dir={0}'.format(workdir),
            '--description={0}'.format(self._map_description),
            '--geonames-file={0}'.format(_path_to_geonames_zip),
            '--polygon-file={}'.format(self._polyfile_path),
            CONVERSION_SETTINGS.get('PBF_PLANET_FILE_PATH'),
        ])
        config_file_path = os.path.join(workdir, 'template.args')
        return config_file_path
    def _produce_garmin(self, config_file_path, out_dir):
        out_dir = os.path.join(out_dir, 'garmin')  # hack to get a subdirectory in the zipfile.
        os.makedirs(out_dir, exist_ok=True)
        # Ship licensing/attribution files alongside the map data.
        shutil.copy(copying_notice, out_dir)
        shutil.copy(odb_license, out_dir)
        shutil.copy(creative_commons_license, out_dir)
        _mkgmap_path = os.path.abspath(os.path.join(_path_to_commandline_utils, 'mkgmap', 'mkgmap.jar'))
        mkg_map_command = ['java', '-jar', _mkgmap_path]
        output_dir = ['--output-dir={0}'.format(out_dir)]
        config = [
            '--bounds={0}'.format(_path_to_bounds_zip),
            '--precomp-sea={0}'.format(_path_to_sea_zip),
            '--read-config={0}'.format(config_file_path),
        ]
        subprocess.check_call(
            mkg_map_command +
            output_dir +
            config
        )
        self._unzipped_result_size = recursive_getsize(out_dir)
    def _create_zip(self, data_dir):
        # Zip the produced directory tree (relative paths) to the target path.
        zip_folders_relative([data_dir], self._resulting_zip_file_path)
provide garmin gmapsupp
import shutil
import subprocess
import os
import tempfile
from django.utils import timezone
from rq import get_current_job
from osmaxx.conversion._settings import CONVERSION_SETTINGS, odb_license, copying_notice, creative_commons_license
from osmaxx.conversion.converters.utils import zip_folders_relative, recursive_getsize
# Bundled java command-line tools (splitter.jar, mkgmap.jar) live next to this module.
_path_to_commandline_utils = os.path.join(os.path.dirname(__file__), 'command_line_utils')
# Precompiled boundary/sea tiles consumed by mkgmap via --bounds / --precomp-sea.
_path_to_bounds_zip = os.path.join(CONVERSION_SETTINGS['SEA_AND_BOUNDS_ZIP_DIRECTORY'], 'bounds.zip')
_path_to_sea_zip = os.path.join(CONVERSION_SETTINGS['SEA_AND_BOUNDS_ZIP_DIRECTORY'], 'sea.zip')
# NOTE(review): despite the "_zip" name this points at the plain-text geonames
# dump (cities1000.txt) -- confirm splitter accepts the .txt directly.
_path_to_geonames_zip = os.path.join(os.path.dirname(__file__), 'additional_data', 'cities1000.txt')
class Garmin:
    # Converts an OSM planet extract (clipped by a polygon) into a Garmin
    # map and zips the result together with the license files.
    # Pipeline: splitter.jar tiles the planet file, then mkgmap.jar builds
    # the Garmin map (including a combined gmapsupp.img) from those tiles,
    # then the output directory is zipped.
    def __init__(self, *, out_zip_file_path, area_name, polyfile_string):
        self._resulting_zip_file_path = out_zip_file_path
        self._map_description = area_name
        # splitter expects the clipping polygon as a file on disk, so the
        # polyfile string is persisted to a temporary .poly file; the file
        # must stay open (and thus exist) until conversion finishes.
        self._osmosis_polygon_file = tempfile.NamedTemporaryFile(suffix='.poly', mode='w')
        self._osmosis_polygon_file.write(polyfile_string)
        self._osmosis_polygon_file.flush()
        self._polyfile_path = self._osmosis_polygon_file.name
        self._start_time = None
        self._unzipped_result_size = None
    def create_garmin_export(self):
        # Public entry point. When executed inside an RQ worker, duration and
        # unzipped result size are recorded on the current job's meta.
        self._start_time = timezone.now()
        self._to_garmin()
        self._osmosis_polygon_file.close()
        job = get_current_job()
        if job:
            job.meta['duration'] = timezone.now() - self._start_time
            job.meta['unzipped_result_size'] = self._unzipped_result_size
            job.save()
    def _to_garmin(self):
        # Run the two-stage conversion in a scratch dir and zip the result.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tmp_out_dir = os.path.join(tmp_dir, 'garmin')
            config_file_path = self._split(tmp_dir)
            self._produce_garmin(config_file_path, tmp_out_dir)
            self._create_zip(tmp_out_dir)
    def _split(self, workdir):
        # Tile the planet file with splitter.jar, clipped to the polygon.
        # Returns the path of the generated template.args file, which mkgmap
        # later consumes via --read-config.
        memory_option = '-Xmx7000m'  # large JVM heap for planet-scale input
        _splitter_path = os.path.abspath(os.path.join(_path_to_commandline_utils, 'splitter', 'splitter.jar'))
        subprocess.check_call([
            'java',
            memory_option,
            '-jar', _splitter_path,
            '--output-dir={0}'.format(workdir),
            '--description={0}'.format(self._map_description),
            '--geonames-file={0}'.format(_path_to_geonames_zip),
            '--polygon-file={}'.format(self._polyfile_path),
            CONVERSION_SETTINGS.get('PBF_PLANET_FILE_PATH'),
        ])
        config_file_path = os.path.join(workdir, 'template.args')
        return config_file_path
    def _produce_garmin(self, config_file_path, out_dir):
        out_dir = os.path.join(out_dir, 'garmin')  # hack to get a subdirectory in the zipfile.
        os.makedirs(out_dir, exist_ok=True)
        # Ship licensing/attribution files alongside the map data.
        shutil.copy(copying_notice, out_dir)
        shutil.copy(odb_license, out_dir)
        shutil.copy(creative_commons_license, out_dir)
        _mkgmap_path = os.path.abspath(os.path.join(_path_to_commandline_utils, 'mkgmap', 'mkgmap.jar'))
        mkg_map_command = ['java', '-jar', _mkgmap_path]
        output_dir = ['--output-dir={0}'.format(out_dir)]
        config = [
            '--bounds={0}'.format(_path_to_bounds_zip),
            '--precomp-sea={0}'.format(_path_to_sea_zip),
            '--read-config={0}'.format(config_file_path),
            '--gmapsupp',  # additionally emit a single gmapsupp.img for direct copy onto a device
        ]
        subprocess.check_call(
            mkg_map_command +
            output_dir +
            config
        )
        self._unzipped_result_size = recursive_getsize(out_dir)
    def _create_zip(self, data_dir):
        # Zip the produced directory tree (relative paths) to the target path.
        zip_folders_relative([data_dir], self._resulting_zip_file_path)
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
import os
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
# Import salt libs
import salt.modules.pip as pip
from salt.exceptions import CommandExecutionError
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PipTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {pip: {'__salt__': {'cmd.which_bin': lambda _: 'pip'}}}
def test_fix4361(self):
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(requirements='requirements.txt')
expected_cmd = ['pip', 'install', '--requirement',
'requirements.txt']
mock.assert_called_once_with(
expected_cmd,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_editable_without_egg_fails(self):
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
self.assertRaises(
CommandExecutionError,
pip.install,
editable='git+https://github.com/saltstack/salt-testing.git'
)
def test_install_multiple_editable(self):
editables = [
'git+https://github.com/jek/blinker.git#egg=Blinker',
'git+https://github.com/saltstack/salt-testing.git#egg=SaltTesting'
]
expected = ['pip', 'install']
for item in editables:
expected.extend(['--editable', item])
# Passing editables as a list
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(editable=editables)
mock.assert_called_once_with(
expected,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Passing editables as a comma separated list
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(editable=','.join(editables))
mock.assert_called_once_with(
expected,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_multiple_pkgs_and_editables(self):
pkgs = ['pep8', 'salt']
editables = [
'git+https://github.com/jek/blinker.git#egg=Blinker',
'git+https://github.com/saltstack/salt-testing.git#egg=SaltTesting'
]
expected = ['pip', 'install'] + pkgs
for item in editables:
expected.extend(['--editable', item])
# Passing editables as a list
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkgs=pkgs, editable=editables)
mock.assert_called_once_with(
expected,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Passing editables as a comma separated list
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkgs=','.join(pkgs), editable=','.join(editables))
mock.assert_called_once_with(
expected,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# As single string (just use the first element from pkgs and editables)
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkgs=pkgs[0], editable=editables[0])
mock.assert_called_once_with(
['pip', 'install', pkgs[0], '--editable', editables[0]],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_issue5940_install_multiple_pip_mirrors(self):
'''
test multiple pip mirrors. This test only works with pip < 7.0.0
'''
with patch.object(pip, 'version', MagicMock(return_value='1.4')):
mirrors = [
'http://g.pypi.python.org',
'http://c.pypi.python.org',
'http://pypi.crate.io'
]
expected = ['pip', 'install', '--use-mirrors']
for item in mirrors:
expected.extend(['--mirrors', item])
# Passing mirrors as a list
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(mirrors=mirrors)
mock.assert_called_once_with(
expected,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Passing mirrors as a comma separated list
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(mirrors=','.join(mirrors))
mock.assert_called_once_with(
expected,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# As single string (just use the first element from mirrors)
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(mirrors=mirrors[0])
mock.assert_called_once_with(
['pip', 'install', '--use-mirrors', '--mirrors', mirrors[0]],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_with_multiple_find_links(self):
find_links = [
'http://g.pypi.python.org',
'http://c.pypi.python.org',
'http://pypi.crate.io'
]
pkg = 'pep8'
expected = ['pip', 'install']
for item in find_links:
expected.extend(['--find-links', item])
expected.append(pkg)
# Passing mirrors as a list
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, find_links=find_links)
mock.assert_called_once_with(
expected,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Passing mirrors as a comma separated list
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, find_links=','.join(find_links))
mock.assert_called_once_with(
expected,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# As single string (just use the first element from find_links)
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, find_links=find_links[0])
mock.assert_called_once_with(
['pip', 'install', '--find-links', find_links[0], pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Invalid proto raises exception
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
self.assertRaises(
CommandExecutionError,
pip.install,
'\'' + pkg + '\'',
find_links='sftp://pypi.crate.io'
)
# Valid protos work?
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, find_links=find_links)
mock.assert_called_once_with(
expected,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_no_index_with_index_url_or_extra_index_url_raises(self):
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
self.assertRaises(
CommandExecutionError,
pip.install, no_index=True, index_url='http://foo.tld'
)
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
self.assertRaises(
CommandExecutionError,
pip.install, no_index=True, extra_index_url='http://foo.tld'
)
def test_install_failed_cached_requirements(self):
with patch('salt.modules.pip._get_cached_requirements') as get_cached_requirements:
get_cached_requirements.return_value = False
ret = pip.install(requirements='salt://my_test_reqs')
self.assertEqual(False, ret['result'])
self.assertIn('my_test_reqs', ret['comment'])
def test_install_cached_requirements_used(self):
with patch('salt.modules.pip._get_cached_requirements') as get_cached_requirements:
get_cached_requirements.return_value = 'my_cached_reqs'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(requirements='salt://requirements.txt')
expected = ['pip', 'install', '--requirement', 'my_cached_reqs']
mock.assert_called_once_with(
expected,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_venv(self):
with patch('os.path') as mock_path:
mock_path.is_file.return_value = True
mock_path.isdir.return_value = True
pkg = 'mock'
venv_path = '/test_env'
def join(*args):
return '/'.join(args)
mock_path.join = join
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, bin_env=venv_path)
mock.assert_called_once_with(
[os.path.join(venv_path, 'bin', 'pip'), 'install', pkg],
env={'VIRTUAL_ENV': '/test_env'},
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_log_argument_in_resulting_command(self):
with patch('os.access') as mock_path:
pkg = 'pep8'
log_path = '/tmp/pip-install.log'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, log=log_path)
mock.assert_called_once_with(
['pip', 'install', '--log', log_path, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_non_writeable_log(self):
with patch('os.path') as mock_path:
# Let's fake a non-writable log file
pkg = 'pep8'
log_path = '/tmp/pip-install.log'
mock_path.exists.side_effect = IOError('Fooo!')
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
self.assertRaises(
IOError,
pip.install,
pkg,
log=log_path
)
def test_install_timeout_argument_in_resulting_command(self):
# Passing an int
pkg = 'pep8'
expected_prefix = ['pip', 'install', '--timeout']
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, timeout=10)
mock.assert_called_once_with(
expected_prefix + [10, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Passing an int as a string
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, timeout='10')
mock.assert_called_once_with(
expected_prefix + ['10', pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Passing a non-int to timeout
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
self.assertRaises(
ValueError,
pip.install,
pkg,
timeout='a'
)
def test_install_index_url_argument_in_resulting_command(self):
pkg = 'pep8'
index_url = 'http://foo.tld'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, index_url=index_url)
mock.assert_called_once_with(
['pip', 'install', '--index-url', index_url, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_extra_index_url_argument_in_resulting_command(self):
pkg = 'pep8'
extra_index_url = 'http://foo.tld'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, extra_index_url=extra_index_url)
mock.assert_called_once_with(
['pip', 'install', '--extra-index-url', extra_index_url, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_no_index_argument_in_resulting_command(self):
pkg = 'pep8'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, no_index=True)
mock.assert_called_once_with(
['pip', 'install', '--no-index', pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_build_argument_in_resulting_command(self):
pkg = 'pep8'
build = '/tmp/foo'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, build=build)
mock.assert_called_once_with(
['pip', 'install', '--build', build, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_target_argument_in_resulting_command(self):
pkg = 'pep8'
target = '/tmp/foo'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, target=target)
mock.assert_called_once_with(
['pip', 'install', '--target', target, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_download_argument_in_resulting_command(self):
pkg = 'pep8'
download = '/tmp/foo'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, download=download)
mock.assert_called_once_with(
['pip', 'install', '--download', download, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_no_download_argument_in_resulting_command(self):
pkg = 'pep8'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, no_download=True)
mock.assert_called_once_with(
['pip', 'install', '--no-download', pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_download_cache_dir_arguments_in_resulting_command(self):
pkg = 'pep8'
cache_dir_arg_mapping = {
'1.5.6': '--download-cache',
'6.0': '--cache-dir',
}
download_cache = '/tmp/foo'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
for pip_version, cmd_arg in cache_dir_arg_mapping.items():
with patch('salt.modules.pip.version',
MagicMock(return_value=pip_version)):
# test `download_cache` kwarg
pip.install(pkg, download_cache='/tmp/foo')
mock.assert_called_with(
['pip', 'install', cmd_arg, download_cache, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# test `cache_dir` kwarg
pip.install(pkg, cache_dir='/tmp/foo')
mock.assert_called_with(
['pip', 'install', cmd_arg, download_cache, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_source_argument_in_resulting_command(self):
pkg = 'pep8'
source = '/tmp/foo'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, source=source)
mock.assert_called_once_with(
['pip', 'install', '--source', source, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_exists_action_argument_in_resulting_command(self):
pkg = 'pep8'
for action in ('s', 'i', 'w', 'b'):
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install('pep8', exists_action=action)
mock.assert_called_once_with(
['pip', 'install', '--exists-action', action, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Test for invalid action
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
self.assertRaises(
CommandExecutionError,
pip.install,
pkg,
exists_action='d'
)
def test_install_install_options_argument_in_resulting_command(self):
install_options = [
'--exec-prefix=/foo/bar',
'--install-scripts=/foo/bar/bin'
]
pkg = 'pep8'
expected = ['pip', 'install']
for item in install_options:
expected.extend(['--install-option', item])
expected.append(pkg)
# Passing options as a list
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, install_options=install_options)
mock.assert_called_once_with(
expected,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Passing mirrors as a comma separated list
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, install_options=','.join(install_options))
mock.assert_called_once_with(
expected,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Passing mirrors as a single string entry
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, install_options=install_options[0])
mock.assert_called_once_with(
['pip', 'install', '--install-option',
install_options[0], pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_global_options_argument_in_resulting_command(self):
global_options = [
'--quiet',
'--no-user-cfg'
]
pkg = 'pep8'
expected = ['pip', 'install']
for item in global_options:
expected.extend(['--global-option', item])
expected.append(pkg)
# Passing options as a list
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, global_options=global_options)
mock.assert_called_once_with(
expected,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Passing mirrors as a comma separated list
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, global_options=','.join(global_options))
mock.assert_called_once_with(
expected,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Passing mirrors as a single string entry
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, global_options=global_options[0])
mock.assert_called_once_with(
['pip', 'install', '--global-option', global_options[0], pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_upgrade_argument_in_resulting_command(self):
pkg = 'pep8'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, upgrade=True)
mock.assert_called_once_with(
['pip', 'install', '--upgrade', pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_force_reinstall_argument_in_resulting_command(self):
pkg = 'pep8'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, force_reinstall=True)
mock.assert_called_once_with(
['pip', 'install', '--force-reinstall', pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_ignore_installed_argument_in_resulting_command(self):
pkg = 'pep8'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, ignore_installed=True)
mock.assert_called_once_with(
['pip', 'install', '--ignore-installed', pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_no_deps_argument_in_resulting_command(self):
pkg = 'pep8'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, no_deps=True)
mock.assert_called_once_with(
['pip', 'install', '--no-deps', pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_no_install_argument_in_resulting_command(self):
pkg = 'pep8'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, no_install=True)
mock.assert_called_once_with(
['pip', 'install', '--no-install', pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_proxy_argument_in_resulting_command(self):
pkg = 'pep8'
proxy = 'salt-user:salt-passwd@salt-proxy:3128'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, proxy=proxy)
mock.assert_called_once_with(
['pip', 'install', '--proxy', proxy, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_multiple_requirements_arguments_in_resulting_command(self):
with patch('salt.modules.pip._get_cached_requirements') as get_cached_requirements:
cached_reqs = [
'my_cached_reqs-1', 'my_cached_reqs-2'
]
get_cached_requirements.side_effect = cached_reqs
requirements = [
'salt://requirements-1.txt', 'salt://requirements-2.txt'
]
expected = ['pip', 'install']
for item in cached_reqs:
expected.extend(['--requirement', item])
# Passing option as a list
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(requirements=requirements)
mock.assert_called_once_with(
expected,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Passing option as a comma separated list
get_cached_requirements.side_effect = cached_reqs
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(requirements=','.join(requirements))
mock.assert_called_once_with(
expected,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Passing option as a single string entry
get_cached_requirements.side_effect = [cached_reqs[0]]
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(requirements=requirements[0])
mock.assert_called_once_with(
['pip', 'install', '--requirement', cached_reqs[0]],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
    def test_uninstall_multiple_requirements_arguments_in_resulting_command(self):
        '''
        ``pip.uninstall`` accepts ``requirements`` in the same three shapes
        as install (list, comma separated string, single entry) and always
        passes ``-y`` plus one ``--requirement`` per cached file.
        '''
        with patch('salt.modules.pip._get_cached_requirements') as get_cached_requirements:
            cached_reqs = [
                'my_cached_reqs-1', 'my_cached_reqs-2'
            ]
            get_cached_requirements.side_effect = cached_reqs
            requirements = [
                'salt://requirements-1.txt', 'salt://requirements-2.txt'
            ]
            expected = ['pip', 'uninstall', '-y']
            for item in cached_reqs:
                expected.extend(['--requirement', item])
            # Passing option as a list
            mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
                pip.uninstall(requirements=requirements)
                mock.assert_called_once_with(
                    expected,
                    cwd=None,
                    saltenv='base',
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
            # Passing option as a comma separated list.
            # The side_effect sequence above is consumed; re-arm it.
            get_cached_requirements.side_effect = cached_reqs
            mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
                pip.uninstall(requirements=','.join(requirements))
                mock.assert_called_once_with(
                    expected,
                    cwd=None,
                    saltenv='base',
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
            # Passing option as a single string entry
            get_cached_requirements.side_effect = [cached_reqs[0]]
            mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
                pip.uninstall(requirements=requirements[0])
                mock.assert_called_once_with(
                    ['pip', 'uninstall', '-y', '--requirement', cached_reqs[0]],
                    cwd=None,
                    saltenv='base',
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
def test_uninstall_proxy_argument_in_resulting_command(self):
pkg = 'pep8'
proxy = 'salt-user:salt-passwd@salt-proxy:3128'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.uninstall(pkg, proxy=proxy)
mock.assert_called_once_with(
['pip', 'uninstall', '-y', '--proxy', proxy, pkg],
saltenv='base',
cwd=None,
runas=None,
use_vt=False,
python_shell=False,
)
    def test_uninstall_log_argument_in_resulting_command(self):
        '''
        ``log`` adds ``--log <path>`` to the uninstall command; a log path
        whose existence check blows up propagates the IOError.
        '''
        with patch('os.path') as mock_path:
            pkg = 'pep8'
            log_path = '/tmp/pip-install.log'
            mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
                pip.uninstall(pkg, log=log_path)
                mock.assert_called_once_with(
                    ['pip', 'uninstall', '-y', '--log', log_path, pkg],
                    saltenv='base',
                    cwd=None,
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
            # Let's fake a non-writable log file
            mock_path.exists.side_effect = IOError('Fooo!')
            mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
                self.assertRaises(
                    IOError,
                    pip.uninstall,
                    pkg,
                    log=log_path
                )
def test_uninstall_timeout_argument_in_resulting_command(self):
pkg = 'pep8'
expected_prefix = ['pip', 'uninstall', '-y', '--timeout']
# Passing an int
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.uninstall(pkg, timeout=10)
mock.assert_called_once_with(
expected_prefix + [10, pkg],
cwd=None,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Passing an int as a string
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.uninstall(pkg, timeout='10')
mock.assert_called_once_with(
expected_prefix + ['10', pkg],
cwd=None,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Passing a non-int to timeout
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
self.assertRaises(
ValueError,
pip.uninstall,
pkg,
timeout='a'
)
    def test_freeze_command(self):
        '''
        ``pip.freeze`` returns the raw freeze lines; with pip < 9 no
        ``--all`` flag is added, and a non-zero retcode raises
        ``CommandExecutionError``.
        '''
        eggs = [
            'M2Crypto==0.21.1',
            '-e git+git@github.com:s0undt3ch/salt-testing.git@9ed81aa2f918d59d3706e56b18f0782d1ea43bf8#egg=SaltTesting-dev',
            'bbfreeze==1.1.0',
            'bbfreeze-loader==1.1.0',
            'pycrypto==2.6'
        ]
        mock = MagicMock(
            return_value={
                'retcode': 0,
                'stdout': '\n'.join(eggs)
            }
        )
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            with patch('salt.modules.pip.version',
                       MagicMock(return_value='6.1.1')):
                ret = pip.freeze()
                mock.assert_called_once_with(
                    ['pip', 'freeze'],
                    cwd=None,
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
                self.assertEqual(ret, eggs)
        # Non zero returncode raises exception?
        mock = MagicMock(return_value={'retcode': 1, 'stderr': 'CABOOOOMMM!'})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            with patch('salt.modules.pip.version',
                       MagicMock(return_value='6.1.1')):
                self.assertRaises(
                    CommandExecutionError,
                    pip.freeze,
                )
    def test_freeze_command_with_all(self):
        '''
        With pip >= 9 ``pip.freeze`` adds ``--all`` so pip/setuptools show
        up in the output; failures still raise ``CommandExecutionError``.
        '''
        eggs = [
            'M2Crypto==0.21.1',
            '-e git+git@github.com:s0undt3ch/salt-testing.git@9ed81aa2f918d59d3706e56b18f0782d1ea43bf8#egg=SaltTesting-dev',
            'bbfreeze==1.1.0',
            'bbfreeze-loader==1.1.0',
            'pip==0.9.1',
            'pycrypto==2.6',
            'setuptools==20.10.1'
        ]
        mock = MagicMock(
            return_value={
                'retcode': 0,
                'stdout': '\n'.join(eggs)
            }
        )
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            with patch('salt.modules.pip.version',
                       MagicMock(return_value='9.0.1')):
                ret = pip.freeze()
                mock.assert_called_once_with(
                    ['pip', 'freeze', '--all'],
                    cwd=None,
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
                self.assertEqual(ret, eggs)
        # Non zero returncode raises exception?
        mock = MagicMock(return_value={'retcode': 1, 'stderr': 'CABOOOOMMM!'})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            with patch('salt.modules.pip.version',
                       MagicMock(return_value='9.0.1')):
                self.assertRaises(
                    CommandExecutionError,
                    pip.freeze,
                )
    def test_list_command(self):
        '''
        ``pip.list_`` parses freeze output into a name->version dict; the
        pip entry itself comes from ``pip.version`` when freeze (pip < 9)
        does not report it.
        '''
        eggs = [
            'M2Crypto==0.21.1',
            '-e git+git@github.com:s0undt3ch/salt-testing.git@9ed81aa2f918d59d3706e56b18f0782d1ea43bf8#egg=SaltTesting-dev',
            'bbfreeze==1.1.0',
            'bbfreeze-loader==1.1.0',
            'pycrypto==2.6'
        ]
        mock_version = '6.1.1'
        mock = MagicMock(return_value={'retcode': 0, 'stdout': '\n'.join(eggs)})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            with patch('salt.modules.pip.version',
                       MagicMock(return_value=mock_version)):
                ret = pip.list_()
                mock.assert_called_with(
                    ['pip', 'freeze'],
                    cwd=None,
                    runas=None,
                    python_shell=False,
                    use_vt=False,
                )
                self.assertEqual(
                    ret, {
                        'SaltTesting-dev': 'git+git@github.com:s0undt3ch/salt-testing.git@9ed81aa2f918d59d3706e56b18f0782d1ea43bf8',
                        'M2Crypto': '0.21.1',
                        'bbfreeze-loader': '1.1.0',
                        'bbfreeze': '1.1.0',
                        'pip': mock_version,
                        'pycrypto': '2.6'
                    }
                )
        # Non zero returncode raises exception?
        mock = MagicMock(return_value={'retcode': 1, 'stderr': 'CABOOOOMMM!'})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            with patch('salt.modules.pip.version',
                       MagicMock(return_value='6.1.1')):
                self.assertRaises(
                    CommandExecutionError,
                    pip.list_,
                )
    def test_list_command_with_all(self):
        '''
        With pip >= 9 the freeze output itself contains pip's version, so
        ``pip.list_`` must prefer that over ``pip.version``.
        '''
        eggs = [
            'M2Crypto==0.21.1',
            '-e git+git@github.com:s0undt3ch/salt-testing.git@9ed81aa2f918d59d3706e56b18f0782d1ea43bf8#egg=SaltTesting-dev',
            'bbfreeze==1.1.0',
            'bbfreeze-loader==1.1.0',
            'pip==9.0.1',
            'pycrypto==2.6',
            'setuptools==20.10.1'
        ]
        # N.B.: this is deliberately different from the "output" of pip freeze.
        # This is to demonstrate that the version reported comes from freeze
        # instead of from the pip.version function.
        mock_version = '9.0.0'
        mock = MagicMock(return_value={'retcode': 0, 'stdout': '\n'.join(eggs)})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            with patch('salt.modules.pip.version',
                       MagicMock(return_value=mock_version)):
                ret = pip.list_()
                mock.assert_called_with(
                    ['pip', 'freeze', '--all'],
                    cwd=None,
                    runas=None,
                    python_shell=False,
                    use_vt=False,
                )
                self.assertEqual(
                    ret, {
                        'SaltTesting-dev': 'git+git@github.com:s0undt3ch/salt-testing.git@9ed81aa2f918d59d3706e56b18f0782d1ea43bf8',
                        'M2Crypto': '0.21.1',
                        'bbfreeze-loader': '1.1.0',
                        'bbfreeze': '1.1.0',
                        'pip': '9.0.1',
                        'pycrypto': '2.6',
                        'setuptools': '20.10.1'
                    }
                )
        # Non zero returncode raises exception?
        mock = MagicMock(return_value={'retcode': 1, 'stderr': 'CABOOOOMMM!'})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            with patch('salt.modules.pip.version',
                       MagicMock(return_value='6.1.1')):
                self.assertRaises(
                    CommandExecutionError,
                    pip.list_,
                )
def test_list_command_with_prefix(self):
eggs = [
'M2Crypto==0.21.1',
'-e git+git@github.com:s0undt3ch/salt-testing.git@9ed81aa2f918d59d3706e56b18f0782d1ea43bf8#egg=SaltTesting-dev',
'bbfreeze==1.1.0',
'bbfreeze-loader==1.1.0',
'pycrypto==2.6'
]
mock = MagicMock(
return_value={
'retcode': 0,
'stdout': '\n'.join(eggs)
}
)
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
with patch('salt.modules.pip.version',
MagicMock(return_value='6.1.1')):
ret = pip.list_(prefix='bb')
mock.assert_called_with(
['pip', 'freeze'],
cwd=None,
runas=None,
python_shell=False,
use_vt=False,
)
self.assertEqual(
ret, {
'bbfreeze-loader': '1.1.0',
'bbfreeze': '1.1.0',
}
)
    def test_install_pre_argument_in_resulting_command(self):
        '''
        ``pre_releases=True`` only yields ``--pre`` for pip >= 1.4; older
        versions silently drop it.
        '''
        pkg = 'pep8'
        # Lower than 1.4 versions don't end-up with `--pre` in the resulting
        # output
        mock = MagicMock(side_effect=[
            {'retcode': 0, 'stdout': 'pip 1.2.0 /path/to/site-packages/pip'},
            {'retcode': 0, 'stdout': ''}
        ])
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            with patch('salt.modules.pip.version',
                       MagicMock(return_value='1.3')):
                pip.install(pkg, pre_releases=True)
                mock.assert_called_with(
                    ['pip', 'install', pkg],
                    saltenv='base',
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
        # pip >= 1.4: `--pre` must be present.
        mock_run = MagicMock(return_value='pip 1.4.1 /path/to/site-packages/pip')
        mock_run_all = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run': mock_run,
                                       'cmd.run_all': mock_run_all}):
            with patch('salt.modules.pip._get_pip_bin',
                       MagicMock(return_value='pip')):
                pip.install(pkg, pre_releases=True)
                mock_run_all.assert_called_with(
                    ['pip', 'install', '--pre', pkg],
                    saltenv='base',
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
Fix unit tests for test_pip
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
import os
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
# Import salt libs
import salt.utils
import salt.modules.pip as pip
from salt.exceptions import CommandExecutionError
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PipTestCase(TestCase, LoaderModuleMockMixin):
    def setup_loader_modules(self):
        '''Give the pip module a minimal ``__salt__`` that resolves the pip binary.'''
        return {pip: {'__salt__': {'cmd.which_bin': lambda _: 'pip'}}}
def test_fix4361(self):
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(requirements='requirements.txt')
expected_cmd = ['pip', 'install', '--requirement',
'requirements.txt']
mock.assert_called_once_with(
expected_cmd,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_editable_without_egg_fails(self):
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
self.assertRaises(
CommandExecutionError,
pip.install,
editable='git+https://github.com/saltstack/salt-testing.git'
)
    def test_install_multiple_editable(self):
        '''
        ``editable`` accepts a list or a comma separated string; every entry
        gets its own ``--editable`` flag.
        '''
        editables = [
            'git+https://github.com/jek/blinker.git#egg=Blinker',
            'git+https://github.com/saltstack/salt-testing.git#egg=SaltTesting'
        ]
        expected = ['pip', 'install']
        for item in editables:
            expected.extend(['--editable', item])
        # Passing editables as a list
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            pip.install(editable=editables)
            mock.assert_called_once_with(
                expected,
                saltenv='base',
                runas=None,
                use_vt=False,
                python_shell=False,
            )
        # Passing editables as a comma separated list
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            pip.install(editable=','.join(editables))
            mock.assert_called_once_with(
                expected,
                saltenv='base',
                runas=None,
                use_vt=False,
                python_shell=False,
            )
    def test_install_multiple_pkgs_and_editables(self):
        '''
        Packages come first on the command line, followed by one
        ``--editable`` per editable entry; both accept list, comma separated
        string and single-entry forms.
        '''
        pkgs = ['pep8', 'salt']
        editables = [
            'git+https://github.com/jek/blinker.git#egg=Blinker',
            'git+https://github.com/saltstack/salt-testing.git#egg=SaltTesting'
        ]
        expected = ['pip', 'install'] + pkgs
        for item in editables:
            expected.extend(['--editable', item])
        # Passing editables as a list
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            pip.install(pkgs=pkgs, editable=editables)
            mock.assert_called_once_with(
                expected,
                saltenv='base',
                runas=None,
                use_vt=False,
                python_shell=False,
            )
        # Passing editables as a comma separated list
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            pip.install(pkgs=','.join(pkgs), editable=','.join(editables))
            mock.assert_called_once_with(
                expected,
                saltenv='base',
                runas=None,
                use_vt=False,
                python_shell=False,
            )
        # As single string (just use the first element from pkgs and editables)
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            pip.install(pkgs=pkgs[0], editable=editables[0])
            mock.assert_called_once_with(
                ['pip', 'install', pkgs[0], '--editable', editables[0]],
                saltenv='base',
                runas=None,
                use_vt=False,
                python_shell=False,
            )
    def test_issue5940_install_multiple_pip_mirrors(self):
        '''
        test multiple pip mirrors. This test only works with pip < 7.0.0
        (``--use-mirrors``/``--mirrors`` were removed in pip 7).
        '''
        with patch.object(pip, 'version', MagicMock(return_value='1.4')):
            mirrors = [
                'http://g.pypi.python.org',
                'http://c.pypi.python.org',
                'http://pypi.crate.io'
            ]
            expected = ['pip', 'install', '--use-mirrors']
            for item in mirrors:
                expected.extend(['--mirrors', item])
            # Passing mirrors as a list
            mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
                pip.install(mirrors=mirrors)
                mock.assert_called_once_with(
                    expected,
                    saltenv='base',
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
            # Passing mirrors as a comma separated list
            mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
                pip.install(mirrors=','.join(mirrors))
                mock.assert_called_once_with(
                    expected,
                    saltenv='base',
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
            # As single string (just use the first element from mirrors)
            mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
                pip.install(mirrors=mirrors[0])
                mock.assert_called_once_with(
                    ['pip', 'install', '--use-mirrors', '--mirrors', mirrors[0]],
                    saltenv='base',
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
    def test_install_with_multiple_find_links(self):
        '''
        ``find_links`` accepts a list, a comma separated string or a single
        URL; unknown URL schemes raise ``CommandExecutionError``.
        '''
        find_links = [
            'http://g.pypi.python.org',
            'http://c.pypi.python.org',
            'http://pypi.crate.io'
        ]
        pkg = 'pep8'
        expected = ['pip', 'install']
        for item in find_links:
            expected.extend(['--find-links', item])
        expected.append(pkg)
        # Passing find_links as a list
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            pip.install(pkg, find_links=find_links)
            mock.assert_called_once_with(
                expected,
                saltenv='base',
                runas=None,
                use_vt=False,
                python_shell=False,
            )
        # Passing find_links as a comma separated list
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            pip.install(pkg, find_links=','.join(find_links))
            mock.assert_called_once_with(
                expected,
                saltenv='base',
                runas=None,
                use_vt=False,
                python_shell=False,
            )
        # As single string (just use the first element from find_links)
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            pip.install(pkg, find_links=find_links[0])
            mock.assert_called_once_with(
                ['pip', 'install', '--find-links', find_links[0], pkg],
                saltenv='base',
                runas=None,
                use_vt=False,
                python_shell=False,
            )
        # Invalid proto raises exception
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            self.assertRaises(
                CommandExecutionError,
                pip.install,
                '\'' + pkg + '\'',
                find_links='sftp://pypi.crate.io'
            )
        # Valid protos work?
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            pip.install(pkg, find_links=find_links)
            mock.assert_called_once_with(
                expected,
                saltenv='base',
                runas=None,
                use_vt=False,
                python_shell=False,
            )
def test_install_no_index_with_index_url_or_extra_index_url_raises(self):
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
self.assertRaises(
CommandExecutionError,
pip.install, no_index=True, index_url='http://foo.tld'
)
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
self.assertRaises(
CommandExecutionError,
pip.install, no_index=True, extra_index_url='http://foo.tld'
)
def test_install_failed_cached_requirements(self):
with patch('salt.modules.pip._get_cached_requirements') as get_cached_requirements:
get_cached_requirements.return_value = False
ret = pip.install(requirements='salt://my_test_reqs')
self.assertEqual(False, ret['result'])
self.assertIn('my_test_reqs', ret['comment'])
def test_install_cached_requirements_used(self):
with patch('salt.modules.pip._get_cached_requirements') as get_cached_requirements:
get_cached_requirements.return_value = 'my_cached_reqs'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(requirements='salt://requirements.txt')
expected = ['pip', 'install', '--requirement', 'my_cached_reqs']
mock.assert_called_once_with(
expected,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
    def test_install_venv(self):
        '''
        ``bin_env`` pointing at a virtualenv makes pip run from that env's
        bin/Scripts directory with VIRTUAL_ENV exported.
        '''
        with patch('os.path') as mock_path:
            mock_path.is_file.return_value = True
            mock_path.isdir.return_value = True
            pkg = 'mock'
            # os.path is fully mocked, so restore a working join()
            def join(*args):
                return os.sep.join(args)
            mock_path.join = join
            mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
                if salt.utils.is_windows():
                    venv_path = 'c:\\test_env'
                    bin_path = os.path.join(venv_path, 'Scripts', 'pip.exe').encode('string-escape')
                else:
                    venv_path = '/test_env'
                    bin_path = os.path.join(venv_path, 'bin', 'pip')
                pip.install(pkg, bin_env=venv_path)
                mock.assert_called_once_with(
                    [bin_path, 'install', pkg],
                    env={'VIRTUAL_ENV': venv_path},
                    saltenv='base',
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
def test_install_log_argument_in_resulting_command(self):
with patch('os.access') as mock_path:
pkg = 'pep8'
log_path = '/tmp/pip-install.log'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, log=log_path)
mock.assert_called_once_with(
['pip', 'install', '--log', log_path, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_non_writeable_log(self):
with patch('os.path') as mock_path:
# Let's fake a non-writable log file
pkg = 'pep8'
log_path = '/tmp/pip-install.log'
mock_path.exists.side_effect = IOError('Fooo!')
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
self.assertRaises(
IOError,
pip.install,
pkg,
log=log_path
)
def test_install_timeout_argument_in_resulting_command(self):
# Passing an int
pkg = 'pep8'
expected_prefix = ['pip', 'install', '--timeout']
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, timeout=10)
mock.assert_called_once_with(
expected_prefix + [10, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Passing an int as a string
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, timeout='10')
mock.assert_called_once_with(
expected_prefix + ['10', pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Passing a non-int to timeout
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
self.assertRaises(
ValueError,
pip.install,
pkg,
timeout='a'
)
def test_install_index_url_argument_in_resulting_command(self):
pkg = 'pep8'
index_url = 'http://foo.tld'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, index_url=index_url)
mock.assert_called_once_with(
['pip', 'install', '--index-url', index_url, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_extra_index_url_argument_in_resulting_command(self):
pkg = 'pep8'
extra_index_url = 'http://foo.tld'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, extra_index_url=extra_index_url)
mock.assert_called_once_with(
['pip', 'install', '--extra-index-url', extra_index_url, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_no_index_argument_in_resulting_command(self):
pkg = 'pep8'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, no_index=True)
mock.assert_called_once_with(
['pip', 'install', '--no-index', pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_build_argument_in_resulting_command(self):
pkg = 'pep8'
build = '/tmp/foo'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, build=build)
mock.assert_called_once_with(
['pip', 'install', '--build', build, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_target_argument_in_resulting_command(self):
pkg = 'pep8'
target = '/tmp/foo'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, target=target)
mock.assert_called_once_with(
['pip', 'install', '--target', target, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_download_argument_in_resulting_command(self):
pkg = 'pep8'
download = '/tmp/foo'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, download=download)
mock.assert_called_once_with(
['pip', 'install', '--download', download, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_no_download_argument_in_resulting_command(self):
pkg = 'pep8'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, no_download=True)
mock.assert_called_once_with(
['pip', 'install', '--no-download', pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_download_cache_dir_arguments_in_resulting_command(self):
pkg = 'pep8'
cache_dir_arg_mapping = {
'1.5.6': '--download-cache',
'6.0': '--cache-dir',
}
download_cache = '/tmp/foo'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
for pip_version, cmd_arg in cache_dir_arg_mapping.items():
with patch('salt.modules.pip.version',
MagicMock(return_value=pip_version)):
# test `download_cache` kwarg
pip.install(pkg, download_cache='/tmp/foo')
mock.assert_called_with(
['pip', 'install', cmd_arg, download_cache, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# test `cache_dir` kwarg
pip.install(pkg, cache_dir='/tmp/foo')
mock.assert_called_with(
['pip', 'install', cmd_arg, download_cache, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_source_argument_in_resulting_command(self):
pkg = 'pep8'
source = '/tmp/foo'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, source=source)
mock.assert_called_once_with(
['pip', 'install', '--source', source, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_exists_action_argument_in_resulting_command(self):
pkg = 'pep8'
for action in ('s', 'i', 'w', 'b'):
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install('pep8', exists_action=action)
mock.assert_called_once_with(
['pip', 'install', '--exists-action', action, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Test for invalid action
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
self.assertRaises(
CommandExecutionError,
pip.install,
pkg,
exists_action='d'
)
    def test_install_install_options_argument_in_resulting_command(self):
        '''
        ``install_options`` accepts a list, a comma separated string or a
        single entry; each value gets its own ``--install-option`` flag.
        '''
        install_options = [
            '--exec-prefix=/foo/bar',
            '--install-scripts=/foo/bar/bin'
        ]
        pkg = 'pep8'
        expected = ['pip', 'install']
        for item in install_options:
            expected.extend(['--install-option', item])
        expected.append(pkg)
        # Passing options as a list
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            pip.install(pkg, install_options=install_options)
            mock.assert_called_once_with(
                expected,
                saltenv='base',
                runas=None,
                use_vt=False,
                python_shell=False,
            )
        # Passing options as a comma separated list
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            pip.install(pkg, install_options=','.join(install_options))
            mock.assert_called_once_with(
                expected,
                saltenv='base',
                runas=None,
                use_vt=False,
                python_shell=False,
            )
        # Passing options as a single string entry
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            pip.install(pkg, install_options=install_options[0])
            mock.assert_called_once_with(
                ['pip', 'install', '--install-option',
                 install_options[0], pkg],
                saltenv='base',
                runas=None,
                use_vt=False,
                python_shell=False,
            )
    def test_install_global_options_argument_in_resulting_command(self):
        '''
        ``global_options`` accepts a list, a comma separated string or a
        single entry; each value gets its own ``--global-option`` flag.
        '''
        global_options = [
            '--quiet',
            '--no-user-cfg'
        ]
        pkg = 'pep8'
        expected = ['pip', 'install']
        for item in global_options:
            expected.extend(['--global-option', item])
        expected.append(pkg)
        # Passing options as a list
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            pip.install(pkg, global_options=global_options)
            mock.assert_called_once_with(
                expected,
                saltenv='base',
                runas=None,
                use_vt=False,
                python_shell=False,
            )
        # Passing options as a comma separated list
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            pip.install(pkg, global_options=','.join(global_options))
            mock.assert_called_once_with(
                expected,
                saltenv='base',
                runas=None,
                use_vt=False,
                python_shell=False,
            )
        # Passing options as a single string entry
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            pip.install(pkg, global_options=global_options[0])
            mock.assert_called_once_with(
                ['pip', 'install', '--global-option', global_options[0], pkg],
                saltenv='base',
                runas=None,
                use_vt=False,
                python_shell=False,
            )
def test_install_upgrade_argument_in_resulting_command(self):
pkg = 'pep8'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, upgrade=True)
mock.assert_called_once_with(
['pip', 'install', '--upgrade', pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_force_reinstall_argument_in_resulting_command(self):
pkg = 'pep8'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, force_reinstall=True)
mock.assert_called_once_with(
['pip', 'install', '--force-reinstall', pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_ignore_installed_argument_in_resulting_command(self):
pkg = 'pep8'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, ignore_installed=True)
mock.assert_called_once_with(
['pip', 'install', '--ignore-installed', pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_no_deps_argument_in_resulting_command(self):
pkg = 'pep8'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, no_deps=True)
mock.assert_called_once_with(
['pip', 'install', '--no-deps', pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_no_install_argument_in_resulting_command(self):
pkg = 'pep8'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, no_install=True)
mock.assert_called_once_with(
['pip', 'install', '--no-install', pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
def test_install_proxy_argument_in_resulting_command(self):
pkg = 'pep8'
proxy = 'salt-user:salt-passwd@salt-proxy:3128'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.install(pkg, proxy=proxy)
mock.assert_called_once_with(
['pip', 'install', '--proxy', proxy, pkg],
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
    def test_install_multiple_requirements_arguments_in_resulting_command(self):
        '''
        ``requirements`` may be a list, a comma separated string or a single
        entry; each cached requirements file must appear after its own
        ``--requirement`` flag.
        '''
        with patch('salt.modules.pip._get_cached_requirements') as get_cached_requirements:
            cached_reqs = [
                'my_cached_reqs-1', 'my_cached_reqs-2'
            ]
            get_cached_requirements.side_effect = cached_reqs
            requirements = [
                'salt://requirements-1.txt', 'salt://requirements-2.txt'
            ]
            expected = ['pip', 'install']
            for item in cached_reqs:
                expected.extend(['--requirement', item])
            # Passing option as a list
            mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
                pip.install(requirements=requirements)
                mock.assert_called_once_with(
                    expected,
                    saltenv='base',
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
            # Passing option as a comma separated list.
            # The side_effect sequence above is consumed; re-arm it.
            get_cached_requirements.side_effect = cached_reqs
            mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
                pip.install(requirements=','.join(requirements))
                mock.assert_called_once_with(
                    expected,
                    saltenv='base',
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
            # Passing option as a single string entry
            get_cached_requirements.side_effect = [cached_reqs[0]]
            mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
                pip.install(requirements=requirements[0])
                mock.assert_called_once_with(
                    ['pip', 'install', '--requirement', cached_reqs[0]],
                    saltenv='base',
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
    def test_uninstall_multiple_requirements_arguments_in_resulting_command(self):
        '''
        ``pip.uninstall`` accepts ``requirements`` in the same three shapes
        as install (list, comma separated string, single entry) and always
        passes ``-y`` plus one ``--requirement`` per cached file.
        '''
        with patch('salt.modules.pip._get_cached_requirements') as get_cached_requirements:
            cached_reqs = [
                'my_cached_reqs-1', 'my_cached_reqs-2'
            ]
            get_cached_requirements.side_effect = cached_reqs
            requirements = [
                'salt://requirements-1.txt', 'salt://requirements-2.txt'
            ]
            expected = ['pip', 'uninstall', '-y']
            for item in cached_reqs:
                expected.extend(['--requirement', item])
            # Passing option as a list
            mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
                pip.uninstall(requirements=requirements)
                mock.assert_called_once_with(
                    expected,
                    cwd=None,
                    saltenv='base',
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
            # Passing option as a comma separated list.
            # The side_effect sequence above is consumed; re-arm it.
            get_cached_requirements.side_effect = cached_reqs
            mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
                pip.uninstall(requirements=','.join(requirements))
                mock.assert_called_once_with(
                    expected,
                    cwd=None,
                    saltenv='base',
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
            # Passing option as a single string entry
            get_cached_requirements.side_effect = [cached_reqs[0]]
            mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
                pip.uninstall(requirements=requirements[0])
                mock.assert_called_once_with(
                    ['pip', 'uninstall', '-y', '--requirement', cached_reqs[0]],
                    cwd=None,
                    saltenv='base',
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
def test_uninstall_proxy_argument_in_resulting_command(self):
pkg = 'pep8'
proxy = 'salt-user:salt-passwd@salt-proxy:3128'
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.uninstall(pkg, proxy=proxy)
mock.assert_called_once_with(
['pip', 'uninstall', '-y', '--proxy', proxy, pkg],
saltenv='base',
cwd=None,
runas=None,
use_vt=False,
python_shell=False,
)
    def test_uninstall_log_argument_in_resulting_command(self):
        """log= should add '--log <path>' to the command; a log path that
        cannot be checked must raise the underlying IOError."""
        with patch('os.path') as mock_path:
            pkg = 'pep8'
            log_path = '/tmp/pip-install.log'
            mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
                pip.uninstall(pkg, log=log_path)
                mock.assert_called_once_with(
                    ['pip', 'uninstall', '-y', '--log', log_path, pkg],
                    saltenv='base',
                    cwd=None,
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
            # Let's fake a non-writable log file
            mock_path.exists.side_effect = IOError('Fooo!')
            mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
                self.assertRaises(
                    IOError,
                    pip.uninstall,
                    pkg,
                    log=log_path
                )
def test_uninstall_timeout_argument_in_resulting_command(self):
pkg = 'pep8'
expected_prefix = ['pip', 'uninstall', '-y', '--timeout']
# Passing an int
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.uninstall(pkg, timeout=10)
mock.assert_called_once_with(
expected_prefix + [10, pkg],
cwd=None,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Passing an int as a string
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
pip.uninstall(pkg, timeout='10')
mock.assert_called_once_with(
expected_prefix + ['10', pkg],
cwd=None,
saltenv='base',
runas=None,
use_vt=False,
python_shell=False,
)
# Passing a non-int to timeout
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
self.assertRaises(
ValueError,
pip.uninstall,
pkg,
timeout='a'
)
    def test_freeze_command(self):
        """pip.freeze with pip < 9.0 should run plain 'pip freeze' and
        return the frozen requirement lines; a non-zero retcode must raise
        CommandExecutionError."""
        eggs = [
            'M2Crypto==0.21.1',
            '-e git+git@github.com:s0undt3ch/salt-testing.git@9ed81aa2f918d59d3706e56b18f0782d1ea43bf8#egg=SaltTesting-dev',
            'bbfreeze==1.1.0',
            'bbfreeze-loader==1.1.0',
            'pycrypto==2.6'
        ]
        mock = MagicMock(
            return_value={
                'retcode': 0,
                'stdout': '\n'.join(eggs)
            }
        )
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            with patch('salt.modules.pip.version',
                       MagicMock(return_value='6.1.1')):
                ret = pip.freeze()
                mock.assert_called_once_with(
                    ['pip', 'freeze'],
                    cwd=None,
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
                self.assertEqual(ret, eggs)
        # Non zero returncode raises exception?
        mock = MagicMock(return_value={'retcode': 1, 'stderr': 'CABOOOOMMM!'})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            with patch('salt.modules.pip.version',
                       MagicMock(return_value='6.1.1')):
                self.assertRaises(
                    CommandExecutionError,
                    pip.freeze,
                )
    def test_freeze_command_with_all(self):
        """With pip >= 9.0 (here mocked as 9.0.1), pip.freeze should add
        '--all' so pip/setuptools themselves are included in the output."""
        eggs = [
            'M2Crypto==0.21.1',
            '-e git+git@github.com:s0undt3ch/salt-testing.git@9ed81aa2f918d59d3706e56b18f0782d1ea43bf8#egg=SaltTesting-dev',
            'bbfreeze==1.1.0',
            'bbfreeze-loader==1.1.0',
            'pip==0.9.1',
            'pycrypto==2.6',
            'setuptools==20.10.1'
        ]
        mock = MagicMock(
            return_value={
                'retcode': 0,
                'stdout': '\n'.join(eggs)
            }
        )
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            with patch('salt.modules.pip.version',
                       MagicMock(return_value='9.0.1')):
                ret = pip.freeze()
                mock.assert_called_once_with(
                    ['pip', 'freeze', '--all'],
                    cwd=None,
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
                self.assertEqual(ret, eggs)
        # Non zero returncode raises exception?
        mock = MagicMock(return_value={'retcode': 1, 'stderr': 'CABOOOOMMM!'})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            with patch('salt.modules.pip.version',
                       MagicMock(return_value='9.0.1')):
                self.assertRaises(
                    CommandExecutionError,
                    pip.freeze,
                )
    def test_list_command(self):
        """pip.list_ parses 'pip freeze' output into a name -> version
        dict; for pip < 9.0 the 'pip' entry itself is filled in from
        pip.version() since freeze output omits it."""
        eggs = [
            'M2Crypto==0.21.1',
            '-e git+git@github.com:s0undt3ch/salt-testing.git@9ed81aa2f918d59d3706e56b18f0782d1ea43bf8#egg=SaltTesting-dev',
            'bbfreeze==1.1.0',
            'bbfreeze-loader==1.1.0',
            'pycrypto==2.6'
        ]
        mock_version = '6.1.1'
        mock = MagicMock(return_value={'retcode': 0, 'stdout': '\n'.join(eggs)})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            with patch('salt.modules.pip.version',
                       MagicMock(return_value=mock_version)):
                ret = pip.list_()
                mock.assert_called_with(
                    ['pip', 'freeze'],
                    cwd=None,
                    runas=None,
                    python_shell=False,
                    use_vt=False,
                )
                self.assertEqual(
                    ret, {
                        'SaltTesting-dev': 'git+git@github.com:s0undt3ch/salt-testing.git@9ed81aa2f918d59d3706e56b18f0782d1ea43bf8',
                        'M2Crypto': '0.21.1',
                        'bbfreeze-loader': '1.1.0',
                        'bbfreeze': '1.1.0',
                        'pip': mock_version,
                        'pycrypto': '2.6'
                    }
                )
        # Non zero returncode raises exception?
        mock = MagicMock(return_value={'retcode': 1, 'stderr': 'CABOOOOMMM!'})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            with patch('salt.modules.pip.version',
                       MagicMock(return_value='6.1.1')):
                self.assertRaises(
                    CommandExecutionError,
                    pip.list_,
                )
    def test_list_command_with_all(self):
        """With pip >= 9.0, pip.list_ runs 'pip freeze --all' and the
        reported pip version comes from the freeze output itself, not
        from pip.version()."""
        eggs = [
            'M2Crypto==0.21.1',
            '-e git+git@github.com:s0undt3ch/salt-testing.git@9ed81aa2f918d59d3706e56b18f0782d1ea43bf8#egg=SaltTesting-dev',
            'bbfreeze==1.1.0',
            'bbfreeze-loader==1.1.0',
            'pip==9.0.1',
            'pycrypto==2.6',
            'setuptools==20.10.1'
        ]
        # N.B.: this is deliberately different from the "output" of pip freeze.
        # This is to demonstrate that the version reported comes from freeze
        # instead of from the pip.version function.
        mock_version = '9.0.0'
        mock = MagicMock(return_value={'retcode': 0, 'stdout': '\n'.join(eggs)})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            with patch('salt.modules.pip.version',
                       MagicMock(return_value=mock_version)):
                ret = pip.list_()
                mock.assert_called_with(
                    ['pip', 'freeze', '--all'],
                    cwd=None,
                    runas=None,
                    python_shell=False,
                    use_vt=False,
                )
                self.assertEqual(
                    ret, {
                        'SaltTesting-dev': 'git+git@github.com:s0undt3ch/salt-testing.git@9ed81aa2f918d59d3706e56b18f0782d1ea43bf8',
                        'M2Crypto': '0.21.1',
                        'bbfreeze-loader': '1.1.0',
                        'bbfreeze': '1.1.0',
                        'pip': '9.0.1',
                        'pycrypto': '2.6',
                        'setuptools': '20.10.1'
                    }
                )
        # Non zero returncode raises exception?
        mock = MagicMock(return_value={'retcode': 1, 'stderr': 'CABOOOOMMM!'})
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            with patch('salt.modules.pip.version',
                       MagicMock(return_value='6.1.1')):
                self.assertRaises(
                    CommandExecutionError,
                    pip.list_,
                )
def test_list_command_with_prefix(self):
eggs = [
'M2Crypto==0.21.1',
'-e git+git@github.com:s0undt3ch/salt-testing.git@9ed81aa2f918d59d3706e56b18f0782d1ea43bf8#egg=SaltTesting-dev',
'bbfreeze==1.1.0',
'bbfreeze-loader==1.1.0',
'pycrypto==2.6'
]
mock = MagicMock(
return_value={
'retcode': 0,
'stdout': '\n'.join(eggs)
}
)
with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
with patch('salt.modules.pip.version',
MagicMock(return_value='6.1.1')):
ret = pip.list_(prefix='bb')
mock.assert_called_with(
['pip', 'freeze'],
cwd=None,
runas=None,
python_shell=False,
use_vt=False,
)
self.assertEqual(
ret, {
'bbfreeze-loader': '1.1.0',
'bbfreeze': '1.1.0',
}
)
    def test_install_pre_argument_in_resulting_command(self):
        """'--pre' is only added for pip >= 1.4; older pip versions
        install without it even when pre_releases=True."""
        pkg = 'pep8'
        # Lower than 1.4 versions don't end-up with `--pre` in the resulting
        # output
        mock = MagicMock(side_effect=[
            {'retcode': 0, 'stdout': 'pip 1.2.0 /path/to/site-packages/pip'},
            {'retcode': 0, 'stdout': ''}
        ])
        with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
            with patch('salt.modules.pip.version',
                       MagicMock(return_value='1.3')):
                pip.install(pkg, pre_releases=True)
                mock.assert_called_with(
                    ['pip', 'install', pkg],
                    saltenv='base',
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
        # pip >= 1.4 (probed via cmd.run): '--pre' must be included.
        mock_run = MagicMock(return_value='pip 1.4.1 /path/to/site-packages/pip')
        mock_run_all = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(pip.__salt__, {'cmd.run': mock_run,
                                       'cmd.run_all': mock_run_all}):
            with patch('salt.modules.pip._get_pip_bin',
                       MagicMock(return_value='pip')):
                pip.install(pkg, pre_releases=True)
                mock_run_all.assert_called_with(
                    ['pip', 'install', '--pre', pkg],
                    saltenv='base',
                    runas=None,
                    use_vt=False,
                    python_shell=False,
                )
|
import database
import gamemaker
import team
class Escavenge():
    """Console front-end for the Escavenge game.

    Dispatches commands either to the game maker or to a logged-in team.
    NOTE(review): several calls use the module-level ``database``/``team``
    names rather than the instances created here — confirm intended
    against the database/gamemaker/team APIs.
    """
    def __init__(self):
        self.database = database.Database()
        self.game_maker = gamemaker.GameMaker(database)
        # Bug fix: the command tables previously *called* every handler at
        # construction time (e.g. "logout": self.game_maker.logout()) and
        # stored the return values, which also recursed into self.main().
        # Store the callables so dispatch happens on demand instead.
        self.maker = {
            "logout": self.game_maker.logout,
            "login": self.main,
            "display status": self.game_maker.display_status
        }
        self.user = {
            "logout": lambda: team.logout(database),
            "login": self.main,
            "display status": team.display_status
        }
    def login(self, username, password):
        """Authenticate as the game maker or as one of the stored teams."""
        # Bug fix: string comparisons used 'is' (identity), which is not
        # guaranteed to be True for equal strings; use '=='.
        if username == "maker":
            return self.game_maker.login(username, password)
        teams = database.get_teams()
        # Renamed loop variable: it previously shadowed the 'team' module.
        for candidate in teams:
            if candidate.username == username:
                return candidate.login(database, username, password)
    def main(self):
        """Prompt for credentials in a loop and dispatch a command for the
        authenticated user."""
        print("Welcome to Escavenge!\n")
        while True:
            username = input("Please enter your username: ")
            password = input("Please enter your password: ")
            if self.login(username, password):
                if database.get_current_user() == "maker":
                    print("Log in as game maker successful!")
                    cmd = input(self.game_maker.display_menu())
                    self.maker[cmd]()
                else:
                    print("Log in as " + database.get_current_user() + " successful!")
                    cmd = input(team.display_menu())
                    # Bug fix: the handler was looked up but never called
                    # (bare 'self.user[cmd]' expression).
                    self.user[cmd]()
if __name__ == '__main__':
    # Bug fix: 'self' is undefined at module scope; instantiate the app
    # and start its main loop explicitly.
    Escavenge().main()
Added user_cmd and maker_cmd functions and a failed-login message
import database
import gamemaker
import team
class Escavenge():
    """Console front-end for the Escavenge game (maker and team command
    loops).

    NOTE(review): several calls use the module-level ``database``/``team``
    names rather than the instances created here — confirm intended.
    """
    def __init__(self):
        self.database = database.Database()
        self.game_maker = gamemaker.GameMaker(database)
        # Bug fix: handlers were invoked at construction time (e.g.
        # "logout": self.game_maker.logout()); store callables instead so
        # dispatch happens when the command is entered.
        self.maker = {
            "logout": self.game_maker.logout,
            "login": self.main,
            "display status": self.game_maker.display_status
        }
        self.user = {
            "logout": lambda: team.logout(database),
            "display status": team.display_status
        }
    def login(self, username, password):
        """Authenticate as the maker or as a stored team; False otherwise."""
        # Bug fix: 'is' identity checks on strings replaced with '=='.
        if username == "maker":
            return self.game_maker.login(username, password)
        teams = database.get_teams()
        # Renamed loop variable: it previously shadowed the 'team' module.
        for candidate in teams:
            if candidate.username == username:
                return candidate.login(database, username, password)
        print("There is no such user. Please create a new team")
        return False
    def maker_cmd(self):
        """Dispatch game-maker commands until 'logout', then return to main."""
        cmd = None
        # Bug fix: 'is not' identity comparison replaced with '!='.
        while cmd != "logout":
            cmd = input(self.game_maker.display_menu())
            self.maker[cmd]()
        self.main()
    def user_cmd(self):
        """Dispatch team commands until 'logout', then return to main."""
        cmd = None
        while cmd != "logout":
            cmd = input(team.display_menu())
            self.user[cmd]()
        self.main()
    def main(self):
        """Prompt for credentials and enter the matching command loop."""
        print("Welcome to Escavenge!\n")
        while True:
            username = input("Please enter your username: ")
            password = input("Please enter your password: ")
            if self.login(username, password):
                if database.get_current_user() == "maker":
                    print("Log in as game maker successful!")
                    self.maker_cmd()
                else:
                    print("Log in as " + database.get_current_user() + " successful!")
                    self.user_cmd()
            else:
                # Bug fix: Python 2 'print "..."' statement was a syntax
                # error under Python 3, which the rest of the file
                # (input(), print()) targets.
                print("Invalid username and password. Please try again!")
if __name__ == '__main__':
    # Bug fix: merely constructing Escavenge never explicitly entered the
    # UI loop; call main() on the instance.
    Escavenge().main()
|
#!/usr/bin/python -u
import sys
import os
import subprocess
from datetime import datetime
import shutil
import tempfile
import hashlib
import re
import logging
import argparse
import json
################
#### Chronograf Variables
################
# Packaging variables
PACKAGE_NAME = "chronograf"
# Absolute install-time paths (stripped of the leading '/' when the
# package filesystem is assembled under a temp build root).
INSTALL_ROOT_DIR = "/usr/bin"
LOG_DIR = "/var/log/chronograf"
DATA_DIR = "/var/lib/chronograf"
SCRIPT_DIR = "/usr/lib/chronograf/scripts"
LOGROTATE_DIR = "/etc/logrotate.d"
CANNED_DIR = "/usr/share/chronograf/canned"
# Repo-relative sources for the packaged scripts above.
INIT_SCRIPT = "etc/scripts/init.sh"
SYSTEMD_SCRIPT = "etc/scripts/chronograf.service"
POSTINST_SCRIPT = "etc/scripts/post-install.sh"
POSTUNINST_SCRIPT = "etc/scripts/post-uninstall.sh"
LOGROTATE_SCRIPT = "etc/scripts/logrotate"
CANNED_SCRIPTS = "canned/*json"
# Default AWS S3 bucket for uploads
DEFAULT_BUCKET = "dl.influxdata.com/chronograf/artifacts"
# Files fpm marks as config files (preserved on package upgrade).
CONFIGURATION_FILES = [
    LOGROTATE_DIR + '/chronograf',
]
# Package metadata passed through to fpm.
PACKAGE_LICENSE = "MIT"
PACKAGE_URL = "https://github.com/influxdata/chronograf"
MAINTAINER = "contact@influxdb.com"
VENDOR = "InfluxData"
DESCRIPTION = "Open source monitoring and visualization UI for the entire TICK stack."
prereqs = [ 'git', 'go', 'npm','yarn' ]
go_vet_command = "go tool vet ./"
optional_prereqs = [ 'fpm', 'rpmbuild', 'gpg' ]
# Shared fpm flags; the two --directories entries claim LOG_DIR and
# DATA_DIR as package-owned directories.
fpm_common_args = "-f -s dir --log error \
 --vendor {} \
 --url {} \
 --after-install {} \
 --after-remove {} \
 --license {} \
 --maintainer {} \
 --directories {} \
 --directories {} \
 --description \"{}\"".format(
    VENDOR,
    PACKAGE_URL,
    POSTINST_SCRIPT,
    POSTUNINST_SCRIPT,
    PACKAGE_LICENSE,
    MAINTAINER,
    LOG_DIR,
    DATA_DIR,
    DESCRIPTION)
for f in CONFIGURATION_FILES:
    fpm_common_args += " --config-files {}".format(f)
# Build targets: binary name -> Go package path.
targets = {
    'chronograf' : './cmd/chronograf',
}
# NOTE(review): supported_packages lists 'freebsd' but supported_builds
# does not — confirm whether freebsd builds are still produced.
supported_builds = {
    'darwin': [ "amd64" ],
    'windows': [ "amd64" ],
    'linux': [ "amd64", "i386", "armhf", "arm64", "armel", "static_i386", "static_amd64" ]
}
supported_packages = {
    "darwin": [ "tar" ],
    "linux": [ "deb", "rpm", "tar" ],
    "windows": [ "zip" ],
    "freebsd": [ "tar" ]
}
################
#### Chronograf Functions
################
def print_banner():
    """Log the Chronograf ASCII-art banner at INFO level."""
    logging.info("""
 ___ _ __
 / __| |_ _ _ ___ _ _ ___ __ _ _ _ __ _ / _|
| (__| ' \| '_/ _ \ ' \/ _ \/ _` | '_/ _` | _|
 \___|_||_|_| \___/_||_\___/\__, |_| \__,_|_|
 |___/
  Build Script
""")
def create_package_fs(build_root):
    """Create a filesystem structure to mimic the package filesystem.

    Recreates each install-time directory under ``build_root`` with
    mode 0755.
    """
    logging.debug("Creating package filesystem at location: {}".format(build_root))
    # Strip the leading '/' from each absolute path so it nests under
    # build_root (os.path.join would otherwise discard build_root).
    package_dirs = (
        INSTALL_ROOT_DIR,
        LOG_DIR,
        DATA_DIR,
        SCRIPT_DIR,
        LOGROTATE_DIR,
        CANNED_DIR,
    )
    for abs_dir in package_dirs:
        target = os.path.join(build_root, abs_dir[1:])
        os.makedirs(target)
        os.chmod(target, 0o755)
def package_scripts(build_root, config_only=False, windows=False):
    """Copy the necessary scripts to the package filesystem.

    When ``config_only`` is set, nothing is copied (used for windows and
    static packages).
    """
    if not config_only:
        logging.debug("Copying scripts to build directory.")
        script_specs = [
            (INIT_SCRIPT, SCRIPT_DIR, "init.sh"),
            (SYSTEMD_SCRIPT, SCRIPT_DIR, "chronograf.service"),
            (LOGROTATE_SCRIPT, LOGROTATE_DIR, "chronograf"),
        ]
        for src, target_dir, name in script_specs:
            # target_dir is absolute; drop the '/' so it nests under build_root.
            dest = os.path.join(build_root, target_dir[1:], name)
            logging.debug("Moving {} to {}".format(src, dest))
            shutil.copyfile(src, dest)
            os.chmod(dest, 0o644)
        # Canned dashboards are globbed by the shell, hence shell=True.
        run("cp {} {} && chmod 644 {}".format(CANNED_SCRIPTS,
                                              os.path.join(build_root, CANNED_DIR[1:]),
                                              os.path.join(build_root, CANNED_DIR[1:], "*json")),
            shell=True, print_output=True)
def run_generate():
    """Generate static assets via the project Makefile; always True on return."""
    started = datetime.utcnow()
    logging.info("Generating static assets...")
    run("make assets", shell=True, print_output=True)
    elapsed = (datetime.utcnow() - started).total_seconds()
    logging.info("Time taken: {}s".format(elapsed))
    return True
def go_get(branch, update=False, no_uncommitted=False):
    """Retrieve build dependencies or restore pinned dependencies.

    Refuses to run (returns False) when ``no_uncommitted`` is set and the
    working tree is dirty; returns True otherwise.
    """
    started = datetime.utcnow()
    if no_uncommitted and local_changes():
        logging.error("There are uncommitted changes in the current directory.")
        return False
    run("make dep", shell=True, print_output=True)
    elapsed = (datetime.utcnow() - started).total_seconds()
    logging.info("Time taken: {}s".format(elapsed))
    return True
def run_tests(race, parallel, timeout, no_vet):
    """Run the Go and NPM test suite on binary output.

    The race/parallel/timeout/no_vet arguments are accepted for interface
    compatibility; the Makefile target does not consume them here.
    """
    started = datetime.utcnow()
    logging.info("Running tests...")
    run("make test", shell=True, print_output=True)
    elapsed = (datetime.utcnow() - started).total_seconds()
    logging.info("Time taken: {}s".format(elapsed))
    return True
################
#### All Chronograf-specific content above this line
################
def run(command, allow_failure=False, shell=False, print_output=False):
    """Run shell command (convenience wrapper around subprocess).

    Returns the decoded, stripped stdout (None when print_output is True,
    since output then goes straight to the terminal). On a non-zero exit
    or OSError the whole script exits unless allow_failure is set.
    """
    out = None
    logging.debug("{}".format(command))
    try:
        cmd = command
        if not shell:
            # Popen needs an argv list when not going through a shell.
            cmd = command.split()
        stdout = subprocess.PIPE
        stderr = subprocess.STDOUT
        if print_output:
            # Inherit the parent's stdout; nothing is captured then.
            stdout = None
        p = subprocess.Popen(cmd, shell=shell, stdout=stdout, stderr=stderr)
        out, _ = p.communicate()
        if out is not None:
            out = out.decode('utf-8').strip()
        if p.returncode != 0:
            if allow_failure:
                logging.warn(u"Command '{}' failed with error: {}".format(command, out))
                return None
            else:
                logging.error(u"Command '{}' failed with error: {}".format(command, out))
                sys.exit(1)
    except OSError as e:
        # OSError means the executable itself could not be launched.
        if allow_failure:
            logging.warn("Command '{}' failed with error: {}".format(command, e))
            return out
        else:
            logging.error("Command '{}' failed with error: {}".format(command, e))
            sys.exit(1)
    else:
        # Success path: hand back the captured output.
        return out
def create_temp_dir(prefix=None):
    """Create a temporary directory, defaulting the prefix to
    '<PACKAGE_NAME>-build.' when none is given."""
    chosen = "{}-build.".format(PACKAGE_NAME) if prefix is None else prefix
    return tempfile.mkdtemp(prefix=chosen)
def increment_minor_version(version):
    """Return the version with the minor version incremented and patch
    version set to zero.

    A version that is not exactly 'X.Y.Z' is returned unchanged (with a
    warning).
    """
    parts = version.split('.')
    if len(parts) != 3:
        logging.warn("Could not determine how to increment version '{}', will just use provided version.".format(version))
        return version
    major, minor, _patch = parts
    inc_version = '.'.join([major, str(int(minor) + 1), '0'])
    logging.debug("Incremented version from '{}' to '{}'.".format(version, inc_version))
    return inc_version
def get_current_version_tag():
    """Retrieve the raw git version tag (``git describe`` output)."""
    return run("git describe --always --tags --abbrev=0")
def get_current_version():
    """Parse version information from git tag output.

    Strips a conventional leading 'v' and normalizes '-'/'_' separators
    to '~' for package-version compatibility.
    """
    version_tag = get_current_version_tag()
    if version_tag[0] == 'v':
        version_tag = version_tag[1:]
    for sep in ('-', '_'):
        if sep in version_tag:
            version_tag = version_tag.replace(sep, "~")
    return version_tag
def get_current_commit(short=False):
    """Retrieve the current git commit hash, short or full form."""
    if short:
        out = run("git log --pretty=format:'%h' -n 1")
    else:
        out = run("git rev-parse HEAD")
    # Trim quotes and whitespace left by the shell output.
    return out.strip('\'\n\r ')
def get_current_branch():
    """Retrieve the current git branch name."""
    return run("git rev-parse --abbrev-ref HEAD").strip()
def local_changes():
    """Return True if there are local un-committed changes."""
    diff = run("git diff-files --ignore-submodules --").strip()
    return len(diff) > 0
def get_system_arch():
    """Retrieve current system architecture, normalized to the names used
    by the packaging tables (amd64/i386/arm)."""
    machine = os.uname()[4]
    if machine == "x86_64":
        return "amd64"
    if machine == "386":
        return "i386"
    if 'arm' in machine:
        # Prevent uname from reporting full ARM arch (eg 'armv7l')
        return "arm"
    return machine
def get_system_platform():
    """Retrieve current system platform ('linux' or the raw sys.platform)."""
    return "linux" if sys.platform.startswith("linux") else sys.platform
def get_go_version():
    """Retrieve version information for Go.

    Returns the version string parsed from ``go version`` output
    (e.g. '1.8.3'), or None when it cannot be parsed.
    """
    out = run("go version")
    # Fix: use a raw string — '\S' is not a valid Python string escape and
    # previously relied on unknown escapes being left intact (a
    # DeprecationWarning on newer Pythons).
    matches = re.search(r'go version go(\S+)', out)
    if matches is not None:
        return matches.groups()[0].strip()
    return None
def check_path_for(b):
    """Check the user's PATH for the provided binary.

    Returns the full path of the first executable match, or None (implicitly)
    when the binary is not found.
    """
    def is_exe(fpath):
        # True when the path is a regular file and executable.
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
    for path in os.environ["PATH"].split(os.pathsep):
        path = path.strip('"')
        full_path = os.path.join(path, b)
        # Fix: the original defined is_exe() but never used it, duplicating
        # the same test inline.
        if is_exe(full_path):
            return full_path
def check_environ(build_dir=None):
    """Check environment for common Go variables; always returns True.

    Only warns when no build_dir is given and the current directory is
    not under GOPATH.
    """
    logging.info("Checking environment...")
    for var in ("GOPATH", "GOBIN", "GOROOT"):
        logging.debug("Using '{}' for {}".format(os.environ.get(var), var))
    gopath = os.environ.get("GOPATH")
    if build_dir is None and gopath and gopath not in os.getcwd():
        logging.warn("Your current directory is not under your GOPATH. This may lead to build failures.")
    return True
def check_prereqs():
    """Check user path for required dependencies.

    Returns False (after logging) on the first missing tool, True when
    every prerequisite is found.
    """
    logging.info("Checking for dependencies...")
    for tool in prereqs:
        if check_path_for(tool):
            continue
        logging.error("Could not find dependency: {}".format(tool))
        return False
    return True
def upload_packages(packages, bucket_name=None, overwrite=False):
    """Upload provided package output to AWS S3.

    Skips keys already present in the bucket unless ``overwrite`` is set.
    Returns False when the boto library is unavailable, True otherwise.
    """
    logging.debug("Uploading files to bucket '{}': {}".format(bucket_name, packages))
    try:
        # boto is an optional dependency; only needed for uploads.
        import boto
        from boto.s3.key import Key
        from boto.s3.connection import OrdinaryCallingFormat
        logging.getLogger("boto").setLevel(logging.WARNING)
    except ImportError:
        logging.warn("Cannot upload packages without 'boto' Python library!")
        return False
    logging.info("Connecting to AWS S3...")
    # Up the number of attempts to 10 from default of 1
    boto.config.add_section("Boto")
    boto.config.set("Boto", "metadata_service_num_attempts", "10")
    c = boto.connect_s3(calling_format=OrdinaryCallingFormat())
    if bucket_name is None:
        bucket_name = DEFAULT_BUCKET
    bucket = c.get_bucket(bucket_name.split('/')[0])
    for p in packages:
        if '/' in bucket_name:
            # Allow for nested paths within the bucket name (ex:
            # bucket/folder). Assuming forward-slashes as path
            # delimiter.
            name = os.path.join('/'.join(bucket_name.split('/')[1:]),
                                os.path.basename(p))
        else:
            name = os.path.basename(p)
        logging.debug("Using key: {}".format(name))
        if bucket.get_key(name) is None or overwrite:
            logging.info("Uploading file {}".format(name))
            k = Key(bucket)
            k.key = name
            if overwrite:
                n = k.set_contents_from_filename(p, replace=True)
            else:
                n = k.set_contents_from_filename(p, replace=False)
            # Published artifacts are made world-readable.
            k.make_public()
        else:
            logging.warn("Not uploading file {}, as it already exists in the target bucket.".format(name))
    return True
def go_list(vendor=False, relative=False):
    """
    Return a list of packages
    If vendor is False vendor package are not included
    If relative is True the package prefix defined by PACKAGE_URL is stripped
    """
    # Fix: the original reused 'p' for both the Popen handle and the loop
    # variables below, clobbering the process object.
    proc = subprocess.Popen(["go", "list", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    packages = out.split('\n')
    if packages[-1] == '':
        packages = packages[:-1]
    if not vendor:
        packages = [pkg for pkg in packages if '/vendor/' not in pkg]
    if relative:
        relative_pkgs = []
        for pkg in packages:
            r = pkg.replace(PACKAGE_URL, '.')
            if r != '.':
                relative_pkgs.append(r)
        packages = relative_pkgs
    return packages
def build(version=None,
          platform=None,
          arch=None,
          nightly=False,
          race=False,
          clean=False,
          outdir=".",
          tags=[],
          static=False):
    """Build each target for the specified architecture and platform.

    Returns True on success, False for an unrecognized ARM variant.
    NOTE(review): the mutable default ``tags=[]`` is shared across calls;
    it is only read here, but confirm before mutating it anywhere.
    """
    logging.info("Starting build for {}/{}...".format(platform, arch))
    logging.info("Using Go version: {}".format(get_go_version()))
    logging.info("Using git branch: {}".format(get_current_branch()))
    logging.info("Using git commit: {}".format(get_current_commit()))
    if static:
        logging.info("Using statically-compiled output.")
    if race:
        logging.info("Race is enabled.")
    if len(tags) > 0:
        logging.info("Using build tags: {}".format(','.join(tags)))
    logging.info("Sending build output to: {}".format(outdir))
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    elif clean and outdir != '/' and outdir != ".":
        # Refuse to wipe '/' or the current directory when cleaning.
        logging.info("Cleaning build directory '{}' before building.".format(outdir))
        shutil.rmtree(outdir)
        os.makedirs(outdir)
    logging.info("Using version '{}' for build.".format(version))
    for target, path in targets.items():
        logging.info("Building target: {}".format(target))
        build_command = ""
        # Handle static binary output
        if static is True or "static_" in arch:
            if "static_" in arch:
                static = True
                arch = arch.replace("static_", "")
            build_command += "CGO_ENABLED=0 "
        # Handle variations in architecture output
        # NOTE(review): any arch containing 'arm' (including 'armel',
        # 'armhf', 'arm64') is rewritten to plain 'arm' here, which makes
        # the GOARM=5/GOARM=7 branches below look unreachable — confirm.
        if arch == "i386" or arch == "i686":
            arch = "386"
        elif "arm" in arch:
            arch = "arm"
        build_command += "GOOS={} GOARCH={} ".format(platform, arch)
        if "arm" in arch:
            if arch == "armel":
                build_command += "GOARM=5 "
            elif arch == "armhf" or arch == "arm":
                build_command += "GOARM=6 "
            elif arch == "arm64":
                # TODO(rossmcdonald) - Verify this is the correct setting for arm64
                build_command += "GOARM=7 "
            else:
                logging.error("Invalid ARM architecture specified: {}".format(arch))
                logging.error("Please specify either 'armel', 'armhf', or 'arm64'.")
                return False
        if platform == 'windows':
            target = target + '.exe'
        build_command += "go build -o {} ".format(os.path.join(outdir, target))
        if race:
            build_command += "-race "
        if len(tags) > 0:
            build_command += "-tags {} ".format(','.join(tags))
        if "1.4" in get_go_version():
            # Go 1.4 uses the older 'name value' ldflags syntax.
            if static:
                build_command += "-ldflags=\"-s -X main.version {} -X main.commit {}\" ".format(version,
                                                                                               get_current_commit())
            else:
                build_command += "-ldflags=\"-X main.version {} -X main.commit {}\" ".format(version,
                                                                                             get_current_commit())
        else:
            # Starting with Go 1.5, the linker flag arguments changed to 'name=value' from 'name value'
            if static:
                build_command += "-ldflags=\"-s -X main.version={} -X main.commit={}\" ".format(version,
                                                                                                get_current_commit())
            else:
                build_command += "-ldflags=\"-X main.version={} -X main.commit={}\" ".format(version,
                                                                                             get_current_commit())
        if static:
            build_command += "-a -installsuffix cgo "
        build_command += path
        start_time = datetime.utcnow()
        run(build_command, shell=True, print_output=True)
        end_time = datetime.utcnow()
        logging.info("Time taken: {}s".format((end_time - start_time).total_seconds()))
    return True
def generate_sha256_from_file(path):
    """Generate SHA256 signature based on the contents of the file at path."""
    digest = hashlib.sha256()
    with open(path, 'rb') as f:
        while True:
            chunk = f.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def generate_md5_from_file(path):
    """Generate MD5 signature based on the contents of the file at path."""
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        while True:
            chunk = f.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def generate_sig_from_file(path):
    """Generate a detached GPG signature from the file at path.

    Returns False when no gpg binary is on PATH, True otherwise.
    """
    logging.debug("Generating GPG signature for file: {}".format(path))
    if check_path_for('gpg') is None:
        logging.warn("gpg binary not found on path! Skipping signature creation.")
        return False
    gnupg_home = os.environ.get("GNUPG_HOME")
    if gnupg_home is not None:
        run('gpg --homedir {} --armor --yes --detach-sign {}'.format(gnupg_home, path))
    else:
        run('gpg --armor --detach-sign --yes {}'.format(path))
    return True
def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False):
    """Package the output of the build process.

    ``build_output`` maps platform -> arch -> directory of built binaries.
    Produces tar/zip archives and, via fpm, deb/rpm packages; returns the
    list of package files created.
    NOTE(review): the temporary build directory is intentionally not
    removed (cleanup is commented out in the finally block) — confirm.
    """
    outfiles = []
    tmp_build_dir = create_temp_dir()
    logging.debug("Packaging for build output: {}".format(build_output))
    logging.info("Using temporary directory: {}".format(tmp_build_dir))
    try:
        for platform in build_output:
            # Create top-level folder displaying which platform (linux, etc)
            os.makedirs(os.path.join(tmp_build_dir, platform))
            for arch in build_output[platform]:
                logging.info("Creating packages for {}/{}".format(platform, arch))
                # Create second-level directory displaying the architecture (amd64, etc)
                current_location = build_output[platform][arch]
                # Create directory tree to mimic file system of package
                build_root = os.path.join(tmp_build_dir,
                                          platform,
                                          arch,
                                          '{}-{}-{}'.format(PACKAGE_NAME, version, iteration))
                os.makedirs(build_root)
                # Copy packaging scripts to build directory
                if platform == "windows":
                    # For windows and static builds, just copy
                    # binaries to root of package (no other scripts or
                    # directories)
                    package_scripts(build_root, config_only=True, windows=True)
                elif static or "static_" in arch:
                    package_scripts(build_root, config_only=True)
                else:
                    create_package_fs(build_root)
                    package_scripts(build_root)
                for binary in targets:
                    # Copy newly-built binaries to packaging directory
                    if platform == 'windows':
                        binary = binary + '.exe'
                    if platform == 'windows' or static or "static_" in arch:
                        # Where the binary should go in the package filesystem
                        to = os.path.join(build_root, binary)
                        # Where the binary currently is located
                        fr = os.path.join(current_location, binary)
                    else:
                        # Where the binary currently is located
                        fr = os.path.join(current_location, binary)
                        # Where the binary should go in the package filesystem
                        to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary)
                    shutil.copy(fr, to)
                for package_type in supported_packages[platform]:
                    # Package the directory structure for each package type for the platform
                    logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type))
                    name = pkg_name
                    # Reset version, iteration, and current location on each run
                    # since they may be modified below.
                    package_version = version
                    package_iteration = iteration
                    if "static_" in arch:
                        # Remove the "static_" from the displayed arch on the package
                        package_arch = arch.replace("static_", "")
                    else:
                        package_arch = arch
                    if not release and not nightly:
                        # For non-release builds, just use the commit hash as the version
                        package_version = "{}~{}".format(version,
                                                         get_current_commit(short=True))
                        package_iteration = "0"
                    package_build_root = build_root
                    current_location = build_output[platform][arch]
                    if package_type in ['zip', 'tar']:
                        # For tars and zips, start the packaging one folder above
                        # the build root (to include the package name)
                        package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1]))
                        if nightly:
                            if static or "static_" in arch:
                                name = '{}-static-nightly_{}_{}'.format(name,
                                                                        platform,
                                                                        package_arch)
                            else:
                                name = '{}-nightly_{}_{}'.format(name,
                                                                 platform,
                                                                 package_arch)
                        else:
                            if static or "static_" in arch:
                                name = '{}-{}-static_{}_{}'.format(name,
                                                                   package_version,
                                                                   platform,
                                                                   package_arch)
                            else:
                                name = '{}-{}_{}_{}'.format(name,
                                                            package_version,
                                                            platform,
                                                            package_arch)
                        current_location = os.path.join(os.getcwd(), current_location)
                        if package_type == 'tar':
                            tar_command = "cd {} && tar -cvzf {}.tar.gz --owner=root ./*".format(package_build_root, name)
                            run(tar_command, shell=True, print_output=True)
                            run("mv {}.tar.gz {}".format(os.path.join(package_build_root, name), current_location), shell=True)
                            outfile = os.path.join(current_location, name + ".tar.gz")
                            outfiles.append(outfile)
                        elif package_type == 'zip':
                            zip_command = "cd {} && zip -r {}.zip ./*".format(package_build_root, name)
                            run(zip_command, shell=True, print_output=True)
                            run("mv {}.zip {}".format(os.path.join(package_build_root, name), current_location), shell=True)
                            outfile = os.path.join(current_location, name + ".zip")
                            outfiles.append(outfile)
                    elif package_type not in ['zip', 'tar'] and static or "static_" in arch:
                        logging.info("Skipping package type '{}' for static builds.".format(package_type))
                    else:
                        # deb/rpm packages are built by fpm.
                        fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
                            fpm_common_args,
                            name,
                            package_arch,
                            package_type,
                            package_version,
                            package_iteration,
                            package_build_root,
                            current_location)
                        if package_type == "rpm":
                            fpm_command += "--depends coreutils"
                        # TODO: Check for changelog
                        # elif package_type == "deb":
                        #     fpm_command += "--deb-changelog {} ".format(os.path.join(os.getcwd(), "CHANGELOG.md"))
                        out = run(fpm_command, shell=True)
                        # fpm reports the produced file as :path=>"..." in its output.
                        matches = re.search(':path=>"(.*)"', out)
                        outfile = None
                        if matches is not None:
                            outfile = matches.groups()[0]
                        if outfile is None:
                            logging.warn("Could not determine output from packaging output!")
                        else:
                            if nightly:
                                # TODO: check if this is correct
                                # if package_type == 'rpm':
                                #     # rpm's convert any dashes to underscores
                                #     package_version = package_version.replace("-", "_")
                                #     logging.debug("Changing package output version from {} to {} for RPM.".format(version, package_version))
                                # Strip nightly version from package name
                                new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), "nightly")
                                os.rename(outfile, new_outfile)
                                outfile = new_outfile
                            else:
                                if package_type == 'rpm':
                                    # rpm's convert any dashes to underscores
                                    package_version = package_version.replace("-", "_")
                                    logging.debug("Changing package output version from {} to {} for RPM.".format(version, package_version))
                                new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), package_version)
                                os.rename(outfile, new_outfile)
                                outfile = new_outfile
                            outfiles.append(os.path.join(os.getcwd(), outfile))
        logging.debug("Produced package files: {}".format(outfiles))
        return outfiles
    finally:
        pass
        # Cleanup
        # shutil.rmtree(tmp_build_dir)
def main(args):
    """Entry point: build, and optionally test, package, sign and upload,
    according to the parsed CLI arguments. Returns a process exit code."""
    global PACKAGE_NAME
    # --release and --nightly are mutually exclusive.
    if args.release and args.nightly:
        logging.error("Cannot be both a nightly and a release.")
        return 1
    if args.nightly:
        # Nightly builds bump the minor version and append a UTC timestamp.
        args.version = increment_minor_version(args.version)
        args.version = "{}~n{}".format(args.version,
                                       datetime.utcnow().strftime("%Y%m%d%H%M"))
        args.iteration = 0
    # Pre-build checks
    check_environ()
    if not check_prereqs():
        return 1
    if args.build_tags is None:
        args.build_tags = []
    else:
        args.build_tags = args.build_tags.split(',')
    # Remember where we started so we can check out back at the end.
    orig_commit = get_current_commit(short=True)
    orig_branch = get_current_branch()
    if args.platform not in supported_builds and args.platform != 'all':
        logging.error("Invalid build platform: {}".format(args.platform))
        return 1
    build_output = {}
    # Move to the requested branch/commit if it differs from the current one.
    if args.branch != orig_branch and args.commit != orig_commit:
        logging.error("Can only specify one branch or commit to build from.")
        return 1
    elif args.branch != orig_branch:
        logging.info("Moving to git branch: {}".format(args.branch))
        run("git checkout {}".format(args.branch), print_output=True)
    elif args.commit != orig_commit:
        logging.info("Moving to git commit: {}".format(args.commit))
        run("git checkout {}".format(args.commit), print_output=True)
    if not args.no_get:
        if not go_get(args.branch, update=args.update, no_uncommitted=args.no_uncommitted):
            return 1
    if args.generate:
        if not run_generate():
            return 1
    if args.test:
        if not run_tests(args.race, args.parallel, args.timeout, args.no_vet):
            return 1
    if args.no_build:
        return 0
    # Determine the platform/arch matrix to build. single_build controls
    # whether output goes directly into outdir or into per-target subdirs.
    platforms = []
    single_build = True
    if args.platform == 'all':
        platforms = supported_builds.keys()
        single_build = False
    else:
        platforms = [args.platform]
    for platform in platforms:
        build_output.update( { platform : {} } )
        archs = []
        if args.arch == "all":
            single_build = False
            archs = supported_builds.get(platform)
        else:
            archs = [args.arch]
        for arch in archs:
            od = args.outdir
            if not single_build:
                od = os.path.join(args.outdir, platform, arch)
            if not build(version=args.version,
                         platform=platform,
                         arch=arch,
                         nightly=args.nightly,
                         race=args.race,
                         clean=args.clean,
                         outdir=od,
                         tags=args.build_tags,
                         static=args.static):
                return 1
            # Record where each platform/arch's binaries ended up.
            build_output.get(platform).update( { arch : od } )
    # Build packages
    if args.package:
        if not check_path_for("fpm"):
            logging.error("FPM ruby gem required for packaging. Stopping.")
            return 1
        packages = package(build_output,
                           args.name,
                           args.version,
                           nightly=args.nightly,
                           iteration=args.iteration,
                           static=args.static,
                           release=args.release)
        if args.sign:
            logging.debug("Generating GPG signatures for packages: {}".format(packages))
            sigs = [] # retain signatures so they can be uploaded with packages
            for p in packages:
                if generate_sig_from_file(p):
                    sigs.append(p + '.asc')
                else:
                    logging.error("Creation of signature for package [{}] failed!".format(p))
                    return 1
            packages += sigs
        if args.upload:
            logging.debug("Files staged for upload: {}".format(packages))
            if args.nightly:
                # Nightlies always replace the previous nightly artifacts.
                args.upload_overwrite = True
            if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite):
                return 1
        # Summarize produced artifacts keyed by "<arch>_<type>".
        package_output = {}
        for p in packages:
            p_name = p.split('/')[-1:][0]
            if ".asc" in p_name:
                # Skip public keys
                continue
            arch = None
            type = None
            regex = None
            nice_name = None
            # Classify the artifact by file extension and name contents.
            if ".deb" in p_name:
                type = "ubuntu"
                nice_name = "Ubuntu"
                regex = r"^.+_(.+)\.deb$"
            elif ".rpm" in p_name:
                type = "centos"
                nice_name = "CentOS"
                regex = r"^.+\.(.+)\.rpm$"
            elif ".tar.gz" in p_name:
                if "linux" in p_name:
                    if "static" in p_name:
                        type = "linux_static"
                        nice_name = "Linux Static"
                    else:
                        type = "linux"
                        nice_name = "Linux"
                elif "darwin" in p_name:
                    type = "darwin"
                    nice_name = "Mac OS X"
                regex = r"^.+_(.+)\.tar.gz$"
            elif ".zip" in p_name:
                if "windows" in p_name:
                    type = "windows"
                    nice_name = "Windows"
                regex = r"^.+_(.+)\.zip$"
            if regex is None or type is None:
                logging.error("Could not determine package type for: {}".format(p))
                return 1
            # Extract the architecture embedded in the file name.
            match = re.search(regex, p_name)
            arch = match.groups()[0]
            if arch is None:
                logging.error("Could not determine arch for: {}".format(p))
                return 1
            if arch == "x86_64":
                arch = "amd64"
            elif arch == "x86_32":
                arch = "i386"
            package_name = str(arch) + "_" + str(type)
            package_output[package_name] = {
                "sha256": generate_sha256_from_file(p),
                "md5": generate_md5_from_file(p),
                "filename": p_name,
                "name": nice_name,
                "link": "https://dl.influxdata.com/chronograf/releases/" + p_name.rsplit('/', 1)[-1],
            }
        # Print the downloads in Markdown format for the release
        if args.release:
            lines = []
            for package_name, v in package_output.items():
                line = v['name'] + " | [" + v['filename'] +"](" + v['link'] + ") | `" + v['sha256'] + "`"
                lines.append(line)
            lines.sort()
            print ("## Docker")
            print("docker pull quay.io/influxdb/chronograf:"+get_current_version_tag())
            print("")
            print("## Packages")
            print("")
            print("Arch | Package | SHA256")
            print("--- | --- | ---")
            for line in lines:
                print(line)
        package_output["version"] = args.version
        logging.info(json.dumps(package_output, sort_keys=True, indent=4))
    # Restore the branch we started on if a checkout moved us.
    if orig_branch != get_current_branch():
        logging.info("Moving back to original git branch: {}".format(orig_branch))
        run("git checkout {}".format(orig_branch), print_output=True)
    return 0
if __name__ == '__main__':
    # Configure logging before argparse runs, because several argument
    # defaults below call helpers (get_current_branch, etc.) that log.
    LOG_LEVEL = logging.INFO
    if '--debug' in sys.argv[1:]:
        LOG_LEVEL = logging.DEBUG
    log_format = '[%(levelname)s] %(funcName)s: %(message)s'
    logging.basicConfig(stream=sys.stdout,
                        level=LOG_LEVEL,
                        format=log_format)
    # NOTE(review): description says "InfluxDB" though this script packages
    # Chronograf — presumably inherited from the upstream script; confirm.
    parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.')
    parser.add_argument('--verbose','-v','--debug',
                        action='store_true',
                        help='Use debug output')
    parser.add_argument('--outdir', '-o',
                        metavar='<output directory>',
                        default='./build/',
                        type=os.path.abspath,
                        help='Output directory')
    parser.add_argument('--name', '-n',
                        metavar='<name>',
                        default=PACKAGE_NAME,
                        type=str,
                        help='Name to use for package name (when package is specified)')
    parser.add_argument('--arch',
                        metavar='<amd64|i386|armhf|arm64|armel|all>',
                        type=str,
                        default=get_system_arch(),
                        help='Target architecture for build output')
    parser.add_argument('--platform',
                        metavar='<linux|darwin|windows|all>',
                        type=str,
                        default=get_system_platform(),
                        help='Target platform for build output')
    parser.add_argument('--branch',
                        metavar='<branch>',
                        type=str,
                        default=get_current_branch(),
                        help='Build from a specific branch')
    parser.add_argument('--commit',
                        metavar='<commit>',
                        type=str,
                        default=get_current_commit(short=True),
                        help='Build from a specific commit')
    parser.add_argument('--version',
                        metavar='<version>',
                        type=str,
                        default=get_current_version(),
                        help='Version information to apply to build output (ex: 0.12.0)')
    parser.add_argument('--iteration',
                        metavar='<package iteration>',
                        type=str,
                        default="1",
                        help='Package iteration to apply to build output (defaults to 1)')
    parser.add_argument('--stats',
                        action='store_true',
                        help='Emit build metrics (requires InfluxDB Python client)')
    parser.add_argument('--stats-server',
                        metavar='<hostname:port>',
                        type=str,
                        help='Send build stats to InfluxDB using provided hostname and port')
    parser.add_argument('--stats-db',
                        metavar='<database name>',
                        type=str,
                        help='Send build stats to InfluxDB using provided database name')
    parser.add_argument('--nightly',
                        action='store_true',
                        help='Mark build output as nightly build (will incremement the minor version)')
    parser.add_argument('--update',
                        action='store_true',
                        help='Update build dependencies prior to building')
    parser.add_argument('--package',
                        action='store_true',
                        help='Package binary output')
    parser.add_argument('--release',
                        action='store_true',
                        help='Mark build output as release')
    parser.add_argument('--clean',
                        action='store_true',
                        help='Clean output directory before building')
    parser.add_argument('--no-get',
                        action='store_true',
                        help='Do not retrieve pinned dependencies when building')
    parser.add_argument('--no-uncommitted',
                        action='store_true',
                        help='Fail if uncommitted changes exist in the working directory')
    parser.add_argument('--upload',
                        action='store_true',
                        help='Upload output packages to AWS S3')
    # NOTE(review): help text duplicates --upload's; it presumably should
    # describe overwriting existing S3 objects — confirm.
    parser.add_argument('--upload-overwrite','-w',
                        action='store_true',
                        help='Upload output packages to AWS S3')
    parser.add_argument('--bucket',
                        metavar='<S3 bucket name>',
                        type=str,
                        default=DEFAULT_BUCKET,
                        help='Destination bucket for uploads')
    # NOTE(review): default=True combined with store_true means this flag is
    # always on and cannot be disabled from the CLI — confirm intent.
    parser.add_argument('--generate',
                        action='store_true',
                        default=True,
                        help='Run "go generate" before building')
    parser.add_argument('--build-tags',
                        metavar='<tags>',
                        help='Optional build tags to use for compilation')
    parser.add_argument('--static',
                        action='store_true',
                        help='Create statically-compiled binary output')
    parser.add_argument('--sign',
                        action='store_true',
                        help='Create GPG detached signatures for packages (when package is specified)')
    parser.add_argument('--test',
                        action='store_true',
                        help='Run tests (does not produce build output)')
    parser.add_argument('--no-vet',
                        action='store_true',
                        help='Do not run "go vet" when running tests')
    parser.add_argument('--race',
                        action='store_true',
                        help='Enable race flag for build output')
    parser.add_argument('--parallel',
                        metavar='<num threads>',
                        type=int,
                        help='Number of tests to run simultaneously')
    parser.add_argument('--timeout',
                        metavar='<timeout>',
                        type=str,
                        help='Timeout for tests before failing')
    parser.add_argument('--no-build',
                        action='store_true',
                        help='Dont build anything.')
    args = parser.parse_args()
    print_banner()
    sys.exit(main(args))
# Update build to run make clean on --clean
#!/usr/bin/python -u
import sys
import os
import subprocess
from datetime import datetime
import shutil
import tempfile
import hashlib
import re
import logging
import argparse
import json
################
#### Chronograf Variables
################
# Packaging variables
PACKAGE_NAME = "chronograf"
# Filesystem layout inside the installed package.
INSTALL_ROOT_DIR = "/usr/bin"
LOG_DIR = "/var/log/chronograf"
DATA_DIR = "/var/lib/chronograf"
SCRIPT_DIR = "/usr/lib/chronograf/scripts"
LOGROTATE_DIR = "/etc/logrotate.d"
CANNED_DIR = "/usr/share/chronograf/canned"
# Service/maintenance scripts shipped inside the package (repo-relative).
INIT_SCRIPT = "etc/scripts/init.sh"
SYSTEMD_SCRIPT = "etc/scripts/chronograf.service"
POSTINST_SCRIPT = "etc/scripts/post-install.sh"
POSTUNINST_SCRIPT = "etc/scripts/post-uninstall.sh"
LOGROTATE_SCRIPT = "etc/scripts/logrotate"
CANNED_SCRIPTS = "canned/*json"
# Default AWS S3 bucket for uploads
DEFAULT_BUCKET = "dl.influxdata.com/chronograf/artifacts"
# Files fpm should mark as config files (preserved across upgrades).
CONFIGURATION_FILES = [
    LOGROTATE_DIR + '/chronograf',
]
# Package metadata passed to fpm.
PACKAGE_LICENSE = "MIT"
PACKAGE_URL = "https://github.com/influxdata/chronograf"
MAINTAINER = "contact@influxdb.com"
VENDOR = "InfluxData"
DESCRIPTION = "Open source monitoring and visualization UI for the entire TICK stack."
# Tools required to build; optional ones enable packaging and signing.
prereqs = [ 'git', 'go', 'npm','yarn' ]
go_vet_command = "go tool vet ./"
optional_prereqs = [ 'fpm', 'rpmbuild', 'gpg' ]
# Arguments common to every fpm invocation (see package()).
fpm_common_args = "-f -s dir --log error \
--vendor {} \
--url {} \
--after-install {} \
--after-remove {} \
--license {} \
--maintainer {} \
--directories {} \
--directories {} \
--description \"{}\"".format(
    VENDOR,
    PACKAGE_URL,
    POSTINST_SCRIPT,
    POSTUNINST_SCRIPT,
    PACKAGE_LICENSE,
    MAINTAINER,
    LOG_DIR,
    DATA_DIR,
    DESCRIPTION)
for f in CONFIGURATION_FILES:
    fpm_common_args += " --config-files {}".format(f)
# Build targets: binary name -> Go package path.
targets = {
    'chronograf' : './cmd/chronograf',
}
# Architectures buildable per platform ("static_" prefix = CGO-free build).
supported_builds = {
    'darwin': [ "amd64" ],
    'windows': [ "amd64" ],
    'linux': [ "amd64", "i386", "armhf", "arm64", "armel", "static_i386", "static_amd64" ]
}
# Package formats producible per platform.
supported_packages = {
    "darwin": [ "tar" ],
    "linux": [ "deb", "rpm", "tar" ],
    "windows": [ "zip" ],
    "freebsd": [ "tar" ]
}
################
#### Chronograf Functions
################
def print_banner():
    """Log the Chronograf ASCII-art banner at startup."""
    # NOTE(review): banner alignment reconstructed; original whitespace was
    # mangled in this copy — verify the art renders as intended.
    logging.info("""
  ___ _                                        __
 / __| |_  _ _ ___ _ _  ___  __ _ _ _ __ _ / _|
| (__| ' \| '_/ _ \ ' \/ _ \/ _` | '_/ _` |  _|
 \___|_||_|_| \___/_||_\___/\__, |_| \__,_|_|
                            |___/
  Build Script
""")
def create_package_fs(build_root):
    """Lay out, under build_root, the directory skeleton that mirrors the
    target package's filesystem, with 0755 permissions on each directory."""
    logging.debug("Creating package filesystem at location: {}".format(build_root))
    # The leading '/' is stripped from each absolute path so os.path.join
    # nests it under build_root instead of replacing it.
    for directory in (INSTALL_ROOT_DIR, LOG_DIR, DATA_DIR,
                      SCRIPT_DIR, LOGROTATE_DIR, CANNED_DIR):
        nested = os.path.join(build_root, directory[1:])
        os.makedirs(nested)
        os.chmod(nested, 0o755)
def package_scripts(build_root, config_only=False, windows=False):
    """Copy init/systemd/logrotate scripts and the canned dashboards into the
    package tree. When config_only is set, everything is skipped (used for
    windows/static layouts). The windows flag is accepted for caller
    compatibility."""
    if not config_only:
        logging.debug("Copying scripts to build directory.")
        script_map = (
            (INIT_SCRIPT, SCRIPT_DIR, "init.sh"),
            (SYSTEMD_SCRIPT, SCRIPT_DIR, "chronograf.service"),
            (LOGROTATE_SCRIPT, LOGROTATE_DIR, "chronograf"),
        )
        for src, dest_dir, dest_name in script_map:
            dest = os.path.join(build_root, dest_dir[1:], dest_name)
            logging.debug("Moving {} to {}".format(src, dest))
            shutil.copyfile(src, dest)
            os.chmod(dest, 0o644)
        # Copy the canned JSON dashboards and make them world-readable.
        canned_dest = os.path.join(build_root, CANNED_DIR[1:])
        run("cp {} {} && chmod 644 {}".format(CANNED_SCRIPTS,
                                              canned_dest,
                                              os.path.join(canned_dest, "*json")),
            shell=True, print_output=True)
def run_generate():
    """Build the static web assets via `make assets`; always returns True."""
    started = datetime.utcnow()
    logging.info("Generating static assets...")
    run("make assets", shell=True, print_output=True)
    logging.info("Time taken: {}s".format((datetime.utcnow() - started).total_seconds()))
    return True
def make_clean():
    """Remove build artifacts by invoking `make clean`; always returns True.

    (Docstring corrected — it previously said "Generate static assets",
    copied from run_generate.)
    """
    start_time = datetime.utcnow()
    run("make clean", shell=True, print_output=True)
    end_time = datetime.utcnow()
    logging.info("Time taken: {}s".format((end_time - start_time).total_seconds()))
    return True
def go_get(branch, update=False, no_uncommitted=False):
    """Restore pinned Go dependencies via `make dep`.

    `branch` and `update` are accepted for interface compatibility but are
    not used by the make-based dependency step. Returns False when
    no_uncommitted is set and the working tree is dirty, True otherwise.
    """
    started = datetime.utcnow()
    # local_changes() is always evaluated (it shells out to git), matching
    # the original evaluation order.
    if local_changes() and no_uncommitted:
        logging.error("There are uncommitted changes in the current directory.")
        return False
    run("make dep", shell=True, print_output=True)
    logging.info("Time taken: {}s".format((datetime.utcnow() - started).total_seconds()))
    return True
def run_tests(race, parallel, timeout, no_vet):
    """Run the Go and NPM test suites via `make test`; always returns True.

    The race/parallel/timeout/no_vet arguments are kept for interface
    compatibility; the Makefile target controls the actual behavior.
    """
    began = datetime.utcnow()
    logging.info("Running tests...")
    run("make test", shell=True, print_output=True)
    logging.info("Time taken: {}s".format((datetime.utcnow() - began).total_seconds()))
    return True
################
#### All Chronograf-specific content above this line
################
def run(command, allow_failure=False, shell=False, print_output=False):
    """Run a shell command (convenience wrapper around subprocess).

    Args:
        command: command line to execute; split on whitespace unless `shell`.
        allow_failure: when True, log a warning and return None (or the
            partial output on OSError) instead of exiting the process.
        shell: pass the command string to the shell unmodified.
        print_output: stream child stdout to our stdout instead of capturing.

    Returns the captured, stripped stdout (None when print_output is True or
    when the command failed with allow_failure set). Exits the process via
    sys.exit(1) on failure otherwise. Stderr is merged into stdout.
    """
    out = None
    logging.debug("{}".format(command))
    try:
        cmd = command
        if not shell:
            cmd = command.split()
        stdout = subprocess.PIPE
        stderr = subprocess.STDOUT
        if print_output:
            stdout = None
        p = subprocess.Popen(cmd, shell=shell, stdout=stdout, stderr=stderr)
        out, _ = p.communicate()
        if out is not None:
            out = out.decode('utf-8').strip()
        if p.returncode != 0:
            if allow_failure:
                # logging.warn was removed in Python 3.13; use warning().
                logging.warning(u"Command '{}' failed with error: {}".format(command, out))
                return None
            else:
                logging.error(u"Command '{}' failed with error: {}".format(command, out))
                sys.exit(1)
    except OSError as e:
        if allow_failure:
            logging.warning("Command '{}' failed with error: {}".format(command, e))
            return out
        else:
            logging.error("Command '{}' failed with error: {}".format(command, e))
            sys.exit(1)
    else:
        return out
def create_temp_dir(prefix = None):
    """Create a temporary directory and return its path.

    Uses '<PACKAGE_NAME>-build.' as the prefix when none is supplied.
    """
    chosen = "{}-build.".format(PACKAGE_NAME) if prefix is None else prefix
    return tempfile.mkdtemp(prefix=chosen)
def increment_minor_version(version):
    """Return `version` with the minor component bumped and patch reset to 0.

    Strings that are not 'major.minor.patch' are returned unchanged (after
    logging a warning).
    """
    parts = version.split('.')
    if len(parts) != 3:
        # logging.warn was removed in Python 3.13; use warning().
        logging.warning("Could not determine how to increment version '{}', will just use provided version.".format(version))
        return version
    parts[1] = str(int(parts[1]) + 1)
    parts[2] = str(0)
    inc_version = '.'.join(parts)
    logging.debug("Incremented version from '{}' to '{}'.".format(version, inc_version))
    return inc_version
def get_current_version_tag():
    """Return the most recent git tag (falls back to a commit hash when
    the repository has no tags, per `--always`)."""
    return run("git describe --always --tags --abbrev=0")
def get_current_version():
    """Derive a package-friendly version string from the git tag.

    Strips a leading 'v' and maps '-'/'_' separators to '~'.
    """
    tag = get_current_version_tag()
    if tag[0] == 'v':
        tag = tag[1:]
    # str.replace is a no-op when the character is absent, so the original
    # membership checks are folded away.
    return tag.replace("-", "~").replace("_", "~")
def get_current_commit(short=False):
    """Return the current git commit hash (abbreviated when `short`)."""
    if short:
        cmd = "git log --pretty=format:'%h' -n 1"
    else:
        cmd = "git rev-parse HEAD"
    # Strip quotes and line endings that git's pretty-format may emit.
    return run(cmd).strip('\'\n\r ')
def get_current_branch():
    """Return the name of the currently checked-out git branch."""
    return run("git rev-parse --abbrev-ref HEAD").strip()
def local_changes():
    """Report whether the working tree has un-committed changes to
    tracked files."""
    diff = run("git diff-files --ignore-submodules --").strip()
    return len(diff) > 0
def get_system_arch():
    """Map uname's machine field onto the arch names used by this build."""
    machine = os.uname()[4]
    if machine == "x86_64":
        return "amd64"
    if machine == "386":
        return "i386"
    if 'arm' in machine:
        # Collapse specific ARM variants (eg 'armv7l') to plain 'arm'.
        return "arm"
    return machine
def get_system_platform():
    """Return the current platform, normalizing all Linux flavors
    (eg 'linux2') to plain 'linux'."""
    return "linux" if sys.platform.startswith("linux") else sys.platform
def get_go_version():
    """Return the installed Go toolchain version string, or None when
    `go version` output cannot be parsed."""
    out = run("go version")
    # Raw string: '\S' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python, SyntaxError eventually).
    matches = re.search(r'go version go(\S+)', out)
    if matches is not None:
        return matches.groups()[0].strip()
    return None
def check_path_for(b):
    """Search the user's PATH for executable `b`.

    Returns the full path of the first match, or None when not found.
    """
    def is_exe(fpath):
        # True when fpath exists and is executable by the current user.
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
    for path in os.environ["PATH"].split(os.pathsep):
        path = path.strip('"')
        full_path = os.path.join(path, b)
        # Reuse the helper instead of duplicating its condition inline
        # (the original defined is_exe but never called it).
        if is_exe(full_path):
            return full_path
    return None
def check_environ(build_dir = None):
    """Log common Go environment variables and warn when the current
    directory appears to be outside GOPATH. Purely informational;
    always returns True."""
    logging.info("Checking environment...")
    for v in [ "GOPATH", "GOBIN", "GOROOT" ]:
        logging.debug("Using '{}' for {}".format(os.environ.get(v), v))
    cwd = os.getcwd()
    # Substring check: GOPATH must be a prefix/component of the cwd path.
    if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd:
        # logging.warn was removed in Python 3.13; use warning().
        logging.warning("Your current directory is not under your GOPATH. This may lead to build failures.")
    return True
def check_prereqs():
    """Verify that every tool listed in `prereqs` is on PATH.

    Logs and returns False on the first missing tool, True otherwise.
    """
    logging.info("Checking for dependencies...")
    for tool in prereqs:
        if check_path_for(tool):
            continue
        logging.error("Could not find dependency: {}".format(tool))
        return False
    return True
def upload_packages(packages, bucket_name=None, overwrite=False):
    """Upload provided package output to AWS S3.

    packages: iterable of local file paths to upload.
    bucket_name: S3 bucket, optionally with a nested key prefix
        ('bucket/folder'); falls back to DEFAULT_BUCKET when None.
    overwrite: replace objects that already exist in the bucket.
    Returns False when the optional boto library is unavailable,
    True otherwise.
    """
    logging.debug("Uploading files to bucket '{}': {}".format(bucket_name, packages))
    try:
        # boto is an optional dependency; uploads are skipped without it.
        import boto
        from boto.s3.key import Key
        from boto.s3.connection import OrdinaryCallingFormat
        logging.getLogger("boto").setLevel(logging.WARNING)
    except ImportError:
        logging.warn("Cannot upload packages without 'boto' Python library!")
        return False
    logging.info("Connecting to AWS S3...")
    # Up the number of attempts to 10 from default of 1
    boto.config.add_section("Boto")
    boto.config.set("Boto", "metadata_service_num_attempts", "10")
    c = boto.connect_s3(calling_format=OrdinaryCallingFormat())
    if bucket_name is None:
        bucket_name = DEFAULT_BUCKET
    # Only the first path component is the actual bucket name.
    bucket = c.get_bucket(bucket_name.split('/')[0])
    for p in packages:
        if '/' in bucket_name:
            # Allow for nested paths within the bucket name (ex:
            # bucket/folder). Assuming forward-slashes as path
            # delimiter.
            name = os.path.join('/'.join(bucket_name.split('/')[1:]),
                                os.path.basename(p))
        else:
            name = os.path.basename(p)
        logging.debug("Using key: {}".format(name))
        if bucket.get_key(name) is None or overwrite:
            logging.info("Uploading file {}".format(name))
            k = Key(bucket)
            k.key = name
            if overwrite:
                n = k.set_contents_from_filename(p, replace=True)
            else:
                n = k.set_contents_from_filename(p, replace=False)
            # Uploaded artifacts are public downloads.
            k.make_public()
        else:
            logging.warn("Not uploading file {}, as it already exists in the target bucket.".format(name))
    return True
def go_list(vendor=False, relative=False):
    """Return the list of Go packages reported by `go list ./...`.

    vendor: when False, vendored packages are excluded.
    relative: when True, the PACKAGE_URL import-path prefix is rewritten
        to '.' and the root package itself is dropped.
    """
    proc = subprocess.Popen(["go", "list", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    # Popen.communicate() returns bytes on Python 3; decode before splitting
    # (the original called str.split on bytes, a TypeError under Python 3).
    packages = out.decode('utf-8').split('\n')
    if packages[-1] == '':
        packages = packages[:-1]
    if not vendor:
        # Drop vendored dependencies.
        packages = [pkg for pkg in packages if '/vendor/' not in pkg]
    if relative:
        relative_pkgs = []
        for pkg in packages:
            r = pkg.replace(PACKAGE_URL, '.')
            if r != '.':
                relative_pkgs.append(r)
        packages = relative_pkgs
    return packages
def build(version=None,
          platform=None,
          arch=None,
          nightly=False,
          race=False,
          clean=False,
          outdir=".",
          tags=[],
          static=False):
    """Build each target for the specified architecture and platform.

    version: version string baked into the binary via -ldflags.
    platform/arch: GOOS/GOARCH selection ("static_" arch prefix forces a
        CGO-free static build).
    nightly: accepted but not used in this function.
    race: enable the Go race detector.
    clean: wipe outdir before building (guarded against '/' and '.').
    outdir: destination directory for binaries.
    tags: Go build tags (note: mutable default, but only read here).
    Returns True on success, False for an invalid ARM arch.
    """
    logging.info("Starting build for {}/{}...".format(platform, arch))
    logging.info("Using Go version: {}".format(get_go_version()))
    logging.info("Using git branch: {}".format(get_current_branch()))
    logging.info("Using git commit: {}".format(get_current_commit()))
    if static:
        logging.info("Using statically-compiled output.")
    if race:
        logging.info("Race is enabled.")
    if len(tags) > 0:
        logging.info("Using build tags: {}".format(','.join(tags)))
    logging.info("Sending build output to: {}".format(outdir))
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    elif clean and outdir != '/' and outdir != ".":
        logging.info("Cleaning build directory '{}' before building.".format(outdir))
        shutil.rmtree(outdir)
        os.makedirs(outdir)
    logging.info("Using version '{}' for build.".format(version))
    for target, path in targets.items():
        logging.info("Building target: {}".format(target))
        build_command = ""
        # Handle static binary output
        if static is True or "static_" in arch:
            if "static_" in arch:
                static = True
                arch = arch.replace("static_", "")
            build_command += "CGO_ENABLED=0 "
        # Handle variations in architecture output
        if arch == "i386" or arch == "i686":
            arch = "386"
        elif "arm" in arch:
            # NOTE(review): this collapses armel/armhf/arm64 to plain "arm"
            # BEFORE the GOARM selection below, so the armel/arm64 branches
            # there look unreachable — confirm intended GOARM values.
            arch = "arm"
        build_command += "GOOS={} GOARCH={} ".format(platform, arch)
        if "arm" in arch:
            if arch == "armel":
                build_command += "GOARM=5 "
            elif arch == "armhf" or arch == "arm":
                build_command += "GOARM=6 "
            elif arch == "arm64":
                # TODO(rossmcdonald) - Verify this is the correct setting for arm64
                build_command += "GOARM=7 "
            else:
                logging.error("Invalid ARM architecture specified: {}".format(arch))
                logging.error("Please specify either 'armel', 'armhf', or 'arm64'.")
                return False
        if platform == 'windows':
            target = target + '.exe'
        build_command += "go build -o {} ".format(os.path.join(outdir, target))
        if race:
            build_command += "-race "
        if len(tags) > 0:
            build_command += "-tags {} ".format(','.join(tags))
        if "1.4" in get_go_version():
            # Go 1.4 linker syntax: '-X name value'.
            if static:
                build_command += "-ldflags=\"-s -X main.version {} -X main.commit {}\" ".format(version,
                                                                                               get_current_commit())
            else:
                build_command += "-ldflags=\"-X main.version {} -X main.commit {}\" ".format(version,
                                                                                             get_current_commit())
        else:
            # Starting with Go 1.5, the linker flag arguments changed to 'name=value' from 'name value'
            if static:
                build_command += "-ldflags=\"-s -X main.version={} -X main.commit={}\" ".format(version,
                                                                                               get_current_commit())
            else:
                build_command += "-ldflags=\"-X main.version={} -X main.commit={}\" ".format(version,
                                                                                             get_current_commit())
        if static:
            build_command += "-a -installsuffix cgo "
        build_command += path
        start_time = datetime.utcnow()
        run(build_command, shell=True, print_output=True)
        end_time = datetime.utcnow()
        logging.info("Time taken: {}s".format((end_time - start_time).total_seconds()))
    return True
def generate_sha256_from_file(path):
    """Return the hex SHA256 digest of the file at `path`, read in 4 KiB
    chunks to keep memory flat."""
    digest = hashlib.sha256()
    with open(path, 'rb') as fh:
        for block in iter(lambda: fh.read(4096), b""):
            digest.update(block)
    return digest.hexdigest()
def generate_md5_from_file(path):
    """Return the hex MD5 digest of the file at `path`, read in 4 KiB
    chunks to keep memory flat."""
    digest = hashlib.md5()
    with open(path, 'rb') as fh:
        for block in iter(lambda: fh.read(4096), b""):
            digest.update(block)
    return digest.hexdigest()
def generate_sig_from_file(path):
    """Create a detached, ASCII-armored GPG signature (<path>.asc) for `path`.

    Honors GNUPG_HOME when set. Returns False (skipping signing) when no
    gpg binary is on PATH, True otherwise.
    """
    logging.debug("Generating GPG signature for file: {}".format(path))
    gpg_path = check_path_for('gpg')
    if gpg_path is None:
        # logging.warn was removed in Python 3.13; use warning().
        logging.warning("gpg binary not found on path! Skipping signature creation.")
        return False
    if os.environ.get("GNUPG_HOME") is not None:
        run('gpg --homedir {} --armor --yes --detach-sign {}'.format(os.environ.get("GNUPG_HOME"), path))
    else:
        run('gpg --armor --detach-sign --yes {}'.format(path))
    return True
def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False):
    """Package the output of the build process.

    build_output: {platform: {arch: binary_dir}} as produced by main()/build().
    pkg_name: base name for generated packages.
    version / iteration: package version metadata.
    nightly / release: adjust naming and version stamping for those flavors.
    static: binaries are statically linked (flat package layout, no scripts).
    Returns the list of produced package file paths.
    """
    outfiles = []
    tmp_build_dir = create_temp_dir()
    logging.debug("Packaging for build output: {}".format(build_output))
    logging.info("Using temporary directory: {}".format(tmp_build_dir))
    try:
        for platform in build_output:
            # Create top-level folder displaying which platform (linux, etc)
            os.makedirs(os.path.join(tmp_build_dir, platform))
            for arch in build_output[platform]:
                logging.info("Creating packages for {}/{}".format(platform, arch))
                # Create second-level directory displaying the architecture (amd64, etc)
                current_location = build_output[platform][arch]
                # Create directory tree to mimic file system of package
                build_root = os.path.join(tmp_build_dir,
                                          platform,
                                          arch,
                                          '{}-{}-{}'.format(PACKAGE_NAME, version, iteration))
                os.makedirs(build_root)
                # Copy packaging scripts to build directory
                if platform == "windows":
                    # For windows and static builds, just copy
                    # binaries to root of package (no other scripts or
                    # directories)
                    package_scripts(build_root, config_only=True, windows=True)
                elif static or "static_" in arch:
                    package_scripts(build_root, config_only=True)
                else:
                    create_package_fs(build_root)
                    package_scripts(build_root)
                for binary in targets:
                    # Copy newly-built binaries to packaging directory
                    if platform == 'windows':
                        binary = binary + '.exe'
                    if platform == 'windows' or static or "static_" in arch:
                        # Where the binary should go in the package filesystem
                        to = os.path.join(build_root, binary)
                        # Where the binary currently is located
                        fr = os.path.join(current_location, binary)
                    else:
                        # Where the binary currently is located
                        fr = os.path.join(current_location, binary)
                        # Where the binary should go in the package filesystem
                        to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary)
                    shutil.copy(fr, to)
                for package_type in supported_packages[platform]:
                    # Package the directory structure for each package type for the platform
                    logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type))
                    name = pkg_name
                    # Reset version, iteration, and current location on each run
                    # since they may be modified below.
                    package_version = version
                    package_iteration = iteration
                    if "static_" in arch:
                        # Remove the "static_" from the displayed arch on the package
                        package_arch = arch.replace("static_", "")
                    else:
                        package_arch = arch
                    if not release and not nightly:
                        # For non-release builds, just use the commit hash as the version
                        package_version = "{}~{}".format(version,
                                                         get_current_commit(short=True))
                        package_iteration = "0"
                    package_build_root = build_root
                    current_location = build_output[platform][arch]
                    if package_type in ['zip', 'tar']:
                        # For tars and zips, start the packaging one folder above
                        # the build root (to include the package name)
                        package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1]))
                        # Archive names carry flavor (nightly/static), platform and arch.
                        if nightly:
                            if static or "static_" in arch:
                                name = '{}-static-nightly_{}_{}'.format(name,
                                                                        platform,
                                                                        package_arch)
                            else:
                                name = '{}-nightly_{}_{}'.format(name,
                                                                 platform,
                                                                 package_arch)
                        else:
                            if static or "static_" in arch:
                                name = '{}-{}-static_{}_{}'.format(name,
                                                                   package_version,
                                                                   platform,
                                                                   package_arch)
                            else:
                                name = '{}-{}_{}_{}'.format(name,
                                                            package_version,
                                                            platform,
                                                            package_arch)
                        current_location = os.path.join(os.getcwd(), current_location)
                        if package_type == 'tar':
                            tar_command = "cd {} && tar -cvzf {}.tar.gz --owner=root ./*".format(package_build_root, name)
                            run(tar_command, shell=True, print_output=True)
                            run("mv {}.tar.gz {}".format(os.path.join(package_build_root, name), current_location), shell=True)
                            outfile = os.path.join(current_location, name + ".tar.gz")
                            outfiles.append(outfile)
                        elif package_type == 'zip':
                            zip_command = "cd {} && zip -r {}.zip ./*".format(package_build_root, name)
                            run(zip_command, shell=True, print_output=True)
                            run("mv {}.zip {}".format(os.path.join(package_build_root, name), current_location), shell=True)
                            outfile = os.path.join(current_location, name + ".zip")
                            outfiles.append(outfile)
                    elif package_type not in ['zip', 'tar'] and static or "static_" in arch:
                        logging.info("Skipping package type '{}' for static builds.".format(package_type))
                    else:
                        # deb/rpm are produced via fpm; its output names the file.
                        fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
                            fpm_common_args,
                            name,
                            package_arch,
                            package_type,
                            package_version,
                            package_iteration,
                            package_build_root,
                            current_location)
                        if package_type == "rpm":
                            fpm_command += "--depends coreutils"
                        # TODO: Check for changelog
                        # elif package_type == "deb":
                        #     fpm_command += "--deb-changelog {} ".format(os.path.join(os.getcwd(), "CHANGELOG.md"))
                        out = run(fpm_command, shell=True)
                        # Parse the produced file path out of fpm's Ruby-hash output.
                        matches = re.search(':path=>"(.*)"', out)
                        outfile = None
                        if matches is not None:
                            outfile = matches.groups()[0]
                        if outfile is None:
                            logging.warn("Could not determine output from packaging output!")
                        else:
                            if nightly:
                                # TODO: check if this is correct
                                # if package_type == 'rpm':
                                #     # rpm's convert any dashes to underscores
                                #     package_version = package_version.replace("-", "_")
                                #     logging.debug("Changing package output version from {} to {} for RPM.".format(version, package_version))
                                # Strip nightly version from package name
                                new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), "nightly")
                                os.rename(outfile, new_outfile)
                                outfile = new_outfile
                            else:
                                if package_type == 'rpm':
                                    # rpm's convert any dashes to underscores
                                    package_version = package_version.replace("-", "_")
                                    logging.debug("Changing package output version from {} to {} for RPM.".format(version, package_version))
                                new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), package_version)
                                os.rename(outfile, new_outfile)
                                outfile = new_outfile
                            outfiles.append(os.path.join(os.getcwd(), outfile))
        logging.debug("Produced package files: {}".format(outfiles))
        return outfiles
    finally:
        pass
        # Cleanup
        # shutil.rmtree(tmp_build_dir)
def main(args):
    """Drive the full build / package / sign / upload pipeline.

    :param args: parsed argparse namespace from the __main__ block
    :returns: process exit code (0 on success, 1 on any failure)
    """
    global PACKAGE_NAME
    # A build cannot be both a tagged release and a nightly snapshot.
    if args.release and args.nightly:
        logging.error("Cannot be both a nightly and a release.")
        return 1
    if args.nightly:
        # Nightlies are versioned as the *next* minor with a UTC timestamp
        # suffix, and always use iteration 0.
        args.version = increment_minor_version(args.version)
        args.version = "{}~n{}".format(args.version,
                                       datetime.utcnow().strftime("%Y%m%d%H%M"))
        args.iteration = 0
    # Pre-build checks
    check_environ()
    if not check_prereqs():
        return 1
    if args.build_tags is None:
        args.build_tags = []
    else:
        args.build_tags = args.build_tags.split(',')
    # Remember where we started so we can switch back at the end.
    orig_commit = get_current_commit(short=True)
    orig_branch = get_current_branch()
    if args.platform not in supported_builds and args.platform != 'all':
        logging.error("Invalid build platform: {}".format(args.platform))
        return 1
    build_output = {}
    # Branch and commit are mutually exclusive checkout targets.
    if args.branch != orig_branch and args.commit != orig_commit:
        logging.error("Can only specify one branch or commit to build from.")
        return 1
    elif args.branch != orig_branch:
        logging.info("Moving to git branch: {}".format(args.branch))
        run("git checkout {}".format(args.branch), print_output=True)
    elif args.commit != orig_commit:
        logging.info("Moving to git commit: {}".format(args.commit))
        run("git checkout {}".format(args.commit), print_output=True)
    if args.clean:
        if not make_clean():
            return 1
    if not args.no_get:
        if not go_get(args.branch, update=args.update, no_uncommitted=args.no_uncommitted):
            return 1
    if args.generate:
        if not run_generate():
            return 1
    if args.test:
        if not run_tests(args.race, args.parallel, args.timeout, args.no_vet):
            return 1
    if args.no_build:
        return 0
    # Decide the (platform, arch) matrix; a single build writes directly to
    # outdir, a matrix build nests output under outdir/<platform>/<arch>.
    platforms = []
    single_build = True
    if args.platform == 'all':
        platforms = supported_builds.keys()
        single_build = False
    else:
        platforms = [args.platform]
    for platform in platforms:
        build_output.update({platform: {}})
        archs = []
        if args.arch == "all":
            single_build = False
            archs = supported_builds.get(platform)
        else:
            archs = [args.arch]
        for arch in archs:
            od = args.outdir
            if not single_build:
                od = os.path.join(args.outdir, platform, arch)
            if not build(version=args.version,
                         platform=platform,
                         arch=arch,
                         nightly=args.nightly,
                         race=args.race,
                         clean=args.clean,
                         outdir=od,
                         tags=args.build_tags,
                         static=args.static):
                return 1
            build_output.get(platform).update({arch: od})
    # Build packages
    if args.package:
        if not check_path_for("fpm"):
            logging.error("FPM ruby gem required for packaging. Stopping.")
            return 1
        packages = package(build_output,
                           args.name,
                           args.version,
                           nightly=args.nightly,
                           iteration=args.iteration,
                           static=args.static,
                           release=args.release)
        if args.sign:
            logging.debug("Generating GPG signatures for packages: {}".format(packages))
            sigs = []  # retain signatures so they can be uploaded with packages
            for p in packages:
                if generate_sig_from_file(p):
                    sigs.append(p + '.asc')
                else:
                    logging.error("Creation of signature for package [{}] failed!".format(p))
                    return 1
            packages += sigs
        if args.upload:
            logging.debug("Files staged for upload: {}".format(packages))
            if args.nightly:
                # Nightlies always replace the previous nightly artifacts.
                args.upload_overwrite = True
            if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite):
                return 1
        # Build a summary dict (checksums + download links) per package file.
        package_output = {}
        for p in packages:
            p_name = p.split('/')[-1:][0]
            if ".asc" in p_name:
                # Skip public keys
                continue
            arch = None
            # NOTE(review): `type` shadows the builtin; left as-is since this
            # is a behavior-preserving documentation pass.
            type = None
            regex = None
            nice_name = None
            if ".deb" in p_name:
                type = "ubuntu"
                nice_name = "Ubuntu"
                regex = r"^.+_(.+)\.deb$"
            elif ".rpm" in p_name:
                type = "centos"
                nice_name = "CentOS"
                regex = r"^.+\.(.+)\.rpm$"
            elif ".tar.gz" in p_name:
                if "linux" in p_name:
                    if "static" in p_name:
                        type = "linux_static"
                        nice_name = "Linux Static"
                    else:
                        type = "linux"
                        nice_name = "Linux"
                elif "darwin" in p_name:
                    type = "darwin"
                    nice_name = "Mac OS X"
                regex = r"^.+_(.+)\.tar.gz$"
            elif ".zip" in p_name:
                if "windows" in p_name:
                    type = "windows"
                    nice_name = "Windows"
                regex = r"^.+_(.+)\.zip$"
            if regex is None or type is None:
                logging.error("Could not determine package type for: {}".format(p))
                return 1
            # NOTE(review): if the regex unexpectedly fails to match, `match`
            # is None and .groups() raises before the `arch is None` guard
            # below can fire -- confirm whether that guard is reachable.
            match = re.search(regex, p_name)
            arch = match.groups()[0]
            if arch is None:
                logging.error("Could not determine arch for: {}".format(p))
                return 1
            # Normalize arch names to the deb/rpm conventions.
            if arch == "x86_64":
                arch = "amd64"
            elif arch == "x86_32":
                arch = "i386"
            package_name = str(arch) + "_" + str(type)
            package_output[package_name] = {
                "sha256": generate_sha256_from_file(p),
                "md5": generate_md5_from_file(p),
                "filename": p_name,
                "name": nice_name,
                "link": "https://dl.influxdata.com/chronograf/releases/" + p_name.rsplit('/', 1)[-1],
            }
        # Print the downloads in Markdown format for the release
        if args.release:
            lines = []
            for package_name, v in package_output.items():
                line = v['name'] + " | [" + v['filename'] + "](" + v['link'] + ") | `" + v['sha256'] + "`"
                lines.append(line)
            lines.sort()
            print("## Docker")
            print("docker pull quay.io/influxdb/chronograf:" + get_current_version_tag())
            print("")
            print("## Packages")
            print("")
            print("Arch | Package | SHA256")
            print("--- | --- | ---")
            for line in lines:
                print(line)
        package_output["version"] = args.version
        logging.info(json.dumps(package_output, sort_keys=True, indent=4))
    # Restore the original git branch if we checked something else out.
    if orig_branch != get_current_branch():
        logging.info("Moving back to original git branch: {}".format(orig_branch))
        run("git checkout {}".format(orig_branch), print_output=True)
    return 0
if __name__ == '__main__':
    # Configure logging before anything else (helpers below may log).
    LOG_LEVEL = logging.INFO
    if '--debug' in sys.argv[1:]:
        LOG_LEVEL = logging.DEBUG
    log_format = '[%(levelname)s] %(funcName)s: %(message)s'
    logging.basicConfig(stream=sys.stdout,
                        level=LOG_LEVEL,
                        format=log_format)

    parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.')
    parser.add_argument('--verbose', '-v', '--debug',
                        action='store_true',
                        help='Use debug output')
    parser.add_argument('--outdir', '-o',
                        metavar='<output directory>',
                        default='./build/',
                        type=os.path.abspath,
                        help='Output directory')
    parser.add_argument('--name', '-n',
                        metavar='<name>',
                        default=PACKAGE_NAME,
                        type=str,
                        help='Name to use for package name (when package is specified)')
    parser.add_argument('--arch',
                        metavar='<amd64|i386|armhf|arm64|armel|all>',
                        type=str,
                        default=get_system_arch(),
                        help='Target architecture for build output')
    parser.add_argument('--platform',
                        metavar='<linux|darwin|windows|all>',
                        type=str,
                        default=get_system_platform(),
                        help='Target platform for build output')
    parser.add_argument('--branch',
                        metavar='<branch>',
                        type=str,
                        default=get_current_branch(),
                        help='Build from a specific branch')
    parser.add_argument('--commit',
                        metavar='<commit>',
                        type=str,
                        default=get_current_commit(short=True),
                        help='Build from a specific commit')
    parser.add_argument('--version',
                        metavar='<version>',
                        type=str,
                        default=get_current_version(),
                        help='Version information to apply to build output (ex: 0.12.0)')
    parser.add_argument('--iteration',
                        metavar='<package iteration>',
                        type=str,
                        default="1",
                        help='Package iteration to apply to build output (defaults to 1)')
    parser.add_argument('--stats',
                        action='store_true',
                        help='Emit build metrics (requires InfluxDB Python client)')
    parser.add_argument('--stats-server',
                        metavar='<hostname:port>',
                        type=str,
                        help='Send build stats to InfluxDB using provided hostname and port')
    parser.add_argument('--stats-db',
                        metavar='<database name>',
                        type=str,
                        help='Send build stats to InfluxDB using provided database name')
    parser.add_argument('--nightly',
                        action='store_true',
                        help='Mark build output as nightly build (will increment the minor version)')
    parser.add_argument('--update',
                        action='store_true',
                        help='Update build dependencies prior to building')
    parser.add_argument('--package',
                        action='store_true',
                        help='Package binary output')
    parser.add_argument('--release',
                        action='store_true',
                        help='Mark build output as release')
    parser.add_argument('--clean',
                        action='store_true',
                        help='Clean output directory before building')
    parser.add_argument('--no-get',
                        action='store_true',
                        help='Do not retrieve pinned dependencies when building')
    parser.add_argument('--no-uncommitted',
                        action='store_true',
                        help='Fail if uncommitted changes exist in the working directory')
    parser.add_argument('--upload',
                        action='store_true',
                        help='Upload output packages to AWS S3')
    # Help text fixed: previously duplicated --upload's description.
    parser.add_argument('--upload-overwrite', '-w',
                        action='store_true',
                        help='Overwrite existing packages in AWS S3 when uploading')
    parser.add_argument('--bucket',
                        metavar='<S3 bucket name>',
                        type=str,
                        default=DEFAULT_BUCKET,
                        help='Destination bucket for uploads')
    # NOTE(review): store_true with default=True means this flag can never be
    # turned off from the command line -- confirm whether a --no-generate
    # counterpart was intended.
    parser.add_argument('--generate',
                        action='store_true',
                        default=True,
                        help='Run "go generate" before building')
    parser.add_argument('--build-tags',
                        metavar='<tags>',
                        help='Optional build tags to use for compilation')
    parser.add_argument('--static',
                        action='store_true',
                        help='Create statically-compiled binary output')
    parser.add_argument('--sign',
                        action='store_true',
                        help='Create GPG detached signatures for packages (when package is specified)')
    parser.add_argument('--test',
                        action='store_true',
                        help='Run tests (does not produce build output)')
    parser.add_argument('--no-vet',
                        action='store_true',
                        help='Do not run "go vet" when running tests')
    parser.add_argument('--race',
                        action='store_true',
                        help='Enable race flag for build output')
    parser.add_argument('--parallel',
                        metavar='<num threads>',
                        type=int,
                        help='Number of tests to run simultaneously')
    parser.add_argument('--timeout',
                        metavar='<timeout>',
                        type=str,
                        help='Timeout for tests before failing')
    parser.add_argument('--no-build',
                        action='store_true',
                        help="Don't build anything.")
    args = parser.parse_args()
    print_banner()
    sys.exit(main(args))
|
#!/usr/bin/env python3
import os
import re
import sys
import json
import time
import select
import logging
import argparse
import threading
import subprocess
# Pinned etcd image deployed alongside pachd.
ETCD_IMAGE = "quay.io/coreos/etcd:v3.3.5"

# Maps CLI level names to the stdlib logging constants.
LOG_LEVELS = {
    "critical": logging.CRITICAL,
    "error": logging.ERROR,
    "warning": logging.WARNING,
    "info": logging.INFO,
    "debug": logging.DEBUG,
}

# ANSI color prefixes applied when echoing subprocess output at these levels
# (levels absent here are echoed uncolored).
LOG_COLORS = {
    "critical": "\x1b[31;1m",
    "error": "\x1b[31;1m",
    "warning": "\x1b[33;1m",
}

# Matches auto-generated local contexts in the pachctl config
# ("local", "local-1", "local-2", ...).
LOCAL_CONFIG_PATTERN = re.compile(r"^local(-\d+)?$")

# Module-level logger emitting "LEVEL:message" lines.
log = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(levelname)s:%(message)s"))
log.addHandler(handler)
class ExcThread(threading.Thread):
    """Thread that records, rather than swallows, its target's exception.

    The stock Thread.run lets exceptions die with the thread; this variant
    stashes whatever the target raised on ``self.error`` so the spawner can
    inspect or re-raise it after join().
    """

    def __init__(self, target):
        super().__init__(target=target)
        self.error = None

    def run(self):
        try:
            self._target()
        except Exception as exc:
            self.error = exc
def join(*targets):
    """Run every target on its own ExcThread and wait for all of them.

    After all threads finish, raises a generic Exception (chained to the
    original error via ``from``) if any target raised.
    """
    workers = []
    for target in targets:
        worker = ExcThread(target)
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
    for worker in workers:
        if worker.error is not None:
            raise Exception("Thread error") from worker.error
class Output:
    """Bookkeeping for one of a subprocess's output pipes.

    Holds the pipe being drained, the log-level name used when echoing its
    lines, and every non-empty line captured so far.
    """

    def __init__(self, pipe, level):
        self.lines = []
        self.level = level
        self.pipe = pipe
class ProcessResult:
    """Outcome of a finished subprocess: exit code plus captured output."""

    def __init__(self, rc, stdout, stderr):
        self.stderr = stderr
        self.stdout = stdout
        self.rc = rc
class DefaultDriver:
    """Fallback driver: assumes a k8s cluster is already running locally."""

    def available(self):
        # Always usable; this is the driver of last resort.
        return True

    def clear(self):
        # Best-effort wipe of pachyderm state; failure is logged, not raised,
        # since it usually just means no cluster was deployed yet.
        if run("yes | pachctl delete all", shell=True, raise_on_error=False).rc != 0:
            log.error("could not call `pachctl delete all`; most likely this just means that a pachyderm cluster hasn't been setup, but may indicate a bad state")

    def start(self):
        # Nothing to boot; the cluster is assumed to be up already.
        pass

    def push_images(self, deploy_version, dash_image):
        # Locally built images are already visible to the cluster's daemon.
        pass

    def wait(self):
        # Poll until pachd answers `pachctl version`.
        while suppress("pachctl", "version") != 0:
            log.info("Waiting for pachyderm to come up...")
            time.sleep(1)

    def set_config(self):
        # pachd is exposed on localhost via the NodePort 30650.
        run("pachctl", "config", "update", "context", "--pachd-address=localhost:30650")
class MinikubeDriver(DefaultDriver):
    """Driver that manages a dedicated minikube VM for the cluster."""

    def available(self):
        # Usable only when the minikube binary is on PATH.
        return run("which", "minikube", raise_on_error=False).rc == 0

    def clear(self):
        # Deleting the VM resets the entire cluster.
        run("minikube", "delete")

    def start(self):
        run("minikube", "start")
        # Block until minikube reports a healthy status.
        while suppress("minikube", "status") != 0:
            log.info("Waiting for minikube to come up...")
            time.sleep(1)

    def push_images(self, deploy_version, dash_image):
        # Copy locally built/pulled images into the minikube VM's docker daemon.
        run("./etc/kube/push-to-minikube.sh", "pachyderm/pachd:{}".format(deploy_version))
        run("./etc/kube/push-to-minikube.sh", "pachyderm/worker:{}".format(deploy_version))
        run("./etc/kube/push-to-minikube.sh", ETCD_IMAGE)
        run("./etc/kube/push-to-minikube.sh", dash_image)

    def set_config(self):
        # pachd is reachable on the VM's IP rather than localhost.
        ip = capture("minikube", "ip")
        run("pachctl", "config", "update", "context", "--pachd-address={}".format(ip))
def parse_log_level(s):
    """Translate a textual log level (e.g. "info") into its logging constant.

    Raises a generic Exception for unknown names so argparse surfaces a
    readable error message.
    """
    try:
        return LOG_LEVELS[s]
    except KeyError:
        raise Exception("Unknown log level: {}".format(s))
def run(cmd, *args, raise_on_error=True, shell=False, stdout_log_level="info", stderr_log_level="error"):
    """Run a subprocess, echoing its stdout/stderr through the module logger.

    :param cmd: executable name, or a full shell command string when shell=True
    :param args: extra argv entries
    :param raise_on_error: raise if the process exits non-zero
    :param shell: run the command through a shell
    :param stdout_log_level / stderr_log_level: log-level names used when
        echoing each stream (also select the ANSI color from LOG_COLORS)
    :returns: ProcessResult with the return code and both captured streams
    """
    log.debug("Running `%s %s`", cmd, " ".join(args))
    # NOTE(review): with shell=True, Popen uses only the first element of the
    # list as the command and ignores the rest -- callers appear to always
    # pass a single full command string in that mode; confirm.
    proc = subprocess.Popen([cmd, *args], shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = Output(proc.stdout, stdout_log_level)
    stderr = Output(proc.stderr, stderr_log_level)
    timed_out_last = False
    while True:
        # Stop once the process has exited and the previous pass saw no
        # ready pipes (streams drained), or both pipes are closed.
        if (proc.poll() is not None and timed_out_last) or (stdout.pipe.closed and stderr.pipe.closed):
            break
        for io in select.select([stdout.pipe, stderr.pipe], [], [], 100)[0]:
            timed_out_last = False
            line = io.readline().decode().rstrip()
            if line == "":
                continue
            # Echo the line at the level/color configured for its stream.
            dest = stdout if io == stdout.pipe else stderr
            log.log(LOG_LEVELS[dest.level], "{}{}\x1b[0m".format(LOG_COLORS.get(dest.level, ""), line))
            dest.lines.append(line)
        else:
            # NOTE(review): a for/else `else` runs whenever the loop completes
            # without `break` -- i.e. after *every* pass here, not only when
            # select() timed out. The flag therefore ends up True after each
            # pass regardless of the assignments above; confirm intent.
            timed_out_last = True
    rc = proc.wait()
    if raise_on_error and rc != 0:
        raise Exception("Unexpected return code for `{} {}`: {}".format(cmd, " ".join(args), rc))
    return ProcessResult(rc, "\n".join(stdout.lines), "\n".join(stderr.lines))
def capture(cmd, *args, shell=False):
    """Run ``cmd`` and hand back everything it printed to stdout.

    The output is logged at debug level so it stays quiet in normal runs.
    """
    result = run(cmd, *args, shell=shell, stdout_log_level="debug")
    return result.stdout
def suppress(cmd, *args):
    """Run ``cmd`` purely for its exit code, hiding all of its output."""
    outcome = run(cmd, *args, stdout_log_level="debug", stderr_log_level="debug", raise_on_error=False)
    return outcome.rc
def get_pachyderm(deploy_version):
    """Ensure pachctl at ``deploy_version`` is installed and pull its images.

    Downloads the matching pachctl release into $GOPATH/bin when the local
    binary is missing or reports a different client version.
    """
    log.info("Deploying pachd:{}".format(deploy_version))
    should_download = suppress("which", "pachctl") != 0 \
        or capture("pachctl", "version", "--client-only") != deploy_version
    if should_download:
        release_url = "https://github.com/pachyderm/pachyderm/releases/download/v{}/pachctl_{}_linux_amd64.tar.gz".format(deploy_version, deploy_version)
        outpath = os.path.join(os.environ["GOPATH"], "bin")
        filepath = "pachctl_{}_linux_amd64/pachctl".format(deploy_version)
        # Stream the release tarball straight into $GOPATH/bin, extracting
        # only the pachctl binary.
        run("curl -L {} | tar -C \"{}\" --strip-components=1 -xzf - {}".format(release_url, outpath, filepath), shell=True)
    run("docker", "pull", "pachyderm/pachd:{}".format(deploy_version))
    run("docker", "pull", "pachyderm/worker:{}".format(deploy_version))
def rewrite_config():
    """Strip auto-generated "local" contexts from the pachctl config file.

    Removes every context whose name matches LOCAL_CONFIG_PATTERN (and which
    carries at least one setting) from ~/.pachyderm/config.json, then writes
    the file back. Silently does nothing when the file is missing, unreadable,
    or not valid JSON.
    """
    log.info("Rewriting config")
    config_path = os.path.expanduser("~/.pachyderm/config.json")
    try:
        with open(config_path, "r") as f:
            j = json.load(f)
    except (OSError, ValueError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. open() raises OSError;
        # json.load raises ValueError (JSONDecodeError) on bad content.
        return
    v2 = j.get("v2")
    if not v2:
        return
    contexts = v2["contexts"]
    stale = {k for k, v in contexts.items()
             if LOCAL_CONFIG_PATTERN.fullmatch(k) and len(v) > 0}
    for k in stale:
        del contexts[k]
    with open(config_path, "w") as f:
        json.dump(j, f, indent=2)
def main():
    """Reset the local cluster, rebuild or fetch pachyderm, and redeploy it."""
    parser = argparse.ArgumentParser(description="Recompiles pachyderm tooling and restarts the cluster with a clean slate.")
    parser.add_argument("--no-deploy", default=False, action="store_true", help="Disables deployment")
    parser.add_argument("--no-config-rewrite", default=False, action="store_true", help="Disables config rewriting")
    parser.add_argument("--deploy-args", default="", help="Arguments to be passed into `pachctl deploy`")
    parser.add_argument("--deploy-version", default="local", help="Sets the deployment version")
    parser.add_argument("--log-level", default="info", type=parse_log_level, help="Sets the log level; defaults to 'info', other options include 'critical', 'error', 'warning', and 'debug'")
    args = parser.parse_args()
    log.setLevel(args.log_level)
    if "GOPATH" not in os.environ:
        log.critical("Must set GOPATH")
        sys.exit(1)
    if not args.no_deploy and "PACH_CA_CERTS" in os.environ:
        # BUG FIX: logging.Logger.critical() accepts no `file` kwarg (that is
        # print()'s signature); passing file=sys.stderr raised TypeError
        # before the message could be shown.
        log.critical("Must unset PACH_CA_CERTS\nRun:\nunset PACH_CA_CERTS")
        sys.exit(1)
    # Prefer minikube when installed; otherwise fall back to docker-desktop k8s.
    if MinikubeDriver().available():
        log.info("using the minikube driver")
        driver = MinikubeDriver()
    else:
        log.info("using the k8s for docker driver")
        log.warning("with this driver, it's not possible to fully reset the cluster")
        driver = DefaultDriver()
    driver.clear()
    gopath = os.environ["GOPATH"]
    if args.deploy_version == "local":
        # Remove any stale binary so a failed `make install` can't go unnoticed.
        try:
            os.remove(os.path.join(gopath, "bin", "pachctl"))
        except OSError:
            # Narrowed from a bare `except:`; os.remove only raises OSError.
            pass
        procs = [
            driver.start,
            lambda: run("make", "install"),
            lambda: run("make", "docker-build"),
        ]
        if not args.no_config_rewrite:
            procs.append(rewrite_config)
        join(*procs)
    else:
        join(
            driver.start,
            lambda: get_pachyderm(args.deploy_version),
        )
    version = capture("pachctl", "version", "--client-only")
    log.info("Deploy pachyderm version v{}".format(version))
    # Wait until the freshly built/downloaded pachctl is runnable.
    while suppress("pachctl", "version", "--client-only") != 0:
        log.info("Waiting for pachctl to build...")
        time.sleep(1)
    run("which", "pachctl")
    # Extract auxiliary image names from a dry-run deploy manifest via jq.
    dash_image = capture("pachctl deploy local -d --dry-run | jq -r '.. | select(.name? == \"dash\" and has(\"image\")).image'", shell=True)
    grpc_proxy_image = capture("pachctl deploy local -d --dry-run | jq -r '.. | select(.name? == \"grpc-proxy\").image'", shell=True)
    run("docker", "pull", dash_image)
    run("docker", "pull", grpc_proxy_image)
    run("docker", "pull", ETCD_IMAGE)
    driver.push_images(args.deploy_version, dash_image)
    if not args.no_deploy:
        if args.deploy_version == "local":
            run("pachctl deploy local -d {}".format(args.deploy_args), shell=True)
        else:
            # Rewrite :local image tags to the requested version on the fly.
            run("pachctl deploy local -d {} --dry-run | sed \"s/:local/:{}/g\" | kubectl create -f -".format(args.deploy_args, args.deploy_version), shell=True)
    driver.wait()
    run("killall", "kubectl", raise_on_error=False)
    driver.set_config()
# Script entry point.
if __name__ == "__main__":
    main()
Reset underlying k8s resources on clear
#!/usr/bin/env python3
import os
import re
import sys
import json
import time
import select
import logging
import argparse
import threading
import subprocess
# Pinned etcd image deployed alongside pachd.
ETCD_IMAGE = "quay.io/coreos/etcd:v3.3.5"

# Maps CLI level names to the stdlib logging constants.
LOG_LEVELS = {
    "critical": logging.CRITICAL,
    "error": logging.ERROR,
    "warning": logging.WARNING,
    "info": logging.INFO,
    "debug": logging.DEBUG,
}

# ANSI color prefixes applied when echoing subprocess output at these levels
# (levels absent here are echoed uncolored).
LOG_COLORS = {
    "critical": "\x1b[31;1m",
    "error": "\x1b[31;1m",
    "warning": "\x1b[33;1m",
}

# Matches auto-generated local contexts in the pachctl config
# ("local", "local-1", "local-2", ...).
LOCAL_CONFIG_PATTERN = re.compile(r"^local(-\d+)?$")

# Module-level logger emitting "LEVEL:message" lines.
log = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(levelname)s:%(message)s"))
log.addHandler(handler)
class ExcThread(threading.Thread):
    """Thread that records, rather than swallows, its target's exception.

    The stock Thread.run lets exceptions die with the thread; this variant
    stashes whatever the target raised on ``self.error`` so the spawner can
    inspect or re-raise it after join().
    """

    def __init__(self, target):
        super().__init__(target=target)
        self.error = None

    def run(self):
        try:
            self._target()
        except Exception as exc:
            self.error = exc
def join(*targets):
    """Run every target on its own ExcThread and wait for all of them.

    After all threads finish, raises a generic Exception (chained to the
    original error via ``from``) if any target raised.
    """
    workers = []
    for target in targets:
        worker = ExcThread(target)
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
    for worker in workers:
        if worker.error is not None:
            raise Exception("Thread error") from worker.error
class Output:
    """Bookkeeping for one of a subprocess's output pipes.

    Holds the pipe being drained, the log-level name used when echoing its
    lines, and every non-empty line captured so far.
    """

    def __init__(self, pipe, level):
        self.lines = []
        self.level = level
        self.pipe = pipe
class ProcessResult:
    """Outcome of a finished subprocess: exit code plus captured output."""

    def __init__(self, rc, stdout, stderr):
        self.stderr = stderr
        self.stdout = stdout
        self.rc = rc
class DefaultDriver:
    """Fallback driver: assumes a k8s cluster is already running locally."""

    def available(self):
        # Always usable; this is the driver of last resort.
        return True

    def clear(self):
        # Reset the underlying k8s resources directly, since this driver
        # cannot recreate the cluster itself.
        run("kubectl", "delete", "daemonsets,replicasets,services,deployments,pods,rc,pvc", "--all")

    def start(self):
        # Nothing to boot; the cluster is assumed to be up already.
        pass

    def push_images(self, deploy_version, dash_image):
        # Locally built images are already visible to the cluster's daemon.
        pass

    def wait(self):
        # Poll until pachd answers `pachctl version`.
        while suppress("pachctl", "version") != 0:
            log.info("Waiting for pachyderm to come up...")
            time.sleep(1)

    def set_config(self):
        # pachd is exposed on localhost via the NodePort 30650.
        run("pachctl", "config", "update", "context", "--pachd-address=localhost:30650")
class MinikubeDriver(DefaultDriver):
    """Driver that manages a dedicated minikube VM for the cluster."""

    def available(self):
        # Usable only when the minikube binary is on PATH.
        return run("which", "minikube", raise_on_error=False).rc == 0

    def clear(self):
        # Deleting the VM resets the entire cluster.
        run("minikube", "delete")

    def start(self):
        run("minikube", "start")
        # Block until minikube reports a healthy status.
        while suppress("minikube", "status") != 0:
            log.info("Waiting for minikube to come up...")
            time.sleep(1)

    def push_images(self, deploy_version, dash_image):
        # Copy locally built/pulled images into the minikube VM's docker daemon.
        run("./etc/kube/push-to-minikube.sh", "pachyderm/pachd:{}".format(deploy_version))
        run("./etc/kube/push-to-minikube.sh", "pachyderm/worker:{}".format(deploy_version))
        run("./etc/kube/push-to-minikube.sh", ETCD_IMAGE)
        run("./etc/kube/push-to-minikube.sh", dash_image)

    def set_config(self):
        # pachd is reachable on the VM's IP rather than localhost.
        ip = capture("minikube", "ip")
        run("pachctl", "config", "update", "context", "--pachd-address={}".format(ip))
def parse_log_level(s):
    """Translate a textual log level (e.g. "info") into its logging constant.

    Raises a generic Exception for unknown names so argparse surfaces a
    readable error message.
    """
    try:
        return LOG_LEVELS[s]
    except KeyError:
        raise Exception("Unknown log level: {}".format(s))
def run(cmd, *args, raise_on_error=True, shell=False, stdout_log_level="info", stderr_log_level="error"):
    """Run a subprocess, echoing its stdout/stderr through the module logger.

    :param cmd: executable name, or a full shell command string when shell=True
    :param args: extra argv entries
    :param raise_on_error: raise if the process exits non-zero
    :param shell: run the command through a shell
    :param stdout_log_level / stderr_log_level: log-level names used when
        echoing each stream (also select the ANSI color from LOG_COLORS)
    :returns: ProcessResult with the return code and both captured streams
    """
    log.debug("Running `%s %s`", cmd, " ".join(args))
    # NOTE(review): with shell=True, Popen uses only the first element of the
    # list as the command and ignores the rest -- callers appear to always
    # pass a single full command string in that mode; confirm.
    proc = subprocess.Popen([cmd, *args], shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = Output(proc.stdout, stdout_log_level)
    stderr = Output(proc.stderr, stderr_log_level)
    timed_out_last = False
    while True:
        # Stop once the process has exited and the previous pass saw no
        # ready pipes (streams drained), or both pipes are closed.
        if (proc.poll() is not None and timed_out_last) or (stdout.pipe.closed and stderr.pipe.closed):
            break
        for io in select.select([stdout.pipe, stderr.pipe], [], [], 100)[0]:
            timed_out_last = False
            line = io.readline().decode().rstrip()
            if line == "":
                continue
            # Echo the line at the level/color configured for its stream.
            dest = stdout if io == stdout.pipe else stderr
            log.log(LOG_LEVELS[dest.level], "{}{}\x1b[0m".format(LOG_COLORS.get(dest.level, ""), line))
            dest.lines.append(line)
        else:
            # NOTE(review): a for/else `else` runs whenever the loop completes
            # without `break` -- i.e. after *every* pass here, not only when
            # select() timed out. The flag therefore ends up True after each
            # pass regardless of the assignments above; confirm intent.
            timed_out_last = True
    rc = proc.wait()
    if raise_on_error and rc != 0:
        raise Exception("Unexpected return code for `{} {}`: {}".format(cmd, " ".join(args), rc))
    return ProcessResult(rc, "\n".join(stdout.lines), "\n".join(stderr.lines))
def capture(cmd, *args, shell=False):
    """Run ``cmd`` and hand back everything it printed to stdout.

    The output is logged at debug level so it stays quiet in normal runs.
    """
    result = run(cmd, *args, shell=shell, stdout_log_level="debug")
    return result.stdout
def suppress(cmd, *args):
    """Run ``cmd`` purely for its exit code, hiding all of its output."""
    outcome = run(cmd, *args, stdout_log_level="debug", stderr_log_level="debug", raise_on_error=False)
    return outcome.rc
def get_pachyderm(deploy_version):
    """Ensure pachctl at ``deploy_version`` is installed and pull its images.

    Downloads the matching pachctl release into $GOPATH/bin when the local
    binary is missing or reports a different client version.
    """
    log.info("Deploying pachd:{}".format(deploy_version))
    should_download = suppress("which", "pachctl") != 0 \
        or capture("pachctl", "version", "--client-only") != deploy_version
    if should_download:
        release_url = "https://github.com/pachyderm/pachyderm/releases/download/v{}/pachctl_{}_linux_amd64.tar.gz".format(deploy_version, deploy_version)
        outpath = os.path.join(os.environ["GOPATH"], "bin")
        filepath = "pachctl_{}_linux_amd64/pachctl".format(deploy_version)
        # Stream the release tarball straight into $GOPATH/bin, extracting
        # only the pachctl binary.
        run("curl -L {} | tar -C \"{}\" --strip-components=1 -xzf - {}".format(release_url, outpath, filepath), shell=True)
    run("docker", "pull", "pachyderm/pachd:{}".format(deploy_version))
    run("docker", "pull", "pachyderm/worker:{}".format(deploy_version))
def rewrite_config():
    """Strip auto-generated "local" contexts from the pachctl config file.

    Removes every context whose name matches LOCAL_CONFIG_PATTERN (and which
    carries at least one setting) from ~/.pachyderm/config.json, then writes
    the file back. Silently does nothing when the file is missing, unreadable,
    or not valid JSON.
    """
    log.info("Rewriting config")
    config_path = os.path.expanduser("~/.pachyderm/config.json")
    try:
        with open(config_path, "r") as f:
            j = json.load(f)
    except (OSError, ValueError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. open() raises OSError;
        # json.load raises ValueError (JSONDecodeError) on bad content.
        return
    v2 = j.get("v2")
    if not v2:
        return
    contexts = v2["contexts"]
    stale = {k for k, v in contexts.items()
             if LOCAL_CONFIG_PATTERN.fullmatch(k) and len(v) > 0}
    for k in stale:
        del contexts[k]
    with open(config_path, "w") as f:
        json.dump(j, f, indent=2)
def main():
    """Reset the local cluster, rebuild or fetch pachyderm, and redeploy it."""
    parser = argparse.ArgumentParser(description="Recompiles pachyderm tooling and restarts the cluster with a clean slate.")
    parser.add_argument("--no-deploy", default=False, action="store_true", help="Disables deployment")
    parser.add_argument("--no-config-rewrite", default=False, action="store_true", help="Disables config rewriting")
    parser.add_argument("--deploy-args", default="", help="Arguments to be passed into `pachctl deploy`")
    parser.add_argument("--deploy-version", default="local", help="Sets the deployment version")
    parser.add_argument("--log-level", default="info", type=parse_log_level, help="Sets the log level; defaults to 'info', other options include 'critical', 'error', 'warning', and 'debug'")
    args = parser.parse_args()
    log.setLevel(args.log_level)
    if "GOPATH" not in os.environ:
        log.critical("Must set GOPATH")
        sys.exit(1)
    if not args.no_deploy and "PACH_CA_CERTS" in os.environ:
        # BUG FIX: logging.Logger.critical() accepts no `file` kwarg (that is
        # print()'s signature); passing file=sys.stderr raised TypeError
        # before the message could be shown.
        log.critical("Must unset PACH_CA_CERTS\nRun:\nunset PACH_CA_CERTS")
        sys.exit(1)
    # Prefer minikube when installed; otherwise fall back to docker-desktop k8s.
    if MinikubeDriver().available():
        log.info("using the minikube driver")
        driver = MinikubeDriver()
    else:
        log.info("using the k8s for docker driver")
        log.warning("with this driver, it's not possible to fully reset the cluster")
        driver = DefaultDriver()
    driver.clear()
    gopath = os.environ["GOPATH"]
    if args.deploy_version == "local":
        # Remove any stale binary so a failed `make install` can't go unnoticed.
        try:
            os.remove(os.path.join(gopath, "bin", "pachctl"))
        except OSError:
            # Narrowed from a bare `except:`; os.remove only raises OSError.
            pass
        procs = [
            driver.start,
            lambda: run("make", "install"),
            lambda: run("make", "docker-build"),
        ]
        if not args.no_config_rewrite:
            procs.append(rewrite_config)
        join(*procs)
    else:
        join(
            driver.start,
            lambda: get_pachyderm(args.deploy_version),
        )
    version = capture("pachctl", "version", "--client-only")
    log.info("Deploy pachyderm version v{}".format(version))
    # Wait until the freshly built/downloaded pachctl is runnable.
    while suppress("pachctl", "version", "--client-only") != 0:
        log.info("Waiting for pachctl to build...")
        time.sleep(1)
    run("which", "pachctl")
    # Extract auxiliary image names from a dry-run deploy manifest via jq.
    dash_image = capture("pachctl deploy local -d --dry-run | jq -r '.. | select(.name? == \"dash\" and has(\"image\")).image'", shell=True)
    grpc_proxy_image = capture("pachctl deploy local -d --dry-run | jq -r '.. | select(.name? == \"grpc-proxy\").image'", shell=True)
    run("docker", "pull", dash_image)
    run("docker", "pull", grpc_proxy_image)
    run("docker", "pull", ETCD_IMAGE)
    driver.push_images(args.deploy_version, dash_image)
    if not args.no_deploy:
        if args.deploy_version == "local":
            run("pachctl deploy local -d {}".format(args.deploy_args), shell=True)
        else:
            # Rewrite :local image tags to the requested version on the fly.
            run("pachctl deploy local -d {} --dry-run | sed \"s/:local/:{}/g\" | kubectl create -f -".format(args.deploy_args, args.deploy_version), shell=True)
    driver.wait()
    run("killall", "kubectl", raise_on_error=False)
    driver.set_config()
# Script entry point.
if __name__ == "__main__":
    main()
|
from django.db import models
import utils.models as utils
class AutomatedMessageQuerySet(utils.BaseQuerySet):
    '''QuerySet helpers for resolving a message description string
    ("base.group.condition.hiv.offset") to the best AutomatedMessage.'''

    def for_participant(self, participant, exact=False, **kwargs):
        '''Return AutomatedMessage for participant and today'''
        description = participant.description(**kwargs)
        # print "Description:",description
        return self.from_description(description, exact)

    def from_description(self, description, exact=False):
        '''Return AutomatedMessage for description

        :param description (str): base.group.condition.hiv.offset string to look for
        :returns: AutomatedMessage matching description or closest match if not found
        '''
        send_base, group, condition, hiv_messaging, send_offset = description.split('.')
        hiv = hiv_messaging == "Y"
        send_offset = int(send_offset)
        # Special case: post-date (edd) messages go back and forth between
        # the week 41 and 42 messages once the offset passes -2.
        if send_base == 'edd' and send_offset < -2:
            send_offset = (send_offset + 1) % -2 - 1
        return self.from_parameters(send_base, group, condition, send_offset, hiv, exact=exact)

    def from_parameters(self, send_base, group, condition='normal', send_offset=0, hiv=False, exact=False):
        '''Return the best AutomatedMessage for the given parameters.

        Tries an exact match first, then progressively relaxes hiv_messaging,
        condition, and group. Returns None when exact=True and nothing matched;
        may also fall through and return None implicitly when no fallback
        branch applies.
        '''
        # Look for exact match of parameters
        try:
            return self.get(send_base=send_base, send_offset=send_offset,
                            group=group, condition=condition, hiv_messaging=hiv)
        except AutomatedMessage.DoesNotExist as e:
            if exact == True:
                return None
            # No match for participant conditions; continue to find best match
            pass
        # Create the base query set with send_base and offset
        message_offset = self.filter(send_base=send_base, send_offset=send_offset)
        if hiv:
            # Try to find a non-HIV message for this condition
            try:
                return message_offset.get(condition=condition, group=group, hiv_messaging=False)
            except AutomatedMessage.DoesNotExist as e:
                pass
            # Force condition to normal and try again with group and hiv=True
            try:
                return message_offset.get(condition="normal", group=group, hiv_messaging=hiv)
            except AutomatedMessage.DoesNotExist as e:
                pass
        if condition != "normal":
            # Force condition to normal and try again
            try:
                return message_offset.get(condition="normal", group=group, hiv_messaging=False)
            except AutomatedMessage.DoesNotExist as e:
                pass
        if group == "two-way":
            # Force group to one-way and try again
            try:
                return message_offset.get(condition=condition, group="one-way", hiv_messaging=False)
            except AutomatedMessage.DoesNotExist as e:
                pass
        if condition != "normal" and group != "one-way":
            # Force group to one-way and force hiv_messaging off; return message or None
            return message_offset.filter(condition='normal', group='one-way', hiv_messaging=False).first()

    def from_excel(self, msg):
        '''Create or update the AutomatedMessage matching this spreadsheet
        row's description; returns (message, 'created'|'changed'|'same').'''
        auto = self.from_description(msg.description(), exact=True)
        if auto is None:
            return self.create(**msg.kwargs()), 'created'
        else:
            # Fall back to the "new" column when no English text is present.
            msg_english = msg.english if msg.english != '' else msg.new
            changed = msg_english != auto.english or msg.swahili != auto.swahili or msg.luo != auto.luo
            auto.english = msg_english
            auto.swahili = msg.swahili
            auto.luo = msg.luo
            auto.todo = msg.status == 'todo'
            auto.save()
            return auto, 'changed' if changed else 'same'
class AutomatedMessage(models.Model):
    """Automated Messages for sending to participants"""

    # Schedule anchor that send_offset is measured from.
    SEND_BASES_CHOICES = (
        ('edd', 'Before EDD'),
        ('over', 'Post Dates'),
        ('dd', 'Postpartum'),
        ('visit', 'Visit'),
        ('signup', 'From Signup'),
        ('connect', 'Reconnect'),
        ('bounce', 'Bounce'),
        ('loss', 'Loss'),
        ('stop', 'Stop'),
    )

    GROUP_CHOICES = (
        ('control', 'Control'),
        ('one-way', 'One Way'),
        ('two-way', 'Two Way'),
    )

    CONDITION_CHOICES = (
        ('art', 'Starting ART'),
        ('adolescent', 'Adolescent'),
        ('first', 'First Time Mother'),
        ('normal', 'Normal'),
        ('nbaby', 'No Baby'),
    )

    class Meta:
        app_label = 'backend'

    objects = AutomatedMessageQuerySet.as_manager()

    priority = models.IntegerField(default=0)
    # Message translations; get_language() selects one of these by name.
    english = models.TextField(blank=True)
    swahili = models.TextField(blank=True)
    luo = models.TextField(blank=True)
    comment = models.TextField(blank=True)
    group = models.CharField(max_length=20, choices=GROUP_CHOICES)  # 2 groups
    condition = models.CharField(max_length=20, choices=CONDITION_CHOICES)  # 4 conditions
    hiv_messaging = models.BooleanField()  # True or False
    send_base = models.CharField(max_length=20, help_text='Base to send messages from', choices=SEND_BASES_CHOICES)
    send_offset = models.IntegerField(default=0, help_text='Offset from base in weeks')
    todo = models.BooleanField()

    def category(self):
        # base.group.condition.hiv -- description() appends the offset.
        return "{0.send_base}.{0.group}.{0.condition}.{1}".format(self,
            'Y' if self.hiv_messaging else 'N')

    def description(self):
        # Full lookup key used by AutomatedMessageQuerySet.from_description.
        return "{0}.{1}".format(self.category(), self.send_offset)

    def text_for(self, participant, extra_kwargs=None):
        # Fill the participant's language template with their message kwargs.
        text = self.get_language(participant.language)
        message_kwargs = participant.message_kwargs()
        if extra_kwargs is not None:
            message_kwargs.update(extra_kwargs)
        return text.format(**message_kwargs)

    def get_language(self, language):
        # TODO: Error checking -- raises AttributeError for unknown codes.
        return getattr(self, language)

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return "<AutomatedMessage: {}>".format(self.description())
minor formatting/style tweaks
from django.db import models
import utils.models as utils
class AutomatedMessageQuerySet(utils.BaseQuerySet):
"""
Used to map a single description to an AutomatedMessage.
"""
    def for_participant(self, participant, exact=False, **kwargs):
        """
        Return AutomatedMessage for participant and today

        Builds the participant's description string and delegates to
        from_description().
        """
        description = participant.description(**kwargs)
        # print "Description:",description
        return self.from_description(description, exact)
    def from_description(self, description, exact=False):
        """
        Return AutomatedMessage for description

        :param description (str): base.group.condition.hiv.offset string to look for
        :returns: AutomatedMessage matching description or closest match if not found
        """
        send_base, group, condition, hiv_messaging, send_offset = description.split('.')
        hiv = hiv_messaging == "Y"
        send_offset = int(send_offset)
        # Special case for post date messages: go back and forth between
        # the week 41 and 42 messages once the offset passes -2.
        if send_base == 'edd' and send_offset < -2:
            send_offset = (send_offset + 1) % -2 - 1
        return self.from_parameters(send_base, group, condition, send_offset, hiv, exact=exact)
def from_parameters(self,send_base,group,condition='normal',send_offset=0,hiv=False,exact=False):
# Look for exact match of parameters
try:
return self.get(send_base=send_base, send_offset=send_offset,
group=group, condition=condition, hiv_messaging=hiv)
except AutomatedMessage.DoesNotExist as e:
if exact == True:
return None
# No match for participant conditions continue to find best match
pass
# Create the base query set with send_base and offset
message_offset = self.filter(send_base=send_base,send_offset=send_offset)
if hiv:
# Try to find a non HIV message for this conditon
try:
return message_offset.get(condition=condition,group=group,hiv_messaging=False)
except AutomatedMessage.DoesNotExist as e:
pass
# Force condition to normal and try again with group and hiv=True
try:
return message_offset.get(condition="normal",group=group,hiv_messaging=hiv)
except AutomatedMessage.DoesNotExist as e:
pass
if condition != "normal":
# Force condition to normal and try again
try:
return message_offset.get(condition="normal",group=group,hiv_messaging=False)
except AutomatedMessage.DoesNotExist as e:
pass
if group == "two-way":
# Force group to one-way and try again
try:
return message_offset.get(condition=condition,group="one-way",hiv_messaging=False)
except AutomatedMessage.DoesNotExist as e:
pass
if condition != "normal" and group != "one-way":
# Force group to one-way and force hiv_messaging off return message or None
return message_offset.filter(condition='normal',group='one-way',hiv_messaging=False).first()
def from_excel(self,msg):
"""
Replace fields of message content with matching description
"""
auto = self.from_description(msg.description(),exact=True)
if auto is None:
return self.create(**msg.kwargs()) , 'created'
else:
msg_english = msg.english if msg.english != '' else msg.new
changed = msg_english != auto.english or msg.swahili != auto.swahili or msg.luo != auto.luo
auto.english = msg_english
auto.swahili = msg.swahili
auto.luo = msg.luo
auto.todo = msg.status == 'todo'
auto.save()
return auto, 'changed' if changed else 'same'
class AutomatedMessage(models.Model):
    """Automated Messages for sending to participants"""
    # Reference event a send time is computed from.
    SEND_BASES_CHOICES = (
        ('edd','Before EDD'),
        ('over','Post Dates'),
        ('dd','Postpartum'),
        ('visit','Visit'),
        ('signup','From Signup'),
        ('connect','Reconnect'),
        ('bounce','Bounce'),
        ('loss','Loss'),
        ('stop','Stop'),
    )
    GROUP_CHOICES = (
        ('control','Control'),
        ('one-way','One Way'),
        ('two-way','Two Way'),
    )
    CONDITION_CHOICES = (
        ('art','Starting ART'),
        ('adolescent','Adolescent'),
        ('first','First Time Mother'),
        ('normal','Normal'),
        ('nbaby','No Baby'),
    )
    class Meta:
        app_label = 'backend'
    objects = AutomatedMessageQuerySet.as_manager()
    priority = models.IntegerField(default=0)
    # One translation column per supported language; get_language() selects
    # one by attribute name.
    english = models.TextField(blank=True)
    swahili = models.TextField(blank=True)
    luo = models.TextField(blank=True)
    comment = models.TextField(blank=True)
    group = models.CharField(max_length=20,choices=GROUP_CHOICES) # 2 groups
    condition = models.CharField(max_length=20,choices=CONDITION_CHOICES) # 4 conditions
    hiv_messaging = models.BooleanField() # True or False
    send_base = models.CharField(max_length=20,help_text='Base to send messages from',choices=SEND_BASES_CHOICES)
    send_offset = models.IntegerField(default=0,help_text='Offset from base in weeks')
    todo = models.BooleanField()
    def category(self):
        # base.group.condition.<Y|N> -- the key used by queryset lookups.
        return "{0.send_base}.{0.group}.{0.condition}.{1}".format(self,
            'Y' if self.hiv_messaging else 'N')
    def description(self):
        # Category plus send_offset: the full unique description string.
        return "{0}.{1}".format(self.category(),self.send_offset)
    def text_for(self,participant,extra_kwargs=None):
        # Render the participant-language template with participant kwargs.
        text = self.get_language(participant.language)
        message_kwargs = participant.message_kwargs()
        if extra_kwargs is not None:
            message_kwargs.update(extra_kwargs)
        return text.format(**message_kwargs)
    def get_language(self,language):
        # TODO: Error checking
        # Raises AttributeError for an unknown language code.
        return getattr(self,language)
    def __str__(self):
        return self.__repr__()
    def __repr__(self):
        return "<AutomatedMessage: {}>".format(self.description())
|
#!/usr/bin/env python3
import os
import re
import json
import time
import argparse
import threading
import subprocess
# Pinned etcd image deployed alongside pachd.
ETCD_IMAGE = "quay.io/coreos/etcd:v3.3.5"
# Kubernetes resource kinds wiped by DefaultDriver.clear().
DELETABLE_RESOURCES = [
    "daemonsets",
    "replicasets",
    "services",
    "deployments",
    "pods",
    "rc",
    "pvc",
    "serviceaccounts",
    "secrets",
]
# Matches the boundary between back-to-back JSON objects ("}\n{") so a
# newline-separated manifest stream can be rewritten into one JSON array.
NEWLINE_SEPARATE_OBJECTS_PATTERN = re.compile(r"\}\n+\{", re.MULTILINE)
class ExcThread(threading.Thread):
    """Thread that records any exception raised by its target.

    Inspect ``.error`` after ``join()``; it is None on success.
    """
    def __init__(self, target):
        super().__init__(target=target)
        self.error = None  # exception raised by the target, if any
    def run(self):
        # Capture the failure instead of letting the thread die silently.
        try:
            self._target()
        except Exception as exc:
            self.error = exc
def join(*targets):
    """Run each callable on its own ExcThread, wait for all of them, then
    re-raise the first recorded failure (chained via ``from``)."""
    workers = []
    for target in targets:
        worker = ExcThread(target)
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
    for worker in workers:
        if worker.error is not None:
            raise Exception("Thread error") from worker.error
class DefaultDriver:
    """Baseline cluster driver; subclasses specialize for minikube/docker."""
    def available(self):
        # The default driver can always be used.
        return True
    def clear(self):
        # Best-effort teardown: ignore errors so a half-deployed cluster
        # does not abort the reset.
        run("kubectl", "delete", ",".join(DELETABLE_RESOURCES), "--all", raise_on_error=False)
        run("kubectl", "delete", "clusterrole.rbac.authorization.k8s.io/pachyderm", raise_on_error=False)
        run("kubectl", "delete", "clusterrolebinding.rbac.authorization.k8s.io/pachyderm", raise_on_error=False)
    def start(self):
        # Nothing to boot; the cluster is assumed to be running already.
        pass
    def push_images(self, dash_image):
        # Images are assumed reachable by the cluster; nothing to push.
        pass
    def set_config(self):
        # Point pachctl at the current kubectl context.
        kube_context = capture("kubectl", "config", "current-context").strip()
        run("pachctl", "config", "set", "context", kube_context, "--kubernetes", kube_context, "--overwrite")
        run("pachctl", "config", "set", "active-context", kube_context)
class DockerDriver(DefaultDriver):
    """Driver for Kubernetes bundled with Docker Desktop."""
    def set_config(self):
        super().set_config()
        # pachd is exposed on the local node port.
        run("pachctl", "config", "update", "context", "--pachd-address=localhost:30650")
class MinikubeDriver(DefaultDriver):
    """Driver that runs the cluster inside minikube."""
    def available(self):
        # Usable only when the minikube binary is on PATH.
        return run("which", "minikube", raise_on_error=False).returncode == 0
    def clear(self):
        # Deleting the VM wipes everything; no per-resource cleanup needed.
        run("minikube", "delete")
    def start(self):
        run("minikube", "start")
        # Poll until minikube reports a healthy status.
        while run("minikube", "status", raise_on_error=False).returncode != 0:
            print("Waiting for minikube to come up...")
            time.sleep(1)
    def push_images(self, dash_image):
        # Locally built/pulled images must be copied into the minikube VM.
        run("./etc/kube/push-to-minikube.sh", "pachyderm/pachd:local")
        run("./etc/kube/push-to-minikube.sh", "pachyderm/worker:local")
        run("./etc/kube/push-to-minikube.sh", ETCD_IMAGE)
        run("./etc/kube/push-to-minikube.sh", dash_image)
    def set_config(self):
        super().set_config()
        # pachd is reachable at the minikube VM's IP.
        ip = capture("minikube", "ip")
        run("pachctl", "config", "update", "context", "--pachd-address={}".format(ip))
def find_in_json(j, f):
    """Depth-first search of a nested JSON-like structure.

    Args:
        j: any JSON value (dict, list, or scalar).
        f: predicate applied to `j` itself and, recursively, to every dict
           value and list element.
    Returns:
        The first value for which `f` returns truthy, or None if none match.
    """
    if f(j):
        return j
    # Fix: the original named this `iter`, shadowing the builtin.
    children = None
    if isinstance(j, dict):
        children = j.values()
    elif isinstance(j, list):
        children = j
    if children is not None:
        for child in children:
            found = find_in_json(child, f)
            if found is not None:
                return found
    # Explicit miss (the original fell off the end, returning None implicitly).
    return None
def print_status(status):
    """Print a prefixed progress line so script phases stand out in output."""
    print(f"===> {status}")
def run(cmd, *args, raise_on_error=True, stdin=None, capture_output=False):
    """Echo and execute a command, returning the CompletedProcess.

    raise_on_error maps to subprocess's `check`; stdin is passed as text.
    """
    argv = [cmd, *args]
    print_status(" ".join(argv))
    return subprocess.run(argv, check=raise_on_error, capture_output=capture_output,
                          input=stdin, encoding="utf8")
def capture(cmd, *args):
    """Run a command and return its stdout as text."""
    completed = run(cmd, *args, capture_output=True)
    return completed.stdout
def main():
    """Rebuild pachctl/pachd from source and redeploy a fresh local cluster."""
    parser = argparse.ArgumentParser(description="Recompiles pachyderm tooling and restarts the cluster with a clean slate.")
    parser.add_argument("--args", default="", help="Arguments to be passed into `pachctl deploy`")
    args = parser.parse_args()
    # NOTE(review): `args.args` is parsed but never forwarded to the
    # `pachctl deploy` invocation below -- confirm whether that is intended.
    if "GOPATH" not in os.environ:
        raise Exception("Must set GOPATH")
    if "PACH_CA_CERTS" in os.environ:
        raise Exception("Must unset PACH_CA_CERTS\nRun:\nunset PACH_CA_CERTS")
    # Prefer minikube when installed; otherwise fall back to docker's k8s.
    if MinikubeDriver().available():
        print_status("using the minikube driver")
        driver = MinikubeDriver()
    else:
        print_status("using the k8s for docker driver")
        driver = DockerDriver()
    driver.clear()
    # Remove the stale binary so `make install` definitely produces a new one.
    bin_path = os.path.join(os.environ["GOPATH"], "bin", "pachctl")
    if os.path.exists(bin_path):
        os.remove(bin_path)
    # Cluster boot and the two builds are independent; run them in parallel.
    join(
        driver.start,
        lambda: run("make", "install"),
        lambda: run("make", "docker-build"),
    )
    version = capture("pachctl", "version", "--client-only")
    print_status("Deploy pachyderm version v{}".format(version))
    # `--dry-run` emits newline-separated JSON objects; stitch into one array.
    deployments_str = capture("pachctl", "deploy", "local", "-d", "--dry-run")
    deployments_json = json.loads("[{}]".format(NEWLINE_SEPARATE_OBJECTS_PATTERN.sub("},{", deployments_str)))
    # Pull the images referenced by the generated manifests.
    dash_image = find_in_json(deployments_json, lambda j: isinstance(j, dict) and j.get("name") == "dash" and j.get("image") is not None)["image"]
    grpc_proxy_image = find_in_json(deployments_json, lambda j: isinstance(j, dict) and j.get("name") == "grpc-proxy")["image"]
    run("docker", "pull", dash_image)
    run("docker", "pull", grpc_proxy_image)
    run("docker", "pull", ETCD_IMAGE)
    driver.push_images(dash_image)
    run("kubectl", "create", "-f", "-", stdin=deployments_str)
    driver.set_config()
    # Poll until pachd answers a version request.
    while run("pachctl", "version", raise_on_error=False).returncode:
        print_status("Waiting for pachyderm to come up...")
        time.sleep(1)
if __name__ == "__main__":
    main()
Run `pachctl delete all` first, so pachyderm data is removed before the underlying Kubernetes resources are cleared.
#!/usr/bin/env python3
import os
import re
import json
import time
import argparse
import threading
import subprocess
# Pinned etcd image deployed alongside pachd.
ETCD_IMAGE = "quay.io/coreos/etcd:v3.3.5"
# Kubernetes resource kinds wiped by DefaultDriver.clear().
DELETABLE_RESOURCES = [
    "daemonsets",
    "replicasets",
    "services",
    "deployments",
    "pods",
    "rc",
    "pvc",
    "serviceaccounts",
    "secrets",
]
# Matches the boundary between back-to-back JSON objects ("}\n{") so a
# newline-separated manifest stream can be rewritten into one JSON array.
NEWLINE_SEPARATE_OBJECTS_PATTERN = re.compile(r"\}\n+\{", re.MULTILINE)
class ExcThread(threading.Thread):
    """Thread that records any exception raised by its target; inspect
    ``.error`` after ``join()`` (None on success)."""
    def __init__(self, target):
        super().__init__(target=target)
        self.error = None  # the exception raised by the target, if any
    def run(self):
        # Capture the failure instead of letting the thread die silently.
        try:
            self._target()
        except Exception as e:
            self.error = e
def join(*targets):
    """Run each callable on its own ExcThread, wait for all, then re-raise
    the first recorded failure (chained via ``from``)."""
    threads = []
    for target in targets:
        t = ExcThread(target)
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
    # Surface at most one failure; later errors are dropped.
    for t in threads:
        if t.error is not None:
            raise Exception("Thread error") from t.error
class DefaultDriver:
    """Baseline cluster driver; subclasses specialize for minikube/docker."""
    def available(self):
        # The default driver can always be used.
        return True
    def clear(self):
        # Best-effort teardown: ignore errors so a half-deployed cluster
        # does not abort the reset.
        run("kubectl", "delete", ",".join(DELETABLE_RESOURCES), "--all", raise_on_error=False)
        run("kubectl", "delete", "clusterrole.rbac.authorization.k8s.io/pachyderm", raise_on_error=False)
        run("kubectl", "delete", "clusterrolebinding.rbac.authorization.k8s.io/pachyderm", raise_on_error=False)
    def start(self):
        # Nothing to boot; the cluster is assumed to be running already.
        pass
    def push_images(self, dash_image):
        # Images are assumed reachable by the cluster; nothing to push.
        pass
    def set_config(self):
        # Point pachctl at the current kubectl context.
        kube_context = capture("kubectl", "config", "current-context").strip()
        run("pachctl", "config", "set", "context", kube_context, "--kubernetes", kube_context, "--overwrite")
        run("pachctl", "config", "set", "active-context", kube_context)
class DockerDriver(DefaultDriver):
    """Driver for Kubernetes bundled with Docker Desktop."""
    def set_config(self):
        super().set_config()
        # pachd is exposed on the local node port.
        run("pachctl", "config", "update", "context", "--pachd-address=localhost:30650")
class MinikubeDriver(DefaultDriver):
    """Driver that runs the cluster inside minikube."""
    def available(self):
        # Usable only when the minikube binary is on PATH.
        return run("which", "minikube", raise_on_error=False).returncode == 0
    def clear(self):
        # Deleting the VM wipes everything; no per-resource cleanup needed.
        run("minikube", "delete")
    def start(self):
        run("minikube", "start")
        # Poll until minikube reports a healthy status.
        while run("minikube", "status", raise_on_error=False).returncode != 0:
            print("Waiting for minikube to come up...")
            time.sleep(1)
    def push_images(self, dash_image):
        # Locally built/pulled images must be copied into the minikube VM.
        run("./etc/kube/push-to-minikube.sh", "pachyderm/pachd:local")
        run("./etc/kube/push-to-minikube.sh", "pachyderm/worker:local")
        run("./etc/kube/push-to-minikube.sh", ETCD_IMAGE)
        run("./etc/kube/push-to-minikube.sh", dash_image)
    def set_config(self):
        super().set_config()
        # pachd is reachable at the minikube VM's IP.
        ip = capture("minikube", "ip")
        run("pachctl", "config", "update", "context", "--pachd-address={}".format(ip))
def find_in_json(j, f):
    """Depth-first search of a nested JSON-like structure.

    Args:
        j: any JSON value (dict, list, or scalar).
        f: predicate applied to `j` itself and, recursively, to every dict
           value and list element.
    Returns:
        The first value for which `f` returns truthy, or None if none match.
    """
    if f(j):
        return j
    # Fix: the original named this `iter`, shadowing the builtin.
    children = None
    if isinstance(j, dict):
        children = j.values()
    elif isinstance(j, list):
        children = j
    if children is not None:
        for child in children:
            found = find_in_json(child, f)
            if found is not None:
                return found
    # Explicit miss (the original fell off the end, returning None implicitly).
    return None
def print_status(status):
    """Print a prefixed progress line so script phases stand out in output."""
    print("===> {}".format(status))
def run(cmd, *args, raise_on_error=True, stdin=None, capture_output=False):
    """Echo and execute a command; returns the CompletedProcess.

    raise_on_error maps to subprocess's `check`; stdin is passed as text.
    """
    all_args = [cmd, *args]
    print_status(" ".join(all_args))
    return subprocess.run(all_args, check=raise_on_error, capture_output=capture_output, input=stdin, encoding="utf8")
def capture(cmd, *args):
    """Run a command and return its stdout as text."""
    return run(cmd, *args, capture_output=True).stdout
def main():
    """Rebuild pachctl/pachd from source and redeploy a fresh local cluster."""
    parser = argparse.ArgumentParser(description="Recompiles pachyderm tooling and restarts the cluster with a clean slate.")
    parser.add_argument("--args", default="", help="Arguments to be passed into `pachctl deploy`")
    args = parser.parse_args()
    # NOTE(review): `args.args` is parsed but never forwarded to the
    # `pachctl deploy` invocation below -- confirm whether that is intended.
    if "GOPATH" not in os.environ:
        raise Exception("Must set GOPATH")
    if "PACH_CA_CERTS" in os.environ:
        raise Exception("Must unset PACH_CA_CERTS\nRun:\nunset PACH_CA_CERTS")
    # Prefer minikube when installed; otherwise fall back to docker's k8s.
    if MinikubeDriver().available():
        print_status("using the minikube driver")
        driver = MinikubeDriver()
    else:
        print_status("using the k8s for docker driver")
        driver = DockerDriver()
    # Delete pachyderm data first (best-effort), then tear down the
    # backing Kubernetes resources.
    run("pachctl", "delete", "all", raise_on_error=False)
    driver.clear()
    # Remove the stale binary so `make install` definitely produces a new one.
    bin_path = os.path.join(os.environ["GOPATH"], "bin", "pachctl")
    if os.path.exists(bin_path):
        os.remove(bin_path)
    # Cluster boot and the two builds are independent; run them in parallel.
    join(
        driver.start,
        lambda: run("make", "install"),
        lambda: run("make", "docker-build"),
    )
    version = capture("pachctl", "version", "--client-only")
    print_status("Deploy pachyderm version v{}".format(version))
    # `--dry-run` emits newline-separated JSON objects; stitch into one array.
    deployments_str = capture("pachctl", "deploy", "local", "-d", "--dry-run")
    deployments_json = json.loads("[{}]".format(NEWLINE_SEPARATE_OBJECTS_PATTERN.sub("},{", deployments_str)))
    # Pull the images referenced by the generated manifests.
    dash_image = find_in_json(deployments_json, lambda j: isinstance(j, dict) and j.get("name") == "dash" and j.get("image") is not None)["image"]
    grpc_proxy_image = find_in_json(deployments_json, lambda j: isinstance(j, dict) and j.get("name") == "grpc-proxy")["image"]
    run("docker", "pull", dash_image)
    run("docker", "pull", grpc_proxy_image)
    run("docker", "pull", ETCD_IMAGE)
    driver.push_images(dash_image)
    run("kubectl", "create", "-f", "-", stdin=deployments_str)
    driver.set_config()
    # Poll until pachd answers a version request.
    while run("pachctl", "version", raise_on_error=False).returncode:
        print_status("Waiting for pachyderm to come up...")
        time.sleep(1)
if __name__ == "__main__":
    main()
|
__author__ = 'Isaac'
from multiprocessing.queues import Empty
class Router:
    """Maps command URLs to handler functions, bridging two queues.

    Commands are dicts with "url" and "body" keys read from command_queue;
    handler results are posted to response_queue as {"url", "body"} dicts.
    """
    def __init__(self, response_queue, command_queue):
        self.response_queue = response_queue
        self.command_queue = command_queue
        self.routes = {}  # url -> handler function
    def handle(self):
        """Process at most one pending command without blocking."""
        try:
            command = self.command_queue.get(block=False)
        except Empty:
            return
        if command is None:
            return
        url = None
        try:
            url = command["url"]
            self.response_queue.put({"url": url, "body": self[url](command["body"])})
        except KeyError:
            # Missing "url"/"body" key or no handler registered for url.
            print("could not handle " + str(url))
    def route(self, url):
        """Decorator registering a handler for `url`."""
        def decorator(function):
            self.routes[url] = function
            return function
        return decorator
    def __getitem__(self, item):
        return self.routes[item]
Move the handler call out of the put() expression into its own line for easier debugging.
__author__ = 'Isaac'
from multiprocessing.queues import Empty
class Router:
    """Maps command URLs to handler functions, bridging two queues.

    Commands are dicts with "url" and "body" keys read from command_queue;
    handler results are posted to response_queue as {"url", "body"} dicts.
    """
    def __init__(self, response_queue, command_queue):
        self.response_queue = response_queue
        self.command_queue = command_queue
        self.routes = {}  # url -> handler function
    def handle(self):
        """Process at most one pending command without blocking."""
        try:
            command = self.command_queue.get(block=False)
        except Empty:
            return
        if command is None:
            return
        url = None
        try:
            url = command["url"]
            handler = self[url]
            body = command["body"]
        except KeyError:
            # Missing "url"/"body" key or no handler registered for url.
            print("could not handle " + str(url))
            return
        # Fix: call the handler OUTSIDE the try so a KeyError raised inside
        # it propagates instead of being misreported as an unknown route.
        result = handler(body)
        self.response_queue.put({"url": url, "body": result})
    def route(self, url):
        """Decorator registering a handler for `url`."""
        def decorator(function):
            self.routes[url] = function
            return function
        return decorator
    def __getitem__(self, item):
        return self.routes[item]
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.conf import settings
from django import http
from django.core.urlresolvers import reverse
from django.views.decorators.http import require_http_methods
from gallery import models as gallery_api
from gallery.utils import create_s3_policy_doc
from gallery.emails import send_confirmation_email
import hmac, hashlib
import random
def gallery(request):
    """ show gallery for all signups for this sequence with profiles """
    # Policy + signature let the browser upload avatars directly to S3.
    s3_policy, signature = create_s3_policy_doc(settings.AWS_S3_BUCKET, 'gallery')
    if request.GET.get('key'):
        pass  # NOTE(review): dead branch -- 'key' is read but never used
    # Per-session S3 key prefix for uploads.
    # NOTE(review): hard-coded HMAC secret should live in settings; a str
    # key also implies Python 2 (hmac.new requires bytes on Python 3).
    prefix = hmac.new(
        'THEANSWERIS42', request.session.session_key, hashlib.sha1
    ).hexdigest()
    bios = gallery_api.get_bios('TODO', limit=32)
    # Pad to 32 tiles with placeholders, then shuffle the whole grid.
    bios += [{'avatar': 'http://placehold.it/120x120', 'email': ''} for i in range(len(bios), 32)]
    bios = random.sample(bios, len(bios))
    # if user is logged in and has a bio, display it!
    user_bio = request.session.get('user_bio', None)
    if user_bio:
        bio_in_list = [ x for x in bios if x['email'] == user_bio['email'] ]
        if len(bio_in_list) == 1:
            # swap user bio with bio at position 12
            bio_index = bios.index(bio_in_list[0])
            bios[bio_index] = bios[11]
            bios[11] = user_bio
        else:
            # make a gap at position 12
            bios[11] = {'avatar': 'http://placehold.it/120x120'}
    context = {
        'bios': bios,
        'user_bio': user_bio,
        'user_email': request.session.get('user_email'),
        's3_policy': s3_policy,
        's3_signature': signature,
        'AWS_ACCESS_KEY_ID': settings.AWS_ACCESS_KEY_ID,
        'AWS_S3_BUCKET': settings.AWS_S3_BUCKET,
        'key_prefix': 'gallery/{0}'.format(prefix)
    }
    return render_to_response('gallery/index.html', context, context_instance=RequestContext(request))
@require_http_methods(['POST'])
def save_bio(request):
    """ receive AJAX post from class gallery page """
    user_bio = gallery_api.save_bio(
        request.POST['email'],
        request.POST['name'],
        request.POST['bio'],
        request.POST['avatar'],
        request.POST.get('twitter', None)
    )
    user_email = request.session.get('user_email', False)
    if user_email and user_email == user_bio['email']:
        # Session email matches the posted bio: the user already proved
        # ownership, so confirm immediately instead of sending an email.
        user_bio = gallery_api.confirm_bio(user_bio['confirmation_code'])
    else:
        send_confirmation_email(
            user_bio['email'], user_bio['name'], user_bio['avatar'],
            user_bio['bio'], user_bio['confirmation_code']
        )
    request.session['user_bio'] = user_bio
    return http.HttpResponseRedirect(reverse('gallery_gallery'))
def confirm_updates(request, confirmation_code):
    # Confirm a bio via an emailed code; on any failure the error is
    # silently ignored and the user is redirected anyway.
    try:
        bio = gallery_api.confirm_bio(confirmation_code)
        request.session['user_bio'] = bio
        request.session['user_email'] = bio['email']
    except Exception:
        pass
    return http.HttpResponseRedirect(reverse('gallery_gallery'))
Check that the user has actually signed up for this MOOC sequence before accepting their gallery bio; warn and redirect to signup otherwise.
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.conf import settings
from django import http
from django.core.urlresolvers import reverse
from django.views.decorators.http import require_http_methods
from django.contrib import messages
from gallery import models as gallery_api
from gallery.utils import create_s3_policy_doc
from gallery.emails import send_confirmation_email
from signup import models as signup_api
import hmac, hashlib
import random
def gallery(request):
    """ show gallery for all signups for this sequence with profiles """
    # Policy + signature let the browser upload avatars directly to S3.
    s3_policy, signature = create_s3_policy_doc(settings.AWS_S3_BUCKET, 'gallery')
    # Fix: removed leftover debug call `messages.error(request, 'Test this')`
    # which showed a spurious error banner on every page load.
    if request.GET.get('key'):
        pass  # NOTE(review): dead branch -- 'key' is read but never used
    # Per-session S3 key prefix for uploads.
    # NOTE(review): hard-coded HMAC secret should live in settings; a str
    # key also implies Python 2 (hmac.new requires bytes on Python 3).
    prefix = hmac.new(
        'THEANSWERIS42', request.session.session_key, hashlib.sha1
    ).hexdigest()
    bios = gallery_api.get_bios('TODO', limit=32)
    # Pad to 32 tiles with placeholders, then shuffle the whole grid.
    bios += [{'avatar': 'http://placehold.it/120x120', 'email': ''} for i in range(len(bios), 32)]
    bios = random.sample(bios, len(bios))
    # if user is logged in and has a bio, display it!
    user_bio = request.session.get('user_bio', None)
    if user_bio:
        bio_in_list = [ x for x in bios if x['email'] == user_bio['email'] ]
        if len(bio_in_list) == 1:
            # swap user bio with bio at position 12
            bio_index = bios.index(bio_in_list[0])
            bios[bio_index] = bios[11]
            bios[11] = user_bio
        else:
            # make a gap at position 12
            bios[11] = {'avatar': 'http://placehold.it/120x120'}
    context = {
        'bios': bios,
        'user_bio': user_bio,
        'user_email': request.session.get('user_email'),
        's3_policy': s3_policy,
        's3_signature': signature,
        'AWS_ACCESS_KEY_ID': settings.AWS_ACCESS_KEY_ID,
        'AWS_S3_BUCKET': settings.AWS_S3_BUCKET,
        'key_prefix': 'gallery/{0}'.format(prefix)
    }
    return render_to_response('gallery/index.html', context, context_instance=RequestContext(request))
@require_http_methods(['POST'])
def save_bio(request):
    """ receive AJAX post from class gallery page """
    user_bio = gallery_api.save_bio(
        request.POST['email'],
        request.POST['name'],
        request.POST['bio'],
        request.POST['avatar'],
        request.POST.get('twitter', None)
    )
    user_email = request.session.get('user_email', False)
    if user_email and user_email == user_bio['email']:
        # Session email matches the posted bio: the user already proved
        # ownership, so confirm immediately instead of sending an email.
        user_bio = gallery_api.confirm_bio(user_bio['confirmation_code'])
    else:
        send_confirmation_email(
            user_bio['email'], user_bio['name'], user_bio['avatar'],
            user_bio['bio'], user_bio['confirmation_code']
        )
    request.session['user_bio'] = user_bio
    # check if user signed up for the mooc
    try:
        signup_api.get_signup(request.POST['email'])
    # Fix: was a bare `except:`, which would also swallow SystemExit and
    # KeyboardInterrupt; catch Exception instead.
    except Exception:
        messages.warning(request, 'It looks like you have not signed up for this sequence of the MOOC! We saved your profile, but you still need to sign up.')
        # redirect user to signup page
        return http.HttpResponseRedirect(reverse('home'))
    return http.HttpResponseRedirect(reverse('gallery_gallery'))
def confirm_updates(request, confirmation_code):
    # Confirm a bio via an emailed code; on failure show an error banner
    # but still redirect back to the gallery.
    try:
        bio = gallery_api.confirm_bio(confirmation_code)
        request.session['user_bio'] = bio
        request.session['user_email'] = bio['email']
    except Exception:
        messages.error(request, 'Could not find the confirmation code. Please make sure the URL is correct')
    return http.HttpResponseRedirect(reverse('gallery_gallery'))
|
#!/usr/bin/python
import os
import shutil
import string
import subprocess
import sys
import time
# Applications and alias analyses to evaluate; may be narrowed by CLI flags.
APPS = ['httpd', 'mysql']
AAS = ['basicaa', 'anders-aa', 'ds-aa']
# Root of the benchmark applications; must be set in the environment.
APPS_DIR = os.getenv('APPS_DIR')
def is_useful(line):
    """Return True for summary output lines worth keeping (success banner,
    detections, CPU timing, swap/memory)."""
    keywords = ('Congrats', 'Detected', 'CPU', 'swap')
    return any(keyword in line for keyword in keywords)
def invoke(args, out_file=None, redirect_stderr=False, is_mysqld=False):
    """Run a command, echoing it first in green (string.join => Python 2).

    out_file: when given, capture stdout (plus stderr if redirect_stderr)
        and tee it to both out_file and our stdout.
    is_mysqld: start the server in the background and return immediately.
    Raises CalledProcessError on non-zero exit (except in mysqld mode).
    """
    sys.stdout.write('\033[0;32m%s\033[m\n' % string.join(args))
    if is_mysqld:
        # mysqld keeps running; don't wait for it to exit.
        p = subprocess.Popen(args)
    else:
        if out_file is None:
            subprocess.check_call(args)
        else:
            if redirect_stderr:
                out = subprocess.check_output(args, stderr=subprocess.STDOUT)
                # sys.stdout.write(out)
                # useful = [line for line in out.splitlines() if is_useful(line)]
                # out = '\n'.join(useful) + '\n'
            else:
                out = subprocess.check_output(args)
                # sys.stdout.write(out)
            out_file.write(out)
            sys.stdout.write(out)
def get_pts_files():
    """Yield full paths of points-to log files (pts*) in /mnt/sdb/dyn-aa."""
    for filename in os.listdir('/mnt/sdb/dyn-aa'):
        if filename.startswith('pts'):
            yield os.path.join('/mnt/sdb/dyn-aa', filename)
def run_httpd(executable, out_file, threads='1', report=False):
    """Start httpd pinned to one CPU set, drive it with ab from another
    CPU set, then stop the server.

    report: capture the server's own stderr into out_file instead of the
        benchmark output.
    """
    cpu0 = '0'
    cpu1 = '1'
    if threads == '4':
        # Server and load generator each get four distinct cores.
        cpu0 = '0,2,4,6'
        cpu1 = '1,3,5,7'
    if report:
        invoke(['taskset', '-c', cpu0,
                './' + executable, '-c', 'MaxClients 25'],
               out_file, redirect_stderr=True)
        # With out_file cleared, the ab run below is not captured.
        out_file = None
    else:
        invoke(['taskset', '-c', cpu0, './' + executable])
    time.sleep(3)  # give the server a moment to come up
    invoke(['time', 'taskset', '-c', cpu1,
            os.path.join(APPS_DIR, 'apache/run-ab'),
            '--times', '10', '10000', threads, 'localhost:8000/test.html'],
           out_file)
    invoke(['./httpd', '-k', 'stop'])
    time.sleep(5)  # wait for shutdown before the next run starts
def run_mysql(executable, out_file, threads='1', small_workload=False):
    """Start mysqld in the background, run sysbench against it, then kill it.

    small_workload: fewer requests/repetitions (used for instrumented runs).
    """
    cpu0 = '0'
    cpu1 = '1'
    if threads == '4':
        # Server and load generator each get four distinct cores.
        cpu0 = '0,2,4,6'
        cpu1 = '1,3,5,7'
    invoke(['taskset', '-c', cpu0, './' + executable, '--thread_stack=8388608'],
           is_mysqld=True)
    time.sleep(3)  # give the server a moment to come up
    args = ['taskset', '-c', cpu1, os.path.join(APPS_DIR, 'mysql/run-sysbench')]
    if small_workload:
        args += ['-n', '100', '-r', '1']
    else:
        args += ['-n', '10000', '-r', '10']
    if threads == '4':
        args += ['-p', threads]
    invoke(args, out_file)
    invoke(['killall', executable])
    time.sleep(3)  # wait for shutdown before the next run starts
def eval_baseline_httpd(threads='1'):
    """Compile plain httpd from bitcode and benchmark it (no alias checks)."""
    os.chdir('httpd')
    invoke(['clang', '-pthread', '-lcrypt', '-o', 'httpd', 'httpd.bc'])
    with open('../baseline-httpd-%s.out' % threads, 'w') as out_file:
        run_httpd('httpd', out_file, threads)
    os.chdir('..')
def eval_baseline_mysql(threads='1'):
    """Compile plain mysqld from bitcode and benchmark it (no alias checks)."""
    os.chdir('mysql')
    invoke(['clang++', '-pthread', '-lcrypt', '-ldl', '-lz', '-o', 'mysqld',
            'mysqld.bc'])
    with open('../baseline-mysql-%s.out' % threads, 'w') as out_file:
        run_mysql('mysqld', out_file, threads)
    os.chdir('..')
def eval_online_httpd(threads='1'):
    """Benchmark httpd with alias checks inserted (online mode), once per AA."""
    os.chdir('httpd')
    for aa in AAS:
        args = ['dynaa_insert_alias_checker.py', 'httpd', aa]
        # with open('../online-httpd-%s-vr.out' % aa, 'w') as out_file:
        #     invoke(args, out_file)
        #     run_httpd('httpd.ac', out_file)
        # with open('../online-httpd-%s-sl.out' % aa, 'w') as out_file:
        #     invoke(args + ['--no-phi'], out_file)
        #     run_httpd('httpd.ac', out_file)
        if aa == 'basicaa':
            with open('../online-httpd-basicaa-deref-%s.out' % threads,
                      'w') as out_file:
                invoke(args, out_file)
                run_httpd('httpd.ac', out_file, threads)
        else:
            # Non-basicaa analyses are evaluated as a delta on top of basicaa.
            with open('../online-httpd-%s-delta-deref-%s.out' % (aa, threads),
                      'w') as out_file:
                invoke(args + ['--baseline', 'basicaa'], out_file)
                run_httpd('httpd.ac', out_file, threads)
            continue
        # NOTE(review): the block below appears unreachable -- the else
        # branch `continue`s and the basicaa branch can never equal
        # 'anders-aa'. Confirm whether it should be removed or re-enabled.
        if aa == 'anders-aa':
            with open('../online-httpd-anders-aa-%s.out' % threads,
                      'w') as out_file:
                invoke(args + ['--check-all'], out_file)
                run_httpd('httpd.ac', out_file, threads)
            with open('../online-httpd-anders-aa-delta-%s.out' % threads,
                      'w') as out_file:
                invoke(args + ['--baseline', 'basicaa', '--check-all'],
                       out_file)
                run_httpd('httpd.ac', out_file, threads)
            # # with open('../online-httpd-anders-aa-deref-%s.out' % threads,
            # #           'w') as out_file:
            # #     invoke(args, out_file)
            # #     run_httpd('httpd.ac', out_file, threads)
            # with open('../online-httpd-anders-aa-report-%s.out' % threads,
            #           'w') as out_file:
            #     invoke(args + ['--check-all', '--action-if-missed', 'report'])
            #     run_httpd('httpd.ac', out_file, threads, report=True)
            #     shutil.move('apache-install/logs/error_log',
            #                 ('../online-httpd-anders-aa-report-%s.error_log' %
            #                  threads))
            # with open('../online-httpd-anders-aa-delta-report-%s.out' % threads,
            #           'w') as out_file:
            #     invoke(args + ['--baseline', 'basicaa', '--check-all',
            #                    '--action-if-missed', 'report'])
            #     run_httpd('httpd.ac', out_file, threads, report=True)
            #     shutil.move('apache-install/logs/error_log',
            #                 ('../online-httpd-anders-aa-delta-report-%s.error_log' %
            #                  threads))
            # # with open('../online-httpd-anders-aa-deref-report-%s.out' % threads,
            # #           'w') as out_file:
            # #     invoke(args + ['--action-if-missed', 'report'])
            # #     run_httpd('httpd.ac', out_file, threads, report=True)
            # #     shutil.move('apache-install/logs/error_log',
            # #                 ('../online-httpd-anders-aa-deref-report-%s.error_log'
            # #                  % threads))
            # with open(('../online-httpd-anders-aa-delta-deref-report-%s.out' %
            #            threads), 'w') as out_file:
            #     invoke(args + ['--baseline', 'basicaa',
            #                    '--action-if-missed', 'report'])
            #     run_httpd('httpd.ac', out_file, threads, report=True)
            #     shutil.move('apache-install/logs/error_log',
            #                 ('../online-httpd-anders-aa-delta-deref-report-%s.error_log' %
            #                  threads))
    os.chdir('..')
def eval_online_mysql(threads='1'):
    """Benchmark mysqld with alias checks (online/hybrid mode), once per AA."""
    os.chdir('mysql')
    for aa in AAS:
        args = ['dynaa_insert_alias_checker.py', 'mysqld', aa]
        # if aa == 'ds-aa':
        #     args.append('--skip-huge-functions')
        # with open('../online-mysql-%s-vr.out' % aa, 'w') as out_file:
        #     invoke(args, out_file)
        #     run_mysql('mysqld.ac', out_file)
        # with open('../online-mysql-%s-sl.out' % aa, 'w') as out_file:
        #     invoke(args + ['--no-phi'], out_file)
        #     run_mysql('mysqld.ac', out_file)
        if aa in ['ds-aa', 'anders-aa']:
            # Hybrid mode: the parser function is checked offline, the rest online.
            with open('../hybrid-mysql-%s-delta-deref-%s.out' % (aa, threads),
                      'w') as out_file:
                invoke(['dynaa_hybrid.py', 'mysqld', aa,
                        '--baseline', 'basicaa',
                        '--offline-funcs', '_Z10MYSQLparsePv',],
                       out_file)
                run_mysql('mysqld.hybrid', out_file, threads)
                pts_files = list(get_pts_files())
                for pts_file in pts_files:
                    out_file.write(pts_file + '\n')
                    invoke(['time', 'dynaa_check_aa.py',
                            '--disable-print-value',
                            'mysqld.bc', pts_file, aa],
                           out_file, redirect_stderr=True)
            for pts_file in pts_files:
                shutil.move(pts_file, '/mnt/sdb/dyn-aa/backup')
        elif aa == 'basicaa':
            with open('../hybrid-mysql-%s-deref-%s.out' % (aa, threads),
                      'w') as out_file:
                invoke(['dynaa_hybrid.py', 'mysqld', aa,
                        '--offline-funcs', '_Z10MYSQLparsePv',],
                       out_file)
                run_mysql('mysqld.hybrid', out_file, threads)
                pts_files = list(get_pts_files())
                for pts_file in pts_files:
                    out_file.write(pts_file + '\n')
                    invoke(['time', 'dynaa_check_aa.py',
                            '--disable-print-value',
                            'mysqld.bc', pts_file, aa],
                           out_file, redirect_stderr=True)
            for pts_file in pts_files:
                shutil.move(pts_file, '/mnt/sdb/dyn-aa/backup')
        # NOTE(review): duplicate `elif aa == 'basicaa'` -- this branch is
        # unreachable because the identical condition above matches first.
        elif aa == 'basicaa':
            with open('../online-mysql-basicaa-deref-%s.out' % threads,
                      'w') as out_file:
                invoke(args, out_file)
                run_mysql('mysqld.ac', out_file, threads)
        else:
            with open('../online-mysql-%s-delta-deref-%s.out' % (aa, threads),
                      'w') as out_file:
                invoke(args + ['--baseline', 'basicaa'], out_file)
                run_mysql('mysqld.ac', out_file, threads)
    os.chdir('..')
def eval_offline_httpd(threads='1'):
    """Record points-to logs from an instrumented httpd, then replay each
    log through every AA offline."""
    os.chdir('httpd')
    with open('../offline-httpd-%s.out' % threads, 'w') as out_file:
        invoke(['time', 'dynaa_hook_mem.py',
                '--hook-all', '--hook-fork', 'httpd'],
               out_file)
        run_httpd('httpd.inst', out_file, threads)
    pts_files = list(get_pts_files())
    for aa in AAS:
        with open('../offline-httpd-%s-%s.out' % (aa, threads),
                  'w') as out_file:
            for pts_file in pts_files:
                out_file.write(pts_file + '\n')
                invoke(['time', 'dynaa_check_aa.py', '--check-all',
                        '--disable-print-value',
                        'httpd.bc', pts_file, aa],
                       out_file, redirect_stderr=True)
        if aa == 'anders-aa':
            # Also check anders-aa as a delta on top of basicaa.
            with open('../offline-httpd-anders-aa-delta-%s.out' % threads,
                      'w') as out_file:
                for pts_file in pts_files:
                    out_file.write(pts_file + '\n')
                    invoke(['time', 'dynaa_check_aa.py', '--check-all',
                            '--baseline', 'basicaa',
                            '--disable-print-value',
                            'httpd.bc', pts_file, aa],
                           out_file, redirect_stderr=True)
    for pts_file in pts_files:
        shutil.move(pts_file, '/mnt/sdb/dyn-aa/backup')
    if 'anders-aa' in AAS:
        # Second recording pass without --hook-all (deref-only hooks).
        with open('../offline-httpd-deref-%s.out' % threads,
                  'w') as out_file:
            invoke(['time', 'dynaa_hook_mem.py', '--hook-fork', 'httpd'], out_file)
            run_httpd('httpd.inst', out_file, threads)
        pts_files = list(get_pts_files())
        # with open('../offline-httpd-anders-aa-deref-%s.out' % threads,
        #           'w') as out_file:
        #     for pts_file in pts_files:
        #         out_file.write(pts_file + '\n')
        #         invoke(['time', 'dynaa_check_aa.py', '--disable-print-value',
        #                 'httpd.bc', pts_file, aa],
        #                out_file, redirect_stderr=True)
        with open('../offline-httpd-anders-aa-delta-deref-%s.out' % threads,
                  'w') as out_file:
            for pts_file in pts_files:
                out_file.write(pts_file + '\n')
                # NOTE(review): `aa` here is the leftover value from the
                # loop above (its last iteration) -- fragile; confirm it is
                # intended to be 'ds-aa'/the final AAS entry here.
                invoke(['time', 'dynaa_check_aa.py',
                        '--baseline', 'basicaa',
                        '--disable-print-value',
                        'httpd.bc', pts_file, aa],
                       out_file, redirect_stderr=True)
        for pts_file in pts_files:
            shutil.move(pts_file, '/mnt/sdb/dyn-aa/backup')
    os.chdir('..')
def eval_offline_mysql(threads='1'):
    """Record points-to logs from an instrumented mysqld (small workload),
    then replay each log through every AA offline."""
    os.chdir('mysql')
    with open('../offline-mysql-%s.out' % threads, 'w') as out_file:
        invoke(['dynaa_hook_mem.py', '--hook-all', 'mysqld'], out_file)
        run_mysql('mysqld.inst', out_file, threads, small_workload=True)
    pts_files = list(get_pts_files())
    for aa in AAS:
        with open('../offline-mysql-%s-%s.out' % (aa, threads),
                  'w') as out_file:
            for pts_file in pts_files:
                out_file.write(pts_file + '\n')
                invoke(['time', 'dynaa_check_aa.py', '--check-all',
                        '--disable-print-value',
                        'mysqld.bc', pts_file, aa],
                       out_file, redirect_stderr=True)
    for pts_file in pts_files:
        shutil.move(pts_file, '/mnt/sdb/dyn-aa/backup')
    os.chdir('..')
def main():
    """Parse CLI flags and run the selected baseline/online/offline evals."""
    import argparse
    # The module-level lists are narrowed in place by --app/--aa.
    global APPS, AAS
    parser = argparse.ArgumentParser(
        description=('\033[0;31mnote: before running this script, please '
                     'make sure that no httpd/mysqld instance is running, '
                     'and please remove apache-install/logs/error_log and '
                     'all existing /mnt/sdb/dyn-aa/pts* files.\033[m'))
    parser.add_argument('--app', choices=APPS,
                        help='only eval the specified application')
    parser.add_argument('--aa', choices=AAS,
                        help='only eval the specified AA')
    parser.add_argument('--threads', choices=['1', '4'], default='4',
                        help='number of threads')
    parser.add_argument('--no-eval-baseline', action='store_true',
                        help='do not eval the baseline')
    parser.add_argument('--no-eval-online', action='store_true',
                        help='do not eval the online mode')
    parser.add_argument('--no-eval-offline', action='store_true',
                        help='do not eval the offline mode')
    args = parser.parse_args()
    # Instrumented binaries write their points-to logs under this prefix.
    os.putenv('LOG_FILE', '/mnt/sdb/dyn-aa/pts')
    if args.app is not None:
        APPS = [args.app]
    if args.aa is not None:
        AAS = [args.aa]
    for app in APPS:
        # Dispatch to eval_{baseline,online,offline}_<app> by name.
        if not args.no_eval_baseline:
            globals()['eval_baseline_' + app](args.threads)
        if not args.no_eval_online:
            globals()['eval_online_' + app](args.threads)
        if not args.no_eval_offline:
            globals()['eval_offline_' + app](args.threads)
if __name__ == '__main__':
    main()
Stop passing --hook-fork: the option has been removed from dynaa_hook_mem.py and its behavior is now enabled by default.
#!/usr/bin/python
import os
import shutil
import string
import subprocess
import sys
import time
# Applications and alias analyses to evaluate; may be narrowed by CLI flags.
APPS = ['httpd', 'mysql']
AAS = ['basicaa', 'anders-aa', 'ds-aa']
# Root of the benchmark applications; must be set in the environment.
APPS_DIR = os.getenv('APPS_DIR')
def is_useful(line):
    """Return True if an output line carries information worth keeping
    (result summaries, detections, CPU or swap statistics)."""
    markers = ('Congrats', 'Detected', 'CPU', 'swap')
    return any(marker in line for marker in markers)
def invoke(args, out_file=None, redirect_stderr=False, is_mysqld=False):
    """Run an external command, echoing it in green to stdout first.

    args            -- argv list for the child process.
    out_file        -- if given, capture the child's stdout (and stderr
                       when redirect_stderr is True) and write it both to
                       this file and to our stdout.
    redirect_stderr -- merge the child's stderr into its captured stdout.
    is_mysqld       -- start the command in the background (mysqld keeps
                       running while the benchmark drives it) and do not
                       wait for it.

    Raises subprocess.CalledProcessError if a foreground command exits
    non-zero.
    """
    # ' '.join works on both Python 2 and 3; string.join() is Python-2 only.
    sys.stdout.write('\033[0;32m%s\033[m\n' % ' '.join(args))
    if is_mysqld:
        # Fire and forget: the server is stopped later via killall.
        subprocess.Popen(args)
    else:
        if out_file is None:
            subprocess.check_call(args)
        else:
            if redirect_stderr:
                out = subprocess.check_output(args, stderr=subprocess.STDOUT)
            else:
                out = subprocess.check_output(args)
            # Mirror the captured output to the log file and the console.
            out_file.write(out)
            sys.stdout.write(out)
def get_pts_files(log_dir='/mnt/sdb/dyn-aa'):
    """Yield the full paths of point-to log files (names starting with
    'pts') found directly in log_dir.

    log_dir -- directory to scan; defaults to the dyn-aa log location,
               so existing callers are unaffected.
    """
    for filename in os.listdir(log_dir):
        if filename.startswith('pts'):
            yield os.path.join(log_dir, filename)
def run_httpd(executable, out_file, threads='1', report=False):
    """Start an httpd build, drive it with ab via the run-ab wrapper,
    then shut it down.

    executable -- httpd binary (relative to the current directory).
    out_file   -- log file for the benchmark output.
    threads    -- '1' or '4'; picks the CPU sets for server and client.
    report     -- capture the server's own output too (used for
                  alias-check report runs) instead of logging ab output.
    """
    # Pin server (cpu0) and load generator (cpu1) to disjoint CPUs.
    cpu0 = '0'
    cpu1 = '1'
    if threads == '4':
        cpu0 = '0,2,4,6'
        cpu1 = '1,3,5,7'
    if report:
        invoke(['taskset', '-c', cpu0,
                './' + executable, '-c', 'MaxClients 25'],
               out_file, redirect_stderr=True)
        # The report goes to out_file above; do not also log ab output.
        out_file = None
    else:
        invoke(['taskset', '-c', cpu0, './' + executable])
    # Give the server time to finish starting before hitting it.
    time.sleep(3)
    invoke(['time', 'taskset', '-c', cpu1,
            os.path.join(APPS_DIR, 'apache/run-ab'),
            '--times', '10', '10000', threads, 'localhost:8000/test.html'],
           out_file)
    invoke(['./httpd', '-k', 'stop'])
    # Let the server shut down fully before the next run starts.
    time.sleep(5)
def run_mysql(executable, out_file, threads='1', small_workload=False):
    """Start a mysqld build, drive it with sysbench, then kill it.

    executable     -- mysqld binary (relative to the current directory).
    out_file       -- log file for the sysbench output.
    threads        -- '1' or '4'; picks CPU sets and sysbench parallelism.
    small_workload -- use a much smaller request count (used for the
                      heavily instrumented offline runs).
    """
    # Pin server (cpu0) and sysbench client (cpu1) to disjoint CPUs.
    cpu0 = '0'
    cpu1 = '1'
    if threads == '4':
        cpu0 = '0,2,4,6'
        cpu1 = '1,3,5,7'
    # Larger stack needed by the instrumented server; runs in background.
    invoke(['taskset', '-c', cpu0, './' + executable, '--thread_stack=8388608'],
           is_mysqld=True)
    # Give mysqld time to come up before the benchmark connects.
    time.sleep(3)
    args = ['taskset', '-c', cpu1, os.path.join(APPS_DIR, 'mysql/run-sysbench')]
    if small_workload:
        args += ['-n', '100', '-r', '1']
    else:
        args += ['-n', '10000', '-r', '10']
    if threads == '4':
        args += ['-p', threads]
    invoke(args, out_file)
    invoke(['killall', executable])
    # Let mysqld die fully before the next run starts.
    time.sleep(3)
def eval_baseline_httpd(threads='1'):
    """Baseline httpd run: compile the uninstrumented bitcode and
    benchmark it, logging to ../baseline-httpd-<threads>.out."""
    os.chdir('httpd')
    invoke(['clang', '-pthread', '-lcrypt', '-o', 'httpd', 'httpd.bc'])
    with open('../baseline-httpd-%s.out' % threads, 'w') as out_file:
        run_httpd('httpd', out_file, threads)
    os.chdir('..')
def eval_baseline_mysql(threads='1'):
    """Baseline mysql run: compile the uninstrumented bitcode and
    benchmark it, logging to ../baseline-mysql-<threads>.out."""
    os.chdir('mysql')
    invoke(['clang++', '-pthread', '-lcrypt', '-ldl', '-lz', '-o', 'mysqld',
            'mysqld.bc'])
    with open('../baseline-mysql-%s.out' % threads, 'w') as out_file:
        run_mysql('mysqld', out_file, threads)
    os.chdir('..')
def eval_online_httpd(threads='1'):
    """Online-mode httpd evaluation: insert alias checkers for each AA
    and benchmark the checked binary (httpd.ac)."""
    os.chdir('httpd')
    for aa in AAS:
        args = ['dynaa_insert_alias_checker.py', 'httpd', aa]
        # with open('../online-httpd-%s-vr.out' % aa, 'w') as out_file:
        #     invoke(args, out_file)
        #     run_httpd('httpd.ac', out_file)
        # with open('../online-httpd-%s-sl.out' % aa, 'w') as out_file:
        #     invoke(args + ['--no-phi'], out_file)
        #     run_httpd('httpd.ac', out_file)
        if aa == 'basicaa':
            # basicaa is itself the baseline; check it directly.
            with open('../online-httpd-basicaa-deref-%s.out' % threads,
                      'w') as out_file:
                invoke(args, out_file)
                run_httpd('httpd.ac', out_file, threads)
        else:
            # Other AAs are checked as a delta against basicaa.
            with open('../online-httpd-%s-delta-deref-%s.out' % (aa, threads),
                      'w') as out_file:
                invoke(args + ['--baseline', 'basicaa'], out_file)
                run_httpd('httpd.ac', out_file, threads)
        continue
        # NOTE(review): everything below this `continue` is unreachable
        # dead code (the anders-aa full-check runs); it appears to be
        # deliberately short-circuited but should be deleted or the
        # `continue` removed — confirm intent before changing.
        if aa == 'anders-aa':
            with open('../online-httpd-anders-aa-%s.out' % threads,
                      'w') as out_file:
                invoke(args + ['--check-all'], out_file)
                run_httpd('httpd.ac', out_file, threads)
            with open('../online-httpd-anders-aa-delta-%s.out' % threads,
                      'w') as out_file:
                invoke(args + ['--baseline', 'basicaa', '--check-all'],
                       out_file)
                run_httpd('httpd.ac', out_file, threads)
        # # with open('../online-httpd-anders-aa-deref-%s.out' % threads,
        # #           'w') as out_file:
        # #     invoke(args, out_file)
        # #     run_httpd('httpd.ac', out_file, threads)
        # with open('../online-httpd-anders-aa-report-%s.out' % threads,
        #           'w') as out_file:
        #     invoke(args + ['--check-all', '--action-if-missed', 'report'])
        #     run_httpd('httpd.ac', out_file, threads, report=True)
        #     shutil.move('apache-install/logs/error_log',
        #                 ('../online-httpd-anders-aa-report-%s.error_log' %
        #                  threads))
        # with open('../online-httpd-anders-aa-delta-report-%s.out' % threads,
        #           'w') as out_file:
        #     invoke(args + ['--baseline', 'basicaa', '--check-all',
        #                    '--action-if-missed', 'report'])
        #     run_httpd('httpd.ac', out_file, threads, report=True)
        #     shutil.move('apache-install/logs/error_log',
        #                 ('../online-httpd-anders-aa-delta-report-%s.error_log' %
        #                  threads))
        # # with open('../online-httpd-anders-aa-deref-report-%s.out' % threads,
        # #           'w') as out_file:
        # #     invoke(args + ['--action-if-missed', 'report'])
        # #     run_httpd('httpd.ac', out_file, threads, report=True)
        # #     shutil.move('apache-install/logs/error_log',
        # #                 ('../online-httpd-anders-aa-deref-report-%s.error_log'
        # #                  % threads))
        # with open(('../online-httpd-anders-aa-delta-deref-report-%s.out' %
        #            threads), 'w') as out_file:
        #     invoke(args + ['--baseline', 'basicaa',
        #                    '--action-if-missed', 'report'])
        #     run_httpd('httpd.ac', out_file, threads, report=True)
        #     shutil.move('apache-install/logs/error_log',
        #                 ('../online-httpd-anders-aa-delta-deref-report-%s.error_log' %
        #                  threads))
    os.chdir('..')
def eval_online_mysql(threads='1'):
    """Online/hybrid-mode mysql evaluation: ds-aa and anders-aa run in
    hybrid mode (MYSQLparse checked offline), basicaa in hybrid deref
    mode; results are checked against the collected pts logs."""
    os.chdir('mysql')
    for aa in AAS:
        args = ['dynaa_insert_alias_checker.py', 'mysqld', aa]
        # if aa == 'ds-aa':
        #     args.append('--skip-huge-functions')
        # with open('../online-mysql-%s-vr.out' % aa, 'w') as out_file:
        #     invoke(args, out_file)
        #     run_mysql('mysqld.ac', out_file)
        # with open('../online-mysql-%s-sl.out' % aa, 'w') as out_file:
        #     invoke(args + ['--no-phi'], out_file)
        #     run_mysql('mysqld.ac', out_file)
        if aa in ['ds-aa', 'anders-aa']:
            # Hybrid run: delta against basicaa, MYSQLparse handled offline.
            with open('../hybrid-mysql-%s-delta-deref-%s.out' % (aa, threads),
                      'w') as out_file:
                invoke(['dynaa_hybrid.py', 'mysqld', aa,
                        '--baseline', 'basicaa',
                        '--offline-funcs', '_Z10MYSQLparsePv',],
                       out_file)
                run_mysql('mysqld.hybrid', out_file, threads)
                pts_files = list(get_pts_files())
                for pts_file in pts_files:
                    out_file.write(pts_file + '\n')
                    invoke(['time', 'dynaa_check_aa.py',
                            '--disable-print-value',
                            'mysqld.bc', pts_file, aa],
                           out_file, redirect_stderr=True)
                for pts_file in pts_files:
                    shutil.move(pts_file, '/mnt/sdb/dyn-aa/backup')
        elif aa == 'basicaa':
            # basicaa hybrid run without a baseline delta.
            with open('../hybrid-mysql-%s-deref-%s.out' % (aa, threads),
                      'w') as out_file:
                invoke(['dynaa_hybrid.py', 'mysqld', aa,
                        '--offline-funcs', '_Z10MYSQLparsePv',],
                       out_file)
                run_mysql('mysqld.hybrid', out_file, threads)
                pts_files = list(get_pts_files())
                for pts_file in pts_files:
                    out_file.write(pts_file + '\n')
                    invoke(['time', 'dynaa_check_aa.py',
                            '--disable-print-value',
                            'mysqld.bc', pts_file, aa],
                           out_file, redirect_stderr=True)
                for pts_file in pts_files:
                    shutil.move(pts_file, '/mnt/sdb/dyn-aa/backup')
        elif aa == 'basicaa':
            # NOTE(review): duplicate `elif aa == 'basicaa'` — this branch
            # and the following `else` are unreachable (the branches above
            # already cover every value in AAS). Presumably these are the
            # superseded pure-online runs; confirm and delete.
            with open('../online-mysql-basicaa-deref-%s.out' % threads,
                      'w') as out_file:
                invoke(args, out_file)
                run_mysql('mysqld.ac', out_file, threads)
        else:
            with open('../online-mysql-%s-delta-deref-%s.out' % (aa, threads),
                      'w') as out_file:
                invoke(args + ['--baseline', 'basicaa'], out_file)
                run_mysql('mysqld.ac', out_file, threads)
    os.chdir('..')
def eval_offline_httpd(threads='1'):
    """Offline httpd evaluation: instrument memory accesses, run the
    workload to collect pts logs, then replay the logs through
    dynaa_check_aa.py for every AA (plus anders-aa delta/deref runs)."""
    os.chdir('httpd')
    # Full instrumentation run: collect logs for the --check-all passes.
    with open('../offline-httpd-%s.out' % threads, 'w') as out_file:
        invoke(['time', 'dynaa_hook_mem.py',
                '--hook-all', 'httpd'],
               out_file)
        run_httpd('httpd.inst', out_file, threads)
    pts_files = list(get_pts_files())
    for aa in AAS:
        with open('../offline-httpd-%s-%s.out' % (aa, threads),
                  'w') as out_file:
            for pts_file in pts_files:
                out_file.write(pts_file + '\n')
                invoke(['time', 'dynaa_check_aa.py', '--check-all',
                        '--disable-print-value',
                        'httpd.bc', pts_file, aa],
                       out_file, redirect_stderr=True)
        if aa == 'anders-aa':
            with open('../offline-httpd-anders-aa-delta-%s.out' % threads,
                      'w') as out_file:
                for pts_file in pts_files:
                    out_file.write(pts_file + '\n')
                    invoke(['time', 'dynaa_check_aa.py', '--check-all',
                            '--baseline', 'basicaa',
                            '--disable-print-value',
                            'httpd.bc', pts_file, aa],
                           out_file, redirect_stderr=True)
    for pts_file in pts_files:
        shutil.move(pts_file, '/mnt/sdb/dyn-aa/backup')
    if 'anders-aa' in AAS:
        # Deref-only instrumentation run for the anders-aa delta check.
        with open('../offline-httpd-deref-%s.out' % threads,
                  'w') as out_file:
            invoke(['time', 'dynaa_hook_mem.py', 'httpd'], out_file)
            run_httpd('httpd.inst', out_file, threads)
        pts_files = list(get_pts_files())
        # with open('../offline-httpd-anders-aa-deref-%s.out' % threads,
        #           'w') as out_file:
        #     for pts_file in pts_files:
        #         out_file.write(pts_file + '\n')
        #         invoke(['time', 'dynaa_check_aa.py', '--disable-print-value',
        #                 'httpd.bc', pts_file, aa],
        #                out_file, redirect_stderr=True)
        with open('../offline-httpd-anders-aa-delta-deref-%s.out' % threads,
                  'w') as out_file:
            for pts_file in pts_files:
                out_file.write(pts_file + '\n')
                # BUG FIX: the original passed the stale loop variable `aa`
                # here, which still holds the LAST element of AAS after the
                # loop above ('ds-aa' by default) — but this is explicitly
                # the anders-aa delta-deref run, so name the AA directly.
                invoke(['time', 'dynaa_check_aa.py',
                        '--baseline', 'basicaa',
                        '--disable-print-value',
                        'httpd.bc', pts_file, 'anders-aa'],
                       out_file, redirect_stderr=True)
        for pts_file in pts_files:
            shutil.move(pts_file, '/mnt/sdb/dyn-aa/backup')
    os.chdir('..')
def eval_offline_mysql(threads='1'):
    """Offline mysql evaluation: instrument memory accesses, run a small
    workload to collect pts logs, then replay the logs through
    dynaa_check_aa.py for every AA."""
    os.chdir('mysql')
    with open('../offline-mysql-%s.out' % threads, 'w') as out_file:
        invoke(['dynaa_hook_mem.py', '--hook-all', 'mysqld'], out_file)
        # The instrumented server is slow; use the small workload.
        run_mysql('mysqld.inst', out_file, threads, small_workload=True)
    pts_files = list(get_pts_files())
    for aa in AAS:
        with open('../offline-mysql-%s-%s.out' % (aa, threads),
                  'w') as out_file:
            for pts_file in pts_files:
                out_file.write(pts_file + '\n')
                invoke(['time', 'dynaa_check_aa.py', '--check-all',
                        '--disable-print-value',
                        'mysqld.bc', pts_file, aa],
                       out_file, redirect_stderr=True)
    # Archive the logs so the next run starts from a clean directory.
    for pts_file in pts_files:
        shutil.move(pts_file, '/mnt/sdb/dyn-aa/backup')
    os.chdir('..')
def main():
    """Entry point: parse CLI options and run the selected evaluation
    modes (baseline / online / offline) for each selected application.

    Mutates the module-level APPS and AAS lists when --app / --aa are
    given, then dispatches to eval_<mode>_<app> functions by name.
    """
    import argparse
    global APPS, AAS
    parser = argparse.ArgumentParser(
        description=('\033[0;31mnote: before running this script, please '
                     'make sure that no httpd/mysqld instance is running, '
                     'and please remove apache-install/logs/error_log and '
                     'all existing /mnt/sdb/dyn-aa/pts* files.\033[m'))
    parser.add_argument('--app', choices=APPS,
                        help='only eval the specified application')
    parser.add_argument('--aa', choices=AAS,
                        help='only eval the specified AA')
    parser.add_argument('--threads', choices=['1', '4'], default='4',
                        help='number of threads')
    parser.add_argument('--no-eval-baseline', action='store_true',
                        help='do not eval the baseline')
    parser.add_argument('--no-eval-online', action='store_true',
                        help='do not eval the online mode')
    parser.add_argument('--no-eval-offline', action='store_true',
                        help='do not eval the offline mode')
    args = parser.parse_args()
    # LOG_FILE is consumed by the instrumented binaries spawned below.
    # Assign via os.environ rather than os.putenv: putenv does not update
    # os.environ, so the setting would be invisible to this process;
    # children spawned by invoke() inherit it either way.
    os.environ['LOG_FILE'] = '/mnt/sdb/dyn-aa/pts'
    if args.app is not None:
        APPS = [args.app]
    if args.aa is not None:
        AAS = [args.aa]
    for app in APPS:
        # Dispatch by constructed function name, e.g. eval_baseline_httpd.
        if not args.no_eval_baseline:
            globals()['eval_baseline_' + app](args.threads)
        if not args.no_eval_online:
            globals()['eval_online_' + app](args.threads)
        if not args.no_eval_offline:
            globals()['eval_offline_' + app](args.threads)


if __name__ == '__main__':
    main()
|
from django.contrib.sites.models import Site
from django.core import urlresolvers
from django.conf import settings
from django.template.loader import get_template
from django.template import Context
from django.views.decorators.csrf import csrf_exempt
import requests
import logging
logger = logging.getLogger(__name__)
def mailgun_send(mailgun_data):
    """POST a message dict to the Mailgun messages API for LIST_DOMAIN
    and return an HTTP 200 response regardless of the API outcome.

    NOTE(review): HttpResponse is not imported anywhere in this module's
    header, so the final return raises NameError when reached — add
    `from django.http import HttpResponse` (and HttpResponseRedirect,
    used in event_message below).
    """
    logger.debug("Mailgun send: %s" % mailgun_data)
    if settings.DEBUG:
        # When this is true you will see this message in the mailgun logs but
        # nothing will actually be delivered
        mailgun_data["o:testmode"] = "yes"
    resp = requests.post("https://api.mailgun.net/v2/%s/messages" % settings.LIST_DOMAIN,
                         auth=("api", settings.MAILGUN_API_KEY),
                         data=mailgun_data
                         )
    logger.debug("Mailgun response: %s" % resp.text)
    return HttpResponse(status=200)
def new_event_notification(event, location):
# notify the event admins
admin_group = event.admin
recipients = [admin.email for admin in admin_group.users.all()]
event_short_title = event.title[0:50]
if len(event.title) > 50:
event_short_title = event_short_title + "..."
subject = '[' + location.email_subject_prefix + ']' + " A new event has been created: %s" % event_short_title
from_address = location.from_email()
plaintext = get_template('emails/new_event_notify.txt')
c = Context({
'event': event,
'creator': event.creator,
'location': location,
'location_name': location.name,
})
body_plain = plaintext.render(c)
mailgun_data={"from": from_address,
"to": recipients,
"subject": subject,
"text": body_plain,
}
return mailgun_send(mailgun_data)
def event_approved_notification(event, location):
logger.debug("event_approved_notification")
recipients = [organizer.email for organizer in event.organizers.all()]
subject = '[' + location.email_subject_prefix + ']' + " Your event is ready to be published"
from_address = location.from_email()
plaintext = get_template('emails/event_approved_notify.txt')
c = Context({
'event': event,
'domain' : Site.objects.get_current().domain,
'location_name': location.name,
})
body_plain = plaintext.render(c)
mailgun_data={"from": from_address,
"to": recipients,
"subject": subject,
"text": body_plain,
}
return mailgun_send(mailgun_data)
def event_published_notification(event, location):
logger.debug("event_published_notification")
recipients = [organizer.email for organizer in event.organizers.all()]
event_short_title = event.title[0:50]
if len(event.title) > 50:
event_short_title = event_short_title + "..."
subject = '[' + location.email_subject_prefix + ']' + " Your event is now live: %s" % event_short_title
from_address = location.from_email()
plaintext = get_template('emails/event_published_notify.txt')
c = Context({
'event': event,
'domain' : Site.objects.get_current().domain,
'location_name': location.name,
})
body_plain = plaintext.render(c)
mailgun_data={"from": from_address,
"to": recipients,
"subject": subject,
"text": body_plain,
}
return mailgun_send(mailgun_data)
###############################################
########### Email Route Create ################
def create_route(route_name, route_pattern, path):
mailgun_api_key = settings.MAILGUN_API_KEY
list_domain = settings.LIST_DOMAIN
# strip the initial slash
forward_url = os.path.join(list_domain, path)
forward_url = "https://" + forward_url
print forward_url
print list_domain
expression = "match_recipient('%s')" % route_pattern
print expression
forward_url = "forward('%s')" % forward_url
print forward_url
return requests.post( "https://api.mailgun.net/v2/routes",
auth=("api", mailgun_api_key),
data={"priority": 1,
"description": route_name,
# the route pattern is a string but still needs to be quoted
"expression": expression,
"action": forward_url,
}
)
# TODO - We are goign to try and not create new routes
#def create_event_email(sender, instance, created, using, **kwargs):
# if created == True:
# # XXX TODO should probably hash the ID or name of the event so we're
# # not info leaking here, if we care?
# route_pattern = "event%d" % instance.id
# route_name = 'Event %d' % instance.id
# path = "events/message/"
# resp = create_route(route_name, route_pattern, path)
# print resp.text
#post_save.connect(create_event_email, sender=Event)
############################################
########### EMAIL ENDPOINTS ################
@csrf_exempt
def event_message(request, location_slug=None):
''' new messages sent to event email addresses are posed to this view '''
if not request.method == 'POST':
return HttpResponseRedirect('/404')
recipient = request.POST.get('recipient')
from_address = request.POST.get('from')
sender = request.POST.get('sender')
subject = request.POST.get('subject')
body_plain = request.POST.get('body-plain')
body_html = request.POST.get('body-html')
# get the event info and make sure the event exists
# we know that the route is always in the form eventXX, where XX is the
# event id.
alias = recipient.split('@')[0]
logger.debug("event_message: alias=%s" % alias)
event = None
try:
event_id = int(alias[5:])
logger.debug("event_message: event_id=%s" % event_id)
event = Event.objects.get(id=event_id)
except:
pass
if not event:
logger.warn("Event (%s) not found. Exiting quietly." % alias)
return HttpResponse(status=200)
# Do some sanity checkint so we don't mailbomb everyone
header_txt = request.POST.get('message-headers')
message_headers = json.loads(header_txt)
message_header_keys = [item[0] for item in message_headers]
# make sure this isn't an email we have already forwarded (cf. emailbombgate 2014)
# A List-Id header will only be present if it has been added manually in
# this function, ie, if we have already processed this message.
if request.POST.get('List-Id') or 'List-Id' in message_header_keys:
logger.debug('List-Id header was found! Dropping message silently')
return HttpResponse(status=200)
# If 'Auto-Submitted' in message_headers or message_headers['Auto-Submitted'] != 'no':
if 'Auto-Submitted' in message_header_keys:
logger.info('message appears to be auto-submitted. reject silently')
return HttpResponse(status=200)
# find the event organizers and admins
organizers = event.organizers.all()
location = get_location(location_slug)
location_event_admin = EventAdminGroup.objects.get(location=location)
admins = location_event_admin.users.all()
# Build our bcc list
bcc_list = []
for organizer in organizers:
if organizer.email not in bcc_list:
bcc_list.append(organizer.email)
for admin in admins:
if admin.email not in bcc_list:
bcc_list.append(admin.email)
# Make sure this person can post to our list
if not from_address in bcc_list:
logger.warn("From address (%s) not allowed. Exiting quietly." % from_address)
return HttpResponse(status=200)
# prefix subject
if subject.find('[Event Discussion') < 0:
prefix = '[Event Discussion: %s] ' % event.slug[0:30]
subject = prefix + subject
# Add in footer
event_url = urlresolvers.reverse('gather_view_event', args=(location.slug, event.id, event.slug))
footer_msg = "You are receving this email because you are one of the organizers or an event admin at this location. Visit this event online at %s" % event_url
body_plain = body_plain + "\n\n-------------------------------------------\n" + footer_msg
body_html = body_html + "<br><br>-------------------------------------------<br>" + footer_msg
# send the message
mailgun_data={"from": from_address,
"to": [recipient, ],
"bcc": bcc_list,
"subject": subject,
"text": body_plain,
"html": body_html
}
return mailgun_send(mailgun_data)
Add the missing django.http import (HttpResponse, HttpResponseRedirect)
import json
import logging
import os

import requests

from django.conf import settings
from django.contrib.sites.models import Site
from django.core import urlresolvers
from django.http import HttpResponse, HttpResponseRedirect
from django.template import Context
from django.template.loader import get_template
from django.views.decorators.csrf import csrf_exempt
logger = logging.getLogger(__name__)
def mailgun_send(mailgun_data):
    """POST a message dict to the Mailgun messages API for LIST_DOMAIN.

    mailgun_data -- dict of Mailgun message fields ("from", "to",
                    "subject", "text", ...), passed through as form data.
    Returns an HTTP 200 response regardless of the API outcome; failures
    are only visible in the debug log.
    """
    logger.debug("Mailgun send: %s" % mailgun_data)
    if settings.DEBUG:
        # When this is true you will see this message in the mailgun logs but
        # nothing will actually be delivered
        mailgun_data["o:testmode"] = "yes"
    resp = requests.post("https://api.mailgun.net/v2/%s/messages" % settings.LIST_DOMAIN,
                         auth=("api", settings.MAILGUN_API_KEY),
                         data=mailgun_data
                         )
    logger.debug("Mailgun response: %s" % resp.text)
    return HttpResponse(status=200)
def new_event_notification(event, location):
    """Email every user in the event's admin group that a new event was
    created, rendered from the new_event_notify.txt template."""
    # notify the event admins
    admin_group = event.admin
    recipients = [admin.email for admin in admin_group.users.all()]
    # Truncate long titles so the subject stays readable.
    event_short_title = event.title[0:50]
    if len(event.title) > 50:
        event_short_title = event_short_title + "..."
    subject = '[' + location.email_subject_prefix + ']' + " A new event has been created: %s" % event_short_title
    from_address = location.from_email()
    plaintext = get_template('emails/new_event_notify.txt')
    c = Context({
        'event': event,
        'creator': event.creator,
        'location': location,
        'location_name': location.name,
    })
    body_plain = plaintext.render(c)
    mailgun_data={"from": from_address,
                  "to": recipients,
                  "subject": subject,
                  "text": body_plain,
                  }
    return mailgun_send(mailgun_data)
def event_approved_notification(event, location):
    '''Tell all organizers of an event that it has been approved and can
    now be published.'''
    logger.debug("event_approved_notification")
    to_list = []
    for organizer in event.organizers.all():
        to_list.append(organizer.email)
    subject = '[' + location.email_subject_prefix + ']' + " Your event is ready to be published"
    template = get_template('emails/event_approved_notify.txt')
    render_context = Context({
        'event': event,
        'domain' : Site.objects.get_current().domain,
        'location_name': location.name,
    })
    payload = {
        "from": location.from_email(),
        "to": to_list,
        "subject": subject,
        "text": template.render(render_context),
    }
    return mailgun_send(payload)
def event_published_notification(event, location):
    """Email the event's organizers that their event is now live,
    rendered from the event_published_notify.txt template."""
    logger.debug("event_published_notification")
    recipients = [organizer.email for organizer in event.organizers.all()]
    # Truncate long titles so the subject stays readable.
    event_short_title = event.title[0:50]
    if len(event.title) > 50:
        event_short_title = event_short_title + "..."
    subject = '[' + location.email_subject_prefix + ']' + " Your event is now live: %s" % event_short_title
    from_address = location.from_email()
    plaintext = get_template('emails/event_published_notify.txt')
    c = Context({
        'event': event,
        'domain' : Site.objects.get_current().domain,
        'location_name': location.name,
    })
    body_plain = plaintext.render(c)
    mailgun_data={"from": from_address,
                  "to": recipients,
                  "subject": subject,
                  "text": body_plain,
                  }
    return mailgun_send(mailgun_data)
###############################################
########### Email Route Create ################
def create_route(route_name, route_pattern, path):
    """Create a Mailgun route that forwards mail whose recipient matches
    route_pattern to an HTTPS endpoint under LIST_DOMAIN/path.

    route_name    -- human-readable description stored with the route.
    route_pattern -- recipient pattern for match_recipient().
    path          -- URL path (relative to LIST_DOMAIN) to forward to.
    Returns the requests.Response from the Mailgun routes API.
    Requires `import os` at module level (os.path.join below).
    """
    mailgun_api_key = settings.MAILGUN_API_KEY
    list_domain = settings.LIST_DOMAIN
    # strip the initial slash
    forward_url = os.path.join(list_domain, path)
    forward_url = "https://" + forward_url
    # Use the module logger instead of bare print statements, matching the
    # rest of this module (and staying Python-3 compatible).
    logger.debug("create_route: forward_url=%s" % forward_url)
    logger.debug("create_route: list_domain=%s" % list_domain)
    expression = "match_recipient('%s')" % route_pattern
    logger.debug("create_route: expression=%s" % expression)
    forward_url = "forward('%s')" % forward_url
    logger.debug("create_route: action=%s" % forward_url)
    return requests.post( "https://api.mailgun.net/v2/routes",
                          auth=("api", mailgun_api_key),
                          data={"priority": 1,
                                "description": route_name,
                                # the route pattern is a string but still needs to be quoted
                                "expression": expression,
                                "action": forward_url,
                                }
                          )
# TODO - We are goign to try and not create new routes
#def create_event_email(sender, instance, created, using, **kwargs):
# if created == True:
# # XXX TODO should probably hash the ID or name of the event so we're
# # not info leaking here, if we care?
# route_pattern = "event%d" % instance.id
# route_name = 'Event %d' % instance.id
# path = "events/message/"
# resp = create_route(route_name, route_pattern, path)
# print resp.text
#post_save.connect(create_event_email, sender=Event)
############################################
########### EMAIL ENDPOINTS ################
@csrf_exempt
def event_message(request, location_slug=None):
    ''' New messages sent to event email addresses are posted to this view
    by the Mailgun route; the message is re-sent (bcc) to the event's
    organizers and the location's event admins.

    NOTE(review): this function uses json.loads but `json` is not imported
    in this module's header; Event, get_location and EventAdminGroup are
    presumably imported elsewhere in the file — verify.
    '''
    if not request.method == 'POST':
        return HttpResponseRedirect('/404')
    recipient = request.POST.get('recipient')
    from_address = request.POST.get('from')
    sender = request.POST.get('sender')
    subject = request.POST.get('subject')
    body_plain = request.POST.get('body-plain')
    body_html = request.POST.get('body-html')
    # get the event info and make sure the event exists
    # we know that the route is always in the form eventXX, where XX is the
    # event id.
    alias = recipient.split('@')[0]
    logger.debug("event_message: alias=%s" % alias)
    event = None
    try:
        # Strip the 'event' prefix to recover the numeric id.
        event_id = int(alias[5:])
        logger.debug("event_message: event_id=%s" % event_id)
        event = Event.objects.get(id=event_id)
    except:
        # Bad alias or unknown id; handled by the `if not event` check below.
        pass
    if not event:
        logger.warn("Event (%s) not found. Exiting quietly." % alias)
        return HttpResponse(status=200)
    # Do some sanity checkint so we don't mailbomb everyone
    header_txt = request.POST.get('message-headers')
    message_headers = json.loads(header_txt)
    message_header_keys = [item[0] for item in message_headers]
    # make sure this isn't an email we have already forwarded (cf. emailbombgate 2014)
    # A List-Id header will only be present if it has been added manually in
    # this function, ie, if we have already processed this message.
    if request.POST.get('List-Id') or 'List-Id' in message_header_keys:
        logger.debug('List-Id header was found! Dropping message silently')
        return HttpResponse(status=200)
    # If 'Auto-Submitted' in message_headers or message_headers['Auto-Submitted'] != 'no':
    if 'Auto-Submitted' in message_header_keys:
        logger.info('message appears to be auto-submitted. reject silently')
        return HttpResponse(status=200)
    # find the event organizers and admins
    organizers = event.organizers.all()
    location = get_location(location_slug)
    location_event_admin = EventAdminGroup.objects.get(location=location)
    admins = location_event_admin.users.all()
    # Build our bcc list (deduplicated; organizers first, then admins).
    bcc_list = []
    for organizer in organizers:
        if organizer.email not in bcc_list:
            bcc_list.append(organizer.email)
    for admin in admins:
        if admin.email not in bcc_list:
            bcc_list.append(admin.email)
    # Make sure this person can post to our list
    if not from_address in bcc_list:
        logger.warn("From address (%s) not allowed. Exiting quietly." % from_address)
        return HttpResponse(status=200)
    # prefix subject, but only if the prefix is not already there
    if subject.find('[Event Discussion') < 0:
        prefix = '[Event Discussion: %s] ' % event.slug[0:30]
        subject = prefix + subject
    # Add in footer
    event_url = urlresolvers.reverse('gather_view_event', args=(location.slug, event.id, event.slug))
    footer_msg = "You are receving this email because you are one of the organizers or an event admin at this location. Visit this event online at %s" % event_url
    body_plain = body_plain + "\n\n-------------------------------------------\n" + footer_msg
    body_html = body_html + "<br><br>-------------------------------------------<br>" + footer_msg
    # send the message
    mailgun_data={"from": from_address,
                  "to": [recipient, ],
                  "bcc": bcc_list,
                  "subject": subject,
                  "text": body_plain,
                  "html": body_html
                  }
    return mailgun_send(mailgun_data)
|
# Copyright 2018 Nathan Sommer and Ben Coleman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gkeeprobot.control.VagrantControl import VagrantControl
from gkeeprobot.keywords.ServerSetupKeywords import ServerSetupKeywords
from gkeeprobot.keywords.ClientSetupKeywords import ClientSetupKeywords
vagrant = VagrantControl()
server = ServerSetupKeywords()
client = ClientSetupKeywords()
print('Checking that gkserver is running')
if not vagrant.is_server_running():
print("Server not running. Run 'vagrant up' first.")
exit(1)
print('Checking that gkclient is running')
if not vagrant.is_client_running():
print("Client not running. Run 'vagrant up' first.")
exit(1)
print('Copying valid server.cfg')
server.add_file_to_server('keeper', 'files/valid_server.cfg', 'server.cfg')
print('Starting server with admin_prof as admin')
server.start_gkeepd()
print('Making admin_prof account on gkclient')
client.create_account('admin_prof')
client.establish_ssh_keys('admin_prof')
client.create_gkeep_config_file('admin_prof')
print('Adding prof1 as faculty on gkserver')
client.run_gkeep_command('admin_prof', 'add_faculty', 'prof1', 'doctor', 'prof1@gitkeeper.edu')
print('Making prof1 account on gkclient')
client.create_account('prof1')
client.establish_ssh_keys('prof1')
client.create_gkeep_config_file('prof1')
Add CS1 class as prof1 and create assignment in prof1 account.
# Copyright 2018 Nathan Sommer and Ben Coleman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gkeeprobot.control.VagrantControl import VagrantControl
from gkeeprobot.keywords.ServerSetupKeywords import ServerSetupKeywords
from gkeeprobot.keywords.ClientSetupKeywords import ClientSetupKeywords
# Flat provisioning script: stands up gkeepd with an admin professor,
# adds prof1 as faculty, and creates a CS1 class with an assignment.
vagrant = VagrantControl()
server = ServerSetupKeywords()
client = ClientSetupKeywords()
# Both VMs must already be running; this script only provisions state.
print('Checking that gkserver is running')
if not vagrant.is_server_running():
    print("Server not running. Run 'vagrant up' first.")
    exit(1)
print('Checking that gkclient is running')
if not vagrant.is_client_running():
    print("Client not running. Run 'vagrant up' first.")
    exit(1)
print('Copying valid server.cfg')
server.add_file_to_server('keeper', 'files/valid_server.cfg', 'server.cfg')
print('Starting server with admin_prof as admin')
server.start_gkeepd()
print('Making admin_prof account on gkclient')
client.create_account('admin_prof')
client.establish_ssh_keys('admin_prof')
client.create_gkeep_config_file('admin_prof')
print('Adding prof1 as faculty on gkserver')
client.run_gkeep_command('admin_prof', 'add_faculty', 'prof1', 'doctor', 'prof1@gitkeeper.edu')
print('Making prof1 account on gkclient')
client.create_account('prof1')
client.establish_ssh_keys('prof1')
client.create_gkeep_config_file('prof1')
print('Creating CS1 class with 2 students')
client.add_to_class_csv('prof1', 'cs1', 'student1')
client.add_to_class_csv('prof1', 'cs1', 'student2')
# NOTE(review): the add_faculty call above passes each argument
# separately, while this passes one space-separated string — confirm
# run_gkeep_command splits it; otherwise use 'add', 'cs1', 'cs1.csv'.
client.run_gkeep_command('prof1', 'add cs1 cs1.csv')
client.add_assignment_to_client('prof1', 'good_simple')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ts=2 sw=2 et ai
###############################################################################
# Copyright (c) 2012,2013 Andreas Vogel andreas@wellenvogel.net
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# parts from this software (AIS decoding) are taken from the gpsd project
# so refer to this BSD licencse also (see ais.py) or omit ais.py
###############################################################################
from serialwriter import *
from avnserial import *
import avnav_handlerList
# Optional dependency probe: udev-based device discovery is only
# available when pyudev can be imported; AVNUsbSerialReader refuses to
# instantiate without it.
hasUdev=False
try:
  import pyudev
  hasUdev=True
except:
  pass
class DummyHandler():
  """Placeholder handler that simply idles until stopHandler() is called.

  Exposes the same run/stop protocol as the real serial handlers."""
  def __init__(self):
    self.stop=False
  def run(self):
    # Poll the stop flag rather than blocking, so stopHandler() takes
    # effect within ~0.2s.
    while not self.stop:
      time.sleep(0.2)
  def stopHandler(self):
    self.stop=True
#a worker that will use udev to find serial devices
#based on the configuration it will open available devices and read data from them
class AVNUsbSerialReader(AVNWorker):
@classmethod
def getConfigName(cls):
  # Name of the config element this worker is created from.
  return "AVNUsbSerialReader"
@classmethod
def getConfigParam(cls, child=None):
  """Return the config parameter defaults.

  child=None      -- defaults for the reader itself (based on
                     SerialReader's parameters).
  child='UsbDevice' -- defaults for a configured USB device entry.
  Any other child -- None.
  """
  if child is None:
    #get the default configuration for a serial reader
    rt=SerialReader.getConfigParam().copy()
    rt.update({
      'port': 0, #we do not use this
      'maxDevices':5, #this includes preconfigured devices!
      'feederName':'', #if set, use this feeder
      'allowUnknown':'true' #allow devices that are not configured
      })
    return rt
  if child == "UsbDevice":
    return cls.getSerialParam()
  return None
#get the parameters for an usb device
@classmethod
def getSerialParam(cls):
rt=SerialWriter.getConfigParam().copy()
rt.update({
'port': 0,
'usbid':None, #an identifier of the USB device
#.../1-1.3.1:1.0/ttyUSB2/tty/ttyUSB2 - identifier would be 1-1.3.1
'type': 'reader',
})
return rt
@classmethod
def createInstance(cls, cfgparam):
if not hasUdev:
raise Exception("no pyudev installed, cannot run %s"%(cls.getConfigName()))
cls.checkSingleInstance()
return AVNUsbSerialReader(cfgparam)
def __init__(self,cfgparam):
AVNWorker.__init__(self, cfgparam)
self.maplock=threading.Lock()
self.addrmap={}
#return True if added
def checkAndAddHandler(self,addr,handler,device):
rt=False
maxd=self.getIntParam('maxDevices')
self.maplock.acquire()
if len(self.addrmap) < maxd:
if not addr in self.addrmap:
self.addrmap[addr]=(handler,device)
rt=True
self.maplock.release()
return rt
def removeHandler(self,addr):
rt=None
self.maplock.acquire()
try:
rt=self.addrmap.pop(addr)
except:
pass
self.maplock.release()
if rt is None:
return None
return rt[0]
#param a dict of usbid->device
#returns a dict: usbid->start|stop|keep
def getStartStopList(self,handlerlist):
rt={}
self.maplock.acquire()
for h in handlerlist.keys():
if h in self.addrmap:
if handlerlist[h] != self.addrmap[h][1]:
rt['h']='restart'
else:
rt[h]='keep'
else:
rt[h]='start'
for h in self.addrmap.keys():
if not h in rt:
rt[h]='stop'
self.maplock.release()
return rt
def usbIdFromPath(self,path):
rt=re.sub('/ttyUSB.*','',path).split('/')[-1]
return rt
def getParamByUsbId(self,usbid):
configuredDevices=self.param.get('UsbDevice')
if configuredDevices is None:
return None
for dev in configuredDevices:
if usbid==dev['usbid']:
return dev
return None
def setParameterForSerial(self,param,usbid,device):
rt=param.copy()
rt.update({
'name':"%s-%s"%(usbid,device),
'port':device
})
return rt
#a thread method to run a serial reader/writer
def serialRun(self,handler,addr):
try:
handler.run()
except:
AVNLog.info("serial handler stopped with %s",(traceback.format_exc(),))
AVNLog.debug("serial handler for %s finished",addr)
self.removeHandler(addr)
self.deleteInfo(handler.getName())
#param: a dict key being the usb id, value the device node
def checkDevices(self,devicelist):
startStop=self.getStartStopList(devicelist)
for usbid in startStop:
if startStop[usbid]=='start':
AVNLog.debug("must start handler for %s at %s",usbid,devicelist[usbid])
sourceName="%s-%s"%(self.getName(),usbid)
param=self.getParamByUsbId(usbid)
type="anonymous"
if param is None:
if not self.getBoolParam('allowUnknown'):
AVNLog.debug("unknown devices not allowed, skip start of %s at %s",usbid,devicelist[usbid])
continue
param=self.setParameterForSerial(self.getParam(),usbid,devicelist[usbid])
else:
type="known"
pn=param.get('name')
if pn is not None and pn != '':
sourceName=pn
param=self.setParameterForSerial(param, usbid, devicelist[usbid])
handlertype="reader"
if param.get('type') is not None:
handlertype=param.get('type')
if handlertype == 'writer' or handlertype == "combined":
handler=SerialWriter(param,self.writeData,self,sourceName)
if handlertype == "combined":
handler.param["combined"]=True
else:
if handlertype == 'reader':
handler=SerialReader(param, self.writeData, self,sourceName)
else:
AVNLog.info("ignore device %s : type %s",usbid,handlertype)
handler=DummyHandler()
res=self.checkAndAddHandler(usbid, handler,devicelist[usbid])
if not res:
AVNLog.debug("max number of readers already reached, skip start of %s at %s",usbid,devicelist[usbid])
continue
handlerThread=threading.Thread(target=self.serialRun,args=(handler,usbid))
handlerThread.daemon=True
handlerThread.start()
AVNLog.info("started %s for %s device %s at %s",handlertype,type,usbid,devicelist[usbid])
if startStop[usbid]=='stop' or startStop[usbid]=='restart':
#really starting is left to the audit...
self.stopHandler(usbid)
def stopHandler(self,usbid):
AVNLog.debug("must stop handler for %s",usbid)
handler=self.removeHandler(usbid)
if handler is None:
#must have been a thread race... or another device
return
try:
handler.stopHandler()
AVNLog.info("stop handler for %s triggered",usbid)
except:
pass
#start monitoring in separate thread
#method will never return...
def monitorDevices(self,context):
self.setInfo('monitor', "running", AVNWorker.Status.RUNNING)
threading.current_thread().setName("%s[monitor]"%(self.getThreadPrefix()))
AVNLog.info("start device monitoring")
while True:
try:
monitor = pyudev.Monitor.from_netlink(context)
monitor.filter_by(subsystem='tty')
AVNLog.info("start monitor loop")
for deviceaction in monitor:
action,device=deviceaction
if action=='remove':
usbid=self.usbIdFromPath(device.device_path)
AVNLog.info("device removal detected %s",usbid)
self.stopHandler(usbid)
except:
AVNLog.error("error in usb monitor loop: ",traceback.format_exc(1))
time.sleep(2)
#any start handling we leave to the audit...
#this is the main thread - this executes the polling
def run(self):
self.setInfo('main', "discovering", AVNWorker.Status.RUNNING)
self.setName("%s-polling"%(self.getThreadPrefix()))
time.sleep(2) # give a chance to have the feeder socket open...
#now start an endless loop with udev discovery...
#any removal will be detected by the monitor (to be fast)
#but we have an audit here anyway
#the removal will be robust enough to deal with 2 parallel tries
context=None
init=True
while True:
currentDevices={}
try:
AVNLog.debug("starting udev discovery")
if context is None:
context=pyudev.Context()
allDev=context.list_devices(subsystem='tty')
for dev in allDev:
if dev.parent is None or not (dev.parent.subsystem == "usb-serial" or dev.parent.subsystem == "usb"):
continue
usbid=self.usbIdFromPath(dev.device_path)
AVNLog.debug("discovered usb serial tty device %s at %s (usbid=%s)",dev.device_node,unicode(dev),usbid)
currentDevices[usbid]=dev.device_node
self.checkDevices(currentDevices)
if init:
monitorThread=threading.Thread(target=self.monitorDevices,args=(context,))
monitorThread.daemon=True
monitorThread.start()
init=False
except Exception as e:
AVNLog.debug("exception when querying usb serial devices %s, retrying after 10s",traceback.format_exc())
context=None
time.sleep(10)
#overloaded info method
def getInfo(self):
try:
rt=self.info.copy()
st=self.status.copy()
rta=[]
keys=sorted(rt.keys(),key=lambda x: re.sub("^[^-]*[-]","-",x))
for k in keys:
try:
elem={}
elem['name']=k
elem['info']=rt[k]
elem['status']=st[k]
rta.append(elem)
except:
pass
return {'name':self.getName(),'items':rta}
except:
return {'name':self.getName(),'items':[],'error':"no info available"}
#make this worker known to the global handler list so the server can
#instantiate it from the configuration at startup
avnav_handlerList.registerHandler(AVNUsbSerialReader)
show usb ids again in status
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ts=2 sw=2 et ai
###############################################################################
# Copyright (c) 2012,2013 Andreas Vogel andreas@wellenvogel.net
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# parts from this software (AIS decoding) are taken from the gpsd project
# so refer to this BSD licencse also (see ais.py) or omit ais.py
###############################################################################
from serialwriter import *
from avnserial import *
import avnav_handlerList
hasUdev=False
try:
import pyudev
hasUdev=True
except:
pass
#adapter passed to SerialReader/SerialWriter instead of the worker itself:
#it forwards their setInfo/deleteInfo calls to the parent worker under a
#fixed key (the usb id), so every device shows up as its own status entry
class InfoHandler():
  def __init__(self,name,parent):
    self.name=name
    self.parent=parent
  def setInfo(self,item,text,status):
    #the child's own item name is ignored - always report under our key
    self.parent.setInfo(self.name,text,status)
  def deleteInfo(self,item):
    self.parent.deleteInfo(self.name)
#fallback handler used for devices whose configured type is unknown;
#it does no I/O at all but keeps its slot in the handler map occupied
#until it is asked to stop
class DummyHandler():
  def __init__(self):
    #set to True by stopHandler() to make run() return
    self.stop=False
  def run(self):
    #idle until stopped
    while not self.stop:
      time.sleep(0.2)
  def stopHandler(self):
    self.stop=True
#a worker that will use udev to find serial devices
#based on the configuration it will open available devices and read data from them
class AVNUsbSerialReader(AVNWorker):
  #name of this worker's entry in the XML configuration
  @classmethod
  def getConfigName(cls):
    return "AVNUsbSerialReader"
  #return the worker defaults (child is None) or the per-device
  #parameters for a configured UsbDevice child entry
  @classmethod
  def getConfigParam(cls, child=None):
    if child is None:
      #get the default configuration for a serial reader
      rt=SerialReader.getConfigParam().copy()
      rt.update({
        'port': 0, #we do not use this
        'maxDevices':5, #this includes preconfigured devices!
        'feederName':'', #if set, use this feeder
        'allowUnknown':'true' #allow devices that are not configured
        })
      return rt
    if child == "UsbDevice":
      return cls.getSerialParam()
    return None
  #get the parameters for an usb device
  @classmethod
  def getSerialParam(cls):
    rt=SerialWriter.getConfigParam().copy()
    rt.update({
      'port': 0,
      'usbid':None, #an identifier of the USB device
                    #.../1-1.3.1:1.0/ttyUSB2/tty/ttyUSB2 - identifier would be 1-1.3.1
      'type': 'reader',
      })
    return rt
  #factory method; refuses to start when pyudev is not installed
  @classmethod
  def createInstance(cls, cfgparam):
    if not hasUdev:
      raise Exception("no pyudev installed, cannot run %s"%(cls.getConfigName()))
    cls.checkSingleInstance()
    return AVNUsbSerialReader(cfgparam)
  def __init__(self,cfgparam):
    AVNWorker.__init__(self, cfgparam)
    #maplock protects addrmap which is accessed from the polling thread,
    #the udev monitor thread and the per-device handler threads
    self.maplock=threading.Lock()
    #usbid -> (handler, device node) for all currently running handlers
    self.addrmap={}
  #return True if added
  def checkAndAddHandler(self,addr,handler,device):
    rt=False
    maxd=self.getIntParam('maxDevices')
    self.maplock.acquire()
    if len(self.addrmap) < maxd:
      if not addr in self.addrmap:
        self.addrmap[addr]=(handler,device)
        rt=True
    self.maplock.release()
    return rt
  #remove a handler from the map and return it (None if not registered)
  def removeHandler(self,addr):
    rt=None
    self.maplock.acquire()
    try:
      rt=self.addrmap.pop(addr)
    except:
      pass
    self.maplock.release()
    if rt is None:
      return None
    return rt[0]
  #param a dict of usbid->device
  #returns a dict: usbid->start|stop|restart|keep
  def getStartStopList(self,handlerlist):
    rt={}
    self.maplock.acquire()
    for h in handlerlist.keys():
      if h in self.addrmap:
        if handlerlist[h] != self.addrmap[h][1]:
          #device node changed - restart the handler
          #(fixed: previously wrote to the literal key 'h' instead of the
          #usb id, so restarts were never reported for the right device)
          rt[h]='restart'
        else:
          rt[h]='keep'
      else:
        rt[h]='start'
    for h in self.addrmap.keys():
      if not h in rt:
        rt[h]='stop'
    self.maplock.release()
    return rt
  #extract the usb id (e.g. 1-1.3.1) from a udev device path
  def usbIdFromPath(self,path):
    rt=re.sub('/ttyUSB.*','',path).split('/')[-1]
    return rt
  #find the configured UsbDevice entry for an usb id (None if absent)
  def getParamByUsbId(self,usbid):
    configuredDevices=self.param.get('UsbDevice')
    if configuredDevices is None:
      return None
    for dev in configuredDevices:
      if usbid==dev['usbid']:
        return dev
    return None
  #derive the serial handler parameters for a concrete device node
  def setParameterForSerial(self,param,usbid,device):
    rt=param.copy()
    rt.update({
      'name':"%s-%s"%(usbid,device),
      'port':device
      })
    return rt
  #a thread method to run a serial reader/writer
  def serialRun(self,handler,addr):
    try:
      handler.run()
    except:
      AVNLog.info("serial handler stopped with %s",(traceback.format_exc(),))
    AVNLog.debug("serial handler for %s finished",addr)
    self.removeHandler(addr)
    #DummyHandler has no getName - guard so an AttributeError does not
    #kill the thread before cleanup is done
    if hasattr(handler,'getName'):
      self.deleteInfo(handler.getName())
  #param: a dict key being the usb id, value the device node
  def checkDevices(self,devicelist):
    startStop=self.getStartStopList(devicelist)
    for usbid in startStop:
      if startStop[usbid]=='start':
        AVNLog.debug("must start handler for %s at %s",usbid,devicelist[usbid])
        sourceName="%s-%s"%(self.getName(),usbid)
        param=self.getParamByUsbId(usbid)
        #renamed from 'type' to avoid shadowing the builtin
        sourceType="anonymous"
        if param is None:
          if not self.getBoolParam('allowUnknown'):
            AVNLog.debug("unknown devices not allowed, skip start of %s at %s",usbid,devicelist[usbid])
            continue
          param=self.setParameterForSerial(self.getParam(),usbid,devicelist[usbid])
        else:
          sourceType="known"
          pn=param.get('name')
          if pn is not None and pn != '':
            sourceName=pn
          param=self.setParameterForSerial(param, usbid, devicelist[usbid])
        handlertype="reader"
        if param.get('type') is not None:
          handlertype=param.get('type')
        #each handler reports its status through an InfoHandler keyed by
        #the usb id, so every device gets its own status entry
        if handlertype == 'writer' or handlertype == "combined":
          handler=SerialWriter(param,self.writeData,InfoHandler(usbid,self),sourceName)
          if handlertype == "combined":
            handler.param["combined"]=True
        else:
          if handlertype == 'reader':
            handler=SerialReader(param, self.writeData, InfoHandler(usbid,self),sourceName)
          else:
            AVNLog.info("ignore device %s : type %s",usbid,handlertype)
            handler=DummyHandler()
        res=self.checkAndAddHandler(usbid, handler,devicelist[usbid])
        if not res:
          AVNLog.debug("max number of readers already reached, skip start of %s at %s",usbid,devicelist[usbid])
          continue
        handlerThread=threading.Thread(target=self.serialRun,args=(handler,usbid))
        handlerThread.daemon=True
        handlerThread.start()
        AVNLog.info("started %s for %s device %s at %s",handlertype,sourceType,usbid,devicelist[usbid])
      if startStop[usbid]=='stop' or startStop[usbid]=='restart':
        #on restart we only stop here - really starting is left to the audit...
        self.stopHandler(usbid)
  def stopHandler(self,usbid):
    AVNLog.debug("must stop handler for %s",usbid)
    handler=self.removeHandler(usbid)
    if handler is None:
      #must have been a thread race... or another device
      return
    try:
      handler.stopHandler()
      AVNLog.info("stop handler for %s triggered",usbid)
    except:
      pass
  #start monitoring in separate thread
  #method will never return...
  def monitorDevices(self,context):
    self.setInfo('monitor', "running", AVNWorker.Status.RUNNING)
    threading.current_thread().setName("%s[monitor]"%(self.getThreadPrefix()))
    AVNLog.info("start device monitoring")
    while True:
      try:
        monitor = pyudev.Monitor.from_netlink(context)
        monitor.filter_by(subsystem='tty')
        AVNLog.info("start monitor loop")
        for deviceaction in monitor:
          action,device=deviceaction
          if action=='remove':
            usbid=self.usbIdFromPath(device.device_path)
            AVNLog.info("device removal detected %s",usbid)
            self.stopHandler(usbid)
      except:
        AVNLog.error("error in usb monitor loop: ",traceback.format_exc(1))
        time.sleep(2)
  #any start handling we leave to the audit...
  #this is the main thread - this executes the polling
  def run(self):
    self.setInfo('main', "discovering", AVNWorker.Status.RUNNING)
    self.setName("%s-polling"%(self.getThreadPrefix()))
    time.sleep(2) # give a chance to have the feeder socket open...
    #now start an endless loop with udev discovery...
    #any removal will be detected by the monitor (to be fast)
    #but we have an audit here anyway
    #the removal will be robust enough to deal with 2 parallel tries
    context=None
    init=True
    while True:
      currentDevices={}
      try:
        AVNLog.debug("starting udev discovery")
        if context is None:
          context=pyudev.Context()
        allDev=context.list_devices(subsystem='tty')
        for dev in allDev:
          #only consider ttys whose parent is an usb (serial) device
          if dev.parent is None or not (dev.parent.subsystem == "usb-serial" or dev.parent.subsystem == "usb"):
            continue
          usbid=self.usbIdFromPath(dev.device_path)
          AVNLog.debug("discovered usb serial tty device %s at %s (usbid=%s)",dev.device_node,unicode(dev),usbid)
          currentDevices[usbid]=dev.device_node
        self.checkDevices(currentDevices)
        if init:
          #start the udev monitor thread once after the first discovery
          monitorThread=threading.Thread(target=self.monitorDevices,args=(context,))
          monitorThread.daemon=True
          monitorThread.start()
          init=False
      except Exception as e:
        AVNLog.debug("exception when querying usb serial devices %s, retrying after 10s",traceback.format_exc())
        #drop the context so it gets recreated on the next round
        context=None
      time.sleep(10)
  #overloaded info method: return the per-device entries sorted so that
  #devices of the same kind group together
  def getInfo(self):
    try:
      rt=self.info.copy()
      st=self.status.copy()
      rta=[]
      keys=sorted(rt.keys(),key=lambda x: re.sub("^[^-]*[-]","-",x))
      for k in keys:
        try:
          elem={}
          elem['name']=k
          elem['info']=rt[k]
          elem['status']=st[k]
          rta.append(elem)
        except:
          pass
      return {'name':self.getName(),'items':rta}
    except:
      return {'name':self.getName(),'items':[],'error':"no info available"}
#make this worker known to the global handler list so the server can
#instantiate it from the configuration at startup
avnav_handlerList.registerHandler(AVNUsbSerialReader)
|
#!/usr/bin/env python
import argparse
import ConfigParser
import json
import logging
import mad
import os
import re
import pycurl
import shutil
import sqlite3
import sys
import time
import socket
origGetAddrInfo = socket.getaddrinfo
def getAddrInfoWrapper(host, port, family=0, socktype=0, proto=0, flags=0):
return origGetAddrInfo(host, port, socket.AF_INET, socktype, proto, flags)
# replace the original socket.getaddrinfo by our version
socket.getaddrinfo = getAddrInfoWrapper
import urllib2
from datetime import datetime
from glob import glob
from flask import Flask, request, jsonify
from multiprocessing import Process, Queue
from StringIO import StringIO
g_start_time = time.time()
g_round_ix = 0
g_queue = Queue()
g_config = {}
g_last = {}
g_db = {}
g_streams = []
# Time helpers: everything downstream works in "minute of week" units.
def to_minute(unix_time):
  # Accepts either a unix timestamp (int) or a datetime and returns the
  # minute-of-week (0 = Monday 00:00 UTC, per datetime.weekday()).
  if type(unix_time) is int:
    unix_time = datetime.utcfromtimestamp(unix_time)
  minutes_per_day = 24 * 60
  return (unix_time.weekday() * minutes_per_day
          + unix_time.hour * 60
          + unix_time.minute)
def now():
  # The current UTC moment expressed as minute-of-week.
  return to_minute(datetime.utcnow())
def db_connect():
  # Lazily open and memoize the sqlite connection in the module-level
  # g_db dict, creating the intents table on first use.
  # Returns the {'conn': connection, 'c': cursor} dict.
  global g_db
  if 'conn' in g_db:
    return g_db
  conn = sqlite3.connect('config.db')
  g_db = {'conn': conn, 'c': conn.cursor()}
  g_db['c'].execute("""CREATE TABLE IF NOT EXISTS intents(
      id INTEGER PRIMARY KEY,
      key TEXT UNIQUE,
      start INTEGER,
      end INTEGER,
      read_count INTEGER DEFAULT 0,
      created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
      accessed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    )""")
  g_db['conn'].commit()
  return g_db
def register_intent(minute, duration):
  # Record (or refresh) the intent to record starting at minute-of-week
  # `minute` for `duration` minutes.  Re-registering an already known
  # intent only bumps its read_count and access time.
  db = db_connect()
  cursor = db['c']
  key = str(minute) + str(duration)
  row = cursor.execute('select id from intents where key = ?', (key, )).fetchone()
  if row is None:
    cursor.execute('insert into intents(key, start, end) values(?, ?, ?)', (key, minute, minute + duration))
  else:
    cursor.execute('update intents set read_count = read_count + 1, accessed_at = (current_timestamp) where id = ?', (row[0], ))
  db['conn'].commit()
  return db['c'].lastrowid
def generate_xml():
  #stub: XML feed generation is not implemented yet, always succeeds
  return True
def should_be_recording():
  # True if any registered intent covers the current minute-of-week.
  # An intent [start, end] covers minute m when start <= m <= end.
  # Fixed: the comparison was inverted (start >= ? and end <= ?) and so
  # only matched intents where start == end == current minute.
  db = db_connect()
  current_minute = now()
  intent_count = db['c'].execute(
    'select count(*) from intents where start <= ? and end >= ?', (current_minute, current_minute)).fetchone()[0]
  return intent_count != 0
def prune():
  # Delete archived stream files older than g_config['archivedays'] days.
  # Intent pruning is still TODO.
  global g_config
  db = db_connect()
  duration = int(g_config['archivedays']) * 60 * 60 * 24
  cutoff = time.time() - duration
  # Dumping old streams
  count = 0
  for f in os.listdir('.'):
    # os.path.join fixes the previous bare concatenation which produced a
    # broken path when the storage directory had no trailing separator
    entry = os.path.join(g_config['storage'], f)
    if os.path.isfile(entry) and os.path.getctime(entry) < cutoff:
      logging.debug("Prune: %s" % entry)
      os.unlink(entry)
      count += 1
  # Dump old intents (TODO)
  logging.info("Found %d files older than %s days." % (count, g_config['archivedays']))
def get_time_offset():
  # Query the Google timezone API for the UTC offset (in minutes) at the
  # configured lat/long and store it in g_config['offset'].
  # Returns True on success; on failure the offset falls back to 0.
  global g_config
  when = int(time.time())
  # NOTE(review): hard-coded API key checked into source - should move
  # into the configuration file.
  api_key='AIzaSyBkyEMoXrSYTtIi8bevEIrSxh1Iig5V_to'
  # fixed: the query string was mangled ('...%s&#215;tamp=%d...') and was
  # missing the '&timestamp=' parameter separator
  url = "https://maps.googleapis.com/maps/api/timezone/json?location=%s,%s&timestamp=%d&key=%s" % (g_config['lat'], g_config['long'], when, api_key)
  stream = urllib2.urlopen(url)
  data = stream.read()
  opts = json.loads(data)
  if opts['status'] == 'OK':
    # rawOffset is seconds; we store minutes to match to_minute()/to_utc()
    g_config['offset'] = int(opts['rawOffset']) / 60
    return True
  # Let's do something at least
  else:
    g_config['offset'] = 0
    return False
#find recorded stream files that could serve a request starting at
#`minute` for `duration` minutes.
#NOTE(review): clearly work in progress - the `duration` parameter is
#immediately shadowed by the decoded file length, g_streams is never
#populated and the result is only printed; confirm intended behaviour.
def find_streams(minute, duration):
  global g_streams
  # recordings are named <callsign>-<unixtime>.mp3; capture the timestamp
  ts_re = re.compile('(\d*).mp3')
  for f in glob('*.mp3'):
    ts = ts_re.findall(f)
    try:
      # length of the recording in minutes (total_time() is milliseconds)
      duration = mad.MadFile(f).total_time() / (60.0 * 1000)
    except:
      logging.warning("Unable to read file %s as an mp3 file" % f)
    print to_minute(int(ts[0]))
  return True
#run the Flask web frontend (blocking); started in its own process by
#spawner().  Exposes a localhost-only /heartbeat endpoint plus the
#stream registration route.
def server():
  app = Flask(__name__)
  @app.route('/heartbeat')
  def heartbeat():
    global g_config
    # only the local machine may query the heartbeat
    if request.remote_addr != '127.0.0.1':
      return '', 403
    # disk usage of all archived files in the storage directory
    stats = {
      'disk': sum(os.path.getsize(f) for f in os.listdir('.') if os.path.isfile(f))
    }
    return jsonify(stats), 200
  @app.route('/<weekday>/<start>/<duration>/<name>')
  def stream(weekday, start, duration, name):
    # convert e.g. mon/9:00am into our minute-of-week units
    ts = to_utc(weekday, start)
    # This will register the intent if needed
    register_intent(ts, duration)
    return weekday + start + duration + name
  # NOTE(review): debug=True in what looks like production code - confirm
  app.run(debug=True)
#stream the radio station at `url` into <callsign>-<unixtime>.mp3,
#signalling liveness to the spawner through g_queue on every chunk.
#Runs in its own process; blocks for as long as the stream stays up.
def download(callsign, url):
  def cback(data):
    global g_round_ix, g_config, g_start_time
    # tell the spawner we are still alive
    g_queue.put(True)
    g_round_ix += 1
    stream.write(data)
    # chunks per second since process start
    logging.debug(str(float(g_round_ix) / (time.time() - g_start_time)))
  logging.info("Spawning - %s" % callsign)
  fname = callsign + "-" + str(int(time.time())) + ".mp3"
  try:
    # NOTE(review): mp3 data is binary - mode 'wb' would be safer
    stream = open(fname, 'w')
  except:
    logging.critical("Unable to open %s. Can't record. Must exit." % (fname))
    sys.exit(-1)
  c = pycurl.Curl()
  c.setopt(c.URL, url)
  c.setopt(pycurl.WRITEFUNCTION, cback)
  c.perform()
  c.close()
  stream.close()
def ago(duration):
  # Unix timestamp `duration` seconds in the past.
  past = time.time() - duration
  return past
# This takes the nominal weekday (sun, mon, tue, wed, thu, fri, sat)
# and a 12 hour time hh:mm [ap]m and converts it to our absolute units
# with respect to the timestamp in the configuration file
def to_utc(day_str, hour):
  # Convert a weekday name plus a 12-hour clock time ("hh:mm[ap]m") into
  # our minute-of-week units, shifted by the configured UTC offset.
  # Returns False when either part cannot be parsed.
  # Fixes vs. the previous version:
  #  - `except e:` raised a NameError instead of catching the lookup error
  #  - re.findall with three groups returns a list of tuples, so the old
  #    int(time[0]) crashed on every well-formed input
  #  - days were scaled to seconds (60*60*24) although every other helper
  #    (to_minute, now) works in minutes
  #  - 12:xx am/pm now wraps correctly (12 % 12 == 0)
  # NOTE(review): day numbering here starts at sunday while
  # datetime.weekday() (used by to_minute) starts at monday - confirm
  # which convention the intents table should use.
  global g_config
  try:
    day_number = ['sun','mon','tue','wed','thu','fri','sat'].index(day_str.lower())
  except ValueError:
    return False
  time_re = re.compile('(\d{1,2}):(\d{2})([ap])m')
  parts = time_re.findall(hour)
  if len(parts) == 0:
    return False
  hh, mm, half = parts[0]
  local = day_number * (24 * 60)
  local += (int(hh) % 12) * 60
  local += int(mm)
  if half == 'p':
    local += 12 * 60
  utc = local + g_config['offset']
  return utc
#supervisor loop: starts the web server process, (re)spawns the download
#process whenever it stops reporting progress, and runs the daily
#housekeeping (prune, timezone refresh).  Only returns (False) when the
#download process could not be restarted.
def spawner():
  global g_queue, g_config, g_last
  station = {
    'callsign': g_config['callsign'],
    'url': g_config['stream'],
    'flag': False,
    'process': False
  }
  g_last = {
    'prune': 0,
    'offset': 0
  }
  minute = 60
  hour = 60 * minute
  day = 24 * hour
  b_shutdown = False
  server_pid = Process(target=server)
  server_pid.start()
  while True:
    # daily housekeeping
    if g_last['prune'] < ago(1 * day):
      prune()
      g_last['prune'] = time.time()
    if g_last['offset'] < ago(1 * day):
      get_time_offset()
      g_last['offset'] = time.time()
    # drain the liveness queue fed by the download callback
    while not g_queue.empty():
      b = g_queue.get(False)
      if b == 'shutdown':
        b_shutdown = True
      else:
        station['flag'] = True
    # didn't respond in 3 seconds so we respawn
    if station['flag'] == False:
      if station['process'] != False and station['process'].is_alive():
        station['process'].terminate()
      station['process'] = False
    if station['process'] == False and b_shutdown == False:
      station['process'] = p = Process(target=download, args=(g_config['callsign'], station['url'],))
      p.start()
    # If there is still no process then we should definitely bail.
    if station['process'] == False:
      return False
    station['flag'] = False
    time.sleep(3)
# From https://wiki.python.org/moin/ConfigParserExamples
def ConfigSectionMap(section, Config):
  # Flatten one section of a ConfigParser into a plain option->value
  # dict; options that cannot be read are kept with value None.
  result = {}
  for option in Config.options(section):
    try:
      value = Config.get(section, option)
      if value == -1:
        logging.info("skip: %s" % option)
      result[option] = value
    except:
      logging.warning("exception on %s!" % option)
      result[option] = None
  return result
def startup():
global g_config
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", default="./indy_config.txt", help="Configuration file (default ./indy_config.txt)")
parser.add_argument("-v", "--version", help="Version info")
args = parser.parse_args()
Config = ConfigParser.ConfigParser()
Config.read(args.config)
g_config = ConfigSectionMap('Main', Config)
if 'loglevel' not in g_config:
g_config['loglevel'] = 'WARN'
if os.path.isdir(g_config['storage']):
os.chdir(g_config['storage'])
else:
logging.warning("Can't find %s. Using current directory." % g_config['storage'])
# from https://docs.python.org/2/howto/logging.html
numeric_level = getattr(logging, g_config['loglevel'].upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % loglevel)
logging.basicConfig(level=numeric_level, filename='indycast.log')
register_intent(123,321)
print should_be_recording()
find_streams(0,0)
sys.exit(0)
get_time_offset()
shutdown()
sys.exit(0)
def shutdown():
  #close the database connection and ask the spawner loop to terminate
  global g_db, g_queue
  g_db['conn'].close()
  g_queue.put('shutdown')
#entry point: read the configuration, then supervise the recorder forever
startup()
spawner()
restructuring
#!/usr/bin/env python
import argparse
import ConfigParser
import json
import logging
import mad
import os
import re
import pycurl
import shutil
import sqlite3
import sys
import time
import socket
import xml.etree.cElementTree as ET
origGetAddrInfo = socket.getaddrinfo
def getAddrInfoWrapper(host, port, family=0, socktype=0, proto=0, flags=0):
return origGetAddrInfo(host, port, socket.AF_INET, socktype, proto, flags)
# replace the original socket.getaddrinfo by our version
socket.getaddrinfo = getAddrInfoWrapper
import urllib2
from datetime import datetime
from glob import glob
from flask import Flask, request, jsonify
from multiprocessing import Process, Queue
from StringIO import StringIO
g_start_time = time.time()
g_round_ix = 0
g_queue = Queue()
g_config = {}
g_last = {}
g_db = {}
g_streams = []
def shutdown():
  #close the database connection and ask the spawner loop to terminate
  global g_db, g_queue
  g_db['conn'].close()
  g_queue.put('shutdown')
# Time related
def to_minute(unix_time):
  # Accepts either a unix timestamp (int) or a datetime and returns the
  # minute-of-week (0 = Monday 00:00 UTC, per datetime.weekday()).
  if type(unix_time) is int:
    unix_time = datetime.utcfromtimestamp(unix_time)
  minutes_per_day = 24 * 60
  return (unix_time.weekday() * minutes_per_day
          + unix_time.hour * 60
          + unix_time.minute)
def now():
  # The current UTC moment expressed as minute-of-week.
  return to_minute(datetime.utcnow())
def ago(duration):
  # Unix timestamp `duration` seconds in the past.
  past = time.time() - duration
  return past
# This takes the nominal weekday (sun, mon, tue, wed, thu, fri, sat)
# and a 12 hour time hh:mm [ap]m and converts it to our absolute units
# with respect to the timestamp in the configuration file
def to_utc(day_str, hour):
  # Convert a weekday name plus a 12-hour clock time ("hh:mm[ap]m") into
  # our minute-of-week units, shifted by the configured UTC offset.
  # Returns False when either part cannot be parsed.
  # Fixes vs. the previous version:
  #  - `except e:` raised a NameError instead of catching the lookup error
  #  - re.findall with three groups returns a list of tuples, so the old
  #    int(time[0]) crashed on every well-formed input
  #  - days were scaled to seconds (60*60*24) although every other helper
  #    (to_minute, now) works in minutes
  #  - 12:xx am/pm now wraps correctly (12 % 12 == 0)
  # NOTE(review): day numbering here starts at sunday while
  # datetime.weekday() (used by to_minute) starts at monday - confirm
  # which convention the intents table should use.
  global g_config
  try:
    day_number = ['sun','mon','tue','wed','thu','fri','sat'].index(day_str.lower())
  except ValueError:
    return False
  time_re = re.compile('(\d{1,2}):(\d{2})([ap])m')
  parts = time_re.findall(hour)
  if len(parts) == 0:
    return False
  hh, mm, half = parts[0]
  local = day_number * (24 * 60)
  local += (int(hh) % 12) * 60
  local += int(mm)
  if half == 'p':
    local += 12 * 60
  utc = local + g_config['offset']
  return utc
def get_time_offset():
  # Query the Google timezone API for the UTC offset (in minutes) at the
  # configured lat/long and store it in g_config['offset'].
  # Returns True on success; on failure the offset falls back to 0.
  global g_config
  when = int(time.time())
  # NOTE(review): hard-coded API key checked into source - should move
  # into the configuration file.
  api_key='AIzaSyBkyEMoXrSYTtIi8bevEIrSxh1Iig5V_to'
  # fixed: the query string was mangled ('...%s&#215;tamp=%d...') and was
  # missing the '&timestamp=' parameter separator
  url = "https://maps.googleapis.com/maps/api/timezone/json?location=%s,%s&timestamp=%d&key=%s" % (g_config['lat'], g_config['long'], when, api_key)
  stream = urllib2.urlopen(url)
  data = stream.read()
  opts = json.loads(data)
  if opts['status'] == 'OK':
    # rawOffset is seconds; we store minutes to match to_minute()/to_utc()
    g_config['offset'] = int(opts['rawOffset']) / 60
    return True
  # Let's do something at least
  else:
    g_config['offset'] = 0
    return False
def db_connect():
  # Lazily open and memoize the sqlite connection in the module-level
  # g_db dict, creating the intents table on first use.
  # Returns the {'conn': connection, 'c': cursor} dict.
  global g_db
  if 'conn' in g_db:
    return g_db
  conn = sqlite3.connect('config.db')
  g_db = {'conn': conn, 'c': conn.cursor()}
  g_db['c'].execute("""CREATE TABLE IF NOT EXISTS intents(
      id INTEGER PRIMARY KEY,
      key TEXT UNIQUE,
      start INTEGER,
      end INTEGER,
      read_count INTEGER DEFAULT 0,
      created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
      accessed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    )""")
  g_db['conn'].commit()
  return g_db
def register_intent(minute, duration):
  # Record (or refresh) the intent to record starting at minute-of-week
  # `minute` for `duration` minutes.  Re-registering an already known
  # intent only bumps its read_count and access time.
  db = db_connect()
  cursor = db['c']
  key = str(minute) + str(duration)
  row = cursor.execute('select id from intents where key = ?', (key, )).fetchone()
  if row is None:
    cursor.execute('insert into intents(key, start, end) values(?, ?, ?)', (key, minute, minute + duration))
  else:
    cursor.execute('update intents set read_count = read_count + 1, accessed_at = (current_timestamp) where id = ?', (row[0], ))
  db['conn'].commit()
  return db['c'].lastrowid
def should_be_recording():
  # True if any registered intent covers the current minute-of-week.
  # An intent [start, end] covers minute m when start <= m <= end.
  # Fixed: the comparison was inverted (start >= ? and end <= ?) and so
  # only matched intents where start == end == current minute.
  db = db_connect()
  current_minute = now()
  intent_count = db['c'].execute(
    'select count(*) from intents where start <= ? and end >= ?', (current_minute, current_minute)).fetchone()[0]
  return intent_count != 0
def prune():
  # Delete archived stream files older than g_config['archivedays'] days.
  # Intent pruning is still TODO.
  global g_config
  db = db_connect()
  duration = int(g_config['archivedays']) * 60 * 60 * 24
  cutoff = time.time() - duration
  # Dumping old streams
  count = 0
  for f in os.listdir('.'):
    # os.path.join fixes the previous bare concatenation which produced a
    # broken path when the storage directory had no trailing separator
    entry = os.path.join(g_config['storage'], f)
    if os.path.isfile(entry) and os.path.getctime(entry) < cutoff:
      logging.debug("Prune: %s" % entry)
      os.unlink(entry)
      count += 1
  # Dump old intents (TODO)
  logging.info("Found %d files older than %s days." % (count, g_config['archivedays']))
#find recorded stream files that could serve a request starting at
#`minute` for `duration` minutes.
#NOTE(review): clearly work in progress - the `duration` parameter is
#immediately shadowed by the decoded file length, g_streams is never
#populated and the result is only printed; confirm intended behaviour.
def find_streams(minute, duration):
  global g_streams
  # recordings are named <callsign>-<unixtime>.mp3; capture the timestamp
  ts_re = re.compile('(\d*).mp3')
  for f in glob('*.mp3'):
    ts = ts_re.findall(f)
    try:
      # length of the recording in minutes (total_time() is milliseconds)
      duration = mad.MadFile(f).total_time() / (60.0 * 1000)
    except:
      logging.warning("Unable to read file %s as an mp3 file" % f)
    print to_minute(int(ts[0]))
  return True
def generate_xml():
  #stub: XML feed generation is not implemented yet, always succeeds
  return True
#run the Flask web frontend (blocking); started in its own process by
#spawner().  Exposes a localhost-only /heartbeat endpoint plus the
#stream registration route.
def server():
  app = Flask(__name__)
  @app.route('/heartbeat')
  def heartbeat():
    global g_config
    # only the local machine may query the heartbeat
    if request.remote_addr != '127.0.0.1':
      return '', 403
    # disk usage of all archived files in the storage directory
    stats = {
      'disk': sum(os.path.getsize(f) for f in os.listdir('.') if os.path.isfile(f))
    }
    return jsonify(stats), 200
  @app.route('/<weekday>/<start>/<duration>/<name>')
  def stream(weekday, start, duration, name):
    # convert e.g. mon/9:00am into our minute-of-week units
    ts = to_utc(weekday, start)
    # This will register the intent if needed
    register_intent(ts, duration)
    return weekday + start + duration + name
  # NOTE(review): debug=True in what looks like production code - confirm
  app.run(debug=True)
#stream the radio station at `url` into <callsign>-<unixtime>.mp3,
#signalling liveness to the spawner through g_queue on every chunk.
#Runs in its own process; blocks for as long as the stream stays up.
def download(callsign, url):
  def cback(data):
    global g_round_ix, g_config, g_start_time
    # tell the spawner we are still alive
    g_queue.put(True)
    g_round_ix += 1
    stream.write(data)
    # chunks per second since process start
    logging.debug(str(float(g_round_ix) / (time.time() - g_start_time)))
  logging.info("Spawning - %s" % callsign)
  fname = callsign + "-" + str(int(time.time())) + ".mp3"
  try:
    # NOTE(review): mp3 data is binary - mode 'wb' would be safer
    stream = open(fname, 'w')
  except:
    logging.critical("Unable to open %s. Can't record. Must exit." % (fname))
    sys.exit(-1)
  c = pycurl.Curl()
  c.setopt(c.URL, url)
  c.setopt(pycurl.WRITEFUNCTION, cback)
  c.perform()
  c.close()
  stream.close()
#supervisor loop: starts the web server process, (re)spawns the download
#process whenever it stops reporting progress, and runs the daily
#housekeeping (prune, timezone refresh).  Only returns (False) when the
#download process could not be restarted.
def spawner():
  global g_queue, g_config, g_last
  station = {
    'callsign': g_config['callsign'],
    'url': g_config['stream'],
    'flag': False,
    'process': False
  }
  g_last = {
    'prune': 0,
    'offset': 0
  }
  minute = 60
  hour = 60 * minute
  day = 24 * hour
  b_shutdown = False
  server_pid = Process(target=server)
  server_pid.start()
  while True:
    # daily housekeeping
    if g_last['prune'] < ago(1 * day):
      prune()
      g_last['prune'] = time.time()
    if g_last['offset'] < ago(1 * day):
      get_time_offset()
      g_last['offset'] = time.time()
    # drain the liveness queue fed by the download callback
    while not g_queue.empty():
      b = g_queue.get(False)
      if b == 'shutdown':
        b_shutdown = True
      else:
        station['flag'] = True
    # didn't respond in 3 seconds so we respawn
    if station['flag'] == False:
      if station['process'] != False and station['process'].is_alive():
        station['process'].terminate()
      station['process'] = False
    if station['process'] == False and b_shutdown == False:
      station['process'] = p = Process(target=download, args=(g_config['callsign'], station['url'],))
      p.start()
    # If there is still no process then we should definitely bail.
    if station['process'] == False:
      return False
    station['flag'] = False
    time.sleep(3)
# From https://wiki.python.org/moin/ConfigParserExamples
def ConfigSectionMap(section, Config):
  # Flatten one section of a ConfigParser into a plain option->value
  # dict; options that cannot be read are kept with value None.
  result = {}
  for option in Config.options(section):
    try:
      value = Config.get(section, option)
      if value == -1:
        logging.info("skip: %s" % option)
      result[option] = value
    except:
      logging.warning("exception on %s!" % option)
      result[option] = None
  return result
def startup():
global g_config
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", default="./indy_config.txt", help="Configuration file (default ./indy_config.txt)")
parser.add_argument("-v", "--version", help="Version info")
args = parser.parse_args()
Config = ConfigParser.ConfigParser()
Config.read(args.config)
g_config = ConfigSectionMap('Main', Config)
if 'loglevel' not in g_config:
g_config['loglevel'] = 'WARN'
if os.path.isdir(g_config['storage']):
os.chdir(g_config['storage'])
else:
logging.warning("Can't find %s. Using current directory." % g_config['storage'])
# from https://docs.python.org/2/howto/logging.html
numeric_level = getattr(logging, g_config['loglevel'].upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % loglevel)
logging.basicConfig(level=numeric_level, filename='indycast.log')
register_intent(123,321)
print should_be_recording()
find_streams(0,0)
sys.exit(0)
get_time_offset()
shutdown()
sys.exit(0)
#entry point: read the configuration, then supervise the recorder forever
startup()
spawner()
|
#!/usr/bin/python -O
import argparse
import binascii
import ConfigParser
import json
import logging
import lxml.etree as ET
import math
import os
import pycurl
import re
import setproctitle as SP
import signal
import socket
import sqlite3
import struct
import sys
import time
#
# This is needed to force ipv4 on ipv6 devices. It's sometimes needed
# if there isn't a clean ipv6 route to get to the big wild internet.
# In these cases, a pure ipv6 route simply will not work. People aren't
# always in full control of every hop ... so it's much safer to force
# ipv4 then optimistically cross our fingers.
#
# Keep a handle on the real resolver before we monkeypatch it.
origGetAddrInfo = socket.getaddrinfo
# Wrapper that pins every lookup to AF_INET, ignoring the requested family.
def getAddrInfoWrapper(host, port, family=0, socktype=0, proto=0, flags=0):
    return origGetAddrInfo(host, port, socket.AF_INET, socktype, proto, flags)
# Replace the original socket.getaddrinfo by our version
socket.getaddrinfo = getAddrInfoWrapper
import urllib2
from datetime import datetime, timedelta, date
from glob import glob
from flask import Flask, request, jsonify
import flask
from multiprocessing import Process, Queue
g_start_time = time.time()  # process start, used for uptime reporting
g_queue = Queue()           # heartbeat/shutdown messages from child processes
g_config = {}               # parsed configuration (filled in by read_config)
g_db = {}                   # memoized sqlite handle + cursor (see db_connect)
g_pid = 0                   # monotonically increasing downloader counter
# From https://wiki.python.org/moin/ConfigParserExamples
def ConfigSectionMap(section, Config):
    """Return the options of one config section as an {option: value} dict.

    A lookup that raises is recorded as None; a value of -1 is logged as
    "skip", matching the wiki recipe this was adapted from.
    """
    result = {}
    for key in Config.options(section):
        try:
            result[key] = Config.get(section, key)
            if result[key] == -1:
                logging.info("skip: %s" % key)
        except:
            logging.warning("exception on %s!" % key)
            result[key] = None
    return result
# Sets a more human-readable process name for the various parts of the system to be viewed in top/htop
def change_proc_name(what):
    # Rename the process for top/htop, and announce it with the pid on stdout.
    SP.setproctitle(what)
    print "[%s:%d] Starting" % (what, os.getpid())
# shutdown is hit on the keyboard interrupt
def shutdown(signal = 15, frame = False):
    """Signal handler: close the db, log uptime (manager only), and exit.

    Installed for SIGINT; also called directly on fatal startup errors.
    """
    global g_db, g_queue, g_start_time
    title = SP.getproctitle()
    print "[%s:%d] Shutting down" % (title, os.getpid())
    if 'conn' in g_db:
        g_db['conn'].close()
    logging.info("[%s:%d] Shutting down through keyboard interrupt" % (title, os.getpid()))
    # Only the manager owns the worker queue; tell the children to stop too.
    if title == 'ic-manager':
        logging.info("Uptime: %ds", time.time() - g_start_time)
        g_queue.put(('shutdown', True))
    sys.exit(0)
##
## Audio related functions
##
# This determines the date the thing starts,
# the minute time it starts, and the duration
def audio_stream_info(fname):
    """Derive week / start-minute / duration metadata from a stream file name.

    Names look like <callsign>-<unixtime>[._]... ; duration comes from
    scanning the mp3 frames, falling back to a trailing _<minutes>.mp3 hint.
    """
    ts_re = re.compile('-(\d*)[.|_]')
    ts = ts_re.findall(fname)
    duration = 0
    start_minute = 0
    start_date = 0
    if ts:
        unix_time = int(ts[0])
        start_minute = time_to_minute(unix_time)
        start_date = datetime.utcfromtimestamp(unix_time)
    try:
        duration = audio_time_fast(fname)
    except:
        # If we can't find a duration then we try to see if it's in the file name
        ts_re_duration = re.compile('_(\d*).mp3')
        ts = ts_re_duration.findall(fname)
        if ts:
            duration = int(ts[0]) * 60
    # NOTE(review): if the name carried no timestamp, start_date is still the
    # int 0 here and .isocalendar() below raises AttributeError — confirm that
    # callers always pass timestamped names.
    return {
        # The week number
        'week': start_date.isocalendar()[1],
        'name': fname,
        'start_minute': start_minute,
        'start_date': start_date,
        'end_minute': (duration / 60.0 + start_minute) % 10080,
        'duration_sec': duration,
    }
#
# Open up an mp3 file, find all the blocks, the byte offset of the blocks, and if they
# are audio blocks, construct a crc32 mapping of some given beginning offset of the audio
# data ... this is intended for stitching.
#
def audio_crc(fname, blockcount = -1):
    """Walk the mp3 at fname block by block (Python 2: headers are byte strings).

    Returns [frame_sig, start_byte]: a crc32 of the first 32 payload bytes of
    each audio frame, and the byte offset where that frame starts.  blockcount
    caps how many blocks are visited (-1 = no cap).  ID3v2/ID3v1 tags are
    skipped without producing signatures.
    """
    frame_sig = []
    start_byte = []
    # MPEG-1 Layer III sample-rate / bit-rate lookup tables, indexed by the
    # header bits extracted below.
    freqTable = [ 44100, 48000, 32000, 0 ]
    brTable = [
        0, 32, 40, 48,
        56, 64, 80, 96,
        112, 128, 160, 192,
        224, 256, 320, 0
    ]
    f = open(fname, 'rb')
    while blockcount != 0:
        blockcount -= 1
        frame_start = f.tell()
        header = f.read(2)
        if header:
            # 0xfffb / 0xfffa: MPEG-1 Layer III frame sync.
            if header == '\xff\xfb' or header == '\xff\xfa':
                b = ord(f.read(1))
                samp_rate = freqTable[(b & 0x0f) >> 2]
                bit_rate = brTable[b >> 4]
                pad_bit = (b & 0x3) >> 1
                # from http://id3.org/mp3Frame
                frame_size = (144000 * bit_rate / samp_rate) + pad_bit
                # Rest of the header
                throw_away = f.read(1)
                # Get the signature
                crc = binascii.crc32(f.read(32))
                frame_sig.append(crc)
                start_byte.append(frame_start)
                # Move forward the frame f.read size + 4 byte header
                throw_away = f.read(frame_size - 36)
            # ID3 tag for some reason
            elif header == '\x49\x44':
                # Rest of the header
                throw_away = f.read(4)
                # Quoting http://id3.org/d3v2.3.0
                #
                # The ID3v2 tag size is encoded with four bytes where the most significant bit
                # (bit 7) is set to zero in every byte, making a total of 28 bits. The zeroed
                # bits are ignored, so a 257 bytes long tag is represented as $00 00 02 01.
                #
                candidate = struct.unpack('>I', f.read(4))[0]
                size = ((candidate & 0x007f0000) >> 2 ) | ((candidate & 0x00007f00) >> 1 ) | (candidate & 0x0000007f)
                f.read(size)
            # ID3 TAG -- 128 bytes long
            elif header == '\x54\x41':
                # We've already read 2 so we can go 126 forward
                f.read(126)
            elif len(header) == 1:
                # we are at the end of file, but let's just continue.
                # NOTE(review): bare `next` is a no-op expression, not a
                # loop-continue; the loop actually exits via the empty read.
                next
            else:
                # This helps me debug mp3 files that I'm not reading correctly.
                print "%s:%s:%s:%s %s %d" % (binascii.b2a_hex(header), header, f.read(5), fname, hex(f.tell()), len(start_byte) * (1152.0 / 44100) / 60)
                break
        else:
            break
    f.close()
    return [frame_sig, start_byte]
def audio_time_fast(fname):
    """Estimate the duration of fname in seconds from its first two frames."""
    crc32, offset = audio_crc(fname, 2)
    # in the fast method we get the first two frames, find out the offset
    # difference between them, take the length of the file, divide it by that
    # and then presume that will be the framecount
    frame_size = offset[1] - offset[0]
    # Python-2 integer division: the frame count is implicitly floored here.
    frame_count_est = os.path.getsize(fname) / frame_size
    return (1152.0 / 44100) * frame_count_est
#
# Given a start_file in a directory and a duration, this function will seek out
# ajacent files if necessary and serialize them accordingly, and then return the
# file name of an audio slice that is the combination of them.
#
def audio_stitch_and_slice(file_list, start_minute, duration_minute):
    """Stitch file_list into one mp3, then slice out the requested window.

    Returns the sliced file name, False for an empty file list, and -1 when
    stitching fails.  NOTE(review): callers appear to treat any truthy value
    as a name, and -1 is truthy — confirm the failure path.
    """
    if len(file_list) == 0:
        return False
    # We presume that there is a file list we need to make
    stitched_name = audio_stitch(file_list, force_stitch = True)
    if stitched_name:
        info = audio_stream_info(stitched_name)
    else:
        logging.warn("Unable to stitch file list")
        return -1
    # After we've stitched together the audio then we start our slice
    # by figuring our the start_minute of the slice, versus ours
    start_slice = max(info['start_minute'] - start_minute, 0)
    # Now we need to take the duration of the stream we want, in minutes, and then
    # make sure that we don't exceed the length of the file.
    duration_slice = min(duration_minute, start_slice + info['duration_sec'] / 60.0)
    sliced_name = audio_slice(stitched_name, start_minute = start_slice, duration_minute = duration_slice)
    return sliced_name
#
# audio_serialize takes a list of ordinal tuples and makes one larger mp3 out of it.
# The tuple format is (fila_name, byte_start, byte_end) where byte_end == -1 means
# "the whole file".
#
def audio_serialize(file_list, duration_min):
    """Concatenate byte ranges of several mp3 files into one stitched file.

    file_list is a list of (file_name, byte_start, byte_end) tuples, where
    byte_end == -1 means "through the end of the file".  The output lands in
    stitches/, named after the first input file plus the total duration, and
    an existing output is reused.  Returns the stitched file's path.
    """
    first_file = file_list[0][0]
    # Our file will be the first one_duration.mp3
    out_name = "stitches/%s_%d.mp3" % (first_file[first_file.index('/') + 1:first_file.rindex('.')], duration_min)
    # If the file exists, then we just return it
    if os.path.isfile(out_name):
        return out_name
    out = open(out_name, 'wb+')
    # BUG FIX: the loop variable was previously also called `name`, shadowing
    # the output file name — the function then returned the *last input* path
    # instead of the stitched file.
    for fname, start, end in file_list:
        f = open(fname, 'rb')
        f.seek(start)
        if end == -1:
            out.write(f.read())
        else:
            out.write(f.read(end - start))
        f.close()
    out.close()
    return out_name
#
# Take some mp3 file name_in and then create a new one based on the start and end times
# by finding the closest frames and just doing an extraction.
#
def audio_slice(name_in, start_minute, end_minute = -1, duration_minute = -1):
    """Extract [start_minute, end_minute) from an mp3 along frame boundaries.

    Provide either end_minute or duration_minute (the other is derived).
    The result is cached under slices/ and reused when it already exists.
    """
    if duration_minute == -1:
        duration_minute = end_minute - start_minute
    else:
        end_minute = start_minute + duration_minute
    name_out = "slices/%s_%d.mp3" % (name_in[name_in.index('/') + 1:name_in.rindex('_')], duration_minute)
    start_sec = start_minute * 60.0
    end_sec = end_minute * 60.0
    if os.path.isfile(name_out):
        return name_out
    # Most common frame-length ... in practice, I haven't
    # seen other values in the real world
    frame_length = (1152.0 / 44100)
    crc32, offset = audio_crc(name_in)
    # Convert second marks into frame indices, then copy the raw byte range
    # between those two frame offsets.
    frame_start = int(math.floor(start_sec / frame_length))
    frame_end = int(math.ceil(end_sec / frame_length))
    out = open(name_out, 'wb+')
    fin = open(name_in, 'rb')
    fin.seek(offset[frame_start])
    out.write(fin.read(offset[frame_end] - offset[frame_start]))
    fin.close()
    out.close()
    return name_out
#
# audio_stitch takes a list of files and then attempt to seamlessly stitch them
# together by looking at their crc32 checksums of the data payload in the blocks.
#
def audio_stitch(file_list, force_stitch = False):
    """Join consecutive stream files by matching overlapping frame crc32s.

    Returns the serialized file name, or None (implicit fallthrough) when
    fewer than two segments could be joined.  force_stitch joins anyway when
    no crc overlap is found, assuming position 1.
    """
    first = {'name': file_list[0]}
    duration = 0
    frame_length = (1152.0 / 44100)
    crc32, offset = audio_crc(first['name'])
    first['crc32'] = crc32
    first['offset'] = offset
    args = [(first['name'], 0, first['offset'][-1])]
    duration += len(first['offset']) * frame_length
    for name in file_list[1:]:
        second = {'name': name}
        crc32, offset = audio_crc(name)
        second['crc32'] = crc32
        second['offset'] = offset
        isFound = True
        try:
            # Find the tail of the first file inside the second, then verify
            # a few more surrounding frames also line up.
            pos = second['crc32'].index(first['crc32'][-2])
            for i in xrange(5, 1, -1):
                if second['crc32'][pos - i + 2] != first['crc32'][-i]:
                    isFound = False
                    print "Indices do not match between %s and %s" % (first['name'], second['name'])
                    break
        except:
            logging.warn("Cannot find indices between %s and %s" % (first['name'], second['name']))
            pos = 1
            isFound = force_stitch
        if isFound:
            args.append((second['name'], second['offset'][pos], second['offset'][-2]))
            duration += (len(second['offset']) - pos - 1) * frame_length
            first = second
            continue
        break
    # Since we end at the last block, we can safely pass in a file1_stop of 0
    if len(args) > 1:
        # And then we take the offset in the second['crc32'] where things began
        return audio_serialize(args, duration_min = int(duration / 60))
##
## Time related functions
##
def time_to_minute(unix_time):
    """Convert a unix timestamp (or datetime) into a minute-of-week offset.

    Monday 00:00 is minute 0; the result lies in [0, 10080).
    """
    when = unix_time
    if type(when) is int:
        when = datetime.utcfromtimestamp(when)
    minutes_past_midnight = when.hour * 60 + when.minute
    return when.weekday() * 24 * 60 + minutes_past_midnight
# from http://code.activestate.com/recipes/521915-start-date-and-end-date-of-given-week/
def time_week_to_iso(year, week):
    """Return the date of the Monday starting ISO week `week` of `year`."""
    jan1 = date(year, 1, 1)
    offset = jan1.weekday()
    # Jan 1 falling on Fri/Sat/Sun belongs to the previous ISO year's last
    # week: jump forward to the next Monday.  Otherwise step back to the
    # Monday of Jan 1's own week.
    if offset > 3:
        monday = jan1 + timedelta(7 - offset)
    else:
        monday = jan1 - timedelta(offset)
    return monday + timedelta(days=(week - 1) * 7)
def time_minute_now():
    # Current UTC time expressed as a minute-of-week offset.
    return time_to_minute(datetime.utcnow())
#
# time_to_utc takes the nominal weekday (sun, mon, tue, wed, thu, fri, sat)
# and a 12 hour time hh:mm [ap]m and converts it to our absolute units
# with respect to the timestamp in the configuration file
#
def time_to_utc(day_str, hour):
    """Convert a weekday name + 12-hour clock string into a UTC minute-of-week.

    day_str: sun..sat (case-insensitive); hour: e.g. "9am" or "9:30pm".
    Returns False on unparseable input.
    """
    global g_config
    try:
        day_number = ['sun','mon','tue','wed','thu','fri','sat'].index(day_str.lower())
    except:
        return False
    local = day_number * (60 * 24)
    # Two formats: bare hour ("9am") or hour:minute ("9:30pm").
    time_re_solo = re.compile('(\d{1,2})([ap])m', re.I)
    time_re_min = re.compile('(\d{1,2}):(\d{2})([ap])m', re.I)
    time = time_re_solo.match(hour)
    if time:
        local += int(time.groups()[0]) * 60
    else:
        time = time_re_min.match(hour)
        if time:
            local += int(time.groups()[0]) * 60
            local += int(time.groups()[1])
    if not time:
        return False
    # NOTE(review): "12am"/"12pm" are not special-cased, so 12pm maps to
    # 24:00 rather than 12:00 — confirm whether feed URLs ever use 12.
    if time.groups()[-1] == 'p':
        local += (12 * 60)
    # Shift station-local minutes to UTC using the cached tz offset.
    utc = local + time_get_offset()
    return utc
#
# time_get_offset contacts the goog, giving a longitude and lattitude and gets the time
# offset with regard to the UTC. There's a sqlite cache entry for the offset.
#
def time_get_offset():
    """Return the station's UTC offset in minutes.

    Cached in the kv store for 24 hours; on a cache miss, the Google Time
    Zone API is queried with the lat/long from the config.  Returns 0 when
    the lookup fails.
    """
    offset = db_get('offset', expiry = 60 * 60 * 24)
    if not offset:
        when = int(time.time())
        # NOTE(review): hard-coded API key checked into source — rotate it
        # and move it into the config file.
        api_key = 'AIzaSyBkyEMoXrSYTtIi8bevEIrSxh1Iig5V_to'
        # BUG FIX: the query string had been mangled by an HTML-entity round
        # trip ("...%s×tamp=%d..." — "&times" collapsed into "×"); restore
        # the literal "&timestamp=" parameter the API requires.
        url = "https://maps.googleapis.com/maps/api/timezone/json?location=%s,%s&timestamp=%d&key=%s" % (g_config['lat'], g_config['long'], when, api_key)
        stream = urllib2.urlopen(url)
        data = stream.read()
        opts = json.loads(data)
        if opts['status'] == 'OK':
            logging.info("Location: %s | offset: %s" % (opts['timeZoneId'], opts['rawOffset']))
            offset = int(opts['rawOffset']) / 60
            db_set('offset', offset)
        else:
            offset = 0
    return int(offset)
##
## Database Related functions
##
#
# db_incr increments some key in the database by some value. It is used
# to maintain statistical counters.
#
def db_incr(key, value = 1):
    """Add `value` to the counter stored under `key`, creating it on first use.

    The bare except doubles as the "row already exists" branch: the INSERT
    trips the UNIQUE constraint on key, and we fall back to an UPDATE.
    """
    db = db_connect()
    try:
        db['c'].execute('insert into kv(value, key) values(?, ?)', (value, key))
    except:
        db['c'].execute('update kv set value = value + ? where key = ?', (value, key))
    db['conn'].commit()
# db_set sets (or replaces) a given key to a specific value.
def db_set(key, value):
    """Upsert `value` under `key` in the kv table; returns the value."""
    db = db_connect()
    # From http://stackoverflow.com/questions/418898/sqlite-upsert-not-insert-or-replace
    res = db['c'].execute('''
        INSERT OR REPLACE INTO kv (key, value, created_at)
        VALUES (
            COALESCE((SELECT key FROM kv WHERE key = ?), ?),
            ?,
            current_timestamp
        )''', (key, key, value))
    db['conn'].commit()
    return value
# db_get retrieves a value from the database, tentative on the expiry
def db_get(key, expiry=0):
    """Fetch `key` from the kv table, or False when missing (or expired).

    expiry > 0 first deletes the row if it is older than `expiry` seconds.
    """
    db = db_connect()
    if expiry > 0:
        # If we let things expire, we first sweep for it.
        #
        # BUG FIX: the old predicate was `created_at < (current_timestamp - ?)`.
        # current_timestamp is TEXT in SQLite; subtracting an integer degrades
        # it to a numeric, and a TEXT created_at always orders *after* any
        # numeric, so the sweep never deleted anything.  Use datetime() with a
        # seconds modifier instead.
        db['c'].execute("delete from kv where key = ? and created_at < datetime('now', ?)", (key, '-%d seconds' % expiry))
        db['conn'].commit()
    res = db['c'].execute('select value, created_at from kv where key = ?', (key, )).fetchone()
    if res:
        return res[0]
    return False
#
# db_connect is a "singleton pattern" or some other fancy $10-world style of maintaining
# the database connection throughout the execution of the script.
#
def db_connect():
    """Open (once) and memoize the sqlite connection + cursor in g_db.

    Creates the intents and kv tables on first use and returns the
    {'conn': ..., 'c': ...} handle dict.
    """
    global g_db
    if 'conn' not in g_db:
        conn = sqlite3.connect('config.db')
        g_db = {'conn': conn, 'c': conn.cursor()}
        schema = [
            """CREATE TABLE IF NOT EXISTS intents(
                id INTEGER PRIMARY KEY,
                key TEXT UNIQUE,
                start INTEGER,
                end INTEGER,
                read_count INTEGER DEFAULT 0,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                accessed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )""",
            """CREATE TABLE IF NOT EXISTS kv(
                id INTEGER PRIMARY KEY,
                key TEXT UNIQUE,
                value TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )""",
        ]
        for statement in schema:
            g_db['c'].execute(statement)
        g_db['conn'].commit()
    return g_db
def db_register_intent(minute, duration):
    """Record (or refresh) a listener intent for [minute, minute + duration].

    First request inserts a row; repeats bump read_count and accessed_at, so
    intent expiry (see stream_should_be_recording) is usage-driven.
    Returns the cursor's lastrowid.
    """
    db = db_connect()
    key = str(minute) + ':' + str(duration)
    res = db['c'].execute('select id from intents where key = ?', (key, )).fetchone()
    if res == None:
        db['c'].execute('insert into intents(key, start, end) values(?, ?, ?)', (key, minute, minute + duration))
    else:
        db['c'].execute('update intents set read_count = read_count + 1, accessed_at = (current_timestamp) where id = ?', (res[0], ))
    db['conn'].commit()
    return db['c'].lastrowid
##
## Storage and file related
##
# Get rid of files older than archivedays
def file_prune():
    """Delete files in the storage root older than the archivedays setting."""
    global g_config
    # NOTE(review): this db handle is opened but never used here.
    db = db_connect()
    duration = int(g_config['archivedays']) * 60 * 60 * 24
    cutoff = time.time() - duration
    # Dumping old streams
    count = 0
    for f in os.listdir('.'):
        # NOTE(review): we already chdir()'d into storage (read_config), yet
        # the storage prefix is prepended again — verify this resolves to an
        # existing path, otherwise isfile() is always False and nothing is
        # ever pruned.
        entry = g_config['storage'] + f
        if os.path.isfile(entry) and os.path.getctime(entry) < cutoff:
            logging.debug("Prune: %s" % entry)
            os.unlink(entry)
            count += 1
    logging.info("Found %d files older than %s days." % (count, g_config['archivedays']))
#
# Given a start week minute this looks for streams in the storage
# directory that match it - regardless of duration ... so it may return
# partial shows results.
#
def file_find_streams(start, duration):
    """Find recordings covering the week-minute window [start, start+duration).

    start == -1 matches everything.  Adjacent partial files are accumulated
    and stitched/sliced lazily; returns a list of audio_stream_info() dicts.
    """
    stream_list = []
    end = (start + duration) % 10080
    # We want to make sure we only get the edges so we need to have state
    # between the iterations.
    next_valid_start_minute = 0
    current_week = 0
    file_list = glob('streams/*.mp3')
    # Sorting by date (see http://stackoverflow.com/questions/23430395/glob-search-files-in-date-order)
    file_list.sort(key=os.path.getmtime)
    stitch_list = []
    for filename in file_list:
        i = audio_stream_info(filename)
        # Continuation of the current show: accumulate it for stitching.
        if i['start_minute'] < next_valid_start_minute and i['week'] == current_week:
            stitch_list.append(filename)
            continue
        # We are only looking for starting edges of the stream
        #
        # If we started recording before this is fine as long as we ended recording after our start
        if start == -1 or (i['start_minute'] < start and i['end_minute'] > start) or (i['start_minute'] > start and i['start_minute'] < end):
            fname = audio_stitch_and_slice(stitch_list, start, duration)
            print fname
            stitch_list = [filename]
            # TODO: May need to % 10080 this
            next_valid_start_minute = (start + duration)
            current_week = i['week']
            if fname:
                stream_list.append(audio_stream_info(fname))
    # Flush whatever was still being accumulated when the listing ended.
    fname = audio_stitch_and_slice(stitch_list, start, duration)
    if fname:
        stream_list.append(audio_stream_info(fname))
    return stream_list
#
# This takes a number of params:
#
# showname - from the incoming request url
# feedList - this is a list of tuples in the form (date, file)
# corresponding to the, um, date of recording and filename
#
# It obviously returns an xml file ... I mean duh.
#
# In the xml file we will lie about the duration to make life easier
#
def server_generate_xml(showname, feed_list, duration, start_minute):
    """Build a podcast RSS document for one show.

    showname comes from the request URL; feed_list is a list of
    audio_stream_info() dicts; duration is in minutes.  Durations and sizes
    in the feed are estimates ("we will lie about the duration") because the
    slices are produced lazily on first download.  Returns serialized XML.
    """
    global g_config
    base_url = 'http://%s.indycast.net/' % g_config['callsign']
    callsign = g_config['callsign']
    nsmap = {
        'dc': 'http://purl.org/dc/elements/1.1/',
        'media': 'http://search.yahoo.com/mrss/',
        'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd',
        'feedburner': 'http://rssnamespace.org/feedburner/ext/1.0'
    }
    root = ET.Element("rss", nsmap = nsmap)
    root.attrib['version'] = '2.0'
    frame_length = 1152.0 / 44100
    channel = ET.SubElement(root, "channel")
    # Channel-level metadata.
    for k,v in {
        '{%s}summary' % nsmap['itunes']: showname,
        '{%s}subtitle' % nsmap['itunes']: showname,
        '{%s}category' % nsmap['itunes']: 'podcast',
        'title': showname,
        'link': base_url,
        'copyright': callsign,
        'description': showname,
        'language': 'en'
    }.items():
        ET.SubElement(channel, k).text = v
    # In our feed, we construct theoretical files which will be stitched and sliced
    # together on-demand (lazy) if the user requests it.
    for feed in feed_list:
        # This is our file ... we have a week number, which is what we need.
        # By the existence of this feed, we are essentially saying that we have
        # a specific week at a specific minute ... so we construct that as the
        # lazy-file name
        # Start with the start_date of the feed
        #start_of_week = time_week_to_iso(feed['start_date'].year, feed['week'])
        # now we add the minute offset to get a datetime version
        #dt_start_of_stream = start_of_week + timedelta(minutes = start_minute)
        # and then make a unix time stamp from it. This will be the numeric on the file that
        # are committing to making
        #str_start_of_stream = dt_start_of_stream.strftime('%s')
        #file_name = "%s-%s_%d.mp3" % (callsign, str_start_of_stream, duration)
        file_name = feed['name']
        link = "%s%s" % (base_url, file_name)
        item = ET.SubElement(channel, 'item')
        # Per-episode metadata.
        for k,v in {
            '{%s}explicit' % nsmap['itunes']: 'no',
            '{%s}author' % nsmap['itunes']: callsign,
            '{%s}duration' % nsmap['itunes']: str(duration * 60),
            '{%s}summary' % nsmap['itunes']: showname,
            '{%s}creator' % nsmap['dc']: callsign,
            '{%s}origEnclosureLink' % nsmap['feedburner']: link,
            '{%s}origLink' % nsmap['feedburner']: base_url,
            'description': showname,
            'pubDate': feed['start_date'].strftime("%Y-%m-%d %H:%M:%S"),
            'title': showname,
            'link': link,
            'copyright': callsign,
            'guid': callsign + file_name
        }.items():
            ET.SubElement(item, k).text = v
        ET.SubElement(item, 'guid', isPermaLink = "false").text = base_url
        # fileSize and length will be guessed based on 209 bytes covering
        # frame_length seconds of audio (128k/44.1k no id3)
        content = ET.SubElement(item, '{%s}content' % nsmap['media'])
        content.attrib['url'] = link
        content.attrib['fileSize'] = str(209 * (duration * 60.0) / frame_length)
        content.attrib['type'] = 'audio/mpeg3'
        # The length of the audio we will just take as the duration
        content = ET.SubElement(item, 'enclosure')
        content.attrib['url'] = link
        content.attrib['length'] = str(duration * 60)
        content.attrib['type'] = 'audio/mpeg3'
    tree = ET.ElementTree(root)
    return ET.tostring(tree, xml_declaration=True, encoding="utf-8")
def server_error(errstr):
    # Uniform JSON error payload with an HTTP 500 status.
    return jsonify({'result': False, 'error':errstr}), 500
def server_manager(config):
    """Run the Flask front end: slice downloads, a heartbeat, and show feeds.

    Spawned as a child Process by stream_manager; `config` is the parsed
    g_config dict.
    """
    app = Flask(__name__)
    #
    # The path is (unix timestamp)_(duration in minutes). If it exists (as in we had
    # previously generated it) then we can trivially send it. Otherwise we need
    # to create it.
    #
    @app.route('/slices/<path:path>')
    def send_stream(path):
        base_dir = config['storage'] + 'slices/'
        fname = base_dir + path
        # If the file doesn't exist, then we need to slice it and create it based on our query.
        if not os.path.isfile(fname):
            # 1. Find the closest timestamp
            # Even though the file doesn't exist, we'll still get
            # a partial return on getting it's "info"
            info = audio_stream_info(fname)
            # Now we see what our start stream should be
            start_stream = file_find_streams(info['start_minute'], info['duration_sec'] / 60)
            # slice if needed
            # add up the timestamp
            # NOTE(review): this branch is unfinished — it returns True
            # instead of a file response.
            return True
        return flask.send_from_directory(base_dir, path)
    @app.route('/heartbeat')
    def heartbeat():
        # Localhost-only diagnostic dump of db, disk, and stream state.
        global g_start_time
        if request.remote_addr != '127.0.0.1':
            return '', 403
        db = db_connect()
        stats = {
            'intents': [record for record in db['c'].execute('select * from intents').fetchall()],
            'kv': [record for record in db['c'].execute('select * from kv').fetchall()],
            'uptime': int(time.time() - g_start_time),
            'disk': sum(os.path.getsize(f) for f in os.listdir('.') if os.path.isfile(f)),
            'streams': file_find_streams(-1, 0),
            'config': config
        }
        return jsonify(stats), 200
    @app.route('/<weekday>/<start>/<duration>/<showname>')
    def stream(weekday, start, duration, showname):
        # Duration is expressed either in minutes or in \d+hr\d+ minute
        re_minute = re.compile('^(\d+)$')
        re_hr_solo = re.compile('^(\d+)hr$', re.I)
        re_hr_min = re.compile('^(\d+)hr(\d+).*$', re.I)
        res = re_minute.match(duration)
        if res:
            duration = int(res.groups()[0])
        else:
            res = re_hr_solo.match(duration)
            if res:
                duration = int(res.groups()[0]) * 60
            else:
                res = re_hr_min.match(duration)
                if res:
                    duration = int(res.groups()[0]) * 60 + int(res.groups()[1])
        # This means we failed to parse
        if type(duration) is str:
            return server_error('duration "%s" is not set correctly' % duration)
        start_time = time_to_utc(weekday, start)
        if not start_time:
            return server_error('weekday and start times are not set correctly')
        # If we are here then it looks like our input is probably good.
        # Strip the .xml from the showname ... this will be used in our xml.
        showname = re.sub('.xml$', '', showname)
        # This will register the intent if needed for future recordings
        # (that is if we are in ondemand mode)
        db_register_intent(start_time, duration)
        # Look for streams that we have which match this query and duration.
        feed_list = file_find_streams(start_time, duration)
        # Then, taking those two things, make a feed list from them.
        return server_generate_xml(showname, feed_list, duration, start_time)
    if __name__ == '__main__':
        change_proc_name("ic-webserver")
        start = time.time()
        try:
            app.run(port = int(config['port']))
        except:
            # A failure within 5s of startup most likely means the port is taken.
            if time.time() - start < 5:
                print "Error, can't start server ... perhaps %s is already in use?" % config['port']
            shutdown()
##
## Stream management functions
##
# Query the database and see if we ought to be recording at this moment
def stream_should_be_recording():
    """True when a sufficiently fresh intent matches the current week-minute."""
    global g_config
    db = db_connect()
    current_minute = time_minute_now()
    # NOTE(review): the comparison looks inverted — `start >= now and
    # end <= now` can only match a zero-length interval; an intent *covering*
    # now would be `start <= now and end >= now`.  Also `expireafter` is
    # %-interpolated into the SQL rather than bound as a parameter — fine for
    # a trusted config file, but worth parameterizing.
    intent_count = db['c'].execute(
        """select count(*) from intents where
            start >= ? and
            end <= ? and
            accessed_at > datetime('now','-%s days')
        """ % g_config['expireafter'],
        (current_minute, current_minute)
    ).fetchone()[0]
    return intent_count != 0
# The curl interfacing that downloads the stream to disk
def stream_download(callsign, url, my_pid, fname):
    """Child-process entry point: stream `url` to `fname` via pycurl.

    Heartbeats the manager queue on every received chunk; the output file is
    opened lazily so an unreachable stream never leaves an open handle.
    """
    change_proc_name("ic-download")
    # nl is a mutable cell so cback (a closure) can rebind the file handle
    # (Python 2 has no `nonlocal`).
    nl = {'stream': False}
    def dl_stop(signal, frame):
        print fname
        sys.exit(0)
    def cback(data):
        global g_config, g_queue
        # Tell the manager we're alive before touching disk.
        g_queue.put(('heartbeat', True))
        if nl['stream'] == False:
            try:
                nl['stream'] = open(fname, 'w')
            except:
                logging.critical("Unable to open %s. Can't record. Must exit." % fname)
                sys.exit(-1)
        nl['stream'].write(data)
    # signal.signal(signal.SIGTERM, dl_stop)
    c = pycurl.Curl()
    c.setopt(c.URL, url)
    c.setopt(pycurl.WRITEFUNCTION, cback)
    try:
        c.perform()
    except:
        logging.warning("Couldn't resolve or connect to %s." % url)
    c.close()
    # Only close if the lazy open above actually happened.
    if type(nl['stream']) != bool:
        nl['stream'].close()
# The manager process that makes sure that the
# streams are running appropriately
def stream_manager():
    """Supervisor loop: keeps a downloader child alive, cascades overlapping
    downloads, prunes old files daily, and reacts to queue messages.

    Loops until a 'shutdown' message arrives on g_queue; returns False if a
    downloader could not be (re)started while recording was expected.
    """
    global g_queue, g_config
    callsign = g_config['callsign']
    url = g_config['stream']
    # Cascade scheme: every cascadetime seconds a fresh downloader is started
    # cascadebuffer seconds early, so consecutive recordings overlap.
    cascade_time = int(g_config['cascadetime'])
    cascade_buffer = int(g_config['cascadebuffer'])
    cascade_margin = cascade_time - cascade_buffer
    last_prune = 0
    last_success = 0
    mode_full = (g_config['mode'].lower() == 'full')
    b_shutdown = False
    should_record = mode_full
    # Number of seconds to be cycling
    cycle_time = int(g_config['cycletime'])
    process = False
    process_next = False
    server_pid = Process(target = server_manager, args=(g_config,))
    server_pid.start()
    fname = False
    # A wrapper function to start a download process
    def download_start(fname):
        global g_pid
        g_pid += 1
        logging.info("Starting cascaded downloader #%d. Next up in %ds" % (g_pid, cascade_margin))
        #
        # Although we are already in the callsign path, we want the file to be a self-contained
        # description of the content - not relying on the path to complete part of the story
        # of what it is.
        #
        # A zero-byte previous file means the last attempt never connected.
        if fname and os.path.getsize(fname) == 0:
            logging.info("Unsuccessful download. Removing %s." % fname)
            os.unlink(fname)
        fname = 'streams/' + callsign + "-" + str(int(time.time())) + ".mp3"
        process = Process(target = stream_download, args = (callsign, url, g_pid, fname))
        process.start()
        return [fname, process]
    while True:
        #
        # We cycle this to off for every run. By the time we go throug the queue so long
        # as we aren't supposed to be shutting down, this should be toggled to true.
        #
        flag = False
        yesterday = time.time() - 24 * 60 * 60
        # Daily housekeeping: prune old recordings, refresh the tz offset.
        if last_prune < yesterday:
            file_prune()
            last_prune = time.time()
            time_get_offset()
        while not g_queue.empty():
            what, value = g_queue.get(False)
            if what == 'shutdown':
                b_shutdown = True
            else:
                flag = True
        #
        # If we are not in full mode, then we should check whether we should be
        # recording right now according to our intents.
        #
        if not mode_full:
            should_record = stream_should_be_recording()
        if should_record:
            # Didn't respond in cycle_time seconds so we respawn
            if not flag:
                if process and process.is_alive():
                    process.terminate()
                process = False
            if not process and not b_shutdown:
                fname, process = download_start(fname)
                last_success = time.time()
            # If we've hit the time when we ought to cascade
            elif time.time() - last_success > cascade_margin:
                # And we haven't created the next process yet, then we start it now.
                if not process_next:
                    fname, process_next = download_start(fname)
            # If our last_success stream was more than cascade_time - cascade_buffer
            # then we start our process_next
            # If there is still no process then we should definitely bail.
            if not process:
                return False
        #
        # The only way for the bool to be toggled off is if we are not in full-mode ...
        # we get here if we should NOT be recording. So we make sure we aren't.
        #
        else:
            if process and process.is_alive():
                process.terminate()
            if process_next and process_next.is_alive():
                process_next.terminate()
            process_next = process = False
        #
        # This needs to be on the outside loop in case we are doing a cascade
        # outside of a full mode. In this case, we will need to shut things down
        #
        # If we are past the cascade_time and we have a process_next, then
        # we should shutdown our previous process and move the pointers around.
        #
        if time.time() - last_success > cascade_time and process:
            logging.info("Stopping cascaded downloader")
            process.terminate()
            # If the process_next is running then we move our last_success forward to the present
            last_success = time.time()
            # we rename our process_next AS OUR process
            process = process_next
            # And then clear out the old process_next pointer
            process_next = False
        # Increment the amount of time this has been running
        db_incr('uptime', cycle_time)
        time.sleep(cycle_time)
def read_config():
    """Parse CLI args + config file into g_config, prepare the storage tree,
    configure logging, and install the SIGINT handler.

    Side effects: creates the storage directories, chdir()s into the storage
    root, bumps the persistent run counter, and registers shutdown() for
    SIGINT.
    """
    global g_config
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config", default="./indy_config.txt", help="Configuration file (default ./indy_config.txt)")
    parser.add_argument("-v", "--version", help="Version info")
    args = parser.parse_args()
    Config = ConfigParser.ConfigParser()
    Config.read(args.config)
    g_config = ConfigSectionMap('Main', Config)
    defaults = {
        # The log level to be put into the indycast.log file.
        'loglevel': 'WARN',
        # The recording mode, either 'full' meaning to record everything, or != 'full'
        # meaning to record only when an intent is matched.
        'mode': 'full',
        # The relative, or absolute directory to put things in
        'storage': 'recording',
        # The (day) time to expire an intent to record
        'expireafter': '45',
        # The TCP port to run the server on
        'port': '5000',
        # The (day) duration we should be archiving things.
        'archivedays': '7',
        # The (second) time in looking to see if our stream is running
        'cycletime': '7',
        # The (second) time to start a stream BEFORE the lapse of the cascade-time
        'cascadebuffer': 15,
        # The (second) time between cascaded streams
        'cascadetime': 60 * 15
    }
    for k, v in defaults.items():
        if k not in g_config:
            g_config[k] = v
    if not os.path.isdir(g_config['storage']):
        try:
            # If I can't do this, that's fine.
            os.mkdir(g_config['storage'])
        except:
            # We make it from the current directory
            g_config['storage'] = defaults['storage']
            os.mkdir(g_config['storage'])
    # We go to the callsign level in order to store multiple station feeds on a single
    # server in a single parent directory without forcing the user to decide what goes
    # where.
    g_config['storage'] += '/%s/' % g_config['callsign']
    g_config['storage'] = re.sub('\/+', '/', g_config['storage'])
    if not os.path.isdir(g_config['storage']):
        os.mkdir(g_config['storage'])
    # We have a few sub directories for storing things
    for subdir in ['streams', 'stitches', 'slices']:
        if not os.path.isdir(g_config['storage'] + subdir):
            os.mkdir(g_config['storage'] + subdir)
    # Now we try to do all this stuff again
    if os.path.isdir(g_config['storage']):
        #
        # There's a bug after we chdir, where the multiprocessing is trying to grab the same
        # invocation as the initial argv[0] ... so we need to make sure that if a user did
        # ./blah this will be maintained.
        #
        if not os.path.isfile(g_config['storage'] + __file__):
            os.symlink(os.path.abspath(__file__), g_config['storage'] + __file__)
        os.chdir(g_config['storage'])
    else:
        logging.warning("Can't find %s. Using current directory." % g_config['storage'])
    # From https://docs.python.org/2/howto/logging.html
    numeric_level = getattr(logging, g_config['loglevel'].upper(), None)
    if not isinstance(numeric_level, int):
        # BUG FIX: this previously referenced the bare, undefined name
        # `loglevel`, which turned a bad setting into a NameError instead of
        # the intended ValueError.
        raise ValueError('Invalid log level: %s' % g_config['loglevel'])
    logging.basicConfig(level=numeric_level, filename='indycast.log', datefmt='%Y-%m-%d %H:%M:%S', format='%(asctime)s %(message)s')
    #
    # Increment the number of times this has been run so we can track the stability of remote
    # servers and instances.
    #
    db_incr('runcount')
    signal.signal(signal.SIGINT, shutdown)
if __name__ == "__main__":
    # From http://stackoverflow.com/questions/25504149/why-does-running-the-flask-dev-server-run-itself-twice
    # Werkzeug's reloader re-executes the script; only its child (the actual
    # serving process) has WERKZEUG_RUN_MAIN set.
    if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
        change_proc_name("ic-main")
        server_manager(g_config)
    else:
        read_config()
        change_proc_name("ic-manager")
        stream_manager()
more time work
#!/usr/bin/python -O
import argparse
import binascii
import ConfigParser
import json
import logging
import lxml.etree as ET
import math
import os
import pycurl
import re
import setproctitle as SP
import signal
import socket
import sqlite3
import struct
import sys
import time
#
# This is needed to force ipv4 on ipv6 devices. It's sometimes needed
# if there isn't a clean ipv6 route to get to the big wild internet.
# In these cases, a pure ipv6 route simply will not work. People aren't
# always in full control of every hop ... so it's much safer to force
# ipv4 then optimistically cross our fingers.
#
origGetAddrInfo = socket.getaddrinfo
def getAddrInfoWrapper(host, port, family=0, socktype=0, proto=0, flags=0):
return origGetAddrInfo(host, port, socket.AF_INET, socktype, proto, flags)
# Replace the original socket.getaddrinfo by our version
socket.getaddrinfo = getAddrInfoWrapper
import urllib2
from datetime import datetime, timedelta, date
from glob import glob
from flask import Flask, request, jsonify
import flask
from multiprocessing import Process, Queue
g_start_time = time.time()
g_queue = Queue()
g_config = {}
g_db = {}
g_pid = 0
# From https://wiki.python.org/moin/ConfigParserExamples
def ConfigSectionMap(section, Config):
    """Build a plain dict out of one section of a ConfigParser object.

    Options that cannot be read are stored as None so the caller can
    still see that the key existed.  Returns the dictionary.
    """
    dict1 = {}
    options = Config.options(section)
    for option in options:
        try:
            dict1[option] = Config.get(section, option)
            if dict1[option] == -1:
                logging.info("skip: %s" % option)
        # Bug fix: was a bare `except:`, which also swallows SystemExit /
        # KeyboardInterrupt.  Only trap real errors from the parser.
        except Exception:
            logging.warning("exception on %s!" % option)
            dict1[option] = None
    return dict1
# Sets a more human-readable process name for the various parts of the system to be viewed in top/htop
def change_proc_name(what):
    # setproctitle changes what ps/top display for this process.
    SP.setproctitle(what)
    print "[%s:%d] Starting" % (what, os.getpid())
# shutdown is hit on the keyboard interrupt
def shutdown(signal = 15, frame = False):
    # Signal handler AND general cleanup entry point: closes the sqlite
    # connection, tells the manager loop to stop, and exits this process.
    # NOTE(review): the `signal` parameter shadows the imported signal
    # module inside this function - confirm that was intentional.
    global g_db, g_queue, g_start_time
    title = SP.getproctitle()
    print "[%s:%d] Shutting down" % (title, os.getpid())
    if 'conn' in g_db:
        g_db['conn'].close()
    logging.info("[%s:%d] Shutting down through keyboard interrupt" % (title, os.getpid()))
    # Only the manager owns the uptime metric.
    if title == 'ic-manager':
        logging.info("Uptime: %ds", time.time() - g_start_time)
    # Let the manager loop (and children draining the queue) know we are done.
    g_queue.put(('shutdown', True))
    sys.exit(0)
##
## Audio related functions
##
# This determines the date the thing starts,
# the minute time it starts, and the duration
def audio_stream_info(fname):
    """Derive stream metadata (start date/minute, duration) from a file name.

    File names look like "<callsign>-<unixtime>[._]..." and optionally
    "..._<minutes>.mp3".  When no timestamp is present, the date-derived
    fields fall back to 0 instead of crashing (the original raised
    AttributeError calling isocalendar() on the integer 0).
    """
    ts_re = re.compile(r'-(\d*)[.|_]')
    ts = ts_re.findall(fname)
    duration = 0
    start_minute = 0
    start_date = 0
    # Guard against an empty capture: \d* can legally match zero digits.
    if ts and ts[0]:
        unix_time = int(ts[0])
        start_minute = time_to_minute(unix_time)
        start_date = datetime.fromtimestamp(unix_time)
    try:
        duration = audio_time_fast(fname)
    except Exception:
        # If we can't find a duration then we try to see if it's in the file name
        ts_re_duration = re.compile(r'_(\d*).mp3')
        ts = ts_re_duration.findall(fname)
        if ts:
            duration = int(ts[0]) * 60
    return {
        # The week number (0 when the name carried no parseable timestamp)
        'week': start_date.isocalendar()[1] if start_date else 0,
        'name': fname,
        'start_minute': start_minute,
        'start_date': start_date,
        'end_minute': (duration / 60.0 + start_minute) % 10080,
        'duration_sec': duration,
    }
#
# Open up an mp3 file, find all the blocks, the byte offset of the blocks, and if they
# are audio blocks, construct a crc32 mapping of some given beginning offset of the audio
# data ... this is intended for stitching.
#
def audio_crc(fname, blockcount = -1):
    # Returns [frame_sig, start_byte]: one crc32 per audio frame plus the byte
    # offset where that frame starts.  blockcount == -1 scans the whole file;
    # a positive value stops after that many blocks.
    frame_sig = []
    start_byte = []
    # MPEG-1 Layer III lookup tables, indexed straight from header bits.
    freqTable = [ 44100, 48000, 32000, 0 ]
    brTable = [
        0, 32, 40, 48,
        56, 64, 80, 96,
        112, 128, 160, 192,
        224, 256, 320, 0
    ]
    f = open(fname, 'rb')
    while blockcount != 0:
        blockcount -= 1
        frame_start = f.tell()
        header = f.read(2)
        if header:
            # 0xfffb / 0xfffa: MPEG-1 Layer III frame sync bytes.
            if header == '\xff\xfb' or header == '\xff\xfa':
                # Third header byte: bits 7-4 bitrate index, 3-2 sample-rate
                # index, bit 1 padding flag.
                b = ord(f.read(1))
                samp_rate = freqTable[(b & 0x0f) >> 2]
                bit_rate = brTable[b >> 4]
                pad_bit = (b & 0x3) >> 1
                # from http://id3.org/mp3Frame
                frame_size = (144000 * bit_rate / samp_rate) + pad_bit
                # Rest of the header
                throw_away = f.read(1)
                # Get the signature: crc32 over the first 32 payload bytes.
                crc = binascii.crc32(f.read(32))
                frame_sig.append(crc)
                start_byte.append(frame_start)
                # Move forward the frame f.read size + 4 byte header
                throw_away = f.read(frame_size - 36)
            # ID3 tag for some reason
            elif header == '\x49\x44':
                # Rest of the header
                throw_away = f.read(4)
                # Quoting http://id3.org/d3v2.3.0
                #
                # The ID3v2 tag size is encoded with four bytes where the most significant bit
                # (bit 7) is set to zero in every byte, making a total of 28 bits. The zeroed
                # bits are ignored, so a 257 bytes long tag is represented as $00 00 02 01.
                #
                candidate = struct.unpack('>I', f.read(4))[0]
                # NOTE(review): the top syncsafe byte is discarded here, so tags
                # larger than 2^21 bytes would be mis-sized - confirm acceptable.
                size = ((candidate & 0x007f0000) >> 2 ) | ((candidate & 0x00007f00) >> 1 ) | (candidate & 0x0000007f)
                f.read(size)
            # ID3 TAG -- 128 bytes long
            elif header == '\x54\x41':
                # We've already read 2 so we can go 126 forward
                f.read(126)
            elif len(header) == 1:
                # we are at the end of file, but let's just continue.
                # NOTE(review): bare `next` is a no-op expression, not `continue`;
                # the loop proceeds anyway - confirm intent.
                next
            else:
                # This helps me debug mp3 files that I'm not reading correctly.
                print "%s:%s:%s:%s %s %d" % (binascii.b2a_hex(header), header, f.read(5), fname, hex(f.tell()), len(start_byte) * (1152.0 / 44100) / 60)
                break
        else:
            break
    f.close()
    return [frame_sig, start_byte]
def audio_time_fast(fname):
    # Estimate the duration (seconds) of an mp3 by inspecting only the first
    # two frames.  Raises (IndexError/ZeroDivisionError) on malformed input;
    # callers rely on that and wrap us in try/except.
    crc32, offset = audio_crc(fname, 2)
    # in the fast method we get the first two frames, find out the offset
    # difference between them, take the length of the file, divide it by that
    # and then presume that will be the framecount
    frame_size = offset[1] - offset[0]
    frame_count_est = os.path.getsize(fname) / frame_size
    # 1152 samples per frame at an assumed 44.1kHz sample rate.
    return (1152.0 / 44100) * frame_count_est
#
# Given a start_file in a directory and a duration, this function will seek out
# ajacent files if necessary and serialize them accordingly, and then return the
# file name of an audio slice that is the combination of them.
#
def audio_stitch_and_slice(file_list, start_minute, duration_minute):
    # NOTE(review): failure is signalled inconsistently - False for an empty
    # list but -1 for a failed stitch; callers truth-test the result, and -1
    # is truthy.  Confirm which sentinel downstream code expects.
    if len(file_list) == 0:
        return False
    # We presume that there is a file list we need to make
    stitched_name = audio_stitch(file_list, force_stitch = True)
    if stitched_name:
        info = audio_stream_info(stitched_name)
    else:
        logging.warn("Unable to stitch file list")
        return -1
    # After we've stitched together the audio then we start our slice
    # by figuring our the start_minute of the slice, versus ours
    start_slice = max(info['start_minute'] - start_minute, 0)
    # Now we need to take the duration of the stream we want, in minutes, and then
    # make sure that we don't exceed the length of the file.
    duration_slice = min(duration_minute, start_slice + info['duration_sec'] / 60.0)
    sliced_name = audio_slice(stitched_name, start_minute = start_slice, duration_minute = duration_slice)
    return sliced_name
#
# audio_serialize takes a list of ordinal tuples and makes one larger mp3 out of it.
# The tuple format is (fila_name, byte_start, byte_end) where byte_end == -1 means
# "the whole file".
#
def audio_serialize(file_list, duration_min):
    """Concatenate the given byte ranges into one mp3 under stitches/.

    Returns the path of the stitched file.  Bug fix: the loop previously
    reused the variable `name` for both the output path and the per-tuple
    input path, so the function returned the LAST INPUT file name instead
    of the stitched output.  The loop variable is now distinct.
    """
    first_file = file_list[0][0]
    # Our file will be the first one_duration.mp3
    out_name = "stitches/%s_%d.mp3" % (first_file[first_file.index('/') + 1:first_file.rindex('.')], duration_min)
    # If the file exists, then we just return it
    if os.path.isfile(out_name):
        return out_name
    out = open(out_name, 'wb+')
    for in_name, start, end in file_list:
        f = open(in_name, 'rb')
        f.seek(start)
        if end == -1:
            # -1 means "copy through to the end of the file".
            out.write(f.read())
        else:
            out.write(f.read(end - start))
        f.close()
    out.close()
    return out_name
#
# Take some mp3 file name_in and then create a new one based on the start and end times
# by finding the closest frames and just doing an extraction.
#
def audio_slice(name_in, start_minute, end_minute = -1, duration_minute = -1):
    # Exactly one of end_minute / duration_minute must be supplied; the other
    # is derived from it.
    if duration_minute == -1:
        duration_minute = end_minute - start_minute
    else:
        end_minute = start_minute + duration_minute
    name_out = "slices/%s_%d.mp3" % (name_in[name_in.index('/') + 1:name_in.rindex('_')], duration_minute)
    start_sec = start_minute * 60.0
    end_sec = end_minute * 60.0
    # Memoized on disk: a previously cut slice is reused as-is.
    if os.path.isfile(name_out):
        return name_out
    # Most common frame-length ... in practice, I haven't
    # seen other values in the real world
    frame_length = (1152.0 / 44100)
    # Full frame index of the source; we cut on frame boundaries only.
    crc32, offset = audio_crc(name_in)
    frame_start = int(math.floor(start_sec / frame_length))
    frame_end = int(math.ceil(end_sec / frame_length))
    # NOTE(review): frame_end can exceed len(offset)-1 for requests past the
    # end of the file, which would raise IndexError - confirm callers clamp.
    out = open(name_out, 'wb+')
    fin = open(name_in, 'rb')
    fin.seek(offset[frame_start])
    out.write(fin.read(offset[frame_end] - offset[frame_start]))
    fin.close()
    out.close()
    return name_out
#
# audio_stitch takes a list of files and then attempt to seamlessly stitch them
# together by looking at their crc32 checksums of the data payload in the blocks.
#
def audio_stitch(file_list, force_stitch = False):
    # Returns the serialized (stitched) file name, or implicitly None when
    # no adjacent pair could be joined (len(args) <= 1).
    first = {'name': file_list[0]}
    duration = 0
    frame_length = (1152.0 / 44100)
    crc32, offset = audio_crc(first['name'])
    first['crc32'] = crc32
    first['offset'] = offset
    # Each args entry is (file, byte_start, byte_end) for audio_serialize.
    args = [(first['name'], 0, first['offset'][-1])]
    duration += len(first['offset']) * frame_length
    for name in file_list[1:]:
        second = {'name': name}
        crc32, offset = audio_crc(name)
        second['crc32'] = crc32
        second['offset'] = offset
        isFound = True
        try:
            # Locate where the tail of the first file re-appears in the second,
            # then verify a run of 4 frames actually lines up.
            pos = second['crc32'].index(first['crc32'][-2])
            for i in xrange(5, 1, -1):
                if second['crc32'][pos - i + 2] != first['crc32'][-i]:
                    isFound = False
                    print "Indices do not match between %s and %s" % (first['name'], second['name'])
                    break
        except:
            # No overlap found; with force_stitch we join anyway at frame 1.
            logging.warn("Cannot find indices between %s and %s" % (first['name'], second['name']))
            pos = 1
            isFound = force_stitch
        if isFound:
            args.append((second['name'], second['offset'][pos], second['offset'][-2]))
            duration += (len(second['offset']) - pos - 1) * frame_length
            first = second
            continue
        # A single failed join aborts the rest of the chain.
        break
    # Since we end at the last block, we can safely pass in a file1_stop of 0
    if len(args) > 1:
        # And then we take the offset in the second['crc32'] where things began
        return audio_serialize(args, duration_min = int(duration / 60))
##
## Time related functions
##
def time_to_minute(unix_time):
    # Map a unix timestamp (int) or datetime to its minute-of-week,
    # where Monday 00:00 is 0 and Sunday 23:59 is 10079.
    moment = unix_time
    if type(moment) is int:
        moment = datetime.fromtimestamp(moment)
    minutes_into_day = moment.hour * 60 + moment.minute
    return moment.weekday() * 1440 + minutes_into_day
# from http://code.activestate.com/recipes/521915-start-date-and-end-date-of-given-week/
def time_week_to_iso(year, week):
    # Return the Monday (as a date) that begins ISO-style week `week`
    # of `year`.  First snap Jan 1st to the Monday of its ISO week:
    # late-week Jan 1sts roll forward, early-week ones roll back.
    anchor = date(year, 1, 1)
    if anchor.weekday() > 3:
        anchor = anchor + timedelta(7 - anchor.weekday())
    else:
        anchor = anchor - timedelta(anchor.weekday())
    # Then step forward (week - 1) whole weeks.
    return anchor + timedelta(days = (week - 1) * 7)
def time_sec_now():
    # Unix seconds for "now" shifted into the station's local timezone
    # (offset fetched/cached by time_get_offset).
    # NOTE(review): strftime('%s') is a platform-specific extension (glibc);
    # confirm deployment targets support it.
    return int((datetime.utcnow() + timedelta(minutes = time_get_offset())).strftime('%s'))

def time_minute_now():
    # Minute-of-week for "now" in UTC; the station offset is NOT applied here.
    return time_to_minute(datetime.utcnow())
#
# time_to_utc takes the nominal weekday (sun, mon, tue, wed, thu, fri, sat)
# and a 12 hour time hh:mm [ap]m and converts it to our absolute units
# with respect to the timestamp in the configuration file
#
def time_to_utc(day_str, hour):
    # Returns a minute-of-week integer, or False when either input fails to
    # parse.  NOTE(review): 12am maps to 720 and 12pm to 1440 under this
    # arithmetic (no special-casing of the 12 o'clock hour) - confirm the
    # station schedules never use them, or that this is intended.
    global g_config
    try:
        day_number = ['sun','mon','tue','wed','thu','fri','sat'].index(day_str.lower())
    except:
        return False
    local = day_number * (60 * 24)
    # "9pm" style first, then "9:30pm" style.
    time_re_solo = re.compile('(\d{1,2})([ap])m', re.I)
    time_re_min = re.compile('(\d{1,2}):(\d{2})([ap])m', re.I)
    # `time` here shadows the imported time module for the rest of this scope.
    time = time_re_solo.match(hour)
    if time:
        local += int(time.groups()[0]) * 60
    else:
        time = time_re_min.match(hour)
        if time:
            local += int(time.groups()[0]) * 60
            local += int(time.groups()[1])
    if not time:
        return False
    # The last capture group is the [ap] marker in both regexes.
    if time.groups()[-1] == 'p':
        local += (12 * 60)
    utc = local + time_get_offset()
    return utc
#
# time_get_offset contacts the goog, giving a longitude and lattitude and gets the time
# offset with regard to the UTC. There's a sqlite cache entry for the offset.
#
def time_get_offset(force = False):
    # Returns the UTC offset in minutes for the station's configured lat/long,
    # cached in sqlite for a day.  force=True bypasses the cache.
    offset = db_get('offset', expiry = 60 * 60 * 24)
    if not offset or force:
        when = int(time.time())
        # SECURITY(review): API credential committed in source - should be
        # moved to the config file / environment.
        api_key='AIzaSyBkyEMoXrSYTtIi8bevEIrSxh1Iig5V_to'
        url = "https://maps.googleapis.com/maps/api/timezone/json?location=%s,%s&timestamp=%d&key=%s" % (g_config['lat'], g_config['long'], when, api_key)
        # NOTE(review): no timeout and no try/except here - a hung or failed
        # request propagates to the caller.  Confirm that is acceptable.
        stream = urllib2.urlopen(url)
        data = stream.read()
        opts = json.loads(data)
        if opts['status'] == 'OK':
            logging.info("Location: %s | offset: %s" % (opts['timeZoneId'], opts['rawOffset']))
            # rawOffset/dstOffset arrive in seconds; store minutes.
            offset = (int(opts['rawOffset']) + int(opts['dstOffset'])) / 60
            db_set('offset', offset)
        else:
            offset = 0
    return int(offset)
##
## Database Related functions
##
#
# db_incr increments some key in the database by some value. It is used
# to maintain statistical counters.
#
def db_incr(key, value = 1):
    """Increment kv[key] by value, creating the row on first use."""
    db = db_connect()
    try:
        # First write attempt; fails with a UNIQUE violation once the key exists.
        db['c'].execute('insert into kv(value, key) values(?, ?)', (value, key))
    # Bug fix: was a bare `except:`; only the duplicate-key case should fall
    # through to the update, not arbitrary errors.
    except sqlite3.IntegrityError:
        db['c'].execute('update kv set value = value + ? where key = ?', (value, key))
    db['conn'].commit()
# db_set sets (or replaces) a given key to a specific value.
def db_set(key, value):
    # Upsert into the kv table; created_at is refreshed on every write so
    # db_get's expiry logic measures age-since-last-set.  Returns value.
    db = db_connect()
    # From http://stackoverflow.com/questions/418898/sqlite-upsert-not-insert-or-replace
    res = db['c'].execute('''
        INSERT OR REPLACE INTO kv (key, value, created_at)
        VALUES (
            COALESCE((SELECT key FROM kv WHERE key = ?), ?),
            ?,
            current_timestamp
        )''', (key, key, value))
    db['conn'].commit()
    return value
# db_get retrieves a value from the database, tentative on the expiry
def db_get(key, expiry=0):
    """Return the stored value for key, or False when absent/expired.

    expiry > 0 means: delete the row first if it is older than `expiry`
    seconds, so the caller sees False and refreshes it.
    """
    db = db_connect()
    if expiry > 0:
        # If we let things expire, we first sweep for it.
        # Bug fix: the old predicate was `created_at < (current_timestamp - ?)`.
        # SQLite evaluates that subtraction numerically (the TEXT timestamp
        # casts to its leading year), so the comparison never matched and
        # entries never expired.  Compare against a real datetime instead.
        db['c'].execute(
            "delete from kv where key = ? and created_at < datetime('now', '-' || ? || ' seconds')",
            (key, expiry))
        db['conn'].commit()
    res = db['c'].execute('select value, created_at from kv where key = ?', (key, )).fetchone()
    if res:
        return res[0]
    return False
#
# db_connect is a "singleton pattern" or some other fancy $10-world style of maintaining
# the database connection throughout the execution of the script.
#
def db_connect():
    # Lazily opens config.db (relative to the storage dir we chdir'd into),
    # creates the schema on first use, and caches {conn, c} in g_db.
    global g_db
    if 'conn' not in g_db:
        conn = sqlite3.connect('config.db')
        g_db = {'conn': conn, 'c': conn.cursor()}
        # intents: requested (start,end) minute-of-week windows to record.
        g_db['c'].execute("""CREATE TABLE IF NOT EXISTS intents(
            id INTEGER PRIMARY KEY,
            key TEXT UNIQUE,
            start INTEGER,
            end INTEGER,
            read_count INTEGER DEFAULT 0,
            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
            accessed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
        )""");
        # kv: generic key/value store for counters and cached values.
        g_db['c'].execute("""CREATE TABLE IF NOT EXISTS kv(
            id INTEGER PRIMARY KEY,
            key TEXT UNIQUE,
            value TEXT,
            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
        )""");
        g_db['conn'].commit()
    return g_db
def db_register_intent(minute, duration):
    """Record (or refresh) the intent to record [minute, minute+duration).

    A new intent inserts a row; an existing one bumps read_count and
    accessed_at so it does not expire.  Returns the cursor's lastrowid.
    """
    db = db_connect()
    key = str(minute) + ':' + str(duration)
    res = db['c'].execute('select id from intents where key = ?', (key, )).fetchone()
    # Idiom fix: identity comparison with None instead of `== None`.
    if res is None:
        db['c'].execute('insert into intents(key, start, end) values(?, ?, ?)', (key, minute, minute + duration))
    else:
        db['c'].execute('update intents set read_count = read_count + 1, accessed_at = (current_timestamp) where id = ?', (res[0], ))
    db['conn'].commit()
    return db['c'].lastrowid
##
## Storage and file related
##
# Get rid of files older than archivedays
def file_prune():
    global g_config
    # NOTE(review): db handle is acquired but never used here - confirm
    # whether a db sweep was planned or this line is vestigial.
    db = db_connect()
    duration = int(g_config['archivedays']) * 60 * 60 * 24
    cutoff = time.time() - duration
    # Dumping old streams
    count = 0
    # NOTE(review): listdir('.') enumerates the cwd (which is the storage dir
    # after read_config's chdir) but entries are then re-prefixed with
    # g_config['storage'] - these only agree if storage resolves from cwd.
    # Confirm the intended directory.
    for f in os.listdir('.'):
        entry = g_config['storage'] + f
        if os.path.isfile(entry) and os.path.getctime(entry) < cutoff:
            logging.debug("Prune: %s" % entry)
            os.unlink(entry)
            count += 1
    logging.info("Found %d files older than %s days." % (count, g_config['archivedays']))
#
# Given a start week minute this looks for streams in the storage
# directory that match it - regardless of duration ... so it may return
# partial shows results.
#
def file_find_streams(start, duration):
    # Walks streams/*.mp3 in mtime order, groups contiguous recordings into
    # stitch_list runs, and slices each run to [start, start+duration).
    # start == -1 is a wildcard used by /heartbeat to list everything.
    stream_list = []
    end = (start + duration) % 10080
    # We want to make sure we only get the edges so we need to have state
    # between the iterations.
    next_valid_start_minute = 0
    current_week = 0
    file_list = glob('streams/*.mp3')
    # Sorting by date (see http://stackoverflow.com/questions/23430395/glob-search-files-in-date-order)
    file_list.sort(key=os.path.getmtime)
    stitch_list = []
    for filename in file_list:
        i = audio_stream_info(filename)
        # Continuation of the current run: accumulate and move on.
        if i['start_minute'] < next_valid_start_minute and i['week'] == current_week:
            stitch_list.append(filename)
            continue
        # We are only looking for starting edges of the stream
        #
        # If we started recording before this is fine as long as we ended recording after our start
        if start == -1 or (i['start_minute'] < start and i['end_minute'] > start) or (i['start_minute'] > start and i['start_minute'] < end):
            # Flush the previous run before starting a new one on this edge.
            fname = audio_stitch_and_slice(stitch_list, start, duration)
            print fname
            stitch_list = [filename]
            # TODO: May need to % 10080 this
            next_valid_start_minute = (start + duration)
            current_week = i['week']
            # NOTE(review): audio_stitch_and_slice can return -1 (truthy) on a
            # failed stitch, which would crash audio_stream_info - confirm.
            if fname:
                stream_list.append(audio_stream_info(fname))
    # Flush the trailing run after the loop.
    fname = audio_stitch_and_slice(stitch_list, start, duration)
    if fname:
        stream_list.append(audio_stream_info(fname))
    return stream_list
#
# This takes a number of params:
#
# showname - from the incoming request url
# feedList - this is a list of tuples in the form (date, file)
# corresponding to the, um, date of recording and filename
#
# It obviously returns an xml file ... I mean duh.
#
# In the xml file we will lie about the duration to make life easier
#
def server_generate_xml(showname, feed_list, duration, start_minute):
    # Builds the RSS/podcast XML for one show: a <channel> header plus one
    # <item> per entry in feed_list.  Returns the serialized document.
    global g_config
    base_url = 'http://%s.indycast.net/' % g_config['callsign']
    callsign = g_config['callsign']
    # Namespaces required by the various podcast consumers (itunes et al).
    nsmap = {
        'dc': 'http://purl.org/dc/elements/1.1/',
        'media': 'http://search.yahoo.com/mrss/',
        'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd',
        'feedburner': 'http://rssnamespace.org/feedburner/ext/1.0'
    }
    root = ET.Element("rss", nsmap = nsmap)
    root.attrib['version'] = '2.0'
    frame_length = 1152.0 / 44100
    channel = ET.SubElement(root, "channel")
    # Channel-level boilerplate, one child element per key.
    for k,v in {
        '{%s}summary' % nsmap['itunes']: showname,
        '{%s}subtitle' % nsmap['itunes']: showname,
        '{%s}category' % nsmap['itunes']: 'podcast',
        'title': showname,
        'link': base_url,
        'copyright': callsign,
        'description': showname,
        'language': 'en'
    }.items():
        ET.SubElement(channel, k).text = v
    # In our feed, we construct theoretical files which will be stitched and sliced
    # together on-demand (lazy) if the user requests it.
    for feed in feed_list:
        # This is our file ... we have a week number, which is what we need.
        # By the existence of this feed, we are essentially saying that we have
        # a specific week at a specific minute ... so we construct that as the
        # lazy-file name
        # Start with the start_date of the feed
        #start_of_week = time_week_to_iso(feed['start_date'].year, feed['week'])
        # now we add the minute offset to get a datetime version
        #dt_start_of_stream = start_of_week + timedelta(minutes = start_minute)
        # and then make a unix time stamp from it. This will be the numeric on the file that
        # are committing to making
        #str_start_of_stream = dt_start_of_stream.strftime('%s')
        #file_name = "%s-%s_%d.mp3" % (callsign, str_start_of_stream, duration)
        file_name = feed['name']
        link = "%s%s" % (base_url, file_name)
        item = ET.SubElement(channel, 'item')
        for k,v in {
            '{%s}explicit' % nsmap['itunes']: 'no',
            '{%s}author' % nsmap['itunes']: callsign,
            '{%s}duration' % nsmap['itunes']: str(duration * 60),
            '{%s}summary' % nsmap['itunes']: showname,
            '{%s}creator' % nsmap['dc']: callsign,
            '{%s}origEnclosureLink' % nsmap['feedburner']: link,
            '{%s}origLink' % nsmap['feedburner']: base_url,
            'description': showname,
            'pubDate': feed['start_date'].strftime("%Y-%m-%d %H:%M:%S"),
            'title': showname,
            'link': link,
            'copyright': callsign,
            'guid': callsign + file_name
        }.items():
            ET.SubElement(item, k).text = v
        ET.SubElement(item, 'guid', isPermaLink = "false").text = base_url
        # fileSize and length will be guessed based on 209 bytes covering
        # frame_length seconds of audio (128k/44.1k no id3)
        content = ET.SubElement(item, '{%s}content' % nsmap['media'])
        content.attrib['url'] = link
        content.attrib['fileSize'] = str(209 * (duration * 60.0) / frame_length)
        content.attrib['type'] = 'audio/mpeg3'
        # The length of the audio we will just take as the duration
        content = ET.SubElement(item, 'enclosure')
        content.attrib['url'] = link
        content.attrib['length'] = str(duration * 60)
        content.attrib['type'] = 'audio/mpeg3'
    tree = ET.ElementTree(root)
    return ET.tostring(tree, xml_declaration=True, encoding="utf-8")
def server_error(errstr):
    # Uniform JSON error payload with an HTTP 500 status.
    return jsonify({'result': False, 'error':errstr}), 500
def server_manager(config):
    # Runs the Flask web frontend.  Executed in its own child process by
    # stream_manager; `config` is the already-parsed g_config dictionary.
    app = Flask(__name__)

    #
    # The path is (unix timestamp)_(duration in minutes). If it exists (as in we had
    # previously generated it) then we can trivially send it. Otherwise we need
    # to create it.
    #
    @app.route('/slices/<path:path>')
    def send_stream(path):
        base_dir = config['storage'] + 'slices/'
        fname = base_dir + path
        # If the file doesn't exist, then we need to slice it and create it based on our query.
        if not os.path.isfile(fname):
            # 1. Find the closest timestamp
            # Even though the file doesn't exist, we'll still get
            # a partial return on getting it's "info"
            info = audio_stream_info(fname)
            # Now we see what our start stream should be
            start_stream = file_find_streams(info['start_minute'], info['duration_sec'] / 60)
            # slice if needed
            # add up the timestamp
            # NOTE(review): the on-demand path is unfinished - `return True` is
            # not a valid Flask response and start_stream is unused.
            return True
        return flask.send_from_directory(base_dir, path)

    @app.route('/heartbeat')
    def heartbeat():
        # Local-only introspection endpoint: dumps db tables, uptime, disk
        # usage, known streams and the live config as JSON.
        global g_start_time
        if request.remote_addr != '127.0.0.1':
            return '', 403
        db = db_connect()
        stats = {
            'intents': [record for record in db['c'].execute('select * from intents').fetchall()],
            'kv': [record for record in db['c'].execute('select * from kv').fetchall()],
            'uptime': int(time.time() - g_start_time),
            'disk': sum(os.path.getsize(f) for f in os.listdir('.') if os.path.isfile(f)),
            'streams': file_find_streams(-1, 0),
            'config': config
        }
        return jsonify(stats), 200

    @app.route('/<weekday>/<start>/<duration>/<showname>')
    def stream(weekday, start, duration, showname):
        # Duration is expressed either in minutes or in \d+hr\d+ minute
        re_minute = re.compile('^(\d+)$')
        re_hr_solo = re.compile('^(\d+)hr$', re.I)
        re_hr_min = re.compile('^(\d+)hr(\d+).*$', re.I)
        res = re_minute.match(duration)
        if res:
            duration = int(res.groups()[0])
        else:
            res = re_hr_solo.match(duration)
            if res:
                duration = int(res.groups()[0]) * 60
            else:
                res = re_hr_min.match(duration)
                if res:
                    duration = int(res.groups()[0]) * 60 + int(res.groups()[1])
        # This means we failed to parse
        if type(duration) is str:
            return server_error('duration "%s" is not set correctly' % duration)
        start_time = time_to_utc(weekday, start)
        if not start_time:
            return server_error('weekday and start times are not set correctly')
        # If we are here then it looks like our input is probably good.
        # Strip the .xml from the showname ... this will be used in our xml.
        showname = re.sub('.xml$', '', showname)
        # This will register the intent if needed for future recordings
        # (that is if we are in ondemand mode)
        db_register_intent(start_time, duration)
        # Look for streams that we have which match this query and duration.
        feed_list = file_find_streams(start_time, duration)
        # Then, taking those two things, make a feed list from them.
        return server_generate_xml(showname, feed_list, duration, start_time)

    # True in the child process spawned by multiprocessing on some platforms;
    # guards the blocking app.run call.
    if __name__ == '__main__':
        change_proc_name("ic-webserver")
        start = time.time()
        try:
            app.run(port = int(config['port']))
        except:
            # An immediate failure usually means the port is taken.
            if time.time() - start < 5:
                print "Error, can't start server ... perhaps %s is already in use?" % config['port']
            shutdown()
##
## Stream management functions
##
# Query the database and see if we ought to be recording at this moment
def stream_should_be_recording():
    """Return True when some unexpired intent covers the current minute."""
    global g_config
    db = db_connect()
    current_minute = time_minute_now()
    # An intent covers "now" when start <= now <= end.  Bug fix: the old
    # comparison (start >= ? and end <= ?) could only ever match a
    # zero-length intent falling exactly on the current minute.  The expiry
    # window is also bound as a parameter now instead of %-interpolated.
    intent_count = db['c'].execute(
        """select count(*) from intents where
              start <= ? and
              end >= ? and
              accessed_at > datetime('now', '-' || ? || ' days')
        """,
        (current_minute, current_minute, g_config['expireafter'])
    ).fetchone()[0]
    return intent_count != 0
# The curl interfacing that downloads the stream to disk
def stream_download(callsign, url, my_pid, fname):
    # Runs in its own process: streams `url` into `fname` via pycurl,
    # emitting a heartbeat on g_queue for every chunk received.
    change_proc_name("ic-download")
    # Mutable cell so the nested callback can rebind the file handle
    # (py2 has no `nonlocal`).
    nl = {'stream': False}
    def dl_stop(signal, frame):
        print fname
        sys.exit(0)
    def cback(data):
        # pycurl write callback: open the output lazily on first data, then
        # append every chunk and tell the manager we're alive.
        global g_config, g_queue
        g_queue.put(('heartbeat', True))
        if nl['stream'] == False:
            try:
                # NOTE(review): 'w' is text mode - fine on unix for py2, but
                # 'wb' would be the safe choice for mp3 payloads.
                nl['stream'] = open(fname, 'w')
            except:
                logging.critical("Unable to open %s. Can't record. Must exit." % fname)
                sys.exit(-1)
        nl['stream'].write(data)
    # signal.signal(signal.SIGTERM, dl_stop)
    c = pycurl.Curl()
    c.setopt(c.URL, url)
    c.setopt(pycurl.WRITEFUNCTION, cback)
    try:
        # Blocks until the stream ends or errors; the manager kills us when
        # it wants to cascade to a fresh downloader.
        c.perform()
    except:
        logging.warning("Couldn't resolve or connect to %s." % url)
    c.close()
    # Only close if the lazy open actually happened.
    if type(nl['stream']) != bool:
        nl['stream'].close()
# The manager process that makes sure that the
# streams are running appropriately
def stream_manager():
    # Top-level supervision loop: spawns the webserver, keeps a downloader
    # alive while recording is wanted, and "cascades" (overlaps) downloaders
    # so stream restarts lose no audio.
    global g_queue, g_config
    callsign = g_config['callsign']
    url = g_config['stream']
    # cascade_time: lifetime of one downloader; cascade_buffer: overlap the
    # next one is started early; margin is the point we spawn the successor.
    cascade_time = int(g_config['cascadetime'])
    cascade_buffer = int(g_config['cascadebuffer'])
    cascade_margin = cascade_time - cascade_buffer
    last_prune = 0
    last_success = 0
    mode_full = (g_config['mode'].lower() == 'full')
    b_shutdown = False
    should_record = mode_full
    # Number of seconds to be cycling
    cycle_time = int(g_config['cycletime'])
    process = False
    process_next = False
    server_pid = Process(target = server_manager, args=(g_config,))
    server_pid.start()
    fname = False

    # A wrapper function to start a donwnload process
    def download_start(fname):
        global g_pid
        g_pid += 1
        logging.info("Starting cascaded downloader #%d. Next up in %ds" % (g_pid, cascade_margin))
        #
        # Although we are already in the callsign path, we want the file to be a self-contained
        # description of the content - not relying on the path to complete part of the story
        # of what it is.
        #
        # Remove the previous file if it recorded nothing at all.
        if fname and os.path.getsize(fname) == 0:
            logging.info("Unsuccessful download. Removing %s." % fname)
            os.unlink(fname)
        fname = 'streams/%s-%d.mp3' % (callsign, time_sec_now())
        process = Process(target = stream_download, args = (callsign, url, g_pid, fname))
        process.start()
        return [fname, process]

    while True:
        #
        # We cycle this to off for every run. By the time we go throug the queue so long
        # as we aren't supposed to be shutting down, this should be toggled to true.
        #
        flag = False
        # Daily housekeeping: prune old files and refresh the UTC offset.
        yesterday = time.time() - 24 * 60 * 60
        if last_prune < yesterday:
            file_prune()
            last_prune = time.time()
            time_get_offset()
        # Drain child messages: heartbeats prove the downloader is alive.
        while not g_queue.empty():
            what, value = g_queue.get(False)
            if what == 'shutdown':
                b_shutdown = True
            else:
                flag = True
        #
        # If we are not in full mode, then we should check whether we should be
        # recording right now according to our intents.
        #
        if not mode_full:
            should_record = stream_should_be_recording()
        if should_record:
            # Didn't respond in cycle_time seconds so we respawn
            if not flag:
                if process and process.is_alive():
                    process.terminate()
                process = False
            if not process and not b_shutdown:
                fname, process = download_start(fname)
                last_success = time.time()
            # If we've hit the time when we ought to cascade
            elif time.time() - last_success > cascade_margin:
                # And we haven't created the next process yet, then we start it now.
                if not process_next:
                    fname, process_next = download_start(fname)
            # If our last_success stream was more than cascade_time - cascade_buffer
            # then we start our process_next
            # If there is still no process then we should definitely bail.
            if not process:
                return False
        #
        # The only way for the bool to be toggled off is if we are not in full-mode ...
        # we get here if we should NOT be recording. So we make sure we aren't.
        #
        else:
            if process and process.is_alive():
                process.terminate()
            if process_next and process_next.is_alive():
                process_next.terminate()
            process_next = process = False
        #
        # This needs to be on the outside loop in case we are doing a cascade
        # outside of a full mode. In this case, we will need to shut things down
        #
        # If we are past the cascade_time and we have a process_next, then
        # we should shutdown our previous process and move the pointers around.
        #
        if time.time() - last_success > cascade_time and process:
            logging.info("Stopping cascaded downloader")
            process.terminate()
            # If the process_next is running then we move our last_success forward to the present
            last_success = time.time()
            # we rename our process_next AS OUR process
            process = process_next
            # And then clear out the old process_next pointer
            process_next = False
        # Increment the amount of time this has been running
        db_incr('uptime', cycle_time)
        time.sleep(cycle_time)
def read_config():
    """Parse CLI args and the config file into g_config, build the storage
    directory tree, chdir into it, configure logging, and install the
    SIGINT handler.

    Must run before stream_manager(); everything downstream assumes the
    cwd is the per-callsign storage directory.
    """
    global g_config
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config", default="./indy_config.txt", help="Configuration file (default ./indy_config.txt)")
    # NOTE(review): declared but never acted on - confirm whether version
    # printing was intended here.
    parser.add_argument("-v", "--version", help="Version info")
    args = parser.parse_args()
    Config = ConfigParser.ConfigParser()
    Config.read(args.config)
    g_config = ConfigSectionMap('Main', Config)
    defaults = {
        # The log level to be put into the indycast.log file.
        'loglevel': 'WARN',
        # The recording mode, either 'full' meaning to record everything, or != 'full'
        # meaning to record only when an intent is matched.
        'mode': 'full',
        # The relative, or absolute directory to put things in
        'storage': 'recording',
        # The (day) time to expire an intent to record
        'expireafter': '45',
        # The TCP port to run the server on
        'port': '5000',
        # The (day) duration we should be archiving things.
        'archivedays': '7',
        # The (second) time in looking to see if our stream is running
        'cycletime': '7',
        # The (second) time to start a stream BEFORE the lapse of the cascade-time
        'cascadebuffer': 15,
        # The (second) time between cascaded streams
        'cascadetime': 60 * 15
    }
    # Config file wins; defaults only fill the gaps.
    for k, v in defaults.items():
        if k not in g_config:
            g_config[k] = v
    if not os.path.isdir(g_config['storage']):
        try:
            # If I can't do this, that's fine.
            os.mkdir(g_config['storage'])
        except Exception:
            # We make it from the current directory
            g_config['storage'] = defaults['storage']
            os.mkdir(g_config['storage'])
    # We go to the callsign level in order to store multiple station feeds on a single
    # server in a single parent directory without forcing the user to decide what goes
    # where.
    g_config['storage'] += '/%s/' % g_config['callsign']
    g_config['storage'] = re.sub(r'\/+', '/', g_config['storage'])
    if not os.path.isdir(g_config['storage']):
        os.mkdir(g_config['storage'])
    # We have a few sub directories for storing things
    for subdir in ['streams', 'stitches', 'slices']:
        if not os.path.isdir(g_config['storage'] + subdir):
            os.mkdir(g_config['storage'] + subdir)
    # Now we try to do all this stuff again
    if os.path.isdir(g_config['storage']):
        #
        # There's a bug after we chdir, where the multiprocessing is trying to grab the same
        # invocation as the initial argv[0] ... so we need to make sure that if a user did
        # ./blah this will be maintained.
        #
        if not os.path.isfile(g_config['storage'] + __file__):
            os.symlink(os.path.abspath(__file__), g_config['storage'] + __file__)
        os.chdir(g_config['storage'])
    else:
        logging.warning("Can't find %s. Using current directory." % g_config['storage'])
    # From https://docs.python.org/2/howto/logging.html
    numeric_level = getattr(logging, g_config['loglevel'].upper(), None)
    if not isinstance(numeric_level, int):
        # Bug fix: previously referenced an undefined name `loglevel`, which
        # raised NameError instead of the intended ValueError.
        raise ValueError('Invalid log level: %s' % g_config['loglevel'])
    logging.basicConfig(level=numeric_level, filename='indycast.log', datefmt='%Y-%m-%d %H:%M:%S', format='%(asctime)s %(message)s')
    #
    # Increment the number of times this has been run so we can track the stability of remote
    # servers and instances.
    #
    db_incr('runcount')
    signal.signal(signal.SIGINT, shutdown)
if __name__ == "__main__":
    # From http://stackoverflow.com/questions/25504149/why-does-running-the-flask-dev-server-run-itself-twice
    # Werkzeug's reloader re-executes the script with this env var set; in
    # that re-run we only bring up the web frontend.
    if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
        change_proc_name("ic-main")
        # NOTE(review): g_config is empty on this path (read_config not
        # called) - confirm the reloader path is ever exercised.
        server_manager(g_config)
    else:
        # Normal entry: parse config, then supervise downloaders + webserver.
        read_config()
        change_proc_name("ic-manager")
        stream_manager()
|
#!/usr/bin/python -O
import argparse
import binascii
import ConfigParser
import json
import logging
import lxml.etree as ET
import math
import os
import pycurl
import marshal
import gzip
import re
import setproctitle as SP
import signal
import struct
import sys
import time
import socket
import StringIO
import threading
import lib.db as DB
#
# This is needed to force ipv4 on ipv6 devices. It's sometimes needed
# if there isn't a clean ipv6 route to get to the big wild internet.
# In these cases, a pure ipv6 route simply will not work. People aren't
# always in full control of every hop ... so it's much safer to force
# ipv4 then optimistically cross our fingers.
#
# Keep a reference to the stock resolver so the wrapper can delegate to it.
origGetAddrInfo = socket.getaddrinfo

def getAddrInfoWrapper(host, port, family=0, socktype=0, proto=0, flags=0):
    # Ignore whatever address family the caller asked for and force AF_INET.
    return origGetAddrInfo(host, port, socket.AF_INET, socktype, proto, flags)

# Replace the original socket.getaddrinfo by our version
socket.getaddrinfo = getAddrInfoWrapper
import urllib2
import urllib
from datetime import datetime, timedelta, date
from glob import glob
from flask import Flask, request, jsonify, Response, url_for
import flask
from subprocess import call
import subprocess
from multiprocessing import Process, Queue
# Process-wide state shared by the manager / downloader / webserver pieces.
g_start_time = time.time()   # wall-clock start, used for uptime reporting
g_queue = Queue()            # child -> manager messages
g_config = {}                # parsed configuration
g_download_pid = 0           # incrementing id for cascaded downloaders
g_manager_pid = 0            # pid of the manager process
g_params = {}                # misc runtime parameters
# NOTE(review): shells out to git at import time; empty when run outside a
# git checkout - confirm packaging expectations.
__version__ = os.popen("git describe").read().strip()
# Most common frame-length ... in practice, I haven't
# seen other values in the real world
FRAME_LENGTH = (1152.0 / 44100)
# Everything is presumed to be weekly and on the minute
# scale. We use this to do wrap around when necessary
MINUTES_PER_WEEK = 10080
ONE_DAY = 60 * 60 * 24
#
# Some stations don't start you off with a valid mp3 header
# (such as kdvs), so we have to just seek into the file
# and look for one. This is the number of bytes we try.
# In practice, 217 appears to be enough, so we make it about
# ten times that and cross our fingers
#
MAX_HEADER_ATTEMPTS = 2048
#
# Maintain a pidfile for the manager and for the webserver (which
# tends to linger as a zombie), so the webserver's pid is tracked
# and cleaned up separately.
#
PIDFILE_MANAGER = 'pid-manager'
PIDFILE_WEBSERVER = 'pid-webserver'
#
# The process delay is used throughout to measure things like the delay in
# forking a subprocesses, waiting for DNS, and then starting a stream or
# waiting for all the sub-processes like the web-server to clean up and free
# the tcp port they are listening on, and shut down.
#
# Making this generous shouldn't be discouraged as it is also used as a metric
# to calculate the number of accomodations that are to be given to make service
# continuous.
#
# Things are specified in multiples of this value ... for instance PROCESS_DELAY
# / 4 or * 2. 4 is a good number.
#
PROCESS_DELAY = 4
# From https://wiki.python.org/moin/ConfigParserExamples
def config_section_map(section, Config):
    """
    Build a plain dictionary from one section of a ConfigParser
    instance.  Options that raise while being read are kept with a
    value of None so the caller still sees the key.
    Returns that dictionary.
    """
    result = {}
    for option in Config.options(section):
        try:
            result[option] = Config.get(section, option)
            if result[option] == -1:
                logging.info("skip: %s" % option)
        except Exception as exc:
            logging.warning("exception on %s!" % option)
            result[option] = None
    return result
def change_proc_name(what):
"""
Sets a more human-readable process name for the various
parts of the system to be viewed in top/htop
"""
SP.setproctitle(what)
print "[%s:%d] Starting" % (what, os.getpid())
return os.getpid()
def shutdown(signal=15, frame=False):
    """
    Shutdown is hit on the keyboard interrupt (and is also wired to
    other signals elsewhere). Tears down the webserver via its pidfile,
    closes the database, clears the manager pidfile when appropriate and
    tells queue consumers to stop before exiting this process.
    """
    global g_queue, g_start_time, g_config
    # Try to manually shutdown the webserver
    if os.path.isfile(PIDFILE_WEBSERVER):
        with open(PIDFILE_WEBSERVER, 'r') as f:
            webserver = f.readline()
        try:
            os.kill(int(webserver), signal)
        except:
            # Webserver may already be gone; this is best-effort only.
            pass
        os.unlink(PIDFILE_WEBSERVER)
    title = SP.getproctitle()
    print "[%s:%d] Shutting down" % (title, os.getpid())
    DB.shutdown()
    logging.info("[%s:%d] Shutting down through signal %d" % (title, os.getpid(), signal))
    if title == ('%s-manager' % g_config['callsign']):
        logging.info("Uptime: %ds", time.time() - g_start_time)
    elif title != ('%s-webserver' % g_config['callsign']) and os.path.isfile(PIDFILE_MANAGER):
        # Any process other than the manager/webserver (downloader,
        # cleaner, slicer) removes the manager pidfile on its way out.
        # NOTE(review): presumably intentional that the manager itself
        # leaves its own pidfile here -- confirm.
        os.unlink(PIDFILE_MANAGER)
    g_queue.put(('shutdown', True))
    sys.exit(0)
##
## Audio related functions
##
def audio_get_map(fname):
    """
    Retrieves a map file associated with the mp3 (the gzipped, marshalled
    [frame_sig, start_byte] cache written by audio_crc).

    fname may be either the mp3 path or the .map path itself.
    Returns the unmarshalled map on success, or the tuple (None, None)
    so callers can unpack it as `crc32, offset = audio_get_map(...)`.
    """
    map_name = fname if fname.endswith('.map') else fname + '.map'
    if os.path.exists(map_name):
        # Context manager guarantees the gzip handle is closed even if
        # marshal.loads() raises on a truncated or corrupt map file --
        # the previous explicit close() leaked the handle in that case.
        with gzip.open(map_name, 'rb') as f:
            return marshal.loads(f.read())
    return None, None
def audio_list_info(file_list):
    """
    Computes stream info for a stitched list of audio files.

    Takes the per-file metadata from the first entry and totals the
    duration over every entry, recomputing end_minute from the total.
    Returns the same dict shape as audio_stream_info().
    """
    info = audio_stream_info(file_list[0]['name'])
    # Some things are the same such as the
    # week, start_minute, start_date
    info['duration_sec'] = 0
    for item in file_list:
        info['duration_sec'] += item['duration_sec']
    # end = start + duration (in minutes), wrapped around the week.
    # BUG FIX: previously this added duration_sec to itself instead of to
    # start_minute -- see the identical formula in audio_stream_info().
    info['end_minute'] = (info['duration_sec'] / 60.0 + info['start_minute']) % MINUTES_PER_WEEK
    return info
def audio_stream_info(fname, guess_time=False):
    """
    Determines the date the thing starts,
    the minute time it starts, and the duration

    If guess_time is set, then that value is used
    as the audio time. It can speed things up
    by avoiding an opening of the file all together.
    It's sometimes an ok thing to do.
    """
    # A non-string argument is a stitched list of files -- delegate.
    if type(fname) is not str:
        return audio_list_info(fname)
    # Filenames carry a unix timestamp: callsign-<unixtime>[._]...
    # NOTE(review): the char class [.|_] also matches a literal '|' --
    # presumably [._] was meant; harmless given the naming scheme.
    ts_re = re.compile('-(\d*)[.|_]')
    ts = ts_re.findall(fname)
    duration = 0
    start_minute = 0
    start_date = 0
    if ts:
        unix_time = int(ts[0])
        start_minute = time_to_minute(unix_time)
        start_date = datetime.fromtimestamp(unix_time)
    try:
        duration = guess_time if guess_time else audio_time(fname)
    except Exception as exc:
        # If we can't find a duration then we try to see if it's in the file name
        ts_re_duration = re.compile('_(\d*).mp3')
        ts = ts_re_duration.findall(fname)
        if ts:
            duration = int(ts[0]) * 60
    # NOTE(review): if the filename carried no timestamp, start_date is
    # still the int 0 here and .isocalendar() below raises -- assumes
    # filenames always follow the callsign-<unixtime> convention; confirm.
    return {
        # The week number
        'week': start_date.isocalendar()[1],
        'name': fname,
        'start_minute': start_minute,
        'start_date': start_date,
        'end_minute': (duration / 60.0 + start_minute) % MINUTES_PER_WEEK,
        'duration_sec': duration
    }
def audio_crc(fname, blockcount=-1, only_check=False):
    """
    Opens an mp3 file, find all the blocks, the byte offset of the blocks, and if they
    are audio blocks, construct a crc32 mapping of some given beginning offset of the audio
    data ... this is intended for stitching.

    fname      -- mp3 (or .map) path
    blockcount -- maximum number of audio frames to walk (-1 means all)
    only_check -- if True, just report whether a map already exists

    Returns (frame_sig, start_byte): parallel lists of per-frame payload
    signatures and their byte offsets. Also caches the result in
    fname + '.map' (gzipped marshal) for audio_get_map().
    """
    # Simply make sure that there is a map associated with the
    # mp3. Otherwise create one.
    map_name = fname if fname.endswith('.map') else fname + '.map'
    if only_check and os.path.exists(map_name):
        return True
    crc32, offset = audio_get_map(fname)
    if crc32 is not None:
        return crc32, offset
    frame_sig = []
    start_byte = []
    first_header_seen = False
    header_attempts = 0
    #
    # Looking at the first 16 bytes of the payload yield a rate that is 99.75% unique
    # as tested over various corpi ranging from 1,000,000 - 7,000,000 blocks.
    #
    # There's an additional precautions of looking for a string of 4 matches which
    # mitigates this even further
    #
    read_size = 4
    # MPEG1 Layer III sample-rate (Hz) and bit-rate (kbps) lookup tables,
    # indexed by the relevant bits of the third header byte.
    freqTable = [ 44100, 48000, 32000, 0 ]
    brTable = [
        0, 32, 40, 48,
        56, 64, 80, 96,
        112, 128, 160, 192,
        224, 256, 320, 0
    ]
    f = open(fname, 'rb')
    while blockcount != 0:
        if first_header_seen:
            blockcount -= 1
        else:
            header_attempts += 1
            if header_attempts > 2:
                # Go 1 back.
                f.seek(-1, 1)
        frame_start = f.tell()
        header = f.read(2)
        if header:
            # 0xfffb / 0xfffa is the MPEG1 Layer III frame sync.
            if header == '\xff\xfb' or header == '\xff\xfa':
                try:
                    b = ord(f.read(1))
                # If we are at the EOF
                except:
                    break
                samp_rate = freqTable[(b & 0x0f) >> 2]
                bit_rate = brTable[b >> 4]
                pad_bit = (b & 0x3) >> 1
                # from http://id3.org/mp3Frame
                try:
                    frame_size = (144000 * bit_rate / samp_rate) + pad_bit
                # If there's a /0 error
                except:
                    continue
                if not first_header_seen:
                    first_header_seen = True
                    # We try to record the CBR associated with this
                    # stream
                    if not DB.get('bitrate', use_cache = True):
                        DB.set('bitrate', bit_rate)
                # Rest of the header
                throw_away = f.read(1)
                # Get the signature
                crc = f.read(read_size)
                frame_sig.append(crc)
                start_byte.append(frame_start)
                # Move forward the frame f.read size + 4 byte header
                throw_away = f.read(frame_size - (read_size + 4))
            # ID3 tag for some reason
            elif header == '\x49\x44':
                # Rest of the header
                throw_away = f.read(4)
                #
                # Quoting http://id3.org/d3v2.3.0
                #
                # The ID3v2 tag size is encoded with four bytes where the most significant bit
                # (bit 7) is set to zero in every byte, making a total of 28 bits. The zeroed
                # bits are ignored, so a 257 bytes long tag is represented as $00 00 02 01.
                #
                candidate = struct.unpack('>I', f.read(4))[0]
                size = ((candidate & 0x007f0000) >> 2 ) | ((candidate & 0x00007f00) >> 1 ) | (candidate & 0x0000007f)
                f.read(size)
            # ID3 TAG -- 128 bytes long
            elif header == '\x54\x41':
                # We've already read 2 so we can go 126 forward
                f.read(126)
            elif len(header) == 1:
                # We are at the end of file, but let's just continue.
                # NOTE(review): bare `next` is a no-op expression, not loop
                # control -- presumably `continue` was meant; falling through
                # happens to behave identically here.
                next
            elif first_header_seen or header_attempts > MAX_HEADER_ATTEMPTS:
                print "%d[%d/%d]%s:%s:%s %s %d" % (len(frame_sig), header_attempts, MAX_HEADER_ATTEMPTS, binascii.b2a_hex(header), binascii.b2a_hex(f.read(5)), fname, hex(f.tell()), len(start_byte) * (1152.0 / 44100) / 60)
                # This means that perhaps we didn't guess the start correct so we try this again
                if len(frame_sig) == 1 and header_attempts < MAX_HEADER_ATTEMPTS:
                    print "False start -- trying again"
                    # seek to the first start byte + 1
                    f.seek(start_byte[0] + 2)
                    # discard what we thought was the first start byte and
                    # frame signature
                    start_byte = []
                    frame_sig = []
                    first_header_seen = False
                else:
                    break
        else:
            break
    f.close()
    # If we get here that means that we don't have a map
    # file yet. So we just create it.
    # NOTE(review): this unconditionally appends '.map', so a caller that
    # passed a .map path would write '.map.map' -- confirm callers always
    # pass the mp3 path when no map exists yet.
    map_name = fname + '.map'
    if not os.path.exists(map_name):
        with gzip.open(map_name, 'wb') as f:
            f.write(marshal.dumps([frame_sig, start_byte]))
    return frame_sig, start_byte
def audio_time(fname):
    """
    Determines the duration of an audio file by doing some estimates based on the offsets
    Returns the audio time in seconds
    """
    # Every entry in the offset list is one mp3 frame; each frame covers
    # FRAME_LENGTH seconds, so the product is the total running time.
    frame_sigs, frame_offsets = audio_crc(fname)
    return len(frame_offsets) * FRAME_LENGTH
def audio_stitch_and_slice(file_list, start_minute, duration_minute):
    """
    Given a file_list in a directory and a duration, this function will seek out
    adjacent files if necessary and serialize them accordingly, and then return the
    file name of an audio slice that is the combination of them.

    Returns the slice's file name, or False when nothing can be produced.
    """
    if not file_list:
        return False
    # We presume that there is a file list we need to make
    stitched_list = audio_stitch(file_list, force_stitch=True)
    if len(stitched_list) > 1:
        info = audio_stream_info(stitched_list)
    else:
        # NOTE(review): a single-file file_list always lands here since
        # audio_stitch returns at least one entry, so one-file slices are
        # rejected -- confirm that's intended and not an off-by-one.
        logging.warn("Unable to stitch file list")
        return False
    # After we've stitched together the audio then we start our slice
    # by figuring our the start_minute of the slice, versus ours
    start_slice = max(start_minute - info['start_minute'], 0)
    # Now we need to take the duration of the stream we want, in minutes, and then
    # make sure that we don't exceed the length of the file.
    duration_slice = min(duration_minute, start_slice + info['duration_sec'] / 60.0)
    sliced_name = audio_list_slice(
        list_in=stitched_list,
        start_minute=start_slice,
        duration_minute=duration_slice
    )
    return sliced_name
def audio_list_slice_process(list_in, name_out, duration_sec, start_sec):
    """
    Worker (run in a forked Process by audio_list_slice) that writes the
    actual mp3 slice: walks the stitch list, seeks each source file to the
    frame boundaries nearest the requested window and concatenates the
    raw frame bytes into name_out.
    """
    global g_config
    pid = change_proc_name("%s-audioslice" % g_config['callsign'])
    out = open(name_out, 'wb+')
    for ix in range(0, len(list_in)):
        item = list_in[ix]
        # get the regular map
        crc32, offset = audio_crc(item['name'])
        # Last file in the list: stop at whatever is left of the requested
        # duration; middle files are consumed whole.
        if ix == len(list_in) - 1:
            frame_end = min(int(math.ceil(duration_sec / FRAME_LENGTH)), len(offset) - 1)
        else:
            frame_end = len(offset) - 1
        # First file starts at the requested offset; subsequent files
        # start at the stitch point computed by audio_stitch.
        if ix == 0:
            frame_start = max(int(math.floor(start_sec / FRAME_LENGTH)), 0)
            duration_sec -= (item['duration_sec'] - start_sec)
        else:
            frame_start = item['start_offset']
            duration_sec -= item['duration_sec']
        # try and get the mp3 referred to by the map file
        fin = file_get(item['name'][:-4])
        if fin:
            fin.seek(offset[frame_start])
            out.write(fin.read(offset[frame_end] - offset[frame_start]))
            fin.close()
        # If we fail to get the mp3 file then we can suppose that
        # the map file is bad so we just wince and remove it.
        else:
            os.unlink(item['name'])
            logging.warn("Unable to find %s's corresponding mp3, deleting" % item['name'])
    out.close()
    # If we failed to do anything this is a tragedy
    # and we just dump the file
    #
    # We take files under some really nominal threshold as being invalid.
    if os.path.getsize(name_out) < 1000:
        logging.warn("Unable to create %s - no valid slices" % name_out)
        os.unlink(name_out)
def audio_list_slice(list_in, start_minute, duration_minute=-1):
    """
    Takes some stitch list, list_in and then create a new one based on the start and end times
    by finding the closest frames and just doing an extraction.
    """
    start_sec = start_minute * 60.0
    duration_sec = duration_minute * 60.0
    # The slice name is derived from the first source file's callsign and
    # timestamp, shifted forward by the requested start offset.
    first_file = list_in[0]['name']
    callsign, unix_time = re.findall('(\w*)-(\d+)', first_file)[0]
    name_out = "slices/%s-%d_%d.mp3" % (callsign, int(unix_time) + start_minute * 60, duration_minute)
    # Already produced (and non-empty)? Then we're done.
    if os.path.isfile(name_out) and os.path.getsize(name_out) > 0:
        return name_out
    #
    # We may need to pull things down from the cloud so it's better if we just return
    # the eventual mp3 name here and not block. As it turns out, pulling the blobs from
    # the cloud is rather fast on the vpss (a matter of seconds) so by the time the user
    # requests an mp3, it will probably exist. If it doesn't, then eh, we'll figure it out.
    #
    Process(target=audio_list_slice_process, args=(list_in, name_out, duration_sec, start_sec)).start()
    return name_out
def audio_stitch(file_list, force_stitch=False):
    """
    Takes a list of files and then attempt to seamlessly stitch them
    together by looking at their crc32 checksums of the data payload in the blocks.

    Returns a list of dicts (one per successfully chained file) describing
    where each file's contribution starts/ends in frames and bytes. The
    first file is always included; later files are appended only while a
    frame-signature overlap is found (or force_stitch is set).
    """
    first = {'name': file_list[0]}
    duration = 0
    crc32, offset = audio_crc(first['name'])
    first['crc32'] = crc32
    first['offset'] = offset
    args = [{
        'name': first['name'],
        'start_byte': 0,
        'start_offset': 0,
        'end_byte': first['offset'][-1],
        'start_minute': 0,
        'duration_sec': (len(first['offset']) - 1) * FRAME_LENGTH
    }]
    duration += len(first['offset']) * FRAME_LENGTH
    for name in file_list[1:]:
        second = {'name': name}
        crc32, offset = audio_crc(name)
        second['crc32'] = crc32
        second['offset'] = offset
        isFound = True
        pos = -1
        try:
            # Scan `second` for the frame that matches the second-to-last
            # frame of `first`, then demand a run of neighboring matches
            # to rule out signature collisions.
            while True:
                pos = second['crc32'].index(first['crc32'][-2], pos + 1)
                isFound = True
                for i in xrange(5, 1, -1):
                    if second['crc32'][pos - i + 2] != first['crc32'][-i]:
                        isFound = False
                        logging.warn("Indices @%d do not match between %s and %s" % (pos, first['name'], second['name']))
                        break
                # If we got here it means that everything matches
                if isFound: break
                else: continue
        except Exception as exc:
            # .index() ran out of candidates -- no overlap found.
            logging.warn("Cannot find indices between %s and %s" % (first['name'], second['name']))
            pos = 1
        if isFound or force_stitch:
            args.append({
                'name': second['name'],
                'start_byte': second['offset'][pos],
                'end_byte': second['offset'][-2],
                'start_offset': pos,
                'start_minute': pos * FRAME_LENGTH,
                'duration_sec': (len(second['offset']) - pos - 1) * FRAME_LENGTH
            })
            duration += (len(second['offset']) - pos - 1) * FRAME_LENGTH
            first = second
            continue
        # Chain broken: stop stitching at the first gap.
        break
    return args
##
## Time related functions
##
def time_to_minute(unix_time):
    """ Takes a given unix time and finds the week minute corresponding to it. """
    # Accept either a raw unix timestamp or a datetime instance.
    if type(unix_time) is int:
        unix_time = datetime.fromtimestamp(unix_time)
    minute_of_day = unix_time.hour * 60 + unix_time.minute
    return unix_time.weekday() * 1440 + minute_of_day
def time_sec_now(offset_sec=0):
    """
    Returns the unix time with respect to the timezone of the station being recorded.
    Accepts an optional offset_sec to forward the time into the future
    """
    # NOTE(review): strftime('%s') is an undocumented glibc extension that
    # interprets the naive datetime in the host's local timezone -- works
    # on the deployed linux boxes but is not portable; confirm before
    # running elsewhere.
    return int((datetime.utcnow() + timedelta(seconds=offset_sec, minutes=time_get_offset())).strftime('%s'))
def time_minute_now():
    """ Returns the mod 10080 week minute with respect to the timezone of the station being recorded """
    # Shift utcnow() by the station's minute offset, then fold to week-minutes.
    return time_to_minute(datetime.utcnow() + timedelta(minutes=time_get_offset()))
def time_to_utc(day_str, hour):
    """
    Takes the nominal weekday (sun, mon, tue, wed, thu, fri, sat)
    and a 12 hour time hh:mm [ap]m and converts it to our absolute units
    with respect to the timestamp in the configuration file

    Returns the week-minute (0..10079), or False if the input can't be parsed.
    """
    global g_config
    try:
        day_number = ['mon','tue','wed','thu','fri','sat','sun'].index(day_str.lower())
    except Exception as exc:
        return False
    local = day_number * (60 * 24)
    time_re_solo = re.compile('(\d{1,2})([ap])m', re.I)
    time_re_min = re.compile('(\d{1,2}):(\d{2})([ap])m', re.I)
    time = time_re_solo.match(hour)
    if time:
        hour_12 = int(time.groups()[0])
    else:
        time = time_re_min.match(hour)
        if time:
            hour_12 = int(time.groups()[0])
            local += int(time.groups()[1])
    if not time:
        return False
    # BUG FIX: on a 12-hour clock, 12am is midnight (hour 0) and 12pm is
    # noon -- an hour value of 12 must be zeroed before the am/pm shift,
    # otherwise 12am landed at noon and 12pm at midnight the next day.
    if hour_12 == 12:
        hour_12 = 0
    local += hour_12 * 60
    # BUG FIX: the regexes are case-insensitive (re.I), so the captured
    # meridian may be 'P' -- previously '1PM' silently lost its 12-hour
    # shift because only lowercase 'p' was compared.
    if time.groups()[-1].lower() == 'p':
        local += (12 * 60)
    #utc = local + time_get_offset()
    return local
def time_get_offset(force=False):
    """
    Contacts the goog, giving a longitude and lattitude and gets the time
    offset with regard to the UTC. There's a sqlite cache entry for the offset.

    NOTE(review): despite the docstring, the value cached and returned is
    in MINUTES (raw+dst offset seconds divided by 60) -- the callers
    time_sec_now/time_minute_now use it as minutes. Docstring looks stale.
    """
    offset = DB.get('offset', expiry=ONE_DAY)
    if not offset or force:
        when = int(time.time())
        # SECURITY(review): API key hardcoded in source -- should be moved
        # to the config file / environment.
        api_key = 'AIzaSyBkyEMoXrSYTtIi8bevEIrSxh1Iig5V_to'
        url = "https://maps.googleapis.com/maps/api/timezone/json?location=%s,%s&timestamp=%d&key=%s" % (g_config['lat'], g_config['long'], when, api_key)
        stream = urllib2.urlopen(url)
        data = stream.read()
        opts = json.loads(data)
        if opts['status'] == 'OK':
            logging.info("Location: %s | offset: %s" % (opts['timeZoneId'], opts['rawOffset']))
            offset = (int(opts['rawOffset']) + int(opts['dstOffset'])) / 60
            DB.set('offset', offset)
        else:
            # Lookup failed: fall back to UTC rather than crashing.
            offset = 0
    return int(offset)
##
## Storage and file related
##
def cloud_connect():
    """
    Opens a connection to the Azure blob store configured under
    g_config['azure'] and ensures the 'streams' container exists with
    public read access.

    Returns (blob_service, container_name).
    """
    # Imported lazily so installs without the azure SDK can still run
    # everything that never touches the cloud.
    from azure.storage import BlobService
    global g_config
    container = 'streams'
    blob_service = BlobService(g_config['azure']['storage_account_name'], g_config['azure']['primary_access_key'])
    blob_service.create_container(container, x_ms_blob_public_access='container')
    return blob_service, container
def cloud_unlink(path):
    """
    Deletes the blob corresponding to a local path from cloud storage.

    Blobs are stored under their basename (see cloud_put), so the delete
    must also go by basename -- previously the full local path was passed,
    which could never match an uploaded blob.
    """
    fname = os.path.basename(path)
    blob_service, container = cloud_connect()
    return blob_service.delete_blob(container, fname)
def cloud_put(path):
    """
    Uploads a local file to the cloud storage container under its basename.
    Returns the service response on success, False on failure.
    """
    blob_service, container = cloud_connect()
    if blob_service:
        try:
            res = blob_service.put_block_blob_from_path(
                container,
                os.path.basename(path),
                path,
                max_connections=5,
            )
            return res
        except Exception as exc:
            # Catch Exception (not a bare except, which would also swallow
            # SystemExit/KeyboardInterrupt) and log the actual reason,
            # matching the `except Exception as exc` style used elsewhere.
            logging.debug('Unable to put %s in the cloud: %s' % (path, exc))
            return False
def cloud_get(path):
    """
    Downloads a blob (looked up by basename) from cloud storage into the
    local streams/ directory. Returns True on success, False otherwise.
    """
    blob_service, container = cloud_connect()
    if blob_service:
        fname = os.path.basename(path)
        try:
            blob_service.get_blob_to_path(
                container,
                fname,
                'streams/%s' % fname,
                max_connections=8,
            )
            return True
        except Exception as exc:
            # Catch Exception instead of a bare except and include the
            # reason; also fixes the 'retreive' typo in the log message.
            logging.debug('Unable to retrieve %s from the cloud: %s' % (path, exc))
    return False
def file_get_size(fname):
    """ Gets a file size or just plain guesses it if it doesn't exist yet. """
    if os.path.exists(fname):
        return os.path.getsize(fname)
    # The file isn't on disk yet, so estimate from the "_<minutes>.mp3"
    # suffix the slicer bakes into the name.
    ts_re_duration = re.compile('_(\d*).mp3')
    found = ts_re_duration.findall(fname)
    if len(found):
        duration_min = int(found[0])
        bitrate = int(DB.get('bitrate') or 128)
        #
        # Estimating mp3 length is actually pretty easy if you don't have ID3 headers.
        # MP3s are rated at things like 128kb/s ... well there you go.
        #
        # They consider a k to be 10^3, not 2^10
        #
        return (bitrate / 8) * (duration_min * 60) * (10 ** 3)
    # No duration hint in the name either, so 0 is the best we can do.
    return 0
def file_prune():
    """ Gets rid of files older than archivedays - cloud stores things if relevant """
    global g_config
    pid = change_proc_name("%s-cleanup" % g_config['callsign'])
    db = DB.connect()
    duration = g_config['archivedays'] * ONE_DAY
    cutoff = time.time() - duration
    cloud_cutoff = False
    if g_config['cloud']:
        cloud_cutoff = time.time() - g_config['cloudarchive'] * ONE_DAY
    # Dump old streams and slices
    count = 0
    for fname in glob('*/*.mp3'):
        #
        # Depending on many factors this could be running for hours
        # or even days. We want to make sure this isn't a zombie
        # process or worse yet, still running and competing with
        # other instances of itself.
        #
        if not manager_is_running():
            shutdown()
        ctime = os.path.getctime(fname)
        # We observe the rules set up in the config.
        if ctime < cutoff:
            logging.debug("Prune: %s" % fname)
            os.unlink(fname)
            count += 1
        elif cloud_cutoff and ctime < cloud_cutoff:
            # Old enough for cloud archival but not for deletion: upload
            # first, then drop the local copy.
            logging.debug("Prune[cloud]: putting %s" % fname)
            cloud_put(fname)
            try:
                os.unlink(fname)
            except:
                logging.debug("Prune[cloud]: Couldn't remove %s" % fname)
    # The map names are different since there may or may not be a corresponding
    # cloud thingie associated with it.
    for fname in glob('*/*.map'):
        # BUG FIX: each map file must be stat'ed individually -- previously
        # this loop reused the stale `ctime` left over from the last mp3 in
        # the loop above, so map pruning was effectively arbitrary.
        ctime = os.path.getctime(fname)
        if ctime < cutoff:
            # If there's a cloud account at all then we need to unlink the
            # equivalent mp3 file
            if cloud_cutoff:
                cloud_unlink(fname[:-4])
            # now only after we've deleted from the cloud can we delete the local file
            os.unlink(fname)
    logging.info("Found %d files older than %s days." % (count, g_config['archivedays']))
    return 0
def file_get(path):
    """
    If the file exists locally then we return it, otherwise
    we go out to the network store and retrieve it
    """
    # Pull it down from the cloud first if we don't have it on disk.
    if not os.path.exists(path):
        if not cloud_get(path):
            return False
    return open(path, 'rb')
def file_find_streams(start_list, duration):
    """
    Given a start week minute this looks for streams in the storage
    directory that match it - regardless of duration ... so it may return
    partial shows results.

    start_list -- a week-minute or list of week-minutes (-1 means "every
                  stream", used by /stats)
    duration   -- show length in minutes

    Returns a list of audio_stream_info() dicts for the slices produced.
    """
    global g_config
    stream_list = []
    if type(start_list) is int:
        start_list = [start_list]
    file_list = glob('streams/*.map')
    # Sort nominally - since we have unix time in the name, this should come out
    # as sorted by time for us for free.
    file_list.sort()
    stitch_list = []
    # TODO: This start list needs to be chronologically as opposed to
    # every monday, then every tuesday, etc ... for multi-day stream requests
    for start in start_list:
        end = (start + duration) % MINUTES_PER_WEEK
        # We want to make sure we only get the edges so we need to have state
        # between the iterations.
        next_valid_start_minute = 0
        current_week = 0
        for filename in file_list:
            i = audio_stream_info(filename, guess_time=g_config['cascadetime'])
            # Continuation of the stream we are already collecting: just
            # extend the current stitch list.
            if i['start_minute'] < next_valid_start_minute and i['week'] == current_week:
                stitch_list.append(filename)
                continue
            # We are only looking for starting edges of the stream
            #
            # If we started recording before this is fine as long as we ended recording after our start
            if start == -1 or (i['start_minute'] < start and i['end_minute'] > start) or (i['start_minute'] > start and i['start_minute'] < end):
                # A new edge: flush the previous stitch list into a slice
                # (or, for -1, take the file as-is) and start a new one.
                if start == -1:
                    fname = filename
                else:
                    fname = audio_stitch_and_slice(stitch_list, start, duration)
                stitch_list = [filename]
                next_valid_start_minute = (start + duration) % MINUTES_PER_WEEK
                current_week = i['week']
                if fname:
                    stream_list.append(audio_stream_info(fname))
        # Flush whatever is left over from the last edge.
        if start != -1:
            fname = audio_stitch_and_slice(stitch_list, start, duration)
            if fname:
                stream_list.append(audio_stream_info(fname))
    return stream_list
def server_generate_xml(showname, feed_list, duration, weekday_list, start, duration_string):
    """
    Builds the podcast RSS for a show.

    showname        -- from the incoming request url
    feed_list       -- list of audio_stream_info() dicts (name, start_date, ...)
                       for the recordings to list as episodes
    duration        -- show length in minutes (already parsed)
    weekday_list    -- list of short weekday names the show airs on
    start           -- the human-entered start time string
    duration_string -- the raw duration string from the url

    Returns a flask Response containing the rss xml. In the xml file we
    will lie about the duration to make life easier.
    """
    global g_config
    day_map = {
        'sun': 'Sunday',
        'mon': 'Monday',
        'tue': 'Tuesday',
        'wed': 'Wednesday',
        'thu': 'Thursday',
        'fri': 'Friday',
        'sat': 'Saturday'
    }
    day_list = [ day_map[weekday] for weekday in weekday_list ]
    if len(day_list) == 1:
        week_string = day_list[0]
    else:
        # an oxford comma, how cute.
        week_string = "%s and %s" % (', '.join(day_list[:-1]), day_list[-1])
    base_url = 'http://%s.indycast.net:%s/' % (g_config['callsign'], g_config['port'])
    callsign = g_config['callsign']
    nsmap = {
        'dc': 'http://purl.org/dc/elements/1.1/',
        'media': 'http://search.yahoo.com/mrss/',
        'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd',
        'feedburner': 'http://rssnamespace.org/feedburner/ext/1.0'
    }
    # NOTE(review): the nsmap= keyword implies ET is lxml.etree, not the
    # stdlib ElementTree -- confirm the import at the top of the file.
    root = ET.Element("rss", nsmap=nsmap)
    root.attrib['version'] = '2.0'
    channel = ET.SubElement(root, "channel")
    for k,v in {
        '{%s}summary' % nsmap['itunes']: showname,
        '{%s}subtitle' % nsmap['itunes']: showname,
        '{%s}category' % nsmap['itunes']: 'podcast',
        'title': showname,
        'link': base_url,
        'copyright': callsign,
        'description': "%s is a %s show recorded every %s on %s at %s. Saved and delivered when you want it, through a volunteer network at http://indycast.net." % (showname, duration_string, week_string, callsign.upper(), start),
        'language': 'en'
    }.items():
        ET.SubElement(channel, k).text = v
    itunes_image = ET.SubElement(channel, '{%s}image' % nsmap['itunes'])
    itunes_image.attrib['href'] = 'http://indycast.net/icon/%s_1400.png' % urllib.quote(showname)
    media_image = ET.SubElement(channel, '{%s}thumbnail' % nsmap['media'])
    media_image.attrib['url'] = 'http://indycast.net/icon/%s_1400.png' % urllib.quote(showname)
    image = ET.SubElement(channel, 'image')
    for k,v in {
        'url': 'http://indycast.net/icon/%s_200.png' % urllib.quote(showname),
        'title': showname,
        'link': 'http://indycast.net'
    }.items():
        ET.SubElement(image, k).text = v
    # One <item> per recording we have on hand.
    for feed in feed_list:
        file_name = feed['name']
        link = "%s%s" % (base_url, file_name)
        item = ET.SubElement(channel, 'item')
        itunes_duration = "%02d:00" % (duration % 60)
        if duration > 60:
            itunes_duration = "%d:%s" % (int(math.floor(duration / 60 )), itunes_duration)
        for k,v in {
            '{%s}explicit' % nsmap['itunes']: 'no',
            '{%s}author' % nsmap['itunes']: callsign,
            '{%s}duration' % nsmap['itunes']: itunes_duration,
            '{%s}summary' % nsmap['itunes']: showname,
            '{%s}creator' % nsmap['dc']: callsign.upper(),
            '{%s}origEnclosureLink' % nsmap['feedburner']: link,
            '{%s}origLink' % nsmap['feedburner']: base_url,
            'description': "%s recorded on %s" % (showname, feed['start_date'].strftime("%Y-%m-%d %H:%M:%S")),
            'pubDate': feed['start_date'].strftime("%Y-%m-%d %H:%M:%S"),
            'title': "%s - %s" % (showname, feed['start_date'].strftime("%Y.%m.%d")),
            'link': link,
            'copyright': callsign
        }.items():
            ET.SubElement(item, k).text = v
        ET.SubElement(item, 'guid', isPermaLink="false").text = file_name
        # fileSize and length will be guessed based on 209 bytes covering
        # frame_length seconds of audio (128k/44.1k no id3)
        content = ET.SubElement(item, '{%s}content' % nsmap['media'])
        content.attrib['url'] = link
        content.attrib['fileSize'] = str(file_get_size(file_name))
        content.attrib['type'] = 'audio/mpeg3'
        # The length of the audio we will just take as the duration
        content = ET.SubElement(item, 'enclosure')
        content.attrib['url'] = link
        content.attrib['length'] = str(duration * 60)
        content.attrib['type'] = 'audio/mpeg3'
    tree = ET.ElementTree(root)
    return Response(ET.tostring(tree, xml_declaration=True, encoding="utf-8"), mimetype='text/xml')
def server_error(errstr):
    """ Returns a server error as a JSON result """
    payload = {'result': False, 'error': errstr}
    return jsonify(payload), 500
def server_manager(config):
    """
    Main flask process that manages the end points.

    Runs in its own forked Process (see stream_manager); `config` is the
    station's configuration dict.
    """
    global g_queue
    app = Flask(__name__)
    # from http://flask.pocoo.org/snippets/67/
    def shutdown_server():
        # Asks werkzeug's dev server to stop serving; only works under
        # the Werkzeug development server.
        func = request.environ.get('werkzeug.server.shutdown')
        if func is None:
            raise RuntimeError('Not running with the Werkzeug Server')
        func()
    # from http://blog.asgaard.co.uk/2012/08/03/http-206-partial-content-for-flask-python
    @app.after_request
    def after_request(response):
        """ Supports 206 partial content requests for podcast streams """
        response.headers.add('Accept-Ranges', 'bytes')
        return response
    def send_file_partial(path):
        """
        Wrapper around send_file which handles HTTP 206 Partial Content
        (byte ranges)
        """
        range_header = request.headers.get('Range', None)
        if not range_header:
            return flask.send_file(path)
        size = os.path.getsize(path)
        byte1, byte2 = 0, None
        m = re.search('(\d+)-(\d*)', range_header)
        g = m.groups()
        if g[0]:
            byte1 = int(g[0])
        if g[1]:
            byte2 = int(g[1])
        length = size - byte1
        if byte2 is not None:
            length = byte2 - byte1
        data = None
        with open(path, 'rb') as f:
            f.seek(byte1)
            data = f.read(length)
        rv = Response(
            data,
            206,
            mimetype = 'audio/mpeg',
            direct_passthrough=True
        )
        rv.headers.add('Content-Range', 'bytes {0}-{1}/{2}'.format(byte1, byte1 + length - 1, size))
        return rv
    # From http://stackoverflow.com/questions/13317536/get-a-list-of-all-routes-defined-in-the-app
    @app.route("/site-map")
    def site_map():
        """ Shows all the end points supported by the current server """
        output = []
        for rule in app.url_map.iter_rules():
            options = {}
            for arg in rule.arguments:
                options[arg] = "[{0}]".format(arg)
            url = url_for(rule.endpoint, **options)
            line = urllib.unquote("{:25s} {}".format(rule.endpoint, url))
            output.append(line)
        return Response('\n'.join(output), mimetype='text/plain')
    @app.route('/slices/<path:path>')
    def send_stream(path):
        """
        Downloads a stream from the server. The path is (unix timestamp)_(duration in minutes).
        If it exists (as in we had previously generated it) then we can trivially send it. Otherwise
        we'll just call this an error to make our lives easier.
        """
        base_dir = config['storage'] + 'slices/'
        fname = base_dir + path
        # If the file doesn't exist, then we need to slice it and create it based on our query.
        if not os.path.isfile(fname):
            return "File not found. Perhaps the stream is old?", 404
        return send_file_partial("%s/%s" % (base_dir, path))
    @app.route('/restart')
    def restart():
        """ Restarts an instance """
        cwd = os.getcwd()
        os.chdir(os.path.dirname(os.path.realpath(__file__)))
        shutdown_server()
        g_queue.put(('restart', True))
        return "restarting..."
        # NOTE(review): unreachable after the return above -- the chdir is
        # never undone here (compare with upgrade() below).
        os.chdir(cwd)
    @app.route('/upgrade')
    def upgrade():
        """
        Goes to the source directory, pulls down the latest from git
        and if the versions are different, the application restarts
        """
        cwd = os.getcwd()
        os.chdir(os.path.dirname(os.path.realpath(__file__)))
        os.system('git pull')
        os.system('pip install --user -r requirements.txt')
        # See what the version is after the pull
        newversion = os.popen("git describe").read().strip()
        if newversion != __version__:
            # from http://blog.petrzemek.net/2014/03/23/restarting-a-python-script-within-itself/
            shutdown_server()
            g_queue.put(('restart', True))
            return "Upgrading from %s to %s" % (__version__, newversion)
        os.chdir(cwd)
        return 'Version %s is current' % __version__
    @app.route('/heartbeat')
    def heartbeat():
        """
        A low resource version of the /stats call ... this is invoked
        by the server health check
        """
        global g_start_time
        return jsonify({
            'uptime': int(time.time() - g_start_time),
            'version': __version__
        }), 200
    @app.route('/stats')
    def stats():
        """ Reports various statistical metrics on a particular server """
        global g_start_time
        db = DB.connect()
        stats = {
            'intents': [record for record in db['c'].execute('select * from intents').fetchall()],
            'hits': db['c'].execute('select sum(read_count) from intents').fetchone()[0],
            'kv': [record for record in db['c'].execute('select * from kv').fetchall()],
            'uptime': int(time.time() - g_start_time),
            'free': os.popen("df -h / | tail -1").read().strip(),
            'disk': sum(os.path.getsize(f) for f in os.listdir('.') if os.path.isfile(f)),
            #'streams': file_find_streams(-1, 0),
            'version': __version__,
            'config': config
        }
        return jsonify(stats), 200
    @app.route('/<weekday>/<start>/<duration>/<showname>')
    def stream(weekday, start, duration, showname):
        """
        Returns an xml file based on the weekday, start and duration
        from the front end.
        """
        # Supports multiple weekdays
        weekday_list = weekday.split(',')
        duration_string = duration
        # Duration is expressed either in minutes or in \d+hr\d+ minute
        re_minute = re.compile('^(\d+)$')
        re_hr_solo = re.compile('^(\d+)hr$', re.I)
        re_hr_min = re.compile('^(\d+)hr(\d+).*$', re.I)
        res = re_minute.match(duration)
        if res:
            duration = int(res.groups()[0])
        else:
            res = re_hr_solo.match(duration)
            if res:
                duration = int(res.groups()[0]) * 60
            else:
                res = re_hr_min.match(duration)
                if res:
                    duration = int(res.groups()[0]) * 60 + int(res.groups()[1])
        # This means we failed to parse
        if type(duration) is str:
            return server_error('duration "%s" is not set correctly' % duration)
        #
        # See https://github.com/kristopolous/DRR/issues/22:
        #
        # We're going to add 2 minutes to the duration to make sure that we get
        # the entire episode.
        #
        duration += 2
        start_time_list = [time_to_utc(day, start) for day in weekday_list]
        if not start_time_list[0]:
            return server_error('weekday and start times are not set correctly')
        # If we are here then it looks like our input is probably good.
        # Strip the .xml from the showname ... this will be used in our xml.
        showname = re.sub('.xml$', '', showname)
        # We come in with spaces as underscores so here we translate that back
        showname = re.sub('_', ' ', showname)
        # This will register the intent if needed for future recordings
        # (that is if we are in ondemand mode)
        DB.register_intent(start_time_list, duration)
        # Look for streams that we have which match this query and duration.
        feed_list = file_find_streams(start_time_list, duration)
        # Then, taking those two things, make a feed list from them.
        return server_generate_xml(
            showname=showname,
            feed_list=feed_list,
            duration=duration,
            weekday_list=weekday_list,
            start=start,
            duration_string=duration_string
        )
    # Under multiprocessing fork, __name__ stays '__main__' when the app
    # itself was run as a script, so this guard is live in the child too.
    if __name__ == '__main__':
        pid = change_proc_name("%s-webserver" % config['callsign'])
        with open(PIDFILE_WEBSERVER, 'w+') as f:
            f.write(str(pid))
        """
        When we do an upgrade or a restart, there's a race condition of getting to start this server
        before the previous one has cleaned up all the socket work. So if the time is under our
        patience threshold then we sleep a second and just try again, hoping that it will work.
        """
        patience = PROCESS_DELAY * 2
        attempt = 1
        start = time.time()
        while time.time() - start < patience:
            try:
                print "Listening on %s" % config['port']
                app.run(threaded=True, port=int(config['port']), host='0.0.0.0')
                break
            except Exception as exc:
                if time.time() - start < patience:
                    print "[attempt: %d] Error, can't start server ... perhaps %s is already in use?" % (attempt, config['port'])
                    attempt += 1
                    time.sleep(PROCESS_DELAY / 4)
##
## Stream management functions
##
def stream_should_be_recording():
    """ Queries the database and see if we ought to be recording at this moment """
    global g_config
    db = DB.connect()
    current_minute = time_minute_now()
    # Count registered listener intents covering the current week-minute
    # that have been accessed within the expiry window.
    # NOTE(review): `start >= ? and end <= ?` with both parameters equal to
    # current_minute only matches rows where start == end == now; the
    # intended predicate is presumably start <= now <= end -- confirm
    # against the intents schema before changing.
    intent_count = db['c'].execute("""
      select count(*) from intents where
      start >= ? and
      end <= ? and
      accessed_at > datetime('now','-%s days')
      """ % g_config['expireafter'],
      (current_minute, current_minute)
    ).fetchone()[0]
    return intent_count != 0
def stream_download(callsign, url, my_pid, fname):
    """
    Curl interfacing which downloads the stream to disk.
    Follows redirects and parses out basic m3u

    callsign -- station callsign, used for the process title
    url      -- stream (or playlist/redirect) url to pull
    my_pid   -- parent manager pid (liveness is actually checked through
                manager_is_running / the module-global pid)
    fname    -- file to write the raw mp3 data to
    """
    global g_params
    change_proc_name("%s-download" % callsign)
    # Mutable cell so cback() can rebind the file handle lazily
    # (python 2 has no `nonlocal`).
    nl = {'stream': False}
    def dl_stop(signal, frame):
        sys.exit(0)
    def cback(data):
        # pycurl WRITEFUNCTION: every chunk of the response body lands here.
        global g_config, g_queue, g_params
        # The first payload may be a redirect url or a pls playlist rather
        # than audio; if so, hand the real url back to the manager via the
        # queue and abort this transfer (returning a value != len(data)
        # makes pycurl stop).
        if g_params['isFirst'] == True:
            g_params['isFirst'] = False
            if len(data) < 800:
                if re.match('https?://', data):
                    # If we are getting a redirect then we don't mind, we
                    # just put it in the stream and then we leave
                    g_queue.put(('stream', data.strip()))
                    return True
                # A pls style playlist
                elif re.findall('File\d', data, re.M):
                    logging.info("Found a pls, using the File1 parameter");
                    matches = re.findall('File1=(.*)\n', data, re.M)
                    g_queue.put(('stream', matches[0].strip()))
                    return True
        # Let the manager know we're alive on every chunk.
        g_queue.put(('heartbeat', True))
        if not nl['stream']:
            try:
                nl['stream'] = open(fname, 'w')
            except Exception as exc:
                logging.critical("Unable to open %s. Can't record. Must exit." % fname)
                sys.exit(-1)
        nl['stream'].write(data)
        # Bail out if our parent manager has died underneath us.
        if not manager_is_running():
            shutdown()
    # signal.signal(signal.SIGTERM, dl_stop)
    c = pycurl.Curl()
    c.setopt(c.URL, url)
    c.setopt(pycurl.WRITEFUNCTION, cback)
    c.setopt(pycurl.FOLLOWLOCATION, True)
    g_params['isFirst'] = True
    try:
        c.perform()
    except Exception as exc:
        logging.warning("Couldn't resolve or connect to %s." % url)
    c.close()
    if type(nl['stream']) != bool:
        nl['stream'].close()
def manager_is_running():
    """
    Checks to see if the manager is still running or if we should
    shutdown. It works by sending a signal(0) to a pid and seeing
    if that fails
    """
    global g_manager_pid
    try:
        # Signal 0 performs error checking only; nothing is delivered.
        os.kill(g_manager_pid, 0)
    except:
        return False
    return True
def stream_manager():
    """
    Manager process which makes sure that the
    streams are running appropriately
    """
    global g_queue, g_config
    callsign = g_config['callsign']
    # Cascading: successive downloader processes are overlapped by
    # cascade_buffer seconds so restarts don't leave gaps in the audio.
    cascade_time = g_config['cascadetime']
    cascade_buffer = g_config['cascadebuffer']
    cascade_margin = cascade_time - cascade_buffer
    last_prune = 0
    last_success = 0
    mode_full = (g_config['mode'].lower() == 'full')
    b_shutdown = False
    # In full mode we always record; otherwise intents decide each cycle.
    should_record = mode_full
    # Number of seconds to be cycling
    cycle_time = g_config['cycletime']
    process = False
    process_next = False
    # The web server lives in its own process for our whole lifetime.
    server_pid = Process(target=server_manager, args=(g_config,))
    server_pid.start()
    fname = False
    # A wrapper function to start a donwnload process
    def download_start(fname):
        """ Starts a process that manages the downloading of a stream."""
        global g_download_pid
        g_download_pid += 1
        logging.info("Starting cascaded downloader #%d. Next up in %ds" % (g_download_pid, cascade_margin))
        #
        # There may be a multi-second lapse time from the naming of the file to
        # the actual start of the download so we should err on that side by putting it
        # in the future by some margin
        #
        fname = 'streams/%s-%d.mp3' % (callsign, time_sec_now(offset_sec=PROCESS_DELAY))
        process = Process(target=stream_download, args=(callsign, g_config['stream'], g_download_pid, fname))
        process.start()
        return [fname, process]
    while True:
        #
        # We cycle this to off for every run. By the time we go throug the queue so long
        # as we aren't supposed to be shutting down, this should be toggled to true.
        #
        flag = False
        if last_prune < (time.time() - ONE_DAY * g_config['pruneevery']):
            # We just assume it can do its business in under a day
            prune_process = Process(target=file_prune)
            prune_process.start()
            last_prune = time.time()
        # Keep the cached timezone offset fresh.
        time_get_offset()
        while not g_queue.empty():
            what, value = g_queue.get(False)
            # The curl proces discovered a new stream to be
            # used instead.
            if what == 'stream':
                g_config['stream'] = value
                logging.info("Using %s as the stream now" % value)
                # We now don't toggle to flag in order to shutdown the
                # old process and start a new one
            elif what == 'shutdown':
                print "-- shutdown"
                b_shutdown = True
            elif what == 'restart':
                # Re-exec ourselves in place from our own directory.
                os.chdir(os.path.dirname(os.path.realpath(__file__)))
                subprocess.Popen(sys.argv)
            else:
                # Anything else (e.g. a heartbeat) counts as a sign of life.
                flag = True
        # Check for our management process
        if not manager_is_running():
            logging.info("Manager isn't running");
            b_shutdown = True
        #
        # If we are not in full mode, then we should check whether we should be
        # recording right now according to our intents.
        #
        if not mode_full:
            should_record = stream_should_be_recording()
        if should_record:
            # Didn't respond in cycle_time seconds so we respawn
            if not flag:
                if process and process.is_alive():
                    process.terminate()
                process = False
                if not process and not b_shutdown:
                    fname, process = download_start(fname)
                    last_success = time.time()
            # If we've hit the time when we ought to cascade
            elif time.time() - last_success > cascade_margin:
                # And we haven't created the next process yet, then we start it now.
                if not process_next:
                    fname, process_next = download_start(fname)
            # If our last_success stream was more than cascade_time - cascade_buffer
            # then we start our process_next
            # If there is still no process then we should definitely bail.
            if not process:
                return False
        #
        # The only way for the bool to be toggled off is if we are not in full-mode ...
        # we get here if we should NOT be recording. So we make sure we aren't.
        #
        else:
            if process and process.is_alive():
                process.terminate()
            if process_next and process_next.is_alive():
                process_next.terminate()
            process_next = process = False
        #
        # This needs to be on the outside loop in case we are doing a cascade
        # outside of a full mode. In this case, we will need to shut things down
        #
        # If we are past the cascade_time and we have a process_next, then
        # we should shutdown our previous process and move the pointers around.
        #
        if time.time() - last_success > cascade_time and process:
            logging.info("Stopping cascaded downloader")
            process.terminate()
            # If the process_next is running then we move our last_success forward to the present
            last_success = time.time()
            # we rename our process_next AS OUR process
            process = process_next
            # And then clear out the old process_next pointer
            process_next = False
        # Increment the amount of time this has been running
        DB.incr('uptime', cycle_time)
        time.sleep(cycle_time)
def make_maps():
    """ Walks every stored mp3 and ensures it has a frame map on disk. """
    change_proc_name("%s-mapmaker" % g_config['callsign'])
    for stream_name in glob('streams/*.mp3'):
        # Stop doing busy-work if our supervising manager went away.
        if not manager_is_running():
            shutdown()
        # only_check: build the map solely when it is missing.
        audio_crc(stream_name, only_check=True)
    return 0
def read_config(config):
    """
    Reads a configuration file.
    Currently documented at https://github.com/kristopolous/DRR/wiki/Join-the-Federation

    Side effects: populates the global g_config, creates the storage
    directory tree and chdirs into it, kills any previously running
    manager, and configures logging.
    """
    global g_config
    Config = ConfigParser.ConfigParser()
    Config.read(config)
    g_config = config_section_map('Main', Config)
    defaults = {
        # The log level to be put into the indycast.log file.
        'loglevel': 'WARN',
        # The recording mode, either 'full' meaning to record everything, or != 'full'
        # meaning to record only when an intent is matched.
        'mode': 'full',
        # The relative, or absolute directory to put things in
        'storage': "%s/radio" % os.path.expanduser('~'),
        # The (day) time to expire an intent to record
        'expireafter': 45,
        # The TCP port to run the server on
        'port': '5000',
        # The (day) duration we should be archiving things.
        'archivedays': 14,
        # The (second) time in looking to see if our stream is running
        'cycletime': 7,
        # The (second) time to start a stream BEFORE the lapse of the cascade-time
        'cascadebuffer': 15,
        # The (second) time between cascaded streams
        'cascadetime': 60 * 15,
        # Cloud credentials (ec2, azure etc)
        'cloud': False,
        #
        # When to get things off local disk and store to the cloud
        # This means that after this many days data is sent remote and then
        # retained for `archivedays`. This makes the entire user-experience
        # a bit slower of course, and has an incurred throughput cost - but
        # it does save pricey VPS disk space which seems to come at an unusual
        # premium.
        #
        'cloudarchive': 2,
        # Run the pruning every this many days (float)
        'pruneevery': 0.5
    }
    for k, v in defaults.items():
        if k not in g_config:
            g_config[k] = v
        else:
            # Coerce config-file strings to the type of the default value.
            if type(v) is int: g_config[k] = int(g_config[k])
            elif type(v) is float: g_config[k] = float(g_config[k])
    # in case someone is specifying ~/radio
    g_config['storage'] = os.path.expanduser(g_config['storage'])
    if g_config['cloud']:
        g_config['cloud'] = os.path.expanduser(g_config['cloud'])
        if os.path.exists(g_config['cloud']):
            # If there's a cloud configuration file then we read that too
            cloud_config = ConfigParser.ConfigParser()
            cloud_config.read(g_config['cloud'])
            g_config['azure'] = config_section_map('Azure', cloud_config)
    if not os.path.isdir(g_config['storage']):
        try:
            # If I can't do this, that's fine.
            os.mkdir(g_config['storage'])
        except Exception as exc:
            # We make it from the current directory
            g_config['storage'] = defaults['storage']
            if not os.path.isdir(g_config['storage']):
                os.mkdir(g_config['storage'])
    # Go to the callsign level in order to store multiple station feeds on a single
    # server in a single parent directory without forcing the user to decide what goes
    # where.
    g_config['storage'] += '/%s/' % g_config['callsign']
    g_config['storage'] = re.sub('\/+', '/', g_config['storage'])
    if not os.path.isdir(g_config['storage']):
        os.mkdir(g_config['storage'])
    # We have a few sub directories for storing things
    for subdir in ['streams', 'slices']:
        if not os.path.isdir(g_config['storage'] + subdir):
            os.mkdir(g_config['storage'] + subdir)
    # Now we try to do all this stuff again
    if os.path.isdir(g_config['storage']):
        #
        # There's a bug after we chdir, where the multiprocessing is trying to grab the same
        # invocation as the initial argv[0] ... so we need to make sure that if a user did
        # ./blah this will be maintained.
        #
        if not os.path.isfile(g_config['storage'] + __file__):
            os.symlink(os.path.abspath(__file__), g_config['storage'] + __file__)
        os.chdir(g_config['storage'])
    else:
        logging.warning("Can't find %s. Using current directory." % g_config['storage'])
    # If there is an existing pid-manager, that means that
    # there is probably another version running.
    if os.path.isfile(PIDFILE_MANAGER):
        with open(PIDFILE_MANAGER, 'r') as f:
            oldserver = f.readline()
            try:
                os.kill(int(oldserver), 15)
                # We give it a few seconds to shut everything down
                # before trying to proceed
                time.sleep(PROCESS_DELAY / 2)
            except:
                pass
    # From https://docs.python.org/2/howto/logging.html
    numeric_level = getattr(logging, g_config['loglevel'].upper(), None)
    if not isinstance(numeric_level, int):
        # Bug fix: this previously interpolated an undefined name
        # ('loglevel') and would have raised a NameError instead of the
        # intended ValueError.
        raise ValueError('Invalid log level: %s' % g_config['loglevel'])
    logging.basicConfig(level=numeric_level, filename='indycast.log', datefmt='%Y-%m-%d %H:%M:%S',format='%(asctime)s %(message)s')
    #
    # Increment the number of times this has been run so we can track the stability of remote
    # servers and instances.
    #
    DB.incr('runcount')
    signal.signal(signal.SIGINT, shutdown)
if __name__ == "__main__":
    # From http://stackoverflow.com/questions/25504149/why-does-running-the-flask-dev-server-run-itself-twice
    if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
        # We are the werkzeug reloader child - just run the server part.
        server_manager(g_config)
    else:
        parser = argparse.ArgumentParser()
        parser.add_argument("-c", "--config", default="./indy_config.txt", help="Configuration file (default ./indy_config.txt)")
        parser.add_argument('--version', action='version', version='indycast %s :: July 2015' % __version__)
        args = parser.parse_args()
        read_config(args.config)
        # Build mp3 frame maps in the background while we start up.
        map_pid = Process(target=make_maps, args=())
        map_pid.start()
        pid = change_proc_name("%s-manager" % g_config['callsign'])
        # This is the pid that should be killed to shut the system
        # down.
        g_manager_pid = pid
        with open(PIDFILE_MANAGER, 'w+') as f:
            f.write(str(pid))
        stream_manager()
fixing weird stream
#!/usr/bin/python -O
import argparse
import binascii
import ConfigParser
import json
import logging
import lxml.etree as ET
import math
import os
import pycurl
import marshal
import gzip
import re
import setproctitle as SP
import signal
import struct
import sys
import time
import socket
import StringIO
import threading
import lib.db as DB
#
# This is needed to force ipv4 on ipv6 devices. It's sometimes needed
# if there isn't a clean ipv6 route to get to the big wild internet.
# In these cases, a pure ipv6 route simply will not work. People aren't
# always in full control of every hop ... so it's much safer to force
# ipv4 then optimistically cross our fingers.
#
# Keep a reference to the real resolver before we patch it.
origGetAddrInfo = socket.getaddrinfo
def getAddrInfoWrapper(host, port, family=0, socktype=0, proto=0, flags=0):
    # Ignore the requested address family and always resolve over IPv4.
    return origGetAddrInfo(host, port, socket.AF_INET, socktype, proto, flags)
# Replace the original socket.getaddrinfo by our version
socket.getaddrinfo = getAddrInfoWrapper
import urllib2
import urllib
from datetime import datetime, timedelta, date
from glob import glob
from flask import Flask, request, jsonify, Response, url_for
import flask
from subprocess import call
import subprocess
from multiprocessing import Process, Queue
g_start_time = time.time()
g_queue = Queue()
g_config = {}
g_download_pid = 0
g_manager_pid = 0
g_params = {}
__version__ = os.popen("git describe").read().strip()
# Most common frame-length ... in practice, I haven't
# seen other values in the real world
FRAME_LENGTH = (1152.0 / 44100)
# Everything is presumed to be weekly and on the minute
# scale. We use this to do wrap around when necessary
MINUTES_PER_WEEK = 10080
ONE_DAY = 60 * 60 * 24
#
# Some stations don't start you off with a valid mp3 header
# (such as kdvs), so we have to just seek into the file
# and look for one. This is the number of bytes we try.
# In practice, 217 appears to be enough, so we make it about
# ten times that and cross our fingers
#
MAX_HEADER_ATTEMPTS = 2048
#
# Maintain a pidfile for the manager and the webserver (which
# likes to become a zombie ... braaaainnns!) so we have to take
## care of it separately and specially.
#
PIDFILE_MANAGER = 'pid-manager'
PIDFILE_WEBSERVER = 'pid-webserver'
#
# The process delay is used throughout to measure things like the delay in
# forking a subprocesses, waiting for DNS, and then starting a stream or
# waiting for all the sub-processes like the web-server to clean up and free
# the tcp port they are listening on, and shut down.
#
# Making this generous shouldn't be discouraged as it is also used as a metric
## to calculate the number of accommodations that are to be given to make service
# continuous.
#
# Things are specified in multiples of this value ... for instance PROCESS_DELAY
# / 4 or * 2. 4 is a good number.
#
PROCESS_DELAY = 4
# From https://wiki.python.org/moin/ConfigParserExamples
def config_section_map(section, Config):
    """
    Takes a section in a config file and makes a dictionary
    out of it.
    Returns that dictionary
    """
    result = {}
    for option in Config.options(section):
        try:
            value = Config.get(section, option)
            if value == -1:
                logging.info("skip: %s" % option)
            result[option] = value
        except Exception as exc:
            # Unreadable options are recorded as None rather than dropped.
            logging.warning("exception on %s!" % option)
            result[option] = None
    return result
def change_proc_name(what):
    """
    Sets a more human-readable process name for the various
    parts of the system to be viewed in top/htop

    Returns this process's pid.
    """
    SP.setproctitle(what)
    print "[%s:%d] Starting" % (what, os.getpid())
    return os.getpid()
def shutdown(signal=15, frame=False):
    """ Shutdown is hit on the keyboard interrupt """
    # Note: the 'signal' parameter shadows the signal module inside this
    # function; it is the numeric signal to forward to the webserver.
    global g_queue, g_start_time, g_config
    # Try to manually shutdown the webserver
    if os.path.isfile(PIDFILE_WEBSERVER):
        with open(PIDFILE_WEBSERVER, 'r') as f:
            webserver = f.readline()
            try:
                os.kill(int(webserver), signal)
            except:
                pass
            os.unlink(PIDFILE_WEBSERVER)
    title = SP.getproctitle()
    print "[%s:%d] Shutting down" % (title, os.getpid())
    DB.shutdown()
    logging.info("[%s:%d] Shutting down through signal %d" % (title, os.getpid(), signal))
    # The manager logs its uptime; other non-webserver processes remove
    # the manager pidfile on the way out.
    if title == ('%s-manager' % g_config['callsign']):
        logging.info("Uptime: %ds", time.time() - g_start_time)
    elif title != ('%s-webserver' % g_config['callsign']) and os.path.isfile(PIDFILE_MANAGER):
        os.unlink(PIDFILE_MANAGER)
    # Tell the manager loop that we are going away.
    g_queue.put(('shutdown', True))
    sys.exit(0)
##
## Audio related functions
##
def audio_get_map(fname):
    """
    Retrieves a map file associated with the mp3.

    Returns the unmarshalled [frame_sig, start_byte] pair, or the tuple
    (None, None) when no map file exists yet.
    """
    map_name = fname if fname.endswith('.map') else fname + '.map'
    if not os.path.exists(map_name):
        return None, None
    handle = gzip.open(map_name, 'r')
    try:
        return marshal.loads(handle.read())
    finally:
        handle.close()
def audio_list_info(file_list):
    """
    Aggregates stream info over a stitch list: takes the first file's
    info and replaces the duration and end minute with totals computed
    across the whole list.
    """
    info = audio_stream_info(file_list[0]['name'])
    # Some things are the same such as the
    # week, start_minute, start_date
    info['duration_sec'] = 0
    for item in file_list:
        info['duration_sec'] += item['duration_sec']
    # Bug fix: the end minute is the start minute plus the total duration
    # in minutes - the previous code added duration_sec to itself (compare
    # the equivalent expression in audio_stream_info).
    info['end_minute'] = (info['duration_sec'] / 60.0 + info['start_minute']) % MINUTES_PER_WEEK
    return info
def audio_stream_info(fname, guess_time=False):
    """
    Determines the date the thing starts,
    the minute time it starts, and the duration

    If guess_time is set, then that value is used
    as the audio time. It can speed things up
    by avoiding an opening of the file all together.
    It's sometimes an ok thing to do.
    """
    # Lists of stitch entries are aggregated by the sibling helper.
    if type(fname) is not str:
        return audio_list_info(fname)
    # File names carry a unix timestamp: "<callsign>-<unixtime>." or "_".
    ts_re = re.compile('-(\d*)[.|_]')
    ts = ts_re.findall(fname)
    duration = 0
    start_minute = 0
    start_date = 0
    # NOTE(review): if the name carries no timestamp, start_date stays 0
    # and the isocalendar() call below will fail - presumably all callers
    # pass timestamped names; confirm.
    if ts:
        unix_time = int(ts[0])
        start_minute = time_to_minute(unix_time)
        start_date = datetime.fromtimestamp(unix_time)
    try:
        duration = guess_time if guess_time else audio_time(fname)
    except Exception as exc:
        # If we can't find a duration then we try to see if it's in the file name
        ts_re_duration = re.compile('_(\d*).mp3')
        ts = ts_re_duration.findall(fname)
        if ts:
            duration = int(ts[0]) * 60
    return {
        # The week number
        'week': start_date.isocalendar()[1],
        'name': fname,
        'start_minute': start_minute,
        'start_date': start_date,
        'end_minute': (duration / 60.0 + start_minute) % MINUTES_PER_WEEK,
        'duration_sec': duration
    }
def audio_crc(fname, blockcount=-1, only_check=False):
    """
    Opens an mp3 file, find all the blocks, the byte offset of the blocks, and if they
    are audio blocks, construct a crc32 mapping of some given beginning offset of the audio
    data ... this is intended for stitching.

    Returns (frame_sig, start_byte) lists, or True when only_check is set
    and a map already exists.
    """
    # Simply make sure that there is a map associated with the
    # mp3. Otherwise create one.
    map_name = fname if fname.endswith('.map') else fname + '.map'
    if only_check and os.path.exists(map_name):
        return True
    # A previously computed map short-circuits the whole parse.
    crc32, offset = audio_get_map(fname)
    if crc32 is not None:
        return crc32, offset
    frame_sig = []
    start_byte = []
    first_header_seen = False
    header_attempts = 0
    #
    # Looking at the first 16 bytes of the payload yield a rate that is 99.75% unique
    # as tested over various corpi ranging from 1,000,000 - 7,000,000 blocks.
    #
    # There's an additional precautions of looking for a string of 4 matches which
    # mitigates this even further
    #
    read_size = 4
    freqTable = [ 44100, 48000, 32000, 0 ]
    brTable = [
        0, 32, 40, 48,
        56, 64, 80, 96,
        112, 128, 160, 192,
        224, 256, 320, 0
    ]
    f = open(fname, 'rb')
    # blockcount == -1 means "no limit"; the loop exits at EOF instead.
    while blockcount != 0:
        if first_header_seen:
            blockcount -= 1
        else:
            # Still hunting for the first valid frame header: advance one
            # byte at a time after the first couple of attempts.
            header_attempts += 1
            if header_attempts > 2:
                # Go 1 back.
                f.seek(-1, 1)
        frame_start = f.tell()
        header = f.read(2)
        if header:
            # 0xfffb / 0xfffa: MPEG-1 layer III frame sync.
            if header == '\xff\xfb' or header == '\xff\xfa':
                try:
                    b = ord(f.read(1))
                # If we are at the EOF
                except:
                    break
                samp_rate = freqTable[(b & 0x0f) >> 2]
                bit_rate = brTable[b >> 4]
                pad_bit = (b & 0x3) >> 1
                # from http://id3.org/mp3Frame
                try:
                    frame_size = (144000 * bit_rate / samp_rate) + pad_bit
                # If there's a /0 error
                except:
                    continue
                if not first_header_seen:
                    first_header_seen = True
                    # We try to record the CBR associated with this
                    # stream
                    if not DB.get('bitrate', use_cache = True):
                        DB.set('bitrate', bit_rate)
                # Rest of the header
                throw_away = f.read(1)
                # Get the signature
                crc = f.read(read_size)
                frame_sig.append(crc)
                start_byte.append(frame_start)
                # Move forward the frame f.read size + 4 byte header
                throw_away = f.read(frame_size - (read_size + 4))
            # ID3 tag for some reason
            elif header == '\x49\x44':
                # Rest of the header
                throw_away = f.read(4)
                #
                # Quoting http://id3.org/d3v2.3.0
                #
                # The ID3v2 tag size is encoded with four bytes where the most significant bit
                # (bit 7) is set to zero in every byte, making a total of 28 bits. The zeroed
                # bits are ignored, so a 257 bytes long tag is represented as $00 00 02 01.
                #
                candidate = struct.unpack('>I', f.read(4))[0]
                size = ((candidate & 0x007f0000) >> 2 ) | ((candidate & 0x00007f00) >> 1 ) | (candidate & 0x0000007f)
                f.read(size)
            # ID3 TAG -- 128 bytes long
            elif header == '\x54\x41':
                # We've already read 2 so we can go 126 forward
                f.read(126)
            elif len(header) == 1:
                # We are at the end of file, but let's just continue.
                # NOTE(review): bare 'next' is a no-op expression, not a
                # 'continue' - the loop falls through to the next read
                # regardless, so behavior is the same either way.
                next
            elif first_header_seen or header_attempts > MAX_HEADER_ATTEMPTS:
                print "%d[%d/%d]%s:%s:%s %s %d" % (len(frame_sig), header_attempts, MAX_HEADER_ATTEMPTS, binascii.b2a_hex(header), binascii.b2a_hex(f.read(5)), fname, hex(f.tell()), len(start_byte) * (1152.0 / 44100) / 60)
                # This means that perhaps we didn't guess the start correct so we try this again
                if len(frame_sig) == 1 and header_attempts < MAX_HEADER_ATTEMPTS:
                    print "False start -- trying again"
                    # seek to the first start byte + 1
                    f.seek(start_byte[0] + 2)
                    # discard what we thought was the first start byte and
                    # frame signature
                    start_byte = []
                    frame_sig = []
                    first_header_seen = False
                else:
                    break
        else:
            # f.read returned nothing: end of file.
            break
    f.close()
    # If we get here that mans that we don't have a map
    # file yet. So we just creat it.
    map_name = fname + '.map'
    if not os.path.exists(map_name):
        with gzip.open(map_name, 'wb') as f:
            f.write(marshal.dumps([frame_sig, start_byte]))
    return frame_sig, start_byte
def audio_time(fname):
    """
    Determines the duration of an audio file by doing some estimates based on the offsets
    Returns the audio time in seconds
    """
    # Each mapped frame accounts for a fixed FRAME_LENGTH seconds, so the
    # duration is simply the frame count scaled by that constant.
    signature, offsets = audio_crc(fname)
    return len(offsets) * FRAME_LENGTH
def audio_stitch_and_slice(file_list, start_minute, duration_minute):
    """
    Given a file_list in a directory and a duration, this function will seek out
    adjacent files if necessary and serialize them accordingly, and then return the
    file name of an audio slice that is the combination of them.

    Returns False when there is nothing sliceable.
    """
    if not file_list:
        return False
    # We presume that there is a file list we need to make
    stitched_list = audio_stitch(file_list, force_stitch=True)
    if len(stitched_list) > 1:
        info = audio_stream_info(stitched_list)
    else:
        # NOTE(review): a single-entry stitch list (one source file) is
        # treated as a failure here - confirm that single-file slices are
        # genuinely unsupported before relying on this behavior.
        logging.warn("Unable to stitch file list")
        return False
    # After we've stitched together the audio then we start our slice
    # by figuring our the start_minute of the slice, versus ours
    start_slice = max(start_minute - info['start_minute'], 0)
    # Now we need to take the duration of the stream we want, in minutes, and then
    # make sure that we don't exceed the length of the file.
    duration_slice = min(duration_minute, start_slice + info['duration_sec'] / 60.0)
    sliced_name = audio_list_slice(
        list_in=stitched_list,
        start_minute=start_slice,
        duration_minute=duration_slice
    )
    return sliced_name
def audio_list_slice_process(list_in, name_out, duration_sec, start_sec):
    """
    Worker process: writes the frames spanning [start_sec, start_sec +
    duration_sec) from a stitch list into name_out.
    """
    global g_config
    pid = change_proc_name("%s-audioslice" % g_config['callsign'])
    out = open(name_out, 'wb+')
    for ix in range(0, len(list_in)):
        item = list_in[ix]
        # get the regular map
        crc32, offset = audio_crc(item['name'])
        # The final file is clipped to the remaining duration; files in
        # between are copied to their last mapped frame.
        if ix == len(list_in) - 1:
            frame_end = min(int(math.ceil(duration_sec / FRAME_LENGTH)), len(offset) - 1)
        else:
            frame_end = len(offset) - 1
        if ix == 0:
            # First file: seek forward to the requested start time.
            frame_start = min(max(int(math.floor(start_sec / FRAME_LENGTH)), 0), len(offset) - 1)
            duration_sec -= (item['duration_sec'] - start_sec)
        else:
            frame_start = item['start_offset']
            duration_sec -= item['duration_sec']
        # try and get the mp3 referred to by the map file
        fin = file_get(item['name'][:-4])
        if fin:
            fin.seek(offset[frame_start])
            out.write(fin.read(offset[frame_end] - offset[frame_start]))
            fin.close()
        # If we fail to get the mp3 file then we can suppose that
        # the map file is bad so we just wince and remove it.
        else:
            os.unlink(item['name'])
            logging.warn("Unable to find %s's corresponding mp3, deleting" % item['name'])
    out.close()
    # If we failed to do anything this is a tragedy
    # and we just dump the file
    #
    # We take files under some really nominal threshold as being invalid.
    if os.path.getsize(name_out) < 1000:
        logging.warn("Unable to create %s - no valid slices" % name_out)
        os.unlink(name_out)
def audio_list_slice(list_in, start_minute, duration_minute=-1):
    """
    Takes some stitch list, list_in and then create a new one based on the start and end times
    by finding the closest frames and just doing an extraction.

    Returns the slice file name immediately; the actual extraction runs
    in a background process.
    """
    duration_sec = duration_minute * 60.0
    first_file = list_in[0]['name']
    # Reuse the "<callsign>-<unixtime>" parts from the source file name.
    callsign, unix_time = re.findall('(\w*)-(\d+)', first_file)[0]
    name_out = "slices/%s-%d_%d.mp3" % (callsign, int(unix_time) + start_minute * 60, duration_minute)
    start_sec = start_minute * 60.0
    # Already sliced by a previous request - reuse it.
    if os.path.isfile(name_out) and os.path.getsize(name_out) > 0:
        return name_out
    #
    # We may need to pull things down from the cloud so it's better if we just return
    # the eventual mp3 name here and not block. As it turns out, pulling the blobs from
    # the cloud is rather fast on the vpss (a matter of seconds) so by the time the user
    # requests an mp3, it will probably exist. If it doesn't, then eh, we'll figure it out.
    #
    slice_process = Process(target=audio_list_slice_process, args=(list_in, name_out, duration_sec, start_sec))
    slice_process.start()
    return name_out
def audio_stitch(file_list, force_stitch=False):
    """
    Takes a list of files and then attempt to seamlessly stitch them
    together by looking at their crc32 checksums of the data payload in the blocks.

    Returns a list of stitch descriptors (name plus byte/frame offsets
    and per-file durations).
    """
    first = {'name': file_list[0]}
    duration = 0
    crc32, offset = audio_crc(first['name'])
    first['crc32'] = crc32
    first['offset'] = offset
    # The first file always enters the result whole.
    args = [{
        'name': first['name'],
        'start_byte': 0,
        'start_offset': 0,
        'end_byte': first['offset'][-1],
        'start_minute': 0,
        'duration_sec': (len(first['offset']) - 1) * FRAME_LENGTH
    }]
    duration += len(first['offset']) * FRAME_LENGTH
    for name in file_list[1:]:
        second = {'name': name}
        crc32, offset = audio_crc(name)
        second['crc32'] = crc32
        second['offset'] = offset
        isFound = True
        pos = -1
        try:
            # Look for where the tail frame signatures of `first` appear
            # inside `second`; a short run of matches marks the splice.
            while True:
                pos = second['crc32'].index(first['crc32'][-2], pos + 1)
                isFound = True
                for i in xrange(5, 1, -1):
                    if second['crc32'][pos - i + 2] != first['crc32'][-i]:
                        isFound = False
                        logging.warn("Indices @%d do not match between %s and %s" % (pos, first['name'], second['name']))
                        break
                # If we got here it means that everything matches
                if isFound: break
                else: continue
        except Exception as exc:
            # index() raising ValueError means no candidate overlap exists.
            logging.warn("Cannot find indices between %s and %s" % (first['name'], second['name']))
            pos = 1
        if isFound or force_stitch:
            args.append({
                'name': second['name'],
                'start_byte': second['offset'][pos],
                'end_byte': second['offset'][-2],
                'start_offset': pos,
                'start_minute': pos * FRAME_LENGTH,
                'duration_sec': (len(second['offset']) - pos - 1) * FRAME_LENGTH
            })
            duration += (len(second['offset']) - pos - 1) * FRAME_LENGTH
            # Chain: the file just added becomes the comparison basis.
            first = second
            continue
        # A miss without force_stitch ends the stitch here.
        break
    return args
##
## Time related functions
##
def time_to_minute(unix_time):
    """ Takes a given unix time and finds the week minute corresponding to it. """
    # Accept either an int unix timestamp or a ready-made datetime.
    when = datetime.fromtimestamp(unix_time) if type(unix_time) is int else unix_time
    return (when.weekday() * 24 + when.hour) * 60 + when.minute
def time_sec_now(offset_sec=0):
    """
    Returns the unix time with respect to the timezone of the station being recorded.
    Accepts an optional offset_sec to forward the time into the future
    """
    # NOTE(review): strftime('%s') is a glibc extension and is not
    # portable to every platform.
    return int((datetime.utcnow() + timedelta(seconds=offset_sec, minutes=time_get_offset())).strftime('%s'))
def time_minute_now():
    """ Returns the mod 10080 week minute with respect to the timezone of the station being recorded """
    # time_get_offset returns the station's minute offset from UTC.
    return time_to_minute(datetime.utcnow() + timedelta(minutes=time_get_offset()))
def time_to_utc(day_str, hour):
    """
    Takes the nominal weekday (sun, mon, tue, wed, thu, fri, sat)
    and a 12 hour time hh:mm [ap]m and converts it to our absolute units
    with respect to the timestamp in the configuration file

    Returns the week minute, or False when the day or time is unparsable.
    """
    try:
        day_number = ['mon','tue','wed','thu','fri','sat','sun'].index(day_str.lower())
    except Exception as exc:
        return False
    local = day_number * (60 * 24)
    time_re_solo = re.compile('(\d{1,2})([ap])m', re.I)
    time_re_min = re.compile('(\d{1,2}):(\d{2})([ap])m', re.I)
    # Try "3pm" first, then "3:30pm".
    match = time_re_solo.match(hour)
    if match:
        hh = int(match.groups()[0])
        mm = 0
    else:
        match = time_re_min.match(hour)
        if match:
            hh = int(match.groups()[0])
            mm = int(match.groups()[1])
    if not match:
        return False
    # Bug fix: handle the 12 o'clock hour of the 12-hour clock - 12am is
    # hour 0 and 12pm is hour 12; the old code treated 12am as 720.
    hh %= 12
    local += hh * 60 + mm
    # Bug fix: the regexes are case-insensitive but the old comparison was
    # not, so '1PM' was silently treated as am; lowercase before testing.
    if match.groups()[-1].lower() == 'p':
        local += (12 * 60)
    #utc = local + time_get_offset()
    return local
def time_get_offset(force=False):
    """
    Contacts the goog, giving a longitude and lattitude and gets the time
    offset with regard to the UTC. There's a sqlite cache entry for the offset.
    Returns an int minute offset (note: minutes, not seconds - the raw
    second offsets from the API are divided by 60 below).
    """
    offset = DB.get('offset', expiry=ONE_DAY)
    if not offset or force:
        when = int(time.time())
        # TODO(review): this API key is hard-coded in the source; it
        # really ought to live in the config file.
        api_key = 'AIzaSyBkyEMoXrSYTtIi8bevEIrSxh1Iig5V_to'
        # Bug fix: the query string had been mangled by an HTML-entity
        # corruption ("&times" rendered as a multiplication sign), which
        # destroyed the "timestamp" parameter; restore "&timestamp=".
        url = "https://maps.googleapis.com/maps/api/timezone/json?location=%s,%s&timestamp=%d&key=%s" % (g_config['lat'], g_config['long'], when, api_key)
        stream = urllib2.urlopen(url)
        data = stream.read()
        opts = json.loads(data)
        if opts['status'] == 'OK':
            logging.info("Location: %s | offset: %s" % (opts['timeZoneId'], opts['rawOffset']))
            # rawOffset/dstOffset are seconds; cache the total in minutes.
            offset = (int(opts['rawOffset']) + int(opts['dstOffset'])) / 60
            DB.set('offset', offset)
        else:
            offset = 0
    return int(offset)
##
## Storage and file related
##
def cloud_connect():
    """
    Connects to the azure blob service named in the config and makes sure
    the 'streams' container exists. Returns (blob_service, container).
    """
    from azure.storage import BlobService
    global g_config
    container = 'streams'
    blob_service = BlobService(g_config['azure']['storage_account_name'], g_config['azure']['primary_access_key'])
    # Idempotent: creating an existing container is a no-op.
    blob_service.create_container(container, x_ms_blob_public_access='container')
    return blob_service, container
def cloud_unlink(path):
    """
    Deletes the cloud blob corresponding to a local path.

    Blobs are keyed by basename (see cloud_put/cloud_get), so we delete
    by basename as well - the previous code computed the basename but
    then passed the full path, which could never match an uploaded blob.
    """
    fname = os.path.basename(path)
    blob_service, container = cloud_connect()
    return blob_service.delete_blob(container, fname)
def cloud_put(path):
    """
    Uploads a local file to the cloud container under its basename.
    Returns the service response on success, False otherwise.
    """
    blob_service, container = cloud_connect()
    if blob_service:
        try:
            res = blob_service.put_block_blob_from_path(
                container,
                os.path.basename(path),
                path,
                max_connections=5,
            )
            return res
        except:
            logging.debug('Unable to put %s in the cloud.' % path)
    return False
def cloud_get(path):
    """
    Downloads a blob (keyed by basename) into the local streams/
    directory. Returns True on success, False otherwise.
    """
    blob_service, container = cloud_connect()
    if blob_service:
        fname = os.path.basename(path)
        try:
            blob_service.get_blob_to_path(
                container,
                fname,
                'streams/%s' % fname,
                max_connections=8,
            )
            return True
        except:
            logging.debug('Unable to retreive %s from the cloud.' % path)
    return False
def file_get_size(fname):
    """ Gets a file size or just plain guesses it if it doesn't exist yet. """
    if os.path.exists(fname):
        return os.path.getsize(fname)
    # The file isn't on disk yet; estimate from the "_<minutes>.mp3"
    # suffix in the name, if present.
    found = re.findall('_(\d*).mp3', fname)
    if not found:
        # No duration hint in the name either - 0 is all we can say.
        return 0
    duration_min = int(found[0])
    bitrate = int(DB.get('bitrate') or 128)
    #
    # Estimating mp3 length is actually pretty easy if you don't have ID3 headers.
    # MP3s are rated at things like 128kb/s ... well there you go.
    #
    # They consider a k to be 10^3, not 2^10
    #
    return (bitrate / 8) * (duration_min * 60) * (10 ** 3)
def file_prune():
    """ Gets rid of files older than archivedays - cloud stores things if relevant """
    global g_config
    pid = change_proc_name("%s-cleanup" % g_config['callsign'])
    db = DB.connect()
    duration = g_config['archivedays'] * ONE_DAY
    cutoff = time.time() - duration
    cloud_cutoff = False
    if g_config['cloud']:
        cloud_cutoff = time.time() - g_config['cloudarchive'] * ONE_DAY
    # Dump old streams and slices
    count = 0
    for fname in glob('*/*.mp3'):
        #
        # Depending on many factors this could be running for hours
        # or even days. We want to make sure this isn't a blarrrghhh
        # zombie process or worse yet, still running and competing with
        # other instances of itself.
        #
        if not manager_is_running():
            shutdown()
        ctime = os.path.getctime(fname)
        # We observe the rules set up in the config.
        if ctime < cutoff:
            logging.debug("Prune: %s" % fname)
            os.unlink(fname)
            count += 1
        elif cloud_cutoff and ctime < cloud_cutoff:
            # Old enough for the cloud but not old enough to discard.
            logging.debug("Prune[cloud]: putting %s" % fname)
            cloud_put(fname)
            try:
                os.unlink(fname)
            except:
                logging.debug("Prune[cloud]: Couldn't remove %s" % fname)
    # The map names are different since there may or may not be a corresponding
    # cloud thingie associated with it.
    for fname in glob('*/*.map'):
        # Bug fix: examine each map file's own ctime - the previous code
        # reused the stale ctime left over from the last iteration of the
        # mp3 loop above (and raised NameError when that loop was empty).
        ctime = os.path.getctime(fname)
        if ctime < cutoff:
            # If there's a cloud account at all then we need to unlink the
            # equivalent mp3 file
            if cloud_cutoff:
                cloud_unlink(fname[:-4])
            # now only after we've deleted from the cloud can we delete the local file
            os.unlink(fname)
    logging.info("Found %d files older than %s days." % (count, g_config['archivedays']))
    return 0
def file_get(path):
    """
    Return a readable binary handle for path. A file that is missing
    locally is first fetched from the network store; False is returned
    when it can be found in neither place.
    """
    if not os.path.exists(path) and not cloud_get(path):
        return False
    return open(path, 'rb')
def file_find_streams(start_list, duration):
    """
    Given a start week minute this looks for streams in the storage
    directory that match it - regardless of duration ... so it may return
    partial shows results.

    start_list -- week-minute(s) to search for; an int is promoted to a
                  one-element list. A value of -1 appears to mean "take
                  every stream" -- TODO confirm against callers.
    duration   -- requested length in minutes.
    Returns a list of audio_stream_info() dicts for the matching slices.
    """
    global g_config
    stream_list = []
    # Convenience: accept a bare int for a single start time.
    if type(start_list) is int:
        start_list = [start_list]
    file_list = glob('streams/*.map')
    # Sort nominally - since we have unix time in the name, this should come out
    # as sorted by time for us for free.
    file_list.sort()
    stitch_list = []
    # TODO: This start list needs to be chronologically as opposed to
    # every monday, then every tuesday, etc ... for multi-day stream requests
    for start in start_list:
        end = (start + duration) % MINUTES_PER_WEEK
        # We want to make sure we only get the edges so we need to have state
        # between the iterations.
        next_valid_start_minute = 0
        current_week = 0
        for filename in file_list:
            i = audio_stream_info(filename, guess_time=g_config['cascadetime'])
            # Continuation of the stream currently being stitched together.
            if i['start_minute'] < next_valid_start_minute and i['week'] == current_week:
                stitch_list.append(filename)
                continue
            # We are only looking for starting edges of the stream
            #
            # If we started recording before this is fine as long as we ended recording after our start
            if start == -1 or (i['start_minute'] < start and i['end_minute'] > start) or (i['start_minute'] > start and i['start_minute'] < end):
                if start == -1:
                    fname = filename
                else:
                    # Close out the previous accumulation as a sliced show.
                    fname = audio_stitch_and_slice(stitch_list, start, duration)
                stitch_list = [filename]
                next_valid_start_minute = (start + duration) % MINUTES_PER_WEEK
                current_week = i['week']
                if fname:
                    stream_list.append(audio_stream_info(fname))
        if start != -1:
            # Flush whatever is still accumulated for this start time.
            fname = audio_stitch_and_slice(stitch_list, start, duration)
            if fname:
                stream_list.append(audio_stream_info(fname))
    return stream_list
def server_generate_xml(showname, feed_list, duration, weekday_list, start, duration_string):
    """
    Builds the podcast RSS document for a show.

    showname        - from the incoming request url
    feed_list       - list of stream-info dicts (keys used: 'name', 'start_date')
    duration        - show length in minutes
    weekday_list    - short weekday tokens ('sun', 'mon', ...)
    start           - start-time string, used only for display
    duration_string - the raw duration as it appeared in the url

    Returns a flask Response holding the xml.
    In the xml file we will lie about the duration to make life easier.
    """
    global g_config
    day_map = {
        'sun': 'Sunday',
        'mon': 'Monday',
        'tue': 'Tuesday',
        'wed': 'Wednesday',
        'thu': 'Thursday',
        'fri': 'Friday',
        'sat': 'Saturday'
    }
    day_list = [day_map[weekday] for weekday in weekday_list]
    if len(day_list) == 1:
        week_string = day_list[0]
    else:
        # an oxford comma, how cute.
        week_string = "%s and %s" % (', '.join(day_list[:-1]), day_list[-1])
    base_url = 'http://%s.indycast.net:%s/' % (g_config['callsign'], g_config['port'])
    callsign = g_config['callsign']
    # Namespaces consumed by iTunes, media-rss readers and feedburner.
    nsmap = {
        'dc': 'http://purl.org/dc/elements/1.1/',
        'media': 'http://search.yahoo.com/mrss/',
        'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd',
        'feedburner': 'http://rssnamespace.org/feedburner/ext/1.0'
    }
    root = ET.Element("rss", nsmap=nsmap)
    root.attrib['version'] = '2.0'
    channel = ET.SubElement(root, "channel")
    # Channel-level metadata.
    for k, v in {
        '{%s}summary' % nsmap['itunes']: showname,
        '{%s}subtitle' % nsmap['itunes']: showname,
        '{%s}category' % nsmap['itunes']: 'podcast',
        'title': showname,
        'link': base_url,
        'copyright': callsign,
        'description': "%s is a %s show recorded every %s on %s at %s. Saved and delivered when you want it, through a volunteer network at http://indycast.net." % (showname, duration_string, week_string, callsign.upper(), start),
        'language': 'en'
    }.items():
        ET.SubElement(channel, k).text = v
    itunes_image = ET.SubElement(channel, '{%s}image' % nsmap['itunes'])
    itunes_image.attrib['href'] = 'http://indycast.net/icon/%s_1400.png' % urllib.quote(showname)
    media_image = ET.SubElement(channel, '{%s}thumbnail' % nsmap['media'])
    media_image.attrib['url'] = 'http://indycast.net/icon/%s_1400.png' % urllib.quote(showname)
    image = ET.SubElement(channel, 'image')
    for k, v in {
        'url': 'http://indycast.net/icon/%s_200.png' % urllib.quote(showname),
        'title': showname,
        'link': 'http://indycast.net'
    }.items():
        ET.SubElement(image, k).text = v
    # One <item> per recorded episode.
    for feed in feed_list:
        file_name = feed['name']
        link = "%s%s" % (base_url, file_name)
        item = ET.SubElement(channel, 'item')
        # iTunes duration is [h:]mm:ss.
        # BUG FIX: the boundary was `duration > 60`, which rendered an
        # exactly-60-minute show as "00:00" instead of "1:00:00".
        itunes_duration = "%02d:00" % (duration % 60)
        if duration >= 60:
            itunes_duration = "%d:%s" % (int(math.floor(duration / 60)), itunes_duration)
        for k, v in {
            '{%s}explicit' % nsmap['itunes']: 'no',
            '{%s}author' % nsmap['itunes']: callsign,
            '{%s}duration' % nsmap['itunes']: itunes_duration,
            '{%s}summary' % nsmap['itunes']: showname,
            '{%s}creator' % nsmap['dc']: callsign.upper(),
            '{%s}origEnclosureLink' % nsmap['feedburner']: link,
            '{%s}origLink' % nsmap['feedburner']: base_url,
            'description': "%s recorded on %s" % (showname, feed['start_date'].strftime("%Y-%m-%d %H:%M:%S")),
            'pubDate': feed['start_date'].strftime("%Y-%m-%d %H:%M:%S"),
            'title': "%s - %s" % (showname, feed['start_date'].strftime("%Y.%m.%d")),
            'link': link,
            'copyright': callsign
        }.items():
            ET.SubElement(item, k).text = v
        ET.SubElement(item, 'guid', isPermaLink="false").text = file_name
        # fileSize and length will be guessed based on 209 bytes covering
        # frame_length seconds of audio (128k/44.1k no id3)
        content = ET.SubElement(item, '{%s}content' % nsmap['media'])
        content.attrib['url'] = link
        content.attrib['fileSize'] = str(file_get_size(file_name))
        content.attrib['type'] = 'audio/mpeg3'
        # The length of the audio we will just take as the duration
        content = ET.SubElement(item, 'enclosure')
        content.attrib['url'] = link
        content.attrib['length'] = str(duration * 60)
        content.attrib['type'] = 'audio/mpeg3'
    tree = ET.ElementTree(root)
    return Response(ET.tostring(tree, xml_declaration=True, encoding="utf-8"), mimetype='text/xml')
def server_error(errstr):
    """ Returns a server error as a JSON result """
    payload = {'result': False, 'error': errstr}
    return jsonify(payload), 500
def server_manager(config):
    """ Main flask process that manages the end points """
    global g_queue
    app = Flask(__name__)

    # from http://flask.pocoo.org/snippets/67/
    def shutdown_server():
        # Asks the werkzeug development server to stop serving; only
        # works when running under werkzeug.
        func = request.environ.get('werkzeug.server.shutdown')
        if func is None:
            raise RuntimeError('Not running with the Werkzeug Server')
        func()

    # from http://blog.asgaard.co.uk/2012/08/03/http-206-partial-content-for-flask-python
    @app.after_request
    def after_request(response):
        """ Supports 206 partial content requests for podcast streams """
        response.headers.add('Accept-Ranges', 'bytes')
        return response

    def send_file_partial(path):
        """
        Wrapper around send_file which handles HTTP 206 Partial Content
        (byte ranges)
        """
        range_header = request.headers.get('Range', None)
        if not range_header:
            return flask.send_file(path)
        size = os.path.getsize(path)
        # Parse "bytes=start-end"; the end byte may be omitted.
        byte1, byte2 = 0, None
        m = re.search('(\d+)-(\d*)', range_header)
        g = m.groups()
        if g[0]:
            byte1 = int(g[0])
        if g[1]:
            byte2 = int(g[1])
        length = size - byte1
        if byte2 is not None:
            length = byte2 - byte1
        data = None
        with open(path, 'rb') as f:
            f.seek(byte1)
            data = f.read(length)
        rv = Response(
            data,
            206,
            mimetype='audio/mpeg',
            direct_passthrough=True
        )
        rv.headers.add('Content-Range', 'bytes {0}-{1}/{2}'.format(byte1, byte1 + length - 1, size))
        return rv

    # From http://stackoverflow.com/questions/13317536/get-a-list-of-all-routes-defined-in-the-app
    @app.route("/site-map")
    def site_map():
        """ Shows all the end points supported by the current server """
        output = []
        for rule in app.url_map.iter_rules():
            options = {}
            for arg in rule.arguments:
                options[arg] = "[{0}]".format(arg)
            url = url_for(rule.endpoint, **options)
            line = urllib.unquote("{:25s} {}".format(rule.endpoint, url))
            output.append(line)
        return Response('\n'.join(output), mimetype='text/plain')

    @app.route('/slices/<path:path>')
    def send_stream(path):
        """
        Downloads a stream from the server. The path is (unix timestamp)_(duration in minutes).
        If it exists (as in we had previously generated it) then we can trivially send it. Otherwise
        we'll just call this an error to make our lives easier.
        """
        # NOTE(review): path comes from the url and is joined without
        # sanitization -- looks like a directory-traversal risk; confirm
        # whether werkzeug's <path:> converter is relied on here.
        base_dir = config['storage'] + 'slices/'
        fname = base_dir + path
        # If the file doesn't exist, then we need to slice it and create it based on our query.
        if not os.path.isfile(fname):
            return "File not found. Perhaps the stream is old?", 404
        return send_file_partial("%s/%s" % (base_dir, path))

    @app.route('/restart')
    def restart():
        """ Restarts an instance """
        cwd = os.getcwd()
        os.chdir(os.path.dirname(os.path.realpath(__file__)))
        shutdown_server()
        g_queue.put(('restart', True))
        return "restarting..."
        # NOTE(review): unreachable -- this sits after the return.
        os.chdir(cwd)

    @app.route('/upgrade')
    def upgrade():
        """
        Goes to the source directory, pulls down the latest from git
        and if the versions are different, the application restarts
        """
        cwd = os.getcwd()
        os.chdir(os.path.dirname(os.path.realpath(__file__)))
        os.system('git pull')
        os.system('pip install --user -r requirements.txt')
        # See what the version is after the pull
        newversion = os.popen("git describe").read().strip()
        if newversion != __version__:
            # from http://blog.petrzemek.net/2014/03/23/restarting-a-python-script-within-itself/
            shutdown_server()
            g_queue.put(('restart', True))
            return "Upgrading from %s to %s" % (__version__, newversion)
        os.chdir(cwd)
        return 'Version %s is current' % __version__

    @app.route('/heartbeat')
    def heartbeat():
        """
        A low resource version of the /stats call ... this is invoked
        by the server health check
        """
        global g_start_time
        return jsonify({
            'uptime': int(time.time() - g_start_time),
            'version': __version__
        }), 200

    @app.route('/stats')
    def stats():
        """ Reports various statistical metrics on a particular server """
        global g_start_time
        db = DB.connect()
        stats = {
            'intents': [record for record in db['c'].execute('select * from intents').fetchall()],
            'hits': db['c'].execute('select sum(read_count) from intents').fetchone()[0],
            'kv': [record for record in db['c'].execute('select * from kv').fetchall()],
            'uptime': int(time.time() - g_start_time),
            'free': os.popen("df -h / | tail -1").read().strip(),
            'disk': sum(os.path.getsize(f) for f in os.listdir('.') if os.path.isfile(f)),
            #'streams': file_find_streams(-1, 0),
            'version': __version__,
            'config': config
        }
        return jsonify(stats), 200

    @app.route('/<weekday>/<start>/<duration>/<showname>')
    def stream(weekday, start, duration, showname):
        """
        Returns an xml file based on the weekday, start and duration
        from the front end.
        """
        # Supports multiple weekdays
        weekday_list = weekday.split(',')
        duration_string = duration
        # Duration is expressed either in minutes or in \d+hr\d+ minute
        re_minute = re.compile('^(\d+)$')
        re_hr_solo = re.compile('^(\d+)hr$', re.I)
        re_hr_min = re.compile('^(\d+)hr(\d+).*$', re.I)
        res = re_minute.match(duration)
        if res:
            duration = int(res.groups()[0])
        else:
            res = re_hr_solo.match(duration)
            if res:
                duration = int(res.groups()[0]) * 60
            else:
                res = re_hr_min.match(duration)
                if res:
                    duration = int(res.groups()[0]) * 60 + int(res.groups()[1])
        # This means we failed to parse
        if type(duration) is str:
            return server_error('duration "%s" is not set correctly' % duration)
        #
        # See https://github.com/kristopolous/DRR/issues/22:
        #
        # We're going to add 2 minutes to the duration to make sure that we get
        # the entire episode.
        #
        duration += 2
        start_time_list = [time_to_utc(day, start) for day in weekday_list]
        if not start_time_list[0]:
            return server_error('weekday and start times are not set correctly')
        # If we are here then it looks like our input is probably good.
        # Strip the .xml from the showname ... this will be used in our xml.
        showname = re.sub('.xml$', '', showname)
        # We come in with spaces as underscores so here we translate that back
        showname = re.sub('_', ' ', showname)
        # This will register the intent if needed for future recordings
        # (that is if we are in ondemand mode)
        DB.register_intent(start_time_list, duration)
        # Look for streams that we have which match this query and duration.
        feed_list = file_find_streams(start_time_list, duration)
        # Then, taking those two things, make a feed list from them.
        return server_generate_xml(
            showname=showname,
            feed_list=feed_list,
            duration=duration,
            weekday_list=weekday_list,
            start=start,
            duration_string=duration_string
        )

    if __name__ == '__main__':
        pid = change_proc_name("%s-webserver" % config['callsign'])
        with open(PIDFILE_WEBSERVER, 'w+') as f:
            f.write(str(pid))
        """
        When we do an upgrade or a restart, there's a race condition of getting to start this server
        before the previous one has cleaned up all the socket work. So if the time is under our
        patience threshold then we sleep a second and just try again, hoping that it will work.
        """
        patience = PROCESS_DELAY * 2
        attempt = 1
        start = time.time()
        # Retry binding the port until the old instance lets go of it.
        while time.time() - start < patience:
            try:
                print "Listening on %s" % config['port']
                app.run(threaded=True, port=int(config['port']), host='0.0.0.0')
                break
            except Exception as exc:
                if time.time() - start < patience:
                    print "[attempt: %d] Error, can't start server ... perhaps %s is already in use?" % (attempt, config['port'])
                    attempt += 1
                    time.sleep(PROCESS_DELAY / 4)
##
## Stream management functions
##
def stream_should_be_recording():
    """ Queries the database and see if we ought to be recording at this moment """
    # NOTE(review): the comparison direction (start >= now and end <= now)
    # looks inverted for "now is within [start, end]" -- confirm against the
    # intents schema before changing anything.
    # The %s interpolation injects a config value (not user input) into the
    # SQL; the minute bounds go through proper ? placeholders.
    global g_config
    db = DB.connect()
    current_minute = time_minute_now()
    intent_count = db['c'].execute("""
        select count(*) from intents where
        start >= ? and
        end <= ? and
        accessed_at > datetime('now','-%s days')
        """ % g_config['expireafter'],
        (current_minute, current_minute)
    ).fetchone()[0]
    return intent_count != 0
def stream_download(callsign, url, my_pid, fname):
    """
    Curl interfacing which downloads the stream to disk.
    Follows redirects and parses out basic m3u
    """
    global g_params
    change_proc_name("%s-download" % callsign)
    # Mutable closure cell (python2 has no `nonlocal`): nl['stream'] stays
    # False until the output file is successfully opened.
    nl = {'stream': False}

    def dl_stop(signal, frame):
        # Intended SIGTERM handler; its registration is commented out below.
        sys.exit(0)

    def cback(data):
        # pycurl WRITEFUNCTION -- called once per received chunk.
        global g_config, g_queue, g_params
        # On the very first chunk, sniff for a redirect url or playlist
        # instead of raw audio.
        if g_params['isFirst'] == True:
            g_params['isFirst'] = False
            if len(data) < 800:
                if re.match('https?://', data):
                    # If we are getting a redirect then we don't mind, we
                    # just put it in the stream and then we leave
                    g_queue.put(('stream', data.strip()))
                    return True
                # A pls style playlist
                elif re.findall('File\d', data, re.M):
                    logging.info("Found a pls, using the File1 parameter");
                    matches = re.findall('File1=(.*)\n', data, re.M)
                    g_queue.put(('stream', matches[0].strip()))
                    return True
        # Tell the manager we are alive.
        g_queue.put(('heartbeat', True))
        # Lazily open the output file on first real audio data.
        if not nl['stream']:
            try:
                nl['stream'] = open(fname, 'w')
            except Exception as exc:
                logging.critical("Unable to open %s. Can't record. Must exit." % fname)
                sys.exit(-1)
        nl['stream'].write(data)
        # Bail if the supervising manager process went away.
        if not manager_is_running():
            shutdown()

    # signal.signal(signal.SIGTERM, dl_stop)
    c = pycurl.Curl()
    c.setopt(c.URL, url)
    c.setopt(pycurl.WRITEFUNCTION, cback)
    c.setopt(pycurl.FOLLOWLOCATION, True)
    g_params['isFirst'] = True
    try:
        c.perform()
    except Exception as exc:
        logging.warning("Couldn't resolve or connect to %s." % url)
    c.close()
    # Only close the file handle if one was actually opened.
    if type(nl['stream']) != bool:
        nl['stream'].close()
def manager_is_running():
    """
    Checks to see if the manager is still running or if we should
    shutdown. It works by sending a signal(0) to a pid and seeing
    if that fails
    """
    global g_manager_pid
    try:
        # Signal 0 performs error checking only; no signal is delivered.
        os.kill(g_manager_pid, 0)
        return True
    except Exception:
        # ESRCH (no such process), a bogus/unset pid, etc. all mean "not
        # running". Narrowed from a bare except so SystemExit and
        # KeyboardInterrupt still propagate.
        return False
def stream_manager():
    """
    Manager process which makes sure that the
    streams are running appropriately
    """
    global g_queue, g_config
    callsign = g_config['callsign']
    # A stream download is cycled ("cascaded") every cascade_time seconds;
    # the replacement downloader is started cascade_buffer seconds early so
    # the two recordings overlap.
    cascade_time = g_config['cascadetime']
    cascade_buffer = g_config['cascadebuffer']
    cascade_margin = cascade_time - cascade_buffer
    last_prune = 0
    last_success = 0
    mode_full = (g_config['mode'].lower() == 'full')
    b_shutdown = False
    should_record = mode_full
    # Number of seconds to be cycling
    cycle_time = g_config['cycletime']
    process = False
    process_next = False
    # The flask web server runs as a sibling process.
    server_pid = Process(target=server_manager, args=(g_config,))
    server_pid.start()
    fname = False

    # A wrapper function to start a download process
    def download_start(fname):
        """ Starts a process that manages the downloading of a stream."""
        global g_download_pid
        g_download_pid += 1
        logging.info("Starting cascaded downloader #%d. Next up in %ds" % (g_download_pid, cascade_margin))
        #
        # There may be a multi-second lapse time from the naming of the file to
        # the actual start of the download so we should err on that side by putting it
        # in the future by some margin
        #
        # NOTE(review): the fname parameter is immediately overwritten here,
        # so the value passed in is never used.
        fname = 'streams/%s-%d.mp3' % (callsign, time_sec_now(offset_sec=PROCESS_DELAY))
        process = Process(target=stream_download, args=(callsign, g_config['stream'], g_download_pid, fname))
        process.start()
        return [fname, process]

    while True:
        #
        # We cycle this to off for every run. By the time we go through the queue so long
        # as we aren't supposed to be shutting down, this should be toggled to true.
        #
        flag = False
        if last_prune < (time.time() - ONE_DAY * g_config['pruneevery']):
            # We just assume it can do its business in under a day
            prune_process = Process(target=file_prune)
            prune_process.start()
            last_prune = time.time()
        time_get_offset()
        # Drain control messages from the child processes.
        while not g_queue.empty():
            what, value = g_queue.get(False)
            # The curl proces discovered a new stream to be
            # used instead.
            if what == 'stream':
                g_config['stream'] = value
                logging.info("Using %s as the stream now" % value)
                # We now don't toggle to flag in order to shutdown the
                # old process and start a new one
            elif what == 'shutdown':
                print "-- shutdown"
                b_shutdown = True
            elif what == 'restart':
                os.chdir(os.path.dirname(os.path.realpath(__file__)))
                subprocess.Popen(sys.argv)
            else:
                # Anything else (e.g. a heartbeat) proves the downloader
                # is still alive.
                flag = True
        # Check for our management process
        if not manager_is_running():
            logging.info("Manager isn't running");
            b_shutdown = True
        #
        # If we are not in full mode, then we should check whether we should be
        # recording right now according to our intents.
        #
        if not mode_full:
            should_record = stream_should_be_recording()
        if should_record:
            # Didn't respond in cycle_time seconds so we respawn
            if not flag:
                if process and process.is_alive():
                    process.terminate()
                process = False
            if not process and not b_shutdown:
                fname, process = download_start(fname)
                last_success = time.time()
            # If we've hit the time when we ought to cascade
            elif time.time() - last_success > cascade_margin:
                # And we haven't created the next process yet, then we start it now.
                if not process_next:
                    fname, process_next = download_start(fname)
            # If our last_success stream was more than cascade_time - cascade_buffer
            # then we start our process_next
            # If there is still no process then we should definitely bail.
            if not process:
                return False
        #
        # The only way for the bool to be toggled off is if we are not in full-mode ...
        # we get here if we should NOT be recording. So we make sure we aren't.
        #
        else:
            if process and process.is_alive():
                process.terminate()
            if process_next and process_next.is_alive():
                process_next.terminate()
            process_next = process = False
        #
        # This needs to be on the outside loop in case we are doing a cascade
        # outside of a full mode. In this case, we will need to shut things down
        #
        # If we are past the cascade_time and we have a process_next, then
        # we should shutdown our previous process and move the pointers around.
        #
        if time.time() - last_success > cascade_time and process:
            logging.info("Stopping cascaded downloader")
            process.terminate()
            # If the process_next is running then we move our last_success forward to the present
            last_success = time.time()
            # we rename our process_next AS OUR process
            process = process_next
            # And then clear out the old process_next pointer
            process_next = False
        # Increment the amount of time this has been running
        DB.incr('uptime', cycle_time)
        time.sleep(cycle_time)
def make_maps():
    """Run the CRC/offset map builder over every stream file on disk."""
    change_proc_name("%s-mapmaker" % g_config['callsign'])
    for stream_file in glob('streams/*.mp3'):
        # Abort if the supervising manager process has gone away.
        if not manager_is_running():
            shutdown()
        audio_crc(stream_file, only_check=True)
    return 0
def read_config(config):
    """
    Reads a configuration file.
    Currently documented at https://github.com/kristopolous/DRR/wiki/Join-the-Federation
    """
    global g_config
    Config = ConfigParser.ConfigParser()
    Config.read(config)
    g_config = config_section_map('Main', Config)
    defaults = {
        # The log level to be put into the indycast.log file.
        'loglevel': 'WARN',
        # The recording mode, either 'full' meaning to record everything, or != 'full'
        # meaning to record only when an intent is matched.
        'mode': 'full',
        # The relative, or absolute directory to put things in
        'storage': "%s/radio" % os.path.expanduser('~'),
        # The (day) time to expire an intent to record
        'expireafter': 45,
        # The TCP port to run the server on
        'port': '5000',
        # The (day) duration we should be archiving things.
        'archivedays': 14,
        # The (second) time in looking to see if our stream is running
        'cycletime': 7,
        # The (second) time to start a stream BEFORE the lapse of the cascade-time
        'cascadebuffer': 15,
        # The (second) time between cascaded streams
        'cascadetime': 60 * 15,
        # Cloud credenials (ec2, azure etc)
        'cloud': False,
        #
        # When to get things off local disk and store to the cloud
        # This means that after this many days data is sent remote and then
        # retained for `archivedays`. This makes the entire user-experience
        # a bit slower of course, and has an incurred throughput cost - but
        # it does save price VPS disk space which seems to come at an unusual
        # premium.
        #
        'cloudarchive': 2,
        # Run the pruning every this many days (float)
        'pruneevery': 0.5
    }
    # Overlay defaults for any missing key; coerce supplied values to the
    # default's numeric type so config entries don't stay strings.
    for k, v in defaults.items():
        if k not in g_config:
            g_config[k] = v
        else:
            if type(v) is int: g_config[k] = int(g_config[k])
            elif type(v) is float: g_config[k] = float(g_config[k])
    # in case someone is specifying ~/radio
    g_config['storage'] = os.path.expanduser(g_config['storage'])
    if g_config['cloud']:
        g_config['cloud'] = os.path.expanduser(g_config['cloud'])
        if os.path.exists(g_config['cloud']):
            # If there's a cloud configuration file then we read that too
            cloud_config = ConfigParser.ConfigParser()
            cloud_config.read(g_config['cloud'])
            g_config['azure'] = config_section_map('Azure', cloud_config)
    if not os.path.isdir(g_config['storage']):
        try:
            # If I can't do this, that's fine.
            os.mkdir(g_config['storage'])
        except Exception as exc:
            # We make it from the current directory
            g_config['storage'] = defaults['storage']
            if not os.path.isdir(g_config['storage']):
                os.mkdir(g_config['storage'])
    # Go to the callsign level in order to store multiple station feeds on a single
    # server in a single parent directory without forcing the user to decide what goes
    # where.
    g_config['storage'] += '/%s/' % g_config['callsign']
    g_config['storage'] = re.sub('\/+', '/', g_config['storage'])
    if not os.path.isdir(g_config['storage']):
        os.mkdir(g_config['storage'])
    # We have a few sub directories for storing things
    for subdir in ['streams', 'slices']:
        if not os.path.isdir(g_config['storage'] + subdir):
            os.mkdir(g_config['storage'] + subdir)
    # Now we try to do all this stuff again
    if os.path.isdir(g_config['storage']):
        #
        # There's a bug after we chdir, where the multiprocessing is trying to grab the same
        # invocation as the initial argv[0] ... so we need to make sure that if a user did
        # ./blah this will be maintained.
        #
        if not os.path.isfile(g_config['storage'] + __file__):
            os.symlink(os.path.abspath(__file__), g_config['storage'] + __file__)
        os.chdir(g_config['storage'])
    else:
        logging.warning("Can't find %s. Using current directory." % g_config['storage'])
    # If there is an existing pid-manager, that means that
    # there is probably another version running.
    if os.path.isfile(PIDFILE_MANAGER):
        with open(PIDFILE_MANAGER, 'r') as f:
            oldserver = f.readline()
        try:
            os.kill(int(oldserver), 15)
            # We give it a few seconds to shut everything down
            # before trying to proceed
            time.sleep(PROCESS_DELAY / 2)
        except (OSError, ValueError):
            # Stale pid file (dead process or junk content) -- just proceed.
            pass
    # From https://docs.python.org/2/howto/logging.html
    numeric_level = getattr(logging, g_config['loglevel'].upper(), None)
    if not isinstance(numeric_level, int):
        # BUG FIX: this previously referenced an undefined name `loglevel`,
        # which raised a NameError instead of the intended ValueError.
        raise ValueError('Invalid log level: %s' % g_config['loglevel'])
    logging.basicConfig(level=numeric_level, filename='indycast.log', datefmt='%Y-%m-%d %H:%M:%S', format='%(asctime)s %(message)s')
    #
    # Increment the number of times this has been run so we can track the stability of remote
    # servers and instances.
    #
    DB.incr('runcount')
    signal.signal(signal.SIGINT, shutdown)
if __name__ == "__main__":
    # From http://stackoverflow.com/questions/25504149/why-does-running-the-flask-dev-server-run-itself-twice
    if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
        # We are the werkzeug reloader child: run only the web server.
        server_manager(g_config)
    else:
        parser = argparse.ArgumentParser()
        parser.add_argument("-c", "--config", default="./indy_config.txt", help="Configuration file (default ./indy_config.txt)")
        parser.add_argument('--version', action='version', version='indycast %s :: July 2015' % __version__)
        args = parser.parse_args()
        read_config(args.config)
        # Kick off map generation for pre-existing stream files in the background.
        map_pid = Process(target=make_maps, args=())
        map_pid.start()
        pid = change_proc_name("%s-manager" % g_config['callsign'])
        # This is the pid that should be killed to shut the system
        # down.
        g_manager_pid = pid
        with open(PIDFILE_MANAGER, 'w+') as f:
            f.write(str(pid))
        stream_manager()
|
# -*- coding: UTF8
'''
Created on 02.10.2015
@author: mEDI
'''
from PySide import QtCore, QtGui
import PySide
import gui.guitools as guitools
from sqlite3_functions import calcDistance
__toolname__ = "Bookmarks"
__internalName__ = "Bo"
__statusTip__ = "Open A %s Window" % __toolname__
class tool(QtGui.QWidget):
    """Bookmark browser: lists stored bookmarks in a tree, shows their
    distance from a reference system, and supports rename/delete."""
    main = None
    mydb = None
    route = None

    def __init__(self, main):
        super(tool, self).__init__(main)
        self.main = main
        self.mydb = main.mydb
        self.guitools = guitools.guitools(self)
        self.createActions()

    def getWideget(self):
        """Build and return the tool's widget (location bar plus bookmark tree)."""
        locationButton = QtGui.QToolButton()
        locationButton.setIcon(self.guitools.getIconFromsvg("img/location.svg"))
        locationButton.clicked.connect(self.setCurentLocation)
        locationButton.setToolTip("Current Location")
        locationLabel = QtGui.QLabel("Location:")
        self.locationlineEdit = guitools.LineEdit()
        self.locationlineEdit.setText(self.main.location.getLocation())
        # Refresh the distances whenever the reference system changes.
        self.locationlineEdit.textChanged.connect(self.showBookmarks)
        self.searchbutton = QtGui.QPushButton("Search")
        self.searchbutton.clicked.connect(self.showBookmarks)
        layout = QtGui.QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(locationLabel)
        layout.addWidget(locationButton)
        layout.addWidget(self.locationlineEdit)
        layout.addWidget(self.searchbutton)
        locationGroupBox = QtGui.QGroupBox()
        locationGroupBox.setFlat(True)
        locationGroupBox.setStyleSheet("""QGroupBox {border:0;margin:0;padding:0;} margin:0;padding:0;""")
        locationGroupBox.setLayout(layout)
        self.listView = QtGui.QTreeView()
        self.listView.setAlternatingRowColors(True)
        self.listView.setSortingEnabled(False)
        self.listView.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
        self.listView.setSelectionBehavior(QtGui.QAbstractItemView.SelectItems)
        self.listView.setRootIsDecorated(True)
        self.listView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.listView.customContextMenuRequested.connect(self.myContextMenuEvent)
        vGroupBox = QtGui.QGroupBox()
        vGroupBox.setFlat(True)
        layout = QtGui.QVBoxLayout()
        layout.setContentsMargins(6, 6, 6, 6)
        layout.addWidget(locationGroupBox)
        layout.addWidget(self.listView)
        vGroupBox.setLayout(layout)
        self.guitools.setSystemComplete("", self.locationlineEdit)
        self.showBookmarks()
        return vGroupBox

    def myContextMenuEvent(self, event):
        """Context menu: copy always; delete only when a bookmark row is selected."""
        menu = QtGui.QMenu(self)
        menu.addAction(self.copyAct)
        indexes = self.listView.selectionModel().selectedIndexes()
        if indexes and isinstance(indexes[0].internalPointer(), BookmarkTreeItem):
            menu.addAction(self.deleteBookmarkAct)
        menu.addAction(self.reloadAct)
        menu.exec_(self.listView.viewport().mapToGlobal(event))

    def setCurentLocation(self):
        """Copy the player's current location into the search field."""
        self.locationlineEdit.setText(self.main.location.getLocation())

    def createActions(self):
        """Create the shared QActions used by the context menu."""
        self.copyAct = QtGui.QAction("Copy", self, triggered=self.guitools.copyToClipboard, shortcut=QtGui.QKeySequence.Copy)
        self.deleteBookmarkAct = QtGui.QAction("Delete Bookmark", self, triggered=self.deleteBookmark)
        self.reloadAct = QtGui.QAction("Reload Bookmarks", self, triggered=self.showBookmarks)

    def deleteBookmark(self):
        """Delete the selected bookmark after user confirmation."""
        indexes = self.listView.selectionModel().selectedIndexes()
        # BUG FIX: guard against an empty selection -- indexes[0] previously
        # raised IndexError when nothing was selected.
        if indexes and isinstance(indexes[0].internalPointer(), BookmarkTreeItem):
            treeItem = indexes[0].internalPointer()
            bockmarkID = int(treeItem.data(0))
            msg = "Are you sure you want to delete the bookmark?"
            msgBox = QtGui.QMessageBox(QtGui.QMessageBox.Information,
                                       "Delete Bookmark", msg,
                                       QtGui.QMessageBox.NoButton, self)
            msgBox.addButton("Delete", QtGui.QMessageBox.AcceptRole)
            msgBox.addButton("Cancel", QtGui.QMessageBox.RejectRole)
            if msgBox.exec_() == QtGui.QMessageBox.AcceptRole:
                self.mydb.deleteBookmark(bockmarkID)
                self.showBookmarks()

    def showBookmarks(self):
        """(Re)load all bookmarks from the database into the tree view."""
        location = self.locationlineEdit.text()
        systemID = self.mydb.getSystemIDbyName(location)
        currentSystem = None
        if systemID:
            currentSystem = self.mydb.getSystemData(systemID)
        bookmarks = self.mydb.getBookmarks()
        self.bookmarkModel = BookmarkTreeModel(bookmarks, currentSystem)
        self.listView.setModel(self.bookmarkModel)
        self.bookmarkModel.dataChanged.connect(self.saveItemEdit)

    def saveItemEdit(self, item):
        """Persist an edited bookmark name (column 1) back to the database."""
        changesSaved = None
        if isinstance(item.internalPointer(), BookmarkTreeItem) and item.column() == 1:
            boockmarkID = self.listView.model().index(item.row(), 0).data()
            changesSaved = self.mydb.updateBookmarkName(boockmarkID, item.data(0))
        if changesSaved:
            self.main.setStatusBar("changes saved")
'''
Bookmark Tree Item Model
'''
class BookmarkRootTreeItem(object):
    """Invisible root node of the bookmark tree. Its itemData doubles as
    the header labels; children are the top-level bookmark rows."""

    def __init__(self, data, parent=None):
        self.parentItem = parent
        self.itemData = data
        self.childItems = []

    def appendChild(self, item):
        """Attach a child node."""
        self.childItems.append(item)

    def child(self, row):
        """The child node at position row."""
        return self.childItems[row]

    def childCount(self):
        """Number of direct children."""
        return len(self.childItems)

    def columnCount(self):
        """Number of columns carried by this node."""
        return len(self.itemData)

    def data(self, column):
        """Value stored for column, or None when it is out of range."""
        try:
            value = self.itemData[column]
        except IndexError:
            value = None
        return value

    def parent(self):
        """Owning node (None for the root)."""
        return self.parentItem

    def row(self):
        """Position of this node within its parent; 0 for the root."""
        return self.parentItem.childItems.index(self) if self.parentItem else 0
class BookmarkTreeItem(object):
    """A bookmark row in the tree. The only editable item type: setData
    lets the view rename the bookmark in place."""

    def __init__(self, data, parent=None):
        self.parentItem = parent
        self.itemData = data
        self.childItems = []

    def appendChild(self, item):
        """Attach a child node."""
        self.childItems.append(item)

    def child(self, row):
        """The child node at position row."""
        return self.childItems[row]

    def childCount(self):
        """Number of direct children."""
        return len(self.childItems)

    def columnCount(self):
        """Number of columns carried by this node."""
        return len(self.itemData)

    def data(self, column):
        """Value stored for column, or None when it is out of range."""
        try:
            value = self.itemData[column]
        except IndexError:
            value = None
        return value

    def setData(self, column, value):
        """Overwrite column with value; False when column is out of range."""
        if 0 <= column < len(self.itemData):
            self.itemData[column] = value
            return True
        return False

    def parent(self):
        """Owning node."""
        return self.parentItem

    def row(self):
        """Position of this node within its parent; 0 when parentless."""
        return self.parentItem.childItems.index(self) if self.parentItem else 0
class BookmarkChildTreeItem(object):
    """A read-only detail row nested under a bookmark (station/item data)."""

    def __init__(self, data, parent=None):
        self.parentItem = parent
        self.itemData = data
        self.childItems = []

    def appendChild(self, item):
        """Attach a child node."""
        self.childItems.append(item)

    def child(self, row):
        """The child node at position row."""
        return self.childItems[row]

    def childCount(self):
        """Number of direct children."""
        return len(self.childItems)

    def columnCount(self):
        """Number of columns carried by this node."""
        return len(self.itemData)

    def data(self, column):
        """Value stored for column, or None when it is out of range."""
        try:
            value = self.itemData[column]
        except IndexError:
            value = None
        return value

    def parent(self):
        """Owning node."""
        return self.parentItem

    def row(self):
        """Position of this node within its parent; 0 when parentless."""
        return self.parentItem.childItems.index(self) if self.parentItem else 0
class BookmarkTreeModel(QtCore.QAbstractItemModel):
    def __init__(self, data, currentSystem, parent=None):
        # currentSystem: system-data dict (or None) used as the reference
        # point for the Distance column.
        super(BookmarkTreeModel, self).__init__(parent)
        self.currentSystem = currentSystem
        # The root item doubles as the header row.
        self.rootItem = BookmarkRootTreeItem(("Id.", "Name", "System", "Distance", "Station", "Item", ""))
        self.setupModelData(data, self.rootItem)
    def columnCount(self, parent):
        # Column count comes from the item itself; the root supplies it for
        # the top level.
        if parent.isValid():
            return parent.internalPointer().columnCount()
        else:
            return self.rootItem.columnCount()
def data(self, index, role):
if not index.isValid():
return None
if role != QtCore.Qt.DisplayRole and role != QtCore.Qt.EditRole:
return None
item = index.internalPointer()
return item.data(index.column())
    def setData(self, index, value, role=QtCore.Qt.EditRole):
        # Only plain edits are supported.
        if role != QtCore.Qt.EditRole:
            return False
        item = self.getItem(index)
        result = item.setData(index.column(), value)
        if result:
            # Notify views (and the tool's saveItemEdit slot) of the change.
            self.dataChanged.emit(index, index)
        return result
def flags(self, index):
if not index.isValid():
return QtCore.Qt.NoItemFlags
if index.column() == 1 and isinstance(index.internalPointer(), BookmarkTreeItem): # Edit Name/ Comment
return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
def getItem(self, index):
if index.isValid():
item = index.internalPointer()
if item:
return item
return self.rootItem
def headerData(self, section, orientation, role):
if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
return self.rootItem.data(section)
return None
def index(self, row, column, parent=QtCore.QModelIndex()):
if not self.hasIndex(row, column, parent):
return QtCore.QModelIndex()
if not parent.isValid():
parentItem = self.rootItem
else:
parentItem = parent.internalPointer()
childItem = parentItem.child(row)
if childItem:
return self.createIndex(row, column, childItem)
else:
return QtCore.QModelIndex()
def parent(self, index):
if not index.isValid():
return QtCore.QModelIndex()
childItem = index.internalPointer()
parentItem = childItem.parent()
if parentItem == self.rootItem:
return QtCore.QModelIndex()
return self.createIndex(parentItem.row(), 0, parentItem)
def rowCount(self, parent):
if parent.column() > 0:
return 0
if not parent.isValid():
parentItem = self.rootItem
else:
parentItem = parent.internalPointer()
return parentItem.childCount()
def setupModelData(self, bookmarks, parent):
parents = [parent]
for bookmark in bookmarks:
if bookmark['childs'] and len(bookmark['childs']) >= 1:
distance = None
if self.currentSystem and self.currentSystem['posX'] and bookmark['childs'][0]['posX']:
distance = calcDistance(self.currentSystem["posX"], self.currentSystem["posY"], self.currentSystem["posZ"], bookmark['childs'][0]["posX"], bookmark['childs'][0]["posY"], bookmark['childs'][0]["posZ"])
system = None
if bookmark['Type'] == 1:
system = bookmark['childs'][0]['System']
data = [bookmark['id'], bookmark['Name'], system, distance, "", ""]
parents[-1].appendChild(BookmarkTreeItem(data, parents[-1]))
if bookmark['Type'] != 1:
# follow is a child
parents.append(parents[-1].child(parents[-1].childCount() - 1))
for i, child in enumerate(bookmark['childs']):
distance = None
if i + 1 < len(bookmark['childs']):
if bookmark['childs'][i + 1] and child['posX']:
distance = calcDistance(bookmark['childs'][i + 1]["posX"], bookmark['childs'][i + 1]["posY"], bookmark['childs'][i + 1]["posZ"], child["posX"], child["posY"], child["posZ"])
else: # back hop
if bookmark['childs'][0] and child['posX']:
distance = calcDistance(bookmark['childs'][0]["posX"], bookmark['childs'][0]["posY"], bookmark['childs'][0]["posZ"], child["posX"], child["posY"], child["posZ"])
data = ["", "", child['System'], distance, child['Station'], child['name']]
parents[-1].appendChild(BookmarkChildTreeItem(data, parents[-1]))
parents.pop()
Set AlignRight for the distance column.
# -*- coding: UTF8
'''
Created on 02.10.2015
@author: mEDI
'''
from PySide import QtCore, QtGui
import PySide
import gui.guitools as guitools
from sqlite3_functions import calcDistance
# Plugin metadata read by the main window when registering this tool.
__toolname__ = "Bookmarks"
__internalName__ = "Bo"  # short internal key -- presumably used for settings/window ids; verify against caller
__statusTip__ = "Open A %s Window" % __toolname__
class tool(QtGui.QWidget):
    """Bookmarks tool window: shows saved bookmarks in a tree view with
    distances computed from the currently entered location."""

    # Collaborators injected in __init__.
    main = None
    mydb = None
    route = None

    def __init__(self, main):
        super(tool, self).__init__(main)
        self.main = main
        self.mydb = main.mydb
        self.guitools = guitools.guitools(self)
        self.createActions()

    def getWideget(self):
        """Build and return the tool's widget tree (location bar + tree view).

        NOTE(review): the "Wideget" typo is part of the plugin interface --
        do not rename without updating the caller.
        """
        locationButton = QtGui.QToolButton()
        locationButton.setIcon(self.guitools.getIconFromsvg("img/location.svg"))
        locationButton.clicked.connect(self.setCurentLocation)
        locationButton.setToolTip("Current Location")
        locationLabel = QtGui.QLabel("Location:")
        self.locationlineEdit = guitools.LineEdit()
        self.locationlineEdit.setText(self.main.location.getLocation())
        # Re-filter the bookmark list whenever the location text changes.
        self.locationlineEdit.textChanged.connect(self.showBookmarks)
        self.searchbutton = QtGui.QPushButton("Search")
        self.searchbutton.clicked.connect(self.showBookmarks)
        layout = QtGui.QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(locationLabel)
        layout.addWidget(locationButton)
        layout.addWidget(self.locationlineEdit)
        layout.addWidget(self.searchbutton)
        locationGroupBox = QtGui.QGroupBox()
        locationGroupBox.setFlat(True)
        locationGroupBox.setStyleSheet("""QGroupBox {border:0;margin:0;padding:0;} margin:0;padding:0;""")
        # locationGroupBox.setFlat(True)
        locationGroupBox.setLayout(layout)
        self.listView = QtGui.QTreeView()
        self.listView.setAlternatingRowColors(True)
        self.listView.setSortingEnabled(False)
        self.listView.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
        self.listView.setSelectionBehavior(QtGui.QAbstractItemView.SelectItems)
        self.listView.setRootIsDecorated(True)
        # Right-click menu is built dynamically in myContextMenuEvent.
        self.listView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.listView.customContextMenuRequested.connect(self.myContextMenuEvent)
        vGroupBox = QtGui.QGroupBox()
        vGroupBox.setFlat(True)
        layout = QtGui.QVBoxLayout()
        layout.setContentsMargins(6, 6, 6, 6)
        layout.addWidget(locationGroupBox)
        layout.addWidget(self.listView)
        vGroupBox.setLayout(layout)
        self.guitools.setSystemComplete("", self.locationlineEdit)
        self.showBookmarks()
        return vGroupBox

    def myContextMenuEvent(self, event):
        """Show the context menu; delete is offered only on bookmark rows."""
        menu = QtGui.QMenu(self)
        menu.addAction(self.copyAct)
        indexes = self.listView.selectionModel().selectedIndexes()
        if indexes and isinstance(indexes[0].internalPointer(), BookmarkTreeItem):
            menu.addAction(self.deleteBookmarkAct)
        menu.addAction(self.reloadAct)
        menu.exec_(self.listView.viewport().mapToGlobal(event))

    def setCurentLocation(self):
        """Copy the player's current location into the location box."""
        self.locationlineEdit.setText(self.main.location.getLocation())

    def createActions(self):
        self.copyAct = QtGui.QAction("Copy", self, triggered=self.guitools.copyToClipboard, shortcut=QtGui.QKeySequence.Copy)
        self.deleteBookmarkAct = QtGui.QAction("Delete Bookmark", self, triggered=self.deleteBookmark)
        self.reloadAct = QtGui.QAction("Reload Bookmarks", self, triggered=self.showBookmarks)

    def deleteBookmark(self):
        """Delete the selected bookmark after user confirmation."""
        indexes = self.listView.selectionModel().selectedIndexes()
        if isinstance(indexes[0].internalPointer(), BookmarkTreeItem):
            treeItem = indexes[0].internalPointer()
            # Column 0 of a bookmark row holds the DB id.
            bockmarkID = int(treeItem.data(0))
            msg = "Are you sure you want to delete the bookmark?"
            msgBox = QtGui.QMessageBox(QtGui.QMessageBox.Information,
                                       "Delete Bookmark", msg,
                                       QtGui.QMessageBox.NoButton, self)
            msgBox.addButton("Delete", QtGui.QMessageBox.AcceptRole)
            msgBox.addButton("Cancel", QtGui.QMessageBox.RejectRole)
            if msgBox.exec_() == QtGui.QMessageBox.AcceptRole:
                self.mydb.deleteBookmark(bockmarkID)
                self.showBookmarks()

    def showBookmarks(self):
        """(Re)build the tree model from the DB and attach it to the view."""
        location = self.locationlineEdit.text()
        systemID = self.mydb.getSystemIDbyName(location)
        currentSystem = None
        if systemID:
            currentSystem = self.mydb.getSystemData(systemID)
        bookmarks = self.mydb.getBookmarks()
        self.bookmarkModel = BookmarkTreeModel(bookmarks, currentSystem)
        self.listView.setModel(self.bookmarkModel)
        self.bookmarkModel.dataChanged.connect(self.saveItemEdit)

    def saveItemEdit(self, item):
        """Persist an edited bookmark name (column 1) back to the DB."""
        changesSaved = None
        if isinstance(item.internalPointer(), BookmarkTreeItem) and item.column() == 1:
            # NOTE(review): leftover debug print.
            print(type(item.internalPointer()))
            boockmarkID = self.listView.model().index(item.row(), 0).data()
            changesSaved = self.mydb.updateBookmarkName(boockmarkID, item.data(0))
        if changesSaved:
            self.main.setStatusBar("changes saved")
'''
Bookmark Tree Item Model
'''
class BookmarkRootTreeItem(object):
    """Invisible root of the bookmark tree; its data row holds the headers."""

    def __init__(self, data, parent=None):
        self.parentItem = parent
        self.itemData = data
        self.childItems = []

    def appendChild(self, item):
        """Attach *item* as the last child."""
        self.childItems.append(item)

    def child(self, row):
        """Return the child at index *row*."""
        return self.childItems[row]

    def childCount(self):
        """Return how many children are attached."""
        return len(self.childItems)

    def columnCount(self):
        """Return how many columns (header labels) this item carries."""
        return len(self.itemData)

    def data(self, column):
        """Return the header label at *column*; None when out of range."""
        if -len(self.itemData) <= column < len(self.itemData):
            return self.itemData[column]
        return None

    def parent(self):
        """Return the owning parent item (None for the root)."""
        return self.parentItem

    def row(self):
        """Return this item's index in its parent's children (0 for the root)."""
        return self.parentItem.childItems.index(self) if self.parentItem else 0
class BookmarkTreeItem(object):
    """Top-level bookmark row; the only item type whose name cell is editable."""

    def __init__(self, data, parent=None):
        self.parentItem = parent
        self.itemData = data
        self.childItems = []

    def appendChild(self, item):
        """Attach *item* as the last child of this bookmark."""
        self.childItems.append(item)

    def child(self, row):
        """Return the child at index *row*."""
        return self.childItems[row]

    def childCount(self):
        """Return how many children are attached."""
        return len(self.childItems)

    def columnCount(self):
        """Return how many data columns this row carries."""
        return len(self.itemData)

    def data(self, column):
        """Return the value at *column*; None when the index is out of range."""
        if -len(self.itemData) <= column < len(self.itemData):
            return self.itemData[column]
        return None

    def setData(self, column, value):
        """Overwrite *column* with *value*; False when column is out of range."""
        if not 0 <= column < len(self.itemData):
            return False
        self.itemData[column] = value
        return True

    def parent(self):
        """Return the owning parent item (None when detached)."""
        return self.parentItem

    def row(self):
        """Return this row's index in its parent's child list (0 without a parent)."""
        return self.parentItem.childItems.index(self) if self.parentItem else 0
class BookmarkChildTreeItem(object):
    """Read-only leaf row: one station/item entry beneath a bookmark."""

    def __init__(self, data, parent=None):
        self.parentItem = parent
        self.itemData = data
        self.childItems = []

    def appendChild(self, item):
        """Attach *item* as the last child of this row."""
        self.childItems.append(item)

    def child(self, row):
        """Return the child at index *row*."""
        return self.childItems[row]

    def childCount(self):
        """Return how many children are attached."""
        return len(self.childItems)

    def columnCount(self):
        """Return how many data columns this row carries."""
        return len(self.itemData)

    def data(self, column):
        """Return the value at *column*; None when the index is out of range."""
        if -len(self.itemData) <= column < len(self.itemData):
            return self.itemData[column]
        return None

    def parent(self):
        """Return the owning parent item (None when detached)."""
        return self.parentItem

    def row(self):
        """Return this row's index in its parent's child list (0 without a parent)."""
        return self.parentItem.childItems.index(self) if self.parentItem else 0
class BookmarkTreeModel(QtCore.QAbstractItemModel):
    """Two-level item model over the saved bookmarks: one row per bookmark,
    with the bookmark's stations/items as child rows.  Column 1 (the
    bookmark name) is editable; the distance column is right-aligned."""

    def __init__(self, data, currentSystem, parent=None):
        """*data* is the bookmark list from the DB; *currentSystem* (a row
        with posX/posY/posZ, may be None) is used to compute distances."""
        super(BookmarkTreeModel, self).__init__(parent)
        self.currentSystem = currentSystem
        # Header labels double as the column layout for every row.
        self.rootItem = BookmarkRootTreeItem(("Id.", "Name", "System", "Distance", "Station", "Item", ""))
        self.setupModelData(data, self.rootItem)

    def columnCount(self, parent):
        # Each item reports its own column count; fall back to the header row.
        if parent.isValid():
            return parent.internalPointer().columnCount()
        else:
            return self.rootItem.columnCount()

    def data(self, index, role):
        if not index.isValid():
            return None
        # Right-align the numeric distance column.
        if role == QtCore.Qt.TextAlignmentRole:
            if index.column() == 3:  # dist
                return QtCore.Qt.AlignRight
        if role != QtCore.Qt.DisplayRole and role != QtCore.Qt.EditRole:
            return None
        item = index.internalPointer()
        return item.data(index.column())

    def setData(self, index, value, role=QtCore.Qt.EditRole):
        """Write an edited cell back to the item and notify views."""
        if role != QtCore.Qt.EditRole:
            return False
        item = self.getItem(index)
        result = item.setData(index.column(), value)
        if result:
            self.dataChanged.emit(index, index)
        return result

    def flags(self, index):
        if not index.isValid():
            return QtCore.Qt.NoItemFlags
        if index.column() == 1 and isinstance(index.internalPointer(), BookmarkTreeItem):  # Edit Name/ Comment
            return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
        return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable

    def getItem(self, index):
        """Return the tree item for *index*, or the root item when invalid."""
        if index.isValid():
            item = index.internalPointer()
            if item:
                return item
        return self.rootItem

    def headerData(self, section, orientation, role):
        if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
            return self.rootItem.data(section)
        return None

    def index(self, row, column, parent=QtCore.QModelIndex()):
        """Standard QAbstractItemModel index creation for a two-level tree."""
        if not self.hasIndex(row, column, parent):
            return QtCore.QModelIndex()
        if not parent.isValid():
            parentItem = self.rootItem
        else:
            parentItem = parent.internalPointer()
        childItem = parentItem.child(row)
        if childItem:
            return self.createIndex(row, column, childItem)
        else:
            return QtCore.QModelIndex()

    def parent(self, index):
        if not index.isValid():
            return QtCore.QModelIndex()
        childItem = index.internalPointer()
        parentItem = childItem.parent()
        if parentItem == self.rootItem:
            return QtCore.QModelIndex()
        return self.createIndex(parentItem.row(), 0, parentItem)

    def rowCount(self, parent):
        # Children only hang off column 0, per Qt convention.
        if parent.column() > 0:
            return 0
        if not parent.isValid():
            parentItem = self.rootItem
        else:
            parentItem = parent.internalPointer()
        return parentItem.childCount()

    def setupModelData(self, bookmarks, parent):
        """Build the item tree from the raw bookmark rows.

        NOTE(review): indentation was lost in this chunk; this assumes the
        child-row loop and the matching parents.pop() are scoped to
        non-system bookmarks (Type != 1) -- confirm against upstream.
        """
        parents = [parent]
        for bookmark in bookmarks:
            if bookmark['childs'] and len(bookmark['childs']) >= 1:
                # Distance from the player's current system to the first hop.
                distance = None
                if self.currentSystem and self.currentSystem['posX'] and bookmark['childs'][0]['posX']:
                    distance = calcDistance(self.currentSystem["posX"], self.currentSystem["posY"], self.currentSystem["posZ"], bookmark['childs'][0]["posX"], bookmark['childs'][0]["posY"], bookmark['childs'][0]["posZ"])
                system = None
                if bookmark['Type'] == 1:
                    # Single-system bookmark: show its system on the top row.
                    system = bookmark['childs'][0]['System']
                data = [bookmark['id'], bookmark['Name'], system, distance, "", ""]
                parents[-1].appendChild(BookmarkTreeItem(data, parents[-1]))
                if bookmark['Type'] != 1:
                    # follow is a child
                    parents.append(parents[-1].child(parents[-1].childCount() - 1))
                    for i, child in enumerate(bookmark['childs']):
                        # Leg distance: to the next hop, or back to the first
                        # hop for the final entry of a route.
                        distance = None
                        if i + 1 < len(bookmark['childs']):
                            if bookmark['childs'][i + 1] and child['posX']:
                                distance = calcDistance(bookmark['childs'][i + 1]["posX"], bookmark['childs'][i + 1]["posY"], bookmark['childs'][i + 1]["posZ"], child["posX"], child["posY"], child["posZ"])
                        else:  # back hop
                            if bookmark['childs'][0] and child['posX']:
                                distance = calcDistance(bookmark['childs'][0]["posX"], bookmark['childs'][0]["posY"], bookmark['childs'][0]["posZ"], child["posX"], child["posY"], child["posZ"])
                        data = ["", "", child['System'], distance, child['Station'], child['name']]
                        parents[-1].appendChild(BookmarkChildTreeItem(data, parents[-1]))
                    parents.pop()
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Tests for ``admin.packaging``.
"""
from glob import glob
from subprocess import check_output
from textwrap import dedent
from unittest import skipIf
from StringIO import StringIO
from twisted.python.filepath import FilePath
from twisted.python.procutils import which
from twisted.python.usage import UsageError
from twisted.trial.unittest import TestCase
from virtualenv import REQUIRED_MODULES as VIRTUALENV_REQUIRED_MODULES
from flocker.testtools import FakeSysModule
from .. import packaging
from ..packaging import (
omnibus_package_builder, InstallVirtualEnv, InstallApplication,
BuildPackage, BuildSequence, BuildOptions, BuildScript, DockerBuildOptions,
DockerBuildScript, GetPackageVersion, DelayedRpmVersion, CreateLinks,
PythonPackage, create_virtualenv, VirtualEnv, PackageTypes, Distribution,
Dependency, build_in_docker, DockerBuild, DockerRun,
PACKAGE, PACKAGE_PYTHON, PACKAGE_CLI, PACKAGE_NODE,
make_dependencies,
LintPackage,
)
from ..release import rpm_version
# Root of the flocker source tree (three levels up from this test module).
FLOCKER_PATH = FilePath(__file__).parent().parent().parent()

# Skip decorators for tests that need external packaging tools on PATH.
require_fpm = skipIf(not which('fpm'), "Tests require the ``fpm`` command.")
require_rpm = skipIf(not which('rpm'), "Tests require the ``rpm`` command.")
require_rpmlint = skipIf(not which('rpmlint'),
                         "Tests require the ``rpmlint`` command.")
require_dpkg = skipIf(not which('dpkg'), "Tests require the ``dpkg`` command.")
require_lintian = skipIf(not which('lintian'),
                         "Tests require the ``lintian`` command.")

# Path of the Docker daemon's UNIX socket on the host.
DOCKER_SOCK = '/var/run/docker.sock'
def assert_equal_steps(test_case, expected, actual):
    """
    Assert that the list of provided steps are the same.
    If they are not, display the differences intelligently.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param expected: The expected build step instance.
    :param actual: The actual build step instance.
    :raises: ``TestFailure`` if the build steps are not equal, showing the
        unequal or missing steps.
    """
    expected_steps = getattr(expected, 'steps')
    actual_steps = getattr(actual, 'steps')
    if None in (expected_steps, actual_steps):
        test_case.assertEqual(expected, actual)
    else:
        mismatch_steps = []
        missing_steps = []
        for index, expected_step in enumerate(expected_steps):
            try:
                actual_step = actual_steps[index]
            except IndexError:
                missing_steps = expected_steps[index:]
                break
            if expected_step != actual_step:
                mismatch_steps.append(
                    '* expected: {} !=\n'
                    '  actual: {}'.format(
                        expected_step, actual_step))
        # BUG FIX: slicing from ``index + 1`` dropped ``actual_steps[0]``
        # when ``expected_steps`` was empty (the loop never ran, leaving
        # ``index`` at its initial value).  Everything past the compared
        # prefix is "extra", so slice from len(expected_steps).
        extra_steps = actual_steps[len(expected_steps):]
        if mismatch_steps or missing_steps or extra_steps:
            test_case.fail(
                'Step Mismatch\n'
                'Mismatch:\n{}\n'
                'Missing:\n{}\n'
                'Extra:\n{}'.format(
                    '\n'.join(mismatch_steps), missing_steps, extra_steps)
            )
def assert_dict_contains(test_case, expected, actual, message=''):
    """
    Fail unless the supplied ``actual`` ``dict`` contains all the items in
    ``expected``.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param expected: The expected build step instance.
    :param actual: The actual build step instance.
    """
    missing_items = []
    mismatch_items = []
    sentinel = object()
    for key, expected_value in expected.items():
        found = actual.get(key, sentinel)
        if found is sentinel:
            missing_items.append(key)
        elif found != expected_value:
            mismatch_items.append(
                '{}: {} != {}'.format(key, expected_value, found)
            )
    if missing_items or mismatch_items:
        test_case.fail(
            '{}\n'
            'Missing items: {}\n'
            'Mismatch items: {}\n'
            'Actual items: {}'.format(
                message, missing_items, mismatch_items, actual)
        )
def parse_colon_dict(data):
    """
    Parse colon separated values into a dictionary, treating lines
    lacking a colon as continuation lines.

    Any leading lines without a colon will be associated with the key
    ``None``.

    This is the format output by ``rpm --query`` and ``dpkg --info``.

    :param bytes data: Data to parse
    :return: A ``dict`` containing the parsed data.
    """
    result = {}
    key = None
    for line in data.splitlines():
        head, sep, tail = line.partition(':')
        if sep:
            # New "key: value" entry; later continuation lines append here.
            key = head.strip()
            result[key] = tail.strip()
        else:
            # Continuation (or leading) line: append to the current key.
            result[key] = result.get(key, '') + head.strip()
    return result
def assert_rpm_headers(test_case, expected_headers, rpm_path):
    """
    Fail unless the ``RPM`` file at ``rpm_path`` contains all the
    ``expected_headers``.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param dict expected_headers: A dictionary of header key / value pairs.
    :param FilePath rpm_path: The path to the RPM file under test.
    """
    info = check_output(
        ['rpm', '--query', '--info', '--package', rpm_path.path]
    )
    assert_dict_contains(
        test_case, expected_headers, parse_colon_dict(info),
        'Missing RPM Headers: '
    )
def assert_rpm_content(test_case, expected_paths, package_path):
    """
    Fail unless the ``RPM`` file at ``rpm_path`` contains all the
    ``expected_paths``.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param set expected_paths: A set of ``FilePath`` s
    :param FilePath package_path: The path to the package under test.
    """
    listing = check_output(
        ['rpm', '--query', '--list', '--package', package_path.path]
    )
    test_case.assertEqual(
        expected_paths,
        set(FilePath(line) for line in listing.splitlines())
    )
def assert_deb_content(test_case, expected_paths, package_path):
    """
    Fail unless the ``deb`` file at ``package_path`` contains all the
    ``expected_paths``.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param set expected_paths: A set of ``FilePath`` s
    :param FilePath package_path: The path to the package under test.
    """
    # Extract the package into a scratch directory and compare the
    # resulting file list (directories excluded) against expectations.
    output_dir = FilePath(test_case.mktemp())
    output_dir.makedirs()
    check_output(['dpkg', '--extract', package_path.path, output_dir.path])
    actual_paths = set(
        FilePath('/').descendant(f.segmentsFrom(output_dir))
        for f in output_dir.walk()
        if not f.isdir()
    )
    test_case.assertEqual(expected_paths, actual_paths)
def assert_deb_headers(test_case, expected_headers, package_path):
    """
    Fail unless the ``deb`` file at ``package_path`` contains all the
    ``expected_headers``.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param dict expected_headers: A dictionary of header key / value pairs.
    :param FilePath package_path: The path to the deb file under test.
    """
    info = check_output(
        ['dpkg', '--info', package_path.path]
    )
    assert_dict_contains(
        test_case, expected_headers, parse_colon_dict(info),
        'Missing dpkg Headers: '
    )
def assert_rpm_requires(test_case, expected_requirements, rpm_path):
    """
    Fail unless the ``RPM`` file at ``rpm_path`` has all the
    ``expected_requirements``.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param list expected_requirements: A list of requirement strings.
    :param FilePath rpm_path: The path to the RPM file under test.
    """
    output = check_output(
        ['rpm', '--query', '--requires', '--package', rpm_path.path]
    )
    actual = set(line.strip() for line in output.splitlines())
    missing = set(expected_requirements) - actual
    if missing:
        test_case.fail('Missing requirements: {} in {}'.format(
            missing, rpm_path.path))
class SpyVirtualEnv(object):
    """
    A ``VirtualEnv`` like class which records the ``package_uri``s which are
    supplied to its ``install`` method.
    """
    def __init__(self):
        self._installed_packages = list()

    def install(self, package_uri):
        """Record ``package_uri`` instead of installing anything."""
        self._installed_packages += [package_uri]
class SpyStep(object):
    """
    A build step which records the fact that it has been run.

    :ivar bool ran: ``False`` by default.
    """
    ran = False

    def run(self):
        """Mark this step as having been executed."""
        self.ran = True
class BuildSequenceTests(TestCase):
    """
    Tests for ``BuildSequence``.
    """
    def test_run(self):
        """
        ``BuildSequence`` calls the ``run`` method of each of its ``steps``.
        """
        steps = (SpyStep(), SpyStep())
        BuildSequence(steps=steps).run()
        self.assertEqual((True, True), tuple(step.ran for step in steps))
def assert_has_paths(test_case, expected_paths, parent_path):
    """
    Fail if any of the ``expected_paths`` are not existing relative paths of
    ``parent_path``.

    :param TestCase test_case: The ``TestCase`` with which to make assertions.
    :param list expected_paths: A ``list`` of ``bytes`` relative path names
        which are expected to exist beneath ``parent_path``.
    :param FilePath parent_path: The root ``FilePath`` in which to search for
        ``expected_paths``.
    """
    missing_paths = [
        path for path in expected_paths
        if not parent_path.preauthChild(path).exists()
    ]
    if missing_paths:
        test_case.fail('Missing paths: {}'.format(missing_paths))
class InstallVirtualEnvTests(TestCase):
    """
    Tests for ``InstallVirtualEnv``.
    """
    def test_run(self):
        """
        ``InstallVirtualEnv.run`` installs a virtual python environment using
        create_virtualenv passing ``target_path`` as ``root``.
        """
        virtualenv = VirtualEnv(root=FilePath(self.mktemp()))
        step = InstallVirtualEnv(virtualenv=virtualenv)
        recorded = []
        self.patch(
            step, '_create_virtualenv',
            lambda **kwargs: recorded.append(kwargs))
        step.run()
        self.assertEqual([dict(root=virtualenv.root)], recorded)
class CreateVirtualenvTests(TestCase):
    """
    Tests for ``create_virtualenv``.
    """
    def test_bin(self):
        """
        ``create_virtualenv`` installs a virtual python environment in its
        ``target_path``.
        """
        virtualenv = VirtualEnv(root=FilePath(self.mktemp()))
        InstallVirtualEnv(virtualenv=virtualenv).run()
        expected_paths = ['bin/pip', 'bin/python']
        assert_has_paths(self, expected_paths, virtualenv.root)

    def test_pythonpath(self):
        """
        ``create_virtualenv`` installs a virtual python whose path does not
        include the system python libraries.
        """
        target_path = FilePath(self.mktemp())
        create_virtualenv(root=target_path)
        # Ask the virtualenv's own interpreter for its sys.path.
        output = check_output([
            target_path.descendant(['bin', 'python']).path,
            '-c', r'import sys; sys.stdout.write("\n".join(sys.path))'
        ])
        # We should probably check for lib64 as well here.
        self.assertNotIn(
            '/usr/lib/python2.7/site-packages', output.splitlines())

    def test_bootstrap_pyc(self):
        """
        ``create_virtualenv`` creates links to the pyc files for all the
        modules required for the virtualenv bootstrap process.
        """
        target_path = FilePath(self.mktemp())
        create_virtualenv(root=target_path)

        py_files = []
        for module_name in VIRTUALENV_REQUIRED_MODULES:
            py_base = target_path.descendant(['lib', 'python2.7', module_name])
            py = py_base.siblingExtension('.py')
            pyc = py_base.siblingExtension('.pyc')
            # A bootstrap module's .py and .pyc must both be symlinks;
            # collect a report line for any that are real files.
            if py.exists() and False in (py.islink(), pyc.islink()):
                py_files.append('PY: {} > {}\nPYC: {} > {}\n'.format(
                    '/'.join(py.segmentsFrom(target_path)),
                    py.realpath().path,
                    '/'.join(pyc.segmentsFrom(target_path)),
                    pyc.islink() and pyc.realpath().path or 'NOT A SYMLINK'
                ))

        if py_files:
            self.fail(
                'Non-linked bootstrap pyc files in {}: \n{}'.format(
                    target_path, '\n'.join(py_files)
                )
            )

    def test_internal_symlinks_only(self):
        """
        The resulting ``virtualenv`` only contains symlinks to files inside the
        virtualenv and to /usr on the host OS.
        """
        target_path = FilePath(self.mktemp())
        create_virtualenv(root=target_path)
        allowed_targets = (target_path, FilePath('/usr'),)
        bad_links = []
        for path in target_path.walk():
            if path.islink():
                realpath = path.realpath()
                # for/else: the else runs only if no allowed_target matched,
                # i.e. the link points outside every allowed root.
                for allowed_target in allowed_targets:
                    try:
                        realpath.segmentsFrom(allowed_target)
                    except ValueError:
                        pass
                    else:
                        # The target is a descendent of an allowed_target.
                        break
                else:
                    bad_links.append(path)
        if bad_links:
            self.fail(
                "Symlinks outside of virtualenv detected:" +
                '\n'.join(
                    '/'.join(
                        path.segmentsFrom(target_path)
                    ) + ' -> ' + path.realpath().path
                    for path in bad_links
                )
            )
class VirtualEnvTests(TestCase):
    """
    Tests for ``VirtualEnv``.
    """
    def test_install(self):
        """
        ``VirtualEnv.install`` accepts a ``PythonPackage`` instance and
        installs it.
        """
        virtualenv_dir = FilePath(self.mktemp())
        virtualenv = create_virtualenv(root=virtualenv_dir)
        package_dir = FilePath(self.mktemp())
        package = canned_package(package_dir)
        virtualenv.install(package_dir.path)
        site_packages = virtualenv_dir.descendant(
            ['lib', 'python2.7', 'site-packages'])
        installed = [f.basename() for f in site_packages.children()]
        self.assertIn(
            '{}-{}-py2.7.egg-info'.format(package.name, package.version),
            installed
        )
class InstallApplicationTests(TestCase):
    """
    Tests for ``InstallApplication``.
    """
    def test_run(self):
        """
        ``InstallApplication.run`` installs the supplied application in the
        ``target_path``.
        """
        package_uri = 'http://www.example.com/Bar-1.2.3.whl'
        spy_env = SpyVirtualEnv()
        InstallApplication(
            virtualenv=spy_env,
            package_uri=package_uri
        ).run()
        self.assertEqual([package_uri], spy_env._installed_packages)
class CreateLinksTests(TestCase):
    """
    Tests for ``CreateLinks``.
    """
    def test_run(self):
        """
        ``CreateLinks.run`` generates symlinks in ``destination_path`` for all
        the supplied ``links``.
        """
        root = FilePath(self.mktemp())
        bin_dir = root.descendant(['usr', 'bin'])
        bin_dir.makedirs()
        CreateLinks(
            links=frozenset([
                (FilePath('/opt/flocker/bin/flocker-foo'), bin_dir),
                (FilePath('/opt/flocker/bin/flocker-bar'), bin_dir),
            ])
        ).run()
        # Every created link in bin_dir must resolve back to its source
        # under /opt/flocker/bin.
        self.assertEqual(
            set(FilePath('/opt/flocker/bin').child(script)
                for script in ('flocker-foo', 'flocker-bar')),
            set(child.realpath() for child in bin_dir.children())
        )
def canned_package(root, version=b'0.3.2'):
    """
    Create a directory containing an empty Python package which can be
    installed and with a name and version which can later be tested.

    :param FilePath root: The directory in which to create the package
        (created by this function; must not already exist).
    :param version: The version of the created package.
    :return: A ``PythonPackage`` instance.
    """
    name = 'FooBar'
    root.makedirs()
    setup_py = root.child('setup.py')
    setup_py.setContent(
        dedent("""
        from setuptools import setup

        setup(
            name="{package_name}",
            version="{package_version}",
            py_modules=["{package_name}"],
        )
        """).format(package_name=name, package_version=version)
    )
    package_module = root.child(name + ".py")
    package_module.setContent(
        dedent("""
        __version__ = "{package_version}"
        """).format(package_version=version)
    )
    return PythonPackage(name=name, version=version)
class GetPackageVersionTests(TestCase):
    """
    Tests for ``GetPackageVersion``.
    """
    def test_version_default(self):
        """
        ``GetPackageVersion.version`` is ``None`` by default.
        """
        step = GetPackageVersion(virtualenv=None, package_name=None)
        self.assertIs(None, step.version)

    def assert_version_found(self, version):
        """
        ``GetPackageVersion`` assigns the exact version of a found package to
        its ``version`` attribute.

        :param version: The version of the package to test package.
        """
        test_env = FilePath(self.mktemp())
        virtualenv = VirtualEnv(root=test_env)
        InstallVirtualEnv(virtualenv=virtualenv).run()
        package_root = FilePath(self.mktemp())
        test_package = canned_package(root=package_root, version=version)
        InstallApplication(
            virtualenv=virtualenv, package_uri=package_root.path).run()

        step = GetPackageVersion(
            virtualenv=virtualenv, package_name=test_package.name)
        step.run()
        self.assertEqual(test_package.version, step.version)

    def test_version_found(self):
        """
        ``GetPackageVersion`` assigns the exact version of a found package to
        its ``version`` attribute.

        In particular, newer versions of pip/setuptools normalize the version
        accoding to PEP440. We aren't prepared to handle that yet.
        """
        # BUG FIX: the last two entries were missing a separating comma, so
        # implicit string concatenation merged them into one bogus version
        # ('0.3.2-1-gf661a6a-dirty0.3.2+doc1-dirty') and the
        # '0.3.2+doc1-dirty' case was never exercised.
        versions = [
            '0.3.2',
            '0.3.3dev5',
            '0.3.2+doc1',
            '0.3.2-1-gf661a6a',
            '0.3.2+doc1-1-gf661a6a',
            '0.3.2pre1',
            '0.3.2-1-gf661a6a-dirty',
            '0.3.2+doc1-dirty',
        ]
        for version in versions:
            self.assert_version_found(version=version)

    def test_version_not_found(self):
        """
        ``GetPackageVersion.run`` raises an exception if the supplied
        ``package_name`` is not installed in the supplied ``virtual_env``.
        """
        test_env = FilePath(self.mktemp())
        virtualenv = VirtualEnv(root=test_env)
        InstallVirtualEnv(virtualenv=virtualenv).run()

        step = GetPackageVersion(
            virtualenv=virtualenv,
            package_name='PackageWhichIsNotInstalled'
        )
        self.assertRaises(Exception, step.run)
class BuildPackageTests(TestCase):
"""
Tests for `BuildPackage`.
"""
@require_fpm
def setUp(self):
pass
@require_rpm
def test_rpm(self):
"""
``BuildPackage.run`` creates an RPM from the supplied ``source_path``.
"""
destination_path = FilePath(self.mktemp())
destination_path.makedirs()
source_path = FilePath(self.mktemp())
source_path.makedirs()
source_path.child('Foo').touch()
source_path.child('Bar').touch()
expected_prefix = FilePath('/foo/bar')
expected_paths = set([
expected_prefix.child('Foo'),
expected_prefix.child('Bar'),
FilePath('/other/file'),
])
expected_name = 'FooBar'
expected_epoch = b'3'
expected_rpm_version = rpm_version('0.3', '0.dev.1')
expected_license = 'My Test License'
expected_url = 'https://www.example.com/foo/bar'
expected_vendor = 'Acme Corporation'
expected_maintainer = 'noreply@example.com'
expected_architecture = 'i386'
expected_description = 'Explosive Tennis Balls'
expected_dependencies = ['test-dep', 'version-dep >= 42']
BuildPackage(
package_type=PackageTypes.RPM,
destination_path=destination_path,
source_paths={
source_path: FilePath('/foo/bar'),
source_path.child('Foo'): FilePath('/other/file'),
},
name=expected_name,
prefix=FilePath('/'),
epoch=expected_epoch,
rpm_version=expected_rpm_version,
license=expected_license,
url=expected_url,
vendor=expected_vendor,
maintainer=expected_maintainer,
architecture=expected_architecture,
description=expected_description,
category="Applications/System",
dependencies=[
Dependency(package='test-dep'),
Dependency(package='version-dep', compare='>=', version='42')],
).run()
rpms = glob('{}*.rpm'.format(
destination_path.child(expected_name).path))
self.assertEqual(1, len(rpms))
expected_headers = dict(
Name=expected_name,
Epoch=expected_epoch,
Version=expected_rpm_version.version,
Release=expected_rpm_version.release,
License=expected_license,
URL=expected_url,
Vendor=expected_vendor,
Packager=expected_maintainer,
Architecture=expected_architecture,
Group="Applications/System",
)
rpm_path = FilePath(rpms[0])
assert_rpm_requires(self, expected_dependencies, rpm_path)
assert_rpm_headers(self, expected_headers, rpm_path)
assert_rpm_content(self, expected_paths, rpm_path)
    @require_dpkg
    def test_deb(self):
        """
        ``BuildPackage.run`` creates a .deb package from the supplied
        ``source_path``.
        """
        destination_path = FilePath(self.mktemp())
        destination_path.makedirs()
        source_path = FilePath(self.mktemp())
        source_path.makedirs()
        source_path.child('Foo').touch()
        source_path.child('Bar').touch()
        expected_prefix = FilePath('/foo/bar')
        expected_paths = set([
            expected_prefix.child('Foo'),
            expected_prefix.child('Bar'),
            FilePath('/other/file'),
            # This is added automatically by fpm despite not supplying the
            # --deb-changelog option
            FilePath('/usr/share/doc/foobar/changelog.Debian.gz'),
        ])
        # Debian package names are lowercased, unlike RPM names.
        expected_name = 'FooBar'.lower()
        expected_epoch = b'3'
        expected_rpm_version = rpm_version('0.3', '0.dev.1')
        expected_license = 'My Test License'
        expected_url = 'https://www.example.com/foo/bar'
        expected_vendor = 'Acme Corporation'
        expected_maintainer = 'noreply@example.com'
        expected_architecture = 'i386'
        expected_description = 'Explosive Tennis Balls'
        BuildPackage(
            package_type=PackageTypes.DEB,
            destination_path=destination_path,
            source_paths={
                source_path: FilePath('/foo/bar'),
                source_path.child('Foo'): FilePath('/other/file'),
            },
            name=expected_name,
            prefix=FilePath("/"),
            epoch=expected_epoch,
            rpm_version=expected_rpm_version,
            license=expected_license,
            url=expected_url,
            vendor=expected_vendor,
            maintainer=expected_maintainer,
            architecture=expected_architecture,
            description=expected_description,
            category="admin",
            dependencies=[
                Dependency(package='test-dep'),
                Dependency(package='version-dep', compare='>=', version='42')],
        ).run()
        packages = glob('{}*.deb'.format(
            destination_path.child(expected_name.lower()).path))
        self.assertEqual(1, len(packages))
        expected_headers = dict(
            Package=expected_name,
            # dpkg reports the full version as epoch:version-release.
            Version=(
                expected_epoch
                + b':'
                + expected_rpm_version.version
                + '-'
                + expected_rpm_version.release
            ),
            License=expected_license,
            Vendor=expected_vendor,
            Architecture=expected_architecture,
            Maintainer=expected_maintainer,
            Homepage=expected_url,
            Depends=', '.join(['test-dep', 'version-dep (>= 42)']),
            Section="admin",
        )
        assert_deb_headers(self, expected_headers, FilePath(packages[0]))
        assert_deb_content(self, expected_paths, FilePath(packages[0]))
class LintPackageTests(TestCase):
    """
    Tests for ``LintPackage``.
    """
    @require_fpm
    def setUp(self):
        # Nothing to prepare; the decorator simply skips every test in this
        # class when the ``fpm`` command is not installed.
        pass

    def assert_lint(self, package_type, expected_output):
        """
        ``LintPackage.run`` reports only unfiltered errors and raises
        ``SystemExit``.

        :param PackageTypes package_type: The type of package to test.
        :param bytes expected_output: The expected output of the linting.
        """
        destination_path = FilePath(self.mktemp())
        destination_path.makedirs()
        source_path = FilePath(self.mktemp())
        source_path.makedirs()
        source_path.child('Foo').touch()
        source_path.child('Bar').touch()
        # Build a real (minimal) package first so the linter has something
        # concrete to inspect.
        BuildPackage(
            package_type=package_type,
            destination_path=destination_path,
            source_paths={
                source_path: FilePath('/foo/bar'),
                source_path.child('Foo'): FilePath('/opt/file'),
            },
            name="package-name",
            prefix=FilePath('/'),
            epoch=b'3',
            rpm_version=rpm_version('0.3', '0.dev.1'),
            license="Example",
            url="https://package.example/",
            vendor="Acme Corporation",
            maintainer='Someone <noreply@example.com>',
            architecture="all",
            description="Description\n\nExtended",
            category="none",
            dependencies=[]
        ).run()
        step = LintPackage(
            package_type=package_type,
            destination_path=destination_path,
            epoch=b'3',
            rpm_version=rpm_version('0.3', '0.dev.1'),
            package='package-name',
            architecture='all'
        )
        # Capture the linter's report instead of letting it go to stdout.
        step.output = StringIO()
        self.assertRaises(SystemExit, step.run)
        self.assertEqual(step.output.getvalue(), expected_output)

    @require_rpmlint
    def test_rpm(self):
        """
        rpmlint doesn't report filtered errors.
        """
        # The following warnings and errors are filtered.
        # - E: no-changelogname-tag
        # - W: no-documentation
        # - E: zero-length
        self.assert_lint(PackageTypes.RPM, b"""\
Package errors (package-name):
package-name.noarch: W: non-standard-group default
package-name.noarch: W: invalid-license Example
package-name.noarch: W: invalid-url URL: https://package.example/ \
<urlopen error [Errno -2] Name or service not known>
package-name.noarch: W: cross-directory-hard-link /foo/bar/Foo /opt/file
""")

    @require_lintian
    def test_deb(self):
        """
        lintian doesn't report filtered errors.
        """
        # The following warnings and errors are filtered.
        # - E: package-name: no-copyright-file
        # - E: package-name: dir-or-file-in-opt
        # - W: package-name: file-missing-in-md5sums .../changelog.Debian.gz
        self.assert_lint(PackageTypes.DEB, b"""\
Package errors (package-name):
W: package-name: unknown-section default
E: package-name: non-standard-toplevel-dir foo/
W: package-name: file-in-unusual-dir foo/bar/Bar
W: package-name: file-in-unusual-dir foo/bar/Foo
W: package-name: package-contains-hardlink foo/bar/Foo -> opt/file
""")
class OmnibusPackageBuilderTests(TestCase):
    """
    Tests for ``omnibus_package_builder``.
    """
    def test_centos_7(self):
        self.assert_omnibus_steps(
            distribution=Distribution(name='centos', version='7'),
            expected_category='Applications/System',
            expected_package_type=PackageTypes.RPM,
        )

    def test_ubuntu_14_04(self):
        self.assert_omnibus_steps(
            distribution=Distribution(name='ubuntu', version='14.04'),
            expected_category='admin',
            expected_package_type=PackageTypes.DEB,
        )

    def test_fedora_20(self):
        self.assert_omnibus_steps(
            distribution=Distribution(name='fedora', version='20'),
            expected_category='Applications/System',
            expected_package_type=PackageTypes.RPM,
        )

    def assert_omnibus_steps(
            self,
            distribution=Distribution(name='fedora', version='20'),
            expected_category='Applications/System',
            expected_package_type=PackageTypes.RPM,
            ):
        """
        A sequence of build steps is returned.

        :param Distribution distribution: The distribution to build for.
        :param expected_category: The package category expected for this
            distribution's package format.
        :param expected_package_type: The ``PackageTypes`` constant expected
            for this distribution.
        """
        self.patch(packaging, 'CURRENT_DISTRIBUTION', distribution)

        # Stub out dependency calculation so the expected steps below can
        # reference fixed, recognizable Dependency values.
        fake_dependencies = {
            'python': [Dependency(package='python-dep')],
            'node': [Dependency(package='node-dep')],
            'cli': [Dependency(package='cli-dep')],
        }

        def fake_make_dependencies(
                package_name, package_version, distribution):
            return fake_dependencies[package_name]

        self.patch(packaging, 'make_dependencies', fake_make_dependencies)
        expected_destination_path = FilePath(self.mktemp())
        target_path = FilePath(self.mktemp())
        flocker_cli_path = target_path.child('flocker-cli')
        flocker_node_path = target_path.child('flocker-node')

        expected_virtualenv_path = FilePath('/opt/flocker')
        expected_prefix = FilePath('/')
        expected_epoch = PACKAGE.EPOCH.value
        expected_package_uri = b'https://www.example.com/foo/Bar-1.2.3.whl'
        expected_package_version_step = GetPackageVersion(
            virtualenv=VirtualEnv(root=expected_virtualenv_path),
            package_name='flocker'
        )
        expected_version = DelayedRpmVersion(
            package_version_step=expected_package_version_step
        )
        expected_license = PACKAGE.LICENSE.value
        expected_url = PACKAGE.URL.value
        expected_vendor = PACKAGE.VENDOR.value
        expected_maintainer = PACKAGE.MAINTAINER.value

        expected = BuildSequence(
            steps=(
                # clusterhq-python-flocker steps
                InstallVirtualEnv(
                    virtualenv=VirtualEnv(root=expected_virtualenv_path)),
                InstallApplication(
                    virtualenv=VirtualEnv(root=expected_virtualenv_path),
                    package_uri=b'https://www.example.com/foo/Bar-1.2.3.whl',
                ),
                expected_package_version_step,
                BuildPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    source_paths={
                        expected_virtualenv_path: expected_virtualenv_path
                    },
                    name='clusterhq-python-flocker',
                    prefix=expected_prefix,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    license=expected_license,
                    url=expected_url,
                    vendor=expected_vendor,
                    maintainer=expected_maintainer,
                    architecture='native',
                    description=PACKAGE_PYTHON.DESCRIPTION.value,
                    category=expected_category,
                    directories=[expected_virtualenv_path],
                    dependencies=[Dependency(package='python-dep')],
                ),
                LintPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    package='clusterhq-python-flocker',
                    architecture="native",
                ),

                # clusterhq-flocker-cli steps
                CreateLinks(
                    links=[
                        (FilePath('/opt/flocker/bin/flocker-deploy'),
                         flocker_cli_path),
                    ]
                ),
                BuildPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    source_paths={flocker_cli_path: FilePath("/usr/bin")},
                    name='clusterhq-flocker-cli',
                    prefix=expected_prefix,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    license=expected_license,
                    url=expected_url,
                    vendor=expected_vendor,
                    maintainer=expected_maintainer,
                    architecture='all',
                    description=PACKAGE_CLI.DESCRIPTION.value,
                    category=expected_category,
                    dependencies=[Dependency(package='cli-dep')],
                ),
                LintPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    package='clusterhq-flocker-cli',
                    architecture="all",
                ),

                # clusterhq-flocker-node steps
                CreateLinks(
                    links=[
                        (FilePath('/opt/flocker/bin/flocker-reportstate'),
                         flocker_node_path),
                        (FilePath('/opt/flocker/bin/flocker-changestate'),
                         flocker_node_path),
                        (FilePath('/opt/flocker/bin/flocker-volume'),
                         flocker_node_path),
                        (FilePath('/opt/flocker/bin/flocker-control'),
                         flocker_node_path),
                        (FilePath('/opt/flocker/bin/flocker-zfs-agent'),
                         flocker_node_path),
                    ]
                ),
                BuildPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    source_paths={flocker_node_path: FilePath("/usr/sbin")},
                    name='clusterhq-flocker-node',
                    prefix=expected_prefix,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    license=expected_license,
                    url=expected_url,
                    vendor=expected_vendor,
                    maintainer=expected_maintainer,
                    architecture='all',
                    description=PACKAGE_NODE.DESCRIPTION.value,
                    category=expected_category,
                    dependencies=[Dependency(package='node-dep')],
                ),
                LintPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    package='clusterhq-flocker-node',
                    architecture="all",
                ),
            )
        )
        assert_equal_steps(
            self,
            expected,
            omnibus_package_builder(distribution=distribution,
                                    destination_path=expected_destination_path,
                                    package_uri=expected_package_uri,
                                    target_dir=target_path))
class DockerBuildOptionsTests(TestCase):
    """
    Tests for ``DockerBuildOptions``.
    """

    # Opaque sentinel standing in for whatever package type the host
    # distribution would natively use.
    native_package_type = object()

    def setUp(self):
        """
        Patch ``admin.packaging._native_package_type`` to return a fixed value.
        """
        self.patch(
            packaging, '_native_package_type',
            lambda: self.native_package_type)

    def test_defaults(self):
        """
        ``DockerBuildOptions`` destination path defaults to the current working
        directory.
        """
        expected_defaults = {
            'destination-path': '.',
        }
        self.assertEqual(expected_defaults, DockerBuildOptions())

    def test_package_uri_missing(self):
        """
        ``DockerBuildOptions`` requires a single positional argument containing
        the URI of the Python package which is being packaged.
        """
        exception = self.assertRaises(
            UsageError, DockerBuildOptions().parseOptions, [])
        self.assertEqual('Wrong number of arguments.', str(exception))

    def test_package_uri_supplied(self):
        """
        ``DockerBuildOptions`` saves the supplied ``package-uri``.
        """
        expected_uri = 'http://www.example.com/foo-bar.whl'

        options = DockerBuildOptions()
        options.parseOptions([expected_uri])

        self.assertEqual(expected_uri, options['package-uri'])
class DockerBuildScriptTests(TestCase):
    """
    Tests for ``DockerBuildScript``.
    """
    def test_usage_error_status(self):
        """
        ``DockerBuildScript.main`` raises ``SystemExit`` if there are missing
        command line options.
        """
        fake_sys_module = FakeSysModule(argv=[])
        script = DockerBuildScript(sys_module=fake_sys_module)
        exception = self.assertRaises(SystemExit, script.main)
        self.assertEqual(1, exception.code)

    def test_usage_error_message(self):
        """
        ``DockerBuildScript.main`` prints a usage error to ``stderr`` if there
        are missing command line options.
        """
        fake_sys_module = FakeSysModule(argv=[])
        script = DockerBuildScript(sys_module=fake_sys_module)
        try:
            script.main()
        except SystemExit:
            pass
        # Only the final line of the usage text names the actual problem.
        self.assertEqual(
            'Wrong number of arguments.',
            fake_sys_module.stderr.getvalue().splitlines()[-1]
        )

    def test_build_command(self):
        """
        ``DockerBuildScript.build_command`` is ``omnibus_package_builder`` by
        default.
        """
        self.assertIs(omnibus_package_builder, DockerBuildScript.build_command)

    def test_run(self):
        """
        ``DockerBuildScript.main`` calls ``run`` on the instance returned by
        ``build_command``.
        """
        expected_destination_path = FilePath(self.mktemp())
        expected_package_uri = 'http://www.example.com/foo/bar.whl'
        fake_sys_module = FakeSysModule(
            argv=[
                'build-command-name',
                '--destination-path=%s' % (expected_destination_path.path,),
                expected_package_uri]
        )
        distribution = Distribution(name='test-distro', version='30')
        self.patch(packaging, 'CURRENT_DISTRIBUTION', distribution)
        script = DockerBuildScript(sys_module=fake_sys_module)
        build_step = SpyStep()
        arguments = []

        def record_arguments(*args, **kwargs):
            # Record the call so the keyword arguments can be asserted
            # below, then hand back the spy step to be run.
            arguments.append((args, kwargs))
            return build_step
        script.build_command = record_arguments
        script.main()
        expected_build_arguments = [(
            (),
            dict(destination_path=expected_destination_path,
                 package_uri=expected_package_uri,
                 distribution=distribution)
        )]
        self.assertEqual(expected_build_arguments, arguments)
        self.assertTrue(build_step.ran)
class BuildOptionsTests(TestCase):
    """
    Tests for ``BuildOptions``.
    """

    def test_defaults(self):
        """
        ``BuildOptions`` destination path defaults to the current working
        directory.
        """
        expected_defaults = {
            'destination-path': '.',
            'distribution': None,
        }
        self.assertEqual(expected_defaults, BuildOptions())

    def test_distribution_missing(self):
        """
        ``BuildOptions.parseOptions`` raises ``UsageError`` if
        ``--distribution`` is not supplied.
        """
        options = BuildOptions()
        self.assertRaises(
            UsageError,
            options.parseOptions,
            ['http://example.com/fake/uri'])

    def test_package_uri_missing(self):
        """
        ``BuildOptions`` requires a single positional argument containing
        the URI of the Python package which is being packaged.
        """
        exception = self.assertRaises(
            UsageError, BuildOptions().parseOptions, [])
        self.assertEqual('Wrong number of arguments.', str(exception))

    def test_package_options_supplied(self):
        """
        ``BuildOptions`` saves the supplied options.
        """
        expected_uri = 'http://www.example.com/foo-bar.whl'
        expected_distribution = 'ubuntu1404'
        options = BuildOptions()
        options.parseOptions(
            ['--distribution', expected_distribution, expected_uri])

        self.assertEqual(
            (expected_distribution, expected_uri),
            (options['distribution'], options['package-uri'])
        )
class BuildScriptTests(TestCase):
    """
    Tests for ``BuildScript``.
    """
    def test_usage_error_status(self):
        """
        ``BuildScript.main`` raises ``SystemExit`` if there are missing command
        line options.
        """
        fake_sys_module = FakeSysModule(argv=[])
        script = BuildScript(sys_module=fake_sys_module)
        exception = self.assertRaises(SystemExit, script.main)
        self.assertEqual(1, exception.code)

    def test_usage_error_message(self):
        """
        ``BuildScript.main`` prints a usage error to ``stderr`` if there are
        missing command line options.
        """
        fake_sys_module = FakeSysModule(argv=[])
        script = BuildScript(sys_module=fake_sys_module)
        try:
            script.main()
        except SystemExit:
            pass
        # Only the final line of the usage text names the actual problem.
        self.assertEqual(
            'Wrong number of arguments.',
            fake_sys_module.stderr.getvalue().splitlines()[-1]
        )

    def test_build_command(self):
        """
        ``BuildScript.build_command`` is ``build_in_docker`` by default.
        """
        self.assertIs(build_in_docker, BuildScript.build_command)

    def test_run(self):
        """
        ``BuildScript.main`` calls ``run`` on the instance returned by
        ``build_command``.
        """
        expected_destination_path = FilePath(self.mktemp())
        expected_distribution = 'centos7'
        expected_package_uri = 'http://www.example.com/foo/bar.whl'
        fake_sys_module = FakeSysModule(
            argv=[
                'build-command-name',
                '--destination-path', expected_destination_path.path,
                '--distribution=%s' % (expected_distribution,),
                expected_package_uri]
        )
        script = BuildScript(sys_module=fake_sys_module)
        build_step = SpyStep()
        arguments = []

        def record_arguments(*args, **kwargs):
            # Record the call so the keyword arguments can be asserted
            # below, then hand back the spy step to be run.
            arguments.append((args, kwargs))
            return build_step
        script.build_command = record_arguments
        script.main()
        expected_build_arguments = [(
            (),
            dict(destination_path=expected_destination_path,
                 distribution=expected_distribution,
                 package_uri=expected_package_uri,
                 top_level=None)
        )]
        self.assertEqual(expected_build_arguments, arguments)
        self.assertTrue(build_step.ran)
class BuildInDockerFunctionTests(TestCase):
    """
    Tests for ``build_in_docker``.
    """
    def test_steps(self):
        """
        ``build_in_docker`` returns a ``BuildSequence`` comprising
        ``DockerBuild`` and ``DockerRun`` instances.
        """
        supplied_distribution = 'Foo'
        expected_tag = 'clusterhq/build-%s' % (supplied_distribution,)
        supplied_top_level = FilePath('/foo/bar')
        expected_build_directory = supplied_top_level.descendant(
            ['admin', 'build_targets', supplied_distribution])
        supplied_destination_path = FilePath('/baz/qux')
        # The host paths get mounted into the container at fixed locations.
        expected_volumes = {
            FilePath('/output'): supplied_destination_path,
            FilePath('/flocker'): supplied_top_level,
        }
        expected_package_uri = 'http://www.example.com/foo/bar/whl'

        assert_equal_steps(
            test_case=self,
            expected=BuildSequence(
                steps=[
                    DockerBuild(
                        tag=expected_tag,
                        build_directory=expected_build_directory
                    ),
                    DockerRun(
                        tag=expected_tag,
                        volumes=expected_volumes,
                        command=[expected_package_uri]
                    ),
                ]
            ),
            actual=build_in_docker(
                destination_path=supplied_destination_path,
                distribution=supplied_distribution,
                top_level=supplied_top_level,
                package_uri=expected_package_uri
            )
        )
class MakeDependenciesTests(TestCase):
    """
    Tests for ``make_dependencies``.
    """
    def test_node(self):
        """
        ``make_dependencies`` includes the supplied ``version`` of
        ``clusterhq-python-flocker`` for ``clusterhq-flocker-node``.
        """
        expected_version = '1.2.3'
        self.assertIn(
            Dependency(
                package='clusterhq-python-flocker',
                compare='=',
                version=expected_version
            ),
            make_dependencies('node', expected_version,
                              Distribution(name='fedora', version='20'))
        )

    def test_cli(self):
        """
        ``make_dependencies`` includes the supplied ``version`` of
        ``clusterhq-python-flocker`` for ``clusterhq-flocker-cli``.
        """
        expected_version = '1.2.3'
        self.assertIn(
            Dependency(
                package='clusterhq-python-flocker',
                compare='=',
                version=expected_version
            ),
            make_dependencies('cli', expected_version,
                              Distribution(name='fedora', version='20'))
        )
Add flocker to omnibus packaging tests.
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Tests for ``admin.packaging``.
"""
from glob import glob
from subprocess import check_output
from textwrap import dedent
from unittest import skipIf
from StringIO import StringIO
from twisted.python.filepath import FilePath
from twisted.python.procutils import which
from twisted.python.usage import UsageError
from twisted.trial.unittest import TestCase
from virtualenv import REQUIRED_MODULES as VIRTUALENV_REQUIRED_MODULES
from flocker.testtools import FakeSysModule
from .. import packaging
from ..packaging import (
omnibus_package_builder, InstallVirtualEnv, InstallApplication,
BuildPackage, BuildSequence, BuildOptions, BuildScript, DockerBuildOptions,
DockerBuildScript, GetPackageVersion, DelayedRpmVersion, CreateLinks,
PythonPackage, create_virtualenv, VirtualEnv, PackageTypes, Distribution,
Dependency, build_in_docker, DockerBuild, DockerRun,
PACKAGE, PACKAGE_PYTHON, PACKAGE_CLI, PACKAGE_NODE,
make_dependencies,
LintPackage,
)
from ..release import rpm_version
# Directory three levels above this module — presumably the project top
# level; confirm against the repository layout.
FLOCKER_PATH = FilePath(__file__).parent().parent().parent()

# Skip decorators for tests that need external packaging tools on PATH.
require_fpm = skipIf(not which('fpm'), "Tests require the ``fpm`` command.")
require_rpm = skipIf(not which('rpm'), "Tests require the ``rpm`` command.")
require_rpmlint = skipIf(not which('rpmlint'),
                         "Tests require the ``rpmlint`` command.")
require_dpkg = skipIf(not which('dpkg'), "Tests require the ``dpkg`` command.")
require_lintian = skipIf(not which('lintian'),
                         "Tests require the ``lintian`` command.")

# Path of the Docker daemon's UNIX socket.
DOCKER_SOCK = '/var/run/docker.sock'
def assert_equal_steps(test_case, expected, actual):
    """
    Assert that the list of provided steps are the same.
    If they are not, display the differences intelligently.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param expected: The expected build step instance.
    :param actual: The actual build step instance.
    :raises: ``TestFailure`` if the build steps are not equal, showing the
        unequal or missing steps.
    """
    expected_steps = getattr(expected, 'steps')
    actual_steps = getattr(actual, 'steps')
    if None in (expected_steps, actual_steps):
        # One side has no step list at all; fall back to plain comparison.
        test_case.assertEqual(expected, actual)
    else:
        mismatch_steps = []
        missing_steps = []
        for index, expected_step in enumerate(expected_steps):
            try:
                actual_step = actual_steps[index]
            except IndexError:
                # Everything from here on is expected but absent.
                missing_steps = expected_steps[index:]
                break
            if expected_step != actual_step:
                mismatch_steps.append(
                    '* expected: {} !=\n'
                    '  actual: {}'.format(
                        expected_step, actual_step))
        # Slice from the expected length rather than ``index + 1``: the old
        # index-based slice silently skipped the first actual step whenever
        # ``expected_steps`` was empty (the loop never ran, leaving the
        # stale initial index).
        extra_steps = actual_steps[len(expected_steps):]
        if mismatch_steps or missing_steps or extra_steps:
            test_case.fail(
                'Step Mismatch\n'
                'Mismatch:\n{}\n'
                'Missing:\n{}\n'
                'Extra:\n{}'.format(
                    '\n'.join(mismatch_steps), missing_steps, extra_steps)
            )
def assert_dict_contains(test_case, expected, actual, message=''):
    """
    Fail unless the supplied ``actual`` ``dict`` contains all the items in
    ``expected``.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param expected: The expected build step instance.
    :param actual: The actual build step instance.
    :param message: Optional prefix for the failure message.
    """
    # Sentinel distinguishing "key absent" from "key present with any value".
    absent = object()
    missing_items = []
    mismatch_items = []
    for key, wanted in expected.items():
        found = actual.get(key, absent)
        if found is absent:
            missing_items.append(key)
        elif found != wanted:
            mismatch_items.append(
                '{}: {} != {}'.format(key, wanted, found)
            )
    if missing_items or mismatch_items:
        test_case.fail(
            '{}\n'
            'Missing items: {}\n'
            'Mismatch items: {}\n'
            'Actual items: {}'.format(
                message, missing_items, mismatch_items, actual)
        )
def parse_colon_dict(data):
    """
    Parse colon seperated values into a dictionary, treating lines
    lacking a colon as continutation lines.

    Any leading lines without a colon will be associated with the key
    ``None``.

    This is the format output by ``rpm --query`` and ``dpkg --info``.

    :param bytes data: Data to parse
    :return: A ``dict`` containing the parsed data.
    """
    parsed = {}
    current_key = None
    for line in data.splitlines():
        pieces = line.split(':', 1)
        if len(pieces) == 2:
            # A "key: value" line starts a new entry.
            current_key = pieces[0].strip()
            parsed[current_key] = pieces[1].strip()
        else:
            # No colon: append the (stripped) text to the current entry,
            # creating it if this is a leading continuation line.
            parsed[current_key] = (
                parsed.get(current_key, '') + pieces[0].strip())
    return parsed
def assert_rpm_headers(test_case, expected_headers, rpm_path):
    """
    Fail unless the ``RPM`` file at ``rpm_path`` contains all the
    ``expected_headers``.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param dict expected_headers: A dictionary of header key / value pairs.
    :param FilePath rpm_path: The path to the RPM file under test.
    """
    # Query the package metadata and compare the parsed header dictionary.
    query = ['rpm', '--query', '--info', '--package', rpm_path.path]
    parsed_headers = parse_colon_dict(check_output(query))
    assert_dict_contains(
        test_case, expected_headers, parsed_headers, 'Missing RPM Headers: ')
def assert_rpm_content(test_case, expected_paths, package_path):
    """
    Fail unless the ``RPM`` file at ``rpm_path`` contains all the
    ``expected_paths``.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param set expected_paths: A set of ``FilePath`` s
    :param FilePath package_path: The path to the package under test.
    """
    # One path per line of ``rpm --query --list`` output.
    listing = check_output(
        ['rpm', '--query', '--list', '--package', package_path.path])
    actual_paths = {FilePath(line) for line in listing.splitlines()}
    test_case.assertEqual(expected_paths, actual_paths)
def assert_deb_content(test_case, expected_paths, package_path):
    """
    Fail unless the ``deb`` file at ``package_path`` contains all the
    ``expected_paths``.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param set expected_paths: A set of ``FilePath`` s
    :param FilePath package_path: The path to the package under test.
    """
    # Unpack into a scratch directory and collect every regular entry,
    # re-rooted at ``/`` for comparison against the expected paths.
    unpack_dir = FilePath(test_case.mktemp())
    unpack_dir.makedirs()
    check_output(['dpkg', '--extract', package_path.path, unpack_dir.path])
    actual_paths = set(
        FilePath('/').descendant(entry.segmentsFrom(unpack_dir))
        for entry in unpack_dir.walk()
        if not entry.isdir()
    )
    test_case.assertEqual(expected_paths, actual_paths)
def assert_deb_headers(test_case, expected_headers, package_path):
    """
    Fail unless the ``deb`` file at ``package_path`` contains all the
    ``expected_headers``.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param dict expected_headers: A dictionary of header key / value pairs.
    :param FilePath package_path: The path to the deb file under test.
    """
    # ``dpkg --info`` emits colon-separated control fields.
    info = check_output(['dpkg', '--info', package_path.path])
    assert_dict_contains(
        test_case, expected_headers, parse_colon_dict(info),
        'Missing dpkg Headers: ')
def assert_rpm_requires(test_case, expected_requirements, rpm_path):
    """
    Fail unless the ``RPM`` file at ``rpm_path`` has all the
    ``expected_requirements``.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param list expected_requirements: A list of requirement strings.
    :param FilePath rpm_path: The path to the RPM file under test.
    """
    query_output = check_output(
        ['rpm', '--query', '--requires', '--package', rpm_path.path])
    found_requirements = set(
        line.strip() for line in query_output.splitlines())
    # Extra requirements are allowed; only absences are failures.
    missing_requirements = set(expected_requirements) - found_requirements
    if missing_requirements:
        test_case.fail('Missing requirements: {} in {}'.format(
            missing_requirements, rpm_path.path))
class SpyVirtualEnv(object):
    """
    A ``VirtualEnv`` like class which records the ``package_uri``s which are
    supplied to its ``install`` method.
    """
    def __init__(self):
        # URIs passed to ``install``, in call order. Read directly by tests.
        self._installed_packages = []

    def install(self, package_uri):
        """Record ``package_uri`` instead of performing any installation."""
        self._installed_packages.append(package_uri)
class SpyStep(object):
    """
    A build step which records the fact that it has been run.

    :ivar bool ran: ``False`` by default.
    """
    # Class-level default; ``run`` shadows it with an instance attribute.
    ran = False

    def run(self):
        """Mark this step as having been executed."""
        self.ran = True
class BuildSequenceTests(TestCase):
    """
    Tests for ``BuildSequence``.
    """
    def test_run(self):
        """
        ``BuildSequence`` calls the ``run`` method of each of its ``steps``.
        """
        step1 = SpyStep()
        step2 = SpyStep()

        BuildSequence(steps=(step1, step2)).run()

        # Both spies must have been invoked.
        self.assertEqual((True, True), (step1.ran, step2.ran))
def assert_has_paths(test_case, expected_paths, parent_path):
    """
    Fail if any of the ``expected_paths`` are not existing relative paths of
    ``parent_path``.

    :param TestCase test_case: The ``TestCase`` with which to make assertions.
    :param list expected_paths: A ``list`` of ``bytes`` relative path names
        which are expected to exist beneath ``parent_path``.
    :param FilePath parent_path: The root ``FilePath`` in which to search for
        ``expected_paths``.
    """
    missing_paths = [
        relative for relative in expected_paths
        if not parent_path.preauthChild(relative).exists()
    ]
    if missing_paths:
        test_case.fail('Missing paths: {}'.format(missing_paths))
class InstallVirtualEnvTests(TestCase):
    """
    Tests for ``InstallVirtualEnv``.
    """
    def test_run(self):
        """
        ``InstallVirtualEnv.run`` installs a virtual python environment using
        create_virtualenv passing ``target_path`` as ``root``.
        """
        virtualenv = VirtualEnv(root=FilePath(self.mktemp()))
        step = InstallVirtualEnv(virtualenv=virtualenv)
        calls = []
        # Replace the real creation hook with a recorder so the test is
        # fast and only asserts on the arguments passed through.
        self.patch(
            step, '_create_virtualenv', lambda **kwargs: calls.append(kwargs))
        step.run()
        self.assertEqual([dict(root=virtualenv.root)], calls)
class CreateVirtualenvTests(TestCase):
    """
    Tests for ``create_virtualenv``.
    """
    def test_bin(self):
        """
        ``create_virtualenv`` installs a virtual python environment in its
        ``target_path``.
        """
        virtualenv = VirtualEnv(root=FilePath(self.mktemp()))
        InstallVirtualEnv(virtualenv=virtualenv).run()
        expected_paths = ['bin/pip', 'bin/python']
        assert_has_paths(self, expected_paths, virtualenv.root)

    def test_pythonpath(self):
        """
        ``create_virtualenv`` installs a virtual python whose path does not
        include the system python libraries.
        """
        target_path = FilePath(self.mktemp())
        create_virtualenv(root=target_path)
        output = check_output([
            target_path.descendant(['bin', 'python']).path,
            '-c', r'import sys; sys.stdout.write("\n".join(sys.path))'
        ])
        # We should probably check for lib64 as well here.
        self.assertNotIn(
            '/usr/lib/python2.7/site-packages', output.splitlines())

    def test_bootstrap_pyc(self):
        """
        ``create_virtualenv`` creates links to the pyc files for all the
        modules required for the virtualenv bootstrap process.
        """
        target_path = FilePath(self.mktemp())
        create_virtualenv(root=target_path)

        py_files = []
        for module_name in VIRTUALENV_REQUIRED_MODULES:
            py_base = target_path.descendant(['lib', 'python2.7', module_name])
            py = py_base.siblingExtension('.py')
            pyc = py_base.siblingExtension('.pyc')
            # Record any module whose .py exists but whose .py/.pyc pair is
            # not fully symlinked.
            if py.exists() and False in (py.islink(), pyc.islink()):
                py_files.append('PY: {} > {}\nPYC: {} > {}\n'.format(
                    '/'.join(py.segmentsFrom(target_path)),
                    py.realpath().path,
                    '/'.join(pyc.segmentsFrom(target_path)),
                    pyc.islink() and pyc.realpath().path or 'NOT A SYMLINK'
                ))

        if py_files:
            self.fail(
                'Non-linked bootstrap pyc files in {}: \n{}'.format(
                    target_path, '\n'.join(py_files)
                )
            )

    def test_internal_symlinks_only(self):
        """
        The resulting ``virtualenv`` only contains symlinks to files inside
        the virtualenv and to /usr on the host OS.
        """
        target_path = FilePath(self.mktemp())
        create_virtualenv(root=target_path)
        allowed_targets = (target_path, FilePath('/usr'),)
        bad_links = []
        for path in target_path.walk():
            if path.islink():
                realpath = path.realpath()
                for allowed_target in allowed_targets:
                    try:
                        realpath.segmentsFrom(allowed_target)
                    except ValueError:
                        pass
                    else:
                        # The target is a descendent of an allowed_target.
                        break
                else:
                    # for/else: no allowed target matched this symlink.
                    bad_links.append(path)
        if bad_links:
            self.fail(
                "Symlinks outside of virtualenv detected:" +
                '\n'.join(
                    '/'.join(
                        path.segmentsFrom(target_path)
                    ) + ' -> ' + path.realpath().path
                    for path in bad_links
                )
            )
class VirtualEnvTests(TestCase):
    """
    Tests for ``VirtualEnv``.
    """
    def test_install(self):
        """
        ``VirtualEnv.install`` accepts a ``PythonPackage`` instance and
        installs it.
        """
        virtualenv_dir = FilePath(self.mktemp())
        virtualenv = create_virtualenv(root=virtualenv_dir)
        package_dir = FilePath(self.mktemp())
        package = canned_package(package_dir)
        virtualenv.install(package_dir.path)
        # The installed egg-info directory proves the package landed in
        # site-packages with the expected name and version.
        self.assertIn(
            '{}-{}-py2.7.egg-info'.format(package.name, package.version),
            [f.basename() for f in virtualenv_dir.descendant(
                ['lib', 'python2.7', 'site-packages']).children()]
        )
class InstallApplicationTests(TestCase):
    """
    Tests for ``InstallApplication``.
    """

    def test_run(self):
        """
        ``InstallApplication.run`` installs the supplied application in the
        ``target_path``.
        """
        package_uri = 'http://www.example.com/Bar-1.2.3.whl'
        fake_env = SpyVirtualEnv()
        InstallApplication(
            virtualenv=fake_env,
            package_uri=package_uri
        ).run()

        # The step must delegate installation to the virtualenv.
        self.assertEqual(
            [package_uri], fake_env._installed_packages)
class CreateLinksTests(TestCase):
    """
    Tests for ``CreateLinks``.
    """
    def test_run(self):
        """
        ``CreateLinks.run`` generates symlinks in ``destination_path`` for all
        the supplied ``links``.
        """
        root = FilePath(self.mktemp())
        bin_dir = root.descendant(['usr', 'bin'])
        bin_dir.makedirs()

        CreateLinks(
            links=frozenset([
                (FilePath('/opt/flocker/bin/flocker-foo'), bin_dir),
                (FilePath('/opt/flocker/bin/flocker-bar'), bin_dir),
            ])
        ).run()

        # Each created entry must resolve back to its /opt/flocker target.
        self.assertEqual(
            set(FilePath('/opt/flocker/bin').child(script)
                for script in ('flocker-foo', 'flocker-bar')),
            set(child.realpath() for child in bin_dir.children())
        )
def canned_package(root, version=b'0.3.2'):
    """
    Create a directory containing an empty Python package which can be
    installed and with a name and version which can later be tested.

    :param FilePath root: The directory in which to create the package; it
        is created by this function.
    :param version: The version of the created package.
    :return: A ``PythonPackage`` instance.
    """
    name = 'FooBar'
    root.makedirs()
    setup_py = root.child('setup.py')
    # Minimal setuptools boilerplate with the name/version interpolated.
    setup_py.setContent(
        dedent("""
        from setuptools import setup

        setup(
            name="{package_name}",
            version="{package_version}",
            py_modules=["{package_name}"],
        )
        """).format(package_name=name, package_version=version)
    )
    package_module = root.child(name + ".py")
    package_module.setContent(
        dedent("""
        __version__ = "{package_version}"
        """).format(package_version=version)
    )

    return PythonPackage(name=name, version=version)
class GetPackageVersionTests(TestCase):
    """
    Tests for ``GetPackageVersion``.
    """
    def test_version_default(self):
        """
        ``GetPackageVersion.version`` is ``None`` by default.
        """
        step = GetPackageVersion(virtualenv=None, package_name=None)
        self.assertIs(None, step.version)
    def assert_version_found(self, version):
        """
        ``GetPackageVersion`` assigns the exact version of a found package to
        its ``version`` attribute.

        :param version: The version string of the package to create, install
            and then look up.
        """
        # Build a real virtualenv, install a canned package with the given
        # version, then check the step reports that version verbatim.
        test_env = FilePath(self.mktemp())
        virtualenv = VirtualEnv(root=test_env)
        InstallVirtualEnv(virtualenv=virtualenv).run()
        package_root = FilePath(self.mktemp())
        test_package = canned_package(root=package_root, version=version)
        InstallApplication(
            virtualenv=virtualenv, package_uri=package_root.path).run()
        step = GetPackageVersion(
            virtualenv=virtualenv, package_name=test_package.name)
        step.run()
        self.assertEqual(test_package.version, step.version)
    def test_version_found(self):
        """
        ``GetPackageVersion`` assigns the exact version of a found package to
        its ``version`` attribute.

        In particular, newer versions of pip/setuptools normalize the version
        according to PEP440. We aren't prepared to handle that yet.
        """
        # BUG FIX: the final two entries previously lacked separating commas,
        # so implicit string-literal concatenation merged them into the single
        # version '0.3.2-1-gf661a6a-dirty0.3.2+doc1-dirty', leaving the two
        # intended cases untested.
        versions = [
            '0.3.2',
            '0.3.3dev5',
            '0.3.2+doc1',
            '0.3.2-1-gf661a6a',
            '0.3.2+doc1-1-gf661a6a',
            '0.3.2pre1',
            '0.3.2-1-gf661a6a-dirty',
            '0.3.2+doc1-dirty',
        ]
        for version in versions:
            self.assert_version_found(version=version)
    def test_version_not_found(self):
        """
        ``GetPackageVersion.run`` raises an exception if the supplied
        ``package_name`` is not installed in the supplied ``virtual_env``.
        """
        test_env = FilePath(self.mktemp())
        virtualenv = VirtualEnv(root=test_env)
        InstallVirtualEnv(virtualenv=virtualenv).run()
        step = GetPackageVersion(
            virtualenv=virtualenv,
            package_name='PackageWhichIsNotInstalled'
        )
        self.assertRaises(Exception, step.run)
class BuildPackageTests(TestCase):
    """
    Tests for `BuildPackage`.
    """
    # fpm is the external tool used to build every package type; skip the
    # whole class when it is not available.
    @require_fpm
    def setUp(self):
        pass
    @require_rpm
    def test_rpm(self):
        """
        ``BuildPackage.run`` creates an RPM from the supplied ``source_path``.
        """
        # Scratch directories: one receives the built package, the other
        # plays the role of the tree being packaged.
        destination_path = FilePath(self.mktemp())
        destination_path.makedirs()
        source_path = FilePath(self.mktemp())
        source_path.makedirs()
        source_path.child('Foo').touch()
        source_path.child('Bar').touch()
        expected_prefix = FilePath('/foo/bar')
        # The whole source directory maps to /foo/bar; one file is also
        # mapped separately to /other/file (see source_paths below).
        expected_paths = set([
            expected_prefix.child('Foo'),
            expected_prefix.child('Bar'),
            FilePath('/other/file'),
        ])
        expected_name = 'FooBar'
        expected_epoch = b'3'
        expected_rpm_version = rpm_version('0.3', '0.dev.1')
        expected_license = 'My Test License'
        expected_url = 'https://www.example.com/foo/bar'
        expected_vendor = 'Acme Corporation'
        expected_maintainer = 'noreply@example.com'
        expected_architecture = 'i386'
        expected_description = 'Explosive Tennis Balls'
        # String renderings of the Dependency instances supplied below.
        expected_dependencies = ['test-dep', 'version-dep >= 42']
        BuildPackage(
            package_type=PackageTypes.RPM,
            destination_path=destination_path,
            source_paths={
                source_path: FilePath('/foo/bar'),
                source_path.child('Foo'): FilePath('/other/file'),
            },
            name=expected_name,
            prefix=FilePath('/'),
            epoch=expected_epoch,
            rpm_version=expected_rpm_version,
            license=expected_license,
            url=expected_url,
            vendor=expected_vendor,
            maintainer=expected_maintainer,
            architecture=expected_architecture,
            description=expected_description,
            category="Applications/System",
            dependencies=[
                Dependency(package='test-dep'),
                Dependency(package='version-dep', compare='>=', version='42')],
        ).run()
        # Exactly one RPM file should have been produced.
        rpms = glob('{}*.rpm'.format(
            destination_path.child(expected_name).path))
        self.assertEqual(1, len(rpms))
        # Metadata expected to appear in the RPM header.
        expected_headers = dict(
            Name=expected_name,
            Epoch=expected_epoch,
            Version=expected_rpm_version.version,
            Release=expected_rpm_version.release,
            License=expected_license,
            URL=expected_url,
            Vendor=expected_vendor,
            Packager=expected_maintainer,
            Architecture=expected_architecture,
            Group="Applications/System",
        )
        rpm_path = FilePath(rpms[0])
        assert_rpm_requires(self, expected_dependencies, rpm_path)
        assert_rpm_headers(self, expected_headers, rpm_path)
        assert_rpm_content(self, expected_paths, rpm_path)
    @require_dpkg
    def test_deb(self):
        """
        ``BuildPackage.run`` creates a .deb package from the supplied
        ``source_path``.
        """
        destination_path = FilePath(self.mktemp())
        destination_path.makedirs()
        source_path = FilePath(self.mktemp())
        source_path.makedirs()
        source_path.child('Foo').touch()
        source_path.child('Bar').touch()
        expected_prefix = FilePath('/foo/bar')
        expected_paths = set([
            expected_prefix.child('Foo'),
            expected_prefix.child('Bar'),
            FilePath('/other/file'),
            # This is added automatically by fpm despite not supplying the
            # --deb-changelog option
            FilePath('/usr/share/doc/foobar/changelog.Debian.gz'),
        ])
        # Debian package names are lowercase by convention.
        expected_name = 'FooBar'.lower()
        expected_epoch = b'3'
        expected_rpm_version = rpm_version('0.3', '0.dev.1')
        expected_license = 'My Test License'
        expected_url = 'https://www.example.com/foo/bar'
        expected_vendor = 'Acme Corporation'
        expected_maintainer = 'noreply@example.com'
        expected_architecture = 'i386'
        expected_description = 'Explosive Tennis Balls'
        BuildPackage(
            package_type=PackageTypes.DEB,
            destination_path=destination_path,
            source_paths={
                source_path: FilePath('/foo/bar'),
                source_path.child('Foo'): FilePath('/other/file'),
            },
            name=expected_name,
            prefix=FilePath("/"),
            epoch=expected_epoch,
            rpm_version=expected_rpm_version,
            license=expected_license,
            url=expected_url,
            vendor=expected_vendor,
            maintainer=expected_maintainer,
            architecture=expected_architecture,
            description=expected_description,
            category="admin",
            dependencies=[
                Dependency(package='test-dep'),
                Dependency(package='version-dep', compare='>=', version='42')],
        ).run()
        packages = glob('{}*.deb'.format(
            destination_path.child(expected_name.lower()).path))
        self.assertEqual(1, len(packages))
        # Debian renders the version as "<epoch>:<version>-<release>".
        expected_headers = dict(
            Package=expected_name,
            Version=(
                expected_epoch
                + b':'
                + expected_rpm_version.version
                + '-'
                + expected_rpm_version.release
            ),
            License=expected_license,
            Vendor=expected_vendor,
            Architecture=expected_architecture,
            Maintainer=expected_maintainer,
            Homepage=expected_url,
            Depends=', '.join(['test-dep', 'version-dep (>= 42)']),
            Section="admin",
        )
        assert_deb_headers(self, expected_headers, FilePath(packages[0]))
        assert_deb_content(self, expected_paths, FilePath(packages[0]))
class LintPackageTests(TestCase):
    """
    Tests for ``LintPackage``.
    """
    # fpm is needed to build the package that will be linted.
    @require_fpm
    def setUp(self):
        pass
    def assert_lint(self, package_type, expected_output):
        """
        ``LintPackage.run`` reports only unfiltered errors and raises
        ``SystemExit``.

        :param PackageTypes package_type: The type of package to test.
        :param bytes expected_output: The expected output of the linting.
        """
        # Build a deliberately lint-provoking package to run the linter on.
        destination_path = FilePath(self.mktemp())
        destination_path.makedirs()
        source_path = FilePath(self.mktemp())
        source_path.makedirs()
        source_path.child('Foo').touch()
        source_path.child('Bar').touch()
        BuildPackage(
            package_type=package_type,
            destination_path=destination_path,
            source_paths={
                source_path: FilePath('/foo/bar'),
                source_path.child('Foo'): FilePath('/opt/file'),
            },
            name="package-name",
            prefix=FilePath('/'),
            epoch=b'3',
            rpm_version=rpm_version('0.3', '0.dev.1'),
            license="Example",
            url="https://package.example/",
            vendor="Acme Corporation",
            maintainer='Someone <noreply@example.com>',
            architecture="all",
            description="Description\n\nExtended",
            category="none",
            dependencies=[]
        ).run()
        step = LintPackage(
            package_type=package_type,
            destination_path=destination_path,
            epoch=b'3',
            rpm_version=rpm_version('0.3', '0.dev.1'),
            package='package-name',
            architecture='all'
        )
        # Capture the linter output instead of writing to stdout.
        step.output = StringIO()
        self.assertRaises(SystemExit, step.run)
        self.assertEqual(step.output.getvalue(), expected_output)
    @require_rpmlint
    def test_rpm(self):
        """
        rpmlint doesn't report filtered errors.
        """
        # The following warnings and errors are filtered.
        # - E: no-changelogname-tag
        # - W: no-documentation
        # - E: zero-length
        self.assert_lint(PackageTypes.RPM, b"""\
Package errors (package-name):
package-name.noarch: W: non-standard-group default
package-name.noarch: W: invalid-license Example
package-name.noarch: W: invalid-url URL: https://package.example/ \
<urlopen error [Errno -2] Name or service not known>
package-name.noarch: W: cross-directory-hard-link /foo/bar/Foo /opt/file
""")
    @require_lintian
    def test_deb(self):
        """
        lintian doesn't report filtered errors.
        """
        # The following warnings and errors are filtered.
        # - E: package-name: no-copyright-file
        # - E: package-name: dir-or-file-in-opt
        # - W: package-name: file-missing-in-md5sums .../changelog.Debian.gz
        self.assert_lint(PackageTypes.DEB, b"""\
Package errors (package-name):
W: package-name: unknown-section default
E: package-name: non-standard-toplevel-dir foo/
W: package-name: file-in-unusual-dir foo/bar/Bar
W: package-name: file-in-unusual-dir foo/bar/Foo
W: package-name: package-contains-hardlink foo/bar/Foo -> opt/file
""")
class OmnibusPackageBuilderTests(TestCase):
    """
    Tests for ``omnibus_package_builder``.
    """
    def test_centos_7(self):
        self.assert_omnibus_steps(
            distribution=Distribution(name='centos', version='7'),
            expected_category='Applications/System',
            expected_package_type=PackageTypes.RPM,
        )
    def test_ubuntu_14_04(self):
        self.assert_omnibus_steps(
            distribution=Distribution(name='ubuntu', version='14.04'),
            expected_category='admin',
            expected_package_type=PackageTypes.DEB,
        )
    def test_fedora_20(self):
        self.assert_omnibus_steps(
            distribution=Distribution(name='fedora', version='20'),
            expected_category='Applications/System',
            expected_package_type=PackageTypes.RPM,
        )
    def assert_omnibus_steps(
            self,
            distribution=Distribution(name='fedora', version='20'),
            expected_category='Applications/System',
            expected_package_type=PackageTypes.RPM,
    ):
        """
        A sequence of build steps is returned.

        :param Distribution distribution: The distribution to build for.
        :param expected_category: The package category expected for this
            distribution's package format.
        :param expected_package_type: The ``PackageTypes`` constant expected
            for this distribution.
        """
        self.patch(packaging, 'CURRENT_DISTRIBUTION', distribution)
        # Stub out dependency calculation so the expected steps below can
        # reference fixed, per-package dependency lists.
        fake_dependencies = {
            'python': [Dependency(package='python-dep')],
            'node': [Dependency(package='node-dep')],
            'cli': [Dependency(package='cli-dep')],
        }
        def fake_make_dependencies(
                package_name, package_version, distribution):
            return fake_dependencies[package_name]
        self.patch(packaging, 'make_dependencies', fake_make_dependencies)
        expected_destination_path = FilePath(self.mktemp())
        target_path = FilePath(self.mktemp())
        flocker_cli_path = target_path.child('flocker-cli')
        flocker_node_path = target_path.child('flocker-node')
        expected_virtualenv_path = FilePath('/opt/flocker')
        expected_prefix = FilePath('/')
        expected_epoch = PACKAGE.EPOCH.value
        expected_package_uri = b'https://www.example.com/foo/Bar-1.2.3.whl'
        # The version is only known after the package is installed, so the
        # builder uses a step whose result is read lazily.
        expected_package_version_step = GetPackageVersion(
            virtualenv=VirtualEnv(root=expected_virtualenv_path),
            package_name='flocker'
        )
        expected_version = DelayedRpmVersion(
            package_version_step=expected_package_version_step
        )
        expected_license = PACKAGE.LICENSE.value
        expected_url = PACKAGE.URL.value
        expected_vendor = PACKAGE.VENDOR.value
        expected_maintainer = PACKAGE.MAINTAINER.value
        # The full expected pipeline: build + lint for each of the three
        # packages (python, cli, node).
        expected = BuildSequence(
            steps=(
                # clusterhq-python-flocker steps
                InstallVirtualEnv(
                    virtualenv=VirtualEnv(root=expected_virtualenv_path)),
                InstallApplication(
                    virtualenv=VirtualEnv(root=expected_virtualenv_path),
                    package_uri=b'https://www.example.com/foo/Bar-1.2.3.whl',
                ),
                expected_package_version_step,
                BuildPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    source_paths={
                        expected_virtualenv_path: expected_virtualenv_path
                    },
                    name='clusterhq-python-flocker',
                    prefix=expected_prefix,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    license=expected_license,
                    url=expected_url,
                    vendor=expected_vendor,
                    maintainer=expected_maintainer,
                    architecture='native',
                    description=PACKAGE_PYTHON.DESCRIPTION.value,
                    category=expected_category,
                    directories=[expected_virtualenv_path],
                    dependencies=[Dependency(package='python-dep')],
                ),
                LintPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    package='clusterhq-python-flocker',
                    architecture="native",
                ),
                # clusterhq-flocker-cli steps
                CreateLinks(
                    links=[
                        (FilePath('/opt/flocker/bin/flocker-deploy'),
                         flocker_cli_path),
                        (FilePath('/opt/flocker/bin/flocker'),
                         flocker_cli_path),
                    ]
                ),
                BuildPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    source_paths={flocker_cli_path: FilePath("/usr/bin")},
                    name='clusterhq-flocker-cli',
                    prefix=expected_prefix,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    license=expected_license,
                    url=expected_url,
                    vendor=expected_vendor,
                    maintainer=expected_maintainer,
                    architecture='all',
                    description=PACKAGE_CLI.DESCRIPTION.value,
                    category=expected_category,
                    dependencies=[Dependency(package='cli-dep')],
                ),
                LintPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    package='clusterhq-flocker-cli',
                    architecture="all",
                ),
                # clusterhq-flocker-node steps
                CreateLinks(
                    links=[
                        (FilePath('/opt/flocker/bin/flocker-reportstate'),
                         flocker_node_path),
                        (FilePath('/opt/flocker/bin/flocker-changestate'),
                         flocker_node_path),
                        (FilePath('/opt/flocker/bin/flocker-volume'),
                         flocker_node_path),
                        (FilePath('/opt/flocker/bin/flocker-control'),
                         flocker_node_path),
                        (FilePath('/opt/flocker/bin/flocker-zfs-agent'),
                         flocker_node_path),
                    ]
                ),
                BuildPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    source_paths={flocker_node_path: FilePath("/usr/sbin")},
                    name='clusterhq-flocker-node',
                    prefix=expected_prefix,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    license=expected_license,
                    url=expected_url,
                    vendor=expected_vendor,
                    maintainer=expected_maintainer,
                    architecture='all',
                    description=PACKAGE_NODE.DESCRIPTION.value,
                    category=expected_category,
                    dependencies=[Dependency(package='node-dep')],
                ),
                LintPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    package='clusterhq-flocker-node',
                    architecture="all",
                ),
            )
        )
        assert_equal_steps(
            self,
            expected,
            omnibus_package_builder(distribution=distribution,
                                    destination_path=expected_destination_path,
                                    package_uri=expected_package_uri,
                                    target_dir=target_path))
class DockerBuildOptionsTests(TestCase):
    """
    Tests for ``DockerBuildOptions``.
    """
    native_package_type = object()
    def setUp(self):
        """
        Patch ``admin.packaging._native_package_type`` to return a fixed value.
        """
        self.patch(
            packaging, '_native_package_type',
            lambda: self.native_package_type)
    def test_defaults(self):
        """
        ``DockerBuildOptions`` destination path defaults to the current working
        directory.
        """
        self.assertEqual({'destination-path': '.'}, DockerBuildOptions())
    def test_package_uri_missing(self):
        """
        ``DockerBuildOptions`` requires a single positional argument containing
        the URI of the Python package which is being packaged.
        """
        exception = self.assertRaises(
            UsageError, DockerBuildOptions().parseOptions, [])
        self.assertEqual('Wrong number of arguments.', str(exception))
    def test_package_uri_supplied(self):
        """
        ``DockerBuildOptions`` saves the supplied ``package-uri``.
        """
        uri = 'http://www.example.com/foo-bar.whl'
        options = DockerBuildOptions()
        options.parseOptions([uri])
        self.assertEqual(uri, options['package-uri'])
class DockerBuildScriptTests(TestCase):
    """
    Tests for ``DockerBuildScript``.
    """
    def test_usage_error_status(self):
        """
        ``DockerBuildScript.main`` raises ``SystemExit`` if there are missing
        command line options.
        """
        script = DockerBuildScript(sys_module=FakeSysModule(argv=[]))
        exception = self.assertRaises(SystemExit, script.main)
        self.assertEqual(1, exception.code)
    def test_usage_error_message(self):
        """
        ``DockerBuildScript.main`` prints a usage error to ``stderr`` if there
        are missing command line options.
        """
        fake_sys = FakeSysModule(argv=[])
        script = DockerBuildScript(sys_module=fake_sys)
        try:
            script.main()
        except SystemExit:
            pass
        last_line = fake_sys.stderr.getvalue().splitlines()[-1]
        self.assertEqual('Wrong number of arguments.', last_line)
    def test_build_command(self):
        """
        ``DockerBuildScript.build_command`` is ``omnibus_package_builder`` by
        default.
        """
        self.assertIs(omnibus_package_builder, DockerBuildScript.build_command)
    def test_run(self):
        """
        ``DockerBuildScript.main`` calls ``run`` on the instance returned by
        ``build_command``.
        """
        destination = FilePath(self.mktemp())
        uri = 'http://www.example.com/foo/bar.whl'
        fake_sys = FakeSysModule(argv=[
            'build-command-name',
            '--destination-path=%s' % (destination.path,),
            uri])
        distribution = Distribution(name='test-distro', version='30')
        self.patch(packaging, 'CURRENT_DISTRIBUTION', distribution)
        script = DockerBuildScript(sys_module=fake_sys)
        spy_step = SpyStep()
        calls = []
        # Replace build_command with a recorder returning the spy step.
        def record_and_return(*args, **kwargs):
            calls.append((args, kwargs))
            return spy_step
        script.build_command = record_and_return
        script.main()
        self.assertEqual(
            [((), dict(destination_path=destination,
                       package_uri=uri,
                       distribution=distribution))],
            calls)
        self.assertTrue(spy_step.ran)
class BuildOptionsTests(TestCase):
    """
    Tests for ``BuildOptions``.
    """
    def test_defaults(self):
        """
        ``BuildOptions`` destination path defaults to the current working
        directory.
        """
        self.assertEqual(
            {'destination-path': '.', 'distribution': None},
            BuildOptions())
    def test_distribution_missing(self):
        """
        ``BuildOptions.parseOptions`` raises ``UsageError`` if
        ``--distribution`` is not supplied.
        """
        options = BuildOptions()
        self.assertRaises(
            UsageError,
            options.parseOptions,
            ['http://example.com/fake/uri'])
    def test_package_uri_missing(self):
        """
        ``BuildOptions`` requires a single positional argument containing
        the URI of the Python package which is being packaged.
        """
        exception = self.assertRaises(
            UsageError, BuildOptions().parseOptions, [])
        self.assertEqual('Wrong number of arguments.', str(exception))
    def test_package_options_supplied(self):
        """
        ``BuildOptions`` saves the supplied options.
        """
        uri = 'http://www.example.com/foo-bar.whl'
        distro = 'ubuntu1404'
        options = BuildOptions()
        options.parseOptions(['--distribution', distro, uri])
        self.assertEqual(
            (distro, uri),
            (options['distribution'], options['package-uri'])
        )
class BuildScriptTests(TestCase):
    """
    Tests for ``BuildScript``.
    """
    def test_usage_error_status(self):
        """
        ``BuildScript.main`` raises ``SystemExit`` if there are missing command
        line options.
        """
        script = BuildScript(sys_module=FakeSysModule(argv=[]))
        exception = self.assertRaises(SystemExit, script.main)
        self.assertEqual(1, exception.code)
    def test_usage_error_message(self):
        """
        ``BuildScript.main`` prints a usage error to ``stderr`` if there are
        missing command line options.
        """
        fake_sys = FakeSysModule(argv=[])
        script = BuildScript(sys_module=fake_sys)
        try:
            script.main()
        except SystemExit:
            pass
        last_line = fake_sys.stderr.getvalue().splitlines()[-1]
        self.assertEqual('Wrong number of arguments.', last_line)
    def test_build_command(self):
        """
        ``BuildScript.build_command`` is ``build_in_docker`` by default.
        """
        self.assertIs(build_in_docker, BuildScript.build_command)
    def test_run(self):
        """
        ``BuildScript.main`` calls ``run`` on the instance returned by
        ``build_command``.
        """
        destination = FilePath(self.mktemp())
        distro = 'centos7'
        uri = 'http://www.example.com/foo/bar.whl'
        fake_sys = FakeSysModule(argv=[
            'build-command-name',
            '--destination-path', destination.path,
            '--distribution=%s' % (distro,),
            uri])
        script = BuildScript(sys_module=fake_sys)
        spy_step = SpyStep()
        calls = []
        # Replace build_command with a recorder returning the spy step.
        def record_and_return(*args, **kwargs):
            calls.append((args, kwargs))
            return spy_step
        script.build_command = record_and_return
        script.main()
        self.assertEqual(
            [((), dict(destination_path=destination,
                       distribution=distro,
                       package_uri=uri,
                       top_level=None))],
            calls)
        self.assertTrue(spy_step.ran)
class BuildInDockerFunctionTests(TestCase):
    """
    Tests for ``build_in_docker``.
    """
    def test_steps(self):
        """
        ``build_in_docker`` returns a ``BuildSequence`` comprising
        ``DockerBuild`` and ``DockerRun`` instances.
        """
        distribution = 'Foo'
        tag = 'clusterhq/build-%s' % (distribution,)
        top_level = FilePath('/foo/bar')
        build_directory = top_level.descendant(
            ['admin', 'build_targets', distribution])
        destination_path = FilePath('/baz/qux')
        # The container maps /output to the destination and /flocker to the
        # checkout being built.
        volumes = {
            FilePath('/output'): destination_path,
            FilePath('/flocker'): top_level,
        }
        package_uri = 'http://www.example.com/foo/bar/whl'
        expected = BuildSequence(steps=[
            DockerBuild(tag=tag, build_directory=build_directory),
            DockerRun(tag=tag, volumes=volumes, command=[package_uri]),
        ])
        actual = build_in_docker(
            destination_path=destination_path,
            distribution=distribution,
            top_level=top_level,
            package_uri=package_uri
        )
        assert_equal_steps(test_case=self, expected=expected, actual=actual)
class MakeDependenciesTests(TestCase):
    """
    Tests for ``make_dependencies``.
    """
    def _assert_depends_on_python_flocker(self, package_name):
        """
        ``make_dependencies`` pins ``clusterhq-python-flocker`` to the
        supplied version for the given sub-package.

        :param package_name: The short package name passed to
            ``make_dependencies``.
        """
        version = '1.2.3'
        dependencies = make_dependencies(
            package_name, version,
            Distribution(name='fedora', version='20'))
        self.assertIn(
            Dependency(
                package='clusterhq-python-flocker',
                compare='=',
                version=version
            ),
            dependencies)
    def test_node(self):
        """
        ``make_dependencies`` includes the supplied ``version`` of
        ``clusterhq-python-flocker`` for ``clusterhq-flocker-node``.
        """
        self._assert_depends_on_python_flocker('node')
    def test_cli(self):
        """
        ``make_dependencies`` includes the supplied ``version`` of
        ``clusterhq-python-flocker`` for ``clusterhq-flocker-cli``.
        """
        self._assert_depends_on_python_flocker('cli')
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import appengine_blobstore as blobstore
import object_store
from file_system import FileSystem, StatInfo
from StringIO import StringIO
from future import Future
from zipfile import ZipFile
ZIP_KEY = 'zipball'
def _MakeKey(version):
return ZIP_KEY + '.' + str(version)
class _AsyncFetchFutureZip(object):
  """Future delegate that resolves to a ZipFile of the github zipball.

  The fetch is started in the constructor; Get() blocks on the result,
  caches the raw bytes in blobstore and evicts the previous cached version.
  """
  def __init__(self, fetcher, blobstore, key_to_set, key_to_delete=None):
    # Kick off the download immediately so Get() only has to wait.
    self._pending_fetch = fetcher.FetchAsync(ZIP_KEY)
    self._blobstore = blobstore
    self._key_to_set = key_to_set
    self._key_to_delete = key_to_delete

  def Get(self):
    zip_data = self._pending_fetch.Get().content
    # Cache the fresh blob, then drop the stale one (if any).
    self._blobstore.Set(_MakeKey(self._key_to_set),
                        zip_data,
                        blobstore.BLOBSTORE_GITHUB)
    if self._key_to_delete is not None:
      self._blobstore.Delete(_MakeKey(self._key_to_delete),
                             blobstore.BLOBSTORE_GITHUB)
    return ZipFile(StringIO(zip_data))
class GithubFileSystem(FileSystem):
  """FileSystem implementation which fetches resources from github.
  """
  def __init__(self, fetcher, object_store, blobstore):
    self._fetcher = fetcher
    self._object_store = object_store
    self._blobstore = blobstore
    self._version = None
    # Load the zipball for the current HEAD version up front.
    self._GetZip(self.Stat(ZIP_KEY).version)

  def _GetZip(self, version):
    """Loads the zipball for |version| from blobstore, or fetches it."""
    blob = self._blobstore.Get(_MakeKey(version), blobstore.BLOBSTORE_GITHUB)
    if blob is not None:
      self._zip_file = Future(value=ZipFile(StringIO(blob)))
    else:
      # Fetch asynchronously, caching the new blob and evicting the old one.
      self._zip_file = Future(
          delegate=_AsyncFetchFutureZip(self._fetcher,
                                        self._blobstore,
                                        version,
                                        key_to_delete=self._version))
    self._version = version

  def _ReadFile(self, path):
    zip_file = self._zip_file.Get()
    # All zip entries share a single top-level directory; strip it.
    prefix = zip_file.namelist()[0][:-1]
    return zip_file.read(prefix + path)

  def _ListDir(self, path):
    filenames = self._zip_file.Get().namelist()
    # Take out parent directory name (GoogleChrome-chrome-app-samples-c78a30f)
    filenames = [f[len(filenames[0]) - 1:] for f in filenames]
    # Remove the path of the directory we're listing from the filenames.
    filenames = [f[len(path):] for f in filenames
                 if f != path and f.startswith(path)]
    # Remove all files not directly in this directory.
    return [f for f in filenames if f[:-1].count('/') == 0]

  def Read(self, paths, binary=False):
    # Refresh the zipball if github has moved on since we last looked.
    version = self.Stat(ZIP_KEY).version
    if version != self._version:
      self._GetZip(version)
    result = {}
    for path in paths:
      if path.endswith('/'):
        result[path] = self._ListDir(path)
      else:
        result[path] = self._ReadFile(path)
    return Future(value=result)

  def Stat(self, path):
    version = self._object_store.Get(path, object_store.GITHUB_STAT).Get()
    if version is not None:
      return StatInfo(version)
    # BUG FIX: github occasionally returns malformed JSON for commits/HEAD;
    # indexing ['commit']['tree']['sha'] directly raised KeyError and crashed
    # the app at import time. Walk the structure with .get() and fall back
    # to a version of 0 when the expected keys are absent.
    version = (json.loads(
        self._fetcher.Fetch('commits/HEAD').content).get('commit', {})
        .get('tree', {})
        .get('sha', None))
    if version is not None:
      self._object_store.Set(path, version, object_store.GITHUB_STAT)
    else:
      logging.warning('Problem fetching commit hash from github.')
      version = 0
    return StatInfo(version)
Extensions Docs Server: Gracefully handle bad Github data
This will prevent errors like these:
Traceback (most recent call last):
File "/base/data/home/apps/s~chrome-apps-doc/2-0-7.362231368946707729/appengine_main.py", line 18, in <module>
from handler import Handler
File "/base/data/home/apps/s~chrome-apps-doc/2-0-7.362231368946707729/handler.py", line 53, in <module>
AppEngineBlobstore())
File "/base/data/home/apps/s~chrome-apps-doc/2-0-7.362231368946707729/github_file_system.py", line 45, in __init__
self._GetZip(self.Stat(ZIP_KEY).version)
File "/base/data/home/apps/s~chrome-apps-doc/2-0-7.362231368946707729/github_file_system.py", line 91, in Stat
self._fetcher.Fetch('commits/HEAD').content)['commit']['tree']['sha']
KeyError: 'commit'
Review URL: https://codereview.chromium.org/11189153
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@164237 0039d316-1c4b-4281-b951-d872f2087c98
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import appengine_blobstore as blobstore
import object_store
from file_system import FileSystem, StatInfo, FileNotFoundError
from StringIO import StringIO
from future import Future
from zipfile import ZipFile, BadZipfile
ZIP_KEY = 'zipball'
def _MakeKey(version):
return ZIP_KEY + '.' + str(version)
class _AsyncFetchFutureZip(object):
  """Future delegate resolving to a ZipFile of the github zipball.

  The download is started in the constructor. Get() waits for it, caches
  the raw bytes in blobstore, evicts the stale cached version, and returns
  None (after logging) when the fetch fails or the data is not a valid zip.
  """
  def __init__(self, fetcher, blobstore, key_to_set, key_to_delete=None):
    # Start the download right away; Get() only has to block on the result.
    self._pending_fetch = fetcher.FetchAsync(ZIP_KEY)
    self._blobstore = blobstore
    self._key_to_set = key_to_set
    self._key_to_delete = key_to_delete

  def Get(self):
    try:
      zip_data = self._pending_fetch.Get().content
    except FileNotFoundError as e:
      logging.error('Bad github zip file: %s' % e)
      return None
    # Cache the fresh blob, then drop the stale one (if any).
    self._blobstore.Set(_MakeKey(self._key_to_set),
                        zip_data,
                        blobstore.BLOBSTORE_GITHUB)
    if self._key_to_delete is not None:
      self._blobstore.Delete(_MakeKey(self._key_to_delete),
                             blobstore.BLOBSTORE_GITHUB)
    try:
      return ZipFile(StringIO(zip_data))
    except BadZipfile as e:
      logging.error('Bad github zip file: %s' % e)
      return None
class GithubFileSystem(FileSystem):
  """FileSystem implementation which fetches resources from github.
  """
  def __init__(self, fetcher, object_store, blobstore):
    self._fetcher = fetcher
    self._object_store = object_store
    self._blobstore = blobstore
    self._version = None
    # Load the zipball for the current HEAD version up front.
    self._GetZip(self.Stat(ZIP_KEY).version)

  def _GetZip(self, version):
    # Loads the zipball for |version| from the blobstore cache, falling back
    # to an asynchronous fetch from github. self._zip_file may resolve to
    # None when the cached or fetched data is not a valid zip.
    blob = self._blobstore.Get(_MakeKey(version), blobstore.BLOBSTORE_GITHUB)
    if blob is not None:
      try:
        self._zip_file = Future(value=ZipFile(StringIO(blob)))
      except BadZipfile as e:
        # Corrupt cache entry: evict it so the next request refetches.
        self._blobstore.Delete(_MakeKey(version), blobstore.BLOBSTORE_GITHUB)
        logging.error('Bad github zip file: %s' % e)
        self._zip_file = Future(value=None)
    else:
      self._zip_file = Future(
          delegate=_AsyncFetchFutureZip(self._fetcher,
                                        self._blobstore,
                                        version,
                                        key_to_delete=self._version))
    self._version = version

  def _ReadFile(self, path):
    # Returns the contents of |path| within the zipball, or '' when the
    # zipball could not be loaded.
    zip_file = self._zip_file.Get()
    if zip_file is None:
      logging.error('Bad github zip file.')
      return ''
    # All zip entries share one top-level directory; strip its name.
    prefix = zip_file.namelist()[0][:-1]
    return zip_file.read(prefix + path)

  def _ListDir(self, path):
    # Returns the immediate children of directory |path|, or [] when the
    # zipball could not be loaded.
    zip_file = self._zip_file.Get()
    if zip_file is None:
      logging.error('Bad github zip file.')
      return []
    filenames = zip_file.namelist()
    # Take out parent directory name (GoogleChrome-chrome-app-samples-c78a30f)
    filenames = [f[len(filenames[0]) - 1:] for f in filenames]
    # Remove the path of the directory we're listing from the filenames.
    filenames = [f[len(path):] for f in filenames
                 if f != path and f.startswith(path)]
    # Remove all files not directly in this directory.
    return [f for f in filenames if f[:-1].count('/') == 0]

  def Read(self, paths, binary=False):
    # Refresh the zipball if github's HEAD has moved since the last read.
    version = self.Stat(ZIP_KEY).version
    if version != self._version:
      self._GetZip(version)
    result = {}
    for path in paths:
      if path.endswith('/'):
        result[path] = self._ListDir(path)
      else:
        result[path] = self._ReadFile(path)
    return Future(value=result)

  def Stat(self, path):
    # Returns a StatInfo whose version is the sha of github's HEAD commit
    # tree, consulting the object store cache first.
    version = self._object_store.Get(path, object_store.GITHUB_STAT).Get()
    if version is not None:
      return StatInfo(version)
    # Walk the JSON defensively; github sometimes returns malformed data.
    version = (json.loads(
        self._fetcher.Fetch('commits/HEAD').content).get('commit', {})
        .get('tree', {})
        .get('sha', None))
    # Check if the JSON was valid, and set to 0 if not.
    if version is not None:
      self._object_store.Set(path, version, object_store.GITHUB_STAT)
    else:
      logging.warning('Problem fetching commit hash from github.')
      version = 0
      # Cache for a minute so we don't try to keep fetching bad data.
      self._object_store.Set(path, version, object_store.GITHUB_STAT, time=60)
    return StatInfo(version)
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake Utility Functions
"""
# Copyright (C) 2004 Michael Lauer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re, fcntl, os, string, stat, shutil, time
import sys
import errno
import logging
def explode_version(s):
    """Split version string s into a list of components: maximal runs of
    digits become ints, maximal runs of letters stay strings, and every
    other character is kept as a single-character string."""
    parts = []
    alpha_regexp = re.compile('^([a-zA-Z]+)(.*)$')
    numeric_regexp = re.compile('^(\d+)(.*)$')
    while s != '':
        head = s[0]
        if head in string.digits:
            m = numeric_regexp.match(s)
            parts.append(int(m.group(1)))
            s = m.group(2)
        elif head in string.letters:
            m = alpha_regexp.match(s)
            parts.append(m.group(1))
            s = m.group(2)
        else:
            parts.append(head)
            s = s[1:]
    return parts
# Version comparison
separators = ".-"

def vercmp_part(a, b):
    """Compare two version-part strings component-wise (via
    explode_version). Returns -1, 0 or 1. A separator character sorts
    below a non-separator; exhausted parts compare as None."""
    components_a = explode_version(a)
    components_b = explode_version(b)
    sa = False
    sb = False
    while True:
        ca = components_a.pop(0) if components_a else None
        cb = components_b.pop(0) if components_b else None
        if ca is None and cb is None:
            return 0
        # Track whether each side is currently at a separator character.
        if isinstance(ca, basestring):
            sa = ca in separators
        if isinstance(cb, basestring):
            sb = cb in separators
        if sa and not sb:
            return -1
        if sb and not sa:
            return 1
        if ca > cb:
            return 1
        if ca < cb:
            return -1
def vercmp(ta, tb):
    """Compare two (epoch, version, revision) triplets; returns a value
    <0, ==0 or >0. Epochs compare numerically (empty counts as 0), then
    version and revision via vercmp_part."""
    (ea, va, ra) = ta
    (eb, vb, rb) = tb
    result = int(ea or 0) - int(eb or 0)
    if result == 0:
        result = vercmp_part(va, vb)
    if result == 0:
        result = vercmp_part(ra, rb)
    return result
_package_weights_ = {"pre":-2, "p":0, "alpha":-4, "beta":-3, "rc":-1} # dicts are unordered
_package_ends_ = ["pre", "p", "alpha", "beta", "rc", "cvs", "bk", "HEAD" ] # so we need ordered list

def relparse(myver):
    """Parses the last elements of a version number into a triplet, that can
    later be compared.

    :param myver: a single version element, e.g. "3", "3b" or "1_alpha2".
    :return: [number, p1, p2] where number is the numeric part as a float,
        p1 weights a recognized suffix (or the ord of a trailing letter)
        and p2 is the numeric tail of the suffix.
    """
    number = 0
    p1 = 0
    p2 = 0
    mynewver = myver.split('_')
    match = 0
    if len(mynewver) == 2:
        # Suffix form like "1_alpha2": weight the suffix via
        # _package_weights_ and keep its trailing number in p2.
        number = float(mynewver[0])
        for x in _package_ends_:
            elen = len(x)
            if mynewver[1][:elen] == x:
                match = 1
                p1 = _package_weights_[x]
                # BUG FIX: the bare "except:" here also swallowed
                # KeyboardInterrupt/SystemExit; only a failed float
                # conversion should fall back to 0.
                try:
                    p2 = float(mynewver[1][elen:])
                except ValueError:
                    p2 = 0
                break
    if not match:
        # Normal number, or number with a single letter at the end
        # (e.g. "3b" -> number 3.0, p1 = ord('b')).
        # (This single branch replaces two previously duplicated copies.)
        divider = len(myver)-1
        if myver[divider:] not in "1234567890":
            # letter at end
            p1 = ord(myver[divider:])
            number = float(myver[0:divider])
        else:
            number = float(myver)
    return [number, p1, p2]
# Memoises vercmp_string() results, keyed on the string "val1 val2".
__vercmp_cache__ = {}
def vercmp_string(val1, val2):
    """This takes two version strings and returns an integer to tell you whether
    the versions are the same, val1>val2 or val2>val1.
    """
    # quick short-circuit
    if val1 == val2:
        return 0
    valkey = val1 + " " + val2

    # cache lookup
    try:
        return __vercmp_cache__[valkey]
    except KeyError:
        pass
    # BUGFIX: this reverse-order lookup was nested directly *after* the
    # 'return' above, making it unreachable dead code; it is now a
    # separate lookup, negating the cached reverse comparison.
    try:
        return - __vercmp_cache__[val2 + " " + val1]
    except KeyError:
        pass

    # consider 1_p2 vc 1.1
    # after expansion will become (1_p2,0) vc (1,1)
    # then 1_p2 is compared with 1 before 0 is compared with 1
    # to solve the bug we need to convert it to (1,0_p2)
    # by splitting _prepart part and adding it back _after_expansion
    val1_prepart = val2_prepart = ''
    if val1.count('_'):
        val1, val1_prepart = val1.split('_', 1)
    if val2.count('_'):
        val2, val2_prepart = val2.split('_', 1)

    # replace '-' by '.'
    # FIXME: Is it needed? can val1/2 contain '-'?
    val1 = val1.split("-")
    if len(val1) == 2:
        val1[0] = val1[0] + "." + val1[1]
    val2 = val2.split("-")
    if len(val2) == 2:
        val2[0] = val2[0] + "." + val2[1]

    val1 = val1[0].split('.')
    val2 = val2[0].split('.')

    # add back decimal point so that .03 does not become "3" !
    for x in xrange(1, len(val1)):
        if val1[x][0] == '0' :
            val1[x] = '.' + val1[x]
    for x in xrange(1, len(val2)):
        if val2[x][0] == '0' :
            val2[x] = '.' + val2[x]

    # extend version numbers so both have the same component count
    if len(val2) < len(val1):
        val2.extend(["0"]*(len(val1)-len(val2)))
    elif len(val1) < len(val2):
        val1.extend(["0"]*(len(val2)-len(val1)))

    # add back _prepart tails
    if val1_prepart:
        val1[-1] += '_' + val1_prepart
    if val2_prepart:
        val2[-1] += '_' + val2_prepart
    # The above code will extend version numbers out so they
    # have the same number of digits.
    for x in xrange(0, len(val1)):
        cmp1 = relparse(val1[x])
        cmp2 = relparse(val2[x])
        for y in xrange(0, 3):
            myret = cmp1[y] - cmp2[y]
            if myret != 0:
                __vercmp_cache__[valkey] = myret
                return myret
    __vercmp_cache__[valkey] = 0
    return 0
def explode_deps(s):
    """
    Take an RDEPENDS style string of format:
    "DEPEND1 (optional version) DEPEND2 (optional version) ..."
    and return a list of dependencies.
    Version information is ignored.
    """
    deps = []
    in_version = False
    for token in s.split():
        # A '(' opens a version constraint; everything until the
        # matching ')' token is skipped.
        if token.startswith('('):
            in_version = True
        if not in_version:
            deps.append(token)
        if in_version and token.endswith(')'):
            in_version = False
    return deps
def explode_dep_versions(s):
    """
    Take an RDEPENDS style string of format:
    "DEPEND1 (optional version) DEPEND2 (optional version) ..."
    and return a dictionary of dependencies and versions.
    """
    versions = {}
    last_dep = None
    version_text = ""
    in_version = False
    for token in s.replace(",", "").split():
        if token.startswith('('):
            # Start of a version constraint for the previous dependency.
            in_version = True
            version_text = token[1:] or ""
        elif in_version and token.endswith(')'):
            # Constraint complete: attach it to the last dependency seen.
            in_version = False
            version_text = version_text + " " + (token[:-1] or "")
            versions[last_dep] = version_text
        elif in_version:
            version_text = version_text + " " + token
        else:
            # Plain dependency name, no version (yet).
            versions[token] = None
            last_dep = token
            version_text = ""
    return versions
def join_deps(deps):
    """
    Take the result from explode_dep_versions and generate a dependency string
    """
    pieces = []
    for name in deps:
        version = deps[name]
        if version:
            pieces.append("%s (%s)" % (name, version))
        else:
            pieces.append(name)
    return ", ".join(pieces)
def lockfile(name):
    """
    Use the file fn as a lock file, return when the lock has been acquired.
    Returns a variable to pass to unlockfile().
    """
    path = os.path.dirname(name)
    if not os.path.isdir(path):
        # NOTE(review): 'logger' is not defined anywhere in this chunk --
        # presumably a module-level logging.getLogger(...); confirm it
        # exists, otherwise this error path raises NameError instead.
        logger.error("Lockfile destination directory '%s' does not exist", path)
        sys.exit(1)
    while True:
        # If we leave the lockfiles lying around there is no problem
        # but we should clean up after ourselves. This gives potential
        # for races though. To work around this, when we acquire the lock
        # we check the file we locked was still the lock file on disk.
        # by comparing inode numbers. If they don't match or the lockfile
        # no longer exists, we start again.
        # This implementation is unfair since the last person to request the
        # lock is the most likely to win it.
        try:
            lf = open(name, 'a+')
            fileno = lf.fileno()
            # Blocks until the exclusive lock is ours.
            fcntl.flock(fileno, fcntl.LOCK_EX)
            statinfo = os.fstat(fileno)
            if os.path.exists(lf.name):
                statinfo2 = os.stat(lf.name)
                # Same inode: the path still names the file we locked.
                if statinfo.st_ino == statinfo2.st_ino:
                    return lf
            # Lock file was replaced/removed while we waited; retry.
            lf.close()
        except Exception:
            # Treat any open/flock/stat failure as transient and retry.
            continue
def unlockfile(lf):
    """
    Unlock a file locked using lockfile()
    """
    # Unlink first so the next waiter that acquires the flock sees the
    # inode mismatch in lockfile() and retries against a fresh file.
    os.unlink(lf.name)
    fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
    lf.close()
def md5_file(filename):
    """
    Return the hex string representation of the MD5 checksum of filename.
    """
    try:
        import hashlib
        m = hashlib.md5()
    except ImportError:
        # Very old Pythons (< 2.5) only have the deprecated md5 module.
        import md5
        m = md5.new()
    # BUGFIX: read in binary chunks and close the handle; the old code
    # iterated text-mode lines and leaked the open file object.
    f = open(filename, "rb")
    try:
        while True:
            data = f.read(8192)
            if not data:
                break
            m.update(data)
    finally:
        f.close()
    return m.hexdigest()
def remove(path, recurse=False):
    """Equivalent to rm -f or rm -rf"""
    import os, errno, shutil
    try:
        os.unlink(path)
    # 'except E, v' is Python-2-only and inconsistent with the
    # 'except ... as e' style used elsewhere in this file.
    except OSError as exc:
        # unlink() on a directory gives EISDIR on Linux but EPERM on some
        # platforms, so also check for a real directory before rmtree.
        if recurse and (exc.errno == errno.EISDIR or os.path.isdir(path)):
            shutil.rmtree(path)
        elif exc.errno != errno.ENOENT:
            raise
def prunedir(topdir):
    """Delete everything reachable from the directory named in 'topdir',
    then the directory itself.

    CAUTION: This is dangerous!
    """
    # Walk bottom-up so each directory is empty by the time we rmdir it.
    for root, dirs, files in os.walk(topdir, topdown=False):
        for entry in files:
            os.remove(os.path.join(root, entry))
        for entry in dirs:
            full = os.path.join(root, entry)
            if os.path.islink(full):
                # Symlinked directories are unlinked, never descended into.
                os.remove(full)
            else:
                os.rmdir(full)
    os.rmdir(topdir)
#
# Could also use return re.compile("(%s)" % "|".join(map(re.escape, suffixes))).sub(lambda mo: "", var)
# but thats possibly insane and suffixes is probably going to be small
#
def prune_suffix(var, suffixes, d):
    """Return var with the first matching entry of 'suffixes' removed from
    its end, or var unchanged if none matches.

    'd' is unused here; kept for call-signature compatibility.
    """
    for suffix in suffixes:
        if suffix and var.endswith(suffix):
            # BUGFIX: str.replace() removed *every* occurrence of the
            # suffix, not just the trailing one -- strip only the tail.
            return var[:-len(suffix)]
    return var
def mkdirhier(directory):
    """Create a directory like 'mkdir -p', but does not complain if
    directory already exists like os.makedirs.
    """
    try:
        os.makedirs(directory)
    except OSError as e:
        if e.errno != errno.EEXIST:
            # Bare raise preserves the original traceback ('raise e'
            # would restart it from this line).
            raise
def movefile(src, dest, newmtime = None, sstat = None):
    """Moves a file from src to dest, preserving all permissions and
    attributes; mtime will be preserved even when moving across
    filesystems. Returns true on success and false on failure. Move is
    atomic.

    Return value is actually None on failure, os.lstat(dest) for
    symlinks, or the destination mtime for regular/special files.
    'sstat' may be passed in to skip a second lstat of src; 'newmtime'
    overrides the preserved mtime.
    """
    #print "movefile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")"
    try:
        if not sstat:
            sstat = os.lstat(src)
    except Exception as e:
        print("movefile: Stating source file failed...", e)
        return None

    destexists = 1
    try:
        dstat = os.lstat(dest)
    except:
        # Destination missing: stat its directory instead (this feeds
        # the same-device check below).
        dstat = os.lstat(os.path.dirname(dest))
        destexists = 0

    if destexists:
        if stat.S_ISLNK(dstat[stat.ST_MODE]):
            # Remove an existing symlink at the destination first.
            try:
                os.unlink(dest)
                destexists = 0
            except Exception as e:
                pass

    if stat.S_ISLNK(sstat[stat.ST_MODE]):
        # Source is a symlink: recreate it at dest rather than copying data.
        try:
            target = os.readlink(src)
            if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
                os.unlink(dest)
            os.symlink(target, dest)
            #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
            os.unlink(src)
            return os.lstat(dest)
        except Exception as e:
            print("movefile: failed to properly create symlink:", dest, "->", target, e)
            return None

    renamefailed = 1
    if sstat[stat.ST_DEV] == dstat[stat.ST_DEV]:
        # Same filesystem: try an atomic rename.
        try:
            os.rename(src, dest)
            renamefailed = 0
        except Exception as e:
            # NOTE(review): 'e[0]' is Python-2-only exception indexing;
            # under Python 3 this line itself raises TypeError.
            if e[0] != errno.EXDEV:
                # Some random error.
                print("movefile: Failed to move", src, "to", dest, e)
                return None
            # Invalid cross-device-link 'bind' mounted or actually Cross-Device

    if renamefailed:
        didcopy = 0
        if stat.S_ISREG(sstat[stat.ST_MODE]):
            try: # For safety copy then move it over.
                shutil.copyfile(src, dest + "#new")
                os.rename(dest + "#new", dest)
                didcopy = 1
            except Exception as e:
                print('movefile: copy', src, '->', dest, 'failed.', e)
                return None
        else:
            #we don't yet handle special, so we need to fall back to /bin/mv
            # NOTE(review): 'getstatusoutput' is not defined in this chunk --
            # presumably commands.getstatusoutput; confirm the import exists.
            a = getstatusoutput("/bin/mv -f " + "'" + src + "' '" + dest + "'")
            if a[0] != 0:
                print("movefile: Failed to move special file:" + src + "' to '" + dest + "'", a)
                return None # failure
        try:
            if didcopy:
                os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
                os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
                os.unlink(src)
        except Exception as e:
            print("movefile: Failed to chown/chmod/unlink", dest, e)
            return None

    if newmtime:
        os.utime(dest, (newmtime, newmtime))
    else:
        # Preserve the source's atime/mtime on the destination.
        os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
        newmtime = sstat[stat.ST_MTIME]
    return newmtime
def copyfile(src, dest, newmtime = None, sstat = None):
    """
    Copies a file from src to dest, preserving all permissions and
    attributes; mtime will be preserved even when moving across
    filesystems. Returns true on success and false on failure.

    Return value is actually False on failure, os.lstat(dest) for
    symlinks, or the destination mtime for regular/special files.
    """
    #print "copyfile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")"
    try:
        if not sstat:
            sstat = os.lstat(src)
    except Exception as e:
        print("copyfile: Stating source file failed...", e)
        return False

    destexists = 1
    try:
        dstat = os.lstat(dest)
    except:
        # Destination absent: stat the containing directory instead.
        dstat = os.lstat(os.path.dirname(dest))
        destexists = 0

    if destexists:
        if stat.S_ISLNK(dstat[stat.ST_MODE]):
            # Replace an existing symlink at the destination.
            try:
                os.unlink(dest)
                destexists = 0
            except Exception as e:
                pass

    if stat.S_ISLNK(sstat[stat.ST_MODE]):
        # Source is a symlink: recreate it instead of copying file data.
        try:
            target = os.readlink(src)
            if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
                os.unlink(dest)
            os.symlink(target, dest)
            #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
            return os.lstat(dest)
        except Exception as e:
            print("copyfile: failed to properly create symlink:", dest, "->", target, e)
            return False

    if stat.S_ISREG(sstat[stat.ST_MODE]):
        # Temporarily force the source readable for the copy; the finally
        # block restores the original mode and timestamps either way.
        os.chmod(src, stat.S_IRUSR) # Make sure we can read it
        try: # For safety copy then move it over.
            shutil.copyfile(src, dest + "#new")
            os.rename(dest + "#new", dest)
        except Exception as e:
            print('copyfile: copy', src, '->', dest, 'failed.', e)
            os.chmod(src, stat.S_IMODE(sstat[stat.ST_MODE]))
            os.utime(src, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
            return False
        finally:
            os.chmod(src, sstat[stat.ST_MODE])
            os.utime(src, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
    else:
        #we don't yet handle special, so we need to fall back to /bin/mv
        # NOTE(review): 'getstatusoutput' is not defined in this chunk --
        # presumably commands.getstatusoutput; confirm the import exists.
        a = getstatusoutput("/bin/cp -f " + "'" + src + "' '" + dest + "'")
        if a[0] != 0:
            print("copyfile: Failed to copy special file:" + src + "' to '" + dest + "'", a)
            return False # failure

    try:
        os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
        os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
    except Exception as e:
        print("copyfile: Failed to chown/chmod/unlink", dest, e)
        return False

    if newmtime:
        os.utime(dest, (newmtime, newmtime))
    else:
        # Preserve the source's atime/mtime on the destination.
        os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
        newmtime = sstat[stat.ST_MTIME]
    return newmtime
def which(path, item, direction = 0):
    """
    Locate a file in a PATH
    """
    # A non-zero 'direction' searches the PATH entries back-to-front.
    entries = (path or "").split(':')
    if direction != 0:
        entries.reverse()
    for entry in entries:
        candidate = os.path.join(entry, item)
        if os.path.exists(candidate):
            return candidate
    # Not found anywhere on the path.
    return ""
lib/bb: Change bb.utils.mkdirhier to an oelite.util.makedirs wrapper
Signed-off-by: Esben Haabendal <c5ba3f1141bb30a64dd6c7e6726a53a85adc9442@prevas.dk>
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake Utility Functions
"""
# Copyright (C) 2004 Michael Lauer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re, fcntl, os, string, stat, shutil, time
import sys
import errno
import logging
def explode_version(s):
r = []
alpha_regexp = re.compile('^([a-zA-Z]+)(.*)$')
numeric_regexp = re.compile('^(\d+)(.*)$')
while (s != ''):
if s[0] in string.digits:
m = numeric_regexp.match(s)
r.append(int(m.group(1)))
s = m.group(2)
continue
if s[0] in string.letters:
m = alpha_regexp.match(s)
r.append(m.group(1))
s = m.group(2)
continue
r.append(s[0])
s = s[1:]
return r
# Version comparison
separators = ".-"
def vercmp_part(a, b):
va = explode_version(a)
vb = explode_version(b)
sa = False
sb = False
while True:
if va == []:
ca = None
else:
ca = va.pop(0)
if vb == []:
cb = None
else:
cb = vb.pop(0)
if ca == None and cb == None:
return 0
if isinstance(ca, basestring):
sa = ca in separators
if isinstance(cb, basestring):
sb = cb in separators
if sa and not sb:
return -1
if not sa and sb:
return 1
if ca > cb:
return 1
if ca < cb:
return -1
def vercmp(ta, tb):
(ea, va, ra) = ta
(eb, vb, rb) = tb
r = int(ea or 0) - int(eb or 0)
if (r == 0):
r = vercmp_part(va, vb)
if (r == 0):
r = vercmp_part(ra, rb)
return r
_package_weights_ = {"pre":-2, "p":0, "alpha":-4, "beta":-3, "rc":-1} # dicts are unordered
_package_ends_ = ["pre", "p", "alpha", "beta", "rc", "cvs", "bk", "HEAD" ] # so we need ordered list
def relparse(myver):
"""Parses the last elements of a version number into a triplet, that can
later be compared.
"""
number = 0
p1 = 0
p2 = 0
mynewver = myver.split('_')
if len(mynewver) == 2:
# an _package_weights_
number = float(mynewver[0])
match = 0
for x in _package_ends_:
elen = len(x)
if mynewver[1][:elen] == x:
match = 1
p1 = _package_weights_[x]
try:
p2 = float(mynewver[1][elen:])
except:
p2 = 0
break
if not match:
# normal number or number with letter at end
divider = len(myver)-1
if myver[divider:] not in "1234567890":
# letter at end
p1 = ord(myver[divider:])
number = float(myver[0:divider])
else:
number = float(myver)
else:
# normal number or number with letter at end
divider = len(myver)-1
if myver[divider:] not in "1234567890":
#letter at end
p1 = ord(myver[divider:])
number = float(myver[0:divider])
else:
number = float(myver)
return [number, p1, p2]
__vercmp_cache__ = {}
def vercmp_string(val1, val2):
    """This takes two version strings and returns an integer to tell you whether
    the versions are the same, val1>val2 or val2>val1.
    """
    # quick short-circuit
    if val1 == val2:
        return 0
    valkey = val1 + " " + val2

    # cache lookup
    try:
        return __vercmp_cache__[valkey]
    except KeyError:
        pass
    # BUGFIX: this reverse-order lookup was nested directly *after* the
    # 'return' above, making it unreachable dead code; it is now a
    # separate lookup, negating the cached reverse comparison.
    try:
        return - __vercmp_cache__[val2 + " " + val1]
    except KeyError:
        pass

    # consider 1_p2 vc 1.1
    # after expansion will become (1_p2,0) vc (1,1)
    # then 1_p2 is compared with 1 before 0 is compared with 1
    # to solve the bug we need to convert it to (1,0_p2)
    # by splitting _prepart part and adding it back _after_expansion
    val1_prepart = val2_prepart = ''
    if val1.count('_'):
        val1, val1_prepart = val1.split('_', 1)
    if val2.count('_'):
        val2, val2_prepart = val2.split('_', 1)

    # replace '-' by '.'
    # FIXME: Is it needed? can val1/2 contain '-'?
    val1 = val1.split("-")
    if len(val1) == 2:
        val1[0] = val1[0] + "." + val1[1]
    val2 = val2.split("-")
    if len(val2) == 2:
        val2[0] = val2[0] + "." + val2[1]

    val1 = val1[0].split('.')
    val2 = val2[0].split('.')

    # add back decimal point so that .03 does not become "3" !
    for x in xrange(1, len(val1)):
        if val1[x][0] == '0' :
            val1[x] = '.' + val1[x]
    for x in xrange(1, len(val2)):
        if val2[x][0] == '0' :
            val2[x] = '.' + val2[x]

    # extend version numbers so both have the same component count
    if len(val2) < len(val1):
        val2.extend(["0"]*(len(val1)-len(val2)))
    elif len(val1) < len(val2):
        val1.extend(["0"]*(len(val2)-len(val1)))

    # add back _prepart tails
    if val1_prepart:
        val1[-1] += '_' + val1_prepart
    if val2_prepart:
        val2[-1] += '_' + val2_prepart
    # The above code will extend version numbers out so they
    # have the same number of digits.
    for x in xrange(0, len(val1)):
        cmp1 = relparse(val1[x])
        cmp2 = relparse(val2[x])
        for y in xrange(0, 3):
            myret = cmp1[y] - cmp2[y]
            if myret != 0:
                __vercmp_cache__[valkey] = myret
                return myret
    __vercmp_cache__[valkey] = 0
    return 0
def explode_deps(s):
"""
Take an RDEPENDS style string of format:
"DEPEND1 (optional version) DEPEND2 (optional version) ..."
and return a list of dependencies.
Version information is ignored.
"""
r = []
l = s.split()
flag = False
for i in l:
if i[0] == '(':
flag = True
#j = []
if not flag:
r.append(i)
#else:
# j.append(i)
if flag and i.endswith(')'):
flag = False
# Ignore version
#r[-1] += ' ' + ' '.join(j)
return r
def explode_dep_versions(s):
"""
Take an RDEPENDS style string of format:
"DEPEND1 (optional version) DEPEND2 (optional version) ..."
and return a dictionary of dependencies and versions.
"""
r = {}
l = s.replace(",", "").split()
lastdep = None
lastver = ""
inversion = False
for i in l:
if i[0] == '(':
inversion = True
lastver = i[1:] or ""
#j = []
elif inversion and i.endswith(')'):
inversion = False
lastver = lastver + " " + (i[:-1] or "")
r[lastdep] = lastver
elif not inversion:
r[i] = None
lastdep = i
lastver = ""
elif inversion:
lastver = lastver + " " + i
return r
def join_deps(deps):
"""
Take the result from explode_dep_versions and generate a dependency string
"""
result = []
for dep in deps:
if deps[dep]:
result.append(dep + " (" + deps[dep] + ")")
else:
result.append(dep)
return ", ".join(result)
def lockfile(name):
"""
Use the file fn as a lock file, return when the lock has been acquired.
Returns a variable to pass to unlockfile().
"""
path = os.path.dirname(name)
if not os.path.isdir(path):
logger.error("Lockfile destination directory '%s' does not exist", path)
sys.exit(1)
while True:
# If we leave the lockfiles lying around there is no problem
# but we should clean up after ourselves. This gives potential
# for races though. To work around this, when we acquire the lock
# we check the file we locked was still the lock file on disk.
# by comparing inode numbers. If they don't match or the lockfile
# no longer exists, we start again.
# This implementation is unfair since the last person to request the
# lock is the most likely to win it.
try:
lf = open(name, 'a+')
fileno = lf.fileno()
fcntl.flock(fileno, fcntl.LOCK_EX)
statinfo = os.fstat(fileno)
if os.path.exists(lf.name):
statinfo2 = os.stat(lf.name)
if statinfo.st_ino == statinfo2.st_ino:
return lf
lf.close()
except Exception:
continue
def unlockfile(lf):
"""
Unlock a file locked using lockfile()
"""
os.unlink(lf.name)
fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
lf.close()
def md5_file(filename):
"""
Return the hex string representation of the MD5 checksum of filename.
"""
try:
import hashlib
m = hashlib.md5()
except ImportError:
import md5
m = md5.new()
for line in open(filename):
m.update(line)
return m.hexdigest()
def remove(path, recurse=False):
    """Equivalent to rm -f or rm -rf"""
    import os, errno, shutil
    try:
        os.unlink(path)
    # 'except E, v' is Python-2-only and inconsistent with the
    # 'except ... as e' style used elsewhere in this file.
    except OSError as exc:
        # unlink() on a directory gives EISDIR on Linux but EPERM on some
        # platforms, so also check for a real directory before rmtree.
        if recurse and (exc.errno == errno.EISDIR or os.path.isdir(path)):
            shutil.rmtree(path)
        elif exc.errno != errno.ENOENT:
            raise
def prunedir(topdir):
# Delete everything reachable from the directory named in 'topdir'.
# CAUTION: This is dangerous!
for root, dirs, files in os.walk(topdir, topdown = False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
if os.path.islink(os.path.join(root, name)):
os.remove(os.path.join(root, name))
else:
os.rmdir(os.path.join(root, name))
os.rmdir(topdir)
#
# Could also use return re.compile("(%s)" % "|".join(map(re.escape, suffixes))).sub(lambda mo: "", var)
# but thats possibly insane and suffixes is probably going to be small
#
def prune_suffix(var, suffixes, d):
    """Return var with the first matching entry of 'suffixes' removed from
    its end, or var unchanged if none matches.

    'd' is unused here; kept for call-signature compatibility.
    """
    for suffix in suffixes:
        if suffix and var.endswith(suffix):
            # BUGFIX: str.replace() removed *every* occurrence of the
            # suffix, not just the trailing one -- strip only the tail.
            return var[:-len(suffix)]
    return var
def mkdirhier(directory):
    """Create a directory hierarchy ('mkdir -p' semantics).

    Thin wrapper delegating to oelite.util.makedirs so existing
    bb.utils.mkdirhier() callers keep working.
    """
    import oelite.util
    oelite.util.makedirs(directory)
def movefile(src, dest, newmtime = None, sstat = None):
"""Moves a file from src to dest, preserving all permissions and
attributes; mtime will be preserved even when moving across
filesystems. Returns true on success and false on failure. Move is
atomic.
"""
#print "movefile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")"
try:
if not sstat:
sstat = os.lstat(src)
except Exception as e:
print("movefile: Stating source file failed...", e)
return None
destexists = 1
try:
dstat = os.lstat(dest)
except:
dstat = os.lstat(os.path.dirname(dest))
destexists = 0
if destexists:
if stat.S_ISLNK(dstat[stat.ST_MODE]):
try:
os.unlink(dest)
destexists = 0
except Exception as e:
pass
if stat.S_ISLNK(sstat[stat.ST_MODE]):
try:
target = os.readlink(src)
if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
os.unlink(dest)
os.symlink(target, dest)
#os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
os.unlink(src)
return os.lstat(dest)
except Exception as e:
print("movefile: failed to properly create symlink:", dest, "->", target, e)
return None
renamefailed = 1
if sstat[stat.ST_DEV] == dstat[stat.ST_DEV]:
try:
os.rename(src, dest)
renamefailed = 0
except Exception as e:
if e[0] != errno.EXDEV:
# Some random error.
print("movefile: Failed to move", src, "to", dest, e)
return None
# Invalid cross-device-link 'bind' mounted or actually Cross-Device
if renamefailed:
didcopy = 0
if stat.S_ISREG(sstat[stat.ST_MODE]):
try: # For safety copy then move it over.
shutil.copyfile(src, dest + "#new")
os.rename(dest + "#new", dest)
didcopy = 1
except Exception as e:
print('movefile: copy', src, '->', dest, 'failed.', e)
return None
else:
#we don't yet handle special, so we need to fall back to /bin/mv
a = getstatusoutput("/bin/mv -f " + "'" + src + "' '" + dest + "'")
if a[0] != 0:
print("movefile: Failed to move special file:" + src + "' to '" + dest + "'", a)
return None # failure
try:
if didcopy:
os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
os.unlink(src)
except Exception as e:
print("movefile: Failed to chown/chmod/unlink", dest, e)
return None
if newmtime:
os.utime(dest, (newmtime, newmtime))
else:
os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
newmtime = sstat[stat.ST_MTIME]
return newmtime
def copyfile(src, dest, newmtime = None, sstat = None):
"""
Copies a file from src to dest, preserving all permissions and
attributes; mtime will be preserved even when moving across
filesystems. Returns true on success and false on failure.
"""
#print "copyfile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")"
try:
if not sstat:
sstat = os.lstat(src)
except Exception as e:
print("copyfile: Stating source file failed...", e)
return False
destexists = 1
try:
dstat = os.lstat(dest)
except:
dstat = os.lstat(os.path.dirname(dest))
destexists = 0
if destexists:
if stat.S_ISLNK(dstat[stat.ST_MODE]):
try:
os.unlink(dest)
destexists = 0
except Exception as e:
pass
if stat.S_ISLNK(sstat[stat.ST_MODE]):
try:
target = os.readlink(src)
if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
os.unlink(dest)
os.symlink(target, dest)
#os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
return os.lstat(dest)
except Exception as e:
print("copyfile: failed to properly create symlink:", dest, "->", target, e)
return False
if stat.S_ISREG(sstat[stat.ST_MODE]):
os.chmod(src, stat.S_IRUSR) # Make sure we can read it
try: # For safety copy then move it over.
shutil.copyfile(src, dest + "#new")
os.rename(dest + "#new", dest)
except Exception as e:
print('copyfile: copy', src, '->', dest, 'failed.', e)
os.chmod(src, stat.S_IMODE(sstat[stat.ST_MODE]))
os.utime(src, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
return False
finally:
os.chmod(src, sstat[stat.ST_MODE])
os.utime(src, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
else:
#we don't yet handle special, so we need to fall back to /bin/mv
a = getstatusoutput("/bin/cp -f " + "'" + src + "' '" + dest + "'")
if a[0] != 0:
print("copyfile: Failed to copy special file:" + src + "' to '" + dest + "'", a)
return False # failure
try:
os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
except Exception as e:
print("copyfile: Failed to chown/chmod/unlink", dest, e)
return False
if newmtime:
os.utime(dest, (newmtime, newmtime))
else:
os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
newmtime = sstat[stat.ST_MTIME]
return newmtime
def which(path, item, direction = 0):
"""
Locate a file in a PATH
"""
paths = (path or "").split(':')
if direction != 0:
paths.reverse()
for p in paths:
next = os.path.join(p, item)
if os.path.exists(next):
return next
return ""
|
import json
import datetime
from copy import copy
from flask import request
from flask.ext.cors import cross_origin
from dateutil.parser import parse as parse_date
from alerta.app import app, db
from alerta.alert import Alert
from alerta.app.utils import absolute_url, jsonify, jsonp, process_alert
from alerta.app.metrics import Timer
from alerta.plugins import RejectException
LOG = app.logger

# Timers instrument each alert-processing stage; they feed the app's
# metrics endpoint with counts and cumulative processing time.
webhook_timer = Timer('alerts', 'webhook', 'Web hook alerts', 'Total time to process number of web hook alerts')
duplicate_timer = Timer('alerts', 'duplicate', 'Duplicate alerts', 'Total time to process number of duplicate alerts')
correlate_timer = Timer('alerts', 'correlate', 'Correlated alerts', 'Total time to process number of correlated alerts')
create_timer = Timer('alerts', 'create', 'Newly created alerts', 'Total time to process number of new alerts')
def cw_state_to_severity(state):
    """Map a CloudWatch alarm state string to an alerta severity level."""
    state_map = {
        'ALARM': 'major',
        'INSUFFICIENT_DATA': 'warning',
        'OK': 'normal',
    }
    # Anything unrecognised is reported as 'unknown'.
    return state_map.get(state, 'unknown')
def parse_notification(notification):
    """Parse an AWS SNS notification (JSON string) into an Alert.

    Handles the 'SubscriptionConfirmation' and 'Notification' message
    types; any other Type falls through and returns None implicitly.
    """
    notification = json.loads(notification)
    if notification['Type'] == 'SubscriptionConfirmation':
        # Surface the subscription-confirmation link as an informational
        # alert so an operator can click SubscribeURL.
        return Alert(
            resource=notification['TopicArn'],
            event=notification['Type'],
            environment='Production',
            severity='informational',
            service=['Unknown'],
            group='AWS/CloudWatch',
            text='%s <a href="%s" target="_blank">SubscribeURL</a>' % (notification['Message'], notification['SubscribeURL']),
            origin=notification['TopicArn'],
            event_type='cloudwatchAlarm',
            create_time=datetime.datetime.strptime(notification['Timestamp'], '%Y-%m-%dT%H:%M:%S.%fZ'),
            raw_data=notification,
        )
    elif notification['Type'] == 'Notification':
        # The actual CloudWatch alarm is itself JSON inside 'Message'.
        alarm = json.loads(notification['Message'])
        return Alert(
            # First trigger dimension identifies the resource,
            # e.g. "InstanceId:i-0123".
            resource='%s:%s' % (alarm['Trigger']['Dimensions'][0]['name'], alarm['Trigger']['Dimensions'][0]['value']),
            event=alarm['AlarmName'],
            environment='Production',
            severity=cw_state_to_severity(alarm['NewStateValue']),
            service=[alarm['AWSAccountId']],
            group=alarm['Trigger']['Namespace'],
            value=alarm['NewStateValue'],
            text=alarm['AlarmDescription'],
            tags=[alarm['Region']],
            attributes={
                'incidentKey': alarm['AlarmName'],
                'thresholdInfo': alarm['Trigger']
            },
            origin=notification['TopicArn'],
            event_type='cloudwatchAlarm',
            create_time=datetime.datetime.strptime(notification['Timestamp'], '%Y-%m-%dT%H:%M:%S.%fZ'),
            raw_data=alarm
        )
@app.route('/webhooks/cloudwatch', methods=['OPTIONS', 'POST'])
@cross_origin()
@jsonp
def cloudwatch():
    """Webhook endpoint for AWS CloudWatch alarms delivered via SNS.

    Returns 201 with the stored alert on success, 400 for a malformed
    payload, 403 when a plugin rejects the alert, 500 otherwise.
    """
    hook_started = webhook_timer.start_timer()
    try:
        incomingAlert = parse_notification(request.data)
    except ValueError as e:
        webhook_timer.stop_timer(hook_started)
        return jsonify(status="error", message=str(e)), 400
    try:
        alert = process_alert(incomingAlert)
    except RejectException as e:
        webhook_timer.stop_timer(hook_started)
        return jsonify(status="error", message=str(e)), 403
    except Exception as e:
        webhook_timer.stop_timer(hook_started)
        return jsonify(status="error", message=str(e)), 500
    webhook_timer.stop_timer(hook_started)
    if alert:
        body = alert.get_body()
        body['href'] = absolute_url('/alert/' + alert.id)
        return jsonify(status="ok", id=alert.id, alert=body), 201, {'Location': body['href']}
    else:
        return jsonify(status="error", message="insert or update of cloudwatch alarm failed"), 500
def parse_pingdom(check):
    """Convert a Pingdom webhook 'message' payload (JSON string) into an Alert.

    The original three branches differed only in severity (and, for
    unrecognised actions, in the correlate list), so the Alert
    construction is shared here.
    """
    check = json.loads(check)
    correlate = ['up', 'down']
    if check['action'] == 'assign':
        # A check was assigned: the host is down.
        severity = 'critical'
    elif check['action'] == 'notify_of_close':
        # Incident closed: back to normal.
        severity = 'normal'
    else:
        # Unknown action: keep the raw description in the correlate list.
        severity = 'indeterminate'
        correlate = correlate + [check['description']]
    return Alert(
        resource=check['host'],
        event=check['description'],
        correlate=correlate,
        environment='Production',
        severity=severity,
        service=[check['checkname']],
        group='Network',
        text='%s is %s.' % (check['checkname'], check['description']),
        attributes={'incidentKey': check['incidentid']},
        origin='Pingdom',
        event_type='availabilityAlert',
        raw_data=check,
    )
@app.route('/webhooks/pingdom', methods=['OPTIONS', 'GET'])
@cross_origin()
@jsonp
def pingdom():
    """Webhook endpoint for Pingdom availability checks.

    Pingdom delivers its payload as a 'message' query parameter (GET).
    Returns 201 on success; 400/403/500 on failure as for cloudwatch().
    """
    hook_started = webhook_timer.start_timer()
    try:
        incomingAlert = parse_pingdom(request.args.get('message'))
    except ValueError as e:
        webhook_timer.stop_timer(hook_started)
        return jsonify(status="error", message=str(e)), 400
    try:
        alert = process_alert(incomingAlert)
    except RejectException as e:
        webhook_timer.stop_timer(hook_started)
        return jsonify(status="error", message=str(e)), 403
    except Exception as e:
        webhook_timer.stop_timer(hook_started)
        return jsonify(status="error", message=str(e)), 500
    webhook_timer.stop_timer(hook_started)
    if alert:
        body = alert.get_body()
        body['href'] = absolute_url('/alert/' + alert.id)
        return jsonify(status="ok", id=alert.id, alert=body), 201, {'Location': body['href']}
    else:
        return jsonify(status="error", message="insert or update of pingdom check failed"), 500
def parse_pagerduty(message):
    """Map one PagerDuty webhook message onto (alert id, status, text).

    The alert is located via its 'incidentKey' attribute; IndexError
    propagates when no matching alert exists (caller turns it into 400).
    """
    incident_key = message['data']['incident']['incident_key']
    incident_number = message['data']['incident']['incident_number']
    html_url = message['data']['incident']['html_url']
    incident_url = '<a href="%s">#%s</a>' % (html_url, incident_number)
    try:
        alert = db.get_alerts(query={'attributes.incidentKey': incident_key}, limit=1)[0]
    except IndexError:
        # NOTE(review): this try/raise is a no-op -- the exception would
        # propagate anyway; kept only to document the intent.
        raise
    # Local import -- presumably avoids a circular dependency; confirm.
    from alerta.app import status_code
    if message['type'] == 'incident.trigger':
        status = status_code.OPEN
        user = message['data']['incident']['assigned_to_user']['name']
        text = 'Incident %s assigned to %s' % (incident_url, user)
    elif message['type'] == 'incident.acknowledge':
        status = status_code.ACK
        user = message['data']['incident']['assigned_to_user']['name']
        text = 'Incident %s acknowledged by %s' % (incident_url, user)
    elif message['type'] == 'incident.unacknowledge':
        status = status_code.OPEN
        text = 'Incident %s unacknowledged due to timeout' % incident_url
    elif message['type'] == 'incident.resolve':
        status = status_code.CLOSED
        # resolved_by_user is null when the incident auto-resolves.
        if message['data']['incident']['resolved_by_user']:
            user = message['data']['incident']['resolved_by_user']['name']
        else:
            user = 'n/a'
        text = 'Incident %s resolved by %s' % (incident_url, user)
    elif message['type'] == 'incident.assign':
        status = status_code.ASSIGN
        user = message['data']['incident']['assigned_to_user']['name']
        text = 'Incident %s manually assigned to %s' % (incident_url, user)
    elif message['type'] == 'incident.escalate':
        status = status_code.OPEN
        user = message['data']['incident']['assigned_to_user']['name']
        text = 'Incident %s escalated to %s' % (incident_url, user)
    elif message['type'] == 'incident.delegate':
        status = status_code.OPEN
        user = message['data']['incident']['assigned_to_user']['name']
        text = 'Incident %s reassigned due to escalation to %s' % (incident_url, user)
    else:
        # Unrecognised message type: record it verbatim.
        status = status_code.UNKNOWN
        text = message['type']
    return alert.id, status, text
@app.route('/webhooks/pagerduty', methods=['OPTIONS', 'POST'])
@cross_origin()
def pagerduty():
    """Webhook endpoint syncing PagerDuty incident status onto alerts.

    Returns 200 when at least the last message updated an alert, 400 for
    a missing/empty payload or unknown incident key, 500 on db failure.
    """
    hook_started = webhook_timer.start_timer()
    data = request.json
    # BUGFIX: 'alert' was referenced after the loop without being
    # initialised, raising NameError when data['messages'] was an empty
    # list; start from None so that case yields the 500 error path.
    alert = None
    if data and 'messages' in data:
        for message in data['messages']:
            try:
                alert_id, status, text = parse_pagerduty(message)
            except IndexError as e:
                webhook_timer.stop_timer(hook_started)
                return jsonify(status="error", message=str(e)), 400
            try:
                alert = db.set_status(id=alert_id, status=status, text=text)
            except Exception as e:
                webhook_timer.stop_timer(hook_started)
                return jsonify(status="error", message=str(e)), 500
    else:
        webhook_timer.stop_timer(hook_started)
        return jsonify(status="error", message="no messages in PagerDuty data payload"), 400
    webhook_timer.stop_timer(hook_started)
    if alert:
        return jsonify(status="ok"), 200
    else:
        return jsonify(status="error", message="update PagerDuty incident status failed"), 500
def parse_prometheus(status, alert):
    """Translate a single Prometheus Alertmanager alert dict into an Alert.

    :param status: group-level status from the notification ('firing'/'resolved')
    :param alert: one entry of the notification's 'alerts' list
    :returns: an Alert built from the labels/annotations
    :raises KeyError: if mandatory labels (alertname, instance) are missing
    """
    # Shallow-copy so the pops below don't mutate the caller's payload
    # (raw_data keeps the original dict).
    labels = copy(alert['labels'])
    annotations = copy(alert['annotations'])
    starts_at = parse_date(alert['startsAt'])
    # Alertmanager sends the Go zero time while the alert is still firing.
    if alert['endsAt'] == '0001-01-01T00:00:00Z':
        ends_at = None
    else:
        ends_at = parse_date(alert['endsAt'])
    if status == 'firing':
        severity = labels.pop('severity', 'warning')
        create_time = starts_at
    elif status == 'resolved':
        severity = 'normal'
        create_time = ends_at
    else:
        severity = 'unknown'
        create_time = ends_at or starts_at
    summary = annotations.pop('summary', None)
    description = annotations.pop('description', None)
    # The fallback text reads labels BEFORE they are popped by the Alert()
    # call below; it is only evaluated when both annotations are absent.
    text = description or summary or '%s: %s on %s' % (labels['job'], labels['alertname'], labels['instance'])
    if 'generatorURL' in alert:
        annotations['moreInfo'] = '<a href="%s" target="_blank">Prometheus Graph</a>' % alert['generatorURL']
    # Every label not popped below ends up as a "key=value" tag.
    return Alert(
        resource=labels.pop('exported_instance', None) or labels.pop('instance'),
        event=labels.pop('alertname'),
        environment=labels.pop('environment', 'Production'),
        severity=severity,
        correlate=labels.pop('correlate').split(',') if 'correlate' in labels else None,
        service=labels.pop('service', '').split(','),
        group=labels.pop('group', None),
        value=labels.pop('value', None),
        text=text,
        customer=labels.pop('customer', None),
        tags=["%s=%s" % t for t in labels.items()],
        attributes=annotations,
        origin='prometheus/' + labels.get('job', '-'),
        event_type='prometheusAlert',
        create_time=create_time,
        raw_data=alert
    )
@app.route('/webhooks/prometheus', methods=['OPTIONS', 'POST'])
@cross_origin()
def prometheus():
    """Receive a Prometheus Alertmanager notification and ingest its alerts.

    Processing stops (and an error status is returned) at the first alert
    that fails to parse or be processed; earlier alerts in the batch are
    already committed at that point.
    """
    if request.json and 'alerts' in request.json:
        hook_started = webhook_timer.start_timer()
        # NOTE(review): a payload with 'alerts' but no 'status' key raises
        # KeyError here (-> 500); presumably Alertmanager always sends both.
        status = request.json['status']
        for alert in request.json['alerts']:
            try:
                incomingAlert = parse_prometheus(status, alert)
            except ValueError as e:
                webhook_timer.stop_timer(hook_started)
                return jsonify(status="error", message=str(e)), 400
            try:
                process_alert(incomingAlert)
            except RejectException as e:
                webhook_timer.stop_timer(hook_started)
                return jsonify(status="error", message=str(e)), 403
            except Exception as e:
                webhook_timer.stop_timer(hook_started)
                return jsonify(status="error", message=str(e)), 500
        webhook_timer.stop_timer(hook_started)
    else:
        return jsonify(status="error", message="no alerts in Prometheus notification payload"), 400
    return jsonify(status="ok"), 200
Enforce auth for webhooks if enabled
import json
import datetime
from copy import copy
from dateutil.parser import parse as parse_date
from flask import request
from flask.ext.cors import cross_origin
from alerta.app import app, db
from alerta.app.auth import auth_required
from alerta.app.metrics import Timer
from alerta.app.utils import absolute_url, jsonify, process_alert
from alerta.alert import Alert
from alerta.plugins import RejectException
LOG = app.logger

# Metric timers; webhook handlers bracket their work with
# webhook_timer.start_timer()/stop_timer(). The other three timers are
# presumably consumed elsewhere in the app -- they are not used in this module.
webhook_timer = Timer('alerts', 'webhook', 'Web hook alerts', 'Total time to process number of web hook alerts')
duplicate_timer = Timer('alerts', 'duplicate', 'Duplicate alerts', 'Total time to process number of duplicate alerts')
correlate_timer = Timer('alerts', 'correlate', 'Correlated alerts', 'Total time to process number of correlated alerts')
create_timer = Timer('alerts', 'create', 'Newly created alerts', 'Total time to process number of new alerts')
def cw_state_to_severity(state):
    """Map a CloudWatch alarm state string to an alerta severity.

    Unrecognised states map to 'unknown'.
    """
    return {
        'ALARM': 'major',
        'INSUFFICIENT_DATA': 'warning',
        'OK': 'normal',
    }.get(state, 'unknown')
def parse_notification(notification):
    """Convert an AWS SNS notification body (JSON string) into an Alert.

    Handles the 'SubscriptionConfirmation' handshake and ordinary
    'Notification' messages carrying a CloudWatch alarm.

    :param notification: raw request body (JSON text)
    :returns: an Alert
    :raises ValueError: for malformed JSON or an unsupported SNS 'Type'
        (previously this fell through and returned None, which cloudwatch()
        then passed to process_alert() and failed with a 500; raising lets
        the endpoint answer 400 instead)
    """
    notification = json.loads(notification)
    if notification['Type'] == 'SubscriptionConfirmation':
        return Alert(
            resource=notification['TopicArn'],
            event=notification['Type'],
            environment='Production',
            severity='informational',
            service=['Unknown'],
            group='AWS/CloudWatch',
            text='%s <a href="%s" target="_blank">SubscribeURL</a>' % (notification['Message'], notification['SubscribeURL']),
            origin=notification['TopicArn'],
            event_type='cloudwatchAlarm',
            create_time=datetime.datetime.strptime(notification['Timestamp'], '%Y-%m-%dT%H:%M:%S.%fZ'),
            raw_data=notification,
        )
    elif notification['Type'] == 'Notification':
        # The CloudWatch alarm itself is a JSON document nested in 'Message'.
        alarm = json.loads(notification['Message'])
        return Alert(
            resource='%s:%s' % (alarm['Trigger']['Dimensions'][0]['name'], alarm['Trigger']['Dimensions'][0]['value']),
            event=alarm['AlarmName'],
            environment='Production',
            severity=cw_state_to_severity(alarm['NewStateValue']),
            service=[alarm['AWSAccountId']],
            group=alarm['Trigger']['Namespace'],
            value=alarm['NewStateValue'],
            text=alarm['AlarmDescription'],
            tags=[alarm['Region']],
            attributes={
                'incidentKey': alarm['AlarmName'],
                'thresholdInfo': alarm['Trigger']
            },
            origin=notification['TopicArn'],
            event_type='cloudwatchAlarm',
            create_time=datetime.datetime.strptime(notification['Timestamp'], '%Y-%m-%dT%H:%M:%S.%fZ'),
            raw_data=alarm
        )
    else:
        raise ValueError("unsupported SNS message type '%s'" % notification['Type'])
@app.route('/webhooks/cloudwatch', methods=['OPTIONS', 'POST'])
@cross_origin()
@auth_required
def cloudwatch():
    """Receive an AWS SNS/CloudWatch notification and create/update an alert.

    Returns 201 with the alert body on success, 400 when the payload cannot
    be parsed, 403 when a plugin rejects the alert, 500 otherwise.
    """
    hook_started = webhook_timer.start_timer()
    try:
        # Raw body, not request.json: SNS posts JSON with a text content type.
        incomingAlert = parse_notification(request.data)
    except ValueError as e:
        webhook_timer.stop_timer(hook_started)
        return jsonify(status="error", message=str(e)), 400
    try:
        # NOTE(review): parse_notification returns None for SNS types other
        # than SubscriptionConfirmation/Notification; process_alert(None)
        # would then presumably fail into the 500 branch -- verify.
        alert = process_alert(incomingAlert)
    except RejectException as e:
        webhook_timer.stop_timer(hook_started)
        return jsonify(status="error", message=str(e)), 403
    except Exception as e:
        webhook_timer.stop_timer(hook_started)
        return jsonify(status="error", message=str(e)), 500
    webhook_timer.stop_timer(hook_started)
    if alert:
        body = alert.get_body()
        body['href'] = absolute_url('/alert/' + alert.id)
        return jsonify(status="ok", id=alert.id, alert=body), 201, {'Location': body['href']}
    else:
        return jsonify(status="error", message="insert or update of cloudwatch alarm failed"), 500
def parse_pingdom(check):
    """Convert a Pingdom check notification (JSON string) into an Alert.

    The three original branches differed only in severity and (for unknown
    actions) the correlate list, so they are collapsed into one Alert() call.

    :param check: raw 'message' parameter (JSON text)
    :returns: an Alert; severity is 'critical' for 'assign', 'normal' for
        'notify_of_close', 'indeterminate' for anything else
    :raises ValueError: if the payload is not valid JSON
    """
    check = json.loads(check)
    if check['action'] == 'assign':
        severity = 'critical'
        correlate = ['up', 'down']
    elif check['action'] == 'notify_of_close':
        severity = 'normal'
        correlate = ['up', 'down']
    else:
        # Unknown action: also correlate on the reported state itself.
        severity = 'indeterminate'
        correlate = ['up', 'down', check['description']]
    return Alert(
        resource=check['host'],
        event=check['description'],
        correlate=correlate,
        environment='Production',
        severity=severity,
        service=[check['checkname']],
        group='Network',
        text='%s is %s.' % (check['checkname'], check['description']),
        attributes={'incidentKey': check['incidentid']},
        origin='Pingdom',
        event_type='availabilityAlert',
        raw_data=check,
    )
@app.route('/webhooks/pingdom', methods=['OPTIONS', 'GET'])
@cross_origin()
@auth_required
def pingdom():
    """Receive a Pingdom check notification and create/update an alert.

    Pingdom delivers the payload as a JSON string in the 'message' query
    parameter of a GET request (hence no POST handling here).
    """
    hook_started = webhook_timer.start_timer()
    try:
        incomingAlert = parse_pingdom(request.args.get('message'))
    except ValueError as e:
        webhook_timer.stop_timer(hook_started)
        return jsonify(status="error", message=str(e)), 400
    try:
        alert = process_alert(incomingAlert)
    except RejectException as e:
        webhook_timer.stop_timer(hook_started)
        return jsonify(status="error", message=str(e)), 403
    except Exception as e:
        webhook_timer.stop_timer(hook_started)
        return jsonify(status="error", message=str(e)), 500
    webhook_timer.stop_timer(hook_started)
    if alert:
        body = alert.get_body()
        body['href'] = absolute_url('/alert/' + alert.id)
        return jsonify(status="ok", id=alert.id, alert=body), 201, {'Location': body['href']}
    else:
        return jsonify(status="error", message="insert or update of pingdom check failed"), 500
def parse_pagerduty(message):
    """Map one PagerDuty webhook message onto an existing alert.

    :param message: one entry of the webhook payload's 'messages' list
    :returns: tuple (alert id, new status code, status-change text)
    :raises IndexError: when no alert carries the message's incident key
    :raises KeyError: if the message lacks expected fields, e.g. an
        'assigned_to_user' entry for trigger/acknowledge/assign events
    """
    incident_key = message['data']['incident']['incident_key']
    incident_number = message['data']['incident']['incident_number']
    html_url = message['data']['incident']['html_url']
    incident_url = '<a href="%s">#%s</a>' % (html_url, incident_number)
    # Alerts are linked to PagerDuty incidents via the 'incidentKey' attribute.
    try:
        alert = db.get_alerts(query={'attributes.incidentKey': incident_key}, limit=1)[0]
    except IndexError:
        # NOTE(review): a bare re-raise -- this try/except is redundant; the
        # IndexError would propagate to the caller (pagerduty() -> 400) anyway.
        raise
    # Late import, presumably to dodge a circular import -- confirm.
    from alerta.app import status_code
    if message['type'] == 'incident.trigger':
        status = status_code.OPEN
        user = message['data']['incident']['assigned_to_user']['name']
        text = 'Incident %s assigned to %s' % (incident_url, user)
    elif message['type'] == 'incident.acknowledge':
        status = status_code.ACK
        user = message['data']['incident']['assigned_to_user']['name']
        text = 'Incident %s acknowledged by %s' % (incident_url, user)
    elif message['type'] == 'incident.unacknowledge':
        status = status_code.OPEN
        text = 'Incident %s unacknowledged due to timeout' % incident_url
    elif message['type'] == 'incident.resolve':
        status = status_code.CLOSED
        # resolved_by_user is null when the incident resolved itself (timeout/API).
        if message['data']['incident']['resolved_by_user']:
            user = message['data']['incident']['resolved_by_user']['name']
        else:
            user = 'n/a'
        text = 'Incident %s resolved by %s' % (incident_url, user)
    elif message['type'] == 'incident.assign':
        status = status_code.ASSIGN
        user = message['data']['incident']['assigned_to_user']['name']
        text = 'Incident %s manually assigned to %s' % (incident_url, user)
    elif message['type'] == 'incident.escalate':
        status = status_code.OPEN
        user = message['data']['incident']['assigned_to_user']['name']
        text = 'Incident %s escalated to %s' % (incident_url, user)
    elif message['type'] == 'incident.delegate':
        status = status_code.OPEN
        user = message['data']['incident']['assigned_to_user']['name']
        text = 'Incident %s reassigned due to escalation to %s' % (incident_url, user)
    else:
        # Unrecognised event type: record it verbatim with UNKNOWN status.
        status = status_code.UNKNOWN
        text = message['type']
    return alert.id, status, text
@app.route('/webhooks/pagerduty', methods=['OPTIONS', 'POST'])
@cross_origin()
@auth_required
def pagerduty():
    """Receive PagerDuty webhook messages and sync incident status onto alerts.

    Returns 200 on success, 400 for a bad payload or unknown incident key,
    500 when the database update fails.
    """
    hook_started = webhook_timer.start_timer()
    data = request.json
    # Initialise up front: with an empty "messages" list the loop never runs
    # and the success check below would otherwise hit an unbound name
    # (NameError -> 500) instead of a clean error response.
    alert = None
    if data and 'messages' in data:
        for message in data['messages']:
            try:
                # alert_id (not "id") to avoid shadowing the builtin.
                alert_id, status, text = parse_pagerduty(message)
            except IndexError as e:
                # No alert matched the incident key in this message.
                webhook_timer.stop_timer(hook_started)
                return jsonify(status="error", message=str(e)), 400
            try:
                alert = db.set_status(id=alert_id, status=status, text=text)
            except Exception as e:
                webhook_timer.stop_timer(hook_started)
                return jsonify(status="error", message=str(e)), 500
    else:
        webhook_timer.stop_timer(hook_started)
        return jsonify(status="error", message="no messages in PagerDuty data payload"), 400
    webhook_timer.stop_timer(hook_started)
    if alert:
        return jsonify(status="ok"), 200
    else:
        return jsonify(status="error", message="update PagerDuty incident status failed"), 500
def parse_prometheus(status, alert):
    """Translate a single Prometheus Alertmanager alert dict into an Alert.

    :param status: group-level status from the notification ('firing'/'resolved')
    :param alert: one entry of the notification's 'alerts' list
    :returns: an Alert built from the alert's labels and annotations
    """
    # Work on copies; raw_data keeps the untouched payload.
    labels = copy(alert['labels'])
    annotations = copy(alert['annotations'])

    starts_at = parse_date(alert['startsAt'])
    # Alertmanager uses the Go zero time while the alert is still open.
    ends_at = None if alert['endsAt'] == '0001-01-01T00:00:00Z' else parse_date(alert['endsAt'])

    if status == 'firing':
        severity = labels.pop('severity', 'warning')
        create_time = starts_at
    elif status == 'resolved':
        severity = 'normal'
        create_time = ends_at
    else:
        severity = 'unknown'
        create_time = ends_at or starts_at

    summary = annotations.pop('summary', None)
    description = annotations.pop('description', None)
    text = description or summary
    if not text:
        # Only build the fallback when neither annotation is present, so that
        # missing job/instance labels don't raise unnecessarily.
        text = '%s: %s on %s' % (labels['job'], labels['alertname'], labels['instance'])

    if 'generatorURL' in alert:
        annotations['moreInfo'] = '<a href="%s" target="_blank">Prometheus Graph</a>' % alert['generatorURL']

    if 'correlate' in labels:
        correlate = labels.pop('correlate').split(',')
    else:
        correlate = None

    # Labels not consumed above become "key=value" tags.
    return Alert(
        resource=labels.pop('exported_instance', None) or labels.pop('instance'),
        event=labels.pop('alertname'),
        environment=labels.pop('environment', 'Production'),
        severity=severity,
        correlate=correlate,
        service=labels.pop('service', '').split(','),
        group=labels.pop('group', None),
        value=labels.pop('value', None),
        text=text,
        customer=labels.pop('customer', None),
        tags=["%s=%s" % t for t in labels.items()],
        attributes=annotations,
        origin='prometheus/' + labels.get('job', '-'),
        event_type='prometheusAlert',
        create_time=create_time,
        raw_data=alert
    )
@app.route('/webhooks/prometheus', methods=['OPTIONS', 'POST'])
@cross_origin()
@auth_required
def prometheus():
    """Receive a Prometheus Alertmanager notification and ingest its alerts.

    Processing stops (and an error status is returned) at the first alert
    that fails to parse or be processed; earlier alerts in the batch are
    already committed at that point.
    """
    if request.json and 'alerts' in request.json:
        hook_started = webhook_timer.start_timer()
        # NOTE(review): a payload with 'alerts' but no 'status' key raises
        # KeyError here (-> 500); presumably Alertmanager always sends both.
        status = request.json['status']
        for alert in request.json['alerts']:
            try:
                incomingAlert = parse_prometheus(status, alert)
            except ValueError as e:
                webhook_timer.stop_timer(hook_started)
                return jsonify(status="error", message=str(e)), 400
            try:
                process_alert(incomingAlert)
            except RejectException as e:
                webhook_timer.stop_timer(hook_started)
                return jsonify(status="error", message=str(e)), 403
            except Exception as e:
                webhook_timer.stop_timer(hook_started)
                return jsonify(status="error", message=str(e)), 500
        webhook_timer.stop_timer(hook_started)
    else:
        return jsonify(status="error", message="no alerts in Prometheus notification payload"), 400
    return jsonify(status="ok"), 200
|
"""
Copyright 2017 Nicolas Ramirez
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
""" Contains all the functions for subprocess calls """
import subprocess
import os
from utils import parsers
def exec_imagemagick(filepath, line='', filetype=None, filename=None):
    """Run ImageMagick's ``convert`` on *filename*.

    :param filepath: directory that receives the converted file
    :param line: extra arguments; see NOTE below -- effectively ignored
    :param filetype: output extension (including the dot), e.g. ``'.bmp'``
    :param filename: input file path; ``None`` aborts with ``('', False)``
    :returns: tuple ``(output_file_path, bool success)``
    """
    if filename is None:
        return '', False
    # NOTE(review): ``line is True`` is False for any string, so extra
    # arguments are always discarded -- confirm whether that is intended.
    line = line if line is True else ''
    # Base name taken from a Windows-style path ('\\' separator) -- presumably
    # this tool targets Windows; verify before running elsewhere.
    name = filename.split('\\')
    name = name[len(name)-1].split('.')[0]
    file_out = os.path.join(filepath, '{}{}'.format(name, filetype))
    convert_path = parsers.get_from_config('convert_path', os.path.dirname(os.path.realpath(__file__)))
    if not convert_path:
        return '', False
    # Remove a stale output so the existence check below reflects this run.
    if os.path.isfile(file_out):
        os.remove(file_out)
    # NOTE(review): a single command string without shell=True only works on
    # Windows and breaks on paths containing spaces.
    command = '{} {} {} {}'.format(convert_path, filename, file_out, line)
    result = subprocess.run(command, universal_newlines=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    return file_out, os.path.isfile(file_out)
def exec_potrace(filepath, line='', filename=None):
    """Run potrace on *filename*, producing an SVG next to *filepath*.

    :param filepath: directory that receives the ``.svg`` output
    :param line: extra arguments; effectively ignored (see NOTE)
    :param filename: input bitmap path; ``None`` aborts with ``('', False)``
    :returns: tuple ``(output_file_path, bool success)``
    """
    if filename is None:
        return '', False
    # NOTE(review): ``line is True`` is False for any string, so extra
    # arguments are always discarded -- confirm whether that is intended.
    line = line if line is True else ''
    # Base name taken from a Windows-style path ('\\' separator).
    name = filename.split('\\')
    name = name[len(name)-1].split('.')[0]
    file_out = os.path.join(filepath, '{}{}'.format(name, '.svg'))
    potrace_path = parsers.get_from_config('potrace_path', os.path.dirname(os.path.realpath(__file__)))
    if not potrace_path:
        return '', False
    if os.path.isfile(file_out):
        os.remove(file_out)
    # NOTE(review): command string without shell=True -- same portability
    # caveat as exec_imagemagick.
    command = '{} -s {} -o {} --flat {}'.format(potrace_path, filename, file_out, line)
    result = subprocess.run(command, universal_newlines=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    return file_out, os.path.isfile(file_out)
Added monochrome and negate to imagemagick execution for better quality results
"""
Copyright 2017 Nicolas Ramirez
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
""" Contains all the functions for subprocess calls """
import subprocess
import os
from utils import parsers
def exec_imagemagick(filepath, line='', filetype=None, filename=None):
    """Run ImageMagick's ``convert`` (with -monochrome -negate) on *filename*.

    :param filepath: directory that receives the converted file
    :param line: extra arguments; historically discarded unless literally
        ``True`` (the dead check is preserved below -- TODO confirm intent)
    :param filetype: output extension (including the dot), e.g. ``'.bmp'``
    :param filename: input file path; ``None`` aborts with ``('', False)``
    :returns: tuple ``(output_file_path, bool success)``
    """
    if filename is None:
        return '', False
    # NOTE(review): ``line is True`` is False for any string, so extra
    # arguments are always dropped; behaviour kept verbatim pending review.
    line = line if line is True else ''
    # Base name taken from a Windows-style path ('\\' separator).
    name = filename.split('\\')
    name = name[len(name) - 1].split('.')[0]
    file_out = os.path.join(filepath, '{}{}'.format(name, filetype))
    convert_path = parsers.get_from_config('convert_path', os.path.dirname(os.path.realpath(__file__)))
    if not convert_path:
        return '', False
    # Remove a stale output so the existence check below reflects this run.
    if os.path.isfile(file_out):
        os.remove(file_out)
    # Pass an argument list instead of a whitespace-joined string: a plain
    # string only works without shell=True on Windows, and breaks on paths
    # containing spaces on every platform.
    command = [convert_path, filename, '-monochrome', '-negate', file_out]
    if line:
        command.extend(str(line).split())
    subprocess.run(command, universal_newlines=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    return file_out, os.path.isfile(file_out)
def exec_potrace(filepath, line='', filename=None):
    """Run potrace on *filename*, producing an SVG in *filepath*.

    :param filepath: directory that receives the ``.svg`` output
    :param line: extra arguments; historically discarded unless literally
        ``True`` (the dead check is preserved below -- TODO confirm intent)
    :param filename: input bitmap path; ``None`` aborts with ``('', False)``
    :returns: tuple ``(output_file_path, bool success)``
    """
    if filename is None:
        return '', False
    # NOTE(review): ``line is True`` is False for any string, so extra
    # arguments are always dropped; behaviour kept verbatim pending review.
    line = line if line is True else ''
    # Base name taken from a Windows-style path ('\\' separator).
    name = filename.split('\\')
    name = name[len(name) - 1].split('.')[0]
    file_out = os.path.join(filepath, '{}{}'.format(name, '.svg'))
    potrace_path = parsers.get_from_config('potrace_path', os.path.dirname(os.path.realpath(__file__)))
    if not potrace_path:
        return '', False
    if os.path.isfile(file_out):
        os.remove(file_out)
    # Argument list instead of a single command string: portable across
    # platforms and safe with paths containing spaces.
    command = [potrace_path, '-s', filename, '-o', file_out, '--flat']
    if line:
        command.extend(str(line).split())
    subprocess.run(command, universal_newlines=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    return file_out, os.path.isfile(file_out)
|
"""
analyzr.NetworkScanner
~~~~~~~~~~~~~~~
This modules allows to scan the network of the current host..
"""
from typing import List
import netaddr
from scapy.all import *
from texttable import Texttable
from analyzr.core.entities import NetworkNode
from analyzr.networktool import NetworkToolFacade
logger = logging.getLogger(__name__)
def discover():
    """Module-level discovery entry point; currently an unimplemented stub."""
    pass
class NetworkDiscoverer():
    """Discovers live hosts via ARP, traces routes to them, scans ports and
    can render the resulting topology as a graph image."""

    # Default networks to probe when none are given.
    # Taken from netdiscover main.c
    # https://sourceforge.net/p/netdiscover/code/115/tree/trunk/src/main.c
    common_networks = [
        netaddr.IPNetwork("192.168.0.0/16"),
        netaddr.IPNetwork("172.16.0.0/12"),
        netaddr.IPNetwork("10.0.0.0/8")]
    # "10.0.0.0/8"

    def __init__(self, network_tool: NetworkToolFacade, fingerprinters: list):
        # NOTE(review): "fingerprinters" is accepted but never stored -- only
        # the commented-out identify_fingerprints() below would use it.
        self.network_tool = network_tool
        self.discovered_network_hosts = defaultdict(
            set)  # (network --> set(NetworkNode, NetworkNode, NetworkNode, ...))

    def discover(self, network_ranges: list = None):
        """
        Scans specified network ranges to find live hosts. If no networks given, a default list is used.
        Returns True if any hosts were found. False if otherwise.
        """
        def scan(net: netaddr.IPNetwork):
            # ARP-probe one network and record every responding host.
            logger.debug("Starting host discovery on network {network}...".format(network=net))
            results = self.network_tool.arp_discover_hosts(network=str(net), timeout=10)
            if results:
                logger.info("Found {nb_found_hosts} hosts in {network}.".format(nb_found_hosts=len(results),
                                                                                network=net))
                for result in results:
                    network_node = NetworkNode(ip=netaddr.IPAddress(result.ip), mac=netaddr.EUI(result.mac))
                    self.discovered_network_hosts[net].add(network_node)
            else:
                logger.info("No hosts found on {network}.".format(network=net))

        logger.info("Starting host discovery...")
        if network_ranges:
            networks_to_scan = [netaddr.IPNetwork(net_to_scan) for net_to_scan in network_ranges]
        else:
            networks_to_scan = self.common_networks
        for net in networks_to_scan:
            if net.prefixlen >= 16:
                scan(net)
            else:
                # If bigger than a /16 we split the network into subnetworks to avoid flooding the network with ARP
                # packets
                logger.info("Splitting {0:s} into /16 subnets to avoid sending too many ARP packets.".format(str(net)))
                for subnet in net.subnet(16):
                    scan(subnet)
        logger.info("Discovery done.")
        if self.discovered_network_hosts:
            return True
        return False

    # TODO: This is bad and you should feel bad
    # def identify_fingerprints(self):
    #     responses = dict()
    #     for network, network_nodes in self.discovered_network_hosts.items():
    #         for network_node in network_nodes:
    #             srcPort = random.randint(1025, 65534)
    #             resp = sr1(IP(dst=str(network_node.ip)) / TCP(sport=srcPort, dport=topports, flags=ScapyTCPFlag.SYN),
    #                        timeout=1, verbose=0)
    #             if resp:
    #                 responses[network_node] = resp
    #
    #     for fingerprinter in self.fingerprinters:  # type: fingerprinter
    #         for network_node, resp in responses.items():
    #             os = fingerprinter.identify_os_from_pkt(resp)
    #             if os:
    #                 network_node.possible_fingerprints |= os

    def make_network_graph(self):
        """Render discovered hosts and their hop chains as a PNG graph saved
        under ../graphs/ (imports matplotlib/networkx lazily)."""
        hosts = set()
        devices = set()  # NOTE(review): never used
        for network, network_nodes in self.discovered_network_hosts.items():
            for network_node in network_nodes:
                hosts.add(network_node)
        if hosts:
            logger.info("Drawing network graph...")
            import matplotlib.pyplot as plt
            import datetime
            import networkx as nx
            nodes = set()
            current_host_info = self.network_tool.host_information
            host_ip = current_host_info.ip
            lone_nodes = set()
            # Build a set of (source, destination) edges, e.g.:
            # {('172.16.2.243', '172.16.2.1'), ('172.16.2.243', '172.16.2.8')}
            for nn in hosts:  # type: NetworkNode
                if not nn.hops:
                    # No route information: draw as an isolated node.
                    lone_nodes.add(str(nn.ip))
                    continue
                next_hop = nn.hops[0]
                nodes.add((host_ip, next_hop))
                for i in range(1, len(nn.hops)):
                    ip = nn.hops[i]
                    nodes.add((next_hop, ip))
                    next_hop = ip
            val_map = {}
            for nn in hosts:
                # TODO:
                # if nn.type != "host":
                #     continue
                val_map[str(nn.ip)] = 0.5714285714285714
            gr = nx.Graph()
            for s, d in nodes:
                if s not in gr:
                    gr.add_node(s)
                if d not in gr:
                    gr.add_node(d)
                gr.add_edge(s, d)
            for s in lone_nodes:
                gr.add_node(s)
            # Discovered hosts get one colour value, intermediate hops another.
            values = [val_map.get(node, 0.25) for node in gr.nodes()]
            plt.figure(figsize=(20, 20))
            plt.rcParams.update({'axes.titlesize': 'large'})
            plt.title("Scan Topology", fontsize=20)
            nx.draw(gr,
                    with_labels=True,
                    edge_color='blue',
                    node_size=7000,
                    node_color=values,
                    font_size=12,
                    alpha=0.3,
                    font_family='sans-serif',
                    cmap=plt.get_cmap('jet'),
                    )
            # filename = get_next_file_path(folder=os.path.abspath(os.path.join("graphs")),
            #                               base_filename="network_graph.png")
            filename = "network_graph_" + datetime.datetime.now().strftime("%Y_%m_%d__%H%M%S") + ".png"
            fullpath = os.path.abspath(os.path.join("..", "graphs", filename))
            plt.savefig(fullpath)
            logger.info("Created network graph ({0:s})".format(fullpath))

    def find_hops(self):
        """Populate each discovered node's .hops with the route to reach it."""
        logger.info("Attempting to find hops needed to reach discovered hosts...")
        for network, network_nodes in self.discovered_network_hosts.items():
            for network_node in network_nodes:  # type: NetworkNode
                logger.debug("Finding hops for {ip}".format(ip=str(network_node.ip)))
                network_node.hops = self.network_tool.route_to_target(str(network_node.ip))
                logger.debug("Route to get to {ip} : {route}".format(ip=str(network_node.ip),
                                                                     route=" --> ".join(network_node.hops)))
        logger.info("Hops discovery done.")

    def pretty_print_ips(self):
        """Print one text table of discovered hosts per scanned network."""
        header_labels = ["IP", "MAC", "Host", "Hops", "Opened Ports", "Closed Ports", "Possible Fingerprints"]
        table = Texttable(max_width=230)
        # We only want the header and the horizontal lines
        table.set_deco(Texttable.HEADER | Texttable.HLINES)
        # All columns are of type str
        table.set_cols_dtype(["t"] * len(header_labels))
        # All columns left align
        table.set_cols_align(["l"] * len(header_labels))
        table.set_cols_width([15, 17, 30, 58, 30, 30, 50])
        for network, network_nodes in self.discovered_network_hosts.items():
            table.header(header_labels)
            print("Live hosts in network {0:s}".format(str(network)))
            for nn in network_nodes:
                table.add_row([str(nn.ip or "Unknown IP"),
                               str(nn.mac or "Unknown MAC"),
                               nn.host or "Unknown Host",
                               "{hops} ({nb_hops})".format(hops=" --> ".join(hop for hop in nn.hops),
                                                           nb_hops=len(nn.hops)),
                               str(nn.opened_ports),
                               str(nn.closed_ports),
                               str(nn.possible_fingerprints or "Unknown")])
            print(table.draw())
            table.reset()

    def scan_found_network_nodes_for_opened_ports(self, ports_to_scan: List[int]):
        """TCP-scan the given ports on every discovered host and record results."""
        logger.info("Checking founds hosts for opened ports...")
        logger.info("Scanning ports %s.", str(ports_to_scan).strip("[]"))
        for network, network_nodes in self.discovered_network_hosts.items():
            for network_node in network_nodes:  # type: NetworkNode
                opened_ports, closed_ports = self.network_tool.tcp_port_scan(str(network_node.ip), ports_to_scan)
                logger.debug("{0:s} has these ports opened: {1:s}".format(str(network_node.ip),
                                                                          str(opened_ports).strip("[]")))
                logger.debug("{0:s} has these ports closed: {1:s}".format(str(network_node.ip),
                                                                          str(closed_ports).strip("[]")))
                network_node.opened_ports = opened_ports
                network_node.closed_ports = closed_ports
Better comment.
"""
analyzr.NetworkScanner
~~~~~~~~~~~~~~~
This modules allows to scan the network of the current host..
"""
from typing import List
import netaddr
from scapy.all import *
from texttable import Texttable
from analyzr.core.entities import NetworkNode
from analyzr.networktool import NetworkToolFacade
logger = logging.getLogger(__name__)
def discover():
    """Module-level discovery entry point; currently an unimplemented stub."""
    pass
class NetworkDiscoverer():
    """Discovers live hosts via ARP probing, traces the hops to reach them,
    scans TCP ports, and can render the topology as a PNG graph.

    State lives in ``discovered_network_hosts``, a mapping from scanned
    network to the set of NetworkNode objects found in it.
    """

    # Default networks to probe when the caller supplies none.
    # Adapted from netdiscover main.c
    # https://sourceforge.net/p/netdiscover/code/115/tree/trunk/src/main.c
    # And improved by using netaddr.
    common_networks = [
        netaddr.IPNetwork("192.168.0.0/16"),
        netaddr.IPNetwork("172.16.0.0/12"),
        netaddr.IPNetwork("10.0.0.0/8")]
    # "10.0.0.0/8"

    def __init__(self, network_tool: NetworkToolFacade, fingerprinters: list):
        # NOTE(review): "fingerprinters" is accepted but never stored; only
        # the commented-out identify_fingerprints() below would use it.
        self.network_tool = network_tool
        self.discovered_network_hosts = defaultdict(
            set)  # (network --> set(NetworkNode, NetworkNode, NetworkNode, ...))

    def discover(self, network_ranges: list = None):
        """
        Scans specified network ranges to find live hosts. If no networks given, a default list is used.
        Returns True if any hosts were found. False if otherwise.
        """
        def scan(net: netaddr.IPNetwork):
            # ARP-probe one network and record every responding host.
            logger.debug("Starting host discovery on network {network}...".format(network=net))
            results = self.network_tool.arp_discover_hosts(network=str(net), timeout=10)
            if results:
                logger.info("Found {nb_found_hosts} hosts in {network}.".format(nb_found_hosts=len(results),
                                                                                network=net))
                for result in results:
                    network_node = NetworkNode(ip=netaddr.IPAddress(result.ip), mac=netaddr.EUI(result.mac))
                    self.discovered_network_hosts[net].add(network_node)
            else:
                logger.info("No hosts found on {network}.".format(network=net))

        logger.info("Starting host discovery...")
        if network_ranges:
            networks_to_scan = [netaddr.IPNetwork(net_to_scan) for net_to_scan in network_ranges]
        else:
            networks_to_scan = self.common_networks
        for net in networks_to_scan:
            if net.prefixlen >= 16:
                scan(net)
            else:
                # If bigger than a /16 we split the network into subnetworks to avoid flooding the network with ARP
                # packets
                logger.info("Splitting {0:s} into /16 subnets to avoid sending too many ARP packets.".format(str(net)))
                for subnet in net.subnet(16):
                    scan(subnet)
        logger.info("Discovery done.")
        if self.discovered_network_hosts:
            return True
        return False

    # TODO: This is bad and you should feel bad
    # def identify_fingerprints(self):
    #     responses = dict()
    #     for network, network_nodes in self.discovered_network_hosts.items():
    #         for network_node in network_nodes:
    #             srcPort = random.randint(1025, 65534)
    #             resp = sr1(IP(dst=str(network_node.ip)) / TCP(sport=srcPort, dport=topports, flags=ScapyTCPFlag.SYN),
    #                        timeout=1, verbose=0)
    #             if resp:
    #                 responses[network_node] = resp
    #
    #     for fingerprinter in self.fingerprinters:  # type: fingerprinter
    #         for network_node, resp in responses.items():
    #             os = fingerprinter.identify_os_from_pkt(resp)
    #             if os:
    #                 network_node.possible_fingerprints |= os

    def make_network_graph(self):
        """Render discovered hosts and their hop chains as a PNG saved under
        ../graphs/ (matplotlib/networkx imported lazily, only when needed)."""
        hosts = set()
        devices = set()  # NOTE(review): never used
        for network, network_nodes in self.discovered_network_hosts.items():
            for network_node in network_nodes:
                hosts.add(network_node)
        if hosts:
            logger.info("Drawing network graph...")
            import matplotlib.pyplot as plt
            import datetime
            import networkx as nx
            nodes = set()
            current_host_info = self.network_tool.host_information
            host_ip = current_host_info.ip
            lone_nodes = set()
            # Build a set of (source, destination) edges, e.g.:
            # {('172.16.2.243', '172.16.2.1'), ('172.16.2.243', '172.16.2.8')}
            for nn in hosts:  # type: NetworkNode
                if not nn.hops:
                    # No route information: draw as an isolated node.
                    lone_nodes.add(str(nn.ip))
                    continue
                # Chain this host's hops into consecutive edges starting at us.
                next_hop = nn.hops[0]
                nodes.add((host_ip, next_hop))
                for i in range(1, len(nn.hops)):
                    ip = nn.hops[i]
                    nodes.add((next_hop, ip))
                    next_hop = ip
            val_map = {}
            for nn in hosts:
                # TODO:
                # if nn.type != "host":
                #     continue
                val_map[str(nn.ip)] = 0.5714285714285714
            gr = nx.Graph()
            for s, d in nodes:
                if s not in gr:
                    gr.add_node(s)
                if d not in gr:
                    gr.add_node(d)
                gr.add_edge(s, d)
            for s in lone_nodes:
                gr.add_node(s)
            # Discovered hosts get one colour value, intermediate hops another.
            values = [val_map.get(node, 0.25) for node in gr.nodes()]
            plt.figure(figsize=(20, 20))
            plt.rcParams.update({'axes.titlesize': 'large'})
            plt.title("Scan Topology", fontsize=20)
            nx.draw(gr,
                    with_labels=True,
                    edge_color='blue',
                    node_size=7000,
                    node_color=values,
                    font_size=12,
                    alpha=0.3,
                    font_family='sans-serif',
                    cmap=plt.get_cmap('jet'),
                    )
            # filename = get_next_file_path(folder=os.path.abspath(os.path.join("graphs")),
            #                               base_filename="network_graph.png")
            filename = "network_graph_" + datetime.datetime.now().strftime("%Y_%m_%d__%H%M%S") + ".png"
            fullpath = os.path.abspath(os.path.join("..", "graphs", filename))
            plt.savefig(fullpath)
            logger.info("Created network graph ({0:s})".format(fullpath))

    def find_hops(self):
        """Populate each discovered node's .hops with the route to reach it."""
        logger.info("Attempting to find hops needed to reach discovered hosts...")
        for network, network_nodes in self.discovered_network_hosts.items():
            for network_node in network_nodes:  # type: NetworkNode
                logger.debug("Finding hops for {ip}".format(ip=str(network_node.ip)))
                network_node.hops = self.network_tool.route_to_target(str(network_node.ip))
                logger.debug("Route to get to {ip} : {route}".format(ip=str(network_node.ip),
                                                                     route=" --> ".join(network_node.hops)))
        logger.info("Hops discovery done.")

    def pretty_print_ips(self):
        """Print one text table of discovered hosts per scanned network."""
        header_labels = ["IP", "MAC", "Host", "Hops", "Opened Ports", "Closed Ports", "Possible Fingerprints"]
        table = Texttable(max_width=230)
        # We only want the header and the horizontal lines
        table.set_deco(Texttable.HEADER | Texttable.HLINES)
        # All columns are of type str
        table.set_cols_dtype(["t"] * len(header_labels))
        # All columns left align
        table.set_cols_align(["l"] * len(header_labels))
        table.set_cols_width([15, 17, 30, 58, 30, 30, 50])
        for network, network_nodes in self.discovered_network_hosts.items():
            table.header(header_labels)
            print("Live hosts in network {0:s}".format(str(network)))
            for nn in network_nodes:
                table.add_row([str(nn.ip or "Unknown IP"),
                               str(nn.mac or "Unknown MAC"),
                               nn.host or "Unknown Host",
                               "{hops} ({nb_hops})".format(hops=" --> ".join(hop for hop in nn.hops),
                                                           nb_hops=len(nn.hops)),
                               str(nn.opened_ports),
                               str(nn.closed_ports),
                               str(nn.possible_fingerprints or "Unknown")])
            print(table.draw())
            table.reset()

    def scan_found_network_nodes_for_opened_ports(self, ports_to_scan: List[int]):
        """TCP-scan the given ports on every discovered host and record the
        opened/closed results on each NetworkNode."""
        logger.info("Checking founds hosts for opened ports...")
        logger.info("Scanning ports %s.", str(ports_to_scan).strip("[]"))
        for network, network_nodes in self.discovered_network_hosts.items():
            for network_node in network_nodes:  # type: NetworkNode
                opened_ports, closed_ports = self.network_tool.tcp_port_scan(str(network_node.ip), ports_to_scan)
                logger.debug("{0:s} has these ports opened: {1:s}".format(str(network_node.ip),
                                                                          str(opened_ports).strip("[]")))
                logger.debug("{0:s} has these ports closed: {1:s}".format(str(network_node.ip),
                                                                          str(closed_ports).strip("[]")))
                network_node.opened_ports = opened_ports
                network_node.closed_ports = closed_ports
|
import logging
import sys
import cli_ui
from gitlabform import EXIT_PROCESSING_ERROR
from gitlabform.gitlab import GitLab
from gitlabform.gitlab.core import NotFoundException
class BranchProtector(object):
    """Applies per-branch protection settings from a configuration dict to GitLab."""

    def __init__(self, gitlab: GitLab, strict: bool):
        """
        :param gitlab: GitLab API wrapper.
        :param strict: if True, a missing branch aborts processing with an error exit.
        """
        self.gitlab = gitlab
        self.strict = strict

    def protect_branch(self, project_and_group, configuration, branch):
        """Protect or unprotect a single branch according to its configuration.

        Supports two configuration styles: the old API keys
        (developers_can_push / developers_can_merge) and the new
        access-level keys (push/merge/unprotect_access_level).
        """
        try:
            branch_config = configuration["branches"][branch]
            if "protected" in branch_config and branch_config["protected"]:
                # BUG FIX: the original tested `("a" and "b") in cfg`, which
                # evaluates to `"b" in cfg` and silently ignored the first key.
                if all(
                    key in branch_config
                    for key in ("developers_can_push", "developers_can_merge")
                ):
                    logging.debug("Setting branch '%s' as *protected*", branch)
                    # unprotect first to reset 'allowed to merge' and 'allowed to push' fields
                    self.gitlab.unprotect_branch_new_api(project_and_group, branch)
                    self.gitlab.protect_branch(
                        project_and_group,
                        branch,
                        branch_config["developers_can_push"],
                        branch_config["developers_can_merge"],
                    )
                elif all(
                    key in branch_config
                    for key in (
                        "push_access_level",
                        "merge_access_level",
                        "unprotect_access_level",
                    )
                ):
                    try:
                        branch_access_levels = self.gitlab.get_branch_access_levels(
                            project_and_group, branch
                        )
                        levels = [
                            "push_access_levels",
                            "merge_access_levels",
                            "unprotect_access_levels",
                        ]
                        # Check each access type has the correct access level; if all
                        # do, no change is needed.
                        # Gitlabform uses access_levels in singular form and gitlab
                        # uses plural form; [0:-1] removes the plural 's'.
                        if all(
                            branch_config[level[0:-1]]
                            == branch_access_levels[level][0]["access_level"]
                            for level in levels
                        ):
                            # BUG FIX: the '%s' placeholder had no `branch` argument.
                            logging.debug(
                                "Skipping set branch '%s' access levels because they're already set",
                                branch,
                            )
                            return
                    except NotFoundException:
                        logging.debug("No access levels for this branch exist yet")
                    logging.debug("Setting branch '%s' access level", branch)
                    # unprotect first to reset 'allowed to merge' and 'allowed to push' fields
                    self.gitlab.unprotect_branch_new_api(project_and_group, branch)
                    self.gitlab.branch_access_level(
                        project_and_group,
                        branch,
                        branch_config["push_access_level"],
                        branch_config["merge_access_level"],
                        branch_config["unprotect_access_level"],
                    )
                if "code_owner_approval_required" in branch_config:
                    logging.debug(
                        "Setting branch '%s' \"code owner approval required\" option",
                        branch,
                    )
                    self.gitlab.branch_code_owner_approval_required(
                        project_and_group,
                        branch,
                        branch_config["code_owner_approval_required"],
                    )
            else:
                logging.debug("Setting branch '%s' as unprotected", branch)
                self.gitlab.unprotect_branch_new_api(project_and_group, branch)
        except NotFoundException:
            message = f"Branch '{branch}' not found when trying to set it as protected/unprotected!"
            if self.strict:
                cli_ui.error(message)
                sys.exit(EXIT_PROCESSING_ERROR)
            else:
                cli_ui.warning(message)
Add a deprecation warning for the old branch protection
syntax, and refactor the code:
* pull out the methods that actually perform operations,
  to make the logic of the main method clearer,
* use class-level variables to make the code more DRY.
import logging
import sys
import cli_ui
from gitlabform import EXIT_PROCESSING_ERROR
from gitlabform.gitlab import GitLab
from gitlabform.gitlab.core import NotFoundException
class BranchProtector(object):
    """Applies per-branch protection settings from the configuration to GitLab.

    Supports both the deprecated old-API keys (developers_can_push/merge)
    and the new access-level keys.
    """

    # Deprecated configuration keys (old protection API).
    old_api_keys = ["developers_can_push", "developers_can_merge"]
    # Current configuration keys (access-level protection API).
    new_api_keys = [
        "push_access_level",
        "merge_access_level",
        "unprotect_access_level",
    ]

    def __init__(self, gitlab: GitLab, strict: bool):
        """
        :param gitlab: GitLab API wrapper.
        :param strict: if True, a missing branch aborts processing with an error exit.
        """
        self.gitlab = gitlab
        self.strict = strict

    def protect_branch(self, project_and_group, configuration, branch):
        """Protect or unprotect a single branch according to its configuration."""
        try:
            requested_configuration = configuration["branches"][branch]
            if (
                "protected" in requested_configuration
                and requested_configuration["protected"]
            ):
                if all(key in requested_configuration for key in self.old_api_keys):
                    # unprotect first to reset 'allowed to merge' and 'allowed to push' fields
                    self.protect_using_old_api(
                        requested_configuration, project_and_group, branch
                    )
                elif all(key in requested_configuration for key in self.new_api_keys):
                    if self.configuration_update_needed(
                        requested_configuration, project_and_group, branch
                    ):
                        self.protect_using_new_api(
                            requested_configuration, project_and_group, branch
                        )
                    else:
                        # BUG FIX: the '%s' placeholder had no `branch` argument.
                        logging.debug(
                            "Skipping set branch '%s' access levels because they're already set",
                            branch,
                        )
                        return
                # TODO: is this ok that we skip below code in this case?
                if "code_owner_approval_required" in requested_configuration:
                    self.set_code_owner_approval_required(
                        requested_configuration, project_and_group, branch
                    )
            else:
                self.unprotect(project_and_group, branch)
        except NotFoundException:
            message = f"Branch '{branch}' not found when trying to set it as protected/unprotected!"
            if self.strict:
                cli_ui.error(message)
                sys.exit(EXIT_PROCESSING_ERROR)
            else:
                cli_ui.warning(message)

    def protect_using_old_api(self, requested_configuration, project_and_group, branch):
        """Protect a branch via the deprecated boolean-flags API, with a warning."""
        logging.warning(
            f"Using keys {self.old_api_keys} for configuring protected"
            " branches is deprecated and will be removed in future versions of GitLabForm."
            f" Please start using new keys: {self.new_api_keys}"
        )
        logging.debug("Setting branch '%s' as *protected*", branch)
        # unprotect first to reset 'allowed to merge' and 'allowed to push' fields
        self.gitlab.unprotect_branch_new_api(project_and_group, branch)
        self.gitlab.protect_branch(
            project_and_group,
            branch,
            requested_configuration["developers_can_push"],
            requested_configuration["developers_can_merge"],
        )

    def protect_using_new_api(self, requested_configuration, project_and_group, branch):
        """Protect a branch via the access-levels API."""
        logging.debug("Setting branch '%s' access level", branch)
        # unprotect first to reset 'allowed to merge' and 'allowed to push' fields
        self.gitlab.unprotect_branch_new_api(project_and_group, branch)
        self.gitlab.branch_access_level(
            project_and_group,
            branch,
            requested_configuration["push_access_level"],
            requested_configuration["merge_access_level"],
            requested_configuration["unprotect_access_level"],
        )

    def set_code_owner_approval_required(
        self, requested_configuration, project_and_group, branch
    ):
        """Toggle the 'code owner approval required' flag on a branch."""
        logging.debug(
            "Setting branch '%s' \"code owner approval required\" option",
            branch,
        )
        self.gitlab.branch_code_owner_approval_required(
            project_and_group,
            branch,
            requested_configuration["code_owner_approval_required"],
        )

    def configuration_update_needed(
        self, requested_configuration, project_and_group, branch
    ):
        """Return True when the branch's access levels must be (re)applied.

        True when any requested level differs from the current one, or when
        no protection exists yet; False when everything already matches.
        """
        try:
            branch_access_levels = self.gitlab.get_branch_access_levels(
                project_and_group, branch
            )
            # this returns dicts like this:
            # {'id': 4, 'name': 'main',
            # 'push_access_levels': [{'access_level': 40, 'access_level_description': 'Maintainers', 'user_id': None, 'group_id': None}],
            # 'merge_access_levels': [{'access_level': 40, 'access_level_description': 'Maintainers', 'user_id': None, 'group_id': None}],
            # 'unprotect_access_levels': [], 'code_owner_approval_required': False, 'allow_force_push': False, }
            levels = [
                "push_access_levels",
                "merge_access_levels",
                "unprotect_access_levels",
            ]
            # Gitlabform uses access_levels in singular form and gitlab uses
            # plural form; [0:-1] removes the plural 's'.
            # BUG FIX: the original returned True when all levels ALREADY
            # matched (and None after NotFoundException), inverting the
            # caller's skip/update decision relative to the pre-refactor
            # behavior. An update is needed when any level differs or when
            # no access levels exist yet.
            return not all(
                requested_configuration[level[0:-1]]
                == branch_access_levels[level][0]["access_level"]
                for level in levels
            )
        except NotFoundException:
            logging.debug("No access levels for this branch exist yet")
            return True

    def unprotect(self, project_and_group, branch):
        """Remove protection from a branch."""
        logging.debug("Setting branch '%s' as unprotected", branch)
        self.gitlab.unprotect_branch_new_api(project_and_group, branch)
|
#
# Copyright 2010-2012 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Webauthn2 provider implementations using OAuth2 OpenID Connect. This class inherits from DatabaseConnection2
Provider-specific parameters inherited from DatabaseConnection2 module:
`database_type`
: The database type (e.g., postgres).
`database_dsn`
: The database source name (e.g., "host=localhost user=ermrest password=... dbname=ermrest").
`database_schema`
: The schema name qualifier for provider tables within the database (text or None).
`database_max_retries`
: The number of times to retry transient errors when running independent transactions (int).
Provider-specific parameters specific to OAuth2:
`oauth2_discovery_uri`
: OpenID Connect Discovery 1.0 endpoint
`oauth2_redirect_relative_uri`
: The path that users are redirected to after providing consent/authorization
`oauth2_client_secret_file`
: The file, obtained from an OAuth2 provider (e.g., google) during registration, containing shared secrets and other information.
"""
from providers import *
from webauthn2.util import *
from webauthn2.providers import database
import web
import random
import urllib
import urllib2
import uuid
import urlparse
import web
import simplejson
import psycopg2
import oauth2client.client
from jwkest import jwk
from jwkest import jws
from oauth2client.crypt import AppIdentityError, _urlsafe_b64decode, CLOCK_SKEW_SECS, AUTH_TOKEN_LIFETIME_SECS, MAX_TOKEN_LIFETIME_SECS, PyCryptoVerifier
import time
import base64
from Crypto import Random
from Crypto.Hash.HMAC import HMAC
import Crypto.Hash.SHA256
import json
from datetime import datetime, timedelta
import webauthn2.providers
import collections
# Default configuration values for this provider; deployments may override
# any of these through the webauthn2 configuration machinery.
config_built_ins = web.storage(
    # Items needed for methods inherited from database provider
    database_type= 'postgres',
    database_dsn= 'dbname=',
    database_schema= 'webauthn2',
    database_max_retries= 5,
    # OAuth-specific items
    # Lifetime (seconds) of a nonce signing key; soft timeout is half of this.
    oauth2_nonce_hard_timeout=3600,
    oauth2_nonce_cookie_name='oauth2_auth_nonce',
    # File with parameters, including the shared secret, shared between the client and the OAuth provider
    oauth2_client_secret_file=None,
    oauth2_discovery_uri=None,
    oauth2_redirect_relative_uri="/authn/session",
    # If True, ask the provider for offline access (i.e., a refresh token).
    oauth2_request_offline_access=False,
    oauth2_scope="openid email profile",
    # If True, the provider echoes our nonce back in the ID token's 'nonce' claim.
    oauth2_provider_sets_token_nonce=False
    )
class nonce_util(database.DatabaseConnection2):
    """Manages HMAC keys, stored in the database, used to sign and verify the
    anti-CSRF nonces exchanged during the OAuth2 login flow.

    Keys expire after ``oauth2_nonce_hard_timeout`` seconds; a replacement key
    is inserted once the newest key passes the soft timeout (half of hard).
    """
    algorithm=Crypto.Hash.SHA256
    nonce_table='oauth2_nonce'

    def __init__(self, config):
        database.DatabaseConnection2.__init__(self, config)
        self.config_params = {'hard_timeout' : config.oauth2_nonce_hard_timeout,
                              'soft_timeout' : config.oauth2_nonce_hard_timeout / 2,
                              'nonce_table' : config.database_schema + '.' + self.nonce_table}
        # Cached (timeout, raw_key) pairs, newest first; refreshed by get_keys().
        self.keys=[]

    def get_keys(self, db):
        """Return all valid keys as (timeout, raw_key) pairs, newest first."""
        self.update_timeout(db)
        def db_body(db):
            return db.select(self.config_params['nonce_table'], what="timeout, key", order="timeout desc")
        if db:
            textkeys = db_body(db)
        else:
            textkeys = self._db_wrapper(db_body)
        self.keys=[]
        for k in textkeys:
            self.keys.append((k.get('timeout'), self.texttokey(k.get('key'))))
        return self.keys

    def update_timeout(self, db, force=False):
        """Purge expired keys and insert a fresh key when none is young enough.

        Unless ``force`` is True, this is a no-op while a cached key is still
        inside the soft-timeout window.
        """
        if not force:
            for k in self.keys:
                if datetime.now() + timedelta(0, self.config_params['soft_timeout']) < k[0]:
                    return
        def db_body(db):
            db.query("delete from %(nonce_table)s where timeout < now()" % self.config_params)
            # Insert a new key only if no existing key still has more than
            # soft_timeout seconds of life left.
            db.query("""
insert into %(nonce_table)s (key, timeout)
  select $new_key, now() + interval '%(hard_timeout)d seconds'
  where not exists
  (select 1 from %(nonce_table)s where timeout - now() > interval '%(soft_timeout)d seconds')
""" % self.config_params,
                     vars={'new_key' : self.keytotext(self.make_key())})
        if db:
            db_body(db)
        else:
            self._db_wrapper(db_body)

    def get_current_key(self, db):
        # Keys come back newest-first, so [0][1] is the current signing key.
        return self.get_keys(db)[0][1]

    @staticmethod
    def get_cookie_ts(cookie):
        # Cookie format: "<timestamp>.<hmac/random>.<base64 referrer>".
        return int(cookie.split('.')[0])

    @staticmethod
    def get_cookie_url(cookie):
        # Third cookie component is the base64-encoded original referrer URL.
        url = base64.b64decode(cookie.split('.')[2])
        # Debug for referrer tracing
        web.debug("in get_cookie_url, cookie is '{cookie}', url is '{url}'".format(cookie=str(cookie), url=str(url)))
        if url == '':
            return None
        return url

    @staticmethod
    def keytotext(key):
        # Keys are stored base64-encoded in the database.
        return base64.b64encode(key)

    @staticmethod
    def texttokey(text):
        return base64.b64decode(text)

    def make_key(self):
        # Random key sized to the HMAC digest length.
        return Random.get_random_bytes(self.algorithm.digest_size)

    def time_ok(self, value):
        # True while the timestamp is younger than the hard timeout.
        return time.time() - value < self.config_params['hard_timeout']

    def encode(self, msg, db):
        """HMAC-sign ``msg`` with the current key; returns the hex digest."""
        h = HMAC(self.get_current_key(db), msg, self.algorithm)
        return h.hexdigest()

    def hash_matches(self, msg, hashed, db):
        """Check ``msg`` against ``hashed`` with every valid key; on failure,
        force a key rotation once and retry (handles recent rollover)."""
        retval = self._check_hash_match(msg, hashed, db)
        if retval == False:
            self.update_timeout(db, True)
            retval = self._check_hash_match(msg, hashed, db)
        return retval

    def _check_hash_match(self, msg, hashed, db):
        # True if any current key's HMAC of msg equals the presented hash.
        for k in self.get_keys(db):
            h = HMAC(k[1], msg, self.algorithm)
            if h.hexdigest() == hashed:
                return True
        return False
# Public API of this module.
__all__ = [
    'OAuth2SessionStateProvider',
    'OAuth2ClientProvider',
    'config_built_ins'
    ]
class OAuth2 (database.DatabaseConnection2):
    """Base class tying OAuth2 providers to the shared database layer."""

    # this is the storage format version, not the software version
    major = 1
    minor = 0

    def __init__(self, config):
        database.DatabaseConnection2.__init__(self, config)
class OAuth2Login (ClientLogin):
    """Handles the OAuth2/OpenID Connect redirect callback: validates nonces,
    exchanges the authorization code for tokens, verifies the ID token,
    fetches userinfo, and creates or updates the local user record."""

    def __init__(self, provider):
        ClientLogin.__init__(self, provider)

    def login(self, manager, context, db, **kwargs):
        """
        Return "username" in the form iss:sub.
        It is expected that the caller will store the resulting username into context.client for reuse.

        Raises OAuth2ProtocolError / OAuth2LoginTimeoutError / OAuth2IDTokenError
        on any validation failure.
        """
        vals = web.input()
        # Check that this request came from the same user who initiated the oauth flow
        nonce_vals = {
            'auth_url_nonce' : vals.get('state'),
            'auth_cookie_nonce' : web.cookies().get(self.provider.nonce_cookie_name)
            }
        if nonce_vals['auth_url_nonce'] == None:
            raise OAuth2ProtocolError("No authn_nonce in initial redirect")
        if (nonce_vals['auth_cookie_nonce'] == None):
            raise OAuth2ProtocolError("No authn nonce cookie")
        # Has the cookie nonce expired?
        try:
            ts = nonce_util.get_cookie_ts(nonce_vals['auth_cookie_nonce'])
        except:
            raise OAuth2ProtocolError('bad nonce cookie')
        if not self.provider.nonce_state.time_ok(ts):
            raise OAuth2LoginTimeoutError('Login timed out')
        # The URL nonce must be the HMAC of the cookie nonce (same-browser check).
        if not self.provider.nonce_state.hash_matches(nonce_vals['auth_cookie_nonce'], nonce_vals['auth_url_nonce'], db):
            raise OAuth2ProtocolError('nonce mismatch')
        # we'll write this to the db if all goes well
        redirect_full_payload=simplejson.dumps(vals, separators=(',', ':'))
        # Get id token: exchange the authorization code at the token endpoint.
        token_args = {
            'code' : vals.get('code'),
            'client_id' : self.provider.cfg.get('client_id'),
            'client_secret' : self.provider.cfg.get('client_secret'),
            'redirect_uri' : web.ctx.home + web.ctx.path,
            'nonce' : nonce_vals['auth_url_nonce'],
            'grant_type' : 'authorization_code'}
        base_timestamp = datetime.now()
        u=urllib2.urlopen(self.provider.cfg.get('token_endpoint'), urllib.urlencode(token_args))
        payload=simplejson.load(u)
        u.close()
        token_payload=simplejson.dumps(payload, separators=(',', ':'))
        raw_id_token=payload.get('id_token')
        # Validate id token: verify its signature against the provider's JWKS keys.
        raw_keys=jwk.load_jwks_from_url(self.provider.cfg.get('jwks_uri'))
        keys=[]
        for k in raw_keys:
            keys.append(k.key.exportKey())
        id_result=self.verify_signed_jwt_with_keys(raw_id_token, keys, self.provider.cfg.get('client_id'))
        id_token=id_result.get('body')
        id_header=id_result.get('header')
        if id_token.get('iss') == None or id_token.get('iss').strip() == '':
            raise OAuth2IDTokenError('No issuer in ID token')
        if id_token.get('sub') == None or id_token.get('sub').strip() == '':
            raise OAuth2IDTokenError('No subject in ID token')
        if self.provider.provider_sets_token_nonce and id_token.get('nonce') != nonce_vals['auth_url_nonce']:
            raise OAuth2IDTokenError('Bad nonce in ID token')
        # Validate access token against the at_hash claim from the ID token.
        self.validate_access_token(id_header.get('alg'), id_token.get('at_hash'), payload.get('access_token'))
        # Get user directory data. Right now we're assuming the server will return json.
        # TODO: in theory the return value could be signed jwt
        req=urllib2.Request(self.provider.cfg.get('userinfo_endpoint'), headers={'Authorization' : 'Bearer ' + payload.get('access_token')})
        f = urllib2.urlopen(req)
        userinfo=simplejson.load(f)
        f.close()
        # NOTE(review): validate_userinfo() is defined below but never invoked
        # here -- confirm whether userinfo should be checked against id_token.
        username = id_token.get('iss') + ':' + id_token.get('sub')
        # Update user table
        self.create_or_update_user(manager, context, username, id_token, userinfo, base_timestamp, payload, db)
        return username

    def create_or_update_user(self, manager, context, username, id_token, userinfo, base_timestamp, token_payload, db):
        """Stash token/userinfo data in context.user and insert or update the
        corresponding user row."""
        context.user['id_token'] = simplejson.dumps(id_token, separators=(',', ':'))
        context.user['userinfo'] = simplejson.dumps(userinfo, separators=(',', ':'))
        context.user['access_token'] = token_payload.get('access_token')
        # Absolute expiration computed from the token request timestamp.
        context.user['access_token_expiration'] = base_timestamp + timedelta(seconds=int(token_payload.get('expires_in')))
        context.user['refresh_token'] = token_payload.get('refresh_token')
        if self.provider._client_exists(db, username):
            manager.clients.manage.update_noauthz(manager, context, username, db)
        else:
            context.user['username'] = username
            manager.clients.manage.create_noauthz(manager, context, username, db)

    def validate_userinfo(self, userinfo, id_token):
        """Cross-check the userinfo response against the ID token claims."""
        if userinfo.get('sub') != id_token.get('sub'):
            raise Oauth2UserinfoError("Subject mismatch")
        for key in ['iss', 'aud']:
            if userinfo.get(key) != None and userinfo.get(key) != id_token.get(key):
                raise Oauth2UserinfoError("Bad value for " + key)

    @staticmethod
    def validate_access_token(alg, expected_hash, access_token):
        """Verify the access token against the ID token's at_hash claim."""
        if alg == None:
            raise OAuth2ProtocolError("No hash algorithm specified")
        if alg.lower() == 'rs256':
            # rs256 uses jws.left_hash's default hash algorithm.
            hash = jws.left_hash(access_token)
        else:
            hash = jws.left_hash(access_token, alg)
        if hash == None:
            raise OAuth2Exception("Hash failed, alg = '" + alg + "', token is '" + access_token + "'")
        if hash != expected_hash:
            raise OAuth2Exception("Bad hash value in access token")

    @staticmethod
    def verify_signed_jwt_with_keys(jwt, keys, audience):
        """Verify a JWT against public keys.
        See http://self-issued.info/docs/draft-jones-json-web-token.html.
        Args:
          jwt: string, A JWT.
          keys: list of PEM-encoded public keys to try.
          audience: expected 'aud' claim, or None to skip the audience check.
        Returns:
          dict with 'header' and 'body': the deserialized JSON header and payload.
        Raises:
          AppIdentityError if any checks are failed.
        """
        segments = jwt.split('.')
        if len(segments) != 3:
            raise AppIdentityError('Wrong number of segments in token: %s' % jwt)
        signed = '%s.%s' % (segments[0], segments[1])
        header = simplejson.loads(_urlsafe_b64decode(segments[0]))
        signature = _urlsafe_b64decode(segments[2])
        # Parse token.
        json_body = _urlsafe_b64decode(segments[1])
        try:
            parsed = json.loads(json_body)
        except:
            raise AppIdentityError('Can\'t parse token: %s' % json_body)
        # Check signature: any one of the published keys may verify it.
        verified = False
        for pem in keys:
            verifier = PyCryptoVerifier.from_string(pem, False)
            if verifier.verify(signed, signature):
                verified = True
                break
        if not verified:
            raise AppIdentityError('Invalid token signature: %s' % jwt)
        # Check creation timestamp.
        iat = parsed.get('iat')
        if iat is None:
            raise AppIdentityError('No iat field in token: %s' % json_body)
        earliest = iat - CLOCK_SKEW_SECS
        # Check expiration timestamp. (`long` makes this Python 2 only.)
        now = long(time.time())
        exp = parsed.get('exp')
        if exp is None:
            raise AppIdentityError('No exp field in token: %s' % json_body)
        if exp >= now + MAX_TOKEN_LIFETIME_SECS:
            raise AppIdentityError('exp field too far in future: %s' % json_body)
        latest = exp + CLOCK_SKEW_SECS
        if now < earliest:
            raise AppIdentityError('Token used too early, %d < %d: %s' %
                                   (now, earliest, json_body))
        if now > latest:
            raise AppIdentityError('Token used too late, %d > %d: %s' %
                                   (now, latest, json_body))
        # Check audience.
        if audience is not None:
            aud = parsed.get('aud')
            if aud is None:
                raise AppIdentityError('No aud field in token: %s' % json_body)
            if aud != audience:
                raise AppIdentityError('Wrong recipient, %s != %s: %s' %
                                       (aud, audience, json_body))
        return {'header' : header, 'body' : parsed}

    def accepts_login_get(self):
        # The provider posts/redirects back to us; GET logins are allowed.
        return True

    def login_keywords(self, optional=False):
        # No extra login form fields are required for the OAuth2 flow.
        return set()
class OAuth2PreauthProvider (PreauthProvider):
    """Builds the initial redirect to the OAuth2 provider's authorization
    endpoint, generating the nonce pair used for CSRF protection."""
    key = 'oauth2'

    def __init__(self, config):
        PreauthProvider.__init__(self, config)
        self.nonce_state = nonce_util(config)
        self.nonce_cookie_name = config.oauth2_nonce_cookie_name
        self.cfg=OAuth2Config(config)
        # Split the authorization endpoint so query args can be attached later.
        auth_url=urlparse.urlsplit(self.cfg.get('authorization_endpoint'))
        self.authentication_uri_base = [auth_url.scheme, auth_url.netloc, auth_url.path]
        self.authentication_uri_args = {
            "client_id" : self.cfg.get("client_id"),
            "response_type" : "code",
            "response_mode" : "form_post",
            "scope" : config.oauth2_scope
            }
        if config.oauth2_request_offline_access == True:
            # Ask the provider for offline access (refresh token).
            self.authentication_uri_args["access_type"] = "offline"

    def preauth_info(self, manager, context, db):
        """
        Present any required pre-authentication information (e.g., a web form with options).
        """
        content_type = negotiated_content_type(
            ['application/json', 'text/html'],
            'application/json'
            )
        if content_type == 'text/html':
            # Browser client: redirect immediately (raises web.seeother).
            self.preauth_initiate_login(manager, context, db)
        else:
            # API client: return the redirect URL and the nonce cookie name.
            return simplejson.dumps({
                'authentication_type' : 'oauth2',
                'cookie' : self.nonce_cookie_name,
                'redirect_url' : self.preauth_initiate(manager, context, db, False)})

    def preauth_initiate(self, manager, context, db, do_redirect):
        """
        Initiate a login (redirect to OAuth2 provider)
        """
        self.authentication_uri_args["redirect_uri"] = self.make_uri(str(self.cfg.get('oauth2_redirect_relative_uri')))
        session = self.make_session(db, web.input().get('referrer'))
        auth_request_args = self.make_auth_request_args(session)
        # The cookie nonce lets the callback verify it came from this browser.
        web.setcookie(self.nonce_cookie_name, session.get('auth_cookie_nonce'), secure=True)
        if do_redirect :
            web.debug("redirecting")
            raise web.seeother(self.make_redirect_uri(auth_request_args))
        else:
            return self.make_redirect_uri(auth_request_args)

    def preauth_initiate_login(self, manager, context, db):
        """
        Initiate a login (redirect to OAuth2 provider)
        """
        return self.preauth_initiate(manager, context, db, True)

    def preauth_referrer(self):
        """
        Get the original referring URL (stored in the auth_nonce cookie)
        """
        return nonce_util.get_cookie_url(web.cookies().get(self.nonce_cookie_name))

    def make_uri(self, relative_uri):
        # Absolute URI rooted at this service's home URL.
        return web.ctx.home + relative_uri

    def make_auth_request_args(self, session):
        # Copy the static args and attach the per-session 'state' nonce.
        auth_request_args=dict()
        for key in self.authentication_uri_args.keys():
            auth_request_args[key] = self.authentication_uri_args.get(key)
        auth_request_args['state'] = session['auth_url_nonce']
        return auth_request_args

    def make_session(self, db, referrer):
        """Create the per-login session dict: static args plus a session id,
        the cookie nonce, and its HMAC (the URL nonce)."""
        session=dict()
        for key in self.authentication_uri_args.keys():
            session[key] = self.authentication_uri_args.get(key)
        session['session_id'] = str(uuid.uuid4())
        session['auth_cookie_nonce'] = self.generate_nonce(referrer)
        session['auth_url_nonce'] = self.nonce_state.encode(session['auth_cookie_nonce'], db)
        return session

    @staticmethod
    def generate_nonce(referrer):
        # Nonce format: "<timestamp>.<random base64>.<base64 referrer>".
        nonce = str(int(time.time())) + '.' + base64.urlsafe_b64encode(Random.get_random_bytes(30)) + '.'
        if referrer != None:
            nonce = nonce + base64.urlsafe_b64encode(referrer)
        # Debug for referrer tracing
        web.debug("in generate_nonce, referrer arg is {referrer}, generated nonce is {nonce}".format(referrer=str(referrer), nonce=str(nonce)))
        return nonce

    def make_redirect_uriargs(self, args):
        return urllib.urlencode(args)

    def make_redirect_uri(self, args):
        # Reassemble the authorization URL with encoded query args, no fragment.
        components = self.authentication_uri_base + [self.make_redirect_uriargs(args), None]
        return urlparse.urlunsplit(components)
class OAuth2ClientManage(database.DatabaseClientManage):
    """Client-record management that also persists the OAuth2 token and
    userinfo columns carried in context.user."""

    def __init__(self, provider):
        database.DatabaseClientManage.__init__(self, provider)

    def _create_noauthz_extras(self, manager, context, clientname, db):
        # Extra columns for INSERT: values are SQL-quoted.
        return self.__extracols(manager, context, clientname, db)

    def _get_noauthz_updatecols(self, manager, context, clientname, db):
        # Extra columns for UPDATE: values are passed through unquoted.
        return self.__extracols(manager, context, clientname, db, False)

    def __extracols(self, manager, context, clientname, db, quote=True):
        """
        Generate extra (column, value) pairs for INSERT or UPDATE of user record
        """
        ec = []
        for k in context.user.keys():
            # 'username' is handled separately; skip unset values.
            if k != 'username' and context.user.get(k) != None:
                if quote:
                    ec.append((k, sql_literal(context.user.get(k))))
                else:
                    ec.append((k, context.user.get(k)))
        return ec

    def create_noauthz(self, manager, context, clientname, db=None):
        """Insert a new user row; a no-op when the client already exists."""
        def db_body(db):
            if self.provider._client_exists(db, clientname):
                return
            extras = self._create_noauthz_extras(manager, context, clientname, db)
            extracols = [ extra[0] for extra in extras ]
            extravals = [ extra[1] for extra in extras ]
            # Prepending an empty element to the join produces the leading ','
            # only when extra columns exist.
            results = db.query("""
INSERT INTO %(utable)s (username %(extracols)s) VALUES ( %(uname)s %(extravals)s );
"""
                               % dict(utable=self.provider._table(self.provider.client_storage_name),
                                      uname=sql_literal(context.user.get('username')),
                                      extracols=','.join(extracols and [ '' ] + extracols),
                                      extravals=','.join(extravals and [ '' ] + extravals))
                               )
        if db:
            return db_body(db)
        else:
            return self.provider._db_wrapper(db_body)
class OAuth2Passwd(ClientPasswd):
    """Password-management stub: with OAuth2 the identity provider owns all
    credentials, so every local-password operation is rejected."""

    def create(self, manager, context, clientname, password=None, oldpasswd=None, db=None):
        raise NotImplementedError("Local passwords are not used with OAuth")

    def create_noauthz(self, manager, context, clientname, password=None, oldpasswd=None, db=None):
        raise NotImplementedError("Local passwords are not used with OAuth")

    def delete(self, manager, context, clientname, oldpasswd=None, db=None):
        raise NotImplementedError("Local passwords are not used with OAuth")

    def delete_noauthz(self, manager, context, clientname, db=None):
        raise NotImplementedError("Local passwords are not used with OAuth")
class OAuth2SessionStateProvider(database.DatabaseSessionStateProvider):
    """Database-backed session state provider for OAuth2 sessions."""
    key="oauth2"

    def __init__(self, config):
        database.DatabaseSessionStateProvider.__init__(self, config)
        # Scratch space for OAuth-specific per-provider values.
        self.oauth_context = dict()

    def set_oauth_context_val(self, key, value):
        self.oauth_context[key] = value

    # BUG FIX: this accessor was also named set_oauth_context_val, which
    # shadowed the real setter above (making it impossible to store values)
    # and left no getter. Renamed to get_oauth_context_val.
    def get_oauth_context_val(self, key):
        return self.oauth_context.get(key)
class OAuth2ClientProvider (database.DatabaseClientProvider):
    """Database-backed client provider: one row per OAuth2 user, with the
    extra token/userinfo columns listed below."""

    key = 'oauth2'
    client_storage_name = 'user'
    extra_client_columns = [('id_token', 'json'),
                            ('userinfo', 'json'),
                            ('access_token', 'text'),
                            ('access_token_expiration', 'timestamp'),
                            ('refresh_token', 'text')]  # list of (columnname, typestring) pairs
    summary_storage_name = 'usersummary'
    # data storage format version
    major = 1
    minor = 0

    def __init__(self, config,
                 Login=OAuth2Login,
                 Search=database.DatabaseClientSearch,
                 Manage=OAuth2ClientManage,
                 Passwd=None):
        """Wire up the login/search/manage (and optional passwd) sub-providers."""
        ClientProvider.__init__(self, config)
        database.DatabaseConnection2.__init__(self, config)
        self.login = Login(self)
        self.search = Search(self)
        self.manage = Manage(self)
        if Passwd:
            self.passwd = Passwd(self)
        self.nonce_state = nonce_util(config)
        self.cfg = OAuth2Config(config)
        self.nonce_cookie_name = config.oauth2_nonce_cookie_name
        self.provider_sets_token_nonce = config.oauth2_provider_sets_token_nonce

    def deploy_views(self, db):
        # (Re)create the summary view over the user table.
        if self._table_exists(db, self.summary_storage_name):
            db.query('DROP VIEW %s' % self._table(self.summary_storage_name))
        db.query("""
CREATE VIEW %(summary)s AS
  SELECT *
  FROM %(utable)s u ;
;
"""
                 % dict(utable=self._table(self.client_storage_name),
                        summary=self._table(self.summary_storage_name))
                 )

    def deploy(self, db=None):
        """
        Deploy initial provider state.
        """
        def db_body(db):
            database.DatabaseConnection2.deploy(self)
            tables_added = False
            # User table, including the extra OAuth2 columns.
            if not self._table_exists(db, self.client_storage_name):
                tables_added = True
                db.query("""
CREATE TABLE %(utable)s (
  uid serial PRIMARY KEY,
  username text UNIQUE
  %(extras)s
);
"""
                         % dict(utable=self._table(self.client_storage_name),
                                extras=','.join(self.extra_client_columns and
                                                [''] + ['%s %s' % ec for ec in self.extra_client_columns]))
                         )
            # Nonce signing-key table used by nonce_util.
            if not self._table_exists(db, nonce_util.nonce_table):
                tables_added = True
                db.query("""
CREATE TABLE %(ntable)s (
  key text,
  timeout timestamp
);
"""
                         % dict(ntable=self._table(nonce_util.nonce_table))
                         )
            self.deploy_guard(db, '_client')
            if tables_added:
                self.deploy_views(db)
        if db:
            return db_body(db)
        else:
            return self._db_wrapper(db_body)
class OAuth2Config(collections.MutableMapping):
    """Read-mostly mapping merging, in priority order: runtime overrides, the
    client-secret file ('web' section), OIDC discovery data, and the
    webauthn2 config object. Missing keys read as None, not KeyError."""

    def __init__(self, config):
        # BUG FIX: __setitem__ referenced self.override_data, which was never
        # created, so any assignment raised AttributeError. Overrides are now
        # a real dict and take precedence in lookups.
        self.override_data = dict()
        self.dictionaries = [self.override_data,
                             self.load_client_secret_data(config),
                             self.load_discovery_data(config),
                             config]

    def load_client_secret_data(self, config):
        """Load registered-client parameters (client_id, client_secret, ...)."""
        if config.oauth2_client_secret_file == None:
            raise OAuth2ConfigurationError("No oauth2_client_secret_file configured")
        f = open(config.oauth2_client_secret_file)
        try:
            csd = simplejson.load(f).get('web')
        finally:
            # Close the file even if the JSON fails to parse.
            f.close()
        return csd

    def load_discovery_data(self, config):
        """Fetch the provider's OIDC discovery document (endpoints, jwks_uri, ...)."""
        if config.oauth2_discovery_uri == None:
            raise OAuth2ConfigurationError("No oauth2_discovery_uri configured")
        f = urllib2.urlopen(config.oauth2_discovery_uri)
        try:
            discovery_data = simplejson.load(f)
        finally:
            f.close()
        return discovery_data

    def __getitem__(self, key):
        # First dictionary that has the key wins.
        for d in self.dictionaries:
            if d.has_key(key):
                return d.get(key)
        return None

    def __setitem__(self, key, value):
        return self.override_data.__setitem__(key, value)

    def __delitem__(self, key):
        # Remove the key from every dictionary that has it.
        found_one = False
        for d in self.dictionaries:
            if d.has_key(key):
                found_one = True
                d.__delitem__(key)
        if found_one == False:
            raise KeyError(key)
        return None

    def keys(self):
        # Union of all keys, preserving first-seen order.
        klist = []
        for d in self.dictionaries:
            for k in d.keys():
                if k not in klist:
                    klist.append(k)
        return klist

    def __iter__(self):
        raise NotImplementedError

    def __len__(self):
        return len(self.keys())
class OAuth2Exception(RuntimeError):
    """Base class for all OAuth2 provider errors."""
    pass

class OAuth2SessionGenerationFailed(OAuth2Exception):
    """Raised when a login session could not be created."""
    pass

class OAuth2ProtocolError(OAuth2Exception):
    """Raised when the OAuth2/OIDC message exchange violates the protocol."""
    pass

class Oauth2UserinfoError(OAuth2ProtocolError):
    """Raised when the userinfo response conflicts with the ID token."""
    pass

class OAuth2IDTokenError(OAuth2ProtocolError):
    """Raised when the ID token is missing required claims or has a bad nonce."""
    pass

class OAuth2LoginTimeoutError(OAuth2ProtocolError):
    """Raised when the login nonce has exceeded its hard timeout."""
    pass

class OAuth2ConfigurationError(OAuth2Exception):
    """Raised when required provider configuration is missing."""
    pass
Update for the current version of pyjwkest; also, don't require a discovery URL
(although in practice there will probably always be one, even if it is just a file).
#
# Copyright 2010-2012 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Webauthn2 provider implementations using OAuth2 OpenID Connect. This class inherits from DatabaseConnection2
Provider-specific parameters inherited from DatabaseConnection2 module:
`database_type`
: The database type (e.g., postgres).
`database_dsn`
: The database source name (e.g., "host=localhost user=ermrest password=... dbname=ermrest").
`database_schema`
: The schema name qualifier for provider tables within the database (text or None).
`database_max_retries`
: The number of times to retry transient errors when running independent transactions (int).
Provider-specific parameters specific to OAuth2:
`oauth2_discovery_uri`
: OpenID Connect Discovery 1.0 endpoint
`oauth2_redirect_relative_uri`
: The path that users are redirected to after providing consent/authorization
`oauth2_client_secret_file`
: The file, obtained from an OAuth2 provider (e.g., google) during registration, containing shared secrets and other information.
"""
from providers import *
from webauthn2.util import *
from webauthn2.providers import database
import web
import random
import urllib
import urllib2
import uuid
import urlparse
import web
import simplejson
import psycopg2
import oauth2client.client
from jwkest import jwk
from jwkest import jws
from oauth2client.crypt import AppIdentityError, _urlsafe_b64decode, CLOCK_SKEW_SECS, AUTH_TOKEN_LIFETIME_SECS, MAX_TOKEN_LIFETIME_SECS, PyCryptoVerifier
import time
import base64
from Crypto import Random
from Crypto.Hash.HMAC import HMAC
import Crypto.Hash.SHA256
import json
from datetime import datetime, timedelta
import webauthn2.providers
import collections
# Default configuration values for this provider; deployments may override
# any of these through the webauthn2 configuration machinery.
config_built_ins = web.storage(
    # Items needed for methods inherited from database provider
    database_type= 'postgres',
    database_dsn= 'dbname=',
    database_schema= 'webauthn2',
    database_max_retries= 5,
    # OAuth-specific items
    # Lifetime (seconds) of a nonce signing key; soft timeout is half of this.
    oauth2_nonce_hard_timeout=3600,
    oauth2_nonce_cookie_name='oauth2_auth_nonce',
    # File with parameters, including the shared secret, shared between the client and the OAuth provider
    oauth2_client_secret_file=None,
    oauth2_discovery_uri=None,
    oauth2_redirect_relative_uri="/authn/session",
    # If True, ask the provider for offline access (i.e., a refresh token).
    oauth2_request_offline_access=False,
    oauth2_scope="openid email profile",
    # If True, the provider echoes our nonce back in the ID token's 'nonce' claim.
    oauth2_provider_sets_token_nonce=False
    )
class nonce_util(database.DatabaseConnection2):
    """
    Manages HMAC keys (stored in the database) used to sign and verify the
    login-flow nonces.  Keys expire after ``hard_timeout`` seconds and a new
    key is inserted once the newest key is older than ``soft_timeout``
    (half of the hard timeout), so verification can fall back to older keys
    during the overlap window.
    """
    # Digest algorithm used for nonce HMACs.
    algorithm=Crypto.Hash.SHA256
    # Unqualified table name; qualified with database_schema in __init__.
    nonce_table='oauth2_nonce'

    def __init__(self, config):
        database.DatabaseConnection2.__init__(self, config)
        self.config_params = {'hard_timeout' : config.oauth2_nonce_hard_timeout,
                              'soft_timeout' : config.oauth2_nonce_hard_timeout / 2,
                              'nonce_table' : config.database_schema + '.' + self.nonce_table}
        # Cached list of (timeout, raw_key) tuples, newest first.
        self.keys=[]

    def get_keys(self, db):
        """Rotate keys if needed, then reload and return the key cache (newest first)."""
        self.update_timeout(db)
        def db_body(db):
            return db.select(self.config_params['nonce_table'], what="timeout, key", order="timeout desc")
        if db:
            textkeys = db_body(db)
        else:
            # No caller-supplied connection: run in our own transaction wrapper.
            textkeys = self._db_wrapper(db_body)
        self.keys=[]
        for k in textkeys:
            self.keys.append((k.get('timeout'), self.texttokey(k.get('key'))))
        return self.keys

    def update_timeout(self, db, force=False):
        """Purge expired keys and insert a fresh one when the newest key is stale.

        When force is False, the database round-trip is skipped entirely if any
        cached key is still comfortably inside the soft-timeout window.
        """
        if not force:
            for k in self.keys:
                if datetime.now() + timedelta(0, self.config_params['soft_timeout']) < k[0]:
                    return
        def db_body(db):
            db.query("delete from %(nonce_table)s where timeout < now()" % self.config_params)
            # Insert a new key only if no sufficiently-fresh key exists; the
            # WHERE NOT EXISTS guard makes concurrent rotations idempotent.
            db.query("""
insert into %(nonce_table)s (key, timeout)
  select $new_key, now() + interval '%(hard_timeout)d seconds'
  where not exists
    (select 1 from %(nonce_table)s where timeout - now() > interval '%(soft_timeout)d seconds')
""" % self.config_params,
                     vars={'new_key' : self.keytotext(self.make_key())})
        if db:
            db_body(db)
        else:
            self._db_wrapper(db_body)

    def get_current_key(self, db):
        """Return the newest raw key."""
        return self.get_keys(db)[0][1]

    @staticmethod
    def get_cookie_ts(cookie):
        """Extract the integer timestamp from a 'ts.nonce.referrer' cookie value."""
        return int(cookie.split('.')[0])

    @staticmethod
    def get_cookie_url(cookie):
        """Extract the (base64-encoded) original referrer URL from the cookie, or None."""
        url = base64.b64decode(cookie.split('.')[2])
        # Debug for referrer tracing
        web.debug("in get_cookie_url, cookie is '{cookie}', url is '{url}'".format(cookie=str(cookie), url=str(url)))
        if url == '':
            return None
        return url

    @staticmethod
    def keytotext(key):
        """Encode a raw key for text storage in the database."""
        return base64.b64encode(key)

    @staticmethod
    def texttokey(text):
        """Decode a stored text key back to raw bytes."""
        return base64.b64decode(text)

    def make_key(self):
        """Generate a fresh random key sized to the digest."""
        return Random.get_random_bytes(self.algorithm.digest_size)

    def time_ok(self, value):
        """True if the timestamp value is within the hard timeout window."""
        return time.time() - value < self.config_params['hard_timeout']

    def encode(self, msg, db):
        """HMAC msg with the current key; returns a hex digest."""
        h = HMAC(self.get_current_key(db), msg, self.algorithm)
        return h.hexdigest()

    def hash_matches(self, msg, hashed, db):
        """Verify hashed against msg using any live key.

        On failure, force a key rotation/reload once and retry, in case the
        cache was stale relative to the database.
        """
        retval = self._check_hash_match(msg, hashed, db)
        if retval == False:
            self.update_timeout(db, True)
            retval = self._check_hash_match(msg, hashed, db)
        return retval

    def _check_hash_match(self, msg, hashed, db):
        # Try every live key, newest first.
        for k in self.get_keys(db):
            h = HMAC(k[1], msg, self.algorithm)
            if h.hexdigest() == hashed:
                return True
        return False
# Names exported to webauthn2's provider loader.
__all__ = [
    'OAuth2SessionStateProvider',
    'OAuth2ClientProvider',
    'config_built_ins'
    ]
class OAuth2 (database.DatabaseConnection2):
    """Common base for OAuth2 provider components, sharing the DB connection."""

    # Storage format version for provider tables (not the software release).
    major = 1
    minor = 0

    def __init__(self, config):
        # Delegate all connection setup to the database-provider base class.
        database.DatabaseConnection2.__init__(self, config)
class OAuth2Login (ClientLogin):
    """
    Implements the OAuth2/OpenID Connect redirect callback: validates the
    login nonce, exchanges the authorization code for tokens, validates the
    signed ID token and access token, fetches userinfo, and records the user.
    """

    def __init__(self, provider):
        ClientLogin.__init__(self, provider)

    def login(self, manager, context, db, **kwargs):
        """
        Return "username" in the form iss:sub.

        It is expected that the caller will store the resulting username into context.client for reuse.

        Raises OAuth2ProtocolError / OAuth2LoginTimeoutError / OAuth2IDTokenError
        on any validation failure.
        """
        vals = web.input()
        # Check that this request came from the same user who initiated the oauth flow
        nonce_vals = {
            'auth_url_nonce' : vals.get('state'),
            'auth_cookie_nonce' : web.cookies().get(self.provider.nonce_cookie_name)
            }
        if nonce_vals['auth_url_nonce'] == None:
            raise OAuth2ProtocolError("No authn_nonce in initial redirect")
        if (nonce_vals['auth_cookie_nonce'] == None):
            raise OAuth2ProtocolError("No authn nonce cookie")
        # Has the cookie nonce expired?
        try:
            ts = nonce_util.get_cookie_ts(nonce_vals['auth_cookie_nonce'])
        except:
            raise OAuth2ProtocolError('bad nonce cookie')
        if not self.provider.nonce_state.time_ok(ts):
            raise OAuth2LoginTimeoutError('Login timed out')
        # The 'state' URL parameter must be the HMAC of the cookie nonce
        # (see OAuth2PreauthProvider.make_session).
        if not self.provider.nonce_state.hash_matches(nonce_vals['auth_cookie_nonce'], nonce_vals['auth_url_nonce'], db):
            raise OAuth2ProtocolError('nonce mismatch')
        # we'll write this to the db if all goes well
        # NOTE(review): redirect_full_payload is computed but never used below.
        redirect_full_payload=simplejson.dumps(vals, separators=(',', ':'))
        # Get id token
        token_args = {
            'code' : vals.get('code'),
            'client_id' : self.provider.cfg.get('client_id'),
            'client_secret' : self.provider.cfg.get('client_secret'),
            'redirect_uri' : web.ctx.home + web.ctx.path,
            'nonce' : nonce_vals['auth_url_nonce'],
            'grant_type' : 'authorization_code'}
        # Timestamp taken before the token exchange; used for expires_in math.
        base_timestamp = datetime.now()
        u=urllib2.urlopen(self.provider.cfg.get('token_endpoint'), urllib.urlencode(token_args))
        payload=simplejson.load(u)
        u.close()
        # NOTE(review): token_payload is computed but never used below.
        token_payload=simplejson.dumps(payload, separators=(',', ':'))
        raw_id_token=payload.get('id_token')
        # Validate id token
        # Fetch the provider's published JWKS and extract the public keys.
        u=urllib2.urlopen(self.provider.cfg.get('jwks_uri'))
        raw_keys = jwk.KEYS()
        raw_keys.load_jwks(u.read())
        u.close()
        keys=[]
        for k in raw_keys:
            keys.append(k.key.exportKey())
        id_result=self.verify_signed_jwt_with_keys(raw_id_token, keys, self.provider.cfg.get('client_id'))
        id_token=id_result.get('body')
        id_header=id_result.get('header')
        if id_token.get('iss') == None or id_token.get('iss').strip() == '':
            raise OAuth2IDTokenError('No issuer in ID token')
        if id_token.get('sub') == None or id_token.get('sub').strip() == '':
            raise OAuth2IDTokenError('No subject in ID token')
        # Only some providers echo the nonce back in the ID token; configurable.
        if self.provider.provider_sets_token_nonce and id_token.get('nonce') != nonce_vals['auth_url_nonce']:
            raise OAuth2IDTokenError('Bad nonce in ID token')
        # Validate access token
        self.validate_access_token(id_header.get('alg'), id_token.get('at_hash'), payload.get('access_token'))
        # Get user directory data. Right now we're assuming the server will return json.
        # TODO: in theory the return value could be signed jwt
        req=urllib2.Request(self.provider.cfg.get('userinfo_endpoint'), headers={'Authorization' : 'Bearer ' + payload.get('access_token')})
        f = urllib2.urlopen(req)
        userinfo=simplejson.load(f)
        f.close()
        username = id_token.get('iss') + ':' + id_token.get('sub')
        # Update user table
        self.create_or_update_user(manager, context, username, id_token, userinfo, base_timestamp, payload, db)
        return username

    def create_or_update_user(self, manager, context, username, id_token, userinfo, base_timestamp, token_payload, db):
        """Stash token/userinfo data in context.user and insert or update the DB row."""
        context.user['id_token'] = simplejson.dumps(id_token, separators=(',', ':'))
        context.user['userinfo'] = simplejson.dumps(userinfo, separators=(',', ':'))
        context.user['access_token'] = token_payload.get('access_token')
        # expires_in is relative seconds; convert to an absolute timestamp.
        context.user['access_token_expiration'] = base_timestamp + timedelta(seconds=int(token_payload.get('expires_in')))
        context.user['refresh_token'] = token_payload.get('refresh_token')
        if self.provider._client_exists(db, username):
            manager.clients.manage.update_noauthz(manager, context, username, db)
        else:
            context.user['username'] = username
            manager.clients.manage.create_noauthz(manager, context, username, db)

    def validate_userinfo(self, userinfo, id_token):
        """Cross-check userinfo response fields against the validated ID token."""
        if userinfo.get('sub') != id_token.get('sub'):
            raise Oauth2UserinfoError("Subject mismatch")
        for key in ['iss', 'aud']:
            if userinfo.get(key) != None and userinfo.get(key) != id_token.get(key):
                raise Oauth2UserinfoError("Bad value for " + key)

    @staticmethod
    def validate_access_token(alg, expected_hash, access_token):
        """Check the access token against the ID token's at_hash claim.

        Uses jws.left_hash (left half of the digest, per OIDC at_hash rules).
        """
        if alg == None:
            raise OAuth2ProtocolError("No hash algorithm specified")
        # left_hash defaults to SHA-256; pass alg explicitly otherwise.
        if alg.lower() == 'rs256':
            hash = jws.left_hash(access_token)
        else:
            hash = jws.left_hash(access_token, alg)
        if hash == None:
            raise OAuth2Exception("Hash failed, alg = '" + alg + "', token is '" + access_token + "'")
        if hash != expected_hash:
            raise OAuth2Exception("Bad hash value in access token")

    @staticmethod
    def verify_signed_jwt_with_keys(jwt, keys, audience):
        """Verify a JWT against public keys.

        See http://self-issued.info/docs/draft-jones-json-web-token.html.

        Args:
          jwt: string, A JWT.
          keys: list of PEM-encoded public keys to try.
          audience: expected 'aud' claim, or None to skip the audience check.

        Returns:
          dict with 'header' and 'body': the deserialized JSON header and payload.

        Raises:
          AppIdentityError if any checks are failed.
        """
        segments = jwt.split('.')
        if len(segments) != 3:
            raise AppIdentityError('Wrong number of segments in token: %s' % jwt)
        signed = '%s.%s' % (segments[0], segments[1])
        header = simplejson.loads(_urlsafe_b64decode(segments[0]))
        signature = _urlsafe_b64decode(segments[2])
        # Parse token.
        json_body = _urlsafe_b64decode(segments[1])
        try:
            parsed = json.loads(json_body)
        except:
            raise AppIdentityError('Can\'t parse token: %s' % json_body)
        # Check signature: accept if any supplied key verifies it.
        verified = False
        for pem in keys:
            verifier = PyCryptoVerifier.from_string(pem, False)
            if verifier.verify(signed, signature):
                verified = True
                break
        if not verified:
            raise AppIdentityError('Invalid token signature: %s' % jwt)
        # Check creation timestamp.
        iat = parsed.get('iat')
        if iat is None:
            raise AppIdentityError('No iat field in token: %s' % json_body)
        earliest = iat - CLOCK_SKEW_SECS
        # Check expiration timestamp.
        now = long(time.time())
        exp = parsed.get('exp')
        if exp is None:
            raise AppIdentityError('No exp field in token: %s' % json_body)
        if exp >= now + MAX_TOKEN_LIFETIME_SECS:
            raise AppIdentityError('exp field too far in future: %s' % json_body)
        latest = exp + CLOCK_SKEW_SECS
        if now < earliest:
            raise AppIdentityError('Token used too early, %d < %d: %s' %
                                   (now, earliest, json_body))
        if now > latest:
            raise AppIdentityError('Token used too late, %d > %d: %s' %
                                   (now, latest, json_body))
        # Check audience.
        if audience is not None:
            aud = parsed.get('aud')
            if aud is None:
                raise AppIdentityError('No aud field in token: %s' % json_body)
            if aud != audience:
                raise AppIdentityError('Wrong recipient, %s != %s: %s' %
                                       (aud, audience, json_body))
        return {'header' : header, 'body' : parsed}

    def accepts_login_get(self):
        """The OAuth2 callback arrives as a GET (or form POST) redirect."""
        return True

    def login_keywords(self, optional=False):
        """No extra login form keywords are required by this provider."""
        return set()
class OAuth2PreauthProvider (PreauthProvider):
    """
    Initiates the OAuth2 authorization-code flow: builds the provider
    authorization URL, generates the paired nonce cookie / state value,
    and redirects the browser to the identity provider.
    """
    key = 'oauth2'

    def __init__(self, config):
        PreauthProvider.__init__(self, config)
        self.nonce_state = nonce_util(config)
        self.nonce_cookie_name = config.oauth2_nonce_cookie_name
        self.cfg=OAuth2Config(config)
        # Split the authorization endpoint so query args can be rebuilt later.
        auth_url=urlparse.urlsplit(self.cfg.get('authorization_endpoint'))
        self.authentication_uri_base = [auth_url.scheme, auth_url.netloc, auth_url.path]
        self.authentication_uri_args = {
            "client_id" : self.cfg.get("client_id"),
            "response_type" : "code",
            "response_mode" : "form_post",
            "scope" : config.oauth2_scope
            }
        if config.oauth2_request_offline_access == True:
            self.authentication_uri_args["access_type"] = "offline"

    def preauth_info(self, manager, context, db):
        """
        Present any required pre-authentication information (e.g., a web form with options).
        """
        content_type = negotiated_content_type(
            ['application/json', 'text/html'],
            'application/json'
            )
        # Browsers get an immediate redirect; API clients get JSON describing it.
        if content_type == 'text/html':
            self.preauth_initiate_login(manager, context, db)
        else:
            return simplejson.dumps({
                'authentication_type' : 'oauth2',
                'cookie' : self.nonce_cookie_name,
                'redirect_url' : self.preauth_initiate(manager, context, db, False)})

    def preauth_initiate(self, manager, context, db, do_redirect):
        """
        Initiate a login (redirect to OAuth2 provider)

        Sets the nonce cookie and either raises a 303 redirect (do_redirect)
        or returns the authorization URL as a string.
        """
        self.authentication_uri_args["redirect_uri"] = self.make_uri(str(self.cfg.get('oauth2_redirect_relative_uri')))
        session = self.make_session(db, web.input().get('referrer'))
        auth_request_args = self.make_auth_request_args(session)
        web.setcookie(self.nonce_cookie_name, session.get('auth_cookie_nonce'), secure=True)
        if do_redirect :
            web.debug("redirecting")
            raise web.seeother(self.make_redirect_uri(auth_request_args))
        else:
            return self.make_redirect_uri(auth_request_args)

    def preauth_initiate_login(self, manager, context, db):
        """
        Initiate a login (redirect to OAuth2 provider)
        """
        return self.preauth_initiate(manager, context, db, True)

    def preauth_referrer(self):
        """
        Get the original referring URL (stored in the auth_nonce cookie)
        """
        return nonce_util.get_cookie_url(web.cookies().get(self.nonce_cookie_name))

    def make_uri(self, relative_uri):
        """Turn a server-relative path into an absolute URI for this host."""
        return web.ctx.home + relative_uri

    def make_auth_request_args(self, session):
        """Copy the base authorization args and attach this session's state value."""
        auth_request_args=dict()
        for key in self.authentication_uri_args.keys():
            auth_request_args[key] = self.authentication_uri_args.get(key)
        auth_request_args['state'] = session['auth_url_nonce']
        return auth_request_args

    def make_session(self, db, referrer):
        """Create the per-login session dict: cookie nonce plus its HMAC ('state')."""
        session=dict()
        for key in self.authentication_uri_args.keys():
            session[key] = self.authentication_uri_args.get(key)
        session['session_id'] = str(uuid.uuid4())
        session['auth_cookie_nonce'] = self.generate_nonce(referrer)
        # auth_url_nonce is the HMAC of the cookie nonce; verified at callback.
        session['auth_url_nonce'] = self.nonce_state.encode(session['auth_cookie_nonce'], db)
        return session

    @staticmethod
    def generate_nonce(referrer):
        """Build the 'timestamp.random.referrer' cookie nonce string."""
        nonce = str(int(time.time())) + '.' + base64.urlsafe_b64encode(Random.get_random_bytes(30)) + '.'
        if referrer != None:
            nonce = nonce + base64.urlsafe_b64encode(referrer)
        # Debug for referrer tracing
        web.debug("in generate_nonce, referrer arg is {referrer}, generated nonce is {nonce}".format(referrer=str(referrer), nonce=str(nonce)))
        return nonce

    def make_redirect_uriargs(self, args):
        """URL-encode the authorization request arguments."""
        return urllib.urlencode(args)

    def make_redirect_uri(self, args):
        """Reassemble the full authorization URL with the encoded query string."""
        components = self.authentication_uri_base + [self.make_redirect_uriargs(args), None]
        return urlparse.urlunsplit(components)
class OAuth2ClientManage(database.DatabaseClientManage):
    """User-record management; supplies the OAuth2-specific extra columns
    (tokens, userinfo) for INSERT/UPDATE of the user table."""

    def __init__(self, provider):
        database.DatabaseClientManage.__init__(self, provider)

    def _create_noauthz_extras(self, manager, context, clientname, db):
        # Quoted values: spliced directly into the INSERT below.
        return self.__extracols(manager, context, clientname, db)

    def _get_noauthz_updatecols(self, manager, context, clientname, db):
        # Unquoted values — presumably the base class parameterizes the
        # UPDATE itself; TODO confirm against DatabaseClientManage.
        return self.__extracols(manager, context, clientname, db, False)

    def __extracols(self, manager, context, clientname, db, quote=True):
        """
        Generate extra (column, value) pairs for INSERT or UPDATE of user record
        """
        ec = []
        for k in context.user.keys():
            # 'username' is handled separately; skip unset values.
            if k != 'username' and context.user.get(k) != None:
                if quote:
                    ec.append((k, sql_literal(context.user.get(k))))
                else:
                    ec.append((k, context.user.get(k)))
        return ec

    def create_noauthz(self, manager, context, clientname, db=None):
        """Insert a new user row (no-op if it already exists), without authz checks."""
        def db_body(db):
            if self.provider._client_exists(db, clientname):
                return
            extras = self._create_noauthz_extras(manager, context, clientname, db)
            extracols = [ extra[0] for extra in extras ]
            extravals = [ extra[1] for extra in extras ]
            # The `extracols and [''] + extracols` trick yields a leading comma
            # only when there are extra columns.
            results = db.query("""
INSERT INTO %(utable)s (username %(extracols)s) VALUES ( %(uname)s %(extravals)s );
"""
                               % dict(utable=self.provider._table(self.provider.client_storage_name),
                                      uname=sql_literal(context.user.get('username')),
                                      extracols=','.join(extracols and [ '' ] + extracols),
                                      extravals=','.join(extravals and [ '' ] + extravals))
                               )
        if db:
            return db_body(db)
        else:
            return self.provider._db_wrapper(db_body)
class OAuth2Passwd(ClientPasswd):
    """Password management stub: OAuth2 delegates authentication to the
    identity provider, so every local-password operation is unsupported."""

    def create_noauthz(self, manager, context, clientname, password=None, oldpasswd=None, db=None):
        raise NotImplementedError("Local passwords are not used with OAuth")

    def delete_noauthz(self, manager, context, clientname, db=None):
        raise NotImplementedError("Local passwords are not used with OAuth")

    def create(self, manager, context, clientname, password=None, oldpasswd=None, db=None):
        raise NotImplementedError("Local passwords are not used with OAuth")

    def delete(self, manager, context, clientname, oldpasswd=None, db=None):
        raise NotImplementedError("Local passwords are not used with OAuth")
class OAuth2SessionStateProvider(database.DatabaseSessionStateProvider):
    """Session state provider that additionally carries a free-form OAuth2
    context dictionary (e.g. per-session token data)."""
    key="oauth2"

    def __init__(self, config):
        database.DatabaseSessionStateProvider.__init__(self, config)
        # Arbitrary provider context values keyed by name.
        self.oauth_context = dict()

    def set_oauth_context_val(self, key, value):
        """Store value under key in the OAuth context."""
        self.oauth_context[key] = value

    def get_oauth_context_val(self, key):
        """Return the stored value for key, or None if unset.

        BUG FIX: this accessor was originally (mis)named set_oauth_context_val,
        shadowing the two-argument setter above and making it unreachable.
        """
        return self.oauth_context.get(key)
class OAuth2ClientProvider (database.DatabaseClientProvider):
    """
    Top-level OAuth2 client provider: wires together login, search and
    management sub-providers and deploys the user/nonce tables.
    """
    key = 'oauth2'
    client_storage_name = 'user'
    # Extra columns (beyond uid/username) created on the user table.
    extra_client_columns = [('id_token', 'json'),
                            ('userinfo', 'json'),
                            ('access_token', 'text'),
                            ('access_token_expiration', 'timestamp'),
                            ('refresh_token', 'text')]  # list of (columnname, typestring) pairs
    summary_storage_name = 'usersummary'
    # data storage format version
    major = 1
    minor = 0

    def __init__(self, config,
                 Login=OAuth2Login,
                 Search=database.DatabaseClientSearch,
                 Manage=OAuth2ClientManage,
                 Passwd=None):
        ClientProvider.__init__(self, config)
        database.DatabaseConnection2.__init__(self, config)
        self.login = Login(self)
        self.search = Search(self)
        self.manage = Manage(self)
        # Passwd is normally None for OAuth2 (no local passwords).
        if Passwd:
            self.passwd = Passwd(self)
        self.nonce_state = nonce_util(config)
        self.cfg = OAuth2Config(config)
        self.nonce_cookie_name = config.oauth2_nonce_cookie_name
        self.provider_sets_token_nonce = config.oauth2_provider_sets_token_nonce

    def deploy_views(self, db):
        """(Re)create the user summary view over the user table."""
        if self._table_exists(db, self.summary_storage_name):
            db.query('DROP VIEW %s' % self._table(self.summary_storage_name))
        db.query("""
CREATE VIEW %(summary)s AS
  SELECT *
  FROM %(utable)s u ;
;
"""
                 % dict(utable=self._table(self.client_storage_name),
                        summary=self._table(self.summary_storage_name))
                 )

    def deploy(self, db=None):
        """
        Deploy initial provider state.

        Creates the user and nonce tables if missing, installs access guards,
        and rebuilds the summary view when any table was added.
        """
        def db_body(db):
            database.DatabaseConnection2.deploy(self)
            tables_added = False
            if not self._table_exists(db, self.client_storage_name):
                tables_added = True
                db.query("""
CREATE TABLE %(utable)s (
  uid serial PRIMARY KEY,
  username text UNIQUE
  %(extras)s
);
"""
                         % dict(utable=self._table(self.client_storage_name),
                                extras=','.join(self.extra_client_columns and
                                                [''] + ['%s %s' % ec for ec in self.extra_client_columns]))
                         )
            if not self._table_exists(db, nonce_util.nonce_table):
                tables_added = True
                db.query("""
CREATE TABLE %(ntable)s (
  key text,
  timeout timestamp
);
"""
                         % dict(ntable=self._table(nonce_util.nonce_table))
                         )
            self.deploy_guard(db, '_client')
            if tables_added:
                self.deploy_views(db)
        if db:
            return db_body(db)
        else:
            return self._db_wrapper(db_body)
class OAuth2Config(collections.MutableMapping):
    """
    Layered read-mostly mapping of OAuth2 configuration.

    Lookup precedence: runtime overrides, then the client-secret file's 'web'
    section, then the provider's discovery document, then the base config.
    Missing keys yield None (never KeyError) from __getitem__.
    """
    def __init__(self, config):
        # BUG FIX: __setitem__ wrote to self.override_data, which was never
        # initialized; create it and give it highest lookup precedence so
        # assignments are actually visible through __getitem__.
        self.override_data = dict()
        self.dictionaries = [self.override_data,
                             self.load_client_secret_data(config),
                             self.load_discovery_data(config),
                             config]

    def load_client_secret_data(self, config):
        """Load the registered client parameters (id, secret, ...) from the
        configured secret file's 'web' section.

        Raises OAuth2ConfigurationError if no file is configured.
        """
        if config.oauth2_client_secret_file == None:
            raise OAuth2ConfigurationError("No oauth2_client_secret_file configured")
        f = open(config.oauth2_client_secret_file)
        csd = simplejson.load(f).get('web')
        f.close()
        return csd

    def load_discovery_data(self, config):
        """Fetch the OpenID Connect discovery document, or {} if unconfigured."""
        if config.oauth2_discovery_uri == None:
            discovery_data = dict()
        else:
            f = urllib2.urlopen(config.oauth2_discovery_uri)
            discovery_data = simplejson.load(f)
            f.close()
        return discovery_data

    def __getitem__(self, key):
        # First layer containing the key wins; absent keys map to None.
        for d in self.dictionaries:
            if d.has_key(key):
                return d.get(key)
        return None

    def __setitem__(self, key, value):
        return self.override_data.__setitem__(key, value)

    def __delitem__(self, key):
        # Remove the key from every layer; KeyError only if no layer had it.
        found_one = False
        for d in self.dictionaries:
            if d.has_key(key):
                found_one = True
                d.__delitem__(key)
        if found_one == False:
            raise KeyError(key)
        return None

    def keys(self):
        """Union of keys across all layers, first-seen order preserved."""
        klist = []
        for d in self.dictionaries:
            for k in d.keys():
                if not klist.__contains__(k):
                    klist.append(k)
        return klist

    def __iter__(self):
        raise NotImplementedError

    def __len__(self):
        return len(self.keys())
class OAuth2Exception(RuntimeError):
    """Base class for all OAuth2 provider errors."""

class OAuth2SessionGenerationFailed(OAuth2Exception):
    """A login session could not be generated."""

class OAuth2ProtocolError(OAuth2Exception):
    """The OAuth2 message exchange violated the expected protocol."""

class OAuth2UserinfoError(OAuth2ProtocolError):
    """The userinfo response was inconsistent with the ID token."""

class OAuth2IDTokenError(OAuth2ProtocolError):
    """The ID token failed a validation check."""

class OAuth2LoginTimeoutError(OAuth2ProtocolError):
    """The login flow took longer than the nonce hard timeout."""

class OAuth2ConfigurationError(OAuth2Exception):
    """The provider configuration is incomplete or invalid."""
# ---- end of oauth2 provider module ----
from django.db import models
from django.dispatch import receiver
from django.contrib.auth.models import User
from django.db.models.signals import pre_save, pre_init, post_save, post_delete
import datetime
class Bill(models.Model):
    """An invoice issued to a coworker (User); lines live in BillLine."""
    user = models.ForeignKey(User)
    # Invoice reference like 'FYYYYMMnnn'; filled in by define_number() below.
    number = models.CharField(max_length=10, unique=True, blank=True)
    isPaid = models.BooleanField(default=False)
    billing_date = models.DateField()
    # Sum of line totals; maintained by set_bill_amount() below.
    amount = models.FloatField(blank=True, default=0)

    def __unicode__(self):
        return self.number

    def coworker_name(self):
        """Full name of the billed user, for display."""
        return '%s %s' % (self.user.first_name, self.user.last_name)

class Service(models.Model):
    """A billable service with a unit price."""
    reference = models.CharField(max_length=5)
    name = models.CharField(max_length=128)
    description = models.CharField(max_length=1024)
    price = models.FloatField()

    def __unicode__(self):
        """ Return name as object representation """
        return self.name

class BillLine(models.Model):
    """One service line on a bill; total defaults to price * quantity."""
    bill = models.ForeignKey(Bill)
    service = models.ForeignKey(Service)
    quantity = models.SmallIntegerField(default=1)
    # Filled in by compute_total() below when left unset.
    total = models.FloatField(blank=True)

class UserProfile(models.Model):
    """ extend User class """
    user = models.OneToOneField(User)
    billing_address = models.TextField(max_length=1024)
@receiver(pre_save, sender=BillLine)
def compute_total(sender, instance, **kwargs):
    """Fill in the line total (price * quantity) before saving, unless set."""
    if instance.total:
        return
    instance.total = instance.service.price * instance.quantity
@receiver(pre_save, sender=Bill)
def define_number(sender, instance, **kwargs):
    """Assign the next bill number ('F' + YYYYMM + 3-digit sequence) on first save."""
    if instance.number:
        # Already numbered: nothing to do.
        return
    try:
        # Assume the highest id is the most recently numbered bill.
        previous = sender.objects.latest('id')
        seq = '%03d' % (int(previous.number[-3:]) + 1)
    except sender.DoesNotExist:
        # No Bill in the database yet.
        seq = '001'
    instance.number = 'F%s%s' % (datetime.date.today().strftime('%Y%m'), seq)
@receiver(post_save, sender=BillLine)
@receiver(post_delete, sender=BillLine)
def set_bill_amount(sender, instance, **kwargs):
    """Recompute and persist the parent bill's amount from all of its lines."""
    parent = instance.bill
    parent.amount = sum(line.total for line in parent.billline_set.all())
    parent.save()
# add help text to the auto-filled Bill/BillLine fields (revision below)
from django.db import models
from django.dispatch import receiver
from django.contrib.auth.models import User
from django.db.models.signals import pre_save, pre_init, post_save, post_delete
from django.utils.translation import ugettext as _
import datetime
class Bill(models.Model):
    """An invoice issued to a coworker (User); lines live in BillLine."""
    user = models.ForeignKey(User)
    # Invoice reference like 'FYYYYMMnnn'; filled in by define_number() below.
    number = models.CharField(max_length=10, unique=True, blank=True,
                              help_text=_('This value is set automatically. Remove in case of error.'))
    isPaid = models.BooleanField(default=False)
    billing_date = models.DateField()
    # Sum of line totals; maintained by set_bill_amount() below.
    amount = models.FloatField(blank=True, default=0,
                               help_text=_('The amount is computed automatically.'))

    def __unicode__(self):
        return self.number

    def coworker_name(self):
        """Full name of the billed user, for display."""
        return '%s %s' % (self.user.first_name, self.user.last_name)

class Service(models.Model):
    """A billable service with a unit price."""
    reference = models.CharField(max_length=5)
    name = models.CharField(max_length=128)
    description = models.CharField(max_length=1024)
    price = models.FloatField()

    def __unicode__(self):
        """ Return name as object representation """
        return self.name

class BillLine(models.Model):
    """One service line on a bill; total defaults to price * quantity."""
    bill = models.ForeignKey(Bill)
    service = models.ForeignKey(Service)
    quantity = models.SmallIntegerField(default=1)
    # Filled in by compute_total() below when left unset.
    total = models.FloatField(blank=True,
                              help_text=_('This value is computed automatically'))

class UserProfile(models.Model):
    """ extend User class """
    user = models.OneToOneField(User)
    billing_address = models.TextField(max_length=1024)
@receiver(pre_save, sender=BillLine)
def compute_total(sender, instance, **kwargs):
    """Fill in the line total (price * quantity) before saving, unless set."""
    if instance.total:
        return
    instance.total = instance.service.price * instance.quantity
@receiver(pre_save, sender=Bill)
def define_number(sender, instance, **kwargs):
    """Assign the next bill number ('F' + YYYYMM + 3-digit sequence) on first save."""
    if instance.number:
        # Already numbered: nothing to do.
        return
    try:
        # Assume the highest id is the most recently numbered bill.
        previous = sender.objects.latest('id')
        seq = '%03d' % (int(previous.number[-3:]) + 1)
    except sender.DoesNotExist:
        # No Bill in the database yet.
        seq = '001'
    instance.number = 'F%s%s' % (datetime.date.today().strftime('%Y%m'), seq)
@receiver(post_save, sender=BillLine)
@receiver(post_delete, sender=BillLine)
def set_bill_amount(sender, instance, **kwargs):
    """Recompute and persist the parent bill's amount from all of its lines."""
    parent = instance.bill
    parent.amount = sum(line.total for line in parent.billline_set.all())
    parent.save()
# ---- end of billing models module ----
import importlib
import androguard.session as session_module
from androguard.gui.DataModel import *
from androguard.gui.apiwindow import APIWindow
from androguard.gui.binwindow import binWidget
from androguard.gui.fileloading import FileLoadingThread
from androguard.gui.helpers import class2func
from androguard.gui.methodswindow import MethodsWindow
from androguard.gui.resourceswindow import ResourcesWindow
from androguard.gui.sourcewindow import SourceWindow
from androguard.gui.stringswindow import StringsWindow
from androguard.gui.treewindow import TreeWindow
import os
import logging
log = logging.getLogger("androguard.gui")
class TabsWindow(QtWidgets.QTabWidget):
    """Central tab widget with a context menu for bulk-closing tabs.

    bin_windows maps tab tooltip titles to bin widgets so that only the
    currently selected hex view stays enabled.
    """
    def __init__(self, bin_windows, parent=None):
        super(TabsWindow, self).__init__(parent)
        self.bin_windows = bin_windows
        self.setTabsClosable(True)
        self.tabCloseRequested.connect(self.tabCloseRequestedHandler)
        self.currentChanged.connect(self.currentTabChanged)
        self.closeAllTabs = QtWidgets.QAction(
            "Close all tabs",
            self,
            triggered=self.actioncloseAllTabs)
        self.closeOtherTabs = QtWidgets.QAction(
            "Close other tabs",
            self,
            triggered=self.actioncloseOtherTabs)
        self.closeLeftTabs = QtWidgets.QAction(
            "Close left tabs",
            self,
            triggered=self.actioncloseLeftTabs)
        self.closeRightTabs = QtWidgets.QAction(
            "Close right tabs",
            self,
            triggered=self.actioncloseRightTabs)

    def actioncloseAllTabs(self):
        """Remove every tab."""
        self.clear()

    def actioncloseOtherTabs(self):
        """Remove every tab except the current one.

        Close the right side first, then the left, iterating high-to-low so
        removals never shift indices still to be visited.
        """
        for i in range(self.count() - 1, self.currentIndex(), -1):
            self.removeTab(i)
        for i in range(self.currentIndex() - 1, -1, -1):
            self.removeTab(i)

    def actioncloseLeftTabs(self):
        """Remove all tabs left of the current one."""
        for i in range(self.currentIndex() - 1, -1, -1):
            self.removeTab(i)

    def actioncloseRightTabs(self):
        """Remove all tabs right of the current one.

        BUG FIX: the loop previously started at self.count(), one past the
        last valid index, relying on Qt silently ignoring the invalid call.
        """
        for i in range(self.count() - 1, self.currentIndex(), -1):
            self.removeTab(i)

    def tabCloseRequestedHandler(self, index):
        self.removeTab(index)

    def currentTabChanged(self, index):
        """Enable the bin window behind the selected tab; disable all others."""
        log.debug("curentTabChanged -> %d (%s)" % (index, self.tabToolTip(index)))
        if index == -1:
            # No tabs remain.
            return
        current_title = self.tabToolTip(index)
        for title in self.bin_windows:
            if title != current_title:
                log.debug("Disable %s" % title)
                self.bin_windows[title].disable()
        if current_title in self.bin_windows:
            # BUG FIX: previously logged the stale loop variable `title`
            # instead of the tab actually being enabled.
            log.debug("Enable %s" % current_title)
            self.bin_windows[current_title].enable()

    def contextMenuEvent(self, event):
        """Show the bulk-close actions on right click."""
        menu = QtWidgets.QMenu(self)
        menu.addAction(self.closeAllTabs)
        menu.addAction(self.closeOtherTabs)
        menu.addAction(self.closeLeftTabs)
        menu.addAction(self.closeRightTabs)
        menu.exec_(event.globalPos())
class MainWindow(QtWidgets.QMainWindow):
"""Main window:
self.central: QTabWidget in center area
self.dock: QDockWidget in left area
self.tree: TreeWindow(QTreeWidget) in self.dock
"""
    # NOTE(review): the mutable default `session_module.Session()` is shared
    # across all MainWindow instances created without an explicit session.
    def __init__(self, parent=None, session=session_module.Session(), input_file=None, input_plugin=None):
        super(MainWindow, self).__init__(parent)
        self.session = session
        # Maps tab titles to binWidget instances (shared with TabsWindow).
        self.bin_windows = {}
        # Order matters: menus, then central tabs, then tree/dock, then session.
        self.setupFileMenu()
        self.setupViewMenu()
        self.setupPluginsMenu()
        self.setupHelpMenu()
        self.setupCentral()
        self.setupEmptyTree()
        self.setupDock()
        self.setupSession()
        self.setWindowTitle("Androguard GUI")
        self.showStatus("Androguard GUI")
        self.installEventFilter(self)
        # Plugin (if any) runs after the input file finishes loading.
        self.input_plugin = input_plugin
        if input_file:
            self._openFile(input_file)
        root = os.path.dirname(os.path.realpath(__file__))
        self.setWindowIcon(QtGui.QIcon(os.path.join(root, "androguard.ico")))
def eventFilter(self, watched, event):
for bin_window in list(self.bin_windows.values()):
bin_window.eventFilter(watched, event)
return False
    def showStatus(self, msg):
        """Helper function called by any window to display a message
           in status bar.
        """
        log.debug(msg)
        self.statusBar().showMessage(msg)
def about(self):
"""User clicked About menu. Display a Message box."""
QtWidgets.QMessageBox.about(self, "About Androguard GUI",
"<p><b>Androguard GUI</b> is basically a GUI for Androguard :)." \
"<br>Have fun !</p>")
def _no_apk_loaded(self):
"""Show a message if no APK was loaded yet..."""
QtWidgets.QMessageBox.information(self, "No APK loaded yet!",
"<p>There was no APK loaded yet. Please load one using File->Open.</p>")
    def setupSession(self):
        """Create the background loader thread and hook its completion signal."""
        log.debug("Setup Session")
        self.fileLoadingThread = FileLoadingThread(self)
        self.fileLoadingThread.file_loaded.connect(self.loadedFile)
def loadedFile(self, success):
if not success:
self.showStatus("Analysis of %s failed :(" %
str(self.fileLoadingThread.file_path))
return
self.updateDockWithTree()
self.cleanCentral()
self.showStatus("Analysis of %s done!" %
str(self.fileLoadingThread.file_path))
if self.input_plugin:
self._runPlugin(self.input_plugin)
def openFile(self):
self.session.reset()
filepath, _ = QtWidgets.QFileDialog.getOpenFileName(
self, "Open File", '.',
"Android Files (*.apk *.jar *.dex *.odex *.dey);;Androguard Session (*.ag)")
self._openFile(filepath)
def _openFile(self, filepath=None):
if filepath:
self.setupTree()
self.showStatus("Analyzing %s..." % str(filepath))
self.fileLoadingThread.load(filepath)
def addFile(self):
if not self.session.isOpen():
log.debug(self.session.analyzed_digest)
self._no_apk_loaded()
return
filepath, _ = QtWidgets.QFileDialog.getOpenFileName(
self, "Add File", '',
"Android Files (*.apk *.jar *.dex *.odex *.dey)")
if filepath:
self.showStatus("Analyzing %s..." % str(filepath))
self.fileLoadingThread.load(filepath)
def saveFile(self):
"""User clicked Save menu. Display a Dialog to ask whwre to save."""
filepath, _ = QtWidgets.QFileDialog.getSaveFileName(
self, "Save File", '', "Androguard Session (*.ag)")
if filepath:
self.showStatus("Saving %s..." % str(filepath))
self.saveSession(filepath)
self.showStatus("Saved Session to %s!" % str(filepath))
    def saveSession(self, filepath):
        """Save androguard session.

        On failure, the partially written file is removed and a warning logged.
        """
        try:
            session_module.Save(self.session, filepath)
        except RuntimeError as e:
            log.exception(e)
            # Don't leave a truncated/corrupt session file behind.
            os.remove(filepath)
            log.warning("Session not saved")
def _runPlugin(self, filepath):
log.debug("RUN plugin from %s" % filepath)
module_name = os.path.splitext(os.path.basename(filepath))[0]
f, filename, description = importlib.find_module(
module_name,
[os.path.dirname(filepath)])
log.debug("%s %s %s", f, filename, description)
mod = importlib.load_module(module_name, f, filename, description)
mod.PluginEntry(self.session)
def openRunPluginWindow(self):
filepath, _ = QtWidgets.QFileDialog.getOpenFileName(
self, "Open File", '',
"Python Files (*.py);;")
if filepath:
self._runPlugin(filepath)
    def closeEvent(self, event):
        """Clicked [x] to close main window: always allow the close."""
        event.accept()
def setupEmptyTree(self):
"""Setup empty Tree at startup. """
if hasattr(self, "tree"):
del self.tree
self.tree = QtWidgets.QTreeWidget(self)
self.tree.header().close()
    def setupDock(self):
        """Setup empty Dock at startup; hosts the class tree on the left."""
        self.dock = QtWidgets.QDockWidget("Classes", self)
        self.dock.setWidget(self.tree)
        # Fixed dock: no floating, moving, or closing.
        self.dock.setFeatures(QtWidgets.QDockWidget.NoDockWidgetFeatures)
        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dock)

    def setupTree(self):
        """Replace the dock content with a session-backed class tree."""
        log.debug("Setup Tree")
        self.tree = TreeWindow(win=self, session=self.session)
        self.tree.setWindowTitle("Tree model")
        self.dock.setWidget(self.tree)

    def setupCentral(self):
        """Setup empty window supporting tabs at startup."""
        self.central = TabsWindow(self.bin_windows, self)
        self.setCentralWidget(self.central)

    def cleanCentral(self):
        """Close every tab in the central area."""
        self.central.actioncloseAllTabs()
def setupFileMenu(self):
log.debug("Setup File Menu")
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction("&Open...", self.openFile, "Ctrl+O")
self.fileMenu.addAction("&Add...", self.addFile, "Ctrl+A")
self.fileMenu.addAction("&Save...", self.saveFile, "Ctrl+S")
self.fileMenu.addAction("E&xit", self.close, "Ctrl+Q")
def setupViewMenu(self):
log.debug("Setup View Menu")
self.viewMenu = self.menuBar().addMenu("&View")
self.viewMenu.addAction("&Strings...", self.openStringsWindow)
self.viewMenu.addAction("&Methods...", self.openMethodsWindow)
self.viewMenu.addAction("&API...", self.openAPIWindow)
self.viewMenu.addAction("&APK...", self.openApkWindow)
self.viewMenu.addAction("&Resources...", self.openResourcesWindow)
def setupPluginsMenu(self):
log.debug("Setup Plugins Menu")
self.pluginsMenu = self.menuBar().addMenu("&Plugins")
self.pluginsMenu.addAction("&Run...", self.openRunPluginWindow)
    def setupHelpMenu(self):
        """Create the Help menu (About dialogs)."""
        log.debug("Setup Help Menu")
        self.helpMenu = self.menuBar().addMenu("&Help")
        self.helpMenu.addAction("&About", self.about)
        self.helpMenu.addAction("About &Qt", QtWidgets.qApp.aboutQt)
    def updateDockWithTree(self, empty=False):
        """Update the classes tree. Called when
        - a new APK has been imported
        - a class has been renamed (displayed in the tree)
        """
        # NOTE(review): the `empty` parameter is never used here — confirm
        # whether any caller still passes it before removing it.
        self.setupTree()
        self.tree.fill()
    def openStringsWindow(self):
        """Open a new Strings tab and focus it (tooltip is used as tab key)."""
        stringswin = StringsWindow(win=self, session=self.session)
        self.central.addTab(stringswin, stringswin.title)
        self.central.setTabToolTip(self.central.indexOf(stringswin),
                                   stringswin.title)
        self.central.setCurrentWidget(stringswin)
    def openMethodsWindow(self):
        """Open a new Methods tab and focus it (tooltip is used as tab key)."""
        methodswin = MethodsWindow(win=self, session=self.session)
        self.central.addTab(methodswin, methodswin.title)
        self.central.setTabToolTip(self.central.indexOf(methodswin),
                                   methodswin.title)
        self.central.setCurrentWidget(methodswin)
    def openResourcesWindow(self):
        """Open a new Resources tab and focus it (tooltip is used as tab key)."""
        resourceswin = ResourcesWindow(win=self, session=self.session)
        self.central.addTab(resourceswin, resourceswin.title)
        self.central.setTabToolTip(self.central.indexOf(resourceswin),
                                   resourceswin.title)
        self.central.setCurrentWidget(resourceswin)
    def openAPIWindow(self):
        """Open a new API tab and focus it (tooltip is used as tab key)."""
        apiwin = APIWindow(win=self, session=self.session)
        self.central.addTab(apiwin, apiwin.title)
        self.central.setTabToolTip(self.central.indexOf(apiwin),
                                   apiwin.title)
        self.central.setCurrentWidget(apiwin)
def openApkWindow(self):
log.debug("openApkWindow for %s" % self.session.analyzed_apk)
if not self.fileLoadingThread.file_path:
self._no_apk_loaded()
return
bin_window = binWidget(self, ApkModel(self.session.get_objects_apk(self.fileLoadingThread.file_path)[0]), "APK")
bin_window.activateWindow()
self.central.addTab(bin_window, bin_window.title)
self.central.setCurrentWidget(bin_window)
self.bin_windows[bin_window.title] = bin_window
    def openBinWindow(self, current_class):
        """Open (or re-focus) a disassembly tab for ``current_class``.

        Existing tabs are found by tooltip; a new binWidget is registered
        in ``self.bin_windows`` under the class title.
        """
        log.debug("openBinWindow for %s" % current_class)
        # The analysis is fetched even when the tab already exists; only the
        # newly-created widget actually consumes it.
        dx = self.session.get_analysis(current_class)
        bin_window = self.getMeOpenedWindowIfExists(current_class.current_title)
        if not bin_window:
            bin_window = binWidget(self, DexClassModel(current_class, dx), current_class.get_name())
            bin_window.activateWindow()
            self.central.addTab(bin_window, current_class.current_title)
            self.central.setTabToolTip(self.central.indexOf(bin_window),
                                       current_class.current_title)
            self.bin_windows[current_class.current_title] = bin_window
        bin_window.enable()
        self.central.setCurrentWidget(bin_window)
    def openSourceWindow(self, current_class, method=None):
        """Main function to open a decompile source window
        It checks if it already opened and open that tab,
        otherwise, initialize a new window.

        When ``method`` is given, the (new or existing) window scrolls to it.
        """
        log.debug("openSourceWindow for %s" % current_class)
        # Source tabs are keyed by the class title suffixed with "(S)".
        sourcewin = self.getMeOpenedWindowIfExists(current_class.current_title + "(S)")
        if not sourcewin:
            current_filename = self.session.get_filename_by_class(current_class)
            current_digest = self.session.get_digest_by_class(current_class)
            sourcewin = SourceWindow(win=self,
                                     current_class=current_class,
                                     current_title=current_class.current_title + "(S)",
                                     current_filename=current_filename,
                                     current_digest=current_digest,
                                     session=self.session)
            sourcewin.reload_java_sources()
            self.central.addTab(sourcewin, sourcewin.title)
            self.central.setTabToolTip(self.central.indexOf(sourcewin),
                                       sourcewin.title)
        if method:
            sourcewin.browse_to_method(method)
        self.central.setCurrentWidget(sourcewin)
def getMeOpenedWindowIfExists(self, name):
for idx in range(self.central.count()):
if name == self.central.tabToolTip(idx):
log.debug("Tab %s already opened at: %d" %
(name, idx))
return self.central.widget(idx)
return None
def doesClassExist(self, path):
arg = class2func(path)
try:
getattr(self.d, arg)
except AttributeError:
return False
return True
# changelog: fix loading of plugins in gui
import sys
import androguard.session as session_module
from androguard.gui.DataModel import *
from androguard.gui.apiwindow import APIWindow
from androguard.gui.binwindow import binWidget
from androguard.gui.fileloading import FileLoadingThread
from androguard.gui.helpers import class2func
from androguard.gui.methodswindow import MethodsWindow
from androguard.gui.resourceswindow import ResourcesWindow
from androguard.gui.sourcewindow import SourceWindow
from androguard.gui.stringswindow import StringsWindow
from androguard.gui.treewindow import TreeWindow
import os
import logging
log = logging.getLogger("androguard.gui")
def load_module(module_name, file_path):
    """
    Load a module by name and file path.

    This function should work with python 2.7 and 3.x.
    Returns None if the module could not be loaded — including when the
    file is missing or raises while being executed.  Callers treat a None
    result as "not a valid plugin" and inform the user.
    """
    if sys.version_info >= (3, 5,):
        import importlib.util
        spec = importlib.util.spec_from_file_location(module_name, file_path)
        if not spec:
            return None
        module = importlib.util.module_from_spec(spec)
        try:
            spec.loader.exec_module(module)
        except Exception:
            # Fix: previously any error raised by the plugin file propagated
            # into the GUI even though the docstring promised None.
            return None
        return module
    else:
        import imp
        try:
            return imp.load_source(module_name, file_path)
        except Exception:
            return None
class TabsWindow(QtWidgets.QTabWidget):
    """Central tab widget holding all analysis windows.

    Shares the ``bin_windows`` registry (title -> binWidget) with the main
    window and keeps only the window of the selected tab enabled.
    """

    def __init__(self, bin_windows, parent=None):
        super(TabsWindow, self).__init__(parent)
        self.bin_windows = bin_windows
        self.setTabsClosable(True)
        self.tabCloseRequested.connect(self.tabCloseRequestedHandler)
        self.currentChanged.connect(self.currentTabChanged)
        # Actions shown in the right-click context menu (contextMenuEvent).
        self.closeAllTabs = QtWidgets.QAction(
            "Close all tabs",
            self,
            triggered=self.actioncloseAllTabs)
        self.closeOtherTabs = QtWidgets.QAction(
            "Close other tabs",
            self,
            triggered=self.actioncloseOtherTabs)
        self.closeLeftTabs = QtWidgets.QAction(
            "Close left tabs",
            self,
            triggered=self.actioncloseLeftTabs)
        self.closeRightTabs = QtWidgets.QAction(
            "Close right tabs",
            self,
            triggered=self.actioncloseRightTabs)

    def actioncloseAllTabs(self):
        """Remove every tab."""
        self.clear()

    def actioncloseOtherTabs(self):
        """Close every tab except the current one (right-to-left on each side)."""
        for i in range(self.currentIndex() - 1, -1, -1):
            self.removeTab(i)
        # Fix: start at the last valid index, count() - 1.  The original
        # started at count(), an out-of-range index that only "worked"
        # because Qt silently ignores invalid removeTab() calls.
        for i in range(self.count() - 1, self.currentIndex(), -1):
            self.removeTab(i)

    def actioncloseLeftTabs(self):
        """Close every tab left of the current one."""
        for i in range(self.currentIndex() - 1, -1, -1):
            self.removeTab(i)

    def actioncloseRightTabs(self):
        """Close every tab right of the current one."""
        # Fix: same off-by-one as actioncloseOtherTabs — start at count() - 1.
        for i in range(self.count() - 1, self.currentIndex(), -1):
            self.removeTab(i)

    def tabCloseRequestedHandler(self, index):
        self.removeTab(index)

    def currentTabChanged(self, index):
        """Disable all registered binWidgets except the newly selected one."""
        log.debug("curentTabChanged -> %d (%s)" % (index, self.tabToolTip(index)))
        if index == -1:
            return
        # Tabs are identified by their tooltip, which the open*Window
        # methods set to the window title.
        current_title = self.tabToolTip(index)
        for title in self.bin_windows:
            if title != current_title:
                log.debug("Disable %s" % title)
                self.bin_windows[title].disable()
        if current_title in self.bin_windows:
            # Fix: log the enabled window, not the last loop variable.
            log.debug("Enable %s" % current_title)
            self.bin_windows[current_title].enable()

    def contextMenuEvent(self, event):
        menu = QtWidgets.QMenu(self)
        menu.addAction(self.closeAllTabs)
        menu.addAction(self.closeOtherTabs)
        menu.addAction(self.closeLeftTabs)
        menu.addAction(self.closeRightTabs)
        menu.exec_(event.globalPos())
class MainWindow(QtWidgets.QMainWindow):
    """Main window:
    self.central: QTabWidget in center area
    self.dock: QDockWidget in left area
    self.tree: TreeWindow(QTreeWidget) in self.dock
    """

    def __init__(self, parent=None, session=None, input_file=None, input_plugin=None):
        """
        :param session: androguard Session to work on; a fresh Session is
            created when omitted.  (Fix: the previous default was a mutable
            default argument — a single Session built at import time and
            shared by every MainWindow instance.)
        :param input_file: optional file to analyze immediately at startup
        :param input_plugin: optional plugin to run once the file is loaded
        """
        super(MainWindow, self).__init__(parent)
        self.session = session if session is not None else session_module.Session()
        self.bin_windows = {}
        self.setupFileMenu()
        self.setupViewMenu()
        self.setupPluginsMenu()
        self.setupHelpMenu()
        self.setupCentral()
        self.setupEmptyTree()
        self.setupDock()
        self.setupSession()
        self.setWindowTitle("Androguard GUI")
        self.showStatus("Androguard GUI")
        self.installEventFilter(self)
        self.input_plugin = input_plugin
        if input_file:
            self._openFile(input_file)
        root = os.path.dirname(os.path.realpath(__file__))
        self.setWindowIcon(QtGui.QIcon(os.path.join(root, "androguard.ico")))

    def eventFilter(self, watched, event):
        """Forward every event to the registered binWidgets; never consume it."""
        for bin_window in list(self.bin_windows.values()):
            bin_window.eventFilter(watched, event)
        return False

    def showStatus(self, msg):
        """Helper function called by any window to display a message
        in status bar.
        """
        log.debug(msg)
        self.statusBar().showMessage(msg)

    def about(self):
        """User clicked About menu. Display a Message box."""
        QtWidgets.QMessageBox.about(self, "About Androguard GUI",
                                    "<p><b>Androguard GUI</b> is basically a GUI for Androguard :)." \
                                    "<br>Have fun !</p>")

    def _no_apk_loaded(self):
        """Show a message if no APK was loaded yet..."""
        QtWidgets.QMessageBox.information(self, "No APK loaded yet!",
                                          "<p>There was no APK loaded yet. Please load one using File->Open.</p>")

    def setupSession(self):
        """Create the background loading thread and hook its completion signal."""
        log.debug("Setup Session")
        self.fileLoadingThread = FileLoadingThread(self)
        self.fileLoadingThread.file_loaded.connect(self.loadedFile)

    def loadedFile(self, success):
        """Slot called when the loading thread finishes (successfully or not)."""
        if not success:
            self.showStatus("Analysis of %s failed :(" %
                            str(self.fileLoadingThread.file_path))
            return
        self.updateDockWithTree()
        self.cleanCentral()
        self.showStatus("Analysis of %s done!" %
                        str(self.fileLoadingThread.file_path))
        # A plugin passed on the command line runs once loading succeeded.
        if self.input_plugin:
            self._runPlugin(self.input_plugin)

    def openFile(self):
        """Reset the session and ask the user for a file to analyze."""
        self.session.reset()
        filepath, _ = QtWidgets.QFileDialog.getOpenFileName(
            self, "Open File", '.',
            "Android Files (*.apk *.jar *.dex *.odex *.dey);;Androguard Session (*.ag)")
        self._openFile(filepath)

    def _openFile(self, filepath=None):
        """Start the asynchronous analysis of ``filepath`` (no-op when empty)."""
        if filepath:
            self.setupTree()
            self.showStatus("Analyzing %s..." % str(filepath))
            self.fileLoadingThread.load(filepath)

    def addFile(self):
        """Add another file to the already-open session."""
        if not self.session.isOpen():
            log.debug(self.session.analyzed_digest)
            self._no_apk_loaded()
            return
        filepath, _ = QtWidgets.QFileDialog.getOpenFileName(
            self, "Add File", '',
            "Android Files (*.apk *.jar *.dex *.odex *.dey)")
        if filepath:
            self.showStatus("Analyzing %s..." % str(filepath))
            self.fileLoadingThread.load(filepath)

    def saveFile(self):
        """User clicked Save menu. Display a Dialog to ask where to save."""
        filepath, _ = QtWidgets.QFileDialog.getSaveFileName(
            self, "Save File", '', "Androguard Session (*.ag)")
        if filepath:
            self.showStatus("Saving %s..." % str(filepath))
            self.saveSession(filepath)
            self.showStatus("Saved Session to %s!" % str(filepath))

    def saveSession(self, filepath):
        """Save androguard session; a partially written file is removed."""
        try:
            session_module.Save(self.session, filepath)
        except RuntimeError as e:
            log.exception(e)
            os.remove(filepath)
            log.warning("Session not saved")

    def _runPlugin(self, filepath):
        """Load ``filepath`` as a module and call its PluginEntry(session)."""
        module_name = os.path.splitext(os.path.basename(filepath))[0]
        log.debug("RUN plugin '{}' from {}".format(module_name, filepath))
        mod = load_module(module_name, filepath)
        log.debug("Loaded %s", mod)
        if not mod or not hasattr(mod, 'PluginEntry'):
            QtWidgets.QMessageBox.warning(self, "Not a valid Plugin",
                                          "<p>This python file does not look like a valid plugin.</p>")
            return
        mod.PluginEntry(self.session)

    def openRunPluginWindow(self):
        """Ask the user for a python plugin file and run it via _runPlugin."""
        filepath, _ = QtWidgets.QFileDialog.getOpenFileName(
            self, "Open File", '',
            "Python Files (*.py);;")
        if filepath:
            self._runPlugin(filepath)

    def closeEvent(self, event):
        """Clicked [x] to close main window"""
        event.accept()

    def setupEmptyTree(self):
        """Setup empty Tree at startup. """
        if hasattr(self, "tree"):
            del self.tree
        self.tree = QtWidgets.QTreeWidget(self)
        self.tree.header().close()

    def setupDock(self):
        """Setup empty Dock at startup. """
        self.dock = QtWidgets.QDockWidget("Classes", self)
        self.dock.setWidget(self.tree)
        # The dock cannot be closed, moved or floated.
        self.dock.setFeatures(QtWidgets.QDockWidget.NoDockWidgetFeatures)
        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dock)

    def setupTree(self):
        """Replace the placeholder tree with a session-backed TreeWindow."""
        log.debug("Setup Tree")
        self.tree = TreeWindow(win=self, session=self.session)
        self.tree.setWindowTitle("Tree model")
        self.dock.setWidget(self.tree)

    def setupCentral(self):
        """Setup empty window supporting tabs at startup. """
        self.central = TabsWindow(self.bin_windows, self)
        self.setCentralWidget(self.central)

    def cleanCentral(self):
        """Close every tab in the central tab widget."""
        self.central.actioncloseAllTabs()

    def setupFileMenu(self):
        log.debug("Setup File Menu")
        self.fileMenu = self.menuBar().addMenu("&File")
        self.fileMenu.addAction("&Open...", self.openFile, "Ctrl+O")
        self.fileMenu.addAction("&Add...", self.addFile, "Ctrl+A")
        self.fileMenu.addAction("&Save...", self.saveFile, "Ctrl+S")
        self.fileMenu.addAction("E&xit", self.close, "Ctrl+Q")

    def setupViewMenu(self):
        log.debug("Setup View Menu")
        self.viewMenu = self.menuBar().addMenu("&View")
        self.viewMenu.addAction("&Strings...", self.openStringsWindow)
        self.viewMenu.addAction("&Methods...", self.openMethodsWindow)
        self.viewMenu.addAction("&API...", self.openAPIWindow)
        self.viewMenu.addAction("&APK...", self.openApkWindow)
        self.viewMenu.addAction("&Resources...", self.openResourcesWindow)

    def setupPluginsMenu(self):
        log.debug("Setup Plugins Menu")
        self.pluginsMenu = self.menuBar().addMenu("&Plugins")
        self.pluginsMenu.addAction("&Run...", self.openRunPluginWindow)

    def setupHelpMenu(self):
        log.debug("Setup Help Menu")
        self.helpMenu = self.menuBar().addMenu("&Help")
        self.helpMenu.addAction("&About", self.about)
        self.helpMenu.addAction("About &Qt", QtWidgets.qApp.aboutQt)

    def updateDockWithTree(self, empty=False):
        """Update the classes tree. Called when
        - a new APK has been imported
        - a class has been renamed (displayed in the tree)
        """
        self.setupTree()
        self.tree.fill()

    def openStringsWindow(self):
        """Open a new Strings tab and focus it (tooltip is used as tab key)."""
        stringswin = StringsWindow(win=self, session=self.session)
        self.central.addTab(stringswin, stringswin.title)
        self.central.setTabToolTip(self.central.indexOf(stringswin),
                                   stringswin.title)
        self.central.setCurrentWidget(stringswin)

    def openMethodsWindow(self):
        """Open a new Methods tab and focus it."""
        methodswin = MethodsWindow(win=self, session=self.session)
        self.central.addTab(methodswin, methodswin.title)
        self.central.setTabToolTip(self.central.indexOf(methodswin),
                                   methodswin.title)
        self.central.setCurrentWidget(methodswin)

    def openResourcesWindow(self):
        """Open a new Resources tab and focus it."""
        resourceswin = ResourcesWindow(win=self, session=self.session)
        self.central.addTab(resourceswin, resourceswin.title)
        self.central.setTabToolTip(self.central.indexOf(resourceswin),
                                   resourceswin.title)
        self.central.setCurrentWidget(resourceswin)

    def openAPIWindow(self):
        """Open a new API tab and focus it."""
        apiwin = APIWindow(win=self, session=self.session)
        self.central.addTab(apiwin, apiwin.title)
        self.central.setTabToolTip(self.central.indexOf(apiwin),
                                   apiwin.title)
        self.central.setCurrentWidget(apiwin)

    def openApkWindow(self):
        """Open a tab showing the APK model of the currently loaded file."""
        log.debug("openApkWindow for %s" % self.session.analyzed_apk)
        if not self.fileLoadingThread.file_path:
            self._no_apk_loaded()
            return
        bin_window = binWidget(self, ApkModel(self.session.get_objects_apk(self.fileLoadingThread.file_path)[0]), "APK")
        bin_window.activateWindow()
        self.central.addTab(bin_window, bin_window.title)
        # Fix: set the tooltip like the other open*Window methods — tab
        # lookup and TabsWindow.currentTabChanged are keyed on the tooltip.
        self.central.setTabToolTip(self.central.indexOf(bin_window),
                                   bin_window.title)
        self.central.setCurrentWidget(bin_window)
        self.bin_windows[bin_window.title] = bin_window

    def openBinWindow(self, current_class):
        """Open (or re-focus) a disassembly tab for ``current_class``."""
        log.debug("openBinWindow for %s" % current_class)
        dx = self.session.get_analysis(current_class)
        bin_window = self.getMeOpenedWindowIfExists(current_class.current_title)
        if not bin_window:
            bin_window = binWidget(self, DexClassModel(current_class, dx), current_class.get_name())
            bin_window.activateWindow()
            self.central.addTab(bin_window, current_class.current_title)
            self.central.setTabToolTip(self.central.indexOf(bin_window),
                                       current_class.current_title)
            self.bin_windows[current_class.current_title] = bin_window
        bin_window.enable()
        self.central.setCurrentWidget(bin_window)

    def openSourceWindow(self, current_class, method=None):
        """Main function to open a decompile source window
        It checks if it already opened and open that tab,
        otherwise, initialize a new window.
        """
        log.debug("openSourceWindow for %s" % current_class)
        sourcewin = self.getMeOpenedWindowIfExists(current_class.current_title + "(S)")
        if not sourcewin:
            current_filename = self.session.get_filename_by_class(current_class)
            current_digest = self.session.get_digest_by_class(current_class)
            sourcewin = SourceWindow(win=self,
                                     current_class=current_class,
                                     current_title=current_class.current_title + "(S)",
                                     current_filename=current_filename,
                                     current_digest=current_digest,
                                     session=self.session)
            sourcewin.reload_java_sources()
            self.central.addTab(sourcewin, sourcewin.title)
            self.central.setTabToolTip(self.central.indexOf(sourcewin),
                                       sourcewin.title)
        if method:
            sourcewin.browse_to_method(method)
        self.central.setCurrentWidget(sourcewin)

    def getMeOpenedWindowIfExists(self, name):
        """Return the central tab whose tooltip equals ``name``, or None."""
        for idx in range(self.central.count()):
            if name == self.central.tabToolTip(idx):
                log.debug("Tab %s already opened at: %d" %
                          (name, idx))
                return self.central.widget(idx)
        return None

    def doesClassExist(self, path):
        """Check whether ``self.d`` exposes an attribute for class ``path``."""
        arg = class2func(path)
        try:
            getattr(self.d, arg)
        except AttributeError:
            return False
        return True
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import warnings
import string
import re
import math
from collections import Counter
import pkg_resources
import repoze.lru
from pyphen import Pyphen
# Words considered "easy" for the Dale-Chall based metrics, read from the
# resource file bundled with the package (one word per line, utf-8).
easy_word_set = {
    ln.decode('utf-8').strip() for ln in
    pkg_resources.resource_stream('textstat', 'easy_words.txt')
}
# Per-language parameters for the Flesch Reading Ease formula
# (fre_base - fre_sentence_length * ASL - fre_syll_per_word * ASW) and the
# "difficult word" syllable threshold.  Missing keys fall back to "en"
# (see textstatistics.__get_lang_cfg).
langs = {
    "en": {  # Default config
        "fre_base": 206.835,
        "fre_sentence_length": 1.015,
        "fre_syll_per_word": 84.6,
        "syllable_threshold": 3,
    },
    "de": {
        # Toni Amstad
        "fre_base": 180,
        "fre_sentence_length": 1,
        "fre_syll_per_word": 58.5,
    },
    "es": {
        # Fernandez Huerta Readability Formula
        "fre_base": 206.84,
        "fre_sentence_length": 1.02,
        "fre_syll_per_word": 0.6,
    },
    "fr": {
        # presumably the Kandel-Moles French adaptation — TODO confirm
        "fre_base": 207,
        "fre_sentence_length": 1.015,
        "fre_syll_per_word": 73.6,
    },
    "it": {
        # Flesch-Vacca
        "fre_base": 217,
        "fre_sentence_length": 1.3,
        "fre_syll_per_word": 0.6,
    },
    "nl": {
        # Flesch-Douma
        "fre_base": 206.835,
        "fre_sentence_length": 0.93,
        "fre_syll_per_word": 77,
    },
    "pl": {
        # Only overrides the difficult-word threshold; FRE params fall back to "en".
        "syllable_threshold": 4,
    },
    "ru": {
        "fre_base": 206.835,
        "fre_sentence_length": 1.3,
        "fre_syll_per_word": 60.1,
    },
}
def legacy_round(number, points=0):
    """Round to ``points`` decimals with ties going away from zero
    (python 2 style), instead of banker's rounding."""
    factor = 10 ** points
    shifted = number * factor + math.copysign(0.5, number)
    return float(math.floor(shifted)) / factor
def get_grade_suffix(grade):
    """
    Select correct ordinal suffix ('st', 'nd', 'rd' or 'th') for a grade.
    """
    # 11, 12 and 13 are special-cased before the last digit is examined.
    if grade % 100 in (11, 12, 13):
        return 'th'
    return {1: 'st', 2: 'nd', 3: 'rd'}.get(grade % 10, 'th')
class textstatistics:
    """Compute readability statistics for a text.

    Public methods take the text as their first argument and are memoized
    with ``repoze.lru``; :meth:`set_lang` flushes the caches that depend on
    the configured language.
    """

    __lang = "en_US"         # locale used by Pyphen for hyphenation
    text_encoding = "utf-8"  # encoding assumed when bytes are passed in

    def set_lang(self, lang):
        """Switch the active language and clear language-dependent caches."""
        self.__lang = lang
        self.syllable_count._cache.clear()
        self.avg_syllables_per_word._cache.clear()
        self.flesch_reading_ease._cache.clear()
        self.flesch_kincaid_grade._cache.clear()
        self.polysyllabcount._cache.clear()
        self.smog_index._cache.clear()
        self.linsear_write_formula._cache.clear()
        self.difficult_words._cache.clear()
        # Fix: difficult_words_list is built from syllable_count too, so
        # its cache is just as stale after a language switch.
        self.difficult_words_list._cache.clear()
        self.dale_chall_readability_score._cache.clear()
        self.gunning_fog._cache.clear()
        self.spache_readability._cache.clear()
        self.dale_chall_readability_score_v2._cache.clear()
        self.text_standard._cache.clear()
        self.reading_time._cache.clear()

    @repoze.lru.lru_cache(maxsize=128)
    def char_count(self, text, ignore_spaces=True):
        """
        Count the characters in a text.

        Spaces are excluded by default; pass ``ignore_spaces=False``
        to include them in the count.
        """
        if ignore_spaces:
            text = text.replace(" ", "")
        return len(text)

    @repoze.lru.lru_cache(maxsize=128)
    def letter_count(self, text, ignore_spaces=True):
        """
        Count the letters in a text (characters minus punctuation).

        Spaces are excluded by default; pass ``ignore_spaces=False``
        to include them in the count.
        """
        if ignore_spaces:
            text = text.replace(" ", "")
        return len(self.remove_punctuation(text))

    @staticmethod
    def remove_punctuation(text):
        """Return ``text`` with every ASCII punctuation character removed."""
        return ''.join(ch for ch in text if ch not in string.punctuation)

    @repoze.lru.lru_cache(maxsize=128)
    def lexicon_count(self, text, removepunct=True):
        """
        Function to return total lexicon (words in lay terms) counts in a text
        """
        if removepunct:
            text = self.remove_punctuation(text)
        count = len(text.split())
        return count

    @repoze.lru.lru_cache(maxsize=128)
    def syllable_count(self, text, lang=None):
        """
        Count the syllables in a text using Pyphen hyphenation.

        Every word counts for at least one syllable.
        """
        if lang:
            warnings.warn(
                "The 'lang' argument has been moved to "
                "'textstat.set_lang(<lang>)'. This argument will be removed "
                "in the future.",
                DeprecationWarning
            )
        if isinstance(text, bytes):
            text = text.decode(self.text_encoding)
        text = text.lower()
        text = self.remove_punctuation(text)
        if not text:
            return 0
        dic = Pyphen(lang=self.__lang)
        count = 0
        # Fix: split() instead of split(' ') — otherwise runs of spaces or
        # newlines produce empty/merged "words" that distort the count
        # (each empty string was counted as one syllable).
        for word in text.split():
            word_hyphenated = dic.inserted(word)
            count += max(1, word_hyphenated.count("-") + 1)
        return count

    @repoze.lru.lru_cache(maxsize=128)
    def sentence_count(self, text):
        """
        Sentence count of a text.

        Splits on ., ? or ! (optionally followed by a closing quote or
        bracket) when followed by whitespace and a capital letter;
        "sentences" of two words or fewer are ignored.  Always >= 1.
        """
        ignore_count = 0
        sentences = re.split(r' *[\.\?!][\'"\)\]]*[ |\n](?=[A-Z])', text)
        for sentence in sentences:
            if self.lexicon_count(sentence) <= 2:
                ignore_count += 1
        return max(1, len(sentences) - ignore_count)

    @repoze.lru.lru_cache(maxsize=128)
    def avg_sentence_length(self, text):
        """Average number of words per sentence, rounded to one decimal."""
        try:
            asl = float(self.lexicon_count(text) / self.sentence_count(text))
            return legacy_round(asl, 1)
        except ZeroDivisionError:
            return 0.0

    @repoze.lru.lru_cache(maxsize=128)
    def avg_syllables_per_word(self, text, interval=None):
        """Average syllables per word, optionally scaled by ``interval``."""
        syllable = self.syllable_count(text)
        words = self.lexicon_count(text)
        try:
            if interval:
                syllables_per_word = float(syllable) * interval / float(words)
            else:
                syllables_per_word = float(syllable) / float(words)
            return legacy_round(syllables_per_word, 1)
        except ZeroDivisionError:
            return 0.0

    @repoze.lru.lru_cache(maxsize=128)
    def avg_character_per_word(self, text):
        """Average characters per word (spaces excluded)."""
        try:
            letters_per_word = float(
                self.char_count(text) / self.lexicon_count(text))
            return legacy_round(letters_per_word, 2)
        except ZeroDivisionError:
            return 0.0

    @repoze.lru.lru_cache(maxsize=128)
    def avg_letter_per_word(self, text):
        """Average letters per word (spaces and punctuation excluded)."""
        try:
            letters_per_word = float(
                self.letter_count(text) / self.lexicon_count(text))
            return legacy_round(letters_per_word, 2)
        except ZeroDivisionError:
            return 0.0

    @repoze.lru.lru_cache(maxsize=128)
    def avg_sentence_per_word(self, text):
        """Average sentences per word."""
        try:
            sentence_per_word = float(
                self.sentence_count(text) / self.lexicon_count(text))
            return legacy_round(sentence_per_word, 2)
        except ZeroDivisionError:
            return 0.0

    @repoze.lru.lru_cache(maxsize=128)
    def flesch_reading_ease(self, text):
        """Flesch Reading Ease, using the parameters of the active language."""
        sentence_length = self.avg_sentence_length(text)
        # Spanish and Italian variants express syllables per 100 words.
        s_interval = 100 if self.__get_lang_root() in ['es', 'it'] else None
        syllables_per_word = self.avg_syllables_per_word(text, s_interval)
        flesch = (
            self.__get_lang_cfg("fre_base")
            - float(
                self.__get_lang_cfg("fre_sentence_length") * sentence_length
            )
            - float(
                self.__get_lang_cfg("fre_syll_per_word") * syllables_per_word
            )
        )
        return legacy_round(flesch, 2)

    @repoze.lru.lru_cache(maxsize=128)
    def flesch_kincaid_grade(self, text):
        """Flesch-Kincaid grade level (US school grade)."""
        sentence_lenth = self.avg_sentence_length(text)
        syllables_per_word = self.avg_syllables_per_word(text)
        flesch = (
            float(0.39 * sentence_lenth)
            + float(11.8 * syllables_per_word)
            - 15.59)
        return legacy_round(flesch, 1)

    @repoze.lru.lru_cache(maxsize=128)
    def polysyllabcount(self, text):
        """Number of words with three syllables or more."""
        count = 0
        for word in text.split():
            wrds = self.syllable_count(word)
            if wrds >= 3:
                count += 1
        return count

    @repoze.lru.lru_cache(maxsize=128)
    def smog_index(self, text):
        """SMOG index; 0.0 for texts with fewer than three sentences."""
        sentences = self.sentence_count(text)
        if sentences >= 3:
            try:
                poly_syllab = self.polysyllabcount(text)
                smog = (
                    (1.043 * (30 * (poly_syllab / sentences)) ** .5)
                    + 3.1291)
                return legacy_round(smog, 1)
            except ZeroDivisionError:
                return 0.0
        else:
            return 0.0

    @repoze.lru.lru_cache(maxsize=128)
    def coleman_liau_index(self, text):
        """Coleman-Liau index (letters and sentences per 100 words)."""
        letters = legacy_round(self.avg_letter_per_word(text) * 100, 2)
        sentences = legacy_round(self.avg_sentence_per_word(text) * 100, 2)
        coleman = float((0.058 * letters) - (0.296 * sentences) - 15.8)
        return legacy_round(coleman, 2)

    @repoze.lru.lru_cache(maxsize=128)
    def automated_readability_index(self, text):
        """Automated Readability Index; 0.0 on empty input."""
        chrs = self.char_count(text)
        words = self.lexicon_count(text)
        sentences = self.sentence_count(text)
        try:
            a = float(chrs) / float(words)
            b = float(words) / float(sentences)
            readability = (
                (4.71 * legacy_round(a, 2))
                + (0.5 * legacy_round(b, 2))
                - 21.43)
            return legacy_round(readability, 1)
        except ZeroDivisionError:
            return 0.0

    @repoze.lru.lru_cache(maxsize=128)
    def linsear_write_formula(self, text):
        """Linsear Write formula, computed over the first 100 words."""
        easy_word = 0
        difficult_word = 0
        text_list = text.split()[:100]
        for word in text_list:
            if self.syllable_count(word) < 3:
                easy_word += 1
            else:
                difficult_word += 1
        text = ' '.join(text_list)
        number = float(
            (easy_word * 1 + difficult_word * 3)
            / self.sentence_count(text))
        if number <= 20:
            number -= 2
        return number / 2

    @repoze.lru.lru_cache(maxsize=128)
    def difficult_words(self, text, syllable_threshold=2):
        """Number of distinct words that are not "easy" and have at least
        ``syllable_threshold`` syllables."""
        text_list = re.findall(r"[\w\='‘’]+", text.lower())
        diff_words_set = set()
        for value in text_list:
            if value not in easy_word_set:
                if self.syllable_count(value) >= syllable_threshold:
                    diff_words_set.add(value)
        return len(diff_words_set)

    @repoze.lru.lru_cache(maxsize=128)
    def difficult_words_list(self, text, syllable_threshold=2):
        """Like :meth:`difficult_words` but returns the words themselves."""
        text_list = re.findall(r"[\w\='‘’]+", text.lower())
        diff_words_set = set()
        for value in text_list:
            if value not in easy_word_set:
                if self.syllable_count(value) >= syllable_threshold:
                    diff_words_set.add(value)
        return list(diff_words_set)

    @repoze.lru.lru_cache(maxsize=128)
    def dale_chall_readability_score(self, text):
        """Dale-Chall readability score; 0.0 on empty input."""
        word_count = self.lexicon_count(text)
        count = word_count - self.difficult_words(text)
        try:
            per = float(count) / float(word_count) * 100
        except ZeroDivisionError:
            return 0.0
        difficult_words = 100 - per
        score = (
            (0.1579 * difficult_words)
            + (0.0496 * self.avg_sentence_length(text)))
        if difficult_words > 5:
            score += 3.6365
        return legacy_round(score, 2)

    @repoze.lru.lru_cache(maxsize=128)
    def gunning_fog(self, text):
        """Gunning fog index; 0.0 on empty input."""
        try:
            syllable_threshold = self.__get_lang_cfg("syllable_threshold")
            per_diff_words = (
                (self.difficult_words(
                    text,
                    syllable_threshold=syllable_threshold)
                 / self.lexicon_count(text) * 100))
            grade = 0.4 * (self.avg_sentence_length(text) + per_diff_words)
            return legacy_round(grade, 2)
        except ZeroDivisionError:
            return 0.0

    @repoze.lru.lru_cache(maxsize=128)
    def lix(self, text):
        """LIX readability measure; 0.0 on empty input."""
        words = text.split()
        words_len = len(words)
        long_words = len([wrd for wrd in words if len(wrd) > 6])
        # Fix: guard against empty input like the sibling metrics do,
        # instead of raising ZeroDivisionError.
        try:
            per_long_words = (float(long_words) * 100) / words_len
        except ZeroDivisionError:
            return 0.0
        asl = self.avg_sentence_length(text)
        lix = asl + per_long_words
        return legacy_round(lix, 2)

    @repoze.lru.lru_cache(maxsize=128)
    def rix(self, text):
        """
        A Rix ratio is simply the number of long words divided by
        the number of assessed sentences.
        rix = LW/S
        """
        words = text.split()
        long_words_count = len([wrd for wrd in words if len(wrd) > 6])
        sentences_count = self.sentence_count(text)
        try:
            rix = long_words_count / sentences_count
        except ZeroDivisionError:
            rix = 0.00
        return legacy_round(rix, 2)

    @repoze.lru.lru_cache(maxsize=128)
    def spache_readability(self, text, float_output=True):
        """
        Function to calculate SPACHE readability formula for young readers.
        I/P - a text
        O/P - an int Spache Readability Index/Grade Level
        """
        total_no_of_words = self.lexicon_count(text)
        count_of_sentences = self.sentence_count(text)
        # Fix: guard against empty input like the sibling metrics do.
        try:
            asl = total_no_of_words / count_of_sentences
            pdw = (self.difficult_words(text) / total_no_of_words) * 100
        except ZeroDivisionError:
            return 0.0
        spache = (0.141 * asl) + (0.086 * pdw) + 0.839
        if not float_output:
            return int(spache)
        else:
            return spache

    @repoze.lru.lru_cache(maxsize=128)
    def dale_chall_readability_score_v2(self, text):
        """
        Function to calculate New Dale Chall Readability formula.
        I/P - a text
        O/P - an int Dale Chall Readability Index/Grade Level
        """
        total_no_of_words = self.lexicon_count(text)
        count_of_sentences = self.sentence_count(text)
        # Fix: guard against empty input like the sibling metrics do.
        try:
            asl = total_no_of_words / count_of_sentences
            pdw = (self.difficult_words(text) / total_no_of_words) * 100
        except ZeroDivisionError:
            return 0.0
        raw_score = 0.1579 * (pdw) + 0.0496 * asl
        adjusted_score = raw_score
        if raw_score > 0.05:
            adjusted_score = raw_score + 3.6365
        return legacy_round(adjusted_score, 2)

    @repoze.lru.lru_cache(maxsize=128)
    def text_standard(self, text, float_output=None):
        """Estimate the school grade by a consensus of all other metrics."""
        grade = []
        # Appending Flesch Kincaid Grade
        lower = legacy_round(self.flesch_kincaid_grade(text))
        upper = math.ceil(self.flesch_kincaid_grade(text))
        grade.append(int(lower))
        grade.append(int(upper))
        # Appending Flesch Reading Easy
        score = self.flesch_reading_ease(text)
        if score < 100 and score >= 90:
            grade.append(5)
        elif score < 90 and score >= 80:
            grade.append(6)
        elif score < 80 and score >= 70:
            grade.append(7)
        elif score < 70 and score >= 60:
            grade.append(8)
            grade.append(9)
        elif score < 60 and score >= 50:
            grade.append(10)
        elif score < 50 and score >= 40:
            grade.append(11)
        elif score < 40 and score >= 30:
            grade.append(12)
        else:
            grade.append(13)
        # Appending SMOG Index
        lower = legacy_round(self.smog_index(text))
        upper = math.ceil(self.smog_index(text))
        grade.append(int(lower))
        grade.append(int(upper))
        # Appending Coleman_Liau_Index
        lower = legacy_round(self.coleman_liau_index(text))
        upper = math.ceil(self.coleman_liau_index(text))
        grade.append(int(lower))
        grade.append(int(upper))
        # Appending Automated_Readability_Index
        lower = legacy_round(self.automated_readability_index(text))
        upper = math.ceil(self.automated_readability_index(text))
        grade.append(int(lower))
        grade.append(int(upper))
        # Appending Dale_Chall_Readability_Score
        lower = legacy_round(self.dale_chall_readability_score(text))
        upper = math.ceil(self.dale_chall_readability_score(text))
        grade.append(int(lower))
        grade.append(int(upper))
        # Appending Linsear_Write_Formula
        lower = legacy_round(self.linsear_write_formula(text))
        upper = math.ceil(self.linsear_write_formula(text))
        grade.append(int(lower))
        grade.append(int(upper))
        # Appending Gunning Fog Index
        lower = legacy_round(self.gunning_fog(text))
        upper = math.ceil(self.gunning_fog(text))
        grade.append(int(lower))
        grade.append(int(upper))
        # Finding the Readability Consensus based upon all the above tests
        d = Counter(grade)
        final_grade = d.most_common(1)
        score = final_grade[0][0]
        if float_output:
            return float(score)
        else:
            lower_score = int(score) - 1
            upper_score = lower_score + 1
            return "{}{} and {}{} grade".format(
                lower_score, get_grade_suffix(lower_score),
                upper_score, get_grade_suffix(upper_score)
            )

    @repoze.lru.lru_cache(maxsize=128)
    def reading_time(self, text, ms_per_char=14.69):
        """
        Function to calculate reading time based on Vera Demberg and Frank Keller (2008)
        I/P - a text
        O/P - reading time in millisecond
        """
        words = text.split()
        nchars = map(len, words)
        rt_per_word = map(lambda nchar: nchar * ms_per_char, nchars)
        reading_time = sum(list(rt_per_word))
        return legacy_round(reading_time, 2)

    def __get_lang_cfg(self, key):
        """Look up ``key`` for the active language, falling back to "en"."""
        default = langs.get("en")
        config = langs.get(self.__get_lang_root(), default)
        return config.get(key, default.get(key))

    def __get_lang_root(self):
        """Strip a locale suffix: "en_US" -> "en"."""
        return self.__lang.split("_")[0]
textstat = textstatistics()
# changelog: fixed pycodestyle
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import warnings
import string
import re
import math
from collections import Counter
import pkg_resources
import repoze.lru
from pyphen import Pyphen
# Words considered "easy" for the Dale-Chall based metrics, read from the
# resource file bundled with the package (one word per line, utf-8).
easy_word_set = {
    ln.decode('utf-8').strip() for ln in
    pkg_resources.resource_stream('textstat', 'easy_words.txt')
}
# Per-language parameters for the Flesch Reading Ease formula
# (fre_base - fre_sentence_length * ASL - fre_syll_per_word * ASW) and the
# "difficult word" syllable threshold.  Missing keys fall back to "en"
# (see textstatistics.__get_lang_cfg).
langs = {
    "en": {  # Default config
        "fre_base": 206.835,
        "fre_sentence_length": 1.015,
        "fre_syll_per_word": 84.6,
        "syllable_threshold": 3,
    },
    "de": {
        # Toni Amstad
        "fre_base": 180,
        "fre_sentence_length": 1,
        "fre_syll_per_word": 58.5,
    },
    "es": {
        # Fernandez Huerta Readability Formula
        "fre_base": 206.84,
        "fre_sentence_length": 1.02,
        "fre_syll_per_word": 0.6,
    },
    "fr": {
        # presumably the Kandel-Moles French adaptation — TODO confirm
        "fre_base": 207,
        "fre_sentence_length": 1.015,
        "fre_syll_per_word": 73.6,
    },
    "it": {
        # Flesch-Vacca
        "fre_base": 217,
        "fre_sentence_length": 1.3,
        "fre_syll_per_word": 0.6,
    },
    "nl": {
        # Flesch-Douma
        "fre_base": 206.835,
        "fre_sentence_length": 0.93,
        "fre_syll_per_word": 77,
    },
    "pl": {
        # Only overrides the difficult-word threshold; FRE params fall back to "en".
        "syllable_threshold": 4,
    },
    "ru": {
        "fre_base": 206.835,
        "fre_sentence_length": 1.3,
        "fre_syll_per_word": 60.1,
    },
}
def legacy_round(number, points=0):
    """Round to ``points`` decimals with ties going away from zero
    (python 2 style), instead of banker's rounding."""
    factor = 10 ** points
    shifted = number * factor + math.copysign(0.5, number)
    return float(math.floor(shifted)) / factor
def get_grade_suffix(grade):
    """
    Select correct ordinal suffix ('st', 'nd', 'rd' or 'th') for a grade.
    """
    # 11, 12 and 13 are special-cased before the last digit is examined.
    if grade % 100 in (11, 12, 13):
        return 'th'
    return {1: 'st', 2: 'nd', 3: 'rd'}.get(grade % 10, 'th')
class textstatistics:
    """Readability statistics over plain text (Flesch, SMOG, Dale-Chall, ...).

    Most methods are memoised with repoze.lru; set_lang() flushes the caches
    whose results depend on the active hyphenation language.
    """
    # Active pyphen hyphenation locale (name-mangled); change via set_lang().
    __lang = "en_US"
    # Encoding used to decode bytes input in syllable_count().
    text_encoding = "utf-8"

    def set_lang(self, lang):
        """Switch the active language and clear language-dependent caches."""
        self.__lang = lang
        # NOTE(review): relies on repoze.lru exposing the internal `_cache`
        # attribute on decorated functions -- confirm on repoze.lru upgrades.
        self.syllable_count._cache.clear()
        self.avg_syllables_per_word._cache.clear()
        self.flesch_reading_ease._cache.clear()
        self.flesch_kincaid_grade._cache.clear()
        self.polysyllabcount._cache.clear()
        self.smog_index._cache.clear()
        self.linsear_write_formula._cache.clear()
        self.difficult_words._cache.clear()
        self.dale_chall_readability_score._cache.clear()
        self.gunning_fog._cache.clear()
        self.spache_readability._cache.clear()
        self.dale_chall_readability_score_v2._cache.clear()
        self.text_standard._cache.clear()
        self.reading_time._cache.clear()

    @repoze.lru.lru_cache(maxsize=128)
    def char_count(self, text, ignore_spaces=True):
        """
        Function to return total character counts in a text,
        pass the following parameter `ignore_spaces = False`
        to ignore whitespaces
        """
        if ignore_spaces:
            text = text.replace(" ", "")
        return len(text)

    @repoze.lru.lru_cache(maxsize=128)
    def letter_count(self, text, ignore_spaces=True):
        """
        Function to return total letter amount in a text,
        pass the following parameter `ignore_spaces = False`
        to ignore whitespaces
        """
        if ignore_spaces:
            text = text.replace(" ", "")
        return len(self.remove_punctuation(text))

    @staticmethod
    def remove_punctuation(text):
        # Strips every character found in string.punctuation (ASCII only).
        return ''.join(ch for ch in text if ch not in string.punctuation)

    @repoze.lru.lru_cache(maxsize=128)
    def lexicon_count(self, text, removepunct=True):
        """
        Function to return total lexicon (words in lay terms) counts in a text
        """
        if removepunct:
            text = self.remove_punctuation(text)
        count = len(text.split())
        return count

    @repoze.lru.lru_cache(maxsize=128)
    def syllable_count(self, text, lang=None):
        """
        Function to calculate syllable words in a text.
        I/P - a text
        O/P - number of syllable words
        """
        if lang:
            warnings.warn(
                "The 'lang' argument has been moved to "
                "'textstat.set_lang(<lang>)'. This argument will be removed "
                "in the future.",
                DeprecationWarning
            )
        if isinstance(text, bytes):
            text = text.decode(self.text_encoding)
        text = text.lower()
        text = self.remove_punctuation(text)
        if not text:
            return 0
        # Count hyphenation points per word; every word has at least one
        # syllable.
        dic = Pyphen(lang=self.__lang)
        count = 0
        for word in text.split(' '):
            word_hyphenated = dic.inserted(word)
            count += max(1, word_hyphenated.count("-") + 1)
        return count

    @repoze.lru.lru_cache(maxsize=128)
    def sentence_count(self, text):
        """
        Sentence count of a text
        """
        # Split on terminal punctuation followed by a capital letter, then
        # drop fragments of two words or fewer (abbreviations, stray breaks).
        ignore_count = 0
        sentences = re.split(r' *[\.\?!][\'"\)\]]*[ |\n](?=[A-Z])', text)
        for sentence in sentences:
            if self.lexicon_count(sentence) <= 2:
                ignore_count += 1
        return max(1, len(sentences) - ignore_count)

    @repoze.lru.lru_cache(maxsize=128)
    def avg_sentence_length(self, text):
        """Average number of words per sentence, rounded to one decimal."""
        try:
            asl = float(self.lexicon_count(text) / self.sentence_count(text))
            return legacy_round(asl, 1)
        # Defensive: sentence_count() never returns less than 1.
        except ZeroDivisionError:
            return 0.0

    @repoze.lru.lru_cache(maxsize=128)
    def avg_syllables_per_word(self, text, interval=None):
        """Average syllables per word; `interval` scales to per-N-words."""
        syllable = self.syllable_count(text)
        words = self.lexicon_count(text)
        try:
            if interval:
                syllables_per_word = float(syllable) * interval / float(words)
            else:
                syllables_per_word = float(syllable) / float(words)
            return legacy_round(syllables_per_word, 1)
        except ZeroDivisionError:
            return 0.0

    @repoze.lru.lru_cache(maxsize=128)
    def avg_character_per_word(self, text):
        """Average characters (incl. punctuation) per word."""
        try:
            letters_per_word = float(
                self.char_count(text) / self.lexicon_count(text))
            return legacy_round(letters_per_word, 2)
        except ZeroDivisionError:
            return 0.0

    @repoze.lru.lru_cache(maxsize=128)
    def avg_letter_per_word(self, text):
        """Average letters (punctuation removed) per word."""
        try:
            letters_per_word = float(
                self.letter_count(text) / self.lexicon_count(text))
            return legacy_round(letters_per_word, 2)
        except ZeroDivisionError:
            return 0.0

    @repoze.lru.lru_cache(maxsize=128)
    def avg_sentence_per_word(self, text):
        """Average sentences per word (used by Coleman-Liau)."""
        try:
            sentence_per_word = float(
                self.sentence_count(text) / self.lexicon_count(text))
            return legacy_round(sentence_per_word, 2)
        except ZeroDivisionError:
            return 0.0

    @repoze.lru.lru_cache(maxsize=128)
    def flesch_reading_ease(self, text):
        """Flesch Reading Ease, using the active language's coefficients."""
        sentence_length = self.avg_sentence_length(text)
        # Spanish and Italian formulas use syllables per 100 words.
        s_interval = 100 if self.__get_lang_root() in ['es', 'it'] else None
        syllables_per_word = self.avg_syllables_per_word(text, s_interval)
        flesch = (
            self.__get_lang_cfg("fre_base")
            - float(
                self.__get_lang_cfg("fre_sentence_length") * sentence_length
            )
            - float(
                self.__get_lang_cfg("fre_syll_per_word") * syllables_per_word
            )
        )
        return legacy_round(flesch, 2)

    @repoze.lru.lru_cache(maxsize=128)
    def flesch_kincaid_grade(self, text):
        """Flesch-Kincaid US grade level."""
        sentence_lenth = self.avg_sentence_length(text)
        syllables_per_word = self.avg_syllables_per_word(text)
        flesch = (
            float(0.39 * sentence_lenth)
            + float(11.8 * syllables_per_word)
            - 15.59)
        return legacy_round(flesch, 1)

    @repoze.lru.lru_cache(maxsize=128)
    def polysyllabcount(self, text):
        """Number of words with three or more syllables."""
        count = 0
        for word in text.split():
            wrds = self.syllable_count(word)
            if wrds >= 3:
                count += 1
        return count

    @repoze.lru.lru_cache(maxsize=128)
    def smog_index(self, text):
        """SMOG grade; defined only for texts of at least 3 sentences."""
        sentences = self.sentence_count(text)
        if sentences >= 3:
            try:
                poly_syllab = self.polysyllabcount(text)
                smog = (
                    (1.043 * (30 * (poly_syllab / sentences)) ** .5)
                    + 3.1291)
                return legacy_round(smog, 1)
            except ZeroDivisionError:
                return 0.0
        else:
            return 0.0

    @repoze.lru.lru_cache(maxsize=128)
    def coleman_liau_index(self, text):
        """Coleman-Liau index (letters and sentences per 100 words)."""
        letters = legacy_round(self.avg_letter_per_word(text) * 100, 2)
        sentences = legacy_round(self.avg_sentence_per_word(text) * 100, 2)
        coleman = float((0.058 * letters) - (0.296 * sentences) - 15.8)
        return legacy_round(coleman, 2)

    @repoze.lru.lru_cache(maxsize=128)
    def automated_readability_index(self, text):
        """Automated Readability Index (ARI)."""
        chrs = self.char_count(text)
        words = self.lexicon_count(text)
        sentences = self.sentence_count(text)
        try:
            a = float(chrs) / float(words)
            b = float(words) / float(sentences)
            readability = (
                (4.71 * legacy_round(a, 2))
                + (0.5 * legacy_round(b, 2))
                - 21.43)
            return legacy_round(readability, 1)
        except ZeroDivisionError:
            return 0.0

    @repoze.lru.lru_cache(maxsize=128)
    def linsear_write_formula(self, text):
        """Linsear Write grade level, computed on the first 100 words."""
        easy_word = 0
        difficult_word = 0
        text_list = text.split()[:100]
        for word in text_list:
            if self.syllable_count(word) < 3:
                easy_word += 1
            else:
                difficult_word += 1
        text = ' '.join(text_list)
        number = float(
            (easy_word * 1 + difficult_word * 3)
            / self.sentence_count(text))
        if number <= 20:
            number -= 2
        return number / 2

    @repoze.lru.lru_cache(maxsize=128)
    def difficult_words(self, text, syllable_threshold=2):
        """Count distinct words outside the easy list with enough syllables."""
        text_list = re.findall(r"[\w\='‘’]+", text.lower())
        diff_words_set = set()
        for value in text_list:
            if value not in easy_word_set:
                if self.syllable_count(value) >= syllable_threshold:
                    diff_words_set.add(value)
        return len(diff_words_set)

    @repoze.lru.lru_cache(maxsize=128)
    def difficult_words_list(self, text, syllable_threshold=2):
        """Like difficult_words(), but returns the words themselves."""
        text_list = re.findall(r"[\w\='‘’]+", text.lower())
        diff_words_set = set()
        for value in text_list:
            if value not in easy_word_set:
                if self.syllable_count(value) >= syllable_threshold:
                    diff_words_set.add(value)
        return list(diff_words_set)

    @repoze.lru.lru_cache(maxsize=128)
    def dale_chall_readability_score(self, text):
        """Original Dale-Chall readability score."""
        word_count = self.lexicon_count(text)
        count = word_count - self.difficult_words(text)
        try:
            # Percentage of words that are NOT difficult.
            per = float(count) / float(word_count) * 100
        except ZeroDivisionError:
            return 0.0
        difficult_words = 100 - per
        score = (
            (0.1579 * difficult_words)
            + (0.0496 * self.avg_sentence_length(text)))
        if difficult_words > 5:
            score += 3.6365
        return legacy_round(score, 2)

    @repoze.lru.lru_cache(maxsize=128)
    def gunning_fog(self, text):
        """Gunning fog index, using the language's syllable threshold."""
        try:
            syllable_threshold = self.__get_lang_cfg("syllable_threshold")
            per_diff_words = (
                (self.difficult_words(
                    text,
                    syllable_threshold=syllable_threshold)
                    / self.lexicon_count(text) * 100))
            grade = 0.4 * (self.avg_sentence_length(text) + per_diff_words)
            return legacy_round(grade, 2)
        except ZeroDivisionError:
            return 0.0

    @repoze.lru.lru_cache(maxsize=128)
    def lix(self, text):
        """LIX readability measure (long-word percentage + sentence length)."""
        words = text.split()
        words_len = len(words)
        long_words = len([wrd for wrd in words if len(wrd) > 6])
        per_long_words = (float(long_words) * 100) / words_len
        asl = self.avg_sentence_length(text)
        lix = asl + per_long_words
        return legacy_round(lix, 2)

    @repoze.lru.lru_cache(maxsize=128)
    def rix(self, text):
        """
        A Rix ratio is simply the number of long words divided by
        the number of assessed sentences.
        rix = LW/S
        """
        words = text.split()
        long_words_count = len([wrd for wrd in words if len(wrd) > 6])
        sentences_count = self.sentence_count(text)
        try:
            rix = long_words_count / sentences_count
        except ZeroDivisionError:
            rix = 0.00
        return legacy_round(rix, 2)

    @repoze.lru.lru_cache(maxsize=128)
    def spache_readability(self, text, float_output=True):
        """
        Function to calculate SPACHE readability formula for young readers.
        I/P - a text
        O/P - an int Spache Readability Index/Grade Level
        """
        total_no_of_words = self.lexicon_count(text)
        count_of_sentences = self.sentence_count(text)
        asl = total_no_of_words / count_of_sentences
        pdw = (self.difficult_words(text) / total_no_of_words) * 100
        spache = (0.141 * asl) + (0.086 * pdw) + 0.839
        if not float_output:
            return int(spache)
        else:
            return spache

    @repoze.lru.lru_cache(maxsize=128)
    def dale_chall_readability_score_v2(self, text):
        """
        Function to calculate New Dale Chall Readability formula.
        I/P - a text
        O/P - an int Dale Chall Readability Index/Grade Level
        """
        total_no_of_words = self.lexicon_count(text)
        count_of_sentences = self.sentence_count(text)
        asl = total_no_of_words / count_of_sentences
        pdw = (self.difficult_words(text) / total_no_of_words) * 100
        raw_score = 0.1579 * (pdw) + 0.0496 * asl
        adjusted_score = raw_score
        if raw_score > 0.05:
            adjusted_score = raw_score + 3.6365
        return legacy_round(adjusted_score, 2)

    @repoze.lru.lru_cache(maxsize=128)
    def text_standard(self, text, float_output=None):
        """Consensus grade level: the mode of several individual tests."""
        grade = []
        # Appending Flesch Kincaid Grade
        lower = legacy_round(self.flesch_kincaid_grade(text))
        upper = math.ceil(self.flesch_kincaid_grade(text))
        grade.append(int(lower))
        grade.append(int(upper))
        # Appending Flesch Reading Easy
        score = self.flesch_reading_ease(text)
        if score < 100 and score >= 90:
            grade.append(5)
        elif score < 90 and score >= 80:
            grade.append(6)
        elif score < 80 and score >= 70:
            grade.append(7)
        elif score < 70 and score >= 60:
            grade.append(8)
            grade.append(9)
        elif score < 60 and score >= 50:
            grade.append(10)
        elif score < 50 and score >= 40:
            grade.append(11)
        elif score < 40 and score >= 30:
            grade.append(12)
        else:
            grade.append(13)
        # Appending SMOG Index
        lower = legacy_round(self.smog_index(text))
        upper = math.ceil(self.smog_index(text))
        grade.append(int(lower))
        grade.append(int(upper))
        # Appending Coleman_Liau_Index
        lower = legacy_round(self.coleman_liau_index(text))
        upper = math.ceil(self.coleman_liau_index(text))
        grade.append(int(lower))
        grade.append(int(upper))
        # Appending Automated_Readability_Index
        lower = legacy_round(self.automated_readability_index(text))
        upper = math.ceil(self.automated_readability_index(text))
        grade.append(int(lower))
        grade.append(int(upper))
        # Appending Dale_Chall_Readability_Score
        lower = legacy_round(self.dale_chall_readability_score(text))
        upper = math.ceil(self.dale_chall_readability_score(text))
        grade.append(int(lower))
        grade.append(int(upper))
        # Appending Linsear_Write_Formula
        lower = legacy_round(self.linsear_write_formula(text))
        upper = math.ceil(self.linsear_write_formula(text))
        grade.append(int(lower))
        grade.append(int(upper))
        # Appending Gunning Fog Index
        lower = legacy_round(self.gunning_fog(text))
        upper = math.ceil(self.gunning_fog(text))
        grade.append(int(lower))
        grade.append(int(upper))
        # Finding the Readability Consensus based upon all the above tests
        d = Counter(grade)
        final_grade = d.most_common(1)
        score = final_grade[0][0]
        if float_output:
            return float(score)
        else:
            lower_score = int(score) - 1
            upper_score = lower_score + 1
            return "{}{} and {}{} grade".format(
                lower_score, get_grade_suffix(lower_score),
                upper_score, get_grade_suffix(upper_score)
            )

    @repoze.lru.lru_cache(maxsize=128)
    def reading_time(self, text, ms_per_char=14.69):
        """
        Function to calculate reading time (Demberg & Keller, 2008)
        I/P - a text
        O/P - reading time in millisecond
        """
        words = text.split()
        nchars = map(len, words)
        rt_per_word = map(lambda nchar: nchar * ms_per_char, nchars)
        reading_time = sum(list(rt_per_word))
        return legacy_round(reading_time, 2)

    def __get_lang_cfg(self, key):
        """Look up `key` for the active language, falling back to English."""
        default = langs.get("en")
        config = langs.get(self.__get_lang_root(), default)
        return config.get(key, default.get(key))

    def __get_lang_root(self):
        """Return the bare language code, e.g. 'en' for 'en_US'."""
        return self.__lang.split("_")[0]
textstat = textstatistics()
|
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse, Http404
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User, Group
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from reportlab.pdfgen import canvas
from reportlab.lib import colors
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import cm
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import Table, Paragraph
from io import BytesIO
from .settings import BILLJOBS_DEBUG_PDF, BILLJOBS_BILL_LOGO_PATH, \
BILLJOBS_BILL_LOGO_WIDTH, BILLJOBS_BILL_LOGO_HEIGHT, \
BILLJOBS_BILL_PAYMENT_INFO
from .models import Bill
from billjobs.serializers import UserSerializer, GroupSerializer
from .permissions import CustomUserAPIPermission, \
CustomUserDetailAPIPermission, CustomGroupAPIPermission, \
CustomGroupDetailAPIPermission
from textwrap import wrap
class GroupAPI(APIView):
    """
    API endpoint to list or create groups
    """
    permission_classes = (CustomGroupAPIPermission,)

    def get(self, request, format=None):
        """
        List groups only accessible by admin
        """
        queryset = Group.objects.filter(user=request.user)
        serializer = GroupSerializer(queryset, context={'request': request},
                                     many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def post(self, request, format=None):
        """
        Create a group
        """
        serializer = GroupSerializer(data=request.data,
                                     context={'request': request})
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class GroupDetailAPI(APIView):
    """
    API endpoint that allow admin and user to retrieve, update and delete a
    group
    """
    permission_classes = (CustomGroupDetailAPIPermission,)

    def get_object(self, pk):
        """Fetch the group or raise Http404, enforcing object permissions."""
        try:
            group = Group.objects.get(pk=pk)
        except Group.DoesNotExist:
            raise Http404
        self.check_object_permissions(self.request, group)
        return group

    def get(self, request, pk, format=None):
        """Retrieve a single group."""
        serializer = GroupSerializer(self.get_object(pk),
                                     context={'request': request})
        return Response(serializer.data, status=status.HTTP_200_OK)

    def put(self, request, pk, format=None):
        """
        Update a group instance
        """
        group = self.get_object(pk)
        serializer = GroupSerializer(group, data=request.data,
                                     context={'request': request})
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)

    def delete(self, request, pk, format=None):
        """
        Delete a group instance
        """
        self.get_object(pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class UserAPI(APIView):
    """
    API endpoint that allows admin to list or create users
    """
    permission_classes = (CustomUserAPIPermission,)

    def get(self, request, format=None):
        """Return every user account."""
        serializer = UserSerializer(User.objects.all(),
                                    context={'request': request}, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def post(self, request, format=None):
        """Create a user from the request payload."""
        serializer = UserSerializer(data=request.data,
                                    context={'request': request})
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class UserDetailAPI(APIView):
    """
    API endpoint that allows admin to retrieve, update, delete a user
    """
    permission_classes = (CustomUserDetailAPIPermission,)

    def get_object(self, pk):
        """Fetch the user or raise Http404, enforcing object permissions."""
        try:
            user = User.objects.get(pk=pk)
        except User.DoesNotExist:
            raise Http404
        self.check_object_permissions(self.request, user)
        return user

    def get(self, request, pk, format=None):
        """Retrieve a single user."""
        serializer = UserSerializer(self.get_object(pk),
                                    context={'request': request})
        return Response(serializer.data, status=status.HTTP_200_OK)

    def put(self, request, pk, format=None):
        """Partially update a user instance."""
        user = self.get_object(pk)
        serializer = UserSerializer(user, data=request.data,
                                    context={'request': request},
                                    partial=True)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)

    def delete(self, request, pk, format=None):
        """Delete a user instance."""
        self.get_object(pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
@login_required
def generate_pdf(request, bill_id):
    """Render bill `bill_id` as an A4 PDF and return it as an attachment.

    NOTE(review): any authenticated user can download any bill by guessing
    its id -- confirm whether an ownership check (bill.user == request.user)
    or an admin-only permission is required here.
    """
    try:
        bill = Bill.objects.get(id=bill_id)
    except Bill.DoesNotExist:
        # Return a proper 404 instead of an unhandled exception (HTTP 500).
        raise Http404
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename="%s.pdf"' % bill.number
    # Build the PDF in an in-memory buffer, then copy it into the response.
    buffer = BytesIO()
    pdf = canvas.Canvas(buffer, pagesize=A4)
    # define new 0,0 bottom left with cm as margin
    pdf.translate(cm, cm)
    # usable document width and height (1 cm margin on each side)
    width, height = A4
    width = width - 2*cm
    height = height - 2*cm
    # if debug draw lines for document limit
    if BILLJOBS_DEBUG_PDF is True:
        pdf.setStrokeColorRGB(1, 0, 0)
        pdf.line(0, 0, width, 0)
        pdf.line(0, 0, 0, height)
        pdf.line(0, height, width, height)
        pdf.line(width, height, width, 0)
    # Put logo on top of pdf original image size is 570px/250px
    pdf.drawImage(BILLJOBS_BILL_LOGO_PATH, 0, height-BILLJOBS_BILL_LOGO_HEIGHT,
            width=BILLJOBS_BILL_LOGO_WIDTH, height=BILLJOBS_BILL_LOGO_HEIGHT)
    # billing information block, top-right corner
    lh = 15  # line height
    pdf.setFillColorRGB(0.3, 0.3, 0.3)
    pdf.setFont("Helvetica-Bold", 14)
    pdf.drawRightString(width, height-lh, 'Facture')
    pdf.setFont("Helvetica-Bold", 10)
    pdf.drawRightString(width, height-2*lh, u'Numéro : %s' % bill.number)
    pdf.setFont("Helvetica", 10)
    pdf.drawRightString(width, height-3*lh, u'Date facturation : %s' % bill.billing_date.strftime('%d/%m/%Y'))
    # new vertical baseline below the header
    nh = height - 90
    # seller (issuer) box with light grey background
    pdf.setFillColorRGB(0.95, 0.95, 0.95)
    pdf.setStrokeColorRGB(1, 1, 1)
    # rect(x, y, width, height)
    pdf.rect(0, nh-8*lh, width/2-40, 6.4*lh, fill=1)
    # reset fill for text color
    pdf.setFillColorRGB(0.3, 0.3, 0.3)
    pdf.drawString(10, nh-lh, 'Émetteur')
    issuer = Paragraph(bill.issuer_address, getSampleStyleSheet()['Normal'])
    issuer.wrapOn(pdf, width*0.25, 6*lh)
    issuer.drawOn(pdf, 20, nh-6*lh)
    # customer address block
    pdf.drawString(width/2, nh-lh, 'Adressé à')
    customer = pdf.beginText()
    customer.setTextOrigin(width/2+20, nh-3*lh)
    # create text with \n and remove \r
    text = '%s %s\n%s' % (bill.user.first_name, bill.user.last_name,
            bill.billing_address.replace('\r', ''))
    for line in text.split('\n'):
        customer.textOut(line)
        customer.moveCursor(0, lh)
    pdf.drawText(customer)
    pdf.setStrokeColorRGB(0, 0, 0)
    # rect(x, y, width, height)
    pdf.rect(width/2, nh-8*lh, width/2, 6.4*lh, fill=0)
    # move below the address blocks for the line-items table
    nh = nh - 10*lh
    data = [['Désignation', 'Prix unit. HT', 'Quantité', 'Total HT']]
    for bill_line in bill.billline_set.all():
        # wrap long descriptions/notes at 62 chars so they fit the column
        description = '%s - %s\n%s' % (bill_line.service.reference,
                bill_line.service.name,
                '\n'.join(wrap(bill_line.service.description, 62)))
        if bill_line.note:
            description = '%s\n%s' % (description,
                    '\n'.join(wrap(bill_line.note, 62)))
        data.append((description, bill_line.service.price,
                bill_line.quantity, bill_line.total))
    data.append(('TVA non applicable art-293B du CGI', '', 'Total HT',
        '%s €' % bill.amount))
    data.append(('', '', 'TVA 0%', '0'))
    data.append(('', '', 'Total TTC', '%s €' % bill.amount))
    # column widths in percent of pdf width
    colWidths = (width*0.55, width*0.15, width*0.15, width*0.15)
    style = [('GRID', (0, 0), (-1, 0), 1, colors.black),
             ('GRID', (-2, -3), (-1, -1), 1, colors.black),
             ('BOX', (0, 1), (0, -4), 1, colors.black),
             ('BOX', (1, 1), (1, -4), 1, colors.black),
             ('BOX', (2, 1), (2, -4), 1, colors.black),
             ('BOX', (-1, 1), (-1, -4), 1, colors.black),
             ('ALIGN', (0, 0), (0, -1), 'LEFT'),
             ('ALIGN', (1, 0), (-1, -1), 'CENTER'),
             ('ALIGN', (-1, 0), (-1, -1), 'RIGHT'),
             ('FONTNAME', (0, -3), (0, -3), 'Helvetica-Bold'),
             ]
    table = Table(data, colWidths=colWidths, style=style)
    # wrap() computes the final size; it must run before drawOn()
    t_width, t_height = table.wrap(0, 0)
    table.drawOn(pdf, 0, nh-t_height)
    # payment information and footer
    p = Paragraph(BILLJOBS_BILL_PAYMENT_INFO, getSampleStyleSheet()['Normal'])
    p.wrapOn(pdf, width*0.6, 100)
    p.drawOn(pdf, 0, 3*lh)
    pdf.line(0, 2*lh, width, 2*lh)
    pdf.setFontSize(8)
    pdf.drawCentredString(width/2.0, lh, 'Association Loi 1901')
    pdf.showPage()
    pdf.save()
    # copy the rendered PDF out of the buffer into the HTTP response
    genpdf = buffer.getvalue()
    buffer.close()
    response.write(genpdf)
    return response
feat(group-api): admin lists all groups
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse, Http404
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User, Group
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from reportlab.pdfgen import canvas
from reportlab.lib import colors
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import cm
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import Table, Paragraph
from io import BytesIO
from .settings import BILLJOBS_DEBUG_PDF, BILLJOBS_BILL_LOGO_PATH, \
BILLJOBS_BILL_LOGO_WIDTH, BILLJOBS_BILL_LOGO_HEIGHT, \
BILLJOBS_BILL_PAYMENT_INFO
from .models import Bill
from billjobs.serializers import UserSerializer, GroupSerializer
from .permissions import CustomUserAPIPermission, \
CustomUserDetailAPIPermission, CustomGroupAPIPermission, \
CustomGroupDetailAPIPermission
from textwrap import wrap
class GroupAPI(APIView):
    """
    API endpoint to list or create groups
    """
    permission_classes = (CustomGroupAPIPermission,)

    def get(self, request, format=None):
        """
        List groups
        """
        # Staff members may list every group; regular users only see the
        # groups they belong to.
        if request.user.is_staff is True:
            queryset = Group.objects.all()
        else:
            queryset = Group.objects.filter(user=request.user)
        serializer = GroupSerializer(queryset, context={'request': request},
                                     many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def post(self, request, format=None):
        """
        Create a group
        """
        serializer = GroupSerializer(data=request.data,
                                     context={'request': request})
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class GroupDetailAPI(APIView):
    """
    API endpoint that allow admin and user to retrieve, update and delete a
    group
    """
    permission_classes = (CustomGroupDetailAPIPermission,)

    def get_object(self, pk):
        """Fetch the group or raise Http404, enforcing object permissions."""
        try:
            group = Group.objects.get(pk=pk)
        except Group.DoesNotExist:
            raise Http404
        self.check_object_permissions(self.request, group)
        return group

    def get(self, request, pk, format=None):
        """Retrieve a single group."""
        serializer = GroupSerializer(self.get_object(pk),
                                     context={'request': request})
        return Response(serializer.data, status=status.HTTP_200_OK)

    def put(self, request, pk, format=None):
        """
        Update a group instance
        """
        group = self.get_object(pk)
        serializer = GroupSerializer(group, data=request.data,
                                     context={'request': request})
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)

    def delete(self, request, pk, format=None):
        """
        Delete a group instance
        """
        self.get_object(pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class UserAPI(APIView):
    """
    API endpoint that allows admin to list or create users
    """
    permission_classes = (CustomUserAPIPermission,)

    def get(self, request, format=None):
        """Return every user account."""
        serializer = UserSerializer(User.objects.all(),
                                    context={'request': request}, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def post(self, request, format=None):
        """Create a user from the request payload."""
        serializer = UserSerializer(data=request.data,
                                    context={'request': request})
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class UserDetailAPI(APIView):
    """
    API endpoint that allows admin to retrieve, update, delete a user
    """
    permission_classes = (CustomUserDetailAPIPermission,)

    def get_object(self, pk):
        """Fetch the user or raise Http404, enforcing object permissions."""
        try:
            user = User.objects.get(pk=pk)
        except User.DoesNotExist:
            raise Http404
        self.check_object_permissions(self.request, user)
        return user

    def get(self, request, pk, format=None):
        """Retrieve a single user."""
        serializer = UserSerializer(self.get_object(pk),
                                    context={'request': request})
        return Response(serializer.data, status=status.HTTP_200_OK)

    def put(self, request, pk, format=None):
        """Partially update a user instance."""
        user = self.get_object(pk)
        serializer = UserSerializer(user, data=request.data,
                                    context={'request': request},
                                    partial=True)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)

    def delete(self, request, pk, format=None):
        """Delete a user instance."""
        self.get_object(pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
@login_required
def generate_pdf(request, bill_id):
    """Render bill `bill_id` as an A4 PDF and return it as an attachment.

    NOTE(review): any authenticated user can download any bill by guessing
    its id -- confirm whether an ownership check (bill.user == request.user)
    or an admin-only permission is required here.
    """
    try:
        bill = Bill.objects.get(id=bill_id)
    except Bill.DoesNotExist:
        # Return a proper 404 instead of an unhandled exception (HTTP 500).
        raise Http404
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename="%s.pdf"' % bill.number
    # Build the PDF in an in-memory buffer, then copy it into the response.
    buffer = BytesIO()
    pdf = canvas.Canvas(buffer, pagesize=A4)
    # define new 0,0 bottom left with cm as margin
    pdf.translate(cm, cm)
    # usable document width and height (1 cm margin on each side)
    width, height = A4
    width = width - 2*cm
    height = height - 2*cm
    # if debug draw lines for document limit
    if BILLJOBS_DEBUG_PDF is True:
        pdf.setStrokeColorRGB(1, 0, 0)
        pdf.line(0, 0, width, 0)
        pdf.line(0, 0, 0, height)
        pdf.line(0, height, width, height)
        pdf.line(width, height, width, 0)
    # Put logo on top of pdf original image size is 570px/250px
    pdf.drawImage(BILLJOBS_BILL_LOGO_PATH, 0, height-BILLJOBS_BILL_LOGO_HEIGHT,
            width=BILLJOBS_BILL_LOGO_WIDTH, height=BILLJOBS_BILL_LOGO_HEIGHT)
    # billing information block, top-right corner
    lh = 15  # line height
    pdf.setFillColorRGB(0.3, 0.3, 0.3)
    pdf.setFont("Helvetica-Bold", 14)
    pdf.drawRightString(width, height-lh, 'Facture')
    pdf.setFont("Helvetica-Bold", 10)
    pdf.drawRightString(width, height-2*lh, u'Numéro : %s' % bill.number)
    pdf.setFont("Helvetica", 10)
    pdf.drawRightString(width, height-3*lh, u'Date facturation : %s' % bill.billing_date.strftime('%d/%m/%Y'))
    # new vertical baseline below the header
    nh = height - 90
    # seller (issuer) box with light grey background
    pdf.setFillColorRGB(0.95, 0.95, 0.95)
    pdf.setStrokeColorRGB(1, 1, 1)
    # rect(x, y, width, height)
    pdf.rect(0, nh-8*lh, width/2-40, 6.4*lh, fill=1)
    # reset fill for text color
    pdf.setFillColorRGB(0.3, 0.3, 0.3)
    pdf.drawString(10, nh-lh, 'Émetteur')
    issuer = Paragraph(bill.issuer_address, getSampleStyleSheet()['Normal'])
    issuer.wrapOn(pdf, width*0.25, 6*lh)
    issuer.drawOn(pdf, 20, nh-6*lh)
    # customer address block
    pdf.drawString(width/2, nh-lh, 'Adressé à')
    customer = pdf.beginText()
    customer.setTextOrigin(width/2+20, nh-3*lh)
    # create text with \n and remove \r
    text = '%s %s\n%s' % (bill.user.first_name, bill.user.last_name,
            bill.billing_address.replace('\r', ''))
    for line in text.split('\n'):
        customer.textOut(line)
        customer.moveCursor(0, lh)
    pdf.drawText(customer)
    pdf.setStrokeColorRGB(0, 0, 0)
    # rect(x, y, width, height)
    pdf.rect(width/2, nh-8*lh, width/2, 6.4*lh, fill=0)
    # move below the address blocks for the line-items table
    nh = nh - 10*lh
    data = [['Désignation', 'Prix unit. HT', 'Quantité', 'Total HT']]
    for bill_line in bill.billline_set.all():
        # wrap long descriptions/notes at 62 chars so they fit the column
        description = '%s - %s\n%s' % (bill_line.service.reference,
                bill_line.service.name,
                '\n'.join(wrap(bill_line.service.description, 62)))
        if bill_line.note:
            description = '%s\n%s' % (description,
                    '\n'.join(wrap(bill_line.note, 62)))
        data.append((description, bill_line.service.price,
                bill_line.quantity, bill_line.total))
    data.append(('TVA non applicable art-293B du CGI', '', 'Total HT',
        '%s €' % bill.amount))
    data.append(('', '', 'TVA 0%', '0'))
    data.append(('', '', 'Total TTC', '%s €' % bill.amount))
    # column widths in percent of pdf width
    colWidths = (width*0.55, width*0.15, width*0.15, width*0.15)
    style = [('GRID', (0, 0), (-1, 0), 1, colors.black),
             ('GRID', (-2, -3), (-1, -1), 1, colors.black),
             ('BOX', (0, 1), (0, -4), 1, colors.black),
             ('BOX', (1, 1), (1, -4), 1, colors.black),
             ('BOX', (2, 1), (2, -4), 1, colors.black),
             ('BOX', (-1, 1), (-1, -4), 1, colors.black),
             ('ALIGN', (0, 0), (0, -1), 'LEFT'),
             ('ALIGN', (1, 0), (-1, -1), 'CENTER'),
             ('ALIGN', (-1, 0), (-1, -1), 'RIGHT'),
             ('FONTNAME', (0, -3), (0, -3), 'Helvetica-Bold'),
             ]
    table = Table(data, colWidths=colWidths, style=style)
    # wrap() computes the final size; it must run before drawOn()
    t_width, t_height = table.wrap(0, 0)
    table.drawOn(pdf, 0, nh-t_height)
    # payment information and footer
    p = Paragraph(BILLJOBS_BILL_PAYMENT_INFO, getSampleStyleSheet()['Normal'])
    p.wrapOn(pdf, width*0.6, 100)
    p.drawOn(pdf, 0, 3*lh)
    pdf.line(0, 2*lh, width, 2*lh)
    pdf.setFontSize(8)
    pdf.drawCentredString(width/2.0, lh, 'Association Loi 1901')
    pdf.showPage()
    pdf.save()
    # copy the rendered PDF out of the buffer into the HTTP response
    genpdf = buffer.getvalue()
    buffer.close()
    response.write(genpdf)
    return response
|
import re
import ROOT
import os
from PyAnalysisTools.base import InvalidInputError, _logger
from PyAnalysisTools.ROOTUtils.ObjectHandle import get_objects_from_canvas_by_type, get_objects_from_canvas_by_name
from PyAnalysisTools.PlottingUtils.PlotConfig import get_style_setters_and_values, find_process_config
def load_atlas_style():
    """Load the ATLAS ROOT style macro and apply it globally."""
    # Resolve the macro relative to this module, outside the try block:
    # the original computed base_path inside the try, so a failure there
    # made the except handler crash with UnboundLocalError.
    base_path = os.path.dirname(os.path.realpath(__file__))
    try:
        ROOT.gROOT.LoadMacro(os.path.join(base_path, 'AtlasStyle/AtlasStyle.C'))
        ROOT.SetAtlasStyle()
    except Exception:
        _logger.error("Could not find Atlas style files in %s" % os.path.join(base_path, 'AtlasStyle'))
def apply_style(obj, plot_config, process_config, index=None):
    """Apply the configured style attribute and colour to a ROOT object.

    The setter families (e.g. Marker/Line/Fill) and their values come from
    the plot and process configuration.
    """
    setters, style_attr, color = get_style_setters_and_values(
        plot_config, process_config, index)
    if style_attr is not None:
        for name in setters:
            getattr(obj, 'Set{}Style'.format(name))(style_attr)
    if color is not None:
        for name in setters:
            getattr(obj, 'Set{}Color'.format(name))(color)
def decorate_canvas(canvas, plot_config):
    """Add watermark, luminosity label, grid and decoration text to `canvas`
    according to the attributes present on `plot_config`."""
    if hasattr(plot_config, "watermark"):
        add_atlas_label(canvas, plot_config.watermark,
                        {"x": 0.15, "y": 0.96}, size=0.03, offset=0.05)
    if getattr(plot_config, "lumi", None) is not None:
        add_lumi_text(canvas, plot_config.lumi, {"x": 0.6, "y": 0.9})
    if getattr(plot_config, "grid", None) is True:
        canvas.SetGrid()
    if hasattr(plot_config, "decor_text"):
        add_text_to_canvas(canvas, plot_config.decor_text, {"x": 0.2, "y": 0.8})
def set_title_x(obj, title):
    """Assign `title` to the x axis of `obj`.

    Raises TypeError when the object exposes no x axis; a deleted ROOT
    object (ReferenceError) is logged instead of raised.
    """
    if not hasattr(obj, "GetXaxis"):
        raise TypeError
    try:
        axis = obj.GetXaxis()
        axis.SetTitle(title)
    except ReferenceError:
        _logger.error("Nil object {:s}".format(obj.GetName()))
def set_title_y(obj, title):
    """Assign `title` to the y axis of `obj`.

    Raises TypeError when the object exposes no y axis; a deleted ROOT
    object (ReferenceError) is logged instead of raised.
    """
    if not hasattr(obj, "GetYaxis"):
        raise TypeError
    try:
        axis = obj.GetYaxis()
        axis.SetTitle(title)
    except ReferenceError:
        _logger.error("Nil object {:s}".format(obj.GetName()))
def set_style_options(obj, style):
    """Apply marker/line style options to a ROOT object.

    `style` maps an attribute family ("marker" or "line") to a dict of
    option name -> value, translated into Set<Family><Option>() calls,
    e.g. {'marker': {'style': 20}} -> obj.SetMarkerStyle(20).

    Raises InvalidInputError when `style` or one of its values is not a
    dict. Unknown families are silently skipped; unknown setters are
    logged as warnings.
    """
    allowed_attributes = ["marker", "line"]
    if not isinstance(style, dict):
        raise InvalidInputError("Invalid style config. Needs to be dictionary")
    for style_object, style_options in style.items():
        if style_object.lower() not in allowed_attributes:
            continue
        if not isinstance(style_options, dict):
            raise InvalidInputError("Invalid style option for " + style_object + ". Requires dict, but received " +
                                    str(type(style_options)))
        for style_option, value in style_options.items():
            try:
                getattr(obj, "Set" + style_object.capitalize() + style_option.capitalize())(value)
            except AttributeError:
                # Typo fix: "rquested" -> "requested" in the original message.
                _logger.warning("Could not set requested style " + style_object.capitalize() + style_option.capitalize()
                                + " for object " + str(obj))
def make_text(x, y, text, size=0.05, angle=0, font=42, color=ROOT.kBlack, ndc=True):
    """Create a TLatex label; Python ownership is disabled so ROOT keeps the
    object alive after this function returns."""
    label = ROOT.TLatex(x, y, text)
    ROOT.SetOwnership(label, False)
    label.SetTextSize(size)
    label.SetTextAngle(angle)
    label.SetTextFont(font)
    if color is not None:
        label.SetTextColor(color)
    label.SetNDC(ndc)
    return label
def add_lumi_text(canvas, lumi, pos={'x': 0.6, 'y': 0.85}, size=0.04, split_lumi_text=False):
    """Draw integrated-luminosity and centre-of-mass-energy labels on `canvas`.

    With split_lumi_text=True the two labels are stacked (lumi on top,
    energy 0.05 below); otherwise they are drawn as one comma-joined label.
    """
    canvas.cd()
    text_lumi = '#scale[0.7]{#int}dt L = %.1f fb^{-1}' % (float(lumi))
    text_energy = '#sqrt{s} = 13 TeV'
    if split_lumi_text:
        # Bug fix: the original built the "lumi" label from text_energy,
        # so the energy string was drawn twice and the luminosity never
        # appeared in split mode.
        label_lumi = make_text(x=pos['x'], y=pos['y'], text=text_lumi, size=size)
        label_energy = make_text(x=pos['x'], y=pos['y'] - 0.05, text=text_energy, size=size)
        label_lumi.Draw('sames')
        label_energy.Draw('sames')
    else:
        label_lumi = make_text(x=pos['x'], y=pos['y'], text=','.join([text_lumi, text_energy]), size=size)
        label_lumi.Draw('sames')
    canvas.Update()
def add_atlas_label(canvas, description='', pos={'x': 0.6, 'y': 0.87}, size=0.05, offset=0.125):
    """Draw the 'ATLAS' watermark plus an optional description next to it."""
    canvas.cd()
    atlas = make_text(x=pos['x'], y=pos['y'], text='ATLAS', size=size, font=72)
    descr = make_text(x=pos['x'] + offset, y=pos['y'], text=description, size=size, font=42)
    atlas.Draw('sames')
    descr.Draw('sames')
    canvas.Update()
def set_title(self, hist, title, axis='x'):
    """Set an axis title on a histogram, a dict of histograms or a TEfficiency.

    Fix: the original referenced undefined helpers (__setTitle / setTitle),
    raising NameError on every call; route through set_title_x/set_title_y
    instead. The (unused) ``self`` parameter is kept for call compatibility.
    """
    if title is None:
        return
    if type(hist) == dict:
        for h in hist.keys():
            set_title(self, hist[h], title, axis)
        return
    if isinstance(hist, ROOT.TEfficiency):
        # axis titles live on the painted graph, which only exists after an update
        ROOT.gPad.Update()
        obj = hist.GetPaintedGraph()
    else:
        obj = hist
    if axis == 'x':
        set_title_x(obj, title)
    elif axis == 'y':
        set_title_y(obj, title)
def add_stat_box_to_canvas(canvas):
    """Clone the stat box of every TH1F on canvas and stack them vertically."""
    def retrieve_stat_box(hist):
        # draw on a scratch canvas with full statistics enabled so ROOT
        # materialises the "stats" TPaveStats object for this histogram
        ctmp = ROOT.TCanvas("c_tmp", "c_tmp")
        ctmp.cd()
        ROOT.gStyle.SetOptStat(111111)
        hist.SetStats(1)
        hist.Draw()
        ROOT.gPad.Update()
        stat_box = hist.FindObject("stats").Clone()
        ROOT.SetOwnership(stat_box, False)
        # restore the global style so other plots stay stat-box free
        ROOT.gStyle.SetOptStat(0)
        return stat_box
    hists = get_objects_from_canvas_by_type(canvas, "TH1F")
    stat_boxes = [retrieve_stat_box(hist) for hist in hists]
    canvas.cd()
    # each box takes at most 15% of the pad height, stacked from the top
    height = min(0.15, 1. / len(stat_boxes))
    offset = 0.01
    for stat_box in stat_boxes:
        index = stat_boxes.index(stat_box)
        # colour-match each box to its histogram's line colour
        color = hists[index].GetLineColor()
        stat_box.SetTextColor(color)
        stat_box.SetY1NDC(1. - (index + 1.) * height)
        # NOTE(review): Y2 uses (height + offset) while Y1 uses height only —
        # presumably intended as a small gap between boxes; confirm.
        stat_box.SetY2NDC(1. - index * (height + offset))
        stat_box.Draw("sames")
    canvas.Update()
def add_text_to_canvas(canvas, text, pos={'x': 0.6, 'y': 0.79}, size=0.04, color=None):
    """Draw a free-floating text label on canvas at NDC position pos."""
    make_text(x=pos['x'], y=pos['y'], text=text, size=size, color=color).Draw('sames')
    canvas.Update()
def set_maximum(graph_obj, maximum, axis='y'):
    """Set the maximum of the chosen axis ('x' or 'y') on graph_obj."""
    _logger.debug("Set maximum for %s to %f" % (graph_obj.GetName(), maximum))
    if axis == 'x':
        set_maximum_x(graph_obj, maximum)
    elif axis == 'y':
        set_maximum_y(graph_obj, maximum)
def set_maximum_y(graph_obj, maximum):
    """Set the y-axis maximum while preserving the current minimum."""
    if isinstance(graph_obj, ROOT.THStack):
        # a THStack manages its own axis range
        graph_obj.SetMaximum(maximum)
    else:
        set_range_y(graph_obj, get_min_y(graph_obj), maximum)
def set_maximum_x(graph_obj, maximum):
    # restrict the visible x-range; the lower edge is hard-coded to 0
    graph_obj.GetXaxis().SetRangeUser(0, maximum)
def set_minimum(graph_obj, minimum, axis='y'):
    """Set the minimum of the chosen axis ('x' or 'y') on graph_obj."""
    _logger.debug("Set minimum for %s to %f" % (graph_obj.GetName(), minimum))
    if axis == 'x':
        # keep the current upper edge of the x-axis
        graph_obj.GetXaxis().SetRangeUser(minimum, graph_obj.GetXaxis().GetXmax())
    elif axis == 'y':
        set_minimum_y(graph_obj, minimum)
def set_minimum_y(graph_obj, minimum):
    """Set the y-axis minimum while preserving the current maximum."""
    if isinstance(graph_obj, ROOT.THStack):
        # a THStack manages its own axis range
        graph_obj.SetMinimum(minimum)
    else:
        set_range_y(graph_obj, minimum, get_max_y(graph_obj))
def set_range_y(graph_obj, minimum, maximum):
    """Apply [minimum, maximum] as y-range to a THStack, TH1 or TEfficiency."""
    if isinstance(graph_obj, ROOT.THStack):
        graph_obj.SetMinimum(minimum)
        graph_obj.SetMaximum(maximum)
    elif isinstance(graph_obj, ROOT.TH1):
        graph_obj.SetMaximum(maximum)
        graph_obj.GetYaxis().SetRangeUser(minimum, maximum)
    elif isinstance(graph_obj, ROOT.TEfficiency):
        # a TEfficiency only has axes once painted — assumes it was drawn already
        graph_obj.GetPaintedGraph().GetYaxis().SetRangeUser(minimum, maximum)
def get_min_y(graph_obj):
    """Return the current y-minimum of graph_obj, or None for unsupported types."""
    if isinstance(graph_obj, (ROOT.TH1, ROOT.THStack)):
        return graph_obj.GetMinimum()
    if isinstance(graph_obj, ROOT.TEfficiency):
        return graph_obj.GetPaintedGraph().GetMinimum()
    return None
def get_max_y(graph_obj):
    """Return the current y-maximum of graph_obj, or None for unsupported types."""
    if isinstance(graph_obj, (ROOT.TH1, ROOT.THStack)):
        return graph_obj.GetMaximum()
    if isinstance(graph_obj, ROOT.TEfficiency):
        return graph_obj.GetPaintedGraph().GetMaximum()
    return None
def set_range(graph_obj, minimum=None, maximum=None, axis='y'):
    """Set an axis range on graph_obj; missing bounds keep their current value.

    Fixes: the original ignored ``axis`` when both bounds were given (always
    setting the y-range) and crashed when both bounds were None.
    """
    if minimum is None and maximum is None:
        return
    if minimum is None:
        set_maximum(graph_obj, maximum, axis)
        return
    if maximum is None:
        set_minimum(graph_obj, minimum, axis)
        return
    if axis == 'x':
        graph_obj.GetXaxis().SetRangeUser(minimum, maximum)
    else:
        set_range_y(graph_obj, minimum, maximum)
def auto_scale_y_axis(canvas, offset=1.1):
    """Rescale the primary histogram's y-axis to fit all TH1Fs on canvas.

    :param offset: headroom factor applied to the largest maximum
    """
    graph_objects = get_objects_from_canvas_by_type(canvas, "TH1F")
    # fix: use the offset parameter instead of a hard-coded 1.1
    max_y = offset * max([graph_obj.GetMaximum() for graph_obj in graph_objects])
    draw_options = [graph_obj.GetDrawOption() for graph_obj in graph_objects]
    # fix: original called filter() without the iterable argument (TypeError);
    # find the first object drawn without the "same" option — the one owning the axis
    primary_options = [opt for opt in draw_options if opt.count("same") == 0]
    first_index = draw_options.index(primary_options[0])
    first_graph_obj = graph_objects[first_index]
    set_maximum_y(first_graph_obj, max_y)
    canvas.Update()
def add_legend_to_canvas(canvas, **kwargs):
    """Build and draw a TLegend from the plottable objects found on canvas.

    Supported kwargs: xl, yl, xh, yh (legend corners, NDC); format;
    labels (list, or dict mapping histogram-name pattern -> label);
    process_configs and plot_config for label/draw-option resolution.
    """
    kwargs.setdefault("xl", 0.7)
    kwargs.setdefault("yl", 0.6)
    kwargs.setdefault("xh", 0.9)
    kwargs.setdefault("yh", 0.9)
    kwargs.setdefault("format", None)
    def convert_draw_option(process_config=None, plot_config=None):
        # translate the ROOT draw option of plot_obj (closure variable)
        # into the matching TLegend entry option string
        draw_option = plot_obj.GetDrawOption()
        if is_stacked:
            draw_option = "Hist"
        legend_option = ""
        if "hist" in draw_option.lower():
            if process_config is not None and (hasattr(process_config, "format") or hasattr(plot_config, "format")) or kwargs["format"]:
                if process_config.format.lower() == "line" or plot_config.format.lower() == "line" or kwargs["format"] == "line":
                    legend_option += "L"
            else:
                legend_option += "F"
        if "l" in draw_option:
            legend_option += "L"
        if "p" in draw_option or "E" in draw_option:
            legend_option += "P"
        if re.match(r"e\d", draw_option.lower()):
            legend_option += "F"
        if not legend_option:
            _logger.error("Unable to parse legend option from {:s}".format(draw_option))
        return legend_option
    legend = ROOT.TLegend(kwargs["xl"], kwargs["yl"], kwargs["xh"], kwargs["yh"])
    ROOT.SetOwnership(legend, False)
    legend.SetTextSize(0.025)
    labels = None
    stacks = []
    if "labels" in kwargs:
        labels = kwargs["labels"]
    if "labels" not in kwargs or not isinstance(kwargs["labels"], dict):
        plot_objects = get_objects_from_canvas_by_type(canvas, "TH1F")
        plot_objects += get_objects_from_canvas_by_type(canvas, "TH1D")
        stacks = get_objects_from_canvas_by_type(canvas, "THStack")
        plot_objects += get_objects_from_canvas_by_type(canvas, "TEfficiency")
    else:
        labels = {}
        plot_objects = []
        # fix: iteritems() is Python-2 only; items() behaves identically here
        for hist_pattern, lab in kwargs["labels"].items():
            plot_objects.append(get_objects_from_canvas_by_name(canvas, hist_pattern)[0])
            labels[get_objects_from_canvas_by_name(canvas, hist_pattern)[0].GetName()] = lab
    stacked_objects = None
    # fix: original compared with "is not 0" (identity test against a literal)
    if len(stacks) != 0:
        stacked_objects = stacks[0].GetHists()
        plot_objects += stacked_objects
    for plot_obj in plot_objects:
        label = None
        process_config = None
        # push the statistical-uncertainty entry to the end of the legend
        # NOTE(review): appending while iterating; with more than one
        # "stat.unc" object this would never terminate — confirm single use.
        if "stat.unc" in plot_obj.GetName() and plot_obj != plot_objects[-1]:
            plot_objects.append(plot_obj)
            continue
        if "process_configs" in kwargs and kwargs["process_configs"] is not None:
            try:
                process_config = find_process_config(plot_obj.GetName().split("_")[-1], kwargs["process_configs"])
                label = process_config.label
            except AttributeError:
                pass
        # fix: original tested the string literal "labels" against None,
        # which is always true; test the resolved labels object instead
        if labels is not None:
            if isinstance(labels, list):
                label = labels[plot_objects.index(plot_obj)]
            if isinstance(labels, dict):
                if plot_obj.GetName() in labels:
                    label = labels[plot_obj.GetName()]
        is_stacked = False
        if stacked_objects and plot_obj in stacked_objects:
            is_stacked = True
        if label is None:
            continue
        plot_config = kwargs["plot_config"] if "plot_config" in kwargs else None
        legend.AddEntry(plot_obj, label, convert_draw_option(process_config, plot_config))
    canvas.cd()
    legend.Draw("sames")
    canvas.Update()
# update disabling lumi text  (NOTE: stray commit-message line; commented out so the module parses)
import re
import ROOT
import os
from PyAnalysisTools.base import InvalidInputError, _logger
from PyAnalysisTools.ROOTUtils.ObjectHandle import get_objects_from_canvas_by_type, get_objects_from_canvas_by_name
from PyAnalysisTools.PlottingUtils.PlotConfig import get_style_setters_and_values, find_process_config
def load_atlas_style():
    """Load the official ATLAS ROOT plotting style macro and activate it."""
    # fix: compute base_path before the try block — the original assigned it
    # inside, so the error handler could hit an unbound local; also dropped
    # the pointless single-argument os.path.join and the unused 'as e'.
    base_path = os.path.dirname(os.path.realpath(__file__))
    try:
        ROOT.gROOT.LoadMacro(os.path.join(base_path, 'AtlasStyle/AtlasStyle.C'))
        ROOT.SetAtlasStyle()
    except Exception:
        _logger.error("Could not find Atlas style files in %s" % os.path.join(base_path, 'AtlasStyle'))
def apply_style(obj, plot_config, process_config, index=None):
    """Apply the style attribute and colour resolved from the configs to obj."""
    setters, style_attr, color = get_style_setters_and_values(plot_config, process_config, index)
    if style_attr is not None:
        for prefix in setters:
            getattr(obj, "Set" + prefix + "Style")(style_attr)
    if color is not None:
        for prefix in setters:
            getattr(obj, "Set" + prefix + "Color")(color)
def decorate_canvas(canvas, plot_config):
    """Apply watermark, lumi label, grid and decoration text per plot_config."""
    if hasattr(plot_config, "watermark"):
        add_atlas_label(canvas, plot_config.watermark, {"x": 0.15, "y": 0.96}, size=0.03, offset=0.05)
    lumi = getattr(plot_config, "lumi", None)
    if lumi is not None and lumi >= 0:
        add_lumi_text(canvas, lumi, {"x": 0.6, "y": 0.9})
    if getattr(plot_config, "grid", False) is True:
        canvas.SetGrid()
    if hasattr(plot_config, "decor_text"):
        add_text_to_canvas(canvas, plot_config.decor_text, {"x": 0.2, "y": 0.8})
def set_title_x(obj, title):
    """Set the x-axis title of obj.

    :raises TypeError: if obj does not expose a GetXaxis method
    """
    if not hasattr(obj, "GetXaxis"):
        raise TypeError
    try:
        axis = obj.GetXaxis()
        axis.SetTitle(title)
    except ReferenceError:
        # PyROOT raises ReferenceError when the underlying C++ object is gone
        _logger.error("Nil object {:s}".format(obj.GetName()))
def set_title_y(obj, title):
    """Set the y-axis title of obj.

    :raises TypeError: if obj does not expose a GetYaxis method
    """
    if not hasattr(obj, "GetYaxis"):
        raise TypeError
    try:
        axis = obj.GetYaxis()
        axis.SetTitle(title)
    except ReferenceError:
        # PyROOT raises ReferenceError when the underlying C++ object is gone
        _logger.error("Nil object {:s}".format(obj.GetName()))
def set_style_options(obj, style):
    """Apply marker/line style options to obj.

    style maps an attribute group ('marker' or 'line') to a dict of
    option name -> value; each pair is applied via Set<Group><Option>(value).
    Unsupported groups are silently skipped.

    :raises InvalidInputError: if style (or one of its values) is not a dict
    """
    allowed_attributes = ["marker", "line"]
    if not isinstance(style, dict):
        raise InvalidInputError("Invalid style config. Needs to be dictionary")
    for style_object, style_options in style.items():
        if style_object.lower() not in allowed_attributes:
            continue
        if not isinstance(style_options, dict):
            raise InvalidInputError("Invalid style option for " + style_object + ". Requires dict, but received " +
                                    str(type(style_options)))
        for style_option, value in style_options.items():
            try:
                getattr(obj, "Set" + style_object.capitalize() + style_option.capitalize())(value)
            except AttributeError:
                # fix: typo 'rquested' -> 'requested' in the warning message
                _logger.warning("Could not set requested style " + style_object.capitalize() + style_option.capitalize()
                                + " for object " + str(obj))
def make_text(x, y, text, size=0.05, angle=0, font=42, color=ROOT.kBlack, ndc=True):
    """Create a TLatex label; ownership is released so ROOT keeps it alive."""
    latex = ROOT.TLatex(x, y, text)
    # prevent python from deleting the label once it goes out of scope
    ROOT.SetOwnership(latex, False)
    latex.SetTextSize(size)
    latex.SetTextAngle(angle)
    latex.SetTextFont(font)
    latex.SetNDC(ndc)
    if color is not None:
        latex.SetTextColor(color)
    return latex
def add_lumi_text(canvas, lumi, pos={'x': 0.6, 'y': 0.85}, size=0.04, split_lumi_text=False):
    """Add integrated-luminosity and centre-of-mass-energy labels to canvas.

    :param canvas: canvas to decorate
    :param lumi: integrated luminosity in fb^-1
    :param pos: NDC coordinates of the label (NB: mutable default, shared between calls)
    :param size: text size
    :param split_lumi_text: if True draw lumi and energy on two separate lines
    """
    canvas.cd()
    text_lumi = '#scale[0.7]{#int}dt L = %.1f fb^{-1}' % (float(lumi))
    text_energy = '#sqrt{s} = 13 TeV'
    if split_lumi_text:
        # fix: original built the energy label twice and never drew the lumi label
        label_lumi = make_text(x=pos['x'], y=pos['y'], text=text_lumi, size=size)
        label_energy = make_text(x=pos['x'], y=pos['y'] - 0.05, text=text_energy, size=size)
        label_lumi.Draw('sames')
        label_energy.Draw('sames')
    else:
        label_lumi = make_text(x=pos['x'], y=pos['y'], text=','.join([text_lumi, text_energy]), size=size)
        label_lumi.Draw('sames')
    canvas.Update()
def add_atlas_label(canvas, description='', pos={'x': 0.6, 'y': 0.87}, size=0.05, offset=0.125):
    """Draw the 'ATLAS' watermark plus an optional description next to it."""
    canvas.cd()
    atlas = make_text(x=pos['x'], y=pos['y'], text='ATLAS', size=size, font=72)
    descr = make_text(x=pos['x'] + offset, y=pos['y'], text=description, size=size, font=42)
    atlas.Draw('sames')
    descr.Draw('sames')
    canvas.Update()
def set_title(self, hist, title, axis='x'):
    """Set an axis title on a histogram, a dict of histograms or a TEfficiency.

    Fix: the original referenced undefined helpers (__setTitle / setTitle),
    raising NameError on every call; route through set_title_x/set_title_y
    instead. The (unused) ``self`` parameter is kept for call compatibility.
    """
    if title is None:
        return
    if type(hist) == dict:
        for h in hist.keys():
            set_title(self, hist[h], title, axis)
        return
    if isinstance(hist, ROOT.TEfficiency):
        # axis titles live on the painted graph, which only exists after an update
        ROOT.gPad.Update()
        obj = hist.GetPaintedGraph()
    else:
        obj = hist
    if axis == 'x':
        set_title_x(obj, title)
    elif axis == 'y':
        set_title_y(obj, title)
def add_stat_box_to_canvas(canvas):
    """Clone the stat box of every TH1F on canvas and stack them vertically."""
    def retrieve_stat_box(hist):
        # draw on a scratch canvas with full statistics enabled so ROOT
        # materialises the "stats" TPaveStats object for this histogram
        ctmp = ROOT.TCanvas("c_tmp", "c_tmp")
        ctmp.cd()
        ROOT.gStyle.SetOptStat(111111)
        hist.SetStats(1)
        hist.Draw()
        ROOT.gPad.Update()
        stat_box = hist.FindObject("stats").Clone()
        ROOT.SetOwnership(stat_box, False)
        # restore the global style so other plots stay stat-box free
        ROOT.gStyle.SetOptStat(0)
        return stat_box
    hists = get_objects_from_canvas_by_type(canvas, "TH1F")
    stat_boxes = [retrieve_stat_box(hist) for hist in hists]
    canvas.cd()
    # each box takes at most 15% of the pad height, stacked from the top
    height = min(0.15, 1. / len(stat_boxes))
    offset = 0.01
    for stat_box in stat_boxes:
        index = stat_boxes.index(stat_box)
        # colour-match each box to its histogram's line colour
        color = hists[index].GetLineColor()
        stat_box.SetTextColor(color)
        stat_box.SetY1NDC(1. - (index + 1.) * height)
        # NOTE(review): Y2 uses (height + offset) while Y1 uses height only —
        # presumably intended as a small gap between boxes; confirm.
        stat_box.SetY2NDC(1. - index * (height + offset))
        stat_box.Draw("sames")
    canvas.Update()
def add_text_to_canvas(canvas, text, pos={'x': 0.6, 'y': 0.79}, size=0.04, color=None):
    """Draw a free-floating text label on canvas at NDC position pos."""
    make_text(x=pos['x'], y=pos['y'], text=text, size=size, color=color).Draw('sames')
    canvas.Update()
def set_maximum(graph_obj, maximum, axis='y'):
    """Set the maximum of the chosen axis ('x' or 'y') on graph_obj."""
    _logger.debug("Set maximum for %s to %f" % (graph_obj.GetName(), maximum))
    if axis == 'x':
        set_maximum_x(graph_obj, maximum)
    elif axis == 'y':
        set_maximum_y(graph_obj, maximum)
def set_maximum_y(graph_obj, maximum):
    """Set the y-axis maximum while preserving the current minimum."""
    if isinstance(graph_obj, ROOT.THStack):
        # a THStack manages its own axis range
        graph_obj.SetMaximum(maximum)
    else:
        set_range_y(graph_obj, get_min_y(graph_obj), maximum)
def set_maximum_x(graph_obj, maximum):
    # restrict the visible x-range; the lower edge is hard-coded to 0
    graph_obj.GetXaxis().SetRangeUser(0, maximum)
def set_minimum(graph_obj, minimum, axis='y'):
    """Set the minimum of the chosen axis ('x' or 'y') on graph_obj."""
    _logger.debug("Set minimum for %s to %f" % (graph_obj.GetName(), minimum))
    if axis == 'x':
        # keep the current upper edge of the x-axis
        graph_obj.GetXaxis().SetRangeUser(minimum, graph_obj.GetXaxis().GetXmax())
    elif axis == 'y':
        set_minimum_y(graph_obj, minimum)
def set_minimum_y(graph_obj, minimum):
    """Set the y-axis minimum while preserving the current maximum."""
    if isinstance(graph_obj, ROOT.THStack):
        # a THStack manages its own axis range
        graph_obj.SetMinimum(minimum)
    else:
        set_range_y(graph_obj, minimum, get_max_y(graph_obj))
def set_range_y(graph_obj, minimum, maximum):
    """Apply [minimum, maximum] as y-range to a THStack, TH1 or TEfficiency."""
    if isinstance(graph_obj, ROOT.THStack):
        graph_obj.SetMinimum(minimum)
        graph_obj.SetMaximum(maximum)
    elif isinstance(graph_obj, ROOT.TH1):
        graph_obj.SetMaximum(maximum)
        graph_obj.GetYaxis().SetRangeUser(minimum, maximum)
    elif isinstance(graph_obj, ROOT.TEfficiency):
        # a TEfficiency only has axes once painted — assumes it was drawn already
        graph_obj.GetPaintedGraph().GetYaxis().SetRangeUser(minimum, maximum)
def get_min_y(graph_obj):
    """Return the current y-minimum of graph_obj, or None for unsupported types."""
    if isinstance(graph_obj, (ROOT.TH1, ROOT.THStack)):
        return graph_obj.GetMinimum()
    if isinstance(graph_obj, ROOT.TEfficiency):
        return graph_obj.GetPaintedGraph().GetMinimum()
    return None
def get_max_y(graph_obj):
    """Return the current y-maximum of graph_obj, or None for unsupported types."""
    if isinstance(graph_obj, (ROOT.TH1, ROOT.THStack)):
        return graph_obj.GetMaximum()
    if isinstance(graph_obj, ROOT.TEfficiency):
        return graph_obj.GetPaintedGraph().GetMaximum()
    return None
def set_range(graph_obj, minimum=None, maximum=None, axis='y'):
    """Set an axis range on graph_obj; missing bounds keep their current value.

    Fixes: the original ignored ``axis`` when both bounds were given (always
    setting the y-range) and crashed when both bounds were None.
    """
    if minimum is None and maximum is None:
        return
    if minimum is None:
        set_maximum(graph_obj, maximum, axis)
        return
    if maximum is None:
        set_minimum(graph_obj, minimum, axis)
        return
    if axis == 'x':
        graph_obj.GetXaxis().SetRangeUser(minimum, maximum)
    else:
        set_range_y(graph_obj, minimum, maximum)
def auto_scale_y_axis(canvas, offset=1.1):
    """Rescale the primary histogram's y-axis to fit all TH1Fs on canvas.

    :param offset: headroom factor applied to the largest maximum
    """
    graph_objects = get_objects_from_canvas_by_type(canvas, "TH1F")
    # fix: use the offset parameter instead of a hard-coded 1.1
    max_y = offset * max([graph_obj.GetMaximum() for graph_obj in graph_objects])
    draw_options = [graph_obj.GetDrawOption() for graph_obj in graph_objects]
    # fix: original called filter() without the iterable argument (TypeError);
    # find the first object drawn without the "same" option — the one owning the axis
    primary_options = [opt for opt in draw_options if opt.count("same") == 0]
    first_index = draw_options.index(primary_options[0])
    first_graph_obj = graph_objects[first_index]
    set_maximum_y(first_graph_obj, max_y)
    canvas.Update()
def add_legend_to_canvas(canvas, **kwargs):
    """Build and draw a TLegend from the plottable objects found on canvas.

    Supported kwargs: xl, yl, xh, yh (legend corners, NDC); format;
    labels (list, or dict mapping histogram-name pattern -> label);
    process_configs and plot_config for label/draw-option resolution.
    """
    kwargs.setdefault("xl", 0.7)
    kwargs.setdefault("yl", 0.6)
    kwargs.setdefault("xh", 0.9)
    kwargs.setdefault("yh", 0.9)
    kwargs.setdefault("format", None)
    def convert_draw_option(process_config=None, plot_config=None):
        # translate the ROOT draw option of plot_obj (closure variable)
        # into the matching TLegend entry option string
        draw_option = plot_obj.GetDrawOption()
        if is_stacked:
            draw_option = "Hist"
        legend_option = ""
        if "hist" in draw_option.lower():
            if process_config is not None and (hasattr(process_config, "format") or hasattr(plot_config, "format")) or kwargs["format"]:
                if process_config.format.lower() == "line" or plot_config.format.lower() == "line" or kwargs["format"] == "line":
                    legend_option += "L"
            else:
                legend_option += "F"
        if "l" in draw_option:
            legend_option += "L"
        if "p" in draw_option or "E" in draw_option:
            legend_option += "P"
        if re.match(r"e\d", draw_option.lower()):
            legend_option += "F"
        if not legend_option:
            _logger.error("Unable to parse legend option from {:s}".format(draw_option))
        return legend_option
    legend = ROOT.TLegend(kwargs["xl"], kwargs["yl"], kwargs["xh"], kwargs["yh"])
    ROOT.SetOwnership(legend, False)
    legend.SetTextSize(0.025)
    labels = None
    stacks = []
    if "labels" in kwargs:
        labels = kwargs["labels"]
    if "labels" not in kwargs or not isinstance(kwargs["labels"], dict):
        plot_objects = get_objects_from_canvas_by_type(canvas, "TH1F")
        plot_objects += get_objects_from_canvas_by_type(canvas, "TH1D")
        stacks = get_objects_from_canvas_by_type(canvas, "THStack")
        plot_objects += get_objects_from_canvas_by_type(canvas, "TEfficiency")
    else:
        labels = {}
        plot_objects = []
        # fix: iteritems() is Python-2 only; items() behaves identically here
        for hist_pattern, lab in kwargs["labels"].items():
            plot_objects.append(get_objects_from_canvas_by_name(canvas, hist_pattern)[0])
            labels[get_objects_from_canvas_by_name(canvas, hist_pattern)[0].GetName()] = lab
    stacked_objects = None
    # fix: original compared with "is not 0" (identity test against a literal)
    if len(stacks) != 0:
        stacked_objects = stacks[0].GetHists()
        plot_objects += stacked_objects
    for plot_obj in plot_objects:
        label = None
        process_config = None
        # push the statistical-uncertainty entry to the end of the legend
        # NOTE(review): appending while iterating; with more than one
        # "stat.unc" object this would never terminate — confirm single use.
        if "stat.unc" in plot_obj.GetName() and plot_obj != plot_objects[-1]:
            plot_objects.append(plot_obj)
            continue
        if "process_configs" in kwargs and kwargs["process_configs"] is not None:
            try:
                process_config = find_process_config(plot_obj.GetName().split("_")[-1], kwargs["process_configs"])
                label = process_config.label
            except AttributeError:
                pass
        # fix: original tested the string literal "labels" against None,
        # which is always true; test the resolved labels object instead
        if labels is not None:
            if isinstance(labels, list):
                label = labels[plot_objects.index(plot_obj)]
            if isinstance(labels, dict):
                if plot_obj.GetName() in labels:
                    label = labels[plot_obj.GetName()]
        is_stacked = False
        if stacked_objects and plot_obj in stacked_objects:
            is_stacked = True
        if label is None:
            continue
        plot_config = kwargs["plot_config"] if "plot_config" in kwargs else None
        legend.AddEntry(plot_obj, label, convert_draw_option(process_config, plot_config))
    canvas.cd()
    legend.Draw("sames")
    canvas.Update()
# --- concatenation boundary: anima Maya publish-checks module follows ---
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2014, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import os
import datetime
import pymel.core as pm
import maya.cmds as mc
from anima import stalker_server_internal_address
from anima.publish import (clear_publishers, publisher, staging,
POST_PUBLISHER_TYPE)
from anima.exc import PublishError
from anima.repr import Representation
from anima.utils import utc_to_local
from anima.env.mayaEnv import auxiliary
# start with a clean registry so re-importing this module does not
# accumulate duplicate publisher callbacks
clear_publishers()
# cap how many offending node names are listed in error dialogs
MAX_NODE_DISPLAY = 80
# TODO: this should be depending on to the project some projects still can
# use mental ray
# shader node types accepted by publish checks (Arnold + stock Maya shaders)
VALID_MATERIALS = [
    u'aiAmbientOcclusion',
    u'aiHair',
    u'aiRaySwitch',
    u'aiShadowCatcher',
    u'aiSkin',
    u'aiSkinSss',
    u'aiStandard',
    u'aiUtility',
    u'aiWireframe',
    u'displacementShader',
    u'lambert',
    u'blinn',
    u'layeredShader',
    u'oceanShader',
    u'phong',
    u'phongE',
    u'rampShader',
    u'surfaceShader',
]
#*********#
# GENERIC #
#*********#
# @publisher
# def delete_turtle_nodes():
# """deletes the Turtle related nodes
# """
# # deletes Turtle from scene
# turtle_node_names = [
# 'TurtleRenderOptions',
# 'TurtleDefaultBakeLayer',
# 'TurtleBakeLayerManager',
# 'TurtleUIOptions'
# ]
#
# for node_name in turtle_node_names:
# try:
# node = pm.PyNode(node_name)
# node.unlock()
# pm.delete(node)
# except pm.MayaNodeError:
# pass
#
# try:
# pymel_undo_node = pm.PyNode('__pymelUndoNode')
# pymel_undo_node.unlock()
# pm.delete(pymel_undo_node)
# except pm.MayaNodeError:
# pass
#
# pm.unloadPlugin('Turtle', force=1)
#
# pm.warning('Turtle deleted successfully.')
@publisher
def delete_unknown_nodes():
    """deletes unknown nodes

    Unknown nodes are typically left-overs of unloaded plug-ins; they are
    unlocked first because locked nodes cannot be deleted.
    """
    # delete the unknown nodes
    unknown_nodes = pm.ls(type='unknown')
    # unlock each possible locked unknown nodes
    for node in unknown_nodes:
        node.unlock()
    pm.delete(unknown_nodes)
@publisher
def check_time_logs():
    """do not allow publishing if there is no time logs for the task, do that
    only for non WFD (waiting-for-dependency) tasks whose start date has passed
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    if v:
        task = v.task
        # NOTE(review): compares local wall-clock time with the task start
        # converted via utc_to_local() — presumably both local; confirm.
        now = datetime.datetime.now()
        task_start = task.computed_start if task.computed_start else task.start
        task_start = utc_to_local(task_start)
        if task.status.code != 'WFD' and task_start <= now:
            if len(task.time_logs) == 0:
                raise PublishError(
                    '<p>Please create a TimeLog before publishing this '
                    'asset:<br><br>'
                    '<a href="%s/tasks/%s/view">Open In WebBrowser</a>'
                    '</p>' % (stalker_server_internal_address, task.id)
                )
@publisher
def check_node_names_with_bad_characters():
    """checks node names and ensures that there are no nodes with ord(c) > 127
    """
    nodes_with_bad_name = []
    for node in pm.ls():
        # a name is "bad" if it contains '?' or any non-ASCII character
        if any(map(lambda x: x == '?' or ord(x) > 127, node.name())):
            nodes_with_bad_name.append(node)
    if len(nodes_with_bad_name) > 0:
        pm.select(nodes_with_bad_name)
        # NOTE(review): slicing a map() result is Python-2 only — confirm
        # the target interpreter before porting to Python 3.
        raise PublishError(
            'There are nodes with <b>unknown characters</b> in their names:'
            '<br><br>'
            '%s' %
            '<br>'.join(
                map(lambda x: x.name(),
                    nodes_with_bad_name)[:MAX_NODE_DISPLAY]
            )
        )
@publisher
def delete_unused_nodes():
    """deletes unused shading nodes
    """
    # MLdeleteUnused is Maya's own Hypershade 'Delete Unused Nodes' command
    num_of_items_deleted = pm.mel.eval('MLdeleteUnused')
    if num_of_items_deleted:
        # do not raise any error just warn the user
        pm.warning('Deleted unused nodes during Publish operation!!')
@publisher
def check_representations():
    """checks if the referenced versions are all matching the representation
    type of the current version
    """
    ref_reprs = []
    wrong_reprs = []
    v = staging.get('version')
    if v:
        r = Representation(version=v)
        current_repr = r.repr
        # For **Base** representation
        # allow any type of representation to be present in the scene
        if r.is_base():
            return
        for ref in pm.listReferences():
            # NOTE(review): .repr is presumably provided by anima's
            # FileReference extension, not stock pymel — confirm.
            ref_repr = ref.repr
            if ref_repr is None:
                # skip this one this is not related to a Stalker Version
                continue
            ref_reprs.append([ref, ref_repr])
            if ref_repr != current_repr:
                wrong_reprs.append(ref)
    else:
        return
    if len(wrong_reprs):
        # build a colour-coded (red = mismatch) HTML line per reference
        ref_repr_labels = []
        for ref_repr in ref_reprs:
            ref = ref_repr[0]
            repr_name = ref_repr[1]
            color = 'red' if current_repr != repr_name else 'green'
            ref_repr_labels.append(
                '<span style="color: %(color)s">%(repr_name)s</span> -> '
                '%(ref)s' %
                {
                    'color': color,
                    'repr_name': repr_name,
                    'ref': ref.refNode.name()
                }
            )
        raise PublishError(
            'You are saving as the <b>%s</b> representation<br>'
            'for the current scene, but the following references<br>'
            'are not <b>%s</b> representations of their versions:<br><br>'
            '%s' % (
                current_repr, current_repr,
                '<br>'.join(ref_repr_labels[:MAX_NODE_DISPLAY])
            )
        )
@publisher
def cleanup_intermediate_objects():
    """deletes any unused intermediate object in the current scene
    """
    # an intermediate mesh with no input/output connections that does not
    # come from a reference is a left-over (e.g. from deleted deformer history)
    pm.delete(
        [node
         for node in pm.ls(type='mesh')
         if len(node.inputs()) == 0 and len(node.outputs()) == 0 and
         node.intermediateObject.get() and node.referenceFile() is None]
    )
@publisher
def check_old_object_smoothing():
    """checking if there are objects with legacy Smooth Mesh Preview enabled
    """
    meshes_with_smooth_mesh_preview = []
    for node in pm.ls(type='mesh'):
        # displaySmoothMesh != 0 means smooth-mesh preview is active
        if node.displaySmoothMesh.get() != 0:
            meshes_with_smooth_mesh_preview.append(node.getParent())
    if len(meshes_with_smooth_mesh_preview) > 0:
        pm.select(meshes_with_smooth_mesh_preview)
        raise PublishError(
            'Please do not use <b>Smooth Mesh</b> on following nodes:<br><br>'
            '%s' %
            '<br>'.join(
                map(lambda x: x.name(),
                    meshes_with_smooth_mesh_preview[:MAX_NODE_DISPLAY])
            )
        )
@publisher
def check_if_previous_version_references():
    """check if a previous version of the same task is referenced to the scene
    """
    from anima.env.mayaEnv import Maya
    m = Maya()
    ver = m.get_current_version()
    if ver is None:
        # scene not managed by Stalker — nothing to check
        return
    same_version_references = []
    for ref in pm.listReferences():  # check only 1st level references
        ref_version = m.get_version_from_full_path(ref.path)
        if ref_version:
            # same task + same take means the scene references itself
            if ref_version.task == ver.task \
               and ref_version.take_name == ver.take_name:
                same_version_references.append(ref)
    if len(same_version_references):
        print('The following nodes are references to an older version of this '
              'scene')
        print(
            '\n'.join(map(lambda x: x.refNode.name(), same_version_references))
        )
        raise PublishError(
            'The current scene contains a <b>reference</b> to a<br>'
            '<b>previous version</b> of itself.<br><br>'
            'Please remove it!!!'
        )
@publisher
def delete_empty_namespaces():
    """checks and deletes empty namespaces
    """
    # only allow namespaces with DAG objects in it and no child namespaces
    empty_namespaces = [
        ns for ns in pm.listNamespaces(recursive=True)
        if len(pm.ls(ns.listNodes(), dag=True, mat=True)) == 0
        and len(ns.listNamespaces()) == 0
    ]
    # remove all empty
    for ns in empty_namespaces:
        # mnr = mergeNamespaceWithRoot: keep any residual content
        pm.namespace(rm=ns, mnr=1)
    # if len(empty_namespaces):
    #     raise PublishError(
    #         'There are empty <b>namespaces</b><br><br>'
    #         'Please remove them!!!'
    #     )
@publisher
def check_only_published_versions_are_used():
    """checks if only published versions are used in this scene
    """
    non_published_versions = []
    for ref in pm.listReferences():
        # NOTE(review): .version presumably comes from anima's FileReference
        # extension mapping the reference to a Stalker Version — confirm.
        v = ref.version
        if v and not v.is_published:
            non_published_versions.append(v)
    if len(non_published_versions):
        raise PublishError(
            'Please use only <b>published</b> versions for:<br><br>%s' %
            '<br>'.join(
                map(lambda x: x.nice_name,
                    non_published_versions[:MAX_NODE_DISPLAY])
            )
        )
#*******#
# MODEL #
#*******#
@publisher('model')
def check_no_references():
    """there should be no references in a published Model scene
    """
    if len(pm.listReferences()):
        raise PublishError(
            'There should be no <b>References</b> in a <b>Model</b> scene.'
        )
@publisher('model')
def check_history():
    """there should be no history on the objects
    """
    # node types that are always present in a clean construction history
    excluded_types = ['mesh', 'shadingEngine', 'groupId']
    nodes_with_history = []
    # get all shapes
    all_shapes = pm.ls(type='mesh')
    for node in all_shapes:
        history_nodes = []
        # pdo=1: pruned dag objects, lv=1: one level of history only
        for h_node in node.listHistory(pdo=1, lv=1):
            if h_node.type() not in excluded_types:
                history_nodes.append(h_node)
        if len(history_nodes) > 0:
            nodes_with_history.append(node)
    if len(nodes_with_history):
        pm.select(nodes_with_history)
        # there is history
        raise PublishError(
            'There is history on:\n\n'
            '%s'
            '\n\n'
            'there should be no '
            'history in Model versions' %
            '\n'.join(
                map(lambda x: x.name(),
                    nodes_with_history[:MAX_NODE_DISPLAY])
            )
        )
@publisher('model')
def check_if_default_shader():
    """check if only default shader is assigned
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    # NOTE(review): threshold of 2 presumably accounts for the two default
    # materials always present in a Maya scene — confirm.
    if len(pm.ls(mat=1)) > 2:
        raise PublishError(
            'Use only lambert1 as the shader!'
        )
@publisher('model')
def check_if_root_nodes_have_no_transformation():
    """checks if transform nodes directly under world have 0 transformations
    (identity translate/rotate, unit scale)
    """
    root_transform_nodes = auxiliary.get_root_nodes()
    non_freezed_root_nodes = []
    for node in root_transform_nodes:
        t = node.t.get()
        r = node.r.get()
        s = node.s.get()
        # NOTE(review): exact float comparison — values that are only
        # numerically close to identity will be flagged; confirm intended.
        if t.x != 0 or t.y != 0 or t.z != 0 \
           or r.x != 0 or r.y != 0 or r.z != 0 \
           or s.x != 1 or s.y != 1 or s.z != 1:
            non_freezed_root_nodes.append(node)
    if len(non_freezed_root_nodes):
        pm.select(non_freezed_root_nodes)
        raise PublishError(
            'Please freeze the following node transformations:\n\n%s' %
            '\n'.join(
                map(lambda x: x.name(),
                    non_freezed_root_nodes[:MAX_NODE_DISPLAY])
            )
        )
@publisher('model')
def check_if_leaf_mesh_nodes_have_no_transformation():
    """checks if all the Mesh transforms have 0 transformation, but it is
    allowed to move the mesh nodes in space with a parent group node.
    """
    mesh_nodes_with_transform_children = []
    for node in pm.ls(dag=1, type='mesh'):
        parent = node.getParent()
        # any transform sitting under a mesh's transform is suspicious
        tra_under_shape = pm.ls(
            parent.listRelatives(),
            type='transform'
        )
        if len(tra_under_shape):
            mesh_nodes_with_transform_children.append(parent)
    if len(mesh_nodes_with_transform_children):
        pm.select(mesh_nodes_with_transform_children)
        raise PublishError(
            'The following meshes have other objects parented to them:'
            '\n\n%s'
            '\n\nPlease remove any object under them!' %
            '\n'.join(
                map(lambda x: x.name(),
                    mesh_nodes_with_transform_children[:MAX_NODE_DISPLAY])
            )
        )
@publisher('model')
def check_model_quality():
    """checks the quality of the model via Maya's Polygon Cleanup; anything
    it selects (n-gons, holes, lamina faces, non-manifold geometry) fails
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    pm.select(None)
    # run Mesh -> Cleanup in "select only" mode; offending components end up
    # in the active selection list
    pm.mel.eval(
        'polyCleanupArgList 3 { "1","2","0","0","1","0","0","0","0","1e-005",'
        '"0","0","0","0","0","2","1" };'
    )
    if len(pm.ls(sl=1)) > 0:
        raise RuntimeError(
            """There are issues in your model please run:<br><br>
            <b>PolygonMesh -> Mesh -> Cleanup...</b><br><br>
            <ul>Check:
            <li>Faces with more than 4 sides</li>
            <li>Faces with holes</li>
            <li>Lamina Faces</li>
            <li>Non-manifold Geometry</li>
            </ul>"""
        )
@publisher('model')
def check_anim_layers():
    """check if there are animation layers on the scene
    """
    if len(pm.ls(type='animLayer')) > 0:
        raise PublishError(
            'There should be no <b>Animation Layers</b> in the scene!!!'
        )
@publisher('model')
def check_display_layer():
    """check if there are display layers
    """
    # threshold is 1 because Maya's defaultLayer is always present
    if len(pm.ls(type='displayLayer')) > 1:
        raise PublishError(
            'There should be no <b>Display Layers</b> in the scene!!!'
        )
@publisher('model')
def check_extra_cameras():
    """checking if there are extra cameras
    """
    # a default Maya scene ships with exactly 4 cameras (persp/top/front/side)
    if len(pm.ls(type='camera')) > 4:
        raise PublishError('There should be no extra cameras in your scene!')
@publisher('model')
def check_empty_groups():
    """check if there are empty groups
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    empty_groups = []
    for node in pm.ls(type='transform'):
        # a transform with no children of any kind is an empty group
        if len(node.listRelatives(children=1)) == 0:
            empty_groups.append(node)
    if len(empty_groups):
        pm.select(empty_groups)
        raise PublishError(
            'There are <b>empty groups</b> in your scene, '
            'please remove them!!!'
        )
@publisher('model')
def check_empty_shapes():
    """checks if there are empty mesh nodes
    """
    empty_shape_nodes = []
    for node in pm.ls(type='mesh'):
        if node.numVertices() == 0:
            empty_shape_nodes.append(node)
    if len(empty_shape_nodes) > 0:
        # select the transforms so the user sees the offenders in the outliner
        pm.select(map(
            lambda x: x.getParent(),
            empty_shape_nodes
        ))
        raise PublishError(
            'There are <b>meshes with no geometry</b> in your scene, '
            'please delete them!!!'
        )
@publisher('model')
def check_uv_existence():
    """check if there are uvs in all objects
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    all_meshes = pm.ls(type='mesh')
    nodes_with_no_uvs = []
    for node in all_meshes:
        # intermediate (history) shapes are not checked
        if not node.getAttr('intermediateObject'):
            if not len(node.getUVs(uvSet='map1')[0]):
                nodes_with_no_uvs.append(node)
    if len(nodes_with_no_uvs) > 0:
        # get transform nodes
        # NOTE(review): slicing the map() result below is Python-2 only —
        # confirm the target interpreter before porting to Python 3.
        tra_nodes = map(
            lambda x: x.getParent(),
            nodes_with_no_uvs
        )
        pm.select(tra_nodes)
        raise RuntimeError(
            """There are nodes with <b>no UVs</b>:
            <br><br>%s""" %
            '<br>'.join(
                map(lambda x: x.name(),
                    tra_nodes[:MAX_NODE_DISPLAY])
            )
        )
@publisher('model')
def check_out_of_space_uvs():
    """checks if there are uvs with u values that are bigger than 10.0
    or any negative u/v values
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    all_meshes = pm.ls(type='mesh')
    mesh_count = len(all_meshes)
    nodes_with_out_of_space_uvs = []
    from anima.ui.progress_dialog import ProgressDialogManager
    pdm = ProgressDialogManager()
    if not pm.general.about(batch=1) and mesh_count:
        pdm.use_ui = True
    caller = pdm.register(mesh_count, 'check_out_of_space_uvs()')
    for node in all_meshes:
        u, v = node.getUVs()
        if not len(u):
            # meshes without UVs are reported by check_uv_existence instead
            caller.step()
            continue
        u = sorted(u)
        # fix: v was tested unsorted, so only the first UV's v coordinate was
        # ever checked; sort it like u so v[0] is the true minimum
        v = sorted(v)
        if u[0] < 0.0 or u[-1] > 10.0 or v[0] < 0.0:
            nodes_with_out_of_space_uvs.append(node)
        caller.step()
    if len(nodes_with_out_of_space_uvs):
        # get transform nodes
        tra_nodes = map(
            lambda x: x.getParent(),
            nodes_with_out_of_space_uvs
        )
        pm.select(tra_nodes)
        raise RuntimeError(
            """There are nodes which have a UV value bigger than <b>10</b>:
            <br><br>%s""" %
            '<br>'.join(
                map(lambda x: x.name(),
                    tra_nodes[:MAX_NODE_DISPLAY])
            )
        )
@publisher('model')
def check_uv_border_crossing():
    """checks if any of the uv shells are crossing uv borders
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    all_meshes = pm.ls(type='mesh')
    mesh_count = len(all_meshes)
    nodes_with_uvs_crossing_borders = []
    from anima.ui.progress_dialog import ProgressDialogManager
    pdm = ProgressDialogManager()
    if not pm.general.about(batch=1) and mesh_count:
        pdm.use_ui = True
    caller = pdm.register(mesh_count, 'check_out_of_space_uvs()')
    for node in all_meshes:
        all_uvs = node.getUVs()
        uv_shell_ids = node.getUvShellsIds()
        # prepare an empty dict of lists
        # uv_shell_ids is (per-uv shell id list, number of shells)
        uvs_per_shell = {}
        for shell_id in range(uv_shell_ids[1]):
            uvs_per_shell[shell_id] = [[], []]
        # bucket every uv coordinate by the shell it belongs to
        for uv_id in range(len(uv_shell_ids[0])):
            u = all_uvs[0][uv_id]
            v = all_uvs[1][uv_id]
            shell_id = uv_shell_ids[0][uv_id]
            uvs_per_shell[shell_id][0].append(u)
            uvs_per_shell[shell_id][1].append(v)
        # now check all uvs per shell
        for shell_id in range(uv_shell_ids[1]):
            us = sorted(uvs_per_shell[shell_id][0])
            vs = sorted(uvs_per_shell[shell_id][1])
            #check first and last u and v values
            # a shell crosses a border when its min and max fall into
            # different integer UV tiles
            if int(us[0]) != int(us[-1]) or int(vs[0]) != int(vs[-1]):
                # they are not equal it is crossing spaces
                nodes_with_uvs_crossing_borders.append(node)
                break
        caller.step()
    if len(nodes_with_uvs_crossing_borders):
        # get transform nodes
        tra_nodes = map(
            lambda x: x.getParent(),
            nodes_with_uvs_crossing_borders
        )
        pm.select(tra_nodes)
        raise RuntimeError(
            """There are nodes with <b>UV-Shells</b> that are crossing
            <b>UV BORDERS</b>:<br><br>%s""" %
            '<br>'.join(
                map(lambda x: x.name(),
                    tra_nodes[:MAX_NODE_DISPLAY])
            )
        )
@publisher('model')
def check_uvs():
    """checks uvs with no uv area

    The area of a 2d polygon calculation is based on the answer of Darius
    Bacon in
    http://stackoverflow.com/questions/451426/how-do-i-calculate-the-surface-area-of-a-2d-polygon
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    def area(p):
        # shoelace formula over the polygon's edge segments
        return 0.5 * abs(sum(x0 * y1 - x1 * y0
                             for ((x0, y0), (x1, y1)) in segments(p)))
    def segments(p):
        # consecutive vertex pairs, wrapping around to close the polygon
        return zip(p, p[1:] + [p[0]])
    all_meshes = pm.ls(type='mesh')
    mesh_count = len(all_meshes)
    from anima.ui.progress_dialog import ProgressDialogManager
    pdm = ProgressDialogManager()
    # enable the progress UI only in interactive sessions; register the
    # caller unconditionally, otherwise ``caller`` is an undefined name
    # (NameError at ``caller.step()``) when Maya runs in batch mode
    if not pm.general.about(batch=1) and mesh_count:
        pdm.use_ui = True
    caller = pdm.register(mesh_count, 'check_uvs()')
    meshes_with_zero_uv_area = []
    for node in all_meshes:
        all_uvs = node.getUVs()
        try:
            for i in range(node.numFaces()):
                uvs = []
                for j in range(node.numPolygonVertices(i)):
                    #uvs.append(node.getPolygonUV(i, j))
                    uv_id = node.getPolygonUVid(i, j)
                    uvs.append((all_uvs[0][uv_id], all_uvs[1][uv_id]))
                if area(uvs) == 0.0:
                    meshes_with_zero_uv_area.append(node)
                    break
        except RuntimeError:
            # getPolygonUVid raises when a face has no UVs mapped at all;
            # treat that as a zero-area failure as well
            meshes_with_zero_uv_area.append(node)
        caller.step()
    if len(meshes_with_zero_uv_area):
        pm.select([node.getParent() for node in meshes_with_zero_uv_area])
        raise RuntimeError(
            """There are meshes with no uvs or faces with zero uv area:<br><br>
            %s""" %
            '<br>'.join(
                map(lambda x: x.name(),
                    meshes_with_zero_uv_area[:MAX_NODE_DISPLAY])
            )
        )
#******************#
# LOOK DEVELOPMENT #
#******************#
# Task-type name spellings that identify look-development tasks; the
# publishers below are registered for all of them.
LOOK_DEV_TYPES = ['LookDev', 'Look Dev', 'LookDevelopment', 'Look Development']
@publisher(LOOK_DEV_TYPES)
def disable_internal_reflections_in_aiStandard():
    """Turns off ``enableInternalReflections`` on every local aiStandard
    material; referenced materials are left untouched.
    """
    for material in pm.ls(type='aiStandard'):
        # materials coming from a reference cannot be edited safely
        if material.referenceFile() is not None:
            continue
        material.setAttr('enableInternalReflections', 0)
@publisher(LOOK_DEV_TYPES)
def check_all_tx_textures():
    """checks if tx textures are created for all of the texture nodes in the
    current scene

    Collects the paths of every ``file`` and ``aiImage`` node and raises a
    PublishError when any of them lacks a sibling ``.tx`` file on disk.
    """
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    texture_file_paths = []
    workspace_path = pm.workspace.path
    def add_path(path):
        # normalize: expand env vars and resolve relative paths against
        # the current Maya workspace
        if path != '':
            path = os.path.expandvars(path)
            if not os.path.isabs(path):
                path = \
                    os.path.normpath(os.path.join(workspace_path, path))
            texture_file_paths.append(path)
    for node in pm.ls(type='file'):
        add_path(node.fileTextureName.get())
    for node in pm.ls(type='aiImage'):
        add_path(node.filename.get())
    import glob
    textures_with_no_tx = []
    for path in texture_file_paths:
        # the tx file sits next to the source texture, same stem
        tx_path = '%s.tx' % os.path.splitext(path)[0]
        # replace any <udim> value with *
        tx_path = tx_path.replace('<udim>', '*')
        if not len(glob.glob(tx_path)):
            textures_with_no_tx.append(path)
    if len(textures_with_no_tx):
        raise PublishError('There are textures with no <b>TX</b> file!!!')
@publisher(LOOK_DEV_TYPES)
def check_lights():
    """Fails the publish when any light node exists in the scene.

    Both classic Maya lights and the Arnold light types are considered.
    """
    light_types = ['light', 'aiAreaLight', 'aiSkyDomeLight',
                   'aiPhotometricLight']
    lights = pm.ls(type=light_types)
    if not lights:
        return
    # select the offenders so the user can locate them easily
    pm.select(lights)
    light_names = '<br>'.join([node.name() for node in lights])
    raise PublishError(
        'There are <b>Lights</b> in the current scene:<br><br>%s<br><br>'
        'Please delete them!!!' % light_names
    )
@publisher(LOOK_DEV_TYPES)
def check_only_supported_materials_are_used():
    """Fails the publish when a material type outside VALID_MATERIALS is
    used; the two stock Maya materials are always allowed by name.
    """
    default_material_names = ['lambert1', 'particleCloud1']
    offending_materials = [
        material for material in pm.ls(mat=1)
        if material.name() not in default_material_names
        and material.type() not in VALID_MATERIALS
    ]
    if offending_materials:
        pm.select(offending_materials)
        raise PublishError(
            'There are non-Arnold materials in the scene:<br><br>%s<br><br>'
            'Please remove them!!!' %
            '<br>'.join([m.name() for m in offending_materials])
        )
@publisher(LOOK_DEV_TYPES)
def check_multiple_connections_for_textures():
    """check if textures are only used in one material (not liking it very much
    but it is breaking ASS files.

    Raises a PublishError listing texture/utility nodes that feed more
    than one material or more than one distinct downstream node.
    """
    v = staging.get('version')
    # guard: staging may hold no version at all; the original code crashed
    # with AttributeError on ``v.naming_parents`` in that case
    if not v:
        return
    # skip if
    skip_types = ['character', 'animation', 'previs']
    for t in v.naming_parents:
        for st in skip_types:
            if t.type and t.type.name.lower().startswith(st):
                return
    # get all the texture nodes
    texture_nodes = [
        'bulge',
        'checker',
        'cloth',
        'file',
        'fluidTexture2D',
        'fractal',
        'grid',
        'mandelbrot',
        'mountain',
        'movie',
        'noise',
        'ocean',
        'psdFileTex',
        'ramp',
        'water',
        'brownian',
        'cloud',
        'crater',
        'fluidTexture3D',
        'granite',
        'leather',
        'mandelbrot3D',
        'marble',
        'rock',
        'snow',
        'solidFractal',
        'stucco',
        'volumeNoise',
        'wood',
        'bump2d',
        'bump3d',
        'place2dTexture',
        'place3dTexture',
        'plusMinusAverage',
        'samplerInfo',
        'stencil',
        'uvChooser',
        'surfaceInfo',
        'blendColors',
        'clamp',
        'contrast',
        'gammaCorrect',
        'hsvToRgb',
        'luminance',
        'remapColor',
        'remapHsv',
        'remapValue',
        'rgbToHsv',
        'surfaceLuminance',
        'imagePlane',
        'aiImage',
        'aiNoise',
    ]
    # try to find the material it is been used by walking up the connections
    nodes_with_multiple_materials = []
    # scene bookkeeping nodes connect to nearly everything; ignore them
    nodes_to_ignore = pm.ls(type='hyperLayout')
    nodes_to_ignore += pm.ls('defaultTextureList*')
    nodes_to_ignore += pm.ls('defaultRenderUtilityList*')
    for node in pm.ls(type=texture_nodes):
        materials_connected_to_this_node = \
            pm.ls(node.listHistory(future=True), mat=True)
        if len(materials_connected_to_this_node) > 1:
            nodes_with_multiple_materials.append(node)
        else:
            # no direct multi-material hit; fall back to counting distinct
            # outgoing connections (minus the ignored bookkeeping nodes)
            connections_out_of_this_node = node.outputs()
            [connections_out_of_this_node.remove(h)
             for h in nodes_to_ignore
             if h in connections_out_of_this_node]
            if len(set(connections_out_of_this_node)) > 1:
                nodes_with_multiple_materials.append(node)
    # if we find more than one material add it to the list
    # raise a PublishError if we have an item in the list
    if len(nodes_with_multiple_materials) > 0:
        pm.select(nodes_with_multiple_materials)
        raise PublishError(
            'Please update the scene so the following nodes are connected <br>'
            'to only <b>one material</b> (duplicate them):<br><br>%s<br><br>' %
            '<br>'.join(map(lambda x: x.name(), nodes_with_multiple_materials))
        )
@publisher(LOOK_DEV_TYPES)
def check_objects_still_using_default_shader():
    """Fails the publish when ``initialShadingGroup`` still has members,
    i.e. objects that were never assigned a proper material.
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    members = mc.sets('initialShadingGroup', q=1)
    if not members:
        return
    mc.select(members)
    raise PublishError(
        'There are objects still using <b>initialShadingGroup</b><br><br>'
        '%s<br><br>Please assign a proper material to them' %
        '<br>'.join(
            members[:MAX_NODE_DISPLAY]
        )
    )
@publisher(LOOK_DEV_TYPES + ['layout'])
def check_component_edits_on_references():
    """check if there are component edits on references

    Scans every reference (recursively) for edits touching per-component
    data (``.pt[`` / ``.pnts[``) and raises a PublishError listing the
    offending references.
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    import maya.cmds
    reference_query = maya.cmds.referenceQuery
    references_with_component_edits = []
    for ref in pm.listReferences(recursive=True):
        # referenceQuery may return None instead of an empty list when the
        # reference carries no edits; guard the join below against that
        all_edits = reference_query(ref.refNode.name(), es=True) or []
        joined_edits = '\n'.join(all_edits)
        if '.pt[' in joined_edits or '.pnts[' in joined_edits:
            references_with_component_edits.append(ref)
    if len(references_with_component_edits):
        raise PublishError(
            'There are <b>component edits</b> on the following References:'
            '<br><br>%s<br><br>Please remove them!!!' %
            '<br>'.join(
                map(lambda x: x.refNode.name(),
                    references_with_component_edits[:MAX_NODE_DISPLAY])
            )
        )
@publisher('rig')
def check_cacheable_attr():
    """Fails the publish unless at least one root node carries a usable
    ``cacheable`` attribute (present, non-empty and not None).
    """
    def has_valid_cacheable(node):
        # short-circuits exactly like the original chained condition
        return (
            node.hasAttr('cacheable')
            and node.getAttr('cacheable') != ''
            and node.getAttr('cacheable') is not None
        )
    root_nodes = auxiliary.get_root_nodes()
    if not any(has_valid_cacheable(node) for node in root_nodes):
        raise PublishError(
            'Please add <b>cacheable</b> attribute and set it to a '
            '<b>proper name</b>!'
        )
@publisher('animation')
def check_shot_nodes():
    """Ensures the animation scene contains exactly one shot node."""
    shot_count = len(pm.ls(type='shot'))
    if shot_count == 0:
        raise PublishError('There is no <b>Shot</b> node in the scene')
    if shot_count > 1:
        raise PublishError('There is multiple <b>Shot</b> nodes in the scene')
@publisher('animation')
def set_frame_range():
    """sets the frame range from the shot node

    Applies startFrame/endFrame of the first shot node to the playback
    range, padded on both sides by the shot's ``handle`` attribute
    (1 frame when the attribute does not exist).
    """
    # NOTE(review): assumes at least one shot node exists (true when
    # check_shot_nodes ran first) -- otherwise this raises IndexError
    shot_node = pm.ls(type='shot')[0]
    start_frame = shot_node.startFrame.get()
    end_frame = shot_node.endFrame.get()
    handle_count = 1
    try:
        handle_count = shot_node.getAttr('handle')
    except AttributeError:
        # no custom handle attribute on this shot node; keep the default
        pass
    # set it in the playback
    pm.playbackOptions(
        ast=start_frame,
        aet=end_frame,
        min=start_frame-handle_count,
        max=end_frame+handle_count
    )
@publisher(publisher_type=POST_PUBLISHER_TYPE)
def update_audit_info():
    """updates the audit info of the version

    Stamps the currently published Version with the logged-in Stalker
    user (``updated_by``) and commits the change to the database.
    Does nothing when no local session user is found.
    """
    from stalker import LocalSession
    local_session = LocalSession()
    logged_in_user = local_session.logged_in_user
    if logged_in_user:
        # update the version updated_by
        from anima.env import mayaEnv
        m_env = mayaEnv.Maya()
        v = m_env.get_current_version()
        if v:
            v.updated_by = logged_in_user
            from stalker import db
            db.DBSession.commit()
@publisher(publisher_type=POST_PUBLISHER_TYPE)
def generate_thumbnail():
    """generates thumbnail for the current scene

    Delegates the capture to ``auxiliary.generate_thumbnail()``; skipped
    entirely in batch mode where there is no viewport to grab from.
    """
    # skip this if maya is running in batch mode
    if pm.general.about(batch=1):
        return
    from anima.env.mayaEnv import auxiliary
    # reload to pick up code changes without restarting Maya (Python 2)
    reload(auxiliary)
    auxiliary.generate_thumbnail()
@publisher(
    LOOK_DEV_TYPES + ['layout', 'model', 'vegetation', 'scene assembly'],
    publisher_type=POST_PUBLISHER_TYPE
)
def create_representations():
    """creates the representations of the scene

    Runs the RepresentationGenerator for the current version, then
    re-opens the original scene if the generator left a different version
    open. Skipped for representation takes and for character/animation/
    previs related tasks.
    """
    from anima.env import mayaEnv
    m_env = mayaEnv.Maya()
    v = m_env.get_current_version()
    if not v:
        return
    # representation takes never spawn further representations
    if Representation.repr_separator in v.take_name:
        return
    # skip if it is a Character
    skip_types = ['character', 'animation', 'previs']
    for t in v.naming_parents:
        for st in skip_types:
            if t.type and t.type.name.lower().startswith(st):
                return
    from anima.env.mayaEnv import repr_tools
    gen = repr_tools.RepresentationGenerator(version=v)
    gen.generate_all()
    # re-open the original scene
    m_env = mayaEnv.Maya()
    current_version = m_env.get_current_version()
    if current_version != v:
        m_env.open(v, force=True, skip_update_check=True)
@publisher('animation', publisher_type=POST_PUBLISHER_TYPE)
def cache_animations():
    """cache animations

    Exports Alembic caches via ``auxiliary.export_alembic_from_cache_node``.
    """
    # reload to pick up code changes without restarting Maya (Python 2)
    reload(auxiliary)
    auxiliary.export_alembic_from_cache_node()
@publisher('animation', publisher_type=POST_PUBLISHER_TYPE)
def generate_playblast():
    """generates a playblast for the current scene

    Delegates the work to ``auxiliary.ShotPlayblaster``.
    """
    import anima
    from anima import utils
    # reload to pick up code changes without restarting Maya (Python 2)
    reload(anima)
    reload(utils)
    reload(auxiliary)
    sp = auxiliary.ShotPlayblaster()
    sp.playblast()
New: Added ``anima.env.mayaEnv.publish.set_pixel_error()`` to adjust and clamp the Arnold subdivision pixel error of the meshes in the published scene.
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2014, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import os
import datetime
import pymel.core as pm
import maya.cmds as mc
from anima import stalker_server_internal_address
from anima.publish import (clear_publishers, publisher, staging,
POST_PUBLISHER_TYPE)
from anima.exc import PublishError
from anima.repr import Representation
from anima.utils import utc_to_local
from anima.env.mayaEnv import auxiliary
# start from a clean registry; this module registers every publisher below
clear_publishers()
# cap on how many offending node names are embedded into error messages
MAX_NODE_DISPLAY = 80
# TODO: this should be depending on to the project some projects still can
# use mental ray
# Material node types accepted by the look-development publishers
# (Arnold shaders plus a few stock Maya shader types).
VALID_MATERIALS = [
    u'aiAmbientOcclusion',
    u'aiHair',
    u'aiRaySwitch',
    u'aiShadowCatcher',
    u'aiSkin',
    u'aiSkinSss',
    u'aiStandard',
    u'aiUtility',
    u'aiWireframe',
    u'displacementShader',
    u'lambert',
    u'blinn',
    u'layeredShader',
    u'oceanShader',
    u'phong',
    u'phongE',
    u'rampShader',
    u'surfaceShader',
]
#*********#
# GENERIC #
#*********#
# @publisher
# def delete_turtle_nodes():
# """deletes the Turtle related nodes
# """
# # deletes Turtle from scene
# turtle_node_names = [
# 'TurtleRenderOptions',
# 'TurtleDefaultBakeLayer',
# 'TurtleBakeLayerManager',
# 'TurtleUIOptions'
# ]
#
# for node_name in turtle_node_names:
# try:
# node = pm.PyNode(node_name)
# node.unlock()
# pm.delete(node)
# except pm.MayaNodeError:
# pass
#
# try:
# pymel_undo_node = pm.PyNode('__pymelUndoNode')
# pymel_undo_node.unlock()
# pm.delete(pymel_undo_node)
# except pm.MayaNodeError:
# pass
#
# pm.unloadPlugin('Turtle', force=1)
#
# pm.warning('Turtle deleted successfully.')
@publisher
def delete_unknown_nodes():
    """Removes every node of type ``unknown`` from the scene.

    Each node is unlocked first, because Maya refuses to delete locked
    nodes.
    """
    unknown_nodes = pm.ls(type='unknown')
    for unknown_node in unknown_nodes:
        unknown_node.unlock()
    pm.delete(unknown_nodes)
@publisher
def check_time_logs():
    """do not allow publishing if there is no time logs for the task, do that
    only for non WFD tasks

    Raises a PublishError when the task has already started, is not in
    WFD (waiting-for-dependency) status and has no TimeLogs yet.
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    if v:
        task = v.task
        now = datetime.datetime.now()
        # prefer the scheduler-computed start date when available
        task_start = task.computed_start if task.computed_start else task.start
        task_start = utc_to_local(task_start)
        if task.status.code != 'WFD' and task_start <= now:
            if len(task.time_logs) == 0:
                raise PublishError(
                    '<p>Please create a TimeLog before publishing this '
                    'asset:<br><br>'
                    '<a href="%s/tasks/%s/view">Open In WebBrowser</a>'
                    '</p>' % (stalker_server_internal_address, task.id)
                )
@publisher
def check_node_names_with_bad_characters():
    """checks node names and ensures that there are no nodes with ord(c) > 127
    """
    def has_bad_character(name):
        # '?' is what Maya substitutes for characters it cannot represent
        return any(c == '?' or ord(c) > 127 for c in name)
    nodes_with_bad_name = [
        node for node in pm.ls() if has_bad_character(node.name())
    ]
    if nodes_with_bad_name:
        pm.select(nodes_with_bad_name)
        raise PublishError(
            'There are nodes with <b>unknown characters</b> in their names:'
            '<br><br>'
            '%s' %
            '<br>'.join(
                [n.name() for n in nodes_with_bad_name][:MAX_NODE_DISPLAY]
            )
        )
@publisher
def delete_unused_nodes():
    """deletes unused shading nodes

    Runs the same MEL command as Hypershade's "Delete Unused Nodes".
    """
    # MLdeleteUnused returns the number of nodes it deleted
    num_of_items_deleted = pm.mel.eval('MLdeleteUnused')
    if num_of_items_deleted:
        # do not raise any error just warn the user
        pm.warning('Deleted unused nodes during Publish operation!!')
@publisher
def check_representations():
    """checks if the referenced versions are all matching the representation
    type of the current version

    For the Base representation any reference is allowed; otherwise every
    referenced version must share the current representation, and a
    PublishError listing the mismatches is raised when they do not.
    """
    ref_reprs = []
    wrong_reprs = []
    v = staging.get('version')
    if v:
        r = Representation(version=v)
        current_repr = r.repr
        # For **Base** representation
        # allow any type of representation to be present in the scene
        if r.is_base():
            return
        for ref in pm.listReferences():
            ref_repr = ref.repr
            if ref_repr is None:
                # skip this one this is not related to a Stalker Version
                continue
            ref_reprs.append([ref, ref_repr])
            if ref_repr != current_repr:
                wrong_reprs.append(ref)
    else:
        # no version in staging: nothing to validate
        return
    if len(wrong_reprs):
        ref_repr_labels = []
        for ref_repr in ref_reprs:
            ref = ref_repr[0]
            repr_name = ref_repr[1]
            # red marks a mismatching reference, green a matching one
            color = 'red' if current_repr != repr_name else 'green'
            ref_repr_labels.append(
                '<span style="color: %(color)s">%(repr_name)s</span> -> '
                '%(ref)s' %
                {
                    'color': color,
                    'repr_name': repr_name,
                    'ref': ref.refNode.name()
                }
            )
        raise PublishError(
            'You are saving as the <b>%s</b> representation<br>'
            'for the current scene, but the following references<br>'
            'are not <b>%s</b> representations of their versions:<br><br>'
            '%s' % (
                current_repr, current_repr,
                '<br>'.join(ref_repr_labels[:MAX_NODE_DISPLAY])
            )
        )
@publisher
def cleanup_intermediate_objects():
    """deletes any unused intermediate object in the current scene
    """
    unused_intermediates = []
    for mesh in pm.ls(type='mesh'):
        # an intermediate shape with no connections at all is dead weight;
        # referenced shapes are left untouched
        if mesh.inputs() or mesh.outputs():
            continue
        if mesh.intermediateObject.get() and mesh.referenceFile() is None:
            unused_intermediates.append(mesh)
    pm.delete(unused_intermediates)
@publisher
def check_old_object_smoothing():
    """Fails the publish when any mesh has Smooth Mesh Preview enabled."""
    smooth_preview_transforms = [
        mesh.getParent()
        for mesh in pm.ls(type='mesh')
        if mesh.displaySmoothMesh.get() != 0
    ]
    if smooth_preview_transforms:
        pm.select(smooth_preview_transforms)
        raise PublishError(
            'Please do not use <b>Smooth Mesh</b> on following nodes:<br><br>'
            '%s' %
            '<br>'.join(
                [n.name() for n in smooth_preview_transforms[:MAX_NODE_DISPLAY]]
            )
        )
@publisher
def check_if_previous_version_references():
    """check if a previous version of the same task is referenced to the scene

    A reference pointing to any version of the same task *and* take as
    the scene being published would be a self-reference; publishing is
    refused in that case.
    """
    from anima.env.mayaEnv import Maya
    m = Maya()
    ver = m.get_current_version()
    if ver is None:
        # the scene is not a Stalker version; nothing to check
        return
    same_version_references = []
    for ref in pm.listReferences():  # check only 1st level references
        ref_version = m.get_version_from_full_path(ref.path)
        if ref_version:
            if ref_version.task == ver.task \
               and ref_version.take_name == ver.take_name:
                same_version_references.append(ref)
    if len(same_version_references):
        print('The following nodes are references to an older version of this '
              'scene')
        print(
            '\n'.join(map(lambda x: x.refNode.name(), same_version_references))
        )
        raise PublishError(
            'The current scene contains a <b>reference</b> to a<br>'
            '<b>previous version</b> of itself.<br><br>'
            'Please remove it!!!'
        )
@publisher
def delete_empty_namespaces():
    """checks and deletes empty namespaces

    A namespace counts as empty when it holds no DAG/material nodes and
    no nested namespaces.
    """
    # only allow namespaces with DAG objects in it and no child namespaces
    empty_namespaces = [
        ns for ns in pm.listNamespaces(recursive=True)
        if len(pm.ls(ns.listNodes(), dag=True, mat=True)) == 0
        and len(ns.listNamespaces()) == 0
    ]
    # remove all empty
    for ns in empty_namespaces:
        # mnr merges any remaining content into the root namespace
        pm.namespace(rm=ns, mnr=1)
    # if len(empty_namespaces):
    #     raise PublishError(
    #         'There are empty <b>namespaces</b><br><br>'
    #         'Please remove them!!!'
    #     )
@publisher
def check_only_published_versions_are_used():
    """checks if only published versions are used in this scene
    """
    unpublished_versions = []
    for ref in pm.listReferences():
        ref_version = ref.version
        # references without a Stalker version attached are skipped
        if ref_version and not ref_version.is_published:
            unpublished_versions.append(ref_version)
    if unpublished_versions:
        raise PublishError(
            'Please use only <b>published</b> versions for:<br><br>%s' %
            '<br>'.join(
                [ver.nice_name
                 for ver in unpublished_versions[:MAX_NODE_DISPLAY]]
            )
        )
#*******#
# MODEL #
#*******#
@publisher('model')
def check_no_references():
    """Fails the publish when a model scene contains any reference."""
    if pm.listReferences():
        raise PublishError(
            'There should be no <b>References</b> in a <b>Model</b> scene.'
        )
@publisher('model')
def check_history():
    """there should be no history on the objects
    """
    # node types allowed to appear in a construction history chain
    excluded_types = ['mesh', 'shadingEngine', 'groupId']
    def has_real_history(shape):
        # inspect only the local (pdo) first level of the chain
        for history_node in shape.listHistory(pdo=1, lv=1):
            if history_node.type() not in excluded_types:
                return True
        return False
    nodes_with_history = [
        shape for shape in pm.ls(type='mesh') if has_real_history(shape)
    ]
    if nodes_with_history:
        pm.select(nodes_with_history)
        # there is history
        raise PublishError(
            'There is history on:\n\n'
            '%s'
            '\n\n'
            'there should be no '
            'history in Model versions' %
            '\n'.join(
                [s.name() for s in nodes_with_history[:MAX_NODE_DISPLAY]]
            )
        )
@publisher('model')
def check_if_default_shader():
    """check if only default shader is assigned
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    # a pristine scene carries exactly two materials (lambert1 and
    # particleCloud1); any more means a custom shader was created
    material_count = len(pm.ls(mat=1))
    if material_count > 2:
        raise PublishError(
            'Use only lambert1 as the shader!'
        )
@publisher('model')
def check_if_root_nodes_have_no_transformation():
    """checks if transform nodes directly under world have 0 transformations
    """
    def is_frozen(node):
        # identity transform: zero translate/rotate, unit scale
        translate = node.t.get()
        rotate = node.r.get()
        scale = node.s.get()
        return (
            translate.x == 0 and translate.y == 0 and translate.z == 0
            and rotate.x == 0 and rotate.y == 0 and rotate.z == 0
            and scale.x == 1 and scale.y == 1 and scale.z == 1
        )
    non_freezed_root_nodes = [
        node for node in auxiliary.get_root_nodes() if not is_frozen(node)
    ]
    if non_freezed_root_nodes:
        pm.select(non_freezed_root_nodes)
        raise PublishError(
            'Please freeze the following node transformations:\n\n%s' %
            '\n'.join(
                [n.name() for n in non_freezed_root_nodes[:MAX_NODE_DISPLAY]]
            )
        )
@publisher('model')
def check_if_leaf_mesh_nodes_have_no_transformation():
    """checks if all the Mesh transforms have 0 transformation, but it is
    allowed to move the mesh nodes in space with a parent group node.
    """
    # NOTE(review): despite the name, the visible logic only verifies that
    # no *other transform* is parented under a mesh's transform -- confirm
    # whether a zero-transform check was also intended
    mesh_nodes_with_transform_children = []
    for node in pm.ls(dag=1, type='mesh'):
        parent = node.getParent()
        tra_under_shape = pm.ls(
            parent.listRelatives(),
            type='transform'
        )
        if len(tra_under_shape):
            mesh_nodes_with_transform_children.append(parent)
    if len(mesh_nodes_with_transform_children):
        pm.select(mesh_nodes_with_transform_children)
        raise PublishError(
            'The following meshes have other objects parented to them:'
            '\n\n%s'
            '\n\nPlease remove any object under them!' %
            '\n'.join(
                map(lambda x: x.name(),
                    mesh_nodes_with_transform_children[:MAX_NODE_DISPLAY])
            )
        )
@publisher('model')
def check_model_quality():
    """checks the quality of the model

    Runs Maya's polyCleanup in select-only mode; any component it selects
    (n-gons, holes, lamina faces, non-manifold geometry) fails the check.
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    pm.select(None)
    # the positional flag string makes polyCleanup *select* problematic
    # components rather than fix them
    pm.mel.eval(
        'polyCleanupArgList 3 { "1","2","0","0","1","0","0","0","0","1e-005",'
        '"0","0","0","0","0","2","1" };'
    )
    if len(pm.ls(sl=1)) > 0:
        raise RuntimeError(
            """There are issues in your model please run:<br><br>
            <b>PolygonMesh -> Mesh -> Cleanup...</b><br><br>
            <ul>Check:
            <li>Faces with more than 4 sides</li>
            <li>Faces with holes</li>
            <li>Lamina Faces</li>
            <li>Non-manifold Geometry</li>
            </ul>"""
        )
@publisher('model')
def check_anim_layers():
    """check if there are animation layers on the scene
    """
    anim_layers = pm.ls(type='animLayer')
    if anim_layers:
        raise PublishError(
            'There should be no <b>Animation Layers</b> in the scene!!!'
        )
@publisher('model')
def check_display_layer():
    """check if there are display layers
    """
    # one displayLayer (the default layer) always exists in a Maya scene
    display_layers = pm.ls(type='displayLayer')
    if len(display_layers) > 1:
        raise PublishError(
            'There should be no <b>Display Layers</b> in the scene!!!'
        )
@publisher('model')
def check_extra_cameras():
    """checking if there are extra cameras
    """
    # a default Maya scene ships with 4 cameras (persp, top, front, side)
    camera_count = len(pm.ls(type='camera'))
    if camera_count > 4:
        raise PublishError('There should be no extra cameras in your scene!')
@publisher('model')
def check_empty_groups():
    """check if there are empty groups
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    empty_groups = [
        node for node in pm.ls(type='transform')
        if not node.listRelatives(children=1)
    ]
    if empty_groups:
        pm.select(empty_groups)
        raise PublishError(
            'There are <b>empty groups</b> in your scene, '
            'please remove them!!!'
        )
@publisher('model')
def check_empty_shapes():
    """checks if there are empty mesh nodes
    """
    empty_shape_nodes = [
        mesh for mesh in pm.ls(type='mesh') if mesh.numVertices() == 0
    ]
    if empty_shape_nodes:
        # select the transforms; they are easier to spot in the outliner
        pm.select([shape.getParent() for shape in empty_shape_nodes])
        raise PublishError(
            'There are <b>meshes with no geometry</b> in your scene, '
            'please delete them!!!'
        )
@publisher('model')
def check_uv_existence():
    """check if there are uvs in all objects

    Selects and reports (via RuntimeError) transforms of meshes that have
    no UVs in the default ``map1`` uv set.
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    all_meshes = pm.ls(type='mesh')
    nodes_with_no_uvs = []
    for node in all_meshes:
        # intermediate shapes are construction-history leftovers; skip
        if not node.getAttr('intermediateObject'):
            # no u coordinates in map1 means the mesh has no UVs at all
            if not len(node.getUVs(uvSet='map1')[0]):
                nodes_with_no_uvs.append(node)
    if len(nodes_with_no_uvs) > 0:
        # get transform nodes
        tra_nodes = map(
            lambda x: x.getParent(),
            nodes_with_no_uvs
        )
        pm.select(tra_nodes)
        raise RuntimeError(
            """There are nodes with <b>no UVs</b>:
            <br><br>%s""" %
            '<br>'.join(
                map(lambda x: x.name(),
                    tra_nodes[:MAX_NODE_DISPLAY])
            )
        )
@publisher('model')
def check_out_of_space_uvs():
    """checks if there are uvs with u values that are bigger than 10.0

    Selects the offending transform nodes and raises a RuntimeError
    listing them.
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    all_meshes = pm.ls(type='mesh')
    mesh_count = len(all_meshes)
    nodes_with_out_of_space_uvs = []
    from anima.ui.progress_dialog import ProgressDialogManager
    pdm = ProgressDialogManager()
    # enable the progress UI only in interactive sessions; register the
    # caller unconditionally, otherwise ``caller`` is an undefined name
    # (NameError at ``caller.step()``) when Maya runs in batch mode
    if not pm.general.about(batch=1) and mesh_count:
        pdm.use_ui = True
    caller = pdm.register(mesh_count, 'check_out_of_space_uvs()')
    for node in all_meshes:
        # renamed from ``u, v`` so the version variable above is not shadowed
        u_values, v_values = node.getUVs()
        u_values = sorted(u_values)
        # NOTE(review): only the first (unsorted) v value is inspected,
        # mirroring the original behavior -- confirm whether min(v) was
        # intended here
        if u_values[0] < 0.0 or u_values[-1] > 10.0 or v_values[0] < 0.0:
            nodes_with_out_of_space_uvs.append(node)
        caller.step()
    if len(nodes_with_out_of_space_uvs):
        # get transform nodes
        tra_nodes = map(
            lambda x: x.getParent(),
            nodes_with_out_of_space_uvs
        )
        pm.select(tra_nodes)
        raise RuntimeError(
            """There are nodes which have a UV value bigger than <b>10</b>:
            <br><br>%s""" %
            '<br>'.join(
                map(lambda x: x.name(),
                    tra_nodes[:MAX_NODE_DISPLAY])
            )
        )
@publisher('model')
def check_uv_border_crossing():
    """checks if any of the uv shells are crossing uv borders

    A shell crosses a border when its min and max u (or v) values fall
    into different integer UV tiles.
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    all_meshes = pm.ls(type='mesh')
    mesh_count = len(all_meshes)
    nodes_with_uvs_crossing_borders = []
    from anima.ui.progress_dialog import ProgressDialogManager
    pdm = ProgressDialogManager()
    # enable the progress UI only in interactive sessions; register the
    # caller unconditionally, otherwise ``caller`` is an undefined name
    # (NameError at ``caller.step()``) when Maya runs in batch mode.
    # The progress title previously read 'check_out_of_space_uvs()'
    # (copy/paste leftover).
    if not pm.general.about(batch=1) and mesh_count:
        pdm.use_ui = True
    caller = pdm.register(mesh_count, 'check_uv_border_crossing()')
    for node in all_meshes:
        all_uvs = node.getUVs()
        uv_shell_ids = node.getUvShellsIds()
        # prepare an empty dict of lists, one [u-list, v-list] per shell
        uvs_per_shell = {}
        for shell_id in range(uv_shell_ids[1]):
            uvs_per_shell[shell_id] = [[], []]
        for uv_id in range(len(uv_shell_ids[0])):
            # local names chosen so the version variable ``v`` above is
            # not shadowed
            u_value = all_uvs[0][uv_id]
            v_value = all_uvs[1][uv_id]
            shell_id = uv_shell_ids[0][uv_id]
            uvs_per_shell[shell_id][0].append(u_value)
            uvs_per_shell[shell_id][1].append(v_value)
        # now check all uvs per shell
        for shell_id in range(uv_shell_ids[1]):
            us = sorted(uvs_per_shell[shell_id][0])
            vs = sorted(uvs_per_shell[shell_id][1])
            # check first and last u and v values: equal integer parts
            # means the whole shell lives inside one tile
            if int(us[0]) != int(us[-1]) or int(vs[0]) != int(vs[-1]):
                # they are not equal it is crossing spaces
                nodes_with_uvs_crossing_borders.append(node)
                break
        caller.step()
    if len(nodes_with_uvs_crossing_borders):
        # get transform nodes
        tra_nodes = map(
            lambda x: x.getParent(),
            nodes_with_uvs_crossing_borders
        )
        pm.select(tra_nodes)
        raise RuntimeError(
            """There are nodes with <b>UV-Shells</b> that are crossing
            <b>UV BORDERS</b>:<br><br>%s""" %
            '<br>'.join(
                map(lambda x: x.name(),
                    tra_nodes[:MAX_NODE_DISPLAY])
            )
        )
@publisher('model')
def check_uvs():
    """checks uvs with no uv area

    The area of a 2d polygon calculation is based on the answer of Darius
    Bacon in
    http://stackoverflow.com/questions/451426/how-do-i-calculate-the-surface-area-of-a-2d-polygon
    """
    # skip if this is a representation
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    def area(p):
        # shoelace formula over the polygon's edge segments
        return 0.5 * abs(sum(x0 * y1 - x1 * y0
                             for ((x0, y0), (x1, y1)) in segments(p)))
    def segments(p):
        # consecutive vertex pairs, wrapping around to close the polygon
        return zip(p, p[1:] + [p[0]])
    all_meshes = pm.ls(type='mesh')
    mesh_count = len(all_meshes)
    from anima.ui.progress_dialog import ProgressDialogManager
    pdm = ProgressDialogManager()
    # enable the progress UI only in interactive sessions; register the
    # caller unconditionally, otherwise ``caller`` is an undefined name
    # (NameError at ``caller.step()``) when Maya runs in batch mode
    if not pm.general.about(batch=1) and mesh_count:
        pdm.use_ui = True
    caller = pdm.register(mesh_count, 'check_uvs()')
    meshes_with_zero_uv_area = []
    for node in all_meshes:
        all_uvs = node.getUVs()
        try:
            for i in range(node.numFaces()):
                uvs = []
                for j in range(node.numPolygonVertices(i)):
                    #uvs.append(node.getPolygonUV(i, j))
                    uv_id = node.getPolygonUVid(i, j)
                    uvs.append((all_uvs[0][uv_id], all_uvs[1][uv_id]))
                if area(uvs) == 0.0:
                    meshes_with_zero_uv_area.append(node)
                    break
        except RuntimeError:
            # getPolygonUVid raises when a face has no UVs mapped at all;
            # treat that as a zero-area failure as well
            meshes_with_zero_uv_area.append(node)
        caller.step()
    if len(meshes_with_zero_uv_area):
        pm.select([node.getParent() for node in meshes_with_zero_uv_area])
        raise RuntimeError(
            """There are meshes with no uvs or faces with zero uv area:<br><br>
            %s""" %
            '<br>'.join(
                map(lambda x: x.name(),
                    meshes_with_zero_uv_area[:MAX_NODE_DISPLAY])
            )
        )
#******************#
# LOOK DEVELOPMENT #
#******************#
# Task-type name spellings that identify look-development tasks; the
# publishers below are registered for all of them.
LOOK_DEV_TYPES = ['LookDev', 'Look Dev', 'LookDevelopment', 'Look Development']
@publisher(LOOK_DEV_TYPES + ['model'])
def set_pixel_error():
    """sets the pixel error on objects which have a linear subdiv

    Meshes with linear (type 2) Arnold subdivision and a pixel error of 0
    get a small non-zero error (0.001); afterwards every mesh's pixel
    error is clamped to at most 1.
    """
    for node in pm.ls(type='mesh'):
        # NOTE(review): assumes the Arnold subdivision attributes exist on
        # every mesh (mtoa loaded) -- getAttr raises otherwise; confirm
        subdiv_type = node.getAttr('aiSubdivType')
        pixel_error = node.getAttr('aiSubdivPixelError')
        if subdiv_type == 2:  # linear
            if pixel_error == 0:
                # a 0 pixel error effectively disables adaptive
                # subdivision; use a sensible small value instead
                node.setAttr('aiSubdivPixelError', 0.001)
        # re-read and clamp to a maximum of 1 for all meshes
        pixel_error = node.getAttr('aiSubdivPixelError')
        node.setAttr('aiSubdivPixelError', min(1, pixel_error))
@publisher(LOOK_DEV_TYPES)
def disable_internal_reflections_in_aiStandard():
    """Turns off ``enableInternalReflections`` on every local aiStandard
    material; referenced materials are left untouched.
    """
    for material in pm.ls(type='aiStandard'):
        # materials coming from a reference cannot be edited safely
        if material.referenceFile() is not None:
            continue
        material.setAttr('enableInternalReflections', 0)
@publisher(LOOK_DEV_TYPES)
def check_all_tx_textures():
    """checks if tx textures are created for all of the texture nodes in the
    current scene

    Collects the paths of every ``file`` and ``aiImage`` node and raises a
    PublishError when any of them lacks a sibling ``.tx`` file on disk.
    """
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return
    texture_file_paths = []
    workspace_path = pm.workspace.path
    def add_path(path):
        # normalize: expand env vars and resolve relative paths against
        # the current Maya workspace
        if path != '':
            path = os.path.expandvars(path)
            if not os.path.isabs(path):
                path = \
                    os.path.normpath(os.path.join(workspace_path, path))
            texture_file_paths.append(path)
    for node in pm.ls(type='file'):
        add_path(node.fileTextureName.get())
    for node in pm.ls(type='aiImage'):
        add_path(node.filename.get())
    import glob
    textures_with_no_tx = []
    for path in texture_file_paths:
        # the tx file sits next to the source texture, same stem
        tx_path = '%s.tx' % os.path.splitext(path)[0]
        # replace any <udim> value with *
        tx_path = tx_path.replace('<udim>', '*')
        if not len(glob.glob(tx_path)):
            textures_with_no_tx.append(path)
    if len(textures_with_no_tx):
        raise PublishError('There are textures with no <b>TX</b> file!!!')
@publisher(LOOK_DEV_TYPES)
def check_lights():
    """Fails the publish when any light node exists in the scene.

    Both classic Maya lights and the Arnold light types are considered.
    """
    light_types = ['light', 'aiAreaLight', 'aiSkyDomeLight',
                   'aiPhotometricLight']
    lights = pm.ls(type=light_types)
    if not lights:
        return
    # select the offenders so the user can locate them easily
    pm.select(lights)
    light_names = '<br>'.join([node.name() for node in lights])
    raise PublishError(
        'There are <b>Lights</b> in the current scene:<br><br>%s<br><br>'
        'Please delete them!!!' % light_names
    )
@publisher(LOOK_DEV_TYPES)
def check_only_supported_materials_are_used():
    """Fails the publish when a material type outside VALID_MATERIALS is
    used; the two stock Maya materials are always allowed by name.
    """
    default_material_names = ['lambert1', 'particleCloud1']
    offending_materials = [
        material for material in pm.ls(mat=1)
        if material.name() not in default_material_names
        and material.type() not in VALID_MATERIALS
    ]
    if offending_materials:
        pm.select(offending_materials)
        raise PublishError(
            'There are non-Arnold materials in the scene:<br><br>%s<br><br>'
            'Please remove them!!!' %
            '<br>'.join([m.name() for m in offending_materials])
        )
@publisher(LOOK_DEV_TYPES)
def check_multiple_connections_for_textures():
    """Checks that each texture node drives only one material.

    Textures shared between materials break exported ASS files, so any
    texture/utility node whose downstream connections reach more than one
    material (or more than one distinct non-ignored output node) is
    reported via a PublishError.
    """
    v = staging.get('version')
    # robustness fix: guard against a missing version before touching its
    # attributes, the same way the other publishers in this module do
    if not v:
        return

    # skip asset types that never export ASS files
    skip_types = ['character', 'animation', 'previs']
    for t in v.naming_parents:
        for st in skip_types:
            if t.type and t.type.name.lower().startswith(st):
                return

    # get all the texture nodes
    texture_nodes = [
        'bulge',
        'checker',
        'cloth',
        'file',
        'fluidTexture2D',
        'fractal',
        'grid',
        'mandelbrot',
        'mountain',
        'movie',
        'noise',
        'ocean',
        'psdFileTex',
        'ramp',
        'water',

        'brownian',
        'cloud',
        'crater',
        'fluidTexture3D',
        'granite',
        'leather',
        'mandelbrot3D',
        'marble',
        'rock',
        'snow',
        'solidFractal',
        'stucco',
        'volumeNoise',
        'wood',

        'bump2d',
        'bump3d',
        'place2dTexture',
        'place3dTexture',
        'plusMinusAverage',
        'samplerInfo',
        'stencil',
        'uvChooser',

        'surfaceInfo',
        'blendColors',
        'clamp',
        'contrast',
        'gammaCorrect',
        'hsvToRgb',
        'luminance',
        'remapColor',
        'remapHsv',
        'remapValue',
        'rgbToHsv',
        'surfaceLuminance',

        'imagePlane',

        'aiImage',
        'aiNoise',
    ]

    # try to find the material it is been used by walking up the connections
    nodes_with_multiple_materials = []

    # these housekeeping nodes show up in outputs() but are not real users
    nodes_to_ignore = pm.ls(type='hyperLayout')
    nodes_to_ignore += pm.ls('defaultTextureList*')
    nodes_to_ignore += pm.ls('defaultRenderUtilityList*')

    for node in pm.ls(type=texture_nodes):
        materials_connected_to_this_node = \
            pm.ls(node.listHistory(future=True), mat=True)

        # if we find more than one material add it to the list
        if len(materials_connected_to_this_node) > 1:
            nodes_with_multiple_materials.append(node)
        else:
            # fall back to counting distinct downstream connections,
            # excluding the housekeeping nodes above
            connections_out_of_this_node = node.outputs()
            for h in nodes_to_ignore:
                if h in connections_out_of_this_node:
                    connections_out_of_this_node.remove(h)

            if len(set(connections_out_of_this_node)) > 1:
                nodes_with_multiple_materials.append(node)

    # raise a PublishError if we have an item in the list
    if len(nodes_with_multiple_materials) > 0:
        pm.select(nodes_with_multiple_materials)
        raise PublishError(
            'Please update the scene so the following nodes are connected <br>'
            'to only <b>one material</b> (duplicate them):<br><br>%s<br><br>' %
            '<br>'.join(map(lambda x: x.name(), nodes_with_multiple_materials))
        )
@publisher(LOOK_DEV_TYPES)
def check_objects_still_using_default_shader():
    """Raises a PublishError if anything uses initialShadingGroup."""
    # representations are exempt from this check
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return

    members = mc.sets('initialShadingGroup', q=1)
    if not members:
        return

    mc.select(members)
    shown = '<br>'.join(members[:MAX_NODE_DISPLAY])
    raise PublishError(
        'There are objects still using <b>initialShadingGroup</b><br><br>'
        '%s<br><br>Please assign a proper material to them' % shown
    )
@publisher(LOOK_DEV_TYPES + ['layout'])
def check_component_edits_on_references():
    """Raises a PublishError when any reference carries component edits."""
    # representations are exempt from this check
    v = staging.get('version')
    if v and Representation.repr_separator in v.take_name:
        return

    import maya.cmds
    query_edits = maya.cmds.referenceQuery

    offenders = []
    for ref in pm.listReferences(recursive=True):
        # point/vertex tweaks show up as '.pt[' / '.pnts[' in the edit list
        edits = '\n'.join(query_edits(ref.refNode.name(), es=True))
        if '.pt[' in edits or '.pnts[' in edits:
            offenders.append(ref)

    if offenders:
        raise PublishError(
            'There are <b>component edits</b> on the following References:'
            '<br><br>%s<br><br>Please remove them!!!' %
            '<br>'.join(
                ref.refNode.name()
                for ref in offenders[:MAX_NODE_DISPLAY]
            )
        )
@publisher('rig')
def check_cacheable_attr():
    """Raises a PublishError unless a root node has a filled ``cacheable``
    attribute."""
    def has_cacheable(node):
        # the attribute must exist and hold a non-empty, non-None value
        return (node.hasAttr('cacheable')
                and node.getAttr('cacheable') != ''
                and node.getAttr('cacheable') is not None)

    if not any(has_cacheable(n) for n in auxiliary.get_root_nodes()):
        raise PublishError(
            'Please add <b>cacheable</b> attribute and set it to a '
            '<b>proper name</b>!'
        )
@publisher('animation')
def check_shot_nodes():
    """Checks that the scene contains exactly one Shot node.

    Raises:
        PublishError: if the scene has no shot node, or more than one.
    """
    shot_nodes = pm.ls(type='shot')
    if len(shot_nodes) == 0:
        raise PublishError('There is no <b>Shot</b> node in the scene')

    if len(shot_nodes) > 1:
        # message fix: "There is multiple" -> "There are multiple"
        raise PublishError(
            'There are multiple <b>Shot</b> nodes in the scene'
        )
@publisher('animation')
def set_frame_range():
    """Applies the shot node's frame range (plus handles) to the playback."""
    shot = pm.ls(type='shot')[0]
    start = shot.startFrame.get()
    end = shot.endFrame.get()

    # default handle size when the shot node has no 'handle' attribute
    handles = 1
    try:
        handles = shot.getAttr('handle')
    except AttributeError:
        pass

    # set it in the playback
    pm.playbackOptions(
        ast=start,
        aet=end,
        min=start - handles,
        max=end + handles
    )
@publisher(publisher_type=POST_PUBLISHER_TYPE)
def update_audit_info():
    """Updates the audit info of the current version.

    Sets ``updated_by`` of the currently open version to the locally
    logged-in Stalker user and commits the change to the database.
    Does nothing when there is no logged-in user or no open version.
    """
    from stalker import LocalSession
    local_session = LocalSession()
    logged_in_user = local_session.logged_in_user
    if logged_in_user:
        # update the version updated_by
        from anima.env import mayaEnv
        m_env = mayaEnv.Maya()
        v = m_env.get_current_version()
        if v:
            v.updated_by = logged_in_user
            # persist the audit change
            from stalker import db
            db.DBSession.commit()
@publisher(publisher_type=POST_PUBLISHER_TYPE)
def generate_thumbnail():
    """Generates a thumbnail for the current scene.

    Skipped when Maya runs in batch mode (there is no viewport to capture
    from). The actual capture is delegated to
    ``auxiliary.generate_thumbnail``.
    """
    # skip this if maya is running in batch mode
    if pm.general.about(batch=1):
        return

    from anima.env.mayaEnv import auxiliary
    # reload to pick up any in-session edits to the module
    reload(auxiliary)
    auxiliary.generate_thumbnail()
@publisher(
    LOOK_DEV_TYPES + ['layout', 'model', 'vegetation', 'scene assembly'],
    publisher_type=POST_PUBLISHER_TYPE
)
def create_representations():
    """Creates the representations of the scene.

    Skipped when there is no current version, when the version is itself a
    representation, or when the asset belongs to a character/animation/
    previs hierarchy. After generation the original version is re-opened
    if the generator left a different scene open.
    """
    from anima.env import mayaEnv
    m_env = mayaEnv.Maya()
    v = m_env.get_current_version()

    if not v:
        return

    # never generate representations of a representation
    if Representation.repr_separator in v.take_name:
        return

    # skip if it is a Character
    skip_types = ['character', 'animation', 'previs']
    for t in v.naming_parents:
        for st in skip_types:
            if t.type and t.type.name.lower().startswith(st):
                return

    from anima.env.mayaEnv import repr_tools
    gen = repr_tools.RepresentationGenerator(version=v)
    gen.generate_all()

    # re-open the original scene
    m_env = mayaEnv.Maya()
    current_version = m_env.get_current_version()

    if current_version != v:
        m_env.open(v, force=True, skip_update_check=True)
@publisher('animation', publisher_type=POST_PUBLISHER_TYPE)
def cache_animations():
    """Exports alembic caches from the cache nodes in the scene.

    Delegates to ``auxiliary.export_alembic_from_cache_node``.
    """
    # reload to pick up any in-session edits to the module
    reload(auxiliary)
    auxiliary.export_alembic_from_cache_node()
@publisher('animation', publisher_type=POST_PUBLISHER_TYPE)
def generate_playblast():
    """Generates a playblast for the current scene.

    Reloads the involved modules to pick up in-session edits, then runs
    ``auxiliary.ShotPlayblaster`` over the scene.
    """
    import anima
    from anima import utils

    # reload to pick up any in-session module edits
    reload(anima)
    reload(utils)
    reload(auxiliary)

    sp = auxiliary.ShotPlayblaster()
    sp.playblast()
|
__version__ = '2.0.0a4'  # package version (pre-release)
default_app_config = 'tg_react.apps.TgReactConfig'  # Django default AppConfig path
Bump version to 2.0.0
__version__ = '2.0.0'  # package version
default_app_config = 'tg_react.apps.TgReactConfig'  # Django default AppConfig path
|
#! /usr/bin/python2.7
# -*- coding: utf-8 -*-
import os
import pandas as pd
from settings import ASSEMBLY_ID, BASEURL, DIR, END_BILL, ID_MULTIPLIER, LIST_DATA
import utils
def get_urlmap():
    """Return (bill_id, link_id, has_summaries) triples from the list CSV."""
    with open(LIST_DATA, 'r') as f:
        listing = pd.read_csv(f)
    columns = ('bill_id', 'link_id', 'has_summaries')
    return zip(*(listing[c] for c in columns))
def get_pages(bill_id, link_id, has_summaries):
    """Download every page belonging to one bill.

    Fetches the bill's detail page, its summary (when available), its
    proposer list and its withdrawer list into the matching DIR folders,
    each saved as ``<bill_id>.html``.
    """
    # TODO: verify that all files exist and, if not, retry the download
    # TODO: retry the 'ZZ' files once more

    # bill details
    outp = '%s/%s.html' % (DIR['specifics'], bill_id)
    utils.get_webpage(BASEURL['specific'] + link_id, outp)

    # bill summary
    if has_summaries==1:
        # bug fix: the output file must be named after bill_id, not the
        # `id` builtin (which produced files named "<built-in function id>")
        outp = '%s/%s.html' % (DIR['summaries'], bill_id)
        utils.get_webpage(BASEURL['summary'] + link_id, outp)

    # proposer list
    outp = '%s/%s.html' % (DIR['proposers'], bill_id)
    utils.get_webpage(BASEURL['proposer_list'] + link_id, outp)

    # withdrawer list
    outp = '%s/%s.html' % (DIR['withdrawers'], bill_id)
    utils.get_webpage(BASEURL['withdrawers'] + link_id, outp)
def check_missing(typename, nbills):
    """Return expected bill ids that have no downloaded HTML file.

    Expected ids run from ``ASSEMBLY_ID * ID_MULTIPLIER + 1`` upwards for
    ``nbills`` bills; any id without a matching ``<id>.html`` in
    ``DIR[typename]`` is reported, in ascending order.
    """
    first = ASSEMBLY_ID * ID_MULTIPLIER
    expected = [str(first + b + 1) for b in range(nbills)]
    # bug fix: str.strip('.html') removes any of the characters
    # '.', 'h', 't', 'm', 'l' from both ends and can mangle names;
    # os.path.splitext removes exactly the extension. A set also makes
    # the membership test O(1) instead of O(n).
    downloaded = set(
        os.path.splitext(f)[0] for f in os.listdir(DIR[typename]))
    return [c for c in expected if c not in downloaded]
if __name__=='__main__':
    # make sure every output directory exists before downloading
    utils.check_dir(DIR['summaries'])
    utils.check_dir(DIR['specifics'])
    utils.check_dir(DIR['proposers'])
    utils.check_dir(DIR['withdrawers'])

    urlmap = get_urlmap()
    # NOTE(review): resumes from index 3970 -- presumably a manual restart
    # point after a previous partial run; confirm before reusing this script
    for bill_id, link_id, has_summaries in urlmap[3970:]:
        get_pages(bill_id, link_id, has_summaries)
        print bill_id

    # report bills whose detail page is still missing
    missing = check_missing('specifics', END_BILL)
    print missing

    #get_pages('1901020', 'PRC_C1G2G0V8D0B2H1S5I4J2Z4G9N2B0F6', 1)
Fix bug in summary downloader
- Wrong filename
#! /usr/bin/python2.7
# -*- coding: utf-8 -*-
import os
import pandas as pd
from settings import ASSEMBLY_ID, BASEURL, DIR, END_BILL, ID_MULTIPLIER, LIST_DATA
import utils
def get_urlmap():
    """Yield one (bill_id, link_id, has_summaries) tuple per listed bill."""
    with open(LIST_DATA, 'r') as source:
        frame = pd.read_csv(source)
    return zip(frame['bill_id'], frame['link_id'], frame['has_summaries'])
def get_pages(bill_id, link_id, has_summaries):
    """Download every page belonging to one bill.

    Fetches the bill's detail page, its summary (when available), its
    proposer list and its withdrawer list into the matching DIR folders,
    each saved as ``<bill_id>.html``.
    """
    # TODO: verify that all files exist and, if not, retry the download
    # TODO: retry the 'ZZ' files once more

    def fetch(dirkey, urlkey):
        # save the page under DIR[dirkey]/<bill_id>.html
        outp = '%s/%s.html' % (DIR[dirkey], bill_id)
        utils.get_webpage(BASEURL[urlkey] + link_id, outp)

    fetch('specifics', 'specific')          # bill details
    if has_summaries==1:
        fetch('summaries', 'summary')       # bill summary
    fetch('proposers', 'proposer_list')     # proposer list
    fetch('withdrawers', 'withdrawers')     # withdrawer list
def check_missing(typename, nbills):
    """Return expected bill ids that have no downloaded HTML file.

    Expected ids run from ``ASSEMBLY_ID * ID_MULTIPLIER + 1`` upwards for
    ``nbills`` bills; any id without a matching ``<id>.html`` in
    ``DIR[typename]`` is reported, in ascending order.
    """
    first = ASSEMBLY_ID * ID_MULTIPLIER
    expected = [str(first + b + 1) for b in range(nbills)]
    # bug fix: str.strip('.html') removes any of the characters
    # '.', 'h', 't', 'm', 'l' from both ends and can mangle names;
    # os.path.splitext removes exactly the extension. A set also makes
    # the membership test O(1) instead of O(n).
    downloaded = set(
        os.path.splitext(f)[0] for f in os.listdir(DIR[typename]))
    return [c for c in expected if c not in downloaded]
if __name__=='__main__':
    # make sure every output directory exists before downloading
    utils.check_dir(DIR['summaries'])
    utils.check_dir(DIR['specifics'])
    utils.check_dir(DIR['proposers'])
    utils.check_dir(DIR['withdrawers'])

    urlmap = get_urlmap()
    # download every page set, echoing the bill id as progress
    for bill_id, link_id, has_summaries in urlmap:
        get_pages(bill_id, link_id, has_summaries)
        print bill_id

    # report bills whose detail page is still missing
    missing = check_missing('specifics', END_BILL)
    print missing

    #get_pages('1901020', 'PRC_C1G2G0V8D0B2H1S5I4J2Z4G9N2B0F6', 1)
|
from __future__ import print_function
import argparse
import os
import shutil
import subprocess as sp
import sys
import yaml
def run_cmd(args, cwd='.', raise_err=True):
    """Run a subprocess, echoing its output to stderr as it arrives.

    :param args: command as a whitespace-separated string or a list
    :param cwd: working directory for the subprocess
    :param raise_err: raise ValueError on a non-zero exit code
    :return: list of all output lines (stdout and stderr interleaved)
    """
    if isinstance(args, str):
        args = args.split()
    print("RUN CMD", args, file=sys.stderr)
    # universal_newlines keeps the output as text under Python 3
    # (a no-op for the Python 2 str type)
    proc = sp.Popen(args, stdout=sp.PIPE, stderr=sp.STDOUT, cwd=cwd,
                    universal_newlines=True)
    lines = []
    while proc.poll() is None:
        line = proc.stdout.readline()
        print(line, end='', file=sys.stderr)
        lines.append(line)
    new_lines = proc.stdout.readlines()
    print(''.join(new_lines), file=sys.stderr)
    # bug fix: output that arrived after the process exited used to be
    # printed but dropped from the returned list
    lines.extend(new_lines)
    if proc.poll() and raise_err:
        raise ValueError("Subprocess failed {}".format(proc.poll()))
    return lines
_d = os.path.dirname
# .regression.yml lives at the top level of the repo (three dirs up)
REGRESSION_CONFIG = os.path.join(_d(_d(_d(os.path.abspath(__file__)))), '.regression.yml')
# NOTE(review): yaml.load without an explicit Loader is deprecated and
# unsafe on untrusted input; yaml.safe_load would be preferable if the
# config never uses python-specific tags -- confirm before changing
REGRESSION_CONFIG = yaml.load(open(REGRESSION_CONFIG))
# keys the regression config file must define
REQUIRED = set(('compare_taxcalc_version',
                'compare_ogusa_version',
                'install_taxcalc_version',
                'diff',
                'numpy_version'))
if not set(REGRESSION_CONFIG) >= REQUIRED:
    # bug fix: the message had no {} placeholder, so the set of missing
    # keys was silently dropped from the error
    raise ValueError('.regression.yml at top level of repo needs to '
                     'define: {}'.format(REQUIRED - set(REGRESSION_CONFIG)))
OGUSA_ENV_PATH = os.path.join(os.environ['WORKSPACE'], 'ogusa_env')
def checkout_build_sources():
    """Install the requested OG-USA branch into a fresh conda env.

    Downloads miniconda, builds the ``ogusa_env`` environment with the
    pinned taxcalc/numpy versions from .regression.yml, stashes the
    regression scripts from the ``regression`` branch, then checks out
    and installs the OG-USA branch named on the command line.

    :return: the working directory the build ran in
    """
    parser = argparse.ArgumentParser(description='Get install OG-USA branch')
    parser.add_argument('ogusabranch')
    numpy_vers = REGRESSION_CONFIG['numpy_version']
    install_ogusa_version = parser.parse_args().ogusabranch
    install_taxcalc_version = REGRESSION_CONFIG['install_taxcalc_version']
    compare_ogusa_version = REGRESSION_CONFIG['compare_ogusa_version']
    compare_taxcalc_version = REGRESSION_CONFIG['compare_taxcalc_version']
    # bug fix: ``cwd`` was used below but never defined (NameError)
    cwd = os.getcwd()
    print('CHECKOUT_BUILD_SOURCES')
    run_cmd('wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh')
    miniconda_path = os.path.join(os.environ['WORKSPACE'], 'miniconda')
    run_cmd('bash miniconda.sh -b -p {}'.format(miniconda_path))
    run_cmd('conda config --set always_yes yes --set changeps1 no')
    run_cmd('conda update conda -n root')
    lines = ' '.join(run_cmd('conda env list')).lower()
    if 'ogusa_env' in lines:
        run_cmd('conda env remove --name ogusa_env')
    run_cmd('conda install nomkl')
    run_cmd('conda create --force python=2.7 --name ogusa_env')
    line = [line for line in run_cmd('conda env list')
            if 'ogusa_env' in line][0]
    conda_path = os.path.join(line.strip().split()[-1].strip(), 'bin', 'conda')
    print('Using conda {}'.format(conda_path))
    run_cmd('{} install --force -c ospc openblas pytest toolz scipy numpy={} pandas=0.18.1 matplotlib'.format(conda_path, numpy_vers))
    # MKL conflicts with the ospc openblas build; failure to remove is fine
    run_cmd('{} remove mkl mkl-service'.format(conda_path), raise_err=False)
    run_cmd('{} install -c ospc taxcalc={} --force'.format(conda_path, install_taxcalc_version))
    run_cmd('git fetch --all')
    # stash the regression scripts before switching to the branch under test
    run_cmd('git checkout regression')
    regression_tmp = os.path.join(cwd, '..', 'regression')
    if os.path.exists(regression_tmp):
        shutil.rmtree(regression_tmp)
    shutil.copytree(os.path.join(cwd, 'Python', 'regression'), regression_tmp)
    run_cmd('git checkout {}'.format(install_ogusa_version))
    run_cmd('python setup.py install')
    # copy puf.csv from the first candidate location that has it
    puf_choices = (os.path.join(cwd, '..', '..', 'puf.csv'),
                   os.path.join('Python', 'regression', 'puf.csv'),
                   os.path.join('/home', 'ubuntu', 'deploy', 'puf.csv'))
    for puf in puf_choices:
        if os.path.exists(puf):
            print('puf from', puf)
            shutil.copy(puf, os.path.join('Python', 'regression', 'puf.csv'))
    print("CHECKOUT_BUILD_SOURCES OK")
    return cwd
if __name__ == "__main__":
    # entry point: build the regression environment and install OG-USA
    checkout_build_sources()
Reduce complexity: check out the regression branch first
from __future__ import print_function
import argparse
import os
import shutil
import subprocess as sp
import sys
import yaml
def run_cmd(args, cwd='.', raise_err=True):
    """Run a subprocess, echoing its output to stderr as it arrives.

    :param args: command as a whitespace-separated string or a list
    :param cwd: working directory for the subprocess
    :param raise_err: raise ValueError on a non-zero exit code
    :return: list of all output lines (stdout and stderr interleaved)
    """
    if isinstance(args, str):
        args = args.split()
    print("RUN CMD", args, file=sys.stderr)
    # universal_newlines keeps the output as text under Python 3
    # (a no-op for the Python 2 str type)
    proc = sp.Popen(args, stdout=sp.PIPE, stderr=sp.STDOUT, cwd=cwd,
                    universal_newlines=True)
    lines = []
    while proc.poll() is None:
        line = proc.stdout.readline()
        print(line, end='', file=sys.stderr)
        lines.append(line)
    new_lines = proc.stdout.readlines()
    print(''.join(new_lines), file=sys.stderr)
    # bug fix: output that arrived after the process exited used to be
    # printed but dropped from the returned list
    lines.extend(new_lines)
    if proc.poll() and raise_err:
        raise ValueError("Subprocess failed {}".format(proc.poll()))
    return lines
_d = os.path.dirname
# .regression.yml lives at the top level of the repo (three dirs up)
REGRESSION_CONFIG = os.path.join(_d(_d(_d(os.path.abspath(__file__)))), '.regression.yml')
# NOTE(review): yaml.load without an explicit Loader is deprecated and
# unsafe on untrusted input; yaml.safe_load would be preferable if the
# config never uses python-specific tags -- confirm before changing
REGRESSION_CONFIG = yaml.load(open(REGRESSION_CONFIG))
# keys the regression config file must define
REQUIRED = set(('compare_taxcalc_version',
                'compare_ogusa_version',
                'install_taxcalc_version',
                'diff',
                'numpy_version'))
if not set(REGRESSION_CONFIG) >= REQUIRED:
    # bug fix: the message had no {} placeholder, so the set of missing
    # keys was silently dropped from the error
    raise ValueError('.regression.yml at top level of repo needs to '
                     'define: {}'.format(REQUIRED - set(REGRESSION_CONFIG)))
OGUSA_ENV_PATH = os.path.join(os.environ['WORKSPACE'], 'ogusa_env')
def checkout_build_sources():
    """Install the requested OG-USA branch into a fresh conda env.

    Downloads miniconda, builds the ``ogusa_env`` environment with the
    pinned taxcalc/numpy versions from .regression.yml, stashes the
    regression scripts from the ``regression`` branch, restores them onto
    the branch under test, then installs that branch.

    :return: the working directory the build ran in
    """
    parser = argparse.ArgumentParser(description='Get install OG-USA branch')
    parser.add_argument('ogusabranch')
    numpy_vers = REGRESSION_CONFIG['numpy_version']
    install_ogusa_version = parser.parse_args().ogusabranch
    install_taxcalc_version = REGRESSION_CONFIG['install_taxcalc_version']
    compare_ogusa_version = REGRESSION_CONFIG['compare_ogusa_version']
    compare_taxcalc_version = REGRESSION_CONFIG['compare_taxcalc_version']
    # bug fix: ``cwd`` was used below but never defined (NameError)
    cwd = os.getcwd()
    print('CHECKOUT_BUILD_SOURCES')
    run_cmd('wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh')
    miniconda_path = os.path.join(os.environ['WORKSPACE'], 'miniconda')
    run_cmd('bash miniconda.sh -b -p {}'.format(miniconda_path))
    run_cmd('conda config --set always_yes yes --set changeps1 no')
    run_cmd('conda update conda -n root')
    lines = ' '.join(run_cmd('conda env list')).lower()
    if 'ogusa_env' in lines:
        run_cmd('conda env remove --name ogusa_env')
    run_cmd('conda install nomkl')
    run_cmd('conda create --force python=2.7 --name ogusa_env')
    line = [line for line in run_cmd('conda env list')
            if 'ogusa_env' in line][0]
    conda_path = os.path.join(line.strip().split()[-1].strip(), 'bin', 'conda')
    print('Using conda {}'.format(conda_path))
    run_cmd('{} install --force -c ospc openblas pytest toolz scipy numpy={} pandas=0.18.1 matplotlib'.format(conda_path, numpy_vers))
    # MKL conflicts with the ospc openblas build; failure to remove is fine
    run_cmd('{} remove mkl mkl-service'.format(conda_path), raise_err=False)
    run_cmd('{} install -c ospc taxcalc={} --force'.format(conda_path, install_taxcalc_version))
    run_cmd('git fetch --all')
    # stash the regression scripts before switching to the branch under test
    run_cmd('git checkout regression')
    regression_tmp = os.path.join('..', 'regression')
    if os.path.exists(regression_tmp):
        shutil.rmtree(regression_tmp)
    src = os.path.join('Python', 'regression')
    shutil.copytree(src, regression_tmp)
    run_cmd('git checkout {}'.format(install_ogusa_version))
    # restore the stashed regression scripts onto the branch under test
    shutil.copytree(regression_tmp, src)
    run_cmd('python setup.py install')
    # copy puf.csv from the first candidate location that has it
    puf_choices = (os.path.join(cwd, '..', '..', 'puf.csv'),
                   os.path.join('Python', 'regression', 'puf.csv'),
                   os.path.join('/home', 'ubuntu', 'deploy', 'puf.csv'))
    for puf in puf_choices:
        if os.path.exists(puf):
            print('puf from', puf)
            shutil.copy(puf, os.path.join('Python', 'regression', 'puf.csv'))
    print("CHECKOUT_BUILD_SOURCES OK")
    return cwd
if __name__ == "__main__":
    # entry point: build the regression environment and install OG-USA
    checkout_build_sources()
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Logical units dealing with storage of instances."""
import itertools
import logging
import os
import time
from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import objects
from ganeti import utils
import ganeti.rpc.node as rpc
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, Tasklet
from ganeti.cmdlib.common import INSTANCE_DOWN, INSTANCE_NOT_RUNNING, \
AnnotateDiskParams, CheckIAllocatorOrNode, ExpandNodeUuidAndName, \
CheckNodeOnline, CheckInstanceNodeGroups, CheckInstanceState, \
IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks, GetWantedNodes, \
CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_utils import GetInstanceInfoText, \
CopyLockList, ReleaseLocks, CheckNodeVmCapable, \
BuildInstanceHookEnvByObject, CheckNodeNotDrained, CheckTargetNodeIPolicy
import ganeti.masterd.instance
# Per-template suffix used when generating unique disk names; templates
# not listed here get no template-specific prefix.
_DISK_TEMPLATE_NAME_PREFIX = {
  constants.DT_PLAIN: "",
  constants.DT_RBD: ".rbd",
  constants.DT_EXT: ".ext",
  constants.DT_FILE: ".file",
  constants.DT_SHARED_FILE: ".sharedfile",
  }
def CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
                         excl_stor):
  """Create one block device on the given node, without recursing.

  Children of the device are not created here; they must already exist.

  @param lu: the lu on whose behalf we execute
  @param node_uuid: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passes to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device own Open() execution
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active for the node

  """
  result = lu.rpc.call_blockdev_create(node_uuid, (device, instance),
                                       device.size, instance.name, force_open,
                                       info, excl_stor)
  error_msg = ("Can't create block device %s on"
               " node %s for instance %s" % (device,
                                             lu.cfg.GetNodeName(node_uuid),
                                             instance.name))
  result.Raise(error_msg)
def _CreateBlockDevInner(lu, node_uuid, instance, device, force_create,
                         info, force_open, excl_stor):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @attention: The device has to be annotated already.

  @param lu: the lu on whose behalf we execute
  @param node_uuid: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be change to True whenever we find a device which has
      CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passes to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device own Open() execution
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active for the node

  @return: list of created devices
  """
  created_devices = []
  try:
    if device.CreateOnSecondary():
      force_create = True
    # create the children bottom-up so the device itself can be assembled
    # on top of them
    if device.children:
      for child in device.children:
        devs = _CreateBlockDevInner(lu, node_uuid, instance, child,
                                    force_create, info, force_open, excl_stor)
        created_devices.extend(devs)
    if not force_create:
      return created_devices
    CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
                         excl_stor)
    # The device has been completely created, so there is no point in keeping
    # its subdevices in the list. We just add the device itself instead.
    created_devices = [(node_uuid, device)]
    return created_devices
  except errors.DeviceCreationError, e:
    # propagate what was created so far so the caller can roll it back
    e.created_devices.extend(created_devices)
    raise e
  except errors.OpExecError, e:
    # wrap generic execution errors with the partial-creation info
    raise errors.DeviceCreationError(str(e), created_devices)
def IsExclusiveStorageEnabledNodeUuid(cfg, node_uuid):
  """Whether exclusive_storage is in effect for the given node.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node_uuid: string
  @param node_uuid: The node UUID
  @rtype: bool
  @return: The effective value of exclusive_storage
  @raise errors.OpPrereqError: if no node exists with the given name

  """
  node_info = cfg.GetNodeInfo(node_uuid)
  if node_info is None:
    raise errors.OpPrereqError("Invalid node UUID %s" % node_uuid,
                               errors.ECODE_NOENT)
  return IsExclusiveStorageEnabledNode(cfg, node_info)
def _CreateBlockDev(lu, node_uuid, instance, device, force_create, info,
                    force_open):
  """Wrapper around L{_CreateBlockDevInner}.

  Annotates the root device with the instance's disk parameters and
  determines the node's exclusive_storage setting before delegating.

  """
  (annotated_disk,) = AnnotateDiskParams(instance, [device], lu.cfg)
  excl_stor = IsExclusiveStorageEnabledNodeUuid(lu.cfg, node_uuid)
  return _CreateBlockDevInner(lu, node_uuid, instance, annotated_disk,
                              force_create, info, force_open, excl_stor)
def _UndoCreateDisks(lu, disks_created, instance):
  """Undo the work performed by L{CreateDisks}.

  Best-effort rollback called on error: each device reported by
  L{CreateDisks} is removed from its node; removal failures are only
  logged as warnings so the rollback continues.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @param disks_created: the result returned by L{CreateDisks}
  @type instance: L{objects.Instance}
  @param instance: the instance for which disks were created

  """
  for (creation_node, created_disk) in disks_created:
    rm_result = lu.rpc.call_blockdev_remove(creation_node,
                                            (created_disk, instance))
    rm_result.Warn("Failed to remove newly-created disk %s on node %s" %
                   (created_disk, lu.cfg.GetNodeName(creation_node)),
                   logging.warning)
def CreateDisks(lu, instance, instance_disks=None,
                to_skip=None, target_node_uuid=None, disks=None):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  Since the instance may not have been saved to the config file yet, this
  function can not query the config file for the instance's disks; in that
  case they need to be passed as an argument.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type instance_disks: list of L{objects.Disk}
  @param instance_disks: the disks that belong to the instance; if not
      specified, retrieve them from config file
  @type to_skip: list
  @param to_skip: list of indices to skip
  @type target_node_uuid: string
  @param target_node_uuid: if passed, overrides the target node for creation
  @type disks: list of {objects.Disk}
  @param disks: the disks to create; if not specified, all the disks of the
      instance are created
  @return: information about the created disks, to be used to call
      L{_UndoCreateDisks}
  @raise errors.OpPrereqError: in case of error

  """
  info = GetInstanceInfoText(instance)
  if instance_disks is None:
    instance_disks = lu.cfg.GetInstanceDisks(instance.uuid)
  if target_node_uuid is None:
    pnode_uuid = instance.primary_node
    # We cannot use config's 'GetInstanceNodes' here as 'CreateDisks'
    # is used by 'LUInstanceCreate' and the instance object is not
    # stored in the config yet.
    all_node_uuids = []
    for disk in instance_disks:
      all_node_uuids.extend(disk.all_nodes)
    all_node_uuids = set(all_node_uuids)
    # ensure that primary node is always the first
    all_node_uuids.discard(pnode_uuid)
    all_node_uuids = [pnode_uuid] + list(all_node_uuids)
  else:
    pnode_uuid = target_node_uuid
    all_node_uuids = [pnode_uuid]
  if disks is None:
    disks = instance_disks
  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)
  # file-based disks share a single directory, created on the primary node
  if instance.disk_template in constants.DTS_FILEBASED:
    file_storage_dir = os.path.dirname(instance_disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode_uuid, file_storage_dir)
    result.Raise("Failed to create directory '%s' on"
                 " node %s" % (file_storage_dir,
                               lu.cfg.GetNodeName(pnode_uuid)))
  disks_created = []
  for idx, device in enumerate(disks):
    if to_skip and idx in to_skip:
      continue
    logging.info("Creating disk %s for instance '%s'", idx, instance.name)
    for node_uuid in all_node_uuids:
      # creation is only forced (and the device opened) on the primary node
      f_create = node_uuid == pnode_uuid
      try:
        _CreateBlockDev(lu, node_uuid, instance, device, f_create, info,
                        f_create)
        disks_created.append((node_uuid, device))
      except errors.DeviceCreationError, e:
        # collect everything created so far, roll it back, and re-raise
        # as an OpExecError
        logging.warning("Creating disk %s for instance '%s' failed",
                        idx, instance.name)
        disks_created.extend(e.created_devices)
        _UndoCreateDisks(lu, disks_created, instance)
        raise errors.OpExecError(e.message)
  return disks_created
def ComputeDiskSizePerVG(disk_template, disks):
  """Compute disk size requirements in the volume group

  @param disk_template: the disk template of the instance
  @param disks: list of disk specification dicts (IDISK_VG / IDISK_SIZE)
  @return: dict mapping volume group name to required size
  @raise errors.ProgrammerError: for an unknown disk template

  """
  def _compute(disks, payload):
    """Universal algorithm.

    Sums the disk sizes (plus a fixed per-disk payload) per volume group.

    """
    vgs = {}
    for disk in disks:
      vg = disk[constants.IDISK_VG]
      # bug fix: the running total was previously looked up with the
      # IDISK_VG constant itself as the key instead of the disk's volume
      # group name, so sizes never accumulated per VG
      vgs[vg] = vgs.get(vg, 0) + disk[constants.IDISK_SIZE] + payload
    return vgs

  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: {},
    constants.DT_PLAIN: _compute(disks, 0),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: _compute(disks, constants.DRBD_META_SIZE),
    constants.DT_FILE: {},
    constants.DT_SHARED_FILE: {},
    constants.DT_GLUSTER: {},
    }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]
def ComputeDisks(op, default_vg):
  """Computes the instance disks.

  Validates and normalises every disk specification from the opcode into
  a dict of IDISK_* parameters.

  @param op: The instance opcode
  @param default_vg: The default_vg to assume
  @return: The computed disks
  @raise errors.OpPrereqError: on an invalid access mode or size, or a
      missing/extra extstorage provider

  """
  disks = []
  for disk in op.disks:
    mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
    if mode not in constants.DISK_ACCESS_SET:
      raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                 mode, errors.ECODE_INVAL)
    size = disk.get(constants.IDISK_SIZE, None)
    if size is None:
      raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
    try:
      size = int(size)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid disk size '%s'" % size,
                                 errors.ECODE_INVAL)
    # the 'provider' option is only meaningful for the ext template
    ext_provider = disk.get(constants.IDISK_PROVIDER, None)
    if ext_provider and op.disk_template != constants.DT_EXT:
      raise errors.OpPrereqError("The '%s' option is only valid for the %s"
                                 " disk template, not %s" %
                                 (constants.IDISK_PROVIDER, constants.DT_EXT,
                                  op.disk_template), errors.ECODE_INVAL)
    data_vg = disk.get(constants.IDISK_VG, default_vg)
    name = disk.get(constants.IDISK_NAME, None)
    # the literal value "none" clears the disk name
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    new_disk = {
      constants.IDISK_SIZE: size,
      constants.IDISK_MODE: mode,
      constants.IDISK_VG: data_vg,
      constants.IDISK_NAME: name,
      }
    # copy over the optional parameters when present
    for key in [
      constants.IDISK_METAVG,
      constants.IDISK_ADOPT,
      constants.IDISK_SPINDLES,
      ]:
      if key in disk:
        new_disk[key] = disk[key]
    # For extstorage, demand the `provider' option and add any
    # additional parameters (ext-params) to the dict
    if op.disk_template == constants.DT_EXT:
      if ext_provider:
        new_disk[constants.IDISK_PROVIDER] = ext_provider
        for key in disk:
          if key not in constants.IDISK_PARAMS:
            new_disk[key] = disk[key]
      else:
        raise errors.OpPrereqError("Missing provider for template '%s'" %
                                   constants.DT_EXT, errors.ECODE_INVAL)
    disks.append(new_disk)
  return disks
def CheckRADOSFreeSpace():
  """Compute disk size requirements inside the RADOS cluster.

  Placeholder: RADOS capacity is assumed to always be sufficient, so no
  actual check is performed.

  """
  # For the RADOS cluster we assume there is always enough space.
  pass
def _GenerateDRBD8Branch(lu, primary_uuid, secondary_uuid, size, vgnames, names,
                         iv_name, p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  Builds two plain LVs (data and metadata) and wraps them in a DRBD8
  disk object connecting the primary and secondary nodes.

  @param vgnames: volume group names for the data and meta LVs (2 items)
  @param names: LV names for the data and meta LVs (2 items)
  @param iv_name: instance-visible disk name (e.g. "disk/0")
  @param p_minor: DRBD minor to use on the primary node
  @param s_minor: DRBD minor to use on the secondary node

  """
  assert len(vgnames) == len(names) == 2
  port = lu.cfg.AllocatePort()
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())

  # data child: a plain LV of the requested size
  dev_data = objects.Disk(dev_type=constants.DT_PLAIN, size=size,
                          logical_id=(vgnames[0], names[0]),
                          params={})
  dev_data.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  # metadata child: a plain LV of fixed DRBD metadata size
  dev_meta = objects.Disk(dev_type=constants.DT_PLAIN,
                          size=constants.DRBD_META_SIZE,
                          logical_id=(vgnames[1], names[1]),
                          params={})
  dev_meta.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  drbd_dev = objects.Disk(dev_type=constants.DT_DRBD8, size=size,
                          logical_id=(primary_uuid, secondary_uuid, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name, params={})
  drbd_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  return drbd_dev
def GenerateDiskTemplate(
  lu, template_name, instance_uuid, primary_node_uuid, secondary_node_uuids,
  disk_info, file_storage_dir, file_driver, base_index,
  feedback_fn, full_disk_params):
  """Generate the entire disk layout for a given template type.

  Builds one L{objects.Disk} per entry in C{disk_info}, with logical ids
  appropriate for the template (DRBD trees for DT_DRBD8, flat devices for
  the other templates).

  @param base_index: index of the first disk to generate (existing disks
      keep their lower indices)
  @raise errors.ProgrammerError: on an unknown template or an
      inconsistent secondary node list

  """
  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), template_name)

  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_DRBD8:
    if len(secondary_node_uuids) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node_uuid = secondary_node_uuids[0]
    # one minor per node per disk
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node_uuid, remote_node_uuid] * len(disk_info), instance_uuid)

    (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
                                                       full_disk_params)
    drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]

    # each DRBD disk needs a data LV name and a meta LV name
    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      data_vg = disk.get(constants.IDISK_VG, vgname)
      meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
      disk_dev = _GenerateDRBD8Branch(lu, primary_node_uuid, remote_node_uuid,
                                      disk[constants.IDISK_SIZE],
                                      [data_vg, meta_vg],
                                      names[idx * 2:idx * 2 + 2],
                                      "disk/%d" % disk_index,
                                      minors[idx * 2], minors[idx * 2 + 1])
      disk_dev.mode = disk[constants.IDISK_MODE]
      disk_dev.name = disk.get(constants.IDISK_NAME, None)
      disks.append(disk_dev)
  else:
    if secondary_node_uuids:
      raise errors.ProgrammerError("Wrong template configuration")

    name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
    if name_prefix is None:
      names = None
    else:
      names = _GenerateUniqueNames(lu, ["%s.disk%s" %
                                        (name_prefix, base_index + i)
                                        for i in range(disk_count)])

    # logical_id_fn maps (idx, disk_index, disk) to the template-specific
    # logical id tuple of the new disk
    if template_name == constants.DT_PLAIN:

      def logical_id_fn(idx, _, disk):
        vg = disk.get(constants.IDISK_VG, vgname)
        return (vg, names[idx])

    elif template_name == constants.DT_GLUSTER:
      logical_id_fn = lambda _1, disk_index, _2: \
        (file_driver, "ganeti/%s.%d" % (instance_uuid,
                                        disk_index))

    elif template_name in constants.DTS_FILEBASED: # Gluster handled above
      # NOTE(review): this lambda ignores its first argument and instead
      # closes over the loop variable ``idx`` from the enumerate() loop
      # below (late binding). It works because the lambda is only called
      # inside that loop, but the binding is fragile -- confirm before
      # refactoring.
      logical_id_fn = \
        lambda _, disk_index, disk: (file_driver,
                                     "%s/%s" % (file_storage_dir,
                                                names[idx]))
    elif template_name == constants.DT_BLOCK:
      logical_id_fn = \
        lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
                                       disk[constants.IDISK_ADOPT])
    elif template_name == constants.DT_RBD:
      logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
    elif template_name == constants.DT_EXT:
      def logical_id_fn(idx, _, disk):
        provider = disk.get(constants.IDISK_PROVIDER, None)
        if provider is None:
          raise errors.ProgrammerError("Disk template is %s, but '%s' is"
                                       " not found", constants.DT_EXT,
                                       constants.IDISK_PROVIDER)
        return (provider, names[idx])
    else:
      raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)

    dev_type = template_name

    for idx, disk in enumerate(disk_info):
      params = {}
      # Only for the Ext template add disk_info to params
      if template_name == constants.DT_EXT:
        params[constants.IDISK_PROVIDER] = disk[constants.IDISK_PROVIDER]
        for key in disk:
          if key not in constants.IDISK_PARAMS:
            params[key] = disk[key]
      disk_index = idx + base_index
      size = disk[constants.IDISK_SIZE]
      feedback_fn("* disk %s, size %s" %
                  (disk_index, utils.FormatUnit(size, "h")))
      disk_dev = objects.Disk(dev_type=dev_type, size=size,
                              logical_id=logical_id_fn(idx, disk_index, disk),
                              iv_name="disk/%d" % disk_index,
                              mode=disk[constants.IDISK_MODE],
                              params=params,
                              spindles=disk.get(constants.IDISK_SPINDLES))
      disk_dev.name = disk.get(constants.IDISK_NAME, None)
      disk_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
      disks.append(disk_dev)

  return disks
def CheckSpindlesExclusiveStorage(diskdict, es_flag, required):
  """Check the presence of the spindle options with exclusive_storage.

  @type diskdict: dict
  @param diskdict: disk parameters
  @type es_flag: bool
  @param es_flag: the effective value of the exclusive_storage flag
  @type required: bool
  @param required: whether spindles are required or just optional
  @raise errors.OpPrereqError when spindles are given and they should not

  """
  # A spindle value of None counts the same as an absent key
  spindles_given = diskdict.get(constants.IDISK_SPINDLES) is not None

  if spindles_given and not es_flag:
    raise errors.OpPrereqError("Spindles in instance disks cannot be specified"
                               " when exclusive storage is not active",
                               errors.ECODE_INVAL)

  if es_flag and required and not spindles_given:
    raise errors.OpPrereqError("You must specify spindles in instance disks"
                               " when exclusive storage is active",
                               errors.ECODE_INVAL)
class LUInstanceRecreateDisks(LogicalUnit):
  """Recreate an instance's missing disks.

  """
  HPATH = "instance-recreate-disks"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  # Disk parameters that may be changed while recreating
  _MODIFYABLE = compat.UniqueFrozenset([
    constants.IDISK_SIZE,
    constants.IDISK_MODE,
    constants.IDISK_SPINDLES,
    ])

  # New or changed disk parameters may have different semantics
  assert constants.IDISK_PARAMS == (_MODIFYABLE | frozenset([
    constants.IDISK_ADOPT,

    # TODO: Implement support changing VG while recreating
    constants.IDISK_VG,
    constants.IDISK_METAVG,
    constants.IDISK_PROVIDER,
    constants.IDISK_NAME,
    ]))

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    be_full = self.cfg.GetClusterInfo().FillBE(self.instance)

    # FIXME
    # The allocator should actually run in "relocate" mode, but current
    # allocators don't support relocating all the nodes of an instance at
    # the same time. As a workaround we use "allocate" mode, but this is
    # suboptimal for two reasons:
    # - The instance name passed to the allocator is present in the list of
    #   existing instances, so there could be a conflict within the
    #   internal structures of the allocator. This doesn't happen with the
    #   current allocators, but it's a liability.
    # - The allocator counts the resources used by the instance twice: once
    #   because the instance exists already, and once because it tries to
    #   allocate a new instance.
    # The allocator could choose some of the nodes on which the instance is
    # running, but that's not a problem. If the instance nodes are broken,
    # they should be already be marked as drained or offline, and hence
    # skipped by the allocator. If instance disks have been lost for other
    # reasons, then recreating the disks on the same nodes should be fine.
    disk_template = self.instance.disk_template
    spindle_use = be_full[constants.BE_SPINDLE_USE]
    disks = [{
      constants.IDISK_SIZE: d.size,
      constants.IDISK_MODE: d.mode,
      constants.IDISK_SPINDLES: d.spindles,
      } for d in self.cfg.GetInstanceDisks(self.instance.uuid)]
    req = iallocator.IAReqInstanceAlloc(name=self.op.instance_name,
                                        disk_template=disk_template,
                                        tags=list(self.instance.GetTags()),
                                        os=self.instance.os,
                                        nics=[{}],
                                        vcpus=be_full[constants.BE_VCPUS],
                                        memory=be_full[constants.BE_MAXMEM],
                                        spindle_use=spindle_use,
                                        disks=disks,
                                        hypervisor=self.instance.hypervisor,
                                        node_whitelist=None)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    assert req.RequiredNodes() == \
      len(self.cfg.GetInstanceNodes(self.instance.uuid))

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
                                 " %s" % (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, ial.result)
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(self.op.nodes))

  def CheckArguments(self):
    """Check and normalize opcode arguments.

    Converts the deprecated plain-index disk list into (index, params)
    pairs and rejects duplicate or unmodifiable disk parameters.

    """
    if self.op.disks and ht.TNonNegativeInt(self.op.disks[0]):
      # Normalize and convert deprecated list of disk indices
      self.op.disks = [(idx, {}) for idx in sorted(frozenset(self.op.disks))]

    duplicates = utils.FindDuplicates(map(compat.fst, self.op.disks))
    if duplicates:
      raise errors.OpPrereqError("Some disks have been specified more than"
                                 " once: %s" % utils.CommaJoin(duplicates),
                                 errors.ECODE_INVAL)

    # We don't want _CheckIAllocatorOrNode selecting the default iallocator
    # when neither iallocator nor nodes are specified
    if self.op.iallocator or self.op.nodes:
      CheckIAllocatorOrNode(self, "iallocator", "nodes")

    for (idx, params) in self.op.disks:
      utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
      unsupported = frozenset(params.keys()) - self._MODIFYABLE
      if unsupported:
        raise errors.OpPrereqError("Parameters for disk %s try to change"
                                   " unmodifyable parameter(s): %s" %
                                   (idx, utils.CommaJoin(unsupported)),
                                   errors.ECODE_INVAL)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

    if self.op.nodes:
      (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)
      self.needed_locks[locking.LEVEL_NODE] = list(self.op.node_uuids)
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      if self.op.iallocator:
        # iallocator will select a new node in the same group
        self.needed_locks[locking.LEVEL_NODEGROUP] = []
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE_RES] = []

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert self.op.iallocator is not None
      assert not self.op.nodes
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      self.share_locks[locking.LEVEL_NODEGROUP] = 1
      # Lock the primary group used by the instance optimistically; this
      # requires going via the node before it's locked, requiring
      # verification later on
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid, primary_only=True)

    elif level == locking.LEVEL_NODE:
      # If an allocator is used, then we lock all the nodes in the current
      # instance group, as we don't know yet which ones will be selected;
      # if we replace the nodes without using an allocator, locks are
      # already declared in ExpandNames; otherwise, we need to lock all the
      # instance nodes for disk re-creation
      if self.op.iallocator:
        assert not self.op.nodes
        assert not self.needed_locks[locking.LEVEL_NODE]
        assert len(self.owned_locks(locking.LEVEL_NODEGROUP)) == 1

        # Lock member nodes of the group of the primary node
        for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP):
          self.needed_locks[locking.LEVEL_NODE].extend(
            self.cfg.GetNodeGroup(group_uuid).members)

        assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
      elif not self.op.nodes:
        self._LockInstancesNodes(primary_only=False)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    return BuildInstanceHookEnvByObject(self, self.instance)

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + \
      list(self.cfg.GetInstanceNodes(self.instance.uuid))
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    if self.op.node_uuids:
      inst_nodes = self.cfg.GetInstanceNodes(instance.uuid)
      if len(self.op.node_uuids) != len(inst_nodes):
        raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
                                   " %d replacement nodes were specified" %
                                   (instance.name, len(inst_nodes),
                                    len(self.op.node_uuids)),
                                   errors.ECODE_INVAL)
      assert instance.disk_template != constants.DT_DRBD8 or \
        len(self.op.node_uuids) == 2
      assert instance.disk_template != constants.DT_PLAIN or \
        len(self.op.node_uuids) == 1
      primary_node = self.op.node_uuids[0]
    else:
      primary_node = instance.primary_node
    if not self.op.iallocator:
      CheckNodeOnline(self, primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name, errors.ECODE_INVAL)

    # Verify if node group locks are still correct
    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
    if owned_groups:
      # Node group locks are acquired only for the primary node (and only
      # when the allocator is used)
      CheckInstanceNodeGroups(self.cfg, instance.uuid, owned_groups,
                              primary_only=True)

    # if we replace nodes *and* the old primary is offline, we don't
    # check the instance state
    old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
    if not ((self.op.iallocator or self.op.node_uuids) and old_pnode.offline):
      CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                         msg="cannot recreate disks")

    if self.op.disks:
      self.disks = dict(self.op.disks)
    else:
      self.disks = dict((idx, {}) for idx in range(len(instance.disks)))

    maxidx = max(self.disks.keys())
    if maxidx >= len(instance.disks):
      raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
                                 errors.ECODE_INVAL)

    # Changing nodes requires recreating *all* disks; compare against
    # list(range(...)) because under Python 3 a list never compares equal
    # to a bare range object, which made this guard ineffective
    if ((self.op.node_uuids or self.op.iallocator) and
        sorted(self.disks.keys()) != list(range(len(instance.disks)))):
      raise errors.OpPrereqError("Can't recreate disks partially and"
                                 " change the nodes at the same time",
                                 errors.ECODE_INVAL)

    self.instance = instance

    if self.op.iallocator:
      self._RunAllocator()
      # Release unneeded node and node resource locks
      ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.node_uuids)
      ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.node_uuids)
      ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    if self.op.node_uuids:
      node_uuids = self.op.node_uuids
    else:
      node_uuids = self.cfg.GetInstanceNodes(instance.uuid)
    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids).values()
      )
    for new_params in self.disks.values():
      CheckSpindlesExclusiveStorage(new_params, excl_stor, False)

  def Exec(self, feedback_fn):
    """Recreate the disks.

    """
    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    to_skip = []
    mods = [] # keeps track of needed changes

    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    for idx, disk in enumerate(inst_disks):
      try:
        changes = self.disks[idx]
      except KeyError:
        # Disk should not be recreated
        to_skip.append(idx)
        continue

      # update secondaries for disks, if needed
      if self.op.node_uuids and disk.dev_type == constants.DT_DRBD8:
        # need to update the nodes and minors
        assert len(self.op.node_uuids) == 2
        assert len(disk.logical_id) == 6 # otherwise disk internals
                                         # have changed
        (_, _, old_port, _, _, old_secret) = disk.logical_id
        new_minors = self.cfg.AllocateDRBDMinor(self.op.node_uuids,
                                                self.instance.uuid)
        new_id = (self.op.node_uuids[0], self.op.node_uuids[1], old_port,
                  new_minors[0], new_minors[1], old_secret)
        assert len(disk.logical_id) == len(new_id)
      else:
        new_id = None

      mods.append((idx, new_id, changes))

    # now that we have passed all asserts above, we can apply the mods
    # in a single run (to avoid partial changes)
    for idx, new_id, changes in mods:
      disk = inst_disks[idx]
      if new_id is not None:
        assert disk.dev_type == constants.DT_DRBD8
        disk.logical_id = new_id
      if changes:
        disk.Update(size=changes.get(constants.IDISK_SIZE, None),
                    mode=changes.get(constants.IDISK_MODE, None),
                    spindles=changes.get(constants.IDISK_SPINDLES, None))
      self.cfg.Update(disk, feedback_fn)

    # change primary node, if needed
    if self.op.node_uuids:
      self.instance.primary_node = self.op.node_uuids[0]
      self.LogWarning("Changing the instance's nodes, you will have to"
                      " remove any disks left on the older nodes manually")

    if self.op.node_uuids:
      self.cfg.Update(self.instance, feedback_fn)

    # All touched nodes must be locked
    mylocks = self.owned_locks(locking.LEVEL_NODE)
    inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
    assert mylocks.issuperset(frozenset(inst_nodes))
    new_disks = CreateDisks(self, self.instance, to_skip=to_skip)

    # TODO: Release node locks before wiping, or explain why it's not possible
    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    if self.cfg.GetClusterInfo().prealloc_wipe_disks:
      wipedisks = [(idx, disk, 0)
                   for (idx, disk) in enumerate(inst_disks)
                   if idx not in to_skip]
      WipeOrCleanupDisks(self, self.instance, disks=wipedisks,
                         cleanup=new_disks)
def _PerformNodeInfoCall(lu, node_uuids, vg):
  """Prepares the input and performs a node info call.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: list of string
  @param node_uuids: list of node UUIDs to perform the call for
  @type vg: string
  @param vg: the volume group's name
  @return: the result of the node info RPC call

  """
  # Query one LVM storage unit (the given VG) on every node
  storage_units = rpc.PrepareStorageUnitsForNodes(
    lu.cfg, [(constants.ST_LVM_VG, vg)], node_uuids)
  hv_name = lu.cfg.GetHypervisorType()
  hv_specs = [(hv_name, lu.cfg.GetClusterInfo().hvparams[hv_name])]
  return lu.rpc.call_node_info(node_uuids, storage_units, hv_specs)
def _CheckVgCapacityForNode(node_name, node_info, vg, requested):
  """Checks the vg capacity for a given node.

  @type node_info: tuple (_, list of dicts, _)
  @param node_info: the result of the node info call for one node
  @type node_name: string
  @param node_name: the name of the node
  @type vg: string
  @param vg: volume group name
  @type requested: int
  @param requested: the amount of disk in MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  (_, space_info, _) = node_info

  vg_info = utils.storage.LookupSpaceInfoByStorageType(
    space_info, constants.ST_LVM_VG)
  if not vg_info:
    raise errors.OpPrereqError("Can't retrieve storage information for LVM")

  free_mib = vg_info.get("storage_free", None)
  # A non-integer value means the node could not report usable data
  if not isinstance(free_mib, int):
    raise errors.OpPrereqError("Can't compute free disk space on node"
                               " %s for vg %s, result was '%s'" %
                               (node_name, vg, free_mib), errors.ECODE_ENVIRON)

  if requested > free_mib:
    raise errors.OpPrereqError("Not enough disk space on target node %s"
                               " vg %s: required %d MiB, available %d MiB" %
                               (node_name, vg, requested, free_mib),
                               errors.ECODE_NORES)
def _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, requested):
  """Checks if nodes have enough free disk space in the specified VG.

  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type vg: C{str}
  @param vg: the volume group to check
  @type requested: C{int}
  @param requested: the amount of disk in MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  # One RPC for all nodes, then per-node verification of the result
  nodeinfo = _PerformNodeInfoCall(lu, node_uuids, vg)
  for node_uuid in node_uuids:
    node_name = lu.cfg.GetNodeName(node_uuid)
    info = nodeinfo[node_uuid]
    # A failed RPC for any node aborts the whole check as a prereq error
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    _CheckVgCapacityForNode(node_name, info.payload, vg, requested)
def CheckNodesFreeDiskPerVG(lu, node_uuids, req_sizes):
  """Checks if nodes have enough free disk space in all the VGs.

  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type req_sizes: C{dict}
  @param req_sizes: the hash of vg and corresponding amount of disk in
      MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  # Check each requested volume group independently
  for (vg_name, size_mib) in req_sizes.items():
    _CheckNodesFreeDiskOnVG(lu, node_uuids, vg_name, size_mib)
def _DiskSizeInBytesToMebibytes(lu, size):
"""Converts a disk size in bytes to mebibytes.
Warns and rounds up if the size isn't an even multiple of 1 MiB.
"""
(mib, remainder) = divmod(size, 1024 * 1024)
if remainder != 0:
lu.LogWarning("Disk size is not an even multiple of 1 MiB; rounding up"
" to not overwrite existing data (%s bytes will not be"
" wiped)", (1024 * 1024) - remainder)
mib += 1
return mib
def _CalcEta(time_taken, written, total_size):
"""Calculates the ETA based on size written and total size.
@param time_taken: The time taken so far
@param written: amount written so far
@param total_size: The total size of data to be written
@return: The remaining time in seconds
"""
avg_time = time_taken / float(written)
return (total_size - written) * avg_time
def WipeDisks(lu, instance, disks=None):
  """Wipes instance disks.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type disks: None or list of tuple of (number, L{objects.Disk}, number)
  @param disks: Disk details; tuple contains disk index, disk object and the
    start offset

  """
  node_uuid = instance.primary_node
  node_name = lu.cfg.GetNodeName(node_uuid)

  if disks is None:
    # Default: wipe every instance disk, starting from offset 0
    inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
    disks = [(idx, disk, 0)
             for (idx, disk) in enumerate(inst_disks)]

  logging.info("Pausing synchronization of disks of instance '%s'",
               instance.name)
  # Pause disk synchronization for the duration of the wipe; it is
  # resumed in the "finally" block below
  result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
                                                  (map(compat.snd, disks),
                                                   instance),
                                                  True)
  result.Raise("Failed to pause disk synchronization on node '%s'" % node_name)

  # Per-disk pause failures are only logged, not fatal
  for idx, success in enumerate(result.payload):
    if not success:
      logging.warn("Pausing synchronization of disk %s of instance '%s'"
                   " failed", idx, instance.name)

  try:
    for (idx, device, offset) in disks:
      # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
      # MAX_WIPE_CHUNK at max. Truncating to integer to avoid rounding errors.
      wipe_chunk_size = \
        int(min(constants.MAX_WIPE_CHUNK,
                device.size / 100.0 * constants.MIN_WIPE_CHUNK_PERCENT))

      size = device.size
      last_output = 0
      start_time = time.time()

      if offset == 0:
        info_text = ""
      else:
        info_text = (" (from %s to %s)" %
                     (utils.FormatUnit(offset, "h"),
                      utils.FormatUnit(size, "h")))

      lu.LogInfo("* Wiping disk %s%s", idx, info_text)

      logging.info("Wiping disk %d for instance %s on node %s using"
                   " chunk size %s", idx, instance.name, node_name,
                   wipe_chunk_size)

      # Wipe the device chunk by chunk until the end of the disk
      while offset < size:
        wipe_size = min(wipe_chunk_size, size - offset)

        logging.debug("Wiping disk %d, offset %s, chunk %s",
                      idx, offset, wipe_size)

        result = lu.rpc.call_blockdev_wipe(node_uuid, (device, instance),
                                           offset, wipe_size)
        result.Raise("Could not wipe disk %d at offset %d for size %d" %
                     (idx, offset, wipe_size))

        now = time.time()
        offset += wipe_size
        # Report progress at most once per minute
        if now - last_output >= 60:
          eta = _CalcEta(now - start_time, offset, size)
          lu.LogInfo(" - done: %.1f%% ETA: %s",
                     offset / float(size) * 100, utils.FormatSeconds(eta))
          last_output = now
  finally:
    logging.info("Resuming synchronization of disks for instance '%s'",
                 instance.name)

    result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
                                                    (map(compat.snd, disks),
                                                     instance),
                                                    False)

    # Failure to resume is logged but never raised, so a wipe error
    # (if any) propagates unchanged out of the "try" block
    if result.fail_msg:
      lu.LogWarning("Failed to resume disk synchronization on node '%s': %s",
                    node_name, result.fail_msg)
    else:
      for idx, success in enumerate(result.payload):
        if not success:
          lu.LogWarning("Resuming synchronization of disk %s of instance '%s'"
                        " failed", idx, instance.name)
def ImageDisks(lu, instance, image, disks=None):
  """Dumps an image onto an instance disk.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type image: string
  @param image: the image whose disks we should create
  @type disks: None or list of ints
  @param disks: disk indices

  """
  node_uuid = instance.primary_node
  node_name = lu.cfg.GetNodeName(node_uuid)

  inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
  if disks is None:
    # By default only the first disk is imaged
    disks = [(0, inst_disks[0])]
  else:
    # Materialize as a list: the sequence is iterated multiple times below,
    # which would silently yield nothing with a Python 3 "map" iterator
    disks = [(idx, inst_disks[idx]) for idx in disks]

  logging.info("Pausing synchronization of disks of instance '%s'",
               instance.name)
  # Pause disk synchronization while imaging; resumed in "finally" below
  result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
                                                  (map(compat.snd, disks),
                                                   instance),
                                                  True)
  result.Raise("Failed to pause disk synchronization on node '%s'" % node_name)

  # Per-disk pause failures are only logged, not fatal
  for idx, success in enumerate(result.payload):
    if not success:
      logging.warning("Pausing synchronization of disk %s of instance '%s'"
                      " failed", idx, instance.name)

  try:
    for (idx, device) in disks:
      lu.LogInfo("Imaging disk '%d' for instance '%s' on node '%s'",
                 idx, instance.name, node_name)
      result = lu.rpc.call_blockdev_image(node_uuid, (device, instance),
                                          image, device.size)
      result.Raise("Could not image disk '%d' for instance '%s' on node '%s'" %
                   (idx, instance.name, node_name))
  finally:
    logging.info("Resuming synchronization of disks for instance '%s'",
                 instance.name)

    result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
                                                    (map(compat.snd, disks),
                                                     instance),
                                                    False)

    if result.fail_msg:
      # Pass the instance name and append the actual error message; the
      # previous format put the node name in the instance slot and the error
      # in the node slot, dropping the instance name entirely
      lu.LogWarning("Failed to resume disk synchronization for instance '%s'"
                    " on node '%s': %s", instance.name, node_name,
                    result.fail_msg)
    else:
      for idx, success in enumerate(result.payload):
        if not success:
          lu.LogWarning("Failed to resume synchronization of disk '%d' of"
                        " instance '%s'", idx, instance.name)
def WipeOrCleanupDisks(lu, instance, disks=None, cleanup=None):
  """Wrapper for L{WipeDisks} that handles errors.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should wipe
  @param disks: see L{WipeDisks}
  @param cleanup: the result returned by L{CreateDisks}, used for cleanup in
      case of error
  @raise errors.OpExecError: re-raised from L{WipeDisks} after the newly
      created disks have been removed again

  """
  try:
    WipeDisks(lu, instance, disks=disks)
  except errors.OpExecError:
    logging.warning("Wiping disks for instance '%s' failed",
                    instance.name)
    # Remove the just-created disks before re-raising, so no half-wiped
    # disks are left behind
    _UndoCreateDisks(lu, cleanup, instance)
    raise
def ExpandCheckDisks(instance_disks, disks):
  """Return the instance disks selected by the disks list

  @type disks: list of L{objects.Disk} or None
  @param disks: selected disks
  @rtype: list of L{objects.Disk}
  @return: selected instance disks to act on

  """
  # No selection means "all instance disks"
  if disks is None:
    return instance_disks

  owned = [d.uuid for d in instance_disks]
  requested = [d.uuid for d in disks]
  # Every requested disk must belong to the instance
  if not set(requested).issubset(owned):
    raise errors.ProgrammerError("Can only act on disks belonging to the"
                                 " target instance: expected a subset of %s,"
                                 " got %s" % (owned, requested))
  return disks
def WaitForSync(lu, instance, disks=None, oneshot=False):
  """Sleep and poll for an instance's disk to sync.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we wait for
  @type disks: list of L{objects.Disk} or None
  @param disks: which disks to wait for (or all, if None)
  @type oneshot: bool
  @param oneshot: if True, poll the mirror status only once instead of
      waiting until synchronization is complete
  @rtype: bool
  @return: True if no disk ended up cumulatively degraded, False otherwise

  """
  inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)

  # Nothing to do if there are no disks at all, or if an explicitly empty
  # disk selection was passed in
  if not inst_disks or disks is not None and not disks:
    return True

  disks = ExpandCheckDisks(inst_disks, disks)

  if not oneshot:
    lu.LogInfo("Waiting for instance %s to sync disks", instance.name)

  node_uuid = instance.primary_node
  node_name = lu.cfg.GetNodeName(node_uuid)

  # TODO: Convert to utils.Retry

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node_uuid, (disks, instance))
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't get any data from node %s: %s", node_name, msg)
      retries += 1
      # Abort after 10 consecutive failed RPC calls
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node_name)
      time.sleep(6)
      continue
    rstats = rstats.payload
    # A successful call resets the consecutive-failure counter
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node_name, disks[i].iv_name)
        continue

      # A device that is degraded without reporting a sync percentage is
      # counted as cumulatively degraded (it is not making progress)
      cumul_degraded = (cumul_degraded or
                        (mstat.is_degraded and mstat.sync_percent is None))
      if mstat.sync_percent is not None:
        done = False
        if mstat.estimated_time is not None:
          rem_time = ("%s remaining (estimated)" %
                      utils.FormatSeconds(mstat.estimated_time))
          max_time = mstat.estimated_time
        else:
          rem_time = "no time estimate"
          max_time = 5 # sleep at least a bit between retries
        lu.LogInfo("- device %s: %5.2f%% done, %s",
                   disks[i].iv_name, mstat.sync_percent, rem_time)

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.LogInfo("Instance %s's disks are in sync", instance.name)

  return not cumul_degraded
def ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is false, errors on the primary node are not
  ignored (i.e. they make the function return False).

  Modifies the configuration of the instance, so the caller should re-read the
  instance configuration, if needed.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we shut down
  @type disks: list of L{objects.Disk} or None
  @param disks: which disks to shut down (or all, if None)
  @type ignore_primary: bool
  @param ignore_primary: if True, failures on the primary node do not
      affect the result
  @rtype: bool
  @return: True if every device shutdown succeeded (modulo the nodes
      excluded by ignore_primary/offline handling), False otherwise

  """
  all_result = True

  if disks is None:
    # only mark instance disks as inactive if all disks are affected
    lu.cfg.MarkInstanceDisksInactive(instance.uuid)

  inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
  disks = ExpandCheckDisks(inst_disks, disks)

  for disk in disks:
    # Shut down every component of the disk, on every involved node
    for node_uuid, top_disk in disk.ComputeNodeTree(instance.primary_node):
      result = lu.rpc.call_blockdev_shutdown(node_uuid, (top_disk, instance))
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
        # A primary-node failure counts unless explicitly ignored; a
        # secondary-node failure counts only if the node is not offline
        if ((node_uuid == instance.primary_node and not ignore_primary) or
            (node_uuid != instance.primary_node and not result.offline)):
          all_result = False
  return all_result
def _SafeShutdownInstanceDisks(lu, instance, disks=None, req_states=None):
  """Shutdown block devices of an instance.

  Verifies that the instance is in one of the required states (by default
  shut down) before calling L{ShutdownInstanceDisks}.

  """
  states = INSTANCE_DOWN if req_states is None else req_states
  CheckInstanceState(lu, instance, states, msg="cannot shutdown disks")
  ShutdownInstanceDisks(lu, instance, disks=disks)
def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
                          ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Modifies the configuration of the instance, so the caller should re-read the
  instance configuration, if needed.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type disks: list of L{objects.Disk} or None
  @param disks: which disks to assemble (or all, if None)
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: a tuple (disks_ok, device_info); disks_ok is False if the
      operation failed, and device_info is a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True

  if disks is None:
    # only mark instance disks as active if all disks are affected
    instance = lu.cfg.MarkInstanceDisksActive(instance.uuid)

  inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
  disks = ExpandCheckDisks(inst_disks, disks)

  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for idx, inst_disk in enumerate(disks):
    for node_uuid, node_disk in inst_disk.ComputeNodeTree(
                                  instance.primary_node):
      if ignore_size:
        # Work on a copy so the configured size is not lost
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
                                             instance, False, idx)
      msg = result.fail_msg
      if msg:
        secondary_nodes = lu.cfg.GetInstanceSecondaryNodes(instance.uuid)
        is_offline_secondary = (node_uuid in secondary_nodes and
                                result.offline)
        lu.LogWarning("Could not prepare block device %s on node %s"
                      " (is_primary=False, pass=1): %s",
                      inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
        # Failures on offline secondaries never count; other secondary
        # failures count unless ignore_secondaries is set
        if not (ignore_secondaries or is_offline_secondary):
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for idx, inst_disk in enumerate(disks):
    dev_path = None

    for node_uuid, node_disk in inst_disk.ComputeNodeTree(
                                  instance.primary_node):
      if node_uuid != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
                                             instance, True, idx)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not prepare block device %s on node %s"
                      " (is_primary=True, pass=2): %s",
                      inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
        disks_ok = False
      else:
        dev_path, _, __ = result.payload

    device_info.append((lu.cfg.GetNodeName(instance.primary_node),
                        inst_disk.iv_name, dev_path))

  # Activation failed for at least one device: mark the disks inactive again
  if not disks_ok:
    lu.cfg.MarkInstanceDisksInactive(instance.uuid)

  return disks_ok, device_info
def StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  Modifies the configuration of the instance, so the caller should re-read the
  instance configuration, if needed.

  """
  disks_ok, _ = AssembleInstanceDisks(lu, instance,
                                      ignore_secondaries=force)
  if disks_ok:
    return

  # Assembly failed: deactivate whatever was brought up before bailing out
  ShutdownInstanceDisks(lu, instance)
  if force is not None and not force:
    lu.LogWarning("",
                  hint=("If the message above refers to a secondary node,"
                        " you can retry the operation using '--force'"))
  raise errors.OpExecError("Disk consistency error")
class LUInstanceGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  Uses the opcode fields C{disk} (index of the disk to grow), C{amount},
  C{absolute} (whether C{amount} is a target size or an increment) and
  C{wait_for_sync}.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    # Lock the instance; node and node-resource locks are recalculated to
    # the instance's nodes in DeclareLocks
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      "ABSOLUTE": self.op.absolute,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    @return: two identical lists: master node plus all instance nodes

    """
    nl = [self.cfg.GetMasterNode()] + \
      list(self.cfg.GetInstanceNodes(self.instance.uuid))
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that all of its
    nodes are online, that the disk template supports growing, and
    computes C{self.delta}/C{self.target} from the opcode parameters.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    node_uuids = list(self.cfg.GetInstanceNodes(self.instance.uuid))
    for node_uuid in node_uuids:
      CheckNodeOnline(self, node_uuid)
    # Per-node exclusive-storage flags, used later by the grow RPCs
    self.node_es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids)

    if self.instance.disk_template not in constants.DTS_GROWABLE:
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing", errors.ECODE_INVAL)

    self.disk = self.cfg.GetDiskInfo(self.instance.FindDisk(self.op.disk))

    if self.op.absolute:
      # amount is the requested final size; delta is derived
      self.target = self.op.amount
      self.delta = self.target - self.disk.size
      if self.delta < 0:
        raise errors.OpPrereqError("Requested size (%s) is smaller than "
                                   "current disk size (%s)" %
                                   (utils.FormatUnit(self.target, "h"),
                                    utils.FormatUnit(self.disk.size, "h")),
                                   errors.ECODE_STATE)
    else:
      # amount is the increment; target is derived
      self.delta = self.op.amount
      self.target = self.disk.size + self.delta
      if self.delta < 0:
        raise errors.OpPrereqError("Requested increment (%s) is negative" %
                                   utils.FormatUnit(self.delta, "h"),
                                   errors.ECODE_INVAL)

    self._CheckDiskSpace(node_uuids, self.disk.ComputeGrowth(self.delta))

  def _CheckDiskSpace(self, node_uuids, req_vgspace):
    # Verify there is enough free space in the VGs, unless the template is
    # exempt from the check or any node uses exclusive storage
    template = self.instance.disk_template
    if (template not in (constants.DTS_NO_FREE_SPACE_CHECK) and
        not any(self.node_es_flags.values())):
      # TODO: check the free disk space for file, when that feature will be
      # supported
      # With exclusive storage we need to do something smarter than just looking
      # at free space, which, in the end, is basically a dry run. So we rely on
      # the dry run performed in Exec() instead.
      CheckNodesFreeDiskPerVG(self, node_uuids, req_vgspace)

  def Exec(self, feedback_fn):
    """Execute disk grow.

    Sequence: dry-run grow on all nodes, real grow of the backing
    storage on all nodes, grow of the logical storage on the primary,
    record the new size in the configuration, then optionally wipe the
    added space and wait for sync.

    """
    assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    wipe_disks = self.cfg.GetClusterInfo().prealloc_wipe_disks

    disks_ok, _ = AssembleInstanceDisks(self, self.instance, disks=[self.disk])
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block device to grow")

    feedback_fn("Growing disk %s of instance '%s' by %s to %s" %
                (self.op.disk, self.instance.name,
                 utils.FormatUnit(self.delta, "h"),
                 utils.FormatUnit(self.target, "h")))

    # First run all grow ops in dry-run mode
    inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
    for node_uuid in inst_nodes:
      result = self.rpc.call_blockdev_grow(node_uuid,
                                           (self.disk, self.instance),
                                           self.delta, True, True,
                                           self.node_es_flags[node_uuid])
      result.Raise("Dry-run grow request failed to node %s" %
                   self.cfg.GetNodeName(node_uuid))

    if wipe_disks:
      # Get disk size from primary node for wiping
      result = self.rpc.call_blockdev_getdimensions(
                 self.instance.primary_node, [([self.disk], self.instance)])
      result.Raise("Failed to retrieve disk size from node '%s'" %
                   self.instance.primary_node)

      (disk_dimensions, ) = result.payload

      if disk_dimensions is None:
        raise errors.OpExecError("Failed to retrieve disk size from primary"
                                 " node '%s'" % self.instance.primary_node)
      (disk_size_in_bytes, _) = disk_dimensions

      old_disk_size = _DiskSizeInBytesToMebibytes(self, disk_size_in_bytes)

      assert old_disk_size >= self.disk.size, \
        ("Retrieved disk size too small (got %s, should be at least %s)" %
         (old_disk_size, self.disk.size))
    else:
      old_disk_size = None

    # We know that (as far as we can test) operations across different
    # nodes will succeed, time to run it for real on the backing storage
    for node_uuid in inst_nodes:
      result = self.rpc.call_blockdev_grow(node_uuid,
                                           (self.disk, self.instance),
                                           self.delta, False, True,
                                           self.node_es_flags[node_uuid])
      result.Raise("Grow request failed to node %s" %
                   self.cfg.GetNodeName(node_uuid))

    # And now execute it for logical storage, on the primary node
    node_uuid = self.instance.primary_node
    result = self.rpc.call_blockdev_grow(node_uuid, (self.disk, self.instance),
                                         self.delta, False, False,
                                         self.node_es_flags[node_uuid])
    result.Raise("Grow request failed to node %s" %
                 self.cfg.GetNodeName(node_uuid))

    self.disk.RecordGrow(self.delta)
    self.cfg.Update(self.instance, feedback_fn)
    self.cfg.Update(self.disk, feedback_fn)

    # Changes have been recorded, release node lock
    ReleaseLocks(self, locking.LEVEL_NODE)

    # Downgrade lock while waiting for sync
    self.WConfdClient().DownGradeLocksLevel(
      locking.LEVEL_NAMES[locking.LEVEL_INSTANCE])

    # old_disk_size is set if and only if we are going to wipe
    assert wipe_disks ^ (old_disk_size is None)

    if wipe_disks:
      inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
      assert inst_disks[self.op.disk] == self.disk

      # Wipe newly added disk space
      WipeDisks(self, self.instance,
                disks=[(self.op.disk, self.disk, old_disk_size)])

    if self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, self.instance, disks=[self.disk])
      if disk_abort:
        self.LogWarning("Disk syncing has not returned a good status; check"
                        " the instance")
      if not self.instance.disks_active:
        _SafeShutdownInstanceDisks(self, self.instance, disks=[self.disk])
    elif not self.instance.disks_active:
      self.LogWarning("Not shutting down the disk even if the instance is"
                      " not supposed to be running because no wait for"
                      " sync mode was requested")

    assert self.owned_locks(locking.LEVEL_NODE_RES)
    assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
class LUInstanceReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  The real work is delegated to the L{TLReplaceDisks} tasklet created in
  C{ExpandNames}; this LU only validates arguments, sets up locking and
  builds the hook environment.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    The iallocator/remote_node options are only meaningful (and exactly
    one of them is required) when changing the secondary node.

    """
    if self.op.mode == constants.REPLACE_DISK_CHG:
      if self.op.remote_node is None and self.op.iallocator is None:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given", errors.ECODE_INVAL)
      else:
        CheckIAllocatorOrNode(self, "iallocator", "remote_node")
    elif self.op.remote_node is not None or self.op.iallocator is not None:
      # Not replacing the secondary
      raise errors.OpPrereqError("The iallocator and new node options can"
                                 " only be used when changing the"
                                 " secondary node", errors.ECODE_INVAL)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    assert locking.LEVEL_NODE not in self.needed_locks
    assert locking.LEVEL_NODE_RES not in self.needed_locks
    assert locking.LEVEL_NODEGROUP not in self.needed_locks

    assert self.op.iallocator is None or self.op.remote_node is None, \
      "Conflicting options"

    if self.op.remote_node is not None:
      (self.op.remote_node_uuid, self.op.remote_node) = \
        ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                              self.op.remote_node)

      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8Dev.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node_uuid]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

      if self.op.iallocator is not None:
        # iallocator will select a new node in the same group
        self.needed_locks[locking.LEVEL_NODEGROUP] = []
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE_RES] = []

    # The tasklet carries all parameters and performs the actual replacement
    self.replacer = TLReplaceDisks(self, self.op.instance_uuid,
                                   self.op.instance_name, self.op.mode,
                                   self.op.iallocator, self.op.remote_node_uuid,
                                   self.op.disks, self.op.early_release,
                                   self.op.ignore_ipolicy)

    self.tasklets = [self.replacer]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert self.op.remote_node_uuid is None
      assert self.op.iallocator is not None
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      self.share_locks[locking.LEVEL_NODEGROUP] = 1
      # Lock all groups used by instance optimistically; this requires going
      # via the node before it's locked, requiring verification later on
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)

    elif level == locking.LEVEL_NODE:
      if self.op.iallocator is not None:
        assert self.op.remote_node_uuid is None
        assert not self.needed_locks[locking.LEVEL_NODE]
        assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)

        # Lock member nodes of all locked groups
        self.needed_locks[locking.LEVEL_NODE] = \
          [node_uuid
           for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
           for node_uuid in self.cfg.GetNodeGroup(group_uuid).members]
      else:
        self._LockInstancesNodes()

    elif level == locking.LEVEL_NODE_RES:
      # Reuse node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.needed_locks[locking.LEVEL_NODE]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    instance = self.replacer.instance
    secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.cfg.GetNodeName(secondary_nodes[0]),
      }
    env.update(BuildInstanceHookEnvByObject(self, instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    @return: master node, primary node and (if given) the new secondary

    """
    instance = self.replacer.instance
    nl = [
      self.cfg.GetMasterNode(),
      instance.primary_node,
      ]
    if self.op.remote_node_uuid is not None:
      nl.append(self.op.remote_node_uuid)
    return nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    Re-validates the optimistically acquired node group locks, then
    delegates to the tasklet via L{LogicalUnit.CheckPrereq}.

    """
    # Verify if node group locks are still correct
    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
    if owned_groups:
      CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid, owned_groups)

    return LogicalUnit.CheckPrereq(self)
class LUInstanceActivateDisks(NoHooksLU):
  """Bring up an instance's disks.

  Assembles the instance's block devices and, if requested, waits until
  they are fully synchronized.

  """
  REQ_BGL = False

  def ExpandNames(self):
    # Lock the instance; the actual node locks are computed in DeclareLocks
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and that its
    primary node is online.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Assemble the instance's disks, optionally waiting for sync.

    @return: the per-disk device information from AssembleInstanceDisks

    """
    (assembled_ok, disks_info) = \
      AssembleInstanceDisks(self, self.instance,
                            ignore_size=self.op.ignore_size)
    if not assembled_ok:
      raise errors.OpExecError("Cannot activate block devices")

    # Only wait for sync when the opcode asks for it; a degraded result
    # marks the disks inactive again before failing
    sync_ok = not self.op.wait_for_sync or WaitForSync(self, self.instance)
    if not sync_ok:
      self.cfg.MarkInstanceDisksInactive(self.instance.uuid)
      raise errors.OpExecError("Some disks of the instance are degraded!")

    return disks_info
class LUInstanceDeactivateDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  REQ_BGL = False

  def ExpandNames(self):
    # Lock the instance; node locks are filled in by DeclareLocks
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    # With force, shut down unconditionally; otherwise use the safe variant
    shutdown_fn = (ShutdownInstanceDisks if self.op.force
                   else _SafeShutdownInstanceDisks)
    shutdown_fn(self, self.instance)
def _CheckDiskConsistencyInner(lu, instance, dev, node_uuid, on_primary,
                               ldisk=False):
  """Check that mirrors are not degraded.

  @attention: The device has to be annotated already.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  @return: True if the device (and, recursively, its children) is
      consistent on the given node, False otherwise

  """
  result = True

  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node_uuid, (dev, instance))
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s",
                    lu.cfg.GetNodeName(node_uuid), msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", lu.cfg.GetNodeName(node_uuid))
      result = False
    else:
      if ldisk:
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
      else:
        result = result and not rstats.payload.is_degraded

  if dev.children:
    # NOTE(review): the recursion does not forward the ldisk flag, so
    # children are always checked via is_degraded; presumably intentional
    # for non-DRBD children, but worth confirming.  Also note the "and"
    # short-circuit: once result is False, children are no longer queried.
    for child in dev.children:
      result = result and _CheckDiskConsistencyInner(lu, instance, child,
                                                     node_uuid, on_primary)

  return result
def CheckDiskConsistency(lu, instance, dev, node_uuid, on_primary, ldisk=False):
  """Wrapper around L{_CheckDiskConsistencyInner}.

  Annotates the device with the instance's disk parameters before
  delegating the actual consistency check.

  """
  (annotated,) = AnnotateDiskParams(instance, [dev], lu.cfg)
  return _CheckDiskConsistencyInner(lu, instance, annotated, node_uuid,
                                    on_primary, ldisk=ldisk)
def _BlockdevFind(lu, node_uuid, dev, instance):
  """Wrapper around call_blockdev_find to annotate diskparams.

  @param lu: A reference to the lu object
  @param node_uuid: The node to call out
  @param dev: The device to find
  @param instance: The instance object the device belongs to
  @returns The result of the rpc call

  """
  (annotated_dev,) = AnnotateDiskParams(instance, [dev], lu.cfg)
  return lu.rpc.call_blockdev_find(node_uuid, (annotated_dev, instance))
def _GenerateUniqueNames(lu, exts):
"""Generate a suitable LV name.
This will generate a logical volume name for the given instance.
"""
results = []
for val in exts:
new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
results.append("%s%s" % (new_id, val))
return results
class TLReplaceDisks(Tasklet):
"""Replaces disks for an instance.
Note: Locking is not within the scope of this class.
"""
  def __init__(self, lu, instance_uuid, instance_name, mode, iallocator_name,
               remote_node_uuid, disks, early_release, ignore_ipolicy):
    """Initializes this class.

    Only stores the parameters; all runtime fields are filled in later
    by L{CheckPrereq}.

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_uuid = instance_uuid
    self.instance_name = instance_name
    self.mode = mode
    self.iallocator_name = iallocator_name
    self.remote_node_uuid = remote_node_uuid
    self.disks = disks
    self.early_release = early_release
    self.ignore_ipolicy = ignore_ipolicy

    # Runtime data (set by CheckPrereq)
    self.instance = None
    self.new_node_uuid = None
    self.target_node_uuid = None
    self.other_node_uuid = None
    self.remote_node_info = None
    self.node_secondary_ip = None
@staticmethod
def _RunAllocator(lu, iallocator_name, instance_uuid,
relocate_from_node_uuids):
"""Compute a new secondary node using an IAllocator.
"""
req = iallocator.IAReqRelocate(
inst_uuid=instance_uuid,
relocate_from_node_uuids=list(relocate_from_node_uuids))
ial = iallocator.IAllocator(lu.cfg, lu.rpc, req)
ial.Run(iallocator_name)
if not ial.success:
raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
" %s" % (iallocator_name, ial.info),
errors.ECODE_NORES)
remote_node_name = ial.result[0]
remote_node = lu.cfg.GetNodeInfoByName(remote_node_name)
if remote_node is None:
raise errors.OpPrereqError("Node %s not found in configuration" %
remote_node_name, errors.ECODE_NOENT)
lu.LogInfo("Selected new secondary for instance '%s': %s",
instance_uuid, remote_node_name)
return remote_node.uuid
  def _FindFaultyDisks(self, node_uuid):
    """Wrapper for L{FindFaultyInstanceDisks}.

    @param node_uuid: the node to look for faulty disks on
    @return: whatever FindFaultyInstanceDisks returns for this instance

    """
    # The final positional argument is passed as True here; its meaning is
    # defined by FindFaultyInstanceDisks (not visible in this file section)
    return FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
                                   node_uuid, True)
def _CheckDisksActivated(self, instance):
"""Checks if the instance disks are activated.
@param instance: The instance to check disks
@return: True if they are activated, False otherwise
"""
node_uuids = self.cfg.GetInstanceNodes(instance.uuid)
for idx, dev in enumerate(self.cfg.GetInstanceDisks(instance.uuid)):
for node_uuid in node_uuids:
self.lu.LogInfo("Checking disk/%d on %s", idx,
self.cfg.GetNodeName(node_uuid))
result = _BlockdevFind(self, node_uuid, dev, instance)
if result.offline:
continue
elif result.fail_msg or not result.payload:
return False
return True
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, is DRBD8-based,
    selects target/other/new nodes according to the replace mode,
    validates the disk selection and releases locks that turned out to
    be unneeded.

    """
    self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.instance_name

    if self.instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances", errors.ECODE_INVAL)

    # DRBD8 instances are expected to have exactly one secondary
    secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
    if len(secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(secondary_nodes),
                                 errors.ECODE_FAULT)

    secondary_node_uuid = secondary_nodes[0]

    # Determine the replacement node either directly or via the allocator
    if self.iallocator_name is None:
      remote_node_uuid = self.remote_node_uuid
    else:
      remote_node_uuid = self._RunAllocator(self.lu, self.iallocator_name,
                                            self.instance.uuid,
                                            secondary_nodes)

    if remote_node_uuid is None:
      self.remote_node_info = None
    else:
      assert remote_node_uuid in self.lu.owned_locks(locking.LEVEL_NODE), \
             "Remote node '%s' is not locked" % remote_node_uuid

      self.remote_node_info = self.cfg.GetNodeInfo(remote_node_uuid)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node_uuid

    if remote_node_uuid == self.instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance", errors.ECODE_INVAL)

    if remote_node_uuid == secondary_node_uuid:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance",
                                 errors.ECODE_INVAL)

    # An explicit disk list is only valid for the PRI/SEC modes
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
                                    constants.REPLACE_DISK_CHG):
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
                                 errors.ECODE_INVAL)

    if self.mode == constants.REPLACE_DISK_AUTO:
      # Automatic mode: figure out which side has faulty disks
      if not self._CheckDisksActivated(self.instance):
        raise errors.OpPrereqError("Please run activate-disks on instance %s"
                                   " first" % self.instance_name,
                                   errors.ECODE_STATE)
      faulty_primary = self._FindFaultyDisks(self.instance.primary_node)
      faulty_secondary = self._FindFaultyDisks(secondary_node_uuid)

      if faulty_primary and faulty_secondary:
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
                                   " one node and can not be repaired"
                                   " automatically" % self.instance_name,
                                   errors.ECODE_STATE)

      if faulty_primary:
        self.disks = faulty_primary
        self.target_node_uuid = self.instance.primary_node
        self.other_node_uuid = secondary_node_uuid
        check_nodes = [self.target_node_uuid, self.other_node_uuid]
      elif faulty_secondary:
        self.disks = faulty_secondary
        self.target_node_uuid = secondary_node_uuid
        self.other_node_uuid = self.instance.primary_node
        check_nodes = [self.target_node_uuid, self.other_node_uuid]
      else:
        self.disks = []
        check_nodes = []
    else:
      # Non-automatic modes
      if self.mode == constants.REPLACE_DISK_PRI:
        self.target_node_uuid = self.instance.primary_node
        self.other_node_uuid = secondary_node_uuid
        check_nodes = [self.target_node_uuid, self.other_node_uuid]

      elif self.mode == constants.REPLACE_DISK_SEC:
        self.target_node_uuid = secondary_node_uuid
        self.other_node_uuid = self.instance.primary_node
        check_nodes = [self.target_node_uuid, self.other_node_uuid]

      elif self.mode == constants.REPLACE_DISK_CHG:
        self.new_node_uuid = remote_node_uuid
        self.other_node_uuid = self.instance.primary_node
        self.target_node_uuid = secondary_node_uuid
        check_nodes = [self.new_node_uuid, self.other_node_uuid]

        CheckNodeNotDrained(self.lu, remote_node_uuid)
        CheckNodeVmCapable(self.lu, remote_node_uuid)

        old_node_info = self.cfg.GetNodeInfo(secondary_node_uuid)
        assert old_node_info is not None
        if old_node_info.offline and not self.early_release:
          # doesn't make sense to delay the release
          self.early_release = True
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
                          " early-release mode", secondary_node_uuid)

      else:
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
                                     self.mode)

    # If not specified all disks should be replaced
    if not self.disks:
      self.disks = range(len(self.instance.disks))

    # TODO: This is ugly, but right now we can't distinguish between internal
    # submitted opcode and external one. We should fix that.
    if self.remote_node_info:
      # We change the node, lets verify it still meets instance policy
      new_group_info = self.cfg.GetNodeGroup(self.remote_node_info.group)
      cluster = self.cfg.GetClusterInfo()
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              new_group_info)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance,
                             self.remote_node_info, self.cfg,
                             ignore=self.ignore_ipolicy)

    for node_uuid in check_nodes:
      CheckNodeOnline(self.lu, node_uuid)

    touched_nodes = frozenset(node_uuid for node_uuid in [self.new_node_uuid,
                                                          self.other_node_uuid,
                                                          self.target_node_uuid]
                              if node_uuid is not None)

    # Release unneeded node and node resource locks
    ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
    ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
    ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)

    # Release any owned node group
    ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)

    # Check whether disks are valid
    for disk_idx in self.disks:
      self.instance.FindDisk(disk_idx)

    # Get secondary node IP addresses
    self.node_secondary_ip = dict((uuid, node.secondary_ip) for (uuid, node)
                                  in self.cfg.GetMultiNodeInfo(touched_nodes))
def Exec(self, feedback_fn):
"""Execute disk replacement.
This dispatches the disk replacement to the appropriate handler.
"""
if __debug__:
# Verify owned locks before starting operation
owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
assert set(owned_nodes) == set(self.node_secondary_ip), \
("Incorrect node locks, owning %s, expected %s" %
(owned_nodes, self.node_secondary_ip.keys()))
assert (self.lu.owned_locks(locking.LEVEL_NODE) ==
self.lu.owned_locks(locking.LEVEL_NODE_RES))
owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
assert list(owned_instances) == [self.instance_name], \
"Instance '%s' not locked" % self.instance_name
if not self.disks:
feedback_fn("No disks need replacement for instance '%s'" %
self.instance.name)
return
feedback_fn("Replacing disk(s) %s for instance '%s'" %
(utils.CommaJoin(self.disks), self.instance.name))
feedback_fn("Current primary node: %s" %
self.cfg.GetNodeName(self.instance.primary_node))
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
feedback_fn("Current seconary node: %s" %
utils.CommaJoin(self.cfg.GetNodeNames(secondary_nodes)))
activate_disks = not self.instance.disks_active
# Activate the instance disks if we're replacing them on a down instance
if activate_disks:
StartInstanceDisks(self.lu, self.instance, True)
try:
# Should we replace the secondary node?
if self.new_node_uuid is not None:
fn = self._ExecDrbd8Secondary
else:
fn = self._ExecDrbd8DiskOnly
result = fn(feedback_fn)
finally:
# Deactivate the instance disks if we're replacing them on a
# down instance
if activate_disks:
_SafeShutdownInstanceDisks(self.lu, self.instance,
req_states=INSTANCE_NOT_RUNNING)
assert not self.lu.owned_locks(locking.LEVEL_NODE)
if __debug__:
# Verify owned locks
owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE_RES)
nodes = frozenset(self.node_secondary_ip)
assert ((self.early_release and not owned_nodes) or
(not self.early_release and not (set(owned_nodes) - nodes))), \
("Not owning the correct locks, early_release=%s, owned=%r,"
" nodes=%r" % (self.early_release, owned_nodes, nodes))
return result
def _CheckVolumeGroup(self, node_uuids):
self.lu.LogInfo("Checking volume groups")
vgname = self.cfg.GetVGName()
# Make sure volume group exists on all involved nodes
results = self.rpc.call_vg_list(node_uuids)
if not results:
raise errors.OpExecError("Can't list volume groups on the nodes")
for node_uuid in node_uuids:
res = results[node_uuid]
res.Raise("Error checking node %s" % self.cfg.GetNodeName(node_uuid))
if vgname not in res.payload:
raise errors.OpExecError("Volume group '%s' not found on node %s" %
(vgname, self.cfg.GetNodeName(node_uuid)))
def _CheckDisksExistence(self, node_uuids):
# Check disk existence
for idx, dev in enumerate(self.cfg.GetInstanceDisks(self.instance.uuid)):
if idx not in self.disks:
continue
for node_uuid in node_uuids:
self.lu.LogInfo("Checking disk/%d on %s", idx,
self.cfg.GetNodeName(node_uuid))
result = _BlockdevFind(self, node_uuid, dev, self.instance)
msg = result.fail_msg
if msg or not result.payload:
if not msg:
msg = "disk not found"
if not self._CheckDisksActivated(self.instance):
extra_hint = ("\nDisks seem to be not properly activated. Try"
" running activate-disks on the instance before"
" using replace-disks.")
else:
extra_hint = ""
raise errors.OpExecError("Can't find disk/%d on node %s: %s%s" %
(idx, self.cfg.GetNodeName(node_uuid), msg,
extra_hint))
def _CheckDisksConsistency(self, node_uuid, on_primary, ldisk):
for idx, dev in enumerate(self.cfg.GetInstanceDisks(self.instance.uuid)):
if idx not in self.disks:
continue
self.lu.LogInfo("Checking disk/%d consistency on node %s" %
(idx, self.cfg.GetNodeName(node_uuid)))
if not CheckDiskConsistency(self.lu, self.instance, dev, node_uuid,
on_primary, ldisk=ldisk):
raise errors.OpExecError("Node %s has degraded storage, unsafe to"
" replace disks for instance %s" %
(self.cfg.GetNodeName(node_uuid),
self.instance.name))
  def _CreateNewStorage(self, node_uuid):
    """Create new storage on the primary or secondary node.

    This is only used for same-node replaces, not for changing the
    secondary node, hence we don't want to modify the existing disk.

    @param node_uuid: the node on which to create the replacement LVs
    @return: dict mapping each device's iv_name to a
        (dev, old_lvs, new_lvs) tuple, where old_lvs are copies of the
        current children and new_lvs the freshly created data/meta LVs

    """
    iv_names = {}

    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    disks = AnnotateDiskParams(self.instance, inst_disks, self.cfg)
    for idx, dev in enumerate(disks):
      # Only operate on the disks selected for replacement
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Adding storage on %s for disk/%d",
                      self.cfg.GetNodeName(node_uuid), idx)

      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
      names = _GenerateUniqueNames(self.lu, lv_names)

      # Mirror the existing data/meta pair: same VG, new unique names
      (data_disk, meta_disk) = dev.children
      vg_data = data_disk.logical_id[0]
      lv_data = objects.Disk(dev_type=constants.DT_PLAIN, size=dev.size,
                             logical_id=(vg_data, names[0]),
                             params=data_disk.params)
      vg_meta = meta_disk.logical_id[0]
      lv_meta = objects.Disk(dev_type=constants.DT_PLAIN,
                             size=constants.DRBD_META_SIZE,
                             logical_id=(vg_meta, names[1]),
                             params=meta_disk.params)

      new_lvs = [lv_data, lv_meta]
      # Copies, so later renames of the originals don't affect these entries
      old_lvs = [child.Copy() for child in dev.children]
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg, node_uuid)

      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        try:
          _CreateBlockDevInner(self.lu, node_uuid, self.instance, new_lv, True,
                               GetInstanceInfoText(self.instance), False,
                               excl_stor)
        except errors.DeviceCreationError, e:
          raise errors.OpExecError("Can't create block device: %s" % e.message)

    return iv_names
def _CheckDevices(self, node_uuid, iv_names):
for name, (dev, _, _) in iv_names.iteritems():
result = _BlockdevFind(self, node_uuid, dev, self.instance)
msg = result.fail_msg
if msg or not result.payload:
if not msg:
msg = "disk not found"
raise errors.OpExecError("Can't find DRBD device %s: %s" %
(name, msg))
if result.payload.is_degraded:
raise errors.OpExecError("DRBD device %s is degraded!" % name)
def _RemoveOldStorage(self, node_uuid, iv_names):
for name, (_, old_lvs, _) in iv_names.iteritems():
self.lu.LogInfo("Remove logical volumes for %s", name)
for lv in old_lvs:
msg = self.rpc.call_blockdev_remove(node_uuid, (lv, self.instance)) \
.fail_msg
if msg:
self.lu.LogWarning("Can't remove old LV: %s", msg,
hint="remove unused LVs manually")
  def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
    """Replace a disk on the primary or secondary for DRBD 8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.other_node_uuid, self.target_node_uuid])
    self._CheckVolumeGroup([self.target_node_uuid, self.other_node_uuid])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(
      self.other_node_uuid, self.other_node_uuid == self.instance.primary_node,
      False)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    iv_names = self._CreateNewStorage(self.target_node_uuid)

    # Step: for each lv, detach+rename*2+attach
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      self.lu.LogInfo("Detaching %s drbd from local storage", dev.iv_name)

      result = self.rpc.call_blockdev_removechildren(self.target_node_uuid,
                                                     (dev, self.instance),
                                                     (old_lvs, self.instance))
      result.Raise("Can't detach drbd from local storage on node"
                   " %s for device %s" %
                   (self.cfg.GetNodeName(self.target_node_uuid), dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == unique_id on that node

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      # Maps an LV to its "_replaced-<suffix>" name within the same VG
      ren_fn = lambda d, suff: (d.logical_id[0],
                                d.logical_id[1] + "_replaced-%s" % suff)

      # Build the rename list based on what LVs exist on the node
      rename_old_to_new = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(self.target_node_uuid,
                                             (to_ren, self.instance))
        if not result.fail_msg and result.payload:
          # device exists
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))

      self.lu.LogInfo("Renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(self.target_node_uuid,
                                             rename_old_to_new)
      result.Raise("Can't rename old LVs on node %s" %
                   self.cfg.GetNodeName(self.target_node_uuid))

      # Now we rename the new LVs to the old LVs
      self.lu.LogInfo("Renaming the new LVs on the target node")
      rename_new_to_old = [(new, old.logical_id)
                           for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(self.target_node_uuid,
                                             rename_new_to_old)
      result.Raise("Can't rename new LVs on node %s" %
                   self.cfg.GetNodeName(self.target_node_uuid))

      # Intermediate steps of in memory modifications
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id

      # We need to modify old_lvs so that removal later removes the
      # right LVs, not the newly added ones; note that old_lvs is a
      # copy here
      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)

      # Now that the new lvs have the old name, we can add them to the device
      self.lu.LogInfo("Adding new mirror component on %s",
                      self.cfg.GetNodeName(self.target_node_uuid))
      result = self.rpc.call_blockdev_addchildren(self.target_node_uuid,
                                                  (dev, self.instance),
                                                  (new_lvs, self.instance))
      msg = result.fail_msg
      if msg:
        # Attach failed: try to remove the just-created LVs before aborting
        for new_lv in new_lvs:
          msg2 = self.rpc.call_blockdev_remove(self.target_node_uuid,
                                               (new_lv, self.instance)).fail_msg
          if msg2:
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
                               hint=("cleanup manually the unused logical"
                                     "volumes"))
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)

    # Steps 5 and 6 swap order depending on early_release
    cstep = itertools.count(5)

    if self.early_release:
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node_uuid, iv_names)
      # TODO: Check if releasing locks early still makes sense
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
    else:
      # Release all resource locks except those used by the instance
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
                   keep=self.node_secondary_ip.keys())

    # Release all node locks while waiting for sync
    ReleaseLocks(self.lu, locking.LEVEL_NODE)

    # TODO: Can the instance lock be downgraded here? Take the optional disk
    # shutdown in the caller into consideration.

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
    WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node_uuid, iv_names)
def _ExecDrbd8Secondary(self, feedback_fn):
"""Replace the secondary node for DRBD 8.
The algorithm for replace is quite complicated:
- for all disks of the instance:
- create new LVs on the new node with same names
- shutdown the drbd device on the old secondary
- disconnect the drbd network on the primary
- create the drbd device on the new secondary
- network attach the drbd on the primary, using an artifice:
the drbd code for Attach() will connect to the network if it
finds a device which is connected to the good local disks but
not network enabled
- wait for sync across all devices
- remove all disks from the old secondary
Failures are not very well handled.
"""
steps_total = 6
pnode = self.instance.primary_node
# Step: check device activation
self.lu.LogStep(1, steps_total, "Check device existence")
self._CheckDisksExistence([self.instance.primary_node])
self._CheckVolumeGroup([self.instance.primary_node])
# Step: check other node consistency
self.lu.LogStep(2, steps_total, "Check peer consistency")
self._CheckDisksConsistency(self.instance.primary_node, True, True)
# Step: create new storage
self.lu.LogStep(3, steps_total, "Allocate new storage")
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
disks = AnnotateDiskParams(self.instance, inst_disks, self.cfg)
excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg,
self.new_node_uuid)
for idx, dev in enumerate(disks):
self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
(self.cfg.GetNodeName(self.new_node_uuid), idx))
# we pass force_create=True to force LVM creation
for new_lv in dev.children:
try:
_CreateBlockDevInner(self.lu, self.new_node_uuid, self.instance,
new_lv, True, GetInstanceInfoText(self.instance),
False, excl_stor)
except errors.DeviceCreationError, e:
raise errors.OpExecError("Can't create block device: %s" % e.message)
# Step 4: dbrd minors and drbd setups changes
# after this, we must manually remove the drbd minors on both the
# error and the success paths
self.lu.LogStep(4, steps_total, "Changing drbd configuration")
minors = self.cfg.AllocateDRBDMinor([self.new_node_uuid
for _ in inst_disks],
self.instance.uuid)
logging.debug("Allocated minors %r", minors)
iv_names = {}
for idx, (dev, new_minor) in enumerate(zip(inst_disks, minors)):
self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
(self.cfg.GetNodeName(self.new_node_uuid), idx))
# create new devices on new_node; note that we create two IDs:
# one without port, so the drbd will be activated without
# networking information on the new node at this stage, and one
# with network, for the latter activation in step 4
(o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
if self.instance.primary_node == o_node1:
p_minor = o_minor1
else:
assert self.instance.primary_node == o_node2, "Three-node instance?"
p_minor = o_minor2
new_alone_id = (self.instance.primary_node, self.new_node_uuid, None,
p_minor, new_minor, o_secret)
new_net_id = (self.instance.primary_node, self.new_node_uuid, o_port,
p_minor, new_minor, o_secret)
iv_names[idx] = (dev, dev.children, new_net_id)
logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
new_net_id)
new_drbd = objects.Disk(dev_type=constants.DT_DRBD8,
logical_id=new_alone_id,
children=dev.children,
size=dev.size,
params={})
(anno_new_drbd,) = AnnotateDiskParams(self.instance, [new_drbd],
self.cfg)
try:
CreateSingleBlockDev(self.lu, self.new_node_uuid, self.instance,
anno_new_drbd,
GetInstanceInfoText(self.instance), False,
excl_stor)
except errors.GenericError:
self.cfg.ReleaseDRBDMinors(self.instance.uuid)
raise
# We have new devices, shutdown the drbd on the old secondary
for idx, dev in enumerate(inst_disks):
self.lu.LogInfo("Shutting down drbd for disk/%d on old node", idx)
msg = self.rpc.call_blockdev_shutdown(self.target_node_uuid,
(dev, self.instance)).fail_msg
if msg:
self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
"node: %s" % (idx, msg),
hint=("Please cleanup this device manually as"
" soon as possible"))
self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
result = self.rpc.call_drbd_disconnect_net(
[pnode], (inst_disks, self.instance))[pnode]
msg = result.fail_msg
if msg:
# detaches didn't succeed (unlikely)
self.cfg.ReleaseDRBDMinors(self.instance.uuid)
raise errors.OpExecError("Can't detach the disks from the network on"
" old node: %s" % (msg,))
# if we managed to detach at least one, we update all the disks of
# the instance to point to the new secondary
self.lu.LogInfo("Updating instance configuration")
for dev, _, new_logical_id in iv_names.itervalues():
dev.logical_id = new_logical_id
self.cfg.Update(dev, feedback_fn)
self.cfg.Update(self.instance, feedback_fn)
# Release all node locks (the configuration has been updated)
ReleaseLocks(self.lu, locking.LEVEL_NODE)
# and now perform the drbd attach
self.lu.LogInfo("Attaching primary drbds to new secondary"
" (standalone => connected)")
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
self.new_node_uuid],
(inst_disks, self.instance),
self.instance.name,
False)
for to_node, to_result in result.items():
msg = to_result.fail_msg
if msg:
raise errors.OpExecError(
"Can't attach drbd disks on node %s: %s (please do a gnt-instance "
"info to see the status of disks)" %
(self.cfg.GetNodeName(to_node), msg))
cstep = itertools.count(5)
if self.early_release:
self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
self._RemoveOldStorage(self.target_node_uuid, iv_names)
# TODO: Check if releasing locks early still makes sense
ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
else:
# Release all resource locks except those used by the instance
ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
keep=self.node_secondary_ip.keys())
# TODO: Can the instance lock be downgraded here? Take the optional disk
# shutdown in the caller into consideration.
# Wait for sync
# This can fail as the old devices are degraded and _WaitForSync
# does a combined result over all disks, so we don't check its return value
self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
WaitForSync(self.lu, self.instance)
# Check all devices manually
self._CheckDevices(self.instance.primary_node, iv_names)
# Step: remove old storage
if not self.early_release:
self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
self._RemoveOldStorage(self.target_node_uuid, iv_names)
class TemporaryDisk():
  """ Creates a new temporary bootable disk, and makes sure it is destroyed.

  Is a context manager, and should be used with the ``with`` statement as such.

  The disk is guaranteed to be created at index 0, shifting any other disks of
  the instance by one place, and allowing the instance to be booted with the
  content of the disk.

  """

  def __init__(self, lu, instance, disks, feedback_fn,
               shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT):
    """ Constructor storing arguments until used later.

    @type lu: L{ganeti.cmdlib.base.LogicalUnit}
    @param lu: The LU within which this disk is created.

    @type instance: L{ganeti.objects.Instance}
    @param instance: The instance to which the disk should be added

    @type disks: list of triples (disk template, disk access mode, int)
    @param disks:
      disk specification, which is a list of triples containing the
      disk template (e.g., L{constants.DT_PLAIN}), the disk access
      mode (i.e., L{constants.DISK_RDONLY} or L{constants.DISK_RDWR}),
      and size in MiB.

    @type feedback_fn: function
    @param feedback_fn: Function used to log progress

    @type shutdown_timeout: int
    @param shutdown_timeout: Timeout to use when shutting the instance down

    """
    self._lu = lu
    self._instance = instance
    self._disks = disks
    self._feedback_fn = feedback_fn
    self._shutdown_timeout = shutdown_timeout

  def _EnsureInstanceDiskState(self):
    """ Ensures that the instance is down, and its disks inactive.

    All the operations related to the creation and destruction of disks require
    that the instance is down and that the disks are inactive. This function is
    invoked to make it so.

    """
    # The instance needs to be down before any of these actions occur
    # Whether it is must be checked manually through a RPC - configuration
    # reflects only the desired state
    self._feedback_fn("Shutting down instance")
    result = self._lu.rpc.call_instance_shutdown(self._instance.primary_node,
                                                 self._instance,
                                                 self._shutdown_timeout,
                                                 self._lu.op.reason)
    result.Raise("Shutdown of instance '%s' while removing temporary disk "
                 "failed" % self._instance.name)

    # Disks need to be deactivated prior to being removed
    # The disks_active configuration entry should match the actual state
    if self._instance.disks_active:
      self._feedback_fn("Deactivating disks")
      ShutdownInstanceDisks(self._lu, self._instance)

  def __enter__(self):
    """ Context manager entry function, creating the disk.

    @rtype: L{ganeti.objects.Disk}
    @return: The disk object created.

    """
    self._EnsureInstanceDiskState()

    new_disks = []

    # The iv_name of the disk intentionally diverges from Ganeti's standards, as
    # this disk should be very temporary and its presence should be reported.
    # With the special iv_name, gnt-cluster verify detects the disk and warns
    # the user of its presence. Removing the disk restores the instance to its
    # proper state, despite an error that appears when the removal is performed.
    for idx, (disk_template, disk_access, disk_size) in enumerate(self._disks):
      new_disk = objects.Disk()
      new_disk.dev_type = disk_template
      new_disk.mode = disk_access
      # A fresh UUID doubles as the LV name component of the logical id
      new_disk.uuid = self._lu.cfg.GenerateUniqueID(self._lu.proc.GetECId())
      new_disk.logical_id = (self._lu.cfg.GetVGName(), new_disk.uuid)
      new_disk.params = {}
      new_disk.size = disk_size
      new_disks.append(new_disk)

    self._feedback_fn("Attempting to create temporary disk")

    # Keep the undo information so __exit__ can roll the creation back
    self._undoing_info = CreateDisks(self._lu, self._instance, disks=new_disks)
    for idx, new_disk in enumerate(new_disks):
      self._lu.cfg.AddInstanceDisk(self._instance.uuid, new_disk, idx=idx)
    # Re-read the instance: adding disks changed the configuration object
    self._instance = self._lu.cfg.GetInstanceInfo(self._instance.uuid)

    self._feedback_fn("Temporary disk created")

    self._new_disks = new_disks

    return new_disks

  def __exit__(self, exc_type, _value, _traceback):
    """ Context manager exit function, destroying the disk.

    """
    if exc_type:
      self._feedback_fn("Exception raised, cleaning up temporary disk")
    else:
      self._feedback_fn("Regular cleanup of temporary disk")

    try:
      self._EnsureInstanceDiskState()

      _UndoCreateDisks(self._lu, self._undoing_info, self._instance)

      for disk in self._new_disks:
        self._lu.cfg.RemoveInstanceDisk(self._instance.uuid, disk.uuid)
      self._instance = self._lu.cfg.GetInstanceInfo(self._instance.uuid)

      self._feedback_fn("Temporary disk removed")
    except:
      # Deliberately broad: a cleanup failure must not mask the original
      # exception; warn the operator, then re-raise whatever happened
      self._feedback_fn("Disk cleanup failed; it will have to be removed "
                        "manually")
      raise
Re-read an instance object after starting its disks
... in "replace-disks", since starting the disks changes the instance
object, and a subsequent call to cfg.Update would otherwise fail.
Signed-off-by: Petr Pudlak <9d19625f8ee825a2c398462ed335cbcf98f79c32@google.com>
Reviewed-by: Klaus Aehlig <4ce51de36d9068cd7db2eec1151e0f3f95419008@google.com>
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Logical units dealing with storage of instances."""
import itertools
import logging
import os
import time
from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import objects
from ganeti import utils
import ganeti.rpc.node as rpc
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, Tasklet
from ganeti.cmdlib.common import INSTANCE_DOWN, INSTANCE_NOT_RUNNING, \
AnnotateDiskParams, CheckIAllocatorOrNode, ExpandNodeUuidAndName, \
CheckNodeOnline, CheckInstanceNodeGroups, CheckInstanceState, \
IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks, GetWantedNodes, \
CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_utils import GetInstanceInfoText, \
CopyLockList, ReleaseLocks, CheckNodeVmCapable, \
BuildInstanceHookEnvByObject, CheckNodeNotDrained, CheckTargetNodeIPolicy
import ganeti.masterd.instance
# Infix inserted into auto-generated disk names to identify the disk
# template; plain LVs carry no infix, the other templates embed their
# type in the generated name (see GenerateDiskTemplate)
_DISK_TEMPLATE_NAME_PREFIX = {
  constants.DT_PLAIN: "",
  constants.DT_RBD: ".rbd",
  constants.DT_EXT: ".ext",
  constants.DT_FILE: ".file",
  constants.DT_SHARED_FILE: ".sharedfile",
  }
def CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
                         excl_stor):
  """Create a single block device on a given node.

  Children of the device are not created by this call; they must already
  exist on the node.

  @param lu: the lu on whose behalf we execute
  @param node_uuid: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: passed through to L{backend.BlockdevCreate}, where it
      specifies whether we run on primary or not; it affects both the
      child assembly and the device's own Open() execution
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active for the node

  """
  create_result = lu.rpc.call_blockdev_create(node_uuid, (device, instance),
                                              device.size, instance.name,
                                              force_open, info, excl_stor)
  failure_message = ("Can't create block device %s on"
                     " node %s for instance %s" %
                     (device, lu.cfg.GetNodeName(node_uuid), instance.name))
  create_result.Raise(failure_message)
def _CreateBlockDevInner(lu, node_uuid, instance, device, force_create,
                         info, force_open, excl_stor):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @attention: The device has to be annotated already.

  @param lu: the lu on whose behalf we execute
  @param node_uuid: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has
      CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device own Open() execution
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active for the node

  @return: list of created devices
  """
  created_devices = []
  try:
    if device.CreateOnSecondary():
      force_create = True

    if device.children:
      # Depth-first: children must exist before the parent can be created
      for child in device.children:
        devs = _CreateBlockDevInner(lu, node_uuid, instance, child,
                                    force_create, info, force_open, excl_stor)
        created_devices.extend(devs)

    if not force_create:
      return created_devices

    CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
                         excl_stor)
    # The device has been completely created, so there is no point in keeping
    # its subdevices in the list. We just add the device itself instead.
    created_devices = [(node_uuid, device)]
    return created_devices
  # Python 2 ``except ExcType, var`` syntax below
  except errors.DeviceCreationError, e:
    # Propagate everything created so far so the caller can clean it up
    e.created_devices.extend(created_devices)
    raise e
  except errors.OpExecError, e:
    # Convert a generic execution error into a creation error that still
    # carries the list of devices created before the failure
    raise errors.DeviceCreationError(str(e), created_devices)
def IsExclusiveStorageEnabledNodeUuid(cfg, node_uuid):
  """Whether exclusive_storage is in effect for the given node.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node_uuid: string
  @param node_uuid: The node UUID
  @rtype: bool
  @return: The effective value of exclusive_storage
  @raise errors.OpPrereqError: if no node exists with the given name

  """
  node_info = cfg.GetNodeInfo(node_uuid)
  if node_info is not None:
    return IsExclusiveStorageEnabledNode(cfg, node_info)
  raise errors.OpPrereqError("Invalid node UUID %s" % node_uuid,
                             errors.ECODE_NOENT)
def _CreateBlockDev(lu, node_uuid, instance, device, force_create, info,
                    force_open):
  """Wrapper around L{_CreateBlockDevInner}.

  Annotates the root device with the instance's disk parameters and
  resolves the node's exclusive-storage flag before delegating the
  actual creation work.

  """
  (annotated_disk,) = AnnotateDiskParams(instance, [device], lu.cfg)
  exclusive_storage = IsExclusiveStorageEnabledNodeUuid(lu.cfg, node_uuid)
  return _CreateBlockDevInner(lu, node_uuid, instance, annotated_disk,
                              force_create, info, force_open,
                              exclusive_storage)
def _UndoCreateDisks(lu, disks_created, instance):
  """Undo the work performed by L{CreateDisks}.

  This function is called in case of an error to undo the work of
  L{CreateDisks}.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @param disks_created: the result returned by L{CreateDisks}
  @type instance: L{objects.Instance}
  @param instance: the instance for which disks were created

  """
  for (node_uuid, disk) in disks_created:
    remove_result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
    warning_text = ("Failed to remove newly-created disk %s on node %s" %
                    (disk, lu.cfg.GetNodeName(node_uuid)))
    # Best-effort rollback: log failures instead of raising
    remove_result.Warn(warning_text, logging.warning)
def CreateDisks(lu, instance, instance_disks=None,
                to_skip=None, target_node_uuid=None, disks=None):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  Since the instance may not have been saved to the config file yet, this
  function can not query the config file for the instance's disks; in that
  case they need to be passed as an argument.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type instance_disks: list of L{objects.Disk}
  @param instance_disks: the disks that belong to the instance; if not
      specified, retrieve them from config file
  @type to_skip: list
  @param to_skip: list of indices to skip
  @type target_node_uuid: string
  @param target_node_uuid: if passed, overrides the target node for creation
  @type disks: list of {objects.Disk}
  @param disks: the disks to create; if not specified, all the disks of the
      instance are created
  @return: information about the created disks, to be used to call
      L{_UndoCreateDisks}
  @raise errors.OpPrereqError: in case of error

  """
  info = GetInstanceInfoText(instance)

  if instance_disks is None:
    instance_disks = lu.cfg.GetInstanceDisks(instance.uuid)

  if target_node_uuid is None:
    pnode_uuid = instance.primary_node
    # We cannot use config's 'GetInstanceNodes' here as 'CreateDisks'
    # is used by 'LUInstanceCreate' and the instance object is not
    # stored in the config yet.
    all_node_uuids = []
    for disk in instance_disks:
      all_node_uuids.extend(disk.all_nodes)
    all_node_uuids = set(all_node_uuids)
    # ensure that primary node is always the first
    all_node_uuids.discard(pnode_uuid)
    all_node_uuids = [pnode_uuid] + list(all_node_uuids)
  else:
    pnode_uuid = target_node_uuid
    all_node_uuids = [pnode_uuid]

  if disks is None:
    disks = instance_disks

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)

  if instance.disk_template in constants.DTS_FILEBASED:
    # File-based disks share a common parent directory which must exist
    # on the target node before any disk file is created
    file_storage_dir = os.path.dirname(instance_disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode_uuid, file_storage_dir)

    result.Raise("Failed to create directory '%s' on"
                 " node %s" % (file_storage_dir,
                               lu.cfg.GetNodeName(pnode_uuid)))

  disks_created = []
  for idx, device in enumerate(disks):
    if to_skip and idx in to_skip:
      continue
    logging.info("Creating disk %s for instance '%s'", idx, instance.name)
    for node_uuid in all_node_uuids:
      # Devices are force-created only on the primary (or override) node
      f_create = node_uuid == pnode_uuid
      try:
        _CreateBlockDev(lu, node_uuid, instance, device, f_create, info,
                        f_create)
        disks_created.append((node_uuid, device))
      # Python 2 ``except ExcType, var`` syntax
      except errors.DeviceCreationError, e:
        logging.warning("Creating disk %s for instance '%s' failed",
                        idx, instance.name)
        # Roll back everything created so far (including partial results
        # carried by the exception) before surfacing the error
        disks_created.extend(e.created_devices)
        _UndoCreateDisks(lu, disks_created, instance)
        raise errors.OpExecError(e.message)

  return disks_created
def ComputeDiskSizePerVG(disk_template, disks):
  """Compute disk size requirements in the volume group

  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list of dicts
  @param disks: the disk definitions, using the C{constants.IDISK_*} keys
  @rtype: dict
  @return: mapping of volume group name to the required size in MiB
  @raise errors.ProgrammerError: if the disk template is unknown

  """
  def _compute(disks, payload):
    """Universal algorithm.

    Sums up, per volume group, the sizes of all disks plus a fixed
    per-disk payload (e.g. DRBD metadata).

    """
    vgs = {}
    for disk in disks:
      vg_name = disk[constants.IDISK_VG]
      # BUGFIX: accumulate using the disk's actual VG name as the lookup
      # key; the previous code looked up the IDISK_VG constant itself, so
      # totals for a VG never accumulated across disks
      vgs[vg_name] = vgs.get(vg_name, 0) + disk[constants.IDISK_SIZE] + payload
    return vgs

  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: {},
    constants.DT_PLAIN: _compute(disks, 0),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: _compute(disks, constants.DRBD_META_SIZE),
    constants.DT_FILE: {},
    constants.DT_SHARED_FILE: {},
    constants.DT_GLUSTER: {},
    }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]
def ComputeDisks(op, default_vg):
  """Computes the instance disks.

  @param op: The instance opcode
  @param default_vg: The default_vg to assume
  @return: The computed disks, a list of dicts using the
      C{constants.IDISK_*} keys
  @raise errors.OpPrereqError: if a disk specification is invalid

  """
  disks = []
  for disk in op.disks:
    mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
    if mode not in constants.DISK_ACCESS_SET:
      raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                 mode, errors.ECODE_INVAL)
    size = disk.get(constants.IDISK_SIZE, None)
    if size is None:
      raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
    try:
      size = int(size)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid disk size '%s'" % size,
                                 errors.ECODE_INVAL)

    ext_provider = disk.get(constants.IDISK_PROVIDER, None)
    if ext_provider and op.disk_template != constants.DT_EXT:
      raise errors.OpPrereqError("The '%s' option is only valid for the %s"
                                 " disk template, not %s" %
                                 (constants.IDISK_PROVIDER, constants.DT_EXT,
                                  op.disk_template), errors.ECODE_INVAL)

    data_vg = disk.get(constants.IDISK_VG, default_vg)
    name = disk.get(constants.IDISK_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      # The literal "none" value clears the disk name
      name = None
    new_disk = {
      constants.IDISK_SIZE: size,
      constants.IDISK_MODE: mode,
      constants.IDISK_VG: data_vg,
      constants.IDISK_NAME: name,
      }

    # Optional parameters are copied over only when present
    for key in [
      constants.IDISK_METAVG,
      constants.IDISK_ADOPT,
      constants.IDISK_SPINDLES,
      ]:
      if key in disk:
        new_disk[key] = disk[key]

    # For extstorage, demand the `provider' option and add any
    # additional parameters (ext-params) to the dict
    if op.disk_template == constants.DT_EXT:
      if ext_provider:
        new_disk[constants.IDISK_PROVIDER] = ext_provider
        for key in disk:
          if key not in constants.IDISK_PARAMS:
            new_disk[key] = disk[key]
      else:
        raise errors.OpPrereqError("Missing provider for template '%s'" %
                                   constants.DT_EXT, errors.ECODE_INVAL)

    disks.append(new_disk)

  return disks
def CheckRADOSFreeSpace():
  """Compute disk size requirements inside the RADOS cluster.

  There is no reliable, cheap way to query free space inside a RADOS
  cluster, so we optimistically assume there is always enough room.

  """
  return None
def _GenerateDRBD8Branch(lu, primary_uuid, secondary_uuid, size, vgnames, names,
                         iv_name, p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  Builds the data and metadata LVs and wraps them into a DRBD8 disk
  object connecting the primary and secondary nodes.

  """
  # Exactly one VG/name pair for the data LV and one for the metadata LV
  assert len(vgnames) == len(names) == 2
  port = lu.cfg.AllocatePort()
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())

  # Data volume (the actual payload)
  dev_data = objects.Disk(dev_type=constants.DT_PLAIN, size=size,
                          logical_id=(vgnames[0], names[0]),
                          params={})
  dev_data.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  # Metadata volume (fixed-size DRBD metadata)
  dev_meta = objects.Disk(dev_type=constants.DT_PLAIN,
                          size=constants.DRBD_META_SIZE,
                          logical_id=(vgnames[1], names[1]),
                          params={})
  dev_meta.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  drbd_dev = objects.Disk(dev_type=constants.DT_DRBD8, size=size,
                          logical_id=(primary_uuid, secondary_uuid, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name, params={})
  drbd_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  return drbd_dev
def GenerateDiskTemplate(
  lu, template_name, instance_uuid, primary_node_uuid, secondary_node_uuids,
  disk_info, file_storage_dir, file_driver, base_index,
  feedback_fn, full_disk_params):
  """Generate the entire disk layout for a given template type.

  @param lu: the logical unit on whose behalf we execute
  @param template_name: the disk template to generate disks for
  @param instance_uuid: the UUID of the owning instance
  @param primary_node_uuid: the UUID of the instance's primary node
  @param secondary_node_uuids: list of secondary node UUIDs (exactly one
      for DRBD8, empty for all other templates)
  @param disk_info: list of disk definition dicts (C{constants.IDISK_*} keys)
  @param file_storage_dir: directory for file-based disk templates
  @param file_driver: driver for file-based disk templates
  @param base_index: index offset at which the new disks start
  @param feedback_fn: function used to report progress
  @param full_disk_params: full disk parameters, used to compute the DRBD
      defaults
  @return: list of L{objects.Disk} objects
  @raise errors.ProgrammerError: on an inconsistent template/node
      combination or an unknown template

  """
  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), template_name)

  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_DRBD8:
    if len(secondary_node_uuids) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node_uuid = secondary_node_uuids[0]
    # Two minors per disk: one on the primary, one on the secondary
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node_uuid, remote_node_uuid] * len(disk_info), instance_uuid)

    (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
                                                       full_disk_params)
    drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]

    names = []
    # Pairs of LV names: data LV followed by its metadata LV
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      data_vg = disk.get(constants.IDISK_VG, vgname)
      meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
      disk_dev = _GenerateDRBD8Branch(lu, primary_node_uuid, remote_node_uuid,
                                      disk[constants.IDISK_SIZE],
                                      [data_vg, meta_vg],
                                      names[idx * 2:idx * 2 + 2],
                                      "disk/%d" % disk_index,
                                      minors[idx * 2], minors[idx * 2 + 1])
      disk_dev.mode = disk[constants.IDISK_MODE]
      disk_dev.name = disk.get(constants.IDISK_NAME, None)
      disks.append(disk_dev)
  else:
    if secondary_node_uuids:
      raise errors.ProgrammerError("Wrong template configuration")

    name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
    if name_prefix is None:
      names = None
    else:
      names = _GenerateUniqueNames(lu, ["%s.disk%s" %
                                        (name_prefix, base_index + i)
                                        for i in range(disk_count)])

    # Each branch defines a function computing the per-disk logical id,
    # invoked as logical_id_fn(idx, disk_index, disk) in the loop below
    if template_name == constants.DT_PLAIN:

      def logical_id_fn(idx, _, disk):
        vg = disk.get(constants.IDISK_VG, vgname)
        return (vg, names[idx])

    elif template_name == constants.DT_GLUSTER:
      logical_id_fn = lambda _1, disk_index, _2: \
        (file_driver, "ganeti/%s.%d" % (instance_uuid,
                                        disk_index))
    elif template_name in constants.DTS_FILEBASED: # Gluster handled above
      # NOTE(review): this lambda closes over the loop variable ``idx`` of
      # the enumerate() loop below instead of using its own first argument;
      # it works because it is only called inside that loop -- confirm
      # before reusing it elsewhere
      logical_id_fn = \
        lambda _, disk_index, disk: (file_driver,
                                     "%s/%s" % (file_storage_dir,
                                                names[idx]))
    elif template_name == constants.DT_BLOCK:
      logical_id_fn = \
        lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
                                       disk[constants.IDISK_ADOPT])
    elif template_name == constants.DT_RBD:
      logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
    elif template_name == constants.DT_EXT:
      def logical_id_fn(idx, _, disk):
        provider = disk.get(constants.IDISK_PROVIDER, None)
        if provider is None:
          raise errors.ProgrammerError("Disk template is %s, but '%s' is"
                                       " not found", constants.DT_EXT,
                                       constants.IDISK_PROVIDER)
        return (provider, names[idx])
    else:
      raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)

    dev_type = template_name

    for idx, disk in enumerate(disk_info):
      params = {}
      # Only for the Ext template add disk_info to params
      if template_name == constants.DT_EXT:
        params[constants.IDISK_PROVIDER] = disk[constants.IDISK_PROVIDER]
        for key in disk:
          if key not in constants.IDISK_PARAMS:
            params[key] = disk[key]
      disk_index = idx + base_index
      size = disk[constants.IDISK_SIZE]
      feedback_fn("* disk %s, size %s" %
                  (disk_index, utils.FormatUnit(size, "h")))
      disk_dev = objects.Disk(dev_type=dev_type, size=size,
                              logical_id=logical_id_fn(idx, disk_index, disk),
                              iv_name="disk/%d" % disk_index,
                              mode=disk[constants.IDISK_MODE],
                              params=params,
                              spindles=disk.get(constants.IDISK_SPINDLES))
      disk_dev.name = disk.get(constants.IDISK_NAME, None)
      disk_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
      disks.append(disk_dev)

  return disks
def CheckSpindlesExclusiveStorage(diskdict, es_flag, required):
  """Check the presence of the spindle options with exclusive_storage.

  @type diskdict: dict
  @param diskdict: disk parameters
  @type es_flag: bool
  @param es_flag: the effective value of the exlusive_storage flag
  @type required: bool
  @param required: whether spindles are required or just optional
  @raise errors.OpPrereqError when spindles are given and they should not

  """
  # Treat a missing key and an explicit None the same way
  spindles_given = diskdict.get(constants.IDISK_SPINDLES) is not None
  if spindles_given and not es_flag:
    raise errors.OpPrereqError("Spindles in instance disks cannot be specified"
                               " when exclusive storage is not active",
                               errors.ECODE_INVAL)
  if es_flag and required and not spindles_given:
    raise errors.OpPrereqError("You must specify spindles in instance disks"
                               " when exclusive storage is active",
                               errors.ECODE_INVAL)
class LUInstanceRecreateDisks(LogicalUnit):
  """Recreate an instance's missing disks.

  """
  HPATH = "instance-recreate-disks"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  # Disk parameters that callers may change while recreating disks
  _MODIFYABLE = compat.UniqueFrozenset([
    constants.IDISK_SIZE,
    constants.IDISK_MODE,
    constants.IDISK_SPINDLES,
    ])

  # New or changed disk parameters may have different semantics
  assert constants.IDISK_PARAMS == (_MODIFYABLE | frozenset([
    constants.IDISK_ADOPT,
    # TODO: Implement support changing VG while recreating
    constants.IDISK_VG,
    constants.IDISK_METAVG,
    constants.IDISK_PROVIDER,
    constants.IDISK_NAME,
    ]))
def _RunAllocator(self):
  """Run the allocator based on input opcode.

  Selects new target nodes for the instance via the configured
  iallocator and stores them in C{self.op.node_uuids}/C{self.op.nodes}.

  @raise errors.OpPrereqError: if the allocator cannot compute a
      placement

  """
  be_full = self.cfg.GetClusterInfo().FillBE(self.instance)
  # FIXME
  # The allocator should actually run in "relocate" mode, but current
  # allocators don't support relocating all the nodes of an instance at
  # the same time. As a workaround we use "allocate" mode, but this is
  # suboptimal for two reasons:
  # - The instance name passed to the allocator is present in the list of
  #   existing instances, so there could be a conflict within the
  #   internal structures of the allocator. This doesn't happen with the
  #   current allocators, but it's a liability.
  # - The allocator counts the resources used by the instance twice: once
  #   because the instance exists already, and once because it tries to
  #   allocate a new instance.
  # The allocator could choose some of the nodes on which the instance is
  # running, but that's not a problem. If the instance nodes are broken,
  # they should be already be marked as drained or offline, and hence
  # skipped by the allocator. If instance disks have been lost for other
  # reasons, then recreating the disks on the same nodes should be fine.
  disk_template = self.instance.disk_template
  spindle_use = be_full[constants.BE_SPINDLE_USE]
  disks = [{
    constants.IDISK_SIZE: d.size,
    constants.IDISK_MODE: d.mode,
    constants.IDISK_SPINDLES: d.spindles,
    } for d in self.cfg.GetInstanceDisks(self.instance.uuid)]
  req = iallocator.IAReqInstanceAlloc(name=self.op.instance_name,
                                      disk_template=disk_template,
                                      tags=list(self.instance.GetTags()),
                                      os=self.instance.os,
                                      nics=[{}],
                                      vcpus=be_full[constants.BE_VCPUS],
                                      memory=be_full[constants.BE_MAXMEM],
                                      spindle_use=spindle_use,
                                      disks=disks,
                                      hypervisor=self.instance.hypervisor,
                                      node_whitelist=None)
  ial = iallocator.IAllocator(self.cfg, self.rpc, req)

  ial.Run(self.op.iallocator)

  # The allocation request must cover all nodes of the instance
  assert req.RequiredNodes() == \
    len(self.cfg.GetInstanceNodes(self.instance.uuid))

  if not ial.success:
    raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
                               " %s" % (self.op.iallocator, ial.info),
                               errors.ECODE_NORES)

  (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, ial.result)
  self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
               self.op.instance_name, self.op.iallocator,
               utils.CommaJoin(self.op.nodes))
def CheckArguments(self):
  """Check and normalize the opcode's disk specifications.

  Converts the deprecated flat list of disk indices into the
  (index, params) tuple form, rejects duplicate indices, validates the
  iallocator/nodes combination and rejects attempts to change
  non-modifiable disk parameters.

  @raise errors.OpPrereqError: on duplicate or unmodifiable parameters

  """
  if self.op.disks and ht.TNonNegativeInt(self.op.disks[0]):
    # Normalize and convert deprecated list of disk indices
    self.op.disks = [(idx, {}) for idx in sorted(frozenset(self.op.disks))]

  duplicates = utils.FindDuplicates(map(compat.fst, self.op.disks))
  if duplicates:
    raise errors.OpPrereqError("Some disks have been specified more than"
                               " once: %s" % utils.CommaJoin(duplicates),
                               errors.ECODE_INVAL)

  # We don't want _CheckIAllocatorOrNode selecting the default iallocator
  # when neither iallocator nor nodes are specified
  if self.op.iallocator or self.op.nodes:
    CheckIAllocatorOrNode(self, "iallocator", "nodes")

  for (idx, params) in self.op.disks:
    utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
    unsupported = frozenset(params.keys()) - self._MODIFYABLE
    if unsupported:
      raise errors.OpPrereqError("Parameters for disk %s try to change"
                                 " unmodifyable parameter(s): %s" %
                                 (idx, utils.CommaJoin(unsupported)),
                                 errors.ECODE_INVAL)
def ExpandNames(self):
  """Expand instance/node names and declare the required locks."""
  self._ExpandAndLockInstance()
  self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  if self.op.nodes:
    # Explicit replacement nodes given: lock exactly those
    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)
    self.needed_locks[locking.LEVEL_NODE] = list(self.op.node_uuids)
  else:
    # Target nodes are determined later (DeclareLocks / iallocator)
    self.needed_locks[locking.LEVEL_NODE] = []
    if self.op.iallocator:
      # iallocator will select a new node in the same group
      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

  self.needed_locks[locking.LEVEL_NODE_RES] = []
def DeclareLocks(self, level):
  """Declare node-group/node/node-resource locks per locking level."""
  if level == locking.LEVEL_NODEGROUP:
    # Node groups are only locked when the iallocator picks the nodes
    assert self.op.iallocator is not None
    assert not self.op.nodes
    assert not self.needed_locks[locking.LEVEL_NODEGROUP]
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
    # Lock the primary group used by the instance optimistically; this
    # requires going via the node before it's locked, requiring
    # verification later on
    self.needed_locks[locking.LEVEL_NODEGROUP] = \
      self.cfg.GetInstanceNodeGroups(self.op.instance_uuid, primary_only=True)

  elif level == locking.LEVEL_NODE:
    # If an allocator is used, then we lock all the nodes in the current
    # instance group, as we don't know yet which ones will be selected;
    # if we replace the nodes without using an allocator, locks are
    # already declared in ExpandNames; otherwise, we need to lock all the
    # instance nodes for disk re-creation
    if self.op.iallocator:
      assert not self.op.nodes
      assert not self.needed_locks[locking.LEVEL_NODE]
      assert len(self.owned_locks(locking.LEVEL_NODEGROUP)) == 1

      # Lock member nodes of the group of the primary node
      for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP):
        self.needed_locks[locking.LEVEL_NODE].extend(
          self.cfg.GetNodeGroup(group_uuid).members)

      assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
    elif not self.op.nodes:
      self._LockInstancesNodes(primary_only=False)

  elif level == locking.LEVEL_NODE_RES:
    # Copy node locks
    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
  """Build hooks env.

  This runs on master, primary and secondary nodes of the instance.

  """
  env = BuildInstanceHookEnvByObject(self, self.instance)
  return env
def BuildHooksNodes(self):
  """Build hooks nodes.

  """
  node_list = [self.cfg.GetMasterNode()]
  node_list.extend(self.cfg.GetInstanceNodes(self.instance.uuid))
  return (node_list, node_list)
def CheckPrereq(self):
  """Check prerequisites.

  This checks that the instance is in the cluster and is not running.

  @raise errors.OpPrereqError: on invalid node counts, disk indices,
      diskless instances or a partial recreate combined with node changes

  """
  instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
  assert instance is not None, \
    "Cannot retrieve locked instance %s" % self.op.instance_name
  if self.op.node_uuids:
    # New nodes were given explicitly; their count must match the
    # instance's current node count
    inst_nodes = self.cfg.GetInstanceNodes(instance.uuid)
    if len(self.op.node_uuids) != len(inst_nodes):
      raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
                                 " %d replacement nodes were specified" %
                                 (instance.name, len(inst_nodes),
                                  len(self.op.node_uuids)),
                                 errors.ECODE_INVAL)
    assert instance.disk_template != constants.DT_DRBD8 or \
           len(self.op.node_uuids) == 2
    assert instance.disk_template != constants.DT_PLAIN or \
           len(self.op.node_uuids) == 1
    primary_node = self.op.node_uuids[0]
  else:
    primary_node = instance.primary_node
  if not self.op.iallocator:
    CheckNodeOnline(self, primary_node)

  if instance.disk_template == constants.DT_DISKLESS:
    raise errors.OpPrereqError("Instance '%s' has no disks" %
                               self.op.instance_name, errors.ECODE_INVAL)

  # Verify if node group locks are still correct
  owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
  if owned_groups:
    # Node group locks are acquired only for the primary node (and only
    # when the allocator is used)
    CheckInstanceNodeGroups(self.cfg, instance.uuid, owned_groups,
                            primary_only=True)

  # if we replace nodes *and* the old primary is offline, we don't
  # check the instance state
  old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
  if not ((self.op.iallocator or self.op.node_uuids) and old_pnode.offline):
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot recreate disks")

  if self.op.disks:
    self.disks = dict(self.op.disks)
  else:
    self.disks = dict((idx, {}) for idx in range(len(instance.disks)))

  maxidx = max(self.disks.keys())
  if maxidx >= len(instance.disks):
    raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
                               errors.ECODE_INVAL)

  # Compare against list(range(...)): under Python 3 a list never equals
  # a bare range object, which would make this check fire even for a
  # full recreation, wrongly rejecting valid node changes
  if ((self.op.node_uuids or self.op.iallocator) and
      sorted(self.disks.keys()) != list(range(len(instance.disks)))):
    raise errors.OpPrereqError("Can't recreate disks partially and"
                               " change the nodes at the same time",
                               errors.ECODE_INVAL)

  self.instance = instance

  if self.op.iallocator:
    self._RunAllocator()
    # Release unneeded node and node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.node_uuids)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.node_uuids)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

  if self.op.node_uuids:
    node_uuids = self.op.node_uuids
  else:
    node_uuids = self.cfg.GetInstanceNodes(instance.uuid)
  excl_stor = compat.any(
    rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids).values()
    )
  for new_params in self.disks.values():
    CheckSpindlesExclusiveStorage(new_params, excl_stor, False)
def Exec(self, feedback_fn):
  """Recreate the disks.

  Computes all needed config changes first (collecting them in 'mods'),
  then applies them in one pass, recreates the physical disks and
  optionally wipes them.

  """
  assert (self.owned_locks(locking.LEVEL_NODE) ==
          self.owned_locks(locking.LEVEL_NODE_RES))

  to_skip = []
  mods = [] # keeps track of needed changes

  inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
  for idx, disk in enumerate(inst_disks):
    try:
      changes = self.disks[idx]
    except KeyError:
      # Disk should not be recreated
      to_skip.append(idx)
      continue

    # update secondaries for disks, if needed
    if self.op.node_uuids and disk.dev_type == constants.DT_DRBD8:
      # need to update the nodes and minors
      assert len(self.op.node_uuids) == 2
      assert len(disk.logical_id) == 6 # otherwise disk internals
                                       # have changed
      (_, _, old_port, _, _, old_secret) = disk.logical_id
      new_minors = self.cfg.AllocateDRBDMinor(self.op.node_uuids,
                                              self.instance.uuid)
      # Keep port and secret, replace the two nodes and their minors
      new_id = (self.op.node_uuids[0], self.op.node_uuids[1], old_port,
                new_minors[0], new_minors[1], old_secret)
      assert len(disk.logical_id) == len(new_id)
    else:
      new_id = None

    mods.append((idx, new_id, changes))

  # now that we have passed all asserts above, we can apply the mods
  # in a single run (to avoid partial changes)
  for idx, new_id, changes in mods:
    disk = inst_disks[idx]
    if new_id is not None:
      assert disk.dev_type == constants.DT_DRBD8
      disk.logical_id = new_id
    if changes:
      disk.Update(size=changes.get(constants.IDISK_SIZE, None),
                  mode=changes.get(constants.IDISK_MODE, None),
                  spindles=changes.get(constants.IDISK_SPINDLES, None))
    self.cfg.Update(disk, feedback_fn)

  # change primary node, if needed
  if self.op.node_uuids:
    self.instance.primary_node = self.op.node_uuids[0]
    self.LogWarning("Changing the instance's nodes, you will have to"
                    " remove any disks left on the older nodes manually")

  if self.op.node_uuids:
    self.cfg.Update(self.instance, feedback_fn)

  # All touched nodes must be locked
  mylocks = self.owned_locks(locking.LEVEL_NODE)
  inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
  assert mylocks.issuperset(frozenset(inst_nodes))
  new_disks = CreateDisks(self, self.instance, to_skip=to_skip)

  # TODO: Release node locks before wiping, or explain why it's not possible
  inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
  if self.cfg.GetClusterInfo().prealloc_wipe_disks:
    # Only wipe the disks that were actually recreated
    wipedisks = [(idx, disk, 0)
                 for (idx, disk) in enumerate(inst_disks)
                 if idx not in to_skip]
    WipeOrCleanupDisks(self, self.instance, disks=wipedisks,
                       cleanup=new_disks)
def _PerformNodeInfoCall(lu, node_uuids, vg):
  """Prepares the input and performs a node info call.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: list of string
  @param node_uuids: list of node UUIDs to perform the call for
  @type vg: string
  @param vg: the volume group's name

  """
  # Build the per-node storage unit descriptions for the given VG
  storage_units = rpc.PrepareStorageUnitsForNodes(
    lu.cfg, [(constants.ST_LVM_VG, vg)], node_uuids)
  hv_name = lu.cfg.GetHypervisorType()
  cluster_hvparams = lu.cfg.GetClusterInfo().hvparams
  hv_specs = [(hv_name, cluster_hvparams[hv_name])]
  return lu.rpc.call_node_info(node_uuids, storage_units, hv_specs)
def _CheckVgCapacityForNode(node_name, node_info, vg, requested):
  """Checks the vg capacity for a given node.

  @type node_info: tuple (_, list of dicts, _)
  @param node_info: the result of the node info call for one node
  @type node_name: string
  @param node_name: the name of the node
  @type vg: string
  @param vg: volume group name
  @type requested: int
  @param requested: the amount of disk in MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  (_, space_info, _) = node_info
  lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
      space_info, constants.ST_LVM_VG)
  if not lvm_vg_info:
    # Pass an explicit error code, consistent with the other raises below
    raise errors.OpPrereqError("Can't retrieve storage information for LVM",
                               errors.ECODE_ENVIRON)
  vg_free = lvm_vg_info.get("storage_free", None)
  if not isinstance(vg_free, int):
    raise errors.OpPrereqError("Can't compute free disk space on node"
                               " %s for vg %s, result was '%s'" %
                               (node_name, vg, vg_free), errors.ECODE_ENVIRON)
  if requested > vg_free:
    raise errors.OpPrereqError("Not enough disk space on target node %s"
                               " vg %s: required %d MiB, available %d MiB" %
                               (node_name, vg, requested, vg_free),
                               errors.ECODE_NORES)
def _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, requested):
  """Checks if nodes have enough free disk space in the specified VG.

  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type vg: C{str}
  @param vg: the volume group to check
  @type requested: C{int}
  @param requested: the amount of disk in MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  node_info_results = _PerformNodeInfoCall(lu, node_uuids, vg)
  for uuid in node_uuids:
    name = lu.cfg.GetNodeName(uuid)
    node_result = node_info_results[uuid]
    node_result.Raise("Cannot get current information from node %s" % name,
                      prereq=True, ecode=errors.ECODE_ENVIRON)
    _CheckVgCapacityForNode(name, node_result.payload, vg, requested)
def CheckNodesFreeDiskPerVG(lu, node_uuids, req_sizes):
  """Checks if nodes have enough free disk space in all the VGs.

  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type req_sizes: C{dict}
  @param req_sizes: the hash of vg and corresponding amount of disk in
      MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  # Verify each volume group separately against its required size
  for (vg_name, size_mib) in req_sizes.items():
    _CheckNodesFreeDiskOnVG(lu, node_uuids, vg_name, size_mib)
def _DiskSizeInBytesToMebibytes(lu, size):
"""Converts a disk size in bytes to mebibytes.
Warns and rounds up if the size isn't an even multiple of 1 MiB.
"""
(mib, remainder) = divmod(size, 1024 * 1024)
if remainder != 0:
lu.LogWarning("Disk size is not an even multiple of 1 MiB; rounding up"
" to not overwrite existing data (%s bytes will not be"
" wiped)", (1024 * 1024) - remainder)
mib += 1
return mib
def _CalcEta(time_taken, written, total_size):
"""Calculates the ETA based on size written and total size.
@param time_taken: The time taken so far
@param written: amount written so far
@param total_size: The total size of data to be written
@return: The remaining time in seconds
"""
avg_time = time_taken / float(written)
return (total_size - written) * avg_time
def WipeDisks(lu, instance, disks=None):
  """Wipes instance disks.

  Pauses DRBD synchronization on the primary node, wipes the given
  disks in chunks, and always resumes synchronization in the finally
  block, even if a wipe RPC fails.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type disks: None or list of tuple of (number, L{objects.Disk}, number)
  @param disks: Disk details; tuple contains disk index, disk object and the
      start offset

  """
  node_uuid = instance.primary_node
  node_name = lu.cfg.GetNodeName(node_uuid)

  if disks is None:
    # Default: wipe all instance disks, starting at offset 0
    inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
    disks = [(idx, disk, 0)
             for (idx, disk) in enumerate(inst_disks)]

  logging.info("Pausing synchronization of disks of instance '%s'",
               instance.name)
  result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
                                                  (map(compat.snd, disks),
                                                   instance),
                                                  True)
  result.Raise("Failed to pause disk synchronization on node '%s'" % node_name)

  for idx, success in enumerate(result.payload):
    if not success:
      logging.warn("Pausing synchronization of disk %s of instance '%s'"
                   " failed", idx, instance.name)

  try:
    for (idx, device, offset) in disks:
      # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
      # MAX_WIPE_CHUNK at max. Truncating to integer to avoid rounding errors.
      wipe_chunk_size = \
        int(min(constants.MAX_WIPE_CHUNK,
                device.size / 100.0 * constants.MIN_WIPE_CHUNK_PERCENT))

      size = device.size
      last_output = 0
      start_time = time.time()

      if offset == 0:
        info_text = ""
      else:
        info_text = (" (from %s to %s)" %
                     (utils.FormatUnit(offset, "h"),
                      utils.FormatUnit(size, "h")))

      lu.LogInfo("* Wiping disk %s%s", idx, info_text)

      logging.info("Wiping disk %d for instance %s on node %s using"
                   " chunk size %s", idx, instance.name, node_name,
                   wipe_chunk_size)

      while offset < size:
        wipe_size = min(wipe_chunk_size, size - offset)

        logging.debug("Wiping disk %d, offset %s, chunk %s",
                      idx, offset, wipe_size)

        result = lu.rpc.call_blockdev_wipe(node_uuid, (device, instance),
                                           offset, wipe_size)
        result.Raise("Could not wipe disk %d at offset %d for size %d" %
                     (idx, offset, wipe_size))

        now = time.time()
        offset += wipe_size
        # Report progress at most once a minute
        if now - last_output >= 60:
          eta = _CalcEta(now - start_time, offset, size)
          lu.LogInfo(" - done: %.1f%% ETA: %s",
                     offset / float(size) * 100, utils.FormatSeconds(eta))
          last_output = now
  finally:
    # Always resume synchronization, regardless of wipe success
    logging.info("Resuming synchronization of disks for instance '%s'",
                 instance.name)

    result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
                                                    (map(compat.snd, disks),
                                                     instance),
                                                    False)

    if result.fail_msg:
      lu.LogWarning("Failed to resume disk synchronization on node '%s': %s",
                    node_name, result.fail_msg)
    else:
      for idx, success in enumerate(result.payload):
        if not success:
          lu.LogWarning("Resuming synchronization of disk %s of instance '%s'"
                        " failed", idx, instance.name)
def ImageDisks(lu, instance, image, disks=None):
  """Dumps an image onto an instance disk.

  Pauses DRBD synchronization on the primary node, images the selected
  disks, and always resumes synchronization in the finally block.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type image: string
  @param image: the image whose disks we should create
  @type disks: None or list of ints
  @param disks: disk indices

  """
  node_uuid = instance.primary_node
  node_name = lu.cfg.GetNodeName(node_uuid)

  inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
  if disks is None:
    # Default to the first instance disk only
    disks = [(0, inst_disks[0])]
  else:
    # Materialize as a list: 'disks' is iterated once by the pause RPC
    # below and again by the imaging loop; a lazy map object would be
    # exhausted after the first pass under Python 3
    disks = [(idx, inst_disks[idx]) for idx in disks]

  logging.info("Pausing synchronization of disks of instance '%s'",
               instance.name)
  result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
                                                  (map(compat.snd, disks),
                                                   instance),
                                                  True)
  result.Raise("Failed to pause disk synchronization on node '%s'" % node_name)

  for idx, success in enumerate(result.payload):
    if not success:
      logging.warn("Pausing synchronization of disk %s of instance '%s'"
                   " failed", idx, instance.name)

  try:
    for (idx, device) in disks:
      lu.LogInfo("Imaging disk '%d' for instance '%s' on node '%s'",
                 idx, instance.name, node_name)

      result = lu.rpc.call_blockdev_image(node_uuid, (device, instance),
                                          image, device.size)
      result.Raise("Could not image disk '%d' for instance '%s' on node '%s'" %
                   (idx, instance.name, node_name))
  finally:
    logging.info("Resuming synchronization of disks for instance '%s'",
                 instance.name)

    result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
                                                    (map(compat.snd, disks),
                                                     instance),
                                                    False)

    if result.fail_msg:
      # Fixed: the format string previously had placeholders for the
      # instance and node names but was passed (node_name, fail_msg),
      # so the wrong values were printed and the failure reason lost
      lu.LogWarning("Failed to resume disk synchronization for instance '%s'"
                    " on node '%s': %s",
                    instance.name, node_name, result.fail_msg)
    else:
      for idx, success in enumerate(result.payload):
        if not success:
          lu.LogWarning("Failed to resume synchronization of disk '%d' of"
                        " instance '%s'", idx, instance.name)
def WipeOrCleanupDisks(lu, instance, disks=None, cleanup=None):
  """Wrapper for L{WipeDisks} that handles errors.

  On wipe failure the freshly created disks are removed again before the
  exception is re-raised, so no half-initialized disks are left behind.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should wipe
  @param disks: see L{WipeDisks}
  @param cleanup: the result returned by L{CreateDisks}, used for cleanup in
      case of error
  @raise errors.OpPrereqError: in case of failure

  """
  try:
    WipeDisks(lu, instance, disks=disks)
  except errors.OpExecError:
    logging.warning("Wiping disks for instance '%s' failed",
                    instance.name)
    # Undo the disk creation, then propagate the original error
    _UndoCreateDisks(lu, cleanup, instance)
    raise
def ExpandCheckDisks(instance_disks, disks):
  """Return the instance disks selected by the disks list

  @type disks: list of L{objects.Disk} or None
  @param disks: selected disks
  @rtype: list of L{objects.Disk}
  @return: selected instance disks to act on

  """
  if disks is None:
    return instance_disks

  # Every requested disk must belong to the instance
  inst_disks_uuids = [d.uuid for d in instance_disks]
  disks_uuids = [d.uuid for d in disks]
  if set(disks_uuids).issubset(inst_disks_uuids):
    return disks
  raise errors.ProgrammerError("Can only act on disks belonging to the"
                               " target instance: expected a subset of %s,"
                               " got %s" % (inst_disks_uuids, disks_uuids))
def WaitForSync(lu, instance, disks=None, oneshot=False):
  """Sleep and poll for an instance's disk to sync.

  Polls mirror status via RPC on the primary node until all selected
  disks report no sync in progress (or once, if oneshot is set).
  Returns True when the disks ended up non-degraded.

  """
  inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
  if not inst_disks or disks is not None and not disks:
    # Nothing to wait for
    return True

  disks = ExpandCheckDisks(inst_disks, disks)

  if not oneshot:
    lu.LogInfo("Waiting for instance %s to sync disks", instance.name)

  node_uuid = instance.primary_node
  node_name = lu.cfg.GetNodeName(node_uuid)

  # TODO: Convert to utils.Retry

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node_uuid, (disks, instance))
    msg = rstats.fail_msg
    if msg:
      # RPC failure: retry up to 10 times before giving up
      lu.LogWarning("Can't get any data from node %s: %s", node_name, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node_name)
      time.sleep(6)
      continue
    rstats = rstats.payload
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node_name, disks[i].iv_name)
        continue

      # A degraded disk without a sync percentage won't recover by waiting
      cumul_degraded = (cumul_degraded or
                        (mstat.is_degraded and mstat.sync_percent is None))
      if mstat.sync_percent is not None:
        done = False
        if mstat.estimated_time is not None:
          rem_time = ("%s remaining (estimated)" %
                      utils.FormatSeconds(mstat.estimated_time))
          max_time = mstat.estimated_time
        else:
          rem_time = "no time estimate"
          max_time = 5 # sleep at least a bit between retries
        lu.LogInfo("- device %s: %5.2f%% done, %s",
                   disks[i].iv_name, mstat.sync_percent, rem_time)

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.LogInfo("Instance %s's disks are in sync", instance.name)

  return not cumul_degraded
def ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If the ignore_primary is false, errors on the primary node are
  ignored.

  Modifies the configuration of the instance, so the caller should re-read the
  instance configuration, if needed.

  """
  all_result = True

  if disks is None:
    # only mark instance disks as inactive if all disks are affected
    lu.cfg.MarkInstanceDisksInactive(instance.uuid)
  inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
  disks = ExpandCheckDisks(inst_disks, disks)

  for disk in disks:
    for node_uuid, top_disk in disk.ComputeNodeTree(instance.primary_node):
      result = lu.rpc.call_blockdev_shutdown(node_uuid, (top_disk, instance))
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
        # NOTE(review): the docstring above says primary errors are ignored
        # when ignore_primary is false, but the condition below makes a
        # primary failure fatal *unless* ignore_primary is set; the
        # docstring wording appears inverted — behavior kept as-is.
        # Secondary failures are only fatal when the node is not offline.
        if ((node_uuid == instance.primary_node and not ignore_primary) or
            (node_uuid != instance.primary_node and not result.offline)):
          all_result = False
  return all_result
def _SafeShutdownInstanceDisks(lu, instance, disks=None, req_states=None):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  # Default to requiring the instance to be down
  states = INSTANCE_DOWN if req_states is None else req_states
  CheckInstanceState(lu, instance, states, msg="cannot shutdown disks")
  ShutdownInstanceDisks(lu, instance, disks=disks)
def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
                          ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Modifies the configuration of the instance, so the caller should re-read the
  instance configuration, if needed.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type disks: list of L{objects.Disk} or None
  @param disks: which disks to assemble (or all, if None)
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  # NOTE(review): despite the @return description, this function always
  # returns a (disks_ok, device_info) tuple — callers unpack two values.
  device_info = []
  disks_ok = True

  if disks is None:
    # only mark instance disks as active if all disks are affected
    instance = lu.cfg.MarkInstanceDisksActive(instance.uuid)

  inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
  disks = ExpandCheckDisks(inst_disks, disks)

  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for idx, inst_disk in enumerate(disks):
    for node_uuid, node_disk in inst_disk.ComputeNodeTree(
                                  instance.primary_node):
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
                                             instance, False, idx)
      msg = result.fail_msg
      if msg:
        # An offline secondary is tolerated; other failures respect
        # the ignore_secondaries flag
        secondary_nodes = lu.cfg.GetInstanceSecondaryNodes(instance.uuid)
        is_offline_secondary = (node_uuid in secondary_nodes and
                                result.offline)
        lu.LogWarning("Could not prepare block device %s on node %s"
                      " (is_primary=False, pass=1): %s",
                      inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
        if not (ignore_secondaries or is_offline_secondary):
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for idx, inst_disk in enumerate(disks):
    dev_path = None

    for node_uuid, node_disk in inst_disk.ComputeNodeTree(
                                  instance.primary_node):
      if node_uuid != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
                                             instance, True, idx)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not prepare block device %s on node %s"
                      " (is_primary=True, pass=2): %s",
                      inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
        disks_ok = False
      else:
        dev_path, _, __ = result.payload

    device_info.append((lu.cfg.GetNodeName(instance.primary_node),
                        inst_disk.iv_name, dev_path))

  if not disks_ok:
    # Roll back the "active" marking done above
    lu.cfg.MarkInstanceDisksInactive(instance.uuid)

  return disks_ok, device_info
def StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  Modifies the configuration of the instance, so the caller should re-read the
  instance configuration, if needed.

  """
  disks_ok, _ = AssembleInstanceDisks(lu, instance,
                                      ignore_secondaries=force)
  if disks_ok:
    return

  # Assembly failed: tear everything down again before raising
  ShutdownInstanceDisks(lu, instance)
  if force is not None and not force:
    lu.LogWarning("",
                  hint=("If the message above refers to a secondary node,"
                        " you can retry the operation using '--force'"))
  raise errors.OpExecError("Disk consistency error")
class LUInstanceGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    """Lock the instance; node locks are computed in DeclareLocks."""
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the instance's nodes and mirror them as resource locks."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      "ABSOLUTE": self.op.absolute,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + \
      list(self.cfg.GetInstanceNodes(self.instance.uuid))
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    node_uuids = list(self.cfg.GetInstanceNodes(self.instance.uuid))
    for node_uuid in node_uuids:
      CheckNodeOnline(self, node_uuid)
    self.node_es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids)

    if self.instance.disk_template not in constants.DTS_GROWABLE:
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing", errors.ECODE_INVAL)

    self.disk = self.cfg.GetDiskInfo(self.instance.FindDisk(self.op.disk))

    if self.op.absolute:
      # Absolute mode: op.amount is the new total size
      self.target = self.op.amount
      self.delta = self.target - self.disk.size
      if self.delta < 0:
        raise errors.OpPrereqError("Requested size (%s) is smaller than "
                                   "current disk size (%s)" %
                                   (utils.FormatUnit(self.target, "h"),
                                    utils.FormatUnit(self.disk.size, "h")),
                                   errors.ECODE_STATE)
    else:
      # Relative mode: op.amount is the increment
      self.delta = self.op.amount
      self.target = self.disk.size + self.delta
      if self.delta < 0:
        raise errors.OpPrereqError("Requested increment (%s) is negative" %
                                   utils.FormatUnit(self.delta, "h"),
                                   errors.ECODE_INVAL)

    self._CheckDiskSpace(node_uuids, self.disk.ComputeGrowth(self.delta))

  def _CheckDiskSpace(self, node_uuids, req_vgspace):
    """Verify free VG space on all nodes, unless exclusive storage is on."""
    template = self.instance.disk_template
    if (template not in (constants.DTS_NO_FREE_SPACE_CHECK) and
        not any(self.node_es_flags.values())):
      # TODO: check the free disk space for file, when that feature will be
      # supported
      # With exclusive storage we need to do something smarter than just looking
      # at free space, which, in the end, is basically a dry run. So we rely on
      # the dry run performed in Exec() instead.
      CheckNodesFreeDiskPerVG(self, node_uuids, req_vgspace)

  def Exec(self, feedback_fn):
    """Execute disk grow.

    Order matters: dry-run grow on all nodes first, then the real grow on
    the backing storage of every node, then the logical grow on the
    primary, then config update, optional wipe of the added space, and
    finally the optional wait for resync.

    """
    assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    wipe_disks = self.cfg.GetClusterInfo().prealloc_wipe_disks

    disks_ok, _ = AssembleInstanceDisks(self, self.instance, disks=[self.disk])
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block device to grow")

    feedback_fn("Growing disk %s of instance '%s' by %s to %s" %
                (self.op.disk, self.instance.name,
                 utils.FormatUnit(self.delta, "h"),
                 utils.FormatUnit(self.target, "h")))

    # First run all grow ops in dry-run mode
    inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
    for node_uuid in inst_nodes:
      result = self.rpc.call_blockdev_grow(node_uuid,
                                           (self.disk, self.instance),
                                           self.delta, True, True,
                                           self.node_es_flags[node_uuid])
      result.Raise("Dry-run grow request failed to node %s" %
                   self.cfg.GetNodeName(node_uuid))

    if wipe_disks:
      # Get disk size from primary node for wiping
      result = self.rpc.call_blockdev_getdimensions(
        self.instance.primary_node, [([self.disk], self.instance)])
      result.Raise("Failed to retrieve disk size from node '%s'" %
                   self.instance.primary_node)

      (disk_dimensions, ) = result.payload

      if disk_dimensions is None:
        raise errors.OpExecError("Failed to retrieve disk size from primary"
                                 " node '%s'" % self.instance.primary_node)
      (disk_size_in_bytes, _) = disk_dimensions

      old_disk_size = _DiskSizeInBytesToMebibytes(self, disk_size_in_bytes)

      assert old_disk_size >= self.disk.size, \
        ("Retrieved disk size too small (got %s, should be at least %s)" %
         (old_disk_size, self.disk.size))
    else:
      old_disk_size = None

    # We know that (as far as we can test) operations across different
    # nodes will succeed, time to run it for real on the backing storage
    for node_uuid in inst_nodes:
      result = self.rpc.call_blockdev_grow(node_uuid,
                                           (self.disk, self.instance),
                                           self.delta, False, True,
                                           self.node_es_flags[node_uuid])
      result.Raise("Grow request failed to node %s" %
                   self.cfg.GetNodeName(node_uuid))

    # And now execute it for logical storage, on the primary node
    node_uuid = self.instance.primary_node
    result = self.rpc.call_blockdev_grow(node_uuid, (self.disk, self.instance),
                                         self.delta, False, False,
                                         self.node_es_flags[node_uuid])
    result.Raise("Grow request failed to node %s" %
                 self.cfg.GetNodeName(node_uuid))

    self.disk.RecordGrow(self.delta)
    self.cfg.Update(self.instance, feedback_fn)
    self.cfg.Update(self.disk, feedback_fn)

    # Changes have been recorded, release node lock
    ReleaseLocks(self, locking.LEVEL_NODE)

    # Downgrade lock while waiting for sync
    self.WConfdClient().DownGradeLocksLevel(
      locking.LEVEL_NAMES[locking.LEVEL_INSTANCE])

    assert wipe_disks ^ (old_disk_size is None)

    if wipe_disks:
      inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
      assert inst_disks[self.op.disk] == self.disk

      # Wipe newly added disk space
      WipeDisks(self, self.instance,
                disks=[(self.op.disk, self.disk, old_disk_size)])

    if self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, self.instance, disks=[self.disk])
      if disk_abort:
        self.LogWarning("Disk syncing has not returned a good status; check"
                        " the instance")
      if not self.instance.disks_active:
        _SafeShutdownInstanceDisks(self, self.instance, disks=[self.disk])
    elif not self.instance.disks_active:
      self.LogWarning("Not shutting down the disk even if the instance is"
                      " not supposed to be running because no wait for"
                      " sync mode was requested")

    assert self.owned_locks(locking.LEVEL_NODE_RES)
    assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
class LUInstanceReplaceDisks(LogicalUnit):
"""Replace the disks of an instance.
"""
HPATH = "mirrors-replace"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
def CheckArguments(self):
"""Check arguments.
"""
if self.op.mode == constants.REPLACE_DISK_CHG:
if self.op.remote_node is None and self.op.iallocator is None:
raise errors.OpPrereqError("When changing the secondary either an"
" iallocator script must be used or the"
" new node given", errors.ECODE_INVAL)
else:
CheckIAllocatorOrNode(self, "iallocator", "remote_node")
elif self.op.remote_node is not None or self.op.iallocator is not None:
# Not replacing the secondary
raise errors.OpPrereqError("The iallocator and new node options can"
" only be used when changing the"
" secondary node", errors.ECODE_INVAL)
def ExpandNames(self):
self._ExpandAndLockInstance()
assert locking.LEVEL_NODE not in self.needed_locks
assert locking.LEVEL_NODE_RES not in self.needed_locks
assert locking.LEVEL_NODEGROUP not in self.needed_locks
assert self.op.iallocator is None or self.op.remote_node is None, \
"Conflicting options"
if self.op.remote_node is not None:
(self.op.remote_node_uuid, self.op.remote_node) = \
ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
self.op.remote_node)
# Warning: do not remove the locking of the new secondary here
# unless DRBD8Dev.AddChildren is changed to work in parallel;
# currently it doesn't since parallel invocations of
# FindUnusedMinor will conflict
self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node_uuid]
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
else:
self.needed_locks[locking.LEVEL_NODE] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
if self.op.iallocator is not None:
# iallocator will select a new node in the same group
self.needed_locks[locking.LEVEL_NODEGROUP] = []
self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
self.needed_locks[locking.LEVEL_NODE_RES] = []
self.replacer = TLReplaceDisks(self, self.op.instance_uuid,
self.op.instance_name, self.op.mode,
self.op.iallocator, self.op.remote_node_uuid,
self.op.disks, self.op.early_release,
self.op.ignore_ipolicy)
self.tasklets = [self.replacer]
def DeclareLocks(self, level):
if level == locking.LEVEL_NODEGROUP:
assert self.op.remote_node_uuid is None
assert self.op.iallocator is not None
assert not self.needed_locks[locking.LEVEL_NODEGROUP]
self.share_locks[locking.LEVEL_NODEGROUP] = 1
# Lock all groups used by instance optimistically; this requires going
# via the node before it's locked, requiring verification later on
self.needed_locks[locking.LEVEL_NODEGROUP] = \
self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
elif level == locking.LEVEL_NODE:
if self.op.iallocator is not None:
assert self.op.remote_node_uuid is None
assert not self.needed_locks[locking.LEVEL_NODE]
assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
# Lock member nodes of all locked groups
self.needed_locks[locking.LEVEL_NODE] = \
[node_uuid
for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
for node_uuid in self.cfg.GetNodeGroup(group_uuid).members]
else:
self._LockInstancesNodes()
elif level == locking.LEVEL_NODE_RES:
# Reuse node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
self.needed_locks[locking.LEVEL_NODE]
def BuildHooksEnv(self):
"""Build hooks env.
This runs on the master, the primary and all the secondaries.
"""
instance = self.replacer.instance
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
env = {
"MODE": self.op.mode,
"NEW_SECONDARY": self.op.remote_node,
"OLD_SECONDARY": self.cfg.GetNodeName(secondary_nodes[0]),
}
env.update(BuildInstanceHookEnvByObject(self, instance))
return env
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
instance = self.replacer.instance
nl = [
self.cfg.GetMasterNode(),
instance.primary_node,
]
if self.op.remote_node_uuid is not None:
nl.append(self.op.remote_node_uuid)
return nl, nl
def CheckPrereq(self):
"""Check prerequisites.
"""
# Verify if node group locks are still correct
owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
if owned_groups:
CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid, owned_groups)
return LogicalUnit.CheckPrereq(self)
class LUInstanceActivateDisks(NoHooksLU):
"""Bring up an instance's disks.
"""
REQ_BGL = False
def ExpandNames(self):
self._ExpandAndLockInstance()
self.needed_locks[locking.LEVEL_NODE] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE:
self._LockInstancesNodes()
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
CheckNodeOnline(self, self.instance.primary_node)
def Exec(self, feedback_fn):
"""Activate the disks.
"""
disks_ok, disks_info = \
AssembleInstanceDisks(self, self.instance,
ignore_size=self.op.ignore_size)
if not disks_ok:
raise errors.OpExecError("Cannot activate block devices")
if self.op.wait_for_sync:
if not WaitForSync(self, self.instance):
self.cfg.MarkInstanceDisksInactive(self.instance.uuid)
raise errors.OpExecError("Some disks of the instance are degraded!")
return disks_info
class LUInstanceDeactivateDisks(NoHooksLU):
"""Shutdown an instance's disks.
"""
REQ_BGL = False
def ExpandNames(self):
self._ExpandAndLockInstance()
self.needed_locks[locking.LEVEL_NODE] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE:
self._LockInstancesNodes()
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
def Exec(self, feedback_fn):
"""Deactivate the disks
"""
if self.op.force:
ShutdownInstanceDisks(self, self.instance)
else:
_SafeShutdownInstanceDisks(self, self.instance)
def _CheckDiskConsistencyInner(lu, instance, dev, node_uuid, on_primary,
ldisk=False):
"""Check that mirrors are not degraded.
@attention: The device has to be annotated already.
The ldisk parameter, if True, will change the test from the
is_degraded attribute (which represents overall non-ok status for
the device(s)) to the ldisk (representing the local storage status).
"""
result = True
if on_primary or dev.AssembleOnSecondary():
rstats = lu.rpc.call_blockdev_find(node_uuid, (dev, instance))
msg = rstats.fail_msg
if msg:
lu.LogWarning("Can't find disk on node %s: %s",
lu.cfg.GetNodeName(node_uuid), msg)
result = False
elif not rstats.payload:
lu.LogWarning("Can't find disk on node %s", lu.cfg.GetNodeName(node_uuid))
result = False
else:
if ldisk:
result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
else:
result = result and not rstats.payload.is_degraded
if dev.children:
for child in dev.children:
result = result and _CheckDiskConsistencyInner(lu, instance, child,
node_uuid, on_primary)
return result
def CheckDiskConsistency(lu, instance, dev, node_uuid, on_primary, ldisk=False):
"""Wrapper around L{_CheckDiskConsistencyInner}.
"""
(disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
return _CheckDiskConsistencyInner(lu, instance, disk, node_uuid, on_primary,
ldisk=ldisk)
def _BlockdevFind(lu, node_uuid, dev, instance):
"""Wrapper around call_blockdev_find to annotate diskparams.
@param lu: A reference to the lu object
@param node_uuid: The node to call out
@param dev: The device to find
@param instance: The instance object the device belongs to
@returns The result of the rpc call
"""
(disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
return lu.rpc.call_blockdev_find(node_uuid, (disk, instance))
def _GenerateUniqueNames(lu, exts):
"""Generate a suitable LV name.
This will generate a logical volume name for the given instance.
"""
results = []
for val in exts:
new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
results.append("%s%s" % (new_id, val))
return results
class TLReplaceDisks(Tasklet):
"""Replaces disks for an instance.
Note: Locking is not within the scope of this class.
"""
def __init__(self, lu, instance_uuid, instance_name, mode, iallocator_name,
remote_node_uuid, disks, early_release, ignore_ipolicy):
"""Initializes this class.
"""
Tasklet.__init__(self, lu)
# Parameters
self.instance_uuid = instance_uuid
self.instance_name = instance_name
self.mode = mode
self.iallocator_name = iallocator_name
self.remote_node_uuid = remote_node_uuid
self.disks = disks
self.early_release = early_release
self.ignore_ipolicy = ignore_ipolicy
# Runtime data
self.instance = None
self.new_node_uuid = None
self.target_node_uuid = None
self.other_node_uuid = None
self.remote_node_info = None
self.node_secondary_ip = None
@staticmethod
def _RunAllocator(lu, iallocator_name, instance_uuid,
relocate_from_node_uuids):
"""Compute a new secondary node using an IAllocator.
"""
req = iallocator.IAReqRelocate(
inst_uuid=instance_uuid,
relocate_from_node_uuids=list(relocate_from_node_uuids))
ial = iallocator.IAllocator(lu.cfg, lu.rpc, req)
ial.Run(iallocator_name)
if not ial.success:
raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
" %s" % (iallocator_name, ial.info),
errors.ECODE_NORES)
remote_node_name = ial.result[0]
remote_node = lu.cfg.GetNodeInfoByName(remote_node_name)
if remote_node is None:
raise errors.OpPrereqError("Node %s not found in configuration" %
remote_node_name, errors.ECODE_NOENT)
lu.LogInfo("Selected new secondary for instance '%s': %s",
instance_uuid, remote_node_name)
return remote_node.uuid
def _FindFaultyDisks(self, node_uuid):
"""Wrapper for L{FindFaultyInstanceDisks}.
"""
return FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
node_uuid, True)
def _CheckDisksActivated(self, instance):
"""Checks if the instance disks are activated.
@param instance: The instance to check disks
@return: True if they are activated, False otherwise
"""
node_uuids = self.cfg.GetInstanceNodes(instance.uuid)
for idx, dev in enumerate(self.cfg.GetInstanceDisks(instance.uuid)):
for node_uuid in node_uuids:
self.lu.LogInfo("Checking disk/%d on %s", idx,
self.cfg.GetNodeName(node_uuid))
result = _BlockdevFind(self, node_uuid, dev, instance)
if result.offline:
continue
elif result.fail_msg or not result.payload:
return False
return True
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.instance_name
if self.instance.disk_template != constants.DT_DRBD8:
raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
" instances", errors.ECODE_INVAL)
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
if len(secondary_nodes) != 1:
raise errors.OpPrereqError("The instance has a strange layout,"
" expected one secondary but found %d" %
len(secondary_nodes),
errors.ECODE_FAULT)
secondary_node_uuid = secondary_nodes[0]
if self.iallocator_name is None:
remote_node_uuid = self.remote_node_uuid
else:
remote_node_uuid = self._RunAllocator(self.lu, self.iallocator_name,
self.instance.uuid,
secondary_nodes)
if remote_node_uuid is None:
self.remote_node_info = None
else:
assert remote_node_uuid in self.lu.owned_locks(locking.LEVEL_NODE), \
"Remote node '%s' is not locked" % remote_node_uuid
self.remote_node_info = self.cfg.GetNodeInfo(remote_node_uuid)
assert self.remote_node_info is not None, \
"Cannot retrieve locked node %s" % remote_node_uuid
if remote_node_uuid == self.instance.primary_node:
raise errors.OpPrereqError("The specified node is the primary node of"
" the instance", errors.ECODE_INVAL)
if remote_node_uuid == secondary_node_uuid:
raise errors.OpPrereqError("The specified node is already the"
" secondary node of the instance",
errors.ECODE_INVAL)
if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
constants.REPLACE_DISK_CHG):
raise errors.OpPrereqError("Cannot specify disks to be replaced",
errors.ECODE_INVAL)
if self.mode == constants.REPLACE_DISK_AUTO:
if not self._CheckDisksActivated(self.instance):
raise errors.OpPrereqError("Please run activate-disks on instance %s"
" first" % self.instance_name,
errors.ECODE_STATE)
faulty_primary = self._FindFaultyDisks(self.instance.primary_node)
faulty_secondary = self._FindFaultyDisks(secondary_node_uuid)
if faulty_primary and faulty_secondary:
raise errors.OpPrereqError("Instance %s has faulty disks on more than"
" one node and can not be repaired"
" automatically" % self.instance_name,
errors.ECODE_STATE)
if faulty_primary:
self.disks = faulty_primary
self.target_node_uuid = self.instance.primary_node
self.other_node_uuid = secondary_node_uuid
check_nodes = [self.target_node_uuid, self.other_node_uuid]
elif faulty_secondary:
self.disks = faulty_secondary
self.target_node_uuid = secondary_node_uuid
self.other_node_uuid = self.instance.primary_node
check_nodes = [self.target_node_uuid, self.other_node_uuid]
else:
self.disks = []
check_nodes = []
else:
# Non-automatic modes
if self.mode == constants.REPLACE_DISK_PRI:
self.target_node_uuid = self.instance.primary_node
self.other_node_uuid = secondary_node_uuid
check_nodes = [self.target_node_uuid, self.other_node_uuid]
elif self.mode == constants.REPLACE_DISK_SEC:
self.target_node_uuid = secondary_node_uuid
self.other_node_uuid = self.instance.primary_node
check_nodes = [self.target_node_uuid, self.other_node_uuid]
elif self.mode == constants.REPLACE_DISK_CHG:
self.new_node_uuid = remote_node_uuid
self.other_node_uuid = self.instance.primary_node
self.target_node_uuid = secondary_node_uuid
check_nodes = [self.new_node_uuid, self.other_node_uuid]
CheckNodeNotDrained(self.lu, remote_node_uuid)
CheckNodeVmCapable(self.lu, remote_node_uuid)
old_node_info = self.cfg.GetNodeInfo(secondary_node_uuid)
assert old_node_info is not None
if old_node_info.offline and not self.early_release:
# doesn't make sense to delay the release
self.early_release = True
self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
" early-release mode", secondary_node_uuid)
else:
raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
self.mode)
# If not specified all disks should be replaced
if not self.disks:
self.disks = range(len(self.instance.disks))
# TODO: This is ugly, but right now we can't distinguish between internal
# submitted opcode and external one. We should fix that.
if self.remote_node_info:
# We change the node, lets verify it still meets instance policy
new_group_info = self.cfg.GetNodeGroup(self.remote_node_info.group)
cluster = self.cfg.GetClusterInfo()
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
new_group_info)
CheckTargetNodeIPolicy(self, ipolicy, self.instance,
self.remote_node_info, self.cfg,
ignore=self.ignore_ipolicy)
for node_uuid in check_nodes:
CheckNodeOnline(self.lu, node_uuid)
touched_nodes = frozenset(node_uuid for node_uuid in [self.new_node_uuid,
self.other_node_uuid,
self.target_node_uuid]
if node_uuid is not None)
# Release unneeded node and node resource locks
ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
# Release any owned node group
ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
# Check whether disks are valid
for disk_idx in self.disks:
self.instance.FindDisk(disk_idx)
# Get secondary node IP addresses
self.node_secondary_ip = dict((uuid, node.secondary_ip) for (uuid, node)
in self.cfg.GetMultiNodeInfo(touched_nodes))
def Exec(self, feedback_fn):
"""Execute disk replacement.
This dispatches the disk replacement to the appropriate handler.
"""
if __debug__:
# Verify owned locks before starting operation
owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
assert set(owned_nodes) == set(self.node_secondary_ip), \
("Incorrect node locks, owning %s, expected %s" %
(owned_nodes, self.node_secondary_ip.keys()))
assert (self.lu.owned_locks(locking.LEVEL_NODE) ==
self.lu.owned_locks(locking.LEVEL_NODE_RES))
owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
assert list(owned_instances) == [self.instance_name], \
"Instance '%s' not locked" % self.instance_name
if not self.disks:
feedback_fn("No disks need replacement for instance '%s'" %
self.instance.name)
return
feedback_fn("Replacing disk(s) %s for instance '%s'" %
(utils.CommaJoin(self.disks), self.instance.name))
feedback_fn("Current primary node: %s" %
self.cfg.GetNodeName(self.instance.primary_node))
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
feedback_fn("Current seconary node: %s" %
utils.CommaJoin(self.cfg.GetNodeNames(secondary_nodes)))
activate_disks = not self.instance.disks_active
# Activate the instance disks if we're replacing them on a down instance
if activate_disks:
StartInstanceDisks(self.lu, self.instance, True)
# Re-read the instance object modified by the previous call
self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
try:
# Should we replace the secondary node?
if self.new_node_uuid is not None:
fn = self._ExecDrbd8Secondary
else:
fn = self._ExecDrbd8DiskOnly
result = fn(feedback_fn)
finally:
# Deactivate the instance disks if we're replacing them on a
# down instance
if activate_disks:
_SafeShutdownInstanceDisks(self.lu, self.instance,
req_states=INSTANCE_NOT_RUNNING)
assert not self.lu.owned_locks(locking.LEVEL_NODE)
if __debug__:
# Verify owned locks
owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE_RES)
nodes = frozenset(self.node_secondary_ip)
assert ((self.early_release and not owned_nodes) or
(not self.early_release and not (set(owned_nodes) - nodes))), \
("Not owning the correct locks, early_release=%s, owned=%r,"
" nodes=%r" % (self.early_release, owned_nodes, nodes))
return result
def _CheckVolumeGroup(self, node_uuids):
self.lu.LogInfo("Checking volume groups")
vgname = self.cfg.GetVGName()
# Make sure volume group exists on all involved nodes
results = self.rpc.call_vg_list(node_uuids)
if not results:
raise errors.OpExecError("Can't list volume groups on the nodes")
for node_uuid in node_uuids:
res = results[node_uuid]
res.Raise("Error checking node %s" % self.cfg.GetNodeName(node_uuid))
if vgname not in res.payload:
raise errors.OpExecError("Volume group '%s' not found on node %s" %
(vgname, self.cfg.GetNodeName(node_uuid)))
def _CheckDisksExistence(self, node_uuids):
# Check disk existence
for idx, dev in enumerate(self.cfg.GetInstanceDisks(self.instance.uuid)):
if idx not in self.disks:
continue
for node_uuid in node_uuids:
self.lu.LogInfo("Checking disk/%d on %s", idx,
self.cfg.GetNodeName(node_uuid))
result = _BlockdevFind(self, node_uuid, dev, self.instance)
msg = result.fail_msg
if msg or not result.payload:
if not msg:
msg = "disk not found"
if not self._CheckDisksActivated(self.instance):
extra_hint = ("\nDisks seem to be not properly activated. Try"
" running activate-disks on the instance before"
" using replace-disks.")
else:
extra_hint = ""
raise errors.OpExecError("Can't find disk/%d on node %s: %s%s" %
(idx, self.cfg.GetNodeName(node_uuid), msg,
extra_hint))
def _CheckDisksConsistency(self, node_uuid, on_primary, ldisk):
for idx, dev in enumerate(self.cfg.GetInstanceDisks(self.instance.uuid)):
if idx not in self.disks:
continue
self.lu.LogInfo("Checking disk/%d consistency on node %s" %
(idx, self.cfg.GetNodeName(node_uuid)))
if not CheckDiskConsistency(self.lu, self.instance, dev, node_uuid,
on_primary, ldisk=ldisk):
raise errors.OpExecError("Node %s has degraded storage, unsafe to"
" replace disks for instance %s" %
(self.cfg.GetNodeName(node_uuid),
self.instance.name))
def _CreateNewStorage(self, node_uuid):
"""Create new storage on the primary or secondary node.
This is only used for same-node replaces, not for changing the
secondary node, hence we don't want to modify the existing disk.
"""
iv_names = {}
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
disks = AnnotateDiskParams(self.instance, inst_disks, self.cfg)
for idx, dev in enumerate(disks):
if idx not in self.disks:
continue
self.lu.LogInfo("Adding storage on %s for disk/%d",
self.cfg.GetNodeName(node_uuid), idx)
lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
names = _GenerateUniqueNames(self.lu, lv_names)
(data_disk, meta_disk) = dev.children
vg_data = data_disk.logical_id[0]
lv_data = objects.Disk(dev_type=constants.DT_PLAIN, size=dev.size,
logical_id=(vg_data, names[0]),
params=data_disk.params)
vg_meta = meta_disk.logical_id[0]
lv_meta = objects.Disk(dev_type=constants.DT_PLAIN,
size=constants.DRBD_META_SIZE,
logical_id=(vg_meta, names[1]),
params=meta_disk.params)
new_lvs = [lv_data, lv_meta]
old_lvs = [child.Copy() for child in dev.children]
iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg, node_uuid)
# we pass force_create=True to force the LVM creation
for new_lv in new_lvs:
try:
_CreateBlockDevInner(self.lu, node_uuid, self.instance, new_lv, True,
GetInstanceInfoText(self.instance), False,
excl_stor)
except errors.DeviceCreationError, e:
raise errors.OpExecError("Can't create block device: %s" % e.message)
return iv_names
def _CheckDevices(self, node_uuid, iv_names):
for name, (dev, _, _) in iv_names.iteritems():
result = _BlockdevFind(self, node_uuid, dev, self.instance)
msg = result.fail_msg
if msg or not result.payload:
if not msg:
msg = "disk not found"
raise errors.OpExecError("Can't find DRBD device %s: %s" %
(name, msg))
if result.payload.is_degraded:
raise errors.OpExecError("DRBD device %s is degraded!" % name)
def _RemoveOldStorage(self, node_uuid, iv_names):
for name, (_, old_lvs, _) in iv_names.iteritems():
self.lu.LogInfo("Remove logical volumes for %s", name)
for lv in old_lvs:
msg = self.rpc.call_blockdev_remove(node_uuid, (lv, self.instance)) \
.fail_msg
if msg:
self.lu.LogWarning("Can't remove old LV: %s", msg,
hint="remove unused LVs manually")
def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
"""Replace a disk on the primary or secondary for DRBD 8.
The algorithm for replace is quite complicated:
1. for each disk to be replaced:
1. create new LVs on the target node with unique names
1. detach old LVs from the drbd device
1. rename old LVs to name_replaced.<time_t>
1. rename new LVs to old LVs
1. attach the new LVs (with the old names now) to the drbd device
1. wait for sync across all devices
1. for each modified disk:
1. remove old LVs (which have the name name_replaces.<time_t>)
Failures are not very well handled.
"""
steps_total = 6
# Step: check device activation
self.lu.LogStep(1, steps_total, "Check device existence")
self._CheckDisksExistence([self.other_node_uuid, self.target_node_uuid])
self._CheckVolumeGroup([self.target_node_uuid, self.other_node_uuid])
# Step: check other node consistency
self.lu.LogStep(2, steps_total, "Check peer consistency")
self._CheckDisksConsistency(
self.other_node_uuid, self.other_node_uuid == self.instance.primary_node,
False)
# Step: create new storage
self.lu.LogStep(3, steps_total, "Allocate new storage")
iv_names = self._CreateNewStorage(self.target_node_uuid)
# Step: for each lv, detach+rename*2+attach
self.lu.LogStep(4, steps_total, "Changing drbd configuration")
for dev, old_lvs, new_lvs in iv_names.itervalues():
self.lu.LogInfo("Detaching %s drbd from local storage", dev.iv_name)
result = self.rpc.call_blockdev_removechildren(self.target_node_uuid,
(dev, self.instance),
(old_lvs, self.instance))
result.Raise("Can't detach drbd from local storage on node"
" %s for device %s" %
(self.cfg.GetNodeName(self.target_node_uuid), dev.iv_name))
#dev.children = []
#cfg.Update(instance)
# ok, we created the new LVs, so now we know we have the needed
# storage; as such, we proceed on the target node to rename
# old_lv to _old, and new_lv to old_lv; note that we rename LVs
# using the assumption that logical_id == unique_id on that node
# FIXME(iustin): use a better name for the replaced LVs
temp_suffix = int(time.time())
ren_fn = lambda d, suff: (d.logical_id[0],
d.logical_id[1] + "_replaced-%s" % suff)
# Build the rename list based on what LVs exist on the node
rename_old_to_new = []
for to_ren in old_lvs:
result = self.rpc.call_blockdev_find(self.target_node_uuid,
(to_ren, self.instance))
if not result.fail_msg and result.payload:
# device exists
rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
self.lu.LogInfo("Renaming the old LVs on the target node")
result = self.rpc.call_blockdev_rename(self.target_node_uuid,
rename_old_to_new)
result.Raise("Can't rename old LVs on node %s" %
self.cfg.GetNodeName(self.target_node_uuid))
# Now we rename the new LVs to the old LVs
self.lu.LogInfo("Renaming the new LVs on the target node")
rename_new_to_old = [(new, old.logical_id)
for old, new in zip(old_lvs, new_lvs)]
result = self.rpc.call_blockdev_rename(self.target_node_uuid,
rename_new_to_old)
result.Raise("Can't rename new LVs on node %s" %
self.cfg.GetNodeName(self.target_node_uuid))
# Intermediate steps of in memory modifications
for old, new in zip(old_lvs, new_lvs):
new.logical_id = old.logical_id
# We need to modify old_lvs so that removal later removes the
# right LVs, not the newly added ones; note that old_lvs is a
# copy here
for disk in old_lvs:
disk.logical_id = ren_fn(disk, temp_suffix)
# Now that the new lvs have the old name, we can add them to the device
self.lu.LogInfo("Adding new mirror component on %s",
self.cfg.GetNodeName(self.target_node_uuid))
result = self.rpc.call_blockdev_addchildren(self.target_node_uuid,
(dev, self.instance),
(new_lvs, self.instance))
msg = result.fail_msg
if msg:
for new_lv in new_lvs:
msg2 = self.rpc.call_blockdev_remove(self.target_node_uuid,
(new_lv, self.instance)).fail_msg
if msg2:
self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
hint=("cleanup manually the unused logical"
"volumes"))
raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
cstep = itertools.count(5)
if self.early_release:
self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
self._RemoveOldStorage(self.target_node_uuid, iv_names)
# TODO: Check if releasing locks early still makes sense
ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
else:
# Release all resource locks except those used by the instance
ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
keep=self.node_secondary_ip.keys())
# Release all node locks while waiting for sync
ReleaseLocks(self.lu, locking.LEVEL_NODE)
# TODO: Can the instance lock be downgraded here? Take the optional disk
# shutdown in the caller into consideration.
# Wait for sync
# This can fail as the old devices are degraded and _WaitForSync
# does a combined result over all disks, so we don't check its return value
self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
WaitForSync(self.lu, self.instance)
# Check all devices manually
self._CheckDevices(self.instance.primary_node, iv_names)
# Step: remove old storage
if not self.early_release:
self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
self._RemoveOldStorage(self.target_node_uuid, iv_names)
def _ExecDrbd8Secondary(self, feedback_fn):
"""Replace the secondary node for DRBD 8.
The algorithm for replace is quite complicated:
- for all disks of the instance:
- create new LVs on the new node with same names
- shutdown the drbd device on the old secondary
- disconnect the drbd network on the primary
- create the drbd device on the new secondary
- network attach the drbd on the primary, using an artifice:
the drbd code for Attach() will connect to the network if it
finds a device which is connected to the good local disks but
not network enabled
- wait for sync across all devices
- remove all disks from the old secondary
Failures are not very well handled.
"""
steps_total = 6
pnode = self.instance.primary_node
# Step: check device activation
self.lu.LogStep(1, steps_total, "Check device existence")
self._CheckDisksExistence([self.instance.primary_node])
self._CheckVolumeGroup([self.instance.primary_node])
# Step: check other node consistency
self.lu.LogStep(2, steps_total, "Check peer consistency")
self._CheckDisksConsistency(self.instance.primary_node, True, True)
# Step: create new storage
self.lu.LogStep(3, steps_total, "Allocate new storage")
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
disks = AnnotateDiskParams(self.instance, inst_disks, self.cfg)
excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg,
self.new_node_uuid)
for idx, dev in enumerate(disks):
self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
(self.cfg.GetNodeName(self.new_node_uuid), idx))
# we pass force_create=True to force LVM creation
for new_lv in dev.children:
try:
_CreateBlockDevInner(self.lu, self.new_node_uuid, self.instance,
new_lv, True, GetInstanceInfoText(self.instance),
False, excl_stor)
except errors.DeviceCreationError, e:
raise errors.OpExecError("Can't create block device: %s" % e.message)
# Step 4: dbrd minors and drbd setups changes
# after this, we must manually remove the drbd minors on both the
# error and the success paths
self.lu.LogStep(4, steps_total, "Changing drbd configuration")
minors = self.cfg.AllocateDRBDMinor([self.new_node_uuid
for _ in inst_disks],
self.instance.uuid)
logging.debug("Allocated minors %r", minors)
iv_names = {}
for idx, (dev, new_minor) in enumerate(zip(inst_disks, minors)):
self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
(self.cfg.GetNodeName(self.new_node_uuid), idx))
# create new devices on new_node; note that we create two IDs:
# one without port, so the drbd will be activated without
# networking information on the new node at this stage, and one
# with network, for the latter activation in step 4
(o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
if self.instance.primary_node == o_node1:
p_minor = o_minor1
else:
assert self.instance.primary_node == o_node2, "Three-node instance?"
p_minor = o_minor2
new_alone_id = (self.instance.primary_node, self.new_node_uuid, None,
p_minor, new_minor, o_secret)
new_net_id = (self.instance.primary_node, self.new_node_uuid, o_port,
p_minor, new_minor, o_secret)
iv_names[idx] = (dev, dev.children, new_net_id)
logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
new_net_id)
new_drbd = objects.Disk(dev_type=constants.DT_DRBD8,
logical_id=new_alone_id,
children=dev.children,
size=dev.size,
params={})
(anno_new_drbd,) = AnnotateDiskParams(self.instance, [new_drbd],
self.cfg)
try:
CreateSingleBlockDev(self.lu, self.new_node_uuid, self.instance,
anno_new_drbd,
GetInstanceInfoText(self.instance), False,
excl_stor)
except errors.GenericError:
self.cfg.ReleaseDRBDMinors(self.instance.uuid)
raise
# We have new devices, shutdown the drbd on the old secondary
for idx, dev in enumerate(inst_disks):
self.lu.LogInfo("Shutting down drbd for disk/%d on old node", idx)
msg = self.rpc.call_blockdev_shutdown(self.target_node_uuid,
(dev, self.instance)).fail_msg
if msg:
self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
"node: %s" % (idx, msg),
hint=("Please cleanup this device manually as"
" soon as possible"))
self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
result = self.rpc.call_drbd_disconnect_net(
[pnode], (inst_disks, self.instance))[pnode]
msg = result.fail_msg
if msg:
# detaches didn't succeed (unlikely)
self.cfg.ReleaseDRBDMinors(self.instance.uuid)
raise errors.OpExecError("Can't detach the disks from the network on"
" old node: %s" % (msg,))
# if we managed to detach at least one, we update all the disks of
# the instance to point to the new secondary
self.lu.LogInfo("Updating instance configuration")
for dev, _, new_logical_id in iv_names.itervalues():
dev.logical_id = new_logical_id
self.cfg.Update(dev, feedback_fn)
self.cfg.Update(self.instance, feedback_fn)
# Release all node locks (the configuration has been updated)
ReleaseLocks(self.lu, locking.LEVEL_NODE)
# and now perform the drbd attach
self.lu.LogInfo("Attaching primary drbds to new secondary"
" (standalone => connected)")
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
self.new_node_uuid],
(inst_disks, self.instance),
self.instance.name,
False)
for to_node, to_result in result.items():
msg = to_result.fail_msg
if msg:
raise errors.OpExecError(
"Can't attach drbd disks on node %s: %s (please do a gnt-instance "
"info to see the status of disks)" %
(self.cfg.GetNodeName(to_node), msg))
cstep = itertools.count(5)
if self.early_release:
self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
self._RemoveOldStorage(self.target_node_uuid, iv_names)
# TODO: Check if releasing locks early still makes sense
ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
else:
# Release all resource locks except those used by the instance
ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
keep=self.node_secondary_ip.keys())
# TODO: Can the instance lock be downgraded here? Take the optional disk
# shutdown in the caller into consideration.
# Wait for sync
# This can fail as the old devices are degraded and _WaitForSync
# does a combined result over all disks, so we don't check its return value
self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
WaitForSync(self.lu, self.instance)
# Check all devices manually
self._CheckDevices(self.instance.primary_node, iv_names)
# Step: remove old storage
if not self.early_release:
self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
self._RemoveOldStorage(self.target_node_uuid, iv_names)
class TemporaryDisk():
  """ Creates a new temporary bootable disk, and makes sure it is destroyed.

  Is a context manager, and should be used with the ``with`` statement as such.

  The disk is guaranteed to be created at index 0, shifting any other disks of
  the instance by one place, and allowing the instance to be booted with the
  content of the disk.

  """
  def __init__(self, lu, instance, disks, feedback_fn,
               shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT):
    """ Constructor storing arguments until used later.

    @type lu: L{ganeti.cmdlib.base.LogicalUnit}
    @param lu: The LU within which this disk is created.

    @type instance: L{ganeti.objects.Instance}
    @param instance: The instance to which the disk should be added

    @type disks: list of triples (disk template, disk access mode, int)
    @param disks:
      disk specification, which is a list of triples containing the
      disk template (e.g., L{constants.DT_PLAIN}), the disk access
      mode (i.e., L{constants.DISK_RDONLY} or L{constants.DISK_RDWR}),
      and size in MiB.

    @type feedback_fn: function
    @param feedback_fn: Function used to log progress

    @type shutdown_timeout: int
    @param shutdown_timeout: Timeout (seconds) used when shutting the
      instance down before touching its disks.

    """
    self._lu = lu
    self._instance = instance
    self._disks = disks
    self._feedback_fn = feedback_fn
    self._shutdown_timeout = shutdown_timeout

  def _EnsureInstanceDiskState(self):
    """ Ensures that the instance is down, and its disks inactive.

    All the operations related to the creation and destruction of disks require
    that the instance is down and that the disks are inactive. This function is
    invoked to make it so.

    """
    # The instance needs to be down before any of these actions occur
    # Whether it is must be checked manually through a RPC - configuration
    # reflects only the desired state
    self._feedback_fn("Shutting down instance")
    result = self._lu.rpc.call_instance_shutdown(self._instance.primary_node,
                                                 self._instance,
                                                 self._shutdown_timeout,
                                                 self._lu.op.reason)
    result.Raise("Shutdown of instance '%s' while removing temporary disk "
                 "failed" % self._instance.name)

    # Disks need to be deactivated prior to being removed
    # The disks_active configuration entry should match the actual state
    if self._instance.disks_active:
      self._feedback_fn("Deactivating disks")
      ShutdownInstanceDisks(self._lu, self._instance)

  def __enter__(self):
    """ Context manager entry function, creating the disk.

    @rtype: L{ganeti.objects.Disk}
    @return: The disk object created.

    """
    self._EnsureInstanceDiskState()

    new_disks = []

    # The iv_name of the disk intentionally diverges from Ganeti's standards, as
    # this disk should be very temporary and its presence should be reported.
    # With the special iv_name, gnt-cluster verify detects the disk and warns
    # the user of its presence. Removing the disk restores the instance to its
    # proper state, despite an error that appears when the removal is performed.
    #
    # Cleanup: the previous enumerate() index here was unused; indices are
    # only assigned below when the disks are added to the configuration.
    for disk_template, disk_access, disk_size in self._disks:
      new_disk = objects.Disk()
      new_disk.dev_type = disk_template
      new_disk.mode = disk_access
      new_disk.uuid = self._lu.cfg.GenerateUniqueID(self._lu.proc.GetECId())
      new_disk.logical_id = (self._lu.cfg.GetVGName(), new_disk.uuid)
      new_disk.params = {}
      new_disk.size = disk_size
      new_disks.append(new_disk)

    self._feedback_fn("Attempting to create temporary disk")

    self._undoing_info = CreateDisks(self._lu, self._instance, disks=new_disks)
    for idx, new_disk in enumerate(new_disks):
      self._lu.cfg.AddInstanceDisk(self._instance.uuid, new_disk, idx=idx)
    self._instance = self._lu.cfg.GetInstanceInfo(self._instance.uuid)

    self._feedback_fn("Temporary disk created")

    self._new_disks = new_disks

    return new_disks

  def __exit__(self, exc_type, _value, _traceback):
    """ Context manager exit function, destroying the disk.

    """
    if exc_type:
      self._feedback_fn("Exception raised, cleaning up temporary disk")
    else:
      self._feedback_fn("Regular cleanup of temporary disk")

    try:
      self._EnsureInstanceDiskState()

      _UndoCreateDisks(self._lu, self._undoing_info, self._instance)

      for disk in self._new_disks:
        self._lu.cfg.RemoveInstanceDisk(self._instance.uuid, disk.uuid)
      self._instance = self._lu.cfg.GetInstanceInfo(self._instance.uuid)

      self._feedback_fn("Temporary disk removed")
    except:
      # Deliberately broad: report the cleanup failure to the operator, then
      # re-raise the original exception unchanged.
      self._feedback_fn("Disk cleanup failed; it will have to be removed "
                        "manually")
      raise
|
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from tgym.core import Env
from tgym.utils import calc_spread
# Use a dark background and enlarged fonts/markers so the live rendering of
# the trading environment (SpreadTrading.render) is readable.
plt.style.use('dark_background')
mpl.rcParams.update(
    {
        "font.size": 15,
        "axes.labelsize": 15,
        "lines.linewidth": 1,
        "lines.markersize": 3
    }
)
class SpreadTrading(Env):
    """Class for a discrete (buy/hold/sell) spread trading environment.
    """

    # One-hot encodings of the discrete action space.
    _actions = {
        'hold': np.array([1, 0, 0]),
        'buy': np.array([0, 1, 0]),
        'sell': np.array([0, 0, 1])
    }

    # One-hot encodings of the possible market positions.
    _positions = {
        'flat': np.array([1, 0, 0]),
        'long': np.array([0, 1, 0]),
        'short': np.array([0, 0, 1])
    }

    def __init__(self, data_generator, spread_coefficients, game_length=1000, trading_fee=0, time_fee=0, history_length=2):
        """Initialisation function

        Args:
            data_generator (tgym.core.DataGenerator): A data
                generator object yielding a 1D array of bid-ask prices.
            spread_coefficients (list): A list of signed integers defining
                how much of each product to buy (positive) or sell (negative)
                when buying or selling the spread.
            game_length (int): number of steps to play the game for
            trading_fee (float): penalty for trading
            time_fee (float): time fee
            history_length (int): number of historical states to stack in the
                observation vector.
        """
        assert data_generator.n_products == len(spread_coefficients)
        self._data_generator = data_generator
        self._spread_coefficients = spread_coefficients
        self._first_render = True
        self._trading_fee = trading_fee
        self._time_fee = time_fee
        self._game_length = game_length
        self.n_actions = 3
        self._prices_history = []
        self._history_length = history_length
        self.reset()

    def reset(self):
        """Reset the trading environment. Reset rewards, data generator...

        Returns:
            observation (numpy.array): observation of the state
        """
        self._iteration = 0
        self._data_generator.rewind()
        self._total_reward = 0
        self._total_pnl = 0
        self._position = self._positions['flat']
        self._entry_price = 0
        self._exit_price = 0
        # Bug fix: start each episode with a fresh history instead of
        # appending to the previous episode's prices, which grew without
        # bound across resets. Observations only ever read the last
        # `history_length` entries, so clearing does not change what the
        # agent sees.
        self._prices_history = []
        for _ in range(self._history_length):
            self._prices_history.append(self._data_generator.next())
        observation = self._get_observation()
        self.state_shape = observation.shape
        self._action = self._actions['hold']
        return observation

    def step(self, action):
        """Take an action (buy/sell/hold) and computes the immediate reward.

        Args:
            action (numpy.array): Action to be taken, one-hot encoded.

        Returns:
            tuple:
                - observation (numpy.array): Agent's observation of the current environment.
                - reward (float) : Amount of reward returned after previous action.
                - done (bool): Whether the episode has ended, in which case further step() calls will return undefined results.
                - info (str): Contains auxiliary diagnostic information (helpful for debugging, and sometimes learning).
        """
        assert any([(action == x).all() for x in self._actions.values()])
        self._action = action
        self._iteration += 1
        reward = -self._time_fee
        instant_pnl = 0
        info = {}
        done = False
        if all(action == self._actions['buy']):
            reward -= self._trading_fee
            if all(self._position == self._positions['flat']):
                # Open a long position at the ask price.
                self._position = self._positions['long']
                self._entry_price = calc_spread(
                    self._prices_history[-1], self._spread_coefficients)[1]  # Ask
                info['entry_price'] = self._entry_price
                info['action'] = ['hold', 'buy', 'sell'][list(action).index(1)]
            elif all(self._position == self._positions['short']):
                # Close an existing short at the ask price and realise PnL.
                self._exit_price = calc_spread(
                    self._prices_history[-1], self._spread_coefficients)[1]  # Ask
                info['exit_price'] = self._exit_price
                info['action'] = ['hold', 'buy', 'sell'][list(action).index(1)]
                instant_pnl = self._calc_int_pnl()
                info['instant_pnl'] = instant_pnl
                self._position = self._positions['flat']
                self._entry_price = 0
        elif all(action == self._actions['sell']):
            reward -= self._trading_fee
            if all(self._position == self._positions['flat']):
                # Open a short position at the bid price.
                self._position = self._positions['short']
                self._entry_price = calc_spread(
                    self._prices_history[-1], self._spread_coefficients)[0]  # Bid
                info['entry_price'] = self._entry_price
                info['action'] = ['hold', 'buy', 'sell'][list(action).index(1)]
            elif all(self._position == self._positions['long']):
                # Close an existing long at the bid price and realise PnL.
                self._exit_price = calc_spread(
                    self._prices_history[-1], self._spread_coefficients)[0]  # Bid
                info['exit_price'] = self._exit_price
                info['action'] = ['hold', 'buy', 'sell'][list(action).index(1)]
                instant_pnl = self._calc_int_pnl()
                info['instant_pnl'] = instant_pnl
                self._position = self._positions['flat']
                self._entry_price = 0
        reward += instant_pnl
        self._total_pnl += instant_pnl
        self._total_reward += reward

        # Game over logic
        try:
            self._prices_history.append(self._data_generator.next())
        except StopIteration:
            done = True
            info['status'] = 'No more data.'
        if self._iteration >= self._game_length:
            done = True
            info['status'] = 'Time out.'

        observation = self._get_observation()
        return observation, reward, done, info

    def render(self, savefig=False, filename='myfig'):
        """Matlplotlib rendering of each step.

        Args:
            savefig (bool): Whether to save the figure as an image or not.
            filename (str): Name of the image file.
        """
        if self._first_render:
            # Lazily build the figure: one axis per product, plus one for the
            # spread itself when trading more than one product.
            self._f, self._ax = plt.subplots(
                len(self._spread_coefficients) + int(len(self._spread_coefficients) > 1),
                sharex=True
            )
            if len(self._spread_coefficients) == 1:
                # plt.subplots returns a bare Axes for a single subplot; wrap
                # it in a list so the indexing below works uniformly.
                self._ax = [self._ax]
            self._f.set_size_inches(12, 6)
            self._first_render = False
        if len(self._spread_coefficients) > 1:
            # TODO: To be checked
            for prod_i in range(len(self._spread_coefficients)):
                bid = self._prices_history[-1][2 * prod_i]
                ask = self._prices_history[-1][2 * prod_i + 1]
                self._ax[prod_i].clear()
                self._ax[prod_i].plot([self._iteration, self._iteration + 1],
                                      [bid, bid], color='white')
                self._ax[prod_i].plot([self._iteration, self._iteration + 1],
                                      [ask, ask], color='white')
                self._ax[prod_i].set_title('Product {} (spread coef {})'.format(
                    prod_i, str(self._spread_coefficients[prod_i])))

        # Spread price
        prices = self._prices_history[-1]
        bid, ask = calc_spread(prices, self._spread_coefficients)
        self._ax[-1].plot([self._iteration, self._iteration + 1],
                          [bid, bid], color='white')
        self._ax[-1].plot([self._iteration, self._iteration + 1],
                          [ask, ask], color='white')
        ymin, ymax = self._ax[-1].get_ylim()
        yrange = ymax - ymin
        # Mark the action taken this step on the spread axis.
        if (self._action == self._actions['sell']).all():
            self._ax[-1].scatter(self._iteration + 0.5, bid + 0.03 *
                                 yrange, color='orangered', marker='v')
        elif (self._action == self._actions['buy']).all():
            self._ax[-1].scatter(self._iteration + 0.5, ask - 0.03 *
                                 yrange, color='lawngreen', marker='^')
        plt.suptitle('Cumulated Reward: ' + "%.2f" % self._total_reward + ' ~ ' +
                     'Cumulated PnL: ' + "%.2f" % self._total_pnl + ' ~ ' +
                     'Position: ' + ['flat', 'long', 'short'][list(self._position).index(1)] + ' ~ ' +
                     'Entry Price: ' + "%.2f" % self._entry_price)
        self._f.tight_layout()
        plt.xticks(range(self._iteration)[::5])
        plt.xlim([max(0, self._iteration - 80.5), self._iteration + 0.5])
        plt.subplots_adjust(top=0.85)
        plt.pause(0.01)
        if savefig:
            plt.savefig(filename)

    def _get_observation(self):
        """Concatenate all necessary elements to create the observation.

        Returns:
            numpy.array: observation array.
        """
        return np.concatenate(
            [prices for prices in self._prices_history[-self._history_length:]] +
            [
                np.array([self._entry_price]),
                np.array(self._position)
            ]
        )

    def _calc_int_pnl(self):
        """Calculate the PnL at each position closed.

        Returns:
            float: pnl for this closed trade
        """
        if all(self._position == self._positions['long']):
            return self._exit_price - self._entry_price
        if all(self._position == self._positions['short']):
            return self._entry_price - self._exit_price
        # Defensive fallback: no open position means no realised PnL.
        # (step() only calls this while long or short, where the branches
        # above apply; previously a flat position implicitly returned None.)
        return 0.

    @staticmethod
    def random_action_fun():
        """The default random action for exploration.

        We hold 80% of the time and buy or sell 10% of the time each.

        Returns:
            numpy.array: array with a 1 on the action index, 0 elsewhere.
        """
        return np.random.multinomial(1, [0.8, 0.1, 0.1])
fixed rendering
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from tgym.core import Env
from tgym.utils import calc_spread
# Use a dark background and enlarged fonts/markers so the live rendering of
# the trading environment (SpreadTrading.render) is readable.
plt.style.use('dark_background')
mpl.rcParams.update(
    {
        "font.size": 15,
        "axes.labelsize": 15,
        "lines.linewidth": 1,
        "lines.markersize": 10
    }
)
class SpreadTrading(Env):
    """Class for a discrete (buy/hold/sell) spread trading environment.
    """

    # One-hot encodings of the discrete action space.
    _actions = {
        'hold': np.array([1, 0, 0]),
        'buy': np.array([0, 1, 0]),
        'sell': np.array([0, 0, 1])
    }

    # One-hot encodings of the possible market positions.
    _positions = {
        'flat': np.array([1, 0, 0]),
        'long': np.array([0, 1, 0]),
        'short': np.array([0, 0, 1])
    }

    def __init__(self, data_generator, spread_coefficients, game_length=1000, trading_fee=0, time_fee=0, history_length=2):
        """Initialisation function

        Args:
            data_generator (tgym.core.DataGenerator): A data
                generator object yielding a 1D array of bid-ask prices.
            spread_coefficients (list): A list of signed integers defining
                how much of each product to buy (positive) or sell (negative)
                when buying or selling the spread.
            game_length (int): number of steps to play the game for
            trading_fee (float): penalty for trading
            time_fee (float): time fee
            history_length (int): number of historical states to stack in the
                observation vector.
        """
        assert data_generator.n_products == len(spread_coefficients)
        self._data_generator = data_generator
        self._spread_coefficients = spread_coefficients
        self._first_render = True
        self._trading_fee = trading_fee
        self._time_fee = time_fee
        self._game_length = game_length
        self.n_actions = 3
        self._prices_history = []
        self._history_length = history_length
        self.reset()

    def reset(self):
        """Reset the trading environment. Reset rewards, data generator...

        Returns:
            observation (numpy.array): observation of the state
        """
        self._iteration = 0
        self._data_generator.rewind()
        self._total_reward = 0
        self._total_pnl = 0
        self._position = self._positions['flat']
        self._entry_price = 0
        self._exit_price = 0
        # Bug fix: start each episode with a fresh history instead of
        # appending to the previous episode's prices, which grew without
        # bound across resets. Observations only ever read the last
        # `history_length` entries, so clearing does not change what the
        # agent sees.
        self._prices_history = []
        for _ in range(self._history_length):
            self._prices_history.append(self._data_generator.next())
        observation = self._get_observation()
        self.state_shape = observation.shape
        self._action = self._actions['hold']
        return observation

    def step(self, action):
        """Take an action (buy/sell/hold) and computes the immediate reward.

        Args:
            action (numpy.array): Action to be taken, one-hot encoded.

        Returns:
            tuple:
                - observation (numpy.array): Agent's observation of the current environment.
                - reward (float) : Amount of reward returned after previous action.
                - done (bool): Whether the episode has ended, in which case further step() calls will return undefined results.
                - info (str): Contains auxiliary diagnostic information (helpful for debugging, and sometimes learning).
        """
        assert any([(action == x).all() for x in self._actions.values()])
        self._action = action
        self._iteration += 1
        reward = -self._time_fee
        instant_pnl = 0
        info = {}
        done = False
        if all(action == self._actions['buy']):
            reward -= self._trading_fee
            if all(self._position == self._positions['flat']):
                # Open a long position at the ask price.
                self._position = self._positions['long']
                self._entry_price = calc_spread(
                    self._prices_history[-1], self._spread_coefficients)[1]  # Ask
                info['entry_price'] = self._entry_price
                info['action'] = ['hold', 'buy', 'sell'][list(action).index(1)]
            elif all(self._position == self._positions['short']):
                # Close an existing short at the ask price and realise PnL.
                self._exit_price = calc_spread(
                    self._prices_history[-1], self._spread_coefficients)[1]  # Ask
                info['exit_price'] = self._exit_price
                info['action'] = ['hold', 'buy', 'sell'][list(action).index(1)]
                instant_pnl = self._calc_int_pnl()
                info['instant_pnl'] = instant_pnl
                self._position = self._positions['flat']
                self._entry_price = 0
        elif all(action == self._actions['sell']):
            reward -= self._trading_fee
            if all(self._position == self._positions['flat']):
                # Open a short position at the bid price.
                self._position = self._positions['short']
                self._entry_price = calc_spread(
                    self._prices_history[-1], self._spread_coefficients)[0]  # Bid
                info['entry_price'] = self._entry_price
                info['action'] = ['hold', 'buy', 'sell'][list(action).index(1)]
            elif all(self._position == self._positions['long']):
                # Close an existing long at the bid price and realise PnL.
                self._exit_price = calc_spread(
                    self._prices_history[-1], self._spread_coefficients)[0]  # Bid
                info['exit_price'] = self._exit_price
                info['action'] = ['hold', 'buy', 'sell'][list(action).index(1)]
                instant_pnl = self._calc_int_pnl()
                info['instant_pnl'] = instant_pnl
                self._position = self._positions['flat']
                self._entry_price = 0
        reward += instant_pnl
        self._total_pnl += instant_pnl
        self._total_reward += reward

        # Game over logic
        try:
            self._prices_history.append(self._data_generator.next())
        except StopIteration:
            done = True
            info['status'] = 'No more data.'
        if self._iteration >= self._game_length:
            done = True
            info['status'] = 'Time out.'

        observation = self._get_observation()
        return observation, reward, done, info

    def render(self, savefig=False, filename='myfig'):
        """Matlplotlib rendering of each step.

        Args:
            savefig (bool): Whether to save the figure as an image or not.
            filename (str): Name of the image file.
        """
        if self._first_render:
            # Lazily build the figure: one axis per product, plus one for the
            # spread itself when trading more than one product.
            self._f, self._ax = plt.subplots(
                len(self._spread_coefficients) + int(len(self._spread_coefficients) > 1),
                sharex=True
            )
            if len(self._spread_coefficients) == 1:
                # plt.subplots returns a bare Axes for a single subplot; wrap
                # it in a list so the indexing below works uniformly.
                self._ax = [self._ax]
            self._f.set_size_inches(12, 6)
            self._first_render = False
        if len(self._spread_coefficients) > 1:
            # TODO: To be checked
            for prod_i in range(len(self._spread_coefficients)):
                bid = self._prices_history[-1][2 * prod_i]
                ask = self._prices_history[-1][2 * prod_i + 1]
                self._ax[prod_i].clear()
                self._ax[prod_i].plot([self._iteration, self._iteration + 1],
                                      [bid, bid], color='white')
                self._ax[prod_i].plot([self._iteration, self._iteration + 1],
                                      [ask, ask], color='white')
                self._ax[prod_i].set_title('Product {} (spread coef {})'.format(
                    prod_i, str(self._spread_coefficients[prod_i])))

        # Spread price
        prices = self._prices_history[-1]
        bid, ask = calc_spread(prices, self._spread_coefficients)
        self._ax[-1].plot([self._iteration, self._iteration + 1],
                          [bid, bid], color='white')
        self._ax[-1].plot([self._iteration, self._iteration + 1],
                          [ask, ask], color='white')
        ymin, ymax = self._ax[-1].get_ylim()
        yrange = ymax - ymin
        # Mark the action taken this step on the spread axis.
        if (self._action == self._actions['sell']).all():
            self._ax[-1].scatter(self._iteration + 0.5, bid + 0.03 *
                                 yrange, color='orangered', marker='v')
        elif (self._action == self._actions['buy']).all():
            self._ax[-1].scatter(self._iteration + 0.5, ask - 0.03 *
                                 yrange, color='lawngreen', marker='^')
        plt.suptitle('Cumulated Reward: ' + "%.2f" % self._total_reward + ' ~ ' +
                     'Cumulated PnL: ' + "%.2f" % self._total_pnl + ' ~ ' +
                     'Position: ' + ['flat', 'long', 'short'][list(self._position).index(1)] + ' ~ ' +
                     'Entry Price: ' + "%.2f" % self._entry_price)
        self._f.tight_layout()
        plt.xticks(range(self._iteration)[::5])
        plt.xlim([max(0, self._iteration - 80.5), self._iteration + 0.5])
        plt.subplots_adjust(top=0.85)
        plt.pause(0.01)
        if savefig:
            plt.savefig(filename)

    def _get_observation(self):
        """Concatenate all necessary elements to create the observation.

        Returns:
            numpy.array: observation array.
        """
        return np.concatenate(
            [prices for prices in self._prices_history[-self._history_length:]] +
            [
                np.array([self._entry_price]),
                np.array(self._position)
            ]
        )

    def _calc_int_pnl(self):
        """Calculate the PnL at each position closed.

        Returns:
            float: pnl for this closed trade
        """
        if all(self._position == self._positions['long']):
            return self._exit_price - self._entry_price
        if all(self._position == self._positions['short']):
            return self._entry_price - self._exit_price
        # Defensive fallback: no open position means no realised PnL.
        # (step() only calls this while long or short, where the branches
        # above apply; previously a flat position implicitly returned None.)
        return 0.

    @staticmethod
    def random_action_fun():
        """The default random action for exploration.

        We hold 80% of the time and buy or sell 10% of the time each.

        Returns:
            numpy.array: array with a 1 on the action index, 0 elsewhere.
        """
        return np.random.multinomial(1, [0.8, 0.1, 0.1])
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
import time
from webkitpy.layout_tests.controllers import test_result_writer
from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models.test_results import TestResult
_log = logging.getLogger(__name__)
def run_single_test(port, options, results_directory, worker_name, driver, test_input, stop_when_done):
    """Run one layout test and return its TestResult.

    Thin convenience wrapper that builds a SingleTestRunner for the given
    test input and immediately executes it.
    """
    return SingleTestRunner(port, options, results_directory, worker_name,
                            driver, test_input, stop_when_done).run()
class SingleTestRunner(object):
(ALONGSIDE_TEST, PLATFORM_DIR, VERSION_DIR, UPDATE) = ('alongside', 'platform', 'version', 'update')
def __init__(self, port, options, results_directory, worker_name, driver, test_input, stop_when_done):
self._port = port
self._filesystem = port.host.filesystem
self._options = options
self._results_directory = results_directory
self._driver = driver
self._timeout = test_input.timeout
self._worker_name = worker_name
self._test_name = test_input.test_name
self._should_run_pixel_test = test_input.should_run_pixel_test
self._reference_files = test_input.reference_files
self._should_add_missing_baselines = test_input.should_add_missing_baselines
self._stop_when_done = stop_when_done
if self._reference_files:
# Detect and report a test which has a wrong combination of expectation files.
# For example, if 'foo.html' has two expectation files, 'foo-expected.html' and
# 'foo-expected.txt', we should warn users. One test file must be used exclusively
# in either layout tests or reftests, but not in both.
for suffix in ('.txt', '.png', '.wav'):
expected_filename = self._port.expected_filename(self._test_name, suffix)
if self._filesystem.exists(expected_filename):
_log.error('%s is a reftest, but has an unused expectation file. Please remove %s.',
self._test_name, expected_filename)
def _expected_driver_output(self):
return DriverOutput(self._port.expected_text(self._test_name),
self._port.expected_image(self._test_name),
self._port.expected_checksum(self._test_name),
self._port.expected_audio(self._test_name))
def _should_fetch_expected_checksum(self):
return self._should_run_pixel_test and not (self._options.new_baseline or self._options.reset_results)
def _driver_input(self):
# The image hash is used to avoid doing an image dump if the
# checksums match, so it should be set to a blank value if we
# are generating a new baseline. (Otherwise, an image from a
# previous run will be copied into the baseline."""
image_hash = None
if self._should_fetch_expected_checksum():
image_hash = self._port.expected_checksum(self._test_name)
return DriverInput(self._test_name, self._timeout, image_hash, self._should_run_pixel_test)
def run(self):
if self._reference_files:
if self._options.reset_results:
reftest_type = set([reference_file[0] for reference_file in self._reference_files])
result = TestResult(self._test_name, reftest_type=reftest_type)
result.type = test_expectations.SKIP
return result
return self._run_reftest()
if self._options.reset_results:
return self._run_rebaseline()
return self._run_compare_test()
def _run_compare_test(self):
driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
expected_driver_output = self._expected_driver_output()
test_result = self._compare_output(expected_driver_output, driver_output)
if self._should_add_missing_baselines:
self._add_missing_baselines(test_result, driver_output)
test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, expected_driver_output, test_result.failures)
return test_result
def _run_rebaseline(self):
driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
failures = self._handle_error(driver_output)
test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, None, failures)
# FIXME: It the test crashed or timed out, it might be better to avoid
# to write new baselines.
self._overwrite_baselines(driver_output)
return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid)
_render_tree_dump_pattern = re.compile(r"^layer at \(\d+,\d+\) size \d+x\d+\n")
def _add_missing_baselines(self, test_result, driver_output):
missingImage = test_result.has_failure_matching_types(test_failures.FailureMissingImage, test_failures.FailureMissingImageHash)
if test_result.has_failure_matching_types(test_failures.FailureMissingResult):
self._save_baseline_data(driver_output.text, '.txt', self._location_for_new_baseline(driver_output.text, '.txt'))
if test_result.has_failure_matching_types(test_failures.FailureMissingAudio):
self._save_baseline_data(driver_output.audio, '.wav', self._location_for_new_baseline(driver_output.audio, '.wav'))
if missingImage:
self._save_baseline_data(driver_output.image, '.png', self._location_for_new_baseline(driver_output.image, '.png'))
def _location_for_new_baseline(self, data, extension):
if self._options.add_platform_exceptions:
return self.VERSION_DIR
if extension == '.png':
return self.PLATFORM_DIR
if extension == '.wav':
return self.ALONGSIDE_TEST
if extension == '.txt' and self._render_tree_dump_pattern.match(data):
return self.PLATFORM_DIR
return self.ALONGSIDE_TEST
def _overwrite_baselines(self, driver_output):
location = self.VERSION_DIR if self._options.add_platform_exceptions else self.UPDATE
self._save_baseline_data(driver_output.text, '.txt', location)
self._save_baseline_data(driver_output.audio, '.wav', location)
if self._should_run_pixel_test:
self._save_baseline_data(driver_output.image, '.png', location)
def _save_baseline_data(self, data, extension, location):
if data is None:
return
port = self._port
fs = self._filesystem
if location == self.ALONGSIDE_TEST:
output_dir = fs.dirname(port.abspath_for_test(self._test_name))
elif location == self.VERSION_DIR:
output_dir = fs.join(port.baseline_version_dir(), fs.dirname(self._test_name))
elif location == self.PLATFORM_DIR:
output_dir = fs.join(port.baseline_platform_dir(), fs.dirname(self._test_name))
elif location == self.UPDATE:
output_dir = fs.dirname(port.expected_filename(self._test_name, extension))
else:
raise AssertionError('unrecognized baseline location: %s' % location)
fs.maybe_make_directory(output_dir)
output_basename = fs.basename(fs.splitext(self._test_name)[0] + "-expected" + extension)
output_path = fs.join(output_dir, output_basename)
_log.info('Writing new expected result "%s"' % port.relative_test_filename(output_path))
port.update_baseline(output_path, data)
def _handle_error(self, driver_output, reference_filename=None):
"""Returns test failures if some unusual errors happen in driver's run.
Args:
driver_output: The output from the driver.
reference_filename: The full path to the reference file which produced the driver_output.
This arg is optional and should be used only in reftests until we have a better way to know
which html file is used for producing the driver_output.
"""
failures = []
fs = self._filesystem
if driver_output.timeout:
failures.append(test_failures.FailureTimeout(bool(reference_filename)))
if reference_filename:
testname = self._port.relative_test_filename(reference_filename)
else:
testname = self._test_name
if driver_output.crash:
failures.append(test_failures.FailureCrash(bool(reference_filename),
driver_output.crashed_process_name,
driver_output.crashed_pid))
if driver_output.error:
_log.debug("%s %s crashed, (stderr lines):" % (self._worker_name, testname))
else:
_log.debug("%s %s crashed, (no stderr)" % (self._worker_name, testname))
elif driver_output.error:
_log.debug("%s %s output stderr lines:" % (self._worker_name, testname))
for line in driver_output.error.splitlines():
_log.debug(" %s" % line)
return failures
def _compare_output(self, expected_driver_output, driver_output):
failures = []
failures.extend(self._handle_error(driver_output))
if driver_output.crash:
# Don't continue any more if we already have a crash.
# In case of timeouts, we continue since we still want to see the text and image output.
return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid)
failures.extend(self._compare_text(expected_driver_output.text, driver_output.text))
failures.extend(self._compare_audio(expected_driver_output.audio, driver_output.audio))
if self._should_run_pixel_test:
failures.extend(self._compare_image(expected_driver_output, driver_output))
return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid)
def _compare_text(self, expected_text, actual_text):
failures = []
if (expected_text and actual_text and
# Assuming expected_text is already normalized.
self._port.do_text_results_differ(expected_text, self._get_normalized_output_text(actual_text))):
failures.append(test_failures.FailureTextMismatch())
elif actual_text and not expected_text:
failures.append(test_failures.FailureMissingResult())
return failures
def _compare_audio(self, expected_audio, actual_audio):
failures = []
if (expected_audio and actual_audio and
self._port.do_audio_results_differ(expected_audio, actual_audio)):
failures.append(test_failures.FailureAudioMismatch())
elif actual_audio and not expected_audio:
failures.append(test_failures.FailureMissingAudio())
return failures
def _get_normalized_output_text(self, output):
"""Returns the normalized text output, i.e. the output in which
the end-of-line characters are normalized to "\n"."""
# Running tests on Windows produces "\r\n". The "\n" part is helpfully
# changed to "\r\n" by our system (Python/Cygwin), resulting in
# "\r\r\n", when, in fact, we wanted to compare the text output with
# the normalized text expectation files.
return output.replace("\r\r\n", "\r\n").replace("\r\n", "\n")
# FIXME: This function also creates the image diff. Maybe that work should
# be handled elsewhere?
def _compare_image(self, expected_driver_output, driver_output):
failures = []
# If we didn't produce a hash file, this test must be text-only.
if driver_output.image_hash is None:
return failures
if not expected_driver_output.image:
failures.append(test_failures.FailureMissingImage())
elif not expected_driver_output.image_hash:
failures.append(test_failures.FailureMissingImageHash())
elif driver_output.image_hash != expected_driver_output.image_hash:
diff, err_str = self._port.diff_image(expected_driver_output.image, driver_output.image)
if err_str:
_log.warning(' %s : %s' % (self._test_name, err_str))
failures.append(test_failures.FailureImageHashMismatch())
driver_output.error = (driver_output.error or '') + err_str
else:
driver_output.image_diff = diff
if driver_output.image_diff:
failures.append(test_failures.FailureImageHashMismatch())
else:
# See https://bugs.webkit.org/show_bug.cgi?id=69444 for why this isn't a full failure.
_log.warning(' %s -> pixel hash failed (but diff passed)' % self._test_name)
return failures
def _run_reftest(self):
    """Run a reftest: execute the test page, then its reference page(s),
    and compare the two pixel results."""
    test_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
    total_test_time = 0
    reference_output = None
    test_result = None
    # If the test crashed, or timed out, there's no point in running the reference at all.
    # This can save a lot of execution time if we have a lot of crashes or timeouts.
    if test_output.crash or test_output.timeout:
        # Empty expectations: we only want the crash/timeout reported.
        expected_driver_output = DriverOutput(text=None, image=None, image_hash=None, audio=None)
        return self._compare_output(expected_driver_output, test_output)
    # A reftest can have multiple match references and multiple mismatch references;
    # the test fails if any mismatch matches and all of the matches don't match.
    # To minimize the number of references we have to check, we run all of the mismatches first,
    # then the matches, and short-circuit out as soon as we can.
    # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.
    putAllMismatchBeforeMatch = sorted
    reference_test_names = []
    for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
        reference_test_name = self._port.relative_test_filename(reference_filename)
        reference_test_names.append(reference_test_name)
        # References are always run as pixel tests with no expected hash.
        reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, None, should_run_pixel_test=True), self._stop_when_done)
        test_result = self._compare_output_with_reference(reference_output, test_output, reference_filename, expectation == '!=')
        if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
            break
        total_test_time += test_result.test_run_time
    assert(reference_output)
    test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, test_output, reference_output, test_result.failures)
    # FIXME: We don't really deal with a mix of reftest types properly. We pass in a set() to reftest_type
    # and only really handle the first of the references in the result.
    reftest_type = list(set([reference_file[0] for reference_file in self._reference_files]))
    return TestResult(self._test_name, test_result.failures, total_test_time + test_result.test_run_time, test_result.has_stderr, reftest_type=reftest_type, pid=test_result.pid, references=reference_test_names)
def _compare_output_with_reference(self, reference_driver_output, actual_driver_output, reference_filename, mismatch):
    """Compare the test's pixel output with one reference's output.

    Args:
        reference_driver_output: output from running the reference page.
        actual_driver_output: output from running the test page.
        reference_filename: full path of the reference, used in failure objects.
        mismatch: True for a '!=' reference (outputs are expected to differ).
    Returns a TestResult whose failures list is empty when this reference is
    satisfied.
    """
    total_test_time = reference_driver_output.test_time + actual_driver_output.test_time
    has_stderr = reference_driver_output.has_stderr() or actual_driver_output.has_stderr()
    failures = []
    failures.extend(self._handle_error(actual_driver_output))
    if failures:
        # Don't continue any more if we already have crash or timeout.
        return TestResult(self._test_name, failures, total_test_time, has_stderr)
    failures.extend(self._handle_error(reference_driver_output, reference_filename=reference_filename))
    if failures:
        return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)
    if not reference_driver_output.image_hash and not actual_driver_output.image_hash:
        failures.append(test_failures.FailureReftestNoImagesGenerated(reference_filename))
    elif mismatch:
        if reference_driver_output.image_hash == actual_driver_output.image_hash:
            diff, err_str = self._port.diff_image(reference_driver_output.image, actual_driver_output.image)
            if not diff:
                failures.append(test_failures.FailureReftestMismatchDidNotOccur(reference_filename))
            elif err_str:
                # BUG FIX: previously err_str was silently dropped and the diff
                # tool's failure was logged as "diff failed" with no reason.
                _log.error(err_str)
            else:
                _log.warning(" %s -> ref test hashes matched but diff failed" % self._test_name)
    elif reference_driver_output.image_hash != actual_driver_output.image_hash:
        diff, err_str = self._port.diff_image(reference_driver_output.image, actual_driver_output.image)
        if diff:
            failures.append(test_failures.FailureReftestMismatch(reference_filename))
        elif err_str:
            # BUG FIX: surface the image-diff error instead of claiming the
            # diff passed.
            _log.error(err_str)
        else:
            _log.warning(" %s -> ref test hashes didn't match but diff passed" % self._test_name)
    return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)
Print the error when image_diff fails when comparing reftests.
Before this patch, we'd say that the image diff passed.
Review URL: https://chromiumcodereview.appspot.com/23272007
git-svn-id: bf5cd6ccde378db821296732a091cfbcf5285fbd@156754 bbb929c8-8fbe-4397-9dbb-9b2b20218538
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
import time
from webkitpy.layout_tests.controllers import test_result_writer
from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models.test_results import TestResult
_log = logging.getLogger(__name__)
def run_single_test(port, options, results_directory, worker_name, driver, test_input, stop_when_done):
    """Convenience wrapper: build a SingleTestRunner for one test input and
    execute it, returning the resulting TestResult."""
    return SingleTestRunner(port, options, results_directory, worker_name,
                            driver, test_input, stop_when_done).run()
class SingleTestRunner(object):
    """Runs a single layout test (normal test, rebaseline run, or reftest)
    and produces a TestResult by comparing the driver's output against the
    stored expectations or reference pages."""

    # Symbolic destinations for newly written baseline files.
    (ALONGSIDE_TEST, PLATFORM_DIR, VERSION_DIR, UPDATE) = ('alongside', 'platform', 'version', 'update')

    def __init__(self, port, options, results_directory, worker_name, driver, test_input, stop_when_done):
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._results_directory = results_directory
        self._driver = driver
        self._timeout = test_input.timeout
        self._worker_name = worker_name
        self._test_name = test_input.test_name
        self._should_run_pixel_test = test_input.should_run_pixel_test
        self._reference_files = test_input.reference_files
        self._should_add_missing_baselines = test_input.should_add_missing_baselines
        self._stop_when_done = stop_when_done

        if self._reference_files:
            # Detect and report a test which has a wrong combination of expectation files.
            # For example, if 'foo.html' has two expectation files, 'foo-expected.html' and
            # 'foo-expected.txt', we should warn users. One test file must be used exclusively
            # in either layout tests or reftests, but not in both.
            for suffix in ('.txt', '.png', '.wav'):
                expected_filename = self._port.expected_filename(self._test_name, suffix)
                if self._filesystem.exists(expected_filename):
                    _log.error('%s is a reftest, but has an unused expectation file. Please remove %s.',
                               self._test_name, expected_filename)

    def _expected_driver_output(self):
        """Assemble the expected text/image/checksum/audio for this test."""
        return DriverOutput(self._port.expected_text(self._test_name),
                            self._port.expected_image(self._test_name),
                            self._port.expected_checksum(self._test_name),
                            self._port.expected_audio(self._test_name))

    def _should_fetch_expected_checksum(self):
        # When producing new baselines there is nothing meaningful to compare
        # the checksum against, so skip fetching it.
        return self._should_run_pixel_test and not (self._options.new_baseline or self._options.reset_results)

    def _driver_input(self):
        # The image hash is used to avoid doing an image dump if the
        # checksums match, so it should be set to a blank value if we
        # are generating a new baseline. (Otherwise, an image from a
        # previous run will be copied into the baseline."""
        image_hash = None
        if self._should_fetch_expected_checksum():
            image_hash = self._port.expected_checksum(self._test_name)
        return DriverInput(self._test_name, self._timeout, image_hash, self._should_run_pixel_test)

    def run(self):
        """Entry point: dispatch to reftest handling, rebaselining, or a
        normal compare run depending on the test and options."""
        if self._reference_files:
            if self._options.reset_results:
                # Reftests have no baselines to reset; report them as skipped.
                reftest_type = set([reference_file[0] for reference_file in self._reference_files])
                result = TestResult(self._test_name, reftest_type=reftest_type)
                result.type = test_expectations.SKIP
                return result
            return self._run_reftest()
        if self._options.reset_results:
            return self._run_rebaseline()
        return self._run_compare_test()

    def _run_compare_test(self):
        """Run the test and diff its output against stored expectations."""
        driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
        expected_driver_output = self._expected_driver_output()
        test_result = self._compare_output(expected_driver_output, driver_output)
        if self._should_add_missing_baselines:
            self._add_missing_baselines(test_result, driver_output)
        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, expected_driver_output, test_result.failures)
        return test_result

    def _run_rebaseline(self):
        """Run the test and overwrite its baselines with the fresh output."""
        driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
        failures = self._handle_error(driver_output)
        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, None, failures)
        # FIXME: It the test crashed or timed out, it might be better to avoid
        # to write new baselines.
        self._overwrite_baselines(driver_output)
        return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid)

    # First line of a render-tree dump; such dumps are platform-specific.
    _render_tree_dump_pattern = re.compile(r"^layer at \(\d+,\d+\) size \d+x\d+\n")

    def _add_missing_baselines(self, test_result, driver_output):
        """Write a new baseline for every result type whose expectation file
        was reported missing by the comparison."""
        missingImage = test_result.has_failure_matching_types(test_failures.FailureMissingImage, test_failures.FailureMissingImageHash)
        if test_result.has_failure_matching_types(test_failures.FailureMissingResult):
            self._save_baseline_data(driver_output.text, '.txt', self._location_for_new_baseline(driver_output.text, '.txt'))
        if test_result.has_failure_matching_types(test_failures.FailureMissingAudio):
            self._save_baseline_data(driver_output.audio, '.wav', self._location_for_new_baseline(driver_output.audio, '.wav'))
        if missingImage:
            self._save_baseline_data(driver_output.image, '.png', self._location_for_new_baseline(driver_output.image, '.png'))

    def _location_for_new_baseline(self, data, extension):
        """Choose where a new baseline of the given type should be written."""
        if self._options.add_platform_exceptions:
            return self.VERSION_DIR
        if extension == '.png':
            return self.PLATFORM_DIR
        if extension == '.wav':
            return self.ALONGSIDE_TEST
        if extension == '.txt' and self._render_tree_dump_pattern.match(data):
            # Render-tree dumps vary by platform, so keep them per-platform.
            return self.PLATFORM_DIR
        return self.ALONGSIDE_TEST

    def _overwrite_baselines(self, driver_output):
        """Replace the existing baselines with this run's output."""
        location = self.VERSION_DIR if self._options.add_platform_exceptions else self.UPDATE
        self._save_baseline_data(driver_output.text, '.txt', location)
        self._save_baseline_data(driver_output.audio, '.wav', location)
        if self._should_run_pixel_test:
            self._save_baseline_data(driver_output.image, '.png', location)

    def _save_baseline_data(self, data, extension, location):
        """Write `data` as the '-expected<extension>' file at `location`.
        No-op when data is None."""
        if data is None:
            return
        port = self._port
        fs = self._filesystem
        if location == self.ALONGSIDE_TEST:
            output_dir = fs.dirname(port.abspath_for_test(self._test_name))
        elif location == self.VERSION_DIR:
            output_dir = fs.join(port.baseline_version_dir(), fs.dirname(self._test_name))
        elif location == self.PLATFORM_DIR:
            output_dir = fs.join(port.baseline_platform_dir(), fs.dirname(self._test_name))
        elif location == self.UPDATE:
            output_dir = fs.dirname(port.expected_filename(self._test_name, extension))
        else:
            raise AssertionError('unrecognized baseline location: %s' % location)
        fs.maybe_make_directory(output_dir)
        output_basename = fs.basename(fs.splitext(self._test_name)[0] + "-expected" + extension)
        output_path = fs.join(output_dir, output_basename)
        _log.info('Writing new expected result "%s"' % port.relative_test_filename(output_path))
        port.update_baseline(output_path, data)

    def _handle_error(self, driver_output, reference_filename=None):
        """Returns test failures if some unusual errors happen in driver's run.

        Args:
          driver_output: The output from the driver.
          reference_filename: The full path to the reference file which produced the driver_output.
              This arg is optional and should be used only in reftests until we have a better way to know
              which html file is used for producing the driver_output.
        """
        failures = []
        fs = self._filesystem  # NOTE(review): fs appears unused here — confirm before removing.
        if driver_output.timeout:
            failures.append(test_failures.FailureTimeout(bool(reference_filename)))

        if reference_filename:
            testname = self._port.relative_test_filename(reference_filename)
        else:
            testname = self._test_name

        if driver_output.crash:
            failures.append(test_failures.FailureCrash(bool(reference_filename),
                                                       driver_output.crashed_process_name,
                                                       driver_output.crashed_pid))
            if driver_output.error:
                _log.debug("%s %s crashed, (stderr lines):" % (self._worker_name, testname))
            else:
                _log.debug("%s %s crashed, (no stderr)" % (self._worker_name, testname))
        elif driver_output.error:
            _log.debug("%s %s output stderr lines:" % (self._worker_name, testname))
        for line in driver_output.error.splitlines():
            _log.debug("  %s" % line)
        return failures

    def _compare_output(self, expected_driver_output, driver_output):
        """Compare actual output with expectations; returns a TestResult."""
        failures = []
        failures.extend(self._handle_error(driver_output))

        if driver_output.crash:
            # Don't continue any more if we already have a crash.
            # In case of timeouts, we continue since we still want to see the text and image output.
            return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid)

        failures.extend(self._compare_text(expected_driver_output.text, driver_output.text))
        failures.extend(self._compare_audio(expected_driver_output.audio, driver_output.audio))
        if self._should_run_pixel_test:
            failures.extend(self._compare_image(expected_driver_output, driver_output))
        return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid)

    def _compare_text(self, expected_text, actual_text):
        """Return text failures: mismatch, or missing expectation file."""
        failures = []
        if (expected_text and actual_text and
            # Assuming expected_text is already normalized.
            self._port.do_text_results_differ(expected_text, self._get_normalized_output_text(actual_text))):
            failures.append(test_failures.FailureTextMismatch())
        elif actual_text and not expected_text:
            failures.append(test_failures.FailureMissingResult())
        return failures

    def _compare_audio(self, expected_audio, actual_audio):
        """Return audio failures: mismatch, or missing expectation file."""
        failures = []
        if (expected_audio and actual_audio and
            self._port.do_audio_results_differ(expected_audio, actual_audio)):
            failures.append(test_failures.FailureAudioMismatch())
        elif actual_audio and not expected_audio:
            failures.append(test_failures.FailureMissingAudio())
        return failures

    def _get_normalized_output_text(self, output):
        """Returns the normalized text output, i.e. the output in which
        the end-of-line characters are normalized to "\n"."""
        # Running tests on Windows produces "\r\n". The "\n" part is helpfully
        # changed to "\r\n" by our system (Python/Cygwin), resulting in
        # "\r\r\n", when, in fact, we wanted to compare the text output with
        # the normalized text expectation files.
        return output.replace("\r\r\n", "\r\n").replace("\r\n", "\n")

    # FIXME: This function also creates the image diff. Maybe that work should
    # be handled elsewhere?
    def _compare_image(self, expected_driver_output, driver_output):
        """Compare pixel output against the expected image/checksum.
        Side effect: stores the computed diff (or diff-tool error text)
        back onto driver_output."""
        failures = []
        # If we didn't produce a hash file, this test must be text-only.
        if driver_output.image_hash is None:
            return failures
        if not expected_driver_output.image:
            failures.append(test_failures.FailureMissingImage())
        elif not expected_driver_output.image_hash:
            failures.append(test_failures.FailureMissingImageHash())
        elif driver_output.image_hash != expected_driver_output.image_hash:
            diff, err_str = self._port.diff_image(expected_driver_output.image, driver_output.image)
            if err_str:
                # Diff tool failed; keep its error text with the test output.
                _log.warning(' %s : %s' % (self._test_name, err_str))
                failures.append(test_failures.FailureImageHashMismatch())
                driver_output.error = (driver_output.error or '') + err_str
            else:
                driver_output.image_diff = diff
                if driver_output.image_diff:
                    failures.append(test_failures.FailureImageHashMismatch())
                else:
                    # See https://bugs.webkit.org/show_bug.cgi?id=69444 for why this isn't a full failure.
                    _log.warning(' %s -> pixel hash failed (but diff passed)' % self._test_name)
        return failures

    def _run_reftest(self):
        """Run a reftest: the test page plus its reference page(s),
        comparing the pixel results."""
        test_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
        total_test_time = 0
        reference_output = None
        test_result = None

        # If the test crashed, or timed out, there's no point in running the reference at all.
        # This can save a lot of execution time if we have a lot of crashes or timeouts.
        if test_output.crash or test_output.timeout:
            # Empty expectations so only the crash/timeout is reported.
            expected_driver_output = DriverOutput(text=None, image=None, image_hash=None, audio=None)
            return self._compare_output(expected_driver_output, test_output)

        # A reftest can have multiple match references and multiple mismatch references;
        # the test fails if any mismatch matches and all of the matches don't match.
        # To minimize the number of references we have to check, we run all of the mismatches first,
        # then the matches, and short-circuit out as soon as we can.
        # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.
        putAllMismatchBeforeMatch = sorted
        reference_test_names = []
        for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
            reference_test_name = self._port.relative_test_filename(reference_filename)
            reference_test_names.append(reference_test_name)
            # References always run as pixel tests with no expected hash.
            reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, None, should_run_pixel_test=True), self._stop_when_done)
            test_result = self._compare_output_with_reference(reference_output, test_output, reference_filename, expectation == '!=')

            if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
                break
            total_test_time += test_result.test_run_time

        assert(reference_output)
        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, test_output, reference_output, test_result.failures)

        # FIXME: We don't really deal with a mix of reftest types properly. We pass in a set() to reftest_type
        # and only really handle the first of the references in the result.
        reftest_type = list(set([reference_file[0] for reference_file in self._reference_files]))
        return TestResult(self._test_name, test_result.failures, total_test_time + test_result.test_run_time, test_result.has_stderr, reftest_type=reftest_type, pid=test_result.pid, references=reference_test_names)

    def _compare_output_with_reference(self, reference_driver_output, actual_driver_output, reference_filename, mismatch):
        """Compare the test's pixel output with one reference's output.
        `mismatch` is True for a '!=' reference (outputs must differ)."""
        total_test_time = reference_driver_output.test_time + actual_driver_output.test_time
        has_stderr = reference_driver_output.has_stderr() or actual_driver_output.has_stderr()
        failures = []
        failures.extend(self._handle_error(actual_driver_output))
        if failures:
            # Don't continue any more if we already have crash or timeout.
            return TestResult(self._test_name, failures, total_test_time, has_stderr)
        failures.extend(self._handle_error(reference_driver_output, reference_filename=reference_filename))
        if failures:
            return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)

        if not reference_driver_output.image_hash and not actual_driver_output.image_hash:
            failures.append(test_failures.FailureReftestNoImagesGenerated(reference_filename))
        elif mismatch:
            if reference_driver_output.image_hash == actual_driver_output.image_hash:
                diff, err_str = self._port.diff_image(reference_driver_output.image, actual_driver_output.image)
                if not diff:
                    failures.append(test_failures.FailureReftestMismatchDidNotOccur(reference_filename))
                elif err_str:
                    # Surface the diff tool's error instead of hiding it.
                    _log.error(err_str)
                else:
                    _log.warning(" %s -> ref test hashes matched but diff failed" % self._test_name)
        elif reference_driver_output.image_hash != actual_driver_output.image_hash:
            diff, err_str = self._port.diff_image(reference_driver_output.image, actual_driver_output.image)
            if diff:
                failures.append(test_failures.FailureReftestMismatch(reference_filename))
            elif err_str:
                # Surface the diff tool's error instead of hiding it.
                _log.error(err_str)
            else:
                _log.warning(" %s -> ref test hashes didn't match but diff passed" % self._test_name)
        return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)
|
__author__ = 'sevas'
from datetime import datetime, timedelta
from StringIO import StringIO
class Event(object):
    """A single calendar event: summary/organizer/location/description text
    fields plus start, end, and stamp datetimes (all initially unset)."""
    def __init__(self):
        # Text fields default to empty strings.
        self.summary = self.organizer = self.location = self.description = ""
        # Datetime fields are filled in by the converter.
        self.dtstart = self.dtend = self.dtstamp = None
class Calendar(object):
    """Minimal iCalendar (VCALENDAR) container that serializes its events
    with a fixed Europe/Brussels VTIMEZONE definition."""
    def __init__(self):
        self.name = ""
        self.description = ""
        self.version = "2.0"
        self.provider = "https://bitbucket.org/odebeir/geholimport"
        self.start_date = datetime.today()
        self.events = []

    def add_event(self, event):
        """Append an Event to this calendar."""
        self.events.append(event)

    def as_string(self):
        """Serialize the calendar to UTF-8 encoded iCalendar text with
        CRLF line endings."""
        stamp = "%Y%m%dT%H%M%S"
        lines = [
            "BEGIN:VCALENDAR",
            "VERSION:%s" % self.version,
            "PRODID:%s" % self.provider,
            "X-WR-CALNAME:%s" % self.name,
            "X-WR-CALDESC:%s" % self.description,
            # Static Europe/Brussels timezone block (CET/CEST with EU DST rules).
            "BEGIN:VTIMEZONE",
            "TZID:Europe/Brussels",
            "X-LIC-LOCATION:Europe/Brussels",
            "BEGIN:DAYLIGHT",
            "TZOFFSETFROM:+0100",
            "TZOFFSETTO:+0200",
            "TZNAME:CEST",
            "DTSTART:19700329T020000",
            "RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=-1SU",
            "END:DAYLIGHT",
            "BEGIN:STANDARD",
            "TZOFFSETFROM:+0200",
            "TZOFFSETTO:+0100",
            "TZNAME:CET",
            "DTSTART:19701025T030000",
            "RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU",
            "END:STANDARD",
            "END:VTIMEZONE",
        ]
        for event in self.events:
            lines.append("BEGIN:VEVENT")
            lines.append("DTSTAMP:%s" % event.dtstart.strftime(stamp))
            lines.append("DTSTART;TZID=Europe/Brussels:%s" % event.dtstart.strftime(stamp))
            lines.append("DTEND;TZID=Europe/Brussels:%s" % event.dtend.strftime(stamp))
            lines.append("SUMMARY:%s" % event.summary)
            lines.append("DESCRIPTION:%s" % event.description)
            lines.append("LOCATION:%s" % event.location)
            lines.append("ORGANIZER:%s" % event.organizer)
            #write_line(out, "GEO:5.092867;51.557655")
            lines.append("END:VEVENT")
        lines.append("END:VCALENDAR")
        out = StringIO()
        for line in lines:
            out.write(line + '\r\n')
        ical_string = out.getvalue()
        out.close()
        return ical_string.encode('utf-8')
# Maps Gehol activity-type mnemonics to human-readable (French) labels.
TYPE_TO_DESCR = {
    'THE': u'Theorie',
    'EXE': u'Exercices'
}


def convert_type_to_description(type_mnemo):
    """Return the readable label for a Gehol type mnemonic, or the mnemonic
    itself when no mapping is known."""
    # dict.get with a default replaces the explicit membership test.
    return TYPE_TO_DESCR.get(type_mnemo, type_mnemo)
def convert_geholcalendar_to_ical(gehol_calendar, first_monday):
    """Build a Calendar of iCal Events from a Gehol calendar.

    Args:
        gehol_calendar: object with `name`, `description` and `events`;
            each event is a dict with 'type', 'title', 'group', 'organizer',
            'location', 'weeks', 'day', 'start_time', 'stop_time'.
        first_monday: date of the first week's Monday as 'dd/mm/YYYY'.
    Returns the populated Calendar.
    """
    date_init = datetime.strptime(first_monday, '%d/%m/%Y')
    cal = Calendar()
    cal.description = gehol_calendar.description
    cal.name = gehol_calendar.name
    cal.start_date = date_init
    for event in gehol_calendar.events:
        # get some common values for the events we will generate next
        event_type_description = convert_type_to_description(event['type'])
        event_summary = "%s (%s) %s" % (event['title'], event_type_description, event['group'])
        event_organizer = event['organizer']
        # BUG FIX: comma-separated locations don't show well in iCal clients;
        # replace the commas with spaces.
        event_location = " ".join(event['location'].split(","))
        event_descr = "%s [%s]" % (event_summary, event_organizer)
        # One iCal event per scheduled week of this Gehol event.
        for (i, event_week) in enumerate(event['weeks']):
            delta = timedelta(days=(event_week - 1) * 7 + (event['day']))
            dtstart = date_init + delta + timedelta(hours=event['start_time'].hour,
                                                    minutes=event['start_time'].minute)
            dtend = date_init + delta + timedelta(hours=event['stop_time'].hour,
                                                  minutes=event['stop_time'].minute)
            ical_event = Event()
            ical_event.summary = event_summary
            ical_event.location = event_location
            ical_event.description = event_descr
            ical_event.organizer = event_organizer
            ical_event.dtstamp = dtstart
            ical_event.dtstart = dtstart
            ical_event.dtend = dtend
            cal.add_event(ical_event)
    return cal
comma-separated locations don't show well in iCal
__author__ = 'sevas'
from datetime import datetime, timedelta
from StringIO import StringIO
class Event(object):
    """A single calendar event: summary/organizer/location/description text
    fields plus start, end, and stamp datetimes (all initially unset)."""
    def __init__(self):
        # Text fields default to empty strings.
        self.summary = self.organizer = self.location = self.description = ""
        # Datetime fields are filled in by the converter.
        self.dtstart = self.dtend = self.dtstamp = None
class Calendar(object):
    """Minimal iCalendar (VCALENDAR) container that serializes its events
    with a fixed Europe/Brussels VTIMEZONE definition."""
    def __init__(self):
        self.name = ""
        self.description = ""
        self.version = "2.0"
        self.provider = "https://bitbucket.org/odebeir/geholimport"
        self.start_date = datetime.today()
        self.events = []

    def add_event(self, event):
        # Append an Event to this calendar.
        self.events.append(event)

    def as_string(self):
        """Serialize the calendar to UTF-8 encoded iCalendar text with
        CRLF line endings (as required by the iCalendar format)."""
        def write_line(out, line):
            out.write(line + '\r\n')
        out = StringIO()
        write_line(out, "BEGIN:VCALENDAR")
        write_line(out, "VERSION:%s" % self.version)
        write_line(out, "PRODID:%s" % self.provider)
        write_line(out, "X-WR-CALNAME:%s" % self.name)
        write_line(out, "X-WR-CALDESC:%s" % self.description)
        # Static Europe/Brussels timezone block (CET/CEST with EU DST rules).
        write_line(out, "BEGIN:VTIMEZONE")
        write_line(out, "TZID:Europe/Brussels")
        write_line(out, "X-LIC-LOCATION:Europe/Brussels")
        write_line(out, "BEGIN:DAYLIGHT")
        write_line(out, "TZOFFSETFROM:+0100")
        write_line(out, "TZOFFSETTO:+0200")
        write_line(out, "TZNAME:CEST")
        write_line(out, "DTSTART:19700329T020000")
        write_line(out, "RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=-1SU")
        write_line(out, "END:DAYLIGHT")
        write_line(out, "BEGIN:STANDARD")
        write_line(out, "TZOFFSETFROM:+0200")
        write_line(out, "TZOFFSETTO:+0100")
        write_line(out, "TZNAME:CET")
        write_line(out, "DTSTART:19701025T030000")
        write_line(out, "RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU")
        write_line(out, "END:STANDARD")
        write_line(out, "END:VTIMEZONE")
        for event in self.events:
            write_line(out, "BEGIN:VEVENT")
            # NOTE(review): DTSTAMP uses dtstart, not the event's dtstamp field — confirm intended.
            write_line(out, "DTSTAMP:%s" % event.dtstart.strftime("%Y%m%dT%H%M%S"))
            write_line(out, "DTSTART;TZID=Europe/Brussels:%s" % event.dtstart.strftime("%Y%m%dT%H%M%S"))
            write_line(out, "DTEND;TZID=Europe/Brussels:%s" % event.dtend.strftime("%Y%m%dT%H%M%S"))
            write_line(out, "SUMMARY:%s" % event.summary)
            write_line(out, "DESCRIPTION:%s" % event.description)
            write_line(out, "LOCATION:%s" % event.location)
            write_line(out, "ORGANIZER:%s" % event.organizer)
            #write_line(out, "GEO:5.092867;51.557655")
            write_line(out, "END:VEVENT")
        write_line(out, "END:VCALENDAR")
        ical_string = out.getvalue()
        out.close()
        return ical_string.encode('utf-8')
# Maps Gehol activity-type mnemonics to human-readable (French) labels.
TYPE_TO_DESCR = {
    'THE': u'Theorie',
    'EXE': u'Exercices'
}


def convert_type_to_description(type_mnemo):
    """Return the readable label for a Gehol type mnemonic, or the mnemonic
    itself when no mapping is known."""
    # dict.get with a default replaces the explicit membership test.
    return TYPE_TO_DESCR.get(type_mnemo, type_mnemo)
def convert_geholcalendar_to_ical(gehol_calendar, first_monday):
    """Build a Calendar of iCal Events from a Gehol calendar.

    Args:
        gehol_calendar: object with `name`, `description` and `events`;
            each event is a dict with 'type', 'title', 'group', 'organizer',
            'location', 'weeks', 'day', 'start_time', 'stop_time'.
        first_monday: date of the first week's Monday as 'dd/mm/YYYY'.
    Returns the populated Calendar.
    """
    date_init = datetime.strptime(first_monday, '%d/%m/%Y')
    cal = Calendar()
    cal.description = gehol_calendar.description
    cal.name = gehol_calendar.name
    cal.start_date = date_init
    for event in gehol_calendar.events:
        # FIX: removed a dead `ical_event = Event()` here — the instance was
        # always discarded and recreated inside the inner loop.
        # get some common values for the events we will generate next
        event_type_description = convert_type_to_description(event['type'])
        event_summary = "%s (%s) %s" % (event['title'], event_type_description, event['group'])
        event_organizer = event['organizer']
        # Commas don't show well in iCal LOCATION fields; use spaces instead.
        event_location = " ".join(event['location'].split(","))
        event_descr = "%s [%s]" % (event_summary, event_organizer)
        # One iCal event per scheduled week of this Gehol event.
        for (i, event_week) in enumerate(event['weeks']):
            delta = timedelta(days=(event_week - 1) * 7 + (event['day']))
            dtstart = date_init + delta + timedelta(hours=event['start_time'].hour,
                                                    minutes=event['start_time'].minute)
            dtend = date_init + delta + timedelta(hours=event['stop_time'].hour,
                                                  minutes=event['stop_time'].minute)
            ical_event = Event()
            ical_event.summary = event_summary
            ical_event.location = event_location
            ical_event.description = event_descr
            ical_event.organizer = event_organizer
            ical_event.dtstamp = dtstart
            ical_event.dtstart = dtstart
            ical_event.dtend = dtend
            cal.add_event(ical_event)
    return cal
# coding=utf-8
import numpy as np
import multiprocessing
import itertools as it
import collections as coll
from functools import partial
import logging
import h5py
import scipy.ndimage as nd
import scipy.sparse as sparse
from scipy.ndimage.measurements import label
from scipy.spatial.distance import pdist, squareform
from sklearn.metrics import precision_recall_curve
def nzcol(mat, row_idx):
    """Return the column indices of the nonzero entries in one row of a
    sparse matrix.

    Parameters
    ----------
    mat : CSR matrix
        Input matrix.
    row_idx : int
        Index of the row whose nonzero column positions are wanted.

    Returns
    -------
    nz : array of int
        Column indices of the nonzero elements of row `row_idx`.
    """
    _, cols = mat[row_idx].nonzero()
    return cols
def pixel_wise_boundary_precision_recall(pred, gt):
    """Evaluate a discrete voxel-wise boundary prediction against ground truth.

    Parameters
    ----------
    pred : np.ndarray of int or bool, arbitrary shape
        Predicted boundary map: 1 for boundary, 0 for non-boundary.
    gt : np.ndarray of int or bool, same shape as `pred`
        Ground-truth boundary map, same encoding.

    Returns
    -------
    pr, rec : float
        Precision ("true positives / all positive calls") and recall
        ("true positives / all true boundary voxels").

    Notes
    -----
    Only works on discretized predictions, not continuous confidences.
    """
    true_pos = float((gt * pred).sum())
    false_pos = (pred * (1 - gt)).sum()
    false_neg = (gt * (1 - pred)).sum()
    # NOTE(review): raises ZeroDivisionError when there are no positive calls
    # or no true boundary voxels — same as the original behavior.
    return true_pos / (true_pos + false_pos), true_pos / (true_pos + false_neg)
def wiggle_room_precision_recall(pred, boundary, margin=2, connectivity=1):
    """Voxel-wise, continuous value precision recall curve allowing drift.

    Voxel-wise precision recall evaluates predictions against a ground truth.
    Wiggle-room precision recall (WRPR, "warper") allows calls from nearby
    voxels to be counted as correct. Specifically, if a voxel is predicted to
    be a boundary within a dilation distance of `margin` (distance defined
    according to `connectivity`) of a true boundary voxel, it will be counted
    as a True Positive in the Precision, and vice-versa for the Recall.

    Parameters
    ----------
    pred : np.ndarray of float, arbitrary shape
        The prediction values, expressed as probability of observing a boundary
        (i.e. a voxel with label 1).
    boundary : np.ndarray of int, same shape as pred
        The true boundary map. 1 indicates boundary, 0 indicates non-boundary.
    margin : int, optional
        The number of dilations that define the margin. default: 2.
    connectivity : {1, ..., pred.ndim}, optional
        The morphological voxel connectivity (defined as in SciPy) for the
        dilation step.

    Returns
    -------
    ts, pred, rec : np.ndarray of float, shape `(len(np.unique(pred)+1),)`
        The prediction value thresholds corresponding to each precision and
        recall value, the precision values, and the recall values.
    """
    struct = nd.generate_binary_structure(boundary.ndim, connectivity)
    # Precision side: dilate the ground truth so predictions near a true
    # boundary count as hits.
    gtd = nd.binary_dilation(boundary, struct, margin)
    # Recall side: grey-dilate the prediction so a true boundary voxel is
    # "found" if any nearby voxel predicted it.
    struct_m = nd.iterate_structure(struct, margin)
    pred_dil = nd.grey_dilation(pred, footprint=struct_m)
    # Re-insert prediction values wiped out by the dilation so both curves are
    # computed over the same threshold set.
    missing = np.setdiff1d(np.unique(pred), np.unique(pred_dil))
    for m in missing:
        pred_dil.ravel()[np.flatnonzero(pred == m)[0]] = m
    # Precision from the dilated ground truth, recall from the dilated
    # prediction; thresholds (ts) come from the first curve.
    prec, _, ts = precision_recall_curve(gtd.ravel(), pred.ravel())
    _, rec, _ = precision_recall_curve(boundary.ravel(), pred_dil.ravel())
    return list(zip(ts, prec, rec))
def get_stratified_sample(ar, n):
    """Get a regularly-spaced sample of the unique values of an array.

    Parameters
    ----------
    ar : np.ndarray, arbitrary shape and type
        The input array.
    n : int
        The desired sample size.

    Returns
    -------
    u : np.ndarray, shape approximately (n,)
        The sampled values.

    Notes
    -----
    If `ar` has fewer than `2 * n` unique values, all of them are
    returned. The requested sample size is taken as an approximate lower
    bound.

    Examples
    --------
    >>> ar = np.array([[0, 4, 1, 3],
    ...                [4, 1, 3, 5],
    ...                [3, 5, 2, 1]])
    >>> np.unique(ar)
    array([0, 1, 2, 3, 4, 5])
    >>> get_stratified_sample(ar, 3)
    array([0, 2, 4])
    """
    values = np.unique(ar)
    if len(values) < 2 * n:
        return values
    return values[::len(values) // n]
def edit_distance(aseg, gt, size_threshold=1000, sp=None):
    """Find the number of splits and merges needed to convert `aseg` to `gt`.

    Parameters
    ----------
    aseg : np.ndarray, int type, arbitrary shape
        The candidate automatic segmentation being evaluated.
    gt : np.ndarray, int type, same shape as `aseg`
        The ground truth segmentation.
    size_threshold : int or float, optional
        Ignore splits or merges smaller than this number of voxels.
    sp : np.ndarray, int type, same shape as `aseg`, optional
        A superpixel map. If provided, compute the edit distance to the
        best possible agglomeration of `sp` to `gt`, rather than to `gt`
        itself.

    Returns
    -------
    (false_merges, false_splits) : float
        The number of splits and merges needed to convert aseg to gt.
    """
    target = gt
    if sp is not None:
        from . import agglo  # local import: avoids a circular dependency
        target = agglo.best_possible_segmentation(sp, gt)
    return raw_edit_distance(aseg, target, size_threshold)
def raw_edit_distance(aseg, gt, size_threshold=1000):
    """Compute the edit distance between two segmentations.

    Parameters
    ----------
    aseg : np.ndarray, int type, arbitrary shape
        The candidate automatic segmentation.
    gt : np.ndarray, int type, same shape as `aseg`
        The ground truth segmentation.
    size_threshold : int or float, optional
        Ignore splits or merges smaller than this number of voxels.

    Returns
    -------
    (false_merges, false_splits) : float
        The number of splits and merges required to convert aseg to gt.
    """
    # Compact both label fields so the contingency table stays small.
    aseg = relabel_from_one(aseg)[0]
    gt = relabel_from_one(gt)[0]
    # Raw voxel-overlap counts between segments (0 labels excluded).
    r = contingency_table(aseg, gt, ignore_seg=[0], ignore_gt=[0], norm=False)
    # Drop overlaps at or below the size threshold.
    r.data[r.data <= size_threshold] = 0
    # make each segment overlap count for 1, since it will be one
    # operation to fix (split or merge)
    r.data[r.data.nonzero()] /= r.data[r.data.nonzero()]
    # Each extra overlap in a column (gt segment) is one required split;
    # each extra overlap in a row (aseg segment) is one required merge.
    # The [1:] skips the background row/column left by relabeling.
    false_splits = (r.sum(axis=0)-1)[1:].sum()
    false_merges = (r.sum(axis=1)-1)[1:].sum()
    return (false_merges, false_splits)
def relabel_from_one(label_field):
    """Convert labels in an arbitrary label field to {1, ... number_of_labels}.

    This function also returns the forward map (mapping the original labels
    to the reduced labels) and the inverse map (mapping the reduced labels
    back to the original ones).

    Parameters
    ----------
    label_field : numpy ndarray (integer type)
        The input label field.

    Returns
    -------
    relabeled : numpy array of same shape as label_field
        The relabeled field, with labels in {1, ..., n}.
    forward_map : 1d numpy array of length max(label_field) + 1
        Maps original labels to reduced labels: forward_map[orig] == new.
    inverse_map : 1d numpy array of length len(np.unique(label_field))
        The length is len(np.unique(label_field)) + 1 if 0 is not in
        np.unique(label_field). Maps reduced labels back to originals.

    Examples
    --------
    >>> import numpy as np
    >>> label_field = np.array([1, 1, 5, 5, 8, 99, 42])
    >>> relab, fw, inv = relabel_from_one(label_field)
    >>> relab
    array([1, 1, 2, 2, 3, 5, 4])
    >>> inv
    array([ 0,  1,  5,  8, 42, 99])
    >>> (fw[label_field] == relab).all()
    True
    >>> (inv[relab] == label_field).all()
    True
    """
    labels = np.unique(label_field)
    labels0 = labels[labels != 0]
    m = labels.max()
    if m == len(labels0):  # nothing to do, already 1...n labels
        # Bug fix: previously this branch returned `labels` as the forward
        # map, which is wrong (and out of bounds) when 0 is absent from the
        # field, e.g. labels [1, 2, 3]. The correct forward and inverse
        # maps for an already-compact labeling are the identity.
        identity = np.arange(m + 1)
        return label_field, identity, identity
    forward_map = np.zeros(m + 1, int)
    forward_map[labels0] = np.arange(1, len(labels0) + 1)
    # The inverse map needs a leading 0 entry so inverse_map[0] == 0.
    if not (labels == 0).any():
        labels = np.concatenate(([0], labels))
    inverse_map = labels
    return forward_map[label_field], forward_map, inverse_map
def contingency_table(seg, gt, *, ignore_seg=(), ignore_gt=(), norm=True):
    """Return the contingency table for all regions in matched segmentations.

    Parameters
    ----------
    seg : np.ndarray, int type, arbitrary shape
        A candidate segmentation.
    gt : np.ndarray, int type, same shape as `seg`
        The ground truth segmentation.
    ignore_seg : iterable of int, optional
        Values to ignore in `seg`. Voxels in `seg` having a value in this
        list will not contribute to the contingency table. (default: no
        values ignored)
    ignore_gt : iterable of int, optional
        Values to ignore in `gt`. Voxels in `gt` having a value in this
        list will not contribute to the contingency table. (default: no
        values ignored)
    norm : bool, optional
        Whether to normalize the table so that it sums to 1.

    Returns
    -------
    cont : scipy.sparse.csr_matrix
        A contingency table. `cont[i, j]` will equal the number of voxels
        labeled `i` in `seg` and `j` in `gt`. (Or the proportion of such
        voxels if `norm=True`.)
    """
    segr = seg.ravel()
    gtr = gt.ravel()
    # Bug fix: `np.bool` was removed in NumPy 1.24; use builtin `bool`.
    ignored = np.zeros(segr.shape, bool)
    data = np.ones(gtr.shape)
    for i in ignore_seg:
        ignored[segr == i] = True
    for j in ignore_gt:
        ignored[gtr == j] = True
    # Ignored voxels keep their (row, col) position but contribute 0.
    data[ignored] = 0
    cont = sparse.coo_matrix((data, (segr, gtr))).tocsr()
    if norm:
        cont /= cont.sum()
    return cont
def assignment_table(seg_or_ctable, gt=None, *, dtype=np.bool_):
    """Create an assignment table of value in `seg` to `gt`.

    Parameters
    ----------
    seg_or_ctable : array of int, or 2D array of float
        The segmentation to assign. Every value in `seg` will be
        assigned to a single value in `gt`.
        Alternatively, pass a single, pre-computed contingency table
        to be converted to an assignment table.
    gt : array of int, same shape as seg
        The segmentation to assign to. Don't pass if `seg_or_cont` is
        a contingency matrix.
    dtype : numpy dtype specification
        The desired data type for the assignment matrix.

    Returns
    -------
    assignments : sparse matrix
        A matrix with `True` at position [i, j] if segment i in `seg`
        is assigned to segment j in `gt`.

    Notes
    -----
    Ties between equal overlaps are broken by adding tiny random noise
    (via `np.random.randn`), so tied cases are resolved differently from
    run to run unless the global NumPy seed is fixed.

    Examples
    --------
    >>> seg = np.array([0, 1, 1, 1, 2, 2])
    >>> gt = np.array([1, 1, 1, 2, 2, 2])
    >>> assignment_table(seg, gt).toarray()
    array([[False,  True, False],
           [False,  True, False],
           [False, False,  True]], dtype=bool)
    >>> cont = contingency_table(seg, gt)
    >>> assignment_table(cont).toarray()
    array([[False,  True, False],
           [False,  True, False],
           [False, False,  True]], dtype=bool)
    """
    if gt is None:
        # Pre-computed contingency table: copy because the tie-breaking
        # noise below perturbs `.data` in place.
        ctable = seg_or_ctable.copy()
    else:
        ctable = contingency_table(seg_or_ctable, gt, norm=False)
    # Noise amplitude is far below the smallest nonzero gap between
    # entries, so it makes row maxima unique without reordering entries
    # that differ by a real amount.
    minval = _mindiff(ctable.data)
    ctable.data += np.random.randn(ctable.data.size) * 0.01 * minval
    # Row-wise maxima, repeated once per stored entry of each row
    # (np.diff(indptr) gives the per-row nonzero counts), let us mark
    # each row's argmax directly on the CSR data array.
    maxes = ctable.max(axis=1).toarray()
    maxes_repeated = np.repeat(maxes, np.diff(ctable.indptr))
    assignments = sparse.csr_matrix((ctable.data == maxes_repeated,
                                     ctable.indices, ctable.indptr),
                                    dtype=dtype)
    # Drop the stored False entries created by the comparison.
    assignments.eliminate_zeros()
    return assignments
def _mindiff(arr):
"""Compute the smallest nonzero difference between elements in arr
Parameters
----------
arr : array
Array of *positive* numeric values.
Returns
-------
mindiff : float
The smallest nonzero difference between any two elements in arr.
Examples
--------
>>> arr = np.array([5, 5, 2.5, 7, 9.2])
>>> _mindiff(arr)
2.0
>>> arr = np.array([0.5, 0.5])
>>> _mindiff(arr)
0.5
"""
arr = np.sort(arr) # this *must* be a copy!
diffs = np.diff(arr)
diffs = diffs[diffs != 0]
if arr[0] != 0:
diffs = np.concatenate((diffs, [arr[0]]))
mindiff = np.min(diffs)
return mindiff
# note: subclassing scipy sparse matrices requires that the class name
# start with the same three letters as the given format. See:
# https://stackoverflow.com/questions/24508214/inherit-from-scipy-sparse-csr-matrix-class
# https://groups.google.com/d/msg/scipy-user/-1PIkEMFWd8/KX6idRoIqqkJ
class csrRowExpandableCSR(sparse.csr_matrix):
    """Like a scipy CSR matrix, but rows can be appended.

    Use `mat[i] = v` to append the row-vector v as row i to the matrix mat.
    Any rows between the current last row and i are filled with zeros.

    Parameters
    ----------
    arg1 :
        Any valid instantiation of a sparse.csr_matrix. This includes a
        dense matrix or 2D NumPy array, any SciPy sparse matrix, or a
        tuple of the three defining values of a scipy sparse matrix,
        (data, indices, indptr). See the documentation for
        sparse.csr_matrix for more information.
    dtype : numpy dtype specification, optional
        The data type contained in the matrix, e.g. 'float32', np.float64,
        np.complex128.
    shape : tuple of two ints, optional
        The number of rows and columns of the matrix.
    copy : bool, optional
        This argument does nothing, and is maintained for compatibility
        with the csr_matrix constructor. Because we create bigger-than-
        necessary buffer arrays, the data must always be copied.
    max_num_rows : int, optional
        The initial maximum number of rows. Note that more rows can
        always be added; this is used only for efficiency. If None,
        defaults to twice the initial number of rows.
    max_nonzero : int, optional
        The maximum number of nonzero elements. As with max_num_rows,
        this is only necessary for efficiency.
    expansion_factor : int or float, optional
        The maximum number of rows or nonzero elements will be this
        number times the initial number of rows or nonzero elements.
        This is overridden if max_num_rows or max_nonzero are provided.

    Examples
    --------
    >>> init = csrRowExpandableCSR([[0, 0, 2], [0, 4, 0]])
    >>> init[2] = np.array([9, 0, 0])
    >>> init[4] = sparse.csr_matrix([0, 0, 5])
    >>> init.nnz
    4
    >>> init.data
    array([2, 4, 9, 5], dtype=int64)
    >>> init.toarray()
    array([[0, 0, 2],
           [0, 4, 0],
           [9, 0, 0],
           [0, 0, 0],
           [0, 0, 5]], dtype=int64)
    """
    def __init__(self, arg1, shape=None, dtype=None, copy=False,
                 max_num_rows=None, max_nonzero=None,
                 expansion_factor=2):
        # Normalize any accepted input into a plain CSR matrix first.
        other = sparse.csr_matrix(arg1, shape=shape, dtype=dtype, copy=copy)
        if max_nonzero is None:
            max_nonzero = other.nnz * expansion_factor
        if max_num_rows is None:
            max_num_rows = other.shape[0] * expansion_factor
        # Logical lengths of the virtual `data`/`indices` and `indptr`
        # arrays; the buffers below are over-allocated beyond these.
        self.curr_nonzero = other.nnz
        self.curr_indptr = other.shape[0] + 1
        # Backing buffers. The public `data`/`indices`/`indptr` attributes
        # are truncated views of these, served by the properties below.
        self._data = np.empty(max_nonzero, dtype=other.dtype)
        self._indices = np.empty(max_nonzero, dtype=other.indices.dtype)
        self._indptr = np.empty(max_num_rows + 1, dtype=other.indptr.dtype)
        # The parent constructor assigns data/indices/indptr; those
        # assignments go through our property setters and fill the buffers.
        super().__init__((other.data, other.indices, other.indptr),
                         shape=other.shape, dtype=other.dtype, copy=False)

    @property
    def data(self):
        """The data array is virtual, truncated from the data "buffer", _data.
        """
        return self._data[:self.curr_nonzero]

    @data.setter
    def data(self, value):
        """Setter for the data property.

        We have to special-case for a few kinds of values.
        When creating a new instance, the csr_matrix class removes some
        zeros from the array and ends up setting data to a smaller array.
        In that case, we need to make sure that we reset `self.curr_nonzero`
        and copy the relevant part of the array.
        """
        if np.isscalar(value) or len(value) == self.curr_nonzero:
            self._data[:self.curr_nonzero] = value
        else:  # `value` is array-like of different length
            self.curr_nonzero = len(value)
            # Grow the buffer (amortized doubling) until the data fits.
            while self._data.size < self.curr_nonzero:
                self._double_data_and_indices()
            self._data[:self.curr_nonzero] = value

    @property
    def indices(self):
        """Column indices, truncated from the `_indices` buffer."""
        return self._indices[:self.curr_nonzero]

    @indices.setter
    def indices(self, value):
        # Mirrors the `data` setter; see its docstring for the rationale.
        if np.isscalar(value) or len(value) == self.curr_nonzero:
            self._indices[:self.curr_nonzero] = value
        else:  # `value` is array-like of different length
            self.curr_nonzero = len(value)
            while self._indices.size < self.curr_nonzero:
                self._double_data_and_indices()
            self._indices[:self.curr_nonzero] = value

    @property
    def indptr(self):
        """Row pointers, truncated from the `_indptr` buffer."""
        return self._indptr[:self.curr_indptr]

    @indptr.setter
    def indptr(self, value):
        # Mirrors the `data` setter, but for the row-pointer buffer.
        if np.isscalar(value) or len(value) == self.curr_indptr:
            self._indptr[:self.curr_indptr] = value
        else:  # `value` is array-like of different length
            self.curr_indptr = len(value)
            while self._indptr.size < self.curr_indptr:
                self._double_data_and_indices()
            self._indptr[:self.curr_indptr] = value

    def __setitem__(self, index, value):
        # Row assignment: appending past the last row grows the matrix.
        if np.isscalar(index):
            if index >= self.shape[0]:  # appending a row
                self._append_row_at(index, value)
            else:
                if np.isscalar(value):
                    # NOTE(review): assigning a *nonzero* scalar to an
                    # existing row is silently ignored here — confirm
                    # whether that is intentional.
                    if value == 0:  # zeroing out a row
                        self._zero_row(index)
                else:
                    super().__setitem__(index, value)
        # NOTE(review): non-scalar indices (slices, arrays) fall through
        # with no effect — confirm whether they should delegate to the
        # parent class instead.

    def _append_row_at(self, index, value):
        """Append `value` as row `index`, zero-filling any skipped rows."""
        # first: normalize the input value. We want a sparse CSR matrix as
        # input, to make data copying logic much simpler.
        if np.isscalar(value):
            value = np.full(self.shape[1], value)  # make a full row if scalar
        if not sparse.isspmatrix_csr(value):
            value = sparse.csr_matrix(value)
        # Make sure we have sufficient room for the new row.
        if index + 2 > self._indptr.size:
            self._double_indptr()
        num_values = value.nnz
        if self.curr_nonzero + num_values > self._data.size:
            self._double_data_and_indices()
        # New data occupies [i, j) at the end of the data buffer; rows
        # between the old last row and `index` get empty extents (== i).
        i, j = self.indptr[-1], self.indptr[-1] + num_values
        self._indptr[self.curr_indptr:index + 1] = i
        self._indptr[index + 1] = j
        self.curr_indptr = index + 2
        self._indices[i:j] = value.indices[:]
        self._data[i:j] = value.data[:]
        self.curr_nonzero += num_values
        # It turns out that the `shape` attribute is a property in SciPy
        # sparse matrices, and can't be set directly. So, we bypass it and
        # set the corresponding tuple directly, interfaces be damned.
        self._shape = (int(index + 1), self.shape[1])

    def _zero_row(self, index):
        """Set all elements of row `index` to 0."""
        # Zero the stored entries in place; sparsity structure is kept.
        i, j = self.indptr[index:index+2]
        self.data[i:j] = 0

    def _double_indptr(self):
        """Double the size of the array backing `indptr`.

        Doubling on demand gives amortized constant time append.
        """
        old_indptr = self._indptr
        self._indptr = np.empty(2 * old_indptr.size, old_indptr.dtype)
        self._indptr[:old_indptr.size] = old_indptr[:]

    def _double_data_and_indices(self):
        """Double size of the arrays backing `indices` and `data` attributes.

        Doubling on demand gives amortized constant time append. Since these
        two arrays are always the same size in the CSR format, they are
        doubled together in the same function.
        """
        n = self._data.size
        old_data = self._data
        self._data = np.empty(2 * n, old_data.dtype)
        self._data[:n] = old_data[:]
        old_indices = self._indices
        self._indices = np.empty(2 * n, old_indices.dtype)
        self._indices[:n] = old_indices[:]
def merge_contingency_table(a, b, ignore_seg=[0], ignore_gt=[0]):
    """Build a contingency table that can take extra rows for merges.

    Parameters
    ----------
    a : np.ndarray, int type
        The candidate segmentation.
    b : np.ndarray, int type, same shape as `a`
        The ground truth segmentation.
    ignore_seg : list of int, optional
        Labels in `a` excluded from the table.
    ignore_gt : list of int, optional
        Labels in `b` excluded from the table.

    Returns
    -------
    ct : csrRowExpandableCSR
        The contingency table of `a` against `b`, in a row-appendable
        sparse format so rows describing merged segments can be added
        later.
    """
    base_table = contingency_table(a, b,
                                   ignore_seg=ignore_seg,
                                   ignore_gt=ignore_gt)
    return csrRowExpandableCSR(base_table)
def xlogx(x, out=None, in_place=False):
    """Compute x * log_2(x).

    We define 0 * log_2(0) = 0.

    Parameters
    ----------
    x : np.ndarray or scipy.sparse.csc_matrix or csr_matrix
        The input array.
    out : same type as x (optional)
        If provided, use this array/matrix for the result. A sparse `out`
        must have the same sparsity structure as `x`.
    in_place : bool (optional, default False)
        Operate directly on x.

    Returns
    -------
    y : same type as x
        Result of x * log_2(x).
    """
    if in_place:
        y = x
    elif out is None:
        y = x.copy()
    else:
        y = out
        # Bug fix: previously `out` was used as-is, so the function
        # returned out * log2(out) and ignored `x` entirely. Copy the
        # values of `x` into `out` before transforming.
        if isinstance(y, (sparse.csc_matrix, sparse.csr_matrix)):
            y.data[:] = x.data
        else:
            y[...] = x
    if isinstance(y, sparse.csc_matrix) or isinstance(y, sparse.csr_matrix):
        z = y.data
    else:
        z = np.asarray(y)  # ensure np.matrix converted to np.array
    # Only touch nonzero entries: zeros stay zero (0 * log2(0) := 0) and
    # log2 is never evaluated at 0.
    nz = z.nonzero()
    z[nz] *= np.log2(z[nz])
    return y
def special_points_evaluate(eval_fct, coords, flatten=True, coord_format=True):
    """Return an evaluation function restricted to special coordinates.

    Parameters
    ----------
    eval_fct : function taking at least two np.ndarray of equal shapes
        The function to be used for evaluation.
    coords : np.ndarray of int, shape (n_points, n_dim) or (n_points,)
        The coordinates at which to evaluate. Either subscript format
        (one index into each dimension of the input arrays) or index
        format (a single index into the linear array); for the latter,
        use `flatten=False`.
    flatten : bool, optional
        Whether the coordinates still need raveling (default) or are
        already linear indices.
    coord_format : bool, optional
        Convert the coordinates to the tuple-of-arrays layout numpy
        expects. Set to False if they are already in that layout, or
        flattened.

    Returns
    -------
    special_eval_fct : function taking at least two np.ndarray
        Same contract as `eval_fct`, but evaluated only at the selected
        coordinates — useful e.g. to subsample a volume, or to score only
        synapse locations rather than every voxel.
    """
    if coord_format:
        coords = [coords[:, dim] for dim in range(coords.shape[1])]

    def special_eval_fct(x, y, *args, **kwargs):
        if flatten:
            # Wrap negative coordinates (in place) before raveling them.
            for dim in range(len(coords)):
                if coords[dim][0] < 0:
                    coords[dim] += x.shape[dim]
            flat_idx = np.ravel_multi_index(coords, x.shape)
        else:
            flat_idx = coords
        return eval_fct(x.ravel()[flat_idx], y.ravel()[flat_idx],
                        *args, **kwargs)

    return special_eval_fct
def make_synaptic_functions(fn, fcts):
    """Make evaluation functions that only evaluate at synaptic sites.

    Parameters
    ----------
    fn : string
        Filename containing synapse coordinates, in Raveler format. [1]
    fcts : function, or iterable of functions
        Functions to be converted to synaptic evaluation.

    Returns
    -------
    syn_fcts : function or iterable of functions
        Evaluation functions that will evaluate only at synaptic sites.

    Raises
    ------
    ImportError : if the `syngeo` package [2, 3] is not installed.

    References
    ----------
    [1] https://wiki.janelia.org/wiki/display/flyem/synapse+annotation+file+format
    [2] https://github.com/janelia-flyem/synapse-geometry
    [3] https://github.com/jni/synapse-geometry
    """
    # Deferred import so the module works without syngeo installed.
    from syngeo import io as synio
    synapse_coords = \
        synio.raveler_synapse_annotations_to_coords(fn, 'arrays')
    # Flatten the per-synapse coordinate lists into one (n_points, n_dim)
    # array for special_points_evaluate.
    synapse_coords = np.array(list(it.chain(*synapse_coords)))
    make_function = partial(special_points_evaluate, coords=synapse_coords)
    # NOTE(review): `coll.Iterable` looks like `collections.Iterable`,
    # which was removed in Python 3.10 — confirm whether this should be
    # `collections.abc.Iterable`.
    if not isinstance(fcts, coll.Iterable):
        return make_function(fcts)
    else:
        return list(map(make_function, fcts))
def make_synaptic_vi(fn):
    """Shortcut for `make_synaptic_functions(fn, split_vi)`.

    Returns a split-VI function that evaluates only at the synapse
    coordinates listed in the Raveler-format file `fn`.
    """
    return make_synaptic_functions(fn, split_vi)
def vi(x, y=None, weights=None, ignore_x=[0], ignore_y=[0]):
    """Return the variation of information metric. [1]

    VI(X, Y) = H(X | Y) + H(Y | X), where H(.|.) denotes the conditional
    entropy.

    Parameters
    ----------
    x : np.ndarray
        Label field (int type) or contingency table (float). `x` is
        interpreted as a contingency table (summing to 1.0) if and only if
        `y` is not provided.
    y : np.ndarray of int, same shape as x, optional
        A label field to compare to `x`.
    weights : np.ndarray of float, shape (2,), optional
        The weights of the conditional entropies of `x` and `y`. Equal
        weights are the default.
    ignore_x, ignore_y : list of int, optional
        Any points having a label in this list are ignored in the
        evaluation. Ignore 0-labeled points by default.

    Returns
    -------
    v : float
        The variation of information between `x` and `y`.

    References
    ----------
    [1] Meila, M. (2007). Comparing clusterings - an information based
    distance. Journal of Multivariate Analysis 98, 873-895.
    """
    # Use a None sentinel instead of `weights=np.ones(2)` in the signature:
    # a default array is created once at import time and shared between
    # all calls, a classic mutable-default hazard.
    if weights is None:
        weights = np.ones(2)
    return np.dot(weights, split_vi(x, y, ignore_x, ignore_y))
def split_vi(x, y=None, ignore_x=[0], ignore_y=[0]):
    """Return the symmetric conditional entropies associated with the VI.

    The variation of information is defined as VI(X,Y) = H(X|Y) + H(Y|X).
    If Y is the ground-truth segmentation, then H(Y|X) can be interpreted
    as the amount of under-segmentation of Y and H(X|Y) is then the amount
    of over-segmentation. In other words, a perfect over-segmentation
    will have H(Y|X)=0 and a perfect under-segmentation will have H(X|Y)=0.

    If y is None, x is assumed to be a contingency table.

    Parameters
    ----------
    x : np.ndarray
        Label field (int type) or contingency table (float). `x` is
        interpreted as a contingency table (summing to 1.0) if and only
        if `y` is not provided.
    y : np.ndarray of int, same shape as x, optional
        A label field to compare to `x`.
    ignore_x, ignore_y : list of int, optional
        Any points having a label in this list are ignored in the
        evaluation. Ignore 0-labeled points by default.

    Returns
    -------
    sv : np.ndarray of float, shape (2,)
        The conditional entropies of Y|X and X|Y.

    See Also
    --------
    vi
    """
    tables = vi_tables(x, y, ignore_x, ignore_y)
    hxgy, hygx = tables[3], tables[4]
    # order: (false merges, false splits)
    return np.array([hygx.sum(), hxgy.sum()])
def vi_pairwise_matrix(segs, split=False):
    """Compute the pairwise VI distances within a set of segmentations.

    If 'split' is set to True, two matrices are returned, one for each
    direction of the conditional entropy.

    0-labeled pixels are ignored.

    Parameters
    ----------
    segs : iterable of np.ndarray of int
        A list or iterable of segmentations. All arrays must have the same
        shape.
    split : bool, optional
        Should the split VI be returned, or just the VI itself (default)?

    Returns
    -------
    vi_sq : np.ndarray of float, shape (len(segs), len(segs))
        The distances between segmentations. If `split==False`, this is a
        symmetric square matrix of distances. Otherwise, the lower triangle
        of the output matrix is the false split distance, while the upper
        triangle is the false merge distance.
    """
    # pdist works on 1D observation vectors, so flatten each segmentation.
    d = np.array([s.ravel() for s in segs])
    if split:
        # pdist requires scalar-valued metrics, so evaluate the two split-VI
        # components with separate distance functions.
        def dmerge(x, y): return split_vi(x, y)[0]
        def dsplit(x, y): return split_vi(x, y)[1]
        merges, splits = [squareform(pdist(d, df)) for df in [dmerge, dsplit]]
        # Merge distances fill the matrix, then split distances overwrite
        # the strict lower triangle.
        out = merges
        tri = np.tril(np.ones(splits.shape), -1).astype(bool)
        out[tri] = splits[tri]
    else:
        out = squareform(pdist(d, vi))
    return out
def split_vi_threshold(tup):
    """Compute split VI from a single tuple argument (multiprocessing-friendly).

    Parameters
    ----------
    tup : a tuple, (np.ndarray, np.ndarray, [int], [int], float)
        In order: the UCM for the candidate segmentation, the gold
        standard, the list of ignored labels in the segmentation, the
        list of ignored labels in the gold standard, and the threshold
        to use for the UCM.

    Returns
    -------
    sv : np.ndarray of float, shape (2,)
        The undersegmentation and oversegmentation of the comparison
        between connected components of the thresholded UCM and the
        gold standard.
    """
    ucm, gt, ignore_seg, ignore_gt, threshold = tup
    segmentation = label(ucm < threshold)[0]
    return split_vi(segmentation, gt, ignore_seg, ignore_gt)
def vi_by_threshold(ucm, gt, ignore_seg=[], ignore_gt=[], npoints=None,
                    nprocessors=None):
    """Compute the VI at every threshold of the provided UCM.

    Parameters
    ----------
    ucm : np.ndarray of float, arbitrary shape
        The Ultrametric Contour Map, where each 0.0-region is separated by a
        boundary. Higher values of the boundary indicate more confidence in
        its presence.
    gt : np.ndarray of int, same shape as `ucm`
        The ground truth segmentation.
    ignore_seg : list of int, optional
        The labels to ignore in the segmentation of the UCM.
    ignore_gt : list of int, optional
        The labels to ignore in the ground truth.
    npoints : int, optional
        The number of thresholds to sample. By default, all thresholds are
        sampled.
    nprocessors : int, optional
        Number of processors to use for the parallel evaluation of different
        thresholds.

    Returns
    -------
    result : np.ndarray of float, shape (3, npoints)
        The evaluation of segmentation at each threshold. The rows of this
        array are:
            - the threshold used
            - the undersegmentation component of VI
            - the oversegmentation component of VI
    """
    ts = np.unique(ucm)[1:]
    if npoints is None:
        npoints = len(ts)
    if len(ts) > 2 * npoints:
        # Bug fix: the step must be an integer. A float step in np.arange
        # produces float values, which numpy rejects as array indices.
        ts = ts[np.arange(1, len(ts), len(ts) // npoints)]
    if nprocessors == 1:  # this should avoid pickling overhead
        result = [split_vi_threshold((ucm, gt, ignore_seg, ignore_gt, t))
                  for t in ts]
    else:
        # Context manager terminates the pool's workers when done
        # (previously the pool was leaked).
        with multiprocessing.Pool(nprocessors) as p:
            result = p.map(split_vi_threshold,
                           ((ucm, gt, ignore_seg, ignore_gt, t) for t in ts))
    return np.concatenate((ts[np.newaxis, :], np.array(result).T), axis=0)
def rand_by_threshold(ucm, gt, npoints=None):
    """Compute Rand and Adjusted Rand indices for each threshold of a UCM.

    Parameters
    ----------
    ucm : np.ndarray, arbitrary shape
        An Ultrametric Contour Map of region boundaries having specific
        values. Higher values indicate higher boundary probabilities.
    gt : np.ndarray, int type, same shape as ucm
        The ground truth segmentation.
    npoints : int, optional
        If provided, only compute values at npoints thresholds, rather than
        all thresholds. Useful when ucm has an extremely large number of
        unique values.

    Returns
    -------
    ris : np.ndarray of float, shape (3, len(np.unique(ucm))) or (3, npoints)
        The rand indices of the segmentation induced by thresholding and
        labeling `ucm` at different values. The 3 rows of `ris` are the
        values used for thresholding, the corresponding Rand Index at that
        threshold, and the corresponding Adjusted Rand Index at that
        threshold.
    """
    ts = np.unique(ucm)[1:]
    if npoints is None:
        npoints = len(ts)
    if len(ts) > 2 * npoints:
        # Bug fix: the step must be an integer. A float step in np.arange
        # produces float values, which numpy rejects as array indices.
        ts = ts[np.arange(1, len(ts), len(ts) // npoints)]
    result = np.zeros((2, len(ts)))
    for i, t in enumerate(ts):
        seg = label(ucm < t)[0]
        result[0, i] = rand_index(seg, gt)
        result[1, i] = adj_rand_index(seg, gt)
    return np.concatenate((ts[np.newaxis, :], result), axis=0)
def adapted_rand_error(seg, gt, all_stats=False):
    r"""Compute Adapted Rand error as defined by the SNEMI3D contest [1]

    Formula is given as 1 - the maximal F-score of the Rand index
    (excluding the zero component of the original labels). Adapted
    from the SNEMI3D MATLAB script, hence the strange style.

    (Docstring is a raw string so that the TeX `\frac` below is not
    interpreted as a formfeed escape.)

    Parameters
    ----------
    seg : np.ndarray
        the segmentation to score, where each value is the label at that point
    gt : np.ndarray, same shape as seg
        the groundtruth to score against, where each value is a label
    all_stats : boolean, optional
        whether to also return precision and recall as a 3-tuple with rand_error

    Returns
    -------
    are : float
        The adapted Rand error; equal to $1 - \frac{2pr}{p + r}$,
        where $p$ and $r$ are the precision and recall described below.
    prec : float, optional
        The adapted Rand precision. (Only returned when `all_stats` is ``True``.)
    rec : float, optional
        The adapted Rand recall. (Only returned when `all_stats` is ``True``.)

    References
    ----------
    [1]: http://brainiac2.mit.edu/SNEMI3D/evaluation
    """
    # segA is truth, segB is query
    segA = np.ravel(gt)
    segB = np.ravel(seg)
    # mask to foreground in A
    mask = (segA > 0)
    segA = segA[mask]
    segB = segB[mask]
    n = segA.size
    # Label ranges; labels index directly into the joint count matrix.
    n_labels_A = np.amax(segA) + 1
    n_labels_B = np.amax(segB) + 1
    ones_data = np.ones(n)
    # Joint count matrix: p_ij[a, b] = number of voxels with gt label a
    # and seg label b.
    p_ij = sparse.csr_matrix((ones_data, (segA[:], segB[:])), shape=(n_labels_A, n_labels_B), dtype=np.uint64)
    # In the paper where adapted rand is proposed, they treat each background
    # pixel in segB as a different value (i.e., unique label for each pixel).
    # To do this, we sum them differently than others
    B_nonzero = p_ij[:, 1:]
    B_zero = p_ij[:, 0]
    # this is a count
    num_B_zero = B_zero.sum()
    # This is the new code, removing the divides by n because they cancel.
    # sum of the joint distribution, separate sum of B>0 and B=0 parts
    # NOTE(review): `sum_p_ij` is computed but never used below — confirm
    # whether it can be removed.
    sum_p_ij = (B_nonzero).power(2).sum() + num_B_zero
    # Slices excluding the background row (gt label 0) and, for b/d, the
    # background column (seg label 0).
    a = p_ij[1:n_labels_A,:]
    b = p_ij[1:n_labels_A,1:n_labels_B]
    c = p_ij[1:n_labels_A,0].todense()
    d = np.array(b.todense()) ** 2
    # Marginal sums over gt rows (a_i) and seg columns (b_i).
    a_i = np.array(a.sum(1))
    b_i = np.array(b.sum(0))
    sumA = np.sum(a_i * a_i)
    sumB = np.sum(b_i * b_i) + (np.sum(c) / n)
    sumAB = np.sum(d) + (np.sum(c) / n)
    precision = sumAB / sumB
    recall = sumAB / sumA
    # F-score of precision/recall; adapted Rand error is its complement.
    fScore = 2.0 * precision * recall / (precision + recall)
    are = 1.0 - fScore
    if all_stats:
        return (are, precision, recall)
    else:
        return are
def calc_entropy(split_vals, count):
    """Return the weighted conditional-entropy contribution of one segment.

    Parameters
    ----------
    split_vals : dict
        Maps co-occurring labels to voxel counts for a single segment.
    count : int
        The total number of voxels considered.

    Returns
    -------
    ent : float
        The segment's probability mass times the entropy of its
        overlap distribution.
    """
    seg_count = sum(split_vals.values())
    seg_prob = float(seg_count) / count
    entropy_sum = 0
    for overlap_count in split_vals.values():
        ratio = (float(overlap_count) / count) / seg_prob
        entropy_sum += ratio * np.log2(ratio)
    return -(seg_prob * entropy_sum)
def split_vi_mem(x, y):
    """Memory-frugal split VI built from plain Python dictionaries.

    Unlike `split_vi`, this accumulates per-label co-occurrence counts in
    dicts instead of a contingency matrix, and also returns the
    per-segment entropies ordered from worst to best. Voxels labeled 0 in
    either volume are ignored.

    Parameters
    ----------
    x, y : np.ndarray of int, same shape
        The two label fields to compare.

    Returns
    -------
    x_sum, y_sum : float
        The total conditional entropies (the two split-VI components).
    x_sorted, y_sorted : list of int
        Labels of `x` (resp. `y`) sorted by decreasing entropy
        contribution.
    x_ents, y_ents : dict
        Per-label entropy contributions.

    (Return order is: x_sum, y_sum, x_sorted, x_ents, y_sorted, y_ents.)
    """
    x_labels = np.unique(x)
    y_labels = np.unique(y)
    # Drop the 0 (background) label from both sides.
    x_labels0 = x_labels[x_labels != 0]
    y_labels0 = y_labels[y_labels != 0]
    # Per-label overlap counters.
    # NOTE(review): the loop variable `label` shadows any module-level
    # `label` function within this function body.
    x_map = {}
    y_map = {}
    for label in x_labels0:
        x_map[label] = {}
    for label in y_labels0:
        y_map[label] = {}
    x_flat = x.ravel()
    y_flat = y.ravel()
    count = 0
    print("Analyzing similarities")
    # Single pass over the voxels, counting label co-occurrences in both
    # directions; `count` tallies only voxels nonzero in both volumes.
    for pos in range(0,len(x_flat)):
        x_val = x_flat[pos]
        y_val = y_flat[pos]
        if x_val != 0 and y_val != 0:
            x_map[x_val].setdefault(y_val, 0)
            y_map[y_val].setdefault(x_val, 0)
            (x_map[x_val])[y_val] += 1
            (y_map[y_val])[x_val] += 1
            count += 1
    print("Finished analyzing similarities")
    # Per-segment entropy contributions and their totals.
    x_ents = {}
    y_ents = {}
    x_sum = 0.0
    y_sum = 0.0
    for key, vals in x_map.items():
        x_ents[key] = calc_entropy(vals, count)
        x_sum += x_ents[key]
    for key, vals in y_map.items():
        y_ents[key] = calc_entropy(vals, count)
        y_sum += y_ents[key]
    # Sort labels from highest (worst) to lowest entropy contribution.
    x_s = sorted(x_ents.items(), key=lambda x: x[1], reverse=True)
    y_s = sorted(y_ents.items(), key=lambda x: x[1], reverse=True)
    x_sorted = [ pair[0] for pair in x_s ]
    y_sorted = [ pair[0] for pair in y_s ]
    return x_sum, y_sum, x_sorted, x_ents, y_sorted, y_ents
def divide_rows(matrix, column, in_place=False):
    """Divide each row of `matrix` by the corresponding element in `column`.

    The result is as follows: out[i, j] = matrix[i, j] / column[i]

    Parameters
    ----------
    matrix : np.ndarray, scipy.sparse.csc_matrix or csr_matrix, shape (M, N)
        The input matrix.
    column : a 1D np.ndarray, shape (M,)
        The column dividing `matrix`.
    in_place : bool (optional, default False)
        Do the computation in-place.

    Returns
    -------
    out : same type as `matrix`
        The result of the row-wise division.
    """
    out = matrix if in_place else matrix.copy()
    # Exact type checks on purpose: subclasses (e.g. csrRowExpandableCSR)
    # take the dense branch rather than the sparse-data fast path.
    if type(out) in [sparse.csc_matrix, sparse.csr_matrix]:
        convert_to_csr = type(out) == sparse.csr_matrix
        if convert_to_csr:
            # CSC `indices` hold row numbers, which index into `column`.
            out = out.tocsc()
        divisors = np.take(column, out.indices)
        nz = out.data.nonzero()
        out.data[nz] /= divisors[nz]
        if convert_to_csr:
            out = out.tocsr()
    else:
        out /= column[:, np.newaxis]
    return out
def divide_columns(matrix, row, in_place=False):
    """Divide each column of `matrix` by the corresponding element in `row`.

    The result is as follows: out[i, j] = matrix[i, j] / row[j]

    Parameters
    ----------
    matrix : np.ndarray, scipy.sparse.csc_matrix or csr_matrix, shape (M, N)
        The input matrix.
    row : a 1D np.ndarray, shape (N,)
        The row dividing `matrix`.
    in_place : bool (optional, default False)
        Do the computation in-place.

    Returns
    -------
    out : same type as `matrix`
        The result of the column-wise division.
    """
    out = matrix if in_place else matrix.copy()
    # Exact type checks on purpose: subclasses (e.g. csrRowExpandableCSR)
    # take the dense branch rather than the sparse-data fast path.
    if type(out) in [sparse.csc_matrix, sparse.csr_matrix]:
        convert_to_csc = type(out) == sparse.csc_matrix
        if convert_to_csc:
            # CSR `indices` hold column numbers, which index into `row`.
            out = out.tocsr()
        divisors = np.take(row, out.indices)
        nz = out.data.nonzero()
        out.data[nz] /= divisors[nz]
        if convert_to_csc:
            out = out.tocsc()
    else:
        out /= row[np.newaxis, :]
    return out
def vi_tables(x, y=None, ignore_x=[0], ignore_y=[0]):
    """Return probability tables used for calculating VI.

    If y is None, x is assumed to be a contingency table.

    Parameters
    ----------
    x, y : np.ndarray
        Either x and y are provided as equal-shaped np.ndarray label fields
        (int type), or y is not provided and x is a contingency table
        (sparse.csc_matrix) that may or may not sum to 1.
    ignore_x, ignore_y : list of int, optional
        Rows and columns (respectively) to ignore in the contingency table.
        These are labels that are not counted when evaluating VI.

    Returns
    -------
    pxy : sparse.csc_matrix of float
        The normalized contingency table.
    px, py, hxgy, hygx, lpygx, lpxgy : np.ndarray of float
        The proportions of each label in `x` and `y` (`px`, `py`), the
        per-segment conditional entropies of `x` given `y` and vice-versa,
        the per-segment conditional probability p log p.
    """
    if y is not None:
        pxy = contingency_table(x, y, ignore_seg=ignore_x, ignore_gt=ignore_y)
    else:
        cont = x
        total = float(cont.sum())
        # normalize, since it is an identity op if already done
        pxy = cont / total
    # Calculate probabilities (marginals over rows and columns).
    px = np.array(pxy.sum(axis=1)).ravel()
    py = np.array(pxy.sum(axis=0)).ravel()
    # Remove zero rows/cols so the divisions below never see zero.
    nzx = px.nonzero()[0]
    nzy = py.nonzero()[0]
    nzpx = px[nzx]
    nzpy = py[nzy]
    nzpxy = pxy[nzx, :][:, nzy]
    # Calculate log conditional probabilities and entropies.
    # lpygx[i] = \sum_y{p_{y|x=i} \log{p_{y|x=i}}}; zero rows stay 0.
    lpygx = np.zeros(np.shape(px))
    lpygx[nzx] = xlogx(divide_rows(nzpxy, nzpx)).sum(axis=1).ravel()
                        # \sum_x{p_{y|x} \log{p_{y|x}}}
    hygx = -(px*lpygx) # \sum_x{p_x H(Y|X=x)} = H(Y|X)
    # Same construction in the other direction for H(X|Y).
    lpxgy = np.zeros(np.shape(py))
    lpxgy[nzy] = xlogx(divide_columns(nzpxy, nzpy)).sum(axis=0).ravel()
    hxgy = -(py*lpxgy)
    return [pxy] + list(map(np.asarray, [px, py, hxgy, hygx, lpygx, lpxgy]))
def sorted_vi_components(s1, s2, ignore1=[0], ignore2=[0], compress=False):
    """Return lists of the most entropic segments in s1|s2 and s2|s1.
    Parameters
    ----------
    s1, s2 : np.ndarray of int
        Segmentations to be compared. Usually, `s1` is a candidate
        segmentation and `s2` the ground truth / target segmentation.
    ignore1, ignore2 : list of int, optional
        Labels in these lists are ignored in computing the VI. 0-labels
        are ignored by default; pass empty lists to use all labels.
    compress : bool, optional
        If True, remap the labels to a dense range before computing VI,
        saving memory when the label values are sparse (e.g. labels 1 and
        1,000,000 produce a length-2 vector instead of length 1,000,000).
    Returns
    -------
    ii1 : np.ndarray of int
        The labels in `s1` having the most entropy. If `s1` is the
        automatic segmentation, these are the worst false merges.
    h2g1 : np.ndarray of float
        The conditional entropy corresponding to the labels in `ii1`.
    ii2 : np.ndarray of int
        The labels in `s2` having the most entropy. These correspond to
        the worst false splits.
    h1g2 : np.ndarray of float
        The conditional entropy corresponding to the labels in `ii2`.
    """
    back1 = back2 = None
    if compress:
        s1, _, back1 = relabel_from_one(s1)
        s2, _, back2 = relabel_from_one(s2)
    tables = vi_tables(s1, s2, ignore1, ignore2)
    h1g2, h2g1 = tables[3], tables[4]
    order1 = np.argsort(-h2g1)
    order2 = np.argsort(-h1g2)
    if compress:
        labels1, labels2 = back1[order1], back2[order2]
    else:
        labels1, labels2 = order1, order2
    return labels1, h2g1[order1], labels2, h1g2[order2]
def split_components(idx, cont, num_elems=4, axis=0):
    """Return the indices of the bodies most overlapping with body idx.
    Parameters
    ----------
    idx : int
        The segment index being examined.
    cont : sparse.csc_matrix
        The normalized contingency table.
    num_elems : int, optional
        The number of overlapping bodies desired.
    axis : int, optional
        The axis along which to perform the calculations. With the
        automatic segmentation as rows and the gold standard as columns,
        `axis=0` returns gold-standard IDs of the worst merges comprising
        `idx`, while `axis=1` returns automatic-segmentation IDs of the
        worst splits comprising `idx`.
    Returns
    -------
    comps : list of (int, float, float) tuples
        `num_elems` indices of the biggest overlaps comprising `idx`,
        with the fraction of `idx` they comprise and the fraction of
        themselves overlapping `idx`.
    """
    if axis == 1:
        cont = cont.T
    row_sums = np.asarray(cont.sum(axis=1)).ravel()
    col_sums = np.asarray(cont.sum(axis=0)).ravel()
    frac_of_idx = divide_rows(cont, row_sums)[idx].toarray().ravel()
    frac_of_other = divide_columns(cont, col_sums)[idx].toarray().ravel()
    top = np.argsort(-frac_of_idx)[:num_elems]
    return list(zip(top, frac_of_idx[top], frac_of_other[top]))
def rand_values(cont_table):
    """Calculate values for Rand Index and related values, e.g. Adjusted Rand.
    Parameters
    ----------
    cont_table : scipy.sparse.csc_matrix
        A contingency table of the two segmentations.
    Returns
    -------
    a, b, c, d : float
        The values necessary for computing Rand Index and related
        values. [1, 2]
    References
    ----------
    [1] Rand, W. M. (1971). Objective criteria for the evaluation of
    clustering methods. J Am Stat Assoc.
    [2] http://en.wikipedia.org/wiki/Rand_index#Definition on 2013-05-16.
    """
    n = cont_table.sum()
    # sum of squared cell counts, and squared row/column marginals
    sum_sq_cells = cont_table.multiply(cont_table).sum()
    sum_sq_rows = (np.asarray(cont_table.sum(axis=1)) ** 2).sum()
    sum_sq_cols = (np.asarray(cont_table.sum(axis=0)) ** 2).sum()
    a = (sum_sq_cells - n) / 2
    b = (sum_sq_rows - sum_sq_cells) / 2
    c = (sum_sq_cols - sum_sq_cells) / 2
    d = (sum_sq_cells + n ** 2 - sum_sq_rows - sum_sq_cols) / 2
    return a, b, c, d
def rand_index(x, y=None):
    """Return the unadjusted Rand index. [1]
    Parameters
    ----------
    x, y : np.ndarray
        Either x and y are equal-shaped np.ndarray label fields (int
        type), or y is omitted and x is a contingency table
        (sparse.csc_matrix) that is *not* normalised to sum to 1.
    Returns
    -------
    ri : float
        The Rand index of `x` and `y`.
    References
    ----------
    [1] WM Rand. (1971) Objective criteria for the evaluation of
    clustering methods. J Am Stat Assoc. 66: 846–850
    """
    if y is None:
        cont = x
    else:
        cont = contingency_table(x, y, norm=False)
    a, b, c, d = rand_values(cont)
    total = a + b + c + d
    return (a + d) / total
def adj_rand_index(x, y=None):
    """Return the adjusted Rand index.
    The Adjusted Rand Index (ARI) is the deviation of the Rand Index from
    the expected value if the marginal distributions of the contingency
    table were independent. It ranges from 1 (perfectly correlated
    marginals) to -1 (perfectly anti-correlated).
    Parameters
    ----------
    x, y : np.ndarray
        Either x and y are equal-shaped np.ndarray label fields (int
        type), or y is omitted and x is a contingency table
        (sparse.csc_matrix) that is *not* normalised to sum to 1.
    Returns
    -------
    ari : float
        The adjusted Rand index of `x` and `y`.
    """
    if y is None:
        cont = x
    else:
        cont = contingency_table(x, y, norm=False)
    a, b, c, d = rand_values(cont)
    nk = a + b + c + d
    # expected-agreement correction term, shared by numerator/denominator
    cross = (a + b) * (a + c) + (c + d) * (b + d)
    return (nk * (a + d) - cross) / (nk ** 2 - cross)
def fm_index(x, y=None):
    """Return the Fowlkes-Mallows index. [1]
    Parameters
    ----------
    x, y : np.ndarray
        Either x and y are equal-shaped np.ndarray label fields (int
        type), or y is omitted and x is a contingency table
        (sparse.csc_matrix) that is *not* normalised to sum to 1.
    Returns
    -------
    fm : float
        The FM index of `x` and `y`. 1 is perfect agreement.
    References
    ----------
    [1] EB Fowlkes & CL Mallows. (1983) A method for comparing two
    hierarchical clusterings. J Am Stat Assoc 78: 553
    """
    if y is None:
        cont = x
    else:
        cont = contingency_table(x, y, norm=False)
    a, b, c, _ = rand_values(cont)
    return a / np.sqrt((a + b) * (a + c))
def reduce_vi(fn_pattern='testing/%i/flat-single-channel-tr%i-%i-%.2f.lzf.h5',
              iterable=[(ts, tr, ts) for ts, tr in it.permutations(range(8), 2)],
              thresholds=np.arange(0, 1.01, 0.01)):
    """Compile evaluation results embedded in many .h5 files under "vi".
    Parameters
    ----------
    fn_pattern : string, optional
        A format string defining the files to be examined.
    iterable : iterable of tuples, optional
        The (partial) tuples to apply to the format string to obtain
        individual files.
    thresholds : iterable of float, optional
        The final tuple elements to apply to the format string. The final
        tuples are the product of `iterable` and `thresholds`.
    Returns
    -------
    vi : np.ndarray of float, shape (3, len(thresholds), len(iterable))
        The under and over segmentation components of VI at each threshold.
        `vi[0, :]` is the threshold, `vi[1, :]` the undersegmentation and
        `vi[2, :]` is the oversegmentation.
    """
    iterable = list(iterable)
    vi = np.zeros((3, len(thresholds), len(iterable)), np.double)
    # NOTE(review): current_vi persists across loop iterations. When a
    # file cannot be opened (or lacks a "vi" dataset), the value read
    # from the *previous* file is added again below — confirm this
    # carry-forward behavior is intended rather than skipping the entry.
    current_vi = np.zeros(3)
    for i, t in enumerate(thresholds):
        for j, v in enumerate(iterable):
            current_fn = fn_pattern % (tuple(v) + (t,))
            try:
                f = h5py.File(current_fn, 'r')
            except IOError:
                logging.warning('IOError: could not open file %s' % current_fn)
            else:
                try:
                    # take the first column of the stored "vi" dataset
                    current_vi = np.array(f['vi'])[:, 0]
                except IOError:
                    logging.warning('IOError: could not open file %s'
                                    % current_fn)
                except KeyError:
                    logging.warning('KeyError: could not find vi in file %s'
                                    % current_fn)
                finally:
                    f.close()
            vi[:, i, j] += current_vi
    return vi
def sem(ar, axis=None):
    """Calculate the standard error of the mean (SEM) along an axis.
    Parameters
    ----------
    ar : np.ndarray
        The input array of values.
    axis : int, optional
        Calculate SEM along the given axis. If omitted, calculate along
        the raveled array.
    Returns
    -------
    sem : float or np.ndarray of float
        The SEM over the whole array (if `axis=None`) or over the chosen
        axis.
    """
    if axis is None:
        axis = 0
        ar = ar.ravel()
    n = ar.shape[axis]
    return np.std(ar, axis=axis) / np.sqrt(n)
def vi_statistics(vi_table):
    """Descriptive statistics from a block of related VI evaluations.
    Parameters
    ----------
    vi_table : np.ndarray of float
        An array containing VI evaluations of various samples. The last
        axis represents the samples.
    Returns
    -------
    means, sems, medians : np.ndarrays of float
        The statistics of the given array along the samples axis.
    """
    means = np.mean(vi_table, axis=-1)
    errors = sem(vi_table, axis=-1)
    medians = np.median(vi_table, axis=-1)
    return means, errors, medians
# Include marginal probabilities
# coding=utf-8
import numpy as np
import multiprocessing
import itertools as it
import collections as coll
from functools import partial
import logging
import h5py
import scipy.ndimage as nd
import scipy.sparse as sparse
from scipy.ndimage.measurements import label
from scipy.spatial.distance import pdist, squareform
from sklearn.metrics import precision_recall_curve
def nzcol(mat, row_idx):
    """Return the nonzero column indices of a given row in a CSR matrix.
    Parameters
    ----------
    mat : CSR matrix
        Input matrix.
    row_idx : int
        The index of the row for which the nonzero elements are desired.
    Returns
    -------
    nz : array of int
        The column locations of nonzero elements of `mat[row_idx]`.
    Examples
    --------
    >>> mat = sparse.csr_matrix(np.array([[0, 1, 0, 0], [0, 5, 8, 0]]))
    >>> nzcol(mat, 1)
    array([1, 2], dtype=int32)
    >>> mat[1, 2] = 0
    >>> nzcol(mat, 1)
    array([1], dtype=int32)
    """
    _, cols = mat[row_idx].nonzero()
    return cols
def pixel_wise_boundary_precision_recall(pred, gt):
    """Evaluate voxel prediction accuracy against a ground truth.
    Parameters
    ----------
    pred : np.ndarray of int or bool, arbitrary shape
        The voxel-wise discrete prediction. 1 for boundary, 0 for
        non-boundary.
    gt : np.ndarray of int or bool, same shape as `pred`
        The ground truth boundary voxels. 1 for boundary, 0 for
        non-boundary.
    Returns
    -------
    pr : float
    rec : float
        The precision and recall values associated with the prediction.
    Notes
    -----
    Precision is "True Positives / Total Positive Calls"; Recall is
    "True Positives / Total Positives in Ground Truth". This function
    only handles discretized predictions, not continuous confidences.
    """
    true_pos = float(np.sum(gt * pred))
    false_pos = np.sum(pred * (1 - gt))
    false_neg = np.sum(gt * (1 - pred))
    precision = true_pos / (true_pos + false_pos)
    recall = true_pos / (true_pos + false_neg)
    return precision, recall
def wiggle_room_precision_recall(pred, boundary, margin=2, connectivity=1):
    """Voxel-wise, continuous value precision recall curve allowing drift.
    Voxel-wise precision recall evaluates predictions against a ground truth.
    Wiggle-room precision recall (WRPR, "warper") allows calls from nearby
    voxels to be counted as correct. Specifically, if a voxel is predicted to
    be a boundary within a dilation distance of `margin` (distance defined
    according to `connectivity`) of a true boundary voxel, it will be counted
    as a True Positive in the Precision, and vice-versa for the Recall.
    Parameters
    ----------
    pred : np.ndarray of float, arbitrary shape
        The prediction values, expressed as probability of observing a boundary
        (i.e. a voxel with label 1).
    boundary : np.ndarray of int, same shape as pred
        The true boundary map. 1 indicates boundary, 0 indicates non-boundary.
    margin : int, optional
        The number of dilations that define the margin. default: 2.
    connectivity : {1, ..., pred.ndim}, optional
        The morphological voxel connectivity (defined as in SciPy) for the
        dilation step.
    Returns
    -------
    ts, pred, rec : np.ndarray of float, shape `(len(np.unique(pred)+1),)`
        The prediction value thresholds corresponding to each precision and
        recall value, the precision values, and the recall values.
    """
    struct = nd.generate_binary_structure(boundary.ndim, connectivity)
    # dilate the ground truth: predictions near a true boundary count as TPs
    gtd = nd.binary_dilation(boundary, struct, margin)
    # dilate the (continuous) prediction for the recall side
    struct_m = nd.iterate_structure(struct, margin)
    pred_dil = nd.grey_dilation(pred, footprint=struct_m)
    # re-insert prediction values that grey dilation erased, so both
    # curves are computed over the same set of threshold values —
    # presumably to keep ts/prec/rec aligned; TODO confirm
    missing = np.setdiff1d(np.unique(pred), np.unique(pred_dil))
    for m in missing:
        pred_dil.ravel()[np.flatnonzero(pred==m)[0]] = m
    # precision against dilated ground truth, recall with dilated prediction
    prec, _, ts = precision_recall_curve(gtd.ravel(), pred.ravel())
    _, rec, _ = precision_recall_curve(boundary.ravel(), pred_dil.ravel())
    # NOTE(review): sklearn returns len(thresholds) == len(precision) - 1,
    # so zip() drops the final precision/recall pair here — verify intended.
    return list(zip(ts, prec, rec))
def get_stratified_sample(ar, n):
    """Get a regularly-spaced sample of the unique values of an array.
    Parameters
    ----------
    ar : np.ndarray, arbitrary shape and type
        The input array.
    n : int
        The desired sample size.
    Returns
    -------
    u : np.ndarray, shape approximately (n,)
    Notes
    -----
    If `len(np.unique(ar)) < 2*n`, all the values of `ar` are returned.
    The requested sample size is an approximate lower bound.
    Examples
    --------
    >>> ar = np.array([[0, 4, 1, 3],
    ...                [4, 1, 3, 5],
    ...                [3, 5, 2, 1]])
    >>> np.unique(ar)
    array([0, 1, 2, 3, 4, 5])
    >>> get_stratified_sample(ar, 3)
    array([0, 2, 4])
    """
    uniques = np.unique(ar)
    num_unique = len(uniques)
    if num_unique < 2 * n:
        return uniques
    step = num_unique // n
    return uniques[0:num_unique:step]
def edit_distance(aseg, gt, size_threshold=1000, sp=None):
    """Find the number of splits and merges needed to convert `aseg` to `gt`.
    Parameters
    ----------
    aseg : np.ndarray, int type, arbitrary shape
        The candidate automatic segmentation being evaluated.
    gt : np.ndarray, int type, same shape as `aseg`
        The ground truth segmentation.
    size_threshold : int or float, optional
        Ignore splits or merges smaller than this number of voxels.
    sp : np.ndarray, int type, same shape as `aseg`, optional
        A superpixel map. If provided, compute the edit distance to the
        best possible agglomeration of `sp` to `gt`, rather than to `gt`
        itself.
    Returns
    -------
    (false_merges, false_splits) : float
        The number of splits and merges needed to convert aseg to gt.
    """
    if sp is None:
        target = gt
    else:
        from . import agglo
        # compare against the best segmentation reachable from sp
        target = agglo.best_possible_segmentation(sp, gt)
    return raw_edit_distance(aseg, target, size_threshold)
def raw_edit_distance(aseg, gt, size_threshold=1000):
    """Compute the edit distance between two segmentations.
    Parameters
    ----------
    aseg : np.ndarray, int type, arbitrary shape
        The candidate automatic segmentation.
    gt : np.ndarray, int type, same shape as `aseg`
        The ground truth segmentation.
    size_threshold : int or float, optional
        Ignore splits or merges smaller than this number of voxels.
    Returns
    -------
    (false_merges, false_splits) : float
        The number of splits and merges required to convert aseg to gt.
    """
    aseg, _, _ = relabel_from_one(aseg)
    gt, _, _ = relabel_from_one(gt)
    table = contingency_table(aseg, gt, ignore_seg=[0], ignore_gt=[0],
                              norm=False)
    table.data[table.data <= size_threshold] = 0
    # each remaining overlap counts as exactly one fix operation
    # (a split or a merge), so flatten all surviving counts to 1
    table.data[table.data.nonzero()] = 1
    false_splits = (table.sum(axis=0)-1)[1:].sum()
    false_merges = (table.sum(axis=1)-1)[1:].sum()
    return (false_merges, false_splits)
def relabel_from_one(label_field):
    """Convert labels in an arbitrary label field to {1, ... number_of_labels}.
    Also returns the forward map (original labels -> reduced labels) and
    the inverse map (reduced labels -> original labels).
    Parameters
    ----------
    label_field : numpy ndarray (integer type)
    Returns
    -------
    relabeled : numpy array of same shape as ar
    forward_map : 1d numpy array of length np.unique(ar) + 1
    inverse_map : 1d numpy array of length len(np.unique(ar))
        The length is len(np.unique(ar)) + 1 if 0 is not in np.unique(ar)
    Examples
    --------
    >>> import numpy as np
    >>> label_field = np.array([1, 1, 5, 5, 8, 99, 42])
    >>> relab, fw, inv = relabel_from_one(label_field)
    >>> relab
    array([1, 1, 2, 2, 3, 5, 4])
    >>> inv
    array([ 0,  1,  5,  8, 42, 99])
    >>> (fw[label_field] == relab).all()
    True
    >>> (inv[relab] == label_field).all()
    True
    """
    uniques = np.unique(label_field)
    nonzero_labels = uniques[uniques != 0]
    max_label = uniques.max()
    if max_label == len(nonzero_labels):
        # already densely labeled 1..n: nothing to remap
        return label_field, uniques, uniques
    forward = np.zeros(max_label + 1, int)
    forward[nonzero_labels] = np.arange(1, len(nonzero_labels) + 1)
    if not (uniques == 0).any():
        # prepend 0 so the inverse map is indexed by the new labels
        uniques = np.concatenate(([0], uniques))
    return forward[label_field], forward, uniques
def contingency_table(seg, gt, *, ignore_seg=(), ignore_gt=(), norm=True):
    """Return the contingency table for all regions in matched segmentations.
    Parameters
    ----------
    seg : np.ndarray, int type, arbitrary shape
        A candidate segmentation.
    gt : np.ndarray, int type, same shape as `seg`
        The ground truth segmentation.
    ignore_seg : iterable of int, optional
        Values to ignore in `seg`. Voxels in `seg` having a value in this
        list will not contribute to the contingency table. (default: ())
    ignore_gt : iterable of int, optional
        Values to ignore in `gt`. Voxels in `gt` having a value in this
        list will not contribute to the contingency table. (default: ())
    norm : bool, optional
        Whether to normalize the table so that it sums to 1.
    Returns
    -------
    cont : scipy.sparse.csr_matrix
        A contingency table. `cont[i, j]` will equal the number of voxels
        labeled `i` in `seg` and `j` in `gt`. (Or the proportion of such
        voxels if `norm=True`.)
    """
    segr = seg.ravel()
    gtr = gt.ravel()
    # `np.bool` was removed in NumPy 1.24; the builtin `bool` is the
    # equivalent dtype specification.
    ignored = np.zeros(segr.shape, bool)
    data = np.ones(gtr.shape)
    # zero out the contribution of any voxel carrying an ignored label
    for i in ignore_seg:
        ignored[segr == i] = True
    for j in ignore_gt:
        ignored[gtr == j] = True
    data[ignored] = 0
    # COO accumulates duplicate (seg, gt) pairs on conversion to CSR
    cont = sparse.coo_matrix((data, (segr, gtr))).tocsr()
    if norm:
        cont /= cont.sum()
    return cont
def assignment_table(seg_or_ctable, gt=None, *, dtype=np.bool_):
    """Create an assignment table of value in `seg` to `gt`.
    Parameters
    ----------
    seg_or_ctable : array of int, or 2D array of float
        The segmentation to assign. Every value in `seg` will be
        assigned to a single value in `gt`.
        Alternatively, pass a single, pre-computed contingency table
        to be converted to an assignment table.
    gt : array of int, same shape as seg
        The segmentation to assign to. Don't pass if `seg_or_ctable` is
        a contingency matrix.
    dtype : numpy dtype specification
        The desired data type for the assignment matrix.
    Returns
    -------
    assignments : sparse matrix
        A matrix with `True` at position [i, j] if segment i in `seg`
        is assigned to segment j in `gt`.
    Examples
    --------
    >>> seg = np.array([0, 1, 1, 1, 2, 2])
    >>> gt = np.array([1, 1, 1, 2, 2, 2])
    >>> assignment_table(seg, gt).toarray()
    array([[False,  True, False],
           [False,  True, False],
           [False, False,  True]], dtype=bool)
    >>> cont = contingency_table(seg, gt)
    >>> assignment_table(cont).toarray()
    array([[False,  True, False],
           [False,  True, False],
           [False, False,  True]], dtype=bool)
    """
    if gt is None:
        # copy so the jitter below doesn't corrupt the caller's table.
        # NOTE(review): assumes the precomputed table is CSR — the code
        # below relies on `indptr`/`indices` having row-major semantics.
        ctable = seg_or_ctable.copy()
    else:
        ctable = contingency_table(seg_or_ctable, gt, norm=False)
    # Add a perturbation smaller than the smallest nonzero gap between
    # counts so that ties for "row maximum" are broken (randomly) and
    # each row keeps exactly one assignment. NOTE(review): this makes
    # tie-breaking nondeterministic across calls — confirm acceptable.
    minval = _mindiff(ctable.data)
    ctable.data += np.random.randn(ctable.data.size) * 0.01 * minval
    # align each row's maximum with the flat CSR data layout
    maxes = ctable.max(axis=1).toarray()
    maxes_repeated = np.repeat(maxes, np.diff(ctable.indptr))
    # True exactly where an entry equals its row maximum
    assignments = sparse.csr_matrix((ctable.data == maxes_repeated,
                                     ctable.indices, ctable.indptr),
                                    dtype=dtype)
    assignments.eliminate_zeros()
    return assignments
def _mindiff(arr):
"""Compute the smallest nonzero difference between elements in arr
Parameters
----------
arr : array
Array of *positive* numeric values.
Returns
-------
mindiff : float
The smallest nonzero difference between any two elements in arr.
Examples
--------
>>> arr = np.array([5, 5, 2.5, 7, 9.2])
>>> _mindiff(arr)
2.0
>>> arr = np.array([0.5, 0.5])
>>> _mindiff(arr)
0.5
"""
arr = np.sort(arr) # this *must* be a copy!
diffs = np.diff(arr)
diffs = diffs[diffs != 0]
if arr[0] != 0:
diffs = np.concatenate((diffs, [arr[0]]))
mindiff = np.min(diffs)
return mindiff
# note: subclassing scipy sparse matrices requires that the class name
# start with the same three letters as the given format. See:
# https://stackoverflow.com/questions/24508214/inherit-from-scipy-sparse-csr-matrix-class
# https://groups.google.com/d/msg/scipy-user/-1PIkEMFWd8/KX6idRoIqqkJ
class csrRowExpandableCSR(sparse.csr_matrix):
    """Like a scipy CSR matrix, but rows can be appended.
    Use `mat[i] = v` to append the row-vector v as row i to the matrix mat.
    Any rows between the current last row and i are filled with zeros.
    Parameters
    ----------
    arg1 :
        Any valid instantiation of a sparse.csr_matrix. This includes a
        dense matrix or 2D NumPy array, any SciPy sparse matrix, or a
        tuple of the three defining values of a scipy sparse matrix,
        (data, indices, indptr). See the documentation for
        sparse.csr_matrix for more information.
    dtype : numpy dtype specification, optional
        The data type contained in the matrix, e.g. 'float32', np.float64,
        np.complex128.
    shape : tuple of two ints, optional
        The number of rows and columns of the matrix.
    copy : bool, optional
        This argument does nothing, and is maintained for compatibility
        with the csr_matrix constructor. Because we create bigger-than-
        necessary buffer arrays, the data must always be copied.
    max_num_rows : int, optional
        The initial maximum number of rows. Note that more rows can
        always be added; this is used only for efficiency. If None,
        defaults to twice the initial number of rows.
    max_nonzero : int, optional
        The maximum number of nonzero elements. As with max_num_rows,
        this is only necessary for efficiency.
    expansion_factor : int or float, optional
        The maximum number of rows or nonzero elements will be this
        number times the initial number of rows or nonzero elements.
        This is overridden if max_num_rows or max_nonzero are provided.
    Examples
    --------
    >>> init = csrRowExpandableCSR([[0, 0, 2], [0, 4, 0]])
    >>> init[2] = np.array([9, 0, 0])
    >>> init[4] = sparse.csr_matrix([0, 0, 5])
    >>> init.nnz
    4
    >>> init.data
    array([2, 4, 9, 5], dtype=int64)
    >>> init.toarray()
    array([[0, 0, 2],
           [0, 4, 0],
           [9, 0, 0],
           [0, 0, 0],
           [0, 0, 5]], dtype=int64)
    """
    def __init__(self, arg1, shape=None, dtype=None, copy=False,
                 max_num_rows=None, max_nonzero=None,
                 expansion_factor=2):
        # Build a plain CSR first, then stash its three defining arrays
        # into oversized buffers (_data, _indices, _indptr) so that rows
        # can later be appended in amortized constant time. The
        # data/indices/indptr properties below expose truncated views of
        # these buffers, so super().__init__ (which assigns through the
        # property setters) copies into them.
        other = sparse.csr_matrix(arg1, shape=shape, dtype=dtype, copy=copy)
        if max_nonzero is None:
            max_nonzero = other.nnz * expansion_factor
        if max_num_rows is None:
            max_num_rows = other.shape[0] * expansion_factor
        # number of buffer slots currently in use
        self.curr_nonzero = other.nnz
        self.curr_indptr = other.shape[0] + 1
        self._data = np.empty(max_nonzero, dtype=other.dtype)
        self._indices = np.empty(max_nonzero, dtype=other.indices.dtype)
        self._indptr = np.empty(max_num_rows + 1, dtype=other.indptr.dtype)
        super().__init__((other.data, other.indices, other.indptr),
                         shape=other.shape, dtype=other.dtype, copy=False)
    @property
    def data(self):
        """The data array is virtual, truncated from the data "buffer", _data.
        """
        return self._data[:self.curr_nonzero]
    @data.setter
    def data(self, value):
        """Setter for the data property.
        We have to special-case for a few kinds of values.
        When creating a new instance, the csr_matrix class removes some
        zeros from the array and ends up setting data to a smaller array.
        In that case, we need to make sure that we reset `self.curr_nonzero`
        and copy the relevant part of the array.
        """
        if np.isscalar(value) or len(value) == self.curr_nonzero:
            self._data[:self.curr_nonzero] = value
        else:  # `value` is array-like of different length
            # adopt the new length, growing the buffer if needed
            self.curr_nonzero = len(value)
            while self._data.size < self.curr_nonzero:
                self._double_data_and_indices()
            self._data[:self.curr_nonzero] = value
    @property
    def indices(self):
        # virtual view, truncated from the _indices buffer
        return self._indices[:self.curr_nonzero]
    @indices.setter
    def indices(self, value):
        # mirrors the data setter: same-length assignment writes in
        # place; a different length adopts the new size, growing buffers.
        if np.isscalar(value) or len(value) == self.curr_nonzero:
            self._indices[:self.curr_nonzero] = value
        else:  # `value` is array-like of different length
            self.curr_nonzero = len(value)
            while self._indices.size < self.curr_nonzero:
                self._double_data_and_indices()
            self._indices[:self.curr_nonzero] = value
    @property
    def indptr(self):
        # virtual view, truncated from the _indptr buffer
        return self._indptr[:self.curr_indptr]
    @indptr.setter
    def indptr(self, value):
        if np.isscalar(value) or len(value) == self.curr_indptr:
            self._indptr[:self.curr_indptr] = value
        else:  # `value` is array-like of different length
            self.curr_indptr = len(value)
            while self._indptr.size < self.curr_indptr:
                self._double_data_and_indices()
            self._indptr[:self.curr_indptr] = value
    def __setitem__(self, index, value):
        # Row-append semantics: a scalar index at or past the current
        # number of rows appends; an in-range scalar index only supports
        # zeroing with the scalar 0.
        # NOTE(review): an in-range scalar index with a nonzero value is
        # silently ignored — confirm this is intentional.
        if np.isscalar(index):
            if index >= self.shape[0]: # appending a row
                self._append_row_at(index, value)
            else:
                if np.isscalar(value):
                    if value == 0: # zeroing out a row
                        self._zero_row(index)
        else:
            super().__setitem__(index, value)
    def _append_row_at(self, index, value):
        # first: normalize the input value. We want a sparse CSR matrix as
        # input, to make data copying logic much simpler.
        if np.isscalar(value):
            value = np.full(self.shape[1], value)  # make a full row if scalar
        if not sparse.isspmatrix_csr(value):
            value = sparse.csr_matrix(value)
        # Make sure we have sufficient room for the new row.
        if index + 2 > self._indptr.size:
            self._double_indptr()
        num_values = value.nnz
        if self.curr_nonzero + num_values > self._data.size:
            self._double_data_and_indices()
        # i:j is the slice of the data/indices buffers for the new row;
        # intermediate (empty) rows all point at i.
        i, j = self.indptr[-1], self.indptr[-1] + num_values
        self._indptr[self.curr_indptr:index + 1] = i
        self._indptr[index + 1] = j
        self.curr_indptr = index + 2
        self._indices[i:j] = value.indices[:]
        self._data[i:j] = value.data[:]
        self.curr_nonzero += num_values
        # It turns out that the `shape` attribute is a property in SciPy
        # sparse matrices, and can't be set directly. So, we bypass it and
        # set the corresponding tuple directly, interfaces be damned.
        self._shape = (int(index + 1), self.shape[1])
    def _zero_row(self, index):
        """Set all elements of row `index` to 0."""
        i, j = self.indptr[index:index+2]
        self.data[i:j] = 0
    def _double_indptr(self):
        """Double the size of the array backing `indptr`.
        Doubling on demand gives amortized constant time append.
        """
        old_indptr = self._indptr
        self._indptr = np.empty(2 * old_indptr.size, old_indptr.dtype)
        self._indptr[:old_indptr.size] = old_indptr[:]
    def _double_data_and_indices(self):
        """Double size of the arrays backing `indices` and `data` attributes.
        Doubling on demand gives amortized constant time append. Since these
        two arrays are always the same size in the CSR format, they are
        doubled together in the same function.
        """
        n = self._data.size
        old_data = self._data
        self._data = np.empty(2 * n, old_data.dtype)
        self._data[:n] = old_data[:]
        old_indices = self._indices
        self._indices = np.empty(2 * n, old_indices.dtype)
        self._indices[:n] = old_indices[:]
def merge_contingency_table(a, b, ignore_seg=[0], ignore_gt=[0]):
    """Build a contingency table backed by a row-expandable CSR matrix.
    The extra row capacity allows callers to record merged segments by
    appending new rows to the table.
    Parameters
    ----------
    a, b : np.ndarray of int
        The two segmentations to compare.
    ignore_seg, ignore_gt : list of int, optional
        Labels to ignore in `a` and `b` respectively.
    Returns
    -------
    ct : csrRowExpandableCSR
        The (normalized) contingency table, with appendable rows.
    """
    base = contingency_table(a, b, ignore_seg=ignore_seg,
                             ignore_gt=ignore_gt)
    return csrRowExpandableCSR(base)
def xlogx(x, out=None, in_place=False):
    """Compute x * log_2(x).
    We define 0 * log_2(0) = 0.
    Parameters
    ----------
    x : np.ndarray or scipy.sparse.csc_matrix or csr_matrix
        The input array.
    out : same type as x (optional)
        If provided, use this array/matrix for the result.
    in_place : bool (optional, default False)
        Operate directly on x.
    Returns
    -------
    y : same type as x
        Result of x * log_2(x).
    """
    if in_place:
        y = x
    elif out is None:
        y = x.copy()
    else:
        y = out
    # operate on the raw data array of sparse inputs; zeros are implicit
    # there, so only the stored values need transforming
    if isinstance(y, (sparse.csc_matrix, sparse.csr_matrix)):
        z = y.data
    else:
        z = np.asarray(y)  # ensure np.matrix converted to np.array
    # only transform nonzero entries, preserving 0 * log2(0) == 0
    nz = z.nonzero()
    z[nz] *= np.log2(z[nz])
    return y
def special_points_evaluate(eval_fct, coords, flatten=True, coord_format=True):
    """Return an evaluation function to only evaluate at special coordinates.
    Parameters
    ----------
    eval_fct : function taking at least two np.ndarray of equal shapes as args
        The function to be used for evaluation.
    coords : np.ndarray of int, shape (n_points, n_dim) or (n_points,)
        The coordinates at which to evaluate the function. The coordinates can
        either be subscript format (one index into each dimension of input
        arrays) or index format (a single index into the linear array). For
        the latter, use `flatten=False`.
    flatten : bool, optional
        Whether to flatten the coordinates (default) or leave them untouched
        (if they are already in raveled format).
    coord_format : bool, optional
        Format the coordinates to a tuple of np.ndarray as numpy expects. Set
        to False if coordinates are already in this format or flattened.
    Returns
    -------
    special_eval_fct : function taking at least two np.ndarray of equal shapes
        The returned function is the same as the above function but only
        evaluated at the coordinates specified. This can be used, for example,
        to subsample a volume, or to evaluate only whether synapses are
        correctly assigned, rather than every voxel, in a neuronal image
        volume.
    """
    if coord_format:
        coords = [coords[:, i] for i in range(coords.shape[1])]
    def special_eval_fct(x, y, *args, **kwargs):
        if flatten:
            # Normalize negative coordinates *without* mutating the
            # captured `coords` list. The previous in-place update
            # (`coords[i] += x.shape[i]`) leaked state across calls,
            # giving wrong indices when the returned function was reused
            # on arrays of a different shape.
            axis_coords = [c + x.shape[i] if c[0] < 0 else c
                           for i, c in enumerate(coords)]
            coords2 = np.ravel_multi_index(axis_coords, x.shape)
        else:
            coords2 = coords
        sx = x.ravel()[coords2]
        sy = y.ravel()[coords2]
        return eval_fct(sx, sy, *args, **kwargs)
    return special_eval_fct
def make_synaptic_functions(fn, fcts):
    """Make evaluation functions that only evaluate at synaptic sites.
    Parameters
    ----------
    fn : string
        Filename containing synapse coordinates, in Raveler format. [1]
    fcts : function, or iterable of functions
        Functions to be converted to synaptic evaluation.
    Returns
    -------
    syn_fcts : function or iterable of functions
        Evaluation functions that will evaluate only at synaptic sites.
    Raises
    ------
    ImportError : if the `syngeo` package [2, 3] is not installed.
    References
    ----------
    [1] https://wiki.janelia.org/wiki/display/flyem/synapse+annotation+file+format
    [2] https://github.com/janelia-flyem/synapse-geometry
    [3] https://github.com/jni/synapse-geometry
    """
    from syngeo import io as synio
    synapse_coords = \
        synio.raveler_synapse_annotations_to_coords(fn, 'arrays')
    synapse_coords = np.array(list(it.chain(*synapse_coords)))
    make_function = partial(special_points_evaluate, coords=synapse_coords)
    # collections.Iterable was removed in Python 3.10; the ABC lives in
    # collections.abc (reachable through the `coll` alias).
    if not isinstance(fcts, coll.abc.Iterable):
        return make_function(fcts)
    else:
        return list(map(make_function, fcts))
def make_synaptic_vi(fn):
    """Convenience wrapper: a split-VI evaluator restricted to the
    synapse coordinates listed in file `fn`."""
    return make_synaptic_functions(fn, split_vi)
def vi(x, y=None, weights=None, ignore_x=[0], ignore_y=[0]):
    """Return the variation of information metric. [1]
    VI(X, Y) = H(X | Y) + H(Y | X), where H(.|.) denotes the conditional
    entropy.
    Parameters
    ----------
    x : np.ndarray
        Label field (int type) or contingency table (float). `x` is
        interpreted as a contingency table (summing to 1.0) if and only if `y`
        is not provided.
    y : np.ndarray of int, same shape as x, optional
        A label field to compare to `x`.
    weights : np.ndarray of float, shape (2,), optional
        The weights of the conditional entropies of `x` and `y`. Equal weights
        are the default (used when None).
    ignore_x, ignore_y : list of int, optional
        Any points having a label in this list are ignored in the evaluation.
        Ignore 0-labeled points by default.
    Returns
    -------
    v : float
        The variation of information between `x` and `y`.
    References
    ----------
    [1] Meila, M. (2007). Comparing clusterings - an information based
    distance. Journal of Multivariate Analysis 98, 873-895.
    """
    # Use a None sentinel instead of a module-level `np.ones(2)` default:
    # a shared mutable array default could be silently corrupted by any
    # caller that modifies it, affecting all subsequent calls.
    if weights is None:
        weights = np.ones(2)
    return np.dot(weights, split_vi(x, y, ignore_x, ignore_y))
def split_vi(x, y=None, ignore_x=[0], ignore_y=[0]):
    """Return the symmetric conditional entropies associated with the VI.
    The variation of information is defined as VI(X,Y) = H(X|Y) + H(Y|X).
    If Y is the ground-truth segmentation, then H(Y|X) can be interpreted
    as the amount of under-segmentation of Y and H(X|Y) as the amount of
    over-segmentation: a perfect over-segmentation has H(Y|X)=0 and a
    perfect under-segmentation has H(X|Y)=0.
    If y is None, x is assumed to be a contingency table.
    Parameters
    ----------
    x : np.ndarray
        Label field (int type) or contingency table (float). `x` is
        interpreted as a contingency table (summing to 1.0) if and only
        if `y` is not provided.
    y : np.ndarray of int, same shape as x, optional
        A label field to compare to `x`.
    ignore_x, ignore_y : list of int, optional
        Any points having a label in this list are ignored in the
        evaluation. Ignore 0-labeled points by default.
    Returns
    -------
    sv : np.ndarray of float, shape (2,)
        The conditional entropies of Y|X and X|Y.
    See Also
    --------
    vi
    """
    tables = vi_tables(x, y, ignore_x, ignore_y)
    hxgy, hygx = tables[3], tables[4]
    # [false merges = H(Y|X), false splits = H(X|Y)]
    return np.array([hygx.sum(), hxgy.sum()])
def vi_pairwise_matrix(segs, split=False):
    """Compute pairwise VI distances between all pairs of segmentations.

    0-labeled pixels are ignored.

    Parameters
    ----------
    segs : iterable of np.ndarray of int
        Segmentations of identical shape.
    split : bool, optional
        If True, pack both directions of the conditional entropy into one
        matrix: lower triangle holds the false-split distance, upper
        triangle the false-merge distance. Otherwise return the plain
        (symmetric) VI distance matrix.

    Returns
    -------
    vi_sq : np.ndarray of float, shape (len(segs), len(segs))
        The pairwise distance matrix as described above.
    """
    flat = np.array([seg.ravel() for seg in segs])
    if not split:
        return squareform(pdist(flat, vi))
    merge_dist = squareform(pdist(flat, lambda a, b: split_vi(a, b)[0]))
    split_dist = squareform(pdist(flat, lambda a, b: split_vi(a, b)[1]))
    # overwrite the strict lower triangle with the split distances
    lower = np.tril(np.ones(split_dist.shape), -1).astype(bool)
    merge_dist[lower] = split_dist[lower]
    return merge_dist
def split_vi_threshold(tup):
    """Compute split VI from a single tuple argument (multiprocessing-friendly).

    Parameters
    ----------
    tup : tuple (np.ndarray, np.ndarray, [int], [int], float)
        In order: the UCM of the candidate segmentation, the gold standard,
        labels to ignore in the segmentation, labels to ignore in the gold
        standard, and the threshold to apply to the UCM.

    Returns
    -------
    sv : np.ndarray of float, shape (2,)
        Under- and over-segmentation of the connected-components labeling
        of the thresholded UCM against the gold standard.
    """
    ucm, gt, ignore_seg, ignore_gt, threshold = tup
    candidate = label(ucm < threshold)[0]
    return split_vi(candidate, gt, ignore_seg, ignore_gt)
def vi_by_threshold(ucm, gt, ignore_seg=[], ignore_gt=[], npoints=None,
                    nprocessors=None):
    """Compute the VI at every threshold of the provided UCM.

    Parameters
    ----------
    ucm : np.ndarray of float, arbitrary shape
        The Ultrametric Contour Map, where each 0.0-region is separated by a
        boundary. Higher values of the boundary indicate more confidence in
        its presence.
    gt : np.ndarray of int, same shape as `ucm`
        The ground truth segmentation.
    ignore_seg : list of int, optional
        The labels to ignore in the segmentation of the UCM.
    ignore_gt : list of int, optional
        The labels to ignore in the ground truth.
    npoints : int, optional
        The number of thresholds to sample. By default, all thresholds are
        sampled.
    nprocessors : int, optional
        Number of processors to use for the parallel evaluation of different
        thresholds.

    Returns
    -------
    result : np.ndarray of float, shape (3, npoints)
        Rows: the threshold used, the undersegmentation component of VI, and
        the oversegmentation component of VI.
    """
    ts = np.unique(ucm)[1:]
    if npoints is None:
        npoints = len(ts)
    if len(ts) > 2 * npoints:
        # Integer floor division: a float step would produce float indices,
        # which fail as fancy indices under Python 3.
        ts = ts[np.arange(1, len(ts), len(ts) // npoints)]
    if nprocessors == 1:  # this should avoid pickling overhead
        result = [split_vi_threshold((ucm, gt, ignore_seg, ignore_gt, t))
                  for t in ts]
    else:
        p = multiprocessing.Pool(nprocessors)
        try:
            result = p.map(split_vi_threshold,
                           ((ucm, gt, ignore_seg, ignore_gt, t) for t in ts))
        finally:
            # release worker processes instead of leaking them
            p.close()
            p.join()
    return np.concatenate((ts[np.newaxis, :], np.array(result).T), axis=0)
def rand_by_threshold(ucm, gt, npoints=None):
    """Compute Rand and Adjusted Rand indices for each threshold of a UCM.

    Parameters
    ----------
    ucm : np.ndarray, arbitrary shape
        An Ultrametric Contour Map of region boundaries having specific
        values. Higher values indicate higher boundary probabilities.
    gt : np.ndarray, int type, same shape as ucm
        The ground truth segmentation.
    npoints : int, optional
        If provided, only compute values at npoints thresholds, rather than
        all thresholds. Useful when ucm has an extremely large number of
        unique values.

    Returns
    -------
    ris : np.ndarray of float, shape (3, len(np.unique(ucm))) or (3, npoints)
        Rows: the threshold values, the Rand Index at each threshold, and
        the Adjusted Rand Index at each threshold.
    """
    ts = np.unique(ucm)[1:]
    if npoints is None:
        npoints = len(ts)
    if len(ts) > 2 * npoints:
        # Integer floor division: `/` yields a float under Python 3, and
        # float arrays are invalid as fancy indices.
        ts = ts[np.arange(1, len(ts), len(ts) // npoints)]
    result = np.zeros((2, len(ts)))
    for i, t in enumerate(ts):
        seg = label(ucm < t)[0]
        result[0, i] = rand_index(seg, gt)
        result[1, i] = adj_rand_index(seg, gt)
    return np.concatenate((ts[np.newaxis, :], result), axis=0)
def adapted_rand_error(seg, gt, all_stats=False):
    r"""Compute Adapted Rand error as defined by the SNEMI3D contest [1].

    Formula is given as 1 - the maximal F-score of the Rand index
    (excluding the zero component of the original labels). Adapted
    from the SNEMI3D MATLAB script.

    Parameters
    ----------
    seg : np.ndarray
        the segmentation to score, where each value is the label at that point
    gt : np.ndarray, same shape as seg
        the groundtruth to score against, where each value is a label
    all_stats : boolean, optional
        whether to also return precision and recall as a 3-tuple with rand_error

    Returns
    -------
    are : float
        The adapted Rand error; equal to $1 - \frac{2pr}{p + r}$,
        where $p$ and $r$ are the precision and recall described below.
    prec : float, optional
        The adapted Rand precision. (Only returned when `all_stats` is ``True``.)
    rec : float, optional
        The adapted Rand recall. (Only returned when `all_stats` is ``True``.)

    References
    ----------
    [1]: http://brainiac2.mit.edu/SNEMI3D/evaluation
    """
    # segA is truth, segB is query; restrict both to the foreground of A.
    segA = np.ravel(gt)
    segB = np.ravel(seg)
    mask = (segA > 0)
    segA = segA[mask]
    segB = segB[mask]
    n = segA.size
    # +1 so the maximum label is a valid index into the table.
    n_labels_A = np.amax(segA) + 1
    n_labels_B = np.amax(segB) + 1
    # Contingency table: p_ij[i, j] counts pixels with gt label i, seg label j.
    # Duplicate (i, j) pairs are summed by the sparse constructor.
    ones_data = np.ones(n)
    p_ij = sparse.csr_matrix((ones_data, (segA, segB)),
                             shape=(n_labels_A, n_labels_B), dtype=np.uint64)
    # NOTE: dead code removed relative to the original — B_nonzero/B_zero/
    # num_B_zero/sum_p_ij and the first a_i/b_i were computed but never used
    # (the marginals were immediately overwritten below).
    # a: gt-foreground rows; b: additionally drop the seg background column;
    # c: overlap of real gt labels with the seg background.
    a = p_ij[1:n_labels_A, :]
    b = p_ij[1:n_labels_A, 1:n_labels_B]
    c = p_ij[1:n_labels_A, 0].todense()
    d = np.array(b.todense()) ** 2
    a_i = np.array(a.sum(1))
    b_i = np.array(b.sum(0))
    sumA = np.sum(a_i * a_i)
    # Background pixels of seg count as singletons (paper convention),
    # hence the extra np.sum(c) / n terms.
    sumB = np.sum(b_i * b_i) + (np.sum(c) / n)
    sumAB = np.sum(d) + (np.sum(c) / n)
    precision = sumAB / sumB
    recall = sumAB / sumA
    fScore = 2.0 * precision * recall / (precision + recall)
    are = 1.0 - fScore
    if all_stats:
        return (are, precision, recall)
    else:
        return are
def calc_entropy(split_vals, count):
    """Weighted conditional-entropy contribution of one segment.

    `split_vals` maps opposing-segment labels to overlap pixel counts for a
    single segment; `count` is the total number of jointly-foreground pixels.
    Returns p(segment) * H(opposing | this segment), the segment's additive
    share of the conditional entropy.
    """
    seg_total = sum(split_vals.values())
    seg_prob = float(seg_total) / count
    acc = 0
    for overlap in split_vals.values():
        frac = (float(overlap) / count) / seg_prob
        acc += frac * np.log2(frac)
    return -(seg_prob * acc)
def split_vi_mem(x, y):
    """Memory-conscious split-VI computation via per-segment overlap dicts.

    Builds, for every nonzero label of `x` and `y`, a dictionary of overlap
    counts against the other volume (only jointly-foreground pixels count),
    then accumulates per-segment entropies with `calc_entropy`.

    Returns
    -------
    (x_sum, y_sum, x_sorted, x_ents, y_sorted, y_ents) :
        total conditional entropies, labels sorted by decreasing entropy,
        and the per-label entropy dictionaries, for `x` and `y` respectively.
    """
    x_overlaps = {lbl: {} for lbl in np.unique(x) if lbl != 0}
    y_overlaps = {lbl: {} for lbl in np.unique(y) if lbl != 0}
    count = 0
    print("Analyzing similarities")
    for xv, yv in zip(x.ravel(), y.ravel()):
        if xv != 0 and yv != 0:
            x_overlaps[xv][yv] = x_overlaps[xv].get(yv, 0) + 1
            y_overlaps[yv][xv] = y_overlaps[yv].get(xv, 0) + 1
            count += 1
    print("Finished analyzing similarities")
    x_ents = {}
    y_ents = {}
    x_sum = 0.0
    y_sum = 0.0
    for lbl, overlaps in x_overlaps.items():
        x_ents[lbl] = calc_entropy(overlaps, count)
        x_sum += x_ents[lbl]
    for lbl, overlaps in y_overlaps.items():
        y_ents[lbl] = calc_entropy(overlaps, count)
        y_sum += y_ents[lbl]
    # labels ordered by decreasing entropy (worst offenders first)
    x_sorted = [lbl for lbl, _ in
                sorted(x_ents.items(), key=lambda kv: kv[1], reverse=True)]
    y_sorted = [lbl for lbl, _ in
                sorted(y_ents.items(), key=lambda kv: kv[1], reverse=True)]
    return x_sum, y_sum, x_sorted, x_ents, y_sorted, y_ents
def divide_rows(matrix, column, in_place=False):
    """Divide each row of `matrix` by the corresponding element in `column`.

    out[i, j] = matrix[i, j] / column[i]

    Parameters
    ----------
    matrix : np.ndarray, scipy.sparse.csc_matrix or csr_matrix, shape (M, N)
        The input matrix.
    column : a 1D np.ndarray, shape (M,)
        The column dividing `matrix`.
    in_place : bool (optional, default False)
        Do the computation in-place.

    Returns
    -------
    out : same type as `matrix`
        The result of the row-wise division.
    """
    out = matrix if in_place else matrix.copy()
    if type(out) in (sparse.csc_matrix, sparse.csr_matrix):
        # CSC stores row indices in .indices, which is what we need here.
        needs_csr = type(out) == sparse.csr_matrix
        if needs_csr:
            out = out.tocsc()
        per_entry_divisor = np.take(column, out.indices)
        nz = out.data.nonzero()
        out.data[nz] /= per_entry_divisor[nz]
        if needs_csr:
            out = out.tocsr()
    else:
        out /= column[:, np.newaxis]
    return out
def divide_columns(matrix, row, in_place=False):
    """Divide each column of `matrix` by the corresponding element in `row`.

    out[i, j] = matrix[i, j] / row[j]

    Parameters
    ----------
    matrix : np.ndarray, scipy.sparse.csc_matrix or csr_matrix, shape (M, N)
        The input matrix.
    row : a 1D np.ndarray, shape (N,)
        The row dividing `matrix`.
    in_place : bool (optional, default False)
        Do the computation in-place.

    Returns
    -------
    out : same type as `matrix`
        The result of the column-wise division.
    """
    if in_place:
        out = matrix
    else:
        out = matrix.copy()
    if type(out) in [sparse.csc_matrix, sparse.csr_matrix]:
        if type(out) == sparse.csc_matrix:
            convert_to_csc = True
            # CSR stores column indices in .indices, which is what we need.
            out = out.tocsr()
        else:
            convert_to_csc = False
        row_repeated = np.take(row, out.indices)
        nz = out.data.nonzero()
        out.data[nz] /= row_repeated[nz]
        if convert_to_csc:
            out = out.tocsc()
    else:
        out /= row[np.newaxis, :]
    return out
def vi_tables(x, y=None, ignore_x=[0], ignore_y=[0]):
    """Return probability tables used for calculating VI.
    If y is None, x is assumed to be a contingency table.
    Parameters
    ----------
    x, y : np.ndarray
        Either x and y are provided as equal-shaped np.ndarray label fields
        (int type), or y is not provided and x is a contingency table
        (sparse.csc_matrix) that may or may not sum to 1.
    ignore_x, ignore_y : list of int, optional
        Rows and columns (respectively) to ignore in the contingency table.
        These are labels that are not counted when evaluating VI.
    Returns
    -------
    pxy : sparse.csc_matrix of float
        The normalized contingency table.
    px, py, hxgy, hygx, lpygx, lpxgy : np.ndarray of float
        The proportions of each label in `x` and `y` (`px`, `py`), the
        per-segment conditional entropies of `x` given `y` and vice-versa, the
        per-segment conditional probability p log p.
    """
    if y is not None:
        # build (and normalize) the joint table from the two label fields
        pxy = contingency_table(x, y, ignore_seg=ignore_x, ignore_gt=ignore_y)
    else:
        cont = x
        total = float(cont.sum())
        # normalize, since it is an identity op if already done
        pxy = cont / total
    # Calculate marginal probabilities: px over rows, py over columns.
    px = np.array(pxy.sum(axis=1)).ravel()
    py = np.array(pxy.sum(axis=0)).ravel()
    # Remove zero rows/cols so the conditional divisions below are safe.
    nzx = px.nonzero()[0]
    nzy = py.nonzero()[0]
    nzpx = px[nzx]
    nzpy = py[nzy]
    nzpxy = pxy[nzx, :][:, nzy]
    # Calculate log conditional probabilities and entropies.
    # lpygx[i] = \sum_y{p_{y|x=i} \log{p_{y|x=i}}} (zero for empty rows).
    lpygx = np.zeros(np.shape(px))
    lpygx[nzx] = xlogx(divide_rows(nzpxy, nzpx)).sum(axis=1).ravel()
    # \sum_x{p_{y|x} \log{p_{y|x}}}
    hygx = -(px*lpygx) # \sum_x{p_x H(Y|X=x)} = H(Y|X)
    lpxgy = np.zeros(np.shape(py))
    lpxgy[nzy] = xlogx(divide_columns(nzpxy, nzpy)).sum(axis=0).ravel()
    hxgy = -(py*lpxgy)
    return [pxy] + list(map(np.asarray, [px, py, hxgy, hygx, lpygx, lpxgy]))
def sorted_vi_components(s1, s2, ignore1=[0], ignore2=[0], compress=False):
    """Return lists of the most entropic segments in s1|s2 and s2|s1.
    Parameters
    ----------
    s1, s2 : np.ndarray of int
        Segmentations to be compared. Usually, `s1` will be a candidate
        segmentation and `s2` will be the ground truth or target segmentation.
    ignore1, ignore2 : list of int, optional
        Labels in these lists are ignored in computing the VI. 0-labels are
        ignored by default; pass empty lists to use all labels.
    compress : bool, optional
        The 'compress' flag performs a remapping of the labels before doing
        the VI computation, resulting in memory savings when many labels are
        not used in the volume. (For example, if you have just two labels, 1
        and 1,000,000, 'compress=False' will give a vector of length
        1,000,000, whereas with 'compress=True' it will have just size 2.)
    Returns
    -------
    ii1 : np.ndarray of int
        The labels in `s1` having the most entropy. If `s1` is the automatic
        segmentation, these are the worst false merges.
    h2g1 : np.ndarray of float
        The conditional entropy corresponding to the labels in `ii1`.
    ii2 : np.ndarray of int
        The labels in `s2` having the most entropy. These correspond to the
        worst false splits.
    h1g2 : np.ndarray of float
        The conditional entropy corresponding to the labels in `ii2`.
    """
    if compress:
        # remap labels to a dense 1..n range; back1/back2 invert the mapping
        s1, forw1, back1 = relabel_from_one(s1)
        s2, forw2, back2 = relabel_from_one(s2)
    _, _, _, h1g2, h2g1, _, _ = vi_tables(s1, s2, ignore1, ignore2)
    # sort by decreasing conditional entropy
    i1 = (-h2g1).argsort()
    i2 = (-h1g2).argsort()
    # translate back to original label IDs when compression was applied
    ii1 = back1[i1] if compress else i1
    ii2 = back2[i2] if compress else i2
    return ii1, h2g1[i1], ii2, h1g2[i2]
def split_components(idx, cont, num_elems=4, axis=0):
    """Return the indices of the bodies most overlapping with body idx.
    Parameters
    ----------
    idx : int
        The segment index being examined.
    cont : sparse.csc_matrix
        The normalized contingency table.
    num_elems : int, optional
        The number of overlapping bodies desired.
    axis : int, optional
        The axis along which to perform the calculations. Assuming `cont` has
        the automatic segmentation as the rows and the gold standard as the
        columns, `axis=0` will return the segment IDs in the gold standard of
        the worst merges comprising `idx`, while `axis=1` will return the
        segment IDs in the automatic segmentation of the worst splits
        comprising `idx`.
    Returns
    -------
    comps : list of (int, float, float) tuples
        `num_elems` indices of the biggest overlaps comprising `idx`, along
        with the percent of `idx` that they comprise and the percent of
        themselves that overlaps with `idx`.
    """
    if axis == 1:
        # examine the other direction by transposing the table
        cont= cont.T
    # total size of each row segment and each column segment
    x_sizes = np.asarray(cont.sum(axis=1)).ravel()
    y_sizes = np.asarray(cont.sum(axis=0)).ravel()
    # cc: fraction of segment `idx` covered by each opposing segment
    cc = divide_rows(cont, x_sizes)[idx].toarray().ravel()
    # cct: fraction of each opposing segment covered by segment `idx`
    cct = divide_columns(cont, y_sizes)[idx].toarray().ravel()
    idxs = (-cc).argsort()[:num_elems]
    probs = cc[idxs]
    probst = cct[idxs]
    return list(zip(idxs, probs, probst))
def rand_values(cont_table):
    """Calculate values for Rand Index and related values, e.g. Adjusted Rand.

    Parameters
    ----------
    cont_table : scipy.sparse.csc_matrix
        A contingency table of the two segmentations.

    Returns
    -------
    a, b, c, d : float
        The values necessary for computing Rand Index and related values. [1, 2]

    References
    ----------
    [1] Rand, W. M. (1971). Objective criteria for the evaluation of
    clustering methods. J Am Stat Assoc.
    [2] http://en.wikipedia.org/wiki/Rand_index#Definition on 2013-05-16.
    """
    total = cont_table.sum()
    sum_sq = (cont_table.multiply(cont_table)).sum()
    row_sq = (np.asarray(cont_table.sum(axis=1)) ** 2).sum()
    col_sq = (np.asarray(cont_table.sum(axis=0)) ** 2).sum()
    a = (sum_sq - total) / 2.0
    b = (row_sq - sum_sq) / 2
    c = (col_sq - sum_sq) / 2
    d = (sum_sq + total ** 2 - row_sq - col_sq) / 2
    return a, b, c, d
def rand_index(x, y=None):
    """Return the unadjusted Rand index. [1]

    Parameters
    ----------
    x, y : np.ndarray
        Two equal-shaped int label fields, or (when `y` is omitted) a single
        unnormalised contingency table (sparse.csc_matrix).

    Returns
    -------
    ri : float
        The Rand index of `x` and `y`.

    References
    ----------
    [1] WM Rand. (1971) Objective criteria for the evaluation of
    clustering methods. J Am Stat Assoc. 66: 846–850
    """
    if y is None:
        cont = x
    else:
        cont = contingency_table(x, y, norm=False)
    a, b, c, d = rand_values(cont)
    return (a + d) / (a + b + c + d)
def adj_rand_index(x, y=None):
    """Return the adjusted Rand index.

    The Adjusted Rand Index (ARI) is the deviation of the Rand Index from
    the value expected if the marginals of the contingency table were
    independent; it ranges from 1 (perfectly correlated marginals) to -1
    (perfectly anti-correlated).

    Parameters
    ----------
    x, y : np.ndarray
        Two equal-shaped int label fields, or (when `y` is omitted) a single
        unnormalised contingency table (sparse.csc_matrix).

    Returns
    -------
    ari : float
        The adjusted Rand index of `x` and `y`.
    """
    if y is None:
        cont = x
    else:
        cont = contingency_table(x, y, norm=False)
    a, b, c, d = rand_values(cont)
    total = a + b + c + d
    cross = (a + b) * (a + c) + (c + d) * (b + d)
    return (total * (a + d) - cross) / (total ** 2 - cross)
def fm_index(x, y=None):
    """Return the Fowlkes-Mallows index. [1]

    Parameters
    ----------
    x, y : np.ndarray
        Two equal-shaped int label fields, or (when `y` is omitted) a single
        unnormalised contingency table (sparse.csc_matrix).

    Returns
    -------
    fm : float
        The FM index of `x` and `y`. 1 is perfect agreement.

    References
    ----------
    [1] EB Fowlkes & CL Mallows. (1983) A method for comparing two
    hierarchical clusterings. J Am Stat Assoc 78: 553
    """
    if y is None:
        cont = x
    else:
        cont = contingency_table(x, y, norm=False)
    a, b, c, d = rand_values(cont)
    return a / np.sqrt((a + b) * (a + c))
def reduce_vi(fn_pattern='testing/%i/flat-single-channel-tr%i-%i-%.2f.lzf.h5',
              iterable=[(ts, tr, ts) for ts, tr in it.permutations(range(8), 2)],
              thresholds=np.arange(0, 1.01, 0.01)):
    """Compile evaluation results embedded in many .h5 files under "vi".
    Parameters
    ----------
    fn_pattern : string, optional
        A format string defining the files to be examined.
    iterable : iterable of tuples, optional
        The (partial) tuples to apply to the format string to obtain
        individual files.
    thresholds : iterable of float, optional
        The final tuple elements to apply to the format string. The final
        tuples are the product of `iterable` and `thresholds`.
    Returns
    -------
    vi : np.ndarray of float, shape (3, len(thresholds))
        The under and over segmentation components of VI at each threshold.
        `vi[0, :]` is the threshold, `vi[1, :]` the undersegmentation and
        `vi[2, :]` is the oversegmentation.
    """
    # NOTE(review): the default `iterable` and `thresholds` are mutable and
    # evaluated once at import time — harmless here since they are only read.
    iterable = list(iterable)
    vi = np.zeros((3, len(thresholds), len(iterable)), np.double)
    current_vi = np.zeros(3)
    for i, t in enumerate(thresholds):
        for j, v in enumerate(iterable):
            current_fn = fn_pattern % (tuple(v) + (t,))
            try:
                f = h5py.File(current_fn, 'r')
            except IOError:
                logging.warning('IOError: could not open file %s' % current_fn)
            else:
                try:
                    current_vi = np.array(f['vi'])[:, 0]
                except IOError:
                    logging.warning('IOError: could not open file %s'
                                    % current_fn)
                except KeyError:
                    logging.warning('KeyError: could not find vi in file %s'
                                    % current_fn)
                finally:
                    f.close()
            # NOTE(review): when a file fails to open/read, the *previous*
            # iteration's `current_vi` is added again here — presumably a
            # deliberate carry-forward, but worth confirming with the author.
            vi[:, i, j] += current_vi
    return vi
def sem(ar, axis=None):
    """Calculate the standard error of the mean (SEM) along an axis.

    Parameters
    ----------
    ar : np.ndarray
        The input array of values.
    axis : int, optional
        Calculate SEM along the given axis. If omitted, calculate along the
        raveled array.

    Returns
    -------
    sem : float or np.ndarray of float
        The SEM over the whole array (if `axis=None`) or over the chosen axis.
    """
    if axis is None:
        ar = ar.ravel()
        axis = 0
    nsamples = ar.shape[axis]
    return np.std(ar, axis=axis) / np.sqrt(nsamples)
def vi_statistics(vi_table):
    """Descriptive statistics from a block of related VI evaluations.

    Parameters
    ----------
    vi_table : np.ndarray of float
        An array of VI evaluations; the last axis indexes the samples.

    Returns
    -------
    means, sems, medians : np.ndarrays of float
        Mean, standard error of the mean, and median along the samples axis.
    """
    means = np.mean(vi_table, axis=-1)
    errors = sem(vi_table, axis=-1)
    medians = np.median(vi_table, axis=-1)
    return means, errors, medians
|
#!/usr/bin/env python
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
tox-bootstrap
=============
A bootstrap to automatically install tox and dependencies on machines that may
not already have tox installed. This is useful when configuring a number of
Hudson quickly; only Python needs to be installed.
Getting started
---------------
::
$ cd my_project/
$ ls
. .. src/ doc/ setup.py tox.ini
$ curl http://pytox.googlecode.com/hg/toxbootstrap.py -O
Instead of running "tox", now you can just run "python toxbootstrap.py" which
will take care of installing tox (if not already installed into
``.tox/_toxinstall``)::
$ python toxbootstrap.py
If you're using Hudson_, you may also do::
import sys
sys.path.insert(0, '.') # sometimes necessary :/
import toxbootstrap
toxbootstrap.cmdline() # also accepts argv list
.. _Hudson: http://hudson-ci.org/
ToDo
----
1. Detect tox in ``$PATH`` (eg: ``C:\Python26\Scripts`` or
``%APPDATA%\Python\Scripts``)
2. Gracefully ignore PyPI xmlrpc downtime errors when checking for new release.
"""
__version__ = '0.9.1.dev1'
import sys
import os
from os import path
import logging
from subprocess import Popen, PIPE, check_call, CalledProcessError
# Opt-in flag: install tox from the dev package index instead of PyPI.
USETOXDEV=os.environ.get('USETOXDEV', False)
# Name of the bootstrap virtualenv directory, created under .tox/.
TENV='_toxinstall'
PY3 = sys.version_info[0] == 3
if PY3:
    from urllib.request import urlretrieve
    import xmlrpc.client as xmlrpclib
else:
    from urllib import urlretrieve
    import xmlrpclib
# Module import configures root logging as a side effect.
logging.basicConfig(level=logging.INFO)
# Last stable: 1.5.1
VIRTUALENVPY_URL = (
    'http://bitbucket.org/ianb/virtualenv/raw/eb94c9ebe0ba/virtualenv.py')
def run(cmd, shell=True):
    """Run the given command in shell.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    logging.info('Running command: %s', cmd)
    check_call(cmd, shell=shell)
def crun(cmd, shell=True):
    """Run the given command and return its captured standard output.

    Note: the return value is bytes under Python 3 (Popen default).
    """
    logging.info('Running command (for output): %s', cmd)
    proc = Popen(cmd, stdout=PIPE, shell=shell)
    out, _ = proc.communicate()
    return out
def wget(url):
    """Download `url` into the current working directory.

    The local filename is the basename of the URL.
    """
    logging.info('Downloading %s', url)
    target = path.join(path.abspath(os.getcwd()), path.basename(url))
    urlretrieve(url, target)
def has_script(venv, name):
    """Return True if the virtualenv contains the named script.

    Looks for Scripts/$name or Scripts/$name.exe on Windows, bin/$name
    elsewhere.
    """
    if sys.platform == 'win32':
        candidates = [path.join(venv, 'Scripts', name),
                      path.join(venv, 'Scripts', name + '.exe')]
        return any(path.exists(c) for c in candidates)
    return path.exists(path.join(venv, 'bin', name))
def activate_path(venv):
    """Prepend the virtualenv's script directory to os.environ['PATH'].

    Asserts that the directory exists; returns nothing.
    """
    scripts_dir = 'Scripts' if sys.platform == 'win32' else 'bin'
    p = path.abspath(path.join(venv, scripts_dir))
    assert path.exists(p), p
    os.environ['PATH'] = p + os.pathsep + os.environ['PATH']
    logging.info("added to PATH: %s", p)
def get_script_path(venv, name):
    """Return the full path to script `name` inside the virtualenv.

    Raises NameError when no such script exists.
    """
    if sys.platform == 'win32':
        primary = path.join(venv, 'Scripts', name)
        # fall back to the .exe variant when the bare name is missing
        p = primary if path.exists(primary) else primary + '.exe'
    else:
        p = path.join(venv, 'bin', name)
    if not path.exists(p):
        raise NameError('cannot find a script named "%s"' % (name,))
    return p
def get_tox_version(venv):
    """Return the installed tox version, queried via the venv's own python.

    Uses the virtualenv's interpreter so the answer reflects that
    environment rather than the one running this script.
    """
    py = get_script_path(venv, 'python')
    code = 'import tox,sys; sys.stdout.write(str(tox.__version__))'
    # -s (skip user site-packages) is only available on Python >= 2.6
    if sys.version_info[:2] >= (2, 6):
        return crun('%s -s -c "%s"' % (py, code))
    return crun('%s -c "%s"' % (py, code))
def parse_simple_version(v):
    """Parse a dotted numeric version string into a list of ints.

    A simplified stand-in for pkg_resources.parse_version: only plain
    forms like '1.2.3' are supported.
    """
    return list(map(int, v.split('.')))
def pypi_get_latest_version(pkgname):
    """Return the latest release of `pkgname` on PyPI.

    Bug fix: the original queried 'tox' regardless of the `pkgname`
    argument; the parameter is now honored.
    """
    pypi = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')
    versions = pypi.package_releases(pkgname)
    assert versions
    versions.sort(key=parse_simple_version, reverse=True)
    return versions[0]
def ensuredir(p):
    """Create directory `p` (including parents) if it does not yet exist."""
    if not path.isdir(p):
        os.makedirs(p)
def cmdline(argv=None):
    """Bootstrap tox into .tox/_toxinstall, then run tox with `argv`.

    Side effects: changes into .tox/ (restored before invoking tox),
    mutates os.environ['PATH'], may download virtualenv.py, and may
    install/upgrade packages with pip. Exits the process with tox's
    return code on failure.
    """
    currentdir = os.getcwd()
    #os.chdir(path.abspath(path.dirname(__file__)))
    ensuredir('.tox')
    os.chdir('.tox')
    os.environ['PATH'] = os.path.abspath(TENV) + os.path.pathsep + os.environ['PATH']
    # create virtual environment
    if not path.isdir(TENV) or not has_script(TENV, 'python') or \
       not has_script(TENV, 'pip'):
        # get virtualenv.py
        if not path.isfile('virtualenv.py'):
            wget(VIRTUALENVPY_URL)
        assert path.isfile('virtualenv.py')
        # XXX: we use --no-site-packages because: if tox is installed in global
        # site-packages, then pip will not install it locally. ideal fix for
        # this should be to first look for tox in the global scripts/ directory
        run('%s virtualenv.py --no-site-packages --distribute %s' %
            (sys.executable, TENV))
    assert has_script(TENV, 'python'), 'no python script'
    assert has_script(TENV, 'pip'), 'no pip script'
    activate_path(TENV)
    pip = get_script_path(TENV, 'pip')
    # install/upgrade tox itself
    if USETOXDEV:
        if 'PIP_DOWNLOAD_CACHE' in os.environ:
            cache = ""
        else:
            cache = "--download-cache=_download"
            ensuredir('_download')
        run('%s install -q -i http://pypi.testrun.org '
            '--upgrade %s tox' % (pip, cache))
    elif any([
        not has_script(TENV, 'tox'),
        get_tox_version(TENV) != pypi_get_latest_version('tox')]):
        run('%s install --upgrade --download-cache=_download tox' % (pip,))
    toxversion = get_tox_version(TENV)
    assert has_script(TENV, 'tox')
    tox_script = path.abspath(get_script_path(TENV, 'tox'))
    logging.info('tox is installed at %s version %s', tox_script, toxversion)
    virtualenv = get_script_path(TENV, 'virtualenv')
    venv_version = crun('%s --version' % (virtualenv,)).strip()
    logging.info('virtualenv at %s version %s', virtualenv, venv_version)
    # XXX: virtualenv 1.5 is broken; replace it
    # NOTE(review): crun returns bytes on Python 3, so this str comparison
    # presumably never matches there — TODO confirm and compare b'1.5'.
    if venv_version == '1.5':
        logging.info(
            'Replacing the unstable virtualenv-1.5 with the latest stable')
        run('%s uninstall -y virtualenv' % (pip,))
        run('%s install virtualenv!=1.5' % (pip,))
    # Now run the locally-installed tox
    os.chdir(currentdir)
    try:
        run([tox_script] + (argv or []), shell=False)
    except CalledProcessError:
        # sys.exc_info dance keeps this compatible with old Python 2 syntax
        _, e, _ = sys.exc_info()
        logging.error('tox exited with error code %d', e.returncode)
        sys.exit(e.returncode)
# Script entry point: forward the CLI arguments (minus the program name).
if __name__ == '__main__':
    cmdline(sys.argv[1:])
Make toxbootstrap print its version as well.
#!/usr/bin/env python
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
tox-bootstrap
=============
A bootstrap to automatically install tox and dependencies on machines that may
not already have tox installed. This is useful when configuring a number of
Hudson quickly; only Python needs to be installed.
Getting started
---------------
::
$ cd my_project/
$ ls
. .. src/ doc/ setup.py tox.ini
$ curl http://pytox.googlecode.com/hg/toxbootstrap.py -O
Instead of running "tox", now you can just run "python toxbootstrap.py" which
will take care of installing tox (if not already installed into
``.tox/_toxinstall``)::
$ python toxbootstrap.py
If you're using Hudson_, you may also do::
import sys
sys.path.insert(0, '.') # sometimes necessary :/
import toxbootstrap
toxbootstrap.cmdline() # also accepts argv list
.. _Hudson: http://hudson-ci.org/
ToDo
----
1. Detect tox in ``$PATH`` (eg: ``C:\Python26\Scripts`` or
``%APPDATA%\Python\Scripts``)
2. Gracefully ignore PyPI xmlrpc downtime errors when checking for new release.
"""
__version__ = '0.9.1.dev1'
import sys
import os
from os import path
import logging
from subprocess import Popen, PIPE, check_call, CalledProcessError
# Opt-in flag: install tox from the dev package index instead of PyPI.
USETOXDEV=os.environ.get('USETOXDEV', False)
# Name of the bootstrap virtualenv directory, created under .tox/.
TENV='_toxinstall'
PY3 = sys.version_info[0] == 3
if PY3:
    from urllib.request import urlretrieve
    import xmlrpc.client as xmlrpclib
else:
    from urllib import urlretrieve
    import xmlrpclib
# Module import configures root logging as a side effect.
logging.basicConfig(level=logging.INFO)
# Last stable: 1.5.1
VIRTUALENVPY_URL = (
    'http://bitbucket.org/ianb/virtualenv/raw/eb94c9ebe0ba/virtualenv.py')
def run(cmd, shell=True):
    """Run the given command in shell.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    logging.info('Running command: %s', cmd)
    check_call(cmd, shell=shell)
def crun(cmd, shell=True):
    """Run the given command and return its captured standard output.

    Note: the return value is bytes under Python 3 (Popen default).
    """
    logging.info('Running command (for output): %s', cmd)
    p = Popen(cmd, stdout=PIPE, shell=shell)
    # stderr is None here because only stdout is piped
    stdout, stderr = p.communicate()
    return stdout
def wget(url):
    """Download the given file to current directory."""
    logging.info('Downloading %s', url)
    # local file is named after the URL's basename, in the current directory
    localpath = path.join(path.abspath(os.getcwd()), path.basename(url))
    urlretrieve(url, localpath)
def has_script(venv, name):
    """Check if the virtualenv has the given script.

    Looks for bin/$name (unix) or Scripts/$name[.exe] (windows) in the
    virtualenv.
    """
    if sys.platform == 'win32':
        # Windows scripts may or may not carry the .exe suffix
        return any([path.exists(path.join(venv, 'Scripts', name)),
                    path.exists(path.join(venv, 'Scripts', name + '.exe'))])
    else:
        return path.exists(path.join(venv, 'bin', name))
def activate_path(venv):
    """Prepend the virtualenv's script directory to os.environ['PATH'].

    (The original docstring claimed a return value; nothing is returned.)
    """
    if sys.platform == 'win32':
        p = path.abspath(path.join(venv, 'Scripts'))
    else:
        p = path.abspath(path.join(venv, 'bin'))
    assert path.exists(p), p
    os.environ['PATH'] = p + os.pathsep + os.environ['PATH']
    logging.info("added to PATH: %s", p)
def get_script_path(venv, name):
    """Return the full path to the script in virtualenv directory.

    Raises NameError if no such script exists.
    """
    if sys.platform == 'win32':
        p = path.join(venv, 'Scripts', name)
        if not path.exists(p):
            # fall back to the .exe variant on Windows
            p = path.join(venv, 'Scripts', name + '.exe')
    else:
        p = path.join(venv, 'bin', name)
    if not path.exists(p):
        raise NameError('cannot find a script named "%s"' % (name,))
    return p
def get_tox_version(venv):
    """Return the installed version of tox.

    Queries the virtualenv's own python so the answer reflects that
    environment, not the interpreter running this script.
    """
    py = get_script_path(venv, 'python')
    s = 'import tox,sys; sys.stdout.write(str(tox.__version__))'
    # -s (skip user site-packages) is only available on Python >= 2.6
    if sys.version_info[:2] >= (2, 6):
        return crun('%s -s -c "%s"' % (py, s))
    else:
        return crun('%s -c "%s"' % (py, s))
def parse_simple_version(v):
    """A simplified version of pkg_resources.parse_version

    Handles only plain dotted-integer versions such as ``1.2.3``.
    """
    return list(map(int, v.split('.')))
def pypi_get_latest_version(pkgname):
    """Return the latest version of ``pkgname`` from PyPI.

    Queries PyPI's XML-RPC interface and picks the highest version
    according to ``parse_simple_version``.
    """
    pypi = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')
    # Bug fix: the parameter was previously ignored and 'tox' was
    # hard-coded, so the function returned the wrong package's version.
    versions = pypi.package_releases(pkgname)
    assert versions
    versions.sort(key=parse_simple_version, reverse=True)
    return versions[0]
def ensuredir(p):
    """Create directory ``p`` (with parents) unless it already exists."""
    if path.isdir(p):
        return
    os.makedirs(p)
def cmdline(argv=None):
    """Bootstrap a local .tox virtualenv with tox installed, then run tox.

    Creates ``.tox/<TENV>`` (downloading virtualenv.py if needed),
    installs/upgrades tox into it, and finally re-invokes tox with
    ``argv``, exiting with tox's return code on failure.

    ``__version__``, ``TENV`` and ``USETOXDEV`` are module globals
    defined above the visible portion of this file.
    """
    logging.info('toxbootstrap version %s', __version__)
    # Remember where we started; the tox run at the end happens here.
    currentdir = os.getcwd()
    #os.chdir(path.abspath(path.dirname(__file__)))
    ensuredir('.tox')
    os.chdir('.tox')
    os.environ['PATH'] = os.path.abspath(TENV) + os.path.pathsep + os.environ['PATH']
    # create virtual environment
    if not path.isdir(TENV) or not has_script(TENV, 'python') or \
            not has_script(TENV, 'pip'):
        # get virtualenv.py
        if not path.isfile('virtualenv.py'):
            wget(VIRTUALENVPY_URL)
            assert path.isfile('virtualenv.py')
        # XXX: we use --no-site-packages because: if tox is installed in global
        # site-packages, then pip will not install it locally. ideal fix for
        # this should be to first look for tox in the global scripts/ directory
        run('%s virtualenv.py --no-site-packages --distribute %s' %
            (sys.executable, TENV))
    assert has_script(TENV, 'python'), 'no python script'
    assert has_script(TENV, 'pip'), 'no pip script'
    activate_path(TENV)
    pip = get_script_path(TENV, 'pip')
    # install/upgrade tox itself
    if USETOXDEV:
        # Respect an externally configured pip download cache.
        if 'PIP_DOWNLOAD_CACHE' in os.environ:
            cache = ""
        else:
            cache = "--download-cache=_download"
            ensuredir('_download')
        run('%s install -q -i http://pypi.testrun.org '
            '--upgrade %s tox' % (pip, cache))
    elif any([
            not has_script(TENV, 'tox'),
            get_tox_version(TENV) != pypi_get_latest_version('tox')]):
        run('%s install --upgrade --download-cache=_download tox' % (pip,))
    toxversion = get_tox_version(TENV)
    assert has_script(TENV, 'tox')
    tox_script = path.abspath(get_script_path(TENV, 'tox'))
    logging.info('tox is installed at %s version %s', tox_script, toxversion)
    virtualenv = get_script_path(TENV, 'virtualenv')
    venv_version = crun('%s --version' % (virtualenv,)).strip()
    logging.info('virtualenv at %s version %s', virtualenv, venv_version)
    # XXX: virtualenv 1.5 is broken; replace it
    # NOTE(review): crun() returns bytes on python 3, so this equality
    # check only matches on python 2 — TODO confirm intended runtime.
    if venv_version == '1.5':
        logging.info(
            'Replacing the unstable virtualenv-1.5 with the latest stable')
        run('%s uninstall -y virtualenv' % (pip,))
        run('%s install virtualenv!=1.5' % (pip,))
    # Now run the locally-installed tox
    os.chdir(currentdir)
    try:
        run([tox_script] + (argv or []), shell=False)
    except CalledProcessError:
        # Python-2/3 compatible way to grab the active exception.
        _, e, _ = sys.exc_info()
        logging.error('tox exited with error code %d', e.returncode)
        sys.exit(e.returncode)
# Entry point: forward the CLI arguments (minus the program name).
if __name__ == '__main__':
    cmdline(sys.argv[1:])
|
import numpy as np
from scipy.ndimage.measurements import maximum_position, label, find_objects
from scipy.ndimage.morphology import generate_binary_structure
def search_candidates(image, threshold, n_d_x, n_d_y):
    """Label bright regions of ``image`` and return candidate objects.

    :param image:
        2D numpy array.
    :param threshold:
        Percentile (0-100) of the image values used as the cut level.
    :param n_d_x:
        Minimum extent along x for a region to be kept.
    :param n_d_y:
        Minimum extent along y for a region to be kept.
    :return:
        Structured array with fields ``label``/``dx``/``dy``/``max_pos``,
        sorted by (dy, dx) in decreasing order.
    """
    # Convert the percentile into an absolute pixel-value cut.
    threshold = np.percentile(image.ravel(), threshold)
    a = image.copy()
    # Keep only tail of image values distribution with signal
    a[a < threshold] = 0
    # 8-connectivity: diagonal neighbours belong to the same region.
    s = generate_binary_structure(2, 2)
    # Label image
    labeled_array, num_features = label(a, structure=s)
    # Find objects
    objects = find_objects(labeled_array)
    # Container of object's properties
    _objects = np.empty(num_features, dtype=[('label', 'int'),
                                             ('dx', '<f8'),
                                             ('dy', '<f8'),
                                             ('max_pos', 'int',
                                              (2,))])
    labels = np.arange(num_features) + 1
    # Bounding-box extents per labelled object (slices are (y, x)).
    dx = [int(obj[1].stop - obj[1].start) for obj in objects]
    dy = [int(obj[0].stop - obj[0].start) for obj in objects]
    # Filling objects structured array
    _objects['label'] = labels
    _objects['dx'] = dx
    _objects['dy'] = dy
    # Classify objects
    # Boolean indexing yields a copy holding only regions larger than the
    # requested minimum size in BOTH directions.
    _objects = _objects[np.logical_and(_objects['dy'] > n_d_y,
                                       _objects['dx'] > n_d_x)]
    # Fetch positions of only successfuly classified objects
    # NOTE(review): behaviour when no region survives the size cut
    # (empty ``index``) is untested here — TODO confirm.
    _objects['max_pos'] = maximum_position(image, labels=labeled_array,
                                           index=_objects['label'])
    # Sort by (dy, dx) and reverse for descending order.
    _objects = _objects[np.lexsort((_objects['dx'], _objects['dy']))[::-1]]
    return _objects
1D search of peaks; functions for getting an image's properties
import numpy as np
from scipy.ndimage.measurements import maximum_position, label, find_objects
from scipy.ndimage.morphology import generate_binary_structure
from skimage.measure import regionprops
# TODO: add algorithm in 1D - just searching peaks in freq. averaged
# de-dispersed auto-spectra.
def find_peaks(array, nstd=4, med_width=30, gauss_width=2):
    """Return values of 1D ``array`` standing out above the noise level.

    The input is median-filtered, then smoothed with a 1D gaussian, and
    the smoothed values above ``nstd`` standard deviations of the
    smoothed array are returned.

    :param array:
        1D numpy array to search.
    :param nstd:
        Threshold in units of the smoothed array's standard deviation.
    :param med_width:
        Median filter window width.  ``scipy.signal.medfilt`` requires
        an odd kernel, so even values (including the old default, which
        used to raise ``ValueError``) are rounded up to the next odd.
    :param gauss_width:
        Sigma of the gaussian smoothing filter.
    :return:
        Array of smoothed VALUES above the threshold (not indices,
        despite the local name kept for compatibility with callers).
    """
    # Import the submodules explicitly: a bare ``import scipy`` does not
    # guarantee scipy.signal / scipy.ndimage are importable attributes.
    import scipy.signal
    import scipy.ndimage
    # medfilt rejects even kernel sizes; preserve odd inputs unchanged.
    kernel = med_width if med_width % 2 else med_width + 1
    array = scipy.signal.medfilt(array, kernel)
    # scipy.ndimage.filters is removed in modern scipy; use the
    # top-level ndimage function instead.
    garray = scipy.ndimage.gaussian_filter1d(array, gauss_width)
    ind = garray[garray > nstd * np.std(garray)]
    return ind
def max_pos(object, image):
    """
    Returns maximum position and widths in both direction.
    :param object:
        ``skimage.measure._regionprops._RegionProperties`` instance.
    :param image:
        Original image
    :return:
        Tuple of max position & 2 widths of region.
    """
    y0, x0, y1, x1 = object.bbox[0], object.bbox[1], object.bbox[2], object.bbox[3]
    # Locate the brightest pixel within the region's bounding box.
    patch = image[y0:y1, x0:x1]
    peak = np.unravel_index(patch.argmax(), patch.shape)
    # Translate back to full-image coordinates; widths are bbox extents.
    return (y0 + peak[0], x0 + peak[1]), y1 - y0, x1 - x0
def search_candidates(image, threshold, n_d_x, n_d_y):
    """Label bright regions of ``image`` and return candidate objects.

    :param image:
        2D numpy array.
    :param threshold:
        Percentile (0-100) of the image values used as the cut level.
    :param n_d_x:
        Minimum extent along x for a region to be kept.
    :param n_d_y:
        Minimum extent along y for a region to be kept.
    :return:
        Structured array with fields ``label``/``dx``/``dy``/``max_pos``,
        sorted by (dy, dx) in decreasing order.
    """
    # Convert the percentile into an absolute pixel-value cut.
    threshold = np.percentile(image.ravel(), threshold)
    a = image.copy()
    # Keep only tail of image values distribution with signal
    a[a < threshold] = 0
    # 8-connectivity: diagonal neighbours belong to the same region.
    s = generate_binary_structure(2, 2)
    # Label image
    labeled_array, num_features = label(a, structure=s)
    # Find objects
    objects = find_objects(labeled_array)
    # NOTE(review): ``props`` is computed but never used in this function.
    props = regionprops(labeled_array, intensity_image=image)
    # Container of object's properties
    _objects = np.empty(num_features, dtype=[('label', 'int'),
                                             ('dx', '<f8'),
                                             ('dy', '<f8'),
                                             ('max_pos', 'int',
                                              (2,))])
    labels = np.arange(num_features) + 1
    # Bounding-box extents per labelled object (slices are (y, x)).
    dx = [int(obj[1].stop - obj[1].start) for obj in objects]
    dy = [int(obj[0].stop - obj[0].start) for obj in objects]
    # Filling objects structured array
    _objects['label'] = labels
    _objects['dx'] = dx
    _objects['dy'] = dy
    # Classify objects
    # Boolean indexing yields a copy holding only regions larger than the
    # requested minimum size in BOTH directions.
    _objects = _objects[np.logical_and(_objects['dy'] > n_d_y,
                                       _objects['dx'] > n_d_x)]
    # Fetch positions of only successfuly classified objects
    # NOTE(review): behaviour when no region survives the size cut
    # (empty ``index``) is untested here — TODO confirm.
    _objects['max_pos'] = maximum_position(image, labels=labeled_array,
                                           index=_objects['label'])
    # Sort by (dy, dx) and reverse for descending order.
    _objects = _objects[np.lexsort((_objects['dx'], _objects['dy']))[::-1]]
    return _objects
def get_props(image, threshold):
    """
    Rerurn measured properties list of imaged labeled at specified threshold.
    :param image:
        Numpy 2D array with image.
    :param threshold:
        Threshold to label image. [0.-100.]
    :return:
        List of RegionProperties -
        (``skimage.measure._regionprops._RegionProperties`` instances)
    """
    # Percentile-based cut level, then zero everything below it.
    cutoff = np.percentile(image.ravel(), threshold)
    clipped = image.copy()
    clipped[clipped < cutoff] = 0
    # 8-connected labelling of the surviving pixels.
    structure = generate_binary_structure(2, 2)
    labeled_array, num_features = label(clipped, structure=structure)
    return regionprops(labeled_array, intensity_image=image)
|
try:
import matplotlib
matplotlib.use('Agg') # stops it from using X11 and breaking
import matplotlib.pyplot as plt
except:
matplotlib = None
try:
from mako.template import Template
from mako.lookup import TemplateLookup
except:
Template = None
TemplateLookup = None
try:
from lxml import etree
from lxml.etree import Element, SubElement
except:
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import Element, SubElement
from collections import defaultdict, OrderedDict
import re, os
pjoin = os.path.join
from io import StringIO
from textwrap import dedent
from datetime import datetime
class ParseError(Exception):
    """Raised when an input file is not a recognizable statistics document."""
    pass
def from_isoformat(t):
    """Parse an ISO-8601 timestamp with microseconds into a ``datetime``."""
    iso_fmt = "%Y-%m-%dT%H:%M:%S.%f"
    return datetime.strptime(t, iso_fmt)
class Webpage(object):
    """Render collected test Statistics into a static HTML report.

    ``generate()`` writes index.html, style.css and stats.js into
    ``fdir``, plus one PNG per plot.  matplotlib and mako are required
    and checked at construction time.
    """
    #ns = "{http://www.w3.org/1999/xhtml}"
    # Characters unsafe in element ids / filenames; replaced with '_'.
    space = re.compile('[ /:\n]')
    def __init__(self, stats, fdir, title):
        # Fail early when the optional rendering dependencies are absent
        # (the module-level try/except imports set them to None).
        if not matplotlib:
            raise ImportError("matplotlib not installed.")
        if not Template or not TemplateLookup:
            raise ImportError("mako not installed.")
        # Pre-compile the mako templates defined at module level.
        self.base = Template(base)
        self.statblock = Template(statblock)
        self.statdiv = Template(statdiv)
        self.generaldiv = Template(generaldiv)
        self.chronodiv = Template(chronodiv)
        #if not isinstance(stats, Statistics):
        #    raise TypeError("Input must be Statistics object.")
        self.stats = stats
        # NOTE(review): bare except swallows every makedirs failure, not
        # just "directory already exists".
        try: os.makedirs(fdir)
        except: pass
        self.fdir = fdir
        self.title = title
    def generate(self):
        """Build every report section and write the site files to ``fdir``."""
        footer = "Generated: %s" % datetime.utcnow().strftime("%Y-%m-%d %H:%M (UTC)")
        divs = []
        divs.append(self.generate_regressions())
        divs.append(self.generate_coverages())
        divs.append(self.generate_ambiguities())
        divs.append(self.generate_hfsts())
        # others
        out = self.base.render(dirname=self.title, divs=divs, footer=footer)
        f = open(pjoin(self.fdir, "index.html"), 'w')
        f.write(out)
        f.close()
        f = open(pjoin(self.fdir, "style.css"), 'w')
        f.write(css)
        f.close()
        f = open(pjoin(self.fdir, "stats.js"), 'w')
        f.write(js)
        f.close()
    def generate_regressions(self):
        """Render the 'Regression Tests' section (with plots) as HTML."""
        images = self.plot_regressions()
        divs = []
        stat_type = "regression"
        data = self.stats.get(stat_type)
        stat_type_title = "Regression Tests"
        for cfg, rev in data.items():
            # Revisions are OrderedDicts sorted by key (see Statistics).
            tsk = list(rev.keys())
            first = tsk[0]
            last = tsk[-1]
            avg = 0.0
            for i in rev.values():
                avg += float(i['Percent'])
            avg /= float(len(rev))
            gen_stats = {
                "First test": first,
                "Last test": last,
                "Average": avg
            }
            stat_title_human, stat_cksum = cfg, last
            stat_title = self.space.sub('_', stat_title_human.lower())
            general = self.generaldiv.render(stat_title=stat_title, stat_type=stat_type, gen_stats=gen_stats)
            chrono = self.chronodiv.render(stat_title=stat_title, stat_type=stat_type, chrono_stats=rev)
            stats = self.statdiv.render(stat_title_human=stat_title_human, stat_title=stat_title, stat_type=stat_type,
                                        stat_cksum=stat_cksum, chrono=chrono, general=general, images=images)
            divs.append(stats)
        return self.statblock.render(stat_type=stat_type, stat_type_title=stat_type_title, divs=divs)
    def generate_coverages(self):
        """Render the 'Coverage Tests' section as HTML."""
        # NOTE(review): the PNG filenames returned by plot_coverage() are
        # discarded and images is left empty, so the coverage plots are
        # written to disk but never linked from the page — intended?
        self.plot_coverage()
        images = []
        divs = []
        stat_type = "coverage"
        data = self.stats.get(stat_type)
        stat_type_title = "Coverage Tests"
        for cfg, rev in data.items():
            tsk = list(rev.keys())
            first = tsk[0]
            last = tsk[-1]
            avg = 0.0
            for i in rev.values():
                avg += float(i['Percent'])
            avg /= float(len(rev))
            gen_stats = {
                "First test": first,
                "Last test": last,
                "Average": avg
            }
            stat_title_human, stat_cksum = cfg, last
            stat_cksum = stat_cksum.upper()
            stat_title = self.space.sub('_', stat_title_human.lower())
            general = self.generaldiv.render(stat_title=stat_title, stat_type=stat_type, gen_stats=gen_stats)
            chrono = self.chronodiv.render(stat_title=stat_title, stat_type=stat_type, chrono_stats=rev)
            stats = self.statdiv.render(stat_title_human=stat_title_human, stat_title=stat_title, stat_type=stat_type,
                                        stat_cksum=stat_cksum, chrono=chrono, general=general, images=images)
            divs.append(stats)
        return self.statblock.render(stat_type=stat_type, stat_type_title=stat_type_title, divs=divs)
    def generate_ambiguities(self):
        """Render the 'Ambiguity Tests' section as HTML (no plots)."""
        images = []#self.plot_regressions()
        divs = []
        stat_type = "ambiguity"
        data = self.stats.get(stat_type)
        stat_type_title = "Ambiguity Tests"
        for cfg, rev in data.items():
            tsk = list(rev.keys())
            first = tsk[0]
            last = tsk[-1]
            avg = 0.0
            for i in rev.values():
                avg += float(i['Average'])
            avg /= float(len(rev))
            gen_stats = {
                "First test": first,
                "Last test": last,
                "Overall average": avg
            }
            stat_title_human, stat_cksum = cfg, last
            stat_cksum = stat_cksum.upper()
            stat_title = self.space.sub('_', stat_title_human.lower())
            general = self.generaldiv.render(stat_title=stat_title, stat_type=stat_type, gen_stats=gen_stats)
            chrono = self.chronodiv.render(stat_title=stat_title, stat_type=stat_type, chrono_stats=rev)
            stats = self.statdiv.render(stat_title_human=stat_title_human, stat_title=stat_title, stat_type=stat_type,
                                        stat_cksum=stat_cksum, chrono=chrono, general=general, images=images)
            divs.append(stats)
        return self.statblock.render(stat_type=stat_type, stat_type_title=stat_type_title, divs=divs)
    def generate_hfsts(self):
        """Render the 'Morph (HFST) Tests' section as HTML (no plots)."""
        images = []#self.plot_regressions()
        divs = []
        stat_type = "morph"
        data = self.stats.get(stat_type)
        stat_type_title = "Morph (HFST) Tests"
        for cfg, rev in data.items():
            tsk = list(rev.keys())
            first = tsk[0]
            last = tsk[-1]
            gen_stats = {
                "First test": first,
                "Last test": last
            }
            stat_title_human, stat_cksum = cfg, last
            stat_cksum = stat_cksum.upper()
            stat_title = self.space.sub('_', stat_title_human.lower())
            general = self.generaldiv.render(stat_title=stat_title, stat_type=stat_type, gen_stats=gen_stats)
            chrono = self.chronodiv.render(stat_title=stat_title, stat_type=stat_type, chrono_stats=rev)
            stats = self.statdiv.render(stat_title_human=stat_title_human, stat_title=stat_title, stat_type=stat_type,
                                        stat_cksum=stat_cksum, chrono=chrono, general=general, images=images)
            divs.append(stats)
        return self.statblock.render(stat_type=stat_type, stat_type_title=stat_type_title, divs=divs)
    # coverage over time
    # number of rules over time
    # mean ambiguity over time
    # number of dict entries over time
    # translation speed over time
    # WER/PER/BLEU over time
    # percentage of regression tests passed over time
    '''def plot_coverages(self):
        data = self.stats.get_coverages()
        out = []
        def coverage_over_time(title, data):
            plt.title(title)
            plt.xlabel("Time")
            plt.ylabel("Coverage (%)")
    '''
    def plot_coverage(self):
        """Plot coverage percent over revisions; one PNG per dictionary.

        Returns the list of PNG filenames written into ``self.fdir``.
        """
        coverage = self.stats.get('coverage')
        out = []
        for dictionary, revisions in coverage.items():
            title = "%s\n%s" % (dictionary, "Coverage Percentage Over Time")
            plt.title(title)
            plt.xlabel("Revision")
            plt.ylabel("Coverage (%)")
            x = list(revisions.keys())
            y = [ i['Percent'] for i in revisions.values() ]
            # Anchor the curve at the origin.
            x.insert(0, 0)
            y.insert(0, 0)
            plt.plot(x, y)
            # NOTE(review): leftover debug prints; x[1] raises IndexError
            # when a dictionary has no revisions at all.
            print(x[1], x[-1])
            print(x)
            print(y)
            # NOTE(review): the ``xmin`` kwarg is deprecated/removed in
            # newer matplotlib (use ``left=``) — TODO confirm target version.
            plt.xlim(xmin=x[0])
            png = "%s.png" % self.space.sub('_', title)
            plt.savefig(pjoin(self.fdir, png))
            out.append(png)
            plt.clf()
        return out
    def plot_regressions(self):
        """Plot regression pass-rate and raw counts; two PNGs per title.

        Returns the list of PNG filenames written into ``self.fdir``.
        """
        #def
        out = []
        regs = self.stats.get('regression')
        for title, reg in regs.items():
            t = "%s\n%s" % (title, "Passes over time")
            plt.title(t)
            plt.xlabel('Test ID')
            plt.ylabel('Passes (%)')
            x = range(len(reg))
            y = [[], [], [], []]
            # Columns: percent, total, passes, fails (per revision).
            for rev, vals in reg.items():
                y[0].append(vals['Percent'])
                y[1].append(vals['Total'])
                y[2].append(vals['Passes'])
                y[3].append(vals['Fails'])
            plt.plot(x, y[0])
            png = "%s.png" % self.space.sub('_', t)
            plt.savefig(pjoin(self.fdir, png))
            out.append(png)
            plt.clf()
            t = "%s\n%s" % (title, "Statistics")
            plt.title(t)
            plt.ylabel('Quantity')
            plt.plot(x, y[1], 'b', x, y[2], 'g', x, y[3], 'r')
            png = "%s.png" % self.space.sub('_', t)
            plt.savefig(pjoin(self.fdir, png))
            out.append(png)
            plt.clf()
        return out
class Statistics(object):
    """XML-backed store of apertium test results.

    Loads (or creates) a namespaced ``<statistics>`` document and exposes
    per-category accessors via ``get()``; new results are merged in with
    ``add()`` and persisted with ``write()``.
    """
    version = "0.1"
    type = "apertium"
    xmlns = "http://apertium.org/xml/statistics/0.1"
    # Clark-notation prefix used for every namespaced tag lookup.
    ns = "{%s}" % xmlns
    @staticmethod
    def node_equal(a, b):
        """Two elements are 'equal' when tag and attributes match."""
        return a.tag == b.tag and a.attrib == b.attrib
    def __init__(self, f=None):
        # Dispatch table from category name to its parser method.
        self.elements = {
            "general": self.get_general,
            "regression": self.get_regression,
            "coverage": self.get_coverage,
            "ambiguity": self.get_ambiguity,
            "morph": self.get_morph
        }
        # Allow construction without a backing file (in-memory only).
        if f is None:
            return
        self.f = f
        if os.path.exists(f):
            try:
                self.tree = etree.parse(open(f, 'rb'))
                if self.tree.getroot().tag == Statistics.ns + "statistics":
                    #if self.tree.getroot().get('version') == "1.0":
                    #print "[STUB] Do version specific crap here for 1.0"
                    #else:
                    #pass
                    #print "[DEBUG] Version incorrect."
                    self.root = self.tree.getroot()
                    #print "[DEBUG] Imported tree."
                else:
                    raise ParseError("File does not seem to be a statistics file.")
            except:
                raise
        else:
            # Create a fresh document; lxml and stdlib ElementTree declare
            # the default namespace differently.
            kwargs = {
                "type": Statistics.type,
                "version": Statistics.version
            }
            if etree.__name__ == "lxml.etree":
                kwargs['nsmap'] = {None: Statistics.xmlns}
            else:
                kwargs["xmlns"] = Statistics.xmlns
            self.root = Element(Statistics.ns + "statistics", **kwargs)
            self.tree = etree.ElementTree(self.root)
    def write(self):
        """Serialize the document back to the file given at construction."""
        self.tree.write(self.f, encoding="utf-8", xml_declaration=True)
    def add(self, parent, xml):
        """Merge an XML fragment under the category element ``parent``.

        ``xml`` is a string holding one second-level element containing a
        ``<revision>`` child.  Raises AttributeError for an unknown
        category.
        """
        ns = self.ns
        if parent not in self.elements:
            raise AttributeError("Element not supported.")
        # Get new node, fix namespace prefix
        old_node = None
        new_node = etree.fromstring(xml)
        if not new_node.tag.startswith(ns):
            new_node.tag = ns + new_node.tag
        # If parent node doesn't exist, create it
        parent_node = self.root.find(ns + parent)
        if parent_node is None:
            parent_node = SubElement(self.root, ns + parent)
        # Try to find an equal node for second level node
        # NOTE(review): getiterator() is deprecated/removed in modern
        # ElementTree — iter() is the replacement.
        for i in parent_node.getiterator(new_node.tag):
            if self.node_equal(new_node, i):
                old_node = i
                break
        if old_node is None:
            parent_node.append(new_node)
            return
        # Try to find an equal node for third level node
        # NOTE(review): find("revision") uses no namespace prefix; for a
        # namespaced child this returns None — TODO confirm input shape.
        rev_node = new_node.find("revision")
        for i in old_node.getiterator(rev_node.tag):
            a = i.attrib.get("value")
            b = rev_node.attrib.get("value")
            if not None in (a, b) and a == b:
                # NOTE(review): this only rebinds the loop variable; the
                # element already in the tree is NOT replaced, so matching
                # revisions are silently dropped rather than overwritten.
                i = rev_node # Overwrite old data
                return
        # Else append as no override required
        old_node.append(new_node.find("revision"))
    def get(self, tag):
        """Return the parsed data for category ``tag`` (dict; may be empty)."""
        if not tag in self.elements:
            raise AttributeError("Element not supported.")
        root = self.root.find(self.ns + tag)
        if root is None:
            return dict()
        # NOTE(review): this local is never used.
        out = defaultdict(dict)
        return self.elements[tag](root)
    def get_general(self, root):
        """Parse the <general> category (not implemented yet)."""
        return dict() # stub
    def get_regression(self, root):
        """Parse <regression> into {title: OrderedDict(revision: stats)}."""
        regressions = defaultdict(dict)
        for d in root.getiterator(self.ns + "title"):
            title = d.attrib['value']
            for rev in d.getiterator(self.ns + 'revision'):
                r = rev.attrib['value']
                regressions[title][r] = {
                    "Timestamp": rev.attrib['timestamp'],
                    "Percent": rev.find(self.ns + "percent").text,
                    "Total": rev.find(self.ns + "total").text,
                    "Passes": rev.find(self.ns + "passes").text,
                    "Fails": rev.find(self.ns + "fails").text
                }
        # Sort each title's revisions by revision key.
        out = dict()
        for k, v in regressions.items():
            out[k] = OrderedDict(sorted(v.items()))
        return out
    def get_coverage(self, root):
        """Parse <coverage> into {dictionary: OrderedDict(revision: stats)}."""
        coverages = defaultdict(dict)
        for d in root.getiterator(self.ns + "dictionary"):
            dct = d.attrib["value"]
            for rev in d.getiterator(self.ns + "revision"):
                r = rev.attrib['value']
                c = rev.find(self.ns + "corpus")
                coverages[dct][r] = OrderedDict({
                    "Checksum": rev.attrib["checksum"],
                    "Timestamp": rev.attrib['timestamp'],
                    "Corpus": "%s__%s" % (c.attrib["value"], c.attrib["checksum"]),
                    "Percent": rev.find(self.ns + "percent").text,
                    "Total": rev.find(self.ns + "total").text,
                    "Known": rev.find(self.ns + "known").text,
                    "Unknown": rev.find(self.ns + "unknown").text,
                    #'':'',
                    #"Top words:": ''#OrderedDict()
                })
                #for j in i.find("top").getiterator("word"):
                #    coverages[dct][rev][j.text] = j.attrib["count"]
                ##for j in i.find("top").getiterator("word"):
                ##    coverages[dct][rev]['top'][j.text] = j.attrib["count"]
        out = dict()
        for k, v in coverages.items():
            out[k] = OrderedDict(sorted(v.items()))
        return out
    def get_ambiguity(self, root):
        """Parse <ambiguity> into {dictionary: OrderedDict(revision: stats)}."""
        ambiguities = defaultdict(dict)
        for d in root.getiterator(self.ns + "dictionary"):
            dct = d.attrib["value"]
            for rev in d.getiterator(self.ns + "revision"):
                r = rev.attrib['value']
                ambiguities[dct][r] = {
                    "Checksum": rev.attrib["checksum"],
                    "Timestamp": rev.attrib['timestamp'],
                    "Surface forms": rev.find(self.ns + "surface-forms").text,
                    "Analyses": rev.find(self.ns + "analyses").text,
                    "Average": rev.find(self.ns + "average").text
                }
        out = dict()
        for k, v in ambiguities.items():
            out[k] = OrderedDict(sorted(v.items()))
        return out
    def get_morph(self, root):
        """Parse <morph> into {config: OrderedDict(revision: stats)}."""
        morphs = defaultdict(dict)
        for d in root.getiterator(self.ns + "config"):
            cfg = d.attrib["value"]
            for rev in d.getiterator(self.ns + "revision"):
                r = rev.attrib['value']
                g = rev.find(self.ns + "gen")
                m = rev.find(self.ns + "morph")
                morphs[cfg][r] = {
                    "Checksum": rev.attrib["checksum"],
                    "Timestamp": rev.attrib['timestamp'],
                    "Gen": "%s__%s" % (g.attrib['value'], g.attrib["checksum"]),
                    "Morph": "%s__%s" % (m.attrib['value'], m.attrib["checksum"]),
                    # NOTE(review): empty-string key appears deliberate
                    # (renders as a blank spacer row in the HTML table).
                    '':'',
                    #"Tests": OrderedDict(),
                    "Total": rev.find(self.ns + "total").text,
                    "Passes": rev.find(self.ns + "passes").text,
                    "Fails": rev.find(self.ns + "fails").text
                }
                #for j in i.find("tests").getiterator("test"):
                #    hfsts[cfg][rev]['tests'][j.text] = {
                #        "passes": j.attrib['passes'],
                #        "fails": j.attrib['fails']
                #    }
        out = dict()
        for k, v in morphs.items():
            out[k] = OrderedDict(sorted(v.items()))
        return out
css = """
* {
border: 0;
padding: 0; }
body {
background-color: #777777; }
div {
border: 1px solid black;
margin: 12px; }
h1, h2 {
margin: 0;
padding: 0;
padding-left: 12px;
font-variant: small-caps; }
h1 {
padding-top: 8px; }
table {
border-collapse: collapse; }
/*table, th, td {
border: 1px solid black;
}*/
div#container {
padding: 0;
margin: 0 auto;
width: 100%; }
div#header {
margin-top: 12px;
margin-bottom: 12px;
margin-left: 6px;
margin-right: 6px;
border-radius: 7px;
-moz-border-radius: 7px;
-webkit-border-radius: 7px;
background-color: white; }
div#header h1 {
margin-top: 6px; }
div#footer {
border: 0;
padding: 0;
margin: 0;
color: black;
text-align: center;
font-size: 9pt; }
div.s-container {
background-color: white;
border: 1px solid black;
margin-top: 12px;
margin-bottom: 12px;
margin-left: 6px;
margin-right: 6px;
border-radius: 7px;
-moz-border-radius: 7px;
-webkit-border-radius: 7px;
clear: both; }
div.s-container div.s-stats {
margin: 0;
padding: 0;
border: 0; }
div.s-container div.s-stats h1 {
border-top: 1px dotted black;
font-size: 16pt;
padding-left: 16px;
text-decoration: none; }
div.s-container div.s-stats h2 {
font-size: 8pt;
padding-left: 16px; }
div.s-container div.s-stats hr {
clear: both;
border: 0;
margin: 0;
padding: 0; }
div.s-container div.s-stats div.s-imgs {
margin-top: 12px;
margin-bottom: 12px;
margin-left: 6px;
margin-right: 6px;
border-radius: 7px;
-moz-border-radius: 7px;
-webkit-border-radius: 7px; }
div.s-container div.s-stats div.s-imgs img {
width: 267px;
height: 200px;
border: 1px solid black;
margin: 12px; }
div.s-container div.s-stats div.s-data {
margin-top: 12px;
margin-bottom: 12px;
margin-left: 6px;
margin-right: 6px;
border-radius: 7px;
-moz-border-radius: 7px;
-webkit-border-radius: 7px; }
div.s-container div.s-stats div.s-data h1 {
font-size: 14pt;
border: 0; }
div.s-container div.s-stats div.s-data div.s-general {
/*float: left;
margin-right: 0;
width: 47.75%;*/ }
div.s-container div.s-stats div.s-data div.s-general table {
margin: 12px; }
div.s-container div.s-stats div.s-data div.s-general table tr td {
padding-left: 6px;
padding-right: 6px;
text-align: right; }
div.s-container div.s-stats div.s-data div.s-general table tr td:nth-child(2) {
text-align: left; }
div.s-container div.s-stats div.s-data div.s-chrono {
/*float: right;
margin-left: 0;
width: 47.75%;*/ }
div.s-container div.s-stats div.s-data div.s-chrono ul li {
margin-left: 24px; }
div.s-container div.s-stats div.s-data div.s-chrono ul li div {
margin-left: -12px;
padding: 6px; }
div.s-container div.s-stats div.s-data div.s-chrono ul li div table {
margin: 12px; }
div.s-container div.s-stats div.s-data div.s-chrono ul li div table tr td {
padding-left: 6px;
padding-right: 6px;
text-align: right; }
div.s-container div.s-stats div.s-data div.s-chrono ul li div table tr td:nth-child(2) {
text-align: left; }
"""
js = """function toggle(id)
{
var div = document.getElementById(id);
if (div.style.display == 'block') {
div.style.display = 'none';
}
else {
div.style.display = 'block';
}
}
function init()
{
var cdivs = document.getElementsByClassName("cdiv");
for (var i = 0; i < cdivs.length; ++i) {
cdivs[i].style.display = "none";
}
}
window.addEventListener("load", init, false);
"""
base = """<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>Statistics - ${dirname}</title>
<script type="application/javascript" src="stats.js"></script>
<link rel="stylesheet" href="style.css" type="text/css" />
</head>
<body>
<div id="header">
<h1>${dirname}</h1>
</div>
<!-- divs gonna div -->
% for div in divs:
${div}
% endfor
<div id="footer">
${footer}
</div>
</body>
"""
statblock = """
<div id="${stat_type}" class="s-container">
<h1>${stat_type_title}</h1>
% for div in divs:
${div}
% endfor
</div>
"""
statdiv = """
<div id="${stat_type}-${stat_title}" class="s-stats">
<h1>${stat_title_human}</h1>
<h2>${stat_cksum}</h2>
<div id="${stat_type}-${stat_title}-imgs" class="s-imgs">
% for src in images:
<a href="${src}"><img src="${src}" /></a>
% endfor
</div>
<div id="${stat_type}-${stat_title}-data" class="s-data">
${general}
${chrono}
<hr />
</div>
</div>
"""
generaldiv = """
<div id="${stat_type}-${stat_title}-general" class="s-general">
<h1>General Statistics</h1>
<table>
% for left, right in gen_stats.items():
<tr>
<td>${left}:</td>
<td>${right}</td>
</tr>
% endfor
</table>
</div>
"""
chronodiv = """
<div id="${stat_type}-${stat_title}-chrono" class="s-chrono">
<h1>Chronological Statistics</h1>
<ul>
% for c, date in enumerate(reversed(list(chrono_stats.keys()))):
<li>
<a href="javascript:toggle('${stat_type}-${stat_title}-chrono-${c}-div')">${date}</a>
<div class="cdiv" id="${stat_type}-${stat_title}-chrono-${c}-div">
<table>
% for k, v in chrono_stats[date].items():
<%
if "percent" in k.lower():
v = "%s%%" % v
elif "__" in v:
tmp = v.rsplit('__', 1)
v = "%s (%s)" % (tmp[0], tmp[1].upper())
%>
<tr>
<td>${k}</td>
<td>${v}</td>
</tr>
% endfor
</table>
</div>
</li>
% endfor
</ul>
</div>
"""
NO TIME DEBUG
try:
import matplotlib
matplotlib.use('Agg') # stops it from using X11 and breaking
import matplotlib.pyplot as plt
except:
matplotlib = None
try:
from mako.template import Template
from mako.lookup import TemplateLookup
except:
Template = None
TemplateLookup = None
try:
from lxml import etree
from lxml.etree import Element, SubElement
except:
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import Element, SubElement
from collections import defaultdict, OrderedDict
import re, os
pjoin = os.path.join
from io import StringIO
from textwrap import dedent
from datetime import datetime
class ParseError(Exception):
    """Raised when an input file is not a recognizable statistics document."""
    pass
def from_isoformat(t):
    """Parse an ISO-8601 timestamp with microseconds into a ``datetime``."""
    iso_fmt = "%Y-%m-%dT%H:%M:%S.%f"
    return datetime.strptime(t, iso_fmt)
class Webpage(object):
    """Render collected test Statistics into a static HTML report.

    This is a near-verbatim duplicate of the Webpage class defined
    earlier in this combined file; the only code divergence is in
    ``plot_coverage`` (``plt.xlim(xmin=int(x[1]))`` here versus
    ``plt.xlim(xmin=x[0])`` in the other copy).
    """
    #ns = "{http://www.w3.org/1999/xhtml}"
    # Characters unsafe in element ids / filenames; replaced with '_'.
    space = re.compile('[ /:\n]')
    def __init__(self, stats, fdir, title):
        # Fail early when the optional rendering dependencies are absent.
        if not matplotlib:
            raise ImportError("matplotlib not installed.")
        if not Template or not TemplateLookup:
            raise ImportError("mako not installed.")
        # Pre-compile the mako templates defined at module level.
        self.base = Template(base)
        self.statblock = Template(statblock)
        self.statdiv = Template(statdiv)
        self.generaldiv = Template(generaldiv)
        self.chronodiv = Template(chronodiv)
        #if not isinstance(stats, Statistics):
        #    raise TypeError("Input must be Statistics object.")
        self.stats = stats
        # NOTE(review): bare except swallows every makedirs failure.
        try: os.makedirs(fdir)
        except: pass
        self.fdir = fdir
        self.title = title
    def generate(self):
        """Build every report section and write the site files to ``fdir``."""
        footer = "Generated: %s" % datetime.utcnow().strftime("%Y-%m-%d %H:%M (UTC)")
        divs = []
        divs.append(self.generate_regressions())
        divs.append(self.generate_coverages())
        divs.append(self.generate_ambiguities())
        divs.append(self.generate_hfsts())
        # others
        out = self.base.render(dirname=self.title, divs=divs, footer=footer)
        f = open(pjoin(self.fdir, "index.html"), 'w')
        f.write(out)
        f.close()
        f = open(pjoin(self.fdir, "style.css"), 'w')
        f.write(css)
        f.close()
        f = open(pjoin(self.fdir, "stats.js"), 'w')
        f.write(js)
        f.close()
    def generate_regressions(self):
        """Render the 'Regression Tests' section (with plots) as HTML."""
        images = self.plot_regressions()
        divs = []
        stat_type = "regression"
        data = self.stats.get(stat_type)
        stat_type_title = "Regression Tests"
        for cfg, rev in data.items():
            tsk = list(rev.keys())
            first = tsk[0]
            last = tsk[-1]
            avg = 0.0
            for i in rev.values():
                avg += float(i['Percent'])
            avg /= float(len(rev))
            gen_stats = {
                "First test": first,
                "Last test": last,
                "Average": avg
            }
            stat_title_human, stat_cksum = cfg, last
            stat_title = self.space.sub('_', stat_title_human.lower())
            general = self.generaldiv.render(stat_title=stat_title, stat_type=stat_type, gen_stats=gen_stats)
            chrono = self.chronodiv.render(stat_title=stat_title, stat_type=stat_type, chrono_stats=rev)
            stats = self.statdiv.render(stat_title_human=stat_title_human, stat_title=stat_title, stat_type=stat_type,
                                        stat_cksum=stat_cksum, chrono=chrono, general=general, images=images)
            divs.append(stats)
        return self.statblock.render(stat_type=stat_type, stat_type_title=stat_type_title, divs=divs)
    def generate_coverages(self):
        """Render the 'Coverage Tests' section as HTML."""
        # NOTE(review): plot_coverage()'s returned PNG names are discarded
        # and images stays empty, so the plots are never linked — intended?
        self.plot_coverage()
        images = []
        divs = []
        stat_type = "coverage"
        data = self.stats.get(stat_type)
        stat_type_title = "Coverage Tests"
        for cfg, rev in data.items():
            tsk = list(rev.keys())
            first = tsk[0]
            last = tsk[-1]
            avg = 0.0
            for i in rev.values():
                avg += float(i['Percent'])
            avg /= float(len(rev))
            gen_stats = {
                "First test": first,
                "Last test": last,
                "Average": avg
            }
            stat_title_human, stat_cksum = cfg, last
            stat_cksum = stat_cksum.upper()
            stat_title = self.space.sub('_', stat_title_human.lower())
            general = self.generaldiv.render(stat_title=stat_title, stat_type=stat_type, gen_stats=gen_stats)
            chrono = self.chronodiv.render(stat_title=stat_title, stat_type=stat_type, chrono_stats=rev)
            stats = self.statdiv.render(stat_title_human=stat_title_human, stat_title=stat_title, stat_type=stat_type,
                                        stat_cksum=stat_cksum, chrono=chrono, general=general, images=images)
            divs.append(stats)
        return self.statblock.render(stat_type=stat_type, stat_type_title=stat_type_title, divs=divs)
    def generate_ambiguities(self):
        """Render the 'Ambiguity Tests' section as HTML (no plots)."""
        images = []#self.plot_regressions()
        divs = []
        stat_type = "ambiguity"
        data = self.stats.get(stat_type)
        stat_type_title = "Ambiguity Tests"
        for cfg, rev in data.items():
            tsk = list(rev.keys())
            first = tsk[0]
            last = tsk[-1]
            avg = 0.0
            for i in rev.values():
                avg += float(i['Average'])
            avg /= float(len(rev))
            gen_stats = {
                "First test": first,
                "Last test": last,
                "Overall average": avg
            }
            stat_title_human, stat_cksum = cfg, last
            stat_cksum = stat_cksum.upper()
            stat_title = self.space.sub('_', stat_title_human.lower())
            general = self.generaldiv.render(stat_title=stat_title, stat_type=stat_type, gen_stats=gen_stats)
            chrono = self.chronodiv.render(stat_title=stat_title, stat_type=stat_type, chrono_stats=rev)
            stats = self.statdiv.render(stat_title_human=stat_title_human, stat_title=stat_title, stat_type=stat_type,
                                        stat_cksum=stat_cksum, chrono=chrono, general=general, images=images)
            divs.append(stats)
        return self.statblock.render(stat_type=stat_type, stat_type_title=stat_type_title, divs=divs)
    def generate_hfsts(self):
        """Render the 'Morph (HFST) Tests' section as HTML (no plots)."""
        images = []#self.plot_regressions()
        divs = []
        stat_type = "morph"
        data = self.stats.get(stat_type)
        stat_type_title = "Morph (HFST) Tests"
        for cfg, rev in data.items():
            tsk = list(rev.keys())
            first = tsk[0]
            last = tsk[-1]
            gen_stats = {
                "First test": first,
                "Last test": last
            }
            stat_title_human, stat_cksum = cfg, last
            stat_cksum = stat_cksum.upper()
            stat_title = self.space.sub('_', stat_title_human.lower())
            general = self.generaldiv.render(stat_title=stat_title, stat_type=stat_type, gen_stats=gen_stats)
            chrono = self.chronodiv.render(stat_title=stat_title, stat_type=stat_type, chrono_stats=rev)
            stats = self.statdiv.render(stat_title_human=stat_title_human, stat_title=stat_title, stat_type=stat_type,
                                        stat_cksum=stat_cksum, chrono=chrono, general=general, images=images)
            divs.append(stats)
        return self.statblock.render(stat_type=stat_type, stat_type_title=stat_type_title, divs=divs)
    # coverage over time
    # number of rules over time
    # mean ambiguity over time
    # number of dict entries over time
    # translation speed over time
    # WER/PER/BLEU over time
    # percentage of regression tests passed over time
    '''def plot_coverages(self):
        data = self.stats.get_coverages()
        out = []
        def coverage_over_time(title, data):
            plt.title(title)
            plt.xlabel("Time")
            plt.ylabel("Coverage (%)")
    '''
    def plot_coverage(self):
        """Plot coverage percent over revisions; one PNG per dictionary.

        Returns the list of PNG filenames written into ``self.fdir``.
        """
        coverage = self.stats.get('coverage')
        out = []
        for dictionary, revisions in coverage.items():
            title = "%s\n%s" % (dictionary, "Coverage Percentage Over Time")
            plt.title(title)
            plt.xlabel("Revision")
            plt.ylabel("Coverage (%)")
            x = list(revisions.keys())
            y = [ i['Percent'] for i in revisions.values() ]
            # Anchor the curve at the origin.
            x.insert(0, 0)
            y.insert(0, 0)
            plt.plot(x, y)
            # NOTE(review): leftover debug prints; x[1] raises IndexError
            # when a dictionary has no revisions at all.
            print(x[1], x[-1])
            print(x)
            print(y)
            # NOTE(review): diverges from the other Webpage copy (which
            # uses x[0]); int(x[1]) assumes revision keys are numeric
            # strings — TODO confirm.
            plt.xlim(xmin=int(x[1]))
            png = "%s.png" % self.space.sub('_', title)
            plt.savefig(pjoin(self.fdir, png))
            out.append(png)
            plt.clf()
        return out
    def plot_regressions(self):
        """Plot regression pass-rate and raw counts; two PNGs per title.

        Returns the list of PNG filenames written into ``self.fdir``.
        """
        #def
        out = []
        regs = self.stats.get('regression')
        for title, reg in regs.items():
            t = "%s\n%s" % (title, "Passes over time")
            plt.title(t)
            plt.xlabel('Test ID')
            plt.ylabel('Passes (%)')
            x = range(len(reg))
            y = [[], [], [], []]
            # Columns: percent, total, passes, fails (per revision).
            for rev, vals in reg.items():
                y[0].append(vals['Percent'])
                y[1].append(vals['Total'])
                y[2].append(vals['Passes'])
                y[3].append(vals['Fails'])
            plt.plot(x, y[0])
            png = "%s.png" % self.space.sub('_', t)
            plt.savefig(pjoin(self.fdir, png))
            out.append(png)
            plt.clf()
            t = "%s\n%s" % (title, "Statistics")
            plt.title(t)
            plt.ylabel('Quantity')
            plt.plot(x, y[1], 'b', x, y[2], 'g', x, y[3], 'r')
            png = "%s.png" % self.space.sub('_', t)
            plt.savefig(pjoin(self.fdir, png))
            out.append(png)
            plt.clf()
        return out
class Statistics(object):
    """Reader/writer for an Apertium statistics XML document.

    The document root is a namespaced ``<statistics>`` element holding one
    container per statistic kind (``regression``, ``coverage``, ``ambiguity``,
    ``morph``, ``general``); each container groups second-level nodes (e.g.
    ``<title>``, ``<dictionary>``, ``<config>``) which in turn hold
    ``<revision>`` entries.
    """

    version = "0.1"
    type = "apertium"
    xmlns = "http://apertium.org/xml/statistics/0.1"
    # Clark-notation prefix applied to every namespaced tag.
    ns = "{%s}" % xmlns

    @staticmethod
    def node_equal(a, b):
        # Two nodes represent the same statistic when tag and attributes
        # match; their <revision> children are merged separately by add().
        return a.tag == b.tag and a.attrib == b.attrib

    def __init__(self, f=None):
        """Load statistics from file ``f`` or start an empty document.

        If ``f`` is None, no tree or root is created; callers may attach
        ``self.root`` themselves.

        :raises ParseError: if ``f`` exists but is not a statistics file.
        """
        # Dispatch table mapping a statistic kind to its parser method.
        self.elements = {
            "general": self.get_general,
            "regression": self.get_regression,
            "coverage": self.get_coverage,
            "ambiguity": self.get_ambiguity,
            "morph": self.get_morph
        }
        if f is None:
            return
        self.f = f
        if os.path.exists(f):
            # Close the file promptly instead of leaking the handle.
            with open(f, 'rb') as fileobj:
                self.tree = etree.parse(fileobj)
            if self.tree.getroot().tag == Statistics.ns + "statistics":
                self.root = self.tree.getroot()
            else:
                raise ParseError("File does not seem to be a statistics file.")
        else:
            kwargs = {
                "type": Statistics.type,
                "version": Statistics.version
            }
            # lxml takes a namespace map; stdlib ElementTree wants a plain
            # xmlns attribute on the root element instead.
            if etree.__name__ == "lxml.etree":
                kwargs['nsmap'] = {None: Statistics.xmlns}
            else:
                kwargs["xmlns"] = Statistics.xmlns
            self.root = Element(Statistics.ns + "statistics", **kwargs)
            self.tree = etree.ElementTree(self.root)

    def write(self):
        """Serialize the tree back to the file given at construction."""
        self.tree.write(self.f, encoding="utf-8", xml_declaration=True)

    def add(self, parent, xml):
        """Merge one serialized statistic fragment into the document.

        :param parent: statistic kind; must be a key of ``self.elements``.
        :param xml: serialized second-level node containing one
            ``<revision>`` child.
        :raises AttributeError: if ``parent`` is not a supported kind.

        A revision whose ``value`` attribute matches an existing one replaces
        it; otherwise the new revision is appended.
        """
        ns = self.ns
        if parent not in self.elements:
            raise AttributeError("Element not supported.")
        # Parse the fragment and make sure its tag carries the namespace.
        new_node = etree.fromstring(xml)
        if not new_node.tag.startswith(ns):
            new_node.tag = ns + new_node.tag
        # If the container for this statistic kind doesn't exist, create it.
        parent_node = self.root.find(ns + parent)
        if parent_node is None:
            parent_node = SubElement(self.root, ns + parent)
        # Look for an existing second-level node equal to the new one.
        # (iter() replaces getiterator(), removed in Python 3.9.)
        old_node = None
        for candidate in parent_node.iter(new_node.tag):
            if self.node_equal(new_node, candidate):
                old_node = candidate
                break
        if old_node is None:
            parent_node.append(new_node)
            return
        # Merge at the third (revision) level: replace a revision carrying
        # the same value, otherwise append.  The original code rebound the
        # loop variable (``i = rev_node``), which never modified the tree.
        rev_node = new_node.find("revision")
        for rev in old_node.iter(rev_node.tag):
            a = rev.attrib.get("value")
            b = rev_node.attrib.get("value")
            if None not in (a, b) and a == b:
                old_node.remove(rev)
                old_node.append(rev_node)
                return
        old_node.append(rev_node)

    def get(self, tag):
        """Return parsed statistics for ``tag`` (empty dict if absent).

        :raises AttributeError: if ``tag`` is not a supported kind.
        """
        if tag not in self.elements:
            raise AttributeError("Element not supported.")
        root = self.root.find(self.ns + tag)
        if root is None:
            return dict()
        return self.elements[tag](root)

    def get_general(self, root):
        # Not implemented yet.
        return dict()  # stub

    def get_regression(self, root):
        """Parse <regression>: {title: OrderedDict(revision -> stats)}."""
        regressions = defaultdict(dict)
        for d in root.iter(self.ns + "title"):
            title = d.attrib['value']
            for rev in d.iter(self.ns + 'revision'):
                r = rev.attrib['value']
                regressions[title][r] = {
                    "Timestamp": rev.attrib['timestamp'],
                    "Percent": rev.find(self.ns + "percent").text,
                    "Total": rev.find(self.ns + "total").text,
                    "Passes": rev.find(self.ns + "passes").text,
                    "Fails": rev.find(self.ns + "fails").text
                }
        out = dict()
        # Sort revisions so callers can rely on chronological key order.
        for k, v in regressions.items():
            out[k] = OrderedDict(sorted(v.items()))
        return out

    def get_coverage(self, root):
        """Parse <coverage>: {dictionary: OrderedDict(revision -> stats)}."""
        coverages = defaultdict(dict)
        for d in root.iter(self.ns + "dictionary"):
            dct = d.attrib["value"]
            for rev in d.iter(self.ns + "revision"):
                r = rev.attrib['value']
                c = rev.find(self.ns + "corpus")
                coverages[dct][r] = OrderedDict({
                    "Checksum": rev.attrib["checksum"],
                    "Timestamp": rev.attrib['timestamp'],
                    # "name__checksum"; the renderer splits on "__".
                    "Corpus": "%s__%s" % (c.attrib["value"], c.attrib["checksum"]),
                    "Percent": rev.find(self.ns + "percent").text,
                    "Total": rev.find(self.ns + "total").text,
                    "Known": rev.find(self.ns + "known").text,
                    "Unknown": rev.find(self.ns + "unknown").text,
                })
        out = dict()
        for k, v in coverages.items():
            out[k] = OrderedDict(sorted(v.items()))
        return out

    def get_ambiguity(self, root):
        """Parse <ambiguity>: {dictionary: OrderedDict(revision -> stats)}."""
        ambiguities = defaultdict(dict)
        for d in root.iter(self.ns + "dictionary"):
            dct = d.attrib["value"]
            for rev in d.iter(self.ns + "revision"):
                r = rev.attrib['value']
                ambiguities[dct][r] = {
                    "Checksum": rev.attrib["checksum"],
                    "Timestamp": rev.attrib['timestamp'],
                    "Surface forms": rev.find(self.ns + "surface-forms").text,
                    "Analyses": rev.find(self.ns + "analyses").text,
                    "Average": rev.find(self.ns + "average").text
                }
        out = dict()
        for k, v in ambiguities.items():
            out[k] = OrderedDict(sorted(v.items()))
        return out

    def get_morph(self, root):
        """Parse <morph>: {config: OrderedDict(revision -> stats)}."""
        morphs = defaultdict(dict)
        for d in root.iter(self.ns + "config"):
            cfg = d.attrib["value"]
            for rev in d.iter(self.ns + "revision"):
                r = rev.attrib['value']
                g = rev.find(self.ns + "gen")
                m = rev.find(self.ns + "morph")
                morphs[cfg][r] = {
                    "Checksum": rev.attrib["checksum"],
                    "Timestamp": rev.attrib['timestamp'],
                    "Gen": "%s__%s" % (g.attrib['value'], g.attrib["checksum"]),
                    "Morph": "%s__%s" % (m.attrib['value'], m.attrib["checksum"]),
                    # Empty key/value pair — presumably a spacer row in the
                    # rendered table; kept as-is for output compatibility.
                    '': '',
                    "Total": rev.find(self.ns + "total").text,
                    "Passes": rev.find(self.ns + "passes").text,
                    "Fails": rev.find(self.ns + "fails").text
                }
        out = dict()
        for k, v in morphs.items():
            out[k] = OrderedDict(sorted(v.items()))
        return out
# --- Static assets and Mako templates for the generated HTML report. ---
# These strings are data, not logic: css/js are written out verbatim as
# style.css and stats.js; the remaining strings are Mako templates rendered
# by the generate_* methods above.

# Stylesheet written alongside the report as style.css.
css = """
* {
border: 0;
padding: 0; }
body {
background-color: #777777; }
div {
border: 1px solid black;
margin: 12px; }
h1, h2 {
margin: 0;
padding: 0;
padding-left: 12px;
font-variant: small-caps; }
h1 {
padding-top: 8px; }
table {
border-collapse: collapse; }
/*table, th, td {
border: 1px solid black;
}*/
div#container {
padding: 0;
margin: 0 auto;
width: 100%; }
div#header {
margin-top: 12px;
margin-bottom: 12px;
margin-left: 6px;
margin-right: 6px;
border-radius: 7px;
-moz-border-radius: 7px;
-webkit-border-radius: 7px;
background-color: white; }
div#header h1 {
margin-top: 6px; }
div#footer {
border: 0;
padding: 0;
margin: 0;
color: black;
text-align: center;
font-size: 9pt; }
div.s-container {
background-color: white;
border: 1px solid black;
margin-top: 12px;
margin-bottom: 12px;
margin-left: 6px;
margin-right: 6px;
border-radius: 7px;
-moz-border-radius: 7px;
-webkit-border-radius: 7px;
clear: both; }
div.s-container div.s-stats {
margin: 0;
padding: 0;
border: 0; }
div.s-container div.s-stats h1 {
border-top: 1px dotted black;
font-size: 16pt;
padding-left: 16px;
text-decoration: none; }
div.s-container div.s-stats h2 {
font-size: 8pt;
padding-left: 16px; }
div.s-container div.s-stats hr {
clear: both;
border: 0;
margin: 0;
padding: 0; }
div.s-container div.s-stats div.s-imgs {
margin-top: 12px;
margin-bottom: 12px;
margin-left: 6px;
margin-right: 6px;
border-radius: 7px;
-moz-border-radius: 7px;
-webkit-border-radius: 7px; }
div.s-container div.s-stats div.s-imgs img {
width: 267px;
height: 200px;
border: 1px solid black;
margin: 12px; }
div.s-container div.s-stats div.s-data {
margin-top: 12px;
margin-bottom: 12px;
margin-left: 6px;
margin-right: 6px;
border-radius: 7px;
-moz-border-radius: 7px;
-webkit-border-radius: 7px; }
div.s-container div.s-stats div.s-data h1 {
font-size: 14pt;
border: 0; }
div.s-container div.s-stats div.s-data div.s-general {
/*float: left;
margin-right: 0;
width: 47.75%;*/ }
div.s-container div.s-stats div.s-data div.s-general table {
margin: 12px; }
div.s-container div.s-stats div.s-data div.s-general table tr td {
padding-left: 6px;
padding-right: 6px;
text-align: right; }
div.s-container div.s-stats div.s-data div.s-general table tr td:nth-child(2) {
text-align: left; }
div.s-container div.s-stats div.s-data div.s-chrono {
/*float: right;
margin-left: 0;
width: 47.75%;*/ }
div.s-container div.s-stats div.s-data div.s-chrono ul li {
margin-left: 24px; }
div.s-container div.s-stats div.s-data div.s-chrono ul li div {
margin-left: -12px;
padding: 6px; }
div.s-container div.s-stats div.s-data div.s-chrono ul li div table {
margin: 12px; }
div.s-container div.s-stats div.s-data div.s-chrono ul li div table tr td {
padding-left: 6px;
padding-right: 6px;
text-align: right; }
div.s-container div.s-stats div.s-data div.s-chrono ul li div table tr td:nth-child(2) {
text-align: left; }
"""

# Client-side helper written out as stats.js: toggle() shows/hides a
# per-revision detail div; init() hides every .cdiv element on page load.
js = """function toggle(id)
{
var div = document.getElementById(id);
if (div.style.display == 'block') {
div.style.display = 'none';
}
else {
div.style.display = 'block';
}
}
function init()
{
var cdivs = document.getElementsByClassName("cdiv");
for (var i = 0; i < cdivs.length; ++i) {
cdivs[i].style.display = "none";
}
}
window.addEventListener("load", init, false);
"""

# Mako template: outer HTML page skeleton (expects dirname, divs, footer).
base = """<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>Statistics - ${dirname}</title>
<script type="application/javascript" src="stats.js"></script>
<link rel="stylesheet" href="style.css" type="text/css" />
</head>
<body>
<div id="header">
<h1>${dirname}</h1>
</div>
<!-- divs gonna div -->
% for div in divs:
${div}
% endfor
<div id="footer">
${footer}
</div>
</body>
"""

# Mako template: one section container per statistic type.
statblock = """
<div id="${stat_type}" class="s-container">
<h1>${stat_type_title}</h1>
% for div in divs:
${div}
% endfor
</div>
"""

# Mako template: one stat entry (images + general/chronological data).
statdiv = """
<div id="${stat_type}-${stat_title}" class="s-stats">
<h1>${stat_title_human}</h1>
<h2>${stat_cksum}</h2>
<div id="${stat_type}-${stat_title}-imgs" class="s-imgs">
% for src in images:
<a href="${src}"><img src="${src}" /></a>
% endfor
</div>
<div id="${stat_type}-${stat_title}-data" class="s-data">
${general}
${chrono}
<hr />
</div>
</div>
"""

# Mako template: key/value table of general statistics.
generaldiv = """
<div id="${stat_type}-${stat_title}-general" class="s-general">
<h1>General Statistics</h1>
<table>
% for left, right in gen_stats.items():
<tr>
<td>${left}:</td>
<td>${right}</td>
</tr>
% endfor
</table>
</div>
"""

# Mako template: collapsible per-revision tables, newest first; "__"-joined
# values are split into "name (CHECKSUM)" and percents get a "%" suffix.
chronodiv = """
<div id="${stat_type}-${stat_title}-chrono" class="s-chrono">
<h1>Chronological Statistics</h1>
<ul>
% for c, date in enumerate(reversed(list(chrono_stats.keys()))):
<li>
<a href="javascript:toggle('${stat_type}-${stat_title}-chrono-${c}-div')">${date}</a>
<div class="cdiv" id="${stat_type}-${stat_title}-chrono-${c}-div">
<table>
% for k, v in chrono_stats[date].items():
<%
if "percent" in k.lower():
v = "%s%%" % v
elif "__" in v:
tmp = v.rsplit('__', 1)
v = "%s (%s)" % (tmp[0], tmp[1].upper())
%>
<tr>
<td>${k}</td>
<td>${v}</td>
</tr>
% endfor
</table>
</div>
</li>
% endfor
</ul>
</div>
"""
import logging
from typing import Any, Dict, Optional, Union
from globus_sdk import client, paging, response, utils
from globus_sdk._types import UUIDLike
from globus_sdk.scopes import SearchScopes
from .data import SearchQuery, SearchScrollQuery
from .errors import SearchAPIError
log = logging.getLogger(__name__)
class SearchClient(client.BaseClient):
r"""
Client for the Globus Search API
This class provides helper methods for most common resources in the
API, and basic ``get``, ``put``, ``post``, and ``delete`` methods
from the base client that can be used to access any API resource.
:param authorizer: An authorizer instance used for all calls to
Globus Search
:type authorizer: :class:`GlobusAuthorizer \
<globus_sdk.authorizers.base.GlobusAuthorizer>`
**Methods**
.. automethodlist:: globus_sdk.SearchClient
"""
error_class = SearchAPIError
service_name = "search"
scopes = SearchScopes
#
# Index Management
#
@utils.doc_api_method("Get Index Metadata", "search/reference/index_show/")
def get_index(
    self, index_id: UUIDLike, *, query_params: Optional[Dict[str, Any]] = None
) -> response.GlobusHTTPResponse:
    """
    ``GET /v1/index/<index_id>``

    :param index_id: The index to look up
    :type index_id: str or UUID
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    >>> sc = globus_sdk.SearchClient(...)
    >>> index = sc.get_index(index_id)
    >>> assert index['id'] == index_id
    >>> print(index["display_name"],
    >>>       "(" + index_id + "):",
    >>>       index["description"])
    """
    log.info(f"SearchClient.get_index({index_id})")
    return self.get(f"/v1/index/{index_id}", query_params=query_params)
#
# Search queries
#
@utils.doc_api_method("GET Search Query", "search/reference/get_query/")
@paging.has_paginator(
    paging.HasNextPaginator,
    items_key="gmeta",
    get_page_size=lambda x: x["count"],
    max_total_results=10000,
    page_size=100,
)
def search(
    self,
    index_id: UUIDLike,
    q: str,
    *,
    offset: int = 0,
    limit: int = 10,
    advanced: bool = False,
    query_params: Optional[Dict[str, Any]] = None,
) -> response.GlobusHTTPResponse:
    """
    ``GET /v1/index/<index_id>/search``

    :param index_id: The index on which to search
    :type index_id: str or UUID
    :param q: The query string
    :type q: str
    :param offset: offset used in paging
    :type offset: int, optional
    :param limit: the number of results requested per page
    :type limit: int, optional
    :param advanced: passed through as the ``advanced`` query parameter
        (selects the service's advanced query syntax)
    :type advanced: bool, optional
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    >>> sc = globus_sdk.SearchClient(...)
    >>> result = sc.search(index_id, 'query string')
    >>> advanced_result = sc.search(index_id, 'author: "Ada Lovelace"',
    >>>                             advanced=True)
    """
    if query_params is None:
        query_params = {}
    # Explicit keyword arguments win over any duplicates in query_params.
    query_params.update(
        {
            "q": q,
            "offset": offset,
            "limit": limit,
            "advanced": advanced,
        }
    )
    log.info(f"SearchClient.search({index_id}, ...)")
    return self.get(f"/v1/index/{index_id}/search", query_params=query_params)
@utils.doc_api_method("POST Search Query", "search/reference/post_query")
@paging.has_paginator(
    paging.HasNextPaginator,
    items_key="gmeta",
    get_page_size=lambda x: x["count"],
    max_total_results=10000,
    page_size=100,
)
def post_search(
    self,
    index_id: UUIDLike,
    data: Union[Dict[str, Any], SearchQuery],
    *,
    offset: Optional[int] = None,
    limit: Optional[int] = None,
) -> response.GlobusHTTPResponse:
    """
    ``POST /v1/index/<index_id>/search``

    :param index_id: The index on which to search
    :type index_id: str or UUID
    :param data: A Search Query document containing the query and any other fields
    :type data: dict or SearchQuery
    :param offset: offset used in paging (overwrites any offset in ``data``)
    :type offset: int, optional
    :param limit: limit the number of results (overwrites any limit in ``data``)
    :type limit: int, optional

    **Examples**

    >>> sc = globus_sdk.SearchClient(...)
    >>> query_data = {
    >>>   "@datatype": "GSearchRequest",
    >>>   "q": "user query",
    >>>   "filters": [
    >>>     {
    >>>       "type": "range",
    >>>       "field_name": "path.to.date",
    >>>       "values": [
    >>>         {"from": "*",
    >>>          "to": "2014-11-07"}
    >>>       ]
    >>>     }
    >>>   ],
    >>>   "facets": [
    >>>     {"name": "Publication Date",
    >>>      "field_name": "path.to.date",
    >>>      "type": "date_histogram",
    >>>      "date_interval": "year"}
    >>>   ],
    >>>   "sort": [
    >>>     {"field_name": "path.to.date",
    >>>      "order": "asc"}
    >>>   ]
    >>> }
    >>> search_result = sc.post_search(index_id, query_data)
    """
    log.info(f"SearchClient.post_search({index_id}, ...)")
    add_kwargs = {}
    if offset is not None:
        add_kwargs["offset"] = offset
    if limit is not None:
        add_kwargs["limit"] = limit
    if add_kwargs:
        # Explicit paging args override any values carried in the document.
        data = {**data, **add_kwargs}
    # Leading slash added for consistency with every other route in this
    # client, so the path resolves the same way against the base URL.
    return self.post(f"/v1/index/{index_id}/search", data=data)
@utils.doc_api_method("Scroll Query", "search/reference/scroll_query")
@paging.has_paginator(paging.MarkerPaginator, items_key="gmeta")
def scroll(
    self,
    index_id: UUIDLike,
    data: Union[Dict[str, Any], SearchScrollQuery],
    *,
    marker: Optional[str] = None,
) -> response.GlobusHTTPResponse:
    """
    ``POST /v1/index/<index_id>/scroll``

    :param index_id: The index on which to search
    :type index_id: str or UUID
    :param data: A Search Scroll Query document
    :type data: dict or SearchScrollQuery
    :param marker: marker used in paging (overwrites any marker in ``data``)
    :type marker: str, optional

    **Examples**

    >>> sc = globus_sdk.SearchClient(...)
    >>> scroll_result = sc.scroll(index_id, {"q": "*"})
    """
    log.info(f"SearchClient.scroll({index_id}, ...)")
    add_kwargs = {}
    if marker is not None:
        add_kwargs["marker"] = marker
    if add_kwargs:
        # An explicit marker overrides any value carried in the document.
        data = {**data, **add_kwargs}
    # Leading slash added for consistency with every other route in this
    # client, so the path resolves the same way against the base URL.
    return self.post(f"/v1/index/{index_id}/scroll", data=data)
#
# Bulk data indexing
#
@utils.doc_api_method("Ingest", "search/reference/ingest")
def ingest(
    self, index_id: UUIDLike, data: Dict[str, Any]
) -> response.GlobusHTTPResponse:
    """
    ``POST /v1/index/<index_id>/ingest``

    :param index_id: The index into which to write data
    :type index_id: str or UUID
    :param data: an ingest document (``ingest_type`` + ``ingest_data``)
        to send as the request body
    :type data: dict

    **Examples**

    >>> sc = globus_sdk.SearchClient(...)
    >>> ingest_data = {
    >>>   "ingest_type": "GMetaEntry",
    >>>   "ingest_data": {
    >>>     "subject": "https://example.com/foo/bar",
    >>>     "visible_to": ["public"],
    >>>     "content": {
    >>>       "foo/bar": "some val"
    >>>     }
    >>>   }
    >>> }
    >>> sc.ingest(index_id, ingest_data)

    or with multiple entries at once via a GMetaList:

    >>> sc = globus_sdk.SearchClient(...)
    >>> ingest_data = {
    >>>   "ingest_type": "GMetaList",
    >>>   "ingest_data": {
    >>>     "gmeta": [
    >>>       {
    >>>         "subject": "https://example.com/foo/bar",
    >>>         "visible_to": ["public"],
    >>>         "content": {
    >>>           "foo/bar": "some val"
    >>>         }
    >>>       },
    >>>       {
    >>>         "subject": "https://example.com/foo/bar",
    >>>         "id": "otherentry",
    >>>         "visible_to": ["public"],
    >>>         "content": {
    >>>           "foo/bar": "some otherval"
    >>>         }
    >>>       }
    >>>     ]
    >>>   }
    >>> }
    >>> sc.ingest(index_id, ingest_data)
    """
    log.info(f"SearchClient.ingest({index_id}, ...)")
    return self.post(f"/v1/index/{index_id}/ingest", data=data)
#
# Bulk delete
#
@utils.doc_api_method("Delete By Query", "search/reference/delete_by_query")
def delete_by_query(
    self, index_id: UUIDLike, data: Dict[str, Any]
) -> response.GlobusHTTPResponse:
    """
    ``POST /v1/index/<index_id>/delete_by_query``

    :param index_id: The index from which to delete matching entries
    :type index_id: str or UUID
    :param data: a query document selecting the entries to delete
    :type data: dict

    **Examples**

    >>> sc = globus_sdk.SearchClient(...)
    >>> query_data = {
    >>>   "q": "user query",
    >>>   "filters": [
    >>>     {
    >>>       "type": "range",
    >>>       "field_name": "path.to.date",
    >>>       "values": [
    >>>         {"from": "*",
    >>>          "to": "2014-11-07"}
    >>>       ]
    >>>     }
    >>>   ]
    >>> }
    >>> sc.delete_by_query(index_id, query_data)
    """
    log.info(f"SearchClient.delete_by_query({index_id}, ...)")
    return self.post(f"/v1/index/{index_id}/delete_by_query", data=data)
#
# Subject Operations
#
@utils.doc_api_method("Get Subject", "search/reference/get_subject")
def get_subject(
    self,
    index_id: UUIDLike,
    subject: str,
    *,
    query_params: Optional[Dict[str, Any]] = None,
) -> response.GlobusHTTPResponse:
    """
    ``GET /v1/index/<index_id>/subject``

    :param index_id: The index in which to look up the subject
    :type index_id: str or UUID
    :param subject: The subject string to look up
    :type subject: str
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    Fetch the data for subject ``http://example.com/abc`` from index
    ``index_id``:

    >>> sc = globus_sdk.SearchClient(...)
    >>> subject_data = sc.get_subject(index_id, 'http://example.com/abc')
    """
    if query_params is None:
        query_params = {}
    query_params["subject"] = subject
    log.info(f"SearchClient.get_subject({index_id}, {subject}, ...)")
    return self.get(f"/v1/index/{index_id}/subject", query_params=query_params)
@utils.doc_api_method("Delete Subject", "search/reference/delete_subject")
def delete_subject(
    self,
    index_id: UUIDLike,
    subject: str,
    *,
    query_params: Optional[Dict[str, Any]] = None,
) -> response.GlobusHTTPResponse:
    """
    ``DELETE /v1/index/<index_id>/subject``

    :param index_id: The index from which to delete the subject
    :type index_id: str or UUID
    :param subject: The subject string whose data should be deleted
    :type subject: str
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    Delete all data for subject ``http://example.com/abc`` from index
    ``index_id``, even data which is not visible to the current user:

    >>> sc = globus_sdk.SearchClient(...)
    >>> sc.delete_subject(index_id, 'http://example.com/abc')
    """
    if query_params is None:
        query_params = {}
    query_params["subject"] = subject
    log.info(f"SearchClient.delete_subject({index_id}, {subject}, ...)")
    return self.delete(f"/v1/index/{index_id}/subject", query_params=query_params)
#
# Entry Operations
#
@utils.doc_api_method("Get Entry", "search/reference/get_entry")
def get_entry(
    self,
    index_id: UUIDLike,
    subject: str,
    *,
    entry_id: Optional[str] = None,
    query_params: Optional[Dict[str, Any]] = None,
) -> response.GlobusHTTPResponse:
    """
    ``GET /v1/index/<index_id>/entry``

    :param index_id: The index in which to look up the entry
    :type index_id: str or UUID
    :param subject: The subject of the entry
    :type subject: str
    :param entry_id: The entry_id of the entry; omit to select the null
        entry_id
    :type entry_id: str, optional
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    Lookup the entry with a subject of ``https://example.com/foo/bar`` and
    a null entry_id:

    >>> sc = globus_sdk.SearchClient(...)
    >>> entry_data = sc.get_entry(index_id, 'http://example.com/foo/bar')

    Lookup the entry with a subject of ``https://example.com/foo/bar`` and
    an entry_id of ``foo/bar``:

    >>> sc = globus_sdk.SearchClient(...)
    >>> entry_data = sc.get_entry(index_id, 'http://example.com/foo/bar',
    >>>                           entry_id='foo/bar')
    """
    if query_params is None:
        query_params = {}
    query_params["subject"] = subject
    if entry_id is not None:
        query_params["entry_id"] = entry_id
    # f-string logging for consistency with the rest of this module.
    log.info(f"SearchClient.get_entry({index_id}, {subject}, {entry_id}, ...)")
    return self.get(f"/v1/index/{index_id}/entry", query_params=query_params)
@utils.doc_api_method("Create Entry", "search/reference/create_or_update_entry")
def create_entry(
    self, index_id: UUIDLike, data: Dict[str, Any]
) -> response.GlobusHTTPResponse:
    """
    ``POST /v1/index/<index_id>/entry``

    :param index_id: The index in which to create the entry
    :type index_id: str or UUID
    :param data: the entry document (subject, visible_to, optional id,
        content) sent as the request body
    :type data: dict

    **Examples**

    Create an entry with a subject of ``https://example.com/foo/bar`` and
    a null entry_id:

    >>> sc = globus_sdk.SearchClient(...)
    >>> sc.create_entry(index_id, {
    >>>     "subject": "https://example.com/foo/bar",
    >>>     "visible_to": ["public"],
    >>>     "content": {
    >>>         "foo/bar": "some val"
    >>>     }
    >>> })

    Create an entry with a subject of ``https://example.com/foo/bar`` and
    an entry_id of ``foo/bar``:

    >>> sc = globus_sdk.SearchClient(...)
    >>> sc.create_entry(index_id, {
    >>>     "subject": "https://example.com/foo/bar",
    >>>     "visible_to": ["public"],
    >>>     "id": "foo/bar",
    >>>     "content": {
    >>>         "foo/bar": "some val"
    >>>     }
    >>> })
    """
    log.info(f"SearchClient.create_entry({index_id}, ...)")
    return self.post(f"/v1/index/{index_id}/entry", data=data)
@utils.doc_api_method("Update Entry", "search/reference/create_or_update_entry")
def update_entry(
    self, index_id: UUIDLike, data: Dict[str, Any]
) -> response.GlobusHTTPResponse:
    """
    ``PUT /v1/index/<index_id>/entry``

    :param index_id: The index in which to update the entry
    :type index_id: str or UUID
    :param data: the entry document sent as the request body
    :type data: dict

    **Examples**

    Update an entry with a subject of ``https://example.com/foo/bar`` and
    a null entry_id:

    >>> sc = globus_sdk.SearchClient(...)
    >>> sc.update_entry(index_id, {
    >>>     "subject": "https://example.com/foo/bar",
    >>>     "visible_to": ["public"],
    >>>     "content": {
    >>>         "foo/bar": "some val"
    >>>     }
    >>> })
    """
    log.info(f"SearchClient.update_entry({index_id}, ...)")
    return self.put(f"/v1/index/{index_id}/entry", data=data)
@utils.doc_api_method("Delete Entry", "search/reference/delete_entry")
def delete_entry(
    self,
    index_id: UUIDLike,
    subject: str,
    *,
    entry_id: Optional[str] = None,
    query_params: Optional[Dict[str, Any]] = None,
) -> response.GlobusHTTPResponse:
    """
    ``DELETE /v1/index/<index_id>/entry``

    :param index_id: The index from which to delete the entry
    :type index_id: str or UUID
    :param subject: The subject of the entry
    :type subject: str
    :param entry_id: The entry_id of the entry; omit to select the null
        entry_id
    :type entry_id: str, optional
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    Delete an entry with a subject of ``https://example.com/foo/bar`` and
    a null entry_id:

    >>> sc = globus_sdk.SearchClient(...)
    >>> sc.delete_entry(index_id, "https://example.com/foo/bar")

    Delete an entry with a subject of ``https://example.com/foo/bar`` and
    an entry_id of "foo/bar":

    >>> sc = globus_sdk.SearchClient(...)
    >>> sc.delete_entry(index_id, "https://example.com/foo/bar",
    >>>                 entry_id="foo/bar")
    """
    if query_params is None:
        query_params = {}
    query_params["subject"] = subject
    if entry_id is not None:
        query_params["entry_id"] = entry_id
    # f-string logging for consistency with the rest of this module.
    log.info(f"SearchClient.delete_entry({index_id}, {subject}, {entry_id}, ...)")
    return self.delete(f"/v1/index/{index_id}/entry", query_params=query_params)
#
# Task Management
#
@utils.doc_api_method("Get Task", "search/reference/get_task")
def get_task(
    self, task_id: UUIDLike, *, query_params: Optional[Dict[str, Any]] = None
) -> response.GlobusHTTPResponse:
    """
    ``GET /v1/task/<task_id>``

    :param task_id: The task to look up
    :type task_id: str or UUID
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    >>> sc = globus_sdk.SearchClient(...)
    >>> task = sc.get_task(task_id)
    >>> assert task['index_id'] == known_index_id
    >>> print(task["task_id"] + " | " + task['state'])
    """
    log.info(f"SearchClient.get_task({task_id})")
    return self.get(f"/v1/task/{task_id}", query_params=query_params)
@utils.doc_api_method("Task List", "search/reference/task_list")
def get_task_list(
    self, index_id: UUIDLike, *, query_params: Optional[Dict[str, Any]] = None
) -> response.GlobusHTTPResponse:
    """
    ``GET /v1/task_list/<index_id>``

    :param index_id: The index whose tasks should be listed
    :type index_id: str or UUID
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    >>> sc = globus_sdk.SearchClient(...)
    >>> task_list = sc.get_task_list(index_id)
    >>> for task in task_list['tasks']:
    >>>     print(task["task_id"] + " | " + task['state'])
    """
    log.info(f"SearchClient.get_task_list({index_id})")
    return self.get(f"/v1/task_list/{index_id}", query_params=query_params)
#
# Role Management
#
@utils.doc_api_method("Create Role", "search/reference/role_create/")
def create_role(
    self,
    index_id: UUIDLike,
    data: Dict[str, Any],
    *,
    query_params: Optional[Dict[str, Any]] = None,
) -> response.GlobusHTTPResponse:
    """
    ``POST /v1/index/<index_id>/role``

    Create a new role on an index. You must already have the ``owner`` or
    ``admin`` role on an index to create additional roles.

    Roles are specified as a role name (one of ``"owner"``, ``"admin"``, or
    ``"writer"``) and a `Principal URN
    <https://docs.globus.org/api/search/overview/#principal_urns>`_.

    :param index_id: The index on which to create the role
    :type index_id: uuid or str
    :param data: The partial role document to use for creation
    :type data: dict
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    >>> identity_id = "46bd0f56-e24f-11e5-a510-131bef46955c"
    >>> sc = globus_sdk.SearchClient(...)
    >>> sc.create_role(
    >>>     index_id,
    >>>     {
    >>>         "role_name": "writer",
    >>>         "principal": f"urn:globus:auth:identity:{identity_id}"
    >>>     }
    >>> )
    """
    log.info("SearchClient.create_role(%s, ...)", index_id)
    # The partial role document is sent as the request body.
    return self.post(
        f"/v1/index/{index_id}/role", data=data, query_params=query_params
    )
@utils.doc_api_method("Get Role List", "search/reference/role_list/")
def get_role_list(
    self, index_id: UUIDLike, *, query_params: Optional[Dict[str, Any]] = None
) -> response.GlobusHTTPResponse:
    """
    ``GET /v1/index/<index_id>/role_list``

    List all roles on an index. You must have the ``owner`` or ``admin``
    role on an index to list roles.

    :param index_id: The index on which to list roles
    :type index_id: uuid or str
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    >>> sc = globus_sdk.SearchClient(...)
    >>> role_list = sc.get_role_list(index_id)
    """
    log.info("SearchClient.get_role_list(%s)", index_id)
    return self.get(f"/v1/index/{index_id}/role_list", query_params=query_params)
@utils.doc_api_method("Role Delete", "search/reference/role_delete/")
def delete_role(
    self,
    index_id: UUIDLike,
    role_id: str,
    *,
    query_params: Optional[Dict[str, Any]] = None,
) -> response.GlobusHTTPResponse:
    """
    ``DELETE /v1/index/<index_id>/role/<role_id>``

    Delete a role from an index. You must have the ``owner`` or ``admin``
    role on an index to delete roles. You cannot remove the last ``owner`` from an
    index.

    :param index_id: The index from which to delete a role
    :type index_id: uuid or str
    :param role_id: The role to delete
    :type role_id: str
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional
    """
    log.info("SearchClient.delete_role(%s, %s)", index_id, role_id)
    return self.delete(
        f"/v1/index/{index_id}/role/{role_id}", query_params=query_params
    )
Fix index id key in doc example (#598)
import logging
from typing import Any, Dict, Optional, Union
from globus_sdk import client, paging, response, utils
from globus_sdk._types import UUIDLike
from globus_sdk.scopes import SearchScopes
from .data import SearchQuery, SearchScrollQuery
from .errors import SearchAPIError
log = logging.getLogger(__name__)
class SearchClient(client.BaseClient):
r"""
Client for the Globus Search API
This class provides helper methods for most common resources in the
API, and basic ``get``, ``put``, ``post``, and ``delete`` methods
from the base client that can be used to access any API resource.
:param authorizer: An authorizer instance used for all calls to
Globus Search
:type authorizer: :class:`GlobusAuthorizer \
<globus_sdk.authorizers.base.GlobusAuthorizer>`
**Methods**
.. automethodlist:: globus_sdk.SearchClient
"""
error_class = SearchAPIError
service_name = "search"
scopes = SearchScopes
#
# Index Management
#
@utils.doc_api_method("Get Index Metadata", "search/reference/index_show/")
def get_index(
    self, index_id: UUIDLike, *, query_params: Optional[Dict[str, Any]] = None
) -> response.GlobusHTTPResponse:
    """
    ``GET /v1/index/<index_id>``

    :param index_id: The index to look up
    :type index_id: str or UUID
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    >>> sc = globus_sdk.SearchClient(...)
    >>> index = sc.get_index(index_id)
    >>> assert index['id'] == index_id
    >>> print(index["display_name"],
    >>>       "(" + index_id + "):",
    >>>       index["description"])
    """
    log.info(f"SearchClient.get_index({index_id})")
    return self.get(f"/v1/index/{index_id}", query_params=query_params)
#
# Search queries
#
@utils.doc_api_method("GET Search Query", "search/reference/get_query/")
@paging.has_paginator(
    paging.HasNextPaginator,
    items_key="gmeta",
    get_page_size=lambda x: x["count"],
    max_total_results=10000,
    page_size=100,
)
def search(
    self,
    index_id: UUIDLike,
    q: str,
    *,
    offset: int = 0,
    limit: int = 10,
    advanced: bool = False,
    query_params: Optional[Dict[str, Any]] = None,
) -> response.GlobusHTTPResponse:
    """
    ``GET /v1/index/<index_id>/search``

    :param index_id: The index on which to search
    :type index_id: str or UUID
    :param q: The query string
    :type q: str
    :param offset: offset used in paging
    :type offset: int, optional
    :param limit: the number of results requested per page
    :type limit: int, optional
    :param advanced: passed through as the ``advanced`` query parameter
        (selects the service's advanced query syntax)
    :type advanced: bool, optional
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    >>> sc = globus_sdk.SearchClient(...)
    >>> result = sc.search(index_id, 'query string')
    >>> advanced_result = sc.search(index_id, 'author: "Ada Lovelace"',
    >>>                             advanced=True)
    """
    if query_params is None:
        query_params = {}
    # Explicit keyword arguments win over any duplicates in query_params.
    query_params.update(
        {
            "q": q,
            "offset": offset,
            "limit": limit,
            "advanced": advanced,
        }
    )
    log.info(f"SearchClient.search({index_id}, ...)")
    return self.get(f"/v1/index/{index_id}/search", query_params=query_params)
@utils.doc_api_method("POST Search Query", "search/reference/post_query")
@paging.has_paginator(
    paging.HasNextPaginator,
    items_key="gmeta",
    get_page_size=lambda x: x["count"],
    max_total_results=10000,
    page_size=100,
)
def post_search(
    self,
    index_id: UUIDLike,
    data: Union[Dict[str, Any], SearchQuery],
    *,
    offset: Optional[int] = None,
    limit: Optional[int] = None,
) -> response.GlobusHTTPResponse:
    """
    ``POST /v1/index/<index_id>/search``

    :param index_id: The index on which to search
    :type index_id: str or UUID
    :param data: A Search Query document containing the query and any other fields
    :type data: dict or SearchQuery
    :param offset: offset used in paging (overwrites any offset in ``data``)
    :type offset: int, optional
    :param limit: limit the number of results (overwrites any limit in ``data``)
    :type limit: int, optional

    **Examples**

    >>> sc = globus_sdk.SearchClient(...)
    >>> query_data = {
    >>>   "@datatype": "GSearchRequest",
    >>>   "q": "user query",
    >>>   "filters": [
    >>>     {
    >>>       "type": "range",
    >>>       "field_name": "path.to.date",
    >>>       "values": [
    >>>         {"from": "*",
    >>>          "to": "2014-11-07"}
    >>>       ]
    >>>     }
    >>>   ],
    >>>   "facets": [
    >>>     {"name": "Publication Date",
    >>>      "field_name": "path.to.date",
    >>>      "type": "date_histogram",
    >>>      "date_interval": "year"}
    >>>   ],
    >>>   "sort": [
    >>>     {"field_name": "path.to.date",
    >>>      "order": "asc"}
    >>>   ]
    >>> }
    >>> search_result = sc.post_search(index_id, query_data)
    """
    log.info(f"SearchClient.post_search({index_id}, ...)")
    add_kwargs = {}
    if offset is not None:
        add_kwargs["offset"] = offset
    if limit is not None:
        add_kwargs["limit"] = limit
    if add_kwargs:
        # Explicit paging args override any values carried in the document.
        data = {**data, **add_kwargs}
    # Leading slash added for consistency with every other route in this
    # client, so the path resolves the same way against the base URL.
    return self.post(f"/v1/index/{index_id}/search", data=data)
@utils.doc_api_method("Scroll Query", "search/reference/scroll_query")
@paging.has_paginator(paging.MarkerPaginator, items_key="gmeta")
def scroll(
    self,
    index_id: UUIDLike,
    data: Union[Dict[str, Any], SearchScrollQuery],
    *,
    marker: Optional[str] = None,
) -> response.GlobusHTTPResponse:
    """
    ``POST /v1/index/<index_id>/scroll``

    :param index_id: The index on which to search
    :type index_id: str or UUID
    :param data: A Search Scroll Query document
    :type data: dict or SearchScrollQuery
    :param marker: marker used in paging (overwrites any marker in ``data``)
    :type marker: str, optional

    **Examples**

    >>> sc = globus_sdk.SearchClient(...)
    >>> scroll_result = sc.scroll(index_id, {"q": "*"})
    """
    log.info(f"SearchClient.scroll({index_id}, ...)")
    add_kwargs = {}
    if marker is not None:
        add_kwargs["marker"] = marker
    if add_kwargs:
        # An explicit marker overrides any value carried in the document.
        data = {**data, **add_kwargs}
    # Leading slash added for consistency with every other route in this
    # client, so the path resolves the same way against the base URL.
    return self.post(f"/v1/index/{index_id}/scroll", data=data)
#
# Bulk data indexing
#
@utils.doc_api_method("Ingest", "search/reference/ingest")
def ingest(
    self, index_id: UUIDLike, data: Dict[str, Any]
) -> response.GlobusHTTPResponse:
    """
    ``POST /v1/index/<index_id>/ingest``

    :param index_id: The index into which to write data
    :type index_id: str or UUID
    :param data: an ingest document (``ingest_type`` + ``ingest_data``)
        to send as the request body
    :type data: dict

    **Examples**

    >>> sc = globus_sdk.SearchClient(...)
    >>> ingest_data = {
    >>>   "ingest_type": "GMetaEntry",
    >>>   "ingest_data": {
    >>>     "subject": "https://example.com/foo/bar",
    >>>     "visible_to": ["public"],
    >>>     "content": {
    >>>       "foo/bar": "some val"
    >>>     }
    >>>   }
    >>> }
    >>> sc.ingest(index_id, ingest_data)

    or with multiple entries at once via a GMetaList:

    >>> sc = globus_sdk.SearchClient(...)
    >>> ingest_data = {
    >>>   "ingest_type": "GMetaList",
    >>>   "ingest_data": {
    >>>     "gmeta": [
    >>>       {
    >>>         "subject": "https://example.com/foo/bar",
    >>>         "visible_to": ["public"],
    >>>         "content": {
    >>>           "foo/bar": "some val"
    >>>         }
    >>>       },
    >>>       {
    >>>         "subject": "https://example.com/foo/bar",
    >>>         "id": "otherentry",
    >>>         "visible_to": ["public"],
    >>>         "content": {
    >>>           "foo/bar": "some otherval"
    >>>         }
    >>>       }
    >>>     ]
    >>>   }
    >>> }
    >>> sc.ingest(index_id, ingest_data)
    """
    log.info(f"SearchClient.ingest({index_id}, ...)")
    return self.post(f"/v1/index/{index_id}/ingest", data=data)
#
# Bulk delete
#
@utils.doc_api_method("Delete By Query", "search/reference/delete_by_query")
def delete_by_query(
    self, index_id: UUIDLike, data: Dict[str, Any]
) -> response.GlobusHTTPResponse:
    """
    ``POST /v1/index/<index_id>/delete_by_query``

    Delete all documents in an index which match a query.

    :param index_id: The index in which to delete data
    :type index_id: str or UUID
    :param data: A query document describing what to delete
    :type data: dict

    **Examples**

    >>> sc = globus_sdk.SearchClient(...)
    >>> query_data = {
    >>>     "q": "user query",
    >>>     "filters": [
    >>>         {
    >>>             "type": "range",
    >>>             "field_name": "path.to.date",
    >>>             "values": [{"from": "*", "to": "2014-11-07"}],
    >>>         }
    >>>     ],
    >>> }
    >>> sc.delete_by_query(index_id, query_data)
    """
    log.info(f"SearchClient.delete_by_query({index_id}, ...)")
    path = f"/v1/index/{index_id}/delete_by_query"
    return self.post(path, data=data)
#
# Subject Operations
#
@utils.doc_api_method("Get Subject", "search/reference/get_subject")
def get_subject(
    self,
    index_id: UUIDLike,
    subject: str,
    *,
    query_params: Optional[Dict[str, Any]] = None,
) -> response.GlobusHTTPResponse:
    """
    ``GET /v1/index/<index_id>/subject``

    Fetch the full document for a single subject in an index.

    :param index_id: The index from which to fetch the subject
    :type index_id: str or UUID
    :param subject: The subject to fetch
    :type subject: str
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    Fetch the data for subject ``http://example.com/abc`` from index
    ``index_id``:

    >>> sc = globus_sdk.SearchClient(...)
    >>> subject_data = sc.get_subject(index_id, 'http://example.com/abc')
    """
    # merge into a fresh dict rather than writing into the caller's
    # query_params, so the argument is never mutated as a side effect
    query_params = {**(query_params or {}), "subject": subject}
    log.info(f"SearchClient.get_subject({index_id}, {subject}, ...)")
    return self.get(f"/v1/index/{index_id}/subject", query_params=query_params)
@utils.doc_api_method("Delete Subject", "search/reference/delete_subject")
def delete_subject(
    self,
    index_id: UUIDLike,
    subject: str,
    *,
    query_params: Optional[Dict[str, Any]] = None,
) -> response.GlobusHTTPResponse:
    """
    ``DELETE /v1/index/<index_id>/subject``

    Delete all data for a subject in an index.

    :param index_id: The index from which to delete the subject
    :type index_id: str or UUID
    :param subject: The subject to delete
    :type subject: str
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    Delete all data for subject ``http://example.com/abc`` from index
    ``index_id``, even data which is not visible to the current user:

    >>> sc = globus_sdk.SearchClient(...)
    >>> sc.delete_subject(index_id, 'http://example.com/abc')
    """
    # merge into a fresh dict rather than writing into the caller's
    # query_params, so the argument is never mutated as a side effect
    query_params = {**(query_params or {}), "subject": subject}
    log.info(f"SearchClient.delete_subject({index_id}, {subject}, ...)")
    return self.delete(f"/v1/index/{index_id}/subject", query_params=query_params)
#
# Entry Operations
#
@utils.doc_api_method("Get Entry", "search/reference/get_entry")
def get_entry(
    self,
    index_id: UUIDLike,
    subject: str,
    *,
    entry_id: Optional[str] = None,
    query_params: Optional[Dict[str, Any]] = None,
) -> response.GlobusHTTPResponse:
    """
    ``GET /v1/index/<index_id>/entry``

    Fetch a single entry, identified by subject and (optionally) entry_id.

    :param index_id: The index from which to fetch the entry
    :type index_id: str or UUID
    :param subject: The subject of the entry
    :type subject: str
    :param entry_id: The entry_id of the entry; omit to fetch the entry
        with a null entry_id
    :type entry_id: str, optional
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    Lookup the entry with a subject of ``https://example.com/foo/bar`` and
    a null entry_id:

    >>> sc = globus_sdk.SearchClient(...)
    >>> entry_data = sc.get_entry(index_id, 'http://example.com/foo/bar')

    Lookup the entry with a subject of ``https://example.com/foo/bar`` and
    an entry_id of ``foo/bar``:

    >>> sc = globus_sdk.SearchClient(...)
    >>> entry_data = sc.get_entry(index_id, 'http://example.com/foo/bar',
    >>>                           entry_id='foo/bar')
    """
    # merge into a fresh dict rather than writing into the caller's
    # query_params, so the argument is never mutated as a side effect
    query_params = {**(query_params or {}), "subject": subject}
    if entry_id is not None:
        query_params["entry_id"] = entry_id
    log.info(f"SearchClient.get_entry({index_id}, {subject}, {entry_id}, ...)")
    return self.get(f"/v1/index/{index_id}/entry", query_params=query_params)
@utils.doc_api_method("Create Entry", "search/reference/create_or_update_entry")
def create_entry(
    self, index_id: UUIDLike, data: Dict[str, Any]
) -> response.GlobusHTTPResponse:
    """
    ``POST /v1/index/<index_id>/entry``

    Create a single entry in an index.

    :param index_id: The index in which to create the entry
    :type index_id: str or UUID
    :param data: The entry document to create
    :type data: dict

    **Examples**

    Create an entry with a subject of ``https://example.com/foo/bar`` and
    a null entry_id:

    >>> sc = globus_sdk.SearchClient(...)
    >>> sc.create_entry(index_id, {
    >>>     "subject": "https://example.com/foo/bar",
    >>>     "visible_to": ["public"],
    >>>     "content": {"foo/bar": "some val"},
    >>> })

    Create an entry with a subject of ``https://example.com/foo/bar`` and
    an entry_id of ``foo/bar``:

    >>> sc = globus_sdk.SearchClient(...)
    >>> sc.create_entry(index_id, {
    >>>     "subject": "https://example.com/foo/bar",
    >>>     "visible_to": ["public"],
    >>>     "id": "foo/bar",
    >>>     "content": {"foo/bar": "some val"},
    >>> })
    """
    log.info(f"SearchClient.create_entry({index_id}, ...)")
    path = f"/v1/index/{index_id}/entry"
    return self.post(path, data=data)
@utils.doc_api_method("Update Entry", "search/reference/create_or_update_entry")
def update_entry(
    self, index_id: UUIDLike, data: Dict[str, Any]
) -> response.GlobusHTTPResponse:
    """
    ``PUT /v1/index/<index_id>/entry``

    Create or replace a single entry in an index.

    :param index_id: The index in which to update the entry
    :type index_id: str or UUID
    :param data: The entry document to write
    :type data: dict

    **Examples**

    Update an entry with a subject of ``https://example.com/foo/bar`` and
    a null entry_id:

    >>> sc = globus_sdk.SearchClient(...)
    >>> sc.update_entry(index_id, {
    >>>     "subject": "https://example.com/foo/bar",
    >>>     "visible_to": ["public"],
    >>>     "content": {"foo/bar": "some val"},
    >>> })
    """
    log.info(f"SearchClient.update_entry({index_id}, ...)")
    path = f"/v1/index/{index_id}/entry"
    return self.put(path, data=data)
@utils.doc_api_method("Delete Entry", "search/reference/delete_entry")
def delete_entry(
    self,
    index_id: UUIDLike,
    subject: str,
    *,
    entry_id: Optional[str] = None,
    query_params: Optional[Dict[str, Any]] = None,
) -> response.GlobusHTTPResponse:
    """
    ``DELETE /v1/index/<index_id>/entry``

    Delete a single entry, identified by subject and (optionally) entry_id.

    :param index_id: The index from which to delete the entry
    :type index_id: str or UUID
    :param subject: The subject of the entry
    :type subject: str
    :param entry_id: The entry_id of the entry; omit to delete the entry
        with a null entry_id
    :type entry_id: str, optional
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    Delete an entry with a subject of ``https://example.com/foo/bar`` and
    a null entry_id:

    >>> sc = globus_sdk.SearchClient(...)
    >>> sc.delete_entry(index_id, "https://example.com/foo/bar")

    Delete an entry with a subject of ``https://example.com/foo/bar`` and
    an entry_id of "foo/bar":

    >>> sc = globus_sdk.SearchClient(...)
    >>> sc.delete_entry(index_id, "https://example.com/foo/bar",
    >>>                 entry_id="foo/bar")
    """
    # merge into a fresh dict rather than writing into the caller's
    # query_params, so the argument is never mutated as a side effect
    query_params = {**(query_params or {}), "subject": subject}
    if entry_id is not None:
        query_params["entry_id"] = entry_id
    log.info(f"SearchClient.delete_entry({index_id}, {subject}, {entry_id}, ...)")
    return self.delete(f"/v1/index/{index_id}/entry", query_params=query_params)
#
# Task Management
#
@utils.doc_api_method("Get Task", "search/reference/get_task")
def get_task(
    self, task_id: UUIDLike, *, query_params: Optional[Dict[str, Any]] = None
) -> response.GlobusHTTPResponse:
    """
    ``GET /v1/task/<task_id>``

    Fetch the state of a single task.

    :param task_id: The task to look up
    :type task_id: str or UUID
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    >>> sc = globus_sdk.SearchClient(...)
    >>> task = sc.get_task(task_id)
    >>> assert task['index_id'] == known_index_id
    >>> print(task["task_id"] + " | " + task['state'])
    """
    log.info(f"SearchClient.get_task({task_id})")
    path = f"/v1/task/{task_id}"
    return self.get(path, query_params=query_params)
@utils.doc_api_method("Task List", "search/reference/task_list")
def get_task_list(
    self, index_id: UUIDLike, *, query_params: Optional[Dict[str, Any]] = None
) -> response.GlobusHTTPResponse:
    """
    ``GET /v1/task_list/<index_id>``

    List recent tasks on an index.

    :param index_id: The index whose tasks to list
    :type index_id: str or UUID
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    >>> sc = globus_sdk.SearchClient(...)
    >>> task_list = sc.get_task_list(index_id)
    >>> for task in task_list['tasks']:
    >>>     print(task["task_id"] + " | " + task['state'])
    """
    log.info(f"SearchClient.get_task_list({index_id})")
    path = f"/v1/task_list/{index_id}"
    return self.get(path, query_params=query_params)
#
# Role Management
#
@utils.doc_api_method("Create Role", "search/reference/role_create/")
def create_role(
    self,
    index_id: UUIDLike,
    data: Dict[str, Any],
    *,
    query_params: Optional[Dict[str, Any]] = None,
) -> response.GlobusHTTPResponse:
    """
    ``POST /v1/index/<index_id>/role``

    Create a new role on an index. You must already have the ``owner`` or
    ``admin`` role on an index to create additional roles.

    Roles are specified as a role name (one of ``"owner"``, ``"admin"``, or
    ``"writer"``) and a `Principal URN
    <https://docs.globus.org/api/search/overview/#principal_urns>`_.

    :param index_id: The index on which to create the role
    :type index_id: uuid or str
    :param data: The partial role document to use for creation
    :type data: dict
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional

    **Examples**

    >>> identity_id = "46bd0f56-e24f-11e5-a510-131bef46955c"
    >>> sc = globus_sdk.SearchClient(...)
    >>> sc.create_role(
    >>>     index_id,
    >>>     {
    >>>         "role_name": "writer",
    >>>         "principal": f"urn:globus:auth:identity:{identity_id}"
    >>>     }
    >>> )
    """
    log.info("SearchClient.create_role(%s, ...)", index_id)
    path = f"/v1/index/{index_id}/role"
    return self.post(path, data=data, query_params=query_params)
@utils.doc_api_method("Get Role List", "search/reference/role_list/")
def get_role_list(
    self, index_id: UUIDLike, *, query_params: Optional[Dict[str, Any]] = None
) -> response.GlobusHTTPResponse:
    """
    ``GET /v1/index/<index_id>/role_list``

    List all roles on an index. You must have the ``owner`` or ``admin``
    role on an index to list roles.

    :param index_id: The index on which to list roles
    :type index_id: uuid or str
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional
    """
    log.info("SearchClient.get_role_list(%s)", index_id)
    path = f"/v1/index/{index_id}/role_list"
    return self.get(path, query_params=query_params)
@utils.doc_api_method("Role Delete", "search/reference/role_delete/")
def delete_role(
    self,
    index_id: UUIDLike,
    role_id: str,
    *,
    query_params: Optional[Dict[str, Any]] = None,
) -> response.GlobusHTTPResponse:
    """
    ``DELETE /v1/index/<index_id>/role/<role_id>``

    Delete a role from an index. You must have the ``owner`` or ``admin``
    role on an index to delete roles. You cannot remove the last ``owner`` from an
    index.

    :param index_id: The index from which to delete a role
    :type index_id: uuid or str
    :param role_id: The role to delete
    :type role_id: str
    :param query_params: Any additional query params to pass
    :type query_params: dict, optional
    """
    log.info("SearchClient.delete_role(%s, %s)", index_id, role_id)
    path = f"/v1/index/{index_id}/role/{role_id}"
    return self.delete(path, query_params=query_params)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.