CombinedText stringlengths 4 3.42M |
|---|
// Copyright (C) 2017, 2018 Damon Revoe. All rights reserved.
// Use of this source code is governed by the MIT
// license, which can be found in the LICENSE file.
package main
import (
"path/filepath"
"strings"
"testing"
)
// TestNoPackages verifies that indexing an empty package list succeeds
// and yields empty, non-nil index structures.
func TestNoPackages(t *testing.T) {
	pi, err := buildPackageIndex(false,
		packageDefinitionList{}, [][]string{})
	if err != nil {
		t.Error("Building index for an empty list returned an error")
	}
	properlyInitialized := pi.packageByName != nil &&
		pi.orderedPackages != nil &&
		len(pi.packageByName) == 0 &&
		len(pi.orderedPackages) == 0
	if !properlyInitialized {
		t.Error("Index structures are not properly initialized")
	}
}
// makePackageIndexForTesting builds a packageIndex from a compact text
// fixture: each entry is "name" or "name:dep1,dep2,...". The package's
// definition-file pathname is derived from its name.
func makePackageIndexForTesting(packagesAndDependencies []string, quiet bool) (
	*packageIndex, error) {
	var packages packageDefinitionList
	var deps [][]string
	for _, packageLine := range packagesAndDependencies {
		// Split into at most two parts: the name and an optional
		// comma-separated dependency list.
		split := strings.SplitN(packageLine, ":", 2)
		packages = append(packages, &packageDefinition{
			PackageName: split[0],
			pathname: filepath.Join(split[0],
				packageDefinitionFilename)})
		if len(split) > 1 {
			deps = append(deps, strings.Split(split[1], ","))
		} else {
			// No colon: the package has no dependencies.
			deps = append(deps, []string{})
		}
	}
	return buildPackageIndex(quiet, packages, deps)
}
// TestDuplicateDefinition checks that a repeated package name is
// rejected with a descriptive error and no index is returned.
func TestDuplicateDefinition(t *testing.T) {
	pi, err := makePackageIndexForTesting([]string{
		"base", "client", "base"}, false)
	duplicateDetected := pi == nil && err != nil &&
		strings.Contains(err.Error(), "duplicate package name: base")
	if !duplicateDetected {
		t.Error("Package duplicate was not detected")
	}
}
func confirmCircularDependencyError(t *testing.T, err error, cycle string) {
if err == nil {
t.Error("Circular dependency was not detected")
} else if !strings.Contains(err.Error(),
"circular dependency detected: "+cycle) {
t.Error("Unexpected circular dependency error: " +
err.Error())
}
}
func TestCircularDependency(t *testing.T) {
_, err := makePackageIndexForTesting([]string{
"a:b", "b:c", "c:a"}, false)
confirmCircularDependencyError(t, err, "a -> b -> c -> a")
_, err = makePackageIndexForTesting([]string{
"a:b", "b:c", "c:b,d", "d"}, false)
confirmCircularDependencyError(t, err, "b -> c -> b")
_, err = makePackageIndexForTesting([]string{
"a:b,a", "b"}, false)
confirmCircularDependencyError(t, err, "a -> a")
}
// TestDiamondDependency indexes a diamond-shaped dependency graph
// (d depends on b and c, which both depend on a) and verifies both the
// topological package order and the computed transitive dependencies.
func TestDiamondDependency(t *testing.T) {
	pi, err := makePackageIndexForTesting([]string{
		"d:b,c", "b:a", "c:a", "a"}, false)
	if err != nil {
		t.Error("Unexpected error")
	}
	if len(pi.packageByName) != len(pi.orderedPackages) {
		t.Error("Index size mismatch")
	}
	// Dependencies come before their dependents; ties appear to be
	// broken alphabetically — TODO confirm against buildPackageIndex.
	packageOrder := packageNames(pi.orderedPackages)
	if packageOrder != "a, b, c, d" {
		t.Error("Invalid package order: " + packageOrder)
	}
	// checkIndirectDependencies asserts the full transitive dependency
	// list (allRequired) of a single package.
	checkIndirectDependencies := func(pkgName, expectedDeps string) {
		deps := packageNames(pi.packageByName[pkgName].allRequired)
		if deps != expectedDeps {
			t.Error("Indirect dependencies for " + pkgName +
				" do not match: expected=" + expectedDeps +
				"; actual=" + deps)
		}
	}
	checkIndirectDependencies("a", "")
	checkIndirectDependencies("b", "a")
	checkIndirectDependencies("c", "a")
	checkIndirectDependencies("d", "a, b, c")
}
func TestSelectionGraph(t *testing.T) {
pi, err := makePackageIndexForTesting([]string{
"d:a,b,c", "b:a", "c:a", "a"}, true)
if err != nil {
t.Error("Unexpected error")
}
a := pi.packageByName["a"]
d := pi.packageByName["d"]
selection := packageDefinitionList{a, d}
selectionGraph := establishDependenciesInSelection(selection, pi)
if len(selectionGraph) != len(selection) {
t.Error("Unexpected number of selected vertices")
}
if len(selectionGraph[a]) != 0 || len(selectionGraph[d]) != 1 ||
selectionGraph[d][0] != a {
t.Error("Unexpected selection graph topology")
}
}
Provide means for easy testing of selection graph
// Copyright (C) 2017, 2018 Damon Revoe. All rights reserved.
// Use of this source code is governed by the MIT
// license, which can be found in the LICENSE file.
package main
import (
"path/filepath"
"strings"
"testing"
)
func TestNoPackages(t *testing.T) {
pi, err := buildPackageIndex(false,
packageDefinitionList{}, [][]string{})
if err != nil {
t.Error("Building index for an empty list returned an error")
}
if pi.packageByName == nil || pi.orderedPackages == nil ||
len(pi.packageByName) != 0 || len(pi.orderedPackages) != 0 {
t.Error("Index structures are not properly initialized")
}
}
// makePackageIndexForTesting builds a packageIndex from a compact text
// fixture. Each entry looks like "name" or "name:dep1,dep2"; the
// package definition file path is derived from the package name.
func makePackageIndexForTesting(packagesAndDependencies []string, quiet bool) (
	*packageIndex, error) {
	var pkgs packageDefinitionList
	var dependencies [][]string
	for _, entry := range packagesAndDependencies {
		parts := strings.SplitN(entry, ":", 2)
		name := parts[0]
		pkgs = append(pkgs, &packageDefinition{
			PackageName: name,
			pathname: filepath.Join(name,
				packageDefinitionFilename)})
		var depList []string
		if len(parts) > 1 {
			depList = strings.Split(parts[1], ",")
		} else {
			depList = []string{}
		}
		dependencies = append(dependencies, depList)
	}
	return buildPackageIndex(quiet, pkgs, dependencies)
}
func TestDuplicateDefinition(t *testing.T) {
pi, err := makePackageIndexForTesting([]string{
"base", "client", "base"}, false)
if pi != nil || err == nil || !strings.Contains(err.Error(),
"duplicate package name: base") {
t.Error("Package duplicate was not detected")
}
}
func confirmCircularDependencyError(t *testing.T, err error, cycle string) {
if err == nil {
t.Error("Circular dependency was not detected")
} else if !strings.Contains(err.Error(),
"circular dependency detected: "+cycle) {
t.Error("Unexpected circular dependency error: " +
err.Error())
}
}
func TestCircularDependency(t *testing.T) {
_, err := makePackageIndexForTesting([]string{
"a:b", "b:c", "c:a"}, false)
confirmCircularDependencyError(t, err, "a -> b -> c -> a")
_, err = makePackageIndexForTesting([]string{
"a:b", "b:c", "c:b,d", "d"}, false)
confirmCircularDependencyError(t, err, "b -> c -> b")
_, err = makePackageIndexForTesting([]string{
"a:b,a", "b"}, false)
confirmCircularDependencyError(t, err, "a -> a")
}
func TestDiamondDependency(t *testing.T) {
pi, err := makePackageIndexForTesting([]string{
"d:b,c", "b:a", "c:a", "a"}, false)
if err != nil {
t.Error("Unexpected error")
}
if len(pi.packageByName) != len(pi.orderedPackages) {
t.Error("Index size mismatch")
}
packageOrder := packageNames(pi.orderedPackages)
if packageOrder != "a, b, c, d" {
t.Error("Invalid package order: " + packageOrder)
}
checkIndirectDependencies := func(pkgName, expectedDeps string) {
deps := packageNames(pi.packageByName[pkgName].allRequired)
if deps != expectedDeps {
t.Error("Indirect dependencies for " + pkgName +
" do not match: expected=" + expectedDeps +
"; actual=" + deps)
}
}
checkIndirectDependencies("a", "")
checkIndirectDependencies("b", "a")
checkIndirectDependencies("c", "a")
checkIndirectDependencies("d", "a, b, c")
}
// checkSelectionGraph verifies that selectionGraph contains exactly the
// packages named in 'expected', and that each package's dependency
// list, rendered as a comma-separated string, matches the expected
// value for that package.
func checkSelectionGraph(t *testing.T,
	selectionGraph map[*packageDefinition]packageDefinitionList,
	expected map[string]string) {
	if len(selectionGraph) != len(expected) {
		t.Error("Unexpected number of selected vertices")
	}
	for pd, deps := range selectionGraph {
		name := pd.PackageName
		expectedDepNames, match := expected[name]
		if !match {
			t.Error("Unexpected package returned: " + name)
			// Skip the dependency comparison for a package that
			// should not be present at all; comparing against the
			// zero-value "" would just produce a second, spurious
			// error for the same problem.
			continue
		}
		// Collect the dependency names and join them once instead of
		// growing a string in the loop.
		depNameList := make([]string, 0, len(deps))
		for _, dep := range deps {
			depNameList = append(depNameList, dep.PackageName)
		}
		depNames := strings.Join(depNameList, ", ")
		if depNames != expectedDepNames {
			t.Error("Unexpected dependencies for " + name + ": " +
				depNames + "; expected: " + expectedDepNames)
		}
	}
}
func TestSelectionGraph(t *testing.T) {
pi, err := makePackageIndexForTesting([]string{
"d:a,b,c", "b:a", "c:a", "a"}, true)
if err != nil {
t.Error("Unexpected error")
}
a := pi.packageByName["a"]
d := pi.packageByName["d"]
selection := packageDefinitionList{a, d}
selectionGraph := establishDependenciesInSelection(selection, pi)
checkSelectionGraph(t, selectionGraph, map[string]string{
"a": "",
"d": "a"})
}
|
// Copyright 2016 The LUCI Authors. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
package buildbot
import (
"encoding/json"
"fmt"
"net/http"
"os"
"sort"
"strings"
"time"
"github.com/luci/gae/service/datastore"
"github.com/luci/luci-go/common/clock"
"github.com/luci/luci-go/common/logging"
"github.com/luci/luci-go/milo/api/resp"
"github.com/luci/luci-go/milo/appengine/settings"
"github.com/luci/luci-go/milo/common/miloerror"
"golang.org/x/net/context"
)
// builderRef is used for keying specific builds in a master json.
type builderRef struct {
	builder  string
	buildNum int
}

// buildMap contains all of the current builds within a master json. We use
// this because buildbot returns all current builds within the slaves portion,
// whereas it's easier to map them by builder instead.
type buildMap map[builderRef]*buildbotBuild
// createRunningBuildMap extracts all of the running builds in a master json
// from the various slaves and dumps it into a map for easy reference.
func createRunningBuildMap(master *buildbotMaster) buildMap {
	result := buildMap{}
	for _, slave := range master.Slaves {
		// Take the address of the slice element, not of the range
		// variable: the original &build made every map entry alias the
		// single loop variable, so all entries for a slave ended up
		// pointing at its last running build.
		for i := range slave.Runningbuilds {
			build := &slave.Runningbuilds[i]
			result[builderRef{build.Buildername, build.Number}] = build
		}
	}
	return result
}
func getBuildSummary(b *buildbotBuild) *resp.BuildSummary {
started, finished, duration := parseTimes(b.Times)
return &resp.BuildSummary{
Link: &resp.Link{
URL: fmt.Sprintf("%d", b.Number),
Label: fmt.Sprintf("#%d", b.Number),
},
Status: b.toStatus(),
ExecutionTime: resp.Interval{
Started: started,
Finished: finished,
Duration: duration,
},
Text: b.Text,
Blame: blame(b),
Revision: b.Sourcestamp.Revision,
}
}
// getBuilds fetches all of the recent builds from the datastore. Note that
// getBuilds() does not perform ACL checks.
func getBuilds(c context.Context, masterName, builderName string, finished bool) ([]*resp.BuildSummary, error) {
// TODO(hinoka): Builder specific structs.
result := []*resp.BuildSummary{}
ds := datastore.Get(c)
q := datastore.NewQuery("buildbotBuild")
q = q.Eq("finished", finished)
q = q.Eq("master", masterName)
q = q.Eq("builder", builderName)
q = q.Limit(25) // TODO(hinoka): This should be adjustable
q = q.Order("-number")
buildbots := []*buildbotBuild{}
err := ds.GetAll(q, &buildbots)
if err != nil {
return nil, err
}
for _, b := range buildbots {
result = append(result, getBuildSummary(b))
}
return result, nil
}
// getCurrentBuild extracts a build from a map of current builds, and translates
// it into the milo version of the build.
func getCurrentBuild(c context.Context, bMap buildMap, builder string, buildNum int) *resp.BuildSummary {
b, ok := bMap[builderRef{builder, buildNum}]
if !ok {
logging.Warningf(c, "Could not find %s/%d in builder map:\n %s", builder, buildNum, bMap)
return nil
}
return getBuildSummary(b)
}
// getCurrentBuilds extracts the list of all the current builds from a master json
// from the slaves' runningBuilds portion.
func getCurrentBuilds(c context.Context, master *buildbotMaster, builderName string) []*resp.BuildSummary {
b := master.Builders[builderName]
results := []*resp.BuildSummary{}
bMap := createRunningBuildMap(master)
for _, bn := range b.Currentbuilds {
cb := getCurrentBuild(c, bMap, builderName, bn)
if cb != nil {
results = append(results, cb)
}
}
return results
}
var errMasterNotFound = miloerror.Error{
Message: "Master not found",
Code: http.StatusNotFound,
}
// builderImpl is the implementation for getting a milo builder page from buildbot.
// This gets:
// * Current Builds from querying the master json from the datastore.
// * Recent Builds from a cron job that backfills the recent builds.
func builderImpl(c context.Context, masterName, builderName string) (*resp.Builder, error) {
	result := &resp.Builder{}
	master, internal, t, err := getMasterJSON(c, masterName)
	// Check internal ACLs before surfacing any lookup error so that a
	// forbidden internal master is indistinguishable from a missing one.
	if internal {
		allowed, err := settings.IsAllowedInternal(c)
		if err != nil {
			return nil, err
		}
		if !allowed {
			return nil, errMasterNotFound
		}
	}
	switch {
	case err == datastore.ErrNoSuchEntity:
		// Return the same sentinel as the forbidden-internal case above
		// so the existence of internal masters is not leaked.
		return nil, errMasterNotFound
	case err != nil:
		return nil, err
	}
	// Flag stale master data (the cron updater may be behind).
	if clock.Now(c).Sub(t) > 2*time.Minute {
		warning := fmt.Sprintf(
			"WARNING: Master data is stale (last updated %s)", t)
		// Use an explicit "%s" verb: passing a non-constant message as
		// the format string trips go vet and misprints any '%' in it.
		logging.Warningf(c, "%s", warning)
		result.Warning = warning
	}
	s, _ := json.Marshal(master)
	logging.Debugf(c, "Master: %s", s)
	_, ok := master.Builders[builderName]
	if !ok {
		// This long block is just to return a good error message when an invalid
		// buildbot builder is specified.
		keys := make([]string, 0, len(master.Builders))
		for k := range master.Builders {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		avail := strings.Join(keys, "\n")
		return nil, fmt.Errorf(
			"Cannot find builder %s in master %s.\nAvailable builders: \n%s",
			builderName, masterName, avail)
	}
	recentBuilds, err := getBuilds(c, masterName, builderName, true)
	if err != nil {
		return nil, err // Or maybe not?
	}
	currentBuilds := getCurrentBuilds(c, master, builderName)
	fmt.Fprintf(os.Stderr, "Number of current builds: %d\n", len(currentBuilds))
	result.CurrentBuilds = currentBuilds
	for _, fb := range recentBuilds {
		// Yes recent builds is synonymous with finished builds.
		// TODO(hinoka): Implement limits.
		if fb != nil {
			result.FinishedBuilds = append(result.FinishedBuilds, fb)
		}
	}
	return result, nil
}
Milo: Make the "master not found" error identical for non-existent masters and existing internal masters
BUG=
Review-Url: https://codereview.chromium.org/2298853002
// Copyright 2016 The LUCI Authors. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
package buildbot
import (
"encoding/json"
"fmt"
"net/http"
"os"
"sort"
"strings"
"time"
"github.com/luci/gae/service/datastore"
"github.com/luci/luci-go/common/clock"
"github.com/luci/luci-go/common/logging"
"github.com/luci/luci-go/milo/api/resp"
"github.com/luci/luci-go/milo/appengine/settings"
"github.com/luci/luci-go/milo/common/miloerror"
"golang.org/x/net/context"
)
// builderRef is used for keying specific builds in a master json.
type builderRef struct {
builder string
buildNum int
}
// buildMap contains all of the current build within a master json. We use this
// because buildbot returns all current builds as within the slaves portion, whereas
// it's eaiser to map thenm by builders instead.
type buildMap map[builderRef]*buildbotBuild
// createRunningBuildMap extracts all of the running builds in a master json
// from the various slaves and dumps it into a map for easy reference.
func createRunningBuildMap(master *buildbotMaster) buildMap {
	result := buildMap{}
	for _, slave := range master.Slaves {
		// Take the address of the slice element, not of the range
		// variable: the original &build made every map entry alias the
		// single loop variable, so all entries for a slave ended up
		// pointing at its last running build.
		for i := range slave.Runningbuilds {
			build := &slave.Runningbuilds[i]
			result[builderRef{build.Buildername, build.Number}] = build
		}
	}
	return result
}
// getBuildSummary converts a raw buildbot build into the milo response
// representation, including its link, status, timing and blame info.
func getBuildSummary(b *buildbotBuild) *resp.BuildSummary {
	started, finished, duration := parseTimes(b.Times)
	link := &resp.Link{
		URL:   fmt.Sprintf("%d", b.Number),
		Label: fmt.Sprintf("#%d", b.Number),
	}
	interval := resp.Interval{
		Started:  started,
		Finished: finished,
		Duration: duration,
	}
	return &resp.BuildSummary{
		Link:          link,
		Status:        b.toStatus(),
		ExecutionTime: interval,
		Text:          b.Text,
		Blame:         blame(b),
		Revision:      b.Sourcestamp.Revision,
	}
}
// getBuilds fetches all of the recent builds from the datastore. Note that
// getBuilds() does not perform ACL checks.
func getBuilds(c context.Context, masterName, builderName string, finished bool) ([]*resp.BuildSummary, error) {
	// TODO(hinoka): Builder specific structs.
	result := []*resp.BuildSummary{}
	ds := datastore.Get(c)
	// Most recent builds first, capped at 25 entries.
	q := datastore.NewQuery("buildbotBuild")
	q = q.Eq("finished", finished)
	q = q.Eq("master", masterName)
	q = q.Eq("builder", builderName)
	q = q.Limit(25) // TODO(hinoka): This should be adjustable
	q = q.Order("-number")
	buildbots := []*buildbotBuild{}
	err := ds.GetAll(q, &buildbots)
	if err != nil {
		return nil, err
	}
	// Convert each datastore entity into a response summary.
	for _, b := range buildbots {
		result = append(result, getBuildSummary(b))
	}
	return result, nil
}
// getCurrentBuild extracts a build from a map of current builds, and translates
// it into the milo version of the build. It returns nil (after logging a
// warning) when the requested builder/number pair is not in the map.
func getCurrentBuild(c context.Context, bMap buildMap, builder string, buildNum int) *resp.BuildSummary {
	b, ok := bMap[builderRef{builder, buildNum}]
	if !ok {
		logging.Warningf(c, "Could not find %s/%d in builder map:\n %s", builder, buildNum, bMap)
		return nil
	}
	return getBuildSummary(b)
}
// getCurrentBuilds returns summaries for every in-progress build of the
// named builder, resolved against the master's running-build map built
// from the slaves' runningBuilds portion.
func getCurrentBuilds(c context.Context, master *buildbotMaster, builderName string) []*resp.BuildSummary {
	running := createRunningBuildMap(master)
	summaries := []*resp.BuildSummary{}
	for _, num := range master.Builders[builderName].Currentbuilds {
		if summary := getCurrentBuild(c, running, builderName, num); summary != nil {
			summaries = append(summaries, summary)
		}
	}
	return summaries
}
var errMasterNotFound = miloerror.Error{
Message: "Master not found",
Code: http.StatusNotFound,
}
// builderImpl is the implementation for getting a milo builder page from buildbot.
// This gets:
// * Current Builds from querying the master json from the datastore.
// * Recent Builds from a cron job that backfills the recent builds.
func builderImpl(c context.Context, masterName, builderName string) (*resp.Builder, error) {
	result := &resp.Builder{}
	master, internal, t, err := getMasterJSON(c, masterName)
	// Check internal ACLs before surfacing any lookup error so that a
	// forbidden internal master is indistinguishable from a missing one.
	if internal {
		allowed, err := settings.IsAllowedInternal(c)
		if err != nil {
			return nil, err
		}
		if !allowed {
			return nil, errMasterNotFound
		}
	}
	switch {
	case err == datastore.ErrNoSuchEntity:
		// Same sentinel as the forbidden-internal case above, so the
		// existence of internal masters is not leaked.
		return nil, errMasterNotFound
	case err != nil:
		return nil, err
	}
	// Flag stale master data (the cron updater may be behind).
	if clock.Now(c).Sub(t) > 2*time.Minute {
		warning := fmt.Sprintf(
			"WARNING: Master data is stale (last updated %s)", t)
		// Use an explicit "%s" verb: passing a non-constant message as
		// the format string trips go vet and misprints any '%' in it.
		logging.Warningf(c, "%s", warning)
		result.Warning = warning
	}
	s, _ := json.Marshal(master)
	logging.Debugf(c, "Master: %s", s)
	_, ok := master.Builders[builderName]
	if !ok {
		// This long block is just to return a good error message when an invalid
		// buildbot builder is specified.
		keys := make([]string, 0, len(master.Builders))
		for k := range master.Builders {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		avail := strings.Join(keys, "\n")
		return nil, fmt.Errorf(
			"Cannot find builder %s in master %s.\nAvailable builders: \n%s",
			builderName, masterName, avail)
	}
	recentBuilds, err := getBuilds(c, masterName, builderName, true)
	if err != nil {
		return nil, err // Or maybe not?
	}
	currentBuilds := getCurrentBuilds(c, master, builderName)
	fmt.Fprintf(os.Stderr, "Number of current builds: %d\n", len(currentBuilds))
	result.CurrentBuilds = currentBuilds
	for _, fb := range recentBuilds {
		// Yes recent builds is synonymous with finished builds.
		// TODO(hinoka): Implement limits.
		if fb != nil {
			result.FinishedBuilds = append(result.FinishedBuilds, fb)
		}
	}
	return result, nil
}
|
package fs
import (
//"bufio"
"fmt"
"github.com/Sirupsen/logrus"
//"github.com/docker/libcontainer/cgroups"
"github.com/memoryLimitBySijan"
"os"
//"path/filepath"
"strconv"
)
// SijanAnanya caps the memory cgroup at 20% of the machine's total RAM
// by writing memory.limit_in_bytes in the cgroup's memory directory.
// NOTE(review): the error from d.join is never checked before dir is
// used; if it is non-nil, writeFile still runs against a possibly
// invalid dir, and the deferred cleanup removes dir only at return —
// confirm this is intended.
func SijanAnanya(d *data) {
	dir, err := d.join("memory")
	// Best-effort cleanup: remove the directory if joining failed.
	defer func() {
		if err != nil {
			os.RemoveAll(dir)
		}
	}()
	logrus.Debugf("!!!!!calledSijanAnanya")
	fmt.Println("This is going t change thewhole code")
	si := memoryLimitBySijan.Get()
	TotalMemory := si.TotalRam
	logrus.Debugf("!!!!!!!!!!!!!!!!!!calledSijanAnanya%v\n", si.TotalRam)
	//fmt.Printf("%v\n", si.TotalRam)
	// logrus.Debugf(reflect.TypeOf(si.TotalRam))
	// Each container is allowed 20% of total RAM.
	LimitForEachContainer := TotalMemory * 20 / 100
	// Presumably converts kilobytes to bytes — TODO confirm units
	// (1024 would be the KiB-correct factor).
	ByteConverter := 1000 * LimitForEachContainer
	var a int64
	a = Num64(ByteConverter)
	str := strconv.FormatInt(a, 10)
	writeFile(dir, "memory.limit_in_bytes", str)
	//s str := strconv.FormatUInt(ByteConverter, 10)
	//fmt.Println(reflect.TypeOf(strval))
}
// Num64 converts any integer-like value to int64 by round-tripping it
// through its base-10 string representation. It returns 0 when the
// value does not format as a plain decimal integer (e.g. strings,
// floats, or values outside the int64 range).
func Num64(n interface{}) int64 {
	s := fmt.Sprintf("%d", n)
	i, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		// Not a parseable decimal integer; fall back to zero.
		return 0
	}
	return i
}
decrease do 100
package fs
import (
//"bufio"
"fmt"
"github.com/Sirupsen/logrus"
//"github.com/docker/libcontainer/cgroups"
"github.com/memoryLimitBySijan"
"os"
//"path/filepath"
"strconv"
)
// SijanAnanya caps the memory cgroup at 20% of (total RAM - 300) by
// writing memory.limit_in_bytes in the cgroup's memory directory.
// NOTE(review): the error from d.join is never checked before dir is
// used; if it is non-nil, writeFile still runs against a possibly
// invalid dir, and the deferred cleanup removes dir only at return —
// confirm this is intended.
func SijanAnanya(d *data) {
	dir, err := d.join("memory")
	// Best-effort cleanup: remove the directory if joining failed.
	defer func() {
		if err != nil {
			os.RemoveAll(dir)
		}
	}()
	logrus.Debugf("!!!!!calledSijanAnanya")
	fmt.Println("This is going t change thewhole code")
	si := memoryLimitBySijan.Get()
	// Reserve 300 units of RAM before computing the limit — presumably
	// memory held back for the host; TODO confirm units and intent.
	TotalMemory := si.TotalRam - 300
	logrus.Debugf("!!!!!!!!!!!!!!!!!!calledSijanAnanya%v\n", si.TotalRam)
	//fmt.Printf("%v\n", si.TotalRam)
	// logrus.Debugf(reflect.TypeOf(si.TotalRam))
	// Each container is allowed 20% of the remaining memory.
	LimitForEachContainer := TotalMemory * 20 / 100
	// Presumably converts kilobytes to bytes — TODO confirm units
	// (1024 would be the KiB-correct factor).
	ByteConverter := 1000 * LimitForEachContainer
	var a int64
	a = Num64(ByteConverter)
	str := strconv.FormatInt(a, 10)
	writeFile(dir, "memory.limit_in_bytes", str)
	//s str := strconv.FormatUInt(ByteConverter, 10)
	//fmt.Println(reflect.TypeOf(strval))
}
// Num64 converts any integer-like value to int64 by round-tripping it
// through its base-10 string representation. It returns 0 when the
// value does not format as a plain decimal integer (e.g. strings,
// floats, or values outside the int64 range).
func Num64(n interface{}) int64 {
	s := fmt.Sprintf("%d", n)
	i, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		// Not a parseable decimal integer; fall back to zero.
		return 0
	}
	return i
}
|
// +build integration
// Package s3manager provides integration tests for the S3 upload and download managers.
package s3manager
import (
"bytes"
"crypto/md5"
"fmt"
"io"
"os"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/service"
"github.com/aws/aws-sdk-go/internal/test/integration"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/stretchr/testify/assert"
)
var integBuf12MB = make([]byte, 1024*1024*12)
var integMD512MB = fmt.Sprintf("%x", md5.Sum(integBuf12MB))
var bucketName *string
var _ = integration.Imported
func TestMain(m *testing.M) {
setup()
defer teardown() // only called if we panic
result := m.Run()
teardown()
os.Exit(result)
}
func setup() {
// Create a bucket for testing
svc := s3.New(nil)
bucketName = aws.String(
fmt.Sprintf("aws-sdk-go-integration-%d-%s", time.Now().Unix(), integration.UniqueID()))
for i := 0; i < 10; i++ {
_, err := svc.CreateBucket(&s3.CreateBucketInput{Bucket: bucketName})
if err == nil {
break
}
}
for {
_, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: bucketName})
if err == nil {
break
}
time.Sleep(1 * time.Second)
}
}
// teardown deletes every object and aborts every in-flight multipart
// upload in the test bucket, then deletes the bucket itself. Cleanup is
// deliberately best-effort: individual delete errors are ignored, but
// the List responses are guarded so a failed listing (nil response) no
// longer causes a nil-pointer panic.
func teardown() {
	svc := s3.New(nil)
	if objs, err := svc.ListObjects(&s3.ListObjectsInput{Bucket: bucketName}); err == nil {
		for _, o := range objs.Contents {
			svc.DeleteObject(&s3.DeleteObjectInput{Bucket: bucketName, Key: o.Key})
		}
	}
	if uploads, err := svc.ListMultipartUploads(&s3.ListMultipartUploadsInput{Bucket: bucketName}); err == nil {
		for _, u := range uploads.Uploads {
			svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
				Bucket:   bucketName,
				Key:      u.Key,
				UploadID: u.UploadID,
			})
		}
	}
	svc.DeleteBucket(&s3.DeleteBucketInput{Bucket: bucketName})
}
type dlwriter struct {
buf []byte
}
func newDLWriter(size int) *dlwriter {
return &dlwriter{buf: make([]byte, size)}
}
func (d dlwriter) WriteAt(p []byte, pos int64) (n int, err error) {
if pos > int64(len(d.buf)) {
return 0, io.EOF
}
written := 0
for i, b := range p {
if i >= len(d.buf) {
break
}
d.buf[pos+int64(i)] = b
written++
}
return written, nil
}
func validate(t *testing.T, key string, md5value string) {
mgr := s3manager.NewDownloader(nil)
params := &s3.GetObjectInput{Bucket: bucketName, Key: &key}
w := newDLWriter(1024 * 1024 * 20)
n, err := mgr.Download(w, params)
assert.NoError(t, err)
assert.Equal(t, md5value, fmt.Sprintf("%x", md5.Sum(w.buf[0:n])))
}
func TestUploadConcurrently(t *testing.T) {
key := "12mb-1"
mgr := s3manager.NewUploader(nil)
out, err := mgr.Upload(&s3manager.UploadInput{
Bucket: bucketName,
Key: &key,
Body: bytes.NewReader(integBuf12MB),
})
assert.NoError(t, err)
assert.NotEqual(t, "", out.UploadID)
assert.Regexp(t, `^https?://.+/`+key+`$`, out.Location)
validate(t, key, integMD512MB)
}
// TestUploadFailCleanup corrupts the checksum of the second part of a
// multipart upload and verifies that, with LeavePartsOnError disabled,
// the failed upload is aborted: ListParts on the reported upload ID must
// subsequently fail.
func TestUploadFailCleanup(t *testing.T) {
	svc := s3.New(nil)
	// Break checksum on 2nd part so it fails
	part := 0
	svc.Handlers.Build.PushBack(func(r *service.Request) {
		if r.Operation.Name == "UploadPart" {
			if part == 1 {
				// Bogus SHA-256 header makes S3 reject this part.
				r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "000")
			}
			part++
		}
	})
	key := "12mb-leave"
	mgr := s3manager.NewUploader(&s3manager.UploadOptions{
		S3:                svc,
		LeavePartsOnError: false,
	})
	_, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: bucketName,
		Key:    &key,
		Body:   bytes.NewReader(integBuf12MB),
	})
	assert.Error(t, err)
	// The failure error carries the multipart upload ID of the aborted
	// upload.
	uploadID := ""
	if merr, ok := err.(s3manager.MultiUploadFailure); ok {
		uploadID = merr.UploadID()
	}
	assert.NotEmpty(t, uploadID)
	// Listing parts of an aborted upload must fail.
	_, err = svc.ListParts(&s3.ListPartsInput{
		Bucket: bucketName, Key: &key, UploadID: &uploadID})
	assert.Error(t, err)
}
service/s3/s3manager: Fix integration tests for inflection change
// +build integration
// Package s3manager provides integration tests for the S3 upload and download managers.
package s3manager
import (
"bytes"
"crypto/md5"
"fmt"
"io"
"os"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/service"
"github.com/aws/aws-sdk-go/internal/test/integration"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/stretchr/testify/assert"
)
var integBuf12MB = make([]byte, 1024*1024*12)
var integMD512MB = fmt.Sprintf("%x", md5.Sum(integBuf12MB))
var bucketName *string
var _ = integration.Imported
func TestMain(m *testing.M) {
setup()
defer teardown() // only called if we panic
result := m.Run()
teardown()
os.Exit(result)
}
// setup creates a uniquely-named S3 bucket for this integration run and
// blocks until the bucket is reachable.
func setup() {
	// Create a bucket for testing
	svc := s3.New(nil)
	bucketName = aws.String(
		fmt.Sprintf("aws-sdk-go-integration-%d-%s", time.Now().Unix(), integration.UniqueID()))
	// Retry bucket creation up to 10 times.
	// NOTE(review): the final error is silently dropped; if every
	// attempt fails, the HeadBucket loop below never terminates —
	// confirm this is acceptable for integration runs.
	for i := 0; i < 10; i++ {
		_, err := svc.CreateBucket(&s3.CreateBucketInput{Bucket: bucketName})
		if err == nil {
			break
		}
	}
	// Poll until the new bucket becomes visible (bucket creation is
	// eventually consistent).
	for {
		_, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: bucketName})
		if err == nil {
			break
		}
		time.Sleep(1 * time.Second)
	}
}
// teardown deletes every object and aborts every in-flight multipart
// upload in the test bucket, then deletes the bucket itself. Cleanup is
// deliberately best-effort: individual delete errors are ignored, but
// the List responses are guarded so a failed listing (nil response) no
// longer causes a nil-pointer panic.
func teardown() {
	svc := s3.New(nil)
	if objs, err := svc.ListObjects(&s3.ListObjectsInput{Bucket: bucketName}); err == nil {
		for _, o := range objs.Contents {
			svc.DeleteObject(&s3.DeleteObjectInput{Bucket: bucketName, Key: o.Key})
		}
	}
	if uploads, err := svc.ListMultipartUploads(&s3.ListMultipartUploadsInput{Bucket: bucketName}); err == nil {
		for _, u := range uploads.Uploads {
			svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
				Bucket:   bucketName,
				Key:      u.Key,
				UploadId: u.UploadId,
			})
		}
	}
	svc.DeleteBucket(&s3.DeleteBucketInput{Bucket: bucketName})
}
// dlwriter is an in-memory io.WriterAt backed by a fixed-size buffer,
// used to capture downloaded object bytes for validation.
type dlwriter struct {
	buf []byte
}

// newDLWriter returns a dlwriter with a zeroed buffer of the given size.
func newDLWriter(size int) *dlwriter {
	return &dlwriter{buf: make([]byte, size)}
}

// WriteAt copies p into the buffer at pos, silently truncating any bytes
// that would fall past the end of the buffer, and reports how many bytes
// were stored. Writing entirely past the end returns io.EOF.
func (d dlwriter) WriteAt(p []byte, pos int64) (n int, err error) {
	if pos > int64(len(d.buf)) {
		return 0, io.EOF
	}
	// copy truncates at the end of d.buf. This fixes an out-of-range
	// write in the original loop, which bounds-checked the source index
	// i instead of the destination index pos+i and could panic when a
	// write started near (or at) the end of the buffer.
	written := copy(d.buf[pos:], p)
	return written, nil
}
func validate(t *testing.T, key string, md5value string) {
mgr := s3manager.NewDownloader(nil)
params := &s3.GetObjectInput{Bucket: bucketName, Key: &key}
w := newDLWriter(1024 * 1024 * 20)
n, err := mgr.Download(w, params)
assert.NoError(t, err)
assert.Equal(t, md5value, fmt.Sprintf("%x", md5.Sum(w.buf[0:n])))
}
func TestUploadConcurrently(t *testing.T) {
key := "12mb-1"
mgr := s3manager.NewUploader(nil)
out, err := mgr.Upload(&s3manager.UploadInput{
Bucket: bucketName,
Key: &key,
Body: bytes.NewReader(integBuf12MB),
})
assert.NoError(t, err)
assert.NotEqual(t, "", out.UploadID)
assert.Regexp(t, `^https?://.+/`+key+`$`, out.Location)
validate(t, key, integMD512MB)
}
func TestUploadFailCleanup(t *testing.T) {
svc := s3.New(nil)
// Break checksum on 2nd part so it fails
part := 0
svc.Handlers.Build.PushBack(func(r *service.Request) {
if r.Operation.Name == "UploadPart" {
if part == 1 {
r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "000")
}
part++
}
})
key := "12mb-leave"
mgr := s3manager.NewUploader(&s3manager.UploadOptions{
S3: svc,
LeavePartsOnError: false,
})
_, err := mgr.Upload(&s3manager.UploadInput{
Bucket: bucketName,
Key: &key,
Body: bytes.NewReader(integBuf12MB),
})
assert.Error(t, err)
uploadID := ""
if merr, ok := err.(s3manager.MultiUploadFailure); ok {
uploadID = merr.UploadID()
}
assert.NotEmpty(t, uploadID)
_, err = svc.ListParts(&s3.ListPartsInput{
Bucket: bucketName, Key: &key, UploadId: &uploadID})
assert.Error(t, err)
}
|
package services
import (
"errors"
"fmt"
log "github.com/Sirupsen/logrus"
"github.com/matrix-org/go-neb/database"
"github.com/matrix-org/go-neb/matrix"
"github.com/matrix-org/go-neb/polling"
"github.com/matrix-org/go-neb/types"
"github.com/mmcdole/gofeed"
"html"
"time"
)
const minPollingIntervalSeconds = (10 * 60) // 10min
type feedPoller struct{}
func (p *feedPoller) IntervalSecs() int64 { return 10 }
// OnPoll re-queries every feed whose next-poll timestamp has arrived
// and sends any new items to the rooms subscribed to that feed. The
// service (with its updated poll bookkeeping) is persisted only when at
// least one feed was actually queried.
func (p *feedPoller) OnPoll(s types.Service, cli *matrix.Client) {
	logger := log.WithFields(log.Fields{
		"service_id": s.ServiceID(),
		"service_type": s.ServiceType(),
	})
	frService, ok := s.(*feedReaderService)
	if !ok {
		logger.Error("FeedReader: OnPoll called without an Feed Service instance")
		return
	}
	now := time.Now().Unix() // Second resolution
	// Work out which feeds should be polled. A zero NextPollTimestampSecs
	// means the feed has never been polled.
	var pollFeeds []string
	for u, feedInfo := range frService.Feeds {
		if feedInfo.NextPollTimestampSecs == 0 || now >= feedInfo.NextPollTimestampSecs {
			// re-query this feed
			pollFeeds = append(pollFeeds, u)
		}
	}
	// Query each feed and send new items to subscribed rooms. A failing
	// feed is skipped, not fatal: the remaining feeds still get polled.
	for _, u := range pollFeeds {
		feed, items, err := p.queryFeed(frService, u)
		if err != nil {
			logger.WithField("feed_url", u).WithError(err).Error("Failed to query feed")
			continue
		}
		for _, i := range items {
			if err := p.sendToRooms(frService, cli, u, feed, i); err != nil {
				logger.WithFields(log.Fields{
					"feed_url": u,
					log.ErrorKey: err,
					"item": i,
				}).Error("Failed to send item to room")
			}
		}
	}
	// Persist the service to save the next poll times if we did some queries
	if len(pollFeeds) == 0 {
		return
	}
	if _, err := database.GetServiceDB().StoreService(frService); err != nil {
		logger.WithError(err).Error("Failed to persist next poll times for service")
	}
}
// queryFeed queries the given feed, updates the relevant poll/update
// timestamps and returns any NEW items in chronological (oldest-first)
// order, so that callers send them to rooms in publication order.
func (p *feedPoller) queryFeed(s *feedReaderService, feedURL string) (*gofeed.Feed, []gofeed.Item, error) {
	var items []gofeed.Item
	fp := gofeed.NewParser()
	feed, err := fp.ParseURL(feedURL)
	if err != nil {
		return nil, items, err
	}
	// Work out which items are new, if any (based on the last updated TS we have)
	// If the TS is 0 then this is the first ever poll, so let's not send 10s of events
	// into the room and just do new ones from this point onwards.
	if s.Feeds[feedURL].FeedUpdatedTimestampSecs != 0 {
		// Feeds conventionally list the newest item first, so walk the
		// list backwards to collect new items oldest-first.
		for idx := len(feed.Items) - 1; idx >= 0; idx-- {
			i := feed.Items[idx]
			if i == nil || i.PublishedParsed == nil {
				continue
			}
			if i.PublishedParsed.Unix() > s.Feeds[feedURL].FeedUpdatedTimestampSecs {
				items = append(items, *i)
			}
		}
	}
	now := time.Now().Unix() // Second resolution
	// Work out when this feed was last updated; fall back to the newest
	// item's publication time when the feed has no update timestamp.
	var feedLastUpdatedTs int64
	if feed.UpdatedParsed != nil {
		feedLastUpdatedTs = feed.UpdatedParsed.Unix()
	} else if len(feed.Items) > 0 {
		i := feed.Items[0]
		if i != nil && i.PublishedParsed != nil {
			feedLastUpdatedTs = i.PublishedParsed.Unix()
		}
	}
	// Work out when to next poll this feed: the configured interval if it
	// exceeds the minimum, otherwise the minimum polling interval.
	nextPollTsSec := now + minPollingIntervalSeconds
	if s.Feeds[feedURL].PollIntervalMins > 10 {
		nextPollTsSec = now + int64(s.Feeds[feedURL].PollIntervalMins*60)
	}
	// TODO: Handle the 'sy' Syndication extension to control update interval.
	// See http://www.feedforall.com/syndication.htm and http://web.resource.org/rss/1.0/modules/syndication/
	p.updateFeedInfo(s, feedURL, nextPollTsSec, feedLastUpdatedTs)
	return feed, items, nil
}
// updateFeedInfo records the next-poll time and the feed's last-updated
// time for feedURL. Feeds maps to struct values, so the entry must be
// copied out, mutated and stored back. Unknown URLs are a no-op, which
// matches the original behavior of scanning for a matching key.
func (p *feedPoller) updateFeedInfo(s *feedReaderService, feedURL string, nextPollTs, feedUpdatedTs int64) {
	// Direct lookup instead of iterating every key to find one entry.
	f, ok := s.Feeds[feedURL]
	if !ok {
		return
	}
	f.NextPollTimestampSecs = nextPollTs
	f.FeedUpdatedTimestampSecs = feedUpdatedTs
	s.Feeds[feedURL] = f
}
// sendToRooms delivers a single feed item to every room subscribed to
// feedURL. Per-room send failures are logged but do not abort delivery
// to the remaining rooms; the returned error is always nil.
func (p *feedPoller) sendToRooms(s *feedReaderService, cli *matrix.Client, feedURL string, feed *gofeed.Feed, item gofeed.Item) error {
	logger := log.WithField("feed_url", feedURL).WithField("title", item.Title)
	logger.Info("New feed item")
	// Collect the rooms subscribed to this feed URL.
	var rooms []string
	for roomID, urls := range s.Rooms {
		for _, u := range urls {
			if u == feedURL {
				rooms = append(rooms, roomID)
				break
			}
		}
	}
	for _, roomID := range rooms {
		if _, err := cli.SendMessageEvent(roomID, "m.room.message", itemToHTML(feed, item)); err != nil {
			logger.WithError(err).WithField("room_id", roomID).Error("Failed to send to room")
		}
	}
	return nil
}
// SomeOne posted a new article: Title Of The Entry ( https://someurl.com/blag )
func itemToHTML(feed *gofeed.Feed, item gofeed.Item) matrix.HTMLMessage {
return matrix.GetHTMLMessage("m.notice", fmt.Sprintf(
"<i>%s</i> posted a new article: %s ( %s )",
html.EscapeString(feed.Title), html.EscapeString(item.Title), html.EscapeString(item.Link),
))
}
// feedReaderService polls RSS/Atom feeds and posts new items into Matrix rooms.
type feedReaderService struct {
	types.DefaultService
	id            string // unique service instance ID
	serviceUserID string // Matrix user ID the service acts as
	Feeds         map[string]struct { // feed_url => { }
		PollIntervalMins         int   `json:"poll_interval_mins"`
		NextPollTimestampSecs    int64 // Internal: When we should poll again
		FeedUpdatedTimestampSecs int64 // Internal: The last time the feed was updated
	} `json:"feeds"`
	Rooms map[string][]string `json:"rooms"` // room_id => [ feed_url ]
}
// ServiceUserID returns the Matrix user ID this service acts as.
func (s *feedReaderService) ServiceUserID() string { return s.serviceUserID }

// ServiceID returns the unique ID of this service instance.
func (s *feedReaderService) ServiceID() string { return s.id }

// ServiceType identifies this service as a "feedreader".
func (s *feedReaderService) ServiceType() string { return "feedreader" }

// Poller returns the poller that fetches feeds for this service.
func (s *feedReaderService) Poller() types.Poller { return &feedPoller{} }
// Register will check the liveness of each RSS feed given. If all feeds check out okay, no error is returned.
func (s *feedReaderService) Register(oldService types.Service, client *matrix.Client) error {
	if len(s.Feeds) > 0 {
		return nil
	}
	// No feeds: this is an error UNLESS the old service had some feeds, in
	// which case the user is deleting this service.
	numOldFeeds := 0
	if oldFeedService, ok := oldService.(*feedReaderService); ok {
		numOldFeeds = len(oldFeedService.Feeds)
	} else {
		log.WithField("service_id", oldService.ServiceID()).Error("Old service isn't a FeedReaderService")
	}
	if numOldFeeds == 0 {
		return errors.New("An RSS feed must be specified.")
	}
	return nil
}
// PostRegister tears this service down when it no longer has any feeds.
func (s *feedReaderService) PostRegister(oldService types.Service) {
	if len(s.Feeds) != 0 {
		return
	}
	// No feeds remain: stop polling and delete the service. bye-bye :(
	logger := log.WithFields(log.Fields{
		"service_id":   s.ServiceID(),
		"service_type": s.ServiceType(),
	})
	logger.Info("Deleting service (0 feeds)")
	polling.StopPolling(s)
	if err := database.GetServiceDB().DeleteService(s.ServiceID()); err != nil {
		logger.WithError(err).Error("Failed to delete service")
	}
}
// init registers the feedreader service constructor with the service
// registry; the webhook endpoint URL parameter is unused by this service.
func init() {
	types.RegisterService(func(serviceID, serviceUserID, webhookEndpointURL string) types.Service {
		r := &feedReaderService{
			id:            serviceID,
			serviceUserID: serviceUserID,
		}
		return r
	})
}
Send feed items in reverse order so they are delivered in chronological order.
Also add checks in Register() to make sure the feeds are valid.
package services
import (
"errors"
"fmt"
log "github.com/Sirupsen/logrus"
"github.com/matrix-org/go-neb/database"
"github.com/matrix-org/go-neb/matrix"
"github.com/matrix-org/go-neb/polling"
"github.com/matrix-org/go-neb/types"
"github.com/mmcdole/gofeed"
"html"
"time"
)
// minPollingIntervalSeconds is the lower bound between polls of any one feed.
const minPollingIntervalSeconds = (10 * 60) // 10min

// feedPoller implements types.Poller for the feedreader service.
type feedPoller struct{}

// IntervalSecs is how often OnPoll is invoked to check for due feeds.
func (p *feedPoller) IntervalSecs() int64 { return 10 }
// OnPoll queries every feed whose next-poll time has arrived, sends any new
// items (oldest first) to subscribed rooms, then persists updated poll times.
func (p *feedPoller) OnPoll(s types.Service, cli *matrix.Client) {
	logger := log.WithFields(log.Fields{
		"service_id":   s.ServiceID(),
		"service_type": s.ServiceType(),
	})
	frService, ok := s.(*feedReaderService)
	if !ok {
		logger.Error("FeedReader: OnPoll called without an Feed Service instance")
		return
	}
	nowSecs := time.Now().Unix() // Second resolution
	// Work out which feeds are due for a poll.
	var dueFeeds []string
	for feedURL, feedInfo := range frService.Feeds {
		if feedInfo.NextPollTimestampSecs == 0 || nowSecs >= feedInfo.NextPollTimestampSecs {
			dueFeeds = append(dueFeeds, feedURL)
		}
	}
	// Query each due feed and forward new items to subscribed rooms.
	for _, feedURL := range dueFeeds {
		feed, items, err := p.queryFeed(frService, feedURL)
		if err != nil {
			logger.WithField("feed_url", feedURL).WithError(err).Error("Failed to query feed")
			continue
		}
		// items[0] is the most recent, so walk backwards to deliver in
		// chronological order.
		for idx := len(items) - 1; idx >= 0; idx-- {
			if err := p.sendToRooms(frService, cli, feedURL, feed, items[idx]); err != nil {
				logger.WithFields(log.Fields{
					"feed_url":   feedURL,
					log.ErrorKey: err,
					"item":       items[idx],
				}).Error("Failed to send item to room")
			}
		}
	}
	// Persist the service to save the next poll times if we did some queries.
	if len(dueFeeds) == 0 {
		return
	}
	if _, err := database.GetServiceDB().StoreService(frService); err != nil {
		logger.WithError(err).Error("Failed to persist next poll times for service")
	}
}
// queryFeed fetches feedURL, updates the feed's poll/update timestamps and
// returns only the items that are NEW since the previous poll.
func (p *feedPoller) queryFeed(s *feedReaderService, feedURL string) (*gofeed.Feed, []gofeed.Item, error) {
	log.WithField("feed_url", feedURL).Info("Querying feed")
	var newItems []gofeed.Item
	feed, err := gofeed.NewParser().ParseURL(feedURL)
	if err != nil {
		return nil, newItems, err
	}
	// Value copy of the per-feed bookkeeping; avoids repeated map lookups.
	feedInfo := s.Feeds[feedURL]
	// Work out which items are new based on the last updated TS we have.
	// A zero TS means this is the first ever poll: report nothing yet and
	// only pick up items from this point onwards.
	if feedInfo.FeedUpdatedTimestampSecs != 0 {
		for _, it := range feed.Items {
			if it == nil || it.PublishedParsed == nil {
				continue
			}
			if it.PublishedParsed.Unix() > feedInfo.FeedUpdatedTimestampSecs {
				newItems = append(newItems, *it)
			}
		}
	}
	now := time.Now().Unix() // Second resolution
	// Work out when this feed was last updated: prefer the feed-level
	// timestamp, falling back to the newest item's published time.
	var feedLastUpdatedTs int64
	if feed.UpdatedParsed != nil {
		feedLastUpdatedTs = feed.UpdatedParsed.Unix()
	} else if len(feed.Items) > 0 {
		if first := feed.Items[0]; first != nil && first.PublishedParsed != nil {
			feedLastUpdatedTs = first.PublishedParsed.Unix()
		}
	}
	// Work out when to next poll this feed, honouring the minimum interval.
	nextPollTsSec := now + minPollingIntervalSeconds
	if feedInfo.PollIntervalMins > int(minPollingIntervalSeconds/60) {
		nextPollTsSec = now + int64(feedInfo.PollIntervalMins*60)
	}
	// TODO: Handle the 'sy' Syndication extension to control update interval.
	// See http://www.feedforall.com/syndication.htm and http://web.resource.org/rss/1.0/modules/syndication/
	p.updateFeedInfo(s, feedURL, nextPollTsSec, feedLastUpdatedTs)
	return feed, newItems, nil
}
// updateFeedInfo records the next poll time and the feed's last-updated time
// for feedURL. It is a no-op if feedURL is not a known feed.
// Previously this iterated the whole Feeds map to locate one key; a direct
// map lookup is O(1) and clearer.
func (p *feedPoller) updateFeedInfo(s *feedReaderService, feedURL string, nextPollTs, feedUpdatedTs int64) {
	// Map values are structs (not pointers): read, mutate, write back.
	if f, ok := s.Feeds[feedURL]; ok {
		f.NextPollTimestampSecs = nextPollTs
		f.FeedUpdatedTimestampSecs = feedUpdatedTs
		s.Feeds[feedURL] = f
	}
}
// sendToRooms delivers one new feed item to every room subscribed to feedURL.
// Send failures are logged per room; the function itself always returns nil.
func (p *feedPoller) sendToRooms(s *feedReaderService, cli *matrix.Client, feedURL string, feed *gofeed.Feed, item gofeed.Item) error {
	logger := log.WithField("feed_url", feedURL).WithField("title", item.Title)
	logger.Info("New feed item")
	// Collect the rooms subscribed to this feed.
	var targetRooms []string
	for roomID, feedURLs := range s.Rooms {
		for _, candidate := range feedURLs {
			if candidate != feedURL {
				continue
			}
			targetRooms = append(targetRooms, roomID)
			break
		}
	}
	// Best-effort delivery: a failure in one room must not block the others.
	for _, roomID := range targetRooms {
		_, err := cli.SendMessageEvent(roomID, "m.room.message", itemToHTML(feed, item))
		if err != nil {
			logger.WithError(err).WithField("room_id", roomID).Error("Failed to send to room")
		}
	}
	return nil
}
// SomeOne posted a new article: Title Of The Entry ( https://someurl.com/blag )
// itemToHTML renders a new-article notice for a feed item as an m.notice
// HTML message. All interpolated values are HTML-escaped.
func itemToHTML(feed *gofeed.Feed, item gofeed.Item) matrix.HTMLMessage {
	body := fmt.Sprintf("<i>%s</i> posted a new article: %s ( %s )",
		html.EscapeString(feed.Title),
		html.EscapeString(item.Title),
		html.EscapeString(item.Link))
	return matrix.GetHTMLMessage("m.notice", body)
}
// feedReaderService polls RSS/Atom feeds and posts new items into Matrix rooms.
type feedReaderService struct {
	types.DefaultService
	id            string // unique service instance ID
	serviceUserID string // Matrix user ID the service acts as
	Feeds         map[string]struct { // feed_url => { }
		PollIntervalMins         int   `json:"poll_interval_mins"`
		NextPollTimestampSecs    int64 // Internal: When we should poll again
		FeedUpdatedTimestampSecs int64 // Internal: The last time the feed was updated
	} `json:"feeds"`
	Rooms map[string][]string `json:"rooms"` // room_id => [ feed_url ]
}
// ServiceUserID returns the Matrix user ID this service acts as.
func (s *feedReaderService) ServiceUserID() string { return s.serviceUserID }

// ServiceID returns the unique ID of this service instance.
func (s *feedReaderService) ServiceID() string { return s.id }

// ServiceType identifies this service as a "feedreader".
func (s *feedReaderService) ServiceType() string { return "feedreader" }

// Poller returns the poller that fetches feeds for this service.
func (s *feedReaderService) Poller() types.Poller { return &feedPoller{} }
// Register will check the liveness of each RSS feed given. If all feeds check out okay, no error is returned.
// With no feeds configured it is an error unless the previous incarnation of
// the service had feeds (i.e. the user is deleting the service).
func (s *feedReaderService) Register(oldService types.Service, client *matrix.Client) error {
	if len(s.Feeds) == 0 {
		// this is an error UNLESS the old service had some feeds in which case they are deleting us :(
		var numOldFeeds int
		oldFeedService, ok := oldService.(*feedReaderService)
		if !ok {
			log.WithField("service_id", oldService.ServiceID()).Error("Old service isn't a FeedReaderService")
		} else {
			numOldFeeds = len(oldFeedService.Feeds)
		}
		if numOldFeeds == 0 {
			return errors.New("An RSS feed must be specified.")
		}
		return nil
	}
	// Make sure we can parse each feed. One parser suffices for all of them;
	// previously a new parser was allocated per feed inside the loop.
	fp := gofeed.NewParser()
	for feedURL := range s.Feeds {
		if _, err := fp.ParseURL(feedURL); err != nil {
			return fmt.Errorf("Failed to read URL %s: %s", feedURL, err.Error())
		}
	}
	// Make sure all feeds are accounted for (appear at least once) in the room map, AND make sure there
	// are no weird new feeds in those rooms
	for roomID, roomFeeds := range s.Rooms {
		for _, f := range roomFeeds {
			if _, exists := s.Feeds[f]; !exists {
				return fmt.Errorf("Feed URL %s in room %s does not exist in the Feeds section", f, roomID)
			}
		}
	}
	return nil
}
// PostRegister tears this service down when it no longer has any feeds.
func (s *feedReaderService) PostRegister(oldService types.Service) {
	if len(s.Feeds) != 0 {
		return
	}
	// No feeds remain: stop polling and delete the service. bye-bye :(
	logger := log.WithFields(log.Fields{
		"service_id":   s.ServiceID(),
		"service_type": s.ServiceType(),
	})
	logger.Info("Deleting service (0 feeds)")
	polling.StopPolling(s)
	if err := database.GetServiceDB().DeleteService(s.ServiceID()); err != nil {
		logger.WithError(err).Error("Failed to delete service")
	}
}
// init registers the feedreader service constructor with the service
// registry; the webhook endpoint URL parameter is unused by this service.
func init() {
	types.RegisterService(func(serviceID, serviceUserID, webhookEndpointURL string) types.Service {
		r := &feedReaderService{
			id:            serviceID,
			serviceUserID: serviceUserID,
		}
		return r
	})
}
|
package fetch
// The following code was sourced and modified from the
// https://github.com/andrew-d/goscrape package governed by MIT license.
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"net/http"
"net/http/cookiejar"
"net/url"
"strconv"
"strings"
"time"
"github.com/mafredri/cdp"
"github.com/mafredri/cdp/devtool"
"github.com/mafredri/cdp/protocol/dom"
"github.com/mafredri/cdp/protocol/network"
"github.com/mafredri/cdp/protocol/page"
"github.com/mafredri/cdp/protocol/runtime"
"github.com/mafredri/cdp/rpcc"
"github.com/slotix/dataflowkit/errs"
"github.com/spf13/viper"
"golang.org/x/net/publicsuffix"
"golang.org/x/sync/errgroup"
)
//Type represents types of fetcher
type Type string

//Fetcher types
const (
	//Base fetcher is used for downloading html web page using Go standard library's http
	Base Type = "Base"
	//Headless chrome is used to download content from JS driven web pages
	// NOTE(review): Chrome has no explicit Type annotation, so it is an
	// untyped string constant rather than Type — confirm this is intended.
	Chrome = "Chrome"
)
// Fetcher is the interface that must be satisfied by things that can fetch
// remote URLs and return their contents.
//
// Note: Fetchers may or may not be safe to use concurrently. Please read the
// documentation for each fetcher for more details.
type Fetcher interface {
	// Fetch is called to retrieve HTML content of a document from the remote server.
	Fetch(request Request) (io.ReadCloser, error)
	// getCookieJar returns the cookie jar currently attached to the fetcher.
	getCookieJar() http.CookieJar
	// setCookieJar attaches a cookie jar to the fetcher.
	setCookieJar(jar http.CookieJar)
	// getCookies returns the cookies stored for the given URL.
	getCookies(u *url.URL) ([]*http.Cookie, error)
	// setCookies stores cookies to be sent for the given URL.
	setCookies(u *url.URL, cookies []*http.Cookie) error
}
//Request struct contains request information sent to Fetchers
type Request struct {
	// Type defines Fetcher type. It may be "chrome" or "base". Defaults to "base".
	Type string `json:"type"`
	// URL to be retrieved
	URL string `json:"url"`
	// HTTP method : GET, POST
	Method string
	// FormData is a string value for passing formdata parameters.
	//
	// For example it may be used for processing pages which require authentication
	//
	// Example:
	//
	// "auth_key=880ea6a14ea49e853634fbdc5015a024&referer=http%3A%2F%2Fexample.com%2F&ips_username=user&ips_password=userpassword&rememberMe=1"
	//
	FormData string `json:"formData,omitempty"`
	//UserToken identifies user to keep personal cookies information.
	UserToken string `json:"userToken"`
	// Actions contains the list of action we have to perform on page
	// (a JSON-encoded array of {actionType: params} objects — see runActions).
	Actions string `json:"actions"`
}
// BaseFetcher is a Fetcher that uses the Go standard library's http
// client to fetch URLs.
type BaseFetcher struct {
	client *http.Client // shared HTTP client; its Jar carries session cookies
}
// ChromeFetcher is used to fetch JavaScript-rendered pages via headless Chrome.
type ChromeFetcher struct {
	cdpClient *cdp.Client    // Chrome DevTools Protocol client for the open tab
	client    *http.Client   // HTTP client used to reach the DevTools endpoint
	cookies   []*http.Cookie // cookies to install into the browser before navigation
}
//newFetcher creates instances of Fetcher for downloading a web page.
// An unknown type is a programmer error and panics via the logger.
func newFetcher(t Type) Fetcher {
	switch t {
	case Base:
		return newBaseFetcher()
	case Chrome:
		return newChromeFetcher()
	}
	logger.Panic(fmt.Sprintf("unhandled type: %#v", t))
	panic("unreachable")
}
// newBaseFetcher creates instances of newBaseFetcher{} to fetch
// a page content from regular websites as-is
// without running js scripts on the page.
// It returns nil when the PROXY setting is an invalid URL or the cookie jar
// cannot be created; both failures are now logged.
func newBaseFetcher() *BaseFetcher {
	var client *http.Client
	proxy := viper.GetString("PROXY")
	if len(proxy) > 0 {
		proxyURL, err := url.Parse(proxy)
		if err != nil {
			logger.Error(err.Error())
			return nil
		}
		transport := &http.Transport{Proxy: http.ProxyURL(proxyURL)}
		client = &http.Client{Transport: transport}
	} else {
		client = &http.Client{}
	}
	f := &BaseFetcher{
		client: client,
	}
	jarOpts := &cookiejar.Options{PublicSuffixList: publicsuffix.List}
	jar, err := cookiejar.New(jarOpts)
	if err != nil {
		// BUG FIX: previously this failure silently returned nil, leaving
		// callers with no clue why construction failed.
		logger.Error(err.Error())
		return nil
	}
	f.client.Jar = jar
	return f
}
// Fetch retrieves document from the remote server. It returns web page content along with cache and expiration information.
func (bf *BaseFetcher) Fetch(request Request) (io.ReadCloser, error) {
	resp, err := bf.response(request)
	if err == nil {
		return resp.Body, nil
	}
	return nil, err
}
//Response return response after document fetching using BaseFetcher.
// A request without form data uses the caller-supplied method; with form
// data a POST with a URL-encoded body is sent instead.
func (bf *BaseFetcher) response(r Request) (*http.Response, error) {
	//URL validation
	if _, err := url.ParseRequestURI(r.getURL()); err != nil {
		return nil, err
	}
	var err error
	var req *http.Request
	if r.FormData == "" {
		req, err = http.NewRequest(r.Method, r.URL, nil)
		if err != nil {
			return nil, err
		}
	} else {
		//if form data exists send POST request
		// Encode once; previously Encode() was computed twice (body and
		// Content-Length header).
		encoded := parseFormData(r.FormData).Encode()
		req, err = http.NewRequest("POST", r.URL, strings.NewReader(encoded))
		if err != nil {
			return nil, err
		}
		req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
		req.Header.Add("Content-Length", strconv.Itoa(len(encoded)))
	}
	//TODO: Add UA to requests
	//req.Header.Add("User-Agent", "Dataflow kit - https://github.com/slotix/dataflowkit")
	return bf.doRequest(req)
}
// doRequest executes req and returns the response on HTTP 200. Any other
// status yields an errs.StatusError; in that case the response body is
// closed so the underlying connection can be reused (previously it leaked).
func (bf *BaseFetcher) doRequest(req *http.Request) (*http.Response, error) {
	resp, err := bf.client.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == http.StatusOK {
		return resp, nil
	}
	// The caller never sees resp on this path, so close the body here.
	resp.Body.Close()
	return nil, errs.StatusError{
		resp.StatusCode,
		errors.New(http.StatusText(resp.StatusCode)),
	}
}
// getCookieJar returns the client's cookie jar.
func (bf *BaseFetcher) getCookieJar() http.CookieJar { //*cookiejar.Jar {
	return bf.client.Jar
}

// setCookieJar replaces the client's cookie jar.
func (bf *BaseFetcher) setCookieJar(jar http.CookieJar) {
	bf.client.Jar = jar
}

// getCookies returns the cookies stored in the jar for u; the error is always nil.
func (bf *BaseFetcher) getCookies(u *url.URL) ([]*http.Cookie, error) {
	return bf.client.Jar.Cookies(u), nil
}

// setCookies stores cookies in the jar for u; the error is always nil.
func (bf *BaseFetcher) setCookies(u *url.URL, cookies []*http.Cookie) error {
	bf.client.Jar.SetCookies(u, cookies)
	return nil
}
// parseFormData is used for converting formdata string to url.Values type.
// A pair without '=' previously caused an index-out-of-range panic; it is
// now stored with an empty value. Splitting on the first '=' also keeps
// values that themselves contain '=' intact.
func parseFormData(fd string) url.Values {
	//"auth_key=880ea6a14ea49e853634fbdc5015a024&referer=http%3A%2F%2Fexample.com%2F&ips_username=usr&ips_password=passw&rememberMe=0"
	formData := url.Values{}
	for _, pair := range strings.Split(fd, "&") {
		kv := strings.SplitN(pair, "=", 2)
		value := ""
		if len(kv) == 2 {
			value = kv[1]
		}
		formData.Add(kv[0], value)
	}
	return formData
}
// Static type assertion
var _ Fetcher = &BaseFetcher{}
// NewChromeFetcher returns ChromeFetcher.
// Traffic is routed through a proxy when the PROXY setting is non-empty;
// an unparseable proxy URL is logged and yields nil.
func newChromeFetcher() *ChromeFetcher {
	client := &http.Client{}
	if proxy := viper.GetString("PROXY"); len(proxy) > 0 {
		proxyURL, err := url.Parse(proxy)
		if err != nil {
			logger.Error(err.Error())
			return nil
		}
		client.Transport = &http.Transport{Proxy: http.ProxyURL(proxyURL)}
	}
	return &ChromeFetcher{client: client}
}
// LogCodec captures the output from writing RPC requests and reading
// responses on the connection. It implements rpcc.Codec via
// WriteRequest and ReadResponse.
type LogCodec struct{ conn io.ReadWriter } // conn is the wrapped transport
// WriteRequest marshals v into a buffer, writes its contents onto the
// connection and logs it.
func (c *LogCodec) WriteRequest(req *rpcc.Request) error {
	var payload bytes.Buffer
	if err := json.NewEncoder(&payload).Encode(req); err != nil {
		return err
	}
	fmt.Printf("SEND: %s", payload.Bytes())
	_, err := c.conn.Write(payload.Bytes())
	return err
}
// ReadResponse unmarshals from the connection into v whilst echoing
// what is read into a buffer for logging.
func (c *LogCodec) ReadResponse(resp *rpcc.Response) error {
	var echo bytes.Buffer
	dec := json.NewDecoder(io.TeeReader(c.conn, &echo))
	if err := dec.Decode(resp); err != nil {
		return err
	}
	fmt.Printf("RECV: %s\n", echo.String())
	return nil
}
// Fetch retrieves document from the remote server. It returns web page content along with cache and expiration information.
// The sequence is: open a Chrome tab over the DevTools protocol, enable the
// CDP domains, install saved cookies, navigate (GET, or an intercepted POST
// when form data is present), run optional page actions, persist the
// browser's cookies, then return the rendered outer HTML of the document.
func (f *ChromeFetcher) Fetch(request Request) (io.ReadCloser, error) {
	//URL validation
	if _, err := url.ParseRequestURI(strings.TrimSpace(request.getURL())); err != nil {
		return nil, err
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Connect to the DevTools endpoint and create a fresh target (tab).
	devt := devtool.New(viper.GetString("CHROME"), devtool.WithClient(f.client))
	//https://github.com/mafredri/cdp/issues/60
	//pt, err := devt.Get(ctx, devtool.Page)
	pt, err := devt.Create(ctx)
	if err != nil {
		return nil, err
	}
	var conn *rpcc.Conn
	if viper.GetBool("CHROME_TRACE") {
		// Wrap the RPC codec so every request/response is echoed to stdout.
		newLogCodec := func(conn io.ReadWriter) rpcc.Codec {
			return &LogCodec{conn: conn}
		}
		// Connect to WebSocket URL (page) that speaks the Chrome Debugging Protocol.
		conn, err = rpcc.DialContext(ctx, pt.WebSocketDebuggerURL, rpcc.WithCodec(newLogCodec))
	} else {
		conn, err = rpcc.DialContext(ctx, pt.WebSocketDebuggerURL)
	}
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	defer conn.Close() // Cleanup.
	// Close the tab when done.
	defer devt.Close(ctx, pt)
	// Create a new CDP Client that uses conn.
	f.cdpClient = cdp.NewClient(conn)
	if err = runBatch(
		// Enable all the domain events that we're interested in.
		func() error { return f.cdpClient.DOM.Enable(ctx) },
		func() error { return f.cdpClient.Network.Enable(ctx, nil) },
		func() error { return f.cdpClient.Page.Enable(ctx) },
		func() error { return f.cdpClient.Runtime.Enable(ctx) },
	); err != nil {
		return nil, err
	}
	// Install any cookies captured by a previous Fetch into the browser.
	err = f.loadCookies()
	if err != nil {
		return nil, err
	}
	domLoadTimeout := 60 * time.Second
	if request.FormData == "" {
		err = f.navigate(ctx, f.cdpClient.Page, "GET", request.getURL(), "", domLoadTimeout)
	} else {
		// Form data present: navigate with an intercepted POST request.
		formData := parseFormData(request.FormData)
		err = f.navigate(ctx, f.cdpClient.Page, "POST", request.getURL(), formData.Encode(), domLoadTimeout)
	}
	if err != nil {
		return nil, err
	}
	// Action failures are logged as warnings but do not abort the fetch.
	if err := f.runActions(ctx, request.Actions); err != nil {
		logger.Warn(err.Error())
	}
	u, err := url.Parse(request.getURL())
	if err != nil {
		return nil, err
	}
	// Persist the browser's cookies so subsequent requests keep the session.
	f.cookies, err = f.saveCookies(u)
	if err != nil {
		return nil, err
	}
	// Fetch the document root node. We can pass nil here
	// since this method only takes optional arguments.
	doc, err := f.cdpClient.DOM.GetDocument(ctx, nil)
	if err != nil {
		return nil, err
	}
	// Get the outer HTML for the page.
	result, err := f.cdpClient.DOM.GetOuterHTML(ctx, &dom.GetOuterHTMLArgs{
		NodeID: &doc.Root.NodeID,
	})
	if err != nil {
		return nil, err
	}
	readCloser := ioutil.NopCloser(strings.NewReader(result.OuterHTML))
	return readCloser, nil
}
// runActions executes the page actions encoded in actionsJSON, in order.
// An empty actions string is a no-op. Actions whose construction fails are
// skipped (preserving the previous lenient behavior); a failing Execute
// aborts and returns its error.
func (f *ChromeFetcher) runActions(ctx context.Context, actionsJSON string) error {
	if actionsJSON == "" {
		// Nothing to do; previously this fell through to json.Unmarshal,
		// which errors on empty input.
		return nil
	}
	acts := []map[string]json.RawMessage{}
	if err := json.Unmarshal([]byte(actionsJSON), &acts); err != nil {
		return err
	}
	for _, actionMap := range acts {
		for actionType, params := range actionMap {
			action, err := NewAction(actionType, params)
			if err != nil {
				continue
			}
			// BUG FIX: previously the first successfully-built action was
			// executed and its result returned immediately, so any further
			// actions in the list never ran.
			if err := action.Execute(ctx, f); err != nil {
				return err
			}
		}
	}
	return nil
}
// setCookieJar replaces the HTTP client's cookie jar.
func (f *ChromeFetcher) setCookieJar(jar http.CookieJar) {
	f.client.Jar = jar
}

// getCookieJar returns the HTTP client's cookie jar.
func (f *ChromeFetcher) getCookieJar() http.CookieJar {
	return f.client.Jar
}
// Static type assertion
var _ Fetcher = &ChromeFetcher{}
// navigate to the URL and wait for DOMContentEventFired. An error is
// returned if timeout happens before DOMContentEventFired.
// For POST navigations the request is rewritten via network interception
// (see interceptRequest); formData is the URL-encoded request body.
func (f *ChromeFetcher) navigate(ctx context.Context, pageClient cdp.Page, method, url string, formData string, timeout time.Duration) error {
	// Give in-page scripts a short grace period before the caller reads the DOM.
	defer time.Sleep(750 * time.Millisecond)
	ctxTimeout, cancelTimeout := context.WithTimeout(context.Background(), timeout)
	// BUG FIX: cancelTimeout was previously only called on the ctx.Done()
	// path, leaking the timeout context's timer on every other return path.
	defer cancelTimeout()
	// Make sure Page events are enabled.
	err := pageClient.Enable(ctxTimeout)
	if err != nil {
		return err
	}
	// Subscribe to load events before navigating, block until ready.
	loadEventFired, err := pageClient.LoadEventFired(ctxTimeout)
	if err != nil {
		return err
	}
	defer loadEventFired.Close()
	loadingFailed, err := f.cdpClient.Network.LoadingFailed(ctxTimeout)
	if err != nil {
		return err
	}
	defer loadingFailed.Close()
	// exceptionThrown, err := f.cdpClient.Runtime.ExceptionThrown(ctxTimeout)
	// if err != nil {
	// 	return err
	// }
	//defer exceptionThrown.Close()
	if method == "GET" {
		_, err = pageClient.Navigate(ctxTimeout, page.NewNavigateArgs(url))
		if err != nil {
			return err
		}
	} else {
		/* ast := "*" */
		// POST: intercept the navigation request so its method/body can be
		// rewritten by interceptRequest.
		pattern := network.RequestPattern{URLPattern: &url}
		patterns := []network.RequestPattern{pattern}
		f.cdpClient.Network.SetCacheDisabled(ctxTimeout, network.NewSetCacheDisabledArgs(true))
		interArgs := network.NewSetRequestInterceptionArgs(patterns)
		err = f.cdpClient.Network.SetRequestInterception(ctxTimeout, interArgs)
		if err != nil {
			return err
		}
		kill := make(chan bool)
		go f.interceptRequest(ctxTimeout, url, formData, kill)
		_, err = pageClient.Navigate(ctxTimeout, page.NewNavigateArgs(url))
		if err != nil {
			return err
		}
		kill <- true
	}
	select {
	// case <-exceptionThrown.Ready():
	// 	ev, err := exceptionThrown.Recv()
	// 	if err != nil {
	// 		return err
	// 	}
	// 	return errs.StatusError{400, errors.New(ev.ExceptionDetails.Error())}
	case <-loadEventFired.Ready():
		_, err = loadEventFired.Recv()
		if err != nil {
			return err
		}
	case <-loadingFailed.Ready():
		reply, err := loadingFailed.Recv()
		if err != nil {
			return err
		}
		// Only a failed top-level document is fatal; subresource failures
		// (images, stylesheets, ...) are ignored.
		if reply.Type == network.ResourceTypeDocument {
			return errs.StatusError{400, errors.New(reply.ErrorText)}
		}
	case <-ctx.Done():
		return nil /*
			case <-ctxTimeout.Done():
				return errs.StatusError{400, errors.New("Fetch timeout")} */
	}
	return nil
}
// setCookies stores cookies in memory; loadCookies installs them into the
// browser on the next Fetch. u is ignored and the error is always nil.
func (f *ChromeFetcher) setCookies(u *url.URL, cookies []*http.Cookie) error {
	f.cookies = cookies
	return nil
}
// loadCookies installs f.cookies (captured by a previous saveCookies or set
// via setCookies) into the browser through the CDP Network domain, so they
// are sent with the upcoming navigation.
func (f *ChromeFetcher) loadCookies() error {
	/* u, err := url.Parse(cookiesURL)
	if err != nil {
		return err
	} */
	for _, c := range f.cookies {
		c1 := network.SetCookieArgs{
			Name:  c.Name,
			Value: c.Value,
			Path:  &c.Path,
			/* Expires: expire, */
			Domain:   &c.Domain,
			HTTPOnly: &c.HttpOnly,
			Secure:   &c.Secure,
		}
		if !c.Expires.IsZero() {
			// CDP expects expiry as whole seconds since the Unix epoch.
			duration := c.Expires.Sub(time.Unix(0, 0))
			c1.Expires = network.TimeSinceEpoch(duration / time.Second)
		}
		_, err := f.cdpClient.Network.SetCookie(context.Background(), &c1)
		if err != nil {
			return err
		}
	}
	return nil
}
// getCookies returns the in-memory cookies captured from the browser;
// u is ignored and the error is always nil.
func (f *ChromeFetcher) getCookies(u *url.URL) ([]*http.Cookie, error) {
	return f.cookies, nil
}
// saveCookies reads the browser's cookies for u via CDP and converts them to
// net/http cookies so they can be re-installed on a later Fetch.
func (f *ChromeFetcher) saveCookies(u *url.URL) ([]*http.Cookie, error) {
	ncookies, err := f.cdpClient.Network.GetCookies(context.Background(), &network.GetCookiesArgs{URLs: []string{u.String()}})
	if err != nil {
		return nil, err
	}
	cookies := []*http.Cookie{}
	for _, c := range ncookies.Cookies {
		c1 := http.Cookie{
			Name:  c.Name,
			Value: c.Value,
			Path:  c.Path,
			/* Expires: expire, */
			Domain:   c.Domain,
			HttpOnly: c.HTTPOnly,
			Secure:   c.Secure,
		}
		if c.Expires > -1 {
			// CDP reports expiry as fractional seconds since the epoch;
			// split into whole seconds and nanoseconds for time.Unix.
			sec, dec := math.Modf(c.Expires)
			expire := time.Unix(int64(sec), int64(dec*(1e9)))
			/* logger.Info(expire.String())
			logger.Info(expire.Format("2006-01-02 15:04:05")) */
			c1.Expires = expire
		}
		cookies = append(cookies, &c1)
	}
	return cookies, nil
}
// interceptRequest services Network.requestIntercepted events until ctx ends
// or a value arrives on kill. The initial navigation request to originURL is
// rewritten into a POST carrying formData; images, stylesheets and excluded
// URLs are aborted; everything else continues unmodified. Runs as a
// goroutine started by navigate for POST navigations.
func (f *ChromeFetcher) interceptRequest(ctx context.Context, originURL string, formData string, kill chan bool) {
	var sig = false // set when the loop should terminate on the next pass
	cl, err := f.cdpClient.Network.RequestIntercepted(ctx)
	if err != nil {
		panic(err)
	}
	defer cl.Close()
	for {
		if sig {
			return
		}
		select {
		case <-cl.Ready():
			r, err := cl.Recv()
			if err != nil {
				logger.Error(err.Error())
				sig = true
				continue
			}
			if len(formData) > 0 && r.Request.URL == originURL && r.RedirectURL == nil {
				// This is the original navigation: turn it into the POST.
				interceptedArgs := network.NewContinueInterceptedRequestArgs(r.InterceptionID)
				interceptedArgs.SetMethod("POST")
				interceptedArgs.SetPostData(formData)
				fData := fmt.Sprintf(`{"Content-Type":"application/x-www-form-urlencoded","Content-Length":%d}`, len(formData))
				interceptedArgs.Headers = []byte(fData)
				if err = f.cdpClient.Network.ContinueInterceptedRequest(ctx, interceptedArgs); err != nil {
					logger.Error(err.Error())
					sig = true
					continue
				}
			} else {
				interceptedArgs := network.NewContinueInterceptedRequestArgs(r.InterceptionID)
				// Abort resources that are never needed for scraping.
				if r.ResourceType == network.ResourceTypeImage || r.ResourceType == network.ResourceTypeStylesheet || isExclude(r.Request.URL) {
					interceptedArgs.SetErrorReason(network.ErrorReasonAborted)
				}
				if err = f.cdpClient.Network.ContinueInterceptedRequest(ctx, interceptedArgs); err != nil {
					logger.Error(err.Error())
					sig = true
					continue
				}
				continue
			}
		case <-kill:
			sig = true
			// NOTE(review): this `break` exits only the select, not the for
			// loop; the `sig` flag performs the actual termination above.
			break
		}
	}
}
// isExclude reports whether origin contains any of the substrings listed in
// the EXCLUDERES configuration (resources to abort during interception).
func isExclude(origin string) bool {
	for _, res := range viper.GetStringSlice("EXCLUDERES") {
		// strings.Contains is the idiomatic form of Index(...) != -1.
		if strings.Contains(origin, res) {
			return true
		}
	}
	return false
}
// RunJSFromFile loads a JavaScript file, appends entryPointFunction as the
// invocation expression, compiles it in the page's runtime and executes it,
// awaiting any returned promise.
// BUG FIX: file-read and compile failures previously panicked even though
// the function declares an error return; they are now returned.
func (f ChromeFetcher) RunJSFromFile(ctx context.Context, path string, entryPointFunction string) error {
	exp, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	exp = append(exp, entryPointFunction...)
	compileReply, err := f.cdpClient.Runtime.CompileScript(ctx, &runtime.CompileScriptArgs{
		Expression:    string(exp),
		PersistScript: true,
	})
	if err != nil {
		return err
	}
	awaitPromise := true
	_, err = f.cdpClient.Runtime.RunScript(ctx, &runtime.RunScriptArgs{
		ScriptID:     *compileReply.ScriptID,
		AwaitPromise: &awaitPromise,
	})
	return err
}
// removeNodes deletes all provided nodeIDs from the DOM.
// func removeNodes(ctx context.Context, domClient cdp.DOM, nodes ...dom.NodeID) error {
// var rmNodes []runBatchFunc
// for _, id := range nodes {
// arg := dom.NewRemoveNodeArgs(id)
// rmNodes = append(rmNodes, func() error { return domClient.RemoveNode(ctx, arg) })
// }
// return runBatch(rmNodes...)
// }
// runBatchFunc is the function signature for runBatch.
type runBatchFunc func() error
// runBatch runs all functions simultaneously and waits until
// execution has completed or an error is encountered.
func runBatch(fn ...runBatchFunc) error {
eg := errgroup.Group{}
for _, f := range fn {
eg.Go(f)
}
return eg.Wait()
}
//GetURL returns URL to be fetched: whitespace-trimmed, with any trailing
// slashes removed.
func (req Request) getURL() string {
	trimmed := strings.TrimSpace(req.URL)
	return strings.TrimRight(trimmed, "/")
}
// Host returns Host value from Request's (normalized) URL.
func (req Request) Host() (string, error) {
	parsed, err := url.Parse(req.getURL())
	if err != nil {
		return "", err
	}
	return parsed.Host, nil
}
Skip action execution if the action string is empty.
package fetch
// The following code was sourced and modified from the
// https://github.com/andrew-d/goscrape package governed by MIT license.
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"net/http"
"net/http/cookiejar"
"net/url"
"strconv"
"strings"
"time"
"github.com/mafredri/cdp"
"github.com/mafredri/cdp/devtool"
"github.com/mafredri/cdp/protocol/dom"
"github.com/mafredri/cdp/protocol/network"
"github.com/mafredri/cdp/protocol/page"
"github.com/mafredri/cdp/protocol/runtime"
"github.com/mafredri/cdp/rpcc"
"github.com/slotix/dataflowkit/errs"
"github.com/spf13/viper"
"golang.org/x/net/publicsuffix"
"golang.org/x/sync/errgroup"
)
//Type represents types of fetcher
type Type string

//Fetcher types
const (
	//Base fetcher is used for downloading html web page using Go standard library's http
	Base Type = "Base"
	//Headless chrome is used to download content from JS driven web pages
	// NOTE(review): Chrome has no explicit Type annotation, so it is an
	// untyped string constant rather than Type — confirm this is intended.
	Chrome = "Chrome"
)
// Fetcher is the interface that must be satisfied by things that can fetch
// remote URLs and return their contents.
//
// Note: Fetchers may or may not be safe to use concurrently. Please read the
// documentation for each fetcher for more details.
type Fetcher interface {
	// Fetch is called to retrieve HTML content of a document from the remote server.
	Fetch(request Request) (io.ReadCloser, error)
	// getCookieJar returns the cookie jar currently attached to the fetcher.
	getCookieJar() http.CookieJar
	// setCookieJar attaches a cookie jar to the fetcher.
	setCookieJar(jar http.CookieJar)
	// getCookies returns the cookies stored for the given URL.
	getCookies(u *url.URL) ([]*http.Cookie, error)
	// setCookies stores cookies to be sent for the given URL.
	setCookies(u *url.URL, cookies []*http.Cookie) error
}
//Request struct contains request information sent to Fetchers
type Request struct {
	// Type defines Fetcher type. It may be "chrome" or "base". Defaults to "base".
	Type string `json:"type"`
	// URL to be retrieved
	URL string `json:"url"`
	// HTTP method : GET, POST
	Method string
	// FormData is a string value for passing formdata parameters.
	//
	// For example it may be used for processing pages which require authentication
	//
	// Example:
	//
	// "auth_key=880ea6a14ea49e853634fbdc5015a024&referer=http%3A%2F%2Fexample.com%2F&ips_username=user&ips_password=userpassword&rememberMe=1"
	//
	FormData string `json:"formData,omitempty"`
	//UserToken identifies user to keep personal cookies information.
	UserToken string `json:"userToken"`
	// Actions contains the list of action we have to perform on page
	// (a JSON-encoded array of {actionType: params} objects — see runActions).
	Actions string `json:"actions"`
}
// BaseFetcher is a Fetcher that uses the Go standard library's http
// client to fetch URLs.
type BaseFetcher struct {
	client *http.Client // shared HTTP client; its Jar carries session cookies
}
// ChromeFetcher is used to fetch JavaScript-rendered pages via headless Chrome.
type ChromeFetcher struct {
	cdpClient *cdp.Client    // Chrome DevTools Protocol client for the open tab
	client    *http.Client   // HTTP client used to reach the DevTools endpoint
	cookies   []*http.Cookie // cookies to install into the browser before navigation
}
//newFetcher creates instances of Fetcher for downloading a web page.
// An unknown type is a programmer error and panics via the logger.
func newFetcher(t Type) Fetcher {
	switch t {
	case Base:
		return newBaseFetcher()
	case Chrome:
		return newChromeFetcher()
	}
	logger.Panic(fmt.Sprintf("unhandled type: %#v", t))
	panic("unreachable")
}
// newBaseFetcher creates instances of newBaseFetcher{} to fetch
// a page content from regular websites as-is
// without running js scripts on the page.
// It returns nil when the PROXY setting is an invalid URL or the cookie jar
// cannot be created; both failures are now logged.
func newBaseFetcher() *BaseFetcher {
	var client *http.Client
	proxy := viper.GetString("PROXY")
	if len(proxy) > 0 {
		proxyURL, err := url.Parse(proxy)
		if err != nil {
			logger.Error(err.Error())
			return nil
		}
		transport := &http.Transport{Proxy: http.ProxyURL(proxyURL)}
		client = &http.Client{Transport: transport}
	} else {
		client = &http.Client{}
	}
	f := &BaseFetcher{
		client: client,
	}
	jarOpts := &cookiejar.Options{PublicSuffixList: publicsuffix.List}
	jar, err := cookiejar.New(jarOpts)
	if err != nil {
		// BUG FIX: previously this failure silently returned nil, leaving
		// callers with no clue why construction failed.
		logger.Error(err.Error())
		return nil
	}
	f.client.Jar = jar
	return f
}
// Fetch retrieves document from the remote server. It returns web page content along with cache and expiration information.
func (bf *BaseFetcher) Fetch(request Request) (io.ReadCloser, error) {
	resp, err := bf.response(request)
	if err == nil {
		return resp.Body, nil
	}
	return nil, err
}
//Response return response after document fetching using BaseFetcher.
// A request without form data uses the caller-supplied method; with form
// data a POST with a URL-encoded body is sent instead.
func (bf *BaseFetcher) response(r Request) (*http.Response, error) {
	//URL validation
	if _, err := url.ParseRequestURI(r.getURL()); err != nil {
		return nil, err
	}
	var err error
	var req *http.Request
	if r.FormData == "" {
		req, err = http.NewRequest(r.Method, r.URL, nil)
		if err != nil {
			return nil, err
		}
	} else {
		//if form data exists send POST request
		// Encode once; previously Encode() was computed twice (body and
		// Content-Length header).
		encoded := parseFormData(r.FormData).Encode()
		req, err = http.NewRequest("POST", r.URL, strings.NewReader(encoded))
		if err != nil {
			return nil, err
		}
		req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
		req.Header.Add("Content-Length", strconv.Itoa(len(encoded)))
	}
	//TODO: Add UA to requests
	//req.Header.Add("User-Agent", "Dataflow kit - https://github.com/slotix/dataflowkit")
	return bf.doRequest(req)
}
// doRequest executes req and returns the response on HTTP 200. Any other
// status yields an errs.StatusError; in that case the response body is
// closed so the underlying connection can be reused (previously it leaked).
func (bf *BaseFetcher) doRequest(req *http.Request) (*http.Response, error) {
	resp, err := bf.client.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == http.StatusOK {
		return resp, nil
	}
	// The caller never sees resp on this path, so close the body here.
	resp.Body.Close()
	return nil, errs.StatusError{
		resp.StatusCode,
		errors.New(http.StatusText(resp.StatusCode)),
	}
}
func (bf *BaseFetcher) getCookieJar() http.CookieJar { //*cookiejar.Jar {
return bf.client.Jar
}
func (bf *BaseFetcher) setCookieJar(jar http.CookieJar) {
bf.client.Jar = jar
}
// getCookies returns the cookies stored in the client's jar for u.
// The error result is always nil — presumably kept to match the
// ChromeFetcher method set; confirm before removing.
func (bf *BaseFetcher) getCookies(u *url.URL) ([]*http.Cookie, error) {
	return bf.client.Jar.Cookies(u), nil
}
// setCookies stores the given cookies in the client's jar for u.
// Always returns nil.
func (bf *BaseFetcher) setCookies(u *url.URL, cookies []*http.Cookie) error {
	bf.client.Jar.SetCookies(u, cookies)
	return nil
}
// parseFormData converts a raw urlencoded form-data string, e.g.
// "auth_key=880e...&ips_username=usr&rememberMe=0", into url.Values.
//
// Fixes over the original: a pair without '=' no longer panics with an
// index-out-of-range (it is stored with an empty value), and a value
// containing '=' is kept intact because only the first '=' splits the
// key from the value.
func parseFormData(fd string) url.Values {
	formData := url.Values{}
	for _, pair := range strings.Split(fd, "&") {
		kv := strings.SplitN(pair, "=", 2)
		if len(kv) == 2 {
			formData.Add(kv[0], kv[1])
		} else {
			formData.Add(kv[0], "")
		}
	}
	return formData
}
// Static type assertion: *BaseFetcher must satisfy the Fetcher interface.
var _ Fetcher = &BaseFetcher{}
// newChromeFetcher returns a ChromeFetcher whose HTTP client talks to
// the Chrome DevTools endpoint. When the PROXY setting is a valid URL,
// requests are routed through that proxy.
// NOTE(review): returns nil when the proxy URL fails to parse — callers
// must nil-check the result.
func newChromeFetcher() *ChromeFetcher {
	var client *http.Client
	proxy := viper.GetString("PROXY")
	if len(proxy) > 0 {
		proxyURL, err := url.Parse(proxy)
		if err != nil {
			logger.Error(err.Error())
			return nil
		}
		transport := &http.Transport{Proxy: http.ProxyURL(proxyURL)}
		client = &http.Client{Transport: transport}
	} else {
		client = &http.Client{}
	}
	f := &ChromeFetcher{
		client: client,
	}
	return f
}
// LogCodec captures the output from writing RPC requests and reading
// responses on the connection. It implements rpcc.Codec via
// WriteRequest and ReadResponse. It is installed only when the
// CHROME_TRACE setting is enabled.
type LogCodec struct{ conn io.ReadWriter }
// WriteRequest marshals req as JSON, echoes the payload to stdout for
// tracing, and writes it onto the underlying connection.
func (c *LogCodec) WriteRequest(req *rpcc.Request) error {
	var buf bytes.Buffer
	if encodeErr := json.NewEncoder(&buf).Encode(req); encodeErr != nil {
		return encodeErr
	}
	payload := buf.Bytes()
	fmt.Printf("SEND: %s", payload)
	_, writeErr := c.conn.Write(payload)
	return writeErr
}
// ReadResponse decodes the next JSON message from the connection into
// resp while echoing the raw bytes read to stdout for tracing.
func (c *LogCodec) ReadResponse(resp *rpcc.Response) error {
	var echo bytes.Buffer
	decoder := json.NewDecoder(io.TeeReader(c.conn, &echo))
	if decodeErr := decoder.Decode(resp); decodeErr != nil {
		return decodeErr
	}
	fmt.Printf("RECV: %s\n", echo.String())
	return nil
}
// Fetch retrieves document from the remote server. It returns web page
// content after a full headless-Chrome render: a new browser target is
// created over the DevTools protocol, cookies are replayed, the page is
// navigated (GET, or an intercepted POST when FormData is set), optional
// user actions run, and the final outer HTML of the DOM is returned.
func (f *ChromeFetcher) Fetch(request Request) (io.ReadCloser, error) {
	//URL validation
	if _, err := url.ParseRequestURI(strings.TrimSpace(request.getURL())); err != nil {
		return nil, err
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Connect to the DevTools endpoint (CHROME setting) and create a
	// fresh browser target for this fetch.
	devt := devtool.New(viper.GetString("CHROME"), devtool.WithClient(f.client))
	//https://github.com/mafredri/cdp/issues/60
	//pt, err := devt.Get(ctx, devtool.Page)
	pt, err := devt.Create(ctx)
	if err != nil {
		return nil, err
	}
	var conn *rpcc.Conn
	if viper.GetBool("CHROME_TRACE") {
		// Wrap the connection in LogCodec so every CDP message is echoed.
		newLogCodec := func(conn io.ReadWriter) rpcc.Codec {
			return &LogCodec{conn: conn}
		}
		// Connect to WebSocket URL (page) that speaks the Chrome Debugging Protocol.
		conn, err = rpcc.DialContext(ctx, pt.WebSocketDebuggerURL, rpcc.WithCodec(newLogCodec))
	} else {
		conn, err = rpcc.DialContext(ctx, pt.WebSocketDebuggerURL)
	}
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	defer conn.Close() // Cleanup.
	defer devt.Close(ctx, pt)
	// Create a new CDP Client that uses conn.
	f.cdpClient = cdp.NewClient(conn)
	if err = runBatch(
		// Enable all the domain events that we're interested in.
		func() error { return f.cdpClient.DOM.Enable(ctx) },
		func() error { return f.cdpClient.Network.Enable(ctx, nil) },
		func() error { return f.cdpClient.Page.Enable(ctx) },
		func() error { return f.cdpClient.Runtime.Enable(ctx) },
	); err != nil {
		return nil, err
	}
	// Replay cookies captured by a previous fetch into the new target.
	err = f.loadCookies()
	if err != nil {
		return nil, err
	}
	domLoadTimeout := 60 * time.Second
	if request.FormData == "" {
		err = f.navigate(ctx, f.cdpClient.Page, "GET", request.getURL(), "", domLoadTimeout)
	} else {
		// Form data present: navigate via an intercepted POST request.
		formData := parseFormData(request.FormData)
		err = f.navigate(ctx, f.cdpClient.Page, "POST", request.getURL(), formData.Encode(), domLoadTimeout)
	}
	if err != nil {
		return nil, err
	}
	// Run user-supplied page actions; failures are logged, not fatal.
	if err := f.runActions(ctx, request.Actions); err != nil {
		logger.Warn(err.Error())
	}
	u, err := url.Parse(request.getURL())
	if err != nil {
		return nil, err
	}
	// Persist the session cookies for subsequent fetches.
	f.cookies, err = f.saveCookies(u)
	if err != nil {
		return nil, err
	}
	// Fetch the document root node. We can pass nil here
	// since this method only takes optional arguments.
	doc, err := f.cdpClient.DOM.GetDocument(ctx, nil)
	if err != nil {
		return nil, err
	}
	// Get the outer HTML for the page.
	result, err := f.cdpClient.DOM.GetOuterHTML(ctx, &dom.GetOuterHTMLArgs{
		NodeID: &doc.Root.NodeID,
	})
	if err != nil {
		return nil, err
	}
	readCloser := ioutil.NopCloser(strings.NewReader(result.OuterHTML))
	return readCloser, nil
}
// runActions executes the page actions encoded in actionsJSON — a JSON
// array of single-entry objects mapping an action type to its
// parameters, e.g. [{"click":{...}},{"input":{...}}]. An empty string
// is a no-op.
//
// Fix over the original: the original returned after the first action
// whose construction succeeded (`return action.Execute(...)` inside the
// loop), so subsequent actions never ran, and NewAction errors were
// silently ignored. Now every action runs in order and the first
// construction or execution error aborts the run.
func (f *ChromeFetcher) runActions(ctx context.Context, actionsJSON string) error {
	if len(actionsJSON) == 0 {
		return nil
	}
	acts := []map[string]json.RawMessage{}
	if err := json.Unmarshal([]byte(actionsJSON), &acts); err != nil {
		return err
	}
	for _, actionMap := range acts {
		for actionType, params := range actionMap {
			action, err := NewAction(actionType, params)
			if err != nil {
				return err
			}
			if err := action.Execute(ctx, f); err != nil {
				return err
			}
		}
	}
	return nil
}
// setCookieJar replaces the cookie jar of the DevTools HTTP client.
func (f *ChromeFetcher) setCookieJar(jar http.CookieJar) {
	f.client.Jar = jar
}
// getCookieJar returns the cookie jar of the DevTools HTTP client.
func (f *ChromeFetcher) getCookieJar() http.CookieJar {
	return f.client.Jar
}
// Static type assertion: *ChromeFetcher must satisfy the Fetcher interface.
var _ Fetcher = &ChromeFetcher{}
// navigate opens url in the page attached to pageClient and blocks until
// the load event fires, loading fails, the parent ctx is done, or
// timeout elapses. For method "POST" the matching request is intercepted
// (see interceptRequest) and rewritten to carry formData as its body.
// An error is returned if timeout happens before the load event.
func (f *ChromeFetcher) navigate(ctx context.Context, pageClient cdp.Page, method, url string, formData string, timeout time.Duration) error {
	// Give in-flight page work a moment to settle before returning.
	defer time.Sleep(750 * time.Millisecond)
	ctxTimeout, cancelTimeout := context.WithTimeout(context.Background(), timeout)
	// FIX: release the timeout context on every return path. The
	// original called cancelTimeout only on the ctx.Done branch,
	// leaking the context (and its timer) on all other paths
	// (flagged by `go vet`'s lostcancel check).
	defer cancelTimeout()
	// Make sure Page events are enabled.
	err := pageClient.Enable(ctxTimeout)
	if err != nil {
		return err
	}
	// Subscribe to the success and failure event streams before
	// navigating so no event can be missed.
	loadEventFired, err := pageClient.LoadEventFired(ctxTimeout)
	if err != nil {
		return err
	}
	defer loadEventFired.Close()
	loadingFailed, err := f.cdpClient.Network.LoadingFailed(ctxTimeout)
	if err != nil {
		return err
	}
	defer loadingFailed.Close()
	// exceptionThrown, err := f.cdpClient.Runtime.ExceptionThrown(ctxTimeout)
	// if err != nil {
	// 	return err
	// }
	//defer exceptionThrown.Close()
	if method == "GET" {
		_, err = pageClient.Navigate(ctxTimeout, page.NewNavigateArgs(url))
		if err != nil {
			return err
		}
	} else {
		/* ast := "*" */
		// POST: intercept the document request for this URL and let
		// interceptRequest rewrite it with the form body.
		pattern := network.RequestPattern{URLPattern: &url}
		patterns := []network.RequestPattern{pattern}
		f.cdpClient.Network.SetCacheDisabled(ctxTimeout, network.NewSetCacheDisabledArgs(true))
		interArgs := network.NewSetRequestInterceptionArgs(patterns)
		err = f.cdpClient.Network.SetRequestInterception(ctxTimeout, interArgs)
		if err != nil {
			return err
		}
		kill := make(chan bool)
		go f.interceptRequest(ctxTimeout, url, formData, kill)
		_, err = pageClient.Navigate(ctxTimeout, page.NewNavigateArgs(url))
		if err != nil {
			return err
		}
		kill <- true
	}
	select {
	// case <-exceptionThrown.Ready():
	// 	ev, err := exceptionThrown.Recv()
	// 	if err != nil {
	// 		return err
	// 	}
	// 	return errs.StatusError{400, errors.New(ev.ExceptionDetails.Error())}
	case <-loadEventFired.Ready():
		_, err = loadEventFired.Recv()
		if err != nil {
			return err
		}
	case <-loadingFailed.Ready():
		reply, err := loadingFailed.Recv()
		if err != nil {
			return err
		}
		// Only a failed top-level document is fatal; subresource
		// failures (images, scripts, ...) are ignored.
		if reply.Type == network.ResourceTypeDocument {
			return errs.StatusError{400, errors.New(reply.ErrorText)}
		}
	case <-ctx.Done():
		return nil /*
	case <-ctxTimeout.Done():
		return errs.StatusError{400, errors.New("Fetch timeout")} */
	}
	return nil
}
// setCookies records cookies on the fetcher for replay into the next
// browser target by loadCookies. The URL argument is currently unused.
// Always returns nil.
func (f *ChromeFetcher) setCookies(u *url.URL, cookies []*http.Cookie) error {
	f.cookies = cookies
	return nil
}
// loadCookies installs the fetcher's stored cookies into the browser
// target via the DevTools Network domain, converting each http.Cookie
// into network.SetCookieArgs. Expiry is carried over only when set.
func (f *ChromeFetcher) loadCookies() error {
	/* u, err := url.Parse(cookiesURL)
	if err != nil {
		return err
	} */
	for _, c := range f.cookies {
		c1 := network.SetCookieArgs{
			Name:  c.Name,
			Value: c.Value,
			Path:  &c.Path,
			/* Expires: expire, */
			Domain:   &c.Domain,
			HTTPOnly: &c.HttpOnly,
			Secure:   &c.Secure,
		}
		if !c.Expires.IsZero() {
			// Convert the absolute expiry into whole seconds since the
			// Unix epoch, the representation DevTools expects.
			duration := c.Expires.Sub(time.Unix(0, 0))
			c1.Expires = network.TimeSinceEpoch(duration / time.Second)
		}
		_, err := f.cdpClient.Network.SetCookie(context.Background(), &c1)
		if err != nil {
			return err
		}
	}
	return nil
}
// getCookies returns the cookies captured by the last fetch. The URL
// argument is currently unused; the error is always nil.
func (f *ChromeFetcher) getCookies(u *url.URL) ([]*http.Cookie, error) {
	return f.cookies, nil
}
// saveCookies reads the cookies the browser holds for u via the DevTools
// Network domain and converts them back into []*http.Cookie, including
// expiry when the browser reports one.
func (f *ChromeFetcher) saveCookies(u *url.URL) ([]*http.Cookie, error) {
	ncookies, err := f.cdpClient.Network.GetCookies(context.Background(), &network.GetCookiesArgs{URLs: []string{u.String()}})
	if err != nil {
		return nil, err
	}
	cookies := []*http.Cookie{}
	for _, c := range ncookies.Cookies {
		c1 := http.Cookie{
			Name:  c.Name,
			Value: c.Value,
			Path:  c.Path,
			/* Expires: expire, */
			Domain:   c.Domain,
			HttpOnly: c.HTTPOnly,
			Secure:   c.Secure,
		}
		if c.Expires > -1 {
			// Expires is seconds since the epoch with a fractional
			// part; split it so nanoseconds survive the conversion.
			sec, dec := math.Modf(c.Expires)
			expire := time.Unix(int64(sec), int64(dec*(1e9)))
			/* logger.Info(expire.String())
			logger.Info(expire.Format("2006-01-02 15:04:05")) */
			c1.Expires = expire
		}
		cookies = append(cookies, &c1)
	}
	return cookies, nil
}
// interceptRequest services Network.requestIntercepted events for the
// current navigation. The (non-redirected) request matching originURL is
// rewritten into a POST carrying formData; image, stylesheet and
// excluded resources are aborted; everything else is continued
// unchanged. A value on kill — or a receive error — stops the loop.
// NOTE(review): panics if subscribing to the event stream fails, even
// though this runs in its own goroutine; consider logging and returning.
func (f *ChromeFetcher) interceptRequest(ctx context.Context, originURL string, formData string, kill chan bool) {
	// sig marks the loop for exit; set instead of returning directly so
	// `break`/`continue` inside the select stay simple.
	var sig = false
	cl, err := f.cdpClient.Network.RequestIntercepted(ctx)
	if err != nil {
		panic(err)
	}
	defer cl.Close()
	for {
		if sig {
			return
		}
		select {
		case <-cl.Ready():
			r, err := cl.Recv()
			if err != nil {
				logger.Error(err.Error())
				sig = true
				continue
			}
			if len(formData) > 0 && r.Request.URL == originURL && r.RedirectURL == nil {
				// Rewrite the original document request into a POST
				// with the urlencoded form body and matching headers.
				interceptedArgs := network.NewContinueInterceptedRequestArgs(r.InterceptionID)
				interceptedArgs.SetMethod("POST")
				interceptedArgs.SetPostData(formData)
				fData := fmt.Sprintf(`{"Content-Type":"application/x-www-form-urlencoded","Content-Length":%d}`, len(formData))
				interceptedArgs.Headers = []byte(fData)
				if err = f.cdpClient.Network.ContinueInterceptedRequest(ctx, interceptedArgs); err != nil {
					logger.Error(err.Error())
					sig = true
					continue
				}
			} else {
				interceptedArgs := network.NewContinueInterceptedRequestArgs(r.InterceptionID)
				// Abort resources that are never needed for scraping.
				if r.ResourceType == network.ResourceTypeImage || r.ResourceType == network.ResourceTypeStylesheet || isExclude(r.Request.URL) {
					interceptedArgs.SetErrorReason(network.ErrorReasonAborted)
				}
				if err = f.cdpClient.Network.ContinueInterceptedRequest(ctx, interceptedArgs); err != nil {
					logger.Error(err.Error())
					sig = true
					continue
				}
				continue
			}
		case <-kill:
			// break only exits the select; sig makes the enclosing
			// loop return on its next iteration.
			sig = true
			break
		}
	}
}
// isExclude reports whether origin contains any of the substrings
// listed in the EXCLUDERES setting; matching resources are aborted
// during request interception.
func isExclude(origin string) bool {
	for _, res := range viper.GetStringSlice("EXCLUDERES") {
		// strings.Contains is the idiomatic form of Index(...) != -1.
		if strings.Contains(origin, res) {
			return true
		}
	}
	return false
}
// RunJSFromFile compiles the script stored at path — with
// entryPointFunction appended so the desired function is invoked — and
// runs it in the page's JavaScript runtime, awaiting any returned
// promise.
//
// Fix over the original: read/compile failures are returned instead of
// panicking; this method already declares an error result and panicking
// here would tear down the whole process.
func (f ChromeFetcher) RunJSFromFile(ctx context.Context, path string, entryPointFunction string) error {
	exp, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	exp = append(exp, entryPointFunction...)
	compileReply, err := f.cdpClient.Runtime.CompileScript(ctx, &runtime.CompileScriptArgs{
		Expression:    string(exp),
		PersistScript: true,
	})
	if err != nil {
		return err
	}
	awaitPromise := true
	_, err = f.cdpClient.Runtime.RunScript(ctx, &runtime.RunScriptArgs{
		ScriptID:     *compileReply.ScriptID,
		AwaitPromise: &awaitPromise,
	})
	return err
}
// removeNodes deletes all provided nodeIDs from the DOM.
// func removeNodes(ctx context.Context, domClient cdp.DOM, nodes ...dom.NodeID) error {
// var rmNodes []runBatchFunc
// for _, id := range nodes {
// arg := dom.NewRemoveNodeArgs(id)
// rmNodes = append(rmNodes, func() error { return domClient.RemoveNode(ctx, arg) })
// }
// return runBatch(rmNodes...)
// }
// runBatchFunc is the function signature for runBatch.
type runBatchFunc func() error
// runBatch runs every function concurrently and waits for all of them
// to finish, returning the first error encountered (if any).
func runBatch(fn ...runBatchFunc) error {
	var group errgroup.Group
	for _, task := range fn {
		group.Go(task)
	}
	return group.Wait()
}
// getURL returns the request URL normalized for fetching: surrounding
// whitespace removed, then trailing slashes stripped.
func (req Request) getURL() string {
	trimmed := strings.TrimSpace(req.URL)
	return strings.TrimRight(trimmed, "/")
}
// Host returns the host component of the (normalized) request URL.
func (req Request) Host() (string, error) {
	parsed, parseErr := url.Parse(req.getURL())
	if parseErr != nil {
		return "", parseErr
	}
	return parsed.Host, nil
}
|
// +build debug
package dlog
import (
"log"
"runtime"
"strings"
"github.com/kirillDanshin/myutils"
)
// D dumps each value in v using Go-syntax formatting, preceded by the
// caller's location. Compiled in only under the `debug` build tag.
func (*WithCaller) D(v ...interface{}) {
	caller, _ := GetCaller()
	log.Println(caller)
	for _, item := range v {
		log.Printf("%+#v", item)
	}
}
// F is a build-time enabled printf; the caller's location is emitted on
// the line preceding the formatted message.
func (*WithCaller) F(f string, v ...interface{}) {
	c, _ := GetCaller()
	log.Printf(myutils.Concat(c.String(), "\n", f), v...)
}
// P is a build-time enabled print; output is preceded by the caller's
// location on its own line.
func (*WithCaller) P(v ...interface{}) {
	caller, _ := GetCaller()
	log.Println(caller)
	log.Print(v...)
}
// Ln is a build-time enabled println; output is preceded by the
// caller's location on its own line.
func (*WithCaller) Ln(v ...interface{}) {
	caller, _ := GetCaller()
	log.Println(caller)
	log.Println(v...)
}
// GetCaller returns the caller's file (basename only), line and function
// name. The optional stackBack shifts how many extra frames up the stack
// to inspect. The boolean is false when the frame cannot be resolved, in
// which case file and function are CallerUnknown.
func GetCaller(stackBack ...int) (*Caller, bool) {
	frames := 2
	if len(stackBack) > 0 {
		frames = stackBack[0] + 1
	}
	pc, file, line, ok := runtime.Caller(frames)
	if !ok {
		unknown := &Caller{
			File:     CallerUnknown,
			Line:     0,
			FuncName: CallerUnknown,
		}
		return unknown, false
	}
	// Keep only the basename of the reported path.
	if slash := strings.LastIndex(file, "/"); slash > 0 {
		file = file[slash+1:]
	}
	return &Caller{
		File:     file,
		Line:     line,
		FuncName: runtime.FuncForPC(pc).Name(),
	}, true
}
// make more clean output
// +build debug
package dlog
import (
"log"
"runtime"
"strings"
"github.com/kirillDanshin/myutils"
)
// D dumps each value in v using Go-syntax formatting, bracketed and
// prefixed by the caller's location. Compiled in only under `debug`.
func (*WithCaller) D(v ...interface{}) {
	where, _ := GetCaller()
	log.Print(where, ": [")
	for _, val := range v {
		log.Printf("%+#v", val)
	}
	log.Println("]")
}
// F is a build-time enabled printf; the caller's location is emitted on
// the line preceding the formatted message.
func (*WithCaller) F(f string, v ...interface{}) {
	c, _ := GetCaller()
	log.Printf(myutils.Concat(c.String(), "\n", f), v...)
}
// P is a build-time enabled print; output is bracketed and prefixed by
// the caller's location.
func (*WithCaller) P(v ...interface{}) {
	where, _ := GetCaller()
	log.Print(where, ": [")
	log.Print(v...)
	log.Println("]")
}
// Ln is a build-time enabled println; output is bracketed and prefixed
// by the caller's location.
func (*WithCaller) Ln(v ...interface{}) {
	where, _ := GetCaller()
	log.Print(where, ": [")
	log.Println(v...)
	log.Println("]")
}
// GetCaller returns the caller's file (basename only), line and function
// name. The optional stackBack shifts how many extra frames up the stack
// to inspect; the boolean is false when the frame cannot be resolved.
func GetCaller(stackBack ...int) (*Caller, bool) {
	depth := 2
	if len(stackBack) > 0 {
		depth = stackBack[0] + 1
	}
	pc, file, line, ok := runtime.Caller(depth)
	if !ok {
		return &Caller{File: CallerUnknown, Line: 0, FuncName: CallerUnknown}, false
	}
	// Trim the directory part, keeping only the basename.
	if idx := strings.LastIndex(file, "/"); idx > 0 {
		file = file[idx+1:]
	}
	caller := &Caller{
		File:     file,
		Line:     line,
		FuncName: runtime.FuncForPC(pc).Name(),
	}
	return caller, true
}
|
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"bytes"
"fmt"
"reflect"
"sort"
"sync"
"testing"
"time"
"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
"github.com/coreos/etcd/etcdserver/api/v3rpc"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/testutil"
"github.com/coreos/etcd/storage/storagepb"
)
// TestV3WatchFromCurrentRevision tests Watch APIs from current revision.
//
// Fixes over the original: the "canceled watcher on create" Errorf had
// one more argument than format verbs (a `go vet` printf failure), the
// key-putting goroutine called t.Fatalf (FailNow must only run in the
// goroutine running the test, per the testing package docs), and a
// "revison" typo in a message.
func TestV3WatchFromCurrentRevision(t *testing.T) {
	defer testutil.AfterTest(t)
	tests := []struct {
		putKeys      []string
		watchRequest *pb.WatchRequest
		wresps       []*pb.WatchResponse
	}{
		// watch the key, matching
		{
			[]string{"foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key: []byte("foo")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
			},
		},
		// watch the key, non-matching
		{
			[]string{"foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key: []byte("helloworld")}}},
			[]*pb.WatchResponse{},
		},
		// watch the prefix, matching
		{
			[]string{"fooLong"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key:      []byte("foo"),
					RangeEnd: []byte("fop")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("fooLong"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
			},
		},
		// watch the prefix, non-matching
		{
			[]string{"foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key:      []byte("helloworld"),
					RangeEnd: []byte("helloworle")}}},
			[]*pb.WatchResponse{},
		},
		// multiple puts, one watcher with matching key
		{
			[]string{"foo", "foo", "foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key: []byte("foo")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 3},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 4},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
						},
					},
				},
			},
		},
		// multiple puts, one watcher with matching prefix
		{
			[]string{"foo", "foo", "foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key:      []byte("foo"),
					RangeEnd: []byte("fop")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 3},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 4},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
						},
					},
				},
			},
		},
	}
	for i, tt := range tests {
		clus := NewClusterV3(t, &ClusterConfig{Size: 3})
		wAPI := toGRPC(clus.RandClient()).Watch
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()
		wStream, err := wAPI.Watch(ctx)
		if err != nil {
			t.Fatalf("#%d: wAPI.Watch error: %v", i, err)
		}
		err = wStream.Send(tt.watchRequest)
		if err != nil {
			t.Fatalf("#%d: wStream.Send error: %v", i, err)
		}
		// ensure watcher request created a new watcher
		cresp, err := wStream.Recv()
		if err != nil {
			t.Errorf("#%d: wStream.Recv error: %v", i, err)
			continue
		}
		if !cresp.Created {
			t.Errorf("#%d: did not create watchid, got +%v", i, cresp)
			continue
		}
		if cresp.Canceled {
			// FIX: the format string previously lacked a verb for cresp.
			t.Errorf("#%d: canceled watcher on create %+v", i, cresp)
			continue
		}
		createdWatchID := cresp.WatchId
		if cresp.Header == nil || cresp.Header.Revision != 1 {
			t.Errorf("#%d: header revision got +%v, wanted revision 1", i, cresp)
			continue
		}
		// asynchronously create keys
		go func() {
			for _, k := range tt.putKeys {
				kvc := toGRPC(clus.RandClient()).KV
				req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
				if _, err := kvc.Put(context.TODO(), req); err != nil {
					// FIX: Fatalf must not be called outside the test
					// goroutine; report the failure and stop putting.
					t.Errorf("#%d: couldn't put key (%v)", i, err)
					return
				}
			}
		}()
		// check stream results
		for j, wresp := range tt.wresps {
			resp, err := wStream.Recv()
			if err != nil {
				t.Errorf("#%d.%d: wStream.Recv error: %v", i, j, err)
			}
			if resp.Header == nil {
				t.Fatalf("#%d.%d: unexpected nil resp.Header", i, j)
			}
			if resp.Header.Revision != wresp.Header.Revision {
				t.Errorf("#%d.%d: resp.Header.Revision got = %d, want = %d", i, j, resp.Header.Revision, wresp.Header.Revision)
			}
			if wresp.Created != resp.Created {
				t.Errorf("#%d.%d: resp.Created got = %v, want = %v", i, j, resp.Created, wresp.Created)
			}
			if resp.WatchId != createdWatchID {
				t.Errorf("#%d.%d: resp.WatchId got = %d, want = %d", i, j, resp.WatchId, createdWatchID)
			}
			if !reflect.DeepEqual(resp.Events, wresp.Events) {
				t.Errorf("#%d.%d: resp.Events got = %+v, want = %+v", i, j, resp.Events, wresp.Events)
			}
		}
		// the stream must now stay silent
		rok, nr := waitResponse(wStream, 1*time.Second)
		if !rok {
			t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
		}
		// can't defer because tcp ports will be in use
		clus.Terminate(t)
	}
}
// TestV3WatchCancelSynced tests Watch APIs cancellation from synced map
// (startRev 0 watches from the current revision).
func TestV3WatchCancelSynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchCancel(t, 0)
}
// TestV3WatchCancelUnsynced tests Watch APIs cancellation from unsynced
// map (startRev 1 forces the watcher to catch up from an old revision).
func TestV3WatchCancelUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchCancel(t, 1)
}
// testV3WatchCancel creates a watcher on "foo" starting at startRev,
// cancels it, and verifies a subsequent put yields no watch response
// (the stream stays silent for one second).
func testV3WatchCancel(t *testing.T, startRev int64) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	wStream, errW := toGRPC(clus.RandClient()).Watch.Watch(ctx)
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}
	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{
			Key: []byte("foo"), StartRevision: startRev}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	wresp, errR := wStream.Recv()
	if errR != nil {
		t.Errorf("wStream.Recv error: %v", errR)
	}
	if !wresp.Created {
		t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
	}
	// cancel the watcher that was just created
	creq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CancelRequest{
		CancelRequest: &pb.WatchCancelRequest{
			WatchId: wresp.WatchId}}}
	if err := wStream.Send(creq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	cresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if !cresp.Canceled {
		t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
	}
	kvc := toGRPC(clus.RandClient()).KV
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Errorf("couldn't put key (%v)", err)
	}
	// watch got canceled, so this should block
	rok, nr := waitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
	clus.Terminate(t)
}
// TestV3WatchCurrentPutOverlap ensures current watchers receive all events with
// overlapping puts.
//
// Fix over the original: the put goroutines called t.Fatalf, but
// FailNow/Fatal must only run in the goroutine running the test
// (testing package docs); they now report via t.Errorf instead.
func TestV3WatchCurrentPutOverlap(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}
	// last mod_revision that will be observed
	nrRevisions := 32
	// first revision already allocated as empty revision
	for i := 1; i < nrRevisions; i++ {
		go func() {
			kvc := toGRPC(clus.RandClient()).KV
			req := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
			if _, err := kvc.Put(context.TODO(), req); err != nil {
				// FIX: was t.Fatalf — illegal outside the test goroutine.
				t.Errorf("couldn't put key (%v)", err)
			}
		}()
	}
	// maps watcher to current expected revision
	progress := make(map[int64]int64)
	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), RangeEnd: []byte("fop")}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("first watch request failed (%v)", err)
	}
	more := true
	progress[-1] = 0 // watcher creation pending
	for more {
		resp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if resp.Created {
			// accept events > header revision
			progress[resp.WatchId] = resp.Header.Revision + 1
			if resp.Header.Revision == int64(nrRevisions) {
				// covered all revisions; create no more watchers
				progress[-1] = int64(nrRevisions) + 1
			} else if err := wStream.Send(wreq); err != nil {
				t.Fatalf("watch request failed (%v)", err)
			}
		} else if len(resp.Events) == 0 {
			t.Fatalf("got events %v, want non-empty", resp.Events)
		} else {
			wRev, ok := progress[resp.WatchId]
			if !ok {
				t.Fatalf("got %+v, but watch id shouldn't exist ", resp)
			}
			if resp.Events[0].Kv.ModRevision != wRev {
				t.Fatalf("got %+v, wanted first revision %d", resp, wRev)
			}
			lastRev := resp.Events[len(resp.Events)-1].Kv.ModRevision
			progress[resp.WatchId] = lastRev + 1
		}
		// keep receiving until every watcher has seen all revisions
		more = false
		for _, v := range progress {
			if v <= int64(nrRevisions) {
				more = true
				break
			}
		}
	}
	if rok, nr := waitResponse(wStream, time.Second); !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
}
// TestV3WatchMultipleWatchersSynced runs the multiple-watchers test from
// the current revision (synced watchers).
func TestV3WatchMultipleWatchersSynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleWatchers(t, 0)
}
// TestV3WatchMultipleWatchersUnsynced runs the multiple-watchers test
// from revision 1 (unsynced watchers that must catch up).
func TestV3WatchMultipleWatchersUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleWatchers(t, 1)
}
// testV3WatchMultipleWatchers tests multiple watchers on the same key
// and one watcher with matching prefix. It first puts the key
// that matches all watchers, and another key that matches only
// one watcher to test if it receives expected events.
func testV3WatchMultipleWatchers(t *testing.T, startRev int64) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	kvc := toGRPC(clus.RandClient()).KV
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	wStream, errW := toGRPC(clus.RandClient()).Watch.Watch(ctx)
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}
	// watchKeyN exact-key watchers on "foo" plus one prefix watcher
	// covering ["fo", "fp").
	watchKeyN := 4
	for i := 0; i < watchKeyN+1; i++ {
		var wreq *pb.WatchRequest
		if i < watchKeyN {
			wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key: []byte("foo"), StartRevision: startRev}}}
		} else {
			wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key: []byte("fo"), RangeEnd: []byte("fp"), StartRevision: startRev}}}
		}
		if err := wStream.Send(wreq); err != nil {
			t.Fatalf("wStream.Send error: %v", err)
		}
	}
	// collect the watch ids from the creation acknowledgements
	ids := make(map[int64]struct{})
	for i := 0; i < watchKeyN+1; i++ {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if !wresp.Created {
			t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
		}
		ids[wresp.WatchId] = struct{}{}
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	// every watcher matches "foo": expect one event per watcher
	for i := 0; i < watchKeyN+1; i++ {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if _, ok := ids[wresp.WatchId]; !ok {
			t.Errorf("watchId %d is not created!", wresp.WatchId)
		} else {
			delete(ids, wresp.WatchId)
		}
		if len(wresp.Events) == 0 {
			t.Errorf("#%d: no events received", i)
		}
		for _, ev := range wresp.Events {
			if string(ev.Kv.Key) != "foo" {
				t.Errorf("ev.Kv.Key got = %s, want = foo", ev.Kv.Key)
			}
			if string(ev.Kv.Value) != "bar" {
				t.Errorf("ev.Kv.Value got = %s, want = bar", ev.Kv.Value)
			}
		}
	}
	// now put one key that has only one matching watcher
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("fo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	wresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if len(wresp.Events) != 1 {
		t.Fatalf("len(wresp.Events) got = %d, want = 1", len(wresp.Events))
	}
	if string(wresp.Events[0].Kv.Key) != "fo" {
		t.Errorf("wresp.Events[0].Kv.Key got = %s, want = fo", wresp.Events[0].Kv.Key)
	}
	// now Recv should block because there is no more events coming
	rok, nr := waitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
	clus.Terminate(t)
}
// TestV3WatchMultipleEventsTxnSynced runs the multi-event txn test from
// the current revision (synced watcher).
func TestV3WatchMultipleEventsTxnSynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleEventsTxn(t, 0)
}
// TestV3WatchMultipleEventsTxnUnsynced runs the multi-event txn test
// from revision 1 (unsynced watcher that must catch up).
func TestV3WatchMultipleEventsTxnUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleEventsTxn(t, 1)
}
// testV3WatchMultipleEventsTxn tests Watch APIs when it receives multiple events.
// A prefix watcher is created, then a single txn puts foo0..foo2; the
// watcher must deliver all three PUT events (possibly split across
// responses), which are compared after sorting by key.
func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}
	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{
			Key: []byte("foo"), RangeEnd: []byte("fop"), StartRevision: startRev}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	// build a txn that puts foo0, foo1, foo2 atomically
	kvc := toGRPC(clus.RandClient()).KV
	txn := pb.TxnRequest{}
	for i := 0; i < 3; i++ {
		ru := &pb.RequestUnion{}
		ru.Request = &pb.RequestUnion_RequestPut{
			RequestPut: &pb.PutRequest{
				Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte("bar")}}
		txn.Success = append(txn.Success, ru)
	}
	tresp, err := kvc.Txn(context.Background(), &txn)
	if err != nil {
		t.Fatalf("kvc.Txn error: %v", err)
	}
	if !tresp.Succeeded {
		t.Fatalf("kvc.Txn failed: %+v", tresp)
	}
	// gather events, skipping the watcher-created acknowledgement
	events := []*storagepb.Event{}
	for len(events) < 3 {
		resp, err := wStream.Recv()
		if err != nil {
			t.Errorf("wStream.Recv error: %v", err)
		}
		if resp.Created {
			continue
		}
		events = append(events, resp.Events...)
	}
	sort.Sort(eventsSortByKey(events))
	wevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo2"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
	}
	if !reflect.DeepEqual(events, wevents) {
		t.Errorf("events got = %+v, want = %+v", events, wevents)
	}
	rok, nr := waitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
	// can't defer because tcp ports will be in use
	clus.Terminate(t)
}
// eventsSortByKey implements sort.Interface, ordering storage events
// lexicographically by key so concurrently-delivered events can be
// compared deterministically.
type eventsSortByKey []*storagepb.Event

func (evs eventsSortByKey) Len() int           { return len(evs) }
func (evs eventsSortByKey) Swap(i, j int)      { evs[i], evs[j] = evs[j], evs[i] }
func (evs eventsSortByKey) Less(i, j int) bool { return bytes.Compare(evs[i].Kv.Key, evs[j].Kv.Key) < 0 }
// TestV3WatchMultipleEventsPutUnsynced creates a watcher with a past
// StartRevision so it must first catch up on the two puts made before the
// watch was registered, then deliver the two puts made afterwards.
func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	kvc := toGRPC(clus.RandClient()).KV
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo1"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}
	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{
			Key: []byte("foo"), RangeEnd: []byte("fop"), StartRevision: 1}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo1"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	allWevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 3, ModRevision: 3, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 2},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 3, ModRevision: 5, Version: 2},
		},
	}
	events := []*storagepb.Event{}
	for len(events) < 4 {
		resp, err := wStream.Recv()
		if err != nil {
			// Fatalf, not Errorf: on a Recv error resp is nil, so the
			// resp.Created dereference below would panic, and there is
			// no point in continuing the loop.
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if resp.Created {
			continue
		}
		events = append(events, resp.Events...)
		// if PUT requests are committed by now, first receive would return
		// multiple events, but if not, it returns a single event. In SSD,
		// it should return 4 events at once.
	}
	if !reflect.DeepEqual(events, allWevents) {
		t.Errorf("events got = %+v, want = %+v", events, allWevents)
	}
	rok, nr := waitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
}
// TestV3WatchMultipleStreamsSynced runs the multi-stream watch test with
// StartRevision 0 (watch from the current revision, the synced path).
func TestV3WatchMultipleStreamsSynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleStreams(t, 0)
}
// TestV3WatchMultipleStreamsUnsynced runs the multi-stream watch test with
// StartRevision 1 (watch from an old revision, the unsynced path).
func TestV3WatchMultipleStreamsUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleStreams(t, 1)
}
// testV3WatchMultipleStreams tests multiple watchers on the same key on
// multiple gRPC streams: every stream must observe the same single event.
func testV3WatchMultipleStreams(t *testing.T, startRev int64) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	wAPI := toGRPC(clus.RandClient()).Watch
	kvc := toGRPC(clus.RandClient()).KV
	streams := make([]pb.Watch_WatchClient, 5)
	for i := range streams {
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()
		wStream, errW := wAPI.Watch(ctx)
		if errW != nil {
			t.Fatalf("wAPI.Watch error: %v", errW)
		}
		wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
			CreateRequest: &pb.WatchCreateRequest{
				Key: []byte("foo"), StartRevision: startRev}}}
		if err := wStream.Send(wreq); err != nil {
			t.Fatalf("wStream.Send error: %v", err)
		}
		streams[i] = wStream
	}
	// wait until every watcher is registered before generating the event
	for _, wStream := range streams {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if !wresp.Created {
			t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
		}
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	var wg sync.WaitGroup
	wg.Add(len(streams))
	wevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
	}
	for i := range streams {
		go func(i int) {
			defer wg.Done()
			wStream := streams[i]
			wresp, err := wStream.Recv()
			if err != nil {
				// t.Fatalf (FailNow) must only be called from the goroutine
				// running the test function; from a spawned goroutine, record
				// the failure and return so wg.Wait() does not hang.
				t.Errorf("wStream.Recv error: %v", err)
				return
			}
			if wresp.WatchId != 0 {
				t.Errorf("watchId got = %d, want = 0", wresp.WatchId)
			}
			if !reflect.DeepEqual(wresp.Events, wevents) {
				t.Errorf("wresp.Events got = %+v, want = %+v", wresp.Events, wevents)
			}
			// now Recv should block because there is no more events coming
			rok, nr := waitResponse(wStream, 1*time.Second)
			if !rok {
				t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
			}
		}(i)
	}
	wg.Wait()
	clus.Terminate(t)
}
// waitResponse waits on the given stream for given duration.
// If there is no more events, true and a nil response will be
// returned closing the WatchClient stream. Or the response will
// be returned.
func waitResponse(wc pb.Watch_WatchClient, timeout time.Duration) (bool, *pb.WatchResponse) {
	rCh := make(chan *pb.WatchResponse)
	go func() {
		// on a Recv error (including the CloseSend below ending the
		// stream) resp is nil; the error itself is intentionally dropped.
		resp, _ := wc.Recv()
		rCh <- resp
	}()
	select {
	case nr := <-rCh:
		// a response arrived within the timeout
		return false, nr
	case <-time.After(timeout):
	}
	// timed out: close the send side so the server ends the stream and
	// the pending Recv above unblocks.
	wc.CloseSend()
	rv, ok := <-rCh
	if rv != nil || !ok {
		// a late response still counts as "received something"
		return false, rv
	}
	return true, nil
}
// TestV3WatchInvalidFutureRevision ensures an invalid future revision to
// Watch APIs returns a WatchResponse with Created and Canceled both true
// and WatchId -1.
func TestV3WatchInvalidFutureRevision(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}
	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 100}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("watch request failed (%v)", err)
	}
	resp, err := wStream.Recv()
	if err != nil {
		// Fatalf, not Errorf: resp is nil on error and the checks below
		// would panic dereferencing it.
		t.Fatalf("wStream.Recv error: %v", err)
	}
	if resp.WatchId != -1 || !resp.Created || !resp.Canceled || len(resp.Events) != 0 {
		t.Errorf("invalid start-rev expected -1, true, true, 0, but got %d, %v, %v, %d",
			resp.WatchId, resp.Created, resp.Canceled, len(resp.Events))
	}
}
// TestWatchWithProgressNotify shortens the progress-report interval and
// verifies a ProgressNotify watcher receives an empty notification at
// revision 1 while a plain watcher on the same key receives none.
func TestWatchWithProgressNotify(t *testing.T) {
	testInterval := 3 * time.Second
	pi := v3rpc.ProgressReportInterval
	v3rpc.ProgressReportInterval = testInterval
	defer func() { v3rpc.ProgressReportInterval = pi }()
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}
	// create two watchers, one with progressNotify set.
	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 1, ProgressNotify: true}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("watch request failed (%v)", err)
	}
	wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 1}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("watch request failed (%v)", err)
	}
	// two creation + one notification
	for i := 0; i < 3; i++ {
		rok, resp := waitResponse(wStream, testInterval+time.Second)
		// check the timeout result BEFORE dereferencing resp: waitResponse
		// returns (true, nil) on timeout, so the original order
		// (resp.Created first) would panic on the nil response.
		if rok {
			t.Fatalf("failed to receive response from watch stream")
		}
		if resp.Created {
			continue
		}
		if resp.Header.Revision != 1 {
			t.Errorf("revision = %d, want 1", resp.Header.Revision)
		}
		if len(resp.Events) != 0 {
			t.Errorf("len(resp.Events) = %d, want 0", len(resp.Events))
		}
	}
	// no more notification
	rok, resp := waitResponse(wStream, testInterval+time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", resp)
	}
}
integration: add test for full range watching
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"bytes"
"fmt"
"reflect"
"sort"
"sync"
"testing"
"time"
"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
"github.com/coreos/etcd/etcdserver/api/v3rpc"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/testutil"
"github.com/coreos/etcd/storage/storagepb"
)
// TestV3WatchFromCurrentRevision tests Watch APIs from current revision.
// Each table case creates a fresh cluster, registers one watcher, puts the
// listed keys asynchronously, and checks the received responses in order.
func TestV3WatchFromCurrentRevision(t *testing.T) {
	defer testutil.AfterTest(t)
	tests := []struct {
		putKeys      []string
		watchRequest *pb.WatchRequest
		wresps       []*pb.WatchResponse
	}{
		// watch the key, matching
		{
			[]string{"foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key: []byte("foo")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
			},
		},
		// watch the key, non-matching
		{
			[]string{"foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key: []byte("helloworld")}}},
			[]*pb.WatchResponse{},
		},
		// watch the prefix, matching
		{
			[]string{"fooLong"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key:      []byte("foo"),
					RangeEnd: []byte("fop")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("fooLong"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
			},
		},
		// watch the prefix, non-matching
		{
			[]string{"foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key:      []byte("helloworld"),
					RangeEnd: []byte("helloworle")}}},
			[]*pb.WatchResponse{},
		},
		// watch full range, matching
		{
			[]string{"fooLong"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key:      []byte(""),
					RangeEnd: []byte("\x00")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("fooLong"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
			},
		},
		// multiple puts, one watcher with matching key
		{
			[]string{"foo", "foo", "foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key: []byte("foo")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 3},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 4},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
						},
					},
				},
			},
		},
		// multiple puts, one watcher with matching prefix
		{
			[]string{"foo", "foo", "foo"},
			&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key:      []byte("foo"),
					RangeEnd: []byte("fop")}}},
			[]*pb.WatchResponse{
				{
					Header:  &pb.ResponseHeader{Revision: 2},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 3},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2},
						},
					},
				},
				{
					Header:  &pb.ResponseHeader{Revision: 4},
					Created: false,
					Events: []*storagepb.Event{
						{
							Type: storagepb.PUT,
							Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3},
						},
					},
				},
			},
		},
	}
	for i, tt := range tests {
		clus := NewClusterV3(t, &ClusterConfig{Size: 3})
		wAPI := toGRPC(clus.RandClient()).Watch
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()
		wStream, err := wAPI.Watch(ctx)
		if err != nil {
			t.Fatalf("#%d: wAPI.Watch error: %v", i, err)
		}
		err = wStream.Send(tt.watchRequest)
		if err != nil {
			t.Fatalf("#%d: wStream.Send error: %v", i, err)
		}
		// ensure watcher request created a new watcher
		cresp, err := wStream.Recv()
		if err != nil {
			t.Errorf("#%d: wStream.Recv error: %v", i, err)
			continue
		}
		if !cresp.Created {
			t.Errorf("#%d: did not create watchid, got +%v", i, cresp)
			continue
		}
		if cresp.Canceled {
			// BUG FIX: the original format string had no verb for cresp,
			// which go vet flags as a printf argument-count mismatch.
			t.Errorf("#%d: canceled watcher on create %+v", i, cresp)
			continue
		}
		createdWatchId := cresp.WatchId
		if cresp.Header == nil || cresp.Header.Revision != 1 {
			t.Errorf("#%d: header revision got +%v, wanted revison 1", i, cresp)
			continue
		}
		// asynchronously create keys
		go func() {
			for _, k := range tt.putKeys {
				kvc := toGRPC(clus.RandClient()).KV
				req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")}
				if _, err := kvc.Put(context.TODO(), req); err != nil {
					// t.Fatalf must not be called off the test goroutine;
					// record the failure and stop putting keys instead.
					t.Errorf("#%d: couldn't put key (%v)", i, err)
					return
				}
			}
		}()
		// check stream results
		for j, wresp := range tt.wresps {
			resp, err := wStream.Recv()
			if err != nil {
				// Fatalf: resp is nil on error and is dereferenced below.
				t.Fatalf("#%d.%d: wStream.Recv error: %v", i, j, err)
			}
			if resp.Header == nil {
				t.Fatalf("#%d.%d: unexpected nil resp.Header", i, j)
			}
			if resp.Header.Revision != wresp.Header.Revision {
				t.Errorf("#%d.%d: resp.Header.Revision got = %d, want = %d", i, j, resp.Header.Revision, wresp.Header.Revision)
			}
			if wresp.Created != resp.Created {
				t.Errorf("#%d.%d: resp.Created got = %v, want = %v", i, j, resp.Created, wresp.Created)
			}
			if resp.WatchId != createdWatchId {
				t.Errorf("#%d.%d: resp.WatchId got = %d, want = %d", i, j, resp.WatchId, createdWatchId)
			}
			if !reflect.DeepEqual(resp.Events, wresp.Events) {
				t.Errorf("#%d.%d: resp.Events got = %+v, want = %+v", i, j, resp.Events, wresp.Events)
			}
		}
		rok, nr := waitResponse(wStream, 1*time.Second)
		if !rok {
			t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
		}
		// can't defer because tcp ports will be in use
		clus.Terminate(t)
	}
}
// TestV3WatchCancelSynced tests Watch APIs cancellation from synced map
// (startRev 0, i.e. watching from the current revision).
func TestV3WatchCancelSynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchCancel(t, 0)
}
// TestV3WatchCancelUnsynced tests Watch APIs cancellation from unsynced map
// (startRev 1, i.e. watching from an old revision).
func TestV3WatchCancelUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchCancel(t, 1)
}
// testV3WatchCancel creates a watcher, cancels it via a CancelRequest, and
// verifies a subsequent put produces no watch response on the stream.
func testV3WatchCancel(t *testing.T, startRev int64) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	wStream, errW := toGRPC(clus.RandClient()).Watch.Watch(ctx)
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}
	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{
			Key: []byte("foo"), StartRevision: startRev}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	wresp, errR := wStream.Recv()
	if errR != nil {
		// Fatalf, not Errorf: wresp is nil on error and is dereferenced below.
		t.Fatalf("wStream.Recv error: %v", errR)
	}
	if !wresp.Created {
		t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
	}
	creq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CancelRequest{
		CancelRequest: &pb.WatchCancelRequest{
			WatchId: wresp.WatchId}}}
	if err := wStream.Send(creq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	cresp, err := wStream.Recv()
	if err != nil {
		// Fatalf for the same nil-dereference reason.
		t.Fatalf("wStream.Recv error: %v", err)
	}
	if !cresp.Canceled {
		t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
	}
	kvc := toGRPC(clus.RandClient()).KV
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Errorf("couldn't put key (%v)", err)
	}
	// watch got canceled, so this should block
	rok, nr := waitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
	clus.Terminate(t)
}
// TestV3WatchCurrentPutOverlap ensures current watchers receive all events with
// overlapping puts. It keeps creating watchers while puts are in flight and
// tracks the next expected revision per watcher until all revisions are seen.
func TestV3WatchCurrentPutOverlap(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}
	// last mod_revision that will be observed
	nrRevisions := 32
	// first revision already allocated as empty revision
	for i := 1; i < nrRevisions; i++ {
		go func() {
			kvc := toGRPC(clus.RandClient()).KV
			req := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
			if _, err := kvc.Put(context.TODO(), req); err != nil {
				// t.Fatalf must not be called off the test goroutine
				// (FailNow only stops the test goroutine); use Errorf.
				t.Errorf("couldn't put key (%v)", err)
			}
		}()
	}
	// maps watcher to current expected revision
	progress := make(map[int64]int64)
	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), RangeEnd: []byte("fop")}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("first watch request failed (%v)", err)
	}
	more := true
	progress[-1] = 0 // watcher creation pending
	for more {
		resp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if resp.Created {
			// accept events > header revision
			progress[resp.WatchId] = resp.Header.Revision + 1
			if resp.Header.Revision == int64(nrRevisions) {
				// covered all revisions; create no more watchers
				progress[-1] = int64(nrRevisions) + 1
			} else if err := wStream.Send(wreq); err != nil {
				t.Fatalf("watch request failed (%v)", err)
			}
		} else if len(resp.Events) == 0 {
			t.Fatalf("got events %v, want non-empty", resp.Events)
		} else {
			wRev, ok := progress[resp.WatchId]
			if !ok {
				t.Fatalf("got %+v, but watch id shouldn't exist ", resp)
			}
			if resp.Events[0].Kv.ModRevision != wRev {
				t.Fatalf("got %+v, wanted first revision %d", resp, wRev)
			}
			lastRev := resp.Events[len(resp.Events)-1].Kv.ModRevision
			progress[resp.WatchId] = lastRev + 1
		}
		// keep looping while any watcher still has revisions to observe
		more = false
		for _, v := range progress {
			if v <= int64(nrRevisions) {
				more = true
				break
			}
		}
	}
	if rok, nr := waitResponse(wStream, time.Second); !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
}
// TestV3WatchMultipleWatchersSynced runs the multi-watcher test with
// StartRevision 0 (watch from the current revision, the synced path).
func TestV3WatchMultipleWatchersSynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleWatchers(t, 0)
}
// TestV3WatchMultipleWatchersUnsynced runs the multi-watcher test with
// StartRevision 1 (watch from an old revision, the unsynced path).
func TestV3WatchMultipleWatchersUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleWatchers(t, 1)
}
// testV3WatchMultipleWatchers tests multiple watchers on the same key
// and one watcher with matching prefix. It first puts the key
// that matches all watchers, and another key that matches only
// one watcher to test if it receives expected events.
func testV3WatchMultipleWatchers(t *testing.T, startRev int64) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	kvc := toGRPC(clus.RandClient()).KV
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	wStream, errW := toGRPC(clus.RandClient()).Watch.Watch(ctx)
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}
	watchKeyN := 4
	// register watchKeyN watchers on "foo" plus one prefix watcher on "fo"
	for i := 0; i < watchKeyN+1; i++ {
		var wreq *pb.WatchRequest
		if i < watchKeyN {
			wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key: []byte("foo"), StartRevision: startRev}}}
		} else {
			wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
				CreateRequest: &pb.WatchCreateRequest{
					Key: []byte("fo"), RangeEnd: []byte("fp"), StartRevision: startRev}}}
		}
		if err := wStream.Send(wreq); err != nil {
			t.Fatalf("wStream.Send error: %v", err)
		}
	}
	ids := make(map[int64]struct{})
	for i := 0; i < watchKeyN+1; i++ {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if !wresp.Created {
			t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
		}
		ids[wresp.WatchId] = struct{}{}
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	// every watcher must deliver the "foo" event exactly once
	for i := 0; i < watchKeyN+1; i++ {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if _, ok := ids[wresp.WatchId]; !ok {
			t.Errorf("watchId %d is not created!", wresp.WatchId)
		} else {
			delete(ids, wresp.WatchId)
		}
		if len(wresp.Events) == 0 {
			t.Errorf("#%d: no events received", i)
		}
		for _, ev := range wresp.Events {
			if string(ev.Kv.Key) != "foo" {
				t.Errorf("ev.Kv.Key got = %s, want = foo", ev.Kv.Key)
			}
			if string(ev.Kv.Value) != "bar" {
				t.Errorf("ev.Kv.Value got = %s, want = bar", ev.Kv.Value)
			}
		}
	}
	// now put one key that has only one matching watcher
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("fo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	wresp, err := wStream.Recv()
	if err != nil {
		// Fatalf, not Errorf: wresp is nil on error and is dereferenced below.
		t.Fatalf("wStream.Recv error: %v", err)
	}
	if len(wresp.Events) != 1 {
		t.Fatalf("len(wresp.Events) got = %d, want = 1", len(wresp.Events))
	}
	if string(wresp.Events[0].Kv.Key) != "fo" {
		t.Errorf("wresp.Events[0].Kv.Key got = %s, want = fo", wresp.Events[0].Kv.Key)
	}
	// now Recv should block because there is no more events coming
	rok, nr := waitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
	clus.Terminate(t)
}
// TestV3WatchMultipleEventsTxnSynced runs the txn multi-event test with
// StartRevision 0 (watch from the current revision, the synced path).
func TestV3WatchMultipleEventsTxnSynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleEventsTxn(t, 0)
}
// TestV3WatchMultipleEventsTxnUnsynced runs the txn multi-event test with
// StartRevision 1 (watch from an old revision, the unsynced path).
func TestV3WatchMultipleEventsTxnUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleEventsTxn(t, 1)
}
// testV3WatchMultipleEventsTxn tests Watch APIs when it receives multiple
// events from one txn: all three puts must be delivered to the watcher.
func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}
	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{
			Key: []byte("foo"), RangeEnd: []byte("fop"), StartRevision: startRev}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	kvc := toGRPC(clus.RandClient()).KV
	// one txn holding three puts; they all commit at the same revision
	txn := pb.TxnRequest{}
	for i := 0; i < 3; i++ {
		ru := &pb.RequestUnion{}
		ru.Request = &pb.RequestUnion_RequestPut{
			RequestPut: &pb.PutRequest{
				Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte("bar")}}
		txn.Success = append(txn.Success, ru)
	}
	tresp, err := kvc.Txn(context.Background(), &txn)
	if err != nil {
		t.Fatalf("kvc.Txn error: %v", err)
	}
	if !tresp.Succeeded {
		t.Fatalf("kvc.Txn failed: %+v", tresp)
	}
	events := []*storagepb.Event{}
	for len(events) < 3 {
		resp, err := wStream.Recv()
		if err != nil {
			// Fatalf, not Errorf: resp is nil on a Recv error and the
			// resp.Created dereference below would panic.
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if resp.Created {
			continue
		}
		events = append(events, resp.Events...)
	}
	// events may arrive in any grouping; sort by key before comparing
	sort.Sort(eventsSortByKey(events))
	wevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo2"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
	}
	if !reflect.DeepEqual(events, wevents) {
		t.Errorf("events got = %+v, want = %+v", events, wevents)
	}
	rok, nr := waitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
	// can't defer because tcp ports will be in use
	clus.Terminate(t)
}
// eventsSortByKey implements sort.Interface to order watch events by
// their key bytes in ascending order.
type eventsSortByKey []*storagepb.Event

func (es eventsSortByKey) Len() int      { return len(es) }
func (es eventsSortByKey) Swap(i, j int) { es[i], es[j] = es[j], es[i] }
func (es eventsSortByKey) Less(i, j int) bool {
	return bytes.Compare(es[i].Kv.Key, es[j].Kv.Key) < 0
}
// TestV3WatchMultipleEventsPutUnsynced creates a watcher with a past
// StartRevision so it must first catch up on the two puts made before the
// watch was registered, then deliver the two puts made afterwards.
func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	kvc := toGRPC(clus.RandClient()).KV
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo1"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}
	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{
			Key: []byte("foo"), RangeEnd: []byte("fop"), StartRevision: 1}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo1"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	allWevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 3, ModRevision: 3, Version: 1},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 2},
		},
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 3, ModRevision: 5, Version: 2},
		},
	}
	events := []*storagepb.Event{}
	for len(events) < 4 {
		resp, err := wStream.Recv()
		if err != nil {
			// Fatalf, not Errorf: on a Recv error resp is nil, so the
			// resp.Created dereference below would panic, and there is
			// no point in continuing the loop.
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if resp.Created {
			continue
		}
		events = append(events, resp.Events...)
		// if PUT requests are committed by now, first receive would return
		// multiple events, but if not, it returns a single event. In SSD,
		// it should return 4 events at once.
	}
	if !reflect.DeepEqual(events, allWevents) {
		t.Errorf("events got = %+v, want = %+v", events, allWevents)
	}
	rok, nr := waitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}
}
// TestV3WatchMultipleStreamsSynced runs the multi-stream watch test with
// StartRevision 0 (watch from the current revision, the synced path).
func TestV3WatchMultipleStreamsSynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleStreams(t, 0)
}
// TestV3WatchMultipleStreamsUnsynced runs the multi-stream watch test with
// StartRevision 1 (watch from an old revision, the unsynced path).
func TestV3WatchMultipleStreamsUnsynced(t *testing.T) {
	defer testutil.AfterTest(t)
	testV3WatchMultipleStreams(t, 1)
}
// testV3WatchMultipleStreams tests multiple watchers on the same key on
// multiple gRPC streams: every stream must observe the same single event.
func testV3WatchMultipleStreams(t *testing.T, startRev int64) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	wAPI := toGRPC(clus.RandClient()).Watch
	kvc := toGRPC(clus.RandClient()).KV
	streams := make([]pb.Watch_WatchClient, 5)
	for i := range streams {
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()
		wStream, errW := wAPI.Watch(ctx)
		if errW != nil {
			t.Fatalf("wAPI.Watch error: %v", errW)
		}
		wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
			CreateRequest: &pb.WatchCreateRequest{
				Key: []byte("foo"), StartRevision: startRev}}}
		if err := wStream.Send(wreq); err != nil {
			t.Fatalf("wStream.Send error: %v", err)
		}
		streams[i] = wStream
	}
	// wait until every watcher is registered before generating the event
	for _, wStream := range streams {
		wresp, err := wStream.Recv()
		if err != nil {
			t.Fatalf("wStream.Recv error: %v", err)
		}
		if !wresp.Created {
			t.Fatalf("wresp.Created got = %v, want = true", wresp.Created)
		}
	}
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Fatalf("couldn't put key (%v)", err)
	}
	var wg sync.WaitGroup
	wg.Add(len(streams))
	wevents := []*storagepb.Event{
		{
			Type: storagepb.PUT,
			Kv:   &storagepb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1},
		},
	}
	for i := range streams {
		go func(i int) {
			defer wg.Done()
			wStream := streams[i]
			wresp, err := wStream.Recv()
			if err != nil {
				// t.Fatalf (FailNow) must only be called from the goroutine
				// running the test function; from a spawned goroutine, record
				// the failure and return so wg.Wait() does not hang.
				t.Errorf("wStream.Recv error: %v", err)
				return
			}
			if wresp.WatchId != 0 {
				t.Errorf("watchId got = %d, want = 0", wresp.WatchId)
			}
			if !reflect.DeepEqual(wresp.Events, wevents) {
				t.Errorf("wresp.Events got = %+v, want = %+v", wresp.Events, wevents)
			}
			// now Recv should block because there is no more events coming
			rok, nr := waitResponse(wStream, 1*time.Second)
			if !rok {
				t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
			}
		}(i)
	}
	wg.Wait()
	clus.Terminate(t)
}
// waitResponse waits on the given stream for given duration.
// If there is no more events, true and a nil response will be
// returned closing the WatchClient stream. Or the response will
// be returned.
func waitResponse(wc pb.Watch_WatchClient, timeout time.Duration) (bool, *pb.WatchResponse) {
	rCh := make(chan *pb.WatchResponse)
	go func() {
		// on a Recv error (including the CloseSend below ending the
		// stream) resp is nil; the error itself is intentionally dropped.
		resp, _ := wc.Recv()
		rCh <- resp
	}()
	select {
	case nr := <-rCh:
		// a response arrived within the timeout
		return false, nr
	case <-time.After(timeout):
	}
	// timed out: close the send side so the server ends the stream and
	// the pending Recv above unblocks.
	wc.CloseSend()
	rv, ok := <-rCh
	if rv != nil || !ok {
		// a late response still counts as "received something"
		return false, rv
	}
	return true, nil
}
// TestV3WatchInvalidFutureRevision ensures an invalid future revision to
// Watch APIs returns a WatchResponse with Created and Canceled both true
// and WatchId -1.
func TestV3WatchInvalidFutureRevision(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}
	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 100}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("watch request failed (%v)", err)
	}
	resp, err := wStream.Recv()
	if err != nil {
		// Fatalf, not Errorf: resp is nil on error and the checks below
		// would panic dereferencing it.
		t.Fatalf("wStream.Recv error: %v", err)
	}
	if resp.WatchId != -1 || !resp.Created || !resp.Canceled || len(resp.Events) != 0 {
		t.Errorf("invalid start-rev expected -1, true, true, 0, but got %d, %v, %v, %d",
			resp.WatchId, resp.Created, resp.Canceled, len(resp.Events))
	}
}
// TestWatchWithProgressNotify shortens the progress-report interval and
// verifies a ProgressNotify watcher receives an empty notification at
// revision 1 while a plain watcher on the same key receives none.
func TestWatchWithProgressNotify(t *testing.T) {
	testInterval := 3 * time.Second
	pi := v3rpc.ProgressReportInterval
	v3rpc.ProgressReportInterval = testInterval
	defer func() { v3rpc.ProgressReportInterval = pi }()
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	wStream, wErr := toGRPC(clus.RandClient()).Watch.Watch(ctx)
	if wErr != nil {
		t.Fatalf("wAPI.Watch error: %v", wErr)
	}
	// create two watchers, one with progressNotify set.
	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 1, ProgressNotify: true}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("watch request failed (%v)", err)
	}
	wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 1}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("watch request failed (%v)", err)
	}
	// two creation + one notification
	for i := 0; i < 3; i++ {
		rok, resp := waitResponse(wStream, testInterval+time.Second)
		// check the timeout result BEFORE dereferencing resp: waitResponse
		// returns (true, nil) on timeout, so the original order
		// (resp.Created first) would panic on the nil response.
		if rok {
			t.Fatalf("failed to receive response from watch stream")
		}
		if resp.Created {
			continue
		}
		if resp.Header.Revision != 1 {
			t.Errorf("revision = %d, want 1", resp.Header.Revision)
		}
		if len(resp.Events) != 0 {
			t.Errorf("len(resp.Events) = %d, want 0", len(resp.Events))
		}
	}
	// no more notification
	rok, resp := waitResponse(wStream, testInterval+time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", resp)
	}
}
|
// A facebook graph api client in go.
// https://github.com/huandu/facebook/
//
// Copyright 2012 - 2014, Huan Du
// Licensed under the MIT license
// https://github.com/huandu/facebook/blob/master/LICENSE
package facebook
import (
"bytes"
"fmt"
"net/http"
)
// pagingData mirrors the JSON shape of a Graph API paging response:
// the required "data" array of records plus an optional "paging" navigator.
type pagingData struct {
	Data   []Result `facebook:",required"`
	Paging *pagingNavigator
}
// pagingNavigator holds the URLs of the adjacent result pages as returned
// in the "paging" field of a Graph API response. An empty string means the
// corresponding page does not exist.
type pagingNavigator struct {
	Previous string
	Next     string
}
// newPagingResult wraps a Graph API paging response in a PagingResult bound
// to session. It returns an error if res lacks a "data" field (i.e. it is
// not a paging response) or if decoding fails.
func newPagingResult(session *Session, res Result) (*PagingResult, error) {
	// A paging response must carry a "data" field; reject anything else.
	if _, ok := res["data"]; !ok {
		return nil, fmt.Errorf("current Result is not a paging response.")
	}

	result := &PagingResult{session: session}
	if err := res.Decode(&result.paging); err != nil {
		return nil, err
	}

	// Seed the navigation links from the decoded "paging" field, if any.
	if nav := result.paging.Paging; nav != nil {
		result.previous = nav.Previous
		result.next = nav.Next
	}
	return result, nil
}
// Data returns the records of the current page.
func (pr *PagingResult) Data() []Result {
	return pr.paging.Data
}
// Previous reads the previous page into the receiver. noMore reports that
// there was no previous page to read; err is any fetch or decode failure.
func (pr *PagingResult) Previous() (noMore bool, err error) {
	if pr.HasPrevious() {
		return pr.navigate(&pr.previous)
	}
	return true, nil
}
// Next reads the next page into the receiver. noMore reports that there
// was no next page to read; err is any fetch or decode failure.
func (pr *PagingResult) Next() (noMore bool, err error) {
	if pr.HasNext() {
		return pr.navigate(&pr.next)
	}
	return true, nil
}
// HasPrevious reports whether a previous page is available.
func (pr *PagingResult) HasPrevious() bool {
	return pr.previous != ""
}
// HasNext reports whether a next page is available.
func (pr *PagingResult) HasNext() bool {
	return pr.next != ""
}
// navigate fetches the page pointed to by *url, decodes it into pr.paging,
// and refreshes the previous/next links. On the last page (no "paging"
// field, or empty data) it clears *url and reports noMore = true.
func (pr *PagingResult) navigate(url *string) (noMore bool, err error) {
	var pagingUrl string

	// add session information in paging url.
	params := Params{}
	pr.session.prepareParams(params)

	if len(params) == 0 {
		pagingUrl = *url
	} else {
		buf := &bytes.Buffer{}
		buf.WriteString(*url)
		buf.WriteRune('&')
		params.Encode(buf)

		pagingUrl = buf.String()
	}

	var request *http.Request
	var res Result

	request, err = http.NewRequest("GET", pagingUrl, nil)

	if err != nil {
		return
	}

	res, err = pr.session.Request(request)

	if err != nil {
		return
	}

	// Clear the navigation links left over from the previous query before
	// decoding. Decode only overwrites fields present in the new response,
	// so a final page without "paging" links would otherwise keep the stale
	// Next/Previous values and the caller would loop over the same page
	// forever.
	if pr.paging.Paging != nil {
		pr.paging.Paging.Next = ""
		pr.paging.Paging.Previous = ""
	}

	paging := &pr.paging
	err = res.Decode(paging)

	if err != nil {
		return
	}

	if paging.Paging == nil || len(paging.Data) == 0 {
		*url = ""
		noMore = true
	} else {
		pr.previous = paging.Paging.Previous
		pr.next = paging.Paging.Next
	}

	return
}
Fix an infinite loop in paging by clearing the Next/Previous fields left over from the previous query before decoding the new response.
// A facebook graph api client in go.
// https://github.com/huandu/facebook/
//
// Copyright 2012 - 2014, Huan Du
// Licensed under the MIT license
// https://github.com/huandu/facebook/blob/master/LICENSE
package facebook
import (
"bytes"
"fmt"
"net/http"
)
// pagingData mirrors the JSON shape of a Graph API paging response:
// the required "data" array of records plus an optional "paging" navigator.
type pagingData struct {
	Data   []Result `facebook:",required"`
	Paging *pagingNavigator
}
// pagingNavigator holds the URLs of the adjacent result pages as returned
// in the "paging" field of a Graph API response. An empty string means the
// corresponding page does not exist.
type pagingNavigator struct {
	Previous string
	Next     string
}
// newPagingResult wraps a Graph API paging response in a PagingResult bound
// to session. It returns an error if res lacks a "data" field (i.e. it is
// not a paging response) or if decoding fails.
func newPagingResult(session *Session, res Result) (*PagingResult, error) {
	// quick check whether Result is a paging response.
	if _, ok := res["data"]; !ok {
		return nil, fmt.Errorf("current Result is not a paging response.")
	}

	pr := &PagingResult{
		session: session,
	}
	paging := &pr.paging
	err := res.Decode(paging)

	if err != nil {
		return nil, err
	}

	// Seed the navigation links from the decoded "paging" field, if any.
	if paging.Paging != nil {
		pr.previous = paging.Paging.Previous
		pr.next = paging.Paging.Next
	}

	return pr, nil
}
// Data returns the records of the current page.
func (pr *PagingResult) Data() []Result {
	return pr.paging.Data
}
// Previous reads the previous page into the receiver. noMore reports that
// there was no previous page to read; err is any fetch or decode failure.
func (pr *PagingResult) Previous() (noMore bool, err error) {
	if !pr.HasPrevious() {
		noMore = true
		return
	}

	return pr.navigate(&pr.previous)
}
// Next reads the next page into the receiver. noMore reports that there
// was no next page to read; err is any fetch or decode failure.
func (pr *PagingResult) Next() (noMore bool, err error) {
	if !pr.HasNext() {
		noMore = true
		return
	}

	return pr.navigate(&pr.next)
}
// HasPrevious reports whether a previous page is available.
func (pr *PagingResult) HasPrevious() bool {
	return pr.previous != ""
}
// HasNext reports whether a next page is available.
func (pr *PagingResult) HasNext() bool {
	return pr.next != ""
}
// navigate fetches the page pointed to by *url, decodes it into pr.paging,
// and refreshes the previous/next links. On the last page (no "paging"
// field, or empty data) it clears *url and reports noMore = true.
func (pr *PagingResult) navigate(url *string) (noMore bool, err error) {
	var pagingUrl string

	// add session information in paging url.
	params := Params{}
	pr.session.prepareParams(params)

	if len(params) == 0 {
		pagingUrl = *url
	} else {
		buf := &bytes.Buffer{}
		buf.WriteString(*url)
		buf.WriteRune('&')
		params.Encode(buf)

		pagingUrl = buf.String()
	}

	var request *http.Request
	var res Result

	request, err = http.NewRequest("GET", pagingUrl, nil)

	if err != nil {
		return
	}

	res, err = pr.session.Request(request)

	if err != nil {
		return
	}

	// Clear links left over from the previous query: Decode below only
	// overwrites fields present in the new response, so stale Next/Previous
	// values would otherwise make callers loop over the same page forever.
	if pr.paging.Paging != nil {
		pr.paging.Paging.Next = ""
		pr.paging.Paging.Previous = ""
	}

	paging := &pr.paging
	err = res.Decode(paging)

	if err != nil {
		return
	}

	if paging.Paging == nil || len(paging.Data) == 0 {
		*url = ""
		noMore = true
	} else {
		pr.previous = paging.Paging.Previous
		pr.next = paging.Paging.Next
	}

	return
}
|
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otelbeego
import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/global"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
)
// config provides configuration for the beego OpenTelemetry
// middleware. Configuration is modified using the provided Options.
type config struct {
	tracerProvider trace.TracerProvider          // defaults to otel.GetTracerProvider()
	meterProvider  metric.MeterProvider          // defaults to global.GetMeterProvider()
	propagators    propagation.TextMapPropagator // defaults to otel.GetTextMapPropagator()
	filters        []Filter                      // accumulated via WithFilter; empty by default
	formatter      SpanNameFormatter             // defaults to defaultSpanNameFormatter
}
// Option applies a configuration to the given config.
// It is implemented by OptionFunc and returned by the With* constructors.
type Option interface {
	Apply(*config)
}
// OptionFunc is a function type that applies a particular
// configuration to the beego middleware in question.
// It adapts a plain function to the Option interface.
type OptionFunc func(c *config)
// Apply will apply the option to the config, c, by invoking the
// wrapped function. It makes OptionFunc satisfy Option.
func (o OptionFunc) Apply(c *config) {
	o(c)
}
// ------------------------------------------ Options

// WithTracerProvider specifies a tracer provider to use for creating a tracer.
// If none is specified, the global provider is used.
func WithTracerProvider(provider trace.TracerProvider) Option {
	return OptionFunc(func(c *config) {
		c.tracerProvider = provider
	})
}
// WithMeterProvider specifies a meter provider to use for creating a meter.
// If none is specified, the global provider is used.
func WithMeterProvider(provider metric.MeterProvider) Option {
	return OptionFunc(func(c *config) {
		c.meterProvider = provider
	})
}
// WithPropagators sets the propagators used in the middleware.
// Defaults to otel.GetTextMapPropagator() (see newConfig).
//
// Returns Option (not the concrete OptionFunc) for consistency with the
// other With* constructors in this file.
func WithPropagators(propagators propagation.TextMapPropagator) Option {
	return OptionFunc(func(c *config) {
		c.propagators = propagators
	})
}
// WithFilter adds the given filter for use in the middleware.
// Defaults to no filters. Filters accumulate across repeated options.
//
// Returns Option (not the concrete OptionFunc) for consistency with the
// other With* constructors in this file.
func WithFilter(f Filter) Option {
	return OptionFunc(func(c *config) {
		c.filters = append(c.filters, f)
	})
}
// WithSpanNameFormatter sets the formatter to be used to format
// span names. Defaults to the path template.
//
// Returns Option (not the concrete OptionFunc) for consistency with the
// other With* constructors in this file.
func WithSpanNameFormatter(f SpanNameFormatter) Option {
	return OptionFunc(func(c *config) {
		c.formatter = f
	})
}
// ------------------------------------------ Private Functions

// newConfig builds a config pre-populated with the global providers and
// propagator, then applies the given options in order; later options
// override earlier ones.
func newConfig(options ...Option) *config {
	config := &config{
		tracerProvider: otel.GetTracerProvider(),
		meterProvider:  global.GetMeterProvider(),
		propagators:    otel.GetTextMapPropagator(),
		filters:        []Filter{},
		formatter:      defaultSpanNameFormatter,
	}
	for _, option := range options {
		option.Apply(config)
	}
	return config
}
otelbeego: Refactor option type (#853)
Co-authored-by: Tyler Yahn <b79042b10fd9cd75597e3dac7c4e4fe9e14fe996@users.noreply.github.com>
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otelbeego
import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/global"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
)
// config provides configuration for the beego OpenTelemetry
// middleware. Configuration is modified using the provided Options.
type config struct {
	tracerProvider trace.TracerProvider          // defaults to otel.GetTracerProvider()
	meterProvider  metric.MeterProvider          // defaults to global.GetMeterProvider()
	propagators    propagation.TextMapPropagator // defaults to otel.GetTextMapPropagator()
	filters        []Filter                      // accumulated via WithFilter; empty by default
	formatter      SpanNameFormatter             // defaults to defaultSpanNameFormatter
}
// Option applies a configuration to the given config.
// It is implemented by the unexported optionFunc adapter below.
type Option interface {
	apply(*config)
}
// optionFunc is a function type that applies a particular
// configuration to the beego middleware in question.
type optionFunc func(c *config)
// apply applies the option to the config, c, by invoking the wrapped
// function. It makes optionFunc satisfy Option.
func (o optionFunc) apply(c *config) {
	o(c)
}
// ------------------------------------------ Options

// WithTracerProvider specifies a tracer provider to use for creating a tracer.
// If none is specified, the global provider is used.
func WithTracerProvider(provider trace.TracerProvider) Option {
	return optionFunc(func(c *config) {
		c.tracerProvider = provider
	})
}
// WithMeterProvider specifies a meter provider to use for creating a meter.
// If none is specified, the global provider is used.
func WithMeterProvider(provider metric.MeterProvider) Option {
	return optionFunc(func(c *config) {
		c.meterProvider = provider
	})
}
// WithPropagators sets the propagators used in the middleware.
// Defaults to otel.GetTextMapPropagator() (see newConfig).
func WithPropagators(propagators propagation.TextMapPropagator) Option {
	return optionFunc(func(cfg *config) {
		cfg.propagators = propagators
	})
}
// WithFilter adds the given filter for use in the middleware.
// Defaults to no filters; filters accumulate across repeated options.
func WithFilter(f Filter) Option {
	return optionFunc(func(cfg *config) {
		cfg.filters = append(cfg.filters, f)
	})
}
// WithSpanNameFormatter sets the formatter to be used to format
// span names. Defaults to the path template.
func WithSpanNameFormatter(f SpanNameFormatter) Option {
	return optionFunc(func(cfg *config) {
		cfg.formatter = f
	})
}
// ------------------------------------------ Private Functions

// newConfig builds a config pre-populated with the global providers and
// propagator, then applies the given options in order; later options
// override earlier ones.
func newConfig(options ...Option) *config {
	config := &config{
		tracerProvider: otel.GetTracerProvider(),
		meterProvider:  global.GetMeterProvider(),
		propagators:    otel.GetTextMapPropagator(),
		filters:        []Filter{},
		formatter:      defaultSpanNameFormatter,
	}
	for _, option := range options {
		option.apply(config)
	}
	return config
}
|
package resolver
import (
"fmt"
"regexp"
"strings"
"syscall"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/js_parser"
"github.com/evanw/esbuild/internal/logger"
)
// This file implements the Yarn PnP specification: https://yarnpkg.com/advanced/pnp-spec/
type pnpData struct {
	// A list of package locators that are roots of the dependency tree. There
	// will typically be one entry for each workspace in the project (always at
	// least one, as the top-level package is a workspace by itself).
	dependencyTreeRoots map[string]string

	// Keys are the package idents, values are sets of references. Combining the
	// ident with each individual reference yields the set of affected locators.
	fallbackExclusionList map[string]map[string]bool

	// A map of locators that all packages are allowed to access, regardless
	// whether they list them in their dependencies or not.
	fallbackPool map[string]pnpIdentAndReference

	// A nullable regexp. If set, all project-relative importer paths should be
	// matched against it. If the match succeeds, the resolution should follow
	// the classic Node.js resolution algorithm rather than the Plug'n'Play one.
	// Note that unlike other paths in the manifest, the one checked against this
	// regexp won't begin by `./`.
	ignorePatternData *regexp.Regexp

	// This is the main part of the PnP data file. This table contains the list
	// of all packages, first keyed by package ident then by package reference.
	// One entry will have `null` in both fields and represents the absolute
	// top-level package.
	packageRegistryData map[string]map[string]pnpPackage

	// Maps a package location (relative to the manifest directory) to the
	// locator of the package found there. Built while compiling
	// packageRegistryData and consulted by findLocator.
	packageLocatorsByLocations map[string]pnpPackageLocatorByLocation

	// If true, should a dependency resolution fail for an importer that isn't
	// explicitly listed in `fallbackExclusionList`, the runtime must first check
	// whether the resolution would succeed for any of the packages in
	// `fallbackPool`; if it would, transparently return this resolution. Note
	// that all dependencies from the top-level package are implicitly part of
	// the fallback pool, even if not listed here.
	enableTopLevelFallback bool

	// Absolute path of the manifest file itself, and of the directory that
	// contains it (the base that package locations are joined against).
	absPath    string
	absDirPath string
}
// This is called both a "locator" and a "dependency target" in the specification.
// When it's used as a dependency target, it can only be in one of three states:
//
// 1. A reference, to link with the dependency name
//    In this case ident is "".
//
// 2. An aliased package
//    In this case neither ident nor reference are "".
//
// 3. A missing peer dependency
//    In this case ident and reference are "".
//
// The zero value therefore represents "null" throughout this file.
type pnpIdentAndReference struct {
	ident     string // Empty if null
	reference string // Empty if null
}
// pnpPackage is one entry of packageRegistryData: where a package lives on
// disk and which dependencies it is allowed to resolve.
type pnpPackage struct {
	packageDependencies map[string]pnpIdentAndReference // dependency name -> dependency target
	packageLocation     string                          // location joined against pnpData.absDirPath
	discardFromLookup   bool                            // if true, skip this entry in location lookups
}
// pnpPackageLocatorByLocation is one entry of packageLocatorsByLocations:
// the locator that owns a given location, plus its discardFromLookup flag.
type pnpPackageLocatorByLocation struct {
	locator           pnpIdentAndReference
	discardFromLookup bool
}
// Note: If this returns successfully then the node module resolution algorithm
// (i.e. NM_RESOLVE in the Yarn PnP specification) is always run afterward
func (r resolverQuery) pnpResolve(specifier string, parentURL string, parentManifest *pnpData) (string, bool) {
	// Node.js builtins resolve to themselves.
	if BuiltInNodeModules[specifier] {
		return specifier, true
	}

	// Absolute paths and "./"/"../"-prefixed paths bypass Plug'n'Play and go
	// straight to NM_RESOLVE.
	isPathLike := r.fs.IsAbs(specifier) ||
		strings.HasPrefix(specifier, "./") ||
		strings.HasPrefix(specifier, "../")
	if isPathLike {
		return specifier, true
	}

	// Anything else is a bare identifier: run RESOLVE_TO_UNQUALIFIED and hand
	// the result to NM_RESOLVE afterward.
	return r.resolveToUnqualified(specifier, parentURL, parentManifest)
}
// parseBareIdentifier splits a bare import specifier into its package ident
// and the trailing module path (PARSE_BARE_IDENTIFIER in the Yarn PnP
// specification). Scoped specifiers ("@scope/name[/path]") take their ident
// up to the second "/"; plain ones up to the first. A scoped specifier with
// no "/" at all is invalid and reports ok == false.
func parseBareIdentifier(specifier string) (ident string, modulePath string, ok bool) {
	firstSlash := strings.IndexByte(specifier, '/')

	switch {
	case strings.HasPrefix(specifier, "@"):
		// Scoped packages must contain at least one "/" separator.
		if firstSlash == -1 {
			return "", "", false
		}
		// The ident extends to the second "/" or to the end of the string.
		if secondSlash := strings.IndexByte(specifier[firstSlash+1:], '/'); secondSlash != -1 {
			ident = specifier[:firstSlash+1+secondSlash]
		} else {
			ident = specifier
		}

	case firstSlash != -1:
		// Unscoped: the ident is everything before the first "/".
		ident = specifier[:firstSlash]

	default:
		ident = specifier
	}

	// Whatever follows the ident is the module path (possibly empty).
	return ident, specifier[len(ident):], true
}
// resolveToUnqualified implements RESOLVE_TO_UNQUALIFIED from the Yarn PnP
// specification: it maps a bare import specifier to the unqualified path of
// the package directory that should satisfy it, using the manifest's
// dependency tables. Returns false when resolution must fail (an unlisted
// dependency or an unfulfilled peer dependency).
func (r resolverQuery) resolveToUnqualified(specifier string, parentURL string, manifest *pnpData) (string, bool) {
	// Let resolved be undefined
	// Let ident and modulePath be the result of PARSE_BARE_IDENTIFIER(specifier)
	ident, modulePath, ok := parseBareIdentifier(specifier)
	if !ok {
		return "", false
	}

	// Let manifest be FIND_PNP_MANIFEST(parentURL)
	// (this is already done by the time we get here)

	// If manifest is null, then
	// Set resolved to NM_RESOLVE(specifier, parentURL) and return it
	if manifest == nil {
		return specifier, true
	}

	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf("Using Yarn PnP manifest from %q to resolve %q", manifest.absPath, ident))
	}

	// Let parentLocator be FIND_LOCATOR(manifest, parentURL)
	parentLocator, ok := r.findLocator(manifest, parentURL)

	// If parentLocator is null, then
	// Set resolved to NM_RESOLVE(specifier, parentURL) and return it
	if !ok {
		return specifier, true
	}

	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf(" Found parent locator: [%s, %s]", quoteOrNullIfEmpty(parentLocator.ident), quoteOrNullIfEmpty(parentLocator.reference)))
	}

	// Let parentPkg be GET_PACKAGE(manifest, parentLocator)
	parentPkg, ok := r.getPackage(manifest, parentLocator.ident, parentLocator.reference)
	if !ok {
		// We aren't supposed to get here according to the Yarn PnP specification
		return "", false
	}

	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf(" Found parent package at %q", parentPkg.packageLocation))
	}

	// Let referenceOrAlias be the entry from parentPkg.packageDependencies referenced by ident
	referenceOrAlias, ok := parentPkg.packageDependencies[ident]

	// If referenceOrAlias is null or undefined, then
	if !ok || referenceOrAlias.reference == "" {
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf(" Failed to find %q in \"packageDependencies\" of parent package", ident))
		}

		// If manifest.enableTopLevelFallback is true, then
		if manifest.enableTopLevelFallback {
			if r.debugLogs != nil {
				r.debugLogs.addNote(" Searching for a fallback because \"enableTopLevelFallback\" is true")
			}

			// If parentLocator isn't in manifest.fallbackExclusionList, then
			// (a nil set lookup on a missing ident safely yields false)
			if set, _ := manifest.fallbackExclusionList[parentLocator.ident]; !set[parentLocator.reference] {
				// Let fallback be RESOLVE_VIA_FALLBACK(manifest, ident)
				fallback, _ := r.resolveViaFallback(manifest, ident)

				// If fallback is neither null nor undefined
				if fallback.reference != "" {
					// Set referenceOrAlias to fallback
					referenceOrAlias = fallback
					ok = true
				}
			} else if r.debugLogs != nil {
				r.debugLogs.addNote(fmt.Sprintf(" Stopping because [%s, %s] is in \"fallbackExclusionList\"",
					quoteOrNullIfEmpty(parentLocator.ident), quoteOrNullIfEmpty(parentLocator.reference)))
			}
		}
	}

	// If referenceOrAlias is still undefined, then
	if !ok {
		// Throw a resolution error
		return "", false
	}

	// If referenceOrAlias is still null, then
	if referenceOrAlias.reference == "" {
		// Note: It means that parentPkg has an unfulfilled peer dependency on ident
		// Throw a resolution error
		return "", false
	}

	if r.debugLogs != nil {
		var referenceOrAliasStr string
		if referenceOrAlias.ident != "" {
			referenceOrAliasStr = fmt.Sprintf("[%q, %q]", referenceOrAlias.ident, referenceOrAlias.reference)
		} else {
			referenceOrAliasStr = quoteOrNullIfEmpty(referenceOrAlias.reference)
		}
		r.debugLogs.addNote(fmt.Sprintf(" Found dependency locator: [%s, %s]", quoteOrNullIfEmpty(ident), referenceOrAliasStr))
	}

	// Otherwise, if referenceOrAlias is an array, then
	// (a non-empty ident marks the [ident, reference] alias form)
	var dependencyPkg pnpPackage
	if referenceOrAlias.ident != "" {
		// Let alias be referenceOrAlias
		alias := referenceOrAlias

		// Let dependencyPkg be GET_PACKAGE(manifest, alias)
		dependencyPkg, ok = r.getPackage(manifest, alias.ident, alias.reference)
		if !ok {
			// We aren't supposed to get here according to the Yarn PnP specification
			return "", false
		}
	} else {
		// Otherwise,
		// Let dependencyPkg be GET_PACKAGE(manifest, {ident, reference})
		dependencyPkg, ok = r.getPackage(manifest, ident, referenceOrAlias.reference)
		if !ok {
			// We aren't supposed to get here according to the Yarn PnP specification
			return "", false
		}
	}

	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf(" Found package %q at %q", ident, dependencyPkg.packageLocation))
	}

	// Return path.resolve(manifest.dirPath, dependencyPkg.packageLocation, modulePath)
	result := r.fs.Join(manifest.absDirPath, dependencyPkg.packageLocation, modulePath)
	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf(" Resolved %q via Yarn PnP to %q", specifier, result))
	}
	return result, true
}
// findLocator implements FIND_LOCATOR from the Yarn PnP specification: it
// identifies the package that physically contains moduleUrl by normalizing
// the manifest-relative path and then walking it upward one component at a
// time until an entry is found in packageLocatorsByLocations.
func (r resolverQuery) findLocator(manifest *pnpData, moduleUrl string) (pnpIdentAndReference, bool) {
	// Let relativeUrl be the relative path between manifest and moduleUrl
	relativeUrl, ok := r.fs.Rel(manifest.absDirPath, moduleUrl)
	if !ok {
		return pnpIdentAndReference{}, false
	} else {
		// Relative URLs on Windows will use \ instead of /, which will break
		// everything we do below. Use normal slashes to keep things working.
		relativeUrl = strings.ReplaceAll(relativeUrl, "\\", "/")
	}

	// The relative path must not start with ./; trim it if needed
	if strings.HasPrefix(relativeUrl, "./") {
		relativeUrl = relativeUrl[2:]
	}

	// If relativeUrl matches manifest.ignorePatternData, then
	if manifest.ignorePatternData != nil && manifest.ignorePatternData.MatchString(relativeUrl) {
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf(" Ignoring %q because it matches \"ignorePatternData\"", relativeUrl))
		}

		// Return null
		return pnpIdentAndReference{}, false
	}

	// Note: Make sure relativeUrl always starts with a ./ or ../
	// (and ends with a "/", matching the key format of the lookup table)
	if !strings.HasSuffix(relativeUrl, "/") {
		relativeUrl += "/"
	}
	if !strings.HasPrefix(relativeUrl, "./") && !strings.HasPrefix(relativeUrl, "../") {
		relativeUrl = "./" + relativeUrl
	}

	// This is the inner loop from Yarn's PnP resolver implementation. This is
	// different from the specification, which contains a hypothetical slow
	// algorithm instead. The algorithm from the specification can sometimes
	// produce different results from the one used by the implementation, so
	// we follow the implementation.
	for {
		entry, ok := manifest.packageLocatorsByLocations[relativeUrl]
		if !ok || entry.discardFromLookup {
			// Remove the last path component and try again
			relativeUrl = relativeUrl[:strings.LastIndexByte(relativeUrl[:len(relativeUrl)-1], '/')+1]
			if relativeUrl == "" {
				break
			}
			continue
		}
		return entry.locator, true
	}

	return pnpIdentAndReference{}, false
}
// resolveViaFallback implements RESOLVE_VIA_FALLBACK from the Yarn PnP
// specification: when a normal dependency lookup fails, first try the
// top-level package's dependencies, then the manifest's explicit fallback
// pool.
func (r resolverQuery) resolveViaFallback(manifest *pnpData, ident string) (pnpIdentAndReference, bool) {
	// Let topLevelPkg be GET_PACKAGE(manifest, {null, null})
	topLevelPkg, ok := r.getPackage(manifest, "", "")
	if !ok {
		// We aren't supposed to get here according to the Yarn PnP specification
		return pnpIdentAndReference{}, false
	}

	// Let referenceOrAlias be the entry from topLevelPkg.packageDependencies referenced by ident
	referenceOrAlias, ok := topLevelPkg.packageDependencies[ident]

	// If referenceOrAlias is defined, then
	if ok {
		// Return it immediately
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf(" Found fallback for %q in \"packageDependencies\" of top-level package: [%s, %s]", ident,
				quoteOrNullIfEmpty(referenceOrAlias.ident), quoteOrNullIfEmpty(referenceOrAlias.reference)))
		}
		return referenceOrAlias, true
	}

	// Otherwise,
	// Let referenceOrAlias be the entry from manifest.fallbackPool referenced by ident
	referenceOrAlias, ok = manifest.fallbackPool[ident]

	// Return it immediately, whether it's defined or not
	if r.debugLogs != nil {
		if ok {
			r.debugLogs.addNote(fmt.Sprintf(" Found fallback for %q in \"fallbackPool\": [%s, %s]", ident,
				quoteOrNullIfEmpty(referenceOrAlias.ident), quoteOrNullIfEmpty(referenceOrAlias.reference)))
		} else {
			r.debugLogs.addNote(fmt.Sprintf(" Failed to find fallback for %q in \"fallbackPool\"", ident))
		}
	}
	return referenceOrAlias, ok
}
// getPackage implements GET_PACKAGE from the Yarn PnP specification: a
// two-level lookup into packageRegistryData, first by ident and then by
// reference. A miss indicates a malformed manifest, so it is only logged
// for debugging.
func (r resolverQuery) getPackage(manifest *pnpData, ident string, reference string) (pnpPackage, bool) {
	if inner, ok := manifest.packageRegistryData[ident]; ok {
		if pkg, ok := inner[reference]; ok {
			return pkg, true
		}
	}

	if r.debugLogs != nil {
		// We aren't supposed to get here according to the Yarn PnP specification:
		// "Note: pkg cannot be undefined here; all packages referenced in any of the
		// Plug'n'Play data tables MUST have a corresponding entry inside packageRegistryData."
		r.debugLogs.addNote(fmt.Sprintf(" Yarn PnP invariant violation: GET_PACKAGE failed to find a package: [%s, %s]",
			quoteOrNullIfEmpty(ident), quoteOrNullIfEmpty(reference)))
	}
	return pnpPackage{}, false
}
// quoteOrNullIfEmpty renders str as a quoted string for debug logs, using
// the literal text "null" to represent the empty string (which stands in
// for JSON null throughout this file).
func quoteOrNullIfEmpty(str string) string {
	if str == "" {
		return "null"
	}
	return fmt.Sprintf("%q", str)
}
// compileYarnPnPData converts the parsed JSON AST of a Yarn PnP manifest
// (".pnp.data.json", or the JSON object embedded in a ".pnp.cjs") into a
// pnpData structure. Fields that are absent or malformed are silently
// skipped, leaving the corresponding pnpData members at their zero values.
func compileYarnPnPData(absPath string, absDirPath string, json js_ast.Expr) *pnpData {
	data := pnpData{
		absPath:    absPath,
		absDirPath: absDirPath,
	}

	// "dependencyTreeRoots": [{"name": ..., "reference": ...}, ...]
	if value, _, ok := getProperty(json, "dependencyTreeRoots"); ok {
		if array, ok := value.Data.(*js_ast.EArray); ok {
			data.dependencyTreeRoots = make(map[string]string, len(array.Items))
			for _, item := range array.Items {
				if name, _, ok := getProperty(item, "name"); ok {
					if reference, _, ok := getProperty(item, "reference"); ok {
						if name, ok := getString(name); ok {
							if reference, ok := getString(reference); ok {
								data.dependencyTreeRoots[name] = reference
							}
						}
					}
				}
			}
		}
	}

	if value, _, ok := getProperty(json, "enableTopLevelFallback"); ok {
		if enableTopLevelFallback, ok := getBool(value); ok {
			data.enableTopLevelFallback = enableTopLevelFallback
		}
	}

	// "fallbackExclusionList": [[ident, [reference, ...]], ...]
	if value, _, ok := getProperty(json, "fallbackExclusionList"); ok {
		if array, ok := value.Data.(*js_ast.EArray); ok {
			data.fallbackExclusionList = make(map[string]map[string]bool, len(array.Items))
			for _, item := range array.Items {
				if tuple, ok := item.Data.(*js_ast.EArray); ok && len(tuple.Items) == 2 {
					if ident, ok := getStringOrNull(tuple.Items[0]); ok {
						if array2, ok := tuple.Items[1].Data.(*js_ast.EArray); ok {
							references := make(map[string]bool, len(array2.Items))
							for _, item2 := range array2.Items {
								if reference, ok := getString(item2); ok {
									references[reference] = true
								}
							}
							data.fallbackExclusionList[ident] = references
						}
					}
				}
			}
		}
	}

	// "fallbackPool": [[ident, dependencyTarget], ...]
	if value, _, ok := getProperty(json, "fallbackPool"); ok {
		if array, ok := value.Data.(*js_ast.EArray); ok {
			data.fallbackPool = make(map[string]pnpIdentAndReference, len(array.Items))
			for _, item := range array.Items {
				if array2, ok := item.Data.(*js_ast.EArray); ok && len(array2.Items) == 2 {
					if ident, ok := getString(array2.Items[0]); ok {
						if dependencyTarget, ok := getDependencyTarget(array2.Items[1]); ok {
							data.fallbackPool[ident] = dependencyTarget
						}
					}
				}
			}
		}
	}

	if value, _, ok := getProperty(json, "ignorePatternData"); ok {
		if ignorePatternData, ok := getString(value); ok {
			// A failed compile leaves ignorePatternData nil, which simply
			// disables the ignore-pattern check.
			data.ignorePatternData, _ = regexp.Compile(ignorePatternData)
		}
	}

	// "packageRegistryData": [[ident, [[reference, pkg], ...]], ...]
	if value, _, ok := getProperty(json, "packageRegistryData"); ok {
		if array, ok := value.Data.(*js_ast.EArray); ok {
			data.packageRegistryData = make(map[string]map[string]pnpPackage, len(array.Items))
			data.packageLocatorsByLocations = make(map[string]pnpPackageLocatorByLocation)

			for _, item := range array.Items {
				if tuple, ok := item.Data.(*js_ast.EArray); ok && len(tuple.Items) == 2 {
					if packageIdent, ok := getStringOrNull(tuple.Items[0]); ok {
						if array2, ok := tuple.Items[1].Data.(*js_ast.EArray); ok {
							references := make(map[string]pnpPackage, len(array2.Items))
							data.packageRegistryData[packageIdent] = references

							for _, item2 := range array2.Items {
								if tuple2, ok := item2.Data.(*js_ast.EArray); ok && len(tuple2.Items) == 2 {
									if packageReference, ok := getStringOrNull(tuple2.Items[0]); ok {
										pkg := tuple2.Items[1]

										if packageLocation, _, ok := getProperty(pkg, "packageLocation"); ok {
											if packageDependencies, _, ok := getProperty(pkg, "packageDependencies"); ok {
												if packageLocation, ok := getString(packageLocation); ok {
													if array3, ok := packageDependencies.Data.(*js_ast.EArray); ok {
														deps := make(map[string]pnpIdentAndReference, len(array3.Items))
														discardFromLookup := false

														for _, dep := range array3.Items {
															if array4, ok := dep.Data.(*js_ast.EArray); ok && len(array4.Items) == 2 {
																if ident, ok := getString(array4.Items[0]); ok {
																	if dependencyTarget, ok := getDependencyTarget(array4.Items[1]); ok {
																		deps[ident] = dependencyTarget
																	}
																}
															}
														}

														if value, _, ok := getProperty(pkg, "discardFromLookup"); ok {
															if value, ok := getBool(value); ok {
																discardFromLookup = value
															}
														}

														references[packageReference] = pnpPackage{
															packageLocation:     packageLocation,
															packageDependencies: deps,
															discardFromLookup:   discardFromLookup,
														}

														// This is what Yarn's PnP implementation does (specifically in
														// "hydrateRuntimeState"), so we replicate that behavior here:
														if entry, ok := data.packageLocatorsByLocations[packageLocation]; !ok {
															data.packageLocatorsByLocations[packageLocation] = pnpPackageLocatorByLocation{
																locator:           pnpIdentAndReference{ident: packageIdent, reference: packageReference},
																discardFromLookup: discardFromLookup,
															}
														} else {
															entry.discardFromLookup = entry.discardFromLookup && discardFromLookup
															if !discardFromLookup {
																entry.locator = pnpIdentAndReference{ident: packageIdent, reference: packageReference}
															}
															data.packageLocatorsByLocations[packageLocation] = entry
														}
													}
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}

	return &data
}
// getStringOrNull decodes a JSON value that may be either a string or null;
// null maps to the empty string. The second result is false for any other
// JSON type.
func getStringOrNull(json js_ast.Expr) (string, bool) {
	switch value := json.Data.(type) {
	case *js_ast.EString:
		return helpers.UTF16ToString(value.Value), true

	case *js_ast.ENull:
		return "", true
	}

	return "", false
}
// getDependencyTarget decodes a dependency target, which may take one of
// three JSON forms (see pnpIdentAndReference): null (a missing peer
// dependency), a plain reference string, or an [ident, reference] alias
// tuple. Anything else reports false.
func getDependencyTarget(json js_ast.Expr) (pnpIdentAndReference, bool) {
	switch d := json.Data.(type) {
	case *js_ast.ENull:
		return pnpIdentAndReference{}, true

	case *js_ast.EString:
		return pnpIdentAndReference{reference: helpers.UTF16ToString(d.Value)}, true

	case *js_ast.EArray:
		if len(d.Items) == 2 {
			if name, ok := getString(d.Items[0]); ok {
				if reference, ok := getString(d.Items[1]); ok {
					return pnpIdentAndReference{
						ident:     name,
						reference: reference,
					}, true
				}
			}
		}
	}

	return pnpIdentAndReference{}, false
}
// pnpDataMode controls whether a missing PnP manifest file is reported as
// an error or silently ignored when reading it from disk.
type pnpDataMode uint8

const (
	pnpIgnoreErrorsAboutMissingFiles pnpDataMode = iota
	pnpReportErrorsAboutMissingFiles
)
// extractYarnPnPDataFromJSON reads and parses a JSON PnP manifest file
// (e.g. ".pnp.data.json"). A missing file is only reported as an error when
// mode is pnpReportErrorsAboutMissingFiles; other read failures are always
// reported. Returns the zero Expr on failure.
func (r resolverQuery) extractYarnPnPDataFromJSON(pnpDataPath string, mode pnpDataMode) (result js_ast.Expr) {
	contents, err, originalError := r.caches.FSCache.ReadFile(r.fs, pnpDataPath)
	if r.debugLogs != nil && originalError != nil {
		r.debugLogs.addNote(fmt.Sprintf("Failed to read file %q: %s", pnpDataPath, originalError.Error()))
	}
	if err != nil {
		// ENOENT is tolerated in "ignore" mode; anything else is an error.
		if mode == pnpReportErrorsAboutMissingFiles || err != syscall.ENOENT {
			r.log.AddError(nil, logger.Range{},
				fmt.Sprintf("Cannot read file %q: %s",
					r.PrettyPath(logger.Path{Text: pnpDataPath, Namespace: "file"}), err.Error()))
		}
		return
	}
	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf("The file %q exists", pnpDataPath))
	}
	keyPath := logger.Path{Text: pnpDataPath, Namespace: "file"}
	source := logger.Source{
		KeyPath:    keyPath,
		PrettyPath: r.PrettyPath(keyPath),
		Contents:   contents,
	}
	result, _ = r.caches.JSONCache.Parse(r.log, source, js_parser.JSONOptions{})
	return
}
// tryToExtractYarnPnPDataFromJS reads a JavaScript PnP manifest (e.g.
// ".pnp.cjs") and parses it with a special parser mode that extracts the
// embedded JSON data object. A missing file is only reported as an error
// when mode is pnpReportErrorsAboutMissingFiles. Returns the zero Expr on
// failure.
func (r resolverQuery) tryToExtractYarnPnPDataFromJS(pnpDataPath string, mode pnpDataMode) (result js_ast.Expr) {
	contents, err, originalError := r.caches.FSCache.ReadFile(r.fs, pnpDataPath)
	if r.debugLogs != nil && originalError != nil {
		r.debugLogs.addNote(fmt.Sprintf("Failed to read file %q: %s", pnpDataPath, originalError.Error()))
	}
	if err != nil {
		// ENOENT is tolerated in "ignore" mode; anything else is an error.
		if mode == pnpReportErrorsAboutMissingFiles || err != syscall.ENOENT {
			r.log.AddError(nil, logger.Range{},
				fmt.Sprintf("Cannot read file %q: %s",
					r.PrettyPath(logger.Path{Text: pnpDataPath, Namespace: "file"}), err.Error()))
		}
		return
	}
	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf("The file %q exists", pnpDataPath))
	}
	keyPath := logger.Path{Text: pnpDataPath, Namespace: "file"}
	source := logger.Source{
		KeyPath:    keyPath,
		PrettyPath: r.PrettyPath(keyPath),
		Contents:   contents,
	}
	ast, _ := r.caches.JSCache.Parse(r.log, source, js_parser.OptionsForYarnPnP())
	if r.debugLogs != nil && ast.ManifestForYarnPnP.Data != nil {
		r.debugLogs.addNote(fmt.Sprintf(" Extracted JSON data from %q", pnpDataPath))
	}
	return ast.ManifestForYarnPnP
}
pnp: remove unused `dependencyTreeRoots` data
package resolver
import (
"fmt"
"regexp"
"strings"
"syscall"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/js_parser"
"github.com/evanw/esbuild/internal/logger"
)
// This file implements the Yarn PnP specification: https://yarnpkg.com/advanced/pnp-spec/
// pnpData is the decoded contents of a Yarn PnP manifest (see
// compileYarnPnPData, which builds it from the manifest's JSON).
type pnpData struct {
	// Keys are the package idents, values are sets of references. Combining the
	// ident with each individual reference yields the set of affected locators.
	fallbackExclusionList map[string]map[string]bool

	// A map of locators that all packages are allowed to access, regardless
	// whether they list them in their dependencies or not.
	fallbackPool map[string]pnpIdentAndReference

	// A nullable regexp. If set, all project-relative importer paths should be
	// matched against it. If the match succeeds, the resolution should follow
	// the classic Node.js resolution algorithm rather than the Plug'n'Play one.
	// Note that unlike other paths in the manifest, the one checked against this
	// regexp won't begin by `./`.
	ignorePatternData *regexp.Regexp

	// This is the main part of the PnP data file. This table contains the list
	// of all packages, first keyed by package ident then by package reference.
	// One entry will have `null` in both fields and represents the absolute
	// top-level package.
	packageRegistryData map[string]map[string]pnpPackage

	// Reverse index derived from "packageRegistryData": maps a package's
	// location back to its locator. Used by findLocator.
	packageLocatorsByLocations map[string]pnpPackageLocatorByLocation

	// If true, should a dependency resolution fail for an importer that isn't
	// explicitly listed in `fallbackExclusionList`, the runtime must first check
	// whether the resolution would succeed for any of the packages in
	// `fallbackPool`; if it would, transparently return this resolution. Note
	// that all dependencies from the top-level package are implicitly part of
	// the fallback pool, even if not listed here.
	enableTopLevelFallback bool

	// Absolute path of the manifest file itself, and of the directory that
	// contains it (package locations are resolved relative to the latter).
	absPath    string
	absDirPath string
}
// This is called both a "locator" and a "dependency target" in the specification.
// When it's used as a dependency target, it can only be in one of three states:
//
//  1. A reference, to link with the dependency name
//     In this case ident is "".
//
//  2. An aliased package
//     In this case neither ident nor reference are "".
//
//  3. A missing peer dependency
//     In this case ident and reference are "".
type pnpIdentAndReference struct {
	ident     string // Empty if null
	reference string // Empty if null
}
// pnpPackage is one entry of "packageRegistryData": where a package lives
// (relative to the manifest directory) and which dependencies it may access.
type pnpPackage struct {
	packageDependencies map[string]pnpIdentAndReference // Allowed dependencies, keyed by ident
	packageLocation     string                          // Location joined with absDirPath during resolution
	discardFromLookup   bool                            // If true, findLocator skips this entry
}
// pnpPackageLocatorByLocation is one entry of the reverse index from package
// location to locator (see "packageLocatorsByLocations").
type pnpPackageLocatorByLocation struct {
	locator           pnpIdentAndReference
	discardFromLookup bool
}
// pnpResolve implements the PNP_RESOLVE entry point of the Yarn PnP
// specification for the given specifier and importer.
//
// Note: If this returns successfully then the node module resolution algorithm
// (i.e. NM_RESOLVE in the Yarn PnP specification) is always run afterward
func (r resolverQuery) pnpResolve(specifier string, parentURL string, parentManifest *pnpData) (string, bool) {
	// Node.js builtins resolve to themselves
	if BuiltInNodeModules[specifier] {
		return specifier, true
	}

	// Absolute paths and "./"/"../"-prefixed paths are left for NM_RESOLVE
	switch {
	case r.fs.IsAbs(specifier),
		strings.HasPrefix(specifier, "./"),
		strings.HasPrefix(specifier, "../"):
		return specifier, true
	}

	// Anything else is a bare identifier: run RESOLVE_TO_UNQUALIFIED, then
	// the caller runs NM_RESOLVE on the result
	return r.resolveToUnqualified(specifier, parentURL, parentManifest)
}
// parseBareIdentifier splits a bare import specifier into its package ident
// (including any "@scope/" prefix) and the remaining module path. It fails
// (ok is false) only for a scoped specifier with no "/" separator.
func parseBareIdentifier(specifier string) (ident string, modulePath string, ok bool) {
	firstSlash := strings.IndexByte(specifier, '/')

	if strings.HasPrefix(specifier, "@") {
		// A scoped package must contain at least one "/" separator
		if firstSlash < 0 {
			return "", "", false
		}

		// The ident extends up to the second "/" separator, if any
		ident = specifier
		if secondSlash := strings.IndexByte(specifier[firstSlash+1:], '/'); secondSlash >= 0 {
			ident = specifier[:firstSlash+1+secondSlash]
		}
	} else {
		// The ident extends up to the first "/" separator, if any
		ident = specifier
		if firstSlash >= 0 {
			ident = specifier[:firstSlash]
		}
	}

	// Everything after the ident is the module path
	return ident, specifier[len(ident):], true
}
// resolveToUnqualified implements RESOLVE_TO_UNQUALIFIED from the Yarn PnP
// specification: given a bare import specifier and a manifest, it returns the
// absolute "unqualified" path inside the package that should satisfy the
// import. The second result is false when resolution fails.
func (r resolverQuery) resolveToUnqualified(specifier string, parentURL string, manifest *pnpData) (string, bool) {
	// Let resolved be undefined

	// Let ident and modulePath be the result of PARSE_BARE_IDENTIFIER(specifier)
	ident, modulePath, ok := parseBareIdentifier(specifier)
	if !ok {
		return "", false
	}

	// Let manifest be FIND_PNP_MANIFEST(parentURL)
	// (this is already done by the time we get here)

	// If manifest is null, then
	// Set resolved to NM_RESOLVE(specifier, parentURL) and return it
	if manifest == nil {
		return specifier, true
	}

	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf("Using Yarn PnP manifest from %q to resolve %q", manifest.absPath, ident))
	}

	// Let parentLocator be FIND_LOCATOR(manifest, parentURL)
	parentLocator, ok := r.findLocator(manifest, parentURL)

	// If parentLocator is null, then
	// Set resolved to NM_RESOLVE(specifier, parentURL) and return it
	if !ok {
		return specifier, true
	}

	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf(" Found parent locator: [%s, %s]", quoteOrNullIfEmpty(parentLocator.ident), quoteOrNullIfEmpty(parentLocator.reference)))
	}

	// Let parentPkg be GET_PACKAGE(manifest, parentLocator)
	parentPkg, ok := r.getPackage(manifest, parentLocator.ident, parentLocator.reference)
	if !ok {
		// We aren't supposed to get here according to the Yarn PnP specification
		return "", false
	}

	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf(" Found parent package at %q", parentPkg.packageLocation))
	}

	// Let referenceOrAlias be the entry from parentPkg.packageDependencies referenced by ident
	referenceOrAlias, ok := parentPkg.packageDependencies[ident]

	// If referenceOrAlias is null or undefined, then
	if !ok || referenceOrAlias.reference == "" {
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf(" Failed to find %q in \"packageDependencies\" of parent package", ident))
		}

		// If manifest.enableTopLevelFallback is true, then
		if manifest.enableTopLevelFallback {
			if r.debugLogs != nil {
				r.debugLogs.addNote(" Searching for a fallback because \"enableTopLevelFallback\" is true")
			}

			// If parentLocator isn't in manifest.fallbackExclusionList, then
			// (indexing a nil map is safe here and yields a nil "set")
			if set, _ := manifest.fallbackExclusionList[parentLocator.ident]; !set[parentLocator.reference] {
				// Let fallback be RESOLVE_VIA_FALLBACK(manifest, ident)
				fallback, _ := r.resolveViaFallback(manifest, ident)

				// If fallback is neither null nor undefined
				if fallback.reference != "" {
					// Set referenceOrAlias to fallback
					referenceOrAlias = fallback
					ok = true
				}
			} else if r.debugLogs != nil {
				r.debugLogs.addNote(fmt.Sprintf(" Stopping because [%s, %s] is in \"fallbackExclusionList\"",
					quoteOrNullIfEmpty(parentLocator.ident), quoteOrNullIfEmpty(parentLocator.reference)))
			}
		}
	}

	// If referenceOrAlias is still undefined, then
	if !ok {
		// Throw a resolution error
		return "", false
	}

	// If referenceOrAlias is still null, then
	if referenceOrAlias.reference == "" {
		// Note: It means that parentPkg has an unfulfilled peer dependency on ident
		// Throw a resolution error
		return "", false
	}

	if r.debugLogs != nil {
		var referenceOrAliasStr string
		if referenceOrAlias.ident != "" {
			referenceOrAliasStr = fmt.Sprintf("[%q, %q]", referenceOrAlias.ident, referenceOrAlias.reference)
		} else {
			referenceOrAliasStr = quoteOrNullIfEmpty(referenceOrAlias.reference)
		}
		r.debugLogs.addNote(fmt.Sprintf(" Found dependency locator: [%s, %s]", quoteOrNullIfEmpty(ident), referenceOrAliasStr))
	}

	// Otherwise, if referenceOrAlias is an array, then
	var dependencyPkg pnpPackage
	if referenceOrAlias.ident != "" {
		// Let alias be referenceOrAlias
		alias := referenceOrAlias

		// Let dependencyPkg be GET_PACKAGE(manifest, alias)
		dependencyPkg, ok = r.getPackage(manifest, alias.ident, alias.reference)
		if !ok {
			// We aren't supposed to get here according to the Yarn PnP specification
			return "", false
		}
	} else {
		// Otherwise,
		// Let dependencyPkg be GET_PACKAGE(manifest, {ident, reference})
		dependencyPkg, ok = r.getPackage(manifest, ident, referenceOrAlias.reference)
		if !ok {
			// We aren't supposed to get here according to the Yarn PnP specification
			return "", false
		}
	}

	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf(" Found package %q at %q", ident, dependencyPkg.packageLocation))
	}

	// Return path.resolve(manifest.dirPath, dependencyPkg.packageLocation, modulePath)
	result := r.fs.Join(manifest.absDirPath, dependencyPkg.packageLocation, modulePath)
	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf(" Resolved %q via Yarn PnP to %q", specifier, result))
	}
	return result, true
}
// findLocator implements FIND_LOCATOR from the Yarn PnP specification: given
// the path of an importing module, it finds the locator of the package that
// contains it by walking the module's manifest-relative path upward through
// "packageLocatorsByLocations". It returns false if the path is outside the
// manifest, matches "ignorePatternData", or has no registered package.
func (r resolverQuery) findLocator(manifest *pnpData, moduleUrl string) (pnpIdentAndReference, bool) {
	// Let relativeUrl be the relative path between manifest and moduleUrl
	relativeUrl, ok := r.fs.Rel(manifest.absDirPath, moduleUrl)
	if !ok {
		return pnpIdentAndReference{}, false
	} else {
		// Relative URLs on Windows will use \ instead of /, which will break
		// everything we do below. Use normal slashes to keep things working.
		relativeUrl = strings.ReplaceAll(relativeUrl, "\\", "/")
	}

	// The relative path must not start with ./; trim it if needed
	if strings.HasPrefix(relativeUrl, "./") {
		relativeUrl = relativeUrl[2:]
	}

	// If relativeUrl matches manifest.ignorePatternData, then
	if manifest.ignorePatternData != nil && manifest.ignorePatternData.MatchString(relativeUrl) {
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf(" Ignoring %q because it matches \"ignorePatternData\"", relativeUrl))
		}

		// Return null
		return pnpIdentAndReference{}, false
	}

	// Note: Make sure relativeUrl always starts with a ./ or ../
	if !strings.HasSuffix(relativeUrl, "/") {
		relativeUrl += "/"
	}
	if !strings.HasPrefix(relativeUrl, "./") && !strings.HasPrefix(relativeUrl, "../") {
		relativeUrl = "./" + relativeUrl
	}

	// This is the inner loop from Yarn's PnP resolver implementation. This is
	// different from the specification, which contains a hypothetical slow
	// algorithm instead. The algorithm from the specification can sometimes
	// produce different results from the one used by the implementation, so
	// we follow the implementation.
	for {
		entry, ok := manifest.packageLocatorsByLocations[relativeUrl]
		if !ok || entry.discardFromLookup {
			// Remove the last path component and try again
			relativeUrl = relativeUrl[:strings.LastIndexByte(relativeUrl[:len(relativeUrl)-1], '/')+1]
			if relativeUrl == "" {
				break
			}
			continue
		}
		return entry.locator, true
	}
	return pnpIdentAndReference{}, false
}
// resolveViaFallback implements RESOLVE_VIA_FALLBACK from the Yarn PnP
// specification: it looks the ident up in the top-level package's
// dependencies first, and then in the manifest's "fallbackPool".
func (r resolverQuery) resolveViaFallback(manifest *pnpData, ident string) (pnpIdentAndReference, bool) {
	// Let topLevelPkg be GET_PACKAGE(manifest, {null, null})
	topLevelPkg, ok := r.getPackage(manifest, "", "")
	if !ok {
		// We aren't supposed to get here according to the Yarn PnP specification
		return pnpIdentAndReference{}, false
	}

	// Let referenceOrAlias be the entry from topLevelPkg.packageDependencies referenced by ident
	referenceOrAlias, ok := topLevelPkg.packageDependencies[ident]

	// If referenceOrAlias is defined, then
	if ok {
		// Return it immediately
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf(" Found fallback for %q in \"packageDependencies\" of top-level package: [%s, %s]", ident,
				quoteOrNullIfEmpty(referenceOrAlias.ident), quoteOrNullIfEmpty(referenceOrAlias.reference)))
		}
		return referenceOrAlias, true
	}

	// Otherwise,
	// Let referenceOrAlias be the entry from manifest.fallbackPool referenced by ident
	referenceOrAlias, ok = manifest.fallbackPool[ident]

	// Return it immediately, whether it's defined or not
	if r.debugLogs != nil {
		if ok {
			r.debugLogs.addNote(fmt.Sprintf(" Found fallback for %q in \"fallbackPool\": [%s, %s]", ident,
				quoteOrNullIfEmpty(referenceOrAlias.ident), quoteOrNullIfEmpty(referenceOrAlias.reference)))
		} else {
			r.debugLogs.addNote(fmt.Sprintf(" Failed to find fallback for %q in \"fallbackPool\"", ident))
		}
	}
	return referenceOrAlias, ok
}
// getPackage implements GET_PACKAGE: it looks a package up in the manifest's
// registry by ident and reference. A false result means the manifest is
// internally inconsistent (see the quoted invariant below).
func (r resolverQuery) getPackage(manifest *pnpData, ident string, reference string) (pnpPackage, bool) {
	byReference, found := manifest.packageRegistryData[ident]
	if found {
		if pkg, exists := byReference[reference]; exists {
			return pkg, true
		}
	}

	if r.debugLogs != nil {
		// We aren't supposed to get here according to the Yarn PnP specification:
		// "Note: pkg cannot be undefined here; all packages referenced in any of the
		// Plug'n'Play data tables MUST have a corresponding entry inside packageRegistryData."
		r.debugLogs.addNote(fmt.Sprintf(" Yarn PnP invariant violation: GET_PACKAGE failed to find a package: [%s, %s]",
			quoteOrNullIfEmpty(ident), quoteOrNullIfEmpty(reference)))
	}
	return pnpPackage{}, false
}
// quoteOrNullIfEmpty formats a string for debug output: a quoted Go string
// literal when non-empty, or the literal text "null" for the empty string
// (which stands for a JSON null in the PnP data).
func quoteOrNullIfEmpty(str string) string {
	if str == "" {
		return "null"
	}
	return fmt.Sprintf("%q", str)
}
// compileYarnPnPData converts the parsed JSON of a Yarn PnP manifest into a
// pnpData structure. Malformed or unexpected entries are silently skipped.
// It also builds the "packageLocatorsByLocations" reverse index the same way
// Yarn's own implementation does.
func compileYarnPnPData(absPath string, absDirPath string, json js_ast.Expr) *pnpData {
	data := pnpData{
		absPath:    absPath,
		absDirPath: absDirPath,
	}

	// "enableTopLevelFallback": an optional boolean
	if value, _, ok := getProperty(json, "enableTopLevelFallback"); ok {
		if enableTopLevelFallback, ok := getBool(value); ok {
			data.enableTopLevelFallback = enableTopLevelFallback
		}
	}

	// "fallbackExclusionList": an array of [ident, [reference, ...]] tuples
	if value, _, ok := getProperty(json, "fallbackExclusionList"); ok {
		if array, ok := value.Data.(*js_ast.EArray); ok {
			data.fallbackExclusionList = make(map[string]map[string]bool, len(array.Items))
			for _, item := range array.Items {
				if tuple, ok := item.Data.(*js_ast.EArray); ok && len(tuple.Items) == 2 {
					if ident, ok := getStringOrNull(tuple.Items[0]); ok {
						if array2, ok := tuple.Items[1].Data.(*js_ast.EArray); ok {
							references := make(map[string]bool, len(array2.Items))
							for _, item2 := range array2.Items {
								if reference, ok := getString(item2); ok {
									references[reference] = true
								}
							}
							data.fallbackExclusionList[ident] = references
						}
					}
				}
			}
		}
	}

	// "fallbackPool": an array of [ident, dependencyTarget] tuples
	if value, _, ok := getProperty(json, "fallbackPool"); ok {
		if array, ok := value.Data.(*js_ast.EArray); ok {
			data.fallbackPool = make(map[string]pnpIdentAndReference, len(array.Items))
			for _, item := range array.Items {
				if array2, ok := item.Data.(*js_ast.EArray); ok && len(array2.Items) == 2 {
					if ident, ok := getString(array2.Items[0]); ok {
						if dependencyTarget, ok := getDependencyTarget(array2.Items[1]); ok {
							data.fallbackPool[ident] = dependencyTarget
						}
					}
				}
			}
		}
	}

	// "ignorePatternData": an optional regular expression source string
	// (compile errors are deliberately ignored, leaving the field nil)
	if value, _, ok := getProperty(json, "ignorePatternData"); ok {
		if ignorePatternData, ok := getString(value); ok {
			data.ignorePatternData, _ = regexp.Compile(ignorePatternData)
		}
	}

	// "packageRegistryData": an array of [ident, [[reference, pkg], ...]]
	// tuples, where each pkg object carries "packageLocation",
	// "packageDependencies", and optionally "discardFromLookup"
	if value, _, ok := getProperty(json, "packageRegistryData"); ok {
		if array, ok := value.Data.(*js_ast.EArray); ok {
			data.packageRegistryData = make(map[string]map[string]pnpPackage, len(array.Items))
			data.packageLocatorsByLocations = make(map[string]pnpPackageLocatorByLocation)

			for _, item := range array.Items {
				if tuple, ok := item.Data.(*js_ast.EArray); ok && len(tuple.Items) == 2 {
					if packageIdent, ok := getStringOrNull(tuple.Items[0]); ok {
						if array2, ok := tuple.Items[1].Data.(*js_ast.EArray); ok {
							references := make(map[string]pnpPackage, len(array2.Items))
							data.packageRegistryData[packageIdent] = references

							for _, item2 := range array2.Items {
								if tuple2, ok := item2.Data.(*js_ast.EArray); ok && len(tuple2.Items) == 2 {
									if packageReference, ok := getStringOrNull(tuple2.Items[0]); ok {
										pkg := tuple2.Items[1]
										if packageLocation, _, ok := getProperty(pkg, "packageLocation"); ok {
											if packageDependencies, _, ok := getProperty(pkg, "packageDependencies"); ok {
												if packageLocation, ok := getString(packageLocation); ok {
													if array3, ok := packageDependencies.Data.(*js_ast.EArray); ok {
														deps := make(map[string]pnpIdentAndReference, len(array3.Items))
														discardFromLookup := false
														for _, dep := range array3.Items {
															if array4, ok := dep.Data.(*js_ast.EArray); ok && len(array4.Items) == 2 {
																if ident, ok := getString(array4.Items[0]); ok {
																	if dependencyTarget, ok := getDependencyTarget(array4.Items[1]); ok {
																		deps[ident] = dependencyTarget
																	}
																}
															}
														}
														if value, _, ok := getProperty(pkg, "discardFromLookup"); ok {
															if value, ok := getBool(value); ok {
																discardFromLookup = value
															}
														}
														references[packageReference] = pnpPackage{
															packageLocation:     packageLocation,
															packageDependencies: deps,
															discardFromLookup:   discardFromLookup,
														}

														// This is what Yarn's PnP implementation does (specifically in
														// "hydrateRuntimeState"), so we replicate that behavior here:
														if entry, ok := data.packageLocatorsByLocations[packageLocation]; !ok {
															data.packageLocatorsByLocations[packageLocation] = pnpPackageLocatorByLocation{
																locator:           pnpIdentAndReference{ident: packageIdent, reference: packageReference},
																discardFromLookup: discardFromLookup,
															}
														} else {
															entry.discardFromLookup = entry.discardFromLookup && discardFromLookup
															if !discardFromLookup {
																entry.locator = pnpIdentAndReference{ident: packageIdent, reference: packageReference}
															}
															data.packageLocatorsByLocations[packageLocation] = entry
														}
													}
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}

	return &data
}
// getStringOrNull unwraps a JSON value that is expected to be either a string
// or null. Null maps to the empty string. The boolean result is false for any
// other kind of value.
func getStringOrNull(json js_ast.Expr) (string, bool) {
	if _, isNull := json.Data.(*js_ast.ENull); isNull {
		return "", true
	}
	if str, isString := json.Data.(*js_ast.EString); isString {
		return helpers.UTF16ToString(str.Value), true
	}
	return "", false
}
// getDependencyTarget decodes a "dependency target" from the PnP manifest.
// It may be null (a missing peer dependency), a bare reference string, or an
// [ident, reference] tuple describing an aliased package.
func getDependencyTarget(json js_ast.Expr) (pnpIdentAndReference, bool) {
	switch data := json.Data.(type) {
	case *js_ast.EString:
		return pnpIdentAndReference{reference: helpers.UTF16ToString(data.Value)}, true

	case *js_ast.ENull:
		return pnpIdentAndReference{}, true

	case *js_ast.EArray:
		if len(data.Items) != 2 {
			break
		}
		name, ok := getString(data.Items[0])
		if !ok {
			break
		}
		reference, ok := getString(data.Items[1])
		if !ok {
			break
		}
		return pnpIdentAndReference{ident: name, reference: reference}, true
	}
	return pnpIdentAndReference{}, false
}
// pnpDataMode controls whether a missing PnP data file is reported to the
// log as an error or silently ignored.
type pnpDataMode uint8

const (
	pnpIgnoreErrorsAboutMissingFiles pnpDataMode = iota
	pnpReportErrorsAboutMissingFiles
)
// extractYarnPnPDataFromJSON reads and JSON-parses the Yarn PnP data file at
// pnpDataPath. On read failure it returns a zero expression, logging an error
// unless the file is merely missing and "mode" permits that.
func (r resolverQuery) extractYarnPnPDataFromJSON(pnpDataPath string, mode pnpDataMode) (result js_ast.Expr) {
	contents, err, originalError := r.caches.FSCache.ReadFile(r.fs, pnpDataPath)
	if r.debugLogs != nil && originalError != nil {
		r.debugLogs.addNote(fmt.Sprintf("Failed to read file %q: %s", pnpDataPath, originalError.Error()))
	}
	if err == nil {
		// Successful read: parse the contents as JSON
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("The file %q exists", pnpDataPath))
		}
		keyPath := logger.Path{Text: pnpDataPath, Namespace: "file"}
		source := logger.Source{
			KeyPath:    keyPath,
			PrettyPath: r.PrettyPath(keyPath),
			Contents:   contents,
		}
		result, _ = r.caches.JSONCache.Parse(r.log, source, js_parser.JSONOptions{})
		return
	}
	// Read failed: only a missing file may be ignored, and only when allowed
	if mode == pnpReportErrorsAboutMissingFiles || err != syscall.ENOENT {
		r.log.AddError(nil, logger.Range{},
			fmt.Sprintf("Cannot read file %q: %s",
				r.PrettyPath(logger.Path{Text: pnpDataPath, Namespace: "file"}), err.Error()))
	}
	return
}
// tryToExtractYarnPnPDataFromJS reads the JavaScript file at pnpDataPath and
// extracts the Yarn PnP JSON manifest embedded in it via the JS parser's
// Yarn-PnP mode. On read failure it returns a zero expression, logging an
// error unless the file is merely missing and "mode" permits that.
func (r resolverQuery) tryToExtractYarnPnPDataFromJS(pnpDataPath string, mode pnpDataMode) (result js_ast.Expr) {
	contents, err, originalError := r.caches.FSCache.ReadFile(r.fs, pnpDataPath)
	if r.debugLogs != nil && originalError != nil {
		r.debugLogs.addNote(fmt.Sprintf("Failed to read file %q: %s", pnpDataPath, originalError.Error()))
	}
	if err == nil {
		// Successful read: parse the contents as JavaScript and pull out
		// the manifest expression found by the parser
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("The file %q exists", pnpDataPath))
		}
		keyPath := logger.Path{Text: pnpDataPath, Namespace: "file"}
		source := logger.Source{
			KeyPath:    keyPath,
			PrettyPath: r.PrettyPath(keyPath),
			Contents:   contents,
		}
		ast, _ := r.caches.JSCache.Parse(r.log, source, js_parser.OptionsForYarnPnP())
		if r.debugLogs != nil && ast.ManifestForYarnPnP.Data != nil {
			r.debugLogs.addNote(fmt.Sprintf(" Extracted JSON data from %q", pnpDataPath))
		}
		return ast.ManifestForYarnPnP
	}
	// Read failed: only a missing file may be ignored, and only when allowed
	if mode == pnpReportErrorsAboutMissingFiles || err != syscall.ENOENT {
		r.log.AddError(nil, logger.Range{},
			fmt.Sprintf("Cannot read file %q: %s",
				r.PrettyPath(logger.Path{Text: pnpDataPath, Namespace: "file"}), err.Error()))
	}
	return
}
|
//
// Broker peering simulation (part 2).
// Prototypes the request-reply flow
//
package main
import (
zmq "github.com/pebbe/zmq3"
"fmt"
"math/rand"
"os"
//"strings"
"time"
)
const (
	NBR_CLIENTS  = 10          // Number of client goroutines started per broker
	NBR_WORKERS  = 3           // Number of worker goroutines started per broker
	WORKER_READY = "**READY**" // Signals worker is ready
)

var (
	// Names of our peer brokers. A reply whose first frame is one of these
	// names is routed out through the cloud frontend rather than locally.
	peers = make(map[string]bool)
)
// The client task does a request-reply dialog using a standard synchronous
// REQ socket: it sends a greeting to its broker's local frontend, prints the
// reply, and sleeps 500-1500ms before repeating. It stops when the socket
// receive is interrupted.
func client_task(name string, i int) {
	id := fmt.Sprintf("Client-%s-%d", name, i)
	sock, _ := zmq.NewSocket(zmq.REQ)
	defer sock.Close()
	sock.SetIdentity(id)
	sock.Connect("ipc://" + name + "-localfe.ipc")

	for {
		// Send request, get reply
		sock.Send("HELLO from "+id, 0)
		resp, err := sock.Recv(0)
		if err != nil {
			fmt.Println("client_task interrupted", name)
			return // Interrupted
		}
		fmt.Printf("%s: %s\n", id, resp)
		pause := 500 + rand.Intn(1000)
		time.Sleep(time.Duration(pause) * time.Millisecond)
	}
}
// The worker task plugs into the load-balancer using a REQ socket: it
// announces itself with WORKER_READY, then echoes an "OK" reply for every
// request, preserving the routing envelope frames. It stops when the socket
// receive is interrupted.
func worker_task(name string, i int) {
	id := fmt.Sprintf("Worker-%s-%d", name, i)
	sock, _ := zmq.NewSocket(zmq.REQ)
	defer sock.Close()
	sock.SetIdentity(id)
	sock.Connect("ipc://" + name + "-localbe.ipc")

	// Tell broker we're ready for work
	sock.SendMessage(WORKER_READY)

	for {
		msg, err := sock.RecvMessage(0)
		if err != nil {
			fmt.Println("worker_task interrupted", name)
			return // Interrupted
		}
		// The last frame is the request body; the frames before it are the
		// routing envelope, which must be sent back unchanged
		last := len(msg) - 1
		fmt.Printf("%s: %s\n", id, msg[last])
		sock.SendMessage(msg[:last], "OK from "+id)
	}
}
// The main task begins by setting-up its frontend and backend sockets
// and then starting its client and worker tasks. It then routes replies from
// the two backends to the matching frontend, and — while local worker
// capacity is available — routes client requests either to a local worker
// or, ~20% of the time for reroutable requests, to a random peer broker.
func main() {
	// First argument is this broker's name
	// Other arguments are our peers' names
	if len(os.Args) < 2 {
		fmt.Println("syntax: peering2 me {you}…")
		os.Exit(1)
	}
	for _, peer := range os.Args[2:] {
		peers[peer] = true
	}
	self := os.Args[1]
	fmt.Println("I: preparing broker at", self)
	rand.Seed(time.Now().Unix())

	// Bind cloud frontend to endpoint
	cloudfe, _ := zmq.NewSocket(zmq.ROUTER)
	defer cloudfe.Close()
	cloudfe.SetIdentity(self)
	cloudfe.Bind("ipc://" + self + "-cloud.ipc")

	// Connect cloud backend to all peers
	cloudbe, _ := zmq.NewSocket(zmq.ROUTER)
	defer cloudbe.Close()
	cloudbe.SetIdentity(self)
	for _, peer := range os.Args[2:] {
		fmt.Println("I: connecting to cloud frontend at", peer)
		cloudbe.Connect("ipc://" + peer + "-cloud.ipc")
	}

	// Prepare local frontend and backend
	localfe, _ := zmq.NewSocket(zmq.ROUTER)
	defer localfe.Close()
	localfe.Bind("ipc://" + self + "-localfe.ipc")
	localbe, _ := zmq.NewSocket(zmq.ROUTER)
	defer localbe.Close()
	localbe.Bind("ipc://" + self + "-localbe.ipc")

	// Get user to tell us when we can start…
	fmt.Print("Press Enter when all brokers are started: ")
	var line string
	fmt.Scanln(&line)

	// Start local workers
	for worker_nbr := 0; worker_nbr < NBR_WORKERS; worker_nbr++ {
		go worker_task(self, worker_nbr)
	}

	// Start local clients
	for client_nbr := 0; client_nbr < NBR_CLIENTS; client_nbr++ {
		go client_task(self, client_nbr)
	}

	// Here we handle the request-reply flow. We're using load-balancing
	// to poll workers at all times, and clients only when there are one or
	// more workers available.

	// Least recently used queue of available workers
	workers := make([]string, 0)

	chLocalbe := pool(localbe)
	chLocalfe := pool(localfe)
	chCloudbe := pool(cloudbe)
	chCloudfe := pool(cloudfe)

	msg := []string{}
	number_of_peers := len(os.Args) - 2

	// chWait is never sent to, so receiving from it blocks forever; it is
	// used as the "infinite" timeout when there are no workers to serve
	chWait := make(<-chan time.Time)
	chTimeout := chWait
LOOP:
	for {
		// First, route any waiting replies from workers
		// If we have no workers anyhow, wait indefinitely
		if len(workers) > 0 {
			chTimeout = time.After(time.Second)
		} else {
			chTimeout = chWait
		}
		// Discard the previous message before waiting for a new one.
		// (Bug fix: this used to be `msg = msg[:]`, which is a no-op, so the
		// timeout case could re-route the previously handled reply.)
		msg = msg[:0]
		select {
		case msg = <-chLocalbe:
			// Handle reply from local worker
			var identity string
			identity, msg = unwrap(msg)
			// The worker is available again: append it to the LRU queue
			workers = append(workers, identity)
			// If it's READY, don't route the message any further
			if msg[0] == WORKER_READY {
				msg = msg[0:0]
			}
		case msg = <-chCloudbe:
			// Or handle reply from peer broker
			// We don't use peer broker identity for anything
			_, msg = unwrap(msg)
		case <-chTimeout:
		}
		if len(msg) > 0 {
			// Route reply to cloud if it's addressed to a broker
			if peers[msg[0]] {
				cloudfe.SendMessage(msg)
			} else {
				localfe.SendMessage(msg)
			}
		}

		// Now we route as many client requests as we have worker capacity
		// for. We may reroute requests from our local frontend, but not from
		// the cloud frontend. We reroute randomly now, just to test things
		// out. In the next version we'll do this properly by calculating
		// cloud capacity:
		for len(workers) > 0 {
			var reroutable bool
			// We'll do peer brokers first, to prevent starvation
			select {
			case msg = <-chCloudfe:
				reroutable = false
			default:
				select {
				case msg = <-chLocalfe:
					reroutable = true
				default:
					continue LOOP // No work, go back to backends
				}
			}
			// If reroutable, send to cloud 20% of the time
			// Here we'd normally use cloud status information
			if reroutable && number_of_peers > 0 && rand.Intn(5) == 0 {
				// Route to random broker peer
				random_peer := os.Args[2+rand.Intn(number_of_peers)]
				cloudbe.SendMessage(random_peer, "", msg)
			} else {
				// Route to the least recently used local worker
				localbe.SendMessage(workers[0], "", msg)
				workers = workers[1:]
			}
		}
	}
	fmt.Println("Exit")
}
// pool returns a channel that delivers every multi-frame message received on
// the given socket. The channel is closed when a receive fails (for example
// when the process is interrupted), which terminates the goroutine.
func pool(soc *zmq.Socket) chan []string {
	ch := make(chan []string)
	go func() {
		defer close(ch)
		for {
			msg, err := soc.RecvMessage(0)
			if err != nil {
				return
			}
			ch <- msg
		}
	}()
	return ch
}
// unwrap pops the first frame off msg and returns it as head. If the next
// frame is an empty delimiter frame, that frame is popped as well. The
// remaining frames are returned as tail.
func unwrap(msg []string) (string, []string) {
	head := msg[0]
	rest := msg[1:]
	if len(rest) > 0 && rest[0] == "" {
		rest = rest[1:]
	}
	return head, rest
}
// Rewrote example peering2 using polling. The previous version with goroutines crashed.
//
// Broker peering simulation (part 2).
// Prototypes the request-reply flow
//
package main
import (
zmq "github.com/pebbe/zmq3"
"fmt"
"log"
"math/rand"
"os"
"time"
)
const (
	NBR_CLIENTS  = 10          // Number of client goroutines started per broker
	NBR_WORKERS  = 3           // Number of worker goroutines started per broker
	WORKER_READY = "**READY**" // Signals worker is ready
)

var (
	// Names of our peer brokers. A reply whose first frame is one of these
	// names is routed out through the cloud frontend rather than locally.
	peers = make(map[string]bool)
)
// The client task does a request-reply dialog using a standard synchronous
// REQ socket: it greets its broker's local frontend, prints the reply, and
// sleeps 500-1500ms before repeating. It stops when the receive fails.
func client_task(name string, i int) {
	cname := fmt.Sprintf("Client-%s-%d", name, i)
	req, _ := zmq.NewSocket(zmq.REQ)
	defer req.Close()
	req.SetIdentity(cname)
	req.Connect("ipc://" + name + "-localfe.ipc")

	for {
		// Send request, get reply
		req.Send("HELLO from "+cname, 0)
		reply, err := req.Recv(0)
		if err != nil {
			fmt.Println("client_task interrupted", name)
			return // Interrupted
		}
		fmt.Printf("%s: %s\n", cname, reply)
		delay := time.Duration(500+rand.Intn(1000)) * time.Millisecond
		time.Sleep(delay)
	}
}
// The worker task plugs into the load-balancer using a REQ socket: it
// announces itself with WORKER_READY, then replies "OK" to each request,
// preserving the routing envelope frames. It stops when the receive fails.
func worker_task(name string, i int) {
	wname := fmt.Sprintf("Worker-%s-%d", name, i)
	req, _ := zmq.NewSocket(zmq.REQ)
	defer req.Close()
	req.SetIdentity(wname)
	req.Connect("ipc://" + name + "-localbe.ipc")

	// Tell broker we're ready for work
	req.SendMessage(WORKER_READY)

	for {
		msg, err := req.RecvMessage(0)
		if err != nil {
			fmt.Println("worker_task interrupted", name)
			return // Interrupted
		}
		// The final frame is the request body; everything before it is the
		// routing envelope and must be echoed back unchanged
		body := len(msg) - 1
		fmt.Printf("%s: %s\n", wname, msg[body])
		req.SendMessage(msg[:body], "OK from "+wname)
	}
}
// The main task begins by setting-up its frontend and backend sockets
// and then starting its client and worker tasks. It polls the two backends
// for replies and routes them to the matching frontend, then — while local
// worker capacity is available — polls the frontends (cloud first, to
// prevent starvation) and routes requests either to a local worker or,
// ~20% of the time for reroutable requests, to a random peer broker.
func main() {
	// First argument is this broker's name
	// Other arguments are our peers' names
	if len(os.Args) < 2 {
		fmt.Println("syntax: peering2 me {you}…")
		os.Exit(1)
	}
	for _, peer := range os.Args[2:] {
		peers[peer] = true
	}
	self := os.Args[1]
	fmt.Println("I: preparing broker at", self)
	rand.Seed(time.Now().Unix())

	// Bind cloud frontend to endpoint
	cloudfe, _ := zmq.NewSocket(zmq.ROUTER)
	defer cloudfe.Close()
	cloudfe.SetIdentity(self)
	cloudfe.Bind("ipc://" + self + "-cloud.ipc")

	// Connect cloud backend to all peers
	cloudbe, _ := zmq.NewSocket(zmq.ROUTER)
	defer cloudbe.Close()
	cloudbe.SetIdentity(self)
	for _, peer := range os.Args[2:] {
		fmt.Println("I: connecting to cloud frontend at", peer)
		cloudbe.Connect("ipc://" + peer + "-cloud.ipc")
	}

	// Prepare local frontend and backend
	localfe, _ := zmq.NewSocket(zmq.ROUTER)
	defer localfe.Close()
	localfe.Bind("ipc://" + self + "-localfe.ipc")
	localbe, _ := zmq.NewSocket(zmq.ROUTER)
	defer localbe.Close()
	localbe.Bind("ipc://" + self + "-localbe.ipc")

	// Get user to tell us when we can start…
	fmt.Print("Press Enter when all brokers are started: ")
	var line string
	fmt.Scanln(&line)

	// Start local workers
	for worker_nbr := 0; worker_nbr < NBR_WORKERS; worker_nbr++ {
		go worker_task(self, worker_nbr)
	}

	// Start local clients
	for client_nbr := 0; client_nbr < NBR_CLIENTS; client_nbr++ {
		go client_task(self, client_nbr)
	}

	// Here we handle the request-reply flow. We're using load-balancing
	// to poll workers at all times, and clients only when there are one or
	// more workers available.

	// Least recently used queue of available workers
	workers := make([]string, 0)

	backends := zmq.NewPoller()
	backends.Register(localbe, zmq.POLLIN)
	backends.Register(cloudbe, zmq.POLLIN)
	frontends := zmq.NewPoller()
	frontends.Register(localfe, zmq.POLLIN)
	frontends.Register(cloudfe, zmq.POLLIN)

	msg := []string{}
	number_of_peers := len(os.Args) - 2
	for {
		// First, route any waiting replies from workers
		// If we have no workers anyhow, wait indefinitely
		timeout := time.Second
		if len(workers) == 0 {
			timeout = -1
		}
		events, err := backends.Poll(timeout)
		if err != nil {
			log.Println(err)
			break // Interrupted
		}
		// Discard the previous message before handling the next event.
		// (Bug fix: this used to be `msg = msg[:]`, which is a no-op, so a
		// timeout could cause the previous reply to be routed a second time.)
		msg = msg[:0]
		if events[0]&zmq.POLLIN != 0 {
			// Handle reply from local worker
			msg, err = localbe.RecvMessage(0)
			if err != nil {
				log.Println(err)
				break // Interrupted
			}
			var identity string
			identity, msg = unwrap(msg)
			// The worker is available again: append it to the LRU queue
			workers = append(workers, identity)
			// If it's READY, don't route the message any further
			if msg[0] == WORKER_READY {
				msg = msg[0:0]
			}
		} else if events[1]&zmq.POLLIN != 0 {
			// Or handle reply from peer broker
			msg, err = cloudbe.RecvMessage(0)
			if err != nil {
				log.Println(err)
				break // Interrupted
			}
			// We don't use peer broker identity for anything
			_, msg = unwrap(msg)
		}
		if len(msg) > 0 {
			// Route reply to cloud if it's addressed to a broker
			if peers[msg[0]] {
				cloudfe.SendMessage(msg)
			} else {
				localfe.SendMessage(msg)
			}
		}

		// Now we route as many client requests as we have worker capacity
		// for. We may reroute requests from our local frontend, but not from
		// the cloud frontend. We reroute randomly now, just to test things
		// out. In the next version we'll do this properly by calculating
		// cloud capacity:
		for len(workers) > 0 {
			events, err := frontends.Poll(0)
			if err != nil {
				log.Println(err)
				break // Interrupted
			}
			var reroutable bool
			// We'll do peer brokers first, to prevent starvation
			if events[1]&zmq.POLLIN != 0 {
				msg, _ = cloudfe.RecvMessage(0)
				reroutable = false
			} else if events[0]&zmq.POLLIN != 0 {
				msg, _ = localfe.RecvMessage(0)
				reroutable = true
			} else {
				break // No work, go back to backends
			}
			// If reroutable, send to cloud 20% of the time
			// Here we'd normally use cloud status information
			if reroutable && number_of_peers > 0 && rand.Intn(5) == 0 {
				// Route to random broker peer
				random_peer := os.Args[2+rand.Intn(number_of_peers)]
				cloudbe.SendMessage(random_peer, "", msg)
			} else {
				// Route to the least recently used local worker
				localbe.SendMessage(workers[0], "", msg)
				workers = workers[1:]
			}
		}
	}
	fmt.Println("Exit")
}
// unwrap pops the first frame off msg and returns it as head. If the
// following frame is an empty delimiter frame, it is popped as well. The
// remaining frames are returned as tail.
func unwrap(msg []string) (head string, tail []string) {
	head, tail = msg[0], msg[1:]
	if len(tail) != 0 && tail[0] == "" {
		tail = tail[1:]
	}
	return
}
|
package controller
import (
// "archive/zip"
"encoding/json"
// "errors"
"fmt"
"path/filepath"
"strings"
// "time"
"github.com/eaciit/colony-core/v0"
"github.com/eaciit/colony-manager/helper"
"github.com/eaciit/dbox"
_ "github.com/eaciit/dbox/dbc/jsons"
// "github.com/eaciit/hdc/hdfs"
"github.com/eaciit/knot/knot.v1"
// "github.com/eaciit/live"
// "github.com/eaciit/sshclient"
// "github.com/eaciit/toolkit"
// "golang.org/x/crypto/ssh"
)
// leSourcePath is the local directory holding the language-environment
// installer bundles (EC_DATA_PATH/langenvironment/installer).
var (
	leSourcePath = filepath.Join(EC_DATA_PATH, "langenvironment", "installer")
)
// Remote install destination, supported server OS identifiers,
// supported language identifiers, and the Go installer archive name.
const (
	DESTINSTALL_PATH = "/usr/local/"
	SERVER_WIN       = "windows"
	SERVER_LINUX     = "linux"
	SERVER_OSX       = "osx"
	LANG_GO          = "go"
	LANG_JAVA        = "java"
	LANG_SCALA       = "scala"
	INSTALLER_LINUX_GO = "go1.6.linux-x86_64.tar.gz"
)
// LangenvironmentController serves language-environment endpoints
// (listing stored languages and installing runtimes on remote servers).
type LangenvironmentController struct {
	App
}
func getArch(serverOS string, lang string, arch string) string {
var result string
if serverOS == SERVER_LINUX {
if lang == LANG_GO {
result = fmt.Sprintf("%s1.6.%s-%s.%s", LANG_GO, SERVER_LINUX, strings.TrimSpace(arch), "tar.gz")
}
} else if serverOS == SERVER_WIN {
if lang == LANG_GO {
result = fmt.Sprintf("%s1.6.%s-%s.%s", LANG_GO, SERVER_WIN, strings.TrimSpace(arch), "zip")
}
}
return result
}
// CreateLangenvironmentController wires a new LangenvironmentController
// to the given knot server and returns it.
func CreateLangenvironmentController(l *knot.Server) *LangenvironmentController {
	c := new(LangenvironmentController)
	c.Server = l
	return c
}
// GetSampleDataForSetupLang returns a hard-coded sample payload
// (ServerId "test", language "go") used to drive SetupFromSH.
// JSON decoding problems are only printed, never returned.
func (l *LangenvironmentController) GetSampleDataForSetupLang() colonycore.LanguageEnvironmentPayload {
	const sample = `{ "ServerId": "test", "Lang": "go" }`
	payload := colonycore.LanguageEnvironmentPayload{}
	if err := json.Unmarshal([]byte(sample), &payload); err != nil {
		fmt.Println(err)
	}
	return payload
}
// GetLanguage returns every stored LanguageEnviroment record wrapped in
// the standard JSON result envelope.
func (le *LangenvironmentController) GetLanguage(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputJson

	cursor, err := colonycore.Find(new(colonycore.LanguageEnviroment), nil)
	if err != nil {
		return helper.CreateResult(false, nil, err.Error())
	}
	// Register the close immediately after a successful Find; the
	// original deferred it only after Fetch, leaking the cursor on the
	// Fetch-error path.
	defer cursor.Close()

	data := []colonycore.LanguageEnviroment{}
	err = cursor.Fetch(&data, 0, false)
	if err != nil {
		return helper.CreateResult(false, nil, err.Error())
	}
	return helper.CreateResult(true, data, "")
}
// SetupFromSH installs the configured language runtime on a remote
// server: it copies the installer archive and install.sh over SSH,
// strips CRLF line endings from the script, marks it executable, and
// runs it with the installer path, install destination, and project
// name as arguments.
func (l *LangenvironmentController) SetupFromSH(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputJson

	payload := l.GetSampleDataForSetupLang()

	dataServers := new(colonycore.Server)
	err := colonycore.Get(dataServers, payload.ServerId)
	if err != nil {
		return helper.CreateResult(false, nil, err.Error())
	}

	serverPathSeparator := CreateApplicationController(l.Server).GetServerPathSeparator(dataServers)
	sshSetting, sshClient, err := CreateServerController(l.Server).SSHConnect(dataServers)
	if err != nil {
		return helper.CreateResult(false, nil, err.Error())
	}
	defer sshClient.Close()

	dataLanguage := []colonycore.LanguageEnviroment{}
	cursor, err := colonycore.Find(new(colonycore.LanguageEnviroment), dbox.Eq("language", payload.Lang))
	if err != nil {
		// Check the Find error before touching the cursor; the original
		// called Fetch on a possibly-nil cursor first.
		return helper.CreateResult(false, nil, err.Error())
	}
	defer cursor.Close()
	// The original discarded Fetch's error and re-checked Find's.
	if err = cursor.Fetch(&dataLanguage, 0, false); err != nil {
		return helper.CreateResult(false, nil, err.Error())
	}

	if cursor.Count() > 0 {
		for _, eachLang := range dataLanguage {
			var sourcePath string
			var destinationPath string
			var pathstring []string
			if eachLang.Language == LANG_GO {
				pathstring = []string{dataServers.DataPath, "langenvironment", "installer", LANG_GO}
				sourcePath = filepath.Join(leSourcePath, LANG_GO, dataServers.OS, INSTALLER_LINUX_GO)
				destinationPath = strings.Join(append(pathstring, dataServers.OS), serverPathSeparator)
			}
			installShPath := filepath.Join(leSourcePath, LANG_GO, dataServers.OS, "install.sh")
			pathstring = append(pathstring, dataServers.OS)
			installShdestPath := strings.Join(append(pathstring, "install.sh"), serverPathSeparator)
			installFilePath := strings.Join(append(pathstring, INSTALLER_LINUX_GO), serverPathSeparator)

			// Copy the installer archive and the install script.
			if err = sshSetting.SshCopyByPath(sourcePath, destinationPath); err != nil {
				return helper.CreateResult(false, nil, err.Error())
			}
			if err = sshSetting.SshCopyByPath(installShPath, destinationPath); err != nil {
				return helper.CreateResult(false, nil, err.Error())
			}

			// Strip DOS line endings: sed -i 's/^M//' install.sh
			cmdSedInstall := fmt.Sprintf("sed -i 's/\r//g' %s", installShdestPath)
			if _, err = sshSetting.GetOutputCommandSsh(cmdSedInstall); err != nil {
				return helper.CreateResult(false, nil, err.Error())
			}

			// chmod +x install.sh — the original ran "chmod -x", which
			// *removes* the execute bit and contradicts its own comment.
			cmdChmodCli := fmt.Sprintf("chmod +x %s", installShdestPath)
			if _, err = sshSetting.GetOutputCommandSsh(cmdChmodCli); err != nil {
				return helper.CreateResult(false, nil, err.Error())
			}

			// bash install.sh <installer> <dest-install-path> <project>
			cmdShCli := fmt.Sprintf("bash %s %s %s %s", installShdestPath, installFilePath, DESTINSTALL_PATH, "goproject")
			fmt.Println("sh command :: ", cmdShCli)
			if _, err = sshSetting.GetOutputCommandSsh(cmdShCli); err != nil {
				return helper.CreateResult(false, nil, err.Error())
			}
		}
	}
	return helper.CreateResult(true, payload, "")
}
Update langenvironment controller: add Java language installation process
package controller
import (
// "archive/zip"
"encoding/json"
// "errors"
"fmt"
"path/filepath"
"strings"
// "time"
"github.com/eaciit/colony-core/v0"
"github.com/eaciit/colony-manager/helper"
"github.com/eaciit/dbox"
_ "github.com/eaciit/dbox/dbc/jsons"
// "github.com/eaciit/hdc/hdfs"
"github.com/eaciit/knot/knot.v1"
// "github.com/eaciit/live"
// "github.com/eaciit/sshclient"
// "github.com/eaciit/toolkit"
// "golang.org/x/crypto/ssh"
)
// leSourcePath is the local directory holding the language-environment
// installer bundles (EC_DATA_PATH/langenvironment/installer).
var (
	leSourcePath = filepath.Join(EC_DATA_PATH, "langenvironment", "installer")
)
// Remote install destination, supported server OS identifiers, and
// supported language identifiers. Installer archive names now come from
// the stored LanguageEnviroment records instead of constants.
const (
	DESTINSTALL_PATH = "/usr/local/"
	SERVER_WIN       = "windows"
	SERVER_LINUX     = "linux"
	SERVER_OSX       = "osx"
	LANG_GO          = "go"
	LANG_JAVA        = "java"
	LANG_SCALA       = "scala"
	// INSTALLER_LINUX_GO = "go1.6.linux-x86_64.tar.gz"
	// INSTALLER_LINUX_JAVA = "jdk-8u77-linux-x86_64.tar.gz"
)
// LangenvironmentController serves language-environment endpoints
// (listing stored languages and installing runtimes on remote servers).
type LangenvironmentController struct {
	App
}
func getArch(serverOS string, lang string, arch string) string {
var result string
if serverOS == SERVER_LINUX {
if lang == LANG_GO {
result = fmt.Sprintf("%s1.6.%s-%s.%s", LANG_GO, SERVER_LINUX, strings.TrimSpace(arch), "tar.gz")
}
} else if serverOS == SERVER_WIN {
if lang == LANG_GO {
result = fmt.Sprintf("%s1.6.%s-%s.%s", LANG_GO, SERVER_WIN, strings.TrimSpace(arch), "zip")
}
}
return result
}
// CreateLangenvironmentController wires a new LangenvironmentController
// to the given knot server and returns it.
func CreateLangenvironmentController(l *knot.Server) *LangenvironmentController {
	c := new(LangenvironmentController)
	c.Server = l
	return c
}
// GetSampleDataForSetupLang returns a hard-coded sample payload
// (ServerId "test", language "java") used to drive SetupFromSH.
// JSON decoding problems are only printed, never returned.
func (l *LangenvironmentController) GetSampleDataForSetupLang() colonycore.LanguageEnvironmentPayload {
	const sample = `{ "ServerId": "test", "Lang": "java" }`
	payload := colonycore.LanguageEnvironmentPayload{}
	if err := json.Unmarshal([]byte(sample), &payload); err != nil {
		fmt.Println(err)
	}
	return payload
}
// GetLanguage returns every stored LanguageEnviroment record wrapped in
// the standard JSON result envelope.
func (le *LangenvironmentController) GetLanguage(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputJson

	cursor, err := colonycore.Find(new(colonycore.LanguageEnviroment), nil)
	if err != nil {
		return helper.CreateResult(false, nil, err.Error())
	}
	// Register the close immediately after a successful Find; the
	// original deferred it only after Fetch, leaking the cursor on the
	// Fetch-error path.
	defer cursor.Close()

	data := []colonycore.LanguageEnviroment{}
	err = cursor.Fetch(&data, 0, false)
	if err != nil {
		return helper.CreateResult(false, nil, err.Error())
	}
	return helper.CreateResult(true, data, "")
}
// SetupFromSH installs every configured language runtime (Go or Java)
// on the remote server named in the payload. For each installer whose
// OS matches the server, it copies the archive and install.sh over SSH,
// strips CRLF line endings, marks the script executable, and runs it
// with the installer path, install destination, and project name.
func (l *LangenvironmentController) SetupFromSH(r *knot.WebContext) interface{} {
	r.Config.OutputType = knot.OutputJson

	payload := l.GetSampleDataForSetupLang()

	dataServers := new(colonycore.Server)
	err := colonycore.Get(dataServers, payload.ServerId)
	if err != nil {
		return helper.CreateResult(false, nil, err.Error())
	}

	serverPathSeparator := CreateApplicationController(l.Server).GetServerPathSeparator(dataServers)
	sshSetting, sshClient, err := CreateServerController(l.Server).SSHConnect(dataServers)
	if err != nil {
		return helper.CreateResult(false, nil, err.Error())
	}
	defer sshClient.Close()

	dataLanguage := []colonycore.LanguageEnviroment{}
	cursor, err := colonycore.Find(new(colonycore.LanguageEnviroment), dbox.Eq("language", payload.Lang))
	if err != nil {
		// Check the Find error before touching the cursor; the original
		// called Fetch on a possibly-nil cursor first.
		return helper.CreateResult(false, nil, err.Error())
	}
	defer cursor.Close()
	// The original discarded Fetch's error and re-checked Find's.
	if err = cursor.Fetch(&dataLanguage, 0, false); err != nil {
		return helper.CreateResult(false, nil, err.Error())
	}

	if cursor.Count() > 0 {
		for _, eachLang := range dataLanguage {
			for _, dataInstaller := range eachLang.Installer {
				// Only installers built for this server's OS apply.
				if !strings.EqualFold(dataServers.OS, dataInstaller.OS) {
					continue
				}

				var sourcePath string
				var destinationPath string
				var installShPath string
				pathstring := []string{dataServers.DataPath, "langenvironment", "installer"}
				if eachLang.Language == LANG_GO {
					pathstring = append(pathstring, LANG_GO, dataServers.OS)
					sourcePath = filepath.Join(leSourcePath, LANG_GO, dataServers.OS, dataInstaller.InstallerSource)
					destinationPath = strings.Join(pathstring, serverPathSeparator)
					installShPath = filepath.Join(leSourcePath, LANG_GO, dataServers.OS, "install.sh")
				} else if eachLang.Language == LANG_JAVA {
					pathstring = append(pathstring, LANG_JAVA, dataServers.OS)
					sourcePath = filepath.Join(leSourcePath, LANG_JAVA, dataServers.OS, dataInstaller.InstallerSource)
					destinationPath = strings.Join(pathstring, serverPathSeparator)
					installShPath = filepath.Join(leSourcePath, LANG_JAVA, dataServers.OS, "install.sh")
				}
				installShdestPath := strings.Join(append(pathstring, "install.sh"), serverPathSeparator)
				installFilePath := strings.Join(append(pathstring, dataInstaller.InstallerSource), serverPathSeparator)

				// Copy the installer archive and the install script.
				if err = sshSetting.SshCopyByPath(sourcePath, destinationPath); err != nil {
					return helper.CreateResult(false, nil, err.Error())
				}
				if err = sshSetting.SshCopyByPath(installShPath, destinationPath); err != nil {
					return helper.CreateResult(false, nil, err.Error())
				}

				// Strip DOS line endings: sed -i 's/^M//' install.sh
				cmdSedInstall := fmt.Sprintf("sed -i 's/\r//g' %s", installShdestPath)
				if _, err = sshSetting.GetOutputCommandSsh(cmdSedInstall); err != nil {
					return helper.CreateResult(false, nil, err.Error())
				}

				// chmod +x install.sh — the original ran "chmod -x",
				// which *removes* the execute bit, contradicting its
				// own comment.
				cmdChmodCli := fmt.Sprintf("chmod +x %s", installShdestPath)
				if _, err = sshSetting.GetOutputCommandSsh(cmdChmodCli); err != nil {
					return helper.CreateResult(false, nil, err.Error())
				}

				// bash install.sh <installer> <dest-install-path> <project>
				cmdShCli := fmt.Sprintf("bash %s %s %s %s", installShdestPath, installFilePath, DESTINSTALL_PATH, "goproject")
				outputCmd, err := sshSetting.GetOutputCommandSsh(cmdShCli)
				if err != nil {
					return helper.CreateResult(false, nil, err.Error())
				}
				fmt.Println(" --- > ", outputCmd)
			}
		}
	}
	return helper.CreateResult(true, payload, "")
}
|
package index
import (
"math/rand"
"restic"
"restic/repository"
"restic/test"
"testing"
"time"
)
var (
	// snapshotTime is the fixed base timestamp used for test snapshots;
	// snapshot i is created at snapshotTime + i seconds.
	snapshotTime = time.Unix(1470492820, 207401672)
	// depth is the directory tree depth of generated test snapshots.
	depth = 3
)
// createFilledRepo returns a test repository populated with the given
// number of snapshots (dup is the fraction of duplicate blobs), plus a
// cleanup function. The original ignored the snapshots argument and
// always created exactly three.
func createFilledRepo(t testing.TB, snapshots int, dup float32) (restic.Repository, func()) {
	repo, cleanup := repository.TestRepository(t)

	for i := 0; i < snapshots; i++ {
		restic.TestCreateSnapshot(t, repo, snapshotTime.Add(time.Duration(i)*time.Second), depth, dup)
	}

	return repo, cleanup
}
// validateIndex checks that every data pack stored in the repository is
// listed in idx.Packs.
func validateIndex(t testing.TB, repo restic.Repository, idx *Index) {
	for packID := range repo.List(restic.DataFile, nil) {
		_, listed := idx.Packs[packID]
		if !listed {
			t.Errorf("pack %v missing from index", packID.Str())
		}
	}
}
// TestIndexNew checks that New builds a non-nil index covering every
// pack in a freshly filled repository.
func TestIndexNew(t *testing.T) {
	repo, cleanup := createFilledRepo(t, 3, 0)
	defer cleanup()

	idx, err := New(repo, nil)
	switch {
	case err != nil:
		t.Fatalf("New() returned error %v", err)
	case idx == nil:
		t.Fatalf("New() returned nil index")
	}

	validateIndex(t, repo, idx)
}
// TestIndexLoad verifies that an index loaded from the repository agrees
// with one built from scratch: same pack count and, per pack, the same
// set of (ID, type, offset, length) entries.
func TestIndexLoad(t *testing.T) {
	repo, cleanup := createFilledRepo(t, 3, 0)
	defer cleanup()

	loadIdx, err := Load(repo, nil)
	if err != nil {
		t.Fatalf("Load() returned error %v", err)
	}
	if loadIdx == nil {
		t.Fatalf("Load() returned nil index")
	}
	validateIndex(t, repo, loadIdx)

	newIdx, err := New(repo, nil)
	if err != nil {
		t.Fatalf("New() returned error %v", err)
	}
	if len(loadIdx.Packs) != len(newIdx.Packs) {
		t.Errorf("number of packs does not match: want %v, got %v",
			len(loadIdx.Packs), len(newIdx.Packs))
	}
	validateIndex(t, repo, newIdx)

	// Compare the two indexes pack by pack.
	for packID, packNew := range newIdx.Packs {
		packLoad, ok := loadIdx.Packs[packID]
		if !ok {
			t.Errorf("loaded index does not list pack %v", packID.Str())
			continue
		}
		if len(packNew.Entries) != len(packLoad.Entries) {
			t.Errorf(" number of entries in pack %v does not match: %d != %d\n %v\n %v",
				packID.Str(), len(packNew.Entries), len(packLoad.Entries),
				packNew.Entries, packLoad.Entries)
			continue
		}
		// Every entry of the fresh index must appear field-for-field in
		// the loaded pack.
		for _, entryNew := range packNew.Entries {
			found := false
			for _, entryLoad := range packLoad.Entries {
				if !entryLoad.ID.Equal(entryNew.ID) {
					continue
				}
				if entryLoad.Type != entryNew.Type {
					continue
				}
				if entryLoad.Offset != entryNew.Offset {
					continue
				}
				if entryLoad.Length != entryNew.Length {
					continue
				}
				found = true
				break
			}
			if !found {
				t.Errorf("blob not found in loaded index: %v", entryNew)
			}
		}
	}
}
// BenchmarkIndexNew measures building an index from a filled repository.
func BenchmarkIndexNew(b *testing.B) {
	repo, cleanup := createFilledRepo(b, 3, 0)
	defer cleanup()

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		idx, err := New(repo, nil)
		switch {
		case err != nil:
			b.Fatalf("New() returned error %v", err)
		case idx == nil:
			b.Fatalf("New() returned nil index")
		}
		b.Logf("idx %v packs", len(idx.Packs))
	}
}
// BenchmarkIndexSave measures saving an index built from a filled
// repository.
func BenchmarkIndexSave(b *testing.B) {
	repo, cleanup := createFilledRepo(b, 3, 0)
	defer cleanup()

	idx, buildErr := New(repo, nil)
	test.OK(b, buildErr)

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		id, saveErr := idx.Save(repo, nil)
		if saveErr != nil {
			b.Fatalf("New() returned error %v", saveErr)
		}
		b.Logf("saved as %v", id.Str())
	}
}
// TestIndexDuplicateBlobs builds an index over a repository seeded with
// 1% duplicate blobs and expects both the duplicates and their packs to
// be reported.
func TestIndexDuplicateBlobs(t *testing.T) {
	repo, cleanup := createFilledRepo(t, 3, 0.01)
	defer cleanup()

	index, err := New(repo, nil)
	if err != nil {
		t.Fatal(err)
	}

	duplicates := index.DuplicateBlobs()
	if len(duplicates) == 0 {
		t.Errorf("no duplicate blobs found")
	}
	t.Logf("%d packs, %d duplicate blobs", len(index.Packs), len(duplicates))

	packsWithDups := index.PacksForBlobs(duplicates)
	if len(packsWithDups) == 0 {
		t.Errorf("no packs with duplicate blobs found")
	}
	t.Logf("%d packs with duplicate blobs", len(packsWithDups))
}
// loadIndex loads the repository's index, failing the test on error.
func loadIndex(t testing.TB, repo restic.Repository) *Index {
	index, loadErr := Load(repo, nil)
	if loadErr != nil {
		t.Fatalf("Load() returned error %v", loadErr)
	}
	return index
}
// TestIndexSave stores a random ~half of the packs in a new index,
// removes the superseded index files, and checks that reloading yields
// exactly the saved pack set.
func TestIndexSave(t *testing.T) {
	repo, cleanup := createFilledRepo(t, 3, 0)
	defer cleanup()

	idx := loadIndex(t, repo)

	// Pick a random subset (~50%) of packs to keep.
	packs := make(map[restic.ID][]restic.Blob)
	for id := range idx.Packs {
		if rand.Float32() < 0.5 {
			packs[id] = idx.Packs[id].Entries
		}
	}

	t.Logf("save %d/%d packs in a new index\n", len(packs), len(idx.Packs))

	id, err := Save(repo, packs, idx.IndexIDs.List())
	if err != nil {
		t.Fatalf("unable to save new index: %v", err)
	}

	t.Logf("new index saved as %v", id.Str())

	// Remove the old (superseded) index files from the backend.
	for id := range idx.IndexIDs {
		t.Logf("remove index %v", id.Str())
		err = repo.Backend().Remove(restic.IndexFile, id.String())
		if err != nil {
			t.Errorf("error removing index %v: %v", id, err)
		}
	}

	idx2 := loadIndex(t, repo)
	t.Logf("load new index with %d packs", len(idx2.Packs))

	if len(idx2.Packs) != len(packs) {
		t.Errorf("wrong number of packs in new index, want %d, got %d", len(packs), len(idx2.Packs))
	}
	// Both directions: saved subset == reloaded pack set.
	for id := range packs {
		if _, ok := idx2.Packs[id]; !ok {
			t.Errorf("pack %v is not contained in new index", id.Str())
		}
	}

	for id := range idx2.Packs {
		if _, ok := packs[id]; !ok {
			t.Errorf("pack %v is not contained in new index", id.Str())
		}
	}
}
// TestIndexAddRemovePack removes one pack from the index and verifies
// that neither the pack nor any of its blobs can be found afterwards.
func TestIndexAddRemovePack(t *testing.T) {
	repo, cleanup := createFilledRepo(t, 3, 0)
	defer cleanup()

	idx, err := Load(repo, nil)
	if err != nil {
		t.Fatalf("Load() returned error %v", err)
	}

	done := make(chan struct{})
	defer close(done)

	// Take the first data pack the repository lists.
	packID := <-repo.List(restic.DataFile, done)

	t.Logf("selected pack %v", packID.Str())

	blobs := idx.Packs[packID].Entries

	idx.RemovePack(packID)

	if _, ok := idx.Packs[packID]; ok {
		t.Errorf("removed pack %v found in index.Packs", packID.Str())
	}

	// Every blob of the removed pack must now be unresolvable.
	for _, blob := range blobs {
		h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
		_, err := idx.FindBlob(h)
		if err == nil {
			t.Errorf("removed blob %v found in index", h)
		}
	}
}
// example index serialization from doc/Design.md
// (one pack holding two data blobs and one tree blob; used by
// TestIndexLoadDocReference below).
var docExample = []byte(`
{
"supersedes": [
"ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452"
],
"packs": [
{
"id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c",
"blobs": [
{
"id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce",
"type": "data",
"offset": 0,
"length": 25
},{
"id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae",
"type": "tree",
"offset": 38,
"length": 100
},
{
"id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66",
"type": "data",
"offset": 150,
"length": 123
}
]
}
]
}
`)
// TestIndexLoadDocReference saves the doc/Design.md example index and
// checks that a known blob resolves exactly once with the documented
// type, offset, and length.
func TestIndexLoadDocReference(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	id, err := repo.SaveUnpacked(restic.IndexFile, docExample)
	if err != nil {
		t.Fatalf("SaveUnpacked() returned error %v", err)
	}

	t.Logf("index saved as %v", id.Str())

	idx := loadIndex(t, repo)

	// The last data blob from docExample (offset 150, length 123).
	blobID := restic.TestParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66")
	locs, err := idx.FindBlob(restic.BlobHandle{ID: blobID, Type: restic.DataBlob})
	if err != nil {
		t.Errorf("FindBlob() returned error %v", err)
	}

	if len(locs) != 1 {
		t.Errorf("blob found %d times, expected just one", len(locs))
	}

	l := locs[0]
	if !l.ID.Equal(blobID) {
		t.Errorf("blob IDs are not equal: %v != %v", l.ID, blobID)
	}
	if l.Type != restic.DataBlob {
		t.Errorf("want type %v, got %v", restic.DataBlob, l.Type)
	}
	if l.Offset != 150 {
		t.Errorf("wrong offset, want %d, got %v", 150, l.Offset)
	}
	if l.Length != 123 {
		t.Errorf("wrong length, want %d, got %v", 123, l.Length)
	}
}
Improve BenchmarkIndexSave
package index
import (
"math/rand"
"restic"
"restic/repository"
"restic/test"
"testing"
"time"
)
var (
	// snapshotTime is the fixed base timestamp used for test snapshots;
	// snapshot i is created at snapshotTime + i seconds.
	snapshotTime = time.Unix(1470492820, 207401672)
	// depth is the directory tree depth of generated test snapshots.
	depth = 3
)
// createFilledRepo returns a test repository populated with the given
// number of snapshots (dup is the fraction of duplicate blobs), plus a
// cleanup function. The original ignored the snapshots argument and
// always created exactly three.
func createFilledRepo(t testing.TB, snapshots int, dup float32) (restic.Repository, func()) {
	repo, cleanup := repository.TestRepository(t)

	for i := 0; i < snapshots; i++ {
		restic.TestCreateSnapshot(t, repo, snapshotTime.Add(time.Duration(i)*time.Second), depth, dup)
	}

	return repo, cleanup
}
// validateIndex checks that every data pack stored in the repository is
// listed in idx.Packs.
func validateIndex(t testing.TB, repo restic.Repository, idx *Index) {
	for packID := range repo.List(restic.DataFile, nil) {
		_, listed := idx.Packs[packID]
		if !listed {
			t.Errorf("pack %v missing from index", packID.Str())
		}
	}
}
// TestIndexNew checks that New builds a non-nil index covering every
// pack in a freshly filled repository.
func TestIndexNew(t *testing.T) {
	repo, cleanup := createFilledRepo(t, 3, 0)
	defer cleanup()

	idx, err := New(repo, nil)
	switch {
	case err != nil:
		t.Fatalf("New() returned error %v", err)
	case idx == nil:
		t.Fatalf("New() returned nil index")
	}

	validateIndex(t, repo, idx)
}
// TestIndexLoad verifies that an index loaded from the repository agrees
// with one built from scratch: same pack count and, per pack, the same
// set of (ID, type, offset, length) entries.
func TestIndexLoad(t *testing.T) {
	repo, cleanup := createFilledRepo(t, 3, 0)
	defer cleanup()

	loadIdx, err := Load(repo, nil)
	if err != nil {
		t.Fatalf("Load() returned error %v", err)
	}
	if loadIdx == nil {
		t.Fatalf("Load() returned nil index")
	}
	validateIndex(t, repo, loadIdx)

	newIdx, err := New(repo, nil)
	if err != nil {
		t.Fatalf("New() returned error %v", err)
	}
	if len(loadIdx.Packs) != len(newIdx.Packs) {
		t.Errorf("number of packs does not match: want %v, got %v",
			len(loadIdx.Packs), len(newIdx.Packs))
	}
	validateIndex(t, repo, newIdx)

	// Compare the two indexes pack by pack.
	for packID, packNew := range newIdx.Packs {
		packLoad, ok := loadIdx.Packs[packID]
		if !ok {
			t.Errorf("loaded index does not list pack %v", packID.Str())
			continue
		}
		if len(packNew.Entries) != len(packLoad.Entries) {
			t.Errorf(" number of entries in pack %v does not match: %d != %d\n %v\n %v",
				packID.Str(), len(packNew.Entries), len(packLoad.Entries),
				packNew.Entries, packLoad.Entries)
			continue
		}
		// Every entry of the fresh index must appear field-for-field in
		// the loaded pack.
		for _, entryNew := range packNew.Entries {
			found := false
			for _, entryLoad := range packLoad.Entries {
				if !entryLoad.ID.Equal(entryNew.ID) {
					continue
				}
				if entryLoad.Type != entryNew.Type {
					continue
				}
				if entryLoad.Offset != entryNew.Offset {
					continue
				}
				if entryLoad.Length != entryNew.Length {
					continue
				}
				found = true
				break
			}
			if !found {
				t.Errorf("blob not found in loaded index: %v", entryNew)
			}
		}
	}
}
// BenchmarkIndexNew measures building an index from a filled repository.
func BenchmarkIndexNew(b *testing.B) {
	repo, cleanup := createFilledRepo(b, 3, 0)
	defer cleanup()

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		idx, err := New(repo, nil)
		switch {
		case err != nil:
			b.Fatalf("New() returned error %v", err)
		case idx == nil:
			b.Fatalf("New() returned nil index")
		}
		b.Logf("idx %v packs", len(idx.Packs))
	}
}
// BenchmarkIndexSave measures Save on a synthetic index of 8000 packs
// with 200 random blobs each.
func BenchmarkIndexSave(b *testing.B) {
	repo, cleanup := repository.TestRepository(b)
	defer cleanup()

	idx, err := New(repo, nil)
	test.OK(b, err)

	for i := 0; i < 8000; i++ {
		entries := make([]restic.Blob, 0, 200)
		// Fix: the original looped while j < len(entries) on a
		// zero-length slice, so it never appended a single blob and the
		// benchmark saved 8000 empty packs.
		for j := 0; j < 200; j++ {
			entries = append(entries, restic.Blob{
				ID:     restic.NewRandomID(),
				Length: 1000,
				Offset: 5,
				Type:   restic.DataBlob,
			})
		}

		idx.AddPack(restic.NewRandomID(), 10000, entries)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		id, err := idx.Save(repo, nil)
		if err != nil {
			b.Fatalf("New() returned error %v", err)
		}
		b.Logf("saved as %v", id.Str())
	}
}
// TestIndexDuplicateBlobs builds an index over a repository seeded with
// 1% duplicate blobs and expects both the duplicates and their packs to
// be reported.
func TestIndexDuplicateBlobs(t *testing.T) {
	repo, cleanup := createFilledRepo(t, 3, 0.01)
	defer cleanup()

	index, err := New(repo, nil)
	if err != nil {
		t.Fatal(err)
	}

	duplicates := index.DuplicateBlobs()
	if len(duplicates) == 0 {
		t.Errorf("no duplicate blobs found")
	}
	t.Logf("%d packs, %d duplicate blobs", len(index.Packs), len(duplicates))

	packsWithDups := index.PacksForBlobs(duplicates)
	if len(packsWithDups) == 0 {
		t.Errorf("no packs with duplicate blobs found")
	}
	t.Logf("%d packs with duplicate blobs", len(packsWithDups))
}
// loadIndex loads the repository's index, failing the test on error.
func loadIndex(t testing.TB, repo restic.Repository) *Index {
	index, loadErr := Load(repo, nil)
	if loadErr != nil {
		t.Fatalf("Load() returned error %v", loadErr)
	}
	return index
}
// TestIndexSave stores a random ~half of the packs in a new index,
// removes the superseded index files, and checks that reloading yields
// exactly the saved pack set.
func TestIndexSave(t *testing.T) {
	repo, cleanup := createFilledRepo(t, 3, 0)
	defer cleanup()

	idx := loadIndex(t, repo)

	// Pick a random subset (~50%) of packs to keep.
	packs := make(map[restic.ID][]restic.Blob)
	for id := range idx.Packs {
		if rand.Float32() < 0.5 {
			packs[id] = idx.Packs[id].Entries
		}
	}

	t.Logf("save %d/%d packs in a new index\n", len(packs), len(idx.Packs))

	id, err := Save(repo, packs, idx.IndexIDs.List())
	if err != nil {
		t.Fatalf("unable to save new index: %v", err)
	}

	t.Logf("new index saved as %v", id.Str())

	// Remove the old (superseded) index files from the backend.
	for id := range idx.IndexIDs {
		t.Logf("remove index %v", id.Str())
		err = repo.Backend().Remove(restic.IndexFile, id.String())
		if err != nil {
			t.Errorf("error removing index %v: %v", id, err)
		}
	}

	idx2 := loadIndex(t, repo)
	t.Logf("load new index with %d packs", len(idx2.Packs))

	if len(idx2.Packs) != len(packs) {
		t.Errorf("wrong number of packs in new index, want %d, got %d", len(packs), len(idx2.Packs))
	}
	// Both directions: saved subset == reloaded pack set.
	for id := range packs {
		if _, ok := idx2.Packs[id]; !ok {
			t.Errorf("pack %v is not contained in new index", id.Str())
		}
	}

	for id := range idx2.Packs {
		if _, ok := packs[id]; !ok {
			t.Errorf("pack %v is not contained in new index", id.Str())
		}
	}
}
// TestIndexAddRemovePack removes one pack from the index and verifies
// that neither the pack nor any of its blobs can be found afterwards.
func TestIndexAddRemovePack(t *testing.T) {
	repo, cleanup := createFilledRepo(t, 3, 0)
	defer cleanup()

	idx, err := Load(repo, nil)
	if err != nil {
		t.Fatalf("Load() returned error %v", err)
	}

	done := make(chan struct{})
	defer close(done)

	// Take the first data pack the repository lists.
	packID := <-repo.List(restic.DataFile, done)

	t.Logf("selected pack %v", packID.Str())

	blobs := idx.Packs[packID].Entries

	idx.RemovePack(packID)

	if _, ok := idx.Packs[packID]; ok {
		t.Errorf("removed pack %v found in index.Packs", packID.Str())
	}

	// Every blob of the removed pack must now be unresolvable.
	for _, blob := range blobs {
		h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
		_, err := idx.FindBlob(h)
		if err == nil {
			t.Errorf("removed blob %v found in index", h)
		}
	}
}
// example index serialization from doc/Design.md
// (one pack holding two data blobs and one tree blob; used by
// TestIndexLoadDocReference below).
var docExample = []byte(`
{
"supersedes": [
"ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452"
],
"packs": [
{
"id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c",
"blobs": [
{
"id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce",
"type": "data",
"offset": 0,
"length": 25
},{
"id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae",
"type": "tree",
"offset": 38,
"length": 100
},
{
"id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66",
"type": "data",
"offset": 150,
"length": 123
}
]
}
]
}
`)
// TestIndexLoadDocReference saves the doc/Design.md example index and
// checks that a known blob resolves exactly once with the documented
// type, offset, and length.
func TestIndexLoadDocReference(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	id, err := repo.SaveUnpacked(restic.IndexFile, docExample)
	if err != nil {
		t.Fatalf("SaveUnpacked() returned error %v", err)
	}

	t.Logf("index saved as %v", id.Str())

	idx := loadIndex(t, repo)

	// The last data blob from docExample (offset 150, length 123).
	blobID := restic.TestParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66")
	locs, err := idx.FindBlob(restic.BlobHandle{ID: blobID, Type: restic.DataBlob})
	if err != nil {
		t.Errorf("FindBlob() returned error %v", err)
	}

	if len(locs) != 1 {
		t.Errorf("blob found %d times, expected just one", len(locs))
	}

	l := locs[0]
	if !l.ID.Equal(blobID) {
		t.Errorf("blob IDs are not equal: %v != %v", l.ID, blobID)
	}
	if l.Type != restic.DataBlob {
		t.Errorf("want type %v, got %v", restic.DataBlob, l.Type)
	}
	if l.Offset != 150 {
		t.Errorf("wrong offset, want %d, got %v", 150, l.Offset)
	}
	if l.Length != 123 {
		t.Errorf("wrong length, want %d, got %v", 123, l.Length)
	}
}
|
package manager
import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"strings"
	"time"

	log "github.com/Sirupsen/logrus"
	r "github.com/dancannon/gorethink"
	"github.com/gorilla/sessions"
	"github.com/samalba/dockerclient"
	"github.com/shipyard/shipyard"
	"github.com/shipyard/shipyard/auth"
	"github.com/shipyard/shipyard/dockerhub"
	"github.com/shipyard/shipyard/version"
)
// RethinkDB table names, the session-store key, the anonymous usage
// tracker endpoint, and node health states.
const (
	tblNameConfig      = "config"
	tblNameEvents      = "events"
	tblNameAccounts    = "accounts"
	tblNameRoles       = "roles"
	tblNameServiceKeys = "service_keys"
	tblNameExtensions  = "extensions"
	tblNameWebhookKeys = "webhook_keys"
	tblNameRegistries  = "registries"
	tblNameConsole     = "console"
	storeKey           = "shipyard"
	trackerHost        = "http://tracker.shipyard-project.com"
	NodeHealthUp       = "up"
	NodeHealthDown     = "down"
)
var (
	// Sentinel errors returned by Manager implementations when a
	// requested entity is missing or a token is invalid.
	ErrAccountExists              = errors.New("account already exists")
	ErrAccountDoesNotExist        = errors.New("account does not exist")
	ErrRoleDoesNotExist           = errors.New("role does not exist")
	ErrNodeDoesNotExist           = errors.New("node does not exist")
	ErrServiceKeyDoesNotExist     = errors.New("service key does not exist")
	ErrInvalidAuthToken           = errors.New("invalid auth token")
	ErrExtensionDoesNotExist      = errors.New("extension does not exist")
	ErrWebhookKeyDoesNotExist     = errors.New("webhook key does not exist")
	ErrRegistryDoesNotExist       = errors.New("registry does not exist")
	ErrConsoleSessionDoesNotExist = errors.New("console session does not exist")
	// store is the shared cookie store used for web sessions.
	store = sessions.NewCookieStore([]byte(storeKey))
)
type (
	// DefaultManager is the standard Manager implementation, backed by
	// a RethinkDB session for persistence and a Docker client for
	// container operations.
	DefaultManager struct {
		storeKey         string
		database         string
		authKey          string
		session          *r.Session
		authenticator    auth.Authenticator
		store            *sessions.CookieStore
		client           *dockerclient.DockerClient
		disableUsageInfo bool
	}
	// ScaleResult reports the outcome of ScaleContainer: the IDs of the
	// containers that were scaled and any error messages encountered.
	ScaleResult struct {
		Scaled []string
		Errors []string
	}
	// Manager is the full management API: accounts, roles, auth tokens,
	// service/webhook keys, events, nodes, registries, and console
	// sessions.
	Manager interface {
		Accounts() ([]*auth.Account, error)
		Account(username string) (*auth.Account, error)
		Authenticate(username, password string) (bool, error)
		GetAuthenticator() auth.Authenticator
		SaveAccount(account *auth.Account) error
		DeleteAccount(account *auth.Account) error
		Roles() ([]*auth.ACL, error)
		Role(name string) (*auth.ACL, error)
		Store() *sessions.CookieStore
		StoreKey() string
		Container(id string) (*dockerclient.ContainerInfo, error)
		ScaleContainer(id string, numInstances int) ScaleResult
		SaveServiceKey(key *auth.ServiceKey) error
		RemoveServiceKey(key string) error
		SaveEvent(event *shipyard.Event) error
		Events(limit int) ([]*shipyard.Event, error)
		PurgeEvents() error
		ServiceKey(key string) (*auth.ServiceKey, error)
		ServiceKeys() ([]*auth.ServiceKey, error)
		NewAuthToken(username string, userAgent string) (*auth.AuthToken, error)
		VerifyAuthToken(username, token string) error
		VerifyServiceKey(key string) error
		NewServiceKey(description string) (*auth.ServiceKey, error)
		ChangePassword(username, password string) error
		WebhookKey(key string) (*dockerhub.WebhookKey, error)
		WebhookKeys() ([]*dockerhub.WebhookKey, error)
		NewWebhookKey(image string) (*dockerhub.WebhookKey, error)
		SaveWebhookKey(key *dockerhub.WebhookKey) error
		DeleteWebhookKey(id string) error
		DockerClient() *dockerclient.DockerClient
		Nodes() ([]*shipyard.Node, error)
		Node(name string) (*shipyard.Node, error)
		AddRegistry(registry *shipyard.Registry) error
		RemoveRegistry(registry *shipyard.Registry) error
		Registries() ([]*shipyard.Registry, error)
		Registry(name string) (*shipyard.Registry, error)
		CreateConsoleSession(c *shipyard.ConsoleSession) error
		RemoveConsoleSession(c *shipyard.ConsoleSession) error
		ConsoleSession(token string) (*shipyard.ConsoleSession, error)
		ValidateConsoleSessionToken(containerId, token string) bool
	}
)
// NewManager connects to RethinkDB at addr, ensures the database and
// its tables exist, starts the background usage reporter, and returns
// the assembled manager. It returns an error only when the initial
// connection fails.
func NewManager(addr string, database string, authKey string, client *dockerclient.DockerClient, disableUsageInfo bool, authenticator auth.Authenticator) (Manager, error) {
	session, err := r.Connect(r.ConnectOpts{
		Address:     addr,
		Database:    database,
		AuthKey:     authKey,
		MaxIdle:     10,
		IdleTimeout: time.Second * 30,
	})
	if err != nil {
		return nil, err
	}
	log.Info("checking database")
	// Error deliberately ignored: DbCreate fails when the database
	// already exists, which is the common case on restart.
	r.DbCreate(database).Run(session)
	m := &DefaultManager{
		database:         database,
		authKey:          authKey,
		session:          session,
		authenticator:    authenticator,
		store:            store,
		client:           client,
		storeKey:         storeKey,
		disableUsageInfo: disableUsageInfo,
	}
	m.initdb()
	// init currently always returns nil, so its result is not checked.
	m.init()
	return m, nil
}
func (m DefaultManager) Store() *sessions.CookieStore {
return m.store
}
func (m DefaultManager) DockerClient() *dockerclient.DockerClient {
return m.client
}
func (m DefaultManager) StoreKey() string {
return m.storeKey
}
// initdb ensures every table exists. Existence is probed with a read; a
// failed read triggers TableCreate, and an unrecoverable create error is
// fatal (the manager cannot operate without its tables).
func (m DefaultManager) initdb() {
	// create tables if needed
	tables := []string{tblNameConfig, tblNameEvents, tblNameAccounts, tblNameRoles, tblNameConsole, tblNameServiceKeys, tblNameRegistries, tblNameExtensions, tblNameWebhookKeys}
	for _, tbl := range tables {
		_, err := r.Table(tbl).Run(m.session)
		if err != nil {
			if _, err := r.Db(m.database).TableCreate(tbl).Run(m.session); err != nil {
				log.Fatalf("error creating table: %s", err)
			}
		}
	}
}
func (m DefaultManager) init() error {
// anonymous usage info
go m.usageReport()
return nil
}
func (m DefaultManager) logEvent(eventType, message string, tags []string) {
evt := &shipyard.Event{
Type: eventType,
Time: time.Now(),
Message: message,
Tags: tags,
}
if err := m.SaveEvent(evt); err != nil {
log.Errorf("error logging event: %s", err)
}
}
// usageReport uploads anonymous usage info once at startup and then every
// hour, unless usage reporting is disabled. It is started as a goroutine
// from init() and never returns while reporting is enabled.
func (m DefaultManager) usageReport() {
	if m.disableUsageInfo {
		return
	}
	m.uploadUsage()
	t := time.NewTicker(1 * time.Hour).C
	// a single-case select over the ticker channel is just a receive
	// loop; range it directly
	for range t {
		go m.uploadUsage()
	}
}
// uploadUsage sends an anonymous usage report (an id derived from the MAC
// address of the first non-loopback interface, falling back to "anon",
// plus the version) to the tracker endpoint. Failures are logged as
// warnings and otherwise ignored.
func (m DefaultManager) uploadUsage() {
	id := "anon"
	ifaces, err := net.Interfaces()
	if err == nil {
		for _, iface := range ifaces {
			if iface.Name != "lo" {
				hw := iface.HardwareAddr.String()
				id = strings.Replace(hw, ":", "", -1)
				break
			}
		}
	}
	usage := &shipyard.Usage{
		ID:      id,
		Version: version.Version,
	}
	b, err := json.Marshal(usage)
	if err != nil {
		log.Warnf("error serializing usage info: %s", err)
		// nothing valid to send; the previous code fell through and
		// posted an empty body
		return
	}
	buf := bytes.NewBuffer(b)
	resp, err := http.Post(fmt.Sprintf("%s/update", trackerHost), "application/json", buf)
	if err != nil {
		log.Warnf("error sending usage info: %s", err)
		return
	}
	// the response body must be closed; the previous code discarded the
	// response and leaked the connection
	resp.Body.Close()
}
func (m DefaultManager) Container(id string) (*dockerclient.ContainerInfo, error) {
return m.client.InspectContainer(id)
}
// ScaleContainer starts numInstances additional copies of the container
// identified by id, collecting the ids of successfully started containers
// and any error messages into the returned ScaleResult.
//
// Bug fix: the previous version looped from 1 (starting one instance too
// few, and none at all when scaling by 1) and wrapped the goroutine body
// in an endless for loop, so a goroutine kept creating containers after
// its first success. Each goroutine now performs exactly one
// create/start, and the collector receives exactly one result per
// goroutine.
func (m DefaultManager) ScaleContainer(id string, numInstances int) ScaleResult {
	var (
		errChan = make(chan (error))
		resChan = make(chan (string))
		result  = ScaleResult{Scaled: make([]string, 0), Errors: make([]string, 0)}
	)
	containerInfo, err := m.Container(id)
	if err != nil {
		result.Errors = append(result.Errors, err.Error())
		return result
	}
	for i := 0; i < numInstances; i++ {
		go func(instance int) {
			log.Debugf("scaling: id=%s #=%d", containerInfo.Id, instance)
			config := containerInfo.Config
			// clear hostname to get a newly generated
			config.Hostname = ""
			hostConfig := containerInfo.HostConfig
			id, err := m.client.CreateContainer(config, "")
			if err != nil {
				errChan <- err
				return
			}
			if err := m.client.StartContainer(id, hostConfig); err != nil {
				errChan <- err
				return
			}
			resChan <- id
		}(i)
	}
	for i := 0; i < numInstances; i++ {
		select {
		case id := <-resChan:
			result.Scaled = append(result.Scaled, id)
		case err := <-errChan:
			log.Errorf("error scaling container: err=%s", strings.TrimSpace(err.Error()))
			result.Errors = append(result.Errors, strings.TrimSpace(err.Error()))
		}
	}
	return result
}
func (m DefaultManager) SaveServiceKey(key *auth.ServiceKey) error {
if _, err := r.Table(tblNameServiceKeys).Insert(key).RunWrite(m.session); err != nil {
return err
}
m.logEvent("add-service-key", fmt.Sprintf("description=%s", key.Description), []string{"security"})
return nil
}
func (m DefaultManager) RemoveServiceKey(key string) error {
if _, err := r.Table(tblNameServiceKeys).Filter(map[string]string{"key": key}).Delete().RunWrite(m.session); err != nil {
return err
}
m.logEvent("delete-service-key", fmt.Sprintf("key=%s", key), []string{"security"})
return nil
}
func (m DefaultManager) SaveEvent(event *shipyard.Event) error {
if _, err := r.Table(tblNameEvents).Insert(event).RunWrite(m.session); err != nil {
return err
}
return nil
}
// Events returns events ordered newest-first. A limit of -1 (or any
// negative value) returns all events; otherwise at most limit events are
// returned.
func (m DefaultManager) Events(limit int) ([]*shipyard.Event, error) {
	t := r.Table(tblNameEvents).OrderBy(r.Desc("Time"))
	if limit > -1 {
		// gorethink terms are immutable; the previous code discarded the
		// result of Limit, so the limit was never actually applied
		t = t.Limit(limit)
	}
	res, err := t.Run(m.session)
	if err != nil {
		return nil, err
	}
	events := []*shipyard.Event{}
	if err := res.All(&events); err != nil {
		return nil, err
	}
	return events, nil
}
func (m DefaultManager) PurgeEvents() error {
if _, err := r.Table(tblNameEvents).Delete().RunWrite(m.session); err != nil {
return err
}
return nil
}
func (m DefaultManager) ServiceKey(key string) (*auth.ServiceKey, error) {
res, err := r.Table(tblNameServiceKeys).Filter(map[string]string{"key": key}).Run(m.session)
if err != nil {
return nil, err
}
if res.IsNil() {
return nil, ErrServiceKeyDoesNotExist
}
var k *auth.ServiceKey
if err := res.One(&k); err != nil {
return nil, err
}
return k, nil
}
func (m DefaultManager) ServiceKeys() ([]*auth.ServiceKey, error) {
res, err := r.Table(tblNameServiceKeys).Run(m.session)
if err != nil {
return nil, err
}
keys := []*auth.ServiceKey{}
if err := res.All(&keys); err != nil {
return nil, err
}
return keys, nil
}
func (m DefaultManager) Accounts() ([]*auth.Account, error) {
res, err := r.Table(tblNameAccounts).OrderBy(r.Asc("username")).Run(m.session)
if err != nil {
return nil, err
}
accounts := []*auth.Account{}
if err := res.All(&accounts); err != nil {
return nil, err
}
return accounts, nil
}
func (m DefaultManager) Account(username string) (*auth.Account, error) {
res, err := r.Table(tblNameAccounts).Filter(map[string]string{"username": username}).Run(m.session)
if err != nil {
return nil, err
}
if res.IsNil() {
return nil, ErrAccountDoesNotExist
}
var account *auth.Account
if err := res.One(&account); err != nil {
return nil, err
}
return account, nil
}
// SaveAccount creates the account if it does not exist, otherwise updates
// the existing record's name, roles, and (when a new password was
// supplied) password hash. Plaintext passwords are hashed before storage.
func (m DefaultManager) SaveAccount(account *auth.Account) error {
	var (
		hash      string
		eventType string
	)
	// hash up front so a hashing failure aborts before any DB write
	if account.Password != "" {
		h, err := auth.Hash(account.Password)
		if err != nil {
			return err
		}
		hash = h
	}
	// check if exists; if so, update
	acct, err := m.Account(account.Username)
	if err != nil && err != ErrAccountDoesNotExist {
		return err
	}
	// update
	if acct != nil {
		updates := map[string]interface{}{
			"first_name": account.FirstName,
			"last_name":  account.LastName,
			"roles":      account.Roles,
		}
		// only overwrite the password when a new one was provided
		if account.Password != "" {
			updates["password"] = hash
		}
		if _, err := r.Table(tblNameAccounts).Filter(map[string]string{"username": account.Username}).Update(updates).RunWrite(m.session); err != nil {
			return err
		}
		eventType = "update-account"
	} else {
		account.Password = hash
		if _, err := r.Table(tblNameAccounts).Insert(account).RunWrite(m.session); err != nil {
			return err
		}
		eventType = "add-account"
	}
	m.logEvent(eventType, fmt.Sprintf("username=%s", account.Username), []string{"security"})
	return nil
}
func (m DefaultManager) DeleteAccount(account *auth.Account) error {
res, err := r.Table(tblNameAccounts).Filter(map[string]string{"id": account.ID}).Delete().Run(m.session)
if err != nil {
return err
}
if res.IsNil() {
return ErrAccountDoesNotExist
}
m.logEvent("delete-account", fmt.Sprintf("username=%s", account.Username), []string{"security"})
return nil
}
func (m DefaultManager) Roles() ([]*auth.ACL, error) {
roles := auth.DefaultACLs()
return roles, nil
}
// Role looks up an ACL by role name. It returns (nil, nil) when no role
// with that name exists.
func (m DefaultManager) Role(name string) (*auth.ACL, error) {
	acls, err := m.Roles()
	if err != nil {
		return nil, err
	}
	for _, acl := range acls {
		if acl.RoleName != name {
			continue
		}
		return acl, nil
	}
	return nil, nil
}
func (m DefaultManager) GetAuthenticator() auth.Authenticator {
return m.authenticator
}
func (m DefaultManager) Authenticate(username, password string) (bool, error) {
// only get the account to get the hashed password if using the builtin auth
passwordHash := ""
if m.authenticator.Name() == "builtin" {
acct, err := m.Account(username)
if err != nil {
log.Error(err)
return false, err
}
passwordHash = acct.Password
}
return m.authenticator.Authenticate(username, password, passwordHash)
}
// NewAuthToken generates a fresh auth token for username and stores it
// keyed by userAgent: an existing token for the same user agent is
// replaced in place, otherwise a new token entry is appended. The full
// token list is then written back to the account record.
func (m DefaultManager) NewAuthToken(username string, userAgent string) (*auth.AuthToken, error) {
	tk, err := m.authenticator.GenerateToken()
	if err != nil {
		return nil, err
	}
	acct, err := m.Account(username)
	if err != nil {
		return nil, err
	}
	token := &auth.AuthToken{}
	tokens := acct.Tokens
	found := false
	// reuse the slot for this user agent if one already exists
	for _, t := range tokens {
		if t.UserAgent == userAgent {
			found = true
			t.Token = tk
			token = t
			break
		}
	}
	if !found {
		token = &auth.AuthToken{
			UserAgent: userAgent,
			Token:     tk,
		}
		tokens = append(tokens, token)
	}
	// delete token
	// NOTE(review): this filters account documents by a top-level
	// "user_agent" field, but tokens live in the "tokens" array —
	// presumably this delete matches nothing and the Update below does
	// the real replacement; confirm against the account schema.
	if _, err := r.Table(tblNameAccounts).Filter(map[string]string{"username": username}).Filter(r.Row.Field("user_agent").Eq(userAgent)).Delete().Run(m.session); err != nil {
		return nil, err
	}
	// add
	if _, err := r.Table(tblNameAccounts).Filter(map[string]string{"username": username}).Update(map[string]interface{}{"tokens": tokens}).RunWrite(m.session); err != nil {
		return nil, err
	}
	return token, nil
}
// VerifyAuthToken checks that token is one of the stored auth tokens for
// username, returning ErrInvalidAuthToken when it is not.
func (m DefaultManager) VerifyAuthToken(username, token string) error {
	acct, err := m.Account(username)
	if err != nil {
		return err
	}
	for _, t := range acct.Tokens {
		if t.Token == token {
			return nil
		}
	}
	return ErrInvalidAuthToken
}
// VerifyServiceKey returns an error when key is not a stored service key.
func (m DefaultManager) VerifyServiceKey(key string) error {
	_, err := m.ServiceKey(key)
	return err
}
func (m DefaultManager) NewServiceKey(description string) (*auth.ServiceKey, error) {
k, err := m.authenticator.GenerateToken()
if err != nil {
return nil, err
}
key := &auth.ServiceKey{
Key: k[24:],
Description: description,
}
if err := m.SaveServiceKey(key); err != nil {
return nil, err
}
return key, nil
}
// ChangePassword hashes password and stores it for username. It returns
// an error when the configured authenticator does not support updates
// (e.g. an external directory).
func (m DefaultManager) ChangePassword(username, password string) error {
	if !m.authenticator.IsUpdateSupported() {
		return fmt.Errorf("not supported for authenticator: %s", m.authenticator.Name())
	}
	hash, err := auth.Hash(password)
	if err != nil {
		return err
	}
	// RunWrite (not Run) for consistency with every other write path in
	// this file; it also surfaces server-side write errors
	if _, err := r.Table(tblNameAccounts).Filter(map[string]string{"username": username}).Update(map[string]string{"password": hash}).RunWrite(m.session); err != nil {
		return err
	}
	m.logEvent("change-password", username, []string{"security"})
	return nil
}
func (m DefaultManager) WebhookKey(key string) (*dockerhub.WebhookKey, error) {
res, err := r.Table(tblNameWebhookKeys).Filter(map[string]string{"key": key}).Run(m.session)
if err != nil {
return nil, err
}
if res.IsNil() {
return nil, ErrWebhookKeyDoesNotExist
}
var k *dockerhub.WebhookKey
if err := res.One(&k); err != nil {
return nil, err
}
return k, nil
}
func (m DefaultManager) WebhookKeys() ([]*dockerhub.WebhookKey, error) {
res, err := r.Table(tblNameWebhookKeys).OrderBy(r.Asc("image")).Run(m.session)
if err != nil {
return nil, err
}
keys := []*dockerhub.WebhookKey{}
if err := res.All(&keys); err != nil {
return nil, err
}
return keys, nil
}
func (m DefaultManager) NewWebhookKey(image string) (*dockerhub.WebhookKey, error) {
k := generateId(16)
key := &dockerhub.WebhookKey{
Key: k,
Image: image,
}
if err := m.SaveWebhookKey(key); err != nil {
return nil, err
}
return key, nil
}
func (m DefaultManager) SaveWebhookKey(key *dockerhub.WebhookKey) error {
if _, err := r.Table(tblNameWebhookKeys).Insert(key).RunWrite(m.session); err != nil {
return err
}
m.logEvent("add-webhook-key", fmt.Sprintf("image=%s", key.Image), []string{"webhook"})
return nil
}
func (m DefaultManager) DeleteWebhookKey(id string) error {
key, err := m.WebhookKey(id)
if err != nil {
return err
}
res, err := r.Table(tblNameWebhookKeys).Get(key.ID).Delete().Run(m.session)
if err != nil {
return err
}
if res.IsNil() {
return ErrWebhookKeyDoesNotExist
}
m.logEvent("delete-webhook-key", fmt.Sprintf("image=%s", key.Image), []string{"webhook"})
return nil
}
func (m DefaultManager) Nodes() ([]*shipyard.Node, error) {
info, err := m.client.Info()
if err != nil {
return nil, err
}
nodes, err := parseClusterNodes(info.DriverStatus)
if err != nil {
return nil, err
}
return nodes, nil
}
func (m DefaultManager) Node(name string) (*shipyard.Node, error) {
nodes, err := m.Nodes()
if err != nil {
return nil, err
}
for _, node := range nodes {
if node.Name == name {
return node, nil
}
}
return nil, nil
}
func (m DefaultManager) AddRegistry(registry *shipyard.Registry) error {
if _, err := r.Table(tblNameRegistries).Insert(registry).RunWrite(m.session); err != nil {
return err
}
m.logEvent("add-registry", fmt.Sprintf("name=%s endpoint=%s", registry.Name, registry.Addr), []string{"registry"})
return nil
}
func (m DefaultManager) RemoveRegistry(registry *shipyard.Registry) error {
res, err := r.Table(tblNameRegistries).Get(registry.ID).Delete().Run(m.session)
if err != nil {
return err
}
if res.IsNil() {
return ErrRegistryDoesNotExist
}
m.logEvent("delete-registry", fmt.Sprintf("name=%s endpoint=%s", registry.Name, registry.Addr), []string{"registry"})
return nil
}
func (m DefaultManager) Registries() ([]*shipyard.Registry, error) {
res, err := r.Table(tblNameRegistries).OrderBy(r.Asc("name")).Run(m.session)
if err != nil {
return nil, err
}
regs := []*shipyard.Registry{}
if err := res.All(®s); err != nil {
return nil, err
}
registries := []*shipyard.Registry{}
for _, r := range regs {
reg, err := shipyard.NewRegistry(r.ID, r.Name, r.Addr)
if err != nil {
return nil, err
}
registries = append(registries, reg)
}
return registries, nil
}
func (m DefaultManager) Registry(name string) (*shipyard.Registry, error) {
res, err := r.Table(tblNameRegistries).Filter(map[string]string{"name": name}).Run(m.session)
if err != nil {
return nil, err
}
if res.IsNil() {
return nil, ErrRegistryDoesNotExist
}
var reg *shipyard.Registry
if err := res.One(®); err != nil {
return nil, err
}
registry, err := shipyard.NewRegistry(reg.ID, reg.Name, reg.Addr)
if err != nil {
return nil, err
}
return registry, nil
}
func (m DefaultManager) CreateConsoleSession(c *shipyard.ConsoleSession) error {
if _, err := r.Table(tblNameConsole).Insert(c).RunWrite(m.session); err != nil {
return err
}
m.logEvent("create-console-session", fmt.Sprintf("container=%s", c.ContainerID), []string{"console"})
return nil
}
func (m DefaultManager) RemoveConsoleSession(c *shipyard.ConsoleSession) error {
res, err := r.Table(tblNameConsole).Get(c.ID).Delete().Run(m.session)
if err != nil {
return err
}
if res.IsNil() {
return ErrConsoleSessionDoesNotExist
}
return nil
}
func (m DefaultManager) ConsoleSession(token string) (*shipyard.ConsoleSession, error) {
res, err := r.Table(tblNameConsole).Filter(map[string]string{"token": token}).Run(m.session)
if err != nil {
return nil, err
}
if res.IsNil() {
return nil, ErrConsoleSessionDoesNotExist
}
var c *shipyard.ConsoleSession
if err := res.One(&c); err != nil {
return nil, err
}
return c, nil
}
// ValidateConsoleSessionToken reports whether token is a valid console
// session token for containerId. A matching session is consumed (removed)
// on success, making each token single-use.
func (m DefaultManager) ValidateConsoleSessionToken(containerId string, token string) bool {
	cs, err := m.ConsoleSession(token)
	if err != nil {
		log.Errorf("error validating console session token: %s", err)
		return false
	}
	// the token must belong to the container it is being used for
	if cs == nil || cs.ContainerID != containerId {
		log.Warnf("unauthorized token request: %s", token)
		return false
	}
	// single-use: remove the session once validated
	if err := m.RemoveConsoleSession(cs); err != nil {
		log.Error(err)
		return false
	}
	return true
}
Fixed an issue where scaling a container by 1 instance started no new instances (and scaling by N started only N-1, with a runaway creation loop).
Signed-off-by: Lynda O'Leary <ca6aff33f1c72706d97ec149b2993bafa69e3e2b@hotmail.com>
package manager
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net"
"net/http"
"strings"
"time"
log "github.com/Sirupsen/logrus"
r "github.com/dancannon/gorethink"
"github.com/gorilla/sessions"
"github.com/samalba/dockerclient"
"github.com/shipyard/shipyard"
"github.com/shipyard/shipyard/auth"
"github.com/shipyard/shipyard/dockerhub"
"github.com/shipyard/shipyard/version"
)
const (
tblNameConfig = "config"
tblNameEvents = "events"
tblNameAccounts = "accounts"
tblNameRoles = "roles"
tblNameServiceKeys = "service_keys"
tblNameExtensions = "extensions"
tblNameWebhookKeys = "webhook_keys"
tblNameRegistries = "registries"
tblNameConsole = "console"
storeKey = "shipyard"
trackerHost = "http://tracker.shipyard-project.com"
NodeHealthUp = "up"
NodeHealthDown = "down"
)
var (
ErrAccountExists = errors.New("account already exists")
ErrAccountDoesNotExist = errors.New("account does not exist")
ErrRoleDoesNotExist = errors.New("role does not exist")
ErrNodeDoesNotExist = errors.New("node does not exist")
ErrServiceKeyDoesNotExist = errors.New("service key does not exist")
ErrInvalidAuthToken = errors.New("invalid auth token")
ErrExtensionDoesNotExist = errors.New("extension does not exist")
ErrWebhookKeyDoesNotExist = errors.New("webhook key does not exist")
ErrRegistryDoesNotExist = errors.New("registry does not exist")
ErrConsoleSessionDoesNotExist = errors.New("console session does not exist")
store = sessions.NewCookieStore([]byte(storeKey))
)
type (
DefaultManager struct {
storeKey string
database string
authKey string
session *r.Session
authenticator auth.Authenticator
store *sessions.CookieStore
client *dockerclient.DockerClient
disableUsageInfo bool
}
ScaleResult struct {
Scaled []string
Errors []string
}
Manager interface {
Accounts() ([]*auth.Account, error)
Account(username string) (*auth.Account, error)
Authenticate(username, password string) (bool, error)
GetAuthenticator() auth.Authenticator
SaveAccount(account *auth.Account) error
DeleteAccount(account *auth.Account) error
Roles() ([]*auth.ACL, error)
Role(name string) (*auth.ACL, error)
Store() *sessions.CookieStore
StoreKey() string
Container(id string) (*dockerclient.ContainerInfo, error)
ScaleContainer(id string, numInstances int) ScaleResult
SaveServiceKey(key *auth.ServiceKey) error
RemoveServiceKey(key string) error
SaveEvent(event *shipyard.Event) error
Events(limit int) ([]*shipyard.Event, error)
PurgeEvents() error
ServiceKey(key string) (*auth.ServiceKey, error)
ServiceKeys() ([]*auth.ServiceKey, error)
NewAuthToken(username string, userAgent string) (*auth.AuthToken, error)
VerifyAuthToken(username, token string) error
VerifyServiceKey(key string) error
NewServiceKey(description string) (*auth.ServiceKey, error)
ChangePassword(username, password string) error
WebhookKey(key string) (*dockerhub.WebhookKey, error)
WebhookKeys() ([]*dockerhub.WebhookKey, error)
NewWebhookKey(image string) (*dockerhub.WebhookKey, error)
SaveWebhookKey(key *dockerhub.WebhookKey) error
DeleteWebhookKey(id string) error
DockerClient() *dockerclient.DockerClient
Nodes() ([]*shipyard.Node, error)
Node(name string) (*shipyard.Node, error)
AddRegistry(registry *shipyard.Registry) error
RemoveRegistry(registry *shipyard.Registry) error
Registries() ([]*shipyard.Registry, error)
Registry(name string) (*shipyard.Registry, error)
CreateConsoleSession(c *shipyard.ConsoleSession) error
RemoveConsoleSession(c *shipyard.ConsoleSession) error
ConsoleSession(token string) (*shipyard.ConsoleSession, error)
ValidateConsoleSessionToken(containerId, token string) bool
}
)
func NewManager(addr string, database string, authKey string, client *dockerclient.DockerClient, disableUsageInfo bool, authenticator auth.Authenticator) (Manager, error) {
session, err := r.Connect(r.ConnectOpts{
Address: addr,
Database: database,
AuthKey: authKey,
MaxIdle: 10,
IdleTimeout: time.Second * 30,
})
if err != nil {
return nil, err
}
log.Info("checking database")
r.DbCreate(database).Run(session)
m := &DefaultManager{
database: database,
authKey: authKey,
session: session,
authenticator: authenticator,
store: store,
client: client,
storeKey: storeKey,
disableUsageInfo: disableUsageInfo,
}
m.initdb()
m.init()
return m, nil
}
func (m DefaultManager) Store() *sessions.CookieStore {
return m.store
}
func (m DefaultManager) DockerClient() *dockerclient.DockerClient {
return m.client
}
func (m DefaultManager) StoreKey() string {
return m.storeKey
}
func (m DefaultManager) initdb() {
// create tables if needed
tables := []string{tblNameConfig, tblNameEvents, tblNameAccounts, tblNameRoles, tblNameConsole, tblNameServiceKeys, tblNameRegistries, tblNameExtensions, tblNameWebhookKeys}
for _, tbl := range tables {
_, err := r.Table(tbl).Run(m.session)
if err != nil {
if _, err := r.Db(m.database).TableCreate(tbl).Run(m.session); err != nil {
log.Fatalf("error creating table: %s", err)
}
}
}
}
func (m DefaultManager) init() error {
// anonymous usage info
go m.usageReport()
return nil
}
func (m DefaultManager) logEvent(eventType, message string, tags []string) {
evt := &shipyard.Event{
Type: eventType,
Time: time.Now(),
Message: message,
Tags: tags,
}
if err := m.SaveEvent(evt); err != nil {
log.Errorf("error logging event: %s", err)
}
}
// usageReport uploads anonymous usage info once at startup and then every
// hour, unless usage reporting is disabled. It is started as a goroutine
// from init() and never returns while reporting is enabled.
func (m DefaultManager) usageReport() {
	if m.disableUsageInfo {
		return
	}
	m.uploadUsage()
	t := time.NewTicker(1 * time.Hour).C
	// a single-case select over the ticker channel is just a receive
	// loop; range it directly
	for range t {
		go m.uploadUsage()
	}
}
// uploadUsage sends an anonymous usage report (an id derived from the MAC
// address of the first non-loopback interface, falling back to "anon",
// plus the version) to the tracker endpoint. Failures are logged as
// warnings and otherwise ignored.
func (m DefaultManager) uploadUsage() {
	id := "anon"
	ifaces, err := net.Interfaces()
	if err == nil {
		for _, iface := range ifaces {
			if iface.Name != "lo" {
				hw := iface.HardwareAddr.String()
				id = strings.Replace(hw, ":", "", -1)
				break
			}
		}
	}
	usage := &shipyard.Usage{
		ID:      id,
		Version: version.Version,
	}
	b, err := json.Marshal(usage)
	if err != nil {
		log.Warnf("error serializing usage info: %s", err)
		// nothing valid to send; the previous code fell through and
		// posted an empty body
		return
	}
	buf := bytes.NewBuffer(b)
	resp, err := http.Post(fmt.Sprintf("%s/update", trackerHost), "application/json", buf)
	if err != nil {
		log.Warnf("error sending usage info: %s", err)
		return
	}
	// the response body must be closed; the previous code discarded the
	// response and leaked the connection
	resp.Body.Close()
}
func (m DefaultManager) Container(id string) (*dockerclient.ContainerInfo, error) {
return m.client.InspectContainer(id)
}
// ScaleContainer starts numInstances additional copies of the container
// identified by id, collecting the ids of successfully started containers
// and any error messages into the returned ScaleResult. Each goroutine
// performs exactly one create/start and reports exactly one result, which
// the collector loop below receives.
func (m DefaultManager) ScaleContainer(id string, numInstances int) ScaleResult {
	var (
		errChan = make(chan (error))
		resChan = make(chan (string))
		result  = ScaleResult{Scaled: make([]string, 0), Errors: make([]string, 0)}
	)
	containerInfo, err := m.Container(id)
	if err != nil {
		result.Errors = append(result.Errors, err.Error())
		return result
	}
	for i := 0; i < numInstances; i++ {
		go func(instance int) {
			log.Debugf("scaling: id=%s #=%d", containerInfo.Id, instance)
			config := containerInfo.Config
			// clear hostname to get a newly generated
			config.Hostname = ""
			hostConfig := containerInfo.HostConfig
			id, err := m.client.CreateContainer(config, "")
			if err != nil {
				errChan <- err
				return
			}
			if err := m.client.StartContainer(id, hostConfig); err != nil {
				errChan <- err
				return
			}
			resChan <- id
		}(i)
	}
	// one result (success or error) per launched goroutine
	for i := 0; i < numInstances; i++ {
		select {
		case id := <-resChan:
			result.Scaled = append(result.Scaled, id)
		case err := <-errChan:
			log.Errorf("error scaling container: err=%s", strings.TrimSpace(err.Error()))
			result.Errors = append(result.Errors, strings.TrimSpace(err.Error()))
		}
	}
	return result
}
func (m DefaultManager) SaveServiceKey(key *auth.ServiceKey) error {
if _, err := r.Table(tblNameServiceKeys).Insert(key).RunWrite(m.session); err != nil {
return err
}
m.logEvent("add-service-key", fmt.Sprintf("description=%s", key.Description), []string{"security"})
return nil
}
func (m DefaultManager) RemoveServiceKey(key string) error {
if _, err := r.Table(tblNameServiceKeys).Filter(map[string]string{"key": key}).Delete().RunWrite(m.session); err != nil {
return err
}
m.logEvent("delete-service-key", fmt.Sprintf("key=%s", key), []string{"security"})
return nil
}
func (m DefaultManager) SaveEvent(event *shipyard.Event) error {
if _, err := r.Table(tblNameEvents).Insert(event).RunWrite(m.session); err != nil {
return err
}
return nil
}
// Events returns events ordered newest-first. A limit of -1 (or any
// negative value) returns all events; otherwise at most limit events are
// returned.
func (m DefaultManager) Events(limit int) ([]*shipyard.Event, error) {
	t := r.Table(tblNameEvents).OrderBy(r.Desc("Time"))
	if limit > -1 {
		// gorethink terms are immutable; the previous code discarded the
		// result of Limit, so the limit was never actually applied
		t = t.Limit(limit)
	}
	res, err := t.Run(m.session)
	if err != nil {
		return nil, err
	}
	events := []*shipyard.Event{}
	if err := res.All(&events); err != nil {
		return nil, err
	}
	return events, nil
}
func (m DefaultManager) PurgeEvents() error {
if _, err := r.Table(tblNameEvents).Delete().RunWrite(m.session); err != nil {
return err
}
return nil
}
func (m DefaultManager) ServiceKey(key string) (*auth.ServiceKey, error) {
res, err := r.Table(tblNameServiceKeys).Filter(map[string]string{"key": key}).Run(m.session)
if err != nil {
return nil, err
}
if res.IsNil() {
return nil, ErrServiceKeyDoesNotExist
}
var k *auth.ServiceKey
if err := res.One(&k); err != nil {
return nil, err
}
return k, nil
}
func (m DefaultManager) ServiceKeys() ([]*auth.ServiceKey, error) {
res, err := r.Table(tblNameServiceKeys).Run(m.session)
if err != nil {
return nil, err
}
keys := []*auth.ServiceKey{}
if err := res.All(&keys); err != nil {
return nil, err
}
return keys, nil
}
func (m DefaultManager) Accounts() ([]*auth.Account, error) {
res, err := r.Table(tblNameAccounts).OrderBy(r.Asc("username")).Run(m.session)
if err != nil {
return nil, err
}
accounts := []*auth.Account{}
if err := res.All(&accounts); err != nil {
return nil, err
}
return accounts, nil
}
func (m DefaultManager) Account(username string) (*auth.Account, error) {
res, err := r.Table(tblNameAccounts).Filter(map[string]string{"username": username}).Run(m.session)
if err != nil {
return nil, err
}
if res.IsNil() {
return nil, ErrAccountDoesNotExist
}
var account *auth.Account
if err := res.One(&account); err != nil {
return nil, err
}
return account, nil
}
func (m DefaultManager) SaveAccount(account *auth.Account) error {
var (
hash string
eventType string
)
if account.Password != "" {
h, err := auth.Hash(account.Password)
if err != nil {
return err
}
hash = h
}
// check if exists; if so, update
acct, err := m.Account(account.Username)
if err != nil && err != ErrAccountDoesNotExist {
return err
}
// update
if acct != nil {
updates := map[string]interface{}{
"first_name": account.FirstName,
"last_name": account.LastName,
"roles": account.Roles,
}
if account.Password != "" {
updates["password"] = hash
}
if _, err := r.Table(tblNameAccounts).Filter(map[string]string{"username": account.Username}).Update(updates).RunWrite(m.session); err != nil {
return err
}
eventType = "update-account"
} else {
account.Password = hash
if _, err := r.Table(tblNameAccounts).Insert(account).RunWrite(m.session); err != nil {
return err
}
eventType = "add-account"
}
m.logEvent(eventType, fmt.Sprintf("username=%s", account.Username), []string{"security"})
return nil
}
func (m DefaultManager) DeleteAccount(account *auth.Account) error {
res, err := r.Table(tblNameAccounts).Filter(map[string]string{"id": account.ID}).Delete().Run(m.session)
if err != nil {
return err
}
if res.IsNil() {
return ErrAccountDoesNotExist
}
m.logEvent("delete-account", fmt.Sprintf("username=%s", account.Username), []string{"security"})
return nil
}
func (m DefaultManager) Roles() ([]*auth.ACL, error) {
roles := auth.DefaultACLs()
return roles, nil
}
func (m DefaultManager) Role(name string) (*auth.ACL, error) {
acls, err := m.Roles()
if err != nil {
return nil, err
}
for _, r := range acls {
if r.RoleName == name {
return r, nil
}
}
return nil, nil
}
func (m DefaultManager) GetAuthenticator() auth.Authenticator {
return m.authenticator
}
func (m DefaultManager) Authenticate(username, password string) (bool, error) {
// only get the account to get the hashed password if using the builtin auth
passwordHash := ""
if m.authenticator.Name() == "builtin" {
acct, err := m.Account(username)
if err != nil {
log.Error(err)
return false, err
}
passwordHash = acct.Password
}
return m.authenticator.Authenticate(username, password, passwordHash)
}
func (m DefaultManager) NewAuthToken(username string, userAgent string) (*auth.AuthToken, error) {
tk, err := m.authenticator.GenerateToken()
if err != nil {
return nil, err
}
acct, err := m.Account(username)
if err != nil {
return nil, err
}
token := &auth.AuthToken{}
tokens := acct.Tokens
found := false
for _, t := range tokens {
if t.UserAgent == userAgent {
found = true
t.Token = tk
token = t
break
}
}
if !found {
token = &auth.AuthToken{
UserAgent: userAgent,
Token: tk,
}
tokens = append(tokens, token)
}
// delete token
if _, err := r.Table(tblNameAccounts).Filter(map[string]string{"username": username}).Filter(r.Row.Field("user_agent").Eq(userAgent)).Delete().Run(m.session); err != nil {
return nil, err
}
// add
if _, err := r.Table(tblNameAccounts).Filter(map[string]string{"username": username}).Update(map[string]interface{}{"tokens": tokens}).RunWrite(m.session); err != nil {
return nil, err
}
return token, nil
}
func (m DefaultManager) VerifyAuthToken(username, token string) error {
acct, err := m.Account(username)
if err != nil {
return err
}
found := false
for _, t := range acct.Tokens {
if token == t.Token {
found = true
break
}
}
if !found {
return ErrInvalidAuthToken
}
return nil
}
func (m DefaultManager) VerifyServiceKey(key string) error {
if _, err := m.ServiceKey(key); err != nil {
return err
}
return nil
}
func (m DefaultManager) NewServiceKey(description string) (*auth.ServiceKey, error) {
k, err := m.authenticator.GenerateToken()
if err != nil {
return nil, err
}
key := &auth.ServiceKey{
Key: k[24:],
Description: description,
}
if err := m.SaveServiceKey(key); err != nil {
return nil, err
}
return key, nil
}
// ChangePassword hashes password and stores it for username. It returns
// an error when the configured authenticator does not support updates
// (e.g. an external directory).
func (m DefaultManager) ChangePassword(username, password string) error {
	if !m.authenticator.IsUpdateSupported() {
		return fmt.Errorf("not supported for authenticator: %s", m.authenticator.Name())
	}
	hash, err := auth.Hash(password)
	if err != nil {
		return err
	}
	// RunWrite (not Run) for consistency with every other write path in
	// this file; it also surfaces server-side write errors
	if _, err := r.Table(tblNameAccounts).Filter(map[string]string{"username": username}).Update(map[string]string{"password": hash}).RunWrite(m.session); err != nil {
		return err
	}
	m.logEvent("change-password", username, []string{"security"})
	return nil
}
func (m DefaultManager) WebhookKey(key string) (*dockerhub.WebhookKey, error) {
res, err := r.Table(tblNameWebhookKeys).Filter(map[string]string{"key": key}).Run(m.session)
if err != nil {
return nil, err
}
if res.IsNil() {
return nil, ErrWebhookKeyDoesNotExist
}
var k *dockerhub.WebhookKey
if err := res.One(&k); err != nil {
return nil, err
}
return k, nil
}
func (m DefaultManager) WebhookKeys() ([]*dockerhub.WebhookKey, error) {
res, err := r.Table(tblNameWebhookKeys).OrderBy(r.Asc("image")).Run(m.session)
if err != nil {
return nil, err
}
keys := []*dockerhub.WebhookKey{}
if err := res.All(&keys); err != nil {
return nil, err
}
return keys, nil
}
func (m DefaultManager) NewWebhookKey(image string) (*dockerhub.WebhookKey, error) {
k := generateId(16)
key := &dockerhub.WebhookKey{
Key: k,
Image: image,
}
if err := m.SaveWebhookKey(key); err != nil {
return nil, err
}
return key, nil
}
func (m DefaultManager) SaveWebhookKey(key *dockerhub.WebhookKey) error {
if _, err := r.Table(tblNameWebhookKeys).Insert(key).RunWrite(m.session); err != nil {
return err
}
m.logEvent("add-webhook-key", fmt.Sprintf("image=%s", key.Image), []string{"webhook"})
return nil
}
func (m DefaultManager) DeleteWebhookKey(id string) error {
key, err := m.WebhookKey(id)
if err != nil {
return err
}
res, err := r.Table(tblNameWebhookKeys).Get(key.ID).Delete().Run(m.session)
if err != nil {
return err
}
if res.IsNil() {
return ErrWebhookKeyDoesNotExist
}
m.logEvent("delete-webhook-key", fmt.Sprintf("image=%s", key.Image), []string{"webhook"})
return nil
}
func (m DefaultManager) Nodes() ([]*shipyard.Node, error) {
info, err := m.client.Info()
if err != nil {
return nil, err
}
nodes, err := parseClusterNodes(info.DriverStatus)
if err != nil {
return nil, err
}
return nodes, nil
}
func (m DefaultManager) Node(name string) (*shipyard.Node, error) {
nodes, err := m.Nodes()
if err != nil {
return nil, err
}
for _, node := range nodes {
if node.Name == name {
return node, nil
}
}
return nil, nil
}
func (m DefaultManager) AddRegistry(registry *shipyard.Registry) error {
if _, err := r.Table(tblNameRegistries).Insert(registry).RunWrite(m.session); err != nil {
return err
}
m.logEvent("add-registry", fmt.Sprintf("name=%s endpoint=%s", registry.Name, registry.Addr), []string{"registry"})
return nil
}
func (m DefaultManager) RemoveRegistry(registry *shipyard.Registry) error {
res, err := r.Table(tblNameRegistries).Get(registry.ID).Delete().Run(m.session)
if err != nil {
return err
}
if res.IsNil() {
return ErrRegistryDoesNotExist
}
m.logEvent("delete-registry", fmt.Sprintf("name=%s endpoint=%s", registry.Name, registry.Addr), []string{"registry"})
return nil
}
func (m DefaultManager) Registries() ([]*shipyard.Registry, error) {
res, err := r.Table(tblNameRegistries).OrderBy(r.Asc("name")).Run(m.session)
if err != nil {
return nil, err
}
regs := []*shipyard.Registry{}
if err := res.All(®s); err != nil {
return nil, err
}
registries := []*shipyard.Registry{}
for _, r := range regs {
reg, err := shipyard.NewRegistry(r.ID, r.Name, r.Addr)
if err != nil {
return nil, err
}
registries = append(registries, reg)
}
return registries, nil
}
func (m DefaultManager) Registry(name string) (*shipyard.Registry, error) {
res, err := r.Table(tblNameRegistries).Filter(map[string]string{"name": name}).Run(m.session)
if err != nil {
return nil, err
}
if res.IsNil() {
return nil, ErrRegistryDoesNotExist
}
var reg *shipyard.Registry
if err := res.One(®); err != nil {
return nil, err
}
registry, err := shipyard.NewRegistry(reg.ID, reg.Name, reg.Addr)
if err != nil {
return nil, err
}
return registry, nil
}
// CreateConsoleSession stores a console session and logs the event.
func (m DefaultManager) CreateConsoleSession(c *shipyard.ConsoleSession) error {
	_, err := r.Table(tblNameConsole).Insert(c).RunWrite(m.session)
	if err != nil {
		return err
	}
	m.logEvent("create-console-session", fmt.Sprintf("container=%s", c.ContainerID), []string{"console"})
	return nil
}
// RemoveConsoleSession deletes a console session record.
// ErrConsoleSessionDoesNotExist is returned when nothing was deleted.
func (m DefaultManager) RemoveConsoleSession(c *shipyard.ConsoleSession) error {
	cursor, err := r.Table(tblNameConsole).Get(c.ID).Delete().Run(m.session)
	if err != nil {
		return err
	}
	if cursor.IsNil() {
		return ErrConsoleSessionDoesNotExist
	}
	return nil
}
// ConsoleSession fetches the console session matching the given token.
// ErrConsoleSessionDoesNotExist is returned when no session matches.
func (m DefaultManager) ConsoleSession(token string) (*shipyard.ConsoleSession, error) {
	cursor, err := r.Table(tblNameConsole).Filter(map[string]string{"token": token}).Run(m.session)
	if err != nil {
		return nil, err
	}
	if cursor.IsNil() {
		return nil, ErrConsoleSessionDoesNotExist
	}
	var session *shipyard.ConsoleSession
	if err := cursor.One(&session); err != nil {
		return nil, err
	}
	return session, nil
}
// ValidateConsoleSessionToken checks that token belongs to an existing
// console session for containerId. The session is single-use: it is
// removed as soon as it validates successfully.
func (m DefaultManager) ValidateConsoleSessionToken(containerId string, token string) bool {
	session, err := m.ConsoleSession(token)
	if err != nil {
		log.Errorf("error validating console session token: %s", err)
		return false
	}
	if session == nil || session.ContainerID != containerId {
		log.Warnf("unauthorized token request: %s", token)
		return false
	}
	if err := m.RemoveConsoleSession(session); err != nil {
		log.Error(err)
		return false
	}
	return true
}
|
package render
import (
"image"
"image/color"
"image/draw"
)
// Composite Types, distinct from Compound Types,
// Display all of their parts at the same time,
// and respect the positions and layers of their
// parts.
type Composite struct {
	// rs holds the composite's parts; they are drawn in slice order.
	rs []Modifiable
	// offsets is parallel to rs: each part is drawn at its own
	// position plus its stored offset.
	offsets []Point
}
// NewComposite creates a Composite from the given parts, allocating a
// zero offset for each part.
func NewComposite(sl []Modifiable) *Composite {
	return &Composite{
		rs:      sl,
		offsets: make([]Point, len(sl)),
	}
}

// AppendOffset adds a part together with its draw offset.
func (cs *Composite) AppendOffset(r Modifiable, p Point) {
	cs.offsets = append(cs.offsets, p)
	cs.rs = append(cs.rs, r)
}

// Append adds a part with a zero offset.
func (cs *Composite) Append(r Modifiable) {
	cs.AppendOffset(r, Point{})
}

// Add replaces the part at index i.
func (cs *Composite) Add(i int, r Modifiable) {
	cs.rs[i] = r
}

// AddOffset replaces the offset at index i.
func (cs *Composite) AddOffset(i int, p Point) {
	cs.offsets[i] = p
}

// SetOffsets copies ps over the existing offsets, index by index.
func (cs *Composite) SetOffsets(ps []Point) {
	for i := range ps {
		cs.offsets[i] = ps[i]
	}
}

// Get returns the part at index i.
func (cs *Composite) Get(i int) Modifiable {
	return cs.rs[i]
}
// Draw renders every part onto buff at the part's own position plus
// its stored offset.
func (cs *Composite) Draw(buff draw.Image) {
	for i, c := range cs.rs {
		switch t := c.(type) {
		case *Composite:
			// Nested composites draw themselves entirely; skip the
			// generic blit below.
			t.Draw(buff)
			continue
		case *Reverting:
			// Animated types advance one frame, then fall through to
			// the generic blit below.
			t.updateAnimation()
		case *Animation:
			t.updateAnimation()
		case *Sequence:
			t.update()
		}
		img := c.GetRGBA()
		drawX := int(c.GetX()) + int(cs.offsets[i].X)
		drawY := int(c.GetY()) + int(cs.offsets[i].Y)
		ShinyDraw(buff, img, drawX, drawY)
	}
}
// GetRGBA returns nil; a Composite has no single backing image.
func (cs *Composite) GetRGBA() *image.RGBA {
	return nil
}

// ShiftX moves every part horizontally by x.
func (cs *Composite) ShiftX(x float64) {
	for i := range cs.rs {
		cs.rs[i].ShiftX(x)
	}
}

// ShiftY moves every part vertically by y.
func (cs *Composite) ShiftY(y float64) {
	for i := range cs.rs {
		cs.rs[i].ShiftY(y)
	}
}

// AlwaysDirty reports that a Composite must be redrawn every frame.
func (cs *Composite) AlwaysDirty() bool {
	return true
}

// GetX returns 0; a Composite has no intrinsic position of its own.
func (cs *Composite) GetX() float64 {
	return 0
}

// GetY returns 0; a Composite has no intrinsic position of its own.
func (cs *Composite) GetY() float64 {
	return 0
}
// This should be changed so that compositeSlice (and map)
// has a persistent concept of what it's smallest
// x and y are.
//
// SetPos sets every part to the same position.
func (cs *Composite) SetPos(x, y float64) {
	for i := range cs.rs {
		cs.rs[i].SetPos(x, y)
	}
}

// GetLayer returns 0; the parts manage their own layers.
func (cs *Composite) GetLayer() int {
	return 0
}

// SetLayer assigns the same layer to every part.
func (cs *Composite) SetLayer(l int) {
	for i := range cs.rs {
		cs.rs[i].SetLayer(l)
	}
}

// UnDraw undraws every part.
func (cs *Composite) UnDraw() {
	for i := range cs.rs {
		cs.rs[i].UnDraw()
	}
}
// FlipX flips every part horizontally and returns the Composite.
func (cs *Composite) FlipX() Modifiable {
	for i := range cs.rs {
		cs.rs[i].FlipX()
	}
	return cs
}

// FlipY flips every part vertically and returns the Composite.
func (cs *Composite) FlipY() Modifiable {
	for i := range cs.rs {
		cs.rs[i].FlipY()
	}
	return cs
}

// ApplyColor tints every part with c and returns the Composite.
func (cs *Composite) ApplyColor(c color.Color) Modifiable {
	for i := range cs.rs {
		cs.rs[i].ApplyColor(c)
	}
	return cs
}

// Copy returns a deep copy: each part is copied and each offset
// duplicated.
func (cs *Composite) Copy() Modifiable {
	cs2 := &Composite{
		rs:      make([]Modifiable, len(cs.rs)),
		offsets: make([]Point, len(cs.rs)),
	}
	for i := range cs.rs {
		cs2.rs[i] = cs.rs[i].Copy()
		cs2.offsets[i] = cs.offsets[i]
	}
	return cs2
}
// FillMask fills every part with the given mask image.
func (cs *Composite) FillMask(img image.RGBA) Modifiable {
	for i := range cs.rs {
		cs.rs[i].FillMask(img)
	}
	return cs
}

// ApplyMask masks every part with the given image.
func (cs *Composite) ApplyMask(img image.RGBA) Modifiable {
	for i := range cs.rs {
		cs.rs[i].ApplyMask(img)
	}
	return cs
}

// Rotate rotates every part by the given number of degrees.
func (cs *Composite) Rotate(degrees int) Modifiable {
	for i := range cs.rs {
		cs.rs[i].Rotate(degrees)
	}
	return cs
}

// Scale scales every part by the given ratios.
func (cs *Composite) Scale(xRatio float64, yRatio float64) Modifiable {
	for i := range cs.rs {
		cs.rs[i].Scale(xRatio, yRatio)
	}
	return cs
}

// Fade reduces every part's alpha by the given amount.
func (cs *Composite) Fade(alpha int) Modifiable {
	for i := range cs.rs {
		cs.rs[i].Fade(alpha)
	}
	return cs
}
// String returns a debug representation of the Composite and its parts.
func (cs *Composite) String() string {
	s := "Composite{"
	for _, v := range cs.rs {
		s += v.String() + "\n"
	}
	// Close the brace opened above; previously the returned string was
	// left unterminated.
	return s + "}"
}
New inventory model, a bit less hacked together but not really
package render
import (
"image"
"image/color"
"image/draw"
)
// Composite Types, distinct from Compound Types,
// Display all of their parts at the same time,
// and respect the positions and layers of their
// parts.
type Composite struct {
	// rs holds the composite's parts; they are drawn in slice order.
	rs []Modifiable
	// offsets is parallel to rs: each part is drawn at its own
	// position plus its stored offset.
	offsets []Point
}
// NewComposite creates a Composite from the given parts, allocating a
// zero offset for each part.
func NewComposite(sl []Modifiable) *Composite {
	return &Composite{
		rs:      sl,
		offsets: make([]Point, len(sl)),
	}
}

// AppendOffset adds a part together with its draw offset.
func (cs *Composite) AppendOffset(r Modifiable, p Point) {
	cs.offsets = append(cs.offsets, p)
	cs.rs = append(cs.rs, r)
}

// Append adds a part with a zero offset.
func (cs *Composite) Append(r Modifiable) {
	cs.AppendOffset(r, Point{})
}

// Add replaces the part at index i.
func (cs *Composite) Add(i int, r Modifiable) {
	cs.rs[i] = r
}

// AddOffset replaces the offset at index i.
func (cs *Composite) AddOffset(i int, p Point) {
	cs.offsets[i] = p
}

// SetOffsets copies ps over the existing offsets, index by index.
func (cs *Composite) SetOffsets(ps []Point) {
	for i := range ps {
		cs.offsets[i] = ps[i]
	}
}

// Get returns the part at index i.
func (cs *Composite) Get(i int) Modifiable {
	return cs.rs[i]
}
// Draw renders every part onto buff at the part's own position plus
// its stored offset.
func (cs *Composite) Draw(buff draw.Image) {
	for i, c := range cs.rs {
		switch t := c.(type) {
		case *Composite:
			// Nested composites draw themselves entirely; skip the
			// generic blit below.
			t.Draw(buff)
			continue
		case *Reverting:
			// Animated types advance one frame, then fall through to
			// the generic blit below.
			t.updateAnimation()
		case *Animation:
			t.updateAnimation()
		case *Sequence:
			t.update()
		}
		img := c.GetRGBA()
		drawX := int(c.GetX()) + int(cs.offsets[i].X)
		drawY := int(c.GetY()) + int(cs.offsets[i].Y)
		ShinyDraw(buff, img, drawX, drawY)
	}
}
// GetRGBA returns nil; a Composite has no single backing image.
func (cs *Composite) GetRGBA() *image.RGBA {
	return nil
}

// ShiftX moves every part horizontally by x.
func (cs *Composite) ShiftX(x float64) {
	for i := range cs.rs {
		cs.rs[i].ShiftX(x)
	}
}

// ShiftY moves every part vertically by y.
func (cs *Composite) ShiftY(y float64) {
	for i := range cs.rs {
		cs.rs[i].ShiftY(y)
	}
}

// AlwaysDirty reports that a Composite must be redrawn every frame.
func (cs *Composite) AlwaysDirty() bool {
	return true
}

// GetX returns 0; a Composite has no intrinsic position of its own.
func (cs *Composite) GetX() float64 {
	return 0
}

// GetY returns 0; a Composite has no intrinsic position of its own.
func (cs *Composite) GetY() float64 {
	return 0
}
// This should be changed so that compositeSlice (and map)
// has a persistent concept of what it's smallest
// x and y are.
//
// SetPos sets every part to the same position.
func (cs *Composite) SetPos(x, y float64) {
	for i := range cs.rs {
		cs.rs[i].SetPos(x, y)
	}
}

// GetLayer returns 0; the parts manage their own layers.
func (cs *Composite) GetLayer() int {
	return 0
}

// SetLayer assigns the same layer to every part.
func (cs *Composite) SetLayer(l int) {
	for i := range cs.rs {
		cs.rs[i].SetLayer(l)
	}
}

// UnDraw undraws every part.
func (cs *Composite) UnDraw() {
	for i := range cs.rs {
		cs.rs[i].UnDraw()
	}
}
// FlipX flips every part horizontally and returns the Composite.
func (cs *Composite) FlipX() Modifiable {
	for i := range cs.rs {
		cs.rs[i].FlipX()
	}
	return cs
}

// FlipY flips every part vertically and returns the Composite.
func (cs *Composite) FlipY() Modifiable {
	for i := range cs.rs {
		cs.rs[i].FlipY()
	}
	return cs
}

// ApplyColor tints every part with c and returns the Composite.
func (cs *Composite) ApplyColor(c color.Color) Modifiable {
	for i := range cs.rs {
		cs.rs[i].ApplyColor(c)
	}
	return cs
}

// Copy returns a deep copy: each part is copied and each offset
// duplicated.
func (cs *Composite) Copy() Modifiable {
	cs2 := &Composite{
		rs:      make([]Modifiable, len(cs.rs)),
		offsets: make([]Point, len(cs.rs)),
	}
	for i := range cs.rs {
		cs2.rs[i] = cs.rs[i].Copy()
		cs2.offsets[i] = cs.offsets[i]
	}
	return cs2
}
// FillMask fills every part with the given mask image.
func (cs *Composite) FillMask(img image.RGBA) Modifiable {
	for i := range cs.rs {
		cs.rs[i].FillMask(img)
	}
	return cs
}

// ApplyMask masks every part with the given image.
func (cs *Composite) ApplyMask(img image.RGBA) Modifiable {
	for i := range cs.rs {
		cs.rs[i].ApplyMask(img)
	}
	return cs
}

// Rotate rotates every part by the given number of degrees.
func (cs *Composite) Rotate(degrees int) Modifiable {
	for i := range cs.rs {
		cs.rs[i].Rotate(degrees)
	}
	return cs
}

// Scale scales every part by the given ratios.
func (cs *Composite) Scale(xRatio float64, yRatio float64) Modifiable {
	for i := range cs.rs {
		cs.rs[i].Scale(xRatio, yRatio)
	}
	return cs
}

// Fade reduces every part's alpha by the given amount.
func (cs *Composite) Fade(alpha int) Modifiable {
	for i := range cs.rs {
		cs.rs[i].Fade(alpha)
	}
	return cs
}
// String returns a debug representation of the Composite and its parts.
func (cs *Composite) String() string {
	s := "Composite{"
	for _, v := range cs.rs {
		s += v.String() + "\n"
	}
	// Close the brace opened above; previously the returned string was
	// left unterminated.
	return s + "}"
}
// CompositeR groups plain Renderables. Unlike Composite it exposes no
// Modifiable operations, and it tracks whether it has been undrawn.
type CompositeR struct {
	// rs holds the composite's parts; they are drawn in slice order.
	rs []Renderable
	// offsets is parallel to rs: each part is drawn at its own
	// position plus its stored offset.
	offsets []Point
	// unDraw is set by UnDraw; GetLayer then reports -1.
	unDraw bool
}
// NewCompositeR creates a CompositeR from the given parts, allocating a
// zero offset for each part.
func NewCompositeR(sl []Renderable) *CompositeR {
	return &CompositeR{
		rs:      sl,
		offsets: make([]Point, len(sl)),
	}
}

// AppendOffset adds a part together with its draw offset.
func (cs *CompositeR) AppendOffset(r Renderable, p Point) {
	cs.offsets = append(cs.offsets, p)
	cs.rs = append(cs.rs, r)
}

// Append adds a part with a zero offset.
func (cs *CompositeR) Append(r Renderable) {
	cs.AppendOffset(r, Point{})
}

// Add replaces the part at index i.
func (cs *CompositeR) Add(i int, r Renderable) {
	cs.rs[i] = r
}

// AddOffset replaces the offset at index i.
func (cs *CompositeR) AddOffset(i int, p Point) {
	cs.offsets[i] = p
}

// SetOffsets copies ps over the existing offsets, index by index.
func (cs *CompositeR) SetOffsets(ps []Point) {
	for i := range ps {
		cs.offsets[i] = ps[i]
	}
}

// Get returns the part at index i.
func (cs *CompositeR) Get(i int) Renderable {
	return cs.rs[i]
}
// Draw renders every part onto buff at the part's own position plus
// its stored offset. Nested composites and Text draw themselves; all
// other parts are blitted from their RGBA buffer.
func (cs *CompositeR) Draw(buff draw.Image) {
	for i, c := range cs.rs {
		switch t := c.(type) {
		case *CompositeR:
			t.Draw(buff)
			continue
		case *Text:
			t.Draw(buff)
			continue
		}
		img := c.GetRGBA()
		drawX := int(c.GetX()) + int(cs.offsets[i].X)
		drawY := int(c.GetY()) + int(cs.offsets[i].Y)
		ShinyDraw(buff, img, drawX, drawY)
	}
}
// GetRGBA returns nil; a CompositeR has no single backing image.
func (cs *CompositeR) GetRGBA() *image.RGBA {
	return nil
}

// ShiftX moves every part horizontally by x.
func (cs *CompositeR) ShiftX(x float64) {
	for i := range cs.rs {
		cs.rs[i].ShiftX(x)
	}
}

// ShiftY moves every part vertically by y.
func (cs *CompositeR) ShiftY(y float64) {
	for i := range cs.rs {
		cs.rs[i].ShiftY(y)
	}
}

// AlwaysDirty reports that a CompositeR must be redrawn every frame.
func (cs *CompositeR) AlwaysDirty() bool {
	return true
}

// GetX returns 0; a CompositeR has no intrinsic position of its own.
func (cs *CompositeR) GetX() float64 {
	return 0
}

// GetY returns 0; a CompositeR has no intrinsic position of its own.
func (cs *CompositeR) GetY() float64 {
	return 0
}
// This should be changed so that compositeSlice (and map)
// has a persistent concept of what it's smallest
// x and y are.
//
// SetPos sets every part to the same position.
func (cs *CompositeR) SetPos(x, y float64) {
	for i := range cs.rs {
		cs.rs[i].SetPos(x, y)
	}
}

// GetLayer returns -1 once the composite has been undrawn, otherwise 0.
func (cs *CompositeR) GetLayer() int {
	if cs.unDraw {
		return -1
	}
	return 0
}

// SetLayer assigns the same layer to every part.
func (cs *CompositeR) SetLayer(l int) {
	for i := range cs.rs {
		cs.rs[i].SetLayer(l)
	}
}

// UnDraw undraws every part and marks the composite itself as undrawn.
func (cs *CompositeR) UnDraw() {
	for i := range cs.rs {
		cs.rs[i].UnDraw()
	}
	cs.unDraw = true
}
// String returns a debug representation of the CompositeR and its parts.
func (cs *CompositeR) String() string {
	s := "CompositeR{"
	for _, v := range cs.rs {
		s += v.String() + "\n"
	}
	// Close the brace opened above; previously the returned string was
	// left unterminated.
	return s + "}"
}
|
package main
import (
"bufio"
"errors"
"fmt"
"io"
"net"
"strconv"
"strings"
"github.com/jaxi/chess"
)
// TCPPlayer - The Implementation of TCP protocol for chess game.
// Prompts and board state are written to conn; moves are read from it.
type TCPPlayer struct {
	// conn is the remote player's connection.
	conn net.Conn
	// NOTE(review): the embedded Reader/Writer are never assigned in
	// this file — confirm they are actually needed.
	io.Reader
	io.Writer
}
// ShowTurn displays the message about who is playing.
func (tc TCPPlayer) ShowTurn(b *chess.Board) {
	var side string
	switch b.Turn() {
	case chess.WHITE:
		side = "White"
	case chess.BLACK:
		side = "Black"
	}
	tc.conn.Write([]byte(side + "'s turn: "))
}
// RenderBoard writes the current board state to the remote player.
func (tc TCPPlayer) RenderBoard(b *chess.Board) {
	io.WriteString(tc.conn, b.String())
}
// FetchMove reads a move as four whitespace-separated tokens from the
// connection: rank (number), file (letter), rank, file.
//
// An empty read (no tokens at all) yields the sentinel move
// (-1, -1, -1, -1). Partial input of 1-3 tokens is rejected as invalid
// instead of silently producing a move built from zeroed fields.
func (tc TCPPlayer) FetchMove() (chess.Move, error) {
	scanner := bufio.NewScanner(tc.conn)
	scanner.Split(bufio.ScanWords)
	nums := make([]int, 4)
	i := 0
	var err error
	for i = 0; i < 4 && scanner.Scan(); i++ {
		s := scanner.Text()
		if i%2 == 0 {
			// Even positions are ranks, given as decimal numbers.
			nums[i], err = strconv.Atoi(strings.TrimSpace(s))
			if err != nil {
				return chess.Move{}, errors.New("Invalid input")
			}
		} else {
			// Odd positions are files, given as single letters.
			if len(s) == 1 {
				nums[i] = int(([]byte(s))[0])
			} else {
				return chess.Move{}, errors.New("Invalid input")
			}
		}
	}
	if i == 0 {
		return chess.NewMove(-1, -1, -1, -1), nil
	}
	if i < 4 {
		// Previously a truncated move fell through with zeroed fields.
		return chess.Move{}, errors.New("Invalid input")
	}
	return chess.NewMove(nums[0]-1, nums[1]-'a', nums[2]-1, nums[3]-'a'), nil
}
// ErrorMessage tells the remote player their last move was rejected.
func (tc TCPPlayer) ErrorMessage(b *chess.Board) {
	io.WriteString(tc.conn, "Wait a minute. There's something wrong with your move!\n")
}
// main runs a two-player TCP chess server on :8081. Whenever a player
// slot is free it accepts a new connection, then advances the game
// until one side leaves.
func main() {
	fmt.Println("Launching server...")
	ln, err := net.Listen("tcp", ":8081")
	if err != nil {
		// Previously the error was discarded, so a busy port caused a
		// nil-listener panic on Accept.
		fmt.Println("listen error:", err)
		return
	}
	defer ln.Close()
	b := chess.NewBoard()
	opened := make([]bool, 2)
	conns := make([]net.Conn, 2)
	for {
		for i := range conns {
			if !opened[i] {
				c, err := ln.Accept()
				if err != nil {
					fmt.Println("accept error:", err)
					return
				}
				conns[i] = c
				opened[i] = true
			}
		}
		tc1 := TCPPlayer{conn: conns[0]}
		tc2 := TCPPlayer{conn: conns[1]}
		idx := b.AdvanceLooping([]chess.Player{tc1, tc2})
		fmt.Println(idx)
		// Close the departing player's connection now; the previous
		// defer inside the loop never ran and leaked every connection.
		conns[idx].Close()
		opened[idx] = false
	}
}
Oops, forgot to tidy up...
package main
import (
"bufio"
"errors"
"fmt"
"io"
"net"
"strconv"
"strings"
"github.com/jaxi/chess"
)
// TCPPlayer - The Implementation of TCP protocol for chess game.
// Prompts and board state are written to conn; moves are read from it.
type TCPPlayer struct {
	// conn is the remote player's connection.
	conn net.Conn
	// NOTE(review): the embedded Reader/Writer are never assigned in
	// this file — confirm they are actually needed.
	io.Reader
	io.Writer
}
// ShowTurn displays the message about who is playing.
func (tc TCPPlayer) ShowTurn(b *chess.Board) {
	var side string
	switch b.Turn() {
	case chess.WHITE:
		side = "White"
	case chess.BLACK:
		side = "Black"
	}
	tc.conn.Write([]byte(side + "'s turn: "))
}
// RenderBoard writes the current board state to the remote player.
func (tc TCPPlayer) RenderBoard(b *chess.Board) {
	io.WriteString(tc.conn, b.String())
}
// FetchMove reads a move as four whitespace-separated tokens from the
// connection: rank (number), file (letter), rank, file.
//
// An empty read (no tokens at all) yields the sentinel move
// (-1, -1, -1, -1). Partial input of 1-3 tokens is rejected as invalid
// instead of silently producing a move built from zeroed fields.
func (tc TCPPlayer) FetchMove() (chess.Move, error) {
	scanner := bufio.NewScanner(tc.conn)
	scanner.Split(bufio.ScanWords)
	nums := make([]int, 4)
	i := 0
	var err error
	for i = 0; i < 4 && scanner.Scan(); i++ {
		s := scanner.Text()
		if i%2 == 0 {
			// Even positions are ranks, given as decimal numbers.
			nums[i], err = strconv.Atoi(strings.TrimSpace(s))
			if err != nil {
				return chess.Move{}, errors.New("Invalid input")
			}
		} else {
			// Odd positions are files, given as single letters.
			if len(s) == 1 {
				nums[i] = int(([]byte(s))[0])
			} else {
				return chess.Move{}, errors.New("Invalid input")
			}
		}
	}
	if i == 0 {
		return chess.NewMove(-1, -1, -1, -1), nil
	}
	if i < 4 {
		// Previously a truncated move fell through with zeroed fields.
		return chess.Move{}, errors.New("Invalid input")
	}
	return chess.NewMove(nums[0]-1, nums[1]-'a', nums[2]-1, nums[3]-'a'), nil
}
// ErrorMessage tells the remote player their last move was rejected.
func (tc TCPPlayer) ErrorMessage(b *chess.Board) {
	io.WriteString(tc.conn, "Wait a minute. There's something wrong with your move!\n")
}
// main runs a two-player TCP chess server on :8081. Whenever a player
// slot is free it accepts a new connection, then advances the game
// until one side leaves.
func main() {
	fmt.Println("Launching server...")
	ln, err := net.Listen("tcp", ":8081")
	if err != nil {
		// Previously the error was discarded, so a busy port caused a
		// nil-listener panic on Accept.
		fmt.Println("listen error:", err)
		return
	}
	defer ln.Close()
	b := chess.NewBoard()
	opened := make([]bool, 2)
	conns := make([]net.Conn, 2)
	for {
		for i := range conns {
			if !opened[i] {
				c, err := ln.Accept()
				if err != nil {
					fmt.Println("accept error:", err)
					return
				}
				conns[i] = c
				opened[i] = true
			}
		}
		tc1 := TCPPlayer{conn: conns[0]}
		tc2 := TCPPlayer{conn: conns[1]}
		idx := b.AdvanceLooping([]chess.Player{tc1, tc2})
		// Close the departing player's connection now; the previous
		// defer inside the loop never ran and leaked every connection.
		conns[idx].Close()
		opened[idx] = false
	}
}
|
package grab
import "testing"
import "net/http"
// TestURLFilenames checks that guessFilename extracts the trailing path
// element from a response's request URL.
func TestURLFilenames(t *testing.T) {
	tests := map[string]string{
		"http://test.com/path/filename": "filename",
	}
	for url, expect := range tests {
		req, err := http.NewRequest("GET", url, nil)
		if err != nil {
			// Previously this error was silently discarded.
			t.Fatalf("building request for %v: %v", url, err)
		}
		resp := &http.Response{
			Request: req,
		}
		actual, err := guessFilename(resp)
		if err != nil {
			// Fail the test instead of panicking the whole test binary.
			t.Fatalf("guessFilename(%v): %v", url, err)
		}
		if actual != expect {
			t.Errorf("expected '%v', got '%v'", expect, actual)
		}
	}
}
Improved filename tests
package grab
import "testing"
import "net/http"
// TestURLFilenames verifies guessFilename against URLs that should
// yield the filename "filename" and URLs that carry no usable filename
// (which must produce ErrNoFilename).
func TestURLFilenames(t *testing.T) {
	expect := "filename"
	shouldPass := []string{
		"http://test.com/filename",
		"http://test.com/path/filename",
		"http://test.com/deep/path/filename",
		"http://test.com/filename?with=args",
		"http://test.com/filename#with-fragment",
	}
	// Typo fixed: this slice was previously named "shoudlFail".
	shouldFail := []string{
		"http://test.com",
		"http://test.com/",
		"http://test.com/filename/",
		"http://test.com/filename/?with=args",
		"http://test.com/filename/#with-fragment",
	}
	for _, url := range shouldPass {
		req, _ := http.NewRequest("GET", url, nil)
		resp := &http.Response{
			Request: req,
		}
		actual, err := guessFilename(resp)
		if err != nil {
			t.Errorf("%v", err)
			// Don't also report a spurious mismatch for this URL.
			continue
		}
		if actual != expect {
			t.Errorf("expected '%v', got '%v'", expect, actual)
		}
	}
	for _, url := range shouldFail {
		req, _ := http.NewRequest("GET", url, nil)
		resp := &http.Response{
			Request: req,
		}
		_, err := guessFilename(resp)
		if err != ErrNoFilename {
			t.Errorf("expected '%v', got '%v'", ErrNoFilename, err)
		}
	}
}
|
package slogger
import (
"fmt"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/tychoish/grip/message"
)
// TestStackError exercises NewStackError: it must satisfy both the
// message.Composer and error interfaces, capture a stack trace, and
// render identically through Resolve() and Error().
// (Renamed from TestSlackError, which was a typo — it tests StackError.)
func TestStackError(t *testing.T) {
	assert := assert.New(t)
	msg := NewStackError("foo %s", "bar")
	assert.Implements((*message.Composer)(nil), msg)
	assert.Implements((*error)(nil), msg)
	assert.True(len(msg.Stacktrace) > 0)
	assert.True(strings.HasPrefix(msg.Resolve(), "foo bar"))
	assert.Equal("", NewStackError("").Resolve())
	assert.Equal(NewStackError("").Resolve(), NewStackError("").Error())
	assert.Equal(msg.Resolve(), msg.Error())
	// the raw structure always has stack data, generated in the
	// constructor, even if the message is nil.
	assert.True(len(fmt.Sprintf("%v", NewStackError("").Raw())) > 10)
}
fix stack error test
package slogger
import (
"fmt"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/tychoish/grip/message"
)
// TestStackError exercises NewStackError: it must satisfy both the
// message.Composer and error interfaces, capture a stack trace, and
// render identically through Resolve() and Error().
// (Renamed from TestSlackError, which was a typo — it tests StackError.)
func TestStackError(t *testing.T) {
	assert := assert.New(t)
	msg := NewStackError("foo %s", "bar")
	assert.Implements((*message.Composer)(nil), msg)
	assert.Implements((*error)(nil), msg)
	assert.True(len(msg.Stacktrace) > 0)
	assert.True(strings.HasPrefix(msg.Resolve(), "foo bar"))
	assert.Equal("", NewStackError("").message)
	assert.Equal(NewStackError("").Resolve(), NewStackError("").Error())
	assert.Equal(msg.Resolve(), msg.Error())
	// the raw structure always has stack data, generated in the
	// constructor, even if the message is nil.
	assert.True(len(fmt.Sprintf("%v", NewStackError("").Raw())) > 10)
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package gentests
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"golang.org/x/tools/imports"
)
// goruncache memoizes the output of gorun invocations, keyed by the
// generated source code, so identical snippets run only once.
// Initialized at declaration; the previous separate init() was
// unnecessary.
var goruncache = make(map[string]string)
// Generator represents the "gentests" generator.
//
type Generator struct {
	// b accumulates the generated Go source.
	b bytes.Buffer
	// TestSuite is the parsed YAML test suite being converted.
	TestSuite TestSuite
}
// Output returns the generator output.
//
// It assembles a complete Go test file for the suite: file header,
// client initialization, helpers, common/xpack/suite-level setup
// closures, and one t.Run sub-test per YAML test with optional
// per-test setup/teardown.
func (g *Generator) Output() (io.Reader, error) {
	name := g.TestSuite.Name()
	if g.TestSuite.Type == "xpack" {
		name = "Xpack_" + name
	}
	g.genFileHeader()
	g.w("func Test" + name + "(t *testing.T) {\n")
	g.genInitializeClient()
	g.genHelpers()
	g.genCommonSetup()
	if g.TestSuite.Type == "xpack" {
		g.genXPackSetup()
	}
	if len(g.TestSuite.Setup) > 0 {
		g.w("// ----- Test Suite Setup --------------------------------------------------------\n")
		g.w("testSuiteSetup := func() {\n")
		g.genSetupTeardown(g.TestSuite.Setup)
		g.w("}\n")
		g.w("_ = testSuiteSetup\n")
		g.w("// --------------------------------------------------------------------------------\n")
		g.w("\n")
	}
	if len(g.TestSuite.Teardown) > 0 {
		g.w("\t// Teardown\n")
		g.w("\tdefer func(t *testing.T) {\n")
		g.genSetupTeardown(g.TestSuite.Teardown)
		g.w("\t}(t)\n")
	}
	for i, t := range g.TestSuite.Tests {
		g.w("\n")
		g.genLocationYAML(t)
		g.w("\t" + `t.Run("` + strings.ReplaceAll(t.Name, " ", "_") + `", ` + "func(t *testing.T) {\n")
		if !g.genSkip(t) {
			g.w("\tdefer recoverPanic(t)\n")
			g.w("\tcommonSetup()\n")
			if g.TestSuite.Type == "xpack" {
				g.w("\txpackSetup()\n")
			}
			if len(g.TestSuite.Setup) > 0 {
				g.w("\ttestSuiteSetup()\n")
			}
			g.w("\n")
			if len(t.Setup) > 0 {
				g.w("\t// Test setup\n")
				g.genSetupTeardown(t.Setup)
			}
			if len(t.Teardown) > 0 {
				g.w("\t// Test teardown\n")
				// BUG FIX: the parameter declaration and the call argument
				// were swapped, emitting invalid Go:
				//   defer func(t) { ... }(t *testing.T)
				// The suite-level teardown above shows the correct form.
				g.w("\tdefer func(t *testing.T) {\n")
				g.genSetupTeardown(t.Teardown)
				g.w("\t}(t)\n")
			}
			if len(t.Setup) > 0 || len(t.Teardown) > 0 {
				g.w("\n")
			}
			g.genSteps(t)
		}
		g.w("\t})\n")
		if i < len(g.TestSuite.Tests)-1 {
			g.w("\n")
		}
	}
	g.w("}\n")
	return bytes.NewReader(g.b.Bytes()), nil
}
// OutputFormatted returns a formatted generator output.
//
// The raw output is passed through golang.org/x/tools/imports (gofmt
// plus automatic import fixing). If formatting fails, the unformatted
// source is returned together with the error so callers can inspect
// what was generated.
func (g *Generator) OutputFormatted() (io.Reader, error) {
	out, err := g.Output()
	if err != nil {
		return bytes.NewReader(g.b.Bytes()), err
	}
	// Keep an unformatted copy so it can be returned if formatting fails.
	var b bytes.Buffer
	if _, err := io.Copy(&b, out); err != nil {
		return bytes.NewReader(g.b.Bytes()), err
	}
	fout, err := imports.Process(
		"",
		g.b.Bytes(),
		&imports.Options{
			AllErrors: true,
			Comments: true,
			FormatOnly: false,
			TabIndent: true,
			TabWidth: 1,
		})
	if err != nil {
		return bytes.NewReader(b.Bytes()), err
	}
	// Replace the internal buffer with the formatted source.
	g.b.Reset()
	g.b.Write(fout)
	return bytes.NewReader(fout), nil
}
// w appends s to the generator's output buffer.
func (g *Generator) w(s string) {
	io.WriteString(&g.b, s)
}
// gorun writes the given Go source to a throwaway directory, executes
// it with "go run", and returns its stdout. Results are memoized per
// source string in goruncache.
//
// NOTE(review): the temp dir is created under the relative directory
// "tmp", which must already exist — confirm this is intentional.
func (g *Generator) gorun(code string) (string, error) {
	if out, ok := goruncache[code]; ok && out != "" {
		return out, nil
	}
	dir, err := ioutil.TempDir("tmp", "gorun")
	if err != nil {
		return "", fmt.Errorf("gorun: %s", err)
	}
	defer os.RemoveAll(dir)
	// Write the snippet directly; the previous os.Create followed by
	// ioutil.WriteFile opened the same file twice for no benefit.
	fname := filepath.Join(dir, "type_for_struct_field.go")
	if err := ioutil.WriteFile(fname, []byte(code), 0644); err != nil {
		return "", fmt.Errorf("gorun: %s", err)
	}
	cmd := exec.Command("go", "run", fname)
	out, err := cmd.Output()
	if err != nil {
		if e, ok := err.(*exec.ExitError); ok {
			return "", fmt.Errorf("gorun: %s", e.Stderr)
		}
		return "", fmt.Errorf("gorun: %s", err)
	}
	goruncache[code] = string(out)
	return string(out), nil
}
// genFileHeader writes the "Code generated" banner (including source
// version information when available), the package clause, and the
// import block shared by every generated test file.
func (g *Generator) genFileHeader() {
	g.w("// Code generated")
	if EsVersion != "" || GitCommit != "" || GitTag != "" {
		g.w(" from YAML test suite version")
		if GitCommit != "" {
			g.w(fmt.Sprintf(" %s", GitCommit))
			if GitTag != "" {
				g.w(fmt.Sprintf("|%s", GitTag))
			}
		}
	}
	g.w(" -- DO NOT EDIT\n")
	g.w("\n")
	g.w("package esapi_test\n")
	g.w(`
import (
	encjson "encoding/json"
	encyaml "gopkg.in/yaml.v2"
	"fmt"
	"context"
	"crypto/tls"
	"os"
	"net/url"
	"testing"
	"time"
	"github.com/elastic/go-elasticsearch/v8"
	"github.com/elastic/go-elasticsearch/v8/esapi"
	"github.com/elastic/elastic-transport-go/v8/elastictransport"
)
var (
	// Prevent compilation errors for unused packages
	_ = fmt.Printf
	_ = encjson.NewDecoder
	_ = encyaml.NewDecoder
	_ = tls.Certificate{}
	_ = url.QueryEscape
)` + "\n")
}
// genInitializeClient writes the elasticsearch.Config and client
// construction into the generated test: xpack suites skip TLS
// verification, and a color logger is enabled when DEBUG is set.
//
// NOTE(review): the xpack branch emits http.Transport, but the header
// import block does not list net/http — presumably the imports.Process
// formatting pass adds it; confirm.
func (g *Generator) genInitializeClient() {
	g.w(`
	cfg := elasticsearch.Config{}
	`)
	if g.TestSuite.Type == "xpack" {
		g.w(`
	cfg.Transport = &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
		},
	}` + "\n")
	}
	g.w(`
	if os.Getenv("DEBUG") != "" {
		cfg.Logger = &elastictransport.ColorLogger{
			Output: os.Stdout,
			// EnableRequestBody: true,
			EnableResponseBody: true,
		}
	}` + "\n")
	g.w(`
	es, eserr := elasticsearch.NewClient(cfg)
	if eserr != nil {
		t.Fatalf("Error creating the client: %s\n", eserr)
	}
	`)
}
// genHelpers writes two helper closures into the generated test:
// recoverPanic, which converts a panic into t.Fatalf with the location
// of the panicking test line, and handleResponseError, which logs
// error responses together with their location.
func (g *Generator) genHelpers() {
	g.w(`recoverPanic := func(t *testing.T) {
	reLocation := regexp.MustCompile("(.*_test.go:\\d+).*")
	if rec := recover(); rec != nil {
		var loc string
		s := strings.Split(string(debug.Stack()), "\n")
		for i := len(s) - 1; i >= 0; i-- {
			if reLocation.MatchString(s[i]) {
				loc = strings.TrimSpace(s[i])
				break
			}
		}
		t.Fatalf("Panic: %s in %s", rec, reLocation.ReplaceAllString(loc, "$1"))
	}
}
_ = recoverPanic
` + "\n")
	g.w(`
	handleResponseError := func(t *testing.T, res *esapi.Response) {
		if res.IsError() {
			reLocation := regexp.MustCompile("(.*_test.go:\\d+).*")
			var loc string
			s := strings.Split(string(debug.Stack()), "\n")
			for i := len(s) - 1; i >= 0; i-- {
				if reLocation.MatchString(s[i]) {
					loc = strings.TrimSpace(s[i])
					break
				}
			}
			t.Logf("Response error: %s in %s", res, reLocation.ReplaceAllString(loc, "$1"))
		}
	}
	_ = handleResponseError
	`)
	g.w("\n\n")
}
// Reference: https://github.com/elastic/elasticsearch/blob/master/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
//
// genCommonSetup writes the "commonSetup" closure into the generated
// test. The closure resets the cluster between tests: it waits for
// cluster health, clears node shutdowns, deletes searchable-snapshot
// indices, snapshots and repositories, data streams, indices,
// templates, and aliases, then waits for yellow status. The body is a
// single raw-string template, emitted verbatim.
func (g *Generator) genCommonSetup() {
	g.w(`
	// ----- Common Setup -------------------------------------------------------------
	commonSetup := func() {
		var res *esapi.Response
		{
			res, _ = es.Cluster.Health(es.Cluster.Health.WithWaitForNoInitializingShards(true))
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
		}
		{
			type Node struct {
				UnderscoreNodes struct {
					Total int "json:\"total\""
					Successful int "json:\"successful\""
					Failed int "json:\"failed\""
				} "json:\"_nodes\""
				ClusterName string "json:\"cluster_name\""
				Nodes []struct {
					NodeId string "json:\"node_id\""
				} "json:\"nodes\""
			}
			res, _ = es.ShutdownGetNode()
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
			var r Node
			_ = json.NewDecoder(res.Body).Decode(&r)
			handleResponseError(t, res)
			for _, node := range r.Nodes {
				es.ShutdownDeleteNode(node.NodeId)
			}
		}
		{
			type Meta struct {
				Metadata struct {
					Indices map[string]interface {
					} "json:\"indices\""
				} "json:\"metadata\""
			}
			indices, _ := es.Cluster.State(
				es.Cluster.State.WithMetric("metadata"),
				es.Cluster.State.WithFilterPath("metadata.indices.*.settings.index.store.snapshot"),
			)
			var r Meta
			_ = json.NewDecoder(indices.Body).Decode(&r)
			for key, _ := range r.Metadata.Indices {
				es.Indices.Delete([]string{key})
			}
		}
		{
			type Repositories map[string]struct {
				Type string "json:\"type\""
			}
			type Snaps struct {
				Snapshots []struct {
					Snapshot string "json:\"snapshot\""
					Repository string "json:\"repository\""
				} "json:\"snapshots\""
			}
			res, _ = es.Snapshot.GetRepository(es.Snapshot.GetRepository.WithRepository("_all"))
			var rep Repositories
			_ = json.NewDecoder(res.Body).Decode(&rep)
			for repoName, repository := range rep {
				if repository.Type == "fs" {
					snapshots, _ := es.Snapshot.Get(
						repoName, []string{"_all"},
						es.Snapshot.Get.WithIgnoreUnavailable(true),
					)
					var snaps Snaps
					_ = json.NewDecoder(snapshots.Body).Decode(&snaps)
					for _, snapshot := range snaps.Snapshots {
						_, _ = es.Snapshot.Delete(repoName, []string{snapshot.Snapshot})
					}
				}
				_, _ = es.Snapshot.DeleteRepository([]string{repoName})
			}
		}
		{
			res, _ = es.Indices.DeleteDataStream(
				[]string{"*"},
				es.Indices.DeleteDataStream.WithExpandWildcards("all"),
				es.Indices.DeleteDataStream.WithExpandWildcards("hidden"))
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
		}
		{
			res, _ = es.Indices.Delete(
				[]string{"*"},
				es.Indices.Delete.WithExpandWildcards("all"))
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
		}
		{
			var r map[string]interface{}
			res, _ = es.Indices.GetTemplate()
			if res != nil && res.Body != nil {
				defer res.Body.Close()
				json.NewDecoder(res.Body).Decode(&r)
				for templateName, _ := range r {
					if strings.HasPrefix(templateName, ".") {
						continue
					}
					if templateName == "security_audit_log" {
						continue
					}
					if templateName == "logstash-index-template" {
						continue
					}
					es.Indices.DeleteTemplate(templateName)
				}
			}
		}
		{
			res, _ = es.Indices.DeleteIndexTemplate("*")
			if res != nil && res.Body != nil { defer res.Body.Close() }
		}
		{
			res, _ = es.Indices.DeleteAlias([]string{"_all"}, []string{"_all"})
			if res != nil && res.Body != nil { defer res.Body.Close() }
		}
		{
			var r map[string]interface{}
			res, _ = es.Snapshot.GetRepository()
			if res != nil && res.Body != nil {
				defer res.Body.Close()
				json.NewDecoder(res.Body).Decode(&r)
				for repositoryID, _ := range r {
					var r map[string]interface{}
					res, _ = es.Snapshot.Get(repositoryID, []string{"_all"})
					json.NewDecoder(res.Body).Decode(&r)
					if r["responses"] != nil {
						for _, vv := range r["responses"].([]interface{}) {
							for _, v := range vv.(map[string]interface{})["snapshots"].([]interface{}) {
								snapshotID, ok := v.(map[string]interface{})["snapshot"]
								if !ok {
									continue
								}
								es.Snapshot.Delete(repositoryID, []string{fmt.Sprintf("%s", snapshotID)})
							}
						}
					}
					es.Snapshot.DeleteRepository([]string{fmt.Sprintf("%s", repositoryID)})
				}
			}
		}
		{
			res, _ = es.Cluster.Health(es.Cluster.Health.WithWaitForStatus("yellow"))
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
		}
	}
	_ = commonSetup
	`)
}
// Reference: https://github.com/elastic/elasticsearch/blob/master/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java
// Reference: https://github.com/elastic/elasticsearch/blob/master/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java
//
// genXPackSetup writes the "xpackSetup" closure into the generated
// test. The closure resets xpack state between tests: data streams,
// templates, watches, non-reserved roles/users/privileges, ML
// datafeeds and jobs, rollup jobs, cancellable tasks, snapshots and
// repositories, .ml* indices, and ILM policies; it then recreates the
// x_pack_rest_user, refreshes indices, and waits for cluster quiescence.
// The body is a single raw-string template, emitted verbatim.
func (g *Generator) genXPackSetup() {
	g.w(`
	// ----- XPack Setup -------------------------------------------------------------
	xpackSetup := func() {
		var res *esapi.Response
		{
			res, _ = es.Indices.DeleteDataStream([]string{"*"})
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
		}
		{
			var r map[string]interface{}
			res, _ = es.Indices.GetTemplate()
			if res != nil && res.Body != nil {
				defer res.Body.Close()
				json.NewDecoder(res.Body).Decode(&r)
				for templateName, _ := range r {
					if strings.HasPrefix(templateName, ".") {
						continue
					}
					es.Indices.DeleteTemplate(templateName)
				}
			}
		}
		{
			res, _ = es.Watcher.DeleteWatch("my_watch")
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
		}
		{
			var r map[string]interface{}
			res, _ = es.Security.GetRole()
			if res != nil && res.Body != nil {
				defer res.Body.Close()
				json.NewDecoder(res.Body).Decode(&r)
				for k, v := range r {
					reserved, ok := v.(map[string]interface{})["metadata"].(map[string]interface{})["_reserved"].(bool)
					if ok && reserved {
						continue
					}
					es.Security.DeleteRole(k)
				}
			}
		}
		{
			var r map[string]interface{}
			res, _ = es.Security.GetUser()
			if res != nil && res.Body != nil {
				defer res.Body.Close()
				json.NewDecoder(res.Body).Decode(&r)
				for k, v := range r {
					reserved, ok := v.(map[string]interface{})["metadata"].(map[string]interface{})["_reserved"].(bool)
					if ok && reserved {
						continue
					}
					es.Security.DeleteUser(k)
				}
			}
		}
		{
			var r map[string]interface{}
			res, _ = es.Security.GetPrivileges()
			if res != nil && res.Body != nil {
				defer res.Body.Close()
				json.NewDecoder(res.Body).Decode(&r)
				for k, v := range r {
					reserved, ok := v.(map[string]interface{})["metadata"].(map[string]interface{})["_reserved"].(bool)
					if ok && reserved {
						continue
					}
					es.Security.DeletePrivileges(k, "_all")
				}
			}
		}
		{
			res, _ = es.Indices.Refresh(
				es.Indices.Refresh.WithIndex("_all"),
				es.Indices.Refresh.WithExpandWildcards("open,closed,hidden"))
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
		}
		{
			var r map[string]interface{}
			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
			defer cancel()
			es.ML.StopDatafeed("_all", es.ML.StopDatafeed.WithContext(ctx))
			res, _ = es.ML.GetDatafeeds()
			if res != nil && res.Body != nil {
				defer res.Body.Close()
				json.NewDecoder(res.Body).Decode(&r)
				for _, v := range r["datafeeds"].([]interface{}) {
					datafeed, ok := v.(map[string]interface{})
					if ok {
						datafeedID := datafeed["datafeed_id"].(string)
						es.ML.DeleteDatafeed(datafeedID)
					}
				}
			}
		}
		{
			var r map[string]interface{}
			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
			defer cancel()
			es.ML.CloseJob("_all", es.ML.CloseJob.WithContext(ctx))
			res, _ = es.ML.GetJobs()
			if res != nil && res.Body != nil {
				defer res.Body.Close()
				json.NewDecoder(res.Body).Decode(&r)
				for _, v := range r["jobs"].([]interface{}) {
					job, ok := v.(map[string]interface{})
					if ok {
						jobID := job["job_id"].(string)
						es.ML.DeleteJob(jobID)
					}
				}
			}
		}
		{
			var r map[string]interface{}
			res, _ = es.Rollup.GetJobs(es.Rollup.GetJobs.WithJobID("_all"))
			if res != nil && res.Body != nil {
				defer res.Body.Close()
				json.NewDecoder(res.Body).Decode(&r)
				for _, v := range r["jobs"].([]interface{}) {
					job, ok := v.(map[string]interface{})["config"]
					if ok {
						jobID := job.(map[string]interface{})["id"].(string)
						es.Rollup.StopJob(jobID, es.Rollup.StopJob.WithWaitForCompletion(true))
						es.Rollup.DeleteJob(jobID)
					}
				}
			}
		}
		{
			var r map[string]interface{}
			res, _ = es.Tasks.List()
			if res != nil && res.Body != nil {
				defer res.Body.Close()
				json.NewDecoder(res.Body).Decode(&r)
				for _, vv := range r["nodes"].(map[string]interface{}) {
					for _, v := range vv.(map[string]interface{})["tasks"].(map[string]interface{}) {
						cancellable, ok := v.(map[string]interface{})["cancellable"]
						if ok && cancellable.(bool) {
							taskID := fmt.Sprintf("%v:%v", v.(map[string]interface{})["node"], v.(map[string]interface{})["id"])
							ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
							defer cancel()
							es.Tasks.Cancel(es.Tasks.Cancel.WithTaskID(taskID), es.Tasks.Cancel.WithContext(ctx))
						}
					}
				}
			}
		}
		{
			var r map[string]interface{}
			res, _ = es.Snapshot.GetRepository()
			if res != nil && res.Body != nil {
				defer res.Body.Close()
				json.NewDecoder(res.Body).Decode(&r)
				for repositoryID, _ := range r {
					var r map[string]interface{}
					res, _ = es.Snapshot.Get(repositoryID, []string{"_all"})
					json.NewDecoder(res.Body).Decode(&r)
					for _, vv := range r["responses"].([]interface{}) {
						for _, v := range vv.(map[string]interface{})["snapshots"].([]interface{}) {
							snapshotID, ok := v.(map[string]interface{})["snapshot"]
							if ok {
								es.Snapshot.Delete(repositoryID, []string{fmt.Sprintf("%s", snapshotID)})
							}
						}
					}
					es.Snapshot.DeleteRepository([]string{fmt.Sprintf("%s", repositoryID)})
				}
			}
		}
		{
			res, _ = es.Indices.Delete([]string{".ml*"})
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
		}
		{
			res, _ = es.ILM.RemovePolicy("_all")
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
		}
		{
			res, _ = es.Cluster.Health(es.Cluster.Health.WithWaitForStatus("yellow"))
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
		}
		{
			res, _ = es.Security.PutUser("x_pack_rest_user", strings.NewReader(` + "`" + `{"password":"x-pack-test-password", "roles":["superuser"]}` + "`" + `), es.Security.PutUser.WithPretty())
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
		}
		{
			res, _ = es.Indices.Refresh(
				es.Indices.Refresh.WithIndex("_all"),
				es.Indices.Refresh.WithExpandWildcards("open,closed,hidden"))
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
		}
		{
			res, _ = es.Cluster.Health(es.Cluster.Health.WithWaitForStatus("yellow"))
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
		}
		{
			var i int
			for {
				i++
				var r map[string]interface{}
				res, _ = es.Cluster.PendingTasks()
				if res != nil && res.Body != nil {
					defer res.Body.Close()
					json.NewDecoder(res.Body).Decode(&r)
					if len(r["tasks"].([]interface{})) < 1 {
						break
					}
				}
				if i > 30 {
					break
				}
				time.Sleep(time.Second)
			}
		}
	}
	_ = xpackSetup
	`)
}
// genLocationYAML scans the YAML file the test was generated from and, when
// it finds the line defining this test, writes a "// Source: file:line"
// comment so readers can jump from the generated Go code back to its origin.
func (g *Generator) genLocationYAML(t Test) {
	f, err := os.Open(t.Filepath)
	if err != nil {
		// Record the failure in the generated output and bail out: without
		// the file there is nothing to scan. (The original fell through with
		// a nil *os.File and never closed the handle on success either.)
		g.w(fmt.Sprintf("// Error opening file: %s\n", err))
		return
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	var i int
	for scanner.Scan() {
		i++
		// Normalize the YAML test name: strip the trailing colon, unescape
		// embedded quotes, and drop the surrounding quotes before comparing.
		tname := scanner.Text()
		tname = strings.TrimRight(tname, `:`)
		tname = strings.NewReplacer(`\"`, `"`).Replace(tname)
		tname = strings.TrimPrefix(tname, `"`)
		tname = strings.TrimSuffix(tname, `"`)
		if tname == t.OrigName {
			// TODO: Github URL (with proper branch/commit/etc)
			g.w("\t// Source: " + t.Filepath + fmt.Sprintf(":%d", i) + "\n\t//\n")
		}
	}
	if err := scanner.Err(); err != nil {
		g.w(fmt.Sprintf("// Error reading file: %s\n", err))
	}
}
// genSkip emits a t.Skip()/t.SkipNow() call for disabled tests, either via
// the generator's own skip list (skipTests) or via the "skip" property in
// the YAML source. It reports whether a skip was generated.
func (g *Generator) genSkip(t Test) (skipped bool) {
	// Generator-side skip list first.
	if names, found := skipTests[t.BaseFilename()]; found {
		// An empty entry means "skip every test in this file".
		if len(names) == 0 {
			g.w("\t// Skipping all tests in '" + t.BaseFilename() + "'\n")
			g.w("\tt.SkipNow()\n\n")
			return true
		}
		for _, name := range names {
			if name == t.OrigName {
				g.w("\tt.SkipNow()\n\n")
				return true
			}
		}
	}

	// Skip flag coming from the YAML definition itself.
	if !t.Skip {
		return false
	}
	if t.SkipInfo == "" {
		g.w("\tt.SkipNow()\n\n")
	} else {
		g.w("\tt.Skip(" + strconv.Quote(t.SkipInfo) + ")\n\n")
	}
	return true
}
// genSetupTeardown renders a sequence of setup/teardown actions, preceded by
// the common variable section (generated for an empty Test, so no
// assertion-specific variables are declared).
func (g *Generator) genSetupTeardown(actions []Action) {
	g.genVarSection(Test{})
	for i := range actions {
		g.genAction(actions[i], false)
		g.w("\n")
	}
}
// genSteps renders the body of a single generated test: every Action,
// Assertion and Stash step in order, each preceded by a debug comment.
func (g *Generator) genSteps(t Test) {
	// When the test contains no assertions, catches or stashes, the response
	// body is never inspected, so body-handling code can be skipped.
	skipBody := !t.Steps.ContainsAssertion() && !t.Steps.ContainsCatch() && !t.Steps.ContainsStash()
	g.genVarSection(t, skipBody)
	for _, step := range t.Steps {
		// Bind the concrete step type once instead of re-asserting it on
		// every use (the original repeated step.(Action) etc. throughout).
		switch s := step.(type) {
		case Action:
			// Generate debug info
			var dbg strings.Builder
			dbg.WriteString("\t\t// => " + s.Method() + "(")
			var j int
			for k, v := range s.Params() {
				j++
				dbg.WriteString(k + ": " + strings.Replace(fmt.Sprintf("%v", v), "\n", "|", -1))
				if j < len(s.Params()) {
					dbg.WriteString(", ")
				}
			}
			dbg.WriteString(") ")
			// Pad the banner comment to a fixed width.
			pad := 101 - dbg.Len()
			if pad < 0 {
				pad = 0
			}
			g.w(dbg.String() + strings.Repeat("-", pad) + "\n\t\t//\n")
			// Generate the action
			g.genAction(s, skipBody)
			g.w("\t\t// " + strings.Repeat("-", 96) + "\n\n")
		case Assertion:
			// Generate debug info
			g.w("\t\t// ~> ")
			g.w(fmt.Sprintf("%q: ", s.operation))
			g.w(strings.Replace(fmt.Sprintf("%s", s.payload), "\n", "|", -1))
			g.w("\n")
			// Generate the assertion
			g.genAssertion(s)
			g.w("\n")
		case Stash:
			// Generate setting the stash
			g.genStashSet(s)
			g.w("\n")
		default:
			panic(fmt.Sprintf("Unknown step %T", step))
		}
	}
}
// genVarSection emits the `var (...)` block shared by every generated test
// body, plus blank-identifier assignments that keep the compiler happy when a
// particular test does not use all of the variables.
//
// The optional skipBody argument (variadic so existing callers without it
// keep working) suppresses the body-handling variables and the
// handleResponseBody helper when the test never inspects a response body.
func (g *Generator) genVarSection(t Test, skipBody ...bool) {
	// One flag replaces the original's twice-repeated
	// `len(skipBody) < 1 || (len(skipBody) > 0 && skipBody[0] == false)`.
	// NOTE(review): the original also &&-ed this with
	// `(t.Steps.ContainsAssertion() || t.Steps.ContainsCatch() || true)`,
	// which is a tautology ("|| true") and has been dropped — behavior is
	// unchanged.
	withBody := len(skipBody) < 1 || !skipBody[0]

	g.w("\t\tvar (\n")
	g.w("\t\t\treq esapi.Request\n")
	g.w("\t\t\tres *esapi.Response\n")
	g.w("\t\t\terr error\n\n")
	g.w("\t\t\tstash = make(map[string]interface{}, 0)\n\n")
	if withBody {
		g.w("\t\t\tbody []byte\n")
		g.w("\t\t\tmapi map[string]interface{}\n")
		g.w("\t\t\tslic []interface{}\n")
	}
	if t.Steps.ContainsAssertion("is_false", "is_true") {
		g.w("\n\t\t\tvalue reflect.Value\n")
	}
	g.w("\n")
	g.w("\t\t\tassertion bool\n")
	g.w("\t\t\tactual   interface{}\n")
	g.w("\t\t\texpected interface{}\n")
	g.w("\n")
	if t.Steps.ContainsAssertion("match", "match-regexp") {
		g.w("\n\t\t\tre *regexp.Regexp\n")
		g.w("\t\t\tmatch bool\n")
	}
	g.w("\t\t)\n\n")

	if withBody {
		g.w("\t\t_ = mapi\n")
		g.w("\t\t_ = slic\n")
		g.w("\n")
		// handleResponseBody reads the whole response body, restores it for
		// later reads, and deserializes it into mapi/slic depending on the
		// Content-Type and the body's first bytes.
		g.w(`handleResponseBody := func(t *testing.T, res *esapi.Response) {
	// Reset deserialized structures
	mapi = make(map[string]interface{})
	slic = make([]interface{}, 0)
	var err error
	body, err = ioutil.ReadAll(res.Body)
	if err != nil {
		t.Fatalf("Error reading body: %s", err)
	}
	res.Body.Close()
	res.Body = ioutil.NopCloser(bytes.NewBuffer(body))
	if len(body) < 1 {
		// FIXME: Hack to prevent EOF errors
		return
	}
	if len(res.Header) > 0 {
		if strings.Contains(res.Header["Content-Type"][0], "text/plain") {
			return
		}
		if strings.Contains(res.Header["Content-Type"][0], "yaml") {
			if strings.HasPrefix(string(body), "---\n-") {
				if err := encyaml.NewDecoder(res.Body).Decode(&slic); err != nil {
					t.Fatalf("Error parsing the response body: %s", err)
				}
			} else {
				if err := encyaml.NewDecoder(res.Body).Decode(&mapi); err != nil {
					t.Fatalf("Error parsing the response body: %s", err)
				}
			}
			return
		}
	}
	d := encjson.NewDecoder(res.Body)
	d.UseNumber()
	if strings.HasPrefix(string(body), "[") {
		if err := d.Decode(&slic); err != nil {
			t.Fatalf("Error parsing the response body: %s", err)
		}
	} else {
		if err := d.Decode(&mapi); err != nil {
			t.Fatalf("Error parsing the response body: %s", err)
		}
	}
}` + "\n")
	}
	g.w("\n")
	g.w("\t\t_ = stash\n")
	if t.Steps.ContainsAssertion("is_false", "is_true") {
		g.w("\t\t_ = value\n")
	}
	g.w("\t\t_ = assertion\n")
	g.w("\t\t_ = actual\n")
	g.w("\t\t_ = expected\n")
	if t.Steps.ContainsAssertion("match", "match-regexp") {
		g.w("\n")
		g.w("\t\t_ = re\n")
		g.w("\t\t_ = match\n")
	}
	g.w("\n")
}
// genAction renders one API call of the generated test: the esapi request
// struct literal with every parameter converted from its YAML value, any
// custom headers, the req.Do() invocation, and the response error/body
// handling hooks.
//
// The optional skipBody flag mirrors genVarSection/genSteps: when true the
// generated code never parses the response body.
func (g *Generator) genAction(a Action, skipBody ...bool) {
	// Matches "${var}" placeholders referring to stashed values.
	varDetection := regexp.MustCompile(".*(\\$\\{(\\w+)\\}).*")

	// Initialize the request
	g.w("\t\treq = esapi." + a.Request() + "{\n")

	// Pass the parameters
	for k, v := range a.Params() {
		// fmt.Printf("%s.%s: <%T> %v\n", a.Request(), k, v, v)

		// Rewrite "$stashed_value" references into stash lookups.
		if strings.HasPrefix(fmt.Sprintf("%s", v), "$") {
			v = `stash[` + strconv.Quote(strings.ReplaceAll(strings.ReplaceAll(fmt.Sprintf("%s", v), "{", ""), "}", "")) + `]`
		}

		switch v.(type) {
		case bool:
			g.w("\t\t\t" + k + ": ")
			typ, ok := apiRegistry[a.Request()][k]
			if !ok {
				panic(fmt.Sprintf("%s.%s: field not found", a.Request(), k))
			}
			// Render the boolean according to the field's declared Go type.
			switch typ {
			case "bool":
				g.w(strconv.FormatBool(v.(bool)))
			case "*bool":
				g.w(`esapi.BoolPtr(` + strconv.FormatBool(v.(bool)) + `)`)
			case "string":
				g.w(`"` + strconv.FormatBool(v.(bool)) + `"`)
			case "[]string":
				// TODO: Listify
				g.w(`[]string{"` + strconv.FormatBool(v.(bool)) + `"}`)
			default:
				g.w(strconv.FormatBool(v.(bool)))
			}
			g.w(",\n")

		case string:
			if k == "Body" {
				g.w("\t\t\t" + k + ": ")
				body := v.(string)
				if varDetection.MatchString(body) {
					// Interpolate stashed values into the body at runtime
					// via strings.NewReplacer.
					g.w("strings.NewReader(strings.NewReplacer(")
					matchs := varDetection.FindAllStringSubmatch(body, -1)
					for _, match := range matchs {
						bodyVar := match[1]
						stashVar := fmt.Sprintf(`stash["$%s"].(string)`, match[2])
						g.w(fmt.Sprintf("`%s`, %s", bodyVar, stashVar))
					}
					g.w(").Replace(`" + body + "`))")
				} else {
					if !strings.HasSuffix(body, "\n") {
						body = body + "\n"
					}
					g.w("strings.NewReader(`" + body + "`)")
				}
			} else {
				g.w("\t\t\t" + k + ": ")
				// TODO: Handle comma separated strings as lists
				// fmt.Printf("%s: %#v\n", a.Request(), apiRegistry[a.Request()])
				// fmt.Printf("%s: %#v\n", k, apiRegistry[a.Request()][k])
				typ, ok := apiRegistry[a.Request()][k]
				if !ok {
					panic(fmt.Sprintf("%s.%s: field not found", a.Request(), k))
				}
				var value string
				if strings.HasPrefix(v.(string), "stash[") {
					// The value is a stash lookup: emit a runtime conversion
					// from interface{} to the field's declared type.
					switch typ {
					case "bool":
						value = `fmt.Sprintf("%v", ` + v.(string) + `)`
					case "string":
						value = fmt.Sprintf("%s.(string)", v)
					case "[]string":
						// TODO: Comma-separated list => Quoted list
						value = fmt.Sprintf(`[]string{%s.(string)}`, v)
					case "int":
						// BUG FIX: the original template emitted the
						// `case json.Number:` branch *outside* the closed
						// switch (and an unreachable panic before the final
						// brace), producing uncompilable generated code.
						// It now mirrors the correct "*int" template below.
						value = `func() int {
	switch ` + v.(string) + `.(type) {
	case int:
		return ` + v.(string) + `.(int)
	case float64:
		return int(` + v.(string) + `.(float64))
	case json.Number:
		v, _ := ` + v.(string) + `.(encjson.Number).Int64()
		vv := int(v)
		return vv
	}
	panic(fmt.Sprintf(` + "`" + `Unexpected type %T for ` + v.(string) + "`" + `, ` + v.(string) + `))
}()`
					case "*int":
						value = `func() *int {
	switch ` + v.(string) + `.(type) {
	case int:
		v := ` + v.(string) + `.(int)
		return &v
	case float64:
		v := int(` + v.(string) + `.(float64))
		return &v
	case json.Number:
		v, _ := ` + v.(string) + `.(encjson.Number).Int64()
		vv := int(v)
		return &vv
	}
	panic(fmt.Sprintf(` + "`" + `Unexpected type %T for ` + v.(string) + "`" + `, ` + v.(string) + `))
}()`
					case "time.Duration":
						value = `fmt.Sprintf("%d", ` + v.(string) + `)`
					default:
						panic(fmt.Sprintf("Unexpected type %q for value %v", typ, v))
					}
				} else {
					switch typ {
					case "[]string":
						value = `[]string{` + fmt.Sprintf("%q", v) + `}`
					case "time.Duration":
						// re := regexp.MustCompile("^(\\d+).*")
						// value = re.ReplaceAllString(fmt.Sprintf("%s", v), "$1")
						// YAML durations like "1d" are not accepted by
						// time.ParseDuration; convert days to hours first.
						inputValue := v.(string)
						if strings.HasSuffix(inputValue, "d") {
							inputValue = inputValue[:len(inputValue)-1]
							numericValue, err := strconv.Atoi(inputValue)
							if err != nil {
								panic(fmt.Sprintf("Cannot convert duration [%s]: %s", inputValue, err))
							}
							// Convert to hours
							inputValue = fmt.Sprintf("%dh", numericValue*24)
						}
						dur, err := time.ParseDuration(inputValue)
						if err != nil {
							panic(fmt.Sprintf("Cannot parse duration [%s]: %s", v, err))
						}
						value = fmt.Sprintf("%d", dur.Nanoseconds())
					default:
						if strings.HasSuffix(k, "ID") {
							value = fmt.Sprintf("url.QueryEscape(%q)", v)
						} else {
							value = fmt.Sprintf("%q", v)
						}
					}
				}
				g.w(value)
			}
			g.w(",\n")

		case int, *int, float64:
			g.w("\t\t\t" + k + ": ")
			typ, ok := apiRegistry[a.Request()][k]
			if !ok {
				panic(fmt.Sprintf("%s.%s: field not found", a.Request(), k))
			}
			var value string
			switch typ {
			case "string":
				value = `"` + fmt.Sprintf("%d", v) + `"`
			case "[]string":
				value = `[]string{"` + fmt.Sprintf("%d", v) + `"}`
			case "time.Duration":
				re := regexp.MustCompile("^(\\d+).*")
				value = re.ReplaceAllString(fmt.Sprintf("%d", v), "$1")
			case "*int":
				// Pointer fields are written directly; `value` stays empty.
				switch v.(type) {
				case int:
					g.w(`esapi.IntPtr(` + fmt.Sprintf("%d", v) + `)`)
				case float64:
					if vv, ok := v.(float64); ok {
						g.w(`esapi.IntPtr(` + fmt.Sprintf("%d", int(vv)) + `)`)
					}
				default:
					panic(fmt.Sprintf("Unexpected type [%T] for [%s]", v, k))
				}
			default:
				value = fmt.Sprintf("%v", v)
			}
			g.w(value)
			g.w(",\n")

		case []interface{}:
			g.w("\t\t\t" + k + ": ")
			typ, ok := apiRegistry[a.Request()][k]
			if !ok {
				panic(fmt.Sprintf("%s.%s: field not found", a.Request(), k))
			}
			switch typ {
			case "string":
				switch v.(type) {
				case string:
					g.w("`" + v.(string) + "`")
				case []interface{}:
					vvv := make([]string, 0)
					for _, vv := range v.([]interface{}) {
						vvv = append(vvv, fmt.Sprintf("%s", vv))
					}
					g.w("`" + strings.Join(vvv, ",") + "`")
				default:
					panic(fmt.Sprintf("<%s> %s{}.%s: unexpected value <%T> %#v", typ, a.Request(), k, v, v))
				}
			case "[]string":
				qv := make([]string, 0)
				for _, vv := range v.([]interface{}) {
					// TODO: Check type
					qv = append(qv, fmt.Sprintf("%q", vv.(string)))
				}
				g.w(`[]string{` + strings.Join(qv, ",") + `}`)
			case "io.Reader":
				// Serialize Bulk payloads ...
				if k == "Body" {
					var b strings.Builder
					for _, vv := range v.([]interface{}) {
						switch vv.(type) {
						case string:
							b.WriteString(vv.(string))
						default:
							j, err := json.Marshal(convert(vv))
							if err != nil {
								panic(fmt.Sprintf("%s{}.%s: %s (%s)", a.Request(), k, err, v))
							}
							b.WriteString(string(j))
						}
						b.WriteString("\n")
					}
					b.WriteString("\n")
					g.w("\t\tstrings.NewReader(`" + b.String() + "`)")
					// ... or just convert the value to JSON
				} else {
					j, err := json.Marshal(convert(v))
					if err != nil {
						panic(fmt.Sprintf("%s{}.%s: %s (%s)", a.Request(), k, err, v))
					}
					g.w("\t\tstrings.NewReader(`" + fmt.Sprintf("%s", j) + "`)")
				}
			}
			g.w(",\n")

		case map[interface{}]interface{}:
			g.w("\t\t\t" + k + ": ")
			// vv := unstash(convert(v).(map[string]interface{}))
			// fmt.Println(vv)
			j, err := json.Marshal(convert(v))
			if err != nil {
				panic(fmt.Sprintf("JSON parse error: %s; %s", err, v))
			} else {
				// Unstash values
				reStash := regexp.MustCompile(`("\$[^"]+")`)
				j = reStash.ReplaceAll(j, []byte("` + strconv.Quote(fmt.Sprintf(\"%v\", stash[$1])) + `"))
				g.w("\t\tstrings.NewReader(`" + fmt.Sprintf("%s", j) + "`)")
				g.w(",\n")
			}

		default:
			g.w(fmt.Sprintf("\t\t// TODO: %s (%v)\n", k, v))
		}
	}

	if len(a.headers) > 0 {
		// Cat APIs requested as YAML need the Format parameter as well.
		if strings.Contains(a.headers["Accept"], "yaml") && strings.HasPrefix(a.Request(), "Cat") {
			g.w("\t\t" + `Format: "yaml",` + "\n")
		}
		g.w("\t\tHeader: http.Header{\n")
		for name, value := range a.headers {
			// The JSON content type is the client default; skip it.
			if name == "Content-Type" && value == "application/json" {
				continue
			}
			if name == "Authorization" {
				auth_fields := strings.Split(value, " ")
				auth_name := auth_fields[0]
				auth_value := auth_fields[1]
				if strings.HasPrefix(auth_value, "$") {
					auth_value = `fmt.Sprintf("%s", stash["` + strings.ReplaceAll(strings.ReplaceAll(auth_value, "{", ""), "}", "") + `"])`
				} else {
					auth_value = `"` + auth_value + `"`
				}
				g.w("\t\t\t" + `"Authorization": []string{"` + auth_name + ` " + ` + auth_value + `},` + "\n")
			} else {
				g.w("\t\t\t\"" + name + "\": []string{\"" + value + "\"},\n")
			}
		}
		g.w("\t\t},\n")
	}

	g.w("\t\t}\n\n")

	// Get response
	g.w("\t\tres, err = req.Do(context.Background(), es)\n")
	g.w(`		if err != nil {
			t.Fatalf("ERROR: %s", err)
		}
		defer res.Body.Close()
`)
	g.w("\n\n")

	if len(a.catch) < 1 {
		// Handle error responses
		g.w(`		handleResponseError(t, res)` + "\n")
	} else {
		// TODO: Test catch
	}

	if len(skipBody) < 1 || (len(skipBody) > 0 && skipBody[0] == false) {
		// Read and parse the body
		g.w(`		handleResponseBody(t, res)` + "\n")
	}
}
// genAssertion renders one assertion: the condition opener produced by the
// Assertion itself, its failure message, and the closing brace.
func (g *Generator) genAssertion(a Assertion) {
	for _, chunk := range []string{
		a.Condition(),   // opens the `if ...` block
		a.Error() + "\n", // failure message body
		"}\n",           // close the condition
	} {
		g.w(chunk)
	}
}
// genStashSet renders the code that saves a response value into the stash
// under the step's key, handling the special "_arbitrary_key_" form and the
// "#base64EncodeCredentials" transformation.
func (g *Generator) genStashSet(s Stash) {
	g.w(fmt.Sprintf("// Set %q\n", s.Key()))

	var stash string
	value := s.ExpandedValue()
	switch {
	case strings.Contains(value, "_arbitrary_key_"):
		// Strip the "_arbitrary_key_" marker (and its leading dot, if any)
		// to recover the field path. BUG FIX: the original used
		// strings.Trim(..., "._arbitrary_key_"), which treats the second
		// argument as a SET of characters and could eat leading/trailing
		// letters of the key itself (e.g. "tokens" -> "okens").
		key := strings.TrimSuffix(s.FirstValue(), "_arbitrary_key_")
		key = strings.TrimSuffix(key, ".")
		stash = `for k, _ := range mapi["` + key + `"].(map[string]interface{}) {
	stash["` + s.Key() + `"] = k
}
`
	case strings.HasPrefix(value, `mapi["#`):
		// "#function(args)" transformations on response values.
		switch {
		case strings.HasPrefix(value, `mapi["#base64EncodeCredentials`):
			// Join the named response fields with ":" and base64-encode them.
			i, j := strings.Index(value, "("), strings.Index(value, ")")
			values := strings.Split(value[i+1:j], ",")
			value = `base64.StdEncoding.EncodeToString([]byte(`
			value += `strings.Join([]string{`
			for n, v := range values {
				value += `mapi["` + v + `"].(string)`
				if n < len(values)-1 {
					value += ","
				}
			}
			value += `}, ":")`
			value += `))`
			stash = fmt.Sprintf("stash[%q] = %s\n", s.Key(), value)
		default:
			panic(fmt.Sprintf("Unknown transformation: %s", value))
		}
	default:
		stash = fmt.Sprintf("stash[%q] = %s\n", s.Key(), value)
	}
	g.w(stash)
}
// convert recursively rewrites the map type produced by the YAML decoder
// (map[interface{}]interface{}) into the JSON-friendly
// map[string]interface{}, stringifying non-string keys. Slices are converted
// element-by-element in place; any other value is returned untouched.
func convert(i interface{}) interface{} {
	switch v := i.(type) {
	case map[interface{}]interface{}:
		out := map[string]interface{}{}
		for key, val := range v {
			var name string
			switch k := key.(type) {
			case string:
				name = k
			case int:
				name = fmt.Sprintf("%d", k)
			default:
				name = fmt.Sprintf("%v", k)
			}
			out[name] = convert(val)
		}
		return out
	case []interface{}:
		// Convert elements in place, reusing the backing array.
		for idx := range v {
			v[idx] = convert(v[idx])
		}
	}
	return i
}
// [Tests] Remove calendar, filters, transforms & trained models (#420)
// during xpack preflight
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package gentests
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"golang.org/x/tools/imports"
)
// goruncache memoizes the output of gorun() keyed by the program source, so
// identical generated snippets are only compiled and executed once.
var (
	goruncache map[string]string
)

// init allocates the gorun result cache.
func init() {
	goruncache = make(map[string]string)
}
// Generator represents the "gentests" generator.
//
// It accumulates the generated Go source for a single TestSuite in an
// in-memory buffer; use Output or OutputFormatted to retrieve the result.
type Generator struct {
	b bytes.Buffer // accumulated generated source

	TestSuite TestSuite // the YAML test suite being rendered
}
// Output returns the generator output.
//
// It assembles the complete generated test file for the suite: file header,
// client initialization, helpers, common (and xpack) setup closures,
// optional suite-level setup/teardown, and one t.Run block per test.
func (g *Generator) Output() (io.Reader, error) {
	name := g.TestSuite.Name()
	if g.TestSuite.Type == "xpack" {
		name = "Xpack_" + name
	}

	g.genFileHeader()
	g.w("func Test" + name + "(t *testing.T) {\n")
	g.genInitializeClient()
	g.genHelpers()
	g.genCommonSetup()
	if g.TestSuite.Type == "xpack" {
		g.genXPackSetup()
	}

	if len(g.TestSuite.Setup) > 0 {
		g.w("// ----- Test Suite Setup --------------------------------------------------------\n")
		g.w("testSuiteSetup := func() {\n")
		g.genSetupTeardown(g.TestSuite.Setup)
		g.w("}\n")
		g.w("_ = testSuiteSetup\n")
		g.w("// --------------------------------------------------------------------------------\n")
		g.w("\n")
	}
	if len(g.TestSuite.Teardown) > 0 {
		g.w("\t// Teardown\n")
		g.w("\tdefer func(t *testing.T) {\n")
		g.genSetupTeardown(g.TestSuite.Teardown)
		g.w("\t}(t)\n")
	}

	for i, t := range g.TestSuite.Tests {
		g.w("\n")
		g.genLocationYAML(t)
		g.w("\t" + `t.Run("` + strings.ReplaceAll(t.Name, " ", "_") + `", ` + "func(t *testing.T) {\n")
		if !g.genSkip(t) {
			g.w("\tdefer recoverPanic(t)\n")
			g.w("\tcommonSetup()\n")
			if g.TestSuite.Type == "xpack" {
				g.w("\txpackSetup()\n")
			}
			if len(g.TestSuite.Setup) > 0 {
				g.w("\ttestSuiteSetup()\n")
			}
			g.w("\n")
			if len(t.Setup) > 0 {
				g.w("\t// Test setup\n")
				g.genSetupTeardown(t.Setup)
			}
			if len(t.Teardown) > 0 {
				g.w("\t// Test teardown\n")
				// BUG FIX: the original emitted `defer func(t) {` followed by
				// `}(t *testing.T)` — the parameter type on the call site and
				// a bare `t` in the signature — producing uncompilable
				// generated code. The suite-level teardown above shows the
				// correct form, mirrored here.
				g.w("\tdefer func(t *testing.T) {\n")
				g.genSetupTeardown(t.Teardown)
				g.w("\t}(t)\n")
			}
			if len(t.Setup) > 0 || len(t.Teardown) > 0 {
				g.w("\n")
			}
			g.genSteps(t)
		}
		g.w("\t})\n")
		if i < len(g.TestSuite.Tests)-1 {
			g.w("\n")
		}
	}
	g.w("}\n")

	return bytes.NewReader(g.b.Bytes()), nil
}
// OutputFormatted returns a formatted generator output.
//
// It runs the raw output of Output through golang.org/x/tools/imports
// (goimports-style processing), which both formats the generated code and
// fixes up its import block. On a formatting error, the unformatted source
// is returned alongside the error so callers can dump it for debugging.
func (g *Generator) OutputFormatted() (io.Reader, error) {
	out, err := g.Output()
	if err != nil {
		return bytes.NewReader(g.b.Bytes()), err
	}
	// Keep an untouched copy of the unformatted source; it is returned as-is
	// when imports.Process rejects the generated code below.
	var b bytes.Buffer
	if _, err := io.Copy(&b, out); err != nil {
		return bytes.NewReader(g.b.Bytes()), err
	}

	fout, err := imports.Process(
		"",
		g.b.Bytes(),
		&imports.Options{
			AllErrors:  true,
			Comments:   true,
			FormatOnly: false,
			TabIndent:  true,
			TabWidth:   1,
		})
	if err != nil {
		return bytes.NewReader(b.Bytes()), err
	}

	// Replace the buffer content with the formatted source.
	g.b.Reset()
	g.b.Write(fout)

	return bytes.NewReader(fout), nil
}
// w appends s to the generator's output buffer.
func (g *Generator) w(s string) {
	io.WriteString(&g.b, s)
}
// gorun compiles and executes a small Go program (used to discover the type
// of generated struct fields) via `go run` and returns its standard output.
// Results are memoized in goruncache keyed by the program source.
//
// NOTE(review): the temporary directory is created under the *relative*
// path "tmp", so a ./tmp directory must already exist in the working
// directory — TODO confirm this is intentional.
func (g *Generator) gorun(code string) (string, error) {
	// Cache hit: the same snippet was already compiled and run.
	if goruncache[code] != "" {
		return goruncache[code], nil
	}
	dir, err := ioutil.TempDir("tmp", "gorun")
	if err != nil {
		return "", fmt.Errorf("gorun: %s", err)
	}
	f, err := os.Create(filepath.Join(dir, "type_for_struct_field.go"))
	if err != nil {
		return "", fmt.Errorf("gorun: %s", err)
	}
	// Clean up both the file handle and the whole scratch directory.
	defer func() {
		f.Close()
		os.RemoveAll(dir)
	}()
	// fmt.Println(code)
	if err := ioutil.WriteFile(f.Name(), []byte(code), 0644); err != nil {
		return "", fmt.Errorf("gorun: %s", err)
	}
	cmd := exec.Command("go", "run", f.Name())
	out, err := cmd.Output()
	if err != nil {
		// Surface the compiler/runtime stderr when available.
		if e, ok := err.(*exec.ExitError); ok {
			return "", fmt.Errorf("gorun: %s", e.Stderr)
		}
		return "", fmt.Errorf("gorun: %s", err)
	}
	goruncache[code] = string(out)
	return string(out), nil
}
// genFileHeader writes the "Code generated ... DO NOT EDIT" banner, the
// package clause, and the static import block of the generated test file.
func (g *Generator) genFileHeader() {
	g.w("// Code generated")
	// NOTE(review): EsVersion participates in this condition but is never
	// printed — only GitCommit/GitTag appear in the banner. Confirm whether
	// the version was meant to be emitted as well.
	if EsVersion != "" || GitCommit != "" || GitTag != "" {
		g.w(" from YAML test suite version")
		if GitCommit != "" {
			g.w(fmt.Sprintf(" %s", GitCommit))
			if GitTag != "" {
				g.w(fmt.Sprintf("|%s", GitTag))
			}
		}
	}
	g.w(" -- DO NOT EDIT\n")
	g.w("\n")
	g.w("package esapi_test\n")
	// Static imports plus blank-identifier uses so the generated file
	// compiles even when a given test exercises only a subset of them.
	g.w(`
import (
	encjson "encoding/json"
	encyaml "gopkg.in/yaml.v2"
	"fmt"
	"context"
	"crypto/tls"
	"os"
	"net/url"
	"testing"
	"time"
	"github.com/elastic/go-elasticsearch/v8"
	"github.com/elastic/go-elasticsearch/v8/esapi"
	"github.com/elastic/elastic-transport-go/v8/elastictransport"
)
var (
	// Prevent compilation errors for unused packages
	_ = fmt.Printf
	_ = encjson.NewDecoder
	_ = encyaml.NewDecoder
	_ = tls.Certificate{}
	_ = url.QueryEscape
)` + "\n")
}
// genInitializeClient emits the code that builds the elasticsearch client
// used by the generated tests: xpack suites get a TLS-verification-skipping
// transport, and the DEBUG environment variable enables a color logger with
// response bodies.
func (g *Generator) genInitializeClient() {
	g.w(`
	cfg := elasticsearch.Config{}
	`)
	if g.TestSuite.Type == "xpack" {
		// The xpack test cluster runs with self-signed TLS certificates.
		g.w(`
	cfg.Transport = &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
		},
	}` + "\n")
	}
	g.w(`
	if os.Getenv("DEBUG") != "" {
		cfg.Logger = &elastictransport.ColorLogger{
			Output: os.Stdout,
			// EnableRequestBody: true,
			EnableResponseBody: true,
		}
	}` + "\n")
	g.w(`
	es, eserr := elasticsearch.NewClient(cfg)
	if eserr != nil {
		t.Fatalf("Error creating the client: %s\n", eserr)
	}
	`)
}
// genHelpers emits the per-test helper closures: recoverPanic, which turns a
// panic into t.Fatalf with the originating _test.go location extracted from
// the stack trace, and handleResponseError, which logs error responses with
// their location.
func (g *Generator) genHelpers() {
	g.w(`recoverPanic := func(t *testing.T) {
	reLocation := regexp.MustCompile("(.*_test.go:\\d+).*")
	if rec := recover(); rec != nil {
		var loc string
		s := strings.Split(string(debug.Stack()), "\n")
		for i := len(s) - 1; i >= 0; i-- {
			if reLocation.MatchString(s[i]) {
				loc = strings.TrimSpace(s[i])
				break
			}
		}
		t.Fatalf("Panic: %s in %s", rec, reLocation.ReplaceAllString(loc, "$1"))
	}
}
_ = recoverPanic
` + "\n")
	g.w(`
	handleResponseError := func(t *testing.T, res *esapi.Response) {
		if res.IsError() {
			reLocation := regexp.MustCompile("(.*_test.go:\\d+).*")
			var loc string
			s := strings.Split(string(debug.Stack()), "\n")
			for i := len(s) - 1; i >= 0; i-- {
				if reLocation.MatchString(s[i]) {
					loc = strings.TrimSpace(s[i])
					break
				}
			}
			t.Logf("Response error: %s in %s", res, reLocation.ReplaceAllString(loc, "$1"))
		}
	}
	_ = handleResponseError
`)
	g.w("\n\n")
}
// Reference: https://github.com/elastic/elasticsearch/blob/master/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
//
// genCommonSetup emits the commonSetup closure that resets cluster state
// before each generated test: it waits for cluster health, removes node
// shutdown records, searchable-snapshot indices, snapshots and repositories,
// data streams, indices, templates and aliases, then waits for yellow status.
func (g *Generator) genCommonSetup() {
	g.w(`
	// ----- Common Setup -------------------------------------------------------------
	commonSetup := func() {
		var res *esapi.Response
		{
			res, _ = es.Cluster.Health(es.Cluster.Health.WithWaitForNoInitializingShards(true))
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
		}
		{
			type Node struct {
				UnderscoreNodes struct {
					Total      int "json:\"total\""
					Successful int "json:\"successful\""
					Failed     int "json:\"failed\""
				} "json:\"_nodes\""
				ClusterName string "json:\"cluster_name\""
				Nodes       []struct {
					NodeId string "json:\"node_id\""
				} "json:\"nodes\""
			}
			res, _ = es.ShutdownGetNode()
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
			var r Node
			_ = json.NewDecoder(res.Body).Decode(&r)
			handleResponseError(t, res)
			for _, node := range r.Nodes {
				es.ShutdownDeleteNode(node.NodeId)
			}
		}
		{
			type Meta struct {
				Metadata struct {
					Indices map[string]interface {
					} "json:\"indices\""
				} "json:\"metadata\""
			}
			indices, _ := es.Cluster.State(
				es.Cluster.State.WithMetric("metadata"),
				es.Cluster.State.WithFilterPath("metadata.indices.*.settings.index.store.snapshot"),
			)
			var r Meta
			_ = json.NewDecoder(indices.Body).Decode(&r)
			for key, _ := range r.Metadata.Indices {
				es.Indices.Delete([]string{key})
			}
		}
		{
			type Repositories map[string]struct {
				Type string "json:\"type\""
			}
			type Snaps struct {
				Snapshots []struct {
					Snapshot   string "json:\"snapshot\""
					Repository string "json:\"repository\""
				} "json:\"snapshots\""
			}
			res, _ = es.Snapshot.GetRepository(es.Snapshot.GetRepository.WithRepository("_all"))
			var rep Repositories
			_ = json.NewDecoder(res.Body).Decode(&rep)
			for repoName, repository := range rep {
				if repository.Type == "fs" {
					snapshots, _ := es.Snapshot.Get(
						repoName, []string{"_all"},
						es.Snapshot.Get.WithIgnoreUnavailable(true),
					)
					var snaps Snaps
					_ = json.NewDecoder(snapshots.Body).Decode(&snaps)
					for _, snapshot := range snaps.Snapshots {
						_, _ = es.Snapshot.Delete(repoName, []string{snapshot.Snapshot})
					}
				}
				_, _ = es.Snapshot.DeleteRepository([]string{repoName})
			}
		}
		{
			res, _ = es.Indices.DeleteDataStream(
				[]string{"*"},
				es.Indices.DeleteDataStream.WithExpandWildcards("all"),
				es.Indices.DeleteDataStream.WithExpandWildcards("hidden"))
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
		}
		{
			res, _ = es.Indices.Delete(
				[]string{"*"},
				es.Indices.Delete.WithExpandWildcards("all"))
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
		}
		{
			var r map[string]interface{}
			res, _ = es.Indices.GetTemplate()
			if res != nil && res.Body != nil {
				defer res.Body.Close()
				json.NewDecoder(res.Body).Decode(&r)
				for templateName, _ := range r {
					if strings.HasPrefix(templateName, ".") {
						continue
					}
					if templateName == "security_audit_log" {
						continue
					}
					if templateName == "logstash-index-template" {
						continue
					}
					es.Indices.DeleteTemplate(templateName)
				}
			}
		}
		{
			res, _ = es.Indices.DeleteIndexTemplate("*")
			if res != nil && res.Body != nil { defer res.Body.Close() }
		}
		{
			res, _ = es.Indices.DeleteAlias([]string{"_all"}, []string{"_all"})
			if res != nil && res.Body != nil { defer res.Body.Close() }
		}
		{
			var r map[string]interface{}
			res, _ = es.Snapshot.GetRepository()
			if res != nil && res.Body != nil {
				defer res.Body.Close()
				json.NewDecoder(res.Body).Decode(&r)
				for repositoryID, _ := range r {
					var r map[string]interface{}
					res, _ = es.Snapshot.Get(repositoryID, []string{"_all"})
					json.NewDecoder(res.Body).Decode(&r)
					if r["responses"] != nil {
						for _, vv := range r["responses"].([]interface{}) {
							for _, v := range vv.(map[string]interface{})["snapshots"].([]interface{}) {
								snapshotID, ok := v.(map[string]interface{})["snapshot"]
								if !ok {
									continue
								}
								es.Snapshot.Delete(repositoryID, []string{fmt.Sprintf("%s", snapshotID)})
							}
						}
					}
					es.Snapshot.DeleteRepository([]string{fmt.Sprintf("%s", repositoryID)})
				}
			}
		}
		{
			res, _ = es.Cluster.Health(es.Cluster.Health.WithWaitForStatus("yellow"))
			if res != nil && res.Body != nil {
				defer res.Body.Close()
			}
		}
	}
	_ = commonSetup
`)
}
// Reference: https://github.com/elastic/elasticsearch/blob/master/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java
// Reference: https://github.com/elastic/elasticsearch/blob/master/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java
//
func (g *Generator) genXPackSetup() {
g.w(`
// ----- XPack Setup -------------------------------------------------------------
xpackSetup := func() {
var res *esapi.Response
{
res, _ = es.Indices.DeleteDataStream([]string{"*"})
if res != nil && res.Body != nil {
defer res.Body.Close()
}
}
{
var r struct {
Transforms []struct {
Id string ` + "`json:\"id\"`" + `
} ` + "`json:\"transforms\"`" + `
}
res, _ := es.TransformGetTransform(es.TransformGetTransform.WithTransformID("_all"))
if res != nil && res.Body != nil {
defer res.Body.Close()
json.NewDecoder(res.Body).Decode(&r)
}
for _, transform := range r.Transforms {
id := transform.Id
res, _ := es.TransformDeleteTransform(id)
if res != nil && res.Body != nil {
defer res.Body.Close()
}
}
}
{
var r struct {
TrainedModelConfigs []struct {
ModelId string ` + "`json:\"model_id\"`" + `
} ` + "`json:\"trained_model_configs\"`" + `
}
res, _ := es.ML.GetTrainedModels(es.ML.GetTrainedModels.WithModelID("_all"))
if res != nil && res.Body != nil {
defer res.Body.Close()
json.NewDecoder(res.Body).Decode(&r)
for _, model := range r.TrainedModelConfigs {
model_id := model.ModelId
res, _ := es.ML.DeleteTrainedModel(model_id)
if res != nil && res.Body != nil {
defer res.Body.Close()
}
}
}
}
{
var r struct {
Filters []struct {
FilterId string ` + "`json:\"filter_id\"`" + `
} ` + "`json:\"filters\"`" + `
}
res, _ := es.ML.GetFilters()
if res != nil && res.Body != nil {
defer res.Body.Close()
json.NewDecoder(res.Body).Decode(&r)
}
for _, filter := range r.Filters {
filter_id := filter.FilterId
res, _ := es.ML.DeleteFilter(filter_id)
if res != nil && res.Body != nil {
defer res.Body.Close()
}
}
}
{
var r struct {
Calendars []struct {
CalendarId string ` + "`json:\"calendar_id\"`" + `
} ` + "`json:\"calendars\"`" + `
}
res, _ := es.ML.GetCalendars()
if res != nil && res.Body != nil {
defer res.Body.Close()
json.NewDecoder(res.Body).Decode(&r)
}
for _, calendar := range r.Calendars {
calendar_id := calendar.CalendarId
res, _ := es.ML.DeleteCalendar(calendar_id)
if res != nil && res.Body != nil {
defer res.Body.Close()
}
}
}
{
var r map[string]interface{}
res, _ = es.Indices.GetTemplate()
if res != nil && res.Body != nil {
defer res.Body.Close()
json.NewDecoder(res.Body).Decode(&r)
for templateName, _ := range r {
if strings.HasPrefix(templateName, ".") {
continue
}
es.Indices.DeleteTemplate(templateName)
}
}
}
{
res, _ = es.Watcher.DeleteWatch("my_watch")
if res != nil && res.Body != nil {
defer res.Body.Close()
}
}
{
var r map[string]interface{}
res, _ = es.Security.GetRole()
if res != nil && res.Body != nil {
defer res.Body.Close()
json.NewDecoder(res.Body).Decode(&r)
for k, v := range r {
reserved, ok := v.(map[string]interface{})["metadata"].(map[string]interface{})["_reserved"].(bool)
if ok && reserved {
continue
}
es.Security.DeleteRole(k)
}
}
}
{
var r map[string]interface{}
res, _ = es.Security.GetUser()
if res != nil && res.Body != nil {
defer res.Body.Close()
json.NewDecoder(res.Body).Decode(&r)
for k, v := range r {
reserved, ok := v.(map[string]interface{})["metadata"].(map[string]interface{})["_reserved"].(bool)
if ok && reserved {
continue
}
es.Security.DeleteUser(k)
}
}
}
{
var r struct {
Apps map[string]struct {
Read struct {
Metadata struct {
Description string ` + "`json:\"description\"`" + `
Reserved bool ` + "`json:\"_reserved\"`" + `
} ` + "`json:\"metadata\"`" + `
} ` + "`json:\"read\"`" + `
}
}
res, _ = es.Security.GetPrivileges()
if res != nil && res.Body != nil {
defer res.Body.Close()
json.NewDecoder(res.Body).Decode(&r)
for k, v := range r.Apps {
if v.Read.Metadata.Reserved {
continue
}
es.Security.DeletePrivileges(k, "_all")
}
}
}
{
res, _ = es.Indices.Refresh(
es.Indices.Refresh.WithIndex("_all"),
es.Indices.Refresh.WithExpandWildcards("open,closed,hidden"))
if res != nil && res.Body != nil {
defer res.Body.Close()
}
}
{
var r map[string]interface{}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
es.ML.StopDatafeed("_all", es.ML.StopDatafeed.WithContext(ctx))
res, _ = es.ML.GetDatafeeds()
if res != nil && res.Body != nil {
defer res.Body.Close()
json.NewDecoder(res.Body).Decode(&r)
for _, v := range r["datafeeds"].([]interface{}) {
datafeed, ok := v.(map[string]interface{})
if ok {
datafeedID := datafeed["datafeed_id"].(string)
es.ML.DeleteDatafeed(datafeedID)
}
}
}
}
{
var r map[string]interface{}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
es.ML.CloseJob("_all", es.ML.CloseJob.WithContext(ctx))
res, _ = es.ML.GetJobs()
if res != nil && res.Body != nil {
defer res.Body.Close()
json.NewDecoder(res.Body).Decode(&r)
for _, v := range r["jobs"].([]interface{}) {
job, ok := v.(map[string]interface{})
if ok {
jobID := job["job_id"].(string)
es.ML.DeleteJob(jobID)
}
}
}
}
{
var r map[string]interface{}
res, _ = es.Rollup.GetJobs(es.Rollup.GetJobs.WithJobID("_all"))
if res != nil && res.Body != nil {
defer res.Body.Close()
json.NewDecoder(res.Body).Decode(&r)
for _, v := range r["jobs"].([]interface{}) {
job, ok := v.(map[string]interface{})["config"]
if ok {
jobID := job.(map[string]interface{})["id"].(string)
es.Rollup.StopJob(jobID, es.Rollup.StopJob.WithWaitForCompletion(true))
es.Rollup.DeleteJob(jobID)
}
}
}
}
{
var r map[string]interface{}
res, _ = es.Tasks.List()
if res != nil && res.Body != nil {
defer res.Body.Close()
json.NewDecoder(res.Body).Decode(&r)
for _, vv := range r["nodes"].(map[string]interface{}) {
for _, v := range vv.(map[string]interface{})["tasks"].(map[string]interface{}) {
cancellable, ok := v.(map[string]interface{})["cancellable"]
if ok && cancellable.(bool) {
taskID := fmt.Sprintf("%v:%v", v.(map[string]interface{})["node"], v.(map[string]interface{})["id"])
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
es.Tasks.Cancel(es.Tasks.Cancel.WithTaskID(taskID), es.Tasks.Cancel.WithContext(ctx))
}
}
}
}
}
{
var r map[string]interface{}
res, _ = es.Snapshot.GetRepository()
if res != nil && res.Body != nil {
defer res.Body.Close()
json.NewDecoder(res.Body).Decode(&r)
for repositoryID, _ := range r {
var r map[string]interface{}
res, _ = es.Snapshot.Get(repositoryID, []string{"_all"})
json.NewDecoder(res.Body).Decode(&r)
for _, vv := range r["responses"].([]interface{}) {
for _, v := range vv.(map[string]interface{})["snapshots"].([]interface{}) {
snapshotID, ok := v.(map[string]interface{})["snapshot"]
if ok {
es.Snapshot.Delete(repositoryID, []string{fmt.Sprintf("%s", snapshotID)})
}
}
}
es.Snapshot.DeleteRepository([]string{fmt.Sprintf("%s", repositoryID)})
}
}
}
{
res, _ = es.Indices.Delete([]string{".ml*"})
if res != nil && res.Body != nil {
defer res.Body.Close()
}
}
{
res, _ = es.ILM.RemovePolicy("_all")
if res != nil && res.Body != nil {
defer res.Body.Close()
}
}
{
res, _ = es.Cluster.Health(es.Cluster.Health.WithWaitForStatus("yellow"))
if res != nil && res.Body != nil {
defer res.Body.Close()
}
}
{
res, _ = es.Security.PutUser("x_pack_rest_user", strings.NewReader(` + "`" + `{"password":"x-pack-test-password", "roles":["superuser"]}` + "`" + `), es.Security.PutUser.WithPretty())
if res != nil && res.Body != nil {
defer res.Body.Close()
}
}
{
res, _ = es.Indices.Refresh(
es.Indices.Refresh.WithIndex("_all"),
es.Indices.Refresh.WithExpandWildcards("open,closed,hidden"))
if res != nil && res.Body != nil {
defer res.Body.Close()
}
}
{
res, _ = es.Cluster.Health(es.Cluster.Health.WithWaitForStatus("yellow"))
if res != nil && res.Body != nil {
defer res.Body.Close()
}
}
{
var i int
for {
i++
var r map[string]interface{}
res, _ = es.Cluster.PendingTasks()
if res != nil && res.Body != nil {
defer res.Body.Close()
json.NewDecoder(res.Body).Decode(&r)
if len(r["tasks"].([]interface{})) < 1 {
break
}
}
if i > 30 {
break
}
time.Sleep(time.Second)
}
}
}
_ = xpackSetup
`)
}
// genLocationYAML emits a "// Source: <file>:<line>" comment pointing at the
// YAML definition of test t, by scanning the file for the line whose
// unquoted, colon-trimmed content equals the test's original name.
func (g *Generator) genLocationYAML(t Test) {
	f, err := os.Open(t.Filepath)
	if err != nil {
		g.w(fmt.Sprintf("// Error opening file: %s\n", err))
		// Previously execution fell through with a failed file handle and
		// reported a second, redundant read error; bail out instead.
		return
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	var i int
	for scanner.Scan() {
		i++
		tname := scanner.Text()
		tname = strings.TrimRight(tname, `:`)
		tname = strings.NewReplacer(`\"`, `"`).Replace(tname)
		tname = strings.TrimPrefix(tname, `"`)
		tname = strings.TrimSuffix(tname, `"`)
		if tname == t.OrigName {
			// TODO: Github URL (with proper branch/commit/etc)
			g.w("\t// Source: " + t.Filepath + fmt.Sprintf(":%d", i) + "\n\t//\n")
		}
	}
	if err := scanner.Err(); err != nil {
		g.w(fmt.Sprintf("// Error reading file: %s\n", err))
	}
}
// genSkip emits a t.SkipNow()/t.Skip() call when test t is listed in the
// hand-maintained skip list or flagged as skipped in the YAML source.
// It reports whether a skip was generated.
func (g *Generator) genSkip(t Test) (skipped bool) {
	// Check the custom skip list
	if skips, ok := skipTests[t.BaseFilename()]; ok {
		// An empty entry skips every test in the file.
		if len(skips) < 1 {
			g.w("\t// Skipping all tests in '" + t.BaseFilename() + "'\n")
			g.w("\tt.SkipNow()\n\n")
			return true
		}
		for _, skip := range skips {
			if skip == t.OrigName {
				g.w("\tt.SkipNow()\n\n")
				return true
			}
		}
	}

	// Check the skip property coming from YAML
	if t.Skip {
		if t.SkipInfo != "" {
			g.w("\tt.Skip(" + strconv.Quote(t.SkipInfo) + ")\n\n")
			return true
		}
		g.w("\tt.SkipNow()\n\n")
		return true
	}

	return false
}
// genSetupTeardown generates the code for the YAML setup/teardown actions,
// preceded by the common variable section.
func (g *Generator) genSetupTeardown(actions []Action) {
	g.genVarSection(Test{})

	for _, action := range actions {
		g.genAction(action, false)
		g.w("\n")
	}
}
// genSteps generates the body of a single test: a debug comment plus the
// code for every action, assertion and stash operation in t.Steps.
func (g *Generator) genSteps(t Test) {
	// When the test contains no assertions, catch clauses or stash
	// operations, the generated code can skip response body handling.
	var skipBody bool
	if !t.Steps.ContainsAssertion() && !t.Steps.ContainsCatch() && !t.Steps.ContainsStash() {
		skipBody = true
	}

	g.genVarSection(t, skipBody)

	for _, step := range t.Steps {
		// Bind the concrete type once instead of repeating type assertions.
		switch step := step.(type) {
		case Action:
			// Generate debug info
			var dbg strings.Builder
			dbg.WriteString("\t\t// => " + step.Method() + "(")
			var j int
			for k, v := range step.Params() {
				j++
				dbg.WriteString(k + ": " + strings.Replace(fmt.Sprintf("%v", v), "\n", "|", -1))
				if j < len(step.Params()) {
					dbg.WriteString(", ")
				}
			}
			dbg.WriteString(") ")
			pad := 101 - dbg.Len()
			if pad < 0 {
				pad = 0
			}
			g.w(dbg.String() + strings.Repeat("-", pad) + "\n\t\t//\n")

			// Generate the action
			g.genAction(step, skipBody)
			g.w("\t\t// " + strings.Repeat("-", 96) + "\n\n")

		case Assertion:
			// Generate debug info
			g.w("\t\t// ~> ")
			g.w(fmt.Sprintf("%q: ", step.operation))
			g.w(strings.Replace(fmt.Sprintf("%s", step.payload), "\n", "|", -1))
			g.w("\n")
			// Generate the assertion
			g.genAssertion(step)
			g.w("\n")

		case Stash:
			// Generate setting the stash
			g.genStashSet(step)
			g.w("\n")

		default:
			panic(fmt.Sprintf("Unknown step %T", step))
		}
	}
}
// genVarSection generates the common variable declarations for a test body,
// plus the handleResponseBody helper unless skipBody[0] is true.
func (g *Generator) genVarSection(t Test, skipBody ...bool) {
	// The original condition also OR-ed "... || true" onto the step checks,
	// making that part always true; only the skipBody flag matters.
	needBody := len(skipBody) < 1 || !skipBody[0]

	g.w("\t\tvar (\n")
	g.w("\t\t\treq esapi.Request\n")
	g.w("\t\t\tres *esapi.Response\n")
	g.w("\t\t\terr error\n\n")

	g.w("\t\t\tstash = make(map[string]interface{}, 0)\n\n")

	if needBody {
		g.w("\t\t\tbody []byte\n")
		g.w("\t\t\tmapi map[string]interface{}\n")
		g.w("\t\t\tslic []interface{}\n")
	}

	if t.Steps.ContainsAssertion("is_false", "is_true") {
		g.w("\n\t\t\tvalue reflect.Value\n")
	}

	g.w("\n")
	g.w("\t\t\tassertion bool\n")
	g.w("\t\t\tactual interface{}\n")
	g.w("\t\t\texpected interface{}\n")
	g.w("\n")

	if t.Steps.ContainsAssertion("match", "match-regexp") {
		g.w("\n\t\t\tre *regexp.Regexp\n")
		g.w("\t\t\tmatch bool\n")
	}

	g.w("\t\t)\n\n")

	if needBody {
		g.w("\t\t_ = mapi\n")
		g.w("\t\t_ = slic\n")
		g.w("\n")
		g.w(`handleResponseBody := func(t *testing.T, res *esapi.Response) {
			// Reset deserialized structures
			mapi = make(map[string]interface{})
			slic = make([]interface{}, 0)
			var err error
			body, err = ioutil.ReadAll(res.Body)
			if err != nil {
				t.Fatalf("Error reading body: %s", err)
			}
			res.Body.Close()
			res.Body = ioutil.NopCloser(bytes.NewBuffer(body))
			if len(body) < 1 {
				// FIXME: Hack to prevent EOF errors
				return
			}
			if len(res.Header) > 0 {
				if strings.Contains(res.Header["Content-Type"][0], "text/plain") {
					return
				}
				if strings.Contains(res.Header["Content-Type"][0], "yaml") {
					if strings.HasPrefix(string(body), "---\n-") {
						if err := encyaml.NewDecoder(res.Body).Decode(&slic); err != nil {
							t.Fatalf("Error parsing the response body: %s", err)
						}
					} else {
						if err := encyaml.NewDecoder(res.Body).Decode(&mapi); err != nil {
							t.Fatalf("Error parsing the response body: %s", err)
						}
					}
					return
				}
			}
			d := encjson.NewDecoder(res.Body)
			d.UseNumber()
			if strings.HasPrefix(string(body), "[") {
				if err := d.Decode(&slic); err != nil {
					t.Fatalf("Error parsing the response body: %s", err)
				}
			} else {
				if err := d.Decode(&mapi); err != nil {
					t.Fatalf("Error parsing the response body: %s", err)
				}
			}
		}` + "\n")
	}

	g.w("\n")
	g.w("\t\t_ = stash\n")

	if t.Steps.ContainsAssertion("is_false", "is_true") {
		g.w("\t\t_ = value\n")
	}

	g.w("\t\t_ = assertion\n")
	g.w("\t\t_ = actual\n")
	g.w("\t\t_ = expected\n")

	if t.Steps.ContainsAssertion("match", "match-regexp") {
		g.w("\n")
		g.w("\t\t_ = re\n")
		g.w("\t\t_ = match\n")
	}

	g.w("\n")
}
// genAction generates the code for a single API call: the esapi request
// struct with its parameters and headers, the req.Do() invocation, and the
// error/body handling. When skipBody[0] is true, the generated code does not
// call handleResponseBody.
func (g *Generator) genAction(a Action, skipBody ...bool) {
	// Matches "${var}" placeholders to be replaced from the stash.
	varDetection := regexp.MustCompile(".*(\\$\\{(\\w+)\\}).*")

	// Initialize the request
	g.w("\t\treq = esapi." + a.Request() + "{\n")

	// Pass the parameters
	for k, v := range a.Params() {
		// fmt.Printf("%s.%s: <%T> %v\n", a.Request(), k, v, v)

		// Values starting with "$" refer to previously stashed values.
		if strings.HasPrefix(fmt.Sprintf("%s", v), "$") {
			v = `stash[` + strconv.Quote(strings.ReplaceAll(strings.ReplaceAll(fmt.Sprintf("%s", v), "{", ""), "}", "")) + `]`
		}

		switch v.(type) {
		case bool:
			g.w("\t\t\t" + k + ": ")

			typ, ok := apiRegistry[a.Request()][k]
			if !ok {
				panic(fmt.Sprintf("%s.%s: field not found", a.Request(), k))
			}

			// Render the boolean in whatever type the API field expects.
			switch typ {
			case "bool":
				g.w(strconv.FormatBool(v.(bool)))
			case "*bool":
				g.w(`esapi.BoolPtr(` + strconv.FormatBool(v.(bool)) + `)`)
			case "string":
				g.w(`"` + strconv.FormatBool(v.(bool)) + `"`)
			case "[]string":
				// TODO: Listify
				g.w(`[]string{"` + strconv.FormatBool(v.(bool)) + `"}`)
			default:
				g.w(strconv.FormatBool(v.(bool)))
			}
			g.w(",\n")

		case string:
			if k == "Body" {
				g.w("\t\t\t" + k + ": ")
				body := v.(string)
				if varDetection.MatchString(body) {
					// Replace "${var}" placeholders in the body from the stash.
					g.w("strings.NewReader(strings.NewReplacer(")
					matchs := varDetection.FindAllStringSubmatch(body, -1)
					for _, match := range matchs {
						bodyVar := match[1]
						stashVar := fmt.Sprintf(`stash["$%s"].(string)`, match[2])
						g.w(fmt.Sprintf("`%s`, %s", bodyVar, stashVar))
					}
					g.w(").Replace(`" + body + "`))")
				} else {
					if !strings.HasSuffix(body, "\n") {
						body = body + "\n"
					}
					g.w("strings.NewReader(`" + body + "`)")
				}
			} else {
				g.w("\t\t\t" + k + ": ")
				// TODO: Handle comma separated strings as lists

				// fmt.Printf("%s: %#v\n", a.Request(), apiRegistry[a.Request()])
				// fmt.Printf("%s: %#v\n", k, apiRegistry[a.Request()][k])

				typ, ok := apiRegistry[a.Request()][k]
				if !ok {
					panic(fmt.Sprintf("%s.%s: field not found", a.Request(), k))
				}

				var value string
				if strings.HasPrefix(v.(string), "stash[") {
					// The value is a stash lookup; cast it to the field type.
					switch typ {
					case "bool":
						value = `fmt.Sprintf("%v", ` + v.(string) + `)`
					case "string":
						value = fmt.Sprintf("%s.(string)", v)
					case "[]string":
						// TODO: Comma-separated list => Quoted list
						value = fmt.Sprintf(`[]string{%s.(string)}`, v)
					case "int":
						// FIX: the json.Number case used to be emitted after
						// the type switch's closing brace, producing
						// uncompilable generated code; it now mirrors the
						// "*int" branch below.
						value = `func() int {
			switch ` + v.(string) + `.(type) {
			case int:
				return ` + v.(string) + `.(int)
			case float64:
				return int(` + v.(string) + `.(float64))
			case json.Number:
				v, _ := ` + v.(string) + `.(encjson.Number).Int64()
				return int(v)
			}
			panic(fmt.Sprintf(` + "`" + `Unexpected type %T for ` + v.(string) + "`" + `, ` + v.(string) + `))
		}()`
					case "*int":
						value = `func() *int {
			switch ` + v.(string) + `.(type) {
			case int:
				v := ` + v.(string) + `.(int)
				return &v
			case float64:
				v := int(` + v.(string) + `.(float64))
				return &v
			case json.Number:
				v, _ := ` + v.(string) + `.(encjson.Number).Int64()
				vv := int(v)
				return &vv
			}
			panic(fmt.Sprintf(` + "`" + `Unexpected type %T for ` + v.(string) + "`" + `, ` + v.(string) + `))
		}()`
					case "time.Duration":
						value = `fmt.Sprintf("%d", ` + v.(string) + `)`
					default:
						panic(fmt.Sprintf("Unexpected type %q for value %v", typ, v))
					}
				} else {
					switch typ {
					case "[]string":
						value = `[]string{` + fmt.Sprintf("%q", v) + `}`
					case "time.Duration":
						// re := regexp.MustCompile("^(\\d+).*")
						// value = re.ReplaceAllString(fmt.Sprintf("%s", v), "$1")
						inputValue := v.(string)
						// Rewrite day values ("7d") as hours, which
						// time.ParseDuration understands.
						if strings.HasSuffix(inputValue, "d") {
							inputValue = inputValue[:len(inputValue)-1]
							numericValue, err := strconv.Atoi(inputValue)
							if err != nil {
								panic(fmt.Sprintf("Cannot convert duration [%s]: %s", inputValue, err))
							}
							// Convert to hours
							inputValue = fmt.Sprintf("%dh", numericValue*24)
						}
						dur, err := time.ParseDuration(inputValue)
						if err != nil {
							panic(fmt.Sprintf("Cannot parse duration [%s]: %s", v, err))
						}
						value = fmt.Sprintf("%d", dur.Nanoseconds())
					default:
						if strings.HasSuffix(k, "ID") {
							value = fmt.Sprintf("url.QueryEscape(%q)", v)
						} else {
							value = fmt.Sprintf("%q", v)
						}
					}
				}
				g.w(value)
			}
			g.w(",\n")

		case int, *int, float64:
			g.w("\t\t\t" + k + ": ")

			typ, ok := apiRegistry[a.Request()][k]
			if !ok {
				panic(fmt.Sprintf("%s.%s: field not found", a.Request(), k))
			}

			var value string
			switch typ {
			case "string":
				value = `"` + fmt.Sprintf("%d", v) + `"`
			case "[]string":
				value = `[]string{"` + fmt.Sprintf("%d", v) + `"}`
			case "time.Duration":
				re := regexp.MustCompile("^(\\d+).*")
				value = re.ReplaceAllString(fmt.Sprintf("%d", v), "$1")
			case "*int":
				// This branch writes directly; value stays empty below.
				switch v.(type) {
				case int:
					g.w(`esapi.IntPtr(` + fmt.Sprintf("%d", v) + `)`)
				case float64:
					if vv, ok := v.(float64); ok {
						g.w(`esapi.IntPtr(` + fmt.Sprintf("%d", int(vv)) + `)`)
					}
				default:
					panic(fmt.Sprintf("Unexpected type [%T] for [%s]", v, k))
				}
			default:
				value = fmt.Sprintf("%v", v)
			}
			g.w(value)
			g.w(",\n")

		case []interface{}:
			g.w("\t\t\t" + k + ": ")

			typ, ok := apiRegistry[a.Request()][k]
			if !ok {
				panic(fmt.Sprintf("%s.%s: field not found", a.Request(), k))
			}

			switch typ {
			case "string":
				switch v.(type) {
				case string:
					g.w("`" + v.(string) + "`")
				case []interface{}:
					vvv := make([]string, 0)
					for _, vv := range v.([]interface{}) {
						vvv = append(vvv, fmt.Sprintf("%s", vv))
					}
					g.w("`" + strings.Join(vvv, ",") + "`")
				default:
					panic(fmt.Sprintf("<%s> %s{}.%s: unexpected value <%T> %#v", typ, a.Request(), k, v, v))
				}
			case "[]string":
				qv := make([]string, 0)
				for _, vv := range v.([]interface{}) {
					// TODO: Check type
					qv = append(qv, fmt.Sprintf("%q", vv.(string)))
				}
				g.w(`[]string{` + strings.Join(qv, ",") + `}`)
			case "io.Reader":
				// Serialize Bulk payloads ...
				if k == "Body" {
					var b strings.Builder
					for _, vv := range v.([]interface{}) {
						switch vv.(type) {
						case string:
							b.WriteString(vv.(string))
						default:
							j, err := json.Marshal(convert(vv))
							if err != nil {
								panic(fmt.Sprintf("%s{}.%s: %s (%s)", a.Request(), k, err, v))
							}
							b.WriteString(string(j))
						}
						b.WriteString("\n")
					}
					b.WriteString("\n")
					g.w("\t\tstrings.NewReader(`" + b.String() + "`)")
					// ... or just convert the value to JSON
				} else {
					j, err := json.Marshal(convert(v))
					if err != nil {
						panic(fmt.Sprintf("%s{}.%s: %s (%s)", a.Request(), k, err, v))
					}
					g.w("\t\tstrings.NewReader(`" + fmt.Sprintf("%s", j) + "`)")
				}
			}
			g.w(",\n")

		case map[interface{}]interface{}:
			g.w("\t\t\t" + k + ": ")

			// vv := unstash(convert(v).(map[string]interface{}))
			// fmt.Println(vv)

			j, err := json.Marshal(convert(v))
			if err != nil {
				panic(fmt.Sprintf("JSON parse error: %s; %s", err, v))
			} else {
				// Unstash values
				reStash := regexp.MustCompile(`("\$[^"]+")`)
				j = reStash.ReplaceAll(j, []byte("` + strconv.Quote(fmt.Sprintf(\"%v\", stash[$1])) + `"))
				g.w("\t\tstrings.NewReader(`" + fmt.Sprintf("%s", j) + "`)")
				g.w(",\n")
			}

		default:
			g.w(fmt.Sprintf("\t\t// TODO: %s (%v)\n", k, v))
		}
	}

	// Pass the headers
	if len(a.headers) > 0 {
		// The Cat APIs need the Format parameter to produce YAML output.
		if strings.Contains(a.headers["Accept"], "yaml") && strings.HasPrefix(a.Request(), "Cat") {
			g.w("\t\t" + `Format: "yaml",` + "\n")
		}
		g.w("\t\tHeader: http.Header{\n")
		for name, value := range a.headers {
			// The default JSON content type is already set by the client.
			if name == "Content-Type" && value == "application/json" {
				continue
			}
			if name == "Authorization" {
				auth_fields := strings.Split(value, " ")
				auth_name := auth_fields[0]
				auth_value := auth_fields[1]
				if strings.HasPrefix(auth_value, "$") {
					auth_value = `fmt.Sprintf("%s", stash["` + strings.ReplaceAll(strings.ReplaceAll(auth_value, "{", ""), "}", "") + `"])`
				} else {
					auth_value = `"` + auth_value + `"`
				}
				g.w("\t\t\t" + `"Authorization": []string{"` + auth_name + ` " + ` + auth_value + `},` + "\n")
			} else {
				g.w("\t\t\t\"" + name + "\": []string{\"" + value + "\"},\n")
			}
		}
		g.w("\t\t},\n")
	}
	g.w("\t\t}\n\n")

	// Get response
	g.w("\t\tres, err = req.Do(context.Background(), es)\n")
	g.w(`		if err != nil {
			t.Fatalf("ERROR: %s", err)
		}
		defer res.Body.Close()
`)
	g.w("\n\n")

	if len(a.catch) < 1 {
		// Handle error responses
		g.w(`		handleResponseError(t, res)` + "\n")
	} else {
		// TODO: Test catch
	}

	if len(skipBody) < 1 || (len(skipBody) > 0 && skipBody[0] == false) {
		// Read and parse the body
		g.w(`		handleResponseBody(t, res)` + "\n")
	}
}
// genAssertion generates a single assertion: the opening condition, the
// failure-reporting statement, and the closing brace.
func (g *Generator) genAssertion(a Assertion) {
	g.w(a.Condition())
	g.w(a.Error() + "\n")
	g.w("}\n") // Close the condition
}
// genStashSet generates the code that saves a response value into the stash
// under s.Key(), handling the "_arbitrary_key_" placeholder and
// "#"-prefixed transformations.
func (g *Generator) genStashSet(s Stash) {
	g.w(fmt.Sprintf("// Set %q\n", s.Key()))

	var stash string

	value := s.ExpandedValue()
	switch {
	case strings.Contains(value, "_arbitrary_key_"):
		// Stash the arbitrary key of the addressed object.
		// FIX: strings.Trim treated "._arbitrary_key_" as a character SET
		// and could also strip characters belonging to the key itself
		// (e.g. a key starting or ending in 't', 'e', '_', ...);
		// TrimSuffix removes exactly the placeholder suffix.
		key := strings.TrimSuffix(s.FirstValue(), "._arbitrary_key_")
		stash = `for k, _ := range mapi["` + key + `"].(map[string]interface{}) {
		stash["` + s.Key() + `"] = k
	}
	`
	case strings.HasPrefix(value, `mapi["#`):
		switch {
		case strings.HasPrefix(value, `mapi["#base64EncodeCredentials`):
			// Join the named fields with ":" and base64-encode the result.
			i, j := strings.Index(value, "("), strings.Index(value, ")")
			values := strings.Split(value[i+1:j], ",")
			value = `base64.StdEncoding.EncodeToString([]byte(`
			value += `strings.Join([]string{`
			for n, v := range values {
				value += `mapi["` + v + `"].(string)`
				if n < len(values)-1 {
					value += ","
				}
			}
			value += `}, ":")`
			value += `))`
			stash = fmt.Sprintf("stash[%q] = %s\n", s.Key(), value)
		default:
			panic(fmt.Sprintf("Unknown transformation: %s", value))
		}
	default:
		stash = fmt.Sprintf("stash[%q] = %s\n", s.Key(), value)
	}

	g.w(stash)
}
// convert recursively rewrites YAML-style maps (map[interface{}]interface{})
// into JSON-friendly map[string]interface{} values, stringifying non-string
// keys; slices are converted in place. Any other value is returned as-is.
func convert(i interface{}) interface{} {
	switch value := i.(type) {
	case map[interface{}]interface{}:
		converted := make(map[string]interface{}, len(value))
		for key, val := range value {
			var name string
			switch typedKey := key.(type) {
			case string:
				name = typedKey
			case int:
				name = fmt.Sprintf("%d", typedKey)
			default:
				name = fmt.Sprintf("%v", typedKey)
			}
			converted[name] = convert(val)
		}
		return converted
	case []interface{}:
		for idx, val := range value {
			value[idx] = convert(val)
		}
	}
	return i
}
|
package parse
import "unicode/utf8"
// rawtextlexer steps rune-by-rune through a raw text string.
type rawtextlexer struct {
	str     string
	pos     int // byte offset of the next rune to read
	lastpos int // byte offset of the rune most recently returned by next
}

// eof reports whether the lexer has consumed the entire string.
func (l *rawtextlexer) eof() bool {
	return len(l.str) <= l.pos
}

// next consumes and returns the next rune, recording its start offset.
func (l *rawtextlexer) next() rune {
	l.lastpos = l.pos
	r, width := utf8.DecodeRuneInString(l.str[l.pos:])
	l.pos += width
	return r
}
// rawtext processes the raw text found in templates:
//   - trim leading/trailing whitespace if either:
//     a. the whitespace includes a newline, or
//     b. the caller tells us the surrounding content is a tight joiner by trimBefore/After
//   - trim leading and trailing whitespace on each internal line
//   - join lines with no space if '<' or '>' are on either side, else with 1 space.
func rawtext(s string, trimBefore, trimAfter bool) []byte {
	var lex = rawtextlexer{s, 0, 0}
	var (
		spaces         = 0          // length of the whitespace run being scanned
		seenNewline    = trimBefore // whether the current run contains a newline
		lastChar       rune         // last non-space rune copied to the result
		charBeforeTrim rune         // rune immediately preceding the whitespace run
		result         = make([]byte, len(s))
		resultLen      = 0
	)
	if trimBefore {
		spaces = 1
	}
	for {
		if lex.eof() {
			// if we haven't seen a newline, add all the space we've been trimming.
			if !seenNewline && spaces > 0 && !trimAfter {
				for i := lex.pos - spaces; i < lex.pos; i++ {
					result[resultLen] = s[i]
					resultLen++
				}
			}
			return result[:resultLen]
		}
		var r = lex.next()

		// join lines
		if spaces > 0 {
			// more space, keep going
			if isSpace(r) {
				spaces++
				continue
			}
			if isEndOfLine(r) {
				spaces++
				seenNewline = true
				continue
			}

			// done with scanning a set of space. actions:
			// - add the run of space to the result if we haven't seen a newline.
			// - add one space if the character before and after the newline are not tight joiners.
			// - else, ignore the space.
			switch {
			case !seenNewline:
				for i := lex.lastpos - spaces; i < lex.lastpos; i++ {
					result[resultLen] = s[i]
					resultLen++
				}
			case seenNewline && !isTightJoiner(charBeforeTrim) && !isTightJoiner(r):
				result[resultLen] = ' '
				resultLen++
			default:
				// ignore the space
			}
			spaces = 0
			// NOTE: the former "seenNewline = false" here was ineffectual --
			// seenNewline is unconditionally recomputed just below
			// (robfig/soy#78).
		}

		// begin to trim
		seenNewline = isEndOfLine(r)
		if isSpace(r) || seenNewline {
			spaces = 1
			charBeforeTrim = lastChar
			continue
		}

		// non-space characters are added verbatim.
		for i := lex.lastpos; i < lex.pos; i++ {
			result[resultLen] = lex.str[i]
			resultLen++
		}
		lastChar = r
	}
}
// isTightJoiner reports whether r joins lines without an intervening space:
// the zero rune (no preceding character) and the tag delimiters '<' and '>'.
func isTightJoiner(r rune) bool {
	return r == 0 || r == '<' || r == '>'
}
Remove ineffective assignment in parse/rawtext.go
This was found via Go Report Card:
https://goreportcard.com/report/github.com/robfig/soy#ineffassign
> Line 84: warning: ineffectual assignment to seenNewline (ineffassign)
According to @robfig in
https://github.com/robfig/soy/issues/78#issuecomment-649787848, this line can be
removed.
All tests pass with this change.
Closes #78
package parse
import "unicode/utf8"
// rawtextlexer steps rune-by-rune through a raw text string.
type rawtextlexer struct {
	str     string
	pos     int // byte offset of the next rune to read
	lastpos int // byte offset of the rune most recently returned by next
}

// eof reports whether the lexer has consumed the entire string.
func (l *rawtextlexer) eof() bool {
	return len(l.str) <= l.pos
}

// next consumes and returns the next rune, recording its start offset.
func (l *rawtextlexer) next() rune {
	l.lastpos = l.pos
	r, width := utf8.DecodeRuneInString(l.str[l.pos:])
	l.pos += width
	return r
}
// rawtext processes the raw text found in templates:
//   - trim leading/trailing whitespace if either:
//     a. the whitespace includes a newline, or
//     b. the caller tells us the surrounding content is a tight joiner by trimBefore/After
//   - trim leading and trailing whitespace on each internal line
//   - join lines with no space if '<' or '>' are on either side, else with 1 space.
func rawtext(s string, trimBefore, trimAfter bool) []byte {
	var lex = rawtextlexer{s, 0, 0}
	var (
		spaces         = 0          // length of the whitespace run being scanned
		seenNewline    = trimBefore // whether the current run contains a newline
		lastChar       rune         // last non-space rune copied to the result
		charBeforeTrim rune         // rune immediately preceding the whitespace run
		result         = make([]byte, len(s))
		resultLen      = 0
	)
	if trimBefore {
		spaces = 1
	}
	for {
		if lex.eof() {
			// if we haven't seen a newline, add all the space we've been trimming.
			if !seenNewline && spaces > 0 && !trimAfter {
				for i := lex.pos - spaces; i < lex.pos; i++ {
					result[resultLen] = s[i]
					resultLen++
				}
			}
			return result[:resultLen]
		}
		var r = lex.next()
		// join lines
		if spaces > 0 {
			// more space, keep going
			if isSpace(r) {
				spaces++
				continue
			}
			if isEndOfLine(r) {
				spaces++
				seenNewline = true
				continue
			}
			// done with scanning a set of space. actions:
			// - add the run of space to the result if we haven't seen a newline.
			// - add one space if the character before and after the newline are not tight joiners.
			// - else, ignore the space.
			switch {
			case !seenNewline:
				for i := lex.lastpos - spaces; i < lex.lastpos; i++ {
					result[resultLen] = s[i]
					resultLen++
				}
			case seenNewline && !isTightJoiner(charBeforeTrim) && !isTightJoiner(r):
				result[resultLen] = ' '
				resultLen++
			default:
				// ignore the space
			}
			spaces = 0
			// seenNewline needs no reset here: it is unconditionally
			// recomputed for the current rune just below.
		}
		// begin to trim
		seenNewline = isEndOfLine(r)
		if isSpace(r) || seenNewline {
			spaces = 1
			charBeforeTrim = lastChar
			continue
		}
		// non-space characters are added verbatim.
		for i := lex.lastpos; i < lex.pos; i++ {
			result[resultLen] = lex.str[i]
			resultLen++
		}
		lastChar = r
	}
}
// isTightJoiner reports whether r joins lines without an intervening space:
// the zero rune (no preceding character) and the tag delimiters '<' and '>'.
func isTightJoiner(r rune) bool {
	return r == 0 || r == '<' || r == '>'
}
|
// Only files including IPv4, IPv6, and Location (in english)
// will be read and parsed into lists.
package parser
import (
"bytes"
"errors"
"log"
"net"
"regexp"
"strconv"
"strings"
)
// mapMax bounds the size of in-memory lookup maps.
// NOTE(review): not referenced in this chunk — confirm its use elsewhere.
const mapMax = 200000

// IPNode defines IPv4 and IPv6 databases
type IPNode struct {
	IPAddressLow  net.IP
	IPAddressHigh net.IP
	LocationIndex int // Index to slice of locations
	PostalCode    string
	Latitude      float64
	Longitude     float64
}

// LocationNode defines Location databases
type LocationNode struct {
	GeonameID     int
	ContinentCode string
	CountryCode   string
	CountryName   string
	MetroCode     int64
	CityName      string
}

// The GeoDataset struct bundles all the data needed to search and
// find data into one common structure
type GeoDataset struct {
	IP4Nodes      []IPNode       // The IPNode list containing IP4Nodes
	IP6Nodes      []IPNode       // The IPNode list containing IP6Nodes
	LocationNodes []LocationNode // The location nodes corresponding to the IPNodes
}
// checkNumColumns verifies that record contains exactly size columns,
// logging and returning an error otherwise.
func checkNumColumns(record []string, size int) error {
	if len(record) == size {
		return nil
	}
	log.Println("Incorrect number of columns in IP list", size, " got: ", len(record), record)
	return errors.New("Corrupted Data: wrong number of columns")
}
// lookupGeoId finds the provided geonameID within idMap and returns the
// associated index. For example, with:
//	locationIdMap := map[int]int{
//		609013: 0,
//		104084: 4,
//		17:     4,
//	}
// lookupGeoId("17", locationIdMap) returns (4, nil).
// (The previous comment incorrectly claimed it returns (2, nil).)
// TODO: Add error metrics
func lookupGeoId(gnid string, idMap map[int]int) (int, error) {
	geonameId, err := strconv.Atoi(gnid)
	if err != nil {
		return 0, errors.New("Corrupted Data: geonameID should be a number")
	}
	loadIndex, ok := idMap[geonameId]
	if !ok {
		log.Println("geonameID not found ", geonameId)
		return 0, errors.New("Corrupted Data: geonameId not found")
	}
	return loadIndex, nil
}
// stringToFloat parses str as a float64 for the named field. An empty
// string is treated as missing data and yields (0, nil); any other
// unparsable value is reported as corrupted data.
func stringToFloat(str, field string) (float64, error) {
	flt, err := strconv.ParseFloat(str, 64)
	if err != nil && len(str) > 0 {
		log.Println(field, " was not a number")
		output := strings.Join([]string{"Corrupted Data: ", field, " should be an int"}, "")
		return 0, errors.New(output)
	}
	return flt, nil
}
// checkCaps validates that str contains only digits and capital letters,
// returning str unchanged on success and an error for the named field
// otherwise.
func checkCaps(str, field string) (string, error) {
	if match, _ := regexp.MatchString("^[0-9A-Z]*$", str); match {
		return str, nil
	}
	log.Println(field, "should be all capitals and no punctuation: ", str)
	output := strings.Join([]string{"Corrupted Data: ", field, " should be all caps and no punctuation"}, "")
	return "", errors.New(output)
}
// IsEqualIPNodes returns nil when the two nodes are equal, and a logged,
// descriptive error naming the first differing field otherwise.
// Used by the search package.
func IsEqualIPNodes(expected, node IPNode) error {
	// fail logs the mismatch and returns it as an error.
	fail := func(parts ...string) error {
		output := strings.Join(parts, "")
		log.Println(output)
		return errors.New(output)
	}
	if !node.IPAddressLow.Equal(expected.IPAddressLow) {
		return fail("IPAddress Low inconsistent\ngot:", node.IPAddressLow.String(), " \nwanted:", expected.IPAddressLow.String())
	}
	if !node.IPAddressHigh.Equal(expected.IPAddressHigh) {
		return fail("IPAddressHigh inconsistent\ngot:", node.IPAddressHigh.String(), " \nwanted:", expected.IPAddressHigh.String())
	}
	if node.LocationIndex != expected.LocationIndex {
		return fail("LocationIndex inconsistent\ngot:", strconv.Itoa(node.LocationIndex), " \nwanted:", strconv.Itoa(expected.LocationIndex))
	}
	if node.PostalCode != expected.PostalCode {
		return fail("PostalCode inconsistent\ngot:", node.PostalCode, " \nwanted:", expected.PostalCode)
	}
	if node.Latitude != expected.Latitude {
		return fail("Latitude inconsistent\ngot:", floatToString(node.Latitude), " \nwanted:", floatToString(expected.Latitude))
	}
	if node.Longitude != expected.Longitude {
		return fail("Longitude inconsistent\ngot:", floatToString(node.Longitude), " \nwanted:", floatToString(expected.Longitude))
	}
	return nil
}
// floatToString renders num in fixed-point notation with six decimal places.
func floatToString(num float64) string {
	const precision = 6
	return strconv.FormatFloat(num, 'f', precision, 64)
}
// handleStack merges newNode into the parse state for nested IP ranges:
// stack holds the chain of currently-open (nested) ranges, list is the
// flattened output. It returns the updated stack and list.
// NOTE(review): assumes nodes arrive ordered by IPAddressLow — confirm
// against the caller.
func handleStack(stack, list []IPNode, newNode IPNode) ([]IPNode, []IPNode) {
	// Stack is not empty aka we're in a nested IP
	if len(stack) != 0 {
		// newNode is no longer inside stack's nested IP's
		if lessThan(stack[len(stack)-1].IPAddressHigh, newNode.IPAddressLow) {
			// while closing nested IP's
			var pop IPNode
			pop, stack = stack[len(stack)-1], stack[:len(stack)-1]
			for ; len(stack) > 0; pop, stack = stack[len(stack)-1], stack[:len(stack)-1] {
				peek := stack[len(stack)-1]
				if lessThan(newNode.IPAddressLow, peek.IPAddressHigh) {
					// if there's a gap in between immediately nested IP's,
					// complete the gap
					peek.IPAddressLow = PlusOne(pop.IPAddressHigh)
					peek.IPAddressHigh = minusOne(newNode.IPAddressLow)
					list = append(list, peek)
					break
				}
				peek.IPAddressLow = PlusOne(pop.IPAddressHigh)
				list = append(list, peek)
			}
		} else {
			// if we're nesting IP's
			// create beginning bounds
			lastListNode := &list[len(list)-1]
			lastListNode.IPAddressHigh = minusOne(newNode.IPAddressLow)
		}
	}
	stack = append(stack, newNode)
	list = append(list, newNode)
	return stack, list
}
func moreThan(a, b net.IP) bool {
return bytes.Compare(a, b) > 0
}
func lessThan(a, b net.IP) bool {
return bytes.Compare(a, b) < 0
}
func PlusOne(a net.IP) net.IP {
a = append([]byte(nil), a...)
var i int
for i = 15; a[i] == 255; i-- {
a[i] = 0
}
a[i]++
return a
}
func minusOne(a net.IP) net.IP {
a = append([]byte(nil), a...)
var i int
for i = 15; a[i] == 0; i-- {
a[i] = 255
}
a[i]--
return a
}
fix typo
// Only files including IPv4, IPv6, and Location (in english)
// will be read and parsed into lists.
package parser
import (
"bytes"
"errors"
"log"
"net"
"regexp"
"strconv"
"strings"
)
// mapMax bounds the size of in-memory lookup maps.
// NOTE(review): not referenced in this chunk — confirm its use elsewhere.
const mapMax = 200000

// IPNode defines IPv4 and IPv6 databases
type IPNode struct {
	IPAddressLow  net.IP
	IPAddressHigh net.IP
	LocationIndex int // Index to slice of locations
	PostalCode    string
	Latitude      float64
	Longitude     float64
}

// LocationNode defines Location databases
type LocationNode struct {
	GeonameID     int
	ContinentCode string
	CountryCode   string
	CountryName   string
	MetroCode     int64
	CityName      string
}

// The GeoDataset struct bundles all the data needed to search and
// find data into one common structure
type GeoDataset struct {
	IP4Nodes      []IPNode       // The IPNode list containing IP4Nodes
	IP6Nodes      []IPNode       // The IPNode list containing IP6Nodes
	LocationNodes []LocationNode // The location nodes corresponding to the IPNodes
}
// checkNumColumns verifies that record contains exactly size columns,
// logging and returning an error otherwise.
func checkNumColumns(record []string, size int) error {
	if len(record) == size {
		return nil
	}
	log.Println("Incorrect number of columns in IP list", size, " got: ", len(record), record)
	return errors.New("Corrupted Data: wrong number of columns")
}
// lookupGeoId finds the provided geonameID within idMap and returns the
// associated index. For example, with:
//	locationIdMap := map[int]int{
//		609013: 0,
//		104084: 4,
//		17:     4,
//	}
// lookupGeoId("17", locationIdMap) returns (4, nil).
// (The previous comment incorrectly claimed it returns (2, nil).)
// TODO: Add error metrics
func lookupGeoId(gnid string, idMap map[int]int) (int, error) {
	geonameId, err := strconv.Atoi(gnid)
	if err != nil {
		return 0, errors.New("Corrupted Data: geonameID should be a number")
	}
	loadIndex, ok := idMap[geonameId]
	if !ok {
		log.Println("geonameID not found ", geonameId)
		return 0, errors.New("Corrupted Data: geonameId not found")
	}
	return loadIndex, nil
}
// stringToFloat parses str as a float64 for the named field. An empty
// string is treated as missing data and yields (0, nil); any other
// unparsable value is reported as corrupted data.
func stringToFloat(str, field string) (float64, error) {
	flt, err := strconv.ParseFloat(str, 64)
	if err != nil && len(str) > 0 {
		log.Println(field, " was not a number")
		output := strings.Join([]string{"Corrupted Data: ", field, " should be an int"}, "")
		return 0, errors.New(output)
	}
	return flt, nil
}
// checkCaps validates that str contains only digits and capital letters,
// returning str unchanged on success and an error for the named field
// otherwise.
func checkCaps(str, field string) (string, error) {
	if match, _ := regexp.MatchString("^[0-9A-Z]*$", str); match {
		return str, nil
	}
	log.Println(field, "should be all capitals and no punctuation: ", str)
	output := strings.Join([]string{"Corrupted Data: ", field, " should be all caps and no punctuation"}, "")
	return "", errors.New(output)
}
// IsEqualIPNodes returns nil when the two nodes are equal, and a logged,
// descriptive error naming the first differing field otherwise.
// Used by the search package.
func IsEqualIPNodes(expected, node IPNode) error {
	// fail logs the mismatch and returns it as an error.
	fail := func(parts ...string) error {
		output := strings.Join(parts, "")
		log.Println(output)
		return errors.New(output)
	}
	if !node.IPAddressLow.Equal(expected.IPAddressLow) {
		return fail("IPAddress Low inconsistent\ngot:", node.IPAddressLow.String(), " \nwanted:", expected.IPAddressLow.String())
	}
	if !node.IPAddressHigh.Equal(expected.IPAddressHigh) {
		return fail("IPAddressHigh inconsistent\ngot:", node.IPAddressHigh.String(), " \nwanted:", expected.IPAddressHigh.String())
	}
	if node.LocationIndex != expected.LocationIndex {
		return fail("LocationIndex inconsistent\ngot:", strconv.Itoa(node.LocationIndex), " \nwanted:", strconv.Itoa(expected.LocationIndex))
	}
	if node.PostalCode != expected.PostalCode {
		return fail("PostalCode inconsistent\ngot:", node.PostalCode, " \nwanted:", expected.PostalCode)
	}
	if node.Latitude != expected.Latitude {
		return fail("Latitude inconsistent\ngot:", floatToString(node.Latitude), " \nwanted:", floatToString(expected.Latitude))
	}
	if node.Longitude != expected.Longitude {
		return fail("Longitude inconsistent\ngot:", floatToString(node.Longitude), " \nwanted:", floatToString(expected.Longitude))
	}
	return nil
}
// floatToString renders num in fixed-point notation with six decimal places.
func floatToString(num float64) string {
	const precision = 6
	return strconv.FormatFloat(num, 'f', precision, 64)
}
// TODO(gfr) What are list and stack?
// handleStack finds the proper place in the stack for the new node:
// stack holds the chain of currently-open (nested) ranges, list is the
// flattened output. It returns the updated stack and list.
// NOTE(review): assumes nodes arrive ordered by IPAddressLow — confirm
// against the caller.
func handleStack(stack, list []IPNode, newNode IPNode) ([]IPNode, []IPNode) {
	// Stack is not empty aka we're in a nested IP
	if len(stack) != 0 {
		// newNode is no longer inside stack's nested IP's
		if lessThan(stack[len(stack)-1].IPAddressHigh, newNode.IPAddressLow) {
			// while closing nested IP's
			var pop IPNode
			pop, stack = stack[len(stack)-1], stack[:len(stack)-1]
			for ; len(stack) > 0; pop, stack = stack[len(stack)-1], stack[:len(stack)-1] {
				peek := stack[len(stack)-1]
				if lessThan(newNode.IPAddressLow, peek.IPAddressHigh) {
					// if there's a gap in between adjacent nested IP's,
					// complete the gap
					peek.IPAddressLow = PlusOne(pop.IPAddressHigh)
					peek.IPAddressHigh = minusOne(newNode.IPAddressLow)
					list = append(list, peek)
					break
				}
				peek.IPAddressLow = PlusOne(pop.IPAddressHigh)
				list = append(list, peek)
			}
		} else {
			// if we're nesting IP's
			// create beginning bounds
			lastListNode := &list[len(list)-1]
			lastListNode.IPAddressHigh = minusOne(newNode.IPAddressLow)
		}
	}
	stack = append(stack, newNode)
	list = append(list, newNode)
	return stack, list
}
func moreThan(a, b net.IP) bool {
return bytes.Compare(a, b) > 0
}
func lessThan(a, b net.IP) bool {
return bytes.Compare(a, b) < 0
}
func PlusOne(a net.IP) net.IP {
a = append([]byte(nil), a...)
var i int
for i = 15; a[i] == 255; i-- {
a[i] = 0
}
a[i]++
return a
}
func minusOne(a net.IP) net.IP {
a = append([]byte(nil), a...)
var i int
for i = 15; a[i] == 0; i-- {
a[i] = 255
}
a[i]--
return a
}
|
// Copyright 2016 The clang-server Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package parser
import (
"bytes"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/go-clang/v3.9/clang"
"github.com/pkg/errors"
"github.com/pkgutil/osutil"
"github.com/pkgutil/stringsutil"
"github.com/zchee/clang-server/compilationdatabase"
"github.com/zchee/clang-server/indexdb"
"github.com/zchee/clang-server/internal/hashutil"
"github.com/zchee/clang-server/internal/log"
"github.com/zchee/clang-server/internal/pathutil"
"github.com/zchee/clang-server/parser/builtinheader"
"github.com/zchee/clang-server/rpc"
"github.com/zchee/clang-server/symbol"
)
// defaultClangOption default global clang options: libclang's standard
// editing options plus KeepGoing.
//
// Individual TranslationUnit flag values for reference:
// clang.TranslationUnit_DetailedPreprocessingRecord = 0x01
// clang.TranslationUnit_Incomplete = 0x02
// clang.TranslationUnit_PrecompiledPreamble = 0x04
// clang.TranslationUnit_CacheCompletionResults = 0x08
// clang.TranslationUnit_ForSerialization = 0x10
// clang.TranslationUnit_CXXChainedPCH = 0x20
// clang.TranslationUnit_SkipFunctionBodies = 0x40
// clang.TranslationUnit_IncludeBriefCommentsInCodeCompletion = 0x80
// clang.TranslationUnit_CreatePreambleOnFirstParse = 0x100
// clang.TranslationUnit_KeepGoing = 0x200
// const defaultClangOption uint32 = 0x445 // Use all flags for now
var defaultClangOption = clang.DefaultEditingTranslationUnitOptions() | uint32(clang.TranslationUnit_KeepGoing)
// Parser represents a C/C++ AST parser.
type Parser struct {
	idx        clang.Index                              // libclang index shared by all translation units
	cd         *compilationdatabase.CompilationDatabase // parsed compilation database
	db         *indexdb.IndexDB                         // persistent symbol/TU cache
	server     *rpc.GRPCServer                          // serves results after parsing
	config     *Config                                  // parser configuration
	dispatcher *dispatcher                              // runs ParseFile jobs
	debugUncatched bool                     // for debug: count unhandled cursor kinds
	uncachedKind   map[clang.CursorKind]int // for debug: kind -> occurrence count
}
// Config represents a parser config.
type Config struct {
	Root        string   // project root directory; auto-detected when empty
	JSONName    string   // compilation database filename, passed to CompilationDatabase.Parse
	PathRange   []string // passed to CompilationDatabase.Parse — presumably path filters; confirm there
	ClangOption uint32   // libclang TranslationUnit flags; defaultClangOption when zero
	Debug       bool     // enables debug bookkeeping and extra logging
}
// NewParser return the new Parser.
//
// When config.Root is empty the project root is discovered from path.
// It then loads the compilation database, opens the index database,
// wires up the job dispatcher, and dumps clang's builtin headers to the
// cache directory. Any failure aborts the process via log.Fatal.
func NewParser(path string, config *Config) *Parser {
	if config.Root == "" {
		proot, err := pathutil.FindProjectRoot(path)
		if err != nil {
			log.Fatal(err)
		}
		config.Root = proot
	}
	cd := compilationdatabase.NewCompilationDatabase(config.Root)
	if err := cd.Parse(config.JSONName, config.PathRange); err != nil {
		log.Fatal(err)
	}
	db, err := indexdb.NewIndexDB(config.Root)
	if err != nil {
		log.Fatal(err)
	}
	// fall back to the package-wide default clang flags
	if config.ClangOption == 0 {
		config.ClangOption = defaultClangOption
	}
	p := &Parser{
		idx:    clang.NewIndex(0, 0), // disable excludeDeclarationsFromPCH, enable displayDiagnostics
		cd:     cd,
		db:     db,
		server: rpc.NewGRPCServer(),
		config: config,
	}
	p.dispatcher = newDispatcher(p.ParseFile)
	if config.Debug {
		p.debugUncatched = true
		p.uncachedKind = make(map[clang.CursorKind]int)
	}
	if err := CreateBulitinHeaders(); err != nil {
		log.Fatal(err)
	}
	return p
}
// CreateBulitinHeaders creates(dumps) a clang builtin header to cache directory.
// Every embedded builtinheader asset is written beneath
// <cache>/clang/include, creating subdirectories as needed.
func CreateBulitinHeaders() error {
	dstDir := filepath.Join(pathutil.CacheDir(), "clang", "include")
	if !osutil.IsExist(dstDir) {
		if err := os.MkdirAll(dstDir, 0700); err != nil {
			return errors.WithStack(err)
		}
	}
	for _, asset := range builtinheader.AssetNames() {
		info, err := builtinheader.AssetInfo(asset)
		if err != nil {
			return errors.WithStack(err)
		}
		name := info.Name()
		// assets may live in subdirectories; make sure those exist first
		if dir, _ := filepath.Split(name); dir != "" {
			if err := os.MkdirAll(filepath.Join(dstDir, dir), 0700); err != nil {
				return errors.WithStack(err)
			}
		}
		contents, err := builtinheader.Asset(name)
		if err != nil {
			return errors.WithStack(err)
		}
		if err := ioutil.WriteFile(filepath.Join(dstDir, name), contents, 0600); err != nil {
			return errors.WithStack(err)
		}
	}
	return nil
}
// Parse parses the project directories.
//
// It builds the common compiler flag set once, then dispatches one
// ParseFile job per compile command. When all jobs have finished the
// index database is closed and the RPC server starts serving.
func (p *Parser) Parse() {
	defer func() {
		p.db.Close()
		p.server.Serve()
	}()
	defer profile(time.Now(), "Parse")
	ccs := p.cd.CompileCommands()
	if len(ccs) == 0 {
		log.Fatal("not walk")
	}
	compilerConfig := p.cd.CompilerConfig
	// Build flags in a fresh slice: the original appended onto
	// SystemCIncludeDir, which could mutate its backing array.
	flags := make([]string, 0, len(compilerConfig.SystemCIncludeDir)+len(compilerConfig.SystemFrameworkDir))
	flags = append(flags, compilerConfig.SystemCIncludeDir...)
	flags = append(flags, compilerConfig.SystemFrameworkDir...)
	// TODO(zchee): needs include stdint.h?
	if i := stringsutil.IndexContainsSlice(ccs[0].Arguments, "-std="); i > 0 {
		std := ccs[0].Arguments[i][5:]
		switch {
		case strings.HasPrefix(std, "c"), strings.HasPrefix(std, "gnu"):
			// Guard the index: a malformed "-std=" value shorter than
			// two characters would have panicked in the original.
			if n := len(std); n >= 2 && (std[n-2] == '8' || std[n-2] == '9' || std[n-2] == '1') {
				flags = append(flags, "-include", "/usr/include/stdint.h")
			}
		}
	} else {
		flags = append(flags, "-include", "/usr/include/stdint.h")
	}
	if filepath.Ext(ccs[0].File) != ".c" {
		flags = append(flags, compilerConfig.SystemCXXIncludeDir...)
	}
	builtinHdrDir := filepath.Join(pathutil.CacheDir(), "clang", "include")
	flags = append(flags, "-I"+builtinHdrDir)
	p.dispatcher.Start()
	for i := 0; i < len(ccs); i++ {
		// Copy flags for every job: the original's append(flags, args...)
		// could reuse flags' backing array across iterations, letting
		// later iterations overwrite argument slices already handed to
		// still-running jobs.
		args := make([]string, 0, len(flags)+len(ccs[i].Arguments))
		args = append(args, flags...)
		args = append(args, ccs[i].Arguments...)
		p.dispatcher.Add(parseArg{ccs[i].File, args})
	}
	p.dispatcher.Wait()
}
// parseArg is a single dispatcher job: the file to parse and the
// compiler flags to parse it with.
type parseArg struct {
	filename string   // path of the translation unit's main file
	flag     []string // clang command-line arguments for this file
}
// ParseFile parses the C/C++ file.
//
// The filename hash is the database key: on a hit, the cached
// serialized translation unit is deserialized instead of re-parsing;
// on a miss, the file is parsed with libclang, its AST is walked to
// collect symbols, and the serialized result is stored back into the
// database.
func (p *Parser) ParseFile(arg parseArg) error {
	var tu clang.TranslationUnit
	fhash := hashutil.NewHashString(arg.filename)
	fh := fhash[:]
	// Cache hit: rebuild the translation unit from the stored blob.
	if p.db.Has(fh) {
		buf, err := p.db.Get(fh)
		if err != nil {
			return err
		}
		data := symbol.GetRootAsFile(buf, 0)
		tu, err = p.DeserializeTranslationUnit(p.idx, data.TranslationUnit())
		if err != nil {
			return err
		}
		defer tu.Dispose()
		log.Debugf("tu.Spelling(): %T => %+v\n", tu.Spelling(), tu.Spelling())
		return nil
	}
	if cErr := p.idx.ParseTranslationUnit2(arg.filename, arg.flag, nil, p.config.ClangOption, &tu); clang.ErrorCode(cErr) != clang.Error_Success {
		return errors.New(clang.ErrorCode(cErr).Spelling())
	}
	defer tu.Dispose()
	// Serialize the TU concurrently with the AST walk below; the result
	// is collected from tuch after the walk.
	tuch := make(chan []byte, 1)
	go func() {
		tuch <- p.SerializeTranslationUnit(arg.filename, tu)
	}()
	// printDiagnostics(tu.Diagnostics())
	rootCursor := tu.TranslationUnitCursor()
	file := symbol.NewFile(arg.filename, arg.flag)
	// visitNode records declarations, definitions, references and
	// includes for every cursor in the translation unit.
	visitNode := func(cursor, parent clang.Cursor) clang.ChildVisitResult {
		if cursor.IsNull() {
			log.Debug("cursor: <none>")
			return clang.ChildVisit_Continue
		}
		cursorLoc := symbol.FromCursor(cursor)
		if cursorLoc.FileName() == "" || cursorLoc.FileName() == "." {
			// TODO(zchee): Ignore system header(?)
			return clang.ChildVisit_Continue
		}
		kind := cursor.Kind()
		switch kind {
		case clang.Cursor_FunctionDecl, clang.Cursor_StructDecl, clang.Cursor_FieldDecl, clang.Cursor_TypedefDecl, clang.Cursor_EnumDecl, clang.Cursor_EnumConstantDecl:
			// record either a bare declaration or a decl/definition pair
			defCursor := cursor.Definition()
			if defCursor.IsNull() {
				file.AddDecl(cursorLoc)
			} else {
				defLoc := symbol.FromCursor(defCursor)
				file.AddDefinition(cursorLoc, defLoc)
			}
		case clang.Cursor_MacroDefinition:
			// a macro definition is its own definition site
			file.AddDefinition(cursorLoc, cursorLoc)
		case clang.Cursor_VarDecl:
			file.AddDecl(cursorLoc)
		case clang.Cursor_ParmDecl:
			// skip unnamed parameters
			if cursor.Spelling() != "" {
				file.AddDecl(cursorLoc)
			}
		case clang.Cursor_CallExpr:
			refCursor := cursor.Referenced()
			refLoc := symbol.FromCursor(refCursor)
			file.AddCaller(cursorLoc, refLoc, true)
		case clang.Cursor_DeclRefExpr, clang.Cursor_TypeRef, clang.Cursor_MemberRefExpr, clang.Cursor_MacroExpansion:
			refCursor := cursor.Referenced()
			refLoc := symbol.FromCursor(refCursor)
			file.AddCaller(cursorLoc, refLoc, false)
		case clang.Cursor_InclusionDirective:
			incFile := cursor.IncludedFile()
			file.AddHeader(cursor.Spelling(), incFile)
		default:
			// count cursor kinds we do not handle, for debugging
			if p.debugUncatched {
				p.uncachedKind[kind]++
			}
		}
		return clang.ChildVisit_Recurse
	}
	rootCursor.Visit(visitNode)
	// wait for the serialized blob and attach it to the symbol file
	file.AddTranslationUnit(<-tuch)
	buf := file.Serialize()
	out := symbol.GetRootAsFile(buf.FinishedBytes(), 0)
	printFile(out) // for debug
	log.Debugf("Goroutine:%d", runtime.NumGoroutine())
	log.Debugf("================== DONE: filename: %+v ==================\n\n\n", arg.filename)
	return p.db.Put(fh, buf.FinishedBytes())
}
// SerializeTranslationUnit serialize the TranslationUnit to Clang serialized representation.
//
// The unit is saved into a temporary file, read back into memory, and
// the file removed. Save/read failures are fatal.
// TODO(zchee): Avoid ioutil.TempFile if possible.
func (p *Parser) SerializeTranslationUnit(filename string, tu clang.TranslationUnit) []byte {
	tmpFile, err := ioutil.TempFile(os.TempDir(), filepath.Base(filename))
	if err != nil {
		log.Fatal(err)
	}
	// Remove the file and close the handle on return (the original
	// never closed the handle, leaking the descriptor on success).
	defer os.Remove(tmpFile.Name())
	defer tmpFile.Close()
	saveOptions := uint32(clang.TranslationUnit_KeepGoing)
	if cErr := tu.SaveTranslationUnit(tmpFile.Name(), saveOptions); clang.SaveError(cErr) != clang.SaveError_None {
		log.Fatal(clang.SaveError(cErr))
	}
	buf, err := ioutil.ReadFile(tmpFile.Name())
	if err != nil {
		log.Fatal(err)
	}
	return buf
}
// DeserializeTranslationUnit deserialize the TranslationUnit from buf Clang serialized representation.
//
// buf is written to a temporary AST file which libclang then loads; the
// file is removed on every return path.
func (p *Parser) DeserializeTranslationUnit(idx clang.Index, buf []byte) (clang.TranslationUnit, error) {
	var tu clang.TranslationUnit
	tmpfile, err := ioutil.TempFile(os.TempDir(), "clang-server")
	if err != nil {
		return tu, err
	}
	// Remove on every path — the original only removed the file on
	// success, leaking it when TranslationUnit2 failed.
	defer os.Remove(tmpfile.Name())
	defer tmpfile.Close()
	// Check the write: the original discarded io.Copy's error, which
	// could hand clang a truncated AST file.
	if _, err := io.Copy(tmpfile, bytes.NewReader(buf)); err != nil {
		return tu, err
	}
	if err := idx.TranslationUnit2(tmpfile.Name(), &tu); clang.ErrorCode(err) != clang.Error_Success {
		return tu, errors.New(err.Spelling())
	}
	return tu, nil
}
// ClangVersion return the current clang version.
func ClangVersion() string {
	version := clang.GetClangVersion()
	return version
}
parser: support Jobs config
Signed-off-by: Koichi Shiraishi <2e5bdfebde234ed3509bcfc18121c70b6631e207@gmail.com>
// Copyright 2016 The clang-server Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package parser
import (
"bytes"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/go-clang/v3.9/clang"
"github.com/pkg/errors"
"github.com/pkgutil/osutil"
"github.com/pkgutil/stringsutil"
"github.com/zchee/clang-server/compilationdatabase"
"github.com/zchee/clang-server/indexdb"
"github.com/zchee/clang-server/internal/hashutil"
"github.com/zchee/clang-server/internal/log"
"github.com/zchee/clang-server/internal/pathutil"
"github.com/zchee/clang-server/parser/builtinheader"
"github.com/zchee/clang-server/rpc"
"github.com/zchee/clang-server/symbol"
)
// defaultClangOption default global clang options: libclang's standard
// editing options plus KeepGoing.
//
// Individual TranslationUnit flag values for reference:
// clang.TranslationUnit_DetailedPreprocessingRecord = 0x01
// clang.TranslationUnit_Incomplete = 0x02
// clang.TranslationUnit_PrecompiledPreamble = 0x04
// clang.TranslationUnit_CacheCompletionResults = 0x08
// clang.TranslationUnit_ForSerialization = 0x10
// clang.TranslationUnit_CXXChainedPCH = 0x20
// clang.TranslationUnit_SkipFunctionBodies = 0x40
// clang.TranslationUnit_IncludeBriefCommentsInCodeCompletion = 0x80
// clang.TranslationUnit_CreatePreambleOnFirstParse = 0x100
// clang.TranslationUnit_KeepGoing = 0x200
// const defaultClangOption uint32 = 0x445 // Use all flags for now
var defaultClangOption = clang.DefaultEditingTranslationUnitOptions() | uint32(clang.TranslationUnit_KeepGoing)
// Parser represents a C/C++ AST parser.
type Parser struct {
	idx        clang.Index                              // libclang index shared by all translation units
	cd         *compilationdatabase.CompilationDatabase // parsed compilation database
	db         *indexdb.IndexDB                         // persistent symbol/TU cache
	server     *rpc.GRPCServer                          // serves results after parsing
	config     *Config                                  // parser configuration
	dispatcher *dispatcher                              // runs ParseFile jobs
	debugUncatched bool                     // for debug: count unhandled cursor kinds
	uncachedKind   map[clang.CursorKind]int // for debug: kind -> occurrence count
}
// Config represents a parser config.
type Config struct {
	Root        string   // project root directory; auto-detected when empty
	JSONName    string   // compilation database filename, passed to CompilationDatabase.Parse
	PathRange   []string // passed to CompilationDatabase.Parse — presumably path filters; confirm there
	ClangOption uint32   // libclang TranslationUnit flags; defaultClangOption when zero
	Jobs        int      // GOMAXPROCS override while parsing; <= 0 keeps the current setting
	Debug       bool     // enables debug bookkeeping and extra logging
}
// NewParser return the new Parser.
//
// When config.Root is empty the project root is discovered from path.
// It then loads the compilation database, opens the index database,
// wires up the job dispatcher, and dumps clang's builtin headers to the
// cache directory. Any failure aborts the process via log.Fatal.
func NewParser(path string, config *Config) *Parser {
	if config.Root == "" {
		proot, err := pathutil.FindProjectRoot(path)
		if err != nil {
			log.Fatal(err)
		}
		config.Root = proot
	}
	cd := compilationdatabase.NewCompilationDatabase(config.Root)
	if err := cd.Parse(config.JSONName, config.PathRange); err != nil {
		log.Fatal(err)
	}
	db, err := indexdb.NewIndexDB(config.Root)
	if err != nil {
		log.Fatal(err)
	}
	// fall back to the package-wide default clang flags
	if config.ClangOption == 0 {
		config.ClangOption = defaultClangOption
	}
	p := &Parser{
		idx:    clang.NewIndex(0, 0), // disable excludeDeclarationsFromPCH, enable displayDiagnostics
		cd:     cd,
		db:     db,
		server: rpc.NewGRPCServer(),
		config: config,
	}
	p.dispatcher = newDispatcher(p.ParseFile)
	if config.Debug {
		p.debugUncatched = true
		p.uncachedKind = make(map[clang.CursorKind]int)
	}
	if err := CreateBulitinHeaders(); err != nil {
		log.Fatal(err)
	}
	return p
}
// CreateBulitinHeaders creates(dumps) a clang builtin header to cache directory.
//
// Every embedded builtinheader asset is written beneath
// <cache>/clang/include, creating subdirectories as needed.
func CreateBulitinHeaders() error {
	builtinHdrDir := filepath.Join(pathutil.CacheDir(), "clang", "include")
	if !osutil.IsExist(builtinHdrDir) {
		if err := os.MkdirAll(builtinHdrDir, 0700); err != nil {
			return errors.WithStack(err)
		}
	}
	for _, fname := range builtinheader.AssetNames() {
		data, err := builtinheader.AssetInfo(fname)
		if err != nil {
			return errors.WithStack(err)
		}
		// assets may live in subdirectories; create those first
		if strings.Contains(data.Name(), string(filepath.Separator)) {
			dir, _ := filepath.Split(data.Name())
			if err := os.MkdirAll(filepath.Join(builtinHdrDir, dir), 0700); err != nil {
				return errors.WithStack(err)
			}
		}
		buf, err := builtinheader.Asset(data.Name())
		if err != nil {
			return errors.WithStack(err)
		}
		if err := ioutil.WriteFile(filepath.Join(builtinHdrDir, data.Name()), buf, 0600); err != nil {
			return errors.WithStack(err)
		}
	}
	return nil
}
// Parse parses the project directories.
//
// When config.Jobs is positive, GOMAXPROCS is overridden for the
// duration of the parse. One ParseFile job is dispatched per compile
// command; when all jobs finish the index database is closed and the
// RPC server starts serving.
func (p *Parser) Parse() {
	if p.config.Jobs > 0 {
		// BUG FIX: restore the value GOMAXPROCS actually had before the
		// override. The original restored runtime.NumCPU(), which
		// clobbers any non-default GOMAXPROCS setting.
		prev := runtime.GOMAXPROCS(p.config.Jobs)
		defer runtime.GOMAXPROCS(prev)
	}
	defer func() {
		p.db.Close()
		p.server.Serve()
	}()
	defer profile(time.Now(), "Parse")
	ccs := p.cd.CompileCommands()
	if len(ccs) == 0 {
		log.Fatal("not walk")
	}
	compilerConfig := p.cd.CompilerConfig
	// Build flags in a fresh slice: the original appended onto
	// SystemCIncludeDir, which could mutate its backing array.
	flags := make([]string, 0, len(compilerConfig.SystemCIncludeDir)+len(compilerConfig.SystemFrameworkDir))
	flags = append(flags, compilerConfig.SystemCIncludeDir...)
	flags = append(flags, compilerConfig.SystemFrameworkDir...)
	// TODO(zchee): needs include stdint.h?
	if i := stringsutil.IndexContainsSlice(ccs[0].Arguments, "-std="); i > 0 {
		std := ccs[0].Arguments[i][5:]
		switch {
		case strings.HasPrefix(std, "c"), strings.HasPrefix(std, "gnu"):
			// Guard the index: a malformed "-std=" value shorter than
			// two characters would have panicked in the original.
			if n := len(std); n >= 2 && (std[n-2] == '8' || std[n-2] == '9' || std[n-2] == '1') {
				flags = append(flags, "-include", "/usr/include/stdint.h")
			}
		}
	} else {
		flags = append(flags, "-include", "/usr/include/stdint.h")
	}
	if filepath.Ext(ccs[0].File) != ".c" {
		flags = append(flags, compilerConfig.SystemCXXIncludeDir...)
	}
	builtinHdrDir := filepath.Join(pathutil.CacheDir(), "clang", "include")
	flags = append(flags, "-I"+builtinHdrDir)
	p.dispatcher.Start()
	for i := 0; i < len(ccs); i++ {
		// Copy flags for every job: the original's append(flags, args...)
		// could reuse flags' backing array across iterations, letting
		// later iterations overwrite argument slices already handed to
		// still-running jobs.
		args := make([]string, 0, len(flags)+len(ccs[i].Arguments))
		args = append(args, flags...)
		args = append(args, ccs[i].Arguments...)
		p.dispatcher.Add(parseArg{ccs[i].File, args})
	}
	p.dispatcher.Wait()
}
// parseArg is a single dispatcher job: the file to parse and the
// compiler flags to parse it with.
type parseArg struct {
	filename string   // path of the translation unit's main file
	flag     []string // clang command-line arguments for this file
}
// ParseFile parses the C/C++ file.
//
// The filename hash is the database key: on a hit, the cached
// serialized translation unit is deserialized instead of re-parsing;
// on a miss, the file is parsed with libclang, its AST is walked to
// collect symbols, and the serialized result is stored back into the
// database.
func (p *Parser) ParseFile(arg parseArg) error {
	var tu clang.TranslationUnit
	fhash := hashutil.NewHashString(arg.filename)
	fh := fhash[:]
	// Cache hit: rebuild the translation unit from the stored blob.
	if p.db.Has(fh) {
		buf, err := p.db.Get(fh)
		if err != nil {
			return err
		}
		data := symbol.GetRootAsFile(buf, 0)
		tu, err = p.DeserializeTranslationUnit(p.idx, data.TranslationUnit())
		if err != nil {
			return err
		}
		defer tu.Dispose()
		log.Debugf("tu.Spelling(): %T => %+v\n", tu.Spelling(), tu.Spelling())
		return nil
	}
	if cErr := p.idx.ParseTranslationUnit2(arg.filename, arg.flag, nil, p.config.ClangOption, &tu); clang.ErrorCode(cErr) != clang.Error_Success {
		return errors.New(clang.ErrorCode(cErr).Spelling())
	}
	defer tu.Dispose()
	// Serialize the TU concurrently with the AST walk below; the result
	// is collected from tuch after the walk.
	tuch := make(chan []byte, 1)
	go func() {
		tuch <- p.SerializeTranslationUnit(arg.filename, tu)
	}()
	// printDiagnostics(tu.Diagnostics())
	rootCursor := tu.TranslationUnitCursor()
	file := symbol.NewFile(arg.filename, arg.flag)
	// visitNode records declarations, definitions, references and
	// includes for every cursor in the translation unit.
	visitNode := func(cursor, parent clang.Cursor) clang.ChildVisitResult {
		if cursor.IsNull() {
			log.Debug("cursor: <none>")
			return clang.ChildVisit_Continue
		}
		cursorLoc := symbol.FromCursor(cursor)
		if cursorLoc.FileName() == "" || cursorLoc.FileName() == "." {
			// TODO(zchee): Ignore system header(?)
			return clang.ChildVisit_Continue
		}
		kind := cursor.Kind()
		switch kind {
		case clang.Cursor_FunctionDecl, clang.Cursor_StructDecl, clang.Cursor_FieldDecl, clang.Cursor_TypedefDecl, clang.Cursor_EnumDecl, clang.Cursor_EnumConstantDecl:
			// record either a bare declaration or a decl/definition pair
			defCursor := cursor.Definition()
			if defCursor.IsNull() {
				file.AddDecl(cursorLoc)
			} else {
				defLoc := symbol.FromCursor(defCursor)
				file.AddDefinition(cursorLoc, defLoc)
			}
		case clang.Cursor_MacroDefinition:
			// a macro definition is its own definition site
			file.AddDefinition(cursorLoc, cursorLoc)
		case clang.Cursor_VarDecl:
			file.AddDecl(cursorLoc)
		case clang.Cursor_ParmDecl:
			// skip unnamed parameters
			if cursor.Spelling() != "" {
				file.AddDecl(cursorLoc)
			}
		case clang.Cursor_CallExpr:
			refCursor := cursor.Referenced()
			refLoc := symbol.FromCursor(refCursor)
			file.AddCaller(cursorLoc, refLoc, true)
		case clang.Cursor_DeclRefExpr, clang.Cursor_TypeRef, clang.Cursor_MemberRefExpr, clang.Cursor_MacroExpansion:
			refCursor := cursor.Referenced()
			refLoc := symbol.FromCursor(refCursor)
			file.AddCaller(cursorLoc, refLoc, false)
		case clang.Cursor_InclusionDirective:
			incFile := cursor.IncludedFile()
			file.AddHeader(cursor.Spelling(), incFile)
		default:
			// count cursor kinds we do not handle, for debugging
			if p.debugUncatched {
				p.uncachedKind[kind]++
			}
		}
		return clang.ChildVisit_Recurse
	}
	rootCursor.Visit(visitNode)
	// wait for the serialized blob and attach it to the symbol file
	file.AddTranslationUnit(<-tuch)
	buf := file.Serialize()
	out := symbol.GetRootAsFile(buf.FinishedBytes(), 0)
	printFile(out) // for debug
	log.Debugf("Goroutine:%d", runtime.NumGoroutine())
	log.Debugf("================== DONE: filename: %+v ==================\n\n\n", arg.filename)
	return p.db.Put(fh, buf.FinishedBytes())
}
// SerializeTranslationUnit serialize the TranslationUnit to Clang serialized representation.
//
// The unit is saved into a temporary file, read back into memory, and
// the file removed. Save/read failures are fatal.
// TODO(zchee): Avoid ioutil.TempFile if possible.
func (p *Parser) SerializeTranslationUnit(filename string, tu clang.TranslationUnit) []byte {
	tmpFile, err := ioutil.TempFile(os.TempDir(), filepath.Base(filename))
	if err != nil {
		log.Fatal(err)
	}
	// Remove the file and close the handle on return (the original
	// never closed the handle, leaking the descriptor on success).
	defer os.Remove(tmpFile.Name())
	defer tmpFile.Close()
	saveOptions := uint32(clang.TranslationUnit_KeepGoing)
	if cErr := tu.SaveTranslationUnit(tmpFile.Name(), saveOptions); clang.SaveError(cErr) != clang.SaveError_None {
		log.Fatal(clang.SaveError(cErr))
	}
	buf, err := ioutil.ReadFile(tmpFile.Name())
	if err != nil {
		log.Fatal(err)
	}
	return buf
}
// DeserializeTranslationUnit deserialize the TranslationUnit from buf Clang serialized representation.
//
// buf is written to a temporary AST file which libclang then loads; the
// file is removed on every return path.
func (p *Parser) DeserializeTranslationUnit(idx clang.Index, buf []byte) (clang.TranslationUnit, error) {
	var tu clang.TranslationUnit
	tmpfile, err := ioutil.TempFile(os.TempDir(), "clang-server")
	if err != nil {
		return tu, err
	}
	// Remove on every path — the original only removed the file on
	// success, leaking it when TranslationUnit2 failed.
	defer os.Remove(tmpfile.Name())
	defer tmpfile.Close()
	// Check the write: the original discarded io.Copy's error, which
	// could hand clang a truncated AST file.
	if _, err := io.Copy(tmpfile, bytes.NewReader(buf)); err != nil {
		return tu, err
	}
	if err := idx.TranslationUnit2(tmpfile.Name(), &tu); clang.ErrorCode(err) != clang.Error_Success {
		return tu, errors.New(err.Spelling())
	}
	return tu, nil
}
// ClangVersion return the current clang version.
// The value comes straight from libclang's version string.
func ClangVersion() string {
	return clang.GetClangVersion()
}
|
package main
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"strconv"
"github.com/peteraba/d5/shared"
)
// readStdInput reads everything available on standard input. A read
// error is logged when logErrors is set; whatever was read is returned
// either way.
func readStdInput(logErrors bool) []byte {
	data, err := ioutil.ReadAll(bufio.NewReader(os.Stdin))
	if err != nil && logErrors {
		log.Println(err)
	}
	return data
}
// parseDictionary converts raw six-column dictionary rows into Words.
//
// Column layout: german, english, third, category, learned, score.
// Rows with an empty english column are skipped. Rows whose german
// column fails the category's regexp validation are recorded in the
// returned error list and stored as unparsed words.
// NOTE(review): the w == nil check assumes the shared constructors
// return an untyped nil interface on failure — a typed nil pointer
// would slip through; confirm in the shared package.
func parseDictionary(dictionary [][6]string, user string) ([]shared.Word, []string) {
	var (
		words       = []shared.Word{}
		parseErrors = []string{}
	)
	for _, word := range dictionary {
		var (
			w        shared.Word
			german   = word[0]
			english  = word[1]
			third    = word[2]
			category = word[3]
			learned  = word[4]
			score    = word[5]
		)
		if english == "" {
			continue
		}
		// Go switch cases do not fall through, so the original's
		// explicit break statements were redundant; removed.
		switch category {
		case "adj":
			w = shared.NewAdjective(german, english, third, user, learned, score)
		case "noun":
			if shared.NounRegexp.MatchString(german) {
				w = shared.NewNoun(german, english, third, user, learned, score)
			}
		case "verb":
			if shared.VerbRegexp.MatchString(german) {
				w = shared.NewVerb(german, english, third, user, learned, score)
			}
		default:
			w = shared.NewWord(german, english, third, category, user, learned, score, true)
		}
		if w == nil {
			parseErrors = append(parseErrors, german)
			w = shared.NewWord(german, english, third, category, user, learned, score, false)
		}
		words = append(words, w)
	}
	return words, parseErrors
}
// main reads a JSON dictionary from stdin, parses it into words, and
// prints the words as JSON on stdout.
//
// Usage: parser [user] [logErrors]
func main() {
	var (
		user       = ""
		logErrors  = false
		dictionary = [][6]string{}
	)
	if len(os.Args) > 1 {
		user = os.Args[1]
	}
	if len(os.Args) > 2 {
		// BUG FIX: the log flag is the second argument; the original
		// parsed os.Args[1] (the user name) here.
		logErrors, _ = strconv.ParseBool(os.Args[2])
	}
	// Surface malformed input instead of silently dropping it (the
	// original discarded json.Unmarshal's error).
	if err := json.Unmarshal(readStdInput(logErrors), &dictionary); err != nil && logErrors {
		log.Println(err)
	}
	words, parseErrors := parseDictionary(dictionary, user)
	if logErrors && len(parseErrors) > 0 {
		for _, word := range parseErrors {
			log.Printf("Failed: %v\n", word)
		}
	}
	b, err := json.Marshal(words)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(string(b))
}
Making parser readStdInput more reusable
package main
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"strconv"
"github.com/peteraba/d5/shared"
)
// readStdInput reads all of standard input, returning the bytes read
// together with any read error.
func readStdInput() ([]byte, error) {
	return ioutil.ReadAll(bufio.NewReader(os.Stdin))
}
// parseDictionary converts raw six-column dictionary rows into Words.
//
// Column layout: german, english, third, category, learned, score.
// Rows with an empty english column are skipped. Rows whose german
// column fails the category's regexp validation are recorded in the
// returned error list and stored as unparsed words.
// NOTE(review): the w == nil check assumes the shared constructors
// return an untyped nil interface on failure — a typed nil pointer
// would slip through; confirm in the shared package.
func parseDictionary(dictionary [][6]string, user string) ([]shared.Word, []string) {
	var (
		words       = []shared.Word{}
		parseErrors = []string{}
	)
	for _, word := range dictionary {
		var (
			w        shared.Word
			german   = word[0]
			english  = word[1]
			third    = word[2]
			category = word[3]
			learned  = word[4]
			score    = word[5]
		)
		if english == "" {
			continue
		}
		// Go switch cases do not fall through, so the original's
		// explicit break statements were redundant; removed.
		switch category {
		case "adj":
			w = shared.NewAdjective(german, english, third, user, learned, score)
		case "noun":
			if shared.NounRegexp.MatchString(german) {
				w = shared.NewNoun(german, english, third, user, learned, score)
			}
		case "verb":
			if shared.VerbRegexp.MatchString(german) {
				w = shared.NewVerb(german, english, third, user, learned, score)
			}
		default:
			w = shared.NewWord(german, english, third, category, user, learned, score, true)
		}
		if w == nil {
			parseErrors = append(parseErrors, german)
			w = shared.NewWord(german, english, third, category, user, learned, score, false)
		}
		words = append(words, w)
	}
	return words, parseErrors
}
// main reads a JSON dictionary from stdin, parses it into words, and
// prints the words as JSON on stdout.
//
// Usage: parser [user] [logErrors]
func main() {
	var (
		user       = ""
		logErrors  = false
		dictionary = [][6]string{}
	)
	if len(os.Args) > 1 {
		user = os.Args[1]
	}
	if len(os.Args) > 2 {
		// BUG FIX: parse the second argument; the original re-read
		// os.Args[1] (the user name) here.
		logErrors, _ = strconv.ParseBool(os.Args[2])
	}
	input, err := readStdInput()
	if err != nil && logErrors {
		log.Println(err)
	}
	// Surface malformed input instead of silently dropping it (the
	// original discarded json.Unmarshal's error).
	if err := json.Unmarshal(input, &dictionary); err != nil && logErrors {
		log.Println(err)
	}
	words, parseErrors := parseDictionary(dictionary, user)
	if logErrors && len(parseErrors) > 0 {
		for _, word := range parseErrors {
			log.Printf("Failed: %v\n", word)
		}
	}
	b, err := json.Marshal(words)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(string(b))
}
|
// Copyright 2012 Samuel Stauffer. All rights reserved.
// Use of this source code is governed by a 3-clause BSD
// license that can be found in the LICENSE file.
package parser
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/samuel/go-parser"
)
// Filesystem abstracts file access for include resolution, so callers
// can substitute their own lookup (e.g. virtual files in tests).
type Filesystem interface {
	Open(filename string) (io.ReadCloser, error)
}
// Parser parses thrift definition files.
type Parser struct {
	Filesystem Filesystem // For handling includes. Can be set to nil to fall back to os package.
}
// ErrSyntaxError describes a syntax error at a specific position in a
// thrift file. Left presumably holds the remaining unparsed input —
// confirm at the site that constructs it.
type ErrSyntaxError struct {
	File   string // file being parsed
	Line   int    // 1-based line of the failure
	Column int    // column within the line
	Offset int    // byte offset into the input
	Left   string
}

// Error implements the error interface; Left is deliberately omitted
// from the message.
func (e *ErrSyntaxError) Error() string {
	return fmt.Sprintf("Syntax Error %s:%d column %d offset %d",
		e.File, e.Line, e.Column, e.Offset)
}
var (
	// ErrParserFail is returned when parsing cannot proceed at all.
	ErrParserFail = errors.New("Parsing failed entirely")

	// spec configures the lexer: C-style block comments (nested),
	// '#' or '//' line comments, thrift identifier characters, and the
	// language's reserved words.
	spec = parser.Spec{
		CommentStart:   "/*",
		CommentEnd:     "*/",
		CommentLine:    parser.Any(parser.String("#"), parser.String("//")),
		NestedComments: true,
		IdentStart: parser.Satisfy(
			func(c rune) bool {
				return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || c == '_'
			}),
		IdentLetter: parser.Satisfy(
			func(c rune) bool {
				return (c >= 'A' && c <= 'Z') ||
					(c >= 'a' && c <= 'z') ||
					(c >= '0' && c <= '9') ||
					c == '.' || c == '_'
			}),
		ReservedNames: []string{
			"namespace", "struct", "enum", "const", "service", "throws",
			"required", "optional", "exception", "list", "map", "set",
		},
	}

	// simpleParser is the shared, pre-built thrift grammar.
	simpleParser = buildParser()
)
// quotedString returns a parser that consumes a double-quoted string
// literal, translating \n, \r and \t escapes (any other escaped
// character, including \", is kept verbatim). The produced output
// retains both surrounding quote characters.
func quotedString() parser.Parser {
	return func(st *parser.State) (parser.Output, bool, error) {
		next, err := st.Input.Next()
		if err != nil || next != '"' {
			// not a string literal (or a read failure): no match
			return nil, false, err
		}
		st.Input.Pop(1)
		escaped := false
		runes := make([]rune, 1, 8)
		runes[0] = '"'
		for {
			next, err := st.Input.Next()
			if err != nil {
				return nil, false, err
			}
			st.Input.Pop(1)
			if escaped {
				// translate the common escapes; others pass through
				switch next {
				case 'n':
					next = '\n'
				case 'r':
					next = '\r'
				case 't':
					next = '\t'
				}
				runes = append(runes, next)
				escaped = false
			} else {
				if next == '\\' {
					escaped = true
				} else {
					runes = append(runes, next)
				}
				// an unescaped closing quote ends the literal
				if next == '"' {
					break
				}
			}
		}
		return string(runes), true, nil
	}
}
// integer returns a parser that consumes an optionally negative
// decimal integer and produces it as an int64.
func integer() parser.Parser {
	return func(st *parser.State) (parser.Output, bool, error) {
		next, err := st.Input.Next()
		if err != nil || ((next < '0' || next > '9') && next != '-') {
			// not a digit or leading minus: no match
			return nil, false, err
		}
		st.Input.Pop(1)
		runes := make([]rune, 1, 8)
		runes[0] = next
		// accumulate digits until EOF or a non-digit
		for {
			next, err := st.Input.Next()
			if err == io.EOF || !(next >= '0' && next <= '9') {
				break
			} else if err != nil {
				return nil, false, err
			}
			st.Input.Pop(1)
			runes = append(runes, next)
		}
		// We're guaranteed to only have integers here so don't check the error
		// NOTE(review): a lone "-" fails ParseInt and silently yields 0.
		i64, _ := strconv.ParseInt(string(runes), 10, 64)
		return i64, true, nil
	}
}
// float returns a parser that consumes an optionally negative run of
// digits and '.' characters, producing a float64. A malformed number
// (e.g. "1.2.3") is reported as a non-match rather than an error.
func float() parser.Parser {
	return func(st *parser.State) (parser.Output, bool, error) {
		next, err := st.Input.Next()
		if err != nil || ((next < '0' || next > '9') && next != '-') {
			// not a digit or leading minus: no match
			return nil, false, err
		}
		st.Input.Pop(1)
		runes := make([]rune, 1, 8)
		runes[0] = next
		for {
			next, err := st.Input.Next()
			if err == io.EOF || !((next >= '0' && next <= '9') || next == '.') {
				break
			} else if err != nil {
				// BUG FIX: the original had a bare `else` here that
				// returned (with a nil error) on every successfully read
				// digit, making the accumulation below unreachable. Only
				// real read errors abort, mirroring integer().
				return nil, false, err
			}
			st.Input.Pop(1)
			runes = append(runes, next)
		}
		f64, err := strconv.ParseFloat(string(runes), 64)
		if err != nil {
			return nil, false, nil
		}
		return f64, true, nil
	}
}
// symbolValue pairs a top-level keyword (e.g. "struct", "service")
// with the parsed output of that section's grammar.
type symbolValue struct {
	symbol string      // the dispatching keyword
	value  interface{} // raw parser output for the section
}
// symbolDispatcher returns a parser that reads a lowercase keyword,
// looks up the matching section parser in table, and runs it after the
// separating whitespace. Unknown keywords are reported as a non-match.
func symbolDispatcher(table map[string]parser.Parser) parser.Parser {
	ws := parser.Whitespace()
	return func(st *parser.State) (parser.Output, bool, error) {
		next, err := st.Input.Next()
		if err != nil || !(next >= 'a' && next <= 'z') {
			// keywords start with a lowercase letter
			return nil, false, err
		}
		st.Input.Pop(1)
		runes := make([]rune, 1, 8)
		runes[0] = next
		// accumulate the keyword up to the first space or EOF
		for {
			next, err := st.Input.Next()
			if err == io.EOF || next == ' ' {
				break
			} else if err != nil {
				return nil, false, err
			}
			st.Input.Pop(1)
			runes = append(runes, next)
		}
		sym := string(runes)
		par := table[sym]
		if par == nil {
			// not a registered section keyword
			return nil, false, nil
		}
		_, ok, err := ws(st)
		if !ok || err != nil {
			return nil, false, err
		}
		out, ok, err := par(st)
		return symbolValue{sym, out}, ok, err
	}
}
// nilParser returns a parser that always succeeds with a nil output
// and consumes no input — used to make grammar elements optional.
func nilParser() parser.Parser {
	return func(_ *parser.State) (parser.Output, bool, error) {
		return nil, true, nil
	}
}
// parseType converts raw grammar output into a *Type. A bare string is
// a simple type name, with "void" mapping to a nil type; a
// []interface{} comes from the list/set/map grammar, and its element
// types are parsed recursively.
func parseType(t interface{}) *Type {
	typ := &Type{}
	switch t2 := t.(type) {
	case string:
		if t2 == "void" {
			return nil
		}
		typ.Name = t2
	case []interface{}:
		// container layout: [name, "<", type, (",", type,) ">"]
		typ.Name = t2[0].(string)
		if typ.Name == "map" {
			typ.KeyType = parseType(t2[2])
			typ.ValueType = parseType(t2[4])
		} else if typ.Name == "list" || typ.Name == "set" {
			typ.ValueType = parseType(t2[2])
		} else {
			panic("Basic type should never not be map or list: " + typ.Name)
		}
	default:
		panic("Type should never be anything but string or []interface{}")
	}
	return typ
}
// parseFields converts raw structFieldDef grammar output into Field
// values. Each entry has the shape:
// [id, ":", requiredness, type, name, default].
func parseFields(fi []interface{}) []*Field {
	fields := make([]*Field, len(fi))
	for i, f := range fi {
		parts := f.([]interface{})
		field := &Field{}
		field.Id = int(parts[0].(int64))
		// anything other than "optional" (including "") means required
		field.Optional = strings.ToLower(parts[2].(string)) == "optional"
		field.Type = parseType(parts[3])
		field.Name = parts[4].(string)
		field.Default = parts[5]
		fields[i] = field
	}
	return fields
}
// buildParser assembles the complete thrift grammar. Each section
// parser produces a raw []interface{} that is later converted into
// typed AST nodes.
func buildParser() parser.Parser {
	// a constant is a quoted string, integer or float literal
	constantValue := parser.Lexeme(parser.Any(quotedString(), integer(), float()))
	// namespace <scope> <identifier>
	namespaceDef := parser.Collect(
		parser.Identifier(), parser.Identifier())
	// include "<filename>"
	includeDef := parser.Collect(
		parser.Lexeme(quotedString()))
	// typeDef is recursive (e.g. list<list<i32>>), so tie the knot
	// through a forward-declared function variable.
	var typeDef func(st *parser.State) (parser.Output, bool, error)
	recurseTypeDef := func(st *parser.State) (parser.Output, bool, error) {
		return typeDef(st)
	}
	typeDef = parser.Any(
		parser.Identifier(),
		parser.Collect(parser.Symbol("list"),
			parser.Symbol("<"),
			recurseTypeDef,
			parser.Symbol(">")),
		parser.Collect(parser.Symbol("set"),
			parser.Symbol("<"),
			recurseTypeDef,
			parser.Symbol(">")),
		parser.Collect(parser.Symbol("map"),
			parser.Symbol("<"),
			recurseTypeDef,
			parser.Symbol(","),
			recurseTypeDef,
			parser.Symbol(">")),
	)
	// typedef <type> <name>
	typedefDef := parser.Collect(typeDef, parser.Identifier())
	// const <type> <name> = <value>
	constDef := parser.Collect(
		typeDef, parser.Identifier(), parser.Symbol("="), constantValue)
	// NAME [= int] [,]
	enumItemDef := parser.Collect(
		parser.Identifier(),
		parser.Any(
			parser.All(parser.Symbol("="), parser.Lexeme(integer())),
			nilParser(),
		),
		parser.Any(parser.Symbol(","), parser.Symbol("")),
	)
	enumDef := parser.Collect(
		parser.Identifier(),
		parser.Symbol("{"),
		parser.Many(enumItemDef),
		parser.Symbol("}"),
	)
	// id: [required|optional] <type> <name> [= default] [,]
	structFieldDef := parser.Collect(
		parser.Lexeme(integer()), parser.Symbol(":"),
		parser.Any(parser.Symbol("required"), parser.Symbol("optional"), parser.Symbol("")),
		typeDef, parser.Identifier(),
		// Default
		parser.Any(
			parser.All(parser.Symbol("="),
				parser.Lexeme(parser.Any(
					parser.Identifier(), quotedString(),
					parser.Try(float()), integer()))),
			nilParser(),
		),
		parser.Skip(parser.Many(parser.Symbol(","))),
	)
	structDef := parser.Collect(
		parser.Identifier(),
		parser.Symbol("{"),
		parser.Many(structFieldDef),
		parser.Symbol("}"),
	)
	// <returnType> <name>(<args>) [throws (<exceptions>)] [,|;]
	serviceMethodDef := parser.Collect(
		// // parser.Comments(),
		// parser.Whitespace(),
		typeDef, parser.Identifier(),
		parser.Symbol("("),
		parser.Many(structFieldDef),
		parser.Symbol(")"),
		// Exceptions
		parser.Any(
			parser.Collect(
				parser.Symbol("throws"),
				parser.Symbol("("),
				parser.Many(structFieldDef),
				parser.Symbol(")"),
			),
			nilParser(),
		),
		parser.Any(parser.Symbol(","), parser.Symbol(";"), parser.Symbol("")),
	)
	serviceDef := parser.Collect(
		parser.Identifier(),
		parser.Symbol("{"),
		parser.Many(serviceMethodDef),
		parser.Symbol("}"),
	)
	// a thrift file: leading whitespace, then any number of
	// keyword-dispatched sections
	thriftSpec := parser.All(parser.Whitespace(), parser.Many(
		symbolDispatcher(map[string]parser.Parser{
			"namespace": namespaceDef,
			"typedef":   typedefDef,
			"const":     constDef,
			"include":   includeDef,
			"enum":      enumDef,
			"exception": structDef,
			"struct":    structDef,
			"service":   serviceDef,
		}),
	))
	return thriftSpec
}
// outputToThrift converts the raw parser output (a slice of symbolValue
// entries, one per top-level IDL definition) into a populated *Thrift.
// Includes are resolved eagerly by recursively parsing the referenced file.
func (p *Parser) outputToThrift(obj parser.Output) (*Thrift, error) {
	thrift := &Thrift{
		Namespaces: make(map[string]string),
		Typedefs:   make(map[string]*Type),
		Constants:  make(map[string]*Constant),
		Enums:      make(map[string]*Enum),
		Structs:    make(map[string]*Struct),
		Exceptions: make(map[string]*Struct),
		Services:   make(map[string]*Service),
		Includes:   make(map[string]*Thrift),
	}
	for _, symI := range obj.([]interface{}) {
		sym := symI.(symbolValue)
		val := sym.value.([]interface{})
		switch sym.symbol {
		case "namespace":
			thrift.Namespaces[strings.ToLower(val[0].(string))] = val[1].(string)
		case "typedef":
			thrift.Typedefs[val[1].(string)] = parseType(val[0])
		case "const":
			// NOTE(review): the const type is asserted as a bare string
			// rather than going through parseType, so a container type
			// (list/set/map) here would panic — confirm const types are
			// always simple names.
			thrift.Constants[val[1].(string)] = &Constant{val[1].(string), &Type{Name: val[0].(string)}, val[3]}
		case "enum":
			en := &Enum{
				Name:   val[0].(string),
				Values: make(map[string]*EnumValue),
			}
			// next is the implicit value for an item without "= N":
			// one past the largest value assigned so far.
			next := 0
			for _, e := range val[2].([]interface{}) {
				parts := e.([]interface{})
				name := parts[0].(string)
				// This val shadows the outer definition slice on purpose.
				val := -1
				if parts[1] != nil {
					val = int(parts[1].(int64))
				} else {
					val = next
				}
				if val >= next {
					next = val + 1
				}
				en.Values[name] = &EnumValue{name, val}
			}
			thrift.Enums[en.Name] = en
		case "struct":
			thrift.Structs[val[0].(string)] = &Struct{
				Name:   val[0].(string),
				Fields: parseFields(val[2].([]interface{})),
			}
		case "exception":
			thrift.Exceptions[val[0].(string)] = &Struct{
				Name:   val[0].(string),
				Fields: parseFields(val[2].([]interface{})),
			}
		case "service":
			s := &Service{
				Name:    val[0].(string),
				Methods: make(map[string]*Method),
			}
			for _, m := range val[2].([]interface{}) {
				parts := m.([]interface{})
				// parts[5] holds the optional "throws ( ... )" clause.
				var exc []*Field = nil
				if parts[5] != nil {
					exc = parseFields((parts[5].([]interface{}))[2].([]interface{}))
				} else {
					exc = make([]*Field, 0)
				}
				// Every thrown field is forced to optional.
				for _, f := range exc {
					f.Optional = true
				}
				method := &Method{
					Name:       parts[1].(string),
					ReturnType: parseType(parts[0]),
					Arguments:  parseFields(parts[3].([]interface{})),
					Exceptions: exc,
				}
				s.Methods[method.Name] = method
			}
			thrift.Services[s.Name] = s
		case "include":
			filename := val[0].(string)
			// quotedString keeps the surrounding quotes; strip them.
			filename = filename[1 : len(filename)-1]
			tr, err := p.ParseFile(filename)
			if err != nil {
				return nil, err
			}
			thrift.Includes[strings.Split(filename, ".")[0]] = tr
		default:
			panic("Should never have an unhandled symbol: " + sym.symbol)
		}
	}
	return thrift, nil
}
// Parse reads all of r and parses it as a Thrift IDL document.
// It returns ErrParserFail when the grammar matches nothing at all and
// an *ErrSyntaxError when parsing stops before the end of the input.
func (p *Parser) Parse(r io.Reader) (*Thrift, error) {
	b, err := ioutil.ReadAll(r)
	if err != nil {
		// Report read failures to the caller instead of panicking:
		// this is library code and the signature already returns error.
		return nil, err
	}
	str := string(b)
	in := parser.NewStringInput(str)
	st := &parser.State{
		Input: in,
		Spec:  spec,
	}
	out, ok, err := simpleParser(st)
	if err != nil && err != io.EOF {
		return nil, err
	}
	if !ok {
		return nil, ErrParserFail
	}
	// If the parser stopped before EOF, peek once more: anything left
	// over means the input was only partially consumed.
	if err != io.EOF {
		_, err = st.Input.Next()
	}
	if err != io.EOF {
		pos := in.Position()
		return nil, &ErrSyntaxError{
			File:   pos.Name,
			Line:   pos.Line,
			Column: pos.Column,
			Offset: pos.Offset,
			Left:   str[pos.Offset:],
		}
	}
	return p.outputToThrift(out)
}
// ParseFile opens filename — through p.Filesystem when one is
// configured, falling back to the os package otherwise — and parses
// its contents as a Thrift IDL document.
func (p *Parser) ParseFile(filename string) (*Thrift, error) {
	var (
		rc  io.ReadCloser
		err error
	)
	if fs := p.Filesystem; fs != nil {
		rc, err = fs.Open(filename)
	} else {
		rc, err = os.Open(filename)
	}
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return p.Parse(rc)
}
Fix float default parsing
// Copyright 2012 Samuel Stauffer. All rights reserved.
// Use of this source code is governed by a 3-clause BSD
// license that can be found in the LICENSE file.
package parser
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/samuel/go-parser"
)
// Filesystem abstracts file access so includes can be resolved from
// sources other than the local disk (e.g. an in-memory store in tests).
type Filesystem interface {
	Open(filename string) (io.ReadCloser, error)
}

// Parser parses Thrift IDL documents, recursively following includes.
type Parser struct {
	Filesystem Filesystem // For handling includes. Can be set to nil to fall back to os package.
}
// ErrSyntaxError reports the position at which parsing stopped and the
// unconsumed remainder of the input.
type ErrSyntaxError struct {
	File   string
	Line   int
	Column int
	Offset int
	Left   string // input left unparsed, starting at Offset
}

// Error implements the error interface. Left is intentionally omitted
// from the message since it can be arbitrarily long.
func (e *ErrSyntaxError) Error() string {
	return fmt.Sprintf("Syntax Error %s:%d column %d offset %d",
		e.File, e.Line, e.Column, e.Offset)
}
var (
	// ErrParserFail is returned when the grammar matches nothing at all.
	ErrParserFail = errors.New("Parsing failed entirely")
	// spec configures lexing for Thrift IDL: nestable /* */ block
	// comments, both # and // line comments, identifiers that may also
	// contain '.' and '_', and the IDL keywords as reserved names.
	spec = parser.Spec{
		CommentStart:   "/*",
		CommentEnd:     "*/",
		CommentLine:    parser.Any(parser.String("#"), parser.String("//")),
		NestedComments: true,
		IdentStart: parser.Satisfy(
			func(c rune) bool {
				return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || c == '_'
			}),
		IdentLetter: parser.Satisfy(
			func(c rune) bool {
				return (c >= 'A' && c <= 'Z') ||
					(c >= 'a' && c <= 'z') ||
					(c >= '0' && c <= '9') ||
					c == '.' || c == '_'
			}),
		ReservedNames: []string{
			"namespace", "struct", "enum", "const", "service", "throws",
			"required", "optional", "exception", "list", "map", "set",
		},
	}
	// simpleParser is the compiled grammar, built once at package init.
	simpleParser = buildParser()
)
// quotedString returns a parser for a double-quoted string literal.
// The returned string KEEPS its surrounding quote characters; callers
// that need the bare value strip them (see the "include" handling in
// outputToThrift). Recognizes the escapes \n, \r and \t; any other
// escaped character is emitted verbatim.
func quotedString() parser.Parser {
	return func(st *parser.State) (parser.Output, bool, error) {
		// Fail without consuming input unless the next rune is '"'.
		next, err := st.Input.Next()
		if err != nil || next != '"' {
			return nil, false, err
		}
		st.Input.Pop(1)
		escaped := false
		runes := make([]rune, 1, 8)
		runes[0] = '"'
		for {
			next, err := st.Input.Next()
			if err != nil {
				// Including EOF before the closing quote.
				return nil, false, err
			}
			st.Input.Pop(1)
			if escaped {
				switch next {
				case 'n':
					next = '\n'
				case 'r':
					next = '\r'
				case 't':
					next = '\t'
				}
				runes = append(runes, next)
				escaped = false
			} else {
				if next == '\\' {
					escaped = true
				} else {
					runes = append(runes, next)
				}
				// An unescaped closing quote ends the literal (and is
				// included in the result).
				if next == '"' {
					break
				}
			}
		}
		return string(runes), true, nil
	}
}
// integer returns a parser for a base-10 integer literal with an
// optional leading '-'. The value is produced as an int64.
func integer() parser.Parser {
	return func(st *parser.State) (parser.Output, bool, error) {
		next, err := st.Input.Next()
		if err != nil || ((next < '0' || next > '9') && next != '-') {
			return nil, false, err
		}
		st.Input.Pop(1)
		runes := make([]rune, 1, 8)
		runes[0] = next
		for {
			next, err := st.Input.Next()
			// NOTE(review): a non-EOF read error whose rune is not a
			// digit takes the break branch and is silently dropped.
			if err == io.EOF || !(next >= '0' && next <= '9') {
				break
			} else if err != nil {
				return nil, false, err
			}
			st.Input.Pop(1)
			runes = append(runes, next)
		}
		// We're guaranteed to only have integers here so don't check the error
		// NOTE(review): a bare "-" with no digits would fail ParseInt
		// and yield 0 — presumably such input never reaches this point.
		i64, _ := strconv.ParseInt(string(runes), 10, 64)
		return i64, true, nil
	}
}
// float returns a parser for a decimal literal with an optional
// leading '-'. It accumulates digit and '.' runes, then validates the
// spelling with strconv.ParseFloat; an invalid spelling is reported as
// "no match" (nil error) so a wrapping parser.Try can backtrack and an
// alternative such as integer() can be attempted instead.
func float() parser.Parser {
	return func(st *parser.State) (parser.Output, bool, error) {
		next, err := st.Input.Next()
		if err != nil || ((next < '0' || next > '9') && next != '-') {
			return nil, false, err
		}
		st.Input.Pop(1)
		runes := make([]rune, 1, 8)
		runes[0] = next
		for {
			next, err := st.Input.Next()
			if err == io.EOF || !((next >= '0' && next <= '9') || next == '.') {
				break
			} else if err != nil {
				return nil, false, err
			}
			st.Input.Pop(1)
			runes = append(runes, next)
		}
		f64, err := strconv.ParseFloat(string(runes), 64)
		if err != nil {
			// Not a valid float (e.g. stray dots): signal failure
			// without an error so other alternatives can be tried.
			return nil, false, nil
		}
		return f64, true, nil
	}
}
// symbolValue pairs a top-level keyword (e.g. "struct", "enum") with
// the parsed value of the definition that followed it.
type symbolValue struct {
	symbol string
	value  interface{}
}
// symbolDispatcher reads a lowercase keyword from the input and runs
// the parser registered for it in table, wrapping the result in a
// symbolValue so callers know which definition kind was parsed.
// An unknown keyword is a soft failure (no match, no error).
func symbolDispatcher(table map[string]parser.Parser) parser.Parser {
	ws := parser.Whitespace()
	return func(st *parser.State) (parser.Output, bool, error) {
		next, err := st.Input.Next()
		if err != nil || !(next >= 'a' && next <= 'z') {
			return nil, false, err
		}
		st.Input.Pop(1)
		runes := make([]rune, 1, 8)
		runes[0] = next
		for {
			next, err := st.Input.Next()
			// NOTE(review): only a single space (or EOF) terminates the
			// keyword — a tab or newline after it would be folded into
			// the symbol; confirm inputs are space-delimited here.
			if err == io.EOF || next == ' ' {
				break
			} else if err != nil {
				return nil, false, err
			}
			st.Input.Pop(1)
			runes = append(runes, next)
		}
		sym := string(runes)
		par := table[sym]
		if par == nil {
			// Unknown keyword: no match, but not a hard error.
			return nil, false, nil
		}
		// Consume the whitespace separating keyword and definition.
		_, ok, err := ws(st)
		if !ok || err != nil {
			return nil, false, err
		}
		out, ok, err := par(st)
		return symbolValue{sym, out}, ok, err
	}
}
// nilParser returns a parser that always succeeds without consuming
// input and yields nil; used inside parser.Any to make a rule optional.
func nilParser() parser.Parser {
	return func(st *parser.State) (parser.Output, bool, error) {
		return nil, true, nil
	}
}
// parseType converts raw typeDef parser output into a *Type. A plain
// string is a named (or base) type, with "void" mapping to nil; a
// slice is a container produced by the typeDef grammar rule, laid out
// as ["map", "<", key, ",", value, ">"] or ["list"/"set", "<", elem, ">"].
func parseType(t interface{}) *Type {
	typ := &Type{}
	switch t2 := t.(type) {
	case string:
		if t2 == "void" {
			return nil
		}
		typ.Name = t2
	case []interface{}:
		typ.Name = t2[0].(string)
		if typ.Name == "map" {
			typ.KeyType = parseType(t2[2])
			typ.ValueType = parseType(t2[4])
		} else if typ.Name == "list" || typ.Name == "set" {
			typ.ValueType = parseType(t2[2])
		} else {
			// Message fixed: the old text was a double negative and
			// omitted "set", which the branch above clearly accepts.
			panic("Container type should always be map, list, or set: " + typ.Name)
		}
	default:
		panic("Type should never be anything but string or []interface{}")
	}
	return typ
}
// parseFields converts raw structFieldDef parser output into Field
// values. Each raw entry is a slice laid out as:
// [id, ":", requiredness, type, name, default].
func parseFields(fi []interface{}) []*Field {
	fields := make([]*Field, len(fi))
	for idx, raw := range fi {
		entry := raw.([]interface{})
		fields[idx] = &Field{
			Id:       int(entry[0].(int64)),
			Optional: strings.ToLower(entry[2].(string)) == "optional",
			Type:     parseType(entry[3]),
			Name:     entry[4].(string),
			Default:  entry[5],
		}
	}
	return fields
}
// buildParser assembles the full Thrift IDL grammar and returns the
// top-level parser: leading whitespace followed by any number of
// keyword-dispatched definitions (namespace, typedef, const, include,
// enum, exception, struct, service).
func buildParser() parser.Parser {
	// A constant value is a quoted string, an integer, or a float.
	constantValue := parser.Lexeme(parser.Any(quotedString(), integer(), float()))
	namespaceDef := parser.Collect(
		parser.Identifier(), parser.Identifier())
	includeDef := parser.Collect(
		parser.Lexeme(quotedString()))
	// typeDef is recursive (container types nest), so it is declared
	// first and referenced through a trampoline closure.
	var typeDef func(st *parser.State) (parser.Output, bool, error)
	recurseTypeDef := func(st *parser.State) (parser.Output, bool, error) {
		return typeDef(st)
	}
	typeDef = parser.Any(
		parser.Identifier(),
		parser.Collect(parser.Symbol("list"),
			parser.Symbol("<"),
			recurseTypeDef,
			parser.Symbol(">")),
		parser.Collect(parser.Symbol("set"),
			parser.Symbol("<"),
			recurseTypeDef,
			parser.Symbol(">")),
		parser.Collect(parser.Symbol("map"),
			parser.Symbol("<"),
			recurseTypeDef,
			parser.Symbol(","),
			recurseTypeDef,
			parser.Symbol(">")),
	)
	typedefDef := parser.Collect(typeDef, parser.Identifier())
	constDef := parser.Collect(
		typeDef, parser.Identifier(), parser.Symbol("="), constantValue)
	// One enum item: name, optional "= N", optional trailing comma.
	enumItemDef := parser.Collect(
		parser.Identifier(),
		parser.Any(
			parser.All(parser.Symbol("="), parser.Lexeme(integer())),
			nilParser(),
		),
		parser.Any(parser.Symbol(","), parser.Symbol("")),
	)
	enumDef := parser.Collect(
		parser.Identifier(),
		parser.Symbol("{"),
		parser.Many(enumItemDef),
		parser.Symbol("}"),
	)
	// One struct field: "id : [required|optional] type name [= default][,]".
	structFieldDef := parser.Collect(
		parser.Lexeme(integer()), parser.Symbol(":"),
		parser.Any(parser.Symbol("required"), parser.Symbol("optional"), parser.Symbol("")),
		typeDef, parser.Identifier(),
		// Default value (optional). float() is wrapped in Try so a
		// plain integer can be re-parsed after a failed float match.
		parser.Any(
			parser.All(parser.Symbol("="),
				parser.Lexeme(parser.Any(
					parser.Identifier(), quotedString(),
					parser.Try(float()), integer()))),
			nilParser(),
		),
		parser.Skip(parser.Many(parser.Symbol(","))),
	)
	structDef := parser.Collect(
		parser.Identifier(),
		parser.Symbol("{"),
		parser.Many(structFieldDef),
		parser.Symbol("}"),
	)
	serviceMethodDef := parser.Collect(
		// // parser.Comments(),
		// parser.Whitespace(),
		typeDef, parser.Identifier(),
		parser.Symbol("("),
		parser.Many(structFieldDef),
		parser.Symbol(")"),
		// Exceptions: optional "throws ( ... )" clause.
		parser.Any(
			parser.Collect(
				parser.Symbol("throws"),
				parser.Symbol("("),
				parser.Many(structFieldDef),
				parser.Symbol(")"),
			),
			nilParser(),
		),
		parser.Any(parser.Symbol(","), parser.Symbol(";"), parser.Symbol("")),
	)
	serviceDef := parser.Collect(
		parser.Identifier(),
		parser.Symbol("{"),
		parser.Many(serviceMethodDef),
		parser.Symbol("}"),
	)
	// exception bodies share the struct grammar.
	thriftSpec := parser.All(parser.Whitespace(), parser.Many(
		symbolDispatcher(map[string]parser.Parser{
			"namespace": namespaceDef,
			"typedef":   typedefDef,
			"const":     constDef,
			"include":   includeDef,
			"enum":      enumDef,
			"exception": structDef,
			"struct":    structDef,
			"service":   serviceDef,
		}),
	))
	return thriftSpec
}
// outputToThrift converts the raw parser output (a slice of symbolValue
// entries, one per top-level IDL definition) into a populated *Thrift.
// Includes are resolved eagerly by recursively parsing the referenced file.
func (p *Parser) outputToThrift(obj parser.Output) (*Thrift, error) {
	thrift := &Thrift{
		Namespaces: make(map[string]string),
		Typedefs:   make(map[string]*Type),
		Constants:  make(map[string]*Constant),
		Enums:      make(map[string]*Enum),
		Structs:    make(map[string]*Struct),
		Exceptions: make(map[string]*Struct),
		Services:   make(map[string]*Service),
		Includes:   make(map[string]*Thrift),
	}
	for _, symI := range obj.([]interface{}) {
		sym := symI.(symbolValue)
		val := sym.value.([]interface{})
		switch sym.symbol {
		case "namespace":
			thrift.Namespaces[strings.ToLower(val[0].(string))] = val[1].(string)
		case "typedef":
			thrift.Typedefs[val[1].(string)] = parseType(val[0])
		case "const":
			// NOTE(review): the const type is asserted as a bare string
			// rather than going through parseType, so a container type
			// (list/set/map) here would panic — confirm const types are
			// always simple names.
			thrift.Constants[val[1].(string)] = &Constant{val[1].(string), &Type{Name: val[0].(string)}, val[3]}
		case "enum":
			en := &Enum{
				Name:   val[0].(string),
				Values: make(map[string]*EnumValue),
			}
			// next is the implicit value for an item without "= N":
			// one past the largest value assigned so far.
			next := 0
			for _, e := range val[2].([]interface{}) {
				parts := e.([]interface{})
				name := parts[0].(string)
				// This val shadows the outer definition slice on purpose.
				val := -1
				if parts[1] != nil {
					val = int(parts[1].(int64))
				} else {
					val = next
				}
				if val >= next {
					next = val + 1
				}
				en.Values[name] = &EnumValue{name, val}
			}
			thrift.Enums[en.Name] = en
		case "struct":
			thrift.Structs[val[0].(string)] = &Struct{
				Name:   val[0].(string),
				Fields: parseFields(val[2].([]interface{})),
			}
		case "exception":
			thrift.Exceptions[val[0].(string)] = &Struct{
				Name:   val[0].(string),
				Fields: parseFields(val[2].([]interface{})),
			}
		case "service":
			s := &Service{
				Name:    val[0].(string),
				Methods: make(map[string]*Method),
			}
			for _, m := range val[2].([]interface{}) {
				parts := m.([]interface{})
				// parts[5] holds the optional "throws ( ... )" clause.
				var exc []*Field = nil
				if parts[5] != nil {
					exc = parseFields((parts[5].([]interface{}))[2].([]interface{}))
				} else {
					exc = make([]*Field, 0)
				}
				// Every thrown field is forced to optional.
				for _, f := range exc {
					f.Optional = true
				}
				method := &Method{
					Name:       parts[1].(string),
					ReturnType: parseType(parts[0]),
					Arguments:  parseFields(parts[3].([]interface{})),
					Exceptions: exc,
				}
				s.Methods[method.Name] = method
			}
			thrift.Services[s.Name] = s
		case "include":
			filename := val[0].(string)
			// quotedString keeps the surrounding quotes; strip them.
			filename = filename[1 : len(filename)-1]
			tr, err := p.ParseFile(filename)
			if err != nil {
				return nil, err
			}
			thrift.Includes[strings.Split(filename, ".")[0]] = tr
		default:
			panic("Should never have an unhandled symbol: " + sym.symbol)
		}
	}
	return thrift, nil
}
// Parse reads all of r and parses it as a Thrift IDL document.
// It returns ErrParserFail when the grammar matches nothing at all and
// an *ErrSyntaxError when parsing stops before the end of the input.
func (p *Parser) Parse(r io.Reader) (*Thrift, error) {
	b, err := ioutil.ReadAll(r)
	if err != nil {
		// Report read failures to the caller instead of panicking:
		// this is library code and the signature already returns error.
		return nil, err
	}
	str := string(b)
	in := parser.NewStringInput(str)
	st := &parser.State{
		Input: in,
		Spec:  spec,
	}
	out, ok, err := simpleParser(st)
	if err != nil && err != io.EOF {
		return nil, err
	}
	if !ok {
		return nil, ErrParserFail
	}
	// If the parser stopped before EOF, peek once more: anything left
	// over means the input was only partially consumed.
	if err != io.EOF {
		_, err = st.Input.Next()
	}
	if err != io.EOF {
		pos := in.Position()
		return nil, &ErrSyntaxError{
			File:   pos.Name,
			Line:   pos.Line,
			Column: pos.Column,
			Offset: pos.Offset,
			Left:   str[pos.Offset:],
		}
	}
	return p.outputToThrift(out)
}
// ParseFile opens filename — through p.Filesystem when one is
// configured, falling back to the os package otherwise — and parses
// its contents as a Thrift IDL document.
func (p *Parser) ParseFile(filename string) (*Thrift, error) {
	var (
		rc  io.ReadCloser
		err error
	)
	if fs := p.Filesystem; fs != nil {
		rc, err = fs.Open(filename)
	} else {
		rc, err = os.Open(filename)
	}
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return p.Parse(rc)
}
|
package funcmaps
import (
"fmt"
"html/template"
"time"
)
// Time spans expressed in seconds, used to bucket Unix-time deltas.
// Month and Year are calendar approximations (30-day months and hence
// 360-day years) — presumably good enough for coarse display.
const (
	Minute = 60
	Hour   = 60 * Minute
	Day    = 24 * Hour
	Week   = 7 * Day
	Month  = 30 * Day
	Year   = 12 * Month
)
// timeSince renders the distance between then and the current moment
// as a coarse Japanese phrase ("3時間前", "10分今から", "たった今", ...).
// Future timestamps get the suffix 今から instead of 前.
func timeSince(then time.Time) string {
	now := time.Now()
	suffix := "前"
	diff := now.Unix() - then.Unix()
	if then.After(now) {
		suffix = "今から"
		diff = then.Unix() - now.Unix()
	}
	// Buckets from smallest to largest; each singular bucket spans
	// [1 unit, 2 units) and the plural one [2 units, next unit).
	if diff <= 0 {
		return "たった今"
	}
	if diff <= 2 {
		return "1秒前"
	}
	if diff < Minute {
		return fmt.Sprintf("%d秒%s", diff, suffix)
	}
	if diff < 2*Minute {
		return "1分" + suffix
	}
	if diff < Hour {
		return fmt.Sprintf("%d分%s", diff/Minute, suffix)
	}
	if diff < 2*Hour {
		return "1時間" + suffix
	}
	if diff < Day {
		return fmt.Sprintf("%d時間%s", diff/Hour, suffix)
	}
	if diff < 2*Day {
		return "1日" + suffix
	}
	if diff < Week {
		return fmt.Sprintf("%d日%s", diff/Day, suffix)
	}
	if diff < 2*Week {
		return "1週間" + suffix
	}
	if diff < Month {
		return fmt.Sprintf("%d週間%s", diff/Week, suffix)
	}
	if diff < 2*Month {
		return "1ヶ月" + suffix
	}
	if diff < Year {
		return fmt.Sprintf("%dヶ月%s", diff/Month, suffix)
	}
	if diff < 2*Year {
		return "1年" + suffix
	}
	return fmt.Sprintf("%d年%s", diff/Year, suffix)
}
// timeSinceColor picks a CSS color name encoding how far then is from
// the current moment; nearer times map to hotter colors. Future times
// are colored by their absolute distance.
func timeSinceColor(then time.Time) string {
	now := time.Now()
	diff := now.Unix() - then.Unix()
	if then.After(now) {
		diff = then.Unix() - now.Unix()
	}
	// Adjacent buckets sharing a color are collapsed into one
	// upper-bound check; thresholds match timeSince's buckets.
	switch {
	case diff < 2*Minute:
		return "red"
	case diff < 2*Hour:
		return "salmon"
	case diff < Day:
		return "darkorange"
	case diff < 2*Day:
		return "chocolate"
	case diff < 2*Week:
		return "darkred"
	case diff < 2*Month:
		return "olivedrab"
	case diff < 2*Year:
		return "steelblue"
	default:
		return "slategrey"
	}
}
// TimeSince formats t as a colored, tooltipped HTML span whose text is
// the human-readable distance from now (see timeSince/timeSinceColor).
func TimeSince(t time.Time) template.HTML {
	span := fmt.Sprintf(
		`<span class="time-since" style="color: %s;" title="%s">%s</span>`,
		timeSinceColor(t), t.Format("2006/01/02 15:04"), timeSince(t),
	)
	return template.HTML(span)
}
// ToUnix returns t as a Unix timestamp in seconds.
func ToUnix(t time.Time) int64 {
	return t.Unix()
}

// ToDay returns the day-of-month component of t.
func ToDay(t time.Time) int {
	return t.Day()
}

// ToMonth returns the month component of t.
func ToMonth(t time.Time) time.Month {
	return t.Month()
}

// ToYear returns the year component of t.
func ToYear(t time.Time) int {
	return t.Year()
}
// Datenow formats the current time shifted forward nine hours using
// the given reference layout.
// NOTE(review): Add(9h) assumes the process clock is UTC and the
// intended display zone is UTC+9 (JST); if local time is already JST
// this double-shifts — confirm the deployment timezone.
func Datenow(format string) string {
	return time.Now().Add(time.Duration(9) * time.Hour).Format(format)
}
Change the under-one-day color in timeSinceColor from darkorange to chocolate
package funcmaps
import (
"fmt"
"html/template"
"time"
)
// Time spans expressed in seconds, used to bucket Unix-time deltas.
// Month and Year are calendar approximations (30-day months and hence
// 360-day years) — presumably good enough for coarse display.
const (
	Minute = 60
	Hour   = 60 * Minute
	Day    = 24 * Hour
	Week   = 7 * Day
	Month  = 30 * Day
	Year   = 12 * Month
)
// timeSince renders the distance between then and the current moment
// as a coarse Japanese phrase ("3時間前", "10分今から", "たった今", ...).
// Future timestamps get the suffix 今から instead of 前.
func timeSince(then time.Time) string {
	now := time.Now()
	suffix := "前"
	diff := now.Unix() - then.Unix()
	if then.After(now) {
		suffix = "今から"
		diff = then.Unix() - now.Unix()
	}
	// Buckets from smallest to largest; each singular bucket spans
	// [1 unit, 2 units) and the plural one [2 units, next unit).
	if diff <= 0 {
		return "たった今"
	}
	if diff <= 2 {
		return "1秒前"
	}
	if diff < Minute {
		return fmt.Sprintf("%d秒%s", diff, suffix)
	}
	if diff < 2*Minute {
		return "1分" + suffix
	}
	if diff < Hour {
		return fmt.Sprintf("%d分%s", diff/Minute, suffix)
	}
	if diff < 2*Hour {
		return "1時間" + suffix
	}
	if diff < Day {
		return fmt.Sprintf("%d時間%s", diff/Hour, suffix)
	}
	if diff < 2*Day {
		return "1日" + suffix
	}
	if diff < Week {
		return fmt.Sprintf("%d日%s", diff/Day, suffix)
	}
	if diff < 2*Week {
		return "1週間" + suffix
	}
	if diff < Month {
		return fmt.Sprintf("%d週間%s", diff/Week, suffix)
	}
	if diff < 2*Month {
		return "1ヶ月" + suffix
	}
	if diff < Year {
		return fmt.Sprintf("%dヶ月%s", diff/Month, suffix)
	}
	if diff < 2*Year {
		return "1年" + suffix
	}
	return fmt.Sprintf("%d年%s", diff/Year, suffix)
}
// timeSinceColor picks a CSS color name encoding how far then is from
// the current moment; nearer times map to hotter colors. Future times
// are colored by their absolute distance.
func timeSinceColor(then time.Time) string {
	now := time.Now()
	diff := now.Unix() - then.Unix()
	if then.After(now) {
		diff = then.Unix() - now.Unix()
	}
	// Adjacent buckets sharing a color are collapsed into one
	// upper-bound check; thresholds match timeSince's buckets.
	switch {
	case diff < 2*Minute:
		return "red"
	case diff < 2*Hour:
		return "salmon"
	case diff < 2*Day:
		return "chocolate"
	case diff < 2*Week:
		return "darkred"
	case diff < 2*Month:
		return "olivedrab"
	case diff < 2*Year:
		return "steelblue"
	default:
		return "slategrey"
	}
}
// TimeSince formats t as a colored, tooltipped HTML span whose text is
// the human-readable distance from now (see timeSince/timeSinceColor).
func TimeSince(t time.Time) template.HTML {
	span := fmt.Sprintf(
		`<span class="time-since" style="color: %s;" title="%s">%s</span>`,
		timeSinceColor(t), t.Format("2006/01/02 15:04"), timeSince(t),
	)
	return template.HTML(span)
}
// ToUnix returns t as a Unix timestamp in seconds.
func ToUnix(t time.Time) int64 {
	return t.Unix()
}

// ToDay returns the day-of-month component of t.
func ToDay(t time.Time) int {
	return t.Day()
}

// ToMonth returns the month component of t.
func ToMonth(t time.Time) time.Month {
	return t.Month()
}

// ToYear returns the year component of t.
func ToYear(t time.Time) int {
	return t.Year()
}
// Datenow formats the current time shifted forward nine hours using
// the given reference layout.
// NOTE(review): Add(9h) assumes the process clock is UTC and the
// intended display zone is UTC+9 (JST); if local time is already JST
// this double-shifts — confirm the deployment timezone.
func Datenow(format string) string {
	return time.Now().Add(time.Duration(9) * time.Hour).Format(format)
}
|
package main
import (
"os"
"reflect"
"testing"
)
// TestNewCLI verifies that NewCLI drops the program name from the
// argument list and wires the standard streams to the OS defaults.
func TestNewCLI(t *testing.T) {
	cli := NewCLI(&Context{}, &Config{}, []string{"rid", "foo", "bar"})
	if !reflect.DeepEqual(cli.Args, []string{"foo", "bar"}) {
		t.Error("it should drop the first argument")
	}
	if cli.Stdin != os.Stdin {
		t.Error("it should initialize Stdin to the OS's standard input")
	}
	if cli.Stdout != os.Stdout {
		// Message fixed: this assertion is about Stdout, not Stdin.
		t.Error("it should initialize Stdout to the OS's standard output")
	}
	if cli.Stderr != os.Stderr {
		// Message fixed: this assertion is about Stderr, not Stdin.
		t.Error("it should initialize Stderr to the OS's standard error")
	}
}
func TestCLI_setup(t *testing.T) {
cli := NewCLI(&Context{
IP: "192.168.0.1",
}, &Config{
ProjectName: "myproject",
}, []string{"rid"})
setTestEnvs(map[string]string{
"COMPOSE_PROJECT_NAME": "",
"DOCKER_HOST_IP": "",
}, func() {
cli.setup()
if os.Getenv("COMPOSE_PROJECT_NAME") != cli.Config.ProjectName {
t.Error("it should set COMPOSE_PROJECT_NAME")
}
if os.Getenv("DOCKER_HOST_IP") != cli.Context.IP {
t.Error("it should set DOCKER_HOST_IP")
}
})
}
// TestCLI_substituteCommand verifies command-name substitution: no args
// becomes .help, known commands are replaced by their script paths (and
// set RunInContainer), and -h/--help routes to .sub-help when the
// command declares a help file.
func TestCLI_substituteCommand(t *testing.T) {
	cli := NewCLI(&Context{
		Command: map[string]*Command{
			"host": {
				Name:           "script/host",
				RunInContainer: false,
				HelpFile:       "/path/to/help.txt",
			},
			"container": {
				Name:           "script/container",
				RunInContainer: true,
			},
		},
	}, &Config{}, []string{"rid"})
	t.Run("no args", func(t *testing.T) {
		cli.Args = []string{}
		cli.substituteCommand()
		if !reflect.DeepEqual(cli.Args, []string{".help"}) {
			// "subsitute" typo fixed in all messages below as well.
			t.Error("it should substitute to .help")
		}
	})
	t.Run("host command", func(t *testing.T) {
		cli.Args = []string{"host", "foo", "bar"}
		cli.substituteCommand()
		if !reflect.DeepEqual(cli.Args, []string{"script/host", "foo", "bar"}) {
			t.Error("it should substitute")
		}
		if cli.RunInContainer != false {
			t.Error("it should make RunInContainer false")
		}
	})
	t.Run("container command", func(t *testing.T) {
		cli.Args = []string{"container", "foo", "bar"}
		cli.substituteCommand()
		if !reflect.DeepEqual(cli.Args, []string{"script/container", "foo", "bar"}) {
			t.Error("it should substitute")
		}
		if cli.RunInContainer != true {
			t.Error("it should make RunInContainer true")
		}
	})
	t.Run("sub-command help (no file)", func(t *testing.T) {
		cli.Args = []string{"container", "-h"}
		cli.substituteCommand()
		if !reflect.DeepEqual(cli.Args, []string{"script/container", "-h"}) {
			t.Error("it should substitute the command alone")
		}
	})
	t.Run("sub-command help (with file)", func(t *testing.T) {
		cli.Args = []string{"host", "-h"}
		cli.substituteCommand()
		if !reflect.DeepEqual(cli.Args, []string{".sub-help", cli.Context.Command["host"].HelpFile}) {
			t.Fatal("it should substitute to .sub-help")
		}
		cli.Args = []string{"host", "--help"}
		cli.substituteCommand()
		if !reflect.DeepEqual(cli.Args, []string{".sub-help", cli.Context.Command["host"].HelpFile}) {
			t.Fatal("it should substitute to .sub-help")
		}
	})
}
func setTestEnvs(kv map[string]string, block func()) {
original := make(map[string]string)
for k, v := range kv {
original[k] = os.Getenv(k)
os.Setenv(k, v)
}
defer func() {
for k := range kv {
os.Setenv(k, original[k])
}
}()
block()
}
Add a test covering CLI.ExecVersion's version and revision output
package main
import (
"bytes"
"os"
"reflect"
"strings"
"testing"
)
// TestNewCLI verifies that NewCLI drops the program name from the
// argument list and wires the standard streams to the OS defaults.
func TestNewCLI(t *testing.T) {
	cli := NewCLI(&Context{}, &Config{}, []string{"rid", "foo", "bar"})
	if !reflect.DeepEqual(cli.Args, []string{"foo", "bar"}) {
		t.Error("it should drop the first argument")
	}
	if cli.Stdin != os.Stdin {
		t.Error("it should initialize Stdin to the OS's standard input")
	}
	if cli.Stdout != os.Stdout {
		// Message fixed: this assertion is about Stdout, not Stdin.
		t.Error("it should initialize Stdout to the OS's standard output")
	}
	if cli.Stderr != os.Stderr {
		// Message fixed: this assertion is about Stderr, not Stdin.
		t.Error("it should initialize Stderr to the OS's standard error")
	}
}
func TestCLI_setup(t *testing.T) {
cli := NewCLI(&Context{
IP: "192.168.0.1",
}, &Config{
ProjectName: "myproject",
}, []string{"rid"})
setTestEnvs(map[string]string{
"COMPOSE_PROJECT_NAME": "",
"DOCKER_HOST_IP": "",
}, func() {
cli.setup()
if os.Getenv("COMPOSE_PROJECT_NAME") != cli.Config.ProjectName {
t.Error("it should set COMPOSE_PROJECT_NAME")
}
if os.Getenv("DOCKER_HOST_IP") != cli.Context.IP {
t.Error("it should set DOCKER_HOST_IP")
}
})
}
// TestCLI_substituteCommand verifies command-name substitution: no args
// becomes .help, known commands are replaced by their script paths (and
// set RunInContainer), and -h/--help routes to .sub-help when the
// command declares a help file.
func TestCLI_substituteCommand(t *testing.T) {
	cli := NewCLI(&Context{
		Command: map[string]*Command{
			"host": {
				Name:           "script/host",
				RunInContainer: false,
				HelpFile:       "/path/to/help.txt",
			},
			"container": {
				Name:           "script/container",
				RunInContainer: true,
			},
		},
	}, &Config{}, []string{"rid"})
	t.Run("no args", func(t *testing.T) {
		cli.Args = []string{}
		cli.substituteCommand()
		if !reflect.DeepEqual(cli.Args, []string{".help"}) {
			// "subsitute" typo fixed in all messages below as well.
			t.Error("it should substitute to .help")
		}
	})
	t.Run("host command", func(t *testing.T) {
		cli.Args = []string{"host", "foo", "bar"}
		cli.substituteCommand()
		if !reflect.DeepEqual(cli.Args, []string{"script/host", "foo", "bar"}) {
			t.Error("it should substitute")
		}
		if cli.RunInContainer != false {
			t.Error("it should make RunInContainer false")
		}
	})
	t.Run("container command", func(t *testing.T) {
		cli.Args = []string{"container", "foo", "bar"}
		cli.substituteCommand()
		if !reflect.DeepEqual(cli.Args, []string{"script/container", "foo", "bar"}) {
			t.Error("it should substitute")
		}
		if cli.RunInContainer != true {
			t.Error("it should make RunInContainer true")
		}
	})
	t.Run("sub-command help (no file)", func(t *testing.T) {
		cli.Args = []string{"container", "-h"}
		cli.substituteCommand()
		if !reflect.DeepEqual(cli.Args, []string{"script/container", "-h"}) {
			t.Error("it should substitute the command alone")
		}
	})
	t.Run("sub-command help (with file)", func(t *testing.T) {
		cli.Args = []string{"host", "-h"}
		cli.substituteCommand()
		if !reflect.DeepEqual(cli.Args, []string{".sub-help", cli.Context.Command["host"].HelpFile}) {
			t.Fatal("it should substitute to .sub-help")
		}
		cli.Args = []string{"host", "--help"}
		cli.substituteCommand()
		if !reflect.DeepEqual(cli.Args, []string{".sub-help", cli.Context.Command["host"].HelpFile}) {
			t.Fatal("it should substitute to .sub-help")
		}
	})
}
// TestCLI_ExecVersion verifies that ExecVersion writes both the version
// and the revision to the CLI's standard output.
func TestCLI_ExecVersion(t *testing.T) {
	var out bytes.Buffer
	cli := NewCLI(&Context{}, &Config{}, []string{"rid"})
	cli.Stdout = &out
	if err := cli.ExecVersion(); err != nil {
		t.Fatalf("it should not return error: %v", err)
	}
	printed := out.String()
	if !strings.Contains(printed, Version) {
		t.Error("it should print a version")
	}
	if !strings.Contains(printed, Revision) {
		t.Error("it should print a revision")
	}
}
func setTestEnvs(kv map[string]string, block func()) {
original := make(map[string]string)
for k, v := range kv {
original[k] = os.Getenv(k)
os.Setenv(k, v)
}
defer func() {
for k := range kv {
os.Setenv(k, original[k])
}
}()
block()
}
|
// Code generated by pigeon; DO NOT EDIT.
package parser
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"sort"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
// toIfaceSlice normalizes a pigeon match result: a nil match becomes a
// nil slice; anything else must already be a []interface{} (a non-nil
// value of any other type panics on the type assertion, exactly as in
// the upstream pigeon examples this helper was taken from).
func toIfaceSlice(v interface{}) []interface{} {
	switch s := v.(type) {
	case nil:
		return nil
	case []interface{}:
		return s
	default:
		// Preserve the original runtime panic for unexpected types.
		return v.([]interface{})
	}
}
var g = &grammar{
rules: []*rule{
{
name: "File",
pos: position{line: 17, col: 1, offset: 236},
expr: &actionExpr{
pos: position{line: 17, col: 9, offset: 244},
run: (*parser).callonFile1,
expr: &seqExpr{
pos: position{line: 17, col: 9, offset: 244},
exprs: []interface{}{
&labeledExpr{
pos: position{line: 17, col: 9, offset: 244},
label: "lines",
expr: &zeroOrMoreExpr{
pos: position{line: 17, col: 15, offset: 250},
expr: &ruleRefExpr{
pos: position{line: 17, col: 15, offset: 250},
name: "Line",
},
},
},
&ruleRefExpr{
pos: position{line: 17, col: 21, offset: 256},
name: "EOF",
},
},
},
},
},
{
name: "Line",
pos: position{line: 32, col: 1, offset: 585},
expr: &actionExpr{
pos: position{line: 32, col: 9, offset: 593},
run: (*parser).callonLine1,
expr: &seqExpr{
pos: position{line: 32, col: 9, offset: 593},
exprs: []interface{}{
&labeledExpr{
pos: position{line: 32, col: 9, offset: 593},
label: "ws",
expr: &ruleRefExpr{
pos: position{line: 32, col: 12, offset: 596},
name: "_",
},
},
&labeledExpr{
pos: position{line: 32, col: 14, offset: 598},
label: "item",
expr: &zeroOrOneExpr{
pos: position{line: 32, col: 19, offset: 603},
expr: &choiceExpr{
pos: position{line: 32, col: 20, offset: 604},
alternatives: []interface{}{
&ruleRefExpr{
pos: position{line: 32, col: 20, offset: 604},
name: "Comment",
},
&ruleRefExpr{
pos: position{line: 32, col: 30, offset: 614},
name: "Section",
},
&ruleRefExpr{
pos: position{line: 32, col: 40, offset: 624},
name: "KeyValuePair",
},
&ruleRefExpr{
pos: position{line: 32, col: 55, offset: 639},
name: "KeyOnly",
},
},
},
},
},
&labeledExpr{
pos: position{line: 32, col: 65, offset: 649},
label: "le",
expr: &ruleRefExpr{
pos: position{line: 32, col: 68, offset: 652},
name: "LineEnd",
},
},
},
},
},
},
{
name: "Comment",
pos: position{line: 40, col: 1, offset: 866},
expr: &actionExpr{
pos: position{line: 40, col: 12, offset: 877},
run: (*parser).callonComment1,
expr: &seqExpr{
pos: position{line: 40, col: 12, offset: 877},
exprs: []interface{}{
&labeledExpr{
pos: position{line: 40, col: 12, offset: 877},
label: "cs",
expr: &choiceExpr{
pos: position{line: 40, col: 16, offset: 881},
alternatives: []interface{}{
&litMatcher{
pos: position{line: 40, col: 16, offset: 881},
val: ";",
ignoreCase: false,
},
&litMatcher{
pos: position{line: 40, col: 22, offset: 887},
val: "#",
ignoreCase: false,
},
},
},
},
&labeledExpr{
pos: position{line: 40, col: 27, offset: 892},
label: "comment",
expr: &ruleRefExpr{
pos: position{line: 40, col: 35, offset: 900},
name: "CommentVal",
},
},
},
},
},
},
{
name: "Section",
pos: position{line: 47, col: 1, offset: 1109},
expr: &actionExpr{
pos: position{line: 47, col: 12, offset: 1120},
run: (*parser).callonSection1,
expr: &seqExpr{
pos: position{line: 47, col: 12, offset: 1120},
exprs: []interface{}{
&litMatcher{
pos: position{line: 47, col: 12, offset: 1120},
val: "[",
ignoreCase: false,
},
&labeledExpr{
pos: position{line: 47, col: 16, offset: 1124},
label: "name",
expr: &ruleRefExpr{
pos: position{line: 47, col: 21, offset: 1129},
name: "SectionName",
},
},
&litMatcher{
pos: position{line: 47, col: 33, offset: 1141},
val: "]",
ignoreCase: false,
},
&labeledExpr{
pos: position{line: 47, col: 37, offset: 1145},
label: "ws",
expr: &ruleRefExpr{
pos: position{line: 47, col: 40, offset: 1148},
name: "_",
},
},
&labeledExpr{
pos: position{line: 47, col: 42, offset: 1150},
label: "comment",
expr: &zeroOrOneExpr{
pos: position{line: 47, col: 50, offset: 1158},
expr: &ruleRefExpr{
pos: position{line: 47, col: 50, offset: 1158},
name: "Comment",
},
},
},
},
},
},
},
{
name: "KeyValuePair",
pos: position{line: 55, col: 1, offset: 1382},
expr: &actionExpr{
pos: position{line: 55, col: 17, offset: 1398},
run: (*parser).callonKeyValuePair1,
expr: &seqExpr{
pos: position{line: 55, col: 17, offset: 1398},
exprs: []interface{}{
&labeledExpr{
pos: position{line: 55, col: 17, offset: 1398},
label: "key",
expr: &ruleRefExpr{
pos: position{line: 55, col: 21, offset: 1402},
name: "Key",
},
},
&litMatcher{
pos: position{line: 55, col: 25, offset: 1406},
val: "=",
ignoreCase: false,
},
&labeledExpr{
pos: position{line: 55, col: 29, offset: 1410},
label: "ws",
expr: &ruleRefExpr{
pos: position{line: 55, col: 32, offset: 1413},
name: "_",
},
},
&labeledExpr{
pos: position{line: 55, col: 34, offset: 1415},
label: "val",
expr: &ruleRefExpr{
pos: position{line: 55, col: 38, offset: 1419},
name: "Value",
},
},
&labeledExpr{
pos: position{line: 55, col: 44, offset: 1425},
label: "comment",
expr: &zeroOrOneExpr{
pos: position{line: 55, col: 52, offset: 1433},
expr: &ruleRefExpr{
pos: position{line: 55, col: 52, offset: 1433},
name: "Comment",
},
},
},
},
},
},
},
{
name: "KeyOnly",
pos: position{line: 64, col: 1, offset: 1705},
expr: &actionExpr{
pos: position{line: 64, col: 12, offset: 1716},
run: (*parser).callonKeyOnly1,
expr: &seqExpr{
pos: position{line: 64, col: 12, offset: 1716},
exprs: []interface{}{
&labeledExpr{
pos: position{line: 64, col: 12, offset: 1716},
label: "key",
expr: &ruleRefExpr{
pos: position{line: 64, col: 16, offset: 1720},
name: "Key",
},
},
&labeledExpr{
pos: position{line: 64, col: 20, offset: 1724},
label: "ws",
expr: &ruleRefExpr{
pos: position{line: 64, col: 23, offset: 1727},
name: "_",
},
},
&labeledExpr{
pos: position{line: 64, col: 25, offset: 1729},
label: "comment",
expr: &zeroOrOneExpr{
pos: position{line: 64, col: 33, offset: 1737},
expr: &ruleRefExpr{
pos: position{line: 64, col: 33, offset: 1737},
name: "Comment",
},
},
},
},
},
},
},
{
name: "CommentVal",
pos: position{line: 72, col: 1, offset: 1969},
expr: &actionExpr{
pos: position{line: 72, col: 15, offset: 1983},
run: (*parser).callonCommentVal1,
expr: &zeroOrMoreExpr{
pos: position{line: 72, col: 15, offset: 1983},
expr: &seqExpr{
pos: position{line: 72, col: 16, offset: 1984},
exprs: []interface{}{
¬Expr{
pos: position{line: 72, col: 16, offset: 1984},
expr: &ruleRefExpr{
pos: position{line: 72, col: 17, offset: 1985},
name: "LineEnd",
},
},
&anyMatcher{
line: 72, col: 25, offset: 1993,
},
},
},
},
},
},
{
name: "SectionName",
pos: position{line: 79, col: 1, offset: 2156},
expr: &actionExpr{
pos: position{line: 79, col: 16, offset: 2171},
run: (*parser).callonSectionName1,
expr: &oneOrMoreExpr{
pos: position{line: 79, col: 16, offset: 2171},
expr: &charClassMatcher{
pos: position{line: 79, col: 16, offset: 2171},
val: "[^#;\\r\\n[\\]]",
chars: []rune{'#', ';', '\r', '\n', '[', ']'},
ignoreCase: false,
inverted: true,
},
},
},
},
{
name: "Key",
pos: position{line: 86, col: 1, offset: 2345},
expr: &actionExpr{
pos: position{line: 86, col: 8, offset: 2352},
run: (*parser).callonKey1,
expr: &oneOrMoreExpr{
pos: position{line: 86, col: 8, offset: 2352},
expr: &charClassMatcher{
pos: position{line: 86, col: 8, offset: 2352},
val: "[^#;=\\r\\n[\\]]",
chars: []rune{'#', ';', '=', '\r', '\n', '[', ']'},
ignoreCase: false,
inverted: true,
},
},
},
},
{
name: "Value",
pos: position{line: 93, col: 1, offset: 2519},
expr: &choiceExpr{
pos: position{line: 93, col: 10, offset: 2528},
alternatives: []interface{}{
&ruleRefExpr{
pos: position{line: 93, col: 10, offset: 2528},
name: "QuotedValue",
},
&actionExpr{
pos: position{line: 93, col: 24, offset: 2542},
run: (*parser).callonValue3,
expr: &ruleRefExpr{
pos: position{line: 93, col: 24, offset: 2542},
name: "SimpleValue",
},
},
},
},
},
{
name: "QuotedValue",
pos: position{line: 100, col: 1, offset: 2708},
expr: &actionExpr{
pos: position{line: 100, col: 16, offset: 2723},
run: (*parser).callonQuotedValue1,
expr: &seqExpr{
pos: position{line: 100, col: 16, offset: 2723},
exprs: []interface{}{
&litMatcher{
pos: position{line: 100, col: 16, offset: 2723},
val: "\"",
ignoreCase: false,
},
&zeroOrMoreExpr{
pos: position{line: 100, col: 20, offset: 2727},
expr: &ruleRefExpr{
pos: position{line: 100, col: 20, offset: 2727},
name: "Char",
},
},
&litMatcher{
pos: position{line: 100, col: 26, offset: 2733},
val: "\"",
ignoreCase: false,
},
&ruleRefExpr{
pos: position{line: 100, col: 30, offset: 2737},
name: "_",
},
},
},
},
},
{
name: "Char",
pos: position{line: 107, col: 1, offset: 2899},
expr: &choiceExpr{
pos: position{line: 107, col: 9, offset: 2907},
alternatives: []interface{}{
&seqExpr{
pos: position{line: 107, col: 9, offset: 2907},
exprs: []interface{}{
¬Expr{
pos: position{line: 107, col: 9, offset: 2907},
expr: &choiceExpr{
pos: position{line: 107, col: 11, offset: 2909},
alternatives: []interface{}{
&litMatcher{
pos: position{line: 107, col: 11, offset: 2909},
val: "\"",
ignoreCase: false,
},
&litMatcher{
pos: position{line: 107, col: 17, offset: 2915},
val: "\\",
ignoreCase: false,
},
},
},
},
&anyMatcher{
line: 107, col: 23, offset: 2921,
},
},
},
&actionExpr{
pos: position{line: 107, col: 27, offset: 2925},
run: (*parser).callonChar8,
expr: &seqExpr{
pos: position{line: 107, col: 27, offset: 2925},
exprs: []interface{}{
&litMatcher{
pos: position{line: 107, col: 27, offset: 2925},
val: "\\",
ignoreCase: false,
},
&choiceExpr{
pos: position{line: 107, col: 33, offset: 2931},
alternatives: []interface{}{
&charClassMatcher{
pos: position{line: 107, col: 33, offset: 2931},
val: "[\\\\/bfnrt\"]",
chars: []rune{'\\', '/', 'b', 'f', 'n', 'r', 't', '"'},
ignoreCase: false,
inverted: false,
},
&seqExpr{
pos: position{line: 107, col: 47, offset: 2945},
exprs: []interface{}{
&litMatcher{
pos: position{line: 107, col: 47, offset: 2945},
val: "u",
ignoreCase: false,
},
&ruleRefExpr{
pos: position{line: 107, col: 51, offset: 2949},
name: "HexDigit",
},
&ruleRefExpr{
pos: position{line: 107, col: 60, offset: 2958},
name: "HexDigit",
},
&ruleRefExpr{
pos: position{line: 107, col: 69, offset: 2967},
name: "HexDigit",
},
&ruleRefExpr{
pos: position{line: 107, col: 78, offset: 2976},
name: "HexDigit",
},
},
},
},
},
},
},
},
},
},
},
{
name: "HexDigit",
pos: position{line: 114, col: 1, offset: 3154},
expr: &actionExpr{
pos: position{line: 114, col: 13, offset: 3166},
run: (*parser).callonHexDigit1,
expr: &charClassMatcher{
pos: position{line: 114, col: 13, offset: 3166},
val: "[0-9a-f]i",
ranges: []rune{'0', '9', 'a', 'f'},
ignoreCase: true,
inverted: false,
},
},
},
{
name: "SimpleValue",
pos: position{line: 121, col: 1, offset: 3333},
expr: &actionExpr{
pos: position{line: 121, col: 16, offset: 3348},
run: (*parser).callonSimpleValue1,
expr: &zeroOrMoreExpr{
pos: position{line: 121, col: 16, offset: 3348},
expr: &charClassMatcher{
pos: position{line: 121, col: 16, offset: 3348},
val: "[^;#\\r\\n]",
chars: []rune{';', '#', '\r', '\n'},
ignoreCase: false,
inverted: true,
},
},
},
},
{
name: "LineEnd",
pos: position{line: 128, col: 1, offset: 3519},
expr: &choiceExpr{
pos: position{line: 128, col: 12, offset: 3530},
alternatives: []interface{}{
&litMatcher{
pos: position{line: 128, col: 12, offset: 3530},
val: "\r\n",
ignoreCase: false,
},
&actionExpr{
pos: position{line: 128, col: 21, offset: 3539},
run: (*parser).callonLineEnd3,
expr: &litMatcher{
pos: position{line: 128, col: 21, offset: 3539},
val: "\n",
ignoreCase: false,
},
},
},
},
},
{
name: "_",
displayName: "\"whitespace\"",
pos: position{line: 135, col: 1, offset: 3676},
expr: &actionExpr{
pos: position{line: 135, col: 19, offset: 3694},
run: (*parser).callon_1,
expr: &zeroOrMoreExpr{
pos: position{line: 135, col: 19, offset: 3694},
expr: &charClassMatcher{
pos: position{line: 135, col: 19, offset: 3694},
val: "[ \\t]",
chars: []rune{' ', '\t'},
ignoreCase: false,
inverted: false,
},
},
},
},
{
name: "EOF",
pos: position{line: 142, col: 1, offset: 3826},
expr: ¬Expr{
pos: position{line: 142, col: 8, offset: 3833},
expr: &anyMatcher{
line: 142, col: 9, offset: 3834,
},
},
},
},
}
// onFile1 is the action for the File rule: it converts the matched
// list of lines into a *File AST node. It also records the match
// position and text in the package-level lastPosition/lastText.
func (c *current) onFile1(lines interface{}) (interface{}, error) {
	lastPosition, lastText = c.pos, string(c.text)
	//fmt.Printf("\n\n\n>> File: %s // '%s'", c.pos, string(c.text))
	// convert iface to []*Line
	lsSlice := toIfaceSlice(lines)
	ls := make([]*Line, len(lsSlice))
	for i, l := range lsSlice {
		ls[i] = l.(*Line)
	}
	return NewFile(ls), nil
}

// callonFile1 adapts the generated parser machinery to onFile1 by
// passing the labeled "lines" capture from the variable stack.
func (p *parser) callonFile1() (interface{}, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onFile1(stack["lines"])
}
// onLine1 builds a *Line node from the leading whitespace, the optional
// item (section, key/value pair, or comment) and the line ending.
func (c *current) onLine1(ws, item, le interface{}) (interface{}, error) {
	lastPosition, lastText = c.pos, string(c.text)
	//fmt.Printf(">> Line: %s // '%s'", c.pos, string(c.text))
	// item may be nil for a blank line; the failed assertion then
	// yields the zero Item value.
	it, _ := item.(Item)
	return NewLine(c.pos, ws.(string), it, le.(string)), nil
}

// callonLine1 forwards the "ws", "item" and "le" captures to onLine1.
func (p *parser) callonLine1() (interface{}, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onLine1(stack["ws"], stack["item"], stack["le"])
}
// onComment1 builds a *Comment node from the comment marker (cs, the
// raw '#' or ';' bytes) and the comment text.
func (c *current) onComment1(cs, comment interface{}) (interface{}, error) {
	lastPosition, lastText = c.pos, string(c.text)
	//fmt.Printf(">> Comment: %s // '%s'\n", c.pos, string(c.text))
	return NewComment(c.pos, string(cs.([]byte)), comment.(string)), nil
}

// callonComment1 forwards the "cs" and "comment" captures to onComment1.
func (p *parser) callonComment1() (interface{}, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onComment1(stack["cs"], stack["comment"])
}

// onSection1 builds a *Section node from the section name, trailing
// whitespace and an optional trailing comment.
func (c *current) onSection1(name, ws, comment interface{}) (interface{}, error) {
	lastPosition, lastText = c.pos, string(c.text)
	//fmt.Printf(">> Section: %s // '%s'\n", c.pos, name)
	// comment is optional; a failed assertion leaves com == nil.
	com, _ := comment.(*Comment)
	return NewSection(c.pos, name.(string), ws.(string), com), nil
}

// callonSection1 forwards the "name", "ws" and "comment" captures to
// onSection1.
func (p *parser) callonSection1() (interface{}, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onSection1(stack["name"], stack["ws"], stack["comment"])
}
// onKeyValuePair1 builds a *KeyValuePair node from a "key = value"
// match, with an optional trailing comment.
func (c *current) onKeyValuePair1(key, ws, val, comment interface{}) (interface{}, error) {
	lastPosition, lastText = c.pos, string(c.text)
	//fmt.Printf(">> KeyValuePair: %s // '%s': '%s'\n", c.pos, key, val)
	// comment and val are optional; failed assertions leave the zero
	// values (nil comment, empty string value).
	com, _ := comment.(*Comment)
	v, _ := val.(string)
	return NewKeyValuePair(c.pos, key.(string), ws.(string), &v, com), nil
}

// callonKeyValuePair1 forwards the labeled captures to onKeyValuePair1.
func (p *parser) callonKeyValuePair1() (interface{}, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onKeyValuePair1(stack["key"], stack["ws"], stack["val"], stack["comment"])
}

// onKeyOnly1 builds a *KeyValuePair node with a nil value for a key
// that appears without "= value".
func (c *current) onKeyOnly1(key, ws, comment interface{}) (interface{}, error) {
	lastPosition, lastText = c.pos, string(c.text)
	//fmt.Printf(">> KeyOnly: %s // '%s'\n", c.pos, key)
	com, _ := comment.(*Comment)
	return NewKeyValuePair(c.pos, key.(string), ws.(string), nil, com), nil
}

// callonKeyOnly1 forwards the labeled captures to onKeyOnly1.
func (p *parser) callonKeyOnly1() (interface{}, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onKeyOnly1(stack["key"], stack["ws"], stack["comment"])
}
// The actions below all share one shape: record the match position and
// text in the package-level lastPosition/lastText globals, then return
// the raw matched text as a string.

// onCommentVal1 returns the comment body text.
func (c *current) onCommentVal1() (interface{}, error) {
	lastPosition, lastText = c.pos, string(c.text)
	//fmt.Printf(">> CommentVal: %s // '%s'\n", c.pos, string(c.text))
	return string(c.text), nil
}
func (p *parser) callonCommentVal1() (interface{}, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onCommentVal1()
}

// onSectionName1 returns the matched section name.
func (c *current) onSectionName1() (interface{}, error) {
	lastPosition, lastText = c.pos, string(c.text)
	//fmt.Printf(">> SectionName: %s // '%s'\n", c.pos, string(c.text))
	return string(c.text), nil
}
func (p *parser) callonSectionName1() (interface{}, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onSectionName1()
}

// onKey1 returns the matched key text.
func (c *current) onKey1() (interface{}, error) {
	lastPosition, lastText = c.pos, string(c.text)
	//fmt.Printf(">> Key: %s // '%s'\n", c.pos, string(c.text))
	return string(c.text), nil
}
func (p *parser) callonKey1() (interface{}, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onKey1()
}

// onValue3 returns an unquoted (simple) value.
func (c *current) onValue3() (interface{}, error) {
	lastPosition, lastText = c.pos, string(c.text)
	//fmt.Printf(">> Value: %s // '%s'\n", c.pos, string(c.text))
	return string(c.text), nil
}
func (p *parser) callonValue3() (interface{}, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onValue3()
}

// onQuotedValue1 returns a quoted value, quotes included.
func (c *current) onQuotedValue1() (interface{}, error) {
	lastPosition, lastText = c.pos, string(c.text)
	//fmt.Printf(">> QuotedValue: %s // '%s'\n", c.pos, string(c.text))
	return string(c.text), nil
}
func (p *parser) callonQuotedValue1() (interface{}, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onQuotedValue1()
}

// onChar8 returns an escape sequence inside a quoted value verbatim.
func (c *current) onChar8() (interface{}, error) {
	// " // ignore
	lastPosition, lastText = c.pos, string(c.text)
	//fmt.Printf(">> Char: %s // '%s'\n", c.pos, string(c.text))
	return string(c.text), nil
}
func (p *parser) callonChar8() (interface{}, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onChar8()
}

// onHexDigit1 returns a single matched hex digit.
func (c *current) onHexDigit1() (interface{}, error) {
	lastPosition, lastText = c.pos, string(c.text)
	//fmt.Printf(">> HexDigit: %s // '%s'\n", c.pos, string(c.text))
	return string(c.text), nil
}
func (p *parser) callonHexDigit1() (interface{}, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onHexDigit1()
}

// onSimpleValue1 returns an unquoted value (may be empty).
func (c *current) onSimpleValue1() (interface{}, error) {
	lastPosition, lastText = c.pos, string(c.text)
	//fmt.Printf(">> SimpleValue: %s // '%s'\n", c.pos, string(c.text))
	return string(c.text), nil
}
func (p *parser) callonSimpleValue1() (interface{}, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onSimpleValue1()
}

// onLineEnd3 returns a bare "\n" line ending ("\r\n" is matched
// literally by the first alternative and needs no action).
func (c *current) onLineEnd3() (interface{}, error) {
	lastPosition, lastText = c.pos, string(c.text)
	//fmt.Printf(">> LineEnd: %s\n", c.pos)
	return string(c.text), nil
}
func (p *parser) callonLineEnd3() (interface{}, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onLineEnd3()
}

// on_1 returns the matched run of spaces/tabs (the "_" whitespace rule).
func (c *current) on_1() (interface{}, error) {
	lastPosition, lastText = c.pos, string(c.text)
	//fmt.Printf(">> _ %s\n", c.pos)
	return string(c.text), nil
}
func (p *parser) callon_1() (interface{}, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.on_1()
}
var (
	// errNoRule is returned when the grammar to parse has no rule.
	errNoRule = errors.New("grammar has no rule")
	// errInvalidEntrypoint is returned when the specified entrypoint rule
	// does not exist.
	errInvalidEntrypoint = errors.New("invalid entrypoint")
	// errInvalidEncoding is returned when the source is not properly
	// utf8-encoded.
	errInvalidEncoding = errors.New("invalid encoding")
	// errMaxExprCnt is used to signal that the maximum number of
	// expressions have been parsed.
	errMaxExprCnt = errors.New("max number of expressions parsed")
)
// Option is a function that can set an option on the parser. It returns
// the previous setting as an Option.
type Option func(*parser) Option

// MaxExpressions creates an Option to stop parsing after the provided
// number of expressions have been parsed, if the value is 0 then the parser will
// parse for as many steps as needed (possibly an infinite number).
//
// The default for maxExprCnt is 0.
func MaxExpressions(maxExprCnt uint64) Option {
	return func(p *parser) Option {
		oldMaxExprCnt := p.maxExprCnt
		p.maxExprCnt = maxExprCnt
		// Returning an Option that restores the previous value lets
		// callers undo the setting.
		return MaxExpressions(oldMaxExprCnt)
	}
}

// Entrypoint creates an Option to set the rule name to use as entrypoint.
// The rule name must have been specified in the -alternate-entrypoints
// if generating the parser with the -optimize-grammar flag, otherwise
// it may have been optimized out. Passing an empty string sets the
// entrypoint to the first rule in the grammar.
//
// The default is to start parsing at the first rule in the grammar.
func Entrypoint(ruleName string) Option {
	return func(p *parser) Option {
		oldEntrypoint := p.entrypoint
		p.entrypoint = ruleName
		if ruleName == "" {
			// Empty name falls back to the grammar's first rule.
			p.entrypoint = g.rules[0].name
		}
		return Entrypoint(oldEntrypoint)
	}
}
// Statistics adds a user provided Stats struct to the parser to allow
// the user to process the results after the parsing has finished.
// Also the key for the "no match" counter is set.
//
// Example usage:
//
//     input := "input"
//     stats := Stats{}
//     _, err := Parse("input-file", []byte(input), Statistics(&stats, "no match"))
//     if err != nil {
//         log.Panicln(err)
//     }
//     b, err := json.MarshalIndent(stats.ChoiceAltCnt, "", "  ")
//     if err != nil {
//         log.Panicln(err)
//     }
//     fmt.Println(string(b))
//
func Statistics(stats *Stats, choiceNoMatch string) Option {
	return func(p *parser) Option {
		oldStats := p.Stats
		p.Stats = stats
		oldChoiceNoMatch := p.choiceNoMatch
		p.choiceNoMatch = choiceNoMatch
		// Lazily initialize the map so a zero-value Stats works.
		if p.Stats.ChoiceAltCnt == nil {
			p.Stats.ChoiceAltCnt = make(map[string]map[string]int)
		}
		return Statistics(oldStats, oldChoiceNoMatch)
	}
}
// Debug creates an Option to set the debug flag to b. When set to true,
// debugging information is printed to stdout while parsing.
//
// The default is false.
func Debug(b bool) Option {
	return func(p *parser) Option {
		old := p.debug
		p.debug = b
		// The returned Option restores the previous value.
		return Debug(old)
	}
}

// Memoize creates an Option to set the memoize flag to b. When set to true,
// the parser will cache all results so each expression is evaluated only
// once. This guarantees linear parsing time even for pathological cases,
// at the expense of more memory and slower times for typical cases.
//
// The default is false.
func Memoize(b bool) Option {
	return func(p *parser) Option {
		old := p.memoize
		p.memoize = b
		return Memoize(old)
	}
}

// AllowInvalidUTF8 creates an Option to allow invalid UTF-8 bytes.
// Every invalid UTF-8 byte is treated as a utf8.RuneError (U+FFFD)
// by character class matchers and is matched by the any matcher.
// The returned matched value, c.text and c.offset are NOT affected.
//
// The default is false.
func AllowInvalidUTF8(b bool) Option {
	return func(p *parser) Option {
		old := p.allowInvalidUTF8
		p.allowInvalidUTF8 = b
		return AllowInvalidUTF8(old)
	}
}

// Recover creates an Option to set the recover flag to b. When set to
// true, this causes the parser to recover from panics and convert it
// to an error. Setting it to false can be useful while debugging to
// access the full stack trace.
//
// The default is true.
func Recover(b bool) Option {
	return func(p *parser) Option {
		old := p.recover
		p.recover = b
		return Recover(old)
	}
}
// GlobalStore creates an Option to set a key to a certain value in
// the globalStore. Unlike the state store, the globalStore is never
// rolled back on backtracking.
func GlobalStore(key string, value interface{}) Option {
	return func(p *parser) Option {
		old := p.cur.globalStore[key]
		p.cur.globalStore[key] = value
		return GlobalStore(key, old)
	}
}

// InitState creates an Option to set a key to a certain value in
// the global "state" store. The state store is rolled back when a
// parsing rule fails.
func InitState(key string, value interface{}) Option {
	return func(p *parser) Option {
		old := p.cur.state[key]
		p.cur.state[key] = value
		return InitState(key, old)
	}
}
// ParseFile parses the file identified by filename.
//
// The file is always closed. A Close error is surfaced only when
// parsing itself succeeded, so a parse error is never masked by the
// deferred Close.
func ParseFile(filename string, opts ...Option) (i interface{}, err error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer func() {
		// Bug fix: the original unconditionally overwrote err with
		// closeErr, discarding any parse/read error.
		if closeErr := f.Close(); closeErr != nil && err == nil {
			err = closeErr
		}
	}()
	return ParseReader(filename, f, opts...)
}
// ParseReader parses the data from r using filename as information in the
// error messages. It reads the whole input into memory before parsing.
func ParseReader(filename string, r io.Reader, opts ...Option) (interface{}, error) {
	data, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	return Parse(filename, data, opts...)
}
// Parse parses the data from b using filename as information in the
// error messages.
func Parse(filename string, b []byte, opts ...Option) (interface{}, error) {
	// g is the package-level grammar generated from the PEG source.
	return newParser(filename, b, opts...).parse(g)
}
// position records a position in the text: line and column plus the
// byte offset into the source.
type position struct {
	line, col, offset int
}

// String renders the position as "line:col [offset]".
func (p position) String() string {
	line, col, off := p.line, p.col, p.offset
	return fmt.Sprintf("%d:%d [%d]", line, col, off)
}
// savepoint stores all state required to go back to this point in the
// parser.
type savepoint struct {
	position
	rn rune // current rune at this position
	w  int  // width of rn in bytes (0 at EOF, per utf8.DecodeRune)
}

// current is the receiver passed to generated action code blocks.
type current struct {
	pos  position // start position of the match
	text []byte   // raw text of the match
	// state is a store for arbitrary key,value pairs that the user wants to be
	// tied to the backtracking of the parser.
	// This is always rolled back if a parsing rule fails.
	state storeDict
	// globalStore is a general store for the user to store arbitrary key-value
	// pairs that they need to manage and that they do not want tied to the
	// backtracking of the parser. This is only modified by the user and never
	// rolled back by the parser. It is always up to the user to keep this in a
	// consistent state.
	globalStore storeDict
}

// storeDict is the generic key/value map used for both the state and
// global stores.
type storeDict map[string]interface{}
// the AST types...

// grammar is the root node: an ordered list of rules.
type grammar struct {
	pos   position
	rules []*rule
}

// rule is a single named grammar rule.
type rule struct {
	pos         position
	name        string
	displayName string
	expr        interface{}
}

// choiceExpr is an ordered choice: alternatives are tried in order.
type choiceExpr struct {
	pos          position
	alternatives []interface{}
}

// actionExpr runs a Go code block when its inner expression matches.
type actionExpr struct {
	pos  position
	expr interface{}
	run  func(*parser) (interface{}, error)
}

// recoveryExpr pairs an expression with a recovery expression selected
// by failure label.
type recoveryExpr struct {
	pos          position
	expr         interface{}
	recoverExpr  interface{}
	failureLabel []string
}

// seqExpr is a sequence: every sub-expression must match in order.
type seqExpr struct {
	pos   position
	exprs []interface{}
}

// throwExpr raises a labeled failure, to be handled by a recoveryExpr.
type throwExpr struct {
	pos   position
	label string
}

// labeledExpr captures the inner match under a label for action code.
type labeledExpr struct {
	pos   position
	label string
	expr  interface{}
}

// expr is the shared shape of single-operand expressions; the predicate
// and repetition expressions below are defined as aliases of it.
type expr struct {
	pos  position
	expr interface{}
}

type andExpr expr
type notExpr expr
type zeroOrOneExpr expr
type zeroOrMoreExpr expr
type oneOrMoreExpr expr

// ruleRefExpr references another rule by name.
type ruleRefExpr struct {
	pos  position
	name string
}

// stateCodeExpr runs a code block for its side effects (returns error only).
type stateCodeExpr struct {
	pos position
	run func(*parser) error
}

// andCodeExpr is a code-based and-predicate.
type andCodeExpr struct {
	pos position
	run func(*parser) (bool, error)
}

// notCodeExpr is a code-based not-predicate.
type notCodeExpr struct {
	pos position
	run func(*parser) (bool, error)
}

// litMatcher matches a literal string, optionally case-insensitively.
type litMatcher struct {
	pos        position
	val        string
	ignoreCase bool
}

// charClassMatcher matches one rune against chars, ranges and Unicode
// classes, optionally inverted and/or case-insensitive.
type charClassMatcher struct {
	pos             position
	val             string
	basicLatinChars [128]bool
	chars           []rune
	ranges          []rune
	classes         []*unicode.RangeTable
	ignoreCase      bool
	inverted        bool
}

// anyMatcher matches any single rune (the "." expression).
type anyMatcher position
// errList cumulates the errors found by the parser.
type errList []error
func (e *errList) add(err error) {
*e = append(*e, err)
}
func (e errList) err() error {
if len(e) == 0 {
return nil
}
e.dedupe()
return e
}
func (e *errList) dedupe() {
var cleaned []error
set := make(map[string]bool)
for _, err := range *e {
if msg := err.Error(); !set[msg] {
set[msg] = true
cleaned = append(cleaned, err)
}
}
*e = cleaned
}
func (e errList) Error() string {
switch len(e) {
case 0:
return ""
case 1:
return e[0].Error()
default:
var buf bytes.Buffer
for i, err := range e {
if i > 0 {
buf.WriteRune('\n')
}
buf.WriteString(err.Error())
}
return buf.String()
}
}
// parserError wraps an error with a prefix indicating the rule in which
// the error occurred. The original error is stored in the Inner field.
type parserError struct {
	Inner    error
	pos      position
	prefix   string
	expected []string
}

// Error returns the inner error message prefixed with the location/rule
// context.
func (p *parserError) Error() string {
	msg := p.Inner.Error()
	return p.prefix + ": " + msg
}
// newParser creates a parser with the specified input source and options.
func newParser(filename string, b []byte, opts ...Option) *parser {
	stats := Stats{
		ChoiceAltCnt: make(map[string]map[string]int),
	}
	p := &parser{
		filename: filename,
		errs: new(errList),
		data: b,
		pt: savepoint{position: position{line: 1}},
		recover: true,
		cur: current{
			state: make(storeDict),
			globalStore: make(storeDict),
		},
		maxFailPos: position{col: 1, line: 1},
		maxFailExpected: make([]string, 0, 20),
		Stats: &stats,
		// start rule is rule [0] unless an alternate entrypoint is specified
		entrypoint: g.rules[0].name,
	}
	p.setOptions(opts)
	// A zero maxExprCnt (the default) means "no limit".
	if p.maxExprCnt == 0 {
		p.maxExprCnt = math.MaxUint64
	}
	return p
}

// setOptions applies the options to the parser, in order.
func (p *parser) setOptions(opts []Option) {
	for _, opt := range opts {
		opt(p)
	}
}
// resultTuple is a memoization entry: the parsed value, the match flag
// and the parser position reached after the expression.
type resultTuple struct {
	v interface{}
	b bool
	end savepoint
}

// choiceNoMatch marks a choice expression that matched no alternative.
const choiceNoMatch = -1

// Stats stores some statistics, gathered during parsing
type Stats struct {
	// ExprCnt counts the number of expressions processed during parsing
	// This value is compared to the maximum number of expressions allowed
	// (set by the MaxExpressions option).
	ExprCnt uint64

	// ChoiceAltCnt is used to count for each ordered choice expression,
	// which alternative is used how may times.
	// These numbers allow to optimize the order of the ordered choice expression
	// to increase the performance of the parser
	//
	// The outer key of ChoiceAltCnt is composed of the name of the rule as well
	// as the line and the column of the ordered choice.
	// The inner key of ChoiceAltCnt is the number (one-based) of the matching alternative.
	// For each alternative the number of matches are counted. If an ordered choice does not
	// match, a special counter is incremented. The name of this counter is set with
	// the parser option Statistics.
	// For an alternative to be included in ChoiceAltCnt, it has to match at least once.
	ChoiceAltCnt map[string]map[string]int
}

// parser holds the complete state of one parse run.
type parser struct {
	filename string // input name, used in error messages
	pt savepoint // current position, rune and rune width
	cur current // value passed to action/predicate code
	data []byte // the input source

	errs *errList // errors collected during parsing

	depth int // nesting depth, used for debug output indentation
	recover bool // convert panics in action code into errors
	debug bool // print debug traces to stdout
	memoize bool // enable packrat memoization
	// memoization table for the packrat algorithm:
	// map[offset in source] map[expression or rule] {value, match}
	memo map[int]map[interface{}]resultTuple

	// rules table, maps the rule identifier to the rule node
	rules map[string]*rule
	// variables stack, map of label to value
	vstack []map[string]interface{}
	// rule stack, allows identification of the current rule in errors
	rstack []*rule

	// parse fail
	maxFailPos position
	maxFailExpected []string
	maxFailInvertExpected bool

	// max number of expressions to be parsed
	maxExprCnt uint64
	// entrypoint for the parser
	entrypoint string

	allowInvalidUTF8 bool

	*Stats

	choiceNoMatch string
	// recovery expression stack, keeps track of the currently available recovery expression, these are traversed in reverse
	recoveryStack []map[string]interface{}
}
// push a variable set on the vstack.
// Empty map slots left behind by popV are reused to avoid reallocation.
func (p *parser) pushV() {
	if cap(p.vstack) == len(p.vstack) {
		// create new empty slot in the stack
		p.vstack = append(p.vstack, nil)
	} else {
		// slice to 1 more
		p.vstack = p.vstack[:len(p.vstack)+1]
	}

	// get the last args set
	m := p.vstack[len(p.vstack)-1]
	if m != nil && len(m) == 0 {
		// empty map, all good
		return
	}

	m = make(map[string]interface{})
	p.vstack[len(p.vstack)-1] = m
}

// pop a variable set from the vstack.
func (p *parser) popV() {
	// if the map is not empty, clear it
	m := p.vstack[len(p.vstack)-1]
	if len(m) > 0 {
		// GC that map
		p.vstack[len(p.vstack)-1] = nil
	}
	p.vstack = p.vstack[:len(p.vstack)-1]
}
// push a recovery expression with its labels to the recoveryStack.
// The same expr is registered under every label it handles.
func (p *parser) pushRecovery(labels []string, expr interface{}) {
	if cap(p.recoveryStack) == len(p.recoveryStack) {
		// create new empty slot in the stack
		p.recoveryStack = append(p.recoveryStack, nil)
	} else {
		// slice to 1 more
		p.recoveryStack = p.recoveryStack[:len(p.recoveryStack)+1]
	}

	m := make(map[string]interface{}, len(labels))
	for _, fl := range labels {
		m[fl] = expr
	}
	p.recoveryStack[len(p.recoveryStack)-1] = m
}

// pop a recovery expression from the recoveryStack
func (p *parser) popRecovery() {
	// GC that map
	p.recoveryStack[len(p.recoveryStack)-1] = nil

	p.recoveryStack = p.recoveryStack[:len(p.recoveryStack)-1]
}
// print emits a debug trace line (position, message, current rune) when
// the debug flag is set, and returns s unchanged either way.
func (p *parser) print(prefix, s string) string {
	if !p.debug {
		return s
	}

	fmt.Printf("%s %d:%d:%d: %s [%#U]\n",
		prefix, p.pt.line, p.pt.col, p.pt.offset, s, p.pt.rn)
	return s
}

// in prints an indented "entering" trace line and increases the depth.
func (p *parser) in(s string) string {
	p.depth++
	return p.print(strings.Repeat(" ", p.depth)+">", s)
}

// out decreases the depth and prints an indented "leaving" trace line;
// typically used as `defer p.out(p.in("name"))`.
func (p *parser) out(s string) string {
	p.depth--
	return p.print(strings.Repeat(" ", p.depth)+"<", s)
}
// addErr records err at the current parser position.
func (p *parser) addErr(err error) {
	p.addErrAt(err, p.pt.position, []string{})
}

// addErrAt wraps err in a parserError carrying the position, a
// "filename:line:col (offset): rule name" prefix and the expected
// tokens, and appends it to the error list.
func (p *parser) addErrAt(err error, pos position, expected []string) {
	var buf bytes.Buffer
	if p.filename != "" {
		buf.WriteString(p.filename)
	}
	if buf.Len() > 0 {
		buf.WriteString(":")
	}
	buf.WriteString(fmt.Sprintf("%d:%d (%d)", pos.line, pos.col, pos.offset))
	if len(p.rstack) > 0 {
		if buf.Len() > 0 {
			buf.WriteString(": ")
		}
		// The innermost rule on the rule stack names the context.
		rule := p.rstack[len(p.rstack)-1]
		if rule.displayName != "" {
			buf.WriteString("rule " + rule.displayName)
		} else {
			buf.WriteString("rule " + rule.name)
		}
	}
	pe := &parserError{Inner: err, pos: pos, prefix: buf.String(), expected: expected}
	p.errs.add(pe)
}
// failAt tracks the "expected" values at the farthest failure position,
// used later to build the "no match found, expected: ..." error.
func (p *parser) failAt(fail bool, pos position, want string) {
	// process fail if parsing fails and not inverted or parsing succeeds and invert is set
	if fail == p.maxFailInvertExpected {
		// Ignore failures before the farthest one seen so far.
		if pos.offset < p.maxFailPos.offset {
			return
		}

		if pos.offset > p.maxFailPos.offset {
			// A new farthest position resets the expected list.
			p.maxFailPos = pos
			p.maxFailExpected = p.maxFailExpected[:0]
		}

		if p.maxFailInvertExpected {
			want = "!" + want
		}
		p.maxFailExpected = append(p.maxFailExpected, want)
	}
}
// read advances the parser to the next rune.
func (p *parser) read() {
	p.pt.offset += p.pt.w
	rn, n := utf8.DecodeRune(p.data[p.pt.offset:])
	p.pt.rn = rn
	p.pt.w = n
	p.pt.col++
	if rn == '\n' {
		p.pt.line++
		p.pt.col = 0
	}

	// DecodeRune reports an invalid byte as (RuneError, 1); at EOF it
	// returns (RuneError, 0), which the matchers treat as end of input.
	if rn == utf8.RuneError && n == 1 { // see utf8.DecodeRune
		if !p.allowInvalidUTF8 {
			p.addErr(errInvalidEncoding)
		}
	}
}
// restore parser position to the savepoint pt.
func (p *parser) restore(pt savepoint) {
	if p.debug {
		defer p.out(p.in("restore"))
	}
	// No-op when already at the savepoint.
	if pt.offset == p.pt.offset {
		return
	}
	p.pt = pt
}
// Cloner is implemented by any value that has a Clone method, which returns a
// copy of the value. This is mainly used for types which are not passed by
// value (e.g map, slice, chan) or structs that contain such types.
//
// This is used in conjunction with the global state feature to create proper
// copies of the state to allow the parser to properly restore the state in
// the case of backtracking.
type Cloner interface {
	Clone() interface{}
}

// clone and return parser current state.
// Values implementing Cloner are deep-copied via Clone; all other
// values are copied by assignment (shared for reference types).
func (p *parser) cloneState() storeDict {
	if p.debug {
		defer p.out(p.in("cloneState"))
	}

	state := make(storeDict, len(p.cur.state))
	for k, v := range p.cur.state {
		if c, ok := v.(Cloner); ok {
			state[k] = c.Clone()
		} else {
			state[k] = v
		}
	}
	return state
}
// restore parser current state to the state storeDict.
// every restoreState should applied only one time for every cloned state
func (p *parser) restoreState(state storeDict) {
	if p.debug {
		defer p.out(p.in("restoreState"))
	}
	p.cur.state = state
}

// get the slice of bytes from the savepoint start to the current position.
func (p *parser) sliceFrom(start savepoint) []byte {
	return p.data[start.position.offset:p.pt.position.offset]
}
// getMemoized returns the cached result for node (a rule or expression)
// at the current offset, if one was stored by setMemoized.
func (p *parser) getMemoized(node interface{}) (resultTuple, bool) {
	if len(p.memo) == 0 {
		return resultTuple{}, false
	}
	m := p.memo[p.pt.offset]
	if len(m) == 0 {
		return resultTuple{}, false
	}
	res, ok := m[node]
	return res, ok
}

// setMemoized stores the result tuple for node at the offset of
// savepoint pt, lazily creating the memoization tables.
func (p *parser) setMemoized(pt savepoint, node interface{}, tuple resultTuple) {
	if p.memo == nil {
		p.memo = make(map[int]map[interface{}]resultTuple)
	}
	m := p.memo[pt.offset]
	if m == nil {
		m = make(map[interface{}]resultTuple)
		p.memo[pt.offset] = m
	}
	m[node] = tuple
}
// buildRulesTable indexes the grammar's rules by name so rule
// references can be resolved with a map lookup during parsing.
func (p *parser) buildRulesTable(g *grammar) {
	table := make(map[string]*rule, len(g.rules))
	for _, rl := range g.rules {
		table[rl.name] = rl
	}
	p.rules = table
}
// parse runs the parser over grammar g, starting at the configured
// entrypoint rule, and returns the parsed value or the accumulated
// errors.
func (p *parser) parse(g *grammar) (val interface{}, err error) {
	if len(g.rules) == 0 {
		p.addErr(errNoRule)
		return nil, p.errs.err()
	}

	// TODO : not super critical but this could be generated
	p.buildRulesTable(g)

	if p.recover {
		// panic can be used in action code to stop parsing immediately
		// and return the panic as an error.
		defer func() {
			if e := recover(); e != nil {
				if p.debug {
					defer p.out(p.in("panic handler"))
				}
				val = nil
				switch e := e.(type) {
				case error:
					p.addErr(e)
				default:
					p.addErr(fmt.Errorf("%v", e))
				}
				err = p.errs.err()
			}
		}()
	}

	startRule, ok := p.rules[p.entrypoint]
	if !ok {
		p.addErr(errInvalidEntrypoint)
		return nil, p.errs.err()
	}

	p.read() // advance to first rune
	val, ok = p.parseRule(startRule)
	if !ok {
		if len(*p.errs) == 0 {
			// If parsing fails, but no errors have been recorded, the expected values
			// for the farthest parser position are returned as error.
			// Deduplicate the expected tokens via a set.
			maxFailExpectedMap := make(map[string]struct{}, len(p.maxFailExpected))
			for _, v := range p.maxFailExpected {
				maxFailExpectedMap[v] = struct{}{}
			}
			expected := make([]string, 0, len(maxFailExpectedMap))
			eof := false
			// "!." (not-any) means end of input was expected; report
			// it last as "EOF".
			if _, ok := maxFailExpectedMap["!."]; ok {
				delete(maxFailExpectedMap, "!.")
				eof = true
			}
			for k := range maxFailExpectedMap {
				expected = append(expected, k)
			}
			sort.Strings(expected)
			if eof {
				expected = append(expected, "EOF")
			}
			p.addErrAt(errors.New("no match found, expected: "+listJoin(expected, ", ", "or")), p.maxFailPos, expected)
		}

		return nil, p.errs.err()
	}
	return val, p.errs.err()
}
// listJoin joins list with sep, placing lastSep before the final
// element (e.g. "a, b or c").
func listJoin(list []string, sep string, lastSep string) string {
	if len(list) == 0 {
		return ""
	}
	if len(list) == 1 {
		return list[0]
	}
	head := strings.Join(list[:len(list)-1], sep)
	return fmt.Sprintf("%s %s %s", head, lastSep, list[len(list)-1])
}
// parseRule evaluates a single grammar rule at the current position,
// maintaining the rule stack (for error messages), the variable stack,
// and the memoization table when enabled.
func (p *parser) parseRule(rule *rule) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseRule " + rule.name))
	}

	if p.memoize {
		// Reuse a cached result at this offset if available.
		res, ok := p.getMemoized(rule)
		if ok {
			p.restore(res.end)
			return res.v, res.b
		}
	}

	start := p.pt
	p.rstack = append(p.rstack, rule)
	p.pushV()
	val, ok := p.parseExpr(rule.expr)
	p.popV()
	p.rstack = p.rstack[:len(p.rstack)-1]
	if ok && p.debug {
		p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
	}

	if p.memoize {
		p.setMemoized(start, rule, resultTuple{val, ok, p.pt})
	}
	return val, ok
}
// parseExpr dispatches to the evaluator for the concrete expression
// type, enforcing the expression-count limit and (optionally)
// memoizing the result.
func (p *parser) parseExpr(expr interface{}) (interface{}, bool) {
	var pt savepoint

	if p.memoize {
		res, ok := p.getMemoized(expr)
		if ok {
			p.restore(res.end)
			return res.v, res.b
		}
		pt = p.pt
	}

	// Abort (via panic, recovered in parse) once the configured
	// maximum number of expressions has been exceeded.
	p.ExprCnt++
	if p.ExprCnt > p.maxExprCnt {
		panic(errMaxExprCnt)
	}

	var val interface{}
	var ok bool
	switch expr := expr.(type) {
	case *actionExpr:
		val, ok = p.parseActionExpr(expr)
	case *andCodeExpr:
		val, ok = p.parseAndCodeExpr(expr)
	case *andExpr:
		val, ok = p.parseAndExpr(expr)
	case *anyMatcher:
		val, ok = p.parseAnyMatcher(expr)
	case *charClassMatcher:
		val, ok = p.parseCharClassMatcher(expr)
	case *choiceExpr:
		val, ok = p.parseChoiceExpr(expr)
	case *labeledExpr:
		val, ok = p.parseLabeledExpr(expr)
	case *litMatcher:
		val, ok = p.parseLitMatcher(expr)
	case *notCodeExpr:
		val, ok = p.parseNotCodeExpr(expr)
	case *notExpr:
		val, ok = p.parseNotExpr(expr)
	case *oneOrMoreExpr:
		val, ok = p.parseOneOrMoreExpr(expr)
	case *recoveryExpr:
		val, ok = p.parseRecoveryExpr(expr)
	case *ruleRefExpr:
		val, ok = p.parseRuleRefExpr(expr)
	case *seqExpr:
		val, ok = p.parseSeqExpr(expr)
	case *stateCodeExpr:
		val, ok = p.parseStateCodeExpr(expr)
	case *throwExpr:
		val, ok = p.parseThrowExpr(expr)
	case *zeroOrMoreExpr:
		val, ok = p.parseZeroOrMoreExpr(expr)
	case *zeroOrOneExpr:
		val, ok = p.parseZeroOrOneExpr(expr)
	default:
		panic(fmt.Sprintf("unknown expression type %T", expr))
	}
	if p.memoize {
		p.setMemoized(pt, expr, resultTuple{val, ok, p.pt})
	}
	return val, ok
}
// parseActionExpr evaluates the inner expression and, on a match, runs
// the user's action code with c.pos/c.text set to the matched span.
// The action's return value replaces the expression value.
func (p *parser) parseActionExpr(act *actionExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseActionExpr"))
	}

	start := p.pt
	val, ok := p.parseExpr(act.expr)
	if ok {
		p.cur.pos = start.position
		p.cur.text = p.sliceFrom(start)
		// The state store is restored after the action so state
		// mutations inside action code do not leak.
		state := p.cloneState()
		actVal, err := act.run(p)
		if err != nil {
			p.addErrAt(err, start.position, []string{})
		}
		p.restoreState(state)

		val = actVal
	}
	if ok && p.debug {
		p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
	}
	return val, ok
}
// parseAndCodeExpr evaluates a &{...} code predicate: the user code decides
// whether the match succeeds without consuming any input; user state
// changes are rolled back.
func (p *parser) parseAndCodeExpr(and *andCodeExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseAndCodeExpr"))
	}
	state := p.cloneState()
	ok, err := and.run(p)
	if err != nil {
		p.addErr(err)
	}
	p.restoreState(state)
	return nil, ok
}
// parseAndExpr implements the &expr positive lookahead: the inner
// expression is parsed, then both the input position and user state are
// restored so nothing is consumed; only the success flag is kept.
func (p *parser) parseAndExpr(and *andExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseAndExpr"))
	}
	pt := p.pt
	state := p.cloneState()
	p.pushV()
	_, ok := p.parseExpr(and.expr)
	p.popV()
	p.restoreState(state)
	p.restore(pt)
	return nil, ok
}
// parseAnyMatcher matches any single rune ("." in the grammar), failing
// only at end of input, which is signalled by utf8.RuneError together with
// a zero rune width.
func (p *parser) parseAnyMatcher(any *anyMatcher) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseAnyMatcher"))
	}
	if p.pt.rn == utf8.RuneError && p.pt.w == 0 {
		// EOF - see utf8.DecodeRune
		p.failAt(false, p.pt.position, ".")
		return nil, false
	}
	start := p.pt
	p.read()
	p.failAt(true, start.position, ".")
	return p.sliceFrom(start), true
}
// parseCharClassMatcher matches a single rune against a character class
// ([...] in the grammar): an explicit char list, inclusive rune ranges and
// Unicode range tables, with optional case folding and inversion.
func (p *parser) parseCharClassMatcher(chr *charClassMatcher) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseCharClassMatcher"))
	}
	start := p.pt
	rn := start.rn
	// EOF never matches, inverted or not (see utf8.DecodeRune).
	if rn == utf8.RuneError && start.w == 0 {
		p.failAt(false, start.position, chr.val)
		return nil, false
	}
	if chr.ignoreCase {
		rn = unicode.ToLower(rn)
	}
	// Determine class membership across the three representations.
	member := false
	for _, c := range chr.chars {
		if c == rn {
			member = true
			break
		}
	}
	if !member {
		for i := 0; i < len(chr.ranges); i += 2 {
			if rn >= chr.ranges[i] && rn <= chr.ranges[i+1] {
				member = true
				break
			}
		}
	}
	if !member {
		for _, tbl := range chr.classes {
			if unicode.Is(tbl, rn) {
				member = true
				break
			}
		}
	}
	// Inversion flips the meaning of membership: consume the rune when
	// membership and inversion disagree, fail when they agree.
	if member == chr.inverted {
		p.failAt(false, start.position, chr.val)
		return nil, false
	}
	p.read()
	p.failAt(true, start.position, chr.val)
	return p.sliceFrom(start), true
}
// incChoiceAltCnt records, for the Statistics option, which alternative of
// a choice expression matched (or the configured "no match" key when none
// did), keyed by the enclosing rule name and the choice's grammar position.
func (p *parser) incChoiceAltCnt(ch *choiceExpr, altI int) {
	choiceIdent := fmt.Sprintf("%s %d:%d", p.rstack[len(p.rstack)-1].name, ch.pos.line, ch.pos.col)
	m := p.ChoiceAltCnt[choiceIdent]
	if m == nil {
		m = make(map[string]int)
		p.ChoiceAltCnt[choiceIdent] = m
	}
	// We increment altI by 1, so the keys do not start at 0
	alt := strconv.Itoa(altI + 1)
	if altI == choiceNoMatch {
		alt = p.choiceNoMatch
	}
	m[alt]++
}
// parseChoiceExpr tries each alternative in order, restoring user state
// after every failed attempt, and returns the first successful result
// (ordered choice / PEG semantics).
func (p *parser) parseChoiceExpr(ch *choiceExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseChoiceExpr"))
	}
	for altI, alt := range ch.alternatives {
		// dummy assignment to prevent compile error if optimized
		_ = altI
		state := p.cloneState()
		p.pushV()
		val, ok := p.parseExpr(alt)
		p.popV()
		if ok {
			p.incChoiceAltCnt(ch, altI)
			return val, ok
		}
		p.restoreState(state)
	}
	p.incChoiceAltCnt(ch, choiceNoMatch)
	return nil, false
}
// parseLabeledExpr parses the inner expression and, on success, stores the
// resulting value under the label in the top frame of the variable stack
// so action code can refer to it by name.
func (p *parser) parseLabeledExpr(lab *labeledExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseLabeledExpr"))
	}
	p.pushV()
	val, ok := p.parseExpr(lab.expr)
	p.popV()
	if ok && lab.label != "" {
		m := p.vstack[len(p.vstack)-1]
		m[lab.label] = val
	}
	return val, ok
}
// parseLitMatcher matches the literal string lit.val rune by rune,
// optionally case-insensitively, restoring the input position on failure.
func (p *parser) parseLitMatcher(lit *litMatcher) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseLitMatcher"))
	}
	// val is the quoted display form of the literal, used only in
	// success/failure reporting via failAt.
	ignoreCase := ""
	if lit.ignoreCase {
		ignoreCase = "i"
	}
	val := fmt.Sprintf("%q%s", lit.val, ignoreCase)
	start := p.pt
	for _, want := range lit.val {
		cur := p.pt.rn
		if lit.ignoreCase {
			cur = unicode.ToLower(cur)
		}
		if cur != want {
			p.failAt(false, start.position, val)
			p.restore(start)
			return nil, false
		}
		p.read()
	}
	p.failAt(true, start.position, val)
	return p.sliceFrom(start), true
}
// parseNotCodeExpr evaluates a !{...} code predicate: like parseAndCodeExpr
// but with the result negated; no input is consumed and state changes are
// rolled back.
func (p *parser) parseNotCodeExpr(not *notCodeExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseNotCodeExpr"))
	}
	state := p.cloneState()
	ok, err := not.run(p)
	if err != nil {
		p.addErr(err)
	}
	p.restoreState(state)
	return nil, !ok
}
// parseNotExpr implements the !expr negative lookahead: the inner
// expression is parsed with failure reporting inverted, then position and
// state are restored; success of the inner parse means failure here.
func (p *parser) parseNotExpr(not *notExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseNotExpr"))
	}
	pt := p.pt
	state := p.cloneState()
	p.pushV()
	// Toggle expectation inversion so error messages report what should
	// NOT have matched; toggled back after the inner parse.
	p.maxFailInvertExpected = !p.maxFailInvertExpected
	_, ok := p.parseExpr(not.expr)
	p.maxFailInvertExpected = !p.maxFailInvertExpected
	p.popV()
	p.restoreState(state)
	p.restore(pt)
	return nil, !ok
}
// parseOneOrMoreExpr implements expr+: the inner expression must match at
// least once; matching stops at the first failure and all collected values
// are returned.
func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseOneOrMoreExpr"))
	}
	var vals []interface{}
	for {
		p.pushV()
		val, ok := p.parseExpr(expr.expr)
		p.popV()
		if !ok {
			if len(vals) == 0 {
				// did not match once, no match
				return nil, false
			}
			return vals, true
		}
		vals = append(vals, val)
	}
}
// parseRecoveryExpr parses recover.expr with recover.recoverExpr registered
// as the handler for the listed failure labels (consumed by parseThrowExpr),
// unregistering it again when the parse finishes.
func (p *parser) parseRecoveryExpr(recover *recoveryExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseRecoveryExpr (" + strings.Join(recover.failureLabel, ",") + ")"))
	}
	p.pushRecovery(recover.failureLabel, recover.recoverExpr)
	val, ok := p.parseExpr(recover.expr)
	p.popRecovery()
	return val, ok
}
// parseRuleRefExpr resolves a rule reference by name and parses the
// referenced rule; an unknown name is recorded as an error (and an empty
// name is a generator bug, hence the panic).
func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseRuleRefExpr " + ref.name))
	}
	if ref.name == "" {
		panic(fmt.Sprintf("%s: invalid rule: missing name", ref.pos))
	}
	rule := p.rules[ref.name]
	if rule == nil {
		p.addErr(fmt.Errorf("undefined rule: %s", ref.name))
		return nil, false
	}
	return p.parseRule(rule)
}
// parseSeqExpr matches each sub-expression in order, returning the slice of
// all values; any failure rewinds both the input position and the user
// state to where the sequence started.
func (p *parser) parseSeqExpr(seq *seqExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseSeqExpr"))
	}
	vals := make([]interface{}, 0, len(seq.exprs))
	pt := p.pt
	state := p.cloneState()
	for _, expr := range seq.exprs {
		val, ok := p.parseExpr(expr)
		if !ok {
			p.restoreState(state)
			p.restore(pt)
			return nil, false
		}
		vals = append(vals, val)
	}
	return vals, true
}
// parseStateCodeExpr runs a #{...} state-change block; it always succeeds
// and consumes no input, recording any error the user code returns.
func (p *parser) parseStateCodeExpr(state *stateCodeExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseStateCodeExpr"))
	}
	err := state.run(p)
	if err != nil {
		p.addErr(err)
	}
	return nil, true
}
// parseThrowExpr raises a failure label: it searches the recovery stack
// from the innermost handler outwards for one registered for expr.label
// (see parseRecoveryExpr) and returns the first successful recovery parse;
// with no successful handler the throw fails.
func (p *parser) parseThrowExpr(expr *throwExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseThrowExpr"))
	}
	for i := len(p.recoveryStack) - 1; i >= 0; i-- {
		if recoverExpr, ok := p.recoveryStack[i][expr.label]; ok {
			if val, ok := p.parseExpr(recoverExpr); ok {
				return val, ok
			}
		}
	}
	return nil, false
}
// parseZeroOrMoreExpr implements expr*: the inner expression is matched
// repeatedly until it fails; always succeeds, possibly with a nil slice.
func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseZeroOrMoreExpr"))
	}
	var vals []interface{}
	for {
		p.pushV()
		val, ok := p.parseExpr(expr.expr)
		p.popV()
		if !ok {
			return vals, true
		}
		vals = append(vals, val)
	}
}
// parseZeroOrOneExpr implements expr?: the inner expression is attempted
// once; the overall parse succeeds either way, yielding nil on no match.
func (p *parser) parseZeroOrOneExpr(expr *zeroOrOneExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseZeroOrOneExpr"))
	}
	p.pushV()
	val, _ := p.parseExpr(expr.expr)
	p.popV()
	// whether it matched or not, consider it a match
	return val, true
}
// Updating parser
// Code generated by pigeon; DO NOT EDIT.
package parser
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"sort"
"strconv"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
// toIfaceSlice converts a parse-result value to a []interface{}, mapping a
// nil value to a nil slice. Any other non-slice type panics via the type
// assertion, exactly like the original pigeon helper.
func toIfaceSlice(v interface{}) []interface{} {
	if v != nil {
		return v.([]interface{})
	}
	return nil
}
var g = &grammar{
rules: []*rule{
{
name: "File",
pos: position{line: 17, col: 1, offset: 236},
expr: &actionExpr{
pos: position{line: 17, col: 9, offset: 244},
run: (*parser).callonFile1,
expr: &seqExpr{
pos: position{line: 17, col: 9, offset: 244},
exprs: []interface{}{
&labeledExpr{
pos: position{line: 17, col: 9, offset: 244},
label: "lines",
expr: &zeroOrMoreExpr{
pos: position{line: 17, col: 15, offset: 250},
expr: &ruleRefExpr{
pos: position{line: 17, col: 15, offset: 250},
name: "Line",
},
},
},
&ruleRefExpr{
pos: position{line: 17, col: 21, offset: 256},
name: "EOF",
},
},
},
},
},
{
name: "Line",
pos: position{line: 32, col: 1, offset: 585},
expr: &actionExpr{
pos: position{line: 32, col: 9, offset: 593},
run: (*parser).callonLine1,
expr: &seqExpr{
pos: position{line: 32, col: 9, offset: 593},
exprs: []interface{}{
&labeledExpr{
pos: position{line: 32, col: 9, offset: 593},
label: "ws",
expr: &ruleRefExpr{
pos: position{line: 32, col: 12, offset: 596},
name: "_",
},
},
&labeledExpr{
pos: position{line: 32, col: 14, offset: 598},
label: "item",
expr: &zeroOrOneExpr{
pos: position{line: 32, col: 19, offset: 603},
expr: &choiceExpr{
pos: position{line: 32, col: 20, offset: 604},
alternatives: []interface{}{
&ruleRefExpr{
pos: position{line: 32, col: 20, offset: 604},
name: "Comment",
},
&ruleRefExpr{
pos: position{line: 32, col: 30, offset: 614},
name: "Section",
},
&ruleRefExpr{
pos: position{line: 32, col: 40, offset: 624},
name: "KeyValuePair",
},
&ruleRefExpr{
pos: position{line: 32, col: 55, offset: 639},
name: "KeyOnly",
},
},
},
},
},
&labeledExpr{
pos: position{line: 32, col: 65, offset: 649},
label: "le",
expr: &ruleRefExpr{
pos: position{line: 32, col: 68, offset: 652},
name: "LineEnd",
},
},
},
},
},
},
{
name: "Comment",
pos: position{line: 40, col: 1, offset: 866},
expr: &actionExpr{
pos: position{line: 40, col: 12, offset: 877},
run: (*parser).callonComment1,
expr: &seqExpr{
pos: position{line: 40, col: 12, offset: 877},
exprs: []interface{}{
&labeledExpr{
pos: position{line: 40, col: 12, offset: 877},
label: "cs",
expr: &choiceExpr{
pos: position{line: 40, col: 16, offset: 881},
alternatives: []interface{}{
&litMatcher{
pos: position{line: 40, col: 16, offset: 881},
val: ";",
ignoreCase: false,
want: "\";\"",
},
&litMatcher{
pos: position{line: 40, col: 22, offset: 887},
val: "#",
ignoreCase: false,
want: "\"#\"",
},
},
},
},
&labeledExpr{
pos: position{line: 40, col: 27, offset: 892},
label: "comment",
expr: &ruleRefExpr{
pos: position{line: 40, col: 35, offset: 900},
name: "CommentVal",
},
},
},
},
},
},
{
name: "Section",
pos: position{line: 47, col: 1, offset: 1109},
expr: &actionExpr{
pos: position{line: 47, col: 12, offset: 1120},
run: (*parser).callonSection1,
expr: &seqExpr{
pos: position{line: 47, col: 12, offset: 1120},
exprs: []interface{}{
&litMatcher{
pos: position{line: 47, col: 12, offset: 1120},
val: "[",
ignoreCase: false,
want: "\"[\"",
},
&labeledExpr{
pos: position{line: 47, col: 16, offset: 1124},
label: "name",
expr: &ruleRefExpr{
pos: position{line: 47, col: 21, offset: 1129},
name: "SectionName",
},
},
&litMatcher{
pos: position{line: 47, col: 33, offset: 1141},
val: "]",
ignoreCase: false,
want: "\"]\"",
},
&labeledExpr{
pos: position{line: 47, col: 37, offset: 1145},
label: "ws",
expr: &ruleRefExpr{
pos: position{line: 47, col: 40, offset: 1148},
name: "_",
},
},
&labeledExpr{
pos: position{line: 47, col: 42, offset: 1150},
label: "comment",
expr: &zeroOrOneExpr{
pos: position{line: 47, col: 50, offset: 1158},
expr: &ruleRefExpr{
pos: position{line: 47, col: 50, offset: 1158},
name: "Comment",
},
},
},
},
},
},
},
{
name: "KeyValuePair",
pos: position{line: 55, col: 1, offset: 1382},
expr: &actionExpr{
pos: position{line: 55, col: 17, offset: 1398},
run: (*parser).callonKeyValuePair1,
expr: &seqExpr{
pos: position{line: 55, col: 17, offset: 1398},
exprs: []interface{}{
&labeledExpr{
pos: position{line: 55, col: 17, offset: 1398},
label: "key",
expr: &ruleRefExpr{
pos: position{line: 55, col: 21, offset: 1402},
name: "Key",
},
},
&litMatcher{
pos: position{line: 55, col: 25, offset: 1406},
val: "=",
ignoreCase: false,
want: "\"=\"",
},
&labeledExpr{
pos: position{line: 55, col: 29, offset: 1410},
label: "ws",
expr: &ruleRefExpr{
pos: position{line: 55, col: 32, offset: 1413},
name: "_",
},
},
&labeledExpr{
pos: position{line: 55, col: 34, offset: 1415},
label: "val",
expr: &ruleRefExpr{
pos: position{line: 55, col: 38, offset: 1419},
name: "Value",
},
},
&labeledExpr{
pos: position{line: 55, col: 44, offset: 1425},
label: "comment",
expr: &zeroOrOneExpr{
pos: position{line: 55, col: 52, offset: 1433},
expr: &ruleRefExpr{
pos: position{line: 55, col: 52, offset: 1433},
name: "Comment",
},
},
},
},
},
},
},
{
name: "KeyOnly",
pos: position{line: 64, col: 1, offset: 1705},
expr: &actionExpr{
pos: position{line: 64, col: 12, offset: 1716},
run: (*parser).callonKeyOnly1,
expr: &seqExpr{
pos: position{line: 64, col: 12, offset: 1716},
exprs: []interface{}{
&labeledExpr{
pos: position{line: 64, col: 12, offset: 1716},
label: "key",
expr: &ruleRefExpr{
pos: position{line: 64, col: 16, offset: 1720},
name: "Key",
},
},
&labeledExpr{
pos: position{line: 64, col: 20, offset: 1724},
label: "ws",
expr: &ruleRefExpr{
pos: position{line: 64, col: 23, offset: 1727},
name: "_",
},
},
&labeledExpr{
pos: position{line: 64, col: 25, offset: 1729},
label: "comment",
expr: &zeroOrOneExpr{
pos: position{line: 64, col: 33, offset: 1737},
expr: &ruleRefExpr{
pos: position{line: 64, col: 33, offset: 1737},
name: "Comment",
},
},
},
},
},
},
},
{
name: "CommentVal",
pos: position{line: 72, col: 1, offset: 1969},
expr: &actionExpr{
pos: position{line: 72, col: 15, offset: 1983},
run: (*parser).callonCommentVal1,
expr: &zeroOrMoreExpr{
pos: position{line: 72, col: 15, offset: 1983},
expr: &seqExpr{
pos: position{line: 72, col: 16, offset: 1984},
exprs: []interface{}{
¬Expr{
pos: position{line: 72, col: 16, offset: 1984},
expr: &ruleRefExpr{
pos: position{line: 72, col: 17, offset: 1985},
name: "LineEnd",
},
},
&anyMatcher{
line: 72, col: 25, offset: 1993,
},
},
},
},
},
},
{
name: "SectionName",
pos: position{line: 79, col: 1, offset: 2156},
expr: &actionExpr{
pos: position{line: 79, col: 16, offset: 2171},
run: (*parser).callonSectionName1,
expr: &oneOrMoreExpr{
pos: position{line: 79, col: 16, offset: 2171},
expr: &charClassMatcher{
pos: position{line: 79, col: 16, offset: 2171},
val: "[^#;\\r\\n[\\]]",
chars: []rune{'#', ';', '\r', '\n', '[', ']'},
ignoreCase: false,
inverted: true,
},
},
},
},
{
name: "Key",
pos: position{line: 86, col: 1, offset: 2345},
expr: &actionExpr{
pos: position{line: 86, col: 8, offset: 2352},
run: (*parser).callonKey1,
expr: &oneOrMoreExpr{
pos: position{line: 86, col: 8, offset: 2352},
expr: &charClassMatcher{
pos: position{line: 86, col: 8, offset: 2352},
val: "[^#;=\\r\\n[\\]]",
chars: []rune{'#', ';', '=', '\r', '\n', '[', ']'},
ignoreCase: false,
inverted: true,
},
},
},
},
{
name: "Value",
pos: position{line: 93, col: 1, offset: 2519},
expr: &choiceExpr{
pos: position{line: 93, col: 10, offset: 2528},
alternatives: []interface{}{
&ruleRefExpr{
pos: position{line: 93, col: 10, offset: 2528},
name: "QuotedValue",
},
&actionExpr{
pos: position{line: 93, col: 24, offset: 2542},
run: (*parser).callonValue3,
expr: &ruleRefExpr{
pos: position{line: 93, col: 24, offset: 2542},
name: "SimpleValue",
},
},
},
},
},
{
name: "QuotedValue",
pos: position{line: 100, col: 1, offset: 2708},
expr: &actionExpr{
pos: position{line: 100, col: 16, offset: 2723},
run: (*parser).callonQuotedValue1,
expr: &seqExpr{
pos: position{line: 100, col: 16, offset: 2723},
exprs: []interface{}{
&litMatcher{
pos: position{line: 100, col: 16, offset: 2723},
val: "\"",
ignoreCase: false,
want: "\"\\\"\"",
},
&zeroOrMoreExpr{
pos: position{line: 100, col: 20, offset: 2727},
expr: &ruleRefExpr{
pos: position{line: 100, col: 20, offset: 2727},
name: "Char",
},
},
&litMatcher{
pos: position{line: 100, col: 26, offset: 2733},
val: "\"",
ignoreCase: false,
want: "\"\\\"\"",
},
&ruleRefExpr{
pos: position{line: 100, col: 30, offset: 2737},
name: "_",
},
},
},
},
},
{
name: "Char",
pos: position{line: 107, col: 1, offset: 2899},
expr: &choiceExpr{
pos: position{line: 107, col: 9, offset: 2907},
alternatives: []interface{}{
&seqExpr{
pos: position{line: 107, col: 9, offset: 2907},
exprs: []interface{}{
¬Expr{
pos: position{line: 107, col: 9, offset: 2907},
expr: &choiceExpr{
pos: position{line: 107, col: 11, offset: 2909},
alternatives: []interface{}{
&litMatcher{
pos: position{line: 107, col: 11, offset: 2909},
val: "\"",
ignoreCase: false,
want: "\"\\\"\"",
},
&litMatcher{
pos: position{line: 107, col: 17, offset: 2915},
val: "\\",
ignoreCase: false,
want: "\"\\\\\"",
},
},
},
},
&anyMatcher{
line: 107, col: 23, offset: 2921,
},
},
},
&actionExpr{
pos: position{line: 107, col: 27, offset: 2925},
run: (*parser).callonChar8,
expr: &seqExpr{
pos: position{line: 107, col: 27, offset: 2925},
exprs: []interface{}{
&litMatcher{
pos: position{line: 107, col: 27, offset: 2925},
val: "\\",
ignoreCase: false,
want: "\"\\\\\"",
},
&choiceExpr{
pos: position{line: 107, col: 33, offset: 2931},
alternatives: []interface{}{
&charClassMatcher{
pos: position{line: 107, col: 33, offset: 2931},
val: "[\\\\/bfnrt\"]",
chars: []rune{'\\', '/', 'b', 'f', 'n', 'r', 't', '"'},
ignoreCase: false,
inverted: false,
},
&seqExpr{
pos: position{line: 107, col: 47, offset: 2945},
exprs: []interface{}{
&litMatcher{
pos: position{line: 107, col: 47, offset: 2945},
val: "u",
ignoreCase: false,
want: "\"u\"",
},
&ruleRefExpr{
pos: position{line: 107, col: 51, offset: 2949},
name: "HexDigit",
},
&ruleRefExpr{
pos: position{line: 107, col: 60, offset: 2958},
name: "HexDigit",
},
&ruleRefExpr{
pos: position{line: 107, col: 69, offset: 2967},
name: "HexDigit",
},
&ruleRefExpr{
pos: position{line: 107, col: 78, offset: 2976},
name: "HexDigit",
},
},
},
},
},
},
},
},
},
},
},
{
name: "HexDigit",
pos: position{line: 114, col: 1, offset: 3154},
expr: &actionExpr{
pos: position{line: 114, col: 13, offset: 3166},
run: (*parser).callonHexDigit1,
expr: &charClassMatcher{
pos: position{line: 114, col: 13, offset: 3166},
val: "[0-9a-f]i",
ranges: []rune{'0', '9', 'a', 'f'},
ignoreCase: true,
inverted: false,
},
},
},
{
name: "SimpleValue",
pos: position{line: 121, col: 1, offset: 3333},
expr: &actionExpr{
pos: position{line: 121, col: 16, offset: 3348},
run: (*parser).callonSimpleValue1,
expr: &zeroOrMoreExpr{
pos: position{line: 121, col: 16, offset: 3348},
expr: &charClassMatcher{
pos: position{line: 121, col: 16, offset: 3348},
val: "[^;#\\r\\n]",
chars: []rune{';', '#', '\r', '\n'},
ignoreCase: false,
inverted: true,
},
},
},
},
{
name: "LineEnd",
pos: position{line: 128, col: 1, offset: 3519},
expr: &choiceExpr{
pos: position{line: 128, col: 12, offset: 3530},
alternatives: []interface{}{
&litMatcher{
pos: position{line: 128, col: 12, offset: 3530},
val: "\r\n",
ignoreCase: false,
want: "\"\\r\\n\"",
},
&actionExpr{
pos: position{line: 128, col: 21, offset: 3539},
run: (*parser).callonLineEnd3,
expr: &litMatcher{
pos: position{line: 128, col: 21, offset: 3539},
val: "\n",
ignoreCase: false,
want: "\"\\n\"",
},
},
},
},
},
{
name: "_",
displayName: "\"whitespace\"",
pos: position{line: 135, col: 1, offset: 3676},
expr: &actionExpr{
pos: position{line: 135, col: 19, offset: 3694},
run: (*parser).callon_1,
expr: &zeroOrMoreExpr{
pos: position{line: 135, col: 19, offset: 3694},
expr: &charClassMatcher{
pos: position{line: 135, col: 19, offset: 3694},
val: "[ \\t]",
chars: []rune{' ', '\t'},
ignoreCase: false,
inverted: false,
},
},
},
},
{
name: "EOF",
pos: position{line: 142, col: 1, offset: 3826},
expr: ¬Expr{
pos: position{line: 142, col: 8, offset: 3833},
expr: &anyMatcher{
line: 142, col: 9, offset: 3834,
},
},
},
},
}
func (c *current) onFile1(lines interface{}) (interface{}, error) {
lastPosition, lastText = c.pos, string(c.text)
//fmt.Printf("\n\n\n>> File: %s // '%s'", c.pos, string(c.text))
// convert iface to []*Line
lsSlice := toIfaceSlice(lines)
ls := make([]*Line, len(lsSlice))
for i, l := range lsSlice {
ls[i] = l.(*Line)
}
return NewFile(ls), nil
}
func (p *parser) callonFile1() (interface{}, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onFile1(stack["lines"])
}
func (c *current) onLine1(ws, item, le interface{}) (interface{}, error) {
lastPosition, lastText = c.pos, string(c.text)
//fmt.Printf(">> Line: %s // '%s'", c.pos, string(c.text))
it, _ := item.(Item)
return NewLine(c.pos, ws.(string), it, le.(string)), nil
}
func (p *parser) callonLine1() (interface{}, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLine1(stack["ws"], stack["item"], stack["le"])
}
func (c *current) onComment1(cs, comment interface{}) (interface{}, error) {
lastPosition, lastText = c.pos, string(c.text)
//fmt.Printf(">> Comment: %s // '%s'\n", c.pos, string(c.text))
return NewComment(c.pos, string(cs.([]byte)), comment.(string)), nil
}
func (p *parser) callonComment1() (interface{}, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onComment1(stack["cs"], stack["comment"])
}
func (c *current) onSection1(name, ws, comment interface{}) (interface{}, error) {
lastPosition, lastText = c.pos, string(c.text)
//fmt.Printf(">> Section: %s // '%s'\n", c.pos, name)
com, _ := comment.(*Comment)
return NewSection(c.pos, name.(string), ws.(string), com), nil
}
func (p *parser) callonSection1() (interface{}, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onSection1(stack["name"], stack["ws"], stack["comment"])
}
func (c *current) onKeyValuePair1(key, ws, val, comment interface{}) (interface{}, error) {
lastPosition, lastText = c.pos, string(c.text)
//fmt.Printf(">> KeyValuePair: %s // '%s': '%s'\n", c.pos, key, val)
com, _ := comment.(*Comment)
v, _ := val.(string)
return NewKeyValuePair(c.pos, key.(string), ws.(string), &v, com), nil
}
func (p *parser) callonKeyValuePair1() (interface{}, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onKeyValuePair1(stack["key"], stack["ws"], stack["val"], stack["comment"])
}
func (c *current) onKeyOnly1(key, ws, comment interface{}) (interface{}, error) {
lastPosition, lastText = c.pos, string(c.text)
//fmt.Printf(">> KeyOnly: %s // '%s'\n", c.pos, key)
com, _ := comment.(*Comment)
return NewKeyValuePair(c.pos, key.(string), ws.(string), nil, com), nil
}
func (p *parser) callonKeyOnly1() (interface{}, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onKeyOnly1(stack["key"], stack["ws"], stack["comment"])
}
func (c *current) onCommentVal1() (interface{}, error) {
lastPosition, lastText = c.pos, string(c.text)
//fmt.Printf(">> CommentVal: %s // '%s'\n", c.pos, string(c.text))
return string(c.text), nil
}
func (p *parser) callonCommentVal1() (interface{}, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onCommentVal1()
}
func (c *current) onSectionName1() (interface{}, error) {
lastPosition, lastText = c.pos, string(c.text)
//fmt.Printf(">> SectionName: %s // '%s'\n", c.pos, string(c.text))
return string(c.text), nil
}
func (p *parser) callonSectionName1() (interface{}, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onSectionName1()
}
func (c *current) onKey1() (interface{}, error) {
lastPosition, lastText = c.pos, string(c.text)
//fmt.Printf(">> Key: %s // '%s'\n", c.pos, string(c.text))
return string(c.text), nil
}
func (p *parser) callonKey1() (interface{}, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onKey1()
}
func (c *current) onValue3() (interface{}, error) {
lastPosition, lastText = c.pos, string(c.text)
//fmt.Printf(">> Value: %s // '%s'\n", c.pos, string(c.text))
return string(c.text), nil
}
func (p *parser) callonValue3() (interface{}, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onValue3()
}
func (c *current) onQuotedValue1() (interface{}, error) {
lastPosition, lastText = c.pos, string(c.text)
//fmt.Printf(">> QuotedValue: %s // '%s'\n", c.pos, string(c.text))
return string(c.text), nil
}
func (p *parser) callonQuotedValue1() (interface{}, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onQuotedValue1()
}
func (c *current) onChar8() (interface{}, error) {
// " // ignore
lastPosition, lastText = c.pos, string(c.text)
//fmt.Printf(">> Char: %s // '%s'\n", c.pos, string(c.text))
return string(c.text), nil
}
func (p *parser) callonChar8() (interface{}, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onChar8()
}
func (c *current) onHexDigit1() (interface{}, error) {
lastPosition, lastText = c.pos, string(c.text)
//fmt.Printf(">> HexDigit: %s // '%s'\n", c.pos, string(c.text))
return string(c.text), nil
}
func (p *parser) callonHexDigit1() (interface{}, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onHexDigit1()
}
func (c *current) onSimpleValue1() (interface{}, error) {
lastPosition, lastText = c.pos, string(c.text)
//fmt.Printf(">> SimpleValue: %s // '%s'\n", c.pos, string(c.text))
return string(c.text), nil
}
func (p *parser) callonSimpleValue1() (interface{}, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onSimpleValue1()
}
func (c *current) onLineEnd3() (interface{}, error) {
lastPosition, lastText = c.pos, string(c.text)
//fmt.Printf(">> LineEnd: %s\n", c.pos)
return string(c.text), nil
}
func (p *parser) callonLineEnd3() (interface{}, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.onLineEnd3()
}
func (c *current) on_1() (interface{}, error) {
lastPosition, lastText = c.pos, string(c.text)
//fmt.Printf(">> _ %s\n", c.pos)
return string(c.text), nil
}
func (p *parser) callon_1() (interface{}, error) {
stack := p.vstack[len(p.vstack)-1]
_ = stack
return p.cur.on_1()
}
var (
	// errNoRule is returned when the grammar to parse has no rule.
	errNoRule = errors.New("grammar has no rule")
	// errInvalidEntrypoint is returned when the specified entrypoint rule
	// does not exist.
	errInvalidEntrypoint = errors.New("invalid entrypoint")
	// errInvalidEncoding is returned when the source is not properly
	// utf8-encoded.
	errInvalidEncoding = errors.New("invalid encoding")
	// errMaxExprCnt is used to signal that the maximum number of
	// expressions have been parsed (see the MaxExpressions option).
	// Fixed typo in the message: "expresssions" -> "expressions".
	errMaxExprCnt = errors.New("max number of expressions parsed")
)
// Option is a function that can set an option on the parser. It returns
// the previous setting as an Option.
type Option func(*parser) Option
// MaxExpressions creates an Option to stop parsing after the provided
// number of expressions have been parsed, if the value is 0 then the parser will
// parse for as many steps as needed (possibly an infinite number).
//
// The default for maxExprCnt is 0.
func MaxExpressions(maxExprCnt uint64) Option {
	return func(p *parser) Option {
		// Remember the previous limit so the returned Option undoes
		// this setting when applied.
		oldMaxExprCnt := p.maxExprCnt
		p.maxExprCnt = maxExprCnt
		return MaxExpressions(oldMaxExprCnt)
	}
}
// Entrypoint creates an Option to set the rule name to use as entrypoint.
// The rule name must have been specified in the -alternate-entrypoints
// if generating the parser with the -optimize-grammar flag, otherwise
// it may have been optimized out. Passing an empty string sets the
// entrypoint to the first rule in the grammar.
//
// The default is to start parsing at the first rule in the grammar.
func Entrypoint(ruleName string) Option {
	return func(p *parser) Option {
		oldEntrypoint := p.entrypoint
		p.entrypoint = ruleName
		if ruleName == "" {
			// Empty name means "start at the grammar's first rule".
			p.entrypoint = g.rules[0].name
		}
		// Returned Option restores the previous entrypoint.
		return Entrypoint(oldEntrypoint)
	}
}
// Statistics adds a user provided Stats struct to the parser to allow
// the user to process the results after the parsing has finished.
// Also the key for the "no match" counter is set.
//
// Example usage:
//
// input := "input"
// stats := Stats{}
// _, err := Parse("input-file", []byte(input), Statistics(&stats, "no match"))
// if err != nil {
// log.Panicln(err)
// }
// b, err := json.MarshalIndent(stats.ChoiceAltCnt, "", " ")
// if err != nil {
// log.Panicln(err)
// }
// fmt.Println(string(b))
//
func Statistics(stats *Stats, choiceNoMatch string) Option {
	return func(p *parser) Option {
		oldStats := p.Stats
		p.Stats = stats
		oldChoiceNoMatch := p.choiceNoMatch
		p.choiceNoMatch = choiceNoMatch
		// Lazily create the per-choice counter map so incChoiceAltCnt
		// can write to it unconditionally.
		if p.Stats.ChoiceAltCnt == nil {
			p.Stats.ChoiceAltCnt = make(map[string]map[string]int)
		}
		return Statistics(oldStats, oldChoiceNoMatch)
	}
}
// Debug creates an Option to set the debug flag to b. When set to true,
// debugging information is printed to stdout while parsing.
//
// The default is false.
func Debug(b bool) Option {
	return func(p *parser) Option {
		// Swap in the new flag; the returned Option restores the old one.
		old := p.debug
		p.debug = b
		return Debug(old)
	}
}
// Memoize creates an Option to set the memoize flag to b. When set to true,
// the parser will cache all results so each expression is evaluated only
// once. This guarantees linear parsing time even for pathological cases,
// at the expense of more memory and slower times for typical cases.
//
// The default is false.
func Memoize(b bool) Option {
	return func(p *parser) Option {
		// Swap in the new flag; the returned Option restores the old one.
		old := p.memoize
		p.memoize = b
		return Memoize(old)
	}
}
// AllowInvalidUTF8 creates an Option to allow invalid UTF-8 bytes.
// Every invalid UTF-8 byte is treated as a utf8.RuneError (U+FFFD)
// by character class matchers and is matched by the any matcher.
// The returned matched value, c.text and c.offset are NOT affected.
//
// The default is false.
func AllowInvalidUTF8(b bool) Option {
	return func(p *parser) Option {
		// Swap in the new flag; the returned Option restores the old one.
		old := p.allowInvalidUTF8
		p.allowInvalidUTF8 = b
		return AllowInvalidUTF8(old)
	}
}
// Recover creates an Option to set the recover flag to b. When set to
// true, this causes the parser to recover from panics and convert it
// to an error. Setting it to false can be useful while debugging to
// access the full stack trace.
//
// The default is true.
func Recover(b bool) Option {
	return func(p *parser) Option {
		// Swap in the new flag; the returned Option restores the old one.
		old := p.recover
		p.recover = b
		return Recover(old)
	}
}
// GlobalStore creates an Option to set a key to a certain value in
// the globalStore.
func GlobalStore(key string, value interface{}) Option {
	return func(p *parser) Option {
		// Save the previous value so applying the returned Option
		// restores it (nil when the key was absent).
		old := p.cur.globalStore[key]
		p.cur.globalStore[key] = value
		return GlobalStore(key, old)
	}
}
// InitState creates an Option to set a key to a certain value in
// the global "state" store.
func InitState(key string, value interface{}) Option {
	return func(p *parser) Option {
		// Save the previous value so applying the returned Option
		// restores it (nil when the key was absent).
		old := p.cur.state[key]
		p.cur.state[key] = value
		return InitState(key, old)
	}
}
// ParseFile parses the file identified by filename.
//
// NOTE(review): if Close fails, its error replaces whatever err was about
// to be returned (including a successful parse's nil) — standard in
// pigeon-generated code, but worth knowing when debugging.
func ParseFile(filename string, opts ...Option) (i interface{}, err error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer func() {
		if closeErr := f.Close(); closeErr != nil {
			err = closeErr
		}
	}()
	return ParseReader(filename, f, opts...)
}
// ParseReader parses the data from r using filename as information in the
// error messages.
//
// The entire input is read into memory before parsing starts.
func ParseReader(filename string, r io.Reader, opts ...Option) (interface{}, error) {
	b, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	return Parse(filename, b, opts...)
}
// Parse parses the data from b using filename as information in the
// error messages. A fresh parser is created for every call.
func Parse(filename string, b []byte, opts ...Option) (interface{}, error) {
	return newParser(filename, b, opts...).parse(g)
}
// position records a position in the text as a 1-based line/column pair
// plus a byte offset.
type position struct {
	line, col, offset int
}

// String renders the position as "line:col [offset]".
func (p position) String() string {
	out := make([]byte, 0, 24)
	out = strconv.AppendInt(out, int64(p.line), 10)
	out = append(out, ':')
	out = strconv.AppendInt(out, int64(p.col), 10)
	out = append(out, ' ', '[')
	out = strconv.AppendInt(out, int64(p.offset), 10)
	out = append(out, ']')
	return string(out)
}
// savepoint stores all state required to go back to this point in the
// parser.
type savepoint struct {
	position
	// rn is the rune at this position and w its encoded width in bytes;
	// rn == utf8.RuneError with w == 0 marks end of input (see
	// parseAnyMatcher).
	rn rune
	w  int
}
// current captures the context handed to grammar action code: the matched
// span plus the user-managed stores.
type current struct {
	pos  position // start position of the match
	text []byte   // raw text of the match
	// state is a store for arbitrary key,value pairs that the user wants to be
	// tied to the backtracking of the parser.
	// This is always rolled back if a parsing rule fails.
	state storeDict
	// globalStore is a general store for the user to store arbitrary key-value
	// pairs that they need to manage and that they do not want tied to the
	// backtracking of the parser. This is only modified by the user and never
	// rolled back by the parser. It is always up to the user to keep this in a
	// consistent state.
	globalStore storeDict
}

// storeDict is the map type backing both the state and global stores.
type storeDict map[string]interface{}
// the AST types...

// grammar is the root of the generated grammar: an ordered list of rules.
type grammar struct {
	pos   position
	rules []*rule
}

// rule is a single named grammar rule and its parse expression.
type rule struct {
	pos         position
	name        string
	displayName string
	expr        interface{}
}

// choiceExpr is an ordered choice: alternatives are tried left to right.
type choiceExpr struct {
	pos          position
	alternatives []interface{}
}

// actionExpr runs user code (run) against the text matched by expr.
type actionExpr struct {
	pos  position
	expr interface{}
	run  func(*parser) (interface{}, error)
}

// recoveryExpr parses expr with recoverExpr registered as the handler for
// the listed failure labels (see parseRecoveryExpr / parseThrowExpr).
type recoveryExpr struct {
	pos          position
	expr         interface{}
	recoverExpr  interface{}
	failureLabel []string
}

// seqExpr matches its sub-expressions in sequence.
type seqExpr struct {
	pos   position
	exprs []interface{}
}

// throwExpr raises the named failure label (see parseThrowExpr).
type throwExpr struct {
	pos   position
	label string
}

// labeledExpr binds the value matched by expr to label for action code.
type labeledExpr struct {
	pos   position
	label string
	expr  interface{}
}

// expr is the common shape of the single-operand expression types below.
type expr struct {
	pos  position
	expr interface{}
}

type andExpr expr        // &expr positive lookahead
type notExpr expr        // !expr negative lookahead
type zeroOrOneExpr expr  // expr?
type zeroOrMoreExpr expr // expr*
type oneOrMoreExpr expr  // expr+

// ruleRefExpr refers to another rule by name.
type ruleRefExpr struct {
	pos  position
	name string
}

// stateCodeExpr runs user code that may mutate the parser's state store.
type stateCodeExpr struct {
	pos position
	run func(*parser) error
}

// andCodeExpr is a &{...} code predicate.
type andCodeExpr struct {
	pos position
	run func(*parser) (bool, error)
}

// notCodeExpr is a !{...} code predicate.
type notCodeExpr struct {
	pos position
	run func(*parser) (bool, error)
}

// litMatcher matches the literal string val, optionally case-insensitively.
// want appears to hold the pre-rendered quoted form for error messages —
// unused by parseLitMatcher in this file, which recomputes it; confirm
// against the pigeon version that generated this code.
type litMatcher struct {
	pos        position
	val        string
	ignoreCase bool
	want       string
}

// charClassMatcher matches one rune against a character class made of
// explicit chars, inclusive rune-pair ranges and Unicode range tables,
// with optional case folding and inversion (see parseCharClassMatcher).
type charClassMatcher struct {
	pos             position
	val             string
	basicLatinChars [128]bool
	chars           []rune
	ranges          []rune
	classes         []*unicode.RangeTable
	ignoreCase      bool
	inverted        bool
}

// anyMatcher matches any single rune (".").
type anyMatcher position
// errList cumulates the errors found by the parser.
type errList []error

// add appends err to the list.
func (e *errList) add(err error) {
	*e = append(*e, err)
}

// err converts the list into a single error value: nil when the list is
// empty, otherwise the deduplicated list itself.
func (e errList) err() error {
	if len(e) == 0 {
		return nil
	}
	e.dedupe()
	return e
}

// dedupe drops errors whose message duplicates an earlier one, keeping the
// first occurrence of each message.
func (e *errList) dedupe() {
	seen := make(map[string]bool)
	var unique []error
	for _, item := range *e {
		msg := item.Error()
		if seen[msg] {
			continue
		}
		seen[msg] = true
		unique = append(unique, item)
	}
	*e = unique
}

// Error implements the error interface, joining all messages with newlines.
func (e errList) Error() string {
	switch len(e) {
	case 0:
		return ""
	case 1:
		return e[0].Error()
	default:
		var sb bytes.Buffer
		for i, err := range e {
			if i > 0 {
				sb.WriteByte('\n')
			}
			sb.WriteString(err.Error())
		}
		return sb.String()
	}
}
// parserError wraps an error with a prefix indicating the rule in which
// the error occurred. The original error is stored in the Inner field.
type parserError struct {
	Inner    error
	pos      position
	prefix   string
	expected []string
}

// Error returns the inner error's message preceded by the location/rule prefix.
func (p *parserError) Error() string {
	return fmt.Sprintf("%s: %s", p.prefix, p.Inner.Error())
}
// newParser creates a parser with the specified input source and options.
func newParser(filename string, b []byte, opts ...Option) *parser {
	stats := Stats{
		ChoiceAltCnt: make(map[string]map[string]int),
	}

	p := &parser{
		filename: filename,
		errs:     new(errList),
		data:     b,
		pt:       savepoint{position: position{line: 1}},
		recover:  true,
		cur: current{
			state:       make(storeDict),
			globalStore: make(storeDict),
		},
		maxFailPos:      position{col: 1, line: 1},
		maxFailExpected: make([]string, 0, 20),
		Stats:           &stats,
		// start rule is rule [0] unless an alternate entrypoint is specified
		entrypoint: g.rules[0].name,
	}
	p.setOptions(opts)

	// Options left the limit unset: allow effectively unlimited expressions.
	if p.maxExprCnt == 0 {
		p.maxExprCnt = math.MaxUint64
	}

	return p
}

// setOptions applies the options to the parser.
func (p *parser) setOptions(opts []Option) {
	for _, opt := range opts {
		opt(p)
	}
}
// resultTuple is a memoized parse attempt: the value, the match flag and
// the parser position reached after the attempt.
type resultTuple struct {
	v   interface{}
	b   bool
	end savepoint
}

// choiceNoMatch marks a choice expression for which no alternative matched.
const choiceNoMatch = -1

// Stats stores some statistics, gathered during parsing
type Stats struct {
	// ExprCnt counts the number of expressions processed during parsing
	// This value is compared to the maximum number of expressions allowed
	// (set by the MaxExpressions option).
	ExprCnt uint64

	// ChoiceAltCnt is used to count for each ordered choice expression,
	// which alternative is used how may times.
	// These numbers allow to optimize the order of the ordered choice expression
	// to increase the performance of the parser
	//
	// The outer key of ChoiceAltCnt is composed of the name of the rule as well
	// as the line and the column of the ordered choice.
	// The inner key of ChoiceAltCnt is the number (one-based) of the matching alternative.
	// For each alternative the number of matches are counted. If an ordered choice does not
	// match, a special counter is incremented. The name of this counter is set with
	// the parser option Statistics.
	// For an alternative to be included in ChoiceAltCnt, it has to match at least once.
	ChoiceAltCnt map[string]map[string]int
}

// parser holds the complete state of one parse run.
type parser struct {
	filename string
	pt       savepoint // current position/rune
	cur      current

	data []byte
	errs *errList

	depth   int  // debug-trace indentation depth
	recover bool // convert panics in action code into errors
	debug   bool

	memoize bool
	// memoization table for the packrat algorithm:
	// map[offset in source] map[expression or rule] {value, match}
	memo map[int]map[interface{}]resultTuple

	// rules table, maps the rule identifier to the rule node
	rules map[string]*rule
	// variables stack, map of label to value
	vstack []map[string]interface{}
	// rule stack, allows identification of the current rule in errors
	rstack []*rule

	// parse fail
	maxFailPos            position
	maxFailExpected       []string
	maxFailInvertExpected bool

	// max number of expressions to be parsed
	maxExprCnt uint64
	// entrypoint for the parser
	entrypoint string

	allowInvalidUTF8 bool

	*Stats

	choiceNoMatch string
	// recovery expression stack, keeps track of the currently available recovery expression, these are traversed in reverse
	recoveryStack []map[string]interface{}
}
// push a variable set on the vstack.
func (p *parser) pushV() {
	if cap(p.vstack) == len(p.vstack) {
		// create new empty slot in the stack
		p.vstack = append(p.vstack, nil)
	} else {
		// slice to 1 more
		p.vstack = p.vstack[:len(p.vstack)+1]
	}

	// get the last args set
	m := p.vstack[len(p.vstack)-1]
	if m != nil && len(m) == 0 {
		// empty map, all good
		return
	}

	// slot is nil or holds a previously-used map: allocate a fresh one
	m = make(map[string]interface{})
	p.vstack[len(p.vstack)-1] = m
}

// pop a variable set from the vstack.
func (p *parser) popV() {
	// if the map is not empty, clear it
	m := p.vstack[len(p.vstack)-1]
	if len(m) > 0 {
		// GC that map
		p.vstack[len(p.vstack)-1] = nil
	}
	p.vstack = p.vstack[:len(p.vstack)-1]
}

// push a recovery expression with its labels to the recoveryStack
func (p *parser) pushRecovery(labels []string, expr interface{}) {
	if cap(p.recoveryStack) == len(p.recoveryStack) {
		// create new empty slot in the stack
		p.recoveryStack = append(p.recoveryStack, nil)
	} else {
		// slice to 1 more
		p.recoveryStack = p.recoveryStack[:len(p.recoveryStack)+1]
	}

	// map every failure label to the same recovery expression
	m := make(map[string]interface{}, len(labels))
	for _, fl := range labels {
		m[fl] = expr
	}
	p.recoveryStack[len(p.recoveryStack)-1] = m
}

// pop a recovery expression from the recoveryStack
func (p *parser) popRecovery() {
	// GC that map
	p.recoveryStack[len(p.recoveryStack)-1] = nil

	p.recoveryStack = p.recoveryStack[:len(p.recoveryStack)-1]
}
// print writes a debug trace line (position, message, current rune) when
// debugging is on, and returns s unchanged so it can be nested in calls.
func (p *parser) print(prefix, s string) string {
	if !p.debug {
		return s
	}

	fmt.Printf("%s %d:%d:%d: %s [%#U]\n",
		prefix, p.pt.line, p.pt.col, p.pt.offset, s, p.pt.rn)
	return s
}

// in traces entry into a parse step, increasing the trace indentation.
func (p *parser) in(s string) string {
	p.depth++
	return p.print(strings.Repeat(" ", p.depth)+">", s)
}

// out traces exit from a parse step, decreasing the trace indentation.
func (p *parser) out(s string) string {
	p.depth--
	return p.print(strings.Repeat(" ", p.depth)+"<", s)
}
// addErr records err at the parser's current position.
func (p *parser) addErr(err error) {
	p.addErrAt(err, p.pt.position, []string{})
}

// addErrAt records err at pos, building a prefix out of the filename, the
// position and the rule currently being parsed.
func (p *parser) addErrAt(err error, pos position, expected []string) {
	var buf bytes.Buffer
	if p.filename != "" {
		buf.WriteString(p.filename)
	}
	if buf.Len() > 0 {
		buf.WriteString(":")
	}
	buf.WriteString(fmt.Sprintf("%d:%d (%d)", pos.line, pos.col, pos.offset))
	if len(p.rstack) > 0 {
		if buf.Len() > 0 {
			buf.WriteString(": ")
		}
		// innermost rule on the stack is the one being parsed
		rule := p.rstack[len(p.rstack)-1]
		if rule.displayName != "" {
			buf.WriteString("rule " + rule.displayName)
		} else {
			buf.WriteString("rule " + rule.name)
		}
	}
	pe := &parserError{Inner: err, pos: pos, prefix: buf.String(), expected: expected}
	p.errs.add(pe)
}

// failAt records an expectation for the "no match found, expected ..."
// message, keeping only expectations at the farthest position reached.
func (p *parser) failAt(fail bool, pos position, want string) {
	// process fail if parsing fails and not inverted or parsing succeeds and invert is set
	if fail == p.maxFailInvertExpected {
		if pos.offset < p.maxFailPos.offset {
			return
		}

		// a new farthest position resets the expectation list
		if pos.offset > p.maxFailPos.offset {
			p.maxFailPos = pos
			p.maxFailExpected = p.maxFailExpected[:0]
		}

		if p.maxFailInvertExpected {
			want = "!" + want
		}
		p.maxFailExpected = append(p.maxFailExpected, want)
	}
}
// read advances the parser to the next rune.
func (p *parser) read() {
	p.pt.offset += p.pt.w
	rn, n := utf8.DecodeRune(p.data[p.pt.offset:])
	p.pt.rn = rn
	p.pt.w = n
	p.pt.col++
	if rn == '\n' {
		p.pt.line++
		p.pt.col = 0
	}

	if rn == utf8.RuneError && n == 1 { // see utf8.DecodeRune
		if !p.allowInvalidUTF8 {
			p.addErr(errInvalidEncoding)
		}
	}
}

// restore parser position to the savepoint pt.
func (p *parser) restore(pt savepoint) {
	if p.debug {
		defer p.out(p.in("restore"))
	}
	// already at that position, skip the copy
	if pt.offset == p.pt.offset {
		return
	}
	p.pt = pt
}
// Cloner is implemented by any value that has a Clone method, which returns a
// copy of the value. This is mainly used for types which are not passed by
// value (e.g map, slice, chan) or structs that contain such types.
//
// This is used in conjunction with the global state feature to create proper
// copies of the state to allow the parser to properly restore the state in
// the case of backtracking.
type Cloner interface {
	Clone() interface{}
}

// statePool recycles state maps to cut allocations during backtracking.
var statePool = &sync.Pool{
	New: func() interface{} { return make(storeDict) },
}

// Discard empties the dict and returns it to the pool for reuse.
func (sd storeDict) Discard() {
	for k := range sd {
		delete(sd, k)
	}
	statePool.Put(sd)
}
// clone and return parser current state.
func (p *parser) cloneState() storeDict {
	if p.debug {
		defer p.out(p.in("cloneState"))
	}

	state := statePool.Get().(storeDict)
	for k, v := range p.cur.state {
		if c, ok := v.(Cloner); ok {
			// values that know how to clone themselves get a deep copy
			state[k] = c.Clone()
		} else {
			state[k] = v
		}
	}
	return state
}

// restore parser current state to the state storeDict.
// every restoreState should applied only one time for every cloned state
func (p *parser) restoreState(state storeDict) {
	if p.debug {
		defer p.out(p.in("restoreState"))
	}
	// recycle the map being replaced before installing the saved one
	p.cur.state.Discard()
	p.cur.state = state
}
// get the slice of bytes from the savepoint start to the current position.
func (p *parser) sliceFrom(start savepoint) []byte {
	return p.data[start.position.offset:p.pt.position.offset]
}

// getMemoized looks up the memoized result for node at the current offset.
func (p *parser) getMemoized(node interface{}) (resultTuple, bool) {
	if len(p.memo) == 0 {
		return resultTuple{}, false
	}
	m := p.memo[p.pt.offset]
	if len(m) == 0 {
		return resultTuple{}, false
	}
	res, ok := m[node]
	return res, ok
}

// setMemoized stores tuple for node at the offset of savepoint pt, lazily
// creating the memoization tables.
func (p *parser) setMemoized(pt savepoint, node interface{}, tuple resultTuple) {
	if p.memo == nil {
		p.memo = make(map[int]map[interface{}]resultTuple)
	}
	m := p.memo[pt.offset]
	if m == nil {
		m = make(map[interface{}]resultTuple)
		p.memo[pt.offset] = m
	}
	m[node] = tuple
}

// buildRulesTable indexes the grammar's rules by name for rule references.
func (p *parser) buildRulesTable(g *grammar) {
	p.rules = make(map[string]*rule, len(g.rules))
	for _, r := range g.rules {
		p.rules[r.name] = r
	}
}
// parse runs the parser over grammar g starting at the configured
// entrypoint rule, returning the parsed value or the collected errors.
func (p *parser) parse(g *grammar) (val interface{}, err error) {
	if len(g.rules) == 0 {
		p.addErr(errNoRule)
		return nil, p.errs.err()
	}

	// TODO : not super critical but this could be generated
	p.buildRulesTable(g)

	if p.recover {
		// panic can be used in action code to stop parsing immediately
		// and return the panic as an error.
		defer func() {
			if e := recover(); e != nil {
				if p.debug {
					defer p.out(p.in("panic handler"))
				}
				val = nil
				switch e := e.(type) {
				case error:
					p.addErr(e)
				default:
					p.addErr(fmt.Errorf("%v", e))
				}
				err = p.errs.err()
			}
		}()
	}

	startRule, ok := p.rules[p.entrypoint]
	if !ok {
		p.addErr(errInvalidEntrypoint)
		return nil, p.errs.err()
	}

	p.read() // advance to first rune
	val, ok = p.parseRule(startRule)
	if !ok {
		if len(*p.errs) == 0 {
			// If parsing fails, but no errors have been recorded, the expected values
			// for the farthest parser position are returned as error.
			maxFailExpectedMap := make(map[string]struct{}, len(p.maxFailExpected))
			for _, v := range p.maxFailExpected {
				maxFailExpectedMap[v] = struct{}{}
			}
			expected := make([]string, 0, len(maxFailExpectedMap))
			eof := false
			if _, ok := maxFailExpectedMap["!."]; ok {
				// "!." means "not any rune", i.e. end of file was expected
				delete(maxFailExpectedMap, "!.")
				eof = true
			}
			for k := range maxFailExpectedMap {
				expected = append(expected, k)
			}
			sort.Strings(expected)
			if eof {
				expected = append(expected, "EOF")
			}
			p.addErrAt(errors.New("no match found, expected: "+listJoin(expected, ", ", "or")), p.maxFailPos, expected)
		}

		return nil, p.errs.err()
	}
	return val, p.errs.err()
}
// listJoin joins list with sep, using lastSep before the final element,
// e.g. listJoin([a b c], ", ", "or") -> "a, b or c".
func listJoin(list []string, sep string, lastSep string) string {
	if len(list) == 0 {
		return ""
	}
	if len(list) == 1 {
		return list[0]
	}
	head := strings.Join(list[:len(list)-1], sep)
	return head + " " + lastSep + " " + list[len(list)-1]
}
// parseRule parses one rule, consulting and filling the memoization table
// when memoization is enabled.
func (p *parser) parseRule(rule *rule) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseRule " + rule.name))
	}

	if p.memoize {
		res, ok := p.getMemoized(rule)
		if ok {
			p.restore(res.end)
			return res.v, res.b
		}
	}

	start := p.pt
	p.rstack = append(p.rstack, rule)
	p.pushV()
	val, ok := p.parseExpr(rule.expr)
	p.popV()
	p.rstack = p.rstack[:len(p.rstack)-1]
	if ok && p.debug {
		p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
	}

	if p.memoize {
		p.setMemoized(start, rule, resultTuple{val, ok, p.pt})
	}
	return val, ok
}

// parseExpr dispatches on the expression's concrete type, enforcing the
// maximum-expression limit and the memoization cache.
func (p *parser) parseExpr(expr interface{}) (interface{}, bool) {
	var pt savepoint

	if p.memoize {
		res, ok := p.getMemoized(expr)
		if ok {
			p.restore(res.end)
			return res.v, res.b
		}
		pt = p.pt
	}

	p.ExprCnt++
	if p.ExprCnt > p.maxExprCnt {
		// aborts the parse; recovered in parse() when p.recover is set
		panic(errMaxExprCnt)
	}

	var val interface{}
	var ok bool
	switch expr := expr.(type) {
	case *actionExpr:
		val, ok = p.parseActionExpr(expr)
	case *andCodeExpr:
		val, ok = p.parseAndCodeExpr(expr)
	case *andExpr:
		val, ok = p.parseAndExpr(expr)
	case *anyMatcher:
		val, ok = p.parseAnyMatcher(expr)
	case *charClassMatcher:
		val, ok = p.parseCharClassMatcher(expr)
	case *choiceExpr:
		val, ok = p.parseChoiceExpr(expr)
	case *labeledExpr:
		val, ok = p.parseLabeledExpr(expr)
	case *litMatcher:
		val, ok = p.parseLitMatcher(expr)
	case *notCodeExpr:
		val, ok = p.parseNotCodeExpr(expr)
	case *notExpr:
		val, ok = p.parseNotExpr(expr)
	case *oneOrMoreExpr:
		val, ok = p.parseOneOrMoreExpr(expr)
	case *recoveryExpr:
		val, ok = p.parseRecoveryExpr(expr)
	case *ruleRefExpr:
		val, ok = p.parseRuleRefExpr(expr)
	case *seqExpr:
		val, ok = p.parseSeqExpr(expr)
	case *stateCodeExpr:
		val, ok = p.parseStateCodeExpr(expr)
	case *throwExpr:
		val, ok = p.parseThrowExpr(expr)
	case *zeroOrMoreExpr:
		val, ok = p.parseZeroOrMoreExpr(expr)
	case *zeroOrOneExpr:
		val, ok = p.parseZeroOrOneExpr(expr)
	default:
		panic(fmt.Sprintf("unknown expression type %T", expr))
	}
	if p.memoize {
		p.setMemoized(pt, expr, resultTuple{val, ok, p.pt})
	}
	return val, ok
}
// parseActionExpr parses the inner expression and, on a match, runs the
// associated action code with the matched span available via p.cur.
func (p *parser) parseActionExpr(act *actionExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseActionExpr"))
	}

	start := p.pt
	val, ok := p.parseExpr(act.expr)
	if ok {
		p.cur.pos = start.position
		p.cur.text = p.sliceFrom(start)
		state := p.cloneState()
		actVal, err := act.run(p)
		if err != nil {
			p.addErrAt(err, start.position, []string{})
		}
		// state changes made by the action code are rolled back
		p.restoreState(state)

		val = actVal
	}
	if ok && p.debug {
		p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
	}
	return val, ok
}

// parseAndCodeExpr runs a code predicate; the match succeeds when the code
// returns true. No input is consumed and state changes are rolled back.
func (p *parser) parseAndCodeExpr(and *andCodeExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseAndCodeExpr"))
	}

	state := p.cloneState()

	ok, err := and.run(p)
	if err != nil {
		p.addErr(err)
	}
	p.restoreState(state)

	return nil, ok
}

// parseAndExpr is the positive lookahead: try the inner expression, then
// rewind position and state regardless of the outcome.
func (p *parser) parseAndExpr(and *andExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseAndExpr"))
	}

	pt := p.pt
	state := p.cloneState()
	p.pushV()
	_, ok := p.parseExpr(and.expr)
	p.popV()
	p.restoreState(state)
	p.restore(pt)

	return nil, ok
}

// parseAnyMatcher matches any single rune; it fails only at end of input.
func (p *parser) parseAnyMatcher(any *anyMatcher) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseAnyMatcher"))
	}

	if p.pt.rn == utf8.RuneError && p.pt.w == 0 {
		// EOF - see utf8.DecodeRune
		p.failAt(false, p.pt.position, ".")
		return nil, false
	}
	start := p.pt
	p.read()
	p.failAt(true, start.position, ".")
	return p.sliceFrom(start), true
}
// parseCharClassMatcher matches the current rune against the class's
// explicit chars, ranges and Unicode tables, honoring ignoreCase and the
// inverted flag.
func (p *parser) parseCharClassMatcher(chr *charClassMatcher) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseCharClassMatcher"))
	}

	cur := p.pt.rn
	start := p.pt

	// can't match EOF
	if cur == utf8.RuneError && p.pt.w == 0 { // see utf8.DecodeRune
		p.failAt(false, start.position, chr.val)
		return nil, false
	}

	if chr.ignoreCase {
		cur = unicode.ToLower(cur)
	}

	// try to match in the list of available chars
	for _, rn := range chr.chars {
		if rn == cur {
			if chr.inverted {
				p.failAt(false, start.position, chr.val)
				return nil, false
			}
			p.read()
			p.failAt(true, start.position, chr.val)
			return p.sliceFrom(start), true
		}
	}

	// try to match in the list of ranges
	for i := 0; i < len(chr.ranges); i += 2 {
		// ranges come in low/high pairs
		if cur >= chr.ranges[i] && cur <= chr.ranges[i+1] {
			if chr.inverted {
				p.failAt(false, start.position, chr.val)
				return nil, false
			}
			p.read()
			p.failAt(true, start.position, chr.val)
			return p.sliceFrom(start), true
		}
	}

	// try to match in the list of Unicode classes
	for _, cl := range chr.classes {
		if unicode.Is(cl, cur) {
			if chr.inverted {
				p.failAt(false, start.position, chr.val)
				return nil, false
			}
			p.read()
			p.failAt(true, start.position, chr.val)
			return p.sliceFrom(start), true
		}
	}

	// nothing matched: an inverted class succeeds, a normal one fails
	if chr.inverted {
		p.read()
		p.failAt(true, start.position, chr.val)
		return p.sliceFrom(start), true
	}
	p.failAt(false, start.position, chr.val)
	return nil, false
}
// incChoiceAltCnt records in Stats.ChoiceAltCnt which alternative of the
// choice matched; choiceNoMatch is counted under the configured label.
func (p *parser) incChoiceAltCnt(ch *choiceExpr, altI int) {
	choiceIdent := fmt.Sprintf("%s %d:%d", p.rstack[len(p.rstack)-1].name, ch.pos.line, ch.pos.col)
	m := p.ChoiceAltCnt[choiceIdent]
	if m == nil {
		m = make(map[string]int)
		p.ChoiceAltCnt[choiceIdent] = m
	}
	// We increment altI by 1, so the keys do not start at 0
	alt := strconv.Itoa(altI + 1)
	if altI == choiceNoMatch {
		alt = p.choiceNoMatch
	}
	m[alt]++
}

// parseChoiceExpr tries the alternatives in order, restoring state between
// failed attempts, and returns the first match.
func (p *parser) parseChoiceExpr(ch *choiceExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseChoiceExpr"))
	}

	for altI, alt := range ch.alternatives {
		// dummy assignment to prevent compile error if optimized
		_ = altI

		state := p.cloneState()

		p.pushV()
		val, ok := p.parseExpr(alt)
		p.popV()
		if ok {
			p.incChoiceAltCnt(ch, altI)
			return val, ok
		}
		p.restoreState(state)
	}
	p.incChoiceAltCnt(ch, choiceNoMatch)
	return nil, false
}
// parseLabeledExpr parses the inner expression and, on success, stores its
// value under the label in the enclosing variable set.
func (p *parser) parseLabeledExpr(lab *labeledExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseLabeledExpr"))
	}

	p.pushV()
	val, ok := p.parseExpr(lab.expr)
	p.popV()
	if ok && lab.label != "" {
		// after popV this is the frame the label belongs to
		m := p.vstack[len(p.vstack)-1]
		m[lab.label] = val
	}
	return val, ok
}

// parseLitMatcher matches the literal string rune by rune, rewinding the
// position on a mismatch.
func (p *parser) parseLitMatcher(lit *litMatcher) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseLitMatcher"))
	}

	start := p.pt
	for _, want := range lit.val {
		cur := p.pt.rn
		if lit.ignoreCase {
			cur = unicode.ToLower(cur)
		}
		if cur != want {
			p.failAt(false, start.position, lit.want)
			p.restore(start)
			return nil, false
		}
		p.read()
	}
	p.failAt(true, start.position, lit.want)
	return p.sliceFrom(start), true
}
// parseNotCodeExpr runs a code predicate and succeeds when it returns
// false. No input is consumed and state changes are rolled back.
func (p *parser) parseNotCodeExpr(not *notCodeExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseNotCodeExpr"))
	}

	state := p.cloneState()

	ok, err := not.run(p)
	if err != nil {
		p.addErr(err)
	}
	p.restoreState(state)

	return nil, !ok
}

// parseNotExpr is the negative lookahead: succeed when the inner
// expression fails, rewinding position and state either way.
func (p *parser) parseNotExpr(not *notExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseNotExpr"))
	}

	pt := p.pt
	state := p.cloneState()
	p.pushV()
	// invert fail reporting while inside the negative lookahead
	p.maxFailInvertExpected = !p.maxFailInvertExpected
	_, ok := p.parseExpr(not.expr)
	p.maxFailInvertExpected = !p.maxFailInvertExpected
	p.popV()
	p.restoreState(state)
	p.restore(pt)

	return nil, !ok
}
// parseOneOrMoreExpr matches the inner expression repeatedly, requiring at
// least one match.
func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseOneOrMoreExpr"))
	}

	var vals []interface{}

	for {
		p.pushV()
		val, ok := p.parseExpr(expr.expr)
		p.popV()
		if !ok {
			if len(vals) == 0 {
				// did not match once, no match
				return nil, false
			}
			return vals, true
		}
		vals = append(vals, val)
	}
}

// parseRecoveryExpr parses expr with the recovery expression registered
// for its failure labels for the duration of that parse.
func (p *parser) parseRecoveryExpr(recover *recoveryExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseRecoveryExpr (" + strings.Join(recover.failureLabel, ",") + ")"))
	}

	p.pushRecovery(recover.failureLabel, recover.recoverExpr)
	val, ok := p.parseExpr(recover.expr)
	p.popRecovery()

	return val, ok
}

// parseRuleRefExpr parses the rule the reference points to.
func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseRuleRefExpr " + ref.name))
	}

	if ref.name == "" {
		// an unnamed reference is a generator bug, not an input error
		panic(fmt.Sprintf("%s: invalid rule: missing name", ref.pos))
	}

	rule := p.rules[ref.name]
	if rule == nil {
		p.addErr(fmt.Errorf("undefined rule: %s", ref.name))
		return nil, false
	}
	return p.parseRule(rule)
}
// parseSeqExpr matches every expression in order, rewinding the whole
// sequence (position and state) on the first failure.
func (p *parser) parseSeqExpr(seq *seqExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseSeqExpr"))
	}

	vals := make([]interface{}, 0, len(seq.exprs))

	pt := p.pt
	state := p.cloneState()

	for _, expr := range seq.exprs {
		val, ok := p.parseExpr(expr)
		if !ok {
			p.restoreState(state)
			p.restore(pt)
			return nil, false
		}
		vals = append(vals, val)
	}
	return vals, true
}

// parseStateCodeExpr runs user code that may mutate the state store; it
// always reports a match.
func (p *parser) parseStateCodeExpr(state *stateCodeExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseStateCodeExpr"))
	}

	err := state.run(p)
	if err != nil {
		p.addErr(err)
	}
	return nil, true
}

// parseThrowExpr searches the recovery stack from innermost to outermost
// for an expression registered under the thrown label and parses it; it
// fails when none is found or none matches.
func (p *parser) parseThrowExpr(expr *throwExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseThrowExpr"))
	}

	for i := len(p.recoveryStack) - 1; i >= 0; i-- {
		if recoverExpr, ok := p.recoveryStack[i][expr.label]; ok {
			if val, ok := p.parseExpr(recoverExpr); ok {
				return val, ok
			}
		}
	}

	return nil, false
}
// parseZeroOrMoreExpr matches the inner expression any number of times;
// it never fails.
func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseZeroOrMoreExpr"))
	}

	var vals []interface{}

	for {
		p.pushV()
		val, ok := p.parseExpr(expr.expr)
		p.popV()
		if !ok {
			return vals, true
		}
		vals = append(vals, val)
	}
}

// parseZeroOrOneExpr optionally matches the inner expression, yielding
// nil when it did not match; it always succeeds.
func (p *parser) parseZeroOrOneExpr(expr *zeroOrOneExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseZeroOrOneExpr"))
	}

	p.pushV()
	val, _ := p.parseExpr(expr.expr)
	p.popV()
	// whether it matched or not, consider it a match
	return val, true
}
|
/*
*
* k6 - a next-generation load testing tool
* Copyright (C) 2016 Load Impact
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package js
import (
"context"
"crypto/tls"
"encoding/json"
"net"
"net/http"
"net/http/cookiejar"
"strconv"
"time"
"github.com/dop251/goja"
"github.com/loadimpact/k6/js/common"
"github.com/loadimpact/k6/lib"
"github.com/loadimpact/k6/lib/metrics"
"github.com/loadimpact/k6/lib/netext"
"github.com/loadimpact/k6/stats"
"github.com/oxtoacart/bpool"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/afero"
"github.com/viki-org/dnscache"
"golang.org/x/net/http2"
"golang.org/x/time/rate"
)
// errInterrupt is passed to the JS runtime to interrupt execution when a
// context is cancelled.
var errInterrupt = errors.New("context cancelled")

// Runner wraps a compiled script Bundle and creates VUs from it.
type Runner struct {
	Bundle       *Bundle
	Logger       *log.Logger
	defaultGroup *lib.Group // root group for all samples

	BaseDialer net.Dialer
	Resolver   *dnscache.Resolver
	RPSLimit   *rate.Limiter // nil unless an RPS option is set

	setupData interface{} // result of Setup(), shared with all VUs
}
// New compiles src into a Bundle and wraps it in a Runner.
func New(src *lib.SourceData, fs afero.Fs, rtOpts lib.RuntimeOptions) (*Runner, error) {
	b, err := NewBundle(src, fs, rtOpts)
	if err != nil {
		return nil, err
	}
	return NewFromBundle(b)
}

// NewFromArchive restores a Bundle from an archive and wraps it in a Runner.
func NewFromArchive(arc *lib.Archive, rtOpts lib.RuntimeOptions) (*Runner, error) {
	b, err := NewBundleFromArchive(arc, rtOpts)
	if err != nil {
		return nil, err
	}
	return NewFromBundle(b)
}
// NewFromBundle builds a Runner around an already-compiled Bundle with
// default dialer and DNS-cache settings.
func NewFromBundle(b *Bundle) (*Runner, error) {
	defaultGroup, err := lib.NewGroup("", nil)
	if err != nil {
		return nil, err
	}

	r := &Runner{
		Bundle:       b,
		Logger:       log.StandardLogger(),
		defaultGroup: defaultGroup,
		BaseDialer: net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
			DualStack: true,
		},
		Resolver: dnscache.New(0),
	}
	// Applies the bundle's options, which also initializes the RPS limiter.
	r.SetOptions(r.Bundle.Options)
	return r, nil
}
// MakeArchive exports the runner's bundle as an archive.
func (r *Runner) MakeArchive() *lib.Archive {
	return r.Bundle.MakeArchive()
}

// NewVU creates a fresh virtual user and returns it behind the generic
// lib.VU interface.
func (r *Runner) NewVU() (lib.VU, error) {
	concrete, err := r.newVU()
	if err != nil {
		return nil, err
	}
	return lib.VU(concrete), nil
}
// newVU instantiates a fresh copy of the bundle and wires a concrete *VU
// around it: TLS config, HTTP transport, dialer, console and buffer pool.
func (r *Runner) newVU() (*VU, error) {
	// Instantiate a new bundle, make a VU out of it.
	bi, err := r.Bundle.Instantiate()
	if err != nil {
		return nil, err
	}

	// Cipher suites and TLS versions are optional overrides in the options.
	var cipherSuites []uint16
	if r.Bundle.Options.TLSCipherSuites != nil {
		cipherSuites = *r.Bundle.Options.TLSCipherSuites
	}

	var tlsVersions lib.TLSVersions
	if r.Bundle.Options.TLSVersion != nil {
		tlsVersions = *r.Bundle.Options.TLSVersion
	}

	// Build the client certificates and an index from domain name to cert.
	tlsAuth := r.Bundle.Options.TLSAuth
	certs := make([]tls.Certificate, len(tlsAuth))
	nameToCert := make(map[string]*tls.Certificate)
	for i, auth := range tlsAuth {
		for _, name := range auth.Domains {
			cert, err := auth.Certificate()
			if err != nil {
				return nil, err
			}
			certs[i] = *cert
			nameToCert[name] = &certs[i]
		}
	}

	dialer := &netext.Dialer{
		Dialer:    r.BaseDialer,
		Resolver:  r.Resolver,
		Blacklist: r.Bundle.Options.BlacklistIPs,
		Hosts:     r.Bundle.Options.Hosts,
	}

	tlsConfig := &tls.Config{
		InsecureSkipVerify: r.Bundle.Options.InsecureSkipTLSVerify.Bool,
		CipherSuites:       cipherSuites,
		MinVersion:         uint16(tlsVersions.Min),
		MaxVersion:         uint16(tlsVersions.Max),
		Certificates:       certs,
		NameToCertificate:  nameToCert,
		Renegotiation:      tls.RenegotiateFreelyAsClient,
	}
	transport := &http.Transport{
		Proxy:              http.ProxyFromEnvironment,
		TLSClientConfig:    tlsConfig,
		DialContext:        dialer.DialContext,
		DisableCompression: true,
	}
	// Best-effort HTTP/2 support; on failure the transport stays HTTP/1.1.
	_ = http2.ConfigureTransport(transport)

	vu := &VU{
		BundleInstance: *bi,
		Runner:         r,
		HTTPTransport:  transport,
		Dialer:         dialer,
		TLSConfig:      tlsConfig,
		Console:        NewConsole(),
		BPool:          bpool.NewBufferPool(100),
	}
	vu.Runtime.Set("console", common.Bind(vu.Runtime, vu.Console, vu.Context))
	common.BindToGlobal(vu.Runtime, map[string]interface{}{
		// open() is an init-phase API, so calling it from VU code is an error.
		"open": func() {
			common.Throw(vu.Runtime, errors.New("open() function is only available in the init context"))
		},
	})

	// Give the VU an initial sense of identity.
	if err := vu.Reconfigure(0); err != nil {
		return nil, err
	}

	return vu, nil
}
// Setup runs the script's exported setup() function in a temporary VU and
// stores its result for later distribution to the VUs.
func (r *Runner) Setup(ctx context.Context) error {
	v, err := r.runPart(ctx, "setup", nil)
	if err != nil {
		return errors.Wrap(err, "setup")
	}
	// JSON round-trip normalizes the exported JS value into plain Go types.
	data, err := json.Marshal(v.Export())
	if err != nil {
		return errors.Wrap(err, "setup")
	}
	return json.Unmarshal(data, &r.setupData)
}

// Teardown runs the script's exported teardown() function, passing it the
// data produced by Setup.
func (r *Runner) Teardown(ctx context.Context) error {
	_, err := r.runPart(ctx, "teardown", r.setupData)
	return err
}
// GetDefaultGroup returns the runner's root group.
func (r *Runner) GetDefaultGroup() *lib.Group {
	return r.defaultGroup
}

// GetOptions returns the currently active options.
func (r *Runner) GetOptions() lib.Options {
	return r.Bundle.Options
}

// SetOptions replaces the options and rebuilds the RPS rate limiter.
func (r *Runner) SetOptions(opts lib.Options) {
	r.Bundle.Options = opts

	// Limiter is recreated from scratch; nil means "no limit".
	r.RPSLimit = nil
	if rps := opts.RPS; rps.Valid {
		r.RPSLimit = rate.NewLimiter(rate.Limit(rps.Int64), 1)
	}
}
// Runs an exported function in its own temporary VU, optionally with an argument. Execution is
// interrupted if the context expires. No error is returned if the part does not exist.
func (r *Runner) runPart(ctx context.Context, name string, arg interface{}) (goja.Value, error) {
	vu, err := r.newVU()
	if err != nil {
		return goja.Undefined(), err
	}
	exp := vu.Runtime.Get("exports").ToObject(vu.Runtime)
	if exp == nil {
		return goja.Undefined(), nil
	}
	// A missing or non-callable export is silently treated as a no-op.
	fn, ok := goja.AssertFunction(exp.Get(name))
	if !ok {
		return goja.Undefined(), nil
	}

	ctx, cancel := context.WithCancel(ctx)
	// Interrupt the JS runtime when the context is done; the cancel() below
	// fires it after the call returns, which also ends this goroutine.
	go func() {
		<-ctx.Done()
		vu.Runtime.Interrupt(errInterrupt)
	}()
	v, _, err := vu.runFn(ctx, fn, vu.Runtime.ToValue(arg))
	cancel()
	return v, err
}
// VU is a single virtual user executing an instance of the bundled script.
type VU struct {
	BundleInstance

	Runner        *Runner
	HTTPTransport *http.Transport
	Dialer        *netext.Dialer
	TLSConfig     *tls.Config
	ID            int64 // exposed to JS as __VU
	Iteration     int64 // exposed to JS as __ITER

	Console *Console
	BPool   *bpool.BufferPool

	setupData goja.Value // lazily JS-ified copy of Runner.setupData

	// A VU will track the last context it was called with for cancellation.
	// Note that interruptTrackedCtx is the context that is currently being tracked, while
	// interruptCancel cancels an unrelated context that terminates the tracking goroutine
	// without triggering an interrupt (for if the context changes).
	// There are cleaner ways of handling the interruption problem, but this is a hot path that
	// needs to be called thousands of times per second, which rules out anything that spawns a
	// goroutine per call.
	interruptTrackedCtx context.Context
	interruptCancel     context.CancelFunc
}
// Reconfigure assigns a new VU identity, resets the iteration counter and
// publishes the id to the JS runtime as __VU.
func (u *VU) Reconfigure(id int64) error {
	u.ID, u.Iteration = id, 0
	u.Runtime.Set("__VU", id)
	return nil
}
// RunOnce executes one iteration of the script's default function and
// returns the samples it produced.
func (u *VU) RunOnce(ctx context.Context) ([]stats.Sample, error) {
	// Track the context and interrupt JS execution if it's cancelled.
	if u.interruptTrackedCtx != ctx {
		interCtx, interCancel := context.WithCancel(context.Background())
		if u.interruptCancel != nil {
			// stop the tracker goroutine for the previous context
			u.interruptCancel()
		}
		u.interruptCancel = interCancel
		u.interruptTrackedCtx = ctx
		go func() {
			select {
			case <-interCtx.Done():
				// superseded by a newer tracker; exit without interrupting
			case <-ctx.Done():
				u.Runtime.Interrupt(errInterrupt)
			}
		}()
	}

	// Lazily JS-ify setupData on first run. This is lightweight enough that we can get away with
	// it, and alleviates a problem where setupData wouldn't get populated properly if NewVU() was
	// called before Setup(), which is hard to avoid with how the Executor works w/o complicating
	// the local executor further by deferring SetVUsMax() calls to within the Run() function.
	if u.setupData == nil && u.Runner.setupData != nil {
		u.setupData = u.Runtime.ToValue(u.Runner.setupData)
	}

	// Call the default function.
	_, state, err := u.runFn(ctx, u.Default, u.setupData)
	if err != nil {
		return nil, err
	}
	return state.Samples, nil
}
// runFn invokes fn inside a fresh per-call State (cookie jar, dialer
// counters, limits), then appends the built-in data and iteration-duration
// samples to the state.
func (u *VU) runFn(ctx context.Context, fn goja.Callable, args ...goja.Value) (goja.Value, *common.State, error) {
	cookieJar, err := cookiejar.New(nil)
	if err != nil {
		return goja.Undefined(), nil, err
	}

	state := &common.State{
		Logger:        u.Runner.Logger,
		Options:       u.Runner.Bundle.Options,
		Group:         u.Runner.defaultGroup,
		HTTPTransport: u.HTTPTransport,
		Dialer:        u.Dialer,
		TLSConfig:     u.TLSConfig,
		CookieJar:     cookieJar,
		RPSLimit:      u.Runner.RPSLimit,
		BPool:         u.BPool,
		Vu:            u.ID,
		Iteration:     u.Iteration,
	}

	// Zero out the values, since we may be reusing a connection
	u.Dialer.BytesRead = 0
	u.Dialer.BytesWritten = 0

	newctx := common.WithRuntime(ctx, u.Runtime)
	newctx = common.WithState(newctx, state)
	*u.Context = newctx

	// Publish the iteration number to JS, then advance the counter.
	u.Runtime.Set("__ITER", u.Iteration)
	iter := u.Iteration
	u.Iteration++

	startTime := time.Now()
	v, err := fn(goja.Undefined(), args...) // Actually run the JS script
	t := time.Now()

	// Tag the built-in samples with vu/iter ids when those system tags are enabled.
	tags := map[string]string{}
	if state.Options.SystemTags["vu"] {
		tags["vu"] = strconv.FormatInt(u.ID, 10)
	}
	if state.Options.SystemTags["iter"] {
		tags["iter"] = strconv.FormatInt(iter, 10)
	}

	state.Samples = append(state.Samples,
		stats.Sample{Time: t, Metric: metrics.DataSent, Value: float64(u.Dialer.BytesWritten), Tags: tags},
		stats.Sample{Time: t, Metric: metrics.DataReceived, Value: float64(u.Dialer.BytesRead), Tags: tags},
		stats.Sample{Time: t, Metric: metrics.IterationDuration, Value: stats.D(t.Sub(startTime)), Tags: tags},
	)

	if u.Runner.Bundle.Options.NoConnectionReuse.Bool {
		u.HTTPTransport.CloseIdleConnections()
	}
	return v, state, err
}
Rephrase the error message
/*
*
* k6 - a next-generation load testing tool
* Copyright (C) 2016 Load Impact
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package js
import (
"context"
"crypto/tls"
"encoding/json"
"net"
"net/http"
"net/http/cookiejar"
"strconv"
"time"
"github.com/dop251/goja"
"github.com/loadimpact/k6/js/common"
"github.com/loadimpact/k6/lib"
"github.com/loadimpact/k6/lib/metrics"
"github.com/loadimpact/k6/lib/netext"
"github.com/loadimpact/k6/stats"
"github.com/oxtoacart/bpool"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/afero"
"github.com/viki-org/dnscache"
"golang.org/x/net/http2"
"golang.org/x/time/rate"
)
// errInterrupt is passed to the JS runtime to interrupt execution when a
// context is cancelled.
var errInterrupt = errors.New("context cancelled")

// Runner wraps a compiled script Bundle and creates VUs from it.
type Runner struct {
	Bundle       *Bundle
	Logger       *log.Logger
	defaultGroup *lib.Group // root group for all samples

	BaseDialer net.Dialer
	Resolver   *dnscache.Resolver
	RPSLimit   *rate.Limiter // nil unless an RPS option is set

	setupData interface{} // result of Setup(), shared with all VUs
}
// New compiles raw source into a Bundle and wraps it in a Runner.
func New(src *lib.SourceData, fs afero.Fs, rtOpts lib.RuntimeOptions) (*Runner, error) {
	b, err := NewBundle(src, fs, rtOpts)
	if err != nil {
		return nil, err
	}
	return NewFromBundle(b)
}
// NewFromArchive rebuilds a Bundle from a distributable archive and wraps
// it in a Runner.
func NewFromArchive(arc *lib.Archive, rtOpts lib.RuntimeOptions) (*Runner, error) {
	b, err := NewBundleFromArchive(arc, rtOpts)
	if err != nil {
		return nil, err
	}
	return NewFromBundle(b)
}
// NewFromBundle constructs a Runner around an already-compiled Bundle,
// installing default dialer settings and applying the bundle's options.
func NewFromBundle(b *Bundle) (*Runner, error) {
	root, err := lib.NewGroup("", nil)
	if err != nil {
		return nil, err
	}
	runner := &Runner{
		Bundle:       b,
		Logger:       log.StandardLogger(),
		defaultGroup: root,
		BaseDialer: net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
			DualStack: true,
		},
		Resolver: dnscache.New(0),
	}
	// SetOptions also derives the RPS limiter from the bundle options.
	runner.SetOptions(b.Options)
	return runner, nil
}
// MakeArchive delegates to the underlying Bundle to produce a
// distributable archive of the script and its dependencies.
func (r *Runner) MakeArchive() *lib.Archive {
	return r.Bundle.MakeArchive()
}
// NewVU creates a fresh VU and returns it behind the lib.VU interface.
func (r *Runner) NewVU() (lib.VU, error) {
	u, err := r.newVU()
	if err != nil {
		return nil, err
	}
	return lib.VU(u), nil
}
// newVU instantiates a new bundle instance and wires it into a VU with
// its own dialer, TLS configuration, HTTP transport and console.
func (r *Runner) newVU() (*VU, error) {
	// Instantiate a new bundle, make a VU out of it.
	bi, err := r.Bundle.Instantiate()
	if err != nil {
		return nil, err
	}
	// Resolve optional TLS settings; the zero values are used when unset.
	var cipherSuites []uint16
	if r.Bundle.Options.TLSCipherSuites != nil {
		cipherSuites = *r.Bundle.Options.TLSCipherSuites
	}
	var tlsVersions lib.TLSVersions
	if r.Bundle.Options.TLSVersion != nil {
		tlsVersions = *r.Bundle.Options.TLSVersion
	}
	// Build client certificates and a domain-name -> certificate map for SNI.
	tlsAuth := r.Bundle.Options.TLSAuth
	certs := make([]tls.Certificate, len(tlsAuth))
	nameToCert := make(map[string]*tls.Certificate)
	for i, auth := range tlsAuth {
		for _, name := range auth.Domains {
			// NOTE(review): Certificate() is re-evaluated for every domain of
			// the same auth entry; presumably cheap/idempotent — confirm.
			cert, err := auth.Certificate()
			if err != nil {
				return nil, err
			}
			certs[i] = *cert
			nameToCert[name] = &certs[i]
		}
	}
	// Per-VU dialer so BytesRead/BytesWritten counters are not shared.
	dialer := &netext.Dialer{
		Dialer:    r.BaseDialer,
		Resolver:  r.Resolver,
		Blacklist: r.Bundle.Options.BlacklistIPs,
		Hosts:     r.Bundle.Options.Hosts,
	}
	tlsConfig := &tls.Config{
		InsecureSkipVerify: r.Bundle.Options.InsecureSkipTLSVerify.Bool,
		CipherSuites:       cipherSuites,
		MinVersion:         uint16(tlsVersions.Min),
		MaxVersion:         uint16(tlsVersions.Max),
		Certificates:       certs,
		NameToCertificate:  nameToCert,
		Renegotiation:      tls.RenegotiateFreelyAsClient,
	}
	transport := &http.Transport{
		Proxy:              http.ProxyFromEnvironment,
		TLSClientConfig:    tlsConfig,
		DialContext:        dialer.DialContext,
		DisableCompression: true,
	}
	// Best-effort HTTP/2 enablement; an error leaves plain HTTP/1.1 working.
	_ = http2.ConfigureTransport(transport)
	vu := &VU{
		BundleInstance: *bi,
		Runner:         r,
		HTTPTransport:  transport,
		Dialer:         dialer,
		TLSConfig:      tlsConfig,
		Console:        NewConsole(),
		BPool:          bpool.NewBufferPool(100),
	}
	// Expose console to the script, and shadow the init-only open().
	vu.Runtime.Set("console", common.Bind(vu.Runtime, vu.Console, vu.Context))
	common.BindToGlobal(vu.Runtime, map[string]interface{}{
		"open": func() {
			common.Throw(vu.Runtime, errors.New("\"open\" function is only available to the init code (aka global scope), see https://docs.k6.io/docs/test-life-cycle for more information"))
		},
	})
	// Give the VU an initial sense of identity.
	if err := vu.Reconfigure(0); err != nil {
		return nil, err
	}
	return vu, nil
}
// Setup runs the exported setup() function (if any) in a throwaway VU and
// stores its result, round-tripped through JSON, for later use by VUs.
func (r *Runner) Setup(ctx context.Context) error {
	val, err := r.runPart(ctx, "setup", nil)
	if err != nil {
		return errors.Wrap(err, "setup")
	}
	raw, err := json.Marshal(val.Export())
	if err != nil {
		return errors.Wrap(err, "setup")
	}
	return json.Unmarshal(raw, &r.setupData)
}
// Teardown runs the exported teardown() function (if any) in a throwaway
// VU, passing it the data produced by Setup.
func (r *Runner) Teardown(ctx context.Context) error {
	_, err := r.runPart(ctx, "teardown", r.setupData)
	return err
}
// GetDefaultGroup returns the root group created in NewFromBundle.
func (r *Runner) GetDefaultGroup() *lib.Group {
	return r.defaultGroup
}
// GetOptions returns the bundle's current options.
func (r *Runner) GetOptions() lib.Options {
	return r.Bundle.Options
}
// SetOptions replaces the bundle's options and rebuilds the global RPS
// limiter accordingly (nil when no RPS value is set).
func (r *Runner) SetOptions(opts lib.Options) {
	r.Bundle.Options = opts
	if rps := opts.RPS; rps.Valid {
		r.RPSLimit = rate.NewLimiter(rate.Limit(rps.Int64), 1)
	} else {
		r.RPSLimit = nil
	}
}
// Runs an exported function in its own temporary VU, optionally with an argument. Execution is
// interrupted if the context expires. No error is returned if the part does not exist.
func (r *Runner) runPart(ctx context.Context, name string, arg interface{}) (goja.Value, error) {
	vu, err := r.newVU()
	if err != nil {
		return goja.Undefined(), err
	}
	exp := vu.Runtime.Get("exports").ToObject(vu.Runtime)
	if exp == nil {
		return goja.Undefined(), nil
	}
	// A missing part is not an error; simply do nothing.
	fn, ok := goja.AssertFunction(exp.Get(name))
	if !ok {
		return goja.Undefined(), nil
	}
	// Interrupt the JS runtime if ctx expires while fn runs. cancel()
	// below also releases the watcher goroutine after a normal return;
	// NOTE(review): the late Interrupt it then fires is assumed harmless
	// because this VU is discarded — confirm.
	ctx, cancel := context.WithCancel(ctx)
	go func() {
		<-ctx.Done()
		vu.Runtime.Interrupt(errInterrupt)
	}()
	v, _, err := vu.runFn(ctx, fn, vu.Runtime.ToValue(arg))
	cancel()
	return v, err
}
// VU is a single virtual user with its own goja runtime, dialer and
// HTTP transport.
type VU struct {
	BundleInstance

	Runner        *Runner
	HTTPTransport *http.Transport
	Dialer        *netext.Dialer
	TLSConfig     *tls.Config
	ID            int64 // VU number, exposed to scripts as __VU
	Iteration     int64 // per-VU iteration counter, exposed as __ITER

	Console *Console
	BPool   *bpool.BufferPool

	setupData goja.Value // lazily JS-ified copy of Runner.setupData; see RunOnce

	// A VU will track the last context it was called with for cancellation.
	// Note that interruptTrackedCtx is the context that is currently being tracked, while
	// interruptCancel cancels an unrelated context that terminates the tracking goroutine
	// without triggering an interrupt (for if the context changes).
	// There are cleaner ways of handling the interruption problem, but this is a hot path that
	// needs to be called thousands of times per second, which rules out anything that spawns a
	// goroutine per call.
	interruptTrackedCtx context.Context
	interruptCancel     context.CancelFunc
}
// Reconfigure gives the VU a new identity: it assigns the ID, resets the
// iteration counter and refreshes the __VU global in the JS runtime.
func (u *VU) Reconfigure(id int64) error {
	u.ID = id
	u.Iteration = 0
	u.Runtime.Set("__VU", id)
	return nil
}
// RunOnce executes the script's default function once and returns the
// samples it produced.
func (u *VU) RunOnce(ctx context.Context) ([]stats.Sample, error) {
	// Track the context and interrupt JS execution if it's cancelled.
	// A single long-lived watcher goroutine per tracked context is used
	// instead of one per call; see the comments on the VU struct fields.
	if u.interruptTrackedCtx != ctx {
		interCtx, interCancel := context.WithCancel(context.Background())
		if u.interruptCancel != nil {
			// Stop the watcher for the previously tracked context.
			u.interruptCancel()
		}
		u.interruptCancel = interCancel
		u.interruptTrackedCtx = ctx
		go func() {
			select {
			case <-interCtx.Done():
				// Tracking was retargeted; exit without interrupting.
			case <-ctx.Done():
				u.Runtime.Interrupt(errInterrupt)
			}
		}()
	}
	// Lazily JS-ify setupData on first run. This is lightweight enough that we can get away with
	// it, and alleviates a problem where setupData wouldn't get populated properly if NewVU() was
	// called before Setup(), which is hard to avoid with how the Executor works w/o complicating
	// the local executor further by deferring SetVUsMax() calls to within the Run() function.
	if u.setupData == nil && u.Runner.setupData != nil {
		u.setupData = u.Runtime.ToValue(u.Runner.setupData)
	}
	// Call the default function.
	_, state, err := u.runFn(ctx, u.Default, u.setupData)
	if err != nil {
		return nil, err
	}
	return state.Samples, nil
}
// runFn invokes a JS callable with a fresh per-iteration state (cookie
// jar, counters, context), emits the standard data/duration samples, and
// returns the callable's value along with the state and any error.
func (u *VU) runFn(ctx context.Context, fn goja.Callable, args ...goja.Value) (goja.Value, *common.State, error) {
	// A fresh cookie jar per invocation; cookies do not persist across calls.
	cookieJar, err := cookiejar.New(nil)
	if err != nil {
		return goja.Undefined(), nil, err
	}
	state := &common.State{
		Logger:        u.Runner.Logger,
		Options:       u.Runner.Bundle.Options,
		Group:         u.Runner.defaultGroup,
		HTTPTransport: u.HTTPTransport,
		Dialer:        u.Dialer,
		TLSConfig:     u.TLSConfig,
		CookieJar:     cookieJar,
		RPSLimit:      u.Runner.RPSLimit,
		BPool:         u.BPool,
		Vu:            u.ID,
		Iteration:     u.Iteration,
	}
	// Zero out the values, since we may be reusing a connection
	u.Dialer.BytesRead = 0
	u.Dialer.BytesWritten = 0
	// Make the runtime and state reachable from within the script call.
	newctx := common.WithRuntime(ctx, u.Runtime)
	newctx = common.WithState(newctx, state)
	*u.Context = newctx
	// Publish and then advance the iteration counter; the pre-increment
	// value is kept for tagging the samples below.
	u.Runtime.Set("__ITER", u.Iteration)
	iter := u.Iteration
	u.Iteration++
	startTime := time.Now()
	v, err := fn(goja.Undefined(), args...) // Actually run the JS script
	t := time.Now()
	// Tag samples only with the system tags enabled in the options.
	tags := map[string]string{}
	if state.Options.SystemTags["vu"] {
		tags["vu"] = strconv.FormatInt(u.ID, 10)
	}
	if state.Options.SystemTags["iter"] {
		tags["iter"] = strconv.FormatInt(iter, 10)
	}
	// Emitted even when fn returned an error, so partial traffic is counted.
	state.Samples = append(state.Samples,
		stats.Sample{Time: t, Metric: metrics.DataSent, Value: float64(u.Dialer.BytesWritten), Tags: tags},
		stats.Sample{Time: t, Metric: metrics.DataReceived, Value: float64(u.Dialer.BytesRead), Tags: tags},
		stats.Sample{Time: t, Metric: metrics.IterationDuration, Value: stats.D(t.Sub(startTime)), Tags: tags},
	)
	if u.Runner.Bundle.Options.NoConnectionReuse.Bool {
		u.HTTPTransport.CloseIdleConnections()
	}
	return v, state, err
}
|
// Copyright © 2017 The Kubicorn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"fmt"
"github.com/kris-nova/kubicorn/cutil/logger"
"github.com/kris-nova/kubicorn/state"
"github.com/kris-nova/kubicorn/state/fs"
"github.com/spf13/cobra"
"io/ioutil"
"os"
"os/exec"
)
// EditOptions extends the common command Options with the editor binary
// used to open the state file.
type EditOptions struct {
	Options
	Editor string
}
var eo = &EditOptions{}

// editCmd opens a cluster's stored state in an external editor.
var editCmd = &cobra.Command{
	Use:   "edit <NAME>",
	Short: "Edit a cluster state",
	Long:  `Use this command to edit a state.`,
	Run: func(cmd *cobra.Command, args []string) {
		switch len(args) {
		case 0:
			// BUG FIX: args[0] was previously read unconditionally, which
			// panicked with an index-out-of-range when no name was given.
			// Fall back to the environment instead.
			eo.Name = strEnvDef("KUBICORN_NAME", "")
		case 1:
			eo.Name = args[0]
		default:
			logger.Critical("Too many arguments.")
			os.Exit(1)
		}
		if err := RunEdit(eo); err != nil {
			logger.Critical(err.Error())
			os.Exit(1)
		}
	},
}
// init registers editCmd and its flags; flag defaults fall back to the
// KUBICORN_* environment variables when those are set.
func init() {
	editCmd.Flags().StringVarP(&eo.StateStore, "state-store", "s", strEnvDef("KUBICORN_STATE_STORE", "fs"), "The state store type to use for the cluster")
	editCmd.Flags().StringVarP(&eo.StateStorePath, "state-store-path", "S", strEnvDef("KUBICORN_STATE_STORE_PATH", "./_state"), "The state store path to use")
	editCmd.Flags().StringVarP(&eo.Editor, "editor", "e", strEnvDef("KUBICORN_DEFAULT_EDITOR", "vi"), "The editor used to edit the state store")
	RootCmd.AddCommand(editCmd)
}
// RunEdit loads the cluster state, writes it to a temp file, opens that
// file in the configured editor, and commits the edited result back to
// the state store.
func RunEdit(options *EditOptions) error {
	options.StateStorePath = expandPath(options.StateStorePath)
	name := options.Name

	// Register state store
	var stateStore state.ClusterStorer
	switch options.StateStore {
	case "fs":
		logger.Info("Selected [fs] state store")
		stateStore = fs.NewFileSystemStore(&fs.FileSystemStoreOptions{
			BasePath:    options.StateStorePath,
			ClusterName: name,
		})
	default:
		// BUG FIX: an unknown store type previously fell through and the
		// nil stateStore was dereferenced below.
		return fmt.Errorf("Unknown state store type: %s", options.StateStore)
	}

	// Check if state store exists
	if !stateStore.Exists() {
		return fmt.Errorf("State store [%s] does not exists, can't edit", name)
	}
	stateContent, err := stateStore.ReadStore()
	if err != nil {
		return err
	}

	fpath := os.TempDir() + "/kubicorn_cluster.tmp"
	// WriteFile creates the file itself, so the previous os.Create was
	// redundant — and its error was silently ignored.
	if err := ioutil.WriteFile(fpath, stateContent, 0664); err != nil {
		return err
	}
	// Clean up the temp file on every exit path instead of repeating
	// os.Remove before each return.
	defer os.Remove(fpath)

	path, err := exec.LookPath(options.Editor)
	if err != nil {
		return err
	}
	cmd := exec.Command(path, fpath)
	if err := cmd.Start(); err != nil {
		return err
	}
	if err := cmd.Wait(); err != nil {
		logger.Debug("Error while editing. Error: %v", err)
		return err
	}
	logger.Info("Successful edit")

	data, err := ioutil.ReadFile(fpath)
	if err != nil {
		return err
	}
	cluster, err := stateStore.BytesToCluster(data)
	if err != nil {
		return err
	}
	// Init new state store with the cluster resource
	if err := stateStore.Commit(cluster); err != nil {
		return fmt.Errorf("Unable to init state store: %v", err)
	}
	return nil
}
Added a fallback that reads the cluster name from the KUBICORN_NAME environment variable when no name argument is given.
// Copyright © 2017 The Kubicorn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"fmt"
"github.com/kris-nova/kubicorn/cutil/logger"
"github.com/kris-nova/kubicorn/state"
"github.com/kris-nova/kubicorn/state/fs"
"github.com/spf13/cobra"
"io/ioutil"
"os"
"os/exec"
)
// EditOptions extends the common command Options with the editor binary
// used to open the state file.
type EditOptions struct {
	Options
	Editor string
}
var eo = &EditOptions{}

// editCmd opens a cluster's stored state in an external editor.
var editCmd = &cobra.Command{
	Use:   "edit <NAME>",
	Short: "Edit a cluster state",
	Long:  `Use this command to edit a state.`,
	Run: func(cmd *cobra.Command, args []string) {
		if len(args) == 0 {
			// BUG FIX: this previously assigned to ao.Name (another
			// command's options struct), so the environment fallback never
			// reached RunEdit and eo.Name stayed empty.
			eo.Name = strEnvDef("KUBICORN_NAME", "")
		} else if len(args) > 1 {
			logger.Critical("Too many arguments.")
			os.Exit(1)
		} else {
			eo.Name = args[0]
		}
		err := RunEdit(eo)
		if err != nil {
			logger.Critical(err.Error())
			os.Exit(1)
		}
	},
}
// init registers editCmd and its flags; flag defaults fall back to the
// KUBICORN_* environment variables when those are set.
func init() {
	editCmd.Flags().StringVarP(&eo.StateStore, "state-store", "s", strEnvDef("KUBICORN_STATE_STORE", "fs"), "The state store type to use for the cluster")
	editCmd.Flags().StringVarP(&eo.StateStorePath, "state-store-path", "S", strEnvDef("KUBICORN_STATE_STORE_PATH", "./_state"), "The state store path to use")
	editCmd.Flags().StringVarP(&eo.Editor, "editor", "e", strEnvDef("KUBICORN_DEFAULT_EDITOR", "vi"), "The editor used to edit the state store")
	RootCmd.AddCommand(editCmd)
}
// RunEdit loads the cluster state, writes it to a temp file, opens that
// file in the configured editor, and commits the edited result back to
// the state store.
func RunEdit(options *EditOptions) error {
	options.StateStorePath = expandPath(options.StateStorePath)
	name := options.Name

	// Register state store
	var stateStore state.ClusterStorer
	switch options.StateStore {
	case "fs":
		logger.Info("Selected [fs] state store")
		stateStore = fs.NewFileSystemStore(&fs.FileSystemStoreOptions{
			BasePath:    options.StateStorePath,
			ClusterName: name,
		})
	default:
		// BUG FIX: an unknown store type previously fell through and the
		// nil stateStore was dereferenced below.
		return fmt.Errorf("Unknown state store type: %s", options.StateStore)
	}

	// Check if state store exists
	if !stateStore.Exists() {
		return fmt.Errorf("State store [%s] does not exists, can't edit", name)
	}
	stateContent, err := stateStore.ReadStore()
	if err != nil {
		return err
	}

	fpath := os.TempDir() + "/kubicorn_cluster.tmp"
	// WriteFile creates the file itself, so the previous os.Create was
	// redundant — and its error was silently ignored.
	if err := ioutil.WriteFile(fpath, stateContent, 0664); err != nil {
		return err
	}
	// Clean up the temp file on every exit path instead of repeating
	// os.Remove before each return.
	defer os.Remove(fpath)

	path, err := exec.LookPath(options.Editor)
	if err != nil {
		return err
	}
	cmd := exec.Command(path, fpath)
	if err := cmd.Start(); err != nil {
		return err
	}
	if err := cmd.Wait(); err != nil {
		logger.Debug("Error while editing. Error: %v", err)
		return err
	}
	logger.Info("Successful edit")

	data, err := ioutil.ReadFile(fpath)
	if err != nil {
		return err
	}
	cluster, err := stateStore.BytesToCluster(data)
	if err != nil {
		return err
	}
	// Init new state store with the cluster resource
	if err := stateStore.Commit(cluster); err != nil {
		return fmt.Errorf("Unable to init state store: %v", err)
	}
	return nil
}
|
// Copyright 2017 The kubecfg authors
//
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"bytes"
"encoding/json"
goflag "flag"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh/terminal"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/tools/clientcmd"
"github.com/ksonnet/kubecfg/metadata"
"github.com/ksonnet/kubecfg/template"
"github.com/ksonnet/kubecfg/utils"
// Register auth plugins
_ "k8s.io/client-go/plugin/pkg/client/auth"
)
// Flag names shared by the kubecfg subcommands.
const (
	flagVerbose    = "verbose"
	flagJpath      = "jpath"
	flagExtVar     = "ext-str"
	flagExtVarFile = "ext-str-file"
	flagTlaVar     = "tla-str"
	flagTlaVarFile = "tla-str-file"
	flagResolver   = "resolve-images"
	flagResolvFail = "resolve-images-error"
	flagAPISpec    = "api-spec"

	// For use in the commands (e.g., diff, apply, delete) that require either an
	// environment or the -f flag.
	flagFile      = "file"
	flagFileShort = "f"
)

// clientConfig and overrides hold the kubeconfig loading state wired up in init().
var clientConfig clientcmd.ClientConfig
var overrides clientcmd.ConfigOverrides
// init wires up the persistent kubecfg flags plus the standard
// clientcmd/kubectl kubeconfig flags on RootCmd.
func init() {
	RootCmd.PersistentFlags().CountP(flagVerbose, "v", "Increase verbosity. May be given multiple times.")
	RootCmd.PersistentFlags().StringSliceP(flagJpath, "J", nil, "Additional jsonnet library search path")
	RootCmd.PersistentFlags().StringSliceP(flagExtVar, "V", nil, "Values of external variables")
	RootCmd.PersistentFlags().StringSlice(flagExtVarFile, nil, "Read external variable from a file")
	RootCmd.PersistentFlags().StringSliceP(flagTlaVar, "A", nil, "Values of top level arguments")
	RootCmd.PersistentFlags().StringSlice(flagTlaVarFile, nil, "Read top level argument from a file")
	RootCmd.PersistentFlags().String(flagResolver, "noop", "Change implementation of resolveImage native function. One of: noop, registry")
	RootCmd.PersistentFlags().String(flagResolvFail, "warn", "Action when resolveImage fails. One of ignore,warn,error")
	// The "usual" clientcmd/kubectl flags
	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
	loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
	kflags := clientcmd.RecommendedConfigOverrideFlags("")
	RootCmd.PersistentFlags().StringVar(&loadingRules.ExplicitPath, "kubeconfig", "", "Path to a kube config. Only required if out-of-cluster")
	clientcmd.BindOverrideFlags(&overrides, RootCmd.PersistentFlags(), kflags)
	clientConfig = clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, &overrides, os.Stdin)
	// NOTE(review): Set's error is ignored; best-effort default — confirm
	// the flag exists in this flag set.
	RootCmd.PersistentFlags().Set("logtostderr", "true")
}
// RootCmd is the root of cobra subcommand tree
var RootCmd = &cobra.Command{
	Use:           "kubecfg",
	Short:         "Synchronise Kubernetes resources with config files",
	SilenceErrors: true,
	SilenceUsage:  true,
	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
		// Swallow any stray stdlib flags, then route logging to the
		// command's error stream with our custom formatter.
		goflag.CommandLine.Parse([]string{})
		flags := cmd.Flags()
		out := cmd.OutOrStderr()
		log.SetOutput(out)
		logFmt := NewLogFormatter(out)
		log.SetFormatter(logFmt)
		// Map the repeatable -v count onto a logrus level.
		verbosity, err := flags.GetCount(flagVerbose)
		if err != nil {
			return err
		}
		log.SetLevel(logLevel(verbosity))
		return nil
	},
}
// clientConfig.Namespace() is broken in client-go 3.0:
// namespace in config erroneously overrides explicit --namespace
func defaultNamespace(c clientcmd.ClientConfig) (string, error) {
	if override := overrides.Context.Namespace; override != "" {
		return override, nil
	}
	ns, _, err := c.Namespace()
	return ns, err
}
// logLevel maps the -v flag count onto a logrus level: 0 -> Warn,
// 1 -> Info, anything else -> Debug.
func logLevel(verbosity int) log.Level {
	if verbosity == 0 {
		return log.WarnLevel
	}
	if verbosity == 1 {
		return log.InfoLevel
	}
	return log.DebugLevel
}
// logFormatter colourises log lines when writing to a terminal.
type logFormatter struct {
	escapes  *terminal.EscapeCodes // set only when colorise is true
	colorise bool
}
// NewLogFormatter creates a new log.Formatter customised for writer
func NewLogFormatter(out io.Writer) log.Formatter {
	var ret = logFormatter{}
	// Colour only when the destination is a real terminal.
	if f, ok := out.(*os.File); ok {
		ret.colorise = terminal.IsTerminal(int(f.Fd()))
		ret.escapes = terminal.NewTerminal(f, "").Escape
	}
	return &ret
}
// levelEsc picks the terminal escape sequence for a log level: red for
// errors and worse, yellow for warnings, none for debug, blue otherwise.
func (f *logFormatter) levelEsc(level log.Level) []byte {
	switch level {
	case log.ErrorLevel, log.FatalLevel, log.PanicLevel:
		return f.escapes.Red
	case log.WarnLevel:
		return f.escapes.Yellow
	case log.DebugLevel:
		return []byte{}
	}
	return f.escapes.Blue
}
// Format renders a log entry as an optionally-colourised "LEVEL message"
// line terminated by a newline.
func (f *logFormatter) Format(e *log.Entry) ([]byte, error) {
	var out bytes.Buffer
	if f.colorise {
		out.Write(f.levelEsc(e.Level))
		fmt.Fprintf(&out, "%-5s ", strings.ToUpper(e.Level.String()))
		out.Write(f.escapes.Reset)
	}
	out.WriteString(strings.TrimSpace(e.Message))
	out.WriteByte('\n')
	return out.Bytes(), nil
}
// newExpander builds a template.Expander from the persistent flags and
// the KUBECFG_JPATH environment variable.
func newExpander(cmd *cobra.Command) (*template.Expander, error) {
	flags := cmd.Flags()
	spec := template.Expander{}
	var err error

	// Env search path is kept separate from the flag-supplied one.
	spec.EnvJPath = filepath.SplitList(os.Getenv("KUBECFG_JPATH"))

	spec.FlagJpath, err = flags.GetStringSlice(flagJpath)
	if err != nil {
		return nil, err
	}

	spec.ExtVars, err = flags.GetStringSlice(flagExtVar)
	if err != nil {
		return nil, err
	}

	spec.ExtVarFiles, err = flags.GetStringSlice(flagExtVarFile)
	if err != nil {
		return nil, err
	}

	spec.TlaVars, err = flags.GetStringSlice(flagTlaVar)
	if err != nil {
		return nil, err
	}

	spec.TlaVarFiles, err = flags.GetStringSlice(flagTlaVarFile)
	if err != nil {
		return nil, err
	}

	spec.Resolver, err = flags.GetString(flagResolver)
	if err != nil {
		return nil, err
	}

	spec.FailAction, err = flags.GetString(flagResolvFail)
	if err != nil {
		return nil, err
	}

	return &spec, nil
}
// dumpJSON renders v as indented JSON for debug output; if encoding
// fails, the error text is returned instead.
func dumpJSON(v interface{}) string {
	var out bytes.Buffer
	enc := json.NewEncoder(&out)
	enc.SetIndent("", " ")
	if err := enc.Encode(v); err != nil {
		return err.Error()
	}
	return out.String()
}
// restClientPool builds a dynamic client pool and a cached discovery
// client from the active kubeconfig.
func restClientPool(cmd *cobra.Command) (dynamic.ClientPool, discovery.DiscoveryInterface, error) {
	conf, err := clientConfig.ClientConfig()
	if err != nil {
		return nil, nil, err
	}
	disco, err := discovery.NewDiscoveryClientForConfig(conf)
	if err != nil {
		return nil, nil, err
	}
	// Cache discovery results to avoid repeated round-trips to the API server.
	discoCache := utils.NewMemcachedDiscoveryClient(disco)
	mapper := discovery.NewDeferredDiscoveryRESTMapper(discoCache, dynamic.VersionInterfaces)
	pathresolver := dynamic.LegacyAPIPathResolverFunc
	pool := dynamic.NewClientPool(conf, mapper, pathresolver)
	return pool, discoCache, nil
}
// addEnvCmdFlags adds the flags that are common to the family of commands
// whose form is `[<env>|-f <file-name>]`, e.g., `apply` and `delete`.
func addEnvCmdFlags(cmd *cobra.Command) {
	cmd.PersistentFlags().StringArrayP(flagFile, flagFileShort, nil, "Filename or directory that contains the configuration to apply (accepts YAML, JSON, and Jsonnet)")
}
// parseEnvCmd parses the family of commands that come in the form `[<env>|-f
// <file-name>]`, e.g., `apply` and `delete`.
func parseEnvCmd(cmd *cobra.Command, args []string) (*string, []string, error) {
	files, err := cmd.Flags().GetStringArray(flagFile)
	if err != nil {
		return nil, nil, err
	}
	// A single positional argument is taken to be the environment name.
	var env *string
	if len(args) == 1 {
		env = &args[0]
	}
	return env, files, nil
}
// expandEnvCmdObjs finds and expands templates for the family of commands of
// the form `[<env>|-f <file-name>]`, e.g., `apply` and `delete`. That is, if
// the user passes a list of files, we will expand all templates in those files,
// while if a user passes an environment name, we will expand all component
// files using that environment.
func expandEnvCmdObjs(cmd *cobra.Command, args []string) ([]*unstructured.Unstructured, error) {
	env, fileNames, err := parseEnvCmd(cmd, args)
	if err != nil {
		return nil, err
	}
	cwd, err := os.Getwd()
	if err != nil {
		return nil, err
	}
	expander, err := newExpander(cmd)
	if err != nil {
		return nil, err
	}
	//
	// Get all filenames that contain templates to expand. Importantly, we need to
	// enforce the form `[<env-name>|-f <file-name>]`; that is, we need to make
	// sure that the user either passed an environment name or a `-f` flag.
	//
	envPresent := env != nil
	filesPresent := len(fileNames) > 0
	// This is equivalent to: `if !xor(envPresent, filesPresent) {`
	if envPresent && filesPresent {
		return nil, fmt.Errorf("Either an environment name or a file list is required, but not both")
	} else if !envPresent && !filesPresent {
		return nil, fmt.Errorf("Must specify either an environment or a file list")
	}
	if envPresent {
		// Resolve the environment's library paths and component files
		// from the ksonnet app metadata rooted at the current directory.
		manager, err := metadata.Find(metadata.AbsPath(cwd))
		if err != nil {
			return nil, err
		}
		libPath, envLibPath := manager.LibPaths(*env)
		expander.FlagJpath = append([]string{string(libPath), string(envLibPath)}, expander.FlagJpath...)
		// Reuses the outer fileNames/err; no shadowing here.
		fileNames, err = manager.ComponentPaths()
		if err != nil {
			return nil, err
		}
	}
	//
	// Expand templates.
	//
	return expander.Expand(fileNames)
}
Set default log level to 'Info'
Currently 'Info' level logs are only shown with the '-v' flag. This
makes commands without the '-v' flag of little use to users, especially
on success cases, due to no output.
This commit will set the default log level to 'Info', and passing a '-v'
flag will log at a 'Debug' level.
// Copyright 2017 The kubecfg authors
//
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"bytes"
"encoding/json"
goflag "flag"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh/terminal"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/tools/clientcmd"
"github.com/ksonnet/kubecfg/metadata"
"github.com/ksonnet/kubecfg/template"
"github.com/ksonnet/kubecfg/utils"
// Register auth plugins
_ "k8s.io/client-go/plugin/pkg/client/auth"
)
// Flag names shared by the kubecfg subcommands.
const (
	flagVerbose    = "verbose"
	flagJpath      = "jpath"
	flagExtVar     = "ext-str"
	flagExtVarFile = "ext-str-file"
	flagTlaVar     = "tla-str"
	flagTlaVarFile = "tla-str-file"
	flagResolver   = "resolve-images"
	flagResolvFail = "resolve-images-error"
	flagAPISpec    = "api-spec"

	// For use in the commands (e.g., diff, apply, delete) that require either an
	// environment or the -f flag.
	flagFile      = "file"
	flagFileShort = "f"
)

// clientConfig and overrides hold the kubeconfig loading state wired up in init().
var clientConfig clientcmd.ClientConfig
var overrides clientcmd.ConfigOverrides
// init wires up the persistent kubecfg flags plus the standard
// clientcmd/kubectl kubeconfig flags on RootCmd.
func init() {
	RootCmd.PersistentFlags().CountP(flagVerbose, "v", "Increase verbosity. May be given multiple times.")
	RootCmd.PersistentFlags().StringSliceP(flagJpath, "J", nil, "Additional jsonnet library search path")
	RootCmd.PersistentFlags().StringSliceP(flagExtVar, "V", nil, "Values of external variables")
	RootCmd.PersistentFlags().StringSlice(flagExtVarFile, nil, "Read external variable from a file")
	RootCmd.PersistentFlags().StringSliceP(flagTlaVar, "A", nil, "Values of top level arguments")
	RootCmd.PersistentFlags().StringSlice(flagTlaVarFile, nil, "Read top level argument from a file")
	RootCmd.PersistentFlags().String(flagResolver, "noop", "Change implementation of resolveImage native function. One of: noop, registry")
	RootCmd.PersistentFlags().String(flagResolvFail, "warn", "Action when resolveImage fails. One of ignore,warn,error")
	// The "usual" clientcmd/kubectl flags
	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
	loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
	kflags := clientcmd.RecommendedConfigOverrideFlags("")
	RootCmd.PersistentFlags().StringVar(&loadingRules.ExplicitPath, "kubeconfig", "", "Path to a kube config. Only required if out-of-cluster")
	clientcmd.BindOverrideFlags(&overrides, RootCmd.PersistentFlags(), kflags)
	clientConfig = clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, &overrides, os.Stdin)
	// NOTE(review): Set's error is ignored; best-effort default — confirm
	// the flag exists in this flag set.
	RootCmd.PersistentFlags().Set("logtostderr", "true")
}
// RootCmd is the root of cobra subcommand tree
var RootCmd = &cobra.Command{
	Use:           "kubecfg",
	Short:         "Synchronise Kubernetes resources with config files",
	SilenceErrors: true,
	SilenceUsage:  true,
	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
		// Swallow any stray stdlib flags, then route logging to the
		// command's error stream with our custom formatter.
		goflag.CommandLine.Parse([]string{})
		flags := cmd.Flags()
		out := cmd.OutOrStderr()
		log.SetOutput(out)
		logFmt := NewLogFormatter(out)
		log.SetFormatter(logFmt)
		// Map the repeatable -v count onto a logrus level.
		verbosity, err := flags.GetCount(flagVerbose)
		if err != nil {
			return err
		}
		log.SetLevel(logLevel(verbosity))
		return nil
	},
}
// clientConfig.Namespace() is broken in client-go 3.0:
// namespace in config erroneously overrides explicit --namespace
func defaultNamespace(c clientcmd.ClientConfig) (string, error) {
	if override := overrides.Context.Namespace; override != "" {
		return override, nil
	}
	ns, _, err := c.Namespace()
	return ns, err
}
// logLevel maps the -v flag count onto a logrus level: Info by default,
// Debug for any non-zero verbosity.
func logLevel(verbosity int) log.Level {
	if verbosity == 0 {
		return log.InfoLevel
	}
	return log.DebugLevel
}
// logFormatter colourises log lines when writing to a terminal.
type logFormatter struct {
	escapes  *terminal.EscapeCodes // set only when colorise is true
	colorise bool
}
// NewLogFormatter creates a new log.Formatter customised for writer
func NewLogFormatter(out io.Writer) log.Formatter {
	var ret = logFormatter{}
	// Colour only when the destination is a real terminal.
	if f, ok := out.(*os.File); ok {
		ret.colorise = terminal.IsTerminal(int(f.Fd()))
		ret.escapes = terminal.NewTerminal(f, "").Escape
	}
	return &ret
}
// levelEsc picks the terminal escape sequence for a log level: red for
// errors and worse, yellow for warnings, none for debug, blue otherwise.
func (f *logFormatter) levelEsc(level log.Level) []byte {
	switch level {
	case log.ErrorLevel, log.FatalLevel, log.PanicLevel:
		return f.escapes.Red
	case log.WarnLevel:
		return f.escapes.Yellow
	case log.DebugLevel:
		return []byte{}
	}
	return f.escapes.Blue
}
// Format renders a log entry as an optionally-colourised "LEVEL message"
// line terminated by a newline.
func (f *logFormatter) Format(e *log.Entry) ([]byte, error) {
	var out bytes.Buffer
	if f.colorise {
		out.Write(f.levelEsc(e.Level))
		fmt.Fprintf(&out, "%-5s ", strings.ToUpper(e.Level.String()))
		out.Write(f.escapes.Reset)
	}
	out.WriteString(strings.TrimSpace(e.Message))
	out.WriteByte('\n')
	return out.Bytes(), nil
}
// newExpander builds a template.Expander from the persistent flags and
// the KUBECFG_JPATH environment variable.
func newExpander(cmd *cobra.Command) (*template.Expander, error) {
	flags := cmd.Flags()
	spec := template.Expander{}
	var err error

	// Env search path is kept separate from the flag-supplied one.
	spec.EnvJPath = filepath.SplitList(os.Getenv("KUBECFG_JPATH"))

	spec.FlagJpath, err = flags.GetStringSlice(flagJpath)
	if err != nil {
		return nil, err
	}

	spec.ExtVars, err = flags.GetStringSlice(flagExtVar)
	if err != nil {
		return nil, err
	}

	spec.ExtVarFiles, err = flags.GetStringSlice(flagExtVarFile)
	if err != nil {
		return nil, err
	}

	spec.TlaVars, err = flags.GetStringSlice(flagTlaVar)
	if err != nil {
		return nil, err
	}

	spec.TlaVarFiles, err = flags.GetStringSlice(flagTlaVarFile)
	if err != nil {
		return nil, err
	}

	spec.Resolver, err = flags.GetString(flagResolver)
	if err != nil {
		return nil, err
	}

	spec.FailAction, err = flags.GetString(flagResolvFail)
	if err != nil {
		return nil, err
	}

	return &spec, nil
}
// dumpJSON renders v as indented JSON for debug output; if encoding
// fails, the error text is returned instead.
func dumpJSON(v interface{}) string {
	var out bytes.Buffer
	enc := json.NewEncoder(&out)
	enc.SetIndent("", " ")
	if err := enc.Encode(v); err != nil {
		return err.Error()
	}
	return out.String()
}
// restClientPool builds a dynamic client pool and a cached discovery
// client from the active kubeconfig.
func restClientPool(cmd *cobra.Command) (dynamic.ClientPool, discovery.DiscoveryInterface, error) {
	conf, err := clientConfig.ClientConfig()
	if err != nil {
		return nil, nil, err
	}
	disco, err := discovery.NewDiscoveryClientForConfig(conf)
	if err != nil {
		return nil, nil, err
	}
	// Cache discovery results to avoid repeated round-trips to the API server.
	discoCache := utils.NewMemcachedDiscoveryClient(disco)
	mapper := discovery.NewDeferredDiscoveryRESTMapper(discoCache, dynamic.VersionInterfaces)
	pathresolver := dynamic.LegacyAPIPathResolverFunc
	pool := dynamic.NewClientPool(conf, mapper, pathresolver)
	return pool, discoCache, nil
}
// addEnvCmdFlags adds the flags that are common to the family of commands
// whose form is `[<env>|-f <file-name>]`, e.g., `apply` and `delete`.
func addEnvCmdFlags(cmd *cobra.Command) {
	cmd.PersistentFlags().StringArrayP(flagFile, flagFileShort, nil, "Filename or directory that contains the configuration to apply (accepts YAML, JSON, and Jsonnet)")
}
// parseEnvCmd parses the family of commands that come in the form `[<env>|-f
// <file-name>]`, e.g., `apply` and `delete`.
func parseEnvCmd(cmd *cobra.Command, args []string) (*string, []string, error) {
	files, err := cmd.Flags().GetStringArray(flagFile)
	if err != nil {
		return nil, nil, err
	}
	// A single positional argument is taken to be the environment name.
	var env *string
	if len(args) == 1 {
		env = &args[0]
	}
	return env, files, nil
}
// expandEnvCmdObjs finds and expands templates for the family of commands of
// the form `[<env>|-f <file-name>]`, e.g., `apply` and `delete`. That is, if
// the user passes a list of files, we will expand all templates in those files,
// while if a user passes an environment name, we will expand all component
// files using that environment.
func expandEnvCmdObjs(cmd *cobra.Command, args []string) ([]*unstructured.Unstructured, error) {
	env, fileNames, err := parseEnvCmd(cmd, args)
	if err != nil {
		return nil, err
	}
	cwd, err := os.Getwd()
	if err != nil {
		return nil, err
	}
	expander, err := newExpander(cmd)
	if err != nil {
		return nil, err
	}
	//
	// Get all filenames that contain templates to expand. Importantly, we need to
	// enforce the form `[<env-name>|-f <file-name>]`; that is, we need to make
	// sure that the user either passed an environment name or a `-f` flag.
	//
	envPresent := env != nil
	filesPresent := len(fileNames) > 0
	// This is equivalent to: `if !xor(envPresent, filesPresent) {`
	if envPresent && filesPresent {
		return nil, fmt.Errorf("Either an environment name or a file list is required, but not both")
	} else if !envPresent && !filesPresent {
		return nil, fmt.Errorf("Must specify either an environment or a file list")
	}
	if envPresent {
		// Resolve the environment's library paths and component files
		// from the ksonnet app metadata rooted at the current directory.
		manager, err := metadata.Find(metadata.AbsPath(cwd))
		if err != nil {
			return nil, err
		}
		libPath, envLibPath := manager.LibPaths(*env)
		expander.FlagJpath = append([]string{string(libPath), string(envLibPath)}, expander.FlagJpath...)
		// Reuses the outer fileNames/err; no shadowing here.
		fileNames, err = manager.ComponentPaths()
		if err != nil {
			return nil, err
		}
	}
	//
	// Expand templates.
	//
	return expander.Expand(fileNames)
}
|
// Copyright © 2017 Circonus, Inc. <support@circonus.com>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
package cmd
import (
"encoding/json"
"fmt"
"io"
stdlog "log"
"os"
"time"
"github.com/circonus-labs/circonus-agent/internal/agent"
"github.com/circonus-labs/circonus-agent/internal/config"
"github.com/circonus-labs/circonus-agent/internal/config/defaults"
"github.com/circonus-labs/circonus-agent/internal/release"
"github.com/pkg/errors"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// cfgFile holds the config file path supplied via --config/-c; empty means
// search the default locations (see initConfig).
var cfgFile string

// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
	Use:   release.NAME,
	Short: "Circonus Host Agent",
	Long: `The Circonus host agent daemon provides a simple mechanism
to expose systems and application metrics to Circonus.
It inventories all executable programs in its plugin directory
and executes them upon external request, returning results
in JSON format.`,
	PersistentPreRunE: initLogging,
	Run: func(cmd *cobra.Command, args []string) {
		//
		// show version and exit
		//
		if viper.GetBool(config.KeyShowVersion) {
			fmt.Printf("%s v%s - commit: %s, date: %s, tag: %s\n", release.NAME, release.VERSION, release.COMMIT, release.DATE, release.TAG)
			return
		}
		//
		// show configuration and exit
		//
		if viper.GetBool(config.KeyShowConfig) {
			// NOTE(review): showConfig's error return is silently ignored here.
			showConfig(os.Stdout)
			return
		}
		log.Info().
			Int("pid", os.Getpid()).
			Str("name", release.NAME).
			Str("ver", release.VERSION).Msg("Starting")
		// Build and run the agent; both failure paths exit the process.
		a, err := agent.New()
		if err != nil {
			log.Fatal().Err(err).Msg("initializing")
		}
		if err := a.Start(); err != nil {
			log.Fatal().Err(err).Msg("starting agent")
		}
	},
}
// init wires the standard library logger through zerolog, registers the viper
// config initializer, and declares every CLI flag, binding each flag to its
// viper key and (where applicable) environment variable and default value.
// NOTE(review): errors returned by viper.BindPFlag/viper.BindEnv are ignored
// throughout; presumably harmless for these static keys — confirm.
func init() {
	zerolog.TimeFieldFormat = time.RFC3339Nano
	zerolog.SetGlobalLevel(zerolog.InfoLevel)
	// Route the standard library logger through zerolog so all output is
	// structured and synchronized on stderr.
	zlog := zerolog.New(zerolog.SyncWriter(os.Stderr)).With().Timestamp().Logger()
	log.Logger = zlog
	stdlog.SetFlags(0)
	stdlog.SetOutput(zlog)
	cobra.OnInitialize(initConfig)
	//
	// Basic
	//
	{
		var (
			longOpt  = "config"
			shortOpt = "c"
			// NOTE(review): description is missing a closing ")" after
			// "(json|toml|yaml)".
			description = "config file (default is " + defaults.EtcPath + "/" + release.NAME + ".(json|toml|yaml)"
		)
		RootCmd.PersistentFlags().StringVarP(&cfgFile, longOpt, shortOpt, "", description)
	}
	{
		const (
			key         = config.KeyListen
			longOpt     = "listen"
			shortOpt    = "l"
			envVar      = release.ENVPREFIX + "_LISTEN"
			description = "Listen address and port [[IP]:[PORT]]" + `(default "` + defaults.Listen + `")`
		)
		RootCmd.Flags().StringP(longOpt, shortOpt, "", description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
	}
	{
		const (
			key         = config.KeyPluginDir
			longOpt     = "plugin-dir"
			shortOpt    = "p"
			envVar      = release.ENVPREFIX + "_PLUGIN_DIR"
			description = "Plugin directory"
		)
		RootCmd.Flags().StringP(longOpt, shortOpt, defaults.PluginPath, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.PluginPath)
	}
	//
	// Reverse mode
	//
	{
		const (
			key         = config.KeyReverse
			longOpt     = "reverse"
			shortOpt    = "r"
			envVar      = release.ENVPREFIX + "_REVERSE"
			description = "Enable reverse connection"
		)
		RootCmd.Flags().BoolP(longOpt, shortOpt, defaults.Reverse, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.Reverse)
	}
	{
		const (
			key          = config.KeyReverseCID
			longOpt      = "reverse-cid"
			defaultValue = ""
			envVar       = release.ENVPREFIX + "_REVERSE_CID"
			description  = "Check Bundle ID for reverse connection"
		)
		RootCmd.Flags().String(longOpt, defaultValue, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
	}
	{
		const (
			key         = config.KeyReverseTarget
			longOpt     = "reverse-target"
			envVar      = release.ENVPREFIX + "_REVERSE_TARGET"
			description = "Target host"
		)
		RootCmd.Flags().String(longOpt, defaults.Target, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.Target)
	}
	{
		const (
			key         = config.KeyReverseCreateCheck
			longOpt     = "reverse-create-check"
			envVar      = release.ENVPREFIX + "_REVERSE_CREATE_CHECK"
			description = "Create check if one cannot be found"
		)
		RootCmd.Flags().Bool(longOpt, defaults.ReverseCreateCheck, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.ReverseCreateCheck)
	}
	{
		const (
			key         = config.KeyReverseCreateCheckBroker
			longOpt     = "reverse-create-check-broker"
			envVar      = release.ENVPREFIX + "_REVERSE_CREATE_CHECK_BROKER"
			description = "Broker to use, if creating a check"
		)
		RootCmd.Flags().String(longOpt, defaults.ReverseCreateCheckBroker, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.ReverseCreateCheckBroker)
	}
	{
		const (
			key         = config.KeyReverseCreateCheckTitle
			longOpt     = "reverse-create-check-title"
			envVar      = release.ENVPREFIX + "_REVERSE_CREATE_CHECK_TITLE"
			description = "Title [display name] to use, if creating a check"
		)
		RootCmd.Flags().String(longOpt, defaults.ReverseCreateCheckTitle, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.ReverseCreateCheckTitle)
	}
	{
		const (
			key         = config.KeyReverseCreateCheckTags
			longOpt     = "reverse-create-check-tags"
			envVar      = release.ENVPREFIX + "_REVERSE_CREATE_CHECK_TAGS"
			description = "Tags [comma separated list] to use, if creating a check"
		)
		RootCmd.Flags().String(longOpt, defaults.ReverseCreateCheckTags, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.ReverseCreateCheckTags)
	}
	{
		const (
			key          = config.KeyReverseBrokerCAFile
			longOpt      = "reverse-broker-ca-file"
			defaultValue = ""
			envVar       = release.ENVPREFIX + "_REVERSE_BROKER_CA_FILE"
			description  = "Broker CA certificate file"
		)
		RootCmd.Flags().String(longOpt, defaultValue, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
	}
	//
	// API
	//
	{
		const (
			key          = config.KeyAPITokenKey
			longOpt      = "api-key"
			defaultValue = ""
			envVar       = release.ENVPREFIX + "_API_KEY"
			description  = "Circonus API Token key"
		)
		RootCmd.Flags().String(longOpt, defaultValue, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
	}
	{
		const (
			key         = config.KeyAPITokenApp
			longOpt     = "api-app"
			envVar      = release.ENVPREFIX + "_API_APP"
			description = "Circonus API Token app"
		)
		RootCmd.Flags().String(longOpt, defaults.APIApp, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.APIApp)
	}
	{
		const (
			key         = config.KeyAPIURL
			longOpt     = "api-url"
			envVar      = release.ENVPREFIX + "_API_URL"
			description = "Circonus API URL"
		)
		RootCmd.Flags().String(longOpt, defaults.APIURL, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.APIURL)
	}
	{
		const (
			key          = config.KeyAPICAFile
			longOpt      = "api-ca-file"
			defaultValue = ""
			envVar       = release.ENVPREFIX + "_API_CA_FILE"
			description  = "Circonus API CA certificate file"
		)
		RootCmd.Flags().String(longOpt, defaultValue, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
	}
	//
	// SSL
	//
	{
		const (
			key          = config.KeySSLListen
			longOpt      = "ssl-listen"
			defaultValue = ""
			envVar       = release.ENVPREFIX + "_SSL_LISTEN"
			description  = "SSL listen address and port [IP]:[PORT] - setting enables SSL"
		)
		RootCmd.Flags().String(longOpt, defaultValue, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
	}
	{
		const (
			key         = config.KeySSLCertFile
			longOpt     = "ssl-cert-file"
			envVar      = release.ENVPREFIX + "_SSL_CERT_FILE"
			description = "SSL Certificate file (PEM cert and CAs concatenated together)"
		)
		RootCmd.Flags().String(longOpt, defaults.SSLCertFile, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.SSLCertFile)
	}
	{
		const (
			key         = config.KeySSLKeyFile
			longOpt     = "ssl-key-file"
			envVar      = release.ENVPREFIX + "_SSL_KEY_FILE"
			description = "SSL Key file"
		)
		RootCmd.Flags().String(longOpt, defaults.SSLKeyFile, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.SSLKeyFile)
	}
	{
		const (
			key         = config.KeySSLVerify
			longOpt     = "ssl-verify"
			envVar      = release.ENVPREFIX + "_SSL_VERIFY"
			description = "Enable SSL verification"
		)
		RootCmd.Flags().Bool(longOpt, defaults.SSLVerify, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.SSLVerify)
	}
	//
	// StatsD
	//
	{
		const (
			key         = config.KeyStatsdDisabled
			longOpt     = "no-statsd"
			envVar      = release.ENVPREFIX + "_NO_STATSD"
			description = "Disable StatsD listener"
		)
		RootCmd.Flags().Bool(longOpt, defaults.NoStatsd, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.NoStatsd)
	}
	{
		const (
			key         = config.KeyStatsdPort
			longOpt     = "statsd-port"
			envVar      = release.ENVPREFIX + "_STATSD_PORT"
			description = "StatsD port"
		)
		RootCmd.Flags().String(longOpt, defaults.StatsdPort, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.StatsdPort)
	}
	{
		const (
			key         = config.KeyStatsdHostPrefix
			longOpt     = "statsd-host-prefix"
			envVar      = release.ENVPREFIX + "_STATSD_HOST_PREFIX"
			description = "StatsD host metric prefix"
		)
		RootCmd.Flags().String(longOpt, defaults.StatsdHostPrefix, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.StatsdHostPrefix)
	}
	{
		const (
			key = config.KeyStatsdHostCategory
			// NOTE(review): longOpt misspells "category"; fixing it would
			// change the CLI interface, so only flagging it here.
			longOpt     = "statsd-host-cateogry"
			envVar      = release.ENVPREFIX + "_STATSD_HOST_CATEGORY"
			description = "StatsD host metric category"
		)
		RootCmd.Flags().String(longOpt, defaults.StatsdHostCategory, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.StatsdHostCategory)
	}
	{
		const (
			key          = config.KeyStatsdGroupCID
			longOpt      = "statsd-group-cid"
			defaultValue = ""
			envVar       = release.ENVPREFIX + "_STATSD_GROUP_CID"
			description  = "StatsD group check bundle ID"
		)
		RootCmd.Flags().String(longOpt, defaultValue, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
	}
	{
		const (
			key         = config.KeyStatsdGroupPrefix
			longOpt     = "statsd-group-prefix"
			envVar      = release.ENVPREFIX + "_STATSD_GROUP_PREFIX"
			description = "StatsD group metric prefix"
		)
		RootCmd.Flags().String(longOpt, defaults.StatsdGroupPrefix, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.StatsdGroupPrefix)
	}
	{
		const (
			key         = config.KeyStatsdGroupCounters
			longOpt     = "statsd-group-counters"
			envVar      = release.ENVPREFIX + "_STATSD_GROUP_COUNTERS"
			description = "StatsD group metric counter handling (average|sum)"
		)
		RootCmd.Flags().String(longOpt, defaults.StatsdGroupCounters, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.StatsdGroupCounters)
	}
	{
		const (
			key         = config.KeyStatsdGroupGauges
			longOpt     = "statsd-group-gauges"
			envVar      = release.ENVPREFIX + "_STATSD_GROUP_GAUGES"
			description = "StatsD group gauge operator"
		)
		RootCmd.Flags().String(longOpt, defaults.StatsdGroupGauges, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.StatsdGroupGauges)
	}
	{
		const (
			key     = config.KeyStatsdGroupSets
			longOpt = "statsd-group-sets"
			// NOTE(review): envVar misspells "GROUP" as "GROPUP"; fixing it
			// would change the accepted environment variable, so only
			// flagging it here.
			envVar      = release.ENVPREFIX + "_STATSD_GROPUP_SETS"
			description = "StatsD group set operator"
		)
		RootCmd.Flags().String(longOpt, defaults.StatsdGroupSets, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.StatsdGroupSets)
	}
	// Miscellaneous
	{
		const (
			key         = config.KeyDebug
			longOpt     = "debug"
			shortOpt    = "d"
			envVar      = release.ENVPREFIX + "_DEBUG"
			description = "Enable debug messages"
		)
		RootCmd.Flags().BoolP(longOpt, shortOpt, defaults.Debug, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.Debug)
	}
	{
		const (
			key          = config.KeyDebugCGM
			longOpt      = "debug-cgm"
			defaultValue = false
			envVar       = release.ENVPREFIX + "_DEBUG_CGM"
			description  = "Enable CGM & API debug messages"
		)
		RootCmd.Flags().Bool(longOpt, defaultValue, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaultValue)
	}
	{
		const (
			key         = config.KeyLogLevel
			longOpt     = "log-level"
			envVar      = release.ENVPREFIX + "_LOG_LEVEL"
			description = "Log level [(panic|fatal|error|warn|info|debug|disabled)]"
		)
		RootCmd.Flags().String(longOpt, defaults.LogLevel, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.LogLevel)
	}
	{
		const (
			key         = config.KeyLogPretty
			longOpt     = "log-pretty"
			envVar      = release.ENVPREFIX + "_LOG_PRETTY"
			description = "Output formatted/colored log lines"
		)
		RootCmd.Flags().Bool(longOpt, defaults.LogPretty, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.LogPretty)
	}
	// RootCmd.Flags().Bool("watch", defaults.Watch, "Watch plugins, reload on change")
	// viper.SetDefault("watch", defaults.Watch)
	// viper.BindPFlag("watch", RootCmd.Flags().Lookup("watch"))
	{
		// Note: no env var binding for --version; it only makes sense as a flag.
		const (
			key          = config.KeyShowVersion
			longOpt      = "version"
			shortOpt     = "V"
			defaultValue = false
			description  = "Show version and exit"
		)
		RootCmd.Flags().BoolP(longOpt, shortOpt, defaultValue, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
	}
	{
		const (
			key          = config.KeyShowConfig
			longOpt      = "show-config"
			defaultValue = false
			description  = "Show config and exit"
		)
		RootCmd.Flags().Bool(longOpt, defaultValue, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
	}
}
// initLogging initializes zerolog: enables pretty console output when
// requested, then applies either the forced debug level (--debug) or the
// configured log-level string. Returns an error for an unrecognized level.
func initLogging(cmd *cobra.Command, args []string) error {
	// Enable formatted/colored output when requested.
	if viper.GetBool(config.KeyLogPretty) {
		log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stdout})
	}

	// --debug wins over any configured level.
	if viper.GetBool(config.KeyDebug) {
		viper.Set(config.KeyLogLevel, "debug")
		zerolog.SetGlobalLevel(zerolog.DebugLevel)
		log.Debug().Msg("--debug flag, forcing debug log level")
		return nil
	}

	// No explicit level configured; keep the default (info).
	if !viper.IsSet(config.KeyLogLevel) {
		return nil
	}

	levelName := viper.GetString(config.KeyLogLevel)
	levels := map[string]zerolog.Level{
		"panic":    zerolog.PanicLevel,
		"fatal":    zerolog.FatalLevel,
		"error":    zerolog.ErrorLevel,
		"warn":     zerolog.WarnLevel,
		"info":     zerolog.InfoLevel,
		"debug":    zerolog.DebugLevel,
		"disabled": zerolog.Disabled,
	}
	lvl, known := levels[levelName]
	if !known {
		return errors.Errorf("Unknown log level (%s)", levelName)
	}

	zerolog.SetGlobalLevel(lvl)
	log.Debug().Str("log-level", levelName).Msg("Logging level")
	return nil
}
// initConfig reads in config file and/or ENV variables if set. An explicit
// --config file takes precedence; otherwise the default locations are
// searched. A missing default config file is tolerated — only a file that
// was found but could not be read/parsed is fatal.
func initConfig() {
	if cfgFile == "" {
		viper.AddConfigPath(defaults.EtcPath)
		viper.AddConfigPath(".")
		viper.SetConfigName(release.NAME)
	} else {
		viper.SetConfigFile(cfgFile)
	}

	viper.AutomaticEnv()

	err := viper.ReadInConfig()
	if err == nil {
		return
	}
	if used := viper.ConfigFileUsed(); used != "" {
		log.Fatal().Err(err).Str("config_file", used).Msg("Unable to load config file")
	}
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
	err := RootCmd.Execute()
	if err == nil {
		return
	}
	log.Fatal().
		Err(err).
		Msg("Unable to start")
}
// showConfig writes the effective (merged) configuration to w as indented JSON,
// prefixed with the agent name and version.
func showConfig(w io.Writer) error {
	var settings interface{}
	if err := viper.Unmarshal(&settings); err != nil {
		return errors.Wrap(err, "parsing config")
	}

	formatted, err := json.MarshalIndent(settings, " ", "  ")
	if err != nil {
		return errors.Wrap(err, "formatting config")
	}

	fmt.Fprintf(w, "%s v%s running config:\n%s\n", release.NAME, release.VERSION, formatted)
	return nil
}
// better descriptions for reverse check create options
// Copyright © 2017 Circonus, Inc. <support@circonus.com>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
package cmd
import (
"encoding/json"
"fmt"
"io"
stdlog "log"
"os"
"time"
"github.com/circonus-labs/circonus-agent/internal/agent"
"github.com/circonus-labs/circonus-agent/internal/config"
"github.com/circonus-labs/circonus-agent/internal/config/defaults"
"github.com/circonus-labs/circonus-agent/internal/release"
"github.com/pkg/errors"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// cfgFile holds the config file path supplied via --config/-c; empty means
// search the default locations (see initConfig).
var cfgFile string

// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
	Use:   release.NAME,
	Short: "Circonus Host Agent",
	Long: `The Circonus host agent daemon provides a simple mechanism
to expose systems and application metrics to Circonus.
It inventories all executable programs in its plugin directory
and executes them upon external request, returning results
in JSON format.`,
	PersistentPreRunE: initLogging,
	Run: func(cmd *cobra.Command, args []string) {
		//
		// show version and exit
		//
		if viper.GetBool(config.KeyShowVersion) {
			fmt.Printf("%s v%s - commit: %s, date: %s, tag: %s\n", release.NAME, release.VERSION, release.COMMIT, release.DATE, release.TAG)
			return
		}
		//
		// show configuration and exit
		//
		if viper.GetBool(config.KeyShowConfig) {
			// NOTE(review): showConfig's error return is silently ignored here.
			showConfig(os.Stdout)
			return
		}
		log.Info().
			Int("pid", os.Getpid()).
			Str("name", release.NAME).
			Str("ver", release.VERSION).Msg("Starting")
		// Build and run the agent; both failure paths exit the process.
		a, err := agent.New()
		if err != nil {
			log.Fatal().Err(err).Msg("initializing")
		}
		if err := a.Start(); err != nil {
			log.Fatal().Err(err).Msg("starting agent")
		}
	},
}
// init wires the standard library logger through zerolog, registers the viper
// config initializer, and declares every CLI flag, binding each flag to its
// viper key and (where applicable) environment variable and default value.
// NOTE(review): errors returned by viper.BindPFlag/viper.BindEnv are ignored
// throughout; presumably harmless for these static keys — confirm.
func init() {
	zerolog.TimeFieldFormat = time.RFC3339Nano
	zerolog.SetGlobalLevel(zerolog.InfoLevel)
	// Route the standard library logger through zerolog so all output is
	// structured and synchronized on stderr.
	zlog := zerolog.New(zerolog.SyncWriter(os.Stderr)).With().Timestamp().Logger()
	log.Logger = zlog
	stdlog.SetFlags(0)
	stdlog.SetOutput(zlog)
	cobra.OnInitialize(initConfig)
	//
	// Basic
	//
	{
		var (
			longOpt  = "config"
			shortOpt = "c"
			// NOTE(review): description is missing a closing ")" after
			// "(json|toml|yaml)".
			description = "config file (default is " + defaults.EtcPath + "/" + release.NAME + ".(json|toml|yaml)"
		)
		RootCmd.PersistentFlags().StringVarP(&cfgFile, longOpt, shortOpt, "", description)
	}
	{
		const (
			key         = config.KeyListen
			longOpt     = "listen"
			shortOpt    = "l"
			envVar      = release.ENVPREFIX + "_LISTEN"
			description = "Listen address and port [[IP]:[PORT]]" + `(default "` + defaults.Listen + `")`
		)
		RootCmd.Flags().StringP(longOpt, shortOpt, "", description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
	}
	{
		const (
			key         = config.KeyPluginDir
			longOpt     = "plugin-dir"
			shortOpt    = "p"
			envVar      = release.ENVPREFIX + "_PLUGIN_DIR"
			description = "Plugin directory"
		)
		RootCmd.Flags().StringP(longOpt, shortOpt, defaults.PluginPath, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.PluginPath)
	}
	//
	// Reverse mode
	//
	{
		const (
			key         = config.KeyReverse
			longOpt     = "reverse"
			shortOpt    = "r"
			envVar      = release.ENVPREFIX + "_REVERSE"
			description = "Enable reverse connection"
		)
		RootCmd.Flags().BoolP(longOpt, shortOpt, defaults.Reverse, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.Reverse)
	}
	{
		const (
			key          = config.KeyReverseCID
			longOpt      = "reverse-cid"
			defaultValue = ""
			envVar       = release.ENVPREFIX + "_REVERSE_CID"
			description  = "Check Bundle ID for reverse connection"
		)
		RootCmd.Flags().String(longOpt, defaultValue, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
	}
	{
		const (
			key         = config.KeyReverseTarget
			longOpt     = "reverse-target"
			envVar      = release.ENVPREFIX + "_REVERSE_TARGET"
			description = "Target host"
		)
		RootCmd.Flags().String(longOpt, defaults.Target, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.Target)
	}
	{
		const (
			key         = config.KeyReverseCreateCheck
			longOpt     = "reverse-create-check"
			envVar      = release.ENVPREFIX + "_REVERSE_CREATE_CHECK"
			description = "Create check bundle for reverse if one cannot be found"
		)
		RootCmd.Flags().Bool(longOpt, defaults.ReverseCreateCheck, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.ReverseCreateCheck)
	}
	{
		const (
			key         = config.KeyReverseCreateCheckBroker
			longOpt     = "reverse-create-check-broker"
			envVar      = release.ENVPREFIX + "_REVERSE_CREATE_CHECK_BROKER"
			description = "ID of Broker to use or 'select' for random selection of valid broker, if creating a check bundle"
		)
		RootCmd.Flags().String(longOpt, defaults.ReverseCreateCheckBroker, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.ReverseCreateCheckBroker)
	}
	{
		const (
			key         = config.KeyReverseCreateCheckTitle
			longOpt     = "reverse-create-check-title"
			envVar      = release.ENVPREFIX + "_REVERSE_CREATE_CHECK_TITLE"
			description = "Title [display name] to use, if creating a check bundle"
		)
		RootCmd.Flags().String(longOpt, defaults.ReverseCreateCheckTitle, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.ReverseCreateCheckTitle)
	}
	{
		const (
			key         = config.KeyReverseCreateCheckTags
			longOpt     = "reverse-create-check-tags"
			envVar      = release.ENVPREFIX + "_REVERSE_CREATE_CHECK_TAGS"
			description = "Tags [comma separated list] to use, if creating a check bundle"
		)
		RootCmd.Flags().String(longOpt, defaults.ReverseCreateCheckTags, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.ReverseCreateCheckTags)
	}
	{
		const (
			key          = config.KeyReverseBrokerCAFile
			longOpt      = "reverse-broker-ca-file"
			defaultValue = ""
			envVar       = release.ENVPREFIX + "_REVERSE_BROKER_CA_FILE"
			description  = "Broker CA certificate file"
		)
		RootCmd.Flags().String(longOpt, defaultValue, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
	}
	//
	// API
	//
	{
		const (
			key          = config.KeyAPITokenKey
			longOpt      = "api-key"
			defaultValue = ""
			envVar       = release.ENVPREFIX + "_API_KEY"
			description  = "Circonus API Token key"
		)
		RootCmd.Flags().String(longOpt, defaultValue, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
	}
	{
		const (
			key         = config.KeyAPITokenApp
			longOpt     = "api-app"
			envVar      = release.ENVPREFIX + "_API_APP"
			description = "Circonus API Token app"
		)
		RootCmd.Flags().String(longOpt, defaults.APIApp, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.APIApp)
	}
	{
		const (
			key         = config.KeyAPIURL
			longOpt     = "api-url"
			envVar      = release.ENVPREFIX + "_API_URL"
			description = "Circonus API URL"
		)
		RootCmd.Flags().String(longOpt, defaults.APIURL, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.APIURL)
	}
	{
		const (
			key          = config.KeyAPICAFile
			longOpt      = "api-ca-file"
			defaultValue = ""
			envVar       = release.ENVPREFIX + "_API_CA_FILE"
			description  = "Circonus API CA certificate file"
		)
		RootCmd.Flags().String(longOpt, defaultValue, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
	}
	//
	// SSL
	//
	{
		const (
			key          = config.KeySSLListen
			longOpt      = "ssl-listen"
			defaultValue = ""
			envVar       = release.ENVPREFIX + "_SSL_LISTEN"
			description  = "SSL listen address and port [IP]:[PORT] - setting enables SSL"
		)
		RootCmd.Flags().String(longOpt, defaultValue, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
	}
	{
		const (
			key         = config.KeySSLCertFile
			longOpt     = "ssl-cert-file"
			envVar      = release.ENVPREFIX + "_SSL_CERT_FILE"
			description = "SSL Certificate file (PEM cert and CAs concatenated together)"
		)
		RootCmd.Flags().String(longOpt, defaults.SSLCertFile, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.SSLCertFile)
	}
	{
		const (
			key         = config.KeySSLKeyFile
			longOpt     = "ssl-key-file"
			envVar      = release.ENVPREFIX + "_SSL_KEY_FILE"
			description = "SSL Key file"
		)
		RootCmd.Flags().String(longOpt, defaults.SSLKeyFile, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.SSLKeyFile)
	}
	{
		const (
			key         = config.KeySSLVerify
			longOpt     = "ssl-verify"
			envVar      = release.ENVPREFIX + "_SSL_VERIFY"
			description = "Enable SSL verification"
		)
		RootCmd.Flags().Bool(longOpt, defaults.SSLVerify, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.SSLVerify)
	}
	//
	// StatsD
	//
	{
		const (
			key         = config.KeyStatsdDisabled
			longOpt     = "no-statsd"
			envVar      = release.ENVPREFIX + "_NO_STATSD"
			description = "Disable StatsD listener"
		)
		RootCmd.Flags().Bool(longOpt, defaults.NoStatsd, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.NoStatsd)
	}
	{
		const (
			key         = config.KeyStatsdPort
			longOpt     = "statsd-port"
			envVar      = release.ENVPREFIX + "_STATSD_PORT"
			description = "StatsD port"
		)
		RootCmd.Flags().String(longOpt, defaults.StatsdPort, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.StatsdPort)
	}
	{
		const (
			key         = config.KeyStatsdHostPrefix
			longOpt     = "statsd-host-prefix"
			envVar      = release.ENVPREFIX + "_STATSD_HOST_PREFIX"
			description = "StatsD host metric prefix"
		)
		RootCmd.Flags().String(longOpt, defaults.StatsdHostPrefix, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.StatsdHostPrefix)
	}
	{
		const (
			key = config.KeyStatsdHostCategory
			// NOTE(review): longOpt misspells "category"; fixing it would
			// change the CLI interface, so only flagging it here.
			longOpt     = "statsd-host-cateogry"
			envVar      = release.ENVPREFIX + "_STATSD_HOST_CATEGORY"
			description = "StatsD host metric category"
		)
		RootCmd.Flags().String(longOpt, defaults.StatsdHostCategory, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.StatsdHostCategory)
	}
	{
		const (
			key          = config.KeyStatsdGroupCID
			longOpt      = "statsd-group-cid"
			defaultValue = ""
			envVar       = release.ENVPREFIX + "_STATSD_GROUP_CID"
			description  = "StatsD group check bundle ID"
		)
		RootCmd.Flags().String(longOpt, defaultValue, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
	}
	{
		const (
			key         = config.KeyStatsdGroupPrefix
			longOpt     = "statsd-group-prefix"
			envVar      = release.ENVPREFIX + "_STATSD_GROUP_PREFIX"
			description = "StatsD group metric prefix"
		)
		RootCmd.Flags().String(longOpt, defaults.StatsdGroupPrefix, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.StatsdGroupPrefix)
	}
	{
		const (
			key         = config.KeyStatsdGroupCounters
			longOpt     = "statsd-group-counters"
			envVar      = release.ENVPREFIX + "_STATSD_GROUP_COUNTERS"
			description = "StatsD group metric counter handling (average|sum)"
		)
		RootCmd.Flags().String(longOpt, defaults.StatsdGroupCounters, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.StatsdGroupCounters)
	}
	{
		const (
			key         = config.KeyStatsdGroupGauges
			longOpt     = "statsd-group-gauges"
			envVar      = release.ENVPREFIX + "_STATSD_GROUP_GAUGES"
			description = "StatsD group gauge operator"
		)
		RootCmd.Flags().String(longOpt, defaults.StatsdGroupGauges, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.StatsdGroupGauges)
	}
	{
		const (
			key     = config.KeyStatsdGroupSets
			longOpt = "statsd-group-sets"
			// NOTE(review): envVar misspells "GROUP" as "GROPUP"; fixing it
			// would change the accepted environment variable, so only
			// flagging it here.
			envVar      = release.ENVPREFIX + "_STATSD_GROPUP_SETS"
			description = "StatsD group set operator"
		)
		RootCmd.Flags().String(longOpt, defaults.StatsdGroupSets, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.StatsdGroupSets)
	}
	// Miscellaneous
	{
		const (
			key         = config.KeyDebug
			longOpt     = "debug"
			shortOpt    = "d"
			envVar      = release.ENVPREFIX + "_DEBUG"
			description = "Enable debug messages"
		)
		RootCmd.Flags().BoolP(longOpt, shortOpt, defaults.Debug, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.Debug)
	}
	{
		const (
			key          = config.KeyDebugCGM
			longOpt      = "debug-cgm"
			defaultValue = false
			envVar       = release.ENVPREFIX + "_DEBUG_CGM"
			description  = "Enable CGM & API debug messages"
		)
		RootCmd.Flags().Bool(longOpt, defaultValue, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaultValue)
	}
	{
		const (
			key         = config.KeyLogLevel
			longOpt     = "log-level"
			envVar      = release.ENVPREFIX + "_LOG_LEVEL"
			description = "Log level [(panic|fatal|error|warn|info|debug|disabled)]"
		)
		RootCmd.Flags().String(longOpt, defaults.LogLevel, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.LogLevel)
	}
	{
		const (
			key         = config.KeyLogPretty
			longOpt     = "log-pretty"
			envVar      = release.ENVPREFIX + "_LOG_PRETTY"
			description = "Output formatted/colored log lines"
		)
		RootCmd.Flags().Bool(longOpt, defaults.LogPretty, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
		viper.BindEnv(key, envVar)
		viper.SetDefault(key, defaults.LogPretty)
	}
	// RootCmd.Flags().Bool("watch", defaults.Watch, "Watch plugins, reload on change")
	// viper.SetDefault("watch", defaults.Watch)
	// viper.BindPFlag("watch", RootCmd.Flags().Lookup("watch"))
	{
		// Note: no env var binding for --version; it only makes sense as a flag.
		const (
			key          = config.KeyShowVersion
			longOpt      = "version"
			shortOpt     = "V"
			defaultValue = false
			description  = "Show version and exit"
		)
		RootCmd.Flags().BoolP(longOpt, shortOpt, defaultValue, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
	}
	{
		const (
			key          = config.KeyShowConfig
			longOpt      = "show-config"
			defaultValue = false
			description  = "Show config and exit"
		)
		RootCmd.Flags().Bool(longOpt, defaultValue, description)
		viper.BindPFlag(key, RootCmd.Flags().Lookup(longOpt))
	}
}
// initLogging initializes zerolog. Runs as the root command's
// PersistentPreRunE: enables pretty console output when requested, then
// applies either the forced debug level (--debug) or the configured
// log-level string. Returns an error for an unrecognized level name.
func initLogging(cmd *cobra.Command, args []string) error {
	//
	// Enable formatted output
	//
	if viper.GetBool(config.KeyLogPretty) {
		log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stdout})
	}
	//
	// Enable debug logging, if requested
	// otherwise, default to info level and set custom level, if specified
	//
	if viper.GetBool(config.KeyDebug) {
		// --debug wins: also rewrite the stored log-level so showConfig
		// reflects the effective level.
		viper.Set(config.KeyLogLevel, "debug")
		zerolog.SetGlobalLevel(zerolog.DebugLevel)
		log.Debug().Msg("--debug flag, forcing debug log level")
	} else {
		if viper.IsSet(config.KeyLogLevel) {
			level := viper.GetString(config.KeyLogLevel)
			switch level {
			case "panic":
				zerolog.SetGlobalLevel(zerolog.PanicLevel)
			case "fatal":
				zerolog.SetGlobalLevel(zerolog.FatalLevel)
			case "error":
				zerolog.SetGlobalLevel(zerolog.ErrorLevel)
			case "warn":
				zerolog.SetGlobalLevel(zerolog.WarnLevel)
			case "info":
				zerolog.SetGlobalLevel(zerolog.InfoLevel)
			case "debug":
				zerolog.SetGlobalLevel(zerolog.DebugLevel)
			case "disabled":
				zerolog.SetGlobalLevel(zerolog.Disabled)
			default:
				return errors.Errorf("Unknown log level (%s)", level)
			}
			log.Debug().Str("log-level", level).Msg("Logging level")
		}
	}
	return nil
}
// initConfig reads in config file and/or ENV variables if set.
// An explicit --config file that cannot be read is fatal; running with
// no config file at all is tolerated.
func initConfig() {
if cfgFile != "" {
// --config was given: use exactly that file.
viper.SetConfigFile(cfgFile)
} else {
// Otherwise search the standard locations for a file named after
// the release.
viper.AddConfigPath(defaults.EtcPath)
viper.AddConfigPath(".")
viper.SetConfigName(release.NAME)
}
viper.AutomaticEnv()
if err := viper.ReadInConfig(); err != nil {
// Only fail when a concrete file was involved; an absent default
// config simply leaves f empty and the error is ignored.
f := viper.ConfigFileUsed()
if f != "" {
log.Fatal().Err(err).Str("config_file", f).Msg("Unable to load config file")
}
}
}
// Execute runs the root command with all registered child commands.
// It is invoked exactly once, from main.main(); a failed start is fatal.
func Execute() {
	err := RootCmd.Execute()
	if err != nil {
		log.Fatal().Err(err).Msg("Unable to start")
	}
}
// showConfig writes the fully resolved running configuration to w as
// indented JSON, prefixed with the release name and version.
func showConfig(w io.Writer) error {
	var cfg interface{}
	err := viper.Unmarshal(&cfg)
	if err != nil {
		return errors.Wrap(err, "parsing config")
	}

	formatted, err := json.MarshalIndent(cfg, " ", " ")
	if err != nil {
		return errors.Wrap(err, "formatting config")
	}

	fmt.Fprintf(w, "%s v%s running config:\n%s\n", release.NAME, release.VERSION, formatted)
	return nil
}
|
// Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package cmd
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/Unknwon/com"
"github.com/urfave/cli"
log "gopkg.in/clog.v1"
"github.com/gogits/gogs/models"
"github.com/gogits/gogs/models/errors"
"github.com/gogits/gogs/pkg/setting"
http "github.com/gogits/gogs/routers/repo"
"syscall"
)
const (
_ACCESS_DENIED_MESSAGE = "Repository does not exist or you do not have access"
)
var Serv = cli.Command{
Name: "serv",
Usage: "This command should only be called by SSH shell",
Description: `Serv provide access auth for repositories`,
Action: runServ,
Flags: []cli.Flag{
stringFlag("config, c", "custom/conf/app.ini", "Custom configuration file path"),
},
}
// fail reports userMessage to the SSH client on stderr, logs logMessage
// (when non-empty), and terminates the process with exit status 1.
// Outside production mode the log message is also echoed to stderr so
// problems are visible during development.
func fail(userMessage, logMessage string, args ...interface{}) {
fmt.Fprintln(os.Stderr, "Gin:", userMessage)
if len(logMessage) > 0 {
if !setting.ProdMode {
fmt.Fprintf(os.Stderr, logMessage+"\n", args...)
}
log.Fatal(3, logMessage, args...)
}
os.Exit(1)
}
// setup loads the (optionally custom) configuration, redirects logging
// to a daily-rotated file under the log root, and, when connectDB is
// true, initializes the database engine. Any engine failure is fatal.
func setup(c *cli.Context, logPath string, connectDB bool) {
// A per-command --config takes precedence over the global flag.
if c.IsSet("config") {
setting.CustomConf = c.String("config")
} else if c.GlobalIsSet("config") {
setting.CustomConf = c.GlobalString("config")
}
setting.NewContext()
// Production installs log at ERROR, everything else at TRACE.
level := log.TRACE
if setting.ProdMode {
level = log.ERROR
}
log.New(log.FILE, log.FileConfig{
Level: level,
Filename: filepath.Join(setting.LogRootPath, logPath),
FileRotationConfig: log.FileRotationConfig{
Rotate: true,
Daily: true,
MaxDays: 3,
},
})
log.Delete(log.CONSOLE) // Remove primary logger
if !connectDB {
return
}
models.LoadConfigs()
// SQLite database paths are relative to the working directory, so
// move there before opening the engine.
if setting.UseSQLite3 {
workDir, _ := setting.WorkDir()
os.Chdir(workDir)
}
if err := models.SetEngine(); err != nil {
fail("Internal error", "SetEngine: %v", err)
}
}
// isAnnexShell reports whether cmd is the git-annex-shell command.
func isAnnexShell(cmd string) bool {
	return cmd == "git-annex-shell"
}

// parseSSHCmd splits the raw SSH command line into the verb, the
// repository path, and the full field list. For git-annex-shell the
// path is the third field ("git-annex-shell -c <path> ..."), otherwise
// the second. The first "/" in the path is replaced by "'" to match the
// quote-trimming the caller performs. Empty values are returned when
// the command does not carry enough fields.
func parseSSHCmd(cmd string) (string, string, []string) {
	ss := strings.Split(cmd, " ")
	if len(ss) < 2 {
		return "", "", nil
	}
	if isAnnexShell(ss[0]) {
		// BUGFIX: the annex path lives in ss[2], but only len >= 2 was
		// checked, so a two-field git-annex-shell command panicked with
		// an index-out-of-range. Reject it instead.
		if len(ss) < 3 {
			return "", "", nil
		}
		return ss[0], strings.Replace(ss[2], "/", "'", 1), ss
	}
	return ss[0], strings.Replace(ss[1], "/", "'", 1), ss
}
// checkDeployKey verifies that key is a deploy key registered for repo
// and records its latest use. Any failure terminates the process via
// fail (this function never returns an error).
func checkDeployKey(key *models.PublicKey, repo *models.Repository) {
// Check if this deploy key belongs to current repository.
if !models.HasDeployKey(key.ID, repo.ID) {
fail("Key access denied", "Deploy key access denied: [key_id: %d, repo_id: %d]", key.ID, repo.ID)
}
// Update deploy key activity.
deployKey, err := models.GetDeployKeyByRepo(key.ID, repo.ID)
if err != nil {
fail("Internal error", "GetDeployKey: %v", err)
}
deployKey.Updated = time.Now()
if err = models.UpdateDeployKey(deployKey); err != nil {
fail("Internal error", "UpdateDeployKey: %v", err)
}
}
var (
// allowedCommands maps each SSH verb this server will execute to the
// minimum repository access mode that verb requires.
allowedCommands = map[string]models.AccessMode{
"git-upload-pack": models.ACCESS_MODE_READ,
"git-upload-archive": models.ACCESS_MODE_READ,
"git-receive-pack": models.ACCESS_MODE_WRITE,
"git-annex-shell": models.ACCESS_MODE_READ,
}
)
// runServ is the entry point of the "serv" command, invoked by the SSH
// forced-command mechanism. It parses SSH_ORIGINAL_COMMAND, resolves
// the target repository, enforces access control for the requesting
// key, and finally delegates to the matching git or git-annex-shell
// binary via runGit.
func runServ(c *cli.Context) error {
setup(c, "serv.log", true)
if setting.SSH.Disabled {
println("Gins: SSH has been disabled")
return nil
}
// The authorized_keys entry passes "key-<ID>" as the only argument.
if len(c.Args()) < 1 {
fail("Not enough arguments", "Not enough arguments")
}
// Strip all single quotes up front; paths are re-derived below.
sshCmd := strings.Replace(os.Getenv("SSH_ORIGINAL_COMMAND"), "'", "", -1)
// NOTE(review): "commadn" is a typo in this log message.
log.Info("SSH commadn:%s", sshCmd)
// An interactive login (no forced command) gets a greeting, no shell.
if len(sshCmd) == 0 {
println("Hi there, You've successfully authenticated, but Gin does not provide shell access.")
return nil
}
verb, path, args := parseSSHCmd(sshCmd)
repoFullName := strings.ToLower(strings.Trim(path, "'"))
repoFields := strings.SplitN(repoFullName, "/", 2)
if len(repoFields) != 2 {
fail("Invalid repository path", "Invalid repository path: %v", path)
}
ownerName := strings.ToLower(repoFields[0])
repoName := strings.TrimSuffix(strings.ToLower(repoFields[1]), ".git")
repoName = strings.TrimSuffix(repoName, ".wiki")
owner, err := models.GetUserByName(ownerName)
if err != nil {
if errors.IsUserNotExist(err) {
fail("Repository owner does not exist", "Unregistered owner: %s", ownerName)
}
fail("Internal error", "Fail to get repository owner '%s': %v", ownerName, err)
}
repo, err := models.GetRepositoryByName(owner.ID, repoName)
if err != nil {
if errors.IsRepoNotExist(err) {
fail(_ACCESS_DENIED_MESSAGE, "Repository does not exist: %s/%s", owner.Name, repoName)
}
fail("Internal error", "Fail to get repository: %v", err)
}
repo.Owner = owner
// Unknown verbs are rejected before any access decision.
requestMode, ok := allowedCommands[verb]
if !ok {
fail("Unknown git command", "Unknown git command '%s'", verb)
}
// Prohibit push to mirror repositories.
if requestMode > models.ACCESS_MODE_READ && repo.IsMirror {
fail("Mirror repository is read-only", "")
}
// Allow anonymous (user is nil) clone for public repositories.
var user *models.User
key, err := models.GetPublicKeyByID(com.StrTo(strings.TrimPrefix(c.Args()[0], "key-")).MustInt64())
if err != nil {
fail("Invalid key ID", "Invalid key ID '%s': %v", c.Args()[0], err)
}
if requestMode == models.ACCESS_MODE_WRITE || repo.IsPrivate {
// Check deploy key or user key.
if key.IsDeployKey() {
if key.Mode < requestMode {
fail("Key permission denied", "Cannot push with deployment key: %d", key.ID)
}
checkDeployKey(key, repo)
} else {
user, err = models.GetUserByKeyID(key.ID)
if err != nil {
fail("Internal error", "Fail to get user by key ID '%d': %v", key.ID, err)
}
mode, err := models.AccessLevel(user.ID, repo)
if err != nil {
fail("Internal error", "Fail to check access: %v", err)
}
if mode < requestMode {
clientMessage := _ACCESS_DENIED_MESSAGE
if mode >= models.ACCESS_MODE_READ {
clientMessage = "You do not have sufficient authorization for this action"
}
fail(clientMessage,
"User '%s' does not have level '%v' access to repository '%s'",
user.Name, requestMode, repoFullName)
}
}
} else {
setting.NewService()
// Check if the key can access to the repository in case of it is a deploy key (a deploy keys != user key).
// A deploy key doesn't represent a signed in user, so in a site with Service.RequireSignInView activated
// we should give read access only in repositories where this deploy key is in use. In other case, a server
// or system using an active deploy key can get read access to all the repositories in a Gogs service.
if key.IsDeployKey() && setting.Service.RequireSignInView {
checkDeployKey(key, repo)
}
}
// Update user key activity.
if key.ID > 0 {
key, err := models.GetPublicKeyByID(key.ID)
if err != nil {
fail("Internal error", "GetPublicKeyByID: %v", err)
}
key.Updated = time.Now()
if err = models.UpdatePublicKey(key); err != nil {
fail("Internal error", "UpdatePublicKey: %v", err)
}
}
// Special handle for Windows.
// Todo will break with annex
if setting.IsWindows {
verb = strings.Replace(verb, "-", " ", 1)
}
// Assemble the command line: a two-word verb (Windows), an annex
// invocation (path rewritten to the absolute repo path), or plain git.
verbs := strings.Split(verb, " ")
var cmd []string
if len(verbs) == 2 {
cmd = []string{verbs[0], verbs[1], repoFullName}
} else if isAnnexShell(verb) {
repoAbsPath := setting.RepoRootPath + "/" + repoFullName
if err := secureGitAnnex(repoAbsPath, requestMode); err != nil {
fail("Git annex failed", "Git annex failed: %s", err)
}
cmd = args
// Setting full path to repo as git-annex-shell requires it
cmd[2] = repoAbsPath
} else {
cmd = []string{verb, repoFullName}
}
runGit(cmd, requestMode, user, owner, repo)
return nil
}
// runGit executes the prepared git (or git-annex-shell) command with
// this process's stdio attached. For write requests the hook
// environment is composed so server-side hooks can identify the
// authenticated user and target repository. If the child exits with a
// non-zero status, this process exits with the same status; any other
// execution failure is returned to the caller.
func runGit(cmd []string, requestMode models.AccessMode, user *models.User, owner *models.User,
	repo *models.Repository) error {
	log.Info("will execute:%s", cmd)
	gitCmd := exec.Command(cmd[0], cmd[1:]...)
	if requestMode == models.ACCESS_MODE_WRITE {
		// Pushes need the hook environment so update/post-receive hooks
		// know who pushed and where.
		gitCmd.Env = append(os.Environ(), http.ComposeHookEnvs(http.ComposeHookEnvsOptions{
			AuthUser:  user,
			OwnerName: owner.Name,
			OwnerSalt: owner.Salt,
			RepoID:    repo.ID,
			RepoName:  repo.Name,
			RepoPath:  repo.RepoPath(),
		})...)
	}
	gitCmd.Dir = setting.RepoRootPath
	gitCmd.Stdout = os.Stdout
	gitCmd.Stdin = os.Stdin
	gitCmd.Stderr = os.Stderr
	log.Info("args:%s", gitCmd.Args)
	err := gitCmd.Run()
	if err == nil {
		return nil
	}
	log.Info("err:%s", err)
	if t, ok := err.(*exec.ExitError); ok {
		// Mirror the child's exit status back to the SSH client.
		os.Exit(t.Sys().(syscall.WaitStatus).ExitStatus())
	}
	// BUGFIX: failures that are not *exec.ExitError (e.g. the binary is
	// missing) were previously swallowed and nil was returned; surface
	// them to the caller instead.
	return err
}
// secureGitAnnex sandboxes git-annex-shell via its environment
// variables: unknown commands are disallowed, operations are confined
// to path, and read-only mode is enforced unless the request carries
// write access. (Refactored from repo.)
func secureGitAnnex(path string, requestMode models.AccessMode) error {
	// "If set, disallows running git-shell to handle unknown commands."
	if err := os.Setenv("GIT_ANNEX_SHELL_LIMITED", "True"); err != nil {
		// BUGFIX: message previously read "Could set ..." (missing "not").
		return fmt.Errorf("ERROR: Could not set annex shell to be limited.")
	}
	// "If set, git-annex-shell will refuse to run commands
	// that do not operate on the specified directory."
	if err := os.Setenv("GIT_ANNEX_SHELL_DIRECTORY", path); err != nil {
		return fmt.Errorf("ERROR: Could not set annex shell directory.")
	}
	// Anything at or below read access is confined to read-only mode.
	if requestMode <= models.ACCESS_MODE_READ {
		if err := os.Setenv("GIT_ANNEX_SHELL_READONLY", "True"); err != nil {
			return fmt.Errorf("ERROR: Could not set annex shell to read only.")
		}
	}
	return nil
}
[annex] allow and secure read-only annexes
// Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package cmd
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/Unknwon/com"
"github.com/urfave/cli"
log "gopkg.in/clog.v1"
"github.com/gogits/gogs/models"
"github.com/gogits/gogs/models/errors"
"github.com/gogits/gogs/pkg/setting"
http "github.com/gogits/gogs/routers/repo"
"syscall"
)
const (
_ACCESS_DENIED_MESSAGE = "Repository does not exist or you do not have access"
)
var Serv = cli.Command{
Name: "serv",
Usage: "This command should only be called by SSH shell",
Description: `Serv provide access auth for repositories`,
Action: runServ,
Flags: []cli.Flag{
stringFlag("config, c", "custom/conf/app.ini", "Custom configuration file path"),
},
}
// fail reports userMessage to the connecting SSH client on stderr,
// optionally logs logMessage (echoed to stderr outside production
// mode), and exits the process with status 1.
func fail(userMessage, logMessage string, args ...interface{}) {
	fmt.Fprintln(os.Stderr, "Gin:", userMessage)
	if logMessage != "" {
		if !setting.ProdMode {
			fmt.Fprintf(os.Stderr, logMessage+"\n", args...)
		}
		log.Fatal(3, logMessage, args...)
	}
	os.Exit(1)
}
// setup loads configuration (honoring a custom --config path), routes
// logging to a daily-rotated file under the log root, and optionally
// initializes the database engine when connectDB is true.
func setup(c *cli.Context, logPath string, connectDB bool) {
// The command-local --config wins over the global flag.
if c.IsSet("config") {
setting.CustomConf = c.String("config")
} else if c.GlobalIsSet("config") {
setting.CustomConf = c.GlobalString("config")
}
setting.NewContext()
// TRACE everywhere except production, which logs at ERROR.
level := log.TRACE
if setting.ProdMode {
level = log.ERROR
}
log.New(log.FILE, log.FileConfig{
Level: level,
Filename: filepath.Join(setting.LogRootPath, logPath),
FileRotationConfig: log.FileRotationConfig{
Rotate: true,
Daily: true,
MaxDays: 3,
},
})
log.Delete(log.CONSOLE) // Remove primary logger
if !connectDB {
return
}
models.LoadConfigs()
// SQLite paths are relative; change into the work dir before opening.
if setting.UseSQLite3 {
workDir, _ := setting.WorkDir()
os.Chdir(workDir)
}
if err := models.SetEngine(); err != nil {
fail("Internal error", "SetEngine: %v", err)
}
}
// isAnnexShell reports whether cmd is the git-annex-shell command.
func isAnnexShell(cmd string) bool {
	return cmd == "git-annex-shell"
}

// parseSSHCmd splits the raw SSH command line into the verb, the
// repository path, and the full field list. For git-annex-shell the
// path is the third field ("git-annex-shell -c <path> ..."), otherwise
// the second. The first "/" in the path is replaced by "'" to match the
// quote-trimming the caller performs. Empty values are returned when
// the command does not carry enough fields.
func parseSSHCmd(cmd string) (string, string, []string) {
	ss := strings.Split(cmd, " ")
	if len(ss) < 2 {
		return "", "", nil
	}
	if isAnnexShell(ss[0]) {
		// BUGFIX: ss[2] was indexed with only a len >= 2 guard, so a
		// two-field git-annex-shell command panicked. Reject it instead.
		if len(ss) < 3 {
			return "", "", nil
		}
		return ss[0], strings.Replace(ss[2], "/", "'", 1), ss
	}
	return ss[0], strings.Replace(ss[1], "/", "'", 1), ss
}
// checkDeployKey verifies that key is a deploy key registered for repo
// and stamps its last-used time. Every failure path terminates the
// process through fail; the function never returns on error.
func checkDeployKey(key *models.PublicKey, repo *models.Repository) {
// Check if this deploy key belongs to current repository.
if !models.HasDeployKey(key.ID, repo.ID) {
fail("Key access denied", "Deploy key access denied: [key_id: %d, repo_id: %d]", key.ID, repo.ID)
}
// Update deploy key activity.
deployKey, err := models.GetDeployKeyByRepo(key.ID, repo.ID)
if err != nil {
fail("Internal error", "GetDeployKey: %v", err)
}
deployKey.Updated = time.Now()
if err = models.UpdateDeployKey(deployKey); err != nil {
fail("Internal error", "UpdateDeployKey: %v", err)
}
}
var (
// allowedCommands lists the SSH verbs this server will execute, each
// mapped to the minimum access mode the verb requires.
allowedCommands = map[string]models.AccessMode{
"git-upload-pack": models.ACCESS_MODE_READ,
"git-upload-archive": models.ACCESS_MODE_READ,
"git-receive-pack": models.ACCESS_MODE_WRITE,
"git-annex-shell": models.ACCESS_MODE_READ,
}
)
// runServ handles the "serv" command issued by the SSH forced-command
// mechanism: it parses SSH_ORIGINAL_COMMAND, resolves the repository,
// enforces key-based access control, and delegates to git or
// git-annex-shell via runGit.
func runServ(c *cli.Context) error {
setup(c, "serv.log", true)
if setting.SSH.Disabled {
println("Gins: SSH has been disabled")
return nil
}
// authorized_keys passes "key-<ID>" as the single argument.
if len(c.Args()) < 1 {
fail("Not enough arguments", "Not enough arguments")
}
// Single quotes are stripped up front; paths are re-derived below.
sshCmd := strings.Replace(os.Getenv("SSH_ORIGINAL_COMMAND"), "'", "", -1)
// NOTE(review): "commadn" is a typo in this log message.
log.Info("SSH commadn:%s", sshCmd)
// Interactive logins (no forced command) get a greeting, not a shell.
if len(sshCmd) == 0 {
println("Hi there, You've successfully authenticated, but Gin does not provide shell access.")
return nil
}
verb, path, args := parseSSHCmd(sshCmd)
repoFullName := strings.ToLower(strings.Trim(path, "'"))
repoFields := strings.SplitN(repoFullName, "/", 2)
if len(repoFields) != 2 {
fail("Invalid repository path", "Invalid repository path: %v", path)
}
ownerName := strings.ToLower(repoFields[0])
repoName := strings.TrimSuffix(strings.ToLower(repoFields[1]), ".git")
repoName = strings.TrimSuffix(repoName, ".wiki")
owner, err := models.GetUserByName(ownerName)
if err != nil {
if errors.IsUserNotExist(err) {
fail("Repository owner does not exist", "Unregistered owner: %s", ownerName)
}
fail("Internal error", "Fail to get repository owner '%s': %v", ownerName, err)
}
repo, err := models.GetRepositoryByName(owner.ID, repoName)
if err != nil {
if errors.IsRepoNotExist(err) {
fail(_ACCESS_DENIED_MESSAGE, "Repository does not exist: %s/%s", owner.Name, repoName)
}
fail("Internal error", "Fail to get repository: %v", err)
}
repo.Owner = owner
// Reject verbs we do not serve before any access decision.
requestMode, ok := allowedCommands[verb]
if !ok {
fail("Unknown git command", "Unknown git command '%s'", verb)
}
// Prohibit push to mirror repositories.
if requestMode > models.ACCESS_MODE_READ && repo.IsMirror {
fail("Mirror repository is read-only", "")
}
// Allow anonymous (user is nil) clone for public repositories.
var user *models.User
key, err := models.GetPublicKeyByID(com.StrTo(strings.TrimPrefix(c.Args()[0], "key-")).MustInt64())
if err != nil {
fail("Invalid key ID", "Invalid key ID '%s': %v", c.Args()[0], err)
}
if requestMode == models.ACCESS_MODE_WRITE || repo.IsPrivate {
// Check deploy key or user key.
if key.IsDeployKey() {
if key.Mode < requestMode {
fail("Key permission denied", "Cannot push with deployment key: %d", key.ID)
}
checkDeployKey(key, repo)
} else {
user, err = models.GetUserByKeyID(key.ID)
if err != nil {
fail("Internal error", "Fail to get user by key ID '%d': %v", key.ID, err)
}
mode, err := models.AccessLevel(user.ID, repo)
if err != nil {
fail("Internal error", "Fail to check access: %v", err)
}
if mode < requestMode {
clientMessage := _ACCESS_DENIED_MESSAGE
if mode >= models.ACCESS_MODE_READ {
clientMessage = "You do not have sufficient authorization for this action"
}
fail(clientMessage,
"User '%s' does not have level '%v' access to repository '%s'",
user.Name, requestMode, repoFullName)
}
}
} else {
setting.NewService()
// Check if the key can access to the repository in case of it is a deploy key (a deploy keys != user key).
// A deploy key doesn't represent a signed in user, so in a site with Service.RequireSignInView activated
// we should give read access only in repositories where this deploy key is in use. In other case, a server
// or system using an active deploy key can get read access to all the repositories in a Gogs service.
if key.IsDeployKey() && setting.Service.RequireSignInView {
checkDeployKey(key, repo)
}
}
// Update user key activity.
if key.ID > 0 {
key, err := models.GetPublicKeyByID(key.ID)
if err != nil {
fail("Internal error", "GetPublicKeyByID: %v", err)
}
key.Updated = time.Now()
if err = models.UpdatePublicKey(key); err != nil {
fail("Internal error", "UpdatePublicKey: %v", err)
}
}
// Special handle for Windows.
// Todo will break with annex
if setting.IsWindows {
verb = strings.Replace(verb, "-", " ", 1)
}
// Build the final command: split verb (Windows), annex invocation
// (path rewritten to the absolute repository path), or plain git.
verbs := strings.Split(verb, " ")
var cmd []string
if len(verbs) == 2 {
cmd = []string{verbs[0], verbs[1], repoFullName}
} else if isAnnexShell(verb) {
repoAbsPath := setting.RepoRootPath + "/" + repoFullName
if err := secureGitAnnex(repoAbsPath, user, repo); err != nil {
fail("Git annex failed", "Git annex failed: %s", err)
}
cmd = args
// Setting full path to repo as git-annex-shell requires it
cmd[2] = repoAbsPath
} else {
cmd = []string{verb, repoFullName}
}
runGit(cmd, requestMode, user, owner, repo)
return nil
}
// runGit executes the prepared git (or git-annex-shell) command with
// this process's stdio attached. Write requests get the composed hook
// environment so server-side hooks can identify the authenticated user
// and repository. A non-zero child exit status terminates this process
// with the same status; any other execution failure is returned.
func runGit(cmd []string, requestMode models.AccessMode, user *models.User, owner *models.User,
	repo *models.Repository) error {
	log.Info("will execute:%s", cmd)
	gitCmd := exec.Command(cmd[0], cmd[1:]...)
	if requestMode == models.ACCESS_MODE_WRITE {
		// Hook environment for update/post-receive hooks on push.
		gitCmd.Env = append(os.Environ(), http.ComposeHookEnvs(http.ComposeHookEnvsOptions{
			AuthUser:  user,
			OwnerName: owner.Name,
			OwnerSalt: owner.Salt,
			RepoID:    repo.ID,
			RepoName:  repo.Name,
			RepoPath:  repo.RepoPath(),
		})...)
	}
	gitCmd.Dir = setting.RepoRootPath
	gitCmd.Stdout = os.Stdout
	gitCmd.Stdin = os.Stdin
	gitCmd.Stderr = os.Stderr
	log.Info("args:%s", gitCmd.Args)
	err := gitCmd.Run()
	if err == nil {
		return nil
	}
	log.Info("err:%s", err)
	if t, ok := err.(*exec.ExitError); ok {
		// Propagate the child's exit status to the SSH client.
		os.Exit(t.Sys().(syscall.WaitStatus).ExitStatus())
	}
	// BUGFIX: non-ExitError failures (e.g. missing binary) were silently
	// dropped and nil returned; surface them to the caller instead.
	return err
}
// secureGitAnnex sandboxes git-annex-shell via its environment
// variables: unknown commands are disallowed, operations are confined
// to path, and read-only mode is enforced unless the requesting user
// has write access to repo. (Refactored from repo.)
func secureGitAnnex(path string, user *models.User, repo *models.Repository) error {
	// "If set, disallows running git-shell to handle unknown commands."
	if err := os.Setenv("GIT_ANNEX_SHELL_LIMITED", "True"); err != nil {
		// BUGFIX: message previously read "Could set ..." (missing "not").
		return fmt.Errorf("ERROR: Could not set annex shell to be limited.")
	}
	// "If set, git-annex-shell will refuse to run commands
	// that do not operate on the specified directory."
	if err := os.Setenv("GIT_ANNEX_SHELL_DIRECTORY", path); err != nil {
		return fmt.Errorf("ERROR: Could not set annex shell directory.")
	}
	// BUGFIX: user is nil for anonymous access to public repositories
	// (runServ only assigns it on the write/private branch), so the
	// unconditional user.ID dereference used to panic. Anonymous access
	// is always read-only.
	readonly := true
	if user != nil {
		mode, err := models.AccessLevel(user.ID, repo)
		if err != nil {
			fail("Internal error", "Fail to check access: %v", err)
		}
		readonly = mode < models.ACCESS_MODE_WRITE
	}
	if readonly {
		if err := os.Setenv("GIT_ANNEX_SHELL_READONLY", "True"); err != nil {
			return fmt.Errorf("ERROR: Could not set annex shell to read only.")
		}
	}
	return nil
}
|
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bufio"
"bytes"
"context"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"encoding/asn1"
"encoding/hex"
"encoding/pem"
"fmt"
"math/big"
"net/http"
"os"
"path/filepath"
"strings"
"github.com/fatih/color"
"github.com/minio/mc/pkg/probe"
)
func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, e error) {
// pkcs1PublicKey reflects the ASN.1 structure of a PKCS #1 public key.
type pkcs1PublicKey struct {
N *big.Int
E int
}
switch pub := pub.(type) {
case *rsa.PublicKey:
publicKeyBytes, e = asn1.Marshal(pkcs1PublicKey{
N: pub.N,
E: pub.E,
})
if e != nil {
return nil, e
}
case *ecdsa.PublicKey:
publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
case ed25519.PublicKey:
publicKeyBytes = pub
default:
return nil, fmt.Errorf("x509: unsupported public key type: %T", pub)
}
return publicKeyBytes, nil
}
// promptTrustSelfSignedCert connects to the given endpoint and
// checks whether the peer certificate can be verified.
// If not, it computes a fingerprint of the peer certificate
// public key, asks the user to confirm the fingerprint and
// adds the peer certificate to the local trust store in the
// CAs directory. Returns (nil, nil) when no action is needed
// (http endpoint or already-trusted certificate).
func promptTrustSelfSignedCert(ctx context.Context, endpoint, alias string) (*x509.Certificate, *probe.Error) {
req, e := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
if e != nil {
return nil, probe.NewError(e)
}
// no need to probe certs for http endpoints.
if req.URL.Scheme == "http" {
return nil, nil
}
client := http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: &tls.Config{
RootCAs: globalRootCAs, // make sure to use loaded certs before probing
},
},
}
_, te := client.Do(req)
if te == nil {
// certs are already trusted system wide, nothing to do.
return nil, nil
}
// Only the "unknown authority" failure is recoverable by trusting the
// peer certificate; any other error is surfaced as-is.
if te != nil && !strings.Contains(te.Error(), "certificate signed by unknown authority") {
return nil, probe.NewError(te)
}
// Now, we fetch the peer certificate, compute the SHA-256 of
// public key and let the user confirm the fingerprint.
// If the user confirms, we store the peer certificate in the CAs
// directory and retry.
peerCert, e := fetchPeerCertificate(ctx, endpoint)
if e != nil {
return nil, probe.NewError(e)
}
if peerCert.IsCA && len(peerCert.AuthorityKeyId) == 0 {
// If peerCert is its own CA then AuthorityKeyId will be empty
// which means the SubjeyKeyId is the sha1.Sum(publicKeyBytes)
// Refer - SubjectKeyId generated using method 1 in RFC 5280, Section 4.2.1.2:
publicKeyBytes, e := marshalPublicKey(peerCert.PublicKey)
if e != nil {
return nil, probe.NewError(e)
}
h := sha1.Sum(publicKeyBytes)
if !bytes.Equal(h[:], peerCert.SubjectKeyId) {
return nil, probe.NewError(te)
}
} else {
// Check that the subject key id is equal to the authority key id.
// If true, the certificate is its own issuer, and therefore, a
// self-signed certificate. Otherwise, the certificate has been
// issued by some other certificate that is just not trusted.
if !bytes.Equal(peerCert.SubjectKeyId, peerCert.AuthorityKeyId) {
return nil, probe.NewError(te)
}
}
// Fingerprint of the SubjectPublicKeyInfo, confirmed interactively.
fingerprint := sha256.Sum256(peerCert.RawSubjectPublicKeyInfo)
fmt.Printf("Fingerprint of %s public key: %s\nConfirm public key y/N: ", color.GreenString(alias), color.YellowString(hex.EncodeToString(fingerprint[:])))
answer, e := bufio.NewReader(os.Stdin).ReadString('\n')
if e != nil {
return nil, probe.NewError(e)
}
if answer = strings.ToLower(answer); answer != "y\n" && answer != "yes\n" {
return nil, probe.NewError(te)
}
// Persist the confirmed certificate into the local CAs directory.
certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: peerCert.Raw})
if e = os.WriteFile(filepath.Join(mustGetCAsDir(), alias+".crt"), certPEM, 0o644); e != nil {
return nil, probe.NewError(e)
}
return peerCert, nil
}
// fetchPeerCertificate dials endpoint with certificate verification
// intentionally disabled and returns the peer's leaf certificate.
// The caller is responsible for validating/trusting the result.
func fetchPeerCertificate(ctx context.Context, endpoint string) (*x509.Certificate, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return nil, err
	}
	client := http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	// BUGFIX: the response body was never closed, leaking the underlying
	// connection.
	defer resp.Body.Close()
	if resp.TLS == nil || len(resp.TLS.PeerCertificates) == 0 {
		return nil, fmt.Errorf("Unable to read remote TLS certificate")
	}
	return resp.TLS.PeerCertificates[0], nil
}
alias: Enable self-signed TLS prompt on Darwin platforms (#4094)
The untrusted-TLS error message has a different string representation on
the Darwin platform. Since there is no other way to test for this specific
error with the Golang API, we need to support this alternate error message.
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bufio"
"bytes"
"context"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"encoding/asn1"
"encoding/hex"
"encoding/pem"
"fmt"
"math/big"
"net/http"
"os"
"path/filepath"
"strings"
"github.com/fatih/color"
"github.com/minio/mc/pkg/probe"
)
func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, e error) {
// pkcs1PublicKey reflects the ASN.1 structure of a PKCS #1 public key.
type pkcs1PublicKey struct {
N *big.Int
E int
}
switch pub := pub.(type) {
case *rsa.PublicKey:
publicKeyBytes, e = asn1.Marshal(pkcs1PublicKey{
N: pub.N,
E: pub.E,
})
if e != nil {
return nil, e
}
case *ecdsa.PublicKey:
publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
case ed25519.PublicKey:
publicKeyBytes = pub
default:
return nil, fmt.Errorf("x509: unsupported public key type: %T", pub)
}
return publicKeyBytes, nil
}
// promptTrustSelfSignedCert connects to the given endpoint and
// checks whether the peer certificate can be verified.
// If not, it computes a fingerprint of the peer certificate
// public key, asks the user to confirm the fingerprint and
// adds the peer certificate to the local trust store in the
// CAs directory. Returns (nil, nil) when no action is needed
// (http endpoint or already-trusted certificate).
func promptTrustSelfSignedCert(ctx context.Context, endpoint, alias string) (*x509.Certificate, *probe.Error) {
req, e := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
if e != nil {
return nil, probe.NewError(e)
}
// no need to probe certs for http endpoints.
if req.URL.Scheme == "http" {
return nil, nil
}
client := http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: &tls.Config{
RootCAs: globalRootCAs, // make sure to use loaded certs before probing
},
},
}
_, te := client.Do(req)
if te == nil {
// certs are already trusted system wide, nothing to do.
return nil, nil
}
// Only the "unknown authority" class of failures (including the
// Darwin-specific wording) is recoverable by trusting the peer cert.
if te != nil && !strings.Contains(te.Error(), "certificate signed by unknown authority") &&
!strings.Contains(te.Error(), "certificate is not trusted") /* darwin specific error message */ {
return nil, probe.NewError(te)
}
// Now, we fetch the peer certificate, compute the SHA-256 of
// public key and let the user confirm the fingerprint.
// If the user confirms, we store the peer certificate in the CAs
// directory and retry.
peerCert, e := fetchPeerCertificate(ctx, endpoint)
if e != nil {
return nil, probe.NewError(e)
}
if peerCert.IsCA && len(peerCert.AuthorityKeyId) == 0 {
// If peerCert is its own CA then AuthorityKeyId will be empty
// which means the SubjeyKeyId is the sha1.Sum(publicKeyBytes)
// Refer - SubjectKeyId generated using method 1 in RFC 5280, Section 4.2.1.2:
publicKeyBytes, e := marshalPublicKey(peerCert.PublicKey)
if e != nil {
return nil, probe.NewError(e)
}
h := sha1.Sum(publicKeyBytes)
if !bytes.Equal(h[:], peerCert.SubjectKeyId) {
return nil, probe.NewError(te)
}
} else {
// Check that the subject key id is equal to the authority key id.
// If true, the certificate is its own issuer, and therefore, a
// self-signed certificate. Otherwise, the certificate has been
// issued by some other certificate that is just not trusted.
if !bytes.Equal(peerCert.SubjectKeyId, peerCert.AuthorityKeyId) {
return nil, probe.NewError(te)
}
}
// Fingerprint of the SubjectPublicKeyInfo, confirmed interactively.
fingerprint := sha256.Sum256(peerCert.RawSubjectPublicKeyInfo)
fmt.Printf("Fingerprint of %s public key: %s\nConfirm public key y/N: ", color.GreenString(alias), color.YellowString(hex.EncodeToString(fingerprint[:])))
answer, e := bufio.NewReader(os.Stdin).ReadString('\n')
if e != nil {
return nil, probe.NewError(e)
}
if answer = strings.ToLower(answer); answer != "y\n" && answer != "yes\n" {
return nil, probe.NewError(te)
}
// Persist the confirmed certificate into the local CAs directory.
certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: peerCert.Raw})
if e = os.WriteFile(filepath.Join(mustGetCAsDir(), alias+".crt"), certPEM, 0o644); e != nil {
return nil, probe.NewError(e)
}
return peerCert, nil
}
// fetchPeerCertificate dials endpoint with certificate verification
// intentionally disabled and returns the peer's leaf certificate.
// The caller is responsible for validating/trusting the result.
func fetchPeerCertificate(ctx context.Context, endpoint string) (*x509.Certificate, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return nil, err
	}
	client := http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	// BUGFIX: the response body was never closed, leaking the underlying
	// connection.
	defer resp.Body.Close()
	if resp.TLS == nil || len(resp.TLS.PeerCertificates) == 0 {
		return nil, fmt.Errorf("Unable to read remote TLS certificate")
	}
	return resp.TLS.PeerCertificates[0], nil
}
|
package conveymetric
import (
"github.com/Comcast/webpa-common/convey"
"github.com/go-kit/kit/metrics"
)
// UnknownLabel is a constant for when key/tag can not be found in the C JSON
const UnknownLabel = "unknown"
// MetricClosure will be returned after update of struct, this should be used to update the struct, aka decrement the count
type MetricClosure func()
// CMetric tracks a gauge keyed by a value extracted from convey C JSON.
type CMetric interface {
// Update takes the convey JSON to update internal struct, and return a closure to update the struct again, or an
// error
//
// Note: MetricClosure should only be called once.
Update(data convey.C) (MetricClosure, error)
}
// NewConveyMetric builds a CMetric backed by gauge. tag names the key
// looked up in the convey C JSON, and metricName is the gauge label
// (`key`) under which the extracted value is recorded.
//
// Note: the Gauge must declare metricName as one of its constant labels.
func NewConveyMetric(gauge metrics.Gauge, tag string, metricName string) CMetric {
	m := cMetric{
		gauge:      gauge,
		tag:        tag,
		metricName: metricName,
	}
	return &m
}
// cMetric is the internal CMetric implementation
type cMetric struct {
// tag is the key looked up in the convey C JSON.
tag string
// metricName is the gauge label under which values are recorded.
metricName string
// gauge is the underlying go-kit gauge being incremented/decremented.
gauge metrics.Gauge
}
// Update increments the gauge series for the string found under the
// configured tag (UnknownLabel when absent or not a string) and returns
// a closure that decrements that same series. The closure must be
// invoked at most once.
func (m *cMetric) Update(data convey.C) (MetricClosure, error) {
	label := UnknownLabel
	if v, ok := data[m.tag].(string); ok {
		label = v
	}
	m.gauge.With(m.metricName, label).Add(1.0)
	decrement := func() { m.gauge.With(m.metricName, label).Add(-1.0) }
	return decrement, nil
}
rename metricName to label
package conveymetric
import (
"github.com/Comcast/webpa-common/convey"
"github.com/go-kit/kit/metrics"
)
// UnknownLabel is a constant for when key/tag can not be found in the C JSON
const UnknownLabel = "unknown"
// MetricClosure will be returned after update of struct, this should be used to update the struct, aka decrement the count
type MetricClosure func()
// CMetric tracks a gauge keyed by a value extracted from convey C JSON.
type CMetric interface {
// Update takes the convey JSON to update internal struct, and return a closure to update the struct again, or an
// error
//
// Note: MetricClosure should only be called once.
Update(data convey.C) (MetricClosure, error)
}
// NewConveyMetric builds a CMetric backed by gauge. tag names the key
// looked up in the convey C JSON, and label is the gauge label (`key`)
// under which the extracted value is recorded.
//
// Note: the Gauge must declare label as one of its constant labels
// (aka. the name of the gauge).
func NewConveyMetric(gauge metrics.Gauge, tag string, label string) CMetric {
	m := cMetric{
		gauge: gauge,
		tag:   tag,
		label: label,
	}
	return &m
}
// cMetric is the internal CMetric implementation
type cMetric struct {
	tag   string        // key looked up in the incoming convey JSON
	label string        // gauge label name under which the value is recorded
	gauge metrics.Gauge // underlying gauge that is incremented/decremented
}
func (m *cMetric) Update(data convey.C) (MetricClosure, error) {
key := UnknownLabel
if item, ok := data[m.tag].(string); ok {
key = item
}
m.gauge.With(m.label, key).Add(1.0)
return func() { m.gauge.With(m.label, key).Add(-1.0) }, nil
} |
package main
import (
"container/list"
"errors"
log "github.com/Sirupsen/logrus"
"github.com/gtfierro/cs262-project/common"
"sync"
"time"
)
// MessageHandler processes a single message received from a broker connection.
type MessageHandler func(*MessageFromBroker)

// BrokerManager tracks the set of known brokers and their liveness, and
// routes messages to and from them.
type BrokerManager interface {
	ConnectBroker(brokerInfo *common.BrokerInfo, commConn CommConn) (err error)
	GetBrokerAddr(brokerID common.UUID) *string
	GetBrokerInfo(brokerID common.UUID) *common.BrokerInfo
	IsBrokerAlive(brokerID common.UUID) bool
	GetLiveBroker() *Broker
	TerminateBroker(brokerID common.UUID)
	SendToBroker(brokerID common.UUID, message common.Sendable) (err error)
	BroadcastToBrokers(message common.Sendable, skipBrokerID *common.UUID)
	HandlePubClientRemapping(msg *common.BrokerRequestMessage) (*common.BrokerAssignmentMessage, error)
	RebuildFromEtcd(upToRev int64) (err error)
}
// MessageFromBroker pairs an incoming message with the broker it came from.
type MessageFromBroker struct {
	message common.Sendable
	broker  *Broker
}
// BrokerManagerImpl is the concrete BrokerManager. The two lookup maps are
// guarded by mapLock; the live/dead queues are guarded by queueLock.
type BrokerManagerImpl struct {
	etcdManager       EtcdManager
	brokerMap         map[common.UUID]*Broker
	brokerAddrMap     map[string]*Broker // Map of Broker contact address -> broker
	mapLock           sync.RWMutex
	heartbeatInterval time.Duration
	clock             common.Clock
	deadBrokerQueue   *list.List // brokers currently believed dead
	liveBrokerQueue   *list.List // brokers currently believed alive
	queueLock         sync.Mutex
	internalDeathChan chan *Broker // Brokers report their own death here
	BrokerDeathChan   chan *common.UUID // Written to when a broker dies
	BrokerLiveChan    chan *common.UUID // Written to when a broker comes alive
	BrokerReassignChan chan *BrokerReassignment // Written to when a client or pub is reassigned
	MessageBuffer     chan *MessageFromBroker // Buffers incoming messages meant for other systems
}
// NewBrokerManager builds a BrokerManagerImpl wired to the given etcd manager,
// clock, and notification channels, and starts the death-monitoring goroutine.
func NewBrokerManager(etcdMgr EtcdManager, heartbeatInterval time.Duration, brokerDeathChan, brokerLiveChan chan *common.UUID,
	messageBuffer chan *MessageFromBroker, brokerReassignChan chan *BrokerReassignment, clock common.Clock) *BrokerManagerImpl {
	// The zero values of mapLock/queueLock are ready to use, so a composite
	// literal covers everything the field-by-field version assigned.
	bm := &BrokerManagerImpl{
		etcdManager:        etcdMgr,
		brokerMap:          make(map[common.UUID]*Broker),
		brokerAddrMap:      make(map[string]*Broker),
		heartbeatInterval:  heartbeatInterval,
		clock:              clock,
		deadBrokerQueue:    list.New(),
		liveBrokerQueue:    list.New(),
		internalDeathChan:  make(chan *Broker, 10),
		MessageBuffer:      messageBuffer,
		BrokerDeathChan:    brokerDeathChan,
		BrokerLiveChan:     brokerLiveChan,
		BrokerReassignChan: brokerReassignChan,
	}
	go bm.monitorDeathChan()
	return bm
}
// If broker already exists in the mapping, this should be a reconnection
// so simply update with the new connection and restart
// Otherwise, create a new Broker, put it into the map, and start it
// if commConn is nil, create the Broker and everything, but *don't start it*
func (bm *BrokerManagerImpl) ConnectBroker(brokerInfo *common.BrokerInfo, commConn CommConn) (err error) {
	// Cheap read-locked lookup; the write lock is only taken below when a
	// brand-new Broker has to be inserted.
	bm.mapLock.RLock()
	brokerConn, ok := bm.brokerMap[brokerInfo.BrokerID]
	bm.mapLock.RUnlock()
	if ok {
		//brokerConn.WaitForCleanup()
		// Reconnection: pull the broker out of whichever queue it currently
		// occupies; it is re-queued as live at the end of this function.
		bm.queueLock.Lock()
		if brokerConn.IsAlive() {
			brokerConn.RemoveFromList(bm.liveBrokerQueue)
		} else {
			brokerConn.RemoveFromList(bm.deadBrokerQueue)
		}
		bm.queueLock.Unlock()
	} else {
		// First contact: build the Broker, register it in both lookup maps,
		// and persist it via etcd.
		messageHandler := bm.createMessageHandler(brokerInfo.BrokerID)
		brokerConn = NewBroker(brokerInfo, messageHandler, bm.heartbeatInterval,
			bm.clock, bm.internalDeathChan)
		bm.mapLock.Lock()
		bm.brokerMap[brokerInfo.BrokerID] = brokerConn
		bm.brokerAddrMap[brokerInfo.ClientBrokerAddr] = brokerConn
		bm.etcdManager.UpdateEntity(brokerConn.ToSerializable())
		bm.mapLock.Unlock()
	}
	if commConn != nil {
		brokerConn.StartAsynchronously(commConn)
	}
	bm.queueLock.Lock()
	brokerConn.PushToList(bm.liveBrokerQueue)
	bm.queueLock.Unlock()
	// NOTE(review): this send blocks until BrokerLiveChan is drained — confirm
	// a reader is always present.
	bm.BrokerLiveChan <- &brokerInfo.BrokerID
	return
}
// GetBrokerAddr returns the coordinator-facing address of the broker with the
// given ID, or nil (after logging a warning) if no such broker is registered.
func (bm *BrokerManagerImpl) GetBrokerAddr(brokerID common.UUID) *string {
	bm.mapLock.RLock()
	bconn, ok := bm.brokerMap[brokerID]
	bm.mapLock.RUnlock()
	if ok {
		return &bconn.CoordBrokerAddr
	}
	log.WithField("brokerID", brokerID).Warn("Attempted to get address of a nonexistent broker")
	return nil
}
// GetBrokerInfo returns the BrokerInfo of the broker with the given ID, or
// nil (after logging a warning) if no such broker is registered.
func (bm *BrokerManagerImpl) GetBrokerInfo(brokerID common.UUID) *common.BrokerInfo {
	bm.mapLock.RLock()
	bconn, ok := bm.brokerMap[brokerID]
	bm.mapLock.RUnlock()
	if ok {
		return &bconn.BrokerInfo
	}
	log.WithField("brokerID", brokerID).Warn("Attempted to get info of a nonexistent broker")
	return nil
}
// IsBrokerAlive reports whether the broker with the given ID is currently
// considered alive; unknown brokers log a warning and count as dead.
func (bm *BrokerManagerImpl) IsBrokerAlive(brokerID common.UUID) bool {
	bm.mapLock.RLock()
	bconn, ok := bm.brokerMap[brokerID]
	bm.mapLock.RUnlock()
	if ok {
		return bconn.IsAlive()
	}
	log.WithField("brokerID", brokerID).Warn("Attempted to check liveness of a nonexistent broker")
	return false
}
// Get an arbitrary live broker; for redirecting clients/publishers with a dead main broker
// Will return nil if none are available
func (bm *BrokerManagerImpl) GetLiveBroker() *Broker {
	bm.queueLock.Lock()
	defer bm.queueLock.Unlock()
	tempDeadList := list.New()
	// Store until the end so that we can push the dead ones back onto
	// the live list; we don't want to mess up the routines that will
	// be moving them later
	defer func() {
		for tempDeadList.Len() > 0 {
			e := tempDeadList.Front()
			b := e.Value.(*Broker)
			b.RemoveFromList(tempDeadList)
			b.PushToList(bm.liveBrokerQueue)
		}
	}()
	// Rotate through the live queue until a broker that is actually alive is
	// found; stale entries are parked on tempDeadList in the meantime.
	for {
		elem := bm.liveBrokerQueue.Front()
		if elem == nil {
			// Queue exhausted: every candidate turned out to be stale.
			return nil
		}
		bconn := elem.Value.(*Broker)
		bconn.RemoveFromList(bm.liveBrokerQueue)
		if bconn.IsAlive() { // Double check liveness to be sure
			bconn.PushToList(bm.liveBrokerQueue)
			return bconn
		} else {
			bconn.PushToList(tempDeadList)
		}
	}
}
// Terminate the Broker and remove from our map
func (bm *BrokerManagerImpl) TerminateBroker(brokerID common.UUID) {
	bm.mapLock.Lock()
	defer bm.mapLock.Unlock()
	broker, found := bm.brokerMap[brokerID]
	if !found {
		log.WithField("brokerID", brokerID).Warn("Attempted to terminate nonexistent broker")
		return
	}
	log.WithField("brokerID", brokerID).Info("BrokerManagerImpl terminating broker")
	delete(bm.brokerMap, brokerID)
	delete(bm.brokerAddrMap, broker.ClientBrokerAddr)
	bm.etcdManager.DeleteEntity(broker.ToSerializable())
	broker.Terminate()
}
// Asynchronously send a message to the given broker
func (bm *BrokerManagerImpl) SendToBroker(brokerID common.UUID, message common.Sendable) (err error) {
	bm.mapLock.RLock()
	defer bm.mapLock.RUnlock()
	if brokerConn, ok := bm.brokerMap[brokerID]; ok {
		brokerConn.Send(message)
		return nil
	}
	log.WithField("brokerID", brokerID).Warn("Attempted to send message to nonexistent broker")
	return errors.New("Broker was unable to be found")
}
// Asynchronously send a message to all currently living brokers.
// Automatically adjusts the MessageID of the messages to be distinct
// if the message contains it.
// If skipBrokerID is non-nil, the broker with that ID is skipped.
func (bm *BrokerManagerImpl) BroadcastToBrokers(message common.Sendable, skipBrokerID *common.UUID) {
	bm.mapLock.RLock()
	defer bm.mapLock.RUnlock()
	switch msg := message.(type) {
	case common.SendableWithID:
		for id, brokerConn := range bm.brokerMap {
			// Bug fix: the old condition (skipBrokerID != nil && ...) sent to
			// no brokers at all when skipBrokerID was nil.
			if skipBrokerID == nil || *skipBrokerID != id {
				// To set the ID to be different we have to make copies of the object
				m := msg.Copy()
				m.SetID(common.GetMessageID())
				brokerConn.Send(m)
			}
		}
	case common.Sendable:
		for id, brokerConn := range bm.brokerMap {
			// Bug fix: honor skipBrokerID for plain Sendables too, as the
			// documented contract states.
			if skipBrokerID == nil || *skipBrokerID != id {
				brokerConn.Send(message)
			}
		}
	}
}
// First confirm the lack of liveness of the home broker.
// If the home broker is still alive, simply redirect the client back there and do nothing else.
// If it is confirmed dead, pick a random live broker and redirect the client there.
func (bm *BrokerManagerImpl) HandlePubClientRemapping(msg *common.BrokerRequestMessage) (*common.BrokerAssignmentMessage, error) {
	bm.mapLock.RLock()
	homeBroker, found := bm.brokerAddrMap[msg.LocalBrokerAddr]
	// Bug fix: previously the RLock was never released on the not-found path,
	// permanently blocking all writers of mapLock.
	bm.mapLock.RUnlock()
	if !found {
		return nil, errors.New("Local broker could not be found")
	}
	var brokerDead bool
	if !homeBroker.IsAlive() {
		brokerDead = true
	} else {
		// Broker claims to be alive; confirm with an explicit heartbeat.
		brokerDead = !homeBroker.RequestHeartbeatAndWait()
		homeBroker.WaitForCleanup()
	}
	var newBroker *Broker
	if brokerDead { // actions to take if broker determined dead
		newBroker = bm.GetLiveBroker()
		bm.BrokerReassignChan <- &BrokerReassignment{
			HomeBrokerID: &homeBroker.BrokerID,
			IsPublisher:  msg.IsPublisher,
			UUID:         &msg.UUID,
		}
		// Bug fix: GetLiveBroker returns nil when no live broker exists; the
		// old code dereferenced it below and panicked.
		if newBroker == nil {
			return nil, errors.New("No live broker available for reassignment")
		}
	} else {
		newBroker = homeBroker // no real redirect necessary
	}
	return &common.BrokerAssignmentMessage{
		BrokerInfo: common.BrokerInfo{
			BrokerID:         newBroker.BrokerID,
			CoordBrokerAddr:  newBroker.CoordBrokerAddr,
			ClientBrokerAddr: newBroker.ClientBrokerAddr,
		},
	}, nil
}
// createMessageHandler returns the MessageHandler invoked for every message
// arriving on the given broker's connection. Connect and terminate messages
// are handled here; everything else is forwarded to MessageBuffer.
func (bm *BrokerManagerImpl) createMessageHandler(brokerID common.UUID) MessageHandler {
	return func(brokerMessage *MessageFromBroker) {
		message := brokerMessage.message
		switch msg := message.(type) {
		case *common.BrokerConnectMessage:
			// A connect should arrive on a fresh connection, never an existing one.
			log.WithFields(log.Fields{
				"connBrokerID": brokerID, "newBrokerID": msg.BrokerID, "newBrokerAddr": msg.CoordBrokerAddr,
			}).Warn("Received a BrokerConnectMessage over an existing broker connection")
		case *common.BrokerTerminateMessage:
			bm.TerminateBroker(brokerMessage.broker.BrokerID)
			brokerMessage.broker.Send(&common.AcknowledgeMessage{MessageID: msg.MessageID})
		default:
			// Not manager-level traffic; hand it off to the rest of the system.
			bm.MessageBuffer <- brokerMessage
		}
	}
}
// Monitor Broker death, moving the dying broker to the correct
// queue. Also forward the broker on to the outward Death chan.
func (bm *BrokerManagerImpl) monitorDeathChan() {
	for {
		deadBroker, ok := <-bm.internalDeathChan
		if !ok {
			// Channel closed: nothing more to monitor.
			// Bug fix: the old code logged deadBroker.BrokerInfo before this
			// check, dereferencing a nil *Broker once the channel was closed.
			return
		}
		log.WithField("broker", deadBroker.BrokerInfo).Debug("Broker determined as dead!")
		bm.queueLock.Lock()
		deadBroker.RemoveFromList(bm.liveBrokerQueue)
		deadBroker.PushToList(bm.deadBrokerQueue)
		bm.queueLock.Unlock()
		bm.BrokerDeathChan <- &deadBroker.BrokerID // Forward on to outward death chan
		bm.BroadcastToBrokers(&common.BrokerDeathMessage{BrokerInfo: deadBroker.BrokerInfo}, &deadBroker.BrokerID)
	}
}
// rebuildBrokerState re-registers a single broker deserialized from etcd.
// commConn is nil, so the Broker is recreated but not started.
// NOTE(review): the error returned by ConnectBroker is discarded — confirm
// that is intentional.
func (bm *BrokerManagerImpl) rebuildBrokerState(entity EtcdSerializable) {
	serBroker := entity.(*SerializableBroker)
	bm.ConnectBroker(&serBroker.BrokerInfo, nil)
}
// RebuildFromEtcd re-creates broker state from etcd up to the given revision.
// Note: log.Fatal terminates the process, so the return statement inside the
// if is effectively unreachable.
func (bm *BrokerManagerImpl) RebuildFromEtcd(upToRev int64) (err error) {
	err = bm.etcdManager.IterateOverAllEntities(BrokerEntity, upToRev, bm.rebuildBrokerState)
	if err != nil {
		log.WithField("error", err).Fatal("Error while iterating over Brokers when rebuilding")
		return
	}
	return
}
Last attempt to speed up failover
package main
import (
"container/list"
"errors"
log "github.com/Sirupsen/logrus"
"github.com/gtfierro/cs262-project/common"
"sync"
"time"
)
// MessageHandler processes a single message received from a broker connection.
type MessageHandler func(*MessageFromBroker)

// BrokerManager tracks the set of known brokers and their liveness, and
// routes messages to and from them.
type BrokerManager interface {
	ConnectBroker(brokerInfo *common.BrokerInfo, commConn CommConn) (err error)
	GetBrokerAddr(brokerID common.UUID) *string
	GetBrokerInfo(brokerID common.UUID) *common.BrokerInfo
	IsBrokerAlive(brokerID common.UUID) bool
	GetLiveBroker() *Broker
	TerminateBroker(brokerID common.UUID)
	SendToBroker(brokerID common.UUID, message common.Sendable) (err error)
	BroadcastToBrokers(message common.Sendable, skipBrokerID *common.UUID)
	HandlePubClientRemapping(msg *common.BrokerRequestMessage) (*common.BrokerAssignmentMessage, error)
	RebuildFromEtcd(upToRev int64) (err error)
}
// MessageFromBroker pairs an incoming message with the broker it came from.
type MessageFromBroker struct {
	message common.Sendable
	broker  *Broker
}
// BrokerManagerImpl is the concrete BrokerManager. The two lookup maps are
// guarded by mapLock; the live/dead queues are guarded by queueLock.
type BrokerManagerImpl struct {
	etcdManager       EtcdManager
	brokerMap         map[common.UUID]*Broker
	brokerAddrMap     map[string]*Broker // Map of Broker contact address -> broker
	mapLock           sync.RWMutex
	heartbeatInterval time.Duration
	clock             common.Clock
	deadBrokerQueue   *list.List // brokers currently believed dead
	liveBrokerQueue   *list.List // brokers currently believed alive
	queueLock         sync.Mutex
	internalDeathChan chan *Broker // Brokers report their own death here
	BrokerDeathChan   chan *common.UUID // Written to when a broker dies
	BrokerLiveChan    chan *common.UUID // Written to when a broker comes alive
	BrokerReassignChan chan *BrokerReassignment // Written to when a client or pub is reassigned
	MessageBuffer     chan *MessageFromBroker // Buffers incoming messages meant for other systems
}
// NewBrokerManager builds a BrokerManagerImpl wired to the given etcd manager,
// clock, and notification channels, and starts the death-monitoring goroutine.
func NewBrokerManager(etcdMgr EtcdManager, heartbeatInterval time.Duration, brokerDeathChan, brokerLiveChan chan *common.UUID,
	messageBuffer chan *MessageFromBroker, brokerReassignChan chan *BrokerReassignment, clock common.Clock) *BrokerManagerImpl {
	// The zero values of mapLock/queueLock are ready to use, so a composite
	// literal covers everything the field-by-field version assigned.
	bm := &BrokerManagerImpl{
		etcdManager:        etcdMgr,
		brokerMap:          make(map[common.UUID]*Broker),
		brokerAddrMap:      make(map[string]*Broker),
		heartbeatInterval:  heartbeatInterval,
		clock:              clock,
		deadBrokerQueue:    list.New(),
		liveBrokerQueue:    list.New(),
		internalDeathChan:  make(chan *Broker, 10),
		MessageBuffer:      messageBuffer,
		BrokerDeathChan:    brokerDeathChan,
		BrokerLiveChan:     brokerLiveChan,
		BrokerReassignChan: brokerReassignChan,
	}
	go bm.monitorDeathChan()
	return bm
}
// If broker already exists in the mapping, this should be a reconnection
// so simply update with the new connection and restart
// Otherwise, create a new Broker, put it into the map, and start it
// if commConn is nil, create the Broker and everything, but *don't start it*
func (bm *BrokerManagerImpl) ConnectBroker(brokerInfo *common.BrokerInfo, commConn CommConn) (err error) {
	// Cheap read-locked lookup; the write lock is only taken below when a
	// brand-new Broker has to be inserted.
	bm.mapLock.RLock()
	brokerConn, ok := bm.brokerMap[brokerInfo.BrokerID]
	bm.mapLock.RUnlock()
	if ok {
		//brokerConn.WaitForCleanup()
		// Reconnection: pull the broker out of whichever queue it currently
		// occupies; it is re-queued as live at the end of this function.
		bm.queueLock.Lock()
		if brokerConn.IsAlive() {
			brokerConn.RemoveFromList(bm.liveBrokerQueue)
		} else {
			brokerConn.RemoveFromList(bm.deadBrokerQueue)
		}
		bm.queueLock.Unlock()
	} else {
		// First contact: build the Broker, register it in both lookup maps,
		// and persist it via etcd.
		messageHandler := bm.createMessageHandler(brokerInfo.BrokerID)
		brokerConn = NewBroker(brokerInfo, messageHandler, bm.heartbeatInterval,
			bm.clock, bm.internalDeathChan)
		bm.mapLock.Lock()
		bm.brokerMap[brokerInfo.BrokerID] = brokerConn
		bm.brokerAddrMap[brokerInfo.ClientBrokerAddr] = brokerConn
		bm.etcdManager.UpdateEntity(brokerConn.ToSerializable())
		bm.mapLock.Unlock()
	}
	if commConn != nil {
		brokerConn.StartAsynchronously(commConn)
	}
	bm.queueLock.Lock()
	brokerConn.PushToList(bm.liveBrokerQueue)
	bm.queueLock.Unlock()
	// NOTE(review): this send blocks until BrokerLiveChan is drained — confirm
	// a reader is always present.
	bm.BrokerLiveChan <- &brokerInfo.BrokerID
	return
}
// GetBrokerAddr returns the coordinator-facing address of the broker with the
// given ID, or nil (after logging a warning) if no such broker is registered.
func (bm *BrokerManagerImpl) GetBrokerAddr(brokerID common.UUID) *string {
	bm.mapLock.RLock()
	bconn, ok := bm.brokerMap[brokerID]
	bm.mapLock.RUnlock()
	if ok {
		return &bconn.CoordBrokerAddr
	}
	log.WithField("brokerID", brokerID).Warn("Attempted to get address of a nonexistent broker")
	return nil
}
// GetBrokerInfo returns the BrokerInfo of the broker with the given ID, or
// nil (after logging a warning) if no such broker is registered.
func (bm *BrokerManagerImpl) GetBrokerInfo(brokerID common.UUID) *common.BrokerInfo {
	bm.mapLock.RLock()
	bconn, ok := bm.brokerMap[brokerID]
	bm.mapLock.RUnlock()
	if ok {
		return &bconn.BrokerInfo
	}
	log.WithField("brokerID", brokerID).Warn("Attempted to get info of a nonexistent broker")
	return nil
}
// IsBrokerAlive reports whether the broker with the given ID is currently
// considered alive; unknown brokers log a warning and count as dead.
func (bm *BrokerManagerImpl) IsBrokerAlive(brokerID common.UUID) bool {
	bm.mapLock.RLock()
	bconn, ok := bm.brokerMap[brokerID]
	bm.mapLock.RUnlock()
	if ok {
		return bconn.IsAlive()
	}
	log.WithField("brokerID", brokerID).Warn("Attempted to check liveness of a nonexistent broker")
	return false
}
// Get an arbitrary live broker; for redirecting clients/publishers with a dead main broker
// Will return nil if none are available
func (bm *BrokerManagerImpl) GetLiveBroker() *Broker {
	bm.queueLock.Lock()
	defer bm.queueLock.Unlock()
	tempDeadList := list.New()
	// Store until the end so that we can push the dead ones back onto
	// the live list; we don't want to mess up the routines that will
	// be moving them later
	defer func() {
		for tempDeadList.Len() > 0 {
			e := tempDeadList.Front()
			b := e.Value.(*Broker)
			b.RemoveFromList(tempDeadList)
			b.PushToList(bm.liveBrokerQueue)
		}
	}()
	// Rotate through the live queue until a broker that is actually alive is
	// found; stale entries are parked on tempDeadList in the meantime.
	for {
		elem := bm.liveBrokerQueue.Front()
		if elem == nil {
			// Queue exhausted: every candidate turned out to be stale.
			return nil
		}
		bconn := elem.Value.(*Broker)
		bconn.RemoveFromList(bm.liveBrokerQueue)
		if bconn.IsAlive() { // Double check liveness to be sure
			bconn.PushToList(bm.liveBrokerQueue)
			return bconn
		} else {
			bconn.PushToList(tempDeadList)
		}
	}
}
// Terminate the Broker and remove from our map
func (bm *BrokerManagerImpl) TerminateBroker(brokerID common.UUID) {
	bm.mapLock.Lock()
	defer bm.mapLock.Unlock()
	broker, found := bm.brokerMap[brokerID]
	if !found {
		log.WithField("brokerID", brokerID).Warn("Attempted to terminate nonexistent broker")
		return
	}
	log.WithField("brokerID", brokerID).Info("BrokerManagerImpl terminating broker")
	delete(bm.brokerMap, brokerID)
	delete(bm.brokerAddrMap, broker.ClientBrokerAddr)
	bm.etcdManager.DeleteEntity(broker.ToSerializable())
	broker.Terminate()
}
// Asynchronously send a message to the given broker
func (bm *BrokerManagerImpl) SendToBroker(brokerID common.UUID, message common.Sendable) (err error) {
	bm.mapLock.RLock()
	defer bm.mapLock.RUnlock()
	if brokerConn, ok := bm.brokerMap[brokerID]; ok {
		brokerConn.Send(message)
		return nil
	}
	log.WithField("brokerID", brokerID).Warn("Attempted to send message to nonexistent broker")
	return errors.New("Broker was unable to be found")
}
// Asynchronously send a message to all currently living brokers.
// Automatically adjusts the MessageID of the messages to be distinct
// if the message contains it.
// If skipBrokerID is non-nil, the broker with that ID is skipped.
func (bm *BrokerManagerImpl) BroadcastToBrokers(message common.Sendable, skipBrokerID *common.UUID) {
	bm.mapLock.RLock()
	defer bm.mapLock.RUnlock()
	switch msg := message.(type) {
	case common.SendableWithID:
		for id, brokerConn := range bm.brokerMap {
			// Bug fix: the old condition (skipBrokerID != nil && ...) sent to
			// no brokers at all when skipBrokerID was nil.
			if skipBrokerID == nil || *skipBrokerID != id {
				// To set the ID to be different we have to make copies of the object
				m := msg.Copy()
				m.SetID(common.GetMessageID())
				brokerConn.Send(m)
			}
		}
	case common.Sendable:
		for id, brokerConn := range bm.brokerMap {
			// Bug fix: honor skipBrokerID for plain Sendables too, as the
			// documented contract states.
			if skipBrokerID == nil || *skipBrokerID != id {
				brokerConn.Send(message)
			}
		}
	}
}
// First confirm the lack of liveness of the home broker.
// If the home broker is still alive, simply redirect the client back there and do nothing else.
// If it is confirmed dead, pick a random live broker and redirect the client there.
func (bm *BrokerManagerImpl) HandlePubClientRemapping(msg *common.BrokerRequestMessage) (*common.BrokerAssignmentMessage, error) {
	bm.mapLock.RLock()
	homeBroker, found := bm.brokerAddrMap[msg.LocalBrokerAddr]
	// Bug fix: previously the RLock was never released on the not-found path,
	// permanently blocking all writers of mapLock.
	bm.mapLock.RUnlock()
	if !found {
		return nil, errors.New("Local broker could not be found")
	}
	var brokerDead bool
	if !homeBroker.IsAlive() {
		brokerDead = true
	} else {
		// Broker claims to be alive; confirm with an explicit heartbeat.
		brokerDead = !homeBroker.RequestHeartbeatAndWait()
	}
	var newBroker *Broker
	if brokerDead { // actions to take if broker determined dead
		newBroker = bm.GetLiveBroker()
		bm.BrokerReassignChan <- &BrokerReassignment{
			HomeBrokerID: &homeBroker.BrokerID,
			IsPublisher:  msg.IsPublisher,
			UUID:         &msg.UUID,
		}
		// Bug fix: GetLiveBroker returns nil when no live broker exists; the
		// old code dereferenced it below and panicked.
		if newBroker == nil {
			return nil, errors.New("No live broker available for reassignment")
		}
	} else {
		newBroker = homeBroker // no real redirect necessary
	}
	return &common.BrokerAssignmentMessage{
		BrokerInfo: common.BrokerInfo{
			BrokerID:         newBroker.BrokerID,
			CoordBrokerAddr:  newBroker.CoordBrokerAddr,
			ClientBrokerAddr: newBroker.ClientBrokerAddr,
		},
	}, nil
}
// createMessageHandler returns the MessageHandler invoked for every message
// arriving on the given broker's connection. Connect and terminate messages
// are handled here; everything else is forwarded to MessageBuffer.
func (bm *BrokerManagerImpl) createMessageHandler(brokerID common.UUID) MessageHandler {
	return func(brokerMessage *MessageFromBroker) {
		message := brokerMessage.message
		switch msg := message.(type) {
		case *common.BrokerConnectMessage:
			// A connect should arrive on a fresh connection, never an existing one.
			log.WithFields(log.Fields{
				"connBrokerID": brokerID, "newBrokerID": msg.BrokerID, "newBrokerAddr": msg.CoordBrokerAddr,
			}).Warn("Received a BrokerConnectMessage over an existing broker connection")
		case *common.BrokerTerminateMessage:
			bm.TerminateBroker(brokerMessage.broker.BrokerID)
			brokerMessage.broker.Send(&common.AcknowledgeMessage{MessageID: msg.MessageID})
		default:
			// Not manager-level traffic; hand it off to the rest of the system.
			bm.MessageBuffer <- brokerMessage
		}
	}
}
// Monitor Broker death, moving the dying broker to the correct
// queue. Also forward the broker on to the outward Death chan.
func (bm *BrokerManagerImpl) monitorDeathChan() {
	for {
		deadBroker, ok := <-bm.internalDeathChan
		if !ok {
			// Channel closed: nothing more to monitor.
			// Bug fix: the old code logged deadBroker.BrokerInfo before this
			// check, dereferencing a nil *Broker once the channel was closed.
			return
		}
		log.WithField("broker", deadBroker.BrokerInfo).Debug("Broker determined as dead!")
		bm.queueLock.Lock()
		deadBroker.RemoveFromList(bm.liveBrokerQueue)
		deadBroker.PushToList(bm.deadBrokerQueue)
		bm.queueLock.Unlock()
		bm.BrokerDeathChan <- &deadBroker.BrokerID // Forward on to outward death chan
		bm.BroadcastToBrokers(&common.BrokerDeathMessage{BrokerInfo: deadBroker.BrokerInfo}, &deadBroker.BrokerID)
	}
}
// rebuildBrokerState re-registers a single broker deserialized from etcd.
// commConn is nil, so the Broker is recreated but not started.
// NOTE(review): the error returned by ConnectBroker is discarded — confirm
// that is intentional.
func (bm *BrokerManagerImpl) rebuildBrokerState(entity EtcdSerializable) {
	serBroker := entity.(*SerializableBroker)
	bm.ConnectBroker(&serBroker.BrokerInfo, nil)
}
// RebuildFromEtcd re-creates broker state from etcd up to the given revision.
// Note: log.Fatal terminates the process, so the return statement inside the
// if is effectively unreachable.
func (bm *BrokerManagerImpl) RebuildFromEtcd(upToRev int64) (err error) {
	err = bm.etcdManager.IterateOverAllEntities(BrokerEntity, upToRev, bm.rebuildBrokerState)
	if err != nil {
		log.WithField("error", err).Fatal("Error while iterating over Brokers when rebuilding")
		return
	}
	return
}
|
package writer
import (
"fmt"
"go/format"
"testing"
"github.com/dave/jennifer/jen"
"github.com/jbowes/oag/pkg"
)
// TestSetQueryArgs checks the code generated for required query parameters:
// plain strings, renamed parameters, TextMarshaler values, and multi-valued
// collections. Expected strings are normalized via format.Source before the
// comparison, so only their parse structure matters.
func TestSetQueryArgs(t *testing.T) {
	tcs := []struct {
		name string
		in   []pkg.Param
		out  string // expected generated body, compared after gofmt
	}{
		{"no args", nil, ""},
		{"string arg",
			[]pkg.Param{{ID: "arg", Arg: "arg", Type: &pkg.IdentType{}}},
			`
q := make(url.Values)
q.Set("arg", arg)
`,
		},
		{"different name",
			[]pkg.Param{{ID: "arg", Arg: "arg", Orig: "arg_thing", Type: &pkg.IdentType{}}},
			`
q := make(url.Values)
q.Set("arg_thing", arg)
`,
		},
		{"different arg value",
			[]pkg.Param{{ID: "arg", Arg: "different", Orig: "arg_thing", Type: &pkg.IdentType{}}},
			`
q := make(url.Values)
q.Set("arg_thing", different)
`,
		},
		{"marshal",
			[]pkg.Param{{ID: "arg", Arg: "arg", Type: &pkg.IdentType{Marshal: true}}},
			`
q := make(url.Values)
argBytes, err := arg.MarshalText()
if err != nil {
	return
}
q.Set("arg", string(argBytes))
`,
		},
		{"space after martial, not between regular",
			[]pkg.Param{
				{ID: "arg1", Arg: "arg1", Type: &pkg.IdentType{Marshal: true}},
				{ID: "arg2", Arg: "arg2", Type: &pkg.IdentType{}},
				{ID: "arg3", Arg: "arg3", Type: &pkg.IdentType{}},
			},
			`
q := make(url.Values)
arg1Bytes, err := arg1.MarshalText()
if err != nil {
	return
}
q.Set("arg1", string(arg1Bytes))
q.Set("arg2", arg2)
q.Set("arg3", arg3)
`,
		},
		{"multi collection string arg",
			[]pkg.Param{{ID: "arg", Arg: "arg", Collection: pkg.Multi, Type: &pkg.SliceType{Type: &pkg.IdentType{}}}},
			`
q := make(url.Values)
for _, v := range arg {
	q.Add("arg", v)
}
`,
		},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			// Render the generated statements inside a dummy func literal so
			// the output can be gofmt-compared against tc.out.
			sqa := jen.Id("v").Op("=").Func().Params().BlockFunc(func(g *jen.Group) {
				setQueryArgs(g, nil, tc.in)
			})
			out := fmt.Sprintf("%#v", sqa)
			formatted, _ := format.Source([]byte("v = func() {" + tc.out + "}"))
			if out != string(formatted) {
				t.Error("got:", out, "expected:", tc.out)
			}
		})
	}
}
// TestSetOptQueryArgs checks the code generated for optional (pointer-typed)
// query parameters guarded by a nil check on the opts struct and each field.
// Expected strings are normalized via format.Source before the comparison.
func TestSetOptQueryArgs(t *testing.T) {
	tcs := []struct {
		name string
		in   []pkg.Field
		out  string // expected generated body, compared after gofmt
	}{
		{"no args", nil, ""},
		{"string arg",
			[]pkg.Field{{ID: "arg", Type: &pkg.PointerType{Type: &pkg.IdentType{}}}},
			`
var q url.Values
if opts != nil {
	q = make(url.Values)
	if opts.arg != nil {
		q.Set("arg", *opts.arg)
	}
}
`,
		},
		{"different name",
			[]pkg.Field{{ID: "arg", Orig: "arg_thing", Type: &pkg.PointerType{Type: &pkg.IdentType{}}}},
			`
var q url.Values
if opts != nil {
	q = make(url.Values)
	if opts.arg != nil {
		q.Set("arg_thing", *opts.arg)
	}
}
`,
		},
		{"marshal",
			[]pkg.Field{{ID: "arg", Type: &pkg.PointerType{Type: &pkg.IdentType{Marshal: true}}}},
			`
var q url.Values
if opts != nil {
	q = make(url.Values)
	if opts.arg != nil {
		b, err := opts.arg.MarshalText()
		if err != nil {
			return
		}
		q.Set("arg", string(b))
	}
}
`,
		},
		{"space after martial, not between regular",
			[]pkg.Field{
				{ID: "arg1", Type: &pkg.PointerType{Type: &pkg.IdentType{Marshal: true}}},
				{ID: "arg2", Type: &pkg.PointerType{Type: &pkg.IdentType{}}},
				{ID: "arg3", Type: &pkg.PointerType{Type: &pkg.IdentType{}}},
			},
			`
var q url.Values
if opts != nil {
	q = make(url.Values)
	if opts.arg1 != nil {
		b, err := opts.arg1.MarshalText()
		if err != nil {
			return
		}
		q.Set("arg1", string(b))
	}
	if opts.arg2 != nil {
		q.Set("arg2", *opts.arg2)
	}
	if opts.arg3 != nil {
		q.Set("arg3", *opts.arg3)
	}
}
`,
		},
		{"multi collection string arg",
			[]pkg.Field{{ID: "arg", Collection: pkg.Multi, Type: &pkg.PointerType{Type: &pkg.SliceType{Type: &pkg.IdentType{}}}}},
			`
var q url.Values
if opts != nil {
	q = make(url.Values)
	for _, v := range opts.arg {
		q.Add("arg", v)
	}
}
`,
		},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			// Render the generated statements inside a dummy func literal so
			// the output can be gofmt-compared against tc.out.
			soqa := jen.Id("v").Op("=").Func().Params().BlockFunc(func(g *jen.Group) {
				setOptQueryArgs(g, nil, false, tc.in)
			})
			out := fmt.Sprintf("%#v", soqa)
			formatted, _ := format.Source([]byte("v = func() {" + tc.out + "}"))
			if out != string(formatted) {
				t.Error("got:", out, "expected:", tc.out)
			}
		})
	}
}
// TestSetHeaderArgs checks the code generated for header parameters: plain
// strings, renamed parameters, and TextMarshaler values. Expected strings are
// normalized via format.Source before the comparison.
func TestSetHeaderArgs(t *testing.T) {
	tcs := []struct {
		name string
		in   []pkg.Param
		out  string // expected generated body, compared after gofmt
	}{
		{"no args", nil, ""},
		{"string arg",
			[]pkg.Param{{ID: "arg", Arg: "arg", Type: &pkg.IdentType{}}},
			`req.Header.Set("arg", arg)
`,
		},
		{"different name",
			[]pkg.Param{{ID: "arg", Arg: "arg", Orig: "arg_thing", Type: &pkg.IdentType{}}},
			`req.Header.Set("arg_thing", arg)
`,
		},
		{"marshal",
			[]pkg.Param{{ID: "arg", Arg: "arg", Type: &pkg.IdentType{Marshal: true}}},
			`argBytes, err := arg.MarshalText()
if err != nil {
	return
}
req.Header.Set("arg", string(argBytes))
`,
		},
		{"space after martial, not between regular",
			[]pkg.Param{
				{ID: "arg1", Arg: "arg1", Type: &pkg.IdentType{Marshal: true}},
				{ID: "arg2", Arg: "arg2", Type: &pkg.IdentType{}},
				{ID: "arg3", Arg: "arg3", Type: &pkg.IdentType{}},
			},
			`arg1Bytes, err := arg1.MarshalText()
if err != nil {
	return
}
req.Header.Set("arg1", string(arg1Bytes))
req.Header.Set("arg2", arg2)
req.Header.Set("arg3", arg3)
`,
		},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			// Render the generated statements inside a dummy func literal so
			// the output can be gofmt-compared against tc.out.
			sha := jen.Id("v").Op("=").Func().Params().BlockFunc(func(g *jen.Group) {
				setHeaderArgs(g, nil, tc.in)
			})
			out := fmt.Sprintf("%#v", sha)
			formatted, _ := format.Source([]byte("v = func() {" + tc.out + "}"))
			if out != string(formatted) {
				t.Error("got:", out, "expected:", tc.out)
			}
		})
	}
}
// TestErrSelectFunc checks the generated error-selector closure for every
// shape of the Errors map: empty, default-only (-1 key), multiple codes,
// codes plus default, single code, and repeated types across codes.
func TestErrSelectFunc(t *testing.T) {
	tcs := []struct {
		name string
		in   pkg.Method
		out  string // expected generated expression, compared after gofmt
	}{
		{"no error codes", pkg.Method{}, "nil"},
		{"only default",
			pkg.Method{Errors: map[int]pkg.Type{
				-1: &pkg.PointerType{Type: &pkg.IdentType{Name: "Error"}},
			}},
			`func(code int) error {
	return &Error{}
}`,
		},
		{"many codes",
			pkg.Method{Errors: map[int]pkg.Type{
				400: &pkg.PointerType{Type: &pkg.IdentType{Name: "Error400"}},
				401: &pkg.PointerType{Type: &pkg.IdentType{Name: "Error401"}},
			}},
			`func(code int) error {
	switch code {
	case 400:
		return &Error400{}
	case 401:
		return &Error401{}
	default:
		return nil
	}
}`,
		},
		{"codes and default",
			pkg.Method{Errors: map[int]pkg.Type{
				-1:  &pkg.PointerType{Type: &pkg.IdentType{Name: "Error"}},
				400: &pkg.PointerType{Type: &pkg.IdentType{Name: "Error400"}},
				401: &pkg.PointerType{Type: &pkg.IdentType{Name: "Error401"}},
			}},
			`func(code int) error {
	switch code {
	case 400:
		return &Error400{}
	case 401:
		return &Error401{}
	default:
		return &Error{}
	}
}`,
		},
		{"single code no default",
			pkg.Method{Errors: map[int]pkg.Type{
				400: &pkg.PointerType{Type: &pkg.IdentType{Name: "Error400"}},
			}},
			`func(code int) error {
	if code == 400 {
		return &Error400{}
	}
	return nil
}`,
		},
		{"same successive error groups case",
			pkg.Method{Errors: map[int]pkg.Type{
				400: &pkg.PointerType{Type: &pkg.IdentType{Name: "ErrorA"}},
				401: &pkg.PointerType{Type: &pkg.IdentType{Name: "ErrorA"}},
				403: &pkg.PointerType{Type: &pkg.IdentType{Name: "ErrorB"}},
				404: &pkg.PointerType{Type: &pkg.IdentType{Name: "ErrorA"}},
				405: &pkg.PointerType{Type: &pkg.IdentType{Name: "ErrorA"}},
			}},
			`func(code int) error {
	switch code {
	case 400, 401:
		return &ErrorA{}
	case 403:
		return &ErrorB{}
	case 404, 405:
		return &ErrorA{}
	default:
		return nil
	}
}`,
		},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			// Assign the generated expression to a dummy variable so it can
			// be gofmt-compared against tc.out.
			esf := jen.Id("v").Op("=").Add(errSelectFunc(&tc.in))
			out := fmt.Sprintf("%#v", esf)
			formatted, _ := format.Source([]byte("v = " + tc.out))
			if out != string(formatted) {
				t.Error("got:", out, "expected:", tc.out)
			}
		})
	}
}
// TestStringFor verifies that stringFor renders the correct string-conversion
// expression for each supported primitive type.
func TestStringFor(t *testing.T) {
	tcs := []struct {
		name string
		in   pkg.Type
		out  string // expected generated expression, compared after gofmt
	}{
		{"string", &pkg.IdentType{Name: "string"}, "x"},
		{"bool", &pkg.IdentType{Name: "bool"}, "strconv.FormatBool(x)"},
		{"int", &pkg.IdentType{Name: "int"}, "strconv.Itoa(x)"},
		{"float64", &pkg.IdentType{Name: "float64"}, "strconv.FormatFloat(x, 'f', -1, 64)"},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			esf := jen.Id("v").Op("=").Add(stringFor(tc.in, jen.Id("x")))
			out := fmt.Sprintf("%#v", esf)
			formatted, err := format.Source([]byte("v = " + tc.out))
			if err != nil {
				// Bug fix: the error was silently discarded, so a malformed
				// expected string compared as "" and produced a confusing diff.
				t.Fatalf("invalid expected source %q: %v", tc.out, err)
			}
			if out != string(formatted) {
				t.Error("got:", out, "expected:", tc.out)
			}
		})
	}
}
Add more test coverage for query arg writing
package writer
import (
"fmt"
"go/format"
"testing"
"github.com/dave/jennifer/jen"
"github.com/jbowes/oag/pkg"
)
// TestSetQueryArgs checks the code generated for required query parameters:
// plain strings, renamed parameters, TextMarshaler values, and multi-valued
// collections (plain and TextMarshaler elements). Expected strings are
// normalized via format.Source before the comparison.
func TestSetQueryArgs(t *testing.T) {
	tcs := []struct {
		name string
		in   []pkg.Param
		out  string // expected generated body, compared after gofmt
	}{
		{"no args", nil, ""},
		{"string arg",
			[]pkg.Param{{ID: "arg", Arg: "arg", Type: &pkg.IdentType{}}},
			`
q := make(url.Values)
q.Set("arg", arg)
`,
		},
		{"different name",
			[]pkg.Param{{ID: "arg", Arg: "arg", Orig: "arg_thing", Type: &pkg.IdentType{}}},
			`
q := make(url.Values)
q.Set("arg_thing", arg)
`,
		},
		{"different arg value",
			[]pkg.Param{{ID: "arg", Arg: "different", Orig: "arg_thing", Type: &pkg.IdentType{}}},
			`
q := make(url.Values)
q.Set("arg_thing", different)
`,
		},
		{"marshal",
			[]pkg.Param{{ID: "arg", Arg: "arg", Type: &pkg.IdentType{Marshal: true}}},
			`
q := make(url.Values)
argBytes, err := arg.MarshalText()
if err != nil {
	return
}
q.Set("arg", string(argBytes))
`,
		},
		{"space after martial, not between regular",
			[]pkg.Param{
				{ID: "arg1", Arg: "arg1", Type: &pkg.IdentType{Marshal: true}},
				{ID: "arg2", Arg: "arg2", Type: &pkg.IdentType{}},
				{ID: "arg3", Arg: "arg3", Type: &pkg.IdentType{}},
			},
			`
q := make(url.Values)
arg1Bytes, err := arg1.MarshalText()
if err != nil {
	return
}
q.Set("arg1", string(arg1Bytes))
q.Set("arg2", arg2)
q.Set("arg3", arg3)
`,
		},
		{"multi collection string arg",
			[]pkg.Param{{ID: "arg", Arg: "arg", Collection: pkg.Multi, Type: &pkg.SliceType{Type: &pkg.IdentType{}}}},
			`
q := make(url.Values)
for _, v := range arg {
	q.Add("arg", v)
}
`,
		},
		{"multi collection marshal",
			[]pkg.Param{{ID: "arg", Arg: "arg", Collection: pkg.Multi, Type: &pkg.SliceType{Type: &pkg.IdentType{Marshal: true}}}},
			`
q := make(url.Values)
for _, v := range arg {
	b, err := v.MarshalText()
	if err != nil {
		return
	}
	q.Add("arg", string(b))
}
`,
		},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			// Render the generated statements inside a dummy func literal so
			// the output can be gofmt-compared against tc.out.
			sqa := jen.Id("v").Op("=").Func().Params().BlockFunc(func(g *jen.Group) {
				setQueryArgs(g, nil, tc.in)
			})
			out := fmt.Sprintf("%#v", sqa)
			formatted, _ := format.Source([]byte("v = func() {" + tc.out + "}"))
			if out != string(formatted) {
				t.Error("got:", out, "expected:", tc.out)
			}
		})
	}
}
// TestSetOptQueryArgs checks the code generated for optional (pointer-typed)
// query parameters guarded by a nil check on the opts struct and each field,
// including multi-valued collections with plain and TextMarshaler elements.
// Expected strings are normalized via format.Source before the comparison.
func TestSetOptQueryArgs(t *testing.T) {
	tcs := []struct {
		name string
		in   []pkg.Field
		out  string // expected generated body, compared after gofmt
	}{
		{"no args", nil, ""},
		{"string arg",
			[]pkg.Field{{ID: "arg", Type: &pkg.PointerType{Type: &pkg.IdentType{}}}},
			`
var q url.Values
if opts != nil {
	q = make(url.Values)
	if opts.arg != nil {
		q.Set("arg", *opts.arg)
	}
}
`,
		},
		{"different name",
			[]pkg.Field{{ID: "arg", Orig: "arg_thing", Type: &pkg.PointerType{Type: &pkg.IdentType{}}}},
			`
var q url.Values
if opts != nil {
	q = make(url.Values)
	if opts.arg != nil {
		q.Set("arg_thing", *opts.arg)
	}
}
`,
		},
		{"marshal",
			[]pkg.Field{{ID: "arg", Type: &pkg.PointerType{Type: &pkg.IdentType{Marshal: true}}}},
			`
var q url.Values
if opts != nil {
	q = make(url.Values)
	if opts.arg != nil {
		b, err := opts.arg.MarshalText()
		if err != nil {
			return
		}
		q.Set("arg", string(b))
	}
}
`,
		},
		{"space after martial, not between regular",
			[]pkg.Field{
				{ID: "arg1", Type: &pkg.PointerType{Type: &pkg.IdentType{Marshal: true}}},
				{ID: "arg2", Type: &pkg.PointerType{Type: &pkg.IdentType{}}},
				{ID: "arg3", Type: &pkg.PointerType{Type: &pkg.IdentType{}}},
			},
			`
var q url.Values
if opts != nil {
	q = make(url.Values)
	if opts.arg1 != nil {
		b, err := opts.arg1.MarshalText()
		if err != nil {
			return
		}
		q.Set("arg1", string(b))
	}
	if opts.arg2 != nil {
		q.Set("arg2", *opts.arg2)
	}
	if opts.arg3 != nil {
		q.Set("arg3", *opts.arg3)
	}
}
`,
		},
		{"multi collection string arg",
			[]pkg.Field{{ID: "arg", Collection: pkg.Multi, Type: &pkg.PointerType{Type: &pkg.SliceType{Type: &pkg.IdentType{}}}}},
			`
var q url.Values
if opts != nil {
	q = make(url.Values)
	for _, v := range opts.arg {
		q.Add("arg", v)
	}
}
`,
		},
		{"multi collection marshal",
			[]pkg.Field{{ID: "arg", Collection: pkg.Multi, Type: &pkg.PointerType{Type: &pkg.SliceType{Type: &pkg.IdentType{Marshal: true}}}}},
			`
var q url.Values
if opts != nil {
	q = make(url.Values)
	for _, v := range opts.arg {
		b, err := v.MarshalText()
		if err != nil {
			return
		}
		q.Add("arg", string(b))
	}
}
`,
		},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			// Render the generated statements inside a dummy func literal so
			// the output can be gofmt-compared against tc.out.
			soqa := jen.Id("v").Op("=").Func().Params().BlockFunc(func(g *jen.Group) {
				setOptQueryArgs(g, nil, false, tc.in)
			})
			out := fmt.Sprintf("%#v", soqa)
			formatted, _ := format.Source([]byte("v = func() {" + tc.out + "}"))
			if out != string(formatted) {
				t.Error("got:", out, "expected:", tc.out)
			}
		})
	}
}
// TestSetHeaderArgs checks the code generated by setHeaderArgs for
// header parameters against gofmt-normalized golden snippets.
func TestSetHeaderArgs(t *testing.T) {
	tcs := []struct {
		name string
		in   []pkg.Param
		out  string
	}{
		{"no args", nil, ""},
		{"string arg",
			[]pkg.Param{{ID: "arg", Arg: "arg", Type: &pkg.IdentType{}}},
			`req.Header.Set("arg", arg)
`,
		},
		{"different name",
			[]pkg.Param{{ID: "arg", Arg: "arg", Orig: "arg_thing", Type: &pkg.IdentType{}}},
			`req.Header.Set("arg_thing", arg)
`,
		},
		{"marshal",
			[]pkg.Param{{ID: "arg", Arg: "arg", Type: &pkg.IdentType{Marshal: true}}},
			`argBytes, err := arg.MarshalText()
	if err != nil {
		return
	}
	req.Header.Set("arg", string(argBytes))
`,
		},
		// Fixed subtest-name typo: "martial" -> "marshal".
		{"space after marshal, not between regular",
			[]pkg.Param{
				{ID: "arg1", Arg: "arg1", Type: &pkg.IdentType{Marshal: true}},
				{ID: "arg2", Arg: "arg2", Type: &pkg.IdentType{}},
				{ID: "arg3", Arg: "arg3", Type: &pkg.IdentType{}},
			},
			`arg1Bytes, err := arg1.MarshalText()
	if err != nil {
		return
	}
	req.Header.Set("arg1", string(arg1Bytes))
	req.Header.Set("arg2", arg2)
	req.Header.Set("arg3", arg3)
`,
		},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			sha := jen.Id("v").Op("=").Func().Params().BlockFunc(func(g *jen.Group) {
				setHeaderArgs(g, nil, tc.in)
			})
			out := fmt.Sprintf("%#v", sha)
			// Surface malformed golden snippets instead of diffing
			// against nil output.
			formatted, err := format.Source([]byte("v = func() {" + tc.out + "}"))
			if err != nil {
				t.Fatalf("invalid expected source: %v", err)
			}
			if out != string(formatted) {
				t.Error("got:", out, "expected:", tc.out)
			}
		})
	}
}
// TestErrSelectFunc checks the error-selector function generated by
// errSelectFunc for various combinations of status-code -> error-type
// mappings (code -1 acts as the default case).
func TestErrSelectFunc(t *testing.T) {
	tcs := []struct {
		name string
		in   pkg.Method
		out  string
	}{
		{"no error codes", pkg.Method{}, "nil"},
		{"only default",
			pkg.Method{Errors: map[int]pkg.Type{
				-1: &pkg.PointerType{Type: &pkg.IdentType{Name: "Error"}},
			}},
			`func(code int) error {
	return &Error{}
}`,
		},
		{"many codes",
			pkg.Method{Errors: map[int]pkg.Type{
				400: &pkg.PointerType{Type: &pkg.IdentType{Name: "Error400"}},
				401: &pkg.PointerType{Type: &pkg.IdentType{Name: "Error401"}},
			}},
			`func(code int) error {
	switch code {
	case 400:
		return &Error400{}
	case 401:
		return &Error401{}
	default:
		return nil
	}
}`,
		},
		{"codes and default",
			pkg.Method{Errors: map[int]pkg.Type{
				-1:  &pkg.PointerType{Type: &pkg.IdentType{Name: "Error"}},
				400: &pkg.PointerType{Type: &pkg.IdentType{Name: "Error400"}},
				401: &pkg.PointerType{Type: &pkg.IdentType{Name: "Error401"}},
			}},
			`func(code int) error {
	switch code {
	case 400:
		return &Error400{}
	case 401:
		return &Error401{}
	default:
		return &Error{}
	}
}`,
		},
		{"single code no default",
			pkg.Method{Errors: map[int]pkg.Type{
				400: &pkg.PointerType{Type: &pkg.IdentType{Name: "Error400"}},
			}},
			`func(code int) error {
	if code == 400 {
		return &Error400{}
	}
	return nil
}`,
		},
		{"same successive error groups case",
			pkg.Method{Errors: map[int]pkg.Type{
				400: &pkg.PointerType{Type: &pkg.IdentType{Name: "ErrorA"}},
				401: &pkg.PointerType{Type: &pkg.IdentType{Name: "ErrorA"}},
				403: &pkg.PointerType{Type: &pkg.IdentType{Name: "ErrorB"}},
				404: &pkg.PointerType{Type: &pkg.IdentType{Name: "ErrorA"}},
				405: &pkg.PointerType{Type: &pkg.IdentType{Name: "ErrorA"}},
			}},
			`func(code int) error {
	switch code {
	case 400, 401:
		return &ErrorA{}
	case 403:
		return &ErrorB{}
	case 404, 405:
		return &ErrorA{}
	default:
		return nil
	}
}`,
		},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			esf := jen.Id("v").Op("=").Add(errSelectFunc(&tc.in))
			out := fmt.Sprintf("%#v", esf)
			// A malformed golden snippet is a bug in the test itself;
			// fail loudly instead of diffing against nil output.
			formatted, err := format.Source([]byte("v = " + tc.out))
			if err != nil {
				t.Fatalf("invalid expected source: %v", err)
			}
			if out != string(formatted) {
				t.Error("got:", out, "expected:", tc.out)
			}
		})
	}
}
// TestStringFor checks the string-conversion expression generated by
// stringFor for each supported scalar type.
func TestStringFor(t *testing.T) {
	tcs := []struct {
		name string
		in   pkg.Type
		out  string
	}{
		{"string", &pkg.IdentType{Name: "string"}, "x"},
		{"bool", &pkg.IdentType{Name: "bool"}, "strconv.FormatBool(x)"},
		{"int", &pkg.IdentType{Name: "int"}, "strconv.Itoa(x)"},
		{"float64", &pkg.IdentType{Name: "float64"}, "strconv.FormatFloat(x, 'f', -1, 64)"},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			esf := jen.Id("v").Op("=").Add(stringFor(tc.in, jen.Id("x")))
			out := fmt.Sprintf("%#v", esf)
			// A malformed golden snippet is a bug in the test itself;
			// fail loudly instead of diffing against nil output.
			formatted, err := format.Source([]byte("v = " + tc.out))
			if err != nil {
				t.Fatalf("invalid expected source: %v", err)
			}
			if out != string(formatted) {
				t.Error("got:", out, "expected:", tc.out)
			}
		})
	}
}
|
package pattern
import (
"fmt"
"go/ast"
"go/token"
"go/types"
"reflect"
)
// tokensByString maps the textual spelling of a Go operator, literal
// kind, or keyword — as it appears in a pattern — to the corresponding
// go/token constant wrapped in the pattern Token type.
var tokensByString = map[string]Token{
	"INT":         Token(token.INT),
	"FLOAT":       Token(token.FLOAT),
	"IMAG":        Token(token.IMAG),
	"CHAR":        Token(token.CHAR),
	"STRING":      Token(token.STRING),
	"+":           Token(token.ADD),
	"-":           Token(token.SUB),
	"*":           Token(token.MUL),
	"/":           Token(token.QUO),
	"%":           Token(token.REM),
	"&":           Token(token.AND),
	"|":           Token(token.OR),
	"^":           Token(token.XOR),
	"<<":          Token(token.SHL),
	">>":          Token(token.SHR),
	"&^":          Token(token.AND_NOT),
	"+=":          Token(token.ADD_ASSIGN),
	"-=":          Token(token.SUB_ASSIGN),
	"*=":          Token(token.MUL_ASSIGN),
	"/=":          Token(token.QUO_ASSIGN),
	"%=":          Token(token.REM_ASSIGN),
	"&=":          Token(token.AND_ASSIGN),
	"|=":          Token(token.OR_ASSIGN),
	"^=":          Token(token.XOR_ASSIGN),
	"<<=":         Token(token.SHL_ASSIGN),
	">>=":         Token(token.SHR_ASSIGN),
	"&^=":         Token(token.AND_NOT_ASSIGN),
	"&&":          Token(token.LAND),
	"||":          Token(token.LOR),
	"<-":          Token(token.ARROW),
	"++":          Token(token.INC),
	"--":          Token(token.DEC),
	"==":          Token(token.EQL),
	"<":           Token(token.LSS),
	">":           Token(token.GTR),
	"=":           Token(token.ASSIGN),
	"!":           Token(token.NOT),
	"!=":          Token(token.NEQ),
	"<=":          Token(token.LEQ),
	">=":          Token(token.GEQ),
	":=":          Token(token.DEFINE),
	"...":         Token(token.ELLIPSIS),
	"IMPORT":      Token(token.IMPORT),
	"VAR":         Token(token.VAR),
	"TYPE":        Token(token.TYPE),
	"CONST":       Token(token.CONST),
	"BREAK":       Token(token.BREAK),
	"CONTINUE":    Token(token.CONTINUE),
	"GOTO":        Token(token.GOTO),
	"FALLTHROUGH": Token(token.FALLTHROUGH),
}
// maybeToken converts node to a Token when it is a String spelling a
// known operator or keyword. The boolean reports whether the conversion
// happened; otherwise the node is returned unchanged.
func maybeToken(node Node) (Node, bool) {
	s, isString := node.(String)
	if !isString {
		return node, false
	}
	tok, known := tokensByString[string(s)]
	if !known {
		return node, false
	}
	return tok, true
}
// isNil reports whether v is an untyped nil or the pattern Nil value.
func isNil(v interface{}) bool {
	switch v.(type) {
	case nil, Nil:
		return true
	default:
		return false
	}
}
// matcher is implemented by every pattern node type that can match
// itself against a concrete value, returning the matched value and
// whether the match succeeded.
type matcher interface {
	Match(*Matcher, interface{}) (interface{}, bool)
}

// State records the values captured by named Bindings during a match.
type State = map[string]interface{}

// Matcher matches patterns against ASTs, resolving identifiers through
// TypesInfo and accumulating binding captures in State.
type Matcher struct {
	TypesInfo *types.Info
	State     State
}
// fork returns a new Matcher that shares TypesInfo with m but owns an
// independent copy of the current binding state, so speculative matches
// can be discarded.
func (m *Matcher) fork() *Matcher {
	forked := &Matcher{
		TypesInfo: m.TypesInfo,
		State:     make(State, len(m.State)),
	}
	for name, captured := range m.State {
		forked.State[name] = captured
	}
	return forked
}
// merge adopts the binding state of a forked Matcher, committing the
// captures made during a successful speculative match.
func (m *Matcher) merge(mc *Matcher) {
	m.State = mc.State
}
// Match reports whether pattern a matches AST node b. Any binding state
// from a previous match is discarded first.
func (m *Matcher) Match(a Node, b ast.Node) bool {
	m.State = State{}
	_, ok := match(m, a, b)
	return ok
}
// Match matches pattern a against AST node b with a fresh Matcher and
// returns that Matcher (holding any captured bindings) along with the
// match result.
func Match(a Node, b ast.Node) (*Matcher, bool) {
	var m Matcher
	ok := m.Match(a, b)
	return &m, ok
}
// match matches two items, which may be (Node, AST) or (AST, AST).
// Patterns may only appear on the left-hand side; r is always a concrete
// value. It returns the matched value and whether the match succeeded.
func match(m *Matcher, l, r interface{}) (interface{}, bool) {
	if _, ok := r.(Node); ok {
		panic("Node mustn't be on right side of match")
	}

	// Unwrap transparent wrapper nodes on the left-hand side.
	switch l := l.(type) {
	case *ast.ParenExpr:
		return match(m, l.X, r)
	case *ast.ExprStmt:
		return match(m, l.X, r)
	case *ast.DeclStmt:
		return match(m, l.Decl, r)
	case *ast.LabeledStmt:
		return match(m, l.Stmt, r)
	case *ast.BlockStmt:
		return match(m, l.List, r)
	case *ast.FieldList:
		return match(m, l.List, r)
	}

	// Unwrap transparent wrapper nodes on the right-hand side; typed nil
	// pointers are normalized to untyped nil before recursing.
	switch r := r.(type) {
	case *ast.ParenExpr:
		return match(m, l, r.X)
	case *ast.ExprStmt:
		return match(m, l, r.X)
	case *ast.DeclStmt:
		return match(m, l, r.Decl)
	case *ast.LabeledStmt:
		return match(m, l, r.Stmt)
	case *ast.BlockStmt:
		if r == nil {
			return match(m, l, nil)
		}
		return match(m, l, r.List)
	case *ast.FieldList:
		if r == nil {
			return match(m, l, nil)
		}
		return match(m, l, r.List)
	case *ast.BasicLit:
		if r == nil {
			return match(m, l, nil)
		}
	}

	// Pattern nodes match through their own matcher implementation.
	if l, ok := l.(matcher); ok {
		return l.Match(m, r)
	}
	if l, ok := l.(Node); ok {
		// Matching of pattern with concrete value
		return matchNodeAST(m, l, r)
	}
	if l == nil || r == nil {
		return nil, l == r
	}

	// Two concrete AST nodes: structural comparison.
	{
		ln, ok1 := l.(ast.Node)
		rn, ok2 := r.(ast.Node)
		if ok1 && ok2 {
			return matchAST(m, ln, rn)
		}
	}

	// A previously captured types.Object matches identifiers and
	// selectors resolving to the same object.
	{
		obj, ok := l.(types.Object)
		if ok {
			switch r := r.(type) {
			case *ast.Ident:
				return obj, obj == m.TypesInfo.ObjectOf(r)
			case *ast.SelectorExpr:
				return obj, obj == m.TypesInfo.ObjectOf(r.Sel)
			default:
				return obj, false
			}
		}
	}

	// Slice comparisons; a lone element on either side is promoted to a
	// one-element slice so single nodes can match one-element lists.
	{
		ln, ok1 := l.([]ast.Expr)
		rn, ok2 := r.([]ast.Expr)
		if ok1 || ok2 {
			if ok1 && !ok2 {
				rn = []ast.Expr{r.(ast.Expr)}
			} else if !ok1 && ok2 {
				ln = []ast.Expr{l.(ast.Expr)}
			}
			if len(ln) != len(rn) {
				return nil, false
			}
			for i, ll := range ln {
				if _, ok := match(m, ll, rn[i]); !ok {
					return nil, false
				}
			}
			return r, true
		}
	}
	{
		ln, ok1 := l.([]ast.Stmt)
		rn, ok2 := r.([]ast.Stmt)
		if ok1 || ok2 {
			if ok1 && !ok2 {
				rn = []ast.Stmt{r.(ast.Stmt)}
			} else if !ok1 && ok2 {
				ln = []ast.Stmt{l.(ast.Stmt)}
			}
			if len(ln) != len(rn) {
				return nil, false
			}
			for i, ll := range ln {
				if _, ok := match(m, ll, rn[i]); !ok {
					return nil, false
				}
			}
			return r, true
		}
	}
	{
		ln, ok1 := l.([]*ast.Field)
		rn, ok2 := r.([]*ast.Field)
		if ok1 || ok2 {
			if ok1 && !ok2 {
				rn = []*ast.Field{r.(*ast.Field)}
			} else if !ok1 && ok2 {
				ln = []*ast.Field{l.(*ast.Field)}
			}
			if len(ln) != len(rn) {
				return nil, false
			}
			for i, ll := range ln {
				if _, ok := match(m, ll, rn[i]); !ok {
					return nil, false
				}
			}
			return r, true
		}
	}
	panic(fmt.Sprintf("unsupported comparison: %T and %T", l, r))
}
// matchNodeAST matches a pattern Node against a concrete AST value,
// which may be a single node or a one-element statement/expression list.
func matchNodeAST(m *Matcher, a Node, b interface{}) (interface{}, bool) {
	switch b := b.(type) {
	case []ast.Stmt:
		// 'a' is not a List or we'd be using its Match
		// implementation.
		if len(b) != 1 {
			return nil, false
		}
		return match(m, a, b[0])
	case []ast.Expr:
		// 'a' is not a List or we'd be using its Match
		// implementation.
		if len(b) != 1 {
			return nil, false
		}
		return match(m, a, b[0])
	case ast.Node:
		// Compare the pattern struct's fields against the same-named
		// fields of the AST node.
		ra := reflect.ValueOf(a)
		rb := reflect.ValueOf(b).Elem()
		if ra.Type().Name() != rb.Type().Name() {
			return nil, false
		}
		for i := 0; i < ra.NumField(); i++ {
			af := ra.Field(i)
			fieldName := ra.Type().Field(i).Name
			bf := rb.FieldByName(fieldName)
			if (bf == reflect.Value{}) {
				// Fixed verb: %T (dynamic type), not %t (boolean), for b.
				panic(fmt.Sprintf("internal error: could not find field %s in type %T when comparing with %T", fieldName, b, a))
			}
			ai := af.Interface()
			bi := bf.Interface()
			if ai == nil {
				return b, bi == nil
			}
			if _, ok := match(m, ai.(Node), bi); !ok {
				return b, false
			}
		}
		return b, true
	case nil:
		return nil, a == Nil{}
	default:
		panic(fmt.Sprintf("unhandled type %T", b))
	}
}
// matchAST matches two concrete AST nodes of the same dynamic type by
// reflectively comparing their fields, ignoring positions, *ast.Object
// back-references and attached comments.
func matchAST(m *Matcher, a, b ast.Node) (interface{}, bool) {
	ra := reflect.ValueOf(a)
	rb := reflect.ValueOf(b)
	if ra.Type() != rb.Type() {
		return nil, false
	}
	if ra.IsNil() || rb.IsNil() {
		// Both nil is a match; exactly one nil is not.
		return rb, ra.IsNil() == rb.IsNil()
	}
	ra = ra.Elem()
	rb = rb.Elem()
	for i := 0; i < ra.NumField(); i++ {
		af := ra.Field(i)
		bf := rb.Field(i)
		// Skip fields that carry no structural meaning for matching.
		if af.Type() == rtTokPos || af.Type() == rtObject || af.Type() == rtCommentGroup {
			continue
		}
		switch af.Kind() {
		case reflect.Slice:
			if af.Len() != bf.Len() {
				return nil, false
			}
			for j := 0; j < af.Len(); j++ {
				if _, ok := match(m, af.Index(j).Interface().(ast.Node), bf.Index(j).Interface().(ast.Node)); !ok {
					return nil, false
				}
			}
		case reflect.String:
			if af.String() != bf.String() {
				return nil, false
			}
		case reflect.Int:
			if af.Int() != bf.Int() {
				return nil, false
			}
		case reflect.Bool:
			if af.Bool() != bf.Bool() {
				return nil, false
			}
		case reflect.Ptr, reflect.Interface:
			if _, ok := match(m, af.Interface(), bf.Interface()); !ok {
				return nil, false
			}
		default:
			panic(fmt.Sprintf("internal error: unhandled kind %s (%T)", af.Kind(), af.Interface()))
		}
	}
	return b, true
}
// Match implements matcher. The first occurrence of a binding name
// captures the matched value into the matcher state; later occurrences
// require the new value to match the captured one.
func (b Binding) Match(m *Matcher, node interface{}) (interface{}, bool) {
	if isNil(b.Node) {
		if prev, bound := m.State[b.Name]; bound {
			// Recall the previously captured value.
			return match(m, prev, node)
		}
		// No sub-pattern and nothing captured yet: accept anything.
		b.Node = Any{}
	}
	if _, exists := m.State[b.Name]; exists {
		panic(fmt.Sprintf("binding already created: %s", b.Name))
	}
	captured, ok := match(m, b.Node, node)
	if ok {
		m.State[b.Name] = captured
	}
	return captured, ok
}
// Match implements matcher; Any matches every value.
func (Any) Match(m *Matcher, node interface{}) (interface{}, bool) {
	return node, true
}
// Match implements matcher. A List matches a slice by matching Head
// against the first element and Tail against the remainder; an empty
// List (nil Head) matches only an empty slice.
func (l List) Match(m *Matcher, node interface{}) (interface{}, bool) {
	v := reflect.ValueOf(node)
	if v.Kind() == reflect.Slice {
		if isNil(l.Head) {
			// Empty list pattern: only an empty slice matches.
			return node, v.Len() == 0
		}
		if v.Len() == 0 {
			return nil, false
		}
		// OPT(dh): don't check the entire tail if head didn't match
		_, ok1 := match(m, l.Head, v.Index(0).Interface())
		_, ok2 := match(m, l.Tail, v.Slice(1, v.Len()).Interface())
		return node, ok1 && ok2
	}
	// Our empty list does not equal an untyped Go nil. This way, we can
	// tell apart an if with no else and an if with an empty else.
	return nil, false
}
// Match implements matcher. A String matches either a literal string, or
// — when it spells a known operator/keyword — the equivalent token.Token.
func (s String) Match(m *Matcher, node interface{}) (interface{}, bool) {
	switch concrete := node.(type) {
	case token.Token:
		tok, isTok := maybeToken(s)
		if !isTok {
			return nil, false
		}
		return match(m, tok, node)
	case string:
		return concrete, concrete == string(s)
	}
	return nil, false
}
// Match implements matcher; a Token matches exactly the same token.Token.
func (tok Token) Match(m *Matcher, node interface{}) (interface{}, bool) {
	other, isToken := node.(token.Token)
	if !isToken {
		return nil, false
	}
	return other, other == token.Token(tok)
}
// Match implements matcher; Nil matches untyped nils, the pattern Nil
// value, and typed nil values such as nil pointers.
// NOTE(review): reflect.ValueOf(node).IsNil() panics for non-nilable
// kinds — presumably Nil is only matched against nilable AST fields;
// confirm with callers.
func (Nil) Match(m *Matcher, node interface{}) (interface{}, bool) {
	return nil, isNil(node) || reflect.ValueOf(node).IsNil()
}
// Match implements matcher. A Builtin matches an identifier that
// resolves to the universe-scope object of the same name.
func (builtin Builtin) Match(m *Matcher, node interface{}) (interface{}, bool) {
	matched, ok := match(m, Ident(builtin), node)
	if !ok {
		return nil, false
	}
	ident := matched.(*ast.Ident)
	if m.TypesInfo.ObjectOf(ident) != types.Universe.Lookup(ident.Name) {
		return nil, false
	}
	return ident, true
}
// Match implements matcher. An Object matches an identifier whose name
// matches obj.Name, returning the identifier's resolved types.Object.
func (obj Object) Match(m *Matcher, node interface{}) (interface{}, bool) {
	matched, ok := match(m, Ident(obj), node)
	if !ok {
		return nil, false
	}
	ident := matched.(*ast.Ident)
	resolved := m.TypesInfo.ObjectOf(ident)
	if _, nameOK := match(m, obj.Name, ident.Name); !nameOK {
		return resolved, false
	}
	return resolved, true
}
// Match implements matcher. A Function matches an identifier or selector
// expression that resolves to a *types.Func (or, for identifiers, a
// builtin), with the resolved full name matching fn.Name. It returns the
// resolved types.Object.
func (fn Function) Match(m *Matcher, node interface{}) (interface{}, bool) {
	var name string
	var obj types.Object
	// The callee must syntactically be a bare identifier or a selector.
	r, ok := match(m, Or{Nodes: []Node{Ident{Any{}}, SelectorExpr{Any{}, Any{}}}}, node)
	if !ok {
		return nil, false
	}
	switch r := r.(type) {
	case *ast.Ident:
		obj = m.TypesInfo.ObjectOf(r)
		switch obj := obj.(type) {
		case *types.Func:
			// OPT(dh): optimize this similar to code.FuncName
			name = obj.FullName()
		case *types.Builtin:
			name = obj.Name()
		default:
			return nil, false
		}
	case *ast.SelectorExpr:
		var ok bool
		obj, ok = m.TypesInfo.ObjectOf(r.Sel).(*types.Func)
		if !ok {
			return nil, false
		}
		// OPT(dh): optimize this similar to code.FuncName
		name = obj.(*types.Func).FullName()
	default:
		// The Or above only produces the two cases handled here.
		panic("unreachable")
	}
	_, ok = match(m, fn.Name, name)
	return obj, ok
}
// Match implements matcher. Or succeeds on the first alternative that
// matches, keeping only that alternative's bindings (failed attempts run
// against a fork and are discarded).
func (or Or) Match(m *Matcher, node interface{}) (interface{}, bool) {
	for _, alternative := range or.Nodes {
		speculative := m.fork()
		ret, ok := match(speculative, alternative, node)
		if !ok {
			continue
		}
		m.merge(speculative)
		return ret, true
	}
	return nil, false
}
// Match implements matcher; Not succeeds exactly when its sub-pattern
// fails to match node.
func (not Not) Match(m *Matcher, node interface{}) (interface{}, bool) {
	if _, matched := match(m, not.Node, node); matched {
		return nil, false
	}
	return node, true
}
var (
	// Types of fields in go/ast structs that we want to skip when
	// structurally comparing nodes: source positions, *ast.Object
	// back-references, and attached comments.
	rtTokPos       = reflect.TypeOf(token.Pos(0))
	rtObject       = reflect.TypeOf((*ast.Object)(nil))
	rtCommentGroup = reflect.TypeOf((*ast.CommentGroup)(nil))
)
// Compile-time assertions that every pattern node type implements the
// matcher interface.
var (
	_ matcher = Binding{}
	_ matcher = Any{}
	_ matcher = List{}
	_ matcher = String("")
	_ matcher = Token(0)
	_ matcher = Nil{}
	_ matcher = Builtin{}
	_ matcher = Object{}
	_ matcher = Function{}
	_ matcher = Or{}
	_ matcher = Not{}
)
pattern: allow Function to match type names
package pattern
import (
"fmt"
"go/ast"
"go/token"
"go/types"
"reflect"
)
var tokensByString = map[string]Token{
"INT": Token(token.INT),
"FLOAT": Token(token.FLOAT),
"IMAG": Token(token.IMAG),
"CHAR": Token(token.CHAR),
"STRING": Token(token.STRING),
"+": Token(token.ADD),
"-": Token(token.SUB),
"*": Token(token.MUL),
"/": Token(token.QUO),
"%": Token(token.REM),
"&": Token(token.AND),
"|": Token(token.OR),
"^": Token(token.XOR),
"<<": Token(token.SHL),
">>": Token(token.SHR),
"&^": Token(token.AND_NOT),
"+=": Token(token.ADD_ASSIGN),
"-=": Token(token.SUB_ASSIGN),
"*=": Token(token.MUL_ASSIGN),
"/=": Token(token.QUO_ASSIGN),
"%=": Token(token.REM_ASSIGN),
"&=": Token(token.AND_ASSIGN),
"|=": Token(token.OR_ASSIGN),
"^=": Token(token.XOR_ASSIGN),
"<<=": Token(token.SHL_ASSIGN),
">>=": Token(token.SHR_ASSIGN),
"&^=": Token(token.AND_NOT_ASSIGN),
"&&": Token(token.LAND),
"||": Token(token.LOR),
"<-": Token(token.ARROW),
"++": Token(token.INC),
"--": Token(token.DEC),
"==": Token(token.EQL),
"<": Token(token.LSS),
">": Token(token.GTR),
"=": Token(token.ASSIGN),
"!": Token(token.NOT),
"!=": Token(token.NEQ),
"<=": Token(token.LEQ),
">=": Token(token.GEQ),
":=": Token(token.DEFINE),
"...": Token(token.ELLIPSIS),
"IMPORT": Token(token.IMPORT),
"VAR": Token(token.VAR),
"TYPE": Token(token.TYPE),
"CONST": Token(token.CONST),
"BREAK": Token(token.BREAK),
"CONTINUE": Token(token.CONTINUE),
"GOTO": Token(token.GOTO),
"FALLTHROUGH": Token(token.FALLTHROUGH),
}
func maybeToken(node Node) (Node, bool) {
if node, ok := node.(String); ok {
if tok, ok := tokensByString[string(node)]; ok {
return tok, true
}
return node, false
}
return node, false
}
func isNil(v interface{}) bool {
if v == nil {
return true
}
if _, ok := v.(Nil); ok {
return true
}
return false
}
type matcher interface {
Match(*Matcher, interface{}) (interface{}, bool)
}
type State = map[string]interface{}
type Matcher struct {
TypesInfo *types.Info
State State
}
func (m *Matcher) fork() *Matcher {
state := make(State, len(m.State))
for k, v := range m.State {
state[k] = v
}
return &Matcher{
TypesInfo: m.TypesInfo,
State: state,
}
}
func (m *Matcher) merge(mc *Matcher) {
m.State = mc.State
}
func (m *Matcher) Match(a Node, b ast.Node) bool {
m.State = State{}
_, ok := match(m, a, b)
return ok
}
func Match(a Node, b ast.Node) (*Matcher, bool) {
m := &Matcher{}
ret := m.Match(a, b)
return m, ret
}
// Match two items, which may be (Node, AST) or (AST, AST)
func match(m *Matcher, l, r interface{}) (interface{}, bool) {
if _, ok := r.(Node); ok {
panic("Node mustn't be on right side of match")
}
switch l := l.(type) {
case *ast.ParenExpr:
return match(m, l.X, r)
case *ast.ExprStmt:
return match(m, l.X, r)
case *ast.DeclStmt:
return match(m, l.Decl, r)
case *ast.LabeledStmt:
return match(m, l.Stmt, r)
case *ast.BlockStmt:
return match(m, l.List, r)
case *ast.FieldList:
return match(m, l.List, r)
}
switch r := r.(type) {
case *ast.ParenExpr:
return match(m, l, r.X)
case *ast.ExprStmt:
return match(m, l, r.X)
case *ast.DeclStmt:
return match(m, l, r.Decl)
case *ast.LabeledStmt:
return match(m, l, r.Stmt)
case *ast.BlockStmt:
if r == nil {
return match(m, l, nil)
}
return match(m, l, r.List)
case *ast.FieldList:
if r == nil {
return match(m, l, nil)
}
return match(m, l, r.List)
case *ast.BasicLit:
if r == nil {
return match(m, l, nil)
}
}
if l, ok := l.(matcher); ok {
return l.Match(m, r)
}
if l, ok := l.(Node); ok {
// Matching of pattern with concrete value
return matchNodeAST(m, l, r)
}
if l == nil || r == nil {
return nil, l == r
}
{
ln, ok1 := l.(ast.Node)
rn, ok2 := r.(ast.Node)
if ok1 && ok2 {
return matchAST(m, ln, rn)
}
}
{
obj, ok := l.(types.Object)
if ok {
switch r := r.(type) {
case *ast.Ident:
return obj, obj == m.TypesInfo.ObjectOf(r)
case *ast.SelectorExpr:
return obj, obj == m.TypesInfo.ObjectOf(r.Sel)
default:
return obj, false
}
}
}
{
ln, ok1 := l.([]ast.Expr)
rn, ok2 := r.([]ast.Expr)
if ok1 || ok2 {
if ok1 && !ok2 {
rn = []ast.Expr{r.(ast.Expr)}
} else if !ok1 && ok2 {
ln = []ast.Expr{l.(ast.Expr)}
}
if len(ln) != len(rn) {
return nil, false
}
for i, ll := range ln {
if _, ok := match(m, ll, rn[i]); !ok {
return nil, false
}
}
return r, true
}
}
{
ln, ok1 := l.([]ast.Stmt)
rn, ok2 := r.([]ast.Stmt)
if ok1 || ok2 {
if ok1 && !ok2 {
rn = []ast.Stmt{r.(ast.Stmt)}
} else if !ok1 && ok2 {
ln = []ast.Stmt{l.(ast.Stmt)}
}
if len(ln) != len(rn) {
return nil, false
}
for i, ll := range ln {
if _, ok := match(m, ll, rn[i]); !ok {
return nil, false
}
}
return r, true
}
}
{
ln, ok1 := l.([]*ast.Field)
rn, ok2 := r.([]*ast.Field)
if ok1 || ok2 {
if ok1 && !ok2 {
rn = []*ast.Field{r.(*ast.Field)}
} else if !ok1 && ok2 {
ln = []*ast.Field{l.(*ast.Field)}
}
if len(ln) != len(rn) {
return nil, false
}
for i, ll := range ln {
if _, ok := match(m, ll, rn[i]); !ok {
return nil, false
}
}
return r, true
}
}
panic(fmt.Sprintf("unsupported comparison: %T and %T", l, r))
}
// matchNodeAST matches a pattern Node against a concrete AST value,
// which may be a single node or a one-element statement/expression list.
func matchNodeAST(m *Matcher, a Node, b interface{}) (interface{}, bool) {
	switch b := b.(type) {
	case []ast.Stmt:
		// 'a' is not a List or we'd be using its Match
		// implementation.
		if len(b) != 1 {
			return nil, false
		}
		return match(m, a, b[0])
	case []ast.Expr:
		// 'a' is not a List or we'd be using its Match
		// implementation.
		if len(b) != 1 {
			return nil, false
		}
		return match(m, a, b[0])
	case ast.Node:
		// Compare the pattern struct's fields against the same-named
		// fields of the AST node.
		ra := reflect.ValueOf(a)
		rb := reflect.ValueOf(b).Elem()
		if ra.Type().Name() != rb.Type().Name() {
			return nil, false
		}
		for i := 0; i < ra.NumField(); i++ {
			af := ra.Field(i)
			fieldName := ra.Type().Field(i).Name
			bf := rb.FieldByName(fieldName)
			if (bf == reflect.Value{}) {
				// Fixed verb: %T (dynamic type), not %t (boolean), for b.
				panic(fmt.Sprintf("internal error: could not find field %s in type %T when comparing with %T", fieldName, b, a))
			}
			ai := af.Interface()
			bi := bf.Interface()
			if ai == nil {
				return b, bi == nil
			}
			if _, ok := match(m, ai.(Node), bi); !ok {
				return b, false
			}
		}
		return b, true
	case nil:
		return nil, a == Nil{}
	default:
		panic(fmt.Sprintf("unhandled type %T", b))
	}
}
// Match two AST nodes
func matchAST(m *Matcher, a, b ast.Node) (interface{}, bool) {
ra := reflect.ValueOf(a)
rb := reflect.ValueOf(b)
if ra.Type() != rb.Type() {
return nil, false
}
if ra.IsNil() || rb.IsNil() {
return rb, ra.IsNil() == rb.IsNil()
}
ra = ra.Elem()
rb = rb.Elem()
for i := 0; i < ra.NumField(); i++ {
af := ra.Field(i)
bf := rb.Field(i)
if af.Type() == rtTokPos || af.Type() == rtObject || af.Type() == rtCommentGroup {
continue
}
switch af.Kind() {
case reflect.Slice:
if af.Len() != bf.Len() {
return nil, false
}
for j := 0; j < af.Len(); j++ {
if _, ok := match(m, af.Index(j).Interface().(ast.Node), bf.Index(j).Interface().(ast.Node)); !ok {
return nil, false
}
}
case reflect.String:
if af.String() != bf.String() {
return nil, false
}
case reflect.Int:
if af.Int() != bf.Int() {
return nil, false
}
case reflect.Bool:
if af.Bool() != bf.Bool() {
return nil, false
}
case reflect.Ptr, reflect.Interface:
if _, ok := match(m, af.Interface(), bf.Interface()); !ok {
return nil, false
}
default:
panic(fmt.Sprintf("internal error: unhandled kind %s (%T)", af.Kind(), af.Interface()))
}
}
return b, true
}
func (b Binding) Match(m *Matcher, node interface{}) (interface{}, bool) {
if isNil(b.Node) {
v, ok := m.State[b.Name]
if ok {
// Recall value
return match(m, v, node)
}
// Matching anything
b.Node = Any{}
}
// Store value
if _, ok := m.State[b.Name]; ok {
panic(fmt.Sprintf("binding already created: %s", b.Name))
}
new, ret := match(m, b.Node, node)
if ret {
m.State[b.Name] = new
}
return new, ret
}
func (Any) Match(m *Matcher, node interface{}) (interface{}, bool) {
return node, true
}
func (l List) Match(m *Matcher, node interface{}) (interface{}, bool) {
v := reflect.ValueOf(node)
if v.Kind() == reflect.Slice {
if isNil(l.Head) {
return node, v.Len() == 0
}
if v.Len() == 0 {
return nil, false
}
// OPT(dh): don't check the entire tail if head didn't match
_, ok1 := match(m, l.Head, v.Index(0).Interface())
_, ok2 := match(m, l.Tail, v.Slice(1, v.Len()).Interface())
return node, ok1 && ok2
}
// Our empty list does not equal an untyped Go nil. This way, we can
// tell apart an if with no else and an if with an empty else.
return nil, false
}
func (s String) Match(m *Matcher, node interface{}) (interface{}, bool) {
switch o := node.(type) {
case token.Token:
if tok, ok := maybeToken(s); ok {
return match(m, tok, node)
}
return nil, false
case string:
return o, string(s) == o
default:
return nil, false
}
}
// Match implements matcher; a Token matches exactly the same token.Token.
func (tok Token) Match(m *Matcher, node interface{}) (interface{}, bool) {
	other, isToken := node.(token.Token)
	if !isToken {
		return nil, false
	}
	return other, other == token.Token(tok)
}
// Match implements matcher; Nil matches untyped nils, the pattern Nil
// value, and typed nil values such as nil pointers.
// NOTE(review): reflect.ValueOf(node).IsNil() panics for non-nilable
// kinds — presumably Nil is only matched against nilable AST fields;
// confirm with callers.
func (Nil) Match(m *Matcher, node interface{}) (interface{}, bool) {
	return nil, isNil(node) || reflect.ValueOf(node).IsNil()
}
func (builtin Builtin) Match(m *Matcher, node interface{}) (interface{}, bool) {
r, ok := match(m, Ident(builtin), node)
if !ok {
return nil, false
}
ident := r.(*ast.Ident)
obj := m.TypesInfo.ObjectOf(ident)
if obj != types.Universe.Lookup(ident.Name) {
return nil, false
}
return ident, true
}
func (obj Object) Match(m *Matcher, node interface{}) (interface{}, bool) {
r, ok := match(m, Ident(obj), node)
if !ok {
return nil, false
}
ident := r.(*ast.Ident)
id := m.TypesInfo.ObjectOf(ident)
_, ok = match(m, obj.Name, ident.Name)
return id, ok
}
// Match implements matcher. A Function matches an identifier or selector
// expression that resolves to a function, a builtin, or a type name
// (the latter added so Function patterns can match type conversions),
// with the resolved full name matching fn.Name. It returns the resolved
// types.Object.
func (fn Function) Match(m *Matcher, node interface{}) (interface{}, bool) {
	var name string
	var obj types.Object
	// The callee must syntactically be a bare identifier or a selector.
	r, ok := match(m, Or{Nodes: []Node{Ident{Any{}}, SelectorExpr{Any{}, Any{}}}}, node)
	if !ok {
		return nil, false
	}
	switch r := r.(type) {
	case *ast.Ident:
		obj = m.TypesInfo.ObjectOf(r)
		switch obj := obj.(type) {
		case *types.Func:
			// OPT(dh): optimize this similar to code.FuncName
			name = obj.FullName()
		case *types.Builtin:
			name = obj.Name()
		case *types.TypeName:
			name = types.TypeString(obj.Type(), nil)
		default:
			return nil, false
		}
	case *ast.SelectorExpr:
		obj = m.TypesInfo.ObjectOf(r.Sel)
		switch obj := obj.(type) {
		case *types.Func:
			// OPT(dh): optimize this similar to code.FuncName
			name = obj.FullName()
		case *types.TypeName:
			name = types.TypeString(obj.Type(), nil)
		default:
			return nil, false
		}
	default:
		// The Or above only produces the two cases handled here.
		panic("unreachable")
	}
	_, ok = match(m, fn.Name, name)
	return obj, ok
}
func (or Or) Match(m *Matcher, node interface{}) (interface{}, bool) {
for _, opt := range or.Nodes {
mc := m.fork()
if ret, ok := match(mc, opt, node); ok {
m.merge(mc)
return ret, true
}
}
return nil, false
}
func (not Not) Match(m *Matcher, node interface{}) (interface{}, bool) {
_, ok := match(m, not.Node, node)
if ok {
return nil, false
}
return node, true
}
var (
// Types of fields in go/ast structs that we want to skip
rtTokPos = reflect.TypeOf(token.Pos(0))
rtObject = reflect.TypeOf((*ast.Object)(nil))
rtCommentGroup = reflect.TypeOf((*ast.CommentGroup)(nil))
)
var (
_ matcher = Binding{}
_ matcher = Any{}
_ matcher = List{}
_ matcher = String("")
_ matcher = Token(0)
_ matcher = Nil{}
_ matcher = Builtin{}
_ matcher = Object{}
_ matcher = Function{}
_ matcher = Or{}
_ matcher = Not{}
)
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gctuner
import (
"runtime"
"testing"
"github.com/stretchr/testify/require"
)
var testHeap []byte
// TestTuner drives the GOGC tuner with heaps of increasing size relative
// to the tuning threshold and checks the computed GC percent. A large
// memory limit and bounds expressed in terms of Min/MaxGCPercent keep
// allocator noise from flipping the asserted ranges, which made the
// previous hard-coded bounds (uint32(100)/uint32(500) with a 100 MB
// limit) flaky — see pingcap/tidb#38467.
func TestTuner(t *testing.T) {
	EnableGOGCTuner.Store(true)
	memLimit := uint64(1000 * 1024 * 1024) //1000 MB
	threshold := memLimit / 2
	tn := newTuner(threshold)
	require.Equal(t, threshold, tn.threshold.Load())
	require.Equal(t, defaultGCPercent, tn.getGCPercent())

	// no heap
	testHeap = make([]byte, 1)
	runtime.GC()
	runtime.GC()
	for i := 0; i < 100; i++ {
		runtime.GC()
		require.Equal(t, MaxGCPercent, tn.getGCPercent())
	}

	// 1/4 threshold: expect the upper half of the allowed range.
	testHeap = make([]byte, threshold/4)
	for i := 0; i < 100; i++ {
		runtime.GC()
		require.GreaterOrEqual(t, tn.getGCPercent(), MaxGCPercent/2)
		require.LessOrEqual(t, tn.getGCPercent(), MaxGCPercent)
	}

	// 1/2 threshold: expect the lower half of the allowed range.
	testHeap = make([]byte, threshold/2)
	runtime.GC()
	for i := 0; i < 100; i++ {
		runtime.GC()
		require.GreaterOrEqual(t, tn.getGCPercent(), MinGCPercent)
		require.LessOrEqual(t, tn.getGCPercent(), MaxGCPercent/2)
	}

	// 3/4 threshold
	testHeap = make([]byte, threshold/4*3)
	runtime.GC()
	for i := 0; i < 100; i++ {
		runtime.GC()
		require.Equal(t, MinGCPercent, tn.getGCPercent())
	}

	// out of threshold
	testHeap = make([]byte, threshold+1024)
	runtime.GC()
	for i := 0; i < 100; i++ {
		runtime.GC()
		require.Equal(t, MinGCPercent, tn.getGCPercent())
	}
}
// TestCalcGCPercent checks calcGCPercent for invalid inputs and for heap
// sizes ranging from far below to beyond the memory limit.
func TestCalcGCPercent(t *testing.T) {
	const gb = 1024 * 1024 * 1024
	// use default value when invalid params
	require.Equal(t, defaultGCPercent, calcGCPercent(0, 0))
	require.Equal(t, defaultGCPercent, calcGCPercent(0, 1))
	require.Equal(t, defaultGCPercent, calcGCPercent(1, 0))
	// tiny heaps relative to the limit clamp to the maximum
	require.Equal(t, MaxGCPercent, calcGCPercent(1, 3*gb))
	require.Equal(t, MaxGCPercent, calcGCPercent(gb/10, 4*gb))
	require.Equal(t, MaxGCPercent, calcGCPercent(gb/2, 4*gb))
	// mid-range heaps scale the percent down
	require.Equal(t, uint32(300), calcGCPercent(1*gb, 4*gb))
	require.Equal(t, uint32(166), calcGCPercent(1.5*gb, 4*gb))
	require.Equal(t, uint32(100), calcGCPercent(2*gb, 4*gb))
	require.Equal(t, uint32(100), calcGCPercent(3*gb, 4*gb))
	// heaps at or beyond the limit clamp to the minimum
	require.Equal(t, MinGCPercent, calcGCPercent(4*gb, 4*gb))
	require.Equal(t, MinGCPercent, calcGCPercent(5*gb, 4*gb))
}
gogctuner: fix unstable test in the TestTuner (#39101)
close pingcap/tidb#38467
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gctuner
import (
"runtime"
"testing"
"github.com/stretchr/testify/require"
)
var testHeap []byte
// TestTuner drives the GOGC tuner with heaps of increasing size relative
// to the tuning threshold and checks the computed GC percent at each
// level. The large memory limit and range assertions expressed via
// Min/MaxGCPercent keep the test stable against allocator noise.
func TestTuner(t *testing.T) {
	EnableGOGCTuner.Store(true)
	memLimit := uint64(1000 * 1024 * 1024) //1000 MB
	threshold := memLimit / 2
	tn := newTuner(threshold)
	require.Equal(t, threshold, tn.threshold.Load())
	require.Equal(t, defaultGCPercent, tn.getGCPercent())
	// no heap
	testHeap = make([]byte, 1)
	runtime.GC()
	runtime.GC()
	for i := 0; i < 100; i++ {
		runtime.GC()
		require.Equal(t, MaxGCPercent, tn.getGCPercent())
	}
	// 1/4 threshold: expect the upper half of the allowed range.
	testHeap = make([]byte, threshold/4)
	for i := 0; i < 100; i++ {
		runtime.GC()
		require.GreaterOrEqual(t, tn.getGCPercent(), MaxGCPercent/2)
		require.LessOrEqual(t, tn.getGCPercent(), MaxGCPercent)
	}
	// 1/2 threshold: expect the lower half of the allowed range.
	testHeap = make([]byte, threshold/2)
	runtime.GC()
	for i := 0; i < 100; i++ {
		runtime.GC()
		require.GreaterOrEqual(t, tn.getGCPercent(), MinGCPercent)
		require.LessOrEqual(t, tn.getGCPercent(), MaxGCPercent/2)
	}
	// 3/4 threshold
	testHeap = make([]byte, threshold/4*3)
	runtime.GC()
	for i := 0; i < 100; i++ {
		runtime.GC()
		require.Equal(t, MinGCPercent, tn.getGCPercent())
	}
	// out of threshold
	testHeap = make([]byte, threshold+1024)
	runtime.GC()
	for i := 0; i < 100; i++ {
		runtime.GC()
		require.Equal(t, MinGCPercent, tn.getGCPercent())
	}
}
// TestCalcGCPercent checks calcGCPercent for invalid inputs and for heap
// sizes ranging from far below to beyond the memory limit.
func TestCalcGCPercent(t *testing.T) {
	const gb = 1024 * 1024 * 1024
	// use default value when invalid params
	require.Equal(t, defaultGCPercent, calcGCPercent(0, 0))
	require.Equal(t, defaultGCPercent, calcGCPercent(0, 1))
	require.Equal(t, defaultGCPercent, calcGCPercent(1, 0))
	// tiny heaps relative to the limit clamp to the maximum
	require.Equal(t, MaxGCPercent, calcGCPercent(1, 3*gb))
	require.Equal(t, MaxGCPercent, calcGCPercent(gb/10, 4*gb))
	require.Equal(t, MaxGCPercent, calcGCPercent(gb/2, 4*gb))
	// mid-range heaps scale the percent down
	require.Equal(t, uint32(300), calcGCPercent(1*gb, 4*gb))
	require.Equal(t, uint32(166), calcGCPercent(1.5*gb, 4*gb))
	require.Equal(t, uint32(100), calcGCPercent(2*gb, 4*gb))
	require.Equal(t, uint32(100), calcGCPercent(3*gb, 4*gb))
	// heaps at or beyond the limit clamp to the minimum
	require.Equal(t, MinGCPercent, calcGCPercent(4*gb, 4*gb))
	require.Equal(t, MinGCPercent, calcGCPercent(5*gb, 4*gb))
}
|
package renter
// TODO: Add failure cooldowns to the workers, particularly for uploading tasks.
import (
"sync"
"time"
"github.com/NebulousLabs/Sia/modules"
"github.com/NebulousLabs/Sia/types"
)
// A worker listens for work on a certain host.
//
// The mutex of the worker only protects the 'unprocessedChunks' and the
// 'standbyChunks' fields of the worker. The rest of the fields are only
// interacted with exclusively by the primary worker thread, and only one of
// those ever exists at a time.
type worker struct {
	// The contract and host used by this worker.
	contract   modules.RenterContract
	hostPubKey types.SiaPublicKey
	renter     *Renter

	// Channels that inform the worker of kill signals and of new work.
	downloadChan         chan downloadWork // higher priority than all uploads
	killChan             chan struct{}     // highest priority
	priorityDownloadChan chan downloadWork // higher priority than downloads (used for user-initiated downloads)
	uploadChan           chan struct{}     // lowest priority

	// Operation failure statistics for the worker.
	downloadRecentFailure     time.Time // Only modified by the primary download loop.
	uploadRecentFailure       time.Time // Only modified by primary repair loop.
	uploadConsecutiveFailures time.Duration

	// Two lists of chunks that relate to worker upload tasks. The first list is
	// the set of chunks that the worker hasn't examined yet. The second list is
	// the list of chunks that the worker examined, but was unable to process
	// because other workers had taken on all of the work already. This list is
	// maintained in case any of the other workers fail - this worker will be
	// able to pick up the slack.
	mu                sync.Mutex
	standbyChunks     []*unfinishedChunk
	terminated        bool // set once the worker has been shut down
	unprocessedChunks []*unfinishedChunk
}
// threadedWorkLoop repeatedly issues work to a worker, stopping when the worker
// is killed or when the thread group is closed.
//
// Each iteration drains work in strict priority order: priority downloads
// first, then standard downloads, then one step of upload work. Only when
// nothing is available does the loop block on the final select.
func (w *worker) threadedWorkLoop() {
	err := w.renter.tg.Add()
	if err != nil {
		// The renter is shutting down; do not start the loop.
		return
	}
	defer w.renter.tg.Done()

	// The worker may have upload chunks and it needs to drop them before
	// terminating.
	defer w.managedDropUploadChunks()

	for {
		// Check for priority downloads.
		select {
		case d := <-w.priorityDownloadChan:
			w.download(d)
			continue
		default:
		}

		// Check for standard downloads.
		select {
		case d := <-w.downloadChan:
			w.download(d)
			continue
		default:
		}

		// Perform one step of processing upload work.
		chunk, pieceIndex := w.managedNextChunk()
		if chunk != nil {
			w.upload(chunk, pieceIndex)
			continue
		}

		// Determine the maximum amount of time to wait for any standby chunks.
		// A short sleep is used when standby chunks exist so they are
		// revisited promptly; otherwise sleep for a long time.
		var sleepDuration time.Duration
		w.mu.Lock()
		numStandby := len(w.standbyChunks)
		w.mu.Unlock()
		if numStandby > 0 {
			// TODO: Pick a random time instead of just a constant time.
			sleepDuration = time.Second * 3 // TODO: Constant
		} else {
			sleepDuration = time.Hour // TODO: Constant
		}

		// Block until new work is received via the upload or download channels,
		// or until the standby chunks are ready to be revisited, or until a
		// kill signal is received.
		select {
		case d := <-w.priorityDownloadChan:
			w.download(d)
			continue
		case d := <-w.downloadChan:
			w.download(d)
			continue
		case <-w.uploadChan:
			continue
		case <-time.After(sleepDuration):
			continue
		case <-w.killChan:
			return
		case <-w.renter.tg.StopChan():
			return
		}
	}
}
// managedUpdateWorkerPool grabs the set of contracts from the contractor and
// reconciles the worker pool against it: a worker is started for every
// contract that lacks one, and workers whose contracts have disappeared are
// removed from the pool and killed.
func (r *Renter) managedUpdateWorkerPool() {
	// Index the current contracts by their file contract id.
	contracts := r.hostContractor.Contracts()
	current := make(map[types.FileContractID]modules.RenterContract)
	for _, c := range contracts {
		current[c.ID] = c
	}

	// Spin up a worker for each contract that does not have one yet.
	for id, c := range current {
		lockID := r.mu.Lock()
		if _, ok := r.workerPool[id]; !ok {
			w := &worker{
				contract:   c,
				hostPubKey: c.HostPublicKey,

				downloadChan:         make(chan downloadWork, 1),
				killChan:             make(chan struct{}),
				priorityDownloadChan: make(chan downloadWork, 1),
				uploadChan:           make(chan struct{}, 1),

				renter: r,
			}
			r.workerPool[id] = w
			go w.threadedWorkLoop()
		}
		r.mu.Unlock(lockID)
	}

	// Kill and drop any worker whose contract is no longer active.
	lockID := r.mu.Lock()
	for id, w := range r.workerPool {
		if _, ok := current[id]; !ok {
			delete(r.workerPool, id)
			close(w.killChan)
		}
	}
	r.mu.Unlock(lockID)
}
run gofmt
package renter
// TODO: Add failure cooldowns to the workers, particularly for uploading tasks.
import (
"sync"
"time"
"github.com/NebulousLabs/Sia/modules"
"github.com/NebulousLabs/Sia/types"
)
// A worker listens for work on a certain host.
//
// The mutex of the worker only protects the 'unprocessedChunks' and the
// 'standbyChunks' fields of the worker. The rest of the fields are only
// interacted with exclusively by the primary worker thread, and only one of
// those ever exists at a time.
type worker struct {
	// The contract and host used by this worker.
	contract   modules.RenterContract
	hostPubKey types.SiaPublicKey
	renter     *Renter

	// Channels that inform the worker of kill signals and of new work.
	downloadChan         chan downloadWork // higher priority than all uploads
	killChan             chan struct{}     // highest priority
	priorityDownloadChan chan downloadWork // higher priority than downloads (used for user-initiated downloads)
	uploadChan           chan struct{}     // lowest priority

	// Operation failure statistics for the worker.
	downloadRecentFailure     time.Time // Only modified by the primary download loop.
	uploadRecentFailure       time.Time // Only modified by primary repair loop.
	uploadConsecutiveFailures time.Duration

	// Two lists of chunks that relate to worker upload tasks. The first list is
	// the set of chunks that the worker hasn't examined yet. The second list is
	// the list of chunks that the worker examined, but was unable to process
	// because other workers had taken on all of the work already. This list is
	// maintained in case any of the other workers fail - this worker will be
	// able to pick up the slack.
	mu                sync.Mutex
	standbyChunks     []*unfinishedChunk
	terminated        bool // set once the worker has been shut down
	unprocessedChunks []*unfinishedChunk
}
// threadedWorkLoop repeatedly issues work to a worker, stopping when the worker
// is killed or when the thread group is closed.
//
// Each iteration drains work in strict priority order: priority downloads
// first, then standard downloads, then one step of upload work. Only when
// nothing is available does the loop block on the final select.
func (w *worker) threadedWorkLoop() {
	err := w.renter.tg.Add()
	if err != nil {
		// The renter is shutting down; do not start the loop.
		return
	}
	defer w.renter.tg.Done()

	// The worker may have upload chunks and it needs to drop them before
	// terminating.
	defer w.managedDropUploadChunks()

	for {
		// Check for priority downloads.
		select {
		case d := <-w.priorityDownloadChan:
			w.download(d)
			continue
		default:
		}

		// Check for standard downloads.
		select {
		case d := <-w.downloadChan:
			w.download(d)
			continue
		default:
		}

		// Perform one step of processing upload work.
		chunk, pieceIndex := w.managedNextChunk()
		if chunk != nil {
			w.upload(chunk, pieceIndex)
			continue
		}

		// Determine the maximum amount of time to wait for any standby chunks.
		// A short sleep is used when standby chunks exist so they are
		// revisited promptly; otherwise sleep for a long time.
		var sleepDuration time.Duration
		w.mu.Lock()
		numStandby := len(w.standbyChunks)
		w.mu.Unlock()
		if numStandby > 0 {
			// TODO: Pick a random time instead of just a constant time.
			sleepDuration = time.Second * 3 // TODO: Constant
		} else {
			sleepDuration = time.Hour // TODO: Constant
		}

		// Block until new work is received via the upload or download channels,
		// or until the standby chunks are ready to be revisited, or until a
		// kill signal is received.
		select {
		case d := <-w.priorityDownloadChan:
			w.download(d)
			continue
		case d := <-w.downloadChan:
			w.download(d)
			continue
		case <-w.uploadChan:
			continue
		case <-time.After(sleepDuration):
			continue
		case <-w.killChan:
			return
		case <-w.renter.tg.StopChan():
			return
		}
	}
}
// managedUpdateWorkerPool grabs the set of contracts from the contractor and
// reconciles the worker pool against it: a worker is started for every
// contract that lacks one, and workers whose contracts have disappeared are
// removed from the pool and killed.
func (r *Renter) managedUpdateWorkerPool() {
	// Index the current contracts by their file contract id.
	contracts := r.hostContractor.Contracts()
	current := make(map[types.FileContractID]modules.RenterContract)
	for _, c := range contracts {
		current[c.ID] = c
	}

	// Spin up a worker for each contract that does not have one yet.
	for id, c := range current {
		lockID := r.mu.Lock()
		if _, ok := r.workerPool[id]; !ok {
			w := &worker{
				contract:   c,
				hostPubKey: c.HostPublicKey,

				downloadChan:         make(chan downloadWork, 1),
				killChan:             make(chan struct{}),
				priorityDownloadChan: make(chan downloadWork, 1),
				uploadChan:           make(chan struct{}, 1),

				renter: r,
			}
			r.workerPool[id] = w
			go w.threadedWorkLoop()
		}
		r.mu.Unlock(lockID)
	}

	// Kill and drop any worker whose contract is no longer active.
	lockID := r.mu.Lock()
	for id, w := range r.workerPool {
		if _, ok := current[id]; !ok {
			delete(r.workerPool, id)
			close(w.killChan)
		}
	}
	r.mu.Unlock(lockID)
}
|
package main
import (
"fmt"
"github.com/streadway/amqp"
"koding/kontrol/daemon/handler"
"koding/kontrol/daemon/handler/proxy"
"koding/tools/amqputil"
"koding/tools/config"
"log"
)
// Consumer bundles the AMQP connection state shared by all of the daemon's
// queue consumers.
type Consumer struct {
	conn    *amqp.Connection // single connection shared by every consumer
	channel *amqp.Channel    // single channel multiplexed over conn
	tag     string           // consumer tag, used when cancelling in Shutdown
	done    chan error       // receives the result of message handling on exit
}
// init tags every log line with the daemon's name.
func init() {
	log.SetPrefix("kontrol-daemon ")
}
// main initializes the handlers, starts routing AMQP messages, and then
// blocks forever; all work happens in the consumer goroutine started by
// startRouting.
func main() {
	// Initialize db and startup settings
	// Fixed typo in the log message: "initiliazing" -> "initializing".
	log.Printf("initializing handlers")
	handler.Startup()
	proxy.Startup()

	_, err := startRouting()
	if err != nil {
		log.Fatalf("Could not start routing of api messages: %s", err)
	}

	// Block the main goroutine forever.
	select {}
}
// startRouting connects to RabbitMQ, declares the exchanges and queues used
// by the kontrol daemon, binds and consumes them, and starts a goroutine
// that dispatches incoming deliveries to the matching handler. The returned
// Consumer owns the connection so the caller can Shutdown later.
//
// BUG FIX: the original called log.Fatal with printf-style arguments;
// log.Fatal does not interpolate format verbs, so the "%s" placeholders were
// printed literally. All of those calls now use log.Fatalf.
func startRouting() (*Consumer, error) {
	c := &Consumer{
		conn:    nil,
		channel: nil,
		tag:     "",
		done:    make(chan error),
	}

	// bind pairs a queue name with the routing key it should receive.
	type bind struct {
		queue string
		key   string
	}

	// Queues fed by the infoExchange.
	apiBindings := []bind{
		{"kontrol-cli", "input.cli"},
		{"kontrol-webapi", "input.webapi"},
		{"kontrol-proxy", "input.proxy"},
	}

	// Queues fed by the workerExchange.
	workerBindings := []bind{
		{"kontrol-worker", "input.worker"},
	}

	log.Printf("creating connection to handle incoming cli and api messages")
	user := config.Current.Kontrold.RabbitMq.Login
	password := config.Current.Kontrold.RabbitMq.Password
	host := config.Current.Kontrold.RabbitMq.Host
	port := config.Current.Kontrold.RabbitMq.Port

	/* We use one connection and channel for our three consumers */
	c.conn = amqputil.CreateAmqpConnection(user, password, host, port)
	c.channel = amqputil.CreateChannel(c.conn)

	err := c.channel.ExchangeDeclare("infoExchange", "topic", true, false, false, false, nil)
	if err != nil {
		log.Fatalf("info exchange.declare: %s", err)
	}

	err = c.channel.ExchangeDeclare("workerExchange", "topic", true, false, false, false, nil)
	if err != nil {
		log.Fatalf("worker exchange.declare: %s", err)
	}

	for _, a := range apiBindings {
		_, err = c.channel.QueueDeclare(a.queue, false, true, false, false, nil)
		if err != nil {
			log.Fatalf("queue.declare: %s", err)
		}

		err = c.channel.QueueBind(a.queue, a.key, "infoExchange", false, nil)
		if err != nil {
			log.Fatalf("queue.bind: %s", err)
		}
	}

	for _, w := range workerBindings {
		_, err = c.channel.QueueDeclare(w.queue, false, true, false, false, nil)
		if err != nil {
			log.Fatalf("queue.declare: %s", err)
		}

		err = c.channel.QueueBind(w.queue, w.key, "workerExchange", false, nil)
		if err != nil {
			log.Fatalf("queue.bind: %s", err)
		}
	}

	// Limit the number of unacknowledged deliveries in flight per consumer.
	err = c.channel.Qos(4, 0, false)
	if err != nil {
		log.Fatalf("basic.qos: %s", err)
	}

	cliStream, err := c.channel.Consume("kontrol-cli", "", true, true, false, false, nil)
	if err != nil {
		log.Fatalf("basic.consume: %s", err)
	}

	webapiStream, err := c.channel.Consume("kontrol-webapi", "", true, true, false, false, nil)
	if err != nil {
		log.Fatalf("basic.consume: %s", err)
	}

	workerStream, err := c.channel.Consume("kontrol-worker", "", true, true, false, false, nil)
	if err != nil {
		log.Fatalf("basic.consume: %s", err)
	}

	proxyStream, err := c.channel.Consume("kontrol-proxy", "", true, true, false, false, nil)
	if err != nil {
		log.Fatalf("basic.consume: %s", err)
	}

	// Dispatch deliveries from all four streams to their handlers.
	go func() {
		for {
			select {
			case d := <-workerStream:
				if config.Verbose {
					log.Printf("worker handle got %dB message data: [%v] %s %s", len(d.Body), d.DeliveryTag, d.Body, d.AppId)
				}
				handler.HandleWorkerMessage(d.Body)
			case d := <-cliStream:
				if config.Verbose {
					log.Printf("cli handle got %dB message data: [%v] %s", len(d.Body), d.DeliveryTag, d.Body)
				}
				handler.HandleApiMessage(d.Body, d.AppId)
			case d := <-webapiStream:
				if config.Verbose {
					log.Printf("webapi handle got %dB message data: [%v] %s", len(d.Body), d.DeliveryTag, d.Body)
				}
				handler.HandleApiMessage(d.Body, "")
			case d := <-proxyStream:
				if config.Verbose {
					log.Printf("proxy handle got %dB message data: [%v] %s", len(d.Body), d.DeliveryTag, d.Body)
				}
				// d.AppId is stored in d.Body.ProxyMessage.Uuid...
				proxy.HandleMessage(d.Body)
			}
		}
	}()

	return c, nil
}
// Shutdown cancels the consumer and closes the AMQP connection, then waits
// for the message-handling goroutine to signal completion via c.done and
// returns its error.
func (c *Consumer) Shutdown() error {
	// will close() the deliveries channel
	if err := c.channel.Cancel(c.tag, true); err != nil {
		return fmt.Errorf("Consumer cancel failed: %s", err)
	}

	if err := c.conn.Close(); err != nil {
		return fmt.Errorf("AMQP connection close error: %s", err)
	}

	// Registered only after both error returns above, so this logs on the
	// success path only, after the receive below completes.
	defer log.Printf("AMQP shutdown OK")

	// wait for handle() to exit
	return <-c.done
}
Don't auto delete queues. This is much better for our use cases.
package main
import (
"fmt"
"github.com/streadway/amqp"
"koding/kontrol/daemon/handler"
"koding/kontrol/daemon/handler/proxy"
"koding/tools/amqputil"
"koding/tools/config"
"log"
)
// Consumer bundles the AMQP connection state shared by all of the daemon's
// queue consumers.
type Consumer struct {
	conn    *amqp.Connection // single connection shared by every consumer
	channel *amqp.Channel    // single channel multiplexed over conn
	tag     string           // consumer tag, used when cancelling in Shutdown
	done    chan error       // receives the result of message handling on exit
}
// init tags every log line with the daemon's name.
func init() {
	log.SetPrefix("kontrol-daemon ")
}
// main initializes the handlers, starts routing AMQP messages, and then
// blocks forever; all work happens in the consumer goroutine started by
// startRouting.
func main() {
	// Initialize db and startup settings
	// Fixed typo in the log message: "initiliazing" -> "initializing".
	log.Printf("initializing handlers")
	handler.Startup()
	proxy.Startup()

	_, err := startRouting()
	if err != nil {
		log.Fatalf("Could not start routing of api messages: %s", err)
	}

	// Block the main goroutine forever.
	select {}
}
// startRouting connects to RabbitMQ, declares the exchanges and queues used
// by the kontrol daemon, binds and consumes them, and starts a goroutine
// that dispatches incoming deliveries to the matching handler. The returned
// Consumer owns the connection so the caller can Shutdown later.
//
// BUG FIX: the original called log.Fatal with printf-style arguments;
// log.Fatal does not interpolate format verbs, so the "%s" placeholders were
// printed literally. All of those calls now use log.Fatalf.
func startRouting() (*Consumer, error) {
	c := &Consumer{
		conn:    nil,
		channel: nil,
		tag:     "",
		done:    make(chan error),
	}

	// bind pairs a queue name with the routing key it should receive.
	type bind struct {
		queue string
		key   string
	}

	// Queues fed by the infoExchange.
	apiBindings := []bind{
		{"kontrol-cli", "input.cli"},
		{"kontrol-webapi", "input.webapi"},
		{"kontrol-proxy", "input.proxy"},
	}

	// Queues fed by the workerExchange.
	workerBindings := []bind{
		{"kontrol-worker", "input.worker"},
	}

	log.Printf("creating connection to handle incoming cli and api messages")
	user := config.Current.Kontrold.RabbitMq.Login
	password := config.Current.Kontrold.RabbitMq.Password
	host := config.Current.Kontrold.RabbitMq.Host
	port := config.Current.Kontrold.RabbitMq.Port

	/* We use one connection and channel for our three consumers */
	c.conn = amqputil.CreateAmqpConnection(user, password, host, port)
	c.channel = amqputil.CreateChannel(c.conn)

	err := c.channel.ExchangeDeclare("infoExchange", "topic", true, false, false, false, nil)
	if err != nil {
		log.Fatalf("info exchange.declare: %s", err)
	}

	err = c.channel.ExchangeDeclare("workerExchange", "topic", true, false, false, false, nil)
	if err != nil {
		log.Fatalf("worker exchange.declare: %s", err)
	}

	// Queues are durable and not auto-deleted so they survive consumer
	// restarts.
	for _, a := range apiBindings {
		_, err = c.channel.QueueDeclare(a.queue, true, false, false, false, nil)
		if err != nil {
			log.Fatalf("queue.declare: %s", err)
		}

		err = c.channel.QueueBind(a.queue, a.key, "infoExchange", false, nil)
		if err != nil {
			log.Fatalf("queue.bind: %s", err)
		}
	}

	for _, w := range workerBindings {
		_, err = c.channel.QueueDeclare(w.queue, true, false, false, false, nil)
		if err != nil {
			log.Fatalf("queue.declare: %s", err)
		}

		err = c.channel.QueueBind(w.queue, w.key, "workerExchange", false, nil)
		if err != nil {
			log.Fatalf("queue.bind: %s", err)
		}
	}

	// Limit the number of unacknowledged deliveries in flight per consumer.
	err = c.channel.Qos(4, 0, false)
	if err != nil {
		log.Fatalf("basic.qos: %s", err)
	}

	cliStream, err := c.channel.Consume("kontrol-cli", "", true, true, false, false, nil)
	if err != nil {
		log.Fatalf("basic.consume: %s", err)
	}

	webapiStream, err := c.channel.Consume("kontrol-webapi", "", true, true, false, false, nil)
	if err != nil {
		log.Fatalf("basic.consume: %s", err)
	}

	workerStream, err := c.channel.Consume("kontrol-worker", "", true, true, false, false, nil)
	if err != nil {
		log.Fatalf("basic.consume: %s", err)
	}

	proxyStream, err := c.channel.Consume("kontrol-proxy", "", true, true, false, false, nil)
	if err != nil {
		log.Fatalf("basic.consume: %s", err)
	}

	// Dispatch deliveries from all four streams to their handlers.
	go func() {
		for {
			select {
			case d := <-workerStream:
				if config.Verbose {
					log.Printf("worker handle got %dB message data: [%v] %s %s", len(d.Body), d.DeliveryTag, d.Body, d.AppId)
				}
				handler.HandleWorkerMessage(d.Body)
			case d := <-cliStream:
				if config.Verbose {
					log.Printf("cli handle got %dB message data: [%v] %s", len(d.Body), d.DeliveryTag, d.Body)
				}
				handler.HandleApiMessage(d.Body, d.AppId)
			case d := <-webapiStream:
				if config.Verbose {
					log.Printf("webapi handle got %dB message data: [%v] %s", len(d.Body), d.DeliveryTag, d.Body)
				}
				handler.HandleApiMessage(d.Body, "")
			case d := <-proxyStream:
				if config.Verbose {
					log.Printf("proxy handle got %dB message data: [%v] %s", len(d.Body), d.DeliveryTag, d.Body)
				}
				// d.AppId is stored in d.Body.ProxyMessage.Uuid...
				proxy.HandleMessage(d.Body)
			}
		}
	}()

	return c, nil
}
// Shutdown cancels the consumer and closes the AMQP connection, then waits
// for the message-handling goroutine to signal completion via c.done and
// returns its error.
func (c *Consumer) Shutdown() error {
	// will close() the deliveries channel
	if err := c.channel.Cancel(c.tag, true); err != nil {
		return fmt.Errorf("Consumer cancel failed: %s", err)
	}

	if err := c.conn.Close(); err != nil {
		return fmt.Errorf("AMQP connection close error: %s", err)
	}

	// Registered only after both error returns above, so this logs on the
	// success path only, after the receive below completes.
	defer log.Printf("AMQP shutdown OK")

	// wait for handle() to exit
	return <-c.done
}
|
package main
import (
"socialapi/models"
"socialapi/rest"
"socialapi/workers/common/tests"
"testing"
"github.com/koding/runner"
. "github.com/smartystreets/goconvey/convey"
)
// TestFollowedTopics exercises the followed-topics REST endpoints: following
// (joining) topic channels, listing followed channels, and un-following.
// Several original inline comments contradicted the assertions below them
// ("there should be an err" above ShouldBeNil); they have been corrected.
func TestFollowedTopics(t *testing.T) {
	tests.WithRunner(t, func(r *runner.Runner) {
		Convey("While testing followed topics", t, func() {
			Convey("First Create User", func() {
				// Set up a group, an owner account, a session, and two topic
				// channels shared by all sub-tests below.
				groupName := models.RandomGroupName()

				account, err := models.CreateAccountInBothDbs()
				So(err, ShouldBeNil)
				So(account, ShouldNotBeNil)

				models.CreateTypedGroupedChannelWithTest(
					account.Id,
					models.Channel_TYPE_GROUP,
					groupName,
				)

				ses, err := models.FetchOrCreateSession(account.Nick, groupName)
				So(err, ShouldBeNil)
				So(ses, ShouldNotBeNil)

				nonOwnerAccount := models.NewAccount()
				nonOwnerAccount.OldId = AccountOldId.Hex()
				nonOwnerAccount, err = rest.CreateAccount(nonOwnerAccount)
				So(err, ShouldBeNil)
				So(nonOwnerAccount, ShouldNotBeNil)

				topicChannel1, err := rest.CreateChannelByGroupNameAndType(
					account.Id,
					groupName,
					models.Channel_TYPE_TOPIC,
					ses.ClientId,
				)
				So(err, ShouldBeNil)

				topicChannel2, err := rest.CreateChannelByGroupNameAndType(
					account.Id,
					groupName,
					models.Channel_TYPE_TOPIC,
					ses.ClientId,
				)
				So(err, ShouldBeNil)

				Convey("user should be able to follow one topic", func() {
					channelParticipant, err := rest.AddChannelParticipant(topicChannel1.Id, ses.ClientId, account.Id)
					// there should be no error
					So(err, ShouldBeNil)
					// participant should not be nil
					So(channelParticipant, ShouldNotBeNil)

					followedChannels, err := rest.FetchFollowedChannels(account.Id, ses.ClientId)
					So(err, ShouldBeNil)
					So(followedChannels, ShouldNotBeNil)
					So(len(followedChannels), ShouldBeGreaterThanOrEqualTo, 1)
				})

				Convey("user should be able to follow two topic", func() {
					channelParticipant, err := rest.AddChannelParticipant(topicChannel1.Id, ses.ClientId, account.Id)
					So(err, ShouldBeNil)
					So(channelParticipant, ShouldNotBeNil)

					channelParticipant, err = rest.AddChannelParticipant(topicChannel2.Id, ses.ClientId, account.Id)
					So(err, ShouldBeNil)
					So(channelParticipant, ShouldNotBeNil)

					followedChannels, err := rest.FetchFollowedChannels(account.Id, ses.ClientId)
					So(err, ShouldBeNil)
					So(followedChannels, ShouldNotBeNil)
					So(len(followedChannels), ShouldBeGreaterThanOrEqualTo, 2)
				})

				Convey("user should be participant of the followed topic", func() {
					channelParticipant, err := rest.AddChannelParticipant(topicChannel1.Id, ses.ClientId, account.Id)
					// there should be no error
					So(err, ShouldBeNil)
					// participant should not be nil
					So(channelParticipant, ShouldNotBeNil)

					followedChannels, err := rest.FetchFollowedChannels(account.Id, ses.ClientId)
					So(err, ShouldBeNil)
					So(followedChannels, ShouldNotBeNil)
					So(len(followedChannels), ShouldBeGreaterThanOrEqualTo, 1)
					So(followedChannels[0].IsParticipant, ShouldBeTrue)
				})

				Convey("user should not be a participant of the un-followed topic", func() {
					channelParticipant, err := rest.AddChannelParticipant(topicChannel1.Id, ses.ClientId, account.Id)
					So(err, ShouldBeNil)
					So(channelParticipant, ShouldNotBeNil)

					followedChannels, err := rest.FetchFollowedChannels(account.Id, ses.ClientId)
					So(err, ShouldBeNil)
					So(followedChannels, ShouldNotBeNil)

					currentParticipatedChannelCount := len(followedChannels)

					// Un-follow and verify that exactly one channel dropped
					// out of the followed list.
					channelParticipant, err = rest.DeleteChannelParticipant(topicChannel1.Id, ses.ClientId, account.Id)
					So(err, ShouldBeNil)
					So(channelParticipant, ShouldNotBeNil)

					followedChannels, err = rest.FetchFollowedChannels(account.Id, ses.ClientId)
					So(err, ShouldBeNil)
					So(followedChannels, ShouldNotBeNil)

					lastParticipatedChannelCount := len(followedChannels)

					So(currentParticipatedChannelCount-lastParticipatedChannelCount, ShouldEqual, 1)
				})

				Convey("participant count of the followed topic should be greater than 0", func() {
					channelParticipant, err := rest.AddChannelParticipant(topicChannel1.Id, ses.ClientId, account.Id)
					// there should be no error
					So(err, ShouldBeNil)
					// participant should not be nil
					So(channelParticipant, ShouldNotBeNil)

					followedChannels, err := rest.FetchFollowedChannels(account.Id, ses.ClientId)
					So(err, ShouldBeNil)
					So(followedChannels, ShouldNotBeNil)
					So(len(followedChannels), ShouldBeGreaterThanOrEqualTo, 1)
					So(followedChannels[0].ParticipantCount, ShouldBeGreaterThanOrEqualTo, 1)
				})
			})
		})
	})
}
socialapi: use generic group channel creator
package main
import (
"socialapi/models"
"socialapi/rest"
"socialapi/workers/common/tests"
"testing"
"github.com/koding/runner"
. "github.com/smartystreets/goconvey/convey"
)
// TestFollowedTopics exercises the followed-topics REST endpoints: following
// (joining) topic channels, listing followed channels, and un-following.
// Several original inline comments contradicted the assertions below them
// ("there should be an err" above ShouldBeNil); they have been corrected.
func TestFollowedTopics(t *testing.T) {
	tests.WithRunner(t, func(r *runner.Runner) {
		Convey("While testing followed topics", t, func() {
			Convey("First Create User", func() {
				// Set up a group, an owner account, a session, and two topic
				// channels shared by all sub-tests below.
				account, _, groupName := models.CreateRandomGroupDataWithChecks()

				ses, err := models.FetchOrCreateSession(account.Nick, groupName)
				So(err, ShouldBeNil)
				So(ses, ShouldNotBeNil)

				nonOwnerAccount := models.NewAccount()
				nonOwnerAccount.OldId = AccountOldId.Hex()
				nonOwnerAccount, err = rest.CreateAccount(nonOwnerAccount)
				So(err, ShouldBeNil)
				So(nonOwnerAccount, ShouldNotBeNil)

				topicChannel1, err := rest.CreateChannelByGroupNameAndType(
					account.Id,
					groupName,
					models.Channel_TYPE_TOPIC,
					ses.ClientId,
				)
				So(err, ShouldBeNil)

				topicChannel2, err := rest.CreateChannelByGroupNameAndType(
					account.Id,
					groupName,
					models.Channel_TYPE_TOPIC,
					ses.ClientId,
				)
				So(err, ShouldBeNil)

				Convey("user should be able to follow one topic", func() {
					channelParticipant, err := rest.AddChannelParticipant(topicChannel1.Id, ses.ClientId, account.Id)
					// there should be no error
					So(err, ShouldBeNil)
					// participant should not be nil
					So(channelParticipant, ShouldNotBeNil)

					followedChannels, err := rest.FetchFollowedChannels(account.Id, ses.ClientId)
					So(err, ShouldBeNil)
					So(followedChannels, ShouldNotBeNil)
					So(len(followedChannels), ShouldBeGreaterThanOrEqualTo, 1)
				})

				Convey("user should be able to follow two topic", func() {
					channelParticipant, err := rest.AddChannelParticipant(topicChannel1.Id, ses.ClientId, account.Id)
					So(err, ShouldBeNil)
					So(channelParticipant, ShouldNotBeNil)

					channelParticipant, err = rest.AddChannelParticipant(topicChannel2.Id, ses.ClientId, account.Id)
					So(err, ShouldBeNil)
					So(channelParticipant, ShouldNotBeNil)

					followedChannels, err := rest.FetchFollowedChannels(account.Id, ses.ClientId)
					So(err, ShouldBeNil)
					So(followedChannels, ShouldNotBeNil)
					So(len(followedChannels), ShouldBeGreaterThanOrEqualTo, 2)
				})

				Convey("user should be participant of the followed topic", func() {
					channelParticipant, err := rest.AddChannelParticipant(topicChannel1.Id, ses.ClientId, account.Id)
					// there should be no error
					So(err, ShouldBeNil)
					// participant should not be nil
					So(channelParticipant, ShouldNotBeNil)

					followedChannels, err := rest.FetchFollowedChannels(account.Id, ses.ClientId)
					So(err, ShouldBeNil)
					So(followedChannels, ShouldNotBeNil)
					So(len(followedChannels), ShouldBeGreaterThanOrEqualTo, 1)
					So(followedChannels[0].IsParticipant, ShouldBeTrue)
				})

				Convey("user should not be a participant of the un-followed topic", func() {
					channelParticipant, err := rest.AddChannelParticipant(topicChannel1.Id, ses.ClientId, account.Id)
					So(err, ShouldBeNil)
					So(channelParticipant, ShouldNotBeNil)

					followedChannels, err := rest.FetchFollowedChannels(account.Id, ses.ClientId)
					So(err, ShouldBeNil)
					So(followedChannels, ShouldNotBeNil)

					currentParticipatedChannelCount := len(followedChannels)

					// Un-follow and verify that exactly one channel dropped
					// out of the followed list.
					channelParticipant, err = rest.DeleteChannelParticipant(topicChannel1.Id, ses.ClientId, account.Id)
					So(err, ShouldBeNil)
					So(channelParticipant, ShouldNotBeNil)

					followedChannels, err = rest.FetchFollowedChannels(account.Id, ses.ClientId)
					So(err, ShouldBeNil)
					So(followedChannels, ShouldNotBeNil)

					lastParticipatedChannelCount := len(followedChannels)

					So(currentParticipatedChannelCount-lastParticipatedChannelCount, ShouldEqual, 1)
				})

				Convey("participant count of the followed topic should be greater than 0", func() {
					channelParticipant, err := rest.AddChannelParticipant(topicChannel1.Id, ses.ClientId, account.Id)
					// there should be no error
					So(err, ShouldBeNil)
					// participant should not be nil
					So(channelParticipant, ShouldNotBeNil)

					followedChannels, err := rest.FetchFollowedChannels(account.Id, ses.ClientId)
					So(err, ShouldBeNil)
					So(followedChannels, ShouldNotBeNil)
					So(len(followedChannels), ShouldBeGreaterThanOrEqualTo, 1)
					So(followedChannels[0].ParticipantCount, ShouldBeGreaterThanOrEqualTo, 1)
				})
			})
		})
	})
}
|
package main
import (
"fmt"
"io/ioutil"
"strings"
"gopkg.in/yaml.v2"
)
// StringValue holds SQL-ready string renderings of a row's column values,
// keyed by column index.
type StringValue struct {
	Values map[int]string
}

// NewStringValue returns a StringValue with an empty, initialized value map.
func NewStringValue() StringValue {
	return StringValue{
		Values: make(map[int]string),
	}
}

// SetValue renders arg as a SQL literal and stores it at the given column
// index: strings are double-quoted, ints and floats are printed verbatim,
// and a nil (absent/null YAML value) becomes NULL. Any other type yields an
// error. The float64 and nil cases generalize the original, which rejected
// null and floating-point YAML values.
func (sv StringValue) SetValue(index int, arg interface{}) error {
	var value string
	switch v := arg.(type) {
	case string:
		value = strings.Join([]string{"\"", v, "\""}, "")
	case int, float64:
		value = fmt.Sprint(v)
	case nil:
		value = "NULL"
	default:
		return fmt.Errorf("Unexpected value: %v", arg)
	}
	sv.Values[index] = value
	return nil
}
// YAMLWrapper lazily loads a YAML file and exposes its contents, column
// names, and stringified row values, caching each on first access.
type YAMLWrapper struct {
	FilePath    string                      // path of the YAML file to read
	contents    map[interface{}]interface{} // parsed YAML document, cached
	columnNames map[int]string              // column names keyed by position
	values      map[int]StringValue         // stringified rows keyed by position
}

// NewYAMLWrapper returns a wrapper for the YAML file at filePath with all
// caches initialized and empty.
func NewYAMLWrapper(filePath string) *YAMLWrapper {
	w := &YAMLWrapper{FilePath: filePath}
	w.contents = map[interface{}]interface{}{}
	w.columnNames = map[int]string{}
	w.values = map[int]StringValue{}
	return w
}
// Contents reads and parses the YAML file on the first call and returns the
// cached document on every call after that.
func (w *YAMLWrapper) Contents() (map[interface{}]interface{}, error) {
	// Cached already? Return it without touching the filesystem.
	if len(w.contents) != 0 {
		return w.contents, nil
	}
	parsed := make(map[interface{}]interface{})
	buf, err := ioutil.ReadFile(w.FilePath)
	if err != nil {
		return parsed, err
	}
	if err := yaml.Unmarshal(buf, &parsed); err != nil {
		return parsed, err
	}
	w.contents = parsed
	return w.contents, nil
}
// ColumnNames returns the keys of one row of the YAML document, indexed by
// position, caching the result on first call. Every row is assumed to share
// the same set of column names, so a single (arbitrary) row is inspected.
//
// NOTE(review): map iteration order is random in Go, so both the choice of
// row and the positional ordering of the keys are nondeterministic — confirm
// callers do not rely on a stable column order.
func (w *YAMLWrapper) ColumnNames() (map[int]string, error) {
	if len(w.columnNames) == 0 {
		keys := make(map[int]string)
		contents, err := w.Contents()
		if err != nil {
			return keys, err
		}
		for _, content := range contents {
			i := 0
			// Idiom fix: `for key := range` instead of `for key, _ := range`.
			for key := range content.(map[interface{}]interface{}) {
				keys[i] = key.(string)
				i++
			}
			break
		}
		w.columnNames = keys
	}
	return w.columnNames, nil
}
// StringValues renders every row of the YAML document into a StringValue,
// indexed by row position, caching the result on first call.
func (w *YAMLWrapper) StringValues() (map[int]StringValue, error) {
	if len(w.values) == 0 {
		var contents map[interface{}]interface{}
		columnNames, err := w.ColumnNames()
		if err != nil {
			return w.values, err
		}
		contents, err = w.Contents()
		if err != nil {
			return w.values, err
		}
		values := make(map[int]StringValue)
		contentIndex := 0
		// Row order follows Go's randomized map iteration, so row indices
		// are not stable across runs.
		for _, data := range contents {
			stringValue := NewStringValue()
			for i, name := range columnNames {
				err := stringValue.SetValue(i, data.(map[interface{}]interface{})[name])
				if err != nil {
					return w.values, err
				}
			}
			values[contentIndex] = stringValue
			contentIndex++
		}
		w.values = values
	}
	return w.values, nil
}
Handle null and float values
package main
import (
"fmt"
"io/ioutil"
"strings"
"gopkg.in/yaml.v2"
)
// StringValue holds SQL-ready string renderings of a row's column values,
// keyed by column index.
type StringValue struct {
	Values map[int]string
}

// NewStringValue returns a StringValue with an empty, initialized value map.
func NewStringValue() StringValue {
	return StringValue{
		Values: make(map[int]string),
	}
}

// SetValue renders arg as a SQL literal and stores it at the given column
// index: strings are double-quoted, ints and floats are printed verbatim,
// and a nil (absent/null YAML value) becomes NULL. Any other type yields an
// error.
//
// Idiom fix: the switch now binds the asserted value (`switch v :=
// arg.(type)`) instead of type-switching on `arg.(type)` and then repeating
// the `arg.(string)` assertion in the case body.
func (sv StringValue) SetValue(index int, arg interface{}) error {
	var value string
	switch v := arg.(type) {
	case string:
		value = strings.Join([]string{"\"", v, "\""}, "")
	case int, float64:
		value = fmt.Sprint(v)
	case nil:
		value = "NULL"
	default:
		return fmt.Errorf("Unexpected value: %v", arg)
	}
	sv.Values[index] = value
	return nil
}
// YAMLWrapper lazily loads a YAML file and exposes its contents, column
// names, and stringified row values, caching each on first access.
type YAMLWrapper struct {
	FilePath    string                      // path of the YAML file to read
	contents    map[interface{}]interface{} // parsed YAML document, cached
	columnNames map[int]string              // column names keyed by position
	values      map[int]StringValue         // stringified rows keyed by position
}

// NewYAMLWrapper returns a wrapper for the YAML file at filePath with all
// caches initialized and empty.
func NewYAMLWrapper(filePath string) *YAMLWrapper {
	w := &YAMLWrapper{FilePath: filePath}
	w.contents = map[interface{}]interface{}{}
	w.columnNames = map[int]string{}
	w.values = map[int]StringValue{}
	return w
}
// Contents reads and parses the YAML file on the first call and returns the
// cached document on every call after that.
func (w *YAMLWrapper) Contents() (map[interface{}]interface{}, error) {
	// Cached already? Return it without touching the filesystem.
	if len(w.contents) != 0 {
		return w.contents, nil
	}
	parsed := make(map[interface{}]interface{})
	buf, err := ioutil.ReadFile(w.FilePath)
	if err != nil {
		return parsed, err
	}
	if err := yaml.Unmarshal(buf, &parsed); err != nil {
		return parsed, err
	}
	w.contents = parsed
	return w.contents, nil
}
// ColumnNames returns the keys of one row of the YAML document, indexed by
// position, caching the result on first call. Every row is assumed to share
// the same set of column names, so a single (arbitrary) row is inspected.
//
// NOTE(review): map iteration order is random in Go, so both the choice of
// row and the positional ordering of the keys are nondeterministic — confirm
// callers do not rely on a stable column order.
func (w *YAMLWrapper) ColumnNames() (map[int]string, error) {
	if len(w.columnNames) == 0 {
		keys := make(map[int]string)
		contents, err := w.Contents()
		if err != nil {
			return keys, err
		}
		for _, content := range contents {
			i := 0
			// Idiom fix: `for key := range` instead of `for key, _ := range`.
			for key := range content.(map[interface{}]interface{}) {
				keys[i] = key.(string)
				i++
			}
			break
		}
		w.columnNames = keys
	}
	return w.columnNames, nil
}
// StringValues renders every row of the YAML document into a StringValue,
// indexed by row position, caching the result on first call.
func (w *YAMLWrapper) StringValues() (map[int]StringValue, error) {
	if len(w.values) == 0 {
		var contents map[interface{}]interface{}
		columnNames, err := w.ColumnNames()
		if err != nil {
			return w.values, err
		}
		contents, err = w.Contents()
		if err != nil {
			return w.values, err
		}
		values := make(map[int]StringValue)
		contentIndex := 0
		// Row order follows Go's randomized map iteration, so row indices
		// are not stable across runs.
		for _, data := range contents {
			stringValue := NewStringValue()
			for i, name := range columnNames {
				err := stringValue.SetValue(i, data.(map[interface{}]interface{})[name])
				if err != nil {
					return w.values, err
				}
			}
			values[contentIndex] = stringValue
			contentIndex++
		}
		w.values = values
	}
	return w.values, nil
}
|
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin freebsd linux netbsd openbsd
package ipv4_test
import (
"code.google.com/p/go.net/ipv4"
"net"
"testing"
"time"
)
// writeThenReadPayload transmits IPv4 datagram payloads to the
// loopback address or interface and captures the loopback'd datagram
// payloads. The loop counter i is folded into the TOS and the TTL (or
// multicast TTL) socket options so each round trip uses distinct values.
func writeThenReadPayload(t *testing.T, i int, c *ipv4.PacketConn, wb []byte, dst net.Addr) []byte {
	rb := make([]byte, 1500)
	c.SetTOS(i + 1)

	// Pull the destination IP out of whichever concrete address type dst is.
	var ip net.IP
	switch v := dst.(type) {
	case *net.UDPAddr:
		ip = v.IP
	case *net.IPAddr:
		ip = v.IP
	}
	if ip.IsMulticast() {
		c.SetMulticastTTL(i + 1)
	} else {
		c.SetTTL(i + 1)
	}

	// Bound the round trip so a dropped packet fails the test quickly.
	c.SetDeadline(time.Now().Add(100 * time.Millisecond))
	if _, err := c.WriteTo(wb, nil, dst); err != nil {
		t.Fatalf("ipv4.PacketConn.WriteTo failed: %v", err)
	}
	n, cm, _, err := c.ReadFrom(rb)
	if err != nil {
		t.Fatalf("ipv4.PacketConn.ReadFrom failed: %v", err)
	}
	t.Logf("rcvd cmsg: %v", cm)
	return rb[:n]
}
// writeThenReadDatagram transmits ICMP for IPv4 datagrams to the
// loopback address or interface and captures the response datagrams
// from the protocol stack within the kernel. The loop counter i is folded
// into the header's TOS and TTL fields so each round trip is distinct.
func writeThenReadDatagram(t *testing.T, i int, c *ipv4.RawConn, wb []byte, src, dst net.Addr) []byte {
	rb := make([]byte, ipv4.HeaderLen+len(wb))
	// Hand-build the IPv4 header; Protocol 1 is ICMP.
	wh := &ipv4.Header{
		Version:  ipv4.Version,
		Len:      ipv4.HeaderLen,
		TOS:      i + 1,
		TotalLen: ipv4.HeaderLen + len(wb),
		TTL:      i + 1,
		Protocol: 1,
	}
	// Src/Dst are optional; leave them zero when the caller passes nil.
	if src != nil {
		wh.Src = src.(*net.IPAddr).IP
	}
	if dst != nil {
		wh.Dst = dst.(*net.IPAddr).IP
	}

	// Bound the round trip so a dropped packet fails the test quickly.
	c.SetDeadline(time.Now().Add(100 * time.Millisecond))
	if err := c.WriteTo(wh, wb, nil); err != nil {
		t.Fatalf("ipv4.RawConn.WriteTo failed: %v", err)
	}
	rh, b, cm, err := c.ReadFrom(rb)
	if err != nil {
		t.Fatalf("ipv4.RawConn.ReadFrom failed: %v", err)
	}
	t.Logf("rcvd cmsg: %v", cm.String())
	t.Logf("rcvd hdr: %v", rh.String())
	return b
}
// loopbackInterface returns a logical network interface for loopback
// tests, or nil if the system has none. (Comment fixed to start with the
// function's actual, unexported name.)
func loopbackInterface() *net.Interface {
	ift, err := net.Interfaces()
	if err != nil {
		return nil
	}
	for _, ifi := range ift {
		if ifi.Flags&net.FlagLoopback != 0 {
			return &ifi
		}
	}
	return nil
}
// isMulticastAvailable returns true if ifi is a multicast access
// enabled network interface. It also returns a unicast IPv4 address
// that can be used for listening on ifi.
func isMulticastAvailable(ifi *net.Interface) (net.IP, bool) {
if ifi.Flags&net.FlagUp == 0 || ifi.Flags&net.FlagMulticast == 0 {
return nil, false
}
ifat, err := ifi.Addrs()
if err != nil {
return nil, false
}
if len(ifat) == 0 {
return nil, false
}
var ip net.IP
for _, ifa := range ifat {
switch v := ifa.(type) {
case *net.IPAddr:
ip = v.IP
case *net.IPNet:
ip = v.IP
default:
continue
}
if ip.To4() == nil {
ip = nil
continue
}
break
}
return ip, true
}
go.net/ipv4: make sure that the interface under test is routed
Some multicast tests depend on proper IP routing because multicasting
requires a unicast source address for its delivery. We call a network
interface that can route IP traffic to neighbors a routed interface
conventionally. This CL makes sure that the interface under test is a
routed interface to avoid using non-routed network interfaces for IP
routing.
Also removes unnecessary build tag.
Fixes golang/go#6709.
R=dave
CC=golang-dev
https://golang.org/cl/21260043
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ipv4_test
import (
"code.google.com/p/go.net/ipv4"
"net"
"testing"
"time"
)
// writeThenReadPayload transmits an IPv4 datagram payload to the
// loopback address or interface and captures the loopback'd datagram
// payload.
func writeThenReadPayload(t *testing.T, i int, c *ipv4.PacketConn, wb []byte, dst net.Addr) []byte {
	rb := make([]byte, 1500)
	c.SetTOS(i + 1)
	var ip net.IP
	switch dst := dst.(type) {
	case *net.UDPAddr:
		ip = dst.IP
	case *net.IPAddr:
		ip = dst.IP
	}
	// Pick the hop-limit setter that matches the destination kind.
	setHops := c.SetTTL
	if ip.IsMulticast() {
		setHops = c.SetMulticastTTL
	}
	setHops(i + 1)
	c.SetDeadline(time.Now().Add(100 * time.Millisecond))
	if _, err := c.WriteTo(wb, nil, dst); err != nil {
		t.Fatalf("ipv4.PacketConn.WriteTo failed: %v", err)
	}
	n, cm, _, err := c.ReadFrom(rb)
	if err != nil {
		t.Fatalf("ipv4.PacketConn.ReadFrom failed: %v", err)
	}
	t.Logf("rcvd cmsg: %v", cm)
	return rb[:n]
}
// writeThenReadDatagram transmits an ICMP-for-IPv4 datagram to the
// loopback address or interface and captures the response datagram
// produced by the in-kernel protocol stack.
func writeThenReadDatagram(t *testing.T, i int, c *ipv4.RawConn, wb []byte, src, dst net.Addr) []byte {
	wh := &ipv4.Header{
		Version:  ipv4.Version,
		Len:      ipv4.HeaderLen,
		TOS:      i + 1,
		TotalLen: ipv4.HeaderLen + len(wb),
		TTL:      i + 1,
		Protocol: 1,
	}
	if src != nil {
		wh.Src = src.(*net.IPAddr).IP
	}
	if dst != nil {
		wh.Dst = dst.(*net.IPAddr).IP
	}
	c.SetDeadline(time.Now().Add(100 * time.Millisecond))
	if err := c.WriteTo(wh, wb, nil); err != nil {
		t.Fatalf("ipv4.RawConn.WriteTo failed: %v", err)
	}
	// The reply carries a full IPv4 header in front of the payload.
	rb := make([]byte, ipv4.HeaderLen+len(wb))
	rh, b, cm, err := c.ReadFrom(rb)
	if err != nil {
		t.Fatalf("ipv4.RawConn.ReadFrom failed: %v", err)
	}
	t.Logf("rcvd cmsg: %v", cm.String())
	t.Logf("rcvd hdr: %v", rh.String())
	return b
}
func isUnicast(ip net.IP) bool {
return ip.To4() != nil && (ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsGlobalUnicast())
}
// loopbackInterface returns a logical network interface for loopback
// tests: one that is up, flagged loopback, and carries at least one
// unicast IPv4 address. It returns nil when no such interface exists.
func loopbackInterface() *net.Interface {
	ift, err := net.Interfaces()
	if err != nil {
		return nil
	}
	for n := range ift {
		ifi := &ift[n]
		if ifi.Flags&net.FlagLoopback == 0 || ifi.Flags&net.FlagUp == 0 {
			continue
		}
		ifat, err := ifi.Addrs()
		if err != nil {
			continue
		}
		for _, ifa := range ifat {
			var ip net.IP
			switch a := ifa.(type) {
			case *net.IPAddr:
				ip = a.IP
			case *net.IPNet:
				ip = a.IP
			default:
				continue
			}
			if isUnicast(ip) {
				return ifi
			}
		}
	}
	return nil
}
// isMulticastAvailable returns true if ifi is a multicast access
// enabled network interface. It also returns a unicast IPv4 address
// that can be used for listening on ifi.
func isMulticastAvailable(ifi *net.Interface) (net.IP, bool) {
	if ifi == nil {
		return nil, false
	}
	// The interface must be both up and multicast-capable.
	const want = net.FlagUp | net.FlagMulticast
	if ifi.Flags&want != want {
		return nil, false
	}
	ifat, err := ifi.Addrs()
	if err != nil {
		return nil, false
	}
	for _, ifa := range ifat {
		var ip net.IP
		switch a := ifa.(type) {
		case *net.IPAddr:
			ip = a.IP
		case *net.IPNet:
			ip = a.IP
		}
		if ip != nil && isUnicast(ip) {
			return ip, true
		}
	}
	return nil, false
}
|
package v_0_1_0
const MasterTemplate = `#cloud-config
users:
{{ range $index, $user := .Cluster.Kubernetes.SSH.UserList }} - name: {{ $user.Name }}
groups:
- "sudo"
- "docker"
ssh-authorized-keys:
- "{{ $user.PublicKey }}"
{{end}}
write_files:
- path: /srv/calico-policy-controller-sa.yaml
owner: root
permissions: 644
content: |
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-policy-controller
namespace: kube-system
- path: /srv/calico-node-sa.yaml
owner: root
permissions: 644
content: |
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
- path: /srv/calico-configmap.yaml
owner: root
permissions: 644
content: |
# Calico Version v2.5.1
# https://docs.projectcalico.org/v2.5/releases#v2.5.1
# This manifest includes the following component versions:
# calico/node:v2.5.1
# calico/cni:v1.10.0
# calico/kube-policy-controller:v0.7.0
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Configure this with the location of your etcd cluster.
etcd_endpoints: "https://{{ .Cluster.Etcd.Domain }}:443"
# Configure the Calico backend to use.
calico_backend: "bird"
# The CNI network configuration to install on each node.
# TODO: Do we still need to set MTU manually?
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.1.0",
"type": "calico",
"etcd_endpoints": "__ETCD_ENDPOINTS__",
"etcd_key_file": "__ETCD_KEY_FILE__",
"etcd_cert_file": "__ETCD_CERT_FILE__",
"etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
"log_level": "info",
"ipam": {
"type": "calico-ipam"
},
"mtu": {{.Cluster.Calico.MTU}},
"policy": {
"type": "k8s",
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
}
etcd_ca: "/etc/kubernetes/ssl/etcd/client-ca.pem"
etcd_cert: "/etc/kubernetes/ssl/etcd/client-crt.pem"
etcd_key: "/etc/kubernetes/ssl/etcd/client-key.pem"
- path: /srv/calico-ds.yaml
owner: root
permissions: 644
content: |
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
template:
metadata:
labels:
k8s-app: calico-node
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# Tolerations part was taken from calico manifest for kubeadm as we are using same taint for master.
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: CriticalAddonsOnly
operator: Exists
hostNetwork: true
serviceAccountName: calico-node
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v2.5.1
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,bgp"
# Disable file logging so kubectl logs works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Configure the IP Pool from which Pod IPs will be chosen.
- name: CALICO_IPV4POOL_CIDR
value: "{{.Cluster.Calico.Subnet}}/{{.Cluster.Calico.CIDR}}"
- name: CALICO_IPV4POOL_IPIP
value: "always"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
value: "1440"
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# Auto-detect the BGP IP address.
- name: IP
value: ""
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
httpGet:
path: /liveness
port: 9099
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
httpGet:
path: /readiness
port: 9099
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /etc/kubernetes/ssl/etcd
name: etcd-certs
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.10.0
command: ["/install-cni.sh"]
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# Location of the CA certificate for etcd.
- name: CNI_CONF_ETCD_CA
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: CNI_CONF_ETCD_KEY
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: CNI_CONF_ETCD_CERT
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /etc/kubernetes/ssl/etcd
name: etcd-certs
volumes:
# Used by calico/node.
- name: etcd-certs
hostPath:
path: /etc/kubernetes/ssl/etcd
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
- path: /srv/calico-policy-controller.yaml
owner: root
permissions: 644
content: |
# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
# The policy controller can only have a single active instance.
replicas: 1
strategy:
type: Recreate
template:
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
spec:
# The policy controller must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
serviceAccountName: calico-policy-controller
containers:
- name: calico-policy-controller
image: quay.io/calico/kube-policy-controller:v0.7.0
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# The location of the Kubernetes API. Use the default Kubernetes
# service for API access.
- name: K8S_API
value: "https://{{.Cluster.Kubernetes.API.Domain}}:443"
# Since we're running in the host namespace and might not have KubeDNS
# access, configure the container's /etc/hosts to resolve
# kubernetes.default to the correct service clusterIP.
- name: CONFIGURE_ETC_HOSTS
value: "true"
volumeMounts:
# Mount in the etcd TLS secrets.
- mountPath: /etc/kubernetes/ssl/etcd
name: etcd-certs
volumes:
# Mount in the etcd TLS secrets.
- name: etcd-certs
hostPath:
path: /etc/kubernetes/ssl/etcd
- path: /srv/kubedns-cm.yaml
owner: root
permissions: 0644
content: |
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-dns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
- path: /srv/kubedns-sa.yaml
owner: root
permissions: 0644
content: |
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
- path: /srv/kubedns-dep.yaml
owner: root
permissions: 0644
content: |
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
replicas: 3
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.2
volumeMounts:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: config
mountPath: /etc/kubernetes/config/
readOnly: false
- name: ssl
mountPath: /etc/kubernetes/ssl/
readOnly: false
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args:
# command = "/kube-dns
- --dns-port=10053
- --domain={{.Cluster.Kubernetes.Domain}}
- --config-dir=/kube-dns-config
- --v=2
- --kubecfg-file=/etc/kubernetes/config/kubelet-kubeconfig.yml
- --kube-master-url=https://{{.Cluster.Kubernetes.API.Domain}}
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
failureThreshold: 5
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
initialDelaySeconds: 3
timeoutSeconds: 5
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.2
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --no-resolv
- --log-facility=-
- --server=127.0.0.1#10053
- --server=/{{.Cluster.Kubernetes.Domain}}/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
resources:
requests:
cpu: 150m
memory: 10Mi
volumeMounts:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.2
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{.Cluster.Kubernetes.Domain}},5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{.Cluster.Kubernetes.Domain}},5,A
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
serviceAccountName: kube-dns
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
- name: config
hostPath:
path: /etc/kubernetes/config/
- name: ssl
hostPath:
path: /etc/kubernetes/ssl/
- path: /srv/kubedns-svc.yaml
owner: root
permissions: 0644
content: |
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: {{.Cluster.Kubernetes.DNS.IP}}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- path: /srv/network-policy.json
owner: root
permissions: 0644
content: |
{
"kind": "ThirdPartyResource",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "network-policy.net.alpha.kubernetes.io"
},
"description": "Specification for a network isolation policy",
"versions": [
{
"name": "v1alpha1"
}
]
}
- path: /srv/default-backend-dep.yml
owner: root
permissions: 0644
content: |
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: default-http-backend
namespace: kube-system
labels:
k8s-app: default-http-backend
spec:
replicas: 2
template:
metadata:
labels:
k8s-app: default-http-backend
spec:
containers:
- name: default-http-backend
image: gcr.io/google_containers/defaultbackend:1.0
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
ports:
- containerPort: 8080
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
- path: /srv/default-backend-svc.yml
owner: root
permissions: 0644
content: |
apiVersion: v1
kind: Service
metadata:
name: default-http-backend
namespace: kube-system
labels:
k8s-app: default-http-backend
spec:
type: NodePort
ports:
- port: 80
targetPort: 8080
selector:
k8s-app: default-http-backend
- path: /srv/ingress-controller-cm.yml
owner: root
permissions: 0644
content: |
kind: ConfigMap
apiVersion: v1
metadata:
name: ingress-nginx
namespace: kube-system
labels:
k8s-addon: ingress-nginx.addons.k8s.io
data:
server-name-hash-bucket-size: "1024"
server-name-hash-max-size: "1024"
- path: /srv/ingress-controller-dep.yml
owner: root
permissions: 0644
content: |
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx-ingress-controller
namespace: kube-system
labels:
k8s-app: nginx-ingress-controller
annotations:
prometheus.io/port: '10254'
prometheus.io/scrape: 'true'
spec:
replicas: {{len .Cluster.Workers}}
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: nginx-ingress-controller
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- nginx-ingress-controller
topologyKey: kubernetes.io/hostname
containers:
- name: nginx-ingress-controller
image: {{.Cluster.Kubernetes.IngressController.Docker.Image}}
args:
- /nginx-ingress-controller
- --default-backend-service=$(POD_NAMESPACE)/default-http-backend
- --configmap=$(POD_NAMESPACE)/ingress-nginx
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
readinessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
livenessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
timeoutSeconds: 1
ports:
- containerPort: 80
hostPort: 80
- containerPort: 443
hostPort: 443
- path: /srv/ingress-controller-svc.yml
owner: root
permissions: 0644
content: |
apiVersion: v1
kind: Service
metadata:
name: nginx-ingress-controller
namespace: kube-system
labels:
k8s-app: nginx-ingress-controller
spec:
type: NodePort
ports:
- name: http
port: 80
nodePort: 30010
protocol: TCP
targetPort: 80
- name: https
port: 443
nodePort: 30011
protocol: TCP
targetPort: 443
selector:
k8s-app: nginx-ingress-controller
- path: /opt/wait-for-domains
permissions: 0544
content: |
#!/bin/bash
domains="{{.Cluster.Etcd.Domain}} {{.Cluster.Kubernetes.API.Domain}}"
for domain in $domains; do
until nslookup $domain; do
echo "Waiting for domain $domain to be available"
sleep 5
done
echo "Successfully resolved domain $domain"
done
- path: /opt/k8s-addons
permissions: 0544
content: |
#!/bin/bash
KUBECTL={{.Cluster.Kubernetes.Kubectl.Docker.Image}}
/usr/bin/docker pull $KUBECTL
# wait for healthy master
while [ "$(/usr/bin/docker run --net=host --rm $KUBECTL get cs | grep Healthy | wc -l)" -ne "3" ]; do sleep 1 && echo 'Waiting for healthy k8s'; done
# apply calico CNI
CALICO_FILES="calico-configmap.yaml calico-node-sa.yaml calico-policy-controller-sa.yaml calico-ds.yaml calico-policy-controller.yaml"
for manifest in $CALICO_FILES
do
while
/usr/bin/docker run --net=host --rm -v /srv:/srv $KUBECTL apply -f /srv/$manifest
[ "$?" -ne "0" ]
do
echo "failed to apply /src/$manifest, retrying in 5 sec"
sleep 5s
done
done
# wait for healthy calico - we check for pods - desired vs ready
while
# result of this is 'eval [ "$DESIRED_POD_COUNT" -eq "$READY_POD_COUNT" ]'
/usr/bin/docker run --net=host --rm -v /etc/kubernetes:/etc/kubernetes $KUBECTL -n kube-system get ds calico-node 2>/dev/null >/dev/null
RET_CODE_1=$?
eval $(/usr/bin/docker run --net=host --rm $KUBECTL -n kube-system get ds calico-node | tail -1 | awk '{print "[ \"" $2"\" -eq \""$4"\" ] "}')
RET_CODE_2=$?
[ "$RET_CODE_1" -ne "0" ] || [ "$RET_CODE_2" -ne "0" ]
do
echo "Waiting for calico to be ready . . "
sleep 3s
done
# apply default storage class
if [ -f /srv/default-storage-class.yaml ]; then
while
/usr/bin/docker run --net=host --rm -v /srv:/srv $KUBECTL apply -f /srv/default-storage-class.yaml
[ "$?" -ne "0" ]
do
echo "failed to apply /srv/default-storage-class.yaml, retrying in 5 sec"
sleep 5s
done
else
echo "no default storage class to apply"
fi
# apply k8s addons
MANIFESTS="kubedns-cm.yaml kubedns-sa.yaml kubedns-dep.yaml kubedns-svc.yaml default-backend-dep.yml default-backend-svc.yml ingress-controller-cm.yml ingress-controller-dep.yml ingress-controller-svc.yml"
for manifest in $MANIFESTS
do
while
/usr/bin/docker run --net=host --rm -v /srv:/srv $KUBECTL apply -f /srv/$manifest
[ "$?" -ne "0" ]
do
echo "failed to apply /srv/$manifest, retrying in 5 sec"
sleep 5s
done
done
echo "Addons successfully installed"
- path: /etc/kubernetes/config/proxy-kubeconfig.yml
owner: root
permissions: 0644
content: |
apiVersion: v1
kind: Config
users:
- name: proxy
user:
client-certificate: /etc/kubernetes/ssl/apiserver-crt.pem
client-key: /etc/kubernetes/ssl/apiserver-key.pem
clusters:
- name: local
cluster:
certificate-authority: /etc/kubernetes/ssl/apiserver-ca.pem
contexts:
- context:
cluster: local
user: proxy
name: service-account-context
current-context: service-account-context
- path: /etc/kubernetes/config/kubelet-kubeconfig.yml
owner: root
permissions: 0644
content: |
apiVersion: v1
kind: Config
users:
- name: kubelet
user:
client-certificate: /etc/kubernetes/ssl/apiserver-crt.pem
client-key: /etc/kubernetes/ssl/apiserver-key.pem
clusters:
- name: local
cluster:
certificate-authority: /etc/kubernetes/ssl/apiserver-ca.pem
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context
- path: /etc/kubernetes/config/controller-manager-kubeconfig.yml
owner: root
permissions: 0644
content: |
apiVersion: v1
kind: Config
users:
- name: controller-manager
user:
client-certificate: /etc/kubernetes/ssl/apiserver-crt.pem
client-key: /etc/kubernetes/ssl/apiserver-key.pem
clusters:
- name: local
cluster:
certificate-authority: /etc/kubernetes/ssl/apiserver-ca.pem
contexts:
- context:
cluster: local
user: controller-manager
name: service-account-context
current-context: service-account-context
- path: /etc/kubernetes/config/scheduler-kubeconfig.yml
owner: root
permissions: 0644
content: |
apiVersion: v1
kind: Config
users:
- name: scheduler
user:
client-certificate: /etc/kubernetes/ssl/apiserver-crt.pem
client-key: /etc/kubernetes/ssl/apiserver-key.pem
clusters:
- name: local
cluster:
certificate-authority: /etc/kubernetes/ssl/apiserver-ca.pem
contexts:
- context:
cluster: local
user: scheduler
name: service-account-context
current-context: service-account-context
- path: /etc/ssh/sshd_config
owner: root
permissions: 0600
content: |
# Use most defaults for sshd configuration.
UsePrivilegeSeparation sandbox
Subsystem sftp internal-sftp
ClientAliveInterval 180
UseDNS no
UsePAM yes
PrintLastLog no # handled by PAM
PrintMotd no # handled by PAM
# Non defaults (#100)
ClientAliveCountMax 2
PasswordAuthentication no
- path: /etc/sysctl.d/hardening.conf
owner: root
permissions: 0600
content: |
kernel.kptr_restrict = 2
kernel.sysrq = 0
net.ipv4.conf.all.log_martians = 1
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.default.log_martians = 1
net.ipv4.tcp_timestamps = 0
net.ipv6.conf.all.accept_redirects = 0
net.ipv6.conf.default.accept_redirects = 0
- path: /etc/audit/rules.d/10-docker.rules
owner: root
permissions: 644
content: |
-w /usr/bin/docker -k docker
-w /var/lib/docker -k docker
-w /etc/docker -k docker
-w /etc/systemd/system/docker.service.d/10-giantswarm-extra-args.conf -k docker
-w /etc/systemd/system/docker.service.d/01-wait-docker.conf -k docker
-w /usr/lib/systemd/system/docker.service -k docker
-w /usr/lib/systemd/system/docker.socket -k docker
- path: /etc/systemd/system/audit-rules.service.d/10-Wait-For-Docker.conf
owner: root
permissions: 644
content: |
[Service]
ExecStartPre=/bin/bash -c "while [ ! -f /etc/audit/rules.d/10-docker.rules ]; do echo 'Waiting for /etc/audit/rules.d/10-docker.rules to be written' && sleep 1; done"
{{range .Extension.Files}}
- path: {{.Metadata.Path}}
owner: {{.Metadata.Owner}}
{{ if .Metadata.Encoding }}
encoding: {{.Metadata.Encoding}}
{{ end }}
permissions: {{printf "%#o" .Metadata.Permissions}}
content: |
{{range .Content}}{{.}}
{{end}}{{end}}
coreos:
units:
{{range .Extension.Units}}
- name: {{.Metadata.Name}}
enable: {{.Metadata.Enable}}
command: {{.Metadata.Command}}
content: |
{{range .Content}}{{.}}
{{end}}{{end}}
- name: wait-for-domains.service
enable: true
command: start
content: |
[Unit]
Description=Wait for etcd and k8s API domains to be available
[Service]
Type=oneshot
ExecStart=/opt/wait-for-domains
[Install]
WantedBy=multi-user.target
- name: os-hardeing.service
enable: true
command: start
content: |
[Unit]
Description=Apply os hardening
[Service]
Type=oneshot
ExecStartPre=/bin/bash -c "gpasswd -d core rkt; gpasswd -d core docker; gpasswd -d core wheel"
ExecStartPre=/bin/bash -c "until [ -f '/etc/sysctl.d/hardening.conf' ]; do echo Waiting for sysctl file; sleep 1s;done;"
ExecStart=/usr/sbin/sysctl -p /etc/sysctl.d/hardening.conf
[Install]
WantedBy=multi-user.target
- name: set-ownership-etcd-data-dir.service
enable: true
command: start
content: |
[Unit]
Description=Set ownership to etcd3 data dir
Wants=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
TimeoutStartSec=0
ExecStart=/usr/bin/mkdir -p /etc/kubernetes/data/etcd
ExecStart=/usr/bin/chown etcd:etcd /etc/kubernetes/data/etcd
- name: docker.service
enable: true
command: start
drop-ins:
- name: 10-giantswarm-extra-args.conf
content: |
[Service]
Environment="DOCKER_CGROUPS=--exec-opt native.cgroupdriver=cgroupfs --disable-legacy-registry=true {{.Cluster.Docker.Daemon.ExtraArgs}}"
Environment="DOCKER_OPT_BIP=--bip={{.Cluster.Docker.Daemon.CIDR}}"
Environment="DOCKER_OPTS=--live-restore"
- name: k8s-setup-network-env.service
enable: true
command: start
content: |
[Unit]
Description=k8s-setup-network-env Service
Wants=network-online.target docker.service
After=network-online.target docker.service
[Service]
Type=oneshot
RemainAfterExit=yes
TimeoutStartSec=0
Environment="IMAGE={{.Cluster.Kubernetes.NetworkSetup.Docker.Image}}"
Environment="NAME=%p.service"
Environment="NETWORK_CONFIG_CONTAINER="
ExecStartPre=/usr/bin/mkdir -p /opt/bin/
ExecStartPre=/usr/bin/docker pull $IMAGE
ExecStartPre=-/usr/bin/docker stop -t 10 $NAME
ExecStartPre=-/usr/bin/docker rm -f $NAME
ExecStart=/usr/bin/docker run --rm --net=host -v /etc:/etc --name $NAME $IMAGE
ExecStop=-/usr/bin/docker stop -t 10 $NAME
ExecStopPost=-/usr/bin/docker rm -f $NAME
- name: etcd2.service
command: stop
enable: false
mask: true
- name: etcd3.service
enable: true
command: start
content: |
[Unit]
Description=etcd3
Requires=k8s-setup-network-env.service
After=k8s-setup-network-env.service
Conflicts=etcd.service etcd2.service
[Service]
StartLimitIntervalSec=0
Restart=always
RestartSec=0
TimeoutStopSec=10
LimitNOFILE=40000
Environment=IMAGE=quay.io/coreos/etcd:v3.1.8
Environment=NAME=%p.service
EnvironmentFile=/etc/network-environment
ExecStartPre=-/usr/bin/docker stop $NAME
ExecStartPre=-/usr/bin/docker rm $NAME
ExecStartPre=-/usr/bin/docker pull $IMAGE
ExecStartPre=/bin/bash -c "while [ ! -f /etc/kubernetes/ssl/etcd/server-ca.pem ]; do echo 'Waiting for /etc/kubernetes/ssl/etcd/server-ca.pem to be written' && sleep 1; done"
ExecStartPre=/bin/bash -c "while [ ! -f /etc/kubernetes/ssl/etcd/server-crt.pem ]; do echo 'Waiting for /etc/kubernetes/ssl/etcd/server-crt.pem to be written' && sleep 1; done"
ExecStartPre=/bin/bash -c "while [ ! -f /etc/kubernetes/ssl/etcd/server-key.pem ]; do echo 'Waiting for /etc/kubernetes/ssl/etcd/server-key.pem to be written' && sleep 1; done"
ExecStart=/usr/bin/docker run \
-v /etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt \
-v /etc/kubernetes/ssl/etcd/:/etc/etcd \
-v /etc/kubernetes/data/etcd/:/var/lib/etcd \
--net=host \
--name $NAME \
$IMAGE \
etcd \
--name etcd0 \
--trusted-ca-file /etc/etcd/server-ca.pem \
--cert-file /etc/etcd/server-crt.pem \
--key-file /etc/etcd/server-key.pem\
--client-cert-auth=true \
--peer-trusted-ca-file /etc/etcd/server-ca.pem \
--peer-cert-file /etc/etcd/server-crt.pem \
--peer-key-file /etc/etcd/server-key.pem \
--peer-client-cert-auth=true \
--advertise-client-urls=https://{{ .Cluster.Etcd.Domain }}:443 \
--initial-advertise-peer-urls=https://127.0.0.1:2380 \
--listen-client-urls=https://0.0.0.0:2379 \
--listen-peer-urls=https://${DEFAULT_IPV4}:2380 \
--initial-cluster-token k8s-etcd-cluster \
--initial-cluster etcd0=https://127.0.0.1:2380 \
--initial-cluster-state new \
--data-dir=/var/lib/etcd
[Install]
WantedBy=multi-user.target
- name: k8s-proxy.service
enable: true
command: start
content: |
[Unit]
Description=k8s-proxy
StartLimitIntervalSec=0
[Service]
Restart=always
RestartSec=0
TimeoutStopSec=10
EnvironmentFile=/etc/network-environment
Environment="IMAGE={{.Cluster.Kubernetes.Hyperkube.Docker.Image}}"
Environment="NAME=%p.service"
Environment="NETWORK_CONFIG_CONTAINER="
ExecStartPre=/usr/bin/docker pull $IMAGE
ExecStartPre=-/usr/bin/docker stop -t 10 $NAME
ExecStartPre=-/usr/bin/docker rm -f $NAME
ExecStartPre=/bin/sh -c "while ! curl --output /dev/null --silent --head --fail --cacert /etc/kubernetes/ssl/apiserver-ca.pem --cert /etc/kubernetes/ssl/apiserver-crt.pem --key /etc/kubernetes/ssl/apiserver-key.pem https://{{.Cluster.Kubernetes.API.Domain}}; do sleep 1 && echo 'Waiting for master'; done"
ExecStart=/bin/sh -c "/usr/bin/docker run --rm --net=host --privileged=true \
--name $NAME \
-v /usr/share/ca-certificates:/etc/ssl/certs \
-v /etc/kubernetes/ssl/:/etc/kubernetes/ssl/ \
-v /etc/kubernetes/config/:/etc/kubernetes/config/ \
$IMAGE \
/hyperkube proxy \
--master=https://{{.Cluster.Kubernetes.API.Domain}} \
--proxy-mode=iptables \
--logtostderr=true \
--kubeconfig=/etc/kubernetes/config/proxy-kubeconfig.yml \
--conntrack-max-per-core 131072 \
--v=2"
ExecStop=-/usr/bin/docker stop -t 10 $NAME
ExecStopPost=-/usr/bin/docker rm -f $NAME
- name: k8s-kubelet.service
enable: true
command: start
content: |
[Unit]
Description=k8s-kubelet
StartLimitIntervalSec=0
[Service]
Restart=always
RestartSec=0
TimeoutStopSec=10
EnvironmentFile=/etc/network-environment
Environment="IMAGE={{.Cluster.Kubernetes.Hyperkube.Docker.Image}}"
Environment="NAME=%p.service"
Environment="NETWORK_CONFIG_CONTAINER="
ExecStartPre=/usr/bin/docker pull $IMAGE
ExecStartPre=-/usr/bin/docker stop -t 10 $NAME
ExecStartPre=-/usr/bin/docker rm -f $NAME
ExecStart=/bin/sh -c "/usr/bin/docker run --rm --pid=host --net=host --privileged=true \
-v /:/rootfs:ro \
-v /sys:/sys:ro \
-v /dev:/dev:rw \
-v /var/log:/var/log:rw \
-v /run/calico/:/run/calico/:rw \
-v /run/docker/:/run/docker/:rw \
-v /run/docker.sock:/run/docker.sock:rw \
-v /usr/lib/os-release:/etc/os-release \
-v /usr/share/ca-certificates/:/etc/ssl/certs \
-v /var/lib/docker/:/var/lib/docker:rw \
-v /var/lib/kubelet/:/var/lib/kubelet:rw,shared \
-v /etc/kubernetes/ssl/:/etc/kubernetes/ssl/ \
-v /etc/kubernetes/config/:/etc/kubernetes/config/ \
-v /etc/cni/net.d/:/etc/cni/net.d/ \
-v /opt/cni/bin/:/opt/cni/bin/ \
-e ETCD_CA_CERT_FILE=/etc/kubernetes/ssl/etcd/server-ca.pem \
-e ETCD_CERT_FILE=/etc/kubernetes/ssl/etcd/server-crt.pem \
-e ETCD_KEY_FILE=/etc/kubernetes/ssl/etcd/server-key.pem \
--name $NAME \
$IMAGE \
/hyperkube kubelet \
--address=${DEFAULT_IPV4} \
--port={{.Cluster.Kubernetes.Kubelet.Port}} \
--hostname-override=${DEFAULT_IPV4} \
--node-ip=${DEFAULT_IPV4} \
--api-servers=https://{{.Cluster.Kubernetes.API.Domain}} \
--containerized \
--enable-server \
--logtostderr=true \
--machine-id-file=/rootfs/etc/machine-id \
--cadvisor-port=4194 \
--cloud-provider={{.Cluster.Kubernetes.CloudProvider}} \
--healthz-bind-address=${DEFAULT_IPV4} \
--healthz-port=10248 \
--cluster-dns={{.Cluster.Kubernetes.DNS.IP}} \
--cluster-domain={{.Cluster.Kubernetes.Domain}} \
--network-plugin=cni \
--register-node=true \
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
--allow-privileged=true \
--kubeconfig=/etc/kubernetes/config/kubelet-kubeconfig.yml \
--node-labels="node-role.kubernetes.io/master,role=master,kubernetes.io/hostname=${HOSTNAME},ip=${DEFAULT_IPV4},{{.Cluster.Kubernetes.Kubelet.Labels}}" \
--v=2"
ExecStop=-/usr/bin/docker stop -t 10 $NAME
ExecStopPost=-/usr/bin/docker rm -f $NAME
- name: update-engine.service
enable: false
command: stop
mask: true
- name: locksmithd.service
enable: false
command: stop
mask: true
- name: fleet.service
enable: false
mask: true
command: stop
- name: fleet.socket
enable: false
mask: true
command: stop
- name: flanneld.service
enable: false
command: stop
mask: true
- name: systemd-networkd-wait-online.service
enable: true
command: start
- name: k8s-api-server.service
enable: true
command: start
content: |
[Unit]
Description=k8s-api-server
StartLimitIntervalSec=0
[Service]
Restart=always
RestartSec=0
TimeoutStopSec=10
EnvironmentFile=/etc/network-environment
Environment="IMAGE={{.Cluster.Kubernetes.Hyperkube.Docker.Image}}"
Environment="NAME=%p.service"
Environment="NETWORK_CONFIG_CONTAINER="
ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/usr/bin/docker pull $IMAGE
ExecStartPre=-/usr/bin/docker stop -t 10 $NAME
ExecStartPre=-/usr/bin/docker rm -f $NAME
ExecStart=/usr/bin/docker run --rm --name $NAME --net=host \
-v /etc/kubernetes/ssl/:/etc/kubernetes/ssl/ \
-v /etc/kubernetes/secrets/token_sign_key.pem:/etc/kubernetes/secrets/token_sign_key.pem \
$IMAGE \
/hyperkube apiserver \
--allow_privileged=true \
--insecure_bind_address=0.0.0.0 \
--insecure_port={{.Cluster.Kubernetes.API.InsecurePort}} \
--kubelet_https=true \
--secure_port={{.Cluster.Kubernetes.API.SecurePort}} \
--bind-address=${DEFAULT_IPV4} \
--etcd-prefix={{.Cluster.Etcd.Prefix}} \
--profiling=false \
--repair-malformed-updates=false \
--service-account-lookup=true \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,DefaultStorageClass \
--cloud-provider={{.Cluster.Kubernetes.CloudProvider}} \
--service-cluster-ip-range={{.Cluster.Kubernetes.API.ClusterIPRange}} \
--etcd-servers=https://{{ .Cluster.Etcd.Domain }}:443 \
--etcd-cafile=/etc/kubernetes/ssl/etcd/server-ca.pem \
--etcd-certfile=/etc/kubernetes/ssl/etcd/server-crt.pem \
--etcd-keyfile=/etc/kubernetes/ssl/etcd/server-key.pem \
--advertise-address=${DEFAULT_IPV4} \
--runtime-config=api/all=true \
--logtostderr=true \
--tls-cert-file=/etc/kubernetes/ssl/apiserver-crt.pem \
--tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem \
--client-ca-file=/etc/kubernetes/ssl/apiserver-ca.pem \
--service-account-key-file=/etc/kubernetes/ssl/service-account-key.pem
ExecStop=-/usr/bin/docker stop -t 10 $NAME
ExecStopPost=-/usr/bin/docker rm -f $NAME
- name: k8s-controller-manager.service
enable: true
command: start
content: |
[Unit]
Description=k8s-controller-manager Service
StartLimitIntervalSec=0
[Service]
Restart=always
RestartSec=0
TimeoutStopSec=10
EnvironmentFile=/etc/network-environment
Environment="IMAGE={{.Cluster.Kubernetes.Hyperkube.Docker.Image}}"
Environment="NAME=%p.service"
Environment="NETWORK_CONFIG_CONTAINER="
ExecStartPre=/usr/bin/docker pull $IMAGE
ExecStartPre=-/usr/bin/docker stop -t 10 $NAME
ExecStartPre=-/usr/bin/docker rm -f $NAME
ExecStart=/usr/bin/docker run --rm --net=host --name $NAME \
-v /etc/kubernetes/ssl/:/etc/kubernetes/ssl/ \
-v /etc/kubernetes/config/:/etc/kubernetes/config/ \
-v /etc/kubernetes/secrets/token_sign_key.pem:/etc/kubernetes/secrets/token_sign_key.pem \
$IMAGE \
/hyperkube controller-manager \
--master=https://{{.Cluster.Kubernetes.API.Domain}}:443 \
--logtostderr=true \
--v=2 \
--cloud-provider={{.Cluster.Kubernetes.CloudProvider}} \
--profiling=false \
--terminated-pod-gc-threshold=10 \
--use-service-account-credentials=true \
--kubeconfig=/etc/kubernetes/config/controller-manager-kubeconfig.yml \
--root-ca-file=/etc/kubernetes/ssl/apiserver-ca.pem \
--service-account-private-key-file=/etc/kubernetes/ssl/service-account-key.pem
ExecStop=-/usr/bin/docker stop -t 10 $NAME
ExecStopPost=-/usr/bin/docker rm -f $NAME
- name: k8s-scheduler.service
enable: true
command: start
content: |
[Unit]
Description=k8s-scheduler Service
StartLimitIntervalSec=0
[Service]
Restart=always
RestartSec=0
TimeoutStopSec=10
EnvironmentFile=/etc/network-environment
Environment="IMAGE={{.Cluster.Kubernetes.Hyperkube.Docker.Image}}"
Environment="NAME=%p.service"
Environment="NETWORK_CONFIG_CONTAINER="
ExecStartPre=/usr/bin/docker pull $IMAGE
ExecStartPre=-/usr/bin/docker stop -t 10 $NAME
ExecStartPre=-/usr/bin/docker rm -f $NAME
ExecStart=/usr/bin/docker run --rm --net=host --name $NAME \
-v /etc/kubernetes/ssl/:/etc/kubernetes/ssl/ \
-v /etc/kubernetes/config/:/etc/kubernetes/config/ \
$IMAGE \
/hyperkube scheduler \
--master=https://{{.Cluster.Kubernetes.API.Domain}}:443 \
--logtostderr=true \
--v=2 \
--profiling=false \
--kubeconfig=/etc/kubernetes/config/scheduler-kubeconfig.yml
ExecStop=-/usr/bin/docker stop -t 10 $NAME
ExecStopPost=-/usr/bin/docker rm -f $NAME
- name: k8s-addons.service
enable: true
command: start
content: |
[Unit]
Description=Kubernetes Addons
Wants=k8s-api-server.service
After=k8s-api-server.service
[Service]
Type=oneshot
EnvironmentFile=/etc/network-environment
ExecStart=/opt/k8s-addons
[Install]
WantedBy=multi-user.target
- name: node-exporter.service
enable: true
command: start
content: |
[Unit]
Description=Prometheus Node Exporter Service
Requires=docker.service
After=docker.service
[Service]
Restart=always
Environment="IMAGE=prom/node-exporter:0.12.0"
Environment="NAME=%p.service"
ExecStartPre=/usr/bin/docker pull $IMAGE
ExecStartPre=-/usr/bin/docker stop -t 10 $NAME
ExecStartPre=-/usr/bin/docker rm -f $NAME
ExecStart=/usr/bin/docker run --rm \
-p 91:91 \
--net=host \
--name $NAME \
$IMAGE \
--web.listen-address=:91
ExecStop=-/usr/bin/docker stop -t 10 $NAME
ExecStopPost=-/usr/bin/docker rm -f $NAME
[Install]
WantedBy=multi-user.target
update:
reboot-strategy: off
{{ range .Extension.VerbatimSections }}
{{ .Content }}
{{ end }}
`
update-etcd-rewrite (#202)
* update-etcd-rewrite
* update-etcd-rewrite
package v_0_1_0
const MasterTemplate = `#cloud-config
users:
{{ range $index, $user := .Cluster.Kubernetes.SSH.UserList }} - name: {{ $user.Name }}
groups:
- "sudo"
- "docker"
ssh-authorized-keys:
- "{{ $user.PublicKey }}"
{{end}}
write_files:
- path: /srv/calico-policy-controller-sa.yaml
owner: root
permissions: 644
content: |
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-policy-controller
namespace: kube-system
- path: /srv/calico-node-sa.yaml
owner: root
permissions: 644
content: |
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
- path: /srv/calico-configmap.yaml
owner: root
permissions: 644
content: |
# Calico Version v2.5.1
# https://docs.projectcalico.org/v2.5/releases#v2.5.1
# This manifest includes the following component versions:
# calico/node:v2.5.1
# calico/cni:v1.10.0
# calico/kube-policy-controller:v0.7.0
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Configure this with the location of your etcd cluster.
etcd_endpoints: "https://{{ .Cluster.Etcd.Domain }}:443"
# Configure the Calico backend to use.
calico_backend: "bird"
# The CNI network configuration to install on each node.
# TODO: Do we still need to set MTU manually?
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.1.0",
"type": "calico",
"etcd_endpoints": "__ETCD_ENDPOINTS__",
"etcd_key_file": "__ETCD_KEY_FILE__",
"etcd_cert_file": "__ETCD_CERT_FILE__",
"etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
"log_level": "info",
"ipam": {
"type": "calico-ipam"
},
"mtu": {{.Cluster.Calico.MTU}},
"policy": {
"type": "k8s",
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
}
etcd_ca: "/etc/kubernetes/ssl/etcd/client-ca.pem"
etcd_cert: "/etc/kubernetes/ssl/etcd/client-crt.pem"
etcd_key: "/etc/kubernetes/ssl/etcd/client-key.pem"
- path: /srv/calico-ds.yaml
owner: root
permissions: 644
content: |
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
template:
metadata:
labels:
k8s-app: calico-node
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# Tolerations part was taken from calico manifest for kubeadm as we are using same taint for master.
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: CriticalAddonsOnly
operator: Exists
hostNetwork: true
serviceAccountName: calico-node
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v2.5.1
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,bgp"
# Disable file logging so kubectl logs works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Configure the IP Pool from which Pod IPs will be chosen.
- name: CALICO_IPV4POOL_CIDR
value: "{{.Cluster.Calico.Subnet}}/{{.Cluster.Calico.CIDR}}"
- name: CALICO_IPV4POOL_IPIP
value: "always"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
value: "1440"
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# Auto-detect the BGP IP address.
- name: IP
value: ""
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
httpGet:
path: /liveness
port: 9099
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
httpGet:
path: /readiness
port: 9099
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /etc/kubernetes/ssl/etcd
name: etcd-certs
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.10.0
command: ["/install-cni.sh"]
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# Location of the CA certificate for etcd.
- name: CNI_CONF_ETCD_CA
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: CNI_CONF_ETCD_KEY
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: CNI_CONF_ETCD_CERT
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /etc/kubernetes/ssl/etcd
name: etcd-certs
volumes:
# Used by calico/node.
- name: etcd-certs
hostPath:
path: /etc/kubernetes/ssl/etcd
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
- path: /srv/calico-policy-controller.yaml
owner: root
permissions: 644
content: |
# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
# The policy controller can only have a single active instance.
replicas: 1
strategy:
type: Recreate
template:
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
spec:
# The policy controller must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
serviceAccountName: calico-policy-controller
containers:
- name: calico-policy-controller
image: quay.io/calico/kube-policy-controller:v0.7.0
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# The location of the Kubernetes API. Use the default Kubernetes
# service for API access.
- name: K8S_API
value: "https://{{.Cluster.Kubernetes.API.Domain}}:443"
# Since we're running in the host namespace and might not have KubeDNS
# access, configure the container's /etc/hosts to resolve
# kubernetes.default to the correct service clusterIP.
- name: CONFIGURE_ETC_HOSTS
value: "true"
volumeMounts:
# Mount in the etcd TLS secrets.
- mountPath: /etc/kubernetes/ssl/etcd
name: etcd-certs
volumes:
# Mount in the etcd TLS secrets.
- name: etcd-certs
hostPath:
path: /etc/kubernetes/ssl/etcd
- path: /srv/kubedns-cm.yaml
owner: root
permissions: 0644
content: |
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-dns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
- path: /srv/kubedns-sa.yaml
owner: root
permissions: 0644
content: |
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
- path: /srv/kubedns-dep.yaml
owner: root
permissions: 0644
content: |
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
replicas: 3
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.2
volumeMounts:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: config
mountPath: /etc/kubernetes/config/
readOnly: false
- name: ssl
mountPath: /etc/kubernetes/ssl/
readOnly: false
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args:
# command = "/kube-dns
- --dns-port=10053
- --domain={{.Cluster.Kubernetes.Domain}}
- --config-dir=/kube-dns-config
- --v=2
- --kubecfg-file=/etc/kubernetes/config/kubelet-kubeconfig.yml
- --kube-master-url=https://{{.Cluster.Kubernetes.API.Domain}}
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
failureThreshold: 5
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
initialDelaySeconds: 3
timeoutSeconds: 5
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.2
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --no-resolv
- --log-facility=-
- --server=127.0.0.1#10053
- --server=/{{.Cluster.Kubernetes.Domain}}/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
resources:
requests:
cpu: 150m
memory: 10Mi
volumeMounts:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.2
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{.Cluster.Kubernetes.Domain}},5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{.Cluster.Kubernetes.Domain}},5,A
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
serviceAccountName: kube-dns
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
- name: config
hostPath:
path: /etc/kubernetes/config/
- name: ssl
hostPath:
path: /etc/kubernetes/ssl/
- path: /srv/kubedns-svc.yaml
owner: root
permissions: 0644
content: |
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: {{.Cluster.Kubernetes.DNS.IP}}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- path: /srv/network-policy.json
owner: root
permissions: 0644
content: |
{
"kind": "ThirdPartyResource",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "network-policy.net.alpha.kubernetes.io"
},
"description": "Specification for a network isolation policy",
"versions": [
{
"name": "v1alpha1"
}
]
}
- path: /srv/default-backend-dep.yml
owner: root
permissions: 0644
content: |
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: default-http-backend
namespace: kube-system
labels:
k8s-app: default-http-backend
spec:
replicas: 2
template:
metadata:
labels:
k8s-app: default-http-backend
spec:
containers:
- name: default-http-backend
image: gcr.io/google_containers/defaultbackend:1.0
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
ports:
- containerPort: 8080
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
- path: /srv/default-backend-svc.yml
owner: root
permissions: 0644
content: |
apiVersion: v1
kind: Service
metadata:
name: default-http-backend
namespace: kube-system
labels:
k8s-app: default-http-backend
spec:
type: NodePort
ports:
- port: 80
targetPort: 8080
selector:
k8s-app: default-http-backend
- path: /srv/ingress-controller-cm.yml
owner: root
permissions: 0644
content: |
kind: ConfigMap
apiVersion: v1
metadata:
name: ingress-nginx
namespace: kube-system
labels:
k8s-addon: ingress-nginx.addons.k8s.io
data:
server-name-hash-bucket-size: "1024"
server-name-hash-max-size: "1024"
- path: /srv/ingress-controller-dep.yml
owner: root
permissions: 0644
content: |
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx-ingress-controller
namespace: kube-system
labels:
k8s-app: nginx-ingress-controller
annotations:
prometheus.io/port: '10254'
prometheus.io/scrape: 'true'
spec:
replicas: {{len .Cluster.Workers}}
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: nginx-ingress-controller
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- nginx-ingress-controller
topologyKey: kubernetes.io/hostname
containers:
- name: nginx-ingress-controller
image: {{.Cluster.Kubernetes.IngressController.Docker.Image}}
args:
- /nginx-ingress-controller
- --default-backend-service=$(POD_NAMESPACE)/default-http-backend
- --configmap=$(POD_NAMESPACE)/ingress-nginx
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
readinessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
livenessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
timeoutSeconds: 1
ports:
- containerPort: 80
hostPort: 80
- containerPort: 443
hostPort: 443
- path: /srv/ingress-controller-svc.yml
owner: root
permissions: 0644
content: |
apiVersion: v1
kind: Service
metadata:
name: nginx-ingress-controller
namespace: kube-system
labels:
k8s-app: nginx-ingress-controller
spec:
type: NodePort
ports:
- name: http
port: 80
nodePort: 30010
protocol: TCP
targetPort: 80
- name: https
port: 443
nodePort: 30011
protocol: TCP
targetPort: 443
selector:
k8s-app: nginx-ingress-controller
- path: /opt/wait-for-domains
permissions: 0544
content: |
#!/bin/bash
domains="{{.Cluster.Etcd.Domain}} {{.Cluster.Kubernetes.API.Domain}}"
for domain in $domains; do
until nslookup $domain; do
echo "Waiting for domain $domain to be available"
sleep 5
done
echo "Successfully resolved domain $domain"
done
- path: /opt/k8s-addons
permissions: 0544
content: |
#!/bin/bash
KUBECTL={{.Cluster.Kubernetes.Kubectl.Docker.Image}}
/usr/bin/docker pull $KUBECTL
# wait for healthy master
while [ "$(/usr/bin/docker run --net=host --rm $KUBECTL get cs | grep Healthy | wc -l)" -ne "3" ]; do sleep 1 && echo 'Waiting for healthy k8s'; done
# apply calico CNI
CALICO_FILES="calico-configmap.yaml calico-node-sa.yaml calico-policy-controller-sa.yaml calico-ds.yaml calico-policy-controller.yaml"
for manifest in $CALICO_FILES
do
while
/usr/bin/docker run --net=host --rm -v /srv:/srv $KUBECTL apply -f /srv/$manifest
[ "$?" -ne "0" ]
do
          echo "failed to apply /srv/$manifest, retrying in 5 sec"
sleep 5s
done
done
# wait for healthy calico - we check for pods - desired vs ready
while
# result of this is 'eval [ "$DESIRED_POD_COUNT" -eq "$READY_POD_COUNT" ]'
/usr/bin/docker run --net=host --rm -v /etc/kubernetes:/etc/kubernetes $KUBECTL -n kube-system get ds calico-node 2>/dev/null >/dev/null
RET_CODE_1=$?
eval $(/usr/bin/docker run --net=host --rm $KUBECTL -n kube-system get ds calico-node | tail -1 | awk '{print "[ \"" $2"\" -eq \""$4"\" ] "}')
RET_CODE_2=$?
[ "$RET_CODE_1" -ne "0" ] || [ "$RET_CODE_2" -ne "0" ]
do
echo "Waiting for calico to be ready . . "
sleep 3s
done
# apply default storage class
if [ -f /srv/default-storage-class.yaml ]; then
while
/usr/bin/docker run --net=host --rm -v /srv:/srv $KUBECTL apply -f /srv/default-storage-class.yaml
[ "$?" -ne "0" ]
do
echo "failed to apply /srv/default-storage-class.yaml, retrying in 5 sec"
sleep 5s
done
else
echo "no default storage class to apply"
fi
# apply k8s addons
MANIFESTS="kubedns-cm.yaml kubedns-sa.yaml kubedns-dep.yaml kubedns-svc.yaml default-backend-dep.yml default-backend-svc.yml ingress-controller-cm.yml ingress-controller-dep.yml ingress-controller-svc.yml"
for manifest in $MANIFESTS
do
while
/usr/bin/docker run --net=host --rm -v /srv:/srv $KUBECTL apply -f /srv/$manifest
[ "$?" -ne "0" ]
do
echo "failed to apply /srv/$manifest, retrying in 5 sec"
sleep 5s
done
done
echo "Addons successfully installed"
- path: /etc/kubernetes/config/proxy-kubeconfig.yml
owner: root
permissions: 0644
content: |
apiVersion: v1
kind: Config
users:
- name: proxy
user:
client-certificate: /etc/kubernetes/ssl/apiserver-crt.pem
client-key: /etc/kubernetes/ssl/apiserver-key.pem
clusters:
- name: local
cluster:
certificate-authority: /etc/kubernetes/ssl/apiserver-ca.pem
contexts:
- context:
cluster: local
user: proxy
name: service-account-context
current-context: service-account-context
- path: /etc/kubernetes/config/kubelet-kubeconfig.yml
owner: root
permissions: 0644
content: |
apiVersion: v1
kind: Config
users:
- name: kubelet
user:
client-certificate: /etc/kubernetes/ssl/apiserver-crt.pem
client-key: /etc/kubernetes/ssl/apiserver-key.pem
clusters:
- name: local
cluster:
certificate-authority: /etc/kubernetes/ssl/apiserver-ca.pem
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context
- path: /etc/kubernetes/config/controller-manager-kubeconfig.yml
owner: root
permissions: 0644
content: |
apiVersion: v1
kind: Config
users:
- name: controller-manager
user:
client-certificate: /etc/kubernetes/ssl/apiserver-crt.pem
client-key: /etc/kubernetes/ssl/apiserver-key.pem
clusters:
- name: local
cluster:
certificate-authority: /etc/kubernetes/ssl/apiserver-ca.pem
contexts:
- context:
cluster: local
user: controller-manager
name: service-account-context
current-context: service-account-context
- path: /etc/kubernetes/config/scheduler-kubeconfig.yml
owner: root
permissions: 0644
content: |
apiVersion: v1
kind: Config
users:
- name: scheduler
user:
client-certificate: /etc/kubernetes/ssl/apiserver-crt.pem
client-key: /etc/kubernetes/ssl/apiserver-key.pem
clusters:
- name: local
cluster:
certificate-authority: /etc/kubernetes/ssl/apiserver-ca.pem
contexts:
- context:
cluster: local
user: scheduler
name: service-account-context
current-context: service-account-context
- path: /etc/ssh/sshd_config
owner: root
permissions: 0600
content: |
# Use most defaults for sshd configuration.
UsePrivilegeSeparation sandbox
Subsystem sftp internal-sftp
ClientAliveInterval 180
UseDNS no
UsePAM yes
PrintLastLog no # handled by PAM
PrintMotd no # handled by PAM
# Non defaults (#100)
ClientAliveCountMax 2
PasswordAuthentication no
- path: /etc/sysctl.d/hardening.conf
owner: root
permissions: 0600
content: |
kernel.kptr_restrict = 2
kernel.sysrq = 0
net.ipv4.conf.all.log_martians = 1
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.default.log_martians = 1
net.ipv4.tcp_timestamps = 0
net.ipv6.conf.all.accept_redirects = 0
net.ipv6.conf.default.accept_redirects = 0
- path: /etc/audit/rules.d/10-docker.rules
owner: root
permissions: 644
content: |
-w /usr/bin/docker -k docker
-w /var/lib/docker -k docker
-w /etc/docker -k docker
-w /etc/systemd/system/docker.service.d/10-giantswarm-extra-args.conf -k docker
-w /etc/systemd/system/docker.service.d/01-wait-docker.conf -k docker
-w /usr/lib/systemd/system/docker.service -k docker
-w /usr/lib/systemd/system/docker.socket -k docker
- path: /etc/systemd/system/audit-rules.service.d/10-Wait-For-Docker.conf
owner: root
permissions: 644
content: |
[Service]
ExecStartPre=/bin/bash -c "while [ ! -f /etc/audit/rules.d/10-docker.rules ]; do echo 'Waiting for /etc/audit/rules.d/10-docker.rules to be written' && sleep 1; done"
{{range .Extension.Files}}
- path: {{.Metadata.Path}}
owner: {{.Metadata.Owner}}
{{ if .Metadata.Encoding }}
encoding: {{.Metadata.Encoding}}
{{ end }}
permissions: {{printf "%#o" .Metadata.Permissions}}
content: |
{{range .Content}}{{.}}
{{end}}{{end}}
coreos:
units:
{{range .Extension.Units}}
- name: {{.Metadata.Name}}
enable: {{.Metadata.Enable}}
command: {{.Metadata.Command}}
content: |
{{range .Content}}{{.}}
{{end}}{{end}}
- name: wait-for-domains.service
enable: true
command: start
content: |
[Unit]
Description=Wait for etcd and k8s API domains to be available
[Service]
Type=oneshot
ExecStart=/opt/wait-for-domains
[Install]
WantedBy=multi-user.target
  - name: os-hardening.service
enable: true
command: start
content: |
[Unit]
Description=Apply os hardening
[Service]
Type=oneshot
ExecStartPre=/bin/bash -c "gpasswd -d core rkt; gpasswd -d core docker; gpasswd -d core wheel"
ExecStartPre=/bin/bash -c "until [ -f '/etc/sysctl.d/hardening.conf' ]; do echo Waiting for sysctl file; sleep 1s;done;"
ExecStart=/usr/sbin/sysctl -p /etc/sysctl.d/hardening.conf
[Install]
WantedBy=multi-user.target
- name: set-ownership-etcd-data-dir.service
enable: true
command: start
content: |
[Unit]
Description=Set ownership to etcd3 data dir
Wants=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
TimeoutStartSec=0
ExecStart=/usr/bin/mkdir -p /etc/kubernetes/data/etcd
ExecStart=/usr/bin/chown etcd:etcd /etc/kubernetes/data/etcd
- name: docker.service
enable: true
command: start
drop-ins:
- name: 10-giantswarm-extra-args.conf
content: |
[Service]
Environment="DOCKER_CGROUPS=--exec-opt native.cgroupdriver=cgroupfs --disable-legacy-registry=true {{.Cluster.Docker.Daemon.ExtraArgs}}"
Environment="DOCKER_OPT_BIP=--bip={{.Cluster.Docker.Daemon.CIDR}}"
Environment="DOCKER_OPTS=--live-restore"
- name: k8s-setup-network-env.service
enable: true
command: start
content: |
[Unit]
Description=k8s-setup-network-env Service
Wants=network-online.target docker.service
After=network-online.target docker.service
[Service]
Type=oneshot
RemainAfterExit=yes
TimeoutStartSec=0
Environment="IMAGE={{.Cluster.Kubernetes.NetworkSetup.Docker.Image}}"
Environment="NAME=%p.service"
Environment="NETWORK_CONFIG_CONTAINER="
ExecStartPre=/usr/bin/mkdir -p /opt/bin/
ExecStartPre=/usr/bin/docker pull $IMAGE
ExecStartPre=-/usr/bin/docker stop -t 10 $NAME
ExecStartPre=-/usr/bin/docker rm -f $NAME
ExecStart=/usr/bin/docker run --rm --net=host -v /etc:/etc --name $NAME $IMAGE
ExecStop=-/usr/bin/docker stop -t 10 $NAME
ExecStopPost=-/usr/bin/docker rm -f $NAME
- name: etcd2.service
command: stop
enable: false
mask: true
- name: etcd3.service
enable: true
command: start
content: |
[Unit]
Description=etcd3
Requires=k8s-setup-network-env.service
After=k8s-setup-network-env.service
Conflicts=etcd.service etcd2.service
[Service]
StartLimitIntervalSec=0
Restart=always
RestartSec=0
TimeoutStopSec=10
LimitNOFILE=40000
Environment=IMAGE=quay.io/coreos/etcd:v3.2.7
Environment=NAME=%p.service
EnvironmentFile=/etc/network-environment
ExecStartPre=-/usr/bin/docker stop $NAME
ExecStartPre=-/usr/bin/docker rm $NAME
ExecStartPre=-/usr/bin/docker pull $IMAGE
ExecStartPre=/bin/bash -c "while [ ! -f /etc/kubernetes/ssl/etcd/server-ca.pem ]; do echo 'Waiting for /etc/kubernetes/ssl/etcd/server-ca.pem to be written' && sleep 1; done"
ExecStartPre=/bin/bash -c "while [ ! -f /etc/kubernetes/ssl/etcd/server-crt.pem ]; do echo 'Waiting for /etc/kubernetes/ssl/etcd/server-crt.pem to be written' && sleep 1; done"
ExecStartPre=/bin/bash -c "while [ ! -f /etc/kubernetes/ssl/etcd/server-key.pem ]; do echo 'Waiting for /etc/kubernetes/ssl/etcd/server-key.pem to be written' && sleep 1; done"
ExecStart=/usr/bin/docker run \
-v /etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt \
-v /etc/kubernetes/ssl/etcd/:/etc/etcd \
-v /etc/kubernetes/data/etcd/:/var/lib/etcd \
--net=host \
--name $NAME \
$IMAGE \
etcd \
--name etcd0 \
--trusted-ca-file /etc/etcd/server-ca.pem \
--cert-file /etc/etcd/server-crt.pem \
--key-file /etc/etcd/server-key.pem\
--client-cert-auth=true \
--peer-trusted-ca-file /etc/etcd/server-ca.pem \
--peer-cert-file /etc/etcd/server-crt.pem \
--peer-key-file /etc/etcd/server-key.pem \
--peer-client-cert-auth=true \
--advertise-client-urls=https://{{ .Cluster.Etcd.Domain }}:443 \
--initial-advertise-peer-urls=https://127.0.0.1:2380 \
--listen-client-urls=https://0.0.0.0:2379 \
--listen-peer-urls=https://${DEFAULT_IPV4}:2380 \
--initial-cluster-token k8s-etcd-cluster \
--initial-cluster etcd0=https://127.0.0.1:2380 \
--initial-cluster-state new \
--data-dir=/var/lib/etcd \
--auto-compaction-retention=1 \
--enable-v2
[Install]
WantedBy=multi-user.target
- name: k8s-proxy.service
enable: true
command: start
content: |
[Unit]
Description=k8s-proxy
StartLimitIntervalSec=0
[Service]
Restart=always
RestartSec=0
TimeoutStopSec=10
EnvironmentFile=/etc/network-environment
Environment="IMAGE={{.Cluster.Kubernetes.Hyperkube.Docker.Image}}"
Environment="NAME=%p.service"
Environment="NETWORK_CONFIG_CONTAINER="
ExecStartPre=/usr/bin/docker pull $IMAGE
ExecStartPre=-/usr/bin/docker stop -t 10 $NAME
ExecStartPre=-/usr/bin/docker rm -f $NAME
ExecStartPre=/bin/sh -c "while ! curl --output /dev/null --silent --head --fail --cacert /etc/kubernetes/ssl/apiserver-ca.pem --cert /etc/kubernetes/ssl/apiserver-crt.pem --key /etc/kubernetes/ssl/apiserver-key.pem https://{{.Cluster.Kubernetes.API.Domain}}; do sleep 1 && echo 'Waiting for master'; done"
ExecStart=/bin/sh -c "/usr/bin/docker run --rm --net=host --privileged=true \
--name $NAME \
-v /usr/share/ca-certificates:/etc/ssl/certs \
-v /etc/kubernetes/ssl/:/etc/kubernetes/ssl/ \
-v /etc/kubernetes/config/:/etc/kubernetes/config/ \
$IMAGE \
/hyperkube proxy \
--master=https://{{.Cluster.Kubernetes.API.Domain}} \
--proxy-mode=iptables \
--logtostderr=true \
--kubeconfig=/etc/kubernetes/config/proxy-kubeconfig.yml \
--conntrack-max-per-core 131072 \
--v=2"
ExecStop=-/usr/bin/docker stop -t 10 $NAME
ExecStopPost=-/usr/bin/docker rm -f $NAME
- name: k8s-kubelet.service
enable: true
command: start
content: |
[Unit]
Description=k8s-kubelet
StartLimitIntervalSec=0
[Service]
Restart=always
RestartSec=0
TimeoutStopSec=10
EnvironmentFile=/etc/network-environment
Environment="IMAGE={{.Cluster.Kubernetes.Hyperkube.Docker.Image}}"
Environment="NAME=%p.service"
Environment="NETWORK_CONFIG_CONTAINER="
ExecStartPre=/usr/bin/docker pull $IMAGE
ExecStartPre=-/usr/bin/docker stop -t 10 $NAME
ExecStartPre=-/usr/bin/docker rm -f $NAME
ExecStart=/bin/sh -c "/usr/bin/docker run --rm --pid=host --net=host --privileged=true \
-v /:/rootfs:ro \
-v /sys:/sys:ro \
-v /dev:/dev:rw \
-v /var/log:/var/log:rw \
-v /run/calico/:/run/calico/:rw \
-v /run/docker/:/run/docker/:rw \
-v /run/docker.sock:/run/docker.sock:rw \
-v /usr/lib/os-release:/etc/os-release \
-v /usr/share/ca-certificates/:/etc/ssl/certs \
-v /var/lib/docker/:/var/lib/docker:rw \
-v /var/lib/kubelet/:/var/lib/kubelet:rw,shared \
-v /etc/kubernetes/ssl/:/etc/kubernetes/ssl/ \
-v /etc/kubernetes/config/:/etc/kubernetes/config/ \
-v /etc/cni/net.d/:/etc/cni/net.d/ \
-v /opt/cni/bin/:/opt/cni/bin/ \
-e ETCD_CA_CERT_FILE=/etc/kubernetes/ssl/etcd/server-ca.pem \
-e ETCD_CERT_FILE=/etc/kubernetes/ssl/etcd/server-crt.pem \
-e ETCD_KEY_FILE=/etc/kubernetes/ssl/etcd/server-key.pem \
--name $NAME \
$IMAGE \
/hyperkube kubelet \
--address=${DEFAULT_IPV4} \
--port={{.Cluster.Kubernetes.Kubelet.Port}} \
--hostname-override=${DEFAULT_IPV4} \
--node-ip=${DEFAULT_IPV4} \
--api-servers=https://{{.Cluster.Kubernetes.API.Domain}} \
--containerized \
--enable-server \
--logtostderr=true \
--machine-id-file=/rootfs/etc/machine-id \
--cadvisor-port=4194 \
--cloud-provider={{.Cluster.Kubernetes.CloudProvider}} \
--healthz-bind-address=${DEFAULT_IPV4} \
--healthz-port=10248 \
--cluster-dns={{.Cluster.Kubernetes.DNS.IP}} \
--cluster-domain={{.Cluster.Kubernetes.Domain}} \
--network-plugin=cni \
--register-node=true \
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
--allow-privileged=true \
--kubeconfig=/etc/kubernetes/config/kubelet-kubeconfig.yml \
--node-labels="node-role.kubernetes.io/master,role=master,kubernetes.io/hostname=${HOSTNAME},ip=${DEFAULT_IPV4},{{.Cluster.Kubernetes.Kubelet.Labels}}" \
--v=2"
ExecStop=-/usr/bin/docker stop -t 10 $NAME
ExecStopPost=-/usr/bin/docker rm -f $NAME
- name: update-engine.service
enable: false
command: stop
mask: true
- name: locksmithd.service
enable: false
command: stop
mask: true
- name: fleet.service
enable: false
mask: true
command: stop
- name: fleet.socket
enable: false
mask: true
command: stop
- name: flanneld.service
enable: false
command: stop
mask: true
- name: systemd-networkd-wait-online.service
enable: true
command: start
- name: k8s-api-server.service
enable: true
command: start
content: |
[Unit]
Description=k8s-api-server
StartLimitIntervalSec=0
[Service]
Restart=always
RestartSec=0
TimeoutStopSec=10
EnvironmentFile=/etc/network-environment
Environment="IMAGE={{.Cluster.Kubernetes.Hyperkube.Docker.Image}}"
Environment="NAME=%p.service"
Environment="NETWORK_CONFIG_CONTAINER="
ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/usr/bin/docker pull $IMAGE
ExecStartPre=-/usr/bin/docker stop -t 10 $NAME
ExecStartPre=-/usr/bin/docker rm -f $NAME
ExecStart=/usr/bin/docker run --rm --name $NAME --net=host \
-v /etc/kubernetes/ssl/:/etc/kubernetes/ssl/ \
-v /etc/kubernetes/secrets/token_sign_key.pem:/etc/kubernetes/secrets/token_sign_key.pem \
$IMAGE \
/hyperkube apiserver \
--allow_privileged=true \
--insecure_bind_address=0.0.0.0 \
--insecure_port={{.Cluster.Kubernetes.API.InsecurePort}} \
--kubelet_https=true \
--secure_port={{.Cluster.Kubernetes.API.SecurePort}} \
--bind-address=${DEFAULT_IPV4} \
--etcd-prefix={{.Cluster.Etcd.Prefix}} \
--profiling=false \
--repair-malformed-updates=false \
--service-account-lookup=true \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,DefaultStorageClass \
--cloud-provider={{.Cluster.Kubernetes.CloudProvider}} \
--service-cluster-ip-range={{.Cluster.Kubernetes.API.ClusterIPRange}} \
--etcd-servers=https://{{ .Cluster.Etcd.Domain }}:443 \
--etcd-cafile=/etc/kubernetes/ssl/etcd/server-ca.pem \
--etcd-certfile=/etc/kubernetes/ssl/etcd/server-crt.pem \
--etcd-keyfile=/etc/kubernetes/ssl/etcd/server-key.pem \
--advertise-address=${DEFAULT_IPV4} \
--runtime-config=api/all=true \
--logtostderr=true \
--tls-cert-file=/etc/kubernetes/ssl/apiserver-crt.pem \
--tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem \
--client-ca-file=/etc/kubernetes/ssl/apiserver-ca.pem \
--service-account-key-file=/etc/kubernetes/ssl/service-account-key.pem
ExecStop=-/usr/bin/docker stop -t 10 $NAME
ExecStopPost=-/usr/bin/docker rm -f $NAME
- name: k8s-controller-manager.service
enable: true
command: start
content: |
[Unit]
Description=k8s-controller-manager Service
StartLimitIntervalSec=0
[Service]
Restart=always
RestartSec=0
TimeoutStopSec=10
EnvironmentFile=/etc/network-environment
Environment="IMAGE={{.Cluster.Kubernetes.Hyperkube.Docker.Image}}"
Environment="NAME=%p.service"
Environment="NETWORK_CONFIG_CONTAINER="
ExecStartPre=/usr/bin/docker pull $IMAGE
ExecStartPre=-/usr/bin/docker stop -t 10 $NAME
ExecStartPre=-/usr/bin/docker rm -f $NAME
ExecStart=/usr/bin/docker run --rm --net=host --name $NAME \
-v /etc/kubernetes/ssl/:/etc/kubernetes/ssl/ \
-v /etc/kubernetes/config/:/etc/kubernetes/config/ \
-v /etc/kubernetes/secrets/token_sign_key.pem:/etc/kubernetes/secrets/token_sign_key.pem \
$IMAGE \
/hyperkube controller-manager \
--master=https://{{.Cluster.Kubernetes.API.Domain}}:443 \
--logtostderr=true \
--v=2 \
--cloud-provider={{.Cluster.Kubernetes.CloudProvider}} \
--profiling=false \
--terminated-pod-gc-threshold=10 \
--use-service-account-credentials=true \
--kubeconfig=/etc/kubernetes/config/controller-manager-kubeconfig.yml \
--root-ca-file=/etc/kubernetes/ssl/apiserver-ca.pem \
--service-account-private-key-file=/etc/kubernetes/ssl/service-account-key.pem
ExecStop=-/usr/bin/docker stop -t 10 $NAME
ExecStopPost=-/usr/bin/docker rm -f $NAME
- name: k8s-scheduler.service
enable: true
command: start
content: |
[Unit]
Description=k8s-scheduler Service
StartLimitIntervalSec=0
[Service]
Restart=always
RestartSec=0
TimeoutStopSec=10
EnvironmentFile=/etc/network-environment
Environment="IMAGE={{.Cluster.Kubernetes.Hyperkube.Docker.Image}}"
Environment="NAME=%p.service"
Environment="NETWORK_CONFIG_CONTAINER="
ExecStartPre=/usr/bin/docker pull $IMAGE
ExecStartPre=-/usr/bin/docker stop -t 10 $NAME
ExecStartPre=-/usr/bin/docker rm -f $NAME
ExecStart=/usr/bin/docker run --rm --net=host --name $NAME \
-v /etc/kubernetes/ssl/:/etc/kubernetes/ssl/ \
-v /etc/kubernetes/config/:/etc/kubernetes/config/ \
$IMAGE \
/hyperkube scheduler \
--master=https://{{.Cluster.Kubernetes.API.Domain}}:443 \
--logtostderr=true \
--v=2 \
--profiling=false \
--kubeconfig=/etc/kubernetes/config/scheduler-kubeconfig.yml
ExecStop=-/usr/bin/docker stop -t 10 $NAME
ExecStopPost=-/usr/bin/docker rm -f $NAME
- name: k8s-addons.service
enable: true
command: start
content: |
[Unit]
Description=Kubernetes Addons
Wants=k8s-api-server.service
After=k8s-api-server.service
[Service]
Type=oneshot
EnvironmentFile=/etc/network-environment
ExecStart=/opt/k8s-addons
[Install]
WantedBy=multi-user.target
- name: node-exporter.service
enable: true
command: start
content: |
[Unit]
Description=Prometheus Node Exporter Service
Requires=docker.service
After=docker.service
[Service]
Restart=always
Environment="IMAGE=prom/node-exporter:0.12.0"
Environment="NAME=%p.service"
ExecStartPre=/usr/bin/docker pull $IMAGE
ExecStartPre=-/usr/bin/docker stop -t 10 $NAME
ExecStartPre=-/usr/bin/docker rm -f $NAME
ExecStart=/usr/bin/docker run --rm \
-p 91:91 \
--net=host \
--name $NAME \
$IMAGE \
--web.listen-address=:91
ExecStop=-/usr/bin/docker stop -t 10 $NAME
ExecStopPost=-/usr/bin/docker rm -f $NAME
[Install]
WantedBy=multi-user.target
update:
reboot-strategy: off
{{ range .Extension.VerbatimSections }}
{{ .Content }}
{{ end }}
`
|
package main
import (
"database/sql"
"encoding/json"
"fmt"
"log"
"strconv"
"strings"
"time"
"github.com/satori/go.uuid"
)
// treeRequest is the unit of work submitted to a treeKeeper via its
// input channel. Action names the tree operation to apply (e.g.
// "create_bucket", "add_system_property_to_repository"); exactly one
// of the payload fields below is expected to be populated, selected
// by RequestType.
type treeRequest struct {
	RequestType string         // discriminator for the payload fields below
	Action      string         // tree operation to perform
	User        string         // user on whose behalf the request runs
	JobId       uuid.UUID      // job identifier; recorded via start_job on processing
	reply       chan somaResult // channel the outcome is delivered on
	Repository  somaRepositoryRequest
	Bucket      somaBucketRequest
	Group       somaGroupRequest
	Cluster     somaClusterRequest
	Node        somaNodeRequest
	CheckConfig somaCheckConfigRequest
}
// treeResult carries the outcome of a processed treeRequest back to
// the requester.
type treeResult struct {
	ResultType  string
	ResultError error
	JobId       uuid.UUID
	Repository  somaRepositoryResult
	// NOTE(review): Bucket is typed somaRepositoryRequest while the
	// Repository field above uses somaRepositoryResult — this looks
	// like a copy-paste slip; confirm whether a bucket *result* type
	// was intended before relying on this field.
	Bucket somaRepositoryRequest
}
// treeKeeper owns the configuration tree of a single repository and
// serializes all modifications to it through its run() loop.
type treeKeeper struct {
	repoId   string // repository identifier, used in logs and as attach parent id
	repoName string // human-readable repository name
	team     string // owning team, stamped onto created buckets/groups/clusters
	broken   bool   // repository unusable; run() only logs holding patterns
	ready    bool   // startup finished, requests are accepted
	stopped  bool   // keeper taken out of service via stop()
	frozen   bool   // when set, deployment details are not rebuilt after a job
	input    chan treeRequest // work queue consumed by run()
	shutdown chan bool        // closes down the keeper goroutine
	stopchan chan bool        // requests a stop() without shutting down
	conn     *sql.DB
	tree     *tree.Tree        // in-memory configuration tree
	errChan  chan *tree.Error  // error feed from the tree
	actionChan chan *tree.Action // action feed from the tree
	start_job *sql.Stmt // prepared in run(); marks a job as started
	get_view  *sql.Stmt // prepared in run(); resolves view from capability
}
// run is the main event loop of a treeKeeper goroutine. It loads the
// repository state, prepares the per-job SQL statements and then
// services requests from tk.input until shutdown is signalled.
//
// Fix: the original ended with a bare `exit:` label directly before
// the closing brace, which gc rejects ("missing statement after
// label"). The `goto exit` jumps are replaced with plain returns —
// behavior is identical, since the label sat at the end of the
// function and the deferred Close calls run on return either way.
func (tk *treeKeeper) run() {
	log.Printf("Starting TreeKeeper for Repo %s (%s)", tk.repoName, tk.repoId)
	tk.startupLoad()
	var err error

	// A broken repository is never serviced: fly holding patterns,
	// honoring only shutdown and stop signals.
	if tk.broken {
		tickTack := time.NewTicker(time.Second * 10).C
	hoverloop:
		for {
			select {
			case <-tickTack:
				log.Printf("TK[%s]: BROKEN REPOSITORY %s flying holding patterns!\n",
					tk.repoName, tk.repoId)
			case <-tk.shutdown:
				break hoverloop
			case <-tk.stopchan:
				tk.stop()
				// NOTE(review): this jump skips the statement
				// preparation below; confirm the stopped path
				// can never reach process(), which uses
				// tk.start_job.
				goto stopsign
			}
		}
		return
	}

	if tk.start_job, err = tk.conn.Prepare(tkStmtStartJob); err != nil {
		log.Fatal("treekeeper/start-job: ", err)
	}
	defer tk.start_job.Close()

	if tk.get_view, err = tk.conn.Prepare(tkStmtGetViewFromCapability); err != nil {
		log.Fatal("treekeeper/get-view-by-capability: ", err)
	}
	defer tk.get_view.Close()

	log.Printf("TK[%s]: ready for service!\n", tk.repoName)
	tk.ready = true

	// Observer instances never mutate state; park until shutdown.
	if SomaCfg.Observer {
		fmt.Printf("TreeKeeper [%s] entered observer mode\n", tk.repoName)
		<-tk.shutdown
		return
	}

stopsign:
	if tk.stopped {
		<-tk.shutdown
		return
	}
runloop:
	for {
		select {
		case <-tk.shutdown:
			break runloop
		case <-tk.stopchan:
			tk.stop()
			goto stopsign
		case req := <-tk.input:
			tk.process(&req)
			handlerMap[`jobDelay`].(jobDelay).notify <- req.JobId.String()
			// Rebuild deployment information unless the keeper
			// is frozen.
			if !tk.frozen {
				tk.buildDeploymentDetails()
				tk.orderDeploymentDetails()
			}
		}
	}
}
// isReady reports whether the keeper has finished startup and is
// accepting requests.
func (tk *treeKeeper) isReady() bool {
	return tk.ready
}
// isBroken reports whether the keeper's repository is flagged broken
// and therefore not being serviced.
func (tk *treeKeeper) isBroken() bool {
	return tk.broken
}
// stop takes the keeper out of service: it is no longer ready, the
// broken flag is cleared, and stopped is set so run() parks in its
// stopsign section until shutdown.
func (tk *treeKeeper) stop() {
	tk.ready = false
	tk.broken = false
	tk.stopped = true
}
func (tk *treeKeeper) process(q *treeRequest) {
var (
err error
hasErrors bool
tx *sql.Tx
treeCheck *tree.Check
nullBucket sql.NullString
txStmtPropertyInstanceCreate *sql.Stmt
txStmtRepositoryPropertyServiceCreate *sql.Stmt
txStmtRepositoryPropertySystemCreate *sql.Stmt
txStmtRepositoryPropertyOncallCreate *sql.Stmt
txStmtRepositoryPropertyCustomCreate *sql.Stmt
txStmtCreateBucket *sql.Stmt
txStmtBucketPropertyServiceCreate *sql.Stmt
txStmtBucketPropertySystemCreate *sql.Stmt
txStmtBucketPropertyOncallCreate *sql.Stmt
txStmtBucketPropertyCustomCreate *sql.Stmt
txStmtGroupCreate *sql.Stmt
txStmtGroupUpdate *sql.Stmt
txStmtGroupDelete *sql.Stmt
txStmtGroupMemberNewNode *sql.Stmt
txStmtGroupMemberNewCluster *sql.Stmt
txStmtGroupMemberNewGroup *sql.Stmt
txStmtGroupMemberRemoveNode *sql.Stmt
txStmtGroupMemberRemoveCluster *sql.Stmt
txStmtGroupMemberRemoveGroup *sql.Stmt
txStmtGroupPropertyServiceCreate *sql.Stmt
txStmtGroupPropertySystemCreate *sql.Stmt
txStmtGroupPropertyOncallCreate *sql.Stmt
txStmtGroupPropertyCustomCreate *sql.Stmt
txStmtClusterCreate *sql.Stmt
txStmtClusterUpdate *sql.Stmt
txStmtClusterDelete *sql.Stmt
txStmtClusterMemberNew *sql.Stmt
txStmtClusterMemberRemove *sql.Stmt
txStmtClusterPropertyServiceCreate *sql.Stmt
txStmtClusterPropertySystemCreate *sql.Stmt
txStmtClusterPropertyOncallCreate *sql.Stmt
txStmtClusterPropertyCustomCreate *sql.Stmt
txStmtBucketAssignNode *sql.Stmt
txStmtUpdateNodeState *sql.Stmt
txStmtNodeUnassignFromBucket *sql.Stmt
txStmtNodePropertyServiceCreate *sql.Stmt
txStmtNodePropertySystemCreate *sql.Stmt
txStmtNodePropertyOncallCreate *sql.Stmt
txStmtNodePropertyCustomCreate *sql.Stmt
txStmtCreateCheckConfigurationBase *sql.Stmt
txStmtCreateCheckConfigurationThreshold *sql.Stmt
txStmtCreateCheckConfigurationConstraintSystem *sql.Stmt
txStmtCreateCheckConfigurationConstraintNative *sql.Stmt
txStmtCreateCheckConfigurationConstraintOncall *sql.Stmt
txStmtCreateCheckConfigurationConstraintCustom *sql.Stmt
txStmtCreateCheckConfigurationConstraintService *sql.Stmt
txStmtCreateCheckConfigurationConstraintAttribute *sql.Stmt
txStmtCreateCheck *sql.Stmt
txStmtCreateCheckInstance *sql.Stmt
txStmtCreateCheckInstanceConfiguration *sql.Stmt
txStmtDeleteCheck *sql.Stmt
txStmtDeleteCheckInstance *sql.Stmt
)
_, err = tk.start_job.Exec(q.JobId.String(), time.Now().UTC())
if err != nil {
log.Println(err)
}
log.Printf("Processing job: %s\n", q.JobId.String())
tk.tree.Begin()
switch q.Action {
//
// REPOSITORY MANIPULATION REQUESTS
case "add_system_property_to_repository":
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
Id: uuid.NewV4(),
Inheritance: (*q.Repository.Repository.Properties)[0].Inheritance,
ChildrenOnly: (*q.Repository.Repository.Properties)[0].ChildrenOnly,
View: (*q.Repository.Repository.Properties)[0].View,
Key: (*q.Repository.Repository.Properties)[0].System.Name,
Value: (*q.Repository.Repository.Properties)[0].System.Value,
})
case `delete_system_property_from_repository`:
srcUUID, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `repository`,
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
SourceId: srcUUID,
View: (*q.Repository.Repository.Properties)[0].View,
Key: (*q.Repository.Repository.Properties)[0].System.Name,
Value: (*q.Repository.Repository.Properties)[0].System.Value,
})
case "add_service_property_to_repository":
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
Id: uuid.NewV4(),
Inheritance: (*q.Repository.Repository.Properties)[0].Inheritance,
ChildrenOnly: (*q.Repository.Repository.Properties)[0].ChildrenOnly,
View: (*q.Repository.Repository.Properties)[0].View,
Service: (*q.Repository.Repository.Properties)[0].Service.Name,
Attributes: (*q.Repository.Repository.Properties)[0].Service.Attributes,
})
case `delete_service_property_from_repository`:
srcUUID, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
SourceId: srcUUID,
View: (*q.Repository.Repository.Properties)[0].View,
Service: (*q.Repository.Repository.Properties)[0].Service.Name,
})
case "add_oncall_property_to_repository":
oncallId, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
Id: uuid.NewV4(),
OncallId: oncallId,
Inheritance: (*q.Repository.Repository.Properties)[0].Inheritance,
ChildrenOnly: (*q.Repository.Repository.Properties)[0].ChildrenOnly,
View: (*q.Repository.Repository.Properties)[0].View,
Name: (*q.Repository.Repository.Properties)[0].Oncall.Name,
Number: (*q.Repository.Repository.Properties)[0].Oncall.Number,
})
case `delete_oncall_property_from_repository`:
srcUUID, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].SourceInstanceId)
oncallId, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
SourceId: srcUUID,
OncallId: oncallId,
View: (*q.Repository.Repository.Properties)[0].View,
Name: (*q.Repository.Repository.Properties)[0].Oncall.Name,
Number: (*q.Repository.Repository.Properties)[0].Oncall.Number,
})
case "add_custom_property_to_repository":
customId, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
Id: uuid.NewV4(),
CustomId: customId,
Inheritance: (*q.Repository.Repository.Properties)[0].Inheritance,
ChildrenOnly: (*q.Repository.Repository.Properties)[0].ChildrenOnly,
View: (*q.Repository.Repository.Properties)[0].View,
Key: (*q.Repository.Repository.Properties)[0].Custom.Name,
Value: (*q.Repository.Repository.Properties)[0].Custom.Value,
})
case `delete_custom_property_from_repository`:
srcUUID, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].SourceInstanceId)
customId, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
SourceId: srcUUID,
CustomId: customId,
View: (*q.Repository.Repository.Properties)[0].View,
Key: (*q.Repository.Repository.Properties)[0].Custom.Name,
Value: (*q.Repository.Repository.Properties)[0].Custom.Value,
})
//
// BUCKET MANIPULATION REQUESTS
case "create_bucket":
tree.NewBucket(tree.BucketSpec{
Id: uuid.NewV4().String(),
Name: q.Bucket.Bucket.Name,
Environment: q.Bucket.Bucket.Environment,
Team: tk.team,
Deleted: q.Bucket.Bucket.IsDeleted,
Frozen: q.Bucket.Bucket.IsFrozen,
Repository: q.Bucket.Bucket.RepositoryId,
}).Attach(tree.AttachRequest{
Root: tk.tree,
ParentType: "repository",
ParentId: tk.repoId,
ParentName: tk.repoName,
})
case "add_system_property_to_bucket":
tk.tree.Find(tree.FindRequest{
ElementType: "bucket",
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
Id: uuid.NewV4(),
Inheritance: (*q.Bucket.Bucket.Properties)[0].Inheritance,
ChildrenOnly: (*q.Bucket.Bucket.Properties)[0].ChildrenOnly,
View: (*q.Bucket.Bucket.Properties)[0].View,
Key: (*q.Bucket.Bucket.Properties)[0].System.Name,
Value: (*q.Bucket.Bucket.Properties)[0].System.Value,
})
case `delete_system_property_from_bucket`:
srcUUID, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `bucket`,
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
SourceId: srcUUID,
View: (*q.Bucket.Bucket.Properties)[0].View,
Key: (*q.Bucket.Bucket.Properties)[0].System.Name,
Value: (*q.Bucket.Bucket.Properties)[0].System.Value,
})
case "add_service_property_to_bucket":
tk.tree.Find(tree.FindRequest{
ElementType: "bucket",
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
Id: uuid.NewV4(),
Inheritance: (*q.Bucket.Bucket.Properties)[0].Inheritance,
ChildrenOnly: (*q.Bucket.Bucket.Properties)[0].ChildrenOnly,
View: (*q.Bucket.Bucket.Properties)[0].View,
Service: (*q.Bucket.Bucket.Properties)[0].Service.Name,
Attributes: (*q.Bucket.Bucket.Properties)[0].Service.Attributes,
})
case `delete_service_property_from_bucket`:
srcUUID, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `bucket`,
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
SourceId: srcUUID,
View: (*q.Bucket.Bucket.Properties)[0].View,
Service: (*q.Bucket.Bucket.Properties)[0].Service.Name,
})
case "add_oncall_property_to_bucket":
oncallId, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "bucket",
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
Id: uuid.NewV4(),
OncallId: oncallId,
Inheritance: (*q.Bucket.Bucket.Properties)[0].Inheritance,
ChildrenOnly: (*q.Bucket.Bucket.Properties)[0].ChildrenOnly,
View: (*q.Bucket.Bucket.Properties)[0].View,
Name: (*q.Bucket.Bucket.Properties)[0].Oncall.Name,
Number: (*q.Bucket.Bucket.Properties)[0].Oncall.Number,
})
case `delete_oncall_property_from_bucket`:
srcUUID, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].SourceInstanceId)
oncallId, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `bucket`,
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
SourceId: srcUUID,
OncallId: oncallId,
View: (*q.Bucket.Bucket.Properties)[0].View,
Name: (*q.Bucket.Bucket.Properties)[0].Oncall.Name,
Number: (*q.Bucket.Bucket.Properties)[0].Oncall.Number,
})
case "add_custom_property_to_bucket":
customId, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "bucket",
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
Id: uuid.NewV4(),
CustomId: customId,
Inheritance: (*q.Bucket.Bucket.Properties)[0].Inheritance,
ChildrenOnly: (*q.Bucket.Bucket.Properties)[0].ChildrenOnly,
View: (*q.Bucket.Bucket.Properties)[0].View,
Key: (*q.Bucket.Bucket.Properties)[0].Custom.Name,
Value: (*q.Bucket.Bucket.Properties)[0].Custom.Value,
})
case `delete_custom_property_from_bucket`:
srcUUID, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].SourceInstanceId)
customId, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `bucket`,
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
SourceId: srcUUID,
CustomId: customId,
View: (*q.Bucket.Bucket.Properties)[0].View,
Key: (*q.Bucket.Bucket.Properties)[0].Custom.Name,
Value: (*q.Bucket.Bucket.Properties)[0].Custom.Value,
})
//
// GROUP MANIPULATION REQUESTS
case "create_group":
tree.NewGroup(tree.GroupSpec{
Id: uuid.NewV4().String(),
Name: q.Group.Group.Name,
Team: tk.team,
}).Attach(tree.AttachRequest{
Root: tk.tree,
ParentType: "bucket",
ParentId: q.Group.Group.BucketId,
})
case "delete_group":
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: q.Group.Group.Id,
}, true).(tree.BucketAttacher).Destroy()
case "reset_group_to_bucket":
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: q.Group.Group.Id,
}, true).(tree.BucketAttacher).Detach()
case "add_group_to_group":
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: (*q.Group.Group.MemberGroups)[0].Id,
}, true).(tree.BucketAttacher).ReAttach(tree.AttachRequest{
Root: tk.tree,
ParentType: "group",
ParentId: q.Group.Group.Id,
})
case "add_system_property_to_group":
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
Id: uuid.NewV4(),
Inheritance: (*q.Group.Group.Properties)[0].Inheritance,
ChildrenOnly: (*q.Group.Group.Properties)[0].ChildrenOnly,
View: (*q.Group.Group.Properties)[0].View,
Key: (*q.Group.Group.Properties)[0].System.Name,
Value: (*q.Group.Group.Properties)[0].System.Value,
})
case `delete_system_property_from_group`:
srcUUID, _ := uuid.FromString((*q.Group.Group.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `group`,
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
SourceId: srcUUID,
View: (*q.Group.Group.Properties)[0].View,
Key: (*q.Group.Group.Properties)[0].System.Name,
Value: (*q.Group.Group.Properties)[0].System.Value,
})
case "add_service_property_to_group":
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
Id: uuid.NewV4(),
Inheritance: (*q.Group.Group.Properties)[0].Inheritance,
ChildrenOnly: (*q.Group.Group.Properties)[0].ChildrenOnly,
View: (*q.Group.Group.Properties)[0].View,
Service: (*q.Group.Group.Properties)[0].Service.Name,
Attributes: (*q.Group.Group.Properties)[0].Service.Attributes,
})
case `delete_service_property_from_group`:
srcUUID, _ := uuid.FromString((*q.Group.Group.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `group`,
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
SourceId: srcUUID,
View: (*q.Group.Group.Properties)[0].View,
Service: (*q.Group.Group.Properties)[0].Service.Name,
})
case "add_oncall_property_to_group":
oncallId, _ := uuid.FromString((*q.Group.Group.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
Id: uuid.NewV4(),
OncallId: oncallId,
Inheritance: (*q.Group.Group.Properties)[0].Inheritance,
ChildrenOnly: (*q.Group.Group.Properties)[0].ChildrenOnly,
View: (*q.Group.Group.Properties)[0].View,
Name: (*q.Group.Group.Properties)[0].Oncall.Name,
Number: (*q.Group.Group.Properties)[0].Oncall.Number,
})
case `delete_oncall_property_from_group`:
srcUUID, _ := uuid.FromString((*q.Group.Group.Properties)[0].SourceInstanceId)
oncallId, _ := uuid.FromString((*q.Group.Group.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `group`,
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
SourceId: srcUUID,
OncallId: oncallId,
View: (*q.Group.Group.Properties)[0].View,
Name: (*q.Group.Group.Properties)[0].Oncall.Name,
Number: (*q.Group.Group.Properties)[0].Oncall.Number,
})
case "add_custom_property_to_group":
customId, _ := uuid.FromString((*q.Group.Group.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
Id: uuid.NewV4(),
CustomId: customId,
Inheritance: (*q.Group.Group.Properties)[0].Inheritance,
ChildrenOnly: (*q.Group.Group.Properties)[0].ChildrenOnly,
View: (*q.Group.Group.Properties)[0].View,
Key: (*q.Group.Group.Properties)[0].Custom.Name,
Value: (*q.Group.Group.Properties)[0].Custom.Value,
})
case `delete_custom_property_from_group`:
srcUUID, _ := uuid.FromString((*q.Group.Group.Properties)[0].SourceInstanceId)
customId, _ := uuid.FromString((*q.Group.Group.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `group`,
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
SourceId: srcUUID,
CustomId: customId,
View: (*q.Group.Group.Properties)[0].View,
Key: (*q.Group.Group.Properties)[0].Custom.Name,
Value: (*q.Group.Group.Properties)[0].Custom.Value,
})
//
// CLUSTER MANIPULATION REQUESTS
case "create_cluster":
// Create a new in-memory cluster element with a freshly generated
// id and attach it beneath the requested bucket of tk's tree.
tree.NewCluster(tree.ClusterSpec{
Id: uuid.NewV4().String(),
Name: q.Cluster.Cluster.Name,
Team: tk.team,
}).Attach(tree.AttachRequest{
Root: tk.tree,
ParentType: "bucket",
ParentId: q.Cluster.Cluster.BucketId,
})
case "delete_cluster":
// Locate the cluster in the tree and destroy it. Find() with
// second argument true is expected to return the element; the
// type assertion panics if the found element is not a
// BucketAttacher.
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.BucketAttacher).Destroy()
case "reset_cluster_to_bucket":
// Detach the cluster from its current parent; Detach() moves it
// back to its bucket (as opposed to Destroy() above).
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.BucketAttacher).Detach()
case "add_cluster_to_group":
// Re-parent the first member cluster of the request under the
// target group. Only index [0] is processed per request.
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: (*q.Group.Group.MemberClusters)[0].Id,
}, true).(tree.BucketAttacher).ReAttach(tree.AttachRequest{
Root: tk.tree,
ParentType: "group",
ParentId: q.Group.Group.Id,
})
case "add_system_property_to_cluster":
// Attach a new system property (fresh instance id) to the cluster.
// Only the first entry of the request's Properties slice is used.
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
Id: uuid.NewV4(),
Inheritance: (*q.Cluster.Cluster.Properties)[0].Inheritance,
ChildrenOnly: (*q.Cluster.Cluster.Properties)[0].ChildrenOnly,
View: (*q.Cluster.Cluster.Properties)[0].View,
Key: (*q.Cluster.Cluster.Properties)[0].System.Name,
Value: (*q.Cluster.Cluster.Properties)[0].System.Value,
})
case `delete_system_property_from_cluster`:
// NOTE(review): the error from uuid.FromString is discarded; a
// malformed SourceInstanceId silently yields the zero UUID and the
// delete request then targets nothing — confirm whether upstream
// validation guarantees a well-formed id here.
srcUUID, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].SourceInstanceId)
// Remove the system property identified by its source instance id.
tk.tree.Find(tree.FindRequest{
ElementType: `cluster`,
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
SourceId: srcUUID,
View: (*q.Cluster.Cluster.Properties)[0].View,
Key: (*q.Cluster.Cluster.Properties)[0].System.Name,
Value: (*q.Cluster.Cluster.Properties)[0].System.Value,
})
case "add_service_property_to_cluster":
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
Id: uuid.NewV4(),
Inheritance: (*q.Cluster.Cluster.Properties)[0].Inheritance,
ChildrenOnly: (*q.Cluster.Cluster.Properties)[0].ChildrenOnly,
View: (*q.Cluster.Cluster.Properties)[0].View,
Service: (*q.Cluster.Cluster.Properties)[0].Service.Name,
Attributes: (*q.Cluster.Cluster.Properties)[0].Service.Attributes,
})
case `delete_service_property_from_cluster`:
srcUUID, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `cluster`,
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
SourceId: srcUUID,
View: (*q.Cluster.Cluster.Properties)[0].View,
Service: (*q.Cluster.Cluster.Properties)[0].Service.Name,
})
case "add_oncall_property_to_cluster":
oncallId, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
Id: uuid.NewV4(),
OncallId: oncallId,
Inheritance: (*q.Cluster.Cluster.Properties)[0].Inheritance,
ChildrenOnly: (*q.Cluster.Cluster.Properties)[0].ChildrenOnly,
View: (*q.Cluster.Cluster.Properties)[0].View,
Name: (*q.Cluster.Cluster.Properties)[0].Oncall.Name,
Number: (*q.Cluster.Cluster.Properties)[0].Oncall.Number,
})
case `delete_oncall_property_from_cluster`:
srcUUID, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].SourceInstanceId)
oncallId, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `cluster`,
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
SourceId: srcUUID,
OncallId: oncallId,
View: (*q.Cluster.Cluster.Properties)[0].View,
Name: (*q.Cluster.Cluster.Properties)[0].Oncall.Name,
Number: (*q.Cluster.Cluster.Properties)[0].Oncall.Number,
})
case "add_custom_property_to_cluster":
customId, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
Id: uuid.NewV4(),
CustomId: customId,
Inheritance: (*q.Cluster.Cluster.Properties)[0].Inheritance,
ChildrenOnly: (*q.Cluster.Cluster.Properties)[0].ChildrenOnly,
View: (*q.Cluster.Cluster.Properties)[0].View,
Key: (*q.Cluster.Cluster.Properties)[0].Custom.Name,
Value: (*q.Cluster.Cluster.Properties)[0].Custom.Value,
})
case `delete_custom_property_from_cluster`:
srcUUID, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].SourceInstanceId)
customId, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `cluster`,
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
SourceId: srcUUID,
CustomId: customId,
View: (*q.Cluster.Cluster.Properties)[0].View,
Key: (*q.Cluster.Cluster.Properties)[0].Custom.Name,
Value: (*q.Cluster.Cluster.Properties)[0].Custom.Value,
})
//
// NODE MANIPULATION REQUESTS
case "assign_node":
// Build a new node element from the request payload (unlike
// create_cluster, the node keeps the id supplied by the request)
// and attach it to the bucket named in the node's config.
tree.NewNode(tree.NodeSpec{
Id: q.Node.Node.Id,
AssetId: q.Node.Node.AssetId,
Name: q.Node.Node.Name,
Team: q.Node.Node.TeamId,
ServerId: q.Node.Node.ServerId,
Online: q.Node.Node.IsOnline,
Deleted: q.Node.Node.IsDeleted,
}).Attach(tree.AttachRequest{
Root: tk.tree,
ParentType: "bucket",
ParentId: q.Node.Node.Config.BucketId,
})
case "delete_node":
// Find the node and destroy it; the type assertion panics if the
// found element does not implement BucketAttacher.
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: q.Node.Node.Id,
}, true).(tree.BucketAttacher).Destroy()
case "reset_node_to_bucket":
// Detach the node from its current parent (group/cluster),
// returning it to its bucket.
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: q.Node.Node.Id,
}, true).(tree.BucketAttacher).Detach()
case "add_node_to_group":
// Re-parent the first member node of the request under the group.
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: (*q.Group.Group.MemberNodes)[0].Id,
}, true).(tree.BucketAttacher).ReAttach(tree.AttachRequest{
Root: tk.tree,
ParentType: "group",
ParentId: q.Group.Group.Id,
})
case "add_node_to_cluster":
// Re-parent the first member node of the request under the cluster.
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: (*q.Cluster.Cluster.Members)[0].Id,
}, true).(tree.BucketAttacher).ReAttach(tree.AttachRequest{
Root: tk.tree,
ParentType: "cluster",
ParentId: q.Cluster.Cluster.Id,
})
case "add_system_property_to_node":
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
Id: uuid.NewV4(),
Inheritance: (*q.Node.Node.Properties)[0].Inheritance,
ChildrenOnly: (*q.Node.Node.Properties)[0].ChildrenOnly,
View: (*q.Node.Node.Properties)[0].View,
Key: (*q.Node.Node.Properties)[0].System.Name,
Value: (*q.Node.Node.Properties)[0].System.Value,
})
case `delete_system_property_from_node`:
srcUUID, _ := uuid.FromString((*q.Node.Node.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `node`,
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
SourceId: srcUUID,
View: (*q.Node.Node.Properties)[0].View,
Key: (*q.Node.Node.Properties)[0].System.Name,
Value: (*q.Node.Node.Properties)[0].System.Value,
})
case "add_service_property_to_node":
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
Id: uuid.NewV4(),
Inheritance: (*q.Node.Node.Properties)[0].Inheritance,
ChildrenOnly: (*q.Node.Node.Properties)[0].ChildrenOnly,
View: (*q.Node.Node.Properties)[0].View,
Service: (*q.Node.Node.Properties)[0].Service.Name,
Attributes: (*q.Node.Node.Properties)[0].Service.Attributes,
})
case `delete_service_property_from_node`:
srcUUID, _ := uuid.FromString((*q.Node.Node.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `node`,
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
SourceId: srcUUID,
View: (*q.Node.Node.Properties)[0].View,
Service: (*q.Node.Node.Properties)[0].Service.Name,
})
case "add_oncall_property_to_node":
oncallId, _ := uuid.FromString((*q.Node.Node.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
Id: uuid.NewV4(),
OncallId: oncallId,
Inheritance: (*q.Node.Node.Properties)[0].Inheritance,
ChildrenOnly: (*q.Node.Node.Properties)[0].ChildrenOnly,
View: (*q.Node.Node.Properties)[0].View,
Name: (*q.Node.Node.Properties)[0].Oncall.Name,
Number: (*q.Node.Node.Properties)[0].Oncall.Number,
})
case `delete_oncall_property_from_node`:
srcUUID, _ := uuid.FromString((*q.Node.Node.Properties)[0].SourceInstanceId)
oncallId, _ := uuid.FromString((*q.Node.Node.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `node`,
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
SourceId: srcUUID,
OncallId: oncallId,
View: (*q.Node.Node.Properties)[0].View,
Name: (*q.Node.Node.Properties)[0].Oncall.Name,
Number: (*q.Node.Node.Properties)[0].Oncall.Number,
})
case "add_custom_property_to_node":
customId, _ := uuid.FromString((*q.Node.Node.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
Id: uuid.NewV4(),
CustomId: customId,
Inheritance: (*q.Node.Node.Properties)[0].Inheritance,
ChildrenOnly: (*q.Node.Node.Properties)[0].ChildrenOnly,
View: (*q.Node.Node.Properties)[0].View,
Key: (*q.Node.Node.Properties)[0].Custom.Name,
Value: (*q.Node.Node.Properties)[0].Custom.Value,
})
case `delete_custom_property_from_node`:
srcUUID, _ := uuid.FromString((*q.Node.Node.Properties)[0].SourceInstanceId)
customId, _ := uuid.FromString((*q.Node.Node.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `node`,
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
SourceId: srcUUID,
CustomId: customId,
View: (*q.Node.Node.Properties)[0].View,
Key: (*q.Node.Node.Properties)[0].Custom.Name,
Value: (*q.Node.Node.Properties)[0].Custom.Value,
})
//
// CHECK MANIPULATION REQUESTS
//
// All five add_check_to_* actions share one handler via fallthrough:
// the object type/id to attach the check to comes from the request's
// CheckConfig itself, so the target element kind does not matter here.
case `add_check_to_repository`:
fallthrough
case `add_check_to_bucket`:
fallthrough
case `add_check_to_group`:
fallthrough
case `add_check_to_cluster`:
fallthrough
case `add_check_to_node`:
// treeCheck and err are declared earlier in the enclosing function
// (outside this view); on conversion failure err is left set and is
// picked up by the error check after the switch.
if treeCheck, err = tk.convertCheck(&q.CheckConfig.CheckConfig); err == nil {
tk.tree.Find(tree.FindRequest{
ElementType: q.CheckConfig.CheckConfig.ObjectType,
ElementId: q.CheckConfig.CheckConfig.ObjectId,
}, true).SetCheck(*treeCheck)
}
// The remove_check_from_* actions mirror the add path, using the
// delete-specific conversion helper.
case `remove_check_from_repository`:
fallthrough
case `remove_check_from_bucket`:
fallthrough
case `remove_check_from_group`:
fallthrough
case `remove_check_from_cluster`:
fallthrough
case `remove_check_from_node`:
if treeCheck, err = tk.convertCheckForDelete(&q.CheckConfig.CheckConfig); err == nil {
tk.tree.Find(tree.FindRequest{
ElementType: q.CheckConfig.CheckConfig.ObjectType,
ElementId: q.CheckConfig.CheckConfig.ObjectId,
}, true).DeleteCheck(*treeCheck)
}
}
// check if we accumulated an error in one of the switch cases
if err != nil {
goto bailout
}
// recalculate check instances
tk.tree.ComputeCheckInstances()
// open multi-statement transaction
if tx, err = tk.conn.Begin(); err != nil {
goto bailout
}
defer tx.Rollback()
// prepare statements within tx context
if txStmtPropertyInstanceCreate, err = tx.Prepare(tkStmtPropertyInstanceCreate); err != nil {
log.Println("Failed to prepare: tkStmtPropertyInstanceCreate")
goto bailout
}
defer txStmtPropertyInstanceCreate.Close()
if txStmtCreateCheckConfigurationBase, err = tx.Prepare(tkStmtCreateCheckConfigurationBase); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationBase")
goto bailout
}
defer txStmtCreateCheckConfigurationBase.Close()
if txStmtCreateCheckConfigurationThreshold, err = tx.Prepare(tkStmtCreateCheckConfigurationThreshold); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationThreshold")
goto bailout
}
defer txStmtCreateCheckConfigurationThreshold.Close()
if txStmtCreateCheckConfigurationConstraintSystem, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintSystem); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintSystem")
goto bailout
}
defer txStmtCreateCheckConfigurationConstraintSystem.Close()
if txStmtCreateCheckConfigurationConstraintNative, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintNative); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintNative")
goto bailout
}
defer txStmtCreateCheckConfigurationConstraintNative.Close()
if txStmtCreateCheckConfigurationConstraintOncall, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintOncall); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintOncall")
goto bailout
}
defer txStmtCreateCheckConfigurationConstraintOncall.Close()
if txStmtCreateCheckConfigurationConstraintCustom, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintCustom); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintCustom")
goto bailout
}
defer txStmtCreateCheckConfigurationConstraintCustom.Close()
if txStmtCreateCheckConfigurationConstraintService, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintService); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintService")
goto bailout
}
defer txStmtCreateCheckConfigurationConstraintService.Close()
if txStmtCreateCheckConfigurationConstraintAttribute, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintAttribute); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintAttribute")
goto bailout
}
defer txStmtCreateCheckConfigurationConstraintAttribute.Close()
if txStmtCreateCheck, err = tx.Prepare(tkStmtCreateCheck); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheck")
goto bailout
}
defer txStmtCreateCheck.Close()
if txStmtCreateCheckInstance, err = tx.Prepare(tkStmtCreateCheckInstance); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckInstance")
goto bailout
}
defer txStmtCreateCheckInstance.Close()
if txStmtCreateCheckInstanceConfiguration, err = tx.Prepare(tkStmtCreateCheckInstanceConfiguration); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckInstanceConfiguration")
goto bailout
}
defer txStmtCreateCheckInstanceConfiguration.Close()
if txStmtDeleteCheck, err = tx.Prepare(stmt.TxMarkCheckDeleted); err != nil {
log.Println("Failed to prepare: txStmtDeleteCheck")
goto bailout
}
if txStmtDeleteCheckInstance, err = tx.Prepare(stmt.TxMarkCheckInstanceDeleted); err != nil {
log.Println("Failed to prepare: txStmtDeleteCheckInstance")
goto bailout
}
//
// REPOSITORY
if txStmtRepositoryPropertyOncallCreate, err = tx.Prepare(tkStmtRepositoryPropertyOncallCreate); err != nil {
log.Println("Failed to prepare: tkStmtRepositoryPropertyOncallCreate")
goto bailout
}
defer txStmtRepositoryPropertyOncallCreate.Close()
if txStmtRepositoryPropertyServiceCreate, err = tx.Prepare(tkStmtRepositoryPropertyServiceCreate); err != nil {
log.Println("Failed to prepare: tkStmtRepositoryPropertyServiceCreate")
goto bailout
}
defer txStmtRepositoryPropertyServiceCreate.Close()
if txStmtRepositoryPropertySystemCreate, err = tx.Prepare(tkStmtRepositoryPropertySystemCreate); err != nil {
log.Println("Failed to prepare: tkStmtRepositoryPropertySystemCreate")
goto bailout
}
defer txStmtRepositoryPropertySystemCreate.Close()
if txStmtRepositoryPropertyCustomCreate, err = tx.Prepare(tkStmtRepositoryPropertyCustomCreate); err != nil {
log.Println("Failed to prepare: tkStmtRepositoryPropertyCustomCreate")
goto bailout
}
defer txStmtRepositoryPropertyCustomCreate.Close()
//
// BUCKET
if txStmtCreateBucket, err = tx.Prepare(tkStmtCreateBucket); err != nil {
log.Println("Failed to prepare: tkStmtCreateBucket")
goto bailout
}
defer txStmtCreateBucket.Close()
if txStmtBucketPropertyOncallCreate, err = tx.Prepare(tkStmtBucketPropertyOncallCreate); err != nil {
log.Println("Failed to prepare: tkStmtBucketPropertyOncallCreate")
goto bailout
}
defer txStmtBucketPropertyOncallCreate.Close()
if txStmtBucketPropertyServiceCreate, err = tx.Prepare(tkStmtBucketPropertyServiceCreate); err != nil {
log.Println("Failed to prepare: tkStmtBucketPropertyServiceCreate")
goto bailout
}
defer txStmtBucketPropertyServiceCreate.Close()
if txStmtBucketPropertySystemCreate, err = tx.Prepare(tkStmtBucketPropertySystemCreate); err != nil {
log.Println("Failed to prepare: tkStmtBucketPropertySystemCreate")
goto bailout
}
defer txStmtBucketPropertySystemCreate.Close()
if txStmtBucketPropertyCustomCreate, err = tx.Prepare(tkStmtBucketPropertyCustomCreate); err != nil {
log.Println("Failed to prepare: tkStmtBucketPropertyCustomCreate")
goto bailout
}
defer txStmtBucketPropertyCustomCreate.Close()
//
// GROUP
if txStmtGroupCreate, err = tx.Prepare(tkStmtGroupCreate); err != nil {
log.Println("Failed to prepare: tkStmtGroupCreate")
goto bailout
}
defer txStmtGroupCreate.Close()
if txStmtGroupUpdate, err = tx.Prepare(tkStmtGroupUpdate); err != nil {
log.Println("Failed to prepare: tkStmtGroupUpdate")
goto bailout
}
defer txStmtGroupUpdate.Close()
if txStmtGroupDelete, err = tx.Prepare(tkStmtGroupDelete); err != nil {
log.Println("Failed to prepare: tkStmtGroupDelete")
goto bailout
}
defer txStmtGroupDelete.Close()
if txStmtGroupMemberNewNode, err = tx.Prepare(tkStmtGroupMemberNewNode); err != nil {
log.Println("Failed to prepare: tkStmtGroupMemberNewNode")
goto bailout
}
defer txStmtGroupMemberNewNode.Close()
if txStmtGroupMemberNewCluster, err = tx.Prepare(tkStmtGroupMemberNewCluster); err != nil {
log.Println("Failed to prepare: tkStmtGroupMemberNewCluster")
goto bailout
}
defer txStmtGroupMemberNewCluster.Close()
if txStmtGroupMemberNewGroup, err = tx.Prepare(tkStmtGroupMemberNewGroup); err != nil {
log.Println("Failed to prepare: tkStmtGroupMemberNewGroup")
goto bailout
}
defer txStmtGroupMemberNewGroup.Close()
if txStmtGroupMemberRemoveNode, err = tx.Prepare(tkStmtGroupMemberRemoveNode); err != nil {
log.Println("Failed to prepare: tkStmtGroupMemberRemoveNode")
goto bailout
}
defer txStmtGroupMemberRemoveNode.Close()
if txStmtGroupMemberRemoveCluster, err = tx.Prepare(tkStmtGroupMemberRemoveCluster); err != nil {
log.Println("Failed to prepare: tkStmtGroupMemberRemoveCluster")
goto bailout
}
defer txStmtGroupMemberRemoveCluster.Close()
if txStmtGroupMemberRemoveGroup, err = tx.Prepare(tkStmtGroupMemberRemoveGroup); err != nil {
log.Println("Failed to prepare: tkStmtGroupMemberRemoveGroup")
goto bailout
}
defer txStmtGroupMemberRemoveGroup.Close()
if txStmtGroupPropertyOncallCreate, err = tx.Prepare(tkStmtGroupPropertyOncallCreate); err != nil {
log.Println("Failed to prepare: tkStmtGroupPropertyOncallCreate")
goto bailout
}
defer txStmtGroupPropertyOncallCreate.Close()
if txStmtGroupPropertyServiceCreate, err = tx.Prepare(tkStmtGroupPropertyServiceCreate); err != nil {
log.Println("Failed to prepare: tkStmtGroupPropertyServiceCreate")
goto bailout
}
defer txStmtGroupPropertyServiceCreate.Close()
if txStmtGroupPropertySystemCreate, err = tx.Prepare(tkStmtGroupPropertySystemCreate); err != nil {
log.Println("Failed to prepare: tkStmtGroupPropertySystemCreate")
goto bailout
}
defer txStmtGroupPropertySystemCreate.Close()
if txStmtGroupPropertyCustomCreate, err = tx.Prepare(tkStmtGroupPropertyCustomCreate); err != nil {
log.Println("Failed to prepare: tkStmtGroupPropertyCustomCreate")
goto bailout
}
defer txStmtGroupPropertyCustomCreate.Close()
//
// CLUSTER
if txStmtClusterCreate, err = tx.Prepare(tkStmtClusterCreate); err != nil {
log.Println("Failed to prepare: tkStmtClusterCreate")
goto bailout
}
defer txStmtClusterCreate.Close()
if txStmtClusterUpdate, err = tx.Prepare(tkStmtClusterUpdate); err != nil {
log.Println("Failed to prepare: tkStmtClusterUpdate")
goto bailout
}
defer txStmtClusterUpdate.Close()
if txStmtClusterDelete, err = tx.Prepare(tkStmtClusterDelete); err != nil {
log.Println("Failed to prepare: tkStmtClusterDelete")
goto bailout
}
defer txStmtClusterDelete.Close()
if txStmtClusterMemberNew, err = tx.Prepare(tkStmtClusterMemberNew); err != nil {
log.Println("Failed to prepare: tkStmtClusterMemberNew")
goto bailout
}
defer txStmtClusterMemberNew.Close()
if txStmtClusterMemberRemove, err = tx.Prepare(tkStmtClusterMemberRemove); err != nil {
log.Println("Failed to prepare: tkStmtClusterMemberRemove")
goto bailout
}
defer txStmtClusterMemberRemove.Close()
if txStmtClusterPropertyOncallCreate, err = tx.Prepare(tkStmtClusterPropertyOncallCreate); err != nil {
log.Println("Failed to prepare: tkStmtClusterPropertyOncallCreate")
goto bailout
}
defer txStmtClusterPropertyOncallCreate.Close()
if txStmtClusterPropertyServiceCreate, err = tx.Prepare(tkStmtClusterPropertyServiceCreate); err != nil {
log.Println("Failed to prepare: tkStmtClusterPropertyServiceCreate")
goto bailout
}
defer txStmtClusterPropertyServiceCreate.Close()
if txStmtClusterPropertySystemCreate, err = tx.Prepare(tkStmtClusterPropertySystemCreate); err != nil {
log.Println("Failed to prepare: tkStmtClusterPropertySystemCreate")
goto bailout
}
defer txStmtClusterPropertySystemCreate.Close()
if txStmtClusterPropertyCustomCreate, err = tx.Prepare(tkStmtClusterPropertyCustomCreate); err != nil {
log.Println("Failed to prepare: tkStmtClusterPropertyCustomCreate")
goto bailout
}
defer txStmtClusterPropertyCustomCreate.Close()
//
// NODE
if txStmtBucketAssignNode, err = tx.Prepare(tkStmtBucketAssignNode); err != nil {
log.Println("Failed to prepare: tkStmtBucketAssignNode")
goto bailout
}
defer txStmtBucketAssignNode.Close()
if txStmtUpdateNodeState, err = tx.Prepare(tkStmtUpdateNodeState); err != nil {
log.Println("Failed to prepare: tkStmtUpdateNodeState")
goto bailout
}
defer txStmtUpdateNodeState.Close()
if txStmtNodeUnassignFromBucket, err = tx.Prepare(tkStmtNodeUnassignFromBucket); err != nil {
log.Println("Failed to prepare: tkStmtNodeUnassignFromBucket")
goto bailout
}
defer txStmtNodeUnassignFromBucket.Close()
if txStmtNodePropertyOncallCreate, err = tx.Prepare(tkStmtNodePropertyOncallCreate); err != nil {
log.Println("Failed to prepare: tkStmtNodePropertyOncallCreate")
goto bailout
}
defer txStmtNodePropertyOncallCreate.Close()
if txStmtNodePropertyServiceCreate, err = tx.Prepare(tkStmtNodePropertyServiceCreate); err != nil {
log.Println("Failed to prepare: tkStmtNodePropertyServiceCreate")
goto bailout
}
defer txStmtNodePropertyServiceCreate.Close()
if txStmtNodePropertySystemCreate, err = tx.Prepare(tkStmtNodePropertySystemCreate); err != nil {
log.Println("Failed to prepare: tkStmtNodePropertySystemCreate")
goto bailout
}
defer txStmtNodePropertySystemCreate.Close()
if txStmtNodePropertyCustomCreate, err = tx.Prepare(tkStmtNodePropertyCustomCreate); err != nil {
log.Println("Failed to prepare: tkStmtNodePropertyCustomCreate")
goto bailout
}
defer txStmtNodePropertyCustomCreate.Close()
// defer constraint checks
if _, err = tx.Exec(tkStmtDeferAllConstraints); err != nil {
log.Println("Failed to exec: tkStmtDeferAllConstraints")
goto bailout
}
// save the check configuration as part of the transaction before
// processing the action channel
if strings.Contains(q.Action, "add_check_to_") {
if q.CheckConfig.CheckConfig.BucketId != "" {
nullBucket = sql.NullString{
String: q.CheckConfig.CheckConfig.BucketId,
Valid: true,
}
} else {
nullBucket = sql.NullString{String: "", Valid: false}
}
if _, err = txStmtCreateCheckConfigurationBase.Exec(
q.CheckConfig.CheckConfig.Id,
q.CheckConfig.CheckConfig.Name,
int64(q.CheckConfig.CheckConfig.Interval),
q.CheckConfig.CheckConfig.RepositoryId,
nullBucket,
q.CheckConfig.CheckConfig.CapabilityId,
q.CheckConfig.CheckConfig.ObjectId,
q.CheckConfig.CheckConfig.ObjectType,
q.CheckConfig.CheckConfig.IsActive,
q.CheckConfig.CheckConfig.IsEnabled,
q.CheckConfig.CheckConfig.Inheritance,
q.CheckConfig.CheckConfig.ChildrenOnly,
q.CheckConfig.CheckConfig.ExternalId,
); err != nil {
goto bailout
}
threshloop:
for _, thr := range q.CheckConfig.CheckConfig.Thresholds {
if _, err = txStmtCreateCheckConfigurationThreshold.Exec(
q.CheckConfig.CheckConfig.Id,
thr.Predicate.Symbol,
strconv.FormatInt(thr.Value, 10),
thr.Level.Name,
); err != nil {
break threshloop
}
}
if err != nil {
goto bailout
}
constrloop:
for _, constr := range q.CheckConfig.CheckConfig.Constraints {
switch constr.ConstraintType {
case "native":
if _, err = txStmtCreateCheckConfigurationConstraintNative.Exec(
q.CheckConfig.CheckConfig.Id,
constr.Native.Name,
constr.Native.Value,
); err != nil {
break constrloop
}
case "oncall":
if _, err = txStmtCreateCheckConfigurationConstraintOncall.Exec(
q.CheckConfig.CheckConfig.Id,
constr.Oncall.Id,
); err != nil {
break constrloop
}
case "custom":
if _, err = txStmtCreateCheckConfigurationConstraintCustom.Exec(
q.CheckConfig.CheckConfig.Id,
constr.Custom.Id,
constr.Custom.RepositoryId,
constr.Custom.Value,
); err != nil {
break constrloop
}
case "system":
if _, err = txStmtCreateCheckConfigurationConstraintSystem.Exec(
q.CheckConfig.CheckConfig.Id,
constr.System.Name,
constr.System.Value,
); err != nil {
break constrloop
}
case "service":
if constr.Service.TeamId != tk.team {
err = fmt.Errorf("Service constraint has mismatched TeamID values: %s/%s",
tk.team, constr.Service.TeamId)
fmt.Println(err)
break constrloop
}
log.Printf(`SQL: tkStmtCreateCheckConfigurationConstraintService:
CheckConfig ID: %s
Team ID: %s
Service Name: %s%s`,
q.CheckConfig.CheckConfig.Id,
tk.team,
constr.Service.Name, "\n")
if _, err = txStmtCreateCheckConfigurationConstraintService.Exec(
q.CheckConfig.CheckConfig.Id,
tk.team,
constr.Service.Name,
); err != nil {
break constrloop
}
case "attribute":
if _, err = txStmtCreateCheckConfigurationConstraintAttribute.Exec(
q.CheckConfig.CheckConfig.Id,
constr.Attribute.Name,
constr.Attribute.Value,
); err != nil {
break constrloop
}
}
}
if err != nil {
goto bailout
}
}
// mark the check configuration as deleted
if strings.HasPrefix(q.Action, `remove_check_from_`) {
if _, err = tx.Exec(stmt.TxMarkCheckConfigDeleted, q.CheckConfig.CheckConfig.Id); err != nil {
goto bailout
}
}
// if the error channel has entries, we can fully ignore the
// action channel
//
// Drain a snapshot of the channel: len() is read once, so only the
// errors already queued at this point are consumed.
for i := len(tk.errChan); i > 0; i-- {
e := <-tk.errChan
// Log the full error object as JSON for diagnostics; the Marshal
// error is intentionally ignored (best-effort logging).
b, _ := json.Marshal(e)
log.Println(string(b))
hasErrors = true
// Keep only the first error for the bailout path.
if err == nil {
// NOTE(review): fmt.Errorf is called with a non-constant format
// string; if e.Action ever contains a '%' verb the message is
// garbled. errors.New(e.Action) would be the safe form — go vet's
// printf check flags this pattern.
err = fmt.Errorf(e.Action)
}
}
if hasErrors {
goto bailout
}
actionloop:
for i := len(tk.actionChan); i > 0; i-- {
a := <-tk.actionChan
// we need all messages to figure out why for example a deferred
// constraint later failed
//jBxX, _ := json.Marshal(a)
//log.Printf("%s - Processing: %s\n", q.JobId.String(), string(jBxX))
switch a.Type {
// REPOSITORY
case "repository":
switch a.Action {
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtRepositoryPropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Property.Custom.RepositoryId,
a.Property.View,
a.Property.Custom.Id,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
if _, err = txStmtRepositoryPropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Repository.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
if _, err = txStmtRepositoryPropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Repository.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtRepositoryPropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Repository.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtRepositoryPropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtRepositoryPropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtRepositoryPropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtRepositoryPropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
sql.NullString{String: "", Valid: false},
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Repository.Id,
"repository",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = txStmtDeleteCheck.Exec(
a.Check.CheckId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
// BUCKET
case "bucket":
switch a.Action {
case "create":
if _, err = txStmtCreateBucket.Exec(
a.Bucket.Id,
a.Bucket.Name,
a.Bucket.IsFrozen,
a.Bucket.IsDeleted,
a.Bucket.RepositoryId,
a.Bucket.Environment,
a.Bucket.TeamId,
); err != nil {
break actionloop
}
case "node_assignment":
if _, err = txStmtBucketAssignNode.Exec(
a.ChildNode.Id,
a.Bucket.Id,
a.Bucket.TeamId,
); err != nil {
break actionloop
}
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtBucketPropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.Custom.Id,
a.Property.Custom.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
if _, err = txStmtBucketPropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
log.Printf(`SQL: tkStmtBucketPropertyServiceCreate:
Instance ID: %s
Source Instance ID: %s
Bucket ID: %s
View: %s
Service Name: %s
Service TeamId: %s
Repository ID: %s
Inheritance Enabled: %t
Children Only: %t%s`,
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly, "\n")
if _, err = txStmtBucketPropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtBucketPropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtBucketPropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtBucketPropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtBucketPropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtBucketPropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Bucket.Id,
"bucket",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = txStmtDeleteCheck.Exec(
a.Check.CheckId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
// GROUP
case "group":
switch a.Action {
case "create":
if _, err = txStmtGroupCreate.Exec(
a.Group.Id,
a.Group.BucketId,
a.Group.Name,
a.Group.ObjectState,
a.Group.TeamId,
); err != nil {
break actionloop
}
case "update":
if _, err = txStmtGroupUpdate.Exec(
a.Group.Id,
a.Group.ObjectState,
); err != nil {
break actionloop
}
case "delete":
if _, err = txStmtGroupDelete.Exec(
a.Group.Id,
); err != nil {
break actionloop
}
case "member_new":
switch a.ChildType {
case "group":
log.Println("==> group/new membergroup")
if _, err = txStmtGroupMemberNewGroup.Exec(
a.Group.Id,
a.ChildGroup.Id,
a.Group.BucketId,
); err != nil {
break actionloop
}
case "cluster":
log.Println("==> group/new membercluster")
if _, err = txStmtGroupMemberNewCluster.Exec(
a.Group.Id,
a.ChildCluster.Id,
a.Group.BucketId,
); err != nil {
break actionloop
}
case "node":
log.Println("==> group/new membernode")
if _, err = txStmtGroupMemberNewNode.Exec(
a.Group.Id,
a.ChildNode.Id,
a.Group.BucketId,
); err != nil {
break actionloop
}
}
case "member_removed":
switch a.ChildType {
case "group":
if _, err = txStmtGroupMemberRemoveGroup.Exec(
a.Group.Id,
a.ChildGroup.Id,
); err != nil {
break actionloop
}
case "cluster":
if _, err = txStmtGroupMemberRemoveCluster.Exec(
a.Group.Id,
a.ChildCluster.Id,
); err != nil {
break actionloop
}
case "node":
if _, err = txStmtGroupMemberRemoveNode.Exec(
a.Group.Id,
a.ChildNode.Id,
); err != nil {
break actionloop
}
}
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtGroupPropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Group.Id,
a.Property.View,
a.Property.Custom.Id,
a.Property.BucketId,
a.Property.Custom.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
if _, err = txStmtGroupPropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Group.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
if _, err = txStmtGroupPropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Group.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtGroupPropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Group.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtGroupPropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtGroupPropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtGroupPropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtGroupPropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Group.Id,
"group",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = txStmtDeleteCheck.Exec(
a.Check.CheckId,
); err != nil {
break actionloop
}
case "check_instance_create":
if _, err = txStmtCreateCheckInstance.Exec(
a.CheckInstance.InstanceId,
a.CheckInstance.CheckId,
a.CheckInstance.ConfigId,
"00000000-0000-0000-0000-000000000000",
time.Now().UTC(),
); err != nil {
break actionloop
}
fallthrough
case "check_instance_update":
if _, err = txStmtCreateCheckInstanceConfiguration.Exec(
a.CheckInstance.InstanceConfigId,
a.CheckInstance.Version,
a.CheckInstance.InstanceId,
a.CheckInstance.ConstraintHash,
a.CheckInstance.ConstraintValHash,
a.CheckInstance.InstanceService,
a.CheckInstance.InstanceSvcCfgHash,
a.CheckInstance.InstanceServiceConfig,
time.Now().UTC(),
"awaiting_computation",
"none",
false,
"{}",
); err != nil {
fmt.Println(`Failed CreateCheckInstanceConfiguration`, a.CheckInstance.InstanceConfigId)
break actionloop
}
case "check_instance_delete":
if _, err = txStmtDeleteCheckInstance.Exec(
a.CheckInstance.InstanceId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
// CLUSTER
case "cluster":
switch a.Action {
case "create":
if _, err = txStmtClusterCreate.Exec(
a.Cluster.Id,
a.Cluster.Name,
a.Cluster.BucketId,
a.Cluster.ObjectState,
a.Cluster.TeamId,
); err != nil {
break actionloop
}
case "update":
if _, err = txStmtClusterUpdate.Exec(
a.Cluster.Id,
a.Cluster.ObjectState,
); err != nil {
break actionloop
}
case "delete":
if _, err = txStmtClusterDelete.Exec(
a.Cluster.Id,
); err != nil {
break actionloop
}
case "member_new":
log.Println("==> cluster/new membernode")
if _, err = txStmtClusterMemberNew.Exec(
a.Cluster.Id,
a.ChildNode.Id,
a.Cluster.BucketId,
); err != nil {
break actionloop
}
case "member_removed":
log.Println("==> cluster/new membernode")
if _, err = txStmtClusterMemberRemove.Exec(
a.Cluster.Id,
a.ChildNode.Id,
); err != nil {
break actionloop
}
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtClusterPropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.Custom.Id,
a.Property.BucketId,
a.Property.Custom.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
if _, err = txStmtClusterPropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
log.Printf(`SQL: tkStmtClusterPropertyServiceCreate:
Instance ID: %s
Source Instance ID: %s
Cluster ID: %s
View: %s
Service Name: %s
Service TeamId: %s
Repository ID: %s
Inheritance Enabled: %t
Children Only: %t%s`,
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly, "\n")
if _, err = txStmtClusterPropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtClusterPropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtClusterPropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtClusterPropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtClusterPropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtClusterPropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Cluster.Id,
"cluster",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = txStmtDeleteCheck.Exec(
a.Check.CheckId,
); err != nil {
break actionloop
}
case "check_instance_create":
if _, err = txStmtCreateCheckInstance.Exec(
a.CheckInstance.InstanceId,
a.CheckInstance.CheckId,
a.CheckInstance.ConfigId,
"00000000-0000-0000-0000-000000000000",
time.Now().UTC(),
); err != nil {
break actionloop
}
fallthrough
case "check_instance_update":
if _, err = txStmtCreateCheckInstanceConfiguration.Exec(
a.CheckInstance.InstanceConfigId,
a.CheckInstance.Version,
a.CheckInstance.InstanceId,
a.CheckInstance.ConstraintHash,
a.CheckInstance.ConstraintValHash,
a.CheckInstance.InstanceService,
a.CheckInstance.InstanceSvcCfgHash,
a.CheckInstance.InstanceServiceConfig,
time.Now().UTC(),
"awaiting_computation",
"none",
false,
"{}",
); err != nil {
fmt.Println(`Failed CreateCheckInstanceConfiguration`, a.CheckInstance.InstanceConfigId)
break actionloop
}
case "check_instance_delete":
if _, err = txStmtDeleteCheckInstance.Exec(
a.CheckInstance.InstanceId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
// NODE
case "node":
switch a.Action {
case "delete":
if _, err = txStmtNodeUnassignFromBucket.Exec(
a.Node.Id,
a.Node.Config.BucketId,
a.Node.TeamId,
); err != nil {
break actionloop
}
fallthrough // need to call txStmtUpdateNodeState for delete as well
case "update":
log.Println("==> node/update")
if _, err = txStmtUpdateNodeState.Exec(
a.Node.Id,
a.Node.State,
); err != nil {
break actionloop
}
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtNodePropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.Custom.Id,
a.Property.BucketId,
a.Property.Custom.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
log.Printf(`SQL: tkStmtNodePropertySystemCreate:
Instance ID: %s
Source Instance ID: %s
Node ID: %s
View: %s
SystemProperty: %s
Object Type: %s
Repository ID: %s
Inheritance Enabled: %t
Children Only: %t
System Property Value: %s
Is Inherited: %t%s`,
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited, "\n")
if _, err = txStmtNodePropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
if _, err = txStmtNodePropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtNodePropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtNodePropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtNodePropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtNodePropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtNodePropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
log.Printf(`SQL: tkStmtCreateCheck:
Check ID: %s
Repository ID: %s
Bucket ID: %s
Source Check ID: %s
Source Type: %s
Inherited From: %s
Check Config ID: %s
Check Capability ID: %s
Node ID: %s%s`,
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Node.Id, "\n")
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Node.Id,
"node",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = tx.Exec(stmt.TxMarkCheckDeleted,
a.Check.CheckId,
); err != nil {
break actionloop
}
case "check_instance_create":
if _, err = txStmtCreateCheckInstance.Exec(
a.CheckInstance.InstanceId,
a.CheckInstance.CheckId,
a.CheckInstance.ConfigId,
"00000000-0000-0000-0000-000000000000",
time.Now().UTC(),
); err != nil {
break actionloop
}
fallthrough
case "check_instance_update":
if _, err = txStmtCreateCheckInstanceConfiguration.Exec(
a.CheckInstance.InstanceConfigId,
a.CheckInstance.Version,
a.CheckInstance.InstanceId,
a.CheckInstance.ConstraintHash,
a.CheckInstance.ConstraintValHash,
a.CheckInstance.InstanceService,
a.CheckInstance.InstanceSvcCfgHash,
a.CheckInstance.InstanceServiceConfig,
time.Now().UTC(),
"awaiting_computation",
"none",
false,
"{}",
); err != nil {
fmt.Println(`Failed CreateCheckInstanceConfiguration`, a.CheckInstance.InstanceConfigId)
break actionloop
}
case "check_instance_delete":
if _, err = txStmtDeleteCheckInstance.Exec(
a.CheckInstance.InstanceId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
case "errorchannel":
continue actionloop
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
}
if err != nil {
goto bailout
}
// mark job as finished
if _, err = tx.Exec(
tkStmtFinishJob,
q.JobId.String(),
time.Now().UTC(),
"success",
``, // empty error field
); err != nil {
goto bailout
}
// commit transaction
if err = tx.Commit(); err != nil {
goto bailout
}
log.Printf("SUCCESS - Finished job: %s\n", q.JobId.String())
// accept tree changes
tk.tree.Commit()
return
bailout:
log.Printf("FAILED - Finished job: %s\n", q.JobId.String())
log.Println(err)
tk.tree.Rollback()
tx.Rollback()
tk.conn.Exec(
tkStmtFinishJob,
q.JobId.String(),
time.Now().UTC(),
"failed",
err.Error(),
)
for i := len(tk.actionChan); i > 0; i-- {
a := <-tk.actionChan
jB, _ := json.Marshal(a)
log.Printf("Cleaned message: %s\n", string(jB))
}
return
}
// convertCheckForDelete builds the minimal tree.Check needed to address
// an existing check for deletion: only the source and configuration ids
// are parsed from the wire-format configuration; everything else stays
// at its zero/Nil value.
func (tk *treeKeeper) convertCheckForDelete(conf *proto.CheckConfig) (*tree.Check, error) {
	sourceId, err := uuid.FromString(conf.ExternalId)
	if err != nil {
		return nil, err
	}
	configId, err := uuid.FromString(conf.Id)
	if err != nil {
		return nil, err
	}
	return &tree.Check{
		Id:            uuid.Nil,
		InheritedFrom: uuid.Nil,
		SourceId:      sourceId,
		ConfigId:      configId,
	}, nil
}
// convertCheck translates a wire-format check configuration into the
// internal tree.Check representation. The check's view is resolved from
// its capability via the prepared tk.get_view statement; thresholds and
// constraints are copied over element by element.
//
// Returns nil and an error if either uuid fails to parse or the view
// lookup fails; callers must not use a partially-populated check.
func (tk *treeKeeper) convertCheck(conf *proto.CheckConfig) (*tree.Check, error) {
	treechk := &tree.Check{
		Id:            uuid.Nil,
		SourceId:      uuid.Nil,
		InheritedFrom: uuid.Nil,
		Inheritance:   conf.Inheritance,
		ChildrenOnly:  conf.ChildrenOnly,
		Interval:      conf.Interval,
	}
	var err error
	// BUGFIX: these parse errors were previously discarded with `_`,
	// silently leaving uuid.Nil ids in a check that looked valid.
	// Surface them, consistent with convertCheckForDelete.
	if treechk.CapabilityId, err = uuid.FromString(conf.CapabilityId); err != nil {
		return nil, err
	}
	if treechk.ConfigId, err = uuid.FromString(conf.Id); err != nil {
		return nil, err
	}
	if err = tk.get_view.QueryRow(conf.CapabilityId).Scan(&treechk.View); err != nil {
		// return nil rather than an empty &tree.Check{}: the value is
		// meaningless alongside a non-nil error
		return nil, err
	}
	treechk.Thresholds = make([]tree.CheckThreshold, len(conf.Thresholds))
	for i, thr := range conf.Thresholds {
		treechk.Thresholds[i] = tree.CheckThreshold{
			Predicate: thr.Predicate.Symbol,
			Level:     uint8(thr.Level.Numeric),
			Value:     thr.Value,
		}
	}
	treechk.Constraints = make([]tree.CheckConstraint, len(conf.Constraints))
	for i, constr := range conf.Constraints {
		ncon := tree.CheckConstraint{
			Type: constr.ConstraintType,
		}
		// key/value semantics depend on the constraint type
		switch constr.ConstraintType {
		case "native":
			ncon.Key = constr.Native.Name
			ncon.Value = constr.Native.Value
		case "oncall":
			ncon.Key = "OncallId"
			ncon.Value = constr.Oncall.Id
		case "custom":
			ncon.Key = constr.Custom.Id
			ncon.Value = constr.Custom.Value
		case "system":
			ncon.Key = constr.System.Name
			ncon.Value = constr.System.Value
		case "service":
			ncon.Key = "name"
			ncon.Value = constr.Service.Name
		case "attribute":
			ncon.Key = constr.Attribute.Name
			ncon.Value = constr.Attribute.Value
		}
		treechk.Constraints[i] = ncon
	}
	return treechk, nil
}
// vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix
TreeKeeper: add isStopped() helper function
package main
import (
"database/sql"
"encoding/json"
"fmt"
"log"
"strconv"
"strings"
"time"
"github.com/satori/go.uuid"
)
// treeRequest is a unit of work submitted to a treeKeeper. The
// RequestType/Action pair selects the operation; only the matching
// payload field (Repository, Bucket, Group, Cluster, Node or
// CheckConfig) is populated by the sender.
type treeRequest struct {
	RequestType string
	Action      string
	User        string
	JobId       uuid.UUID
	// reply carries the result back to the submitting handler
	reply       chan somaResult
	Repository  somaRepositoryRequest
	Bucket      somaBucketRequest
	Group       somaGroupRequest
	Cluster     somaClusterRequest
	Node        somaNodeRequest
	CheckConfig somaCheckConfigRequest
}
// treeResult is the outcome of a processed treeRequest, correlated to
// its request via JobId.
type treeResult struct {
	ResultType  string
	ResultError error
	JobId       uuid.UUID
	Repository  somaRepositoryResult
	// NOTE(review): Bucket is declared as somaRepositoryRequest, not a
	// bucket result type — this looks like a copy/paste slip; confirm
	// against the senders before relying on this field.
	Bucket somaRepositoryRequest
}
// treeKeeper owns the in-memory configuration tree of one repository
// and serializes all modifications to it through its input channel.
type treeKeeper struct {
	repoId   string
	repoName string
	team     string
	// broken: startup load failed; run() only flies holding patterns
	broken bool
	// ready: startup finished, keeper accepts requests
	ready bool
	// stopped: keeper was stopped and waits for shutdown
	stopped bool
	// frozen: deployment details are not rebuilt after processing
	frozen   bool
	input    chan treeRequest
	shutdown chan bool
	stopchan chan bool
	conn     *sql.DB
	tree     *tree.Tree
	// errChan/actionChan receive feedback from tree manipulation
	errChan    chan *tree.Error
	actionChan chan *tree.Action
	// prepared statements, set up in run()
	start_job *sql.Stmt
	get_view  *sql.Stmt
}
// run is the treeKeeper's main loop. It loads the repository tree,
// prepares the frequently-used statements and then services requests
// from tk.input until shutdown. A broken repository only logs holding
// patterns; observer mode and a received stop signal both park the
// keeper until shutdown.
func (tk *treeKeeper) run() {
	log.Printf("Starting TreeKeeper for Repo %s (%s)", tk.repoName, tk.repoId)
	tk.startupLoad()
	var err error

	// a broken repository is never processed; log every 10 seconds
	// until shutdown (or stop, which parks at stopsign)
	if tk.broken {
		tickTack := time.NewTicker(time.Second * 10).C
	hoverloop:
		for {
			select {
			case <-tickTack:
				log.Printf("TK[%s]: BROKEN REPOSITORY %s flying holding patterns!\n",
					tk.repoName, tk.repoId)
			case <-tk.shutdown:
				break hoverloop
			case <-tk.stopchan:
				tk.stop()
				goto stopsign
			}
		}
		return
	}
	if tk.start_job, err = tk.conn.Prepare(tkStmtStartJob); err != nil {
		log.Fatal("treekeeper/start-job: ", err)
	}
	defer tk.start_job.Close()
	if tk.get_view, err = tk.conn.Prepare(tkStmtGetViewFromCapability); err != nil {
		log.Fatal("treekeeper/get-view-by-capability: ", err)
	}
	defer tk.get_view.Close()
	log.Printf("TK[%s]: ready for service!\n", tk.repoName)
	tk.ready = true

	// observers never process jobs themselves
	if SomaCfg.Observer {
		fmt.Printf("TreeKeeper [%s] entered observer mode\n", tk.repoName)
		<-tk.shutdown
		goto exit
	}

stopsign:
	if tk.stopped {
		// a stopped keeper waits for shutdown without processing input
		<-tk.shutdown
		goto exit
	}
runloop:
	for {
		select {
		case <-tk.shutdown:
			break runloop
		case <-tk.stopchan:
			tk.stop()
			goto stopsign
		case req := <-tk.input:
			tk.process(&req)
			handlerMap[`jobDelay`].(jobDelay).notify <- req.JobId.String()
			if !tk.frozen {
				tk.buildDeploymentDetails()
				tk.orderDeploymentDetails()
			}
		}
	}
exit:
	// BUGFIX: a label must be bound to a statement; a bare `exit:`
	// directly before the closing brace does not compile in Go
	return
}
// isReady reports whether startup loading finished and the keeper is
// serving requests.
func (tk *treeKeeper) isReady() bool {
	return tk.ready
}
// isBroken reports whether the keeper's repository failed to load and
// is therefore not being processed.
func (tk *treeKeeper) isBroken() bool {
	return tk.broken
}
// stop puts the treeKeeper into stopped mode: it flags itself stopped
// and clears both the ready and broken indicators in one step.
func (tk *treeKeeper) stop() {
	tk.stopped, tk.ready, tk.broken = true, false, false
}
// isStopped reports whether the keeper has been stopped via stop().
func (tk *treeKeeper) isStopped() bool {
	return tk.stopped
}
func (tk *treeKeeper) process(q *treeRequest) {
var (
err error
hasErrors bool
tx *sql.Tx
treeCheck *tree.Check
nullBucket sql.NullString
txStmtPropertyInstanceCreate *sql.Stmt
txStmtRepositoryPropertyServiceCreate *sql.Stmt
txStmtRepositoryPropertySystemCreate *sql.Stmt
txStmtRepositoryPropertyOncallCreate *sql.Stmt
txStmtRepositoryPropertyCustomCreate *sql.Stmt
txStmtCreateBucket *sql.Stmt
txStmtBucketPropertyServiceCreate *sql.Stmt
txStmtBucketPropertySystemCreate *sql.Stmt
txStmtBucketPropertyOncallCreate *sql.Stmt
txStmtBucketPropertyCustomCreate *sql.Stmt
txStmtGroupCreate *sql.Stmt
txStmtGroupUpdate *sql.Stmt
txStmtGroupDelete *sql.Stmt
txStmtGroupMemberNewNode *sql.Stmt
txStmtGroupMemberNewCluster *sql.Stmt
txStmtGroupMemberNewGroup *sql.Stmt
txStmtGroupMemberRemoveNode *sql.Stmt
txStmtGroupMemberRemoveCluster *sql.Stmt
txStmtGroupMemberRemoveGroup *sql.Stmt
txStmtGroupPropertyServiceCreate *sql.Stmt
txStmtGroupPropertySystemCreate *sql.Stmt
txStmtGroupPropertyOncallCreate *sql.Stmt
txStmtGroupPropertyCustomCreate *sql.Stmt
txStmtClusterCreate *sql.Stmt
txStmtClusterUpdate *sql.Stmt
txStmtClusterDelete *sql.Stmt
txStmtClusterMemberNew *sql.Stmt
txStmtClusterMemberRemove *sql.Stmt
txStmtClusterPropertyServiceCreate *sql.Stmt
txStmtClusterPropertySystemCreate *sql.Stmt
txStmtClusterPropertyOncallCreate *sql.Stmt
txStmtClusterPropertyCustomCreate *sql.Stmt
txStmtBucketAssignNode *sql.Stmt
txStmtUpdateNodeState *sql.Stmt
txStmtNodeUnassignFromBucket *sql.Stmt
txStmtNodePropertyServiceCreate *sql.Stmt
txStmtNodePropertySystemCreate *sql.Stmt
txStmtNodePropertyOncallCreate *sql.Stmt
txStmtNodePropertyCustomCreate *sql.Stmt
txStmtCreateCheckConfigurationBase *sql.Stmt
txStmtCreateCheckConfigurationThreshold *sql.Stmt
txStmtCreateCheckConfigurationConstraintSystem *sql.Stmt
txStmtCreateCheckConfigurationConstraintNative *sql.Stmt
txStmtCreateCheckConfigurationConstraintOncall *sql.Stmt
txStmtCreateCheckConfigurationConstraintCustom *sql.Stmt
txStmtCreateCheckConfigurationConstraintService *sql.Stmt
txStmtCreateCheckConfigurationConstraintAttribute *sql.Stmt
txStmtCreateCheck *sql.Stmt
txStmtCreateCheckInstance *sql.Stmt
txStmtCreateCheckInstanceConfiguration *sql.Stmt
txStmtDeleteCheck *sql.Stmt
txStmtDeleteCheckInstance *sql.Stmt
)
_, err = tk.start_job.Exec(q.JobId.String(), time.Now().UTC())
if err != nil {
log.Println(err)
}
log.Printf("Processing job: %s\n", q.JobId.String())
tk.tree.Begin()
switch q.Action {
//
// REPOSITORY MANIPULATION REQUESTS
case "add_system_property_to_repository":
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
Id: uuid.NewV4(),
Inheritance: (*q.Repository.Repository.Properties)[0].Inheritance,
ChildrenOnly: (*q.Repository.Repository.Properties)[0].ChildrenOnly,
View: (*q.Repository.Repository.Properties)[0].View,
Key: (*q.Repository.Repository.Properties)[0].System.Name,
Value: (*q.Repository.Repository.Properties)[0].System.Value,
})
case `delete_system_property_from_repository`:
srcUUID, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `repository`,
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
SourceId: srcUUID,
View: (*q.Repository.Repository.Properties)[0].View,
Key: (*q.Repository.Repository.Properties)[0].System.Name,
Value: (*q.Repository.Repository.Properties)[0].System.Value,
})
case "add_service_property_to_repository":
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
Id: uuid.NewV4(),
Inheritance: (*q.Repository.Repository.Properties)[0].Inheritance,
ChildrenOnly: (*q.Repository.Repository.Properties)[0].ChildrenOnly,
View: (*q.Repository.Repository.Properties)[0].View,
Service: (*q.Repository.Repository.Properties)[0].Service.Name,
Attributes: (*q.Repository.Repository.Properties)[0].Service.Attributes,
})
case `delete_service_property_from_repository`:
srcUUID, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
SourceId: srcUUID,
View: (*q.Repository.Repository.Properties)[0].View,
Service: (*q.Repository.Repository.Properties)[0].Service.Name,
})
case "add_oncall_property_to_repository":
oncallId, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
Id: uuid.NewV4(),
OncallId: oncallId,
Inheritance: (*q.Repository.Repository.Properties)[0].Inheritance,
ChildrenOnly: (*q.Repository.Repository.Properties)[0].ChildrenOnly,
View: (*q.Repository.Repository.Properties)[0].View,
Name: (*q.Repository.Repository.Properties)[0].Oncall.Name,
Number: (*q.Repository.Repository.Properties)[0].Oncall.Number,
})
case `delete_oncall_property_from_repository`:
srcUUID, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].SourceInstanceId)
oncallId, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
SourceId: srcUUID,
OncallId: oncallId,
View: (*q.Repository.Repository.Properties)[0].View,
Name: (*q.Repository.Repository.Properties)[0].Oncall.Name,
Number: (*q.Repository.Repository.Properties)[0].Oncall.Number,
})
case "add_custom_property_to_repository":
customId, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
Id: uuid.NewV4(),
CustomId: customId,
Inheritance: (*q.Repository.Repository.Properties)[0].Inheritance,
ChildrenOnly: (*q.Repository.Repository.Properties)[0].ChildrenOnly,
View: (*q.Repository.Repository.Properties)[0].View,
Key: (*q.Repository.Repository.Properties)[0].Custom.Name,
Value: (*q.Repository.Repository.Properties)[0].Custom.Value,
})
case `delete_custom_property_from_repository`:
srcUUID, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].SourceInstanceId)
customId, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
SourceId: srcUUID,
CustomId: customId,
View: (*q.Repository.Repository.Properties)[0].View,
Key: (*q.Repository.Repository.Properties)[0].Custom.Name,
Value: (*q.Repository.Repository.Properties)[0].Custom.Value,
})
//
// BUCKET MANIPULATION REQUESTS
case "create_bucket":
tree.NewBucket(tree.BucketSpec{
Id: uuid.NewV4().String(),
Name: q.Bucket.Bucket.Name,
Environment: q.Bucket.Bucket.Environment,
Team: tk.team,
Deleted: q.Bucket.Bucket.IsDeleted,
Frozen: q.Bucket.Bucket.IsFrozen,
Repository: q.Bucket.Bucket.RepositoryId,
}).Attach(tree.AttachRequest{
Root: tk.tree,
ParentType: "repository",
ParentId: tk.repoId,
ParentName: tk.repoName,
})
case "add_system_property_to_bucket":
tk.tree.Find(tree.FindRequest{
ElementType: "bucket",
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
Id: uuid.NewV4(),
Inheritance: (*q.Bucket.Bucket.Properties)[0].Inheritance,
ChildrenOnly: (*q.Bucket.Bucket.Properties)[0].ChildrenOnly,
View: (*q.Bucket.Bucket.Properties)[0].View,
Key: (*q.Bucket.Bucket.Properties)[0].System.Name,
Value: (*q.Bucket.Bucket.Properties)[0].System.Value,
})
case `delete_system_property_from_bucket`:
srcUUID, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `bucket`,
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
SourceId: srcUUID,
View: (*q.Bucket.Bucket.Properties)[0].View,
Key: (*q.Bucket.Bucket.Properties)[0].System.Name,
Value: (*q.Bucket.Bucket.Properties)[0].System.Value,
})
case "add_service_property_to_bucket":
tk.tree.Find(tree.FindRequest{
ElementType: "bucket",
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
Id: uuid.NewV4(),
Inheritance: (*q.Bucket.Bucket.Properties)[0].Inheritance,
ChildrenOnly: (*q.Bucket.Bucket.Properties)[0].ChildrenOnly,
View: (*q.Bucket.Bucket.Properties)[0].View,
Service: (*q.Bucket.Bucket.Properties)[0].Service.Name,
Attributes: (*q.Bucket.Bucket.Properties)[0].Service.Attributes,
})
case `delete_service_property_from_bucket`:
srcUUID, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `bucket`,
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
SourceId: srcUUID,
View: (*q.Bucket.Bucket.Properties)[0].View,
Service: (*q.Bucket.Bucket.Properties)[0].Service.Name,
})
case "add_oncall_property_to_bucket":
oncallId, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "bucket",
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
Id: uuid.NewV4(),
OncallId: oncallId,
Inheritance: (*q.Bucket.Bucket.Properties)[0].Inheritance,
ChildrenOnly: (*q.Bucket.Bucket.Properties)[0].ChildrenOnly,
View: (*q.Bucket.Bucket.Properties)[0].View,
Name: (*q.Bucket.Bucket.Properties)[0].Oncall.Name,
Number: (*q.Bucket.Bucket.Properties)[0].Oncall.Number,
})
case `delete_oncall_property_from_bucket`:
srcUUID, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].SourceInstanceId)
oncallId, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `bucket`,
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
SourceId: srcUUID,
OncallId: oncallId,
View: (*q.Bucket.Bucket.Properties)[0].View,
Name: (*q.Bucket.Bucket.Properties)[0].Oncall.Name,
Number: (*q.Bucket.Bucket.Properties)[0].Oncall.Number,
})
case "add_custom_property_to_bucket":
customId, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "bucket",
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
Id: uuid.NewV4(),
CustomId: customId,
Inheritance: (*q.Bucket.Bucket.Properties)[0].Inheritance,
ChildrenOnly: (*q.Bucket.Bucket.Properties)[0].ChildrenOnly,
View: (*q.Bucket.Bucket.Properties)[0].View,
Key: (*q.Bucket.Bucket.Properties)[0].Custom.Name,
Value: (*q.Bucket.Bucket.Properties)[0].Custom.Value,
})
case `delete_custom_property_from_bucket`:
srcUUID, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].SourceInstanceId)
customId, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `bucket`,
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
SourceId: srcUUID,
CustomId: customId,
View: (*q.Bucket.Bucket.Properties)[0].View,
Key: (*q.Bucket.Bucket.Properties)[0].Custom.Name,
Value: (*q.Bucket.Bucket.Properties)[0].Custom.Value,
})
	//
	// GROUP MANIPULATION REQUESTS
	//
	// Create/delete/move groups inside the in-memory tree and manage
	// their system/service/oncall/custom properties. All mutations go
	// through tk.tree; the generated actions are persisted later from
	// the action channel.
	case "create_group":
		tree.NewGroup(tree.GroupSpec{
			Id:   uuid.NewV4().String(),
			Name: q.Group.Group.Name,
			Team: tk.team,
		}).Attach(tree.AttachRequest{
			Root:       tk.tree,
			ParentType: "bucket",
			ParentId:   q.Group.Group.BucketId,
		})
	case "delete_group":
		tk.tree.Find(tree.FindRequest{
			ElementType: "group",
			ElementId:   q.Group.Group.Id,
		}, true).(tree.BucketAttacher).Destroy()
	// Detach reparents the group directly under its bucket
	case "reset_group_to_bucket":
		tk.tree.Find(tree.FindRequest{
			ElementType: "group",
			ElementId:   q.Group.Group.Id,
		}, true).(tree.BucketAttacher).Detach()
	// the member group to move is the first entry of MemberGroups
	case "add_group_to_group":
		tk.tree.Find(tree.FindRequest{
			ElementType: "group",
			ElementId:   (*q.Group.Group.MemberGroups)[0].Id,
		}, true).(tree.BucketAttacher).ReAttach(tree.AttachRequest{
			Root:       tk.tree,
			ParentType: "group",
			ParentId:   q.Group.Group.Id,
		})
	case "add_system_property_to_group":
		tk.tree.Find(tree.FindRequest{
			ElementType: "group",
			ElementId:   q.Group.Group.Id,
		}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
			Id:           uuid.NewV4(),
			Inheritance:  (*q.Group.Group.Properties)[0].Inheritance,
			ChildrenOnly: (*q.Group.Group.Properties)[0].ChildrenOnly,
			View:         (*q.Group.Group.Properties)[0].View,
			Key:          (*q.Group.Group.Properties)[0].System.Name,
			Value:        (*q.Group.Group.Properties)[0].System.Value,
		})
	case `delete_system_property_from_group`:
		// uuid parse error discarded — presumably validated upstream
		srcUUID, _ := uuid.FromString((*q.Group.Group.Properties)[0].SourceInstanceId)
		tk.tree.Find(tree.FindRequest{
			ElementType: `group`,
			ElementId:   q.Group.Group.Id,
		}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
			SourceId: srcUUID,
			View:     (*q.Group.Group.Properties)[0].View,
			Key:      (*q.Group.Group.Properties)[0].System.Name,
			Value:    (*q.Group.Group.Properties)[0].System.Value,
		})
	case "add_service_property_to_group":
		tk.tree.Find(tree.FindRequest{
			ElementType: "group",
			ElementId:   q.Group.Group.Id,
		}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
			Id:           uuid.NewV4(),
			Inheritance:  (*q.Group.Group.Properties)[0].Inheritance,
			ChildrenOnly: (*q.Group.Group.Properties)[0].ChildrenOnly,
			View:         (*q.Group.Group.Properties)[0].View,
			Service:      (*q.Group.Group.Properties)[0].Service.Name,
			Attributes:   (*q.Group.Group.Properties)[0].Service.Attributes,
		})
	case `delete_service_property_from_group`:
		srcUUID, _ := uuid.FromString((*q.Group.Group.Properties)[0].SourceInstanceId)
		tk.tree.Find(tree.FindRequest{
			ElementType: `group`,
			ElementId:   q.Group.Group.Id,
		}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
			SourceId: srcUUID,
			View:     (*q.Group.Group.Properties)[0].View,
			Service:  (*q.Group.Group.Properties)[0].Service.Name,
		})
	case "add_oncall_property_to_group":
		oncallId, _ := uuid.FromString((*q.Group.Group.Properties)[0].Oncall.Id)
		tk.tree.Find(tree.FindRequest{
			ElementType: "group",
			ElementId:   q.Group.Group.Id,
		}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
			Id:           uuid.NewV4(),
			OncallId:     oncallId,
			Inheritance:  (*q.Group.Group.Properties)[0].Inheritance,
			ChildrenOnly: (*q.Group.Group.Properties)[0].ChildrenOnly,
			View:         (*q.Group.Group.Properties)[0].View,
			Name:         (*q.Group.Group.Properties)[0].Oncall.Name,
			Number:       (*q.Group.Group.Properties)[0].Oncall.Number,
		})
	case `delete_oncall_property_from_group`:
		srcUUID, _ := uuid.FromString((*q.Group.Group.Properties)[0].SourceInstanceId)
		oncallId, _ := uuid.FromString((*q.Group.Group.Properties)[0].Oncall.Id)
		tk.tree.Find(tree.FindRequest{
			ElementType: `group`,
			ElementId:   q.Group.Group.Id,
		}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
			SourceId: srcUUID,
			OncallId: oncallId,
			View:     (*q.Group.Group.Properties)[0].View,
			Name:     (*q.Group.Group.Properties)[0].Oncall.Name,
			Number:   (*q.Group.Group.Properties)[0].Oncall.Number,
		})
	case "add_custom_property_to_group":
		customId, _ := uuid.FromString((*q.Group.Group.Properties)[0].Custom.Id)
		tk.tree.Find(tree.FindRequest{
			ElementType: "group",
			ElementId:   q.Group.Group.Id,
		}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
			Id:           uuid.NewV4(),
			CustomId:     customId,
			Inheritance:  (*q.Group.Group.Properties)[0].Inheritance,
			ChildrenOnly: (*q.Group.Group.Properties)[0].ChildrenOnly,
			View:         (*q.Group.Group.Properties)[0].View,
			Key:          (*q.Group.Group.Properties)[0].Custom.Name,
			Value:        (*q.Group.Group.Properties)[0].Custom.Value,
		})
	case `delete_custom_property_from_group`:
		srcUUID, _ := uuid.FromString((*q.Group.Group.Properties)[0].SourceInstanceId)
		customId, _ := uuid.FromString((*q.Group.Group.Properties)[0].Custom.Id)
		tk.tree.Find(tree.FindRequest{
			ElementType: `group`,
			ElementId:   q.Group.Group.Id,
		}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
			SourceId: srcUUID,
			CustomId: customId,
			View:     (*q.Group.Group.Properties)[0].View,
			Key:      (*q.Group.Group.Properties)[0].Custom.Name,
			Value:    (*q.Group.Group.Properties)[0].Custom.Value,
		})
	//
	// CLUSTER MANIPULATION REQUESTS
	//
	// Same pattern as the group cases: create/delete/move clusters in
	// the in-memory tree and manage their properties.
	case "create_cluster":
		tree.NewCluster(tree.ClusterSpec{
			Id:   uuid.NewV4().String(),
			Name: q.Cluster.Cluster.Name,
			Team: tk.team,
		}).Attach(tree.AttachRequest{
			Root:       tk.tree,
			ParentType: "bucket",
			ParentId:   q.Cluster.Cluster.BucketId,
		})
	case "delete_cluster":
		tk.tree.Find(tree.FindRequest{
			ElementType: "cluster",
			ElementId:   q.Cluster.Cluster.Id,
		}, true).(tree.BucketAttacher).Destroy()
	case "reset_cluster_to_bucket":
		tk.tree.Find(tree.FindRequest{
			ElementType: "cluster",
			ElementId:   q.Cluster.Cluster.Id,
		}, true).(tree.BucketAttacher).Detach()
	// note: the request payload here is group-shaped — the cluster to
	// move is the first entry of the target group's MemberClusters
	case "add_cluster_to_group":
		tk.tree.Find(tree.FindRequest{
			ElementType: "cluster",
			ElementId:   (*q.Group.Group.MemberClusters)[0].Id,
		}, true).(tree.BucketAttacher).ReAttach(tree.AttachRequest{
			Root:       tk.tree,
			ParentType: "group",
			ParentId:   q.Group.Group.Id,
		})
	case "add_system_property_to_cluster":
		tk.tree.Find(tree.FindRequest{
			ElementType: "cluster",
			ElementId:   q.Cluster.Cluster.Id,
		}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
			Id:           uuid.NewV4(),
			Inheritance:  (*q.Cluster.Cluster.Properties)[0].Inheritance,
			ChildrenOnly: (*q.Cluster.Cluster.Properties)[0].ChildrenOnly,
			View:         (*q.Cluster.Cluster.Properties)[0].View,
			Key:          (*q.Cluster.Cluster.Properties)[0].System.Name,
			Value:        (*q.Cluster.Cluster.Properties)[0].System.Value,
		})
	case `delete_system_property_from_cluster`:
		// uuid parse error discarded — presumably validated upstream
		srcUUID, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].SourceInstanceId)
		tk.tree.Find(tree.FindRequest{
			ElementType: `cluster`,
			ElementId:   q.Cluster.Cluster.Id,
		}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
			SourceId: srcUUID,
			View:     (*q.Cluster.Cluster.Properties)[0].View,
			Key:      (*q.Cluster.Cluster.Properties)[0].System.Name,
			Value:    (*q.Cluster.Cluster.Properties)[0].System.Value,
		})
	case "add_service_property_to_cluster":
		tk.tree.Find(tree.FindRequest{
			ElementType: "cluster",
			ElementId:   q.Cluster.Cluster.Id,
		}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
			Id:           uuid.NewV4(),
			Inheritance:  (*q.Cluster.Cluster.Properties)[0].Inheritance,
			ChildrenOnly: (*q.Cluster.Cluster.Properties)[0].ChildrenOnly,
			View:         (*q.Cluster.Cluster.Properties)[0].View,
			Service:      (*q.Cluster.Cluster.Properties)[0].Service.Name,
			Attributes:   (*q.Cluster.Cluster.Properties)[0].Service.Attributes,
		})
	case `delete_service_property_from_cluster`:
		srcUUID, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].SourceInstanceId)
		tk.tree.Find(tree.FindRequest{
			ElementType: `cluster`,
			ElementId:   q.Cluster.Cluster.Id,
		}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
			SourceId: srcUUID,
			View:     (*q.Cluster.Cluster.Properties)[0].View,
			Service:  (*q.Cluster.Cluster.Properties)[0].Service.Name,
		})
	case "add_oncall_property_to_cluster":
		oncallId, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].Oncall.Id)
		tk.tree.Find(tree.FindRequest{
			ElementType: "cluster",
			ElementId:   q.Cluster.Cluster.Id,
		}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
			Id:           uuid.NewV4(),
			OncallId:     oncallId,
			Inheritance:  (*q.Cluster.Cluster.Properties)[0].Inheritance,
			ChildrenOnly: (*q.Cluster.Cluster.Properties)[0].ChildrenOnly,
			View:         (*q.Cluster.Cluster.Properties)[0].View,
			Name:         (*q.Cluster.Cluster.Properties)[0].Oncall.Name,
			Number:       (*q.Cluster.Cluster.Properties)[0].Oncall.Number,
		})
	case `delete_oncall_property_from_cluster`:
		srcUUID, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].SourceInstanceId)
		oncallId, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].Oncall.Id)
		tk.tree.Find(tree.FindRequest{
			ElementType: `cluster`,
			ElementId:   q.Cluster.Cluster.Id,
		}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
			SourceId: srcUUID,
			OncallId: oncallId,
			View:     (*q.Cluster.Cluster.Properties)[0].View,
			Name:     (*q.Cluster.Cluster.Properties)[0].Oncall.Name,
			Number:   (*q.Cluster.Cluster.Properties)[0].Oncall.Number,
		})
	case "add_custom_property_to_cluster":
		customId, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].Custom.Id)
		tk.tree.Find(tree.FindRequest{
			ElementType: "cluster",
			ElementId:   q.Cluster.Cluster.Id,
		}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
			Id:           uuid.NewV4(),
			CustomId:     customId,
			Inheritance:  (*q.Cluster.Cluster.Properties)[0].Inheritance,
			ChildrenOnly: (*q.Cluster.Cluster.Properties)[0].ChildrenOnly,
			View:         (*q.Cluster.Cluster.Properties)[0].View,
			Key:          (*q.Cluster.Cluster.Properties)[0].Custom.Name,
			Value:        (*q.Cluster.Cluster.Properties)[0].Custom.Value,
		})
	case `delete_custom_property_from_cluster`:
		srcUUID, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].SourceInstanceId)
		customId, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].Custom.Id)
		tk.tree.Find(tree.FindRequest{
			ElementType: `cluster`,
			ElementId:   q.Cluster.Cluster.Id,
		}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
			SourceId: srcUUID,
			CustomId: customId,
			View:     (*q.Cluster.Cluster.Properties)[0].View,
			Key:      (*q.Cluster.Cluster.Properties)[0].Custom.Name,
			Value:    (*q.Cluster.Cluster.Properties)[0].Custom.Value,
		})
	//
	// NODE MANIPULATION REQUESTS
	//
	// Nodes arrive with an externally assigned id (unlike groups and
	// clusters, which get a fresh uuid here); otherwise the same
	// attach/detach/property pattern applies.
	case "assign_node":
		tree.NewNode(tree.NodeSpec{
			Id:       q.Node.Node.Id,
			AssetId:  q.Node.Node.AssetId,
			Name:     q.Node.Node.Name,
			Team:     q.Node.Node.TeamId,
			ServerId: q.Node.Node.ServerId,
			Online:   q.Node.Node.IsOnline,
			Deleted:  q.Node.Node.IsDeleted,
		}).Attach(tree.AttachRequest{
			Root:       tk.tree,
			ParentType: "bucket",
			ParentId:   q.Node.Node.Config.BucketId,
		})
	case "delete_node":
		tk.tree.Find(tree.FindRequest{
			ElementType: "node",
			ElementId:   q.Node.Node.Id,
		}, true).(tree.BucketAttacher).Destroy()
	case "reset_node_to_bucket":
		tk.tree.Find(tree.FindRequest{
			ElementType: "node",
			ElementId:   q.Node.Node.Id,
		}, true).(tree.BucketAttacher).Detach()
	// the node to move is taken from the target container's member list
	case "add_node_to_group":
		tk.tree.Find(tree.FindRequest{
			ElementType: "node",
			ElementId:   (*q.Group.Group.MemberNodes)[0].Id,
		}, true).(tree.BucketAttacher).ReAttach(tree.AttachRequest{
			Root:       tk.tree,
			ParentType: "group",
			ParentId:   q.Group.Group.Id,
		})
	case "add_node_to_cluster":
		tk.tree.Find(tree.FindRequest{
			ElementType: "node",
			ElementId:   (*q.Cluster.Cluster.Members)[0].Id,
		}, true).(tree.BucketAttacher).ReAttach(tree.AttachRequest{
			Root:       tk.tree,
			ParentType: "cluster",
			ParentId:   q.Cluster.Cluster.Id,
		})
	case "add_system_property_to_node":
		tk.tree.Find(tree.FindRequest{
			ElementType: "node",
			ElementId:   q.Node.Node.Id,
		}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
			Id:           uuid.NewV4(),
			Inheritance:  (*q.Node.Node.Properties)[0].Inheritance,
			ChildrenOnly: (*q.Node.Node.Properties)[0].ChildrenOnly,
			View:         (*q.Node.Node.Properties)[0].View,
			Key:          (*q.Node.Node.Properties)[0].System.Name,
			Value:        (*q.Node.Node.Properties)[0].System.Value,
		})
	case `delete_system_property_from_node`:
		// uuid parse error discarded — presumably validated upstream
		srcUUID, _ := uuid.FromString((*q.Node.Node.Properties)[0].SourceInstanceId)
		tk.tree.Find(tree.FindRequest{
			ElementType: `node`,
			ElementId:   q.Node.Node.Id,
		}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
			SourceId: srcUUID,
			View:     (*q.Node.Node.Properties)[0].View,
			Key:      (*q.Node.Node.Properties)[0].System.Name,
			Value:    (*q.Node.Node.Properties)[0].System.Value,
		})
	case "add_service_property_to_node":
		tk.tree.Find(tree.FindRequest{
			ElementType: "node",
			ElementId:   q.Node.Node.Id,
		}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
			Id:           uuid.NewV4(),
			Inheritance:  (*q.Node.Node.Properties)[0].Inheritance,
			ChildrenOnly: (*q.Node.Node.Properties)[0].ChildrenOnly,
			View:         (*q.Node.Node.Properties)[0].View,
			Service:      (*q.Node.Node.Properties)[0].Service.Name,
			Attributes:   (*q.Node.Node.Properties)[0].Service.Attributes,
		})
	case `delete_service_property_from_node`:
		srcUUID, _ := uuid.FromString((*q.Node.Node.Properties)[0].SourceInstanceId)
		tk.tree.Find(tree.FindRequest{
			ElementType: `node`,
			ElementId:   q.Node.Node.Id,
		}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
			SourceId: srcUUID,
			View:     (*q.Node.Node.Properties)[0].View,
			Service:  (*q.Node.Node.Properties)[0].Service.Name,
		})
	case "add_oncall_property_to_node":
		oncallId, _ := uuid.FromString((*q.Node.Node.Properties)[0].Oncall.Id)
		tk.tree.Find(tree.FindRequest{
			ElementType: "node",
			ElementId:   q.Node.Node.Id,
		}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
			Id:           uuid.NewV4(),
			OncallId:     oncallId,
			Inheritance:  (*q.Node.Node.Properties)[0].Inheritance,
			ChildrenOnly: (*q.Node.Node.Properties)[0].ChildrenOnly,
			View:         (*q.Node.Node.Properties)[0].View,
			Name:         (*q.Node.Node.Properties)[0].Oncall.Name,
			Number:       (*q.Node.Node.Properties)[0].Oncall.Number,
		})
	case `delete_oncall_property_from_node`:
		srcUUID, _ := uuid.FromString((*q.Node.Node.Properties)[0].SourceInstanceId)
		oncallId, _ := uuid.FromString((*q.Node.Node.Properties)[0].Oncall.Id)
		tk.tree.Find(tree.FindRequest{
			ElementType: `node`,
			ElementId:   q.Node.Node.Id,
		}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
			SourceId: srcUUID,
			OncallId: oncallId,
			View:     (*q.Node.Node.Properties)[0].View,
			Name:     (*q.Node.Node.Properties)[0].Oncall.Name,
			Number:   (*q.Node.Node.Properties)[0].Oncall.Number,
		})
	case "add_custom_property_to_node":
		customId, _ := uuid.FromString((*q.Node.Node.Properties)[0].Custom.Id)
		tk.tree.Find(tree.FindRequest{
			ElementType: "node",
			ElementId:   q.Node.Node.Id,
		}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
			Id:           uuid.NewV4(),
			CustomId:     customId,
			Inheritance:  (*q.Node.Node.Properties)[0].Inheritance,
			ChildrenOnly: (*q.Node.Node.Properties)[0].ChildrenOnly,
			View:         (*q.Node.Node.Properties)[0].View,
			Key:          (*q.Node.Node.Properties)[0].Custom.Name,
			Value:        (*q.Node.Node.Properties)[0].Custom.Value,
		})
	case `delete_custom_property_from_node`:
		srcUUID, _ := uuid.FromString((*q.Node.Node.Properties)[0].SourceInstanceId)
		customId, _ := uuid.FromString((*q.Node.Node.Properties)[0].Custom.Id)
		tk.tree.Find(tree.FindRequest{
			ElementType: `node`,
			ElementId:   q.Node.Node.Id,
		}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
			SourceId: srcUUID,
			CustomId: customId,
			View:     (*q.Node.Node.Properties)[0].View,
			Key:      (*q.Node.Node.Properties)[0].Custom.Name,
			Value:    (*q.Node.Node.Properties)[0].Custom.Value,
		})
	//
	// CHECK MANIPULATION REQUESTS
	//
	// All five add_check_to_* actions (and all five remove_check_from_*
	// actions) share one handler via fallthrough: the object type/id to
	// operate on comes from the check configuration itself.
	case `add_check_to_repository`:
		fallthrough
	case `add_check_to_bucket`:
		fallthrough
	case `add_check_to_group`:
		fallthrough
	case `add_check_to_cluster`:
		fallthrough
	case `add_check_to_node`:
		// on conversion failure err is left set and inspected after
		// the switch
		if treeCheck, err = tk.convertCheck(&q.CheckConfig.CheckConfig); err == nil {
			tk.tree.Find(tree.FindRequest{
				ElementType: q.CheckConfig.CheckConfig.ObjectType,
				ElementId:   q.CheckConfig.CheckConfig.ObjectId,
			}, true).SetCheck(*treeCheck)
		}
	case `remove_check_from_repository`:
		fallthrough
	case `remove_check_from_bucket`:
		fallthrough
	case `remove_check_from_group`:
		fallthrough
	case `remove_check_from_cluster`:
		fallthrough
	case `remove_check_from_node`:
		if treeCheck, err = tk.convertCheckForDelete(&q.CheckConfig.CheckConfig); err == nil {
			tk.tree.Find(tree.FindRequest{
				ElementType: q.CheckConfig.CheckConfig.ObjectType,
				ElementId:   q.CheckConfig.CheckConfig.ObjectId,
			}, true).DeleteCheck(*treeCheck)
		}
	}
	// check if we accumulated an error in one of the switch cases
	if err != nil {
		goto bailout
	}
	// recalculate check instances
	tk.tree.ComputeCheckInstances()
	// open multi-statement transaction; everything below is persisted
	// atomically — any failure jumps to bailout, and the deferred
	// Rollback is a no-op once the transaction commits
	if tx, err = tk.conn.Begin(); err != nil {
		goto bailout
	}
	defer tx.Rollback()
	// prepare statements within tx context; each prepared statement is
	// closed via defer when this function returns
	if txStmtPropertyInstanceCreate, err = tx.Prepare(tkStmtPropertyInstanceCreate); err != nil {
		log.Println("Failed to prepare: tkStmtPropertyInstanceCreate")
		goto bailout
	}
	defer txStmtPropertyInstanceCreate.Close()
	if txStmtCreateCheckConfigurationBase, err = tx.Prepare(tkStmtCreateCheckConfigurationBase); err != nil {
		log.Println("Failed to prepare: tkStmtCreateCheckConfigurationBase")
		goto bailout
	}
	defer txStmtCreateCheckConfigurationBase.Close()
	if txStmtCreateCheckConfigurationThreshold, err = tx.Prepare(tkStmtCreateCheckConfigurationThreshold); err != nil {
		log.Println("Failed to prepare: tkStmtCreateCheckConfigurationThreshold")
		goto bailout
	}
	defer txStmtCreateCheckConfigurationThreshold.Close()
	if txStmtCreateCheckConfigurationConstraintSystem, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintSystem); err != nil {
		log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintSystem")
		goto bailout
	}
	defer txStmtCreateCheckConfigurationConstraintSystem.Close()
	if txStmtCreateCheckConfigurationConstraintNative, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintNative); err != nil {
		log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintNative")
		goto bailout
	}
	defer txStmtCreateCheckConfigurationConstraintNative.Close()
	if txStmtCreateCheckConfigurationConstraintOncall, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintOncall); err != nil {
		log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintOncall")
		goto bailout
	}
	defer txStmtCreateCheckConfigurationConstraintOncall.Close()
	if txStmtCreateCheckConfigurationConstraintCustom, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintCustom); err != nil {
		log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintCustom")
		goto bailout
	}
	defer txStmtCreateCheckConfigurationConstraintCustom.Close()
	if txStmtCreateCheckConfigurationConstraintService, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintService); err != nil {
		log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintService")
		goto bailout
	}
	defer txStmtCreateCheckConfigurationConstraintService.Close()
	if txStmtCreateCheckConfigurationConstraintAttribute, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintAttribute); err != nil {
		log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintAttribute")
		goto bailout
	}
	defer txStmtCreateCheckConfigurationConstraintAttribute.Close()
	if txStmtCreateCheck, err = tx.Prepare(tkStmtCreateCheck); err != nil {
		log.Println("Failed to prepare: tkStmtCreateCheck")
		goto bailout
	}
	defer txStmtCreateCheck.Close()
	if txStmtCreateCheckInstance, err = tx.Prepare(tkStmtCreateCheckInstance); err != nil {
		log.Println("Failed to prepare: tkStmtCreateCheckInstance")
		goto bailout
	}
	defer txStmtCreateCheckInstance.Close()
	if txStmtCreateCheckInstanceConfiguration, err = tx.Prepare(tkStmtCreateCheckInstanceConfiguration); err != nil {
		log.Println("Failed to prepare: tkStmtCreateCheckInstanceConfiguration")
		goto bailout
	}
	defer txStmtCreateCheckInstanceConfiguration.Close()
if txStmtDeleteCheck, err = tx.Prepare(stmt.TxMarkCheckDeleted); err != nil {
log.Println("Failed to prepare: txStmtDeleteCheck")
goto bailout
}
if txStmtDeleteCheckInstance, err = tx.Prepare(stmt.TxMarkCheckInstanceDeleted); err != nil {
log.Println("Failed to prepare: txStmtDeleteCheckInstance")
goto bailout
}
	//
	// REPOSITORY
	// property-instance creation statements per object level; same
	// prepare/log/bailout/defer-Close pattern throughout
	if txStmtRepositoryPropertyOncallCreate, err = tx.Prepare(tkStmtRepositoryPropertyOncallCreate); err != nil {
		log.Println("Failed to prepare: tkStmtRepositoryPropertyOncallCreate")
		goto bailout
	}
	defer txStmtRepositoryPropertyOncallCreate.Close()
	if txStmtRepositoryPropertyServiceCreate, err = tx.Prepare(tkStmtRepositoryPropertyServiceCreate); err != nil {
		log.Println("Failed to prepare: tkStmtRepositoryPropertyServiceCreate")
		goto bailout
	}
	defer txStmtRepositoryPropertyServiceCreate.Close()
	if txStmtRepositoryPropertySystemCreate, err = tx.Prepare(tkStmtRepositoryPropertySystemCreate); err != nil {
		log.Println("Failed to prepare: tkStmtRepositoryPropertySystemCreate")
		goto bailout
	}
	defer txStmtRepositoryPropertySystemCreate.Close()
	if txStmtRepositoryPropertyCustomCreate, err = tx.Prepare(tkStmtRepositoryPropertyCustomCreate); err != nil {
		log.Println("Failed to prepare: tkStmtRepositoryPropertyCustomCreate")
		goto bailout
	}
	defer txStmtRepositoryPropertyCustomCreate.Close()
	//
	// BUCKET
	if txStmtCreateBucket, err = tx.Prepare(tkStmtCreateBucket); err != nil {
		log.Println("Failed to prepare: tkStmtCreateBucket")
		goto bailout
	}
	defer txStmtCreateBucket.Close()
	if txStmtBucketPropertyOncallCreate, err = tx.Prepare(tkStmtBucketPropertyOncallCreate); err != nil {
		log.Println("Failed to prepare: tkStmtBucketPropertyOncallCreate")
		goto bailout
	}
	defer txStmtBucketPropertyOncallCreate.Close()
	if txStmtBucketPropertyServiceCreate, err = tx.Prepare(tkStmtBucketPropertyServiceCreate); err != nil {
		log.Println("Failed to prepare: tkStmtBucketPropertyServiceCreate")
		goto bailout
	}
	defer txStmtBucketPropertyServiceCreate.Close()
	if txStmtBucketPropertySystemCreate, err = tx.Prepare(tkStmtBucketPropertySystemCreate); err != nil {
		log.Println("Failed to prepare: tkStmtBucketPropertySystemCreate")
		goto bailout
	}
	defer txStmtBucketPropertySystemCreate.Close()
	if txStmtBucketPropertyCustomCreate, err = tx.Prepare(tkStmtBucketPropertyCustomCreate); err != nil {
		log.Println("Failed to prepare: tkStmtBucketPropertyCustomCreate")
		goto bailout
	}
	defer txStmtBucketPropertyCustomCreate.Close()
	//
	// GROUP
	if txStmtGroupCreate, err = tx.Prepare(tkStmtGroupCreate); err != nil {
		log.Println("Failed to prepare: tkStmtGroupCreate")
		goto bailout
	}
	defer txStmtGroupCreate.Close()
	if txStmtGroupUpdate, err = tx.Prepare(tkStmtGroupUpdate); err != nil {
		log.Println("Failed to prepare: tkStmtGroupUpdate")
		goto bailout
	}
	defer txStmtGroupUpdate.Close()
	if txStmtGroupDelete, err = tx.Prepare(tkStmtGroupDelete); err != nil {
		log.Println("Failed to prepare: tkStmtGroupDelete")
		goto bailout
	}
	defer txStmtGroupDelete.Close()
	if txStmtGroupMemberNewNode, err = tx.Prepare(tkStmtGroupMemberNewNode); err != nil {
		log.Println("Failed to prepare: tkStmtGroupMemberNewNode")
		goto bailout
	}
	defer txStmtGroupMemberNewNode.Close()
	if txStmtGroupMemberNewCluster, err = tx.Prepare(tkStmtGroupMemberNewCluster); err != nil {
		log.Println("Failed to prepare: tkStmtGroupMemberNewCluster")
		goto bailout
	}
	defer txStmtGroupMemberNewCluster.Close()
	if txStmtGroupMemberNewGroup, err = tx.Prepare(tkStmtGroupMemberNewGroup); err != nil {
		log.Println("Failed to prepare: tkStmtGroupMemberNewGroup")
		goto bailout
	}
	defer txStmtGroupMemberNewGroup.Close()
	if txStmtGroupMemberRemoveNode, err = tx.Prepare(tkStmtGroupMemberRemoveNode); err != nil {
		log.Println("Failed to prepare: tkStmtGroupMemberRemoveNode")
		goto bailout
	}
	defer txStmtGroupMemberRemoveNode.Close()
	if txStmtGroupMemberRemoveCluster, err = tx.Prepare(tkStmtGroupMemberRemoveCluster); err != nil {
		log.Println("Failed to prepare: tkStmtGroupMemberRemoveCluster")
		goto bailout
	}
	defer txStmtGroupMemberRemoveCluster.Close()
	if txStmtGroupMemberRemoveGroup, err = tx.Prepare(tkStmtGroupMemberRemoveGroup); err != nil {
		log.Println("Failed to prepare: tkStmtGroupMemberRemoveGroup")
		goto bailout
	}
	defer txStmtGroupMemberRemoveGroup.Close()
	if txStmtGroupPropertyOncallCreate, err = tx.Prepare(tkStmtGroupPropertyOncallCreate); err != nil {
		log.Println("Failed to prepare: tkStmtGroupPropertyOncallCreate")
		goto bailout
	}
	defer txStmtGroupPropertyOncallCreate.Close()
	if txStmtGroupPropertyServiceCreate, err = tx.Prepare(tkStmtGroupPropertyServiceCreate); err != nil {
		log.Println("Failed to prepare: tkStmtGroupPropertyServiceCreate")
		goto bailout
	}
	defer txStmtGroupPropertyServiceCreate.Close()
	if txStmtGroupPropertySystemCreate, err = tx.Prepare(tkStmtGroupPropertySystemCreate); err != nil {
		log.Println("Failed to prepare: tkStmtGroupPropertySystemCreate")
		goto bailout
	}
	defer txStmtGroupPropertySystemCreate.Close()
	if txStmtGroupPropertyCustomCreate, err = tx.Prepare(tkStmtGroupPropertyCustomCreate); err != nil {
		log.Println("Failed to prepare: tkStmtGroupPropertyCustomCreate")
		goto bailout
	}
	defer txStmtGroupPropertyCustomCreate.Close()
	//
	// CLUSTER
	if txStmtClusterCreate, err = tx.Prepare(tkStmtClusterCreate); err != nil {
		log.Println("Failed to prepare: tkStmtClusterCreate")
		goto bailout
	}
	defer txStmtClusterCreate.Close()
	if txStmtClusterUpdate, err = tx.Prepare(tkStmtClusterUpdate); err != nil {
		log.Println("Failed to prepare: tkStmtClusterUpdate")
		goto bailout
	}
	defer txStmtClusterUpdate.Close()
	if txStmtClusterDelete, err = tx.Prepare(tkStmtClusterDelete); err != nil {
		log.Println("Failed to prepare: tkStmtClusterDelete")
		goto bailout
	}
	defer txStmtClusterDelete.Close()
	if txStmtClusterMemberNew, err = tx.Prepare(tkStmtClusterMemberNew); err != nil {
		log.Println("Failed to prepare: tkStmtClusterMemberNew")
		goto bailout
	}
	defer txStmtClusterMemberNew.Close()
	if txStmtClusterMemberRemove, err = tx.Prepare(tkStmtClusterMemberRemove); err != nil {
		log.Println("Failed to prepare: tkStmtClusterMemberRemove")
		goto bailout
	}
	defer txStmtClusterMemberRemove.Close()
	if txStmtClusterPropertyOncallCreate, err = tx.Prepare(tkStmtClusterPropertyOncallCreate); err != nil {
		log.Println("Failed to prepare: tkStmtClusterPropertyOncallCreate")
		goto bailout
	}
	defer txStmtClusterPropertyOncallCreate.Close()
	if txStmtClusterPropertyServiceCreate, err = tx.Prepare(tkStmtClusterPropertyServiceCreate); err != nil {
		log.Println("Failed to prepare: tkStmtClusterPropertyServiceCreate")
		goto bailout
	}
	defer txStmtClusterPropertyServiceCreate.Close()
	if txStmtClusterPropertySystemCreate, err = tx.Prepare(tkStmtClusterPropertySystemCreate); err != nil {
		log.Println("Failed to prepare: tkStmtClusterPropertySystemCreate")
		goto bailout
	}
	defer txStmtClusterPropertySystemCreate.Close()
	if txStmtClusterPropertyCustomCreate, err = tx.Prepare(tkStmtClusterPropertyCustomCreate); err != nil {
		log.Println("Failed to prepare: tkStmtClusterPropertyCustomCreate")
		goto bailout
	}
	defer txStmtClusterPropertyCustomCreate.Close()
	//
	// NODE
	if txStmtBucketAssignNode, err = tx.Prepare(tkStmtBucketAssignNode); err != nil {
		log.Println("Failed to prepare: tkStmtBucketAssignNode")
		goto bailout
	}
	defer txStmtBucketAssignNode.Close()
	if txStmtUpdateNodeState, err = tx.Prepare(tkStmtUpdateNodeState); err != nil {
		log.Println("Failed to prepare: tkStmtUpdateNodeState")
		goto bailout
	}
	defer txStmtUpdateNodeState.Close()
	if txStmtNodeUnassignFromBucket, err = tx.Prepare(tkStmtNodeUnassignFromBucket); err != nil {
		log.Println("Failed to prepare: tkStmtNodeUnassignFromBucket")
		goto bailout
	}
	defer txStmtNodeUnassignFromBucket.Close()
	if txStmtNodePropertyOncallCreate, err = tx.Prepare(tkStmtNodePropertyOncallCreate); err != nil {
		log.Println("Failed to prepare: tkStmtNodePropertyOncallCreate")
		goto bailout
	}
	defer txStmtNodePropertyOncallCreate.Close()
	if txStmtNodePropertyServiceCreate, err = tx.Prepare(tkStmtNodePropertyServiceCreate); err != nil {
		log.Println("Failed to prepare: tkStmtNodePropertyServiceCreate")
		goto bailout
	}
	defer txStmtNodePropertyServiceCreate.Close()
	if txStmtNodePropertySystemCreate, err = tx.Prepare(tkStmtNodePropertySystemCreate); err != nil {
		log.Println("Failed to prepare: tkStmtNodePropertySystemCreate")
		goto bailout
	}
	defer txStmtNodePropertySystemCreate.Close()
	if txStmtNodePropertyCustomCreate, err = tx.Prepare(tkStmtNodePropertyCustomCreate); err != nil {
		log.Println("Failed to prepare: tkStmtNodePropertyCustomCreate")
		goto bailout
	}
	defer txStmtNodePropertyCustomCreate.Close()
	// defer constraint checks so rows may be inserted out of
	// referential order within this transaction
	if _, err = tx.Exec(tkStmtDeferAllConstraints); err != nil {
		log.Println("Failed to exec: tkStmtDeferAllConstraints")
		goto bailout
	}
	// save the check configuration as part of the transaction before
	// processing the action channel
	if strings.Contains(q.Action, "add_check_to_") {
		// an empty BucketId maps to SQL NULL (repository-level checks)
		if q.CheckConfig.CheckConfig.BucketId != "" {
			nullBucket = sql.NullString{
				String: q.CheckConfig.CheckConfig.BucketId,
				Valid:  true,
			}
		} else {
			nullBucket = sql.NullString{String: "", Valid: false}
		}
		if _, err = txStmtCreateCheckConfigurationBase.Exec(
			q.CheckConfig.CheckConfig.Id,
			q.CheckConfig.CheckConfig.Name,
			int64(q.CheckConfig.CheckConfig.Interval),
			q.CheckConfig.CheckConfig.RepositoryId,
			nullBucket,
			q.CheckConfig.CheckConfig.CapabilityId,
			q.CheckConfig.CheckConfig.ObjectId,
			q.CheckConfig.CheckConfig.ObjectType,
			q.CheckConfig.CheckConfig.IsActive,
			q.CheckConfig.CheckConfig.IsEnabled,
			q.CheckConfig.CheckConfig.Inheritance,
			q.CheckConfig.CheckConfig.ChildrenOnly,
			q.CheckConfig.CheckConfig.ExternalId,
		); err != nil {
			goto bailout
		}
		// persist all thresholds; the first failure leaves err set and
		// the check below bails out
	threshloop:
		for _, thr := range q.CheckConfig.CheckConfig.Thresholds {
			if _, err = txStmtCreateCheckConfigurationThreshold.Exec(
				q.CheckConfig.CheckConfig.Id,
				thr.Predicate.Symbol,
				strconv.FormatInt(thr.Value, 10),
				thr.Level.Name,
			); err != nil {
				break threshloop
			}
		}
		if err != nil {
			goto bailout
		}
		// persist all constraints, dispatching on constraint type
	constrloop:
		for _, constr := range q.CheckConfig.CheckConfig.Constraints {
			switch constr.ConstraintType {
			case "native":
				if _, err = txStmtCreateCheckConfigurationConstraintNative.Exec(
					q.CheckConfig.CheckConfig.Id,
					constr.Native.Name,
					constr.Native.Value,
				); err != nil {
					break constrloop
				}
			case "oncall":
				if _, err = txStmtCreateCheckConfigurationConstraintOncall.Exec(
					q.CheckConfig.CheckConfig.Id,
					constr.Oncall.Id,
				); err != nil {
					break constrloop
				}
			case "custom":
				if _, err = txStmtCreateCheckConfigurationConstraintCustom.Exec(
					q.CheckConfig.CheckConfig.Id,
					constr.Custom.Id,
					constr.Custom.RepositoryId,
					constr.Custom.Value,
				); err != nil {
					break constrloop
				}
			case "system":
				if _, err = txStmtCreateCheckConfigurationConstraintSystem.Exec(
					q.CheckConfig.CheckConfig.Id,
					constr.System.Name,
					constr.System.Value,
				); err != nil {
					break constrloop
				}
			case "service":
				// service constraints must belong to the keeper's team
				if constr.Service.TeamId != tk.team {
					err = fmt.Errorf("Service constraint has mismatched TeamID values: %s/%s",
						tk.team, constr.Service.TeamId)
					fmt.Println(err)
					break constrloop
				}
				log.Printf(`SQL: tkStmtCreateCheckConfigurationConstraintService:
CheckConfig ID: %s
Team ID: %s
Service Name: %s%s`,
					q.CheckConfig.CheckConfig.Id,
					tk.team,
					constr.Service.Name, "\n")
				if _, err = txStmtCreateCheckConfigurationConstraintService.Exec(
					q.CheckConfig.CheckConfig.Id,
					tk.team,
					constr.Service.Name,
				); err != nil {
					break constrloop
				}
			case "attribute":
				if _, err = txStmtCreateCheckConfigurationConstraintAttribute.Exec(
					q.CheckConfig.CheckConfig.Id,
					constr.Attribute.Name,
					constr.Attribute.Value,
				); err != nil {
					break constrloop
				}
			}
		}
		if err != nil {
			goto bailout
		}
	}
	// mark the check configuration as deleted
	if strings.HasPrefix(q.Action, `remove_check_from_`) {
		if _, err = tx.Exec(stmt.TxMarkCheckConfigDeleted, q.CheckConfig.CheckConfig.Id); err != nil {
			goto bailout
		}
	}
// if the error channel has entries, we can fully ignore the
// action channel
for i := len(tk.errChan); i > 0; i-- {
e := <-tk.errChan
b, _ := json.Marshal(e)
log.Println(string(b))
hasErrors = true
if err == nil {
err = fmt.Errorf(e.Action)
}
}
if hasErrors {
goto bailout
}
	// drain the action channel and persist every tree action in the
	// open transaction; any Exec failure breaks out of the loop with
	// err set
actionloop:
	for i := len(tk.actionChan); i > 0; i-- {
		a := <-tk.actionChan
		// we need all messages to figure out why for example a deferred
		// constraint later failed
		//jBxX, _ := json.Marshal(a)
		//log.Printf("%s - Processing: %s\n", q.JobId.String(), string(jBxX))
		switch a.Type {
		// REPOSITORY
		case "repository":
			switch a.Action {
			case "property_new":
				// generic instance row first, then the type-specific row
				if _, err = txStmtPropertyInstanceCreate.Exec(
					a.Property.InstanceId,
					a.Property.RepositoryId,
					a.Property.SourceInstanceId,
					a.Property.SourceType,
					a.Property.InheritedFrom,
				); err != nil {
					break actionloop
				}
				switch a.Property.Type {
				case "custom":
					if _, err = txStmtRepositoryPropertyCustomCreate.Exec(
						a.Property.InstanceId,
						a.Property.SourceInstanceId,
						a.Property.Custom.RepositoryId,
						a.Property.View,
						a.Property.Custom.Id,
						a.Property.Inheritance,
						a.Property.ChildrenOnly,
						a.Property.Custom.Value,
					); err != nil {
						break actionloop
					}
				case "system":
					if _, err = txStmtRepositoryPropertySystemCreate.Exec(
						a.Property.InstanceId,
						a.Property.SourceInstanceId,
						a.Repository.Id,
						a.Property.View,
						a.Property.System.Name,
						a.Property.SourceType,
						a.Property.Inheritance,
						a.Property.ChildrenOnly,
						a.Property.System.Value,
						a.Property.IsInherited,
					); err != nil {
						break actionloop
					}
				case "service":
					if _, err = txStmtRepositoryPropertyServiceCreate.Exec(
						a.Property.InstanceId,
						a.Property.SourceInstanceId,
						a.Repository.Id,
						a.Property.View,
						a.Property.Service.Name,
						a.Property.Service.TeamId,
						a.Property.Inheritance,
						a.Property.ChildrenOnly,
					); err != nil {
						break actionloop
					}
				case "oncall":
					if _, err = txStmtRepositoryPropertyOncallCreate.Exec(
						a.Property.InstanceId,
						a.Property.SourceInstanceId,
						a.Repository.Id,
						a.Property.View,
						a.Property.Oncall.Id,
						a.Property.Inheritance,
						a.Property.ChildrenOnly,
					); err != nil {
						break actionloop
					}
				}
			case `property_delete`:
				// drop the generic instance row, then the per-type row
				if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
					a.Property.InstanceId,
				); err != nil {
					break actionloop
				}
				switch a.Property.Type {
				case `custom`:
					if _, err = tx.Exec(tkStmtRepositoryPropertyCustomDelete,
						a.Property.InstanceId,
					); err != nil {
						break actionloop
					}
				case `system`:
					if _, err = tx.Exec(tkStmtRepositoryPropertySystemDelete,
						a.Property.InstanceId,
					); err != nil {
						break actionloop
					}
				case `service`:
					if _, err = tx.Exec(tkStmtRepositoryPropertyServiceDelete,
						a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtRepositoryPropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
sql.NullString{String: "", Valid: false},
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Repository.Id,
"repository",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = txStmtDeleteCheck.Exec(
a.Check.CheckId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
// BUCKET
case "bucket":
switch a.Action {
case "create":
if _, err = txStmtCreateBucket.Exec(
a.Bucket.Id,
a.Bucket.Name,
a.Bucket.IsFrozen,
a.Bucket.IsDeleted,
a.Bucket.RepositoryId,
a.Bucket.Environment,
a.Bucket.TeamId,
); err != nil {
break actionloop
}
case "node_assignment":
if _, err = txStmtBucketAssignNode.Exec(
a.ChildNode.Id,
a.Bucket.Id,
a.Bucket.TeamId,
); err != nil {
break actionloop
}
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtBucketPropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.Custom.Id,
a.Property.Custom.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
if _, err = txStmtBucketPropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
log.Printf(`SQL: tkStmtBucketPropertyServiceCreate:
Instance ID: %s
Source Instance ID: %s
Bucket ID: %s
View: %s
Service Name: %s
Service TeamId: %s
Repository ID: %s
Inheritance Enabled: %t
Children Only: %t%s`,
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly, "\n")
if _, err = txStmtBucketPropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtBucketPropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtBucketPropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtBucketPropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtBucketPropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtBucketPropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Bucket.Id,
"bucket",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = txStmtDeleteCheck.Exec(
a.Check.CheckId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
// GROUP
case "group":
switch a.Action {
case "create":
if _, err = txStmtGroupCreate.Exec(
a.Group.Id,
a.Group.BucketId,
a.Group.Name,
a.Group.ObjectState,
a.Group.TeamId,
); err != nil {
break actionloop
}
case "update":
if _, err = txStmtGroupUpdate.Exec(
a.Group.Id,
a.Group.ObjectState,
); err != nil {
break actionloop
}
case "delete":
if _, err = txStmtGroupDelete.Exec(
a.Group.Id,
); err != nil {
break actionloop
}
case "member_new":
switch a.ChildType {
case "group":
log.Println("==> group/new membergroup")
if _, err = txStmtGroupMemberNewGroup.Exec(
a.Group.Id,
a.ChildGroup.Id,
a.Group.BucketId,
); err != nil {
break actionloop
}
case "cluster":
log.Println("==> group/new membercluster")
if _, err = txStmtGroupMemberNewCluster.Exec(
a.Group.Id,
a.ChildCluster.Id,
a.Group.BucketId,
); err != nil {
break actionloop
}
case "node":
log.Println("==> group/new membernode")
if _, err = txStmtGroupMemberNewNode.Exec(
a.Group.Id,
a.ChildNode.Id,
a.Group.BucketId,
); err != nil {
break actionloop
}
}
case "member_removed":
switch a.ChildType {
case "group":
if _, err = txStmtGroupMemberRemoveGroup.Exec(
a.Group.Id,
a.ChildGroup.Id,
); err != nil {
break actionloop
}
case "cluster":
if _, err = txStmtGroupMemberRemoveCluster.Exec(
a.Group.Id,
a.ChildCluster.Id,
); err != nil {
break actionloop
}
case "node":
if _, err = txStmtGroupMemberRemoveNode.Exec(
a.Group.Id,
a.ChildNode.Id,
); err != nil {
break actionloop
}
}
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtGroupPropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Group.Id,
a.Property.View,
a.Property.Custom.Id,
a.Property.BucketId,
a.Property.Custom.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
if _, err = txStmtGroupPropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Group.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
if _, err = txStmtGroupPropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Group.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtGroupPropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Group.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtGroupPropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtGroupPropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtGroupPropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtGroupPropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Group.Id,
"group",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = txStmtDeleteCheck.Exec(
a.Check.CheckId,
); err != nil {
break actionloop
}
case "check_instance_create":
if _, err = txStmtCreateCheckInstance.Exec(
a.CheckInstance.InstanceId,
a.CheckInstance.CheckId,
a.CheckInstance.ConfigId,
"00000000-0000-0000-0000-000000000000",
time.Now().UTC(),
); err != nil {
break actionloop
}
fallthrough
case "check_instance_update":
if _, err = txStmtCreateCheckInstanceConfiguration.Exec(
a.CheckInstance.InstanceConfigId,
a.CheckInstance.Version,
a.CheckInstance.InstanceId,
a.CheckInstance.ConstraintHash,
a.CheckInstance.ConstraintValHash,
a.CheckInstance.InstanceService,
a.CheckInstance.InstanceSvcCfgHash,
a.CheckInstance.InstanceServiceConfig,
time.Now().UTC(),
"awaiting_computation",
"none",
false,
"{}",
); err != nil {
fmt.Println(`Failed CreateCheckInstanceConfiguration`, a.CheckInstance.InstanceConfigId)
break actionloop
}
case "check_instance_delete":
if _, err = txStmtDeleteCheckInstance.Exec(
a.CheckInstance.InstanceId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
// CLUSTER
case "cluster":
switch a.Action {
case "create":
if _, err = txStmtClusterCreate.Exec(
a.Cluster.Id,
a.Cluster.Name,
a.Cluster.BucketId,
a.Cluster.ObjectState,
a.Cluster.TeamId,
); err != nil {
break actionloop
}
case "update":
if _, err = txStmtClusterUpdate.Exec(
a.Cluster.Id,
a.Cluster.ObjectState,
); err != nil {
break actionloop
}
case "delete":
if _, err = txStmtClusterDelete.Exec(
a.Cluster.Id,
); err != nil {
break actionloop
}
case "member_new":
log.Println("==> cluster/new membernode")
if _, err = txStmtClusterMemberNew.Exec(
a.Cluster.Id,
a.ChildNode.Id,
a.Cluster.BucketId,
); err != nil {
break actionloop
}
case "member_removed":
log.Println("==> cluster/new membernode")
if _, err = txStmtClusterMemberRemove.Exec(
a.Cluster.Id,
a.ChildNode.Id,
); err != nil {
break actionloop
}
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtClusterPropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.Custom.Id,
a.Property.BucketId,
a.Property.Custom.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
if _, err = txStmtClusterPropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
log.Printf(`SQL: tkStmtClusterPropertyServiceCreate:
Instance ID: %s
Source Instance ID: %s
Cluster ID: %s
View: %s
Service Name: %s
Service TeamId: %s
Repository ID: %s
Inheritance Enabled: %t
Children Only: %t%s`,
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly, "\n")
if _, err = txStmtClusterPropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtClusterPropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtClusterPropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtClusterPropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtClusterPropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtClusterPropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Cluster.Id,
"cluster",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = txStmtDeleteCheck.Exec(
a.Check.CheckId,
); err != nil {
break actionloop
}
case "check_instance_create":
if _, err = txStmtCreateCheckInstance.Exec(
a.CheckInstance.InstanceId,
a.CheckInstance.CheckId,
a.CheckInstance.ConfigId,
"00000000-0000-0000-0000-000000000000",
time.Now().UTC(),
); err != nil {
break actionloop
}
fallthrough
case "check_instance_update":
if _, err = txStmtCreateCheckInstanceConfiguration.Exec(
a.CheckInstance.InstanceConfigId,
a.CheckInstance.Version,
a.CheckInstance.InstanceId,
a.CheckInstance.ConstraintHash,
a.CheckInstance.ConstraintValHash,
a.CheckInstance.InstanceService,
a.CheckInstance.InstanceSvcCfgHash,
a.CheckInstance.InstanceServiceConfig,
time.Now().UTC(),
"awaiting_computation",
"none",
false,
"{}",
); err != nil {
fmt.Println(`Failed CreateCheckInstanceConfiguration`, a.CheckInstance.InstanceConfigId)
break actionloop
}
case "check_instance_delete":
if _, err = txStmtDeleteCheckInstance.Exec(
a.CheckInstance.InstanceId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
// NODE
case "node":
switch a.Action {
case "delete":
if _, err = txStmtNodeUnassignFromBucket.Exec(
a.Node.Id,
a.Node.Config.BucketId,
a.Node.TeamId,
); err != nil {
break actionloop
}
fallthrough // need to call txStmtUpdateNodeState for delete as well
case "update":
log.Println("==> node/update")
if _, err = txStmtUpdateNodeState.Exec(
a.Node.Id,
a.Node.State,
); err != nil {
break actionloop
}
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtNodePropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.Custom.Id,
a.Property.BucketId,
a.Property.Custom.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
log.Printf(`SQL: tkStmtNodePropertySystemCreate:
Instance ID: %s
Source Instance ID: %s
Node ID: %s
View: %s
SystemProperty: %s
Object Type: %s
Repository ID: %s
Inheritance Enabled: %t
Children Only: %t
System Property Value: %s
Is Inherited: %t%s`,
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited, "\n")
if _, err = txStmtNodePropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
if _, err = txStmtNodePropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtNodePropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtNodePropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtNodePropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtNodePropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtNodePropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
log.Printf(`SQL: tkStmtCreateCheck:
Check ID: %s
Repository ID: %s
Bucket ID: %s
Source Check ID: %s
Source Type: %s
Inherited From: %s
Check Config ID: %s
Check Capability ID: %s
Node ID: %s%s`,
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Node.Id, "\n")
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Node.Id,
"node",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = tx.Exec(stmt.TxMarkCheckDeleted,
a.Check.CheckId,
); err != nil {
break actionloop
}
case "check_instance_create":
if _, err = txStmtCreateCheckInstance.Exec(
a.CheckInstance.InstanceId,
a.CheckInstance.CheckId,
a.CheckInstance.ConfigId,
"00000000-0000-0000-0000-000000000000",
time.Now().UTC(),
); err != nil {
break actionloop
}
fallthrough
case "check_instance_update":
if _, err = txStmtCreateCheckInstanceConfiguration.Exec(
a.CheckInstance.InstanceConfigId,
a.CheckInstance.Version,
a.CheckInstance.InstanceId,
a.CheckInstance.ConstraintHash,
a.CheckInstance.ConstraintValHash,
a.CheckInstance.InstanceService,
a.CheckInstance.InstanceSvcCfgHash,
a.CheckInstance.InstanceServiceConfig,
time.Now().UTC(),
"awaiting_computation",
"none",
false,
"{}",
); err != nil {
fmt.Println(`Failed CreateCheckInstanceConfiguration`, a.CheckInstance.InstanceConfigId)
break actionloop
}
case "check_instance_delete":
if _, err = txStmtDeleteCheckInstance.Exec(
a.CheckInstance.InstanceId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
case "errorchannel":
continue actionloop
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
}
if err != nil {
goto bailout
}
// mark job as finished
if _, err = tx.Exec(
tkStmtFinishJob,
q.JobId.String(),
time.Now().UTC(),
"success",
``, // empty error field
); err != nil {
goto bailout
}
// commit transaction
if err = tx.Commit(); err != nil {
goto bailout
}
log.Printf("SUCCESS - Finished job: %s\n", q.JobId.String())
// accept tree changes
tk.tree.Commit()
return
bailout:
log.Printf("FAILED - Finished job: %s\n", q.JobId.String())
log.Println(err)
tk.tree.Rollback()
tx.Rollback()
tk.conn.Exec(
tkStmtFinishJob,
q.JobId.String(),
time.Now().UTC(),
"failed",
err.Error(),
)
for i := len(tk.actionChan); i > 0; i-- {
a := <-tk.actionChan
jB, _ := json.Marshal(a)
log.Printf("Cleaned message: %s\n", string(jB))
}
return
}
// convertCheckForDelete builds a minimal tree.Check carrying only the
// identifiers needed to remove a check: the source check id parsed from
// conf.ExternalId and the configuration id parsed from conf.Id.
// Id and InheritedFrom are deliberately left as uuid.Nil.
func (tk *treeKeeper) convertCheckForDelete(conf *proto.CheckConfig) (*tree.Check, error) {
	sourceId, err := uuid.FromString(conf.ExternalId)
	if err != nil {
		return nil, err
	}
	configId, err := uuid.FromString(conf.Id)
	if err != nil {
		return nil, err
	}
	return &tree.Check{
		Id:            uuid.Nil,
		InheritedFrom: uuid.Nil,
		SourceId:      sourceId,
		ConfigId:      configId,
	}, nil
}
// convertCheck translates a protocol check configuration into the internal
// tree.Check representation used by the tree keeper. It resolves the check's
// view via the prepared get_view statement and copies over thresholds and
// constraints. On any error it returns (nil, err); callers must not use the
// check in that case.
func (tk *treeKeeper) convertCheck(conf *proto.CheckConfig) (*tree.Check, error) {
	treechk := &tree.Check{
		Id:            uuid.Nil,
		SourceId:      uuid.Nil,
		InheritedFrom: uuid.Nil,
		Inheritance:   conf.Inheritance,
		ChildrenOnly:  conf.ChildrenOnly,
		Interval:      conf.Interval,
	}
	// Parse errors are deliberately ignored here: a malformed id degrades to
	// uuid.Nil. NOTE(review): presumably these ids were validated upstream —
	// confirm before tightening this to propagate the error.
	treechk.CapabilityId, _ = uuid.FromString(conf.CapabilityId)
	treechk.ConfigId, _ = uuid.FromString(conf.Id)
	if err := tk.get_view.QueryRow(conf.CapabilityId).Scan(&treechk.View); err != nil {
		// Return nil on error, consistent with convertCheckForDelete; a
		// partially populated check must never be used.
		return nil, err
	}

	treechk.Thresholds = make([]tree.CheckThreshold, len(conf.Thresholds))
	for i, thr := range conf.Thresholds {
		treechk.Thresholds[i] = tree.CheckThreshold{
			Predicate: thr.Predicate.Symbol,
			Level:     uint8(thr.Level.Numeric),
			Value:     thr.Value,
		}
	}

	treechk.Constraints = make([]tree.CheckConstraint, len(conf.Constraints))
	for i, constr := range conf.Constraints {
		ncon := tree.CheckConstraint{
			Type: constr.ConstraintType,
		}
		// Map each constraint variant onto the generic key/value pair the
		// tree implementation evaluates.
		switch constr.ConstraintType {
		case "native":
			ncon.Key = constr.Native.Name
			ncon.Value = constr.Native.Value
		case "oncall":
			ncon.Key = "OncallId"
			ncon.Value = constr.Oncall.Id
		case "custom":
			ncon.Key = constr.Custom.Id
			ncon.Value = constr.Custom.Value
		case "system":
			ncon.Key = constr.System.Name
			ncon.Value = constr.System.Value
		case "service":
			ncon.Key = "name"
			ncon.Value = constr.Service.Name
		case "attribute":
			ncon.Key = constr.Attribute.Name
			ncon.Value = constr.Attribute.Value
		}
		treechk.Constraints[i] = ncon
	}
	return treechk, nil
}
// vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix
|
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package static
const containersJs = `
google.load("visualization", "1", {packages: ["corechart", "gauge"]});
// Draw a line chart.
function drawLineChart(seriesTitles, data, elementId, unit) {
// Convert the first column to a Date.
for (var i = 0; i < data.length; i++) {
if (data[i] != null) {
data[i][0] = new Date(data[i][0]);
}
}
// Add the definition of each column and the necessary data.
var dataTable = new google.visualization.DataTable();
dataTable.addColumn('datetime', seriesTitles[0]);
for (var i = 1; i < seriesTitles.length; i++) {
dataTable.addColumn('number', seriesTitles[i]);
}
dataTable.addRows(data);
// Create and draw the visualization.
var ac = null;
var opts = null;
// TODO(vmarmol): Remove this hack, it is to support the old charts and the new charts during the transition.
if (window.charts) {
if (!(elementId in window.charts)) {
ac = new google.visualization.LineChart(document.getElementById(elementId));
window.charts[elementId] = ac;
}
ac = window.charts[elementId];
opts = window.chartOptions;
} else {
ac = new google.visualization.LineChart(document.getElementById(elementId));
opts = {};
}
opts.vAxis = {title: unit};
opts.legend = {position: 'bottom'};
ac.draw(dataTable, window.chartOptions);
}
// Gets the length of the interval in nanoseconds.
function getInterval(current, previous) {
var cur = new Date(current);
var prev = new Date(previous);
// ms -> ns.
return (cur.getTime() - prev.getTime()) * 1000000;
}
// Checks if the specified stats include the specified resource.
function hasResource(stats, resource) {
return stats.stats.length > 0 && stats.stats[0][resource];
}
// Draw a gauge.
function drawGauge(elementId, cpuUsage, memoryUsage) {
var gauges = [['Label', 'Value']];
if (cpuUsage >= 0) {
gauges.push(['CPU', cpuUsage]);
}
if (memoryUsage >= 0) {
gauges.push(['Memory', memoryUsage]);
}
// Create and populate the data table.
var data = google.visualization.arrayToDataTable(gauges);
// Create and draw the visualization.
var options = {
width: 400, height: 120,
redFrom: 90, redTo: 100,
yellowFrom:75, yellowTo: 90,
minorTicks: 5,
animation: {
duration: 900,
easing: 'linear'
}
};
var chart = new google.visualization.Gauge(document.getElementById(elementId));
chart.draw(data, options);
}
// Get the machine info.
function getMachineInfo(callback) {
$.getJSON("/api/v1.0/machine", function(data) {
callback(data);
});
}
// Get the container stats for the specified container.
function getStats(containerName, callback) {
// Request 60s of container history and no samples.
var request = JSON.stringify({
// Update main.statsRequestedByUI while updating "num_stats" here.
"num_stats": 60,
"num_samples": 0
});
$.post("/api/v1.0/containers" + containerName, request, function(data) {
callback(data);
}, "json");
}
// Draw the graph for CPU usage.
function drawCpuTotalUsage(elementId, machineInfo, stats) {
if (!hasResource(stats, "cpu")) {
return;
}
var titles = ["Time", "Total"];
var data = [];
for (var i = 1; i < stats.stats.length; i++) {
var cur = stats.stats[i];
var prev = stats.stats[i - 1];
var intervalInNs = getInterval(cur.timestamp, prev.timestamp);
var elements = [];
elements.push(cur.timestamp);
elements.push((cur.cpu.usage.total - prev.cpu.usage.total) / intervalInNs);
data.push(elements);
}
drawLineChart(titles, data, elementId, "Cores");
}
// Draw the graph for per-core CPU usage.
function drawCpuPerCoreUsage(elementId, machineInfo, stats) {
if (!hasResource(stats, "cpu")) {
return;
}
// Add a title for each core.
var titles = ["Time"];
for (var i = 0; i < machineInfo.num_cores; i++) {
titles.push("Core " + i);
}
var data = [];
for (var i = 1; i < stats.stats.length; i++) {
var cur = stats.stats[i];
var prev = stats.stats[i - 1];
var intervalInNs = getInterval(cur.timestamp, prev.timestamp);
var elements = [];
elements.push(cur.timestamp);
for (var j = 0; j < machineInfo.num_cores; j++) {
elements.push((cur.cpu.usage.per_cpu_usage[j] - prev.cpu.usage.per_cpu_usage[j]) / intervalInNs);
}
data.push(elements);
}
drawLineChart(titles, data, elementId, "Cores");
}
// Draw the graph for CPU usage breakdown.
function drawCpuUsageBreakdown(elementId, containerInfo) {
if (!hasResource(containerInfo, "cpu")) {
return;
}
var titles = ["Time", "User", "Kernel"];
var data = [];
for (var i = 1; i < containerInfo.stats.length; i++) {
var cur = containerInfo.stats[i];
var prev = containerInfo.stats[i - 1];
var intervalInNs = getInterval(cur.timestamp, prev.timestamp);
var elements = [];
elements.push(cur.timestamp);
elements.push((cur.cpu.usage.user - prev.cpu.usage.user) / intervalInNs);
elements.push((cur.cpu.usage.system - prev.cpu.usage.system) / intervalInNs);
data.push(elements);
}
drawLineChart(titles, data, elementId, "Cores");
}
// Draw the gauges for overall resource usage.
function drawOverallUsage(elementId, machineInfo, containerInfo) {
var cur = containerInfo.stats[containerInfo.stats.length - 1];
var cpuUsage = 0;
if (containerInfo.spec.cpu && containerInfo.stats.length >= 2) {
var prev = containerInfo.stats[containerInfo.stats.length - 2];
var rawUsage = cur.cpu.usage.total - prev.cpu.usage.total;
var intervalInNs = getInterval(cur.timestamp, prev.timestamp);
// Convert to millicores and take the percentage
cpuUsage = Math.round(((rawUsage / intervalInNs) / machineInfo.num_cores) * 100);
if (cpuUsage > 100) {
cpuUsage = 100;
}
}
var memoryUsage = 0;
if (containerInfo.spec.memory) {
// Saturate to the machine size.
var limit = containerInfo.spec.memory.limit;
if (limit > machineInfo.memory_capacity) {
limit = machineInfo.memory_capacity;
}
memoryUsage = Math.round((cur.memory.usage / limit) * 100);
}
drawGauge(elementId, cpuUsage, memoryUsage);
}
var oneMegabyte = 1024 * 1024;
function drawMemoryUsage(elementId, containerInfo) {
if (!hasResource(containerInfo, "memory")) {
return;
}
var titles = ["Time", "Total", "Hot"];
var data = [];
for (var i = 0; i < containerInfo.stats.length; i++) {
var cur = containerInfo.stats[i];
var elements = [];
elements.push(cur.timestamp);
elements.push(cur.memory.usage / oneMegabyte);
elements.push(cur.memory.working_set / oneMegabyte);
data.push(elements);
}
drawLineChart(titles, data, elementId, "Megabytes");
}
// Draw the graph for network tx/rx bytes.
function drawNetworkBytes(elementId, machineInfo, stats) {
if (!hasResource(stats, "network")) {
return;
}
var titles = ["Time", "Tx bytes", "Rx bytes"];
var data = [];
for (var i = 1; i < stats.stats.length; i++) {
var cur = stats.stats[i];
var prev = stats.stats[i - 1];
var intervalInSec = getInterval(cur.timestamp, prev.timestamp) / 1000000000;
var elements = [];
elements.push(cur.timestamp);
elements.push((cur.network.tx_bytes - prev.network.tx_bytes) / intervalInSec);
elements.push((cur.network.rx_bytes - prev.network.rx_bytes) / intervalInSec);
data.push(elements);
}
drawLineChart(titles, data, elementId, "Bytes per second");
}
// Draw the graph for network errors
function drawNetworkErrors(elementId, machineInfo, stats) {
if (!hasResource(stats, "network")) {
return;
}
var titles = ["Time", "Tx", "Rx"];
var data = [];
for (var i = 1; i < stats.stats.length; i++) {
var cur = stats.stats[i];
var prev = stats.stats[i - 1];
var intervalInSec = getInterval(cur.timestamp, prev.timestamp) / 1000000000;
var elements = [];
elements.push(cur.timestamp);
elements.push((cur.network.tx_errors - prev.network.tx_errors) / intervalInSec);
elements.push((cur.network.rx_errors - prev.network.rx_errors) / intervalInSec);
data.push(elements);
}
drawLineChart(titles, data, elementId, "Errors per second");
}
// Runs an array of closures one at a time, handing control back to the
// JS runtime between steps via setTimeout. Returns asynchronously; the
// steps array is consumed (mutated) as it runs.
function stepExecute(steps) {
  if (steps.length == 0) {
    // Nothing left to run.
    return;
  }
  // Pop and run the next step, then schedule the remainder.
  (steps.shift())();
  setTimeout(function() {
    stepExecute(steps);
  }, 0);
}
// Draw all the charts on the page.
// Rendering is split into steps (one chart each) and run through
// stepExecute so the browser regains control between charts.
// Fix: added the missing semicolon after the drawOverallUsage call so
// it matches every sibling step instead of relying on ASI.
function drawCharts(machineInfo, containerInfo) {
  var steps = [];
  // Overall usage gauges.
  steps.push(function() {
    drawOverallUsage("usage-gauge", machineInfo, containerInfo);
  });
  // CPU.
  steps.push(function() {
    drawCpuTotalUsage("cpu-total-usage-chart", machineInfo, containerInfo);
  });
  steps.push(function() {
    drawCpuPerCoreUsage("cpu-per-core-usage-chart", machineInfo, containerInfo);
  });
  steps.push(function() {
    drawCpuUsageBreakdown("cpu-usage-breakdown-chart", containerInfo);
  });
  // Memory.
  steps.push(function() {
    drawMemoryUsage("memory-usage-chart", containerInfo);
  });
  // Network.
  steps.push(function() {
    drawNetworkBytes("network-bytes-chart", machineInfo, containerInfo);
  });
  steps.push(function() {
    drawNetworkErrors("network-errors-chart", machineInfo, containerInfo);
  });
  stepExecute(steps);
}
// Executed when the page finishes loading: fetches machine info once,
// then polls container stats every second and redraws all charts.
// Does nothing when no resource (CPU or memory) is being tracked.
function startPage(containerName, hasCpu, hasMemory) {
  // Don't fetch data if we don't have any resource.
  if (!hasCpu && !hasMemory) {
    return;
  }
  // TODO(vmarmol): Look into changing the view window to get a smoother animation.
  window.chartOptions = {
    curveType: 'function',
    height: 300,
    legend: {position: "none"},
    focusTarget: "category",
  };
  // Cache of chart objects, keyed by element id.
  window.charts = {};
  getMachineInfo(function(machineInfo) {
    var refresh = function() {
      getStats(containerName, function(stats) {
        drawCharts(machineInfo, stats);
      });
    };
    // Redraw from fresh stats every 1s.
    setInterval(refresh, 1000);
  });
}
`
Cleanup graph JS and don't show values below 0.
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package static
const containersJs = `
google.load("visualization", "1", {packages: ["corechart", "gauge"]});
// Draw a line chart.
//
// seriesTitles: column titles; the first names the time axis.
// data: rows of [timestamp, v1, v2, ...]; column 0 is converted to Date.
// elementId: DOM id of the chart container; LineChart instances are
// cached on window.charts so redraws reuse the same chart.
// unit: vertical-axis label.
function drawLineChart(seriesTitles, data, elementId, unit) {
  var min = Infinity;
  var max = -Infinity;
  for (var i = 0; i < data.length; i++) {
    // Fix: skip missing rows entirely. The original guarded only the
    // Date conversion and then crashed dereferencing data[i].length in
    // the min/max scan below.
    if (data[i] == null) {
      continue;
    }
    // Convert the first column to a Date.
    data[i][0] = new Date(data[i][0]);
    // Find min, max.
    for (var j = 1; j < data[i].length; j++) {
      var val = data[i][j];
      if (val < min) {
        min = val;
      }
      if (val > max) {
        max = val;
      }
    }
  }
  // We don't want to show any values less than 0 so cap the min value at that.
  // At the same time, show 10% of the graph below the min value if we can.
  var minWindow = min - (max - min) / 10;
  if (minWindow < 0) {
    minWindow = 0;
  }
  // Add the definition of each column and the necessary data.
  var dataTable = new google.visualization.DataTable();
  dataTable.addColumn('datetime', seriesTitles[0]);
  for (var i = 1; i < seriesTitles.length; i++) {
    dataTable.addColumn('number', seriesTitles[i]);
  }
  dataTable.addRows(data);
  // Create and draw the visualization.
  if (!(elementId in window.charts)) {
    window.charts[elementId] = new google.visualization.LineChart(document.getElementById(elementId));
  }
  // TODO(vmarmol): Look into changing the view window to get a smoother animation.
  var opts = {
    curveType: 'function',
    height: 300,
    focusTarget: "category",
    vAxis: {
      title: unit,
      viewWindow: {
        min: minWindow,
      },
    },
    // Fix: the original listed "legend" twice; per JS object-literal
    // semantics the later value (bottom) won, so keep only that one.
    legend: {
      position: 'bottom',
    },
  };
  window.charts[elementId].draw(dataTable, opts);
}
// Gets the length of the interval in nanoseconds between two
// timestamps (anything the Date constructor accepts).
function getInterval(current, previous) {
  // Date math yields milliseconds; scale ms -> ns.
  return (new Date(current).getTime() - new Date(previous).getTime()) * 1000000;
}
// Checks if the specified stats include the specified resource.
// Returns false when there are no samples; otherwise returns the
// resource field of the first sample (truthy when present).
function hasResource(stats, resource) {
  if (stats.stats.length == 0) {
    return false;
  }
  return stats.stats[0][resource];
}
// Draw CPU/memory gauges showing usage percentages. Passing a negative
// value hides the corresponding gauge.
function drawGauge(elementId, cpuUsage, memoryUsage) {
  var gauges = [['Label', 'Value']];
  if (cpuUsage >= 0) {
    gauges.push(['CPU', cpuUsage]);
  }
  if (memoryUsage >= 0) {
    gauges.push(['Memory', memoryUsage]);
  }
  // Populate the data table from the rows above.
  var data = google.visualization.arrayToDataTable(gauges);
  // Color bands: yellow from 75%, red from 90%.
  var options = {
    width: 400, height: 120,
    redFrom: 90, redTo: 100,
    yellowFrom: 75, yellowTo: 90,
    minorTicks: 5,
    animation: {
      duration: 900,
      easing: 'linear'
    }
  };
  new google.visualization.Gauge(document.getElementById(elementId)).draw(data, options);
}
// Get the machine info.
// Issues a GET to the machine API endpoint and invokes callback with
// the parsed JSON response.
function getMachineInfo(callback) {
  $.getJSON("/api/v1.0/machine", function(data) {
    callback(data);
  });
}
// Get the container stats for the specified container.
// POSTs a JSON request body to the containers API and hands the parsed
// JSON response to callback.
function getStats(containerName, callback) {
  // Request 60s of container history and no samples.
  var request = JSON.stringify({
    // Update main.statsRequestedByUI while updating "num_stats" here.
    "num_stats": 60,
    "num_samples": 0
  });
  $.post("/api/v1.0/containers" + containerName, request, function(data) {
    callback(data);
  }, "json");
}
// Draw the graph for total CPU usage, in cores. Each point is the
// cumulative-usage delta between adjacent samples divided by the
// interval in ns, i.e. the average number of cores busy over that span.
function drawCpuTotalUsage(elementId, machineInfo, stats) {
  if (!hasResource(stats, "cpu")) {
    return;
  }
  var titles = ["Time", "Total"];
  var data = [];
  for (var i = 1; i < stats.stats.length; i++) {
    var cur = stats.stats[i];
    var prev = stats.stats[i - 1];
    var intervalInNs = getInterval(cur.timestamp, prev.timestamp);
    data.push([
      cur.timestamp,
      (cur.cpu.usage.total - prev.cpu.usage.total) / intervalInNs
    ]);
  }
  drawLineChart(titles, data, elementId, "Cores");
}
// Draw the graph for per-core CPU usage, one series per core, in cores
// (cumulative per-CPU usage delta over the sample interval in ns).
function drawCpuPerCoreUsage(elementId, machineInfo, stats) {
  if (!hasResource(stats, "cpu")) {
    return;
  }
  // One column title per core.
  var titles = ["Time"];
  for (var i = 0; i < machineInfo.num_cores; i++) {
    titles.push("Core " + i);
  }
  var data = [];
  for (var i = 1; i < stats.stats.length; i++) {
    var cur = stats.stats[i];
    var prev = stats.stats[i - 1];
    var intervalInNs = getInterval(cur.timestamp, prev.timestamp);
    var row = [cur.timestamp];
    for (var j = 0; j < machineInfo.num_cores; j++) {
      row.push((cur.cpu.usage.per_cpu_usage[j] - prev.cpu.usage.per_cpu_usage[j]) / intervalInNs);
    }
    data.push(row);
  }
  drawLineChart(titles, data, elementId, "Cores");
}
// Draw the graph for CPU usage broken down into user and kernel
// (system) time, in cores, from cumulative counters.
function drawCpuUsageBreakdown(elementId, containerInfo) {
  if (!hasResource(containerInfo, "cpu")) {
    return;
  }
  var titles = ["Time", "User", "Kernel"];
  var data = [];
  for (var i = 1; i < containerInfo.stats.length; i++) {
    var cur = containerInfo.stats[i];
    var prev = containerInfo.stats[i - 1];
    var intervalInNs = getInterval(cur.timestamp, prev.timestamp);
    data.push([
      cur.timestamp,
      (cur.cpu.usage.user - prev.cpu.usage.user) / intervalInNs,
      (cur.cpu.usage.system - prev.cpu.usage.system) / intervalInNs
    ]);
  }
  drawLineChart(titles, data, elementId, "Cores");
}
// Draw the gauges for overall resource usage.
// CPU usage is the percentage of all machine cores busy over the last
// sample interval; memory usage is a percentage of the container's
// limit (saturated to machine capacity). Both are clamped to 100.
function drawOverallUsage(elementId, machineInfo, containerInfo) {
  var cur = containerInfo.stats[containerInfo.stats.length - 1];
  var cpuUsage = 0;
  if (containerInfo.spec.cpu && containerInfo.stats.length >= 2) {
    var prev = containerInfo.stats[containerInfo.stats.length - 2];
    var rawUsage = cur.cpu.usage.total - prev.cpu.usage.total;
    var intervalInNs = getInterval(cur.timestamp, prev.timestamp);
    // Convert to millicores and take the percentage
    cpuUsage = Math.round(((rawUsage / intervalInNs) / machineInfo.num_cores) * 100);
    if (cpuUsage > 100) {
      cpuUsage = 100;
    }
  }
  var memoryUsage = 0;
  if (containerInfo.spec.memory) {
    // Saturate to the machine size.
    var limit = containerInfo.spec.memory.limit;
    if (limit > machineInfo.memory_capacity) {
      limit = machineInfo.memory_capacity;
    }
    memoryUsage = Math.round((cur.memory.usage / limit) * 100);
    // Fix: cap at 100% like the CPU gauge above; the gauge scale tops
    // out at 100 and usage can briefly exceed the configured limit.
    if (memoryUsage > 100) {
      memoryUsage = 100;
    }
  }
  drawGauge(elementId, cpuUsage, memoryUsage);
}
// Bytes per megabyte, used to scale raw memory counters for display.
var oneMegabyte = 1024 * 1024;
// Draw the graph for memory usage: total usage and working set ("hot"),
// both converted to megabytes. No-op when the container reports no
// memory stats.
function drawMemoryUsage(elementId, containerInfo) {
  if (!hasResource(containerInfo, "memory")) {
    return;
  }
  var titles = ["Time", "Total", "Hot"];
  var data = [];
  for (var i = 0; i < containerInfo.stats.length; i++) {
    var cur = containerInfo.stats[i];
    var elements = [];
    elements.push(cur.timestamp);
    elements.push(cur.memory.usage / oneMegabyte);
    elements.push(cur.memory.working_set / oneMegabyte);
    data.push(elements);
  }
  drawLineChart(titles, data, elementId, "Megabytes");
}
// Draw the graph for network tx/rx bytes.
// Each point is a bytes-per-second rate derived from the delta of
// cumulative byte counters between consecutive samples.
function drawNetworkBytes(elementId, machineInfo, stats) {
  if (!hasResource(stats, "network")) {
    return;
  }
  var titles = ["Time", "Tx bytes", "Rx bytes"];
  var data = [];
  for (var i = 1; i < stats.stats.length; i++) {
    var cur = stats.stats[i];
    var prev = stats.stats[i - 1];
    // getInterval returns ns; convert to seconds for per-second rates.
    var intervalInSec = getInterval(cur.timestamp, prev.timestamp) / 1000000000;
    var elements = [];
    elements.push(cur.timestamp);
    elements.push((cur.network.tx_bytes - prev.network.tx_bytes) / intervalInSec);
    elements.push((cur.network.rx_bytes - prev.network.rx_bytes) / intervalInSec);
    data.push(elements);
  }
  drawLineChart(titles, data, elementId, "Bytes per second");
}
// Draw the graph for network errors.
// Each point is an errors-per-second rate derived from the delta of
// cumulative error counters between consecutive samples.
function drawNetworkErrors(elementId, machineInfo, stats) {
  if (!hasResource(stats, "network")) {
    return;
  }
  var titles = ["Time", "Tx", "Rx"];
  var data = [];
  for (var i = 1; i < stats.stats.length; i++) {
    var cur = stats.stats[i];
    var prev = stats.stats[i - 1];
    // getInterval returns ns; convert to seconds for per-second rates.
    var intervalInSec = getInterval(cur.timestamp, prev.timestamp) / 1000000000;
    var elements = [];
    elements.push(cur.timestamp);
    elements.push((cur.network.tx_errors - prev.network.tx_errors) / intervalInSec);
    elements.push((cur.network.rx_errors - prev.network.rx_errors) / intervalInSec);
    data.push(elements);
  }
  drawLineChart(titles, data, elementId, "Errors per second");
}
// Expects an array of closures to call. After each execution the JS runtime is given control back before continuing.
// This function returns asynchronously.
// Note: the steps array is consumed (mutated via shift) as it runs.
function stepExecute(steps) {
  // No steps, stop.
  if (steps.length == 0) {
    return;
  }
  // Get a step and execute it.
  var step = steps.shift();
  step();
  // Schedule the next step.
  setTimeout(function() {
    stepExecute(steps);
  }, 0);
}
// Draw all the charts on the page.
// Rendering is split into steps (one chart each) and run through
// stepExecute so the browser regains control between charts.
// Fix: added the missing semicolon after the drawOverallUsage call so
// it matches every sibling step instead of relying on ASI.
function drawCharts(machineInfo, containerInfo) {
  var steps = [];
  // Overall usage gauges.
  steps.push(function() {
    drawOverallUsage("usage-gauge", machineInfo, containerInfo);
  });
  // CPU.
  steps.push(function() {
    drawCpuTotalUsage("cpu-total-usage-chart", machineInfo, containerInfo);
  });
  steps.push(function() {
    drawCpuPerCoreUsage("cpu-per-core-usage-chart", machineInfo, containerInfo);
  });
  steps.push(function() {
    drawCpuUsageBreakdown("cpu-usage-breakdown-chart", containerInfo);
  });
  // Memory.
  steps.push(function() {
    drawMemoryUsage("memory-usage-chart", containerInfo);
  });
  // Network.
  steps.push(function() {
    drawNetworkBytes("network-bytes-chart", machineInfo, containerInfo);
  });
  steps.push(function() {
    drawNetworkErrors("network-errors-chart", machineInfo, containerInfo);
  });
  stepExecute(steps);
}
// Executed when the page finishes loading: fetches machine info once,
// then polls container stats every second and redraws all charts.
// Does nothing when no resource (CPU or memory) is being tracked.
function startPage(containerName, hasCpu, hasMemory) {
  // Don't fetch data if we don't have any resource.
  if (!hasCpu && !hasMemory) {
    return;
  }
  // Cache of chart objects, keyed by element id (see drawLineChart).
  window.charts = {};
  getMachineInfo(function(machineInfo) {
    var refresh = function() {
      getStats(containerName, function(stats) {
        drawCharts(machineInfo, stats);
      });
    };
    // Redraw from fresh stats every 1s.
    setInterval(refresh, 1000);
  });
}
`
|
// Package valuelocmap provides a concurrency-safe data structure that maps
// keys to value locations. A key is 128 bits and is specified using two
// uint64s (keyA, keyB). A value location is specified using a blockID, offset,
// and length triplet. Each mapping is assigned a timestamp and the greatest
// timestamp wins. The timestamp is also used to indicate a deletion marker; if
// timestamp & 1 == 1 then the mapping is considered a mark for deletion at
// that time. Deletion markers are used in case mappings come in out of order
// and for replication to others that may have missed the deletion.
//
// This implementation uses a tree structure of slices of key to location
// assignments. As the slices fill up, they are split into two and the tree
// structure grows. If a slice empties, it is merged with its pair in the tree
// structure and the tree shrinks. The tree is balanced by high bits of the
// key, and locations are distributed in the slices by the low bits.
//
// There are also functions for scanning key ranges, both to clean out old
// tombstones and to provide callbacks for replication or other tasks.
package valuelocmap
import (
"fmt"
"math"
"os"
"runtime"
"strconv"
"sync"
"sync/atomic"
"unsafe"
"github.com/gholt/brimtext"
)
// config holds the tunable settings for a ValueLocMap; values are
// resolved from the environment and Opt* functions by resolveConfig.
type config struct {
	cores           int     // parallelism hint (lock counts, resize workers)
	pageSize        int     // bytes allocated per chunk/page of entries
	splitMultiplier float64 // how full a page may get before splitting
}
// resolveConfig builds a config by first consulting environment
// variables, then applying any Opt* functions, and finally clamping the
// results to usable minimums. Unparsable environment values are
// silently ignored, leaving the corresponding default in effect.
func resolveConfig(opts ...func(*config)) *config {
	cfg := &config{}
	// BRIMSTORE_VALUELOCMAP_CORES takes priority; the generic
	// BRIMSTORE_CORES is consulted only when the specific one is unset.
	if env := os.Getenv("BRIMSTORE_VALUELOCMAP_CORES"); env != "" {
		if val, err := strconv.Atoi(env); err == nil {
			cfg.cores = val
		}
	} else if env = os.Getenv("BRIMSTORE_CORES"); env != "" {
		if val, err := strconv.Atoi(env); err == nil {
			cfg.cores = val
		}
	}
	if cfg.cores <= 0 {
		cfg.cores = runtime.GOMAXPROCS(0)
	}
	if env := os.Getenv("BRIMSTORE_VALUELOCMAP_PAGESIZE"); env != "" {
		if val, err := strconv.Atoi(env); err == nil {
			cfg.pageSize = val
		}
	}
	if cfg.pageSize <= 0 {
		cfg.pageSize = 524288
	}
	if env := os.Getenv("BRIMSTORE_VALUELOCMAP_SPLITMULTIPLIER"); env != "" {
		if val, err := strconv.ParseFloat(env, 64); err == nil {
			cfg.splitMultiplier = val
		}
	}
	if cfg.splitMultiplier <= 0 {
		cfg.splitMultiplier = 3.0
	}
	// Opt* functions override the environment-derived values.
	for _, opt := range opts {
		opt(cfg)
	}
	// Final clamps guard against out-of-range values supplied via opts.
	if cfg.cores < 1 {
		cfg.cores = 1
	}
	if cfg.pageSize < 1 {
		cfg.pageSize = 1
	}
	if cfg.splitMultiplier <= 0 {
		cfg.splitMultiplier = 0.01
	}
	return cfg
}
// OptList returns a slice with the opts given; useful if you want to possibly
// append more options to the list before using it with
// NewValueLocMap(list...). The returned slice aliases the variadic
// arguments; it is not a copy.
func OptList(opts ...func(*config)) []func(*config) {
	return opts
}
// OptCores indicates how many cores may be in use (for calculating the number
// of locks to create, for example) and how many cores may be used for resizes.
// Defaults to env BRIMSTORE_VALUELOCMAP_CORES, BRIMSTORE_CORES, or GOMAXPROCS.
// Values < 1 are clamped to 1 by resolveConfig.
func OptCores(cores int) func(*config) {
	return func(cfg *config) {
		cfg.cores = cores
	}
}
// OptPageSize controls the size of each chunk of memory allocated. Defaults to
// env BRIMSTORE_VALUELOCMAP_PAGESIZE or 524,288. Values < 1 are clamped
// to 1 by resolveConfig.
func OptPageSize(bytes int) func(*config) {
	return func(cfg *config) {
		cfg.pageSize = bytes
	}
}
// OptSplitMultiplier indicates how full a memory page can get before being
// split into two pages. Defaults to env BRIMSTORE_VALUELOCMAP_SPLITMULTIPLIER
// or 3.0. Non-positive values are clamped to 0.01 by resolveConfig.
func OptSplitMultiplier(multiplier float64) func(*config) {
	return func(cfg *config) {
		cfg.splitMultiplier = multiplier
	}
}
// ValueLocMap instances are created with NewValueLocMap.
type ValueLocMap struct {
	root       *valueLocNode // top of the keyA-partitioned tree of stores
	cores      int           // parallelism hint (resizes, stats gathering)
	splitCount int           // used-entry count that triggers a store split
	// outOfPlaceKeyDetections counts repairs of misplaced keys; it is
	// surfaced through GatherStats' debug output.
	outOfPlaceKeyDetections int32
	// replicationChan: purpose not evident from the code visible here —
	// presumably feeds replication callbacks; confirm against the scan
	// functions. (NOTE(review))
	replicationChan chan interface{}
}
// OVERALL NOTES:
//
// a is used to store at first, growth may then cause a split.
// While splitting, b will be set, c and d will still be nil.
// Once the split is complete, c and d will be set.
// Shrinking may cause an unsplit.
// During unsplit, a and e will be set, c and d will become nil.
// e is considered read-only/fallback during unsplit.
// Once unsplit is done, e will become nil.
//
// FOR SPEED'S SAKE THERE IS AN ASSUMPTION THAT ALL READS AND WRITES ACTIVE AT
// THE START OR DURING ONE RESIZE WILL BE COMPLETED BEFORE ANOTHER RESIZE OF
// THE SAME KEY SPACE STARTS.
//
// As one example, if a write starts, gathers a and b (split in progress), and
// somehow is stalled for an extended period of time and the split completes
// and another subsplit happens, then the write awakens and continues, it will
// have references to quite possibly incorrect memory areas.
//
// This code is not meant to be used with over-subscribed core counts that
// would create artificial goroutine slowdowns / starvations or with extremely
// small memory page sizes. In the rare case a single core on a CPU is going
// bad and is running slow, this code should still be safe as it is meant to be
// run with the data stored on multiple server replicas where if one server is
// behaving badly the other servers will supersede it. In addition, background
// routines are in place to detect and repair any out of place data and
// so these rare anomalies should be resolved fairly quickly. Any repairs done
// will be reported via the gatherStats' outOfPlaceKeyDetections value in case
// they turn out to be less rare than they should be.
//
// If you would rather have perfect correctness at the cost of speed, you will
// have to use an additional lock around all uses of a-e.
// valueLocNode is one node of the location tree. See the OVERALL NOTES
// above for the roles of stores a, b, and e and children c and d during
// split/unsplit resizes.
type valueLocNode struct {
	leftMask   uint64 // keyA bit deciding left (0 -> c) vs right (1 -> d)
	rangeStart uint64 // lowest keyA covered by this node
	rangeStop  uint64 // highest keyA covered by this node
	a          *valueLocStore // primary store while this node is a leaf
	b          *valueLocStore // set only while a split is in progress
	c          *valueLocNode  // left child once a split completes
	d          *valueLocNode  // right child once a split completes
	e          *valueLocStore // read-only fallback while an unsplit is in progress
	resizing   bool           // guarded by resizingLock
	resizingLock sync.RWMutex
}
// valueLocStore is one hash-bucketed page of key -> location entries.
type valueLocStore struct {
	buckets []valueLoc     // bucket heads, indexed by keyB % len(buckets)
	locks   []sync.RWMutex // bucket locks, indexed by bucketIndex % len(locks)
	used    int32          // count of live entries (blockID != 0); updated atomically
}
// valueLoc is a single mapping entry; entries hashing to the same
// bucket are chained through next.
type valueLoc struct {
	next      *valueLoc
	keyA      uint64
	keyB      uint64
	timestamp uint64 // greatest wins; timestamp&1 == 1 marks a deletion
	blockID   uint16 // 0 means this slot is free/unused
	offset    uint32
	length    uint32
}
// valueLocMapStats accumulates results for GatherStats. The fields past
// cores are filled in during tree traversal; several are populated only
// when debug is true (see GatherStats).
type valueLocMapStats struct {
	goroutines int         // max concurrent collection goroutines
	debug      bool        // whether to gather the extra debug fields
	funcChan   chan func() // work queue drained by GatherStats' dispatcher
	cores      int         // copied from the ValueLocMap's configuration
	depth      uint64
	depthCounts []uint64 // debug only; seeded with a single 0 then trimmed
	sections   uint64
	storages   uint64
	buckets    uint64
	bucketCounts []uint64
	splitCount int   // debug only; copied from the ValueLocMap
	outOfPlaceKeyDetections int32 // debug only; copied from the ValueLocMap
	locs       uint64
	pointerLocs uint64
	unused     uint64
	used       uint64
	active     uint64 // returned by GatherStats: live (non-tombstone) mappings
	length     uint64 // returned by GatherStats: total value length referenced
	tombstones uint64
}
// NewValueLocMap creates a new ValueLocMap instance. You can provide Opt*
// functions for optional configuration items, such as OptCores:
//
//	vlmWithDefaults := valuelocmap.NewValueLocMap()
//	vlmWithOptions := valuelocmap.NewValueLocMap(
//		valuelocmap.OptCores(10),
//		valuelocmap.OptPageSize(4194304),
//	)
//	opts := valuelocmap.OptList()
//	if commandLineOptionForCores {
//		opts = append(opts, valuelocmap.OptCores(commandLineOptionValue))
//	}
//	vlmWithOptionsBuiltUp := valuelocmap.NewValueLocMap(opts...)
func NewValueLocMap(opts ...func(*config)) *ValueLocMap {
	cfg := resolveConfig(opts...)
	// Size the bucket array so one store occupies roughly one configured
	// memory page.
	bucketCount := cfg.pageSize / int(unsafe.Sizeof(valueLoc{}))
	if bucketCount < 1 {
		bucketCount = 1
	}
	// One lock per core, but never more locks than buckets.
	lockCount := cfg.cores
	if lockCount > bucketCount {
		lockCount = bucketCount
	}
	// The root node covers the entire keyA space.
	vlm := &ValueLocMap{
		root: &valueLocNode{
			leftMask:   uint64(1) << 63,
			rangeStart: 0,
			rangeStop:  math.MaxUint64,
			a: &valueLocStore{
				buckets: make([]valueLoc, bucketCount),
				locks:   make([]sync.RWMutex, lockCount),
			},
		},
		cores:      cfg.cores,
		splitCount: int(float64(bucketCount) * cfg.splitMultiplier),
	}
	return vlm
}
// Get returns timestamp, blockID, offset, and length for keyA, keyB. The
// blockID will be 0 if keyA, keyB was not found. The timestamp & 1 == 1 if
// keyA, keyB is marked for deletion.
//
// The read path is lock-free with respect to the tree structure (stores
// and children are loaded atomically) and retries from the root when it
// detects that a resize invalidated what it read; see the OVERALL NOTES
// above for the resize protocol and its assumptions.
func (vlm *ValueLocMap) Get(keyA uint64, keyB uint64) (uint64, uint16, uint32, uint32) {
	var timestamp uint64
	var blockID uint16
	var offset uint32
	var length uint32
	vln := vlm.root
VLN_SELECTION:
	// Traverse the tree until we hit a leaf node (no c [and therefore no d]).
	for {
		c := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.c))))
		if c == nil {
			break
		}
		if keyA&vln.leftMask == 0 {
			vln = c
		} else {
			vln = (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.d))))
		}
	}
	a := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.a))))
	bix := keyB % uint64(len(a.buckets))
	lix := bix % uint64(len(a.locks))
	// f scans the bucket chain in s for the key and, when fb is non-nil
	// and the key was not found in s, falls back to fb's chain; results
	// are written to the captured named variables above.
	f := func(s *valueLocStore, fb *valueLocStore) {
		blockID = 0
		s.locks[lix].RLock()
		if fb != nil {
			fb.locks[lix].RLock()
		}
		for item := &s.buckets[bix]; item != nil; item = item.next {
			if item.blockID != 0 && item.keyA == keyA && item.keyB == keyB {
				timestamp, blockID, offset, length = item.timestamp, item.blockID, item.offset, item.length
				break
			}
		}
		if fb != nil && blockID == 0 {
			for item := &fb.buckets[bix]; item != nil; item = item.next {
				if item.blockID != 0 && item.keyA == keyA && item.keyB == keyB {
					timestamp, blockID, offset, length = item.timestamp, item.blockID, item.offset, item.length
					break
				}
			}
		}
		if fb != nil {
			fb.locks[lix].RUnlock()
		}
		s.locks[lix].RUnlock()
	}
	if keyA&vln.leftMask == 0 {
		// If we're on the left side, even if a split is in progress or happens
		// while we're reading it won't matter because we'd still read from the
		// same memory block, assuming more than one split doesn't occur while
		// we're reading.
		f(a, nil)
		// If an unsplit happened while we were reading, store a will end up
		// nil and we need to retry the read.
		a = (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.a))))
		if a == nil {
			vln = vlm.root
			goto VLN_SELECTION
		}
	} else {
		// If we're on the right side, then things might be a bit trickier...
		b := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.b))))
		if b != nil {
			// If a split is in progress, then we can read from b and fallback
			// to a and we're safe, assuming another split doesn't occur during
			// our read.
			f(b, a)
		} else {
			// If no split is in progress, we'll read from a and fallback to e
			// if it exists...
			f(a, (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.e)))))
			// If an unsplit happened while we were reading, store a will end
			// up nil and we need to retry the read.
			a = (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.a))))
			if a == nil {
				vln = vlm.root
				goto VLN_SELECTION
			}
			// If we pass that test, we'll double check b...
			b := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.b))))
			if b != nil {
				// If b is set, a split started while we were reading, so we'll
				// re-read from b and fallback to a and we're safe, assuming
				// another split doesn't occur during this re-read.
				f(b, a)
			} else {
				// If b isn't set, either no split happened while we were
				// reading, or the split happened and finished while we were
				// reading, so we'll double check d to find out...
				d := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.d))))
				if d != nil {
					// If a complete split occurred while we were reading,
					// we'll traverse the tree node and jump back to any
					// further tree node traversal and retry the read.
					vln = d
					goto VLN_SELECTION
				}
			}
		}
	}
	return timestamp, blockID, offset, length
}
// Set returns the previous timestamp after updating keyA, keyB to have the
// timestamp, blockID, offset, and length.
//
// If blockID is 0 then keyA, keyB will be removed from the map, though this
// isn't usually done. Instead, setting the timestamp to a value where
// timestamp & 1 == 1 will mark keyA, keyB for deletion and the deletion marker
// will be automatically removed after some time (assuming it isn't overridden
// with another set).
//
// Normally a set will only take affect if the given timestamp is greater than
// any existing timestamp for keyA, keyB. You can set evenIfSameTimestamp to
// true to update the location even if the existing timestamp is the same as
// the timestamp passed in, which is useful to update where the value for keyA,
// keyB is now located (moving from memory to a disk file, or from one disk
// file to another, as examples).
//
// The previous timestamp returned can be used to determine if a set had any
// effect. If the previous timestamp is greater than (or equal to, if
// evenIfSameTimestamp is false) the timestamp passed in, the set had no
// effect. This information can be used to decide whether to persist the
// pointed to value, for example.
func (vlm *ValueLocMap) Set(keyA uint64, keyB uint64, timestamp uint64, blockID uint16, offset uint32, length uint32, evenIfSameTimestamp bool) uint64 {
	var oldTimestamp uint64
	// When a resize forces a retry, the first attempt's oldTimestamp is
	// preserved in originalOldTimestamp so the smallest observed value
	// can be reported to the caller (see the end of this function).
	var originalOldTimestampCheck bool
	var originalOldTimestamp uint64
	var vlmPrev *valueLocNode
	vln := vlm.root
VLN_SELECTION:
	// Traverse the tree until we hit a leaf node (no c [and therefore no d]).
	for {
		c := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.c))))
		if c == nil {
			break
		}
		vlmPrev = vln
		if keyA&vln.leftMask == 0 {
			vln = c
		} else {
			vln = (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.d))))
		}
	}
	a := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.a))))
	bix := keyB % uint64(len(a.buckets))
	lix := bix % uint64(len(a.locks))
	// f writes the mapping into store s, consulting fb (if non-nil) for a
	// competing entry; any entry found in fb is merged into s and then
	// released from fb. oldTimestamp is set to the greatest prior
	// timestamp seen for the key.
	f := func(s *valueLocStore, fb *valueLocStore) {
		oldTimestamp = 0
		var sMatch *valueLoc
		var fbMatch *valueLoc
		var unusedItem *valueLoc
		s.locks[lix].Lock()
		if fb != nil {
			fb.locks[lix].Lock()
		}
		for item := &s.buckets[bix]; item != nil; item = item.next {
			if item.blockID == 0 {
				// Remember a free slot in case we need to insert.
				if unusedItem == nil {
					unusedItem = item
				}
				continue
			}
			if item.keyA == keyA && item.keyB == keyB {
				sMatch = item
				break
			}
		}
		if fb != nil {
			for item := &fb.buckets[bix]; item != nil; item = item.next {
				if item.blockID == 0 {
					continue
				}
				if item.keyA == keyA && item.keyB == keyB {
					fbMatch = item
					break
				}
			}
		}
		if sMatch != nil {
			if fbMatch != nil {
				// Entries exist in both stores; keep the newest data in
				// s and free the fallback entry.
				if sMatch.timestamp >= fbMatch.timestamp {
					oldTimestamp = sMatch.timestamp
					if timestamp > sMatch.timestamp || (evenIfSameTimestamp && timestamp == sMatch.timestamp) {
						sMatch.timestamp = timestamp
						sMatch.blockID = blockID
						sMatch.offset = offset
						sMatch.length = length
					}
				} else {
					oldTimestamp = fbMatch.timestamp
					if timestamp > fbMatch.timestamp || (evenIfSameTimestamp && timestamp == fbMatch.timestamp) {
						sMatch.timestamp = timestamp
						sMatch.blockID = blockID
						sMatch.offset = offset
						sMatch.length = length
					} else {
						sMatch.timestamp = fbMatch.timestamp
						sMatch.blockID = fbMatch.blockID
						sMatch.offset = fbMatch.offset
						sMatch.length = fbMatch.length
					}
				}
				atomic.AddInt32(&fb.used, -1)
				fbMatch.blockID = 0
			} else {
				oldTimestamp = sMatch.timestamp
				if timestamp > sMatch.timestamp || (evenIfSameTimestamp && timestamp == sMatch.timestamp) {
					sMatch.timestamp = timestamp
					sMatch.blockID = blockID
					sMatch.offset = offset
					sMatch.length = length
				}
			}
		} else {
			// No entry in s yet: reuse a free slot or chain a new one.
			atomic.AddInt32(&s.used, 1)
			if unusedItem == nil {
				unusedItem = &valueLoc{next: s.buckets[bix].next}
				s.buckets[bix].next = unusedItem
			}
			unusedItem.keyA = keyA
			unusedItem.keyB = keyB
			if fbMatch != nil {
				oldTimestamp = fbMatch.timestamp
				if timestamp > fbMatch.timestamp || (evenIfSameTimestamp && timestamp == fbMatch.timestamp) {
					unusedItem.timestamp = timestamp
					unusedItem.blockID = blockID
					unusedItem.offset = offset
					unusedItem.length = length
				} else {
					// The fallback's data is newer; migrate it into s.
					unusedItem.timestamp = fbMatch.timestamp
					unusedItem.blockID = fbMatch.blockID
					unusedItem.offset = fbMatch.offset
					unusedItem.length = fbMatch.length
				}
				atomic.AddInt32(&fb.used, -1)
				fbMatch.blockID = 0
			} else {
				unusedItem.timestamp = timestamp
				unusedItem.blockID = blockID
				unusedItem.offset = offset
				unusedItem.length = length
			}
		}
		if fb != nil {
			fb.locks[lix].Unlock()
		}
		s.locks[lix].Unlock()
	}
	if keyA&vln.leftMask == 0 {
		// If we're on the left side, even if a split is in progress or happens
		// while we're writing it won't matter because we'd still write to the
		// same memory block, assuming more than one split doesn't occur while
		// we're writing.
		f(a, nil)
		// If our write was not superseded...
		if oldTimestamp < timestamp || (evenIfSameTimestamp && oldTimestamp == timestamp) {
			// If an unsplit happened while we were writing, store a will end
			// up nil and we need to clear what we wrote and retry the write.
			aAgain := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.a))))
			if aAgain == nil {
				a.locks[lix].Lock()
				for item := &a.buckets[bix]; item != nil; item = item.next {
					if item.blockID == 0 {
						continue
					}
					if item.keyA == keyA && item.keyB == keyB {
						if item.timestamp == timestamp && item.blockID == blockID && item.offset == offset && item.length == length {
							item.blockID = 0
						}
						break
					}
				}
				a.locks[lix].Unlock()
				if !originalOldTimestampCheck {
					originalOldTimestampCheck = true
					originalOldTimestamp = oldTimestamp
				}
				vln = vlm.root
				goto VLN_SELECTION
			}
			// Otherwise, we read b and e and if both are nil (no split/unsplit
			// in progress) we check a's used counter to see if we should
			// request a split/unsplit.
			b := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.b))))
			if b == nil {
				e := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.e))))
				if e == nil {
					used := atomic.LoadInt32(&a.used)
					if int(used) > vlm.splitCount {
						go vln.split(vlm.splitCount, vlm.cores)
					} else if used == 0 && vlmPrev != nil {
						go vlmPrev.unsplit(vlm.cores)
					}
				}
			}
		}
	} else {
		// If we're on the right side, then things might be a bit trickier...
		b := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.b))))
		if b != nil {
			// If a split is in progress, then we can write to b checking a for
			// any competing value and we're safe, assuming another split
			// doesn't occur during our write.
			f(b, a)
		} else {
			// If no split is in progress, we'll write to a checking e (if it
			// exists) for any competing value...
			f(a, (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.e)))))
			// If our write was not superseded...
			if oldTimestamp < timestamp || (evenIfSameTimestamp && oldTimestamp == timestamp) {
				// If an unsplit happened while we were writing, store a will
				// end up nil and we need to clear what we wrote and retry the
				// write.
				aAgain := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.a))))
				if aAgain == nil {
					a.locks[lix].Lock()
					for item := &a.buckets[bix]; item != nil; item = item.next {
						if item.blockID == 0 {
							continue
						}
						if item.keyA == keyA && item.keyB == keyB {
							if item.timestamp == timestamp && item.blockID == blockID && item.offset == offset && item.length == length {
								item.blockID = 0
							}
							break
						}
					}
					a.locks[lix].Unlock()
					if !originalOldTimestampCheck {
						originalOldTimestampCheck = true
						originalOldTimestamp = oldTimestamp
					}
					vln = vlm.root
					goto VLN_SELECTION
				}
				// If we pass that test, we'll double check b...
				b := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.b))))
				if b != nil {
					// If b is set, a split started while we were writing, so
					// we'll re-write to b checking a for a competing value
					// (should at least be the one we just wrote) and we're
					// safe, assuming another split doesn't occur during our
					// write.
					if !originalOldTimestampCheck {
						originalOldTimestampCheck = true
						originalOldTimestamp = oldTimestamp
					}
					f(b, a)
				} else {
					// If b isn't set, either no split happened while we were
					// writing, or the split happened and finished while we
					// were writing, so we'll double check d to find out...
					d := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.d))))
					if d != nil {
						// If a complete split occurred while we were writing,
						// we'll clear our write and then we'll traverse the
						// tree node and jump back to any further tree node
						// traversal and retry the write.
						a.locks[lix].Lock()
						for item := &a.buckets[bix]; item != nil; item = item.next {
							if item.blockID == 0 {
								continue
							}
							if item.keyA == keyA && item.keyB == keyB {
								if item.timestamp == timestamp && item.blockID == blockID && item.offset == offset && item.length == length {
									item.blockID = 0
								}
								break
							}
						}
						a.locks[lix].Unlock()
						if !originalOldTimestampCheck {
							originalOldTimestampCheck = true
							originalOldTimestamp = oldTimestamp
						}
						vln = d
						goto VLN_SELECTION
					} else {
						// If no split is in progress or occurred while we were
						// writing, we check e to see if an unsplit is in
						// progress and, if not, we check a's used counter to
						// see if we should request a split/unsplit.
						e := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.e))))
						if e == nil {
							used := atomic.LoadInt32(&a.used)
							if int(used) > vlm.splitCount {
								go vln.split(vlm.splitCount, vlm.cores)
							} else if used == 0 && vlmPrev != nil {
								go vlmPrev.unsplit(vlm.cores)
							}
						}
					}
				}
			}
		}
	}
	// Report the smallest previous timestamp observed across retries so
	// the caller's had-any-effect check stays accurate.
	if originalOldTimestampCheck && originalOldTimestamp < oldTimestamp {
		oldTimestamp = originalOldTimestamp
	}
	return oldTimestamp
}
// isResizing reports whether any node in the location map is currently in
// the middle of a split or unsplit operation.
func (vlm *ValueLocMap) isResizing() bool {
	resizing := vlm.root.isResizing()
	return resizing
}
// isResizing reports whether this node, or any node beneath it, is currently
// performing a split or unsplit resize operation. The read lock is held for
// the duration (including the recursive child checks), matching the original
// locking behavior; defer replaces the repeated RUnlock-before-return calls.
func (vln *valueLocNode) isResizing() bool {
	vln.resizingLock.RLock()
	defer vln.resizingLock.RUnlock()
	if vln.resizing {
		return true
	}
	// A split node hands its range to children c and d; recurse so resizes
	// in progress anywhere below this node are reported too.
	c := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.c))))
	if c != nil && c.isResizing() {
		return true
	}
	d := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.d))))
	return d != nil && d.isResizing()
}
// GatherStats returns the active (non deletion markers) mapping count and
// total length referenced as well as a fmt.Stringer that contains debug
// information if debug is true; note that when debug is true additional
// resources are consumed to collect the additional information. Also note that
// while data collection is ongoing, other operations with the location map
// will be slower, especially if debug is true. You can use the goroutines
// setting to limit the impact of data collection; 0 will use the number of
// cores the ValueLocMap is configured for.
func (vlm *ValueLocMap) GatherStats(goroutines int, debug bool) (uint64, uint64, fmt.Stringer) {
	if goroutines < 1 {
		goroutines = vlm.cores
	}
	stats := &valueLocMapStats{
		goroutines: goroutines,
		debug:      debug,
		funcChan:   make(chan func(), goroutines),
		cores:      vlm.cores,
	}
	funcsDone := make(chan struct{}, 1)
	// Consumer goroutine: each collection func received on funcChan runs in
	// its own goroutine; a nil func marks the end of the stream. funcsDone
	// is signaled once every launched func has completed.
	go func() {
		wg := &sync.WaitGroup{}
		for {
			f := <-stats.funcChan
			if f == nil {
				break
			}
			wg.Add(1)
			go func() {
				f()
				wg.Done()
			}()
		}
		wg.Wait()
		funcsDone <- struct{}{}
	}()
	if stats.debug {
		// depthCounts[0] is a placeholder for the nonexistent depth-zero
		// level; it is stripped off below once collection finishes.
		stats.depthCounts = []uint64{0}
		stats.splitCount = vlm.splitCount
		stats.outOfPlaceKeyDetections = vlm.outOfPlaceKeyDetections
	}
	vlm.root.gatherStatsHelper(stats)
	// Signal end-of-stream, then wait for all queued funcs to finish.
	stats.funcChan <- nil
	<-funcsDone
	if debug {
		stats.depthCounts = stats.depthCounts[1:]
	}
	return stats.active, stats.length, stats
}
// gatherStatsHelper recursively accumulates statistics for this node and its
// descendants into stats. Bucket scans are not run inline; they are queued on
// stats.funcChan so GatherStats can execute them concurrently.
func (vln *valueLocNode) gatherStatsHelper(stats *valueLocMapStats) {
	if stats.debug {
		stats.sections++
		stats.depth++
		if stats.depth < uint64(len(stats.depthCounts)) {
			stats.depthCounts[stats.depth]++
		} else {
			stats.depthCounts = append(stats.depthCounts, 1)
		}
	}
	c := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.c))))
	if c != nil {
		// Node has split; recurse into both children. In debug mode,
		// stats.depth is saved and restored so each child is measured from
		// the same starting depth and the deeper of the two is kept.
		d := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.d))))
		if stats.debug {
			depthOrig := stats.depth
			c.gatherStatsHelper(stats)
			depthC := stats.depth
			stats.depth = depthOrig
			d.gatherStatsHelper(stats)
			if depthC > stats.depth {
				stats.depth = depthC
			}
		} else {
			c.gatherStatsHelper(stats)
			d.gatherStatsHelper(stats)
		}
		return
	}
	// Leaf node: queue a scan for each store present (a is the main store,
	// b a split target still in progress, e an unsplit source).
	f := func(s *valueLocStore) {
		if stats.buckets == 0 {
			stats.buckets = uint64(len(s.buckets))
		}
		stats.funcChan <- func() {
			// Tallies are kept in locals and merged atomically at the end
			// so concurrent scan funcs don't contend on shared counters.
			var bucketCounts []uint64
			var pointerLocs uint64
			var locs uint64
			var unused uint64
			var used uint64
			var active uint64
			var length uint64
			var tombstones uint64
			if stats.debug {
				bucketCounts = make([]uint64, len(s.buckets))
			}
			for bix := len(s.buckets) - 1; bix >= 0; bix-- {
				lix := bix % len(s.locks)
				s.locks[lix].RLock()
				if stats.debug {
					for item := &s.buckets[bix]; item != nil; item = item.next {
						bucketCounts[bix]++
						if item.next != nil {
							pointerLocs++
						}
						locs++
						if item.blockID == 0 {
							unused++
						} else {
							used++
							// An odd timestamp marks a tombstone (deletion
							// marker); even timestamps are active mappings.
							if item.timestamp&1 == 0 {
								active++
								length += uint64(item.length)
							} else {
								tombstones++
							}
						}
					}
				} else {
					for item := &s.buckets[bix]; item != nil; item = item.next {
						if item.blockID > 0 {
							if item.timestamp&1 == 0 {
								active++
								length += uint64(item.length)
							}
						}
					}
				}
				s.locks[lix].RUnlock()
			}
			if stats.debug {
				atomic.AddUint64(&stats.storages, 1)
				atomic.AddUint64(&stats.pointerLocs, pointerLocs)
				atomic.AddUint64(&stats.locs, locs)
				atomic.AddUint64(&stats.used, used)
				atomic.AddUint64(&stats.unused, unused)
				atomic.AddUint64(&stats.tombstones, tombstones)
			}
			atomic.AddUint64(&stats.active, active)
			atomic.AddUint64(&stats.length, length)
		}
	}
	a := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.a))))
	if a != nil {
		f(a)
	}
	b := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.b))))
	if b != nil {
		f(b)
	}
	e := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.e))))
	if e != nil {
		f(e)
	}
}
// String renders the collected statistics as aligned text. In debug mode the
// full set of counters gathered by GatherStats is included; otherwise only
// the active count and total length are shown.
func (stats *valueLocMapStats) String() string {
	if !stats.debug {
		return brimtext.Align([][]string{
			{"active", fmt.Sprintf("%d", stats.active)},
			{"length", fmt.Sprintf("%d", stats.length)},
		}, nil)
	}
	// Render the per-depth node counts as a space-separated list; the length
	// guard prevents an index panic on a stats value whose depthCounts was
	// never populated.
	depthCounts := ""
	if len(stats.depthCounts) > 0 {
		depthCounts = fmt.Sprintf("%d", stats.depthCounts[0])
		for i := 1; i < len(stats.depthCounts); i++ {
			depthCounts += fmt.Sprintf(" %d", stats.depthCounts[i])
		}
	}
	return brimtext.Align([][]string{
		{"statsGoroutines", fmt.Sprintf("%d", stats.goroutines)},
		{"cores", fmt.Sprintf("%d", stats.cores)},
		{"pageSize", fmt.Sprintf("%d", stats.buckets*uint64(unsafe.Sizeof(valueLoc{})))},
		{"splitMultiplier", fmt.Sprintf("%f", float64(stats.splitCount)/float64(stats.buckets))},
		{"depth", fmt.Sprintf("%d", stats.depth)},
		{"depthCounts", depthCounts},
		{"sections", fmt.Sprintf("%d", stats.sections)},
		{"storages", fmt.Sprintf("%d", stats.storages)},
		{"bucketsPerPage", fmt.Sprintf("%d", stats.buckets)},
		{"splitCount", fmt.Sprintf("%d", stats.splitCount)},
		{"outOfPlaceKeyDetections", fmt.Sprintf("%d", stats.outOfPlaceKeyDetections)},
		{"locs", fmt.Sprintf("%d", stats.locs)},
		{"pointerLocs", fmt.Sprintf("%d %.1f%%", stats.pointerLocs, float64(stats.pointerLocs)/float64(stats.locs)*100)},
		{"unused", fmt.Sprintf("%d %.1f%%", stats.unused, float64(stats.unused)/float64(stats.locs)*100)},
		{"used", fmt.Sprintf("%d", stats.used)},
		{"active", fmt.Sprintf("%d", stats.active)},
		{"length", fmt.Sprintf("%d", stats.length)},
		{"tombstones", fmt.Sprintf("%d", stats.tombstones)},
	}, nil)
}
// split moves the right-hand half of this node's key range into a new store
// and converts the node into an interior node with two children, c (left)
// and d (right). splitCount is the used-entry threshold that triggered the
// split; cores bounds the number of copy goroutines.
func (vln *valueLocNode) split(splitCount int, cores int) {
	vln.resizingLock.Lock()
	a := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.a))))
	c := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.c))))
	// Bail out if a resize is already underway, the node already split
	// (c set), or usage has dropped back below the threshold.
	if vln.resizing || c != nil || int(atomic.LoadInt32(&a.used)) < splitCount {
		vln.resizingLock.Unlock()
		return
	}
	vln.resizing = true
	vln.resizingLock.Unlock()
	// b is the new store for the right half; publishing it signals writers
	// to mirror their writes while the copy is in progress.
	b := &valueLocStore{
		buckets: make([]valueLoc, len(a.buckets)),
		locks:   make([]sync.RWMutex, len(a.locks)),
	}
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&vln.b)), unsafe.Pointer(b))
	wg := &sync.WaitGroup{}
	var copies uint32
	var clears uint32
	// f copies right-half items (keyA & leftMask != 0) from a to b for the
	// buckets assigned to coreOffset; when clear is true it also removes
	// the copied items from a.
	f := func(coreOffset int, clear bool) {
		for bix := len(a.buckets) - 1 - coreOffset; bix >= 0; bix -= cores {
			lix := bix % len(a.locks)
			b.locks[lix].Lock()
			a.locks[lix].Lock()
		NEXT_ITEM_A:
			for itemA := &a.buckets[bix]; itemA != nil; itemA = itemA.next {
				if itemA.blockID == 0 || itemA.keyA&vln.leftMask == 0 {
					continue
				}
				var unusedItemB *valueLoc
				for itemB := &b.buckets[bix]; itemB != nil; itemB = itemB.next {
					if itemB.blockID == 0 {
						if unusedItemB == nil {
							unusedItemB = itemB
						}
						continue
					}
					if itemA.keyA == itemB.keyA && itemA.keyB == itemB.keyB {
						// Key already present in b; keep whichever entry has
						// the newer timestamp.
						if itemA.timestamp > itemB.timestamp {
							itemB.keyA = itemA.keyA
							itemB.keyB = itemA.keyB
							itemB.timestamp = itemA.timestamp
							itemB.blockID = itemA.blockID
							itemB.offset = itemA.offset
							itemB.length = itemA.length
							atomic.AddUint32(&copies, 1)
						}
						if clear {
							atomic.AddInt32(&a.used, -1)
							itemA.blockID = 0
							atomic.AddUint32(&clears, 1)
						}
						continue NEXT_ITEM_A
					}
				}
				atomic.AddInt32(&b.used, 1)
				// Reuse a free slot in b's bucket chain if one was seen;
				// otherwise insert a new entry right after the bucket head.
				if unusedItemB != nil {
					unusedItemB.keyA = itemA.keyA
					unusedItemB.keyB = itemA.keyB
					unusedItemB.timestamp = itemA.timestamp
					unusedItemB.blockID = itemA.blockID
					unusedItemB.offset = itemA.offset
					unusedItemB.length = itemA.length
				} else {
					b.buckets[bix].next = &valueLoc{
						next:      b.buckets[bix].next,
						keyA:      itemA.keyA,
						keyB:      itemA.keyB,
						timestamp: itemA.timestamp,
						blockID:   itemA.blockID,
						offset:    itemA.offset,
						length:    itemA.length,
					}
				}
				atomic.AddUint32(&copies, 1)
				if clear {
					atomic.AddInt32(&a.used, -1)
					itemA.blockID = 0
					atomic.AddUint32(&clears, 1)
				}
			}
			a.locks[lix].Unlock()
			b.locks[lix].Unlock()
		}
		wg.Done()
	}
	// Phase one: copy without clearing, repeating until a pass makes no
	// copies (writers may still be adding to a); at least two passes run.
	for passes := 0; passes < 2 || copies > 0; passes++ {
		copies = 0
		wg.Add(cores)
		for core := 0; core < cores; core++ {
			go f(core, false)
		}
		wg.Wait()
	}
	// Phase two: copy and clear until both counters settle at zero.
	for passes := 0; passes < 2 || copies > 0 || clears > 0; passes++ {
		copies = 0
		clears = 0
		wg.Add(cores)
		for core := 0; core < cores; core++ {
			go f(core, true)
		}
		wg.Wait()
	}
	// Publish the right child (d) first, then the left child (c); readers
	// treat a non-nil c as "this node has split".
	newVLN := &valueLocNode{
		leftMask:   vln.leftMask >> 1,
		rangeStart: vln.rangeStart + vln.leftMask,
		rangeStop:  vln.rangeStop,
		a:          b,
	}
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&vln.d)), unsafe.Pointer(newVLN))
	newVLN = &valueLocNode{
		leftMask:   vln.leftMask >> 1,
		rangeStart: vln.rangeStart,
		rangeStop:  vln.rangeStop - vln.leftMask,
		a:          a,
	}
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&vln.c)), unsafe.Pointer(newVLN))
	vln.resizingLock.Lock()
	vln.resizing = false
	vln.resizingLock.Unlock()
}
// unsplit merges the two child nodes (c and d) back into this node, copying
// every item from the right child's store (e) into the left child's store
// (a), which then becomes this node's single store. cores bounds the number
// of copy goroutines.
func (vln *valueLocNode) unsplit(cores int) {
	vln.resizingLock.Lock()
	c := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.c))))
	if vln.resizing || c == nil {
		vln.resizingLock.Unlock()
		return
	}
	// Both children must be leaves (not themselves split) and not resizing;
	// otherwise the unsplit is abandoned.
	c.resizingLock.Lock()
	cc := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&c.c))))
	if c.resizing || cc != nil {
		c.resizingLock.Unlock()
		vln.resizingLock.Unlock()
		return
	}
	d := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.d))))
	d.resizingLock.Lock()
	dc := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&d.c))))
	if d.resizing || dc != nil {
		d.resizingLock.Unlock()
		c.resizingLock.Unlock()
		vln.resizingLock.Unlock()
		return
	}
	d.resizing = true
	c.resizing = true
	vln.resizing = true
	d.resizingLock.Unlock()
	c.resizingLock.Unlock()
	vln.resizingLock.Unlock()
	a := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&c.a))))
	e := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&d.a))))
	// Even if a has fewer items than e, we copy items from e to a because
	// get/set and other routines assume a is left and e is right.
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&vln.a)), nil)
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&vln.b)), nil)
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&vln.e)), unsafe.Pointer(e))
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&vln.a)), unsafe.Pointer(a))
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&c.a)), nil)
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&d.a)), nil)
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&vln.c)), nil)
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&vln.d)), nil)
	wg := &sync.WaitGroup{}
	var copies uint32
	var clears uint32
	// f copies items from e into a for the buckets assigned to coreOffset;
	// when clear is true it also removes the copied items from e. On key
	// collisions the entry with the newer timestamp wins.
	f := func(coreOffset int, clear bool) {
		for bix := len(e.buckets) - 1 - coreOffset; bix >= 0; bix -= cores {
			lix := bix % len(e.locks)
			a.locks[lix].Lock()
			e.locks[lix].Lock()
		NEXT_ITEM_E:
			for itemE := &e.buckets[bix]; itemE != nil; itemE = itemE.next {
				if itemE.blockID == 0 {
					continue
				}
				var unusedItemA *valueLoc
				for itemA := &a.buckets[bix]; itemA != nil; itemA = itemA.next {
					if itemA.blockID == 0 {
						if unusedItemA == nil {
							unusedItemA = itemA
						}
						continue
					}
					if itemE.keyA == itemA.keyA && itemE.keyB == itemA.keyB {
						if itemE.timestamp > itemA.timestamp {
							itemA.keyA = itemE.keyA
							itemA.keyB = itemE.keyB
							itemA.timestamp = itemE.timestamp
							itemA.blockID = itemE.blockID
							itemA.offset = itemE.offset
							itemA.length = itemE.length
							atomic.AddUint32(&copies, 1)
						}
						if clear {
							atomic.AddInt32(&e.used, -1)
							itemE.blockID = 0
							atomic.AddUint32(&clears, 1)
						}
						continue NEXT_ITEM_E
					}
				}
				atomic.AddInt32(&a.used, 1)
				// Reuse a free slot in a's bucket chain if one was seen;
				// otherwise insert a new entry right after the bucket head.
				if unusedItemA != nil {
					unusedItemA.keyA = itemE.keyA
					unusedItemA.keyB = itemE.keyB
					unusedItemA.timestamp = itemE.timestamp
					unusedItemA.blockID = itemE.blockID
					unusedItemA.offset = itemE.offset
					unusedItemA.length = itemE.length
				} else {
					a.buckets[bix].next = &valueLoc{
						next:      a.buckets[bix].next,
						keyA:      itemE.keyA,
						keyB:      itemE.keyB,
						timestamp: itemE.timestamp,
						blockID:   itemE.blockID,
						offset:    itemE.offset,
						length:    itemE.length,
					}
				}
				atomic.AddUint32(&copies, 1)
				if clear {
					atomic.AddInt32(&e.used, -1)
					itemE.blockID = 0
					atomic.AddUint32(&clears, 1)
				}
			}
			e.locks[lix].Unlock()
			a.locks[lix].Unlock()
		}
		wg.Done()
	}
	// Phase one: copy without clearing until a pass makes no copies.
	for passes := 0; passes < 2 || copies > 0; passes++ {
		copies = 0
		wg.Add(cores)
		for core := 0; core < cores; core++ {
			go f(core, false)
		}
		wg.Wait()
	}
	// Phase two: copy and clear until both counters settle at zero.
	for passes := 0; passes < 2 || copies > 0 || clears > 0; passes++ {
		copies = 0
		clears = 0
		wg.Add(cores)
		for core := 0; core < cores; core++ {
			go f(core, true)
		}
		wg.Wait()
	}
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&vln.e)), nil)
	vln.resizingLock.Lock()
	vln.resizing = false
	vln.resizingLock.Unlock()
}
// Scan will scan the key range (of keyA) from start to stop (inclusive) and
// discard any tombstones older than the tombstoneCutoff. Keys found stored
// outside their proper node's range are also extracted and reinserted.
func (vlm *ValueLocMap) Scan(tombstoneCutoff uint64, start uint64, stop uint64) {
	vlm.root.scan(vlm, tombstoneCutoff, start, stop)
}
// ScanCount will scan the key range (of keyA) from start to stop (inclusive),
// discard any tombstones older than the tombstoneCutoff, and return the count
// of mappings found (including deletion markers). If the count at any point
// exceeds the max given, the scan will stop and the count thus far will be
// returned.
func (vlm *ValueLocMap) ScanCount(tombstoneCutoff uint64, start uint64, stop uint64, max uint64, ) uint64 {
	// The trailing 0 is the initial running count threaded through the
	// recursive walk.
	return vlm.root.scanCount(vlm, tombstoneCutoff, start, stop, max, 0)
}
// ScanCallback will scan the key range (of keyA) from start to stop
// (inclusive) and call the callback with any mappings found (including
// deletion markers). Unlike Scan, no tombstones are discarded.
func (vlm *ValueLocMap) ScanCallback(start uint64, stop uint64, callback func(keyA uint64, keyB uint64, timestamp uint64)) {
	vlm.root.scanCallback(vlm, start, stop, callback)
}
// ScanCallbackFull will scan the key range (of keyA) from start to stop
// (inclusive) and call the callback with any mappings found (including
// deletion markers). Unlike ScanCallback, the callback also receives the
// full location details (blockID, offset, length) of each mapping.
func (vlm *ValueLocMap) ScanCallbackFull(start uint64, stop uint64, callback func(keyA uint64, keyB uint64, timestamp uint64, blockID uint16, offset uint32, length uint32)) {
	vlm.root.scanCallbackFull(vlm, start, stop, callback)
}
// scan recursively walks the node tree discarding tombstones older than
// tombstoneCutoff for keys (keyA) within pstart..pstop inclusive. Keys found
// outside this node's own range are cleared here and re-queued via vlm.Set.
func (vln *valueLocNode) scan(vlm *ValueLocMap, tombstoneCutoff uint64, pstart uint64, pstop uint64) {
	if vln.rangeStart > pstop {
		return
	}
	if vln.rangeStop < pstart {
		return
	}
	c := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.c))))
	if c != nil {
		// Interior node: recurse into both children.
		d := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.d))))
		c.scan(vlm, tombstoneCutoff, pstart, pstop)
		d.scan(vlm, tombstoneCutoff, pstart, pstop)
		return
	}
	a := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.a))))
	b := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.b))))
	if b != nil {
		// Just skip splits in progress and assume future scans will eventually
		// hit these areas.
		return
	}
	if atomic.LoadInt32(&a.used) <= 0 {
		return
	}
	for bix := len(a.buckets) - 1; bix >= 0; bix-- {
		lix := bix % len(a.locks)
		a.locks[lix].Lock()
		for itemA := &a.buckets[bix]; itemA != nil; itemA = itemA.next {
			if itemA.blockID == 0 {
				continue
			}
			if itemA.keyA < vln.rangeStart || itemA.keyA > vln.rangeStop {
				// Out of place key, extract and reinsert.
				atomic.AddInt32(&vlm.outOfPlaceKeyDetections, 1)
				go vlm.Set(itemA.keyA, itemA.keyB, itemA.timestamp, itemA.blockID, itemA.offset, itemA.length, false)
				itemA.blockID = 0
				continue
			}
			if itemA.keyA < pstart || itemA.keyA > pstop {
				continue
			}
			// Odd timestamp marks a tombstone; discard once older than the
			// cutoff.
			if itemA.timestamp&1 == 1 && itemA.timestamp < tombstoneCutoff {
				atomic.AddInt32(&a.used, -1)
				itemA.blockID = 0
			}
		}
		a.locks[lix].Unlock()
	}
}
// scanCount recursively counts mappings (including deletion markers) with
// keyA within pstart..pstop inclusive, discarding tombstones older than
// tombstoneCutoff along the way. count is the running total threaded through
// the recursion; once it exceeds max, the walk stops early and the total so
// far is returned.
func (vln *valueLocNode) scanCount(vlm *ValueLocMap, tombstoneCutoff uint64, pstart uint64, pstop uint64, max uint64, count uint64) uint64 {
	if vln.rangeStart > pstop {
		return count
	}
	if vln.rangeStop < pstart {
		return count
	}
	c := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.c))))
	if c != nil {
		// Interior node: count the left child first, then the right.
		d := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.d))))
		count = c.scanCount(vlm, tombstoneCutoff, pstart, pstop, max, count)
		if count > max {
			return count
		}
		return d.scanCount(vlm, tombstoneCutoff, pstart, pstop, max, count)
	}
	a := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.a))))
	b := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.b))))
	if b == nil {
		if atomic.LoadInt32(&a.used) <= 0 {
			return count
		}
		for bix := len(a.buckets) - 1; bix >= 0; bix-- {
			lix := bix % len(a.locks)
			a.locks[lix].Lock()
			for itemA := &a.buckets[bix]; itemA != nil; itemA = itemA.next {
				if itemA.blockID == 0 {
					continue
				}
				if itemA.keyA < vln.rangeStart || itemA.keyA > vln.rangeStop {
					// Out of place key, extract and reinsert.
					atomic.AddInt32(&vlm.outOfPlaceKeyDetections, 1)
					go vlm.Set(itemA.keyA, itemA.keyB, itemA.timestamp, itemA.blockID, itemA.offset, itemA.length, false)
					itemA.blockID = 0
					continue
				}
				if itemA.keyA < pstart || itemA.keyA > pstop {
					continue
				}
				// Odd timestamp marks a tombstone; discard it once older
				// than the cutoff (it still counts toward this total).
				if itemA.timestamp&1 == 1 && itemA.timestamp < tombstoneCutoff {
					atomic.AddInt32(&a.used, -1)
					itemA.blockID = 0
				}
				count++
				if count > max {
					a.locks[lix].Unlock()
					return count
				}
			}
			a.locks[lix].Unlock()
		}
	} else {
		// For tombstone and out of place key detection, just skip splits in
		// progress and assume future scans will eventually hit these areas.
		// Means we can use read locks here instead of write locks.
		if atomic.LoadInt32(&a.used) <= 0 && atomic.LoadInt32(&b.used) <= 0 {
			return count
		}
		for bix := len(a.buckets) - 1; bix >= 0; bix-- {
			lix := bix % len(a.locks)
			b.locks[lix].RLock()
			a.locks[lix].RLock()
		NEXT_ITEM_A:
			for itemA := &a.buckets[bix]; itemA != nil; itemA = itemA.next {
				if itemA.blockID == 0 || itemA.keyA < pstart || itemA.keyA > pstop {
					continue
				}
				// Skip an a entry superseded by b (the b pass below handles
				// it). NOTE(review): both this pass and the b pass skip on
				// >=, so a key present in both stores with EQUAL timestamps
				// appears to be counted by neither — confirm intended.
				for itemB := &b.buckets[bix]; itemB != nil; itemB = itemB.next {
					if itemB.blockID == 0 {
						continue
					}
					if itemB.keyA == itemA.keyA && itemB.keyB == itemA.keyB {
						if itemB.timestamp >= itemA.timestamp {
							continue NEXT_ITEM_A
						}
						break
					}
				}
				count++
				if count > max {
					a.locks[lix].RUnlock()
					b.locks[lix].RUnlock()
					return count
				}
			}
		NEXT_ITEM_B:
			for itemB := &b.buckets[bix]; itemB != nil; itemB = itemB.next {
				if itemB.blockID == 0 || itemB.keyA < pstart || itemB.keyA > pstop {
					continue
				}
				for itemA := &a.buckets[bix]; itemA != nil; itemA = itemA.next {
					if itemA.blockID == 0 {
						continue
					}
					if itemA.keyA == itemB.keyA && itemA.keyB == itemB.keyB {
						if itemA.timestamp >= itemB.timestamp {
							continue NEXT_ITEM_B
						}
						break
					}
				}
				count++
				if count > max {
					a.locks[lix].RUnlock()
					b.locks[lix].RUnlock()
					return count
				}
			}
			a.locks[lix].RUnlock()
			b.locks[lix].RUnlock()
		}
	}
	return count
}
// scanCallback recursively invokes callback for every mapping (including
// deletion markers) with keyA within pstart..pstop inclusive. When a split
// is in progress (store b present), entries present in both stores are
// de-duplicated by key with the newer timestamp winning.
func (vln *valueLocNode) scanCallback(vlm *ValueLocMap, pstart uint64, pstop uint64, callback func(keyA uint64, keyB uint64, timestamp uint64)) {
	if vln.rangeStart > pstop {
		return
	}
	if vln.rangeStop < pstart {
		return
	}
	c := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.c))))
	if c != nil {
		// Interior node: recurse into both children.
		d := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.d))))
		c.scanCallback(vlm, pstart, pstop, callback)
		d.scanCallback(vlm, pstart, pstop, callback)
		return
	}
	a := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.a))))
	b := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.b))))
	if b == nil {
		if atomic.LoadInt32(&a.used) <= 0 {
			return
		}
		for bix := len(a.buckets) - 1; bix >= 0; bix-- {
			lix := bix % len(a.locks)
			a.locks[lix].RLock()
			for itemA := &a.buckets[bix]; itemA != nil; itemA = itemA.next {
				if itemA.blockID == 0 || itemA.keyA < pstart || itemA.keyA > pstop {
					continue
				}
				callback(itemA.keyA, itemA.keyB, itemA.timestamp)
			}
			a.locks[lix].RUnlock()
		}
	} else {
		if atomic.LoadInt32(&a.used) <= 0 && atomic.LoadInt32(&b.used) <= 0 {
			return
		}
		for bix := len(a.buckets) - 1; bix >= 0; bix-- {
			lix := bix % len(a.locks)
			b.locks[lix].RLock()
			a.locks[lix].RLock()
		NEXT_ITEM_A:
			for itemA := &a.buckets[bix]; itemA != nil; itemA = itemA.next {
				if itemA.blockID == 0 || itemA.keyA < pstart || itemA.keyA > pstop {
					continue
				}
				// Skip an a entry superseded by b (the b pass below handles
				// it). NOTE(review): both passes skip on >=, so a key in
				// both stores with EQUAL timestamps appears to be reported
				// by neither pass — confirm intended.
				for itemB := &b.buckets[bix]; itemB != nil; itemB = itemB.next {
					if itemB.blockID == 0 {
						continue
					}
					if itemB.keyA == itemA.keyA && itemB.keyB == itemA.keyB {
						if itemB.timestamp >= itemA.timestamp {
							continue NEXT_ITEM_A
						}
						break
					}
				}
				callback(itemA.keyA, itemA.keyB, itemA.timestamp)
			}
		NEXT_ITEM_B:
			for itemB := &b.buckets[bix]; itemB != nil; itemB = itemB.next {
				if itemB.blockID == 0 || itemB.keyA < pstart || itemB.keyA > pstop {
					continue
				}
				for itemA := &a.buckets[bix]; itemA != nil; itemA = itemA.next {
					if itemA.blockID == 0 {
						continue
					}
					if itemA.keyA == itemB.keyA && itemA.keyB == itemB.keyB {
						if itemA.timestamp >= itemB.timestamp {
							continue NEXT_ITEM_B
						}
						break
					}
				}
				callback(itemB.keyA, itemB.keyB, itemB.timestamp)
			}
			a.locks[lix].RUnlock()
			b.locks[lix].RUnlock()
		}
	}
}
// scanCallbackFull is like scanCallback but passes the full location details
// (blockID, offset, length) to the callback along with the key and
// timestamp. The split de-duplication logic is identical.
func (vln *valueLocNode) scanCallbackFull(vlm *ValueLocMap, pstart uint64, pstop uint64, callback func(keyA uint64, keyB uint64, timestamp uint64, blockID uint16, offset uint32, length uint32)) {
	if vln.rangeStart > pstop {
		return
	}
	if vln.rangeStop < pstart {
		return
	}
	c := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.c))))
	if c != nil {
		// Interior node: recurse into both children.
		d := (*valueLocNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.d))))
		c.scanCallbackFull(vlm, pstart, pstop, callback)
		d.scanCallbackFull(vlm, pstart, pstop, callback)
		return
	}
	a := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.a))))
	b := (*valueLocStore)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&vln.b))))
	if b == nil {
		if atomic.LoadInt32(&a.used) <= 0 {
			return
		}
		for bix := len(a.buckets) - 1; bix >= 0; bix-- {
			lix := bix % len(a.locks)
			a.locks[lix].RLock()
			for itemA := &a.buckets[bix]; itemA != nil; itemA = itemA.next {
				if itemA.blockID == 0 || itemA.keyA < pstart || itemA.keyA > pstop {
					continue
				}
				callback(itemA.keyA, itemA.keyB, itemA.timestamp, itemA.blockID, itemA.offset, itemA.length)
			}
			a.locks[lix].RUnlock()
		}
	} else {
		if atomic.LoadInt32(&a.used) <= 0 && atomic.LoadInt32(&b.used) <= 0 {
			return
		}
		for bix := len(a.buckets) - 1; bix >= 0; bix-- {
			lix := bix % len(a.locks)
			b.locks[lix].RLock()
			a.locks[lix].RLock()
		NEXT_ITEM_A:
			for itemA := &a.buckets[bix]; itemA != nil; itemA = itemA.next {
				if itemA.blockID == 0 || itemA.keyA < pstart || itemA.keyA > pstop {
					continue
				}
				// Skip an a entry superseded by b (the b pass below handles
				// it). NOTE(review): both passes skip on >=, so a key in
				// both stores with EQUAL timestamps appears to be reported
				// by neither pass — confirm intended.
				for itemB := &b.buckets[bix]; itemB != nil; itemB = itemB.next {
					if itemB.blockID == 0 {
						continue
					}
					if itemB.keyA == itemA.keyA && itemB.keyB == itemA.keyB {
						if itemB.timestamp >= itemA.timestamp {
							continue NEXT_ITEM_A
						}
						break
					}
				}
				callback(itemA.keyA, itemA.keyB, itemA.timestamp, itemA.blockID, itemA.offset, itemA.length)
			}
		NEXT_ITEM_B:
			for itemB := &b.buckets[bix]; itemB != nil; itemB = itemB.next {
				if itemB.blockID == 0 || itemB.keyA < pstart || itemB.keyA > pstop {
					continue
				}
				for itemA := &a.buckets[bix]; itemA != nil; itemA = itemA.next {
					if itemA.blockID == 0 {
						continue
					}
					if itemA.keyA == itemB.keyA && itemA.keyB == itemB.keyB {
						if itemA.timestamp >= itemB.timestamp {
							continue NEXT_ITEM_B
						}
						break
					}
				}
				callback(itemB.keyA, itemB.keyB, itemB.timestamp, itemB.blockID, itemB.offset, itemB.length)
			}
			a.locks[lix].RUnlock()
			b.locks[lix].RUnlock()
		}
	}
}
Dropping the old valuelocmap.
|
/*
* This file is part of the kubevirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2018 Red Hat, Inc.
*
*/
package tests_test
import (
"flag"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/google/goexpect"
"fmt"
v12 "k8s.io/api/core/v1"
v13 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/onsi/ginkgo/extensions/table"
"k8s.io/apimachinery/pkg/util/intstr"
"kubevirt.io/kubevirt/pkg/api/v1"
"kubevirt.io/kubevirt/pkg/kubecli"
"kubevirt.io/kubevirt/pkg/log"
"kubevirt.io/kubevirt/tests"
)
var _ = Describe("Networking", func() {
flag.Parse()
virtClient, err := kubecli.GetKubevirtClient()
tests.PanicOnError(err)
var inboundVM *v1.VirtualMachine
var outboundVM *v1.VirtualMachine
// newHelloWorldJob takes a dns entry or an IP which it will use create a pod
// which tries to contact the host on port 1500. It expects to receive "Hello World!" to succeed.
newHelloWorldJob := func(host string) *v12.Pod {
check := []string{fmt.Sprintf(`set -x; x="$(head -n 1 < <(nc %s 1500 -i 1 -w 1))"; echo "$x" ; if [ "$x" = "Hello World!" ]; then echo "succeeded"; exit 0; else echo "failed"; exit 1; fi`, host)}
job := tests.RenderJob("netcat", []string{"/bin/bash", "-c"}, check)
return job
}
logPodLogs := func(pod *v12.Pod) {
defer GinkgoRecover()
var s int64 = 500
logs := virtClient.CoreV1().Pods(inboundVM.Namespace).GetLogs(pod.Name, &v12.PodLogOptions{SinceSeconds: &s})
rawLogs, err := logs.DoRaw()
Expect(err).ToNot(HaveOccurred())
log.Log.Infof("%v", rawLogs)
}
waitForPodToFinish := func(pod *v12.Pod) v12.PodPhase {
Eventually(func() v12.PodPhase {
j, err := virtClient.Core().Pods(inboundVM.ObjectMeta.Namespace).Get(pod.ObjectMeta.Name, v13.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return j.Status.Phase
}, 30*time.Second, 1*time.Second).Should(Or(Equal(v12.PodSucceeded), Equal(v12.PodFailed)))
j, err := virtClient.Core().Pods(inboundVM.ObjectMeta.Namespace).Get(pod.ObjectMeta.Name, v13.GetOptions{})
Expect(err).ToNot(HaveOccurred())
logPodLogs(pod)
return j.Status.Phase
}
// TODO this is not optimal, since the one test which will initiate this, will look slow
tests.BeforeAll(func() {
tests.BeforeTestCleanup()
inboundChan := make(chan *v1.VirtualMachine)
outboundChan := make(chan *v1.VirtualMachine)
createAndLogin := func(labels map[string]string, hostname string, subdomain string) (vm *v1.VirtualMachine) {
vm = tests.NewRandomVMWithEphemeralDiskAndUserdata(tests.RegistryDiskFor(tests.RegistryDiskCirros), "#!/bin/bash\necho 'hello'\n")
vm.Labels = labels
vm.Spec.Subdomain = subdomain
vm.Spec.Hostname = hostname
// Start VM
vm, err = virtClient.VM(tests.NamespaceTestDefault).Create(vm)
Expect(err).ToNot(HaveOccurred())
tests.WaitForSuccessfulVMStartIgnoreWarnings(vm)
// Fetch the new VM with updated status
vm, err = virtClient.VM(tests.NamespaceTestDefault).Get(vm.ObjectMeta.Name, v13.GetOptions{})
Expect(err).ToNot(HaveOccurred())
// Lets make sure that the OS is up by waiting until we can login
expecter, err := tests.LoggedInCirrosExpecter(vm)
defer expecter.Close()
Expect(err).ToNot(HaveOccurred())
return vm
}
// Create inbound VM which listens on port 1500 for incoming connections and repeatedly returns "Hello World!"
go func() {
defer GinkgoRecover()
vm := createAndLogin(map[string]string{"expose": "me"}, "myvm", "my-subdomain")
expecter, _, err := tests.NewConsoleExpecter(virtClient, vm, 10*time.Second)
defer expecter.Close()
Expect(err).ToNot(HaveOccurred())
resp, err := expecter.ExpectBatch([]expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: "\\$ "},
&expect.BSnd{S: "screen -d -m nc -klp 1500 -e echo -e \"Hello World!\"\n"},
&expect.BExp{R: "\\$ "},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: "0"},
}, 60*time.Second)
log.DefaultLogger().Infof("%v", resp)
Expect(err).ToNot(HaveOccurred())
inboundChan <- vm
}()
// Create a VM and log in, to allow executing arbitrary commands from the terminal
go func() {
defer GinkgoRecover()
vm := createAndLogin(nil, "", "")
outboundChan <- vm
}()
inboundVM = <-inboundChan
outboundVM = <-outboundChan
})
Context("VirtualMachine attached to the pod network", func() {
table.DescribeTable("should be able to reach", func(destination string) {
var cmdCheck, addrShow, addr string
// assuming pod network is of standard MTU = 1500 (minus 50 bytes for vxlan overhead)
expectedMtu := 1450
ipHeaderSize := 28 // IPv4 specific
payloadSize := expectedMtu - ipHeaderSize
// Wait until the VM is booted, ping google and check if we can reach the internet
expecter, _, err := tests.NewConsoleExpecter(virtClient, outboundVM, 10*time.Second)
defer expecter.Close()
Expect(err).ToNot(HaveOccurred())
switch destination {
case "Internet":
addr = "www.google.com"
case "InboundVM":
addr = inboundVM.Status.Interfaces[0].IP
}
By("checking br1 MTU inside the pod")
vmPod := tests.GetRunningPodByLabel(outboundVM.Name, v1.DomainLabel, tests.NamespaceTestDefault)
output, err := tests.ExecuteCommandOnPod(
virtClient,
vmPod,
vmPod.Spec.Containers[0].Name,
[]string{"ip", "address", "show", "br1"},
)
log.Log.Infof("%v", output)
Expect(err).ToNot(HaveOccurred())
// the following substring is part of 'ip address show' output
expectedMtuString := fmt.Sprintf("mtu %d", expectedMtu)
Expect(strings.Contains(output, expectedMtuString)).To(BeTrue())
By("checking eth0 MTU inside the VM")
addrShow = "ip address show eth0\n"
out, err := expecter.ExpectBatch([]expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: "\\$ "},
&expect.BSnd{S: addrShow},
&expect.BExp{R: fmt.Sprintf(".*%s.*\n", expectedMtuString)},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: "0"},
}, 180*time.Second)
log.Log.Infof("%v", out)
Expect(err).ToNot(HaveOccurred())
By("checking the VM can send MTU sized frames to another VM")
// NOTE: VM is not directly accessible from inside the pod because
// we transferred its IP address under DHCP server control, so the
// only thing we can validate is connectivity between VMs
//
// NOTE: cirros ping doesn't support -M do that could be used to
// validate end-to-end connectivity with Don't Fragment flag set
cmdCheck = fmt.Sprintf("ping %s -c 1 -w 5 -s %d\n", addr, payloadSize)
out, err = expecter.ExpectBatch([]expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: "\\$ "},
&expect.BSnd{S: cmdCheck},
&expect.BExp{R: "\\$ "},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: "0"},
}, 180*time.Second)
log.Log.Infof("%v", out)
Expect(err).ToNot(HaveOccurred())
},
table.Entry("the Inbound VM", "InboundVM"),
table.Entry("the internet", "Internet"),
)
table.DescribeTable("should be reachable via the propagated IP from a Pod", func(op v12.NodeSelectorOperator, hostNetwork bool) {
ip := inboundVM.Status.Interfaces[0].IP
//TODO if node count is 1, skip when v12.NodeSelectorOpNotIn is used
nodes, err := virtClient.CoreV1().Nodes().List(v13.ListOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(nodes.Items).ToNot(BeEmpty())
if len(nodes.Items) == 1 && op == v12.NodeSelectorOpNotIn {
Skip("Skip network test that requires multiple nodes when only one node is present.")
}
// Run netcat and give it one second to ghet "Hello World!" back from the VM
job := newHelloWorldJob(ip)
job.Spec.Affinity = &v12.Affinity{
NodeAffinity: &v12.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v12.NodeSelector{
NodeSelectorTerms: []v12.NodeSelectorTerm{
{
MatchExpressions: []v12.NodeSelectorRequirement{
{Key: "kubernetes.io/hostname", Operator: op, Values: []string{inboundVM.Status.NodeName}},
},
},
},
},
},
}
job.Spec.HostNetwork = hostNetwork
job, err = virtClient.CoreV1().Pods(inboundVM.ObjectMeta.Namespace).Create(job)
Expect(err).ToNot(HaveOccurred())
phase := waitForPodToFinish(job)
Expect(phase).To(Equal(v12.PodSucceeded))
},
table.Entry("on the same node from Pod", v12.NodeSelectorOpIn, false),
table.Entry("on a different node from Pod", v12.NodeSelectorOpNotIn, false),
table.Entry("on the same node from Node", v12.NodeSelectorOpIn, true),
table.Entry("on a different node from Node", v12.NodeSelectorOpNotIn, true),
)
// Exposes the inbound VM (labelled expose=me) through a ClusterIP service
// and verifies it is reachable by service name — and unreachable through a
// name no service owns.
Context("with a service matching the vm exposed", func() {
	BeforeEach(func() {
		service := &v12.Service{
			ObjectMeta: v13.ObjectMeta{
				Name: "myservice",
			},
			Spec: v12.ServiceSpec{
				Selector: map[string]string{
					"expose": "me",
				},
				Ports: []v12.ServicePort{
					{Protocol: v12.ProtocolTCP, Port: 1500, TargetPort: intstr.FromInt(1500)},
				},
			},
		}
		_, err := virtClient.CoreV1().Services(inboundVM.Namespace).Create(service)
		Expect(err).ToNot(HaveOccurred())
	})
	// Fix: dropped the stray leading space from the spec description.
	It("should be able to reach the vm based on labels specified on the vm", func() {
		By("starting a pod which tries to reach the vm via the defined service")
		job := newHelloWorldJob(fmt.Sprintf("%s.%s", "myservice", inboundVM.Namespace))
		job, err = virtClient.CoreV1().Pods(inboundVM.Namespace).Create(job)
		Expect(err).ToNot(HaveOccurred())
		By("waiting for the pod to report a successful connection attempt")
		phase := waitForPodToFinish(job)
		Expect(phase).To(Equal(v12.PodSucceeded))
	})
	It("should fail to reach the vm if an invalid servicename is used", func() {
		By("starting a pod which tries to reach the vm via a non-existent service")
		job := newHelloWorldJob(fmt.Sprintf("%s.%s", "wrongservice", inboundVM.Namespace))
		job, err = virtClient.CoreV1().Pods(inboundVM.Namespace).Create(job)
		Expect(err).ToNot(HaveOccurred())
		By("waiting for the pod to report an unsuccessful connection attempt")
		phase := waitForPodToFinish(job)
		Expect(phase).To(Equal(v12.PodFailed))
	})
	AfterEach(func() {
		Expect(virtClient.CoreV1().Services(inboundVM.Namespace).Delete("myservice", &v13.DeleteOptions{})).To(Succeed())
	})
})
// Exercises DNS resolution of the VM's fully-qualified domain name
// (<hostname>.<subdomain>.<namespace>) via a headless service named after
// the VM's subdomain.
Context("with a subdomain and a headless service given", func() {
	BeforeEach(func() {
		service := &v12.Service{
			ObjectMeta: v13.ObjectMeta{
				Name: inboundVM.Spec.Subdomain,
			},
			Spec: v12.ServiceSpec{
				// ClusterIPNone makes the service headless, so DNS resolves
				// directly to the backing pod.
				ClusterIP: v12.ClusterIPNone,
				Selector: map[string]string{
					"expose": "me",
				},
				/* Normally ports are not required on headless services, but there is a bug in kubedns:
				   https://github.com/kubernetes/kubernetes/issues/55158
				*/
				Ports: []v12.ServicePort{
					{Protocol: v12.ProtocolTCP, Port: 1500, TargetPort: intstr.FromInt(1500)},
				},
			},
		}
		_, err := virtClient.CoreV1().Services(inboundVM.Namespace).Create(service)
		Expect(err).ToNot(HaveOccurred())
	})
	It("should be able to reach the vm via its unique fully qualified domain name", func() {
		By("starting a pod which tries to reach the vm via the defined service")
		job := newHelloWorldJob(fmt.Sprintf("%s.%s.%s", inboundVM.Spec.Hostname, inboundVM.Spec.Subdomain, inboundVM.Namespace))
		job, err = virtClient.CoreV1().Pods(inboundVM.Namespace).Create(job)
		Expect(err).ToNot(HaveOccurred())
		By("waiting for the pod to report a successful connection attempt")
		phase := waitForPodToFinish(job)
		Expect(phase).To(Equal(v12.PodSucceeded))
	})
	AfterEach(func() {
		Expect(virtClient.CoreV1().Services(inboundVM.Namespace).Delete(inboundVM.Spec.Subdomain, &v13.DeleteOptions{})).To(Succeed())
	})
})
})
})
Get rid of go routines under network test
go routines under the test make debugging more complicated,
and they do not save a lot of time.
Signed-off-by: Lukianov Artyom <3521b33104adfa9965771fa0bc787b34a2677415@redhat.com>
/*
* This file is part of the kubevirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2018 Red Hat, Inc.
*
*/
package tests_test
import (
"flag"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/google/goexpect"
"fmt"
v12 "k8s.io/api/core/v1"
v13 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/onsi/ginkgo/extensions/table"
"k8s.io/apimachinery/pkg/util/intstr"
"kubevirt.io/kubevirt/pkg/api/v1"
"kubevirt.io/kubevirt/pkg/kubecli"
"kubevirt.io/kubevirt/pkg/log"
"kubevirt.io/kubevirt/tests"
)
var _ = Describe("Networking", func() {
flag.Parse()
virtClient, err := kubecli.GetKubevirtClient()
tests.PanicOnError(err)
var inboundVM *v1.VirtualMachine
var outboundVM *v1.VirtualMachine
// newHelloWorldJob builds a netcat pod that connects to the given host (a
// DNS name or IP) on port 1500 and succeeds only if it reads back the
// "Hello World!" greeting on the first line.
newHelloWorldJob := func(host string) *v12.Pod {
	script := fmt.Sprintf(`set -x; x="$(head -n 1 < <(nc %s 1500 -i 1 -w 1))"; echo "$x" ; if [ "$x" = "Hello World!" ]; then echo "succeeded"; exit 0; else echo "failed"; exit 1; fi`, host)
	return tests.RenderJob("netcat", []string{"/bin/bash", "-c"}, []string{script})
}
// logPodLogs dumps the last 500 seconds of the given pod's logs into the
// test log so connection failures can be diagnosed from CI output.
logPodLogs := func(pod *v12.Pod) {
	defer GinkgoRecover()
	var s int64 = 500
	// Fix: read the logs from the pod's own namespace instead of assuming
	// it lives next to inboundVM. Identical for current callers (all pods
	// here are created in inboundVM's namespace) but safe for reuse.
	logs := virtClient.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v12.PodLogOptions{SinceSeconds: &s})
	rawLogs, err := logs.DoRaw()
	Expect(err).ToNot(HaveOccurred())
	log.Log.Infof("%v", rawLogs)
}
// waitForPodToFinish blocks (up to 30s) until the pod reaches a terminal
// phase (Succeeded or Failed), logs its output, and returns that phase.
waitForPodToFinish := func(pod *v12.Pod) v12.PodPhase {
	Eventually(func() v12.PodPhase {
		// Fix: look the pod up in its own namespace rather than
		// inboundVM's; identical for current callers, safer for reuse.
		j, err := virtClient.Core().Pods(pod.ObjectMeta.Namespace).Get(pod.ObjectMeta.Name, v13.GetOptions{})
		Expect(err).ToNot(HaveOccurred())
		return j.Status.Phase
	}, 30*time.Second, 1*time.Second).Should(Or(Equal(v12.PodSucceeded), Equal(v12.PodFailed)))
	j, err := virtClient.Core().Pods(pod.ObjectMeta.Namespace).Get(pod.ObjectMeta.Name, v13.GetOptions{})
	Expect(err).ToNot(HaveOccurred())
	logPodLogs(pod)
	return j.Status.Phase
}
// TODO this is not optimal, since the one test which will initiate this, will look slow
tests.BeforeAll(func() {
	tests.BeforeTestCleanup()
	// Create and start inbound VM; the expose=me label lets the service
	// tests select it.
	inboundVM = tests.NewRandomVMWithEphemeralDiskAndUserdata(tests.RegistryDiskFor(tests.RegistryDiskCirros), "#!/bin/bash\necho 'hello'\n")
	inboundVM.Labels = map[string]string{"expose": "me"}
	// NOTE(review): the values look swapped — the subdomain is "myvm" while
	// the hostname is "my-subdomain". The FQDN test still passes because it
	// composes both fields consistently, but confirm this is intentional.
	inboundVM.Spec.Subdomain = "myvm"
	inboundVM.Spec.Hostname = "my-subdomain"
	_, err = virtClient.VM(tests.NamespaceTestDefault).Create(inboundVM)
	Expect(err).ToNot(HaveOccurred())
	// Create and start outbound VM
	outboundVM = tests.NewRandomVMWithEphemeralDiskAndUserdata(tests.RegistryDiskFor(tests.RegistryDiskCirros), "#!/bin/bash\necho 'hello'\n")
	_, err = virtClient.VM(tests.NamespaceTestDefault).Create(outboundVM)
	Expect(err).ToNot(HaveOccurred())
	for _, networkVm := range []*v1.VirtualMachine{inboundVM, outboundVM} {
		// Wait for VM start
		tests.WaitForSuccessfulVMStart(networkVm)
		// Fetch the new VM with updated status
		vm, err := virtClient.VM(tests.NamespaceTestDefault).Get(networkVm.Name, v13.GetOptions{})
		Expect(err).ToNot(HaveOccurred())
		// Lets make sure that the OS is up by waiting until we can login
		expecter, err := tests.LoggedInCirrosExpecter(vm)
		Expect(err).ToNot(HaveOccurred())
		expecter.Close()
	}
	// Refresh inboundVM so its status (IP, node name) is current, then
	// start a persistent netcat listener on port 1500 inside it.
	inboundVM, err = virtClient.VM(tests.NamespaceTestDefault).Get(inboundVM.Name, v13.GetOptions{})
	Expect(err).ToNot(HaveOccurred())
	expecter, _, err := tests.NewConsoleExpecter(virtClient, inboundVM, 10*time.Second)
	Expect(err).ToNot(HaveOccurred())
	defer expecter.Close()
	resp, err := expecter.ExpectBatch([]expect.Batcher{
		&expect.BSnd{S: "\n"},
		&expect.BExp{R: "\\$ "},
		&expect.BSnd{S: "screen -d -m nc -klp 1500 -e echo -e \"Hello World!\"\n"},
		&expect.BExp{R: "\\$ "},
		&expect.BSnd{S: "echo $?\n"},
		&expect.BExp{R: "0"},
	}, 60*time.Second)
	log.DefaultLogger().Infof("%v", resp)
	Expect(err).ToNot(HaveOccurred())
	// Refresh outboundVM as well so tests see its propagated status.
	outboundVM, err = virtClient.VM(tests.NamespaceTestDefault).Get(outboundVM.Name, v13.GetOptions{})
	Expect(err).ToNot(HaveOccurred())
})
Context("VirtualMachine attached to the pod network", func() {
// Verifies outbound connectivity from a VM on the pod network: the bridge
// MTU inside the virt-launcher pod, the MTU as seen by the guest, and the
// guest's ability to send maximum-size (unfragmented) ICMP payloads.
table.DescribeTable("should be able to reach", func(destination string) {
	var cmdCheck, addrShow, addr string
	// assuming pod network is of standard MTU = 1500 (minus 50 bytes for vxlan overhead)
	expectedMtu := 1450
	ipHeaderSize := 28 // IPv4 specific
	payloadSize := expectedMtu - ipHeaderSize
	expecter, _, err := tests.NewConsoleExpecter(virtClient, outboundVM, 10*time.Second)
	// Fix: check the error before deferring Close(); if NewConsoleExpecter
	// failed, expecter is nil and the deferred Close() would panic.
	Expect(err).ToNot(HaveOccurred())
	defer expecter.Close()
	switch destination {
	case "Internet":
		addr = "www.google.com"
	case "InboundVM":
		addr = inboundVM.Status.Interfaces[0].IP
	}
	By("checking br1 MTU inside the pod")
	vmPod := tests.GetRunningPodByLabel(outboundVM.Name, v1.DomainLabel, tests.NamespaceTestDefault)
	output, err := tests.ExecuteCommandOnPod(
		virtClient,
		vmPod,
		vmPod.Spec.Containers[0].Name,
		[]string{"ip", "address", "show", "br1"},
	)
	log.Log.Infof("%v", output)
	Expect(err).ToNot(HaveOccurred())
	// the following substring is part of 'ip address show' output
	expectedMtuString := fmt.Sprintf("mtu %d", expectedMtu)
	Expect(strings.Contains(output, expectedMtuString)).To(BeTrue())
	By("checking eth0 MTU inside the VM")
	addrShow = "ip address show eth0\n"
	out, err := expecter.ExpectBatch([]expect.Batcher{
		&expect.BSnd{S: "\n"},
		&expect.BExp{R: "\\$ "},
		&expect.BSnd{S: addrShow},
		&expect.BExp{R: fmt.Sprintf(".*%s.*\n", expectedMtuString)},
		&expect.BSnd{S: "echo $?\n"},
		&expect.BExp{R: "0"},
	}, 180*time.Second)
	log.Log.Infof("%v", out)
	Expect(err).ToNot(HaveOccurred())
	By("checking the VM can send MTU sized frames to another VM")
	// NOTE: VM is not directly accessible from inside the pod because
	// we transferred its IP address under DHCP server control, so the
	// only thing we can validate is connectivity between VMs
	//
	// NOTE: cirros ping doesn't support -M do that could be used to
	// validate end-to-end connectivity with Don't Fragment flag set
	cmdCheck = fmt.Sprintf("ping %s -c 1 -w 5 -s %d\n", addr, payloadSize)
	out, err = expecter.ExpectBatch([]expect.Batcher{
		&expect.BSnd{S: "\n"},
		&expect.BExp{R: "\\$ "},
		&expect.BSnd{S: cmdCheck},
		&expect.BExp{R: "\\$ "},
		&expect.BSnd{S: "echo $?\n"},
		&expect.BExp{R: "0"},
	}, 180*time.Second)
	log.Log.Infof("%v", out)
	Expect(err).ToNot(HaveOccurred())
},
	table.Entry("the Inbound VM", "InboundVM"),
	table.Entry("the internet", "Internet"),
)
// Verifies that the VM's IP address, as propagated into its status, is
// reachable from an ordinary pod scheduled either on the same node as the
// VM or on a different one, with and without host networking.
table.DescribeTable("should be reachable via the propagated IP from a Pod", func(op v12.NodeSelectorOperator, hostNetwork bool) {
	ip := inboundVM.Status.Interfaces[0].IP
	// The NotIn (different-node) variants need a second node to schedule
	// the pod on; skip them on single-node clusters.
	nodes, err := virtClient.CoreV1().Nodes().List(v13.ListOptions{})
	Expect(err).ToNot(HaveOccurred())
	Expect(nodes.Items).ToNot(BeEmpty())
	if len(nodes.Items) == 1 && op == v12.NodeSelectorOpNotIn {
		Skip("Skip network test that requires multiple nodes when only one node is present.")
	}
	// Run netcat and give it one second to get "Hello World!" back from the VM
	job := newHelloWorldJob(ip)
	// Pin the netcat pod onto (In) or away from (NotIn) the VM's node.
	job.Spec.Affinity = &v12.Affinity{
		NodeAffinity: &v12.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v12.NodeSelector{
				NodeSelectorTerms: []v12.NodeSelectorTerm{
					{
						MatchExpressions: []v12.NodeSelectorRequirement{
							{Key: "kubernetes.io/hostname", Operator: op, Values: []string{inboundVM.Status.NodeName}},
						},
					},
				},
			},
		},
	}
	job.Spec.HostNetwork = hostNetwork
	job, err = virtClient.CoreV1().Pods(inboundVM.ObjectMeta.Namespace).Create(job)
	Expect(err).ToNot(HaveOccurred())
	phase := waitForPodToFinish(job)
	Expect(phase).To(Equal(v12.PodSucceeded))
},
	table.Entry("on the same node from Pod", v12.NodeSelectorOpIn, false),
	table.Entry("on a different node from Pod", v12.NodeSelectorOpNotIn, false),
	table.Entry("on the same node from Node", v12.NodeSelectorOpIn, true),
	table.Entry("on a different node from Node", v12.NodeSelectorOpNotIn, true),
)
// Exposes the inbound VM (labelled expose=me) through a ClusterIP service
// and verifies it is reachable by service name — and unreachable through a
// name no service owns.
Context("with a service matching the vm exposed", func() {
	BeforeEach(func() {
		service := &v12.Service{
			ObjectMeta: v13.ObjectMeta{
				Name: "myservice",
			},
			Spec: v12.ServiceSpec{
				Selector: map[string]string{
					"expose": "me",
				},
				Ports: []v12.ServicePort{
					{Protocol: v12.ProtocolTCP, Port: 1500, TargetPort: intstr.FromInt(1500)},
				},
			},
		}
		_, err := virtClient.CoreV1().Services(inboundVM.Namespace).Create(service)
		Expect(err).ToNot(HaveOccurred())
	})
	// Fix: dropped the stray leading space from the spec description.
	It("should be able to reach the vm based on labels specified on the vm", func() {
		By("starting a pod which tries to reach the vm via the defined service")
		job := newHelloWorldJob(fmt.Sprintf("%s.%s", "myservice", inboundVM.Namespace))
		job, err = virtClient.CoreV1().Pods(inboundVM.Namespace).Create(job)
		Expect(err).ToNot(HaveOccurred())
		By("waiting for the pod to report a successful connection attempt")
		phase := waitForPodToFinish(job)
		Expect(phase).To(Equal(v12.PodSucceeded))
	})
	It("should fail to reach the vm if an invalid servicename is used", func() {
		By("starting a pod which tries to reach the vm via a non-existent service")
		job := newHelloWorldJob(fmt.Sprintf("%s.%s", "wrongservice", inboundVM.Namespace))
		job, err = virtClient.CoreV1().Pods(inboundVM.Namespace).Create(job)
		Expect(err).ToNot(HaveOccurred())
		By("waiting for the pod to report an unsuccessful connection attempt")
		phase := waitForPodToFinish(job)
		Expect(phase).To(Equal(v12.PodFailed))
	})
	AfterEach(func() {
		Expect(virtClient.CoreV1().Services(inboundVM.Namespace).Delete("myservice", &v13.DeleteOptions{})).To(Succeed())
	})
})
// Exercises DNS resolution of the VM's fully-qualified domain name
// (<hostname>.<subdomain>.<namespace>) via a headless service named after
// the VM's subdomain.
Context("with a subdomain and a headless service given", func() {
	BeforeEach(func() {
		service := &v12.Service{
			ObjectMeta: v13.ObjectMeta{
				Name: inboundVM.Spec.Subdomain,
			},
			Spec: v12.ServiceSpec{
				// ClusterIPNone makes the service headless, so DNS resolves
				// directly to the backing pod.
				ClusterIP: v12.ClusterIPNone,
				Selector: map[string]string{
					"expose": "me",
				},
				/* Normally ports are not required on headless services, but there is a bug in kubedns:
				   https://github.com/kubernetes/kubernetes/issues/55158
				*/
				Ports: []v12.ServicePort{
					{Protocol: v12.ProtocolTCP, Port: 1500, TargetPort: intstr.FromInt(1500)},
				},
			},
		}
		_, err := virtClient.CoreV1().Services(inboundVM.Namespace).Create(service)
		Expect(err).ToNot(HaveOccurred())
	})
	It("should be able to reach the vm via its unique fully qualified domain name", func() {
		By("starting a pod which tries to reach the vm via the defined service")
		job := newHelloWorldJob(fmt.Sprintf("%s.%s.%s", inboundVM.Spec.Hostname, inboundVM.Spec.Subdomain, inboundVM.Namespace))
		job, err = virtClient.CoreV1().Pods(inboundVM.Namespace).Create(job)
		Expect(err).ToNot(HaveOccurred())
		By("waiting for the pod to report a successful connection attempt")
		phase := waitForPodToFinish(job)
		Expect(phase).To(Equal(v12.PodSucceeded))
	})
	AfterEach(func() {
		Expect(virtClient.CoreV1().Services(inboundVM.Namespace).Delete(inboundVM.Spec.Subdomain, &v13.DeleteOptions{})).To(Succeed())
	})
})
})
})
|
package zip
import (
gozip "archive/zip"
. "jvmgo/any"
"jvmgo/jvm/rtda"
rtc "jvmgo/jvm/rtda/class"
"jvmgo/util"
)
const (
JZENTRY_NAME = 0
JZENTRY_EXTRA = 1
JZENTRY_COMMENT = 2
)
func init() {
_zf(initIDs, "initIDs", "()V")
_zf(getEntryBytes, "getEntryBytes", "(JI)[B")
_zf(getEntryCrc, "getEntryCrc", "(J)J")
_zf(getEntryFlag, "getEntryFlag", "(J)I")
_zf(getEntrySize, "getEntrySize", "(J)J")
_zf(getEntryTime, "getEntryTime", "(J)J")
_zf(getNextEntry, "getNextEntry", "(JI)J")
_zf(getTotal, "getTotal", "(J)I")
_zf(open, "open", "(Ljava/lang/String;IJZ)J")
_zf(startsWithLOC, "startsWithLOC", "(J)Z")
}
func _zf(method Any, name, desc string) {
rtc.RegisterNativeMethod("java/util/zip/ZipFile", name, desc, method)
}
// private static native void initIDs();
// ()V
func initIDs(frame *rtda.Frame) {
// todo
}
// private static native long open(String name, int mode, long lastModified,
//                                 boolean usemmap) throws IOException;
// (Ljava/lang/String;IJZ)J
func open(frame *rtda.Frame) {
	localVars := frame.LocalVars()
	pathObj := localVars.GetRef(0)
	path := rtda.GoString(pathObj)
	handle, err := openZip(path)
	if err != nil {
		// todo: raise a proper java.io.IOException in the interpreted world
		panic("IOException")
	}
	frame.OperandStack().PushLong(handle)
}
// private static native boolean startsWithLOC(long jzfile);
// (J)Z
func startsWithLOC(frame *rtda.Frame) {
// todo
stack := frame.OperandStack()
stack.PushBoolean(true)
}
// private static native int getTotal(long jzfile);
// (J)I
func getTotal(frame *rtda.Frame) {
vars := frame.LocalVars()
jzfile := vars.GetLong(0)
total := getEntryCount(jzfile)
stack := frame.OperandStack()
stack.PushInt(total)
}
// private static native long getNextEntry(long jzfile, int i);
// (JI)J
func getNextEntry(frame *rtda.Frame) {
vars := frame.LocalVars()
jzfile := vars.GetLong(0)
i := vars.GetInt(2)
jzentry := getJzentry(jzfile, i)
stack := frame.OperandStack()
stack.PushLong(jzentry)
}
// private static native byte[] getEntryBytes(long jzentry, int type);
// (JI)[B
func getEntryBytes(frame *rtda.Frame) {
vars := frame.LocalVars()
jzentry := vars.GetLong(0)
_type := vars.GetInt(2)
goBytes := _getEntryBytes(jzentry, _type)
jBytes := util.CastUint8sToInt8s(goBytes)
byteArr := rtc.NewByteArray(jBytes, frame.ClassLoader())
stack := frame.OperandStack()
stack.PushRef(byteArr)
}
func _getEntryBytes(jzentry int64, _type int32) []byte {
entry := getEntry(jzentry)
switch _type {
case JZENTRY_NAME:
return []byte(entry.Name)
case JZENTRY_EXTRA:
return entry.Extra
case JZENTRY_COMMENT:
return []byte(entry.Comment)
}
util.Panicf("BAD type: %v", _type)
return nil
}
// private static native int getEntryFlag(long jzentry);
// (J)I
func getEntryFlag(frame *rtda.Frame) {
entry := _getEntryPop(frame)
flag := int32(entry.Flags)
stack := frame.OperandStack()
stack.PushInt(flag)
}
// private static native long getEntryTime(long jzentry);
// (J)J
func getEntryTime(frame *rtda.Frame) {
	entry := _getEntryPop(frame)
	modDate := entry.ModifiedDate
	modTime := entry.ModifiedTime
	// MS-DOS date in the high 16 bits, time in the low 16 — presumably the
	// jlong packing java.util.zip.ZipEntry decodes; confirm against the JDK.
	time := int64(modDate)<<16 | int64(modTime)
	stack := frame.OperandStack()
	stack.PushLong(time)
}
// private static native long getEntryCrc(long jzentry);
// (J)J
func getEntryCrc(frame *rtda.Frame) {
entry := _getEntryPop(frame)
crc := int64(entry.CRC32)
stack := frame.OperandStack()
stack.PushLong(crc)
}
// private static native long getEntrySize(long jzentry);
// (J)J
func getEntrySize(frame *rtda.Frame) {
entry := _getEntryPop(frame)
size := int64(entry.UncompressedSize64)
stack := frame.OperandStack()
stack.PushLong(size)
}
func _getEntryPop(frame *rtda.Frame) *gozip.File {
vars := frame.LocalVars()
jzentry := vars.GetLong(0)
entry := getEntry(jzentry)
return entry
}
native method: ZipFile.getEntryCSize()
package zip
import (
gozip "archive/zip"
. "jvmgo/any"
"jvmgo/jvm/rtda"
rtc "jvmgo/jvm/rtda/class"
"jvmgo/util"
)
// Selector values for getEntryBytes, mirroring the JZENTRY_* constants used
// by the JDK's native java.util.zip.ZipFile implementation.
const (
	JZENTRY_NAME    = 0
	JZENTRY_EXTRA   = 1
	JZENTRY_COMMENT = 2
)
// init registers the Go implementations of java.util.zip.ZipFile's native
// methods with the runtime's native-method registry.
func init() {
	_zf(initIDs, "initIDs", "()V")
	_zf(getEntryBytes, "getEntryBytes", "(JI)[B")
	_zf(getEntryCrc, "getEntryCrc", "(J)J")
	_zf(getEntryCSize, "getEntryCSize", "(J)J")
	_zf(getEntryFlag, "getEntryFlag", "(J)I")
	_zf(getEntrySize, "getEntrySize", "(J)J")
	_zf(getEntryTime, "getEntryTime", "(J)J")
	_zf(getNextEntry, "getNextEntry", "(JI)J")
	_zf(getTotal, "getTotal", "(J)I")
	_zf(open, "open", "(Ljava/lang/String;IJZ)J")
	_zf(startsWithLOC, "startsWithLOC", "(J)Z")
}
// _zf registers one native method of java/util/zip/ZipFile by name and
// JVM descriptor.
func _zf(method Any, name, desc string) {
	rtc.RegisterNativeMethod("java/util/zip/ZipFile", name, desc, method)
}
// private static native void initIDs();
// ()V
func initIDs(frame *rtda.Frame) {
	// todo: the reference JDK caches field IDs here; nothing to do yet
}
// private static native long open(String name, int mode, long lastModified,
//                                 boolean usemmap) throws IOException;
// (Ljava/lang/String;IJZ)J
func open(frame *rtda.Frame) {
	localVars := frame.LocalVars()
	pathObj := localVars.GetRef(0)
	path := rtda.GoString(pathObj)
	handle, err := openZip(path)
	if err != nil {
		// todo: raise a proper java.io.IOException in the interpreted world
		panic("IOException")
	}
	frame.OperandStack().PushLong(handle)
}
// private static native boolean startsWithLOC(long jzfile);
// (J)Z
func startsWithLOC(frame *rtda.Frame) {
	// todo: actually inspect the file for the local-file-header signature;
	// for now every archive is reported as starting with LOC.
	frame.OperandStack().PushBoolean(true)
}
// private static native int getTotal(long jzfile);
// (J)I
func getTotal(frame *rtda.Frame) {
	// Resolve the zip handle from local var 0 and report its entry count.
	handle := frame.LocalVars().GetLong(0)
	frame.OperandStack().PushInt(getEntryCount(handle))
}
// private static native long getNextEntry(long jzfile, int i);
// (JI)J
func getNextEntry(frame *rtda.Frame) {
	localVars := frame.LocalVars()
	handle := localVars.GetLong(0)
	index := localVars.GetInt(2) // the long handle occupies slots 0 and 1
	frame.OperandStack().PushLong(getJzentry(handle, index))
}
// private static native byte[] getEntryBytes(long jzentry, int type);
// (JI)[B
func getEntryBytes(frame *rtda.Frame) {
	localVars := frame.LocalVars()
	handle := localVars.GetLong(0)
	kind := localVars.GetInt(2)
	// Fetch the raw bytes, convert to JVM signed bytes, and box them into
	// a Java byte[] before pushing the reference.
	raw := _getEntryBytes(handle, kind)
	jBytes := util.CastUint8sToInt8s(raw)
	frame.OperandStack().PushRef(rtc.NewByteArray(jBytes, frame.ClassLoader()))
}
// _getEntryBytes returns the name, extra field, or comment of the entry
// identified by jzentry, selected by _type; any other selector is fatal.
func _getEntryBytes(jzentry int64, _type int32) []byte {
	entry := getEntry(jzentry)
	switch _type {
	case JZENTRY_NAME:
		return []byte(entry.Name)
	case JZENTRY_EXTRA:
		return entry.Extra
	case JZENTRY_COMMENT:
		return []byte(entry.Comment)
	default:
		util.Panicf("BAD type: %v", _type)
		return nil
	}
}
// private static native int getEntryFlag(long jzentry);
// (J)I
func getEntryFlag(frame *rtda.Frame) {
	entry := _getEntryPop(frame)
	frame.OperandStack().PushInt(int32(entry.Flags))
}
// private static native long getEntryTime(long jzentry);
// (J)J
func getEntryTime(frame *rtda.Frame) {
	entry := _getEntryPop(frame)
	// Pack the MS-DOS date into the high 16 bits and the time into the
	// low 16 bits of the returned jlong.
	packed := int64(entry.ModifiedDate)<<16 | int64(entry.ModifiedTime)
	frame.OperandStack().PushLong(packed)
}
// private static native long getEntryCrc(long jzentry);
// (J)J
func getEntryCrc(frame *rtda.Frame) {
	entry := _getEntryPop(frame)
	frame.OperandStack().PushLong(int64(entry.CRC32))
}
// private static native long getEntrySize(long jzentry);
// (J)J
func getEntrySize(frame *rtda.Frame) {
	entry := _getEntryPop(frame)
	// Uncompressed (original) size of the entry.
	frame.OperandStack().PushLong(int64(entry.UncompressedSize64))
}
// private static native long getEntryCSize(long jzentry);
// (J)J
func getEntryCSize(frame *rtda.Frame) {
	entry := _getEntryPop(frame)
	// Compressed (stored) size of the entry.
	frame.OperandStack().PushLong(int64(entry.CompressedSize64))
}
// _getEntryPop reads the jzentry handle from local var 0 and resolves it to
// the corresponding archive entry.
func _getEntryPop(frame *rtda.Frame) *gozip.File {
	jzentry := frame.LocalVars().GetLong(0)
	return getEntry(jzentry)
}
|
package zip
import (
gozip "archive/zip"
. "jvmgo/any"
"jvmgo/jvm/rtda"
rtc "jvmgo/jvm/rtda/class"
"jvmgo/util"
)
const (
JZENTRY_NAME = 0
JZENTRY_EXTRA = 1
JZENTRY_COMMENT = 2
)
func init() {
_zf(initIDs, "initIDs", "()V")
_zf(getEntryBytes, "getEntryBytes", "(JI)[B")
_zf(getEntryCrc, "getEntryCrc", "(J)J")
_zf(getEntryFlag, "getEntryFlag", "(J)I")
_zf(getEntryTime, "getEntryTime", "(J)J")
_zf(getNextEntry, "getNextEntry", "(JI)J")
_zf(getTotal, "getTotal", "(J)I")
_zf(open, "open", "(Ljava/lang/String;IJZ)J")
_zf(startsWithLOC, "startsWithLOC", "(J)Z")
}
func _zf(method Any, name, desc string) {
rtc.RegisterNativeMethod("java/util/zip/ZipFile", name, desc, method)
}
// private static native void initIDs();
// ()V
func initIDs(frame *rtda.Frame) {
// todo
}
// private static native long open(String name, int mode, long lastModified,
//                                 boolean usemmap) throws IOException;
// (Ljava/lang/String;IJZ)J
func open(frame *rtda.Frame) {
	localVars := frame.LocalVars()
	pathObj := localVars.GetRef(0)
	path := rtda.GoString(pathObj)
	handle, err := openZip(path)
	if err != nil {
		// todo: raise a proper java.io.IOException in the interpreted world
		panic("IOException")
	}
	frame.OperandStack().PushLong(handle)
}
// private static native boolean startsWithLOC(long jzfile);
// (J)Z
func startsWithLOC(frame *rtda.Frame) {
// todo
stack := frame.OperandStack()
stack.PushBoolean(true)
}
// private static native int getTotal(long jzfile);
// (J)I
func getTotal(frame *rtda.Frame) {
vars := frame.LocalVars()
jzfile := vars.GetLong(0)
total := getEntryCount(jzfile)
stack := frame.OperandStack()
stack.PushInt(total)
}
// private static native long getNextEntry(long jzfile, int i);
// (JI)J
func getNextEntry(frame *rtda.Frame) {
vars := frame.LocalVars()
jzfile := vars.GetLong(0)
i := vars.GetInt(2)
jzentry := getJzentry(jzfile, i)
stack := frame.OperandStack()
stack.PushLong(jzentry)
}
// private static native byte[] getEntryBytes(long jzentry, int type);
// (JI)[B
func getEntryBytes(frame *rtda.Frame) {
vars := frame.LocalVars()
jzentry := vars.GetLong(0)
_type := vars.GetInt(2)
goBytes := _getEntryBytes(jzentry, _type)
jBytes := util.CastUint8sToInt8s(goBytes)
byteArr := rtc.NewByteArray(jBytes, frame.ClassLoader())
stack := frame.OperandStack()
stack.PushRef(byteArr)
}
func _getEntryBytes(jzentry int64, _type int32) []byte {
entry := getEntry(jzentry)
switch _type {
case JZENTRY_NAME:
return []byte(entry.Name)
case JZENTRY_EXTRA:
return entry.Extra
case JZENTRY_COMMENT:
return []byte(entry.Comment)
}
util.Panicf("BAD type: %v", _type)
return nil
}
// private static native int getEntryFlag(long jzentry);
// (J)I
func getEntryFlag(frame *rtda.Frame) {
entry := _getEntryPop(frame)
flag := int32(entry.Flags)
stack := frame.OperandStack()
stack.PushInt(flag)
}
// private static native long getEntryTime(long jzentry);
// (J)J
func getEntryTime(frame *rtda.Frame) {
	entry := _getEntryPop(frame)
	modDate := entry.ModifiedDate
	modTime := entry.ModifiedTime
	// MS-DOS date in the high 16 bits, time in the low 16 — presumably the
	// jlong packing java.util.zip.ZipEntry decodes; confirm against the JDK.
	time := int64(modDate)<<16 | int64(modTime)
	stack := frame.OperandStack()
	stack.PushLong(time)
}
// private static native long getEntryCrc(long jzentry);
// (J)J
func getEntryCrc(frame *rtda.Frame) {
entry := _getEntryPop(frame)
crc := int64(entry.CRC32)
stack := frame.OperandStack()
stack.PushLong(crc)
}
func _getEntryPop(frame *rtda.Frame) *gozip.File {
vars := frame.LocalVars()
jzentry := vars.GetLong(0)
entry := getEntry(jzentry)
return entry
}
native method: ZipFile.getEntrySize()
package zip
import (
gozip "archive/zip"
. "jvmgo/any"
"jvmgo/jvm/rtda"
rtc "jvmgo/jvm/rtda/class"
"jvmgo/util"
)
const (
JZENTRY_NAME = 0
JZENTRY_EXTRA = 1
JZENTRY_COMMENT = 2
)
func init() {
_zf(initIDs, "initIDs", "()V")
_zf(getEntryBytes, "getEntryBytes", "(JI)[B")
_zf(getEntryCrc, "getEntryCrc", "(J)J")
_zf(getEntryFlag, "getEntryFlag", "(J)I")
_zf(getEntrySize, "getEntrySize", "(J)J")
_zf(getEntryTime, "getEntryTime", "(J)J")
_zf(getNextEntry, "getNextEntry", "(JI)J")
_zf(getTotal, "getTotal", "(J)I")
_zf(open, "open", "(Ljava/lang/String;IJZ)J")
_zf(startsWithLOC, "startsWithLOC", "(J)Z")
}
func _zf(method Any, name, desc string) {
rtc.RegisterNativeMethod("java/util/zip/ZipFile", name, desc, method)
}
// private static native void initIDs();
// ()V
func initIDs(frame *rtda.Frame) {
// todo
}
// private static native long open(String name, int mode, long lastModified,
//                                 boolean usemmap) throws IOException;
// (Ljava/lang/String;IJZ)J
func open(frame *rtda.Frame) {
	localVars := frame.LocalVars()
	pathObj := localVars.GetRef(0)
	path := rtda.GoString(pathObj)
	handle, err := openZip(path)
	if err != nil {
		// todo: raise a proper java.io.IOException in the interpreted world
		panic("IOException")
	}
	frame.OperandStack().PushLong(handle)
}
// private static native boolean startsWithLOC(long jzfile);
// (J)Z
func startsWithLOC(frame *rtda.Frame) {
// todo
stack := frame.OperandStack()
stack.PushBoolean(true)
}
// private static native int getTotal(long jzfile);
// (J)I
func getTotal(frame *rtda.Frame) {
vars := frame.LocalVars()
jzfile := vars.GetLong(0)
total := getEntryCount(jzfile)
stack := frame.OperandStack()
stack.PushInt(total)
}
// private static native long getNextEntry(long jzfile, int i);
// (JI)J
func getNextEntry(frame *rtda.Frame) {
vars := frame.LocalVars()
jzfile := vars.GetLong(0)
i := vars.GetInt(2)
jzentry := getJzentry(jzfile, i)
stack := frame.OperandStack()
stack.PushLong(jzentry)
}
// private static native byte[] getEntryBytes(long jzentry, int type);
// (JI)[B
func getEntryBytes(frame *rtda.Frame) {
vars := frame.LocalVars()
jzentry := vars.GetLong(0)
_type := vars.GetInt(2)
goBytes := _getEntryBytes(jzentry, _type)
jBytes := util.CastUint8sToInt8s(goBytes)
byteArr := rtc.NewByteArray(jBytes, frame.ClassLoader())
stack := frame.OperandStack()
stack.PushRef(byteArr)
}
func _getEntryBytes(jzentry int64, _type int32) []byte {
entry := getEntry(jzentry)
switch _type {
case JZENTRY_NAME:
return []byte(entry.Name)
case JZENTRY_EXTRA:
return entry.Extra
case JZENTRY_COMMENT:
return []byte(entry.Comment)
}
util.Panicf("BAD type: %v", _type)
return nil
}
// private static native int getEntryFlag(long jzentry);
// (J)I
func getEntryFlag(frame *rtda.Frame) {
entry := _getEntryPop(frame)
flag := int32(entry.Flags)
stack := frame.OperandStack()
stack.PushInt(flag)
}
// private static native long getEntryTime(long jzentry);
// (J)J
func getEntryTime(frame *rtda.Frame) {
	entry := _getEntryPop(frame)
	modDate := entry.ModifiedDate
	modTime := entry.ModifiedTime
	// MS-DOS date in the high 16 bits, time in the low 16 — presumably the
	// jlong packing java.util.zip.ZipEntry decodes; confirm against the JDK.
	time := int64(modDate)<<16 | int64(modTime)
	stack := frame.OperandStack()
	stack.PushLong(time)
}
// private static native long getEntryCrc(long jzentry);
// (J)J
func getEntryCrc(frame *rtda.Frame) {
entry := _getEntryPop(frame)
crc := int64(entry.CRC32)
stack := frame.OperandStack()
stack.PushLong(crc)
}
// private static native long getEntrySize(long jzentry);
// (J)J
func getEntrySize(frame *rtda.Frame) {
entry := _getEntryPop(frame)
size := int64(entry.UncompressedSize64)
stack := frame.OperandStack()
stack.PushLong(size)
}
func _getEntryPop(frame *rtda.Frame) *gozip.File {
vars := frame.LocalVars()
jzentry := vars.GetLong(0)
entry := getEntry(jzentry)
return entry
}
|
package server
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"github.com/bouk/httprouter"
"github.com/influxdata/chronograf"
)
// validPermissions checks that every permission has a legal scope and that
// database-scoped permissions carry a database name. A nil set is valid.
func validPermissions(perms *chronograf.Permissions) error {
	if perms == nil {
		return nil
	}
	for _, perm := range *perms {
		switch {
		case perm.Scope != chronograf.AllScope && perm.Scope != chronograf.DBScope:
			return fmt.Errorf("Invalid permission scope")
		case perm.Scope == chronograf.DBScope && perm.Name == "":
			return fmt.Errorf("Database scoped permission requires a name")
		}
	}
	return nil
}
// sourceUserRequest is the JSON payload for creating or updating a user on
// a time-series source.
type sourceUserRequest struct {
	Username    string                 `json:"name,omitempty"`        // Username for new account
	Password    string                 `json:"password,omitempty"`    // Password for new account
	Permissions chronograf.Permissions `json:"permissions,omitempty"` // Optional permissions
}
// ValidCreate ensures a creation request carries both credentials and that
// any supplied permissions are well-formed.
func (r *sourceUserRequest) ValidCreate() error {
	switch {
	case r.Username == "":
		return fmt.Errorf("Username required")
	case r.Password == "":
		return fmt.Errorf("Password required")
	}
	return validPermissions(&r.Permissions)
}
// ValidUpdate ensures an update request changes at least one field and that
// any supplied permissions are well-formed.
func (r *sourceUserRequest) ValidUpdate() error {
	if len(r.Permissions) == 0 && r.Password == "" {
		return fmt.Errorf("No fields to update")
	}
	return validPermissions(&r.Permissions)
}
// sourceUser is the API response shape for a user on a source without
// role support.
type sourceUser struct {
	Username    string                 `json:"name"`                  // Username for new account
	Permissions chronograf.Permissions `json:"permissions,omitempty"` // Account's permissions
	Links       selfLinks              `json:"links"`                 // Links are URI locations related to user
}

// enterpriseSourceUser is the API response shape for a user on a source
// that supports roles (permissions and roles always serialized).
type enterpriseSourceUser struct {
	Username    string                 `json:"name"`        // Username for new account
	Permissions chronograf.Permissions `json:"permissions"` // Account's permissions
	Roles       []roleResponse         `json:"roles"`       // Roles if source uses them
	Links       selfLinks              `json:"links"`       // Links are URI locations related to user
}

// selfLinks carries the hypermedia link back to the resource itself.
type selfLinks struct {
	Self string `json:"self"` // Self link mapping to this resource
}
// sourceUserResponse converts a chronograf user into the JSON shape served
// by the API. Sources that support roles get the enterprise shape with the
// role list included; all others get the plain user shape.
func sourceUserResponse(u *chronograf.User, srcID int, hasRoles bool) interface{} {
	// Permissions should always be returned. If no permissions, then
	// return empty array
	perms := u.Permissions
	if len(perms) == 0 {
		perms = make([]chronograf.Permission, 0)
	}
	if hasRoles {
		res := enterpriseSourceUser{
			Username:    u.Name,
			Permissions: perms,
			Roles:       make([]roleResponse, 0),
			Links:       newSelfLinks(srcID, "users", u.Name),
		}
		if len(u.Roles) > 0 {
			rr := make([]roleResponse, len(u.Roles))
			for i := range u.Roles {
				// Fix: take the address of the slice element rather than of
				// the loop variable (`&role`); with the shared loop variable
				// every response would alias the same value if the pointer
				// were retained by newRoleResponse.
				rr[i] = newRoleResponse(srcID, &u.Roles[i])
			}
			res.Roles = rr
		}
		return &res
	}
	res := sourceUser{
		Username:    u.Name,
		Permissions: perms,
		Links:       newSelfLinks(srcID, "users", u.Name),
	}
	return &res
}
// newSelfLinks builds the self link for a resource nested under a source,
// URL-escaping the resource name via url.URL.
func newSelfLinks(id int, parent, resource string) selfLinks {
	const httpAPISrcs = "/chronograf/v1/sources"
	encoded := (&url.URL{Path: resource}).String()
	return selfLinks{
		Self: fmt.Sprintf("%s/%d/%s/%s", httpAPISrcs, id, parent, encoded),
	}
}
// NewSourceUser adds user to source
func (h *Service) NewSourceUser(w http.ResponseWriter, r *http.Request) {
	var req sourceUserRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, h.Logger)
		return
	}
	if err := req.ValidCreate(); err != nil {
		invalidData(w, err, h.Logger)
		return
	}
	ctx := r.Context()
	// sourceUsersStore has already written the HTTP error response on failure.
	srcID, store, err := h.sourceUsersStore(ctx, w, r)
	if err != nil {
		return
	}
	user := &chronograf.User{
		Name:        req.Username,
		Passwd:      req.Password,
		Permissions: req.Permissions,
	}
	res, err := store.Add(ctx, user)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	// NOTE(review): permissions are echoed from the request rather than
	// taken from the store result — confirm this is intended.
	su := sourceUser{
		Username:    res.Name,
		Permissions: req.Permissions,
		Links:       newSelfLinks(srcID, "users", res.Name),
	}
	w.Header().Add("Location", su.Links.Self)
	encodeJSON(w, http.StatusCreated, su, h.Logger)
}
// sourceUsers wraps the list of user responses for the /users endpoint.
type sourceUsers struct {
	Users []interface{} `json:"users"`
}
// SourceUsers retrieves all users from source.
func (h *Service) SourceUsers(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, ts, err := h.sourcesSeries(ctx, w, r)
	if err != nil {
		return
	}
	store := ts.Users(ctx)
	users, err := store.All(ctx)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	_, hasRoles := h.hasRoles(ctx, ts)
	su := []interface{}{}
	for _, u := range users {
		// &u is safe: sourceUserResponse copies the fields it needs before
		// the next loop iteration reuses the variable.
		res := sourceUserResponse(&u, srcID, hasRoles)
		su = append(su, res)
	}
	res := sourceUsers{
		Users: su,
	}
	encodeJSON(w, http.StatusOK, res, h.Logger)
}
// SourceUserID retrieves a user with ID from store.
func (h *Service) SourceUserID(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// "uid" is the username path parameter.
	uid := httprouter.GetParamFromContext(ctx, "uid")
	srcID, ts, err := h.sourcesSeries(ctx, w, r)
	if err != nil {
		return
	}
	store := ts.Users(ctx)
	u, err := store.Get(ctx, uid)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	_, hasRoles := h.hasRoles(ctx, ts)
	res := sourceUserResponse(u, srcID, hasRoles)
	encodeJSON(w, http.StatusOK, res, h.Logger)
}
// RemoveSourceUser removes the user from the InfluxDB source
func (h *Service) RemoveSourceUser(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	uid := httprouter.GetParamFromContext(ctx, "uid")
	// sourceUsersStore writes the HTTP error on failure; only the store is needed.
	_, store, err := h.sourceUsersStore(ctx, w, r)
	if err != nil {
		return
	}
	if err := store.Delete(ctx, &chronograf.User{Name: uid}); err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
// UpdateSourceUser changes the password or permissions of a source user
func (h *Service) UpdateSourceUser(w http.ResponseWriter, r *http.Request) {
	var req sourceUserRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, h.Logger)
		return
	}
	if err := req.ValidUpdate(); err != nil {
		invalidData(w, err, h.Logger)
		return
	}
	ctx := r.Context()
	// The username comes from the URL, not the request body.
	uid := httprouter.GetParamFromContext(ctx, "uid")
	srcID, store, err := h.sourceUsersStore(ctx, w, r)
	if err != nil {
		return
	}
	user := &chronograf.User{
		Name:        uid,
		Passwd:      req.Password,
		Permissions: req.Permissions,
	}
	if err := store.Update(ctx, user); err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	su := sourceUser{
		Username:    user.Name,
		Permissions: user.Permissions,
		Links:       newSelfLinks(srcID, "users", user.Name),
	}
	w.Header().Add("Location", su.Links.Self)
	encodeJSON(w, http.StatusOK, su, h.Logger)
}
// sourcesSeries resolves the "id" URL parameter to a source, connects to its
// time series, and returns both. On any failure it writes the HTTP error
// response itself and returns a non-nil error so callers can simply return.
func (h *Service) sourcesSeries(ctx context.Context, w http.ResponseWriter, r *http.Request) (int, chronograf.TimeSeries, error) {
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return 0, nil, err
	}
	src, err := h.SourcesStore.Get(ctx, srcID)
	if err != nil {
		notFound(w, srcID, h.Logger)
		return 0, nil, err
	}
	ts, err := h.TimeSeries(src)
	if err != nil {
		msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return 0, nil, err
	}
	if err = ts.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return 0, nil, err
	}
	return srcID, ts, nil
}
// sourceUsersStore resolves the request's source and returns the users store
// of its time series. HTTP errors are already written on failure.
func (h *Service) sourceUsersStore(ctx context.Context, w http.ResponseWriter, r *http.Request) (int, chronograf.UsersStore, error) {
	srcID, ts, err := h.sourcesSeries(ctx, w, r)
	if err != nil {
		return 0, nil, err
	}
	return srcID, ts.Users(ctx), nil
}
// hasRoles checks if the influx source has roles or not, returning the
// roles store together with true when it does.
func (h *Service) hasRoles(ctx context.Context, ts chronograf.TimeSeries) (chronograf.RolesStore, bool) {
	if store, err := ts.Roles(ctx); err == nil {
		return store, true
	}
	return nil, false
}
// Permissions returns all possible permissions for this source.
func (h *Service) Permissions(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return
	}
	src, err := h.SourcesStore.Get(ctx, srcID)
	if err != nil {
		notFound(w, srcID, h.Logger)
		return
	}
	ts, err := h.TimeSeries(src)
	if err != nil {
		msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return
	}
	if err = ts.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return
	}
	// ts.Permissions returns no error; the previous `if err != nil` check
	// here re-tested the (necessarily nil) error from Connect and was dead
	// code, so it has been removed.
	perms := ts.Permissions(ctx)
	httpAPISrcs := "/chronograf/v1/sources"
	res := struct {
		Permissions chronograf.Permissions `json:"permissions"`
		Links       map[string]string      `json:"links"` // Links are URI locations related to user
	}{
		Permissions: perms,
		Links: map[string]string{
			"self":   fmt.Sprintf("%s/%d/permissions", httpAPISrcs, srcID),
			"source": fmt.Sprintf("%s/%d", httpAPISrcs, srcID),
		},
	}
	encodeJSON(w, http.StatusOK, res, h.Logger)
}
// sourceRoleRequest is the JSON request body for creating or updating a
// role; it embeds chronograf.Role directly.
type sourceRoleRequest struct {
	chronograf.Role
}
// ValidCreate ensures a new role has a name of acceptable length, that each
// attached user has a name, and that its permissions are well formed.
func (r *sourceRoleRequest) ValidCreate() error {
	if r.Name == "" {
		return fmt.Errorf("Name is required for a role")
	}
	if len(r.Name) > 254 {
		// Previously this case was folded into the empty-name check and
		// reported a misleading "Name is required" error.
		return fmt.Errorf("Name is too long; must be less than 254 characters")
	}
	for _, user := range r.Users {
		if user.Name == "" {
			return fmt.Errorf("Username required")
		}
	}
	return validPermissions(&r.Permissions)
}
// ValidUpdate ensures an updated role name stays within limits, that each
// attached user has a name, and that its permissions are well formed.
func (r *sourceRoleRequest) ValidUpdate() error {
	if len(r.Name) > 254 {
		// The field validated here is the role name; the original message
		// incorrectly said "Username too long".
		return fmt.Errorf("Role name too long; must be less than 254 characters")
	}
	for _, user := range r.Users {
		if user.Name == "" {
			return fmt.Errorf("Username required")
		}
	}
	return validPermissions(&r.Permissions)
}
// roleResponse is the JSON response shape for a role, including its users.
type roleResponse struct {
	Users       []sourceUser           `json:"users"`
	Name        string                 `json:"name"`
	Permissions chronograf.Permissions `json:"permissions"`
	Links       selfLinks              `json:"links"`
}
// newRoleResponse converts a chronograf.Role into its JSON response shape.
// Note: when res.Permissions is nil it is mutated in place to an empty
// slice so the JSON encodes [] rather than null.
func newRoleResponse(srcID int, res *chronograf.Role) roleResponse {
	su := make([]sourceUser, len(res.Users))
	for i := range res.Users {
		name := res.Users[i].Name
		su[i] = sourceUser{
			Username: name,
			Links:    newSelfLinks(srcID, "users", name),
		}
	}
	if res.Permissions == nil {
		res.Permissions = make(chronograf.Permissions, 0)
	}
	return roleResponse{
		Name:        res.Name,
		Permissions: res.Permissions,
		Users:       su,
		Links:       newSelfLinks(srcID, "roles", res.Name),
	}
}
// NewRole adds role to source
func (h *Service) NewRole(w http.ResponseWriter, r *http.Request) {
	var req sourceRoleRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, h.Logger)
		return
	}
	if err := req.ValidCreate(); err != nil {
		invalidData(w, err, h.Logger)
		return
	}
	ctx := r.Context()
	srcID, ts, err := h.sourcesSeries(ctx, w, r)
	if err != nil {
		return
	}
	// Roles are only available on sources that expose a roles store.
	roles, ok := h.hasRoles(ctx, ts)
	if !ok {
		Error(w, http.StatusNotFound, fmt.Sprintf("Source %d does not have role capability", srcID), h.Logger)
		return
	}
	res, err := roles.Add(ctx, &req.Role)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	rr := newRoleResponse(srcID, res)
	w.Header().Add("Location", rr.Links.Self)
	encodeJSON(w, http.StatusCreated, rr, h.Logger)
}
// UpdateRole changes the permissions or users of a role
func (h *Service) UpdateRole(w http.ResponseWriter, r *http.Request) {
	var req sourceRoleRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, h.Logger)
		return
	}
	if err := req.ValidUpdate(); err != nil {
		invalidData(w, err, h.Logger)
		return
	}
	ctx := r.Context()
	srcID, ts, err := h.sourcesSeries(ctx, w, r)
	if err != nil {
		return
	}
	roles, ok := h.hasRoles(ctx, ts)
	if !ok {
		Error(w, http.StatusNotFound, fmt.Sprintf("Source %d does not have role capability", srcID), h.Logger)
		return
	}
	// The role name from the URL overrides any name in the request body.
	rid := httprouter.GetParamFromContext(ctx, "rid")
	req.Name = rid
	if err := roles.Update(ctx, &req.Role); err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	// Re-read the role so the response reflects the stored state.
	role, err := roles.Get(ctx, req.Name)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	rr := newRoleResponse(srcID, role)
	w.Header().Add("Location", rr.Links.Self)
	encodeJSON(w, http.StatusOK, rr, h.Logger)
}
// RoleID retrieves a role with ID from store.
func (h *Service) RoleID(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, ts, err := h.sourcesSeries(ctx, w, r)
	if err != nil {
		return
	}
	roles, ok := h.hasRoles(ctx, ts)
	if !ok {
		Error(w, http.StatusNotFound, fmt.Sprintf("Source %d does not have role capability", srcID), h.Logger)
		return
	}
	// "rid" is the role name path parameter.
	rid := httprouter.GetParamFromContext(ctx, "rid")
	role, err := roles.Get(ctx, rid)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	rr := newRoleResponse(srcID, role)
	encodeJSON(w, http.StatusOK, rr, h.Logger)
}
// Roles retrieves all roles from the store
func (h *Service) Roles(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, ts, err := h.sourcesSeries(ctx, w, r)
	if err != nil {
		return
	}
	store, ok := h.hasRoles(ctx, ts)
	if !ok {
		Error(w, http.StatusNotFound, fmt.Sprintf("Source %d does not have role capability", srcID), h.Logger)
		return
	}
	roles, err := store.All(ctx)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	rr := make([]roleResponse, len(roles))
	for i, role := range roles {
		// &role is consumed by newRoleResponse before the next iteration.
		rr[i] = newRoleResponse(srcID, &role)
	}
	res := struct {
		Roles []roleResponse `json:"roles"`
	}{rr}
	encodeJSON(w, http.StatusOK, res, h.Logger)
}
// RemoveRole removes role from data source.
func (h *Service) RemoveRole(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, ts, err := h.sourcesSeries(ctx, w, r)
	if err != nil {
		return
	}
	roles, ok := h.hasRoles(ctx, ts)
	if !ok {
		Error(w, http.StatusNotFound, fmt.Sprintf("Source %d does not have role capability", srcID), h.Logger)
		return
	}
	rid := httprouter.GetParamFromContext(ctx, "rid")
	if err := roles.Delete(ctx, &chronograf.Role{Name: rid}); err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
Update /users role response to omit the nested users list from each role
package server
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"github.com/bouk/httprouter"
"github.com/influxdata/chronograf"
)
// validPermissions reports an error when any permission carries an
// unsupported scope, or when a database-scoped permission lacks a database
// name. A nil permission set is treated as valid.
func validPermissions(perms *chronograf.Permissions) error {
	if perms == nil {
		return nil
	}
	for _, p := range *perms {
		switch {
		case p.Scope != chronograf.AllScope && p.Scope != chronograf.DBScope:
			return fmt.Errorf("Invalid permission scope")
		case p.Scope == chronograf.DBScope && p.Name == "":
			return fmt.Errorf("Database scoped permission requires a name")
		}
	}
	return nil
}
// sourceUserRequest is the JSON request body for creating or updating a
// user on a time-series source.
type sourceUserRequest struct {
	Username    string                 `json:"name,omitempty"`        // Username for new account
	Password    string                 `json:"password,omitempty"`    // Password for new account
	Permissions chronograf.Permissions `json:"permissions,omitempty"` // Optional permissions
}
// ValidCreate ensures a new-user request carries a username, a password,
// and only well-formed permissions.
func (r *sourceUserRequest) ValidCreate() error {
	switch {
	case r.Username == "":
		return fmt.Errorf("Username required")
	case r.Password == "":
		return fmt.Errorf("Password required")
	}
	return validPermissions(&r.Permissions)
}
// ValidUpdate ensures an update request changes at least one field and
// carries only well-formed permissions.
func (r *sourceUserRequest) ValidUpdate() error {
	noPassword := r.Password == ""
	noPerms := len(r.Permissions) == 0
	if noPassword && noPerms {
		return fmt.Errorf("No fields to update")
	}
	return validPermissions(&r.Permissions)
}
// sourceUser is the JSON response shape for a user on a role-less source.
type sourceUser struct {
	Username    string                 `json:"name"`                  // Username for new account
	Permissions chronograf.Permissions `json:"permissions,omitempty"` // Account's permissions
	Links       selfLinks              `json:"links"`                 // Links are URI locations related to user
}
// enterpriseSourceUser is the JSON response shape for a user on a source
// that supports roles (InfluxDB Enterprise).
type enterpriseSourceUser struct {
	Username    string                 `json:"name"`        // Username for new account
	Permissions chronograf.Permissions `json:"permissions"` // Account's permissions
	Roles       []userRoleResponse     `json:"roles"`       // Roles if source uses them
	Links       selfLinks              `json:"links"`       // Links are URI locations related to user
}
// userRoleResponse is the trimmed role shape embedded in a user response;
// unlike roleResponse it omits the role's users list.
type userRoleResponse struct {
	Name        string                 `json:"name"`
	Permissions chronograf.Permissions `json:"permissions"`
	Links       selfLinks              `json:"links"`
}
// newUserRoleResponse converts a chronograf.Role into the trimmed role
// shape embedded in user responses. Note: when res.Permissions is nil it is
// mutated in place to an empty slice so the JSON encodes [] rather than null.
func newUserRoleResponse(srcID int, res *chronograf.Role) userRoleResponse {
	if res.Permissions == nil {
		res.Permissions = make(chronograf.Permissions, 0)
	}
	return userRoleResponse{
		Name:        res.Name,
		Permissions: res.Permissions,
		Links:       newSelfLinks(srcID, "roles", res.Name),
	}
}
// selfLinks holds the URI locations related to a resource.
type selfLinks struct {
	Self string `json:"self"` // Self link mapping to this resource
}
// sourceUserResponse builds the JSON response for one source user. When the
// source supports roles the enterprise shape (with trimmed roles) is
// returned, otherwise a plain sourceUser.
func sourceUserResponse(u *chronograf.User, srcID int, hasRoles bool) interface{} {
	// Permissions should always be returned. If no permissions, then
	// return empty array
	perms := u.Permissions
	if len(perms) == 0 {
		perms = make([]chronograf.Permission, 0)
	}
	// If the source supports roles, we return all
	// associated with this user
	if hasRoles {
		res := enterpriseSourceUser{
			Username:    u.Name,
			Permissions: perms,
			// Default to an empty (not null) roles array in the JSON.
			Roles: make([]userRoleResponse, 0),
			Links: newSelfLinks(srcID, "users", u.Name),
		}
		if len(u.Roles) > 0 {
			rr := make([]userRoleResponse, len(u.Roles))
			for i, role := range u.Roles {
				rr[i] = newUserRoleResponse(srcID, &role)
			}
			res.Roles = rr
		}
		return &res
	}
	res := sourceUser{
		Username:    u.Name,
		Permissions: perms,
		Links:       newSelfLinks(srcID, "users", u.Name),
	}
	return &res
}
// newSelfLinks builds the self link for a resource nested under a source,
// URL-escaping the resource name via url.URL.
func newSelfLinks(id int, parent, resource string) selfLinks {
	const httpAPISrcs = "/chronograf/v1/sources"
	encoded := (&url.URL{Path: resource}).String()
	return selfLinks{
		Self: fmt.Sprintf("%s/%d/%s/%s", httpAPISrcs, id, parent, encoded),
	}
}
// NewSourceUser adds user to source
func (h *Service) NewSourceUser(w http.ResponseWriter, r *http.Request) {
	var req sourceUserRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, h.Logger)
		return
	}
	if err := req.ValidCreate(); err != nil {
		invalidData(w, err, h.Logger)
		return
	}
	ctx := r.Context()
	// sourceUsersStore has already written the HTTP error response on failure.
	srcID, store, err := h.sourceUsersStore(ctx, w, r)
	if err != nil {
		return
	}
	user := &chronograf.User{
		Name:        req.Username,
		Passwd:      req.Password,
		Permissions: req.Permissions,
	}
	res, err := store.Add(ctx, user)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	// NOTE(review): permissions are echoed from the request rather than
	// taken from the store result — confirm this is intended.
	su := sourceUser{
		Username:    res.Name,
		Permissions: req.Permissions,
		Links:       newSelfLinks(srcID, "users", res.Name),
	}
	w.Header().Add("Location", su.Links.Self)
	encodeJSON(w, http.StatusCreated, su, h.Logger)
}
// sourceUsers wraps the list of user responses for the /users endpoint.
type sourceUsers struct {
	Users []interface{} `json:"users"`
}
// SourceUsers retrieves all users from source.
func (h *Service) SourceUsers(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, ts, err := h.sourcesSeries(ctx, w, r)
	if err != nil {
		return
	}
	store := ts.Users(ctx)
	users, err := store.All(ctx)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	_, hasRoles := h.hasRoles(ctx, ts)
	su := []interface{}{}
	for _, u := range users {
		// &u is safe: sourceUserResponse copies the fields it needs before
		// the next loop iteration reuses the variable.
		res := sourceUserResponse(&u, srcID, hasRoles)
		su = append(su, res)
	}
	res := sourceUsers{
		Users: su,
	}
	encodeJSON(w, http.StatusOK, res, h.Logger)
}
// SourceUserID retrieves a user with ID from store.
func (h *Service) SourceUserID(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// "uid" is the username path parameter.
	uid := httprouter.GetParamFromContext(ctx, "uid")
	srcID, ts, err := h.sourcesSeries(ctx, w, r)
	if err != nil {
		return
	}
	store := ts.Users(ctx)
	u, err := store.Get(ctx, uid)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	_, hasRoles := h.hasRoles(ctx, ts)
	res := sourceUserResponse(u, srcID, hasRoles)
	encodeJSON(w, http.StatusOK, res, h.Logger)
}
// RemoveSourceUser removes the user from the InfluxDB source
func (h *Service) RemoveSourceUser(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	uid := httprouter.GetParamFromContext(ctx, "uid")
	// sourceUsersStore writes the HTTP error on failure; only the store is needed.
	_, store, err := h.sourceUsersStore(ctx, w, r)
	if err != nil {
		return
	}
	if err := store.Delete(ctx, &chronograf.User{Name: uid}); err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
// UpdateSourceUser changes the password or permissions of a source user
func (h *Service) UpdateSourceUser(w http.ResponseWriter, r *http.Request) {
	var req sourceUserRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, h.Logger)
		return
	}
	if err := req.ValidUpdate(); err != nil {
		invalidData(w, err, h.Logger)
		return
	}
	ctx := r.Context()
	// The username comes from the URL, not the request body.
	uid := httprouter.GetParamFromContext(ctx, "uid")
	srcID, store, err := h.sourceUsersStore(ctx, w, r)
	if err != nil {
		return
	}
	user := &chronograf.User{
		Name:        uid,
		Passwd:      req.Password,
		Permissions: req.Permissions,
	}
	if err := store.Update(ctx, user); err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	su := sourceUser{
		Username:    user.Name,
		Permissions: user.Permissions,
		Links:       newSelfLinks(srcID, "users", user.Name),
	}
	w.Header().Add("Location", su.Links.Self)
	encodeJSON(w, http.StatusOK, su, h.Logger)
}
// sourcesSeries resolves the "id" URL parameter to a source, connects to its
// time series, and returns both. On any failure it writes the HTTP error
// response itself and returns a non-nil error so callers can simply return.
func (h *Service) sourcesSeries(ctx context.Context, w http.ResponseWriter, r *http.Request) (int, chronograf.TimeSeries, error) {
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return 0, nil, err
	}
	src, err := h.SourcesStore.Get(ctx, srcID)
	if err != nil {
		notFound(w, srcID, h.Logger)
		return 0, nil, err
	}
	ts, err := h.TimeSeries(src)
	if err != nil {
		msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return 0, nil, err
	}
	if err = ts.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return 0, nil, err
	}
	return srcID, ts, nil
}
// sourceUsersStore resolves the request's source and returns the users store
// of its time series. HTTP errors are already written on failure.
func (h *Service) sourceUsersStore(ctx context.Context, w http.ResponseWriter, r *http.Request) (int, chronograf.UsersStore, error) {
	srcID, ts, err := h.sourcesSeries(ctx, w, r)
	if err != nil {
		return 0, nil, err
	}
	return srcID, ts.Users(ctx), nil
}
// hasRoles checks if the influx source has roles or not, returning the
// roles store together with true when it does.
func (h *Service) hasRoles(ctx context.Context, ts chronograf.TimeSeries) (chronograf.RolesStore, bool) {
	if store, err := ts.Roles(ctx); err == nil {
		return store, true
	}
	return nil, false
}
// Permissions returns all possible permissions for this source.
func (h *Service) Permissions(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, err := paramID("id", r)
	if err != nil {
		Error(w, http.StatusUnprocessableEntity, err.Error(), h.Logger)
		return
	}
	src, err := h.SourcesStore.Get(ctx, srcID)
	if err != nil {
		notFound(w, srcID, h.Logger)
		return
	}
	ts, err := h.TimeSeries(src)
	if err != nil {
		msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return
	}
	if err = ts.Connect(ctx, &src); err != nil {
		msg := fmt.Sprintf("Unable to connect to source %d: %v", srcID, err)
		Error(w, http.StatusBadRequest, msg, h.Logger)
		return
	}
	// ts.Permissions returns no error; the previous `if err != nil` check
	// here re-tested the (necessarily nil) error from Connect and was dead
	// code, so it has been removed.
	perms := ts.Permissions(ctx)
	httpAPISrcs := "/chronograf/v1/sources"
	res := struct {
		Permissions chronograf.Permissions `json:"permissions"`
		Links       map[string]string      `json:"links"` // Links are URI locations related to user
	}{
		Permissions: perms,
		Links: map[string]string{
			"self":   fmt.Sprintf("%s/%d/permissions", httpAPISrcs, srcID),
			"source": fmt.Sprintf("%s/%d", httpAPISrcs, srcID),
		},
	}
	encodeJSON(w, http.StatusOK, res, h.Logger)
}
// sourceRoleRequest is the JSON request body for creating or updating a
// role; it embeds chronograf.Role directly.
type sourceRoleRequest struct {
	chronograf.Role
}
// ValidCreate ensures a new role has a name of acceptable length, that each
// attached user has a name, and that its permissions are well formed.
func (r *sourceRoleRequest) ValidCreate() error {
	if r.Name == "" {
		return fmt.Errorf("Name is required for a role")
	}
	if len(r.Name) > 254 {
		// Previously this case was folded into the empty-name check and
		// reported a misleading "Name is required" error.
		return fmt.Errorf("Name is too long; must be less than 254 characters")
	}
	for _, user := range r.Users {
		if user.Name == "" {
			return fmt.Errorf("Username required")
		}
	}
	return validPermissions(&r.Permissions)
}
// ValidUpdate ensures an updated role name stays within limits, that each
// attached user has a name, and that its permissions are well formed.
func (r *sourceRoleRequest) ValidUpdate() error {
	if len(r.Name) > 254 {
		// The field validated here is the role name; the original message
		// incorrectly said "Username too long".
		return fmt.Errorf("Role name too long; must be less than 254 characters")
	}
	for _, user := range r.Users {
		if user.Name == "" {
			return fmt.Errorf("Username required")
		}
	}
	return validPermissions(&r.Permissions)
}
// roleResponse is the JSON response shape for a role, including its users.
type roleResponse struct {
	Users       []sourceUser           `json:"users"`
	Name        string                 `json:"name"`
	Permissions chronograf.Permissions `json:"permissions"`
	Links       selfLinks              `json:"links"`
}
// newRoleResponse converts a chronograf.Role into its JSON response shape.
// Note: when res.Permissions is nil it is mutated in place to an empty
// slice so the JSON encodes [] rather than null.
func newRoleResponse(srcID int, res *chronograf.Role) roleResponse {
	su := make([]sourceUser, len(res.Users))
	for i := range res.Users {
		name := res.Users[i].Name
		su[i] = sourceUser{
			Username: name,
			Links:    newSelfLinks(srcID, "users", name),
		}
	}
	if res.Permissions == nil {
		res.Permissions = make(chronograf.Permissions, 0)
	}
	return roleResponse{
		Name:        res.Name,
		Permissions: res.Permissions,
		Users:       su,
		Links:       newSelfLinks(srcID, "roles", res.Name),
	}
}
// NewRole adds role to source
func (h *Service) NewRole(w http.ResponseWriter, r *http.Request) {
	var req sourceRoleRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, h.Logger)
		return
	}
	if err := req.ValidCreate(); err != nil {
		invalidData(w, err, h.Logger)
		return
	}
	ctx := r.Context()
	srcID, ts, err := h.sourcesSeries(ctx, w, r)
	if err != nil {
		return
	}
	// Roles are only available on sources that expose a roles store.
	roles, ok := h.hasRoles(ctx, ts)
	if !ok {
		Error(w, http.StatusNotFound, fmt.Sprintf("Source %d does not have role capability", srcID), h.Logger)
		return
	}
	res, err := roles.Add(ctx, &req.Role)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	rr := newRoleResponse(srcID, res)
	w.Header().Add("Location", rr.Links.Self)
	encodeJSON(w, http.StatusCreated, rr, h.Logger)
}
// UpdateRole changes the permissions or users of a role
func (h *Service) UpdateRole(w http.ResponseWriter, r *http.Request) {
	var req sourceRoleRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		invalidJSON(w, h.Logger)
		return
	}
	if err := req.ValidUpdate(); err != nil {
		invalidData(w, err, h.Logger)
		return
	}
	ctx := r.Context()
	srcID, ts, err := h.sourcesSeries(ctx, w, r)
	if err != nil {
		return
	}
	roles, ok := h.hasRoles(ctx, ts)
	if !ok {
		Error(w, http.StatusNotFound, fmt.Sprintf("Source %d does not have role capability", srcID), h.Logger)
		return
	}
	// The role name from the URL overrides any name in the request body.
	rid := httprouter.GetParamFromContext(ctx, "rid")
	req.Name = rid
	if err := roles.Update(ctx, &req.Role); err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	// Re-read the role so the response reflects the stored state.
	role, err := roles.Get(ctx, req.Name)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	rr := newRoleResponse(srcID, role)
	w.Header().Add("Location", rr.Links.Self)
	encodeJSON(w, http.StatusOK, rr, h.Logger)
}
// RoleID retrieves a role with ID from store.
func (h *Service) RoleID(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, ts, err := h.sourcesSeries(ctx, w, r)
	if err != nil {
		return
	}
	roles, ok := h.hasRoles(ctx, ts)
	if !ok {
		Error(w, http.StatusNotFound, fmt.Sprintf("Source %d does not have role capability", srcID), h.Logger)
		return
	}
	// "rid" is the role name path parameter.
	rid := httprouter.GetParamFromContext(ctx, "rid")
	role, err := roles.Get(ctx, rid)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	rr := newRoleResponse(srcID, role)
	encodeJSON(w, http.StatusOK, rr, h.Logger)
}
// Roles retrieves all roles from the store
func (h *Service) Roles(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, ts, err := h.sourcesSeries(ctx, w, r)
	if err != nil {
		return
	}
	store, ok := h.hasRoles(ctx, ts)
	if !ok {
		Error(w, http.StatusNotFound, fmt.Sprintf("Source %d does not have role capability", srcID), h.Logger)
		return
	}
	roles, err := store.All(ctx)
	if err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	rr := make([]roleResponse, len(roles))
	for i, role := range roles {
		// &role is consumed by newRoleResponse before the next iteration.
		rr[i] = newRoleResponse(srcID, &role)
	}
	res := struct {
		Roles []roleResponse `json:"roles"`
	}{rr}
	encodeJSON(w, http.StatusOK, res, h.Logger)
}
// RemoveRole removes role from data source.
func (h *Service) RemoveRole(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	srcID, ts, err := h.sourcesSeries(ctx, w, r)
	if err != nil {
		return
	}
	roles, ok := h.hasRoles(ctx, ts)
	if !ok {
		Error(w, http.StatusNotFound, fmt.Sprintf("Source %d does not have role capability", srcID), h.Logger)
		return
	}
	rid := httprouter.GetParamFromContext(ctx, "rid")
	if err := roles.Delete(ctx, &chronograf.Role{Name: rid}); err != nil {
		Error(w, http.StatusBadRequest, err.Error(), h.Logger)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package pkcs11 implements logic for using PKCS #11 shared libraries.
package pkcs11
/*
#include <dlfcn.h>
#include <stdlib.h>
#define CK_PTR *
#define CK_DECLARE_FUNCTION(returnType, name) \
returnType name
#define CK_DECLARE_FUNCTION_POINTER(returnType, name) \
returnType (* name)
#define CK_CALLBACK_FUNCTION(returnType, name) \
returnType (* name)
#ifndef NULL_PTR
#define NULL_PTR 0
#endif
#include "../third_party/pkcs11/pkcs11.h"
// Go can't call a C function pointer directly, so these are wrappers that
// perform the dereference in C.
CK_RV get_function_list(CK_C_GetFunctionList fn, CK_FUNCTION_LIST_PTR_PTR p) {
return (*fn)(p);
}
CK_RV ck_initialize(CK_FUNCTION_LIST_PTR fl, CK_C_INITIALIZE_ARGS_PTR args) {
return (*fl->C_Initialize)((CK_VOID_PTR)(args));
}
CK_RV ck_finalize(CK_FUNCTION_LIST_PTR fl) {
return (*fl->C_Finalize)(NULL_PTR);
}
CK_RV ck_init_token(
CK_FUNCTION_LIST_PTR fl,
CK_SLOT_ID slotID,
CK_UTF8CHAR_PTR pPin,
CK_ULONG ulPinLen,
CK_UTF8CHAR_PTR pLabel
) {
if (ulPinLen == 0) {
// TODO(ericchiang): This isn't tested since softhsm requires a PIN.
pPin = NULL_PTR;
}
return (*fl->C_InitToken)(slotID, pPin, ulPinLen, pLabel);
}
CK_RV ck_get_slot_list(
CK_FUNCTION_LIST_PTR fl,
CK_SLOT_ID_PTR pSlotList,
CK_ULONG_PTR pulCount
) {
return (*fl->C_GetSlotList)(CK_FALSE, pSlotList, pulCount);
}
CK_RV ck_get_info(
CK_FUNCTION_LIST_PTR fl,
CK_INFO_PTR pInfo
) {
return (*fl->C_GetInfo)(pInfo);
}
CK_RV ck_get_slot_info(
CK_FUNCTION_LIST_PTR fl,
CK_SLOT_ID slotID,
CK_SLOT_INFO_PTR pInfo
) {
return (*fl->C_GetSlotInfo)(slotID, pInfo);
}
CK_RV ck_get_token_info(
CK_FUNCTION_LIST_PTR fl,
CK_SLOT_ID slotID,
CK_TOKEN_INFO_PTR pInfo
) {
return (*fl->C_GetTokenInfo)(slotID, pInfo);
}
CK_RV ck_open_session(
CK_FUNCTION_LIST_PTR fl,
CK_SLOT_ID slotID,
CK_FLAGS flags,
CK_SESSION_HANDLE_PTR phSession
) {
return (*fl->C_OpenSession)(slotID, flags, NULL_PTR, NULL_PTR, phSession);
}
CK_RV ck_close_session(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession
) {
return (*fl->C_CloseSession)(hSession);
}
CK_RV ck_login(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_USER_TYPE userType,
CK_UTF8CHAR_PTR pPin,
CK_ULONG ulPinLen
) {
return (*fl->C_Login)(hSession, userType, pPin, ulPinLen);
}
CK_RV ck_logout(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession
) {
return (*fl->C_Logout)(hSession);
}
CK_RV ck_init_pin(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_UTF8CHAR_PTR pPin,
CK_ULONG ulPinLen
) {
return (*fl->C_InitPIN)(hSession, pPin, ulPinLen);
}
CK_RV ck_generate_key_pair(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_MECHANISM_PTR pMechanism,
CK_ATTRIBUTE_PTR pPublicKeyTemplate,
CK_ULONG ulPublicKeyAttributeCount,
CK_ATTRIBUTE_PTR pPrivateKeyTemplate,
CK_ULONG ulPrivateKeyAttributeCount,
CK_OBJECT_HANDLE_PTR phPublicKey,
CK_OBJECT_HANDLE_PTR phPrivateKey
) {
return (*fl->C_GenerateKeyPair)(
hSession,
pMechanism,
pPublicKeyTemplate,
ulPublicKeyAttributeCount,
pPrivateKeyTemplate,
ulPrivateKeyAttributeCount,
phPublicKey,
phPrivateKey
);
}
CK_RV ck_find_objects_init(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_ATTRIBUTE_PTR pTemplate,
CK_ULONG ulCount
) {
return (*fl->C_FindObjectsInit)(hSession, pTemplate, ulCount);
}
CK_RV ck_find_objects(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_OBJECT_HANDLE_PTR phObject,
CK_ULONG ulMaxObjectCount,
CK_ULONG_PTR pulObjectCount
) {
return (*fl->C_FindObjects)(hSession, phObject, ulMaxObjectCount, pulObjectCount);
}
CK_RV ck_find_objects_final(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession
) {
return (*fl->C_FindObjectsFinal)(hSession);
}
CK_RV ck_create_object(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_ATTRIBUTE_PTR pTemplate,
CK_ULONG ulCount,
CK_OBJECT_HANDLE_PTR phObject
) {
return (*fl->C_CreateObject)(hSession, pTemplate, ulCount, phObject);
}
CK_RV ck_get_attribute_value(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_OBJECT_HANDLE hObject,
CK_ATTRIBUTE_PTR pTemplate,
CK_ULONG ulCount
) {
return (*fl->C_GetAttributeValue)(hSession, hObject, pTemplate, ulCount);
}
CK_RV ck_set_attribute_value(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_OBJECT_HANDLE hObject,
CK_ATTRIBUTE_PTR pTemplate,
CK_ULONG ulCount
) {
return (*fl->C_SetAttributeValue)(hSession, hObject, pTemplate, ulCount);
}
CK_RV ck_sign_init(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_MECHANISM_PTR pMechanism,
CK_OBJECT_HANDLE hKey
) {
return (*fl->C_SignInit)(hSession, pMechanism, hKey);
}
CK_RV ck_sign(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_BYTE_PTR pData,
CK_ULONG ulDataLen,
CK_BYTE_PTR pSignature,
CK_ULONG_PTR pulSignatureLen
) {
return (*fl->C_Sign)(hSession, pData, ulDataLen, pSignature, pulSignatureLen);
}
*/
// #cgo linux LDFLAGS: -ldl
import "C"
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/x509"
"encoding/asn1"
"fmt"
"io"
"math/big"
"strings"
"unsafe"
)
// ckStringPadded copies s into b, right-padding the remainder with ASCII
// spaces. It reports false when s does not fit in b.
func ckStringPadded(b []C.CK_UTF8CHAR, s string) bool {
	if len(s) > len(b) {
		return false
	}
	for i := 0; i < len(s); i++ {
		b[i] = C.CK_UTF8CHAR(s[i])
	}
	for i := len(s); i < len(b); i++ {
		b[i] = C.CK_UTF8CHAR(' ')
	}
	return true
}
// ckString converts a Go string to a cryptokit string. The result lives in
// Go memory and must not be freed with C.free.
func ckString(s string) []C.CK_UTF8CHAR {
	out := make([]C.CK_UTF8CHAR, 0, len(s))
	for _, c := range []byte(s) {
		out = append(out, C.CK_UTF8CHAR(c))
	}
	return out
}
// ckCString converts a Go string to a cryptokit string held by C. This is required,
// for example, when building a CK_ATTRIBUTE, which needs to hold a pointer to a
// cryptokit string.
//
// The buffer is allocated with C.malloc; the caller is responsible for
// releasing it with C.free.
func ckCString(s string) *C.CK_UTF8CHAR {
	b := (*C.CK_UTF8CHAR)(C.malloc(C.sizeof_CK_UTF8CHAR * C.ulong(len(s))))
	bs := unsafe.Slice(b, len(s))
	for i, c := range []byte(s) {
		bs[i] = C.CK_UTF8CHAR(c)
	}
	return b
}
// ckGoString copies an n-byte cryptokit string held in C memory into a Go string.
func ckGoString(s *C.CK_UTF8CHAR, n C.CK_ULONG) string {
	buf := make([]byte, 0, int(n))
	for _, c := range unsafe.Slice(s, n) {
		buf = append(buf, byte(c))
	}
	return string(buf)
}
// Error is returned for cryptokit specific API codes.
type Error struct {
	// fnName is the PKCS #11 function that produced the code, e.g. "C_Login".
	fnName string
	// code is the raw CK_RV return value from the module.
	code C.CK_RV
}
// Error formats the failure as "pkcs11: <fn>() <code>", using the symbolic
// CKR_* name when known and a hex value otherwise.
func (e *Error) Error() string {
	msg, ok := ckRVString[e.code]
	if !ok {
		msg = fmt.Sprintf("0x%x", e.code)
	}
	return "pkcs11: " + e.fnName + "() " + msg
}
// ckRVString maps PKCS #11 return values to their symbolic CKR_* names for
// human-readable error messages. Codes not present here are formatted as hex
// by Error.Error.
var ckRVString = map[C.CK_RV]string{
	C.CKR_CANCEL: "CKR_CANCEL",
	C.CKR_HOST_MEMORY: "CKR_HOST_MEMORY",
	C.CKR_SLOT_ID_INVALID: "CKR_SLOT_ID_INVALID",
	C.CKR_GENERAL_ERROR: "CKR_GENERAL_ERROR",
	C.CKR_FUNCTION_FAILED: "CKR_FUNCTION_FAILED",
	C.CKR_ARGUMENTS_BAD: "CKR_ARGUMENTS_BAD",
	C.CKR_NO_EVENT: "CKR_NO_EVENT",
	C.CKR_NEED_TO_CREATE_THREADS: "CKR_NEED_TO_CREATE_THREADS",
	C.CKR_CANT_LOCK: "CKR_CANT_LOCK",
	C.CKR_ATTRIBUTE_READ_ONLY: "CKR_ATTRIBUTE_READ_ONLY",
	C.CKR_ATTRIBUTE_SENSITIVE: "CKR_ATTRIBUTE_SENSITIVE",
	C.CKR_ATTRIBUTE_TYPE_INVALID: "CKR_ATTRIBUTE_TYPE_INVALID",
	C.CKR_ATTRIBUTE_VALUE_INVALID: "CKR_ATTRIBUTE_VALUE_INVALID",
	C.CKR_ACTION_PROHIBITED: "CKR_ACTION_PROHIBITED",
	C.CKR_DATA_INVALID: "CKR_DATA_INVALID",
	C.CKR_DATA_LEN_RANGE: "CKR_DATA_LEN_RANGE",
	C.CKR_DEVICE_ERROR: "CKR_DEVICE_ERROR",
	C.CKR_DEVICE_MEMORY: "CKR_DEVICE_MEMORY",
	C.CKR_DEVICE_REMOVED: "CKR_DEVICE_REMOVED",
	C.CKR_ENCRYPTED_DATA_INVALID: "CKR_ENCRYPTED_DATA_INVALID",
	C.CKR_ENCRYPTED_DATA_LEN_RANGE: "CKR_ENCRYPTED_DATA_LEN_RANGE",
	C.CKR_FUNCTION_CANCELED: "CKR_FUNCTION_CANCELED",
	C.CKR_FUNCTION_NOT_PARALLEL: "CKR_FUNCTION_NOT_PARALLEL",
	C.CKR_FUNCTION_NOT_SUPPORTED: "CKR_FUNCTION_NOT_SUPPORTED",
	C.CKR_KEY_HANDLE_INVALID: "CKR_KEY_HANDLE_INVALID",
	C.CKR_KEY_SIZE_RANGE: "CKR_KEY_SIZE_RANGE",
	C.CKR_KEY_TYPE_INCONSISTENT: "CKR_KEY_TYPE_INCONSISTENT",
	C.CKR_KEY_NOT_NEEDED: "CKR_KEY_NOT_NEEDED",
	C.CKR_KEY_CHANGED: "CKR_KEY_CHANGED",
	C.CKR_KEY_NEEDED: "CKR_KEY_NEEDED",
	C.CKR_KEY_INDIGESTIBLE: "CKR_KEY_INDIGESTIBLE",
	C.CKR_KEY_FUNCTION_NOT_PERMITTED: "CKR_KEY_FUNCTION_NOT_PERMITTED",
	C.CKR_KEY_NOT_WRAPPABLE: "CKR_KEY_NOT_WRAPPABLE",
	C.CKR_KEY_UNEXTRACTABLE: "CKR_KEY_UNEXTRACTABLE",
	C.CKR_MECHANISM_INVALID: "CKR_MECHANISM_INVALID",
	C.CKR_MECHANISM_PARAM_INVALID: "CKR_MECHANISM_PARAM_INVALID",
	C.CKR_OBJECT_HANDLE_INVALID: "CKR_OBJECT_HANDLE_INVALID",
	C.CKR_OPERATION_ACTIVE: "CKR_OPERATION_ACTIVE",
	C.CKR_OPERATION_NOT_INITIALIZED: "CKR_OPERATION_NOT_INITIALIZED",
	C.CKR_PIN_INCORRECT: "CKR_PIN_INCORRECT",
	C.CKR_PIN_INVALID: "CKR_PIN_INVALID",
	C.CKR_PIN_LEN_RANGE: "CKR_PIN_LEN_RANGE",
	C.CKR_PIN_EXPIRED: "CKR_PIN_EXPIRED",
	C.CKR_PIN_LOCKED: "CKR_PIN_LOCKED",
	C.CKR_SESSION_CLOSED: "CKR_SESSION_CLOSED",
	C.CKR_SESSION_COUNT: "CKR_SESSION_COUNT",
	C.CKR_SESSION_HANDLE_INVALID: "CKR_SESSION_HANDLE_INVALID",
	C.CKR_SESSION_PARALLEL_NOT_SUPPORTED: "CKR_SESSION_PARALLEL_NOT_SUPPORTED",
	C.CKR_SESSION_READ_ONLY: "CKR_SESSION_READ_ONLY",
	C.CKR_SESSION_EXISTS: "CKR_SESSION_EXISTS",
	C.CKR_SESSION_READ_ONLY_EXISTS: "CKR_SESSION_READ_ONLY_EXISTS",
	C.CKR_SESSION_READ_WRITE_SO_EXISTS: "CKR_SESSION_READ_WRITE_SO_EXISTS",
	C.CKR_SIGNATURE_INVALID: "CKR_SIGNATURE_INVALID",
	C.CKR_SIGNATURE_LEN_RANGE: "CKR_SIGNATURE_LEN_RANGE",
	C.CKR_TEMPLATE_INCOMPLETE: "CKR_TEMPLATE_INCOMPLETE",
	C.CKR_TEMPLATE_INCONSISTENT: "CKR_TEMPLATE_INCONSISTENT",
	C.CKR_TOKEN_NOT_PRESENT: "CKR_TOKEN_NOT_PRESENT",
	C.CKR_TOKEN_NOT_RECOGNIZED: "CKR_TOKEN_NOT_RECOGNIZED",
	C.CKR_TOKEN_WRITE_PROTECTED: "CKR_TOKEN_WRITE_PROTECTED",
	C.CKR_UNWRAPPING_KEY_HANDLE_INVALID: "CKR_UNWRAPPING_KEY_HANDLE_INVALID",
	C.CKR_UNWRAPPING_KEY_SIZE_RANGE: "CKR_UNWRAPPING_KEY_SIZE_RANGE",
	C.CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT: "CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT",
	C.CKR_USER_ALREADY_LOGGED_IN: "CKR_USER_ALREADY_LOGGED_IN",
	C.CKR_USER_NOT_LOGGED_IN: "CKR_USER_NOT_LOGGED_IN",
	C.CKR_USER_PIN_NOT_INITIALIZED: "CKR_USER_PIN_NOT_INITIALIZED",
	C.CKR_USER_TYPE_INVALID: "CKR_USER_TYPE_INVALID",
	C.CKR_USER_ANOTHER_ALREADY_LOGGED_IN: "CKR_USER_ANOTHER_ALREADY_LOGGED_IN",
	C.CKR_USER_TOO_MANY_TYPES: "CKR_USER_TOO_MANY_TYPES",
	C.CKR_WRAPPED_KEY_INVALID: "CKR_WRAPPED_KEY_INVALID",
	C.CKR_WRAPPED_KEY_LEN_RANGE: "CKR_WRAPPED_KEY_LEN_RANGE",
	C.CKR_WRAPPING_KEY_HANDLE_INVALID: "CKR_WRAPPING_KEY_HANDLE_INVALID",
	C.CKR_WRAPPING_KEY_SIZE_RANGE: "CKR_WRAPPING_KEY_SIZE_RANGE",
	C.CKR_WRAPPING_KEY_TYPE_INCONSISTENT: "CKR_WRAPPING_KEY_TYPE_INCONSISTENT",
	C.CKR_RANDOM_SEED_NOT_SUPPORTED: "CKR_RANDOM_SEED_NOT_SUPPORTED",
	C.CKR_RANDOM_NO_RNG: "CKR_RANDOM_NO_RNG",
	C.CKR_DOMAIN_PARAMS_INVALID: "CKR_DOMAIN_PARAMS_INVALID",
	C.CKR_CURVE_NOT_SUPPORTED: "CKR_CURVE_NOT_SUPPORTED",
	C.CKR_BUFFER_TOO_SMALL: "CKR_BUFFER_TOO_SMALL",
	C.CKR_SAVED_STATE_INVALID: "CKR_SAVED_STATE_INVALID",
	C.CKR_INFORMATION_SENSITIVE: "CKR_INFORMATION_SENSITIVE",
	C.CKR_STATE_UNSAVEABLE: "CKR_STATE_UNSAVEABLE",
	C.CKR_CRYPTOKI_NOT_INITIALIZED: "CKR_CRYPTOKI_NOT_INITIALIZED",
	C.CKR_CRYPTOKI_ALREADY_INITIALIZED: "CKR_CRYPTOKI_ALREADY_INITIALIZED",
	C.CKR_MUTEX_BAD: "CKR_MUTEX_BAD",
	C.CKR_MUTEX_NOT_LOCKED: "CKR_MUTEX_NOT_LOCKED",
	C.CKR_FUNCTION_REJECTED: "CKR_FUNCTION_REJECTED",
	C.CKR_VENDOR_DEFINED: "CKR_VENDOR_DEFINED",
}
// isOk converts a PKCS #11 return value into an error, tagging it with the
// name of the function that produced it. CKR_OK maps to nil.
func isOk(fnName string, rv C.CK_RV) error {
	if rv != C.CKR_OK {
		return &Error{fnName, rv}
	}
	return nil
}
// Module represents an opened shared library. By default, this package
// requests locking support from the module, but concurrent safety may
// depend on the underlying library.
type Module struct {
	// mod is a pointer to the dlopen handle. Kept around to dlfree
	// when the Module is closed.
	mod unsafe.Pointer
	// List of C functions provided by the module.
	fl C.CK_FUNCTION_LIST_PTR
	// Version of the module, used for compatibility.
	version C.CK_VERSION
	// info caches the C_GetInfo result gathered at Open time.
	info Info
}
// Open dlopens a shared library by path, initializing the module.
//
// The sequence is: dlopen the library, resolve C_GetFunctionList, call it to
// obtain the function table, then C_Initialize and C_GetInfo. On any failure
// after dlopen succeeds, the handle is dlclose'd before returning.
func Open(path string) (*Module, error) {
	cPath := C.CString(path)
	defer C.free(unsafe.Pointer(cPath))
	mod := C.dlopen(cPath, C.RTLD_NOW)
	if mod == nil {
		return nil, fmt.Errorf("pkcs11: dlopen error: %s", C.GoString(C.dlerror()))
	}
	cSym := C.CString("C_GetFunctionList")
	defer C.free(unsafe.Pointer(cSym))
	getFuncListFn := (C.CK_C_GetFunctionList)(C.dlsym(mod, cSym))
	if getFuncListFn == nil {
		err := fmt.Errorf("pkcs11: lookup function list symbol: %s", C.GoString(C.dlerror()))
		C.dlclose(mod)
		return nil, err
	}
	var p C.CK_FUNCTION_LIST_PTR
	rv := C.get_function_list(getFuncListFn, &p)
	if err := isOk("C_GetFunctionList", rv); err != nil {
		C.dlclose(mod)
		return nil, err
	}
	// Ask the module to use OS locking primitives so multiple goroutines can
	// call into it. Whether that is honored depends on the library.
	args := C.CK_C_INITIALIZE_ARGS{
		flags: C.CKF_OS_LOCKING_OK,
	}
	if err := isOk("C_Initialize", C.ck_initialize(p, &args)); err != nil {
		C.dlclose(mod)
		return nil, err
	}
	var info C.CK_INFO
	if err := isOk("C_GetInfo", C.ck_get_info(p, &info)); err != nil {
		C.dlclose(mod)
		return nil, err
	}
	return &Module{
		mod: mod,
		fl: p,
		version: info.cryptokiVersion,
		info: Info{
			// Manufacturer is space-padded in the C struct; toString trims it.
			Manufacturer: toString(info.manufacturerID[:]),
			Version: Version{
				Major: uint8(info.libraryVersion.major),
				Minor: uint8(info.libraryVersion.minor),
			},
		},
	}, nil
}
// Close finalizes the module and releases any resources associated with the
// shared library.
//
// If C_Finalize fails, the dlopen handle is intentionally left open so the
// caller could retry.
func (m *Module) Close() error {
	if err := isOk("C_Finalize", C.ck_finalize(m.fl)); err != nil {
		return err
	}
	if C.dlclose(m.mod) != 0 {
		return fmt.Errorf("pkcs11: dlclose error: %s", C.GoString(C.dlerror()))
	}
	return nil
}
// createSlot configures a slot object. Internally this calls C_InitToken and
// C_InitPIN to set the admin and user PIN on the slot.
//
// The flow is: C_InitToken with the admin (SO) PIN and label, open an
// admin session, set the user PIN via C_InitPIN, then log out.
func (m *Module) createSlot(id uint32, opts slotOptions) error {
	if opts.Label == "" {
		return fmt.Errorf("no label provided")
	}
	if opts.PIN == "" {
		return fmt.Errorf("no user pin provided")
	}
	if opts.AdminPIN == "" {
		return fmt.Errorf("no admin pin provided")
	}
	// Token labels are a fixed 32-byte, space-padded field.
	var cLabel [32]C.CK_UTF8CHAR
	if !ckStringPadded(cLabel[:], opts.Label) {
		return fmt.Errorf("pkcs11: label too long")
	}
	cPIN := ckString(opts.AdminPIN)
	cPINLen := C.CK_ULONG(len(cPIN))
	rv := C.ck_init_token(
		m.fl,
		C.CK_SLOT_ID(id),
		&cPIN[0],
		cPINLen,
		&cLabel[0],
	)
	if err := isOk("C_InitToken", rv); err != nil {
		return err
	}
	// C_InitPIN requires a read-write session logged in as the security officer.
	so := Options{
		AdminPIN: opts.AdminPIN,
		ReadWrite: true,
	}
	s, err := m.Slot(id, so)
	if err != nil {
		return fmt.Errorf("getting slot: %w", err)
	}
	defer s.Close()
	if err := s.initPIN(opts.PIN); err != nil {
		return fmt.Errorf("configuring user pin: %w", err)
	}
	if err := s.logout(); err != nil {
		return fmt.Errorf("logout: %v", err)
	}
	return nil
}
// SlotIDs returns the IDs of all slots associated with this module, including
// ones that haven't been initialized.
//
// It uses the standard PKCS #11 two-call pattern: a first C_GetSlotList call
// with a nil buffer to learn the count, then a second call to fill the list.
func (m *Module) SlotIDs() ([]uint32, error) {
	var n C.CK_ULONG
	rv := C.ck_get_slot_list(m.fl, nil, &n)
	if err := isOk("C_GetSlotList", rv); err != nil {
		return nil, err
	}
	// Bug fix: with zero slots, taking &l[0] below would panic on an empty
	// slice. Return an empty (non-nil) list, matching the non-zero path.
	if n == 0 {
		return []uint32{}, nil
	}
	l := make([]C.CK_SLOT_ID, int(n))
	rv = C.ck_get_slot_list(m.fl, &l[0], &n)
	if err := isOk("C_GetSlotList", rv); err != nil {
		return nil, err
	}
	// The count may legally shrink between the two calls, but never grow.
	if int(n) > len(l) {
		return nil, fmt.Errorf("pkcs11: C_GetSlotList returned too many elements, got %d, want %d", int(n), len(l))
	}
	l = l[:int(n)]
	ids := make([]uint32, len(l))
	for i, id := range l {
		ids[i] = uint32(id)
	}
	return ids, nil
}
// Version holds a major and minor version.
type Version struct {
	Major uint8
	Minor uint8
}
// Info holds global information about the module.
type Info struct {
	// Manufacturer of the implementation. When multiple PKCS #11 devices are
	// present this is used to differentiate devices.
	Manufacturer string
	// Version of the module.
	Version Version
	// Human readable description of the module.
	Description string
}
// SlotInfo holds information about the slot and underlying token.
type SlotInfo struct {
	// Label, Model, and Serial come from C_GetTokenInfo and are only set
	// when a token is present in the slot.
	Label string
	Model string
	Serial string
	// Description comes from C_GetSlotInfo and is always populated.
	Description string
}
// toString converts a fixed-size, space-padded cryptokit field to a Go
// string, dropping the trailing padding.
func toString(b []C.uchar) string {
	raw := make([]byte, len(b))
	for i, c := range b {
		raw[i] = byte(c)
	}
	return strings.TrimRight(string(raw), " ")
}
// Info returns additional information about the module.
// The value is cached from the C_GetInfo call performed in Open.
func (m *Module) Info() Info {
	return m.info
}
// SlotInfo queries for information about the slot, such as the label.
//
// Token-level fields (label, model, serial) are only filled in when the
// CKF_TOKEN_PRESENT flag indicates a token is in the slot.
func (m *Module) SlotInfo(id uint32) (*SlotInfo, error) {
	var (
		cSlotInfo C.CK_SLOT_INFO
		cTokenInfo C.CK_TOKEN_INFO
		slotID = C.CK_SLOT_ID(id)
	)
	rv := C.ck_get_slot_info(m.fl, slotID, &cSlotInfo)
	if err := isOk("C_GetSlotInfo", rv); err != nil {
		return nil, err
	}
	info := SlotInfo{
		Description: toString(cSlotInfo.slotDescription[:]),
	}
	// Without a token there is nothing more to query.
	if (cSlotInfo.flags & C.CKF_TOKEN_PRESENT) == 0 {
		return &info, nil
	}
	rv = C.ck_get_token_info(m.fl, slotID, &cTokenInfo)
	if err := isOk("C_GetTokenInfo", rv); err != nil {
		return nil, err
	}
	info.Label = toString(cTokenInfo.label[:])
	info.Model = toString(cTokenInfo.model[:])
	info.Serial = toString(cTokenInfo.serialNumber[:])
	return &info, nil
}
// Slot represents a session to a slot.
//
// A slot holds a listable set of objects, such as certificates and
// cryptographic keys.
type Slot struct {
	// fl is the module's C function table, shared with the parent Module.
	fl C.CK_FUNCTION_LIST_PTR
	// h is the open session handle from C_OpenSession.
	h C.CK_SESSION_HANDLE
}
// slotOptions holds the parameters needed to initialize a token on a slot
// (see Module.createSlot). All fields are required.
type slotOptions struct {
	AdminPIN string
	PIN string
	Label string
}
// Options holds configuration options for the slot session.
type Options struct {
	// PIN logs the session in as the normal user (CKU_USER).
	PIN string
	// AdminPIN logs the session in as the security officer (CKU_SO).
	// PIN and AdminPIN are mutually exclusive.
	AdminPIN string
	// ReadWrite indicates that the slot should be opened with write capabilities,
	// such as generating keys or importing certificates.
	//
	// By default, sessions can access objects and perform signing requests.
	ReadWrite bool
}
// Slot creates a session with the given slot, by default read-only. Users
// must call Close to release the session.
//
// The returned Slot's behavior is undefined once the Module is closed.
func (m *Module) Slot(id uint32, opts Options) (*Slot, error) {
	// A session can be logged in as either the user or the security
	// officer, not both.
	if opts.AdminPIN != "" && opts.PIN != "" {
		return nil, fmt.Errorf("can't specify pin and admin pin")
	}
	var (
		h C.CK_SESSION_HANDLE
		slotID = C.CK_SLOT_ID(id)
		// "For legacy reasons, the CKF_SERIAL_SESSION bit MUST always be set".
		//
		// http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/os/pkcs11-base-v2.40-os.html#_Toc416959742
		flags C.CK_FLAGS = C.CKF_SERIAL_SESSION
	)
	if opts.ReadWrite {
		flags = flags | C.CKF_RW_SESSION
	}
	rv := C.ck_open_session(m.fl, slotID, flags, &h)
	if err := isOk("C_OpenSession", rv); err != nil {
		return nil, err
	}
	s := &Slot{fl: m.fl, h: h}
	// If a login fails, close the session we just opened before returning.
	if opts.PIN != "" {
		if err := s.login(opts.PIN); err != nil {
			s.Close()
			return nil, err
		}
	}
	if opts.AdminPIN != "" {
		if err := s.loginAdmin(opts.AdminPIN); err != nil {
			s.Close()
			return nil, err
		}
	}
	return s, nil
}
// Close releases the slot session.
func (s *Slot) Close() error {
	return isOk("C_CloseSession", C.ck_close_session(s.fl, s.h))
}
// TODO(ericchiang): merge with SlotInitialize.
//
// initPIN sets the normal user's PIN via C_InitPIN. The session must be
// logged in as the security officer.
func (s *Slot) initPIN(pin string) error {
	if pin == "" {
		return fmt.Errorf("invalid pin")
	}
	p := ckString(pin)
	return isOk("C_InitPIN", C.ck_init_pin(s.fl, s.h, &p[0], C.CK_ULONG(len(p))))
}
// logout ends the session's login state via C_Logout.
func (s *Slot) logout() error {
	return isOk("C_Logout", C.ck_logout(s.fl, s.h))
}
// login authenticates the session as the normal user (CKU_USER).
func (s *Slot) login(pin string) error {
	// TODO(ericchiang): check for CKR_USER_ALREADY_LOGGED_IN and auto logout.
	if pin == "" {
		return fmt.Errorf("invalid pin")
	}
	p := ckString(pin)
	return isOk("C_Login", C.ck_login(s.fl, s.h, C.CKU_USER, &p[0], C.CK_ULONG(len(p))))
}
// loginAdmin authenticates the session as the security officer (CKU_SO).
func (s *Slot) loginAdmin(adminPIN string) error {
	// TODO(ericchiang): maybe run commands, detect CKR_USER_NOT_LOGGED_IN, then
	// automatically login?
	if adminPIN == "" {
		return fmt.Errorf("invalid admin pin")
	}
	p := ckString(adminPIN)
	return isOk("C_Login", C.ck_login(s.fl, s.h, C.CKU_SO, &p[0], C.CK_ULONG(len(p))))
}
// Class is the primary object type. Such as a certificate, public key, or
// private key.
type Class int
// Set of classes supported by this package.
// Values mirror the CKO_* constants from the PKCS #11 specification.
const (
	ClassData Class = 0x00000000
	ClassCertificate Class = 0x00000001
	ClassPublicKey Class = 0x00000002
	ClassPrivateKey Class = 0x00000003
	ClassSecretKey Class = 0x00000004
	ClassDomainParameters Class = 0x00000006
)
// classString maps supported classes to their CKO_* names for Class.String.
var classString = map[Class]string{
	ClassData: "CKO_DATA",
	ClassCertificate: "CKO_CERTIFICATE",
	ClassPublicKey: "CKO_PUBLIC_KEY",
	ClassPrivateKey: "CKO_PRIVATE_KEY",
	ClassSecretKey: "CKO_SECRET_KEY",
	ClassDomainParameters: "CKO_DOMAIN_PARAMETERS",
}
// String returns a human readable version of the object class.
func (c Class) String() string {
	s, ok := classString[c]
	if !ok {
		return fmt.Sprintf("Class(0x%08x)", int(c))
	}
	return s
}
// ckType translates a Class into the corresponding CKO_* constant. The
// second return value reports whether the class is supported.
func (c Class) ckType() (C.CK_OBJECT_CLASS, bool) {
	var t C.CK_OBJECT_CLASS
	switch c {
	case ClassData:
		t = C.CKO_DATA
	case ClassCertificate:
		t = C.CKO_CERTIFICATE
	case ClassPublicKey:
		t = C.CKO_PUBLIC_KEY
	case ClassPrivateKey:
		t = C.CKO_PRIVATE_KEY
	case ClassSecretKey:
		t = C.CKO_SECRET_KEY
	case ClassDomainParameters:
		t = C.CKO_DOMAIN_PARAMETERS
	default:
		return 0, false
	}
	return t, true
}
// newObject wraps an object handle, fetching its CKA_CLASS attribute so the
// resulting Object knows its own type.
func (s *Slot) newObject(o C.CK_OBJECT_HANDLE) (Object, error) {
	// The class value must live in C memory for C_GetAttributeValue to fill.
	objClass := C.CK_OBJECT_CLASS_PTR(C.malloc(C.sizeof_CK_OBJECT_CLASS))
	defer C.free(unsafe.Pointer(objClass))
	a := []C.CK_ATTRIBUTE{
		{C.CKA_CLASS, C.CK_VOID_PTR(objClass), C.CK_ULONG(C.sizeof_CK_OBJECT_CLASS)},
	}
	rv := C.ck_get_attribute_value(s.fl, s.h, o, &a[0], C.CK_ULONG(len(a)))
	if err := isOk("C_GetAttributeValue", rv); err != nil {
		return Object{}, err
	}
	return Object{s.fl, s.h, o, *objClass}, nil
}
// createOptions holds the inputs for Slot.create. Exactly one object kind
// should be provided; currently only X.509 certificates are supported.
type createOptions struct {
	// Label is an optional CKA_LABEL to attach to the created object.
	Label string
	X509Certificate *x509.Certificate
}
// create imports the object described by opts into the slot. It errors if
// opts names nothing to import.
func (s *Slot) create(opts createOptions) (*Object, error) {
	if opts.X509Certificate == nil {
		return nil, fmt.Errorf("no objects provided to import")
	}
	return s.createX509Certificate(opts)
}
// createX509Certificate imports an X.509 certificate into the slot via
// C_CreateObject, building a CKO_CERTIFICATE/CKC_X_509 attribute template.
//
// http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/os/pkcs11-base-v2.40-os.html#_Toc416959709
func (s *Slot) createX509Certificate(opts createOptions) (*Object, error) {
	if opts.X509Certificate == nil {
		return nil, fmt.Errorf("no certificate provided")
	}
	// All template values must be C-allocated for the duration of the call.
	objClass := (*C.CK_OBJECT_CLASS)(C.malloc(C.sizeof_CK_OBJECT_CLASS))
	defer C.free(unsafe.Pointer(objClass))
	*objClass = C.CKO_CERTIFICATE
	ct := (*C.CK_CERTIFICATE_TYPE)(C.malloc(C.sizeof_CK_CERTIFICATE_TYPE))
	defer C.free(unsafe.Pointer(ct))
	*ct = C.CKC_X_509
	cSubj := C.CBytes(opts.X509Certificate.RawSubject)
	defer C.free(cSubj)
	cValue := C.CBytes(opts.X509Certificate.Raw)
	defer C.free(cValue)
	attrs := []C.CK_ATTRIBUTE{
		{C.CKA_CLASS, C.CK_VOID_PTR(objClass), C.CK_ULONG(C.sizeof_CK_OBJECT_CLASS)},
		{C.CKA_CERTIFICATE_TYPE, C.CK_VOID_PTR(ct), C.CK_ULONG(C.sizeof_CK_CERTIFICATE_TYPE)},
		{C.CKA_SUBJECT, C.CK_VOID_PTR(cSubj), C.CK_ULONG(len(opts.X509Certificate.RawSubject))},
		{C.CKA_VALUE, C.CK_VOID_PTR(cValue), C.CK_ULONG(len(opts.X509Certificate.Raw))},
	}
	// CKA_LABEL is optional and only attached when requested.
	if opts.Label != "" {
		cs := ckCString(opts.Label)
		defer C.free(unsafe.Pointer(cs))
		attrs = append(attrs, C.CK_ATTRIBUTE{
			C.CKA_LABEL,
			C.CK_VOID_PTR(cs),
			C.CK_ULONG(len(opts.Label)),
		})
	}
	var h C.CK_OBJECT_HANDLE
	rv := C.ck_create_object(s.fl, s.h, &attrs[0], C.CK_ULONG(len(attrs)), &h)
	if err := isOk("C_CreateObject", rv); err != nil {
		return nil, err
	}
	obj, err := s.newObject(h)
	if err != nil {
		return nil, err
	}
	return &obj, nil
}
// Filter hold options for returning a subset of objects from a slot.
//
// The returned object will match all provided parameters. For example, if
// Class=ClassPrivateKey and Label="foo", the returned object must be a
// private key with label "foo".
type Filter struct {
	// Class of 0 (ClassData's value) disables class filtering; see Objects.
	Class Class
	Label string
}
// Objects searches a slot for objects that match the given options, or all
// objects if no options are provided.
//
// The returned objects behavior is undefined once the Slot object is closed.
//
// NOTE(review): the C_FindObjectsInit session is never finalized with
// C_FindObjectsFinal here; the spec says the search operation stays active
// until it is — confirm whether a final call should be added.
func (s *Slot) Objects(opts Filter) ([]Object, error) {
	var attrs []C.CK_ATTRIBUTE
	if opts.Label != "" {
		cs := ckCString(opts.Label)
		defer C.free(unsafe.Pointer(cs))
		attrs = append(attrs, C.CK_ATTRIBUTE{
			C.CKA_LABEL,
			C.CK_VOID_PTR(cs),
			C.CK_ULONG(len(opts.Label)),
		})
	}
	// A zero Class (which is also ClassData's value) means "no class filter".
	// An unsupported class is silently ignored rather than erroring.
	if opts.Class != 0 {
		c, ok := Class(opts.Class).ckType()
		if ok {
			objClass := C.CK_OBJECT_CLASS_PTR(C.malloc(C.sizeof_CK_OBJECT_CLASS))
			defer C.free(unsafe.Pointer(objClass))
			*objClass = c
			attrs = append(attrs, C.CK_ATTRIBUTE{
				C.CKA_CLASS,
				C.CK_VOID_PTR(objClass),
				C.CK_ULONG(C.sizeof_CK_OBJECT_CLASS),
			})
		}
	}
	var rv C.CK_RV
	if len(attrs) > 0 {
		rv = C.ck_find_objects_init(s.fl, s.h, &attrs[0], C.CK_ULONG(len(attrs)))
	} else {
		rv = C.ck_find_objects_init(s.fl, s.h, nil, 0)
	}
	if err := isOk("C_FindObjectsInit", rv); err != nil {
		return nil, err
	}
	// Drain matches in fixed-size batches until the module reports none left.
	var handles []C.CK_OBJECT_HANDLE
	const objectsAtATime = 16
	for {
		cObjHandles := make([]C.CK_OBJECT_HANDLE, objectsAtATime)
		cObjMax := C.CK_ULONG(objectsAtATime)
		var n C.CK_ULONG
		rv := C.ck_find_objects(s.fl, s.h, &cObjHandles[0], cObjMax, &n)
		if err := isOk("C_FindObjects", rv); err != nil {
			return nil, err
		}
		if n == 0 {
			break
		}
		handles = append(handles, cObjHandles[:int(n)]...)
	}
	var objs []Object
	for _, h := range handles {
		o, err := s.newObject(h)
		if err != nil {
			return nil, err
		}
		objs = append(objs, o)
	}
	return objs, nil
}
// Object represents a single object stored within a slot. For example a key or
// certificate.
type Object struct {
	// fl and h identify the module function table and session the object
	// was fetched through.
	fl C.CK_FUNCTION_LIST_PTR
	h C.CK_SESSION_HANDLE
	// o is the object handle; c is its cached CKA_CLASS value.
	o C.CK_OBJECT_HANDLE
	c C.CK_OBJECT_CLASS
}
// Class returns the type of the object stored. For example, certificate, public
// key, or private key.
func (o Object) Class() Class {
	return Class(int(o.c))
}
// getAttribute fills the given attribute template via C_GetAttributeValue.
// Callers preallocate pValue buffers (or pass nil to query lengths).
func (o Object) getAttribute(attrs []C.CK_ATTRIBUTE) error {
	return isOk("C_GetAttributeValue",
		C.ck_get_attribute_value(o.fl, o.h, o.o, &attrs[0], C.CK_ULONG(len(attrs))),
	)
}
// setAttribute writes the given attribute template via C_SetAttributeValue.
func (o Object) setAttribute(attrs []C.CK_ATTRIBUTE) error {
	return isOk("C_SetAttributeValue",
		C.ck_set_attribute_value(o.fl, o.h, o.o, &attrs[0], C.CK_ULONG(len(attrs))),
	)
}
// Label returns a string value attached to an object, which can be used to
// identify or group sets of keys and certificates.
//
// Uses the standard two-call pattern: first query the attribute length with
// a nil buffer, then fetch the value into C-allocated memory.
func (o Object) Label() (string, error) {
	attrs := []C.CK_ATTRIBUTE{{C.CKA_LABEL, nil, 0}}
	if err := o.getAttribute(attrs); err != nil {
		return "", err
	}
	n := attrs[0].ulValueLen
	cLabel := (*C.CK_UTF8CHAR)(C.malloc(C.ulong(n)))
	defer C.free(unsafe.Pointer(cLabel))
	attrs[0].pValue = C.CK_VOID_PTR(cLabel)
	if err := o.getAttribute(attrs); err != nil {
		return "", err
	}
	return ckGoString(cLabel, n), nil
}
// setLabel sets the label of the object overwriting any previous value.
func (o Object) setLabel(s string) error {
	cs := ckCString(s)
	defer C.free(unsafe.Pointer(cs))
	attr := C.CK_ATTRIBUTE{C.CKA_LABEL, C.CK_VOID_PTR(cs), C.CK_ULONG(len(s))}
	return o.setAttribute([]C.CK_ATTRIBUTE{attr})
}
// Certificate parses the underlying object as a certificate. If the object
// isn't a certificate, this method fails.
func (o Object) Certificate() (*Certificate, error) {
	if o.Class() != ClassCertificate {
		return nil, fmt.Errorf("object has class: %s", o.Class())
	}
	// CKA_CERTIFICATE_TYPE distinguishes X.509 from other certificate kinds.
	ct := (*C.CK_CERTIFICATE_TYPE)(C.malloc(C.sizeof_CK_CERTIFICATE_TYPE))
	defer C.free(unsafe.Pointer(ct))
	attrs := []C.CK_ATTRIBUTE{
		{C.CKA_CERTIFICATE_TYPE, C.CK_VOID_PTR(ct), C.CK_ULONG(C.sizeof_CK_CERTIFICATE_TYPE)},
	}
	if err := o.getAttribute(attrs); err != nil {
		return nil, fmt.Errorf("getting certificate type: %w", err)
	}
	return &Certificate{o, *ct}, nil
}
// PublicKey parses the underlying object as a public key. Both RSA and ECDSA
// keys are supported.
//
// If the object isn't a public key, this method fails.
func (o Object) PublicKey() (crypto.PublicKey, error) {
	if o.Class() != ClassPublicKey {
		return nil, fmt.Errorf("object has class: %s", o.Class())
	}
	// CKA_KEY_TYPE tells us which key algorithm to parse.
	kt := (*C.CK_KEY_TYPE)(C.malloc(C.sizeof_CK_KEY_TYPE))
	defer C.free(unsafe.Pointer(kt))
	attrs := []C.CK_ATTRIBUTE{
		{C.CKA_KEY_TYPE, C.CK_VOID_PTR(kt), C.CK_ULONG(C.sizeof_CK_KEY_TYPE)},
	}
	if err := o.getAttribute(attrs); err != nil {
		// Fixed copy-pasted message: this call reads the key type, not the
		// certificate type.
		return nil, fmt.Errorf("getting key type: %w", err)
	}
	switch *kt {
	case C.CKK_EC:
		return o.ecdsaPublicKey()
	case C.CKK_RSA:
		return o.rsaPublicKey()
	default:
		return nil, fmt.Errorf("unsupported key type: 0x%x", *kt)
	}
}
// rsaPublicKey reads the CKA_MODULUS and CKA_PUBLIC_EXPONENT attributes and
// assembles an *rsa.PublicKey. Uses the two-call length-then-value pattern.
func (o Object) rsaPublicKey() (crypto.PublicKey, error) {
	// http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cs01/pkcs11-curr-v2.40-cs01.html#_Toc399398838
	attrs := []C.CK_ATTRIBUTE{
		{C.CKA_MODULUS, nil, 0},
		{C.CKA_PUBLIC_EXPONENT, nil, 0},
	}
	if err := o.getAttribute(attrs); err != nil {
		return nil, fmt.Errorf("getting attributes: %w", err)
	}
	if attrs[0].ulValueLen == 0 {
		return nil, fmt.Errorf("no modulus attribute returned")
	}
	if attrs[1].ulValueLen == 0 {
		return nil, fmt.Errorf("no public exponent returned")
	}
	// NOTE(review): the modulus malloc multiplies by sizeof_CK_BYTE while the
	// exponent one doesn't; both are equivalent if CK_BYTE is one byte.
	cN := (C.CK_VOID_PTR)(C.malloc(attrs[0].ulValueLen * C.sizeof_CK_BYTE))
	defer C.free(unsafe.Pointer(cN))
	attrs[0].pValue = cN
	cE := (C.CK_VOID_PTR)(C.malloc(attrs[1].ulValueLen))
	defer C.free(unsafe.Pointer(cE))
	attrs[1].pValue = cE
	if err := o.getAttribute(attrs); err != nil {
		return nil, fmt.Errorf("getting attribute values: %w", err)
	}
	// Both values are big-endian integers.
	nBytes := C.GoBytes(unsafe.Pointer(cN), C.int(attrs[0].ulValueLen))
	eBytes := C.GoBytes(unsafe.Pointer(cE), C.int(attrs[1].ulValueLen))
	var n, e big.Int
	n.SetBytes(nBytes)
	e.SetBytes(eBytes)
	return &rsa.PublicKey{N: &n, E: int(e.Int64())}, nil
}
// ecdsaPublicKey reads the CKA_EC_PARAMS and CKA_EC_POINT attributes and
// assembles an *ecdsa.PublicKey. Only the P-256, P-384, and P-521 named
// curves are recognized.
func (o Object) ecdsaPublicKey() (crypto.PublicKey, error) {
	// http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cs01/pkcs11-curr-v2.40-cs01.html#_Toc399398881
	attrs := []C.CK_ATTRIBUTE{
		{C.CKA_EC_PARAMS, nil, 0},
		{C.CKA_EC_POINT, nil, 0},
	}
	if err := o.getAttribute(attrs); err != nil {
		return nil, fmt.Errorf("getting attributes: %w", err)
	}
	if attrs[0].ulValueLen == 0 {
		// Fixed typo in the error message ("paramaters").
		return nil, fmt.Errorf("no ec parameters available")
	}
	if attrs[1].ulValueLen == 0 {
		return nil, fmt.Errorf("no ec point available")
	}
	cParam := (C.CK_VOID_PTR)(C.malloc(attrs[0].ulValueLen))
	defer C.free(unsafe.Pointer(cParam))
	attrs[0].pValue = cParam
	cPoint := (C.CK_VOID_PTR)(C.malloc(attrs[1].ulValueLen))
	defer C.free(unsafe.Pointer(cPoint))
	attrs[1].pValue = cPoint
	if err := o.getAttribute(attrs); err != nil {
		return nil, fmt.Errorf("getting attribute values: %w", err)
	}
	paramBytes := C.GoBytes(unsafe.Pointer(cParam), C.int(attrs[0].ulValueLen))
	pointBytes := C.GoBytes(unsafe.Pointer(cPoint), C.int(attrs[1].ulValueLen))
	// CKA_EC_PARAMS holds the DER-encoded curve OID; match it against the
	// precomputed encodings.
	var curve elliptic.Curve
	if bytes.Equal(paramBytes, p256OIDRaw) {
		curve = elliptic.P256()
	} else if bytes.Equal(paramBytes, p384OIDRaw) {
		curve = elliptic.P384()
	} else if bytes.Equal(paramBytes, p521OIDRaw) {
		curve = elliptic.P521()
	} else {
		return nil, fmt.Errorf("unsupported curve")
	}
	// CKA_EC_POINT is a DER OCTET STRING wrapping the uncompressed point.
	var rawPoint asn1.RawValue
	if _, err := asn1.Unmarshal(pointBytes, &rawPoint); err != nil {
		return nil, fmt.Errorf("decoding ec point: %v", err)
	}
	x, y := elliptic.Unmarshal(curve, rawPoint.Bytes)
	if x == nil {
		return nil, fmt.Errorf("invalid point format")
	}
	return &ecdsa.PublicKey{
		Curve: curve,
		X: x,
		Y: y,
	}, nil
}
// PrivateKey parses the underlying object as a private key. Both RSA and ECDSA
// keys are supported.
//
// The returned PrivateKey implements crypto.Signer and optionally crypto.Decrypter
// depending on the supported mechanisms.
//
// If the object isn't a private key, this method fails.
func (o Object) PrivateKey(pub crypto.PublicKey) (crypto.PrivateKey, error) {
	if o.Class() != ClassPrivateKey {
		return nil, fmt.Errorf("object has class: %s", o.Class())
	}
	// CKA_KEY_TYPE tells us which key algorithm this handle holds.
	kt := (*C.CK_KEY_TYPE)(C.malloc(C.sizeof_CK_KEY_TYPE))
	defer C.free(unsafe.Pointer(kt))
	attrs := []C.CK_ATTRIBUTE{
		{C.CKA_KEY_TYPE, C.CK_VOID_PTR(kt), C.CK_ULONG(C.sizeof_CK_KEY_TYPE)},
	}
	if err := o.getAttribute(attrs); err != nil {
		// Fixed copy-pasted message: this call reads the key type, not the
		// certificate type.
		return nil, fmt.Errorf("getting key type: %w", err)
	}
	// The supplied public key must match the private key's algorithm.
	switch *kt {
	case C.CKK_EC:
		p, ok := pub.(*ecdsa.PublicKey)
		if !ok {
			return nil, fmt.Errorf("expected ecdsa public key, got: %T", pub)
		}
		return &ecdsaPrivateKey{o, p}, nil
	case C.CKK_RSA:
		p, ok := pub.(*rsa.PublicKey)
		if !ok {
			return nil, fmt.Errorf("expected rsa public key, got: %T", pub)
		}
		return &rsaPrivateKey{o, p}, nil
	default:
		return nil, fmt.Errorf("unsupported key type: 0x%x", *kt)
	}
}
// Precomputed ASN1 signature prefixes.
//
// Borrowed from crypto/rsa. Each prefix is the DER-encoded DigestInfo header
// that precedes the raw digest in a PKCS #1 v1.5 signature.
var hashPrefixes = map[crypto.Hash][]byte{
	crypto.SHA224: {0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, 0x05, 0x00, 0x04, 0x1c},
	crypto.SHA256: {0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20},
	crypto.SHA384: {0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30},
	crypto.SHA512: {0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40},
}
// rsaPrivateKey wraps an RSA private key object handle together with its
// public half, implementing crypto.Signer.
type rsaPrivateKey struct {
	o Object
	pub *rsa.PublicKey
}
// Public implements crypto.Signer.
func (r *rsaPrivateKey) Public() crypto.PublicKey {
	return r.pub
}
// Sign implements crypto.Signer. PSS options dispatch to signPSS; otherwise
// a PKCS #1 v1.5 signature is produced with CKM_RSA_PKCS, which requires the
// caller to prepend the DigestInfo prefix (done here via hashPrefixes).
func (r *rsaPrivateKey) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
	if o, ok := opts.(*rsa.PSSOptions); ok {
		return r.signPSS(digest, o)
	}
	// http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cs01/pkcs11-curr-v2.40-cs01.html#_Toc399398842
	size := opts.HashFunc().Size()
	if size != len(digest) {
		// Fixed typo in the error message ("mush").
		return nil, fmt.Errorf("input must be hashed")
	}
	prefix, ok := hashPrefixes[opts.HashFunc()]
	if !ok {
		return nil, fmt.Errorf("unsupported hash function: %s", opts.HashFunc())
	}
	// CKM_RSA_PKCS signs the raw DigestInfo, so build prefix || digest.
	cBytes := make([]C.CK_BYTE, len(prefix)+len(digest))
	for i, b := range prefix {
		cBytes[i] = C.CK_BYTE(b)
	}
	for i, b := range digest {
		cBytes[len(prefix)+i] = C.CK_BYTE(b)
	}
	// An RSA signature is exactly the modulus size.
	cSig := make([]C.CK_BYTE, r.pub.Size())
	cSigLen := C.CK_ULONG(len(cSig))
	m := C.CK_MECHANISM{C.CKM_RSA_PKCS, nil, 0}
	rv := C.ck_sign_init(r.o.fl, r.o.h, &m, r.o.o)
	if err := isOk("C_SignInit", rv); err != nil {
		return nil, err
	}
	rv = C.ck_sign(r.o.fl, r.o.h, &cBytes[0], C.CK_ULONG(len(cBytes)), &cSig[0], &cSigLen)
	if err := isOk("C_Sign", rv); err != nil {
		return nil, err
	}
	if int(cSigLen) != len(cSig) {
		return nil, fmt.Errorf("expected signature of length %d, got %d", len(cSig), cSigLen)
	}
	sig := make([]byte, len(cSig))
	for i, b := range cSig {
		sig[i] = byte(b)
	}
	return sig, nil
}
// signPSS produces an RSA-PSS signature with CKM_RSA_PKCS_PSS, translating
// the Go rsa.PSSOptions (hash, MGF, salt length) into CK_RSA_PKCS_PSS_PARAMS.
func (r *rsaPrivateKey) signPSS(digest []byte, opts *rsa.PSSOptions) ([]byte, error) {
	// http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cs01/pkcs11-curr-v2.40-cs01.html#_Toc399398846
	// http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cs01/pkcs11-curr-v2.40-cs01.html#_Toc399398845
	cParam := (C.CK_RSA_PKCS_PSS_PARAMS_PTR)(C.malloc(C.sizeof_CK_RSA_PKCS_PSS_PARAMS))
	defer C.free(unsafe.Pointer(cParam))
	// The MGF must use the same hash as the digest.
	switch opts.Hash {
	case crypto.SHA256:
		cParam.hashAlg = C.CKM_SHA256
		cParam.mgf = C.CKG_MGF1_SHA256
	case crypto.SHA384:
		cParam.hashAlg = C.CKM_SHA384
		cParam.mgf = C.CKG_MGF1_SHA384
	case crypto.SHA512:
		cParam.hashAlg = C.CKM_SHA512
		cParam.mgf = C.CKG_MGF1_SHA512
	default:
		return nil, fmt.Errorf("unsupported hash algorithm: %s", opts.Hash)
	}
	switch opts.SaltLength {
	case rsa.PSSSaltLengthAuto:
		// Same logic as crypto/rsa.
		l := (r.pub.N.BitLen()-1+7)/8 - 2 - opts.Hash.Size()
		cParam.sLen = C.CK_ULONG(l)
	case rsa.PSSSaltLengthEqualsHash:
		cParam.sLen = C.CK_ULONG(opts.Hash.Size())
	default:
		cParam.sLen = C.CK_ULONG(opts.SaltLength)
	}
	cBytes := make([]C.CK_BYTE, len(digest))
	for i, b := range digest {
		cBytes[i] = C.CK_BYTE(b)
	}
	// An RSA signature is exactly the modulus size.
	cSig := make([]C.CK_BYTE, r.pub.Size())
	cSigLen := C.CK_ULONG(len(cSig))
	m := C.CK_MECHANISM{
		mechanism: C.CKM_RSA_PKCS_PSS,
		pParameter: C.CK_VOID_PTR(cParam),
		ulParameterLen: C.CK_ULONG(C.sizeof_CK_RSA_PKCS_PSS_PARAMS),
	}
	rv := C.ck_sign_init(r.o.fl, r.o.h, &m, r.o.o)
	if err := isOk("C_SignInit", rv); err != nil {
		return nil, err
	}
	rv = C.ck_sign(r.o.fl, r.o.h, &cBytes[0], C.CK_ULONG(len(cBytes)), &cSig[0], &cSigLen)
	if err := isOk("C_Sign", rv); err != nil {
		return nil, err
	}
	if int(cSigLen) != len(cSig) {
		return nil, fmt.Errorf("expected signature of length %d, got %d", len(cSig), cSigLen)
	}
	sig := make([]byte, len(cSig))
	for i, b := range cSig {
		sig[i] = byte(b)
	}
	return sig, nil
}
// ecdsaPrivateKey wraps an ECDSA private key object handle together with its
// public half, implementing crypto.Signer.
type ecdsaPrivateKey struct {
	o Object
	pub *ecdsa.PublicKey
}
// Public implements crypto.Signer.
func (e *ecdsaPrivateKey) Public() crypto.PublicKey {
	return e.pub
}
// ecdsaSignature is the ASN.1 SEQUENCE{r, s} form expected by standard
// ECDSA signature consumers (e.g. crypto/x509).
type ecdsaSignature struct {
	R, S *big.Int
}
// Sign implements crypto.Signer. CKM_ECDSA returns the raw r || s
// concatenation, which is re-encoded as the ASN.1 SEQUENCE Go callers expect.
func (e *ecdsaPrivateKey) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
	// http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cs01/pkcs11-curr-v2.40-cs01.html#_Toc399398884
	m := C.CK_MECHANISM{C.CKM_ECDSA, nil, 0}
	rv := C.ck_sign_init(e.o.fl, e.o.h, &m, e.o.o)
	if err := isOk("C_SignInit", rv); err != nil {
		return nil, err
	}
	// r and s are each one curve-order-sized big-endian integer.
	byteLen := (e.pub.Curve.Params().BitSize + 7) / 8
	cSig := make([]C.CK_BYTE, byteLen*2)
	cSigLen := C.CK_ULONG(len(cSig))
	cBytes := make([]C.CK_BYTE, len(digest))
	for i, b := range digest {
		cBytes[i] = C.CK_BYTE(b)
	}
	rv = C.ck_sign(e.o.fl, e.o.h, &cBytes[0], C.CK_ULONG(len(digest)), &cSig[0], &cSigLen)
	if err := isOk("C_Sign", rv); err != nil {
		return nil, err
	}
	if int(cSigLen) != len(cSig) {
		return nil, fmt.Errorf("expected signature of length %d, got %d", len(cSig), cSigLen)
	}
	sig := make([]byte, len(cSig))
	for i, b := range cSig {
		sig[i] = byte(b)
	}
	// Split the raw signature down the middle into r and s.
	var (
		r = big.NewInt(0)
		s = big.NewInt(0)
	)
	r.SetBytes(sig[:len(sig)/2])
	s.SetBytes(sig[len(sig)/2:])
	return asn1.Marshal(ecdsaSignature{r, s})
}
// CertificateType determines the kind of certificate a certificate object holds.
// This can be X.509, WTLS, GPG, etc.
//
// http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/os/pkcs11-base-v2.40-os.html#_Toc416959709
type CertificateType int
// Certificate types supported by this package.
const (
	CertificateX509 CertificateType = iota + 1
	CertificateUnknown
)
// Certificate holds a certificate object. Because certificates object can hold
// various kinds of certificates, callers should check the type before calling
// methods that parse the certificate.
//
//	cert, err := obj.Certificate()
//	if err != nil {
//		// ...
//	}
//	if cert.Type() != pkcs11.CertificateX509 {
//		// unexpected kind of certificate ...
//	}
//	x509Cert, err := cert.X509()
type Certificate struct {
	// o is the underlying CKO_CERTIFICATE object.
	o Object
	// t is the cached CKA_CERTIFICATE_TYPE value.
	t C.CK_CERTIFICATE_TYPE
}
// Type returns the format of the underlying certificate.
func (c *Certificate) Type() CertificateType {
	if c.t == C.CKC_X_509 {
		return CertificateX509
	}
	return CertificateUnknown
}
// X509 parses the underlying certificate as an X.509 certificate.
//
// If the certificate holds a different type of certificate, this method
// returns an error.
func (c *Certificate) X509() (*x509.Certificate, error) {
	// http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/os/pkcs11-base-v2.40-os.html#_Toc416959712
	if c.t != C.CKC_X_509 {
		return nil, fmt.Errorf("invalid certificate type")
	}
	// TODO(ericchiang): Do we want to support CKA_URL?
	// Two-call pattern: query the DER length, then fetch the value.
	var n C.CK_ULONG
	attrs := []C.CK_ATTRIBUTE{
		{C.CKA_VALUE, nil, n},
	}
	if err := c.o.getAttribute(attrs); err != nil {
		return nil, fmt.Errorf("getting certificate type: %w", err)
	}
	n = attrs[0].ulValueLen
	if n == 0 {
		return nil, fmt.Errorf("certificate value not present")
	}
	cRaw := (C.CK_VOID_PTR)(C.malloc(C.ulong(n)))
	defer C.free(unsafe.Pointer(cRaw))
	attrs[0].pValue = cRaw
	if err := c.o.getAttribute(attrs); err != nil {
		return nil, fmt.Errorf("getting certificate type: %w", err)
	}
	raw := C.GoBytes(unsafe.Pointer(cRaw), C.int(n))
	cert, err := x509.ParseCertificate(raw)
	if err != nil {
		return nil, fmt.Errorf("parsing certificate: %v", err)
	}
	return cert, nil
}
// keyOptions holds parameters used for generating a private key.
//
// Exactly one of RSABits or ECDSACurve must be set; Slot.generate rejects
// options that set both or neither.
type keyOptions struct {
	// RSABits indicates that the generated key should be a RSA key and also
	// provides the number of bits.
	RSABits int
	// ECDSACurve indicates that the generated key should be an ECDSA key and
	// identifies the curve used to generate the key.
	ECDSACurve elliptic.Curve
	// Label for the final object.
	LabelPublic string
	LabelPrivate string
}
// generate creates a private key on the slot along with its associated
// private and public key objects, dispatching on which key parameters were
// provided. Exactly one of ECDSACurve or RSABits must be set.
//
// https://datatracker.ietf.org/doc/html/rfc5480#section-2.1.1.1
func (s *Slot) generate(opts keyOptions) (crypto.PrivateKey, error) {
	switch {
	case opts.ECDSACurve != nil && opts.RSABits != 0:
		return nil, fmt.Errorf("conflicting key parameters provided")
	case opts.ECDSACurve != nil:
		return s.generateECDSA(opts)
	case opts.RSABits != 0:
		return s.generateRSA(opts)
	default:
		return nil, fmt.Errorf("no key parameters provided")
	}
}
// generateRSA generates an RSA key pair on the slot using the
// CKM_RSA_PKCS_KEY_PAIR_GEN mechanism, returning the parsed private key.
//
// http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/os/pkcs11-base-v2.40-os.html#_Toc416959719
// http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/os/pkcs11-curr-v2.40-os.html#_Toc416959971
func (s *Slot) generateRSA(o keyOptions) (crypto.PrivateKey, error) {
	var (
		mechanism = C.CK_MECHANISM{
			mechanism: C.CKM_RSA_PKCS_KEY_PAIR_GEN,
		}
		pubH C.CK_OBJECT_HANDLE
		privH C.CK_OBJECT_HANDLE
	)
	// Attribute values passed to C can't point into Go memory, so allocate
	// them on the C heap. (A CK_FALSE value was previously allocated here as
	// well but was never referenced by any template; it has been removed.)
	cTrue := (C.CK_VOID_PTR)(C.malloc(C.sizeof_CK_BBOOL))
	defer C.free(unsafe.Pointer(cTrue))
	*((*C.CK_BBOOL)(cTrue)) = C.CK_TRUE
	cModBits := (C.CK_VOID_PTR)(C.malloc(C.sizeof_CK_ULONG))
	defer C.free(unsafe.Pointer(cModBits))
	*((*C.CK_ULONG)(cModBits)) = C.CK_ULONG(o.RSABits)
	// Private half: sensitive, non-exportable signing key.
	privTmpl := []C.CK_ATTRIBUTE{
		{C.CKA_PRIVATE, cTrue, C.CK_ULONG(C.sizeof_CK_BBOOL)},
		{C.CKA_SENSITIVE, cTrue, C.CK_ULONG(C.sizeof_CK_BBOOL)},
		{C.CKA_SIGN, cTrue, C.CK_ULONG(C.sizeof_CK_BBOOL)},
	}
	if o.LabelPrivate != "" {
		cs := ckCString(o.LabelPrivate)
		defer C.free(unsafe.Pointer(cs))
		privTmpl = append(privTmpl, C.CK_ATTRIBUTE{
			C.CKA_LABEL,
			C.CK_VOID_PTR(cs),
			C.CK_ULONG(len(o.LabelPrivate)),
		})
	}
	// Public half: modulus size plus verify capability.
	pubTmpl := []C.CK_ATTRIBUTE{
		{C.CKA_MODULUS_BITS, cModBits, C.CK_ULONG(C.sizeof_CK_ULONG)},
		{C.CKA_VERIFY, cTrue, C.CK_ULONG(C.sizeof_CK_BBOOL)},
	}
	if o.LabelPublic != "" {
		cs := ckCString(o.LabelPublic)
		defer C.free(unsafe.Pointer(cs))
		pubTmpl = append(pubTmpl, C.CK_ATTRIBUTE{
			C.CKA_LABEL,
			C.CK_VOID_PTR(cs),
			C.CK_ULONG(len(o.LabelPublic)),
		})
	}
	rv := C.ck_generate_key_pair(
		s.fl, s.h, &mechanism,
		&pubTmpl[0], C.CK_ULONG(len(pubTmpl)),
		&privTmpl[0], C.CK_ULONG(len(privTmpl)),
		&pubH, &privH,
	)
	if err := isOk("C_GenerateKeyPair", rv); err != nil {
		return nil, err
	}
	// Wrap the raw handles and parse them into crypto types.
	pubObj, err := s.newObject(pubH)
	if err != nil {
		return nil, fmt.Errorf("public key object: %w", err)
	}
	privObj, err := s.newObject(privH)
	if err != nil {
		return nil, fmt.Errorf("private key object: %w", err)
	}
	pub, err := pubObj.PublicKey()
	if err != nil {
		return nil, fmt.Errorf("parsing public key: %w", err)
	}
	priv, err := privObj.PrivateKey(pub)
	if err != nil {
		return nil, fmt.Errorf("parsing private key: %w", err)
	}
	return priv, nil
}
// DER-encoded OBJECT IDENTIFIERs for the named curves supported by
// generateECDSA, used verbatim as the CKA_EC_PARAMS attribute value.
//
// https://datatracker.ietf.org/doc/html/rfc5480#section-2.1.1.1
//
// Generated with https://play.golang.org/p/tkqXov5Xpwp
var (
	p256OIDRaw = []byte{0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07} // OID 1.2.840.10045.3.1.7 (P-256)
	p384OIDRaw = []byte{0x06, 0x05, 0x2b, 0x81, 0x04, 0x00, 0x22} // OID 1.3.132.0.34 (P-384)
	p521OIDRaw = []byte{0x06, 0x05, 0x2b, 0x81, 0x04, 0x00, 0x23} // OID 1.3.132.0.35 (P-521)
)
// generateECDSA implements the CKM_EC_KEY_PAIR_GEN mechanism, generating an
// ECDSA key pair on the slot for the curve named in o.ECDSACurve and
// returning the parsed private key.
//
// http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/os/pkcs11-base-v2.40-os.html#_Toc416959719
// https://datatracker.ietf.org/doc/html/rfc5480#section-2.1.1.1
// http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/os/pkcs11-curr-v2.40-os.html#_Toc416960014
func (s *Slot) generateECDSA(o keyOptions) (crypto.PrivateKey, error) {
	var (
		mechanism = C.CK_MECHANISM{
			mechanism: C.CKM_EC_KEY_PAIR_GEN,
		}
		pubH C.CK_OBJECT_HANDLE
		privH C.CK_OBJECT_HANDLE
	)
	if o.ECDSACurve == nil {
		return nil, fmt.Errorf("no curve provided")
	}
	// Map the Go curve to its DER-encoded OID (the CKA_EC_PARAMS value).
	var oid []byte
	switch o.ECDSACurve.Params().Name {
	case "P-256":
		oid = p256OIDRaw
	case "P-384":
		oid = p384OIDRaw
	case "P-521":
		oid = p521OIDRaw
	default:
		return nil, fmt.Errorf("unsupported ECDSA curve")
	}
	// When passing a struct or array to C, that value can't refer to Go
	// memory. Allocate all attribute values in C rather than in Go. (A
	// CK_FALSE value was previously allocated here as well but was never
	// referenced by any template; it has been removed.)
	cOID := (C.CK_VOID_PTR)(C.CBytes(oid))
	defer C.free(unsafe.Pointer(cOID))
	cTrue := (C.CK_VOID_PTR)(C.malloc(C.sizeof_CK_BBOOL))
	defer C.free(unsafe.Pointer(cTrue))
	*((*C.CK_BBOOL)(cTrue)) = C.CK_TRUE
	// Private half: sensitive, non-exportable signing key.
	privTmpl := []C.CK_ATTRIBUTE{
		{C.CKA_PRIVATE, cTrue, C.CK_ULONG(C.sizeof_CK_BBOOL)},
		{C.CKA_SENSITIVE, cTrue, C.CK_ULONG(C.sizeof_CK_BBOOL)},
		{C.CKA_SIGN, cTrue, C.CK_ULONG(C.sizeof_CK_BBOOL)},
	}
	if o.LabelPrivate != "" {
		cs := ckCString(o.LabelPrivate)
		defer C.free(unsafe.Pointer(cs))
		privTmpl = append(privTmpl, C.CK_ATTRIBUTE{
			C.CKA_LABEL,
			C.CK_VOID_PTR(cs),
			C.CK_ULONG(len(o.LabelPrivate)),
		})
	}
	// Public half: curve parameters plus verify capability.
	pubTmpl := []C.CK_ATTRIBUTE{
		{C.CKA_EC_PARAMS, cOID, C.CK_ULONG(len(oid))},
		{C.CKA_VERIFY, cTrue, C.CK_ULONG(C.sizeof_CK_BBOOL)},
	}
	if o.LabelPublic != "" {
		cs := ckCString(o.LabelPublic)
		defer C.free(unsafe.Pointer(cs))
		pubTmpl = append(pubTmpl, C.CK_ATTRIBUTE{
			C.CKA_LABEL,
			C.CK_VOID_PTR(cs),
			C.CK_ULONG(len(o.LabelPublic)),
		})
	}
	rv := C.ck_generate_key_pair(
		s.fl, s.h, &mechanism,
		&pubTmpl[0], C.CK_ULONG(len(pubTmpl)),
		&privTmpl[0], C.CK_ULONG(len(privTmpl)),
		&pubH, &privH,
	)
	if err := isOk("C_GenerateKeyPair", rv); err != nil {
		return nil, err
	}
	// Wrap the raw handles and parse them into crypto types.
	pubObj, err := s.newObject(pubH)
	if err != nil {
		return nil, fmt.Errorf("public key object: %w", err)
	}
	privObj, err := s.newObject(privH)
	if err != nil {
		return nil, fmt.Errorf("private key object: %w", err)
	}
	pub, err := pubObj.PublicKey()
	if err != nil {
		return nil, fmt.Errorf("parsing public key: %w", err)
	}
	priv, err := privObj.PrivateKey(pub)
	if err != nil {
		return nil, fmt.Errorf("parsing private key: %w", err)
	}
	return priv, nil
}
// pkcs11: call C_FindObjectsFinal after running the search
//
// No test since libsofthsm2 doesn't exhibit this behavior, but was
// reported internally.
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package pkcs11 implements logic for using PKCS #11 shared libraries.
package pkcs11
/*
#include <dlfcn.h>
#include <stdlib.h>
#define CK_PTR *
#define CK_DECLARE_FUNCTION(returnType, name) \
returnType name
#define CK_DECLARE_FUNCTION_POINTER(returnType, name) \
returnType (* name)
#define CK_CALLBACK_FUNCTION(returnType, name) \
returnType (* name)
#ifndef NULL_PTR
#define NULL_PTR 0
#endif
#include "../third_party/pkcs11/pkcs11.h"
// Go can't call a C function pointer directly, so these are wrappers that
// perform the dereference in C.
CK_RV get_function_list(CK_C_GetFunctionList fn, CK_FUNCTION_LIST_PTR_PTR p) {
return (*fn)(p);
}
CK_RV ck_initialize(CK_FUNCTION_LIST_PTR fl, CK_C_INITIALIZE_ARGS_PTR args) {
return (*fl->C_Initialize)((CK_VOID_PTR)(args));
}
CK_RV ck_finalize(CK_FUNCTION_LIST_PTR fl) {
return (*fl->C_Finalize)(NULL_PTR);
}
CK_RV ck_init_token(
CK_FUNCTION_LIST_PTR fl,
CK_SLOT_ID slotID,
CK_UTF8CHAR_PTR pPin,
CK_ULONG ulPinLen,
CK_UTF8CHAR_PTR pLabel
) {
if (ulPinLen == 0) {
// TODO(ericchiang): This isn't tested since softhsm requires a PIN.
pPin = NULL_PTR;
}
return (*fl->C_InitToken)(slotID, pPin, ulPinLen, pLabel);
}
CK_RV ck_get_slot_list(
CK_FUNCTION_LIST_PTR fl,
CK_SLOT_ID_PTR pSlotList,
CK_ULONG_PTR pulCount
) {
return (*fl->C_GetSlotList)(CK_FALSE, pSlotList, pulCount);
}
CK_RV ck_get_info(
CK_FUNCTION_LIST_PTR fl,
CK_INFO_PTR pInfo
) {
return (*fl->C_GetInfo)(pInfo);
}
CK_RV ck_get_slot_info(
CK_FUNCTION_LIST_PTR fl,
CK_SLOT_ID slotID,
CK_SLOT_INFO_PTR pInfo
) {
return (*fl->C_GetSlotInfo)(slotID, pInfo);
}
CK_RV ck_get_token_info(
CK_FUNCTION_LIST_PTR fl,
CK_SLOT_ID slotID,
CK_TOKEN_INFO_PTR pInfo
) {
return (*fl->C_GetTokenInfo)(slotID, pInfo);
}
CK_RV ck_open_session(
CK_FUNCTION_LIST_PTR fl,
CK_SLOT_ID slotID,
CK_FLAGS flags,
CK_SESSION_HANDLE_PTR phSession
) {
return (*fl->C_OpenSession)(slotID, flags, NULL_PTR, NULL_PTR, phSession);
}
CK_RV ck_close_session(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession
) {
return (*fl->C_CloseSession)(hSession);
}
CK_RV ck_login(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_USER_TYPE userType,
CK_UTF8CHAR_PTR pPin,
CK_ULONG ulPinLen
) {
return (*fl->C_Login)(hSession, userType, pPin, ulPinLen);
}
CK_RV ck_logout(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession
) {
return (*fl->C_Logout)(hSession);
}
CK_RV ck_init_pin(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_UTF8CHAR_PTR pPin,
CK_ULONG ulPinLen
) {
return (*fl->C_InitPIN)(hSession, pPin, ulPinLen);
}
CK_RV ck_generate_key_pair(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_MECHANISM_PTR pMechanism,
CK_ATTRIBUTE_PTR pPublicKeyTemplate,
CK_ULONG ulPublicKeyAttributeCount,
CK_ATTRIBUTE_PTR pPrivateKeyTemplate,
CK_ULONG ulPrivateKeyAttributeCount,
CK_OBJECT_HANDLE_PTR phPublicKey,
CK_OBJECT_HANDLE_PTR phPrivateKey
) {
return (*fl->C_GenerateKeyPair)(
hSession,
pMechanism,
pPublicKeyTemplate,
ulPublicKeyAttributeCount,
pPrivateKeyTemplate,
ulPrivateKeyAttributeCount,
phPublicKey,
phPrivateKey
);
}
CK_RV ck_find_objects_init(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_ATTRIBUTE_PTR pTemplate,
CK_ULONG ulCount
) {
return (*fl->C_FindObjectsInit)(hSession, pTemplate, ulCount);
}
CK_RV ck_find_objects(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_OBJECT_HANDLE_PTR phObject,
CK_ULONG ulMaxObjectCount,
CK_ULONG_PTR pulObjectCount
) {
return (*fl->C_FindObjects)(hSession, phObject, ulMaxObjectCount, pulObjectCount);
}
CK_RV ck_find_objects_final(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession
) {
return (*fl->C_FindObjectsFinal)(hSession);
}
CK_RV ck_create_object(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_ATTRIBUTE_PTR pTemplate,
CK_ULONG ulCount,
CK_OBJECT_HANDLE_PTR phObject
) {
return (*fl->C_CreateObject)(hSession, pTemplate, ulCount, phObject);
}
CK_RV ck_get_attribute_value(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_OBJECT_HANDLE hObject,
CK_ATTRIBUTE_PTR pTemplate,
CK_ULONG ulCount
) {
return (*fl->C_GetAttributeValue)(hSession, hObject, pTemplate, ulCount);
}
CK_RV ck_set_attribute_value(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_OBJECT_HANDLE hObject,
CK_ATTRIBUTE_PTR pTemplate,
CK_ULONG ulCount
) {
return (*fl->C_SetAttributeValue)(hSession, hObject, pTemplate, ulCount);
}
CK_RV ck_sign_init(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_MECHANISM_PTR pMechanism,
CK_OBJECT_HANDLE hKey
) {
return (*fl->C_SignInit)(hSession, pMechanism, hKey);
}
CK_RV ck_sign(
CK_FUNCTION_LIST_PTR fl,
CK_SESSION_HANDLE hSession,
CK_BYTE_PTR pData,
CK_ULONG ulDataLen,
CK_BYTE_PTR pSignature,
CK_ULONG_PTR pulSignatureLen
) {
return (*fl->C_Sign)(hSession, pData, ulDataLen, pSignature, pulSignatureLen);
}
*/
// #cgo linux LDFLAGS: -ldl
import "C"
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/x509"
"encoding/asn1"
"fmt"
"io"
"math/big"
"strings"
"unsafe"
)
// ckStringPadded copies s into b, padding any remaining space with ASCII
// spaces as PKCS #11 fixed-width string fields require. It reports false when
// s does not fit in b, leaving b untouched.
func ckStringPadded(b []C.CK_UTF8CHAR, s string) bool {
	if len(s) > len(b) {
		return false
	}
	i := 0
	for ; i < len(s); i++ {
		b[i] = C.CK_UTF8CHAR(s[i])
	}
	for ; i < len(b); i++ {
		b[i] = C.CK_UTF8CHAR(' ')
	}
	return true
}
// ckString converts a Go string to a cryptokit string. The result is still
// backed by Go memory and doesn't need to be freed.
func ckString(s string) []C.CK_UTF8CHAR {
	out := make([]C.CK_UTF8CHAR, 0, len(s))
	for _, c := range []byte(s) {
		out = append(out, C.CK_UTF8CHAR(c))
	}
	return out
}
// ckCString converts a Go string to a cryptokit string held by C. This is required,
// for example, when building a CK_ATTRIBUTE, which needs to hold a pointer to a
// cryptokit string.
//
// The caller owns the returned C memory and must release it with C.free.
// NOTE(review): for an empty s this calls C.malloc(0) — presumably never hit
// since callers guard with s != ""; confirm before relying on it.
func ckCString(s string) *C.CK_UTF8CHAR {
	b := (*C.CK_UTF8CHAR)(C.malloc(C.sizeof_CK_UTF8CHAR * C.ulong(len(s))))
	// View the C allocation as a Go slice so we can index it safely.
	bs := unsafe.Slice(b, len(s))
	for i, c := range []byte(s) {
		bs[i] = C.CK_UTF8CHAR(c)
	}
	return b
}
// ckGoString copies n bytes of a C cryptokit string into a Go string. The C
// memory is not freed.
func ckGoString(s *C.CK_UTF8CHAR, n C.CK_ULONG) string {
	var sb strings.Builder
	sli := unsafe.Slice(s, n)
	for _, b := range sli {
		sb.WriteByte(byte(b))
	}
	return sb.String()
}
// Error is returned for cryptokit specific API codes.
type Error struct {
	fnName string // name of the C function that failed, e.g. "C_Login"
	code C.CK_RV // raw return value reported by the module
}
// Error renders the return value as its symbolic CKR_* name when known,
// falling back to hex for vendor-specific or unrecognized codes.
func (e *Error) Error() string {
	code, ok := ckRVString[e.code]
	if !ok {
		code = fmt.Sprintf("0x%x", e.code)
	}
	return fmt.Sprintf("pkcs11: %s() %s", e.fnName, code)
}
// ckRVString maps CK_RV return values to their symbolic names from the
// PKCS #11 specification, used when formatting an Error.
var ckRVString = map[C.CK_RV]string{
	C.CKR_CANCEL:                           "CKR_CANCEL",
	C.CKR_HOST_MEMORY:                      "CKR_HOST_MEMORY",
	C.CKR_SLOT_ID_INVALID:                  "CKR_SLOT_ID_INVALID",
	C.CKR_GENERAL_ERROR:                    "CKR_GENERAL_ERROR",
	C.CKR_FUNCTION_FAILED:                  "CKR_FUNCTION_FAILED",
	C.CKR_ARGUMENTS_BAD:                    "CKR_ARGUMENTS_BAD",
	C.CKR_NO_EVENT:                         "CKR_NO_EVENT",
	C.CKR_NEED_TO_CREATE_THREADS:           "CKR_NEED_TO_CREATE_THREADS",
	C.CKR_CANT_LOCK:                        "CKR_CANT_LOCK",
	C.CKR_ATTRIBUTE_READ_ONLY:              "CKR_ATTRIBUTE_READ_ONLY",
	C.CKR_ATTRIBUTE_SENSITIVE:              "CKR_ATTRIBUTE_SENSITIVE",
	C.CKR_ATTRIBUTE_TYPE_INVALID:           "CKR_ATTRIBUTE_TYPE_INVALID",
	C.CKR_ATTRIBUTE_VALUE_INVALID:          "CKR_ATTRIBUTE_VALUE_INVALID",
	C.CKR_ACTION_PROHIBITED:                "CKR_ACTION_PROHIBITED",
	C.CKR_DATA_INVALID:                     "CKR_DATA_INVALID",
	C.CKR_DATA_LEN_RANGE:                   "CKR_DATA_LEN_RANGE",
	C.CKR_DEVICE_ERROR:                     "CKR_DEVICE_ERROR",
	C.CKR_DEVICE_MEMORY:                    "CKR_DEVICE_MEMORY",
	C.CKR_DEVICE_REMOVED:                   "CKR_DEVICE_REMOVED",
	C.CKR_ENCRYPTED_DATA_INVALID:           "CKR_ENCRYPTED_DATA_INVALID",
	C.CKR_ENCRYPTED_DATA_LEN_RANGE:         "CKR_ENCRYPTED_DATA_LEN_RANGE",
	C.CKR_FUNCTION_CANCELED:                "CKR_FUNCTION_CANCELED",
	C.CKR_FUNCTION_NOT_PARALLEL:            "CKR_FUNCTION_NOT_PARALLEL",
	C.CKR_FUNCTION_NOT_SUPPORTED:           "CKR_FUNCTION_NOT_SUPPORTED",
	C.CKR_KEY_HANDLE_INVALID:               "CKR_KEY_HANDLE_INVALID",
	C.CKR_KEY_SIZE_RANGE:                   "CKR_KEY_SIZE_RANGE",
	C.CKR_KEY_TYPE_INCONSISTENT:            "CKR_KEY_TYPE_INCONSISTENT",
	C.CKR_KEY_NOT_NEEDED:                   "CKR_KEY_NOT_NEEDED",
	C.CKR_KEY_CHANGED:                      "CKR_KEY_CHANGED",
	C.CKR_KEY_NEEDED:                       "CKR_KEY_NEEDED",
	C.CKR_KEY_INDIGESTIBLE:                 "CKR_KEY_INDIGESTIBLE",
	C.CKR_KEY_FUNCTION_NOT_PERMITTED:       "CKR_KEY_FUNCTION_NOT_PERMITTED",
	C.CKR_KEY_NOT_WRAPPABLE:                "CKR_KEY_NOT_WRAPPABLE",
	C.CKR_KEY_UNEXTRACTABLE:                "CKR_KEY_UNEXTRACTABLE",
	C.CKR_MECHANISM_INVALID:                "CKR_MECHANISM_INVALID",
	C.CKR_MECHANISM_PARAM_INVALID:          "CKR_MECHANISM_PARAM_INVALID",
	C.CKR_OBJECT_HANDLE_INVALID:            "CKR_OBJECT_HANDLE_INVALID",
	C.CKR_OPERATION_ACTIVE:                 "CKR_OPERATION_ACTIVE",
	C.CKR_OPERATION_NOT_INITIALIZED:        "CKR_OPERATION_NOT_INITIALIZED",
	C.CKR_PIN_INCORRECT:                    "CKR_PIN_INCORRECT",
	C.CKR_PIN_INVALID:                      "CKR_PIN_INVALID",
	C.CKR_PIN_LEN_RANGE:                    "CKR_PIN_LEN_RANGE",
	C.CKR_PIN_EXPIRED:                      "CKR_PIN_EXPIRED",
	C.CKR_PIN_LOCKED:                       "CKR_PIN_LOCKED",
	C.CKR_SESSION_CLOSED:                   "CKR_SESSION_CLOSED",
	C.CKR_SESSION_COUNT:                    "CKR_SESSION_COUNT",
	C.CKR_SESSION_HANDLE_INVALID:           "CKR_SESSION_HANDLE_INVALID",
	C.CKR_SESSION_PARALLEL_NOT_SUPPORTED:   "CKR_SESSION_PARALLEL_NOT_SUPPORTED",
	C.CKR_SESSION_READ_ONLY:                "CKR_SESSION_READ_ONLY",
	C.CKR_SESSION_EXISTS:                   "CKR_SESSION_EXISTS",
	C.CKR_SESSION_READ_ONLY_EXISTS:         "CKR_SESSION_READ_ONLY_EXISTS",
	C.CKR_SESSION_READ_WRITE_SO_EXISTS:     "CKR_SESSION_READ_WRITE_SO_EXISTS",
	C.CKR_SIGNATURE_INVALID:                "CKR_SIGNATURE_INVALID",
	C.CKR_SIGNATURE_LEN_RANGE:              "CKR_SIGNATURE_LEN_RANGE",
	C.CKR_TEMPLATE_INCOMPLETE:              "CKR_TEMPLATE_INCOMPLETE",
	C.CKR_TEMPLATE_INCONSISTENT:            "CKR_TEMPLATE_INCONSISTENT",
	C.CKR_TOKEN_NOT_PRESENT:                "CKR_TOKEN_NOT_PRESENT",
	C.CKR_TOKEN_NOT_RECOGNIZED:             "CKR_TOKEN_NOT_RECOGNIZED",
	C.CKR_TOKEN_WRITE_PROTECTED:            "CKR_TOKEN_WRITE_PROTECTED",
	C.CKR_UNWRAPPING_KEY_HANDLE_INVALID:    "CKR_UNWRAPPING_KEY_HANDLE_INVALID",
	C.CKR_UNWRAPPING_KEY_SIZE_RANGE:        "CKR_UNWRAPPING_KEY_SIZE_RANGE",
	C.CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT: "CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT",
	C.CKR_USER_ALREADY_LOGGED_IN:           "CKR_USER_ALREADY_LOGGED_IN",
	C.CKR_USER_NOT_LOGGED_IN:               "CKR_USER_NOT_LOGGED_IN",
	C.CKR_USER_PIN_NOT_INITIALIZED:         "CKR_USER_PIN_NOT_INITIALIZED",
	C.CKR_USER_TYPE_INVALID:                "CKR_USER_TYPE_INVALID",
	C.CKR_USER_ANOTHER_ALREADY_LOGGED_IN:   "CKR_USER_ANOTHER_ALREADY_LOGGED_IN",
	C.CKR_USER_TOO_MANY_TYPES:              "CKR_USER_TOO_MANY_TYPES",
	C.CKR_WRAPPED_KEY_INVALID:              "CKR_WRAPPED_KEY_INVALID",
	C.CKR_WRAPPED_KEY_LEN_RANGE:            "CKR_WRAPPED_KEY_LEN_RANGE",
	C.CKR_WRAPPING_KEY_HANDLE_INVALID:      "CKR_WRAPPING_KEY_HANDLE_INVALID",
	C.CKR_WRAPPING_KEY_SIZE_RANGE:          "CKR_WRAPPING_KEY_SIZE_RANGE",
	C.CKR_WRAPPING_KEY_TYPE_INCONSISTENT:   "CKR_WRAPPING_KEY_TYPE_INCONSISTENT",
	C.CKR_RANDOM_SEED_NOT_SUPPORTED:        "CKR_RANDOM_SEED_NOT_SUPPORTED",
	C.CKR_RANDOM_NO_RNG:                    "CKR_RANDOM_NO_RNG",
	C.CKR_DOMAIN_PARAMS_INVALID:            "CKR_DOMAIN_PARAMS_INVALID",
	C.CKR_CURVE_NOT_SUPPORTED:              "CKR_CURVE_NOT_SUPPORTED",
	C.CKR_BUFFER_TOO_SMALL:                 "CKR_BUFFER_TOO_SMALL",
	C.CKR_SAVED_STATE_INVALID:              "CKR_SAVED_STATE_INVALID",
	C.CKR_INFORMATION_SENSITIVE:            "CKR_INFORMATION_SENSITIVE",
	C.CKR_STATE_UNSAVEABLE:                 "CKR_STATE_UNSAVEABLE",
	C.CKR_CRYPTOKI_NOT_INITIALIZED:         "CKR_CRYPTOKI_NOT_INITIALIZED",
	C.CKR_CRYPTOKI_ALREADY_INITIALIZED:     "CKR_CRYPTOKI_ALREADY_INITIALIZED",
	C.CKR_MUTEX_BAD:                        "CKR_MUTEX_BAD",
	C.CKR_MUTEX_NOT_LOCKED:                 "CKR_MUTEX_NOT_LOCKED",
	C.CKR_FUNCTION_REJECTED:                "CKR_FUNCTION_REJECTED",
	C.CKR_VENDOR_DEFINED:                   "CKR_VENDOR_DEFINED",
}
// isOk converts a CK_RV into a Go error, returning nil for CKR_OK. fnName is
// the C function that produced the return value and is included in the error.
func isOk(fnName string, rv C.CK_RV) error {
	if rv == C.CKR_OK {
		return nil
	}
	return &Error{fnName, rv}
}
// Module represents an opened shared library. By default, this package
// requests locking support from the module, but concurrent safety may
// depend on the underlying library.
type Module struct {
	// mod is a pointer to the dlopen handle. Kept around to dlfree
	// when the Module is closed.
	mod unsafe.Pointer
	// List of C functions provided by the module.
	fl C.CK_FUNCTION_LIST_PTR
	// Version of the module, used for compatibility.
	version C.CK_VERSION
	// info caches C_GetInfo results collected at Open time.
	info Info
}
// Open dlopens a shared library by path, initializing the module.
//
// The sequence is: dlopen, resolve C_GetFunctionList, fetch the function
// list, C_Initialize with OS locking, then C_GetInfo. On any failure the
// library handle is dlclosed before returning.
func Open(path string) (*Module, error) {
	cPath := C.CString(path)
	defer C.free(unsafe.Pointer(cPath))
	mod := C.dlopen(cPath, C.RTLD_NOW)
	if mod == nil {
		return nil, fmt.Errorf("pkcs11: dlopen error: %s", C.GoString(C.dlerror()))
	}
	cSym := C.CString("C_GetFunctionList")
	defer C.free(unsafe.Pointer(cSym))
	getFuncListFn := (C.CK_C_GetFunctionList)(C.dlsym(mod, cSym))
	if getFuncListFn == nil {
		err := fmt.Errorf("pkcs11: lookup function list symbol: %s", C.GoString(C.dlerror()))
		C.dlclose(mod)
		return nil, err
	}
	var p C.CK_FUNCTION_LIST_PTR
	rv := C.get_function_list(getFuncListFn, &p)
	if err := isOk("C_GetFunctionList", rv); err != nil {
		C.dlclose(mod)
		return nil, err
	}
	// Ask the module to use OS primitives for any locking it needs.
	args := C.CK_C_INITIALIZE_ARGS{
		flags: C.CKF_OS_LOCKING_OK,
	}
	if err := isOk("C_Initialize", C.ck_initialize(p, &args)); err != nil {
		C.dlclose(mod)
		return nil, err
	}
	var info C.CK_INFO
	if err := isOk("C_GetInfo", C.ck_get_info(p, &info)); err != nil {
		C.dlclose(mod)
		return nil, err
	}
	return &Module{
		mod: mod,
		fl: p,
		version: info.cryptokiVersion,
		info: Info{
			Manufacturer: toString(info.manufacturerID[:]),
			Version: Version{
				Major: uint8(info.libraryVersion.major),
				Minor: uint8(info.libraryVersion.minor),
			},
		},
	}, nil
}
// Close finalizes the module and releases any resources associated with the
// shared library.
func (m *Module) Close() error {
	if err := isOk("C_Finalize", C.ck_finalize(m.fl)); err != nil {
		return err
	}
	if rc := C.dlclose(m.mod); rc != 0 {
		return fmt.Errorf("pkcs11: dlclose error: %s", C.GoString(C.dlerror()))
	}
	return nil
}
// createSlot configures a slot object. Internally this calls C_InitToken and
// C_InitPIN to set the admin and user PIN on the slot.
//
// The sequence is: C_InitToken with the admin (SO) PIN, open an SO session,
// C_InitPIN to set the user PIN, then log out.
func (m *Module) createSlot(id uint32, opts slotOptions) error {
	if opts.Label == "" {
		return fmt.Errorf("no label provided")
	}
	if opts.PIN == "" {
		return fmt.Errorf("no user pin provided")
	}
	if opts.AdminPIN == "" {
		return fmt.Errorf("no admin pin provided")
	}
	// C_InitToken takes a fixed 32-byte, space-padded label.
	var cLabel [32]C.CK_UTF8CHAR
	if !ckStringPadded(cLabel[:], opts.Label) {
		return fmt.Errorf("pkcs11: label too long")
	}
	cPIN := ckString(opts.AdminPIN)
	cPINLen := C.CK_ULONG(len(cPIN))
	rv := C.ck_init_token(
		m.fl,
		C.CK_SLOT_ID(id),
		&cPIN[0],
		cPINLen,
		&cLabel[0],
	)
	if err := isOk("C_InitToken", rv); err != nil {
		return err
	}
	// Setting the user PIN requires a read/write security-officer session.
	so := Options{
		AdminPIN: opts.AdminPIN,
		ReadWrite: true,
	}
	s, err := m.Slot(id, so)
	if err != nil {
		return fmt.Errorf("getting slot: %w", err)
	}
	defer s.Close()
	if err := s.initPIN(opts.PIN); err != nil {
		return fmt.Errorf("configuring user pin: %w", err)
	}
	if err := s.logout(); err != nil {
		return fmt.Errorf("logout: %v", err)
	}
	return nil
}
// SlotIDs returns the IDs of all slots associated with this module, including
// ones that haven't been initalized.
//
// It first queries C_GetSlotList for the count, then fetches the IDs into a
// sized buffer.
func (m *Module) SlotIDs() ([]uint32, error) {
	var n C.CK_ULONG
	rv := C.ck_get_slot_list(m.fl, nil, &n)
	if err := isOk("C_GetSlotList", rv); err != nil {
		return nil, err
	}
	// With zero slots the buffer below would be empty and &l[0] would panic.
	if n == 0 {
		return []uint32{}, nil
	}
	l := make([]C.CK_SLOT_ID, int(n))
	rv = C.ck_get_slot_list(m.fl, &l[0], &n)
	if err := isOk("C_GetSlotList", rv); err != nil {
		return nil, err
	}
	if int(n) > len(l) {
		return nil, fmt.Errorf("pkcs11: C_GetSlotList returned too many elements, got %d, want %d", int(n), len(l))
	}
	// The second call may legitimately report fewer slots than the first.
	l = l[:int(n)]
	ids := make([]uint32, len(l))
	for i, id := range l {
		ids[i] = uint32(id)
	}
	return ids, nil
}
// Version holds a major and minor version.
type Version struct {
	Major uint8
	Minor uint8
}
// Info holds global information about the module.
type Info struct {
	// Manufacturer of the implementation. When multiple PKCS #11 devices are
	// present this is used to differentiate devices.
	Manufacturer string
	// Version of the module.
	Version Version
	// Human readable description of the module.
	Description string
}
// SlotInfo holds information about the slot and underlying token.
//
// Label, Model, and Serial are only populated when a token is present in the
// slot (see Module.SlotInfo).
type SlotInfo struct {
	Label string
	Model string
	Serial string
	Description string
}
// toString converts a fixed-width, space-padded PKCS #11 string field into a
// Go string, dropping the trailing space padding.
func toString(b []C.uchar) string {
	n := len(b)
	for n > 0 && b[n-1] == C.uchar(' ') {
		n--
	}
	out := make([]byte, n)
	for i := 0; i < n; i++ {
		out[i] = byte(b[i])
	}
	return string(out)
}
// Info returns additional information about the module, cached at Open time.
func (m *Module) Info() Info {
	return m.info
}
// SlotInfo queries for information about the slot, such as the label.
//
// Token-level fields (Label, Model, Serial) are only filled in when the slot
// reports CKF_TOKEN_PRESENT; otherwise only the description is returned.
func (m *Module) SlotInfo(id uint32) (*SlotInfo, error) {
	var (
		cSlotInfo C.CK_SLOT_INFO
		cTokenInfo C.CK_TOKEN_INFO
		slotID = C.CK_SLOT_ID(id)
	)
	rv := C.ck_get_slot_info(m.fl, slotID, &cSlotInfo)
	if err := isOk("C_GetSlotInfo", rv); err != nil {
		return nil, err
	}
	info := SlotInfo{
		Description: toString(cSlotInfo.slotDescription[:]),
	}
	// Without a token in the slot there's no token info to query.
	if (cSlotInfo.flags & C.CKF_TOKEN_PRESENT) == 0 {
		return &info, nil
	}
	rv = C.ck_get_token_info(m.fl, slotID, &cTokenInfo)
	if err := isOk("C_GetTokenInfo", rv); err != nil {
		return nil, err
	}
	info.Label = toString(cTokenInfo.label[:])
	info.Model = toString(cTokenInfo.model[:])
	info.Serial = toString(cTokenInfo.serialNumber[:])
	return &info, nil
}
// Slot represents a session to a slot.
//
// A slot holds a listable set of objects, such as certificates and
// cryptographic keys.
type Slot struct {
	fl C.CK_FUNCTION_LIST_PTR // module function list, shared with the Module
	h C.CK_SESSION_HANDLE // open session handle for this slot
}
// slotOptions holds the values used when initializing a slot via
// Module.createSlot.
type slotOptions struct {
	AdminPIN string
	PIN string
	Label string
}
// Options holds configuration options for the slot session.
type Options struct {
	PIN string
	AdminPIN string
	// ReadWrite indicates that the slot should be opened with write capabilities,
	// such as generating keys or importing certificates.
	//
	// By default, sessions can access objects and perform signing requests.
	ReadWrite bool
}
// Slot creates a session with the given slot, by default read-only. Users
// must call Close to release the session.
//
// At most one of PIN (user login) or AdminPIN (SO login) may be set; the
// session is closed again if the login fails.
//
// The returned Slot's behavior is undefined once the Module is closed.
func (m *Module) Slot(id uint32, opts Options) (*Slot, error) {
	if opts.AdminPIN != "" && opts.PIN != "" {
		return nil, fmt.Errorf("can't specify pin and admin pin")
	}
	var (
		h C.CK_SESSION_HANDLE
		slotID = C.CK_SLOT_ID(id)
		// "For legacy reasons, the CKF_SERIAL_SESSION bit MUST always be set".
		//
		// http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/os/pkcs11-base-v2.40-os.html#_Toc416959742
		flags C.CK_FLAGS = C.CKF_SERIAL_SESSION
	)
	if opts.ReadWrite {
		flags = flags | C.CKF_RW_SESSION
	}
	rv := C.ck_open_session(m.fl, slotID, flags, &h)
	if err := isOk("C_OpenSession", rv); err != nil {
		return nil, err
	}
	s := &Slot{fl: m.fl, h: h}
	if opts.PIN != "" {
		if err := s.login(opts.PIN); err != nil {
			s.Close()
			return nil, err
		}
	}
	if opts.AdminPIN != "" {
		if err := s.loginAdmin(opts.AdminPIN); err != nil {
			s.Close()
			return nil, err
		}
	}
	return s, nil
}
// Close releases the slot session.
func (s *Slot) Close() error {
	return isOk("C_CloseSession", C.ck_close_session(s.fl, s.h))
}
// initPIN sets the user PIN via C_InitPIN. The session must be a logged-in
// security-officer session (see createSlot).
// TODO(ericchiang): merge with SlotInitialize.
func (s *Slot) initPIN(pin string) error {
	if pin == "" {
		return fmt.Errorf("invalid pin")
	}
	cPIN := ckString(pin)
	cPINLen := C.CK_ULONG(len(cPIN))
	return isOk("C_InitPIN", C.ck_init_pin(s.fl, s.h, &cPIN[0], cPINLen))
}
// logout ends the current login on the session via C_Logout.
func (s *Slot) logout() error {
	return isOk("C_Logout", C.ck_logout(s.fl, s.h))
}
// login authenticates the session as a normal user (CKU_USER).
func (s *Slot) login(pin string) error {
	// TODO(ericchiang): check for CKR_USER_ALREADY_LOGGED_IN and auto logout.
	if pin == "" {
		return fmt.Errorf("invalid pin")
	}
	cPIN := ckString(pin)
	cPINLen := C.CK_ULONG(len(cPIN))
	return isOk("C_Login", C.ck_login(s.fl, s.h, C.CKU_USER, &cPIN[0], cPINLen))
}
// loginAdmin authenticates the session as the security officer (CKU_SO).
func (s *Slot) loginAdmin(adminPIN string) error {
	// TODO(ericchiang): maybe run commands, detect CKR_USER_NOT_LOGGED_IN, then
	// automatically login?
	if adminPIN == "" {
		return fmt.Errorf("invalid admin pin")
	}
	cPIN := ckString(adminPIN)
	cPINLen := C.CK_ULONG(len(cPIN))
	return isOk("C_Login", C.ck_login(s.fl, s.h, C.CKU_SO, &cPIN[0], cPINLen))
}
// Class is the primary object type. Such as a certificate, public key, or
// private key.
type Class int
// Set of classes supported by this package. Values mirror the CKO_* object
// class constants from the PKCS #11 specification.
const (
	ClassData Class = 0x00000000
	ClassCertificate Class = 0x00000001
	ClassPublicKey Class = 0x00000002
	ClassPrivateKey Class = 0x00000003
	ClassSecretKey Class = 0x00000004
	ClassDomainParameters Class = 0x00000006
)
// classString maps classes to their CKO_* names for Class.String.
var classString = map[Class]string{
	ClassData: "CKO_DATA",
	ClassCertificate: "CKO_CERTIFICATE",
	ClassPublicKey: "CKO_PUBLIC_KEY",
	ClassPrivateKey: "CKO_PRIVATE_KEY",
	ClassSecretKey: "CKO_SECRET_KEY",
	ClassDomainParameters: "CKO_DOMAIN_PARAMETERS",
}
// String returns a human readable version of the object class, falling back
// to a hex representation for unrecognized values.
func (c Class) String() string {
	s, ok := classString[c]
	if !ok {
		return fmt.Sprintf("Class(0x%08x)", int(c))
	}
	return s
}
// ckType maps the Class to its CKO_* constant. It reports false for values
// this package doesn't recognize.
func (c Class) ckType() (C.CK_OBJECT_CLASS, bool) {
	switch c {
	case ClassData:
		return C.CKO_DATA, true
	case ClassCertificate:
		return C.CKO_CERTIFICATE, true
	case ClassPublicKey:
		return C.CKO_PUBLIC_KEY, true
	case ClassPrivateKey:
		return C.CKO_PRIVATE_KEY, true
	case ClassSecretKey:
		return C.CKO_SECRET_KEY, true
	case ClassDomainParameters:
		return C.CKO_DOMAIN_PARAMETERS, true
	}
	return 0, false
}
// newObject wraps a raw object handle, fetching its CKA_CLASS attribute so
// the resulting Object knows its own class.
func (s *Slot) newObject(o C.CK_OBJECT_HANDLE) (Object, error) {
	// The attribute value must live in C memory for the C_GetAttributeValue call.
	objClass := C.CK_OBJECT_CLASS_PTR(C.malloc(C.sizeof_CK_OBJECT_CLASS))
	defer C.free(unsafe.Pointer(objClass))
	a := []C.CK_ATTRIBUTE{
		{C.CKA_CLASS, C.CK_VOID_PTR(objClass), C.CK_ULONG(C.sizeof_CK_OBJECT_CLASS)},
	}
	rv := C.ck_get_attribute_value(s.fl, s.h, o, &a[0], C.CK_ULONG(len(a)))
	if err := isOk("C_GetAttributeValue", rv); err != nil {
		return Object{}, err
	}
	return Object{s.fl, s.h, o, *objClass}, nil
}
// createOptions holds the values used when importing an object into a slot.
type createOptions struct {
	// Label to attach to the created object, if non-empty.
	Label string
	// X509Certificate indicates an X.509 certificate should be imported.
	X509Certificate *x509.Certificate
}
// create imports an object into the slot, dispatching on which option was
// provided. Currently only X.509 certificates are supported.
func (s *Slot) create(opts createOptions) (*Object, error) {
	if opts.X509Certificate != nil {
		return s.createX509Certificate(opts)
	}
	return nil, fmt.Errorf("no objects provided to import")
}
// createX509Certificate imports an X.509 certificate into the slot via
// C_CreateObject, building a CKO_CERTIFICATE/CKC_X_509 attribute template
// from the certificate's raw DER and subject.
//
// http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/os/pkcs11-base-v2.40-os.html#_Toc416959709
func (s *Slot) createX509Certificate(opts createOptions) (*Object, error) {
	if opts.X509Certificate == nil {
		return nil, fmt.Errorf("no certificate provided")
	}
	// All attribute values must live in C memory for C_CreateObject.
	objClass := (*C.CK_OBJECT_CLASS)(C.malloc(C.sizeof_CK_OBJECT_CLASS))
	defer C.free(unsafe.Pointer(objClass))
	*objClass = C.CKO_CERTIFICATE
	ct := (*C.CK_CERTIFICATE_TYPE)(C.malloc(C.sizeof_CK_CERTIFICATE_TYPE))
	defer C.free(unsafe.Pointer(ct))
	*ct = C.CKC_X_509
	cSubj := C.CBytes(opts.X509Certificate.RawSubject)
	defer C.free(cSubj)
	cValue := C.CBytes(opts.X509Certificate.Raw)
	defer C.free(cValue)
	attrs := []C.CK_ATTRIBUTE{
		{C.CKA_CLASS, C.CK_VOID_PTR(objClass), C.CK_ULONG(C.sizeof_CK_OBJECT_CLASS)},
		{C.CKA_CERTIFICATE_TYPE, C.CK_VOID_PTR(ct), C.CK_ULONG(C.sizeof_CK_CERTIFICATE_TYPE)},
		{C.CKA_SUBJECT, C.CK_VOID_PTR(cSubj), C.CK_ULONG(len(opts.X509Certificate.RawSubject))},
		{C.CKA_VALUE, C.CK_VOID_PTR(cValue), C.CK_ULONG(len(opts.X509Certificate.Raw))},
	}
	if opts.Label != "" {
		cs := ckCString(opts.Label)
		defer C.free(unsafe.Pointer(cs))
		attrs = append(attrs, C.CK_ATTRIBUTE{
			C.CKA_LABEL,
			C.CK_VOID_PTR(cs),
			C.CK_ULONG(len(opts.Label)),
		})
	}
	var h C.CK_OBJECT_HANDLE
	rv := C.ck_create_object(s.fl, s.h, &attrs[0], C.CK_ULONG(len(attrs)), &h)
	if err := isOk("C_CreateObject", rv); err != nil {
		return nil, err
	}
	obj, err := s.newObject(h)
	if err != nil {
		return nil, err
	}
	return &obj, nil
}
// Filter holds options for returning a subset of objects from a slot.
//
// The returned object will match all provided parameters. For example, if
// Class=ClassPrivateKey and Label="foo", the returned object must be a
// private key with label "foo".
type Filter struct {
	Class Class
	Label string
}
// Objects searches a slot for objects that match the given options, or all
// objects if no options are provided.
//
// The search runs C_FindObjectsInit / C_FindObjects / C_FindObjectsFinal;
// the final call always runs, and its error is surfaced if the search itself
// succeeded.
//
// The returned objects behavior is undefined once the Slot object is closed.
func (s *Slot) Objects(opts Filter) (objs []Object, err error) {
	var attrs []C.CK_ATTRIBUTE
	if opts.Label != "" {
		cs := ckCString(opts.Label)
		defer C.free(unsafe.Pointer(cs))
		attrs = append(attrs, C.CK_ATTRIBUTE{
			C.CKA_LABEL,
			C.CK_VOID_PTR(cs),
			C.CK_ULONG(len(opts.Label)),
		})
	}
	// NOTE(review): ClassData is 0, so it cannot be used as a filter here;
	// unrecognized classes are silently ignored — confirm both are intended.
	if opts.Class != 0 {
		c, ok := opts.Class.ckType()
		if ok {
			objClass := C.CK_OBJECT_CLASS_PTR(C.malloc(C.sizeof_CK_OBJECT_CLASS))
			defer C.free(unsafe.Pointer(objClass))
			*objClass = c
			attrs = append(attrs, C.CK_ATTRIBUTE{
				C.CKA_CLASS,
				C.CK_VOID_PTR(objClass),
				C.CK_ULONG(C.sizeof_CK_OBJECT_CLASS),
			})
		}
	}
	var rv C.CK_RV
	if len(attrs) > 0 {
		rv = C.ck_find_objects_init(s.fl, s.h, &attrs[0], C.CK_ULONG(len(attrs)))
	} else {
		rv = C.ck_find_objects_init(s.fl, s.h, nil, 0)
	}
	if err := isOk("C_FindObjectsInit", rv); err != nil {
		return nil, err
	}
	defer func() {
		rv := C.ck_find_objects_final(s.fl, s.h)
		if ferr := isOk("C_FindObjectsFinal", rv); ferr != nil && err == nil {
			err = ferr
		}
	}()
	// Drain matching handles in fixed-size batches until the module reports
	// no more results.
	var handles []C.CK_OBJECT_HANDLE
	const objectsAtATime = 16
	for {
		cObjHandles := make([]C.CK_OBJECT_HANDLE, objectsAtATime)
		cObjMax := C.CK_ULONG(objectsAtATime)
		var n C.CK_ULONG
		rv := C.ck_find_objects(s.fl, s.h, &cObjHandles[0], cObjMax, &n)
		if err := isOk("C_FindObjects", rv); err != nil {
			return nil, err
		}
		if n == 0 {
			break
		}
		handles = append(handles, cObjHandles[:int(n)]...)
	}
	objs = make([]Object, 0, len(handles))
	for _, h := range handles {
		o, err := s.newObject(h)
		if err != nil {
			return nil, err
		}
		objs = append(objs, o)
	}
	return objs, nil
}
// Object represents a single object stored within a slot. For example a key or
// certificate.
type Object struct {
	fl C.CK_FUNCTION_LIST_PTR // module function list used for all calls
	h  C.CK_SESSION_HANDLE    // session the object was found through
	o  C.CK_OBJECT_HANDLE     // raw PKCS#11 object handle
	c  C.CK_OBJECT_CLASS      // cached CKA_CLASS value
}

// Class returns the type of the object stored. For example, certificate, public
// key, or private key.
func (o Object) Class() Class {
	return Class(int(o.c))
}
// getAttribute fills the given template in place via C_GetAttributeValue.
// Callers typically invoke it twice: once with nil values to learn lengths,
// then again with allocated buffers.
func (o Object) getAttribute(attrs []C.CK_ATTRIBUTE) error {
	return isOk("C_GetAttributeValue",
		C.ck_get_attribute_value(o.fl, o.h, o.o, &attrs[0], C.CK_ULONG(len(attrs))),
	)
}

// setAttribute writes the given attribute template via C_SetAttributeValue.
func (o Object) setAttribute(attrs []C.CK_ATTRIBUTE) error {
	return isOk("C_SetAttributeValue",
		C.ck_set_attribute_value(o.fl, o.h, o.o, &attrs[0], C.CK_ULONG(len(attrs))),
	)
}
// Label returns a string value attached to an object, which can be used to
// identify or group sets of keys and certificates.
func (o Object) Label() (string, error) {
	// First call with a nil value learns the length of CKA_LABEL.
	attrs := []C.CK_ATTRIBUTE{{C.CKA_LABEL, nil, 0}}
	if err := o.getAttribute(attrs); err != nil {
		return "", err
	}
	n := attrs[0].ulValueLen
	// NOTE(review): when the label is empty n is 0 and C.malloc(0) may
	// return nil depending on the allocator — confirm ckGoString tolerates a
	// nil pointer with zero length.
	cLabel := (*C.CK_UTF8CHAR)(C.malloc(C.ulong(n)))
	defer C.free(unsafe.Pointer(cLabel))
	attrs[0].pValue = C.CK_VOID_PTR(cLabel)
	// Second call reads the label bytes into the C buffer.
	if err := o.getAttribute(attrs); err != nil {
		return "", err
	}
	return ckGoString(cLabel, n), nil
}
// setLabel overwrites the object's CKA_LABEL attribute with the given string.
func (o Object) setLabel(s string) error {
	cLabel := ckCString(s)
	defer C.free(unsafe.Pointer(cLabel))
	attr := C.CK_ATTRIBUTE{C.CKA_LABEL, C.CK_VOID_PTR(cLabel), C.CK_ULONG(len(s))}
	return o.setAttribute([]C.CK_ATTRIBUTE{attr})
}
// Certificate parses the underlying object as a certificate. If the object
// isn't a certificate, this method fails.
func (o Object) Certificate() (*Certificate, error) {
	if o.Class() != ClassCertificate {
		return nil, fmt.Errorf("object has class: %s", o.Class())
	}
	// CKA_CERTIFICATE_TYPE is read into C memory because cgo forbids Go
	// pointers inside C structs.
	ct := (*C.CK_CERTIFICATE_TYPE)(C.malloc(C.sizeof_CK_CERTIFICATE_TYPE))
	defer C.free(unsafe.Pointer(ct))
	attrs := []C.CK_ATTRIBUTE{
		{C.CKA_CERTIFICATE_TYPE, C.CK_VOID_PTR(ct), C.CK_ULONG(C.sizeof_CK_CERTIFICATE_TYPE)},
	}
	if err := o.getAttribute(attrs); err != nil {
		return nil, fmt.Errorf("getting certificate type: %w", err)
	}
	return &Certificate{o, *ct}, nil
}
// PublicKey parses the underlying object as a public key. Both RSA and ECDSA
// keys are supported.
//
// If the object isn't a public key, this method fails.
func (o Object) PublicKey() (crypto.PublicKey, error) {
	if o.Class() != ClassPublicKey {
		return nil, fmt.Errorf("object has class: %s", o.Class())
	}
	// CKA_KEY_TYPE distinguishes RSA from EC keys. The value lives in C
	// memory because cgo forbids Go pointers inside C structs.
	kt := (*C.CK_KEY_TYPE)(C.malloc(C.sizeof_CK_KEY_TYPE))
	defer C.free(unsafe.Pointer(kt))
	attrs := []C.CK_ATTRIBUTE{
		{C.CKA_KEY_TYPE, C.CK_VOID_PTR(kt), C.CK_ULONG(C.sizeof_CK_KEY_TYPE)},
	}
	if err := o.getAttribute(attrs); err != nil {
		// This reads the key type, not a certificate type (message was a
		// copy-paste from Certificate()).
		return nil, fmt.Errorf("getting key type: %w", err)
	}
	switch *kt {
	case C.CKK_EC:
		return o.ecdsaPublicKey()
	case C.CKK_RSA:
		return o.rsaPublicKey()
	default:
		return nil, fmt.Errorf("unsupported key type: 0x%x", *kt)
	}
}
// rsaPublicKey reads CKA_MODULUS and CKA_PUBLIC_EXPONENT and assembles them
// into an *rsa.PublicKey.
func (o Object) rsaPublicKey() (crypto.PublicKey, error) {
	// http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cs01/pkcs11-curr-v2.40-cs01.html#_Toc399398838
	// First call with nil values queries the attribute lengths.
	attrs := []C.CK_ATTRIBUTE{
		{C.CKA_MODULUS, nil, 0},
		{C.CKA_PUBLIC_EXPONENT, nil, 0},
	}
	if err := o.getAttribute(attrs); err != nil {
		return nil, fmt.Errorf("getting attributes: %w", err)
	}
	if attrs[0].ulValueLen == 0 {
		return nil, fmt.Errorf("no modulus attribute returned")
	}
	if attrs[1].ulValueLen == 0 {
		return nil, fmt.Errorf("no public exponent returned")
	}
	// Second call reads the big-endian values into C-allocated buffers.
	cN := (C.CK_VOID_PTR)(C.malloc(attrs[0].ulValueLen * C.sizeof_CK_BYTE))
	defer C.free(unsafe.Pointer(cN))
	attrs[0].pValue = cN
	cE := (C.CK_VOID_PTR)(C.malloc(attrs[1].ulValueLen))
	defer C.free(unsafe.Pointer(cE))
	attrs[1].pValue = cE
	if err := o.getAttribute(attrs); err != nil {
		return nil, fmt.Errorf("getting attribute values: %w", err)
	}
	nBytes := C.GoBytes(unsafe.Pointer(cN), C.int(attrs[0].ulValueLen))
	eBytes := C.GoBytes(unsafe.Pointer(cE), C.int(attrs[1].ulValueLen))
	var n, e big.Int
	n.SetBytes(nBytes)
	e.SetBytes(eBytes)
	// The exponent is truncated to an int, matching rsa.PublicKey.E.
	return &rsa.PublicKey{N: &n, E: int(e.Int64())}, nil
}
// ecdsaPublicKey reads CKA_EC_PARAMS and CKA_EC_POINT and assembles them
// into an *ecdsa.PublicKey. Only the NIST P-256/P-384/P-521 curves are
// supported.
func (o Object) ecdsaPublicKey() (crypto.PublicKey, error) {
	// http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cs01/pkcs11-curr-v2.40-cs01.html#_Toc399398881
	// First call with nil values queries the attribute lengths.
	attrs := []C.CK_ATTRIBUTE{
		{C.CKA_EC_PARAMS, nil, 0},
		{C.CKA_EC_POINT, nil, 0},
	}
	if err := o.getAttribute(attrs); err != nil {
		return nil, fmt.Errorf("getting attributes: %w", err)
	}
	if attrs[0].ulValueLen == 0 {
		// Fixed typo: "paramaters" -> "parameters".
		return nil, fmt.Errorf("no ec parameters available")
	}
	if attrs[1].ulValueLen == 0 {
		return nil, fmt.Errorf("no ec point available")
	}
	// Second call reads the values into C-allocated buffers.
	cParam := (C.CK_VOID_PTR)(C.malloc(attrs[0].ulValueLen))
	defer C.free(unsafe.Pointer(cParam))
	attrs[0].pValue = cParam
	cPoint := (C.CK_VOID_PTR)(C.malloc(attrs[1].ulValueLen))
	defer C.free(unsafe.Pointer(cPoint))
	attrs[1].pValue = cPoint
	if err := o.getAttribute(attrs); err != nil {
		return nil, fmt.Errorf("getting attribute values: %w", err)
	}
	paramBytes := C.GoBytes(unsafe.Pointer(cParam), C.int(attrs[0].ulValueLen))
	pointBytes := C.GoBytes(unsafe.Pointer(cPoint), C.int(attrs[1].ulValueLen))
	// CKA_EC_PARAMS holds the DER encoding of the curve OID; match it
	// against the precomputed raw OIDs.
	var curve elliptic.Curve
	switch {
	case bytes.Equal(paramBytes, p256OIDRaw):
		curve = elliptic.P256()
	case bytes.Equal(paramBytes, p384OIDRaw):
		curve = elliptic.P384()
	case bytes.Equal(paramBytes, p521OIDRaw):
		curve = elliptic.P521()
	default:
		return nil, fmt.Errorf("unsupported curve")
	}
	// CKA_EC_POINT is a DER octet string wrapping the uncompressed point.
	var rawPoint asn1.RawValue
	if _, err := asn1.Unmarshal(pointBytes, &rawPoint); err != nil {
		// %w so callers can unwrap the asn1 error (consistent with the
		// other error wrapping in this function).
		return nil, fmt.Errorf("decoding ec point: %w", err)
	}
	x, y := elliptic.Unmarshal(curve, rawPoint.Bytes)
	if x == nil {
		return nil, fmt.Errorf("invalid point format")
	}
	return &ecdsa.PublicKey{
		Curve: curve,
		X:     x,
		Y:     y,
	}, nil
}
// PrivateKey parses the underlying object as a private key. Both RSA and ECDSA
// keys are supported.
//
// The returned PrivateKey implements crypto.Signer and optionally crypto.Decrypter
// depending on the supported mechanisms.
//
// If the object isn't a private key, this method fails.
func (o Object) PrivateKey(pub crypto.PublicKey) (crypto.PrivateKey, error) {
	if o.Class() != ClassPrivateKey {
		return nil, fmt.Errorf("object has class: %s", o.Class())
	}
	// CKA_KEY_TYPE distinguishes RSA from EC keys; allocated in C memory
	// because cgo forbids Go pointers inside C structs.
	kt := (*C.CK_KEY_TYPE)(C.malloc(C.sizeof_CK_KEY_TYPE))
	defer C.free(unsafe.Pointer(kt))
	attrs := []C.CK_ATTRIBUTE{
		{C.CKA_KEY_TYPE, C.CK_VOID_PTR(kt), C.CK_ULONG(C.sizeof_CK_KEY_TYPE)},
	}
	if err := o.getAttribute(attrs); err != nil {
		// This reads the key type, not a certificate type (message was a
		// copy-paste from Certificate()).
		return nil, fmt.Errorf("getting key type: %w", err)
	}
	// The caller-supplied public key must match the key's algorithm; it is
	// paired with the handle so the signer can size and encode its output.
	switch *kt {
	case C.CKK_EC:
		p, ok := pub.(*ecdsa.PublicKey)
		if !ok {
			return nil, fmt.Errorf("expected ecdsa public key, got: %T", pub)
		}
		return &ecdsaPrivateKey{o, p}, nil
	case C.CKK_RSA:
		p, ok := pub.(*rsa.PublicKey)
		if !ok {
			return nil, fmt.Errorf("expected rsa public key, got: %T", pub)
		}
		return &rsaPrivateKey{o, p}, nil
	default:
		return nil, fmt.Errorf("unsupported key type: 0x%x", *kt)
	}
}
// Precomputed ASN1 signature prefixes.
//
// These are the DER-encoded DigestInfo headers prepended to a raw digest
// before CKM_RSA_PKCS signing (which does not hash or encode itself).
//
// Borrowed from crypto/rsa.
var hashPrefixes = map[crypto.Hash][]byte{
	crypto.SHA224: {0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, 0x05, 0x00, 0x04, 0x1c},
	crypto.SHA256: {0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20},
	crypto.SHA384: {0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30},
	crypto.SHA512: {0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40},
}
// rsaPrivateKey wraps an RSA private key handle together with its public
// half, implementing crypto.Signer.
type rsaPrivateKey struct {
	o   Object         // handle to the token-resident private key
	pub *rsa.PublicKey // public half, used to size signatures
}

// Public implements crypto.Signer.
func (r *rsaPrivateKey) Public() crypto.PublicKey {
	return r.pub
}
// Sign implements crypto.Signer. Without PSS options it produces a PKCS#1
// v1.5 signature over the given (already hashed) digest using the
// CKM_RSA_PKCS mechanism.
//
// http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cs01/pkcs11-curr-v2.40-cs01.html#_Toc399398842
func (r *rsaPrivateKey) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
	if o, ok := opts.(*rsa.PSSOptions); ok {
		return r.signPSS(digest, o)
	}
	size := opts.HashFunc().Size()
	if size != len(digest) {
		// Fixed typo: "mush" -> "must".
		return nil, fmt.Errorf("input must be hashed")
	}
	// CKM_RSA_PKCS performs no hashing or DigestInfo encoding, so the ASN.1
	// prefix for the hash must be prepended manually.
	prefix, ok := hashPrefixes[opts.HashFunc()]
	if !ok {
		return nil, fmt.Errorf("unsupported hash function: %s", opts.HashFunc())
	}
	cBytes := make([]C.CK_BYTE, len(prefix)+len(digest))
	for i, b := range prefix {
		cBytes[i] = C.CK_BYTE(b)
	}
	for i, b := range digest {
		cBytes[len(prefix)+i] = C.CK_BYTE(b)
	}
	// A PKCS#1 v1.5 signature is always exactly the modulus size.
	cSig := make([]C.CK_BYTE, r.pub.Size())
	cSigLen := C.CK_ULONG(len(cSig))
	m := C.CK_MECHANISM{C.CKM_RSA_PKCS, nil, 0}
	rv := C.ck_sign_init(r.o.fl, r.o.h, &m, r.o.o)
	if err := isOk("C_SignInit", rv); err != nil {
		return nil, err
	}
	rv = C.ck_sign(r.o.fl, r.o.h, &cBytes[0], C.CK_ULONG(len(cBytes)), &cSig[0], &cSigLen)
	if err := isOk("C_Sign", rv); err != nil {
		return nil, err
	}
	if int(cSigLen) != len(cSig) {
		return nil, fmt.Errorf("expected signature of length %d, got %d", len(cSig), cSigLen)
	}
	sig := make([]byte, len(cSig))
	for i, b := range cSig {
		sig[i] = byte(b)
	}
	return sig, nil
}
// signPSS produces an RSA-PSS signature over the given digest using the
// CKM_RSA_PKCS_PSS mechanism, honoring the hash and salt-length options.
func (r *rsaPrivateKey) signPSS(digest []byte, opts *rsa.PSSOptions) ([]byte, error) {
	// http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cs01/pkcs11-curr-v2.40-cs01.html#_Toc399398846
	// http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cs01/pkcs11-curr-v2.40-cs01.html#_Toc399398845
	// The PSS parameter struct must live in C memory (cgo pointer rules).
	cParam := (C.CK_RSA_PKCS_PSS_PARAMS_PTR)(C.malloc(C.sizeof_CK_RSA_PKCS_PSS_PARAMS))
	defer C.free(unsafe.Pointer(cParam))
	// The MGF1 function is pinned to the same hash as the digest.
	switch opts.Hash {
	case crypto.SHA256:
		cParam.hashAlg = C.CKM_SHA256
		cParam.mgf = C.CKG_MGF1_SHA256
	case crypto.SHA384:
		cParam.hashAlg = C.CKM_SHA384
		cParam.mgf = C.CKG_MGF1_SHA384
	case crypto.SHA512:
		cParam.hashAlg = C.CKM_SHA512
		cParam.mgf = C.CKG_MGF1_SHA512
	default:
		return nil, fmt.Errorf("unsupported hash algorithm: %s", opts.Hash)
	}
	switch opts.SaltLength {
	case rsa.PSSSaltLengthAuto:
		// Same logic as crypto/rsa: the largest salt that fits.
		l := (r.pub.N.BitLen()-1+7)/8 - 2 - opts.Hash.Size()
		cParam.sLen = C.CK_ULONG(l)
	case rsa.PSSSaltLengthEqualsHash:
		cParam.sLen = C.CK_ULONG(opts.Hash.Size())
	default:
		cParam.sLen = C.CK_ULONG(opts.SaltLength)
	}
	cBytes := make([]C.CK_BYTE, len(digest))
	for i, b := range digest {
		cBytes[i] = C.CK_BYTE(b)
	}
	// A PSS signature is always exactly the modulus size.
	cSig := make([]C.CK_BYTE, r.pub.Size())
	cSigLen := C.CK_ULONG(len(cSig))
	m := C.CK_MECHANISM{
		mechanism:      C.CKM_RSA_PKCS_PSS,
		pParameter:     C.CK_VOID_PTR(cParam),
		ulParameterLen: C.CK_ULONG(C.sizeof_CK_RSA_PKCS_PSS_PARAMS),
	}
	rv := C.ck_sign_init(r.o.fl, r.o.h, &m, r.o.o)
	if err := isOk("C_SignInit", rv); err != nil {
		return nil, err
	}
	rv = C.ck_sign(r.o.fl, r.o.h, &cBytes[0], C.CK_ULONG(len(cBytes)), &cSig[0], &cSigLen)
	if err := isOk("C_Sign", rv); err != nil {
		return nil, err
	}
	if int(cSigLen) != len(cSig) {
		return nil, fmt.Errorf("expected signature of length %d, got %d", len(cSig), cSigLen)
	}
	sig := make([]byte, len(cSig))
	for i, b := range cSig {
		sig[i] = byte(b)
	}
	return sig, nil
}
// ecdsaPrivateKey wraps an ECDSA private key handle together with its public
// half, implementing crypto.Signer.
type ecdsaPrivateKey struct {
	o   Object           // handle to the token-resident private key
	pub *ecdsa.PublicKey // public half, used to size signatures
}

// Public implements crypto.Signer.
func (e *ecdsaPrivateKey) Public() crypto.PublicKey {
	return e.pub
}

// ecdsaSignature is the ASN.1 structure (SEQUENCE of two INTEGERs) that
// standard ECDSA signatures are marshaled into.
type ecdsaSignature struct {
	R, S *big.Int
}
// Sign implements crypto.Signer using the CKM_ECDSA mechanism. PKCS#11
// returns the raw r||s concatenation, which is re-encoded here as the ASN.1
// SEQUENCE form expected by Go callers.
func (e *ecdsaPrivateKey) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
	// http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cs01/pkcs11-curr-v2.40-cs01.html#_Toc399398884
	m := C.CK_MECHANISM{C.CKM_ECDSA, nil, 0}
	rv := C.ck_sign_init(e.o.fl, e.o.h, &m, e.o.o)
	if err := isOk("C_SignInit", rv); err != nil {
		return nil, err
	}
	// Raw ECDSA output is two field elements, each the curve size in bytes.
	byteLen := (e.pub.Curve.Params().BitSize + 7) / 8
	cSig := make([]C.CK_BYTE, byteLen*2)
	cSigLen := C.CK_ULONG(len(cSig))
	cBytes := make([]C.CK_BYTE, len(digest))
	for i, b := range digest {
		cBytes[i] = C.CK_BYTE(b)
	}
	rv = C.ck_sign(e.o.fl, e.o.h, &cBytes[0], C.CK_ULONG(len(digest)), &cSig[0], &cSigLen)
	if err := isOk("C_Sign", rv); err != nil {
		return nil, err
	}
	if int(cSigLen) != len(cSig) {
		return nil, fmt.Errorf("expected signature of length %d, got %d", len(cSig), cSigLen)
	}
	sig := make([]byte, len(cSig))
	for i, b := range cSig {
		sig[i] = byte(b)
	}
	// Split the raw signature into r and s and DER-encode them.
	var (
		r = big.NewInt(0)
		s = big.NewInt(0)
	)
	r.SetBytes(sig[:len(sig)/2])
	s.SetBytes(sig[len(sig)/2:])
	return asn1.Marshal(ecdsaSignature{r, s})
}
// CertificateType determines the kind of certificate a certificate object holds.
// This can be X.509, WTLS, GPG, etc.
//
// http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/os/pkcs11-base-v2.40-os.html#_Toc416959709
type CertificateType int

// Certificate types supported by this package.
const (
	// CertificateX509 is an X.509 public key certificate (CKC_X_509).
	CertificateX509 CertificateType = iota + 1
	// CertificateUnknown is any certificate kind this package can't parse.
	CertificateUnknown
)
// Certificate holds a certificate object. Because certificates object can hold
// various kinds of certificates, callers should check the type before calling
// methods that parse the certificate.
//
//	cert, err := obj.Certificate()
//	if err != nil {
//		// ...
//	}
//	if cert.Type() != pkcs11.CertificateX509 {
//		// unexpected kind of certificate ...
//	}
//	x509Cert, err := cert.X509()
//
type Certificate struct {
	o Object                // the underlying certificate object
	t C.CK_CERTIFICATE_TYPE // cached CKA_CERTIFICATE_TYPE value
}
// Type reports the format of the certificate held by this object.
func (c *Certificate) Type() CertificateType {
	if c.t == C.CKC_X_509 {
		return CertificateX509
	}
	return CertificateUnknown
}
// X509 parses the underlying certificate as an X.509 certificate.
//
// If the certificate holds a different type of certificate, this method
// returns an error.
func (c *Certificate) X509() (*x509.Certificate, error) {
	// http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/os/pkcs11-base-v2.40-os.html#_Toc416959712
	if c.t != C.CKC_X_509 {
		return nil, fmt.Errorf("invalid certificate type")
	}
	// TODO(ericchiang): Do we want to support CKA_URL?
	// First call determines the length of the DER-encoded CKA_VALUE.
	var n C.CK_ULONG
	attrs := []C.CK_ATTRIBUTE{
		{C.CKA_VALUE, nil, n},
	}
	if err := c.o.getAttribute(attrs); err != nil {
		// Accurate message: this reads the certificate value, not its type
		// (the old "getting certificate type" was a copy-paste).
		return nil, fmt.Errorf("getting certificate value length: %w", err)
	}
	n = attrs[0].ulValueLen
	if n == 0 {
		return nil, fmt.Errorf("certificate value not present")
	}
	// Second call reads the DER bytes into C-allocated memory.
	cRaw := (C.CK_VOID_PTR)(C.malloc(C.ulong(n)))
	defer C.free(unsafe.Pointer(cRaw))
	attrs[0].pValue = cRaw
	if err := c.o.getAttribute(attrs); err != nil {
		return nil, fmt.Errorf("getting certificate value: %w", err)
	}
	raw := C.GoBytes(unsafe.Pointer(cRaw), C.int(n))
	cert, err := x509.ParseCertificate(raw)
	if err != nil {
		// %w so callers can unwrap the x509 error.
		return nil, fmt.Errorf("parsing certificate: %w", err)
	}
	return cert, nil
}
// keyOptions holds parameters used for generating a private key.
//
// Exactly one of RSABits or ECDSACurve must be set; see (*Slot).generate.
type keyOptions struct {
	// RSABits indicates that the generated key should be a RSA key and also
	// provides the number of bits.
	RSABits int
	// ECDSACurve indicates that the generated key should be an ECDSA key and
	// identifies the curve used to generate the key.
	ECDSACurve elliptic.Curve
	// Label for the final object.
	LabelPublic  string
	LabelPrivate string
}
// generate creates a key pair on the slot, dispatching on which of the
// mutually-exclusive key parameters in opts is set, and returns the private
// key along with its associated public/private key objects on the token.
func (s *Slot) generate(opts keyOptions) (crypto.PrivateKey, error) {
	wantECDSA := opts.ECDSACurve != nil
	wantRSA := opts.RSABits != 0
	switch {
	case wantECDSA && wantRSA:
		return nil, fmt.Errorf("conflicting key parameters provided")
	case wantECDSA:
		return s.generateECDSA(opts)
	case wantRSA:
		return s.generateRSA(opts)
	default:
		return nil, fmt.Errorf("no key parameters provided")
	}
}
// generateRSA generates an RSA key pair on the slot via
// CKM_RSA_PKCS_KEY_PAIR_GEN and returns the resulting private key.
//
// http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/os/pkcs11-base-v2.40-os.html#_Toc416959719
// http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/os/pkcs11-curr-v2.40-os.html#_Toc416959971
func (s *Slot) generateRSA(o keyOptions) (crypto.PrivateKey, error) {
	var (
		mechanism = C.CK_MECHANISM{
			mechanism: C.CKM_RSA_PKCS_KEY_PAIR_GEN,
		}
		pubH  C.CK_OBJECT_HANDLE
		privH C.CK_OBJECT_HANDLE
	)
	// Attribute values must live in C memory: cgo forbids Go pointers
	// inside C structs. (An unused cFalse allocation was removed.)
	cTrue := (C.CK_VOID_PTR)(C.malloc(C.sizeof_CK_BBOOL))
	defer C.free(unsafe.Pointer(cTrue))
	*((*C.CK_BBOOL)(cTrue)) = C.CK_TRUE
	cModBits := (C.CK_VOID_PTR)(C.malloc(C.sizeof_CK_ULONG))
	defer C.free(unsafe.Pointer(cModBits))
	*((*C.CK_ULONG)(cModBits)) = C.CK_ULONG(o.RSABits)
	// Private half: sensitive, non-extractable signing key.
	privTmpl := []C.CK_ATTRIBUTE{
		{C.CKA_PRIVATE, cTrue, C.CK_ULONG(C.sizeof_CK_BBOOL)},
		{C.CKA_SENSITIVE, cTrue, C.CK_ULONG(C.sizeof_CK_BBOOL)},
		{C.CKA_SIGN, cTrue, C.CK_ULONG(C.sizeof_CK_BBOOL)},
	}
	if o.LabelPrivate != "" {
		cs := ckCString(o.LabelPrivate)
		defer C.free(unsafe.Pointer(cs))
		privTmpl = append(privTmpl, C.CK_ATTRIBUTE{
			C.CKA_LABEL,
			C.CK_VOID_PTR(cs),
			C.CK_ULONG(len(o.LabelPrivate)),
		})
	}
	// Public half: modulus size plus verification capability.
	pubTmpl := []C.CK_ATTRIBUTE{
		{C.CKA_MODULUS_BITS, cModBits, C.CK_ULONG(C.sizeof_CK_ULONG)},
		{C.CKA_VERIFY, cTrue, C.CK_ULONG(C.sizeof_CK_BBOOL)},
	}
	if o.LabelPublic != "" {
		cs := ckCString(o.LabelPublic)
		defer C.free(unsafe.Pointer(cs))
		pubTmpl = append(pubTmpl, C.CK_ATTRIBUTE{
			C.CKA_LABEL,
			C.CK_VOID_PTR(cs),
			C.CK_ULONG(len(o.LabelPublic)),
		})
	}
	rv := C.ck_generate_key_pair(
		s.fl, s.h, &mechanism,
		&pubTmpl[0], C.CK_ULONG(len(pubTmpl)),
		&privTmpl[0], C.CK_ULONG(len(privTmpl)),
		&pubH, &privH,
	)
	if err := isOk("C_GenerateKeyPair", rv); err != nil {
		return nil, err
	}
	// Re-read both handles as Objects, then pair them into a crypto.Signer.
	pubObj, err := s.newObject(pubH)
	if err != nil {
		return nil, fmt.Errorf("public key object: %w", err)
	}
	privObj, err := s.newObject(privH)
	if err != nil {
		return nil, fmt.Errorf("private key object: %w", err)
	}
	pub, err := pubObj.PublicKey()
	if err != nil {
		return nil, fmt.Errorf("parsing public key: %w", err)
	}
	priv, err := privObj.PrivateKey(pub)
	if err != nil {
		return nil, fmt.Errorf("parsing private key: %w", err)
	}
	return priv, nil
}
// DER-encoded OBJECT IDENTIFIERs for the NIST prime curves, matched byte-wise
// against the CKA_EC_PARAMS attribute.
//
// https://datatracker.ietf.org/doc/html/rfc5480#section-2.1.1.1
//
// Generated with https://play.golang.org/p/tkqXov5Xpwp
var (
	p256OIDRaw = []byte{0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07}
	p384OIDRaw = []byte{0x06, 0x05, 0x2b, 0x81, 0x04, 0x00, 0x22}
	p521OIDRaw = []byte{0x06, 0x05, 0x2b, 0x81, 0x04, 0x00, 0x23}
)
// generateECDSA implements the CKM_ECDSA_KEY_PAIR_GEN mechanism.
//
// http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/os/pkcs11-base-v2.40-os.html#_Toc416959719
// https://datatracker.ietf.org/doc/html/rfc5480#section-2.1.1.1
// http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/os/pkcs11-curr-v2.40-os.html#_Toc416960014
func (s *Slot) generateECDSA(o keyOptions) (crypto.PrivateKey, error) {
	var (
		mechanism = C.CK_MECHANISM{
			mechanism: C.CKM_EC_KEY_PAIR_GEN,
		}
		pubH  C.CK_OBJECT_HANDLE
		privH C.CK_OBJECT_HANDLE
	)
	if o.ECDSACurve == nil {
		return nil, fmt.Errorf("no curve provided")
	}
	// Map the Go curve to its DER-encoded OID for CKA_EC_PARAMS.
	var oid []byte
	switch o.ECDSACurve.Params().Name {
	case "P-256":
		oid = p256OIDRaw
	case "P-384":
		oid = p384OIDRaw
	case "P-521":
		oid = p521OIDRaw
	default:
		return nil, fmt.Errorf("unsupported ECDSA curve")
	}
	// When passing a struct or array to C, that value can't refer to Go
	// memory. Allocate all attribute values in C rather than in Go.
	// (An unused cFalse allocation was removed.)
	cOID := (C.CK_VOID_PTR)(C.CBytes(oid))
	defer C.free(unsafe.Pointer(cOID))
	cTrue := (C.CK_VOID_PTR)(C.malloc(C.sizeof_CK_BBOOL))
	defer C.free(unsafe.Pointer(cTrue))
	*((*C.CK_BBOOL)(cTrue)) = C.CK_TRUE
	// Private half: sensitive, non-extractable signing key.
	privTmpl := []C.CK_ATTRIBUTE{
		{C.CKA_PRIVATE, cTrue, C.CK_ULONG(C.sizeof_CK_BBOOL)},
		{C.CKA_SENSITIVE, cTrue, C.CK_ULONG(C.sizeof_CK_BBOOL)},
		{C.CKA_SIGN, cTrue, C.CK_ULONG(C.sizeof_CK_BBOOL)},
	}
	if o.LabelPrivate != "" {
		cs := ckCString(o.LabelPrivate)
		defer C.free(unsafe.Pointer(cs))
		privTmpl = append(privTmpl, C.CK_ATTRIBUTE{
			C.CKA_LABEL,
			C.CK_VOID_PTR(cs),
			C.CK_ULONG(len(o.LabelPrivate)),
		})
	}
	// Public half: curve parameters plus verification capability.
	pubTmpl := []C.CK_ATTRIBUTE{
		{C.CKA_EC_PARAMS, cOID, C.CK_ULONG(len(oid))},
		{C.CKA_VERIFY, cTrue, C.CK_ULONG(C.sizeof_CK_BBOOL)},
	}
	if o.LabelPublic != "" {
		cs := ckCString(o.LabelPublic)
		defer C.free(unsafe.Pointer(cs))
		pubTmpl = append(pubTmpl, C.CK_ATTRIBUTE{
			C.CKA_LABEL,
			C.CK_VOID_PTR(cs),
			C.CK_ULONG(len(o.LabelPublic)),
		})
	}
	rv := C.ck_generate_key_pair(
		s.fl, s.h, &mechanism,
		&pubTmpl[0], C.CK_ULONG(len(pubTmpl)),
		&privTmpl[0], C.CK_ULONG(len(privTmpl)),
		&pubH, &privH,
	)
	if err := isOk("C_GenerateKeyPair", rv); err != nil {
		return nil, err
	}
	// Re-read both handles as Objects, then pair them into a crypto.Signer.
	pubObj, err := s.newObject(pubH)
	if err != nil {
		return nil, fmt.Errorf("public key object: %w", err)
	}
	privObj, err := s.newObject(privH)
	if err != nil {
		return nil, fmt.Errorf("private key object: %w", err)
	}
	pub, err := pubObj.PublicKey()
	if err != nil {
		return nil, fmt.Errorf("parsing public key: %w", err)
	}
	priv, err := privObj.PrivateKey(pub)
	if err != nil {
		return nil, fmt.Errorf("parsing private key: %w", err)
	}
	return priv, nil
}
|
package tictactoe
import (
"errors"
"github.com/jkomoros/boardgame"
)
// MovePlaceToken is the move a player makes to place one of their tokens
// into an empty slot on the board.
type MovePlaceToken struct {
	// Slot is the index of the board slot in which to place the token.
	Slot int
	//Which player we THINK is making the move.
	TargetPlayerIndex int
}

// Legal reports whether this move may be applied to the given state,
// returning a descriptive error when it may not.
//
// NOTE(review): the type assertion panics if payload is not *statePayload —
// confirm the engine guarantees that invariant.
func (m *MovePlaceToken) Legal(payload boardgame.StatePayload) error {
	p := payload.(*statePayload)
	// Only the player whose turn it is may move.
	if p.game.CurrentPlayer != m.TargetPlayerIndex {
		return errors.New("The specified player is not the current player.")
	}
	// The player must still have at least one token to place.
	if p.users[m.TargetPlayerIndex].UnusedTokens.Len() < 1 {
		return errors.New("There aren't any remaining tokens for the current player to place.")
	}
	// The destination slot must be empty.
	if p.game.Slots.ComponentAt(m.Slot) != nil {
		return errors.New("The specified slot is already taken.")
	}
	return nil
}
Sketch out the Apply method for MovePlaceToken. Part of #14.
package tictactoe
import (
"errors"
"github.com/jkomoros/boardgame"
)
// MovePlaceToken is the move a player makes to place one of their tokens
// into an empty slot on the board.
type MovePlaceToken struct {
	// Slot is the index of the board slot in which to place the token.
	Slot int
	//Which player we THINK is making the move.
	TargetPlayerIndex int
}

// Legal reports whether this move may be applied to the given state,
// returning a descriptive error when it may not.
//
// NOTE(review): the type assertion panics if payload is not *statePayload —
// confirm the engine guarantees that invariant.
func (m *MovePlaceToken) Legal(payload boardgame.StatePayload) error {
	p := payload.(*statePayload)
	// Only the player whose turn it is may move.
	if p.game.CurrentPlayer != m.TargetPlayerIndex {
		return errors.New("The specified player is not the current player.")
	}
	// The player must still have at least one token to place.
	if p.users[m.TargetPlayerIndex].UnusedTokens.Len() < 1 {
		return errors.New("There aren't any remaining tokens for the current player to place.")
	}
	// The destination slot must be empty.
	if p.game.Slots.ComponentAt(m.Slot) != nil {
		return errors.New("The specified slot is already taken.")
	}
	return nil
}
// Apply moves one of the target player's unused tokens into the requested
// slot and returns the modified copy of the state. It assumes Legal has
// already returned nil for this move; it performs no validation itself.
func (m *MovePlaceToken) Apply(payload boardgame.StatePayload) boardgame.StatePayload {
	// Mutate a copy so the original payload is left untouched.
	result := payload.Copy()
	p := result.(*statePayload)
	c := p.users[m.TargetPlayerIndex].UnusedTokens.RemoveFirst()
	p.game.Slots.InsertAtSlot(c, m.Slot)
	return result
}
|
// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package idx
import (
"github.com/m3db/m3ninx/search"
"github.com/m3db/m3ninx/search/query"
)
// NewTermQuery returns a new query for finding documents which match a term exactly.
func NewTermQuery(field, term []byte) Query {
	return Query{
		query: query.NewTermQuery(field, term),
	}
}

// NewRegexpQuery returns a new query for finding documents which match a regular expression.
// The error is returned when the regular expression fails to compile.
func NewRegexpQuery(field, regexp []byte) (Query, error) {
	q, err := query.NewRegexpQuery(field, regexp)
	if err != nil {
		return Query{}, err
	}
	return Query{
		query: q,
	}, nil
}
// NewConjunctionQuery returns a new query for finding documents which match each of the
// given queries. The error return is always nil today; it is kept for API symmetry.
// NOTE(review): "NewConjuctionQuery" is the upstream constructor's own spelling.
func NewConjunctionQuery(queries ...Query) (Query, error) {
	qs := make([]search.Query, 0, len(queries))
	for _, q := range queries {
		qs = append(qs, q.query)
	}
	return Query{
		query: query.NewConjuctionQuery(qs),
	}, nil
}

// NewDisjunctionQuery returns a new query for finding documents which match at least one
// of the given queries. The error return is always nil today; it is kept for API symmetry.
// NOTE(review): "NewDisjuctionQuery" is the upstream constructor's own spelling.
func NewDisjunctionQuery(queries ...Query) (Query, error) {
	qs := make([]search.Query, 0, len(queries))
	for _, q := range queries {
		qs = append(qs, q.query)
	}
	return Query{
		query: query.NewDisjuctionQuery(qs),
	}, nil
}
// Query encapsulates a search query for an index.
type Query struct {
	// query is the wrapped lower-level search query.
	query search.Query
}

// SearchQuery returns the underlying search query for use during execution.
func (q Query) SearchQuery() search.Query {
	return q.query
}
NegationQuery ctor (#43)
// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package idx
import (
"errors"
"github.com/m3db/m3ninx/search"
"github.com/m3db/m3ninx/search/query"
)
// NewTermQuery returns a new query for finding documents which match a term exactly.
func NewTermQuery(field, term []byte) Query {
	return Query{
		query: query.NewTermQuery(field, term),
	}
}

// NewRegexpQuery returns a new query for finding documents which match a regular expression.
// The error is returned when the regular expression fails to compile.
func NewRegexpQuery(field, regexp []byte) (Query, error) {
	q, err := query.NewRegexpQuery(field, regexp)
	if err != nil {
		return Query{}, err
	}
	return Query{
		query: q,
	}, nil
}

// NewNegationQuery returns a new query for finding documents which don't match a given query.
// It is currently a stub and always returns an error.
func NewNegationQuery(q Query) (Query, error) {
	return Query{}, errors.New("not implemented")
}
// NewConjunctionQuery returns a new query for finding documents which match each of the
// given queries. The error return is always nil today; it is kept for API symmetry.
// NOTE(review): "NewConjuctionQuery" is the upstream constructor's own spelling.
func NewConjunctionQuery(queries ...Query) (Query, error) {
	qs := make([]search.Query, 0, len(queries))
	for _, q := range queries {
		qs = append(qs, q.query)
	}
	return Query{
		query: query.NewConjuctionQuery(qs),
	}, nil
}

// NewDisjunctionQuery returns a new query for finding documents which match at least one
// of the given queries. The error return is always nil today; it is kept for API symmetry.
// NOTE(review): "NewDisjuctionQuery" is the upstream constructor's own spelling.
func NewDisjunctionQuery(queries ...Query) (Query, error) {
	qs := make([]search.Query, 0, len(queries))
	for _, q := range queries {
		qs = append(qs, q.query)
	}
	return Query{
		query: query.NewDisjuctionQuery(qs),
	}, nil
}
// Query encapsulates a search query for an index.
type Query struct {
	// query is the wrapped lower-level search query.
	query search.Query
}

// SearchQuery returns the underlying search query for use during execution.
func (q Query) SearchQuery() search.Query {
	return q.query
}
|
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package api includes all types used to communicate between the various
// parts of the Kubernetes system.
package api
// ContainerManifest corresponds to the Container Manifest format, documented at:
// https://developers.google.com/compute/docs/containers#container_manifest
// This is used as the representation of Kubernetes's workloads.
type ContainerManifest struct {
	Version string `yaml:"version" json:"version"`
	Volumes []Volume `yaml:"volumes" json:"volumes"`
	Containers []Container `yaml:"containers" json:"containers"`
	Id string `yaml:"id,omitempty" json:"id,omitempty"`
}

// Volume is a named volume that containers in a manifest may mount.
type Volume struct {
	Name string `yaml:"name" json:"name"`
}

// Port describes a network port exposed by a single container.
type Port struct {
	Name string `yaml:"name,omitempty" json:"name,omitempty"`
	HostPort int `yaml:"hostPort,omitempty" json:"hostPort,omitempty"`
	ContainerPort int `yaml:"containerPort,omitempty" json:"containerPort,omitempty"`
	Protocol string `yaml:"protocol,omitempty" json:"protocol,omitempty"`
}

// VolumeMount describes where and how a named volume is mounted inside a container.
type VolumeMount struct {
	Name string `yaml:"name,omitempty" json:"name,omitempty"`
	ReadOnly bool `yaml:"readOnly,omitempty" json:"readOnly,omitempty"`
	MountPath string `yaml:"mountPath,omitempty" json:"mountPath,omitempty"`
}

// EnvVar is a single environment variable (name/value pair) set in a container.
type EnvVar struct {
	Name string `yaml:"name,omitempty" json:"name,omitempty"`
	Value string `yaml:"value,omitempty" json:"value,omitempty"`
}

// Container represents a single container that is expected to be run on the host.
type Container struct {
	Name string `yaml:"name,omitempty" json:"name,omitempty"`
	Image string `yaml:"image,omitempty" json:"image,omitempty"`
	Command string `yaml:"command,omitempty" json:"command,omitempty"`
	WorkingDir string `yaml:"workingDir,omitempty" json:"workingDir,omitempty"`
	Ports []Port `yaml:"ports,omitempty" json:"ports,omitempty"`
	Env []EnvVar `yaml:"env,omitempty" json:"env,omitempty"`
	Memory int `yaml:"memory,omitempty" json:"memory,omitempty"`
	CPU int `yaml:"cpu,omitempty" json:"cpu,omitempty"`
	VolumeMounts []VolumeMount `yaml:"volumeMounts,omitempty" json:"volumeMounts,omitempty"`
}
// Event is the representation of an event logged to etcd backends
type Event struct {
	Event string `json:"event,omitempty"`
	Manifest *ContainerManifest `json:"manifest,omitempty"`
	Container *Container `json:"container,omitempty"`
	Timestamp int64 `json:"timestamp"`
}

// The below types are used by kube_client and api_server.

// JSONBase is shared by all objects sent to, or returned from the client
type JSONBase struct {
	Kind string `json:"kind,omitempty" yaml:"kind,omitempty"`
	ID string `json:"id,omitempty" yaml:"id,omitempty"`
	CreationTimestamp string `json:"creationTimestamp,omitempty" yaml:"creationTimestamp,omitempty"`
	SelfLink string `json:"selfLink,omitempty" yaml:"selfLink,omitempty"`
}

// TaskState is the state of a task, used as either input (desired state) or output (current state)
type TaskState struct {
	Manifest ContainerManifest `json:"manifest,omitempty" yaml:"manifest,omitempty"`
	Status string `json:"status,omitempty" yaml:"status,omitempty"`
	Host string `json:"host,omitempty" yaml:"host,omitempty"`
	HostIP string `json:"hostIP,omitempty" yaml:"hostIP,omitempty"`
	Info interface{} `json:"info,omitempty" yaml:"info,omitempty"`
}

// TaskList holds a list of pods returned from a list operation.
type TaskList struct {
	JSONBase
	Items []Pod `json:"items" yaml:"items,omitempty"`
}
// Pod is a single pod, used as either input (create, update) or as output (list, get)
type Pod struct {
	JSONBase
	Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
	DesiredState TaskState `json:"desiredState,omitempty" yaml:"desiredState,omitempty"`
	CurrentState TaskState `json:"currentState,omitempty" yaml:"currentState,omitempty"`
}

// ReplicationControllerState is the state of a replication controller, either input (create, update) or as output (list, get)
type ReplicationControllerState struct {
	Replicas int `json:"replicas" yaml:"replicas"`
	ReplicasInSet map[string]string `json:"replicasInSet,omitempty" yaml:"replicasInSet,omitempty"`
	TaskTemplate TaskTemplate `json:"taskTemplate,omitempty" yaml:"taskTemplate,omitempty"`
}

// ReplicationControllerList holds a list of replication controllers.
type ReplicationControllerList struct {
	JSONBase
	Items []ReplicationController `json:"items,omitempty" yaml:"items,omitempty"`
}

// ReplicationController represents the configuration of a replication controller
type ReplicationController struct {
	JSONBase
	DesiredState ReplicationControllerState `json:"desiredState,omitempty" yaml:"desiredState,omitempty"`
	Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
}

// TaskTemplate holds the information used for creating tasks
type TaskTemplate struct {
	DesiredState TaskState `json:"desiredState,omitempty" yaml:"desiredState,omitempty"`
	Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
}

// ServiceList holds a list of services
type ServiceList struct {
	JSONBase
	Items []Service `json:"items" yaml:"items"`
}

// Service defines a service abstraction by a name (for example, mysql) consisting of local port
// (for example 3306) that the proxy listens on, and the labels that define the service.
type Service struct {
	JSONBase
	Port int `json:"port,omitempty" yaml:"port,omitempty"`
	Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
}

// Endpoints defines the endpoints that implement the actual service, for example:
// Name: "mysql", Endpoints: ["10.10.1.1:1909", "10.10.2.2:8834"]
type Endpoints struct {
	Name string
	Endpoints []string
}
More Task -> Pod
Kubernetes-commit: 66e2575f2b047371999a2ed1329122b58abd8ae3
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package api includes all types used to communicate between the various
// parts of the Kubernetes system.
package api
// ContainerManifest corresponds to the Container Manifest format, documented at:
// https://developers.google.com/compute/docs/containers#container_manifest
// This is used as the representation of Kubernetes' workloads.
type ContainerManifest struct {
	Version    string      `yaml:"version" json:"version"`
	Volumes    []Volume    `yaml:"volumes" json:"volumes"`
	Containers []Container `yaml:"containers" json:"containers"`
	Id         string      `yaml:"id,omitempty" json:"id,omitempty"`
}

// Volume is a named volume declared in a ContainerManifest.
type Volume struct {
	Name string `yaml:"name" json:"name"`
}

// Port describes a network port mapping for a single container.
type Port struct {
	Name          string `yaml:"name,omitempty" json:"name,omitempty"`
	HostPort      int    `yaml:"hostPort,omitempty" json:"hostPort,omitempty"`
	ContainerPort int    `yaml:"containerPort,omitempty" json:"containerPort,omitempty"`
	Protocol      string `yaml:"protocol,omitempty" json:"protocol,omitempty"`
}

// VolumeMount describes a mounting of a named Volume within a container.
type VolumeMount struct {
	Name      string `yaml:"name,omitempty" json:"name,omitempty"`
	ReadOnly  bool   `yaml:"readOnly,omitempty" json:"readOnly,omitempty"`
	MountPath string `yaml:"mountPath,omitempty" json:"mountPath,omitempty"`
}

// EnvVar represents a single name/value environment variable set in a container.
type EnvVar struct {
	Name  string `yaml:"name,omitempty" json:"name,omitempty"`
	Value string `yaml:"value,omitempty" json:"value,omitempty"`
}
// Container represents a single container that is expected to be run on the host.
type Container struct {
	Name         string        `yaml:"name,omitempty" json:"name,omitempty"`
	Image        string        `yaml:"image,omitempty" json:"image,omitempty"`
	Command      string        `yaml:"command,omitempty" json:"command,omitempty"`
	WorkingDir   string        `yaml:"workingDir,omitempty" json:"workingDir,omitempty"`
	Ports        []Port        `yaml:"ports,omitempty" json:"ports,omitempty"`
	Env          []EnvVar      `yaml:"env,omitempty" json:"env,omitempty"`
	Memory       int           `yaml:"memory,omitempty" json:"memory,omitempty"`
	CPU          int           `yaml:"cpu,omitempty" json:"cpu,omitempty"`
	VolumeMounts []VolumeMount `yaml:"volumeMounts,omitempty" json:"volumeMounts,omitempty"`
}

// Event is the representation of an event logged to etcd backends.
type Event struct {
	Event     string             `json:"event,omitempty"`
	Manifest  *ContainerManifest `json:"manifest,omitempty"`
	Container *Container         `json:"container,omitempty"`
	Timestamp int64              `json:"timestamp"`
}
// The below types are used by kube_client and api_server.

// JSONBase is shared by all objects sent to, or returned from the client.
type JSONBase struct {
	Kind              string `json:"kind,omitempty" yaml:"kind,omitempty"`
	ID                string `json:"id,omitempty" yaml:"id,omitempty"`
	CreationTimestamp string `json:"creationTimestamp,omitempty" yaml:"creationTimestamp,omitempty"`
	SelfLink          string `json:"selfLink,omitempty" yaml:"selfLink,omitempty"`
}

// PodState is the state of a pod, used as either input (desired state) or output (current state).
type PodState struct {
	Manifest ContainerManifest `json:"manifest,omitempty" yaml:"manifest,omitempty"`
	Status   string            `json:"status,omitempty" yaml:"status,omitempty"`
	Host     string            `json:"host,omitempty" yaml:"host,omitempty"`
	HostIP   string            `json:"hostIP,omitempty" yaml:"hostIP,omitempty"`
	Info     interface{}       `json:"info,omitempty" yaml:"info,omitempty"`
}

// PodList holds a list of pods.
type PodList struct {
	JSONBase
	Items []Pod `json:"items" yaml:"items,omitempty"`
}

// Pod is a collection of containers, used as either input (create, update) or as output (list, get).
type Pod struct {
	JSONBase
	Labels       map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
	DesiredState PodState          `json:"desiredState,omitempty" yaml:"desiredState,omitempty"`
	CurrentState PodState          `json:"currentState,omitempty" yaml:"currentState,omitempty"`
}
// ReplicationControllerState is the state of a replication controller, either input (create, update) or as output (list, get).
type ReplicationControllerState struct {
	Replicas      int               `json:"replicas" yaml:"replicas"`
	ReplicasInSet map[string]string `json:"replicasInSet,omitempty" yaml:"replicasInSet,omitempty"`
	PodTemplate   PodTemplate       `json:"podTemplate,omitempty" yaml:"podTemplate,omitempty"`
}

// ReplicationControllerList holds a list of replication controllers.
type ReplicationControllerList struct {
	JSONBase
	Items []ReplicationController `json:"items,omitempty" yaml:"items,omitempty"`
}

// ReplicationController represents the configuration of a replication controller.
type ReplicationController struct {
	JSONBase
	DesiredState ReplicationControllerState `json:"desiredState,omitempty" yaml:"desiredState,omitempty"`
	Labels       map[string]string          `json:"labels,omitempty" yaml:"labels,omitempty"`
}

// PodTemplate holds the information used for creating pods.
type PodTemplate struct {
	DesiredState PodState          `json:"desiredState,omitempty" yaml:"desiredState,omitempty"`
	Labels       map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
}

// ServiceList holds a list of services.
type ServiceList struct {
	JSONBase
	Items []Service `json:"items" yaml:"items"`
}

// Service defines a service abstraction by a name (for example, mysql) consisting of local port
// (for example 3306) that the proxy listens on, and the labels that define the service.
type Service struct {
	JSONBase
	Port   int               `json:"port,omitempty" yaml:"port,omitempty"`
	Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
}

// Endpoints defines the endpoints that implement the actual service, for example:
// Name: "mysql", Endpoints: ["10.10.1.1:1909", "10.10.2.2:8834"]
type Endpoints struct {
	Name      string
	Endpoints []string
}
|
package app
import (
"errors"
"fmt"
)
var (
	// ErrNotFound indicates the requested item was not found.
	ErrNotFound = errors.New("not found")
	// ErrInvalid indicates the request was invalid.
	ErrInvalid = errors.New("invalid")
	// ErrExists indicates the item already exists.
	ErrExists = errors.New("exists")
	// ErrNoAccess indicates access to the item is not allowed.
	ErrNoAccess = errors.New("no access")
	// ErrInternal indicates an internal fatal error.
	ErrInternal = errors.New("internal error")
	// ErrCreate indicates creation of an object failed.
	ErrCreate = errors.New("unable to create")
	// ErrInUse indicates the object is locked and in use by someone else.
	ErrInUse = errors.New("in use")
)

// Error holds the error code and additional messages.
type Error struct {
	Err     error  // Error code (one of the sentinel errors above)
	Message string // Message related to the error, may be empty
}
// Error implements the error interface: it returns the sentinel code's
// text, with ":" and the additional message appended when one is set.
func (e *Error) Error() string {
	code := e.Err.Error()
	if e.Message == "" {
		return code
	}
	return code + ":" + e.Message
}
// newError creates a new instance of an Error carrying the given
// sentinel code and message.
func newError(err error, msg string) *Error {
	e := new(Error)
	e.Err = err
	e.Message = msg
	return e
}
// Is reports whether the error code carried by err equals what.
// It unpacks *Error values so callers can compare without casting.
func Is(err error, what error) bool {
	switch e := err.(type) {
	case *Error:
		return e.Err == what
	default:
		return err == what
	}
}
// Errorf takes an error code, a format string and arguments, and
// produces a new *Error with the formatted message attached.
func Errorf(err error, message string, args ...interface{}) *Error {
	return newError(err, fmt.Sprintf(message, args...))
}
Add function to call panic after formatting the panic message.
package app
import (
"errors"
"fmt"
)
var (
	// ErrNotFound indicates the requested item was not found.
	ErrNotFound = errors.New("not found")
	// ErrInvalid indicates the request was invalid.
	ErrInvalid = errors.New("invalid")
	// ErrExists indicates the item already exists.
	ErrExists = errors.New("exists")
	// ErrNoAccess indicates access to the item is not allowed.
	ErrNoAccess = errors.New("no access")
	// ErrInternal indicates an internal fatal error.
	ErrInternal = errors.New("internal error")
	// ErrCreate indicates creation of an object failed.
	ErrCreate = errors.New("unable to create")
	// ErrInUse indicates the object is locked and in use by someone else.
	ErrInUse = errors.New("in use")
)

// Error holds the error code and additional messages.
type Error struct {
	Err     error  // Error code (one of the sentinel errors above)
	Message string // Message related to the error, may be empty
}
// Error implements the error interface. When an additional message is
// present it is appended to the code's text, separated by ":".
func (e *Error) Error() string {
	if e.Message == "" {
		return e.Err.Error()
	}
	return e.Err.Error() + ":" + e.Message
}
// newError builds an *Error from a sentinel code and a message.
func newError(err error, msg string) *Error {
	return &Error{Err: err, Message: msg}
}
// Is reports whether the error code inside err equals what, unpacking
// *Error values so callers need not cast and compare manually.
func Is(err error, what error) bool {
	e, ok := err.(*Error)
	if !ok {
		return err == what
	}
	return e.Err == what
}
// Errorf formats a message from message/args and wraps it together with
// the given error code in a new *Error.
func Errorf(err error, message string, args ...interface{}) *Error {
	formatted := fmt.Sprintf(message, args...)
	return newError(err, formatted)
}
// Panicf formats the message according to format/args and calls panic
// with the resulting string.
func Panicf(format string, args ...interface{}) {
	panic(fmt.Sprintf(format, args...))
}
|
// Copyright (c) 2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import "os"
// atexitFuncs holds the handlers registered via atexit; exit runs them
// in registration order before terminating the process.
var atexitFuncs []func()

// atexit registers a function f that will be run when exit is called.
func atexit(f func()) {
	atexitFuncs = append(atexitFuncs, f)
}
// exit runs every registered atexit handler in registration order and
// then terminates the process with the given status code.
func exit(status int) {
	for i := range atexitFuncs {
		atexitFuncs[i]()
	}
	os.Exit(status)
}
pkg/exit: Fix package to be actually usable
Oops. I was a bit too hasty with my copy & paste.
Signed-off-by: Damien Lespiau <64bd3cb94f359c1a3ce68dae5e26b40578526277@intel.com>
// Copyright (c) 2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exit
import "os"
// atexitFuncs holds the handlers registered via Atexit; Exit runs them
// in registration order before terminating the process.
var atexitFuncs []func()

// Atexit registers a function f that will be run when Exit is called.
func Atexit(f func()) {
	atexitFuncs = append(atexitFuncs, f)
}
// Exit calls all registered Atexit handlers, in the order they were
// added, before exiting the process with status.
func Exit(status int) {
	for i := range atexitFuncs {
		atexitFuncs[i]()
	}
	os.Exit(status)
}
|
package sarama
import (
"errors"
"fmt"
"math"
"sync"
"sync/atomic"
"time"
"github.com/rcrowley/go-metrics"
)
// ConsumerMessage encapsulates a Kafka message returned by the consumer.
type ConsumerMessage struct {
	Headers        []*RecordHeader // only set if kafka is version 0.11+
	Timestamp      time.Time       // only set if kafka is version 0.10+, inner message timestamp
	BlockTimestamp time.Time       // only set if kafka is version 0.10+, outer (compressed) block timestamp
	Key, Value     []byte
	Topic          string
	Partition      int32
	Offset         int64
}

// ConsumerError is what is provided to the user when an error occurs.
// It wraps an error and includes the topic and partition.
type ConsumerError struct {
	Topic     string
	Partition int32
	Err       error
}

// Error implements the error interface, identifying the topic/partition
// on which the wrapped error occurred.
func (ce ConsumerError) Error() string {
	return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
}

// Unwrap returns the underlying error, enabling errors.Is/errors.As.
func (ce ConsumerError) Unwrap() error {
	return ce.Err
}

// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
// when stopping.
type ConsumerErrors []*ConsumerError

// Error reports only the number of batched errors; inspect the slice
// elements for per-partition detail.
func (ce ConsumerErrors) Error() string {
	return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
}
// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of
// scope.
type Consumer interface {
	// Topics returns the set of available topics as retrieved from the cluster
	// metadata. This method is the same as Client.Topics(), and is provided for
	// convenience.
	Topics() ([]string, error)
	// Partitions returns the sorted list of all partition IDs for the given topic.
	// This method is the same as Client.Partitions(), and is provided for convenience.
	Partitions(topic string) ([]int32, error)
	// ConsumePartition creates a PartitionConsumer on the given topic/partition with
	// the given offset. It will return an error if this Consumer is already consuming
	// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
	// or OffsetOldest
	ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
	// HighWaterMarks returns the current high water marks for each topic and partition.
	// Consistency between partitions is not guaranteed since high water marks are updated separately.
	HighWaterMarks() map[string]map[int32]int64
	// Close shuts down the consumer. It must be called after all child
	// PartitionConsumers have already been closed.
	Close() error
	// Pause suspends fetching from the requested partitions. Future calls to the broker will not return any
	// records from these partitions until they have been resumed using Resume()/ResumeAll().
	// Note that this method does not affect partition subscription.
	// In particular, it does not cause a group rebalance when automatic assignment is used.
	Pause(topicPartitions map[string][]int32)
	// Resume resumes specified partitions which have been paused with Pause()/PauseAll().
	// New calls to the broker will return records from these partitions if there are any to be fetched.
	Resume(topicPartitions map[string][]int32)
	// PauseAll suspends fetching from all partitions. Future calls to the broker will not return any
	// records from these partitions until they have been resumed using Resume()/ResumeAll().
	// Note that this method does not affect partition subscription.
	// In particular, it does not cause a group rebalance when automatic assignment is used.
	PauseAll()
	// ResumeAll resumes all partitions which have been paused with Pause()/PauseAll().
	// New calls to the broker will return records from these partitions if there are any to be fetched.
	ResumeAll()
}

// consumer is the default Consumer implementation. The children and
// brokerConsumers maps are guarded by lock.
type consumer struct {
	conf            *Config
	children        map[string]map[int32]*partitionConsumer
	brokerConsumers map[*Broker]*brokerConsumer
	client          Client
	lock            sync.Mutex
}
// NewConsumer creates a new consumer using the given broker addresses and configuration.
func NewConsumer(addrs []string, config *Config) (Consumer, error) {
	cli, err := NewClient(addrs, config)
	if err != nil {
		return nil, err
	}
	return newConsumer(cli)
}
// NewConsumerFromClient creates a new consumer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this consumer.
func NewConsumerFromClient(client Client) (Consumer, error) {
	// Wrap the caller-supplied client so that our Close() never closes it.
	wrapped := &nopCloserClient{client}
	return newConsumer(wrapped)
}
// newConsumer builds the consumer around an open client, refusing a
// client that has already been closed.
func newConsumer(client Client) (Consumer, error) {
	// Check that we are not dealing with a closed Client before processing
	// any other arguments.
	if client.Closed() {
		return nil, ErrClosedClient
	}
	return &consumer{
		client:          client,
		conf:            client.Config(),
		children:        make(map[string]map[int32]*partitionConsumer),
		brokerConsumers: make(map[*Broker]*brokerConsumer),
	}, nil
}
// Close implements Consumer by closing the underlying client.
func (c *consumer) Close() error {
	return c.client.Close()
}

// Topics implements Consumer by delegating to the client's cluster metadata.
func (c *consumer) Topics() ([]string, error) {
	return c.client.Topics()
}

// Partitions implements Consumer by delegating to the client's cluster metadata.
func (c *consumer) Partitions(topic string) ([]int32, error) {
	return c.client.Partitions(topic)
}
// ConsumePartition implements Consumer. It builds a partitionConsumer for
// topic/partition starting at offset, registers it, starts its goroutines,
// and hands it to the broker worker that owns the partition's leader.
func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
	child := &partitionConsumer{
		consumer:             c,
		conf:                 c.conf,
		topic:                topic,
		partition:            partition,
		messages:             make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
		errors:               make(chan *ConsumerError, c.conf.ChannelBufferSize),
		feeder:               make(chan *FetchResponse, 1),
		preferredReadReplica: invalidPreferredReplicaID,
		trigger:              make(chan none, 1),
		dying:                make(chan none),
		fetchSize:            c.conf.Consumer.Fetch.Default,
	}
	// Resolve the requested offset (literal, OffsetNewest or OffsetOldest)
	// first, so an out-of-range request fails before anything is registered.
	if err := child.chooseStartingOffset(offset); err != nil {
		return nil, err
	}
	var leader *Broker
	var err error
	if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
		return nil, err
	}
	// addChild rejects a duplicate subscription for the same topic/partition.
	if err := c.addChild(child); err != nil {
		return nil, err
	}
	go withRecover(child.dispatcher)
	go withRecover(child.responseFeeder)
	// Attach the child to the leader's broker worker, which starts fetching.
	child.broker = c.refBrokerConsumer(leader)
	child.broker.input <- child
	return child, nil
}
// HighWaterMarks implements Consumer. It snapshots the high water mark of
// every child partition consumer, keyed by topic and partition.
func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
	c.lock.Lock()
	defer c.lock.Unlock()
	result := make(map[string]map[int32]int64, len(c.children))
	for topic, partitions := range c.children {
		marks := make(map[int32]int64, len(partitions))
		for id, pc := range partitions {
			marks[id] = pc.HighWaterMarkOffset()
		}
		result[topic] = marks
	}
	return result
}
// addChild registers a new partitionConsumer under its topic/partition,
// returning a ConfigurationError if that pair is already being consumed.
func (c *consumer) addChild(child *partitionConsumer) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	partitions, ok := c.children[child.topic]
	if !ok {
		partitions = make(map[int32]*partitionConsumer)
		c.children[child.topic] = partitions
	}
	if _, taken := partitions[child.partition]; taken {
		return ConfigurationError("That topic/partition is already being consumed")
	}
	partitions[child.partition] = child
	return nil
}
// removeChild drops a finished partitionConsumer from the children map;
// called by the child's dispatcher as it shuts down.
func (c *consumer) removeChild(child *partitionConsumer) {
	c.lock.Lock()
	defer c.lock.Unlock()
	delete(c.children[child.topic], child.partition)
}
// refBrokerConsumer returns the worker for broker, creating one on first
// use, and takes a reference on it (released via unrefBrokerConsumer).
func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
	c.lock.Lock()
	defer c.lock.Unlock()
	worker, ok := c.brokerConsumers[broker]
	if !ok {
		worker = c.newBrokerConsumer(broker)
		c.brokerConsumers[broker] = worker
	}
	worker.refs++
	return worker
}
// unrefBrokerConsumer drops one reference to a broker worker. When the
// count reaches zero the worker's input channel is closed (stopping its
// goroutine) and it is removed from the map — unless it has already been
// replaced there via abandonBrokerConsumer.
func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
	c.lock.Lock()
	defer c.lock.Unlock()
	brokerWorker.refs--
	if brokerWorker.refs == 0 {
		close(brokerWorker.input)
		if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
			delete(c.brokerConsumers, brokerWorker.broker)
		}
	}
}

// abandonBrokerConsumer removes a broker worker from the map without
// touching its reference count, so a fresh worker can be created for the
// same broker while the old one's children drain away.
func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
	c.lock.Lock()
	defer c.lock.Unlock()
	delete(c.brokerConsumers, brokerWorker.broker)
}
// Pause implements Consumer. It pauses each listed partition that this
// consumer currently owns; unknown topics or partitions are ignored.
func (c *consumer) Pause(topicPartitions map[string][]int32) {
	c.lock.Lock()
	defer c.lock.Unlock()
	for topic, partitions := range topicPartitions {
		topicConsumers, ok := c.children[topic]
		if !ok {
			continue
		}
		for _, partition := range partitions {
			if pc, ok := topicConsumers[partition]; ok {
				pc.Pause()
			}
		}
	}
}
// Resume implements Consumer. It resumes each listed partition that this
// consumer currently owns; unknown topics or partitions are ignored.
func (c *consumer) Resume(topicPartitions map[string][]int32) {
	c.lock.Lock()
	defer c.lock.Unlock()
	for topic, partitions := range topicPartitions {
		topicConsumers, ok := c.children[topic]
		if !ok {
			continue
		}
		for _, partition := range partitions {
			if pc, ok := topicConsumers[partition]; ok {
				pc.Resume()
			}
		}
	}
}
// PauseAll implements Consumer. It pauses every child partition consumer.
func (c *consumer) PauseAll() {
	c.lock.Lock()
	defer c.lock.Unlock()
	for _, topicConsumers := range c.children {
		for _, pc := range topicConsumers {
			pc.Pause()
		}
	}
}
// ResumeAll implements Consumer. It resumes every child partition consumer.
func (c *consumer) ResumeAll() {
	c.lock.Lock()
	defer c.lock.Unlock()
	for _, topicConsumers := range c.children {
		for _, pc := range topicConsumers {
			pc.Resume()
		}
	}
}
// PartitionConsumer

// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
// of scope.
//
// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
//
// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
type PartitionConsumer interface {
	// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
	// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
	// function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
	// this before calling Close on the underlying client.
	AsyncClose()
	// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
	// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
	// the Messages channel when this function is called, you will be competing with Close for messages; consider
	// calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
	// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
	Close() error
	// Messages returns the read channel for the messages that are returned by
	// the broker.
	Messages() <-chan *ConsumerMessage
	// Errors returns a read channel of errors that occurred during consuming, if
	// enabled. By default, errors are logged and not returned over this channel.
	// If you want to implement any custom error handling, set your config's
	// Consumer.Return.Errors setting to true, and read from this channel.
	Errors() <-chan *ConsumerError
	// HighWaterMarkOffset returns the high water mark offset of the partition,
	// i.e. the offset that will be used for the next message that will be produced.
	// You can use this to determine how far behind the processing is.
	HighWaterMarkOffset() int64
	// Pause suspends fetching from this partition. Future calls to the broker will not return
	// any records from this partition until it has been resumed using Resume().
	// Note that this method does not affect partition subscription.
	// In particular, it does not cause a group rebalance when automatic assignment is used.
	Pause()
	// Resume resumes this partition, which has been paused with Pause().
	// New calls to the broker will return records from this partition if there are any to be fetched.
	// If the partition was not previously paused, this method is a no-op.
	Resume()
	// IsPaused indicates if this partition consumer is paused or not.
	IsPaused() bool
}
// partitionConsumer is the default PartitionConsumer implementation.
type partitionConsumer struct {
	highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG

	consumer *consumer
	conf     *Config
	broker   *brokerConsumer

	messages chan *ConsumerMessage // buffered by ChannelBufferSize; closed by responseFeeder
	errors   chan *ConsumerError   // buffered by ChannelBufferSize; closed by responseFeeder
	feeder   chan *FetchResponse   // fetch responses from the broker worker; closed by dispatcher

	preferredReadReplica int32

	trigger, dying chan none // trigger requests a (re)dispatch; dying signals shutdown
	closeOnce      sync.Once // guards close(dying) against repeated Close/AsyncClose
	topic          string
	partition      int32
	responseResult error
	fetchSize      int32
	offset         int64
	retries        int32
	paused         int32
}

var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
// sendError wraps err with this child's topic/partition and delivers it to
// the Errors channel when Consumer.Return.Errors is enabled, otherwise it
// is only logged.
func (child *partitionConsumer) sendError(err error) {
	wrapped := &ConsumerError{
		Topic:     child.topic,
		Partition: child.partition,
		Err:       err,
	}
	if !child.conf.Consumer.Return.Errors {
		Logger.Println(wrapped)
		return
	}
	child.errors <- wrapped
}
// computeBackoff returns the retry delay: the configured BackoffFunc fed
// with the incremented retry counter when one is set, otherwise the fixed
// Retry.Backoff duration.
func (child *partitionConsumer) computeBackoff() time.Duration {
	backoffFn := child.conf.Consumer.Retry.BackoffFunc
	if backoffFn == nil {
		return child.conf.Consumer.Retry.Backoff
	}
	attempt := atomic.AddInt32(&child.retries, 1)
	return backoffFn(int(attempt))
}
// dispatcher is the goroutine that (re)connects the child to a broker
// worker each time the trigger channel fires. On shutdown (dying closed)
// it releases the broker reference, deregisters the child from the
// consumer and closes the feeder channel so responseFeeder can finish.
func (child *partitionConsumer) dispatcher() {
	for range child.trigger {
		select {
		case <-child.dying:
			// Closing trigger terminates this range loop.
			close(child.trigger)
		case <-time.After(child.computeBackoff()):
			// Detach from the previous broker worker before redispatching.
			if child.broker != nil {
				child.consumer.unrefBrokerConsumer(child.broker)
				child.broker = nil
			}
			if err := child.dispatch(); err != nil {
				child.sendError(err)
				// Re-arm the trigger so dispatch is retried after backoff.
				child.trigger <- none{}
			}
		}
	}
	if child.broker != nil {
		child.consumer.unrefBrokerConsumer(child.broker)
	}
	child.consumer.removeChild(child)
	close(child.feeder)
}
// preferredBroker returns the broker to fetch from: the preferred read
// replica when one has been assigned and is still known to the client,
// otherwise the partition leader.
func (child *partitionConsumer) preferredBroker() (*Broker, error) {
	if child.preferredReadReplica >= 0 {
		broker, err := child.consumer.client.Broker(child.preferredReadReplica)
		if err == nil {
			return broker, nil
		}
		Logger.Printf(
			"consumer/%s/%d failed to find active broker for preferred read replica %d - will fallback to leader",
			child.topic, child.partition, child.preferredReadReplica)
		// if we couldn't find it, discard the replica preference and trigger a
		// metadata refresh whilst falling back to consuming from the leader again
		child.preferredReadReplica = invalidPreferredReplicaID
		// best-effort refresh; the fetch below proceeds regardless
		_ = child.consumer.client.RefreshMetadata(child.topic)
	}
	// if preferred replica cannot be found fallback to leader
	return child.consumer.client.Leader(child.topic, child.partition)
}
// dispatch refreshes metadata for the topic, picks the broker to consume
// from, and re-attaches this child to that broker's worker.
func (child *partitionConsumer) dispatch() error {
	if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
		return err
	}
	target, err := child.preferredBroker()
	if err != nil {
		return err
	}
	worker := child.consumer.refBrokerConsumer(target)
	child.broker = worker
	worker.input <- child
	return nil
}
// chooseStartingOffset resolves the requested offset (OffsetNewest,
// OffsetOldest, or a literal value) against the partition's current
// oldest/newest offsets, records the high water mark, and stores the
// result in child.offset. A literal offset outside [oldest, newest]
// yields ErrOffsetOutOfRange.
func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
	newest, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
	if err != nil {
		return err
	}
	child.highWaterMarkOffset = newest
	oldest, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
	if err != nil {
		return err
	}
	switch offset {
	case OffsetNewest:
		child.offset = newest
	case OffsetOldest:
		child.offset = oldest
	default:
		if offset < oldest || offset > newest {
			return ErrOffsetOutOfRange
		}
		child.offset = offset
	}
	return nil
}
// Messages implements PartitionConsumer, exposing the consumed-message channel.
func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
	return child.messages
}

// Errors implements PartitionConsumer, exposing the error channel (only
// fed when Consumer.Return.Errors is enabled).
func (child *partitionConsumer) Errors() <-chan *ConsumerError {
	return child.errors
}

// AsyncClose implements PartitionConsumer, beginning an asynchronous shutdown.
func (child *partitionConsumer) AsyncClose() {
	// this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
	// the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
	// 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will
	// also just close itself)
	child.closeOnce.Do(func() {
		// closeOnce makes repeated AsyncClose/Close calls safe: dying is
		// only ever closed once.
		close(child.dying)
	})
}
// Close implements PartitionConsumer. It starts the asynchronous shutdown
// and then drains the errors channel until it is closed, returning the
// harvested errors (if any) as a ConsumerErrors batch.
func (child *partitionConsumer) Close() error {
	child.AsyncClose()
	var collected ConsumerErrors
	for err := range child.errors {
		collected = append(collected, err)
	}
	if len(collected) == 0 {
		return nil
	}
	return collected
}
// HighWaterMarkOffset implements PartitionConsumer. The field is read
// atomically because it is updated concurrently by the fetch path.
func (child *partitionConsumer) HighWaterMarkOffset() int64 {
	return atomic.LoadInt64(&child.highWaterMarkOffset)
}
// responseFeeder is the goroutine that turns FetchResponses from the
// feeder channel into ConsumerMessages on the messages channel. If the
// user does not consume a message within MaxProcessingTime (after one
// grace period), the child reports errTimedOut to its broker worker,
// delivers the remaining parsed messages in the background, and re-queues
// itself on the worker. It closes the messages and errors channels when
// the feeder channel is closed by the dispatcher.
func (child *partitionConsumer) responseFeeder() {
	var msgs []*ConsumerMessage
	expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
	firstAttempt := true
feederLoop:
	for response := range child.feeder {
		msgs, child.responseResult = child.parseResponse(response)
		// A clean response resets the retry counter used for backoff.
		if child.responseResult == nil {
			atomic.StoreInt32(&child.retries, 0)
		}
		for i, msg := range msgs {
			child.interceptors(msg)
		messageSelect:
			select {
			case <-child.dying:
				child.broker.acks.Done()
				continue feederLoop
			case child.messages <- msg:
				firstAttempt = true
			case <-expiryTicker.C:
				if !firstAttempt {
					// Second consecutive expiry: detach from the broker
					// worker, push the rest of this batch in the
					// background, then re-subscribe.
					child.responseResult = errTimedOut
					child.broker.acks.Done()
				remainingLoop:
					for _, msg = range msgs[i:] {
						child.interceptors(msg)
						select {
						case child.messages <- msg:
						case <-child.dying:
							break remainingLoop
						}
					}
					child.broker.input <- child
					continue feederLoop
				} else {
					// current message has not been sent, return to select
					// statement
					firstAttempt = false
					goto messageSelect
				}
			}
		}
		child.broker.acks.Done()
	}
	expiryTicker.Stop()
	close(child.messages)
	close(child.errors)
}
func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
var messages []*ConsumerMessage
for _, msgBlock := range msgSet.Messages {
for _, msg := range msgBlock.Messages() {
offset := msg.Offset
timestamp := msg.Msg.Timestamp
if msg.Msg.Version >= 1 {
baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
offset += baseOffset
if msg.Msg.LogAppendTime {
timestamp = msgBlock.Msg.Timestamp
}
}
if offset < child.offset {
continue
}
messages = append(messages, &ConsumerMessage{
Topic: child.topic,
Partition: child.partition,
Key: msg.Msg.Key,
Value: msg.Msg.Value,
Offset: offset,
Timestamp: timestamp,
BlockTimestamp: msgBlock.Msg.Timestamp,
})
child.offset = offset + 1
}
}
if len(messages) == 0 {
child.offset++
}
return messages, nil
}
// parseRecords converts a (0.11+) RecordBatch into ConsumerMessages, skipping
// records below the partition's current offset and advancing child.offset past
// everything it consumes.
func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
	out := make([]*ConsumerMessage, 0, len(batch.Records))
	for _, record := range batch.Records {
		recOffset := batch.FirstOffset + record.OffsetDelta
		// already delivered on a previous fetch
		if recOffset < child.offset {
			continue
		}
		ts := batch.FirstTimestamp.Add(record.TimestampDelta)
		if batch.LogAppendTime {
			// broker-assigned timestamps apply batch-wide
			ts = batch.MaxTimestamp
		}
		out = append(out, &ConsumerMessage{
			Topic:     child.topic,
			Partition: child.partition,
			Key:       record.Key,
			Value:     record.Value,
			Offset:    recOffset,
			Timestamp: ts,
			Headers:   record.Headers,
		})
		child.offset = recOffset + 1
	}
	// nothing consumable: step past the batch so we don't re-fetch it forever
	if len(out) == 0 {
		child.offset++
	}
	return out, nil
}
// parseResponse extracts this partition's messages from a FetchResponse. It
// handles throttled/empty responses, per-block errors, fetch-size growth for
// partial trailing messages, preferred read replicas, control records, and
// filtering of aborted transactions under ReadCommitted isolation. It returns
// the consumable messages, or nil plus an error which responseFeeder records
// as the response result.
func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
	var (
		metricRegistry          = child.conf.MetricRegistry
		consumerBatchSizeMetric metrics.Histogram
	)

	if metricRegistry != nil {
		consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry)
	}

	// If request was throttled and empty we log and return without error
	if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 {
		Logger.Printf(
			"consumer/broker/%d FetchResponse throttled %v\n",
			child.broker.broker.ID(), response.ThrottleTime)
		return nil, nil
	}

	block := response.GetBlock(child.topic, child.partition)
	if block == nil {
		return nil, ErrIncompleteResponse
	}

	if !errors.Is(block.Err, ErrNoError) {
		return nil, block.Err
	}

	nRecs, err := block.numRecords()
	if err != nil {
		return nil, err
	}

	// Bug fix: only record the metric when a histogram was actually obtained.
	// The histogram is only assigned above when metricRegistry is non-nil, so
	// an unconditional Update on the nil metrics.Histogram interface would
	// panic whenever conf.MetricRegistry is nil.
	if consumerBatchSizeMetric != nil {
		consumerBatchSizeMetric.Update(int64(nRecs))
	}

	if block.PreferredReadReplica != invalidPreferredReplicaID {
		child.preferredReadReplica = block.PreferredReadReplica
	}

	if nRecs == 0 {
		partialTrailingMessage, err := block.isPartial()
		if err != nil {
			return nil, err
		}
		// We got no messages. If we got a trailing one then we need to ask for more data.
		// Otherwise we just poll again and wait for one to be produced...
		if partialTrailingMessage {
			if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
				// we can't ask for more data, we've hit the configured limit
				child.sendError(ErrMessageTooLarge)
				child.offset++ // skip this one so we can keep processing future messages
			} else {
				child.fetchSize *= 2
				// check int32 overflow
				if child.fetchSize < 0 {
					child.fetchSize = math.MaxInt32
				}
				if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
					child.fetchSize = child.conf.Consumer.Fetch.Max
				}
			}
		} else if block.LastRecordsBatchOffset != nil && *block.LastRecordsBatchOffset < block.HighWaterMarkOffset {
			// check last record offset to avoid stuck if high watermark was not reached
			Logger.Printf("consumer/broker/%d received batch with zero records but high watermark was not reached, topic %s, partition %d, offset %d\n", child.broker.broker.ID(), child.topic, child.partition, *block.LastRecordsBatchOffset)
			child.offset = *block.LastRecordsBatchOffset + 1
		}

		return nil, nil
	}

	// we got messages, reset our fetch size in case it was increased for a previous request
	child.fetchSize = child.conf.Consumer.Fetch.Default
	atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)

	// abortedProducerIDs contains producerID which message should be ignored as uncommitted
	// - producerID are added when the partitionConsumer iterate over the offset at which an aborted transaction begins (abortedTransaction.FirstOffset)
	// - producerID are removed when partitionConsumer iterate over an aborted controlRecord, meaning the aborted transaction for this producer is over
	abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions))
	abortedTransactions := block.getAbortedTransactions()

	var messages []*ConsumerMessage
	for _, records := range block.RecordsSet {
		switch records.recordsType {
		case legacyRecords:
			messageSetMessages, err := child.parseMessages(records.MsgSet)
			if err != nil {
				return nil, err
			}

			messages = append(messages, messageSetMessages...)
		case defaultRecords:
			// Consume remaining abortedTransaction up to last offset of current batch
			for _, txn := range abortedTransactions {
				if txn.FirstOffset > records.RecordBatch.LastOffset() {
					break
				}
				abortedProducerIDs[txn.ProducerID] = struct{}{}
				// Pop abortedTransactions so that we never add it again
				abortedTransactions = abortedTransactions[1:]
			}

			recordBatchMessages, err := child.parseRecords(records.RecordBatch)
			if err != nil {
				return nil, err
			}

			// Parse and commit offset but do not expose messages that are:
			// - control records
			// - part of an aborted transaction when set to `ReadCommitted`

			// control record
			isControl, err := records.isControl()
			if err != nil {
				// I don't know why there is this continue in case of error to begin with
				// Safe bet is to ignore control messages if ReadUncommitted
				// and block on them in case of error and ReadCommitted
				if child.conf.Consumer.IsolationLevel == ReadCommitted {
					return nil, err
				}
				continue
			}
			if isControl {
				controlRecord, err := records.getControlRecord()
				if err != nil {
					return nil, err
				}

				if controlRecord.Type == ControlRecordAbort {
					delete(abortedProducerIDs, records.RecordBatch.ProducerID)
				}
				continue
			}

			// filter aborted transactions
			if child.conf.Consumer.IsolationLevel == ReadCommitted {
				_, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID]
				if records.RecordBatch.IsTransactional && isAborted {
					continue
				}
			}

			messages = append(messages, recordBatchMessages...)
		default:
			return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
		}
	}

	return messages, nil
}
// interceptors runs every configured consumer interceptor over msg; panics
// inside an interceptor are contained by safelyApplyInterceptor.
func (child *partitionConsumer) interceptors(msg *ConsumerMessage) {
	for _, ic := range child.conf.Consumer.Interceptors {
		msg.safelyApplyInterceptor(ic)
	}
}
// Pause implements PartitionConsumer. It atomically sets the paused flag,
// which fetchNewMessages consults to exclude this partition from requests.
func (child *partitionConsumer) Pause() {
	atomic.StoreInt32(&child.paused, 1)
}
// Resume implements PartitionConsumer. It atomically clears the paused flag
// so this partition is included in fetch requests again.
func (child *partitionConsumer) Resume() {
	atomic.StoreInt32(&child.paused, 0)
}
// IsPaused implements PartitionConsumer. It atomically reads the paused flag.
func (child *partitionConsumer) IsPaused() bool {
	return atomic.LoadInt32(&child.paused) == 1
}
// brokerConsumer fetches on behalf of every partitionConsumer currently
// assigned to a single broker. Its lifetime is reference-counted (refs) under
// the owning consumer's lock.
type brokerConsumer struct {
	consumer         *consumer
	broker           *Broker
	input            chan *partitionConsumer   // new or returning subscribers
	newSubscriptions chan []*partitionConsumer // batches passed from subscriptionManager to subscriptionConsumer (nil = no news)
	subscriptions    map[*partitionConsumer]none
	wait             chan none      // signals subscriptionConsumer that work is available
	acks             sync.WaitGroup // one ack per subscription per dispatched FetchResponse
	refs             int            // guarded by consumer.lock
}
// newBrokerConsumer builds a brokerConsumer for the given broker and starts
// its two worker goroutines (batching manager + fetch loop).
func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
	bc := &brokerConsumer{
		consumer:         c,
		broker:           broker,
		input:            make(chan *partitionConsumer),
		newSubscriptions: make(chan []*partitionConsumer),
		subscriptions:    make(map[*partitionConsumer]none),
		wait:             make(chan none, 1),
		refs:             0,
	}

	// subscriptionManager batches incoming subscriptions; subscriptionConsumer
	// performs the actual fetches.
	go withRecover(bc.subscriptionManager)
	go withRecover(bc.subscriptionConsumer)

	return bc
}
// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
// goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks
// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions is available,
// so the main goroutine can block waiting for work if it has none.
func (bc *brokerConsumer) subscriptionManager() {
	var partitionConsumers []*partitionConsumer

	for {
		// check for any partition consumer asking to subscribe if there aren't
		// any, trigger the network request by sending "nil" to the
		// newSubscriptions channel
		select {
		case pc, ok := <-bc.input:
			if !ok {
				// input closed: last reference dropped, shut down
				goto done
			}

			// add to list of subscribing consumers
			partitionConsumers = append(partitionConsumers, pc)

			// wait up to 250ms to drain input of any further incoming
			// subscriptions
			for batchComplete := false; !batchComplete; {
				select {
				case pc, ok := <-bc.input:
					if !ok {
						goto done
					}

					partitionConsumers = append(partitionConsumers, pc)
				case <-time.After(250 * time.Millisecond):
					batchComplete = true
				}
			}

			Logger.Printf(
				"consumer/broker/%d accumulated %d new subscriptions\n",
				bc.broker.ID(), len(partitionConsumers))

			// wake the main goroutine (wait is buffered) and hand it the batch
			bc.wait <- none{}
			bc.newSubscriptions <- partitionConsumers

			// clear out the batch
			partitionConsumers = nil

		case bc.newSubscriptions <- nil:
			// main goroutine asked for news between fetches; nothing pending
		}
	}

done:
	// deliver any batch accumulated before shutdown, then close both channels
	// so subscriptionConsumer (and abort) can terminate their range loops
	close(bc.wait)
	if len(partitionConsumers) > 0 {
		bc.newSubscriptions <- partitionConsumers
	}
	close(bc.newSubscriptions)
}
// subscriptionConsumer ensures we will get nil right away if no new subscriptions is available
// It is the main fetch loop: between every network round-trip it absorbs the
// batch (possibly nil) from subscriptionManager, fans the FetchResponse out to
// every subscribed partitionConsumer, then waits for all of them to ack.
func (bc *brokerConsumer) subscriptionConsumer() {
	<-bc.wait // wait for our first piece of work

	for newSubscriptions := range bc.newSubscriptions {
		bc.updateSubscriptions(newSubscriptions)

		if len(bc.subscriptions) == 0 {
			// We're about to be shut down or we're about to receive more subscriptions.
			// Either way, the signal just hasn't propagated to our goroutine yet.
			<-bc.wait
			continue
		}

		response, err := bc.fetchNewMessages()
		if err != nil {
			Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
			bc.abort(err)
			return
		}

		// one ack per subscription; each responseFeeder calls acks.Done()
		bc.acks.Add(len(bc.subscriptions))
		for child := range bc.subscriptions {
			child.feeder <- response
		}
		bc.acks.Wait()
		bc.handleResponses()
	}
}
// updateSubscriptions merges a batch of newly arrived partition consumers into
// the subscription set, then prunes any subscription whose owner has started
// shutting down (dying closed).
func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
	// register the new arrivals first
	for _, pc := range newSubscriptions {
		bc.subscriptions[pc] = none{}
		Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), pc.topic, pc.partition)
	}

	// non-blocking dying check; deleting while ranging over a map is safe in Go
	for pc := range bc.subscriptions {
		select {
		case <-pc.dying:
			Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), pc.topic, pc.partition)
			close(pc.trigger)
			delete(bc.subscriptions, pc)
		default:
			// still alive, keep it
		}
	}
}
// handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
// For each subscription it inspects the responseResult written by that
// child's responseFeeder, and either keeps it, redispatches it (signal on
// trigger + delete), or shuts it down (close trigger + delete).
func (bc *brokerConsumer) handleResponses() {
	for child := range bc.subscriptions {
		result := child.responseResult
		child.responseResult = nil

		if result == nil {
			// success — but migrate to a preferred read replica if the broker
			// told us about one that differs from the current broker
			if preferredBroker, err := child.preferredBroker(); err == nil {
				if bc.broker.ID() != preferredBroker.ID() {
					// not an error but needs redispatching to consume from preferred replica
					Logger.Printf(
						"consumer/broker/%d abandoned in favor of preferred replica broker/%d\n",
						bc.broker.ID(), preferredBroker.ID())
					child.trigger <- none{}
					delete(bc.subscriptions, child)
				}
			}
			continue
		}

		// Discard any replica preference.
		child.preferredReadReplica = invalidPreferredReplicaID

		if errors.Is(result, errTimedOut) {
			// responseFeeder already re-registered itself on bc.input, so just
			// drop the subscription here without signaling the dispatcher
			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
				bc.broker.ID(), child.topic, child.partition)
			delete(bc.subscriptions, child)
		} else if errors.Is(result, ErrOffsetOutOfRange) {
			// there's no point in retrying this it will just fail the same way again
			// shut it down and force the user to choose what to do
			child.sendError(result)
			Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
			close(child.trigger)
			delete(bc.subscriptions, child)
		} else if errors.Is(result, ErrUnknownTopicOrPartition) || errors.Is(result, ErrNotLeaderForPartition) || errors.Is(result, ErrLeaderNotAvailable) || errors.Is(result, ErrReplicaNotAvailable) {
			// not an error, but does need redispatching
			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
				bc.broker.ID(), child.topic, child.partition, result)
			child.trigger <- none{}
			delete(bc.subscriptions, child)
		} else {
			// dunno, tell the user and try redispatching
			child.sendError(result)
			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
				bc.broker.ID(), child.topic, child.partition, result)
			child.trigger <- none{}
			delete(bc.subscriptions, child)
		}
	}
}
// abort tears this brokerConsumer down after a fatal fetch error: it detaches
// itself from the parent consumer, closes the broker connection, notifies all
// current subscriptions, and then drains newSubscriptions (notifying any
// late-arriving batches) until subscriptionManager closes the channel.
func (bc *brokerConsumer) abort(err error) {
	bc.consumer.abandonBrokerConsumer(bc)
	_ = bc.broker.Close() // we don't care about the error this might return, we already have one

	for child := range bc.subscriptions {
		child.sendError(err)
		child.trigger <- none{}
	}

	for newSubscriptions := range bc.newSubscriptions {
		if len(newSubscriptions) == 0 {
			// a nil batch means the manager is idle; consume the paired wait
			// signal so the two goroutines stay in step
			<-bc.wait
			continue
		}
		for _, child := range newSubscriptions {
			child.sendError(err)
			child.trigger <- none{}
		}
	}
}
func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
request := &FetchRequest{
MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
}
if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) {
request.Version = 1
}
if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
request.Version = 2
}
if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
request.Version = 3
request.MaxBytes = MaxResponseSize
}
if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
request.Version = 4
request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
}
if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) {
request.Version = 7
// We do not currently implement KIP-227 FetchSessions. Setting the id to 0
// and the epoch to -1 tells the broker not to generate as session ID we're going
// to just ignore anyway.
request.SessionID = 0
request.SessionEpoch = -1
}
if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) {
request.Version = 10
}
if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) {
request.Version = 11
request.RackID = bc.consumer.conf.RackID
}
for child := range bc.subscriptions {
if !child.IsPaused() {
request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
}
}
return bc.broker.Fetch(request)
}
consumer: avoid obscure synchronization between the subscriptionManager and subscriptionConsumer goroutines
Since the subscription manager was improved to batch subscriptions (see https://github.com/Shopify/sarama/pull/2109/commits/dadcd808a5f6c6394e91f6a614818cf8eab732de),
it created a deadlock when new subscriptions are added after a rebalance.
package sarama
import (
"errors"
"fmt"
"math"
"sync"
"sync/atomic"
"time"
"github.com/rcrowley/go-metrics"
)
// ConsumerMessage encapsulates a Kafka message returned by the consumer.
type ConsumerMessage struct {
	Headers        []*RecordHeader // only set if kafka is version 0.11+
	Timestamp      time.Time       // only set if kafka is version 0.10+, inner message timestamp
	BlockTimestamp time.Time       // only set if kafka is version 0.10+, outer (compressed) block timestamp

	Key, Value []byte // message payload as stored by the broker
	Topic      string // topic the message was consumed from
	Partition  int32  // partition the message was consumed from
	Offset     int64  // offset of the message within the partition
}
// ConsumerError is what is provided to the user when an error occurs.
// It wraps an error and includes the topic and partition.
type ConsumerError struct {
	Topic     string
	Partition int32
	Err       error // the underlying error; retrievable via Unwrap
}
// Error implements the error interface, including topic and partition context.
func (ce ConsumerError) Error() string {
	return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
}
// Unwrap exposes the wrapped error for errors.Is / errors.As.
func (ce ConsumerError) Unwrap() error {
	return ce.Err
}
// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
// when stopping.
type ConsumerErrors []*ConsumerError
// Error implements the error interface, reporting only the error count.
func (ce ConsumerErrors) Error() string {
	return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
}
// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of
// scope.
type Consumer interface {
	// Topics returns the set of available topics as retrieved from the cluster
	// metadata. This method is the same as Client.Topics(), and is provided for
	// convenience.
	Topics() ([]string, error)

	// Partitions returns the sorted list of all partition IDs for the given topic.
	// This method is the same as Client.Partitions(), and is provided for convenience.
	Partitions(topic string) ([]int32, error)

	// ConsumePartition creates a PartitionConsumer on the given topic/partition with
	// the given offset. It will return an error if this Consumer is already consuming
	// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
	// or OffsetOldest
	ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)

	// HighWaterMarks returns the current high water marks for each topic and partition.
	// Consistency between partitions is not guaranteed since high water marks are updated separately.
	HighWaterMarks() map[string]map[int32]int64

	// Close shuts down the consumer. It must be called after all child
	// PartitionConsumers have already been closed.
	Close() error

	// Pause suspends fetching from the requested partitions. Future calls to the broker will not return any
	// records from these partitions until they have been resumed using Resume()/ResumeAll().
	// Note that this method does not affect partition subscription.
	// In particular, it does not cause a group rebalance when automatic assignment is used.
	Pause(topicPartitions map[string][]int32)

	// Resume resumes specified partitions which have been paused with Pause()/PauseAll().
	// New calls to the broker will return records from these partitions if there are any to be fetched.
	Resume(topicPartitions map[string][]int32)

	// PauseAll suspends fetching from all partitions. Future calls to the broker will not return any
	// records from these partitions until they have been resumed using Resume()/ResumeAll().
	// Note that this method does not affect partition subscription.
	// In particular, it does not cause a group rebalance when automatic assignment is used.
	PauseAll()

	// ResumeAll resumes all partitions which have been paused with Pause()/PauseAll().
	// New calls to the broker will return records from these partitions if there are any to be fetched.
	ResumeAll()
}
// consumer implements Consumer on top of a Client, tracking one
// partitionConsumer per consumed topic/partition and one reference-counted
// brokerConsumer per broker being fetched from.
type consumer struct {
	conf            *Config
	children        map[string]map[int32]*partitionConsumer // topic -> partition -> child
	brokerConsumers map[*Broker]*brokerConsumer
	client          Client
	lock            sync.Mutex // guards children and brokerConsumers
}
// NewConsumer creates a new consumer using the given broker addresses and
// configuration. The consumer owns the client it creates and will close it.
func NewConsumer(addrs []string, config *Config) (Consumer, error) {
	cli, err := NewClient(addrs, config)
	if err != nil {
		return nil, err
	}
	return newConsumer(cli)
}
// NewConsumerFromClient creates a new consumer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this consumer.
func NewConsumerFromClient(client Client) (Consumer, error) {
	// For clients passed in by the client, ensure we don't
	// call Close() on it.
	cli := &nopCloserClient{client}
	return newConsumer(cli)
}
// newConsumer wires a consumer around an already-constructed client.
func newConsumer(client Client) (Consumer, error) {
	// Check that we are not dealing with a closed Client before processing any other arguments
	if client.Closed() {
		return nil, ErrClosedClient
	}

	return &consumer{
		client:          client,
		conf:            client.Config(),
		children:        make(map[string]map[int32]*partitionConsumer),
		brokerConsumers: make(map[*Broker]*brokerConsumer),
	}, nil
}
// Close shuts the consumer down by closing its underlying client.
func (c *consumer) Close() error {
	return c.client.Close()
}
// Topics delegates to the underlying client's cluster metadata.
func (c *consumer) Topics() ([]string, error) {
	return c.client.Topics()
}
// Partitions delegates to the underlying client's cluster metadata.
func (c *consumer) Partitions(topic string) ([]int32, error) {
	return c.client.Partitions(topic)
}
// ConsumePartition creates and starts a partitionConsumer for the given
// topic/partition at the requested offset. It resolves the starting offset,
// finds the partition leader, registers the child (failing if the partition is
// already consumed), starts its goroutines, and subscribes it to the leader's
// brokerConsumer.
func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
	child := &partitionConsumer{
		consumer:             c,
		conf:                 c.conf,
		topic:                topic,
		partition:            partition,
		messages:             make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
		errors:               make(chan *ConsumerError, c.conf.ChannelBufferSize),
		feeder:               make(chan *FetchResponse, 1),
		preferredReadReplica: invalidPreferredReplicaID,
		trigger:              make(chan none, 1),
		dying:                make(chan none),
		fetchSize:            c.conf.Consumer.Fetch.Default,
	}

	// validate the requested offset against the partition's current bounds
	if err := child.chooseStartingOffset(offset); err != nil {
		return nil, err
	}

	var leader *Broker
	var err error
	if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
		return nil, err
	}

	// rejects duplicate subscriptions for the same topic/partition
	if err := c.addChild(child); err != nil {
		return nil, err
	}

	go withRecover(child.dispatcher)
	go withRecover(child.responseFeeder)

	child.broker = c.refBrokerConsumer(leader)
	child.broker.input <- child

	return child, nil
}
// HighWaterMarks returns a snapshot of the high water mark of every consumed
// topic/partition. Marks are read per-partition, so cross-partition
// consistency is not guaranteed.
func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
	c.lock.Lock()
	defer c.lock.Unlock()

	result := make(map[string]map[int32]int64)
	for topic, partitions := range c.children {
		marks := make(map[int32]int64, len(partitions))
		for partition, child := range partitions {
			marks[partition] = child.HighWaterMarkOffset()
		}
		result[topic] = marks
	}
	return result
}
// addChild registers child under its topic/partition, rejecting duplicates.
func (c *consumer) addChild(child *partitionConsumer) error {
	c.lock.Lock()
	defer c.lock.Unlock()

	partitions, ok := c.children[child.topic]
	if !ok {
		partitions = make(map[int32]*partitionConsumer)
		c.children[child.topic] = partitions
	}

	if _, taken := partitions[child.partition]; taken {
		return ConfigurationError("That topic/partition is already being consumed")
	}

	partitions[child.partition] = child
	return nil
}
// removeChild deregisters child from the topic/partition map; called by the
// child's dispatcher on shutdown.
func (c *consumer) removeChild(child *partitionConsumer) {
	c.lock.Lock()
	defer c.lock.Unlock()
	delete(c.children[child.topic], child.partition)
}
// refBrokerConsumer returns the brokerConsumer for broker (creating and
// starting one on first use) and takes a reference on it.
func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
	c.lock.Lock()
	defer c.lock.Unlock()

	bc, ok := c.brokerConsumers[broker]
	if !ok {
		bc = c.newBrokerConsumer(broker)
		c.brokerConsumers[broker] = bc
	}
	bc.refs++

	return bc
}
// unrefBrokerConsumer drops a reference; when the last one goes, the worker's
// input channel is closed (ending subscriptionManager) and it is removed from
// the map — unless abort() already replaced it via abandonBrokerConsumer.
func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
	c.lock.Lock()
	defer c.lock.Unlock()

	brokerWorker.refs--

	if brokerWorker.refs == 0 {
		close(brokerWorker.input)
		if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
			delete(c.brokerConsumers, brokerWorker.broker)
		}
	}
}
// abandonBrokerConsumer detaches a failed broker worker from the map so a
// fresh one is created on the next refBrokerConsumer for that broker.
func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
	c.lock.Lock()
	defer c.lock.Unlock()

	delete(c.brokerConsumers, brokerWorker.broker)
}
// Pause implements Consumer. Unknown topics or partitions are ignored.
func (c *consumer) Pause(topicPartitions map[string][]int32) {
	c.lock.Lock()
	defer c.lock.Unlock()

	for topic, partitions := range topicPartitions {
		topicConsumers, ok := c.children[topic]
		if !ok {
			continue
		}
		for _, partition := range partitions {
			if pc, ok := topicConsumers[partition]; ok {
				pc.Pause()
			}
		}
	}
}
// Resume implements Consumer. Unknown topics or partitions are ignored.
func (c *consumer) Resume(topicPartitions map[string][]int32) {
	c.lock.Lock()
	defer c.lock.Unlock()

	for topic, partitions := range topicPartitions {
		topicConsumers, ok := c.children[topic]
		if !ok {
			continue
		}
		for _, partition := range partitions {
			if pc, ok := topicConsumers[partition]; ok {
				pc.Resume()
			}
		}
	}
}
// PauseAll implements Consumer, pausing every currently consumed partition.
func (c *consumer) PauseAll() {
	c.lock.Lock()
	defer c.lock.Unlock()

	for _, partitions := range c.children {
		for _, pc := range partitions {
			pc.Pause()
		}
	}
}
// ResumeAll implements Consumer, resuming every currently consumed partition.
func (c *consumer) ResumeAll() {
	c.lock.Lock()
	defer c.lock.Unlock()

	for _, partitions := range c.children {
		for _, pc := range partitions {
			pc.Resume()
		}
	}
}
// PartitionConsumer

// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
// of scope.
//
// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
//
// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
type PartitionConsumer interface {
	// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
	// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
	// function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
	// this before calling Close on the underlying client.
	AsyncClose()

	// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
	// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
	// the Messages channel when this function is called, you will be competing with Close for messages; consider
	// calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
	// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
	Close() error

	// Messages returns the read channel for the messages that are returned by
	// the broker.
	Messages() <-chan *ConsumerMessage

	// Errors returns a read channel of errors that occurred during consuming, if
	// enabled. By default, errors are logged and not returned over this channel.
	// If you want to implement any custom error handling, set your config's
	// Consumer.Return.Errors setting to true, and read from this channel.
	Errors() <-chan *ConsumerError

	// HighWaterMarkOffset returns the high water mark offset of the partition,
	// i.e. the offset that will be used for the next message that will be produced.
	// You can use this to determine how far behind the processing is.
	HighWaterMarkOffset() int64

	// Pause suspends fetching from this partition. Future calls to the broker will not return
	// any records from this partition until it has been resumed using Resume().
	// Note that this method does not affect partition subscription.
	// In particular, it does not cause a group rebalance when automatic assignment is used.
	Pause()

	// Resume resumes this partition which has been paused with Pause().
	// New calls to the broker will return records from these partitions if there are any to be fetched.
	// If the partition was not previously paused, this method is a no-op.
	Resume()

	// IsPaused indicates if this partition consumer is paused or not
	IsPaused() bool
}
// partitionConsumer implements PartitionConsumer for a single topic/partition.
type partitionConsumer struct {
	highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG

	consumer *consumer
	conf     *Config
	broker   *brokerConsumer // broker worker currently feeding this partition; nil between dispatches

	messages chan *ConsumerMessage // user-facing message channel
	errors   chan *ConsumerError   // user-facing error channel (when Return.Errors)
	feeder   chan *FetchResponse   // responses handed over by the brokerConsumer

	preferredReadReplica int32

	trigger, dying chan none // trigger: signal dispatcher to redispatch; dying: closed by AsyncClose
	closeOnce      sync.Once // makes AsyncClose idempotent
	topic          string
	partition      int32
	responseResult error // outcome of the last parseResponse, read by handleResponses
	fetchSize      int32
	offset         int64 // next offset to request
	retries        int32 // consecutive retry count fed to BackoffFunc
	paused         int32 // accessed atomically via Pause/Resume/IsPaused
}
// errTimedOut marks a subscription whose consumer drained Messages too slowly;
// handleResponses treats it as a silent re-subscribe, not a user error.
var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
// sendError forwards err to the user: on the Errors channel when
// Consumer.Return.Errors is enabled, otherwise via the logger.
func (child *partitionConsumer) sendError(err error) {
	wrapped := &ConsumerError{
		Topic:     child.topic,
		Partition: child.partition,
		Err:       err,
	}

	if !child.conf.Consumer.Return.Errors {
		Logger.Println(wrapped)
		return
	}
	child.errors <- wrapped
}
// computeBackoff returns the delay before the next dispatch attempt, using
// the user-supplied BackoffFunc (with an incremented retry count) when set,
// and the static Retry.Backoff otherwise.
func (child *partitionConsumer) computeBackoff() time.Duration {
	if fn := child.conf.Consumer.Retry.BackoffFunc; fn != nil {
		return fn(int(atomic.AddInt32(&child.retries, 1)))
	}
	return child.conf.Consumer.Retry.Backoff
}
// dispatcher keeps the partition consumer attached to a live broker. Each
// signal on trigger (after a retry backoff) drops the current broker reference
// and re-dispatches; closing dying terminates the loop, after which the child
// deregisters itself and closes feeder so responseFeeder can finish.
func (child *partitionConsumer) dispatcher() {
	for range child.trigger {
		select {
		case <-child.dying:
			// shutting down: closing trigger ends this range loop
			close(child.trigger)
		case <-time.After(child.computeBackoff()):
			// release the old broker worker before picking a new one
			if child.broker != nil {
				child.consumer.unrefBrokerConsumer(child.broker)
				child.broker = nil
			}

			if err := child.dispatch(); err != nil {
				child.sendError(err)
				// re-arm the trigger so we retry after another backoff
				child.trigger <- none{}
			}
		}
	}

	if child.broker != nil {
		child.consumer.unrefBrokerConsumer(child.broker)
	}
	child.consumer.removeChild(child)
	close(child.feeder)
}
// preferredBroker returns the broker this partition should fetch from: the
// preferred read replica when one was advertised and is still reachable,
// otherwise the partition leader.
func (child *partitionConsumer) preferredBroker() (*Broker, error) {
	if child.preferredReadReplica >= 0 {
		broker, err := child.consumer.client.Broker(child.preferredReadReplica)
		if err == nil {
			return broker, nil
		}
		Logger.Printf(
			"consumer/%s/%d failed to find active broker for preferred read replica %d - will fallback to leader",
			child.topic, child.partition, child.preferredReadReplica)

		// if we couldn't find it, discard the replica preference and trigger a
		// metadata refresh whilst falling back to consuming from the leader again
		child.preferredReadReplica = invalidPreferredReplicaID
		_ = child.consumer.client.RefreshMetadata(child.topic)
	}

	// if preferred replica cannot be found fallback to leader
	return child.consumer.client.Leader(child.topic, child.partition)
}
// dispatch refreshes metadata and (re)attaches this partition consumer to
// whichever broker it should currently fetch from.
func (child *partitionConsumer) dispatch() error {
	if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
		return err
	}

	target, err := child.preferredBroker()
	if err != nil {
		return err
	}

	child.broker = child.consumer.refBrokerConsumer(target)
	child.broker.input <- child

	return nil
}
// chooseStartingOffset resolves the user-requested offset (a literal value,
// OffsetNewest or OffsetOldest) against the partition's current bounds, primes
// the high water mark, and rejects anything out of range.
func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
	newest, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
	if err != nil {
		return err
	}
	child.highWaterMarkOffset = newest

	oldest, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
	if err != nil {
		return err
	}

	switch {
	case offset == OffsetNewest:
		child.offset = newest
	case offset == OffsetOldest:
		child.offset = oldest
	case offset >= oldest && offset <= newest:
		child.offset = offset
	default:
		return ErrOffsetOutOfRange
	}

	return nil
}
// Messages returns the read channel for messages fetched from the broker.
func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
	return child.messages
}
// Errors returns the read channel for consuming errors (populated only when
// Consumer.Return.Errors is enabled).
func (child *partitionConsumer) Errors() <-chan *ConsumerError {
	return child.errors
}
// AsyncClose kicks off an asynchronous shutdown; safe to call more than once.
func (child *partitionConsumer) AsyncClose() {
	// this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
	// the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
	// 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will
	// also just close itself)
	child.closeOnce.Do(func() {
		close(child.dying)
	})
}
// Close shuts the partition consumer down synchronously, draining the errors
// channel until teardown closes it and returning anything harvested as a
// ConsumerErrors batch (nil if no errors occurred).
func (child *partitionConsumer) Close() error {
	child.AsyncClose()

	var collected ConsumerErrors
	for err := range child.errors {
		collected = append(collected, err)
	}

	if len(collected) == 0 {
		return nil
	}
	return collected
}
// HighWaterMarkOffset returns the last observed high water mark for this
// partition. It is read atomically because parseResponse updates it from the
// responseFeeder goroutine.
func (child *partitionConsumer) HighWaterMarkOffset() int64 {
	return atomic.LoadInt64(&child.highWaterMarkOffset)
}
// responseFeeder pumps parsed messages from each FetchResponse arriving on
// child.feeder into the user-facing Messages channel, acknowledging every
// response back to the owning brokerConsumer via acks.Done(). If the user is
// too slow (no message accepted within MaxProcessingTime on a second
// consecutive attempt), the subscription times out: the remaining messages are
// delivered in the background and the child re-registers itself on
// child.broker.input so the broker worker is not stalled.
func (child *partitionConsumer) responseFeeder() {
	var msgs []*ConsumerMessage
	expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
	firstAttempt := true

feederLoop:
	for response := range child.feeder {
		msgs, child.responseResult = child.parseResponse(response)

		// a clean parse resets the retry counter used by computeBackoff
		if child.responseResult == nil {
			atomic.StoreInt32(&child.retries, 0)
		}

		for i, msg := range msgs {
			child.interceptors(msg)
		messageSelect:
			select {
			case <-child.dying:
				// consumer is shutting down; ack the response and stop delivering
				child.broker.acks.Done()
				continue feederLoop
			case child.messages <- msg:
				firstAttempt = true
			case <-expiryTicker.C:
				if !firstAttempt {
					// user has been slow for two ticks in a row: record the
					// timeout, ack, then drain the rest of the batch in the
					// background and re-subscribe
					child.responseResult = errTimedOut
					child.broker.acks.Done()
				remainingLoop:
					for _, msg = range msgs[i:] {
						child.interceptors(msg)
						select {
						case child.messages <- msg:
						case <-child.dying:
							break remainingLoop
						}
					}
					child.broker.input <- child
					continue feederLoop
				} else {
					// current message has not been sent, return to select
					// statement
					firstAttempt = false
					goto messageSelect
				}
			}
		}

		child.broker.acks.Done()
	}
	// feeder was closed by the dispatcher: tear down the user-facing channels
	expiryTicker.Stop()
	close(child.messages)
	close(child.errors)
}
// parseMessages converts a legacy (pre-0.11) MessageSet into ConsumerMessages,
// skipping anything below the partition's current offset and advancing
// child.offset past everything consumed.
func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
	var messages []*ConsumerMessage
	for _, msgBlock := range msgSet.Messages {
		for _, msg := range msgBlock.Messages() {
			offset := msg.Offset
			timestamp := msg.Msg.Timestamp
			if msg.Msg.Version >= 1 {
				// v1 inner messages of a compressed block carry relative
				// offsets; rebase them onto the wrapper block's offset
				baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
				offset += baseOffset
				if msg.Msg.LogAppendTime {
					// broker-assigned timestamp lives on the wrapper message
					timestamp = msgBlock.Msg.Timestamp
				}
			}
			// skip messages that were already delivered by a previous fetch
			if offset < child.offset {
				continue
			}
			messages = append(messages, &ConsumerMessage{
				Topic:          child.topic,
				Partition:      child.partition,
				Key:            msg.Msg.Key,
				Value:          msg.Msg.Value,
				Offset:         offset,
				Timestamp:      timestamp,
				BlockTimestamp: msgBlock.Msg.Timestamp,
			})
			child.offset = offset + 1
		}
	}
	// nothing consumable: bump the offset by one so we make progress instead
	// of re-fetching the same set forever
	if len(messages) == 0 {
		child.offset++
	}
	return messages, nil
}
// parseRecords converts a v2 RecordBatch into ConsumerMessages, skipping
// records below the child's current offset and advancing child.offset past
// each record it keeps. Offsets and timestamps are reconstructed from the
// batch base values plus each record's deltas.
func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
	messages := make([]*ConsumerMessage, 0, len(batch.Records))

	for _, rec := range batch.Records {
		offset := batch.FirstOffset + rec.OffsetDelta
		if offset < child.offset {
			continue
		}
		timestamp := batch.FirstTimestamp.Add(rec.TimestampDelta)
		if batch.LogAppendTime {
			// broker-assigned time applies uniformly to the whole batch
			timestamp = batch.MaxTimestamp
		}
		messages = append(messages, &ConsumerMessage{
			Topic:     child.topic,
			Partition: child.partition,
			Key:       rec.Key,
			Value:     rec.Value,
			Offset:    offset,
			Timestamp: timestamp,
			Headers:   rec.Headers,
		})
		child.offset = offset + 1
	}
	if len(messages) == 0 {
		// nothing consumable: step forward to avoid re-fetching the same data
		child.offset++
	}
	return messages, nil
}
// parseResponse extracts the consumable messages for this partition from a
// FetchResponse, advancing child.offset and adapting child.fetchSize as it
// goes.
//
// It returns (nil, nil) for throttled-empty responses and for responses with
// no usable records; it returns an error for a missing block, a block-level
// Kafka error, or an undecodable record set. Control records and — under
// ReadCommitted isolation — records belonging to aborted transactions are
// filtered out of the result.
func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
	var (
		metricRegistry          = child.conf.MetricRegistry
		consumerBatchSizeMetric metrics.Histogram
	)

	if metricRegistry != nil {
		consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry)
	}

	// If request was throttled and empty we log and return without error
	if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 {
		Logger.Printf(
			"consumer/broker/%d FetchResponse throttled %v\n",
			child.broker.broker.ID(), response.ThrottleTime)
		return nil, nil
	}

	block := response.GetBlock(child.topic, child.partition)
	if block == nil {
		return nil, ErrIncompleteResponse
	}

	if !errors.Is(block.Err, ErrNoError) {
		return nil, block.Err
	}

	nRecs, err := block.numRecords()
	if err != nil {
		return nil, err
	}

	// Fix: only record the metric when metrics are enabled. When
	// MetricRegistry is nil, consumerBatchSizeMetric is left as a nil
	// interface and calling Update on it would panic.
	if consumerBatchSizeMetric != nil {
		consumerBatchSizeMetric.Update(int64(nRecs))
	}

	if block.PreferredReadReplica != invalidPreferredReplicaID {
		child.preferredReadReplica = block.PreferredReadReplica
	}

	if nRecs == 0 {
		partialTrailingMessage, err := block.isPartial()
		if err != nil {
			return nil, err
		}
		// We got no messages. If we got a trailing one then we need to ask for more data.
		// Otherwise we just poll again and wait for one to be produced...
		if partialTrailingMessage {
			if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
				// we can't ask for more data, we've hit the configured limit
				child.sendError(ErrMessageTooLarge)
				child.offset++ // skip this one so we can keep processing future messages
			} else {
				// grow the fetch size so the partial trailing message fits next time
				child.fetchSize *= 2
				// check int32 overflow
				if child.fetchSize < 0 {
					child.fetchSize = math.MaxInt32
				}
				if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
					child.fetchSize = child.conf.Consumer.Fetch.Max
				}
			}
		} else if block.LastRecordsBatchOffset != nil && *block.LastRecordsBatchOffset < block.HighWaterMarkOffset {
			// check last record offset to avoid stuck if high watermark was not reached
			Logger.Printf("consumer/broker/%d received batch with zero records but high watermark was not reached, topic %s, partition %d, offset %d\n", child.broker.broker.ID(), child.topic, child.partition, *block.LastRecordsBatchOffset)
			child.offset = *block.LastRecordsBatchOffset + 1
		}

		return nil, nil
	}

	// we got messages, reset our fetch size in case it was increased for a previous request
	child.fetchSize = child.conf.Consumer.Fetch.Default
	atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)

	// abortedProducerIDs contains producerID which message should be ignored as uncommitted
	// - producerID are added when the partitionConsumer iterate over the offset at which an aborted transaction begins (abortedTransaction.FirstOffset)
	// - producerID are removed when partitionConsumer iterate over an aborted controlRecord, meaning the aborted transaction for this producer is over
	abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions))
	abortedTransactions := block.getAbortedTransactions()

	var messages []*ConsumerMessage
	for _, records := range block.RecordsSet {
		switch records.recordsType {
		case legacyRecords:
			messageSetMessages, err := child.parseMessages(records.MsgSet)
			if err != nil {
				return nil, err
			}

			messages = append(messages, messageSetMessages...)
		case defaultRecords:
			// Consume remaining abortedTransaction up to last offset of current batch
			for _, txn := range abortedTransactions {
				if txn.FirstOffset > records.RecordBatch.LastOffset() {
					break
				}
				abortedProducerIDs[txn.ProducerID] = struct{}{}
				// Pop abortedTransactions so that we never add it again
				abortedTransactions = abortedTransactions[1:]
			}

			recordBatchMessages, err := child.parseRecords(records.RecordBatch)
			if err != nil {
				return nil, err
			}

			// Parse and commit offset but do not expose messages that are:
			// - control records
			// - part of an aborted transaction when set to `ReadCommitted`

			// control record
			isControl, err := records.isControl()
			if err != nil {
				// I don't know why there is this continue in case of error to begin with
				// Safe bet is to ignore control messages if ReadUncommitted
				// and block on them in case of error and ReadCommitted
				if child.conf.Consumer.IsolationLevel == ReadCommitted {
					return nil, err
				}
				continue
			}
			if isControl {
				controlRecord, err := records.getControlRecord()
				if err != nil {
					return nil, err
				}

				if controlRecord.Type == ControlRecordAbort {
					delete(abortedProducerIDs, records.RecordBatch.ProducerID)
				}
				continue
			}

			// filter aborted transactions
			if child.conf.Consumer.IsolationLevel == ReadCommitted {
				_, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID]
				if records.RecordBatch.IsTransactional && isAborted {
					continue
				}
			}

			messages = append(messages, recordBatchMessages...)
		default:
			return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
		}
	}

	return messages, nil
}
// interceptors applies every configured consumer interceptor to msg, in
// configuration order, via safelyApplyInterceptor.
func (child *partitionConsumer) interceptors(msg *ConsumerMessage) {
	for _, ic := range child.conf.Consumer.Interceptors {
		msg.safelyApplyInterceptor(ic)
	}
}
// Pause implements PartitionConsumer.
// It sets the paused flag; fetchNewMessages skips paused partitions when
// building the next FetchRequest.
func (child *partitionConsumer) Pause() {
	atomic.StoreInt32(&child.paused, 1)
}

// Resume implements PartitionConsumer.
// It clears the paused flag so the partition is fetched again.
func (child *partitionConsumer) Resume() {
	atomic.StoreInt32(&child.paused, 0)
}

// IsPaused implements PartitionConsumer.
// It reports whether Pause has been called without a subsequent Resume.
func (child *partitionConsumer) IsPaused() bool {
	return atomic.LoadInt32(&child.paused) == 1
}
// brokerConsumer multiplexes fetch requests for all partitionConsumers
// currently served by a single broker.
type brokerConsumer struct {
	consumer         *consumer
	broker           *Broker
	input            chan *partitionConsumer   // incoming (re)subscriptions
	newSubscriptions chan []*partitionConsumer // batched subscriptions; nil signals "none pending"
	subscriptions    map[*partitionConsumer]none
	acks             sync.WaitGroup // one ack per subscription per fetch response
	refs             int
}
// newBrokerConsumer builds a brokerConsumer for the given broker and starts
// its two worker goroutines: subscriptionManager (batches incoming
// subscriptions) and subscriptionConsumer (issues fetches).
func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
	// refs deliberately starts at its zero value.
	bc := &brokerConsumer{
		consumer:         c,
		broker:           broker,
		input:            make(chan *partitionConsumer),
		newSubscriptions: make(chan []*partitionConsumer),
		subscriptions:    make(map[*partitionConsumer]none),
	}

	go withRecover(bc.subscriptionManager)
	go withRecover(bc.subscriptionConsumer)

	return bc
}
// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
// goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks
// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
// it nil if no new subscriptions are available.
func (bc *brokerConsumer) subscriptionManager() {
	// closing newSubscriptions on exit lets subscriptionConsumer terminate too
	defer close(bc.newSubscriptions)

	for {
		var partitionConsumers []*partitionConsumer

		// Check for any partition consumer asking to subscribe if there aren't
		// any, trigger the network request (to fetch Kafka messages) by sending "nil" to the
		// newSubscriptions channel
		select {
		case pc, ok := <-bc.input:
			if !ok {
				// input closed: shut down
				return
			}
			partitionConsumers = append(partitionConsumers, pc)
		case bc.newSubscriptions <- nil:
			continue
		}

		// wait up to 250ms to drain input of any further incoming
		// subscriptions
		for batchComplete := false; !batchComplete; {
			select {
			case pc, ok := <-bc.input:
				if !ok {
					return
				}
				partitionConsumers = append(partitionConsumers, pc)
			case <-time.After(250 * time.Millisecond):
				batchComplete = true
			}
		}

		Logger.Printf(
			"consumer/broker/%d accumulated %d new subscriptions\n",
			bc.broker.ID(), len(partitionConsumers))

		bc.newSubscriptions <- partitionConsumers
	}
}
// subscriptionConsumer ensures we will get nil right away if no new subscriptions is available
// this is the main loop that fetches Kafka messages
func (bc *brokerConsumer) subscriptionConsumer() {
	for newSubscriptions := range bc.newSubscriptions {
		bc.updateSubscriptions(newSubscriptions)

		if len(bc.subscriptions) == 0 {
			// We're about to be shut down or we're about to receive more subscriptions.
			// Take a small nap to avoid burning the CPU.
			time.Sleep(250 * time.Millisecond)
			continue
		}

		response, err := bc.fetchNewMessages()
		if err != nil {
			Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
			bc.abort(err)
			return
		}

		// fan the response out to every subscribed child; each child calls
		// acks.Done() when it has finished feeding (see responseFeeder), and
		// handleResponses runs only after all have acknowledged
		bc.acks.Add(len(bc.subscriptions))
		for child := range bc.subscriptions {
			child.feeder <- response
		}
		bc.acks.Wait()
		bc.handleResponses()
	}
}
// updateSubscriptions merges a batch of newly arrived partitionConsumers
// into the subscription map, then evicts any subscription whose owner is
// dying, closing its trigger channel so its dispatcher exits.
func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
	for _, child := range newSubscriptions {
		bc.subscriptions[child] = none{}
		Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
	}

	for child := range bc.subscriptions {
		select {
		case <-child.dying:
			Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
			close(child.trigger)
			// deleting entries while ranging over a map is safe in Go
			delete(bc.subscriptions, child)
		default:
			// no-op
		}
	}
}
// handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
func (bc *brokerConsumer) handleResponses() {
	for child := range bc.subscriptions {
		result := child.responseResult
		child.responseResult = nil

		if result == nil {
			if preferredBroker, err := child.preferredBroker(); err == nil {
				if bc.broker.ID() != preferredBroker.ID() {
					// not an error but needs redispatching to consume from preferred replica
					Logger.Printf(
						"consumer/broker/%d abandoned in favor of preferred replica broker/%d\n",
						bc.broker.ID(), preferredBroker.ID())
					child.trigger <- none{}
					delete(bc.subscriptions, child)
				}
			}
			continue
		}

		// Discard any replica preference.
		child.preferredReadReplica = invalidPreferredReplicaID

		if errors.Is(result, errTimedOut) {
			// the child already re-dispatched itself (see responseFeeder),
			// so only drop it from our map here
			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
				bc.broker.ID(), child.topic, child.partition)
			delete(bc.subscriptions, child)
		} else if errors.Is(result, ErrOffsetOutOfRange) {
			// there's no point in retrying this it will just fail the same way again
			// shut it down and force the user to choose what to do
			child.sendError(result)
			Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
			close(child.trigger)
			delete(bc.subscriptions, child)
		} else if errors.Is(result, ErrUnknownTopicOrPartition) || errors.Is(result, ErrNotLeaderForPartition) || errors.Is(result, ErrLeaderNotAvailable) || errors.Is(result, ErrReplicaNotAvailable) {
			// not an error, but does need redispatching
			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
				bc.broker.ID(), child.topic, child.partition, result)
			child.trigger <- none{}
			delete(bc.subscriptions, child)
		} else {
			// dunno, tell the user and try redispatching
			child.sendError(result)
			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
				bc.broker.ID(), child.topic, child.partition, result)
			child.trigger <- none{}
			delete(bc.subscriptions, child)
		}
	}
}
// abort tears this brokerConsumer down after a fetch-level failure: it
// deregisters from the parent consumer, closes the broker connection,
// propagates err to every current subscription, and keeps draining
// newSubscriptions (failing each incoming child the same way) until the
// subscriptionManager closes that channel.
func (bc *brokerConsumer) abort(err error) {
	bc.consumer.abandonBrokerConsumer(bc)
	_ = bc.broker.Close() // we don't care about the error this might return, we already have one

	for child := range bc.subscriptions {
		child.sendError(err)
		child.trigger <- none{}
	}

	for newSubscriptions := range bc.newSubscriptions {
		if len(newSubscriptions) == 0 {
			// a nil/empty batch means "no pending subscriptions"; skip it
			continue
		}
		for _, child := range newSubscriptions {
			child.sendError(err)
			child.trigger <- none{}
		}
	}
}
// fetchNewMessages builds one FetchRequest covering every non-paused
// subscription and issues it to the broker. The request version is raised
// step-by-step to the highest one the configured Kafka version supports.
func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
	request := &FetchRequest{
		MinBytes:    bc.consumer.conf.Consumer.Fetch.Min,
		MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
	}
	if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) {
		request.Version = 1
	}
	if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
		request.Version = 2
	}
	if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
		// v3 adds a response-wide size cap
		request.Version = 3
		request.MaxBytes = MaxResponseSize
	}
	if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
		// v4 adds transactional isolation level
		request.Version = 4
		request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
	}
	if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) {
		request.Version = 7
		// We do not currently implement KIP-227 FetchSessions. Setting the id to 0
		// and the epoch to -1 tells the broker not to generate as session ID we're going
		// to just ignore anyway.
		request.SessionID = 0
		request.SessionEpoch = -1
	}
	if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) {
		request.Version = 10
	}
	if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) {
		// v11 adds rack awareness for follower fetching
		request.Version = 11
		request.RackID = bc.consumer.conf.RackID
	}

	// paused partitions (see Pause/IsPaused) are simply left out of the request
	for child := range bc.subscriptions {
		if !child.IsPaused() {
			request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
		}
	}

	return bc.broker.Fetch(request)
}
|
package peerstream_multiplex
import (
"net"
smux "github.com/jbenet/go-stream-mux"
mp "github.com/jbenet/go-stream-muxer/Godeps/_workspace/src/github.com/whyrusleeping/go-multiplex" // Conn is a connection to a remote peer.
)
// conn adapts a go-multiplex connection to the stream-muxer Conn interface
// by embedding *mp.Multiplex.
type conn struct {
	*mp.Multiplex
}

// Close shuts down the underlying multiplexed connection.
func (c *conn) Close() error {
	return c.Multiplex.Close()
}

// IsClosed reports whether the underlying connection has been closed.
func (c *conn) IsClosed() bool {
	return c.Multiplex.IsClosed()
}

// OpenStream creates a new stream.
func (c *conn) OpenStream() (smux.Stream, error) {
	return c.Multiplex.NewStream(), nil
}

// Serve starts listening for incoming requests and handles them
// using given StreamHandler
func (c *conn) Serve(handler smux.StreamHandler) {
	c.Multiplex.Serve(func(s *mp.Stream) {
		handler(s)
	})
}

// Transport is a go-peerstream transport that constructs
// multiplex-backed connections.
type Transport struct{}

// DefaultTransport has default settings for multiplex
var DefaultTransport = &Transport{}

// NewConn wraps nc in a multiplex session; isServer selects which side of
// the multiplex handshake this end plays.
func (t *Transport) NewConn(nc net.Conn, isServer bool) (smux.Conn, error) {
	return &conn{mp.NewMultiplex(nc, isServer)}, nil
}
Fix import path typo: go-stream-mux -> go-stream-muxer.
package peerstream_multiplex
import (
"net"
smux "github.com/jbenet/go-stream-muxer"
mp "github.com/jbenet/go-stream-muxer/Godeps/_workspace/src/github.com/whyrusleeping/go-multiplex" // Conn is a connection to a remote peer.
)
// conn wraps *mp.Multiplex so a go-multiplex session satisfies the
// stream-muxer (smux) Conn interface.
type conn struct {
	*mp.Multiplex
}

// Close closes the underlying multiplexed connection.
func (c *conn) Close() error {
	return c.Multiplex.Close()
}

// IsClosed reports whether the underlying multiplexed connection is closed.
func (c *conn) IsClosed() bool {
	return c.Multiplex.IsClosed()
}

// OpenStream creates a new stream.
func (c *conn) OpenStream() (smux.Stream, error) {
	return c.Multiplex.NewStream(), nil
}

// Serve starts listening for incoming requests and handles them
// using given StreamHandler
func (c *conn) Serve(handler smux.StreamHandler) {
	c.Multiplex.Serve(func(s *mp.Stream) {
		handler(s)
	})
}

// Transport is a go-peerstream transport that constructs
// multiplex-backed connections.
type Transport struct{}

// DefaultTransport has default settings for multiplex
var DefaultTransport = &Transport{}

// NewConn starts a multiplex session over nc; isServer chooses the server
// side of the multiplex handshake.
func (t *Transport) NewConn(nc net.Conn, isServer bool) (smux.Conn, error) {
	return &conn{mp.NewMultiplex(nc, isServer)}, nil
}
|
// Copyright (c) 2013 Mathieu Turcotte
// Licensed under the MIT license.
package main
import (
"flag"
"fmt"
"github.com/MathieuTurcotte/sourcemap"
"os"
)
// Command-line flags: the generated-source position to look up, and whether
// to dump the parsed source map before resolving.
var line = flag.Int("line", -1, "line number to lookup")
var column = flag.Int("column", -1, "column number to lookup")
var printMap = flag.Bool("print", false, "whether to print the source map")
// main reads a source map from stdin and prints the original mapping for the
// generated position given by -line/-column. With -print, the parsed map is
// also dumped.
func main() {
	flag.Parse()

	sourceMap, err := sourcemap.Read(os.Stdin)
	if err != nil {
		// Fix: stop here. The original fell through and called
		// GetSourceMapping on the invalid sourceMap, which panics.
		fmt.Println(err)
		os.Exit(1)
	}
	if *printMap {
		fmt.Printf("%+v\n", sourceMap)
	}

	mapping, err := sourceMap.GetSourceMapping(*line, *column)
	if err != nil {
		fmt.Println(err)
	} else {
		fmt.Printf("%+v\n", mapping)
	}
}
Remove the -print flag and exit non-zero on read/lookup errors.
// Copyright (c) 2013 Mathieu Turcotte
// Licensed under the MIT license.
// Program that reads a source map from stdin and finds the original mapping
// for the given line and column numbers in the generated source.
package main
import (
"flag"
"fmt"
"github.com/MathieuTurcotte/sourcemap"
"os"
)
// Command-line flags selecting the generated-source position to look up.
var line = flag.Int("line", -1, "line number to lookup")
var column = flag.Int("column", -1, "column number to lookup")
// main reads a source map from stdin and prints the original mapping for the
// generated position given by -line/-column, exiting non-zero on any error.
func main() {
	flag.Parse()

	sm, err := sourcemap.Read(os.Stdin)
	if err != nil {
		fail(err)
	}

	mapping, err := sm.GetSourceMapping(*line, *column)
	if err != nil {
		fail(err)
	}

	fmt.Printf("%+v\n", mapping)
}

// fail reports err exactly as the inline handling did and exits with status 1.
func fail(err error) {
	fmt.Print(err)
	os.Exit(1)
}
|
package spotify
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"testing"
"time"
)
// TestFeaturedPlaylists verifies decoding of the featured-playlists fixture:
// the welcome message and the first playlist's name.
func TestFeaturedPlaylists(t *testing.T) {
	client, server := testClientFile(http.StatusOK, "test_data/featured_playlists.txt")
	defer server.Close()

	country := "SE"
	msg, p, err := client.FeaturedPlaylists(context.Background(), Country(country))
	if err != nil {
		t.Error(err)
		return
	}
	if msg != "Enjoy a mellow afternoon." {
		// Fix: the original message read "got'%s'" (missing space).
		t.Errorf("Want 'Enjoy a mellow afternoon.', got '%s'\n", msg)
	}
	// len() of a nil slice is 0, so the extra nil check was redundant.
	if len(p.Playlists) == 0 {
		t.Fatal("Empty playlists result")
	}
	expected := "Hangover Friendly Singer-Songwriter"
	if name := p.Playlists[0].Name; name != expected {
		t.Errorf("Want '%s', got '%s'\n", expected, name)
	}
}
// TestFeaturedPlaylistsExpiredToken checks that an HTTP 401 response is
// surfaced as a spotify Error carrying the unauthorized status code.
func TestFeaturedPlaylistsExpiredToken(t *testing.T) {
	// named errBody (not json) to avoid shadowing the encoding/json import
	errBody := `{
		"error": {
			"status": 401,
			"message": "The access token expired"
		}
	}`
	client, server := testClientString(http.StatusUnauthorized, errBody)
	defer server.Close()

	msg, pl, err := client.FeaturedPlaylists(context.Background())
	if msg != "" || pl != nil || err == nil {
		t.Fatal("Expected an error")
	}

	serr, ok := err.(Error)
	if !ok {
		t.Fatalf("Expected spotify Error, got %T", err)
	}
	if serr.Status != http.StatusUnauthorized {
		t.Error("Expected HTTP 401")
	}
}
// TestPlaylistsForUser checks decoding of a user's playlist page fixture.
func TestPlaylistsForUser(t *testing.T) {
	client, server := testClientFile(http.StatusOK, "test_data/playlists_for_user.txt")
	defer server.Close()

	playlists, err := client.GetPlaylistsForUser(context.Background(), "whizler")
	if err != nil {
		t.Error(err)
	}
	if len(playlists.Playlists) == 0 {
		t.Fatal("Didn't get any results")
	}

	first := playlists.Playlists[0]
	if got := first.Name; got != "Nederlandse Tipparade" {
		t.Error("Expected Nederlandse Tipparade, got", got)
	}
	if got := first.Tracks.Total; got != 29 {
		t.Error("Expected 29 tracks, got", got)
	}
}
// TestGetPlaylistOpt checks that fetching a playlist with a fields filter
// decodes only the requested subset of the fixture.
func TestGetPlaylistOpt(t *testing.T) {
	client, server := testClientFile(http.StatusOK, "test_data/get_playlist_opt.txt")
	defer server.Close()

	filter := "href,name,owner(!href,external_urls),tracks.items(added_by.id,track(name,href,album(name,href)))"
	playlist, err := client.GetPlaylist(context.Background(), "59ZbFPES4DQwEjBpWHzrtC", Fields(filter))
	if err != nil {
		t.Error(err)
	}

	if playlist.Collaborative {
		t.Error("Playlist shouldn't be collaborative")
	}
	if playlist.Description != "" {
		t.Error("No description should be included")
	}
	if playlist.Tracks.Total != 10 {
		t.Error("Expected 10 tracks")
	}
}
// TestFollowPlaylistSetsContentType asserts the follow-playlist request
// carries a JSON Content-Type header.
func TestFollowPlaylistSetsContentType(t *testing.T) {
	inspect := func(r *http.Request) {
		if r.Header.Get("Content-Type") != "application/json" {
			t.Error("Follow playlist request didn't contain Content-Type: application/json")
		}
	}
	client, server := testClientString(http.StatusOK, "", inspect)
	defer server.Close()

	if err := client.FollowPlaylist(context.Background(), "playlistID", true); err != nil {
		t.Error(err)
	}
}
// TestGetPlaylistTracks checks playlist-track decoding: total count, first
// track name, and the parsed added-at date.
func TestGetPlaylistTracks(t *testing.T) {
	client, server := testClientFile(http.StatusOK, "test_data/playlist_tracks.txt")
	defer server.Close()

	tracks, err := client.GetPlaylistTracks(context.Background(), "playlistID")
	if err != nil {
		t.Error(err)
	}
	if tracks.Total != 47 {
		t.Errorf("Got %d tracks, expected 47\n", tracks.Total)
	}
	if len(tracks.Tracks) == 0 {
		t.Fatal("No tracks returned")
	}

	wantName := "Time Of Our Lives"
	if gotName := tracks.Tracks[0].Track.Name; wantName != gotName {
		t.Errorf("Got '%s', expected '%s'\n", gotName, wantName)
	}

	addedAt, err := time.Parse(TimestampLayout, tracks.Tracks[0].AddedAt)
	if err != nil {
		t.Error(err)
	}
	if f := addedAt.Format(DateLayout); f != "2014-11-25" {
		t.Errorf("Expected added at 2014-11-25, got %s\n", f)
	}
}
// TestGetPlaylistItemsEpisodes checks decoding of a playlist whose items are
// podcast episodes: total, first episode name, and added-at date.
func TestGetPlaylistItemsEpisodes(t *testing.T) {
	client, server := testClientFile(http.StatusOK, "test_data/playlist_items_episodes.json")
	defer server.Close()

	tracks, err := client.GetPlaylistItems(context.Background(), "playlistID")
	if err != nil {
		t.Error(err)
	}
	if tracks.Total != 4 {
		// Fix: the message said "expected 47" (copy-paste from TestGetPlaylistTracks)
		t.Errorf("Got %d tracks, expected 4\n", tracks.Total)
	}
	if len(tracks.Items) == 0 {
		t.Fatal("No tracks returned")
	}

	expected := "112: Dirty Coms"
	actual := tracks.Items[0].Track.Episode.Name
	if expected != actual {
		t.Errorf("Got '%s', expected '%s'\n", actual, expected)
	}

	added := tracks.Items[0].AddedAt
	tm, err := time.Parse(TimestampLayout, added)
	if err != nil {
		t.Error(err)
	}
	if f := tm.Format(DateLayout); f != "2022-05-20" {
		// Fix: the message said "2014-11-25" while asserting 2022-05-20
		t.Errorf("Expected added at 2022-05-20, got %s\n", f)
	}
}
// TestGetPlaylistItemsTracks checks decoding of a playlist whose items are
// regular tracks: total, first track name, and added-at date.
func TestGetPlaylistItemsTracks(t *testing.T) {
	client, server := testClientFile(http.StatusOK, "test_data/playlist_items_tracks.json")
	defer server.Close()

	tracks, err := client.GetPlaylistItems(context.Background(), "playlistID")
	if err != nil {
		t.Error(err)
	}
	if tracks.Total != 2 {
		// Fix: the message said "expected 47" (copy-paste from TestGetPlaylistTracks)
		t.Errorf("Got %d tracks, expected 2\n", tracks.Total)
	}
	if len(tracks.Items) == 0 {
		t.Fatal("No tracks returned")
	}

	expected := "Typhoons"
	actual := tracks.Items[0].Track.Track.Name
	if expected != actual {
		t.Errorf("Got '%s', expected '%s'\n", actual, expected)
	}

	added := tracks.Items[0].AddedAt
	tm, err := time.Parse(TimestampLayout, added)
	if err != nil {
		t.Error(err)
	}
	if f := tm.Format(DateLayout); f != "2022-05-20" {
		// Fix: the message said "2014-11-25" while asserting 2022-05-20
		t.Errorf("Expected added at 2022-05-20, got %s\n", f)
	}
}
// TestGetPlaylistItemsTracksAndEpisodes checks decoding of a mixed playlist:
// an episode item (index 0) and a track item (index 2), with their added-at
// dates.
func TestGetPlaylistItemsTracksAndEpisodes(t *testing.T) {
	client, server := testClientFile(http.StatusOK, "test_data/playlist_items_episodes_and_tracks.json")
	defer server.Close()

	tracks, err := client.GetPlaylistItems(context.Background(), "playlistID")
	if err != nil {
		t.Error(err)
	}
	if tracks.Total != 4 {
		// Fix: the message said "expected 47" (copy-paste from TestGetPlaylistTracks)
		t.Errorf("Got %d tracks, expected 4\n", tracks.Total)
	}
	if len(tracks.Items) == 0 {
		t.Fatal("No tracks returned")
	}

	// First item in the fixture is an episode.
	expected := "491- The Missing Middle"
	actual := tracks.Items[0].Track.Episode.Name
	if expected != actual {
		t.Errorf("Got '%s', expected '%s'\n", actual, expected)
	}
	added := tracks.Items[0].AddedAt
	tm, err := time.Parse(TimestampLayout, added)
	if err != nil {
		t.Error(err)
	}
	if f := tm.Format(DateLayout); f != "2022-05-20" {
		// Fix: the message said "2014-11-25" while asserting 2022-05-20
		t.Errorf("Expected added at 2022-05-20, got %s\n", f)
	}

	// Third item is a regular track.
	expected = "Typhoons"
	actual = tracks.Items[2].Track.Track.Name
	if expected != actual {
		t.Errorf("Got '%s', expected '%s'\n", actual, expected)
	}
	// Fix: inspect the added-at of the item under test (Items[2]); the
	// original re-read Items[0]. Assumes the fixture's track item was also
	// added 2022-05-20 — TODO confirm against the fixture file.
	added = tracks.Items[2].AddedAt
	tm, err = time.Parse(TimestampLayout, added)
	if err != nil {
		t.Error(err)
	}
	if f := tm.Format(DateLayout); f != "2022-05-20" {
		t.Errorf("Expected added at 2022-05-20, got %s\n", f)
	}
}
// TestGetPlaylistItemsOverride asserts that an explicit AdditionalTypes
// option narrows the additional_types query parameter to just "episode".
func TestGetPlaylistItemsOverride(t *testing.T) {
	var gotTypes string
	capture := func(r *http.Request) {
		gotTypes = r.URL.Query().Get("additional_types")
	}
	client, server := testClientString(http.StatusForbidden, "", capture)
	defer server.Close()

	_, _ = client.GetPlaylistItems(context.Background(), "playlistID", AdditionalTypes(EpisodeAdditionalType))

	if gotTypes != "episode" {
		t.Errorf("Expected additional type episode, got %s\n", gotTypes)
	}
}
// TestGetPlaylistItemsDefault asserts that, without options, the request
// defaults additional_types to "episode,track".
func TestGetPlaylistItemsDefault(t *testing.T) {
	var types string
	client, server := testClientString(http.StatusForbidden, "", func(r *http.Request) {
		types = r.URL.Query().Get("additional_types")
	})
	defer server.Close()
	_, _ = client.GetPlaylistItems(context.Background(), "playlistID")
	if types != "episode,track" {
		// Fix: the message claimed only "episode" although the assertion
		// expects the default "episode,track".
		t.Errorf("Expected additional types episode,track, got %s\n", types)
	}
}
// TestUserFollowsPlaylist checks that the boolean array response is decoded
// positionally: one entry per queried user.
func TestUserFollowsPlaylist(t *testing.T) {
	client, server := testClientString(http.StatusOK, `[ true, false ]`)
	defer server.Close()

	follows, err := client.UserFollowsPlaylist(context.Background(), ID("2v3iNvBS8Ay1Gt2uXtUKUT"), "possan", "elogain")
	if err != nil {
		t.Error(err)
	}

	wrong := len(follows) != 2 || !follows[0] || follows[1]
	if wrong {
		t.Errorf("Expected '[true, false]', got %#v\n", follows)
	}
}
// newPlaylist is the canned create-playlist response body shared by the
// create tests. NOTE: "collaborative" is a %t fmt verb so each test can
// toggle it via fmt.Sprintf(newPlaylist, bool).
var newPlaylist = `
{
"collaborative": %t,
"description": "Test Description",
"external_urls": {
"spotify": "api.http://open.spotify.com/user/thelinmichael/playlist/7d2D2S200NyUE5KYs80PwO"
},
"followers": {
"href": null,
"total": 0
},
"href": "https://api.spotify.com/v1/users/thelinmichael/playlists/7d2D2S200NyUE5KYs80PwO",
"id": "7d2D2S200NyUE5KYs80PwO",
"images": [ ],
"name": "A New Playlist",
"owner": {
"external_urls": {
"spotify": "api.http://open.spotify.com/user/thelinmichael"
},
"href": "https://api.spotify.com/v1/users/thelinmichael",
"id": "thelinmichael",
"type": "user",
"url": "spotify:user:thelinmichael"
},
"public": false,
"snapshot_id": "s0o3TSuYnRLl2jch+oA4OEbKwq/fNxhGBkSPnvhZdmWjNV0q3uCAWuGIhEx8SHIx",
"tracks": {
"href": "https://api.spotify.com/v1/users/thelinmichael/playlists/7d2D2S200NyUE5KYs80PwO/tracks",
"items": [ ],
"limit": 100,
"next": null,
"offset": 0,
"previous": null,
"total": 0
},
"type": "playlist",
"url": "spotify:user:thelinmichael:playlist:7d2D2S200NyUE5KYs80PwO"
}`
// TestCreatePlaylist checks that a private, non-collaborative playlist is
// created and its fields decoded from the canned response.
func TestCreatePlaylist(t *testing.T) {
	client, server := testClientString(http.StatusCreated, fmt.Sprintf(newPlaylist, false))
	defer server.Close()

	pl, err := client.CreatePlaylistForUser(context.Background(), "thelinmichael", "A New Playlist", "Test Description", false, false)
	if err != nil {
		t.Error(err)
	}

	if pl.IsPublic {
		t.Error("Expected private playlist, got public")
	}
	if got := pl.Name; got != "A New Playlist" {
		t.Errorf("Expected 'A New Playlist', got '%s'\n", got)
	}
	if got := pl.Description; got != "Test Description" {
		t.Errorf("Expected 'Test Description', got '%s'\n", got)
	}
	if pl.Tracks.Total != 0 {
		t.Error("Expected new playlist to be empty")
	}
	if pl.Collaborative {
		t.Error("Expected non-collaborative playlist, got collaborative")
	}
}
// TestCreateCollaborativePlaylist checks that a private collaborative
// playlist round-trips with the collaborative flag set.
func TestCreateCollaborativePlaylist(t *testing.T) {
	client, server := testClientString(http.StatusCreated, fmt.Sprintf(newPlaylist, true))
	defer server.Close()

	pl, err := client.CreatePlaylistForUser(context.Background(), "thelinmichael", "A New Playlist", "Test Description", false, true)
	if err != nil {
		t.Error(err)
	}

	if pl.IsPublic {
		t.Error("Expected private playlist, got public")
	}
	if got := pl.Name; got != "A New Playlist" {
		t.Errorf("Expected 'A New Playlist', got '%s'\n", got)
	}
	if got := pl.Description; got != "Test Description" {
		t.Errorf("Expected 'Test Description', got '%s'\n", got)
	}
	if pl.Tracks.Total != 0 {
		t.Error("Expected new playlist to be empty")
	}
	if !pl.Collaborative {
		t.Error("Expected collaborative playlist, got non-collaborative")
	}
}
// TestRenamePlaylist verifies a rename request succeeds against a 200 reply.
func TestRenamePlaylist(t *testing.T) {
	client, server := testClientString(http.StatusOK, "")
	defer server.Close()

	err := client.ChangePlaylistName(context.Background(), ID("playlist-id"), "new name")
	if err != nil {
		t.Error(err)
	}
}
// TestChangePlaylistAccess verifies toggling playlist visibility succeeds
// against a 200 reply.
func TestChangePlaylistAccess(t *testing.T) {
	client, server := testClientString(http.StatusOK, "")
	defer server.Close()

	err := client.ChangePlaylistAccess(context.Background(), ID("playlist-id"), true)
	if err != nil {
		t.Error(err)
	}
}
// TestChangePlaylistDescription verifies updating a playlist description
// succeeds against a 200 reply.
func TestChangePlaylistDescription(t *testing.T) {
	client, server := testClientString(http.StatusOK, "")
	defer server.Close()

	err := client.ChangePlaylistDescription(context.Background(), ID("playlist-id"), "new description")
	if err != nil {
		t.Error(err)
	}
}
// TestChangePlaylistNamdAndAccess verifies the combined name+access update
// succeeds against a 200 reply.
// NOTE(review): "Namd" is a typo for "Name"; kept to preserve the exported
// test identifier.
func TestChangePlaylistNamdAndAccess(t *testing.T) {
	client, server := testClientString(http.StatusOK, "")
	defer server.Close()
	if err := client.ChangePlaylistNameAndAccess(context.Background(), ID("playlist-id"), "new_name", true); err != nil {
		t.Error(err)
	}
}
// TestChangePlaylistNamdAccessAndDescription verifies the combined
// name+access+description update succeeds against a 200 reply.
// NOTE(review): "Namd" is a typo for "Name"; kept to preserve the exported
// test identifier.
func TestChangePlaylistNamdAccessAndDescription(t *testing.T) {
	client, server := testClientString(http.StatusOK, "")
	defer server.Close()
	if err := client.ChangePlaylistNameAccessAndDescription(context.Background(), ID("playlist-id"), "new_name", "new description", true); err != nil {
		t.Error(err)
	}
}
// TestChangePlaylistNameFailure verifies a 403 reply is surfaced as an error.
func TestChangePlaylistNameFailure(t *testing.T) {
	client, server := testClientString(http.StatusForbidden, "")
	defer server.Close()

	err := client.ChangePlaylistName(context.Background(), ID("playlist-id"), "new_name")
	if err == nil {
		t.Error("Expected error but didn't get one")
	}
}
// TestAddTracksToPlaylist checks that adding tracks returns the snapshot ID
// from the response body.
func TestAddTracksToPlaylist(t *testing.T) {
	const wantSnapshot = "JbtmHBDBAYu3/bt8BOXKjzKx3i0b6LCa/wVjyl6qQ2Yf6nFXkbmzuEa+ZI/U1yF+"

	client, server := testClientString(http.StatusCreated, `{ "snapshot_id" : "JbtmHBDBAYu3/bt8BOXKjzKx3i0b6LCa/wVjyl6qQ2Yf6nFXkbmzuEa+ZI/U1yF+" }`)
	defer server.Close()

	snapshot, err := client.AddTracksToPlaylist(context.Background(), ID("playlist_id"), ID("track1"), ID("track2"))
	if err != nil {
		t.Error(err)
	}
	if snapshot != wantSnapshot {
		t.Error("Didn't get expected snapshot ID")
	}
}
// TestRemoveTracksFromPlaylist checks both sides of a track-removal call:
// the request body must carry a "tracks" array of {uri} objects, and the
// snapshot ID from the response must be returned to the caller.
func TestRemoveTracksFromPlaylist(t *testing.T) {
	client, server := testClientString(http.StatusOK, `{ "snapshot_id" : "JbtmHBDBAYu3/bt8BOXKjzKx3i0b6LCa/wVjyl6qQ2Yf6nFXkbmzuEa+ZI/U1yF+" }`, func(req *http.Request) {
		// inspect the outgoing request body before replying
		requestBody, err := ioutil.ReadAll(req.Body)
		if err != nil {
			t.Fatal("Could not read request body:", err)
		}
		var body map[string]interface{}
		err = json.Unmarshal(requestBody, &body)
		if err != nil {
			t.Fatal("Error decoding request body:", err)
		}
		tracksArray, ok := body["tracks"]
		if !ok {
			t.Error("No tracks JSON object in request body")
		}
		tracksSlice := tracksArray.([]interface{})
		if l := len(tracksSlice); l != 2 {
			t.Fatalf("Expected 2 tracks, got %d\n", l)
		}
		// only the first entry is checked in detail
		track0 := tracksSlice[0].(map[string]interface{})
		trackURI, ok := track0["uri"]
		if !ok {
			t.Error("Track object doesn't contain 'uri' field")
		}
		if trackURI != "spotify:track:track1" {
			t.Errorf("Expected URI: 'spotify:track:track1', got '%s'\n", trackURI)
		}
	})
	defer server.Close()
	snapshotID, err := client.RemoveTracksFromPlaylist(context.Background(), "playlistID", "track1", "track2")
	if err != nil {
		t.Error(err)
	}
	if snapshotID != "JbtmHBDBAYu3/bt8BOXKjzKx3i0b6LCa/wVjyl6qQ2Yf6nFXkbmzuEa+ZI/U1yF+" {
		t.Error("Incorrect snapshot ID")
	}
}
// TestRemoveTracksFromPlaylistOpt checks the position-aware removal variant:
// the request body must carry per-track position lists, must omit
// snapshot_id when none is passed, and the response snapshot must be
// returned.
func TestRemoveTracksFromPlaylistOpt(t *testing.T) {
	client, server := testClientString(http.StatusOK, `{ "snapshot_id" : "JbtmHBDBAYu3/bt8BOXKjzKx3i0b6LCa/wVjyl6qQ2Yf6nFXkbmzuEa+ZI/U1yF+" }`, func(req *http.Request) {
		requestBody, err := ioutil.ReadAll(req.Body)
		if err != nil {
			t.Fatal(err)
		}
		var body map[string]interface{}
		err = json.Unmarshal(requestBody, &body)
		if err != nil {
			t.Fatal(err)
		}
		// an empty snapshot argument must not serialize a snapshot_id field
		if _, ok := body["snapshot_id"]; ok {
			t.Error("JSON contains snapshot_id field when none was specified")
			fmt.Println(string(requestBody))
			return
		}
		jsonTracks := body["tracks"].([]interface{})
		if len(jsonTracks) != 3 {
			t.Fatal("Expected 3 tracks, got", len(jsonTracks))
		}
		// spot-check the middle entry: uri plus its single position
		track1 := jsonTracks[1].(map[string]interface{})
		expected := "spotify:track:track1"
		if track1["uri"] != expected {
			t.Fatalf("Want '%s', got '%s'\n", expected, track1["uri"])
		}
		indices := track1["positions"].([]interface{})
		if len(indices) != 1 || int(indices[0].(float64)) != 9 {
			t.Error("Track indices incorrect")
		}
	})
	defer server.Close()
	tracks := []TrackToRemove{
		NewTrackToRemove("track0", []int{0, 4}), // remove track0 in position 0 and 4
		NewTrackToRemove("track1", []int{9}),    // remove track1 in position 9...
		NewTrackToRemove("track2", []int{8}),
	}
	// intentionally not passing a snapshot ID here
	snapshotID, err := client.RemoveTracksFromPlaylistOpt(context.Background(), "playlistID", tracks, "")
	if err != nil || snapshotID != "JbtmHBDBAYu3/bt8BOXKjzKx3i0b6LCa/wVjyl6qQ2Yf6nFXkbmzuEa+ZI/U1yF+" {
		t.Fatal("Remove call failed. err=", err)
	}
}
// TestReplacePlaylistTracks checks that a successful (201 Created) replace
// call returns no error.
func TestReplacePlaylistTracks(t *testing.T) {
	client, server := testClientString(http.StatusCreated, "")
	defer server.Close()
	if err := client.ReplacePlaylistTracks(context.Background(), "playlistID", "track1", "track2"); err != nil {
		t.Error(err)
	}
}
// TestReplacePlaylistItems verifies that ReplacePlaylistItems sends the
// expected JSON body and returns the snapshot ID from the response.
func TestReplacePlaylistItems(t *testing.T) {
	var body []byte
	client, server := testClientString(http.StatusCreated, `{"snapshot_id": "test_snapshot"}`, func(request *http.Request) {
		var err error
		body, err = ioutil.ReadAll(request.Body)
		defer func() {
			err := request.Body.Close()
			if err != nil {
				t.Error(err)
			}
		}()
		if err != nil {
			t.Error(err)
		}
	})
	defer server.Close()
	snapshot, err := client.ReplacePlaylistItems(context.Background(), "playlistID", "spotify:track:track1", "spotify:track:track2")
	if err != nil {
		t.Error(err)
	}
	if snapshot != "test_snapshot" {
		t.Error("Incorrect snapshot returned")
	}
	// Single constant so the expectation and the failure message cannot
	// drift apart: the old hand-written message contained a space after the
	// comma that the actual compared value does not have.
	const expectedBody = `{"uris":["spotify:track:track1","spotify:track:track2"]}`
	if string(body) != expectedBody {
		t.Errorf("Expected '%s' as body, got %s", expectedBody, string(body))
	}
}
// TestReplacePlaylistTracksForbidden checks that an HTTP 403 response
// surfaces as an error from ReplacePlaylistTracks.
func TestReplacePlaylistTracksForbidden(t *testing.T) {
	client, server := testClientString(http.StatusForbidden, "")
	defer server.Close()
	if err := client.ReplacePlaylistTracks(context.Background(), "playlistID", "track1", "track2"); err == nil {
		t.Error("Replace succeeded but shouldn't have")
	}
}
// TestReplacePlaylistItemsForbidden checks that an HTTP 403 response yields
// an error and an empty snapshot ID.
func TestReplacePlaylistItemsForbidden(t *testing.T) {
	client, server := testClientString(http.StatusForbidden, "")
	defer server.Close()
	snapshot, err := client.ReplacePlaylistItems(context.Background(), "playlistID", "spotify:track:track1", "spotify:track:track2")
	if err == nil {
		t.Error("Replace succeeded but shouldn't have")
	}
	if snapshot != "" {
		t.Fatal("Incorrect snapshot returned")
	}
}
// TestReorderPlaylistRequest inspects the PUT request built by
// ReorderPlaylistTracks: required fields must be present with the right
// values, and zero-valued optional fields must be omitted. The server
// replies 404 so the call itself is expected to fail.
func TestReorderPlaylistRequest(t *testing.T) {
	client, server := testClientString(http.StatusNotFound, "", func(req *http.Request) {
		if ct := req.Header.Get("Content-Type"); ct != "application/json" {
			t.Errorf("Expected Content-Type: application/json, got '%s'\n", ct)
		}
		if req.Method != "PUT" {
			t.Errorf("Expected a PUT, got a %s\n", req.Method)
		}
		// unmarshal the JSON into a map[string]interface{}
		// so we can test for existence of certain keys
		var body map[string]interface{}
		err := json.NewDecoder(req.Body).Decode(&body)
		if err != nil {
			t.Errorf("Expected no error, got %v", err)
		}
		// JSON numbers decode to float64, hence the float comparisons.
		if start, ok := body["range_start"]; ok {
			if start != float64(3) {
				t.Errorf("Expected range_start to be 3, but it was %#v\n", start)
			}
		} else {
			t.Errorf("Required field range_start is missing")
		}
		if ib, ok := body["insert_before"]; ok {
			if ib != float64(8) {
				t.Errorf("Expected insert_before to be 8, but it was %#v\n", ib)
			}
		} else {
			t.Errorf("Required field insert_before is missing")
		}
		if _, ok := body["range_length"]; ok {
			t.Error("Parameter range_length shouldn't have been in body")
		}
		if _, ok := body["snapshot_id"]; ok {
			t.Error("Parameter snapshot_id shouldn't have been in body")
		}
	})
	defer server.Close()
	_, err := client.ReorderPlaylistTracks(context.Background(), "playlist", PlaylistReorderOptions{
		RangeStart:   3,
		InsertBefore: 8,
	})
	if err == nil || !strings.Contains(err.Error(), "HTTP 404: Not Found") {
		t.Errorf("Expected error 'spotify: HTTP 404: Not Found (body empty)', got %v", err)
	}
}
// TestSetPlaylistImage verifies that SetPlaylistImage PUTs the image with
// Content-Type image/jpeg and base64-encodes the reader's bytes
// ("Zm9v" is base64 of "foo").
func TestSetPlaylistImage(t *testing.T) {
	client, server := testClientString(http.StatusAccepted, "", func(req *http.Request) {
		if ct := req.Header.Get("Content-Type"); ct != "image/jpeg" {
			t.Errorf("wrong content type, got %s, want image/jpeg", ct)
		}
		if req.Method != "PUT" {
			t.Errorf("expected a PUT, got a %s\n", req.Method)
		}
		body, err := ioutil.ReadAll(req.Body)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(body, []byte("Zm9v")) {
			t.Errorf("invalid request body: want Zm9v, got %s", string(body))
		}
	})
	defer server.Close()
	err := client.SetPlaylistImage(context.Background(), "playlist", bytes.NewReader([]byte("foo")))
	if err != nil {
		t.Fatal(err)
	}
}
Introduce expectedBody constant
package spotify
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"testing"
"time"
)
// TestFeaturedPlaylists verifies that the featured-playlists message and the
// first playlist name are decoded from the canned response file.
func TestFeaturedPlaylists(t *testing.T) {
	client, server := testClientFile(http.StatusOK, "test_data/featured_playlists.txt")
	defer server.Close()
	country := "SE"
	msg, p, err := client.FeaturedPlaylists(context.Background(), Country(country))
	if err != nil {
		t.Error(err)
		return
	}
	if msg != "Enjoy a mellow afternoon." {
		// Fixed diagnostic typo: was "got'%s'" (missing space).
		t.Errorf("Want 'Enjoy a mellow afternoon.', got '%s'\n", msg)
	}
	// len() of a nil slice is 0, so the explicit nil check was redundant.
	if len(p.Playlists) == 0 {
		t.Fatal("Empty playlists result")
	}
	expected := "Hangover Friendly Singer-Songwriter"
	if name := p.Playlists[0].Name; name != expected {
		t.Errorf("Want '%s', got '%s'\n", expected, name)
	}
}
// TestFeaturedPlaylistsExpiredToken verifies that an expired-token response
// is surfaced to the caller as a spotify Error carrying HTTP 401.
func TestFeaturedPlaylistsExpiredToken(t *testing.T) {
	// Renamed from "json": the old local name shadowed the imported
	// encoding/json package.
	expiredTokenJSON := `{
		"error": {
			"status": 401,
			"message": "The access token expired"
		}
	}`
	client, server := testClientString(http.StatusUnauthorized, expiredTokenJSON)
	defer server.Close()
	msg, pl, err := client.FeaturedPlaylists(context.Background())
	if msg != "" || pl != nil || err == nil {
		t.Fatal("Expected an error")
	}
	serr, ok := err.(Error)
	if !ok {
		t.Fatalf("Expected spotify Error, got %T", err)
	}
	if serr.Status != http.StatusUnauthorized {
		t.Error("Expected HTTP 401")
	}
}
// TestPlaylistsForUser verifies decoding of a user's playlist page: first
// playlist name and its track total.
func TestPlaylistsForUser(t *testing.T) {
	client, server := testClientFile(http.StatusOK, "test_data/playlists_for_user.txt")
	defer server.Close()
	playlists, err := client.GetPlaylistsForUser(context.Background(), "whizler")
	if err != nil {
		t.Error(err)
	}
	if l := len(playlists.Playlists); l == 0 {
		t.Fatal("Didn't get any results")
	}
	p := playlists.Playlists[0]
	if p.Name != "Nederlandse Tipparade" {
		t.Error("Expected Nederlandse Tipparade, got", p.Name)
	}
	if p.Tracks.Total != 29 {
		t.Error("Expected 29 tracks, got", p.Tracks.Total)
	}
}
// TestGetPlaylistOpt verifies that a field-filtered playlist request decodes
// correctly; fields excluded by the filter come back as zero values.
func TestGetPlaylistOpt(t *testing.T) {
	client, server := testClientFile(http.StatusOK, "test_data/get_playlist_opt.txt")
	defer server.Close()
	fields := "href,name,owner(!href,external_urls),tracks.items(added_by.id,track(name,href,album(name,href)))"
	p, err := client.GetPlaylist(context.Background(), "59ZbFPES4DQwEjBpWHzrtC", Fields(fields))
	if err != nil {
		t.Error(err)
	}
	if p.Collaborative {
		t.Error("Playlist shouldn't be collaborative")
	}
	if p.Description != "" {
		t.Error("No description should be included")
	}
	if p.Tracks.Total != 10 {
		t.Error("Expected 10 tracks")
	}
}
// TestFollowPlaylistSetsContentType ensures FollowPlaylist sends a
// Content-Type: application/json header.
func TestFollowPlaylistSetsContentType(t *testing.T) {
	client, server := testClientString(http.StatusOK, "", func(req *http.Request) {
		if ct := req.Header.Get("Content-Type"); ct != "application/json" {
			t.Error("Follow playlist request didn't contain Content-Type: application/json")
		}
	})
	defer server.Close()
	if err := client.FollowPlaylist(context.Background(), "playlistID", true); err != nil {
		t.Error(err)
	}
}
// TestGetPlaylistTracks verifies decoding of a playlist track page: total
// count, first track name, and the AddedAt timestamp format.
func TestGetPlaylistTracks(t *testing.T) {
	client, server := testClientFile(http.StatusOK, "test_data/playlist_tracks.txt")
	defer server.Close()
	tracks, err := client.GetPlaylistTracks(context.Background(), "playlistID")
	if err != nil {
		t.Error(err)
	}
	if tracks.Total != 47 {
		t.Errorf("Got %d tracks, expected 47\n", tracks.Total)
	}
	if len(tracks.Tracks) == 0 {
		t.Fatal("No tracks returned")
	}
	expected := "Time Of Our Lives"
	actual := tracks.Tracks[0].Track.Name
	if expected != actual {
		t.Errorf("Got '%s', expected '%s'\n", actual, expected)
	}
	// AddedAt must parse with the package-wide timestamp layout.
	added := tracks.Tracks[0].AddedAt
	tm, err := time.Parse(TimestampLayout, added)
	if err != nil {
		t.Error(err)
	}
	if f := tm.Format(DateLayout); f != "2014-11-25" {
		t.Errorf("Expected added at 2014-11-25, got %s\n", f)
	}
}
// TestGetPlaylistItemsEpisodes verifies decoding of a playlist whose items
// are podcast episodes.
func TestGetPlaylistItemsEpisodes(t *testing.T) {
	client, server := testClientFile(http.StatusOK, "test_data/playlist_items_episodes.json")
	defer server.Close()
	tracks, err := client.GetPlaylistItems(context.Background(), "playlistID")
	if err != nil {
		t.Error(err)
	}
	if tracks.Total != 4 {
		// Message now matches the asserted value (was "expected 47",
		// copy-pasted from another test).
		t.Errorf("Got %d tracks, expected 4\n", tracks.Total)
	}
	if len(tracks.Items) == 0 {
		t.Fatal("No tracks returned")
	}
	expected := "112: Dirty Coms"
	actual := tracks.Items[0].Track.Episode.Name
	if expected != actual {
		t.Errorf("Got '%s', expected '%s'\n", actual, expected)
	}
	added := tracks.Items[0].AddedAt
	tm, err := time.Parse(TimestampLayout, added)
	if err != nil {
		t.Error(err)
	}
	if f := tm.Format(DateLayout); f != "2022-05-20" {
		// Message now matches the asserted date (was "2014-11-25").
		t.Errorf("Expected added at 2022-05-20, got %s\n", f)
	}
}
// TestGetPlaylistItemsTracks verifies decoding of a playlist whose items are
// regular tracks.
func TestGetPlaylistItemsTracks(t *testing.T) {
	client, server := testClientFile(http.StatusOK, "test_data/playlist_items_tracks.json")
	defer server.Close()
	tracks, err := client.GetPlaylistItems(context.Background(), "playlistID")
	if err != nil {
		t.Error(err)
	}
	if tracks.Total != 2 {
		// Message now matches the asserted value (was "expected 47").
		t.Errorf("Got %d tracks, expected 2\n", tracks.Total)
	}
	if len(tracks.Items) == 0 {
		t.Fatal("No tracks returned")
	}
	expected := "Typhoons"
	actual := tracks.Items[0].Track.Track.Name
	if expected != actual {
		t.Errorf("Got '%s', expected '%s'\n", actual, expected)
	}
	added := tracks.Items[0].AddedAt
	tm, err := time.Parse(TimestampLayout, added)
	if err != nil {
		t.Error(err)
	}
	if f := tm.Format(DateLayout); f != "2022-05-20" {
		// Message now matches the asserted date (was "2014-11-25").
		t.Errorf("Expected added at 2022-05-20, got %s\n", f)
	}
}
// TestGetPlaylistItemsTracksAndEpisodes verifies decoding of a playlist that
// mixes podcast episodes and regular tracks.
func TestGetPlaylistItemsTracksAndEpisodes(t *testing.T) {
	client, server := testClientFile(http.StatusOK, "test_data/playlist_items_episodes_and_tracks.json")
	defer server.Close()
	tracks, err := client.GetPlaylistItems(context.Background(), "playlistID")
	if err != nil {
		t.Error(err)
	}
	if tracks.Total != 4 {
		// Message now matches the asserted value (was "expected 47").
		t.Errorf("Got %d tracks, expected 4\n", tracks.Total)
	}
	if len(tracks.Items) == 0 {
		t.Fatal("No tracks returned")
	}
	expected := "491- The Missing Middle"
	actual := tracks.Items[0].Track.Episode.Name
	if expected != actual {
		t.Errorf("Got '%s', expected '%s'\n", actual, expected)
	}
	added := tracks.Items[0].AddedAt
	tm, err := time.Parse(TimestampLayout, added)
	if err != nil {
		t.Error(err)
	}
	if f := tm.Format(DateLayout); f != "2022-05-20" {
		// Message now matches the asserted date (was "2014-11-25").
		t.Errorf("Expected added at 2022-05-20, got %s\n", f)
	}
	expected = "Typhoons"
	actual = tracks.Items[2].Track.Track.Name
	if expected != actual {
		t.Errorf("Got '%s', expected '%s'\n", actual, expected)
	}
	// NOTE(review): this re-reads Items[0]'s timestamp even though the name
	// check above moved on to Items[2]; probably meant Items[2] — confirm
	// against the fixture before changing the index.
	added = tracks.Items[0].AddedAt
	tm, err = time.Parse(TimestampLayout, added)
	if err != nil {
		t.Error(err)
	}
	if f := tm.Format(DateLayout); f != "2022-05-20" {
		t.Errorf("Expected added at 2022-05-20, got %s\n", f)
	}
}
// TestGetPlaylistItemsOverride checks that an explicit AdditionalTypes option
// overrides the default additional_types query parameter.
func TestGetPlaylistItemsOverride(t *testing.T) {
	var types string
	handler := func(r *http.Request) {
		types = r.URL.Query().Get("additional_types")
	}
	client, server := testClientString(http.StatusForbidden, "", handler)
	defer server.Close()
	_, _ = client.GetPlaylistItems(context.Background(), "playlistID", AdditionalTypes(EpisodeAdditionalType))
	if types != "episode" {
		t.Errorf("Expected additional type episode, got %s\n", types)
	}
}
// TestGetPlaylistItemsDefault checks that, with no options, GetPlaylistItems
// requests both additional types ("episode,track").
func TestGetPlaylistItemsDefault(t *testing.T) {
	var types string
	client, server := testClientString(http.StatusForbidden, "", func(r *http.Request) {
		types = r.URL.Query().Get("additional_types")
	})
	defer server.Close()
	_, _ = client.GetPlaylistItems(context.Background(), "playlistID")
	if types != "episode,track" {
		// Message now names the actual expectation (was "episode").
		t.Errorf("Expected additional types episode,track, got %s\n", types)
	}
}
// TestUserFollowsPlaylist verifies that the boolean-array response maps to
// the queried user IDs in order.
func TestUserFollowsPlaylist(t *testing.T) {
	client, server := testClientString(http.StatusOK, `[ true, false ]`)
	defer server.Close()
	follows, err := client.UserFollowsPlaylist(context.Background(), ID("2v3iNvBS8Ay1Gt2uXtUKUT"), "possan", "elogain")
	if err != nil {
		t.Error(err)
	}
	if len(follows) != 2 || !follows[0] || follows[1] {
		t.Errorf("Expected '[true, false]', got %#v\n", follows)
	}
}
// newPlaylist is a canned create-playlist response template. The %t verb is
// the playlist's "collaborative" flag, filled in via fmt.Sprintf by the
// tests below.
var newPlaylist = `
{
	"collaborative": %t,
	"description": "Test Description",
	"external_urls": {
		"spotify": "api.http://open.spotify.com/user/thelinmichael/playlist/7d2D2S200NyUE5KYs80PwO"
	},
	"followers": {
		"href": null,
		"total": 0
	},
	"href": "https://api.spotify.com/v1/users/thelinmichael/playlists/7d2D2S200NyUE5KYs80PwO",
	"id": "7d2D2S200NyUE5KYs80PwO",
	"images": [ ],
	"name": "A New Playlist",
	"owner": {
		"external_urls": {
			"spotify": "api.http://open.spotify.com/user/thelinmichael"
		},
		"href": "https://api.spotify.com/v1/users/thelinmichael",
		"id": "thelinmichael",
		"type": "user",
		"url": "spotify:user:thelinmichael"
	},
	"public": false,
	"snapshot_id": "s0o3TSuYnRLl2jch+oA4OEbKwq/fNxhGBkSPnvhZdmWjNV0q3uCAWuGIhEx8SHIx",
	"tracks": {
		"href": "https://api.spotify.com/v1/users/thelinmichael/playlists/7d2D2S200NyUE5KYs80PwO/tracks",
		"items": [ ],
		"limit": 100,
		"next": null,
		"offset": 0,
		"previous": null,
		"total": 0
	},
	"type": "playlist",
	"url": "spotify:user:thelinmichael:playlist:7d2D2S200NyUE5KYs80PwO"
}`
// TestCreatePlaylist verifies decoding of a newly created private,
// non-collaborative playlist.
func TestCreatePlaylist(t *testing.T) {
	client, server := testClientString(http.StatusCreated, fmt.Sprintf(newPlaylist, false))
	defer server.Close()
	p, err := client.CreatePlaylistForUser(context.Background(), "thelinmichael", "A New Playlist", "Test Description", false, false)
	if err != nil {
		t.Error(err)
	}
	if p.IsPublic {
		t.Error("Expected private playlist, got public")
	}
	if p.Name != "A New Playlist" {
		t.Errorf("Expected 'A New Playlist', got '%s'\n", p.Name)
	}
	if p.Description != "Test Description" {
		t.Errorf("Expected 'Test Description', got '%s'\n", p.Description)
	}
	if p.Tracks.Total != 0 {
		t.Error("Expected new playlist to be empty")
	}
	if p.Collaborative {
		t.Error("Expected non-collaborative playlist, got collaborative")
	}
}
// TestCreateCollaborativePlaylist is the collaborative counterpart of
// TestCreatePlaylist (newPlaylist template filled with %t = true).
func TestCreateCollaborativePlaylist(t *testing.T) {
	client, server := testClientString(http.StatusCreated, fmt.Sprintf(newPlaylist, true))
	defer server.Close()
	p, err := client.CreatePlaylistForUser(context.Background(), "thelinmichael", "A New Playlist", "Test Description", false, true)
	if err != nil {
		t.Error(err)
	}
	if p.IsPublic {
		t.Error("Expected private playlist, got public")
	}
	if p.Name != "A New Playlist" {
		t.Errorf("Expected 'A New Playlist', got '%s'\n", p.Name)
	}
	if p.Description != "Test Description" {
		t.Errorf("Expected 'Test Description', got '%s'\n", p.Description)
	}
	if p.Tracks.Total != 0 {
		t.Error("Expected new playlist to be empty")
	}
	if !p.Collaborative {
		t.Error("Expected collaborative playlist, got non-collaborative")
	}
}
// TestRenamePlaylist checks that ChangePlaylistName succeeds on HTTP 200.
func TestRenamePlaylist(t *testing.T) {
	client, server := testClientString(http.StatusOK, "")
	defer server.Close()
	err := client.ChangePlaylistName(context.Background(), ID("playlist-id"), "new name")
	if err != nil {
		t.Error(err)
	}
}
// TestChangePlaylistAccess checks that ChangePlaylistAccess succeeds on
// HTTP 200.
func TestChangePlaylistAccess(t *testing.T) {
	client, server := testClientString(http.StatusOK, "")
	defer server.Close()
	if err := client.ChangePlaylistAccess(context.Background(), ID("playlist-id"), true); err != nil {
		t.Error(err)
	}
}
// TestChangePlaylistDescription checks that ChangePlaylistDescription
// succeeds on HTTP 200.
func TestChangePlaylistDescription(t *testing.T) {
	client, server := testClientString(http.StatusOK, "")
	defer server.Close()
	if err := client.ChangePlaylistDescription(context.Background(), ID("playlist-id"), "new description"); err != nil {
		t.Error(err)
	}
}
// TestChangePlaylistNamdAndAccess checks the combined name+access update.
// ("Namd" is a typo for "Name"; kept to avoid churning the test name.)
func TestChangePlaylistNamdAndAccess(t *testing.T) {
	client, server := testClientString(http.StatusOK, "")
	defer server.Close()
	if err := client.ChangePlaylistNameAndAccess(context.Background(), ID("playlist-id"), "new_name", true); err != nil {
		t.Error(err)
	}
}
// TestChangePlaylistNamdAccessAndDescription checks the combined
// name+access+description update. ("Namd" is a typo for "Name"; kept to
// avoid churning the test name.)
func TestChangePlaylistNamdAccessAndDescription(t *testing.T) {
	client, server := testClientString(http.StatusOK, "")
	defer server.Close()
	if err := client.ChangePlaylistNameAccessAndDescription(context.Background(), ID("playlist-id"), "new_name", "new description", true); err != nil {
		t.Error(err)
	}
}
// TestChangePlaylistNameFailure checks that an HTTP 403 response surfaces as
// an error from ChangePlaylistName.
func TestChangePlaylistNameFailure(t *testing.T) {
	client, server := testClientString(http.StatusForbidden, "")
	defer server.Close()
	if err := client.ChangePlaylistName(context.Background(), ID("playlist-id"), "new_name"); err == nil {
		t.Error("Expected error but didn't get one")
	}
}
// TestAddTracksToPlaylist verifies that AddTracksToPlaylist returns the
// snapshot_id from the API response.
func TestAddTracksToPlaylist(t *testing.T) {
	client, server := testClientString(http.StatusCreated, `{ "snapshot_id" : "JbtmHBDBAYu3/bt8BOXKjzKx3i0b6LCa/wVjyl6qQ2Yf6nFXkbmzuEa+ZI/U1yF+" }`)
	defer server.Close()
	snapshot, err := client.AddTracksToPlaylist(context.Background(), ID("playlist_id"), ID("track1"), ID("track2"))
	if err != nil {
		t.Error(err)
	}
	if snapshot != "JbtmHBDBAYu3/bt8BOXKjzKx3i0b6LCa/wVjyl6qQ2Yf6nFXkbmzuEa+ZI/U1yF+" {
		t.Error("Didn't get expected snapshot ID")
	}
}
// TestRemoveTracksFromPlaylist verifies the request body built for a plain
// (position-less) remove call and the returned snapshot ID.
func TestRemoveTracksFromPlaylist(t *testing.T) {
	client, server := testClientString(http.StatusOK, `{ "snapshot_id" : "JbtmHBDBAYu3/bt8BOXKjzKx3i0b6LCa/wVjyl6qQ2Yf6nFXkbmzuEa+ZI/U1yF+" }`, func(req *http.Request) {
		requestBody, err := ioutil.ReadAll(req.Body)
		if err != nil {
			t.Fatal("Could not read request body:", err)
		}
		var body map[string]interface{}
		err = json.Unmarshal(requestBody, &body)
		if err != nil {
			t.Fatal("Error decoding request body:", err)
		}
		tracksArray, ok := body["tracks"]
		if !ok {
			t.Error("No tracks JSON object in request body")
		}
		tracksSlice := tracksArray.([]interface{})
		if l := len(tracksSlice); l != 2 {
			t.Fatalf("Expected 2 tracks, got %d\n", l)
		}
		// Each entry must be an object whose "uri" is the full track URI.
		track0 := tracksSlice[0].(map[string]interface{})
		trackURI, ok := track0["uri"]
		if !ok {
			t.Error("Track object doesn't contain 'uri' field")
		}
		if trackURI != "spotify:track:track1" {
			t.Errorf("Expected URI: 'spotify:track:track1', got '%s'\n", trackURI)
		}
	})
	defer server.Close()
	snapshotID, err := client.RemoveTracksFromPlaylist(context.Background(), "playlistID", "track1", "track2")
	if err != nil {
		t.Error(err)
	}
	if snapshotID != "JbtmHBDBAYu3/bt8BOXKjzKx3i0b6LCa/wVjyl6qQ2Yf6nFXkbmzuEa+ZI/U1yF+" {
		t.Error("Incorrect snapshot ID")
	}
}
// TestRemoveTracksFromPlaylistOpt verifies that RemoveTracksFromPlaylistOpt
// serializes tracks with positions, omits snapshot_id when empty, and
// returns the snapshot ID on success.
func TestRemoveTracksFromPlaylistOpt(t *testing.T) {
	client, server := testClientString(http.StatusOK, `{ "snapshot_id" : "JbtmHBDBAYu3/bt8BOXKjzKx3i0b6LCa/wVjyl6qQ2Yf6nFXkbmzuEa+ZI/U1yF+" }`, func(req *http.Request) {
		requestBody, err := ioutil.ReadAll(req.Body)
		if err != nil {
			t.Fatal(err)
		}
		var body map[string]interface{}
		err = json.Unmarshal(requestBody, &body)
		if err != nil {
			t.Fatal(err)
		}
		// An empty snapshot ID argument must not produce a snapshot_id field.
		if _, ok := body["snapshot_id"]; ok {
			t.Error("JSON contains snapshot_id field when none was specified")
			fmt.Println(string(requestBody))
			return
		}
		jsonTracks := body["tracks"].([]interface{})
		if len(jsonTracks) != 3 {
			t.Fatal("Expected 3 tracks, got", len(jsonTracks))
		}
		track1 := jsonTracks[1].(map[string]interface{})
		expected := "spotify:track:track1"
		if track1["uri"] != expected {
			t.Fatalf("Want '%s', got '%s'\n", expected, track1["uri"])
		}
		indices := track1["positions"].([]interface{})
		if len(indices) != 1 || int(indices[0].(float64)) != 9 {
			t.Error("Track indices incorrect")
		}
	})
	defer server.Close()
	tracks := []TrackToRemove{
		NewTrackToRemove("track0", []int{0, 4}), // remove track0 in position 0 and 4
		NewTrackToRemove("track1", []int{9}),    // remove track1 in position 9...
		NewTrackToRemove("track2", []int{8}),
	}
	// intentionally not passing a snapshot ID here
	snapshotID, err := client.RemoveTracksFromPlaylistOpt(context.Background(), "playlistID", tracks, "")
	if err != nil || snapshotID != "JbtmHBDBAYu3/bt8BOXKjzKx3i0b6LCa/wVjyl6qQ2Yf6nFXkbmzuEa+ZI/U1yF+" {
		t.Fatal("Remove call failed. err=", err)
	}
}
// TestReplacePlaylistTracks checks that a successful (201 Created) replace
// call returns no error.
func TestReplacePlaylistTracks(t *testing.T) {
	client, server := testClientString(http.StatusCreated, "")
	defer server.Close()
	err := client.ReplacePlaylistTracks(context.Background(), "playlistID", "track1", "track2")
	if err != nil {
		t.Error(err)
	}
}
// TestReplacePlaylistItems verifies that ReplacePlaylistItems sends the
// expected JSON body and returns the snapshot ID from the response.
func TestReplacePlaylistItems(t *testing.T) {
	var body []byte
	client, server := testClientString(http.StatusCreated, `{"snapshot_id": "test_snapshot"}`, func(request *http.Request) {
		var err error
		body, err = ioutil.ReadAll(request.Body)
		defer func() {
			err := request.Body.Close()
			if err != nil {
				t.Error(err)
			}
		}()
		if err != nil {
			t.Error(err)
		}
	})
	defer server.Close()
	snapshot, err := client.ReplacePlaylistItems(context.Background(), "playlistID", "spotify:track:track1", "spotify:track:track2")
	if err != nil {
		t.Error(err)
	}
	if snapshot != "test_snapshot" {
		t.Error("Incorrect snapshot returned")
	}
	// Single constant keeps the expectation and the failure message in sync.
	const expectedBody = `{"uris":["spotify:track:track1","spotify:track:track2"]}`
	if string(body) != expectedBody {
		t.Errorf("Expected '%s' as body, got %s", expectedBody, string(body))
	}
}
// TestReplacePlaylistTracksForbidden checks that an HTTP 403 response
// surfaces as an error from ReplacePlaylistTracks.
func TestReplacePlaylistTracksForbidden(t *testing.T) {
	client, server := testClientString(http.StatusForbidden, "")
	defer server.Close()
	err := client.ReplacePlaylistTracks(context.Background(), "playlistID", "track1", "track2")
	if err == nil {
		t.Error("Replace succeeded but shouldn't have")
	}
}
// TestReplacePlaylistItemsForbidden checks that an HTTP 403 response yields
// an error and an empty snapshot ID.
func TestReplacePlaylistItemsForbidden(t *testing.T) {
	client, server := testClientString(http.StatusForbidden, "")
	defer server.Close()
	snapshot, err := client.ReplacePlaylistItems(context.Background(), "playlistID", "spotify:track:track1", "spotify:track:track2")
	if err == nil {
		t.Error("Replace succeeded but shouldn't have")
	}
	if snapshot != "" {
		t.Fatal("Incorrect snapshot returned")
	}
}
// TestReorderPlaylistRequest inspects the PUT request built by
// ReorderPlaylistTracks: required fields present with the right values,
// zero-valued optional fields omitted. The 404 server makes the call fail.
func TestReorderPlaylistRequest(t *testing.T) {
	client, server := testClientString(http.StatusNotFound, "", func(req *http.Request) {
		if ct := req.Header.Get("Content-Type"); ct != "application/json" {
			t.Errorf("Expected Content-Type: application/json, got '%s'\n", ct)
		}
		if req.Method != "PUT" {
			t.Errorf("Expected a PUT, got a %s\n", req.Method)
		}
		// unmarshal the JSON into a map[string]interface{}
		// so we can test for existence of certain keys
		var body map[string]interface{}
		err := json.NewDecoder(req.Body).Decode(&body)
		if err != nil {
			t.Errorf("Expected no error, got %v", err)
		}
		// JSON numbers decode to float64, hence the float comparisons.
		if start, ok := body["range_start"]; ok {
			if start != float64(3) {
				t.Errorf("Expected range_start to be 3, but it was %#v\n", start)
			}
		} else {
			t.Errorf("Required field range_start is missing")
		}
		if ib, ok := body["insert_before"]; ok {
			if ib != float64(8) {
				t.Errorf("Expected insert_before to be 8, but it was %#v\n", ib)
			}
		} else {
			t.Errorf("Required field insert_before is missing")
		}
		if _, ok := body["range_length"]; ok {
			t.Error("Parameter range_length shouldn't have been in body")
		}
		if _, ok := body["snapshot_id"]; ok {
			t.Error("Parameter snapshot_id shouldn't have been in body")
		}
	})
	defer server.Close()
	_, err := client.ReorderPlaylistTracks(context.Background(), "playlist", PlaylistReorderOptions{
		RangeStart:   3,
		InsertBefore: 8,
	})
	if err == nil || !strings.Contains(err.Error(), "HTTP 404: Not Found") {
		t.Errorf("Expected error 'spotify: HTTP 404: Not Found (body empty)', got %v", err)
	}
}
// TestSetPlaylistImage verifies that SetPlaylistImage PUTs the image with
// Content-Type image/jpeg and base64-encodes the reader's bytes
// ("Zm9v" is base64 of "foo").
func TestSetPlaylistImage(t *testing.T) {
	client, server := testClientString(http.StatusAccepted, "", func(req *http.Request) {
		if ct := req.Header.Get("Content-Type"); ct != "image/jpeg" {
			t.Errorf("wrong content type, got %s, want image/jpeg", ct)
		}
		if req.Method != "PUT" {
			t.Errorf("expected a PUT, got a %s\n", req.Method)
		}
		body, err := ioutil.ReadAll(req.Body)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(body, []byte("Zm9v")) {
			t.Errorf("invalid request body: want Zm9v, got %s", string(body))
		}
	})
	defer server.Close()
	err := client.SetPlaylistImage(context.Background(), "playlist", bytes.NewReader([]byte("foo")))
	if err != nil {
		t.Fatal(err)
	}
}
|
package salesforcefsdb
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path"
"time"
httputil "github.com/grokify/gotilla/net/httputilmore"
)
// FsdbClient is a filesystem-backed cache ("fsdb") layered over the
// Salesforce REST API: reads hit local JSON files first, falling back to
// the RestClient.
type FsdbClient struct {
	Config     SalesforceClientConfig
	RestClient RestClient
}
// NewFsdbClient returns an FsdbClient configured with cfg and a REST client
// built from the same configuration.
func NewFsdbClient(cfg SalesforceClientConfig) FsdbClient {
	return FsdbClient{
		Config:     cfg,
		RestClient: NewRestClient(cfg),
	}
}
// GetPathForSfidAndType returns the cache file path for one sobject,
// creating the per-type directory if needed (the MkdirAll error, if any,
// is returned alongside the computed path).
func (fc *FsdbClient) GetPathForSfidAndType(sSfid string, sType string) (string, error) {
	sFilename := fc.GetFilenameForSfidAndType(sSfid, sType)
	sDir, err := fc.GetDirForType(sType)
	sPath := path.Join(sDir, sFilename)
	return sPath, err
}
// GetFilenameForSfidAndType builds the cache filename for one sobject,
// e.g. "sf_Account_<sfid>.json".
func (fc *FsdbClient) GetFilenameForSfidAndType(sSfid string, sType string) string {
	return "sf_" + sType + "_" + sSfid + ".json"
}
// GetDirForType returns the per-type cache directory under DataDir,
// creating it (and parents) with mode 0755 if missing.
func (fc *FsdbClient) GetDirForType(sType string) (string, error) {
	sDir := path.Join(fc.Config.ConfigGeneral.DataDir, sType)
	err := os.MkdirAll(sDir, 0755)
	return sDir, err
}
// GetSobjectForSfidAndType returns an sobject, serving from the local cache
// when a successfully-retrieved (HTTP 200) copy is younger than MaxAgeSec,
// and refreshing from the remote API otherwise.
func (fc *FsdbClient) GetSobjectForSfidAndType(sSfidTry string, sType string) (SobjectFsdb, error) {
	sobTry, err := fc.GetSobjectForSfidAndTypeFromLocal(sSfidTry, sType)
	if err == nil {
		if sobTry.Meta.HttpStatusCodeI32 == int32(200) && sobTry.Meta.EpochRetrievedSourceI64 > 0 {
			now := time.Now()
			iEpochNow := now.Unix()
			diff := iEpochNow - sobTry.Meta.EpochRetrievedSourceI64
			// Cached copy is fresh enough; skip the remote round trip.
			if diff < fc.Config.ConfigGeneral.MaxAgeSec {
				return sobTry, nil
			}
		}
	}
	sobTry, err = fc.GetSobjectForSfidAndTypeFromRemote(sSfidTry, sType)
	return sobTry, err
}
// GetSobjectForSfidAndTypeFromLocal loads an sobject from its cache file.
// A cached 301 stub is followed one level to the redirect target
// (RedirectSfidS) of the same type.
func (fc *FsdbClient) GetSobjectForSfidAndTypeFromLocal(sSfidTry string, sType string) (SobjectFsdb, error) {
	sobTry := NewSobjectFsdb()
	sPath, err := fc.GetPathForSfidAndType(sSfidTry, sType)
	if err != nil {
		return sobTry, err
	}
	if _, err := os.Stat(sPath); os.IsNotExist(err) {
		return sobTry, err
	}
	abData, err := ioutil.ReadFile(sPath)
	if err != nil {
		return sobTry, err
	}
	err = json.Unmarshal(abData, &sobTry)
	// Follow a cached redirect stub to the real record.
	if err == nil && sobTry.Meta.HttpStatusCodeI32 == int32(301) && len(sobTry.Meta.RedirectSfidS) > 0 {
		sobTry2, err := fc.GetSobjectForSfidAndTypeFromLocal(sobTry.Meta.RedirectSfidS, sType)
		if err == nil {
			return sobTry2, nil
		}
	}
	return sobTry, err
}
// GetSobjectForSfidAndTypeFromRemote fetches an sobject from the Salesforce
// REST API and writes it to the filesystem cache. A 404 for an Account id is
// retried as an Opportunity; the opportunity's AccountId is then fetched and
// a 301 redirect stub is stored under the original id.
func (fc *FsdbClient) GetSobjectForSfidAndTypeFromRemote(sSfidTry string, sType string) (SobjectFsdb, error) {
	if fc.Config.ConfigGeneral.FlagDisableRemote {
		return NewSobjectFsdb(), errors.New("404 File Not Found")
	}
	resTry, err := fc.RestClient.GetSobjectResponseForSfidAndType(sSfidTry, sType)
	// BUG FIX: the transport error was previously discarded (ineffassign),
	// so a failed request could hand a nil response to
	// NewSobjectFsdbForResponse and panic.
	if err != nil {
		return NewSobjectFsdb(), err
	}
	sobTry := NewSobjectFsdbForResponse(resTry)
	if resTry.StatusCode == 404 && sType == "Account" {
		resOpp, err := fc.RestClient.GetSobjectResponseForSfidAndType(sSfidTry, "Opportunity")
		if err == nil {
			sobOpp := NewSobjectFsdbForResponse(resOpp)
			fc.WriteSobjectFsdb(sSfidTry, "Opportunity", sobOpp)
			sSfidAct := fmt.Sprintf("%s", sobOpp.Data["AccountId"])
			if len(sSfidAct) > 0 {
				resAct, err := fc.RestClient.GetSobjectResponseForSfidAndType(sSfidAct, "Account")
				if err != nil {
					return sobTry, err
				}
				sobAct := NewSobjectFsdbForResponse(resAct)
				fc.WriteSobjectFsdb(sSfidAct, "Account", sobAct)
				// Persist a redirect stub so future local lookups of the
				// original id resolve straight to the account record.
				sobjAct301 := NewSobjectFsdb()
				sobjAct301.SetEpochRetrievedSource()
				sobjAct301.Meta.HttpStatusCodeI32 = int32(301)
				sobjAct301.Meta.RedirectSfidS = sSfidAct
				fc.WriteSobjectFsdb(sSfidTry, "Account", sobjAct301)
				return sobAct, nil
			}
		}
	}
	if resTry.StatusCode >= 400 {
		return sobTry, errors.New(resTry.Status)
	}
	err = fc.WriteSobjectFsdb(sSfidTry, sType, sobTry)
	return sobTry, err
}
// WriteSobjectFsdb serializes sobjectFsdb as indented JSON and writes it to
// the cache path for (sSfid, sType). It is a no-op when FlagSaveFs is off.
func (fc *FsdbClient) WriteSobjectFsdb(sSfid string, sType string, sobjectFsdb SobjectFsdb) error {
	if !fc.Config.ConfigGeneral.FlagSaveFs {
		return nil
	}
	j, err := json.MarshalIndent(sobjectFsdb, "", " ")
	if err != nil {
		return err
	}
	sPath, err := fc.GetPathForSfidAndType(sSfid, sType)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(sPath, j, 0755)
}
// SobjectFsdb is one cached Salesforce sobject: retrieval metadata plus the
// raw decoded JSON record.
type SobjectFsdb struct {
	Meta SobjectFsdbMeta
	Data map[string]interface{}
}
// NewSobjectFsdb returns an empty SobjectFsdb with zeroed metadata and an
// initialized (non-nil) Data map.
func NewSobjectFsdb() SobjectFsdb {
	return SobjectFsdb{
		Meta: SobjectFsdbMeta{
			EpochRetrievedSourceI64: int64(0),
			HttpStatusCodeI32:       int32(0),
		},
		Data: map[string]interface{}{},
	}
}
// NewSobjectFsdbForResponse builds an SobjectFsdb from an HTTP response.
// NOTE(review): the LoadResponse error is discarded here, so a body-read
// failure yields a silently empty sobject — confirm whether callers rely
// on that.
func NewSobjectFsdbForResponse(res *http.Response) SobjectFsdb {
	sobjectFsdb := NewSobjectFsdb()
	sobjectFsdb.LoadResponse(res)
	return sobjectFsdb
}
// SobjectFsdbMeta records how a cached sobject was obtained:
// retrieval Unix time, upstream HTTP status, and — for locally stored 301
// stubs — the Salesforce id the lookup should redirect to.
type SobjectFsdbMeta struct {
	EpochRetrievedSourceI64 int64
	HttpStatusCodeI32       int32
	RedirectSfidS           string
}
// SetEpochRetrievedSource stamps the metadata with the current Unix time.
func (so *SobjectFsdb) SetEpochRetrievedSource() {
	so.Meta.EpochRetrievedSourceI64 = time.Now().Unix()
}
// LoadResponse populates the sobject from an HTTP response: the decoded JSON
// body, the upstream status code, and the retrieval timestamp.
func (so *SobjectFsdb) LoadResponse(res *http.Response) error {
	body, err := httputil.ResponseBody(res)
	if err != nil {
		return err
	}
	msi := map[string]interface{}{}
	// BUG FIX: the unmarshal error was ignored, silently caching an empty
	// Data map (with a success status) for malformed bodies.
	if err := json.Unmarshal(body, &msi); err != nil {
		return err
	}
	so.Data = msi
	now := time.Now()
	so.Meta.EpochRetrievedSourceI64 = now.Unix()
	so.Meta.HttpStatusCodeI32 = int32(res.StatusCode)
	return nil
}
fix ineffassign
package salesforcefsdb
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path"
"time"
httputil "github.com/grokify/gotilla/net/httputilmore"
)
// FsdbClient is a filesystem-backed cache ("fsdb") layered over the
// Salesforce REST API.
type FsdbClient struct {
	Config     SalesforceClientConfig
	RestClient RestClient
}
// NewFsdbClient returns an FsdbClient configured with cfg and a REST client
// built from the same configuration.
func NewFsdbClient(cfg SalesforceClientConfig) FsdbClient {
	cl := FsdbClient{}
	cl.Config = cfg
	cl.RestClient = NewRestClient(cfg)
	return cl
}
// GetPathForSfidAndType returns the cache file path for one sobject,
// creating the per-type directory if needed.
func (fc *FsdbClient) GetPathForSfidAndType(sSfid string, sType string) (string, error) {
	sFilename := fc.GetFilenameForSfidAndType(sSfid, sType)
	sDir, err := fc.GetDirForType(sType)
	sPath := path.Join(sDir, sFilename)
	return sPath, err
}
// GetFilenameForSfidAndType builds the cache filename for one sobject,
// e.g. "sf_Account_<sfid>.json".
func (fc *FsdbClient) GetFilenameForSfidAndType(sSfid string, sType string) string {
	filename := "sf_" + sType + "_" + sSfid + ".json"
	return filename
}
// GetDirForType returns the per-type cache directory under DataDir,
// creating it with mode 0755 if missing.
func (fc *FsdbClient) GetDirForType(sType string) (string, error) {
	sDir := path.Join(fc.Config.ConfigGeneral.DataDir, sType)
	err := os.MkdirAll(sDir, 0755)
	return sDir, err
}
// GetSobjectForSfidAndType serves from the local cache when a successful
// (HTTP 200) copy is younger than MaxAgeSec, otherwise refreshes from the
// remote API.
func (fc *FsdbClient) GetSobjectForSfidAndType(sSfidTry string, sType string) (SobjectFsdb, error) {
	sobTry, err := fc.GetSobjectForSfidAndTypeFromLocal(sSfidTry, sType)
	if err == nil {
		if sobTry.Meta.HttpStatusCodeI32 == int32(200) && sobTry.Meta.EpochRetrievedSourceI64 > 0 {
			now := time.Now()
			iEpochNow := now.Unix()
			diff := iEpochNow - sobTry.Meta.EpochRetrievedSourceI64
			// Cached copy is fresh enough; skip the remote round trip.
			if diff < fc.Config.ConfigGeneral.MaxAgeSec {
				return sobTry, nil
			}
		}
	}
	sobTry, err = fc.GetSobjectForSfidAndTypeFromRemote(sSfidTry, sType)
	return sobTry, err
}
// GetSobjectForSfidAndTypeFromLocal loads an sobject from its cache file,
// following a cached 301 stub one level to its redirect target.
func (fc *FsdbClient) GetSobjectForSfidAndTypeFromLocal(sSfidTry string, sType string) (SobjectFsdb, error) {
	sobTry := NewSobjectFsdb()
	sPath, err := fc.GetPathForSfidAndType(sSfidTry, sType)
	if err != nil {
		return sobTry, err
	}
	if _, err := os.Stat(sPath); os.IsNotExist(err) {
		return sobTry, err
	}
	abData, err := ioutil.ReadFile(sPath)
	if err != nil {
		return sobTry, err
	}
	err = json.Unmarshal(abData, &sobTry)
	// Follow a cached redirect stub to the real record.
	if err == nil && sobTry.Meta.HttpStatusCodeI32 == int32(301) && len(sobTry.Meta.RedirectSfidS) > 0 {
		sobTry2, err := fc.GetSobjectForSfidAndTypeFromLocal(sobTry.Meta.RedirectSfidS, sType)
		if err == nil {
			return sobTry2, nil
		}
	}
	return sobTry, err
}
// GetSobjectForSfidAndTypeFromRemote fetches an sobject from the Salesforce
// REST API and writes it to the filesystem cache. A 404 for an Account id is
// retried as an Opportunity; the opportunity's AccountId is then fetched and
// a 301 redirect stub is stored under the original id.
func (fc *FsdbClient) GetSobjectForSfidAndTypeFromRemote(sSfidTry string, sType string) (SobjectFsdb, error) {
	// Idiom fix: compare booleans directly, not against "true"
	// (staticcheck S1002).
	if fc.Config.ConfigGeneral.FlagDisableRemote {
		err := errors.New("404 File Not Found")
		return NewSobjectFsdb(), err
	}
	resTry, err := fc.RestClient.GetSobjectResponseForSfidAndType(sSfidTry, sType)
	if err != nil {
		return NewSobjectFsdb(), err
	}
	sobTry := NewSobjectFsdbForResponse(resTry)
	if resTry.StatusCode == 404 && sType == "Account" {
		resOpp, err := fc.RestClient.GetSobjectResponseForSfidAndType(sSfidTry, "Opportunity")
		if err == nil {
			sobOpp := NewSobjectFsdbForResponse(resOpp)
			fc.WriteSobjectFsdb(sSfidTry, "Opportunity", sobOpp)
			sSfidAct := fmt.Sprintf("%s", sobOpp.Data["AccountId"])
			if len(sSfidAct) > 0 {
				resAct, err := fc.RestClient.GetSobjectResponseForSfidAndType(sSfidAct, "Account")
				if err != nil {
					return sobTry, err
				}
				sobAct := NewSobjectFsdbForResponse(resAct)
				fc.WriteSobjectFsdb(sSfidAct, "Account", sobAct)
				// Persist a redirect stub so future local lookups of the
				// original id resolve straight to the account record.
				sobjAct301 := NewSobjectFsdb()
				sobjAct301.SetEpochRetrievedSource()
				sobjAct301.Meta.HttpStatusCodeI32 = int32(301)
				sobjAct301.Meta.RedirectSfidS = sSfidAct
				fc.WriteSobjectFsdb(sSfidTry, "Account", sobjAct301)
				return sobAct, nil
			}
		}
	}
	if resTry.StatusCode >= 400 {
		err := errors.New(resTry.Status)
		return sobTry, err
	}
	err = fc.WriteSobjectFsdb(sSfidTry, sType, sobTry)
	return sobTry, err
}
// WriteSobjectFsdb serializes sobjectFsdb as indented JSON and writes it to
// the cache path for (sSfid, sType). It is a no-op when FlagSaveFs is off.
func (fc *FsdbClient) WriteSobjectFsdb(sSfid string, sType string, sobjectFsdb SobjectFsdb) error {
	if fc.Config.ConfigGeneral.FlagSaveFs == false {
		return nil
	}
	j, err := json.MarshalIndent(sobjectFsdb, "", " ")
	if err != nil {
		return err
	}
	sPath, err := fc.GetPathForSfidAndType(sSfid, sType)
	if err != nil {
		return err
	}
	err = ioutil.WriteFile(sPath, j, 0755)
	if err != nil {
		return err
	}
	return nil
}
// SobjectFsdb is one cached Salesforce sobject: retrieval metadata plus the
// raw decoded JSON record.
type SobjectFsdb struct {
	Meta SobjectFsdbMeta
	Data map[string]interface{}
}
// NewSobjectFsdb returns an empty SobjectFsdb with explicitly zeroed
// metadata and a non-nil, ready-to-use Data map.
func NewSobjectFsdb() SobjectFsdb {
	var meta SobjectFsdbMeta
	meta.EpochRetrievedSourceI64 = int64(0)
	meta.HttpStatusCodeI32 = int32(0)
	return SobjectFsdb{
		Meta: meta,
		Data: map[string]interface{}{},
	}
}
// NewSobjectFsdbForResponse builds a SobjectFsdb populated from the given
// HTTP response's body and status code.
// NOTE(review): the error returned by LoadResponse (e.g. an unreadable body)
// is dropped here, so callers may receive a partially initialized sobject —
// confirm this is intentional.
func NewSobjectFsdbForResponse(res *http.Response) SobjectFsdb {
	sobjectFsdb := NewSobjectFsdb()
	sobjectFsdb.LoadResponse(res)
	return sobjectFsdb
}
// SobjectFsdbMeta records how and when a cached sobject was retrieved.
type SobjectFsdbMeta struct {
	EpochRetrievedSourceI64 int64  // unix time the sobject was fetched from source
	HttpStatusCodeI32       int32  // HTTP status of the fetch; 301 marks a redirect stub
	RedirectSfidS           string // target sfid when HttpStatusCodeI32 is 301
}
// SetEpochRetrievedSource stamps the metadata with the current unix time.
func (so *SobjectFsdb) SetEpochRetrievedSource() {
	so.Meta.EpochRetrievedSourceI64 = time.Now().Unix()
}
// LoadResponse fills so from an HTTP response: the body is JSON-decoded into
// so.Data and the metadata records the current unix time plus the response
// status code. It returns an error only when the body cannot be read.
// NOTE(review): the json.Unmarshal error is ignored — a non-JSON body leaves
// so.Data empty while the status code is still recorded. Callers (e.g. the
// 404 handling in GetSobjectForSfidAndTypeFromRemote) may rely on that, so
// confirm before tightening.
func (so *SobjectFsdb) LoadResponse(res *http.Response) error {
	body, err := httputil.ResponseBody(res)
	if err != nil {
		return err
	}
	msi := map[string]interface{}{}
	json.Unmarshal(body, &msi)
	so.Data = msi
	now := time.Now()
	so.Meta.EpochRetrievedSourceI64 = now.Unix()
	so.Meta.HttpStatusCodeI32 = int32(res.StatusCode)
	return nil
}
|
package queue
import "time"
// -----------------------------------------------------------------------------
// TimeoutReached returns `true` if the time elapsed since `m.Timeout` is
// greater or equal to `d`. It returns `false` otherwise.
func (m *Message) TimeoutReached(d time.Duration) bool {
	return time.Since(m.Timeout) >= d
}
// -----------------------------------------------------------------------------
// Message is a structure representing messages sent and buffered between
// `queue.ZMQBroker` and `queue.ZMQWorker`.
type Message struct {
	ID      int       // message identifier
	Msg     string    // message payload
	Timeout time.Time // set to time.Now() by NewMessage; basis for TimeoutReached
}
// NewMessage returns a new `queue.Message` with its Timeout initialized to
// the current time.
func NewMessage(ID int, msg string) *Message {
	return &Message{
		ID:      ID,
		Msg:     msg,
		Timeout: time.Now(),
	}
}
feat(message): add Copy and ToString functions
package queue
import (
"fmt"
"time"
)
// -----------------------------------------------------------------------------
// TimeoutReached returns `true` if the time elapsed since `m.Timeout` is
// greater or equal to `d`. It returns `false` otherwise.
func (m *Message) TimeoutReached(d time.Duration) bool {
	return time.Since(m.Timeout) >= d
}
// Copy returns a new copy of `m` with updated `m.Timeout`.
func (m *Message) Copy() *Message {
	duplicate := NewMessage(m.ID, m.Msg)
	return duplicate
}
// ToString returns the `string` representation of `m`.
func (m *Message) ToString() string {
	formatted := fmt.Sprintf("{ID: %d, Msg: \"%s\"}", m.ID, m.Msg)
	return formatted
}
// -----------------------------------------------------------------------------
// Message is a structure representing messages sent and buffered between
// `queue.ZMQBroker` and `queue.ZMQWorker`.
type Message struct {
	ID      int       // message identifier
	Msg     string    // message payload
	Timeout time.Time // set to time.Now() by NewMessage; basis for TimeoutReached
}
// NewMessage returns a new `queue.Message` with its Timeout initialized to
// the current time.
func NewMessage(ID int, msg string) *Message {
	return &Message{
		ID:      ID,
		Msg:     msg,
		Timeout: time.Now(),
	}
}
|
package ergoq
import (
"fmt"
"github.com/garyburd/redigo/redis"
)
const (
	// RETRY_QUEUE is the key prefix of the per-queue sorted set holding
	// popped but not-yet-acknowledged messages, scored by pop timestamp.
	RETRY_QUEUE = "ergoq-retry:"
	// MESSAGE_COUNTER is the key prefix of the per-queue counter used to
	// assign unique message ids.
	MESSAGE_COUNTER = "ergoq-counter:"
)
// Server-side Lua scripts, compiled once in init; running the logic inside
// Redis keeps each operation atomic.
var (
	popScript           *redis.Script // pops a message and parks it in the retry queue
	ackScript           *redis.Script // removes an acknowledged message from the retry queue
	queueNonAckedScript *redis.Script // re-queues messages whose ack timed out
)
// init compiles the three server-side Lua scripts once; running them inside
// Redis keeps each pop/ack/re-queue operation atomic.
func init() {
	// redis lua script to POP message from queue:
	// RPOPs a message, assigns it an id from the per-queue counter and parks
	// "<id>:<message>" in the retry sorted set, scored with the caller's
	// timestamp so un-acked messages can later be detected by age.
	popScript = redis.NewScript(2, fmt.Sprintf(`
local queue = KEYS[1]
local timestamp = KEYS[2]
local function add_message_to_retry_queue(queue, message, timestamp)
local retry_queue = "%s" .. queue
local queue_message_counter = "%s" .. queue
local message_id = redis.call("INCR", queue_message_counter)
local full_message = message_id .. ":"
full_message = full_message .. message
redis.call("ZADD", retry_queue, timestamp, full_message)
return message_id
end
local value = redis.call("RPOP", queue)
if value == false then
return redis.error_reply("no message returned")
end
local id = add_message_to_retry_queue(queue, value, timestamp)
return {queue, value, id .. ""}`, RETRY_QUEUE, MESSAGE_COUNTER))
	// redis lua script to acknowledge message: drops the "<id>:<message>"
	// member for the given id from the retry sorted set via a lexical range.
	ackScript = redis.NewScript(2, fmt.Sprintf(`
local queue = KEYS[1]
local id = KEYS[2]
local retry_queue = "%s" .. queue
local lex = "[" .. id .. ":"
local result = redis.call("ZREMRANGEBYLEX", retry_queue, lex, lex)
return result`, RETRY_QUEUE))
	// redis lua script to re-queue non acked messages.
	// BUG FIX: the selection must be by score (the pop timestamp), not by
	// index — ZRANGE interprets its arguments as positions, so it re-queued
	// messages that had not timed out yet. ZRANGEBYSCORE matches the
	// ZREMRANGEBYSCORE cleanup below, which already operates on scores.
	queueNonAckedScript = redis.NewScript(3, fmt.Sprintf(`
local queue = KEYS[1]
local time = KEYS[2]
local timeout = KEYS[3]
local retryQueue = "%s" .. queue
local result = redis.call("ZRANGEBYSCORE", retryQueue, 0, time - timeout)
local num = 0
for _, value in ipairs(result) do
local index = string.find(value, ":")
index = index + 1
local tempValue = value:sub(index)
redis.call("RPUSH", queue, tempValue)
num = num + 1
end
redis.call("ZREMRANGEBYSCORE", retryQueue, 0, time - timeout)
return num .. ""`, RETRY_QUEUE))
}
Fixed bug with pop, ZRANGE was used instead of ZRANGEBYSCORE (headbang)
package ergoq
import (
"fmt"
"github.com/garyburd/redigo/redis"
)
const (
	// RETRY_QUEUE is the key prefix of the per-queue sorted set holding
	// popped but not-yet-acknowledged messages, scored by pop timestamp.
	RETRY_QUEUE = "ergoq-retry:"
	// MESSAGE_COUNTER is the key prefix of the per-queue counter used to
	// assign unique message ids.
	MESSAGE_COUNTER = "ergoq-counter:"
)
// Server-side Lua scripts, compiled once in init; running the logic inside
// Redis keeps each operation atomic.
var (
	popScript           *redis.Script // pops a message and parks it in the retry queue
	ackScript           *redis.Script // removes an acknowledged message from the retry queue
	queueNonAckedScript *redis.Script // re-queues messages whose ack timed out
)
// init compiles the three server-side Lua scripts once; running them inside
// Redis keeps each pop/ack/re-queue operation atomic.
func init() {
	// redis lua script to POP message from queue:
	// RPOPs a message, assigns it an id from the per-queue counter and parks
	// "<id>:<message>" in the retry sorted set, scored with the caller's
	// timestamp (KEYS[2]) so un-acked messages can later be detected by age.
	popScript = redis.NewScript(2, fmt.Sprintf(`
local queue = KEYS[1]
local ts = KEYS[2]
local function add_message_to_retry_queue(queue, message, timestamp)
local retry_queue = "%s" .. queue
local queue_message_counter = "%s" .. queue
local message_id = redis.call("INCR", queue_message_counter)
local full_message = message_id .. ":"
full_message = full_message .. message
redis.call("ZADD", retry_queue, timestamp, full_message)
return message_id
end
local value = redis.call("RPOP", queue)
if value == false then
return redis.error_reply("no message returned")
end
local id = add_message_to_retry_queue(queue, value, ts)
return {queue, value, id .. ""}`, RETRY_QUEUE, MESSAGE_COUNTER))
	// redis lua script to acknowledge message: drops the "<id>:<message>"
	// member for the given id from the retry sorted set via a lexical range.
	ackScript = redis.NewScript(2, fmt.Sprintf(`
local queue = KEYS[1]
local id = KEYS[2]
local retry_queue = "%s" .. queue
local lex = "[" .. id .. ":"
local result = redis.call("ZREMRANGEBYLEX", retry_queue, lex, lex)
return result`, RETRY_QUEUE))
	// redis lua script to re-queue non acked messages: selects timed-out
	// entries by score, pushes the payload (after the "<id>:" prefix) back
	// onto the queue and removes each requeued member individually by lex.
	queueNonAckedScript = redis.NewScript(3, fmt.Sprintf(`
local queue = KEYS[1]
local time = KEYS[2]
local timeout = KEYS[3]
local retryQueue = "%s" .. queue
local result = redis.call("ZRANGEBYSCORE", retryQueue, 0, time - timeout)
local num = 0
for _, value in ipairs(result) do
local index = string.find(value, ":")
local id = value:sub(0, index)
index = index + 1
local tempValue = value:sub(index)
redis.call("RPUSH", queue, tempValue)
num = num + 1
local lex = "[" .. id
redis.call("ZREMRANGEBYLEX", retryQueue, lex, lex)
end
return num .. ""`, RETRY_QUEUE))
}
|
package wayffunctionaltest
import (
"bytes"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"errors"
"flag"
"fmt"
toml "github.com/pelletier/go-toml"
"github.com/wayf-dk/go-libxml2/types"
"github.com/wayf-dk/gosaml"
"github.com/wayf-dk/goxml"
"github.com/wayf-dk/lMDQ"
"github.com/wayf-dk/wayfhybrid"
"github.com/y0ssar1an/q"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"os"
"reflect"
"regexp"
// "runtime"
"runtime/debug"
"strconv"
"strings"
"sync"
"testing"
"text/template"
"time"
)
// Blank assignments keep the debug-only imports (log, fmt, q) referenced so
// they survive while debugging statements come and go.
var (
	_ = log.Printf // For debugging; delete when done.
	_ = fmt.Printf
	_ = q.Q
)
type (
	// Testparams carries the full state of one simulated login flow: the
	// metadata of every party involved, the evolving request/response pair,
	// the cookie jar and the knobs individual tests tweak via overwrites.
	Testparams struct {
		Idp, BirkIdp, FinalIdp                             string
		Spmd, Idpmd, Hubidpmd, Hubspmd, Birkmd, Firstidpmd *goxml.Xp
		Cookiejar                                          map[string]map[string]*http.Cookie
		IdpentityID                                        string
		DSIdpentityID                                      string
		// Resolv map[string]string
		Initialrequest                 *goxml.Xp
		Newresponse                    *goxml.Xp
		Resp                           *http.Response
		Responsebody                   []byte
		Err                            error
		Trace, Logxml, Encryptresponse bool
		Privatekey                     string
		Privatekeypw                   string
		Certificate                    string
		Hashalgorithm                  string
		Attributestmt                  *goxml.Xp
		Hub                            bool
		Birk                           bool
		Env                            string
		ConsentGiven                   bool
		ElementsToSign                 []string
	}
	// overwrites maps Testparams field names to replacement values; Newtp
	// applies them via reflection.
	overwrites map[string]interface{}
	// mod is a single change to a SAML document: either an xpath/value pair
	// or a free-form function.
	mod struct {
		path, value string
		function    func(*goxml.Xp)
	}
	// mods is an ordered list of changes; modsset groups them by target
	// (e.g. "requestmods", "responsemods", "attributemods").
	mods    []mod
	modsset map[string]mods
	M       map[string]interface{} // just an alias
)
const lMDQ_METADATA_SCHEMA_PATH = "src/github.com/wayf-dk/goxml/schemas/ws-federation.xsd"
var (
	// mdsources maps environment name -> metadata-set name -> path of the
	// metadata cache database used for that set.
	mdsources = map[string]map[string]string{
		"prodz": {
			"hub":         "../hybrid-metadata-test.mddb",
			"internal":    "../hybrid-metadata.mddb",
			"externalIdP": "../hybrid-metadata-test.mddb",
			"externalSP":  "../hybrid-metadata.mddb",
		},
		"prod": {
			"hub":         "../hybrid-metadata.mddb",
			"internal":    "../hybrid-metadata.mddb",
			"externalIdP": "../hybrid-metadata.mddb",
			"externalSP":  "../hybrid-metadata.mddb",
		},
		"dev": {
			"hub":         "../hybrid-metadata-test.mddb",
			"internal":    "../hybrid-metadata-test.mddb",
			"externalIdP": "../hybrid-metadata-test.mddb",
			"externalSP":  "../hybrid-metadata-test.mddb",
		},
	}
	wayf_hub_public, internal, externalIdP, externalSP *lMDQ.MDQ
	Md                                                 wayfhybrid.MdSets
	// testAttributes is the canonical attribute set asserted by the fake
	// test IdP; some values deliberately contain markup / edge cases.
	testAttributes = map[string][]string{
		"eduPersonPrincipalName": {"joe@this.is.not.a.valid.idp"},
		"mail":                       {"joe@example.com"},
		"gn":                         {`Anton Banton <SamlRequest id="abc">abc</SamlRequest>`},
		"sn":                         {"Cantonsen"},
		"norEduPersonLIN":            {"123456789"},
		"eduPersonScopedAffiliation": {"student@this.is.not.a.valid.idp", "member@this.is.not.a.valid.idp"},
		"preferredLanguage":          {"da"},
		"eduPersonEntitlement":       {"https://example.com/course101"},
		"eduPersonAssurance":         {"2"},
		"organizationName":           {"Orphanage - home for the homeless"},
		"cn":                         {"Anton Banton Cantonsen"},
		"eduPersonPrimaryAffiliation": {"student"},
		"eduPersonAffiliation":        {"alum"},
		"schacHomeOrganizationType":   {"abc"},
		"schacPersonalUniqueID":       {"urn:mace:terena.org:schac:personalUniqueID:dk:CPR:2408586234"},
		"schacCountryOfCitizenship":   {"dk"},
		"displayName":                 {"Anton Banton Cantonsen"},
	}
	// Command-line flags selecting what to test and where the servers live.
	do           = flag.String("do", "hub", "Which tests to run")
	hub          = flag.String("hub", "wayf.wayf.dk", "the hostname for the hub server to be tested")
	hubbe        = flag.String("hubbe", "", "the hub backend server")
	birk         = flag.String("birk", "birk.wayf.dk", "the hostname for the BIRK server to be tested")
	birkbe       = flag.String("birkbe", "", "the birk backend server")
	ds           = flag.String("ds", "ds.wayf.dk", "the discovery server")
	trace        = flag.Bool("xtrace", false, "trace the request/response flow")
	logxml       = flag.Bool("logxml", false, "dump requests/responses in xml")
	env          = flag.String("env", "prod", "which environment to test dev, hybrid, prod - if not dev")
	refreshmd    = flag.Bool("refreshmd", true, "update local metadatcache before testing")
	testcertpath = flag.String("testcertpath", "/etc/ssl/wayf/certs/wildcard.test.lan.pem", "path to the testing cert")
	testSPs      *goxml.Xp
	dohub, dobirk bool
	// stdout capture plumbing used by stdoutstart/stdoutend.
	old, r, w *os.File
	outC      = make(chan string)
	// templatevalues holds per-environment expansions for the expected-output
	// templates in stdoutend.
	templatevalues = map[string]map[string]string{
		"prod": {
			"eptid":   "WAYF-DK-c52a92a5467ae336a2be77cd06719c645e72dfd2",
			"pnameid": "WAYF-DK-c52a92a5467ae336a2be77cd06719c645e72dfd2",
		},
		"prodz": {
			"eptid":   "WAYF-DK-a7379f69e957371dc49350a27b704093c0b813f1",
			"pnameid": "WAYF-DK-a7379f69e957371dc49350a27b704093c0b813f1",
		},
		"dev": {
			"eptid":   "WAYF-DK-a7379f69e957371dc49350a27b704093c0b813f1",
			"pnameid": "WAYF-DK-a7379f69e957371dc49350a27b704093c0b813f1",
		},
		"hybrid": {
			"eptid":   "WAYF-DK-a7379f69e957371dc49350a27b704093c0b813f1",
			"pnameid": "WAYF-DK-a7379f69e957371dc49350a27b704093c0b813f1",
		},
	}
	// resolv maps public host:port names to the hosts actually under test.
	resolv map[string]string
	wg     sync.WaitGroup
	tr     *http.Transport
	client *http.Client
)
// TestMain wires up the suite: parses flags, opens the four metadata
// databases for the selected environment, loads the hybrid toml config,
// indexes attribute descriptions, selects the pool of candidate test SPs
// and installs an http client whose dialer redirects the well-known wayf
// hostnames to the servers under test.
func TestMain(m *testing.M) {
	flag.Parse()
	dohub = *do == "hub"
	dobirk = *do == "birk"
	log.Printf("do: %q hub: %q backend: %q birk: %q backend: %q\n", *do, *hub, *hubbe, *birk, *birkbe)
	// Open the metadata sets read-only for the chosen environment.
	Md.Hub = &lMDQ.MDQ{Path: "file:" + mdsources[*env]["hub"] + "?mode=ro", Table: "HYBRID_HUB"}
	Md.Internal = &lMDQ.MDQ{Path: "file:" + mdsources[*env]["internal"] + "?mode=ro", Table: "HYBRID_INTERNAL"}
	Md.ExternalIdP = &lMDQ.MDQ{Path: "file:" + mdsources[*env]["externalIdP"] + "?mode=ro", Table: "HYBRID_EXTERNAL_IDP"}
	Md.ExternalSP = &lMDQ.MDQ{Path: "file:" + mdsources[*env]["externalSP"] + "?mode=ro", Table: "HYBRID_EXTERNAL_SP"}
	for _, md := range []gosaml.Md{Md.Hub, Md.Internal, Md.ExternalIdP, Md.ExternalSP} {
		err := md.(*lMDQ.MDQ).Open()
		if err != nil {
			panic(err)
		}
	}
	tomlConfig, err := toml.LoadFile(wayfhybrid.X.Path + "hybrid-config/hybrid-config.toml")
	if err != nil { // Handle errors reading the config file
		panic(fmt.Errorf("Fatal error config file: %s\n", err))
	}
	err = tomlConfig.Unmarshal(wayfhybrid.X)
	if err != nil {
		panic(fmt.Errorf("Fatal error %s\n", err))
	}
	// Index attribute descriptions by (name, nameformat namespace).
	for _, ad := range wayfhybrid.X.AttributeDescriptions {
		k := wayfhybrid.AttributeKey{ad.Name, wayfhybrid.X.AttributenameFormats[ad.Nameformat].Ns}
		wayfhybrid.AttributeDescriptions[k] = ad
		wayfhybrid.AttributeDescriptionsList[ad.Nameformat] = append(wayfhybrid.AttributeDescriptionsList[ad.Nameformat], ad)
	}
	//gosaml.Config.CertPath = "testdata/"
	//wayfhybrid.Md = Md
	//go wayfhybrid.Main()
	// need non-birk, non-request.validate and non-IDPList SPs for testing ....
	var numberOfTestSPs int
	testSPs, numberOfTestSPs, _ = Md.Internal.MDQFilter("/md:EntityDescriptor/md:Extensions/wayf:wayf[wayf:federation='WAYF' and not(wayf:IDPList)]/../../md:SPSSODescriptor/..")
	if numberOfTestSPs == 0 {
		log.Fatal("No testSP candidates")
	}
	// Map the public hostnames to the hosts given on the command line so the
	// suite can be pointed at any deployment.
	resolv = map[string]string{"wayf.wayf.dk:443": *hub + ":443", "birk.wayf.dk:443": *birk + ":443", "krib.wayf.dk:443": *hub + ":443", "ds.wayf.dk:443": *ds + ":443"}
	tr = &http.Transport{
		TLSClientConfig:    &tls.Config{InsecureSkipVerify: true}, // test servers may use self-signed certs
		Dial:               func(network, addr string) (net.Conn, error) { return net.Dial(network, resolv[addr]) },
		DisableCompression: true,
	}
	client = &http.Client{
		Transport: tr,
		// Redirects are followed manually in browse; flag them as errors here.
		CheckRedirect: func(req *http.Request, via []*http.Request) error { return errors.New("redirect-not-allowed") },
	}
	os.Exit(m.Run())
}
// logxml pretty-prints a SAML message when the -logxml flag is set. It
// accepts either a redirect URL (whose SAMLRequest query parameter is
// base64-decoded and inflated) or an already parsed *goxml.Xp.
// NOTE(review): any other argument type leaves xml nil and PP() is then
// called on a nil receiver — confirm goxml tolerates that.
func (tp *Testparams) logxml(x interface{}) {
	if tp.Logxml {
		var xml *goxml.Xp
		switch i := x.(type) {
		case *url.URL:
			query := i.Query()
			req, _ := base64.StdEncoding.DecodeString(query.Get("SAMLRequest"))
			xml = goxml.NewXp(gosaml.Inflate(req))
		case *goxml.Xp:
			xml = i
		}
		log.Println(xml.PP())
	}
}
// stdoutstart redirects os.Stdout into a pipe so a test's printed output can
// be captured and later compared by stdoutend. The drained output is
// delivered on outC by a background goroutine.
func stdoutstart() {
	old = os.Stdout // keep backup of the real stdout
	r, w, _ = os.Pipe()
	os.Stdout = w
	outC = make(chan string)
	// copy the output in a separate goroutine so printing can't block indefinitely
	go func() {
		var buf bytes.Buffer
		io.Copy(&buf, r)
		outC <- buf.String()
	}()
}
// stdoutend restores os.Stdout, collects everything printed since
// stdoutstart, expands `expected` as a template against the current
// environment's templatevalues and fails the test on mismatch.
func stdoutend(t *testing.T, expected string) {
	// back to normal state
	var b bytes.Buffer
	w.Close()
	os.Stdout = old // restoring the real stdout
	got := <-outC
	tmpl := template.Must(template.New("expected").Parse(expected))
	_ = tmpl.Execute(&b, templatevalues[*env])
	expected = b.String()
	if expected == "" {
		// t.Errorf("unexpected empty expected string\n")
	}
	if expected != got {
		t.Errorf("\nexpected:\n%s\ngot:\n%s\n", expected, got)
	}
	//fmt.Printf("\nexpected:\n%s\ngot:\n%s\n", expected, got)
}
// Newtp builds a Testparams with default values for a hub or birk test run:
// default test IdP/SP metadata, the signing key/cert of the test IdP and the
// canonical attribute statement. Fields named in overwrite replace the
// defaults via reflection — once before the derived values are computed and
// once afterwards for the key/cert fields that depend on tp.Idpmd.
func Newtp(overwrite *overwrites) (tp *Testparams) {
	tp = new(Testparams)
	tp.Privatekeypw = os.Getenv("PW")
	if tp.Privatekeypw == "" {
		log.Fatal("no PW environment var")
	}
	tp.Env = *env
	tp.Hub = dohub
	tp.Birk = dobirk
	tp.Trace = *trace
	tp.Logxml = *logxml
	tp.Hashalgorithm = "sha256"
	tp.ElementsToSign = []string{"saml:Assertion[1]"}
	tp.Idp = "https://this.is.not.a.valid.idp"
	tp.FinalIdp = tp.Idp
	tp.Spmd, _ = Md.Internal.MDQ("https://wayfsp.wayf.dk")
	if overwrite != nil { // overwrite default values with test specific values while it still matters
		for k, v := range *overwrite {
			reflect.ValueOf(tp).Elem().FieldByName(k).Set(reflect.ValueOf(v))
		}
	}
	// don't use urn:... entityID'ed IdPs for now
	tp.BirkIdp = regexp.MustCompile("^(https?://)(.*)$").ReplaceAllString(tp.Idp, "${1}birk.wayf.dk/birk.php/$2")
	tp.Hubidpmd, _ = Md.Hub.MDQ("https://wayf.wayf.dk")
	tp.Hubspmd = tp.Hubidpmd
	tp.Idpmd, _ = Md.Internal.MDQ(tp.Idp)
	tp.Birkmd, _ = Md.ExternalIdP.MDQ(tp.BirkIdp)
	tp.DSIdpentityID = tp.Idp
	switch *do {
	case "hub":
		tp.Firstidpmd = tp.Hubidpmd
		tp.DSIdpentityID = tp.BirkIdp
	case "birk":
		tp.Firstidpmd = tp.Birkmd
	}
	tp.Cookiejar = make(map[string]map[string]*http.Cookie)
	tp.Cookiejar["wayf.dk"] = make(map[string]*http.Cookie)
	tp.Cookiejar["wayf.dk"]["wayfid"] = &http.Cookie{Name: "wayfid", Value: *hubbe}
	//tp.Cookiejar["wayf.dk"] = make(map[string]*http.Cookie)
	tp.Cookiejar["wayf.dk"]["birkid"] = &http.Cookie{Name: "birkid", Value: *birkbe}
	tp.Attributestmt = newAttributeStatement(testAttributes)
	cert := tp.Idpmd.Query1(nil, `//md:KeyDescriptor[@use="signing" or not(@use)]/ds:KeyInfo/ds:X509Data/ds:X509Certificate`)
	if cert == "" {
		// BUG FIX: the fmt.Errorf result was constructed and silently
		// discarded here (vet: unused result) — actually report the problem.
		log.Printf("Could not find signing cert for: %s", tp.Idpmd.Query1(nil, "/@entityID"))
	}
	tp.Certificate = cert
	keyname, _, err := gosaml.PublicKeyInfo(cert)
	if err != nil {
		log.Fatal(err)
	}
	pk, err := ioutil.ReadFile("signing/" + keyname + ".key")
	if err != nil {
		log.Fatal(err)
	}
	tp.Privatekey = string(pk)
	// due to dependencies on tp.Idpmd we need to overwrite again for specific keys
	// to be able to test for "wrong" keys
	if overwrite != nil {
		lateOverWrites := []string{"Privatekey", "Certificate"}
		for _, k := range lateOverWrites {
			if v, ok := (*overwrite)[k]; ok {
				reflect.ValueOf(tp).Elem().FieldByName(k).Set(reflect.ValueOf(v))
			}
		}
	}
	return
}
// newAttributeStatement renders attrs into a minimal SAML assertion with a
// transient NameID, a password AuthnContext and one saml:Attribute element
// (NameFormat basic) per attribute, each with one AttributeValue per value.
func newAttributeStatement(attrs map[string][]string) (ats *goxml.Xp) {
	template := `<saml:Assertion xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xs="http://www.w3.org/2001/XMLSchema">
<saml:Subject>
<saml:NameID Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient"></saml:NameID>
</saml:Subject>
<saml:AuthnStatement>
<saml:AuthnContext>
<saml:AuthnContextClassRef>
urn:oasis:names:tc:SAML:2.0:ac:classes:Password
</saml:AuthnContextClassRef>
</saml:AuthnContext>
</saml:AuthnStatement>
<saml:AttributeStatement/>
</saml:Assertion>`
	ats = goxml.NewXpFromString(template)
	// Fresh random NameID per statement.
	ats.QueryDashP(nil, "./saml:Subject/saml:NameID", gosaml.Id(), nil)
	attributeStmt := ats.Query(nil, "./saml:AttributeStatement")[0]
	// 1-based xpath indexes address each attribute / value slot.
	i := 1
	for attr, attrvals := range attrs {
		attrelement := ats.QueryDashP(attributeStmt, `saml:Attribute[`+strconv.Itoa(i)+`]`, "", nil)
		ats.QueryDashP(attrelement, "@Name", attr, nil)
		ats.QueryDashP(attrelement, "@NameFormat", "urn:oasis:names:tc:SAML:2.0:attrname-format:basic", nil)
		j := 1
		for _, attrval := range attrvals {
			attrvalelement := ats.QueryDashP(attrelement, `saml:AttributeValue[`+strconv.Itoa(j)+`]`, attrval, nil)
			ats.QueryDashP(attrvalelement, "@xsi:type", "xs:string", nil)
			j = j + 1
		}
		i = i + 1
	}
	return
}
// Does what the browser does follow redirects and POSTs and displays errors
//
// browse drives one full login flow: it issues the AuthnRequest, follows
// redirects (choosing the test IdP at the discovery service, auto-accepting
// consent), fabricates the IdP response via newresponse, applies the
// per-test modifications in m, and stops when the response reaches the SP's
// AssertionConsumerService or an error page is shown. Returns nil on a
// non-redirect transport error.
func browse(m modsset, overwrite interface{}) (tp *Testparams) {
	var htmlresponse *goxml.Xp
	// overwrite may be a ready-made Testparams, a set of field overwrites,
	// or nil for pure defaults.
	switch t := overwrite.(type) {
	case *overwrites:
		tp = Newtp(t)
	case *Testparams:
		tp = t
	case nil:
		tp = Newtp(nil)
	}
	stage := map[string]string{"hub": "wayf.wayf.dk", "birk": "wayf.wayf.dk"}[*do]
	ApplyMods(tp.Attributestmt, m["attributemods"])
	tp.Initialrequest, _ = gosaml.NewAuthnRequest(nil, tp.Spmd, tp.Firstidpmd, []string{tp.IdpentityID}, "")
	ApplyMods(tp.Initialrequest, m["requestmods"])
	u, _ := gosaml.SAMLRequest2Url(tp.Initialrequest, "", "", "", "")
	// when to stop
	finalDestination, _ := url.Parse(tp.Initialrequest.Query1(nil, "./@AssertionConsumerServiceURL"))
	finalIdp, _ := url.Parse(tp.FinalIdp)
	redirects := 7 // hard cap on hops to avoid endless loops
	method := "GET"
	body := ""
	for {
		redirects--
		if redirects == 0 { // if we go wild ...
			return
		}
		if method == "POST" {
			tp.logxml(tp.Newresponse)
			acs := tp.Newresponse.Query1(nil, "@Destination")
			issuer, _ := url.Parse(tp.Newresponse.Query1(nil, "./saml:Issuer"))
			if (tp.Birk || tp.Hub) && map[string]bool{"birk.wayf.dk": true, "wayf.wayf.dk": true}[issuer.Host] {
				// in the new hybrid consent is made in js - and the flag for bypassing it is in js - sad!
				tp.ConsentGiven = strings.Contains(htmlresponse.PP(), `,"BypassConfirmation":false`)
			}
			u, _ = url.Parse(acs)
			//q.Q(u, finalDestination)
			// Response has reached the SP - validate and stop.
			if u.Host == finalDestination.Host {
				tp.logxml(tp.Newresponse)
				err := ValidateSignature(tp.Firstidpmd, tp.Newresponse)
				if err != nil {
					fmt.Printf("signature errors: %s\n", err)
				}
				break
			}
			if u.Host == stage { // only change the response to the place we are actually testing (wayf|birk|krib).wayf.dk
				ApplyMods(tp.Newresponse, m["responsemods"])
			}
			data := url.Values{}
			data.Set("SAMLResponse", base64.StdEncoding.EncodeToString([]byte(tp.Newresponse.Doc.Dump(false))))
			body = data.Encode()
			//log.Println("SAMLResponse", tp.Newresponse.PP())
		} else {
			tp.logxml(u)
		}
		//q.Q("u", method, redirects, u)
		tp.Resp, tp.Responsebody, tp.Err = tp.sendRequest(u, method, body, tp.Cookiejar)
		if tp.Err != nil {
			switch tp.Err.(type) {
			case *url.Error:
				log.Panic()
			default:
				q.Q(tp.Err)
				return nil
			}
		}
		htmlresponse = goxml.NewHtmlXp(tp.Responsebody)
		// delete stuff that just takes too much space when debugging
		tp.Resp.TLS = nil
		tp.Resp.Body = nil
		//q.Q("resp", tp, tp.Err, tp.Resp, string(tp.Responsebody))
		if u, _ = tp.Resp.Location(); u != nil { // we don't care about the StatusCode - Location means redirect
			if tp.Err == nil {
				query := u.Query()
				// we got to a discoveryservice - choose our testidp
				if len(query["return"]) > 0 && len(query["returnIDParam"]) > 0 {
					u, _ = url.Parse(query["return"][0])
					q := u.Query()
					q.Set(query["returnIDParam"][0], tp.DSIdpentityID)
					u.RawQuery = q.Encode()
				} else if strings.Contains(u.Path, "getconsent.php") { // hub consent
					u.RawQuery = u.RawQuery + "&yes=1"
					tp.ConsentGiven = true
				}
			}
			//q.Q(u.Host, finalIdp.Host)
			if u.Host != finalIdp.Host {
				method = "GET"
				body = ""
			} else { // we have reached our IdP
				tp.newresponse(u)
				method = "POST"
			}
			continue
		}
		if tp.Resp.StatusCode == 500 {
			error := ""
			if tp.Resp.Header.Get("content-type") == "text/html" { // hub errors
				error = htmlresponse.Query1(nil, `//a[@id="errormsg"]/text()`)
			} else { // birk & krib errors
				error = string(tp.Responsebody)
				error = regexp.MustCompile("^\\d* ").ReplaceAllString(error, "")
			}
			fmt.Println(strings.Trim(error, "\n "))
			break
		} else {
			tp.Newresponse, _ = gosaml.Html2SAMLResponse(tp.Responsebody)
			if tp.Newresponse.Query1(nil, ".") == "" { // from old hub - disjoint federations
				fmt.Println("unknown error")
				break
			}
			method = "POST"
		}
	}
	if tp.Trace {
		log.Println()
	}
	return
}
// newresponse plays the IdP's part: it decodes the SAMLRequest carried in u
// and sets tp.Newresponse to either a canned nemlog-in response (when
// FinalIdp is the nemlog-in test IdP) or a freshly built, signed — and
// optionally encrypted — response for the default test IdP.
func (tp *Testparams) newresponse(u *url.URL) {
	// get the SAMLRequest
	query := u.Query()
	req, _ := base64.StdEncoding.DecodeString(query["SAMLRequest"][0])
	authnrequest := goxml.NewXp(gosaml.Inflate(req))
	tp.logxml(authnrequest)
	switch tp.FinalIdp {
	case "https://login.test-nemlog-in.dk":
		tp.Newresponse = goxml.NewXpFromFile("testdata/nemlogin.encryptedresponse.xml")
		tp.logxml(tp.Newresponse)
	case "https://this.is.not.a.valid.idp":
		// create a response
		tp.Newresponse = gosaml.NewResponse(tp.Idpmd, tp.Hubspmd, authnrequest, tp.Attributestmt)
		wayfhybrid.CopyAttributes(tp.Attributestmt, tp.Newresponse, tp.Hubspmd)
		// Sign each requested element with the test IdP's key.
		for _, xpath := range tp.ElementsToSign {
			element := tp.Newresponse.Query(nil, xpath)[0]
			before := tp.Newresponse.Query(element, "*[2]")[0]
			err := tp.Newresponse.Sign(element.(types.Element), before.(types.Element), []byte(tp.Privatekey), []byte(tp.Privatekeypw), tp.Certificate, tp.Hashalgorithm)
			if err != nil {
				// q.Q("Newresponse", err.(goxml.Werror).Stack(2))
				log.Fatal(err)
			}
		}
		//tp.logxml(tp.Newresponse)
		if tp.Encryptresponse {
			assertion := tp.Newresponse.Query(nil, "saml:Assertion[1]")[0]
			cert := tp.Hubspmd.Query1(nil, `//md:KeyDescriptor[@use="encryption" or not(@use)]/ds:KeyInfo/ds:X509Data/ds:X509Certificate`)
			if cert == "" {
				// BUG FIX: the fmt.Errorf result was constructed and silently
				// discarded here (vet: unused result) — report it instead.
				log.Printf("Could not find encryption cert for: %s", tp.Hubspmd.Query1(nil, "/@entityID"))
			}
			_, publickey, _ := gosaml.PublicKeyInfo(cert)
			if tp.Env == "xdev" {
				// BUG FIX: the ReadFile error was shadowed by the following
				// := and never checked; bail out on a failed read as well.
				cert, err := ioutil.ReadFile(*testcertpath)
				if err != nil {
					return
				}
				pk, err := x509.ParseCertificate(cert)
				if err != nil {
					return
				}
				publickey = pk.PublicKey.(*rsa.PublicKey)
			}
			ea := goxml.NewXpFromString(`<saml:EncryptedAssertion xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"></saml:EncryptedAssertion>`)
			err := tp.Newresponse.Encrypt(assertion.(types.Element), publickey, ea)
			if err != nil {
				log.Fatal(err)
			}
			tp.Encryptresponse = false // for now only possible for idp -> hub
			tp.logxml(tp.Newresponse)
		}
	}
}
// sendRequest sends a http request - GET or POST using the supplied url, method and cookies.
// It updates the cookies and returns a http.Response, a possible response body and error.
// The shared client's dialer maps the url's host to the actual server under
// test (see resolv in TestMain). Because the client refuses redirects with a
// sentinel "redirect-not-allowed" error, that pseudo error is swallowed here
// and redirects are surfaced to the caller via resp.Location().
func (tp *Testparams) sendRequest(url *url.URL, method, body string, cookies map[string]map[string]*http.Cookie) (resp *http.Response, responsebody []byte, err error) {
	var payload io.Reader
	if method == "POST" {
		payload = strings.NewReader(body)
	}
	host := url.Host
	cookiedomain := "wayf.dk"
	req, err := http.NewRequest(method, url.String(), payload)
	for _, cookie := range cookies[cookiedomain] {
		req.AddCookie(cookie)
	}
	if method == "POST" {
		req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
		req.Header.Add("Content-Length", strconv.Itoa(len(body)))
	}
	req.Header.Add("Host", host)
	resp, err = client.Do(req)
	if err != nil && !strings.HasSuffix(err.Error(), "redirect-not-allowed") {
		// we need to do the redirect ourselves so a self inflicted redirect "error" is not an error
		log.Println(err)
		debug.PrintStack()
		return nil, nil, errors.New("emit macho dwarf: elf header corrupted")
		//log.Fatalln("client.do", err)
	}
	location, _ := resp.Location()
	loc := ""
	if location != nil {
		loc = location.Host + location.Path
	}
	// Persist any Set-Cookie headers into the caller's jar.
	setcookies := resp.Cookies()
	for _, cookie := range setcookies {
		if cookies[cookiedomain] == nil {
			cookies[cookiedomain] = make(map[string]*http.Cookie)
		}
		cookies[cookiedomain][cookie.Name] = cookie
	}
	// We can't get to the body if we got a redirect pseudo error above
	if err == nil {
		responsebody, err = ioutil.ReadAll(resp.Body)
		defer resp.Body.Close()
	}
	// We didn't get a Location: header - we are POST'ing a SAMLResponse
	if loc == "" {
		response := goxml.NewHtmlXp(responsebody)
		samlbase64 := response.Query1(nil, `//input[@name="SAMLResponse"]/@value`)
		if samlbase64 != "" {
			samlxml, _ := base64.StdEncoding.DecodeString(samlbase64)
			samlresponse := goxml.NewXp(samlxml)
			u, _ := url.Parse(samlresponse.Query1(nil, "@Destination"))
			loc = u.Host + u.Path
		}
	}
	if tp.Trace {
		log.Printf("%-4s %-70s %s %-15s %s\n", req.Method, host+req.URL.Path, resp.Proto, resp.Status, loc)
	}
	// we need to nullify the damn redirec-not-allowed error from above
	err = nil
	return
}
// ApplyMods changes a SAML message by applying an array of xpath expressions and a value.
// If the mod carries a function, the function is applied instead.
// If the value is "" the nodes are unlinked.
// If the value starts with "+ " the node content is prefixed with the rest of the value.
// If the value starts with "- " the rest of the value is appended to the node content.
// Otherwise the node content is replaced with the value.
func ApplyMods(xp *goxml.Xp, m mods) {
	for _, change := range m {
		if change.function != nil {
			change.function(xp)
		} else if change.value == "" {
			//log.Printf("changeval: '%s'\n", change.value)
			for _, element := range xp.Query(nil, change.path) {
				//log.Printf("unlink: %s\n", change.path)
				parent, _ := element.ParentNode()
				parent.RemoveChild(element)
				defer element.Free()
			}
		} else if strings.HasPrefix(change.value, "+ ") {
			for _, value := range xp.QueryMulti(nil, change.path) {
				xp.QueryDashP(nil, change.path, change.value[2:]+value, nil)
			}
		} else if strings.HasPrefix(change.value, "- ") {
			for _, value := range xp.QueryMulti(nil, change.path) {
				xp.QueryDashP(nil, change.path, value+change.value[2:], nil)
			}
		} else {
			xp.QueryDashP(nil, change.path, change.value, nil)
		}
	}
	//q.Q(string(xp.PP()))
}
// ValidateSignature checks that the response in xp carries at least one
// signature (on the response itself or on its assertion) and that every
// signature present verifies against one of the signing certificates
// published in md. It returns nil when all signatures verify.
func ValidateSignature(md, xp *goxml.Xp) (err error) {
	signingCerts := md.QueryMulti(nil, `./md:IDPSSODescriptor/md:KeyDescriptor[@use="signing" or not(@use)]/ds:KeyInfo/ds:X509Data/ds:X509Certificate`)
	if len(signingCerts) == 0 {
		return errors.New("no certificates found in metadata")
	}
	signedNodes := xp.Query(nil, "(/samlp:Response[ds:Signature] | /samlp:Response/saml:Assertion[ds:Signature])")
	if len(signedNodes) == 0 {
		dest := xp.Query1(nil, "/samlp:Response/@Destination")
		return fmt.Errorf("%s neither the assertion nor the response was signed", dest)
	}
	for _, node := range signedNodes {
		if err = gosaml.VerifySign(xp, signingCerts, node); err != nil {
			return err
		}
	}
	return nil
}
// TestAttributeNameFormat tests if the hub delivers the attributes in the correct format - only one (or none) is allowed
// Currently if none is specified we deliver both but lie about the format so we say that it is basic even though it actually is uri
// As PHPH always uses uri we just count the number of RequestedAttributes
func TestAttributeNameFormat(t *testing.T) {
	const (
		mdcounturi   = "count(//md:RequestedAttribute[@NameFormat='urn:oasis:names:tc:SAML:2.0:attrname-format:uri'])"
		mdcountbasic = "count(//md:RequestedAttribute[@NameFormat='urn:oasis:names:tc:SAML:2.0:attrname-format:basic'])"
		mdcount      = "count(//md:RequestedAttribute)"
		ascounturi   = "count(//saml:Attribute[@NameFormat='urn:oasis:names:tc:SAML:2.0:attrname-format:uri'])"
		ascountbasic = "count(//saml:Attribute[@NameFormat='urn:oasis:names:tc:SAML:2.0:attrname-format:basic'])"
	)
	stdoutstart()
	attrnameformats := []string{"uri", "basic"}
	// Pick one SP per declared AttributeNameFormat from the test pool.
	attrnameformatqueries := map[string]string{
		"uri":   "/*/*/*/wayf:wayf[wayf:AttributeNameFormat='urn:oasis:names:tc:SAML:2.0:attrname-format:uri']/../../@entityID",
		"basic": "/*/*/*/wayf:wayf[wayf:AttributeNameFormat='urn:oasis:names:tc:SAML:2.0:attrname-format:basic']/../../@entityID",
	}
	for _, attrname := range attrnameformats {
		eID := testSPs.Query1(nil, attrnameformatqueries[attrname])
		md, _ := Md.Internal.MDQ(eID)
		if md == nil {
			log.Fatalln("No SP found for testing attributenameformat: ", attrname)
		}
		tp := browse(nil, &overwrites{"Spmd": md})
		if tp != nil {
			//samlresponse := Html2SAMLResponse(tp)
			// Compare delivered attribute counts per format against the
			// number of RequestedAttributes in the SP's metadata.
			requested := md.QueryNumber(nil, mdcount)
			uricount := tp.Newresponse.QueryNumber(nil, ascounturi)
			basiccount := tp.Newresponse.QueryNumber(nil, ascountbasic)
			fmt.Printf("%t %t %t\n", basiccount == requested*2, uricount == requested, basiccount == requested)
			//fmt.Printf("requested %d uri %d basic %d\n", requested, uricount, basiccount)
		}
	}
	expected := ""
	if dohub || dobirk {
		expected += `false true false
false false true
`
	}
	stdoutend(t, expected)
}
// TestMultipleSPs tests just test a lot of SPs - if any fails signature validation it fails
// (Disabled: the leading x keeps go test from picking it up.)
func xTestMultipleSPs(t *testing.T) {
	stdoutstart()
	spquery := "/*/*/@entityID"
	eIDs := testSPs.QueryMulti(nil, spquery)
	for _, eID := range eIDs {
		log.Println("eID", eID)
		md, _ := Md.Internal.MDQ(eID)
		if md == nil {
			log.Fatalln("No SP found for testing multiple SPs: ", eID)
		}
		// Skip eduGAIN SPs and SPs outside the WAYF federation.
		if md.Query1(nil, "./md:Extensions/wayf:wayf/wayf:feds[.='eduGAIN']") == "eduGAIN" {
			continue
		}
		if md.Query1(nil, "./md:Extensions/wayf:wayf/wayf:feds[.='WAYF']") == "" {
			continue
		}
		log.Println("eID", eID)
		browse(nil, &overwrites{"Spmd": md})
	}
	expected := ""
	stdoutend(t, expected)
}
// TestDigestMethodSha1 tests that the Signature|DigestMethod is what the sp asks for
// (Disabled: the leading x keeps go test from picking it up.)
func xTestDigestMethodSendingSha1(t *testing.T) {
	stdoutstart()
	expected := ""
	entitymd, _ := Md.Internal.MDQ("https://metadata.wayf.dk/PHPh")
	tp := browse(nil, &overwrites{"Spmd": entitymd})
	if tp != nil {
		samlresponse, _ := gosaml.Html2SAMLResponse(tp.Responsebody)
		signatureMethod := samlresponse.Query1(nil, "//ds:SignatureMethod/@Algorithm")
		digestMethod := samlresponse.Query1(nil, "//ds:DigestMethod/@Algorithm")
		fmt.Printf("%s\n%s\n", signatureMethod, digestMethod)
		expected += `http://www.w3.org/2000/09/xmldsig#rsa-sha1
http://www.w3.org/2000/09/xmldsig#sha1
`
	}
	stdoutend(t, expected)
}
// TestDigestMethodSha256_1 tests that the Signature|DigestMethod is what the sp asks for
// — here a sha256 signature and digest are expected on the outgoing response.
func TestDigestMethodSendingSha256(t *testing.T) {
	stdoutstart()
	expected := ""
	entitymd, _ := Md.Internal.MDQ("https://wayfsp2.wayf.dk")
	//entitymd, _ := Md.Internal.MDQ("https://ucsyd.papirfly.com/AuthServices")
	tp := browse(nil, &overwrites{"Spmd": entitymd})
	if tp != nil {
		samlresponse, _ := gosaml.Html2SAMLResponse(tp.Responsebody)
		signatureMethod := samlresponse.Query1(nil, "//ds:SignatureMethod/@Algorithm")
		digestMethod := samlresponse.Query1(nil, "//ds:DigestMethod/@Algorithm")
		fmt.Printf("%s\n%s\n", signatureMethod, digestMethod)
		expected += `http://www.w3.org/2001/04/xmldsig-more#rsa-sha256
http://www.w3.org/2001/04/xmlenc#sha256
`
	}
	stdoutend(t, expected)
}
// TestDigestMethodSha1 tests that the Signature|DigestMethod is what the sp asks for
// — receiving direction. (Disabled: the leading x keeps go test from picking it up.)
func xTestDigestMethodReceivingSha1(t *testing.T) {
	stdoutstart()
	expected := ""
	entitymd, _ := Md.Internal.MDQ("https://metadata.wayf.dk/PHPh")
	tp := browse(nil, &overwrites{"Spmd": entitymd})
	if tp != nil {
		samlresponse, _ := gosaml.Html2SAMLResponse(tp.Responsebody)
		signatureMethod := samlresponse.Query1(nil, "//ds:SignatureMethod/@Algorithm")
		digestMethod := samlresponse.Query1(nil, "//ds:DigestMethod/@Algorithm")
		fmt.Printf("%s\n%s\n", signatureMethod, digestMethod)
		expected += `http://www.w3.org/2000/09/xmldsig#rsa-sha1
http://www.w3.org/2000/09/xmldsig#sha1
`
	}
	stdoutend(t, expected)
}
// TestDigestMethodReceivingSha256 tests that, with the Hashalgorithm overwrite
// set to sha256, the response carries the sha256 signature/digest algorithms.
func TestDigestMethodReceivingSha256(t *testing.T) {
	stdoutstart()
	var expected string
	spMeta, _ := Md.Internal.MDQ("https://wayfsp2.wayf.dk")
	//spMeta, _ := Md.Internal.MDQ("https://ucsyd.papirfly.com/AuthServices")
	res := browse(nil, &overwrites{"Spmd": spMeta, "Hashalgorithm": "sha256"})
	if res != nil {
		response, _ := gosaml.Html2SAMLResponse(res.Responsebody)
		sigAlg := response.Query1(nil, "//ds:SignatureMethod/@Algorithm")
		digestAlg := response.Query1(nil, "//ds:DigestMethod/@Algorithm")
		fmt.Printf("%s\n%s\n", sigAlg, digestAlg)
		expected = `http://www.w3.org/2001/04/xmldsig-more#rsa-sha256
http://www.w3.org/2001/04/xmlenc#sha256
`
	}
	stdoutend(t, expected)
}
// TestSigningResponse tests that an SP flagged with wayf:saml20.sign.response='1'
// gets the Response element itself signed and the Assertion left unsigned.
// (The previous header comment was copy-pasted from TestConsentDisabled.)
func TestSigningResponse(t *testing.T) {
	stdoutstart()
	expected := ""
	// find an entity that requests a signed response - not a birk entity as we know that using ssp does not understand the wayf namespace yet ...
	entityID := testSPs.Query1(nil, "/*/*/*/wayf:wayf[wayf:saml20.sign.response='1']/../../md:SPSSODescriptor/../@entityID")
	if entityID != "" {
		entitymd, _ := Md.Internal.MDQ(entityID)
		tp := browse(nil, &overwrites{"Spmd": entitymd})
		if tp != nil {
			samlresponse, _ := gosaml.Html2SAMLResponse(tp.Responsebody)
			// count signatures at the response and the assertion level separately
			responseSignatures := len(samlresponse.QueryMulti(nil, "/samlp:Response/ds:Signature"))
			assertionSignatures := len(samlresponse.QueryMulti(nil, "/samlp:Response/saml:Assertion/ds:Signature"))
			fmt.Printf("Response signature = %d Assertion signatures = %d\n", responseSignatures, assertionSignatures)
			expected = `Response signature = 1 Assertion signatures = 0
`
		}
	} else {
		expected += "no entity suited for test found"
	}
	stdoutend(t, expected)
}
// TestConsentDisabled tests that an SP with consent.disable set actually
// bypasses the consent form.
func TestConsentDisabled(t *testing.T) {
	stdoutstart()
	var expected string
	// pick any SP that has consent disabled in its wayf extension block
	entityID := testSPs.Query1(nil, "/*/*/*/wayf:wayf[wayf:consent.disable='1']/../../md:SPSSODescriptor/../@entityID")
	if entityID == "" {
		expected = "no entity suited for test found"
	} else {
		spMeta, _ := Md.Internal.MDQ(entityID)
		if res := browse(nil, &overwrites{"Spmd": spMeta}); res != nil {
			fmt.Printf("consent given %t\n", res.ConsentGiven)
		}
		expected = `consent given false
`
	}
	stdoutend(t, expected)
}
// TestConsentGiven tests that an SP *without* consent.disable set goes through
// the consent flow and ends up with consent given.
// (The previous header comment was copy-pasted from TestConsentDisabled.)
func TestConsentGiven(t *testing.T) {
	stdoutstart()
	expected := ""
	// find an entity where consent is NOT disabled - not a birk entity as we know that using ssp does not understand the wayf namespace yet ...
	entityID := testSPs.Query1(nil, "/*/*/*/wayf:wayf[not(wayf:consent.disable='1')]/../../md:SPSSODescriptor/../@entityID")
	if entityID != "" {
		entitymd, _ := Md.Internal.MDQ(entityID)
		tp := browse(nil, &overwrites{"Spmd": entitymd})
		if tp != nil {
			fmt.Printf("consent given %t\n", tp.ConsentGiven)
		}
		expected += `consent given true
`
	} else {
		expected += "no entity suited for test found"
	}
	stdoutend(t, expected)
}
// xTestPersistentNameID (disabled via the x prefix) tests that the persistent
// nameID (and eptid) is the same from both the hub and BIRK.
func xTestPersistentNameID(t *testing.T) {
	expected := ""
	stdoutstart()
	// BUG FIX: a plain `defer stdoutend(t, expected)` evaluates its arguments at
	// the time the defer statement executes, so the `expected += ...` below was
	// ignored and the comparison always ran against "". The closure reads the
	// final value of expected instead.
	defer func() { stdoutend(t, expected) }()
	// find an SP that requests the persistent nameID format plus eptid
	entityID := testSPs.Query1(nil, "/*/*/md:SPSSODescriptor/md:NameIDFormat[.='urn:oasis:names:tc:SAML:2.0:nameid-format:persistent']/../md:AttributeConsumingService/md:RequestedAttribute[@Name='urn:oid:1.3.6.1.4.1.5923.1.1.1.10' or @Name='eduPersonTargetedID']/../../../@entityID")
	log.Println("ent", entityID)
	entitymd, _ := Md.Internal.MDQ(entityID)
	if entitymd == nil {
		return // no suitable SP found — nothing to test
		//log.Fatalln("no SP found for testing TestPersistentNameID")
	}
	tp := browse(nil, &overwrites{"Spmd": entitymd})
	if tp != nil {
		samlresponse, _ := gosaml.Html2SAMLResponse(tp.Responsebody)
		entityID := entitymd.Query1(nil, "@entityID")
		nameidformat := samlresponse.Query1(nil, "//saml:NameID/@Format")
		nameid := samlresponse.Query1(nil, "//saml:NameID")
		audience := samlresponse.Query1(nil, "//saml:Audience")
		spnamequalifier := samlresponse.Query1(nil, "//saml:NameID/@SPNameQualifier")
		eptid := samlresponse.Query1(nil, "//saml:Attribute[@Name='urn:oid:1.3.6.1.4.1.5923.1.1.1.10' or @Name='eduPersonTargetedID']/saml:AttributeValue")
		fmt.Printf("%s %s %s %s %s\n", nameidformat, nameid, eptid, audience, spnamequalifier)
		// {{.pnameid}}/{{.eptid}} are filled in per-environment by stdoutend's template
		expected += `urn:oasis:names:tc:SAML:2.0:nameid-format:persistent {{.pnameid}} {{.eptid}} ` + entityID + ` ` + entityID + "\n"
	}
}
// TestTransientNameID tests that the transient nameID (and eptid) is the same from both the hub and BIRK
func TestTransientNameID(t *testing.T) {
	stdoutstart()
	var expected string
	// find a WAYF-federation SP that requests the transient nameID format
	eID := testSPs.Query1(nil, "/*/*/*/wayf:wayf/wayf:feds[.='WAYF']/../../../md:SPSSODescriptor/md:NameIDFormat[.='urn:oasis:names:tc:SAML:2.0:nameid-format:transient']/../../@entityID")
	entitymd, _ := Md.Internal.MDQ(eID)
	var tp *Testparams
	entityID := ""
	// kept for reference: tamper examples that can be re-enabled for signature tests
	// m := modsset{"responsemods": mods{mod{"./saml:Assertion/saml:Issuer", "+ 1234", nil}}}
	// m := modsset{"responsemods": mods{mod{"./saml:Assertion/ds:Signature/ds:SignatureValue", "+ 1234", nil}}}
	tp = browse(nil, &overwrites{"Spmd": entitymd})
	if tp != nil {
		samlresponse, _ := gosaml.Html2SAMLResponse(tp.Responsebody)
		entityID = entitymd.Query1(nil, "@entityID")
		nameid := samlresponse.Query1(nil, "//saml:NameID")
		nameidformat := samlresponse.Query1(nil, "//saml:NameID/@Format")
		audience := samlresponse.Query1(nil, "//saml:Audience")
		spnamequalifier := samlresponse.Query1(nil, "//saml:NameID/@SPNameQualifier")
		// a transient nameID value is random, so only check that it is non-empty
		fmt.Printf("%s %t %s %s\n", nameidformat, nameid != "", audience, spnamequalifier)
		expected = `urn:oasis:names:tc:SAML:2.0:nameid-format:transient true ` + entityID + ` ` + entityID + "\n"
	}
	stdoutend(t, expected)
}
/*
// TestUnspecifiedNameID tests that the
func TestUnspecifiedNameID(t *testing.T) {
stdoutstart()
m := modsset{"requestmods": mods{mod{"/samlp:NameIDPolicy[1]/@Format", "urn:oasis:names:tc:SAML:2.0:nameid-format:unspecified"}}}
// BIRK always sends NameIDPolicy/@Format=transient - but respects what the hub sends back - thus we need to fix the request BIRK sends to the hub (WAYFMMISC-940)
// n := modsset{"birkrequestmods": m["requestmods"]}
hub := DoRunTestHub(m)
birk := DoRunTestBirk(m)
expected := ""
for _, tp := range []*Testparams{hub, birk} {
if tp == nil || tp.Resp.StatusCode != 200 {
continue
}
samlresponse := Html2SAMLResponse(tp)
nameidformat := samlresponse.Query1(nil, "//saml:NameID/@Format")
nameid := samlresponse.Query1(nil, "//saml:NameID")
eptid := samlresponse.Query1(nil, "//saml:Attribute[@Name='urn:oid:1.3.6.1.4.1.5923.1.1.1.10']/saml:AttributeValue")
fmt.Printf("%s %t %s\n", nameidformat, nameid != "", eptid)
expected += `urn:oasis:names:tc:SAML:2.0:nameid-format:transient true {{.eptid}}
`
}
stdoutend(t, expected)
}
*/
// xTestNemLogin (disabled via the x prefix) exercises the NemLog-in IdP flow.
// Only meaningful in the dev environment; a no-op otherwise.
func xTestNemLogin(t *testing.T) {
	var expected string
	if *env != "dev" {
		return
	}
	stdoutstart()
	// common res for hub and birk
	expected += `cn urn:oasis:names:tc:SAML:2.0:attrname-format:basic
Anton Banton Cantonsen
`
	// pin the clock so the signed NemLog-in test fixture validates
	gosaml.TestTime, _ = time.Parse(gosaml.XsDateTime, "2017-10-09T20:48:49.385Z")
	tp := Newtp(&overwrites{"Idp": "https://nemlogin.wayf.dk", "FinalIdp": "https://login.test-nemlog-in.dk"})
	// cert := ioutil.ReadFile("testdata/2481cb9e1194df81050c7d22b823540b9442112c.X509Certificate")
	// tp.
	res := browse(nil, tp)
	// NOTE(review): `true ||` forces the dump even when browse returns nil,
	// which would then panic on res.Newresponse — presumably a debugging aid; confirm.
	if true || res != nil {
		gosaml.AttributeCanonicalDump(os.Stdout, res.Newresponse)
	}
	stdoutend(t, expected)
	gosaml.TestTime = time.Time{} // restore the real clock for subsequent tests
}
// TestFullAttributeset tests that the full attribute set is delivered to the
// default test SP (the attribute statement built from testAttributes, after the
// hub's filtering/derivation, canonically dumped and compared line by line).
func TestFullAttributeset(t *testing.T) {
	var expected string
	stdoutstart()
	// common res for hub and birk
	expected += `cn urn:oasis:names:tc:SAML:2.0:attrname-format:basic
Anton Banton Cantonsen
eduPersonAffiliation urn:oasis:names:tc:SAML:2.0:attrname-format:basic
alum
member
student
eduPersonAssurance urn:oasis:names:tc:SAML:2.0:attrname-format:basic
2
eduPersonEntitlement urn:oasis:names:tc:SAML:2.0:attrname-format:basic
https://example.com/course101
eduPersonPrimaryAffiliation urn:oasis:names:tc:SAML:2.0:attrname-format:basic
student
eduPersonPrincipalName urn:oasis:names:tc:SAML:2.0:attrname-format:basic
joe@this.is.not.a.valid.idp
eduPersonScopedAffiliation urn:oasis:names:tc:SAML:2.0:attrname-format:basic
alum@this.is.not.a.valid.idp
member@this.is.not.a.valid.idp
student@this.is.not.a.valid.idp
eduPersonTargetedID urn:oasis:names:tc:SAML:2.0:attrname-format:basic
{{.eptid}}
gn urn:oasis:names:tc:SAML:2.0:attrname-format:basic
Anton Banton <SamlRequest id="abc">abc</SamlRequest>
mail urn:oasis:names:tc:SAML:2.0:attrname-format:basic
joe@example.com
norEduPersonLIN urn:oasis:names:tc:SAML:2.0:attrname-format:basic
123456789
organizationName urn:oasis:names:tc:SAML:2.0:attrname-format:basic
Orphanage - home for the homeless
preferredLanguage urn:oasis:names:tc:SAML:2.0:attrname-format:basic
da
schacCountryOfCitizenship urn:oasis:names:tc:SAML:2.0:attrname-format:basic
dk
schacDateOfBirth urn:oasis:names:tc:SAML:2.0:attrname-format:basic
18580824
schacHomeOrganization urn:oasis:names:tc:SAML:2.0:attrname-format:basic
this.is.not.a.valid.idp
schacHomeOrganizationType urn:oasis:names:tc:SAML:2.0:attrname-format:basic
urn:mace:terena.org:schac:homeOrganizationType:int:other
schacPersonalUniqueID urn:oasis:names:tc:SAML:2.0:attrname-format:basic
urn:mace:terena.org:schac:personalUniqueID:dk:CPR:2408586234
schacYearOfBirth urn:oasis:names:tc:SAML:2.0:attrname-format:basic
1858
sn urn:oasis:names:tc:SAML:2.0:attrname-format:basic
Cantonsen
`
	res := browse(nil, nil)
	if res != nil {
		gosaml.AttributeCanonicalDump(os.Stdout, res.Newresponse)
	}
	stdoutend(t, expected)
}
// TestFullAttributesetSP2 tests that the (filtered) attribute set is delivered
// to the PHPh service — it only requests eppn, eptid and mail.
func TestFullAttributesetSP2(t *testing.T) {
	var expected string
	stdoutstart()
	spmd, _ := Md.Internal.MDQ("https://metadata.wayf.dk/PHPh")
	res := browse(nil, &overwrites{"Spmd": spmd})
	if res != nil {
		gosaml.AttributeCanonicalDump(os.Stdout, res.Newresponse)
	}
	expected += `eduPersonPrincipalName urn:oasis:names:tc:SAML:2.0:attrname-format:basic
joe@this.is.not.a.valid.idp
eduPersonTargetedID urn:oasis:names:tc:SAML:2.0:attrname-format:basic
WAYF-DK-493ee01e49107fed7c4b89622d8087bc5064cc15
mail urn:oasis:names:tc:SAML:2.0:attrname-format:basic
joe@example.com
`
	stdoutend(t, expected)
}
// TestFullEncryptedAttributeset1 tests that the attribute set survives an
// encrypted response to the PHPh service.
func TestFullEncryptedAttributeset1(t *testing.T) {
	stdoutstart()
	var expected string
	phphMeta, _ := Md.Internal.MDQ("https://metadata.wayf.dk/PHPh")
	res := browse(nil, &overwrites{"Encryptresponse": true, "Spmd": phphMeta})
	if res != nil {
		gosaml.AttributeCanonicalDump(os.Stdout, res.Newresponse)
	}
	expected = `eduPersonPrincipalName urn:oasis:names:tc:SAML:2.0:attrname-format:basic
joe@this.is.not.a.valid.idp
eduPersonTargetedID urn:oasis:names:tc:SAML:2.0:attrname-format:basic
WAYF-DK-493ee01e49107fed7c4b89622d8087bc5064cc15
mail urn:oasis:names:tc:SAML:2.0:attrname-format:basic
joe@example.com
`
	stdoutend(t, expected)
}
// TestAccessForNonIntersectingAdHocFederations tests that an SP sharing no
// federation with the IdP is rejected with "no common federations".
func TestAccessForNonIntersectingAdHocFederations(t *testing.T) {
	stdoutstart()
	var expected string
	spMeta, _ := Md.Internal.MDQ("https://this.is.not.a.valid.sp")
	if res := browse(nil, &overwrites{"Spmd": spMeta}); res != nil {
		if *do == "hub" || *do == "birk" {
			expected = `no common federations
`
		}
	}
	stdoutend(t, expected)
}
// TestSignErrorModifiedContent tests that tampering with signed content
// (appending to the Issuer) is caught as a digest mismatch.
func TestSignErrorModifiedContent(t *testing.T) {
	stdoutstart()
	var expected string
	// "+ 1234" appends garbage to the Issuer so the digest no longer matches
	tamper := modsset{"responsemods": mods{mod{"./saml:Assertion/saml:Issuer", "+ 1234", nil}}}
	if res := browse(tamper, nil); res != nil {
		if *do == "hub" || *do == "birk" {
			expected = `["cause:digest mismatch","err:unable to validate signature"]
`
		}
	}
	stdoutend(t, expected)
}
// TestSamlVulnerability checks that removing an XML comment embedded in the
// eppn AttributeValue (the classic SAML comment-truncation attack) breaks the
// digest instead of silently changing the delivered attribute value.
func TestSamlVulnerability(t *testing.T) {
	var expected string
	stdoutstart()
	// NOTE(review): the "- " prefix presumably means "remove this text from the
	// matched node" (see ApplyMods); the triple dash in "--->" looks deliberate — confirm.
	m := modsset{"responsemods": mods{mod{"./saml:Assertion/saml:AttributeStatement/saml:Attribute[@Name=\"eduPersonPrincipalName\"]/saml:AttributeValue", "- <!--and.a.fake.domain--->", nil}}}
	res := browse(m, nil)
	if res != nil {
		switch *do {
		case "hub", "birk":
			expected = `["cause:digest mismatch","err:unable to validate signature"]
`
		}
	}
	stdoutend(t, expected)
}
// TestSignErrorModifiedSignature tests that tampering with the signature value
// itself is caught as an RSA verification error.
func TestSignErrorModifiedSignature(t *testing.T) {
	stdoutstart()
	var expected string
	// corrupt the SignatureValue so RSA verification fails
	tamper := modsset{"responsemods": mods{mod{"./saml:Assertion/ds:Signature/ds:SignatureValue", "+ 1234", nil}}}
	if res := browse(tamper, nil); res != nil {
		if *do == "hub" || *do == "birk" {
			expected = `["cause:crypto/rsa: verification error","err:unable to validate signature"]
`
		}
	}
	stdoutend(t, expected)
}
// TestNoSignatureError tests how the hub and BIRK react to assertions that are
// not signed at all.
func TestNoSignatureError(t *testing.T) {
	stdoutstart()
	var expected string
	// strip every ds:Signature element from the response
	tamper := modsset{"responsemods": mods{mod{"//ds:Signature", "", nil}}}
	if res := browse(tamper, nil); res != nil {
		if *do == "hub" || *do == "birk" {
			expected = `["cause:encryption error"]
`
		}
	}
	stdoutend(t, expected)
}
// TestUnknownKeySignatureError tests how the hub and BIRK react to a response
// signed with a key that does not belong to the IdP.
func TestUnknownKeySignatureError(t *testing.T) {
	var expected string
	stdoutstart()
	// Just a random private key - not used for anything else
	pk := `-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAsd0urclhDMeNqfmmES6LxVf3mK6CAX3vER1Te8QNLsd1iUEq
inmx+j6TqoyLBuVrQkOSMn7pPQMobjpca81KsWcS00RvZCNAgreTj4jOzfIouSml
6BDjuEPP9GEjQhf5iajenBeKfK8jPVnpxEKsuopE6ueKG5Rpi59mV/iVq7ZMQSGl
504OBKWBkAUgO5dPneB632uJSp2kiy0/YNUp30ItR45TncOqEtkrwBx219pRg2B0
2ot8TwZ8xFD7LG2V/hq8+0Ppp+tzTWDAri5z5ZSrAn0/j8sC56Qcwl2w2sYYhpNx
8T9x1QupnIpR1RyHCqR5mBJWDtO3pLAyPW74EwIDAQABAoIBAG9MixMwusw2a017
7RE/YTNCUqt2N+AbH+hDw6PlEKK/KauT3bz9XgPL+Ld2buEH2tCCXA/BHs6RFVG0
r3S96AmPCFavskylSo8BtRLSdyakbBtCFpFbUERUGuM/jcKkIgCkbXibuos/RPv1
MbUgS9oHAA1GikOr4Uf/nRlbcr0ZsRQnqp9uaK+rMCnypBQFB/YE1eKuTqSXf/Yb
D0+xJ3XDaTalBH2qXfIZX3+hKd7NvL5KHAc5ZVj3LzaBJ6GXV7nIKKbbTbdQdjxe
uEzPj36Zb7ultAorQYIyPHlGiXBh+gpkC3BHxDLwIqG+Iw0wUCnlKTDBO0qq8JcZ
TQAVsmECgYEA2IAosfRHjgWhT40+JTd/DICLoa/VAUeHok1JvjirJwADjDj0oZ6C
Ry5ioxrOpxH1RvHSfCHdKt0/aPviyEJGDRU8d/tFQypeSzDHL/BDQtavw/Na5c9x
epCft6HshpuzPr43IYB/VbiUedm8w78jNIcXEphNgNLaw22uU/3gkfkCgYEA0lB3
t+QJiHXY3J7YHze5jYrK96By9DY8MjkgKLwxaFFGZqpb2egXQK5ohBHuAbZXVGDY
oOH/IOBgdsOYuJv7NKfMY35wzrMygfWXbSNlTZwdrmJPqOSfUwu/hmBuwEHsfrEJ
3a2xiX+OFhfRwebcQwgOrN1FVpobKrXqYjp+3WsCgYB/vu9EQY1PIcdS91ZqA1r1
94tsdiHLRXekrtIKacmjk4CEZr8B9lOMyLPu5cx2DESb/ehi0mB8AFyAB9CCtYg8
BAHQEfWGciN9XmTJxo0JjT/c8WT7IPImjduQMP0tWAXlybsiC34XCHijhXS6U7fk
MKnOkQt6LfBjS/6HFNBDkQKBgBbW0DlzFSnxikxjH5s8RPU/Bk2f6fvlS+I0W+6w
iTkH4npRs8nVL3lBt23oOI2NDKzIG55VDIy4cSFUmmgp4DzWoBaJ65w2z5xXXEto
1Z54/qwqVvZDZZ3yH6lrHXvZbOJRPX4KV8ZTyM1TZt8EwBSzckyJdvcxoxOfT8W9
DnvjAoGAIu1AHwMhBdGmwffsII1VAb7gyYnjFbPfaSrwaxIMJ61Djayg4XhGFJ5+
NDVIEaV6/PITFgNcYIzBZxgCEqZ6jJ5jiidlnUbGPYaMhPN/mmHCP/2dvYW6ZSGC
mYqIGJZzLM/wk1u/CG52i+zDOiYbeiYNZc7qhIFU9ueinr88YZo=
-----END RSA PRIVATE KEY-----
`
	// need to do resign before sending to birk - not able to do that pt
	// _ = DoRunTestBirk(nil)
	// "-" password marks the key as unencrypted (see key handling in Newtp/browse)
	if browse(nil, &overwrites{"Privatekey": pk, "Privatekeypw": "-"}) != nil {
		switch *do {
		case "hub", "birk":
			expected = `["cause:crypto/rsa: verification error","err:unable to validate signature"]
`
		}
	}
	stdoutend(t, expected)
}
// TestRequestSchemaError tests that the hub and BIRK react to schema errors in
// requests — here an invalid boolean in @IsPassive.
func TestRequestSchemaError(t *testing.T) {
	stdoutstart()
	var expected string
	tamper := modsset{"requestmods": mods{mod{"./@IsPassive", "isfalse", nil}}}
	if browse(tamper, nil) != nil && (*do == "hub" || *do == "birk") {
		expected = `["cause:schema validation failed"]
`
	}
	stdoutend(t, expected)
}
// TestResponseSchemaError tests that the hub and BIRK react to schema errors in
// responses — here an invalid dateTime in @IssueInstant.
func TestResponseSchemaError(t *testing.T) {
	stdoutstart()
	var expected string
	tamper := modsset{"responsemods": mods{mod{"./@IssueInstant", "isfalse", nil}}}
	if browse(tamper, nil) != nil && (*do == "hub" || *do == "birk") {
		expected = `["cause:schema validation failed"]
`
	}
	stdoutend(t, expected)
}
// TestNoEPPNError tests that the hub does not accept assertions without an eppn.
func TestNoEPPNError(t *testing.T) {
	stdoutstart()
	var expected string
	// drop the eduPersonPrincipalName attribute entirely
	tamper := modsset{"attributemods": mods{mod{`//saml:Attribute[@Name="eduPersonPrincipalName"]`, "", nil}}}
	if browse(tamper, nil) != nil && (*do == "hub" || *do == "birk") {
		expected = `["cause:isRequired: eduPersonPrincipalName"]
`
	}
	stdoutend(t, expected)
}
// TestEPPNScopingError tests that the hub rejects an eppn whose security domain
// does not match any of the IdP's registered scopes.
func TestEPPNScopingError(t *testing.T) {
	var expected string
	stdoutstart()
	// remove the original eppn, then insert one scoped to example.com (not a valid scope for the test IdP)
	m := modsset{"attributemods": mods{
		mod{`//saml:Attribute[@Name="eduPersonPrincipalName"]`, "", nil},
		mod{`/saml:Assertion/saml:AttributeStatement/saml:Attribute[@Name="eduPersonPrincipalName"]/saml:AttributeValue[1]`, "joe@example.com", nil}}}
	if browse(m, nil) != nil {
		switch *do {
		case "hub", "birk":
			expected = `["cause:security domain 'example.com' does not match any scopes"]
`
		}
	}
	stdoutend(t, expected)
}
// TestNoLocalpartInEPPNError tests that the hub rejects an eppn with no localpart.
func TestNoLocalpartInEPPNError(t *testing.T) {
	var expected string
	stdoutstart()
	// remove the original eppn, then insert one that is only "@domain"
	m := modsset{"attributemods": mods{
		mod{`//saml:Attribute[@Name="eduPersonPrincipalName"]`, "", nil},
		mod{`/saml:Assertion/saml:AttributeStatement/saml:Attribute[@Name="eduPersonPrincipalName"]/saml:AttributeValue[1]`, "@this.is.not.a.valid.idp", nil}}}
	if browse(m, nil) != nil {
		switch *do {
		case "hub", "birk":
			expected = `["cause:not a scoped value: @this.is.not.a.valid.idp"]
`
		}
	}
	stdoutend(t, expected)
}
// TestNoDomainInEPPNError tests that the hub rejects an eppn with no domain.
// (The previous header comment carried the name of the sibling test.)
func TestNoDomainInEPPNError(t *testing.T) {
	var expected string
	stdoutstart()
	// remove the original eppn, then insert a bare localpart with no scope
	m := modsset{"attributemods": mods{
		mod{`//saml:Attribute[@Name="eduPersonPrincipalName"]`, "", nil},
		mod{`/saml:Assertion/saml:AttributeStatement/saml:Attribute[@Name="eduPersonPrincipalName"]/saml:AttributeValue[1]`, "joe", nil}}}
	if browse(m, nil) != nil {
		switch *do {
		case "hub", "birk":
			expected = `["cause:not a scoped value: joe"]
`
		}
	}
	stdoutend(t, expected)
}
// TestUnknownSPError tests how BIRK and the hub react to requests from an
// unknown SP.
func TestUnknownSPError(t *testing.T) {
	stdoutstart()
	var expected string
	tamper := modsset{"requestmods": mods{mod{"./saml:Issuer", "https://www.example.com/unknownentity", nil}}}
	if browse(tamper, nil) != nil && (*do == "hub" || *do == "birk") {
		expected = `["cause:Metadata not found","err:Metadata not found","key:https://www.example.com/unknownentity","table:HYBRID_EXTERNAL_SP"]
`
	}
	stdoutend(t, expected)
}
// TestUnknownIDPError tests how BIRK reacts to requests destined for an unknown IdP.
// Use the line below for new birkservers:
// Metadata for entity: https://birk.wayf.dk/birk.php/www.example.com/unknownentity not found
func TestUnknownIDPError(t *testing.T) {
	stdoutstart()
	var expected string
	if *do == "hub" || *do == "birk" {
		tamper := modsset{"requestmods": mods{mod{"./@Destination", "https://wayf.wayf.dk/unknownentity", nil}}}
		if browse(tamper, nil) != nil {
			expected = `unknown error
`
		}
	}
	stdoutend(t, expected)
}
// xTestXSW1 (disabled via the x prefix) applies XML Signature Wrapping attack #1
// (see ApplyXSW1) and checks that the response is rejected.
func xTestXSW1(t *testing.T) {
	var expected string
	stdoutstart()
	// empty path/value: the whole transformation is done by the ApplyXSW1 callback
	m := modsset{"responsemods": mods{mod{"", "", ApplyXSW1}}}
	if browse(m, nil) != nil {
		switch *do {
		case "hub", "birk":
			expected += `["cause:sql: no rows in result set","err:Metadata not found","key:https://birk.wayf.dk/birk.php/www.example.com/unknownentity","table:HYBRID_EXTERNAL_IDP"]
`
		}
	}
	stdoutend(t, expected)
}
// ApplyXSW1 rewrites a signed response into XML Signature Wrapping attack #1:
// a copy of the signed assertion (with its signature removed) is wrapped inside
// the original assertion's signature element, and the original assertion's ID
// is changed — so the signature still references the untouched copy while a
// validator that trusts the outer assertion sees attacker-controlled content.
// from https://github.com/SAMLRaider/SAMLRaider/blob/master/src/main/java/helpers/XSWHelpers.java
func ApplyXSW1(xp *goxml.Xp) {
	log.Println(xp.PP())
	assertion := xp.Query(nil, "/samlp:Response[1]/saml:Assertion[1]")[0]
	// deep-copy the assertion before tampering with the original
	clonedAssertion := xp.CopyNode(assertion, 1)
	// strip the signature from the clone — the clone is the "evil" wrapped copy
	signature := xp.Query(clonedAssertion, "./ds:Signature")[0]
	log.Println(goxml.NewXpFromNode(signature).PP())
	parent, _ := signature.(types.Element).ParentNode()
	parent.RemoveChild(signature)
	defer signature.Free()
	log.Println(goxml.NewXpFromNode(clonedAssertion).PP())
	// embed the (signatureless) clone inside the original assertion's signature
	newSignature := xp.Query(assertion, "ds:Signature[1]")[0]
	newSignature.AddChild(clonedAssertion)
	// re-ID the original so the signature reference no longer points at it
	assertion.(types.Element).SetAttribute("ID", "_evil_response_ID")
	log.Println(xp.PP())
}
// xTestSpeed (disabled via the x prefix) hammers the hub with concurrent login
// flows and logs the per-iteration latency.
func xTestSpeed(t *testing.T) {
	const (
		workers    = 10
		iterations = 100000
	)
	for w := 0; w < workers; w++ {
		wg.Add(1)
		go func(w int) {
			for iter := 0; iter < iterations; iter++ {
				begin := time.Now()
				spmd, _ := Md.Internal.MDQ("https://metadata.wayf.dk/PHPh")
				browse(nil, &overwrites{"Spmd": spmd})
				log.Println(w, iter, time.Since(begin).Seconds())
				//runtime.GC()
				//time.Sleep(200 * time.Millisecond)
			}
			wg.Done()
		}(w)
	}
	wg.Wait()
}
BIRK entityIDs is unfashionable
package wayffunctionaltest
import (
"bytes"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"errors"
"flag"
"fmt"
toml "github.com/pelletier/go-toml"
"github.com/wayf-dk/go-libxml2/types"
"github.com/wayf-dk/gosaml"
"github.com/wayf-dk/goxml"
"github.com/wayf-dk/lMDQ"
"github.com/wayf-dk/wayfhybrid"
"github.com/y0ssar1an/q"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"os"
"reflect"
"regexp"
// "runtime"
"runtime/debug"
"strconv"
"strings"
"sync"
"testing"
"text/template"
"time"
)
var (
	// Keep otherwise-unused debug imports referenced so they can stay in the
	// import block while debugging.
	_ = log.Printf // For debugging; delete when done.
	_ = fmt.Printf
	_ = q.Q
)
type (
	// Testparams carries all per-testcase state: metadata for every entity
	// involved in a login flow, the simulated browser's cookie jar, signing
	// material, and the request/response artifacts collected while driving
	// the flow in browse().
	Testparams struct {
		Idp, BirkIdp, FinalIdp                             string     // entityIDs of the (virtual) IdPs involved
		Spmd, Idpmd, Hubidpmd, Hubspmd, Birkmd, Firstidpmd *goxml.Xp  // metadata documents for SP/IdP/hub/BIRK roles
		Cookiejar                                          map[string]map[string]*http.Cookie // domain -> name -> cookie
		IdpentityID                                        string
		DSIdpentityID                                      string // entityID handed to the discovery service
		// Resolv map[string]string
		Initialrequest *goxml.Xp // the AuthnRequest that starts the flow
		Newresponse    *goxml.Xp // the final response delivered to the SP
		Resp           *http.Response
		Responsebody   []byte
		Err            error
		Trace, Logxml, Encryptresponse bool // debugging / response-encryption switches
		Privatekey     string // PEM signing key used when acting as the IdP
		Privatekeypw   string
		Certificate    string
		Hashalgorithm  string // signature/digest algorithm, e.g. "sha1" or "sha256"
		Attributestmt  *goxml.Xp // the assertion skeleton holding the test attributes
		Hub            bool
		Birk           bool
		Env            string
		ConsentGiven   bool
		ElementsToSign []string
	}
	// overwrites maps Testparams field names to values applied via reflection in Newtp.
	overwrites map[string]interface{}
	// mod is a single tamper instruction: an xpath, a value (or empty for
	// deletion), and an optional callback for arbitrary transformations.
	mod struct {
		path, value string
		function    func(*goxml.Xp)
	}
	mods    []mod
	// modsset groups mods by stage: "requestmods", "responsemods", "attributemods", ...
	modsset map[string]mods
	M       map[string]interface{} // just an alias
)
// lMDQ_METADATA_SCHEMA_PATH is the schema used for metadata validation.
// NOTE(review): the name says "metadata schema" but the file is ws-federation.xsd — confirm intended.
const lMDQ_METADATA_SCHEMA_PATH = "src/github.com/wayf-dk/goxml/schemas/ws-federation.xsd"
var (
	// mdsources maps environment name -> metadata role -> mddb file used by TestMain.
	mdsources = map[string]map[string]string{
		"prodz": {
			"hub":         "../hybrid-metadata-test.mddb",
			"internal":    "../hybrid-metadata.mddb",
			"externalIdP": "../hybrid-metadata-test.mddb",
			"externalSP":  "../hybrid-metadata.mddb",
		},
		"prod": {
			"hub":         "../hybrid-metadata.mddb",
			"internal":    "../hybrid-metadata.mddb",
			"externalIdP": "../hybrid-metadata.mddb",
			"externalSP":  "../hybrid-metadata.mddb",
		},
		"dev": {
			"hub":         "../hybrid-metadata-test.mddb",
			"internal":    "../hybrid-metadata-test.mddb",
			"externalIdP": "../hybrid-metadata-test.mddb",
			"externalSP":  "../hybrid-metadata-test.mddb",
		},
	}
	wayf_hub_public, internal, externalIdP, externalSP *lMDQ.MDQ
	Md                                                 wayfhybrid.MdSets
	// testAttributes is the canned attribute set asserted by the fake test IdP.
	testAttributes = map[string][]string{
		"eduPersonPrincipalName":      {"joe@this.is.not.a.valid.idp"},
		"mail":                        {"joe@example.com"},
		"gn":                          {`Anton Banton <SamlRequest id="abc">abc</SamlRequest>`},
		"sn":                          {"Cantonsen"},
		"norEduPersonLIN":             {"123456789"},
		"eduPersonScopedAffiliation":  {"student@this.is.not.a.valid.idp", "member@this.is.not.a.valid.idp"},
		"preferredLanguage":           {"da"},
		"eduPersonEntitlement":        {"https://example.com/course101"},
		"eduPersonAssurance":          {"2"},
		"organizationName":            {"Orphanage - home for the homeless"},
		"cn":                          {"Anton Banton Cantonsen"},
		"eduPersonPrimaryAffiliation": {"student"},
		"eduPersonAffiliation":        {"alum"},
		"schacHomeOrganizationType":   {"abc"},
		"schacPersonalUniqueID":       {"urn:mace:terena.org:schac:personalUniqueID:dk:CPR:2408586234"},
		"schacCountryOfCitizenship":   {"dk"},
		"displayName":                 {"Anton Banton Cantonsen"},
	}
	// command line flags selecting what to test and which hosts to hit
	do           = flag.String("do", "hub", "Which tests to run")
	hub          = flag.String("hub", "wayf.wayf.dk", "the hostname for the hub server to be tested")
	hubbe        = flag.String("hubbe", "", "the hub backend server")
	birk         = flag.String("birk", "birk.wayf.dk", "the hostname for the BIRK server to be tested")
	birkbe       = flag.String("birkbe", "", "the birk backend server")
	ds           = flag.String("ds", "ds.wayf.dk", "the discovery server")
	trace        = flag.Bool("xtrace", false, "trace the request/response flow")
	logxml       = flag.Bool("logxml", false, "dump requests/responses in xml")
	env          = flag.String("env", "prod", "which environment to test dev, hybrid, prod - if not dev")
	refreshmd    = flag.Bool("refreshmd", true, "update local metadatcache before testing")
	testcertpath = flag.String("testcertpath", "/etc/ssl/wayf/certs/wildcard.test.lan.pem", "path to the testing cert")
	testSPs      *goxml.Xp
	dohub, dobirk bool
	// stdout capture plumbing used by stdoutstart/stdoutend
	old, r, w *os.File
	outC      = make(chan string)
	// templatevalues provides per-environment {{.eptid}}/{{.pnameid}} values for expected strings.
	templatevalues = map[string]map[string]string{
		"prod": {
			"eptid":   "WAYF-DK-c52a92a5467ae336a2be77cd06719c645e72dfd2",
			"pnameid": "WAYF-DK-c52a92a5467ae336a2be77cd06719c645e72dfd2",
		},
		"prodz": {
			"eptid":   "WAYF-DK-a7379f69e957371dc49350a27b704093c0b813f1",
			"pnameid": "WAYF-DK-a7379f69e957371dc49350a27b704093c0b813f1",
		},
		"dev": {
			"eptid":   "WAYF-DK-a7379f69e957371dc49350a27b704093c0b813f1",
			"pnameid": "WAYF-DK-a7379f69e957371dc49350a27b704093c0b813f1",
		},
		"hybrid": {
			"eptid":   "WAYF-DK-a7379f69e957371dc49350a27b704093c0b813f1",
			"pnameid": "WAYF-DK-a7379f69e957371dc49350a27b704093c0b813f1",
		},
	}
	resolv map[string]string // host:port remapping applied by the test HTTP transport
	wg     sync.WaitGroup
	tr     *http.Transport
	client *http.Client
)
// TestMain wires up the whole test fixture: opens the metadata databases for
// the selected environment, loads the hybrid configuration, selects candidate
// test SPs, and builds an HTTP client whose transport rewrites hostnames to
// the servers under test before running the suite.
func TestMain(m *testing.M) {
	flag.Parse()
	dohub = *do == "hub"
	dobirk = *do == "birk"
	log.Printf("do: %q hub: %q backend: %q birk: %q backend: %q\n", *do, *hub, *hubbe, *birk, *birkbe)
	// open the four metadata sets read-only for the chosen environment
	Md.Hub = &lMDQ.MDQ{Path: "file:" + mdsources[*env]["hub"] + "?mode=ro", Table: "HYBRID_HUB"}
	Md.Internal = &lMDQ.MDQ{Path: "file:" + mdsources[*env]["internal"] + "?mode=ro", Table: "HYBRID_INTERNAL"}
	Md.ExternalIdP = &lMDQ.MDQ{Path: "file:" + mdsources[*env]["externalIdP"] + "?mode=ro", Table: "HYBRID_EXTERNAL_IDP"}
	Md.ExternalSP = &lMDQ.MDQ{Path: "file:" + mdsources[*env]["externalSP"] + "?mode=ro", Table: "HYBRID_EXTERNAL_SP"}
	for _, md := range []gosaml.Md{Md.Hub, Md.Internal, Md.ExternalIdP, Md.ExternalSP} {
		err := md.(*lMDQ.MDQ).Open()
		if err != nil {
			panic(err)
		}
	}
	tomlConfig, err := toml.LoadFile(wayfhybrid.X.Path + "hybrid-config/hybrid-config.toml")
	if err != nil { // Handle errors reading the config file
		panic(fmt.Errorf("Fatal error config file: %s\n", err))
	}
	err = tomlConfig.Unmarshal(wayfhybrid.X)
	if err != nil {
		panic(fmt.Errorf("Fatal error %s\n", err))
	}
	// index the configured attribute descriptions by (name, nameformat namespace)
	for _, ad := range wayfhybrid.X.AttributeDescriptions {
		k := wayfhybrid.AttributeKey{ad.Name, wayfhybrid.X.AttributenameFormats[ad.Nameformat].Ns}
		wayfhybrid.AttributeDescriptions[k] = ad
		wayfhybrid.AttributeDescriptionsList[ad.Nameformat] = append(wayfhybrid.AttributeDescriptionsList[ad.Nameformat], ad)
	}
	//gosaml.Config.CertPath = "testdata/"
	//wayfhybrid.Md = Md
	//go wayfhybrid.Main()
	// need non-birk, non-request.validate and non-IDPList SPs for testing ....
	var numberOfTestSPs int
	testSPs, numberOfTestSPs, _ = Md.Internal.MDQFilter("/md:EntityDescriptor/md:Extensions/wayf:wayf[wayf:federation='WAYF' and not(wayf:IDPList)]/../../md:SPSSODescriptor/..")
	if numberOfTestSPs == 0 {
		log.Fatal("No testSP candidates")
	}
	// route the well-known hostnames to the hosts given on the command line
	resolv = map[string]string{"wayf.wayf.dk:443": *hub + ":443", "birk.wayf.dk:443": *birk + ":443", "krib.wayf.dk:443": *hub + ":443", "ds.wayf.dk:443": *ds + ":443"}
	tr = &http.Transport{
		TLSClientConfig:    &tls.Config{InsecureSkipVerify: true}, // test servers use self-signed certs
		Dial:               func(network, addr string) (net.Conn, error) { return net.Dial(network, resolv[addr]) },
		DisableCompression: true,
	}
	client = &http.Client{
		Transport: tr,
		// redirects are followed manually by browse(), so refuse them here
		CheckRedirect: func(req *http.Request, via []*http.Request) error { return errors.New("redirect-not-allowed") },
	}
	os.Exit(m.Run())
}
// logxml pretty-prints a SAML artifact when the Logxml flag is set. It accepts
// either a redirect URL carrying a deflated SAMLRequest query parameter or an
// already-parsed *goxml.Xp document.
// NOTE(review): if x is any other type, xml stays nil and xml.PP() presumably
// panics — callers appear to only pass the two handled types; confirm.
func (tp *Testparams) logxml(x interface{}) {
	if tp.Logxml {
		var xml *goxml.Xp
		switch i := x.(type) {
		case *url.URL:
			// extract, base64-decode and inflate the SAMLRequest from the redirect URL
			query := i.Query()
			req, _ := base64.StdEncoding.DecodeString(query.Get("SAMLRequest"))
			xml = goxml.NewXp(gosaml.Inflate(req))
		case *goxml.Xp:
			xml = i
		}
		log.Println(xml.PP())
	}
}
// stdoutstart redirects os.Stdout into a pipe so the test's printed output can
// be captured and compared by stdoutend. Not safe for parallel tests — it
// mutates the package-level old/r/w/outC capture state.
func stdoutstart() {
	old = os.Stdout // keep backup of the real stdout
	r, w, _ = os.Pipe()
	os.Stdout = w
	outC = make(chan string)
	// copy the output in a separate goroutine so printing can't block indefinitely
	go func() {
		var buf bytes.Buffer
		io.Copy(&buf, r)
		outC <- buf.String()
	}()
}
// stdoutend restores os.Stdout, collects everything printed since stdoutstart,
// expands {{.eptid}}/{{.pnameid}}-style placeholders in expected with the
// current environment's templatevalues, and fails the test on mismatch.
func stdoutend(t *testing.T, expected string) {
	// back to normal state
	var b bytes.Buffer
	w.Close()
	os.Stdout = old // restoring the real stdout
	got := <-outC
	tmpl := template.Must(template.New("expected").Parse(expected))
	_ = tmpl.Execute(&b, templatevalues[*env])
	expected = b.String()
	if expected == "" {
		// deliberately kept empty: an empty expected string is common for
		// environment-dependent tests, so the check below stays disabled
		// t.Errorf("unexpected empty expected string\n")
	}
	if expected != got {
		t.Errorf("\nexpected:\n%s\ngot:\n%s\n", expected, got)
	}
	//fmt.Printf("\nexpected:\n%s\ngot:\n%s\n", expected, got)
}
// Newtp builds a Testparams with defaults for the configured environment and
// applies the given overwrites twice: once before the derived fields (metadata
// lookups, signing key) are computed, and once after for the key/cert fields
// that depend on tp.Idpmd.
func Newtp(overwrite *overwrites) (tp *Testparams) {
	tp = new(Testparams)
	tp.Privatekeypw = os.Getenv("PW")
	if tp.Privatekeypw == "" {
		log.Fatal("no PW environment var")
	}
	tp.Env = *env
	tp.Hub = dohub
	tp.Birk = dobirk
	tp.Trace = *trace
	tp.Logxml = *logxml
	tp.Hashalgorithm = "sha256"
	tp.ElementsToSign = []string{"saml:Assertion[1]"}
	tp.Idp = "https://this.is.not.a.valid.idp"
	tp.FinalIdp = tp.Idp
	tp.Spmd, _ = Md.Internal.MDQ("https://wayfsp.wayf.dk")
	if overwrite != nil { // overwrite default values with test specific values while it still matters
		for k, v := range *overwrite {
			reflect.ValueOf(tp).Elem().FieldByName(k).Set(reflect.ValueOf(v))
		}
	}
	// don't use urn:... entityID'ed IdPs for now
	tp.BirkIdp = tp.Idp // regexp.MustCompile("^(https?://)(.*)$").ReplaceAllString(tp.Idp, "${1}birk.wayf.dk/birk.php/$2")
	tp.Hubidpmd, _ = Md.Hub.MDQ("https://wayf.wayf.dk")
	tp.Hubspmd = tp.Hubidpmd
	tp.Idpmd, _ = Md.Internal.MDQ(tp.Idp)
	tp.Birkmd, _ = Md.ExternalIdP.MDQ(tp.BirkIdp)
	tp.DSIdpentityID = tp.Idp
	switch *do {
	case "hub":
		tp.Firstidpmd = tp.Hubidpmd
		tp.DSIdpentityID = tp.BirkIdp
	case "birk":
		tp.Firstidpmd = tp.Birkmd
	}
	tp.Cookiejar = make(map[string]map[string]*http.Cookie)
	tp.Cookiejar["wayf.dk"] = make(map[string]*http.Cookie)
	// backend-selection cookies for the hub and birk load balancers
	tp.Cookiejar["wayf.dk"]["wayfid"] = &http.Cookie{Name: "wayfid", Value: *hubbe}
	//tp.Cookiejar["wayf.dk"] = make(map[string]*http.Cookie)
	tp.Cookiejar["wayf.dk"]["birkid"] = &http.Cookie{Name: "birkid", Value: *birkbe}
	tp.Attributestmt = newAttributeStatement(testAttributes)
	cert := tp.Idpmd.Query1(nil, `//md:KeyDescriptor[@use="signing" or not(@use)]/ds:KeyInfo/ds:X509Data/ds:X509Certificate`)
	if cert == "" {
		// BUG FIX: this was `fmt.Errorf(...)` whose result was discarded, so a
		// missing cert went unreported and the failure surfaced later with a
		// confusing message from PublicKeyInfo. Fail fast like the other error
		// paths in this function.
		log.Fatalf("Could not find signing cert for: %s", tp.Idpmd.Query1(nil, "/@entityID"))
	}
	tp.Certificate = cert
	keyname, _, err := gosaml.PublicKeyInfo(cert)
	if err != nil {
		log.Fatal(err)
	}
	pk, err := ioutil.ReadFile("signing/" + keyname + ".key")
	if err != nil {
		log.Fatal(err)
	}
	tp.Privatekey = string(pk)
	// due to dependencies on tp.Idpmd we need to overwrite again for specific keys
	// to be able to test for "wrong" keys
	if overwrite != nil {
		lateOverWrites := []string{"Privatekey", "Certificate"}
		for _, k := range lateOverWrites {
			if v, ok := (*overwrite)[k]; ok {
				reflect.ValueOf(tp).Elem().FieldByName(k).Set(reflect.ValueOf(v))
			}
		}
	}
	return
}
// newAttributeStatement builds a saml:Assertion skeleton (subject with a fresh
// transient NameID, a password AuthnStatement, and an AttributeStatement) and
// fills the AttributeStatement with the given attributes as basic-format,
// xs:string-typed values.
// NOTE: attrs is a map, so the attribute order in the result is randomized per
// call; consumers that compare output must canonicalize (cf. AttributeCanonicalDump).
func newAttributeStatement(attrs map[string][]string) (ats *goxml.Xp) {
	// FIX: the local was previously named `template`, shadowing the imported
	// text/template package within this function.
	const assertionTemplate = `<saml:Assertion xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xs="http://www.w3.org/2001/XMLSchema">
<saml:Subject>
<saml:NameID Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient"></saml:NameID>
</saml:Subject>
<saml:AuthnStatement>
<saml:AuthnContext>
<saml:AuthnContextClassRef>
urn:oasis:names:tc:SAML:2.0:ac:classes:Password
</saml:AuthnContextClassRef>
</saml:AuthnContext>
</saml:AuthnStatement>
<saml:AttributeStatement/>
</saml:Assertion>`
	ats = goxml.NewXpFromString(assertionTemplate)
	// give the subject a freshly generated transient NameID value
	ats.QueryDashP(nil, "./saml:Subject/saml:NameID", gosaml.Id(), nil)
	attributeStmt := ats.Query(nil, "./saml:AttributeStatement")[0]
	i := 1
	for attr, attrvals := range attrs {
		attrelement := ats.QueryDashP(attributeStmt, `saml:Attribute[`+strconv.Itoa(i)+`]`, "", nil)
		ats.QueryDashP(attrelement, "@Name", attr, nil)
		ats.QueryDashP(attrelement, "@NameFormat", "urn:oasis:names:tc:SAML:2.0:attrname-format:basic", nil)
		j := 1
		for _, attrval := range attrvals {
			attrvalelement := ats.QueryDashP(attrelement, `saml:AttributeValue[`+strconv.Itoa(j)+`]`, attrval, nil)
			ats.QueryDashP(attrvalelement, "@xsi:type", "xs:string", nil)
			j++
		}
		i++
	}
	return
}
// Does what the browser does: follow redirects and POSTs and display errors.
//
// browse drives a complete SSO flow for the SP in tp.Spmd: it creates an
// AuthnRequest and then alternates GETs (redirects, discovery, consent) and
// POSTs (SAMLResponses) until the response reaches the SP's
// AssertionConsumerService, an error page is rendered, or the redirect
// budget runs out. m carries xpath modifications applied to the attribute
// statement, the initial request and in-flight responses. overwrite is
// either an existing *Testparams to reuse, an *overwrites set passed to
// Newtp, or nil for defaults. Returns the Testparams used, or nil on
// transport errors.
func browse(m modsset, overwrite interface{}) (tp *Testparams) {
	var htmlresponse *goxml.Xp
	switch t := overwrite.(type) {
	case *overwrites:
		tp = Newtp(t)
	case *Testparams:
		tp = t
	case nil:
		tp = Newtp(nil)
	}
	// stage is the host whose responses this test run is allowed to tamper with.
	stage := map[string]string{"hub": "wayf.wayf.dk", "birk": "wayf.wayf.dk"}[*do]
	ApplyMods(tp.Attributestmt, m["attributemods"])
	tp.Initialrequest, _ = gosaml.NewAuthnRequest(nil, tp.Spmd, tp.Firstidpmd, []string{tp.IdpentityID}, "")
	ApplyMods(tp.Initialrequest, m["requestmods"])
	u, _ := gosaml.SAMLRequest2Url(tp.Initialrequest, "", "", "", "")
	// when to stop: the SP's ACS url and the IdP we expect to end at
	finalDestination, _ := url.Parse(tp.Initialrequest.Query1(nil, "./@AssertionConsumerServiceURL"))
	finalIdp, _ := url.Parse(tp.FinalIdp)
	redirects := 7 // hard cap so a broken flow cannot loop forever
	method := "GET"
	body := ""
	for {
		redirects--
		if redirects == 0 { // if we go wild ...
			return
		}
		if method == "POST" {
			// About to POST the current SAMLResponse onwards.
			tp.logxml(tp.Newresponse)
			acs := tp.Newresponse.Query1(nil, "@Destination")
			issuer, _ := url.Parse(tp.Newresponse.Query1(nil, "./saml:Issuer"))
			if (tp.Birk || tp.Hub) && map[string]bool{"birk.wayf.dk": true, "wayf.wayf.dk": true}[issuer.Host] {
				// in the new hybrid consent is made in js - and the flag for bypassing it is in js - sad!
				tp.ConsentGiven = strings.Contains(htmlresponse.PP(), `,"BypassConfirmation":false`)
			}
			u, _ = url.Parse(acs)
			//q.Q(u, finalDestination)
			if u.Host == finalDestination.Host {
				// The response reached the SP - validate its signature and stop.
				tp.logxml(tp.Newresponse)
				err := ValidateSignature(tp.Firstidpmd, tp.Newresponse)
				if err != nil {
					fmt.Printf("signature errors: %s\n", err)
				}
				break
			}
			if u.Host == stage { // only change the response to the place we are actually testing (wayf|birk|krib).wayf.dk
				ApplyMods(tp.Newresponse, m["responsemods"])
			}
			data := url.Values{}
			data.Set("SAMLResponse", base64.StdEncoding.EncodeToString([]byte(tp.Newresponse.Doc.Dump(false))))
			body = data.Encode()
			//log.Println("SAMLResponse", tp.Newresponse.PP())
		} else {
			tp.logxml(u)
		}
		//q.Q("u", method, redirects, u)
		tp.Resp, tp.Responsebody, tp.Err = tp.sendRequest(u, method, body, tp.Cookiejar)
		if tp.Err != nil {
			switch tp.Err.(type) {
			case *url.Error:
				log.Panic()
			default:
				q.Q(tp.Err)
				return nil
			}
		}
		htmlresponse = goxml.NewHtmlXp(tp.Responsebody)
		// delete stuff that just takes too much space when debugging
		tp.Resp.TLS = nil
		tp.Resp.Body = nil
		//q.Q("resp", tp, tp.Err, tp.Resp, string(tp.Responsebody))
		if u, _ = tp.Resp.Location(); u != nil { // we don't care about the StatusCode - Location means redirect
			if tp.Err == nil {
				query := u.Query()
				// we got to a discoveryservice - choose our testidp
				if len(query["return"]) > 0 && len(query["returnIDParam"]) > 0 {
					u, _ = url.Parse(query["return"][0])
					q := u.Query()
					q.Set(query["returnIDParam"][0], tp.DSIdpentityID)
					u.RawQuery = q.Encode()
				} else if strings.Contains(u.Path, "getconsent.php") { // hub consent - auto-accept
					u.RawQuery = u.RawQuery + "&yes=1"
					tp.ConsentGiven = true
				}
			}
			//q.Q(u.Host, finalIdp.Host)
			if u.Host != finalIdp.Host {
				method = "GET"
				body = ""
			} else { // we have reached our IdP - fabricate/load the response
				tp.newresponse(u)
				method = "POST"
			}
			continue
		}
		if tp.Resp.StatusCode == 500 {
			// Extract and print the error message so tests can assert on it.
			error := ""
			if tp.Resp.Header.Get("content-type") == "text/html" { // hub errors
				error = htmlresponse.Query1(nil, `//a[@id="errormsg"]/text()`)
			} else { // birk & krib errors
				error = string(tp.Responsebody)
				error = regexp.MustCompile("^\\d* ").ReplaceAllString(error, "")
			}
			fmt.Println(strings.Trim(error, "\n "))
			break
		} else {
			tp.Newresponse, _ = gosaml.Html2SAMLResponse(tp.Responsebody)
			if tp.Newresponse.Query1(nil, ".") == "" { // from old hub - disjoint federations
				fmt.Println("unknown error")
				break
			}
			method = "POST"
		}
	}
	if tp.Trace {
		log.Println()
	}
	return
}
// newresponse builds tp.Newresponse for the SAMLRequest found in u's query
// string. For the test-nemlog-in IdP a canned encrypted response is loaded
// from testdata; for the synthetic test IdP a response is created from
// tp.Attributestmt, signed per tp.ElementsToSign and, when
// tp.Encryptresponse is set, encrypted for the hub SP.
func (tp *Testparams) newresponse(u *url.URL) {
	// get the SAMLRequest
	query := u.Query()
	req, _ := base64.StdEncoding.DecodeString(query["SAMLRequest"][0])
	authnrequest := goxml.NewXp(gosaml.Inflate(req))
	tp.logxml(authnrequest)
	switch tp.FinalIdp {
	case "https://login.test-nemlog-in.dk":
		tp.Newresponse = goxml.NewXpFromFile("testdata/nemlogin.encryptedresponse.xml")
		tp.logxml(tp.Newresponse)
	case "https://this.is.not.a.valid.idp":
		// create a response
		tp.Newresponse = gosaml.NewResponse(tp.Idpmd, tp.Hubspmd, authnrequest, tp.Attributestmt)
		wayfhybrid.CopyAttributes(tp.Attributestmt, tp.Newresponse, tp.Hubspmd)
		// Sign the requested elements with the test key.
		for _, xpath := range tp.ElementsToSign {
			element := tp.Newresponse.Query(nil, xpath)[0]
			before := tp.Newresponse.Query(element, "*[2]")[0]
			err := tp.Newresponse.Sign(element.(types.Element), before.(types.Element), []byte(tp.Privatekey), []byte(tp.Privatekeypw), tp.Certificate, tp.Hashalgorithm)
			if err != nil {
				// q.Q("Newresponse", err.(goxml.Werror).Stack(2))
				log.Fatal(err)
			}
		}
		//tp.logxml(tp.Newresponse)
		if tp.Encryptresponse {
			assertion := tp.Newresponse.Query(nil, "saml:Assertion[1]")[0]
			cert := tp.Hubspmd.Query1(nil, `//md:KeyDescriptor[@use="encryption" or not(@use)]/ds:KeyInfo/ds:X509Data/ds:X509Certificate`)
			if cert == "" {
				// BUGFIX: this error was previously created with fmt.Errorf
				// and silently discarded - log it so the failure is visible.
				log.Printf("Could not find encryption cert for: %s", tp.Hubspmd.Query1(nil, "/@entityID"))
			}
			_, publickey, _ := gosaml.PublicKeyInfo(cert)
			if tp.Env == "xdev" {
				cert, err := ioutil.ReadFile(*testcertpath)
				if err != nil {
					// BUGFIX: the ReadFile error was previously shadowed by
					// the ParseCertificate error and never checked.
					return
				}
				pk, err := x509.ParseCertificate(cert)
				if err != nil {
					return
				}
				publickey = pk.PublicKey.(*rsa.PublicKey)
			}
			ea := goxml.NewXpFromString(`<saml:EncryptedAssertion xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"></saml:EncryptedAssertion>`)
			err := tp.Newresponse.Encrypt(assertion.(types.Element), publickey, ea)
			if err != nil {
				log.Fatal(err)
			}
			tp.Encryptresponse = false // for now only possible for idp -> hub
			tp.logxml(tp.Newresponse)
		}
	}
}
// SendRequest sends a http request - GET or POST - using the supplied url, method, body and cookies.
// It updates the cookies and returns a http.Response, a possible response body and an error.
// Redirects are NOT followed: the package-level client is expected to abort
// redirects with an error whose text ends in "redirect-not-allowed", so the
// caller (browse) can perform the redirect itself.
// Note: the parameter name "url" shadows the net/url package inside this
// function; url.Parse below is therefore (*url.URL).Parse, which resolves
// relative to the request url.
func (tp *Testparams) sendRequest(url *url.URL, method, body string, cookies map[string]map[string]*http.Cookie) (resp *http.Response, responsebody []byte, err error) {
	var payload io.Reader
	if method == "POST" {
		payload = strings.NewReader(body)
	}
	host := url.Host
	cookiedomain := "wayf.dk" // all cookies are kept under this single pseudo-domain
	req, err := http.NewRequest(method, url.String(), payload)
	for _, cookie := range cookies[cookiedomain] {
		req.AddCookie(cookie)
	}
	if method == "POST" {
		req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
		req.Header.Add("Content-Length", strconv.Itoa(len(body)))
	}
	req.Header.Add("Host", host)
	resp, err = client.Do(req)
	if err != nil && !strings.HasSuffix(err.Error(), "redirect-not-allowed") {
		// we need to do the redirect ourselves so a self inflicted redirect "error" is not an error
		log.Println(err)
		debug.PrintStack()
		// NOTE(review): deliberately nonsensical sentinel error text.
		return nil, nil, errors.New("emit macho dwarf: elf header corrupted")
		//log.Fatalln("client.do", err)
	}
	location, _ := resp.Location()
	loc := ""
	if location != nil {
		loc = location.Host + location.Path
	}
	// Remember any cookies the server set, keyed by name under cookiedomain.
	setcookies := resp.Cookies()
	for _, cookie := range setcookies {
		if cookies[cookiedomain] == nil {
			cookies[cookiedomain] = make(map[string]*http.Cookie)
		}
		cookies[cookiedomain][cookie.Name] = cookie
	}
	// We can't get to the body if we got a redirect pseudo error above
	if err == nil {
		responsebody, err = ioutil.ReadAll(resp.Body)
		defer resp.Body.Close()
	}
	// We didn't get a Location: header - we are POST'ing a SAMLResponse;
	// derive the trace "location" from the embedded response's @Destination.
	if loc == "" {
		response := goxml.NewHtmlXp(responsebody)
		samlbase64 := response.Query1(nil, `//input[@name="SAMLResponse"]/@value`)
		if samlbase64 != "" {
			samlxml, _ := base64.StdEncoding.DecodeString(samlbase64)
			samlresponse := goxml.NewXp(samlxml)
			u, _ := url.Parse(samlresponse.Query1(nil, "@Destination"))
			loc = u.Host + u.Path
		}
	}
	if tp.Trace {
		log.Printf("%-4s %-70s %s %-15s %s\n", req.Method, host+req.URL.Path, resp.Proto, resp.Status, loc)
	}
	// we need to nullify the redirect-not-allowed pseudo error from above
	err = nil
	return
}
// ApplyMods changes a SAML message by applying a list of modifications.
// For each entry:
//   - a non-nil function is invoked with the document,
//   - an empty value unlinks all nodes matched by path,
//   - a value prefixed "+ " prepends the remainder to each matched value,
//   - a value prefixed "- " appends the remainder to each matched value,
//   - any other value replaces the node content.
func ApplyMods(xp *goxml.Xp, m mods) {
	for _, change := range m {
		switch {
		case change.function != nil:
			change.function(xp)
		case change.value == "":
			// Unlink every matching node; free them once the function returns.
			for _, element := range xp.Query(nil, change.path) {
				parent, _ := element.ParentNode()
				parent.RemoveChild(element)
				defer element.Free()
			}
		case strings.HasPrefix(change.value, "+ "):
			for _, value := range xp.QueryMulti(nil, change.path) {
				xp.QueryDashP(nil, change.path, change.value[2:]+value, nil)
			}
		case strings.HasPrefix(change.value, "- "):
			for _, value := range xp.QueryMulti(nil, change.path) {
				xp.QueryDashP(nil, change.path, value+change.value[2:], nil)
			}
		default:
			xp.QueryDashP(nil, change.path, change.value, nil)
		}
	}
	//q.Q(string(xp.PP()))
}
// ValidateSignature verifies the signature(s) on a SAML response against the
// signing certificates published in the corresponding metadata.
// It returns an error when the metadata holds no signing certificates, when
// neither the response nor the assertion is signed, or when any signature
// that is present fails verification.
func ValidateSignature(md, xp *goxml.Xp) (err error) {
	certificates := md.QueryMulti(nil, `./md:IDPSSODescriptor/md:KeyDescriptor[@use="signing" or not(@use)]/ds:KeyInfo/ds:X509Data/ds:X509Certificate`)
	if len(certificates) == 0 {
		return errors.New("no certificates found in metadata")
	}
	signatures := xp.Query(nil, "(/samlp:Response[ds:Signature] | /samlp:Response/saml:Assertion[ds:Signature])")
	destination := xp.Query1(nil, "/samlp:Response/@Destination")
	if len(signatures) == 0 {
		return fmt.Errorf("%s neither the assertion nor the response was signed", destination)
	}
	// Every signature present must verify against one of the certificates.
	for _, signature := range signatures {
		if err = gosaml.VerifySign(xp, certificates, signature); err != nil {
			return err
		}
	}
	return nil
}
// TestAttributeNameFormat tests if the hub delivers the attributes in the correct format - only one (or none) is allowed
// Currently if none is specified we deliver both but lie about the format so we say that it is basic even though it actually is uri
// As PHPH always uses uri we just count the number of RequestedAttributes
func TestAttributeNameFormat(t *testing.T) {
	const (
		// NOTE(review): mdcounturi and mdcountbasic are currently unused.
		mdcounturi   = "count(//md:RequestedAttribute[@NameFormat='urn:oasis:names:tc:SAML:2.0:attrname-format:uri'])"
		mdcountbasic = "count(//md:RequestedAttribute[@NameFormat='urn:oasis:names:tc:SAML:2.0:attrname-format:basic'])"
		mdcount      = "count(//md:RequestedAttribute)"
		ascounturi   = "count(//saml:Attribute[@NameFormat='urn:oasis:names:tc:SAML:2.0:attrname-format:uri'])"
		ascountbasic = "count(//saml:Attribute[@NameFormat='urn:oasis:names:tc:SAML:2.0:attrname-format:basic'])"
	)
	stdoutstart()
	attrnameformats := []string{"uri", "basic"}
	// Metadata queries that locate an SP requesting each name-format.
	attrnameformatqueries := map[string]string{
		"uri":   "/*/*/*/wayf:wayf[wayf:AttributeNameFormat='urn:oasis:names:tc:SAML:2.0:attrname-format:uri']/../../@entityID",
		"basic": "/*/*/*/wayf:wayf[wayf:AttributeNameFormat='urn:oasis:names:tc:SAML:2.0:attrname-format:basic']/../../@entityID",
	}
	for _, attrname := range attrnameformats {
		eID := testSPs.Query1(nil, attrnameformatqueries[attrname])
		md, _ := Md.Internal.MDQ(eID)
		if md == nil {
			log.Fatalln("No SP found for testing attributenameformat: ", attrname)
		}
		tp := browse(nil, &overwrites{"Spmd": md})
		if tp != nil {
			//samlresponse := Html2SAMLResponse(tp)
			// Compare delivered attribute counts per format with the
			// number of RequestedAttributes in the SP's metadata.
			requested := md.QueryNumber(nil, mdcount)
			uricount := tp.Newresponse.QueryNumber(nil, ascounturi)
			basiccount := tp.Newresponse.QueryNumber(nil, ascountbasic)
			fmt.Printf("%t %t %t\n", basiccount == requested*2, uricount == requested, basiccount == requested)
			//fmt.Printf("requested %d uri %d basic %d\n", requested, uricount, basiccount)
		}
	}
	expected := ""
	if dohub || dobirk {
		expected += `false true false
false false true
`
	}
	stdoutend(t, expected)
}
// xTestMultipleSPs (disabled) runs the full login flow against every SP in
// the test metadata that belongs to the WAYF federation and is not
// eduGAIN-only; any signature validation failure shows up in the output.
func xTestMultipleSPs(t *testing.T) {
	stdoutstart()
	for _, eID := range testSPs.QueryMulti(nil, "/*/*/@entityID") {
		log.Println("eID", eID)
		md, _ := Md.Internal.MDQ(eID)
		if md == nil {
			log.Fatalln("No SP found for testing multiple SPs: ", eID)
		}
		// Skip eduGAIN-only SPs ...
		if fed := md.Query1(nil, "./md:Extensions/wayf:wayf/wayf:feds[.='eduGAIN']"); fed == "eduGAIN" {
			continue
		}
		// ... and SPs that are not members of the WAYF federation.
		if fed := md.Query1(nil, "./md:Extensions/wayf:wayf/wayf:feds[.='WAYF']"); fed == "" {
			continue
		}
		log.Println("eID", eID)
		browse(nil, &overwrites{"Spmd": md})
	}
	stdoutend(t, "")
}
// xTestDigestMethodSendingSha1 (disabled) tests that the Signature|DigestMethod
// is what the SP asks for: PHPh asks for sha1, so the hub should sign with
// rsa-sha1 and digest with sha1.
func xTestDigestMethodSendingSha1(t *testing.T) {
	stdoutstart()
	expected := ""
	entitymd, _ := Md.Internal.MDQ("https://metadata.wayf.dk/PHPh")
	tp := browse(nil, &overwrites{"Spmd": entitymd})
	if tp != nil {
		samlresponse, _ := gosaml.Html2SAMLResponse(tp.Responsebody)
		signatureMethod := samlresponse.Query1(nil, "//ds:SignatureMethod/@Algorithm")
		digestMethod := samlresponse.Query1(nil, "//ds:DigestMethod/@Algorithm")
		fmt.Printf("%s\n%s\n", signatureMethod, digestMethod)
		expected += `http://www.w3.org/2000/09/xmldsig#rsa-sha1
http://www.w3.org/2000/09/xmldsig#sha1
`
	}
	stdoutend(t, expected)
}
// TestDigestMethodSendingSha256 tests that the Signature|DigestMethod is what
// the SP asks for: for https://wayfsp2.wayf.dk the hub should sign with
// rsa-sha256 and digest with sha256.
func TestDigestMethodSendingSha256(t *testing.T) {
	stdoutstart()
	expected := ""
	entitymd, _ := Md.Internal.MDQ("https://wayfsp2.wayf.dk")
	//entitymd, _ := Md.Internal.MDQ("https://ucsyd.papirfly.com/AuthServices")
	if tp := browse(nil, &overwrites{"Spmd": entitymd}); tp != nil {
		samlresponse, _ := gosaml.Html2SAMLResponse(tp.Responsebody)
		fmt.Printf("%s\n%s\n",
			samlresponse.Query1(nil, "//ds:SignatureMethod/@Algorithm"),
			samlresponse.Query1(nil, "//ds:DigestMethod/@Algorithm"))
		expected += `http://www.w3.org/2001/04/xmldsig-more#rsa-sha256
http://www.w3.org/2001/04/xmlenc#sha256
`
	}
	stdoutend(t, expected)
}
// xTestDigestMethodReceivingSha1 (disabled) tests that the
// Signature|DigestMethod is what the SP asks for when receiving: PHPh asks
// for sha1, so rsa-sha1/sha1 is expected.
func xTestDigestMethodReceivingSha1(t *testing.T) {
	stdoutstart()
	expected := ""
	entitymd, _ := Md.Internal.MDQ("https://metadata.wayf.dk/PHPh")
	tp := browse(nil, &overwrites{"Spmd": entitymd})
	if tp != nil {
		samlresponse, _ := gosaml.Html2SAMLResponse(tp.Responsebody)
		signatureMethod := samlresponse.Query1(nil, "//ds:SignatureMethod/@Algorithm")
		digestMethod := samlresponse.Query1(nil, "//ds:DigestMethod/@Algorithm")
		fmt.Printf("%s\n%s\n", signatureMethod, digestMethod)
		expected += `http://www.w3.org/2000/09/xmldsig#rsa-sha1
http://www.w3.org/2000/09/xmldsig#sha1
`
	}
	stdoutend(t, expected)
}
// TestDigestMethodReceivingSha256 tests that when the IdP side signs with
// sha256 (forced via the Hashalgorithm overwrite) the resulting response
// carries rsa-sha256/sha256 signature and digest methods.
func TestDigestMethodReceivingSha256(t *testing.T) {
	stdoutstart()
	expected := ""
	entitymd, _ := Md.Internal.MDQ("https://wayfsp2.wayf.dk")
	//entitymd, _ := Md.Internal.MDQ("https://ucsyd.papirfly.com/AuthServices")
	if tp := browse(nil, &overwrites{"Spmd": entitymd, "Hashalgorithm": "sha256"}); tp != nil {
		samlresponse, _ := gosaml.Html2SAMLResponse(tp.Responsebody)
		fmt.Printf("%s\n%s\n",
			samlresponse.Query1(nil, "//ds:SignatureMethod/@Algorithm"),
			samlresponse.Query1(nil, "//ds:DigestMethod/@Algorithm"))
		expected += `http://www.w3.org/2001/04/xmldsig-more#rsa-sha256
http://www.w3.org/2001/04/xmlenc#sha256
`
	}
	stdoutend(t, expected)
}
// TestSigningResponse tests that for an SP whose metadata requests a signed
// response (wayf:saml20.sign.response='1') the hub signs the Response element
// itself and not the Assertion.
// (The previous header comment was a copy/paste from TestConsentDisabled.)
func TestSigningResponse(t *testing.T) {
	stdoutstart()
	expected := ""
	// find an entity that asks for a signed response
	entityID := testSPs.Query1(nil, "/*/*/*/wayf:wayf[wayf:saml20.sign.response='1']/../../md:SPSSODescriptor/../@entityID")
	if entityID != "" {
		entitymd, _ := Md.Internal.MDQ(entityID)
		tp := browse(nil, &overwrites{"Spmd": entitymd})
		if tp != nil {
			samlresponse, _ := gosaml.Html2SAMLResponse(tp.Responsebody)
			// Count signatures on the Response vs. on the Assertion.
			responseSignatures := len(samlresponse.QueryMulti(nil, "/samlp:Response/ds:Signature"))
			assertionSignatures := len(samlresponse.QueryMulti(nil, "/samlp:Response/saml:Assertion/ds:Signature"))
			fmt.Printf("Response signature = %d Assertion signatures = %d\n", responseSignatures, assertionSignatures)
			expected = `Response signature = 1 Assertion signatures = 0
`
		}
	} else {
		expected += "no entity suited for test found"
	}
	stdoutend(t, expected)
}
// TestConsentDisabled verifies that an SP flagged with wayf:consent.disable
// bypasses the consent step, i.e. the recorded ConsentGiven flag stays false.
func TestConsentDisabled(t *testing.T) {
	stdoutstart()
	expected := ""
	// Pick any SP whose metadata disables consent.
	entityID := testSPs.Query1(nil, "/*/*/*/wayf:wayf[wayf:consent.disable='1']/../../md:SPSSODescriptor/../@entityID")
	if entityID == "" {
		expected += "no entity suited for test found"
	} else {
		entitymd, _ := Md.Internal.MDQ(entityID)
		if tp := browse(nil, &overwrites{"Spmd": entitymd}); tp != nil {
			fmt.Printf("consent given %t\n", tp.ConsentGiven)
		}
		expected += `consent given false
`
	}
	stdoutend(t, expected)
}
// TestConsentGiven verifies that an SP WITHOUT wayf:consent.disable goes
// through the consent flow and ends with consent given.
func TestConsentGiven(t *testing.T) {
	stdoutstart()
	expected := ""
	// Pick any SP whose metadata does not disable consent.
	entityID := testSPs.Query1(nil, "/*/*/*/wayf:wayf[not(wayf:consent.disable='1')]/../../md:SPSSODescriptor/../@entityID")
	if entityID == "" {
		expected += "no entity suited for test found"
	} else {
		entitymd, _ := Md.Internal.MDQ(entityID)
		if tp := browse(nil, &overwrites{"Spmd": entitymd}); tp != nil {
			fmt.Printf("consent given %t\n", tp.ConsentGiven)
		}
		expected += `consent given true
`
	}
	stdoutend(t, expected)
}
// xTestPersistentNameID tests that the persistent nameID (and eptid) is the same from both the hub and BIRK
func xTestPersistentNameID(t *testing.T) {
	expected := ""
	stdoutstart()
	// BUGFIX: "defer stdoutend(t, expected)" evaluated its arguments at the
	// time the defer statement executed, so stdoutend always received ""
	// and the expected value built below was silently ignored. Wrapping the
	// call in a closure defers the evaluation of expected as well.
	defer func() { stdoutend(t, expected) }()
	entityID := testSPs.Query1(nil, "/*/*/md:SPSSODescriptor/md:NameIDFormat[.='urn:oasis:names:tc:SAML:2.0:nameid-format:persistent']/../md:AttributeConsumingService/md:RequestedAttribute[@Name='urn:oid:1.3.6.1.4.1.5923.1.1.1.10' or @Name='eduPersonTargetedID']/../../../@entityID")
	log.Println("ent", entityID)
	entitymd, _ := Md.Internal.MDQ(entityID)
	if entitymd == nil {
		return
		//log.Fatalln("no SP found for testing TestPersistentNameID")
	}
	tp := browse(nil, &overwrites{"Spmd": entitymd})
	if tp != nil {
		samlresponse, _ := gosaml.Html2SAMLResponse(tp.Responsebody)
		entityID := entitymd.Query1(nil, "@entityID")
		// Collect the NameID/eptid related values from the final response.
		nameidformat := samlresponse.Query1(nil, "//saml:NameID/@Format")
		nameid := samlresponse.Query1(nil, "//saml:NameID")
		audience := samlresponse.Query1(nil, "//saml:Audience")
		spnamequalifier := samlresponse.Query1(nil, "//saml:NameID/@SPNameQualifier")
		eptid := samlresponse.Query1(nil, "//saml:Attribute[@Name='urn:oid:1.3.6.1.4.1.5923.1.1.1.10' or @Name='eduPersonTargetedID']/saml:AttributeValue")
		fmt.Printf("%s %s %s %s %s\n", nameidformat, nameid, eptid, audience, spnamequalifier)
		expected += `urn:oasis:names:tc:SAML:2.0:nameid-format:persistent {{.pnameid}} {{.eptid}} ` + entityID + ` ` + entityID + "\n"
	}
}
// TestTransientNameID tests that the transient nameID (and eptid) is the same from both the hub and BIRK
func TestTransientNameID(t *testing.T) {
	stdoutstart()
	var expected string
	// Find a WAYF-federation SP that requests a transient NameID.
	eID := testSPs.Query1(nil, "/*/*/*/wayf:wayf/wayf:feds[.='WAYF']/../../../md:SPSSODescriptor/md:NameIDFormat[.='urn:oasis:names:tc:SAML:2.0:nameid-format:transient']/../../@entityID")
	entitymd, _ := Md.Internal.MDQ(eID)
	var tp *Testparams
	entityID := ""
	// m := modsset{"responsemods": mods{mod{"./saml:Assertion/saml:Issuer", "+ 1234", nil}}}
	// m := modsset{"responsemods": mods{mod{"./saml:Assertion/ds:Signature/ds:SignatureValue", "+ 1234", nil}}}
	tp = browse(nil, &overwrites{"Spmd": entitymd})
	if tp != nil {
		samlresponse, _ := gosaml.Html2SAMLResponse(tp.Responsebody)
		entityID = entitymd.Query1(nil, "@entityID")
		// A transient NameID is random, so only its presence is asserted.
		nameid := samlresponse.Query1(nil, "//saml:NameID")
		nameidformat := samlresponse.Query1(nil, "//saml:NameID/@Format")
		audience := samlresponse.Query1(nil, "//saml:Audience")
		spnamequalifier := samlresponse.Query1(nil, "//saml:NameID/@SPNameQualifier")
		fmt.Printf("%s %t %s %s\n", nameidformat, nameid != "", audience, spnamequalifier)
		expected = `urn:oasis:names:tc:SAML:2.0:nameid-format:transient true ` + entityID + ` ` + entityID + "\n"
	}
	stdoutend(t, expected)
}
/*
// TestUnspecifiedNameID tests that the
func TestUnspecifiedNameID(t *testing.T) {
stdoutstart()
m := modsset{"requestmods": mods{mod{"/samlp:NameIDPolicy[1]/@Format", "urn:oasis:names:tc:SAML:2.0:nameid-format:unspecified"}}}
// BIRK always sends NameIDPolicy/@Format=transient - but respects what the hub sends back - thus we need to fix the request BIRK sends to the hub (WAYFMMISC-940)
// n := modsset{"birkrequestmods": m["requestmods"]}
hub := DoRunTestHub(m)
birk := DoRunTestBirk(m)
expected := ""
for _, tp := range []*Testparams{hub, birk} {
if tp == nil || tp.Resp.StatusCode != 200 {
continue
}
samlresponse := Html2SAMLResponse(tp)
nameidformat := samlresponse.Query1(nil, "//saml:NameID/@Format")
nameid := samlresponse.Query1(nil, "//saml:NameID")
eptid := samlresponse.Query1(nil, "//saml:Attribute[@Name='urn:oid:1.3.6.1.4.1.5923.1.1.1.10']/saml:AttributeValue")
fmt.Printf("%s %t %s\n", nameidformat, nameid != "", eptid)
expected += `urn:oasis:names:tc:SAML:2.0:nameid-format:transient true {{.eptid}}
`
}
stdoutend(t, expected)
}
*/
// xTestNemLogin (disabled) exercises the NemLog-in IdP flow using a canned
// encrypted response from testdata; only meaningful in the dev environment.
func xTestNemLogin(t *testing.T) {
	var expected string
	if *env != "dev" {
		return
	}
	stdoutstart()
	// common res for hub and birk
	expected += `cn urn:oasis:names:tc:SAML:2.0:attrname-format:basic
Anton Banton Cantonsen
`
	// Pin the clock so the canned response's validity window matches.
	gosaml.TestTime, _ = time.Parse(gosaml.XsDateTime, "2017-10-09T20:48:49.385Z")
	tp := Newtp(&overwrites{"Idp": "https://nemlogin.wayf.dk", "FinalIdp": "https://login.test-nemlog-in.dk"})
	// cert := ioutil.ReadFile("testdata/2481cb9e1194df81050c7d22b823540b9442112c.X509Certificate")
	// tp.
	res := browse(nil, tp)
	// NOTE(review): "true ||" defeats the nil guard - if browse fails this
	// panics on res.Newresponse. Looks like debugging residue; confirm.
	if true || res != nil {
		gosaml.AttributeCanonicalDump(os.Stdout, res.Newresponse)
	}
	stdoutend(t, expected)
	gosaml.TestTime = time.Time{}
}
// TestFullAttributeset tests that the full attributeset is delivered to the
// default test SP. (Header previously misnamed "TestFullAttributeset1".)
func TestFullAttributeset(t *testing.T) {
	var expected string
	stdoutstart()
	// common res for hub and birk - canonical dump of all expected attributes
	expected += `cn urn:oasis:names:tc:SAML:2.0:attrname-format:basic
Anton Banton Cantonsen
eduPersonAffiliation urn:oasis:names:tc:SAML:2.0:attrname-format:basic
alum
member
student
eduPersonAssurance urn:oasis:names:tc:SAML:2.0:attrname-format:basic
2
eduPersonEntitlement urn:oasis:names:tc:SAML:2.0:attrname-format:basic
https://example.com/course101
eduPersonPrimaryAffiliation urn:oasis:names:tc:SAML:2.0:attrname-format:basic
student
eduPersonPrincipalName urn:oasis:names:tc:SAML:2.0:attrname-format:basic
joe@this.is.not.a.valid.idp
eduPersonScopedAffiliation urn:oasis:names:tc:SAML:2.0:attrname-format:basic
alum@this.is.not.a.valid.idp
member@this.is.not.a.valid.idp
student@this.is.not.a.valid.idp
eduPersonTargetedID urn:oasis:names:tc:SAML:2.0:attrname-format:basic
{{.eptid}}
gn urn:oasis:names:tc:SAML:2.0:attrname-format:basic
Anton Banton <SamlRequest id="abc">abc</SamlRequest>
mail urn:oasis:names:tc:SAML:2.0:attrname-format:basic
joe@example.com
norEduPersonLIN urn:oasis:names:tc:SAML:2.0:attrname-format:basic
123456789
organizationName urn:oasis:names:tc:SAML:2.0:attrname-format:basic
Orphanage - home for the homeless
preferredLanguage urn:oasis:names:tc:SAML:2.0:attrname-format:basic
da
schacCountryOfCitizenship urn:oasis:names:tc:SAML:2.0:attrname-format:basic
dk
schacDateOfBirth urn:oasis:names:tc:SAML:2.0:attrname-format:basic
18580824
schacHomeOrganization urn:oasis:names:tc:SAML:2.0:attrname-format:basic
this.is.not.a.valid.idp
schacHomeOrganizationType urn:oasis:names:tc:SAML:2.0:attrname-format:basic
urn:mace:terena.org:schac:homeOrganizationType:int:other
schacPersonalUniqueID urn:oasis:names:tc:SAML:2.0:attrname-format:basic
urn:mace:terena.org:schac:personalUniqueID:dk:CPR:2408586234
schacYearOfBirth urn:oasis:names:tc:SAML:2.0:attrname-format:basic
1858
sn urn:oasis:names:tc:SAML:2.0:attrname-format:basic
Cantonsen
`
	res := browse(nil, nil)
	if res != nil {
		gosaml.AttributeCanonicalDump(os.Stdout, res.Newresponse)
	}
	stdoutend(t, expected)
}
// TestFullAttributesetSP2 tests that the (reduced) attributeset requested by
// the PHPh service is delivered to it.
func TestFullAttributesetSP2(t *testing.T) {
	var expected string
	stdoutstart()
	spmd, _ := Md.Internal.MDQ("https://metadata.wayf.dk/PHPh")
	res := browse(nil, &overwrites{"Spmd": spmd})
	if res != nil {
		gosaml.AttributeCanonicalDump(os.Stdout, res.Newresponse)
	}
	expected += `eduPersonPrincipalName urn:oasis:names:tc:SAML:2.0:attrname-format:basic
joe@this.is.not.a.valid.idp
eduPersonTargetedID urn:oasis:names:tc:SAML:2.0:attrname-format:basic
WAYF-DK-493ee01e49107fed7c4b89622d8087bc5064cc15
mail urn:oasis:names:tc:SAML:2.0:attrname-format:basic
joe@example.com
`
	stdoutend(t, expected)
}
// TestFullEncryptedAttributeset1 repeats the PHPh attributeset test with the
// assertion encrypted on the IdP side (Encryptresponse overwrite); the hub
// must decrypt it and deliver the same attributes.
func TestFullEncryptedAttributeset1(t *testing.T) {
	var expected string
	stdoutstart()
	spmd, _ := Md.Internal.MDQ("https://metadata.wayf.dk/PHPh")
	overwrite := &overwrites{"Encryptresponse": true, "Spmd": spmd}
	res := browse(nil, overwrite)
	if res != nil {
		gosaml.AttributeCanonicalDump(os.Stdout, res.Newresponse)
	}
	expected += `eduPersonPrincipalName urn:oasis:names:tc:SAML:2.0:attrname-format:basic
joe@this.is.not.a.valid.idp
eduPersonTargetedID urn:oasis:names:tc:SAML:2.0:attrname-format:basic
WAYF-DK-493ee01e49107fed7c4b89622d8087bc5064cc15
mail urn:oasis:names:tc:SAML:2.0:attrname-format:basic
joe@example.com
`
	stdoutend(t, expected)
}
// TestAccessForNonIntersectingAdHocFederations verifies that an SP sharing
// no federation with the IdP is rejected with "no common federations".
func TestAccessForNonIntersectingAdHocFederations(t *testing.T) {
	stdoutstart()
	expected := ""
	spmd, _ := Md.Internal.MDQ("https://this.is.not.a.valid.sp")
	if res := browse(nil, &overwrites{"Spmd": spmd}); res != nil && (*do == "hub" || *do == "birk") {
		expected = `no common federations
`
	}
	stdoutend(t, expected)
}
// TestSignErrorModifiedContent verifies that tampering with signed content
// (here: the assertion's Issuer) is rejected as a digest mismatch.
func TestSignErrorModifiedContent(t *testing.T) {
	stdoutstart()
	expected := ""
	tamper := modsset{"responsemods": mods{mod{"./saml:Assertion/saml:Issuer", "+ 1234", nil}}}
	if res := browse(tamper, nil); res != nil && (*do == "hub" || *do == "birk") {
		expected = `["cause:digest mismatch","err:unable to validate signature"]
`
	}
	stdoutend(t, expected)
}
// TestSamlVulnerability checks that appending an XML comment to a signed
// attribute value is rejected as a digest mismatch.
// NOTE(review): presumably targets the 2018 SAML comment-injection class of
// attacks - confirm.
func TestSamlVulnerability(t *testing.T) {
	var expected string
	stdoutstart()
	m := modsset{"responsemods": mods{mod{"./saml:Assertion/saml:AttributeStatement/saml:Attribute[@Name=\"eduPersonPrincipalName\"]/saml:AttributeValue", "- <!--and.a.fake.domain--->", nil}}}
	res := browse(m, nil)
	if res != nil {
		switch *do {
		case "hub", "birk":
			expected = `["cause:digest mismatch","err:unable to validate signature"]
`
		}
	}
	stdoutend(t, expected)
}
// TestSignErrorModifiedSignature verifies that corrupting the SignatureValue
// itself is rejected as an RSA verification error.
func TestSignErrorModifiedSignature(t *testing.T) {
	var expected string
	stdoutstart()
	m := modsset{"responsemods": mods{mod{"./saml:Assertion/ds:Signature/ds:SignatureValue", "+ 1234", nil}}}
	res := browse(m, nil)
	if res != nil {
		switch *do {
		case "hub", "birk":
			expected = `["cause:crypto/rsa: verification error","err:unable to validate signature"]
`
		}
	}
	stdoutend(t, expected)
}
// TestNoSignatureError tests how the hub and BIRK react to assertions that
// are not signed at all.
func TestNoSignatureError(t *testing.T) {
	stdoutstart()
	expected := ""
	// Strip every signature from the response before it is forwarded.
	unsign := modsset{"responsemods": mods{mod{"//ds:Signature", "", nil}}}
	if res := browse(unsign, nil); res != nil && (*do == "hub" || *do == "birk") {
		expected = `["cause:encryption error"]
`
	}
	stdoutend(t, expected)
}
// TestUnknownKeySignatureError tests if the hub and BIRK react on signing with an unknown key
func TestUnknownKeySignatureError(t *testing.T) {
	var expected string
	stdoutstart()
	// Just a random private key - not used for anything else
	pk := `-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAsd0urclhDMeNqfmmES6LxVf3mK6CAX3vER1Te8QNLsd1iUEq
inmx+j6TqoyLBuVrQkOSMn7pPQMobjpca81KsWcS00RvZCNAgreTj4jOzfIouSml
6BDjuEPP9GEjQhf5iajenBeKfK8jPVnpxEKsuopE6ueKG5Rpi59mV/iVq7ZMQSGl
504OBKWBkAUgO5dPneB632uJSp2kiy0/YNUp30ItR45TncOqEtkrwBx219pRg2B0
2ot8TwZ8xFD7LG2V/hq8+0Ppp+tzTWDAri5z5ZSrAn0/j8sC56Qcwl2w2sYYhpNx
8T9x1QupnIpR1RyHCqR5mBJWDtO3pLAyPW74EwIDAQABAoIBAG9MixMwusw2a017
7RE/YTNCUqt2N+AbH+hDw6PlEKK/KauT3bz9XgPL+Ld2buEH2tCCXA/BHs6RFVG0
r3S96AmPCFavskylSo8BtRLSdyakbBtCFpFbUERUGuM/jcKkIgCkbXibuos/RPv1
MbUgS9oHAA1GikOr4Uf/nRlbcr0ZsRQnqp9uaK+rMCnypBQFB/YE1eKuTqSXf/Yb
D0+xJ3XDaTalBH2qXfIZX3+hKd7NvL5KHAc5ZVj3LzaBJ6GXV7nIKKbbTbdQdjxe
uEzPj36Zb7ultAorQYIyPHlGiXBh+gpkC3BHxDLwIqG+Iw0wUCnlKTDBO0qq8JcZ
TQAVsmECgYEA2IAosfRHjgWhT40+JTd/DICLoa/VAUeHok1JvjirJwADjDj0oZ6C
Ry5ioxrOpxH1RvHSfCHdKt0/aPviyEJGDRU8d/tFQypeSzDHL/BDQtavw/Na5c9x
epCft6HshpuzPr43IYB/VbiUedm8w78jNIcXEphNgNLaw22uU/3gkfkCgYEA0lB3
t+QJiHXY3J7YHze5jYrK96By9DY8MjkgKLwxaFFGZqpb2egXQK5ohBHuAbZXVGDY
oOH/IOBgdsOYuJv7NKfMY35wzrMygfWXbSNlTZwdrmJPqOSfUwu/hmBuwEHsfrEJ
3a2xiX+OFhfRwebcQwgOrN1FVpobKrXqYjp+3WsCgYB/vu9EQY1PIcdS91ZqA1r1
94tsdiHLRXekrtIKacmjk4CEZr8B9lOMyLPu5cx2DESb/ehi0mB8AFyAB9CCtYg8
BAHQEfWGciN9XmTJxo0JjT/c8WT7IPImjduQMP0tWAXlybsiC34XCHijhXS6U7fk
MKnOkQt6LfBjS/6HFNBDkQKBgBbW0DlzFSnxikxjH5s8RPU/Bk2f6fvlS+I0W+6w
iTkH4npRs8nVL3lBt23oOI2NDKzIG55VDIy4cSFUmmgp4DzWoBaJ65w2z5xXXEto
1Z54/qwqVvZDZZ3yH6lrHXvZbOJRPX4KV8ZTyM1TZt8EwBSzckyJdvcxoxOfT8W9
DnvjAoGAIu1AHwMhBdGmwffsII1VAb7gyYnjFbPfaSrwaxIMJ61Djayg4XhGFJ5+
NDVIEaV6/PITFgNcYIzBZxgCEqZ6jJ5jiidlnUbGPYaMhPN/mmHCP/2dvYW6ZSGC
mYqIGJZzLM/wk1u/CG52i+zDOiYbeiYNZc7qhIFU9ueinr88YZo=
-----END RSA PRIVATE KEY-----
`
	// need to do resign before sending to birk - not able to do that pt
	// _ = DoRunTestBirk(nil)
	// Signing with a key the hub does not know must fail verification.
	if browse(nil, &overwrites{"Privatekey": pk, "Privatekeypw": "-"}) != nil {
		switch *do {
		case "hub", "birk":
			expected = `["cause:crypto/rsa: verification error","err:unable to validate signature"]
`
		}
	}
	stdoutend(t, expected)
}
// TestRequestSchemaError tests that the HUB and BIRK react to schema errors
// in requests.
func TestRequestSchemaError(t *testing.T) {
	stdoutstart()
	expected := ""
	// IsPassive must be an xs:boolean - "isfalse" is not.
	invalid := modsset{"requestmods": mods{mod{"./@IsPassive", "isfalse", nil}}}
	if browse(invalid, nil) != nil && (*do == "hub" || *do == "birk") {
		expected = `["cause:schema validation failed"]
`
	}
	stdoutend(t, expected)
}
// TestResponseSchemaError tests that the HUB and BIRK react to schema errors
// in responses (here: a malformed IssueInstant timestamp).
func TestResponseSchemaError(t *testing.T) {
	var expected string
	stdoutstart()
	m := modsset{"responsemods": mods{mod{"./@IssueInstant", "isfalse", nil}}}
	if browse(m, nil) != nil {
		switch *do {
		case "hub", "birk":
			expected = `["cause:schema validation failed"]
`
		}
	}
	stdoutend(t, expected)
}
// TestNoEPPNError tests that the hub does not accept assertions without an
// eduPersonPrincipalName attribute.
func TestNoEPPNError(t *testing.T) {
	stdoutstart()
	expected := ""
	// Remove the eppn attribute entirely from the attribute statement.
	noEppn := modsset{"attributemods": mods{mod{`//saml:Attribute[@Name="eduPersonPrincipalName"]`, "", nil}}}
	if browse(noEppn, nil) != nil && (*do == "hub" || *do == "birk") {
		expected = `["cause:isRequired: eduPersonPrincipalName"]
`
	}
	stdoutend(t, expected)
}
// TestEPPNScopingError tests that the hub does not accept scoping errors in eppn - currently it does.
func TestEPPNScopingError(t *testing.T) {
    expected := ""
    stdoutstart()
    // Remove the original eppn attribute, then inject a value whose security
    // domain (example.com) does not belong to the asserting IdP.
    m := modsset{"attributemods": mods{
        mod{`//saml:Attribute[@Name="eduPersonPrincipalName"]`, "", nil},
        mod{`/saml:Assertion/saml:AttributeStatement/saml:Attribute[@Name="eduPersonPrincipalName"]/saml:AttributeValue[1]`, "joe@example.com", nil}}}
    if browse(m, nil) != nil {
        if *do == "hub" || *do == "birk" {
            expected = `["cause:security domain 'example.com' does not match any scopes"]
`
        }
    }
    stdoutend(t, expected)
}
// TestNoLocalpartInEPPNError tests that the hub does not accept eppn with no localpart - currently it does.
func TestNoLocalpartInEPPNError(t *testing.T) {
    expected := ""
    stdoutstart()
    // Replace the eppn with a value that has a domain but no localpart.
    m := modsset{"attributemods": mods{
        mod{`//saml:Attribute[@Name="eduPersonPrincipalName"]`, "", nil},
        mod{`/saml:Assertion/saml:AttributeStatement/saml:Attribute[@Name="eduPersonPrincipalName"]/saml:AttributeValue[1]`, "@this.is.not.a.valid.idp", nil}}}
    if browse(m, nil) != nil {
        if *do == "hub" || *do == "birk" {
            expected = `["cause:not a scoped value: @this.is.not.a.valid.idp"]
`
        }
    }
    stdoutend(t, expected)
}
// TestNoDomainInEPPNError tests that the hub does not accept eppn with no domain - currently it does.
func TestNoDomainInEPPNError(t *testing.T) {
    expected := ""
    stdoutstart()
    // Replace the eppn with a bare localpart ("joe") that carries no domain.
    m := modsset{"attributemods": mods{
        mod{`//saml:Attribute[@Name="eduPersonPrincipalName"]`, "", nil},
        mod{`/saml:Assertion/saml:AttributeStatement/saml:Attribute[@Name="eduPersonPrincipalName"]/saml:AttributeValue[1]`, "joe", nil}}}
    if browse(m, nil) != nil {
        if *do == "hub" || *do == "birk" {
            expected = `["cause:not a scoped value: joe"]
`
        }
    }
    stdoutend(t, expected)
}
// TestUnknownSPError test how the birk and the hub reacts on requests from an unknown sP.
func TestUnknownSPError(t *testing.T) {
    expected := ""
    stdoutstart()
    // Swap the request issuer for an entity that is not in the metadata feed.
    m := modsset{"requestmods": mods{mod{"./saml:Issuer", "https://www.example.com/unknownentity", nil}}}
    if browse(m, nil) != nil {
        if *do == "hub" || *do == "birk" {
            expected = `["cause:Metadata not found","err:Metadata not found","key:https://www.example.com/unknownentity","table:HYBRID_EXTERNAL_SP"]
`
        }
    }
    stdoutend(t, expected)
}
// TestUnknownIDPError tests how BIRK reacts on requests to an unknown IdP
// Use the line below for new birkservers
// Metadata for entity: https://birk.wayf.dk/birk.php/www.example.com/unknownentity not found
func TestUnknownIDPError(t *testing.T) {
    expected := ""
    stdoutstart()
    if *do == "hub" || *do == "birk" {
        // Point the request destination at an entity the hub does not know.
        m := modsset{"requestmods": mods{mod{"./@Destination", "https://wayf.wayf.dk/unknownentity", nil}}}
        if browse(m, nil) != nil {
            expected = `unknown error
`
        }
    }
    stdoutend(t, expected)
}
// xTestXSW1 is a disabled (x-prefixed) test that replays a response mutated by
// ApplyXSW1 (XML Signature Wrapping variant 1) and checks the expected rejection.
func xTestXSW1(t *testing.T) {
    expected := ""
    stdoutstart()
    m := modsset{"responsemods": mods{mod{"", "", ApplyXSW1}}}
    if browse(m, nil) != nil {
        if *do == "hub" || *do == "birk" {
            expected += `["cause:sql: no rows in result set","err:Metadata not found","key:https://birk.wayf.dk/birk.php/www.example.com/unknownentity","table:HYBRID_EXTERNAL_IDP"]
`
        }
    }
    stdoutend(t, expected)
}
// from https://github.com/SAMLRaider/SAMLRaider/blob/master/src/main/java/helpers/XSWHelpers.java
// ApplyXSW1 rewrites a signed SAML response into XML Signature Wrapping variant 1:
// the signed assertion is cloned, the clone's embedded signature is stripped, the
// clone is attached under the original assertion's signature element, and the
// original assertion's ID is changed ("_evil_response_ID"). A vulnerable validator
// that resolves the signed element by reference may then verify the untouched
// clone while the application consumes the tampered original.
func ApplyXSW1(xp *goxml.Xp) {
    log.Println(xp.PP()) // debug: dump the response before mutation
    assertion := xp.Query(nil, "/samlp:Response[1]/saml:Assertion[1]")[0]
    // NOTE(review): second arg 1 presumably requests a deep copy — confirm goxml docs.
    clonedAssertion := xp.CopyNode(assertion, 1)
    signature := xp.Query(clonedAssertion, "./ds:Signature")[0]
    log.Println(goxml.NewXpFromNode(signature).PP()) // debug: the signature being removed
    // Detach the clone's own signature so only the original assertion stays signed.
    parent, _ := signature.(types.Element).ParentNode()
    parent.RemoveChild(signature)
    defer signature.Free() // manual libxml node ownership; freed once detached
    log.Println(goxml.NewXpFromNode(clonedAssertion).PP()) // debug: clone without signature
    // Hide the (still signed) clone inside the original assertion's signature element.
    newSignature := xp.Query(assertion, "ds:Signature[1]")[0]
    newSignature.AddChild(clonedAssertion)
    // Re-ID the original so ID-based reference resolution points at the clone instead.
    assertion.(types.Element).SetAttribute("ID", "_evil_response_ID")
    log.Println(xp.PP()) // debug: final wrapped response
}
// xTestSpeed is a disabled (x-prefixed) load test: it spawns a fixed number of
// goroutines that each repeatedly look up SP metadata and run a full browse
// round-trip, logging the wall-clock duration of every iteration.
func xTestSpeed(t *testing.T) {
    const goroutines = 10  // concurrent workers (was misspelled "gorutines")
    const iterations = 100000 // browse round-trips per worker
    for i := 0; i < goroutines; i++ {
        wg.Add(1)
        go func(i int) {
            // defer guarantees the WaitGroup is released even if an iteration panics.
            defer wg.Done()
            for j := 0; j < iterations; j++ {
                starttime := time.Now()
                // Best-effort: MDQ/browse errors are deliberately ignored — only timing matters here.
                spmd, _ := Md.Internal.MDQ("https://metadata.wayf.dk/PHPh")
                browse(nil, &overwrites{"Spmd": spmd})
                log.Println(i, j, time.Since(starttime).Seconds())
                //runtime.GC()
                //time.Sleep(200 * time.Millisecond)
            }
        }(i)
    }
    wg.Wait()
}
|
// Copyright 2015 ThoughtWorks, Inc.
// This file is part of Gauge.
// Gauge is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Gauge is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Gauge. If not, see <http://www.gnu.org/licenses/>.
package main
import (
. "gopkg.in/check.v1"
)
// TestRefactoringOfStepsWithNoArgs verifies that renaming a step with no
// arguments rewrites the single occurrence inside a spec's scenario.
func (s *MySuite) TestRefactoringOfStepsWithNoArgs(c *C) {
    oldStep := "first step"
    newStep := "second step"
    tokens := []*token{
        {kind: specKind, value: "Spec Heading", lineNo: 1},
        {kind: scenarioKind, value: "Scenario Heading", lineNo: 2},
        {kind: stepKind, value: oldStep, lineNo: 3},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    agent, err := getRefactorAgent(oldStep, newStep)
    specs := []*specification{spec} // idiomatic literal instead of append(make(...), spec)
    agent.rephraseInSpecsAndConcepts(&specs, new(conceptDictionary))
    c.Assert(err, Equals, nil)
    c.Assert(len(specs[0].scenarios[0].steps), Equals, 1)
    c.Assert(specs[0].scenarios[0].steps[0].value, Equals, newStep)
}
// TestRefactoringOfStepsWithNoArgsAndWithMoreThanOneScenario verifies that a
// rename touches every occurrence of the step across multiple scenarios while
// leaving unrelated steps alone.
func (s *MySuite) TestRefactoringOfStepsWithNoArgsAndWithMoreThanOneScenario(c *C) {
    oldStep := "first step"
    newStep := "second step"
    unchanged := "unchanged"
    tokens := []*token{
        {kind: specKind, value: "Spec Heading", lineNo: 1},
        {kind: scenarioKind, value: "Scenario Heading", lineNo: 2},
        {kind: stepKind, value: oldStep, lineNo: 3},
        {kind: stepKind, value: oldStep, lineNo: 5},
        {kind: scenarioKind, value: "Scenario Heading 1", lineNo: 20},
        {kind: stepKind, value: unchanged, lineNo: 30},
        {kind: stepKind, value: oldStep, lineNo: 50},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    agent, err := getRefactorAgent(oldStep, newStep)
    specs := []*specification{spec}
    agent.rephraseInSpecsAndConcepts(&specs, new(conceptDictionary))
    c.Assert(err, Equals, nil)
    c.Assert(len(specs[0].scenarios), Equals, 2)
    c.Assert(len(specs[0].scenarios[0].steps), Equals, 2)
    c.Assert(specs[0].scenarios[0].steps[0].value, Equals, newStep)
    c.Assert(specs[0].scenarios[0].steps[1].value, Equals, newStep)
    c.Assert(len(specs[0].scenarios[1].steps), Equals, 2)
    c.Assert(specs[0].scenarios[1].steps[0].value, Equals, unchanged)
    c.Assert(specs[0].scenarios[1].steps[1].value, Equals, newStep)
}
// TestRefactoringOfStepsWithNoArgsAndWithMoreThanOneSpec verifies that a rename
// is applied in every spec that contains the step, and that each such spec is
// reported as refactored.
func (s *MySuite) TestRefactoringOfStepsWithNoArgsAndWithMoreThanOneSpec(c *C) {
    oldStep := " first step" // note: leading space is intentional test data
    newStep := "second step"
    tokens := []*token{
        {kind: specKind, value: "Spec Heading", lineNo: 1},
        {kind: scenarioKind, value: "Scenario Heading", lineNo: 2},
        {kind: stepKind, value: oldStep, lineNo: 3},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    tokens = []*token{
        {kind: specKind, value: "Spec Heading", lineNo: 10},
        {kind: scenarioKind, value: "Scenario Heading", lineNo: 20},
        {kind: stepKind, value: oldStep, lineNo: 30},
    }
    spec1, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    specs := []*specification{spec, spec1}
    agent, err := getRefactorAgent(oldStep, newStep)
    specRefactored, _ := agent.rephraseInSpecsAndConcepts(&specs, new(conceptDictionary))
    for _, isRefactored := range specRefactored {
        // gocheck's signature is Assert(obtained, checker, expected) — obtained first.
        c.Assert(isRefactored, Equals, true)
    }
    c.Assert(err, Equals, nil)
    c.Assert(len(specs[0].scenarios[0].steps), Equals, 1)
    c.Assert(specs[0].scenarios[0].steps[0].value, Equals, newStep)
    c.Assert(len(specs[1].scenarios[0].steps), Equals, 1)
    c.Assert(specs[1].scenarios[0].steps[0].value, Equals, newStep)
}
// TestRefactoringOfStepsWithNoArgsInConceptFiles verifies that a rename reaches
// into concept definitions: exact matches inside a concept are rewritten while
// near-matches (oldStep + "T") are left untouched.
func (s *MySuite) TestRefactoringOfStepsWithNoArgsInConceptFiles(c *C) {
    oldStep := "first step"
    newStep := "second step"
    unchanged := "unchanged"
    tokens := []*token{
        {kind: specKind, value: "Spec Heading", lineNo: 1},
        {kind: scenarioKind, value: "Scenario Heading 1", lineNo: 20},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    agent, _ := getRefactorAgent(oldStep, newStep)
    specs := []*specification{spec}
    dictionary := new(conceptDictionary)
    step1 := &step{value: oldStep + "sdsf", isConcept: true}
    step2 := &step{value: unchanged, isConcept: true, items: []item{&step{value: oldStep, isConcept: false}, &step{value: oldStep + "T", isConcept: false}}}
    dictionary.add([]*step{step1, step2}, "file.cpt")
    agent.rephraseInSpecsAndConcepts(&specs, dictionary)
    c.Assert(dictionary.conceptsMap[unchanged].conceptStep.items[0].(*step).value, Equals, newStep)
    c.Assert(dictionary.conceptsMap[unchanged].conceptStep.items[1].(*step).value, Equals, oldStep+"T")
}
// TestRefactoringGivesOnlySpecsThatAreRefactored verifies the returned map flags
// only the specs that actually contained the old step.
func (s *MySuite) TestRefactoringGivesOnlySpecsThatAreRefactored(c *C) {
    oldStep := " first step" // note: leading space is intentional test data
    newStep := "second step"
    tokens := []*token{
        {kind: specKind, value: "Spec Heading", lineNo: 1},
        {kind: scenarioKind, value: "Scenario Heading", lineNo: 2},
        {kind: stepKind, value: oldStep, lineNo: 3},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    // The second spec already uses newStep, so it must not be reported as refactored.
    tokens = []*token{
        {kind: specKind, value: "Spec Heading", lineNo: 10},
        {kind: scenarioKind, value: "Scenario Heading", lineNo: 20},
        {kind: stepKind, value: newStep, lineNo: 30},
    }
    spec1, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    specs := []*specification{spec, spec1}
    agent, _ := getRefactorAgent(oldStep, newStep)
    specRefactored, _ := agent.rephraseInSpecsAndConcepts(&specs, new(conceptDictionary))
    // gocheck's signature is Assert(obtained, checker, expected) — obtained first.
    c.Assert(specRefactored[specs[0]], Equals, true)
    c.Assert(specRefactored[specs[1]], Equals, false)
}
// TestRefactoringGivesOnlyThoseConceptFilesWhichAreRefactored verifies the
// returned file map flags only the concept files whose steps were rewritten.
func (s *MySuite) TestRefactoringGivesOnlyThoseConceptFilesWhichAreRefactored(c *C) {
    oldStep := "first step"
    newStep := "second step"
    unchanged := "unchanged"
    tokens := []*token{
        {kind: specKind, value: "Spec Heading", lineNo: 1},
        {kind: scenarioKind, value: "Scenario Heading 1", lineNo: 20},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    agent, _ := getRefactorAgent(oldStep, newStep)
    specs := []*specification{spec}
    dictionary := new(conceptDictionary)
    // file.cpt contains no exact oldStep occurrence; e+file.cpt does (inside step3).
    step1 := &step{value: oldStep + "sdsf", isConcept: true}
    step2 := &step{value: unchanged, isConcept: true, items: []item{&step{value: newStep, isConcept: false}, &step{value: oldStep + "T", isConcept: false}}}
    step3 := &step{value: "Concept value", isConcept: true, items: []item{&step{value: oldStep, isConcept: false}, &step{value: oldStep + "T", isConcept: false}}}
    fileName := "file.cpt"
    dictionary.add([]*step{step1, step2}, fileName)
    dictionary.add([]*step{step3}, "e"+fileName)
    _, filesRefactored := agent.rephraseInSpecsAndConcepts(&specs, dictionary)
    c.Assert(filesRefactored[fileName], Equals, false)
    c.Assert(filesRefactored["e"+fileName], Equals, true)
}
// TestRenamingWhenNumberOfArgumentsAreSame verifies that renaming a step with
// the same parameters in the same order keeps the argument values unchanged.
func (s *MySuite) TestRenamingWhenNumberOfArgumentsAreSame(c *C) {
    oldStep := "first step {static} and {static}"
    oldStep1 := "first step <a> and <b>"
    newStep := "second step <a> and <b>"
    tokens := []*token{
        {kind: specKind, value: "Spec Heading", lineNo: 1},
        {kind: scenarioKind, value: "Scenario Heading 1", lineNo: 2},
        {kind: stepKind, value: oldStep, lineNo: 3, args: []string{"name", "address"}},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    agent, _ := getRefactorAgent(oldStep1, newStep)
    specs := []*specification{spec}
    dictionary := new(conceptDictionary)
    agent.rephraseInSpecsAndConcepts(&specs, dictionary)
    c.Assert(specs[0].scenarios[0].steps[0].value, Equals, "second step {} and {}")
    c.Assert(specs[0].scenarios[0].steps[0].args[0].value, Equals, "name")
    c.Assert(specs[0].scenarios[0].steps[0].args[1].value, Equals, "address")
}
// TestRenamingWhenArgumentsOrderIsChanged verifies that reordering parameters in
// the new step name reorders the argument values accordingly (a<->d swapped).
func (s *MySuite) TestRenamingWhenArgumentsOrderIsChanged(c *C) {
    oldStep := "first step {static} and {static} and {static} and {static}"
    oldStep1 := "first step <a> and <b> and <c> and <d>"
    newStep := "second step <d> and <b> and <c> and <a>"
    tokens := []*token{
        {kind: specKind, value: "Spec Heading", lineNo: 1},
        {kind: scenarioKind, value: "Scenario Heading 1", lineNo: 2},
        {kind: stepKind, value: oldStep, lineNo: 3, args: []string{"name", "address", "number", "id"}},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    agent, _ := getRefactorAgent(oldStep1, newStep)
    specs := []*specification{spec}
    dictionary := new(conceptDictionary)
    agent.rephraseInSpecsAndConcepts(&specs, dictionary)
    c.Assert(specs[0].scenarios[0].steps[0].value, Equals, "second step {} and {} and {} and {}")
    c.Assert(specs[0].scenarios[0].steps[0].args[0].value, Equals, "id")
    c.Assert(specs[0].scenarios[0].steps[0].args[1].value, Equals, "address")
    c.Assert(specs[0].scenarios[0].steps[0].args[2].value, Equals, "number")
    c.Assert(specs[0].scenarios[0].steps[0].args[3].value, Equals, "name")
}
func (s *MySuite) TestCreateOrderGivesMapOfOldArgsAndNewArgs(c *C) {
step1 := &step{args: []*stepArg{&stepArg{name: "a"}, &stepArg{name: "b"}, &stepArg{name: "c"}, &stepArg{name: "d"}}}
step2 := &step{args: []*stepArg{&stepArg{name: "d"}, &stepArg{name: "b"}, &stepArg{name: "c"}, &stepArg{name: "a"}}}
agent := &rephraseRefactorer{step1, step2, false}
orderMap := agent.createOrderOfArgs()
c.Assert(orderMap[0], Equals, 3)
c.Assert(orderMap[1], Equals, 1)
c.Assert(orderMap[2], Equals, 2)
}
// TestCreateOrderGivesMapOfOldArgsAndNewWhenArgsAreAdded verifies that a newly
// introduced argument ("e") maps to -1 (no old counterpart).
func (s *MySuite) TestCreateOrderGivesMapOfOldArgsAndNewWhenArgsAreAdded(c *C) {
    step1 := &step{args: []*stepArg{{name: "a"}, {name: "b"}, {name: "c"}, {name: "d"}}}
    step2 := &step{args: []*stepArg{{name: "d"}, {name: "e"}, {name: "b"}, {name: "c"}, {name: "a"}}}
    agent := &rephraseRefactorer{step1, step2, false}
    orderMap := agent.createOrderOfArgs()
    c.Assert(orderMap[0], Equals, 3)
    c.Assert(orderMap[1], Equals, -1)
    c.Assert(orderMap[2], Equals, 1)
    c.Assert(orderMap[3], Equals, 2)
    c.Assert(orderMap[4], Equals, 0)
}
// TestCreateOrderGivesMapOfOldArgsAndNewWhenArgsAreRemoved verifies the order
// map when an old argument ("a") is dropped from the new step.
func (s *MySuite) TestCreateOrderGivesMapOfOldArgsAndNewWhenArgsAreRemoved(c *C) {
    step1 := &step{args: []*stepArg{{name: "a"}, {name: "b"}, {name: "c"}, {name: "d"}}}
    step2 := &step{args: []*stepArg{{name: "d"}, {name: "b"}, {name: "c"}}}
    agent := &rephraseRefactorer{step1, step2, false}
    orderMap := agent.createOrderOfArgs()
    c.Assert(orderMap[0], Equals, 3)
    c.Assert(orderMap[1], Equals, 1)
    c.Assert(orderMap[2], Equals, 2)
}
// TestRenamingWhenArgumentsIsAddedAtLast verifies that a parameter appended to
// the end of the new step gets its parameter name ("d") as the argument value.
func (s *MySuite) TestRenamingWhenArgumentsIsAddedAtLast(c *C) {
    oldStep := "first step {static} and {static} and {static}"
    oldStep1 := "first step <a> and <b> and <c>"
    newStep := "second step <a> and <b> and <c> and <d>"
    tokens := []*token{
        {kind: specKind, value: "Spec Heading", lineNo: 1},
        {kind: scenarioKind, value: "Scenario Heading 1", lineNo: 2},
        {kind: stepKind, value: oldStep, lineNo: 3, args: []string{"name", "address", "number"}},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    agent, _ := getRefactorAgent(oldStep1, newStep)
    specs := []*specification{spec}
    dictionary := new(conceptDictionary)
    agent.rephraseInSpecsAndConcepts(&specs, dictionary)
    c.Assert(specs[0].scenarios[0].steps[0].value, Equals, "second step {} and {} and {} and {}")
    c.Assert(specs[0].scenarios[0].steps[0].args[0].value, Equals, "name")
    c.Assert(specs[0].scenarios[0].steps[0].args[1].value, Equals, "address")
    c.Assert(specs[0].scenarios[0].steps[0].args[2].value, Equals, "number")
    c.Assert(specs[0].scenarios[0].steps[0].args[3].value, Equals, "d")
}
// TestRenamingWhenArgumentsIsAddedAtFirst verifies that a parameter prepended to
// the new step shifts the existing argument values right by one.
func (s *MySuite) TestRenamingWhenArgumentsIsAddedAtFirst(c *C) {
    oldStep := "first step {static} and {static} and {static}"
    oldStep1 := "first step <a> and <b> and <c>"
    newStep := "second step <d> and <a> and <b> and <c>"
    tokens := []*token{
        {kind: specKind, value: "Spec Heading", lineNo: 1},
        {kind: scenarioKind, value: "Scenario Heading 1", lineNo: 2},
        {kind: stepKind, value: oldStep, lineNo: 3, args: []string{"name", "address", "number"}},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    agent, _ := getRefactorAgent(oldStep1, newStep)
    specs := []*specification{spec}
    dictionary := new(conceptDictionary)
    agent.rephraseInSpecsAndConcepts(&specs, dictionary)
    c.Assert(specs[0].scenarios[0].steps[0].value, Equals, "second step {} and {} and {} and {}")
    c.Assert(specs[0].scenarios[0].steps[0].args[0].value, Equals, "d")
    c.Assert(specs[0].scenarios[0].steps[0].args[1].value, Equals, "name")
    c.Assert(specs[0].scenarios[0].steps[0].args[2].value, Equals, "address")
    c.Assert(specs[0].scenarios[0].steps[0].args[3].value, Equals, "number")
}
// TestRenamingWhenArgumentsIsAddedInMiddle verifies that a parameter inserted in
// the middle of the new step lands between the existing argument values.
func (s *MySuite) TestRenamingWhenArgumentsIsAddedInMiddle(c *C) {
    oldStep := "first step {static} and {static} and {static}"
    oldStep1 := "first step <a> and <b> and <c>"
    newStep := "second step <a> and <d> and <b> and <c>"
    tokens := []*token{
        {kind: specKind, value: "Spec Heading", lineNo: 1},
        {kind: scenarioKind, value: "Scenario Heading 1", lineNo: 2},
        {kind: stepKind, value: oldStep, lineNo: 3, args: []string{"name", "address", "number"}},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    agent, _ := getRefactorAgent(oldStep1, newStep)
    specs := []*specification{spec}
    dictionary := new(conceptDictionary)
    agent.rephraseInSpecsAndConcepts(&specs, dictionary)
    c.Assert(specs[0].scenarios[0].steps[0].value, Equals, "second step {} and {} and {} and {}")
    c.Assert(specs[0].scenarios[0].steps[0].args[0].value, Equals, "name")
    c.Assert(specs[0].scenarios[0].steps[0].args[1].value, Equals, "d")
    c.Assert(specs[0].scenarios[0].steps[0].args[2].value, Equals, "address")
    c.Assert(specs[0].scenarios[0].steps[0].args[3].value, Equals, "number")
}
// TestRenamingWhenArgumentsIsRemovedFromLast verifies that dropping the final
// parameter ("d") discards its value ("id") while keeping the rest in order.
func (s *MySuite) TestRenamingWhenArgumentsIsRemovedFromLast(c *C) {
    oldStep := "first step {static} and {static} and {static} and {static}"
    oldStep1 := "first step <a> and <b> and <c> and <d>"
    newStep := "second step <a> and <b> and <c>"
    tokens := []*token{
        {kind: specKind, value: "Spec Heading", lineNo: 1},
        {kind: scenarioKind, value: "Scenario Heading 1", lineNo: 2},
        {kind: stepKind, value: oldStep, lineNo: 3, args: []string{"name", "address", "number", "id"}},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    agent, _ := getRefactorAgent(oldStep1, newStep)
    specs := []*specification{spec}
    dictionary := new(conceptDictionary)
    agent.rephraseInSpecsAndConcepts(&specs, dictionary)
    c.Assert(specs[0].scenarios[0].steps[0].value, Equals, "second step {} and {} and {}")
    c.Assert(specs[0].scenarios[0].steps[0].args[0].value, Equals, "name")
    c.Assert(specs[0].scenarios[0].steps[0].args[1].value, Equals, "address")
    c.Assert(specs[0].scenarios[0].steps[0].args[2].value, Equals, "number")
}
// TestRenamingWhenArgumentsIsRemovedFromBegining verifies that dropping the
// first parameter ("a") discards its value ("name") and shifts the rest left.
// (Method name keeps the historical "Begining" spelling — renaming would change
// the suite's public surface.)
func (s *MySuite) TestRenamingWhenArgumentsIsRemovedFromBegining(c *C) {
    oldStep := "first step {static} and {static} and {static} and {static}"
    oldStep1 := "first step <a> and <b> and <c> and <d>"
    newStep := "second step <b> and <c> and <d>"
    tokens := []*token{
        {kind: specKind, value: "Spec Heading", lineNo: 1},
        {kind: scenarioKind, value: "Scenario Heading 1", lineNo: 2},
        {kind: stepKind, value: oldStep, lineNo: 3, args: []string{"name", "address", "number", "id"}},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    agent, _ := getRefactorAgent(oldStep1, newStep)
    specs := []*specification{spec}
    dictionary := new(conceptDictionary)
    agent.rephraseInSpecsAndConcepts(&specs, dictionary)
    c.Assert(specs[0].scenarios[0].steps[0].value, Equals, "second step {} and {} and {}")
    c.Assert(specs[0].scenarios[0].steps[0].args[0].value, Equals, "address")
    c.Assert(specs[0].scenarios[0].steps[0].args[1].value, Equals, "number")
    c.Assert(specs[0].scenarios[0].steps[0].args[2].value, Equals, "id")
}
// TestRenamingWhenArgumentsIsRemovedFromMiddle verifies that dropping a middle
// parameter ("c") discards its value ("number") while neighbors are preserved.
func (s *MySuite) TestRenamingWhenArgumentsIsRemovedFromMiddle(c *C) {
    oldStep := "first step {static} and {static} and {static} and {static}"
    oldStep1 := "first step <a> and <b> and <c> and <d>"
    newStep := "second step <a> and <b> and <d>"
    tokens := []*token{
        {kind: specKind, value: "Spec Heading", lineNo: 1},
        {kind: scenarioKind, value: "Scenario Heading 1", lineNo: 2},
        {kind: stepKind, value: oldStep, lineNo: 3, args: []string{"name", "address", "number", "id"}},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    agent, _ := getRefactorAgent(oldStep1, newStep)
    specs := []*specification{spec}
    dictionary := new(conceptDictionary)
    agent.rephraseInSpecsAndConcepts(&specs, dictionary)
    c.Assert(specs[0].scenarios[0].steps[0].value, Equals, "second step {} and {} and {}")
    c.Assert(specs[0].scenarios[0].steps[0].args[0].value, Equals, "name")
    c.Assert(specs[0].scenarios[0].steps[0].args[1].value, Equals, "address")
    c.Assert(specs[0].scenarios[0].steps[0].args[2].value, Equals, "id")
}
// TestGenerateNewStepNameGivesLineTextWithActualParamNames verifies that the
// generated line text substitutes the actual parameter names per the order map.
func (s *MySuite) TestGenerateNewStepNameGivesLineTextWithActualParamNames(c *C) {
    args := []string{"name", "address", "id"}
    newStep := "second step <a> and <b> and <d>"
    // Map literal instead of three separate assignments.
    orderMap := map[int]int{0: 1, 1: 2, 2: 0}
    agent, _ := getRefactorAgent(newStep, newStep)
    linetext := agent.generateNewStepName(args, orderMap)
    c.Assert(linetext, Equals, "second step <address> and <id> and <name>")
}
// TestGenerateNewStepNameWhenParametersAreAdded verifies that an added
// parameter (order-map value -1) keeps its literal text in the generated line.
func (s *MySuite) TestGenerateNewStepNameWhenParametersAreAdded(c *C) {
    args := []string{"name", "address"}
    newStep := "changed step <a> and <b> and \"id\""
    orderMap := map[int]int{0: 1, 1: 0, 2: -1}
    agent, _ := getRefactorAgent(newStep, newStep)
    linetext := agent.generateNewStepName(args, orderMap)
    c.Assert(linetext, Equals, "changed step <address> and <name> and \"id\"")
}
// TestGenerateNewStepNameWhenParametersAreRemoved verifies that removed
// parameters (order-map value -1) simply vanish from the generated line text.
func (s *MySuite) TestGenerateNewStepNameWhenParametersAreRemoved(c *C) {
    args := []string{"name", "address", "desc"}
    newStep := "changed step <b> and \"id\""
    orderMap := map[int]int{0: 1, 1: -1, 2: -1}
    agent, _ := getRefactorAgent(newStep, newStep)
    linetext := agent.generateNewStepName(args, orderMap)
    c.Assert(linetext, Equals, "changed step <address> and \"id\"")
}
// TestGenerateNewStepNameWhenParametersAreUnchanged verifies that when the
// parameter set is identical, the generated line text equals the new step text.
func (s *MySuite) TestGenerateNewStepNameWhenParametersAreUnchanged(c *C) {
    params := []string{"a"}
    renamed := "make comment <a>"
    agent, _ := getRefactorAgent("Comment <a>", renamed)
    lineText := agent.generateNewStepName(params, agent.createOrderOfArgs())
    c.Assert(lineText, Equals, "make comment <a>")
}
// Adding new test for checking order of arguments
// Copyright 2015 ThoughtWorks, Inc.
// This file is part of Gauge.
// Gauge is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Gauge is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Gauge. If not, see <http://www.gnu.org/licenses/>.
package main
import (
. "gopkg.in/check.v1"
)
// Verifies that renaming a step with no arguments rewrites the single
// occurrence inside a spec's scenario.
func (s *MySuite) TestRefactoringOfStepsWithNoArgs(c *C) {
    oldStep := "first step"
    newStep := "second step"
    // One spec heading, one scenario, one occurrence of the step to rename.
    tokens := []*token{
        &token{kind: specKind, value: "Spec Heading", lineNo: 1},
        &token{kind: scenarioKind, value: "Scenario Heading", lineNo: 2},
        &token{kind: stepKind, value: oldStep, lineNo: 3},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    agent, err := getRefactorAgent(oldStep, newStep)
    specs := append(make([]*specification, 0), spec)
    agent.rephraseInSpecsAndConcepts(&specs, new(conceptDictionary))
    c.Assert(err, Equals, nil)
    c.Assert(len(specs[0].scenarios[0].steps), Equals, 1)
    c.Assert(specs[0].scenarios[0].steps[0].value, Equals, newStep)
}
// Verifies that a rename touches every occurrence of the step across multiple
// scenarios while leaving unrelated steps ("unchanged") alone.
func (s *MySuite) TestRefactoringOfStepsWithNoArgsAndWithMoreThanOneScenario(c *C) {
    oldStep := "first step"
    newStep := "second step"
    unchanged := "unchanged"
    tokens := []*token{
        &token{kind: specKind, value: "Spec Heading", lineNo: 1},
        &token{kind: scenarioKind, value: "Scenario Heading", lineNo: 2},
        &token{kind: stepKind, value: oldStep, lineNo: 3},
        &token{kind: stepKind, value: oldStep, lineNo: 5},
        &token{kind: scenarioKind, value: "Scenario Heading 1", lineNo: 20},
        &token{kind: stepKind, value: unchanged, lineNo: 30},
        &token{kind: stepKind, value: oldStep, lineNo: 50},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    agent, err := getRefactorAgent(oldStep, newStep)
    specs := append(make([]*specification, 0), spec)
    agent.rephraseInSpecsAndConcepts(&specs, new(conceptDictionary))
    c.Assert(err, Equals, nil)
    c.Assert(len(specs[0].scenarios), Equals, 2)
    c.Assert(len(specs[0].scenarios[0].steps), Equals, 2)
    c.Assert(specs[0].scenarios[0].steps[0].value, Equals, newStep)
    c.Assert(specs[0].scenarios[0].steps[1].value, Equals, newStep)
    c.Assert(len(specs[0].scenarios[1].steps), Equals, 2)
    c.Assert(specs[0].scenarios[1].steps[0].value, Equals, unchanged)
    c.Assert(specs[0].scenarios[1].steps[1].value, Equals, newStep)
}
// Verifies that a rename is applied in every spec containing the step, and that
// each such spec is reported as refactored.
func (s *MySuite) TestRefactoringOfStepsWithNoArgsAndWithMoreThanOneSpec(c *C) {
    oldStep := " first step" // note: leading space is intentional test data
    newStep := "second step"
    tokens := []*token{
        &token{kind: specKind, value: "Spec Heading", lineNo: 1},
        &token{kind: scenarioKind, value: "Scenario Heading", lineNo: 2},
        &token{kind: stepKind, value: oldStep, lineNo: 3},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    tokens = []*token{
        &token{kind: specKind, value: "Spec Heading", lineNo: 10},
        &token{kind: scenarioKind, value: "Scenario Heading", lineNo: 20},
        &token{kind: stepKind, value: oldStep, lineNo: 30},
    }
    spec1, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    specs := append(make([]*specification, 0), spec)
    specs = append(specs, spec1)
    agent, err := getRefactorAgent(oldStep, newStep)
    specRefactored, _ := agent.rephraseInSpecsAndConcepts(&specs, new(conceptDictionary))
    // Both specs contained the old step, so every map entry must be true.
    for _, isRefactored := range specRefactored {
        c.Assert(true, Equals, isRefactored)
    }
    c.Assert(err, Equals, nil)
    c.Assert(len(specs[0].scenarios[0].steps), Equals, 1)
    c.Assert(specs[0].scenarios[0].steps[0].value, Equals, newStep)
    c.Assert(len(specs[1].scenarios[0].steps), Equals, 1)
    c.Assert(specs[1].scenarios[0].steps[0].value, Equals, newStep)
}
// Verifies that a rename reaches into concept definitions: exact matches inside
// a concept are rewritten while near-matches (oldStep + "T") are left untouched.
func (s *MySuite) TestRefactoringOfStepsWithNoArgsInConceptFiles(c *C) {
    oldStep := "first step"
    newStep := "second step"
    unchanged := "unchanged"
    tokens := []*token{
        &token{kind: specKind, value: "Spec Heading", lineNo: 1},
        &token{kind: scenarioKind, value: "Scenario Heading 1", lineNo: 20},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    agent, _ := getRefactorAgent(oldStep, newStep)
    specs := append(make([]*specification, 0), spec)
    dictionary := new(conceptDictionary)
    step1 := &step{value: oldStep + "sdsf", isConcept: true}
    step2 := &step{value: unchanged, isConcept: true, items: []item{&step{value: oldStep, isConcept: false}, &step{value: oldStep + "T", isConcept: false}}}
    dictionary.add([]*step{step1, step2}, "file.cpt")
    agent.rephraseInSpecsAndConcepts(&specs, dictionary)
    c.Assert(dictionary.conceptsMap[unchanged].conceptStep.items[0].(*step).value, Equals, newStep)
    c.Assert(dictionary.conceptsMap[unchanged].conceptStep.items[1].(*step).value, Equals, oldStep+"T")
}
// Verifies the returned map flags only the specs that actually contained the
// old step; the second spec already uses newStep and must be reported false.
func (s *MySuite) TestRefactoringGivesOnlySpecsThatAreRefactored(c *C) {
    oldStep := " first step" // note: leading space is intentional test data
    newStep := "second step"
    tokens := []*token{
        &token{kind: specKind, value: "Spec Heading", lineNo: 1},
        &token{kind: scenarioKind, value: "Scenario Heading", lineNo: 2},
        &token{kind: stepKind, value: oldStep, lineNo: 3},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    tokens = []*token{
        &token{kind: specKind, value: "Spec Heading", lineNo: 10},
        &token{kind: scenarioKind, value: "Scenario Heading", lineNo: 20},
        &token{kind: stepKind, value: newStep, lineNo: 30},
    }
    spec1, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    specs := append(make([]*specification, 0), spec)
    specs = append(specs, spec1)
    agent, _ := getRefactorAgent(oldStep, newStep)
    specRefactored, _ := agent.rephraseInSpecsAndConcepts(&specs, new(conceptDictionary))
    c.Assert(true, Equals, specRefactored[specs[0]])
    c.Assert(false, Equals, specRefactored[specs[1]])
}
// Verifies the returned file map flags only the concept files whose steps were
// rewritten: file.cpt has no exact oldStep match, "e"+file.cpt does.
func (s *MySuite) TestRefactoringGivesOnlyThoseConceptFilesWhichAreRefactored(c *C) {
    oldStep := "first step"
    newStep := "second step"
    unchanged := "unchanged"
    tokens := []*token{
        &token{kind: specKind, value: "Spec Heading", lineNo: 1},
        &token{kind: scenarioKind, value: "Scenario Heading 1", lineNo: 20},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    agent, _ := getRefactorAgent(oldStep, newStep)
    specs := append(make([]*specification, 0), spec)
    dictionary := new(conceptDictionary)
    step1 := &step{value: oldStep + "sdsf", isConcept: true}
    step2 := &step{value: unchanged, isConcept: true, items: []item{&step{value: newStep, isConcept: false}, &step{value: oldStep + "T", isConcept: false}}}
    step3 := &step{value: "Concept value", isConcept: true, items: []item{&step{value: oldStep, isConcept: false}, &step{value: oldStep + "T", isConcept: false}}}
    fileName := "file.cpt"
    dictionary.add([]*step{step1, step2}, fileName)
    dictionary.add([]*step{step3}, "e"+fileName)
    _, filesRefactored := agent.rephraseInSpecsAndConcepts(&specs, dictionary)
    c.Assert(filesRefactored[fileName], Equals, false)
    c.Assert(filesRefactored["e"+fileName], Equals, true)
}
// Verifies that renaming a step with identical parameters in the same order
// keeps the argument values unchanged.
func (s *MySuite) TestRenamingWhenNumberOfArgumentsAreSame(c *C) {
    oldStep := "first step {static} and {static}"
    oldStep1 := "first step <a> and <b>"
    newStep := "second step <a> and <b>"
    tokens := []*token{
        &token{kind: specKind, value: "Spec Heading", lineNo: 1},
        &token{kind: scenarioKind, value: "Scenario Heading 1", lineNo: 2},
        &token{kind: stepKind, value: oldStep, lineNo: 3, args: []string{"name", "address"}},
    }
    spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
    agent, _ := getRefactorAgent(oldStep1, newStep)
    specs := append(make([]*specification, 0), spec)
    dictionary := new(conceptDictionary)
    agent.rephraseInSpecsAndConcepts(&specs, dictionary)
    c.Assert(specs[0].scenarios[0].steps[0].value, Equals, "second step {} and {}")
    c.Assert(specs[0].scenarios[0].steps[0].args[0].value, Equals, "name")
    c.Assert(specs[0].scenarios[0].steps[0].args[1].value, Equals, "address")
}
// TestRenamingWhenArgumentsOrderIsChanged verifies that a rename which permutes
// the parameters reorders the concrete argument values accordingly.
func (s *MySuite) TestRenamingWhenArgumentsOrderIsChanged(c *C) {
	tokens := []*token{
		{kind: specKind, value: "Spec Heading", lineNo: 1},
		{kind: scenarioKind, value: "Scenario Heading 1", lineNo: 2},
		{kind: stepKind, value: "first step {static} and {static} and {static} and {static}", lineNo: 3, args: []string{"name", "address", "number", "id"}},
	}
	spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
	agent, _ := getRefactorAgent("first step <a> and <b> and <c> and <d>", "second step <d> and <b> and <c> and <a>")
	specs := []*specification{spec}
	agent.rephraseInSpecsAndConcepts(&specs, new(conceptDictionary))

	renamed := specs[0].scenarios[0].steps[0]
	c.Assert(renamed.value, Equals, "second step {} and {} and {} and {}")
	// <d> now comes first, so "id" leads; <a> moved last, so "name" trails.
	c.Assert(renamed.args[0].value, Equals, "id")
	c.Assert(renamed.args[1].value, Equals, "address")
	c.Assert(renamed.args[2].value, Equals, "number")
	c.Assert(renamed.args[3].value, Equals, "name")
}
// TestCreateOrderGivesMapOfOldArgsAndNewArgs checks the position map produced
// by createOrderOfArgs for a pure reordering of the same four parameters.
func (s *MySuite) TestCreateOrderGivesMapOfOldArgsAndNewArgs(c *C) {
	oldArgs := []*stepArg{{name: "a"}, {name: "b"}, {name: "c"}, {name: "d"}}
	newArgs := []*stepArg{{name: "d"}, {name: "b"}, {name: "c"}, {name: "a"}}
	agent := &rephraseRefactorer{&step{args: oldArgs}, &step{args: newArgs}, false}
	orderMap := agent.createOrderOfArgs()
	c.Assert(orderMap[0], Equals, 3)
	c.Assert(orderMap[1], Equals, 1)
	c.Assert(orderMap[2], Equals, 2)
}
// TestCreateOrderGivesMapOfOldArgsAndNewWhenArgsAreAdded checks that a newly
// introduced parameter ("e") maps to -1 (no old position) while the surviving
// parameters map to their old indices.
func (s *MySuite) TestCreateOrderGivesMapOfOldArgsAndNewWhenArgsAreAdded(c *C) {
	oldArgs := []*stepArg{{name: "a"}, {name: "b"}, {name: "c"}, {name: "d"}}
	newArgs := []*stepArg{{name: "d"}, {name: "e"}, {name: "b"}, {name: "c"}, {name: "a"}}
	agent := &rephraseRefactorer{&step{args: oldArgs}, &step{args: newArgs}, false}
	orderMap := agent.createOrderOfArgs()
	c.Assert(orderMap[0], Equals, 3)
	c.Assert(orderMap[1], Equals, -1)
	c.Assert(orderMap[2], Equals, 1)
	c.Assert(orderMap[3], Equals, 2)
	c.Assert(orderMap[4], Equals, 0)
}
// TestCreateOrderGivesMapOfOldArgsAndNewWhenArgsAreRemoved checks the position
// map when a parameter ("a") is dropped from the new signature.
func (s *MySuite) TestCreateOrderGivesMapOfOldArgsAndNewWhenArgsAreRemoved(c *C) {
	oldArgs := []*stepArg{{name: "a"}, {name: "b"}, {name: "c"}, {name: "d"}}
	newArgs := []*stepArg{{name: "d"}, {name: "b"}, {name: "c"}}
	agent := &rephraseRefactorer{&step{args: oldArgs}, &step{args: newArgs}, false}
	orderMap := agent.createOrderOfArgs()
	c.Assert(orderMap[0], Equals, 3)
	c.Assert(orderMap[1], Equals, 1)
	c.Assert(orderMap[2], Equals, 2)
}
// TestCreationOfOrderMapForStep checks the order map when a new static
// parameter ("DD") is appended to an otherwise unchanged step signature.
func (s *MySuite) TestCreationOfOrderMapForStep(c *C) {
	agent, _ := getRefactorAgent("Say <greeting> to <name>", "Say <greeting> to <name> \"DD\"")
	orderMap := agent.createOrderOfArgs()
	expected := map[int]int{0: 0, 1: 1, 2: -1}
	for newPos, oldPos := range expected {
		c.Assert(orderMap[newPos], Equals, oldPos)
	}
}
// TestRenamingWhenArgumentsIsAddedAtLast verifies that appending a parameter
// keeps the original argument values and adds the new parameter name ("d")
// as the final argument.
func (s *MySuite) TestRenamingWhenArgumentsIsAddedAtLast(c *C) {
	tokens := []*token{
		{kind: specKind, value: "Spec Heading", lineNo: 1},
		{kind: scenarioKind, value: "Scenario Heading 1", lineNo: 2},
		{kind: stepKind, value: "first step {static} and {static} and {static}", lineNo: 3, args: []string{"name", "address", "number"}},
	}
	spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
	agent, _ := getRefactorAgent("first step <a> and <b> and <c>", "second step <a> and <b> and <c> and <d>")
	specs := []*specification{spec}
	agent.rephraseInSpecsAndConcepts(&specs, new(conceptDictionary))

	renamed := specs[0].scenarios[0].steps[0]
	c.Assert(renamed.value, Equals, "second step {} and {} and {} and {}")
	c.Assert(renamed.args[0].value, Equals, "name")
	c.Assert(renamed.args[1].value, Equals, "address")
	c.Assert(renamed.args[2].value, Equals, "number")
	c.Assert(renamed.args[3].value, Equals, "d")
}
// TestRenamingWhenArgumentsIsAddedAtFirst verifies that prepending a parameter
// inserts the new parameter name ("d") as the first argument and shifts the
// original values right.
func (s *MySuite) TestRenamingWhenArgumentsIsAddedAtFirst(c *C) {
	tokens := []*token{
		{kind: specKind, value: "Spec Heading", lineNo: 1},
		{kind: scenarioKind, value: "Scenario Heading 1", lineNo: 2},
		{kind: stepKind, value: "first step {static} and {static} and {static}", lineNo: 3, args: []string{"name", "address", "number"}},
	}
	spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
	agent, _ := getRefactorAgent("first step <a> and <b> and <c>", "second step <d> and <a> and <b> and <c>")
	specs := []*specification{spec}
	agent.rephraseInSpecsAndConcepts(&specs, new(conceptDictionary))

	renamed := specs[0].scenarios[0].steps[0]
	c.Assert(renamed.value, Equals, "second step {} and {} and {} and {}")
	c.Assert(renamed.args[0].value, Equals, "d")
	c.Assert(renamed.args[1].value, Equals, "name")
	c.Assert(renamed.args[2].value, Equals, "address")
	c.Assert(renamed.args[3].value, Equals, "number")
}
// TestRenamingWhenArgumentsIsAddedInMiddle verifies that inserting a parameter
// mid-signature places the new parameter name ("d") at that position.
func (s *MySuite) TestRenamingWhenArgumentsIsAddedInMiddle(c *C) {
	tokens := []*token{
		{kind: specKind, value: "Spec Heading", lineNo: 1},
		{kind: scenarioKind, value: "Scenario Heading 1", lineNo: 2},
		{kind: stepKind, value: "first step {static} and {static} and {static}", lineNo: 3, args: []string{"name", "address", "number"}},
	}
	spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
	agent, _ := getRefactorAgent("first step <a> and <b> and <c>", "second step <a> and <d> and <b> and <c>")
	specs := []*specification{spec}
	agent.rephraseInSpecsAndConcepts(&specs, new(conceptDictionary))

	renamed := specs[0].scenarios[0].steps[0]
	c.Assert(renamed.value, Equals, "second step {} and {} and {} and {}")
	c.Assert(renamed.args[0].value, Equals, "name")
	c.Assert(renamed.args[1].value, Equals, "d")
	c.Assert(renamed.args[2].value, Equals, "address")
	c.Assert(renamed.args[3].value, Equals, "number")
}
// TestRenamingWhenArgumentsIsRemovedFromLast verifies that dropping the
// trailing parameter removes the final argument value ("id").
func (s *MySuite) TestRenamingWhenArgumentsIsRemovedFromLast(c *C) {
	tokens := []*token{
		{kind: specKind, value: "Spec Heading", lineNo: 1},
		{kind: scenarioKind, value: "Scenario Heading 1", lineNo: 2},
		{kind: stepKind, value: "first step {static} and {static} and {static} and {static}", lineNo: 3, args: []string{"name", "address", "number", "id"}},
	}
	spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
	agent, _ := getRefactorAgent("first step <a> and <b> and <c> and <d>", "second step <a> and <b> and <c>")
	specs := []*specification{spec}
	agent.rephraseInSpecsAndConcepts(&specs, new(conceptDictionary))

	renamed := specs[0].scenarios[0].steps[0]
	c.Assert(renamed.value, Equals, "second step {} and {} and {}")
	c.Assert(renamed.args[0].value, Equals, "name")
	c.Assert(renamed.args[1].value, Equals, "address")
	c.Assert(renamed.args[2].value, Equals, "number")
}
// TestRenamingWhenArgumentsIsRemovedFromBegining verifies that dropping the
// leading parameter removes the first argument value ("name").
// (Note: "Begining" is a typo in the long-established test name; renaming the
// test would change its identity for test runners, so it is kept.)
func (s *MySuite) TestRenamingWhenArgumentsIsRemovedFromBegining(c *C) {
	tokens := []*token{
		{kind: specKind, value: "Spec Heading", lineNo: 1},
		{kind: scenarioKind, value: "Scenario Heading 1", lineNo: 2},
		{kind: stepKind, value: "first step {static} and {static} and {static} and {static}", lineNo: 3, args: []string{"name", "address", "number", "id"}},
	}
	spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
	agent, _ := getRefactorAgent("first step <a> and <b> and <c> and <d>", "second step <b> and <c> and <d>")
	specs := []*specification{spec}
	agent.rephraseInSpecsAndConcepts(&specs, new(conceptDictionary))

	renamed := specs[0].scenarios[0].steps[0]
	c.Assert(renamed.value, Equals, "second step {} and {} and {}")
	c.Assert(renamed.args[0].value, Equals, "address")
	c.Assert(renamed.args[1].value, Equals, "number")
	c.Assert(renamed.args[2].value, Equals, "id")
}
// TestRenamingWhenArgumentsIsRemovedFromMiddle verifies that dropping a middle
// parameter removes the corresponding argument value ("number").
func (s *MySuite) TestRenamingWhenArgumentsIsRemovedFromMiddle(c *C) {
	tokens := []*token{
		{kind: specKind, value: "Spec Heading", lineNo: 1},
		{kind: scenarioKind, value: "Scenario Heading 1", lineNo: 2},
		{kind: stepKind, value: "first step {static} and {static} and {static} and {static}", lineNo: 3, args: []string{"name", "address", "number", "id"}},
	}
	spec, _ := new(specParser).createSpecification(tokens, new(conceptDictionary))
	agent, _ := getRefactorAgent("first step <a> and <b> and <c> and <d>", "second step <a> and <b> and <d>")
	specs := []*specification{spec}
	agent.rephraseInSpecsAndConcepts(&specs, new(conceptDictionary))

	renamed := specs[0].scenarios[0].steps[0]
	c.Assert(renamed.value, Equals, "second step {} and {} and {}")
	c.Assert(renamed.args[0].value, Equals, "name")
	c.Assert(renamed.args[1].value, Equals, "address")
	c.Assert(renamed.args[2].value, Equals, "id")
}
// TestGenerateNewStepNameGivesLineTextWithActualParamNames checks that
// generateNewStepName substitutes the concrete parameter names into the new
// step text according to the supplied order map.
func (s *MySuite) TestGenerateNewStepNameGivesLineTextWithActualParamNames(c *C) {
	newStep := "second step <a> and <b> and <d>"
	agent, _ := getRefactorAgent(newStep, newStep)
	orderMap := map[int]int{0: 1, 1: 2, 2: 0}
	linetext := agent.generateNewStepName([]string{"name", "address", "id"}, orderMap)
	c.Assert(linetext, Equals, "second step <address> and <id> and <name>")
}
// TestGenerateNewStepNameWhenParametersAreAdded checks that a position mapped
// to -1 (a brand-new parameter) keeps the literal text from the new step.
func (s *MySuite) TestGenerateNewStepNameWhenParametersAreAdded(c *C) {
	newStep := "changed step <a> and <b> and \"id\""
	agent, _ := getRefactorAgent(newStep, newStep)
	orderMap := map[int]int{0: 1, 1: 0, 2: -1}
	linetext := agent.generateNewStepName([]string{"name", "address"}, orderMap)
	c.Assert(linetext, Equals, "changed step <address> and <name> and \"id\"")
}
// TestGenerateNewStepNameWhenParametersAreRemoved checks that dropped
// parameters (mapped to -1) simply do not appear in the generated line text.
func (s *MySuite) TestGenerateNewStepNameWhenParametersAreRemoved(c *C) {
	newStep := "changed step <b> and \"id\""
	agent, _ := getRefactorAgent(newStep, newStep)
	orderMap := map[int]int{0: 1, 1: -1, 2: -1}
	linetext := agent.generateNewStepName([]string{"name", "address", "desc"}, orderMap)
	c.Assert(linetext, Equals, "changed step <address> and \"id\"")
}
// TestGenerateNewStepNameWhenParametersAreUnchanged checks the simplest case:
// the step text changes but the single parameter stays in place.
func (s *MySuite) TestGenerateNewStepNameWhenParametersAreUnchanged(c *C) {
	agent, _ := getRefactorAgent("Comment <a>", "make comment <a>")
	linetext := agent.generateNewStepName([]string{"a"}, agent.createOrderOfArgs())
	c.Assert(linetext, Equals, "make comment <a>")
}
|
package armhelpers
import (
"fmt"
"time"
"github.com/Azure/azure-sdk-for-go/arm/authorization"
"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/Azure/azure-sdk-for-go/arm/graphrbac"
"github.com/Azure/azure-sdk-for-go/arm/resources/resources"
"github.com/Azure/go-autorest/autorest"
)
// MockACSEngineClient is a mock implementation of ACSEngineClient. Each
// operation succeeds with canned data unless its corresponding Fail* flag is
// set, in which case that operation returns an error.
// (Previous comment had a typo, "implemetnation", and wrongly claimed all
// requests error out.)
type MockACSEngineClient struct {
	FailDeployTemplate              bool
	FailEnsureResourceGroup         bool
	FailListVirtualMachines         bool
	FailListVirtualMachineScaleSets bool
	FailGetVirtualMachine           bool
	FailDeleteVirtualMachine        bool
	FailGetStorageClient            bool
	FailDeleteNetworkInterface      bool
}
// MockStorageClient is a stateless mock implementation of StorageClient.
type MockStorageClient struct{}
// DeleteBlob mock; always reports success without touching any storage.
func (msc *MockStorageClient) DeleteBlob(container, blob string) error {
	return nil
}
// AddAcceptLanguages mock; the supplied languages are intentionally discarded.
// The redundant bare `return` at the end of a void function was removed
// (staticcheck S1023).
func (mc *MockACSEngineClient) AddAcceptLanguages(languages []string) {
}
// DeployTemplate mock; succeeds with a nil deployment unless
// FailDeployTemplate is set.
func (mc *MockACSEngineClient) DeployTemplate(resourceGroup, name string, template, parameters map[string]interface{}, cancel <-chan struct{}) (*resources.DeploymentExtended, error) {
	if !mc.FailDeployTemplate {
		return nil, nil
	}
	return nil, fmt.Errorf("DeployTemplate failed")
}
// EnsureResourceGroup mock; succeeds with a nil group unless
// FailEnsureResourceGroup is set.
func (mc *MockACSEngineClient) EnsureResourceGroup(resourceGroup, location string) (*resources.Group, error) {
	if !mc.FailEnsureResourceGroup {
		return nil, nil
	}
	return nil, fmt.Errorf("EnsureResourceGroup failed")
}
// ListVirtualMachines mock; returns a single fabricated master VM carrying the
// creationSource/orchestrator/resourceNameSuffix tags, unless
// FailListVirtualMachines is set.
func (mc *MockACSEngineClient) ListVirtualMachines(resourceGroup string) (compute.VirtualMachineListResult, error) {
	if mc.FailListVirtualMachines {
		return compute.VirtualMachineListResult{}, fmt.Errorf("ListVirtualMachines failed")
	}

	vmName := "k8s-master-12345678-0"
	creationSource := "acsengine-k8s-master-12345678-0"
	orchestrator := "Kubernetes:1.5.3"
	resourceNameSuffix := "12345678"
	tags := map[string]*string{
		"creationSource":     &creationSource,
		"orchestrator":       &orchestrator,
		"resourceNameSuffix": &resourceNameSuffix,
	}

	vm := compute.VirtualMachine{
		Name: &vmName,
		Tags: &tags,
		VirtualMachineProperties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{
				OsDisk: &compute.OSDisk{
					Vhd: &compute.VirtualHardDisk{URI: &validOSDiskResourceName},
				},
			},
			NetworkProfile: &compute.NetworkProfile{
				NetworkInterfaces: &[]compute.NetworkInterfaceReference{
					{ID: &validNicResourceName},
				},
			},
		},
	}

	result := compute.VirtualMachineListResult{Value: &[]compute.VirtualMachine{vm}}
	return result, nil
}
// ListVirtualMachineScaleSets mock; returns an empty result unless
// FailListVirtualMachineScaleSets is set.
func (mc *MockACSEngineClient) ListVirtualMachineScaleSets(resourceGroup string) (compute.VirtualMachineScaleSetListResult, error) {
	if mc.FailListVirtualMachineScaleSets {
		// Fixed a copy/paste slip: the message used to read
		// "ListVirtualMachines failed", which made failures of this mock
		// indistinguishable from ListVirtualMachines failures in test output.
		return compute.VirtualMachineScaleSetListResult{}, fmt.Errorf("ListVirtualMachineScaleSets failed")
	}
	return compute.VirtualMachineScaleSetListResult{}, nil
}
// GetVirtualMachine mock; returns a fabricated master VM with canned tags,
// unless FailGetVirtualMachine is set.
func (mc *MockACSEngineClient) GetVirtualMachine(resourceGroup, name string) (compute.VirtualMachine, error) {
	if mc.FailGetVirtualMachine {
		return compute.VirtualMachine{}, fmt.Errorf("GetVirtualMachine failed")
	}

	vmName := "k8s-master-12345678-0"
	creationSource := "acsengine-k8s-master-12345678-0"
	orchestrator := "Kubernetes:1.5.3"
	resourceNameSuffix := "12345678"
	tags := map[string]*string{
		"creationSource":     &creationSource,
		"orchestrator":       &orchestrator,
		"resourceNameSuffix": &resourceNameSuffix,
	}

	vm := compute.VirtualMachine{
		Name: &vmName,
		Tags: &tags,
		VirtualMachineProperties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{
				OsDisk: &compute.OSDisk{
					Vhd: &compute.VirtualHardDisk{URI: &validOSDiskResourceName},
				},
			},
			NetworkProfile: &compute.NetworkProfile{
				NetworkInterfaces: &[]compute.NetworkInterfaceReference{
					{ID: &validNicResourceName},
				},
			},
		},
	}
	return vm, nil
}
// DeleteVirtualMachine mock.
//
// Results are delivered asynchronously on channels, mirroring the real ARM
// client contract: on failure an error is sent; on success a nil error
// followed by a zero OperationStatusResponse. Both channels are always closed.
//
// Fixes: removed the `time.Sleep(1 * time.Second)` after the sends — mocks
// should never sleep, it only slowed the test suite down — and merged the two
// duplicated goroutine bodies into one.
func (mc *MockACSEngineClient) DeleteVirtualMachine(resourceGroup, name string, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) {
	errChan := make(chan error)
	respChan := make(chan compute.OperationStatusResponse)
	go func() {
		defer close(errChan)
		defer close(respChan)
		if mc.FailDeleteVirtualMachine {
			errChan <- fmt.Errorf("DeleteVirtualMachine failed")
			return
		}
		errChan <- nil
		respChan <- compute.OperationStatusResponse{}
	}()
	return respChan, errChan
}
// GetStorageClient mock; hands back a MockStorageClient unless
// FailGetStorageClient is set.
func (mc *MockACSEngineClient) GetStorageClient(resourceGroup, accountName string) (ACSStorageClient, error) {
	if !mc.FailGetStorageClient {
		return &MockStorageClient{}, nil
	}
	return nil, fmt.Errorf("GetStorageClient failed")
}
// DeleteNetworkInterface mock.
//
// Results are delivered asynchronously on channels, like the real ARM client:
// on failure an error is sent; on success a nil error followed by a zero
// autorest.Response. Both channels are always closed.
//
// Fixes: removed the `time.Sleep(1 * time.Second)` after the sends — mocks
// should never sleep — and merged the two duplicated goroutine bodies.
func (mc *MockACSEngineClient) DeleteNetworkInterface(resourceGroup, nicName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
	errChan := make(chan error)
	respChan := make(chan autorest.Response)
	go func() {
		defer close(errChan)
		defer close(respChan)
		if mc.FailDeleteNetworkInterface {
			errChan <- fmt.Errorf("DeleteNetworkInterface failed")
			return
		}
		errChan <- nil
		respChan <- autorest.Response{}
	}()
	return respChan, errChan
}
// Canned Azure resource identifiers returned by the VM mocks above.
var validOSDiskResourceName = "https://00k71r4u927seqiagnt0.blob.core.windows.net/osdisk/k8s-agentpool1-12345678-0-osdisk.vhd"
var validNicResourceName = "/subscriptions/DEC923E3-1EF1-4745-9516-37906D56DEC4/resourceGroups/acsK8sTest/providers/Microsoft.Network/networkInterfaces/k8s-agent-12345678-nic-0"
// Active Directory
// Mocks

// Graph Mocks

// CreateGraphApplication creates an application via the graphrbac client.
// The mock always succeeds with an empty Application.
func (mc *MockACSEngineClient) CreateGraphApplication(applicationCreateParameters graphrbac.ApplicationCreateParameters) (graphrbac.Application, error) {
	return graphrbac.Application{}, nil
}
// CreateGraphPrincipal creates a service principal via the graphrbac client.
// The mock always succeeds with an empty ServicePrincipal.
func (mc *MockACSEngineClient) CreateGraphPrincipal(servicePrincipalCreateParameters graphrbac.ServicePrincipalCreateParameters) (graphrbac.ServicePrincipal, error) {
	return graphrbac.ServicePrincipal{}, nil
}
// CreateApp is a simpler method for creating an application.
// The mock returns fixed identifiers and a fixed secret.
// NOTE(review): the second result is named servicePrincipalObjectID but holds
// "client-id" — confirm no caller relies on it being a real object ID.
func (mc *MockACSEngineClient) CreateApp(applicationName, applicationURL string) (applicationID, servicePrincipalObjectID, secret string, err error) {
	return "app-id", "client-id", "client-secret", nil
}
// RBAC Mocks

// CreateRoleAssignment creates a role assignment via the authorization client.
// The mock always succeeds with an empty RoleAssignment.
func (mc *MockACSEngineClient) CreateRoleAssignment(scope string, roleAssignmentName string, parameters authorization.RoleAssignmentCreateParameters) (authorization.RoleAssignment, error) {
	return authorization.RoleAssignment{}, nil
}
// CreateRoleAssignmentSimple is a wrapper around RoleAssignmentsClient.Create.
// The mock always reports success.
func (mc *MockACSEngineClient) CreateRoleAssignmentSimple(applicationID, roleID string) error {
	return nil
}
chore(mocks): we should not be sleeping in mocks
package armhelpers
import (
"fmt"
"github.com/Azure/azure-sdk-for-go/arm/authorization"
"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/Azure/azure-sdk-for-go/arm/graphrbac"
"github.com/Azure/azure-sdk-for-go/arm/resources/resources"
"github.com/Azure/go-autorest/autorest"
)
// MockACSEngineClient is a mock implementation of ACSEngineClient. Each
// operation succeeds with canned data unless its corresponding Fail* flag is
// set, in which case that operation returns an error.
// (Previous comment had a typo, "implemetnation", and wrongly claimed all
// requests error out.)
type MockACSEngineClient struct {
	FailDeployTemplate              bool
	FailEnsureResourceGroup         bool
	FailListVirtualMachines         bool
	FailListVirtualMachineScaleSets bool
	FailGetVirtualMachine           bool
	FailDeleteVirtualMachine        bool
	FailGetStorageClient            bool
	FailDeleteNetworkInterface      bool
}
// MockStorageClient is a stateless mock implementation of StorageClient.
type MockStorageClient struct{}
// DeleteBlob mock; always reports success without touching any storage.
func (msc *MockStorageClient) DeleteBlob(container, blob string) error {
	return nil
}
// AddAcceptLanguages mock; the supplied languages are intentionally discarded.
// The redundant bare `return` at the end of a void function was removed
// (staticcheck S1023).
func (mc *MockACSEngineClient) AddAcceptLanguages(languages []string) {
}
// DeployTemplate mock; succeeds with a nil deployment unless
// FailDeployTemplate is set.
func (mc *MockACSEngineClient) DeployTemplate(resourceGroup, name string, template, parameters map[string]interface{}, cancel <-chan struct{}) (*resources.DeploymentExtended, error) {
	if !mc.FailDeployTemplate {
		return nil, nil
	}
	return nil, fmt.Errorf("DeployTemplate failed")
}
// EnsureResourceGroup mock; succeeds with a nil group unless
// FailEnsureResourceGroup is set.
func (mc *MockACSEngineClient) EnsureResourceGroup(resourceGroup, location string) (*resources.Group, error) {
	if !mc.FailEnsureResourceGroup {
		return nil, nil
	}
	return nil, fmt.Errorf("EnsureResourceGroup failed")
}
// ListVirtualMachines mock; returns a single fabricated master VM carrying the
// creationSource/orchestrator/resourceNameSuffix tags, unless
// FailListVirtualMachines is set.
func (mc *MockACSEngineClient) ListVirtualMachines(resourceGroup string) (compute.VirtualMachineListResult, error) {
	if mc.FailListVirtualMachines {
		return compute.VirtualMachineListResult{}, fmt.Errorf("ListVirtualMachines failed")
	}

	vmName := "k8s-master-12345678-0"
	creationSource := "acsengine-k8s-master-12345678-0"
	orchestrator := "Kubernetes:1.5.3"
	resourceNameSuffix := "12345678"
	tags := map[string]*string{
		"creationSource":     &creationSource,
		"orchestrator":       &orchestrator,
		"resourceNameSuffix": &resourceNameSuffix,
	}

	vm := compute.VirtualMachine{
		Name: &vmName,
		Tags: &tags,
		VirtualMachineProperties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{
				OsDisk: &compute.OSDisk{
					Vhd: &compute.VirtualHardDisk{URI: &validOSDiskResourceName},
				},
			},
			NetworkProfile: &compute.NetworkProfile{
				NetworkInterfaces: &[]compute.NetworkInterfaceReference{
					{ID: &validNicResourceName},
				},
			},
		},
	}

	result := compute.VirtualMachineListResult{Value: &[]compute.VirtualMachine{vm}}
	return result, nil
}
// ListVirtualMachineScaleSets mock; returns an empty result unless
// FailListVirtualMachineScaleSets is set.
func (mc *MockACSEngineClient) ListVirtualMachineScaleSets(resourceGroup string) (compute.VirtualMachineScaleSetListResult, error) {
	if mc.FailListVirtualMachineScaleSets {
		// Fixed a copy/paste slip: the message used to read
		// "ListVirtualMachines failed", which made failures of this mock
		// indistinguishable from ListVirtualMachines failures in test output.
		return compute.VirtualMachineScaleSetListResult{}, fmt.Errorf("ListVirtualMachineScaleSets failed")
	}
	return compute.VirtualMachineScaleSetListResult{}, nil
}
// GetVirtualMachine mock; returns a fabricated master VM with canned tags,
// unless FailGetVirtualMachine is set.
func (mc *MockACSEngineClient) GetVirtualMachine(resourceGroup, name string) (compute.VirtualMachine, error) {
	if mc.FailGetVirtualMachine {
		return compute.VirtualMachine{}, fmt.Errorf("GetVirtualMachine failed")
	}

	vmName := "k8s-master-12345678-0"
	creationSource := "acsengine-k8s-master-12345678-0"
	orchestrator := "Kubernetes:1.5.3"
	resourceNameSuffix := "12345678"
	tags := map[string]*string{
		"creationSource":     &creationSource,
		"orchestrator":       &orchestrator,
		"resourceNameSuffix": &resourceNameSuffix,
	}

	vm := compute.VirtualMachine{
		Name: &vmName,
		Tags: &tags,
		VirtualMachineProperties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{
				OsDisk: &compute.OSDisk{
					Vhd: &compute.VirtualHardDisk{URI: &validOSDiskResourceName},
				},
			},
			NetworkProfile: &compute.NetworkProfile{
				NetworkInterfaces: &[]compute.NetworkInterfaceReference{
					{ID: &validNicResourceName},
				},
			},
		},
	}
	return vm, nil
}
// DeleteVirtualMachine mock.
//
// Results are delivered asynchronously on channels, mirroring the real ARM
// client contract: on failure an error is sent; on success a nil error
// followed by a zero OperationStatusResponse. Both channels are always closed.
func (mc *MockACSEngineClient) DeleteVirtualMachine(resourceGroup, name string, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) {
	errChan := make(chan error)
	respChan := make(chan compute.OperationStatusResponse)
	go func() {
		defer close(errChan)
		defer close(respChan)
		if mc.FailDeleteVirtualMachine {
			errChan <- fmt.Errorf("DeleteVirtualMachine failed")
			return
		}
		errChan <- nil
		respChan <- compute.OperationStatusResponse{}
	}()
	return respChan, errChan
}
// GetStorageClient mock; hands back a MockStorageClient unless
// FailGetStorageClient is set.
func (mc *MockACSEngineClient) GetStorageClient(resourceGroup, accountName string) (ACSStorageClient, error) {
	if !mc.FailGetStorageClient {
		return &MockStorageClient{}, nil
	}
	return nil, fmt.Errorf("GetStorageClient failed")
}
// DeleteNetworkInterface mock.
//
// Results are delivered asynchronously on channels, like the real ARM client:
// on failure an error is sent; on success a nil error followed by a zero
// autorest.Response. Both channels are always closed.
func (mc *MockACSEngineClient) DeleteNetworkInterface(resourceGroup, nicName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
	errChan := make(chan error)
	respChan := make(chan autorest.Response)
	go func() {
		defer close(errChan)
		defer close(respChan)
		if mc.FailDeleteNetworkInterface {
			errChan <- fmt.Errorf("DeleteNetworkInterface failed")
			return
		}
		errChan <- nil
		respChan <- autorest.Response{}
	}()
	return respChan, errChan
}
// Canned Azure resource identifiers returned by the VM mocks above.
var validOSDiskResourceName = "https://00k71r4u927seqiagnt0.blob.core.windows.net/osdisk/k8s-agentpool1-12345678-0-osdisk.vhd"
var validNicResourceName = "/subscriptions/DEC923E3-1EF1-4745-9516-37906D56DEC4/resourceGroups/acsK8sTest/providers/Microsoft.Network/networkInterfaces/k8s-agent-12345678-nic-0"
// Active Directory
// Mocks

// Graph Mocks

// CreateGraphApplication creates an application via the graphrbac client.
// The mock always succeeds with an empty Application.
func (mc *MockACSEngineClient) CreateGraphApplication(applicationCreateParameters graphrbac.ApplicationCreateParameters) (graphrbac.Application, error) {
	return graphrbac.Application{}, nil
}
// CreateGraphPrincipal creates a service principal via the graphrbac client.
// The mock always succeeds with an empty ServicePrincipal.
func (mc *MockACSEngineClient) CreateGraphPrincipal(servicePrincipalCreateParameters graphrbac.ServicePrincipalCreateParameters) (graphrbac.ServicePrincipal, error) {
	return graphrbac.ServicePrincipal{}, nil
}
// CreateApp is a simpler method for creating an application.
// The mock returns fixed identifiers and a fixed secret.
// NOTE(review): the second result is named servicePrincipalObjectID but holds
// "client-id" — confirm no caller relies on it being a real object ID.
func (mc *MockACSEngineClient) CreateApp(applicationName, applicationURL string) (applicationID, servicePrincipalObjectID, secret string, err error) {
	return "app-id", "client-id", "client-secret", nil
}
// RBAC Mocks

// CreateRoleAssignment creates a role assignment via the authorization client.
// The mock always succeeds with an empty RoleAssignment.
func (mc *MockACSEngineClient) CreateRoleAssignment(scope string, roleAssignmentName string, parameters authorization.RoleAssignmentCreateParameters) (authorization.RoleAssignment, error) {
	return authorization.RoleAssignment{}, nil
}
// CreateRoleAssignmentSimple is a wrapper around RoleAssignmentsClient.Create.
// The mock always reports success.
func (mc *MockACSEngineClient) CreateRoleAssignmentSimple(applicationID, roleID string) error {
	return nil
}
|
/*
Copyright 2016 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package osd
import (
"fmt"
"os"
"os/signal"
"path/filepath"
"regexp"
"strings"
"syscall"
"github.com/coreos/pkg/capnslog"
"github.com/pkg/errors"
"github.com/rook/rook/pkg/clusterd"
"github.com/rook/rook/pkg/daemon/ceph/client"
oposd "github.com/rook/rook/pkg/operator/ceph/cluster/osd"
"github.com/rook/rook/pkg/util/sys"
)
const (
	// Device types assigned by configRawDevice based on the mount-path prefix
	// of a PVC-backed device (/mnt, /srv, /wal respectively).
	pvcDataTypeDevice     = "data"
	pvcMetadataTypeDevice = "metadata"
	pvcWalTypeDevice      = "wal"
	// lvmCommandToCheck names the lvm binary; presumably probed for
	// availability elsewhere in the package — confirm at the call site.
	lvmCommandToCheck = "lvm"
)
var (
	// logger is the package-wide logger for the cephosd daemon code.
	logger = capnslog.NewPackageLogger("github.com/rook/rook", "cephosd")
)
// StartOSD starts an OSD on a device that was provisioned by ceph-volume.
//
// For PVC-backed (non-LV-backed-PV) OSDs it first cycles the volume group
// (deactivate then activate), then activates the OSD via
// `ceph-volume lvm activate`, runs the ceph-osd daemon until it exits, and
// finally releases the LVM device on a best-effort basis.
func StartOSD(context *clusterd.Context, osdType, osdID, osdUUID, lvPath string, pvcBackedOSD, lvBackedPV bool, cephArgs []string) error {
	// ensure the config mount point exists
	configDir := fmt.Sprintf("/var/lib/ceph/osd/ceph-%s", osdID)
	if err := os.Mkdir(configDir, 0750); err != nil {
		// Non-fatal: log and continue (the directory may already exist).
		logger.Errorf("failed to create config dir %q. %v", configDir, err)
	}

	// Update LVM config at runtime
	if err := UpdateLVMConfig(context, pvcBackedOSD, lvBackedPV); err != nil {
		return errors.Wrap(err, "failed to update lvm configuration file") // fail return here as validation provided by ceph-volume
	}

	var volumeGroupName string
	if pvcBackedOSD && !lvBackedPV {
		// BUG FIX: this used to be `volumeGroupName := ...`, which shadowed
		// the outer variable and left it empty for the releaseLVMDevice call
		// at the end of this function.
		volumeGroupName = getVolumeGroupName(lvPath)
		if volumeGroupName == "" {
			// BUG FIX: previously this wrapped the stale `err` from the Mkdir
			// above with errors.Wrapf; when that err was nil (the common
			// case), pkg/errors returns nil and the failure was silently
			// swallowed. Build a fresh error instead.
			return errors.Errorf("error fetching volume group name for OSD %q", osdID)
		}
		go handleTerminate(context, lvPath, volumeGroupName)

		// It's fine to continue if deactivate fails since we will return error if activate fails
		if op, err := context.Executor.ExecuteCommandWithCombinedOutput("vgchange", "-an", "-vv", volumeGroupName); err != nil {
			// NOTE(review): this logs and then returns nil, aborting the OSD
			// start while reporting success — confirm this is intended.
			logger.Errorf("failed to deactivate volume group for lv %q. output: %s. %v", lvPath, op, err)
			return nil
		}

		if op, err := context.Executor.ExecuteCommandWithCombinedOutput("vgchange", "-ay", "-vv", volumeGroupName); err != nil {
			return errors.Wrapf(err, "failed to activate volume group for lv %q. output: %s", lvPath, op)
		}
	}

	// activate the osd with ceph-volume
	storeFlag := "--" + osdType
	if err := context.Executor.ExecuteCommand("stdbuf", "-oL", "ceph-volume", "lvm", "activate", "--no-systemd", storeFlag, osdID, osdUUID); err != nil {
		return errors.Wrap(err, "failed to activate osd")
	}

	// run the ceph-osd daemon
	if err := context.Executor.ExecuteCommand("ceph-osd", cephArgs...); err != nil {
		// Instead of returning, we want to allow the lvm release to happen below, so we just log the err
		logger.Errorf("failed to start osd or shutting down. %v", err)
	}

	if pvcBackedOSD && !lvBackedPV {
		// volumeGroupName is correctly populated here thanks to the shadowing
		// fix above.
		if err := releaseLVMDevice(context, volumeGroupName); err != nil {
			// Let's just report the error and not fail as a best-effort since some drivers will force detach anyway
			// Failing to release the device does not means the detach will fail so let's proceed
			logger.Errorf("failed to release device from lvm. %v", err)
			return nil
		}
	}

	return nil
}
// handleTerminate blocks until SIGTERM is delivered, then kills the ceph-osd
// process holding lvPath so the LVM device can be released cleanly.
// NOTE(review): volumeGroupName is currently unused here; it is kept to
// preserve the call signature.
func handleTerminate(context *clusterd.Context, lvPath, volumeGroupName string) {
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc, syscall.SIGTERM)
	<-sigc

	logger.Infof("shutdown signal received, exiting...")
	if err := killCephOSDProcess(context, lvPath); err != nil {
		logger.Errorf("failed to kill ceph-osd process. %v", err)
	}
}
// killCephOSDProcess looks up the process holding lvPath via fuser and, if one
// exists, terminates it with SIGKILL so the LVM release does not hit a
// "device in use" error.
func killCephOSDProcess(context *clusterd.Context, lvPath string) error {
	pid, err := context.Executor.ExecuteCommandWithOutput("fuser", "-a", lvPath)
	if err != nil {
		return errors.Wrapf(err, "failed to retrieve process ID for %q", lvPath)
	}
	logger.Infof("process ID for ceph-osd: %s", pid)

	// shut down the osd-ceph process so that lvm release does not show device in use error.
	if pid == "" {
		// No process is holding the device; nothing to stop.
		return nil
	}

	// The OSD needs to exit as quickly as possible in order for the IO requests
	// to be redirected to other OSDs in the cluster. The OSD is designed to tolerate failures
	// of any kind, including power loss or kill -9. The upstream Ceph tests have for many years
	// been testing with kill -9 so this is expected to be safe. There is a fix upstream Ceph that will
	// improve the shutdown time of the OSD. For cleanliness we should consider removing the -9
	// once it is backported to Nautilus: https://github.com/ceph/ceph/pull/31677.
	if err := context.Executor.ExecuteCommand("kill", "-9", pid); err != nil {
		return errors.Wrap(err, "failed to kill ceph-osd process")
	}
	return nil
}
// configRawDevice populates device info for name and classifies the device as
// data, metadata, or wal based on its mount-path prefix.
func configRawDevice(name string, context *clusterd.Context) (*sys.LocalDisk, error) {
	rawDevice, err := clusterd.PopulateDeviceInfo(name, context.Executor)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get device info for %q", name)
	}

	// set the device type: data, block_db(metadata) or wal.
	switch {
	case strings.HasPrefix(name, "/mnt"):
		if rawDevice, err = clusterd.PopulateDeviceUdevInfo(rawDevice.KernelName, context.Executor, rawDevice); err != nil {
			// Udev info is best-effort; continue with what we have.
			logger.Warningf("failed to get udev info for device %q. %v", name, err)
		}
		rawDevice.Type = pvcDataTypeDevice
	case strings.HasPrefix(name, "/srv"):
		rawDevice.Type = pvcMetadataTypeDevice
	case strings.HasPrefix(name, "/wal"):
		rawDevice.Type = pvcWalTypeDevice
	}
	return rawDevice, nil
}
// Provision discovers the local devices, selects those eligible for OSDs,
// configures them with ceph-volume, and reports orchestration status back to
// the operator via UpdateNodeStatus. crushLocation and topologyAffinity are
// stamped onto every configured OSD. A run that matches no devices is
// reported as a completed (no-op) orchestration, not an error.
func Provision(context *clusterd.Context, agent *OsdAgent, crushLocation, topologyAffinity string) error {
	if agent.pvcBacked {
		// Init KMS store, retrieve the KEK and store it as an env var for ceph-volume
		err := setKEKinEnv(context, agent.clusterInfo)
		if err != nil {
			return errors.Wrap(err, "failed to set kek as an environment variable")
		}
	}

	// Print dmsetup version
	err := dmsetupVersion(context)
	if err != nil {
		return errors.Wrap(err, "failed to print device mapper version")
	}

	// set the initial orchestration status
	status := oposd.OrchestrationStatus{Status: oposd.OrchestrationStatusOrchestrating}
	oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status)

	if err := client.WriteCephConfig(context, agent.clusterInfo); err != nil {
		return errors.Wrap(err, "failed to generate ceph config")
	}

	logger.Infof("discovering hardware")

	var rawDevices []*sys.LocalDisk
	if agent.pvcBacked {
		// On PVC, each requested device is resolved individually; any failure
		// to resolve a device aborts provisioning.
		for i := range agent.devices {
			rawDevice, err := configRawDevice(agent.devices[i].Name, context)
			if err != nil {
				return err
			}
			rawDevices = append(rawDevices, rawDevice)
		}
	} else {
		// We still need to use 'lsblk' as the underlying way to discover devices
		// Ideally, we would use the "ceph-volume inventory" command instead
		// However, it suffers from some limitation such as exposing available partitions and LVs
		// See: https://tracker.ceph.com/issues/43579
		rawDevices, err = clusterd.DiscoverDevices(context.Executor)
		if err != nil {
			return errors.Wrap(err, "failed initial hardware discovery")
		}
	}
	context.Devices = rawDevices

	logger.Info("creating and starting the osds")

	// determine the set of devices that can/should be used for OSDs.
	devices, err := getAvailableDevices(context, agent)
	if err != nil {
		return errors.Wrap(err, "failed to get available devices")
	}

	// orchestration is about to start, update the status
	status = oposd.OrchestrationStatus{Status: oposd.OrchestrationStatusOrchestrating, PvcBackedOSD: agent.pvcBacked}
	oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status)

	// start the desired OSDs on devices
	logger.Infof("configuring osd devices: %+v", devices)
	deviceOSDs, err := agent.configureCVDevices(context, devices)
	if err != nil {
		return errors.Wrap(err, "failed to configure devices")
	}

	// Let's fail if no OSDs were configured
	// This likely means the filter for available devices passed (in PVC case)
	// but the resulting device was already configured for another cluster (disk not wiped and leftover)
	// So we need to make sure the list is filled up, otherwise fail
	if len(deviceOSDs) == 0 {
		logger.Warningf("skipping OSD configuration as no devices matched the storage settings for this node %q", agent.nodeName)
		status = oposd.OrchestrationStatus{OSDs: deviceOSDs, Status: oposd.OrchestrationStatusCompleted, PvcBackedOSD: agent.pvcBacked}
		oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status)
		return nil
	}

	// Populate CRUSH location for each OSD on the host
	for i := range deviceOSDs {
		deviceOSDs[i].Location = crushLocation
		deviceOSDs[i].TopologyAffinity = topologyAffinity
	}

	logger.Infof("devices = %+v", deviceOSDs)

	// Since we are done configuring the PVC we need to release it from LVM
	// If we don't do this, the device will remain hold by LVM and we won't be able to detach it
	// When running on PVC, the device is:
	// * attached on the prepare pod
	// * osd is mkfs
	// * detached from the prepare pod
	// * attached to the activate pod
	// * then the OSD runs
	if agent.pvcBacked && !deviceOSDs[0].SkipLVRelease && !deviceOSDs[0].LVBackedPV {
		// Try to discover the VG of that LV
		volumeGroupName := getVolumeGroupName(deviceOSDs[0].BlockPath)

		// If empty the osd is using the ceph-volume raw mode
		// so it's consumming a raw block device and LVM is not used
		// so there is nothing to de-activate
		if volumeGroupName != "" {
			if err := releaseLVMDevice(context, volumeGroupName); err != nil {
				return errors.Wrap(err, "failed to release device from lvm")
			}
		} else {
			// TODO
			// don't assume this and run a bluestore check on the device to be sure?
			logger.Infof("ceph-volume raw mode used by block %q, no VG to de-activate", deviceOSDs[0].BlockPath)
		}
	}

	// orchestration is completed, update the status
	status = oposd.OrchestrationStatus{OSDs: deviceOSDs, Status: oposd.OrchestrationStatusCompleted, PvcBackedOSD: agent.pvcBacked}
	oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status)

	return nil
}
// getAvailableDevices scans context.Devices and returns the mapping of
// devices that can be used for OSDs on this node, honoring the desired
// device names/filters configured on the agent. Devices that cannot or
// should not be used (dm devices, devices carrying a filesystem, partitions
// on a too-old ceph, devices failing the availability check) are logged and
// skipped so that a single bad device does not abort discovery of the rest.
func getAvailableDevices(context *clusterd.Context, agent *OsdAgent) (*DeviceOsdMapping, error) {
	desiredDevices := agent.devices
	logger.Debugf("desiredDevices are %+v", desiredDevices)

	logger.Debug("context.Devices are:")
	for _, disk := range context.Devices {
		logger.Debugf("%+v", disk)
	}

	available := &DeviceOsdMapping{Entries: map[string]*DeviceOsdIDEntry{}}
	for _, device := range context.Devices {
		// Ignore 'dm' device since they are not handled by c-v properly
		// see: https://tracker.ceph.com/issues/43209
		if strings.HasPrefix(device.Name, sys.DeviceMapperPrefix) && device.Type == sys.LVMType {
			logger.Infof("skipping 'dm' device %q", device.Name)
			continue
		}

		// Ignore device with filesystem signature since c-v inventory
		// cannot detect that correctly
		// see: https://tracker.ceph.com/issues/43585
		if device.Filesystem != "" {
			logger.Infof("skipping device %q because it contains a filesystem %q", device.Name, device.Filesystem)
			continue
		}

		// If we detect a partition we have to make sure that ceph-volume will be able to consume it
		// ceph-volume version 14.2.8 has the right code to support partitions
		if device.Type == sys.PartType {
			if !agent.clusterInfo.CephVersion.IsAtLeast(cephVolumeRawModeMinCephVersion) {
				logger.Infof("skipping device %q because it is a partition and ceph version is too old, you need at least ceph %q", device.Name, cephVolumeRawModeMinCephVersion.String())
				continue
			}
			// NOTE(review): this ':=' shadows the loop variable, so the udev
			// info populated here is dropped after this block. Only the error
			// path matters today; confirm whether the outer 'device' should
			// be updated instead.
			device, err := clusterd.PopulateDeviceUdevInfo(device.Name, context.Executor, device)
			if err != nil {
				logger.Errorf("failed to get udev info of partition %q. %v", device.Name, err)
				continue
			}
		}

		// Check if the desired device is available
		//
		// We need to use the /dev path, provided by the NAME property from "lsblk --paths",
		// especially when running on PVC and/or on dm device
		// When running on PVC we use the real device name instead of the Kubernetes mountpoint
		// When running on dm device we use the dm device name like "/dev/mapper/foo" instead of "/dev/dm-1"
		// Otherwise ceph-volume inventory will fail on the udevadm check
		// udevadm does not support device path different than /dev or /sys
		//
		// So earlier lsblk extracted the '/dev' path, hence the device.Name property
		// device.Name can be 'xvdca', later this is formatted to '/dev/xvdca'
		var err error
		var isAvailable bool
		rejectedReason := ""
		if agent.pvcBacked {
			block := fmt.Sprintf("/mnt/%s", agent.nodeName)
			rawOsds, err := GetCephVolumeRawOSDs(context, agent.clusterInfo, agent.clusterInfo.FSID, block, agent.metadataDevice, "", false)
			if err != nil {
				isAvailable = false
				rejectedReason = fmt.Sprintf("failed to detect if there is already an osd. %v", err)
			} else if len(rawOsds) > 0 {
				isAvailable = false
				rejectedReason = "already in use by a raw OSD, no need to reconfigure"
			} else {
				isAvailable = true
			}
		} else {
			isAvailable, rejectedReason, err = sys.CheckIfDeviceAvailable(context.Executor, device.RealPath, agent.pvcBacked)
			if err != nil {
				// BUGFIX: do not abort the whole discovery because one device
				// could not be checked; mark it rejected and keep going.
				// See: https://github.com/rook/rook/issues/7543
				isAvailable = false
				rejectedReason = fmt.Sprintf("failed to check if the device %q is available. %v", device.Name, err)
			}
		}

		if !isAvailable {
			logger.Infof("skipping device %q: %s.", device.Name, rejectedReason)
			continue
		} else {
			logger.Infof("device %q is available.", device.Name)
		}

		var deviceInfo *DeviceOsdIDEntry
		if agent.metadataDevice != "" && agent.metadataDevice == device.Name {
			// current device is desired as the metadata device
			deviceInfo = &DeviceOsdIDEntry{Data: unassignedOSDID, Metadata: []int{}}
		} else if len(desiredDevices) == 1 && desiredDevices[0].Name == "all" {
			// user has specified all devices, use the current one for data
			deviceInfo = &DeviceOsdIDEntry{Data: unassignedOSDID}
		} else if len(desiredDevices) > 0 {
			var matched bool
			var matchedDevice DesiredDevice
			for _, desiredDevice := range desiredDevices {
				if desiredDevice.IsFilter {
					// the desired devices is a regular expression
					matched, err = regexp.Match(desiredDevice.Name, []byte(device.Name))
					if err != nil {
						logger.Errorf("regex failed on device %q and filter %q. %v", device.Name, desiredDevice.Name, err)
						continue
					}
					if matched {
						logger.Infof("device %q matches device filter %q", device.Name, desiredDevice.Name)
					}
				} else if desiredDevice.IsDevicePathFilter {
					pathnames := append(strings.Fields(device.DevLinks), filepath.Join("/dev", device.Name))
					for _, pathname := range pathnames {
						matched, err = regexp.Match(desiredDevice.Name, []byte(pathname))
						if err != nil {
							logger.Errorf("regex failed on device %q and filter %q. %v", device.Name, desiredDevice.Name, err)
							continue
						}
						if matched {
							logger.Infof("device %q (aliases: %q) matches device path filter %q", device.Name, device.DevLinks, desiredDevice.Name)
							break
						}
					}
				} else if device.Name == desiredDevice.Name {
					logger.Infof("%q found in the desired devices", device.Name)
					matched = true
				} else if strings.HasPrefix(desiredDevice.Name, "/dev/") {
					devLinks := strings.Split(device.DevLinks, " ")
					for _, link := range devLinks {
						if link == desiredDevice.Name {
							logger.Infof("%q found in the desired devices (matched by link: %q)", device.Name, link)
							matched = true
							break
						}
					}
				}
				matchedDevice = desiredDevice
				if matched {
					break
				}
			}

			if err == nil && matched {
				// the current device matches the user specifies filter/list, use it for data
				logger.Infof("device %q is selected by the device filter/name %q", device.Name, matchedDevice.Name)
				deviceInfo = &DeviceOsdIDEntry{Data: unassignedOSDID, Config: matchedDevice, PersistentDevicePaths: strings.Fields(device.DevLinks)}

				// set that this is not an OSD but a metadata device
				if device.Type == pvcMetadataTypeDevice {
					logger.Infof("metadata device %q is selected by the device filter/name %q", device.Name, matchedDevice.Name)
					deviceInfo = &DeviceOsdIDEntry{Config: matchedDevice, PersistentDevicePaths: strings.Fields(device.DevLinks), Metadata: []int{1}}
				}

				// set that this is not an OSD but a wal device
				if device.Type == pvcWalTypeDevice {
					logger.Infof("wal device %q is selected by the device filter/name %q", device.Name, matchedDevice.Name)
					deviceInfo = &DeviceOsdIDEntry{Config: matchedDevice, PersistentDevicePaths: strings.Fields(device.DevLinks), Metadata: []int{2}}
				}
			} else {
				logger.Infof("skipping device %q that does not match the device filter/list (%v). %v", device.Name, desiredDevices, err)
			}
		} else {
			logger.Infof("skipping device %q until the admin specifies it can be used by an osd", device.Name)
		}

		if deviceInfo != nil {
			// When running on PVC, we typically have a single device only
			// So it's fine to name the first entry of the map "data" instead of the PVC name
			// It is particularly useful when a metadata PVC is used because we need to identify it in the map
			// So the entry must be named "metadata" so it can accessed later
			if agent.pvcBacked {
				if device.Type == pvcDataTypeDevice {
					available.Entries[pvcDataTypeDevice] = deviceInfo
				} else if device.Type == pvcMetadataTypeDevice {
					available.Entries[pvcMetadataTypeDevice] = deviceInfo
				} else if device.Type == pvcWalTypeDevice {
					available.Entries[pvcWalTypeDevice] = deviceInfo
				}
			} else {
				available.Entries[device.Name] = deviceInfo
			}
		}
	}

	return available, nil
}
// releaseLVMDevice deactivates the LV to release the device.
// Deactivation is done with "lvchange -an"; the combined command output is
// included in the returned error to aid debugging.
func releaseLVMDevice(context *clusterd.Context, volumeGroupName string) error {
	op, err := context.Executor.ExecuteCommandWithCombinedOutput("lvchange", "-an", "-vv", volumeGroupName)
	if err != nil {
		return errors.Wrapf(err, "failed to deactivate LVM %s. output: %s", volumeGroupName, op)
	}
	logger.Info("successfully released device from lvm")
	return nil
}
// getVolumeGroupName returns the Volume group name from the given Logical
// Volume Path, or "" (with a warning) when the path is malformed.
func getVolumeGroupName(lvPath string) string {
	// A valid LV path has the form `/dev/<vg name>/<lv name>`, which splits
	// into exactly ["", "dev", vg, lv].
	fields := strings.Split(lvPath, "/")
	if len(fields) != 4 || fields[2] == "" {
		logger.Warningf("invalid LV Path: %q", lvPath)
		return ""
	}
	return fields[2]
}
ceph: continue to get available devices if failed to get a device info
getAvailableDevices() should continue with the next device if something goes wrong with one
device, rather than returning immediately with an error.
Closes: https://github.com/rook/rook/issues/7543
Signed-off-by: Satoru Takeuchi <0be322b419f8cd46ee75447832ef0476dff222bc@gmail.com>
/*
Copyright 2016 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package osd
import (
"fmt"
"os"
"os/signal"
"path/filepath"
"regexp"
"strings"
"syscall"
"github.com/coreos/pkg/capnslog"
"github.com/pkg/errors"
"github.com/rook/rook/pkg/clusterd"
"github.com/rook/rook/pkg/daemon/ceph/client"
oposd "github.com/rook/rook/pkg/operator/ceph/cluster/osd"
"github.com/rook/rook/pkg/util/sys"
)
const (
	// pvc* values label the role a PVC-backed device plays for an OSD and are
	// used as keys into the DeviceOsdMapping entries.
	pvcDataTypeDevice     = "data"
	pvcMetadataTypeDevice = "metadata"
	pvcWalTypeDevice      = "wal"
	// lvmCommandToCheck — presumably the binary name probed to verify LVM is
	// installed; its use is outside this file, confirm against callers.
	lvmCommandToCheck = "lvm"
)

var (
	// logger is the package-level logger for the ceph OSD daemon code.
	logger = capnslog.NewPackageLogger("github.com/rook/rook", "cephosd")
)
// StartOSD starts an OSD on a device that was provisioned by ceph-volume.
// It prepares the config dir, fixes up LVM state for PVC-backed OSDs,
// activates the OSD with "ceph-volume lvm activate", runs the ceph-osd
// daemon (blocking until it exits), and finally releases the LVM device on
// PVC so the volume can be detached.
func StartOSD(context *clusterd.Context, osdType, osdID, osdUUID, lvPath string, pvcBackedOSD, lvBackedPV bool, cephArgs []string) error {
	// ensure the config mount point exists
	configDir := fmt.Sprintf("/var/lib/ceph/osd/ceph-%s", osdID)
	err := os.Mkdir(configDir, 0750)
	if err != nil {
		// non-fatal: the directory may already exist
		logger.Errorf("failed to create config dir %q. %v", configDir, err)
	}

	// Update LVM config at runtime
	if err := UpdateLVMConfig(context, pvcBackedOSD, lvBackedPV); err != nil {
		return errors.Wrap(err, "failed to update lvm configuration file") // fail return here as validation provided by ceph-volume
	}

	var volumeGroupName string
	if pvcBackedOSD && !lvBackedPV {
		// BUGFIX: assign with '=' instead of ':='. The previous short
		// declaration shadowed the outer volumeGroupName, leaving it empty
		// for the releaseLVMDevice call at the end of this function.
		volumeGroupName = getVolumeGroupName(lvPath)
		if volumeGroupName == "" {
			// BUGFIX: the previous errors.Wrapf wrapped the stale (typically
			// nil) err from os.Mkdir above, which made this return nil and
			// silently continue with an empty VG name.
			return errors.Errorf("error fetching volume group name for OSD %q", osdID)
		}

		go handleTerminate(context, lvPath, volumeGroupName)

		// It's fine to continue if deactivate fails since we will return error if activate fails
		if op, err := context.Executor.ExecuteCommandWithCombinedOutput("vgchange", "-an", "-vv", volumeGroupName); err != nil {
			logger.Errorf("failed to deactivate volume group for lv %q. output: %s. %v", lvPath, op, err)
			return nil
		}

		if op, err := context.Executor.ExecuteCommandWithCombinedOutput("vgchange", "-ay", "-vv", volumeGroupName); err != nil {
			return errors.Wrapf(err, "failed to activate volume group for lv %q. output: %s", lvPath, op)
		}
	}

	// activate the osd with ceph-volume
	storeFlag := "--" + osdType
	if err := context.Executor.ExecuteCommand("stdbuf", "-oL", "ceph-volume", "lvm", "activate", "--no-systemd", storeFlag, osdID, osdUUID); err != nil {
		return errors.Wrap(err, "failed to activate osd")
	}

	// run the ceph-osd daemon
	if err := context.Executor.ExecuteCommand("ceph-osd", cephArgs...); err != nil {
		// Instead of returning, we want to allow the lvm release to happen below, so we just log the err
		logger.Errorf("failed to start osd or shutting down. %v", err)
	}

	if pvcBackedOSD && !lvBackedPV {
		if err := releaseLVMDevice(context, volumeGroupName); err != nil {
			// Let's just report the error and not fail as a best-effort since some drivers will force detach anyway
			// Failing to release the device does not means the detach will fail so let's proceed
			logger.Errorf("failed to release device from lvm. %v", err)
			return nil
		}
	}

	return nil
}
// handleTerminate blocks until SIGTERM is delivered to the process, then
// makes a best-effort attempt to kill the ceph-osd process holding lvPath.
func handleTerminate(context *clusterd.Context, lvPath, volumeGroupName string) {
	terminate := make(chan os.Signal, 1)
	signal.Notify(terminate, syscall.SIGTERM)

	// Block until the pod is asked to shut down.
	<-terminate
	logger.Infof("shutdown signal received, exiting...")

	if err := killCephOSDProcess(context, lvPath); err != nil {
		logger.Errorf("failed to kill ceph-osd process. %v", err)
	}
}
// killCephOSDProcess terminates the ceph-osd process that is holding lvPath
// so a later LVM release does not fail with a device-in-use error.
func killCephOSDProcess(context *clusterd.Context, lvPath string) error {
	pid, err := context.Executor.ExecuteCommandWithOutput("fuser", "-a", lvPath)
	if err != nil {
		return errors.Wrapf(err, "failed to retrieve process ID for %q", lvPath)
	}
	logger.Infof("process ID for ceph-osd: %s", pid)

	// shut down the osd-ceph process so that lvm release does not show device in use error.
	if pid == "" {
		return nil
	}

	// The OSD needs to exit as quickly as possible in order for the IO requests
	// to be redirected to other OSDs in the cluster. The OSD is designed to tolerate failures
	// of any kind, including power loss or kill -9. The upstream Ceph tests have for many years
	// been testing with kill -9 so this is expected to be safe. There is a fix upstream Ceph that will
	// improve the shutdown time of the OSD. For cleanliness we should consider removing the -9
	// once it is backported to Nautilus: https://github.com/ceph/ceph/pull/31677.
	if err := context.Executor.ExecuteCommand("kill", "-9", pid); err != nil {
		return errors.Wrap(err, "failed to kill ceph-osd process")
	}
	return nil
}
// configRawDevice looks up disk information for the named device and assigns
// its PVC role (data, metadata or wal) from the mount prefix under which the
// device was handed to this pod.
func configRawDevice(name string, context *clusterd.Context) (*sys.LocalDisk, error) {
	disk, err := clusterd.PopulateDeviceInfo(name, context.Executor)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get device info for %q", name)
	}

	// set the device type: data, block_db(metadata) or wal.
	if strings.HasPrefix(name, "/mnt") {
		// the data device also needs udev details; failure here is only a warning
		disk, err = clusterd.PopulateDeviceUdevInfo(disk.KernelName, context.Executor, disk)
		if err != nil {
			logger.Warningf("failed to get udev info for device %q. %v", name, err)
		}
		disk.Type = pvcDataTypeDevice
	} else if strings.HasPrefix(name, "/srv") {
		disk.Type = pvcMetadataTypeDevice
	} else if strings.HasPrefix(name, "/wal") {
		disk.Type = pvcWalTypeDevice
	}

	return disk, nil
}
// Provision discovers the local devices, selects those eligible for OSDs,
// configures them with ceph-volume, and reports orchestration status back to
// the operator via UpdateNodeStatus. crushLocation and topologyAffinity are
// stamped onto every configured OSD. A run that matches no devices is
// reported as a completed (no-op) orchestration, not an error.
func Provision(context *clusterd.Context, agent *OsdAgent, crushLocation, topologyAffinity string) error {
	if agent.pvcBacked {
		// Init KMS store, retrieve the KEK and store it as an env var for ceph-volume
		err := setKEKinEnv(context, agent.clusterInfo)
		if err != nil {
			return errors.Wrap(err, "failed to set kek as an environment variable")
		}
	}

	// Print dmsetup version
	err := dmsetupVersion(context)
	if err != nil {
		return errors.Wrap(err, "failed to print device mapper version")
	}

	// set the initial orchestration status
	status := oposd.OrchestrationStatus{Status: oposd.OrchestrationStatusOrchestrating}
	oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status)

	if err := client.WriteCephConfig(context, agent.clusterInfo); err != nil {
		return errors.Wrap(err, "failed to generate ceph config")
	}

	logger.Infof("discovering hardware")

	var rawDevices []*sys.LocalDisk
	if agent.pvcBacked {
		// On PVC, each requested device is resolved individually; any failure
		// to resolve a device aborts provisioning.
		for i := range agent.devices {
			rawDevice, err := configRawDevice(agent.devices[i].Name, context)
			if err != nil {
				return err
			}
			rawDevices = append(rawDevices, rawDevice)
		}
	} else {
		// We still need to use 'lsblk' as the underlying way to discover devices
		// Ideally, we would use the "ceph-volume inventory" command instead
		// However, it suffers from some limitation such as exposing available partitions and LVs
		// See: https://tracker.ceph.com/issues/43579
		rawDevices, err = clusterd.DiscoverDevices(context.Executor)
		if err != nil {
			return errors.Wrap(err, "failed initial hardware discovery")
		}
	}
	context.Devices = rawDevices

	logger.Info("creating and starting the osds")

	// determine the set of devices that can/should be used for OSDs.
	devices, err := getAvailableDevices(context, agent)
	if err != nil {
		return errors.Wrap(err, "failed to get available devices")
	}

	// orchestration is about to start, update the status
	status = oposd.OrchestrationStatus{Status: oposd.OrchestrationStatusOrchestrating, PvcBackedOSD: agent.pvcBacked}
	oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status)

	// start the desired OSDs on devices
	logger.Infof("configuring osd devices: %+v", devices)
	deviceOSDs, err := agent.configureCVDevices(context, devices)
	if err != nil {
		return errors.Wrap(err, "failed to configure devices")
	}

	// Let's fail if no OSDs were configured
	// This likely means the filter for available devices passed (in PVC case)
	// but the resulting device was already configured for another cluster (disk not wiped and leftover)
	// So we need to make sure the list is filled up, otherwise fail
	if len(deviceOSDs) == 0 {
		logger.Warningf("skipping OSD configuration as no devices matched the storage settings for this node %q", agent.nodeName)
		status = oposd.OrchestrationStatus{OSDs: deviceOSDs, Status: oposd.OrchestrationStatusCompleted, PvcBackedOSD: agent.pvcBacked}
		oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status)
		return nil
	}

	// Populate CRUSH location for each OSD on the host
	for i := range deviceOSDs {
		deviceOSDs[i].Location = crushLocation
		deviceOSDs[i].TopologyAffinity = topologyAffinity
	}

	logger.Infof("devices = %+v", deviceOSDs)

	// Since we are done configuring the PVC we need to release it from LVM
	// If we don't do this, the device will remain hold by LVM and we won't be able to detach it
	// When running on PVC, the device is:
	// * attached on the prepare pod
	// * osd is mkfs
	// * detached from the prepare pod
	// * attached to the activate pod
	// * then the OSD runs
	if agent.pvcBacked && !deviceOSDs[0].SkipLVRelease && !deviceOSDs[0].LVBackedPV {
		// Try to discover the VG of that LV
		volumeGroupName := getVolumeGroupName(deviceOSDs[0].BlockPath)

		// If empty the osd is using the ceph-volume raw mode
		// so it's consumming a raw block device and LVM is not used
		// so there is nothing to de-activate
		if volumeGroupName != "" {
			if err := releaseLVMDevice(context, volumeGroupName); err != nil {
				return errors.Wrap(err, "failed to release device from lvm")
			}
		} else {
			// TODO
			// don't assume this and run a bluestore check on the device to be sure?
			logger.Infof("ceph-volume raw mode used by block %q, no VG to de-activate", deviceOSDs[0].BlockPath)
		}
	}

	// orchestration is completed, update the status
	status = oposd.OrchestrationStatus{OSDs: deviceOSDs, Status: oposd.OrchestrationStatusCompleted, PvcBackedOSD: agent.pvcBacked}
	oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status)

	return nil
}
// getAvailableDevices scans context.Devices and returns the mapping of
// devices that can be used for OSDs on this node, honoring the desired
// device names/filters configured on the agent. Devices that cannot or
// should not be used (dm devices, devices carrying a filesystem, partitions
// on a too-old ceph, devices failing the availability check) are logged and
// skipped so that a single bad device does not abort discovery of the rest
// (see https://github.com/rook/rook/issues/7543).
func getAvailableDevices(context *clusterd.Context, agent *OsdAgent) (*DeviceOsdMapping, error) {
	desiredDevices := agent.devices
	logger.Debugf("desiredDevices are %+v", desiredDevices)

	logger.Debug("context.Devices are:")
	for _, disk := range context.Devices {
		logger.Debugf("%+v", disk)
	}

	available := &DeviceOsdMapping{Entries: map[string]*DeviceOsdIDEntry{}}
	for _, device := range context.Devices {
		// Ignore 'dm' device since they are not handled by c-v properly
		// see: https://tracker.ceph.com/issues/43209
		if strings.HasPrefix(device.Name, sys.DeviceMapperPrefix) && device.Type == sys.LVMType {
			logger.Infof("skipping 'dm' device %q", device.Name)
			continue
		}

		// Ignore device with filesystem signature since c-v inventory
		// cannot detect that correctly
		// see: https://tracker.ceph.com/issues/43585
		if device.Filesystem != "" {
			logger.Infof("skipping device %q because it contains a filesystem %q", device.Name, device.Filesystem)
			continue
		}

		// If we detect a partition we have to make sure that ceph-volume will be able to consume it
		// ceph-volume version 14.2.8 has the right code to support partitions
		if device.Type == sys.PartType {
			if !agent.clusterInfo.CephVersion.IsAtLeast(cephVolumeRawModeMinCephVersion) {
				logger.Infof("skipping device %q because it is a partition and ceph version is too old, you need at least ceph %q", device.Name, cephVolumeRawModeMinCephVersion.String())
				continue
			}
			// NOTE(review): this ':=' shadows the loop variable, so the udev
			// info populated here is dropped after this block. Only the error
			// path matters today; confirm whether the outer 'device' should
			// be updated instead.
			device, err := clusterd.PopulateDeviceUdevInfo(device.Name, context.Executor, device)
			if err != nil {
				logger.Errorf("failed to get udev info of partition %q. %v", device.Name, err)
				continue
			}
		}

		// Check if the desired device is available
		//
		// We need to use the /dev path, provided by the NAME property from "lsblk --paths",
		// especially when running on PVC and/or on dm device
		// When running on PVC we use the real device name instead of the Kubernetes mountpoint
		// When running on dm device we use the dm device name like "/dev/mapper/foo" instead of "/dev/dm-1"
		// Otherwise ceph-volume inventory will fail on the udevadm check
		// udevadm does not support device path different than /dev or /sys
		//
		// So earlier lsblk extracted the '/dev' path, hence the device.Name property
		// device.Name can be 'xvdca', later this is formatted to '/dev/xvdca'
		var err error
		var isAvailable bool
		rejectedReason := ""
		if agent.pvcBacked {
			block := fmt.Sprintf("/mnt/%s", agent.nodeName)
			rawOsds, err := GetCephVolumeRawOSDs(context, agent.clusterInfo, agent.clusterInfo.FSID, block, agent.metadataDevice, "", false)
			if err != nil {
				isAvailable = false
				rejectedReason = fmt.Sprintf("failed to detect if there is already an osd. %v", err)
			} else if len(rawOsds) > 0 {
				isAvailable = false
				rejectedReason = "already in use by a raw OSD, no need to reconfigure"
			} else {
				isAvailable = true
			}
		} else {
			isAvailable, rejectedReason, err = sys.CheckIfDeviceAvailable(context.Executor, device.RealPath, agent.pvcBacked)
			if err != nil {
				// a failed check only rejects this device; discovery of the
				// remaining devices continues
				isAvailable = false
				rejectedReason = fmt.Sprintf("failed to check if the device %q is available. %v", device.Name, err)
			}
		}

		if !isAvailable {
			logger.Infof("skipping device %q: %s.", device.Name, rejectedReason)
			continue
		} else {
			logger.Infof("device %q is available.", device.Name)
		}

		var deviceInfo *DeviceOsdIDEntry
		if agent.metadataDevice != "" && agent.metadataDevice == device.Name {
			// current device is desired as the metadata device
			deviceInfo = &DeviceOsdIDEntry{Data: unassignedOSDID, Metadata: []int{}}
		} else if len(desiredDevices) == 1 && desiredDevices[0].Name == "all" {
			// user has specified all devices, use the current one for data
			deviceInfo = &DeviceOsdIDEntry{Data: unassignedOSDID}
		} else if len(desiredDevices) > 0 {
			var matched bool
			var matchedDevice DesiredDevice
			for _, desiredDevice := range desiredDevices {
				if desiredDevice.IsFilter {
					// the desired devices is a regular expression
					matched, err = regexp.Match(desiredDevice.Name, []byte(device.Name))
					if err != nil {
						logger.Errorf("regex failed on device %q and filter %q. %v", device.Name, desiredDevice.Name, err)
						continue
					}
					if matched {
						logger.Infof("device %q matches device filter %q", device.Name, desiredDevice.Name)
					}
				} else if desiredDevice.IsDevicePathFilter {
					// match the regex against every known alias of the device
					pathnames := append(strings.Fields(device.DevLinks), filepath.Join("/dev", device.Name))
					for _, pathname := range pathnames {
						matched, err = regexp.Match(desiredDevice.Name, []byte(pathname))
						if err != nil {
							logger.Errorf("regex failed on device %q and filter %q. %v", device.Name, desiredDevice.Name, err)
							continue
						}
						if matched {
							logger.Infof("device %q (aliases: %q) matches device path filter %q", device.Name, device.DevLinks, desiredDevice.Name)
							break
						}
					}
				} else if device.Name == desiredDevice.Name {
					logger.Infof("%q found in the desired devices", device.Name)
					matched = true
				} else if strings.HasPrefix(desiredDevice.Name, "/dev/") {
					// a /dev/... name may refer to the device by one of its links
					devLinks := strings.Split(device.DevLinks, " ")
					for _, link := range devLinks {
						if link == desiredDevice.Name {
							logger.Infof("%q found in the desired devices (matched by link: %q)", device.Name, link)
							matched = true
							break
						}
					}
				}
				matchedDevice = desiredDevice
				if matched {
					break
				}
			}

			if err == nil && matched {
				// the current device matches the user specifies filter/list, use it for data
				logger.Infof("device %q is selected by the device filter/name %q", device.Name, matchedDevice.Name)
				deviceInfo = &DeviceOsdIDEntry{Data: unassignedOSDID, Config: matchedDevice, PersistentDevicePaths: strings.Fields(device.DevLinks)}

				// set that this is not an OSD but a metadata device
				if device.Type == pvcMetadataTypeDevice {
					logger.Infof("metadata device %q is selected by the device filter/name %q", device.Name, matchedDevice.Name)
					deviceInfo = &DeviceOsdIDEntry{Config: matchedDevice, PersistentDevicePaths: strings.Fields(device.DevLinks), Metadata: []int{1}}
				}

				// set that this is not an OSD but a wal device
				if device.Type == pvcWalTypeDevice {
					logger.Infof("wal device %q is selected by the device filter/name %q", device.Name, matchedDevice.Name)
					deviceInfo = &DeviceOsdIDEntry{Config: matchedDevice, PersistentDevicePaths: strings.Fields(device.DevLinks), Metadata: []int{2}}
				}
			} else {
				logger.Infof("skipping device %q that does not match the device filter/list (%v). %v", device.Name, desiredDevices, err)
			}
		} else {
			logger.Infof("skipping device %q until the admin specifies it can be used by an osd", device.Name)
		}

		if deviceInfo != nil {
			// When running on PVC, we typically have a single device only
			// So it's fine to name the first entry of the map "data" instead of the PVC name
			// It is particularly useful when a metadata PVC is used because we need to identify it in the map
			// So the entry must be named "metadata" so it can accessed later
			if agent.pvcBacked {
				if device.Type == pvcDataTypeDevice {
					available.Entries[pvcDataTypeDevice] = deviceInfo
				} else if device.Type == pvcMetadataTypeDevice {
					available.Entries[pvcMetadataTypeDevice] = deviceInfo
				} else if device.Type == pvcWalTypeDevice {
					available.Entries[pvcWalTypeDevice] = deviceInfo
				}
			} else {
				available.Entries[device.Name] = deviceInfo
			}
		}
	}

	return available, nil
}
// releaseLVMDevice deactivates the LV to release the device.
// "lvchange -an" is run against the volume group/LV; on failure the combined
// command output is carried in the error for debugging.
func releaseLVMDevice(context *clusterd.Context, volumeGroupName string) error {
	if output, err := context.Executor.ExecuteCommandWithCombinedOutput("lvchange", "-an", "-vv", volumeGroupName); err != nil {
		return errors.Wrapf(err, "failed to deactivate LVM %s. output: %s", volumeGroupName, output)
	}

	logger.Info("successfully released device from lvm")
	return nil
}
// getVolumeGroupName returns the Volume group name from the given Logical
// Volume Path. An empty string is returned (and a warning logged) when the
// path does not have the expected `/dev/<vg name>/<lv name>` shape.
func getVolumeGroupName(lvPath string) string {
	parts := strings.Split(lvPath, "/")
	// a well-formed path splits into ["", "dev", vg, lv]
	if len(parts) == 4 && parts[2] != "" {
		return parts[2]
	}
	logger.Warningf("invalid LV Path: %q", lvPath)
	return ""
}
|
package control
import (
"bufio"
"context"
cryptorand "crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/csv"
"encoding/hex"
"fmt"
"html/template"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/rancher/k3s/pkg/daemons/config"
"github.com/sirupsen/logrus"
"k8s.io/apiserver/pkg/authentication/authenticator"
certutil "k8s.io/client-go/util/cert"
"k8s.io/kubernetes/cmd/kube-apiserver/app"
cmapp "k8s.io/kubernetes/cmd/kube-controller-manager/app"
sapp "k8s.io/kubernetes/cmd/kube-scheduler/app"
_ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration
"k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
"k8s.io/kubernetes/pkg/master"
"k8s.io/kubernetes/pkg/proxy/util"
_ "k8s.io/kubernetes/pkg/util/reflector/prometheus" // for reflector metric registration
_ "k8s.io/kubernetes/pkg/util/workqueue/prometheus" // for workqueue metric registration
_ "k8s.io/kubernetes/pkg/version/prometheus" // for version metric registration
)
var (
	// localhostIP is the loopback address the local control-plane components use.
	localhostIP = net.ParseIP("127.0.0.1")

	// kubeconfigTemplate renders a client kubeconfig pointing at the local API
	// server, authenticating with the basic-auth username/password supplied at
	// execution time.
	kubeconfigTemplate = template.Must(template.New("kubeconfig").Parse(`apiVersion: v1
clusters:
- cluster:
    server: {{.URL}}
    certificate-authority-data: {{.CACert}}
  name: local
contexts:
- context:
    cluster: local
    namespace: default
    user: user
  name: Default
current-context: Default
kind: Config
preferences: {}
users:
- name: user
  user:
    username: {{.User}}
    password: {{.Password}}
`))
)
// Server starts the embedded control-plane components (kube-apiserver,
// kube-scheduler, kube-controller-manager) described by cfg. It returns
// once the apiserver has published its startup config; the components
// keep running in background goroutines.
func Server(ctx context.Context, cfg *config.Control) error {
	rand.Seed(time.Now().UTC().UnixNano())
	runtime := &config.ControlRuntime{}
	cfg.Runtime = runtime
	// prepare must run first: it generates the certs, keys and
	// credential files that the components below read from disk.
	if err := prepare(cfg, runtime); err != nil {
		return err
	}
	cfg.Runtime.Tunnel = setupTunnel()
	util.DisableProxyHostnameCheck = true
	// Blocks until the apiserver goroutine hands back its auth/handler.
	auth, handler, err := apiServer(ctx, cfg, runtime)
	if err != nil {
		return err
	}
	runtime.Handler = handler
	runtime.Authenticator = auth
	if !cfg.NoScheduler {
		scheduler(cfg, runtime)
	}
	controllerManager(cfg, runtime)
	return nil
}
// controllerManager launches the embedded kube-controller-manager in a
// background goroutine. It does not return errors; if the component
// exits, the whole process is terminated via logrus.Fatalf.
func controllerManager(cfg *config.Control, runtime *config.ControlRuntime) {
	args := []string{
		"--kubeconfig", runtime.KubeConfigSystem,
		"--service-account-private-key-file", runtime.ServiceKey,
		"--allocate-node-cidrs",
		"--cluster-cidr", cfg.ClusterIPRange.String(),
		"--root-ca-file", runtime.TokenCA,
		"--port", "10252",
		// Use the supported --bind-address flag instead of the
		// deprecated --address, consistent with apiServer.
		"--bind-address", "127.0.0.1",
		"--secure-port", "0",
	}
	if cfg.NoLeaderElect {
		args = append(args, "--leader-elect=false")
	}
	// User-supplied extra args come last so they can override defaults.
	args = append(args, cfg.ExtraControllerArgs...)
	command := cmapp.NewControllerManagerCommand()
	command.SetArgs(args)
	go func() {
		logrus.Infof("Running kube-controller-manager %s", config.ArgString(args))
		logrus.Fatalf("controller-manager exited: %v", command.Execute())
	}()
}
// scheduler launches the embedded kube-scheduler in a background
// goroutine. It does not return errors; if the component exits, the
// whole process is terminated via logrus.Fatalf.
func scheduler(cfg *config.Control, runtime *config.ControlRuntime) {
	args := []string{
		"--kubeconfig", runtime.KubeConfigSystem,
		"--port", "10251",
		// Use the supported --bind-address flag instead of the
		// deprecated --address, consistent with apiServer.
		"--bind-address", "127.0.0.1",
		"--secure-port", "0",
	}
	if cfg.NoLeaderElect {
		args = append(args, "--leader-elect=false")
	}
	// User-supplied extra args come last so they can override defaults.
	args = append(args, cfg.ExtraSchedulerAPIArgs...)
	command := sapp.NewSchedulerCommand()
	command.SetArgs(args)
	go func() {
		logrus.Infof("Running kube-scheduler %s", config.ArgString(args))
		logrus.Fatalf("scheduler exited: %v", command.Execute())
	}()
}
// apiServer assembles the kube-apiserver argument list from cfg, starts
// the server in a background goroutine, then blocks until the apiserver
// publishes its startup config, from which the authenticator and HTTP
// handler are returned.
func apiServer(ctx context.Context, cfg *config.Control, runtime *config.ControlRuntime) (authenticator.Request, http.Handler, error) {
	var args []string
	// Only wire up etcd3 storage when explicit endpoints are given;
	// otherwise the apiserver uses its default backend.
	if len(cfg.ETCDEndpoints) > 0 {
		args = append(args, "--storage-backend", "etcd3")
		args = append(args, "--etcd-servers", strings.Join(cfg.ETCDEndpoints, ","))
		if cfg.ETCDKeyFile != "" {
			args = append(args, "--etcd-keyfile", cfg.ETCDKeyFile)
		}
		if cfg.ETCDCAFile != "" {
			args = append(args, "--etcd-cafile", cfg.ETCDCAFile)
		}
		if cfg.ETCDCertFile != "" {
			args = append(args, "--etcd-certfile", cfg.ETCDCertFile)
		}
	}
	certDir := filepath.Join(cfg.DataDir, "tls/temporary-certs")
	// Best-effort: a failure here will surface when the apiserver tries
	// to write its temporary certs.
	os.MkdirAll(certDir, 0700)
	// TODO: sqlite doesn't need the watch cache, but etcd does, so make this dynamic
	args = append(args, "--watch-cache=false")
	args = append(args, "--cert-dir", certDir)
	args = append(args, "--allow-privileged=true")
	args = append(args, "--authorization-mode", strings.Join([]string{modes.ModeNode, modes.ModeRBAC}, ","))
	args = append(args, "--service-account-signing-key-file", runtime.ServiceKey)
	args = append(args, "--service-cluster-ip-range", cfg.ServiceIPRange.String())
	args = append(args, "--advertise-port", strconv.Itoa(cfg.AdvertisePort))
	args = append(args, "--advertise-address", localhostIP.String())
	// Plain-HTTP port disabled; only the TLS port on loopback is served.
	args = append(args, "--insecure-port", "0")
	args = append(args, "--secure-port", strconv.Itoa(cfg.ListenPort))
	args = append(args, "--bind-address", localhostIP.String())
	args = append(args, "--tls-cert-file", runtime.TLSCert)
	args = append(args, "--tls-private-key-file", runtime.TLSKey)
	args = append(args, "--service-account-key-file", runtime.ServiceKey)
	args = append(args, "--service-account-issuer", "k3s")
	args = append(args, "--api-audiences", "unknown")
	args = append(args, "--basic-auth-file", runtime.PasswdFile)
	args = append(args, "--kubelet-client-certificate", runtime.NodeCert)
	args = append(args, "--kubelet-client-key", runtime.NodeKey)
	// User-supplied extra args come last so they can override defaults.
	args = append(args, cfg.ExtraAPIArgs...)
	command := app.NewAPIServerCommand(ctx.Done())
	command.SetArgs(args)
	go func() {
		logrus.Infof("Running kube-apiserver %s", config.ArgString(args))
		logrus.Fatalf("apiserver exited: %v", command.Execute())
	}()
	// Receive blocks until the apiserver goroutine publishes its config.
	startupConfig := <-app.StartupConfig
	return startupConfig.Authenticator, startupConfig.Handler, nil
}
// defaults fills in zero-valued fields of the control config with the
// stock k3s settings.
func defaults(config *config.Control) {
	// parseNet is only called with well-known, always-valid CIDRs, so
	// the parse error is deliberately discarded.
	parseNet := func(cidr string) *net.IPNet {
		_, ipnet, _ := net.ParseCIDR(cidr)
		return ipnet
	}
	if config.ClusterIPRange == nil {
		config.ClusterIPRange = parseNet("10.42.0.0/16")
	}
	if config.ServiceIPRange == nil {
		config.ServiceIPRange = parseNet("10.43.0.0/16")
	}
	if len(config.ClusterDNS) == 0 {
		config.ClusterDNS = net.ParseIP("10.43.0.10")
	}
	if config.AdvertisePort == 0 {
		config.AdvertisePort = 6445
	}
	if config.ListenPort == 0 {
		config.ListenPort = 6444
	}
	if config.DataDir == "" {
		config.DataDir = "./management-state"
	}
}
// prepare applies defaults, creates the data directory layout, and
// generates (or reuses) every certificate, key and credential file the
// control-plane components need, finally loading the generated tokens
// into runtime.
func prepare(config *config.Control, runtime *config.ControlRuntime) error {
	var err error
	defaults(config)
	if _, err := os.Stat(config.DataDir); os.IsNotExist(err) {
		if err := os.MkdirAll(config.DataDir, 0700); err != nil {
			return err
		}
	} else if err != nil {
		return err
	}
	// Components receive absolute paths, so resolve DataDir once here.
	config.DataDir, err = filepath.Abs(config.DataDir)
	if err != nil {
		return err
	}
	os.MkdirAll(path.Join(config.DataDir, "tls"), 0700)
	os.MkdirAll(path.Join(config.DataDir, "cred"), 0700)
	name := "localhost"
	runtime.TLSCert = path.Join(config.DataDir, "tls", name+".crt")
	runtime.TLSKey = path.Join(config.DataDir, "tls", name+".key")
	runtime.TLSCA = path.Join(config.DataDir, "tls", "ca.crt")
	runtime.TLSCAKey = path.Join(config.DataDir, "tls", "ca.key")
	runtime.TokenCA = path.Join(config.DataDir, "tls", "token-ca.crt")
	runtime.TokenCAKey = path.Join(config.DataDir, "tls", "token-ca.key")
	runtime.ServiceKey = path.Join(config.DataDir, "tls", "service.key")
	runtime.PasswdFile = path.Join(config.DataDir, "cred", "passwd")
	runtime.KubeConfigSystem = path.Join(config.DataDir, "cred", "kubeconfig-system.yaml")
	runtime.NodeKey = path.Join(config.DataDir, "tls", "token-node.key")
	runtime.NodeCert = path.Join(config.DataDir, "tls", "token-node.crt")
	// A missing CA means first boot (or a wiped data dir): recreate it
	// and force re-issuance of the certs it signs via regen below.
	regen := false
	if _, err := os.Stat(runtime.TLSCA); err != nil {
		regen = true
		if err := genCA(runtime); err != nil {
			return err
		}
	}
	if err := genServiceAccount(runtime); err != nil {
		return err
	}
	if err := genTLS(regen, config, runtime); err != nil {
		return err
	}
	if err := genTokenTLS(config, runtime); err != nil {
		return err
	}
	if err := genUsers(config, runtime); err != nil {
		return err
	}
	return readTokens(runtime)
}
// readTokens loads the node and admin tokens from the generated passwd
// file (CSV records of the form token,user,uid,group) into runtime.
func readTokens(runtime *config.ControlRuntime) error {
	f, err := os.Open(runtime.PasswdFile)
	if err != nil {
		return err
	}
	// Fix: the file handle was previously never closed (leaked on every
	// call and on every error return path).
	defer f.Close()
	reader := csv.NewReader(f)
	// Records are allowed to have a variable number of fields.
	reader.FieldsPerRecord = -1
	for {
		record, err := reader.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		if len(record) < 2 {
			continue
		}
		// Field 0 is the secret, field 1 the user name.
		switch record[1] {
		case "node":
			runtime.NodeToken = "node:" + record[0]
		case "admin":
			runtime.ClientToken = "admin:" + record[0]
		}
	}
	return nil
}
// ensureNodeToken rewrites the "node" entry of the passwd file so its
// secret matches config.ClusterSecret. It is a no-op when no cluster
// secret is configured or the file already carries that secret.
func ensureNodeToken(config *config.Control, runtime *config.ControlRuntime) error {
	if config.ClusterSecret == "" {
		return nil
	}
	f, err := os.Open(runtime.PasswdFile)
	if err != nil {
		return err
	}
	// Safety net for the early returns; the file is also closed
	// explicitly below before it is rewritten (double Close is benign).
	defer f.Close()
	buf := &strings.Builder{}
	scan := bufio.NewScanner(f)
	for scan.Scan() {
		line := scan.Text()
		parts := strings.Split(line, ",")
		// Expected record shape: token,user,uid,group.
		if len(parts) < 4 {
			continue
		}
		if parts[1] == "node" {
			// Already in sync: nothing to rewrite.
			if parts[0] == config.ClusterSecret {
				return nil
			}
			parts[0] = config.ClusterSecret
			line = strings.Join(parts, ",")
		}
		buf.WriteString(line)
		buf.WriteString("\n")
	}
	if scan.Err() != nil {
		return scan.Err()
	}
	// Close the reader before overwriting the same path.
	f.Close()
	return ioutil.WriteFile(runtime.PasswdFile, []byte(buf.String()), 0600)
}
// genUsers creates the basic-auth passwd file with fresh admin, system
// and node tokens plus the system kubeconfig. If a non-empty passwd
// file already exists, it only reconciles the node token with the
// configured cluster secret.
func genUsers(config *config.Control, runtime *config.ControlRuntime) error {
	if s, err := os.Stat(runtime.PasswdFile); err == nil && s.Size() > 0 {
		return ensureNodeToken(config, runtime)
	}
	adminToken, err := getToken()
	if err != nil {
		return err
	}
	systemToken, err := getToken()
	if err != nil {
		return err
	}
	nodeToken, err := getToken()
	if err != nil {
		return err
	}
	// An explicitly configured cluster secret overrides the generated
	// node token.
	if config.ClusterSecret != "" {
		nodeToken = config.ClusterSecret
	}
	// passwd format: token,user,uid,group — one record per line.
	passwd := fmt.Sprintf(`%s,admin,admin,system:masters
%s,system,system,system:masters
%s,node,node,system:masters
`, adminToken, systemToken, nodeToken)
	caCertBytes, err := ioutil.ReadFile(runtime.TLSCA)
	if err != nil {
		return err
	}
	// The kubeconfig template expects base64-encoded CA PEM.
	caCert := base64.StdEncoding.EncodeToString(caCertBytes)
	if err := kubeConfig(runtime.KubeConfigSystem, fmt.Sprintf("https://localhost:%d", config.ListenPort), caCert,
		"system", systemToken); err != nil {
		return err
	}
	return ioutil.WriteFile(runtime.PasswdFile, []byte(passwd), 0600)
}
// getToken generates a random 128-bit secret and returns it encoded as
// a 32-character hex string.
func getToken() (string, error) {
	buf := make([]byte, 16)
	if _, err := cryptorand.Read(buf); err != nil {
		return "", err
	}
	return hex.EncodeToString(buf), nil
}
// genTokenTLS issues the node ("token") certificate signed by the token
// CA, regenerating that CA first if it is missing. An existing node
// cert is kept unless the CA itself was just recreated.
func genTokenTLS(config *config.Control, runtime *config.ControlRuntime) error {
	regen := false
	if _, err := os.Stat(runtime.TokenCA); err != nil {
		regen = true
		if err := genTokenCA(runtime); err != nil {
			return err
		}
	}
	_, apiServerServiceIP, err := master.DefaultServiceIPRange(*config.ServiceIPRange)
	if err != nil {
		return err
	}
	// Valid for both server and client auth, so the same cert can also
	// authenticate the kubelet client connection.
	cfg := certutil.Config{
		CommonName: "kubernetes",
		AltNames: certutil.AltNames{
			DNSNames: []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes", "localhost"},
			IPs:      []net.IP{net.ParseIP("127.0.0.1"), apiServerServiceIP},
		},
		Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
	}
	// Reuse the existing node cert unless the CA was just regenerated.
	if _, err := os.Stat(runtime.NodeCert); err == nil && !regen {
		return nil
	}
	caKeyBytes, err := ioutil.ReadFile(runtime.TokenCAKey)
	if err != nil {
		return err
	}
	caBytes, err := ioutil.ReadFile(runtime.TokenCA)
	if err != nil {
		return err
	}
	caKey, err := certutil.ParsePrivateKeyPEM(caKeyBytes)
	if err != nil {
		return err
	}
	caCert, err := certutil.ParseCertsPEM(caBytes)
	if err != nil {
		return err
	}
	key, err := certutil.NewPrivateKey()
	if err != nil {
		return err
	}
	// The CA key is RSA because genTokenCA generated it; the assertion
	// would panic on any other key type — NOTE(review): confirm.
	cert, err := certutil.NewSignedCert(cfg, key, caCert[0], caKey.(*rsa.PrivateKey))
	if err != nil {
		return err
	}
	if err := certutil.WriteKey(runtime.NodeKey, certutil.EncodePrivateKeyPEM(key)); err != nil {
		return err
	}
	// Write leaf followed by CA so readers get the full chain.
	return certutil.WriteCert(runtime.NodeCert, append(certutil.EncodeCertPEM(cert), certutil.EncodeCertPEM(caCert[0])...))
}
// genTLS issues the serving certificate and key for the local apiserver
// endpoint, signed by the main CA. An existing cert+key pair is reused
// unless regen forces re-issuance (set when the CA was recreated).
func genTLS(regen bool, config *config.Control, runtime *config.ControlRuntime) error {
	if !regen {
		_, certErr := os.Stat(runtime.TLSCert)
		_, keyErr := os.Stat(runtime.TLSKey)
		if certErr == nil && keyErr == nil {
			return nil
		}
	}
	_, apiServerServiceIP, err := master.DefaultServiceIPRange(*config.ServiceIPRange)
	if err != nil {
		return err
	}
	// Server-auth only: this cert just terminates TLS for the apiserver.
	cfg := certutil.Config{
		CommonName: "localhost",
		AltNames: certutil.AltNames{
			DNSNames: []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes", "localhost"},
			IPs:      []net.IP{apiServerServiceIP, localhostIP},
		},
		Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}
	caKeyBytes, err := ioutil.ReadFile(runtime.TLSCAKey)
	if err != nil {
		return err
	}
	caBytes, err := ioutil.ReadFile(runtime.TLSCA)
	if err != nil {
		return err
	}
	caKey, err := certutil.ParsePrivateKeyPEM(caKeyBytes)
	if err != nil {
		return err
	}
	caCert, err := certutil.ParseCertsPEM(caBytes)
	if err != nil {
		return err
	}
	key, err := certutil.NewPrivateKey()
	if err != nil {
		return err
	}
	// The CA key is RSA because genCA generated it; the assertion would
	// panic on any other key type — NOTE(review): confirm.
	cert, err := certutil.NewSignedCert(cfg, key, caCert[0], caKey.(*rsa.PrivateKey))
	if err != nil {
		return err
	}
	if err := certutil.WriteKey(runtime.TLSKey, certutil.EncodePrivateKeyPEM(key)); err != nil {
		return err
	}
	// Write leaf followed by CA so readers get the full chain.
	return certutil.WriteCert(runtime.TLSCert, append(certutil.EncodeCertPEM(cert), certutil.EncodeCertPEM(caCert[0])...))
}
// genServiceAccount creates the service-account signing key unless one
// is already present on disk.
func genServiceAccount(runtime *config.ControlRuntime) error {
	if _, err := os.Stat(runtime.ServiceKey); err == nil {
		// A key already exists; keep it.
		return nil
	}
	key, err := certutil.NewPrivateKey()
	if err != nil {
		return err
	}
	return certutil.WriteKey(runtime.ServiceKey, certutil.EncodePrivateKeyPEM(key))
}
// genTokenCA creates a fresh self-signed CA key pair for signing the
// node (token) certificates and writes both halves to disk.
func genTokenCA(runtime *config.ControlRuntime) error {
	caKey, err := certutil.NewPrivateKey()
	if err != nil {
		return err
	}
	// Timestamp in the CN keeps regenerated CAs distinguishable.
	cert, err := certutil.NewSelfSignedCACert(certutil.Config{
		CommonName: fmt.Sprintf("%s-ca@%d", "k3s-token", time.Now().Unix()),
	}, caKey)
	if err != nil {
		return err
	}
	if err := certutil.WriteKey(runtime.TokenCAKey, certutil.EncodePrivateKeyPEM(caKey)); err != nil {
		return err
	}
	return certutil.WriteCert(runtime.TokenCA, certutil.EncodeCertPEM(cert))
}
// genCA creates the main self-signed CA key pair used to sign the
// apiserver's serving certificate and writes both halves to disk.
func genCA(runtime *config.ControlRuntime) error {
	caKey, err := certutil.NewPrivateKey()
	if err != nil {
		return err
	}
	// Timestamp in the CN keeps regenerated CAs distinguishable.
	cert, err := certutil.NewSelfSignedCACert(certutil.Config{
		CommonName: fmt.Sprintf("%s-ca@%d", "k3s", time.Now().Unix()),
	}, caKey)
	if err != nil {
		return err
	}
	if err := certutil.WriteKey(runtime.TLSCAKey, certutil.EncodePrivateKeyPEM(caKey)); err != nil {
		return err
	}
	return certutil.WriteCert(runtime.TLSCA, certutil.EncodeCertPEM(cert))
}
// kubeConfig renders the kubeconfig template to dest using the given
// server URL, base64-encoded CA cert and basic-auth credentials.
func kubeConfig(dest, url, cert, user, password string) error {
	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer out.Close()
	return kubeconfigTemplate.Execute(out, &struct {
		URL      string
		CACert   string
		User     string
		Password string
	}{
		URL:      url,
		CACert:   cert,
		User:     user,
		Password: password,
	})
}
Change the deprecated --address flag to --bind-address in the kube-scheduler and kube-controller-manager arguments (matching the flag already used for kube-apiserver)
package control
import (
"bufio"
"context"
cryptorand "crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/csv"
"encoding/hex"
"fmt"
"html/template"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/rancher/k3s/pkg/daemons/config"
"github.com/sirupsen/logrus"
"k8s.io/apiserver/pkg/authentication/authenticator"
certutil "k8s.io/client-go/util/cert"
"k8s.io/kubernetes/cmd/kube-apiserver/app"
cmapp "k8s.io/kubernetes/cmd/kube-controller-manager/app"
sapp "k8s.io/kubernetes/cmd/kube-scheduler/app"
_ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration
"k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
"k8s.io/kubernetes/pkg/master"
"k8s.io/kubernetes/pkg/proxy/util"
_ "k8s.io/kubernetes/pkg/util/reflector/prometheus" // for reflector metric registration
_ "k8s.io/kubernetes/pkg/util/workqueue/prometheus" // for workqueue metric registration
_ "k8s.io/kubernetes/pkg/version/prometheus" // for version metric registration
)
var (
	// localhostIP is the loopback address every embedded component
	// binds and advertises on.
	localhostIP = net.ParseIP("127.0.0.1")
	// kubeconfigTemplate renders the kubeconfig written for the system
	// user; CACert is expected to be base64-encoded PEM (see genUsers).
	kubeconfigTemplate = template.Must(template.New("kubeconfig").Parse(`apiVersion: v1
clusters:
- cluster:
    server: {{.URL}}
    certificate-authority-data: {{.CACert}}
  name: local
contexts:
- context:
    cluster: local
    namespace: default
    user: user
  name: Default
current-context: Default
kind: Config
preferences: {}
users:
- name: user
  user:
    username: {{.User}}
    password: {{.Password}}
`))
)
// Server starts the embedded control-plane components (kube-apiserver,
// kube-scheduler, kube-controller-manager) described by cfg. It returns
// once the apiserver has published its startup config; the components
// keep running in background goroutines.
func Server(ctx context.Context, cfg *config.Control) error {
	rand.Seed(time.Now().UTC().UnixNano())
	runtime := &config.ControlRuntime{}
	cfg.Runtime = runtime
	// prepare must run first: it generates the certs, keys and
	// credential files that the components below read from disk.
	if err := prepare(cfg, runtime); err != nil {
		return err
	}
	cfg.Runtime.Tunnel = setupTunnel()
	util.DisableProxyHostnameCheck = true
	// Blocks until the apiserver goroutine hands back its auth/handler.
	auth, handler, err := apiServer(ctx, cfg, runtime)
	if err != nil {
		return err
	}
	runtime.Handler = handler
	runtime.Authenticator = auth
	if !cfg.NoScheduler {
		scheduler(cfg, runtime)
	}
	controllerManager(cfg, runtime)
	return nil
}
// controllerManager launches the embedded kube-controller-manager in a
// background goroutine; if the component ever exits, the whole process
// is terminated via logrus.Fatalf.
func controllerManager(cfg *config.Control, runtime *config.ControlRuntime) {
	args := []string{
		"--kubeconfig", runtime.KubeConfigSystem,
		"--service-account-private-key-file", runtime.ServiceKey,
		"--allocate-node-cidrs",
		"--cluster-cidr", cfg.ClusterIPRange.String(),
		"--root-ca-file", runtime.TokenCA,
		"--port", "10252",
		"--bind-address", "127.0.0.1",
		"--secure-port", "0",
	}
	if cfg.NoLeaderElect {
		args = append(args, "--leader-elect=false")
	}
	// User-supplied extras go last so they can override the defaults.
	args = append(args, cfg.ExtraControllerArgs...)
	cmd := cmapp.NewControllerManagerCommand()
	cmd.SetArgs(args)
	go func() {
		logrus.Infof("Running kube-controller-manager %s", config.ArgString(args))
		logrus.Fatalf("controller-manager exited: %v", cmd.Execute())
	}()
}
// scheduler launches the embedded kube-scheduler in a background
// goroutine; if the component ever exits, the whole process is
// terminated via logrus.Fatalf.
func scheduler(cfg *config.Control, runtime *config.ControlRuntime) {
	args := []string{
		"--kubeconfig", runtime.KubeConfigSystem,
		"--port", "10251",
		"--bind-address", "127.0.0.1",
		"--secure-port", "0",
	}
	if cfg.NoLeaderElect {
		args = append(args, "--leader-elect=false")
	}
	// User-supplied extras go last so they can override the defaults.
	args = append(args, cfg.ExtraSchedulerAPIArgs...)
	cmd := sapp.NewSchedulerCommand()
	cmd.SetArgs(args)
	go func() {
		logrus.Infof("Running kube-scheduler %s", config.ArgString(args))
		logrus.Fatalf("scheduler exited: %v", cmd.Execute())
	}()
}
// apiServer assembles the kube-apiserver argument list from cfg, starts
// the server in a background goroutine, then blocks until the apiserver
// publishes its startup config, from which the authenticator and HTTP
// handler are returned.
func apiServer(ctx context.Context, cfg *config.Control, runtime *config.ControlRuntime) (authenticator.Request, http.Handler, error) {
	var args []string
	// Only wire up etcd3 storage when explicit endpoints are given;
	// otherwise the apiserver uses its default backend.
	if len(cfg.ETCDEndpoints) > 0 {
		args = append(args, "--storage-backend", "etcd3")
		args = append(args, "--etcd-servers", strings.Join(cfg.ETCDEndpoints, ","))
		if cfg.ETCDKeyFile != "" {
			args = append(args, "--etcd-keyfile", cfg.ETCDKeyFile)
		}
		if cfg.ETCDCAFile != "" {
			args = append(args, "--etcd-cafile", cfg.ETCDCAFile)
		}
		if cfg.ETCDCertFile != "" {
			args = append(args, "--etcd-certfile", cfg.ETCDCertFile)
		}
	}
	certDir := filepath.Join(cfg.DataDir, "tls/temporary-certs")
	// Best-effort: a failure here will surface when the apiserver tries
	// to write its temporary certs.
	os.MkdirAll(certDir, 0700)
	// TODO: sqlite doesn't need the watch cache, but etcd does, so make this dynamic
	args = append(args, "--watch-cache=false")
	args = append(args, "--cert-dir", certDir)
	args = append(args, "--allow-privileged=true")
	args = append(args, "--authorization-mode", strings.Join([]string{modes.ModeNode, modes.ModeRBAC}, ","))
	args = append(args, "--service-account-signing-key-file", runtime.ServiceKey)
	args = append(args, "--service-cluster-ip-range", cfg.ServiceIPRange.String())
	args = append(args, "--advertise-port", strconv.Itoa(cfg.AdvertisePort))
	args = append(args, "--advertise-address", localhostIP.String())
	// Plain-HTTP port disabled; only the TLS port on loopback is served.
	args = append(args, "--insecure-port", "0")
	args = append(args, "--secure-port", strconv.Itoa(cfg.ListenPort))
	args = append(args, "--bind-address", localhostIP.String())
	args = append(args, "--tls-cert-file", runtime.TLSCert)
	args = append(args, "--tls-private-key-file", runtime.TLSKey)
	args = append(args, "--service-account-key-file", runtime.ServiceKey)
	args = append(args, "--service-account-issuer", "k3s")
	args = append(args, "--api-audiences", "unknown")
	args = append(args, "--basic-auth-file", runtime.PasswdFile)
	args = append(args, "--kubelet-client-certificate", runtime.NodeCert)
	args = append(args, "--kubelet-client-key", runtime.NodeKey)
	// User-supplied extra args come last so they can override defaults.
	args = append(args, cfg.ExtraAPIArgs...)
	command := app.NewAPIServerCommand(ctx.Done())
	command.SetArgs(args)
	go func() {
		logrus.Infof("Running kube-apiserver %s", config.ArgString(args))
		logrus.Fatalf("apiserver exited: %v", command.Execute())
	}()
	// Receive blocks until the apiserver goroutine publishes its config.
	startupConfig := <-app.StartupConfig
	return startupConfig.Authenticator, startupConfig.Handler, nil
}
// defaults fills in zero-valued fields of the control config with the
// stock k3s settings.
func defaults(config *config.Control) {
	// parseNet is only called with well-known, always-valid CIDRs, so
	// the parse error is deliberately discarded.
	parseNet := func(cidr string) *net.IPNet {
		_, ipnet, _ := net.ParseCIDR(cidr)
		return ipnet
	}
	if config.ClusterIPRange == nil {
		config.ClusterIPRange = parseNet("10.42.0.0/16")
	}
	if config.ServiceIPRange == nil {
		config.ServiceIPRange = parseNet("10.43.0.0/16")
	}
	if len(config.ClusterDNS) == 0 {
		config.ClusterDNS = net.ParseIP("10.43.0.10")
	}
	if config.AdvertisePort == 0 {
		config.AdvertisePort = 6445
	}
	if config.ListenPort == 0 {
		config.ListenPort = 6444
	}
	if config.DataDir == "" {
		config.DataDir = "./management-state"
	}
}
// prepare applies defaults, creates the data directory layout, and
// generates (or reuses) every certificate, key and credential file the
// control-plane components need, finally loading the generated tokens
// into runtime.
func prepare(config *config.Control, runtime *config.ControlRuntime) error {
	var err error
	defaults(config)
	if _, err := os.Stat(config.DataDir); os.IsNotExist(err) {
		if err := os.MkdirAll(config.DataDir, 0700); err != nil {
			return err
		}
	} else if err != nil {
		return err
	}
	// Components receive absolute paths, so resolve DataDir once here.
	config.DataDir, err = filepath.Abs(config.DataDir)
	if err != nil {
		return err
	}
	os.MkdirAll(path.Join(config.DataDir, "tls"), 0700)
	os.MkdirAll(path.Join(config.DataDir, "cred"), 0700)
	name := "localhost"
	runtime.TLSCert = path.Join(config.DataDir, "tls", name+".crt")
	runtime.TLSKey = path.Join(config.DataDir, "tls", name+".key")
	runtime.TLSCA = path.Join(config.DataDir, "tls", "ca.crt")
	runtime.TLSCAKey = path.Join(config.DataDir, "tls", "ca.key")
	runtime.TokenCA = path.Join(config.DataDir, "tls", "token-ca.crt")
	runtime.TokenCAKey = path.Join(config.DataDir, "tls", "token-ca.key")
	runtime.ServiceKey = path.Join(config.DataDir, "tls", "service.key")
	runtime.PasswdFile = path.Join(config.DataDir, "cred", "passwd")
	runtime.KubeConfigSystem = path.Join(config.DataDir, "cred", "kubeconfig-system.yaml")
	runtime.NodeKey = path.Join(config.DataDir, "tls", "token-node.key")
	runtime.NodeCert = path.Join(config.DataDir, "tls", "token-node.crt")
	// A missing CA means first boot (or a wiped data dir): recreate it
	// and force re-issuance of the certs it signs via regen below.
	regen := false
	if _, err := os.Stat(runtime.TLSCA); err != nil {
		regen = true
		if err := genCA(runtime); err != nil {
			return err
		}
	}
	if err := genServiceAccount(runtime); err != nil {
		return err
	}
	if err := genTLS(regen, config, runtime); err != nil {
		return err
	}
	if err := genTokenTLS(config, runtime); err != nil {
		return err
	}
	if err := genUsers(config, runtime); err != nil {
		return err
	}
	return readTokens(runtime)
}
// readTokens loads the node and admin tokens from the generated passwd
// file (CSV records of the form token,user,uid,group) into runtime.
func readTokens(runtime *config.ControlRuntime) error {
	f, err := os.Open(runtime.PasswdFile)
	if err != nil {
		return err
	}
	// Fix: the file handle was previously never closed (leaked on every
	// call and on every error return path).
	defer f.Close()
	reader := csv.NewReader(f)
	// Records are allowed to have a variable number of fields.
	reader.FieldsPerRecord = -1
	for {
		record, err := reader.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		if len(record) < 2 {
			continue
		}
		// Field 0 is the secret, field 1 the user name.
		switch record[1] {
		case "node":
			runtime.NodeToken = "node:" + record[0]
		case "admin":
			runtime.ClientToken = "admin:" + record[0]
		}
	}
	return nil
}
// ensureNodeToken rewrites the "node" entry of the passwd file so its
// secret matches config.ClusterSecret. It is a no-op when no cluster
// secret is configured or the file already carries that secret.
func ensureNodeToken(config *config.Control, runtime *config.ControlRuntime) error {
	if config.ClusterSecret == "" {
		return nil
	}
	f, err := os.Open(runtime.PasswdFile)
	if err != nil {
		return err
	}
	// Safety net for the early returns; the file is also closed
	// explicitly below before it is rewritten (double Close is benign).
	defer f.Close()
	buf := &strings.Builder{}
	scan := bufio.NewScanner(f)
	for scan.Scan() {
		line := scan.Text()
		parts := strings.Split(line, ",")
		// Expected record shape: token,user,uid,group.
		if len(parts) < 4 {
			continue
		}
		if parts[1] == "node" {
			// Already in sync: nothing to rewrite.
			if parts[0] == config.ClusterSecret {
				return nil
			}
			parts[0] = config.ClusterSecret
			line = strings.Join(parts, ",")
		}
		buf.WriteString(line)
		buf.WriteString("\n")
	}
	if scan.Err() != nil {
		return scan.Err()
	}
	// Close the reader before overwriting the same path.
	f.Close()
	return ioutil.WriteFile(runtime.PasswdFile, []byte(buf.String()), 0600)
}
// genUsers creates the basic-auth passwd file with fresh admin, system
// and node tokens plus the system kubeconfig. If a non-empty passwd
// file already exists, it only reconciles the node token with the
// configured cluster secret.
func genUsers(config *config.Control, runtime *config.ControlRuntime) error {
	if s, err := os.Stat(runtime.PasswdFile); err == nil && s.Size() > 0 {
		return ensureNodeToken(config, runtime)
	}
	adminToken, err := getToken()
	if err != nil {
		return err
	}
	systemToken, err := getToken()
	if err != nil {
		return err
	}
	nodeToken, err := getToken()
	if err != nil {
		return err
	}
	// An explicitly configured cluster secret overrides the generated
	// node token.
	if config.ClusterSecret != "" {
		nodeToken = config.ClusterSecret
	}
	// passwd format: token,user,uid,group — one record per line.
	passwd := fmt.Sprintf(`%s,admin,admin,system:masters
%s,system,system,system:masters
%s,node,node,system:masters
`, adminToken, systemToken, nodeToken)
	caCertBytes, err := ioutil.ReadFile(runtime.TLSCA)
	if err != nil {
		return err
	}
	// The kubeconfig template expects base64-encoded CA PEM.
	caCert := base64.StdEncoding.EncodeToString(caCertBytes)
	if err := kubeConfig(runtime.KubeConfigSystem, fmt.Sprintf("https://localhost:%d", config.ListenPort), caCert,
		"system", systemToken); err != nil {
		return err
	}
	return ioutil.WriteFile(runtime.PasswdFile, []byte(passwd), 0600)
}
// getToken generates a random 128-bit secret and returns it encoded as
// a 32-character hex string.
func getToken() (string, error) {
	buf := make([]byte, 16)
	if _, err := cryptorand.Read(buf); err != nil {
		return "", err
	}
	return hex.EncodeToString(buf), nil
}
// genTokenTLS issues the node ("token") certificate signed by the token
// CA, regenerating that CA first if it is missing. An existing node
// cert is kept unless the CA itself was just recreated.
func genTokenTLS(config *config.Control, runtime *config.ControlRuntime) error {
	regen := false
	if _, err := os.Stat(runtime.TokenCA); err != nil {
		regen = true
		if err := genTokenCA(runtime); err != nil {
			return err
		}
	}
	_, apiServerServiceIP, err := master.DefaultServiceIPRange(*config.ServiceIPRange)
	if err != nil {
		return err
	}
	// Valid for both server and client auth, so the same cert can also
	// authenticate the kubelet client connection.
	cfg := certutil.Config{
		CommonName: "kubernetes",
		AltNames: certutil.AltNames{
			DNSNames: []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes", "localhost"},
			IPs:      []net.IP{net.ParseIP("127.0.0.1"), apiServerServiceIP},
		},
		Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
	}
	// Reuse the existing node cert unless the CA was just regenerated.
	if _, err := os.Stat(runtime.NodeCert); err == nil && !regen {
		return nil
	}
	caKeyBytes, err := ioutil.ReadFile(runtime.TokenCAKey)
	if err != nil {
		return err
	}
	caBytes, err := ioutil.ReadFile(runtime.TokenCA)
	if err != nil {
		return err
	}
	caKey, err := certutil.ParsePrivateKeyPEM(caKeyBytes)
	if err != nil {
		return err
	}
	caCert, err := certutil.ParseCertsPEM(caBytes)
	if err != nil {
		return err
	}
	key, err := certutil.NewPrivateKey()
	if err != nil {
		return err
	}
	// The CA key is RSA because genTokenCA generated it; the assertion
	// would panic on any other key type — NOTE(review): confirm.
	cert, err := certutil.NewSignedCert(cfg, key, caCert[0], caKey.(*rsa.PrivateKey))
	if err != nil {
		return err
	}
	if err := certutil.WriteKey(runtime.NodeKey, certutil.EncodePrivateKeyPEM(key)); err != nil {
		return err
	}
	// Write leaf followed by CA so readers get the full chain.
	return certutil.WriteCert(runtime.NodeCert, append(certutil.EncodeCertPEM(cert), certutil.EncodeCertPEM(caCert[0])...))
}
// genTLS issues the serving certificate and key for the local apiserver
// endpoint, signed by the main CA. An existing cert+key pair is reused
// unless regen forces re-issuance (set when the CA was recreated).
func genTLS(regen bool, config *config.Control, runtime *config.ControlRuntime) error {
	if !regen {
		_, certErr := os.Stat(runtime.TLSCert)
		_, keyErr := os.Stat(runtime.TLSKey)
		if certErr == nil && keyErr == nil {
			return nil
		}
	}
	_, apiServerServiceIP, err := master.DefaultServiceIPRange(*config.ServiceIPRange)
	if err != nil {
		return err
	}
	// Server-auth only: this cert just terminates TLS for the apiserver.
	cfg := certutil.Config{
		CommonName: "localhost",
		AltNames: certutil.AltNames{
			DNSNames: []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes", "localhost"},
			IPs:      []net.IP{apiServerServiceIP, localhostIP},
		},
		Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}
	caKeyBytes, err := ioutil.ReadFile(runtime.TLSCAKey)
	if err != nil {
		return err
	}
	caBytes, err := ioutil.ReadFile(runtime.TLSCA)
	if err != nil {
		return err
	}
	caKey, err := certutil.ParsePrivateKeyPEM(caKeyBytes)
	if err != nil {
		return err
	}
	caCert, err := certutil.ParseCertsPEM(caBytes)
	if err != nil {
		return err
	}
	key, err := certutil.NewPrivateKey()
	if err != nil {
		return err
	}
	// The CA key is RSA because genCA generated it; the assertion would
	// panic on any other key type — NOTE(review): confirm.
	cert, err := certutil.NewSignedCert(cfg, key, caCert[0], caKey.(*rsa.PrivateKey))
	if err != nil {
		return err
	}
	if err := certutil.WriteKey(runtime.TLSKey, certutil.EncodePrivateKeyPEM(key)); err != nil {
		return err
	}
	// Write leaf followed by CA so readers get the full chain.
	return certutil.WriteCert(runtime.TLSCert, append(certutil.EncodeCertPEM(cert), certutil.EncodeCertPEM(caCert[0])...))
}
// genServiceAccount creates the service-account signing key unless one
// is already present on disk.
func genServiceAccount(runtime *config.ControlRuntime) error {
	if _, err := os.Stat(runtime.ServiceKey); err == nil {
		// A key already exists; keep it.
		return nil
	}
	key, err := certutil.NewPrivateKey()
	if err != nil {
		return err
	}
	return certutil.WriteKey(runtime.ServiceKey, certutil.EncodePrivateKeyPEM(key))
}
// genTokenCA creates a fresh self-signed CA key pair for signing the
// node (token) certificates and writes both halves to disk.
func genTokenCA(runtime *config.ControlRuntime) error {
	caKey, err := certutil.NewPrivateKey()
	if err != nil {
		return err
	}
	// Timestamp in the CN keeps regenerated CAs distinguishable.
	cert, err := certutil.NewSelfSignedCACert(certutil.Config{
		CommonName: fmt.Sprintf("%s-ca@%d", "k3s-token", time.Now().Unix()),
	}, caKey)
	if err != nil {
		return err
	}
	if err := certutil.WriteKey(runtime.TokenCAKey, certutil.EncodePrivateKeyPEM(caKey)); err != nil {
		return err
	}
	return certutil.WriteCert(runtime.TokenCA, certutil.EncodeCertPEM(cert))
}
// genCA creates the main self-signed CA key pair used to sign the
// apiserver's serving certificate and writes both halves to disk.
func genCA(runtime *config.ControlRuntime) error {
	caKey, err := certutil.NewPrivateKey()
	if err != nil {
		return err
	}
	// Timestamp in the CN keeps regenerated CAs distinguishable.
	cert, err := certutil.NewSelfSignedCACert(certutil.Config{
		CommonName: fmt.Sprintf("%s-ca@%d", "k3s", time.Now().Unix()),
	}, caKey)
	if err != nil {
		return err
	}
	if err := certutil.WriteKey(runtime.TLSCAKey, certutil.EncodePrivateKeyPEM(caKey)); err != nil {
		return err
	}
	return certutil.WriteCert(runtime.TLSCA, certutil.EncodeCertPEM(cert))
}
// kubeConfig renders the kubeconfig template to dest using the given
// server URL, base64-encoded CA cert and basic-auth credentials.
func kubeConfig(dest, url, cert, user, password string) error {
	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer out.Close()
	return kubeconfigTemplate.Execute(out, &struct {
		URL      string
		CACert   string
		User     string
		Password string
	}{
		URL:      url,
		CACert:   cert,
		User:     user,
		Password: password,
	})
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package features
import (
apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features"
genericfeatures "k8s.io/apiserver/pkg/features"
utilfeature "k8s.io/apiserver/pkg/util/feature"
)
const (
// Every feature gate should add method here following this template:
//
// // owner: @username
// // alpha: v1.X
// MyFeature utilfeature.Feature = "MyFeature"
// owner: @tallclair
// beta: v1.4
AppArmor utilfeature.Feature = "AppArmor"
// owner: @mtaufen
// alpha: v1.4
// beta: v1.11
DynamicKubeletConfig utilfeature.Feature = "DynamicKubeletConfig"
// owner: @pweil-
// alpha: v1.5
//
// Default userns=host for containers that are using other host namespaces, host mounts, the pod
// contains a privileged container, or specific non-namespaced capabilities (MKNOD, SYS_MODULE,
// SYS_TIME). This should only be enabled if user namespace remapping is enabled in the docker daemon.
ExperimentalHostUserNamespaceDefaultingGate utilfeature.Feature = "ExperimentalHostUserNamespaceDefaulting"
// owner: @vishh
// alpha: v1.5
//
// Ensures guaranteed scheduling of pods marked with a special pod annotation `scheduler.alpha.kubernetes.io/critical-pod`
// and also prevents them from being evicted from a node.
// Note: This feature is not supported for `BestEffort` pods.
ExperimentalCriticalPodAnnotation utilfeature.Feature = "ExperimentalCriticalPodAnnotation"
// owner: @jiayingz
// beta: v1.10
//
// Enables support for Device Plugins
DevicePlugins utilfeature.Feature = "DevicePlugins"
// owner: @gmarek
// alpha: v1.6
//
// Changes the logic behind evicting Pods from not ready Nodes
// to take advantage of NoExecute Taints and Tolerations.
TaintBasedEvictions utilfeature.Feature = "TaintBasedEvictions"
// owner: @mikedanese
// alpha: v1.7
// beta: v1.12
//
// Gets a server certificate for the kubelet from the Certificate Signing
// Request API instead of generating one self signed and auto rotates the
// certificate as expiration approaches.
RotateKubeletServerCertificate utilfeature.Feature = "RotateKubeletServerCertificate"
// owner: @mikedanese
// beta: v1.8
//
// Automatically renews the client certificate used for communicating with
// the API server as the certificate approaches expiration.
RotateKubeletClientCertificate utilfeature.Feature = "RotateKubeletClientCertificate"
// owner: @msau42
// alpha: v1.7
//
// A new volume type that supports local disks on a node.
PersistentLocalVolumes utilfeature.Feature = "PersistentLocalVolumes"
// owner: @jinxu
// beta: v1.10
//
// New local storage types to support local storage capacity isolation
LocalStorageCapacityIsolation utilfeature.Feature = "LocalStorageCapacityIsolation"
// owner: @gnufied
// beta: v1.11
// Ability to Expand persistent volumes
ExpandPersistentVolumes utilfeature.Feature = "ExpandPersistentVolumes"
// owner: @mlmhl
// alpha: v1.11
// Ability to expand persistent volumes' file system without unmounting volumes.
ExpandInUsePersistentVolumes utilfeature.Feature = "ExpandInUsePersistentVolumes"
// owner: @verb
// alpha: v1.10
//
// Allows running a "debug container" in a pod namespaces to troubleshoot a running pod.
DebugContainers utilfeature.Feature = "DebugContainers"
// owner: @verb
// alpha: v1.10
//
// Allows all containers in a pod to share a process namespace.
PodShareProcessNamespace utilfeature.Feature = "PodShareProcessNamespace"
// owner: @bsalamat
// alpha: v1.8
//
// Add priority to pods. Priority affects scheduling and preemption of pods.
PodPriority utilfeature.Feature = "PodPriority"
// owner: @resouer
// alpha: v1.8
//
// Enable equivalence class cache for scheduler.
EnableEquivalenceClassCache utilfeature.Feature = "EnableEquivalenceClassCache"
// owner: @k82cn
// alpha: v1.8
//
// Taint nodes based on their condition status for 'NetworkUnavailable',
// 'MemoryPressure', 'OutOfDisk' and 'DiskPressure'.
TaintNodesByCondition utilfeature.Feature = "TaintNodesByCondition"
// owner: @jsafrane
// beta: v1.10
//
// Enable mount propagation of volumes.
MountPropagation utilfeature.Feature = "MountPropagation"
// owner: @sjenning
// alpha: v1.11
//
// Allows resource reservations at the QoS level preventing pods at lower QoS levels from
// bursting into resources requested at higher QoS levels (memory only for now)
QOSReserved utilfeature.Feature = "QOSReserved"
// owner: @ConnorDoyle
// alpha: v1.8
//
// Alternative container-level CPU affinity policies.
CPUManager utilfeature.Feature = "CPUManager"
// owner: @derekwaynecarr
// beta: v1.10
//
// Enable pods to consume pre-allocated huge pages of varying page sizes
HugePages utilfeature.Feature = "HugePages"
// owner: @sjenning
// beta: v1.11
//
// Enable pods to set sysctls on a pod
Sysctls utilfeature.Feature = "Sysctls"
// owner @brendandburns
// alpha: v1.9
//
// Enable nodes to exclude themselves from service load balancers
ServiceNodeExclusion utilfeature.Feature = "ServiceNodeExclusion"
// owner @brendandburns
// deprecated: v1.10
//
// Enable the service proxy to contact external IP addresses. Note this feature is present
// only for backward compatibility, it will be removed in the 1.11 release.
ServiceProxyAllowExternalIPs utilfeature.Feature = "ServiceProxyAllowExternalIPs"
// owner: @jsafrane
// alpha: v1.9
//
// Enable running mount utilities in containers.
MountContainers utilfeature.Feature = "MountContainers"
// owner: @msau42
// alpha: v1.9
//
// Extend the default scheduler to be aware of PV topology and handle PV binding
// Before moving to beta, resolve Kubernetes issue #56180
VolumeScheduling utilfeature.Feature = "VolumeScheduling"
// owner: @vladimirvivien
// alpha: v1.9
//
// Enable mount/attachment of Container Storage Interface (CSI) backed PVs
CSIPersistentVolume utilfeature.Feature = "CSIPersistentVolume"
// owner @MrHohn
// beta: v1.10
//
// Support configurable pod DNS parameters.
CustomPodDNS utilfeature.Feature = "CustomPodDNS"
// owner: @screeley44
// alpha: v1.9
//
// Enable Block volume support in containers.
BlockVolume utilfeature.Feature = "BlockVolume"
// owner: @pospispa
// GA: v1.11
//
// Postpone deletion of a PV or a PVC when they are being used
StorageObjectInUseProtection utilfeature.Feature = "StorageObjectInUseProtection"
// owner: @aveshagarwal
// alpha: v1.9
//
// Enable resource limits priority function
ResourceLimitsPriorityFunction utilfeature.Feature = "ResourceLimitsPriorityFunction"
// owner: @m1093782566
// GA: v1.11
//
// Implement IPVS-based in-cluster service load balancing
SupportIPVSProxyMode utilfeature.Feature = "SupportIPVSProxyMode"
// owner: @dims
// alpha: v1.10
//
// Implement support for limiting pids in pods
SupportPodPidsLimit utilfeature.Feature = "SupportPodPidsLimit"
// owner: @feiskyer
// alpha: v1.10
//
// Enable Hyper-V containers on Windows
HyperVContainer utilfeature.Feature = "HyperVContainer"
// owner: @joelsmith
// deprecated: v1.10
//
// Mount secret, configMap, downwardAPI and projected volumes ReadOnly. Note: this feature
// gate is present only for backward compatibility, it will be removed in the 1.11 release.
ReadOnlyAPIDataVolumes utilfeature.Feature = "ReadOnlyAPIDataVolumes"
// owner: @k82cn
// alpha: v1.10
//
// Schedule DaemonSet Pods by default scheduler instead of DaemonSet controller
ScheduleDaemonSetPods utilfeature.Feature = "ScheduleDaemonSetPods"
// owner: @mikedanese
// alpha: v1.10
//
// Implement TokenRequest endpoint on service account resources.
TokenRequest utilfeature.Feature = "TokenRequest"
// owner: @mikedanese
// alpha: v1.11
//
// Enable ServiceAccountTokenVolumeProjection support in ProjectedVolumes.
TokenRequestProjection utilfeature.Feature = "TokenRequestProjection"
// owner: @Random-Liu
// beta: v1.11
//
// Enable container log rotation for cri container runtime
CRIContainerLogRotation utilfeature.Feature = "CRIContainerLogRotation"
// owner: @verult
// beta: v1.10
//
// Enables the regional PD feature on GCE.
GCERegionalPersistentDisk utilfeature.Feature = "GCERegionalPersistentDisk"
// owner: @krmayankk
// alpha: v1.10
//
// Enables control over the primary group ID of containers' init processes.
RunAsGroup utilfeature.Feature = "RunAsGroup"
// owner: @saad-ali
// ga
//
// Allow mounting a subpath of a volume in a container
// Do not remove this feature gate even though it's GA
VolumeSubpath utilfeature.Feature = "VolumeSubpath"
// owner: @gnufied
// alpha : v1.11
//
// Add support for volume plugins to report node specific
// volume limits
AttachVolumeLimit utilfeature.Feature = "AttachVolumeLimit"
// owner: @ravig
// alpha: v1.11
//
// Include volume count on node to be considered for balanced resource allocation while scheduling.
// A node which has closer cpu,memory utilization and volume count is favoured by scheduler
// while making decisions.
BalanceAttachedNodeVolumes utilfeature.Feature = "BalanceAttachedNodeVolumes"
// owner @freehan
// beta: v1.11
//
// Support Pod Ready++
PodReadinessGates utilfeature.Feature = "PodReadinessGates"
// owner: @lichuqiang
// alpha: v1.11
//
// Extend the default scheduler to be aware of volume topology and handle PV provisioning
DynamicProvisioningScheduling utilfeature.Feature = "DynamicProvisioningScheduling"
// owner: @kevtaylor
// alpha: v1.11
//
// Allow subpath environment variable substitution
// Only applicable if the VolumeSubpath feature is also enabled
VolumeSubpathEnvExpansion utilfeature.Feature = "VolumeSubpathEnvExpansion"
// owner: @vikaschoudhary16
// alpha: v1.11
//
//
// Enable probe based plugin watcher utility for discovering Kubelet plugins
KubeletPluginsWatcher utilfeature.Feature = "KubeletPluginsWatcher"
// owner: @vikaschoudhary16
// alpha: v1.11
//
//
// Enable resource quota scope selectors
ResourceQuotaScopeSelectors utilfeature.Feature = "ResourceQuotaScopeSelectors"
// owner: @vladimirvivien
// alpha: v1.11
//
// Enables CSI to use raw block storage volumes
CSIBlockVolume utilfeature.Feature = "CSIBlockVolume"
)
// init registers the default Kubernetes feature gates with the global
// DefaultFeatureGate so they are available to every Kubernetes binary that
// links this package.
func init() {
	// Add reports registration problems (e.g. re-registering gates after the
	// gate set has been sealed) via its error return; the original code
	// discarded it. Panic instead: a failed registration here is a programmer
	// error and would otherwise silently disable feature gating at startup.
	if err := utilfeature.DefaultFeatureGate.Add(defaultKubernetesFeatureGates); err != nil {
		panic(err)
	}
}
// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys.
// To add a new feature, define a key for it above and add it here. The features will be
// available throughout Kubernetes binaries.
//
// Each entry maps a feature key (declared in the const block above) to its
// FeatureSpec: Default is the gate's on/off state when the user does not set
// it explicitly, and PreRelease records the feature's maturity stage
// (Alpha/Beta/GA/Deprecated).
var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{
AppArmor: {Default: true, PreRelease: utilfeature.Beta},
DynamicKubeletConfig: {Default: true, PreRelease: utilfeature.Beta},
ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: utilfeature.Beta},
ExperimentalCriticalPodAnnotation: {Default: false, PreRelease: utilfeature.Alpha},
DevicePlugins: {Default: true, PreRelease: utilfeature.Beta},
TaintBasedEvictions: {Default: false, PreRelease: utilfeature.Alpha},
RotateKubeletServerCertificate: {Default: true, PreRelease: utilfeature.Beta},
RotateKubeletClientCertificate: {Default: true, PreRelease: utilfeature.Beta},
PersistentLocalVolumes: {Default: true, PreRelease: utilfeature.Beta},
LocalStorageCapacityIsolation: {Default: true, PreRelease: utilfeature.Beta},
HugePages: {Default: true, PreRelease: utilfeature.Beta},
Sysctls: {Default: true, PreRelease: utilfeature.Beta},
DebugContainers: {Default: false, PreRelease: utilfeature.Alpha},
PodShareProcessNamespace: {Default: false, PreRelease: utilfeature.Alpha},
PodPriority: {Default: true, PreRelease: utilfeature.Beta},
EnableEquivalenceClassCache: {Default: false, PreRelease: utilfeature.Alpha},
TaintNodesByCondition: {Default: false, PreRelease: utilfeature.Alpha},
MountPropagation: {Default: true, PreRelease: utilfeature.Beta},
QOSReserved: {Default: false, PreRelease: utilfeature.Alpha},
ExpandPersistentVolumes: {Default: true, PreRelease: utilfeature.Beta},
ExpandInUsePersistentVolumes: {Default: false, PreRelease: utilfeature.Alpha},
AttachVolumeLimit: {Default: false, PreRelease: utilfeature.Alpha},
CPUManager: {Default: true, PreRelease: utilfeature.Beta},
ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha},
MountContainers: {Default: false, PreRelease: utilfeature.Alpha},
VolumeScheduling: {Default: true, PreRelease: utilfeature.Beta},
CSIPersistentVolume: {Default: true, PreRelease: utilfeature.Beta},
CustomPodDNS: {Default: true, PreRelease: utilfeature.Beta},
BlockVolume: {Default: false, PreRelease: utilfeature.Alpha},
StorageObjectInUseProtection: {Default: true, PreRelease: utilfeature.GA},
ResourceLimitsPriorityFunction: {Default: false, PreRelease: utilfeature.Alpha},
SupportIPVSProxyMode: {Default: true, PreRelease: utilfeature.GA},
SupportPodPidsLimit: {Default: false, PreRelease: utilfeature.Alpha},
HyperVContainer: {Default: false, PreRelease: utilfeature.Alpha},
ScheduleDaemonSetPods: {Default: false, PreRelease: utilfeature.Alpha},
TokenRequest: {Default: false, PreRelease: utilfeature.Alpha},
TokenRequestProjection: {Default: false, PreRelease: utilfeature.Alpha},
CRIContainerLogRotation: {Default: true, PreRelease: utilfeature.Beta},
GCERegionalPersistentDisk: {Default: true, PreRelease: utilfeature.Beta},
RunAsGroup: {Default: false, PreRelease: utilfeature.Alpha},
VolumeSubpath: {Default: true, PreRelease: utilfeature.GA},
BalanceAttachedNodeVolumes: {Default: false, PreRelease: utilfeature.Alpha},
DynamicProvisioningScheduling: {Default: false, PreRelease: utilfeature.Alpha},
PodReadinessGates: {Default: false, PreRelease: utilfeature.Beta},
VolumeSubpathEnvExpansion: {Default: false, PreRelease: utilfeature.Alpha},
KubeletPluginsWatcher: {Default: false, PreRelease: utilfeature.Alpha},
ResourceQuotaScopeSelectors: {Default: false, PreRelease: utilfeature.Alpha},
CSIBlockVolume: {Default: false, PreRelease: utilfeature.Alpha},
// inherited features from generic apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
genericfeatures.StreamingProxyRedirects: {Default: true, PreRelease: utilfeature.Beta},
genericfeatures.AdvancedAuditing: {Default: true, PreRelease: utilfeature.Beta},
genericfeatures.APIResponseCompression: {Default: false, PreRelease: utilfeature.Alpha},
genericfeatures.Initializers: {Default: false, PreRelease: utilfeature.Alpha},
genericfeatures.APIListChunking: {Default: true, PreRelease: utilfeature.Beta},
genericfeatures.DryRun: {Default: false, PreRelease: utilfeature.Alpha},
// inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
apiextensionsfeatures.CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta},
apiextensionsfeatures.CustomResourceSubresources: {Default: true, PreRelease: utilfeature.Beta},
// features that enable backwards compatibility but are scheduled to be removed
ServiceProxyAllowExternalIPs: {Default: false, PreRelease: utilfeature.Deprecated},
ReadOnlyAPIDataVolumes: {Default: true, PreRelease: utilfeature.Deprecated},
}
Graduate ResourceQuotaScopeSelectors to beta
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package features
import (
apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features"
genericfeatures "k8s.io/apiserver/pkg/features"
utilfeature "k8s.io/apiserver/pkg/util/feature"
)
const (
// Every feature gate should be added here following this template:
//
// // owner: @username
// // alpha: v1.X
// MyFeature utilfeature.Feature = "MyFeature"
// owner: @tallclair
// beta: v1.4
AppArmor utilfeature.Feature = "AppArmor"
// owner: @mtaufen
// alpha: v1.4
// beta: v1.11
DynamicKubeletConfig utilfeature.Feature = "DynamicKubeletConfig"
// owner: @pweil-
// alpha: v1.5
//
// Default userns=host for containers that are using other host namespaces, host mounts, the pod
// contains a privileged container, or specific non-namespaced capabilities (MKNOD, SYS_MODULE,
// SYS_TIME). This should only be enabled if user namespace remapping is enabled in the docker daemon.
ExperimentalHostUserNamespaceDefaultingGate utilfeature.Feature = "ExperimentalHostUserNamespaceDefaulting"
// owner: @vishh
// alpha: v1.5
//
// Ensures guaranteed scheduling of pods marked with a special pod annotation `scheduler.alpha.kubernetes.io/critical-pod`
// and also prevents them from being evicted from a node.
// Note: This feature is not supported for `BestEffort` pods.
ExperimentalCriticalPodAnnotation utilfeature.Feature = "ExperimentalCriticalPodAnnotation"
// owner: @jiayingz
// beta: v1.10
//
// Enables support for Device Plugins
DevicePlugins utilfeature.Feature = "DevicePlugins"
// owner: @gmarek
// alpha: v1.6
//
// Changes the logic behind evicting Pods from not ready Nodes
// to take advantage of NoExecute Taints and Tolerations.
TaintBasedEvictions utilfeature.Feature = "TaintBasedEvictions"
// owner: @mikedanese
// alpha: v1.7
// beta: v1.12
//
// Gets a server certificate for the kubelet from the Certificate Signing
// Request API instead of generating one self signed and auto rotates the
// certificate as expiration approaches.
RotateKubeletServerCertificate utilfeature.Feature = "RotateKubeletServerCertificate"
// owner: @mikedanese
// beta: v1.8
//
// Automatically renews the client certificate used for communicating with
// the API server as the certificate approaches expiration.
RotateKubeletClientCertificate utilfeature.Feature = "RotateKubeletClientCertificate"
// owner: @msau42
// alpha: v1.7
//
// A new volume type that supports local disks on a node.
PersistentLocalVolumes utilfeature.Feature = "PersistentLocalVolumes"
// owner: @jinxu
// beta: v1.10
//
// New local storage types to support local storage capacity isolation
LocalStorageCapacityIsolation utilfeature.Feature = "LocalStorageCapacityIsolation"
// owner: @gnufied
// beta: v1.11
//
// Ability to Expand persistent volumes
ExpandPersistentVolumes utilfeature.Feature = "ExpandPersistentVolumes"
// owner: @mlmhl
// alpha: v1.11
//
// Ability to expand persistent volumes' file system without unmounting volumes.
ExpandInUsePersistentVolumes utilfeature.Feature = "ExpandInUsePersistentVolumes"
// owner: @verb
// alpha: v1.10
//
// Allows running a "debug container" in a pod namespaces to troubleshoot a running pod.
DebugContainers utilfeature.Feature = "DebugContainers"
// owner: @verb
// alpha: v1.10
//
// Allows all containers in a pod to share a process namespace.
PodShareProcessNamespace utilfeature.Feature = "PodShareProcessNamespace"
// owner: @bsalamat
// alpha: v1.8
//
// Add priority to pods. Priority affects scheduling and preemption of pods.
PodPriority utilfeature.Feature = "PodPriority"
// owner: @resouer
// alpha: v1.8
//
// Enable equivalence class cache for scheduler.
EnableEquivalenceClassCache utilfeature.Feature = "EnableEquivalenceClassCache"
// owner: @k82cn
// alpha: v1.8
//
// Taint nodes based on their condition status for 'NetworkUnavailable',
// 'MemoryPressure', 'OutOfDisk' and 'DiskPressure'.
TaintNodesByCondition utilfeature.Feature = "TaintNodesByCondition"
// owner: @jsafrane
// beta: v1.10
//
// Enable mount propagation of volumes.
MountPropagation utilfeature.Feature = "MountPropagation"
// owner: @sjenning
// alpha: v1.11
//
// Allows resource reservations at the QoS level preventing pods at lower QoS levels from
// bursting into resources requested at higher QoS levels (memory only for now)
QOSReserved utilfeature.Feature = "QOSReserved"
// owner: @ConnorDoyle
// alpha: v1.8
//
// Alternative container-level CPU affinity policies.
CPUManager utilfeature.Feature = "CPUManager"
// owner: @derekwaynecarr
// beta: v1.10
//
// Enable pods to consume pre-allocated huge pages of varying page sizes
HugePages utilfeature.Feature = "HugePages"
// owner: @sjenning
// beta: v1.11
//
// Enable pods to set sysctls on a pod
Sysctls utilfeature.Feature = "Sysctls"
// owner: @brendandburns
// alpha: v1.9
//
// Enable nodes to exclude themselves from service load balancers
ServiceNodeExclusion utilfeature.Feature = "ServiceNodeExclusion"
// owner: @brendandburns
// deprecated: v1.10
//
// Enable the service proxy to contact external IP addresses. Note this feature is present
// only for backward compatibility, it will be removed in the 1.11 release.
ServiceProxyAllowExternalIPs utilfeature.Feature = "ServiceProxyAllowExternalIPs"
// owner: @jsafrane
// alpha: v1.9
//
// Enable running mount utilities in containers.
MountContainers utilfeature.Feature = "MountContainers"
// owner: @msau42
// alpha: v1.9
//
// Extend the default scheduler to be aware of PV topology and handle PV binding
// Before moving to beta, resolve Kubernetes issue #56180
VolumeScheduling utilfeature.Feature = "VolumeScheduling"
// owner: @vladimirvivien
// alpha: v1.9
//
// Enable mount/attachment of Container Storage Interface (CSI) backed PVs
CSIPersistentVolume utilfeature.Feature = "CSIPersistentVolume"
// owner: @MrHohn
// beta: v1.10
//
// Support configurable pod DNS parameters.
CustomPodDNS utilfeature.Feature = "CustomPodDNS"
// owner: @screeley44
// alpha: v1.9
//
// Enable Block volume support in containers.
BlockVolume utilfeature.Feature = "BlockVolume"
// owner: @pospispa
// GA: v1.11
//
// Postpone deletion of a PV or a PVC when they are being used
StorageObjectInUseProtection utilfeature.Feature = "StorageObjectInUseProtection"
// owner: @aveshagarwal
// alpha: v1.9
//
// Enable resource limits priority function
ResourceLimitsPriorityFunction utilfeature.Feature = "ResourceLimitsPriorityFunction"
// owner: @m1093782566
// GA: v1.11
//
// Implement IPVS-based in-cluster service load balancing
SupportIPVSProxyMode utilfeature.Feature = "SupportIPVSProxyMode"
// owner: @dims
// alpha: v1.10
//
// Implement support for limiting pids in pods
SupportPodPidsLimit utilfeature.Feature = "SupportPodPidsLimit"
// owner: @feiskyer
// alpha: v1.10
//
// Enable Hyper-V containers on Windows
HyperVContainer utilfeature.Feature = "HyperVContainer"
// owner: @joelsmith
// deprecated: v1.10
//
// Mount secret, configMap, downwardAPI and projected volumes ReadOnly. Note: this feature
// gate is present only for backward compatibility, it will be removed in the 1.11 release.
ReadOnlyAPIDataVolumes utilfeature.Feature = "ReadOnlyAPIDataVolumes"
// owner: @k82cn
// alpha: v1.10
//
// Schedule DaemonSet Pods by default scheduler instead of DaemonSet controller
ScheduleDaemonSetPods utilfeature.Feature = "ScheduleDaemonSetPods"
// owner: @mikedanese
// alpha: v1.10
//
// Implement TokenRequest endpoint on service account resources.
TokenRequest utilfeature.Feature = "TokenRequest"
// owner: @mikedanese
// alpha: v1.11
//
// Enable ServiceAccountTokenVolumeProjection support in ProjectedVolumes.
TokenRequestProjection utilfeature.Feature = "TokenRequestProjection"
// owner: @Random-Liu
// beta: v1.11
//
// Enable container log rotation for cri container runtime
CRIContainerLogRotation utilfeature.Feature = "CRIContainerLogRotation"
// owner: @verult
// beta: v1.10
//
// Enables the regional PD feature on GCE.
GCERegionalPersistentDisk utilfeature.Feature = "GCERegionalPersistentDisk"
// owner: @krmayankk
// alpha: v1.10
//
// Enables control over the primary group ID of containers' init processes.
RunAsGroup utilfeature.Feature = "RunAsGroup"
// owner: @saad-ali
// GA (graduation version not recorded here — confirm in release notes)
//
// Allow mounting a subpath of a volume in a container
// Do not remove this feature gate even though it's GA
VolumeSubpath utilfeature.Feature = "VolumeSubpath"
// owner: @gnufied
// alpha: v1.11
//
// Add support for volume plugins to report node specific
// volume limits
AttachVolumeLimit utilfeature.Feature = "AttachVolumeLimit"
// owner: @ravig
// alpha: v1.11
//
// Include volume count on node to be considered for balanced resource allocation while scheduling.
// A node which has closer cpu,memory utilization and volume count is favoured by scheduler
// while making decisions.
BalanceAttachedNodeVolumes utilfeature.Feature = "BalanceAttachedNodeVolumes"
// owner: @freehan
// beta: v1.11
//
// Support Pod Ready++
PodReadinessGates utilfeature.Feature = "PodReadinessGates"
// owner: @lichuqiang
// alpha: v1.11
//
// Extend the default scheduler to be aware of volume topology and handle PV provisioning
DynamicProvisioningScheduling utilfeature.Feature = "DynamicProvisioningScheduling"
// owner: @kevtaylor
// alpha: v1.11
//
// Allow subpath environment variable substitution
// Only applicable if the VolumeSubpath feature is also enabled
VolumeSubpathEnvExpansion utilfeature.Feature = "VolumeSubpathEnvExpansion"
// owner: @vikaschoudhary16
// alpha: v1.11
//
// Enable probe based plugin watcher utility for discovering Kubelet plugins
KubeletPluginsWatcher utilfeature.Feature = "KubeletPluginsWatcher"
// owner: @vikaschoudhary16
// beta: v1.12
//
// Enable resource quota scope selectors
ResourceQuotaScopeSelectors utilfeature.Feature = "ResourceQuotaScopeSelectors"
// owner: @vladimirvivien
// alpha: v1.11
//
// Enables CSI to use raw block storage volumes
CSIBlockVolume utilfeature.Feature = "CSIBlockVolume"
)
// init registers the default Kubernetes feature gates with the global
// DefaultFeatureGate so they are available to every Kubernetes binary that
// links this package.
func init() {
	// Add reports registration problems (e.g. re-registering gates after the
	// gate set has been sealed) via its error return; the original code
	// discarded it. Panic instead: a failed registration here is a programmer
	// error and would otherwise silently disable feature gating at startup.
	if err := utilfeature.DefaultFeatureGate.Add(defaultKubernetesFeatureGates); err != nil {
		panic(err)
	}
}
// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys.
// To add a new feature, define a key for it above and add it here. The features will be
// available throughout Kubernetes binaries.
//
// Each entry maps a feature key (declared in the const block above) to its
// FeatureSpec: Default is the gate's on/off state when the user does not set
// it explicitly, and PreRelease records the feature's maturity stage
// (Alpha/Beta/GA/Deprecated).
var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{
AppArmor: {Default: true, PreRelease: utilfeature.Beta},
DynamicKubeletConfig: {Default: true, PreRelease: utilfeature.Beta},
ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: utilfeature.Beta},
ExperimentalCriticalPodAnnotation: {Default: false, PreRelease: utilfeature.Alpha},
DevicePlugins: {Default: true, PreRelease: utilfeature.Beta},
TaintBasedEvictions: {Default: false, PreRelease: utilfeature.Alpha},
RotateKubeletServerCertificate: {Default: true, PreRelease: utilfeature.Beta},
RotateKubeletClientCertificate: {Default: true, PreRelease: utilfeature.Beta},
PersistentLocalVolumes: {Default: true, PreRelease: utilfeature.Beta},
LocalStorageCapacityIsolation: {Default: true, PreRelease: utilfeature.Beta},
HugePages: {Default: true, PreRelease: utilfeature.Beta},
Sysctls: {Default: true, PreRelease: utilfeature.Beta},
DebugContainers: {Default: false, PreRelease: utilfeature.Alpha},
PodShareProcessNamespace: {Default: false, PreRelease: utilfeature.Alpha},
PodPriority: {Default: true, PreRelease: utilfeature.Beta},
EnableEquivalenceClassCache: {Default: false, PreRelease: utilfeature.Alpha},
TaintNodesByCondition: {Default: false, PreRelease: utilfeature.Alpha},
MountPropagation: {Default: true, PreRelease: utilfeature.Beta},
QOSReserved: {Default: false, PreRelease: utilfeature.Alpha},
ExpandPersistentVolumes: {Default: true, PreRelease: utilfeature.Beta},
ExpandInUsePersistentVolumes: {Default: false, PreRelease: utilfeature.Alpha},
AttachVolumeLimit: {Default: false, PreRelease: utilfeature.Alpha},
CPUManager: {Default: true, PreRelease: utilfeature.Beta},
ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha},
MountContainers: {Default: false, PreRelease: utilfeature.Alpha},
VolumeScheduling: {Default: true, PreRelease: utilfeature.Beta},
CSIPersistentVolume: {Default: true, PreRelease: utilfeature.Beta},
CustomPodDNS: {Default: true, PreRelease: utilfeature.Beta},
BlockVolume: {Default: false, PreRelease: utilfeature.Alpha},
StorageObjectInUseProtection: {Default: true, PreRelease: utilfeature.GA},
ResourceLimitsPriorityFunction: {Default: false, PreRelease: utilfeature.Alpha},
SupportIPVSProxyMode: {Default: true, PreRelease: utilfeature.GA},
SupportPodPidsLimit: {Default: false, PreRelease: utilfeature.Alpha},
HyperVContainer: {Default: false, PreRelease: utilfeature.Alpha},
ScheduleDaemonSetPods: {Default: false, PreRelease: utilfeature.Alpha},
TokenRequest: {Default: false, PreRelease: utilfeature.Alpha},
TokenRequestProjection: {Default: false, PreRelease: utilfeature.Alpha},
CRIContainerLogRotation: {Default: true, PreRelease: utilfeature.Beta},
GCERegionalPersistentDisk: {Default: true, PreRelease: utilfeature.Beta},
RunAsGroup: {Default: false, PreRelease: utilfeature.Alpha},
VolumeSubpath: {Default: true, PreRelease: utilfeature.GA},
BalanceAttachedNodeVolumes: {Default: false, PreRelease: utilfeature.Alpha},
DynamicProvisioningScheduling: {Default: false, PreRelease: utilfeature.Alpha},
PodReadinessGates: {Default: false, PreRelease: utilfeature.Beta},
VolumeSubpathEnvExpansion: {Default: false, PreRelease: utilfeature.Alpha},
KubeletPluginsWatcher: {Default: false, PreRelease: utilfeature.Alpha},
ResourceQuotaScopeSelectors: {Default: true, PreRelease: utilfeature.Beta},
CSIBlockVolume: {Default: false, PreRelease: utilfeature.Alpha},
// inherited features from generic apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
genericfeatures.StreamingProxyRedirects: {Default: true, PreRelease: utilfeature.Beta},
genericfeatures.AdvancedAuditing: {Default: true, PreRelease: utilfeature.Beta},
genericfeatures.APIResponseCompression: {Default: false, PreRelease: utilfeature.Alpha},
genericfeatures.Initializers: {Default: false, PreRelease: utilfeature.Alpha},
genericfeatures.APIListChunking: {Default: true, PreRelease: utilfeature.Beta},
genericfeatures.DryRun: {Default: false, PreRelease: utilfeature.Alpha},
// inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
apiextensionsfeatures.CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta},
apiextensionsfeatures.CustomResourceSubresources: {Default: true, PreRelease: utilfeature.Beta},
// features that enable backwards compatibility but are scheduled to be removed
ServiceProxyAllowExternalIPs: {Default: false, PreRelease: utilfeature.Deprecated},
ReadOnlyAPIDataVolumes: {Default: true, PreRelease: utilfeature.Deprecated},
}
|
/*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iampolicy
import (
"encoding/json"
"net"
"reflect"
"testing"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/policy/condition"
)
func TestGetPoliciesFromClaims(t *testing.T) {
attributesArray := `{
"exp": 1594690452,
"iat": 1594689552,
"auth_time": 1594689552,
"jti": "18ed05c9-2c69-45d5-a33f-8c94aca99ad5",
"iss": "http://localhost:8080/auth/realms/minio",
"aud": "account",
"sub": "7e5e2f30-1c97-4616-8623-2eae14dee9b1",
"typ": "ID",
"azp": "account",
"nonce": "66ZoLzwJbjdkiedI",
"session_state": "3df7b526-5310-4038-9f35-50ecd295a31d",
"acr": "1",
"upn": "harsha",
"address": {},
"email_verified": false,
"groups": [
"offline_access"
],
"preferred_username": "harsha",
"policy": [
"readwrite",
"readwrite,readonly",
" readonly",
""
]}`
var m = make(map[string]interface{})
if err := json.Unmarshal([]byte(attributesArray), &m); err != nil {
t.Fatal(err)
}
var expectedSet = set.CreateStringSet("readwrite", "readonly")
gotSet, ok := GetPoliciesFromClaims(m, "policy")
if !ok {
t.Fatal("no policy claim was found")
}
if gotSet.IsEmpty() {
t.Fatal("no policies were found in policy claim")
}
if !gotSet.Equals(expectedSet) {
t.Fatalf("Expected %v got %v", expectedSet, gotSet)
}
}
// TestPolicyIsAllowed is a table-driven test for Policy.IsAllowed covering
// four policies (wildcard allow, scoped allow, IP-conditioned allow, and
// IP-conditioned deny) against both anonymous and authenticated request args.
func TestPolicyIsAllowed(t *testing.T) {
// case1: allow GetBucketLocation and PutObject on every resource ("*").
case1Policy := Policy{
Version: DefaultVersion,
Statements: []Statement{
NewStatement(
policy.Allow,
NewActionSet(GetBucketLocationAction, PutObjectAction),
NewResourceSet(NewResource("*", "")),
condition.NewFunctions(),
)},
}
// case2: allow GetObject and PutObject, but only on mybucket/myobject*.
case2Policy := Policy{
Version: DefaultVersion,
Statements: []Statement{
NewStatement(
policy.Allow,
NewActionSet(GetObjectAction, PutObjectAction),
NewResourceSet(NewResource("mybucket", "/myobject*")),
condition.NewFunctions(),
)},
}
// Source-IP condition restricted to the 192.168.1.0/24 network; used by
// case3 (allow) and case4 (deny) below.
_, IPNet, err := net.ParseCIDR("192.168.1.0/24")
if err != nil {
t.Fatalf("unexpected error. %v\n", err)
}
func1, err := condition.NewIPAddressFunc(
condition.AWSSourceIP,
IPNet,
)
if err != nil {
t.Fatalf("unexpected error. %v\n", err)
}
// case3: same as case2 but the allow only applies when the request's
// SourceIp matches the condition above.
case3Policy := Policy{
Version: DefaultVersion,
Statements: []Statement{
NewStatement(
policy.Allow,
NewActionSet(GetObjectAction, PutObjectAction),
NewResourceSet(NewResource("mybucket", "/myobject*")),
condition.NewFunctions(func1),
)},
}
// case4: explicit Deny with the same actions/resources/condition as case3.
case4Policy := Policy{
Version: DefaultVersion,
Statements: []Statement{
NewStatement(
policy.Deny,
NewActionSet(GetObjectAction, PutObjectAction),
NewResourceSet(NewResource("mybucket", "/myobject*")),
condition.NewFunctions(func1),
)},
}
// Request fixtures. The "anon*" args mirror the authenticated ones; only
// the Put args carry condition values (including SourceIp 192.168.1.10,
// which falls inside the CIDR above).
anonGetBucketLocationArgs := Args{
AccountName: "Q3AM3UQ867SPQQA43P2F",
Action: GetBucketLocationAction,
BucketName: "mybucket",
ConditionValues: map[string][]string{},
}
anonPutObjectActionArgs := Args{
AccountName: "Q3AM3UQ867SPQQA43P2F",
Action: PutObjectAction,
BucketName: "mybucket",
ConditionValues: map[string][]string{
"x-amz-copy-source": {"mybucket/myobject"},
"SourceIp": {"192.168.1.10"},
},
ObjectName: "myobject",
}
anonGetObjectActionArgs := Args{
AccountName: "Q3AM3UQ867SPQQA43P2F",
Action: GetObjectAction,
BucketName: "mybucket",
ConditionValues: map[string][]string{},
ObjectName: "myobject",
}
getBucketLocationArgs := Args{
AccountName: "Q3AM3UQ867SPQQA43P2F",
Action: GetBucketLocationAction,
BucketName: "mybucket",
ConditionValues: map[string][]string{},
}
putObjectActionArgs := Args{
AccountName: "Q3AM3UQ867SPQQA43P2F",
Action: PutObjectAction,
BucketName: "mybucket",
ConditionValues: map[string][]string{
"x-amz-copy-source": {"mybucket/myobject"},
"SourceIp": {"192.168.1.10"},
},
ObjectName: "myobject",
}
getObjectActionArgs := Args{
AccountName: "Q3AM3UQ867SPQQA43P2F",
Action: GetObjectAction,
BucketName: "mybucket",
ConditionValues: map[string][]string{},
ObjectName: "myobject",
}
// Each policy is checked against all six request fixtures. Note the Get
// cases without condition values are denied under case3 because the
// SourceIp condition cannot be satisfied, and everything is denied under
// case4's explicit Deny.
testCases := []struct {
policy Policy
args Args
expectedResult bool
}{
{case1Policy, anonGetBucketLocationArgs, true},
{case1Policy, anonPutObjectActionArgs, true},
{case1Policy, anonGetObjectActionArgs, false},
{case1Policy, getBucketLocationArgs, true},
{case1Policy, putObjectActionArgs, true},
{case1Policy, getObjectActionArgs, false},
{case2Policy, anonGetBucketLocationArgs, false},
{case2Policy, anonPutObjectActionArgs, true},
{case2Policy, anonGetObjectActionArgs, true},
{case2Policy, getBucketLocationArgs, false},
{case2Policy, putObjectActionArgs, true},
{case2Policy, getObjectActionArgs, true},
{case3Policy, anonGetBucketLocationArgs, false},
{case3Policy, anonPutObjectActionArgs, true},
{case3Policy, anonGetObjectActionArgs, false},
{case3Policy, getBucketLocationArgs, false},
{case3Policy, putObjectActionArgs, true},
{case3Policy, getObjectActionArgs, false},
{case4Policy, anonGetBucketLocationArgs, false},
{case4Policy, anonPutObjectActionArgs, false},
{case4Policy, anonGetObjectActionArgs, false},
{case4Policy, getBucketLocationArgs, false},
{case4Policy, putObjectActionArgs, false},
{case4Policy, getObjectActionArgs, false},
}
for i, testCase := range testCases {
result := testCase.policy.IsAllowed(testCase.args)
if result != testCase.expectedResult {
t.Errorf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
}
}
}
// TestPolicyIsEmpty verifies that Policy.IsEmpty reports true only for a
// policy carrying no statements, regardless of ID/Version being set.
func TestPolicyIsEmpty(t *testing.T) {
	nonEmptyPolicy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	emptyPolicy := Policy{
		ID:      "MyPolicyForMyBucket",
		Version: DefaultVersion,
	}
	tests := []struct {
		policy Policy
		want   bool
	}{
		{nonEmptyPolicy, false},
		{emptyPolicy, true},
	}
	for i, tt := range tests {
		if got := tt.policy.IsEmpty(); got != tt.want {
			t.Fatalf("case %v: expected: %v, got: %v\n", i+1, tt.want, got)
		}
	}
}
// TestPolicyIsValid exercises the unexported isValid check with: duplicate
// statements, statements differing only in effect/action/resource, an
// unsupported Version string, and a statement whose condition keys do not
// apply to its actions.
func TestPolicyIsValid(t *testing.T) {
	// Single Allow statement - trivially valid.
	case1Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// Allow and Deny for different actions on the same resource.
	case2Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
			NewStatement(
				policy.Deny,
				NewActionSet(GetObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// Same action, opposite effects, different resources.
	case3Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
			NewStatement(
				policy.Deny,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/yourobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// Null condition functions reused by case4 and case6 below.
	func1, err := condition.NewNullFunc(
		condition.S3XAmzCopySource,
		true,
	)
	if err != nil {
		t.Fatalf("unexpected error. %v\n", err)
	}
	func2, err := condition.NewNullFunc(
		condition.S3XAmzServerSideEncryption,
		false,
	)
	if err != nil {
		t.Fatalf("unexpected error. %v\n", err)
	}
	// Identical statements except for effect and attached condition.
	case4Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(func1),
			),
			NewStatement(
				policy.Deny,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(func2),
			),
		},
	}
	// Version string not in the supported format - expected to fail.
	case5Policy := Policy{
		Version: "17-10-2012",
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// NOTE(review): expected invalid - presumably because the condition
	// keys (copy-source / SSE) do not apply to both listed actions; confirm
	// against Statement.isValid.
	case6Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(GetObjectAction, PutObjectAction),
				NewResourceSet(NewResource("mybucket", "myobject*")),
				condition.NewFunctions(func1, func2),
			),
		},
	}
	// Duplicate statements that differ only in effect.
	case7Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
			NewStatement(
				policy.Deny,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// Fully duplicate statements (same effect).
	case8Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	testCases := []struct {
		policy    Policy
		expectErr bool
	}{
		{case1Policy, false},
		// allowed duplicate principal.
		{case2Policy, false},
		// allowed duplicate principal and action.
		{case3Policy, false},
		// allowed duplicate principal, action and resource.
		{case4Policy, false},
		// Invalid version error.
		{case5Policy, true},
		// Invalid statement error.
		{case6Policy, true},
		// Duplicate statement different Effects.
		{case7Policy, false},
		// Duplicate statement same Effects, duplicate effect will be removed.
		{case8Policy, false},
	}
	for i, testCase := range testCases {
		err := testCase.policy.isValid()
		expectErr := (err != nil)
		if expectErr != testCase.expectErr {
			t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}
// TestPolicyUnmarshalJSONAndValidate decodes raw JSON policy documents,
// compares each decoded Policy against a hand-built expected value with
// reflect.DeepEqual, and then calls Validate on the decoded result.
func TestPolicyUnmarshalJSONAndValidate(t *testing.T) {
	// Single Allow statement carrying an explicit Sid.
	case1Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "SomeId1",
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        }
    ]
}`)
	case1Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// NewStatement takes no Sid argument, so set it after construction.
	case1Policy.Statements[0].SID = "SomeId1"
	// Allow plus an IP-conditioned Deny statement.
	case2Data := []byte(`{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        },
        {
            "Effect": "Deny",
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::mybucket/yourobject*",
            "Condition": {
                "IpAddress": {
                    "aws:SourceIp": "192.168.1.0/24"
                }
            }
        }
    ]
}`)
	_, IPNet1, err := net.ParseCIDR("192.168.1.0/24")
	if err != nil {
		t.Fatalf("unexpected error. %v\n", err)
	}
	func1, err := condition.NewIPAddressFunc(
		condition.AWSSourceIP,
		IPNet1,
	)
	if err != nil {
		t.Fatalf("unexpected error. %v\n", err)
	}
	case2Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
			NewStatement(
				policy.Deny,
				NewActionSet(GetObjectAction),
				NewResourceSet(NewResource("mybucket", "/yourobject*")),
				condition.NewFunctions(func1),
			),
		},
	}
	// Two Allow statements for different actions on the same resource.
	case3Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        },
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        }
    ]
}`)
	case3Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(GetObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// Same as case 3 with the statement order reversed - order must be
	// preserved through decoding.
	case4Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        },
        {
            "Effect": "Allow",
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        }
    ]
}`)
	case4Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
			NewStatement(
				policy.Allow,
				NewActionSet(GetObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// Same action on two different resources.
	case5Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        },
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/yourobject*"
        }
    ]
}`)
	case5Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/yourobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// Identical statements distinguished only by their IP conditions.
	case6Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*",
            "Condition": {
                "IpAddress": {
                    "aws:SourceIp": "192.168.1.0/24"
                }
            }
        },
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*",
            "Condition": {
                "IpAddress": {
                    "aws:SourceIp": "192.168.2.0/24"
                }
            }
        }
    ]
}`)
	_, IPNet2, err := net.ParseCIDR("192.168.2.0/24")
	if err != nil {
		t.Fatalf("unexpected error. %v\n", err)
	}
	func2, err := condition.NewIPAddressFunc(
		condition.AWSSourceIP,
		IPNet2,
	)
	if err != nil {
		t.Fatalf("unexpected error. %v\n", err)
	}
	case6Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(func1),
			),
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(func2),
			),
		},
	}
	// Bucket-level action on a specific bucket ARN (no object path).
	case7Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:GetBucketLocation",
            "Resource": "arn:aws:s3:::mybucket"
        }
    ]
}`)
	case7Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(GetBucketLocationAction),
				NewResourceSet(NewResource("mybucket", "")),
				condition.NewFunctions(),
			),
		},
	}
	// Bucket-level action on the wildcard bucket ARN.
	case8Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:GetBucketLocation",
            "Resource": "arn:aws:s3:::*"
        }
    ]
}`)
	case8Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(GetBucketLocationAction),
				NewResourceSet(NewResource("*", "")),
				condition.NewFunctions(),
			),
		},
	}
	// Unsupported Version string - unmarshal succeeds, Validate fails.
	case9Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "17-10-2012",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        }
    ]
}`)
	// Exact duplicate statements in the JSON document.
	case10Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        },
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        }
    ]
}`)
	// The expected policy holds a single statement: the duplicate is
	// dropped during decoding (see the test-case comment below).
	case10Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// Duplicate statements differing only in Effect - both survive.
	case11Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        },
        {
            "Effect": "Deny",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        }
    ]
}`)
	case11Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "myobject*")),
				condition.NewFunctions(),
			),
			NewStatement(
				policy.Deny,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	testCases := []struct {
		data                []byte
		expectedResult      Policy
		expectUnmarshalErr  bool
		expectValidationErr bool
	}{
		{case1Data, case1Policy, false, false},
		{case2Data, case2Policy, false, false},
		{case3Data, case3Policy, false, false},
		{case4Data, case4Policy, false, false},
		{case5Data, case5Policy, false, false},
		{case6Data, case6Policy, false, false},
		{case7Data, case7Policy, false, false},
		{case8Data, case8Policy, false, false},
		// Invalid version error.
		{case9Data, Policy{}, false, true},
		// Duplicate statement success, duplicate statement is removed.
		{case10Data, case10Policy, false, false},
		// Duplicate statement success (Effect differs).
		{case11Data, case11Policy, false, false},
	}
	for i, testCase := range testCases {
		var result Policy
		err := json.Unmarshal(testCase.data, &result)
		expectErr := (err != nil)
		if expectErr != testCase.expectUnmarshalErr {
			t.Errorf("case %v: error during unmarshal: expected: %v, got: %v", i+1, testCase.expectUnmarshalErr, expectErr)
		}
		// Validate is run even when unmarshalling failed; a zero Policy
		// fails validation, which the error cases rely on.
		err = result.Validate()
		expectErr = (err != nil)
		if expectErr != testCase.expectValidationErr {
			t.Errorf("case %v: error during validation: expected: %v, got: %v", i+1, testCase.expectValidationErr, expectErr)
		}
		// Deep-compare only when both steps were expected to succeed.
		if !testCase.expectUnmarshalErr && !testCase.expectValidationErr {
			if !reflect.DeepEqual(result, testCase.expectedResult) {
				t.Errorf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
			}
		}
	}
}
// TestPolicyValidate exercises the exported Validate method: an empty
// resource must fail, a statement whose condition keys do not fit its
// actions must fail, and a well-formed policy must pass.
func TestPolicyValidate(t *testing.T) {
	emptyResourcePolicy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("", "")),
				condition.NewFunctions(),
			),
		},
	}
	copySourceNull, err := condition.NewNullFunc(
		condition.S3XAmzCopySource,
		true,
	)
	if err != nil {
		t.Fatalf("unexpected error. %v\n", err)
	}
	sseNull, err := condition.NewNullFunc(
		condition.S3XAmzServerSideEncryption,
		false,
	)
	if err != nil {
		t.Fatalf("unexpected error. %v\n", err)
	}
	badConditionPolicy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(GetObjectAction, PutObjectAction),
				NewResourceSet(NewResource("mybucket", "myobject*")),
				condition.NewFunctions(copySourceNull, sseNull),
			),
		},
	}
	validPolicy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(GetObjectAction, PutObjectAction),
				NewResourceSet(NewResource("mybucket", "myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	tests := []struct {
		policy    Policy
		expectErr bool
	}{
		{emptyResourcePolicy, true},
		{badConditionPolicy, true},
		{validPolicy, false},
	}
	for i, tt := range tests {
		gotErr := (tt.policy.Validate() != nil)
		if gotErr != tt.expectErr {
			t.Fatalf("case %v: error: expected: %v, got: %v", i+1, tt.expectErr, gotErr)
		}
	}
}
fix: policy_test to use minio-go/v7
/*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iampolicy
import (
"encoding/json"
"net"
"reflect"
"testing"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/policy/condition"
)
// TestGetPoliciesFromClaims checks that GetPoliciesFromClaims extracts the
// "policy" claim from a decoded JWT claim set, splitting comma-separated
// entries, trimming whitespace and discarding empty values.
func TestGetPoliciesFromClaims(t *testing.T) {
	rawClaims := `{
        "exp": 1594690452,
        "iat": 1594689552,
        "auth_time": 1594689552,
        "jti": "18ed05c9-2c69-45d5-a33f-8c94aca99ad5",
        "iss": "http://localhost:8080/auth/realms/minio",
        "aud": "account",
        "sub": "7e5e2f30-1c97-4616-8623-2eae14dee9b1",
        "typ": "ID",
        "azp": "account",
        "nonce": "66ZoLzwJbjdkiedI",
        "session_state": "3df7b526-5310-4038-9f35-50ecd295a31d",
        "acr": "1",
        "upn": "harsha",
        "address": {},
        "email_verified": false,
        "groups": [
            "offline_access"
        ],
        "preferred_username": "harsha",
        "policy": [
            "readwrite",
            "readwrite,readonly",
            " readonly",
            ""
        ]}`
	claims := make(map[string]interface{})
	if err := json.Unmarshal([]byte(rawClaims), &claims); err != nil {
		t.Fatal(err)
	}
	want := set.CreateStringSet("readwrite", "readonly")
	got, ok := GetPoliciesFromClaims(claims, "policy")
	if !ok {
		t.Fatal("no policy claim was found")
	}
	if got.IsEmpty() {
		t.Fatal("no policies were found in policy claim")
	}
	if !got.Equals(want) {
		t.Fatalf("Expected %v got %v", want, got)
	}
}
// TestPolicyIsAllowed drives Policy.IsAllowed through a 4x6 truth table:
// four policies (wildcard allow, bucket-scoped allow, IP-conditioned allow,
// IP-conditioned deny) against six request-argument sets.
func TestPolicyIsAllowed(t *testing.T) {
	// Allow GetBucketLocation and PutObject on every resource.
	case1Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(GetBucketLocationAction, PutObjectAction),
				NewResourceSet(NewResource("*", "")),
				condition.NewFunctions(),
			)},
	}
	// Allow GetObject and PutObject only under mybucket/myobject*.
	case2Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(GetObjectAction, PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			)},
	}
	// IP-address condition matching the 192.168.1.0/24 subnet used by the
	// SourceIp condition values below.
	_, IPNet, err := net.ParseCIDR("192.168.1.0/24")
	if err != nil {
		t.Fatalf("unexpected error. %v\n", err)
	}
	func1, err := condition.NewIPAddressFunc(
		condition.AWSSourceIP,
		IPNet,
	)
	if err != nil {
		t.Fatalf("unexpected error. %v\n", err)
	}
	// Same grant as case2 but only from the allowed subnet.
	case3Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(GetObjectAction, PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(func1),
			)},
	}
	// Deny variant of case3.
	case4Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Deny,
				NewActionSet(GetObjectAction, PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(func1),
			)},
	}
	// Request argument sets. The "anon" and named variants carry the same
	// values; both shapes are exercised against every policy.
	anonGetBucketLocationArgs := Args{
		AccountName:     "Q3AM3UQ867SPQQA43P2F",
		Action:          GetBucketLocationAction,
		BucketName:      "mybucket",
		ConditionValues: map[string][]string{},
	}
	anonPutObjectActionArgs := Args{
		AccountName: "Q3AM3UQ867SPQQA43P2F",
		Action:      PutObjectAction,
		BucketName:  "mybucket",
		ConditionValues: map[string][]string{
			"x-amz-copy-source": {"mybucket/myobject"},
			"SourceIp":          {"192.168.1.10"},
		},
		ObjectName: "myobject",
	}
	anonGetObjectActionArgs := Args{
		AccountName:     "Q3AM3UQ867SPQQA43P2F",
		Action:          GetObjectAction,
		BucketName:      "mybucket",
		ConditionValues: map[string][]string{},
		ObjectName:      "myobject",
	}
	getBucketLocationArgs := Args{
		AccountName:     "Q3AM3UQ867SPQQA43P2F",
		Action:          GetBucketLocationAction,
		BucketName:      "mybucket",
		ConditionValues: map[string][]string{},
	}
	putObjectActionArgs := Args{
		AccountName: "Q3AM3UQ867SPQQA43P2F",
		Action:      PutObjectAction,
		BucketName:  "mybucket",
		ConditionValues: map[string][]string{
			"x-amz-copy-source": {"mybucket/myobject"},
			"SourceIp":          {"192.168.1.10"},
		},
		ObjectName: "myobject",
	}
	getObjectActionArgs := Args{
		AccountName:     "Q3AM3UQ867SPQQA43P2F",
		Action:          GetObjectAction,
		BucketName:      "mybucket",
		ConditionValues: map[string][]string{},
		ObjectName:      "myobject",
	}
	testCases := []struct {
		policy         Policy
		args           Args
		expectedResult bool
	}{
		{case1Policy, anonGetBucketLocationArgs, true},
		{case1Policy, anonPutObjectActionArgs, true},
		{case1Policy, anonGetObjectActionArgs, false},
		{case1Policy, getBucketLocationArgs, true},
		{case1Policy, putObjectActionArgs, true},
		{case1Policy, getObjectActionArgs, false},
		{case2Policy, anonGetBucketLocationArgs, false},
		{case2Policy, anonPutObjectActionArgs, true},
		{case2Policy, anonGetObjectActionArgs, true},
		{case2Policy, getBucketLocationArgs, false},
		{case2Policy, putObjectActionArgs, true},
		{case2Policy, getObjectActionArgs, true},
		{case3Policy, anonGetBucketLocationArgs, false},
		{case3Policy, anonPutObjectActionArgs, true},
		{case3Policy, anonGetObjectActionArgs, false},
		{case3Policy, getBucketLocationArgs, false},
		{case3Policy, putObjectActionArgs, true},
		{case3Policy, getObjectActionArgs, false},
		{case4Policy, anonGetBucketLocationArgs, false},
		{case4Policy, anonPutObjectActionArgs, false},
		{case4Policy, anonGetObjectActionArgs, false},
		{case4Policy, getBucketLocationArgs, false},
		{case4Policy, putObjectActionArgs, false},
		{case4Policy, getObjectActionArgs, false},
	}
	for i, testCase := range testCases {
		result := testCase.policy.IsAllowed(testCase.args)
		if result != testCase.expectedResult {
			t.Errorf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
		}
	}
}
// TestPolicyIsEmpty verifies that Policy.IsEmpty reports true only for a
// policy carrying no statements, regardless of ID/Version being set.
func TestPolicyIsEmpty(t *testing.T) {
	nonEmptyPolicy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	emptyPolicy := Policy{
		ID:      "MyPolicyForMyBucket",
		Version: DefaultVersion,
	}
	tests := []struct {
		policy Policy
		want   bool
	}{
		{nonEmptyPolicy, false},
		{emptyPolicy, true},
	}
	for i, tt := range tests {
		if got := tt.policy.IsEmpty(); got != tt.want {
			t.Fatalf("case %v: expected: %v, got: %v\n", i+1, tt.want, got)
		}
	}
}
// TestPolicyIsValid exercises the unexported isValid check with: duplicate
// statements, statements differing only in effect/action/resource, an
// unsupported Version string, and a statement whose condition keys do not
// apply to its actions.
func TestPolicyIsValid(t *testing.T) {
	// Single Allow statement - trivially valid.
	case1Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// Allow and Deny for different actions on the same resource.
	case2Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
			NewStatement(
				policy.Deny,
				NewActionSet(GetObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// Same action, opposite effects, different resources.
	case3Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
			NewStatement(
				policy.Deny,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/yourobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// Null condition functions reused by case4 and case6 below.
	func1, err := condition.NewNullFunc(
		condition.S3XAmzCopySource,
		true,
	)
	if err != nil {
		t.Fatalf("unexpected error. %v\n", err)
	}
	func2, err := condition.NewNullFunc(
		condition.S3XAmzServerSideEncryption,
		false,
	)
	if err != nil {
		t.Fatalf("unexpected error. %v\n", err)
	}
	// Identical statements except for effect and attached condition.
	case4Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(func1),
			),
			NewStatement(
				policy.Deny,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(func2),
			),
		},
	}
	// Version string not in the supported format - expected to fail.
	case5Policy := Policy{
		Version: "17-10-2012",
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// NOTE(review): expected invalid - presumably because the condition
	// keys (copy-source / SSE) do not apply to both listed actions; confirm
	// against Statement.isValid.
	case6Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(GetObjectAction, PutObjectAction),
				NewResourceSet(NewResource("mybucket", "myobject*")),
				condition.NewFunctions(func1, func2),
			),
		},
	}
	// Duplicate statements that differ only in effect.
	case7Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
			NewStatement(
				policy.Deny,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// Fully duplicate statements (same effect).
	case8Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	testCases := []struct {
		policy    Policy
		expectErr bool
	}{
		{case1Policy, false},
		// allowed duplicate principal.
		{case2Policy, false},
		// allowed duplicate principal and action.
		{case3Policy, false},
		// allowed duplicate principal, action and resource.
		{case4Policy, false},
		// Invalid version error.
		{case5Policy, true},
		// Invalid statement error.
		{case6Policy, true},
		// Duplicate statement different Effects.
		{case7Policy, false},
		// Duplicate statement same Effects, duplicate effect will be removed.
		{case8Policy, false},
	}
	for i, testCase := range testCases {
		err := testCase.policy.isValid()
		expectErr := (err != nil)
		if expectErr != testCase.expectErr {
			t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}
// TestPolicyUnmarshalJSONAndValidate decodes raw JSON policy documents,
// compares each decoded Policy against a hand-built expected value with
// reflect.DeepEqual, and then calls Validate on the decoded result.
func TestPolicyUnmarshalJSONAndValidate(t *testing.T) {
	// Single Allow statement carrying an explicit Sid.
	case1Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "SomeId1",
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        }
    ]
}`)
	case1Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// NewStatement takes no Sid argument, so set it after construction.
	case1Policy.Statements[0].SID = "SomeId1"
	// Allow plus an IP-conditioned Deny statement.
	case2Data := []byte(`{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        },
        {
            "Effect": "Deny",
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::mybucket/yourobject*",
            "Condition": {
                "IpAddress": {
                    "aws:SourceIp": "192.168.1.0/24"
                }
            }
        }
    ]
}`)
	_, IPNet1, err := net.ParseCIDR("192.168.1.0/24")
	if err != nil {
		t.Fatalf("unexpected error. %v\n", err)
	}
	func1, err := condition.NewIPAddressFunc(
		condition.AWSSourceIP,
		IPNet1,
	)
	if err != nil {
		t.Fatalf("unexpected error. %v\n", err)
	}
	case2Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
			NewStatement(
				policy.Deny,
				NewActionSet(GetObjectAction),
				NewResourceSet(NewResource("mybucket", "/yourobject*")),
				condition.NewFunctions(func1),
			),
		},
	}
	// Two Allow statements for different actions on the same resource.
	case3Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        },
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        }
    ]
}`)
	case3Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(GetObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// Same as case 3 with the statement order reversed - order must be
	// preserved through decoding.
	case4Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        },
        {
            "Effect": "Allow",
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        }
    ]
}`)
	case4Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
			NewStatement(
				policy.Allow,
				NewActionSet(GetObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// Same action on two different resources.
	case5Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        },
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/yourobject*"
        }
    ]
}`)
	case5Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(),
			),
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/yourobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// Identical statements distinguished only by their IP conditions.
	case6Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*",
            "Condition": {
                "IpAddress": {
                    "aws:SourceIp": "192.168.1.0/24"
                }
            }
        },
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*",
            "Condition": {
                "IpAddress": {
                    "aws:SourceIp": "192.168.2.0/24"
                }
            }
        }
    ]
}`)
	_, IPNet2, err := net.ParseCIDR("192.168.2.0/24")
	if err != nil {
		t.Fatalf("unexpected error. %v\n", err)
	}
	func2, err := condition.NewIPAddressFunc(
		condition.AWSSourceIP,
		IPNet2,
	)
	if err != nil {
		t.Fatalf("unexpected error. %v\n", err)
	}
	case6Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(func1),
			),
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "/myobject*")),
				condition.NewFunctions(func2),
			),
		},
	}
	// Bucket-level action on a specific bucket ARN (no object path).
	case7Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:GetBucketLocation",
            "Resource": "arn:aws:s3:::mybucket"
        }
    ]
}`)
	case7Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(GetBucketLocationAction),
				NewResourceSet(NewResource("mybucket", "")),
				condition.NewFunctions(),
			),
		},
	}
	// Bucket-level action on the wildcard bucket ARN.
	case8Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:GetBucketLocation",
            "Resource": "arn:aws:s3:::*"
        }
    ]
}`)
	case8Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(GetBucketLocationAction),
				NewResourceSet(NewResource("*", "")),
				condition.NewFunctions(),
			),
		},
	}
	// Unsupported Version string - unmarshal succeeds, Validate fails.
	case9Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "17-10-2012",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        }
    ]
}`)
	// Exact duplicate statements in the JSON document.
	case10Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        },
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        }
    ]
}`)
	// The expected policy holds a single statement: the duplicate is
	// dropped during decoding (see the test-case comment below).
	case10Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	// Duplicate statements differing only in Effect - both survive.
	case11Data := []byte(`{
    "ID": "MyPolicyForMyBucket1",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        },
        {
            "Effect": "Deny",
            "Action": "s3:PutObject",
            "Resource": "arn:aws:s3:::mybucket/myobject*"
        }
    ]
}`)
	case11Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "myobject*")),
				condition.NewFunctions(),
			),
			NewStatement(
				policy.Deny,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("mybucket", "myobject*")),
				condition.NewFunctions(),
			),
		},
	}
	testCases := []struct {
		data                []byte
		expectedResult      Policy
		expectUnmarshalErr  bool
		expectValidationErr bool
	}{
		{case1Data, case1Policy, false, false},
		{case2Data, case2Policy, false, false},
		{case3Data, case3Policy, false, false},
		{case4Data, case4Policy, false, false},
		{case5Data, case5Policy, false, false},
		{case6Data, case6Policy, false, false},
		{case7Data, case7Policy, false, false},
		{case8Data, case8Policy, false, false},
		// Invalid version error.
		{case9Data, Policy{}, false, true},
		// Duplicate statement success, duplicate statement is removed.
		{case10Data, case10Policy, false, false},
		// Duplicate statement success (Effect differs).
		{case11Data, case11Policy, false, false},
	}
	for i, testCase := range testCases {
		var result Policy
		err := json.Unmarshal(testCase.data, &result)
		expectErr := (err != nil)
		if expectErr != testCase.expectUnmarshalErr {
			t.Errorf("case %v: error during unmarshal: expected: %v, got: %v", i+1, testCase.expectUnmarshalErr, expectErr)
		}
		// Validate is run even when unmarshalling failed; a zero Policy
		// fails validation, which the error cases rely on.
		err = result.Validate()
		expectErr = (err != nil)
		if expectErr != testCase.expectValidationErr {
			t.Errorf("case %v: error during validation: expected: %v, got: %v", i+1, testCase.expectValidationErr, expectErr)
		}
		// Deep-compare only when both steps were expected to succeed.
		if !testCase.expectUnmarshalErr && !testCase.expectValidationErr {
			if !reflect.DeepEqual(result, testCase.expectedResult) {
				t.Errorf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
			}
		}
	}
}
// TestPolicyValidate exercises Policy.Validate:
//   - case 1: a statement with an empty resource must fail validation;
//   - case 2: a statement carrying condition functions is expected to
//     fail validation (see the expectErr table below);
//   - case 3: a well-formed allow statement must pass.
func TestPolicyValidate(t *testing.T) {
	// Invalid: resource bucket name and pattern are both empty.
	case1Policy := Policy{
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(PutObjectAction),
				NewResourceSet(NewResource("", "")),
				condition.NewFunctions(),
			),
		},
	}

	func1, err := condition.NewNullFunc(
		condition.S3XAmzCopySource,
		true,
	)
	if err != nil {
		t.Fatalf("unexpected error. %v\n", err)
	}
	func2, err := condition.NewNullFunc(
		condition.S3XAmzServerSideEncryption,
		false,
	)
	if err != nil {
		t.Fatalf("unexpected error. %v\n", err)
	}
	// Expected to fail validation because of the attached condition
	// functions (per the expectErr table below).
	case2Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(GetObjectAction, PutObjectAction),
				NewResourceSet(NewResource("mybucket", "myobject*")),
				condition.NewFunctions(func1, func2),
			),
		},
	}

	// Valid policy: concrete resource, no conditions.
	case3Policy := Policy{
		ID:      "MyPolicyForMyBucket1",
		Version: DefaultVersion,
		Statements: []Statement{
			NewStatement(
				policy.Allow,
				NewActionSet(GetObjectAction, PutObjectAction),
				NewResourceSet(NewResource("mybucket", "myobject*")),
				condition.NewFunctions(),
			),
		},
	}

	testCases := []struct {
		policy    Policy
		expectErr bool
	}{
		{case1Policy, true},
		{case2Policy, true},
		{case3Policy, false},
	}

	for i, testCase := range testCases {
		err := testCase.policy.Validate()
		expectErr := (err != nil)

		if expectErr != testCase.expectErr {
			t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
		}
	}
}
|
//go:build linux || solaris || darwin || freebsd
// +build linux solaris darwin freebsd
package lockfile
import (
"bytes"
cryptorand "crypto/rand"
"encoding/binary"
"fmt"
"os"
"path/filepath"
"sync"
"sync/atomic"
"time"
"github.com/containers/storage/pkg/system"
"golang.org/x/sys/unix"
)
// lockfile implements the package's Locker interface on top of an
// on-disk file and a POSIX fcntl(2) advisory record lock (see lock()).
type lockfile struct {
	// rwMutex serializes concurrent reader-writer acquisitions in the same process space
	rwMutex *sync.RWMutex
	// stateMutex is used to synchronize concurrent accesses to the state below
	stateMutex *sync.Mutex
	// counter is the number of lock acquisitions currently held by this
	// process; incremented in lock(), decremented in Unlock().
	counter int64
	// file is the path of the lock file on disk.
	file string
	// fd is the open descriptor for file; valid only while counter > 0.
	fd uintptr
	lw []byte // "last writer"-unique value valid as of the last .Touch() or .Modified(), generated by newLastWriterID()
	// locktype is the fcntl lock type last acquired (unix.F_RDLCK or unix.F_WRLCK).
	locktype int16
	// locked reports whether the file lock is currently held.
	locked bool
	// ro records whether the lock was created read-only (see createLockerForPath).
	ro bool
	// recursive records whether the last acquisition came via RecursiveLock().
	recursive bool
}
// lastWriterIDSize is the length, in bytes, of a "last writer" ID.
// This must be the same as len(stringid.GenerateRandomID).
const lastWriterIDSize = 64

// lastWriterIDCounter is private state for newLastWriterID.
var lastWriterIDCounter uint64

// newLastWriterID returns a new "last writer" ID.
// The value must be different on every call, and also differ from values
// generated by other processes.
func newLastWriterID() []byte {
	// The ID is (PID, time, per-process counter, random):
	// PID + time identify a process across reboots and a moment within
	// it; the per-process counter guards against in-process concurrency;
	// the random tail disambiguates colliding PID namespaces and pads
	// the value out to lastWriterIDSize, because other versions of this
	// code exist and they don't work efficiently if the size changes.
	id := make([]byte, lastWriterIDSize)
	binary.LittleEndian.PutUint64(id[0:8], uint64(time.Now().UnixNano()))
	binary.LittleEndian.PutUint64(id[8:16], atomic.AddUint64(&lastWriterIDCounter, 1))
	binary.LittleEndian.PutUint32(id[16:20], uint32(os.Getpid()))
	n, err := cryptorand.Read(id[20:])
	if err != nil || n != len(id)-20 {
		panic(err) // This shouldn't happen
	}
	return id
}
// openLock opens the file at path and returns the corresponding file
// descriptor. The path is opened either read-only or read-write,
// depending on the value of ro argument.
//
// openLock will create the file and its parent directories,
// if necessary.
func openLock(path string, ro bool) (fd int, err error) {
	if ro {
		fd, err = unix.Open(path, os.O_RDONLY|unix.O_CLOEXEC|os.O_CREATE, 0)
	} else {
		fd, err = unix.Open(path,
			os.O_RDWR|unix.O_CLOEXEC|os.O_CREATE,
			unix.S_IRUSR|unix.S_IWUSR|unix.S_IRGRP|unix.S_IROTH,
		)
	}
	if err == nil {
		return
	}

	// the directory of the lockfile seems to be removed, try to create it
	if os.IsNotExist(err) {
		if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
			return fd, fmt.Errorf("creating locker directory: %w", err)
		}

		return openLock(path, ro)
	}

	return
}
// createLockerForPath returns a Locker object, possibly (depending on the platform)
// working inter-process and associated with the specified path.
//
// This function will be called at most once for each path value within a single process.
//
// If ro, the lock is a read-write lock and the returned Locker should correspond to the
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
func createLockerForPath(path string, ro bool) (Locker, error) {
	// Check if we can open the lock.
	fd, err := openLock(path, ro)
	if err != nil {
		return nil, err
	}
	// The probe descriptor is not kept; lock() re-opens the file on
	// the first acquisition.
	unix.Close(fd)

	// Read-only locks are taken as shared (F_RDLCK) fcntl locks.
	locktype := unix.F_WRLCK
	if ro {
		locktype = unix.F_RDLCK
	}
	return &lockfile{
		stateMutex: &sync.Mutex{},
		rwMutex:    &sync.RWMutex{},
		file:       path,
		lw:         newLastWriterID(),
		locktype:   int16(locktype),
		locked:     false,
		ro:         ro}, nil
}
// lock locks the lockfile via FCTNL(2) based on the specified type and
// command.
func (l *lockfile) lock(lType int16, recursive bool) {
	// Lock the whole file (Start 0, Len 0) from its beginning.
	lk := unix.Flock_t{
		Type:   lType,
		Whence: int16(os.SEEK_SET),
		Start:  0,
		Len:    0,
	}
	// First take the matching in-process lock.
	switch lType {
	case unix.F_RDLCK:
		l.rwMutex.RLock()
	case unix.F_WRLCK:
		if recursive {
			// NOTE: that's okay as recursive is only set in RecursiveLock(), so
			// there's no need to protect against hypothetical RDLCK cases.
			l.rwMutex.RLock()
		} else {
			l.rwMutex.Lock()
		}
	default:
		panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", lType))
	}
	l.stateMutex.Lock()
	defer l.stateMutex.Unlock()
	if l.counter == 0 {
		// If we're the first reference on the lock, we need to open the file again.
		fd, err := openLock(l.file, l.ro)
		if err != nil {
			panic(fmt.Sprintf("error opening %q: %v", l.file, err))
		}
		l.fd = uintptr(fd)

		// Optimization: only use the (expensive) fcntl syscall when
		// the counter is 0.  In this case, we're either the first
		// reader lock or a writer lock.
		// F_SETLKW blocks until grantable; retry on any error.
		for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
			time.Sleep(10 * time.Millisecond)
		}
	}
	l.locktype = lType
	l.locked = true
	l.recursive = recursive
	l.counter++
}
// Lock locks the lockfile as a writer.  Panics if the lock is a
// read-only one, since write locks cannot be taken on read-only
// lock files.
func (l *lockfile) Lock() {
	// Early-exit guard; no else needed after a terminating panic.
	if l.ro {
		panic("can't take write lock on read-only lock file")
	}
	l.lock(unix.F_WRLCK, false)
}
// RecursiveLock locks the lockfile as a writer but allows for recursive
// acquisitions within the same process space.  Note that for a
// read-only lock file this falls back to RLock() (shared lock) instead.
func (l *lockfile) RecursiveLock() {
	if l.ro {
		l.RLock()
	} else {
		l.lock(unix.F_WRLCK, true)
	}
}
// RLock locks the lockfile as a reader (shared lock).
func (l *lockfile) RLock() {
	l.lock(unix.F_RDLCK, false)
}
// Unlock unlocks the lockfile.  It panics when called on a lock that is
// not currently held, or when it has been called more often than the
// lock was acquired.
func (l *lockfile) Unlock() {
	l.stateMutex.Lock()
	if !l.locked {
		// Panic when unlocking an unlocked lock.  That's a violation
		// of the lock semantics and will reveal such.
		panic("calling Unlock on unlocked lock")
	}
	l.counter--
	if l.counter < 0 {
		// Panic when the counter is negative.  There is no way we can
		// recover from a corrupted lock and we need to protect the
		// storage from corruption.
		panic(fmt.Sprintf("lock %q has been unlocked too often", l.file))
	}
	if l.counter == 0 {
		// We should only release the lock when the counter is 0 to
		// avoid releasing read-locks too early; a given process may
		// acquire a read lock multiple times.
		l.locked = false
		// Close the file descriptor on the last unlock, releasing the
		// file lock.
		unix.Close(int(l.fd))
	}
	if l.locktype == unix.F_RDLCK || l.recursive {
		l.rwMutex.RUnlock()
	} else {
		l.rwMutex.Unlock()
	}
	l.stateMutex.Unlock()
}
// Locked checks if lockfile is locked for writing by a thread in this process.
// It returns false while only a read (shared) lock is held.
func (l *lockfile) Locked() bool {
	l.stateMutex.Lock()
	defer l.stateMutex.Unlock()
	return l.locked && (l.locktype == unix.F_WRLCK)
}
// Touch updates the lock file with a new "last writer" ID (see
// newLastWriterID), marking the file as modified for other holders.
// The caller must hold the write lock, otherwise Touch panics.
func (l *lockfile) Touch() error {
	l.stateMutex.Lock()
	if !l.locked || (l.locktype != unix.F_WRLCK) {
		panic("attempted to update last-writer in lockfile without the write lock")
	}
	defer l.stateMutex.Unlock()
	l.lw = newLastWriterID()
	n, err := unix.Pwrite(int(l.fd), l.lw, 0)
	if err != nil {
		return err
	}
	// A short write is treated as running out of space.
	if n != len(l.lw) {
		return unix.ENOSPC
	}
	return nil
}
// Modified indicates if the lockfile has been updated since the last time it
// was loaded.  The lock must be held, otherwise Modified panics.  As a
// side effect it adopts the on-disk "last writer" ID as the new baseline
// for subsequent calls.
func (l *lockfile) Modified() (bool, error) {
	l.stateMutex.Lock()
	if !l.locked {
		panic("attempted to check last-writer in lockfile without locking it first")
	}
	defer l.stateMutex.Unlock()
	currentLW := make([]byte, len(l.lw))
	n, err := unix.Pread(int(l.fd), currentLW, 0)
	if err != nil {
		return true, err
	}
	// A short read means the file was never written to; report modified.
	if n != len(l.lw) {
		return true, nil
	}
	oldLW := l.lw
	l.lw = currentLW
	return !bytes.Equal(currentLW, oldLW), nil
}
// IsReadWrite indicates if the lock file is a read-write lock.
func (l *lockfile) IsReadWrite() bool {
	return !l.ro
}
// TouchedSince indicates if the lock file has been touched since the specified time
// (based on the file's modification time).  If fstat fails, the file is
// conservatively reported as touched.
func (l *lockfile) TouchedSince(when time.Time) bool {
	st, err := system.Fstat(int(l.fd))
	if err != nil {
		return true
	}
	mtim := st.Mtim()
	touched := time.Unix(mtim.Unix())
	return when.Before(touched)
}
pkg/lockfile: fix openLock doc
Since commit 162a0bf730ce0b30, openLock creates a file even if ro is
set, but the documentation says otherwise.
Since commit 16de6d32b4f82fa6, openLock also creates file's parent
directories if needed, but the documentation doesn't say that.
Fix both issues.
Signed-off-by: Kir Kolyshkin <3a017b8ddb3f9cf3e4a59978b004111bdeb97f08@gmail.com>
//go:build linux || solaris || darwin || freebsd
// +build linux solaris darwin freebsd
package lockfile
import (
"bytes"
cryptorand "crypto/rand"
"encoding/binary"
"fmt"
"os"
"path/filepath"
"sync"
"sync/atomic"
"time"
"github.com/containers/storage/pkg/system"
"golang.org/x/sys/unix"
)
type lockfile struct {
// rwMutex serializes concurrent reader-writer acquisitions in the same process space
rwMutex *sync.RWMutex
// stateMutex is used to synchronize concurrent accesses to the state below
stateMutex *sync.Mutex
counter int64
file string
fd uintptr
lw []byte // "last writer"-unique value valid as of the last .Touch() or .Modified(), generated by newLastWriterID()
locktype int16
locked bool
ro bool
recursive bool
}
const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID)
var lastWriterIDCounter uint64 // Private state for newLastWriterID
// newLastWriterID returns a new "last writer" ID.
// The value must be different on every call, and also differ from values
// generated by other processes.
func newLastWriterID() []byte {
// The ID is (PID, time, per-process counter, random)
// PID + time represents both a unique process across reboots,
// and a specific time within the process; the per-process counter
// is an extra safeguard for in-process concurrency.
// The random part disambiguates across process namespaces
// (where PID values might collide), serves as a general-purpose
// extra safety, _and_ is used to pad the output to lastWriterIDSize,
// because other versions of this code exist and they don't work
// efficiently if the size of the value changes.
pid := os.Getpid()
tm := time.Now().UnixNano()
counter := atomic.AddUint64(&lastWriterIDCounter, 1)
res := make([]byte, lastWriterIDSize)
binary.LittleEndian.PutUint64(res[0:8], uint64(tm))
binary.LittleEndian.PutUint64(res[8:16], counter)
binary.LittleEndian.PutUint32(res[16:20], uint32(pid))
if n, err := cryptorand.Read(res[20:lastWriterIDSize]); err != nil || n != lastWriterIDSize-20 {
panic(err) // This shouldn't happen
}
return res
}
// openLock opens the file at path and returns the corresponding file
// descriptor. The path is opened either read-only or read-write,
// depending on the value of ro argument.
//
// openLock will create the file and its parent directories,
// if necessary.
func openLock(path string, ro bool) (fd int, err error) {
if ro {
fd, err = unix.Open(path, os.O_RDONLY|unix.O_CLOEXEC|os.O_CREATE, 0)
} else {
fd, err = unix.Open(path,
os.O_RDWR|unix.O_CLOEXEC|os.O_CREATE,
unix.S_IRUSR|unix.S_IWUSR|unix.S_IRGRP|unix.S_IROTH,
)
}
if err == nil {
return
}
// the directory of the lockfile seems to be removed, try to create it
if os.IsNotExist(err) {
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
return fd, fmt.Errorf("creating locker directory: %w", err)
}
return openLock(path, ro)
}
return
}
// createLockerForPath returns a Locker object, possibly (depending on the platform)
// working inter-process and associated with the specified path.
//
// This function will be called at most once for each path value within a single process.
//
// If ro, the lock is a read-write lock and the returned Locker should correspond to the
// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
func createLockerForPath(path string, ro bool) (Locker, error) {
// Check if we can open the lock.
fd, err := openLock(path, ro)
if err != nil {
return nil, err
}
unix.Close(fd)
locktype := unix.F_WRLCK
if ro {
locktype = unix.F_RDLCK
}
return &lockfile{
stateMutex: &sync.Mutex{},
rwMutex: &sync.RWMutex{},
file: path,
lw: newLastWriterID(),
locktype: int16(locktype),
locked: false,
ro: ro}, nil
}
// lock locks the lockfile via FCTNL(2) based on the specified type and
// command.
func (l *lockfile) lock(lType int16, recursive bool) {
lk := unix.Flock_t{
Type: lType,
Whence: int16(os.SEEK_SET),
Start: 0,
Len: 0,
}
switch lType {
case unix.F_RDLCK:
l.rwMutex.RLock()
case unix.F_WRLCK:
if recursive {
// NOTE: that's okay as recursive is only set in RecursiveLock(), so
// there's no need to protect against hypothetical RDLCK cases.
l.rwMutex.RLock()
} else {
l.rwMutex.Lock()
}
default:
panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", lType))
}
l.stateMutex.Lock()
defer l.stateMutex.Unlock()
if l.counter == 0 {
// If we're the first reference on the lock, we need to open the file again.
fd, err := openLock(l.file, l.ro)
if err != nil {
panic(fmt.Sprintf("error opening %q: %v", l.file, err))
}
l.fd = uintptr(fd)
// Optimization: only use the (expensive) fcntl syscall when
// the counter is 0. In this case, we're either the first
// reader lock or a writer lock.
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
}
l.locktype = lType
l.locked = true
l.recursive = recursive
l.counter++
}
// Lock locks the lockfile as a writer.  Panics if the lock is a
// read-only one, since write locks cannot be taken on read-only
// lock files.
func (l *lockfile) Lock() {
	// Early-exit guard; no else needed after a terminating panic.
	if l.ro {
		panic("can't take write lock on read-only lock file")
	}
	l.lock(unix.F_WRLCK, false)
}
// RecursiveLock locks the lockfile as a writer but allows for recursive
// acquisitions within the same process space.  Note that for a
// read-only lock file this falls back to RLock() (shared lock) instead.
func (l *lockfile) RecursiveLock() {
	if l.ro {
		l.RLock()
	} else {
		l.lock(unix.F_WRLCK, true)
	}
}
// RLock locks the lockfile as a reader (shared lock).
func (l *lockfile) RLock() {
	l.lock(unix.F_RDLCK, false)
}
// Unlock unlocks the lockfile.  It panics when called on a lock that is
// not currently held, or when it has been called more often than the
// lock was acquired.
func (l *lockfile) Unlock() {
	l.stateMutex.Lock()
	if !l.locked {
		// Panic when unlocking an unlocked lock.  That's a violation
		// of the lock semantics and will reveal such.
		panic("calling Unlock on unlocked lock")
	}
	l.counter--
	if l.counter < 0 {
		// Panic when the counter is negative.  There is no way we can
		// recover from a corrupted lock and we need to protect the
		// storage from corruption.
		panic(fmt.Sprintf("lock %q has been unlocked too often", l.file))
	}
	if l.counter == 0 {
		// We should only release the lock when the counter is 0 to
		// avoid releasing read-locks too early; a given process may
		// acquire a read lock multiple times.
		l.locked = false
		// Close the file descriptor on the last unlock, releasing the
		// file lock.
		unix.Close(int(l.fd))
	}
	if l.locktype == unix.F_RDLCK || l.recursive {
		l.rwMutex.RUnlock()
	} else {
		l.rwMutex.Unlock()
	}
	l.stateMutex.Unlock()
}
// Locked checks if lockfile is locked for writing by a thread in this process.
func (l *lockfile) Locked() bool {
l.stateMutex.Lock()
defer l.stateMutex.Unlock()
return l.locked && (l.locktype == unix.F_WRLCK)
}
// Touch updates the lock file with a new "last writer" ID (see
// newLastWriterID), marking the file as modified for other holders.
// The caller must hold the write lock, otherwise Touch panics.
func (l *lockfile) Touch() error {
	l.stateMutex.Lock()
	if !l.locked || (l.locktype != unix.F_WRLCK) {
		panic("attempted to update last-writer in lockfile without the write lock")
	}
	defer l.stateMutex.Unlock()
	l.lw = newLastWriterID()
	n, err := unix.Pwrite(int(l.fd), l.lw, 0)
	if err != nil {
		return err
	}
	// A short write is treated as running out of space.
	if n != len(l.lw) {
		return unix.ENOSPC
	}
	return nil
}
// Modified indicates if the lockfile has been updated since the last time it
// was loaded.
func (l *lockfile) Modified() (bool, error) {
l.stateMutex.Lock()
if !l.locked {
panic("attempted to check last-writer in lockfile without locking it first")
}
defer l.stateMutex.Unlock()
currentLW := make([]byte, len(l.lw))
n, err := unix.Pread(int(l.fd), currentLW, 0)
if err != nil {
return true, err
}
if n != len(l.lw) {
return true, nil
}
oldLW := l.lw
l.lw = currentLW
return !bytes.Equal(currentLW, oldLW), nil
}
// IsReadWrite indicates if the lock file is a read-write lock.
func (l *lockfile) IsReadWrite() bool {
	return !l.ro
}
// TouchedSince indicates if the lock file has been touched since the specified time
func (l *lockfile) TouchedSince(when time.Time) bool {
st, err := system.Fstat(int(l.fd))
if err != nil {
return true
}
mtim := st.Mtim()
touched := time.Unix(mtim.Unix())
return when.Before(touched)
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package domodel
import (
"strconv"
"strings"
"k8s.io/kops/pkg/model"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/do"
"k8s.io/kops/upup/pkg/fi/cloudup/dotasks"
)
// DropletBuilder configures droplets for the cluster
type DropletBuilder struct {
	*DOModelContext
	// BootstrapScriptBuilder renders the nodeup user-data for each instance group.
	BootstrapScriptBuilder *model.BootstrapScriptBuilder
	// Lifecycle is the intended lifecycle for the tasks this builder creates.
	Lifecycle *fi.Lifecycle
}

// Compile-time check that DropletBuilder implements fi.ModelBuilder.
var _ fi.ModelBuilder = &DropletBuilder{}
// Build creates one dotasks.Droplet task per instance group in the
// cluster and registers it with the model builder context.  It returns
// an error if the SSH key cannot be resolved or user-data rendering fails.
func (d *DropletBuilder) Build(c *fi.ModelBuilderContext) error {
	sshKeyName, err := d.SSHKeyName()
	if err != nil {
		return err
	}

	// The SSH key name ends with the key's fingerprint, which is what
	// the DO API uses to identify the key.
	splitSSHKeyName := strings.Split(sshKeyName, "-")
	sshKeyFingerPrint := splitSSHKeyName[len(splitSSHKeyName)-1]

	// replace "." with "-" since DO API does not accept "."
	clusterTag := do.TagKubernetesClusterNamePrefix + ":" + strings.Replace(d.ClusterName(), ".", "-", -1)
	clusterMasterTag := do.TagKubernetesClusterMasterPrefix + ":" + strings.Replace(d.ClusterName(), ".", "-", -1)

	masterIndexCount := 0

	// In the future, DigitalOcean will use Machine API to manage groups,
	// for now create d.InstanceGroups.Spec.MinSize amount of droplets
	for _, ig := range d.InstanceGroups {
		name := d.AutoscalingGroupName(ig)

		var droplet dotasks.Droplet
		droplet.Count = int(fi.Int32Value(ig.Spec.MinSize))
		droplet.Name = fi.String(name)
		// Propagate the builder's lifecycle to the task; previously the
		// Lifecycle field was declared on the builder but never applied.
		droplet.Lifecycle = d.Lifecycle

		// during alpha support we only allow 1 region
		// validation for only 1 region is done at this point
		droplet.Region = fi.String(d.Cluster.Spec.Subnets[0].Region)
		droplet.Size = fi.String(ig.Spec.MachineType)
		droplet.Image = fi.String(ig.Spec.Image)
		droplet.SSHKey = fi.String(sshKeyFingerPrint)
		droplet.Tags = []string{clusterTag}

		if ig.IsMaster() {
			masterIndexCount++
			// create tag based on etcd name. etcd name is now prefixed with etcd-
			// Ref: https://github.com/kubernetes/kops/commit/31f8cbd571964f19d3c31024ddba918998d29929
			clusterTagIndex := do.TagKubernetesClusterIndex + ":" + "etcd-" + strconv.Itoa(masterIndexCount)
			droplet.Tags = append(droplet.Tags, clusterTagIndex)
			droplet.Tags = append(droplet.Tags, clusterMasterTag)
			droplet.Tags = append(droplet.Tags, do.TagKubernetesInstanceGroup+":"+ig.Name)
		} else {
			droplet.Tags = append(droplet.Tags, do.TagKubernetesInstanceGroup+":"+ig.Name)
		}

		userData, err := d.BootstrapScriptBuilder.ResourceNodeUp(c, ig)
		if err != nil {
			return err
		}
		droplet.UserData = userData

		c.AddTask(&droplet)
	}
	return nil
}
Set lifecycle on Droplet task
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package domodel
import (
"strconv"
"strings"
"k8s.io/kops/pkg/model"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/do"
"k8s.io/kops/upup/pkg/fi/cloudup/dotasks"
)
// DropletBuilder configures droplets for the cluster
type DropletBuilder struct {
*DOModelContext
BootstrapScriptBuilder *model.BootstrapScriptBuilder
Lifecycle *fi.Lifecycle
}
var _ fi.ModelBuilder = &DropletBuilder{}
// Build creates one dotasks.Droplet task per instance group in the
// cluster and registers it with the model builder context.  It returns
// an error if the SSH key cannot be resolved or user-data rendering fails.
func (d *DropletBuilder) Build(c *fi.ModelBuilderContext) error {
	sshKeyName, err := d.SSHKeyName()
	if err != nil {
		return err
	}

	// The SSH key name ends with the key's fingerprint, which is what
	// the DO API uses to identify the key.
	splitSSHKeyName := strings.Split(sshKeyName, "-")
	sshKeyFingerPrint := splitSSHKeyName[len(splitSSHKeyName)-1]

	// replace "." with "-" since DO API does not accept "."
	clusterTag := do.TagKubernetesClusterNamePrefix + ":" + strings.Replace(d.ClusterName(), ".", "-", -1)
	clusterMasterTag := do.TagKubernetesClusterMasterPrefix + ":" + strings.Replace(d.ClusterName(), ".", "-", -1)

	masterIndexCount := 0

	// In the future, DigitalOcean will use Machine API to manage groups,
	// for now create d.InstanceGroups.Spec.MinSize amount of droplets
	for _, ig := range d.InstanceGroups {
		name := d.AutoscalingGroupName(ig)

		droplet := dotasks.Droplet{
			Count:     int(fi.Int32Value(ig.Spec.MinSize)),
			Name:      fi.String(name),
			Lifecycle: d.Lifecycle,

			// during alpha support we only allow 1 region
			// validation for only 1 region is done at this point
			Region: fi.String(d.Cluster.Spec.Subnets[0].Region),
			Size:   fi.String(ig.Spec.MachineType),
			Image:  fi.String(ig.Spec.Image),
			SSHKey: fi.String(sshKeyFingerPrint),
			Tags:   []string{clusterTag},
		}

		if ig.IsMaster() {
			masterIndexCount++
			// create tag based on etcd name. etcd name is now prefixed with etcd-
			// Ref: https://github.com/kubernetes/kops/commit/31f8cbd571964f19d3c31024ddba918998d29929
			clusterTagIndex := do.TagKubernetesClusterIndex + ":" + "etcd-" + strconv.Itoa(masterIndexCount)
			droplet.Tags = append(droplet.Tags, clusterTagIndex)
			droplet.Tags = append(droplet.Tags, clusterMasterTag)
			droplet.Tags = append(droplet.Tags, do.TagKubernetesInstanceGroup+":"+ig.Name)
		} else {
			droplet.Tags = append(droplet.Tags, do.TagKubernetesInstanceGroup+":"+ig.Name)
		}

		userData, err := d.BootstrapScriptBuilder.ResourceNodeUp(c, ig)
		if err != nil {
			return err
		}
		droplet.UserData = userData

		c.AddTask(&droplet)
	}
	return nil
}
|
package paxos
import (
"borg/assert"
"testing"
)
// TestCoordIgnoreOldMessages verifies that RSVPs addressed to a stale
// round (1) are ignored once the clock tick has forced a new round
// (INVITE:11 is observed below).
func TestCoordIgnoreOldMessages(t *testing.T) {
	ins := make(chan Msg)
	outs := SyncPutter(make(chan Msg))
	clock := make(chan int)
	tCh := make(chan string)
	done := make(chan int)
	nNodes := uint64(10) // this is arbitrary
	go func() {
		coordinator(1, UNUSED, nNodes, tCh, ins, outs, clock, logger)
		done <- 1
	}()
	tCh <- "foo"
	<-outs // discard INVITE:1

	clock <- 1 // force the start of a new round
	<-outs     // discard INVITE:11

	// These RSVPs belong to the superseded round and must be ignored.
	ins <- m("1:1:RSVP:1:0:")
	ins <- m("2:1:RSVP:1:0:")
	ins <- m("3:1:RSVP:1:0:")
	ins <- m("4:1:RSVP:1:0:")
	ins <- m("5:1:RSVP:1:0:")
	ins <- m("6:1:RSVP:1:0:")
	// Close ins exactly once: the original closed it a second time
	// after the assert, and closing a closed channel panics.
	close(ins)
	assert.Equal(t, 1, <-done, "")

	close(outs)
	close(clock)
	close(tCh)
}
// TestCoordStart verifies that the coordinator emits an INVITE for
// round 1 as soon as a target value arrives.
func TestCoordStart(t *testing.T) {
	ins := make(chan Msg)
	outs := SyncPutter(make(chan Msg))
	clock := make(chan int)
	tCh := make(chan string)
	nNodes := uint64(10) // this is arbitrary
	go coordinator(1, UNUSED, nNodes, tCh, ins, PutWrapper{1, 1, outs}, clock, logger)
	tCh <- "foo"
	assert.Equal(t, m("1:*:INVITE:1"), <-outs, "")

	close(ins)
	close(outs)
	close(clock)
	close(tCh)
}
// This is here mainly for triangulation.  It ensures we're not
// hardcoding crnd.
func TestCoordStartAlt(t *testing.T) {
	ins := make(chan Msg)
	outs := SyncPutter(make(chan Msg))
	clock := make(chan int)
	tCh := make(chan string)
	nNodes := uint64(10) // this is arbitrary
	// A coordinator with id 2 must invite with crnd 2, not a literal 1.
	go coordinator(2, UNUSED, nNodes, tCh, ins, PutWrapper{1, 2, outs}, clock, logger)
	tCh <- "foo"
	assert.Equal(t, m("2:*:INVITE:2"), <-outs, "")

	close(ins)
	close(outs)
	close(clock)
	close(tCh)
}
// TestCoordTargetNomination verifies that once a majority of RSVPs with
// no previously accepted value arrive (six of ten nodes, ids 2-7), the
// coordinator nominates its own target value.
func TestCoordTargetNomination(t *testing.T) {
	ins := make(chan Msg)
	outs := SyncPutter(make(chan Msg))
	clock := make(chan int)
	tCh := make(chan string)
	nNodes := uint64(10) // this is arbitrary
	go coordinator(1, UNUSED, nNodes, tCh, ins, PutWrapper{1, 1, outs}, clock, logger)
	tCh <- "foo"
	<-outs // discard INVITE
	ins <- m("2:1:RSVP:1:0:")
	ins <- m("3:1:RSVP:1:0:")
	ins <- m("4:1:RSVP:1:0:")
	ins <- m("5:1:RSVP:1:0:")
	ins <- m("6:1:RSVP:1:0:")
	ins <- m("7:1:RSVP:1:0:")
	assert.Equal(t, m("1:*:NOMINATE:1:foo"), <-outs, "")

	close(ins)
	close(outs)
	close(clock)
	close(tCh)
}
// TestCoordRestart verifies that a clock tick before a majority of
// RSVPs (only five of ten here) makes the coordinator start a new
// round: the next INVITE carries crnd 11 (advanced by nNodes).
func TestCoordRestart(t *testing.T) {
	ins := make(chan Msg)
	outs := SyncPutter(make(chan Msg))
	clock := make(chan int)
	tCh := make(chan string)
	nNodes := uint64(10) // this is arbitrary
	go coordinator(1, UNUSED, nNodes, tCh, ins, PutWrapper{1, 1, outs}, clock, logger)
	tCh <- "foo"
	<-outs // discard INVITE

	// never reach majority (force timeout)
	ins <- m("2:1:RSVP:1:0:")
	ins <- m("3:1:RSVP:1:0:")
	ins <- m("4:1:RSVP:1:0:")
	ins <- m("5:1:RSVP:1:0:")
	ins <- m("6:1:RSVP:1:0:")
	clock <- 1
	assert.Equal(t, m("1:*:INVITE:11"), <-outs, "")

	close(ins)
	close(outs)
	close(clock)
	close(tCh)
}
// TestCoordNonTargetNomination verifies that when some RSVP reports a
// previously accepted value (vrnd 1, vval "bar"), the coordinator must
// nominate that value instead of its own target "foo".
func TestCoordNonTargetNomination(t *testing.T) {
	ins := make(chan Msg)
	outs := SyncPutter(make(chan Msg))
	clock := make(chan int)
	tCh := make(chan string)
	nNodes := uint64(10) // this is arbitrary
	go coordinator(1, UNUSED, nNodes, tCh, ins, PutWrapper{1, 1, outs}, clock, logger)
	tCh <- "foo"
	<-outs // discard INVITE
	ins <- m("1:1:RSVP:1:0:")
	ins <- m("2:1:RSVP:1:0:")
	ins <- m("3:1:RSVP:1:0:")
	ins <- m("4:1:RSVP:1:0:")
	ins <- m("5:1:RSVP:1:0:")
	ins <- m("6:1:RSVP:1:1:bar")
	assert.Equal(t, m("1:*:NOMINATE:1:bar"), <-outs, "")

	close(ins)
	close(outs)
	close(clock)
	close(tCh)
}
// TestCoordOneNominationPerRound verifies that an extra RSVP arriving
// after a majority has already produced a NOMINATE does not trigger a
// second nomination in the same round.
func TestCoordOneNominationPerRound(t *testing.T) {
	ins := make(chan Msg)
	outs := SyncPutter(make(chan Msg))
	clock := make(chan int)
	tCh := make(chan string)
	done := make(chan int)
	nNodes := uint64(10) // this is arbitrary
	go func() {
		coordinator(1, UNUSED, nNodes, tCh, ins, PutWrapper{1, 1, outs}, clock, logger)
		done <- 1
	}()
	tCh <- "foo"
	<-outs // discard INVITE
	ins <- m("1:1:RSVP:1:0:")
	ins <- m("2:1:RSVP:1:0:")
	ins <- m("3:1:RSVP:1:0:")
	ins <- m("4:1:RSVP:1:0:")
	ins <- m("5:1:RSVP:1:0:")
	ins <- m("6:1:RSVP:1:0:")
	assert.Equal(t, m("1:*:NOMINATE:1:foo"), <-outs, "")
	// A late RSVP must not produce another NOMINATE.
	ins <- m("7:1:RSVP:1:0:")
	// Close ins exactly once: the original closed it again after the
	// assert below, and closing a closed channel panics.
	close(ins)
	assert.Equal(t, 1, <-done, "")

	close(outs)
	close(clock)
	close(tCh)
}
// TestCoordEachRoundResetsCval verifies that after a timeout forces a
// new round, a fresh majority re-nominates the target value under the
// new round number (11).
func TestCoordEachRoundResetsCval(t *testing.T) {
	ins := make(chan Msg)
	outs := SyncPutter(make(chan Msg))
	clock := make(chan int)
	tCh := make(chan string)
	nNodes := uint64(10) // this is arbitrary
	go coordinator(1, UNUSED, nNodes, tCh, ins, PutWrapper{1, 1, outs}, clock, logger)
	tCh <- "foo"
	<-outs // discard INVITE
	ins <- m("1:1:RSVP:1:0:")
	ins <- m("2:1:RSVP:1:0:")
	ins <- m("3:1:RSVP:1:0:")
	ins <- m("4:1:RSVP:1:0:")
	ins <- m("5:1:RSVP:1:0:")
	ins <- m("6:1:RSVP:1:0:")
	<-outs // discard NOMINATE

	clock <- 1 // force the start of a new round
	<-outs     // discard INVITE:11
	ins <- m("1:1:RSVP:11:0:")
	ins <- m("2:1:RSVP:11:0:")
	ins <- m("3:1:RSVP:11:0:")
	ins <- m("4:1:RSVP:11:0:")
	ins <- m("5:1:RSVP:11:0:")
	ins <- m("6:1:RSVP:11:0:")
	// Close ins exactly once: the original closed it a second time
	// after the assert, and closing a closed channel panics.
	close(ins)
	exp := m("1:*:NOMINATE:11:foo")
	assert.Equal(t, exp, <-outs, "")

	close(outs)
	close(clock)
	close(tCh)
}
// TestAbortIfNoProposal verifies that closing the target channel before
// any proposal is sent makes the coordinator goroutine exit cleanly.
func TestAbortIfNoProposal(t *testing.T) {
	ins := make(chan Msg)
	outs := SyncPutter(make(chan Msg))
	clock := make(chan int)
	tCh := make(chan string)
	done := make(chan int)
	nNodes := uint64(10) // this is arbitrary
	go func() {
		coordinator(1, UNUSED, nNodes, tCh, ins, outs, clock, logger)
		done <- 1
	}()
	close(tCh)
	assert.Equal(t, 1, <-done, "")

	close(ins)
	close(outs)
	close(clock)
}
refactor
package paxos
import (
"borg/assert"
"testing"
)
// TestCoordIgnoreOldMessages verifies that RSVPs addressed to a stale
// round (1) are ignored once the clock tick has forced a new round
// (INVITE:11 is observed below).
func TestCoordIgnoreOldMessages(t *testing.T) {
	outs := SyncPutter(make(chan Msg))
	done := make(chan int)
	nNodes := uint64(10)
	c := NewC(FakeCluster{outs, nNodes})
	go func() {
		c.process("foo", 1)
		done <- 1
	}()
	<-outs // discard INVITE:1

	c.clock <- 1 // force the start of a new round
	<-outs       // discard INVITE:11

	// These RSVPs belong to the superseded round and must be ignored.
	c.ins <- m("1:1:RSVP:1:0:")
	c.ins <- m("2:1:RSVP:1:0:")
	c.ins <- m("3:1:RSVP:1:0:")
	c.ins <- m("4:1:RSVP:1:0:")
	c.ins <- m("5:1:RSVP:1:0:")
	c.ins <- m("6:1:RSVP:1:0:")
	// Close c.ins exactly once: the original closed it a second time
	// after the assert, and closing a closed channel panics.
	close(c.ins)
	assert.Equal(t, 1, <-done, "")

	close(outs)
	close(c.clock)
}
func TestCoordStart(t *testing.T) {
ins := make(chan Msg)
outs := SyncPutter(make(chan Msg))
clock := make(chan int)
tCh := make(chan string)
nNodes := uint64(10) // this is arbitrary
go coordinator(1, UNUSED, nNodes, tCh, ins, PutWrapper{1, 1, outs}, clock, logger)
tCh <- "foo"
assert.Equal(t, m("1:*:INVITE:1"), <-outs, "")
close(ins)
close(outs)
close(clock)
close(tCh)
}
// This is here mainly for triangulation. It ensures we're not
// hardcoding crnd.
func TestCoordStartAlt(t *testing.T) {
ins := make(chan Msg)
outs := SyncPutter(make(chan Msg))
clock := make(chan int)
tCh := make(chan string)
nNodes := uint64(10) // this is arbitrary
go coordinator(2, UNUSED, nNodes, tCh, ins, PutWrapper{1, 2, outs}, clock, logger)
tCh <- "foo"
assert.Equal(t, m("2:*:INVITE:2"), <-outs, "")
close(ins)
close(outs)
close(clock)
close(tCh)
}
func TestCoordTargetNomination(t *testing.T) {
ins := make(chan Msg)
outs := SyncPutter(make(chan Msg))
clock := make(chan int)
tCh := make(chan string)
nNodes := uint64(10) // this is arbitrary
go coordinator(1, UNUSED, nNodes, tCh, ins, PutWrapper{1, 1, outs}, clock, logger)
tCh <- "foo"
<-outs //discard INVITE
ins <- m("2:1:RSVP:1:0:")
ins <- m("3:1:RSVP:1:0:")
ins <- m("4:1:RSVP:1:0:")
ins <- m("5:1:RSVP:1:0:")
ins <- m("6:1:RSVP:1:0:")
ins <- m("7:1:RSVP:1:0:")
assert.Equal(t, m("1:*:NOMINATE:1:foo"), <-outs, "")
close(ins)
close(outs)
close(clock)
close(tCh)
}
func TestCoordRestart(t *testing.T) {
ins := make(chan Msg)
outs := SyncPutter(make(chan Msg))
clock := make(chan int)
tCh := make(chan string)
nNodes := uint64(10) // this is arbitrary
go coordinator(1, UNUSED, nNodes, tCh, ins, PutWrapper{1, 1, outs}, clock, logger)
tCh <- "foo"
<-outs //discard INVITE
// never reach majority (force timeout)
ins <- m("2:1:RSVP:1:0:")
ins <- m("3:1:RSVP:1:0:")
ins <- m("4:1:RSVP:1:0:")
ins <- m("5:1:RSVP:1:0:")
ins <- m("6:1:RSVP:1:0:")
clock <- 1
assert.Equal(t, m("1:*:INVITE:11"), <-outs, "")
close(ins)
close(outs)
close(clock)
close(tCh)
}
// TestCoordNonTargetNomination verifies that if any RSVP in the quorum
// reports a previously accepted value ("bar" at vrnd 1), the coordinator
// must nominate that value instead of its own target "foo".
func TestCoordNonTargetNomination(t *testing.T) {
	ins := make(chan Msg)
	outs := SyncPutter(make(chan Msg))
	clock := make(chan int)
	tCh := make(chan string)
	nNodes := uint64(10) // this is arbitrary
	go coordinator(1, UNUSED, nNodes, tCh, ins, PutWrapper{1, 1, outs}, clock, logger)
	tCh <- "foo"
	<-outs // discard the initial INVITE
	ins <- m("1:1:RSVP:1:0:")
	ins <- m("2:1:RSVP:1:0:")
	ins <- m("3:1:RSVP:1:0:")
	ins <- m("4:1:RSVP:1:0:")
	ins <- m("5:1:RSVP:1:0:")
	// The sixth RSVP carries an already-accepted value "bar".
	ins <- m("6:1:RSVP:1:1:bar")
	assert.Equal(t, m("1:*:NOMINATE:1:bar"), <-outs, "")
	close(ins)
	close(outs)
	close(clock)
	close(tCh)
}
// TestCoordOneNominationPerRound verifies that the coordinator sends at
// most one NOMINATE per round: a seventh RSVP arriving after the quorum
// has already produced a nomination must not produce a second one.
func TestCoordOneNominationPerRound(t *testing.T) {
	ins := make(chan Msg)
	outs := SyncPutter(make(chan Msg))
	clock := make(chan int)
	tCh := make(chan string)
	done := make(chan int)
	nNodes := uint64(10) // this is arbitrary
	go func() {
		coordinator(1, UNUSED, nNodes, tCh, ins, PutWrapper{1, 1, outs}, clock, logger)
		done <- 1
	}()
	tCh <- "foo"
	<-outs // discard the initial INVITE
	ins <- m("1:1:RSVP:1:0:")
	ins <- m("2:1:RSVP:1:0:")
	ins <- m("3:1:RSVP:1:0:")
	ins <- m("4:1:RSVP:1:0:")
	ins <- m("5:1:RSVP:1:0:")
	ins <- m("6:1:RSVP:1:0:")
	assert.Equal(t, m("1:*:NOMINATE:1:foo"), <-outs, "")
	// A late RSVP after the nomination; then closing ins lets the
	// coordinator exit, proving no second NOMINATE was sent.
	ins <- m("7:1:RSVP:1:0:")
	close(ins)
	assert.Equal(t, 1, <-done, "")
	// BUG FIX: the original closed ins a second time here, which panics
	// with "close of closed channel".
	close(outs)
	close(clock)
	close(tCh)
}
// TestCoordEachRoundResetsCval verifies that after a forced restart the
// coordinator nominates its target value again in the new round
// (NOMINATE:11:foo), i.e. the chosen value is reset per round.
func TestCoordEachRoundResetsCval(t *testing.T) {
	ins := make(chan Msg)
	outs := SyncPutter(make(chan Msg))
	clock := make(chan int)
	tCh := make(chan string)
	nNodes := uint64(10) // this is arbitrary
	go coordinator(1, UNUSED, nNodes, tCh, ins, PutWrapper{1, 1, outs}, clock, logger)
	tCh <- "foo"
	<-outs // discard the initial INVITE
	ins <- m("1:1:RSVP:1:0:")
	ins <- m("2:1:RSVP:1:0:")
	ins <- m("3:1:RSVP:1:0:")
	ins <- m("4:1:RSVP:1:0:")
	ins <- m("5:1:RSVP:1:0:")
	ins <- m("6:1:RSVP:1:0:")
	<-outs     // discard NOMINATE
	clock <- 1 // force the start of a new round
	<-outs     // discard INVITE:11
	// A fresh quorum of empty RSVPs for the new round.
	ins <- m("1:1:RSVP:11:0:")
	ins <- m("2:1:RSVP:11:0:")
	ins <- m("3:1:RSVP:11:0:")
	ins <- m("4:1:RSVP:11:0:")
	ins <- m("5:1:RSVP:11:0:")
	ins <- m("6:1:RSVP:11:0:")
	close(ins)
	exp := m("1:*:NOMINATE:11:foo")
	assert.Equal(t, exp, <-outs, "")
	// BUG FIX: the original closed ins a second time here, which panics
	// with "close of closed channel".
	close(outs)
	close(clock)
	close(tCh)
}
// TestAbortIfNoProposal verifies that the coordinator exits cleanly when
// its target channel is closed before any proposal was submitted.
func TestAbortIfNoProposal(t *testing.T) {
	ins := make(chan Msg)
	outs := SyncPutter(make(chan Msg))
	clock := make(chan int)
	tCh := make(chan string)
	done := make(chan int)
	nNodes := uint64(10) // this is arbitrary
	go func() {
		coordinator(1, UNUSED, nNodes, tCh, ins, outs, clock, logger)
		done <- 1
	}()
	// Closing tCh with no value sent must make the coordinator return.
	close(tCh)
	assert.Equal(t, 1, <-done, "")
	close(ins)
	close(outs)
	close(clock)
	// BUG FIX: the original closed tCh a second time here, which panics
	// with "close of closed channel".
}
|
package service
import (
"crypto/hmac"
"hash"
)
// Signable is a type which can be signed.
// Message returns the raw bytes to authenticate; HashFunc returns the
// constructor of the hash used to build the HMAC.
type Signable interface {
	Message() []byte
	HashFunc() func() hash.Hash
}
// Signed is a type which has been signed.
// The signature can be authenticated using the Signable interface and
// recreating the signature.
type Signed interface {
	Signable
	// Signature returns the signature bytes to verify.
	Signature() []byte
}
// IsAuthentic returns true if the signed message has a correct signature
// for the given key. The signature is recomputed with HMAC over
// msg.Message() and compared in constant time via hmac.Equal.
func IsAuthentic(msg Signed, key []byte) (bool, error) {
	h := hmac.New(msg.HashFunc(), key)
	if _, err := h.Write(msg.Message()); err != nil {
		return false, err
	}
	expected := h.Sum(nil)
	return hmac.Equal(msg.Signature(), expected), nil
}
// Sign signs a signable message with the given key and returns the
// signature, computed as an HMAC over msg.Message() using the hash
// constructor supplied by the message itself.
func Sign(msg Signable, key []byte) ([]byte, error) {
	h := hmac.New(msg.HashFunc(), key)
	if _, err := h.Write(msg.Message()); err != nil {
		return nil, err
	}
	return h.Sum(nil), nil
}
Change IsAuthentic to return an error if an empty message is passed.
package service
import (
"crypto/hmac"
"errors"
"hash"
)
// Signable is a type which can be signed.
// Message returns the raw bytes to authenticate; HashFunc returns the
// constructor of the hash used to build the HMAC.
type Signable interface {
	Message() []byte
	HashFunc() func() hash.Hash
}
// Signed is a type which has been signed.
// The signature can be authenticated using the Signable interface and
// recreating the signature.
type Signed interface {
	Signable
	// Signature returns the signature bytes to verify.
	Signature() []byte
}
// IsAuthentic returns true if the signed message has a correct signature
// for the given key. It returns an error for an empty message or if the
// MAC computation fails.
func IsAuthentic(msg Signed, key []byte) (bool, error) {
	msgBytes := msg.Message()
	// BUG FIX: the original only rejected nil messages; a non-nil,
	// zero-length message is just as empty and must also be rejected.
	if len(msgBytes) == 0 {
		return false, errors.New("empty message")
	}
	mac := hmac.New(msg.HashFunc(), key)
	if _, err := mac.Write(msgBytes); err != nil {
		return false, err
	}
	// hmac.Equal compares without leaking timing information.
	return hmac.Equal(msg.Signature(), mac.Sum(nil)), nil
}
// Sign signs a signable message with the given key and returns the
// signature, computed as an HMAC over msg.Message() using the hash
// constructor supplied by the message itself.
func Sign(msg Signable, key []byte) ([]byte, error) {
	h := hmac.New(msg.HashFunc(), key)
	if _, err := h.Write(msg.Message()); err != nil {
		return nil, err
	}
	return h.Sum(nil), nil
}
|
// Copyright 2017 Francisco Souza. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fakestorage
import (
"bytes"
"compress/gzip"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"reflect"
"strings"
"testing"
"cloud.google.com/go/storage"
"google.golang.org/api/googleapi"
)
// TestServerClientObjectWriter writes a large object through the storage
// client with different chunk sizes and verifies the stored content,
// CRC32C checksum, MD5 hash, content type and metadata.
func TestServerClientObjectWriter(t *testing.T) {
	const baseContent = "some nice content"
	// Repeat enough times to exceed a single upload chunk.
	content := strings.Repeat(baseContent+"\n", googleapi.MinUploadChunkSize)
	checksum := uint32Checksum([]byte(content))
	hash := md5Hash([]byte(content))
	runServersTest(t, nil, func(t *testing.T, server *Server) {
		tests := []struct {
			testCase   string
			bucketName string
			objectName string
			chunkSize  int
		}{
			{
				"default chunk size",
				"some-bucket",
				"some/interesting/object.txt",
				googleapi.DefaultUploadChunkSize,
			},
			{
				"small chunk size",
				"other-bucket",
				"other/interesting/object.txt",
				googleapi.MinUploadChunkSize,
			},
		}
		for _, test := range tests {
			test := test // capture range variable for the subtest closure
			t.Run(test.testCase, func(t *testing.T) {
				const contentType = "text/plain; charset=utf-8"
				server.CreateBucketWithOpts(CreateBucketOpts{Name: test.bucketName})
				client := server.Client()
				objHandle := client.Bucket(test.bucketName).Object(test.objectName)
				w := objHandle.NewWriter(context.Background())
				w.ChunkSize = test.chunkSize
				w.ContentType = contentType
				w.Metadata = map[string]string{
					"foo": "bar",
				}
				w.Write([]byte(content))
				err := w.Close()
				if err != nil {
					t.Fatal(err)
				}
				obj, err := server.GetObject(test.bucketName, test.objectName)
				if err != nil {
					t.Fatal(err)
				}
				if string(obj.Content) != content {
					// Report counts of the repeated base string rather than
					// dumping the full (huge) contents.
					n := strings.Count(string(obj.Content), baseContent)
					t.Errorf("wrong content returned\nwant %dx%q\ngot %dx%q",
						googleapi.MinUploadChunkSize, baseContent,
						n, baseContent)
				}
				if returnedChecksum := w.Attrs().CRC32C; returnedChecksum != checksum {
					t.Errorf("wrong writer.Attrs() checksum returned\nwant %d\ngot %d", checksum, returnedChecksum)
				}
				if base64Checksum := encodedChecksum(uint32ToBytes(checksum)); obj.Crc32c != base64Checksum {
					t.Errorf("wrong obj.Crc32c returned\nwant %s\ngot %s", base64Checksum, obj.Crc32c)
				}
				if returnedHash := w.Attrs().MD5; !bytes.Equal(returnedHash, hash) {
					t.Errorf("wrong writer.Attrs() hash returned\nwant %d\ngot %d", hash, returnedHash)
				}
				if stringHash := encodedHash(hash); obj.Md5Hash != stringHash {
					t.Errorf("wrong obj.Md5Hash returned\nwant %s\ngot %s", stringHash, obj.Md5Hash)
				}
				if obj.ContentType != contentType {
					t.Errorf("wrong content-type\nwant %q\ngot %q", contentType, obj.ContentType)
				}
				if !reflect.DeepEqual(obj.Metadata, w.Metadata) {
					t.Errorf("wrong meta data\nwant %+v\ngot %+v", w.Metadata, obj.Metadata)
				}
			})
		}
	})
}
// checkChecksum asserts that obj.Crc32c matches the encoded CRC32C
// checksum of content.
func checkChecksum(t *testing.T, content []byte, obj Object) {
	t.Helper()
	want := encodedCrc32cChecksum(content)
	if got := obj.Crc32c; got != want {
		t.Errorf("wrong checksum in the object\nwant %s\ngot %s", want, got)
	}
}
// TestServerClientObjectWriterOverwrite verifies that writing to an
// existing object replaces its content, checksum and content type.
func TestServerClientObjectWriterOverwrite(t *testing.T) {
	runServersTest(t, nil, func(t *testing.T, server *Server) {
		const content = "other content"
		const contentType = "text/plain"
		server.CreateObject(Object{
			BucketName:  "some-bucket",
			Name:        "some-object.txt",
			Content:     []byte("some content"),
			ContentType: "some-stff",
		})
		objHandle := server.Client().Bucket("some-bucket").Object("some-object.txt")
		w := objHandle.NewWriter(context.Background())
		w.ContentType = contentType
		w.Write([]byte(content))
		err := w.Close()
		if err != nil {
			t.Fatal(err)
		}
		obj, err := server.GetObject("some-bucket", "some-object.txt")
		if err != nil {
			t.Fatal(err)
		}
		if string(obj.Content) != content {
			t.Errorf("wrong content in the object\nwant %q\ngot %q", content, string(obj.Content))
		}
		checkChecksum(t, []byte(content), obj)
		if obj.ContentType != contentType {
			// BUG FIX: the error message contained a typo ("wsant").
			t.Errorf("wrong content-type\nwant %q\ngot %q", contentType, obj.ContentType)
		}
	})
}
// TestServerClientObjectWriterWithDoesNotExistPrecondition verifies that
// a DoesNotExist precondition allows the first write, rejects a second
// write with HTTP 412, and leaves the original content untouched.
func TestServerClientObjectWriterWithDoesNotExistPrecondition(t *testing.T) {
	runServersTest(t, nil, func(t *testing.T, server *Server) {
		const originalContent = "original content"
		const originalContentType = "text/plain"
		const bucketName = "some-bucket"
		const objectName = "some-object-2.txt"
		bucket := server.Client().Bucket(bucketName)
		if err := bucket.Create(context.Background(), "my-project", nil); err != nil {
			t.Fatal(err)
		}
		objHandle := bucket.Object(objectName)
		// First write: object does not exist yet, so it must succeed.
		firstWriter := objHandle.If(storage.Conditions{DoesNotExist: true}).NewWriter(context.Background())
		firstWriter.ContentType = originalContentType
		firstWriter.Write([]byte(originalContent))
		if err := firstWriter.Close(); err != nil {
			t.Fatal(err)
		}
		firstReader, err := objHandle.NewReader(context.Background())
		if err != nil {
			t.Fatal(err)
		}
		objectContent, err := ioutil.ReadAll(firstReader)
		if err != nil {
			t.Fatal(err)
		}
		if string(objectContent) != originalContent {
			t.Errorf("wrong content in the object after initial write with precondition\nwant %q\ngot %q", originalContent, string(objectContent))
		}
		// Second write with the same precondition: the object now exists,
		// so Close must fail with a 412 Precondition Failed.
		secondWriter := objHandle.If(storage.Conditions{DoesNotExist: true}).NewWriter(context.Background())
		secondWriter.ContentType = "application/json"
		secondWriter.Write([]byte("new content"))
		err = secondWriter.Close()
		if err == nil {
			t.Fatal("expected overwriting existing object to fail, but received no error")
		}
		if err.Error() != "googleapi: Error 412: Precondition failed" {
			t.Errorf("expected HTTP 412 precondition failed error, but got %v", err)
		}
		// The failed write must not have altered the stored content.
		secondReader, err := objHandle.NewReader(context.Background())
		if err != nil {
			t.Fatal(err)
		}
		objectContentAfterFailedPrecondition, err := ioutil.ReadAll(secondReader)
		if err != nil {
			t.Fatal(err)
		}
		if string(objectContentAfterFailedPrecondition) != originalContent {
			t.Errorf("wrong content in the object after failed precondition\nwant %q\ngot %q", originalContent, string(objectContentAfterFailedPrecondition))
		}
	})
}
// TestServerClientObjectWriterBucketNotFound verifies that closing a
// writer aimed at a bucket that was never created reports an error.
func TestServerClientObjectWriterBucketNotFound(t *testing.T) {
	runServersTest(t, nil, func(t *testing.T, server *Server) {
		handle := server.Client().Bucket("some-bucket").Object("some/interesting/object.txt")
		writer := handle.NewWriter(context.Background())
		writer.Write([]byte("whatever"))
		if err := writer.Close(); err == nil {
			t.Fatal("unexpected <nil> error")
		}
	})
}
// TestServerClientSimpleUpload exercises the media upload endpoint over
// raw HTTP and verifies status, stored content, content type and checksum.
func TestServerClientSimpleUpload(t *testing.T) {
	server := NewServer(nil)
	defer server.Stop()
	server.CreateBucketWithOpts(CreateBucketOpts{Name: "other-bucket"})
	const data = "some nice content"
	const contentType = "text/plain"
	req, err := http.NewRequest("POST", server.URL()+"/storage/v1/b/other-bucket/o?uploadType=media&name=some/nice/object.txt", strings.NewReader(data))
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Set("Content-Type", contentType)
	client := http.Client{
		Transport: &http.Transport{
			// #nosec
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	expectedStatus := http.StatusOK
	if resp.StatusCode != expectedStatus {
		t.Errorf("wrong status code\nwant %d\ngot %d", expectedStatus, resp.StatusCode)
	}
	obj, err := server.GetObject("other-bucket", "some/nice/object.txt")
	if err != nil {
		t.Fatal(err)
	}
	if string(obj.Content) != data {
		// BUG FIX: want/got arguments were swapped in the error message.
		t.Errorf("wrong content\nwant %q\ngot %q", data, string(obj.Content))
	}
	if obj.ContentType != contentType {
		t.Errorf("wrong content type\nwant %q\ngot %q", contentType, obj.ContentType)
	}
	checkChecksum(t, []byte(data), obj)
}
// TestServerClientSignedUpload exercises a signed-URL style PUT upload and
// verifies status, stored content, content type, metadata and checksum.
func TestServerClientSignedUpload(t *testing.T) {
	server, err := NewServerWithOptions(Options{PublicHost: "127.0.0.1"})
	if err != nil {
		t.Fatalf("could not start server: %v", err)
	}
	defer server.Stop()
	server.CreateBucketWithOpts(CreateBucketOpts{Name: "other-bucket"})
	const data = "some nice content"
	const contentType = "text/plain"
	req, err := http.NewRequest("PUT", server.URL()+"/other-bucket/some/nice/object.txt?X-Goog-Algorithm=GOOG4-RSA-SHA256", strings.NewReader(data))
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Set("Content-Type", contentType)
	// X-Goog-Meta-* headers become object metadata (lower-cased key).
	req.Header.Set("X-Goog-Meta-Key", "Value")
	client := http.Client{
		Transport: &http.Transport{
			// #nosec
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	expectedStatus := http.StatusOK
	if resp.StatusCode != expectedStatus {
		t.Errorf("wrong status code\nwant %d\ngot %d", expectedStatus, resp.StatusCode)
	}
	obj, err := server.GetObject("other-bucket", "some/nice/object.txt")
	if err != nil {
		t.Fatal(err)
	}
	if string(obj.Content) != data {
		// BUG FIX: want/got arguments were swapped in the error message.
		t.Errorf("wrong content\nwant %q\ngot %q", data, string(obj.Content))
	}
	if obj.ContentType != contentType {
		t.Errorf("wrong content type\nwant %q\ngot %q", contentType, obj.ContentType)
	}
	if want := map[string]string{"key": "Value"}; !reflect.DeepEqual(obj.Metadata, want) {
		t.Errorf("wrong metadata\nwant %q\ngot %q", want, obj.Metadata)
	}
	checkChecksum(t, []byte(data), obj)
}
// TestServerClientSignedUploadBucketCNAME exercises a signed upload
// addressed via a bucket CNAME host and verifies the returned object
// metadata (name, content type, MD5 hash).
func TestServerClientSignedUploadBucketCNAME(t *testing.T) {
	url := "https://mybucket.mydomain.com:4443/files/txt/text-02.txt?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=fake-gcs&X-Goog-Expires=3600&X-Goog-SignedHeaders=host&X-Goog-Signature=fake-gc"
	expectedName := "files/txt/text-02.txt"
	expectedContentType := "text/plain"
	expectedHash := "bHupxaFBQh4cA8uYB8l8dA=="
	opts := Options{
		InitialObjects: []Object{
			{BucketName: "mybucket.mydomain.com", Name: "files/txt/text-01.txt", Content: []byte("something")},
		},
	}
	server, err := NewServerWithOptions(opts)
	if err != nil {
		t.Fatal(err)
	}
	// BUG FIX: the server was never stopped, leaking it for the rest of
	// the test run; every sibling test defers Stop.
	defer server.Stop()
	client := server.HTTPClient()
	req, err := http.NewRequest(http.MethodPut, url, strings.NewReader("something else"))
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Set("Content-Type", "text/plain")
	resp, err := client.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Errorf("wrong status returned\nwant %d\ngot %d", http.StatusOK, resp.StatusCode)
	}
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}
	var obj Object
	if err := json.Unmarshal(data, &obj); err != nil {
		t.Fatal(err)
	}
	if obj.Name != expectedName {
		t.Errorf("wrong filename\nwant %q\ngot %q", expectedName, obj.Name)
	}
	if obj.ContentType != expectedContentType {
		t.Errorf("wrong content type\nwant %q\ngot %q", expectedContentType, obj.ContentType)
	}
	if obj.Md5Hash != expectedHash {
		t.Errorf("wrong md5 hash\nwant %q\ngot %q", expectedHash, obj.Md5Hash)
	}
}
// TestServerClientUploadWithPredefinedAclPublicRead uploads with
// predefinedAcl=publicRead and verifies the ACL grants AllUsers read
// access, plus content, content encoding, content type and checksum.
func TestServerClientUploadWithPredefinedAclPublicRead(t *testing.T) {
	server := NewServer(nil)
	defer server.Stop()
	server.CreateBucketWithOpts(CreateBucketOpts{Name: "other-bucket"})
	const data = "some nice content"
	const contentType = "text/plain"
	const contentEncoding = "gzip"
	req, err := http.NewRequest("POST", server.URL()+"/storage/v1/b/other-bucket/o?predefinedAcl=publicRead&uploadType=media&name=some/nice/object.txt&contentEncoding="+contentEncoding, strings.NewReader(data))
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Set("Content-Type", contentType)
	client := http.Client{
		Transport: &http.Transport{
			// #nosec
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	expectedStatus := http.StatusOK
	if resp.StatusCode != expectedStatus {
		t.Errorf("wrong status code\nwant %d\ngot %d", expectedStatus, resp.StatusCode)
	}
	obj, err := server.GetObject("other-bucket", "some/nice/object.txt")
	if err != nil {
		t.Fatal(err)
	}
	attrs, err := server.Client().Bucket("other-bucket").Object("some/nice/object.txt").Attrs(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	if attrs.ContentEncoding != contentEncoding {
		t.Errorf("wrong contentEncoding\nwant %q\ngot %q", contentEncoding, attrs.ContentEncoding)
	}
	acl, err := server.Client().Bucket("other-bucket").Object("some/nice/object.txt").ACL().List(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	if !isACLPublic(acl) {
		t.Errorf("wrong acl\ngot %+v", acl)
	}
	if string(obj.Content) != data {
		// BUG FIX: want/got arguments were swapped in the error message.
		t.Errorf("wrong content\nwant %q\ngot %q", data, string(obj.Content))
	}
	if obj.ContentType != contentType {
		t.Errorf("wrong content type\nwant %q\ngot %q", contentType, obj.ContentType)
	}
	checkChecksum(t, []byte(data), obj)
}
// TestServerClientSimpleUploadNoName checks that a media upload without a
// name query parameter is rejected with a 400 Bad Request.
func TestServerClientSimpleUploadNoName(t *testing.T) {
	server := NewServer(nil)
	defer server.Stop()
	server.CreateBucketWithOpts(CreateBucketOpts{Name: "other-bucket"})
	const data = "some nice content"
	uploadURL := server.URL() + "/storage/v1/b/other-bucket/o?uploadType=media"
	req, err := http.NewRequest("POST", uploadURL, strings.NewReader(data))
	if err != nil {
		t.Fatal(err)
	}
	httpClient := http.Client{
		Transport: &http.Transport{
			// #nosec
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	if want := http.StatusBadRequest; resp.StatusCode != want {
		t.Errorf("wrong status returned\nwant %d\ngot %d", want, resp.StatusCode)
	}
}
// TestServerInvalidUploadType checks that an unknown uploadType query
// value ("bananas") is rejected with a 400 Bad Request.
func TestServerInvalidUploadType(t *testing.T) {
	server := NewServer(nil)
	defer server.Stop()
	server.CreateBucketWithOpts(CreateBucketOpts{Name: "other-bucket"})
	const data = "some nice content"
	req, err := http.NewRequest("POST", server.URL()+"/storage/v1/b/other-bucket/o?uploadType=bananas&name=some-object.txt", strings.NewReader(data))
	if err != nil {
		t.Fatal(err)
	}
	client := http.Client{
		Transport: &http.Transport{
			// #nosec
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	expectedStatus := http.StatusBadRequest
	if resp.StatusCode != expectedStatus {
		t.Errorf("wrong status returned\nwant %d\ngot %d", expectedStatus, resp.StatusCode)
	}
}
// TestParseContentRange is a table-driven test for parseContentRange,
// covering valid Content-Range headers (known/unknown range and total)
// and malformed ones that must return an error.
func TestParseContentRange(t *testing.T) {
	t.Parallel()
	goodHeaderTests := []struct {
		header string
		output contentRange
	}{
		{
			"bytes */1024", // End of a streaming request, total is now known
			contentRange{KnownTotal: true, Start: -1, End: -1, Total: 1024},
		},
		{
			"bytes 1024-2047/4096", // Range with a known total
			contentRange{KnownRange: true, KnownTotal: true, Start: 1024, End: 2047, Total: 4096},
		},
		{
			"bytes 0-1024/*", // A streaming request, unknown total
			contentRange{KnownRange: true, Start: 0, End: 1024, Total: -1},
		},
	}
	for _, test := range goodHeaderTests {
		test := test // capture range variable for the parallel subtest
		t.Run(test.header, func(t *testing.T) {
			t.Parallel()
			output, err := parseContentRange(test.header)
			if output != test.output {
				t.Fatalf("output is different.\nexpected: %+v\n  actual: %+v\n", test.output, output)
			}
			if err != nil {
				t.Fatal(err)
			}
		})
	}
	badHeaderTests := []string{
		"none",                // Unsupported unit "none"
		"bytes 20",            // No slash to split range from size
		"bytes 1/4",           // Single-field range
		"bytes start-20/100",  // Non-integer range start
		"bytes 20-end/100",    // Non-integer range end
		"bytes 100-200/total", // Non-integer size
		"bytes */*",           // Unknown range or size
	}
	for _, test := range badHeaderTests {
		test := test // capture range variable for the parallel subtest
		t.Run(test, func(t *testing.T) {
			t.Parallel()
			_, err := parseContentRange(test)
			if err == nil {
				t.Fatalf("Expected err!=<nil>, but was %v", err)
			}
		})
	}
}
// this is to support the Ruby SDK.
// TestServerUndocumentedResumableUploadAPI drives the X-Goog-Upload-*
// header protocol: a "start" command must answer with status "active"
// and an upload URL; "upload, finalize" against that URL must answer
// with status "final".
func TestServerUndocumentedResumableUploadAPI(t *testing.T) {
	bucketName := "testbucket"
	runServersTest(t, nil, func(t *testing.T, server *Server) {
		t.Run("test headers", func(t *testing.T) {
			server.CreateBucketWithOpts(CreateBucketOpts{Name: bucketName})
			client := server.HTTPClient()
			url := server.URL()
			body := strings.NewReader("{\"contentType\": \"application/json\"}")
			req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/upload/storage/v1/b/%s/o?name=testobj", url, bucketName), body)
			if err != nil {
				t.Fatal(err)
			}
			// Phase 1: announce a resumable upload.
			req.Header.Set("X-Goog-Upload-Protocol", "resumable")
			req.Header.Set("X-Goog-Upload-Command", "start")
			resp, err := client.Do(req)
			if err != nil {
				t.Fatal(err)
			}
			// Drain and close so the transport can reuse the connection.
			defer func() {
				_, _ = io.Copy(ioutil.Discard, resp.Body)
				_ = resp.Body.Close()
			}()
			if resp.StatusCode != 200 {
				t.Errorf("expected a 200 response, got: %d", resp.StatusCode)
			}
			if hdr := resp.Header.Get("X-Goog-Upload-Status"); hdr != "active" {
				t.Errorf("X-Goog-Upload-Status response header expected 'active' got: %s", hdr)
			}
			uploadURL := resp.Header.Get("X-Goog-Upload-URL")
			if uploadURL == "" {
				t.Error("X-Goog-Upload-URL did not return upload url")
			}
			// Phase 2: send the data and finalize in one request.
			body = strings.NewReader("{\"test\": \"foo\"}")
			req, err = http.NewRequest(http.MethodPost, uploadURL, body)
			if err != nil {
				t.Fatal(err)
			}
			req.Header.Set("X-Goog-Upload-Command", "upload, finalize")
			resp2, err := client.Do(req)
			if err != nil {
				t.Fatal(err)
			}
			defer func() {
				_, _ = io.Copy(ioutil.Discard, resp2.Body)
				_ = resp2.Body.Close()
			}()
			if resp2.StatusCode != 200 {
				t.Errorf("expected a 200 response, got: %d", resp2.StatusCode)
			}
			if hdr := resp2.Header.Get("X-Goog-Upload-Status"); hdr != "final" {
				t.Errorf("X-Goog-Upload-Status response header expected 'final' got: %s", hdr)
			}
		})
	})
}
// this is to support the Java SDK.
// TestServerGzippedUpload uploads a gzip-compressed body with
// Content-Encoding: gzip and verifies the server stores the decompressed
// content.
func TestServerGzippedUpload(t *testing.T) {
	const bucketName = "testbucket"
	runServersTest(t, nil, func(t *testing.T, server *Server) {
		t.Run("test headers", func(t *testing.T) {
			server.CreateBucketWithOpts(CreateBucketOpts{Name: bucketName})
			client := server.HTTPClient()
			var buf bytes.Buffer
			const content = "some interesting content perhaps?"
			writer := gzip.NewWriter(&buf)
			_, err := writer.Write([]byte(content))
			if err != nil {
				t.Fatal(err)
			}
			// Close flushes the gzip stream; required before sending.
			err = writer.Close()
			if err != nil {
				t.Fatal(err)
			}
			url := server.URL()
			req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/upload/storage/v1/b/%s/o?name=testobj&uploadType=media", url, bucketName), &buf)
			if err != nil {
				t.Fatal(err)
			}
			req.Header.Set("Content-Encoding", "gzip")
			resp, err := client.Do(req)
			if err != nil {
				t.Fatal(err)
			}
			// BUG FIX: the response body was never closed, leaking the
			// connection.
			defer resp.Body.Close()
			if resp.StatusCode != 200 {
				t.Errorf("expected a 200 response, got: %d", resp.StatusCode)
			}
			obj, err := server.GetObject(bucketName, "testobj")
			if err != nil {
				t.Fatal(err)
			}
			if string(obj.Content) != content {
				t.Errorf("wrong content\nwant %q\ngot %q", content, obj.Content)
			}
		})
	})
}
// isACLPublic reports whether the ACL contains a rule granting read
// access to all users.
func isACLPublic(acl []storage.ACLRule) bool {
	for i := range acl {
		allUsers := acl[i].Entity == storage.AllUsers
		reader := acl[i].Role == storage.RoleReader
		if allUsers && reader {
			return true
		}
	}
	return false
}
Add an assertion to ensure we can read the object back after writing it.
Related to #458.
// Copyright 2017 Francisco Souza. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fakestorage
import (
"bytes"
"compress/gzip"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"reflect"
"strings"
"testing"
"cloud.google.com/go/storage"
"google.golang.org/api/googleapi"
)
// TestServerClientObjectWriter writes a large object through the storage
// client with different chunk sizes and verifies the stored content,
// CRC32C checksum, MD5 hash, content type and metadata, and that the
// same content can be read back through an object reader.
func TestServerClientObjectWriter(t *testing.T) {
	const baseContent = "some nice content"
	// Repeat enough times to exceed a single upload chunk.
	content := strings.Repeat(baseContent+"\n", googleapi.MinUploadChunkSize)
	checksum := uint32Checksum([]byte(content))
	hash := md5Hash([]byte(content))
	runServersTest(t, nil, func(t *testing.T, server *Server) {
		tests := []struct {
			testCase   string
			bucketName string
			objectName string
			chunkSize  int
		}{
			{
				"default chunk size",
				"some-bucket",
				"some/interesting/object.txt",
				googleapi.DefaultUploadChunkSize,
			},
			{
				"small chunk size",
				"other-bucket",
				"other/interesting/object.txt",
				googleapi.MinUploadChunkSize,
			},
		}
		for _, test := range tests {
			test := test // capture range variable for the subtest closure
			t.Run(test.testCase, func(t *testing.T) {
				const contentType = "text/plain; charset=utf-8"
				server.CreateBucketWithOpts(CreateBucketOpts{Name: test.bucketName})
				client := server.Client()
				objHandle := client.Bucket(test.bucketName).Object(test.objectName)
				w := objHandle.NewWriter(context.Background())
				w.ChunkSize = test.chunkSize
				w.ContentType = contentType
				w.Metadata = map[string]string{
					"foo": "bar",
				}
				w.Write([]byte(content))
				err := w.Close()
				if err != nil {
					t.Fatal(err)
				}
				obj, err := server.GetObject(test.bucketName, test.objectName)
				if err != nil {
					t.Fatal(err)
				}
				if string(obj.Content) != content {
					n := strings.Count(string(obj.Content), baseContent)
					t.Errorf("wrong content returned\nwant %dx%q\ngot %dx%q",
						googleapi.MinUploadChunkSize, baseContent,
						n, baseContent)
				}
				if returnedChecksum := w.Attrs().CRC32C; returnedChecksum != checksum {
					t.Errorf("wrong writer.Attrs() checksum returned\nwant %d\ngot %d", checksum, returnedChecksum)
				}
				if base64Checksum := encodedChecksum(uint32ToBytes(checksum)); obj.Crc32c != base64Checksum {
					t.Errorf("wrong obj.Crc32c returned\nwant %s\ngot %s", base64Checksum, obj.Crc32c)
				}
				if returnedHash := w.Attrs().MD5; !bytes.Equal(returnedHash, hash) {
					t.Errorf("wrong writer.Attrs() hash returned\nwant %d\ngot %d", hash, returnedHash)
				}
				if stringHash := encodedHash(hash); obj.Md5Hash != stringHash {
					t.Errorf("wrong obj.Md5Hash returned\nwant %s\ngot %s", stringHash, obj.Md5Hash)
				}
				if obj.ContentType != contentType {
					t.Errorf("wrong content-type\nwant %q\ngot %q", contentType, obj.ContentType)
				}
				if !reflect.DeepEqual(obj.Metadata, w.Metadata) {
					t.Errorf("wrong meta data\nwant %+v\ngot %+v", w.Metadata, obj.Metadata)
				}
				reader, err := client.Bucket(test.bucketName).Object(test.objectName).NewReader(context.Background())
				if err != nil {
					t.Fatal(err)
				}
				// BUG FIX: close the reader when done (it was leaked).
				defer reader.Close()
				data, err := ioutil.ReadAll(reader)
				if err != nil {
					t.Fatal(err)
				}
				if string(data) != content {
					// BUG FIX: count occurrences in the bytes actually read
					// back (data), not in obj.Content, so the message
					// describes what the reader returned.
					n := strings.Count(string(data), baseContent)
					t.Errorf("wrong content returned via object reader\nwant %dx%q\ngot %dx%q",
						googleapi.MinUploadChunkSize, baseContent,
						n, baseContent)
				}
			})
		}
	})
}
// checkChecksum asserts that obj.Crc32c matches the encoded CRC32C
// checksum of content.
func checkChecksum(t *testing.T, content []byte, obj Object) {
	t.Helper()
	want := encodedCrc32cChecksum(content)
	if got := obj.Crc32c; got != want {
		t.Errorf("wrong checksum in the object\nwant %s\ngot %s", want, got)
	}
}
// TestServerClientObjectWriterOverwrite verifies that writing to an
// existing object replaces its content, checksum and content type.
func TestServerClientObjectWriterOverwrite(t *testing.T) {
	runServersTest(t, nil, func(t *testing.T, server *Server) {
		const content = "other content"
		const contentType = "text/plain"
		server.CreateObject(Object{
			BucketName:  "some-bucket",
			Name:        "some-object.txt",
			Content:     []byte("some content"),
			ContentType: "some-stff",
		})
		objHandle := server.Client().Bucket("some-bucket").Object("some-object.txt")
		w := objHandle.NewWriter(context.Background())
		w.ContentType = contentType
		w.Write([]byte(content))
		err := w.Close()
		if err != nil {
			t.Fatal(err)
		}
		obj, err := server.GetObject("some-bucket", "some-object.txt")
		if err != nil {
			t.Fatal(err)
		}
		if string(obj.Content) != content {
			t.Errorf("wrong content in the object\nwant %q\ngot %q", content, string(obj.Content))
		}
		checkChecksum(t, []byte(content), obj)
		if obj.ContentType != contentType {
			// BUG FIX: the error message contained a typo ("wsant").
			t.Errorf("wrong content-type\nwant %q\ngot %q", contentType, obj.ContentType)
		}
	})
}
// TestServerClientObjectWriterWithDoesNotExistPrecondition verifies that
// a DoesNotExist precondition allows the first write, rejects a second
// write with HTTP 412, and leaves the original content untouched.
func TestServerClientObjectWriterWithDoesNotExistPrecondition(t *testing.T) {
	runServersTest(t, nil, func(t *testing.T, server *Server) {
		const originalContent = "original content"
		const originalContentType = "text/plain"
		const bucketName = "some-bucket"
		const objectName = "some-object-2.txt"
		bucket := server.Client().Bucket(bucketName)
		if err := bucket.Create(context.Background(), "my-project", nil); err != nil {
			t.Fatal(err)
		}
		objHandle := bucket.Object(objectName)
		// First write: object does not exist yet, so it must succeed.
		firstWriter := objHandle.If(storage.Conditions{DoesNotExist: true}).NewWriter(context.Background())
		firstWriter.ContentType = originalContentType
		firstWriter.Write([]byte(originalContent))
		if err := firstWriter.Close(); err != nil {
			t.Fatal(err)
		}
		firstReader, err := objHandle.NewReader(context.Background())
		if err != nil {
			t.Fatal(err)
		}
		objectContent, err := ioutil.ReadAll(firstReader)
		if err != nil {
			t.Fatal(err)
		}
		if string(objectContent) != originalContent {
			t.Errorf("wrong content in the object after initial write with precondition\nwant %q\ngot %q", originalContent, string(objectContent))
		}
		// Second write with the same precondition: the object now exists,
		// so Close must fail with a 412 Precondition Failed.
		secondWriter := objHandle.If(storage.Conditions{DoesNotExist: true}).NewWriter(context.Background())
		secondWriter.ContentType = "application/json"
		secondWriter.Write([]byte("new content"))
		err = secondWriter.Close()
		if err == nil {
			t.Fatal("expected overwriting existing object to fail, but received no error")
		}
		if err.Error() != "googleapi: Error 412: Precondition failed" {
			t.Errorf("expected HTTP 412 precondition failed error, but got %v", err)
		}
		// The failed write must not have altered the stored content.
		secondReader, err := objHandle.NewReader(context.Background())
		if err != nil {
			t.Fatal(err)
		}
		objectContentAfterFailedPrecondition, err := ioutil.ReadAll(secondReader)
		if err != nil {
			t.Fatal(err)
		}
		if string(objectContentAfterFailedPrecondition) != originalContent {
			t.Errorf("wrong content in the object after failed precondition\nwant %q\ngot %q", originalContent, string(objectContentAfterFailedPrecondition))
		}
	})
}
// TestServerClientObjectWriterBucketNotFound verifies that closing a
// writer aimed at a bucket that was never created reports an error.
func TestServerClientObjectWriterBucketNotFound(t *testing.T) {
	runServersTest(t, nil, func(t *testing.T, server *Server) {
		handle := server.Client().Bucket("some-bucket").Object("some/interesting/object.txt")
		writer := handle.NewWriter(context.Background())
		writer.Write([]byte("whatever"))
		if err := writer.Close(); err == nil {
			t.Fatal("unexpected <nil> error")
		}
	})
}
// TestServerClientSimpleUpload exercises the media upload endpoint over
// raw HTTP and verifies status, stored content, content type and checksum.
func TestServerClientSimpleUpload(t *testing.T) {
	server := NewServer(nil)
	defer server.Stop()
	server.CreateBucketWithOpts(CreateBucketOpts{Name: "other-bucket"})
	const data = "some nice content"
	const contentType = "text/plain"
	req, err := http.NewRequest("POST", server.URL()+"/storage/v1/b/other-bucket/o?uploadType=media&name=some/nice/object.txt", strings.NewReader(data))
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Set("Content-Type", contentType)
	client := http.Client{
		Transport: &http.Transport{
			// #nosec
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	expectedStatus := http.StatusOK
	if resp.StatusCode != expectedStatus {
		t.Errorf("wrong status code\nwant %d\ngot %d", expectedStatus, resp.StatusCode)
	}
	obj, err := server.GetObject("other-bucket", "some/nice/object.txt")
	if err != nil {
		t.Fatal(err)
	}
	if string(obj.Content) != data {
		// BUG FIX: want/got arguments were swapped in the error message.
		t.Errorf("wrong content\nwant %q\ngot %q", data, string(obj.Content))
	}
	if obj.ContentType != contentType {
		t.Errorf("wrong content type\nwant %q\ngot %q", contentType, obj.ContentType)
	}
	checkChecksum(t, []byte(data), obj)
}
// TestServerClientSignedUpload exercises a signed-URL style PUT upload and
// verifies status, stored content, content type, metadata and checksum.
func TestServerClientSignedUpload(t *testing.T) {
	server, err := NewServerWithOptions(Options{PublicHost: "127.0.0.1"})
	if err != nil {
		t.Fatalf("could not start server: %v", err)
	}
	defer server.Stop()
	server.CreateBucketWithOpts(CreateBucketOpts{Name: "other-bucket"})
	const data = "some nice content"
	const contentType = "text/plain"
	req, err := http.NewRequest("PUT", server.URL()+"/other-bucket/some/nice/object.txt?X-Goog-Algorithm=GOOG4-RSA-SHA256", strings.NewReader(data))
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Set("Content-Type", contentType)
	// X-Goog-Meta-* headers become object metadata (lower-cased key).
	req.Header.Set("X-Goog-Meta-Key", "Value")
	client := http.Client{
		Transport: &http.Transport{
			// #nosec
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	expectedStatus := http.StatusOK
	if resp.StatusCode != expectedStatus {
		t.Errorf("wrong status code\nwant %d\ngot %d", expectedStatus, resp.StatusCode)
	}
	obj, err := server.GetObject("other-bucket", "some/nice/object.txt")
	if err != nil {
		t.Fatal(err)
	}
	if string(obj.Content) != data {
		// BUG FIX: want/got arguments were swapped in the error message.
		t.Errorf("wrong content\nwant %q\ngot %q", data, string(obj.Content))
	}
	if obj.ContentType != contentType {
		t.Errorf("wrong content type\nwant %q\ngot %q", contentType, obj.ContentType)
	}
	if want := map[string]string{"key": "Value"}; !reflect.DeepEqual(obj.Metadata, want) {
		t.Errorf("wrong metadata\nwant %q\ngot %q", want, obj.Metadata)
	}
	checkChecksum(t, []byte(data), obj)
}
// TestServerClientSignedUploadBucketCNAME verifies signed uploads against a
// CNAME-style bucket host (bucket name taken from the Host header) and checks
// the JSON object representation returned in the response body.
func TestServerClientSignedUploadBucketCNAME(t *testing.T) {
	url := "https://mybucket.mydomain.com:4443/files/txt/text-02.txt?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=fake-gcs&X-Goog-Expires=3600&X-Goog-SignedHeaders=host&X-Goog-Signature=fake-gc"
	expectedName := "files/txt/text-02.txt"
	expectedContentType := "text/plain"
	expectedHash := "bHupxaFBQh4cA8uYB8l8dA=="
	opts := Options{
		InitialObjects: []Object{
			{BucketName: "mybucket.mydomain.com", Name: "files/txt/text-01.txt", Content: []byte("something")},
		},
	}
	server, err := NewServerWithOptions(opts)
	if err != nil {
		t.Fatal(err)
	}
	// Fix: the server was previously leaked; every sibling test stops it.
	defer server.Stop()
	client := server.HTTPClient()
	req, err := http.NewRequest(http.MethodPut, url, strings.NewReader("something else"))
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Set("Content-Type", "text/plain")
	resp, err := client.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Errorf("wrong status returned\nwant %d\ngot %d", http.StatusOK, resp.StatusCode)
	}
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}
	// The upload endpoint echoes the stored object as JSON.
	var obj Object
	if err := json.Unmarshal(data, &obj); err != nil {
		t.Fatal(err)
	}
	if obj.Name != expectedName {
		t.Errorf("wrong filename\nwant %q\ngot %q", expectedName, obj.Name)
	}
	if obj.ContentType != expectedContentType {
		t.Errorf("wrong content type\nwant %q\ngot %q", expectedContentType, obj.ContentType)
	}
	if obj.Md5Hash != expectedHash {
		t.Errorf("wrong md5 hash\nwant %q\ngot %q", expectedHash, obj.Md5Hash)
	}
}
// TestServerClientUploadWithPredefinedAclPublicRead verifies that a media
// upload with predefinedAcl=publicRead stores the object with a public-read
// ACL and preserves contentEncoding, content, and content type.
func TestServerClientUploadWithPredefinedAclPublicRead(t *testing.T) {
	server := NewServer(nil)
	defer server.Stop()
	server.CreateBucketWithOpts(CreateBucketOpts{Name: "other-bucket"})
	const data = "some nice content"
	const contentType = "text/plain"
	const contentEncoding = "gzip"
	req, err := http.NewRequest("POST", server.URL()+"/storage/v1/b/other-bucket/o?predefinedAcl=publicRead&uploadType=media&name=some/nice/object.txt&contentEncoding="+contentEncoding, strings.NewReader(data))
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Set("Content-Type", contentType)
	client := http.Client{
		Transport: &http.Transport{
			// #nosec
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	expectedStatus := http.StatusOK
	if resp.StatusCode != expectedStatus {
		t.Errorf("wrong status code\nwant %d\ngot %d", expectedStatus, resp.StatusCode)
	}
	obj, err := server.GetObject("other-bucket", "some/nice/object.txt")
	if err != nil {
		t.Fatal(err)
	}
	// Cross-check through the real GCS client API as well.
	attrs, err := server.Client().Bucket("other-bucket").Object("some/nice/object.txt").Attrs(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	if attrs.ContentEncoding != contentEncoding {
		t.Errorf("wrong contentEncoding\nwant %q\ngot %q", contentEncoding, attrs.ContentEncoding)
	}
	acl, err := server.Client().Bucket("other-bucket").Object("some/nice/object.txt").ACL().List(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	// publicRead must translate into an AllUsers/Reader ACL entry.
	if !isACLPublic(acl) {
		t.Errorf("wrong acl\ngot %+v", acl)
	}
	if string(obj.Content) != data {
		t.Errorf("wrong content\nwant %q\ngot %q", string(obj.Content), data)
	}
	if obj.ContentType != contentType {
		t.Errorf("wrong content type\nwant %q\ngot %q", contentType, obj.ContentType)
	}
	checkChecksum(t, []byte(data), obj)
}
// TestServerClientSimpleUploadNoName verifies that a media upload without a
// "name" query parameter is rejected with 400 Bad Request.
func TestServerClientSimpleUploadNoName(t *testing.T) {
	server := NewServer(nil)
	defer server.Stop()
	server.CreateBucketWithOpts(CreateBucketOpts{Name: "other-bucket"})
	const data = "some nice content"
	uploadURL := server.URL() + "/storage/v1/b/other-bucket/o?uploadType=media"
	req, err := http.NewRequest("POST", uploadURL, strings.NewReader(data))
	if err != nil {
		t.Fatal(err)
	}
	httpClient := http.Client{
		Transport: &http.Transport{
			// #nosec
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	if want := http.StatusBadRequest; resp.StatusCode != want {
		t.Errorf("wrong status returned\nwant %d\ngot %d", want, resp.StatusCode)
	}
}
// TestServerInvalidUploadType verifies that an unknown uploadType query
// value is rejected with 400 Bad Request.
func TestServerInvalidUploadType(t *testing.T) {
	server := NewServer(nil)
	defer server.Stop()
	server.CreateBucketWithOpts(CreateBucketOpts{Name: "other-bucket"})
	const data = "some nice content"
	uploadURL := server.URL() + "/storage/v1/b/other-bucket/o?uploadType=bananas&name=some-object.txt"
	req, err := http.NewRequest("POST", uploadURL, strings.NewReader(data))
	if err != nil {
		t.Fatal(err)
	}
	httpClient := http.Client{
		Transport: &http.Transport{
			// #nosec
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	if want := http.StatusBadRequest; resp.StatusCode != want {
		t.Errorf("wrong status returned\nwant %d\ngot %d", want, resp.StatusCode)
	}
}
// TestParseContentRange exercises parseContentRange with both well-formed
// and malformed Content-Range headers.
func TestParseContentRange(t *testing.T) {
	t.Parallel()
	goodHeaderTests := []struct {
		header string
		output contentRange
	}{
		{
			"bytes */1024", // End of a streaming request, total is now known
			contentRange{KnownTotal: true, Start: -1, End: -1, Total: 1024},
		},
		{
			"bytes 1024-2047/4096", // Range with a known total
			contentRange{KnownRange: true, KnownTotal: true, Start: 1024, End: 2047, Total: 4096},
		},
		{
			"bytes 0-1024/*", // A streaming request, unknown total
			contentRange{KnownRange: true, Start: 0, End: 1024, Total: -1},
		},
	}
	for _, test := range goodHeaderTests {
		test := test
		t.Run(test.header, func(t *testing.T) {
			t.Parallel()
			output, err := parseContentRange(test.header)
			// Fix: check the error before the output — a parse failure should
			// be reported as such, not as a confusing value mismatch.
			if err != nil {
				t.Fatal(err)
			}
			if output != test.output {
				t.Fatalf("output is different.\nexpected: %+v\n  actual: %+v\n", test.output, output)
			}
		})
	}
	badHeaderTests := []string{
		"none",                // Unsupported unit "none"
		"bytes 20",            // No slash to split range from size
		"bytes 1/4",           // Single-field range
		"bytes start-20/100",  // Non-integer range start
		"bytes 20-end/100",    // Non-integer range end
		"bytes 100-200/total", // Non-integer size
		"bytes */*",           // Unknown range or size
	}
	for _, test := range badHeaderTests {
		test := test
		t.Run(test, func(t *testing.T) {
			t.Parallel()
			_, err := parseContentRange(test)
			if err == nil {
				t.Fatalf("Expected err!=<nil>, but was %v", err)
			}
		})
	}
}
// this is to support the Ruby SDK.
// TestServerUndocumentedResumableUploadAPI exercises the X-Goog-Upload-*
// header protocol (start command, then upload+finalize against the returned
// upload URL) used by the Ruby SDK.
func TestServerUndocumentedResumableUploadAPI(t *testing.T) {
	bucketName := "testbucket"
	runServersTest(t, nil, func(t *testing.T, server *Server) {
		t.Run("test headers", func(t *testing.T) {
			server.CreateBucketWithOpts(CreateBucketOpts{Name: bucketName})
			client := server.HTTPClient()
			url := server.URL()
			body := strings.NewReader("{\"contentType\": \"application/json\"}")
			req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/upload/storage/v1/b/%s/o?name=testobj", url, bucketName), body)
			if err != nil {
				t.Fatal(err)
			}
			// Step 1: the "start" command opens a resumable session.
			req.Header.Set("X-Goog-Upload-Protocol", "resumable")
			req.Header.Set("X-Goog-Upload-Command", "start")
			resp, err := client.Do(req)
			if err != nil {
				t.Fatal(err)
			}
			// Drain and close so the transport can reuse the connection.
			defer func() {
				_, _ = io.Copy(ioutil.Discard, resp.Body)
				_ = resp.Body.Close()
			}()
			if resp.StatusCode != 200 {
				t.Errorf("expected a 200 response, got: %d", resp.StatusCode)
			}
			if hdr := resp.Header.Get("X-Goog-Upload-Status"); hdr != "active" {
				t.Errorf("X-Goog-Upload-Status response header expected 'active' got: %s", hdr)
			}
			// The session URL for the remaining commands comes back in a header.
			uploadURL := resp.Header.Get("X-Goog-Upload-URL")
			if uploadURL == "" {
				t.Error("X-Goog-Upload-URL did not return upload url")
			}
			// Step 2: send the data and finalize in a single request.
			body = strings.NewReader("{\"test\": \"foo\"}")
			req, err = http.NewRequest(http.MethodPost, uploadURL, body)
			if err != nil {
				t.Fatal(err)
			}
			req.Header.Set("X-Goog-Upload-Command", "upload, finalize")
			resp2, err := client.Do(req)
			if err != nil {
				t.Fatal(err)
			}
			defer func() {
				_, _ = io.Copy(ioutil.Discard, resp2.Body)
				_ = resp2.Body.Close()
			}()
			if resp2.StatusCode != 200 {
				t.Errorf("expected a 200 response, got: %d", resp2.StatusCode)
			}
			if hdr := resp2.Header.Get("X-Goog-Upload-Status"); hdr != "final" {
				t.Errorf("X-Goog-Upload-Status response header expected 'final' got: %s", hdr)
			}
		})
	})
}
// this is to support the Java SDK.
// TestServerGzippedUpload verifies that a media upload sent with
// Content-Encoding: gzip is transparently decompressed before storage
// (behavior relied upon by the Java SDK).
func TestServerGzippedUpload(t *testing.T) {
	const bucketName = "testbucket"
	runServersTest(t, nil, func(t *testing.T, server *Server) {
		t.Run("test headers", func(t *testing.T) {
			server.CreateBucketWithOpts(CreateBucketOpts{Name: bucketName})
			client := server.HTTPClient()
			var buf bytes.Buffer
			const content = "some interesting content perhaps?"
			writer := gzip.NewWriter(&buf)
			_, err := writer.Write([]byte(content))
			if err != nil {
				t.Fatal(err)
			}
			// Close flushes the gzip trailer; without it the payload is truncated.
			err = writer.Close()
			if err != nil {
				t.Fatal(err)
			}
			url := server.URL()
			req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/upload/storage/v1/b/%s/o?name=testobj&uploadType=media", url, bucketName), &buf)
			if err != nil {
				t.Fatal(err)
			}
			req.Header.Set("Content-Encoding", "gzip")
			resp, err := client.Do(req)
			if err != nil {
				t.Fatal(err)
			}
			// Fix: the response body was never closed, leaking the connection.
			defer resp.Body.Close()
			if resp.StatusCode != 200 {
				t.Errorf("expected a 200 response, got: %d", resp.StatusCode)
			}
			// The stored object must contain the *decompressed* content.
			obj, err := server.GetObject(bucketName, "testobj")
			if err != nil {
				t.Fatal(err)
			}
			if string(obj.Content) != content {
				t.Errorf("wrong content\nwant %q\ngot %q", content, obj.Content)
			}
		})
	})
}
// isACLPublic reports whether the ACL contains an entry granting read
// access to all users (i.e. the object is publicly readable).
func isACLPublic(acl []storage.ACLRule) bool {
	for i := range acl {
		if acl[i].Entity == storage.AllUsers && acl[i].Role == storage.RoleReader {
			return true
		}
	}
	return false
}
|
/*
* Copyright 2016 Robin Engel
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package handler
import (
"github.com/bluedevel/mosel/moselserver"
"net/http"
"time"
"github.com/bluedevel/mosel/api"
"encoding/json"
"log"
)
// streamHandler serves a continuous stream of node responses over HTTP,
// emitting one JSON document per second.
type streamHandler struct {
	// ctx is the shared server context; currently unused by the handler
	// methods visible here but kept for parity with other handlers.
	ctx *moselserver.MoselServerContext
}
// NewStreamHandler builds a streamHandler bound to the given server context.
func NewStreamHandler(ctx *moselserver.MoselServerContext) streamHandler {
	return streamHandler{
		ctx: ctx,
	}
}
// ServeHTTP streams one JSON-encoded node response per second to the client
// until a write fails (typically because the client disconnected).
func (handler streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Streaming requires flushing after each write; silently give up if the
	// ResponseWriter does not support it (same behavior as before).
	flusher, ok := w.(http.Flusher)
	if !ok {
		return
	}
	ticker := time.NewTicker(1 * time.Second)
	// Robustness: stop the ticker on every exit path, not only after an
	// encode error, so it cannot leak if the loop ever gains another exit.
	defer ticker.Stop()
	for now := range ticker.C {
		if err := json.NewEncoder(w).Encode(handler.createResponse(r, now)); err != nil {
			// A failed write is the only signal that the client went away.
			log.Println(err)
			return
		}
		flusher.Flush()
	}
}
// createResponse builds the payload for one stream tick. The request and
// timestamp parameters are currently unused; presumably they are meant for
// future per-request/per-tick data — TODO confirm.
func (handler streamHandler) createResponse(r *http.Request, now time.Time) interface{} {
	resp := api.NewNodeResponse()
	return resp
}
// GetPath returns the URL path this handler is registered under.
func (handler streamHandler) GetPath() string {
	return "/stream"
}
// Secure reports whether this endpoint requires authentication; the stream
// is intentionally public.
func (handler streamHandler) Secure() bool {
	return false
}
Add debug code in streamHandler to check behaviour.
/*
* Copyright 2016 Robin Engel
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package handler
import (
"github.com/bluedevel/mosel/moselserver"
"net/http"
"time"
"github.com/bluedevel/mosel/api"
"encoding/json"
"log"
)
// streamHandler serves a continuous stream of node responses over HTTP,
// emitting one JSON document per second.
type streamHandler struct {
	// ctx is the shared server context.
	ctx *moselserver.MoselServerContext
	// test is a debug counter incremented on every response built; see
	// createResponse. Not initialized by NewStreamHandler (zero value 0).
	test int
}
// NewStreamHandler builds a streamHandler bound to the given server context.
// The debug counter starts at its zero value.
func NewStreamHandler(ctx *moselserver.MoselServerContext) streamHandler {
	return streamHandler{
		ctx: ctx,
	}
}
// ServeHTTP streams one JSON-encoded node response per second to the client
// until a write fails (typically because the client disconnected).
func (handler streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Streaming requires flushing after each write; silently give up if the
	// ResponseWriter does not support it (same behavior as before).
	flusher, ok := w.(http.Flusher)
	if !ok {
		return
	}
	ticker := time.NewTicker(1 * time.Second)
	// Robustness: stop the ticker on every exit path, not only after an
	// encode error, so it cannot leak if the loop ever gains another exit.
	defer ticker.Stop()
	for now := range ticker.C {
		if err := json.NewEncoder(w).Encode(handler.createResponse(r, now)); err != nil {
			// A failed write is the only signal that the client went away.
			log.Println(err)
			return
		}
		flusher.Flush()
	}
}
// createResponse builds the payload for one stream tick and bumps the debug
// counter into the response.
// NOTE(review): ServeHTTP uses a value receiver, so each request works on its
// own copy of streamHandler — this counter therefore counts ticks within a
// single request, not across requests. Confirm that is the intended behavior.
func (handler *streamHandler) createResponse(r *http.Request, now time.Time) interface{} {
	resp := api.NewNodeResponse()
	resp.Test = handler.test
	handler.test += 1
	return resp
}
// GetPath returns the URL path this handler is registered under.
func (handler streamHandler) GetPath() string {
	return "/stream"
}
// Secure reports whether this endpoint requires authentication; the stream
// is intentionally public.
func (handler streamHandler) Secure() bool {
	return false
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2017, 2018 Red Hat, Inc.
*
*/
package virtconfig
import (
"fmt"
"strconv"
"strings"
"sync"
"time"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/client-go/tools/cache"
v1 "kubevirt.io/client-go/api/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/util"
)
const (
	// Name of the config map holding the cluster-wide KubeVirt settings.
	configMapName = "kubevirt-config"
	// Keys under which individual settings are stored in the config map.
	FeatureGatesKey           = "feature-gates"
	EmulatedMachinesKey       = "emulated-machines"
	MachineTypeKey            = "machine-type"
	useEmulationKey           = "debug.useEmulation"
	ImagePullPolicyKey        = "dev.imagePullPolicy"
	MigrationsConfigKey       = "migrations"
	CpuModelKey               = "default-cpu-model"
	CpuRequestKey             = "cpu-request"
	MemoryOvercommitKey       = "memory-overcommit"
	LessPVCSpaceTolerationKey = "pvc-tolerate-less-space-up-to-percent"
	NodeSelectorsKey          = "node-selectors"
	NetworkInterfaceKey       = "default-network-interface"
	PermitSlirpInterface      = "permitSlirpInterface"
	// Default values used when the config map omits the setting.
	ParallelOutboundMigrationsPerNodeDefault uint32 = 2
	ParallelMigrationsPerClusterDefault      uint32 = 5
	BandwithPerMigrationDefault                     = "64Mi"
	MigrationAllowAutoConverge               bool   = false
	MigrationProgressTimeout                 int64  = 150
	MigrationCompletionTimeoutPerGiB         int64  = 800
	DefaultMachineType                              = "q35"
	DefaultCPURequest                               = "100m"
	DefaultMemoryOvercommit                         = 100
	DefaultEmulatedMachines                         = "q35*,pc-q35*"
	DefaultLessPVCSpaceToleration                   = 10
	DefaultNodeSelectors                            = ""
	DefaultNetworkInterface                         = "bridge"
	NodeDrainTaintDefaultKey                        = "kubevirt.io/drain"
)
// getConfigMap fetches the kubevirt-config ConfigMap, polling for up to ten
// seconds. A missing config map is not an error: an empty ConfigMap is
// returned so that defaults apply. Unrecoverable failures panic, as this
// runs during process startup.
func getConfigMap() *k8sv1.ConfigMap {
	virtClient, err := kubecli.GetKubevirtClient()
	if err != nil {
		panic(err)
	}
	var cfgMap *k8sv1.ConfigMap
	err = wait.PollImmediate(time.Second*1, time.Second*10, func() (bool, error) {
		namespace, curErr := util.GetNamespace()
		// Fix: this previously tested the outer `err` (always nil here),
		// silently ignoring namespace lookup failures.
		if curErr != nil {
			return false, curErr
		}
		cfgMap, curErr = virtClient.CoreV1().ConfigMaps(namespace).Get(configMapName, metav1.GetOptions{})
		if curErr != nil {
			if errors.IsNotFound(curErr) {
				logger := log.DefaultLogger()
				logger.Infof("%s ConfigMap does not exist. Using defaults.", configMapName)
				cfgMap = &k8sv1.ConfigMap{}
				return true, nil
			}
			return false, curErr
		}
		return true, nil
	})
	if err != nil {
		panic(err)
	}
	return cfgMap
}
// NewClusterConfig returns a ClusterConfig backed by the `kubevirt-config`
// config map informer. It can be used to live-update values if the config
// changes. The config update works like this:
// 1. Check if the config exists. If it does not exist, return the default config
// 2. Check if the config got updated. If so, try to parse and return it
// 3. In case of errors or no updates (resource version stays the same), it returns the values from the last good config
func NewClusterConfig(configMapInformer cache.SharedIndexInformer, namespace string) *ClusterConfig {
	c := &ClusterConfig{
		informer:  configMapInformer,
		lock:      &sync.Mutex{},
		namespace: namespace,
		// Both start from the defaults; lastValidConfig is replaced as soon
		// as a valid config map is observed.
		lastValidConfig: defaultClusterConfig(),
		defaultConfig:   defaultClusterConfig(),
	}
	return c
}
func defaultClusterConfig() *Config {
parallelOutboundMigrationsPerNodeDefault := ParallelOutboundMigrationsPerNodeDefault
parallelMigrationsPerClusterDefault := ParallelMigrationsPerClusterDefault
bandwithPerMigrationDefault := resource.MustParse(BandwithPerMigrationDefault)
nodeDrainTaintDefaultKey := NodeDrainTaintDefaultKey
allowAutoConverge := MigrationAllowAutoConverge
progressTimeout := MigrationProgressTimeout
completionTimeoutPerGiB := MigrationCompletionTimeoutPerGiB
cpuRequestDefault := resource.MustParse(DefaultCPURequest)
emulatedMachinesDefault := strings.Split(DefaultEmulatedMachines, ",")
nodeSelectorsDefault, _ := parseNodeSelectors(DefaultNodeSelectors)
defaultNetworkInterface := DefaultNetworkInterface
return &Config{
ResourceVersion: "0",
ImagePullPolicy: k8sv1.PullIfNotPresent,
UseEmulation: false,
MigrationConfig: &MigrationConfig{
ParallelMigrationsPerCluster: ¶llelMigrationsPerClusterDefault,
ParallelOutboundMigrationsPerNode: ¶llelOutboundMigrationsPerNodeDefault,
BandwidthPerMigration: &bandwithPerMigrationDefault,
NodeDrainTaintKey: &nodeDrainTaintDefaultKey,
ProgressTimeout: &progressTimeout,
CompletionTimeoutPerGiB: &completionTimeoutPerGiB,
UnsafeMigrationOverride: false,
AllowAutoConverge: allowAutoConverge,
},
MachineType: DefaultMachineType,
CPURequest: cpuRequestDefault,
MemoryOvercommit: DefaultMemoryOvercommit,
EmulatedMachines: emulatedMachinesDefault,
LessPVCSpaceToleration: DefaultLessPVCSpaceToleration,
NodeSelectors: nodeSelectorsDefault,
NetworkInterface: defaultNetworkInterface,
PermitSlirpInterface: false,
}
}
// Config is the parsed, in-memory representation of the kubevirt-config
// config map. ResourceVersion tracks which config map revision it came from.
type Config struct {
	ResourceVersion        string
	UseEmulation           bool
	MigrationConfig        *MigrationConfig
	ImagePullPolicy        k8sv1.PullPolicy
	MachineType            string
	CPUModel               string
	CPURequest             resource.Quantity
	MemoryOvercommit       int
	EmulatedMachines       []string
	FeatureGates           string
	LessPVCSpaceToleration int
	NodeSelectors          map[string]string
	NetworkInterface       string
	PermitSlirpInterface   bool
}
// MigrationConfig holds live-migration tunables. Pointer fields distinguish
// "unset in the config map" (nil keeps the default) from an explicit value.
type MigrationConfig struct {
	ParallelOutboundMigrationsPerNode *uint32            `json:"parallelOutboundMigrationsPerNode,omitempty"`
	ParallelMigrationsPerCluster      *uint32            `json:"parallelMigrationsPerCluster,omitempty"`
	BandwidthPerMigration             *resource.Quantity `json:"bandwidthPerMigration,omitempty"`
	NodeDrainTaintKey                 *string            `json:"nodeDrainTaintKey,omitempty"`
	ProgressTimeout                   *int64             `json:"progressTimeout,omitempty"`
	CompletionTimeoutPerGiB           *int64             `json:"completionTimeoutPerGiB,omitempty"`
	UnsafeMigrationOverride           bool               `json:"unsafeMigrationOverride"`
	AllowAutoConverge                 bool               `json:"allowAutoConverge"`
}
// ClusterConfig caches the parsed cluster configuration and refreshes it
// from the informer on demand (see getConfig). All fields are guarded by lock.
type ClusterConfig struct {
	informer        cache.SharedIndexInformer
	namespace       string
	lock            *sync.Mutex
	lastValidConfig *Config
	defaultConfig   *Config
	// Resource version of the last config map that failed to parse; used to
	// avoid re-parsing (and re-logging) the same bad revision.
	lastInvalidConfigResourceVersion string
}
// The accessors below all read through getConfig, so every call observes the
// latest valid configuration.

// IsUseEmulation reports whether software emulation is enabled.
func (c *ClusterConfig) IsUseEmulation() bool {
	return c.getConfig().UseEmulation
}

// GetMigrationConfig returns the live-migration tunables.
func (c *ClusterConfig) GetMigrationConfig() *MigrationConfig {
	return c.getConfig().MigrationConfig
}

// GetImagePullPolicy returns the pull policy for KubeVirt images.
func (c *ClusterConfig) GetImagePullPolicy() (policy k8sv1.PullPolicy) {
	return c.getConfig().ImagePullPolicy
}

// GetMachineType returns the default machine type for VMs.
func (c *ClusterConfig) GetMachineType() string {
	return c.getConfig().MachineType
}

// GetCPUModel returns the default CPU model for VMs.
func (c *ClusterConfig) GetCPUModel() string {
	return c.getConfig().CPUModel
}

// GetCPURequest returns the default CPU request quantity.
func (c *ClusterConfig) GetCPURequest() resource.Quantity {
	return c.getConfig().CPURequest
}

// GetMemoryOvercommit returns the memory overcommit percentage.
func (c *ClusterConfig) GetMemoryOvercommit() int {
	return c.getConfig().MemoryOvercommit
}

// GetEmulatedMachines returns the machine-type patterns allowed for emulation.
func (c *ClusterConfig) GetEmulatedMachines() []string {
	return c.getConfig().EmulatedMachines
}

// GetLessPVCSpaceToleration returns the tolerated PVC space shortfall percent.
func (c *ClusterConfig) GetLessPVCSpaceToleration() int {
	return c.getConfig().LessPVCSpaceToleration
}

// GetNodeSelectors returns the node selectors applied to VM pods.
func (c *ClusterConfig) GetNodeSelectors() map[string]string {
	return c.getConfig().NodeSelectors
}

// GetDefaultNetworkInterface returns the default interface binding type.
func (c *ClusterConfig) GetDefaultNetworkInterface() string {
	return c.getConfig().NetworkInterface
}

// IsSlirpInterfaceEnabled reports whether slirp interfaces are permitted.
func (c *ClusterConfig) IsSlirpInterfaceEnabled() bool {
	return c.getConfig().PermitSlirpInterface
}
// setConfig parses the provided config map and updates the provided config.
// Default values in the provided config stay intact: each setting is only
// overwritten when its key is present and non-empty. Returns an error on the
// first malformed value, leaving config partially updated.
func setConfig(config *Config, configMap *k8sv1.ConfigMap) error {
	// set revision
	config.ResourceVersion = configMap.ResourceVersion
	// set migration options
	rawConfig := strings.TrimSpace(configMap.Data[MigrationsConfigKey])
	if rawConfig != "" {
		// only sets values if they were specified, default values stay intact
		err := yaml.NewYAMLOrJSONDecoder(strings.NewReader(rawConfig), 1024).Decode(config.MigrationConfig)
		if err != nil {
			return fmt.Errorf("failed to parse migration config: %v", err)
		}
	}
	// set image pull policy
	policy := strings.TrimSpace(configMap.Data[ImagePullPolicyKey])
	switch policy {
	case "":
		// keep the default
	case "Always":
		config.ImagePullPolicy = k8sv1.PullAlways
	case "Never":
		config.ImagePullPolicy = k8sv1.PullNever
	case "IfNotPresent":
		config.ImagePullPolicy = k8sv1.PullIfNotPresent
	default:
		return fmt.Errorf("invalid dev.imagePullPolicy in config: %v", policy)
	}
	// set if emulation is used
	useEmulation := strings.TrimSpace(configMap.Data[useEmulationKey])
	switch useEmulation {
	case "":
		// keep the default
	case "true":
		config.UseEmulation = true
	case "false":
		config.UseEmulation = false
	default:
		return fmt.Errorf("invalid debug.useEmulation in config: %v", useEmulation)
	}
	// set machine type
	if machineType := strings.TrimSpace(configMap.Data[MachineTypeKey]); machineType != "" {
		config.MachineType = machineType
	}
	if cpuModel := strings.TrimSpace(configMap.Data[CpuModelKey]); cpuModel != "" {
		config.CPUModel = cpuModel
	}
	if cpuRequest := strings.TrimSpace(configMap.Data[CpuRequestKey]); cpuRequest != "" {
		// NOTE(review): MustParse panics on a malformed quantity rather than
		// returning an error like the other settings — confirm intended.
		config.CPURequest = resource.MustParse(cpuRequest)
	}
	// memory overcommit must be a positive integer percentage
	if memoryOvercommit := strings.TrimSpace(configMap.Data[MemoryOvercommitKey]); memoryOvercommit != "" {
		if value, err := strconv.Atoi(memoryOvercommit); err == nil && value > 0 {
			config.MemoryOvercommit = value
		} else {
			return fmt.Errorf("Invalid memoryOvercommit in ConfigMap: %s", memoryOvercommit)
		}
	}
	// emulated machines: comma-separated glob patterns
	if emulatedMachines := strings.TrimSpace(configMap.Data[EmulatedMachinesKey]); emulatedMachines != "" {
		vals := strings.Split(emulatedMachines, ",")
		for i := range vals {
			vals[i] = strings.TrimSpace(vals[i])
		}
		config.EmulatedMachines = vals
	}
	if featureGates := strings.TrimSpace(configMap.Data[FeatureGatesKey]); featureGates != "" {
		config.FeatureGates = featureGates
	}
	// PVC space toleration must be a percentage in [0, 100]
	if toleration := strings.TrimSpace(configMap.Data[LessPVCSpaceTolerationKey]); toleration != "" {
		if value, err := strconv.Atoi(toleration); err != nil || value < 0 || value > 100 {
			return fmt.Errorf("Invalid lessPVCSpaceToleration in ConfigMap: %s", toleration)
		} else {
			config.LessPVCSpaceToleration = value
		}
	}
	// node selectors: newline-separated key=value pairs
	if nodeSelectors := strings.TrimSpace(configMap.Data[NodeSelectorsKey]); nodeSelectors != "" {
		if selectors, err := parseNodeSelectors(nodeSelectors); err != nil {
			return err
		} else {
			config.NodeSelectors = selectors
		}
	}
	// disable slirp
	permitSlirp := strings.TrimSpace(configMap.Data[PermitSlirpInterface])
	switch permitSlirp {
	case "":
		// keep the default
	case "true":
		config.PermitSlirpInterface = true
	case "false":
		config.PermitSlirpInterface = false
	default:
		return fmt.Errorf("invalid value for permitSlirpInterfaces in config: %v", permitSlirp)
	}
	// set default network interface
	iface := strings.TrimSpace(configMap.Data[NetworkInterfaceKey])
	switch iface {
	case "":
		// keep the default
	case string(v1.BridgeInterface), string(v1.SlirpInterface), string(v1.MasqueradeInterface):
		config.NetworkInterface = iface
	default:
		return fmt.Errorf("invalid default-network-interface in config: %v", iface)
	}
	return nil
}
// getConfig returns the latest valid parsed config map result, or updates it
// if a newer version is available. Falls back to the last good config on
// lookup or parse errors, and to the defaults when no config map exists.
// XXX Rework this, to happen mostly in informer callbacks.
// This will also allow us then to react to config changes and e.g. restart some controllers
func (c *ClusterConfig) getConfig() (config *Config) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if obj, exists, err := c.informer.GetStore().GetByKey(c.namespace + "/" + configMapName); err != nil {
		log.DefaultLogger().Reason(err).Errorf("Error loading the cluster config from cache, falling back to last good resource version '%s'", c.lastValidConfig.ResourceVersion)
		return c.lastValidConfig
	} else if !exists {
		return c.defaultConfig
	} else {
		configMap := obj.(*k8sv1.ConfigMap)
		// Skip re-parsing when the revision is the one we already parsed
		// (successfully or not) — avoids repeated work and repeated error logs.
		if c.lastValidConfig.ResourceVersion == configMap.ResourceVersion ||
			c.lastInvalidConfigResourceVersion == configMap.ResourceVersion {
			return c.lastValidConfig
		}
		// Parse on top of a fresh default config so unset keys keep defaults.
		config := defaultClusterConfig()
		if err := setConfig(config, configMap); err != nil {
			c.lastInvalidConfigResourceVersion = configMap.ResourceVersion
			log.DefaultLogger().Reason(err).Errorf("Invalid cluster config with resource version '%s', falling back to last good resource version '%s'", configMap.ResourceVersion, c.lastValidConfig.ResourceVersion)
			return c.lastValidConfig
		}
		log.DefaultLogger().Infof("Updating cluster config to resource version '%s'", configMap.ResourceVersion)
		c.lastValidConfig = config
		return c.lastValidConfig
	}
}
// parseNodeSelectors parses newline-separated "key=value" pairs into a map.
// Blank lines are skipped, so an empty input yields an empty map instead of
// an error (previously the empty default string produced a nil map plus an
// error that callers silently discarded).
func parseNodeSelectors(str string) (map[string]string, error) {
	nodeSelectors := make(map[string]string)
	for _, s := range strings.Split(strings.TrimSpace(str), "\n") {
		s = strings.TrimSpace(s)
		if s == "" {
			// tolerate blank lines and the empty default
			continue
		}
		v := strings.Split(s, "=")
		if len(v) != 2 {
			return nil, fmt.Errorf("Invalid node selector: %s", s)
		}
		nodeSelectors[v[0]] = v[1]
	}
	return nodeSelectors, nil
}
Replace hard-coded default config values with consts.
Signed-off-by: Arik Hadas <4817076c8cbf937260fc19e8bc79ad74394890b9@bamba.tlv.redhat.com>
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2017, 2018 Red Hat, Inc.
*
*/
package virtconfig
import (
"fmt"
"strconv"
"strings"
"sync"
"time"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/client-go/tools/cache"
v1 "kubevirt.io/client-go/api/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/util"
)
const (
	// Name of the config map holding the cluster-wide KubeVirt settings.
	configMapName = "kubevirt-config"
	// Keys under which individual settings are stored in the config map.
	FeatureGatesKey           = "feature-gates"
	EmulatedMachinesKey       = "emulated-machines"
	MachineTypeKey            = "machine-type"
	useEmulationKey           = "debug.useEmulation"
	ImagePullPolicyKey        = "dev.imagePullPolicy"
	MigrationsConfigKey       = "migrations"
	CpuModelKey               = "default-cpu-model"
	CpuRequestKey             = "cpu-request"
	MemoryOvercommitKey       = "memory-overcommit"
	LessPVCSpaceTolerationKey = "pvc-tolerate-less-space-up-to-percent"
	NodeSelectorsKey          = "node-selectors"
	NetworkInterfaceKey       = "default-network-interface"
	PermitSlirpInterface      = "permitSlirpInterface"
	// Default values used when the config map omits the setting.
	ParallelOutboundMigrationsPerNodeDefault uint32 = 2
	ParallelMigrationsPerClusterDefault      uint32 = 5
	BandwithPerMigrationDefault                     = "64Mi"
	MigrationAllowAutoConverge               bool   = false
	MigrationProgressTimeout                 int64  = 150
	MigrationCompletionTimeoutPerGiB         int64  = 800
	DefaultMachineType                              = "q35"
	DefaultCPURequest                               = "100m"
	DefaultMemoryOvercommit                         = 100
	DefaultEmulatedMachines                         = "q35*,pc-q35*"
	DefaultLessPVCSpaceToleration                   = 10
	DefaultNodeSelectors                            = ""
	DefaultNetworkInterface                         = "bridge"
	DefaultImagePullPolicy                          = k8sv1.PullIfNotPresent
	DefaultUseEmulation                             = false
	DefaultUnsafeMigrationOverride                  = false
	DefaultPermitSlirpInterface                     = false
	NodeDrainTaintDefaultKey                        = "kubevirt.io/drain"
)
// getConfigMap fetches the kubevirt-config ConfigMap, polling for up to ten
// seconds. A missing config map is not an error: an empty ConfigMap is
// returned so that defaults apply. Unrecoverable failures panic, as this
// runs during process startup.
func getConfigMap() *k8sv1.ConfigMap {
	virtClient, err := kubecli.GetKubevirtClient()
	if err != nil {
		panic(err)
	}
	var cfgMap *k8sv1.ConfigMap
	err = wait.PollImmediate(time.Second*1, time.Second*10, func() (bool, error) {
		namespace, curErr := util.GetNamespace()
		// Fix: this previously tested the outer `err` (always nil here),
		// silently ignoring namespace lookup failures.
		if curErr != nil {
			return false, curErr
		}
		cfgMap, curErr = virtClient.CoreV1().ConfigMaps(namespace).Get(configMapName, metav1.GetOptions{})
		if curErr != nil {
			if errors.IsNotFound(curErr) {
				logger := log.DefaultLogger()
				logger.Infof("%s ConfigMap does not exist. Using defaults.", configMapName)
				cfgMap = &k8sv1.ConfigMap{}
				return true, nil
			}
			return false, curErr
		}
		return true, nil
	})
	if err != nil {
		panic(err)
	}
	return cfgMap
}
// NewClusterConfig returns a ClusterConfig backed by the `kubevirt-config`
// config map informer. It can be used to live-update values if the config
// changes. The config update works like this:
// 1. Check if the config exists. If it does not exist, return the default config
// 2. Check if the config got updated. If so, try to parse and return it
// 3. In case of errors or no updates (resource version stays the same), it returns the values from the last good config
func NewClusterConfig(configMapInformer cache.SharedIndexInformer, namespace string) *ClusterConfig {
	c := &ClusterConfig{
		informer:  configMapInformer,
		lock:      &sync.Mutex{},
		namespace: namespace,
		// Both start from the defaults; lastValidConfig is replaced as soon
		// as a valid config map is observed.
		lastValidConfig: defaultClusterConfig(),
		defaultConfig:   defaultClusterConfig(),
	}
	return c
}
func defaultClusterConfig() *Config {
parallelOutboundMigrationsPerNodeDefault := ParallelOutboundMigrationsPerNodeDefault
parallelMigrationsPerClusterDefault := ParallelMigrationsPerClusterDefault
bandwithPerMigrationDefault := resource.MustParse(BandwithPerMigrationDefault)
nodeDrainTaintDefaultKey := NodeDrainTaintDefaultKey
allowAutoConverge := MigrationAllowAutoConverge
progressTimeout := MigrationProgressTimeout
completionTimeoutPerGiB := MigrationCompletionTimeoutPerGiB
cpuRequestDefault := resource.MustParse(DefaultCPURequest)
emulatedMachinesDefault := strings.Split(DefaultEmulatedMachines, ",")
nodeSelectorsDefault, _ := parseNodeSelectors(DefaultNodeSelectors)
defaultNetworkInterface := DefaultNetworkInterface
return &Config{
ResourceVersion: "0",
ImagePullPolicy: DefaultImagePullPolicy,
UseEmulation: DefaultUseEmulation,
MigrationConfig: &MigrationConfig{
ParallelMigrationsPerCluster: ¶llelMigrationsPerClusterDefault,
ParallelOutboundMigrationsPerNode: ¶llelOutboundMigrationsPerNodeDefault,
BandwidthPerMigration: &bandwithPerMigrationDefault,
NodeDrainTaintKey: &nodeDrainTaintDefaultKey,
ProgressTimeout: &progressTimeout,
CompletionTimeoutPerGiB: &completionTimeoutPerGiB,
UnsafeMigrationOverride: DefaultUnsafeMigrationOverride,
AllowAutoConverge: allowAutoConverge,
},
MachineType: DefaultMachineType,
CPURequest: cpuRequestDefault,
MemoryOvercommit: DefaultMemoryOvercommit,
EmulatedMachines: emulatedMachinesDefault,
LessPVCSpaceToleration: DefaultLessPVCSpaceToleration,
NodeSelectors: nodeSelectorsDefault,
NetworkInterface: defaultNetworkInterface,
PermitSlirpInterface: DefaultPermitSlirpInterface,
}
}
// Config is the parsed, in-memory representation of the kubevirt-config
// config map. ResourceVersion tracks which config map revision it came from.
type Config struct {
	ResourceVersion        string
	UseEmulation           bool
	MigrationConfig        *MigrationConfig
	ImagePullPolicy        k8sv1.PullPolicy
	MachineType            string
	CPUModel               string
	CPURequest             resource.Quantity
	MemoryOvercommit       int
	EmulatedMachines       []string
	FeatureGates           string
	LessPVCSpaceToleration int
	NodeSelectors          map[string]string
	NetworkInterface       string
	PermitSlirpInterface   bool
}
// MigrationConfig holds live-migration tunables. Pointer fields distinguish
// "unset in the config map" (nil keeps the default) from an explicit value.
type MigrationConfig struct {
	ParallelOutboundMigrationsPerNode *uint32            `json:"parallelOutboundMigrationsPerNode,omitempty"`
	ParallelMigrationsPerCluster      *uint32            `json:"parallelMigrationsPerCluster,omitempty"`
	BandwidthPerMigration             *resource.Quantity `json:"bandwidthPerMigration,omitempty"`
	NodeDrainTaintKey                 *string            `json:"nodeDrainTaintKey,omitempty"`
	ProgressTimeout                   *int64             `json:"progressTimeout,omitempty"`
	CompletionTimeoutPerGiB           *int64             `json:"completionTimeoutPerGiB,omitempty"`
	UnsafeMigrationOverride           bool               `json:"unsafeMigrationOverride"`
	AllowAutoConverge                 bool               `json:"allowAutoConverge"`
}
// ClusterConfig caches the parsed cluster configuration and refreshes it
// from the informer on demand (see getConfig). All fields are guarded by lock.
type ClusterConfig struct {
	informer        cache.SharedIndexInformer
	namespace       string
	lock            *sync.Mutex
	lastValidConfig *Config
	defaultConfig   *Config
	// Resource version of the last config map that failed to parse; used to
	// avoid re-parsing (and re-logging) the same bad revision.
	lastInvalidConfigResourceVersion string
}
// The accessors below all read through getConfig, so every call observes the
// latest valid configuration.

// IsUseEmulation reports whether software emulation is enabled.
func (c *ClusterConfig) IsUseEmulation() bool {
	return c.getConfig().UseEmulation
}

// GetMigrationConfig returns the live-migration tunables.
func (c *ClusterConfig) GetMigrationConfig() *MigrationConfig {
	return c.getConfig().MigrationConfig
}

// GetImagePullPolicy returns the pull policy for KubeVirt images.
func (c *ClusterConfig) GetImagePullPolicy() (policy k8sv1.PullPolicy) {
	return c.getConfig().ImagePullPolicy
}

// GetMachineType returns the default machine type for VMs.
func (c *ClusterConfig) GetMachineType() string {
	return c.getConfig().MachineType
}

// GetCPUModel returns the default CPU model for VMs.
func (c *ClusterConfig) GetCPUModel() string {
	return c.getConfig().CPUModel
}

// GetCPURequest returns the default CPU request quantity.
func (c *ClusterConfig) GetCPURequest() resource.Quantity {
	return c.getConfig().CPURequest
}

// GetMemoryOvercommit returns the memory overcommit percentage.
func (c *ClusterConfig) GetMemoryOvercommit() int {
	return c.getConfig().MemoryOvercommit
}

// GetEmulatedMachines returns the machine-type patterns allowed for emulation.
func (c *ClusterConfig) GetEmulatedMachines() []string {
	return c.getConfig().EmulatedMachines
}

// GetLessPVCSpaceToleration returns the tolerated PVC space shortfall percent.
func (c *ClusterConfig) GetLessPVCSpaceToleration() int {
	return c.getConfig().LessPVCSpaceToleration
}

// GetNodeSelectors returns the node selectors applied to VM pods.
func (c *ClusterConfig) GetNodeSelectors() map[string]string {
	return c.getConfig().NodeSelectors
}

// GetDefaultNetworkInterface returns the default interface binding type.
func (c *ClusterConfig) GetDefaultNetworkInterface() string {
	return c.getConfig().NetworkInterface
}

// IsSlirpInterfaceEnabled reports whether slirp interfaces are permitted.
func (c *ClusterConfig) IsSlirpInterfaceEnabled() bool {
	return c.getConfig().PermitSlirpInterface
}
// setConfig parses the provided config map and updates the provided config.
// Default values in the provided config stay intact.
// It returns an error on the first entry that fails to parse or validate,
// possibly leaving config partially updated.
func setConfig(config *Config, configMap *k8sv1.ConfigMap) error {
	// set revision
	config.ResourceVersion = configMap.ResourceVersion
	// set migration options
	rawConfig := strings.TrimSpace(configMap.Data[MigrationsConfigKey])
	if rawConfig != "" {
		// only sets values if they were specified, default values stay intact
		err := yaml.NewYAMLOrJSONDecoder(strings.NewReader(rawConfig), 1024).Decode(config.MigrationConfig)
		if err != nil {
			return fmt.Errorf("failed to parse migration config: %v", err)
		}
	}
	// set image pull policy
	policy := strings.TrimSpace(configMap.Data[ImagePullPolicyKey])
	switch policy {
	case "":
		// keep the default
	case "Always":
		config.ImagePullPolicy = k8sv1.PullAlways
	case "Never":
		config.ImagePullPolicy = k8sv1.PullNever
	case "IfNotPresent":
		config.ImagePullPolicy = k8sv1.PullIfNotPresent
	default:
		return fmt.Errorf("invalid dev.imagePullPolicy in config: %v", policy)
	}
	// set if emulation is used
	useEmulation := strings.TrimSpace(configMap.Data[useEmulationKey])
	switch useEmulation {
	case "":
		// keep the default
	case "true":
		config.UseEmulation = true
	case "false":
		config.UseEmulation = false
	default:
		return fmt.Errorf("invalid debug.useEmulation in config: %v", useEmulation)
	}
	// set machine type
	if machineType := strings.TrimSpace(configMap.Data[MachineTypeKey]); machineType != "" {
		config.MachineType = machineType
	}
	if cpuModel := strings.TrimSpace(configMap.Data[CpuModelKey]); cpuModel != "" {
		config.CPUModel = cpuModel
	}
	if cpuRequest := strings.TrimSpace(configMap.Data[CpuRequestKey]); cpuRequest != "" {
		// NOTE(review): MustParse panics on a malformed quantity instead of
		// returning an error like every other entry here — confirm intended.
		config.CPURequest = resource.MustParse(cpuRequest)
	}
	if memoryOvercommit := strings.TrimSpace(configMap.Data[MemoryOvercommitKey]); memoryOvercommit != "" {
		// must be a positive integer
		if value, err := strconv.Atoi(memoryOvercommit); err == nil && value > 0 {
			config.MemoryOvercommit = value
		} else {
			return fmt.Errorf("Invalid memoryOvercommit in ConfigMap: %s", memoryOvercommit)
		}
	}
	if emulatedMachines := strings.TrimSpace(configMap.Data[EmulatedMachinesKey]); emulatedMachines != "" {
		// comma-separated list; whitespace around each entry is ignored
		vals := strings.Split(emulatedMachines, ",")
		for i := range vals {
			vals[i] = strings.TrimSpace(vals[i])
		}
		config.EmulatedMachines = vals
	}
	if featureGates := strings.TrimSpace(configMap.Data[FeatureGatesKey]); featureGates != "" {
		config.FeatureGates = featureGates
	}
	if toleration := strings.TrimSpace(configMap.Data[LessPVCSpaceTolerationKey]); toleration != "" {
		// integer in the range 0-100
		if value, err := strconv.Atoi(toleration); err != nil || value < 0 || value > 100 {
			return fmt.Errorf("Invalid lessPVCSpaceToleration in ConfigMap: %s", toleration)
		} else {
			config.LessPVCSpaceToleration = value
		}
	}
	if nodeSelectors := strings.TrimSpace(configMap.Data[NodeSelectorsKey]); nodeSelectors != "" {
		if selectors, err := parseNodeSelectors(nodeSelectors); err != nil {
			return err
		} else {
			config.NodeSelectors = selectors
		}
	}
	// disable slirp
	permitSlirp := strings.TrimSpace(configMap.Data[PermitSlirpInterface])
	switch permitSlirp {
	case "":
		// keep the default
	case "true":
		config.PermitSlirpInterface = true
	case "false":
		config.PermitSlirpInterface = false
	default:
		return fmt.Errorf("invalid value for permitSlirpInterfaces in config: %v", permitSlirp)
	}
	// set default network interface
	iface := strings.TrimSpace(configMap.Data[NetworkInterfaceKey])
	switch iface {
	case "":
		// keep the default
	case string(v1.BridgeInterface), string(v1.SlirpInterface), string(v1.MasqueradeInterface):
		config.NetworkInterface = iface
	default:
		return fmt.Errorf("invalid default-network-interface in config: %v", iface)
	}
	return nil
}
// getConfig returns the latest valid parsed config map result, or updates it
// if a newer version is available.
// XXX Rework this, to happen mostly in informer callbacks.
// This will also allow us then to react to config changes and e.g. restart some controllers
func (c *ClusterConfig) getConfig() (config *Config) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if obj, exists, err := c.informer.GetStore().GetByKey(c.namespace + "/" + configMapName); err != nil {
		// cache lookup failed: serve the last known-good config
		log.DefaultLogger().Reason(err).Errorf("Error loading the cluster config from cache, falling back to last good resource version '%s'", c.lastValidConfig.ResourceVersion)
		return c.lastValidConfig
	} else if !exists {
		// no ConfigMap in the cluster: serve the defaults
		return c.defaultConfig
	} else {
		configMap := obj.(*k8sv1.ConfigMap)
		// fast path: this exact version was already parsed (valid or invalid)
		if c.lastValidConfig.ResourceVersion == configMap.ResourceVersion ||
			c.lastInvalidConfigResourceVersion == configMap.ResourceVersion {
			return c.lastValidConfig
		}
		// parse on top of fresh defaults so removed keys revert to defaults
		config := defaultClusterConfig()
		if err := setConfig(config, configMap); err != nil {
			// remember the bad version so it is not re-parsed (and re-logged)
			c.lastInvalidConfigResourceVersion = configMap.ResourceVersion
			log.DefaultLogger().Reason(err).Errorf("Invalid cluster config with resource version '%s', falling back to last good resource version '%s'", configMap.ResourceVersion, c.lastValidConfig.ResourceVersion)
			return c.lastValidConfig
		}
		log.DefaultLogger().Infof("Updating cluster config to resource version '%s'", configMap.ResourceVersion)
		c.lastValidConfig = config
		return c.lastValidConfig
	}
}
// parseNodeSelectors parses a newline-separated list of "key=value" pairs
// into a map. Lines without an "=" yield an error; only the first "=" on a
// line separates key from value, so values may themselves contain "=".
func parseNodeSelectors(str string) (map[string]string, error) {
	nodeSelectors := make(map[string]string)
	for _, s := range strings.Split(strings.TrimSpace(str), "\n") {
		// SplitN instead of Split: a value containing "=" must not be rejected.
		v := strings.SplitN(s, "=", 2)
		if len(v) != 2 {
			return nil, fmt.Errorf("Invalid node selector: %s", s)
		}
		nodeSelectors[v[0]] = v[1]
	}
	return nodeSelectors, nil
}
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package build
import (
"bytes"
"compress/gzip"
"crypto/sha1"
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"time"
"appengine"
"appengine/datastore"
)
const maxDatastoreStringLen = 500
// A Package describes a package that is listed on the dashboard.
type Package struct {
	Kind    string // "subrepo", "external", or empty for the main Go tree
	Name    string
	Path    string // (empty for the main Go tree)
	NextNum int    // Num of the next head Commit
}

// String returns a short "path: name" description of the package.
func (p *Package) String() string {
	return fmt.Sprintf("%s: %q", p.Path, p.Name)
}

// Key returns the datastore key for this package. The main Go tree
// (empty Path) is stored under the fixed key "go".
func (p *Package) Key(c appengine.Context) *datastore.Key {
	key := p.Path
	if key == "" {
		key = "go"
	}
	return datastore.NewKey(c, "Package", key, 0, nil)
}

// LastCommit returns the most recent Commit for this Package.
func (p *Package) LastCommit(c appengine.Context) (*Commit, error) {
	var commits []*Commit
	_, err := datastore.NewQuery("Commit").
		Ancestor(p.Key(c)).
		Order("-Time").
		Limit(1).
		GetAll(c, &commits)
	if _, ok := err.(*datastore.ErrFieldMismatch); ok {
		// Some fields have been removed, so it's okay to ignore this error.
		err = nil
	}
	if err != nil {
		return nil, err
	}
	if len(commits) != 1 {
		return nil, datastore.ErrNoSuchEntity
	}
	return commits[0], nil
}

// GetPackage fetches a Package by path from the datastore.
func GetPackage(c appengine.Context, path string) (*Package, error) {
	p := &Package{Path: path}
	err := datastore.Get(c, p.Key(c), p)
	if err == datastore.ErrNoSuchEntity {
		return nil, fmt.Errorf("package %q not found", path)
	}
	if _, ok := err.(*datastore.ErrFieldMismatch); ok {
		// Some fields have been removed, so it's okay to ignore this error.
		err = nil
	}
	return p, err
}
// A Commit describes an individual commit in a package.
//
// Each Commit entity is a descendant of its associated Package entity.
// In other words, all Commits with the same PackagePath belong to the same
// datastore entity group.
type Commit struct {
	PackagePath string // (empty for Go commits)
	Hash        string
	ParentHash  string
	Num         int // Internal monotonic counter unique to this package.
	User        string
	Desc        string `datastore:",noindex"`
	Time        time.Time
	// ResultData is the Data string of each build Result for this Commit.
	// For non-Go commits, only the Results for the current Go tip, weekly,
	// and release Tags are stored here. This is purely de-normalized data.
	// The complete data set is stored in Result entities.
	ResultData []string `datastore:",noindex"`
	FailNotificationSent bool
}

// Key returns the datastore key "PackagePath|Hash" under the owning Package.
// It panics if Hash is unset.
func (com *Commit) Key(c appengine.Context) *datastore.Key {
	if com.Hash == "" {
		panic("tried Key on Commit with empty Hash")
	}
	p := Package{Path: com.PackagePath}
	key := com.PackagePath + "|" + com.Hash
	return datastore.NewKey(c, "Commit", key, 0, p.Key(c))
}

// Valid reports whether the commit's hashes are well-formed.
func (c *Commit) Valid() error {
	if !validHash(c.Hash) {
		return errors.New("invalid Hash")
	}
	if c.ParentHash != "" && !validHash(c.ParentHash) { // empty is OK
		return errors.New("invalid ParentHash")
	}
	return nil
}
// Each result line is approx 105 bytes. This constant is a tradeoff between
// build history and the AppEngine datastore limit of 1mb per entity.
const maxResults = 1000

// AddResult adds the denormalized Result data to the Commit's ResultData
// field, trimming the history to the most recent maxResults entries so the
// entity stays under the datastore size limit.
// It must be called from inside a datastore transaction.
func (com *Commit) AddResult(c appengine.Context, r *Result) error {
	if err := datastore.Get(c, com.Key(c), com); err != nil {
		return fmt.Errorf("getting Commit: %v", err)
	}
	com.ResultData = trim(append(com.ResultData, r.Data()), maxResults)
	if _, err := datastore.Put(c, com.Key(c), com); err != nil {
		return fmt.Errorf("putting Commit: %v", err)
	}
	return nil
}

// trim returns the last n elements of s (or all of s when it is shorter).
func trim(s []string, n int) []string {
	l := min(len(s), n)
	return s[len(s)-l:]
}

// min returns the smaller of a and b.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
// Result returns the build Result for this Commit for the given builder/goHash.
// ResultData entries have the form "builder|ok|logHash|goHash" (see Result.Data).
func (c *Commit) Result(builder, goHash string) *Result {
	for _, r := range c.ResultData {
		p := strings.SplitN(r, "|", 4)
		if len(p) != 4 || p[0] != builder || p[3] != goHash {
			continue
		}
		return partsToHash(c, p)
	}
	return nil
}

// Results returns the build Results for this Commit for the given goHash.
func (c *Commit) Results(goHash string) (results []*Result) {
	for _, r := range c.ResultData {
		p := strings.SplitN(r, "|", 4)
		if len(p) != 4 || p[3] != goHash {
			continue
		}
		results = append(results, partsToHash(c, p))
	}
	return
}

// partsToHash converts a Commit and ResultData substrings to a Result.
// p must be the 4 fields of a ResultData entry: builder|ok|logHash|goHash.
func partsToHash(c *Commit, p []string) *Result {
	return &Result{
		Builder:     p[0],
		Hash:        c.Hash,
		PackagePath: c.PackagePath,
		GoHash:      p[3],
		OK:          p[1] == "true",
		LogHash:     p[2],
	}
}
// A Result describes a build result for a Commit on an OS/architecture.
//
// Each Result entity is a descendant of its associated Commit entity.
type Result struct {
	Builder     string // "arch-os[-note]"
	Hash        string
	PackagePath string // (empty for Go commits)
	// The Go Commit this was built against (empty for Go commits).
	GoHash  string
	OK      bool
	Log     string `datastore:"-"`        // for JSON unmarshaling only
	LogHash string `datastore:",noindex"` // Key to the Log record.
	RunTime int64  // time to build+test in nanoseconds
}

// Key returns the datastore key "Builder|PackagePath|Hash|GoHash" under the
// owning Package.
func (r *Result) Key(c appengine.Context) *datastore.Key {
	p := Package{Path: r.PackagePath}
	key := r.Builder + "|" + r.PackagePath + "|" + r.Hash + "|" + r.GoHash
	return datastore.NewKey(c, "Result", key, 0, p.Key(c))
}

// Valid reports whether the result's hashes are well-formed.
func (r *Result) Valid() error {
	if !validHash(r.Hash) {
		return errors.New("invalid Hash")
	}
	if r.PackagePath != "" && !validHash(r.GoHash) {
		return errors.New("invalid GoHash")
	}
	return nil
}

// Data returns the Result in string format
// to be stored in Commit's ResultData field.
func (r *Result) Data() string {
	return fmt.Sprintf("%v|%v|%v|%v", r.Builder, r.OK, r.LogHash, r.GoHash)
}
// A Log is a gzip-compressed log file stored under the SHA1 hash of the
// uncompressed log text.
type Log struct {
CompressedLog []byte
}
func (l *Log) Text() ([]byte, error) {
d, err := gzip.NewReader(bytes.NewBuffer(l.CompressedLog))
if err != nil {
return nil, fmt.Errorf("reading log data: %v", err)
}
b, err := ioutil.ReadAll(d)
if err != nil {
return nil, fmt.Errorf("reading log data: %v", err)
}
return b, nil
}
// PutLog stores the given log text, gzip-compressed, keyed by the SHA1 hash
// of the uncompressed text, and returns that hash.
func PutLog(c appengine.Context, text string) (hash string, err error) {
	h := sha1.New()
	io.WriteString(h, text) // hash.Hash writes never fail
	b := new(bytes.Buffer)
	z, _ := gzip.NewWriterLevel(b, gzip.BestCompression) // only errors on a bad level constant
	// Check compression errors; the original ignored them and could store a
	// truncated log.
	if _, err = io.WriteString(z, text); err != nil {
		return "", err
	}
	if err = z.Close(); err != nil {
		return "", err
	}
	hash = fmt.Sprintf("%x", h.Sum(nil))
	key := datastore.NewKey(c, "Log", hash, 0, nil)
	_, err = datastore.Put(c, key, &Log{b.Bytes()})
	return
}
// A Tag is used to keep track of the most recent Go weekly and release tags.
// Typically there will be one Tag entity for each kind of hg tag.
type Tag struct {
	Kind string // "weekly", "release", or "tip"
	Name string // the tag itself (for example: "release.r60")
	Hash string
}

// Key returns the datastore key for this tag. Tags are keyed by Kind only,
// so at most one Tag of each kind is stored.
func (t *Tag) Key(c appengine.Context) *datastore.Key {
	p := &Package{}
	return datastore.NewKey(c, "Tag", t.Kind, 0, p.Key(c))
}

// Valid reports whether the tag has a known Kind and a well-formed Hash.
func (t *Tag) Valid() error {
	if t.Kind != "weekly" && t.Kind != "release" && t.Kind != "tip" {
		return errors.New("invalid Kind")
	}
	if !validHash(t.Hash) {
		return errors.New("invalid Hash")
	}
	return nil
}

// Commit returns the Commit that corresponds with this Tag.
func (t *Tag) Commit(c appengine.Context) (*Commit, error) {
	com := &Commit{Hash: t.Hash}
	err := datastore.Get(c, com.Key(c), com)
	return com, err
}

// GetTag fetches a Tag by name from the datastore.
func GetTag(c appengine.Context, tag string) (*Tag, error) {
	t := &Tag{Kind: tag}
	if err := datastore.Get(c, t.Key(c), t); err != nil {
		if err == datastore.ErrNoSuchEntity {
			return nil, errors.New("tag not found: " + tag)
		}
		return nil, err
	}
	if err := t.Valid(); err != nil {
		return nil, err
	}
	return t, nil
}
// Packages returns packages of the specified kind.
// Kind must be one of "external" or "subrepo".
func Packages(c appengine.Context, kind string) ([]*Package, error) {
	switch kind {
	case "external", "subrepo":
	default:
		return nil, errors.New(`kind must be one of "external" or "subrepo"`)
	}
	var pkgs []*Package
	q := datastore.NewQuery("Package").Filter("Kind=", kind)
	// Iterate until the query reports datastore.Done.
	for t := q.Run(c); ; {
		pkg := new(Package)
		_, err := t.Next(pkg)
		if _, ok := err.(*datastore.ErrFieldMismatch); ok {
			// Some fields have been removed, so it's okay to ignore this error.
			err = nil
		}
		if err == datastore.Done {
			break
		} else if err != nil {
			return nil, err
		}
		// Skip the main Go tree entity (empty Path).
		if pkg.Path != "" {
			pkgs = append(pkgs, pkg)
		}
	}
	return pkgs, nil
}
misc/dashboard/app: trim old builds from the history
The dashboard is currently failing to store results of new builds for some keys, notably the go.codereview sub-repository. This is causing the builders to mark the entire triggering commit as failed. With the help of David Symonds we think it is because the results value has breached the 1mb datastore limit on AppEngine.
R=dsymonds, adg
CC=golang-dev
https://golang.org/cl/6858094
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package build
import (
"bytes"
"compress/gzip"
"crypto/sha1"
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"time"
"appengine"
"appengine/datastore"
)
const maxDatastoreStringLen = 500
// A Package describes a package that is listed on the dashboard.
type Package struct {
Kind string // "subrepo", "external", or empty for the main Go tree
Name string
Path string // (empty for the main Go tree)
NextNum int // Num of the next head Commit
}
func (p *Package) String() string {
return fmt.Sprintf("%s: %q", p.Path, p.Name)
}
func (p *Package) Key(c appengine.Context) *datastore.Key {
key := p.Path
if key == "" {
key = "go"
}
return datastore.NewKey(c, "Package", key, 0, nil)
}
// LastCommit returns the most recent Commit for this Package.
func (p *Package) LastCommit(c appengine.Context) (*Commit, error) {
var commits []*Commit
_, err := datastore.NewQuery("Commit").
Ancestor(p.Key(c)).
Order("-Time").
Limit(1).
GetAll(c, &commits)
if _, ok := err.(*datastore.ErrFieldMismatch); ok {
// Some fields have been removed, so it's okay to ignore this error.
err = nil
}
if err != nil {
return nil, err
}
if len(commits) != 1 {
return nil, datastore.ErrNoSuchEntity
}
return commits[0], nil
}
// GetPackage fetches a Package by path from the datastore.
func GetPackage(c appengine.Context, path string) (*Package, error) {
p := &Package{Path: path}
err := datastore.Get(c, p.Key(c), p)
if err == datastore.ErrNoSuchEntity {
return nil, fmt.Errorf("package %q not found", path)
}
if _, ok := err.(*datastore.ErrFieldMismatch); ok {
// Some fields have been removed, so it's okay to ignore this error.
err = nil
}
return p, err
}
// A Commit describes an individual commit in a package.
//
// Each Commit entity is a descendant of its associated Package entity.
// In other words, all Commits with the same PackagePath belong to the same
// datastore entity group.
type Commit struct {
PackagePath string // (empty for Go commits)
Hash string
ParentHash string
Num int // Internal monotonic counter unique to this package.
User string
Desc string `datastore:",noindex"`
Time time.Time
// ResultData is the Data string of each build Result for this Commit.
// For non-Go commits, only the Results for the current Go tip, weekly,
// and release Tags are stored here. This is purely de-normalized data.
// The complete data set is stored in Result entities.
ResultData []string `datastore:",noindex"`
FailNotificationSent bool
}
func (com *Commit) Key(c appengine.Context) *datastore.Key {
if com.Hash == "" {
panic("tried Key on Commit with empty Hash")
}
p := Package{Path: com.PackagePath}
key := com.PackagePath + "|" + com.Hash
return datastore.NewKey(c, "Commit", key, 0, p.Key(c))
}
func (c *Commit) Valid() error {
if !validHash(c.Hash) {
return errors.New("invalid Hash")
}
if c.ParentHash != "" && !validHash(c.ParentHash) { // empty is OK
return errors.New("invalid ParentHash")
}
return nil
}
// each result line is approx 105 bytes. This constant is a tradeoff between
// build history and the AppEngine datastore limit of 1mb.
const maxResults = 1000
// AddResult adds the denormalized Result data to the Commit's ResultData
// field, trimming the history to the most recent maxResults entries.
// It must be called from inside a datastore transaction.
func (com *Commit) AddResult(c appengine.Context, r *Result) error {
	if err := datastore.Get(c, com.Key(c), com); err != nil {
		return fmt.Errorf("getting Commit: %v", err)
	}
	com.ResultData = trim(append(com.ResultData, r.Data()), maxResults)
	if _, err := datastore.Put(c, com.Key(c), com); err != nil {
		return fmt.Errorf("putting Commit: %v", err)
	}
	return nil
}
// trim returns at most the last n elements of s.
func trim(s []string, n int) []string {
	if len(s) <= n {
		return s
	}
	return s[len(s)-n:]
}
// min returns the smaller of its two arguments.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// Result returns the build Result for this Commit for the given builder/goHash.
func (c *Commit) Result(builder, goHash string) *Result {
for _, r := range c.ResultData {
p := strings.SplitN(r, "|", 4)
if len(p) != 4 || p[0] != builder || p[3] != goHash {
continue
}
return partsToHash(c, p)
}
return nil
}
// Results returns the build Results for this Commit for the given goHash.
func (c *Commit) Results(goHash string) (results []*Result) {
for _, r := range c.ResultData {
p := strings.SplitN(r, "|", 4)
if len(p) != 4 || p[3] != goHash {
continue
}
results = append(results, partsToHash(c, p))
}
return
}
// partsToHash converts a Commit and ResultData substrings to a Result.
func partsToHash(c *Commit, p []string) *Result {
return &Result{
Builder: p[0],
Hash: c.Hash,
PackagePath: c.PackagePath,
GoHash: p[3],
OK: p[1] == "true",
LogHash: p[2],
}
}
// A Result describes a build result for a Commit on an OS/architecture.
//
// Each Result entity is a descendant of its associated Commit entity.
type Result struct {
Builder string // "arch-os[-note]"
Hash string
PackagePath string // (empty for Go commits)
// The Go Commit this was built against (empty for Go commits).
GoHash string
OK bool
Log string `datastore:"-"` // for JSON unmarshaling only
LogHash string `datastore:",noindex"` // Key to the Log record.
RunTime int64 // time to build+test in nanoseconds
}
func (r *Result) Key(c appengine.Context) *datastore.Key {
p := Package{Path: r.PackagePath}
key := r.Builder + "|" + r.PackagePath + "|" + r.Hash + "|" + r.GoHash
return datastore.NewKey(c, "Result", key, 0, p.Key(c))
}
func (r *Result) Valid() error {
if !validHash(r.Hash) {
return errors.New("invalid Hash")
}
if r.PackagePath != "" && !validHash(r.GoHash) {
return errors.New("invalid GoHash")
}
return nil
}
// Data returns the Result in string format
// to be stored in Commit's ResultData field.
func (r *Result) Data() string {
return fmt.Sprintf("%v|%v|%v|%v", r.Builder, r.OK, r.LogHash, r.GoHash)
}
// A Log is a gzip-compressed log file stored under the SHA1 hash of the
// uncompressed log text.
type Log struct {
CompressedLog []byte
}
func (l *Log) Text() ([]byte, error) {
d, err := gzip.NewReader(bytes.NewBuffer(l.CompressedLog))
if err != nil {
return nil, fmt.Errorf("reading log data: %v", err)
}
b, err := ioutil.ReadAll(d)
if err != nil {
return nil, fmt.Errorf("reading log data: %v", err)
}
return b, nil
}
func PutLog(c appengine.Context, text string) (hash string, err error) {
h := sha1.New()
io.WriteString(h, text)
b := new(bytes.Buffer)
z, _ := gzip.NewWriterLevel(b, gzip.BestCompression)
io.WriteString(z, text)
z.Close()
hash = fmt.Sprintf("%x", h.Sum(nil))
key := datastore.NewKey(c, "Log", hash, 0, nil)
_, err = datastore.Put(c, key, &Log{b.Bytes()})
return
}
// A Tag is used to keep track of the most recent Go weekly and release tags.
// Typically there will be one Tag entity for each kind of hg tag.
type Tag struct {
Kind string // "weekly", "release", or "tip"
Name string // the tag itself (for example: "release.r60")
Hash string
}
func (t *Tag) Key(c appengine.Context) *datastore.Key {
p := &Package{}
return datastore.NewKey(c, "Tag", t.Kind, 0, p.Key(c))
}
func (t *Tag) Valid() error {
if t.Kind != "weekly" && t.Kind != "release" && t.Kind != "tip" {
return errors.New("invalid Kind")
}
if !validHash(t.Hash) {
return errors.New("invalid Hash")
}
return nil
}
// Commit returns the Commit that corresponds with this Tag.
func (t *Tag) Commit(c appengine.Context) (*Commit, error) {
com := &Commit{Hash: t.Hash}
err := datastore.Get(c, com.Key(c), com)
return com, err
}
// GetTag fetches a Tag by name from the datastore.
func GetTag(c appengine.Context, tag string) (*Tag, error) {
t := &Tag{Kind: tag}
if err := datastore.Get(c, t.Key(c), t); err != nil {
if err == datastore.ErrNoSuchEntity {
return nil, errors.New("tag not found: " + tag)
}
return nil, err
}
if err := t.Valid(); err != nil {
return nil, err
}
return t, nil
}
// Packages returns packages of the specified kind.
// Kind must be one of "external" or "subrepo".
func Packages(c appengine.Context, kind string) ([]*Package, error) {
switch kind {
case "external", "subrepo":
default:
return nil, errors.New(`kind must be one of "external" or "subrepo"`)
}
var pkgs []*Package
q := datastore.NewQuery("Package").Filter("Kind=", kind)
for t := q.Run(c); ; {
pkg := new(Package)
_, err := t.Next(pkg)
if _, ok := err.(*datastore.ErrFieldMismatch); ok {
// Some fields have been removed, so it's okay to ignore this error.
err = nil
}
if err == datastore.Done {
break
} else if err != nil {
return nil, err
}
if pkg.Path != "" {
pkgs = append(pkgs, pkg)
}
}
return pkgs, nil
}
|
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gce_pd
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/operationmanager"
"k8s.io/kubernetes/pkg/util/sets"
)
const (
	diskByIdPath         = "/dev/disk/by-id/"
	diskGooglePrefix     = "google-"
	diskScsiGooglePrefix = "scsi-0Google_PersistentDisk_"
	diskPartitionSuffix  = "-part"
	diskSDPath           = "/dev/sd"
	diskSDPattern        = "/dev/sd*"
	maxChecks            = 60 // verification polls per attach/detach attempt
	maxRetries           = 10 // cloud attach/detach attempts before giving up
	checkSleepDuration   = time.Second
	errorSleepDuration   = 5 * time.Second
)

// Singleton operation manager for managing detach clean up go routines
var detachCleanupManager = operationmanager.NewOperationManager()

// GCEDiskUtil groups the attach/detach/mount helpers for GCE persistent disks.
type GCEDiskUtil struct{}
// Attaches a disk specified by a volume.GCEPersistentDisk to the current kubelet.
// Mounts the disk to it's global path.
func (diskUtil *GCEDiskUtil) AttachAndMountDisk(b *gcePersistentDiskBuilder, globalPDPath string) error {
	glog.V(5).Infof("AttachAndMountDisk(b, %q) where b is %#v\r\n", globalPDPath, b)
	// Block execution until any pending detach goroutines for this pd have completed
	detachCleanupManager.Send(b.pdName, true)
	// Snapshot the /dev/sd* entries present before attaching, so that newly
	// appearing devices can be identified later (see udevadmChangeToNewDrives).
	sdBefore, err := filepath.Glob(diskSDPattern)
	if err != nil {
		// Glob failure is logged but not fatal; verification can still proceed.
		glog.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err)
	}
	sdBeforeSet := sets.NewString(sdBefore...)
	devicePath, err := attachDiskAndVerify(b, sdBeforeSet)
	if err != nil {
		return err
	}
	// Only mount the PD globally once.
	notMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
	if err != nil {
		if os.IsNotExist(err) {
			// Mount point does not exist yet: create it and proceed to mount.
			if err := os.MkdirAll(globalPDPath, 0750); err != nil {
				return err
			}
			notMnt = true
		} else {
			return err
		}
	}
	options := []string{}
	if b.readOnly {
		options = append(options, "ro")
	}
	if notMnt {
		err = b.diskMounter.Mount(devicePath, globalPDPath, b.fsType, options)
		if err != nil {
			// Clean up the directory we created if the mount failed.
			os.Remove(globalPDPath)
			return err
		}
	}
	return nil
}
// Unmounts the device and detaches the disk from the kubelet's host machine.
func (util *GCEDiskUtil) DetachDisk(c *gcePersistentDiskCleaner) error {
	// Unmount the global PD mount, which should be the only one.
	globalPDPath := makeGlobalPDName(c.plugin.host, c.pdName)
	glog.V(5).Infof("DetachDisk(c) where c is %#v and the globalPDPath is %q\r\n", c, globalPDPath)
	if err := c.mounter.Unmount(globalPDPath); err != nil {
		return err
	}
	if err := os.Remove(globalPDPath); err != nil {
		return err
	}
	// NOTE(review): the pending-detach check happens only after the unmount
	// above has already run — confirm this ordering is intentional.
	if detachCleanupManager.Exists(c.pdName) {
		glog.Warningf("Terminating new DetachDisk call for GCE PD %q. A previous detach call for this PD is still pending.", c.pdName)
		return nil
	}
	// Detach disk, retry if needed.
	go detachDiskAndVerify(c)
	return nil
}
// Attaches the specified persistent disk device to node, verifies that it is
// attached, and retries if it fails. Returns the device path of the attached
// disk, or an error after all retries are exhausted.
func attachDiskAndVerify(b *gcePersistentDiskBuilder, sdBeforeSet sets.String) (string, error) {
	devicePaths := getDiskByIdPaths(b.gcePersistentDisk)
	var gceCloud *gce_cloud.GCECloud
	for numRetries := 0; numRetries < maxRetries; numRetries++ {
		var err error
		if gceCloud == nil {
			gceCloud, err = getCloudProvider()
			if err != nil || gceCloud == nil {
				// Retry on error. See issue #11321
				// Fixed: this message previously said "detaching" (copy-paste
				// from the detach path) even though this is the attach path.
				glog.Errorf("Error getting GCECloudProvider while attaching PD %q: %v", b.pdName, err)
				time.Sleep(errorSleepDuration)
				continue
			}
		}
		if numRetries > 0 {
			glog.Warningf("Timed out waiting for GCE PD %q to attach. Retrying attach.", b.pdName)
		}
		if err := gceCloud.AttachDisk(b.pdName, b.readOnly); err != nil {
			// Retry on error. See issue #11321. Continue and verify if disk is attached, because a
			// previous attach operation may still succeed.
			glog.Errorf("Error attaching PD %q: %v", b.pdName, err)
		}
		for numChecks := 0; numChecks < maxChecks; numChecks++ {
			path, err := verifyDevicePath(devicePaths, sdBeforeSet)
			if err != nil {
				// Log error, if any, and continue checking periodically. See issue #11321
				glog.Errorf("Error verifying GCE PD (%q) is attached: %v", b.pdName, err)
			} else if path != "" {
				// A device path has successfully been created for the PD
				glog.Infof("Successfully attached GCE PD %q.", b.pdName)
				return path, nil
			}
			// Sleep then check again
			glog.V(3).Infof("Waiting for GCE PD %q to attach.", b.pdName)
			time.Sleep(checkSleepDuration)
		}
	}
	return "", fmt.Errorf("Could not attach GCE PD %q. Timeout waiting for mount paths to be created.", b.pdName)
}
// verifyDevicePath returns the first path in devicePaths that exists, or an
// empty string if none exist yet.
func verifyDevicePath(devicePaths []string, sdBeforeSet sets.String) (string, error) {
	if err := udevadmChangeToNewDrives(sdBeforeSet); err != nil {
		// udevadm errors should not block disk detachment, log and continue
		glog.Errorf("udevadmChangeToNewDrives failed with: %v", err)
	}
	for _, devicePath := range devicePaths {
		// Fixed: the original declared a local named pathExists, shadowing the
		// pathExists helper function it was calling.
		exists, err := pathExists(devicePath)
		if err != nil {
			return "", fmt.Errorf("Error checking if path exists: %v", err)
		}
		if exists {
			return devicePath, nil
		}
	}
	return "", nil
}
// Detaches the specified persistent disk device from node, verifies that it is detached, and retries if it fails.
// This function is intended to be called asynchronously as a go routine.
// It starts the detachCleanupManager with the specified pdName so that callers can wait for completion.
func detachDiskAndVerify(c *gcePersistentDiskCleaner) {
	glog.V(5).Infof("detachDiskAndVerify for pd %q.", c.pdName)
	defer util.HandleCrash()
	// Start operation, so that other threads can wait on this detach operation.
	// Set bufferSize to 0 so senders are blocked on send until we receive.
	ch, err := detachCleanupManager.Start(c.pdName, 0 /* bufferSize */)
	if err != nil {
		glog.Errorf("Error adding %q to detachCleanupManager: %v", c.pdName, err)
		return
	}
	defer detachCleanupManager.Close(c.pdName)
	defer func() {
		// Unblock any callers that have been waiting for this detach routine to complete.
		// Drain the channel non-blockingly until it is empty, then return.
		for {
			select {
			case <-ch:
				glog.V(5).Infof("detachDiskAndVerify for pd %q clearing chan.", c.pdName)
			default:
				glog.V(5).Infof("detachDiskAndVerify for pd %q done clearing chans.", c.pdName)
				return
			}
		}
	}()
	devicePaths := getDiskByIdPaths(c.gcePersistentDisk)
	var gceCloud *gce_cloud.GCECloud
	for numRetries := 0; numRetries < maxRetries; numRetries++ {
		var err error
		if gceCloud == nil {
			// The cloud provider is fetched lazily and cached across retries.
			gceCloud, err = getCloudProvider()
			if err != nil || gceCloud == nil {
				// Retry on error. See issue #11321
				glog.Errorf("Error getting GCECloudProvider while detaching PD %q: %v", c.pdName, err)
				time.Sleep(errorSleepDuration)
				continue
			}
		}
		if numRetries > 0 {
			glog.Warningf("Timed out waiting for GCE PD %q to detach. Retrying detach.", c.pdName)
		}
		if err := gceCloud.DetachDisk(c.pdName); err != nil {
			// Retry on error. See issue #11321. Continue and verify if disk is detached, because a
			// previous detach operation may still succeed.
			glog.Errorf("Error detaching PD %q: %v", c.pdName, err)
		}
		for numChecks := 0; numChecks < maxChecks; numChecks++ {
			allPathsRemoved, err := verifyAllPathsRemoved(devicePaths)
			if err != nil {
				// Log error, if any, and continue checking periodically.
				glog.Errorf("Error verifying GCE PD (%q) is detached: %v", c.pdName, err)
			} else if allPathsRemoved {
				// All paths to the PD have been successfully removed
				glog.Infof("Successfully detached GCE PD %q.", c.pdName)
				return
			}
			// Sleep then check again
			glog.V(3).Infof("Waiting for GCE PD %q to detach.", c.pdName)
			time.Sleep(checkSleepDuration)
		}
	}
	glog.Errorf("Failed to detach GCE PD %q. One or more mount paths was not removed.", c.pdName)
}
// verifyAllPathsRemoved reports whether none of the given device paths exist
// any more, triggering udevadm on each path first to refresh device state.
// (The previous comment, "Returns the first path that exists", was a
// copy-paste error from verifyDevicePath.)
func verifyAllPathsRemoved(devicePaths []string) (bool, error) {
	allPathsRemoved := true
	for _, devicePath := range devicePaths {
		if err := udevadmChangeToDrive(devicePath); err != nil {
			// udevadm errors should not block disk detachment, log and continue
			glog.Errorf("%v", err)
		}
		exists, err := pathExists(devicePath)
		if err != nil {
			return false, fmt.Errorf("Error checking if path exists: %v", err)
		}
		allPathsRemoved = allPathsRemoved && !exists
	}
	return allPathsRemoved, nil
}
// Returns the list of /dev/disk/by-id/* symlink paths that may refer to the
// given PD — one for each of the two naming schemes ("google-" and the SCSI
// form) — with the partition suffix appended when a partition is set.
func getDiskByIdPaths(pd *gcePersistentDisk) []string {
	suffix := ""
	if pd.partition != "" {
		suffix = diskPartitionSuffix + pd.partition
	}
	return []string{
		path.Join(diskByIdPath, diskGooglePrefix+pd.pdName) + suffix,
		path.Join(diskByIdPath, diskScsiGooglePrefix+pd.pdName) + suffix,
	}
}
// Checks if the specified path exists
func pathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
} else if os.IsNotExist(err) {
return false, nil
} else {
return false, err
}
}
// getCloudProvider returns the GCE cloud provider singleton, or an error if
// the provider is unavailable or is not actually a GCE provider.
func getCloudProvider() (*gce_cloud.GCECloud, error) {
	gceCloudProvider, err := cloudprovider.GetCloudProvider("gce", nil)
	if err != nil || gceCloudProvider == nil {
		return nil, err
	}
	// Guard the conversion instead of asserting unconditionally: a non-GCE
	// provider here indicates a bug in GetCloudProvider(), and returning an
	// error is safer than panicking on a failed type assertion.
	gceCloud, ok := gceCloudProvider.(*gce_cloud.GCECloud)
	if !ok {
		return nil, fmt.Errorf("cloud provider %q is not a GCE cloud provider", "gce")
	}
	return gceCloud, nil
}
// Calls "udevadm trigger --action=change" for newly created "/dev/sd*" drives (exist only in after set).
// This is workaround for Issue #7972. Once the underlying issue has been resolved, this may be removed.
func udevadmChangeToNewDrives(sdBeforeSet sets.String) error {
sdAfter, err := filepath.Glob(diskSDPattern)
if err != nil {
return fmt.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err)
}
for _, sd := range sdAfter {
if !sdBeforeSet.Has(sd) {
return udevadmChangeToDrive(sd)
}
}
return nil
}
// Calls "udevadm trigger --action=change" on the specified drive.
// drivePath must be the the block device path to trigger on, in the format "/dev/sd*", or a symlink to it.
// This is workaround for Issue #7972. Once the underlying issue has been resolved, this may be removed.
func udevadmChangeToDrive(drivePath string) error {
glog.V(5).Infof("udevadmChangeToDrive: drive=%q", drivePath)
// Evaluate symlink, if any
drive, err := filepath.EvalSymlinks(drivePath)
if err != nil {
return fmt.Errorf("udevadmChangeToDrive: filepath.EvalSymlinks(%q) failed with %v.", drivePath, err)
}
glog.V(5).Infof("udevadmChangeToDrive: symlink path is %q", drive)
// Check to make sure input is "/dev/sd*"
if !strings.Contains(drive, diskSDPath) {
return fmt.Errorf("udevadmChangeToDrive: expected input in the form \"%s\" but drive is %q.", diskSDPattern, drive)
}
// Call "udevadm trigger --action=change --property-match=DEVNAME=/dev/sd..."
_, err = exec.New().Command(
"udevadm",
"trigger",
"--action=change",
fmt.Sprintf("--property-match=DEVNAME=%s", drive)).CombinedOutput()
if err != nil {
return fmt.Errorf("udevadmChangeToDrive: udevadm trigger failed for drive %q with %v.", drive, err)
}
return nil
}
Prevent GCE PD attach code from succeeding if disk failed to attach.
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gce_pd
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/operationmanager"
"k8s.io/kubernetes/pkg/util/sets"
)
const (
diskByIdPath = "/dev/disk/by-id/"
diskGooglePrefix = "google-"
diskScsiGooglePrefix = "scsi-0Google_PersistentDisk_"
diskPartitionSuffix = "-part"
diskSDPath = "/dev/sd"
diskSDPattern = "/dev/sd*"
maxChecks = 60
maxRetries = 10
checkSleepDuration = time.Second
errorSleepDuration = 5 * time.Second
)
// Singleton operation manager for managing detach clean up go routines
var detachCleanupManager = operationmanager.NewOperationManager()
// GCEDiskUtil provides attach/detach and mount helpers for GCE persistent disks.
type GCEDiskUtil struct{}
// Attaches a disk specified by a volume.GCEPersistentDisk to the current kubelet.
// Mounts the disk to its global path. Blocks until any pending detach
// goroutine for the same PD has completed before attempting the attach.
func (diskUtil *GCEDiskUtil) AttachAndMountDisk(b *gcePersistentDiskBuilder, globalPDPath string) error {
	glog.V(5).Infof("AttachAndMountDisk(b, %q) where b is %#v\r\n", globalPDPath, b)
	// Block execution until any pending detach goroutines for this pd have completed
	detachCleanupManager.Send(b.pdName, true)

	// Snapshot the current /dev/sd* entries so devices that appear after the
	// attach can be identified and udev-triggered during verification.
	sdBefore, err := filepath.Glob(diskSDPattern)
	if err != nil {
		glog.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err)
	}
	sdBeforeSet := sets.NewString(sdBefore...)

	devicePath, err := attachDiskAndVerify(b, sdBeforeSet)
	if err != nil {
		return err
	}

	// Only mount the PD globally once.
	notMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
	if err != nil {
		if os.IsNotExist(err) {
			// Global mount dir does not exist yet; create it, then mount below.
			if err := os.MkdirAll(globalPDPath, 0750); err != nil {
				return err
			}
			notMnt = true
		} else {
			return err
		}
	}
	options := []string{}
	if b.readOnly {
		options = append(options, "ro")
	}
	if notMnt {
		err = b.diskMounter.Mount(devicePath, globalPDPath, b.fsType, options)
		if err != nil {
			// Remove the dir we may have created so a retry starts clean.
			os.Remove(globalPDPath)
			return err
		}
	}
	return nil
}
// Unmounts the device and detaches the disk from the kubelet's host machine.
// The cloud-side detach (with retries) runs asynchronously in a goroutine;
// this function returns as soon as the local unmount and cleanup succeed.
func (util *GCEDiskUtil) DetachDisk(c *gcePersistentDiskCleaner) error {
	// Unmount the global PD mount, which should be the only one.
	globalPDPath := makeGlobalPDName(c.plugin.host, c.pdName)
	glog.V(5).Infof("DetachDisk(c) where c is %#v and the globalPDPath is %q\r\n", c, globalPDPath)
	if err := c.mounter.Unmount(globalPDPath); err != nil {
		return err
	}
	if err := os.Remove(globalPDPath); err != nil {
		return err
	}
	// If a detach goroutine for this PD is already running, do not start another.
	if detachCleanupManager.Exists(c.pdName) {
		glog.Warningf("Terminating new DetachDisk call for GCE PD %q. A previous detach call for this PD is still pending.", c.pdName)
		return nil
	}
	// Detach disk asynchronously; retries happen inside the goroutine.
	go detachDiskAndVerify(c)
	return nil
}
// Attaches the specified persistent disk device to node, verifies that it is attached, and retries if it fails.
// Returns the device path the disk appeared at, or an error once all retries
// are exhausted.
func attachDiskAndVerify(b *gcePersistentDiskBuilder, sdBeforeSet sets.String) (string, error) {
	devicePaths := getDiskByIdPaths(b.gcePersistentDisk)
	var gceCloud *gce_cloud.GCECloud
	for numRetries := 0; numRetries < maxRetries; numRetries++ {
		// Block execution until any pending detach goroutines for this pd have completed
		detachCleanupManager.Send(b.pdName, true)

		var err error
		if gceCloud == nil {
			// Lazily resolve the cloud provider; retry on error. See issue #11321
			gceCloud, err = getCloudProvider()
			if err != nil || gceCloud == nil {
				// Log message fixed: this is the attach path, not detach.
				glog.Errorf("Error getting GCECloudProvider while attaching PD %q: %v", b.pdName, err)
				time.Sleep(errorSleepDuration)
				continue
			}
		}
		if numRetries > 0 {
			glog.Warningf("Timed out waiting for GCE PD %q to attach. Retrying attach.", b.pdName)
		}

		if err := gceCloud.AttachDisk(b.pdName, b.readOnly); err != nil {
			// Retry on error. See issue #11321.
			glog.Errorf("Error attaching PD %q: %v", b.pdName, err)
			time.Sleep(errorSleepDuration)
			continue
		}

		// Poll until one of the expected by-id paths appears.
		for numChecks := 0; numChecks < maxChecks; numChecks++ {
			path, err := verifyDevicePath(devicePaths, sdBeforeSet)
			if err != nil {
				// Log error, if any, and continue checking periodically. See issue #11321
				glog.Errorf("Error verifying GCE PD (%q) is attached: %v", b.pdName, err)
			} else if path != "" {
				// A device path has successfully been created for the PD
				glog.Infof("Successfully attached GCE PD %q.", b.pdName)
				return path, nil
			}
			// Sleep then check again
			glog.V(3).Infof("Waiting for GCE PD %q to attach.", b.pdName)
			time.Sleep(checkSleepDuration)
		}
	}
	return "", fmt.Errorf("Could not attach GCE PD %q. Timeout waiting for mount paths to be created.", b.pdName)
}
// Returns the first of the given device paths that exists on disk, or the
// empty string if none of them exist yet. Triggers udev change events for any
// drives that appeared since sdBeforeSet was captured before checking.
func verifyDevicePath(devicePaths []string, sdBeforeSet sets.String) (string, error) {
	if err := udevadmChangeToNewDrives(sdBeforeSet); err != nil {
		// udevadm errors should not block disk detachment, log and continue
		glog.Errorf("udevadmChangeToNewDrives failed with: %v", err)
	}
	// Renamed the result variable so it no longer shadows the pathExists
	// function.
	for _, devicePath := range devicePaths {
		exists, err := pathExists(devicePath)
		if err != nil {
			return "", fmt.Errorf("Error checking if path exists: %v", err)
		}
		if exists {
			return devicePath, nil
		}
	}
	return "", nil
}
// Detaches the specified persistent disk device from node, verifies that it is detached, and retries if it fails.
// This function is intended to be called asynchronously as a go routine.
// It starts the detachCleanupManager with the specified pdName so that callers can wait for completion.
func detachDiskAndVerify(c *gcePersistentDiskCleaner) {
	glog.V(5).Infof("detachDiskAndVerify for pd %q.", c.pdName)
	defer util.HandleCrash()

	// Start operation, so that other threads can wait on this detach operation.
	// Set bufferSize to 0 so senders are blocked on send until we receive.
	ch, err := detachCleanupManager.Start(c.pdName, 0 /* bufferSize */)
	if err != nil {
		glog.Errorf("Error adding %q to detachCleanupManager: %v", c.pdName, err)
		return
	}
	defer detachCleanupManager.Close(c.pdName)
	defer func() {
		// Unblock any callers that have been waiting for this detach routine to complete.
		// Non-blocking drain: keep receiving until the channel is empty, then return.
		for {
			select {
			case <-ch:
				glog.V(5).Infof("detachDiskAndVerify for pd %q clearing chan.", c.pdName)
			default:
				glog.V(5).Infof("detachDiskAndVerify for pd %q done clearing chans.", c.pdName)
				return
			}
		}
	}()

	devicePaths := getDiskByIdPaths(c.gcePersistentDisk)
	var gceCloud *gce_cloud.GCECloud
	for numRetries := 0; numRetries < maxRetries; numRetries++ {
		var err error
		if gceCloud == nil {
			// Lazily resolve the cloud provider; retry on error. See issue #11321
			gceCloud, err = getCloudProvider()
			if err != nil || gceCloud == nil {
				glog.Errorf("Error getting GCECloudProvider while detaching PD %q: %v", c.pdName, err)
				time.Sleep(errorSleepDuration)
				continue
			}
		}
		if numRetries > 0 {
			glog.Warningf("Timed out waiting for GCE PD %q to detach. Retrying detach.", c.pdName)
		}

		if err := gceCloud.DetachDisk(c.pdName); err != nil {
			// Retry on error. See issue #11321. Continue and verify if disk is detached, because a
			// previous detach operation may still succeed.
			glog.Errorf("Error detaching PD %q: %v", c.pdName, err)
		}

		// Poll until every by-id path for this disk has disappeared.
		for numChecks := 0; numChecks < maxChecks; numChecks++ {
			allPathsRemoved, err := verifyAllPathsRemoved(devicePaths)
			if err != nil {
				// Log error, if any, and continue checking periodically.
				glog.Errorf("Error verifying GCE PD (%q) is detached: %v", c.pdName, err)
			} else if allPathsRemoved {
				// All paths to the PD have been successfully removed
				glog.Infof("Successfully detached GCE PD %q.", c.pdName)
				return
			}
			// Sleep then check again
			glog.V(3).Infof("Waiting for GCE PD %q to detach.", c.pdName)
			time.Sleep(checkSleepDuration)
		}
	}
	glog.Errorf("Failed to detach GCE PD %q. One or more mount paths was not removed.", c.pdName)
}
// Returns true when none of the given device paths exist any longer (i.e. the
// disk is fully detached); false if at least one path is still present.
// (Previous comment was copied from verifyDevicePath and described the wrong
// return value.)
func verifyAllPathsRemoved(devicePaths []string) (bool, error) {
	allPathsRemoved := true
	for _, path := range devicePaths {
		// Trigger a udev change event first so stale symlinks are refreshed.
		// udevadm errors should not block disk detachment, log and continue.
		if err := udevadmChangeToDrive(path); err != nil {
			glog.Errorf("%v", err)
		}
		if exists, err := pathExists(path); err != nil {
			return false, fmt.Errorf("Error checking if path exists: %v", err)
		} else {
			allPathsRemoved = allPathsRemoved && !exists
		}
	}
	return allPathsRemoved, nil
}
// Returns the list of /dev/disk/by-id/* symlink paths that may refer to the
// given PD — one for each of the two naming schemes ("google-" and the SCSI
// form) — with the partition suffix appended when a partition is set.
func getDiskByIdPaths(pd *gcePersistentDisk) []string {
	suffix := ""
	if pd.partition != "" {
		suffix = diskPartitionSuffix + pd.partition
	}
	return []string{
		path.Join(diskByIdPath, diskGooglePrefix+pd.pdName) + suffix,
		path.Join(diskByIdPath, diskScsiGooglePrefix+pd.pdName) + suffix,
	}
}
// Checks if the specified path exists
func pathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
} else if os.IsNotExist(err) {
return false, nil
} else {
return false, err
}
}
// getCloudProvider returns the GCE cloud provider singleton, or an error if
// the provider is unavailable or is not actually a GCE provider.
func getCloudProvider() (*gce_cloud.GCECloud, error) {
	gceCloudProvider, err := cloudprovider.GetCloudProvider("gce", nil)
	if err != nil || gceCloudProvider == nil {
		return nil, err
	}
	// Guard the conversion instead of asserting unconditionally: a non-GCE
	// provider here indicates a bug in GetCloudProvider(), and returning an
	// error is safer than panicking on a failed type assertion.
	gceCloud, ok := gceCloudProvider.(*gce_cloud.GCECloud)
	if !ok {
		return nil, fmt.Errorf("cloud provider %q is not a GCE cloud provider", "gce")
	}
	return gceCloud, nil
}
// Calls "udevadm trigger --action=change" for newly created "/dev/sd*" drives (exist only in after set).
// This is workaround for Issue #7972. Once the underlying issue has been resolved, this may be removed.
func udevadmChangeToNewDrives(sdBeforeSet sets.String) error {
sdAfter, err := filepath.Glob(diskSDPattern)
if err != nil {
return fmt.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err)
}
for _, sd := range sdAfter {
if !sdBeforeSet.Has(sd) {
return udevadmChangeToDrive(sd)
}
}
return nil
}
// Calls "udevadm trigger --action=change" on the specified drive.
// drivePath must be the the block device path to trigger on, in the format "/dev/sd*", or a symlink to it.
// This is workaround for Issue #7972. Once the underlying issue has been resolved, this may be removed.
func udevadmChangeToDrive(drivePath string) error {
glog.V(5).Infof("udevadmChangeToDrive: drive=%q", drivePath)
// Evaluate symlink, if any
drive, err := filepath.EvalSymlinks(drivePath)
if err != nil {
return fmt.Errorf("udevadmChangeToDrive: filepath.EvalSymlinks(%q) failed with %v.", drivePath, err)
}
glog.V(5).Infof("udevadmChangeToDrive: symlink path is %q", drive)
// Check to make sure input is "/dev/sd*"
if !strings.Contains(drive, diskSDPath) {
return fmt.Errorf("udevadmChangeToDrive: expected input in the form \"%s\" but drive is %q.", diskSDPattern, drive)
}
// Call "udevadm trigger --action=change --property-match=DEVNAME=/dev/sd..."
_, err = exec.New().Command(
"udevadm",
"trigger",
"--action=change",
fmt.Sprintf("--property-match=DEVNAME=%s", drive)).CombinedOutput()
if err != nil {
return fmt.Errorf("udevadmChangeToDrive: udevadm trigger failed for drive %q with %v.", drive, err)
}
return nil
}
|
/*
* This file is part of the kubevirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2017 Red Hat, Inc.
*
*/
package watchdog
import (
"io/ioutil"
"os"
"path/filepath"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
v1 "kubevirt.io/client-go/api"
"k8s.io/apimachinery/pkg/types"
"kubevirt.io/client-go/precond"
)
var _ = Describe("Watchdog", func() {
Context("When watching files in a directory", func() {
var tmpVirtShareDir string
var tmpWatchdogDir string
var err error
BeforeEach(func() {
tmpVirtShareDir, err = ioutil.TempDir("", "kubevirt")
Expect(err).ToNot(HaveOccurred())
tmpWatchdogDir = WatchdogFileDirectory(tmpVirtShareDir)
err = os.MkdirAll(tmpWatchdogDir, 0755)
Expect(err).ToNot(HaveOccurred())
})
It("should detect expired watchdog files", func() {
fileName := filepath.Join(tmpWatchdogDir, "default_expiredvmi")
Expect(os.Create(fileName)).ToNot(BeNil())
now := time.Now()
domains, err := getExpiredDomains(1, tmpVirtShareDir, now)
Expect(err).ToNot(HaveOccurred())
Expect(len(domains)).To(Equal(0))
now = now.Add(time.Second * 3)
domains, err = getExpiredDomains(1, tmpVirtShareDir, now)
Expect(err).ToNot(HaveOccurred())
Expect(len(domains)).To(Equal(1))
Expect(os.Create(fileName)).ToNot(BeNil())
now = time.Now()
domains, err = getExpiredDomains(1, tmpVirtShareDir, now)
Expect(err).ToNot(HaveOccurred())
Expect(len(domains)).To(Equal(0))
})
It("should successfully remove watchdog file", func() {
vmi := v1.NewMinimalVMI("tvmi")
namespace := precond.MustNotBeEmpty(vmi.GetObjectMeta().GetNamespace())
domain := precond.MustNotBeEmpty(vmi.GetObjectMeta().GetName())
now := time.Now()
fileName := WatchdogFileFromNamespaceName(tmpVirtShareDir, namespace, domain)
Expect(os.Create(fileName)).ToNot(BeNil())
domains, err := getExpiredDomains(1, tmpVirtShareDir, now)
Expect(err).ToNot(HaveOccurred())
Expect(len(domains)).To(Equal(0))
expired, err := watchdogFileIsExpired(1, tmpVirtShareDir, vmi, now)
Expect(err).ToNot(HaveOccurred())
Expect(expired).To(BeFalse())
now = now.Add(time.Second * 3)
domains, err = getExpiredDomains(1, tmpVirtShareDir, now)
Expect(err).ToNot(HaveOccurred())
Expect(len(domains)).To(Equal(1))
expired, err = watchdogFileIsExpired(1, tmpVirtShareDir, vmi, now)
Expect(err).ToNot(HaveOccurred())
Expect(expired).To(BeTrue())
exists, err := WatchdogFileExists(tmpVirtShareDir, vmi)
Expect(err).ToNot(HaveOccurred())
Expect(exists).To(BeTrue())
err = WatchdogFileRemove(tmpVirtShareDir, vmi)
Expect(err).ToNot(HaveOccurred())
domains, err = getExpiredDomains(1, tmpVirtShareDir, now)
Expect(err).ToNot(HaveOccurred())
Expect(len(domains)).To(Equal(0))
exists, err = WatchdogFileExists(tmpVirtShareDir, vmi)
Expect(err).ToNot(HaveOccurred())
Expect(exists).To(BeFalse())
})
It("should not expire updated files", func() {
fileName := filepath.Join(tmpVirtShareDir, "default_expiredvmi")
Expect(os.Create(fileName)).ToNot(BeNil())
now := time.Now()
for i := 0; i < 4; i++ {
WatchdogFileUpdate(fileName, "somestring")
now = now.Add(time.Second * 1)
domains, err := getExpiredDomains(2, tmpVirtShareDir, now)
Expect(err).ToNot(HaveOccurred())
Expect(len(domains)).To(Equal(0))
}
})
It("should be able to get uid from watchdog", func() {
vmi := v1.NewMinimalVMI("testvmi")
vmi.UID = types.UID("1234")
fileName := filepath.Join(tmpVirtShareDir, "watchdog-files", vmi.Namespace+"_"+vmi.Name)
WatchdogFileUpdate(fileName, string(vmi.UID))
uid := WatchdogFileGetUID(tmpVirtShareDir, vmi)
Expect(uid).To(Equal(string(vmi.UID)))
})
It("should provide file in watchdog subdirectory", func() {
dir := WatchdogFileDirectory(tmpVirtShareDir)
Expect(dir).To(Equal(filepath.Join(tmpVirtShareDir, "watchdog-files")))
dir = WatchdogFileFromNamespaceName(tmpVirtShareDir, "tnamespace", "tvmi")
Expect(dir).To(Equal(filepath.Join(tmpVirtShareDir, "watchdog-files/tnamespace_tvmi")))
})
AfterEach(func() {
os.RemoveAll(tmpVirtShareDir)
})
})
})
Change all `Expect(len(var)).To(Equal(some-length))` to
`Expect(var).To(HaveLen(some-length))` in pkg/watchdog/watchdog_test.go
Signed-off-by: Nahshon Unna-Tsameret <d2a35840b61a4cf34917e887f4c10555349f04a7@redhat.com>
/*
* This file is part of the kubevirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2017 Red Hat, Inc.
*
*/
package watchdog
import (
"io/ioutil"
"os"
"path/filepath"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
v1 "kubevirt.io/client-go/api"
"k8s.io/apimachinery/pkg/types"
"kubevirt.io/client-go/precond"
)
var _ = Describe("Watchdog", func() {
Context("When watching files in a directory", func() {
var tmpVirtShareDir string
var tmpWatchdogDir string
var err error
BeforeEach(func() {
tmpVirtShareDir, err = ioutil.TempDir("", "kubevirt")
Expect(err).ToNot(HaveOccurred())
tmpWatchdogDir = WatchdogFileDirectory(tmpVirtShareDir)
err = os.MkdirAll(tmpWatchdogDir, 0755)
Expect(err).ToNot(HaveOccurred())
})
It("should detect expired watchdog files", func() {
fileName := filepath.Join(tmpWatchdogDir, "default_expiredvmi")
Expect(os.Create(fileName)).ToNot(BeNil())
now := time.Now()
domains, err := getExpiredDomains(1, tmpVirtShareDir, now)
Expect(err).ToNot(HaveOccurred())
Expect(domains).To(BeEmpty())
now = now.Add(time.Second * 3)
domains, err = getExpiredDomains(1, tmpVirtShareDir, now)
Expect(err).ToNot(HaveOccurred())
Expect(domains).To(HaveLen(1))
Expect(os.Create(fileName)).ToNot(BeNil())
now = time.Now()
domains, err = getExpiredDomains(1, tmpVirtShareDir, now)
Expect(err).ToNot(HaveOccurred())
Expect(domains).To(BeEmpty())
})
It("should successfully remove watchdog file", func() {
vmi := v1.NewMinimalVMI("tvmi")
namespace := precond.MustNotBeEmpty(vmi.GetObjectMeta().GetNamespace())
domain := precond.MustNotBeEmpty(vmi.GetObjectMeta().GetName())
now := time.Now()
fileName := WatchdogFileFromNamespaceName(tmpVirtShareDir, namespace, domain)
Expect(os.Create(fileName)).ToNot(BeNil())
domains, err := getExpiredDomains(1, tmpVirtShareDir, now)
Expect(err).ToNot(HaveOccurred())
Expect(domains).To(BeEmpty())
expired, err := watchdogFileIsExpired(1, tmpVirtShareDir, vmi, now)
Expect(err).ToNot(HaveOccurred())
Expect(expired).To(BeFalse())
now = now.Add(time.Second * 3)
domains, err = getExpiredDomains(1, tmpVirtShareDir, now)
Expect(err).ToNot(HaveOccurred())
Expect(domains).To(HaveLen(1))
expired, err = watchdogFileIsExpired(1, tmpVirtShareDir, vmi, now)
Expect(err).ToNot(HaveOccurred())
Expect(expired).To(BeTrue())
exists, err := WatchdogFileExists(tmpVirtShareDir, vmi)
Expect(err).ToNot(HaveOccurred())
Expect(exists).To(BeTrue())
err = WatchdogFileRemove(tmpVirtShareDir, vmi)
Expect(err).ToNot(HaveOccurred())
domains, err = getExpiredDomains(1, tmpVirtShareDir, now)
Expect(err).ToNot(HaveOccurred())
Expect(domains).To(BeEmpty())
exists, err = WatchdogFileExists(tmpVirtShareDir, vmi)
Expect(err).ToNot(HaveOccurred())
Expect(exists).To(BeFalse())
})
It("should not expire updated files", func() {
fileName := filepath.Join(tmpVirtShareDir, "default_expiredvmi")
Expect(os.Create(fileName)).ToNot(BeNil())
now := time.Now()
for i := 0; i < 4; i++ {
WatchdogFileUpdate(fileName, "somestring")
now = now.Add(time.Second * 1)
domains, err := getExpiredDomains(2, tmpVirtShareDir, now)
Expect(err).ToNot(HaveOccurred())
Expect(domains).To(BeEmpty())
}
})
It("should be able to get uid from watchdog", func() {
vmi := v1.NewMinimalVMI("testvmi")
vmi.UID = types.UID("1234")
fileName := filepath.Join(tmpVirtShareDir, "watchdog-files", vmi.Namespace+"_"+vmi.Name)
WatchdogFileUpdate(fileName, string(vmi.UID))
uid := WatchdogFileGetUID(tmpVirtShareDir, vmi)
Expect(uid).To(Equal(string(vmi.UID)))
})
It("should provide file in watchdog subdirectory", func() {
dir := WatchdogFileDirectory(tmpVirtShareDir)
Expect(dir).To(Equal(filepath.Join(tmpVirtShareDir, "watchdog-files")))
dir = WatchdogFileFromNamespaceName(tmpVirtShareDir, "tnamespace", "tvmi")
Expect(dir).To(Equal(filepath.Join(tmpVirtShareDir, "watchdog-files/tnamespace_tvmi")))
})
AfterEach(func() {
os.RemoveAll(tmpVirtShareDir)
})
})
})
|
// Package netint (linode-netint) is a client for accessing the Linode network
// internals samples. This API is undocumented and looks to be a set of
// unauthenticated endpoints that provide JSON data. This package also does
// some alterations to the data provided by Linode as some of the JSON types
// don't make sense...
//
// * The round-trip-time (RTT) field is converted from a string to uint32
//
// * The Loss field is converted from a string to uint32
//
// * The Jitter field is converted from a string to a uint32
//
// To note, this package is not maintained by nor affiliated with Linode. It
// simply consumes data from an undocumented public API.
package netint
import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"runtime"
	"strconv"
	"time"
)
// BASE_URLF is the base URL with a format specifier for the abbreviation
const BASE_URLF = "http://netint-%v.linode.com/ping/samples"
const (
DALLAS = "dal" // Dallas abbreviation
FREMONT = "fmt" // Fremont abbreviation
ATLANTA = "atl" // Atlanta abbreviation
NEWARK = "nwk" // Newark abbreviation
LONDON = "lon" // London abbreviation
TOKYO = "tok" // Tokyo abbreviation
VERSION = "0.0.1" // Library version
)
// used for parsing the JSON response
type samples struct {
Dallas [][]interface{} `json:"linode-dallas"`
Fremont [][]interface{} `json:"linode-fremont"`
Atlanta [][]interface{} `json:"linode-atlanta"`
Newark [][]interface{} `json:"linode-newark"`
London [][]interface{} `json:"linode-london"`
Tokyo [][]interface{} `json:"linode-tokyo"`
}
// Sample is a single result for a point-to-point measurement.
type Sample struct {
Epoch int64
RTT uint32 // unit: milliseconds
Loss uint32 // unit: percentage
Jitter uint32 // unit: milliseconds
}
// Overview is the entire view a single region has to the rest of the regions.
// It consists of one *Sample for each Region
type Overview struct {
Name string
Dallas *Sample
Fremont *Sample
Atlanta *Sample
Newark *Sample
London *Sample
Tokyo *Sample
}
// Regions returns the names of all Linode regions known to this package.
func Regions() []string {
	return []string{
		"dallas",
		"fremont",
		"atlanta",
		"newark",
		"london",
		"tokyo",
	}
}
// AllOverviews is a function to return all overviews.
// It's a map of *Overview instances with the lowercase name
// of the region as the key.
// (Comment fixed: this function is AllOverviews, not AllSamples.)
// Regions are fetched sequentially; the first failure aborts and returns
// that error.
func AllOverviews() (map[string]*Overview, error) {
	m := make(map[string]*Overview)
	// loop over each region and
	// populate its overview
	for _, d := range Regions() {
		o, err := getOverview(d)
		if err != nil {
			return nil, err
		}
		m[d] = o
	}
	return m, nil
}
// Dallas is a function to get an overview of the Dallas region.
func Dallas() (*Overview, error) {
return getOverview("dallas")
}
// Fremont is a function to get an overview of the Fremont region.
func Fremont() (*Overview, error) {
return getOverview("fremont")
}
// Atlanta is a function to get an overview of the Atlanta region.
func Atlanta() (*Overview, error) {
return getOverview("atlanta")
}
// Newark is a function to get an overview of the Newark region.
func Newark() (*Overview, error) {
return getOverview("newark")
}
// London is a function to get an overview of the London region.
func London() (*Overview, error) {
return getOverview("london")
}
// Tokyo is a function to get an overview of the Tokyo region.
func Tokyo() (*Overview, error) {
return getOverview("tokyo")
}
// getOverview fetches and decodes the ping samples for region r and wraps
// them in an *Overview. An unknown region name is an error.
func getOverview(r string) (*Overview, error) {
	// Map the region name to its datacenter abbreviation; reject anything
	// we do not know about.
	var abbr string
	switch r {
	case "dallas":
		abbr = DALLAS
	case "fremont":
		abbr = FREMONT
	case "atlanta":
		abbr = ATLANTA
	case "newark":
		abbr = NEWARK
	case "london":
		abbr = LONDON
	case "tokyo":
		abbr = TOKYO
	default:
		return nil, fmt.Errorf("'%v' is not a valid datacenter", r)
	}

	body, err := responseBody(url(abbr))
	if err != nil {
		return nil, err
	}

	s := &samples{}
	if err := json.Unmarshal(body, s); err != nil {
		return nil, err
	}

	o, err := buildOverview(s)
	if err != nil {
		return nil, err
	}
	o.Name = r
	return o, nil
}
func url(abbr string) string {
return fmt.Sprintf(BASE_URLF, abbr)
}
// responseBody performs a GET against url and returns the full response body.
// A User-Agent header identifying this library is set so Linode can attribute
// the traffic: LinodeNetInt/<VERSION> (go<VER> net/http).
func responseBody(url string) ([]byte, error) {
	// Bound the request so a stalled server cannot hang the caller forever —
	// the zero-value http.Client has no timeout at all.
	httpc := &http.Client{Timeout: 30 * time.Second}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	// we set a user agent so Linode has an idea of where requests are being generated from
	req.Header.Add("User-Agent", fmt.Sprintf("LinodeNetInt/%v (%v net/http)", VERSION, runtime.Version()))
	// execute the request
	resp, err := httpc.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// get the entire body
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return body, nil
}
// buildOverview converts raw decoded samples into an *Overview by pulling one
// sample per region, in a fixed order; the first conversion error aborts.
func buildOverview(s *samples) (*Overview, error) {
	o := &Overview{}
	// Drive the six identical conversions from a table instead of writing
	// the same error-check six times.
	targets := []struct {
		dst **Sample
		src [][]interface{}
	}{
		{&o.Dallas, s.Dallas},
		{&o.Fremont, s.Fremont},
		{&o.Atlanta, s.Atlanta},
		{&o.Newark, s.Newark},
		{&o.London, s.London},
		{&o.Tokyo, s.Tokyo},
	}
	for _, t := range targets {
		sample, err := pullSample(t.src)
		if err != nil {
			return nil, err
		}
		*t.dst = sample
	}
	return o, nil
}
// pullSample converts the first raw sample row into a *Sample.
//
// NOTE: As has been historically been a pain point with Linode, these
// endpoints provide some wonky JSON: only the timestamp is numeric, while
// RTT, Loss, and Jitter arrive as strings and must be parsed.
//
// Returns an error (instead of panicking, as the previous version did) when
// the input is empty, the first row has fewer than four fields, a field has
// an unexpected dynamic type, or a numeric string fails to parse.
func pullSample(i [][]interface{}) (*Sample, error) {
	// Guard against empty/short input before indexing.
	if len(i) == 0 || len(i[0]) < 4 {
		return nil, fmt.Errorf("malformed sample data: need at least one row of 4 fields")
	}
	row := i[0]

	// The UNIX timestamp arrives as a JSON number (float64).
	epoch, ok := row[0].(float64)
	if !ok {
		return nil, fmt.Errorf("malformed sample data: epoch is not numeric")
	}

	// RTT, Loss, and Jitter are strings for some reason; parse each one.
	var vals [3]uint64
	for n := 1; n <= 3; n++ {
		str, ok := row[n].(string)
		if !ok {
			return nil, fmt.Errorf("malformed sample data: field %d is not a string", n)
		}
		v, err := strconv.ParseUint(str, 10, 32)
		if err != nil {
			return nil, err
		}
		vals[n-1] = v
	}

	return &Sample{
		Epoch:  int64(epoch),
		RTT:    uint32(vals[0]),
		Loss:   uint32(vals[1]),
		Jitter: uint32(vals[2]),
	}, nil
}
fix some linter/vet issues
This fixes some issues that golint (and maybe others) had with the code.
Other than constants changing names, there should be no other compatibility breakage.
// Package netint (linode-netint) is a client for accessing the Linode network
// internals samples. This API is undocumented and looks to be a set of
// unauthenticated endpoints that provide JSON data. This package also does
// some alterations to the data provided by Linode as some of the JSON types
// don't make sense...
//
// * The round-trip-time (RTT) field is converted from a string to uint32
//
// * The Loss field is converted from a string to uint32
//
// * The Jitter field is converted from a string to a uint32
//
// To note, this package is not maintained by nor affiliated with Linode. It
// simply consumes data from an undocumented public API.
package netint
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"runtime"
"strconv"
)
const (
// BaseURL is the base URL with a format specifier
// for the datacenter's abbreviation
BaseURL = "http://netint-%v.linode.com/ping/samples"
// Version is the version, man...
Version = "0.0.2"
)
// dc pairs a datacenter's full name with its URL abbreviation.
type dc struct {
	name string
	abbr string
}

// datacenters is a struct of different datacenter details
var datacenters = struct {
	dallas  *dc
	fremont *dc
	atlanta *dc
	newark  *dc
	london  *dc
	tokyo   *dc
}{
	&dc{name: "dallas", abbr: "dal"},
	&dc{name: "fremont", abbr: "fmt"},
	// Fixed typo: was "atlant", which made Abbr("atlanta") return "" and
	// Regions() report a misspelled region name.
	&dc{name: "atlanta", abbr: "atl"},
	&dc{name: "newark", abbr: "nwk"},
	&dc{name: "london", abbr: "lon"},
	&dc{name: "tokyo", abbr: "tok"},
}
// used for parsing the JSON response
type samples struct {
Dallas [][]interface{} `json:"linode-dallas"`
Fremont [][]interface{} `json:"linode-fremont"`
Atlanta [][]interface{} `json:"linode-atlanta"`
Newark [][]interface{} `json:"linode-newark"`
London [][]interface{} `json:"linode-london"`
Tokyo [][]interface{} `json:"linode-tokyo"`
}
// Sample is a single result for a point-to-point measurement.
type Sample struct {
Epoch int64
RTT uint32 // unit: milliseconds
Loss uint32 // unit: percentage
Jitter uint32 // unit: milliseconds
}
// Overview is the entire view a single region has to the rest of the regions.
// It consists of one *Sample for each Region
type Overview struct {
Name string
Dallas *Sample
Fremont *Sample
Atlanta *Sample
Newark *Sample
London *Sample
Tokyo *Sample
}
// Regions is a function that returns a slice of strings that is the
// collection of Linode regions.
func Regions() []string {
return []string{
datacenters.dallas.name,
datacenters.fremont.name,
datacenters.atlanta.name,
datacenters.newark.name,
datacenters.london.name,
datacenters.tokyo.name,
}
}
// Abbr is a function to obtain the shortened version of a datacenter's
// name. 'dc' is the full name of the datacenter (e.g., "dallas"). Returns
// an empty string if given an unknown datacenter.
// (Doc typo "fcuntion" fixed.)
func Abbr(dc string) string {
	// Scan the known name/abbr pairs rather than switching on each name.
	// (The parameter shadows the dc type, so the pairs are spelled out as an
	// anonymous struct slice.)
	pairs := []struct{ name, abbr string }{
		{datacenters.dallas.name, datacenters.dallas.abbr},
		{datacenters.fremont.name, datacenters.fremont.abbr},
		{datacenters.atlanta.name, datacenters.atlanta.abbr},
		{datacenters.newark.name, datacenters.newark.abbr},
		{datacenters.london.name, datacenters.london.abbr},
		{datacenters.tokyo.name, datacenters.tokyo.abbr},
	}
	for _, p := range pairs {
		if p.name == dc {
			return p.abbr
		}
	}
	return ""
}
// AllOverviews returns every region's overview as a map of *Overview
// instances keyed by the lowercase region name. The first fetch error
// aborts the whole operation.
func AllOverviews() (map[string]*Overview, error) {
	overviews := make(map[string]*Overview, 6)
	for _, region := range Regions() {
		overview, err := GetOverview(region)
		if err != nil {
			return nil, err
		}
		overviews[region] = overview
	}
	return overviews, nil
}
// Convenience wrappers: one helper per region, each delegating to
// GetOverview with the region's full name.

// Dallas is a function to get an overview of the Dallas region.
func Dallas() (*Overview, error) {
	return GetOverview("dallas")
}
// Fremont is a function to get an overview of the Fremont region.
func Fremont() (*Overview, error) {
	return GetOverview("fremont")
}
// Atlanta is a function to get an overview of the Atlanta region.
func Atlanta() (*Overview, error) {
	return GetOverview("atlanta")
}
// Newark is a function to get an overview of the Newark region.
func Newark() (*Overview, error) {
	return GetOverview("newark")
}
// London is a function to get an overview of the London region.
func London() (*Overview, error) {
	return GetOverview("london")
}
// Tokyo is a function to get an overview of the Tokyo region.
func Tokyo() (*Overview, error) {
	return GetOverview("tokyo")
}
// GetOverview fetches an overview of a single datacenter, with 'dc'
// being the full datacenter name (e.g., "dallas"). It returns an error
// for unknown datacenter names, failed requests, or malformed data.
func GetOverview(dc string) (o *Overview, err error) {
	var u string
	// Determine the URL based on the region; an unknown region is an
	// error before any network traffic happens.
	switch dc {
	case "testdatacenter":
		// for testing purposes only
		u = "http://www.mocky.io/v2/548fd4750b9c75fd02437812"
	default:
		dcAbbr := Abbr(dc)
		if dcAbbr == "" {
			// Error strings must not end in a newline (Go convention);
			// the trailing "\n" from the original message was dropped.
			return nil, fmt.Errorf("'%v' is not a valid datacenter", dc)
		}
		u = url(dcAbbr)
	}
	body, err := responseBody(u)
	if err != nil {
		return nil, err
	}
	s := &samples{}
	if err = json.Unmarshal(body, s); err != nil {
		return nil, err
	}
	o, err = buildOverview(s)
	if err != nil {
		return nil, err
	}
	o.Name = dc
	return o, nil
}
// url builds the sample-feed URL for the given datacenter abbreviation.
func url(abbr string) string {
	endpoint := fmt.Sprintf(BaseURL, abbr)
	return endpoint
}
// responseBody performs a GET request against url and returns the
// entire response body.
func responseBody(url string) ([]byte, error) {
	// A client without a timeout can hang forever on a stalled
	// connection, so bound the whole request/response cycle.
	httpc := &http.Client{Timeout: 30 * time.Second}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	// we set a user agent so Linode has an idea of where requests are being generated from
	// LinodeNetInt/<Version> (go<runtime.Version()> net/http)
	req.Header.Add("User-Agent", fmt.Sprintf("LinodeNetInt/%v (%v net/http)", Version, runtime.Version()))
	// execute the request
	resp, err := httpc.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// get the entire body
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return body, nil
}
// buildOverview converts a decoded samples payload into an Overview,
// pulling one Sample per destination region. The first conversion
// failure aborts the build.
func buildOverview(s *samples) (*Overview, error) {
	o := &Overview{}
	// Table-driven: each entry maps a raw sample list to the Overview
	// field it populates, in the same order as before.
	for _, item := range []struct {
		raw  [][]interface{}
		dest **Sample
	}{
		{s.Dallas, &o.Dallas},
		{s.Fremont, &o.Fremont},
		{s.Atlanta, &o.Atlanta},
		{s.Newark, &o.Newark},
		{s.London, &o.London},
		{s.Tokyo, &o.Tokyo},
	} {
		sample, err := pullSample(item.raw)
		if err != nil {
			return nil, err
		}
		*item.dest = sample
	}
	return o, nil
}
// pullSample converts one raw measurement row into a Sample.
//
// NOTE: As has historically been a pain point with Linode, these
// endpoints provide some wonky JSON. Only the timestamp is in a useful
// format (numeric); RTT, Loss, and Jitter are strings and must be
// parsed. The previous implementation indexed and type-asserted
// blindly, panicking on empty or malformed rows; this version returns
// an error instead.
func pullSample(i [][]interface{}) (s *Sample, err error) {
	if len(i) == 0 || len(i[0]) < 4 {
		return nil, fmt.Errorf("malformed sample row: need at least 4 fields")
	}
	row := i[0]
	// the UNIX timestamp arrives as a JSON number (float64)
	epoch, ok := row[0].(float64)
	if !ok {
		return nil, fmt.Errorf("malformed sample timestamp: %v", row[0])
	}
	// RTT, Loss, Jitter arrive as decimal strings at indexes 1..3
	vals := make([]uint32, 3)
	for n := 1; n <= 3; n++ {
		str, ok := row[n].(string)
		if !ok {
			return nil, fmt.Errorf("malformed sample field %d: %v", n, row[n])
		}
		v, err := strconv.ParseUint(str, 10, 32)
		if err != nil {
			return nil, err
		}
		vals[n-1] = uint32(v)
	}
	s = &Sample{
		Epoch:  int64(epoch),
		RTT:    vals[0],
		Loss:   vals[1],
		Jitter: vals[2],
	}
	return s, nil
}
|
// Copyright 2015 ikawaha
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tokenizer
import (
"bytes"
"testing"
"github.com/ikawaha/kagome/internal/lattice"
)
func TestTokenize01(t *testing.T) {
tnz := New(SysDic())
tokens := tnz.Tokenize("", Normal)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: -1, Surface: "EOS"},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
func TestTokenize02(t *testing.T) {
tnz := New(SysDic())
tokens := tnz.Tokenize("関西国際空港", Normal)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: 372977, Surface: "関西国際空港", Start: 0, End: 6, Class: TokenClass(lattice.KNOWN)},
{ID: -1, Surface: "EOS", Start: 6, End: 6},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
func TestTokenize03(t *testing.T) {
tnz := New(SysDic())
udic, e := NewUserDic("../_sample/userdic.txt")
if e != nil {
t.Fatalf("new user dic: unexpected error\n")
}
tnz.SetUserDic(udic)
tokens := tnz.Tokenize("関西国際空港", Normal)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: 2, Surface: "関西国際空港", Start: 0, End: 6, Class: TokenClass(lattice.USER)},
{ID: -1, Surface: "EOS", Start: 6, End: 6},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
func TestTokenize04(t *testing.T) {
tnz := New(SysDic())
tokens := tnz.Tokenize("ポポピ", Normal)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: 34, Surface: "ポポピ", Start: 0, End: 3, Class: TokenClass(lattice.UNKNOWN)},
{ID: -1, Surface: "EOS", Start: 3, End: 3},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
// TestSearchModeTokenize01 checks that tokenizing the empty string in
// Search mode yields only the BOS and EOS sentinel tokens.
// Renamed from TestSearcModeTokenize01 to fix the typo in "Search".
func TestSearchModeTokenize01(t *testing.T) {
	tnz := New(SysDic())
	tokens := tnz.Tokenize("", Search)
	expected := []Token{
		{ID: -1, Surface: "BOS"},
		{ID: -1, Surface: "EOS"},
	}
	if len(tokens) != len(expected) {
		t.Fatalf("got %v, expected %v\n", tokens, expected)
	}
	for i, tok := range tokens {
		if tok.ID != expected[i].ID ||
			tok.Class != expected[i].Class ||
			tok.Start != expected[i].Start ||
			tok.End != expected[i].End ||
			tok.Surface != expected[i].Surface {
			t.Errorf("got %v, expected %v\n", tok, expected[i])
		}
	}
}
func TestSearchModeTokenize02(t *testing.T) {
tnz := New(SysDic())
tokens := tnz.Tokenize("関西国際空港", Search)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: 372968, Surface: "関西", Start: 0, End: 2, Class: TokenClass(lattice.KNOWN)},
{ID: 168541, Surface: "国際", Start: 2, End: 4, Class: TokenClass(lattice.KNOWN)},
{ID: 307133, Surface: "空港", Start: 4, End: 6, Class: TokenClass(lattice.KNOWN)},
{ID: -1, Surface: "EOS", Start: 6, End: 6},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
func TestSearchModeTokenize03(t *testing.T) {
tnz := New(SysDic())
udic, e := NewUserDic("../_sample/userdic.txt")
if e != nil {
t.Fatalf("new user dic: unexpected error\n")
}
tnz.SetUserDic(udic)
tokens := tnz.Tokenize("関西国際空港", Search)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: 2, Surface: "関西国際空港", Start: 0, End: 6, Class: TokenClass(lattice.USER)},
{ID: -1, Surface: "EOS", Start: 6, End: 6},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
func TestSearchModeTokenize04(t *testing.T) {
tnz := New(SysDic())
tokens := tnz.Tokenize("ポポピ", Search)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: 34, Surface: "ポポピ", Start: 0, End: 3, Class: TokenClass(lattice.UNKNOWN)},
{ID: -1, Surface: "EOS", Start: 3, End: 3},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
func TestExtendedModeTokenize01(t *testing.T) {
tnz := New(SysDic())
tokens := tnz.Tokenize("", Extended)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: -1, Surface: "EOS"},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
func TestExtendedModeTokenize02(t *testing.T) {
tnz := New(SysDic())
tokens := tnz.Tokenize("関西国際空港", Extended)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: 372968, Surface: "関西", Start: 0, End: 2, Class: TokenClass(lattice.KNOWN)},
{ID: 168541, Surface: "国際", Start: 2, End: 4, Class: TokenClass(lattice.KNOWN)},
{ID: 307133, Surface: "空港", Start: 4, End: 6, Class: TokenClass(lattice.KNOWN)},
{ID: -1, Surface: "EOS", Start: 6, End: 6},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
func TestExtendedModeTokenize03(t *testing.T) {
tnz := New(SysDic())
udic, e := NewUserDic("../_sample/userdic.txt")
if e != nil {
t.Fatalf("new user dic: unexpected error\n")
}
tnz.SetUserDic(udic)
tokens := tnz.Tokenize("関西国際空港", Extended)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: 2, Surface: "関西国際空港", Start: 0, End: 6, Class: TokenClass(lattice.USER)},
{ID: -1, Surface: "EOS", Start: 6, End: 6},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
func TestExtendedModeTokenize04(t *testing.T) {
tnz := New(SysDic())
tokens := tnz.Tokenize("ポポピ", Extended)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: 34, Surface: "ポ", Start: 0, End: 1, Class: TokenClass(lattice.DUMMY)},
{ID: 34, Surface: "ポ", Start: 1, End: 2, Class: TokenClass(lattice.DUMMY)},
{ID: 34, Surface: "ピ", Start: 2, End: 3, Class: TokenClass(lattice.DUMMY)},
{ID: -1, Surface: "EOS", Start: 3, End: 3},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
// TestTokenizerSetDic verifies that SetDic installs the given
// dictionary on the tokenizer.
func TestTokenizerSetDic(t *testing.T) {
	d := SysDic()
	tnz := New(d)
	tnz.SetDic(d)
	if tnz.dic != d.dic {
		// Print the value actually compared (d.dic, not d) so the
		// failure message matches the condition above.
		t.Errorf("got %v, expected %v", tnz.dic, d.dic)
	}
}
func TestTokenizerDot(t *testing.T) {
tnz := New(SysDic())
// test empty case
var b bytes.Buffer
tnz.Dot("", &b)
if b.String() == "" {
t.Errorf("got empty string")
}
// only idling
b.Reset()
tnz.Dot("わたしまけましたわ", &b)
if b.String() == "" {
t.Errorf("got empty string")
}
}
var benchSampleText = "人魚は、南の方の海にばかり棲んでいるのではありません。北の海にも棲んでいたのであります。北方の海の色は、青うございました。ある時、岩の上に、女の人魚があがって、あたりの景色を眺めながら休んでいました。"
func BenchmarkTokenizeNormal(b *testing.B) {
tnz := New(SysDic())
for i := 0; i < b.N; i++ {
tnz.Tokenize(benchSampleText, Normal)
}
}
func BenchmarkTokenizeSearch(b *testing.B) {
tnz := New(SysDic())
for i := 0; i < b.N; i++ {
tnz.Tokenize(benchSampleText, Search)
}
}
func BenchmarkTokenizeExtended(b *testing.B) {
tnz := New(SysDic())
for i := 0; i < b.N; i++ {
tnz.Tokenize(benchSampleText, Extended)
}
}
Fix tests
// Copyright 2015 ikawaha
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tokenizer
import (
"bytes"
"testing"
"github.com/ikawaha/kagome/internal/lattice"
)
func TestTokenize01(t *testing.T) {
tnz := New()
tokens := tnz.Tokenize("", Normal)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: -1, Surface: "EOS"},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
func TestTokenize02(t *testing.T) {
tnz := New()
tokens := tnz.Tokenize("関西国際空港", Normal)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: 372977, Surface: "関西国際空港", Start: 0, End: 6, Class: TokenClass(lattice.KNOWN)},
{ID: -1, Surface: "EOS", Start: 6, End: 6},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
func TestTokenize03(t *testing.T) {
tnz := New()
udic, e := NewUserDic("../_sample/userdic.txt")
if e != nil {
t.Fatalf("new user dic: unexpected error\n")
}
tnz.SetUserDic(udic)
tokens := tnz.Tokenize("関西国際空港", Normal)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: 2, Surface: "関西国際空港", Start: 0, End: 6, Class: TokenClass(lattice.USER)},
{ID: -1, Surface: "EOS", Start: 6, End: 6},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
func TestTokenize04(t *testing.T) {
tnz := New()
tokens := tnz.Tokenize("ポポピ", Normal)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: 34, Surface: "ポポピ", Start: 0, End: 3, Class: TokenClass(lattice.UNKNOWN)},
{ID: -1, Surface: "EOS", Start: 3, End: 3},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
// TestSearchModeTokenize01 checks that tokenizing the empty string in
// Search mode yields only the BOS and EOS sentinel tokens.
// Renamed from TestSearcModeTokenize01 to fix the typo in "Search".
func TestSearchModeTokenize01(t *testing.T) {
	tnz := New()
	tokens := tnz.Tokenize("", Search)
	expected := []Token{
		{ID: -1, Surface: "BOS"},
		{ID: -1, Surface: "EOS"},
	}
	if len(tokens) != len(expected) {
		t.Fatalf("got %v, expected %v\n", tokens, expected)
	}
	for i, tok := range tokens {
		if tok.ID != expected[i].ID ||
			tok.Class != expected[i].Class ||
			tok.Start != expected[i].Start ||
			tok.End != expected[i].End ||
			tok.Surface != expected[i].Surface {
			t.Errorf("got %v, expected %v\n", tok, expected[i])
		}
	}
}
func TestSearchModeTokenize02(t *testing.T) {
tnz := New()
tokens := tnz.Tokenize("関西国際空港", Search)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: 372968, Surface: "関西", Start: 0, End: 2, Class: TokenClass(lattice.KNOWN)},
{ID: 168541, Surface: "国際", Start: 2, End: 4, Class: TokenClass(lattice.KNOWN)},
{ID: 307133, Surface: "空港", Start: 4, End: 6, Class: TokenClass(lattice.KNOWN)},
{ID: -1, Surface: "EOS", Start: 6, End: 6},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
func TestSearchModeTokenize03(t *testing.T) {
tnz := New()
udic, e := NewUserDic("../_sample/userdic.txt")
if e != nil {
t.Fatalf("new user dic: unexpected error\n")
}
tnz.SetUserDic(udic)
tokens := tnz.Tokenize("関西国際空港", Search)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: 2, Surface: "関西国際空港", Start: 0, End: 6, Class: TokenClass(lattice.USER)},
{ID: -1, Surface: "EOS", Start: 6, End: 6},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
func TestSearchModeTokenize04(t *testing.T) {
tnz := New()
tokens := tnz.Tokenize("ポポピ", Search)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: 34, Surface: "ポポピ", Start: 0, End: 3, Class: TokenClass(lattice.UNKNOWN)},
{ID: -1, Surface: "EOS", Start: 3, End: 3},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
func TestExtendedModeTokenize01(t *testing.T) {
tnz := New()
tokens := tnz.Tokenize("", Extended)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: -1, Surface: "EOS"},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
func TestExtendedModeTokenize02(t *testing.T) {
tnz := New()
tokens := tnz.Tokenize("関西国際空港", Extended)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: 372968, Surface: "関西", Start: 0, End: 2, Class: TokenClass(lattice.KNOWN)},
{ID: 168541, Surface: "国際", Start: 2, End: 4, Class: TokenClass(lattice.KNOWN)},
{ID: 307133, Surface: "空港", Start: 4, End: 6, Class: TokenClass(lattice.KNOWN)},
{ID: -1, Surface: "EOS", Start: 6, End: 6},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
func TestExtendedModeTokenize03(t *testing.T) {
tnz := New()
udic, e := NewUserDic("../_sample/userdic.txt")
if e != nil {
t.Fatalf("new user dic: unexpected error\n")
}
tnz.SetUserDic(udic)
tokens := tnz.Tokenize("関西国際空港", Extended)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: 2, Surface: "関西国際空港", Start: 0, End: 6, Class: TokenClass(lattice.USER)},
{ID: -1, Surface: "EOS", Start: 6, End: 6},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
func TestExtendedModeTokenize04(t *testing.T) {
tnz := New()
tokens := tnz.Tokenize("ポポピ", Extended)
expected := []Token{
{ID: -1, Surface: "BOS"},
{ID: 34, Surface: "ポ", Start: 0, End: 1, Class: TokenClass(lattice.DUMMY)},
{ID: 34, Surface: "ポ", Start: 1, End: 2, Class: TokenClass(lattice.DUMMY)},
{ID: 34, Surface: "ピ", Start: 2, End: 3, Class: TokenClass(lattice.DUMMY)},
{ID: -1, Surface: "EOS", Start: 3, End: 3},
}
if len(tokens) != len(expected) {
t.Fatalf("got %v, expected %v\n", tokens, expected)
}
for i, tok := range tokens {
if tok.ID != expected[i].ID ||
tok.Class != expected[i].Class ||
tok.Start != expected[i].Start ||
tok.End != expected[i].End ||
tok.Surface != expected[i].Surface {
t.Errorf("got %v, expected %v\n", tok, expected[i])
}
}
}
// TestTokenizerSetDic verifies that SetDic installs the given
// dictionary on the tokenizer.
func TestTokenizerSetDic(t *testing.T) {
	d := SysDic()
	tnz := NewWithDic(d)
	tnz.SetDic(d)
	if tnz.dic != d.dic {
		// Print the value actually compared (d.dic, not d) so the
		// failure message matches the condition above.
		t.Errorf("got %v, expected %v", tnz.dic, d.dic)
	}
}
func TestTokenizerDot(t *testing.T) {
tnz := New()
// test empty case
var b bytes.Buffer
tnz.Dot("", &b)
if b.String() == "" {
t.Errorf("got empty string")
}
// only idling
b.Reset()
tnz.Dot("わたしまけましたわ", &b)
if b.String() == "" {
t.Errorf("got empty string")
}
}
var benchSampleText = "人魚は、南の方の海にばかり棲んでいるのではありません。北の海にも棲んでいたのであります。北方の海の色は、青うございました。ある時、岩の上に、女の人魚があがって、あたりの景色を眺めながら休んでいました。"
func BenchmarkTokenizeNormal(b *testing.B) {
tnz := New()
for i := 0; i < b.N; i++ {
tnz.Tokenize(benchSampleText, Normal)
}
}
func BenchmarkTokenizeSearch(b *testing.B) {
tnz := New()
for i := 0; i < b.N; i++ {
tnz.Tokenize(benchSampleText, Search)
}
}
func BenchmarkTokenizeExtended(b *testing.B) {
tnz := New()
for i := 0; i < b.N; i++ {
tnz.Tokenize(benchSampleText, Extended)
}
}
|
// Generic operations on filesystems and objects
package fs
import (
"fmt"
"io"
"log"
"mime"
"path"
"sort"
"strings"
"sync"
"sync/atomic"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"golang.org/x/text/unicode/norm"
)
// CalculateModifyWindow works out modify window for Fses passed in -
// sets Config.ModifyWindow
//
// This is the largest modify window of all the fses in use, and the
// user configured value
func CalculateModifyWindow(fs ...Fs) {
	// Guard: the trailing Debug call indexes fs[0], which would panic
	// when invoked with no filesystems at all.
	if len(fs) == 0 {
		return
	}
	for _, f := range fs {
		if f != nil {
			precision := f.Precision()
			if precision > Config.ModifyWindow {
				Config.ModifyWindow = precision
			}
			// A backend without mod-time support dominates everything
			// else, so stop scanning early.
			if precision == ModTimeNotSupported {
				Debug(f, "Modify window not supported")
				return
			}
		}
	}
	Debug(fs[0], "Modify window is %s", Config.ModifyWindow)
}
// HashEquals reports whether src and dst match, treating an empty
// string on either side as a wildcard that always matches.
func HashEquals(src, dst string) bool {
	switch {
	case src == "", dst == "":
		return true
	default:
		return src == dst
	}
}
// CheckHashes checks the two files to see if they have common
// known hash types and compares them
//
// Returns
//
// equal - which is equality of the hashes
//
// hash - the HashType. This is HashNone if either of the hashes were
// unset or a compatible hash couldn't be found.
//
// err - may return an error which will already have been logged
//
// If an error is returned it will return equal as false
func CheckHashes(src, dst Object) (equal bool, hash HashType, err error) {
	common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
	// Debug(nil, "Shared hashes: %v", common)
	// No hash type supported by both sides: nothing to compare, so
	// report the objects as (vacuously) equal with HashNone.
	if common.Count() == 0 {
		return true, HashNone, nil
	}
	hash = common.GetOne()
	srcHash, err := src.Hash(hash)
	if err != nil {
		Stats.Error()
		ErrorLog(src, "Failed to calculate src hash: %v", err)
		return false, hash, err
	}
	// An empty hash means the value is unset on that side; treat the
	// files as equal rather than flagging a spurious difference.
	if srcHash == "" {
		return true, HashNone, nil
	}
	dstHash, err := dst.Hash(hash)
	if err != nil {
		Stats.Error()
		ErrorLog(dst, "Failed to calculate dst hash: %v", err)
		return false, hash, err
	}
	if dstHash == "" {
		return true, HashNone, nil
	}
	return srcHash == dstHash, hash, nil
}
// Equal checks to see if the src and dst objects are equal by looking at
// size, mtime and hash
//
// If the src and dst size are different then it is considered to be
// not equal. If --size-only is in effect then this is the only check
// that is done. If --ignore-size is in effect then this check is
// skipped and the files are considered the same size.
//
// If the size is the same and the mtime is the same then it is
// considered to be equal. This check is skipped if using --checksum.
//
// If the size is the same and mtime is different, unreadable or
// --checksum is set and the hash is the same then the file is
// considered to be equal. In this case the mtime on the dst is
// updated if --checksum is not set.
//
// Otherwise the file is considered to be not equal including if there
// were errors reading info.
func Equal(src, dst Object) bool {
	if !Config.IgnoreSize {
		if src.Size() != dst.Size() {
			Debug(src, "Sizes differ")
			return false
		}
	}
	if Config.SizeOnly {
		Debug(src, "Sizes identical")
		return true
	}
	// Assert: Size is equal or being ignored
	// If checking checksum and not modtime
	if Config.CheckSum {
		// Check the hash
		same, hash, _ := CheckHashes(src, dst)
		if !same {
			Debug(src, "%v differ", hash)
			return false
		}
		if hash == HashNone {
			Debug(src, "Size of src and dst objects identical")
		} else {
			Debug(src, "Size and %v of src and dst objects identical", hash)
		}
		return true
	}
	// Sizes the same so check the mtime
	if Config.ModifyWindow == ModTimeNotSupported {
		Debug(src, "Sizes identical")
		return true
	}
	srcModTime := src.ModTime()
	dstModTime := dst.ModTime()
	dt := dstModTime.Sub(srcModTime)
	ModifyWindow := Config.ModifyWindow
	// |dt| strictly inside the window counts as "same time"
	if dt < ModifyWindow && dt > -ModifyWindow {
		Debug(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
		return true
	}
	Debug(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
	// Check if the hashes are the same
	same, hash, _ := CheckHashes(src, dst)
	if !same {
		Debug(src, "%v differ", hash)
		return false
	}
	if hash == HashNone {
		// if couldn't check hash, return that they differ
		return false
	}
	// mod time differs but hash is the same - reset the dst mod time
	// if required
	if !Config.NoUpdateModTime {
		// Size and hash the same but mtime different so update the
		// mtime of the dst object here
		err := dst.SetModTime(srcModTime)
		if err == ErrorCantSetModTime {
			Debug(src, "src and dst identical but can't set mod time without re-uploading")
			return false
		} else if err != nil {
			Stats.Error()
			ErrorLog(dst, "Failed to set modification time: %v", err)
		} else {
			Debug(src, "Updated modification time in destination")
		}
	}
	return true
}
// MimeTypeFromName guesses the mime type from the file extension of
// remote, falling back to application/octet-stream when the guess does
// not look like a valid "type/subtype" string.
func MimeTypeFromName(remote string) (mimeType string) {
	guess := mime.TypeByExtension(path.Ext(remote))
	if strings.ContainsRune(guess, '/') {
		return guess
	}
	return "application/octet-stream"
}
// MimeType returns the MimeType from the object, either by calling
// the MimeTyper interface or using MimeTypeFromName
func MimeType(o ObjectInfo) (mimeType string) {
	// Read the MimeType from the optional interface if available
	if do, ok := o.(MimeTyper); ok {
		mimeType = do.MimeType()
		Debug(o, "Read MimeType as %q", mimeType)
		// An empty answer from the interface falls through to the
		// extension-based guess below.
		if mimeType != "" {
			return mimeType
		}
	}
	return MimeTypeFromName(o.Remote())
}
// removeFailedCopy removes dst, used to clean up after a failed copy.
//
// It reports whether the file was successfully removed.
func removeFailedCopy(dst Object) bool {
	if dst == nil {
		return false
	}
	Debug(dst, "Removing failed copy")
	if removeErr := dst.Remove(); removeErr != nil {
		Debug(dst, "Failed to remove failed copy: %s", removeErr)
		return false
	}
	return true
}
// overrideRemoteObject wraps an Object, overriding only the remote
// name it reports (used by Copy to store an object under a new name).
type overrideRemoteObject struct {
	Object
	remote string
}
// Remote returns the overridden remote name
func (o *overrideRemoteObject) Remote() string {
	return o.remote
}
// Copy src object to dst or f if nil. If dst is nil then it uses
// remote as the name of the new object.
//
// The transfer is retried up to Config.LowLevelRetries times on
// retryable errors, and the result is verified by size and (when
// available) by a common hash.
func Copy(f Fs, dst Object, remote string, src Object) (err error) {
	if Config.DryRun {
		Log(src, "Not copying as --dry-run")
		return nil
	}
	maxTries := Config.LowLevelRetries
	tries := 0
	doUpdate := dst != nil
	var actionTaken string
	for {
		// Try server side copy first - if has optional interface and
		// is same underlying remote
		actionTaken = "Copied (server side copy)"
		if fCopy, ok := f.(Copier); ok && src.Fs().Name() == f.Name() {
			var newDst Object
			newDst, err = fCopy.Copy(src, remote)
			if err == nil {
				dst = newDst
			}
		} else {
			err = ErrorCantCopy
		}
		// If can't server side copy, do it manually
		if err == ErrorCantCopy {
			var in0 io.ReadCloser
			in0, err = src.Open()
			if err != nil {
				err = errors.Wrap(err, "failed to open source object")
			} else {
				in := NewAccount(in0, src) // account and buffer the transfer
				wrappedSrc := &overrideRemoteObject{Object: src, remote: remote}
				if doUpdate {
					actionTaken = "Copied (replaced existing)"
					err = dst.Update(in, wrappedSrc)
				} else {
					actionTaken = "Copied (new)"
					dst, err = f.Put(in, wrappedSrc)
				}
				// a Close failure only surfaces when the transfer
				// itself succeeded
				closeErr := in.Close()
				if err == nil {
					err = closeErr
				}
			}
		}
		// stop once the low level retry budget is spent
		tries++
		if tries >= maxTries {
			break
		}
		// Retry if err returned a retry error
		if IsRetryError(err) || ShouldRetry(err) {
			Debug(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries)
			continue
		}
		// otherwise finish
		break
	}
	if err != nil {
		Stats.Error()
		ErrorLog(src, "Failed to copy: %v", err)
		return err
	}
	// Verify sizes are the same after transfer
	if !Config.IgnoreSize && src.Size() != dst.Size() {
		Stats.Error()
		err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
		ErrorLog(dst, "%v", err)
		removeFailedCopy(dst)
		return err
	}
	// Verify hashes are the same after transfer - ignoring blank hashes
	// TODO(klauspost): This could be extended, so we always create a hash type matching
	// the destination, and calculate it while sending.
	common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
	// Debug(src, "common hashes: %v", common)
	if !Config.SizeOnly && common.Count() > 0 {
		// Get common hash type
		hashType := common.GetOne()
		var srcSum string
		srcSum, err = src.Hash(hashType)
		if err != nil {
			Stats.Error()
			ErrorLog(src, "Failed to read src hash: %v", err)
		} else if srcSum != "" {
			var dstSum string
			dstSum, err = dst.Hash(hashType)
			if err != nil {
				Stats.Error()
				ErrorLog(dst, "Failed to read hash: %v", err)
			} else if !HashEquals(srcSum, dstSum) {
				Stats.Error()
				err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
				ErrorLog(dst, "%v", err)
				removeFailedCopy(dst)
				return err
			}
		}
	}
	Debug(src, actionTaken)
	return err
}
// Move moves src object to dst or fdst if dst is nil. If dst is nil
// then it uses remote as the name of the new object.
//
// It prefers a server side move via the optional Mover interface when
// the source and destination are on the same remote, falling back to
// Copy + DeleteFile otherwise. Honours --dry-run.
func Move(fdst Fs, dst Object, remote string, src Object) (err error) {
	if Config.DryRun {
		Log(src, "Not moving as --dry-run")
		return nil
	}
	// See if we have Move available
	if do, ok := fdst.(Mover); ok && src.Fs().Name() == fdst.Name() {
		// Delete destination if it exists
		if dst != nil {
			err = DeleteFile(dst)
			if err != nil {
				return err
			}
		}
		// Move dst <- src
		_, err := do.Move(src, remote)
		switch err {
		case nil:
			// Say "(server side)" so this is distinguishable in the
			// debug log from the copy+delete fallback below.
			Debug(src, "Moved (server side)")
			return nil
		case ErrorCantMove:
			Debug(src, "Can't move, switching to copy")
		default:
			Stats.Error()
			ErrorLog(dst, "Couldn't move: %v", err)
			return err
		}
	}
	// Move not found or didn't work so copy dst <- src
	err = Copy(fdst, dst, remote, src)
	if err != nil {
		ErrorLog(src, "Not deleting source as copy failed: %v", err)
		return err
	}
	// Delete src if no error on copy
	return DeleteFile(src)
}
// DeleteFile deletes a single file respecting --dry-run and accumulating stats and errors.
//
// The removal is accounted as a "check" (not a transfer) in Stats.
// Errors are logged and counted but also returned to the caller.
func DeleteFile(dst Object) (err error) {
	if Config.DryRun {
		Log(dst, "Not deleting as --dry-run")
	} else {
		Stats.Checking(dst.Remote())
		err = dst.Remove()
		Stats.DoneChecking(dst.Remote())
		if err != nil {
			Stats.Error()
			ErrorLog(dst, "Couldn't delete: %v", err)
		} else {
			Debug(dst, "Deleted")
		}
	}
	return err
}
// DeleteFiles removes all the files passed in the channel
//
// It runs Config.Transfers deletion workers in parallel and blocks
// until the channel is closed and all workers have finished. Per-file
// errors are counted (atomically, since workers run concurrently) and
// collapsed into a single summary error.
func DeleteFiles(toBeDeleted ObjectsChan) error {
	var wg sync.WaitGroup
	wg.Add(Config.Transfers)
	var errorCount int32
	for i := 0; i < Config.Transfers; i++ {
		go func() {
			defer wg.Done()
			for dst := range toBeDeleted {
				err := DeleteFile(dst)
				if err != nil {
					atomic.AddInt32(&errorCount, 1)
				}
			}
		}()
	}
	Log(nil, "Waiting for deletions to finish")
	wg.Wait()
	if errorCount > 0 {
		return errors.Errorf("failed to delete %d files", errorCount)
	}
	return nil
}
// Read a Objects into add() for the given Fs.
// dir is the start directory, "" for root
// If includeAll is specified all files will be added,
// otherwise only files passing the filter will be added.
//
// Each object is passed into the function provided. If that returns
// an error then the listing will be aborted and that error returned.
func readFilesFn(fs Fs, includeAll bool, dir string, add func(Object) error) (err error) {
	list := NewLister()
	if !includeAll {
		// Only apply filter/depth limits when not listing everything
		list.SetFilter(Config.Filter)
		list.SetLevel(Config.MaxDepth)
	}
	list.Start(fs, dir)
	for {
		o, err := list.GetObject()
		if err != nil {
			return err
		}
		// Check if we are finished
		if o == nil {
			break
		}
		// Make sure we don't delete excluded files if not required
		if includeAll || Config.Filter.IncludeObject(o) {
			err = add(o)
			if err != nil {
				// Aborts the listing; GetObject will return this error
				list.SetError(err)
			}
		} else {
			Debug(o, "Excluded from sync (and deletion)")
		}
	}
	return nil
}
// Read a map of Object.Remote to Object for the given Fs.
// dir is the start directory, "" for root
// If includeAll is specified all files will be added,
// otherwise only files passing the filter will be added.
//
// This also detects duplicates and normalised duplicates
// (same name after Unicode NFC normalisation and lower-casing),
// which are logged but not removed - the first object seen wins.
func readFilesMap(fs Fs, includeAll bool, dir string) (files map[string]Object, err error) {
	files = make(map[string]Object)
	normalised := make(map[string]struct{})
	err = readFilesFn(fs, includeAll, dir, func(o Object) error {
		remote := o.Remote()
		// Case/normalisation-insensitive key used only for warnings
		normalisedRemote := strings.ToLower(norm.NFC.String(remote))
		if _, ok := files[remote]; !ok {
			files[remote] = o
			if _, ok := normalised[normalisedRemote]; ok {
				Log(o, "Warning: File found with same name but different case on %v", o.Fs())
			}
		} else {
			Log(o, "Duplicate file detected")
		}
		normalised[normalisedRemote] = struct{}{}
		return nil
	})
	if err != nil {
		err = errors.Wrapf(err, "error listing: %s", fs)
	}
	return files, err
}
// readFilesMaps runs readFilesMap on fdst and fsrc at the same time
// dir is the start directory, "" for root
//
// The two listings run concurrently; if both fail, the source error
// takes precedence in the returned err.
func readFilesMaps(fdst Fs, fdstIncludeAll bool, fsrc Fs, fsrcIncludeAll bool, dir string) (dstFiles, srcFiles map[string]Object, err error) {
	var wg sync.WaitGroup
	var srcErr, dstErr error
	list := func(fs Fs, includeAll bool, pMap *map[string]Object, pErr *error) {
		defer wg.Done()
		Log(fs, "Building file list")
		files, listErr := readFilesMap(fs, includeAll, dir)
		if listErr != nil {
			ErrorLog(fs, "Error building file list: %v", listErr)
			*pErr = listErr
		} else {
			Debug(fs, "Done building file list")
			*pMap = files
		}
	}
	wg.Add(2)
	// BUG FIX: the error pointers were previously swapped - the fdst
	// lister wrote into srcErr and vice versa, mis-attributing which
	// side's listing failed.
	go list(fdst, fdstIncludeAll, &dstFiles, &dstErr)
	go list(fsrc, fsrcIncludeAll, &srcFiles, &srcErr)
	wg.Wait()
	if dstErr != nil {
		err = dstErr
	}
	if srcErr != nil {
		err = srcErr
	}
	return dstFiles, srcFiles, err
}
// Same returns true if fdst and fsrc point to the same underlying Fs,
// i.e. they share both the remote name and the root path.
func Same(fdst, fsrc Fs) bool {
	if fdst.Name() != fsrc.Name() {
		return false
	}
	return fdst.Root() == fsrc.Root()
}
// Overlapping returns true if fdst and fsrc point to the same
// underlying Fs or they overlap, i.e. same remote name and one root
// is a string prefix of the other.
func Overlapping(fdst, fsrc Fs) bool {
	if fdst.Name() != fsrc.Name() {
		return false
	}
	dstRoot, srcRoot := fdst.Root(), fsrc.Root()
	return strings.HasPrefix(dstRoot, srcRoot) || strings.HasPrefix(srcRoot, dstRoot)
}
// checkIdentical checks to see if dst and src are identical
//
// it returns true if differences were found
// it also returns whether it couldn't be hashed
//
// Size is always compared first; hashes are only consulted when
// --size-only is not set. Errors are counted in Stats and treated as
// a difference.
func checkIdentical(dst, src Object) (differ bool, noHash bool) {
	Stats.Checking(src.Remote())
	defer Stats.DoneChecking(src.Remote())
	if src.Size() != dst.Size() {
		Stats.Error()
		ErrorLog(src, "Sizes differ")
		return true, false
	}
	if !Config.SizeOnly {
		same, hash, err := CheckHashes(src, dst)
		if err != nil {
			// CheckHashes will log and count errors
			return true, false
		}
		if hash == HashNone {
			// No common hash type - sizes matched but content unverified
			return false, true
		}
		if !same {
			Stats.Error()
			ErrorLog(src, "%v differ", hash)
			return true, false
		}
	}
	Debug(src, "OK")
	return false, false
}
// Check the files in fsrc and fdst according to Size and hash
//
// Files present on only one side are reported as differences, then
// common files are compared by Config.Checkers parallel workers.
// Returns an error if any differences were found.
func Check(fdst, fsrc Fs) error {
	dstFiles, srcFiles, err := readFilesMaps(fdst, false, fsrc, false, "")
	if err != nil {
		return err
	}
	differences := int32(0)
	noHashes := int32(0)
	// FIXME could do this as it goes along and make it use less
	// memory.
	// Move all the common files into commonFiles and delete then
	// from srcFiles and dstFiles
	commonFiles := make(map[string][]Object)
	for remote, src := range srcFiles {
		if dst, ok := dstFiles[remote]; ok {
			commonFiles[remote] = []Object{dst, src}
			delete(srcFiles, remote)
			delete(dstFiles, remote)
		}
	}
	// After removal, dstFiles holds files only present in fdst
	Log(fdst, "%d files not in %v", len(dstFiles), fsrc)
	for _, dst := range dstFiles {
		Stats.Error()
		ErrorLog(dst, "File not in %v", fsrc)
		atomic.AddInt32(&differences, 1)
	}
	// ... and srcFiles holds files only present in fsrc
	Log(fsrc, "%d files not in %s", len(srcFiles), fdst)
	for _, src := range srcFiles {
		Stats.Error()
		ErrorLog(src, "File not in %v", fdst)
		atomic.AddInt32(&differences, 1)
	}
	checks := make(chan []Object, Config.Transfers)
	go func() {
		for _, check := range commonFiles {
			checks <- check
		}
		close(checks)
	}()
	var checkerWg sync.WaitGroup
	checkerWg.Add(Config.Checkers)
	for i := 0; i < Config.Checkers; i++ {
		go func() {
			defer checkerWg.Done()
			for check := range checks {
				// check is []Object{dst, src} - see commonFiles above
				differ, noHash := checkIdentical(check[0], check[1])
				if differ {
					atomic.AddInt32(&differences, 1)
				}
				if noHash {
					atomic.AddInt32(&noHashes, 1)
				}
			}
		}()
	}
	Log(fdst, "Waiting for checks to finish")
	checkerWg.Wait()
	// NOTE(review): this logs the global error count, not the local
	// differences counter - they can disagree if other operations
	// counted errors. The returned error below uses differences.
	Log(fdst, "%d differences found", Stats.GetErrors())
	if noHashes > 0 {
		Log(fdst, "%d hashes could not be checked", noHashes)
	}
	if differences > 0 {
		return errors.Errorf("%d differences found", differences)
	}
	return nil
}
// ListFn lists the Fs to the supplied function
//
// Lists in parallel which may get them out of order
//
// NOTE(review): a listing error aborts the whole process via
// log.Fatal rather than being returned to the caller.
func ListFn(f Fs, fn func(Object)) error {
	list := NewLister().SetFilter(Config.Filter).SetLevel(Config.MaxDepth).Start(f, "")
	var wg sync.WaitGroup
	wg.Add(Config.Checkers)
	for i := 0; i < Config.Checkers; i++ {
		go func() {
			defer wg.Done()
			for {
				o, err := list.GetObject()
				if err != nil {
					log.Fatal(err)
				}
				// check if we are finished
				if o == nil {
					return
				}
				if Config.Filter.IncludeObject(o) {
					fn(o)
				}
			}
		}()
	}
	wg.Wait()
	return nil
}
// mutex for synchronized output
var outMutex sync.Mutex
// Synchronized fmt.Fprintf
//
// Ignores errors from Fprintf
//
// Needed because the List* functions print from multiple goroutines.
func syncFprintf(w io.Writer, format string, a ...interface{}) {
	outMutex.Lock()
	defer outMutex.Unlock()
	_, _ = fmt.Fprintf(w, format, a...)
}
// List the Fs to the supplied writer
//
// Shows size and path - obeys includes and excludes
//
// Lists in parallel which may get them out of order
func List(f Fs, w io.Writer) error {
	// Print one "size path" line per object, synchronized because
	// ListFn calls us from multiple goroutines.
	printObject := func(o Object) {
		syncFprintf(w, "%9d %s\n", o.Size(), o.Remote())
	}
	return ListFn(f, printObject)
}
// ListLong lists the Fs to the supplied writer
//
// Shows size, mod time and path - obeys includes and excludes
//
// Lists in parallel which may get them out of order
func ListLong(f Fs, w io.Writer) error {
	// Print "size modtime path" per object; reading ModTime may be a
	// remote call, so it is accounted as a check.
	printObject := func(o Object) {
		Stats.Checking(o.Remote())
		modTime := o.ModTime()
		Stats.DoneChecking(o.Remote())
		syncFprintf(w, "%9d %s %s\n", o.Size(), modTime.Local().Format("2006-01-02 15:04:05.000000000"), o.Remote())
	}
	return ListFn(f, printObject)
}
// Md5sum list the Fs to the supplied writer
//
// Produces the same output as the md5sum command - obeys includes and
// excludes
//
// Lists in parallel which may get them out of order
func Md5sum(f Fs, w io.Writer) error {
	return hashLister(HashMD5, f, w)
}
// Sha1sum list the Fs to the supplied writer
//
// Obeys includes and excludes
//
// Lists in parallel which may get them out of order
func Sha1sum(f Fs, w io.Writer) error {
	return hashLister(HashSHA1, f, w)
}
// hashLister writes "hash path" lines for every object in f using the
// given hash type. Unsupported hashes print "UNSUPPORTED" and read
// failures print "ERROR" rather than aborting the listing.
func hashLister(ht HashType, f Fs, w io.Writer) error {
	return ListFn(f, func(o Object) {
		Stats.Checking(o.Remote())
		sum, err := o.Hash(ht)
		Stats.DoneChecking(o.Remote())
		if err == ErrHashUnsupported {
			sum = "UNSUPPORTED"
		} else if err != nil {
			Debug(o, "Failed to read %v: %v", ht, err)
			sum = "ERROR"
		}
		syncFprintf(w, "%*s %s\n", HashWidth[ht], sum, o.Remote())
	})
}
// Count counts the objects and their sizes in the Fs
//
// Obeys includes and excludes
//
// Counters are updated atomically because ListFn calls the callback
// from multiple goroutines.
func Count(f Fs) (objects int64, size int64, err error) {
	err = ListFn(f, func(o Object) {
		atomic.AddInt64(&objects, 1)
		atomic.AddInt64(&size, o.Size())
	})
	return
}
// ListDir lists the directories/buckets/containers in the Fs to the supplied writer
//
// Limited to Config.MaxDepth levels when set; otherwise only the top
// level. NOTE(review): errors abort the process via log.Fatal.
func ListDir(f Fs, w io.Writer) error {
	level := 1
	if Config.MaxDepth > 0 {
		level = Config.MaxDepth
	}
	list := NewLister().SetFilter(Config.Filter).SetLevel(level).Start(f, "")
	for {
		dir, err := list.GetDir()
		if err != nil {
			log.Fatal(err)
		}
		// nil dir means the listing has finished
		if dir == nil {
			break
		}
		syncFprintf(w, "%12d %13s %9d %s\n", dir.Bytes, dir.When.Format("2006-01-02 15:04:05"), dir.Count, dir.Name)
	}
	return nil
}
// Mkdir makes a destination directory or container
func Mkdir(f Fs, dir string) error {
	// Honour --dry-run
	if Config.DryRun {
		Log(f, "Not making directory as dry run is set")
		return nil
	}
	if err := f.Mkdir(dir); err != nil {
		Stats.Error()
		return err
	}
	return nil
}
// TryRmdir removes a container but not if not empty. It doesn't
// count errors but may return one.
func TryRmdir(f Fs, dir string) error {
	if !Config.DryRun {
		return f.Rmdir(dir)
	}
	// --dry-run: just log what would have happened
	if dir == "" {
		Log(f, "Not deleting as dry run is set")
	} else {
		Log(dir, "Not deleting as dry run is set")
	}
	return nil
}
// Rmdir removes a container but not if not empty
//
// Unlike TryRmdir this counts any error in Stats.
func Rmdir(f Fs, dir string) error {
	if err := TryRmdir(f, dir); err != nil {
		Stats.Error()
		return err
	}
	return nil
}
// Purge removes a container and all of its contents
//
// Uses the optional Purger interface when available, falling back to
// deleting every file then removing the container. Honours --dry-run.
//
// FIXME doesn't delete local directories
func Purge(f Fs) error {
	doFallbackPurge := true
	var err error
	if purger, ok := f.(Purger); ok {
		doFallbackPurge = false
		if Config.DryRun {
			Log(f, "Not purging as --dry-run set")
		} else {
			err = purger.Purge()
			if err == ErrorCantPurge {
				// Backend opted out at runtime - fall back below
				doFallbackPurge = true
			}
		}
	}
	if doFallbackPurge {
		// DeleteFiles and Rmdir observe --dry-run
		list := NewLister().Start(f, "")
		err = DeleteFiles(listToChan(list))
		if err != nil {
			return err
		}
		err = Rmdir(f, "")
	}
	if err != nil {
		Stats.Error()
		return err
	}
	return nil
}
// Delete removes all the contents of a container. Unlike Purge, it
// obeys includes and excludes.
//
// Deletions run concurrently with the listing: objects are streamed
// into a channel consumed by DeleteFiles.
func Delete(f Fs) error {
	// Renamed from "delete", which shadowed the built-in delete().
	toBeDeleted := make(ObjectsChan, Config.Transfers)
	delErr := make(chan error, 1)
	go func() {
		delErr <- DeleteFiles(toBeDeleted)
	}()
	err := ListFn(f, func(o Object) {
		toBeDeleted <- o
	})
	close(toBeDeleted)
	// Wait for the deleters to drain the channel and finish
	delError := <-delErr
	if err == nil {
		err = delError
	}
	return err
}
// dedupeRename renames the objs slice to different names
//
// file.jpg becomes file-1.jpg, file-2.jpg, ... Requires the Fs to
// implement Mover (fatal otherwise). Honours --dry-run.
func dedupeRename(remote string, objs []Object) {
	f := objs[0].Fs()
	mover, ok := f.(Mover)
	if !ok {
		log.Fatalf("Fs %v doesn't support Move", f)
	}
	// Split "file.jpg" into base "file" and ext ".jpg"
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	for i, o := range objs {
		newName := fmt.Sprintf("%s-%d%s", base, i+1, ext)
		if !Config.DryRun {
			newObj, err := mover.Move(o, newName)
			if err != nil {
				Stats.Error()
				ErrorLog(o, "Failed to rename: %v", err)
				continue
			}
			Log(newObj, "renamed from: %v", o)
		} else {
			Log(remote, "Not renaming to %q as --dry-run", newName)
		}
	}
}
// dedupeDeleteAllButOne deletes all but the one in keep
func dedupeDeleteAllButOne(keep int, remote string, objs []Object) {
	// Remove every duplicate except the object at index keep
	for index, obj := range objs {
		if index != keep {
			_ = DeleteFile(obj)
		}
	}
	Log(remote, "Deleted %d extra copies", len(objs)-1)
}
// dedupeDeleteIdentical deletes all but one of identical (by hash) copies
//
// Returns the remaining distinct objects. Objects whose MD5 hash
// cannot be read are kept as-is rather than silently dropped.
func dedupeDeleteIdentical(remote string, objs []Object) []Object {
	// See how many of these duplicates are identical
	byHash := make(map[string][]Object, len(objs))
	var remaining []Object
	for _, o := range objs {
		md5sum, err := o.Hash(HashMD5)
		if err != nil {
			// BUG FIX: previously objects with unreadable hashes
			// vanished from the returned slice; keep them so later
			// dedupe steps still see them.
			remaining = append(remaining, o)
			continue
		}
		byHash[md5sum] = append(byHash[md5sum], o)
	}
	// Delete identical duplicates, keeping one per hash
	for md5sum, hashObjs := range byHash {
		if len(hashObjs) > 1 {
			Log(remote, "Deleting %d/%d identical duplicates (md5sum %q)", len(hashObjs)-1, len(hashObjs), md5sum)
			for _, o := range hashObjs[1:] {
				_ = DeleteFile(o)
			}
		}
		remaining = append(remaining, hashObjs[0])
	}
	return remaining
}
// dedupeInteractive interactively dedupes the slice of objects
//
// Prints each duplicate with size/modtime/md5 then asks the user to
// skip, keep one (deleting the rest), or rename them all.
func dedupeInteractive(remote string, objs []Object) {
	fmt.Printf("%s: %d duplicates remain\n", remote, len(objs))
	for i, o := range objs {
		md5sum, err := o.Hash(HashMD5)
		if err != nil {
			// Show the error in place of the hash
			md5sum = err.Error()
		}
		fmt.Printf("  %d: %12d bytes, %s, md5sum %32s\n", i+1, o.Size(), o.ModTime().Format("2006-01-02 15:04:05.000000000"), md5sum)
	}
	switch Command([]string{"sSkip and do nothing", "kKeep just one (choose which in next step)", "rRename all to be different (by changing file.jpg to file-1.jpg)"}) {
	case 's':
	case 'k':
		keep := ChooseNumber("Enter the number of the file to keep", 1, len(objs))
		// User input is 1-based; dedupeDeleteAllButOne wants 0-based
		dedupeDeleteAllButOne(keep-1, remote, objs)
	case 'r':
		dedupeRename(remote, objs)
	}
}
// objectsSortedByModTime sorts Objects oldest first (sort.Interface).
type objectsSortedByModTime []Object
func (objs objectsSortedByModTime) Len() int      { return len(objs) }
func (objs objectsSortedByModTime) Swap(i, j int) { objs[i], objs[j] = objs[j], objs[i] }
func (objs objectsSortedByModTime) Less(i, j int) bool {
	return objs[i].ModTime().Before(objs[j].ModTime())
}
// DeduplicateMode is how the dedupe command chooses what to do
type DeduplicateMode int
// Deduplicate modes
const (
	DeduplicateInteractive DeduplicateMode = iota // interactively ask the user
	DeduplicateSkip                               // skip all conflicts
	DeduplicateFirst                              // choose the first object
	DeduplicateNewest                             // choose the newest object
	DeduplicateOldest                             // choose the oldest object
	DeduplicateRename                             // rename the objects
)
// String returns the lower-case name of the mode, or "unknown" for
// out-of-range values. Inverse of Set.
func (x DeduplicateMode) String() string {
	switch x {
	case DeduplicateInteractive:
		return "interactive"
	case DeduplicateSkip:
		return "skip"
	case DeduplicateFirst:
		return "first"
	case DeduplicateNewest:
		return "newest"
	case DeduplicateOldest:
		return "oldest"
	case DeduplicateRename:
		return "rename"
	}
	return "unknown"
}
// Set a DeduplicateMode from a string
//
// Case-insensitive; returns an error for unknown mode names.
// Part of the pflag.Value interface.
func (x *DeduplicateMode) Set(s string) error {
	switch strings.ToLower(s) {
	case "interactive":
		*x = DeduplicateInteractive
	case "skip":
		*x = DeduplicateSkip
	case "first":
		*x = DeduplicateFirst
	case "newest":
		*x = DeduplicateNewest
	case "oldest":
		*x = DeduplicateOldest
	case "rename":
		*x = DeduplicateRename
	default:
		return errors.Errorf("Unknown mode for dedupe %q.", s)
	}
	return nil
}
// Type of the value - part of the pflag.Value interface.
func (x *DeduplicateMode) Type() string {
	return "string"
}
// Check it satisfies the interface at compile time
var _ pflag.Value = (*DeduplicateMode)(nil)
// Deduplicate interactively finds duplicate files and offers to
// delete all but one or rename them to be different. Only useful with
// Google Drive which can have duplicate file names.
//
// First groups all objects by remote name, then for each group with
// more than one entry removes byte-identical copies before applying
// the chosen DeduplicateMode to whatever remains.
func Deduplicate(f Fs, mode DeduplicateMode) error {
	Log(f, "Looking for duplicates using %v mode.", mode)
	files := map[string][]Object{}
	list := NewLister().Start(f, "")
	for {
		o, err := list.GetObject()
		if err != nil {
			return err
		}
		// Check if we are finished
		if o == nil {
			break
		}
		remote := o.Remote()
		files[remote] = append(files[remote], o)
	}
	for remote, objs := range files {
		if len(objs) > 1 {
			Log(remote, "Found %d duplicates - deleting identical copies", len(objs))
			objs = dedupeDeleteIdentical(remote, objs)
			if len(objs) <= 1 {
				Log(remote, "All duplicates removed")
				continue
			}
			switch mode {
			case DeduplicateInteractive:
				dedupeInteractive(remote, objs)
			case DeduplicateFirst:
				dedupeDeleteAllButOne(0, remote, objs)
			case DeduplicateNewest:
				sort.Sort(objectsSortedByModTime(objs)) // sort oldest first
				dedupeDeleteAllButOne(len(objs)-1, remote, objs)
			case DeduplicateOldest:
				sort.Sort(objectsSortedByModTime(objs)) // sort oldest first
				dedupeDeleteAllButOne(0, remote, objs)
			case DeduplicateRename:
				dedupeRename(remote, objs)
			case DeduplicateSkip:
				// skip
			default:
				//skip
			}
		}
	}
	return nil
}
// listToChan will transfer all incoming objects to a new channel.
//
// If an error occurs, the error will be logged, and it will close the
// channel.
//
// If the error was ErrorDirNotFound then it will be ignored
func listToChan(list *Lister) ObjectsChan {
	o := make(ObjectsChan, Config.Checkers)
	go func() {
		defer close(o)
		for {
			obj, dir, err := list.Get()
			if err != nil {
				if err != ErrorDirNotFound {
					Stats.Error()
					ErrorLog(nil, "Failed to list: %v", err)
				}
				return
			}
			// Both nil signals the end of the listing
			if dir == nil && obj == nil {
				return
			}
			// Directories are skipped - only objects are forwarded
			if obj == nil {
				continue
			}
			o <- obj
		}
	}()
	return o
}
// CleanUp removes the trash for the Fs
//
// Returns an error if the Fs doesn't implement the optional
// CleanUpper interface. Honours --dry-run.
func CleanUp(f Fs) error {
	cleaner, ok := f.(CleanUpper)
	if !ok {
		return errors.Errorf("%v doesn't support cleanup", f)
	}
	if Config.DryRun {
		Log(f, "Not running cleanup as --dry-run set")
		return nil
	}
	return cleaner.CleanUp()
}
// Cat any files to the io.Writer
//
// The mutex serializes output so concurrent listing workers don't
// interleave file contents in w.
func Cat(f Fs, w io.Writer) error {
	var mu sync.Mutex
	return ListFn(f, func(o Object) {
		var err error
		Stats.Transferring(o.Remote())
		defer func() {
			Stats.DoneTransferring(o.Remote(), err == nil)
		}()
		mu.Lock()
		defer mu.Unlock()
		in, err := o.Open()
		if err != nil {
			Stats.Error()
			ErrorLog(o, "Failed to open: %v", err)
			return
		}
		defer func() {
			// Close errors count as a failed transfer via the
			// deferred DoneTransferring above
			err = in.Close()
			if err != nil {
				Stats.Error()
				ErrorLog(o, "Failed to close: %v", err)
			}
		}()
		inAccounted := NewAccount(in, o) // account and buffer the transfer
		_, err = io.Copy(w, inAccounted)
		if err != nil {
			Stats.Error()
			ErrorLog(o, "Failed to send to output: %v", err)
		}
	})
}
// Rmdirs removes any empty directories (or directories only
// containing empty directories) under f, including f.
//
// First pass: walk the listing marking every ancestor directory of a
// file as non-empty. Second pass: delete the still-empty directories,
// deepest first (longest path sorts last).
func Rmdirs(f Fs) error {
	list := NewLister().Start(f, "")
	dirEmpty := make(map[string]bool)
	// "" is the root of f itself
	dirEmpty[""] = true
	for {
		o, dir, err := list.Get()
		if err != nil {
			Stats.Error()
			ErrorLog(f, "Failed to list: %v", err)
			return err
		} else if dir != nil {
			// add a new directory as empty
			dir := dir.Name
			_, found := dirEmpty[dir]
			if !found {
				dirEmpty[dir] = true
			}
		} else if o != nil {
			// mark the parents of the file as being non-empty
			dir := o.Remote()
			for dir != "" {
				dir = path.Dir(dir)
				if dir == "." || dir == "/" {
					dir = ""
				}
				empty, found := dirEmpty[dir]
				// End if we reach a directory which is non-empty
				if found && !empty {
					break
				}
				dirEmpty[dir] = false
			}
		} else {
			// finished as dir == nil && o == nil
			break
		}
	}
	// Now delete the empty directories, starting from the longest path
	var toDelete []string
	for dir, empty := range dirEmpty {
		if empty {
			toDelete = append(toDelete, dir)
		}
	}
	sort.Strings(toDelete)
	// Iterate in reverse so children are removed before parents
	for i := len(toDelete) - 1; i >= 0; i-- {
		dir := toDelete[i]
		err := TryRmdir(f, dir)
		if err != nil {
			Stats.Error()
			ErrorLog(dir, "Failed to rmdir: %v", err)
			return err
		}
	}
	return nil
}
// moveOrCopyFile moves or copies a single file possibly to a new name
//
// cp selects Copy (true) or Move (false). If the destination already
// matches the source (per NeedTransfer) a move still deletes the
// source; a copy does nothing.
func moveOrCopyFile(fdst Fs, fsrc Fs, dstFileName string, srcFileName string, cp bool) (err error) {
	// Choose operations
	Op := Move
	if cp {
		Op = Copy
	}
	// Find src object
	srcObj, err := fsrc.NewObject(srcFileName)
	if err != nil {
		return err
	}
	// Find dst object if it exists
	dstObj, err := fdst.NewObject(dstFileName)
	if err == ErrorObjectNotFound {
		// A missing destination is fine - Op will create it
		dstObj = nil
	} else if err != nil {
		return err
	}
	if NeedTransfer(dstObj, srcObj) {
		return Op(fdst, dstObj, dstFileName, srcObj)
	} else if !cp {
		return DeleteFile(srcObj)
	}
	return nil
}
// MoveFile moves a single file possibly to a new name
func MoveFile(fdst Fs, fsrc Fs, dstFileName string, srcFileName string) (err error) {
	return moveOrCopyFile(fdst, fsrc, dstFileName, srcFileName, false)
}
// CopyFile copies a single file possibly to a new name
func CopyFile(fdst Fs, fsrc Fs, dstFileName string, srcFileName string) (err error) {
	return moveOrCopyFile(fdst, fsrc, dstFileName, srcFileName, true)
}
// Make server side move more obvious in debug
// Generic operations on filesystems and objects
package fs
import (
"fmt"
"io"
"log"
"mime"
"path"
"sort"
"strings"
"sync"
"sync/atomic"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"golang.org/x/text/unicode/norm"
)
// CalculateModifyWindow works out modify window for Fses passed in -
// sets Config.ModifyWindow
//
// This is the largest modify window of all the fses in use, and the
// user configured value
func CalculateModifyWindow(fs ...Fs) {
	// Guard against a call with no Fses - fs[0] below would panic.
	if len(fs) == 0 {
		return
	}
	for _, f := range fs {
		if f != nil {
			precision := f.Precision()
			if precision > Config.ModifyWindow {
				Config.ModifyWindow = precision
			}
			if precision == ModTimeNotSupported {
				// No point widening further - mod times are unusable
				Debug(f, "Modify window not supported")
				return
			}
		}
	}
	Debug(fs[0], "Modify window is %s", Config.ModifyWindow)
}
// HashEquals checks to see if src == dst, but ignores empty strings
// and returns true if either is empty.
func HashEquals(src, dst string) bool {
	switch {
	case src == "", dst == "":
		// Treat a missing hash on either side as a match
		return true
	default:
		return src == dst
	}
}
// CheckHashes checks the two files to see if they have common
// known hash types and compares them
//
// Returns
//
// equal - which is equality of the hashes
//
// hash - the HashType. This is HashNone if either of the hashes were
// unset or a compatible hash couldn't be found.
//
// err - may return an error which will already have been logged
//
// If an error is returned it will return equal as false
//
// Note that "no common hash" and "blank hash" report equal=true with
// HashNone - callers must treat that as "unverified", not "verified".
func CheckHashes(src, dst Object) (equal bool, hash HashType, err error) {
	common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
	// Debug(nil, "Shared hashes: %v", common)
	if common.Count() == 0 {
		return true, HashNone, nil
	}
	hash = common.GetOne()
	srcHash, err := src.Hash(hash)
	if err != nil {
		Stats.Error()
		ErrorLog(src, "Failed to calculate src hash: %v", err)
		return false, hash, err
	}
	if srcHash == "" {
		return true, HashNone, nil
	}
	dstHash, err := dst.Hash(hash)
	if err != nil {
		Stats.Error()
		ErrorLog(dst, "Failed to calculate dst hash: %v", err)
		return false, hash, err
	}
	if dstHash == "" {
		return true, HashNone, nil
	}
	return srcHash == dstHash, hash, nil
}
// Equal checks to see if the src and dst objects are equal by looking at
// size, mtime and hash
//
// If the src and dst size are different then it is considered to be
// not equal. If --size-only is in effect then this is the only check
// that is done. If --ignore-size is in effect then this check is
// skipped and the files are considered the same size.
//
// If the size is the same and the mtime is the same then it is
// considered to be equal. This check is skipped if using --checksum.
//
// If the size is the same and mtime is different, unreadable or
// --checksum is set and the hash is the same then the file is
// considered to be equal. In this case the mtime on the dst is
// updated if --checksum is not set.
//
// Otherwise the file is considered to be not equal including if there
// were errors reading info.
func Equal(src, dst Object) bool {
	if !Config.IgnoreSize {
		if src.Size() != dst.Size() {
			Debug(src, "Sizes differ")
			return false
		}
	}
	if Config.SizeOnly {
		Debug(src, "Sizes identical")
		return true
	}
	// Assert: Size is equal or being ignored
	// If checking checksum and not modtime
	if Config.CheckSum {
		// Check the hash
		same, hash, _ := CheckHashes(src, dst)
		if !same {
			Debug(src, "%v differ", hash)
			return false
		}
		if hash == HashNone {
			// No common hash - size match is the best we could do
			Debug(src, "Size of src and dst objects identical")
		} else {
			Debug(src, "Size and %v of src and dst objects identical", hash)
		}
		return true
	}
	// Sizes the same so check the mtime
	if Config.ModifyWindow == ModTimeNotSupported {
		Debug(src, "Sizes identical")
		return true
	}
	srcModTime := src.ModTime()
	dstModTime := dst.ModTime()
	dt := dstModTime.Sub(srcModTime)
	ModifyWindow := Config.ModifyWindow
	if dt < ModifyWindow && dt > -ModifyWindow {
		Debug(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
		return true
	}
	Debug(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
	// Check if the hashes are the same
	same, hash, _ := CheckHashes(src, dst)
	if !same {
		Debug(src, "%v differ", hash)
		return false
	}
	if hash == HashNone {
		// if couldn't check hash, return that they differ
		return false
	}
	// mod time differs but hash is the same to reset mod time if required
	if !Config.NoUpdateModTime {
		// Size and hash the same but mtime different so update the
		// mtime of the dst object here
		err := dst.SetModTime(srcModTime)
		if err == ErrorCantSetModTime {
			// Returning false forces a re-upload to fix the mtime
			Debug(src, "src and dst identical but can't set mod time without re-uploading")
			return false
		} else if err != nil {
			Stats.Error()
			ErrorLog(dst, "Failed to set modification time: %v", err)
		} else {
			Debug(src, "Updated modification time in destination")
		}
	}
	return true
}
// MimeTypeFromName returns a guess at the mime type from the name
//
// Falls back to application/octet-stream when the extension is
// unknown (TypeByExtension returns "" or something without a "/").
func MimeTypeFromName(remote string) (mimeType string) {
	guess := mime.TypeByExtension(path.Ext(remote))
	if strings.ContainsRune(guess, '/') {
		return guess
	}
	return "application/octet-stream"
}
// MimeType returns the MimeType from the object, either by calling
// the MimeTyper interface or using MimeTypeFromName
func MimeType(o ObjectInfo) (mimeType string) {
	// Read the MimeType from the optional interface if available
	if do, ok := o.(MimeTyper); ok {
		mimeType = do.MimeType()
		Debug(o, "Read MimeType as %q", mimeType)
		if mimeType != "" {
			return mimeType
		}
		// Empty answer from the backend - fall through to name guess
	}
	return MimeTypeFromName(o.Remote())
}
// Used to remove a failed copy
//
// Returns whether the file was successfully removed or not
//
// A nil dst is a no-op (nothing was created to remove). Removal
// failures are only logged at debug level - the copy error that
// triggered this is the one the caller reports.
func removeFailedCopy(dst Object) bool {
	if dst == nil {
		return false
	}
	Debug(dst, "Removing failed copy")
	removeErr := dst.Remove()
	if removeErr != nil {
		Debug(dst, "Failed to remove failed copy: %s", removeErr)
		return false
	}
	return true
}
// Wrapper to override the remote for an object
//
// Embeds the original Object so every other method passes through;
// only Remote() is replaced.
type overrideRemoteObject struct {
	Object
	remote string
}
// Remote returns the overriden remote name
func (o *overrideRemoteObject) Remote() string {
	return o.remote
}
// Copy src object to dst or f if nil. If dst is nil then it uses
// remote as the name of the new object.
//
// Tries a server side copy first (Copier interface, same remote),
// falling back to a streamed download/upload. The whole attempt is
// retried up to Config.LowLevelRetries times on retryable errors.
// After a successful transfer the size and (if available) a common
// hash are verified; on mismatch the bad destination is removed.
func Copy(f Fs, dst Object, remote string, src Object) (err error) {
	if Config.DryRun {
		Log(src, "Not copying as --dry-run")
		return nil
	}
	maxTries := Config.LowLevelRetries
	tries := 0
	// dst != nil means we overwrite in place with Update rather than Put
	doUpdate := dst != nil
	var actionTaken string
	for {
		// Try server side copy first - if has optional interface and
		// is same underlying remote
		actionTaken = "Copied (server side copy)"
		if fCopy, ok := f.(Copier); ok && src.Fs().Name() == f.Name() {
			var newDst Object
			newDst, err = fCopy.Copy(src, remote)
			if err == nil {
				dst = newDst
			}
		} else {
			err = ErrorCantCopy
		}
		// If can't server side copy, do it manually
		if err == ErrorCantCopy {
			var in0 io.ReadCloser
			in0, err = src.Open()
			if err != nil {
				err = errors.Wrap(err, "failed to open source object")
			} else {
				in := NewAccount(in0, src) // account and buffer the transfer
				// The upload must report the destination name, not the
				// source's, hence the remote override wrapper.
				wrappedSrc := &overrideRemoteObject{Object: src, remote: remote}
				if doUpdate {
					actionTaken = "Copied (replaced existing)"
					err = dst.Update(in, wrappedSrc)
				} else {
					actionTaken = "Copied (new)"
					dst, err = f.Put(in, wrappedSrc)
				}
				closeErr := in.Close()
				if err == nil {
					err = closeErr
				}
			}
		}
		tries++
		if tries >= maxTries {
			break
		}
		// Retry if err returned a retry error
		if IsRetryError(err) || ShouldRetry(err) {
			Debug(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries)
			continue
		}
		// otherwise finish
		break
	}
	if err != nil {
		Stats.Error()
		ErrorLog(src, "Failed to copy: %v", err)
		return err
	}
	// Verify sizes are the same after transfer
	if !Config.IgnoreSize && src.Size() != dst.Size() {
		Stats.Error()
		err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
		ErrorLog(dst, "%v", err)
		removeFailedCopy(dst)
		return err
	}
	// Verify hashes are the same after transfer - ignoring blank hashes
	// TODO(klauspost): This could be extended, so we always create a hash type matching
	// the destination, and calculate it while sending.
	common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
	// Debug(src, "common hashes: %v", common)
	if !Config.SizeOnly && common.Count() > 0 {
		// Get common hash type
		hashType := common.GetOne()
		var srcSum string
		srcSum, err = src.Hash(hashType)
		if err != nil {
			Stats.Error()
			ErrorLog(src, "Failed to read src hash: %v", err)
		} else if srcSum != "" {
			var dstSum string
			dstSum, err = dst.Hash(hashType)
			if err != nil {
				Stats.Error()
				ErrorLog(dst, "Failed to read hash: %v", err)
			} else if !HashEquals(srcSum, dstSum) {
				Stats.Error()
				err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
				ErrorLog(dst, "%v", err)
				removeFailedCopy(dst)
				return err
			}
		}
	}
	Debug(src, actionTaken)
	return err
}
// Move src object to dst or fdst if nil. If dst is nil then it uses
// remote as the name of the new object.
//
// Prefers a server side move via the optional Mover interface when
// source and destination share a remote; otherwise falls back to
// Copy + DeleteFile. Honours --dry-run.
func Move(fdst Fs, dst Object, remote string, src Object) (err error) {
	if Config.DryRun {
		Log(src, "Not moving as --dry-run")
		return nil
	}
	// See if we have Move available
	if do, ok := fdst.(Mover); ok && src.Fs().Name() == fdst.Name() {
		// Delete destination if it exists
		if dst != nil {
			err = DeleteFile(dst)
			if err != nil {
				return err
			}
		}
		// Move dst <- src
		_, err := do.Move(src, remote)
		switch err {
		case nil:
			Debug(src, "Moved (server side)")
			return nil
		case ErrorCantMove:
			Debug(src, "Can't move, switching to copy")
		default:
			Stats.Error()
			ErrorLog(dst, "Couldn't move: %v", err)
			return err
		}
	}
	// Move not found or didn't work so copy dst <- src
	err = Copy(fdst, dst, remote, src)
	if err != nil {
		ErrorLog(src, "Not deleting source as copy failed: %v", err)
		return err
	}
	// Delete src if no error on copy
	return DeleteFile(src)
}
// DeleteFile deletes a single file respecting --dry-run and accumulating stats and errors.
func DeleteFile(dst Object) (err error) {
	// Dry run: log and report success without touching the remote
	if Config.DryRun {
		Log(dst, "Not deleting as --dry-run")
		return nil
	}
	// Accounted as a check, not a transfer
	Stats.Checking(dst.Remote())
	err = dst.Remove()
	Stats.DoneChecking(dst.Remote())
	if err != nil {
		Stats.Error()
		ErrorLog(dst, "Couldn't delete: %v", err)
		return err
	}
	Debug(dst, "Deleted")
	return nil
}
// DeleteFiles removes all the files passed in the channel
//
// Runs Config.Transfers parallel deletion workers until the channel
// closes; failures are counted atomically and reported as one error.
func DeleteFiles(toBeDeleted ObjectsChan) error {
	var wg sync.WaitGroup
	wg.Add(Config.Transfers)
	var errorCount int32
	for i := 0; i < Config.Transfers; i++ {
		go func() {
			defer wg.Done()
			for dst := range toBeDeleted {
				err := DeleteFile(dst)
				if err != nil {
					atomic.AddInt32(&errorCount, 1)
				}
			}
		}()
	}
	Log(nil, "Waiting for deletions to finish")
	wg.Wait()
	if errorCount > 0 {
		return errors.Errorf("failed to delete %d files", errorCount)
	}
	return nil
}
// Read a Objects into add() for the given Fs.
// dir is the start directory, "" for root
// If includeAll is specified all files will be added,
// otherwise only files passing the filter will be added.
//
// Each object is passed into the function provided. If that returns
// an error then the listing will be aborted and that error returned.
func readFilesFn(fs Fs, includeAll bool, dir string, add func(Object) error) (err error) {
	list := NewLister()
	if !includeAll {
		// Only apply filter/depth limits when not listing everything
		list.SetFilter(Config.Filter)
		list.SetLevel(Config.MaxDepth)
	}
	list.Start(fs, dir)
	for {
		o, err := list.GetObject()
		if err != nil {
			return err
		}
		// Check if we are finished
		if o == nil {
			break
		}
		// Make sure we don't delete excluded files if not required
		if includeAll || Config.Filter.IncludeObject(o) {
			err = add(o)
			if err != nil {
				// Aborts the listing; GetObject will return this error
				list.SetError(err)
			}
		} else {
			Debug(o, "Excluded from sync (and deletion)")
		}
	}
	return nil
}
// Read a map of Object.Remote to Object for the given Fs.
// dir is the start directory, "" for root
// If includeAll is specified all files will be added,
// otherwise only files passing the filter will be added.
//
// This also detects duplicates and normalised duplicates
func readFilesMap(fs Fs, includeAll bool, dir string) (files map[string]Object, err error) {
	files = make(map[string]Object)
	// Tracks case/Unicode-normalised names so near-duplicates can be warned about.
	normalised := make(map[string]struct{})
	err = readFilesFn(fs, includeAll, dir, func(o Object) error {
		remote := o.Remote()
		// NFC-normalise and lowercase to catch names that differ only in
		// case or Unicode composition form.
		normalisedRemote := strings.ToLower(norm.NFC.String(remote))
		if _, ok := files[remote]; !ok {
			// First occurrence of this exact name wins.
			files[remote] = o
			if _, ok := normalised[normalisedRemote]; ok {
				Log(o, "Warning: File found with same name but different case on %v", o.Fs())
			}
		} else {
			Log(o, "Duplicate file detected")
		}
		normalised[normalisedRemote] = struct{}{}
		return nil
	})
	if err != nil {
		err = errors.Wrapf(err, "error listing: %s", fs)
	}
	return files, err
}
// readFilesMaps runs readFilesMap on fdst and fsrc at the same time
// dir is the start directory, "" for root
//
// It returns the two maps plus whichever listing error occurred (the
// destination error takes precedence if both listings fail).
func readFilesMaps(fdst Fs, fdstIncludeAll bool, fsrc Fs, fsrcIncludeAll bool, dir string) (dstFiles, srcFiles map[string]Object, err error) {
	var wg sync.WaitGroup
	var srcErr, dstErr error
	// list reads one remote into *pMap, recording any failure in *pErr.
	list := func(fs Fs, includeAll bool, pMap *map[string]Object, pErr *error) {
		defer wg.Done()
		Log(fs, "Building file list")
		files, listErr := readFilesMap(fs, includeAll, dir)
		if listErr != nil {
			ErrorLog(fs, "Error building file list: %v", listErr)
			*pErr = listErr
		} else {
			Debug(fs, "Done building file list")
			*pMap = files
		}
	}
	wg.Add(2)
	// Fixed: the destination listing now records into dstErr and the
	// source listing into srcErr (the pointers were previously swapped).
	go list(fdst, fdstIncludeAll, &dstFiles, &dstErr)
	go list(fsrc, fsrcIncludeAll, &srcFiles, &srcErr)
	wg.Wait()
	if srcErr != nil {
		err = srcErr
	}
	if dstErr != nil {
		err = dstErr
	}
	return dstFiles, srcFiles, err
}
// Same returns true if fdst and fsrc point to the same underlying Fs
func Same(fdst, fsrc Fs) bool {
	// Identical remotes must agree on both the remote name and the root path.
	if fdst.Name() != fsrc.Name() {
		return false
	}
	return fdst.Root() == fsrc.Root()
}
// Overlapping returns true if fdst and fsrc point to the same
// underlying Fs or they overlap.
func Overlapping(fdst, fsrc Fs) bool {
	if fdst.Name() != fsrc.Name() {
		return false
	}
	// One root being a prefix of the other means one tree contains the other.
	dstRoot, srcRoot := fdst.Root(), fsrc.Root()
	return strings.HasPrefix(dstRoot, srcRoot) || strings.HasPrefix(srcRoot, dstRoot)
}
// checkIdentical checks to see if dst and src are identical
//
// it returns true if differences were found
// it also returns whether it couldn't be hashed
func checkIdentical(dst, src Object) (differ bool, noHash bool) {
	Stats.Checking(src.Remote())
	defer Stats.DoneChecking(src.Remote())
	// A size mismatch is always a difference, regardless of hashes.
	if src.Size() != dst.Size() {
		Stats.Error()
		ErrorLog(src, "Sizes differ")
		return true, false
	}
	if !Config.SizeOnly {
		same, hash, err := CheckHashes(src, dst)
		if err != nil {
			// CheckHashes will log and count errors
			return true, false
		}
		if hash == HashNone {
			// No common hash type: size already matched, so report
			// "not different, but unhashable".
			return false, true
		}
		if !same {
			Stats.Error()
			ErrorLog(src, "%v differ", hash)
			return true, false
		}
	}
	Debug(src, "OK")
	return false, false
}
// Check the files in fsrc and fdst according to Size and hash
//
// Files present on only one side are reported as differences; files
// present on both are compared concurrently by Config.Checkers workers.
// Returns an error if any differences were found.
func Check(fdst, fsrc Fs) error {
	dstFiles, srcFiles, err := readFilesMaps(fdst, false, fsrc, false, "")
	if err != nil {
		return err
	}
	differences := int32(0)
	noHashes := int32(0)
	// FIXME could do this as it goes along and make it use less
	// memory.
	// Move all the common files into commonFiles and delete them
	// from srcFiles and dstFiles
	commonFiles := make(map[string][]Object)
	for remote, src := range srcFiles {
		if dst, ok := dstFiles[remote]; ok {
			commonFiles[remote] = []Object{dst, src}
			delete(srcFiles, remote)
			delete(dstFiles, remote)
		}
	}
	Log(fdst, "%d files not in %v", len(dstFiles), fsrc)
	for _, dst := range dstFiles {
		Stats.Error()
		ErrorLog(dst, "File not in %v", fsrc)
		atomic.AddInt32(&differences, 1)
	}
	// NOTE(review): uses %s here but %v above for the same kind of value —
	// presumably equivalent for an Fs; confirm and unify.
	Log(fsrc, "%d files not in %s", len(srcFiles), fdst)
	for _, src := range srcFiles {
		Stats.Error()
		ErrorLog(src, "File not in %v", fdst)
		atomic.AddInt32(&differences, 1)
	}
	// Feed the common files to a pool of checker goroutines.
	checks := make(chan []Object, Config.Transfers)
	go func() {
		for _, check := range commonFiles {
			checks <- check
		}
		close(checks)
	}()
	var checkerWg sync.WaitGroup
	checkerWg.Add(Config.Checkers)
	for i := 0; i < Config.Checkers; i++ {
		go func() {
			defer checkerWg.Done()
			for check := range checks {
				// check[0] is the dst object, check[1] the src object.
				differ, noHash := checkIdentical(check[0], check[1])
				if differ {
					atomic.AddInt32(&differences, 1)
				}
				if noHash {
					atomic.AddInt32(&noHashes, 1)
				}
			}
		}()
	}
	Log(fdst, "Waiting for checks to finish")
	checkerWg.Wait()
	// NOTE(review): this logs the global error count, not the local
	// differences counter used for the return value — confirm intended.
	Log(fdst, "%d differences found", Stats.GetErrors())
	if noHashes > 0 {
		Log(fdst, "%d hashes could not be checked", noHashes)
	}
	if differences > 0 {
		return errors.Errorf("%d differences found", differences)
	}
	return nil
}
// ListFn lists the Fs to the supplied function
//
// Lists in parallel which may get them out of order
//
// NOTE(review): a listing error calls log.Fatal from inside a worker
// goroutine, terminating the whole process rather than returning the
// error — confirm this is intended for library callers.
func ListFn(f Fs, fn func(Object)) error {
	list := NewLister().SetFilter(Config.Filter).SetLevel(Config.MaxDepth).Start(f, "")
	var wg sync.WaitGroup
	wg.Add(Config.Checkers)
	for i := 0; i < Config.Checkers; i++ {
		go func() {
			defer wg.Done()
			for {
				o, err := list.GetObject()
				if err != nil {
					log.Fatal(err)
				}
				// check if we are finished
				if o == nil {
					return
				}
				if Config.Filter.IncludeObject(o) {
					fn(o)
				}
			}
		}()
	}
	wg.Wait()
	return nil
}
// mutex for synchronized output
var outMutex sync.Mutex
// Synchronized fmt.Fprintf
//
// Ignores errors from Fprintf
func syncFprintf(w io.Writer, format string, a ...interface{}) {
outMutex.Lock()
defer outMutex.Unlock()
_, _ = fmt.Fprintf(w, format, a...)
}
// List the Fs to the supplied writer
//
// Shows size and path - obeys includes and excludes
//
// Lists in parallel which may get them out of order
func List(f Fs, w io.Writer) error {
	printObject := func(o Object) {
		syncFprintf(w, "%9d %s\n", o.Size(), o.Remote())
	}
	return ListFn(f, printObject)
}
// ListLong lists the Fs to the supplied writer
//
// Shows size, mod time and path - obeys includes and excludes
//
// Lists in parallel which may get them out of order
func ListLong(f Fs, w io.Writer) error {
	return ListFn(f, func(o Object) {
		remote := o.Remote()
		// Reading ModTime may hit the remote, so account for it.
		Stats.Checking(remote)
		modTime := o.ModTime()
		Stats.DoneChecking(remote)
		when := modTime.Local().Format("2006-01-02 15:04:05.000000000")
		syncFprintf(w, "%9d %s %s\n", o.Size(), when, remote)
	})
}
// Md5sum list the Fs to the supplied writer
//
// Produces the same output as the md5sum command - obeys includes and
// excludes
//
// Lists in parallel which may get them out of order
//
// Thin wrapper over hashLister with HashMD5.
func Md5sum(f Fs, w io.Writer) error {
	return hashLister(HashMD5, f, w)
}
// Sha1sum list the Fs to the supplied writer
//
// Obeys includes and excludes
//
// Lists in parallel which may get them out of order
//
// Thin wrapper over hashLister with HashSHA1.
func Sha1sum(f Fs, w io.Writer) error {
	return hashLister(HashSHA1, f, w)
}
// hashLister writes "<sum> <path>" lines for every object in f using
// hash type ht, substituting "UNSUPPORTED" or "ERROR" when the hash
// cannot be produced.
func hashLister(ht HashType, f Fs, w io.Writer) error {
	return ListFn(f, func(o Object) {
		Stats.Checking(o.Remote())
		sum, err := o.Hash(ht)
		Stats.DoneChecking(o.Remote())
		switch {
		case err == ErrHashUnsupported:
			sum = "UNSUPPORTED"
		case err != nil:
			Debug(o, "Failed to read %v: %v", ht, err)
			sum = "ERROR"
		}
		syncFprintf(w, "%*s %s\n", HashWidth[ht], sum, o.Remote())
	})
}
// Count counts the objects and their sizes in the Fs
//
// Obeys includes and excludes
func Count(f Fs) (objects int64, size int64, err error) {
	// The counters are updated atomically because ListFn calls the
	// callback from multiple goroutines.
	err = ListFn(f, func(o Object) {
		atomic.AddInt64(&objects, 1)
		atomic.AddInt64(&size, o.Size())
	})
	return objects, size, err
}
// ListDir lists the directories/buckets/containers in the Fs to the supplied writer
//
// NOTE(review): a listing error calls log.Fatal, terminating the whole
// process rather than returning the error — confirm intended.
func ListDir(f Fs, w io.Writer) error {
	// Directory listing defaults to a single level unless --max-depth is set.
	level := 1
	if Config.MaxDepth > 0 {
		level = Config.MaxDepth
	}
	list := NewLister().SetFilter(Config.Filter).SetLevel(level).Start(f, "")
	for {
		dir, err := list.GetDir()
		if err != nil {
			log.Fatal(err)
		}
		// A nil dir signals the end of the listing.
		if dir == nil {
			break
		}
		syncFprintf(w, "%12d %13s %9d %s\n", dir.Bytes, dir.When.Format("2006-01-02 15:04:05"), dir.Count, dir.Name)
	}
	return nil
}
// Mkdir makes a destination directory or container
func Mkdir(f Fs, dir string) error {
	// Honour --dry-run.
	if Config.DryRun {
		Log(f, "Not making directory as dry run is set")
		return nil
	}
	if err := f.Mkdir(dir); err != nil {
		Stats.Error()
		return err
	}
	return nil
}
// TryRmdir removes a container but not if not empty. It doesn't
// count errors but may return one.
func TryRmdir(f Fs, dir string) error {
	if !Config.DryRun {
		return f.Rmdir(dir)
	}
	// Pick the most descriptive subject for the dry-run log line.
	if dir != "" {
		Log(dir, "Not deleting as dry run is set")
	} else {
		Log(f, "Not deleting as dry run is set")
	}
	return nil
}
// Rmdir removes a container but not if not empty
//
// Unlike TryRmdir it counts a failure in the global stats.
func Rmdir(f Fs, dir string) error {
	err := TryRmdir(f, dir)
	if err != nil {
		Stats.Error()
		return err
	}
	// Fixed: return nil explicitly instead of re-returning err, which is
	// provably nil at this point.
	return nil
}
// Purge removes a container and all of its contents
//
// Uses the remote's native Purge when available, otherwise falls back
// to deleting every file and then removing the (now empty) container.
//
// FIXME doesn't delete local directories
func Purge(f Fs) error {
	doFallbackPurge := true
	var err error
	if purger, ok := f.(Purger); ok {
		doFallbackPurge = false
		if Config.DryRun {
			Log(f, "Not purging as --dry-run set")
		} else {
			err = purger.Purge()
			if err == ErrorCantPurge {
				// Native purge declined — use the generic path below.
				doFallbackPurge = true
			}
		}
	}
	if doFallbackPurge {
		// DeleteFiles and Rmdir observe --dry-run
		list := NewLister().Start(f, "")
		err = DeleteFiles(listToChan(list))
		if err != nil {
			return err
		}
		err = Rmdir(f, "")
	}
	// err here is either the native Purge error or the fallback Rmdir error.
	if err != nil {
		Stats.Error()
		return err
	}
	return nil
}
// Delete removes all the contents of a container. Unlike Purge, it
// obeys includes and excludes.
//
// Objects found by the lister are streamed to DeleteFiles through a
// buffered channel; the listing error takes precedence over any
// deletion error.
func Delete(f Fs) error {
	// Fixed: renamed the channel from "delete", which shadowed the
	// builtin delete function.
	toBeDeleted := make(ObjectsChan, Config.Transfers)
	delErr := make(chan error, 1)
	go func() {
		delErr <- DeleteFiles(toBeDeleted)
	}()
	err := ListFn(f, func(o Object) {
		toBeDeleted <- o
	})
	close(toBeDeleted)
	delError := <-delErr
	if err == nil {
		err = delError
	}
	return err
}
// dedupeRename renames the objs slice to different names
//
// Each duplicate "base.ext" becomes "base-1.ext", "base-2.ext", …
// Requires the Fs to implement Mover; aborts the process otherwise.
func dedupeRename(remote string, objs []Object) {
	f := objs[0].Fs()
	mover, ok := f.(Mover)
	if !ok {
		log.Fatalf("Fs %v doesn't support Move", f)
	}
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	for i, o := range objs {
		newName := fmt.Sprintf("%s-%d%s", base, i+1, ext)
		if !Config.DryRun {
			newObj, err := mover.Move(o, newName)
			if err != nil {
				Stats.Error()
				ErrorLog(o, "Failed to rename: %v", err)
				// Skip this object but keep renaming the rest.
				continue
			}
			Log(newObj, "renamed from: %v", o)
		} else {
			Log(remote, "Not renaming to %q as --dry-run", newName)
		}
	}
}
// dedupeDeleteAllButOne deletes all but the one in keep
func dedupeDeleteAllButOne(keep int, remote string, objs []Object) {
	for i, o := range objs {
		if i != keep {
			// DeleteFile logs and counts failures itself.
			_ = DeleteFile(o)
		}
	}
	Log(remote, "Deleted %d extra copies", len(objs)-1)
}
// dedupeDeleteIdentical deletes all but one of identical (by hash) copies
//
// Returns the remaining (hash-distinct) objects.
//
// NOTE(review): objects whose Hash call fails are dropped from the
// returned slice without being deleted — confirm this is intended.
func dedupeDeleteIdentical(remote string, objs []Object) []Object {
	// See how many of these duplicates are identical
	byHash := make(map[string][]Object, len(objs))
	for _, o := range objs {
		md5sum, err := o.Hash(HashMD5)
		if err == nil {
			byHash[md5sum] = append(byHash[md5sum], o)
		}
	}
	// Delete identical duplicates, refilling obj with the ones remaining
	objs = nil
	for md5sum, hashObjs := range byHash {
		if len(hashObjs) > 1 {
			Log(remote, "Deleting %d/%d identical duplicates (md5sum %q)", len(hashObjs)-1, len(hashObjs), md5sum)
			for _, o := range hashObjs[1:] {
				_ = DeleteFile(o)
			}
		}
		// Keep exactly one representative per hash.
		objs = append(objs, hashObjs[0])
	}
	return objs
}
// dedupeInteractive interactively dedupes the slice of objects
//
// Prints a numbered summary of the duplicates, then asks the user
// whether to skip, keep one (deleting the rest), or rename them all.
func dedupeInteractive(remote string, objs []Object) {
	fmt.Printf("%s: %d duplicates remain\n", remote, len(objs))
	for i, o := range objs {
		md5sum, err := o.Hash(HashMD5)
		if err != nil {
			// Show the error in place of the hash rather than aborting.
			md5sum = err.Error()
		}
		fmt.Printf("  %d: %12d bytes, %s, md5sum %32s\n", i+1, o.Size(), o.ModTime().Format("2006-01-02 15:04:05.000000000"), md5sum)
	}
	// The first letter of each choice is the key the user presses.
	switch Command([]string{"sSkip and do nothing", "kKeep just one (choose which in next step)", "rRename all to be different (by changing file.jpg to file-1.jpg)"}) {
	case 's':
	case 'k':
		keep := ChooseNumber("Enter the number of the file to keep", 1, len(objs))
		dedupeDeleteAllButOne(keep-1, remote, objs)
	case 'r':
		dedupeRename(remote, objs)
	}
}
// objectsSortedByModTime implements sort.Interface ordering objects by
// modification time, oldest first.
type objectsSortedByModTime []Object

func (objs objectsSortedByModTime) Len() int      { return len(objs) }
func (objs objectsSortedByModTime) Swap(i, j int) { objs[i], objs[j] = objs[j], objs[i] }
func (objs objectsSortedByModTime) Less(i, j int) bool {
	return objs[i].ModTime().Before(objs[j].ModTime())
}
// DeduplicateMode is how the dedupe command chooses what to do
type DeduplicateMode int

// Deduplicate modes
const (
	DeduplicateInteractive DeduplicateMode = iota // interactively ask the user
	DeduplicateSkip                               // skip all conflicts
	DeduplicateFirst                              // choose the first object
	DeduplicateNewest                             // choose the newest object
	DeduplicateOldest                             // choose the oldest object
	DeduplicateRename                             // rename the objects
)

// deduplicateModeNames maps each mode to its command line name.
var deduplicateModeNames = map[DeduplicateMode]string{
	DeduplicateInteractive: "interactive",
	DeduplicateSkip:        "skip",
	DeduplicateFirst:       "first",
	DeduplicateNewest:      "newest",
	DeduplicateOldest:      "oldest",
	DeduplicateRename:      "rename",
}

// String returns the command line name of the mode, or "unknown" for
// an unrecognised value.
func (x DeduplicateMode) String() string {
	if name, ok := deduplicateModeNames[x]; ok {
		return name
	}
	return "unknown"
}
// Set a DeduplicateMode from a string
//
// The comparison is case-insensitive; an unrecognised string leaves x
// unchanged and returns an error.
func (x *DeduplicateMode) Set(s string) error {
	byName := map[string]DeduplicateMode{
		"interactive": DeduplicateInteractive,
		"skip":        DeduplicateSkip,
		"first":       DeduplicateFirst,
		"newest":      DeduplicateNewest,
		"oldest":      DeduplicateOldest,
		"rename":      DeduplicateRename,
	}
	mode, ok := byName[strings.ToLower(s)]
	if !ok {
		return errors.Errorf("Unknown mode for dedupe %q.", s)
	}
	*x = mode
	return nil
}
// Type of the value
//
// Part of the pflag.Value interface.
func (x *DeduplicateMode) Type() string {
	return "string"
}

// Check it satisfies the interface
var _ pflag.Value = (*DeduplicateMode)(nil)
// Deduplicate interactively finds duplicate files and offers to
// delete all but one or rename them to be different. Only useful with
// Google Drive which can have duplicate file names.
//
// All identical (same md5) copies are removed first; any remaining
// same-named objects are resolved according to mode.
func Deduplicate(f Fs, mode DeduplicateMode) error {
	Log(f, "Looking for duplicates using %v mode.", mode)
	// Group every object by its remote path.
	files := map[string][]Object{}
	list := NewLister().Start(f, "")
	for {
		o, err := list.GetObject()
		if err != nil {
			return err
		}
		// Check if we are finished
		if o == nil {
			break
		}
		remote := o.Remote()
		files[remote] = append(files[remote], o)
	}
	for remote, objs := range files {
		if len(objs) > 1 {
			Log(remote, "Found %d duplicates - deleting identical copies", len(objs))
			objs = dedupeDeleteIdentical(remote, objs)
			if len(objs) <= 1 {
				Log(remote, "All duplicates removed")
				continue
			}
			switch mode {
			case DeduplicateInteractive:
				dedupeInteractive(remote, objs)
			case DeduplicateFirst:
				dedupeDeleteAllButOne(0, remote, objs)
			case DeduplicateNewest:
				sort.Sort(objectsSortedByModTime(objs)) // sort oldest first
				// Keep the last element — the newest after sorting.
				dedupeDeleteAllButOne(len(objs)-1, remote, objs)
			case DeduplicateOldest:
				sort.Sort(objectsSortedByModTime(objs)) // sort oldest first
				dedupeDeleteAllButOne(0, remote, objs)
			case DeduplicateRename:
				dedupeRename(remote, objs)
			case DeduplicateSkip:
				// skip
			default:
				//skip
			}
		}
	}
	return nil
}
// listToChan will transfer all incoming objects to a new channel.
//
// If an error occurs, the error will be logged, and it will close the
// channel.
//
// If the error was ErrorDirNotFound then it will be ignored
func listToChan(list *Lister) ObjectsChan {
	o := make(ObjectsChan, Config.Checkers)
	go func() {
		// Closing the channel is the only completion signal consumers get.
		defer close(o)
		for {
			obj, dir, err := list.Get()
			if err != nil {
				if err != ErrorDirNotFound {
					Stats.Error()
					ErrorLog(nil, "Failed to list: %v", err)
				}
				return
			}
			// Both nil means the listing is complete.
			if dir == nil && obj == nil {
				return
			}
			// Directories are skipped; only objects are forwarded.
			if obj == nil {
				continue
			}
			o <- obj
		}
	}()
	return o
}
// CleanUp removes the trash for the Fs
func CleanUp(f Fs) error {
	// Only remotes implementing CleanUpper support this.
	if fc, ok := f.(CleanUpper); ok {
		if Config.DryRun {
			Log(f, "Not running cleanup as --dry-run set")
			return nil
		}
		return fc.CleanUp()
	}
	return errors.Errorf("%v doesn't support cleanup", f)
}
// Cat any files to the io.Writer
//
// Although ListFn runs callbacks concurrently, the mutex serialises the
// actual copying so outputs are not interleaved.
func Cat(f Fs, w io.Writer) error {
	var mu sync.Mutex
	return ListFn(f, func(o Object) {
		var err error
		Stats.Transferring(o.Remote())
		defer func() {
			// err is captured by the deferred Close below as well,
			// so success is only reported if Close also succeeded.
			Stats.DoneTransferring(o.Remote(), err == nil)
		}()
		mu.Lock()
		defer mu.Unlock()
		in, err := o.Open()
		if err != nil {
			Stats.Error()
			ErrorLog(o, "Failed to open: %v", err)
			return
		}
		defer func() {
			err = in.Close()
			if err != nil {
				Stats.Error()
				ErrorLog(o, "Failed to close: %v", err)
			}
		}()
		inAccounted := NewAccount(in, o) // account and buffer the transfer
		_, err = io.Copy(w, inAccounted)
		if err != nil {
			Stats.Error()
			ErrorLog(o, "Failed to send to output: %v", err)
		}
	})
}
// Rmdirs removes any empty directories (or directories only
// containing empty directories) under f, including f.
//
// First pass: walk the listing, assuming every directory is empty until
// a file is seen, at which point all of its ancestors are marked
// non-empty. Second pass: delete the still-empty directories,
// deepest-first (by reverse-sorted path).
func Rmdirs(f Fs) error {
	list := NewLister().Start(f, "")
	dirEmpty := make(map[string]bool)
	// The root itself is a candidate for removal.
	dirEmpty[""] = true
	for {
		o, dir, err := list.Get()
		if err != nil {
			Stats.Error()
			ErrorLog(f, "Failed to list: %v", err)
			return err
		} else if dir != nil {
			// add a new directory as empty
			dir := dir.Name
			_, found := dirEmpty[dir]
			if !found {
				dirEmpty[dir] = true
			}
		} else if o != nil {
			// mark the parents of the file as being non-empty
			dir := o.Remote()
			for dir != "" {
				dir = path.Dir(dir)
				if dir == "." || dir == "/" {
					dir = ""
				}
				empty, found := dirEmpty[dir]
				// End if we reach a directory which is non-empty
				if found && !empty {
					break
				}
				dirEmpty[dir] = false
			}
		} else {
			// finished as dir == nil && o == nil
			break
		}
	}
	// Now delete the empty directories, starting from the longest path
	var toDelete []string
	for dir, empty := range dirEmpty {
		if empty {
			toDelete = append(toDelete, dir)
		}
	}
	// Lexicographic sort puts children after their parents, so iterating
	// in reverse removes the deepest directories first.
	sort.Strings(toDelete)
	for i := len(toDelete) - 1; i >= 0; i-- {
		dir := toDelete[i]
		err := TryRmdir(f, dir)
		if err != nil {
			Stats.Error()
			ErrorLog(dir, "Failed to rmdir: %v", err)
			return err
		}
	}
	return nil
}
// moveOrCopyFile moves or copies a single file possibly to a new name
func moveOrCopyFile(fdst Fs, fsrc Fs, dstFileName string, srcFileName string, cp bool) (err error) {
	// Select the operation to perform once the objects are resolved.
	op := Move
	if cp {
		op = Copy
	}
	// The source object must exist.
	srcObj, err := fsrc.NewObject(srcFileName)
	if err != nil {
		return err
	}
	// The destination may legitimately be absent.
	dstObj, err := fdst.NewObject(dstFileName)
	switch err {
	case nil, ErrorObjectNotFound:
		if err != nil {
			dstObj = nil
		}
	default:
		return err
	}
	if !NeedTransfer(dstObj, srcObj) {
		// Nothing to transfer; a move still removes the source.
		if cp {
			return nil
		}
		return DeleteFile(srcObj)
	}
	return op(fdst, dstObj, dstFileName, srcObj)
}
// MoveFile moves a single file possibly to a new name
func MoveFile(fdst Fs, fsrc Fs, dstFileName string, srcFileName string) (err error) {
	return moveOrCopyFile(fdst, fsrc, dstFileName, srcFileName, false)
}

// CopyFile copies a single file possibly to a new name
func CopyFile(fdst Fs, fsrc Fs, dstFileName string, srcFileName string) (err error) {
	return moveOrCopyFile(fdst, fsrc, dstFileName, srcFileName, true)
}
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"bytes"
"fmt"
"html"
"os"
"strings"
"cmd/compile/internal/ssa"
"cmd/internal/obj"
"cmd/internal/sys"
)
// ssaEnabled reports whether the SSA backend may be used at all;
// consulted by shouldssa after the architecture check.
var ssaEnabled = true

// ssaConfig is the lazily-built configuration shared by all SSA
// compilations (see initssa).
var ssaConfig *ssa.Config

// ssaExp is the single frontend/export object handed to the ssa package.
var ssaExp ssaExport
// initssa returns the shared SSA configuration, creating it on first
// use. The export flags are reset on every call so state from a
// previously compiled function does not leak into the next one.
func initssa() *ssa.Config {
	ssaExp.unimplemented = false
	ssaExp.mustImplement = true
	if ssaConfig == nil {
		ssaConfig = ssa.NewConfig(Thearch.LinkArch.Name, &ssaExp, Ctxt, Debug['N'] == 0)
	}
	return ssaConfig
}
// shouldssa reports whether fn should be compiled with the SSA backend,
// based on the target architecture and the GOSSAFUNC / GOSSAPKG /
// GOSSAHASH environment variables described below.
func shouldssa(fn *Node) bool {
	switch Thearch.LinkArch.Name {
	default:
		// Only available for testing.
		if os.Getenv("SSATEST") == "" {
			return false
		}
		// Generally available.
	case "amd64":
	}
	if !ssaEnabled {
		return false
	}

	// Environment variable control of SSA CG
	// 1. IF GOSSAFUNC == current function name THEN
	//       compile this function with SSA and log output to ssa.html

	// 2. IF GOSSAHASH == "" THEN
	//       compile this function (and everything else) with SSA

	// 3. IF GOSSAHASH == "n" or "N"
	//       IF GOSSAPKG == current package name THEN
	//          compile this function (and everything in this package) with SSA
	//       ELSE
	//          use the old back end for this function.
	//       This is for compatibility with existing test harness and should go away.

	// 4. IF GOSSAHASH is a suffix of the binary-rendered SHA1 hash of the function name THEN
	//          compile this function with SSA
	//       ELSE
	//          compile this function with the old back end.

	// Plan is for 3 to be removed when the tests are revised.
	// SSA is now default, and is disabled by setting
	// GOSSAHASH to n or N, or selectively with strings of
	// 0 and 1.

	name := fn.Func.Nname.Sym.Name

	funcname := os.Getenv("GOSSAFUNC")
	if funcname != "" {
		// If GOSSAFUNC is set, compile only that function.
		return name == funcname
	}

	pkg := os.Getenv("GOSSAPKG")
	if pkg != "" {
		// If GOSSAPKG is set, compile only that package.
		return localpkg.Name == pkg
	}

	return initssa().DebugHashMatch("GOSSAHASH", name)
}
// buildssa builds an SSA function.
//
// It converts fn's AST to SSA form, checks labels and gotos, links
// forward variable references, and invokes ssa.Compile. Returns nil if
// errors were reported during conversion.
func buildssa(fn *Node) *ssa.Func {
	name := fn.Func.Nname.Sym.Name
	printssa := name == os.Getenv("GOSSAFUNC")
	if printssa {
		fmt.Println("generating SSA for", name)
		dumplist("buildssa-enter", fn.Func.Enter)
		dumplist("buildssa-body", fn.Nbody)
		dumplist("buildssa-exit", fn.Func.Exit)
	}

	var s state
	s.pushLine(fn.Lineno)
	defer s.popLine()

	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}
	if fn.Func.Pragma&Nowritebarrier != 0 {
		s.noWB = true
	}
	defer func() {
		// Propagate the first write-barrier line back to the node once
		// the whole body has been converted.
		if s.WBLineno != 0 {
			fn.Func.WBLineno = s.WBLineno
		}
	}()
	// TODO(khr): build config just once at the start of the compiler binary

	ssaExp.log = printssa

	s.config = initssa()
	s.f = s.config.NewFunc()
	s.f.Name = name
	s.exitCode = fn.Func.Exit
	s.panics = map[funcLine]*ssa.Block{}

	if name == os.Getenv("GOSSAFUNC") {
		// TODO: tempfile? it is handy to have the location
		// of this file be stable, so you can just reload in the browser.
		s.config.HTML = ssa.NewHTMLWriter("ssa.html", s.config, name)
		// TODO: generate and print a mapping from nodes to values and blocks
	}
	defer func() {
		// NOTE(review): closes the HTML writer only when NOT printing SSA
		// — presumably the writer is nil-safe here and is closed later in
		// the pipeline when printssa is true; confirm.
		if !printssa {
			s.config.HTML.Close()
		}
	}()

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.labeledNodes = map[*Node]*ssaLabel{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, ssa.TypeMem)
	s.sp = s.entryNewValue0(ssa.OpSP, Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, Types[TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[&memVar] = s.startmem

	s.varsyms = map[*Node]interface{}{}

	// Generate addresses of local declarations
	s.decladdrs = map[*Node]*ssa.Value{}
	for _, n := range fn.Func.Dcl {
		switch n.Class {
		case PPARAM, PPARAMOUT:
			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sp)
			if n.Class == PPARAMOUT && s.canSSA(n) {
				// Save ssa-able PPARAMOUT variables so we can
				// store them back to the stack at the end of
				// the function.
				s.returns = append(s.returns, n)
			}
		case PAUTO | PHEAP:
			// TODO this looks wrong for PAUTO|PHEAP, no vardef, but also no definition
			aux := s.lookupSymbol(n, &ssa.AutoSymbol{Typ: n.Type, Node: n})
			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sp)
		case PPARAM | PHEAP, PPARAMOUT | PHEAP:
		// This ends up wrong, have to do it at the PARAM node instead.
		case PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		case PFUNC:
			// local function - already handled by frontend
		default:
			str := ""
			if n.Class&PHEAP != 0 {
				str = ",heap"
			}
			s.Unimplementedf("local variable with class %s%s unimplemented", classnames[n.Class&^PHEAP], str)
		}
	}

	// Convert the AST-based IR to the SSA-based IR
	s.stmts(fn.Func.Enter)
	s.stmts(fn.Nbody)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Func.Endlineno)
		s.exit()
		s.popLine()
	}

	// Check that we used all labels
	for name, lab := range s.labels {
		if !lab.used() && !lab.reported {
			yyerrorl(lab.defNode.Lineno, "label %v defined and not used", name)
			lab.reported = true
		}
		if lab.used() && !lab.defined() && !lab.reported {
			yyerrorl(lab.useNode.Lineno, "label %v not defined", name)
			lab.reported = true
		}
	}

	// Check any forward gotos. Non-forward gotos have already been checked.
	for _, n := range s.fwdGotos {
		lab := s.labels[n.Left.Sym.Name]
		// If the label is undefined, we have already have printed an error.
		if lab.defined() {
			s.checkgoto(n, lab.defNode)
		}
	}

	if nerrors > 0 {
		s.f.Free()
		return nil
	}

	// Link up variable uses to variable definitions
	s.linkForwardReferences()

	// Don't carry reference this around longer than necessary
	s.exitCode = Nodes{}

	// Main call to ssa package to compile function
	ssa.Compile(s.f)

	return s.f
}
// state holds the per-function working state used while converting a
// function's AST to SSA form (see buildssa).
type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// labels and labeled control flow nodes (OFOR, OSWITCH, OSELECT) in f
	labels       map[string]*ssaLabel
	labeledNodes map[*Node]*ssaLabel

	// gotos that jump forward; required for deferred checkgoto calls
	fwdGotos []*Node
	// Code that must precede any return
	// (e.g., copying value of heap-escaped paramout back to true paramout)
	exitCode Nodes

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	vars map[*Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[*Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables.
	decladdrs map[*Node]*ssa.Value

	// symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
	varsyms map[*Node]interface{}

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value

	// line number stack. The current line number is top of stack
	line []int32

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	// list of FwdRef values.
	fwdRefs []*ssa.Value

	// list of PPARAMOUT (return) variables. Does not include PPARAM|PHEAP vars.
	returns []*Node

	cgoUnsafeArgs bool
	noWB          bool
	WBLineno      int32 // line number of first write barrier. 0=no write barriers
}
// funcLine identifies a panic call site by function and line so that
// panic blocks can be deduplicated (see state.panics).
type funcLine struct {
	f    *Node
	line int32
}
// ssaLabel records everything known about one Go label: the blocks it
// names and the AST nodes that define and use it, for error reporting.
type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
	defNode        *Node      // label definition Node (OLABEL)
	// Label use Node (OGOTO, OBREAK, OCONTINUE).
	// Used only for error detection and reporting.
	// There might be multiple uses, but we only need to track one.
	useNode  *Node
	reported bool // reported indicates whether an error has already been reported for this label
}

// defined reports whether the label has a definition (OLABEL node).
func (l *ssaLabel) defined() bool { return l.defNode != nil }

// used reports whether the label has a use (OGOTO, OBREAK, or OCONTINUE node).
func (l *ssaLabel) used() bool { return l.useNode != nil }
// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *Sym) *ssaLabel {
	if lab := s.labels[sym.Name]; lab != nil {
		return lab
	}
	// First reference to this label: allocate and remember it.
	lab := new(ssaLabel)
	s.labels[sym.Name] = lab
	return lab
}
// The following helpers forward logging/diagnostics to the shared SSA
// config, stamping errors with the current source line where relevant.
func (s *state) Logf(msg string, args ...interface{}) { s.config.Logf(msg, args...) }
func (s *state) Log() bool                            { return s.config.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) { s.config.Fatalf(s.peekLine(), msg, args...) }
func (s *state) Unimplementedf(msg string, args ...interface{}) {
	s.config.Unimplementedf(s.peekLine(), msg, args...)
}
func (s *state) Warnl(line int32, msg string, args ...interface{}) { s.config.Warnl(line, msg, args...) }
func (s *state) Debug_checknil() bool                              { return s.config.Debug_checknil() }
// Sentinel ONAME nodes used as map keys in state.vars to track values
// that are not real program variables.
var (
	// dummy node for the memory variable
	memVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "mem"}}

	// dummy nodes for temporary variables
	ptrVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ptr"}}
	newlenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "newlen"}}
	capVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "cap"}}
	typVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "typ"}}
	idataVar  = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "idata"}}
	okVar     = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ok"}}
	deltaVar  = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "delta"}}
)
// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
	// The previous block must have been finished with endBlock first.
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	s.vars = make(map[*Node]*ssa.Value)
}
// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
	b := s.curBlock
	if b == nil {
		return nil
	}
	// Grow defvars so it can be indexed by this block's ID.
	for len(s.defvars) <= int(b.ID) {
		s.defvars = append(s.defvars, nil)
	}
	// Snapshot the block's final variable assignments for later
	// forward-reference resolution.
	s.defvars[b.ID] = s.vars
	s.curBlock = nil
	s.vars = nil
	b.Line = s.peekLine()
	return b
}
// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line int32) {
	s.line = append(s.line, line)
}

// popLine pops the top of the line number stack.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
}

// peekLine peek the top of the line number stack.
func (s *state) peekLine() int32 {
	return s.line[len(s.line)-1]
}

// Error reports a compile error at the current source line.
func (s *state) Error(msg string, args ...interface{}) {
	yyerrorl(s.peekLine(), msg, args...)
}
// The newValue* helpers construct SSA values in the CURRENT block,
// stamped with the current source line (see entryNewValue* for the
// entry-block equivalents).

// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekLine(), op, t)
}

// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekLine(), op, t, aux)
}

// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekLine(), op, t, auxint)
}

// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1(s.peekLine(), op, t, arg)
}

// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1A(s.peekLine(), op, t, aux, arg)
}

// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t ssa.Type, aux int64, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1I(s.peekLine(), op, t, aux, arg)
}

// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2(s.peekLine(), op, t, arg0, arg1)
}

// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2I(s.peekLine(), op, t, aux, arg0, arg1)
}

// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t ssa.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3(s.peekLine(), op, t, arg0, arg1, arg2)
}

// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3I(s.peekLine(), op, t, aux, arg0, arg1, arg2)
}
// The entryNewValue* helpers mirror the newValue* helpers but place the
// value in the function's entry block (s.f.Entry) instead of the current
// block, e.g. for values that must dominate all uses.

// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.f.Entry.NewValue0(s.peekLine(), op, t)
}

// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.f.Entry.NewValue0A(s.peekLine(), op, t, aux)
}

// entryNewValue0I adds a new value with no arguments and an auxint value to the entry block.
func (s *state) entryNewValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.f.Entry.NewValue0I(s.peekLine(), op, t, auxint)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1(s.peekLine(), op, t, arg)
}

// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t ssa.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1I(s.peekLine(), op, t, auxint, arg)
}

// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1A(s.peekLine(), op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2(s.peekLine(), op, t, arg0, arg1)
}
// const* routines add a new const value to the entry block.
// Each forwards to the corresponding constructor on s.f, stamping the
// value with the current source line.
func (s *state) constSlice(t ssa.Type) *ssa.Value { return s.f.ConstSlice(s.peekLine(), t) }
func (s *state) constInterface(t ssa.Type) *ssa.Value { return s.f.ConstInterface(s.peekLine(), t) }
func (s *state) constNil(t ssa.Type) *ssa.Value { return s.f.ConstNil(s.peekLine(), t) }
func (s *state) constEmptyString(t ssa.Type) *ssa.Value { return s.f.ConstEmptyString(s.peekLine(), t) }

// constBool returns an SSA constant of the predeclared bool type.
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(s.peekLine(), Types[TBOOL], c)
}
func (s *state) constInt8(t ssa.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(s.peekLine(), t, c)
}
func (s *state) constInt16(t ssa.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(s.peekLine(), t, c)
}
func (s *state) constInt32(t ssa.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(s.peekLine(), t, c)
}
func (s *state) constInt64(t ssa.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(s.peekLine(), t, c)
}
// Note: constFloat32 takes a float64 argument; the narrowing to 32 bits
// happens in the backend constructor.
func (s *state) constFloat32(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(s.peekLine(), t, c)
}
func (s *state) constFloat64(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(s.peekLine(), t, c)
}
// constInt returns an int constant of the target's native word size.
// On a 32-bit target, c must fit in an int32; a constant that does not
// fit is a compiler bug and aborts compilation.
func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
	if s.config.IntSize == 8 {
		return s.constInt64(t, c)
	}
	// 32-bit target: verify the constant round-trips through int32.
	narrowed := int32(c)
	if int64(narrowed) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, narrowed)
}
// stmts converts every statement in a to SSA, in order, adding each to s.
func (s *state) stmts(a Nodes) {
	list := a.Slice()
	for i := range list {
		s.stmt(list[i])
	}
}
// stmtList converts each statement in l to SSA, in order, adding it to s.
func (s *state) stmtList(l Nodes) {
	nodes := l.Slice()
	for i := range nodes {
		s.stmt(nodes[i])
	}
}
// ssaStmt converts the statement n to SSA and adds it to s.
// It builds control-flow blocks (for OIF, OFOR, OSWITCH, ...) and emits
// values/assignments for the remaining statement kinds. Statement order
// here is significant: the memory state threaded through s is updated as
// side effects are emitted.
func (s *state) stmt(n *Node) {
	s.pushLine(n.Lineno)
	defer s.popLine()
	// If s.curBlock is nil, then we're about to generate dead code.
	// We can't just short-circuit here, though,
	// because we check labels and gotos as part of SSA generation.
	// Provide a block for the dead code so that we don't have
	// to add special cases everywhere else.
	if s.curBlock == nil {
		dead := s.f.NewBlock(ssa.BlockPlain)
		s.startBlock(dead)
	}
	// Init statements run before the statement itself.
	s.stmtList(n.Ninit)
	switch n.Op {
	case OBLOCK:
		s.stmtList(n.List)
	// No-ops
	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:
	// Expression statements
	case OCALLFUNC, OCALLMETH, OCALLINTER:
		s.call(n, callNormal)
		// Calls that provably never return (runtime.throw, gopanic,
		// selectgo, block) terminate the current block with BlockExit.
		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class == PFUNC &&
			(compiling_runtime != 0 && n.Left.Sym.Name == "throw" ||
				n.Left.Sym.Pkg == Runtimepkg && (n.Left.Sym.Name == "gopanic" || n.Left.Sym.Name == "selectgo" || n.Left.Sym.Name == "block")) {
			m := s.mem()
			b := s.endBlock()
			b.Kind = ssa.BlockExit
			b.SetControl(m)
			// TODO: never rewrite OPANIC to OCALLFUNC in the
			// first place. Need to wait until all backends
			// go through SSA.
		}
	case ODEFER:
		s.call(n.Left, callDefer)
	case OPROC:
		s.call(n.Left, callGo)
	case OAS2DOTTYPE:
		// v, ok := x.(T)
		res, resok := s.dottype(n.Rlist.First(), true)
		s.assign(n.List.First(), res, needwritebarrier(n.List.First(), n.Rlist.First()), false, n.Lineno, 0)
		s.assign(n.List.Second(), resok, false, false, n.Lineno, 0)
		return
	case ODCL:
		if n.Left.Class&PHEAP == 0 {
			return
		}
		if compiling_runtime != 0 {
			Fatalf("%v escapes to heap, not allowed in runtime.", n)
		}
		// TODO: the old pass hides the details of PHEAP
		// variables behind ONAME nodes. Figure out if it's better
		// to rewrite the tree and make the heapaddr construct explicit
		// or to keep this detail hidden behind the scenes.
		palloc := prealloc[n.Left]
		if palloc == nil {
			palloc = callnew(n.Left.Type)
			prealloc[n.Left] = palloc
		}
		r := s.expr(palloc)
		s.assign(n.Left.Name.Heapaddr, r, false, false, n.Lineno, 0)
	case OLABEL:
		sym := n.Left.Sym
		if isblanksym(sym) {
			// Empty identifier is valid but useless.
			// See issues 11589, 11593.
			return
		}
		lab := s.label(sym)
		// Associate label with its control flow node, if any
		if ctl := n.Name.Defn; ctl != nil {
			switch ctl.Op {
			case OFOR, OSWITCH, OSELECT:
				s.labeledNodes[ctl] = lab
			}
		}
		if !lab.defined() {
			lab.defNode = n
		} else {
			s.Error("label %v already defined at %v", sym, linestr(lab.defNode.Lineno))
			lab.reported = true
		}
		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}
		// go to that label (we pretend "label:" is preceded by "goto label")
		b := s.endBlock()
		b.AddEdgeTo(lab.target)
		s.startBlock(lab.target)
	case OGOTO:
		sym := n.Left.Sym
		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}
		if !lab.used() {
			lab.useNode = n
		}
		if lab.defined() {
			s.checkgoto(n, lab.defNode)
		} else {
			// Forward goto: legality is checked once the label is seen.
			s.fwdGotos = append(s.fwdGotos, n)
		}
		b := s.endBlock()
		b.AddEdgeTo(lab.target)
	case OAS, OASWB:
		// Check whether we can generate static data rather than code.
		// If so, ignore n and defer data generation until codegen.
		// Failure to do this causes writes to readonly symbols.
		if gen_as_init(n, true) {
			var data []*Node
			if s.f.StaticData != nil {
				data = s.f.StaticData.([]*Node)
			}
			s.f.StaticData = append(data, n)
			return
		}
		if n.Left == n.Right && n.Left.Op == ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			//	VARDEF x
			//	COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}
		var t *Type
		if n.Right != nil {
			t = n.Right.Type
		} else {
			t = n.Left.Type
		}
		// Evaluate RHS.
		rhs := n.Right
		if rhs != nil && (rhs.Op == OSTRUCTLIT || rhs.Op == OARRAYLIT) {
			// All literals with nonzero fields have already been
			// rewritten during walk. Any that remain are just T{}
			// or equivalents. Use the zero value.
			if !iszero(rhs) {
				Fatalf("literal with nonzero value in SSA: %v", rhs)
			}
			rhs = nil
		}
		var r *ssa.Value
		needwb := n.Op == OASWB && rhs != nil
		deref := !canSSAType(t)
		if deref {
			// Non-SSAable type: assign by memory copy from the RHS address.
			if rhs == nil {
				r = nil // Signal assign to use OpZero.
			} else {
				r = s.addr(rhs, false)
			}
		} else {
			if rhs == nil {
				r = s.zeroVal(t)
			} else {
				r = s.expr(rhs)
			}
		}
		if rhs != nil && rhs.Op == OAPPEND {
			// Yuck! The frontend gets rid of the write barrier, but we need it!
			// At least, we need it in the case where growslice is called.
			// TODO: Do the write barrier on just the growslice branch.
			// TODO: just add a ptr graying to the end of growslice?
			// TODO: check whether we need to do this for ODOTTYPE and ORECV also.
			// They get similar wb-removal treatment in walk.go:OAS.
			needwb = true
		}
		var skip skipMask
		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
			// We're assigning a slicing operation back to its source.
			// Don't write back fields we aren't changing. See issue #14855.
			i := rhs.Right.Left
			var j, k *Node
			if rhs.Op == OSLICE3 {
				j = rhs.Right.Right.Left
				k = rhs.Right.Right.Right
			} else {
				j = rhs.Right.Right
			}
			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
				// [0:...] is the same as [:...]
				i = nil
			}
			// TODO: detect defaults for len/cap also.
			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
			//    tmp = len(*p)
			//    (*p)[:tmp]
			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
			//      j = nil
			//}
			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
			//      k = nil
			//}
			if i == nil {
				skip |= skipPtr
				if j == nil {
					skip |= skipLen
				}
				if k == nil {
					skip |= skipCap
				}
			}
		}
		s.assign(n.Left, r, needwb, deref, n.Lineno, skip)
	case OIF:
		// Build the diamond (or triangle, when there is no else branch).
		bThen := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		var bElse *ssa.Block
		if n.Rlist.Len() != 0 {
			bElse = s.f.NewBlock(ssa.BlockPlain)
			s.condBranch(n.Left, bThen, bElse, n.Likely)
		} else {
			s.condBranch(n.Left, bThen, bEnd, n.Likely)
		}
		s.startBlock(bThen)
		s.stmts(n.Nbody)
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}
		if n.Rlist.Len() != 0 {
			s.startBlock(bElse)
			s.stmtList(n.Rlist)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		s.startBlock(bEnd)
	case ORETURN:
		s.stmtList(n.List)
		s.exit()
	case ORETJMP:
		s.stmtList(n.List)
		b := s.exit()
		b.Kind = ssa.BlockRetJmp // override BlockRet
		b.Aux = n.Left.Sym
	case OCONTINUE, OBREAK:
		var op string
		var to *ssa.Block
		switch n.Op {
		case OCONTINUE:
			op = "continue"
			to = s.continueTo
		case OBREAK:
			op = "break"
			to = s.breakTo
		}
		if n.Left == nil {
			// plain break/continue
			if to == nil {
				s.Error("%s is not in a loop", op)
				return
			}
			// nothing to do; "to" is already the correct target
		} else {
			// labeled break/continue; look up the target
			sym := n.Left.Sym
			lab := s.label(sym)
			if !lab.used() {
				lab.useNode = n.Left
			}
			if !lab.defined() {
				s.Error("%s label not defined: %v", op, sym)
				lab.reported = true
				return
			}
			switch n.Op {
			case OCONTINUE:
				to = lab.continueTarget
			case OBREAK:
				to = lab.breakTarget
			}
			if to == nil {
				// Valid label but not usable with a break/continue here, e.g.:
				// for {
				// 	continue abc
				// }
				// abc:
				// for {}
				s.Error("invalid %s label %v", op, sym)
				lab.reported = true
				return
			}
		}
		b := s.endBlock()
		b.AddEdgeTo(to)
	case OFOR:
		// OFOR: for Ninit; Left; Right { Nbody }
		bCond := s.f.NewBlock(ssa.BlockPlain)
		bBody := s.f.NewBlock(ssa.BlockPlain)
		bIncr := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		// first, jump to condition test
		b := s.endBlock()
		b.AddEdgeTo(bCond)
		// generate code to test condition
		s.startBlock(bCond)
		if n.Left != nil {
			s.condBranch(n.Left, bBody, bEnd, 1)
		} else {
			// No condition: loop is unconditional (for {}).
			b := s.endBlock()
			b.Kind = ssa.BlockPlain
			b.AddEdgeTo(bBody)
		}
		// set up for continue/break in body
		prevContinue := s.continueTo
		prevBreak := s.breakTo
		s.continueTo = bIncr
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled for loop
			lab.continueTarget = bIncr
			lab.breakTarget = bEnd
		}
		// generate body
		s.startBlock(bBody)
		s.stmts(n.Nbody)
		// tear down continue/break
		s.continueTo = prevContinue
		s.breakTo = prevBreak
		if lab != nil {
			lab.continueTarget = nil
			lab.breakTarget = nil
		}
		// done with body, goto incr
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bIncr)
		}
		// generate incr
		s.startBlock(bIncr)
		if n.Right != nil {
			s.stmt(n.Right)
		}
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bCond)
		}
		s.startBlock(bEnd)
	case OSWITCH, OSELECT:
		// These have been mostly rewritten by the front end into their Nbody fields.
		// Our main task is to correctly hook up any break statements.
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		prevBreak := s.breakTo
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled
			lab.breakTarget = bEnd
		}
		// generate body code
		s.stmts(n.Nbody)
		s.breakTo = prevBreak
		if lab != nil {
			lab.breakTarget = nil
		}
		// OSWITCH never falls through (s.curBlock == nil here).
		// OSELECT does not fall through if we're calling selectgo.
		// OSELECT does fall through if we're calling selectnb{send,recv}[2].
		// In those latter cases, go to the code after the select.
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}
		s.startBlock(bEnd)
	case OVARKILL:
		// Insert a varkill op to record that a variable is no longer live.
		// We only care about liveness info at call sites, so putting the
		// varkill in the store chain is enough to keep it correctly ordered
		// with respect to call ops.
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, n.Left, s.mem())
		}
	case OVARLIVE:
		// Insert a varlive op to record that a variable is still live.
		if !n.Left.Addrtaken {
			s.Fatalf("VARLIVE variable %s must have Addrtaken set", n.Left)
		}
		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, ssa.TypeMem, n.Left, s.mem())
	case OCHECKNIL:
		p := s.expr(n.Left)
		s.nilCheck(p)
	default:
		s.Unimplementedf("unhandled stmt %s", opnames[n.Op])
	}
}
// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
	if hasdefer {
		// Function has defers: call Deferreturn before leaving.
		s.rtcall(Deferreturn, true, nil)
	}
	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
	// variables back to the stack.
	s.stmts(s.exitCode)
	// Store SSAable PPARAMOUT variables back to stack locations.
	for _, n := range s.returns {
		aux := &ssa.ArgSymbol{Typ: n.Type, Node: n}
		addr := s.newValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sp)
		val := s.variable(n, n.Type)
		// VarDef marks the start of the result slot's live range,
		// then the store writes the final value into it.
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem())
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, n.Type.Size(), addr, val, s.mem())
		// TODO: if val is ever spilled, we'd like to use the
		// PPARAMOUT slot for spilling it. That won't happen
		// currently.
	}
	// Do actual return.
	m := s.mem()
	b := s.endBlock()
	b.Kind = ssa.BlockRet
	b.SetControl(m)
	return b
}
// opAndType is a key for opToSSA: a front-end operation together with
// the concrete element type (EType) it operates on.
type opAndType struct {
	op    Op
	etype EType
}
// opToSSA maps a (front-end op, concrete element type) pair to the
// generic SSA opcode implementing it. Types must already be resolved
// via concreteEtype (TINT/TUINT/TUINTPTR never appear as keys except
// where listed explicitly). Consulted by ssaOp and ssaRotateOp.
var opToSSA = map[opAndType]ssa.Op{
	opAndType{OADD, TINT8}: ssa.OpAdd8,
	opAndType{OADD, TUINT8}: ssa.OpAdd8,
	opAndType{OADD, TINT16}: ssa.OpAdd16,
	opAndType{OADD, TUINT16}: ssa.OpAdd16,
	opAndType{OADD, TINT32}: ssa.OpAdd32,
	opAndType{OADD, TUINT32}: ssa.OpAdd32,
	opAndType{OADD, TPTR32}: ssa.OpAdd32,
	opAndType{OADD, TINT64}: ssa.OpAdd64,
	opAndType{OADD, TUINT64}: ssa.OpAdd64,
	opAndType{OADD, TPTR64}: ssa.OpAdd64,
	opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
	opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,
	opAndType{OSUB, TINT8}: ssa.OpSub8,
	opAndType{OSUB, TUINT8}: ssa.OpSub8,
	opAndType{OSUB, TINT16}: ssa.OpSub16,
	opAndType{OSUB, TUINT16}: ssa.OpSub16,
	opAndType{OSUB, TINT32}: ssa.OpSub32,
	opAndType{OSUB, TUINT32}: ssa.OpSub32,
	opAndType{OSUB, TINT64}: ssa.OpSub64,
	opAndType{OSUB, TUINT64}: ssa.OpSub64,
	opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
	opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,
	opAndType{ONOT, TBOOL}: ssa.OpNot,
	opAndType{OMINUS, TINT8}: ssa.OpNeg8,
	opAndType{OMINUS, TUINT8}: ssa.OpNeg8,
	opAndType{OMINUS, TINT16}: ssa.OpNeg16,
	opAndType{OMINUS, TUINT16}: ssa.OpNeg16,
	opAndType{OMINUS, TINT32}: ssa.OpNeg32,
	opAndType{OMINUS, TUINT32}: ssa.OpNeg32,
	opAndType{OMINUS, TINT64}: ssa.OpNeg64,
	opAndType{OMINUS, TUINT64}: ssa.OpNeg64,
	opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
	opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,
	opAndType{OCOM, TINT8}: ssa.OpCom8,
	opAndType{OCOM, TUINT8}: ssa.OpCom8,
	opAndType{OCOM, TINT16}: ssa.OpCom16,
	opAndType{OCOM, TUINT16}: ssa.OpCom16,
	opAndType{OCOM, TINT32}: ssa.OpCom32,
	opAndType{OCOM, TUINT32}: ssa.OpCom32,
	opAndType{OCOM, TINT64}: ssa.OpCom64,
	opAndType{OCOM, TUINT64}: ssa.OpCom64,
	opAndType{OIMAG, TCOMPLEX64}: ssa.OpComplexImag,
	opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
	opAndType{OREAL, TCOMPLEX64}: ssa.OpComplexReal,
	opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,
	opAndType{OMUL, TINT8}: ssa.OpMul8,
	opAndType{OMUL, TUINT8}: ssa.OpMul8,
	opAndType{OMUL, TINT16}: ssa.OpMul16,
	opAndType{OMUL, TUINT16}: ssa.OpMul16,
	opAndType{OMUL, TINT32}: ssa.OpMul32,
	opAndType{OMUL, TUINT32}: ssa.OpMul32,
	opAndType{OMUL, TINT64}: ssa.OpMul64,
	opAndType{OMUL, TUINT64}: ssa.OpMul64,
	opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
	opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,
	opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
	opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,
	// High-multiply: signed and unsigned variants differ.
	opAndType{OHMUL, TINT8}: ssa.OpHmul8,
	opAndType{OHMUL, TUINT8}: ssa.OpHmul8u,
	opAndType{OHMUL, TINT16}: ssa.OpHmul16,
	opAndType{OHMUL, TUINT16}: ssa.OpHmul16u,
	opAndType{OHMUL, TINT32}: ssa.OpHmul32,
	opAndType{OHMUL, TUINT32}: ssa.OpHmul32u,
	opAndType{ODIV, TINT8}: ssa.OpDiv8,
	opAndType{ODIV, TUINT8}: ssa.OpDiv8u,
	opAndType{ODIV, TINT16}: ssa.OpDiv16,
	opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
	opAndType{ODIV, TINT32}: ssa.OpDiv32,
	opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
	opAndType{ODIV, TINT64}: ssa.OpDiv64,
	opAndType{ODIV, TUINT64}: ssa.OpDiv64u,
	opAndType{OMOD, TINT8}: ssa.OpMod8,
	opAndType{OMOD, TUINT8}: ssa.OpMod8u,
	opAndType{OMOD, TINT16}: ssa.OpMod16,
	opAndType{OMOD, TUINT16}: ssa.OpMod16u,
	opAndType{OMOD, TINT32}: ssa.OpMod32,
	opAndType{OMOD, TUINT32}: ssa.OpMod32u,
	opAndType{OMOD, TINT64}: ssa.OpMod64,
	opAndType{OMOD, TUINT64}: ssa.OpMod64u,
	opAndType{OAND, TINT8}: ssa.OpAnd8,
	opAndType{OAND, TUINT8}: ssa.OpAnd8,
	opAndType{OAND, TINT16}: ssa.OpAnd16,
	opAndType{OAND, TUINT16}: ssa.OpAnd16,
	opAndType{OAND, TINT32}: ssa.OpAnd32,
	opAndType{OAND, TUINT32}: ssa.OpAnd32,
	opAndType{OAND, TINT64}: ssa.OpAnd64,
	opAndType{OAND, TUINT64}: ssa.OpAnd64,
	opAndType{OOR, TINT8}: ssa.OpOr8,
	opAndType{OOR, TUINT8}: ssa.OpOr8,
	opAndType{OOR, TINT16}: ssa.OpOr16,
	opAndType{OOR, TUINT16}: ssa.OpOr16,
	opAndType{OOR, TINT32}: ssa.OpOr32,
	opAndType{OOR, TUINT32}: ssa.OpOr32,
	opAndType{OOR, TINT64}: ssa.OpOr64,
	opAndType{OOR, TUINT64}: ssa.OpOr64,
	opAndType{OXOR, TINT8}: ssa.OpXor8,
	opAndType{OXOR, TUINT8}: ssa.OpXor8,
	opAndType{OXOR, TINT16}: ssa.OpXor16,
	opAndType{OXOR, TUINT16}: ssa.OpXor16,
	opAndType{OXOR, TINT32}: ssa.OpXor32,
	opAndType{OXOR, TUINT32}: ssa.OpXor32,
	opAndType{OXOR, TINT64}: ssa.OpXor64,
	opAndType{OXOR, TUINT64}: ssa.OpXor64,
	opAndType{OEQ, TBOOL}: ssa.OpEq8,
	opAndType{OEQ, TINT8}: ssa.OpEq8,
	opAndType{OEQ, TUINT8}: ssa.OpEq8,
	opAndType{OEQ, TINT16}: ssa.OpEq16,
	opAndType{OEQ, TUINT16}: ssa.OpEq16,
	opAndType{OEQ, TINT32}: ssa.OpEq32,
	opAndType{OEQ, TUINT32}: ssa.OpEq32,
	opAndType{OEQ, TINT64}: ssa.OpEq64,
	opAndType{OEQ, TUINT64}: ssa.OpEq64,
	opAndType{OEQ, TINTER}: ssa.OpEqInter,
	opAndType{OEQ, TARRAY}: ssa.OpEqSlice,
	opAndType{OEQ, TFUNC}: ssa.OpEqPtr,
	opAndType{OEQ, TMAP}: ssa.OpEqPtr,
	opAndType{OEQ, TCHAN}: ssa.OpEqPtr,
	opAndType{OEQ, TPTR64}: ssa.OpEqPtr,
	opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr,
	opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
	opAndType{OEQ, TFLOAT64}: ssa.OpEq64F,
	opAndType{OEQ, TFLOAT32}: ssa.OpEq32F,
	opAndType{ONE, TBOOL}: ssa.OpNeq8,
	opAndType{ONE, TINT8}: ssa.OpNeq8,
	opAndType{ONE, TUINT8}: ssa.OpNeq8,
	opAndType{ONE, TINT16}: ssa.OpNeq16,
	opAndType{ONE, TUINT16}: ssa.OpNeq16,
	opAndType{ONE, TINT32}: ssa.OpNeq32,
	opAndType{ONE, TUINT32}: ssa.OpNeq32,
	opAndType{ONE, TINT64}: ssa.OpNeq64,
	opAndType{ONE, TUINT64}: ssa.OpNeq64,
	opAndType{ONE, TINTER}: ssa.OpNeqInter,
	opAndType{ONE, TARRAY}: ssa.OpNeqSlice,
	opAndType{ONE, TFUNC}: ssa.OpNeqPtr,
	opAndType{ONE, TMAP}: ssa.OpNeqPtr,
	opAndType{ONE, TCHAN}: ssa.OpNeqPtr,
	opAndType{ONE, TPTR64}: ssa.OpNeqPtr,
	opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr,
	opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
	opAndType{ONE, TFLOAT64}: ssa.OpNeq64F,
	opAndType{ONE, TFLOAT32}: ssa.OpNeq32F,
	// Ordered comparisons: unsigned types use the ...U variants.
	opAndType{OLT, TINT8}: ssa.OpLess8,
	opAndType{OLT, TUINT8}: ssa.OpLess8U,
	opAndType{OLT, TINT16}: ssa.OpLess16,
	opAndType{OLT, TUINT16}: ssa.OpLess16U,
	opAndType{OLT, TINT32}: ssa.OpLess32,
	opAndType{OLT, TUINT32}: ssa.OpLess32U,
	opAndType{OLT, TINT64}: ssa.OpLess64,
	opAndType{OLT, TUINT64}: ssa.OpLess64U,
	opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
	opAndType{OLT, TFLOAT32}: ssa.OpLess32F,
	opAndType{OGT, TINT8}: ssa.OpGreater8,
	opAndType{OGT, TUINT8}: ssa.OpGreater8U,
	opAndType{OGT, TINT16}: ssa.OpGreater16,
	opAndType{OGT, TUINT16}: ssa.OpGreater16U,
	opAndType{OGT, TINT32}: ssa.OpGreater32,
	opAndType{OGT, TUINT32}: ssa.OpGreater32U,
	opAndType{OGT, TINT64}: ssa.OpGreater64,
	opAndType{OGT, TUINT64}: ssa.OpGreater64U,
	opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
	opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,
	opAndType{OLE, TINT8}: ssa.OpLeq8,
	opAndType{OLE, TUINT8}: ssa.OpLeq8U,
	opAndType{OLE, TINT16}: ssa.OpLeq16,
	opAndType{OLE, TUINT16}: ssa.OpLeq16U,
	opAndType{OLE, TINT32}: ssa.OpLeq32,
	opAndType{OLE, TUINT32}: ssa.OpLeq32U,
	opAndType{OLE, TINT64}: ssa.OpLeq64,
	opAndType{OLE, TUINT64}: ssa.OpLeq64U,
	opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
	opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,
	opAndType{OGE, TINT8}: ssa.OpGeq8,
	opAndType{OGE, TUINT8}: ssa.OpGeq8U,
	opAndType{OGE, TINT16}: ssa.OpGeq16,
	opAndType{OGE, TUINT16}: ssa.OpGeq16U,
	opAndType{OGE, TINT32}: ssa.OpGeq32,
	opAndType{OGE, TUINT32}: ssa.OpGeq32U,
	opAndType{OGE, TINT64}: ssa.OpGeq64,
	opAndType{OGE, TUINT64}: ssa.OpGeq64U,
	opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
	opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,
	opAndType{OLROT, TUINT8}: ssa.OpLrot8,
	opAndType{OLROT, TUINT16}: ssa.OpLrot16,
	opAndType{OLROT, TUINT32}: ssa.OpLrot32,
	opAndType{OLROT, TUINT64}: ssa.OpLrot64,
	opAndType{OSQRT, TFLOAT64}: ssa.OpSqrt,
}
// concreteEtype returns the element kind of t, resolving the
// target-dependent kinds TINT, TUINT, and TUINTPTR to their fixed-size
// equivalents for the current backend configuration. All other kinds
// are returned unchanged.
func (s *state) concreteEtype(t *Type) EType {
	switch e := t.Etype; e {
	case TINT:
		if s.config.IntSize == 8 {
			return TINT64
		}
		return TINT32
	case TUINT:
		if s.config.IntSize == 8 {
			return TUINT64
		}
		return TUINT32
	case TUINTPTR:
		if s.config.PtrSize == 8 {
			return TUINT64
		}
		return TUINT32
	default:
		return e
	}
}
// ssaOp returns the generic SSA opcode implementing the front-end op
// on operands of type t. Unsupported combinations are reported via
// Unimplementedf.
func (s *state) ssaOp(op Op, t *Type) ssa.Op {
	key := opAndType{op, s.concreteEtype(t)}
	ssaop, found := opToSSA[key]
	if !found {
		s.Unimplementedf("unhandled binary op %s %s", opnames[op], Econv(key.etype))
	}
	return ssaop
}
// floatForComplex returns the floating-point type of the real and
// imaginary components of the complex type t: a size-8 complex
// (complex64) has float32 components, otherwise float64.
func floatForComplex(t *Type) *Type {
	// Flattened from an if/else: Go idiom avoids else after a
	// terminating return (golint/staticcheck S1008-style cleanup).
	if t.Size() == 8 {
		return Types[TFLOAT32]
	}
	return Types[TFLOAT64]
}
// opAndTwoTypes is a key for shiftOpToSSA: a front-end op together with
// the concrete element types of the shifted value and the shift count.
type opAndTwoTypes struct {
	op     Op
	etype1 EType
	etype2 EType
}
// twoTypes is a key for fpConvOpToSSA: the source and destination
// element types of a conversion.
type twoTypes struct {
	etype1 EType
	etype2 EType
}
// twoOpsAndType describes how to lower a conversion: apply op1, then
// op2, with the value between them having intermediateType.
type twoOpsAndType struct {
	op1              ssa.Op
	op2              ssa.Op
	intermediateType EType
}
// fpConvOpToSSA maps a (from, to) type pair to the two-step SSA
// lowering of a floating-point-related conversion. ssa.OpInvalid
// entries mark conversions that need branchy expansion instead of a
// direct opcode pair (the unsigned 64-bit cases).
var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
	twoTypes{TINT8, TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},
	twoTypes{TINT8, TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},
	twoTypes{TFLOAT32, TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},
	twoTypes{TFLOAT64, TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
	// unsigned
	twoTypes{TUINT8, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto32F, branchy code expansion instead
	twoTypes{TUINT8, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto64F, branchy code expansion instead
	twoTypes{TFLOAT32, TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt32Fto64U, branchy code expansion instead
	twoTypes{TFLOAT64, TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt64Fto64U, branchy code expansion instead
	// float
	twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT64},
	twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
}
// shiftOpToSSA maps a (shift op, value type, shift-count type) triple
// to the generic SSA shift opcode. Left shifts are sign-agnostic;
// right shifts select arithmetic (Rsh..x..) or logical (Rsh..Ux..)
// variants based on the signedness of the shifted value.
// Consulted by ssaShiftOp.
var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
	opAndTwoTypes{OLSH, TINT8, TUINT8}: ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TUINT8, TUINT8}: ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TINT8, TUINT16}: ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TINT8, TUINT32}: ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TINT8, TUINT64}: ssa.OpLsh8x64,
	opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,
	opAndTwoTypes{OLSH, TINT16, TUINT8}: ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TUINT16, TUINT8}: ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TINT16, TUINT16}: ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TINT16, TUINT32}: ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TINT16, TUINT64}: ssa.OpLsh16x64,
	opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,
	opAndTwoTypes{OLSH, TINT32, TUINT8}: ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TUINT32, TUINT8}: ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TINT32, TUINT16}: ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TINT32, TUINT32}: ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TINT32, TUINT64}: ssa.OpLsh32x64,
	opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,
	opAndTwoTypes{OLSH, TINT64, TUINT8}: ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TUINT64, TUINT8}: ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TINT64, TUINT16}: ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TINT64, TUINT32}: ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TINT64, TUINT64}: ssa.OpLsh64x64,
	opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,
	opAndTwoTypes{ORSH, TINT8, TUINT8}: ssa.OpRsh8x8,
	opAndTwoTypes{ORSH, TUINT8, TUINT8}: ssa.OpRsh8Ux8,
	opAndTwoTypes{ORSH, TINT8, TUINT16}: ssa.OpRsh8x16,
	opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
	opAndTwoTypes{ORSH, TINT8, TUINT32}: ssa.OpRsh8x32,
	opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
	opAndTwoTypes{ORSH, TINT8, TUINT64}: ssa.OpRsh8x64,
	opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,
	opAndTwoTypes{ORSH, TINT16, TUINT8}: ssa.OpRsh16x8,
	opAndTwoTypes{ORSH, TUINT16, TUINT8}: ssa.OpRsh16Ux8,
	opAndTwoTypes{ORSH, TINT16, TUINT16}: ssa.OpRsh16x16,
	opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
	opAndTwoTypes{ORSH, TINT16, TUINT32}: ssa.OpRsh16x32,
	opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
	opAndTwoTypes{ORSH, TINT16, TUINT64}: ssa.OpRsh16x64,
	opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,
	opAndTwoTypes{ORSH, TINT32, TUINT8}: ssa.OpRsh32x8,
	opAndTwoTypes{ORSH, TUINT32, TUINT8}: ssa.OpRsh32Ux8,
	opAndTwoTypes{ORSH, TINT32, TUINT16}: ssa.OpRsh32x16,
	opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
	opAndTwoTypes{ORSH, TINT32, TUINT32}: ssa.OpRsh32x32,
	opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
	opAndTwoTypes{ORSH, TINT32, TUINT64}: ssa.OpRsh32x64,
	opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,
	opAndTwoTypes{ORSH, TINT64, TUINT8}: ssa.OpRsh64x8,
	opAndTwoTypes{ORSH, TUINT64, TUINT8}: ssa.OpRsh64Ux8,
	opAndTwoTypes{ORSH, TINT64, TUINT16}: ssa.OpRsh64x16,
	opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
	opAndTwoTypes{ORSH, TINT64, TUINT32}: ssa.OpRsh64x32,
	opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
	opAndTwoTypes{ORSH, TINT64, TUINT64}: ssa.OpRsh64x64,
	opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
}
// ssaShiftOp returns the generic SSA shift opcode for shifting a value
// of type t by a count of type u. Unsupported combinations are
// reported via Unimplementedf.
func (s *state) ssaShiftOp(op Op, t *Type, u *Type) ssa.Op {
	key := opAndTwoTypes{op, s.concreteEtype(t), s.concreteEtype(u)}
	ssaop, found := shiftOpToSSA[key]
	if !found {
		s.Unimplementedf("unhandled shift op %s etype=%s/%s", opnames[op], Econv(key.etype1), Econv(key.etype2))
	}
	return ssaop
}
// ssaRotateOp returns the generic SSA rotate opcode for op applied to
// values of type t (looked up in the shared opToSSA table).
func (s *state) ssaRotateOp(op Op, t *Type) ssa.Op {
	key := opAndType{op, s.concreteEtype(t)}
	ssaop, found := opToSSA[key]
	if !found {
		s.Unimplementedf("unhandled rotate op %s etype=%s", opnames[op], Econv(key.etype))
	}
	return ssaop
}
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n *Node) *ssa.Value {
	s.pushLine(n.Lineno)
	defer s.popLine()

	s.stmtList(n.Ninit)
	switch n.Op {
	case OCFUNC:
		aux := s.lookupSymbol(n, &ssa.ExternSymbol{n.Type, n.Left.Sym})
		return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
	case OPARAM:
		addr := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Left.Type, addr, s.mem())
	case ONAME:
		if n.Class == PFUNC {
			// "value" of a function is the address of the function's closure
			sym := funcsym(n.Sym)
			aux := &ssa.ExternSymbol{n.Type, sym}
			return s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sb)
		}
		if s.canSSA(n) {
			return s.variable(n, n.Type)
		}
		addr := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OCLOSUREVAR:
		addr := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OLITERAL:
		switch n.Val().Ctype() {
		case CTINT:
			i := n.Int64()
			switch n.Type.Size() {
			case 1:
				return s.constInt8(n.Type, int8(i))
			case 2:
				return s.constInt16(n.Type, int16(i))
			case 4:
				return s.constInt32(n.Type, int32(i))
			case 8:
				return s.constInt64(n.Type, i)
			default:
				s.Fatalf("bad integer size %d", n.Type.Size())
				return nil
			}
		case CTSTR:
			if n.Val().U == "" {
				return s.constEmptyString(n.Type)
			}
			return s.entryNewValue0A(ssa.OpConstString, n.Type, n.Val().U)
		case CTBOOL:
			v := s.constBool(n.Val().U.(bool))
			// For some reason the frontend gets the line numbers of
			// CTBOOL literals totally wrong. Fix it here by grabbing
			// the line number of the enclosing AST node.
			if len(s.line) >= 2 {
				v.Line = s.line[len(s.line)-2]
			}
			return v
		case CTNIL:
			t := n.Type
			switch {
			case t.IsSlice():
				return s.constSlice(t)
			case t.IsInterface():
				return s.constInterface(t)
			default:
				return s.constNil(t)
			}
		case CTFLT:
			f := n.Val().U.(*Mpflt)
			switch n.Type.Size() {
			case 4:
				return s.constFloat32(n.Type, f.Float32())
			case 8:
				return s.constFloat64(n.Type, f.Float64())
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}
		case CTCPLX:
			c := n.Val().U.(*Mpcplx)
			r := &c.Real
			i := &c.Imag
			switch n.Type.Size() {
			case 8:
				{
					pt := Types[TFLOAT32]
					return s.newValue2(ssa.OpComplexMake, n.Type,
						s.constFloat32(pt, r.Float32()),
						s.constFloat32(pt, i.Float32()))
				}
			case 16:
				{
					pt := Types[TFLOAT64]
					return s.newValue2(ssa.OpComplexMake, n.Type,
						s.constFloat64(pt, r.Float64()),
						s.constFloat64(pt, i.Float64()))
				}
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}

		default:
			s.Unimplementedf("unhandled OLITERAL %v", n.Val().Ctype())
			return nil
		}
	case OCONVNOP:
		to := n.Type
		from := n.Left.Type

		// Assume everything will work out, so set up our return value.
		// Anything interesting that happens from here is a fatal.
		x := s.expr(n.Left)

		// Special case for not confusing GC and liveness.
		// We don't want pointers accidentally classified
		// as not-pointers or vice-versa because of copy
		// elision.
		if to.IsPtrShaped() != from.IsPtrShaped() {
			return s.newValue2(ssa.OpConvert, to, x, s.mem())
		}

		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type

		// CONVNOP closure
		if to.Etype == TFUNC && from.IsPtrShaped() {
			return v
		}

		// named <--> unnamed type or typed <--> untyped const
		if from.Etype == to.Etype {
			return v
		}

		// unsafe.Pointer <--> *T
		if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
			return v
		}

		dowidth(from)
		dowidth(to)
		if from.Width != to.Width {
			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
			return nil
		}
		if etypesign(from.Etype) != etypesign(to.Etype) {
			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, Econv(from.Etype), to, Econv(to.Etype))
			return nil
		}

		if instrumenting {
			// These appear to be fine, but they fail the
			// integer constraint below, so okay them here.
			// Sample non-integer conversion: map[string]string -> *uint8
			return v
		}

		if etypesign(from.Etype) == 0 {
			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
			return nil
		}

		// integer, same width, same sign
		return v

	case OCONV:
		x := s.expr(n.Left)
		ft := n.Left.Type // from type
		tt := n.Type      // to type
		if ft.IsInteger() && tt.IsInteger() {
			var op ssa.Op
			if tt.Size() == ft.Size() {
				op = ssa.OpCopy
			} else if tt.Size() < ft.Size() {
				// truncation
				// The from/to sizes are encoded as a two-digit key:
				// 10*fromSize + toSize.
				switch 10*ft.Size() + tt.Size() {
				case 21:
					op = ssa.OpTrunc16to8
				case 41:
					op = ssa.OpTrunc32to8
				case 42:
					op = ssa.OpTrunc32to16
				case 81:
					op = ssa.OpTrunc64to8
				case 82:
					op = ssa.OpTrunc64to16
				case 84:
					op = ssa.OpTrunc64to32
				default:
					s.Fatalf("weird integer truncation %s -> %s", ft, tt)
				}
			} else if ft.IsSigned() {
				// sign extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpSignExt8to16
				case 14:
					op = ssa.OpSignExt8to32
				case 18:
					op = ssa.OpSignExt8to64
				case 24:
					op = ssa.OpSignExt16to32
				case 28:
					op = ssa.OpSignExt16to64
				case 48:
					op = ssa.OpSignExt32to64
				default:
					s.Fatalf("bad integer sign extension %s -> %s", ft, tt)
				}
			} else {
				// zero extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpZeroExt8to16
				case 14:
					op = ssa.OpZeroExt8to32
				case 18:
					op = ssa.OpZeroExt8to64
				case 24:
					op = ssa.OpZeroExt16to32
				case 28:
					op = ssa.OpZeroExt16to64
				case 48:
					op = ssa.OpZeroExt32to64
				default:
					// This branch handles unsigned (zero) extension;
					// the message previously said "sign extension".
					s.Fatalf("weird integer zero extension %s -> %s", ft, tt)
				}
			}
			return s.newValue1(op, n.Type, x)
		}

		if ft.IsFloat() || tt.IsFloat() {
			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
			if !ok {
				s.Fatalf("weird float conversion %s -> %s", ft, tt)
			}
			op1, op2, it := conv.op1, conv.op2, conv.intermediateType

			if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
				// normal case, not tripping over unsigned 64
				if op1 == ssa.OpCopy {
					if op2 == ssa.OpCopy {
						return x
					}
					return s.newValue1(op2, n.Type, x)
				}
				if op2 == ssa.OpCopy {
					return s.newValue1(op1, n.Type, x)
				}
				return s.newValue1(op2, n.Type, s.newValue1(op1, Types[it], x))
			}
			// Tricky 64-bit unsigned cases.
			if ft.IsInteger() {
				// therefore tt is float32 or float64, and ft is also unsigned
				if tt.Size() == 4 {
					return s.uint64Tofloat32(n, x, ft, tt)
				}
				if tt.Size() == 8 {
					return s.uint64Tofloat64(n, x, ft, tt)
				}
				s.Fatalf("weird unsigned integer to float conversion %s -> %s", ft, tt)
			}
			// therefore ft is float32 or float64, and tt is unsigned integer
			if ft.Size() == 4 {
				return s.float32ToUint64(n, x, ft, tt)
			}
			if ft.Size() == 8 {
				return s.float64ToUint64(n, x, ft, tt)
			}
			s.Fatalf("weird float to unsigned integer conversion %s -> %s", ft, tt)
			return nil
		}

		if ft.IsComplex() && tt.IsComplex() {
			var op ssa.Op
			if ft.Size() == tt.Size() {
				op = ssa.OpCopy
			} else if ft.Size() == 8 && tt.Size() == 16 {
				op = ssa.OpCvt32Fto64F
			} else if ft.Size() == 16 && tt.Size() == 8 {
				op = ssa.OpCvt64Fto32F
			} else {
				s.Fatalf("weird complex conversion %s -> %s", ft, tt)
			}
			ftp := floatForComplex(ft)
			ttp := floatForComplex(tt)
			return s.newValue2(ssa.OpComplexMake, tt,
				s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
				s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
		}

		s.Unimplementedf("unhandled OCONV %s -> %s", Econv(n.Left.Type.Etype), Econv(n.Type.Etype))
		return nil

	case ODOTTYPE:
		res, _ := s.dottype(n, false)
		return res

	// binary ops
	case OLT, OEQ, ONE, OLE, OGE, OGT:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Left.Type.IsComplex() {
			pt := floatForComplex(n.Left.Type)
			op := s.ssaOp(OEQ, pt)
			r := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
			i := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
			c := s.newValue2(ssa.OpAnd8, Types[TBOOL], r, i)
			switch n.Op {
			case OEQ:
				return c
			case ONE:
				return s.newValue1(ssa.OpNot, Types[TBOOL], c)
			default:
				s.Fatalf("ordered complex compare %s", opnames[n.Op])
			}
		}
		return s.newValue2(s.ssaOp(n.Op, n.Left.Type), Types[TBOOL], a, b)
	case OMUL:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Type.IsComplex() {
			mulop := ssa.OpMul64F
			addop := ssa.OpAdd64F
			subop := ssa.OpSub64F
			pt := floatForComplex(n.Type) // Could be Float32 or Float64
			wt := Types[TFLOAT64]         // Compute in Float64 to minimize cancellation error

			areal := s.newValue1(ssa.OpComplexReal, pt, a)
			breal := s.newValue1(ssa.OpComplexReal, pt, b)
			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
			bimag := s.newValue1(ssa.OpComplexImag, pt, b)

			if pt != wt { // Widen for calculation
				areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
				breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
				aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
				bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
			}

			xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
			ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal))

			if pt != wt { // Narrow to store back
				xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
				ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
			}

			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
		}
		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)

	case ODIV:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Type.IsComplex() {
			// TODO this is not executed because the front-end substitutes a runtime call.
			// That probably ought to change; with modest optimization the widen/narrow
			// conversions could all be elided in larger expression trees.
			mulop := ssa.OpMul64F
			addop := ssa.OpAdd64F
			subop := ssa.OpSub64F
			divop := ssa.OpDiv64F
			pt := floatForComplex(n.Type) // Could be Float32 or Float64
			wt := Types[TFLOAT64]         // Compute in Float64 to minimize cancellation error

			areal := s.newValue1(ssa.OpComplexReal, pt, a)
			breal := s.newValue1(ssa.OpComplexReal, pt, b)
			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
			bimag := s.newValue1(ssa.OpComplexImag, pt, b)

			if pt != wt { // Widen for calculation
				areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
				breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
				aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
				bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
			}

			denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag))
			xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
			ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag))

			// TODO not sure if this is best done in wide precision or narrow
			// Double-rounding might be an issue.
			// Note that the pre-SSA implementation does the entire calculation
			// in wide format, so wide is compatible.
			xreal = s.newValue2(divop, wt, xreal, denom)
			ximag = s.newValue2(divop, wt, ximag, denom)

			if pt != wt { // Narrow to store back
				xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
				ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
			}
			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
		}
		if n.Type.IsFloat() {
			return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
		} else {
			// do a size-appropriate check for zero
			cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type))
			s.check(cmp, panicdivide)
			return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
		}
	case OMOD:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		// do a size-appropriate check for zero
		cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type))
		s.check(cmp, panicdivide)
		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
	case OADD, OSUB:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Type.IsComplex() {
			pt := floatForComplex(n.Type)
			op := s.ssaOp(n.Op, pt)
			return s.newValue2(ssa.OpComplexMake, n.Type,
				s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
				s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
		}
		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
	case OAND, OOR, OHMUL, OXOR:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
	case OLSH, ORSH:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b)
	case OLROT:
		a := s.expr(n.Left)
		i := n.Right.Int64()
		if i <= 0 || i >= n.Type.Size()*8 {
			s.Fatalf("Wrong rotate distance for LROT, expected 1 through %d, saw %d", n.Type.Size()*8-1, i)
		}
		return s.newValue1I(s.ssaRotateOp(n.Op, n.Type), a.Type, i, a)
	case OANDAND, OOROR:
		// To implement OANDAND (and OOROR), we introduce a
		// new temporary variable to hold the result. The
		// variable is associated with the OANDAND node in the
		// s.vars table (normally variables are only
		// associated with ONAME nodes). We convert
		//     A && B
		// to
		//     var = A
		//     if var {
		//         var = B
		//     }
		// Using var in the subsequent block introduces the
		// necessary phi variable.
		el := s.expr(n.Left)
		s.vars[n] = el

		b := s.endBlock()
		b.Kind = ssa.BlockIf
		b.SetControl(el)
		// In theory, we should set b.Likely here based on context.
		// However, gc only gives us likeliness hints
		// in a single place, for plain OIF statements,
		// and passing around context is finnicky, so don't bother for now.

		bRight := s.f.NewBlock(ssa.BlockPlain)
		bResult := s.f.NewBlock(ssa.BlockPlain)
		if n.Op == OANDAND {
			b.AddEdgeTo(bRight)
			b.AddEdgeTo(bResult)
		} else if n.Op == OOROR {
			b.AddEdgeTo(bResult)
			b.AddEdgeTo(bRight)
		}

		s.startBlock(bRight)
		er := s.expr(n.Right)
		s.vars[n] = er

		b = s.endBlock()
		b.AddEdgeTo(bResult)

		s.startBlock(bResult)
		return s.variable(n, Types[TBOOL])
	case OCOMPLEX:
		r := s.expr(n.Left)
		i := s.expr(n.Right)
		return s.newValue2(ssa.OpComplexMake, n.Type, r, i)

	// unary ops
	case OMINUS:
		a := s.expr(n.Left)
		if n.Type.IsComplex() {
			tp := floatForComplex(n.Type)
			negop := s.ssaOp(n.Op, tp)
			return s.newValue2(ssa.OpComplexMake, n.Type,
				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
		}
		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
	case ONOT, OCOM, OSQRT:
		a := s.expr(n.Left)
		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
	case OIMAG, OREAL:
		a := s.expr(n.Left)
		return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
	case OPLUS:
		return s.expr(n.Left)

	case OADDR:
		return s.addr(n.Left, n.Bounded)

	case OINDREG:
		if int(n.Reg) != Thearch.REGSP {
			s.Unimplementedf("OINDREG of non-SP register %s in expr: %v", obj.Rconv(int(n.Reg)), n)
			return nil
		}
		addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(n.Type), n.Xoffset, s.sp)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())

	case OIND:
		p := s.expr(n.Left)
		s.nilCheck(p)
		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())

	case ODOT:
		t := n.Left.Type
		if canSSAType(t) {
			v := s.expr(n.Left)
			return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
		}
		p := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())

	case ODOTPTR:
		p := s.expr(n.Left)
		s.nilCheck(p)
		p = s.newValue1I(ssa.OpOffPtr, p.Type, n.Xoffset, p)
		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())

	case OINDEX:
		switch {
		case n.Left.Type.IsString():
			a := s.expr(n.Left)
			i := s.expr(n.Right)
			i = s.extendIndex(i)
			if !n.Bounded {
				len := s.newValue1(ssa.OpStringLen, Types[TINT], a)
				s.boundsCheck(i, len)
			}
			ptrtyp := Ptrto(Types[TUINT8])
			ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
			if Isconst(n.Right, CTINT) {
				ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr)
			} else {
				ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
			}
			return s.newValue2(ssa.OpLoad, Types[TUINT8], ptr, s.mem())
		case n.Left.Type.IsSlice():
			p := s.addr(n, false)
			return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
		case n.Left.Type.IsArray():
			// TODO: fix when we can SSA arrays of length 1.
			p := s.addr(n, false)
			return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
		default:
			s.Fatalf("bad type for index %v", n.Left.Type)
			return nil
		}

	case OLEN, OCAP:
		switch {
		case n.Left.Type.IsSlice():
			op := ssa.OpSliceLen
			if n.Op == OCAP {
				op = ssa.OpSliceCap
			}
			return s.newValue1(op, Types[TINT], s.expr(n.Left))
		case n.Left.Type.IsString(): // string; not reachable for OCAP
			return s.newValue1(ssa.OpStringLen, Types[TINT], s.expr(n.Left))
		case n.Left.Type.IsMap(), n.Left.Type.IsChan():
			return s.referenceTypeBuiltin(n, s.expr(n.Left))
		default: // array
			return s.constInt(Types[TINT], n.Left.Type.NumElem())
		}

	case OSPTR:
		a := s.expr(n.Left)
		if n.Left.Type.IsSlice() {
			return s.newValue1(ssa.OpSlicePtr, n.Type, a)
		} else {
			return s.newValue1(ssa.OpStringPtr, n.Type, a)
		}

	case OITAB:
		a := s.expr(n.Left)
		return s.newValue1(ssa.OpITab, n.Type, a)

	case OEFACE:
		tab := s.expr(n.Left)
		data := s.expr(n.Right)
		// The frontend allows putting things like struct{*byte} in
		// the data portion of an eface. But we don't want struct{*byte}
		// as a register type because (among other reasons) the liveness
		// analysis is confused by the "fat" variables that result from
		// such types being spilled.
		// So here we ensure that we are selecting the underlying pointer
		// when we build an eface.
		// TODO: get rid of this now that structs can be SSA'd?
		for !data.Type.IsPtrShaped() {
			switch {
			case data.Type.IsArray():
				data = s.newValue1I(ssa.OpArrayIndex, data.Type.ElemType(), 0, data)
			case data.Type.IsStruct():
				for i := data.Type.NumFields() - 1; i >= 0; i-- {
					f := data.Type.FieldType(i)
					if f.Size() == 0 {
						// eface type could also be struct{p *byte; q [0]int}
						continue
					}
					data = s.newValue1I(ssa.OpStructSelect, f, int64(i), data)
					break
				}
			default:
				s.Fatalf("type being put into an eface isn't a pointer")
			}
		}
		return s.newValue2(ssa.OpIMake, n.Type, tab, data)

	case OSLICE, OSLICEARR:
		v := s.expr(n.Left)
		var i, j *ssa.Value
		if n.Right.Left != nil {
			i = s.extendIndex(s.expr(n.Right.Left))
		}
		if n.Right.Right != nil {
			j = s.extendIndex(s.expr(n.Right.Right))
		}
		p, l, c := s.slice(n.Left.Type, v, i, j, nil)
		return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
	case OSLICESTR:
		v := s.expr(n.Left)
		var i, j *ssa.Value
		if n.Right.Left != nil {
			i = s.extendIndex(s.expr(n.Right.Left))
		}
		if n.Right.Right != nil {
			j = s.extendIndex(s.expr(n.Right.Right))
		}
		p, l, _ := s.slice(n.Left.Type, v, i, j, nil)
		return s.newValue2(ssa.OpStringMake, n.Type, p, l)
	case OSLICE3, OSLICE3ARR:
		v := s.expr(n.Left)
		var i *ssa.Value
		if n.Right.Left != nil {
			i = s.extendIndex(s.expr(n.Right.Left))
		}
		j := s.extendIndex(s.expr(n.Right.Right.Left))
		k := s.extendIndex(s.expr(n.Right.Right.Right))
		p, l, c := s.slice(n.Left.Type, v, i, j, k)
		return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)

	case OCALLFUNC:
		if isIntrinsicCall1(n) {
			return s.intrinsicCall1(n)
		}
		fallthrough

	case OCALLINTER, OCALLMETH:
		a := s.call(n, callNormal)
		return s.newValue2(ssa.OpLoad, n.Type, a, s.mem())

	case OGETG:
		return s.newValue1(ssa.OpGetG, n.Type, s.mem())

	case OAPPEND:
		return s.exprAppend(n)

	default:
		s.Unimplementedf("unhandled expr %s", opnames[n.Op])
		return nil
	}
}
// exprAppend converts an OAPPEND node n to an ssa.Value, adds it to s, and returns the Value.
func (s *state) exprAppend(n *Node) *ssa.Value {
	// append(s, e1, e2, e3). Compile like:
	// ptr, len, cap := s
	// newlen := len + 3
	// if newlen > s.cap {
	//     ptr, len, cap = growslice(s, newlen)
	//     newlen = len + 3 // recalculate to avoid a spill
	// }
	// *(ptr+len) = e1
	// *(ptr+len+1) = e2
	// *(ptr+len+2) = e3
	// makeslice(ptr, newlen, cap)

	et := n.Type.Elem() // element type of the slice being appended to
	pt := Ptrto(et)     // pointer-to-element type

	// Evaluate slice
	slice := s.expr(n.List.First())

	// Allocate new blocks
	grow := s.f.NewBlock(ssa.BlockPlain)   // calls growslice
	assign := s.f.NewBlock(ssa.BlockPlain) // stores the appended elements

	// Decide if we need to grow
	nargs := int64(n.List.Len() - 1) // number of values being appended
	p := s.newValue1(ssa.OpSlicePtr, pt, slice)
	l := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
	c := s.newValue1(ssa.OpSliceCap, Types[TINT], slice)
	nl := s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs))
	cmp := s.newValue2(s.ssaOp(OGT, Types[TINT]), Types[TBOOL], nl, c)
	// Record ptr/newlen/cap under fake variables so that phis are
	// generated automatically where the grow and no-grow paths merge.
	s.vars[&ptrVar] = p
	s.vars[&newlenVar] = nl
	s.vars[&capVar] = c
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.Likely = ssa.BranchUnlikely // growing is assumed to be the rare case
	b.SetControl(cmp)
	b.AddEdgeTo(grow)
	b.AddEdgeTo(assign)

	// Call growslice
	s.startBlock(grow)
	taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Types[TUINTPTR], typenamesym(n.Type)}, s.sb)

	r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl)

	// newlen is recomputed from the returned length rather than reusing nl,
	// to avoid spilling nl across the call (see pseudocode above).
	s.vars[&ptrVar] = r[0]
	s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], r[1], s.constInt(Types[TINT], nargs))
	s.vars[&capVar] = r[2]
	b = s.endBlock()
	b.AddEdgeTo(assign)

	// assign new elements to slots
	s.startBlock(assign)

	// Evaluate args
	args := make([]*ssa.Value, 0, nargs)
	store := make([]bool, 0, nargs) // store[i]: args[i] is an SSA value to store (true) or an address to move from (false)
	for _, n := range n.List.Slice()[1:] {
		if canSSAType(n.Type) {
			args = append(args, s.expr(n))
			store = append(store, true)
		} else {
			args = append(args, s.addr(n, false))
			store = append(store, false)
		}
	}

	p = s.variable(&ptrVar, pt)              // generates phi for ptr
	nl = s.variable(&newlenVar, Types[TINT]) // generates phi for nl
	c = s.variable(&capVar, Types[TINT])     // generates phi for cap
	// p2 points at the first slot to fill; l is the pre-append length
	// computed in the entry block, which dominates both paths.
	p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
	// TODO: just one write barrier call for all of these writes?
	// TODO: maybe just one writeBarrier.enabled check?
	for i, arg := range args {
		addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(Types[TINT], int64(i)))
		if store[i] {
			if haspointers(et) {
				s.insertWBstore(et, addr, arg, n.Lineno, 0)
			} else {
				s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, et.Size(), addr, arg, s.mem())
			}
		} else {
			if haspointers(et) {
				s.insertWBmove(et, addr, arg, n.Lineno)
			} else {
				s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, et.Size(), addr, arg, s.mem())
			}
		}
	}

	// make result
	delete(s.vars, &ptrVar)
	delete(s.vars, &newlenVar)
	delete(s.vars, &capVar)
	return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
}
// condBranch evaluates the boolean expression cond and branches to yes
// if cond is true and no if cond is false.
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
	switch cond.Op {
	case OANDAND:
		// Note: if likely==1, then both recursive calls pass 1.
		// If likely==-1, then we don't have enough information to decide
		// whether the first branch is likely or not. So we pass 0 for
		// the likeliness of the first branch.
		// TODO: have the frontend give us branch prediction hints for
		// OANDAND and OOROR nodes (if it ever has such info).
		mid := s.f.NewBlock(ssa.BlockPlain)
		s.stmtList(cond.Ninit)
		s.condBranch(cond.Left, mid, no, max8(likely, 0))
		s.startBlock(mid)
		s.condBranch(cond.Right, yes, no, likely)
		return

	case OOROR:
		// Note: if likely==-1, then both recursive calls pass -1.
		// If likely==1, then we don't have enough info to decide
		// the likelihood of the first branch.
		mid := s.f.NewBlock(ssa.BlockPlain)
		s.stmtList(cond.Ninit)
		s.condBranch(cond.Left, yes, mid, min8(likely, 0))
		s.startBlock(mid)
		s.condBranch(cond.Right, yes, no, likely)
		return

	case ONOT:
		// Negation swaps the targets and inverts the likeliness hint.
		s.stmtList(cond.Ninit)
		s.condBranch(cond.Left, no, yes, -likely)
		return
	}

	// Base case: evaluate the condition and emit a two-way branch.
	ctrl := s.expr(cond)
	blk := s.endBlock()
	blk.Kind = ssa.BlockIf
	blk.SetControl(ctrl)
	blk.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
	blk.AddEdgeTo(yes)
	blk.AddEdgeTo(no)
}
// skipMask is a bitmask describing which top-level parts of a slice
// assignment (pointer, length, capacity) can be skipped; see assign.
type skipMask uint8

const (
	skipPtr skipMask = 1 << iota // pointer word need not be written
	skipLen                      // length word need not be written
	skipCap                      // capacity word need not be written
)
// assign does left = right.
// Right has already been evaluated to ssa, left has not.
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// Include a write barrier if wb is true.
// skip indicates assignments (at the top level) that can be avoided.
func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, line int32, skip skipMask) {
	// Assignments to the blank identifier need no code.
	if left.Op == ONAME && isblank(left) {
		return
	}
	t := left.Type
	dowidth(t)
	if s.canSSA(left) {
		if deref {
			s.Fatalf("can SSA LHS %s but not RHS %s", left, right)
		}
		if left.Op == ODOT {
			// We're assigning to a field of an ssa-able value.
			// We need to build a new structure with the new value for the
			// field we're assigning and the old values for the other fields.
			// For instance:
			//   type T struct {a, b, c int}
			//   var T x
			//   x.b = 5
			// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}

			// Grab information about the structure type.
			t := left.Left.Type
			nf := t.NumFields()
			idx := fieldIdx(left)

			// Grab old value of structure.
			old := s.expr(left.Left)

			// Make new structure.
			new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)

			// Add fields as args: the assigned field takes the new value,
			// all others are selected out of the old struct value.
			for i := 0; i < nf; i++ {
				if i == idx {
					new.AddArg(right)
				} else {
					new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
				}
			}

			// Recursively assign the new value we've made to the base of the dot op.
			s.assign(left.Left, new, false, false, line, 0)
			// TODO: do we need to update named values here?
			return
		}
		// Update variable assignment.
		s.vars[left] = right
		s.addNamedValue(left, right)
		return
	}
	// Left is not ssa-able. Compute its address.
	addr := s.addr(left, false)
	if left.Op == ONAME {
		// Mark the start of the variable's lifetime for liveness analysis.
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem())
	}
	if deref {
		// Treat as a mem->mem move.
		if right == nil {
			// left = zero value of type t.
			s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, t.Size(), addr, s.mem())
			return
		}
		if wb {
			s.insertWBmove(t, addr, right, line)
			return
		}
		s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), addr, right, s.mem())
		return
	}
	// Treat as a store.
	if wb {
		if skip&skipPtr != 0 {
			// Special case: if we don't write back the pointers, don't bother
			// doing the write barrier check.
			s.storeTypeScalars(t, addr, right, skip)
			return
		}
		s.insertWBstore(t, addr, right, line, skip)
		return
	}
	if skip != 0 {
		if skip&skipPtr == 0 {
			s.storeTypePtrs(t, addr, right)
		}
		s.storeTypeScalars(t, addr, right, skip)
		return
	}
	s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), addr, right, s.mem())
}
// zeroVal returns the zero value for type t.
func (s *state) zeroVal(t *Type) *ssa.Value {
	if t.IsInteger() {
		switch t.Size() {
		case 1:
			return s.constInt8(t, 0)
		case 2:
			return s.constInt16(t, 0)
		case 4:
			return s.constInt32(t, 0)
		case 8:
			return s.constInt64(t, 0)
		default:
			s.Fatalf("bad sized integer type %s", t)
		}
	}
	if t.IsFloat() {
		switch t.Size() {
		case 4:
			return s.constFloat32(t, 0)
		case 8:
			return s.constFloat64(t, 0)
		default:
			s.Fatalf("bad sized float type %s", t)
		}
	}
	if t.IsComplex() {
		// A complex zero is a ComplexMake of two zero halves.
		switch t.Size() {
		case 8:
			half := s.constFloat32(Types[TFLOAT32], 0)
			return s.entryNewValue2(ssa.OpComplexMake, t, half, half)
		case 16:
			half := s.constFloat64(Types[TFLOAT64], 0)
			return s.entryNewValue2(ssa.OpComplexMake, t, half, half)
		default:
			s.Fatalf("bad sized complex type %s", t)
		}
	}
	if t.IsString() {
		return s.constEmptyString(t)
	}
	if t.IsPtrShaped() {
		return s.constNil(t)
	}
	if t.IsBoolean() {
		return s.constBool(false)
	}
	if t.IsInterface() {
		return s.constInterface(t)
	}
	if t.IsSlice() {
		return s.constSlice(t)
	}
	if t.IsStruct() {
		// Build a StructMake whose args are the zero values of each field.
		nf := t.NumFields()
		zero := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
		for i := 0; i < nf; i++ {
			zero.AddArg(s.zeroVal(t.FieldType(i).(*Type)))
		}
		return zero
	}
	s.Unimplementedf("zero for type %v not implemented", t)
	return nil
}
// callKind distinguishes how a call is performed: as an ordinary call,
// via deferproc, or via newproc (go statement). See (*state).call.
type callKind int8

const (
	callNormal callKind = iota // plain function/method/interface call
	callDefer                  // call deferred with defer
	callGo                     // call launched with go
)
// isSSAIntrinsic1 returns true if n is a call to a recognized 1-arg intrinsic
// that can be handled by the SSA backend.
// SSA uses this, but so does the front end to see if should not
// inline a function because it is a candidate for intrinsic
// substitution.
func isSSAIntrinsic1(s *Sym) bool {
	// The test below is not quite accurate -- in the event that
	// a function is disabled on a per-function basis, for example
	// because of hash-keyed binary failure search, SSA might be
	// disabled for that function but it would not be noted here,
	// and thus an inlining would not occur (in practice, inlining
	// so far has only been noticed for Bswap32 and the 16-bit count
	// leading/trailing instructions, but heuristics might change
	// in the future or on different architectures).
	if !ssaEnabled || ssa.IntrinsicsDisable || Thearch.LinkArch.Family != sys.AMD64 {
		return false
	}
	if s == nil || s.Pkg == nil || s.Pkg.Path != "runtime/internal/sys" {
		return false
	}
	switch s.Name {
	case "Ctz64", "Ctz32", "Ctz16", "Bswap64", "Bswap32":
		return true
	}
	return false
}
// isIntrinsicCall1 reports whether n is a call whose target symbol is a
// recognized 1-arg SSA intrinsic.
func isIntrinsicCall1(n *Node) bool {
	if n != nil && n.Left != nil {
		return isSSAIntrinsic1(n.Left.Sym)
	}
	return false
}
// intrinsicFirstArg extracts arg from n.List and eval
func (s *state) intrinsicFirstArg(n *Node) *ssa.Value {
	arg := n.List.First()
	// The argument may come wrapped in an assignment; unwrap to its RHS.
	if arg.Op == OAS {
		arg = arg.Right
	}
	return s.expr(arg)
}
// intrinsicCall1 converts a call to a recognized 1-arg intrinsic
// into the intrinsic
func (s *state) intrinsicCall1(n *Node) *ssa.Value {
	// Select the SSA opcode and result type for the named intrinsic.
	var op ssa.Op
	var rt *Type
	switch n.Left.Sym.Name {
	case "Ctz64":
		op, rt = ssa.OpCtz64, Types[TUINT64]
	case "Ctz32":
		op, rt = ssa.OpCtz32, Types[TUINT32]
	case "Ctz16":
		op, rt = ssa.OpCtz16, Types[TUINT16]
	case "Bswap64":
		op, rt = ssa.OpBswap64, Types[TUINT64]
	case "Bswap32":
		op, rt = ssa.OpBswap32, Types[TUINT32]
	default:
		Fatalf("Unknown special call: %v", n.Left.Sym)
	}
	result := s.newValue1(op, rt, s.intrinsicFirstArg(n))
	if ssa.IntrinsicsDebug > 0 {
		Warnl(n.Lineno, "intrinsic substitution for %v with %s", n.Left.Sym.Name, result.LongString())
	}
	return result
}
// Calls the function n using the specified call type.
// Returns the address of the return value (or nil if none).
func (s *state) call(n *Node, k callKind) *ssa.Value {
	var sym *Sym           // target symbol (if static)
	var closure *ssa.Value // ptr to closure to run (if dynamic)
	var codeptr *ssa.Value // ptr to target code (if dynamic)
	var rcvr *ssa.Value    // receiver to set
	fn := n.Left
	switch n.Op {
	case OCALLFUNC:
		// Direct call of a known function: record its symbol.
		if k == callNormal && fn.Op == ONAME && fn.Class == PFUNC {
			sym = fn.Sym
			break
		}
		// Otherwise the callee is a closure value.
		closure = s.expr(fn)
	case OCALLMETH:
		if fn.Op != ODOTMETH {
			Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
		}
		if k == callNormal {
			sym = fn.Sym
			break
		}
		// Deferred/go'd method call: build a closure value for the method.
		n2 := newname(fn.Sym)
		n2.Class = PFUNC
		n2.Lineno = fn.Lineno
		closure = s.expr(n2)
		// Note: receiver is already assigned in n.List, so we don't
		// want to set it here.
	case OCALLINTER:
		if fn.Op != ODOTINTER {
			Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", Oconv(fn.Op, 0))
		}
		// Load the method's code pointer out of the itab.
		i := s.expr(fn.Left)
		itab := s.newValue1(ssa.OpITab, Types[TUINTPTR], i)
		itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab
		itab = s.newValue1I(ssa.OpOffPtr, Types[TUINTPTR], itabidx, itab)
		if k == callNormal {
			codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], itab, s.mem())
		} else {
			closure = itab
		}
		rcvr = s.newValue1(ssa.OpIData, Types[TUINTPTR], i)
	}
	dowidth(fn.Type)
	stksize := fn.Type.ArgWidth() // includes receiver

	// Run all argument assignments. The arg slots have already
	// been offset by the appropriate amount (+2*widthptr for go/defer,
	// +widthptr for interface calls).
	// For OCALLMETH, the receiver is set in these statements.
	s.stmtList(n.List)

	// Set receiver (for interface calls)
	if rcvr != nil {
		argStart := Ctxt.FixedFrameSize()
		if k != callNormal {
			argStart += int64(2 * Widthptr)
		}
		addr := s.entryNewValue1I(ssa.OpOffPtr, Types[TUINTPTR], argStart, s.sp)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, rcvr, s.mem())
	}

	// Defer/go args
	if k != callNormal {
		// Write argsize and closure (args to Newproc/Deferproc).
		argsize := s.constInt32(Types[TUINT32], int32(stksize))
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, 4, s.sp, argsize, s.mem())
		addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), int64(Widthptr), s.sp)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, closure, s.mem())
		stksize += 2 * int64(Widthptr)
	}

	// call target
	bNext := s.f.NewBlock(ssa.BlockPlain)
	var call *ssa.Value
	switch {
	case k == callDefer:
		call = s.newValue1(ssa.OpDeferCall, ssa.TypeMem, s.mem())
	case k == callGo:
		call = s.newValue1(ssa.OpGoCall, ssa.TypeMem, s.mem())
	case closure != nil:
		// Dynamic call through a closure: code pointer is the
		// first word of the closure.
		codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], closure, s.mem())
		call = s.newValue3(ssa.OpClosureCall, ssa.TypeMem, codeptr, closure, s.mem())
	case codeptr != nil:
		call = s.newValue2(ssa.OpInterCall, ssa.TypeMem, codeptr, s.mem())
	case sym != nil:
		call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, sym, s.mem())
	default:
		Fatalf("bad call type %s %v", opnames[n.Op], n)
	}
	call.AuxInt = stksize // Call operations carry the argsize of the callee along with them

	// Finish call block
	s.vars[&memVar] = call
	b := s.endBlock()
	b.Kind = ssa.BlockCall
	b.SetControl(call)
	b.AddEdgeTo(bNext)
	if k == callDefer {
		// Add recover edge to exit code.
		b.Kind = ssa.BlockDefer
		r := s.f.NewBlock(ssa.BlockPlain)
		s.startBlock(r)
		s.exit()
		b.AddEdgeTo(r)
		b.Likely = ssa.BranchLikely
	}

	// Start exit block, find address of result.
	s.startBlock(bNext)
	res := n.Left.Type.Results()
	if res.NumFields() == 0 || k != callNormal {
		// call has no return value. Continue with the next statement.
		return nil
	}
	fp := res.Field(0)
	return s.entryNewValue1I(ssa.OpOffPtr, Ptrto(fp.Type), fp.Offset, s.sp)
}
// etypesign returns the signed-ness of e, for integer/pointer etypes.
// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
func etypesign(e EType) int8 {
	switch e {
	case TINT8, TINT16, TINT32, TINT64, TINT:
		return -1
	case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR:
		return +1
	default:
		// Not an integer or pointer etype.
		return 0
	}
}
// lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node.
// This improves the effectiveness of cse by using the same Aux values for the
// same symbols.
func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} {
	// Only the three known symbol kinds are accepted; anything else is a
	// compiler bug. (Fixed typo in the message: "uknown" -> "unknown".)
	switch sym.(type) {
	default:
		s.Fatalf("sym %v is of unknown type %T", sym, sym)
	case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol:
		// these are the only valid types
	}

	// Reuse the previously recorded symbol for n, if any, so that
	// equivalent addresses share one Aux value and cse can combine them.
	if lsym, ok := s.varsyms[n]; ok {
		return lsym
	}
	s.varsyms[n] = sym
	return sym
}
// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
// The value that the returned Value represents is guaranteed to be non-nil.
// If bounded is true then this address does not require a nil check for its operand
// even if that would otherwise be implied.
func (s *state) addr(n *Node, bounded bool) *ssa.Value {
	t := Ptrto(n.Type)
	switch n.Op {
	case ONAME:
		switch n.Class {
		case PEXTERN:
			// global variable: address is symbol + offset off SB.
			aux := s.lookupSymbol(n, &ssa.ExternSymbol{n.Type, n.Sym})
			v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb)
			// TODO: Make OpAddr use AuxInt as well as Aux.
			if n.Xoffset != 0 {
				v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
			}
			return v
		case PPARAM:
			// parameter slot; normally its address was precomputed
			// into s.decladdrs at function entry.
			v := s.decladdrs[n]
			if v != nil {
				return v
			}
			if n.String() == ".fp" {
				// Special arg that points to the frame pointer.
				// (Used by the race detector, others?)
				aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
				return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp)
			}
			s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
			return nil
		case PAUTO:
			// local variable: address is symbol off SP.
			aux := s.lookupSymbol(n, &ssa.AutoSymbol{Typ: n.Type, Node: n})
			return s.newValue1A(ssa.OpAddr, t, aux, s.sp)
		case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
			// ensure that we reuse symbols for out parameters so
			// that cse works on their addresses
			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
			return s.newValue1A(ssa.OpAddr, t, aux, s.sp)
		case PAUTO | PHEAP, PPARAM | PHEAP, PPARAMOUT | PHEAP, PPARAMREF:
			// heap-allocated or closure-referenced variable: its
			// address is stored in Name.Heapaddr.
			return s.expr(n.Name.Heapaddr)
		default:
			s.Unimplementedf("variable address class %v not implemented", n.Class)
			return nil
		}
	case OINDREG:
		// indirect off a register
		// used for storing/loading arguments/returns to/from callees
		if int(n.Reg) != Thearch.REGSP {
			s.Unimplementedf("OINDREG of non-SP register %s in addr: %v", obj.Rconv(int(n.Reg)), n)
			return nil
		}
		return s.entryNewValue1I(ssa.OpOffPtr, t, n.Xoffset, s.sp)
	case OINDEX:
		if n.Left.Type.IsSlice() {
			// slice element: bounds-check against the dynamic length,
			// then index off the data pointer.
			a := s.expr(n.Left)
			i := s.expr(n.Right)
			i = s.extendIndex(i)
			len := s.newValue1(ssa.OpSliceLen, Types[TINT], a)
			if !n.Bounded {
				s.boundsCheck(i, len)
			}
			p := s.newValue1(ssa.OpSlicePtr, t, a)
			return s.newValue2(ssa.OpPtrIndex, t, p, i)
		} else { // array
			// array element: bounds-check against the constant length,
			// then index off the array's own address.
			a := s.addr(n.Left, bounded)
			i := s.expr(n.Right)
			i = s.extendIndex(i)
			len := s.constInt(Types[TINT], n.Left.Type.NumElem())
			if !n.Bounded {
				s.boundsCheck(i, len)
			}
			return s.newValue2(ssa.OpPtrIndex, Ptrto(n.Left.Type.Elem()), a, i)
		}
	case OIND:
		// dereference: the operand's value is already the address.
		p := s.expr(n.Left)
		if !bounded {
			s.nilCheck(p)
		}
		return p
	case ODOT:
		// field of an addressable value: parent address + field offset.
		p := s.addr(n.Left, bounded)
		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
	case ODOTPTR:
		// field through a pointer: pointer value + field offset.
		p := s.expr(n.Left)
		if !bounded {
			s.nilCheck(p)
		}
		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
	case OCLOSUREVAR:
		// closure variable: offset from the closure pointer.
		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
			s.entryNewValue0(ssa.OpGetClosurePtr, Ptrto(Types[TUINT8])))
	case OPARAM:
		p := n.Left
		if p.Op != ONAME || !(p.Class == PPARAM|PHEAP || p.Class == PPARAMOUT|PHEAP) {
			s.Fatalf("OPARAM not of ONAME,{PPARAM,PPARAMOUT}|PHEAP, instead %s", nodedump(p, 0))
		}
		// Recover original offset to address passed-in param value.
		original_p := *p
		original_p.Xoffset = n.Xoffset
		aux := &ssa.ArgSymbol{Typ: n.Type, Node: &original_p}
		return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp)
	case OCONVNOP:
		addr := s.addr(n.Left, bounded)
		return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
	case OCALLFUNC, OCALLINTER, OCALLMETH:
		// address of a call's result area.
		return s.call(n, callNormal)
	default:
		s.Unimplementedf("unhandled addr %v", Oconv(n.Op, 0))
		return nil
	}
}
// canSSA reports whether n is SSA-able.
// n must be an ONAME (or an ODOT sequence with an ONAME base).
func (s *state) canSSA(n *Node) bool {
	// Peel ODOT wrappers down to the underlying name.
	for n.Op == ODOT {
		n = n.Left
	}
	// Anything that is not a name, has its address taken, or lives
	// on the heap cannot be kept in SSA form.
	if n.Op != ONAME || n.Addrtaken || n.Class&PHEAP != 0 {
		return false
	}
	switch n.Class {
	case PEXTERN, PPARAMREF:
		// TODO: maybe treat PPARAMREF with an Arg-like op to read from closure?
		return false
	case PPARAMOUT:
		// TODO: handle the hasdefer case? Named return values must
		// be in memory so that deferred functions can see them.
		// Cgo effectively takes the address of all result args,
		// but the compiler can't see that.
		if hasdefer || s.cgoUnsafeArgs {
			return false
		}
	}
	if n.Class == PPARAM && n.String() == ".this" {
		// wrappers generated by genwrapper need to update
		// the .this pointer in place.
		// TODO: treat as a PPARMOUT?
		return false
	}
	// TODO: try to make more variables SSAable?
	return canSSAType(n.Type)
}
// canSSAType reports whether variables of type t are SSA-able.
func canSSAType(t *Type) bool {
	dowidth(t)
	// 4*Widthptr is an arbitrary cutoff: at least 3*Widthptr so
	// slices can be registerized, but small enough to avoid
	// excessive register pressure.
	if t.Width > int64(4*Widthptr) {
		return false
	}
	switch t.Etype {
	case TARRAY:
		// Slices are fine; true arrays are not, because dynamic
		// indexing is not supported on SSA variables.
		// TODO: maybe allow arrays of length <= 1, or with only
		// constant indexes? Might help the arrays the compiler
		// introduces for variadic calls.
		return t.IsSlice()
	case TSTRUCT:
		if t.NumFields() > ssa.MaxStruct {
			return false
		}
		// Every field must itself be SSA-able.
		for _, f := range t.Fields().Slice() {
			if !canSSAType(f.Type) {
				return false
			}
		}
		return true
	}
	return true
}
// nilCheck generates nil pointer checking code.
// Starts a new block on return, unless nil checks are disabled.
// Used only for automatically inserted nil checks,
// not for user code like 'x != nil'.
func (s *state) nilCheck(ptr *ssa.Value) {
	if Disable_checknil != 0 {
		return
	}
	// Emit the check, terminate the current block with it, and
	// continue code generation in a fresh block.
	nc := s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem())
	cur := s.endBlock()
	cur.Kind = ssa.BlockCheck
	cur.SetControl(nc)
	cont := s.f.NewBlock(ssa.BlockPlain)
	cur.AddEdgeTo(cont)
	s.startBlock(cont)
}
// boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
// Starts a new block on return.
func (s *state) boundsCheck(idx, len *ssa.Value) {
	if Debug['B'] != 0 {
		return // bounds checks disabled by -B
	}
	// TODO: convert index to full width?
	// TODO: if index is 64-bit and we're compiling to 32-bit, check that high 32 bits are zero.
	inBounds := s.newValue2(ssa.OpIsInBounds, Types[TBOOL], idx, len)
	s.check(inBounds, Panicindex)
}
// sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not.
// Starts a new block on return.
func (s *state) sliceBoundsCheck(idx, len *ssa.Value) {
	if Debug['B'] != 0 {
		return // bounds checks disabled by -B
	}
	// TODO: convert index to full width?
	// TODO: if index is 64-bit and we're compiling to 32-bit, check that high 32 bits are zero.
	inBounds := s.newValue2(ssa.OpIsSliceInBounds, Types[TBOOL], idx, len)
	s.check(inBounds, panicslice)
}
// If cmp (a bool) is false, panic using the given function.
// (The original comment said "is true", but the edge order below —
// bNext added first, bPanic second — sends the true case to bNext,
// and callers pass in-bounds conditions that should NOT panic when true.)
// Panic blocks are shared per (fn, line) pair via s.panics.
func (s *state) check(cmp *ssa.Value, fn *Node) {
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchLikely // the non-panicking path is the common one
	bNext := s.f.NewBlock(ssa.BlockPlain)
	line := s.peekLine()
	bPanic := s.panics[funcLine{fn, line}]
	if bPanic == nil {
		// First panic at this (fn, line): build the shared panic block.
		bPanic = s.f.NewBlock(ssa.BlockPlain)
		s.panics[funcLine{fn, line}] = bPanic
		s.startBlock(bPanic)
		// The panic call takes/returns memory to ensure that the right
		// memory state is observed if the panic happens.
		s.rtcall(fn, false, nil)
	}
	b.AddEdgeTo(bNext)
	b.AddEdgeTo(bPanic)
	s.startBlock(bNext)
}
// rtcall issues a call to the given runtime function fn with the listed args.
// Returns a slice of results of the given result types.
// The call is added to the end of the current block.
// If returns is false, the block is marked as an exit block.
// If returns is true, the block is marked as a call block. A new block
// is started to load the return values.
func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value {
	// Write args to the stack
	var off int64 // TODO: arch-dependent starting offset?
	for _, arg := range args {
		t := arg.Type
		off = Rnd(off, t.Alignment()) // respect each argument's alignment
		ptr := s.sp
		if off != 0 {
			ptr = s.newValue1I(ssa.OpOffPtr, Types[TUINTPTR], off, s.sp)
		}
		size := t.Size()
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, size, ptr, arg, s.mem())
		off += size
	}
	off = Rnd(off, int64(Widthptr))
	// Issue call
	call := s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, fn.Sym, s.mem())
	s.vars[&memVar] = call
	// Finish block
	b := s.endBlock()
	if !returns {
		// Terminating call (e.g. a panic helper): this block exits
		// the function, and no results are loaded.
		b.Kind = ssa.BlockExit
		b.SetControl(call)
		call.AuxInt = off
		if len(results) > 0 {
			Fatalf("panic call can't have results")
		}
		return nil
	}
	b.Kind = ssa.BlockCall
	b.SetControl(call)
	bNext := s.f.NewBlock(ssa.BlockPlain)
	b.AddEdgeTo(bNext)
	s.startBlock(bNext)
	// Load results from the stack, laid out just past the arguments.
	res := make([]*ssa.Value, len(results))
	for i, t := range results {
		off = Rnd(off, t.Alignment())
		ptr := s.sp
		if off != 0 {
			ptr = s.newValue1I(ssa.OpOffPtr, Types[TUINTPTR], off, s.sp)
		}
		res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem())
		off += t.Size()
	}
	off = Rnd(off, int64(Widthptr))
	// Remember how much callee stack space we needed.
	call.AuxInt = off
	return res
}
// insertWBmove inserts the assignment *left = *right including a write barrier.
// t is the type being assigned.
func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32) {
	// Generates:
	// if writeBarrier.enabled {
	//   typedmemmove(&t, left, right)
	// } else {
	//   *left = *right
	// }
	if s.noWB {
		s.Fatalf("write barrier prohibited")
	}
	if s.WBLineno == 0 {
		// Record the first line that required a write barrier.
		s.WBLineno = left.Line
	}
	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bEnd := s.f.NewBlock(ssa.BlockPlain)
	aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrier").Sym}
	flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
	// TODO: select the .enabled field. It is currently first, so not needed for now.
	// Load word, test byte, avoiding partial register write from load byte.
	// NOTE(review): the load is 32-bit (TUINT32) but the truncation op is
	// Trunc64to8 — confirm this combination is what the backends expect.
	flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
	flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.Likely = ssa.BranchUnlikely // barrier-enabled path is the rare one
	b.SetControl(flag)
	b.AddEdgeTo(bThen)
	b.AddEdgeTo(bElse)
	// Barrier enabled: call typedmemmove with the type descriptor.
	s.startBlock(bThen)
	taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Types[TUINTPTR], typenamesym(t)}, s.sb)
	s.rtcall(typedmemmove, true, nil, taddr, left, right)
	s.endBlock().AddEdgeTo(bEnd)
	// Barrier disabled: plain memory move.
	s.startBlock(bElse)
	s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), left, right, s.mem())
	s.endBlock().AddEdgeTo(bEnd)
	s.startBlock(bEnd)
	if Debug_wb > 0 {
		Warnl(line, "write barrier")
	}
}
// insertWBstore inserts the assignment *left = right including a write barrier.
// t is the type being assigned.
func (s *state) insertWBstore(t *Type, left, right *ssa.Value, line int32, skip skipMask) {
	// Generates:
	// store scalar fields
	// if writeBarrier.enabled {
	//   writebarrierptr for pointer fields
	// } else {
	//   store pointer fields
	// }
	if s.noWB {
		s.Fatalf("write barrier prohibited")
	}
	if s.WBLineno == 0 {
		// Record the first line that required a write barrier.
		s.WBLineno = left.Line
	}
	// Scalar parts never need a barrier; store them unconditionally first.
	s.storeTypeScalars(t, left, right, skip)
	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bEnd := s.f.NewBlock(ssa.BlockPlain)
	aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrier").Sym}
	flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
	// TODO: select the .enabled field. It is currently first, so not needed for now.
	// Load word, test byte, avoiding partial register write from load byte.
	// NOTE(review): 32-bit load followed by Trunc64to8 — confirm this
	// combination is what the backends expect (see insertWBmove).
	flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
	flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.Likely = ssa.BranchUnlikely // barrier-enabled path is the rare one
	b.SetControl(flag)
	b.AddEdgeTo(bThen)
	b.AddEdgeTo(bElse)
	// Issue write barriers for pointer writes.
	s.startBlock(bThen)
	s.storeTypePtrsWB(t, left, right)
	s.endBlock().AddEdgeTo(bEnd)
	// Issue regular stores for pointer writes.
	s.startBlock(bElse)
	s.storeTypePtrs(t, left, right)
	s.endBlock().AddEdgeTo(bEnd)
	s.startBlock(bEnd)
	if Debug_wb > 0 {
		Warnl(line, "write barrier")
	}
}
// storeTypeScalars does *left = right for all scalar (non-pointer) parts of t.
// skip suppresses the len/cap stores for strings and slices.
func (s *state) storeTypeScalars(t *Type, left, right *ssa.Value, skip skipMask) {
	switch {
	case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
		// Entirely scalar: one direct store.
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), left, right, s.mem())
	case t.IsPtrShaped():
		// no scalar fields.
	case t.IsString():
		// Only the length word is scalar (the data pointer is handled
		// by storeTypePtrs/storeTypePtrsWB).
		if skip&skipLen != 0 {
			return
		}
		len := s.newValue1(ssa.OpStringLen, Types[TINT], right)
		lenAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), s.config.IntSize, left)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem())
	case t.IsSlice():
		// len and cap words are scalar; each may be skipped independently.
		if skip&skipLen == 0 {
			len := s.newValue1(ssa.OpSliceLen, Types[TINT], right)
			lenAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), s.config.IntSize, left)
			s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem())
		}
		if skip&skipCap == 0 {
			cap := s.newValue1(ssa.OpSliceCap, Types[TINT], right)
			capAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), 2*s.config.IntSize, left)
			s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capAddr, cap, s.mem())
		}
	case t.IsInterface():
		// itab field doesn't need a write barrier (even though it is a pointer).
		itab := s.newValue1(ssa.OpITab, Ptrto(Types[TUINT8]), right)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, left, itab, s.mem())
	case t.IsStruct():
		// Recurse into each field at its offset.
		n := t.NumFields()
		for i := 0; i < n; i++ {
			ft := t.FieldType(i)
			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
			s.storeTypeScalars(ft.(*Type), addr, val, 0)
		}
	default:
		s.Fatalf("bad write barrier type %s", t)
	}
}
// storeTypePtrs does *left = right for all pointer parts of t,
// using plain stores (no write barrier).
func (s *state) storeTypePtrs(t *Type, left, right *ssa.Value) {
	switch {
	case t.IsPtrShaped():
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, right, s.mem())
	case t.IsString():
		// Only the data pointer (first word).
		ptr := s.newValue1(ssa.OpStringPtr, Ptrto(Types[TUINT8]), right)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
	case t.IsSlice():
		// Only the data pointer (first word).
		ptr := s.newValue1(ssa.OpSlicePtr, Ptrto(Types[TUINT8]), right)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
	case t.IsInterface():
		// itab field is treated as a scalar; only store the data word here.
		idata := s.newValue1(ssa.OpIData, Ptrto(Types[TUINT8]), right)
		idataAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT8]), s.config.PtrSize, left)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, idataAddr, idata, s.mem())
	case t.IsStruct():
		// Recurse into fields, skipping pointer-free ones.
		n := t.NumFields()
		for i := 0; i < n; i++ {
			ft := t.FieldType(i)
			if !haspointers(ft.(*Type)) {
				continue
			}
			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
			s.storeTypePtrs(ft.(*Type), addr, val)
		}
	default:
		s.Fatalf("bad write barrier type %s", t)
	}
}
// storeTypePtrsWB does *left = right with a write barrier for all pointer
// parts of t, routing each pointer store through writebarrierptr.
func (s *state) storeTypePtrsWB(t *Type, left, right *ssa.Value) {
	switch {
	case t.IsPtrShaped():
		s.rtcall(writebarrierptr, true, nil, left, right)
	case t.IsString():
		// Only the data pointer (first word).
		ptr := s.newValue1(ssa.OpStringPtr, Ptrto(Types[TUINT8]), right)
		s.rtcall(writebarrierptr, true, nil, left, ptr)
	case t.IsSlice():
		// Only the data pointer (first word).
		ptr := s.newValue1(ssa.OpSlicePtr, Ptrto(Types[TUINT8]), right)
		s.rtcall(writebarrierptr, true, nil, left, ptr)
	case t.IsInterface():
		// Only the data word; the itab is handled as a scalar.
		idata := s.newValue1(ssa.OpIData, Ptrto(Types[TUINT8]), right)
		idataAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT8]), s.config.PtrSize, left)
		s.rtcall(writebarrierptr, true, nil, idataAddr, idata)
	case t.IsStruct():
		// Recurse into fields, skipping pointer-free ones.
		n := t.NumFields()
		for i := 0; i < n; i++ {
			ft := t.FieldType(i)
			if !haspointers(ft.(*Type)) {
				continue
			}
			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
			s.storeTypePtrsWB(ft.(*Type), addr, val)
		}
	default:
		s.Fatalf("bad write barrier type %s", t)
	}
}
// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
// i,j,k may be nil, in which case they are set to their default value.
// t is a slice, ptr to array, or string type.
func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
	var elemtype *Type
	var ptrtype *Type
	var ptr *ssa.Value
	var len *ssa.Value
	var cap *ssa.Value
	zero := s.constInt(Types[TINT], 0)
	// Extract ptr/len/cap of the operand, per its kind.
	switch {
	case t.IsSlice():
		elemtype = t.Elem()
		ptrtype = Ptrto(elemtype)
		ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v)
		len = s.newValue1(ssa.OpSliceLen, Types[TINT], v)
		cap = s.newValue1(ssa.OpSliceCap, Types[TINT], v)
	case t.IsString():
		elemtype = Types[TUINT8]
		ptrtype = Ptrto(elemtype)
		ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v)
		len = s.newValue1(ssa.OpStringLen, Types[TINT], v)
		cap = len // strings have no separate capacity
	case t.IsPtr():
		if !t.Elem().IsArray() {
			s.Fatalf("bad ptr to array in slice %v\n", t)
		}
		elemtype = t.Elem().Elem()
		ptrtype = Ptrto(elemtype)
		s.nilCheck(v)
		ptr = v
		len = s.constInt(Types[TINT], t.Elem().NumElem())
		cap = len
	default:
		s.Fatalf("bad type in slice %v\n", t)
	}
	// Set default values
	if i == nil {
		i = zero
	}
	if j == nil {
		j = len
	}
	if k == nil {
		k = cap
	}
	// Panic if slice indices are not in bounds.
	// (The j != k / k != cap tests only skip checks when the values are
	// the identical SSA value, e.g. from the defaults above.)
	s.sliceBoundsCheck(i, j)
	if j != k {
		s.sliceBoundsCheck(j, k)
	}
	if k != cap {
		s.sliceBoundsCheck(k, cap)
	}
	// Generate the following code assuming that indexes are in bounds.
	// The conditional is to make sure that we don't generate a slice
	// that points to the next object in memory.
	// rlen = j-i
	// rcap = k-i
	// delta = i*elemsize
	// if rcap == 0 {
	//   delta = 0
	// }
	// rptr = p+delta
	// result = (SliceMake rptr rlen rcap)
	subOp := s.ssaOp(OSUB, Types[TINT])
	eqOp := s.ssaOp(OEQ, Types[TINT])
	mulOp := s.ssaOp(OMUL, Types[TINT])
	rlen := s.newValue2(subOp, Types[TINT], j, i)
	var rcap *ssa.Value
	switch {
	case t.IsString():
		// Capacity of the result is unimportant. However, we use
		// rcap to test if we've generated a zero-length slice.
		// Use length of strings for that.
		rcap = rlen
	case j == k:
		rcap = rlen
	default:
		rcap = s.newValue2(subOp, Types[TINT], k, i)
	}
	// delta = # of elements to offset pointer by.
	s.vars[&deltaVar] = i
	// Generate code to set delta=0 if the resulting capacity is zero.
	// Skip it entirely when i is statically zero.
	if !((i.Op == ssa.OpConst64 && i.AuxInt == 0) ||
		(i.Op == ssa.OpConst32 && int32(i.AuxInt) == 0)) {
		cmp := s.newValue2(eqOp, Types[TBOOL], rcap, zero)
		b := s.endBlock()
		b.Kind = ssa.BlockIf
		b.Likely = ssa.BranchUnlikely
		b.SetControl(cmp)
		// Generate block which zeros the delta variable.
		nz := s.f.NewBlock(ssa.BlockPlain)
		b.AddEdgeTo(nz)
		s.startBlock(nz)
		s.vars[&deltaVar] = zero
		s.endBlock()
		// All done.
		merge := s.f.NewBlock(ssa.BlockPlain)
		b.AddEdgeTo(merge)
		nz.AddEdgeTo(merge)
		s.startBlock(merge)
		// TODO: use conditional moves somehow?
	}
	// Compute rptr = ptr + delta * elemsize
	rptr := s.newValue2(ssa.OpAddPtr, ptrtype, ptr, s.newValue2(mulOp, Types[TINT], s.variable(&deltaVar, Types[TINT]), s.constInt(Types[TINT], elemtype.Width)))
	delete(s.vars, &deltaVar)
	return rptr, rlen, rcap
}
// u2fcvtTab parameterizes unsigned-int-to-float conversion (uintTofloat)
// over the source width: the comparison, conversion, and bit-manipulation
// ops to use, plus a constructor for the constant 1 of the source type.
type u2fcvtTab struct {
	geq, cvt2F, and, rsh, or, add ssa.Op
	one                           func(*state, ssa.Type, int64) *ssa.Value
}
// u64_f64 converts uint64 -> float64.
var u64_f64 u2fcvtTab = u2fcvtTab{
	geq:   ssa.OpGeq64,
	cvt2F: ssa.OpCvt64to64F,
	and:   ssa.OpAnd64,
	rsh:   ssa.OpRsh64Ux64,
	or:    ssa.OpOr64,
	add:   ssa.OpAdd64F,
	one:   (*state).constInt64,
}

// u64_f32 converts uint64 -> float32.
var u64_f32 u2fcvtTab = u2fcvtTab{
	geq:   ssa.OpGeq64,
	cvt2F: ssa.OpCvt64to32F,
	and:   ssa.OpAnd64,
	rsh:   ssa.OpRsh64Ux64,
	or:    ssa.OpOr64,
	add:   ssa.OpAdd32F,
	one:   (*state).constInt64,
}

// u32_f32 converts uint32 -> float32.
// Excess generality on a machine with 64-bit integer registers.
// Not used on AMD64.
var u32_f32 u2fcvtTab = u2fcvtTab{
	geq:   ssa.OpGeq32,
	cvt2F: ssa.OpCvt32to32F,
	and:   ssa.OpAnd32,
	rsh:   ssa.OpRsh32Ux32,
	or:    ssa.OpOr32,
	add:   ssa.OpAdd32F,
	one: func(s *state, t ssa.Type, x int64) *ssa.Value {
		return s.constInt32(t, int32(x))
	},
}
// uint64Tofloat64 converts x (a uint64 of type ft) to float64 (type tt).
func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.uintTofloat(&u64_f64, n, x, ft, tt)
}
// uint64Tofloat32 converts x (a uint64 of type ft) to float32 (type tt).
func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.uintTofloat(&u64_f32, n, x, ft, tt)
}
// uintTofloat generates code converting the unsigned integer x (type ft)
// to a float (type tt), using the ops in cvttab. n names the result
// variable used to merge the two branches.
func (s *state) uintTofloat(cvttab *u2fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	// if x >= 0 {
	//    result = (floatY) x
	// } else {
	//    y = x & 1
	//    z = x >> 1
	//    z = z | y
	//    result = floatY(z)
	//    result = result + result
	// }
	// (The original pseudocode listed the shift twice; the code below
	// shifts exactly once.)
	//
	// Code borrowed from old code generator.
	// What's going on: large 64-bit "unsigned" looks like
	// negative number to hardware's integer-to-float
	// conversion. However, because the mantissa is only
	// 63 bits, we don't need the LSB, so instead we do an
	// unsigned right shift (divide by two), convert, and
	// double. However, before we do that, we need to be
	// sure that we do not lose a "1" if that made the
	// difference in the resulting rounding. Therefore, we
	// preserve it, and OR (not ADD) it back in. The case
	// that matters is when the eleven discarded bits are
	// equal to 10000000001; that rounds up, and the 1 cannot
	// be lost else it would round down if the LSB of the
	// candidate mantissa is 0.
	cmp := s.newValue2(cvttab.geq, Types[TBOOL], x, s.zeroVal(ft))
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchLikely // most values are small and non-negative
	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)
	// Fast path: value fits in the signed range, convert directly.
	b.AddEdgeTo(bThen)
	s.startBlock(bThen)
	a0 := s.newValue1(cvttab.cvt2F, tt, x)
	s.vars[n] = a0
	s.endBlock()
	bThen.AddEdgeTo(bAfter)
	// Slow path: shift right preserving the LSB, convert, double.
	b.AddEdgeTo(bElse)
	s.startBlock(bElse)
	one := cvttab.one(s, ft, 1)
	y := s.newValue2(cvttab.and, ft, x, one)
	z := s.newValue2(cvttab.rsh, ft, x, one)
	z = s.newValue2(cvttab.or, ft, z, y)
	a := s.newValue1(cvttab.cvt2F, tt, z)
	a1 := s.newValue2(cvttab.add, tt, a, a)
	s.vars[n] = a1
	s.endBlock()
	bElse.AddEdgeTo(bAfter)
	s.startBlock(bAfter)
	return s.variable(n, n.Type)
}
// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
// x is the map/chan value (a pointer); a nil map/chan yields 0.
func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
	if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
		s.Fatalf("node must be a map or a channel")
	}
	// Generates:
	// if n == nil {
	//   return 0
	// } else {
	//   // len
	//   return *((*int)n)
	//   // cap
	//   return *(((*int)n)+1)
	// }
	lenType := n.Type
	nilValue := s.constNil(Types[TUINTPTR])
	cmp := s.newValue2(ssa.OpEqPtr, Types[TBOOL], x, nilValue)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchUnlikely // nil maps/chans are the rare case
	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)
	// length/capacity of a nil map/chan is zero
	b.AddEdgeTo(bThen)
	s.startBlock(bThen)
	s.vars[n] = s.zeroVal(lenType)
	s.endBlock()
	bThen.AddEdgeTo(bAfter)
	b.AddEdgeTo(bElse)
	s.startBlock(bElse)
	if n.Op == OLEN {
		// length is stored in the first word for map/chan
		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem())
	} else if n.Op == OCAP {
		// capacity is stored in the second word for chan
		sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem())
	} else {
		s.Fatalf("op must be OLEN or OCAP")
	}
	s.endBlock()
	bElse.AddEdgeTo(bAfter)
	s.startBlock(bAfter)
	return s.variable(n, lenType)
}
// f2uCvtTab parameterizes float-to-uint64 conversion (floatToUint) over
// the source float width: the less-than, convert, and subtract ops, plus
// a constructor for a constant of the source float type.
type f2uCvtTab struct {
	ltf, cvt2U, subf ssa.Op
	value            func(*state, ssa.Type, float64) *ssa.Value
}
// f32_u64 converts float32 -> uint64.
var f32_u64 f2uCvtTab = f2uCvtTab{
	ltf:   ssa.OpLess32F,
	cvt2U: ssa.OpCvt32Fto64,
	subf:  ssa.OpSub32F,
	value: (*state).constFloat32,
}

// f64_u64 converts float64 -> uint64.
var f64_u64 f2uCvtTab = f2uCvtTab{
	ltf:   ssa.OpLess64F,
	cvt2U: ssa.OpCvt64Fto64,
	subf:  ssa.OpSub64F,
	value: (*state).constFloat64,
}
// float32ToUint64 converts x (a float32 of type ft) to uint64 (type tt).
func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.floatToUint(&f32_u64, n, x, ft, tt)
}
// float64ToUint64 converts x (a float64 of type ft) to uint64 (type tt).
func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.floatToUint(&f64_u64, n, x, ft, tt)
}
// floatToUint generates code converting the float x (type ft) to uint64
// (type tt), using the ops in cvttab. n names the result variable used
// to merge the two branches.
func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	// Values below 2^63 convert directly; larger ones are biased down
	// by 2^63, converted, then have the high bit restored with OR:
	// if x < 9223372036854775808.0 {
	//   result = uintY(x)
	// } else {
	//   y = x - 9223372036854775808.0
	//   z = uintY(y)
	//   result = z | -9223372036854775808
	// }
	twoToThe63 := cvttab.value(s, ft, 9223372036854775808.0)
	cmp := s.newValue2(cvttab.ltf, Types[TBOOL], x, twoToThe63)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchLikely // most values fit in the signed range
	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)
	// Fast path: direct conversion.
	b.AddEdgeTo(bThen)
	s.startBlock(bThen)
	a0 := s.newValue1(cvttab.cvt2U, tt, x)
	s.vars[n] = a0
	s.endBlock()
	bThen.AddEdgeTo(bAfter)
	// Slow path: subtract 2^63, convert, OR the sign bit back in.
	b.AddEdgeTo(bElse)
	s.startBlock(bElse)
	y := s.newValue2(cvttab.subf, ft, x, twoToThe63)
	y = s.newValue1(cvttab.cvt2U, tt, y)
	z := s.constInt64(tt, -9223372036854775808)
	a1 := s.newValue2(ssa.OpOr64, tt, y, z)
	s.vars[n] = a1
	s.endBlock()
	bElse.AddEdgeTo(bAfter)
	s.startBlock(bAfter)
	return s.variable(n, n.Type)
}
// ifaceType returns the value for the word containing the type.
// n is the node for the interface expression.
// v is the corresponding value.
func (s *state) ifaceType(n *Node, v *ssa.Value) *ssa.Value {
	byteptr := Ptrto(Types[TUINT8]) // type used in runtime prototypes for runtime type (*byte)
	if n.Type.IsEmptyInterface() {
		// Have *eface. The type is the first word in the struct.
		return s.newValue1(ssa.OpITab, byteptr, v)
	}
	// Have *iface.
	// The first word in the struct is the *itab.
	// If the *itab is nil, return 0.
	// Otherwise, the second word in the *itab is the type.
	tab := s.newValue1(ssa.OpITab, byteptr, v)
	// Seed the merge variable with the itab itself (nil when itab is nil).
	s.vars[&typVar] = tab
	isnonnil := s.newValue2(ssa.OpNeqPtr, Types[TBOOL], tab, s.constNil(byteptr))
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(isnonnil)
	b.Likely = ssa.BranchLikely // non-nil itabs are the common case
	bLoad := s.f.NewBlock(ssa.BlockPlain)
	bEnd := s.f.NewBlock(ssa.BlockPlain)
	b.AddEdgeTo(bLoad)
	b.AddEdgeTo(bEnd)
	bLoad.AddEdgeTo(bEnd)
	// Non-nil itab: load the type word out of it.
	s.startBlock(bLoad)
	off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), tab)
	s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
	s.endBlock()
	s.startBlock(bEnd)
	// Merge the two definitions of typVar, then retire the temp variable.
	typ := s.variable(&typVar, byteptr)
	delete(s.vars, &typVar)
	return typ
}
// dottype generates SSA for a type assertion node.
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
	iface := s.expr(n.Left)
	typ := s.ifaceType(n.Left, iface)  // actual concrete type
	target := s.expr(typename(n.Type)) // target type
	if !isdirectiface(n.Type) {
		// walk rewrites ODOTTYPE/OAS2DOTTYPE into runtime calls except for this case.
		Fatalf("dottype needs a direct iface type %s", n.Type)
	}
	if Debug_typeassert > 0 {
		Warnl(n.Lineno, "type assertion inlined")
	}
	// TODO: If we have a nonempty interface and its itab field is nil,
	// then this test is redundant and ifaceType should just branch directly to bFail.
	cond := s.newValue2(ssa.OpEqPtr, Types[TBOOL], typ, target)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cond)
	b.Likely = ssa.BranchLikely // assume assertions usually succeed
	byteptr := Ptrto(Types[TUINT8])
	bOk := s.f.NewBlock(ssa.BlockPlain)
	bFail := s.f.NewBlock(ssa.BlockPlain)
	b.AddEdgeTo(bOk)
	b.AddEdgeTo(bFail)
	if !commaok {
		// on failure, panic by calling panicdottype
		s.startBlock(bFail)
		taddr := s.newValue1A(ssa.OpAddr, byteptr, &ssa.ExternSymbol{byteptr, typenamesym(n.Left.Type)}, s.sb)
		s.rtcall(panicdottype, false, nil, typ, target, taddr)
		// on success, return idata field
		s.startBlock(bOk)
		return s.newValue1(ssa.OpIData, n.Type, iface), nil
	}
	// commaok is the more complicated case because we have
	// a control flow merge point.
	bEnd := s.f.NewBlock(ssa.BlockPlain)
	// type assertion succeeded: result is the data word, ok is true.
	s.startBlock(bOk)
	s.vars[&idataVar] = s.newValue1(ssa.OpIData, n.Type, iface)
	s.vars[&okVar] = s.constBool(true)
	s.endBlock()
	bOk.AddEdgeTo(bEnd)
	// type assertion failed: result is nil, ok is false.
	s.startBlock(bFail)
	s.vars[&idataVar] = s.constNil(byteptr)
	s.vars[&okVar] = s.constBool(false)
	s.endBlock()
	bFail.AddEdgeTo(bEnd)
	// merge point: read both variables, then retire the temps.
	s.startBlock(bEnd)
	res = s.variable(&idataVar, byteptr)
	resok = s.variable(&okVar, Types[TBOOL])
	delete(s.vars, &idataVar)
	delete(s.vars, &okVar)
	return res, resok
}
// checkgoto checks that a goto from from to to does not
// jump into a block or jump over variable declarations.
// It is a copy of checkgoto in the pre-SSA backend,
// modified only for line number handling.
// TODO: document how this works and why it is designed the way it is.
func (s *state) checkgoto(from *Node, to *Node) {
	if from.Sym == to.Sym {
		return
	}
	// Count the dcl-stack depth at the goto and at the label.
	nf := 0
	for fs := from.Sym; fs != nil; fs = fs.Link {
		nf++
	}
	nt := 0
	for fs := to.Sym; fs != nil; fs = fs.Link {
		nt++
	}
	// Walk the goto's stack up to the label's depth; if the two stacks
	// don't meet at the same entry, the jump crosses a scope boundary.
	fs := from.Sym
	for ; nf > nt; nf-- {
		fs = fs.Link
	}
	if fs != to.Sym {
		// decide what to complain about.
		// prefer to complain about 'into block' over declarations,
		// so scan backward to find most recent block or else dcl.
		var block *Sym
		var dcl *Sym
		ts := to.Sym
		for ; nt > nf; nt-- {
			if ts.Pkg == nil {
				// nil Pkg marks a block-scope entry (see the
				// pre-SSA checkgoto this was copied from).
				block = ts
			} else {
				dcl = ts
			}
			ts = ts.Link
		}
		// Advance both stacks in lockstep until they converge.
		for ts != fs {
			if ts.Pkg == nil {
				block = ts
			} else {
				dcl = ts
			}
			ts = ts.Link
			fs = fs.Link
		}
		lno := from.Left.Lineno
		if block != nil {
			yyerrorl(lno, "goto %v jumps into block starting at %v", from.Left.Sym, linestr(block.Lastlineno))
		} else {
			yyerrorl(lno, "goto %v jumps over declaration of %v at %v", from.Left.Sym, dcl, linestr(dcl.Lastlineno))
		}
	}
}
// variable returns the value of a variable at the current location.
func (s *state) variable(name *Node, t ssa.Type) *ssa.Value {
	if v, ok := s.vars[name]; ok && v != nil {
		return v
	}
	// No definition yet in this block: leave a forward reference
	// to be wired up once the CFG is complete.
	fwd := s.newValue0A(ssa.OpFwdRef, t, name)
	s.fwdRefs = append(s.fwdRefs, fwd)
	s.vars[name] = fwd
	s.addNamedValue(name, fwd)
	return fwd
}
// mem returns the current memory state (the memory pseudo-variable).
func (s *state) mem() *ssa.Value {
	return s.variable(&memVar, ssa.TypeMem)
}
func (s *state) linkForwardReferences() {
	// Build SSA graph. Each variable on its first use in a basic block
	// leaves a FwdRef in that block representing the incoming value
	// of that variable. This function links that ref up with possible definitions,
	// inserting Phi values as needed. This is essentially the algorithm
	// described by Braun, Buchwald, Hack, Leißa, Mallon, and Zwinkau:
	// http://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf
	// Differences:
	//   - FwdRef nodes postpone phi building until the CFG is completely
	//     built, which avoids the paper's notion of "sealed" blocks.
	//   - Phi optimization is a separate pass (in ../ssa/phielim.go).
	//
	// Resolving one ref may push new refs, so treat fwdRefs as a worklist.
	for len(s.fwdRefs) > 0 {
		last := len(s.fwdRefs) - 1
		ref := s.fwdRefs[last]
		s.fwdRefs = s.fwdRefs[:last]
		s.resolveFwdRef(ref)
	}
}
// resolveFwdRef modifies v to be the variable's value at the start of its block.
// v must be a FwdRef op.
func (s *state) resolveFwdRef(v *ssa.Value) {
	b := v.Block
	name := v.Aux.(*Node)
	v.Aux = nil
	if b == s.f.Entry {
		// Live variable at start of function.
		if s.canSSA(name) {
			if strings.HasPrefix(name.Sym.Name, "autotmp_") {
				// It's likely that this is an uninitialized variable in the entry block.
				s.Fatalf("Treating auto as if it were arg, func %s, node %v, value %v", b.Func.Name, name, v)
			}
			// SSA-able argument: rewrite the ref into an OpArg.
			v.Op = ssa.OpArg
			v.Aux = name
			return
		}
		// Not SSAable. Load it.
		addr := s.decladdrs[name]
		if addr == nil {
			// TODO: closure args reach here.
			s.Unimplementedf("unhandled closure arg %s at entry to function %s", name, b.Func.Name)
		}
		if _, ok := addr.Aux.(*ssa.ArgSymbol); !ok {
			s.Fatalf("variable live at start of function %s is not an argument %s", b.Func.Name, name)
		}
		v.Op = ssa.OpLoad
		v.AddArgs(addr, s.startmem)
		return
	}
	if len(b.Preds) == 0 {
		// This block is dead; we have no predecessors and we're not the entry block.
		// It doesn't matter what we use here as long as it is well-formed.
		v.Op = ssa.OpUnknown
		return
	}
	// Find variable value on each predecessor.
	var argstore [4]*ssa.Value // small stack buffer to avoid allocation for few preds
	args := argstore[:0]
	for _, p := range b.Preds {
		args = append(args, s.lookupVarOutgoing(p, v.Type, name, v.Line))
	}
	// Decide if we need a phi or not. We need a phi if there
	// are two different args (which are both not v).
	var w *ssa.Value
	for _, a := range args {
		if a == v {
			continue // self-reference
		}
		if a == w {
			continue // already have this witness
		}
		if w != nil {
			// two witnesses, need a phi value
			v.Op = ssa.OpPhi
			v.AddArgs(args...)
			return
		}
		w = a // save witness
	}
	if w == nil {
		s.Fatalf("no witness for reachable phi %s", v)
	}
	// One witness. Make v a copy of w.
	v.Op = ssa.OpCopy
	v.AddArg(w)
}
// lookupVarOutgoing finds the variable's value at the end of block b.
// If b does not define the variable, a FwdRef placeholder is created,
// recorded in b's defvars map and in s.fwdRefs (so resolveFwdRef will
// later replace it), and returned.
// NOTE(review): this writes into s.defvars[b.ID]; it assumes that map
// has already been populated for b (by endBlock) — confirm with callers.
func (s *state) lookupVarOutgoing(b *ssa.Block, t ssa.Type, name *Node, line int32) *ssa.Value {
	m := s.defvars[b.ID]
	if v, ok := m[name]; ok {
		return v
	}
	// The variable is not defined by b and we haven't
	// looked it up yet. Generate a FwdRef for the variable and return that.
	v := b.NewValue0A(line, ssa.OpFwdRef, t, name)
	s.fwdRefs = append(s.fwdRefs, v)
	m[name] = v
	s.addNamedValue(name, v)
	return v
}
// addNamedValue records that SSA value v holds the user variable n,
// updating f.NamedValues (and f.Names on first sight of the slot).
// Dummy nodes, autotmp_ temporaries, and named output parameters are
// deliberately not tracked.
func (s *state) addNamedValue(n *Node, v *ssa.Value) {
	if n.Class == Pxxx {
		// Don't track our dummy nodes (&memVar etc.).
		return
	}
	if strings.HasPrefix(n.Sym.Name, "autotmp_") {
		// Don't track autotmp_ variables.
		return
	}
	if n.Class == PPARAMOUT {
		// Don't track named output values. This prevents return values
		// from being assigned too early. See #14591 and #14762. TODO: allow this.
		return
	}
	if n.Class == PAUTO && n.Xoffset != 0 {
		s.Fatalf("AUTO var with offset %s %d", n, n.Xoffset)
	}
	loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
	values, ok := s.f.NamedValues[loc]
	if !ok {
		// First value for this slot; remember the slot itself too.
		s.f.Names = append(s.f.Names, loc)
	}
	s.f.NamedValues[loc] = append(values, v)
}
// Branch is an unresolved branch: a branch instruction whose target
// Prog is not yet known. genssa resolves all Branches after every
// block has been emitted.
type Branch struct {
	P *obj.Prog  // branch instruction
	B *ssa.Block // target
}
// SSAGenState contains state needed during Prog generation.
type SSAGenState struct {
	// Branches remembers all the branch instructions we've seen
	// and where they would like to go.
	Branches []Branch

	// bstart remembers where each block starts (indexed by block ID)
	bstart []*obj.Prog
}
// Pc returns the current Prog (the global instruction cursor).
func (s *SSAGenState) Pc() *obj.Prog {
	return Pc
}

// SetLineno sets the current source line number (the global lineno).
func (s *SSAGenState) SetLineno(l int32) {
	lineno = l
}
// genssa appends entries to ptxt for each instruction in f.
// gcargs and gclocals are filled in with pointer maps for the frame.
//
// It emits every block and value via the architecture backend, resolves
// pending branches, optionally logs the generated Progs (plain text and,
// if enabled, an ssa.html column), emits static data, allocates the
// stack frame, and generates GC bitmaps and the frame prologue.
func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) {
	var s SSAGenState

	e := f.Config.Frontend().(*ssaExport)
	// We're about to emit a bunch of Progs.
	// Since the only way to get here is to explicitly request it,
	// just fail on unimplemented instead of trying to unwind our mess.
	e.mustImplement = true

	// Remember where each block starts.
	s.bstart = make([]*obj.Prog, f.NumBlocks())

	var valueProgs map[*obj.Prog]*ssa.Value
	var blockProgs map[*obj.Prog]*ssa.Block
	var logProgs = e.log
	if logProgs {
		valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues())
		blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
		f.Logf("genssa %s\n", f.Name)
		blockProgs[Pc] = f.Blocks[0]
	}

	// Emit basic blocks
	for i, b := range f.Blocks {
		s.bstart[b.ID] = Pc
		// Emit values in block
		Thearch.SSAMarkMoves(&s, b)
		for _, v := range b.Values {
			x := Pc
			Thearch.SSAGenValue(&s, v)
			if logProgs {
				// Attribute every Prog emitted for v back to v.
				for ; x != Pc; x = x.Link {
					valueProgs[x] = v
				}
			}
		}
		// Emit control flow instructions for block
		var next *ssa.Block
		if i < len(f.Blocks)-1 && (Debug['N'] == 0 || b.Kind == ssa.BlockCall) {
			// If -N, leave next==nil so every block with successors
			// ends in a JMP (except call blocks - plive doesn't like
			// select{send,recv} followed by a JMP call). Helps keep
			// line numbers for otherwise empty blocks.
			next = f.Blocks[i+1]
		}
		x := Pc
		Thearch.SSAGenBlock(&s, b, next)
		if logProgs {
			for ; x != Pc; x = x.Link {
				blockProgs[x] = b
			}
		}
	}

	// Resolve branches
	for _, br := range s.Branches {
		br.P.To.Val = s.bstart[br.B.ID]
	}

	if logProgs {
		for p := ptxt; p != nil; p = p.Link {
			// Named desc rather than s to avoid shadowing the
			// SSAGenState declared above.
			var desc string
			if v, ok := valueProgs[p]; ok {
				desc = v.String()
			} else if b, ok := blockProgs[p]; ok {
				desc = b.String()
			} else {
				desc = "   " // most value and branch strings are 2-3 characters long
			}
			f.Logf("%s\t%s\n", desc, p)
		}
		if f.Config.HTML != nil {
			saved := ptxt.Ctxt.LineHist.PrintFilenameOnly
			ptxt.Ctxt.LineHist.PrintFilenameOnly = true
			var buf bytes.Buffer
			buf.WriteString("<code>")
			buf.WriteString("<dl class=\"ssa-gen\">")
			for p := ptxt; p != nil; p = p.Link {
				buf.WriteString("<dt class=\"ssa-prog-src\">")
				if v, ok := valueProgs[p]; ok {
					buf.WriteString(v.HTML())
				} else if b, ok := blockProgs[p]; ok {
					buf.WriteString(b.HTML())
				}
				buf.WriteString("</dt>")
				buf.WriteString("<dd class=\"ssa-prog\">")
				buf.WriteString(html.EscapeString(p.String()))
				// Close the <dd> opened above. A stray "</li>" used to be
				// written here, but no <li> is ever opened in this document.
				buf.WriteString("</dd>")
			}
			buf.WriteString("</dl>")
			buf.WriteString("</code>")
			f.Config.HTML.WriteColumn("genssa", buf.String())
			ptxt.Ctxt.LineHist.PrintFilenameOnly = saved
		}
	}

	// Emit static data
	if f.StaticData != nil {
		for _, n := range f.StaticData.([]*Node) {
			if !gen_as_init(n, false) {
				// Format both arguments; the previous format string had a
				// single %v verb for the two arguments n and f.
				Fatalf("non-static data marked as static: %v, function %v\n\n", n, f)
			}
		}
	}

	// Allocate stack frame
	allocauto(ptxt)

	// Generate gc bitmaps.
	liveness(Curfn, ptxt, gcargs, gclocals)
	gcsymdup(gcargs)
	gcsymdup(gclocals)

	// Add frame prologue. Zero ambiguously live variables.
	Thearch.Defframe(ptxt)
	if Debug['f'] != 0 {
		frame(0)
	}

	// Remove leftover instrumentation from the instruction stream.
	removevardef(ptxt)

	f.Config.HTML.Close()
}
// movZero emits a register-indirect store of the constant 0 of the given
// width at offset from regnum, and reports how many bytes remain to be
// zeroed (nleft) and the offset at which to continue (noff).
func movZero(as obj.As, width int64, nbytes int64, offset int64, regnum int16) (nleft int64, noff int64) {
	// TODO: use zero register on archs that support it.
	p := Prog(as)
	p.From.Type = obj.TYPE_CONST
	p.From.Offset = 0
	p.To.Type = obj.TYPE_MEM
	p.To.Reg = regnum
	p.To.Offset = offset
	return nbytes - width, offset + width
}
// FloatingEQNEJump describes one conditional jump used to implement a
// floating-point == or != block: the jump instruction to emit and the
// index of the successor block it targets.
type FloatingEQNEJump struct {
	Jump  obj.As
	Index int
}
// oneFPJump emits a single conditional jump for a floating-point
// EQ/NE block, records it as an unresolved Branch, and returns the
// extended branches slice. The branch-prediction hint is negated when
// the jump targets successor 1, since likely applies to successor 0.
func oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction, branches []Branch) []Branch {
	p := Prog(jumps.Jump)
	p.To.Type = obj.TYPE_BRANCH
	to := jumps.Index
	branches = append(branches, Branch{p, b.Succs[to]})
	if to == 1 {
		likely = -likely
	}
	// liblink reorders the instruction stream as it sees fit.
	// Pass along what we know so liblink can make use of it.
	// TODO: Once we've fully switched to SSA,
	// make liblink leave our output alone.
	switch likely {
	case ssa.BranchUnlikely:
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
	case ssa.BranchLikely:
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 1
	}
	return branches
}
// SSAGenFPJump emits the jump sequence for a floating-point EQ/NE
// block. jumps[0] is used when the fallthrough block is successor 0,
// jumps[1] when it is successor 1; when neither successor follows
// immediately, the successor-1 sequence is emitted plus an explicit
// JMP to successor 1.
func SSAGenFPJump(s *SSAGenState, b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
	likely := b.Likely
	switch next {
	case b.Succs[0]:
		s.Branches = oneFPJump(b, &jumps[0][0], likely, s.Branches)
		s.Branches = oneFPJump(b, &jumps[0][1], likely, s.Branches)
	case b.Succs[1]:
		s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches)
		s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches)
	default:
		s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches)
		s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches)
		q := Prog(obj.AJMP)
		q.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, Branch{q, b.Succs[1]})
	}
}
// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
func AddAux(a *obj.Addr, v *ssa.Value) {
	AddAux2(a, v, v.AuxInt)
}
// AddAux2 adds the given integer offset plus any symbol information in
// v.Aux to the memory address a. It handles the three aux symbol kinds
// (extern, argument, auto) and fails on anything else.
func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
	if a.Type != obj.TYPE_MEM {
		v.Fatalf("bad AddAux addr %s", a)
	}
	// add integer offset
	a.Offset += offset

	// If no additional symbol offset, we're done.
	if v.Aux == nil {
		return
	}
	// Add symbol's offset from its base register.
	switch sym := v.Aux.(type) {
	case *ssa.ExternSymbol:
		a.Name = obj.NAME_EXTERN
		switch s := sym.Sym.(type) {
		case *Sym:
			a.Sym = Linksym(s)
		case *obj.LSym:
			a.Sym = s
		default:
			v.Fatalf("ExternSymbol.Sym is %T", s)
		}
	case *ssa.ArgSymbol:
		n := sym.Node.(*Node)
		a.Name = obj.NAME_PARAM
		a.Node = n
		a.Sym = Linksym(n.Orig.Sym)
		a.Offset += n.Xoffset // TODO: why do I have to add this here? I don't for auto variables.
	case *ssa.AutoSymbol:
		n := sym.Node.(*Node)
		a.Name = obj.NAME_AUTO
		a.Node = n
		a.Sym = Linksym(n.Sym)
	default:
		v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
	}
}
// extendIndex extends v to a full int width. Indexes narrower than int
// are sign- or zero-extended according to their type; an index already
// int-sized is returned unchanged, and a wider index is unimplemented.
func (s *state) extendIndex(v *ssa.Value) *ssa.Value {
	size := v.Type.Size()
	intSize := s.config.IntSize
	if size == intSize {
		return v
	}
	if size > intSize {
		// TODO: truncate 64-bit indexes on 32-bit pointer archs. We'd need to test
		// the high word and branch to out-of-bounds failure if it is not 0.
		s.Unimplementedf("64->32 index truncation not implemented")
		return v
	}

	// Extend value to the required size. Pick the op from the
	// (source size, target int size) pair.
	var op ssa.Op
	signed := v.Type.IsSigned()
	switch size {
	case 1:
		switch intSize {
		case 4:
			if signed {
				op = ssa.OpSignExt8to32
			} else {
				op = ssa.OpZeroExt8to32
			}
		case 8:
			if signed {
				op = ssa.OpSignExt8to64
			} else {
				op = ssa.OpZeroExt8to64
			}
		}
	case 2:
		switch intSize {
		case 4:
			if signed {
				op = ssa.OpSignExt16to32
			} else {
				op = ssa.OpZeroExt16to32
			}
		case 8:
			if signed {
				op = ssa.OpSignExt16to64
			} else {
				op = ssa.OpZeroExt16to64
			}
		}
	case 4:
		if intSize == 8 {
			if signed {
				op = ssa.OpSignExt32to64
			} else {
				op = ssa.OpZeroExt32to64
			}
		}
	}
	if op == 0 {
		if signed {
			s.Fatalf("bad signed index extension %s", v.Type)
		} else {
			s.Fatalf("bad unsigned index extension %s", v.Type)
		}
	}
	return s.newValue1(op, Types[TINT], v)
}
// SSARegNum returns the register (in cmd/internal/obj numbering) to
// which v has been allocated. Panics if v is not assigned to a
// register.
// TODO: Make this panic again once it stops happening routinely.
func SSARegNum(v *ssa.Value) int16 {
	reg := v.Block.Func.RegAlloc[v.ID]
	if reg == nil {
		// Report instead of panicking; returning register 0 keeps
		// compilation limping along for debugging.
		v.Unimplementedf("nil regnum for value: %s\n%s\n", v.LongString(), v.Block.Func)
		return 0
	}
	return Thearch.SSARegToReg[reg.(*ssa.Register).Num]
}
// AutoVar returns a *Node and int64 representing the auto variable and offset within it
// where v should be spilled.
func AutoVar(v *ssa.Value) (*Node, int64) {
	loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
	if v.Type.Size() > loc.Type.Size() {
		v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
	}
	return loc.N.(*Node), loc.Off
}
// fieldIdx finds the index of the field referred to by the ODOT node n.
// It panics if n's left operand is not a struct, if the matched field's
// offset disagrees with n.Xoffset, or if no field matches at all.
func fieldIdx(n *Node) int {
	t := n.Left.Type
	sym := n.Sym
	if !t.IsStruct() {
		panic("ODOT's LHS is not a struct")
	}

	for i, field := range t.Fields().Slice() {
		if field.Sym != sym {
			continue
		}
		if field.Offset != n.Xoffset {
			panic("field offset doesn't match")
		}
		return i
	}
	panic(fmt.Sprintf("can't find field in expr %s\n", n))

	// TODO: keep the result of this function somewhere in the ODOT Node
	// so we don't have to recompute it each time we need it.
}
// ssaExport exports a bunch of compiler services for the ssa backend.
type ssaExport struct {
	log           bool // emit compile-progress logging
	unimplemented bool // set once a feature was found to be unimplemented
	mustImplement bool // if set, unimplemented features are fatal errors
}
// The Type* accessors expose the compiler's basic types to the ssa
// package through its Frontend interface.
func (s *ssaExport) TypeBool() ssa.Type    { return Types[TBOOL] }
func (s *ssaExport) TypeInt8() ssa.Type    { return Types[TINT8] }
func (s *ssaExport) TypeInt16() ssa.Type   { return Types[TINT16] }
func (s *ssaExport) TypeInt32() ssa.Type   { return Types[TINT32] }
func (s *ssaExport) TypeInt64() ssa.Type   { return Types[TINT64] }
func (s *ssaExport) TypeUInt8() ssa.Type   { return Types[TUINT8] }
func (s *ssaExport) TypeUInt16() ssa.Type  { return Types[TUINT16] }
func (s *ssaExport) TypeUInt32() ssa.Type  { return Types[TUINT32] }
func (s *ssaExport) TypeUInt64() ssa.Type  { return Types[TUINT64] }
func (s *ssaExport) TypeFloat32() ssa.Type { return Types[TFLOAT32] }
func (s *ssaExport) TypeFloat64() ssa.Type { return Types[TFLOAT64] }
func (s *ssaExport) TypeInt() ssa.Type     { return Types[TINT] }
func (s *ssaExport) TypeUintptr() ssa.Type { return Types[TUINTPTR] }
func (s *ssaExport) TypeString() ssa.Type  { return Types[TSTRING] }
func (s *ssaExport) TypeBytePtr() ssa.Type { return Ptrto(Types[TUINT8]) }
// StringData returns a symbol (a *Sym wrapped in an interface) which
// is the data component of a global string constant containing s.
func (*ssaExport) StringData(s string) interface{} {
	// TODO: is idealstring correct? It might not matter...
	_, data := stringsym(s)
	return &ssa.ExternSymbol{Typ: idealstring, Sym: data}
}
// Auto allocates a new automatic variable of type t for the ssa backend.
func (e *ssaExport) Auto(t ssa.Type) ssa.GCNode {
	n := temp(t.(*Type))   // Note: adds new auto to Curfn.Func.Dcl list
	e.mustImplement = true // This modifies the input to SSA, so we want to make sure we succeed from here!
	return n
}
// SplitString splits a string-typed slot into its pointer and length
// components. For a non-address-taken local, two fresh named autos are
// created; otherwise the two halves alias the original slot at the
// appropriate offsets.
func (e *ssaExport) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	ptrType := Ptrto(Types[TUINT8])
	lenType := Types[TINT]
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this string up into two separate variables.
		p := e.namedAuto(n.Sym.Name+".ptr", ptrType)
		l := e.namedAuto(n.Sym.Name+".len", lenType)
		// Keyed composite literals: go vet flags unkeyed literals of
		// imported struct types.
		return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
}
// SplitInterface splits an interface-typed slot into its itab/type
// pointer and its data pointer. For a non-address-taken local, two
// fresh named autos are created; otherwise the halves alias the
// original slot.
func (e *ssaExport) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	t := Ptrto(Types[TUINT8])
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this interface up into two separate variables.
		f := ".itab"
		if n.Type.IsEmptyInterface() {
			f = ".type"
		}
		c := e.namedAuto(n.Sym.Name+f, t)
		d := e.namedAuto(n.Sym.Name+".data", t)
		// Keyed composite literals: go vet flags unkeyed literals of
		// imported struct types.
		return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: t, Off: name.Off},
		ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
}
// SplitSlice splits a slice-typed slot into pointer, length, and
// capacity components. For a non-address-taken local, three fresh
// named autos are created; otherwise the parts alias the original slot.
func (e *ssaExport) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	ptrType := Ptrto(name.Type.ElemType().(*Type))
	lenType := Types[TINT]
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this slice up into three separate variables.
		p := e.namedAuto(n.Sym.Name+".ptr", ptrType)
		l := e.namedAuto(n.Sym.Name+".len", lenType)
		c := e.namedAuto(n.Sym.Name+".cap", lenType)
		// Keyed composite literals: go vet flags unkeyed literals of
		// imported struct types.
		return ssa.LocalSlot{N: p, Type: ptrType, Off: 0},
			ssa.LocalSlot{N: l, Type: lenType, Off: 0},
			ssa.LocalSlot{N: c, Type: lenType, Off: 0}
	}
	// Return the three parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
}
// SplitComplex splits a complex-typed slot into its real and imaginary
// float components (float64 halves for complex128, float32 for
// complex64). For a non-address-taken local, two fresh named autos are
// created; otherwise the halves alias the original slot.
func (e *ssaExport) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	s := name.Type.Size() / 2
	var t *Type
	if s == 8 {
		t = Types[TFLOAT64]
	} else {
		t = Types[TFLOAT32]
	}
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this complex up into two separate variables.
		c := e.namedAuto(n.Sym.Name+".real", t)
		d := e.namedAuto(n.Sym.Name+".imag", t)
		// Keyed composite literals: go vet flags unkeyed literals of
		// imported struct types.
		return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{N: n, Type: t, Off: name.Off},
		ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
}
// SplitStruct returns the slot for field i of the struct-typed slot
// name. For a non-address-taken local, a fresh named auto is created;
// otherwise the field aliases the original slot at the field's offset.
func (e *ssaExport) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
	n := name.N.(*Node)
	st := name.Type
	ft := st.FieldType(i)
	if n.Class == PAUTO && !n.Addrtaken {
		// Note: the _ field may appear several times. But
		// have no fear, identically-named but distinct Autos are
		// ok, albeit maybe confusing for a debugger.
		x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft)
		// Keyed composite literal: go vet flags unkeyed literals of
		// imported struct types.
		return ssa.LocalSlot{N: x, Type: ft, Off: 0}
	}
	return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
}
// namedAuto returns a new AUTO variable with the given name and type.
// The node is fully initialized (addable, non-escaping, zero offset)
// and appended to the current function's declaration list.
func (e *ssaExport) namedAuto(name string, typ ssa.Type) ssa.GCNode {
	t := typ.(*Type)
	s := Lookup(name)
	n := Nod(ONAME, nil, nil)
	s.Def = n
	s.Def.Used = true
	n.Sym = s
	n.Type = t
	n.Class = PAUTO
	n.Addable = true
	n.Ullman = 1
	n.Esc = EscNever
	n.Xoffset = 0
	n.Name.Curfn = Curfn
	Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)

	dowidth(t)
	// This modifies the input to SSA, so from here on any
	// unimplemented feature must be a fatal error.
	e.mustImplement = true

	return n
}
// CanSSA reports whether variables of type t can be represented in SSA form.
func (e *ssaExport) CanSSA(t ssa.Type) bool {
	return canSSAType(t.(*Type))
}

// Line returns the source position string for the given line number.
func (e *ssaExport) Line(line int32) string {
	return linestr(line)
}
// Logf logs a message from the compiler.
func (e *ssaExport) Logf(msg string, args ...interface{}) {
	// If e was marked as unimplemented, anything could happen. Ignore.
	if e.log && !e.unimplemented {
		fmt.Printf(msg, args...)
	}
}

// Log reports whether compile-progress logging is enabled.
func (e *ssaExport) Log() bool {
	return e.log
}
// Fatalf reports a compiler error and exits.
func (e *ssaExport) Fatalf(line int32, msg string, args ...interface{}) {
	// If e was marked as unimplemented, anything could happen. Ignore.
	if !e.unimplemented {
		lineno = line
		Fatalf(msg, args...)
	}
}
// Unimplementedf reports that the function cannot be compiled.
// It will be removed once SSA work is complete.
func (e *ssaExport) Unimplementedf(line int32, msg string, args ...interface{}) {
	if e.mustImplement {
		lineno = line
		Fatalf(msg, args...)
	}
	const alwaysLog = false // enable to calculate top unimplemented features
	if !e.unimplemented && (e.log || alwaysLog) {
		// first implementation failure, print explanation
		fmt.Printf("SSA unimplemented: "+msg+"\n", args...)
	}
	e.unimplemented = true
}
// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssaExport) Warnl(line int32, fmt_ string, args ...interface{}) {
	Warnl(line, fmt_, args...)
}

// Debug_checknil reports whether nil-check debugging (-d checknil) is on.
func (e *ssaExport) Debug_checknil() bool {
	return Debug_checknil != 0
}
// Typ returns n's type as an ssa.Type, satisfying the ssa.GCNode interface.
func (n *Node) Typ() ssa.Type {
	return n.Type
}
cmd/compile: avoid write barrier in append fast path
When we are writing the result of an append back
to the same slice, we don’t need a write barrier
on the fast path.
This re-implements an optimization that was present
in the old backend.
Updates #14921
Fixes #14969
Sample code:
var x []byte
func p() {
x = append(x, 1, 2, 3)
}
Before:
"".p t=1 size=224 args=0x0 locals=0x48
0x0000 00000 (append.go:21) TEXT "".p(SB), $72-0
0x0000 00000 (append.go:21) MOVQ (TLS), CX
0x0009 00009 (append.go:21) CMPQ SP, 16(CX)
0x000d 00013 (append.go:21) JLS 199
0x0013 00019 (append.go:21) SUBQ $72, SP
0x0017 00023 (append.go:21) FUNCDATA $0, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (append.go:21) FUNCDATA $1, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (append.go:19) MOVQ "".x+16(SB), CX
0x001e 00030 (append.go:19) MOVQ "".x(SB), DX
0x0025 00037 (append.go:19) MOVQ "".x+8(SB), BX
0x002c 00044 (append.go:19) MOVQ BX, "".autotmp_0+64(SP)
0x0031 00049 (append.go:22) LEAQ 3(BX), BP
0x0035 00053 (append.go:22) CMPQ BP, CX
0x0038 00056 (append.go:22) JGT $0, 131
0x003a 00058 (append.go:22) MOVB $1, (DX)(BX*1)
0x003e 00062 (append.go:22) MOVB $2, 1(DX)(BX*1)
0x0043 00067 (append.go:22) MOVB $3, 2(DX)(BX*1)
0x0048 00072 (append.go:22) MOVQ BP, "".x+8(SB)
0x004f 00079 (append.go:22) MOVQ CX, "".x+16(SB)
0x0056 00086 (append.go:22) MOVL runtime.writeBarrier(SB), AX
0x005c 00092 (append.go:22) TESTB AL, AL
0x005e 00094 (append.go:22) JNE $0, 108
0x0060 00096 (append.go:22) MOVQ DX, "".x(SB)
0x0067 00103 (append.go:23) ADDQ $72, SP
0x006b 00107 (append.go:23) RET
0x006c 00108 (append.go:22) LEAQ "".x(SB), CX
0x0073 00115 (append.go:22) MOVQ CX, (SP)
0x0077 00119 (append.go:22) MOVQ DX, 8(SP)
0x007c 00124 (append.go:22) PCDATA $0, $0
0x007c 00124 (append.go:22) CALL runtime.writebarrierptr(SB)
0x0081 00129 (append.go:23) JMP 103
0x0083 00131 (append.go:22) LEAQ type.[]uint8(SB), AX
0x008a 00138 (append.go:22) MOVQ AX, (SP)
0x008e 00142 (append.go:22) MOVQ DX, 8(SP)
0x0093 00147 (append.go:22) MOVQ BX, 16(SP)
0x0098 00152 (append.go:22) MOVQ CX, 24(SP)
0x009d 00157 (append.go:22) MOVQ BP, 32(SP)
0x00a2 00162 (append.go:22) PCDATA $0, $0
0x00a2 00162 (append.go:22) CALL runtime.growslice(SB)
0x00a7 00167 (append.go:22) MOVQ 40(SP), DX
0x00ac 00172 (append.go:22) MOVQ 48(SP), AX
0x00b1 00177 (append.go:22) MOVQ 56(SP), CX
0x00b6 00182 (append.go:22) ADDQ $3, AX
0x00ba 00186 (append.go:19) MOVQ "".autotmp_0+64(SP), BX
0x00bf 00191 (append.go:22) MOVQ AX, BP
0x00c2 00194 (append.go:22) JMP 58
0x00c7 00199 (append.go:22) NOP
0x00c7 00199 (append.go:21) CALL runtime.morestack_noctxt(SB)
0x00cc 00204 (append.go:21) JMP 0
After:
"".p t=1 size=208 args=0x0 locals=0x48
0x0000 00000 (append.go:21) TEXT "".p(SB), $72-0
0x0000 00000 (append.go:21) MOVQ (TLS), CX
0x0009 00009 (append.go:21) CMPQ SP, 16(CX)
0x000d 00013 (append.go:21) JLS 191
0x0013 00019 (append.go:21) SUBQ $72, SP
0x0017 00023 (append.go:21) FUNCDATA $0, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (append.go:21) FUNCDATA $1, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB)
0x0017 00023 (append.go:19) MOVQ "".x+16(SB), CX
0x001e 00030 (append.go:19) MOVQ "".x+8(SB), DX
0x0025 00037 (append.go:19) MOVQ DX, "".autotmp_0+64(SP)
0x002a 00042 (append.go:19) MOVQ "".x(SB), BX
0x0031 00049 (append.go:22) LEAQ 3(DX), BP
0x0035 00053 (append.go:22) MOVQ BP, "".x+8(SB)
0x003c 00060 (append.go:22) CMPQ BP, CX
0x003f 00063 (append.go:22) JGT $0, 84
0x0041 00065 (append.go:22) MOVB $1, (BX)(DX*1)
0x0045 00069 (append.go:22) MOVB $2, 1(BX)(DX*1)
0x004a 00074 (append.go:22) MOVB $3, 2(BX)(DX*1)
0x004f 00079 (append.go:23) ADDQ $72, SP
0x0053 00083 (append.go:23) RET
0x0054 00084 (append.go:22) LEAQ type.[]uint8(SB), AX
0x005b 00091 (append.go:22) MOVQ AX, (SP)
0x005f 00095 (append.go:22) MOVQ BX, 8(SP)
0x0064 00100 (append.go:22) MOVQ DX, 16(SP)
0x0069 00105 (append.go:22) MOVQ CX, 24(SP)
0x006e 00110 (append.go:22) MOVQ BP, 32(SP)
0x0073 00115 (append.go:22) PCDATA $0, $0
0x0073 00115 (append.go:22) CALL runtime.growslice(SB)
0x0078 00120 (append.go:22) MOVQ 40(SP), CX
0x007d 00125 (append.go:22) MOVQ 56(SP), AX
0x0082 00130 (append.go:22) MOVQ AX, "".x+16(SB)
0x0089 00137 (append.go:22) MOVL runtime.writeBarrier(SB), AX
0x008f 00143 (append.go:22) TESTB AL, AL
0x0091 00145 (append.go:22) JNE $0, 168
0x0093 00147 (append.go:22) MOVQ CX, "".x(SB)
0x009a 00154 (append.go:22) MOVQ "".x(SB), BX
0x00a1 00161 (append.go:19) MOVQ "".autotmp_0+64(SP), DX
0x00a6 00166 (append.go:22) JMP 65
0x00a8 00168 (append.go:22) LEAQ "".x(SB), DX
0x00af 00175 (append.go:22) MOVQ DX, (SP)
0x00b3 00179 (append.go:22) MOVQ CX, 8(SP)
0x00b8 00184 (append.go:22) PCDATA $0, $0
0x00b8 00184 (append.go:22) CALL runtime.writebarrierptr(SB)
0x00bd 00189 (append.go:22) JMP 154
0x00bf 00191 (append.go:22) NOP
0x00bf 00191 (append.go:21) CALL runtime.morestack_noctxt(SB)
0x00c4 00196 (append.go:21) JMP 0
Change-Id: I77a41ad3a22557a4bb4654de7d6d24a029efe34a
Reviewed-on: https://go-review.googlesource.com/21813
Run-TryBot: Josh Bleecher Snyder <cdc248cc4d32e4fbfe04e45228163bcecd48a0b9@gmail.com>
TryBot-Result: Gobot Gobot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
Reviewed-by: Keith Randall <8c99c3a9284e493be632950b84cd789d08ed3e9d@golang.org>
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"bytes"
"fmt"
"html"
"os"
"strings"
"cmd/compile/internal/ssa"
"cmd/internal/obj"
"cmd/internal/sys"
)
// ssaEnabled reports whether the SSA backend may be used at all.
var ssaEnabled = true

// ssaConfig is the lazily built, shared SSA configuration; see initssa.
var ssaConfig *ssa.Config

// ssaExp is the single Frontend instance handed to the ssa package.
var ssaExp ssaExport
// initssa resets the shared frontend state and returns the SSA
// configuration, building it on first use.
func initssa() *ssa.Config {
	ssaExp.unimplemented = false
	ssaExp.mustImplement = true
	if ssaConfig == nil {
		ssaConfig = ssa.NewConfig(Thearch.LinkArch.Name, &ssaExp, Ctxt, Debug['N'] == 0)
	}
	return ssaConfig
}
// shouldssa reports whether function fn should be compiled with the
// SSA backend, based on the target architecture and the GOSSAFUNC,
// GOSSAPKG, and GOSSAHASH environment variables.
func shouldssa(fn *Node) bool {
	switch Thearch.LinkArch.Name {
	default:
		// Only available for testing.
		if os.Getenv("SSATEST") == "" {
			return false
		}
		// Generally available.
	case "amd64":
	}
	if !ssaEnabled {
		return false
	}

	// Environment variable control of SSA CG
	// 1. IF GOSSAFUNC == current function name THEN
	//       compile this function with SSA and log output to ssa.html

	// 2. IF GOSSAHASH == "" THEN
	//       compile this function (and everything else) with SSA

	// 3. IF GOSSAHASH == "n" or "N"
	//       IF GOSSAPKG == current package name THEN
	//          compile this function (and everything in this package) with SSA
	//       ELSE
	//          use the old back end for this function.
	//       This is for compatibility with existing test harness and should go away.

	// 4. IF GOSSAHASH is a suffix of the binary-rendered SHA1 hash of the function name THEN
	//       compile this function with SSA
	//    ELSE
	//       compile this function with the old back end.

	// Plan is for 3 to be removed when the tests are revised.
	// SSA is now default, and is disabled by setting
	// GOSSAHASH to n or N, or selectively with strings of
	// 0 and 1.

	name := fn.Func.Nname.Sym.Name

	funcname := os.Getenv("GOSSAFUNC")
	if funcname != "" {
		// If GOSSAFUNC is set, compile only that function.
		return name == funcname
	}

	pkg := os.Getenv("GOSSAPKG")
	if pkg != "" {
		// If GOSSAPKG is set, compile only that package.
		return localpkg.Name == pkg
	}

	return initssa().DebugHashMatch("GOSSAHASH", name)
}
// buildssa builds an SSA function from fn's AST: it sets up the state,
// allocates the entry block and starting values, records addresses of
// local declarations, converts the statement lists, validates labels
// and gotos, links forward references, and finally hands the function
// to ssa.Compile. Returns nil if errors were reported.
func buildssa(fn *Node) *ssa.Func {
	name := fn.Func.Nname.Sym.Name
	printssa := name == os.Getenv("GOSSAFUNC")
	if printssa {
		fmt.Println("generating SSA for", name)
		dumplist("buildssa-enter", fn.Func.Enter)
		dumplist("buildssa-body", fn.Nbody)
		dumplist("buildssa-exit", fn.Func.Exit)
	}

	var s state
	s.pushLine(fn.Lineno)
	defer s.popLine()

	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}
	if fn.Func.Pragma&Nowritebarrier != 0 {
		s.noWB = true
	}
	defer func() {
		// Propagate the first write-barrier line back to the node
		// so //go:nowritebarrier violations can be reported.
		if s.WBLineno != 0 {
			fn.Func.WBLineno = s.WBLineno
		}
	}()
	// TODO(khr): build config just once at the start of the compiler binary

	ssaExp.log = printssa
	s.config = initssa()
	s.f = s.config.NewFunc()
	s.f.Name = name
	s.exitCode = fn.Func.Exit
	s.panics = map[funcLine]*ssa.Block{}

	if name == os.Getenv("GOSSAFUNC") {
		// TODO: tempfile? it is handy to have the location
		// of this file be stable, so you can just reload in the browser.
		s.config.HTML = ssa.NewHTMLWriter("ssa.html", s.config, name)
		// TODO: generate and print a mapping from nodes to values and blocks
	}
	defer func() {
		if !printssa {
			s.config.HTML.Close()
		}
	}()

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.labeledNodes = map[*Node]*ssaLabel{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, ssa.TypeMem)
	s.sp = s.entryNewValue0(ssa.OpSP, Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, Types[TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[&memVar] = s.startmem

	s.varsyms = map[*Node]interface{}{}

	// Generate addresses of local declarations
	s.decladdrs = map[*Node]*ssa.Value{}
	for _, n := range fn.Func.Dcl {
		switch n.Class {
		case PPARAM, PPARAMOUT:
			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sp)
			if n.Class == PPARAMOUT && s.canSSA(n) {
				// Save ssa-able PPARAMOUT variables so we can
				// store them back to the stack at the end of
				// the function.
				s.returns = append(s.returns, n)
			}
		case PAUTO | PHEAP:
			// TODO this looks wrong for PAUTO|PHEAP, no vardef, but also no definition
			aux := s.lookupSymbol(n, &ssa.AutoSymbol{Typ: n.Type, Node: n})
			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sp)
		case PPARAM | PHEAP, PPARAMOUT | PHEAP:
			// This ends up wrong, have to do it at the PARAM node instead.
		case PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		case PFUNC:
			// local function - already handled by frontend
		default:
			str := ""
			if n.Class&PHEAP != 0 {
				str = ",heap"
			}
			s.Unimplementedf("local variable with class %s%s unimplemented", classnames[n.Class&^PHEAP], str)
		}
	}

	// Convert the AST-based IR to the SSA-based IR
	s.stmts(fn.Func.Enter)
	s.stmts(fn.Nbody)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Func.Endlineno)
		s.exit()
		s.popLine()
	}

	// Check that we used all labels
	for name, lab := range s.labels {
		if !lab.used() && !lab.reported {
			yyerrorl(lab.defNode.Lineno, "label %v defined and not used", name)
			lab.reported = true
		}
		if lab.used() && !lab.defined() && !lab.reported {
			yyerrorl(lab.useNode.Lineno, "label %v not defined", name)
			lab.reported = true
		}
	}

	// Check any forward gotos. Non-forward gotos have already been checked.
	for _, n := range s.fwdGotos {
		lab := s.labels[n.Left.Sym.Name]
		// If the label is undefined, we have already printed an error.
		if lab.defined() {
			s.checkgoto(n, lab.defNode)
		}
	}

	if nerrors > 0 {
		s.f.Free()
		return nil
	}

	// Link up variable uses to variable definitions
	s.linkForwardReferences()

	// Don't carry a reference to this around longer than necessary
	s.exitCode = Nodes{}

	// Main call to ssa package to compile function
	ssa.Compile(s.f)

	return s.f
}
// state holds all the working data used while converting one function's
// AST to SSA form.
type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// labels and labeled control flow nodes (OFOR, OSWITCH, OSELECT) in f
	labels       map[string]*ssaLabel
	labeledNodes map[*Node]*ssaLabel

	// gotos that jump forward; required for deferred checkgoto calls
	fwdGotos []*Node

	// Code that must precede any return
	// (e.g., copying value of heap-escaped paramout back to true paramout)
	exitCode Nodes

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	vars map[*Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[*Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables.
	decladdrs map[*Node]*ssa.Value

	// symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
	varsyms map[*Node]interface{}

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value

	// line number stack. The current line number is top of stack
	line []int32

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	// list of FwdRef values.
	fwdRefs []*ssa.Value

	// list of PPARAMOUT (return) variables. Does not include PPARAM|PHEAP vars.
	returns []*Node

	cgoUnsafeArgs bool  // function was marked //go:cgo_unsafe_args
	noWB          bool  // function was marked //go:nowritebarrier
	WBLineno      int32 // line number of first write barrier. 0=no write barriers
}
// funcLine identifies a call site by callee and line number; it keys
// the panic-call deduplication map in state.
type funcLine struct {
	f    *Node
	line int32
}
// ssaLabel records everything known about one label in the function
// being built: its target blocks and the defining/using AST nodes used
// for error reporting.
type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
	defNode        *Node      // label definition Node (OLABEL)
	// Label use Node (OGOTO, OBREAK, OCONTINUE).
	// Used only for error detection and reporting.
	// There might be multiple uses, but we only need to track one.
	useNode  *Node
	reported bool // reported indicates whether an error has already been reported for this label
}
// defined reports whether the label has a definition (OLABEL node).
func (l *ssaLabel) defined() bool { return l.defNode != nil }

// used reports whether the label has a use (OGOTO, OBREAK, or OCONTINUE node).
func (l *ssaLabel) used() bool { return l.useNode != nil }
// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *Sym) *ssaLabel {
	if lab, ok := s.labels[sym.Name]; ok && lab != nil {
		return lab
	}
	lab := new(ssaLabel)
	s.labels[sym.Name] = lab
	return lab
}
// The following methods forward logging and diagnostics to the config's
// frontend, supplying the current line number from the line stack.
func (s *state) Logf(msg string, args ...interface{}) { s.config.Logf(msg, args...) }
func (s *state) Log() bool                            { return s.config.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) { s.config.Fatalf(s.peekLine(), msg, args...) }
func (s *state) Unimplementedf(msg string, args ...interface{}) {
	s.config.Unimplementedf(s.peekLine(), msg, args...)
}
func (s *state) Warnl(line int32, msg string, args ...interface{}) { s.config.Warnl(line, msg, args...) }
func (s *state) Debug_checknil() bool                              { return s.config.Debug_checknil() }
var (
	// dummy node for the memory variable
	memVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "mem"}}

	// dummy nodes for temporary variables
	ptrVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ptr"}}
	newlenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "newlen"}}
	capVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "cap"}}
	typVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "typ"}}
	idataVar  = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "idata"}}
	okVar     = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ok"}}
	deltaVar  = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "delta"}}
)
// startBlock makes b the block that subsequently generated code is
// appended to. It is a fatal error if another block is still open.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	s.vars = make(map[*Node]*ssa.Value)
}
// endBlock finishes the block currently being generated, records its
// variable definitions in s.defvars, and returns it. It returns nil when
// no block is open, i.e. when no code flows to the current point.
func (s *state) endBlock() *ssa.Block {
	blk := s.curBlock
	if blk == nil {
		return nil
	}
	// Grow defvars so it can be indexed by this block's ID.
	for int(blk.ID) >= len(s.defvars) {
		s.defvars = append(s.defvars, nil)
	}
	s.defvars[blk.ID] = s.vars
	s.curBlock = nil
	s.vars = nil
	blk.Line = s.peekLine()
	return blk
}
// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line int32) {
	s.line = append(s.line, line)
}

// popLine pops the top of the line number stack.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
}

// peekLine returns the line number on top of the line number stack
// without popping it.
func (s *state) peekLine() int32 {
	return s.line[len(s.line)-1]
}

// Error reports a (non-fatal) compile error at the current line.
func (s *state) Error(msg string, args ...interface{}) {
	yyerrorl(s.peekLine(), msg, args...)
}
// The newValue* helpers construct an ssa.Value in the current block,
// stamped with the line number on top of the line stack.

// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekLine(), op, t)
}

// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekLine(), op, t, aux)
}

// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekLine(), op, t, auxint)
}

// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1(s.peekLine(), op, t, arg)
}

// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1A(s.peekLine(), op, t, aux, arg)
}

// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t ssa.Type, aux int64, arg *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue1I(s.peekLine(), op, t, aux, arg)
}

// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2(s.peekLine(), op, t, arg0, arg1)
}

// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue2I(s.peekLine(), op, t, aux, arg0, arg1)
}

// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t ssa.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3(s.peekLine(), op, t, arg0, arg1, arg2)
}

// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
	return s.curBlock.NewValue3I(s.peekLine(), op, t, aux, arg0, arg1, arg2)
}
// The entryNewValue* helpers construct an ssa.Value directly in the
// function's entry block rather than the current block.

// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t ssa.Type) *ssa.Value {
	return s.f.Entry.NewValue0(s.peekLine(), op, t)
}

// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
	return s.f.Entry.NewValue0A(s.peekLine(), op, t, aux)
}

// entryNewValue0I adds a new value with no arguments and an auxint value to the entry block.
func (s *state) entryNewValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
	return s.f.Entry.NewValue0I(s.peekLine(), op, t, auxint)
}

// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1(s.peekLine(), op, t, arg)
}

// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t ssa.Type, auxint int64, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1I(s.peekLine(), op, t, auxint, arg)
}

// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue1A(s.peekLine(), op, t, aux, arg)
}

// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
	return s.f.Entry.NewValue2(s.peekLine(), op, t, arg0, arg1)
}
// const* routines add a new const value to the entry block,
// stamped with the line number on top of the line stack.
func (s *state) constSlice(t ssa.Type) *ssa.Value       { return s.f.ConstSlice(s.peekLine(), t) }
func (s *state) constInterface(t ssa.Type) *ssa.Value   { return s.f.ConstInterface(s.peekLine(), t) }
func (s *state) constNil(t ssa.Type) *ssa.Value         { return s.f.ConstNil(s.peekLine(), t) }
func (s *state) constEmptyString(t ssa.Type) *ssa.Value { return s.f.ConstEmptyString(s.peekLine(), t) }
func (s *state) constBool(c bool) *ssa.Value {
	return s.f.ConstBool(s.peekLine(), Types[TBOOL], c)
}
func (s *state) constInt8(t ssa.Type, c int8) *ssa.Value {
	return s.f.ConstInt8(s.peekLine(), t, c)
}
func (s *state) constInt16(t ssa.Type, c int16) *ssa.Value {
	return s.f.ConstInt16(s.peekLine(), t, c)
}
func (s *state) constInt32(t ssa.Type, c int32) *ssa.Value {
	return s.f.ConstInt32(s.peekLine(), t, c)
}
func (s *state) constInt64(t ssa.Type, c int64) *ssa.Value {
	return s.f.ConstInt64(s.peekLine(), t, c)
}
func (s *state) constFloat32(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat32(s.peekLine(), t, c)
}
func (s *state) constFloat64(t ssa.Type, c float64) *ssa.Value {
	return s.f.ConstFloat64(s.peekLine(), t, c)
}

// constInt emits a platform-int-sized constant: 64-bit when the target's
// int is 8 bytes, otherwise 32-bit (fatal if c does not fit in 32 bits).
func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
	if s.config.IntSize == 8 {
		return s.constInt64(t, c)
	}
	if int64(int32(c)) != c {
		s.Fatalf("integer constant too big %d", c)
	}
	return s.constInt32(t, int32(c))
}
// stmts converts each statement in the list a to SSA and adds it to s.
// It is a synonym for stmtList; the former duplicated loop body has been
// deduplicated by delegation.
func (s *state) stmts(a Nodes) {
	s.stmtList(a)
}

// stmtList converts each statement in the list l to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
	for _, n := range l.Slice() {
		s.stmt(n)
	}
}
// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
	s.pushLine(n.Lineno)
	defer s.popLine()
	// If s.curBlock is nil, then we're about to generate dead code.
	// We can't just short-circuit here, though,
	// because we check labels and gotos as part of SSA generation.
	// Provide a block for the dead code so that we don't have
	// to add special cases everywhere else.
	if s.curBlock == nil {
		dead := s.f.NewBlock(ssa.BlockPlain)
		s.startBlock(dead)
	}
	s.stmtList(n.Ninit)
	switch n.Op {
	case OBLOCK:
		s.stmtList(n.List)
	// No-ops
	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:
	// Expression statements
	case OCALLFUNC, OCALLMETH, OCALLINTER:
		s.call(n, callNormal)
		// Calls known never to return (runtime.throw, gopanic, ...)
		// terminate the block with BlockExit.
		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class == PFUNC &&
			(compiling_runtime != 0 && n.Left.Sym.Name == "throw" ||
				n.Left.Sym.Pkg == Runtimepkg && (n.Left.Sym.Name == "gopanic" || n.Left.Sym.Name == "selectgo" || n.Left.Sym.Name == "block")) {
			m := s.mem()
			b := s.endBlock()
			b.Kind = ssa.BlockExit
			b.SetControl(m)
			// TODO: never rewrite OPANIC to OCALLFUNC in the
			// first place. Need to wait until all backends
			// go through SSA.
		}
	case ODEFER:
		// defer f(...): emit the call with the defer convention.
		s.call(n.Left, callDefer)
	case OPROC:
		// go f(...): emit the call with the goroutine-start convention.
		s.call(n.Left, callGo)
	case OAS2DOTTYPE:
		// v, ok = x.(T)
		res, resok := s.dottype(n.Rlist.First(), true)
		s.assign(n.List.First(), res, needwritebarrier(n.List.First(), n.Rlist.First()), false, n.Lineno, 0)
		s.assign(n.List.Second(), resok, false, false, n.Lineno, 0)
		return
	case ODCL:
		if n.Left.Class&PHEAP == 0 {
			return
		}
		if compiling_runtime != 0 {
			Fatalf("%v escapes to heap, not allowed in runtime.", n)
		}
		// TODO: the old pass hides the details of PHEAP
		// variables behind ONAME nodes. Figure out if it's better
		// to rewrite the tree and make the heapaddr construct explicit
		// or to keep this detail hidden behind the scenes.
		palloc := prealloc[n.Left]
		if palloc == nil {
			palloc = callnew(n.Left.Type)
			prealloc[n.Left] = palloc
		}
		r := s.expr(palloc)
		s.assign(n.Left.Name.Heapaddr, r, false, false, n.Lineno, 0)
	case OLABEL:
		sym := n.Left.Sym
		if isblanksym(sym) {
			// Empty identifier is valid but useless.
			// See issues 11589, 11593.
			return
		}
		lab := s.label(sym)
		// Associate label with its control flow node, if any
		if ctl := n.Name.Defn; ctl != nil {
			switch ctl.Op {
			case OFOR, OSWITCH, OSELECT:
				s.labeledNodes[ctl] = lab
			}
		}
		if !lab.defined() {
			lab.defNode = n
		} else {
			s.Error("label %v already defined at %v", sym, linestr(lab.defNode.Lineno))
			lab.reported = true
		}
		// The label might already have a target block via a goto.
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}
		// go to that label (we pretend "label:" is preceded by "goto label")
		b := s.endBlock()
		b.AddEdgeTo(lab.target)
		s.startBlock(lab.target)
	case OGOTO:
		sym := n.Left.Sym
		lab := s.label(sym)
		if lab.target == nil {
			lab.target = s.f.NewBlock(ssa.BlockPlain)
		}
		if !lab.used() {
			lab.useNode = n
		}
		if lab.defined() {
			s.checkgoto(n, lab.defNode)
		} else {
			// Forward goto; validated later once the label is seen.
			s.fwdGotos = append(s.fwdGotos, n)
		}
		b := s.endBlock()
		b.AddEdgeTo(lab.target)
	case OAS, OASWB:
		// Check whether we can generate static data rather than code.
		// If so, ignore n and defer data generation until codegen.
		// Failure to do this causes writes to readonly symbols.
		if gen_as_init(n, true) {
			var data []*Node
			if s.f.StaticData != nil {
				data = s.f.StaticData.([]*Node)
			}
			s.f.StaticData = append(data, n)
			return
		}
		if n.Left == n.Right && n.Left.Op == ONAME {
			// An x=x assignment. No point in doing anything
			// here. In addition, skipping this assignment
			// prevents generating:
			//   VARDEF x
			//   COPY x -> x
			// which is bad because x is incorrectly considered
			// dead before the vardef. See issue #14904.
			return
		}
		var t *Type
		if n.Right != nil {
			t = n.Right.Type
		} else {
			t = n.Left.Type
		}
		// Evaluate RHS.
		rhs := n.Right
		if rhs != nil {
			switch rhs.Op {
			case OSTRUCTLIT, OARRAYLIT:
				// All literals with nonzero fields have already been
				// rewritten during walk. Any that remain are just T{}
				// or equivalents. Use the zero value.
				if !iszero(rhs) {
					Fatalf("literal with nonzero value in SSA: %v", rhs)
				}
				rhs = nil
			case OAPPEND:
				// If we're writing the result of an append back to the same slice,
				// handle it specially to avoid write barriers on the fast (non-growth) path.
				// If the slice can be SSA'd, it'll be on the stack,
				// so there will be no write barriers,
				// so there's no need to attempt to prevent them.
				if samesafeexpr(n.Left, rhs.List.First()) && !s.canSSA(n.Left) {
					s.append(rhs, true)
					return
				}
			}
		}
		var r *ssa.Value
		needwb := n.Op == OASWB && rhs != nil
		deref := !canSSAType(t)
		if deref {
			if rhs == nil {
				r = nil // Signal assign to use OpZero.
			} else {
				r = s.addr(rhs, false)
			}
		} else {
			if rhs == nil {
				r = s.zeroVal(t)
			} else {
				r = s.expr(rhs)
			}
		}
		if rhs != nil && rhs.Op == OAPPEND {
			// The frontend gets rid of the write barrier to enable the special OAPPEND
			// handling above, but since this is not a special case, we need it.
			// TODO: just add a ptr graying to the end of growslice?
			// TODO: check whether we need to provide special handling and a write barrier
			// for ODOTTYPE and ORECV also.
			// They get similar wb-removal treatment in walk.go:OAS.
			needwb = true
		}
		var skip skipMask
		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
			// We're assigning a slicing operation back to its source.
			// Don't write back fields we aren't changing. See issue #14855.
			i := rhs.Right.Left
			var j, k *Node
			if rhs.Op == OSLICE3 {
				j = rhs.Right.Right.Left
				k = rhs.Right.Right.Right
			} else {
				j = rhs.Right.Right
			}
			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
				// [0:...] is the same as [:...]
				i = nil
			}
			// TODO: detect defaults for len/cap also.
			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
			//    tmp = len(*p)
			//    (*p)[:tmp]
			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
			//      j = nil
			//}
			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
			//      k = nil
			//}
			if i == nil {
				skip |= skipPtr
				if j == nil {
					skip |= skipLen
				}
				if k == nil {
					skip |= skipCap
				}
			}
		}
		s.assign(n.Left, r, needwb, deref, n.Lineno, skip)
	case OIF:
		bThen := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		var bElse *ssa.Block
		if n.Rlist.Len() != 0 {
			bElse = s.f.NewBlock(ssa.BlockPlain)
			s.condBranch(n.Left, bThen, bElse, n.Likely)
		} else {
			s.condBranch(n.Left, bThen, bEnd, n.Likely)
		}
		s.startBlock(bThen)
		s.stmts(n.Nbody)
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}
		if n.Rlist.Len() != 0 {
			s.startBlock(bElse)
			s.stmtList(n.Rlist)
			if b := s.endBlock(); b != nil {
				b.AddEdgeTo(bEnd)
			}
		}
		s.startBlock(bEnd)
	case ORETURN:
		s.stmtList(n.List)
		s.exit()
	case ORETJMP:
		s.stmtList(n.List)
		b := s.exit()
		b.Kind = ssa.BlockRetJmp // override BlockRet
		b.Aux = n.Left.Sym
	case OCONTINUE, OBREAK:
		var op string
		var to *ssa.Block
		switch n.Op {
		case OCONTINUE:
			op = "continue"
			to = s.continueTo
		case OBREAK:
			op = "break"
			to = s.breakTo
		}
		if n.Left == nil {
			// plain break/continue
			if to == nil {
				s.Error("%s is not in a loop", op)
				return
			}
			// nothing to do; "to" is already the correct target
		} else {
			// labeled break/continue; look up the target
			sym := n.Left.Sym
			lab := s.label(sym)
			if !lab.used() {
				lab.useNode = n.Left
			}
			if !lab.defined() {
				s.Error("%s label not defined: %v", op, sym)
				lab.reported = true
				return
			}
			switch n.Op {
			case OCONTINUE:
				to = lab.continueTarget
			case OBREAK:
				to = lab.breakTarget
			}
			if to == nil {
				// Valid label but not usable with a break/continue here, e.g.:
				// for {
				// 	continue abc
				// }
				// abc:
				// for {}
				s.Error("invalid %s label %v", op, sym)
				lab.reported = true
				return
			}
		}
		b := s.endBlock()
		b.AddEdgeTo(to)
	case OFOR:
		// OFOR: for Ninit; Left; Right { Nbody }
		bCond := s.f.NewBlock(ssa.BlockPlain)
		bBody := s.f.NewBlock(ssa.BlockPlain)
		bIncr := s.f.NewBlock(ssa.BlockPlain)
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		// first, jump to condition test
		b := s.endBlock()
		b.AddEdgeTo(bCond)
		// generate code to test condition
		s.startBlock(bCond)
		if n.Left != nil {
			s.condBranch(n.Left, bBody, bEnd, 1)
		} else {
			// for {}: no condition, unconditionally enter the body.
			b := s.endBlock()
			b.Kind = ssa.BlockPlain
			b.AddEdgeTo(bBody)
		}
		// set up for continue/break in body
		prevContinue := s.continueTo
		prevBreak := s.breakTo
		s.continueTo = bIncr
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled for loop
			lab.continueTarget = bIncr
			lab.breakTarget = bEnd
		}
		// generate body
		s.startBlock(bBody)
		s.stmts(n.Nbody)
		// tear down continue/break
		s.continueTo = prevContinue
		s.breakTo = prevBreak
		if lab != nil {
			lab.continueTarget = nil
			lab.breakTarget = nil
		}
		// done with body, goto incr
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bIncr)
		}
		// generate incr
		s.startBlock(bIncr)
		if n.Right != nil {
			s.stmt(n.Right)
		}
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bCond)
		}
		s.startBlock(bEnd)
	case OSWITCH, OSELECT:
		// These have been mostly rewritten by the front end into their Nbody fields.
		// Our main task is to correctly hook up any break statements.
		bEnd := s.f.NewBlock(ssa.BlockPlain)
		prevBreak := s.breakTo
		s.breakTo = bEnd
		lab := s.labeledNodes[n]
		if lab != nil {
			// labeled
			lab.breakTarget = bEnd
		}
		// generate body code
		s.stmts(n.Nbody)
		s.breakTo = prevBreak
		if lab != nil {
			lab.breakTarget = nil
		}
		// OSWITCH never falls through (s.curBlock == nil here).
		// OSELECT does not fall through if we're calling selectgo.
		// OSELECT does fall through if we're calling selectnb{send,recv}[2].
		// In those latter cases, go to the code after the select.
		if b := s.endBlock(); b != nil {
			b.AddEdgeTo(bEnd)
		}
		s.startBlock(bEnd)
	case OVARKILL:
		// Insert a varkill op to record that a variable is no longer live.
		// We only care about liveness info at call sites, so putting the
		// varkill in the store chain is enough to keep it correctly ordered
		// with respect to call ops.
		if !s.canSSA(n.Left) {
			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, n.Left, s.mem())
		}
	case OVARLIVE:
		// Insert a varlive op to record that a variable is still live.
		if !n.Left.Addrtaken {
			s.Fatalf("VARLIVE variable %s must have Addrtaken set", n.Left)
		}
		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, ssa.TypeMem, n.Left, s.mem())
	case OCHECKNIL:
		p := s.expr(n.Left)
		s.nilCheck(p)
	default:
		s.Unimplementedf("unhandled stmt %s", opnames[n.Op])
	}
}
// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
	// Functions containing defer must call Deferreturn before returning.
	if hasdefer {
		s.rtcall(Deferreturn, true, nil)
	}
	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
	// variables back to the stack.
	s.stmts(s.exitCode)
	// Store SSAable PPARAMOUT variables back to stack locations.
	for _, n := range s.returns {
		aux := &ssa.ArgSymbol{Typ: n.Type, Node: n}
		addr := s.newValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sp)
		val := s.variable(n, n.Type)
		// VarDef before the store marks the start of the variable's live range.
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem())
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, n.Type.Size(), addr, val, s.mem())
		// TODO: if val is ever spilled, we'd like to use the
		// PPARAMOUT slot for spilling it. That won't happen
		// currently.
	}
	// Do actual return.
	m := s.mem()
	b := s.endBlock()
	b.Kind = ssa.BlockRet
	b.SetControl(m)
	return b
}
// opAndType pairs a syntax-tree operator with a concrete operand kind.
// It is the key type of the opToSSA lowering table.
type opAndType struct {
	op    Op
	etype EType
}
// opToSSA maps a (syntax-tree operator, concrete operand kind) pair to the
// machine-independent SSA opcode that implements it. Missing combinations
// are reported by ssaOp as unimplemented.
var opToSSA = map[opAndType]ssa.Op{
	opAndType{OADD, TINT8}:    ssa.OpAdd8,
	opAndType{OADD, TUINT8}:   ssa.OpAdd8,
	opAndType{OADD, TINT16}:   ssa.OpAdd16,
	opAndType{OADD, TUINT16}:  ssa.OpAdd16,
	opAndType{OADD, TINT32}:   ssa.OpAdd32,
	opAndType{OADD, TUINT32}:  ssa.OpAdd32,
	opAndType{OADD, TPTR32}:   ssa.OpAdd32,
	opAndType{OADD, TINT64}:   ssa.OpAdd64,
	opAndType{OADD, TUINT64}:  ssa.OpAdd64,
	opAndType{OADD, TPTR64}:   ssa.OpAdd64,
	opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
	opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,

	opAndType{OSUB, TINT8}:    ssa.OpSub8,
	opAndType{OSUB, TUINT8}:   ssa.OpSub8,
	opAndType{OSUB, TINT16}:   ssa.OpSub16,
	opAndType{OSUB, TUINT16}:  ssa.OpSub16,
	opAndType{OSUB, TINT32}:   ssa.OpSub32,
	opAndType{OSUB, TUINT32}:  ssa.OpSub32,
	opAndType{OSUB, TINT64}:   ssa.OpSub64,
	opAndType{OSUB, TUINT64}:  ssa.OpSub64,
	opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
	opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,

	opAndType{ONOT, TBOOL}: ssa.OpNot,

	opAndType{OMINUS, TINT8}:    ssa.OpNeg8,
	opAndType{OMINUS, TUINT8}:   ssa.OpNeg8,
	opAndType{OMINUS, TINT16}:   ssa.OpNeg16,
	opAndType{OMINUS, TUINT16}:  ssa.OpNeg16,
	opAndType{OMINUS, TINT32}:   ssa.OpNeg32,
	opAndType{OMINUS, TUINT32}:  ssa.OpNeg32,
	opAndType{OMINUS, TINT64}:   ssa.OpNeg64,
	opAndType{OMINUS, TUINT64}:  ssa.OpNeg64,
	opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
	opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,

	opAndType{OCOM, TINT8}:   ssa.OpCom8,
	opAndType{OCOM, TUINT8}:  ssa.OpCom8,
	opAndType{OCOM, TINT16}:  ssa.OpCom16,
	opAndType{OCOM, TUINT16}: ssa.OpCom16,
	opAndType{OCOM, TINT32}:  ssa.OpCom32,
	opAndType{OCOM, TUINT32}: ssa.OpCom32,
	opAndType{OCOM, TINT64}:  ssa.OpCom64,
	opAndType{OCOM, TUINT64}: ssa.OpCom64,

	opAndType{OIMAG, TCOMPLEX64}:  ssa.OpComplexImag,
	opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
	opAndType{OREAL, TCOMPLEX64}:  ssa.OpComplexReal,
	opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,

	opAndType{OMUL, TINT8}:    ssa.OpMul8,
	opAndType{OMUL, TUINT8}:   ssa.OpMul8,
	opAndType{OMUL, TINT16}:   ssa.OpMul16,
	opAndType{OMUL, TUINT16}:  ssa.OpMul16,
	opAndType{OMUL, TINT32}:   ssa.OpMul32,
	opAndType{OMUL, TUINT32}:  ssa.OpMul32,
	opAndType{OMUL, TINT64}:   ssa.OpMul64,
	opAndType{OMUL, TUINT64}:  ssa.OpMul64,
	opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
	opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,

	opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
	opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,

	opAndType{OHMUL, TINT8}:   ssa.OpHmul8,
	opAndType{OHMUL, TUINT8}:  ssa.OpHmul8u,
	opAndType{OHMUL, TINT16}:  ssa.OpHmul16,
	opAndType{OHMUL, TUINT16}: ssa.OpHmul16u,
	opAndType{OHMUL, TINT32}:  ssa.OpHmul32,
	opAndType{OHMUL, TUINT32}: ssa.OpHmul32u,

	opAndType{ODIV, TINT8}:   ssa.OpDiv8,
	opAndType{ODIV, TUINT8}:  ssa.OpDiv8u,
	opAndType{ODIV, TINT16}:  ssa.OpDiv16,
	opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
	opAndType{ODIV, TINT32}:  ssa.OpDiv32,
	opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
	opAndType{ODIV, TINT64}:  ssa.OpDiv64,
	opAndType{ODIV, TUINT64}: ssa.OpDiv64u,

	opAndType{OMOD, TINT8}:   ssa.OpMod8,
	opAndType{OMOD, TUINT8}:  ssa.OpMod8u,
	opAndType{OMOD, TINT16}:  ssa.OpMod16,
	opAndType{OMOD, TUINT16}: ssa.OpMod16u,
	opAndType{OMOD, TINT32}:  ssa.OpMod32,
	opAndType{OMOD, TUINT32}: ssa.OpMod32u,
	opAndType{OMOD, TINT64}:  ssa.OpMod64,
	opAndType{OMOD, TUINT64}: ssa.OpMod64u,

	opAndType{OAND, TINT8}:   ssa.OpAnd8,
	opAndType{OAND, TUINT8}:  ssa.OpAnd8,
	opAndType{OAND, TINT16}:  ssa.OpAnd16,
	opAndType{OAND, TUINT16}: ssa.OpAnd16,
	opAndType{OAND, TINT32}:  ssa.OpAnd32,
	opAndType{OAND, TUINT32}: ssa.OpAnd32,
	opAndType{OAND, TINT64}:  ssa.OpAnd64,
	opAndType{OAND, TUINT64}: ssa.OpAnd64,

	opAndType{OOR, TINT8}:   ssa.OpOr8,
	opAndType{OOR, TUINT8}:  ssa.OpOr8,
	opAndType{OOR, TINT16}:  ssa.OpOr16,
	opAndType{OOR, TUINT16}: ssa.OpOr16,
	opAndType{OOR, TINT32}:  ssa.OpOr32,
	opAndType{OOR, TUINT32}: ssa.OpOr32,
	opAndType{OOR, TINT64}:  ssa.OpOr64,
	opAndType{OOR, TUINT64}: ssa.OpOr64,

	opAndType{OXOR, TINT8}:   ssa.OpXor8,
	opAndType{OXOR, TUINT8}:  ssa.OpXor8,
	opAndType{OXOR, TINT16}:  ssa.OpXor16,
	opAndType{OXOR, TUINT16}: ssa.OpXor16,
	opAndType{OXOR, TINT32}:  ssa.OpXor32,
	opAndType{OXOR, TUINT32}: ssa.OpXor32,
	opAndType{OXOR, TINT64}:  ssa.OpXor64,
	opAndType{OXOR, TUINT64}: ssa.OpXor64,

	opAndType{OEQ, TBOOL}:      ssa.OpEq8,
	opAndType{OEQ, TINT8}:      ssa.OpEq8,
	opAndType{OEQ, TUINT8}:     ssa.OpEq8,
	opAndType{OEQ, TINT16}:     ssa.OpEq16,
	opAndType{OEQ, TUINT16}:    ssa.OpEq16,
	opAndType{OEQ, TINT32}:     ssa.OpEq32,
	opAndType{OEQ, TUINT32}:    ssa.OpEq32,
	opAndType{OEQ, TINT64}:     ssa.OpEq64,
	opAndType{OEQ, TUINT64}:    ssa.OpEq64,
	opAndType{OEQ, TINTER}:     ssa.OpEqInter,
	opAndType{OEQ, TARRAY}:     ssa.OpEqSlice,
	opAndType{OEQ, TFUNC}:      ssa.OpEqPtr,
	opAndType{OEQ, TMAP}:       ssa.OpEqPtr,
	opAndType{OEQ, TCHAN}:      ssa.OpEqPtr,
	opAndType{OEQ, TPTR64}:     ssa.OpEqPtr,
	opAndType{OEQ, TUINTPTR}:   ssa.OpEqPtr,
	opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
	opAndType{OEQ, TFLOAT64}:   ssa.OpEq64F,
	opAndType{OEQ, TFLOAT32}:   ssa.OpEq32F,

	opAndType{ONE, TBOOL}:      ssa.OpNeq8,
	opAndType{ONE, TINT8}:      ssa.OpNeq8,
	opAndType{ONE, TUINT8}:     ssa.OpNeq8,
	opAndType{ONE, TINT16}:     ssa.OpNeq16,
	opAndType{ONE, TUINT16}:    ssa.OpNeq16,
	opAndType{ONE, TINT32}:     ssa.OpNeq32,
	opAndType{ONE, TUINT32}:    ssa.OpNeq32,
	opAndType{ONE, TINT64}:     ssa.OpNeq64,
	opAndType{ONE, TUINT64}:    ssa.OpNeq64,
	opAndType{ONE, TINTER}:     ssa.OpNeqInter,
	opAndType{ONE, TARRAY}:     ssa.OpNeqSlice,
	opAndType{ONE, TFUNC}:      ssa.OpNeqPtr,
	opAndType{ONE, TMAP}:       ssa.OpNeqPtr,
	opAndType{ONE, TCHAN}:      ssa.OpNeqPtr,
	opAndType{ONE, TPTR64}:     ssa.OpNeqPtr,
	opAndType{ONE, TUINTPTR}:   ssa.OpNeqPtr,
	opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
	opAndType{ONE, TFLOAT64}:   ssa.OpNeq64F,
	opAndType{ONE, TFLOAT32}:   ssa.OpNeq32F,

	opAndType{OLT, TINT8}:    ssa.OpLess8,
	opAndType{OLT, TUINT8}:   ssa.OpLess8U,
	opAndType{OLT, TINT16}:   ssa.OpLess16,
	opAndType{OLT, TUINT16}:  ssa.OpLess16U,
	opAndType{OLT, TINT32}:   ssa.OpLess32,
	opAndType{OLT, TUINT32}:  ssa.OpLess32U,
	opAndType{OLT, TINT64}:   ssa.OpLess64,
	opAndType{OLT, TUINT64}:  ssa.OpLess64U,
	opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
	opAndType{OLT, TFLOAT32}: ssa.OpLess32F,

	opAndType{OGT, TINT8}:    ssa.OpGreater8,
	opAndType{OGT, TUINT8}:   ssa.OpGreater8U,
	opAndType{OGT, TINT16}:   ssa.OpGreater16,
	opAndType{OGT, TUINT16}:  ssa.OpGreater16U,
	opAndType{OGT, TINT32}:   ssa.OpGreater32,
	opAndType{OGT, TUINT32}:  ssa.OpGreater32U,
	opAndType{OGT, TINT64}:   ssa.OpGreater64,
	opAndType{OGT, TUINT64}:  ssa.OpGreater64U,
	opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
	opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,

	opAndType{OLE, TINT8}:    ssa.OpLeq8,
	opAndType{OLE, TUINT8}:   ssa.OpLeq8U,
	opAndType{OLE, TINT16}:   ssa.OpLeq16,
	opAndType{OLE, TUINT16}:  ssa.OpLeq16U,
	opAndType{OLE, TINT32}:   ssa.OpLeq32,
	opAndType{OLE, TUINT32}:  ssa.OpLeq32U,
	opAndType{OLE, TINT64}:   ssa.OpLeq64,
	opAndType{OLE, TUINT64}:  ssa.OpLeq64U,
	opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
	opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,

	opAndType{OGE, TINT8}:    ssa.OpGeq8,
	opAndType{OGE, TUINT8}:   ssa.OpGeq8U,
	opAndType{OGE, TINT16}:   ssa.OpGeq16,
	opAndType{OGE, TUINT16}:  ssa.OpGeq16U,
	opAndType{OGE, TINT32}:   ssa.OpGeq32,
	opAndType{OGE, TUINT32}:  ssa.OpGeq32U,
	opAndType{OGE, TINT64}:   ssa.OpGeq64,
	opAndType{OGE, TUINT64}:  ssa.OpGeq64U,
	opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
	opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,

	opAndType{OLROT, TUINT8}:  ssa.OpLrot8,
	opAndType{OLROT, TUINT16}: ssa.OpLrot16,
	opAndType{OLROT, TUINT32}: ssa.OpLrot32,
	opAndType{OLROT, TUINT64}: ssa.OpLrot64,

	opAndType{OSQRT, TFLOAT64}: ssa.OpSqrt,
}
// concreteEtype resolves the platform-sized kinds TINT, TUINT, and
// TUINTPTR to their fixed-width equivalents for the current target;
// every other kind is returned unchanged.
func (s *state) concreteEtype(t *Type) EType {
	switch e := t.Etype; e {
	case TINT:
		if s.config.IntSize == 8 {
			return TINT64
		}
		return TINT32
	case TUINT:
		if s.config.IntSize == 8 {
			return TUINT64
		}
		return TUINT32
	case TUINTPTR:
		if s.config.PtrSize == 8 {
			return TUINT64
		}
		return TUINT32
	default:
		return e
	}
}
// ssaOp returns the SSA opcode implementing the binary/unary operator op
// on operands of type t, via the opToSSA table. Missing combinations are
// reported as unimplemented.
func (s *state) ssaOp(op Op, t *Type) ssa.Op {
	etype := s.concreteEtype(t)
	x, ok := opToSSA[opAndType{op, etype}]
	if !ok {
		s.Unimplementedf("unhandled binary op %s %s", opnames[op], Econv(etype))
	}
	return x
}
// floatForComplex returns the float type whose width matches one
// component of the complex type t: float32 for an 8-byte complex
// (complex64), float64 otherwise (complex128).
func floatForComplex(t *Type) *Type {
	if t.Size() == 8 {
		return Types[TFLOAT32]
	}
	// Early return above removes the non-idiomatic else-after-return.
	return Types[TFLOAT64]
}
// opAndTwoTypes keys the shift lowering table: an operator plus the kind
// of the shifted value (etype1) and the kind of the shift count (etype2).
type opAndTwoTypes struct {
	op     Op
	etype1 EType
	etype2 EType
}

// twoTypes keys the floating-point conversion table: source kind (etype1)
// and destination kind (etype2).
type twoTypes struct {
	etype1 EType
	etype2 EType
}

// twoOpsAndType describes a two-step lowering: presumably op1 is applied
// first, producing a value of intermediateType, then op2 — TODO confirm
// against the conversion codegen that consumes fpConvOpToSSA.
type twoOpsAndType struct {
	op1              ssa.Op
	op2              ssa.Op
	intermediateType EType
}
// fpConvOpToSSA maps a (source kind, destination kind) pair for a
// floating-point conversion to its two-step SSA lowering. OpInvalid
// entries mark conversions that require branchy code expansion instead
// of a direct opcode pair (see the per-entry comments).
var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
	twoTypes{TINT8, TFLOAT32}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
	twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},

	twoTypes{TINT8, TFLOAT64}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
	twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},

	twoTypes{TFLOAT32, TINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},

	twoTypes{TFLOAT64, TINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
	twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
	// unsigned
	twoTypes{TUINT8, TFLOAT32}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto32F, branchy code expansion instead

	twoTypes{TUINT8, TFLOAT64}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto64F, branchy code expansion instead

	twoTypes{TFLOAT32, TUINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt32Fto64U, branchy code expansion instead

	twoTypes{TFLOAT64, TUINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
	twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt64Fto64U, branchy code expansion instead
	// float
	twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT64},
	twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT32},
	twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
}
// shiftOpToSSA maps (shift operator, shifted-value kind, shift-count kind)
// to the SSA opcode implementing it. Right shifts distinguish signed (Rsh*x*)
// from unsigned (Rsh*Ux*) value types.
var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
	opAndTwoTypes{OLSH, TINT8, TUINT8}:   ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TUINT8, TUINT8}:  ssa.OpLsh8x8,
	opAndTwoTypes{OLSH, TINT8, TUINT16}:  ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
	opAndTwoTypes{OLSH, TINT8, TUINT32}:  ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
	opAndTwoTypes{OLSH, TINT8, TUINT64}:  ssa.OpLsh8x64,
	opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,

	opAndTwoTypes{OLSH, TINT16, TUINT8}:   ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TUINT16, TUINT8}:  ssa.OpLsh16x8,
	opAndTwoTypes{OLSH, TINT16, TUINT16}:  ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
	opAndTwoTypes{OLSH, TINT16, TUINT32}:  ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
	opAndTwoTypes{OLSH, TINT16, TUINT64}:  ssa.OpLsh16x64,
	opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,

	opAndTwoTypes{OLSH, TINT32, TUINT8}:   ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TUINT32, TUINT8}:  ssa.OpLsh32x8,
	opAndTwoTypes{OLSH, TINT32, TUINT16}:  ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
	opAndTwoTypes{OLSH, TINT32, TUINT32}:  ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
	opAndTwoTypes{OLSH, TINT32, TUINT64}:  ssa.OpLsh32x64,
	opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,

	opAndTwoTypes{OLSH, TINT64, TUINT8}:   ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TUINT64, TUINT8}:  ssa.OpLsh64x8,
	opAndTwoTypes{OLSH, TINT64, TUINT16}:  ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
	opAndTwoTypes{OLSH, TINT64, TUINT32}:  ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
	opAndTwoTypes{OLSH, TINT64, TUINT64}:  ssa.OpLsh64x64,
	opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,

	opAndTwoTypes{ORSH, TINT8, TUINT8}:   ssa.OpRsh8x8,
	opAndTwoTypes{ORSH, TUINT8, TUINT8}:  ssa.OpRsh8Ux8,
	opAndTwoTypes{ORSH, TINT8, TUINT16}:  ssa.OpRsh8x16,
	opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
	opAndTwoTypes{ORSH, TINT8, TUINT32}:  ssa.OpRsh8x32,
	opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
	opAndTwoTypes{ORSH, TINT8, TUINT64}:  ssa.OpRsh8x64,
	opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,

	opAndTwoTypes{ORSH, TINT16, TUINT8}:   ssa.OpRsh16x8,
	opAndTwoTypes{ORSH, TUINT16, TUINT8}:  ssa.OpRsh16Ux8,
	opAndTwoTypes{ORSH, TINT16, TUINT16}:  ssa.OpRsh16x16,
	opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
	opAndTwoTypes{ORSH, TINT16, TUINT32}:  ssa.OpRsh16x32,
	opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
	opAndTwoTypes{ORSH, TINT16, TUINT64}:  ssa.OpRsh16x64,
	opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,

	opAndTwoTypes{ORSH, TINT32, TUINT8}:   ssa.OpRsh32x8,
	opAndTwoTypes{ORSH, TUINT32, TUINT8}:  ssa.OpRsh32Ux8,
	opAndTwoTypes{ORSH, TINT32, TUINT16}:  ssa.OpRsh32x16,
	opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
	opAndTwoTypes{ORSH, TINT32, TUINT32}:  ssa.OpRsh32x32,
	opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
	opAndTwoTypes{ORSH, TINT32, TUINT64}:  ssa.OpRsh32x64,
	opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,

	opAndTwoTypes{ORSH, TINT64, TUINT8}:   ssa.OpRsh64x8,
	opAndTwoTypes{ORSH, TUINT64, TUINT8}:  ssa.OpRsh64Ux8,
	opAndTwoTypes{ORSH, TINT64, TUINT16}:  ssa.OpRsh64x16,
	opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
	opAndTwoTypes{ORSH, TINT64, TUINT32}:  ssa.OpRsh64x32,
	opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
	opAndTwoTypes{ORSH, TINT64, TUINT64}:  ssa.OpRsh64x64,
	opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
}
// ssaShiftOp returns the SSA opcode implementing the shift operation op
// for a shifted value of type t and a shift amount of type u.
// Unhandled combinations are reported via s.Unimplementedf.
func (s *state) ssaShiftOp(op Op, t *Type, u *Type) ssa.Op {
	e1 := s.concreteEtype(t)
	e2 := s.concreteEtype(u)
	ssaop, ok := shiftOpToSSA[opAndTwoTypes{op, e1, e2}]
	if !ok {
		s.Unimplementedf("unhandled shift op %s etype=%s/%s", opnames[op], Econv(e1), Econv(e2))
	}
	return ssaop
}
// ssaRotateOp returns the SSA opcode implementing the rotate operation op
// for a value of type t. Unhandled combinations are reported via
// s.Unimplementedf.
func (s *state) ssaRotateOp(op Op, t *Type) ssa.Op {
	et := s.concreteEtype(t)
	ssaop, ok := opToSSA[opAndType{op, et}]
	if !ok {
		s.Unimplementedf("unhandled rotate op %s etype=%s", opnames[op], Econv(et))
	}
	return ssaop
}
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n *Node) *ssa.Value {
	s.pushLine(n.Lineno)
	defer s.popLine()

	s.stmtList(n.Ninit)
	switch n.Op {
	case OCFUNC:
		aux := s.lookupSymbol(n, &ssa.ExternSymbol{n.Type, n.Left.Sym})
		return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
	case OPARAM:
		addr := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Left.Type, addr, s.mem())
	case ONAME:
		if n.Class == PFUNC {
			// "value" of a function is the address of the function's closure
			sym := funcsym(n.Sym)
			aux := &ssa.ExternSymbol{n.Type, sym}
			return s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sb)
		}
		if s.canSSA(n) {
			return s.variable(n, n.Type)
		}
		addr := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OCLOSUREVAR:
		addr := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	case OLITERAL:
		switch n.Val().Ctype() {
		case CTINT:
			i := n.Int64()
			switch n.Type.Size() {
			case 1:
				return s.constInt8(n.Type, int8(i))
			case 2:
				return s.constInt16(n.Type, int16(i))
			case 4:
				return s.constInt32(n.Type, int32(i))
			case 8:
				return s.constInt64(n.Type, i)
			default:
				s.Fatalf("bad integer size %d", n.Type.Size())
				return nil
			}
		case CTSTR:
			if n.Val().U == "" {
				return s.constEmptyString(n.Type)
			}
			return s.entryNewValue0A(ssa.OpConstString, n.Type, n.Val().U)
		case CTBOOL:
			v := s.constBool(n.Val().U.(bool))
			// For some reason the frontend gets the line numbers of
			// CTBOOL literals totally wrong. Fix it here by grabbing
			// the line number of the enclosing AST node.
			if len(s.line) >= 2 {
				v.Line = s.line[len(s.line)-2]
			}
			return v
		case CTNIL:
			t := n.Type
			switch {
			case t.IsSlice():
				return s.constSlice(t)
			case t.IsInterface():
				return s.constInterface(t)
			default:
				return s.constNil(t)
			}
		case CTFLT:
			f := n.Val().U.(*Mpflt)
			switch n.Type.Size() {
			case 4:
				return s.constFloat32(n.Type, f.Float32())
			case 8:
				return s.constFloat64(n.Type, f.Float64())
			default:
				s.Fatalf("bad float size %d", n.Type.Size())
				return nil
			}
		case CTCPLX:
			c := n.Val().U.(*Mpcplx)
			r := &c.Real
			i := &c.Imag
			switch n.Type.Size() {
			case 8:
				{
					pt := Types[TFLOAT32]
					return s.newValue2(ssa.OpComplexMake, n.Type,
						s.constFloat32(pt, r.Float32()),
						s.constFloat32(pt, i.Float32()))
				}
			case 16:
				{
					pt := Types[TFLOAT64]
					return s.newValue2(ssa.OpComplexMake, n.Type,
						s.constFloat64(pt, r.Float64()),
						s.constFloat64(pt, i.Float64()))
				}
			default:
				// Fixed diagnostic: this is the complex branch, not float.
				s.Fatalf("bad complex size %d", n.Type.Size())
				return nil
			}

		default:
			s.Unimplementedf("unhandled OLITERAL %v", n.Val().Ctype())
			return nil
		}
	case OCONVNOP:
		to := n.Type
		from := n.Left.Type

		// Assume everything will work out, so set up our return value.
		// Anything interesting that happens from here is a fatal.
		x := s.expr(n.Left)

		// Special case for not confusing GC and liveness.
		// We don't want pointers accidentally classified
		// as not-pointers or vice-versa because of copy
		// elision.
		if to.IsPtrShaped() != from.IsPtrShaped() {
			return s.newValue2(ssa.OpConvert, to, x, s.mem())
		}

		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type

		// CONVNOP closure
		if to.Etype == TFUNC && from.IsPtrShaped() {
			return v
		}

		// named <--> unnamed type or typed <--> untyped const
		if from.Etype == to.Etype {
			return v
		}

		// unsafe.Pointer <--> *T
		if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
			return v
		}

		dowidth(from)
		dowidth(to)
		if from.Width != to.Width {
			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
			return nil
		}
		if etypesign(from.Etype) != etypesign(to.Etype) {
			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, Econv(from.Etype), to, Econv(to.Etype))
			return nil
		}

		if instrumenting {
			// These appear to be fine, but they fail the
			// integer constraint below, so okay them here.
			// Sample non-integer conversion: map[string]string -> *uint8
			return v
		}

		if etypesign(from.Etype) == 0 {
			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
			return nil
		}

		// integer, same width, same sign
		return v

	case OCONV:
		x := s.expr(n.Left)
		ft := n.Left.Type // from type
		tt := n.Type      // to type
		if ft.IsInteger() && tt.IsInteger() {
			var op ssa.Op
			if tt.Size() == ft.Size() {
				op = ssa.OpCopy
			} else if tt.Size() < ft.Size() {
				// truncation
				switch 10*ft.Size() + tt.Size() {
				case 21:
					op = ssa.OpTrunc16to8
				case 41:
					op = ssa.OpTrunc32to8
				case 42:
					op = ssa.OpTrunc32to16
				case 81:
					op = ssa.OpTrunc64to8
				case 82:
					op = ssa.OpTrunc64to16
				case 84:
					op = ssa.OpTrunc64to32
				default:
					s.Fatalf("weird integer truncation %s -> %s", ft, tt)
				}
			} else if ft.IsSigned() {
				// sign extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpSignExt8to16
				case 14:
					op = ssa.OpSignExt8to32
				case 18:
					op = ssa.OpSignExt8to64
				case 24:
					op = ssa.OpSignExt16to32
				case 28:
					op = ssa.OpSignExt16to64
				case 48:
					op = ssa.OpSignExt32to64
				default:
					s.Fatalf("bad integer sign extension %s -> %s", ft, tt)
				}
			} else {
				// zero extension
				switch 10*ft.Size() + tt.Size() {
				case 12:
					op = ssa.OpZeroExt8to16
				case 14:
					op = ssa.OpZeroExt8to32
				case 18:
					op = ssa.OpZeroExt8to64
				case 24:
					op = ssa.OpZeroExt16to32
				case 28:
					op = ssa.OpZeroExt16to64
				case 48:
					op = ssa.OpZeroExt32to64
				default:
					// Fixed diagnostic: this is the zero-extension branch,
					// not sign extension.
					s.Fatalf("weird integer zero extension %s -> %s", ft, tt)
				}
			}
			return s.newValue1(op, n.Type, x)
		}

		if ft.IsFloat() || tt.IsFloat() {
			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
			if !ok {
				s.Fatalf("weird float conversion %s -> %s", ft, tt)
			}
			op1, op2, it := conv.op1, conv.op2, conv.intermediateType

			if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
				// normal case, not tripping over unsigned 64
				if op1 == ssa.OpCopy {
					if op2 == ssa.OpCopy {
						return x
					}
					return s.newValue1(op2, n.Type, x)
				}
				if op2 == ssa.OpCopy {
					return s.newValue1(op1, n.Type, x)
				}
				return s.newValue1(op2, n.Type, s.newValue1(op1, Types[it], x))
			}
			// Tricky 64-bit unsigned cases.
			if ft.IsInteger() {
				// therefore tt is float32 or float64, and ft is also unsigned
				if tt.Size() == 4 {
					return s.uint64Tofloat32(n, x, ft, tt)
				}
				if tt.Size() == 8 {
					return s.uint64Tofloat64(n, x, ft, tt)
				}
				s.Fatalf("weird unsigned integer to float conversion %s -> %s", ft, tt)
			}
			// therefore ft is float32 or float64, and tt is unsigned integer
			if ft.Size() == 4 {
				return s.float32ToUint64(n, x, ft, tt)
			}
			if ft.Size() == 8 {
				return s.float64ToUint64(n, x, ft, tt)
			}
			s.Fatalf("weird float to unsigned integer conversion %s -> %s", ft, tt)
			return nil
		}

		if ft.IsComplex() && tt.IsComplex() {
			var op ssa.Op
			if ft.Size() == tt.Size() {
				op = ssa.OpCopy
			} else if ft.Size() == 8 && tt.Size() == 16 {
				op = ssa.OpCvt32Fto64F
			} else if ft.Size() == 16 && tt.Size() == 8 {
				op = ssa.OpCvt64Fto32F
			} else {
				s.Fatalf("weird complex conversion %s -> %s", ft, tt)
			}
			ftp := floatForComplex(ft)
			ttp := floatForComplex(tt)
			return s.newValue2(ssa.OpComplexMake, tt,
				s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
				s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
		}

		s.Unimplementedf("unhandled OCONV %s -> %s", Econv(n.Left.Type.Etype), Econv(n.Type.Etype))
		return nil

	case ODOTTYPE:
		res, _ := s.dottype(n, false)
		return res

	// binary ops
	case OLT, OEQ, ONE, OLE, OGE, OGT:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Left.Type.IsComplex() {
			// Complex comparison: compare real and imaginary parts
			// separately and combine with AND (for ==) / NOT (for !=).
			pt := floatForComplex(n.Left.Type)
			op := s.ssaOp(OEQ, pt)
			r := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
			i := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
			c := s.newValue2(ssa.OpAnd8, Types[TBOOL], r, i)
			switch n.Op {
			case OEQ:
				return c
			case ONE:
				return s.newValue1(ssa.OpNot, Types[TBOOL], c)
			default:
				s.Fatalf("ordered complex compare %s", opnames[n.Op])
			}
		}
		return s.newValue2(s.ssaOp(n.Op, n.Left.Type), Types[TBOOL], a, b)
	case OMUL:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Type.IsComplex() {
			mulop := ssa.OpMul64F
			addop := ssa.OpAdd64F
			subop := ssa.OpSub64F
			pt := floatForComplex(n.Type) // Could be Float32 or Float64
			wt := Types[TFLOAT64]         // Compute in Float64 to minimize cancellation error

			areal := s.newValue1(ssa.OpComplexReal, pt, a)
			breal := s.newValue1(ssa.OpComplexReal, pt, b)
			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
			bimag := s.newValue1(ssa.OpComplexImag, pt, b)

			if pt != wt { // Widen for calculation
				areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
				breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
				aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
				bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
			}

			xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
			ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal))

			if pt != wt { // Narrow to store back
				xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
				ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
			}

			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
		}
		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)

	case ODIV:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Type.IsComplex() {
			// TODO this is not executed because the front-end substitutes a runtime call.
			// That probably ought to change; with modest optimization the widen/narrow
			// conversions could all be elided in larger expression trees.
			mulop := ssa.OpMul64F
			addop := ssa.OpAdd64F
			subop := ssa.OpSub64F
			divop := ssa.OpDiv64F
			pt := floatForComplex(n.Type) // Could be Float32 or Float64
			wt := Types[TFLOAT64]         // Compute in Float64 to minimize cancellation error

			areal := s.newValue1(ssa.OpComplexReal, pt, a)
			breal := s.newValue1(ssa.OpComplexReal, pt, b)
			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
			bimag := s.newValue1(ssa.OpComplexImag, pt, b)

			if pt != wt { // Widen for calculation
				areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
				breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
				aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
				bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
			}

			denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag))
			xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
			ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag))

			// TODO not sure if this is best done in wide precision or narrow
			// Double-rounding might be an issue.
			// Note that the pre-SSA implementation does the entire calculation
			// in wide format, so wide is compatible.
			xreal = s.newValue2(divop, wt, xreal, denom)
			ximag = s.newValue2(divop, wt, ximag, denom)

			if pt != wt { // Narrow to store back
				xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
				ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
			}
			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
		}
		if n.Type.IsFloat() {
			return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
		} else {
			// do a size-appropriate check for zero
			cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type))
			s.check(cmp, panicdivide)
			return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
		}
	case OMOD:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		// do a size-appropriate check for zero
		cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type))
		s.check(cmp, panicdivide)
		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
	case OADD, OSUB:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		if n.Type.IsComplex() {
			pt := floatForComplex(n.Type)
			op := s.ssaOp(n.Op, pt)
			return s.newValue2(ssa.OpComplexMake, n.Type,
				s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
				s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
		}
		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
	case OAND, OOR, OHMUL, OXOR:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
	case OLSH, ORSH:
		a := s.expr(n.Left)
		b := s.expr(n.Right)
		return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b)
	case OLROT:
		a := s.expr(n.Left)
		i := n.Right.Int64()
		if i <= 0 || i >= n.Type.Size()*8 {
			s.Fatalf("Wrong rotate distance for LROT, expected 1 through %d, saw %d", n.Type.Size()*8-1, i)
		}
		return s.newValue1I(s.ssaRotateOp(n.Op, n.Type), a.Type, i, a)
	case OANDAND, OOROR:
		// To implement OANDAND (and OOROR), we introduce a
		// new temporary variable to hold the result. The
		// variable is associated with the OANDAND node in the
		// s.vars table (normally variables are only
		// associated with ONAME nodes). We convert
		//     A && B
		// to
		//     var = A
		//     if var {
		//         var = B
		//     }
		// Using var in the subsequent block introduces the
		// necessary phi variable.
		el := s.expr(n.Left)
		s.vars[n] = el

		b := s.endBlock()
		b.Kind = ssa.BlockIf
		b.SetControl(el)
		// In theory, we should set b.Likely here based on context.
		// However, gc only gives us likeliness hints
		// in a single place, for plain OIF statements,
		// and passing around context is finnicky, so don't bother for now.

		bRight := s.f.NewBlock(ssa.BlockPlain)
		bResult := s.f.NewBlock(ssa.BlockPlain)
		if n.Op == OANDAND {
			b.AddEdgeTo(bRight)
			b.AddEdgeTo(bResult)
		} else if n.Op == OOROR {
			b.AddEdgeTo(bResult)
			b.AddEdgeTo(bRight)
		}

		s.startBlock(bRight)
		er := s.expr(n.Right)
		s.vars[n] = er

		b = s.endBlock()
		b.AddEdgeTo(bResult)

		s.startBlock(bResult)
		return s.variable(n, Types[TBOOL])
	case OCOMPLEX:
		r := s.expr(n.Left)
		i := s.expr(n.Right)
		return s.newValue2(ssa.OpComplexMake, n.Type, r, i)

	// unary ops
	case OMINUS:
		a := s.expr(n.Left)
		if n.Type.IsComplex() {
			tp := floatForComplex(n.Type)
			negop := s.ssaOp(n.Op, tp)
			return s.newValue2(ssa.OpComplexMake, n.Type,
				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
		}
		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
	case ONOT, OCOM, OSQRT:
		a := s.expr(n.Left)
		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
	case OIMAG, OREAL:
		a := s.expr(n.Left)
		return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
	case OPLUS:
		return s.expr(n.Left)

	case OADDR:
		return s.addr(n.Left, n.Bounded)

	case OINDREG:
		if int(n.Reg) != Thearch.REGSP {
			s.Unimplementedf("OINDREG of non-SP register %s in expr: %v", obj.Rconv(int(n.Reg)), n)
			return nil
		}
		addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(n.Type), n.Xoffset, s.sp)
		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())

	case OIND:
		p := s.expr(n.Left)
		s.nilCheck(p)
		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())

	case ODOT:
		t := n.Left.Type
		if canSSAType(t) {
			v := s.expr(n.Left)
			return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
		}
		p := s.addr(n, false)
		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())

	case ODOTPTR:
		p := s.expr(n.Left)
		s.nilCheck(p)
		p = s.newValue1I(ssa.OpOffPtr, p.Type, n.Xoffset, p)
		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())

	case OINDEX:
		switch {
		case n.Left.Type.IsString():
			a := s.expr(n.Left)
			i := s.expr(n.Right)
			i = s.extendIndex(i)
			if !n.Bounded {
				len := s.newValue1(ssa.OpStringLen, Types[TINT], a)
				s.boundsCheck(i, len)
			}
			ptrtyp := Ptrto(Types[TUINT8])
			ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
			if Isconst(n.Right, CTINT) {
				ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr)
			} else {
				ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
			}
			return s.newValue2(ssa.OpLoad, Types[TUINT8], ptr, s.mem())
		case n.Left.Type.IsSlice():
			p := s.addr(n, false)
			return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
		case n.Left.Type.IsArray():
			// TODO: fix when we can SSA arrays of length 1.
			p := s.addr(n, false)
			return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
		default:
			s.Fatalf("bad type for index %v", n.Left.Type)
			return nil
		}

	case OLEN, OCAP:
		switch {
		case n.Left.Type.IsSlice():
			op := ssa.OpSliceLen
			if n.Op == OCAP {
				op = ssa.OpSliceCap
			}
			return s.newValue1(op, Types[TINT], s.expr(n.Left))
		case n.Left.Type.IsString(): // string; not reachable for OCAP
			return s.newValue1(ssa.OpStringLen, Types[TINT], s.expr(n.Left))
		case n.Left.Type.IsMap(), n.Left.Type.IsChan():
			return s.referenceTypeBuiltin(n, s.expr(n.Left))
		default: // array
			return s.constInt(Types[TINT], n.Left.Type.NumElem())
		}

	case OSPTR:
		a := s.expr(n.Left)
		if n.Left.Type.IsSlice() {
			return s.newValue1(ssa.OpSlicePtr, n.Type, a)
		} else {
			return s.newValue1(ssa.OpStringPtr, n.Type, a)
		}

	case OITAB:
		a := s.expr(n.Left)
		return s.newValue1(ssa.OpITab, n.Type, a)

	case OEFACE:
		tab := s.expr(n.Left)
		data := s.expr(n.Right)
		// The frontend allows putting things like struct{*byte} in
		// the data portion of an eface. But we don't want struct{*byte}
		// as a register type because (among other reasons) the liveness
		// analysis is confused by the "fat" variables that result from
		// such types being spilled.
		// So here we ensure that we are selecting the underlying pointer
		// when we build an eface.
		// TODO: get rid of this now that structs can be SSA'd?
		for !data.Type.IsPtrShaped() {
			switch {
			case data.Type.IsArray():
				data = s.newValue1I(ssa.OpArrayIndex, data.Type.ElemType(), 0, data)
			case data.Type.IsStruct():
				for i := data.Type.NumFields() - 1; i >= 0; i-- {
					f := data.Type.FieldType(i)
					if f.Size() == 0 {
						// eface type could also be struct{p *byte; q [0]int}
						continue
					}
					data = s.newValue1I(ssa.OpStructSelect, f, int64(i), data)
					break
				}
			default:
				s.Fatalf("type being put into an eface isn't a pointer")
			}
		}
		return s.newValue2(ssa.OpIMake, n.Type, tab, data)

	case OSLICE, OSLICEARR:
		v := s.expr(n.Left)
		var i, j *ssa.Value
		if n.Right.Left != nil {
			i = s.extendIndex(s.expr(n.Right.Left))
		}
		if n.Right.Right != nil {
			j = s.extendIndex(s.expr(n.Right.Right))
		}
		p, l, c := s.slice(n.Left.Type, v, i, j, nil)
		return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
	case OSLICESTR:
		v := s.expr(n.Left)
		var i, j *ssa.Value
		if n.Right.Left != nil {
			i = s.extendIndex(s.expr(n.Right.Left))
		}
		if n.Right.Right != nil {
			j = s.extendIndex(s.expr(n.Right.Right))
		}
		p, l, _ := s.slice(n.Left.Type, v, i, j, nil)
		return s.newValue2(ssa.OpStringMake, n.Type, p, l)
	case OSLICE3, OSLICE3ARR:
		v := s.expr(n.Left)
		var i *ssa.Value
		if n.Right.Left != nil {
			i = s.extendIndex(s.expr(n.Right.Left))
		}
		j := s.extendIndex(s.expr(n.Right.Right.Left))
		k := s.extendIndex(s.expr(n.Right.Right.Right))
		p, l, c := s.slice(n.Left.Type, v, i, j, k)
		return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)

	case OCALLFUNC:
		if isIntrinsicCall1(n) {
			return s.intrinsicCall1(n)
		}
		fallthrough

	case OCALLINTER, OCALLMETH:
		a := s.call(n, callNormal)
		return s.newValue2(ssa.OpLoad, n.Type, a, s.mem())

	case OGETG:
		return s.newValue1(ssa.OpGetG, n.Type, s.mem())

	case OAPPEND:
		return s.append(n, false)

	default:
		s.Unimplementedf("unhandled expr %s", opnames[n.Op])
		return nil
	}
}
// append converts an OAPPEND node to SSA.
// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
// adds it to s, and returns the Value.
// If inplace is true, it writes the result of the OAPPEND expression n
// back to the slice being appended to, and returns nil.
// inplace MUST be set to false if the slice can be SSA'd.
func (s *state) append(n *Node, inplace bool) *ssa.Value {
	// If inplace is false, process as expression "append(s, e1, e2, e3)":
	//
	// ptr, len, cap := s
	// newlen := len + 3
	// if newlen > cap {
	//     ptr, len, cap = growslice(s, newlen)
	//     newlen = len + 3 // recalculate to avoid a spill
	// }
	// // with write barriers, if needed:
	// *(ptr+len) = e1
	// *(ptr+len+1) = e2
	// *(ptr+len+2) = e3
	// return makeslice(ptr, newlen, cap)
	//
	//
	// If inplace is true, process as statement "s = append(s, e1, e2, e3)":
	//
	// a := &s
	// ptr, len, cap := s
	// newlen := len + 3
	// *a.len = newlen // store newlen immediately to avoid a spill
	// if newlen > cap {
	//    newptr, _, newcap = growslice(ptr, len, cap, newlen)
	//    *a.cap = newcap // write before ptr to avoid a spill
	//    *a.ptr = newptr // with write barrier
	// }
	// // with write barriers, if needed:
	// *(ptr+len) = e1
	// *(ptr+len+1) = e2
	// *(ptr+len+2) = e3

	et := n.Type.Elem() // element type of the slice
	pt := Ptrto(et)

	// Evaluate slice
	sn := n.List.First() // the slice node is the first in the list

	var slice, addr *ssa.Value
	if inplace {
		// Need the slice's address so the new header can be written back.
		addr = s.addr(sn, false)
		slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
	} else {
		slice = s.expr(sn)
	}

	// Allocate new blocks
	grow := s.f.NewBlock(ssa.BlockPlain)
	assign := s.f.NewBlock(ssa.BlockPlain)

	// Decide if we need to grow
	nargs := int64(n.List.Len() - 1) // number of appended elements
	p := s.newValue1(ssa.OpSlicePtr, pt, slice)
	l := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
	c := s.newValue1(ssa.OpSliceCap, Types[TINT], slice)
	nl := s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs))

	if inplace {
		// Store the new length immediately (see sketch above).
		lenaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_nel), addr)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenaddr, nl, s.mem())
	}

	cmp := s.newValue2(s.ssaOp(OGT, Types[TINT]), Types[TBOOL], nl, c)
	s.vars[&ptrVar] = p

	if !inplace {
		s.vars[&newlenVar] = nl
		s.vars[&capVar] = c
	}

	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.Likely = ssa.BranchUnlikely // growing is the uncommon case
	b.SetControl(cmp)
	b.AddEdgeTo(grow)
	b.AddEdgeTo(assign)

	// Call growslice
	s.startBlock(grow)
	taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Types[TUINTPTR], typenamesym(n.Type)}, s.sb)

	r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl)

	if inplace {
		// Write cap before ptr (see sketch above) to avoid a spill.
		capaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_cap), addr)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capaddr, r[2], s.mem())
		s.insertWBstore(pt, addr, r[0], n.Lineno, 0)
		// load the value we just stored to avoid having to spill it
		s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem())
	} else {
		s.vars[&ptrVar] = r[0]
		s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], r[1], s.constInt(Types[TINT], nargs))
		s.vars[&capVar] = r[2]
	}

	b = s.endBlock()
	b.AddEdgeTo(assign)

	// assign new elements to slots
	s.startBlock(assign)

	// Evaluate args
	// args holds either SSA values (store[i]==true) or addresses of
	// non-SSA-able values (store[i]==false), parallel slices.
	args := make([]*ssa.Value, 0, nargs)
	store := make([]bool, 0, nargs)
	for _, n := range n.List.Slice()[1:] {
		if canSSAType(n.Type) {
			args = append(args, s.expr(n))
			store = append(store, true)
		} else {
			args = append(args, s.addr(n, false))
			store = append(store, false)
		}
	}

	p = s.variable(&ptrVar, pt) // generates phi for ptr
	if !inplace {
		nl = s.variable(&newlenVar, Types[TINT]) // generates phi for nl
		c = s.variable(&capVar, Types[TINT])     // generates phi for cap
	}
	p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
	// TODO: just one write barrier call for all of these writes?
	// TODO: maybe just one writeBarrier.enabled check?
	for i, arg := range args {
		addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(Types[TINT], int64(i)))
		if store[i] {
			// arg is an SSA value: plain store (with barrier for pointer types).
			if haspointers(et) {
				s.insertWBstore(et, addr, arg, n.Lineno, 0)
			} else {
				s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, et.Size(), addr, arg, s.mem())
			}
		} else {
			// arg is an address: mem->mem move.
			if haspointers(et) {
				s.insertWBmove(et, addr, arg, n.Lineno)
			} else {
				s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, et.Size(), addr, arg, s.mem())
			}
		}
	}

	delete(s.vars, &ptrVar)
	if inplace {
		return nil
	}
	delete(s.vars, &newlenVar)
	delete(s.vars, &capVar)
	// make result
	return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
}
// condBranch evaluates the boolean expression cond and branches to yes
// if cond is true and no if cond is false.
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
	switch cond.Op {
	case OANDAND:
		mid := s.f.NewBlock(ssa.BlockPlain)
		s.stmtList(cond.Ninit)
		s.condBranch(cond.Left, mid, no, max8(likely, 0))
		s.startBlock(mid)
		s.condBranch(cond.Right, yes, no, likely)
		return
		// Note: if likely==1, then both recursive calls pass 1.
		// If likely==-1, then we don't have enough information to decide
		// whether the first branch is likely or not. So we pass 0 for
		// the likeliness of the first branch.
		// TODO: have the frontend give us branch prediction hints for
		// OANDAND and OOROR nodes (if it ever has such info).
	case OOROR:
		mid := s.f.NewBlock(ssa.BlockPlain)
		s.stmtList(cond.Ninit)
		s.condBranch(cond.Left, yes, mid, min8(likely, 0))
		s.startBlock(mid)
		s.condBranch(cond.Right, yes, no, likely)
		return
		// Note: if likely==-1, then both recursive calls pass -1.
		// If likely==1, then we don't have enough info to decide
		// the likelihood of the first branch.
	case ONOT:
		s.stmtList(cond.Ninit)
		s.condBranch(cond.Left, no, yes, -likely)
		return
	}
	// Plain condition: evaluate it and emit a two-way branch block.
	ctrl := s.expr(cond)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(ctrl)
	b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
	b.AddEdgeTo(yes)
	b.AddEdgeTo(no)
}
// skipMask is a bitmask recording which words of a compound value
// (pointer, length, capacity) an assignment may skip storing.
type skipMask uint8

const (
	skipPtr skipMask = 1 << iota // pointer word need not be stored
	skipLen                      // length word need not be stored
	skipCap                      // capacity word need not be stored
)
// assign does left = right.
// Right has already been evaluated to ssa, left has not.
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// Include a write barrier if wb is true.
// skip indicates assignments (at the top level) that can be avoided.
func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, line int32, skip skipMask) {
	// Assigning to the blank identifier is a no-op.
	if left.Op == ONAME && isblank(left) {
		return
	}
	t := left.Type
	dowidth(t)
	if s.canSSA(left) {
		if deref {
			s.Fatalf("can SSA LHS %s but not RHS %s", left, right)
		}
		if left.Op == ODOT {
			// We're assigning to a field of an ssa-able value.
			// We need to build a new structure with the new value for the
			// field we're assigning and the old values for the other fields.
			// For instance:
			//   type T struct {a, b, c int}
			//   var T x
			//   x.b = 5
			// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}

			// Grab information about the structure type.
			t := left.Left.Type
			nf := t.NumFields()
			idx := fieldIdx(left)

			// Grab old value of structure.
			old := s.expr(left.Left)

			// Make new structure.
			new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)

			// Add fields as args.
			for i := 0; i < nf; i++ {
				if i == idx {
					new.AddArg(right)
				} else {
					new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
				}
			}

			// Recursively assign the new value we've made to the base of the dot op.
			s.assign(left.Left, new, false, false, line, 0)
			// TODO: do we need to update named values here?
			return
		}
		// Update variable assignment.
		s.vars[left] = right
		s.addNamedValue(left, right)
		return
	}
	// Left is not ssa-able. Compute its address.
	addr := s.addr(left, false)
	if left.Op == ONAME {
		// Mark the start of a new live range for left.
		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem())
	}
	if deref {
		// Treat as a mem->mem move.
		if right == nil {
			// nil right means "assign the zero value".
			s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, t.Size(), addr, s.mem())
			return
		}
		if wb {
			s.insertWBmove(t, addr, right, line)
			return
		}
		s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), addr, right, s.mem())
		return
	}
	// Treat as a store.
	if wb {
		if skip&skipPtr != 0 {
			// Special case: if we don't write back the pointers, don't bother
			// doing the write barrier check.
			s.storeTypeScalars(t, addr, right, skip)
			return
		}
		s.insertWBstore(t, addr, right, line, skip)
		return
	}
	if skip != 0 {
		if skip&skipPtr == 0 {
			s.storeTypePtrs(t, addr, right)
		}
		s.storeTypeScalars(t, addr, right, skip)
		return
	}
	s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), addr, right, s.mem())
}
// zeroVal returns the zero value for type t.
// Note: case order matters — e.g. slices and interfaces must be
// checked before the more general ptr-shaped test would apply.
func (s *state) zeroVal(t *Type) *ssa.Value {
	switch {
	case t.IsInteger():
		switch t.Size() {
		case 1:
			return s.constInt8(t, 0)
		case 2:
			return s.constInt16(t, 0)
		case 4:
			return s.constInt32(t, 0)
		case 8:
			return s.constInt64(t, 0)
		default:
			s.Fatalf("bad sized integer type %s", t)
		}
	case t.IsFloat():
		switch t.Size() {
		case 4:
			return s.constFloat32(t, 0)
		case 8:
			return s.constFloat64(t, 0)
		default:
			s.Fatalf("bad sized float type %s", t)
		}
	case t.IsComplex():
		switch t.Size() {
		case 8:
			z := s.constFloat32(Types[TFLOAT32], 0)
			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
		case 16:
			z := s.constFloat64(Types[TFLOAT64], 0)
			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
		default:
			s.Fatalf("bad sized complex type %s", t)
		}
	case t.IsString():
		return s.constEmptyString(t)
	case t.IsPtrShaped():
		return s.constNil(t)
	case t.IsBoolean():
		return s.constBool(false)
	case t.IsInterface():
		return s.constInterface(t)
	case t.IsSlice():
		return s.constSlice(t)
	case t.IsStruct():
		// Build a struct whose fields are each the zero value of their type.
		n := t.NumFields()
		v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
		for i := 0; i < n; i++ {
			v.AddArg(s.zeroVal(t.FieldType(i).(*Type)))
		}
		return v
	}
	s.Unimplementedf("zero for type %v not implemented", t)
	return nil
}
// callKind distinguishes how a call site invokes its target.
type callKind int8

const (
	callNormal callKind = iota // ordinary function call
	callDefer                  // call deferred via a defer statement
	callGo                     // call launched via a go statement
)
// isSSAIntrinsic1 returns true if n is a call to a recognized 1-arg intrinsic
// that can be handled by the SSA backend.
// SSA uses this, but so does the front end to see if should not
// inline a function because it is a candidate for intrinsic
// substitution.
func isSSAIntrinsic1(s *Sym) bool {
	// The test below is not quite accurate -- in the event that
	// a function is disabled on a per-function basis, for example
	// because of hash-keyed binary failure search, SSA might be
	// disabled for that function but it would not be noted here,
	// and thus an inlining would not occur (in practice, inlining
	// so far has only been noticed for Bswap32 and the 16-bit count
	// leading/trailing instructions, but heuristics might change
	// in the future or on different architectures).
	if !ssaEnabled || ssa.IntrinsicsDisable || Thearch.LinkArch.Family != sys.AMD64 {
		return false
	}
	if s == nil || s.Pkg == nil || s.Pkg.Path != "runtime/internal/sys" {
		return false
	}
	switch s.Name {
	case "Ctz64", "Ctz32", "Ctz16", "Bswap64", "Bswap32":
		return true
	}
	return false
}
// isIntrinsicCall1 reports whether n is a call whose target symbol is a
// recognized 1-arg SSA intrinsic.
func isIntrinsicCall1(n *Node) bool {
	return n != nil && n.Left != nil && isSSAIntrinsic1(n.Left.Sym)
}
// intrinsicFirstArg extracts the first argument from n.List and
// converts it to SSA, unwrapping an OAS ordering assignment if present.
func (s *state) intrinsicFirstArg(n *Node) *ssa.Value {
	arg := n.List.First()
	if arg.Op == OAS {
		arg = arg.Right
	}
	return s.expr(arg)
}
// intrinsicCall1 converts a call to a recognized 1-arg intrinsic
// into the intrinsic
func (s *state) intrinsicCall1(n *Node) *ssa.Value {
	var op ssa.Op
	var typ *Type
	switch n.Left.Sym.Name {
	case "Ctz64":
		op, typ = ssa.OpCtz64, Types[TUINT64]
	case "Ctz32":
		op, typ = ssa.OpCtz32, Types[TUINT32]
	case "Ctz16":
		op, typ = ssa.OpCtz16, Types[TUINT16]
	case "Bswap64":
		op, typ = ssa.OpBswap64, Types[TUINT64]
	case "Bswap32":
		op, typ = ssa.OpBswap32, Types[TUINT32]
	default:
		Fatalf("Unknown special call: %v", n.Left.Sym)
	}
	result := s.newValue1(op, typ, s.intrinsicFirstArg(n))
	if ssa.IntrinsicsDebug > 0 {
		Warnl(n.Lineno, "intrinsic substitution for %v with %s", n.Left.Sym.Name, result.LongString())
	}
	return result
}
// Calls the function n using the specified call type.
// Returns the address of the return value (or nil if none).
func (s *state) call(n *Node, k callKind) *ssa.Value {
	// Exactly one of sym/closure/codeptr ends up non-nil and selects the
	// call opcode in the switch near the bottom.
	var sym *Sym           // target symbol (if static)
	var closure *ssa.Value // ptr to closure to run (if dynamic)
	var codeptr *ssa.Value // ptr to target code (if dynamic)
	var rcvr *ssa.Value    // receiver to set
	fn := n.Left
	switch n.Op {
	case OCALLFUNC:
		if k == callNormal && fn.Op == ONAME && fn.Class == PFUNC {
			// Direct call to a known function: static call.
			sym = fn.Sym
			break
		}
		// Callee is a func value; evaluate it.
		closure = s.expr(fn)
	case OCALLMETH:
		if fn.Op != ODOTMETH {
			Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
		}
		if k == callNormal {
			sym = fn.Sym
			break
		}
		// go/defer of a method call: build a func value for the method
		// so it can be invoked like a closure.
		n2 := newname(fn.Sym)
		n2.Class = PFUNC
		n2.Lineno = fn.Lineno
		closure = s.expr(n2)
		// Note: receiver is already assigned in n.List, so we don't
		// want to set it here.
	case OCALLINTER:
		if fn.Op != ODOTINTER {
			Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", Oconv(fn.Op, 0))
		}
		i := s.expr(fn.Left)
		itab := s.newValue1(ssa.OpITab, Types[TUINTPTR], i)
		itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab
		itab = s.newValue1I(ssa.OpOffPtr, Types[TUINTPTR], itabidx, itab)
		if k == callNormal {
			codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], itab, s.mem())
		} else {
			closure = itab
		}
		rcvr = s.newValue1(ssa.OpIData, Types[TUINTPTR], i)
	}
	dowidth(fn.Type)
	stksize := fn.Type.ArgWidth() // includes receiver
	// Run all argument assignments. The arg slots have already
	// been offset by the appropriate amount (+2*widthptr for go/defer,
	// +widthptr for interface calls).
	// For OCALLMETH, the receiver is set in these statements.
	s.stmtList(n.List)
	// Set receiver (for interface calls)
	if rcvr != nil {
		argStart := Ctxt.FixedFrameSize()
		if k != callNormal {
			// Skip past the argsize and closure words stored below.
			argStart += int64(2 * Widthptr)
		}
		addr := s.entryNewValue1I(ssa.OpOffPtr, Types[TUINTPTR], argStart, s.sp)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, rcvr, s.mem())
	}
	// Defer/go args
	if k != callNormal {
		// Write argsize and closure (args to Newproc/Deferproc).
		argsize := s.constInt32(Types[TUINT32], int32(stksize))
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, 4, s.sp, argsize, s.mem())
		addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), int64(Widthptr), s.sp)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, closure, s.mem())
		stksize += 2 * int64(Widthptr)
	}
	// call target
	bNext := s.f.NewBlock(ssa.BlockPlain)
	var call *ssa.Value
	switch {
	case k == callDefer:
		call = s.newValue1(ssa.OpDeferCall, ssa.TypeMem, s.mem())
	case k == callGo:
		call = s.newValue1(ssa.OpGoCall, ssa.TypeMem, s.mem())
	case closure != nil:
		codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], closure, s.mem())
		call = s.newValue3(ssa.OpClosureCall, ssa.TypeMem, codeptr, closure, s.mem())
	case codeptr != nil:
		call = s.newValue2(ssa.OpInterCall, ssa.TypeMem, codeptr, s.mem())
	case sym != nil:
		call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, sym, s.mem())
	default:
		Fatalf("bad call type %s %v", opnames[n.Op], n)
	}
	call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
	// Finish call block
	s.vars[&memVar] = call
	b := s.endBlock()
	b.Kind = ssa.BlockCall
	b.SetControl(call)
	b.AddEdgeTo(bNext)
	if k == callDefer {
		// Add recover edge to exit code.
		b.Kind = ssa.BlockDefer
		r := s.f.NewBlock(ssa.BlockPlain)
		s.startBlock(r)
		s.exit()
		b.AddEdgeTo(r)
		b.Likely = ssa.BranchLikely
	}
	// Start exit block, find address of result.
	s.startBlock(bNext)
	res := n.Left.Type.Results()
	if res.NumFields() == 0 || k != callNormal {
		// call has no return value. Continue with the next statement.
		return nil
	}
	fp := res.Field(0)
	return s.entryNewValue1I(ssa.OpOffPtr, Ptrto(fp.Type), fp.Offset, s.sp)
}
// etypesign returns the signed-ness of e, for integer/pointer etypes.
// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
func etypesign(e EType) int8 {
	switch e {
	case TINT, TINT8, TINT16, TINT32, TINT64:
		return -1
	case TUINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINTPTR, TUNSAFEPTR:
		return +1
	default:
		return 0
	}
}
// lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node.
// This improves the effectiveness of cse by using the same Aux values for the
// same symbols.
func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} {
	switch sym.(type) {
	default:
		// Fix: message previously said "uknown".
		s.Fatalf("sym %v is of unknown type %T", sym, sym)
	case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol:
		// these are the only valid types
	}
	// Return the symbol already recorded for n, if any, so equivalent
	// addresses share a single Aux value and cse can merge them.
	if lsym, ok := s.varsyms[n]; ok {
		return lsym
	}
	s.varsyms[n] = sym
	return sym
}
// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
// The value that the returned Value represents is guaranteed to be non-nil.
// If bounded is true then this address does not require a nil check for its operand
// even if that would otherwise be implied.
func (s *state) addr(n *Node, bounded bool) *ssa.Value {
	t := Ptrto(n.Type)
	switch n.Op {
	case ONAME:
		switch n.Class {
		case PEXTERN:
			// global variable
			aux := s.lookupSymbol(n, &ssa.ExternSymbol{n.Type, n.Sym})
			v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb)
			// TODO: Make OpAddr use AuxInt as well as Aux.
			if n.Xoffset != 0 {
				v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
			}
			return v
		case PPARAM:
			// parameter slot
			v := s.decladdrs[n]
			if v != nil {
				return v
			}
			if n.String() == ".fp" {
				// Special arg that points to the frame pointer.
				// (Used by the race detector, others?)
				aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
				return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp)
			}
			s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
			return nil
		case PAUTO:
			aux := s.lookupSymbol(n, &ssa.AutoSymbol{Typ: n.Type, Node: n})
			return s.newValue1A(ssa.OpAddr, t, aux, s.sp)
		case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
			// ensure that we reuse symbols for out parameters so
			// that cse works on their addresses
			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
			return s.newValue1A(ssa.OpAddr, t, aux, s.sp)
		case PAUTO | PHEAP, PPARAM | PHEAP, PPARAMOUT | PHEAP, PPARAMREF:
			// Heap-escaped or closure-referenced variable: its address
			// is the value of Name.Heapaddr.
			return s.expr(n.Name.Heapaddr)
		default:
			s.Unimplementedf("variable address class %v not implemented", n.Class)
			return nil
		}
	case OINDREG:
		// indirect off a register
		// used for storing/loading arguments/returns to/from callees
		if int(n.Reg) != Thearch.REGSP {
			s.Unimplementedf("OINDREG of non-SP register %s in addr: %v", obj.Rconv(int(n.Reg)), n)
			return nil
		}
		return s.entryNewValue1I(ssa.OpOffPtr, t, n.Xoffset, s.sp)
	case OINDEX:
		if n.Left.Type.IsSlice() {
			a := s.expr(n.Left)
			i := s.expr(n.Right)
			i = s.extendIndex(i)
			len := s.newValue1(ssa.OpSliceLen, Types[TINT], a)
			if !n.Bounded {
				s.boundsCheck(i, len)
			}
			p := s.newValue1(ssa.OpSlicePtr, t, a)
			return s.newValue2(ssa.OpPtrIndex, t, p, i)
		} else { // array
			a := s.addr(n.Left, bounded)
			i := s.expr(n.Right)
			i = s.extendIndex(i)
			// Array length is a compile-time constant.
			len := s.constInt(Types[TINT], n.Left.Type.NumElem())
			if !n.Bounded {
				s.boundsCheck(i, len)
			}
			return s.newValue2(ssa.OpPtrIndex, Ptrto(n.Left.Type.Elem()), a, i)
		}
	case OIND:
		p := s.expr(n.Left)
		if !bounded {
			s.nilCheck(p)
		}
		return p
	case ODOT:
		p := s.addr(n.Left, bounded)
		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
	case ODOTPTR:
		p := s.expr(n.Left)
		if !bounded {
			s.nilCheck(p)
		}
		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
	case OCLOSUREVAR:
		// Closure variable: offset from the closure pointer.
		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
			s.entryNewValue0(ssa.OpGetClosurePtr, Ptrto(Types[TUINT8])))
	case OPARAM:
		p := n.Left
		if p.Op != ONAME || !(p.Class == PPARAM|PHEAP || p.Class == PPARAMOUT|PHEAP) {
			s.Fatalf("OPARAM not of ONAME,{PPARAM,PPARAMOUT}|PHEAP, instead %s", nodedump(p, 0))
		}
		// Recover original offset to address passed-in param value.
		original_p := *p
		original_p.Xoffset = n.Xoffset
		aux := &ssa.ArgSymbol{Typ: n.Type, Node: &original_p}
		return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp)
	case OCONVNOP:
		addr := s.addr(n.Left, bounded)
		return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
	case OCALLFUNC, OCALLINTER, OCALLMETH:
		// Address of a call's result: s.call returns the address of
		// the return value.
		return s.call(n, callNormal)
	default:
		s.Unimplementedf("unhandled addr %v", Oconv(n.Op, 0))
		return nil
	}
}
// canSSA reports whether n is SSA-able.
// n must be an ONAME (or an ODOT sequence with an ONAME base).
func (s *state) canSSA(n *Node) bool {
	// Strip field selections down to the base name.
	for n.Op == ODOT {
		n = n.Left
	}
	if n.Op != ONAME {
		return false
	}
	// Addressable or heap-allocated variables must live in memory.
	if n.Addrtaken || n.Class&PHEAP != 0 {
		return false
	}
	switch n.Class {
	case PEXTERN, PPARAMREF:
		// TODO: maybe treat PPARAMREF with an Arg-like op to read from closure?
		return false
	case PPARAMOUT:
		// Named return values must be in memory when the function has
		// defers, so that the deferred function can see them.
		// TODO: handle the hasdefer case? Maybe do:
		// if !strings.HasPrefix(n.String(), "~") { return false }
		// Cgo effectively takes the address of all result args,
		// but the compiler can't see that, so bail in that case too.
		if hasdefer || s.cgoUnsafeArgs {
			return false
		}
	}
	if n.Class == PPARAM && n.String() == ".this" {
		// wrappers generated by genwrapper need to update
		// the .this pointer in place.
		// TODO: treat as a PPARMOUT?
		return false
	}
	return canSSAType(n.Type)
	// TODO: try to make more variables SSAable?
}
// canSSAType reports whether variables of type t are SSA-able.
func canSSAType(t *Type) bool {
	dowidth(t)
	if t.Width > int64(4*Widthptr) {
		// 4*Widthptr is an arbitrary constant. We want it
		// to be at least 3*Widthptr so slices can be registerized.
		// Too big and we'll introduce too much register pressure.
		return false
	}
	switch t.Etype {
	case TARRAY:
		// Slices are fine. True (fixed-size) arrays are not, because
		// dynamic indexing is not supported on SSA variables.
		// TODO: maybe allow if length is <=1? All indexes
		// are constant? Might be good for the arrays
		// introduced by the compiler for variadic functions.
		return t.IsSlice()
	case TSTRUCT:
		// Structs are SSA-able if small enough and every field is.
		if t.NumFields() > ssa.MaxStruct {
			return false
		}
		for _, t1 := range t.Fields().Slice() {
			if !canSSAType(t1.Type) {
				return false
			}
		}
		return true
	}
	return true
}
// nilCheck generates nil pointer checking code.
// Starts a new block on return, unless nil checks are disabled.
// Used only for automatically inserted nil checks,
// not for user code like 'x != nil'.
func (s *state) nilCheck(ptr *ssa.Value) {
	if Disable_checknil != 0 {
		return
	}
	// Terminate the current block as a BlockCheck controlled by the
	// nil-check value, then continue in a fresh plain block.
	chk := s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem())
	b := s.endBlock()
	b.Kind = ssa.BlockCheck
	b.SetControl(chk)
	bNext := s.f.NewBlock(ssa.BlockPlain)
	b.AddEdgeTo(bNext)
	s.startBlock(bNext)
}
// boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
// Starts a new block on return.
func (s *state) boundsCheck(idx, len *ssa.Value) {
	// -B disables bounds checking.
	if Debug['B'] != 0 {
		return
	}
	// TODO: convert index to full width?
	// TODO: if index is 64-bit and we're compiling to 32-bit, check that high 32 bits are zero.
	// bounds check
	cmp := s.newValue2(ssa.OpIsInBounds, Types[TBOOL], idx, len)
	s.check(cmp, Panicindex)
}
// sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not.
// Starts a new block on return.
// Differs from boundsCheck in that idx == len is allowed (slicing may
// extend to the end) and the panic function is panicslice.
func (s *state) sliceBoundsCheck(idx, len *ssa.Value) {
	if Debug['B'] != 0 {
		return
	}
	// TODO: convert index to full width?
	// TODO: if index is 64-bit and we're compiling to 32-bit, check that high 32 bits are zero.
	// bounds check
	cmp := s.newValue2(ssa.OpIsSliceInBounds, Types[TBOOL], idx, len)
	s.check(cmp, panicslice)
}
// check branches on cmp (a bool): if it is false, panic by calling the
// given runtime function fn; otherwise continue in a new block.
// Panic blocks are cached per (fn, line) in s.panics so that multiple
// checks at the same source position share one panic call.
func (s *state) check(cmp *ssa.Value, fn *Node) {
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchLikely
	bNext := s.f.NewBlock(ssa.BlockPlain)
	line := s.peekLine()
	bPanic := s.panics[funcLine{fn, line}]
	if bPanic == nil {
		bPanic = s.f.NewBlock(ssa.BlockPlain)
		s.panics[funcLine{fn, line}] = bPanic
		s.startBlock(bPanic)
		// The panic call takes/returns memory to ensure that the right
		// memory state is observed if the panic happens.
		s.rtcall(fn, false, nil)
	}
	b.AddEdgeTo(bNext)
	b.AddEdgeTo(bPanic)
	s.startBlock(bNext)
}
// rtcall issues a call to the given runtime function fn with the listed args.
// Returns a slice of results of the given result types.
// The call is added to the end of the current block.
// If returns is false, the block is marked as an exit block.
// If returns is true, the block is marked as a call block. A new block
// is started to load the return values.
func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value {
	// Write args to the stack
	var off int64 // TODO: arch-dependent starting offset?
	for _, arg := range args {
		t := arg.Type
		// Each arg is stored at its naturally aligned offset from SP.
		off = Rnd(off, t.Alignment())
		ptr := s.sp
		if off != 0 {
			ptr = s.newValue1I(ssa.OpOffPtr, Types[TUINTPTR], off, s.sp)
		}
		size := t.Size()
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, size, ptr, arg, s.mem())
		off += size
	}
	off = Rnd(off, int64(Widthptr))
	// Issue call
	call := s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, fn.Sym, s.mem())
	s.vars[&memVar] = call
	// Finish block
	b := s.endBlock()
	if !returns {
		// Call never returns (panic helper): mark the block as an
		// exit block and stop building here.
		b.Kind = ssa.BlockExit
		b.SetControl(call)
		call.AuxInt = off
		if len(results) > 0 {
			Fatalf("panic call can't have results")
		}
		return nil
	}
	b.Kind = ssa.BlockCall
	b.SetControl(call)
	bNext := s.f.NewBlock(ssa.BlockPlain)
	b.AddEdgeTo(bNext)
	s.startBlock(bNext)
	// Load results
	res := make([]*ssa.Value, len(results))
	for i, t := range results {
		// Results follow the args on the stack, each at its aligned offset.
		off = Rnd(off, t.Alignment())
		ptr := s.sp
		if off != 0 {
			ptr = s.newValue1I(ssa.OpOffPtr, Types[TUINTPTR], off, s.sp)
		}
		res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem())
		off += t.Size()
	}
	off = Rnd(off, int64(Widthptr))
	// Remember how much callee stack space we needed.
	call.AuxInt = off
	return res
}
// insertWBmove inserts the assignment *left = *right including a write barrier.
// t is the type being assigned.
func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32) {
	// Generated control flow:
	// if writeBarrier.enabled {
	//   typedmemmove(&t, left, right)
	// } else {
	//   *left = *right
	// }
	if s.noWB {
		s.Fatalf("write barrier prohibited")
	}
	if s.WBLineno == 0 {
		// Record the first line that needed a write barrier.
		s.WBLineno = left.Line
	}
	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bEnd := s.f.NewBlock(ssa.BlockPlain)
	aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrier").Sym}
	flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
	// TODO: select the .enabled field. It is currently first, so not needed for now.
	// Load word, test byte, avoiding partial register write from load byte.
	flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
	flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.Likely = ssa.BranchUnlikely
	b.SetControl(flag)
	b.AddEdgeTo(bThen)
	b.AddEdgeTo(bElse)
	// Barrier-enabled path: call runtime typedmemmove.
	s.startBlock(bThen)
	taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Types[TUINTPTR], typenamesym(t)}, s.sb)
	s.rtcall(typedmemmove, true, nil, taddr, left, right)
	s.endBlock().AddEdgeTo(bEnd)
	// Barrier-disabled path: plain memory move.
	s.startBlock(bElse)
	s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), left, right, s.mem())
	s.endBlock().AddEdgeTo(bEnd)
	s.startBlock(bEnd)
	if Debug_wb > 0 {
		Warnl(line, "write barrier")
	}
}
// insertWBstore inserts the assignment *left = right including a write barrier.
// t is the type being assigned.
func (s *state) insertWBstore(t *Type, left, right *ssa.Value, line int32, skip skipMask) {
	// Generated code:
	// store scalar fields
	// if writeBarrier.enabled {
	//   writebarrierptr for pointer fields
	// } else {
	//   store pointer fields
	// }
	if s.noWB {
		s.Fatalf("write barrier prohibited")
	}
	if s.WBLineno == 0 {
		// Record the first line that needed a write barrier.
		s.WBLineno = left.Line
	}
	// Scalar parts never need a barrier; store them unconditionally.
	s.storeTypeScalars(t, left, right, skip)
	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bEnd := s.f.NewBlock(ssa.BlockPlain)
	aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrier").Sym}
	flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
	// TODO: select the .enabled field. It is currently first, so not needed for now.
	// Load word, test byte, avoiding partial register write from load byte.
	flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
	flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.Likely = ssa.BranchUnlikely
	b.SetControl(flag)
	b.AddEdgeTo(bThen)
	b.AddEdgeTo(bElse)
	// Issue write barriers for pointer writes.
	s.startBlock(bThen)
	s.storeTypePtrsWB(t, left, right)
	s.endBlock().AddEdgeTo(bEnd)
	// Issue regular stores for pointer writes.
	s.startBlock(bElse)
	s.storeTypePtrs(t, left, right)
	s.endBlock().AddEdgeTo(bEnd)
	s.startBlock(bEnd)
	if Debug_wb > 0 {
		Warnl(line, "write barrier")
	}
}
// storeTypeScalars does *left = right for all scalar (non-pointer) parts of t.
// skip suppresses the len (skipLen) and/or cap (skipCap) stores for
// string/slice values whose those words are known unchanged.
func (s *state) storeTypeScalars(t *Type, left, right *ssa.Value, skip skipMask) {
	switch {
	case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), left, right, s.mem())
	case t.IsPtrShaped():
		// no scalar fields.
	case t.IsString():
		// Only the length word is scalar; the data pointer is handled
		// by storeTypePtrs/storeTypePtrsWB.
		if skip&skipLen != 0 {
			return
		}
		len := s.newValue1(ssa.OpStringLen, Types[TINT], right)
		lenAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), s.config.IntSize, left)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem())
	case t.IsSlice():
		// len and cap words; the data pointer is a pointer part.
		if skip&skipLen == 0 {
			len := s.newValue1(ssa.OpSliceLen, Types[TINT], right)
			lenAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), s.config.IntSize, left)
			s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem())
		}
		if skip&skipCap == 0 {
			cap := s.newValue1(ssa.OpSliceCap, Types[TINT], right)
			capAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), 2*s.config.IntSize, left)
			s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capAddr, cap, s.mem())
		}
	case t.IsInterface():
		// itab field doesn't need a write barrier (even though it is a pointer).
		itab := s.newValue1(ssa.OpITab, Ptrto(Types[TUINT8]), right)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, left, itab, s.mem())
	case t.IsStruct():
		// Recurse into each field (field stores never skip len/cap).
		n := t.NumFields()
		for i := 0; i < n; i++ {
			ft := t.FieldType(i)
			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
			s.storeTypeScalars(ft.(*Type), addr, val, 0)
		}
	default:
		s.Fatalf("bad write barrier type %s", t)
	}
}
// storeTypePtrs does *left = right for all pointer parts of t,
// using plain stores (no write barrier). Counterpart of storeTypePtrsWB.
func (s *state) storeTypePtrs(t *Type, left, right *ssa.Value) {
	switch {
	case t.IsPtrShaped():
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, right, s.mem())
	case t.IsString():
		// Store the data pointer (first word).
		ptr := s.newValue1(ssa.OpStringPtr, Ptrto(Types[TUINT8]), right)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
	case t.IsSlice():
		// Store the data pointer (first word).
		ptr := s.newValue1(ssa.OpSlicePtr, Ptrto(Types[TUINT8]), right)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
	case t.IsInterface():
		// itab field is treated as a scalar.
		// Store only the data word (second word).
		idata := s.newValue1(ssa.OpIData, Ptrto(Types[TUINT8]), right)
		idataAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT8]), s.config.PtrSize, left)
		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, idataAddr, idata, s.mem())
	case t.IsStruct():
		// Recurse into fields that contain pointers.
		n := t.NumFields()
		for i := 0; i < n; i++ {
			ft := t.FieldType(i)
			if !haspointers(ft.(*Type)) {
				continue
			}
			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
			s.storeTypePtrs(ft.(*Type), addr, val)
		}
	default:
		s.Fatalf("bad write barrier type %s", t)
	}
}
// storeTypePtrsWB does *left = right for all pointer parts of t,
// routing every pointer store through runtime writebarrierptr.
// Must mirror storeTypePtrs case for case.
func (s *state) storeTypePtrsWB(t *Type, left, right *ssa.Value) {
	switch {
	case t.IsPtrShaped():
		s.rtcall(writebarrierptr, true, nil, left, right)
	case t.IsString():
		ptr := s.newValue1(ssa.OpStringPtr, Ptrto(Types[TUINT8]), right)
		s.rtcall(writebarrierptr, true, nil, left, ptr)
	case t.IsSlice():
		ptr := s.newValue1(ssa.OpSlicePtr, Ptrto(Types[TUINT8]), right)
		s.rtcall(writebarrierptr, true, nil, left, ptr)
	case t.IsInterface():
		// Only the data word (second word) gets the barrier; the itab
		// word was stored by storeTypeScalars.
		idata := s.newValue1(ssa.OpIData, Ptrto(Types[TUINT8]), right)
		idataAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT8]), s.config.PtrSize, left)
		s.rtcall(writebarrierptr, true, nil, idataAddr, idata)
	case t.IsStruct():
		n := t.NumFields()
		for i := 0; i < n; i++ {
			ft := t.FieldType(i)
			if !haspointers(ft.(*Type)) {
				continue
			}
			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
			s.storeTypePtrsWB(ft.(*Type), addr, val)
		}
	default:
		s.Fatalf("bad write barrier type %s", t)
	}
}
// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
// i,j,k may be nil, in which case they are set to their default value.
// t is a slice, ptr to array, or string type.
func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
	var elemtype *Type
	var ptrtype *Type
	var ptr *ssa.Value
	var len *ssa.Value
	var cap *ssa.Value
	zero := s.constInt(Types[TINT], 0)
	// Extract ptr/len/cap of the operand according to its type.
	switch {
	case t.IsSlice():
		elemtype = t.Elem()
		ptrtype = Ptrto(elemtype)
		ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v)
		len = s.newValue1(ssa.OpSliceLen, Types[TINT], v)
		cap = s.newValue1(ssa.OpSliceCap, Types[TINT], v)
	case t.IsString():
		elemtype = Types[TUINT8]
		ptrtype = Ptrto(elemtype)
		ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v)
		len = s.newValue1(ssa.OpStringLen, Types[TINT], v)
		cap = len
	case t.IsPtr():
		if !t.Elem().IsArray() {
			s.Fatalf("bad ptr to array in slice %v\n", t)
		}
		elemtype = t.Elem().Elem()
		ptrtype = Ptrto(elemtype)
		s.nilCheck(v)
		ptr = v
		len = s.constInt(Types[TINT], t.Elem().NumElem())
		cap = len
	default:
		s.Fatalf("bad type in slice %v\n", t)
	}
	// Set default values
	if i == nil {
		i = zero
	}
	if j == nil {
		j = len
	}
	if k == nil {
		k = cap
	}
	// Panic if slice indices are not in bounds.
	// Checks are skipped when the compared values are the same *ssa.Value.
	s.sliceBoundsCheck(i, j)
	if j != k {
		s.sliceBoundsCheck(j, k)
	}
	if k != cap {
		s.sliceBoundsCheck(k, cap)
	}
	// Generate the following code assuming that indexes are in bounds.
	// The conditional is to make sure that we don't generate a slice
	// that points to the next object in memory.
	// rlen = j-i
	// rcap = k-i
	// delta = i*elemsize
	// if rcap == 0 {
	//    delta = 0
	// }
	// rptr = p+delta
	// result = (SliceMake rptr rlen rcap)
	subOp := s.ssaOp(OSUB, Types[TINT])
	eqOp := s.ssaOp(OEQ, Types[TINT])
	mulOp := s.ssaOp(OMUL, Types[TINT])
	rlen := s.newValue2(subOp, Types[TINT], j, i)
	var rcap *ssa.Value
	switch {
	case t.IsString():
		// Capacity of the result is unimportant. However, we use
		// rcap to test if we've generated a zero-length slice.
		// Use length of strings for that.
		rcap = rlen
	case j == k:
		rcap = rlen
	default:
		rcap = s.newValue2(subOp, Types[TINT], k, i)
	}
	// delta = # of elements to offset pointer by.
	s.vars[&deltaVar] = i
	// Generate code to set delta=0 if the resulting capacity is zero.
	// Skipped entirely when i is a known zero constant.
	if !((i.Op == ssa.OpConst64 && i.AuxInt == 0) ||
		(i.Op == ssa.OpConst32 && int32(i.AuxInt) == 0)) {
		cmp := s.newValue2(eqOp, Types[TBOOL], rcap, zero)
		b := s.endBlock()
		b.Kind = ssa.BlockIf
		b.Likely = ssa.BranchUnlikely
		b.SetControl(cmp)
		// Generate block which zeros the delta variable.
		nz := s.f.NewBlock(ssa.BlockPlain)
		b.AddEdgeTo(nz)
		s.startBlock(nz)
		s.vars[&deltaVar] = zero
		s.endBlock()
		// All done.
		merge := s.f.NewBlock(ssa.BlockPlain)
		b.AddEdgeTo(merge)
		nz.AddEdgeTo(merge)
		s.startBlock(merge)
		// TODO: use conditional moves somehow?
	}
	// Compute rptr = ptr + delta * elemsize
	rptr := s.newValue2(ssa.OpAddPtr, ptrtype, ptr, s.newValue2(mulOp, Types[TINT], s.variable(&deltaVar, Types[TINT]), s.constInt(Types[TINT], elemtype.Width)))
	delete(s.vars, &deltaVar)
	return rptr, rlen, rcap
}
// u2fcvtTab parameterizes uintTofloat with the SSA opcodes needed for
// one particular unsigned-integer-to-float conversion width combination.
// one builds the constant 1 at the source integer width.
type u2fcvtTab struct {
	geq, cvt2F, and, rsh, or, add ssa.Op
	one                           func(*state, ssa.Type, int64) *ssa.Value
}
// Conversion tables for uintTofloat, one per source/destination width
// pair. (The explicit `u2fcvtTab` type on each var was redundant and
// has been dropped; the type is inferred from the composite literal.)

// uint64 -> float64.
var u64_f64 = u2fcvtTab{
	geq:   ssa.OpGeq64,
	cvt2F: ssa.OpCvt64to64F,
	and:   ssa.OpAnd64,
	rsh:   ssa.OpRsh64Ux64,
	or:    ssa.OpOr64,
	add:   ssa.OpAdd64F,
	one:   (*state).constInt64,
}

// uint64 -> float32.
var u64_f32 = u2fcvtTab{
	geq:   ssa.OpGeq64,
	cvt2F: ssa.OpCvt64to32F,
	and:   ssa.OpAnd64,
	rsh:   ssa.OpRsh64Ux64,
	or:    ssa.OpOr64,
	add:   ssa.OpAdd32F,
	one:   (*state).constInt64,
}

// uint32 -> float32.
// Excess generality on a machine with 64-bit integer registers.
// Not used on AMD64.
var u32_f32 = u2fcvtTab{
	geq:   ssa.OpGeq32,
	cvt2F: ssa.OpCvt32to32F,
	and:   ssa.OpAnd32,
	rsh:   ssa.OpRsh32Ux32,
	or:    ssa.OpOr32,
	add:   ssa.OpAdd32F,
	one: func(s *state, t ssa.Type, x int64) *ssa.Value {
		return s.constInt32(t, int32(x))
	},
}
// uint64Tofloat64 converts x (a uint64, node n, types ft->tt) to float64
// via uintTofloat with the u64_f64 opcode table.
func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.uintTofloat(&u64_f64, n, x, ft, tt)
}
// uint64Tofloat32 converts x (a uint64, node n, types ft->tt) to float32
// via uintTofloat with the u64_f32 opcode table.
func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.uintTofloat(&u64_f32, n, x, ft, tt)
}
// uintTofloat converts unsigned integer x (node n, types ft->tt) to a
// float, using the opcodes in cvttab. Generated control flow:
//
// if x >= 0 {
//    result = (floatY) x
// } else {
//    y = x & 1
//    z = x >> 1
//    z = z | y
//    result = floatY(z)
//    result = result + result
// }
//
// Code borrowed from old code generator.
// What's going on: large 64-bit "unsigned" looks like
// negative number to hardware's integer-to-float
// conversion. However, because the mantissa is only
// 63 bits, we don't need the LSB, so instead we do an
// unsigned right shift (divide by two), convert, and
// double. However, before we do that, we need to be
// sure that we do not lose a "1" if that made the
// difference in the resulting rounding. Therefore, we
// preserve it, and OR (not ADD) it back in. The case
// that matters is when the eleven discarded bits are
// equal to 10000000001; that rounds up, and the 1 cannot
// be lost else it would round down if the LSB of the
// candidate mantissa is 0.
func (s *state) uintTofloat(cvttab *u2fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	cmp := s.newValue2(cvttab.geq, Types[TBOOL], x, s.zeroVal(ft))
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchLikely
	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)
	// Fast path: value fits in the signed range, convert directly.
	b.AddEdgeTo(bThen)
	s.startBlock(bThen)
	a0 := s.newValue1(cvttab.cvt2F, tt, x)
	s.vars[n] = a0
	s.endBlock()
	bThen.AddEdgeTo(bAfter)
	// Slow path: halve (preserving the LSB via OR), convert, double.
	b.AddEdgeTo(bElse)
	s.startBlock(bElse)
	one := cvttab.one(s, ft, 1)
	y := s.newValue2(cvttab.and, ft, x, one)
	z := s.newValue2(cvttab.rsh, ft, x, one)
	z = s.newValue2(cvttab.or, ft, z, y)
	a := s.newValue1(cvttab.cvt2F, tt, z)
	a1 := s.newValue2(cvttab.add, tt, a, a)
	s.vars[n] = a1
	s.endBlock()
	bElse.AddEdgeTo(bAfter)
	s.startBlock(bAfter)
	return s.variable(n, n.Type)
}
// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
	if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
		s.Fatalf("node must be a map or a channel")
	}
	// Generated control flow:
	// if n == nil {
	//   return 0
	// } else {
	//   // len
	//   return *((*int)n)
	//   // cap
	//   return *(((*int)n)+1)
	// }
	lenType := n.Type
	nilValue := s.constNil(Types[TUINTPTR])
	cmp := s.newValue2(ssa.OpEqPtr, Types[TBOOL], x, nilValue)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchUnlikely
	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)
	// length/capacity of a nil map/chan is zero
	b.AddEdgeTo(bThen)
	s.startBlock(bThen)
	s.vars[n] = s.zeroVal(lenType)
	s.endBlock()
	bThen.AddEdgeTo(bAfter)
	b.AddEdgeTo(bElse)
	s.startBlock(bElse)
	if n.Op == OLEN {
		// length is stored in the first word for map/chan
		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem())
	} else if n.Op == OCAP {
		// capacity is stored in the second word for chan
		sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem())
	} else {
		s.Fatalf("op must be OLEN or OCAP")
	}
	s.endBlock()
	bElse.AddEdgeTo(bAfter)
	s.startBlock(bAfter)
	return s.variable(n, lenType)
}
// f2uCvtTab parameterizes floatToUint with the SSA opcodes needed for
// one particular float-to-uint64 conversion.
// value builds a float constant at the source float width.
type f2uCvtTab struct {
	ltf, cvt2U, subf ssa.Op
	value            func(*state, ssa.Type, float64) *ssa.Value
}
// Conversion tables for floatToUint. (The explicit `f2uCvtTab` type on
// each var was redundant and has been dropped; it is inferred from the
// composite literal.)

// float32 -> uint64.
var f32_u64 = f2uCvtTab{
	ltf:   ssa.OpLess32F,
	cvt2U: ssa.OpCvt32Fto64,
	subf:  ssa.OpSub32F,
	value: (*state).constFloat32,
}

// float64 -> uint64.
var f64_u64 = f2uCvtTab{
	ltf:   ssa.OpLess64F,
	cvt2U: ssa.OpCvt64Fto64,
	subf:  ssa.OpSub64F,
	value: (*state).constFloat64,
}
// float32ToUint64 converts x (a float32, node n, types ft->tt) to uint64
// via floatToUint with the f32_u64 opcode table.
func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.floatToUint(&f32_u64, n, x, ft, tt)
}
// float64ToUint64 converts x (a float64, node n, types ft->tt) to uint64
// via floatToUint with the f64_u64 opcode table.
func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	return s.floatToUint(&f64_u64, n, x, ft, tt)
}
// floatToUint converts float x (node n, types ft->tt) to a uint64, using
// the opcodes in cvttab. The hardware conversion only covers the signed
// range, so values >= 2^63 are shifted down by 2^63 first and the high
// bit is OR'd back in afterwards:
// if x < 9223372036854775808.0 {
//   result = uintY(x)
// } else {
//   y = x - 9223372036854775808.0
//   z = uintY(y)
//   result = z | -9223372036854775808
// }
func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
	twoToThe63 := cvttab.value(s, ft, 9223372036854775808.0)
	cmp := s.newValue2(cvttab.ltf, Types[TBOOL], x, twoToThe63)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchLikely
	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bAfter := s.f.NewBlock(ssa.BlockPlain)
	// Fast path: value fits in the signed range, convert directly.
	b.AddEdgeTo(bThen)
	s.startBlock(bThen)
	a0 := s.newValue1(cvttab.cvt2U, tt, x)
	s.vars[n] = a0
	s.endBlock()
	bThen.AddEdgeTo(bAfter)
	// Slow path: subtract 2^63, convert, restore the high bit.
	b.AddEdgeTo(bElse)
	s.startBlock(bElse)
	y := s.newValue2(cvttab.subf, ft, x, twoToThe63)
	y = s.newValue1(cvttab.cvt2U, tt, y)
	z := s.constInt64(tt, -9223372036854775808)
	a1 := s.newValue2(ssa.OpOr64, tt, y, z)
	s.vars[n] = a1
	s.endBlock()
	bElse.AddEdgeTo(bAfter)
	s.startBlock(bAfter)
	return s.variable(n, n.Type)
}
// ifaceType returns the value for the word containing the type.
// n is the node for the interface expression.
// v is the corresponding value.
func (s *state) ifaceType(n *Node, v *ssa.Value) *ssa.Value {
	byteptr := Ptrto(Types[TUINT8]) // type used in runtime prototypes for runtime type (*byte)
	if n.Type.IsEmptyInterface() {
		// Have *eface. The type is the first word in the struct.
		return s.newValue1(ssa.OpITab, byteptr, v)
	}
	// Have *iface.
	// The first word in the struct is the *itab.
	// If the *itab is nil, return 0.
	// Otherwise, the second word in the *itab is the type.
	tab := s.newValue1(ssa.OpITab, byteptr, v)
	s.vars[&typVar] = tab
	isnonnil := s.newValue2(ssa.OpNeqPtr, Types[TBOOL], tab, s.constNil(byteptr))
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(isnonnil)
	b.Likely = ssa.BranchLikely
	bLoad := s.f.NewBlock(ssa.BlockPlain)
	bEnd := s.f.NewBlock(ssa.BlockPlain)
	b.AddEdgeTo(bLoad)
	b.AddEdgeTo(bEnd)
	bLoad.AddEdgeTo(bEnd)
	// Non-nil itab: load the type word out of it.
	s.startBlock(bLoad)
	off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), tab)
	s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
	s.endBlock()
	// Merge: typVar is either the loaded type or the nil tab.
	s.startBlock(bEnd)
	typ := s.variable(&typVar, byteptr)
	delete(s.vars, &typVar)
	return typ
}
// dottype generates SSA for a type assertion node.
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
	iface := s.expr(n.Left)
	typ := s.ifaceType(n.Left, iface)  // actual concrete type
	target := s.expr(typename(n.Type)) // target type
	if !isdirectiface(n.Type) {
		// walk rewrites ODOTTYPE/OAS2DOTTYPE into runtime calls except for this case.
		Fatalf("dottype needs a direct iface type %s", n.Type)
	}
	if Debug_typeassert > 0 {
		Warnl(n.Lineno, "type assertion inlined")
	}
	// TODO: If we have a nonempty interface and its itab field is nil,
	// then this test is redundant and ifaceType should just branch directly to bFail.
	cond := s.newValue2(ssa.OpEqPtr, Types[TBOOL], typ, target)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cond)
	b.Likely = ssa.BranchLikely
	byteptr := Ptrto(Types[TUINT8])
	bOk := s.f.NewBlock(ssa.BlockPlain)
	bFail := s.f.NewBlock(ssa.BlockPlain)
	b.AddEdgeTo(bOk)
	b.AddEdgeTo(bFail)
	if !commaok {
		// on failure, panic by calling panicdottype
		s.startBlock(bFail)
		taddr := s.newValue1A(ssa.OpAddr, byteptr, &ssa.ExternSymbol{byteptr, typenamesym(n.Left.Type)}, s.sb)
		s.rtcall(panicdottype, false, nil, typ, target, taddr)
		// on success, return idata field
		s.startBlock(bOk)
		return s.newValue1(ssa.OpIData, n.Type, iface), nil
	}
	// commaok is the more complicated case because we have
	// a control flow merge point.
	bEnd := s.f.NewBlock(ssa.BlockPlain)
	// type assertion succeeded
	s.startBlock(bOk)
	s.vars[&idataVar] = s.newValue1(ssa.OpIData, n.Type, iface)
	s.vars[&okVar] = s.constBool(true)
	s.endBlock()
	bOk.AddEdgeTo(bEnd)
	// type assertion failed
	s.startBlock(bFail)
	s.vars[&idataVar] = s.constNil(byteptr)
	s.vars[&okVar] = s.constBool(false)
	s.endBlock()
	bFail.AddEdgeTo(bEnd)
	// merge point
	s.startBlock(bEnd)
	res = s.variable(&idataVar, byteptr)
	resok = s.variable(&okVar, Types[TBOOL])
	delete(s.vars, &idataVar)
	delete(s.vars, &okVar)
	return res, resok
}
// checkgoto checks that a goto from from to to does not
// jump into a block or jump over variable declarations.
// It is a copy of checkgoto in the pre-SSA backend,
// modified only for line number handling.
// TODO: document how this works and why it is designed the way it is.
func (s *state) checkgoto(from *Node, to *Node) {
	if from.Sym == to.Sym {
		return
	}
	// Count the lengths of the two Sym chains so they can be walked in
	// lockstep below.
	nf := 0
	for fs := from.Sym; fs != nil; fs = fs.Link {
		nf++
	}
	nt := 0
	for fs := to.Sym; fs != nil; fs = fs.Link {
		nt++
	}
	// Advance the longer (from) chain until both are the same length.
	fs := from.Sym
	for ; nf > nt; nf-- {
		fs = fs.Link
	}
	if fs != to.Sym {
		// decide what to complain about.
		// prefer to complain about 'into block' over declarations,
		// so scan backward to find most recent block or else dcl.
		var block *Sym
		var dcl *Sym
		ts := to.Sym
		for ; nt > nf; nt-- {
			if ts.Pkg == nil {
				block = ts
			} else {
				dcl = ts
			}
			ts = ts.Link
		}
		// Walk both chains together until they meet, tracking the most
		// recent block/declaration seen on the to side.
		for ts != fs {
			if ts.Pkg == nil {
				block = ts
			} else {
				dcl = ts
			}
			ts = ts.Link
			fs = fs.Link
		}
		lno := from.Left.Lineno
		if block != nil {
			yyerrorl(lno, "goto %v jumps into block starting at %v", from.Left.Sym, linestr(block.Lastlineno))
		} else {
			yyerrorl(lno, "goto %v jumps over declaration of %v at %v", from.Left.Sym, dcl, linestr(dcl.Lastlineno))
		}
	}
}
// variable returns the value of a variable at the current location.
// If the variable has no value in the current block yet, a FwdRef
// placeholder is created and recorded for later resolution by
// linkForwardReferences.
func (s *state) variable(name *Node, t ssa.Type) *ssa.Value {
	if cached := s.vars[name]; cached != nil {
		return cached
	}
	fwd := s.newValue0A(ssa.OpFwdRef, t, name)
	s.fwdRefs = append(s.fwdRefs, fwd)
	s.vars[name] = fwd
	s.addNamedValue(name, fwd)
	return fwd
}
// mem returns the current memory pseudo-variable, creating a forward
// reference if this block has not defined it yet.
func (s *state) mem() *ssa.Value {
	return s.variable(&memVar, ssa.TypeMem)
}
// linkForwardReferences resolves every pending FwdRef value.
//
// Build SSA graph. Each variable on its first use in a basic block
// leaves a FwdRef in that block representing the incoming value
// of that variable. This function links that ref up with possible definitions,
// inserting Phi values as needed. This is essentially the algorithm
// described by Braun, Buchwald, Hack, Leißa, Mallon, and Zwinkau:
// http://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf
// Differences:
//   - We use FwdRef nodes to postpone phi building until the CFG is
//     completely built. That way we can avoid the notion of "sealed"
//     blocks.
//   - Phi optimization is a separate pass (in ../ssa/phielim.go).
//
// Note: resolveFwdRef may push new forward references onto the
// worklist, so the loop re-reads the length each iteration.
func (s *state) linkForwardReferences() {
	for n := len(s.fwdRefs); n > 0; n = len(s.fwdRefs) {
		ref := s.fwdRefs[n-1]
		s.fwdRefs = s.fwdRefs[:n-1]
		s.resolveFwdRef(ref)
	}
}
// resolveFwdRef modifies v to be the variable's value at the start of its block.
// v must be a FwdRef op. The op is rewritten in place to OpArg, OpLoad,
// OpUnknown, OpPhi, or OpCopy depending on where the value comes from.
func (s *state) resolveFwdRef(v *ssa.Value) {
	b := v.Block
	name := v.Aux.(*Node)
	v.Aux = nil
	if b == s.f.Entry {
		// Live variable at start of function.
		if s.canSSA(name) {
			if strings.HasPrefix(name.Sym.Name, "autotmp_") {
				// It's likely that this is an uninitialized variable in the entry block.
				s.Fatalf("Treating auto as if it were arg, func %s, node %v, value %v", b.Func.Name, name, v)
			}
			v.Op = ssa.OpArg
			v.Aux = name
			return
		}
		// Not SSAable. Load it.
		addr := s.decladdrs[name]
		if addr == nil {
			// TODO: closure args reach here.
			s.Unimplementedf("unhandled closure arg %s at entry to function %s", name, b.Func.Name)
		}
		if _, ok := addr.Aux.(*ssa.ArgSymbol); !ok {
			s.Fatalf("variable live at start of function %s is not an argument %s", b.Func.Name, name)
		}
		v.Op = ssa.OpLoad
		v.AddArgs(addr, s.startmem)
		return
	}

	if len(b.Preds) == 0 {
		// This block is dead; we have no predecessors and we're not the entry block.
		// It doesn't matter what we use here as long as it is well-formed.
		v.Op = ssa.OpUnknown
		return
	}

	// Find variable value on each predecessor.
	var argstore [4]*ssa.Value // small stack buffer to avoid allocation in the common case
	args := argstore[:0]
	for _, p := range b.Preds {
		args = append(args, s.lookupVarOutgoing(p, v.Type, name, v.Line))
	}

	// Decide if we need a phi or not. We need a phi if there
	// are two different args (which are both not v).
	var w *ssa.Value
	for _, a := range args {
		if a == v {
			continue // self-reference
		}
		if a == w {
			continue // already have this witness
		}
		if w != nil {
			// two witnesses, need a phi value
			v.Op = ssa.OpPhi
			v.AddArgs(args...)
			return
		}
		w = a // save witness
	}
	if w == nil {
		s.Fatalf("no witness for reachable phi %s", v)
	}
	// One witness. Make v a copy of w.
	v.Op = ssa.OpCopy
	v.AddArg(w)
}
// lookupVarOutgoing finds the variable's value at the end of block b.
// If b does not define the variable, a FwdRef placeholder is created in b,
// recorded in b's defvars map so the lookup is done only once, and queued
// for later resolution.
func (s *state) lookupVarOutgoing(b *ssa.Block, t ssa.Type, name *Node, line int32) *ssa.Value {
	m := s.defvars[b.ID]
	if v, ok := m[name]; ok {
		return v
	}
	// The variable is not defined by b and we haven't
	// looked it up yet. Generate a FwdRef for the variable and return that.
	v := b.NewValue0A(line, ssa.OpFwdRef, t, name)
	s.fwdRefs = append(s.fwdRefs, v)
	m[name] = v
	s.addNamedValue(name, v)
	return v
}
// addNamedValue records that value v corresponds to user variable n,
// so that later passes (e.g. debug info, stack maps) can associate
// SSA values with source-level names. Several classes of node are
// deliberately not tracked; see the cases below.
func (s *state) addNamedValue(n *Node, v *ssa.Value) {
	switch {
	case n.Class == Pxxx:
		// Don't track our dummy nodes (&memVar etc.).
		return
	case strings.HasPrefix(n.Sym.Name, "autotmp_"):
		// Don't track autotmp_ variables.
		return
	case n.Class == PPARAMOUT:
		// Don't track named output values. This prevents return values
		// from being assigned too early. See #14591 and #14762. TODO: allow this.
		return
	}
	if n.Class == PAUTO && n.Xoffset != 0 {
		s.Fatalf("AUTO var with offset %s %d", n, n.Xoffset)
	}
	slot := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
	existing, seen := s.f.NamedValues[slot]
	if !seen {
		// First value for this slot: remember the slot itself too.
		s.f.Names = append(s.f.Names, slot)
	}
	s.f.NamedValues[slot] = append(existing, v)
}
// Branch is an unresolved branch: a branch instruction whose target
// address is filled in only after all blocks have been emitted.
type Branch struct {
	P *obj.Prog  // branch instruction
	B *ssa.Block // target
}

// SSAGenState contains state needed during Prog generation.
type SSAGenState struct {
	// Branches remembers all the branch instructions we've seen
	// and where they would like to go.
	Branches []Branch

	// bstart remembers where each block starts (indexed by block ID)
	bstart []*obj.Prog
}
// Pc returns the current Prog (the global instruction cursor).
func (s *SSAGenState) Pc() *obj.Prog {
	return Pc
}

// SetLineno sets the current source line number (the global lineno).
func (s *SSAGenState) SetLineno(l int32) {
	lineno = l
}
// genssa appends entries to ptxt for each instruction in f.
// gcargs and gclocals are filled in with pointer maps for the frame.
//
// It emits each basic block in order, resolves branch targets once all
// blocks have been placed, optionally logs/dumps the generated Progs,
// emits static data, allocates the stack frame, and generates GC bitmaps.
func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) {
	var s SSAGenState

	e := f.Config.Frontend().(*ssaExport)
	// We're about to emit a bunch of Progs.
	// Since the only way to get here is to explicitly request it,
	// just fail on unimplemented instead of trying to unwind our mess.
	e.mustImplement = true

	// Remember where each block starts.
	s.bstart = make([]*obj.Prog, f.NumBlocks())

	// When logging, map each emitted Prog back to the value/block that
	// produced it so the dump can be annotated.
	var valueProgs map[*obj.Prog]*ssa.Value
	var blockProgs map[*obj.Prog]*ssa.Block
	var logProgs = e.log
	if logProgs {
		valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues())
		blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
		f.Logf("genssa %s\n", f.Name)
		blockProgs[Pc] = f.Blocks[0]
	}

	// Emit basic blocks
	for i, b := range f.Blocks {
		s.bstart[b.ID] = Pc
		// Emit values in block
		Thearch.SSAMarkMoves(&s, b)
		for _, v := range b.Values {
			x := Pc
			Thearch.SSAGenValue(&s, v)
			if logProgs {
				// Attribute every Prog emitted for v back to v.
				for ; x != Pc; x = x.Link {
					valueProgs[x] = v
				}
			}
		}
		// Emit control flow instructions for block
		var next *ssa.Block
		if i < len(f.Blocks)-1 && (Debug['N'] == 0 || b.Kind == ssa.BlockCall) {
			// If -N, leave next==nil so every block with successors
			// ends in a JMP (except call blocks - plive doesn't like
			// select{send,recv} followed by a JMP call). Helps keep
			// line numbers for otherwise empty blocks.
			next = f.Blocks[i+1]
		}
		x := Pc
		Thearch.SSAGenBlock(&s, b, next)
		if logProgs {
			for ; x != Pc; x = x.Link {
				blockProgs[x] = b
			}
		}
	}

	// Resolve branches now that every block's start Prog is known.
	for _, br := range s.Branches {
		br.P.To.Val = s.bstart[br.B.ID]
	}

	if logProgs {
		for p := ptxt; p != nil; p = p.Link {
			var s string
			if v, ok := valueProgs[p]; ok {
				s = v.String()
			} else if b, ok := blockProgs[p]; ok {
				s = b.String()
			} else {
				s = "   " // most value and branch strings are 2-3 characters long
			}
			f.Logf("%s\t%s\n", s, p)
		}
		if f.Config.HTML != nil {
			saved := ptxt.Ctxt.LineHist.PrintFilenameOnly
			ptxt.Ctxt.LineHist.PrintFilenameOnly = true
			var buf bytes.Buffer
			buf.WriteString("<code>")
			buf.WriteString("<dl class=\"ssa-gen\">")
			for p := ptxt; p != nil; p = p.Link {
				buf.WriteString("<dt class=\"ssa-prog-src\">")
				if v, ok := valueProgs[p]; ok {
					buf.WriteString(v.HTML())
				} else if b, ok := blockProgs[p]; ok {
					buf.WriteString(b.HTML())
				}
				buf.WriteString("</dt>")
				buf.WriteString("<dd class=\"ssa-prog\">")
				buf.WriteString(html.EscapeString(p.String()))
				// Note: a <dl> list has no <li> elements, so each entry is
				// just a <dt>/<dd> pair. (A stray "</li>" was removed here.)
				buf.WriteString("</dd>")
			}
			buf.WriteString("</dl>")
			buf.WriteString("</code>")
			f.Config.HTML.WriteColumn("genssa", buf.String())
			ptxt.Ctxt.LineHist.PrintFilenameOnly = saved
		}
	}

	// Emit static data
	if f.StaticData != nil {
		for _, n := range f.StaticData.([]*Node) {
			if !gen_as_init(n, false) {
				// Fix: format string has a single %v, so only pass n.
				// (Previously an extra argument produced "%!(EXTRA ...)" output.)
				Fatalf("non-static data marked as static: %v\n\n", n)
			}
		}
	}

	// Allocate stack frame
	allocauto(ptxt)

	// Generate gc bitmaps.
	liveness(Curfn, ptxt, gcargs, gclocals)
	gcsymdup(gcargs)
	gcsymdup(gclocals)

	// Add frame prologue. Zero ambiguously live variables.
	Thearch.Defframe(ptxt)
	if Debug['f'] != 0 {
		frame(0)
	}

	// Remove leftover instrumentation from the instruction stream.
	removevardef(ptxt)

	f.Config.HTML.Close()
}
// movZero generates a register indirect move with a 0 immediate and keeps track of bytes left and next offset.
// It emits one store of `width` zero bytes at regnum+offset, and returns
// the remaining byte count and the offset for the next store.
func movZero(as obj.As, width int64, nbytes int64, offset int64, regnum int16) (nleft int64, noff int64) {
	// TODO: use zero register on archs that support it.
	store := Prog(as)
	store.From.Type = obj.TYPE_CONST
	store.From.Offset = 0
	store.To.Type = obj.TYPE_MEM
	store.To.Reg = regnum
	store.To.Offset = offset
	return nbytes - width, offset + width
}
// FloatingEQNEJump describes one conditional jump used when lowering
// floating-point == / != comparisons (which need two branches because of
// NaN handling).
type FloatingEQNEJump struct {
	Jump  obj.As // jump instruction to emit
	Index int    // index into b.Succs of the jump's target
}
// oneFPJump emits a single conditional jump for a floating-point
// equality block, records it as an unresolved Branch, and attaches a
// branch-prediction hint for liblink. It returns the extended branch list.
func oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction, branches []Branch) []Branch {
	jmp := Prog(jumps.Jump)
	jmp.To.Type = obj.TYPE_BRANCH
	idx := jumps.Index
	branches = append(branches, Branch{jmp, b.Succs[idx]})
	if idx == 1 {
		// Jumping to the second successor inverts the prediction.
		likely = -likely
	}
	// liblink reorders the instruction stream as it sees fit.
	// Pass along what we know so liblink can make use of it.
	// TODO: Once we've fully switched to SSA,
	// make liblink leave our output alone.
	switch likely {
	case ssa.BranchLikely:
		jmp.From.Type = obj.TYPE_CONST
		jmp.From.Offset = 1
	case ssa.BranchUnlikely:
		jmp.From.Type = obj.TYPE_CONST
		jmp.From.Offset = 0
	}
	return branches
}
// SSAGenFPJump emits the branch sequence for a floating-point EQ/NE block.
// jumps is indexed by which successor is the fallthrough (next): row 0 is
// used when next is Succs[0], row 1 when next is Succs[1]. If neither
// successor is the fallthrough block an explicit JMP to Succs[1] is added.
func SSAGenFPJump(s *SSAGenState, b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
	likely := b.Likely
	switch next {
	case b.Succs[0]:
		s.Branches = oneFPJump(b, &jumps[0][0], likely, s.Branches)
		s.Branches = oneFPJump(b, &jumps[0][1], likely, s.Branches)
	case b.Succs[1]:
		s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches)
		s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches)
	default:
		// Neither successor follows; branch as for Succs[1] and add an
		// unconditional jump to reach it.
		s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches)
		s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches)
		q := Prog(obj.AJMP)
		q.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, Branch{q, b.Succs[1]})
	}
}
// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
func AddAux(a *obj.Addr, v *ssa.Value) {
	AddAux2(a, v, v.AuxInt)
}
// AddAux2 adds offset plus v's symbolic aux information to the memory
// address a: the integer offset is added directly, and any Aux symbol
// (extern, argument, or auto) determines a's name class and symbol.
func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
	if a.Type != obj.TYPE_MEM {
		v.Fatalf("bad AddAux addr %s", a)
	}
	// add integer offset
	a.Offset += offset

	// If no additional symbol offset, we're done.
	if v.Aux == nil {
		return
	}
	// Add symbol's offset from its base register.
	switch sym := v.Aux.(type) {
	case *ssa.ExternSymbol:
		a.Name = obj.NAME_EXTERN
		switch s := sym.Sym.(type) {
		case *Sym:
			a.Sym = Linksym(s)
		case *obj.LSym:
			a.Sym = s
		default:
			v.Fatalf("ExternSymbol.Sym is %T", s)
		}
	case *ssa.ArgSymbol:
		n := sym.Node.(*Node)
		a.Name = obj.NAME_PARAM
		a.Node = n
		a.Sym = Linksym(n.Orig.Sym)
		a.Offset += n.Xoffset // TODO: why do I have to add this here? I don't for auto variables.
	case *ssa.AutoSymbol:
		n := sym.Node.(*Node)
		a.Name = obj.NAME_AUTO
		a.Node = n
		a.Sym = Linksym(n.Sym)
	default:
		v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
	}
}
// extendIndex extends v to a full int width, sign- or zero-extending
// according to v's type. Indexes wider than int are not yet handled.
func (s *state) extendIndex(v *ssa.Value) *ssa.Value {
	size := v.Type.Size()
	if size == s.config.IntSize {
		return v
	}
	if size > s.config.IntSize {
		// TODO: truncate 64-bit indexes on 32-bit pointer archs. We'd need to test
		// the high word and branch to out-of-bounds failure if it is not 0.
		s.Unimplementedf("64->32 index truncation not implemented")
		return v
	}

	// Extend value to the required size.
	// The switch key encodes (source size, int size) as 10*size+IntSize,
	// e.g. 14 = 1-byte source on a 4-byte-int arch.
	var op ssa.Op
	if v.Type.IsSigned() {
		switch 10*size + s.config.IntSize {
		case 14:
			op = ssa.OpSignExt8to32
		case 18:
			op = ssa.OpSignExt8to64
		case 24:
			op = ssa.OpSignExt16to32
		case 28:
			op = ssa.OpSignExt16to64
		case 48:
			op = ssa.OpSignExt32to64
		default:
			s.Fatalf("bad signed index extension %s", v.Type)
		}
	} else {
		switch 10*size + s.config.IntSize {
		case 14:
			op = ssa.OpZeroExt8to32
		case 18:
			op = ssa.OpZeroExt8to64
		case 24:
			op = ssa.OpZeroExt16to32
		case 28:
			op = ssa.OpZeroExt16to64
		case 48:
			op = ssa.OpZeroExt32to64
		default:
			s.Fatalf("bad unsigned index extension %s", v.Type)
		}
	}
	return s.newValue1(op, Types[TINT], v)
}
// SSARegNum returns the register (in cmd/internal/obj numbering) to
// which v has been allocated. Panics if v is not assigned to a
// register.
// TODO: Make this panic again once it stops happening routinely.
func SSARegNum(v *ssa.Value) int16 {
	reg := v.Block.Func.RegAlloc[v.ID]
	if reg == nil {
		// Report and fall back to register 0 instead of panicking.
		v.Unimplementedf("nil regnum for value: %s\n%s\n", v.LongString(), v.Block.Func)
		return 0
	}
	return Thearch.SSARegToReg[reg.(*ssa.Register).Num]
}
// AutoVar returns a *Node and int64 representing the auto variable and offset within it
// where v should be spilled.
func AutoVar(v *ssa.Value) (*Node, int64) {
	loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
	if v.Type.Size() > loc.Type.Size() {
		v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
	}
	return loc.N.(*Node), loc.Off
}
// fieldIdx finds the index of the field referred to by the ODOT node n.
// It panics if n's left operand is not a struct, if the named field's
// offset disagrees with n.Xoffset, or if the field cannot be found.
func fieldIdx(n *Node) int {
	typ := n.Left.Type
	if !typ.IsStruct() {
		panic("ODOT's LHS is not a struct")
	}

	want := n.Sym
	idx := 0
	for _, field := range typ.Fields().Slice() {
		if field.Sym == want {
			// Sanity-check that typechecking recorded a matching offset.
			if field.Offset != n.Xoffset {
				panic("field offset doesn't match")
			}
			return idx
		}
		idx++
	}

	panic(fmt.Sprintf("can't find field in expr %s\n", n))

	// TODO: keep the result of this function somewhere in the ODOT Node
	// so we don't have to recompute it each time we need it.
}
// ssaExport exports a bunch of compiler services for the ssa backend.
type ssaExport struct {
	log           bool // emit Logf output
	unimplemented bool // set once an unimplemented feature is hit; suppresses further errors
	mustImplement bool // when set, unimplemented features are fatal instead of best-effort
}
// Type accessors: expose the front end's predeclared types to the SSA
// backend through the ssa.Type interface.
func (s *ssaExport) TypeBool() ssa.Type    { return Types[TBOOL] }
func (s *ssaExport) TypeInt8() ssa.Type    { return Types[TINT8] }
func (s *ssaExport) TypeInt16() ssa.Type   { return Types[TINT16] }
func (s *ssaExport) TypeInt32() ssa.Type   { return Types[TINT32] }
func (s *ssaExport) TypeInt64() ssa.Type   { return Types[TINT64] }
func (s *ssaExport) TypeUInt8() ssa.Type   { return Types[TUINT8] }
func (s *ssaExport) TypeUInt16() ssa.Type  { return Types[TUINT16] }
func (s *ssaExport) TypeUInt32() ssa.Type  { return Types[TUINT32] }
func (s *ssaExport) TypeUInt64() ssa.Type  { return Types[TUINT64] }
func (s *ssaExport) TypeFloat32() ssa.Type { return Types[TFLOAT32] }
func (s *ssaExport) TypeFloat64() ssa.Type { return Types[TFLOAT64] }
func (s *ssaExport) TypeInt() ssa.Type     { return Types[TINT] }
func (s *ssaExport) TypeUintptr() ssa.Type { return Types[TUINTPTR] }
func (s *ssaExport) TypeString() ssa.Type  { return Types[TSTRING] }
func (s *ssaExport) TypeBytePtr() ssa.Type { return Ptrto(Types[TUINT8]) }
// StringData returns a symbol (a *Sym wrapped in an interface) which
// is the data component of a global string constant containing s.
func (*ssaExport) StringData(s string) interface{} {
	// TODO: is idealstring correct? It might not matter...
	_, data := stringsym(s)
	return &ssa.ExternSymbol{Typ: idealstring, Sym: data}
}
// Auto allocates a new automatic (stack) variable of type t for use by
// the SSA backend and registers it with the current function.
func (e *ssaExport) Auto(t ssa.Type) ssa.GCNode {
	n := temp(t.(*Type))   // Note: adds new auto to Curfn.Func.Dcl list
	e.mustImplement = true // This modifies the input to SSA, so we want to make sure we succeed from here!
	return n
}
// SplitString splits a string-typed LocalSlot into its pointer and length
// parts. Non-address-taken autos get two fresh named variables; otherwise
// the two parts are offsets into the original slot.
func (e *ssaExport) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	ptrType := Ptrto(Types[TUINT8])
	lenType := Types[TINT]
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this string up into two separate variables.
		p := e.namedAuto(n.Sym.Name+".ptr", ptrType)
		l := e.namedAuto(n.Sym.Name+".len", lenType)
		return ssa.LocalSlot{p, ptrType, 0}, ssa.LocalSlot{l, lenType, 0}
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{n, ptrType, name.Off}, ssa.LocalSlot{n, lenType, name.Off + int64(Widthptr)}
}
// SplitInterface splits an interface-typed LocalSlot into its itab/type
// word and its data word. Non-address-taken autos get two fresh named
// variables; otherwise the parts are offsets into the original slot.
func (e *ssaExport) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	t := Ptrto(Types[TUINT8])
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this interface up into two separate variables.
		f := ".itab"
		if n.Type.IsEmptyInterface() {
			// Empty interfaces hold a *type rather than an *itab.
			f = ".type"
		}
		c := e.namedAuto(n.Sym.Name+f, t)
		d := e.namedAuto(n.Sym.Name+".data", t)
		return ssa.LocalSlot{c, t, 0}, ssa.LocalSlot{d, t, 0}
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{n, t, name.Off}, ssa.LocalSlot{n, t, name.Off + int64(Widthptr)}
}
// SplitSlice splits a slice-typed LocalSlot into its pointer, length, and
// capacity parts. Non-address-taken autos get three fresh named variables;
// otherwise the parts are offsets into the original slot.
func (e *ssaExport) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	ptrType := Ptrto(name.Type.ElemType().(*Type))
	lenType := Types[TINT]
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this slice up into three separate variables.
		p := e.namedAuto(n.Sym.Name+".ptr", ptrType)
		l := e.namedAuto(n.Sym.Name+".len", lenType)
		c := e.namedAuto(n.Sym.Name+".cap", lenType)
		return ssa.LocalSlot{p, ptrType, 0}, ssa.LocalSlot{l, lenType, 0}, ssa.LocalSlot{c, lenType, 0}
	}
	// Return the three parts of the larger variable.
	return ssa.LocalSlot{n, ptrType, name.Off},
		ssa.LocalSlot{n, lenType, name.Off + int64(Widthptr)},
		ssa.LocalSlot{n, lenType, name.Off + int64(2*Widthptr)}
}
// SplitComplex splits a complex-typed LocalSlot into its real and
// imaginary float parts (float64 for complex128, float32 for complex64).
func (e *ssaExport) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
	n := name.N.(*Node)
	s := name.Type.Size() / 2 // size of each float half
	var t *Type
	if s == 8 {
		t = Types[TFLOAT64]
	} else {
		t = Types[TFLOAT32]
	}
	if n.Class == PAUTO && !n.Addrtaken {
		// Split this complex up into two separate variables.
		c := e.namedAuto(n.Sym.Name+".real", t)
		d := e.namedAuto(n.Sym.Name+".imag", t)
		return ssa.LocalSlot{c, t, 0}, ssa.LocalSlot{d, t, 0}
	}
	// Return the two parts of the larger variable.
	return ssa.LocalSlot{n, t, name.Off}, ssa.LocalSlot{n, t, name.Off + s}
}
// SplitStruct returns the LocalSlot for field i of the struct-typed slot
// name, either as a fresh named auto or as an offset into the original.
func (e *ssaExport) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
	n := name.N.(*Node)
	st := name.Type
	ft := st.FieldType(i)
	if n.Class == PAUTO && !n.Addrtaken {
		// Note: the _ field may appear several times. But
		// have no fear, identically-named but distinct Autos are
		// ok, albeit maybe confusing for a debugger.
		x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft)
		return ssa.LocalSlot{x, ft, 0}
	}
	return ssa.LocalSlot{n, ft, name.Off + st.FieldOff(i)}
}
// namedAuto returns a new AUTO variable with the given name and type.
// The new variable is marked used, attached to the current function's
// declaration list, and its type's layout is computed.
func (e *ssaExport) namedAuto(name string, typ ssa.Type) ssa.GCNode {
	t := typ.(*Type)
	s := Lookup(name)
	n := Nod(ONAME, nil, nil)
	s.Def = n
	s.Def.Used = true
	n.Sym = s
	n.Type = t
	n.Class = PAUTO
	n.Addable = true
	n.Ullman = 1
	n.Esc = EscNever
	n.Xoffset = 0 // offset assigned later by frame allocation
	n.Name.Curfn = Curfn
	Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)

	dowidth(t)
	e.mustImplement = true // we've mutated the function; must finish in SSA

	return n
}
// CanSSA reports whether values of type t can be represented in SSA form.
func (e *ssaExport) CanSSA(t ssa.Type) bool {
	return canSSAType(t.(*Type))
}

// Line formats a line number for diagnostics.
func (e *ssaExport) Line(line int32) string {
	return linestr(line)
}

// Logf logs a message from the compiler.
func (e *ssaExport) Logf(msg string, args ...interface{}) {
	// If e was marked as unimplemented, anything could happen. Ignore.
	if e.log && !e.unimplemented {
		fmt.Printf(msg, args...)
	}
}

// Log reports whether logging is enabled.
func (e *ssaExport) Log() bool {
	return e.log
}

// Fatalf reports a compiler error and exits.
func (e *ssaExport) Fatalf(line int32, msg string, args ...interface{}) {
	// If e was marked as unimplemented, anything could happen. Ignore.
	if !e.unimplemented {
		lineno = line
		Fatalf(msg, args...)
	}
}

// Unimplementedf reports that the function cannot be compiled.
// It will be removed once SSA work is complete.
func (e *ssaExport) Unimplementedf(line int32, msg string, args ...interface{}) {
	if e.mustImplement {
		lineno = line
		Fatalf(msg, args...)
	}
	const alwaysLog = false // enable to calculate top unimplemented features
	if !e.unimplemented && (e.log || alwaysLog) {
		// first implementation failure, print explanation
		fmt.Printf("SSA unimplemented: "+msg+"\n", args...)
	}
	e.unimplemented = true
}

// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssaExport) Warnl(line int32, fmt_ string, args ...interface{}) {
	Warnl(line, fmt_, args...)
}

// Debug_checknil reports whether nil-check debugging is enabled.
func (e *ssaExport) Debug_checknil() bool {
	return Debug_checknil != 0
}

// Typ returns n's type as an ssa.Type.
func (n *Node) Typ() ssa.Type {
	return n.Type
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/types"
"fmt"
"sort"
)
const (
	// expression switch
	switchKindExpr  = iota // switch a {...} or switch 5 {...}
	switchKindTrue         // switch true {...} or switch {...}
	switchKindFalse        // switch false {...}
)

const (
	binarySearchMin = 4 // minimum number of cases for binary search
	integerRangeMin = 2 // minimum size of integer ranges
)

// An exprSwitch walks an expression switch.
type exprSwitch struct {
	exprname *Node // node for the expression being switched on
	kind     int   // kind of switch statement (switchKind*)
}

// A typeSwitch walks a type switch.
type typeSwitch struct {
	hashname *Node // node for the hash of the type of the variable being switched on
	facename *Node // node for the concrete type of the variable being switched on
	okname   *Node // boolean node used for comma-ok type assertions
}

// A caseClause is a single case clause in a switch statement.
type caseClause struct {
	node    *Node  // points at case statement
	ordinal int    // position in switch
	hash    uint32 // hash of a type switch
	// isconst indicates whether this case clause is a constant,
	// for the purposes of the switch code generation.
	// For expression switches, that's generally literals (case 5:, not case x:).
	// For type switches, that's concrete types (case time.Time:), not interfaces (case io.Reader:).
	isconst bool
}

// caseClauses are all the case clauses in a switch statement.
type caseClauses struct {
	list   []caseClause // general cases
	defjmp *Node        // OGOTO for default case or OBREAK if no default case present
	niljmp *Node        // OGOTO for nil type case in a type switch
}
// typecheckswitch typechecks a switch statement: it typechecks the switch
// condition (expression or type switch), every case expression against the
// condition's type, the per-case guarded variables of a type switch, and
// finally each case body. Errors are reported with yyerrorl.
func typecheckswitch(n *Node) {
	typecheckslice(n.Ninit.Slice(), Etop)

	var nilonly string // non-empty when cases may only be compared to nil ("slice", "func", "map")
	var top int        // Erv for expression switches, Etype for type switches
	var t *types.Type

	if n.Left != nil && n.Left.Op == OTYPESW {
		// type switch
		top = Etype
		n.Left.Right = typecheck(n.Left.Right, Erv)
		t = n.Left.Right.Type
		if t != nil && !t.IsInterface() {
			yyerrorl(n.Pos, "cannot type switch on non-interface value %L", n.Left.Right)
		}
		if v := n.Left.Left; v != nil && !isblank(v) && n.List.Len() == 0 {
			// We don't actually declare the type switch's guarded
			// declaration itself. So if there are no cases, we
			// won't notice that it went unused.
			yyerrorl(v.Pos, "%v declared and not used", v.Sym)
		}
	} else {
		// expression switch
		top = Erv
		if n.Left != nil {
			n.Left = typecheck(n.Left, Erv)
			n.Left = defaultlit(n.Left, nil)
			t = n.Left.Type
		} else {
			// switch {...} is equivalent to switch true {...}
			t = types.Types[TBOOL]
		}
		if t != nil {
			switch {
			case !okforeq[t.Etype]:
				yyerrorl(n.Pos, "cannot switch on %L", n.Left)
			case t.IsSlice():
				nilonly = "slice"
			case t.IsArray() && !IsComparable(t):
				yyerrorl(n.Pos, "cannot switch on %L", n.Left)
			case t.IsStruct():
				if f := IncomparableField(t); f != nil {
					yyerrorl(n.Pos, "cannot switch on %L (struct containing %v cannot be compared)", n.Left, f.Type)
				}
			case t.Etype == TFUNC:
				nilonly = "func"
			case t.IsMap():
				nilonly = "map"
			}
		}
	}

	n.Type = t

	// def/niltype remember the first default case and first nil case,
	// to report duplicates against.
	var def, niltype *Node
	for _, ncase := range n.List.Slice() {
		if ncase.List.Len() == 0 {
			// default
			if def != nil {
				setlineno(ncase)
				yyerrorl(ncase.Pos, "multiple defaults in switch (first at %v)", def.Line())
			} else {
				def = ncase
			}
		} else {
			ls := ncase.List.Slice()
			for i1, n1 := range ls {
				setlineno(n1)
				ls[i1] = typecheck(ls[i1], Erv|Etype)
				n1 = ls[i1]
				if n1.Type == nil || t == nil {
					continue
				}
				setlineno(ncase)
				switch top {
				// expression switch
				case Erv:
					ls[i1] = defaultlit(ls[i1], t)
					n1 = ls[i1]
					switch {
					case n1.Op == OTYPE:
						yyerrorl(ncase.Pos, "type %v is not an expression", n1.Type)
					case n1.Type != nil && assignop(n1.Type, t, nil) == 0 && assignop(t, n1.Type, nil) == 0:
						if n.Left != nil {
							yyerrorl(ncase.Pos, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left, n1.Type, t)
						} else {
							yyerrorl(ncase.Pos, "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type)
						}
					case nilonly != "" && !isnil(n1):
						yyerrorl(ncase.Pos, "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left)
					case t.IsInterface() && !n1.Type.IsInterface() && !IsComparable(n1.Type):
						yyerrorl(ncase.Pos, "invalid case %L in switch (incomparable type)", n1)
					}

				// type switch
				case Etype:
					var missing, have *types.Field
					var ptr int
					switch {
					case n1.Op == OLITERAL && n1.Type.IsKind(TNIL):
						// case nil:
						if niltype != nil {
							yyerrorl(ncase.Pos, "multiple nil cases in type switch (first at %v)", niltype.Line())
						} else {
							niltype = ncase
						}
					case n1.Op != OTYPE && n1.Type != nil: // should this be ||?
						yyerrorl(ncase.Pos, "%L is not a type", n1)
						// reset to original type
						n1 = n.Left.Right
						ls[i1] = n1
					case !n1.Type.IsInterface() && t.IsInterface() && !implements(n1.Type, t, &missing, &have, &ptr):
						if have != nil && !missing.Broke() && !have.Broke() {
							yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
								" (wrong type for %v method)\n\thave %v%S\n\twant %v%S", n.Left.Right, n1.Type, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
						} else if !missing.Broke() {
							if ptr != 0 {
								yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
									" (%v method has pointer receiver)", n.Left.Right, n1.Type, missing.Sym)
							} else {
								yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
									" (missing %v method)", n.Left.Right, n1.Type, missing.Sym)
							}
						}
					}
				}
			}
		}

		if n.Type == nil || n.Type.IsUntyped() {
			// if the value we're switching on has no type or is untyped,
			// we've already printed an error and don't need to continue
			// typechecking the body
			return
		}

		if top == Etype {
			// Give each type switch's guarded variable its per-case type.
			ll := ncase.List
			if ncase.Rlist.Len() != 0 {
				nvar := ncase.Rlist.First()
				if ll.Len() == 1 && ll.First().Type != nil && !ll.First().Type.IsKind(TNIL) {
					// single entry type switch
					nvar.Type = ll.First().Type
				} else {
					// multiple entry type switch or default
					nvar.Type = n.Type
				}
				nvar = typecheck(nvar, Erv|Easgn)
				ncase.Rlist.SetFirst(nvar)
			}
		}

		typecheckslice(ncase.Nbody.Slice(), Etop)
	}

	switch top {
	// expression switch
	case Erv:
		checkDupExprCases(n.Left, n.List.Slice())
	}
}
// walkswitch walks a switch statement, dispatching to the type-switch
// or expression-switch walker. A condition-less switch is normalized to
// `switch true` first.
func walkswitch(sw *Node) {
	// convert switch {...} to switch true {...}
	if sw.Left == nil {
		cond := nodbool(true)
		cond = typecheck(cond, Erv)
		cond = defaultlit(cond, nil)
		sw.Left = cond
	}

	if sw.Left.Op == OTYPESW {
		var ts typeSwitch
		ts.walk(sw)
		return
	}
	var es exprSwitch
	es.walk(sw)
}
// walk generates an AST implementing sw.
// sw is an expression switch.
// The AST is generally of the form of a linear
// search using if..goto, although binary search
// is used with long runs of constants.
func (s *exprSwitch) walk(sw *Node) {
	casebody(sw, nil)

	cond := sw.Left
	sw.Left = nil

	// Classify the switch: plain expression, or boolean true/false form.
	s.kind = switchKindExpr
	if Isconst(cond, CTBOOL) {
		s.kind = switchKindTrue
		if !cond.Val().U.(bool) {
			s.kind = switchKindFalse
		}
	}

	cond = walkexpr(cond, &sw.Ninit)
	t := sw.Type
	if t == nil {
		return
	}

	// convert the switch into OIF statements
	var cas []*Node
	if s.kind == switchKindTrue || s.kind == switchKindFalse {
		s.exprname = nodbool(s.kind == switchKindTrue)
	} else if consttype(cond) > 0 {
		// leave constants to enable dead code elimination (issue 9608)
		s.exprname = cond
	} else {
		// evaluate the condition once into a temporary
		s.exprname = temp(cond.Type)
		cas = []*Node{nod(OAS, s.exprname, cond)}
		typecheckslice(cas, Etop)
	}

	// Enumerate the cases and prepare the default case.
	clauses := s.genCaseClauses(sw.List.Slice())
	sw.List.Set(nil)
	cc := clauses.list

	// handle the cases in order, binary-searching runs of constants
	for len(cc) > 0 {
		run := 1
		if okforcmp[t.Etype] && cc[0].isconst {
			// do binary search on runs of constants
			for ; run < len(cc) && cc[run].isconst; run++ {
			}
			// sort and compile constants
			sort.Sort(caseClauseByConstVal(cc[:run]))
		}

		a := s.walkCases(cc[:run])
		cas = append(cas, a)
		cc = cc[run:]
	}

	// handle default case
	if nerrors == 0 {
		cas = append(cas, clauses.defjmp)
		sw.Nbody.Prepend(cas...)
		walkstmtlist(sw.Nbody.Slice())
	}
}
// walkCases generates an AST implementing the cases in cc:
// a linear chain of if..goto for short runs, and a binary search
// (split at the median case) for longer ones.
func (s *exprSwitch) walkCases(cc []caseClause) *Node {
	if len(cc) < binarySearchMin {
		// linear search
		var cas []*Node
		for _, c := range cc {
			n := c.node
			lno := setlineno(n)

			a := nod(OIF, nil, nil)
			if rng := n.List.Slice(); rng != nil {
				// Integer range.
				// exprname is a temp or a constant,
				// so it is safe to evaluate twice.
				// In most cases, this conjunction will be
				// rewritten by walkinrange into a single comparison.
				low := nod(OGE, s.exprname, rng[0])
				high := nod(OLE, s.exprname, rng[1])
				a.Left = nod(OANDAND, low, high)
			} else if (s.kind != switchKindTrue && s.kind != switchKindFalse) || assignop(n.Left.Type, s.exprname.Type, nil) == OCONVIFACE || assignop(s.exprname.Type, n.Left.Type, nil) == OCONVIFACE {
				a.Left = nod(OEQ, s.exprname, n.Left) // if name == val
			} else if s.kind == switchKindTrue {
				a.Left = n.Left // if val
			} else {
				// s.kind == switchKindFalse
				a.Left = nod(ONOT, n.Left, nil) // if !val
			}
			a.Left = typecheck(a.Left, Erv)
			a.Left = defaultlit(a.Left, nil)
			a.Nbody.Set1(n.Right) // goto l

			cas = append(cas, a)
			lineno = lno
		}
		return liststmt(cas)
	}

	// find the middle and recur
	half := len(cc) / 2
	a := nod(OIF, nil, nil)
	n := cc[half-1].node
	var mid *Node
	if rng := n.List.Slice(); rng != nil {
		mid = rng[1] // high end of range
	} else {
		mid = n.Left
	}
	le := nod(OLE, s.exprname, mid)
	if Isconst(mid, CTSTR) {
		// Search by length and then by value; see caseClauseByConstVal.
		lenlt := nod(OLT, nod(OLEN, s.exprname, nil), nod(OLEN, mid, nil))
		leneq := nod(OEQ, nod(OLEN, s.exprname, nil), nod(OLEN, mid, nil))
		a.Left = nod(OOROR, lenlt, nod(OANDAND, leneq, le))
	} else {
		a.Left = le
	}
	a.Left = typecheck(a.Left, Erv)
	a.Left = defaultlit(a.Left, nil)
	a.Nbody.Set1(s.walkCases(cc[:half]))
	a.Rlist.Set1(s.walkCases(cc[half:]))
	return a
}
// casebody builds separate lists of statements and cases.
// It makes labels between cases and statements
// and deals with fallthrough, break, and unreachable statements.
// Afterwards sw.List holds the (possibly expanded) OCASE nodes and
// sw.Nbody holds the labeled statement bodies they jump to.
func casebody(sw *Node, typeswvar *Node) {
	if sw.List.Len() == 0 {
		return
	}

	lno := setlineno(sw)

	var cas []*Node  // cases
	var stat []*Node // statements
	var def *Node    // defaults
	br := nod(OBREAK, nil, nil)

	for _, n := range sw.List.Slice() {
		setlineno(n)
		if n.Op != OXCASE {
			Fatalf("casebody %v", n.Op)
		}
		n.Op = OCASE
		needvar := n.List.Len() != 1 || n.List.First().Op == OLITERAL

		// jmp is the goto that each generated case takes to reach
		// this case's statement body.
		jmp := nod(OGOTO, autolabel(".s"), nil)
		switch n.List.Len() {
		case 0:
			// default
			if def != nil {
				yyerrorl(n.Pos, "more than one default case")
			}
			// reuse original default case
			n.Right = jmp
			def = n
		case 1:
			// one case -- reuse OCASE node
			n.Left = n.List.First()
			n.Right = jmp
			n.List.Set(nil)
			cas = append(cas, n)
		default:
			// Expand multi-valued cases and detect ranges of integer cases.
			if typeswvar != nil || sw.Left.Type.IsInterface() || !n.List.First().Type.IsInteger() || n.List.Len() < integerRangeMin {
				// Can't use integer ranges. Expand each case into a separate node.
				for _, n1 := range n.List.Slice() {
					cas = append(cas, nod(OCASE, n1, jmp))
				}
				break
			}
			// Find integer ranges within runs of constants.
			s := n.List.Slice()
			j := 0
			for j < len(s) {
				// Find a run of constants.
				var run int
				for run = j; run < len(s) && Isconst(s[run], CTINT); run++ {
				}
				if run-j >= integerRangeMin {
					// Search for integer ranges in s[j:run].
					// Typechecking is done, so all values are already in an appropriate range.
					search := s[j:run]
					sort.Sort(constIntNodesByVal(search))
					for beg, end := 0, 1; end <= len(search); end++ {
						if end < len(search) && search[end].Int64() == search[end-1].Int64()+1 {
							continue
						}
						if end-beg >= integerRangeMin {
							// Record range in List.
							c := nod(OCASE, nil, jmp)
							c.List.Set2(search[beg], search[end-1])
							cas = append(cas, c)
						} else {
							// Not large enough for range; record separately.
							for _, n := range search[beg:end] {
								cas = append(cas, nod(OCASE, n, jmp))
							}
						}
						beg = end
					}
					j = run
				}
				// Advance to next constant, adding individual non-constant
				// or as-yet-unhandled constant cases as we go.
				for ; j < len(s) && (j < run || !Isconst(s[j], CTINT)); j++ {
					cas = append(cas, nod(OCASE, s[j], jmp))
				}
			}
		}

		// Emit the label this case's goto targets, then its body.
		stat = append(stat, nod(OLABEL, jmp.Left, nil))
		if typeswvar != nil && needvar && n.Rlist.Len() != 0 {
			// Declare and initialize the type switch's guarded variable.
			l := []*Node{
				nod(ODCL, n.Rlist.First(), nil),
				nod(OAS, n.Rlist.First(), typeswvar),
			}
			typecheckslice(l, Etop)
			stat = append(stat, l...)
		}
		stat = append(stat, n.Nbody.Slice()...)

		// Search backwards for the index of the fallthrough
		// statement. Do not assume it'll be in the last
		// position, since in some cases (e.g. when the statement
		// list contains autotmp_ variables), one or more OVARKILL
		// nodes will be at the end of the list.
		fallIndex := len(stat) - 1
		for stat[fallIndex].Op == OVARKILL {
			fallIndex--
		}
		last := stat[fallIndex]
		if last.Op != OFALL {
			// No fallthrough: terminate this case's body with a break.
			stat = append(stat, br)
		}
	}

	stat = append(stat, br)
	if def != nil {
		cas = append(cas, def)
	}

	sw.List.Set(cas)
	sw.Nbody.Set(stat)
	lineno = lno
}
// genCaseClauses collects the case clauses of an expression switch into
// a caseClauses value, separating out the default case and marking which
// clauses are constants (and therefore eligible for binary search).
func (s *exprSwitch) genCaseClauses(clauses []*Node) caseClauses {
	var cc caseClauses
	for _, clause := range clauses {
		if clause.Left == nil && clause.List.Len() == 0 {
			// default case
			if cc.defjmp != nil {
				Fatalf("duplicate default case not detected during typechecking")
			}
			cc.defjmp = clause.Right
			continue
		}
		// A non-empty List means an integer range built by casebody,
		// which is constant by construction.
		isconst := clause.List.Len() > 0
		switch consttype(clause.Left) {
		case CTFLT, CTINT, CTRUNE, CTSTR:
			isconst = true
		}
		cc.list = append(cc.list, caseClause{
			node:    clause,
			ordinal: len(cc.list),
			isconst: isconst,
		})
	}

	if cc.defjmp == nil {
		// No explicit default case; fall out of the switch.
		cc.defjmp = nod(OBREAK, nil, nil)
	}
	return cc
}
// genCaseClauses collects the clauses of a type switch into a caseClauses
// value, splitting off the default and nil cases and hashing each case type.
func (s *typeSwitch) genCaseClauses(clauses []*Node) caseClauses {
	var cc caseClauses
	for _, clause := range clauses {
		switch {
		case clause.Left == nil:
			// default case
			if cc.defjmp != nil {
				Fatalf("duplicate default case not detected during typechecking")
			}
			cc.defjmp = clause.Right
		case clause.Left.Op == OLITERAL:
			// nil case in type switch
			if cc.niljmp != nil {
				Fatalf("duplicate nil case not detected during typechecking")
			}
			cc.niljmp = clause.Right
		default:
			// general case: concrete types are "constant" (hash-comparable);
			// interface types must be tested dynamically.
			cc.list = append(cc.list, caseClause{
				node:    clause,
				ordinal: len(cc.list),
				isconst: !clause.Left.Type.IsInterface(),
				hash:    typehash(clause.Left.Type),
			})
		}
	}

	if cc.defjmp == nil {
		// No explicit default case; fall out of the switch.
		cc.defjmp = nod(OBREAK, nil, nil)
	}

	// diagnose duplicate cases
	s.checkDupCases(cc.list)
	return cc
}
// checkDupCases reports an error for each type-switch case whose type is
// identical (per eqtype) to the type of an earlier case.
func (s *typeSwitch) checkDupCases(cc []caseClause) {
	if len(cc) < 2 {
		return
	}
	// We store seen types in a map keyed by type hash.
	// It is possible, but very unlikely, for multiple distinct types to have the same hash.
	seen := make(map[uint32][]*Node)
	// To avoid many small allocations of length 1 slices,
	// also set up a single large slice to slice into.
	nn := make([]*Node, 0, len(cc))
Outer:
	for _, c := range cc {
		prev, ok := seen[c.hash]
		if !ok {
			// First entry for this hash.
			nn = append(nn, c.node)
			// The three-index slice caps this entry at its own end, so a
			// later append to it allocates a new array instead of
			// overwriting subsequent elements of nn.
			seen[c.hash] = nn[len(nn)-1 : len(nn) : len(nn)]
			continue
		}
		for _, n := range prev {
			if eqtype(n.Left.Type, c.node.Left.Type) {
				yyerrorl(c.node.Pos, "duplicate case %v in type switch\n\tprevious case at %v", c.node.Left.Type, n.Line())
				// avoid double-reporting errors
				continue Outer
			}
		}
		seen[c.hash] = append(seen[c.hash], c.node)
	}
}
// checkDupExprCases reports errors for duplicate constant cases in an
// expression switch. exprname is the expression being switched on; it is
// nil for a boolean (naked) switch, which is not checked.
func checkDupExprCases(exprname *Node, clauses []*Node) {
	// boolean (naked) switch, nothing to do.
	if exprname == nil {
		return
	}
	// The common case is that s's expression is not an interface.
	// In that case, all constant clauses have the same type,
	// so checking for duplicates can be done solely by value.
	if !exprname.Type.IsInterface() {
		seen := make(map[interface{}]*Node)
		for _, ncase := range clauses {
			for _, n := range ncase.List.Slice() {
				// Can't check for duplicates that aren't constants, per the spec. Issue 15896.
				// Don't check for duplicate bools. Although the spec allows it,
				// (1) the compiler hasn't checked it in the past, so compatibility mandates it, and
				// (2) it would disallow useful things like
				//     case GOARCH == "arm" && GOARM == "5":
				//     case GOARCH == "arm":
				// which would both evaluate to false for non-ARM compiles.
				if ct := consttype(n); ct == 0 || ct == CTBOOL {
					continue
				}

				val := n.Val().Interface()
				prev, dup := seen[val]
				if !dup {
					seen[val] = n
					continue
				}
				yyerrorl(ncase.Pos, "duplicate case %s in switch\n\tprevious case at %v",
					nodeAndVal(n), prev.Line())
			}
		}
		return
	}

	// s's expression is an interface. This is fairly rare, so
	// keep this simple. Case expressions are only duplicates if
	// they have the same value and identical types.
	//
	// In general, we have to use eqtype to test type identity,
	// because == gives false negatives for anonymous types and
	// the byte/uint8 and rune/int32 builtin type aliases.
	// However, this is not a problem here, because constant
	// expressions are always untyped or have a named type, and we
	// explicitly handle the builtin type aliases below.
	//
	// This approach may need to be revisited though if we fix
	// #21866 by treating all type aliases like byte/uint8 and
	// rune/int32.
	type typeVal struct {
		typ *types.Type
		val interface{}
	}
	seen := make(map[typeVal]*Node)
	for _, ncase := range clauses {
		for _, n := range ncase.List.Slice() {
			if ct := consttype(n); ct == 0 || ct == CTBOOL {
				continue
			}
			tv := typeVal{
				typ: n.Type,
				val: n.Val().Interface(),
			}
			// Canonicalize the builtin aliases so byte/uint8 and
			// rune/int32 cases compare as duplicates.
			switch tv.typ {
			case types.Bytetype:
				tv.typ = types.Types[TUINT8]
			case types.Runetype:
				tv.typ = types.Types[TINT32]
			}
			prev, dup := seen[tv]
			if !dup {
				seen[tv] = n
				continue
			}
			yyerrorl(ncase.Pos, "duplicate case %s in switch\n\tprevious case at %v",
				nodeAndVal(n), prev.Line())
		}
	}
}
// nodeAndVal formats n for a duplicate-case diagnostic, appending n's
// constant value whenever it differs from n's source representation.
func nodeAndVal(n *Node) string {
	repr := n.String()
	valStr := fmt.Sprintf("%#v", n.Val().Interface())
	if repr == valStr {
		return repr
	}
	return repr + " (value " + valStr + ")"
}
// walk generates an AST that implements sw,
// where sw is a type switch.
// The AST is generally of the form of a linear
// search using if..goto, although binary search
// is used with long runs of concrete types.
func (s *typeSwitch) walk(sw *Node) {
	cond := sw.Left
	sw.Left = nil

	if cond == nil {
		sw.List.Set(nil)
		return
	}
	if cond.Right == nil {
		yyerrorl(sw.Pos, "type switch must have an assignment")
		return
	}

	cond.Right = walkexpr(cond.Right, &sw.Ninit)
	if !cond.Right.Type.IsInterface() {
		yyerrorl(sw.Pos, "type switch must be on an interface")
		return
	}

	var cas []*Node

	// predeclare temporary variables and the boolean var
	s.facename = temp(cond.Right.Type)

	a := nod(OAS, s.facename, cond.Right)
	a = typecheck(a, Etop)
	cas = append(cas, a)

	s.okname = temp(types.Types[TBOOL])
	s.okname = typecheck(s.okname, Erv)

	s.hashname = temp(types.Types[TUINT32])
	s.hashname = typecheck(s.hashname, Erv)

	// set up labels and jumps
	casebody(sw, s.facename)

	clauses := s.genCaseClauses(sw.List.Slice())
	sw.List.Set(nil)
	def := clauses.defjmp

	// For empty interfaces, do:
	//     if e._type == nil {
	//         do nil case if it exists, otherwise default
	//     }
	//     h := e._type.hash
	// Use a similar strategy for non-empty interfaces.

	// Get interface descriptor word.
	// For empty interfaces this will be the type.
	// For non-empty interfaces this will be the itab.
	itab := nod(OITAB, s.facename, nil)

	// Check for nil first.
	i := nod(OIF, nil, nil)
	i.Left = nod(OEQ, itab, nodnil())
	if clauses.niljmp != nil {
		// Do explicit nil case right here.
		i.Nbody.Set1(clauses.niljmp)
	} else {
		// Jump to default case.
		lbl := autolabel(".s")
		i.Nbody.Set1(nod(OGOTO, lbl, nil))
		// Wrap default case with label.
		blk := nod(OBLOCK, nil, nil)
		blk.List.Set2(nod(OLABEL, lbl, nil), def)
		def = blk
	}
	i.Left = typecheck(i.Left, Erv)
	i.Left = defaultlit(i.Left, nil)
	cas = append(cas, i)

	// Load hash from type or itab.
	h := nodSym(ODOTPTR, itab, nil)
	h.Type = types.Types[TUINT32]
	h.SetTypecheck(1)
	if cond.Right.Type.IsEmptyInterface() {
		h.Xoffset = int64(2 * Widthptr) // offset of hash in runtime._type
	} else {
		h.Xoffset = int64(2 * Widthptr) // offset of hash in runtime.itab
	}
	h.SetBounded(true) // guaranteed not to fault
	a = nod(OAS, s.hashname, h)
	a = typecheck(a, Etop)
	cas = append(cas, a)

	cc := clauses.list

	// insert type equality check into each case block
	for _, c := range cc {
		c.node.Right = s.typeone(c.node)
	}

	// generate list of if statements, binary search for constant sequences
	for len(cc) > 0 {
		if !cc[0].isconst {
			n := cc[0].node
			cas = append(cas, n.Right)
			cc = cc[1:]
			continue
		}

		// identify run of constants
		var run int
		for run = 1; run < len(cc) && cc[run].isconst; run++ {
		}

		// sort by hash
		sort.Sort(caseClauseByType(cc[:run]))

		// for debugging: linear search
		if false {
			for i := 0; i < run; i++ {
				n := cc[i].node
				cas = append(cas, n.Right)
			}
			continue
		}

		// Combine adjacent cases with the same hash and drop the merged
		// clauses from the batch passed to walkCases. Previously the
		// merged clauses were left in place (and counted), so on a hash
		// collision their case bodies were generated twice.
		var batch []caseClause
		for i, j := 0, 0; i < run; i = j {
			hash := []*Node{cc[i].node.Right}
			for j = i + 1; j < run && cc[i].hash == cc[j].hash; j++ {
				hash = append(hash, cc[j].node.Right)
			}
			cc[i].node.Right = liststmt(hash)
			batch = append(batch, cc[i])
		}

		// binary search among cases to narrow by hash
		cas = append(cas, s.walkCases(batch))
		cc = cc[run:]
	}

	// handle default case
	if nerrors == 0 {
		cas = append(cas, def)
		sw.Nbody.Prepend(cas...)
		sw.List.Set(nil)
		walkstmtlist(sw.Nbody.Slice())
	}
}
// typeone generates an AST that jumps to the
// case body if the variable is of type t.
// t is the OCASE node: t.Left.Type is the case's type, t.Rlist (if
// non-empty) holds the per-case variable, and t.Right is the goto into
// the case body.
func (s *typeSwitch) typeone(t *Node) *Node {
	var name *Node
	var init Nodes
	if t.Rlist.Len() == 0 {
		// No per-case variable: assert into the blank identifier.
		name = nblank
		nblank = typecheck(nblank, Erv|Easgn)
	} else {
		name = t.Rlist.First()
		init.Append(nod(ODCL, name, nil))
		a := nod(OAS, name, nil)
		a = typecheck(a, Etop)
		init.Append(a)
	}

	// name, ok = facename.(type of this case)
	a := nod(OAS2, nil, nil)
	a.List.Set2(name, s.okname) // name, ok =
	b := nod(ODOTTYPE, s.facename, nil)
	b.Type = t.Left.Type // interface.(type)
	a.Rlist.Set1(b)
	a = typecheck(a, Etop)
	a = walkexpr(a, &init)
	init.Append(a)

	c := nod(OIF, nil, nil)
	c.Left = s.okname
	c.Nbody.Set1(t.Right) // if ok { goto l }

	init.Append(c)
	return init.asblock()
}
// walkCases generates an AST implementing the cases in cc.
// Short runs become a linear sequence of hash comparisons; runs of at
// least binarySearchMin cases binary-search on the type hash.
// cc must be sorted by hash (see caseClauseByType).
func (s *typeSwitch) walkCases(cc []caseClause) *Node {
	if len(cc) < binarySearchMin {
		// linear search: "if hashname == hash { case body }" per clause
		var cas []*Node
		for _, c := range cc {
			n := c.node
			if !c.isconst {
				Fatalf("typeSwitch walkCases")
			}
			a := nod(OIF, nil, nil)
			a.Left = nod(OEQ, s.hashname, nodintconst(int64(c.hash)))
			a.Left = typecheck(a.Left, Erv)
			a.Left = defaultlit(a.Left, nil)
			a.Nbody.Set1(n.Right)
			cas = append(cas, a)
		}
		return liststmt(cas)
	}

	// find the middle and recur
	half := len(cc) / 2
	a := nod(OIF, nil, nil)
	a.Left = nod(OLE, s.hashname, nodintconst(int64(cc[half-1].hash)))
	a.Left = typecheck(a.Left, Erv)
	a.Left = defaultlit(a.Left, nil)
	a.Nbody.Set1(s.walkCases(cc[:half]))
	a.Rlist.Set1(s.walkCases(cc[half:]))
	return a
}
// caseClauseByConstVal sorts clauses by constant value to enable binary search.
type caseClauseByConstVal []caseClause

func (x caseClauseByConstVal) Len() int      { return len(x) }
func (x caseClauseByConstVal) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x caseClauseByConstVal) Less(i, j int) bool {
	// n1 and n2 might be individual constants or integer ranges.
	// We have checked for duplicates already,
	// so ranges can be safely represented by any value in the range.
	n1 := x[i].node
	var v1 interface{}
	if s := n1.List.Slice(); s != nil {
		// Integer range: List holds [low, high]; use the low end.
		v1 = s[0].Val().U
	} else {
		v1 = n1.Left.Val().U
	}

	n2 := x[j].node
	var v2 interface{}
	if s := n2.List.Slice(); s != nil {
		v2 = s[0].Val().U
	} else {
		v2 = n2.Left.Val().U
	}

	switch v1 := v1.(type) {
	case *Mpflt:
		return v1.Cmp(v2.(*Mpflt)) < 0
	case *Mpint:
		return v1.Cmp(v2.(*Mpint)) < 0
	case string:
		// Sort strings by length and then by value.
		// It is much cheaper to compare lengths than values,
		// and all we need here is consistency.
		// We respect this sorting in exprSwitch.walkCases.
		a := v1
		b := v2.(string)
		if len(a) != len(b) {
			return len(a) < len(b)
		}
		return a < b
	}

	Fatalf("caseClauseByConstVal passed bad clauses %v < %v", x[i].node.Left, x[j].node.Left)
	return false
}
// caseClauseByType sorts type-switch clauses by their type hash, breaking
// ties by original position in the switch so the order is deterministic.
type caseClauseByType []caseClause

func (x caseClauseByType) Len() int      { return len(x) }
func (x caseClauseByType) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x caseClauseByType) Less(i, j int) bool {
	a, b := x[i], x[j]
	if a.hash == b.hash {
		// Hash collision: fall back to source order.
		return a.ordinal < b.ordinal
	}
	return a.hash < b.hash
}
// constIntNodesByVal sorts CTINT constant nodes by value. It is used by
// casebody to find consecutive integers that can be merged into ranges.
type constIntNodesByVal []*Node

func (x constIntNodesByVal) Len() int      { return len(x) }
func (x constIntNodesByVal) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x constIntNodesByVal) Less(i, j int) bool {
	return x[i].Val().U.(*Mpint).Cmp(x[j].Val().U.(*Mpint)) < 0
}
cmd/compile: fix duplicate code generation in swt.go
When combining adjacent type switch cases with the same type hash, we
failed to actually remove the combined cases, so we would generate
code for them twice.
We use MD5 for type hashes, so collisions are rare, but they do
currently appear in test/fixedbugs/bug248.dir/bug2.go, which is how I
noticed this failure.
Passes toolstash-check.
Change-Id: I66729b3366b96cb8ddc8fa6f3ebea11ef6d74012
Reviewed-on: https://go-review.googlesource.com/100461
Run-TryBot: Matthew Dempsky <e56ac2c500064424afac424d4b5e53bd0570a731@google.com>
TryBot-Result: Gobot Gobot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
Reviewed-by: Josh Bleecher Snyder <cdc248cc4d32e4fbfe04e45228163bcecd48a0b9@gmail.com>
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/types"
"fmt"
"sort"
)
// Kinds of expression switch, distinguished because a switch on a boolean
// constant can use the case expressions directly as conditions.
const (
	// expression switch
	switchKindExpr  = iota // switch a {...} or switch 5 {...}
	switchKindTrue         // switch true {...} or switch {...}
	switchKindFalse        // switch false {...}
)

// Thresholds controlling how switches are lowered.
const (
	binarySearchMin = 4 // minimum number of cases for binary search
	integerRangeMin = 2 // minimum size of integer ranges
)
// An exprSwitch walks an expression switch.
type exprSwitch struct {
	exprname *Node // node for the expression being switched on
	kind     int   // kind of switch statement (switchKind*)
}

// A typeSwitch walks a type switch.
type typeSwitch struct {
	hashname *Node // node for the hash of the type of the variable being switched on
	facename *Node // node for the concrete type of the variable being switched on
	okname   *Node // boolean node used for comma-ok type assertions
}
// A caseClause is a single case clause in a switch statement.
type caseClause struct {
	node    *Node  // points at case statement
	ordinal int    // position in switch
	hash    uint32 // hash of a type switch; unused for expression switches
	// isconst indicates whether this case clause is a constant,
	// for the purposes of the switch code generation.
	// For expression switches, that's generally literals (case 5:, not case x:).
	// For type switches, that's concrete types (case time.Time:), not interfaces (case io.Reader:).
	isconst bool
}

// caseClauses are all the case clauses in a switch statement.
type caseClauses struct {
	list   []caseClause // general cases
	defjmp *Node        // OGOTO for default case or OBREAK if no default case present
	niljmp *Node        // OGOTO for nil type case in a type switch
}
// typecheckswitch typechecks a switch statement.
// It validates the switch expression (or type-switch guard), typechecks
// every case clause against it, diagnoses impossible or duplicate cases,
// and typechecks the case bodies.
func typecheckswitch(n *Node) {
	typecheckslice(n.Ninit.Slice(), Etop)

	var nilonly string // non-empty ("slice", "func", "map") if cases may only compare to nil
	var top int        // Etype for a type switch, Erv for an expression switch
	var t *types.Type

	if n.Left != nil && n.Left.Op == OTYPESW {
		// type switch
		top = Etype
		n.Left.Right = typecheck(n.Left.Right, Erv)
		t = n.Left.Right.Type
		if t != nil && !t.IsInterface() {
			yyerrorl(n.Pos, "cannot type switch on non-interface value %L", n.Left.Right)
		}
		if v := n.Left.Left; v != nil && !isblank(v) && n.List.Len() == 0 {
			// We don't actually declare the type switch's guarded
			// declaration itself. So if there are no cases, we
			// won't notice that it went unused.
			yyerrorl(v.Pos, "%v declared and not used", v.Sym)
		}
	} else {
		// expression switch
		top = Erv
		if n.Left != nil {
			n.Left = typecheck(n.Left, Erv)
			n.Left = defaultlit(n.Left, nil)
			t = n.Left.Type
		} else {
			t = types.Types[TBOOL]
		}
		if t != nil {
			switch {
			case !okforeq[t.Etype]:
				yyerrorl(n.Pos, "cannot switch on %L", n.Left)
			case t.IsSlice():
				nilonly = "slice"
			case t.IsArray() && !IsComparable(t):
				yyerrorl(n.Pos, "cannot switch on %L", n.Left)
			case t.IsStruct():
				if f := IncomparableField(t); f != nil {
					yyerrorl(n.Pos, "cannot switch on %L (struct containing %v cannot be compared)", n.Left, f.Type)
				}
			case t.Etype == TFUNC:
				nilonly = "func"
			case t.IsMap():
				nilonly = "map"
			}
		}
	}

	n.Type = t

	// def and niltype remember the first default case and the first nil
	// case so that duplicates can report where the first one was.
	var def, niltype *Node
	for _, ncase := range n.List.Slice() {
		if ncase.List.Len() == 0 {
			// default
			if def != nil {
				setlineno(ncase)
				yyerrorl(ncase.Pos, "multiple defaults in switch (first at %v)", def.Line())
			} else {
				def = ncase
			}
		} else {
			ls := ncase.List.Slice()
			for i1, n1 := range ls {
				setlineno(n1)
				ls[i1] = typecheck(ls[i1], Erv|Etype)
				n1 = ls[i1]
				if n1.Type == nil || t == nil {
					continue
				}
				setlineno(ncase)
				switch top {
				// expression switch
				case Erv:
					ls[i1] = defaultlit(ls[i1], t)
					n1 = ls[i1]
					switch {
					case n1.Op == OTYPE:
						yyerrorl(ncase.Pos, "type %v is not an expression", n1.Type)
					case n1.Type != nil && assignop(n1.Type, t, nil) == 0 && assignop(t, n1.Type, nil) == 0:
						if n.Left != nil {
							yyerrorl(ncase.Pos, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left, n1.Type, t)
						} else {
							yyerrorl(ncase.Pos, "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type)
						}
					case nilonly != "" && !isnil(n1):
						yyerrorl(ncase.Pos, "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left)
					case t.IsInterface() && !n1.Type.IsInterface() && !IsComparable(n1.Type):
						yyerrorl(ncase.Pos, "invalid case %L in switch (incomparable type)", n1)
					}

				// type switch
				case Etype:
					var missing, have *types.Field
					var ptr int
					switch {
					case n1.Op == OLITERAL && n1.Type.IsKind(TNIL):
						// case nil:
						if niltype != nil {
							yyerrorl(ncase.Pos, "multiple nil cases in type switch (first at %v)", niltype.Line())
						} else {
							niltype = ncase
						}
					case n1.Op != OTYPE && n1.Type != nil: // should this be ||?
						yyerrorl(ncase.Pos, "%L is not a type", n1)
						// reset to original type
						n1 = n.Left.Right
						ls[i1] = n1
					case !n1.Type.IsInterface() && t.IsInterface() && !implements(n1.Type, t, &missing, &have, &ptr):
						if have != nil && !missing.Broke() && !have.Broke() {
							yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
								" (wrong type for %v method)\n\thave %v%S\n\twant %v%S", n.Left.Right, n1.Type, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
						} else if !missing.Broke() {
							if ptr != 0 {
								yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
									" (%v method has pointer receiver)", n.Left.Right, n1.Type, missing.Sym)
							} else {
								yyerrorl(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
									" (missing %v method)", n.Left.Right, n1.Type, missing.Sym)
							}
						}
					}
				}
			}
		}

		if n.Type == nil || n.Type.IsUntyped() {
			// if the value we're switching on has no type or is untyped,
			// we've already printed an error and don't need to continue
			// typechecking the body
			return
		}

		if top == Etype {
			// Give the per-case variable its type: the case's single type
			// when there is exactly one (non-nil), otherwise the switched
			// interface type.
			ll := ncase.List
			if ncase.Rlist.Len() != 0 {
				nvar := ncase.Rlist.First()
				if ll.Len() == 1 && ll.First().Type != nil && !ll.First().Type.IsKind(TNIL) {
					// single entry type switch
					nvar.Type = ll.First().Type
				} else {
					// multiple entry type switch or default
					nvar.Type = n.Type
				}
				nvar = typecheck(nvar, Erv|Easgn)
				ncase.Rlist.SetFirst(nvar)
			}
		}

		typecheckslice(ncase.Nbody.Slice(), Etop)
	}
	switch top {
	// expression switch
	case Erv:
		checkDupExprCases(n.Left, n.List.Slice())
	}
}
// walkswitch lowers a switch statement into simpler constructs,
// dispatching to the type-switch or expression-switch walker.
func walkswitch(sw *Node) {
	if sw.Left == nil {
		// A bare "switch { ... }" is equivalent to "switch true { ... }".
		cond := nodbool(true)
		cond = typecheck(cond, Erv)
		cond = defaultlit(cond, nil)
		sw.Left = cond
	}

	if sw.Left.Op == OTYPESW {
		var ts typeSwitch
		ts.walk(sw)
		return
	}
	var es exprSwitch
	es.walk(sw)
}
// walk generates an AST implementing sw.
// sw is an expression switch.
// The AST is generally of the form of a linear
// search using if..goto, although binary search
// is used with long runs of constants.
func (s *exprSwitch) walk(sw *Node) {
	casebody(sw, nil)

	cond := sw.Left
	sw.Left = nil

	// Classify the switch: a constant-bool condition lets the case
	// expressions themselves serve as the if conditions.
	s.kind = switchKindExpr
	if Isconst(cond, CTBOOL) {
		s.kind = switchKindTrue
		if !cond.Val().U.(bool) {
			s.kind = switchKindFalse
		}
	}

	cond = walkexpr(cond, &sw.Ninit)
	t := sw.Type
	if t == nil {
		return
	}

	// convert the switch into OIF statements
	var cas []*Node
	if s.kind == switchKindTrue || s.kind == switchKindFalse {
		s.exprname = nodbool(s.kind == switchKindTrue)
	} else if consttype(cond) > 0 {
		// leave constants to enable dead code elimination (issue 9608)
		s.exprname = cond
	} else {
		// Evaluate the condition once into a temp.
		s.exprname = temp(cond.Type)
		cas = []*Node{nod(OAS, s.exprname, cond)}
		typecheckslice(cas, Etop)
	}

	// Enumerate the cases and prepare the default case.
	clauses := s.genCaseClauses(sw.List.Slice())
	sw.List.Set(nil)
	cc := clauses.list

	// handle the cases in order
	for len(cc) > 0 {
		run := 1
		if okforcmp[t.Etype] && cc[0].isconst {
			// do binary search on runs of constants
			for ; run < len(cc) && cc[run].isconst; run++ {
			}
			// sort and compile constants
			sort.Sort(caseClauseByConstVal(cc[:run]))
		}

		a := s.walkCases(cc[:run])
		cas = append(cas, a)
		cc = cc[run:]
	}

	// handle default case
	if nerrors == 0 {
		cas = append(cas, clauses.defjmp)
		sw.Nbody.Prepend(cas...)
		walkstmtlist(sw.Nbody.Slice())
	}
}
// walkCases generates an AST implementing the cases in cc.
// Fewer than binarySearchMin cases become a linear chain of ifs; longer
// (sorted) runs of constants are binary-searched. cc must be sorted per
// caseClauseByConstVal when the binary-search path is taken.
func (s *exprSwitch) walkCases(cc []caseClause) *Node {
	if len(cc) < binarySearchMin {
		// linear search
		var cas []*Node
		for _, c := range cc {
			n := c.node
			lno := setlineno(n)

			a := nod(OIF, nil, nil)
			if rng := n.List.Slice(); rng != nil {
				// Integer range.
				// exprname is a temp or a constant,
				// so it is safe to evaluate twice.
				// In most cases, this conjunction will be
				// rewritten by walkinrange into a single comparison.
				low := nod(OGE, s.exprname, rng[0])
				high := nod(OLE, s.exprname, rng[1])
				a.Left = nod(OANDAND, low, high)
			} else if (s.kind != switchKindTrue && s.kind != switchKindFalse) || assignop(n.Left.Type, s.exprname.Type, nil) == OCONVIFACE || assignop(s.exprname.Type, n.Left.Type, nil) == OCONVIFACE {
				a.Left = nod(OEQ, s.exprname, n.Left) // if name == val
			} else if s.kind == switchKindTrue {
				a.Left = n.Left // if val
			} else {
				// s.kind == switchKindFalse
				a.Left = nod(ONOT, n.Left, nil) // if !val
			}
			a.Left = typecheck(a.Left, Erv)
			a.Left = defaultlit(a.Left, nil)
			a.Nbody.Set1(n.Right) // goto l

			cas = append(cas, a)
			lineno = lno
		}
		return liststmt(cas)
	}

	// find the middle and recur
	half := len(cc) / 2
	a := nod(OIF, nil, nil)
	n := cc[half-1].node
	var mid *Node
	if rng := n.List.Slice(); rng != nil {
		mid = rng[1] // high end of range
	} else {
		mid = n.Left
	}
	le := nod(OLE, s.exprname, mid)
	if Isconst(mid, CTSTR) {
		// Search by length and then by value; see caseClauseByConstVal.
		lenlt := nod(OLT, nod(OLEN, s.exprname, nil), nod(OLEN, mid, nil))
		leneq := nod(OEQ, nod(OLEN, s.exprname, nil), nod(OLEN, mid, nil))
		a.Left = nod(OOROR, lenlt, nod(OANDAND, leneq, le))
	} else {
		a.Left = le
	}
	a.Left = typecheck(a.Left, Erv)
	a.Left = defaultlit(a.Left, nil)
	a.Nbody.Set1(s.walkCases(cc[:half]))
	a.Rlist.Set1(s.walkCases(cc[half:]))
	return a
}
// casebody builds separate lists of statements and cases.
// It makes labels between cases and statements
// and deals with fallthrough, break, and unreachable statements.
func casebody(sw *Node, typeswvar *Node) {
	if sw.List.Len() == 0 {
		return
	}

	lno := setlineno(sw)

	var cas []*Node  // cases
	var stat []*Node // statements
	var def *Node    // defaults
	// br is the implicit break terminating each case body that does not
	// end in a fallthrough.
	br := nod(OBREAK, nil, nil)

	for _, n := range sw.List.Slice() {
		setlineno(n)
		if n.Op != OXCASE {
			Fatalf("casebody %v", n.Op)
		}
		n.Op = OCASE
		needvar := n.List.Len() != 1 || n.List.First().Op == OLITERAL

		// jmp transfers control from this case's test to its body label.
		jmp := nod(OGOTO, autolabel(".s"), nil)
		switch n.List.Len() {
		case 0:
			// default
			if def != nil {
				yyerrorl(n.Pos, "more than one default case")
			}
			// reuse original default case
			n.Right = jmp
			def = n
		case 1:
			// one case -- reuse OCASE node
			n.Left = n.List.First()
			n.Right = jmp
			n.List.Set(nil)
			cas = append(cas, n)
		default:
			// Expand multi-valued cases and detect ranges of integer cases.
			if typeswvar != nil || sw.Left.Type.IsInterface() || !n.List.First().Type.IsInteger() || n.List.Len() < integerRangeMin {
				// Can't use integer ranges. Expand each case into a separate node.
				for _, n1 := range n.List.Slice() {
					cas = append(cas, nod(OCASE, n1, jmp))
				}
				break
			}
			// Find integer ranges within runs of constants.
			s := n.List.Slice()
			j := 0
			for j < len(s) {
				// Find a run of constants.
				var run int
				for run = j; run < len(s) && Isconst(s[run], CTINT); run++ {
				}
				if run-j >= integerRangeMin {
					// Search for integer ranges in s[j:run].
					// Typechecking is done, so all values are already in an appropriate range.
					search := s[j:run]
					sort.Sort(constIntNodesByVal(search))
					// [beg, end) delimits a maximal run of consecutive values.
					for beg, end := 0, 1; end <= len(search); end++ {
						if end < len(search) && search[end].Int64() == search[end-1].Int64()+1 {
							continue
						}
						if end-beg >= integerRangeMin {
							// Record range in List.
							c := nod(OCASE, nil, jmp)
							c.List.Set2(search[beg], search[end-1])
							cas = append(cas, c)
						} else {
							// Not large enough for range; record separately.
							for _, n := range search[beg:end] {
								cas = append(cas, nod(OCASE, n, jmp))
							}
						}
						beg = end
					}
					j = run
				}
				// Advance to next constant, adding individual non-constant
				// or as-yet-unhandled constant cases as we go.
				for ; j < len(s) && (j < run || !Isconst(s[j], CTINT)); j++ {
					cas = append(cas, nod(OCASE, s[j], jmp))
				}
			}
		}

		// Body: the label that jmp targets, then (for a type switch) the
		// per-case variable, then the user's statements.
		stat = append(stat, nod(OLABEL, jmp.Left, nil))
		if typeswvar != nil && needvar && n.Rlist.Len() != 0 {
			l := []*Node{
				nod(ODCL, n.Rlist.First(), nil),
				nod(OAS, n.Rlist.First(), typeswvar),
			}
			typecheckslice(l, Etop)
			stat = append(stat, l...)
		}
		stat = append(stat, n.Nbody.Slice()...)

		// Search backwards for the index of the fallthrough
		// statement. Do not assume it'll be in the last
		// position, since in some cases (e.g. when the statement
		// list contains autotmp_ variables), one or more OVARKILL
		// nodes will be at the end of the list.
		fallIndex := len(stat) - 1
		for stat[fallIndex].Op == OVARKILL {
			fallIndex--
		}
		last := stat[fallIndex]
		if last.Op != OFALL {
			stat = append(stat, br)
		}
	}

	stat = append(stat, br)
	if def != nil {
		cas = append(cas, def)
	}

	sw.List.Set(cas)
	sw.Nbody.Set(stat)
	lineno = lno
}
// genCaseClauses collects the case clauses of an expression switch into
// a caseClauses value, separating out the default case and marking which
// clauses are constants (and therefore eligible for binary search).
func (s *exprSwitch) genCaseClauses(clauses []*Node) caseClauses {
	var cc caseClauses
	for _, clause := range clauses {
		if clause.Left == nil && clause.List.Len() == 0 {
			// default case
			if cc.defjmp != nil {
				Fatalf("duplicate default case not detected during typechecking")
			}
			cc.defjmp = clause.Right
			continue
		}
		// A non-empty List means an integer range built by casebody,
		// which is constant by construction.
		isconst := clause.List.Len() > 0
		switch consttype(clause.Left) {
		case CTFLT, CTINT, CTRUNE, CTSTR:
			isconst = true
		}
		cc.list = append(cc.list, caseClause{
			node:    clause,
			ordinal: len(cc.list),
			isconst: isconst,
		})
	}

	if cc.defjmp == nil {
		// No explicit default case; fall out of the switch.
		cc.defjmp = nod(OBREAK, nil, nil)
	}
	return cc
}
// genCaseClauses collects the clauses of a type switch into a caseClauses
// value, splitting off the default and nil cases and hashing each case type.
func (s *typeSwitch) genCaseClauses(clauses []*Node) caseClauses {
	var cc caseClauses
	for _, clause := range clauses {
		switch {
		case clause.Left == nil:
			// default case
			if cc.defjmp != nil {
				Fatalf("duplicate default case not detected during typechecking")
			}
			cc.defjmp = clause.Right
		case clause.Left.Op == OLITERAL:
			// nil case in type switch
			if cc.niljmp != nil {
				Fatalf("duplicate nil case not detected during typechecking")
			}
			cc.niljmp = clause.Right
		default:
			// general case: concrete types are "constant" (hash-comparable);
			// interface types must be tested dynamically.
			cc.list = append(cc.list, caseClause{
				node:    clause,
				ordinal: len(cc.list),
				isconst: !clause.Left.Type.IsInterface(),
				hash:    typehash(clause.Left.Type),
			})
		}
	}

	if cc.defjmp == nil {
		// No explicit default case; fall out of the switch.
		cc.defjmp = nod(OBREAK, nil, nil)
	}

	// diagnose duplicate cases
	s.checkDupCases(cc.list)
	return cc
}
// checkDupCases reports an error for each type-switch case whose type is
// identical (per eqtype) to the type of an earlier case.
func (s *typeSwitch) checkDupCases(cc []caseClause) {
	if len(cc) < 2 {
		return
	}
	// We store seen types in a map keyed by type hash.
	// It is possible, but very unlikely, for multiple distinct types to have the same hash.
	seen := make(map[uint32][]*Node)
	// To avoid many small allocations of length 1 slices,
	// also set up a single large slice to slice into.
	nn := make([]*Node, 0, len(cc))
Outer:
	for _, c := range cc {
		prev, ok := seen[c.hash]
		if !ok {
			// First entry for this hash.
			nn = append(nn, c.node)
			// The three-index slice caps this entry at its own end, so a
			// later append to it allocates a new array instead of
			// overwriting subsequent elements of nn.
			seen[c.hash] = nn[len(nn)-1 : len(nn) : len(nn)]
			continue
		}
		for _, n := range prev {
			if eqtype(n.Left.Type, c.node.Left.Type) {
				yyerrorl(c.node.Pos, "duplicate case %v in type switch\n\tprevious case at %v", c.node.Left.Type, n.Line())
				// avoid double-reporting errors
				continue Outer
			}
		}
		seen[c.hash] = append(seen[c.hash], c.node)
	}
}
// checkDupExprCases reports errors for duplicate constant cases in an
// expression switch. exprname is the expression being switched on; it is
// nil for a boolean (naked) switch, which is not checked.
func checkDupExprCases(exprname *Node, clauses []*Node) {
	// boolean (naked) switch, nothing to do.
	if exprname == nil {
		return
	}
	// The common case is that s's expression is not an interface.
	// In that case, all constant clauses have the same type,
	// so checking for duplicates can be done solely by value.
	if !exprname.Type.IsInterface() {
		seen := make(map[interface{}]*Node)
		for _, ncase := range clauses {
			for _, n := range ncase.List.Slice() {
				// Can't check for duplicates that aren't constants, per the spec. Issue 15896.
				// Don't check for duplicate bools. Although the spec allows it,
				// (1) the compiler hasn't checked it in the past, so compatibility mandates it, and
				// (2) it would disallow useful things like
				//     case GOARCH == "arm" && GOARM == "5":
				//     case GOARCH == "arm":
				// which would both evaluate to false for non-ARM compiles.
				if ct := consttype(n); ct == 0 || ct == CTBOOL {
					continue
				}

				val := n.Val().Interface()
				prev, dup := seen[val]
				if !dup {
					seen[val] = n
					continue
				}
				yyerrorl(ncase.Pos, "duplicate case %s in switch\n\tprevious case at %v",
					nodeAndVal(n), prev.Line())
			}
		}
		return
	}

	// s's expression is an interface. This is fairly rare, so
	// keep this simple. Case expressions are only duplicates if
	// they have the same value and identical types.
	//
	// In general, we have to use eqtype to test type identity,
	// because == gives false negatives for anonymous types and
	// the byte/uint8 and rune/int32 builtin type aliases.
	// However, this is not a problem here, because constant
	// expressions are always untyped or have a named type, and we
	// explicitly handle the builtin type aliases below.
	//
	// This approach may need to be revisited though if we fix
	// #21866 by treating all type aliases like byte/uint8 and
	// rune/int32.
	type typeVal struct {
		typ *types.Type
		val interface{}
	}
	seen := make(map[typeVal]*Node)
	for _, ncase := range clauses {
		for _, n := range ncase.List.Slice() {
			if ct := consttype(n); ct == 0 || ct == CTBOOL {
				continue
			}
			tv := typeVal{
				typ: n.Type,
				val: n.Val().Interface(),
			}
			// Canonicalize the builtin aliases so byte/uint8 and
			// rune/int32 cases compare as duplicates.
			switch tv.typ {
			case types.Bytetype:
				tv.typ = types.Types[TUINT8]
			case types.Runetype:
				tv.typ = types.Types[TINT32]
			}
			prev, dup := seen[tv]
			if !dup {
				seen[tv] = n
				continue
			}
			yyerrorl(ncase.Pos, "duplicate case %s in switch\n\tprevious case at %v",
				nodeAndVal(n), prev.Line())
		}
	}
}
// nodeAndVal formats n for a duplicate-case diagnostic, appending n's
// constant value whenever it differs from n's source representation.
func nodeAndVal(n *Node) string {
	repr := n.String()
	valStr := fmt.Sprintf("%#v", n.Val().Interface())
	if repr == valStr {
		return repr
	}
	return repr + " (value " + valStr + ")"
}
// walk generates an AST that implements sw,
// where sw is a type switch.
// The AST is generally of the form of a linear
// search using if..goto, although binary search
// is used with long runs of concrete types.
func (s *typeSwitch) walk(sw *Node) {
	cond := sw.Left
	sw.Left = nil
	// No expression to switch on: nothing to generate.
	if cond == nil {
		sw.List.Set(nil)
		return
	}
	if cond.Right == nil {
		yyerrorl(sw.Pos, "type switch must have an assignment")
		return
	}
	cond.Right = walkexpr(cond.Right, &sw.Ninit)
	if !cond.Right.Type.IsInterface() {
		yyerrorl(sw.Pos, "type switch must be on an interface")
		return
	}
	var cas []*Node
	// predeclare temporary variables and the boolean var
	s.facename = temp(cond.Right.Type)
	a := nod(OAS, s.facename, cond.Right)
	a = typecheck(a, Etop)
	cas = append(cas, a)
	s.okname = temp(types.Types[TBOOL])
	s.okname = typecheck(s.okname, Erv)
	s.hashname = temp(types.Types[TUINT32])
	s.hashname = typecheck(s.hashname, Erv)
	// set up labels and jumps
	casebody(sw, s.facename)
	clauses := s.genCaseClauses(sw.List.Slice())
	sw.List.Set(nil)
	def := clauses.defjmp
	// For empty interfaces, do:
	// if e._type == nil {
	//	do nil case if it exists, otherwise default
	// }
	// h := e._type.hash
	// Use a similar strategy for non-empty interfaces.
	// Get interface descriptor word.
	// For empty interfaces this will be the type.
	// For non-empty interfaces this will be the itab.
	itab := nod(OITAB, s.facename, nil)
	// Check for nil first.
	i := nod(OIF, nil, nil)
	i.Left = nod(OEQ, itab, nodnil())
	if clauses.niljmp != nil {
		// Do explicit nil case right here.
		i.Nbody.Set1(clauses.niljmp)
	} else {
		// Jump to default case.
		lbl := autolabel(".s")
		i.Nbody.Set1(nod(OGOTO, lbl, nil))
		// Wrap default case with label.
		blk := nod(OBLOCK, nil, nil)
		blk.List.Set2(nod(OLABEL, lbl, nil), def)
		def = blk
	}
	i.Left = typecheck(i.Left, Erv)
	i.Left = defaultlit(i.Left, nil)
	cas = append(cas, i)
	// Load hash from type or itab.
	h := nodSym(ODOTPTR, itab, nil)
	h.Type = types.Types[TUINT32]
	h.SetTypecheck(1)
	if cond.Right.Type.IsEmptyInterface() {
		h.Xoffset = int64(2 * Widthptr) // offset of hash in runtime._type
	} else {
		h.Xoffset = int64(2 * Widthptr) // offset of hash in runtime.itab
	}
	h.SetBounded(true) // guaranteed not to fault
	a = nod(OAS, s.hashname, h)
	a = typecheck(a, Etop)
	cas = append(cas, a)
	cc := clauses.list
	// insert type equality check into each case block
	for _, c := range cc {
		c.node.Right = s.typeone(c.node)
	}
	// generate list of if statements, binary search for constant sequences
	for len(cc) > 0 {
		// Non-constant clauses are checked sequentially, one by one.
		if !cc[0].isconst {
			n := cc[0].node
			cas = append(cas, n.Right)
			cc = cc[1:]
			continue
		}
		// identify run of constants
		var run int
		for run = 1; run < len(cc) && cc[run].isconst; run++ {
		}
		// sort by hash
		sort.Sort(caseClauseByType(cc[:run]))
		// for debugging: linear search
		if false {
			for i := 0; i < run; i++ {
				n := cc[i].node
				cas = append(cas, n.Right)
			}
			continue
		}
		// combine adjacent cases with the same hash
		var batch []caseClause
		for i, j := 0, 0; i < run; i = j {
			hash := []*Node{cc[i].node.Right}
			for j = i + 1; j < run && cc[i].hash == cc[j].hash; j++ {
				hash = append(hash, cc[j].node.Right)
			}
			cc[i].node.Right = liststmt(hash)
			batch = append(batch, cc[i])
		}
		// binary search among cases to narrow by hash
		cas = append(cas, s.walkCases(batch))
		cc = cc[run:]
	}
	// handle default case; skipped when errors were reported above.
	if nerrors == 0 {
		cas = append(cas, def)
		sw.Nbody.Prepend(cas...)
		sw.List.Set(nil)
		walkstmtlist(sw.Nbody.Slice())
	}
}
// typeone generates an AST that jumps to the
// case body if the variable is of type t.
func (s *typeSwitch) typeone(t *Node) *Node {
	var name *Node
	var init Nodes
	if t.Rlist.Len() == 0 {
		// The case declares no variable; assign into the blank node.
		name = nblank
		nblank = typecheck(nblank, Erv|Easgn)
	} else {
		// Declare the per-case variable and zero-initialize it.
		name = t.Rlist.First()
		init.Append(nod(ODCL, name, nil))
		a := nod(OAS, name, nil)
		a = typecheck(a, Etop)
		init.Append(a)
	}
	// Build: name, ok = facename.(T)
	a := nod(OAS2, nil, nil)
	a.List.Set2(name, s.okname) // name, ok =
	b := nod(ODOTTYPE, s.facename, nil)
	b.Type = t.Left.Type // interface.(type)
	a.Rlist.Set1(b)
	a = typecheck(a, Etop)
	a = walkexpr(a, &init)
	init.Append(a)
	// Guard the case body (a jump) with the assertion result.
	c := nod(OIF, nil, nil)
	c.Left = s.okname
	c.Nbody.Set1(t.Right) // if ok { goto l }
	init.Append(c)
	return init.asblock()
}
// walkCases generates an AST implementing the cases in cc.
// Runs shorter than binarySearchMin become a linear chain of
// hash-equality ifs; longer runs recurse into a binary search by hash.
func (s *typeSwitch) walkCases(cc []caseClause) *Node {
	if len(cc) < binarySearchMin {
		// Linear scan: if hashname == <case hash> { <case body> }.
		var cas []*Node
		for _, c := range cc {
			n := c.node
			if !c.isconst {
				Fatalf("typeSwitch walkCases")
			}
			a := nod(OIF, nil, nil)
			a.Left = nod(OEQ, s.hashname, nodintconst(int64(c.hash)))
			a.Left = typecheck(a.Left, Erv)
			a.Left = defaultlit(a.Left, nil)
			a.Nbody.Set1(n.Right)
			cas = append(cas, a)
		}
		return liststmt(cas)
	}
	// find the middle and recur
	half := len(cc) / 2
	a := nod(OIF, nil, nil)
	a.Left = nod(OLE, s.hashname, nodintconst(int64(cc[half-1].hash)))
	a.Left = typecheck(a.Left, Erv)
	a.Left = defaultlit(a.Left, nil)
	a.Nbody.Set1(s.walkCases(cc[:half]))
	a.Rlist.Set1(s.walkCases(cc[half:]))
	return a
}
// caseClauseByConstVal sorts clauses by constant value to enable binary search.
type caseClauseByConstVal []caseClause

func (x caseClauseByConstVal) Len() int      { return len(x) }
func (x caseClauseByConstVal) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

// constClauseVal returns the constant value that orders a clause.
// A clause might be an individual constant or an integer range.
// Duplicates have already been rejected, so a range can be safely
// represented by any value in it; we use its first element.
func constClauseVal(n *Node) interface{} {
	if s := n.List.Slice(); s != nil {
		return s[0].Val().U
	}
	return n.Left.Val().U
}

func (x caseClauseByConstVal) Less(i, j int) bool {
	v1 := constClauseVal(x[i].node)
	v2 := constClauseVal(x[j].node)
	switch v1 := v1.(type) {
	case *Mpflt:
		return v1.Cmp(v2.(*Mpflt)) < 0
	case *Mpint:
		return v1.Cmp(v2.(*Mpint)) < 0
	case string:
		// Sort strings by length and then by value.
		// It is much cheaper to compare lengths than values,
		// and all we need here is consistency.
		// We respect this sorting in exprSwitch.walkCases.
		a := v1
		b := v2.(string)
		if len(a) != len(b) {
			return len(a) < len(b)
		}
		return a < b
	}
	Fatalf("caseClauseByConstVal passed bad clauses %v < %v", x[i].node.Left, x[j].node.Left)
	return false
}
// caseClauseByType orders clauses by type hash so that equal hashes
// become adjacent, breaking ties by original source position.
type caseClauseByType []caseClause

func (x caseClauseByType) Len() int      { return len(x) }
func (x caseClauseByType) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x caseClauseByType) Less(i, j int) bool {
	// Hash collisions are rare; fall back to ordinal only then.
	if x[i].hash == x[j].hash {
		return x[i].ordinal < x[j].ordinal
	}
	return x[i].hash < x[j].hash
}
// constIntNodesByVal sorts integer-constant nodes in increasing value order.
type constIntNodesByVal []*Node
func (x constIntNodesByVal) Len() int      { return len(x) }
func (x constIntNodesByVal) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x constIntNodesByVal) Less(i, j int) bool {
	// Compare the underlying multiprecision integer values.
	return x[i].Val().U.(*Mpint).Cmp(x[j].Val().U.(*Mpint)) < 0
}
|
// On-disk mutex protecting a resource
//
// A lock is represented on disk by a directory of a particular name,
// containing an information file. Taking a lock is done by renaming a
// temporary directory into place. We use temporary directories because for
// all filesystems we believe that exactly one attempt to claim the lock will
// succeed and the others will fail.
package fslock
import (
"crypto/rand"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"regexp"
"time"
)
// nameRegexp constrains lock names: a lowercase letter first, then
// lowercase letters, digits, dots, or hyphens.
const nameRegexp = "^[a-z]+[a-z0-9.-]*$"
var (
	// ErrLockNotHeld is returned by Unlock when this process does not
	// hold the lock.
	ErrLockNotHeld = errors.New("lock not held")
	validName = regexp.MustCompile(nameRegexp)
	lockWaitDelay = 1 * time.Second // pause between acquisition attempts
)
// Lock is an on-disk mutex: a directory named `name` under `parent`.
// nonce identifies this process's claim on the lock.
type Lock struct {
	name string
	parent string
	nonce []byte
}
// generateNonce returns 20 bytes of cryptographically random data,
// used to identify the holder of a lock.
func generateNonce() ([]byte, error) {
	buf := make([]byte, 20)
	_, err := io.ReadFull(rand.Reader, buf)
	if err != nil {
		return nil, err
	}
	return buf, nil
}
// NewLock returns a new lock with the given name within the given lock
// directory, without acquiring it. The lock name must match the regular
// expression `^[a-z]+[a-z0-9.-]*`.
func NewLock(lockDir, name string) (*Lock, error) {
	// Validate the name before doing any work (generating the nonce,
	// touching the filesystem).
	if !validName.MatchString(name) {
		return nil, fmt.Errorf("Invalid lock name %q. Names must match %q", name, nameRegexp)
	}
	nonce, err := generateNonce()
	if err != nil {
		return nil, err
	}
	lock := &Lock{
		name:   name,
		parent: lockDir,
		nonce:  nonce,
	}
	// Ensure the parent exists.
	dir, err := os.Open(lock.parent)
	if os.IsNotExist(err) {
		// try to make it
		err = os.MkdirAll(lock.parent, 0755)
		// Since we have just created the directory successfully, return now.
		if err == nil {
			return lock, nil
		}
	}
	if err != nil {
		return nil, err
	}
	// The handle is only needed to Stat the path; don't leak it.
	defer dir.Close()
	// Make sure it is actually a directory
	fileInfo, err := dir.Stat()
	if err != nil {
		return nil, err
	}
	if !fileInfo.IsDir() {
		return nil, fmt.Errorf("lock dir %q exists and is a file not a directory", lockDir)
	}
	return lock, nil
}
// namedLockDir returns the directory whose existence represents the lock.
func (lock *Lock) namedLockDir() string {
	return path.Join(lock.parent, lock.name)
}
// heldFile returns the path of the file holding the owner's nonce.
func (lock *Lock) heldFile() string {
	return path.Join(lock.namedLockDir(), "held")
}
// acquire makes a single attempt to take the lock. It returns true if
// the lock was taken, false if someone else holds it, and a non-nil
// error for any other failure.
func (lock *Lock) acquire() (bool, error) {
	// If the namedLockDir exists, then the lock is held by someone else.
	dir, err := os.Open(lock.namedLockDir())
	if err == nil {
		dir.Close()
		return false, nil
	}
	if !os.IsNotExist(err) {
		return false, err
	}
	// Create a temporary directory (in the temp dir), and then move it to
	// the right name. The rename is the atomic claim operation.
	tempDirName, err := ioutil.TempDir("", "temp-lock")
	if err != nil {
		return false, err // this shouldn't really fail...
	}
	err = os.Rename(tempDirName, lock.namedLockDir())
	if os.IsExist(err) {
		// Beaten to it, clean up temporary directory.
		os.RemoveAll(tempDirName)
		return false, nil
	} else if err != nil {
		// Don't leak the temporary directory on unexpected failures.
		os.RemoveAll(tempDirName)
		return false, err
	}
	// Record our nonce so IsLockHeld can recognize us as the holder.
	err = ioutil.WriteFile(lock.heldFile(), lock.nonce, 0755)
	if err != nil {
		return false, err
	}
	// We now have the lock.
	return true, nil
}
// Lock blocks until it is able to acquire the lock, retrying once per
// lockWaitDelay. It returns the first error encountered while trying.
func (lock *Lock) Lock() error {
	for {
		acquired, err := lock.acquire()
		if err != nil {
			return err
		}
		if acquired {
			return nil
		}
		time.Sleep(lockWaitDelay)
	}
	// The loop above never exits normally, so no statement is needed
	// (nor reachable) here.
}
// TryLock attempts to acquire the lock, retrying once per lockWaitDelay,
// and gives up after the given duration. It reports whether the lock was
// acquired; err is non-nil only when an acquisition attempt itself failed.
func (lock *Lock) TryLock(duration time.Duration) (isLocked bool, err error) {
	locked := make(chan bool)
	error := make(chan error)
	timeout := make(chan struct{})
	defer func() {
		close(locked)
		close(error)
		close(timeout)
	}()
	go func() {
		for {
			acquired, err := lock.acquire()
			if err != nil {
				locked <- false
				error <- err
				return
			}
			if acquired {
				locked <- true
				error <- nil
				return
			}
			select {
			case <-timeout:
				locked <- false
				error <- nil
				return
			case <-time.After(lockWaitDelay):
				// Keep trying...
			}
		}
	}()
	select {
	case isLocked = <-locked:
		err = <-error
		return
	case <-time.After(duration):
		// The worker may be blocked sending its own result at this
		// exact moment, in which case a bare `timeout <- struct{}{}`
		// would deadlock (neither side could proceed). Offer the
		// timeout while also being ready to accept the result.
		select {
		case timeout <- struct{}{}:
		case isLocked = <-locked:
			err = <-error
			return
		}
	}
	// It is possible that the timeout got signalled just before the goroutine
	// tried again, so check the results rather than automatically failing.
	return <-locked, <-error
}
// IsLockHeld returns true if and only if the namedLockDir exists, and the
// file 'held' in that directory contains the nonce for this lock.
func (lock *Lock) IsLockHeld() bool {
	heldNonce, err := ioutil.ReadFile(lock.heldFile())
	if err != nil {
		return false
	}
	// lock.nonce is a []byte; convert both operands to string so the
	// comparison is valid Go and compares contents.
	return string(heldNonce) == string(lock.nonce)
}
// Unlock releases a held lock by removing the lock directory.
// It returns ErrLockNotHeld when the on-disk nonce is not ours.
func (lock *Lock) Unlock() error {
	if !lock.IsLockHeld() {
		return ErrLockNotHeld
	}
	return os.RemoveAll(lock.namedLockDir())
}
Generate the nonce after checking the name.
// On-disk mutex protecting a resource
//
// A lock is represented on disk by a directory of a particular name,
// containing an information file. Taking a lock is done by renaming a
// temporary directory into place. We use temporary directories because for
// all filesystems we believe that exactly one attempt to claim the lock will
// succeed and the others will fail.
package fslock
import (
"crypto/rand"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"regexp"
"time"
)
// nameRegexp constrains lock names: a lowercase letter first, then
// lowercase letters, digits, dots, or hyphens.
const nameRegexp = "^[a-z]+[a-z0-9.-]*$"
var (
	// ErrLockNotHeld is returned by Unlock when this process does not
	// hold the lock.
	ErrLockNotHeld = errors.New("lock not held")
	validName = regexp.MustCompile(nameRegexp)
	lockWaitDelay = 1 * time.Second // pause between acquisition attempts
)
// Lock is an on-disk mutex: a directory named `name` under `parent`.
// nonce identifies this process's claim on the lock.
type Lock struct {
	name string
	parent string
	nonce []byte
}
// generateNonce returns 20 bytes of cryptographically random data,
// used to identify the holder of a lock.
func generateNonce() ([]byte, error) {
	buf := make([]byte, 20)
	_, err := io.ReadFull(rand.Reader, buf)
	if err != nil {
		return nil, err
	}
	return buf, nil
}
// NewLock returns a new lock with the given name within the given lock
// directory, without acquiring it. The lock name must match the regular
// expression `^[a-z]+[a-z0-9.-]*`.
func NewLock(lockDir, name string) (*Lock, error) {
	if !validName.MatchString(name) {
		return nil, fmt.Errorf("Invalid lock name %q. Names must match %q", name, nameRegexp)
	}
	nonce, err := generateNonce()
	if err != nil {
		return nil, err
	}
	lock := &Lock{
		name:   name,
		parent: lockDir,
		nonce:  nonce,
	}
	// Ensure the parent exists.
	dir, err := os.Open(lock.parent)
	if os.IsNotExist(err) {
		// try to make it
		err = os.MkdirAll(lock.parent, 0755)
		// Since we have just created the directory successfully, return now.
		if err == nil {
			return lock, nil
		}
	}
	if err != nil {
		return nil, err
	}
	// The handle is only needed to Stat the path; don't leak it.
	defer dir.Close()
	// Make sure it is actually a directory
	fileInfo, err := dir.Stat()
	if err != nil {
		return nil, err
	}
	if !fileInfo.IsDir() {
		return nil, fmt.Errorf("lock dir %q exists and is a file not a directory", lockDir)
	}
	return lock, nil
}
// namedLockDir returns the directory whose existence represents the lock.
func (lock *Lock) namedLockDir() string {
	return path.Join(lock.parent, lock.name)
}
// heldFile returns the path of the file holding the owner's nonce.
func (lock *Lock) heldFile() string {
	return path.Join(lock.namedLockDir(), "held")
}
// acquire makes a single attempt to take the lock. It returns true if
// the lock was taken, false if someone else holds it, and a non-nil
// error for any other failure.
func (lock *Lock) acquire() (bool, error) {
	// If the namedLockDir exists, then the lock is held by someone else.
	dir, err := os.Open(lock.namedLockDir())
	if err == nil {
		dir.Close()
		return false, nil
	}
	if !os.IsNotExist(err) {
		return false, err
	}
	// Create a temporary directory (in the temp dir), and then move it to
	// the right name. The rename is the atomic claim operation.
	tempDirName, err := ioutil.TempDir("", "temp-lock")
	if err != nil {
		return false, err // this shouldn't really fail...
	}
	err = os.Rename(tempDirName, lock.namedLockDir())
	if os.IsExist(err) {
		// Beaten to it, clean up temporary directory.
		os.RemoveAll(tempDirName)
		return false, nil
	} else if err != nil {
		// Don't leak the temporary directory on unexpected failures.
		os.RemoveAll(tempDirName)
		return false, err
	}
	// Record our nonce so IsLockHeld can recognize us as the holder.
	err = ioutil.WriteFile(lock.heldFile(), lock.nonce, 0755)
	if err != nil {
		return false, err
	}
	// We now have the lock.
	return true, nil
}
// Lock blocks until it is able to acquire the lock, retrying once per
// lockWaitDelay. It returns the first error encountered while trying.
func (lock *Lock) Lock() error {
	for {
		acquired, err := lock.acquire()
		if err != nil {
			return err
		}
		if acquired {
			return nil
		}
		time.Sleep(lockWaitDelay)
	}
	// The loop above never exits normally, so no statement is needed
	// (nor reachable) here.
}
// TryLock attempts to acquire the lock, retrying once per lockWaitDelay,
// and gives up after the given duration. It reports whether the lock was
// acquired; err is non-nil only when an acquisition attempt itself failed.
func (lock *Lock) TryLock(duration time.Duration) (isLocked bool, err error) {
	locked := make(chan bool)
	error := make(chan error)
	timeout := make(chan struct{})
	defer func() {
		close(locked)
		close(error)
		close(timeout)
	}()
	go func() {
		for {
			acquired, err := lock.acquire()
			if err != nil {
				locked <- false
				error <- err
				return
			}
			if acquired {
				locked <- true
				error <- nil
				return
			}
			select {
			case <-timeout:
				locked <- false
				error <- nil
				return
			case <-time.After(lockWaitDelay):
				// Keep trying...
			}
		}
	}()
	select {
	case isLocked = <-locked:
		err = <-error
		return
	case <-time.After(duration):
		// The worker may be blocked sending its own result at this
		// exact moment, in which case a bare `timeout <- struct{}{}`
		// would deadlock (neither side could proceed). Offer the
		// timeout while also being ready to accept the result.
		select {
		case timeout <- struct{}{}:
		case isLocked = <-locked:
			err = <-error
			return
		}
	}
	// It is possible that the timeout got signalled just before the goroutine
	// tried again, so check the results rather than automatically failing.
	return <-locked, <-error
}
// IsLockHeld returns true if and only if the namedLockDir exists, and the
// file 'held' in that directory contains the nonce for this lock.
func (lock *Lock) IsLockHeld() bool {
	heldNonce, err := ioutil.ReadFile(lock.heldFile())
	if err != nil {
		return false
	}
	// lock.nonce is a []byte; convert both operands to string so the
	// comparison is valid Go and compares contents.
	return string(heldNonce) == string(lock.nonce)
}
// Unlock releases a held lock by removing the lock directory.
// It returns ErrLockNotHeld when the on-disk nonce is not ours.
func (lock *Lock) Unlock() error {
	if !lock.IsLockHeld() {
		return ErrLockNotHeld
	}
	return os.RemoveAll(lock.namedLockDir())
}
|
// Inferno utils/6l/span.c
// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/span.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package ld
import (
"cmd/internal/objabi"
"cmd/internal/sys"
"cmd/link/internal/sym"
"fmt"
"path/filepath"
"strings"
)
// Symbol table.
// putelfstr appends s, NUL-terminated, to the ELF string table and
// returns the offset at which it was stored.
func putelfstr(s string) int {
	// Index 0 of an ELF string table must be the empty string;
	// insert it lazily before the first real entry.
	if s != "" && len(Elfstrdat) == 0 {
		putelfstr("")
	}
	start := len(Elfstrdat)
	Elfstrdat = append(append(Elfstrdat, s...), 0)
	return start
}
// putelfsyment writes one ELF symbol table entry to out, using the
// 64-bit or 32-bit on-disk layout depending on elf64, and advances
// Symsize by the entry size.
func putelfsyment(out *OutBuf, off int, addr int64, size int64, info int, shndx int, other int) {
	if elf64 {
		// Elf64_Sym layout: name, info, other, shndx, value, size.
		out.Write32(uint32(off))
		out.Write8(uint8(info))
		out.Write8(uint8(other))
		out.Write16(uint16(shndx))
		out.Write64(uint64(addr))
		out.Write64(uint64(size))
		Symsize += ELF64SYMSIZE
	} else {
		// Elf32_Sym layout: name, value, size, info, other, shndx.
		out.Write32(uint32(off))
		out.Write32(uint32(addr))
		out.Write32(uint32(size))
		out.Write8(uint8(info))
		out.Write8(uint8(other))
		out.Write16(uint16(shndx))
		Symsize += ELF32SYMSIZE
	}
}
var numelfsym = 1 // 0 is reserved
var elfbind int   // binding of the current output pass (see Asmelfsym)
// putelfsym emits one ELF symbol table entry for x (printed name s,
// kind t, address addr). Entries whose binding does not match the
// current pass (elfbind) are skipped; Asmelfsym runs two passes,
// STB_LOCAL then STB_GLOBAL.
func putelfsym(ctxt *Link, x *sym.Symbol, s string, t SymbolType, addr int64, go_ *sym.Symbol) {
	var typ int
	switch t {
	default:
		return
	case TextSym:
		typ = STT_FUNC
	case DataSym, BSSSym:
		typ = STT_OBJECT
	case UndefinedSym:
		// ElfType is only set for symbols read from Go shared libraries, but
		// for other symbols it is left as STT_NOTYPE which is fine.
		typ = int(x.ElfType())
	case TLSSym:
		typ = STT_TLS
	}
	size := x.Size
	if t == UndefinedSym {
		size = 0
	}
	// Walk to the outermost containing symbol to find the section.
	xo := x
	for xo.Outer != nil {
		xo = xo.Outer
	}
	var elfshnum int
	if xo.Type == sym.SDYNIMPORT || xo.Type == sym.SHOSTOBJ || xo.Type == sym.SUNDEFEXT {
		elfshnum = SHN_UNDEF
	} else {
		if xo.Sect == nil {
			Errorf(x, "missing section in putelfsym")
			return
		}
		if xo.Sect.Elfsect == nil {
			Errorf(x, "missing ELF section in putelfsym")
			return
		}
		elfshnum = xo.Sect.Elfsect.(*ElfShdr).shnum
	}
	// One pass for each binding: STB_LOCAL, STB_GLOBAL,
	// maybe one day STB_WEAK.
	bind := STB_GLOBAL
	if x.IsFileLocal() || x.Attr.VisibilityHidden() || x.Attr.Local() {
		bind = STB_LOCAL
	}
	// In external linking mode, we have to invoke gcc with -rdynamic
	// to get the exported symbols put into the dynamic symbol table.
	// To avoid filling the dynamic table with lots of unnecessary symbols,
	// mark all Go symbols local (not global) in the final executable.
	// But when we're dynamically linking, we need all those global symbols.
	if !ctxt.DynlinkingGo() && ctxt.LinkMode == LinkExternal && !x.Attr.CgoExportStatic() && elfshnum != SHN_UNDEF {
		bind = STB_LOCAL
	}
	// Defined symbols are section-relative when externally linked.
	if ctxt.LinkMode == LinkExternal && elfshnum != SHN_UNDEF {
		addr -= int64(xo.Sect.Vaddr)
	}
	other := STV_DEFAULT
	if x.Attr.VisibilityHidden() {
		// TODO(mwhudson): We only set AttrVisibilityHidden in ldelf, i.e. when
		// internally linking. But STV_HIDDEN visibility only matters in object
		// files and shared libraries, and as we are a long way from implementing
		// internal linking for shared libraries and only create object files when
		// externally linking, I don't think this makes a lot of sense.
		other = STV_HIDDEN
	}
	if ctxt.Arch.Family == sys.PPC64 && typ == STT_FUNC && x.Attr.Shared() && x.Name != "runtime.duffzero" && x.Name != "runtime.duffcopy" {
		// On ppc64 the top three bits of the st_other field indicate how
		// many instructions separate the global and local entry points. In
		// our case it is two instructions, indicated by the value 3.
		// The conditions here match those in preprocess in
		// cmd/internal/obj/ppc64/obj9.go, which is where the
		// instructions are inserted.
		other |= 3 << 5
	}
	// When dynamically linking, we create Symbols by reading the names from
	// the symbol tables of the shared libraries and so the names need to
	// match exactly. Tools like DTrace will have to wait for now.
	if !ctxt.DynlinkingGo() {
		// Rewrite · to . for ASCII-only tools like DTrace (sigh)
		s = strings.Replace(s, "·", ".", -1)
	}
	if ctxt.DynlinkingGo() && bind == STB_GLOBAL && elfbind == STB_LOCAL && x.Type == sym.STEXT {
		// When dynamically linking, we want references to functions defined
		// in this module to always be to the function object, not to the
		// PLT. We force this by writing an additional local symbol for every
		// global function symbol and making all relocations against the
		// global symbol refer to this local symbol instead (see
		// (*sym.Symbol).ElfsymForReloc). This is approximately equivalent to the
		// ELF linker -Bsymbolic-functions option, but that is buggy on
		// several platforms.
		putelfsyment(ctxt.Out, putelfstr("local."+s), addr, size, STB_LOCAL<<4|typ&0xf, elfshnum, other)
		x.LocalElfsym = int32(numelfsym)
		numelfsym++
		return
	} else if bind != elfbind {
		return
	}
	putelfsyment(ctxt.Out, putelfstr(s), addr, size, bind<<4|typ&0xf, elfshnum, other)
	x.Elfsym = int32(numelfsym)
	numelfsym++
}
// putelfsectionsym writes a local STT_SECTION symbol for section shndx
// and records its symbol-table index on s.
func putelfsectionsym(out *OutBuf, s *sym.Symbol, shndx int) {
	putelfsyment(out, 0, 0, 0, STB_LOCAL<<4|STT_SECTION, shndx, 0)
	s.Elfsym = int32(numelfsym)
	numelfsym++
}
// Asmelfsym writes the ELF symbol table: the reserved null entry,
// section symbols, a FILE entry, then all symbols in two passes
// (local bindings first, then global), as required by the ELF format.
func Asmelfsym(ctxt *Link) {
	// the first symbol entry is reserved
	putelfsyment(ctxt.Out, 0, 0, 0, STB_LOCAL<<4|STT_NOTYPE, 0, 0)
	dwarfaddelfsectionsyms(ctxt)
	// Some linkers will add a FILE sym if one is not present.
	// Avoid having the working directory inserted into the symbol table.
	// It is added with a name to avoid problems with external linking
	// encountered on some versions of Solaris. See issue #14957.
	putelfsyment(ctxt.Out, putelfstr("go.go"), 0, 0, STB_LOCAL<<4|STT_FILE, SHN_ABS, 0)
	numelfsym++
	elfbind = STB_LOCAL
	genasmsym(ctxt, putelfsym)
	elfbind = STB_GLOBAL
	elfglobalsymndx = numelfsym
	genasmsym(ctxt, putelfsym)
}
// putplan9sym writes one Plan 9 symbol table entry: a 4- or 8-byte
// big-endian address, a type byte (0x80 set for the variable-length
// format), and the NUL-terminated name. Other symbol kinds are skipped.
func putplan9sym(ctxt *Link, x *sym.Symbol, s string, typ SymbolType, addr int64, go_ *sym.Symbol) {
	t := int(typ)
	switch typ {
	case TextSym, DataSym, BSSSym:
		// File-local symbols use the lowercase variant of the type letter.
		if x.IsFileLocal() {
			t += 'a' - 'A'
		}
		fallthrough
	case AutoSym, ParamSym, FrameSym:
		l := 4
		if ctxt.HeadType == objabi.Hplan9 && ctxt.Arch.Family == sys.AMD64 && !Flag8 {
			// 64-bit address: emit the high 32 bits first.
			ctxt.Out.Write32b(uint32(addr >> 32))
			l = 8
		}
		ctxt.Out.Write32b(uint32(addr))
		ctxt.Out.Write8(uint8(t + 0x80)) /* 0x80 is variable length */
		ctxt.Out.WriteString(s)
		ctxt.Out.Write8(0)
		Symsize += int32(l) + 1 + int32(len(s)) + 1
	default:
		return
	}
}
// Asmplan9sym writes the Plan 9 symbol table by emitting every symbol
// through putplan9sym.
func Asmplan9sym(ctxt *Link) {
	genasmsym(ctxt, putplan9sym)
}
// symt is the runtime.symtab symbol, set up by (*Link).symtab.
var symt *sym.Symbol
// byPkg sorts libraries by import path.
type byPkg []*sym.Library

func (libs byPkg) Len() int           { return len(libs) }
func (libs byPkg) Less(a, b int) bool { return libs[a].Pkg < libs[b].Pkg }
func (libs byPkg) Swap(a, b int)      { libs[a], libs[b] = libs[b], libs[a] }
// Create a table with information on the text sections.
// textsectionmap populates runtime.textsectionmap with one
// (vaddr, length, baseaddr) triple per ".text" section and returns
// the number of sections recorded.
func textsectionmap(ctxt *Link) uint32 {
	t := ctxt.Syms.Lookup("runtime.textsectionmap", 0)
	t.Type = sym.SRODATA
	t.Attr |= sym.AttrReachable
	// Count the leading run of ".text" sections in Segtext.
	nsections := int64(0)
	for _, sect := range Segtext.Sections {
		if sect.Name == ".text" {
			nsections++
		} else {
			break
		}
	}
	// Three pointer-sized fields per section.
	t.Grow(3 * nsections * int64(ctxt.Arch.PtrSize))
	off := int64(0)
	n := 0
	// The vaddr for each text section is the difference between the section's
	// Vaddr and the Vaddr for the first text section as determined at compile
	// time.
	// The symbol for the first text section is named runtime.text as before.
	// Additional text sections are named runtime.text.n where n is the
	// order of creation starting with 1. These symbols provide the section's
	// address after relocation by the linker.
	textbase := Segtext.Sections[0].Vaddr
	for _, sect := range Segtext.Sections {
		if sect.Name != ".text" {
			break
		}
		off = t.SetUint(ctxt.Arch, off, sect.Vaddr-textbase)
		off = t.SetUint(ctxt.Arch, off, sect.Length)
		if n == 0 {
			s := ctxt.Syms.ROLookup("runtime.text", 0)
			if s == nil {
				Errorf(nil, "Unable to find symbol runtime.text\n")
			}
			off = t.SetAddr(ctxt.Arch, off, s)
		} else {
			s := ctxt.Syms.Lookup(fmt.Sprintf("runtime.text.%d", n), 0)
			if s == nil {
				Errorf(nil, "Unable to find symbol runtime.text.%d\n", n)
			}
			off = t.SetAddr(ctxt.Arch, off, s)
		}
		n++
	}
	return uint32(n)
}
func (ctxt *Link) symtab() {
switch ctxt.BuildMode {
case BuildModeCArchive, BuildModeCShared:
for _, s := range ctxt.Syms.Allsym {
// Create a new entry in the .init_array section that points to the
// library initializer function.
if s.Name == *flagEntrySymbol && ctxt.HeadType != objabi.Haix {
addinitarrdata(ctxt, s)
}
}
}
// Define these so that they'll get put into the symbol table.
// data.c:/^address will provide the actual values.
ctxt.xdefine("runtime.text", sym.STEXT, 0)
ctxt.xdefine("runtime.etext", sym.STEXT, 0)
ctxt.xdefine("runtime.itablink", sym.SRODATA, 0)
ctxt.xdefine("runtime.eitablink", sym.SRODATA, 0)
ctxt.xdefine("runtime.rodata", sym.SRODATA, 0)
ctxt.xdefine("runtime.erodata", sym.SRODATA, 0)
ctxt.xdefine("runtime.types", sym.SRODATA, 0)
ctxt.xdefine("runtime.etypes", sym.SRODATA, 0)
ctxt.xdefine("runtime.noptrdata", sym.SNOPTRDATA, 0)
ctxt.xdefine("runtime.enoptrdata", sym.SNOPTRDATA, 0)
ctxt.xdefine("runtime.data", sym.SDATA, 0)
ctxt.xdefine("runtime.edata", sym.SDATA, 0)
ctxt.xdefine("runtime.bss", sym.SBSS, 0)
ctxt.xdefine("runtime.ebss", sym.SBSS, 0)
ctxt.xdefine("runtime.noptrbss", sym.SNOPTRBSS, 0)
ctxt.xdefine("runtime.enoptrbss", sym.SNOPTRBSS, 0)
ctxt.xdefine("runtime.end", sym.SBSS, 0)
ctxt.xdefine("runtime.epclntab", sym.SRODATA, 0)
ctxt.xdefine("runtime.esymtab", sym.SRODATA, 0)
// garbage collection symbols
s := ctxt.Syms.Lookup("runtime.gcdata", 0)
s.Type = sym.SRODATA
s.Size = 0
s.Attr |= sym.AttrReachable
ctxt.xdefine("runtime.egcdata", sym.SRODATA, 0)
s = ctxt.Syms.Lookup("runtime.gcbss", 0)
s.Type = sym.SRODATA
s.Size = 0
s.Attr |= sym.AttrReachable
ctxt.xdefine("runtime.egcbss", sym.SRODATA, 0)
// pseudo-symbols to mark locations of type, string, and go string data.
var symtype *sym.Symbol
var symtyperel *sym.Symbol
if !ctxt.DynlinkingGo() {
if ctxt.UseRelro() && (ctxt.BuildMode == BuildModeCArchive || ctxt.BuildMode == BuildModeCShared || ctxt.BuildMode == BuildModePIE) {
s = ctxt.Syms.Lookup("type.*", 0)
s.Type = sym.STYPE
s.Size = 0
s.Attr |= sym.AttrReachable
symtype = s
s = ctxt.Syms.Lookup("typerel.*", 0)
s.Type = sym.STYPERELRO
s.Size = 0
s.Attr |= sym.AttrReachable
symtyperel = s
} else {
s = ctxt.Syms.Lookup("type.*", 0)
s.Type = sym.STYPE
s.Size = 0
s.Attr |= sym.AttrReachable
symtype = s
symtyperel = s
}
}
groupSym := func(name string, t sym.SymKind) *sym.Symbol {
s := ctxt.Syms.Lookup(name, 0)
s.Type = t
s.Size = 0
s.Attr |= sym.AttrLocal | sym.AttrReachable
return s
}
var (
symgostring = groupSym("go.string.*", sym.SGOSTRING)
symgofunc = groupSym("go.func.*", sym.SGOFUNC)
symgcbits = groupSym("runtime.gcbits.*", sym.SGCBITS)
)
var symgofuncrel *sym.Symbol
if !ctxt.DynlinkingGo() {
if ctxt.UseRelro() {
symgofuncrel = groupSym("go.funcrel.*", sym.SGOFUNCRELRO)
} else {
symgofuncrel = symgofunc
}
}
symitablink := ctxt.Syms.Lookup("runtime.itablink", 0)
symitablink.Type = sym.SITABLINK
symt = ctxt.Syms.Lookup("runtime.symtab", 0)
symt.Attr |= sym.AttrLocal
symt.Type = sym.SSYMTAB
symt.Size = 0
symt.Attr |= sym.AttrReachable
nitablinks := 0
// assign specific types so that they sort together.
// within a type they sort by size, so the .* symbols
// just defined above will be first.
// hide the specific symbols.
for _, s := range ctxt.Syms.Allsym {
if ctxt.LinkMode != LinkExternal && isStaticTemp(s.Name) {
s.Attr |= sym.AttrNotInSymbolTable
}
if !s.Attr.Reachable() || s.Attr.Special() || s.Type != sym.SRODATA {
continue
}
switch {
case strings.HasPrefix(s.Name, "type."):
if !ctxt.DynlinkingGo() {
s.Attr |= sym.AttrNotInSymbolTable
}
if ctxt.UseRelro() {
s.Type = sym.STYPERELRO
s.Outer = symtyperel
} else {
s.Type = sym.STYPE
s.Outer = symtype
}
case strings.HasPrefix(s.Name, "go.importpath.") && ctxt.UseRelro():
// Keep go.importpath symbols in the same section as types and
// names, as they can be referred to by a section offset.
s.Type = sym.STYPERELRO
case strings.HasPrefix(s.Name, "go.itablink."):
nitablinks++
s.Type = sym.SITABLINK
s.Attr |= sym.AttrNotInSymbolTable
s.Outer = symitablink
case strings.HasPrefix(s.Name, "go.string."):
s.Type = sym.SGOSTRING
s.Attr |= sym.AttrNotInSymbolTable
s.Outer = symgostring
case strings.HasPrefix(s.Name, "runtime.gcbits."):
s.Type = sym.SGCBITS
s.Attr |= sym.AttrNotInSymbolTable
s.Outer = symgcbits
case strings.HasSuffix(s.Name, "·f"):
if !ctxt.DynlinkingGo() {
s.Attr |= sym.AttrNotInSymbolTable
}
if ctxt.UseRelro() {
s.Type = sym.SGOFUNCRELRO
s.Outer = symgofuncrel
} else {
s.Type = sym.SGOFUNC
s.Outer = symgofunc
}
case strings.HasPrefix(s.Name, "gcargs."),
strings.HasPrefix(s.Name, "gclocals."),
strings.HasPrefix(s.Name, "gclocals·"),
strings.HasPrefix(s.Name, "inltree."),
strings.HasSuffix(s.Name, ".opendefer"):
s.Type = sym.SGOFUNC
s.Attr |= sym.AttrNotInSymbolTable
s.Outer = symgofunc
s.Align = 4
liveness += (s.Size + int64(s.Align) - 1) &^ (int64(s.Align) - 1)
}
}
if ctxt.BuildMode == BuildModeShared {
abihashgostr := ctxt.Syms.Lookup("go.link.abihash."+filepath.Base(*flagOutfile), 0)
abihashgostr.Attr |= sym.AttrReachable
abihashgostr.Type = sym.SRODATA
hashsym := ctxt.Syms.Lookup("go.link.abihashbytes", 0)
abihashgostr.AddAddr(ctxt.Arch, hashsym)
abihashgostr.AddUint(ctxt.Arch, uint64(hashsym.Size))
}
if ctxt.BuildMode == BuildModePlugin || ctxt.CanUsePlugins() {
for _, l := range ctxt.Library {
s := ctxt.Syms.Lookup("go.link.pkghashbytes."+l.Pkg, 0)
s.Attr |= sym.AttrReachable
s.Type = sym.SRODATA
s.Size = int64(len(l.Hash))
s.P = []byte(l.Hash)
str := ctxt.Syms.Lookup("go.link.pkghash."+l.Pkg, 0)
str.Attr |= sym.AttrReachable
str.Type = sym.SRODATA
str.AddAddr(ctxt.Arch, s)
str.AddUint(ctxt.Arch, uint64(len(l.Hash)))
}
}
nsections := textsectionmap(ctxt)
// Information about the layout of the executable image for the
// runtime to use. Any changes here must be matched by changes to
// the definition of moduledata in runtime/symtab.go.
// This code uses several global variables that are set by pcln.go:pclntab.
moduledata := ctxt.Moduledata
// The pclntab slice
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.pclntab", 0))
moduledata.AddUint(ctxt.Arch, uint64(ctxt.Syms.Lookup("runtime.pclntab", 0).Size))
moduledata.AddUint(ctxt.Arch, uint64(ctxt.Syms.Lookup("runtime.pclntab", 0).Size))
// The ftab slice
moduledata.AddAddrPlus(ctxt.Arch, ctxt.Syms.Lookup("runtime.pclntab", 0), int64(pclntabPclntabOffset))
moduledata.AddUint(ctxt.Arch, uint64(pclntabNfunc+1))
moduledata.AddUint(ctxt.Arch, uint64(pclntabNfunc+1))
// The filetab slice
moduledata.AddAddrPlus(ctxt.Arch, ctxt.Syms.Lookup("runtime.pclntab", 0), int64(pclntabFiletabOffset))
moduledata.AddUint(ctxt.Arch, uint64(len(ctxt.Filesyms))+1)
moduledata.AddUint(ctxt.Arch, uint64(len(ctxt.Filesyms))+1)
// findfunctab
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.findfunctab", 0))
// minpc, maxpc
moduledata.AddAddr(ctxt.Arch, pclntabFirstFunc)
moduledata.AddAddrPlus(ctxt.Arch, pclntabLastFunc, pclntabLastFunc.Size)
// pointers to specific parts of the module
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.text", 0))
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.etext", 0))
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.noptrdata", 0))
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.enoptrdata", 0))
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.data", 0))
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.edata", 0))
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.bss", 0))
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.ebss", 0))
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.noptrbss", 0))
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.enoptrbss", 0))
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.end", 0))
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.gcdata", 0))
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.gcbss", 0))
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.types", 0))
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.etypes", 0))
if ctxt.HeadType == objabi.Haix && ctxt.LinkMode == LinkExternal {
// Add R_REF relocation to prevent ld's garbage collection of
// runtime.rodata, runtime.erodata and runtime.epclntab.
addRef := func(name string) {
r := moduledata.AddRel()
r.Sym = ctxt.Syms.Lookup(name, 0)
r.Type = objabi.R_XCOFFREF
r.Siz = uint8(ctxt.Arch.PtrSize)
}
addRef("runtime.rodata")
addRef("runtime.erodata")
addRef("runtime.epclntab")
}
// text section information
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.textsectionmap", 0))
moduledata.AddUint(ctxt.Arch, uint64(nsections))
moduledata.AddUint(ctxt.Arch, uint64(nsections))
// The typelinks slice
typelinkSym := ctxt.Syms.Lookup("runtime.typelink", 0)
ntypelinks := uint64(typelinkSym.Size) / 4
moduledata.AddAddr(ctxt.Arch, typelinkSym)
moduledata.AddUint(ctxt.Arch, ntypelinks)
moduledata.AddUint(ctxt.Arch, ntypelinks)
// The itablinks slice
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.itablink", 0))
moduledata.AddUint(ctxt.Arch, uint64(nitablinks))
moduledata.AddUint(ctxt.Arch, uint64(nitablinks))
// The ptab slice
if ptab := ctxt.Syms.ROLookup("go.plugin.tabs", 0); ptab != nil && ptab.Attr.Reachable() {
ptab.Attr |= sym.AttrLocal
ptab.Type = sym.SRODATA
nentries := uint64(len(ptab.P) / 8) // sizeof(nameOff) + sizeof(typeOff)
moduledata.AddAddr(ctxt.Arch, ptab)
moduledata.AddUint(ctxt.Arch, nentries)
moduledata.AddUint(ctxt.Arch, nentries)
} else {
moduledata.AddUint(ctxt.Arch, 0)
moduledata.AddUint(ctxt.Arch, 0)
moduledata.AddUint(ctxt.Arch, 0)
}
if ctxt.BuildMode == BuildModePlugin {
addgostring(ctxt, moduledata, "go.link.thispluginpath", objabi.PathToPrefix(*flagPluginPath))
pkghashes := ctxt.Syms.Lookup("go.link.pkghashes", 0)
pkghashes.Attr |= sym.AttrReachable
pkghashes.Attr |= sym.AttrLocal
pkghashes.Type = sym.SRODATA
for i, l := range ctxt.Library {
// pkghashes[i].name
addgostring(ctxt, pkghashes, fmt.Sprintf("go.link.pkgname.%d", i), l.Pkg)
// pkghashes[i].linktimehash
addgostring(ctxt, pkghashes, fmt.Sprintf("go.link.pkglinkhash.%d", i), l.Hash)
// pkghashes[i].runtimehash
hash := ctxt.Syms.ROLookup("go.link.pkghash."+l.Pkg, 0)
pkghashes.AddAddr(ctxt.Arch, hash)
}
moduledata.AddAddr(ctxt.Arch, pkghashes)
moduledata.AddUint(ctxt.Arch, uint64(len(ctxt.Library)))
moduledata.AddUint(ctxt.Arch, uint64(len(ctxt.Library)))
} else {
moduledata.AddUint(ctxt.Arch, 0) // pluginpath
moduledata.AddUint(ctxt.Arch, 0)
moduledata.AddUint(ctxt.Arch, 0) // pkghashes slice
moduledata.AddUint(ctxt.Arch, 0)
moduledata.AddUint(ctxt.Arch, 0)
}
if len(ctxt.Shlibs) > 0 {
thismodulename := filepath.Base(*flagOutfile)
switch ctxt.BuildMode {
case BuildModeExe, BuildModePIE:
// When linking an executable, outfile is just "a.out". Make
// it something slightly more comprehensible.
thismodulename = "the executable"
}
addgostring(ctxt, moduledata, "go.link.thismodulename", thismodulename)
modulehashes := ctxt.Syms.Lookup("go.link.abihashes", 0)
modulehashes.Attr |= sym.AttrReachable
modulehashes.Attr |= sym.AttrLocal
modulehashes.Type = sym.SRODATA
for i, shlib := range ctxt.Shlibs {
// modulehashes[i].modulename
modulename := filepath.Base(shlib.Path)
addgostring(ctxt, modulehashes, fmt.Sprintf("go.link.libname.%d", i), modulename)
// modulehashes[i].linktimehash
addgostring(ctxt, modulehashes, fmt.Sprintf("go.link.linkhash.%d", i), string(shlib.Hash))
// modulehashes[i].runtimehash
abihash := ctxt.Syms.Lookup("go.link.abihash."+modulename, 0)
abihash.Attr |= sym.AttrReachable
modulehashes.AddAddr(ctxt.Arch, abihash)
}
moduledata.AddAddr(ctxt.Arch, modulehashes)
moduledata.AddUint(ctxt.Arch, uint64(len(ctxt.Shlibs)))
moduledata.AddUint(ctxt.Arch, uint64(len(ctxt.Shlibs)))
} else {
moduledata.AddUint(ctxt.Arch, 0) // modulename
moduledata.AddUint(ctxt.Arch, 0)
moduledata.AddUint(ctxt.Arch, 0) // moduleshashes slice
moduledata.AddUint(ctxt.Arch, 0)
moduledata.AddUint(ctxt.Arch, 0)
}
hasmain := ctxt.BuildMode == BuildModeExe || ctxt.BuildMode == BuildModePIE
if hasmain {
moduledata.AddUint8(1)
} else {
moduledata.AddUint8(0)
}
// The rest of moduledata is zero initialized.
// When linking an object that does not contain the runtime we are
// creating the moduledata from scratch and it does not have a
// compiler-provided size, so read it from the type data.
moduledatatype := ctxt.Syms.ROLookup("type.runtime.moduledata", 0)
moduledata.Size = decodetypeSize(ctxt.Arch, moduledatatype.P)
moduledata.Grow(moduledata.Size)
lastmoduledatap := ctxt.Syms.Lookup("runtime.lastmoduledatap", 0)
if lastmoduledatap.Type != sym.SDYNIMPORT {
lastmoduledatap.Type = sym.SNOPTRDATA
lastmoduledatap.Size = 0 // overwrite existing value
lastmoduledatap.AddAddr(ctxt.Arch, moduledata)
}
}
// isStaticTemp reports whether name denotes a compiler-generated static
// temporary, i.e. whether the final path element contains "..stmp_".
func isStaticTemp(name string) bool {
	if slash := strings.LastIndexByte(name, '/'); slash >= 0 {
		// Only look at the last path element (slash kept, harmless).
		name = name[slash:]
	}
	return strings.Contains(name, "..stmp_")
}
[dev.link] cmd/link: avoid allsyms loop in initarray setup
In the linker's symtab() function, avoid looping over the context's
Syms.Allsyms array to locate the entry symbol when setting up the init
array section; do an explicit ABI0 symbol lookup instead. This is a
minor efficiency tweak / code cleanup.
Fixes #20205.
Change-Id: I2ebc17a3cb2cd63e9f5052bc80f1b0ac72c960e3
Reviewed-on: https://go-review.googlesource.com/c/go/+/209838
Run-TryBot: Than McIntosh <4b2922593166fc595a9b1a408f34f5d3817fe9d2@google.com>
Reviewed-by: Jeremy Faller <b3f594e10a9edcf5413cf1190121d45078c62290@golang.org>
TryBot-Result: Gobot Gobot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
// Inferno utils/6l/span.c
// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/span.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package ld
import (
"cmd/internal/objabi"
"cmd/internal/sys"
"cmd/link/internal/sym"
"fmt"
"path/filepath"
"strings"
)
// Symbol table.
// putelfstr appends s (NUL-terminated) to the ELF string table and
// returns the offset at which it was stored. The first byte of the
// table is always the empty string, added lazily before the first
// non-empty entry.
func putelfstr(s string) int {
	if len(Elfstrdat) == 0 && s != "" {
		// Entry at offset 0 must be the empty string.
		putelfstr("")
	}
	pos := len(Elfstrdat)
	Elfstrdat = append(append(Elfstrdat, s...), 0)
	return pos
}
// putelfsyment writes a single ELF symbol table entry to out.
// off is the symbol's name offset into the string table, info packs
// binding and type, shndx is the section index. The field order differs
// between the two formats: ELF64 places name/info/other/shndx before
// the 64-bit value and size, while ELF32 packs 32-bit value and size
// between the name offset and info/other/shndx. Symsize accumulates the
// total bytes of symbol table emitted.
func putelfsyment(out *OutBuf, off int, addr int64, size int64, info int, shndx int, other int) {
	if elf64 {
		out.Write32(uint32(off))
		out.Write8(uint8(info))
		out.Write8(uint8(other))
		out.Write16(uint16(shndx))
		out.Write64(uint64(addr))
		out.Write64(uint64(size))
		Symsize += ELF64SYMSIZE
	} else {
		out.Write32(uint32(off))
		out.Write32(uint32(addr))
		out.Write32(uint32(size))
		out.Write8(uint8(info))
		out.Write8(uint8(other))
		out.Write16(uint16(shndx))
		Symsize += ELF32SYMSIZE
	}
}
// numelfsym is the index that will be assigned to the next emitted ELF
// symbol. Index 0 is reserved for the null symbol entry.
var numelfsym = 1 // 0 is reserved

// elfbind is the binding (STB_LOCAL or STB_GLOBAL) of the symbol pass
// currently being emitted; Asmelfsym runs one genasmsym pass per binding.
var elfbind int
// putelfsym writes one ELF symbol table entry for symbol x, named s, of
// kind t, at address addr. It is invoked via genasmsym once per binding
// pass (see elfbind); entries whose binding does not match the current
// pass are skipped. The go_ parameter is unused here.
func putelfsym(ctxt *Link, x *sym.Symbol, s string, t SymbolType, addr int64, go_ *sym.Symbol) {
	// Map the linker symbol kind onto an ELF symbol type.
	var typ int
	switch t {
	default:
		return
	case TextSym:
		typ = STT_FUNC
	case DataSym, BSSSym:
		typ = STT_OBJECT
	case UndefinedSym:
		// ElfType is only set for symbols read from Go shared libraries, but
		// for other symbols it is left as STT_NOTYPE which is fine.
		typ = int(x.ElfType())
	case TLSSym:
		typ = STT_TLS
	}

	size := x.Size
	if t == UndefinedSym {
		size = 0
	}

	// Walk up to the outermost containing symbol; it determines the
	// section this entry refers to.
	xo := x
	for xo.Outer != nil {
		xo = xo.Outer
	}

	var elfshnum int
	if xo.Type == sym.SDYNIMPORT || xo.Type == sym.SHOSTOBJ || xo.Type == sym.SUNDEFEXT {
		elfshnum = SHN_UNDEF
	} else {
		if xo.Sect == nil {
			Errorf(x, "missing section in putelfsym")
			return
		}
		if xo.Sect.Elfsect == nil {
			Errorf(x, "missing ELF section in putelfsym")
			return
		}
		elfshnum = xo.Sect.Elfsect.(*ElfShdr).shnum
	}

	// One pass for each binding: STB_LOCAL, STB_GLOBAL,
	// maybe one day STB_WEAK.
	bind := STB_GLOBAL
	if x.IsFileLocal() || x.Attr.VisibilityHidden() || x.Attr.Local() {
		bind = STB_LOCAL
	}

	// In external linking mode, we have to invoke gcc with -rdynamic
	// to get the exported symbols put into the dynamic symbol table.
	// To avoid filling the dynamic table with lots of unnecessary symbols,
	// mark all Go symbols local (not global) in the final executable.
	// But when we're dynamically linking, we need all those global symbols.
	if !ctxt.DynlinkingGo() && ctxt.LinkMode == LinkExternal && !x.Attr.CgoExportStatic() && elfshnum != SHN_UNDEF {
		bind = STB_LOCAL
	}

	// External linking: the address becomes section-relative.
	if ctxt.LinkMode == LinkExternal && elfshnum != SHN_UNDEF {
		addr -= int64(xo.Sect.Vaddr)
	}

	other := STV_DEFAULT
	if x.Attr.VisibilityHidden() {
		// TODO(mwhudson): We only set AttrVisibilityHidden in ldelf, i.e. when
		// internally linking. But STV_HIDDEN visibility only matters in object
		// files and shared libraries, and as we are a long way from implementing
		// internal linking for shared libraries and only create object files when
		// externally linking, I don't think this makes a lot of sense.
		other = STV_HIDDEN
	}

	if ctxt.Arch.Family == sys.PPC64 && typ == STT_FUNC && x.Attr.Shared() && x.Name != "runtime.duffzero" && x.Name != "runtime.duffcopy" {
		// On ppc64 the top three bits of the st_other field indicate how
		// many instructions separate the global and local entry points. In
		// our case it is two instructions, indicated by the value 3.
		// The conditions here match those in preprocess in
		// cmd/internal/obj/ppc64/obj9.go, which is where the
		// instructions are inserted.
		other |= 3 << 5
	}

	// When dynamically linking, we create Symbols by reading the names from
	// the symbol tables of the shared libraries and so the names need to
	// match exactly. Tools like DTrace will have to wait for now.
	if !ctxt.DynlinkingGo() {
		// Rewrite · to . for ASCII-only tools like DTrace (sigh)
		s = strings.Replace(s, "·", ".", -1)
	}

	if ctxt.DynlinkingGo() && bind == STB_GLOBAL && elfbind == STB_LOCAL && x.Type == sym.STEXT {
		// When dynamically linking, we want references to functions defined
		// in this module to always be to the function object, not to the
		// PLT. We force this by writing an additional local symbol for every
		// global function symbol and making all relocations against the
		// global symbol refer to this local symbol instead (see
		// (*sym.Symbol).ElfsymForReloc). This is approximately equivalent to the
		// ELF linker -Bsymbolic-functions option, but that is buggy on
		// several platforms.
		putelfsyment(ctxt.Out, putelfstr("local."+s), addr, size, STB_LOCAL<<4|typ&0xf, elfshnum, other)
		x.LocalElfsym = int32(numelfsym)
		numelfsym++
		return
	} else if bind != elfbind {
		return
	}

	putelfsyment(ctxt.Out, putelfstr(s), addr, size, bind<<4|typ&0xf, elfshnum, other)
	x.Elfsym = int32(numelfsym)
	numelfsym++
}
// putelfsectionsym emits a local STT_SECTION symbol for section index
// shndx and records the assigned symbol table index in s.Elfsym.
func putelfsectionsym(out *OutBuf, s *sym.Symbol, shndx int) {
	putelfsyment(out, 0, 0, 0, STB_LOCAL<<4|STT_SECTION, shndx, 0)
	s.Elfsym = int32(numelfsym)
	numelfsym++
}
// Asmelfsym writes the ELF symbol table: the reserved null entry, the
// DWARF section symbols, a synthetic FILE symbol, and then all program
// symbols in two genasmsym passes — locals first, then globals.
func Asmelfsym(ctxt *Link) {
	// the first symbol entry is reserved
	putelfsyment(ctxt.Out, 0, 0, 0, STB_LOCAL<<4|STT_NOTYPE, 0, 0)

	dwarfaddelfsectionsyms(ctxt)

	// Some linkers will add a FILE sym if one is not present.
	// Avoid having the working directory inserted into the symbol table.
	// It is added with a name to avoid problems with external linking
	// encountered on some versions of Solaris. See issue #14957.
	putelfsyment(ctxt.Out, putelfstr("go.go"), 0, 0, STB_LOCAL<<4|STT_FILE, SHN_ABS, 0)
	numelfsym++

	elfbind = STB_LOCAL
	genasmsym(ctxt, putelfsym)

	elfbind = STB_GLOBAL
	// Remember where the globals start so the section header can set
	// its sh_info field (index of first non-local symbol).
	elfglobalsymndx = numelfsym
	genasmsym(ctxt, putelfsym)
}
// putplan9sym writes one Plan 9 symbol table entry for symbol x, named
// s, at address addr. Only text/data/bss and auto/param/frame symbols
// are emitted; every other kind is skipped. go_ is unused here.
func putplan9sym(ctxt *Link, x *sym.Symbol, s string, typ SymbolType, addr int64, go_ *sym.Symbol) {
	t := int(typ)
	switch typ {
	case TextSym, DataSym, BSSSym:
		// File-local symbols use the lowercase variant of the type letter.
		if x.IsFileLocal() {
			t += 'a' - 'A'
		}
	case AutoSym, ParamSym, FrameSym:
		// emitted with the type letter as-is
	default:
		return
	}

	// 64-bit Plan 9 on amd64 stores an 8-byte address unless -8 is set.
	addrBytes := 4
	if ctxt.HeadType == objabi.Hplan9 && ctxt.Arch.Family == sys.AMD64 && !Flag8 {
		ctxt.Out.Write32b(uint32(addr >> 32))
		addrBytes = 8
	}

	ctxt.Out.Write32b(uint32(addr))
	ctxt.Out.Write8(uint8(t + 0x80)) /* 0x80 is variable length */
	ctxt.Out.WriteString(s)
	ctxt.Out.Write8(0)

	Symsize += int32(addrBytes) + 1 + int32(len(s)) + 1
}
// Asmplan9sym writes the Plan 9 symbol table by walking all symbols
// with putplan9sym as the emitter.
func Asmplan9sym(ctxt *Link) {
	genasmsym(ctxt, putplan9sym)
}
// symt is the runtime.symtab symbol; it is assigned in (*Link).symtab.
var symt *sym.Symbol
// byPkg implements sort.Interface, ordering libraries by package path.
type byPkg []*sym.Library

func (x byPkg) Len() int { return len(x) }

func (x byPkg) Less(i, j int) bool { return x[i].Pkg < x[j].Pkg }

func (x byPkg) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
// Create a table with information on the text sections.
// textsectionmap fills runtime.textsectionmap with one
// (vaddr, length, baseaddr) triple per .text section and returns the
// number of sections recorded.
func textsectionmap(ctxt *Link) uint32 {
	t := ctxt.Syms.Lookup("runtime.textsectionmap", 0)
	t.Type = sym.SRODATA
	t.Attr |= sym.AttrReachable

	// Count the leading run of sections named ".text" in Segtext.
	nsections := int64(0)
	for _, sect := range Segtext.Sections {
		if sect.Name == ".text" {
			nsections++
		} else {
			break
		}
	}
	t.Grow(3 * nsections * int64(ctxt.Arch.PtrSize))

	off := int64(0)
	n := 0

	// The vaddr for each text section is the difference between the section's
	// Vaddr and the Vaddr for the first text section as determined at compile
	// time.

	// The symbol for the first text section is named runtime.text as before.
	// Additional text sections are named runtime.text.n where n is the
	// order of creation starting with 1. These symbols provide the section's
	// address after relocation by the linker.

	textbase := Segtext.Sections[0].Vaddr
	for _, sect := range Segtext.Sections {
		if sect.Name != ".text" {
			break
		}
		off = t.SetUint(ctxt.Arch, off, sect.Vaddr-textbase)
		off = t.SetUint(ctxt.Arch, off, sect.Length)
		if n == 0 {
			s := ctxt.Syms.ROLookup("runtime.text", 0)
			if s == nil {
				Errorf(nil, "Unable to find symbol runtime.text\n")
			}
			off = t.SetAddr(ctxt.Arch, off, s)
		} else {
			s := ctxt.Syms.Lookup(fmt.Sprintf("runtime.text.%d", n), 0)
			if s == nil {
				Errorf(nil, "Unable to find symbol runtime.text.%d\n", n)
			}
			off = t.SetAddr(ctxt.Arch, off, s)
		}
		n++
	}
	return uint32(n)
}
// symtab defines the runtime boundary symbols (runtime.text/data/bss
// etc.), groups type/string/funcdata symbols under container symbols so
// they sort together, and fills in the runtime moduledata record.
// The moduledata layout written here must be matched by the definition
// of moduledata in runtime/symtab.go.
func (ctxt *Link) symtab() {
	if ctxt.HeadType != objabi.Haix {
		switch ctxt.BuildMode {
		case BuildModeCArchive, BuildModeCShared:
			// Register the (ABI0) entry symbol in the init array so
			// it runs when the archive/library is loaded.
			s := ctxt.Syms.ROLookup(*flagEntrySymbol, sym.SymVerABI0)
			if s != nil {
				addinitarrdata(ctxt, s)
			}
		}
	}

	// Define these so that they'll get put into the symbol table.
	// data.c:/^address will provide the actual values.
	ctxt.xdefine("runtime.text", sym.STEXT, 0)
	ctxt.xdefine("runtime.etext", sym.STEXT, 0)
	ctxt.xdefine("runtime.itablink", sym.SRODATA, 0)
	ctxt.xdefine("runtime.eitablink", sym.SRODATA, 0)
	ctxt.xdefine("runtime.rodata", sym.SRODATA, 0)
	ctxt.xdefine("runtime.erodata", sym.SRODATA, 0)
	ctxt.xdefine("runtime.types", sym.SRODATA, 0)
	ctxt.xdefine("runtime.etypes", sym.SRODATA, 0)
	ctxt.xdefine("runtime.noptrdata", sym.SNOPTRDATA, 0)
	ctxt.xdefine("runtime.enoptrdata", sym.SNOPTRDATA, 0)
	ctxt.xdefine("runtime.data", sym.SDATA, 0)
	ctxt.xdefine("runtime.edata", sym.SDATA, 0)
	ctxt.xdefine("runtime.bss", sym.SBSS, 0)
	ctxt.xdefine("runtime.ebss", sym.SBSS, 0)
	ctxt.xdefine("runtime.noptrbss", sym.SNOPTRBSS, 0)
	ctxt.xdefine("runtime.enoptrbss", sym.SNOPTRBSS, 0)
	ctxt.xdefine("runtime.end", sym.SBSS, 0)
	ctxt.xdefine("runtime.epclntab", sym.SRODATA, 0)
	ctxt.xdefine("runtime.esymtab", sym.SRODATA, 0)

	// garbage collection symbols
	s := ctxt.Syms.Lookup("runtime.gcdata", 0)
	s.Type = sym.SRODATA
	s.Size = 0
	s.Attr |= sym.AttrReachable
	ctxt.xdefine("runtime.egcdata", sym.SRODATA, 0)

	s = ctxt.Syms.Lookup("runtime.gcbss", 0)
	s.Type = sym.SRODATA
	s.Size = 0
	s.Attr |= sym.AttrReachable
	ctxt.xdefine("runtime.egcbss", sym.SRODATA, 0)

	// pseudo-symbols to mark locations of type, string, and go string data.
	var symtype *sym.Symbol
	var symtyperel *sym.Symbol
	if !ctxt.DynlinkingGo() {
		if ctxt.UseRelro() && (ctxt.BuildMode == BuildModeCArchive || ctxt.BuildMode == BuildModeCShared || ctxt.BuildMode == BuildModePIE) {
			s = ctxt.Syms.Lookup("type.*", 0)
			s.Type = sym.STYPE
			s.Size = 0
			s.Attr |= sym.AttrReachable
			symtype = s

			s = ctxt.Syms.Lookup("typerel.*", 0)
			s.Type = sym.STYPERELRO
			s.Size = 0
			s.Attr |= sym.AttrReachable
			symtyperel = s
		} else {
			s = ctxt.Syms.Lookup("type.*", 0)
			s.Type = sym.STYPE
			s.Size = 0
			s.Attr |= sym.AttrReachable
			symtype = s
			symtyperel = s
		}
	}

	// groupSym creates a container symbol under which same-kind data
	// symbols are grouped (via Outer) so they sort together.
	groupSym := func(name string, t sym.SymKind) *sym.Symbol {
		s := ctxt.Syms.Lookup(name, 0)
		s.Type = t
		s.Size = 0
		s.Attr |= sym.AttrLocal | sym.AttrReachable
		return s
	}
	var (
		symgostring = groupSym("go.string.*", sym.SGOSTRING)
		symgofunc   = groupSym("go.func.*", sym.SGOFUNC)
		symgcbits   = groupSym("runtime.gcbits.*", sym.SGCBITS)
	)

	var symgofuncrel *sym.Symbol
	if !ctxt.DynlinkingGo() {
		if ctxt.UseRelro() {
			symgofuncrel = groupSym("go.funcrel.*", sym.SGOFUNCRELRO)
		} else {
			symgofuncrel = symgofunc
		}
	}

	symitablink := ctxt.Syms.Lookup("runtime.itablink", 0)
	symitablink.Type = sym.SITABLINK

	symt = ctxt.Syms.Lookup("runtime.symtab", 0)
	symt.Attr |= sym.AttrLocal
	symt.Type = sym.SSYMTAB
	symt.Size = 0
	symt.Attr |= sym.AttrReachable

	nitablinks := 0

	// assign specific types so that they sort together.
	// within a type they sort by size, so the .* symbols
	// just defined above will be first.
	// hide the specific symbols.
	for _, s := range ctxt.Syms.Allsym {
		if ctxt.LinkMode != LinkExternal && isStaticTemp(s.Name) {
			s.Attr |= sym.AttrNotInSymbolTable
		}

		if !s.Attr.Reachable() || s.Attr.Special() || s.Type != sym.SRODATA {
			continue
		}

		switch {
		case strings.HasPrefix(s.Name, "type."):
			if !ctxt.DynlinkingGo() {
				s.Attr |= sym.AttrNotInSymbolTable
			}
			if ctxt.UseRelro() {
				s.Type = sym.STYPERELRO
				s.Outer = symtyperel
			} else {
				s.Type = sym.STYPE
				s.Outer = symtype
			}

		case strings.HasPrefix(s.Name, "go.importpath.") && ctxt.UseRelro():
			// Keep go.importpath symbols in the same section as types and
			// names, as they can be referred to by a section offset.
			s.Type = sym.STYPERELRO

		case strings.HasPrefix(s.Name, "go.itablink."):
			nitablinks++
			s.Type = sym.SITABLINK
			s.Attr |= sym.AttrNotInSymbolTable
			s.Outer = symitablink

		case strings.HasPrefix(s.Name, "go.string."):
			s.Type = sym.SGOSTRING
			s.Attr |= sym.AttrNotInSymbolTable
			s.Outer = symgostring

		case strings.HasPrefix(s.Name, "runtime.gcbits."):
			s.Type = sym.SGCBITS
			s.Attr |= sym.AttrNotInSymbolTable
			s.Outer = symgcbits

		case strings.HasSuffix(s.Name, "·f"):
			if !ctxt.DynlinkingGo() {
				s.Attr |= sym.AttrNotInSymbolTable
			}
			if ctxt.UseRelro() {
				s.Type = sym.SGOFUNCRELRO
				s.Outer = symgofuncrel
			} else {
				s.Type = sym.SGOFUNC
				s.Outer = symgofunc
			}

		case strings.HasPrefix(s.Name, "gcargs."),
			strings.HasPrefix(s.Name, "gclocals."),
			strings.HasPrefix(s.Name, "gclocals·"),
			strings.HasPrefix(s.Name, "inltree."),
			strings.HasSuffix(s.Name, ".opendefer"):
			s.Type = sym.SGOFUNC
			s.Attr |= sym.AttrNotInSymbolTable
			s.Outer = symgofunc
			s.Align = 4
			liveness += (s.Size + int64(s.Align) - 1) &^ (int64(s.Align) - 1)
		}
	}

	if ctxt.BuildMode == BuildModeShared {
		abihashgostr := ctxt.Syms.Lookup("go.link.abihash."+filepath.Base(*flagOutfile), 0)
		abihashgostr.Attr |= sym.AttrReachable
		abihashgostr.Type = sym.SRODATA
		hashsym := ctxt.Syms.Lookup("go.link.abihashbytes", 0)
		abihashgostr.AddAddr(ctxt.Arch, hashsym)
		abihashgostr.AddUint(ctxt.Arch, uint64(hashsym.Size))
	}
	if ctxt.BuildMode == BuildModePlugin || ctxt.CanUsePlugins() {
		for _, l := range ctxt.Library {
			s := ctxt.Syms.Lookup("go.link.pkghashbytes."+l.Pkg, 0)
			s.Attr |= sym.AttrReachable
			s.Type = sym.SRODATA
			s.Size = int64(len(l.Hash))
			s.P = []byte(l.Hash)
			str := ctxt.Syms.Lookup("go.link.pkghash."+l.Pkg, 0)
			str.Attr |= sym.AttrReachable
			str.Type = sym.SRODATA
			str.AddAddr(ctxt.Arch, s)
			str.AddUint(ctxt.Arch, uint64(len(l.Hash)))
		}
	}

	nsections := textsectionmap(ctxt)

	// Information about the layout of the executable image for the
	// runtime to use. Any changes here must be matched by changes to
	// the definition of moduledata in runtime/symtab.go.
	// This code uses several global variables that are set by pcln.go:pclntab.
	moduledata := ctxt.Moduledata
	// The pclntab slice
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.pclntab", 0))
	moduledata.AddUint(ctxt.Arch, uint64(ctxt.Syms.Lookup("runtime.pclntab", 0).Size))
	moduledata.AddUint(ctxt.Arch, uint64(ctxt.Syms.Lookup("runtime.pclntab", 0).Size))
	// The ftab slice
	moduledata.AddAddrPlus(ctxt.Arch, ctxt.Syms.Lookup("runtime.pclntab", 0), int64(pclntabPclntabOffset))
	moduledata.AddUint(ctxt.Arch, uint64(pclntabNfunc+1))
	moduledata.AddUint(ctxt.Arch, uint64(pclntabNfunc+1))
	// The filetab slice
	moduledata.AddAddrPlus(ctxt.Arch, ctxt.Syms.Lookup("runtime.pclntab", 0), int64(pclntabFiletabOffset))
	moduledata.AddUint(ctxt.Arch, uint64(len(ctxt.Filesyms))+1)
	moduledata.AddUint(ctxt.Arch, uint64(len(ctxt.Filesyms))+1)
	// findfunctab
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.findfunctab", 0))
	// minpc, maxpc
	moduledata.AddAddr(ctxt.Arch, pclntabFirstFunc)
	moduledata.AddAddrPlus(ctxt.Arch, pclntabLastFunc, pclntabLastFunc.Size)
	// pointers to specific parts of the module
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.text", 0))
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.etext", 0))
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.noptrdata", 0))
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.enoptrdata", 0))
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.data", 0))
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.edata", 0))
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.bss", 0))
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.ebss", 0))
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.noptrbss", 0))
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.enoptrbss", 0))
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.end", 0))
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.gcdata", 0))
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.gcbss", 0))
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.types", 0))
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.etypes", 0))

	if ctxt.HeadType == objabi.Haix && ctxt.LinkMode == LinkExternal {
		// Add R_REF relocation to prevent ld's garbage collection of
		// runtime.rodata, runtime.erodata and runtime.epclntab.
		addRef := func(name string) {
			r := moduledata.AddRel()
			r.Sym = ctxt.Syms.Lookup(name, 0)
			r.Type = objabi.R_XCOFFREF
			r.Siz = uint8(ctxt.Arch.PtrSize)
		}
		addRef("runtime.rodata")
		addRef("runtime.erodata")
		addRef("runtime.epclntab")
	}

	// text section information
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.textsectionmap", 0))
	moduledata.AddUint(ctxt.Arch, uint64(nsections))
	moduledata.AddUint(ctxt.Arch, uint64(nsections))

	// The typelinks slice
	typelinkSym := ctxt.Syms.Lookup("runtime.typelink", 0)
	ntypelinks := uint64(typelinkSym.Size) / 4
	moduledata.AddAddr(ctxt.Arch, typelinkSym)
	moduledata.AddUint(ctxt.Arch, ntypelinks)
	moduledata.AddUint(ctxt.Arch, ntypelinks)
	// The itablinks slice
	moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.itablink", 0))
	moduledata.AddUint(ctxt.Arch, uint64(nitablinks))
	moduledata.AddUint(ctxt.Arch, uint64(nitablinks))
	// The ptab slice
	if ptab := ctxt.Syms.ROLookup("go.plugin.tabs", 0); ptab != nil && ptab.Attr.Reachable() {
		ptab.Attr |= sym.AttrLocal
		ptab.Type = sym.SRODATA

		nentries := uint64(len(ptab.P) / 8) // sizeof(nameOff) + sizeof(typeOff)
		moduledata.AddAddr(ctxt.Arch, ptab)
		moduledata.AddUint(ctxt.Arch, nentries)
		moduledata.AddUint(ctxt.Arch, nentries)
	} else {
		moduledata.AddUint(ctxt.Arch, 0)
		moduledata.AddUint(ctxt.Arch, 0)
		moduledata.AddUint(ctxt.Arch, 0)
	}

	if ctxt.BuildMode == BuildModePlugin {
		addgostring(ctxt, moduledata, "go.link.thispluginpath", objabi.PathToPrefix(*flagPluginPath))

		pkghashes := ctxt.Syms.Lookup("go.link.pkghashes", 0)
		pkghashes.Attr |= sym.AttrReachable
		pkghashes.Attr |= sym.AttrLocal
		pkghashes.Type = sym.SRODATA

		for i, l := range ctxt.Library {
			// pkghashes[i].name
			addgostring(ctxt, pkghashes, fmt.Sprintf("go.link.pkgname.%d", i), l.Pkg)
			// pkghashes[i].linktimehash
			addgostring(ctxt, pkghashes, fmt.Sprintf("go.link.pkglinkhash.%d", i), l.Hash)
			// pkghashes[i].runtimehash
			hash := ctxt.Syms.ROLookup("go.link.pkghash."+l.Pkg, 0)
			pkghashes.AddAddr(ctxt.Arch, hash)
		}
		moduledata.AddAddr(ctxt.Arch, pkghashes)
		moduledata.AddUint(ctxt.Arch, uint64(len(ctxt.Library)))
		moduledata.AddUint(ctxt.Arch, uint64(len(ctxt.Library)))
	} else {
		moduledata.AddUint(ctxt.Arch, 0) // pluginpath
		moduledata.AddUint(ctxt.Arch, 0)
		moduledata.AddUint(ctxt.Arch, 0) // pkghashes slice
		moduledata.AddUint(ctxt.Arch, 0)
		moduledata.AddUint(ctxt.Arch, 0)
	}

	if len(ctxt.Shlibs) > 0 {
		thismodulename := filepath.Base(*flagOutfile)
		switch ctxt.BuildMode {
		case BuildModeExe, BuildModePIE:
			// When linking an executable, outfile is just "a.out". Make
			// it something slightly more comprehensible.
			thismodulename = "the executable"
		}
		addgostring(ctxt, moduledata, "go.link.thismodulename", thismodulename)

		modulehashes := ctxt.Syms.Lookup("go.link.abihashes", 0)
		modulehashes.Attr |= sym.AttrReachable
		modulehashes.Attr |= sym.AttrLocal
		modulehashes.Type = sym.SRODATA

		for i, shlib := range ctxt.Shlibs {
			// modulehashes[i].modulename
			modulename := filepath.Base(shlib.Path)
			addgostring(ctxt, modulehashes, fmt.Sprintf("go.link.libname.%d", i), modulename)

			// modulehashes[i].linktimehash
			addgostring(ctxt, modulehashes, fmt.Sprintf("go.link.linkhash.%d", i), string(shlib.Hash))

			// modulehashes[i].runtimehash
			abihash := ctxt.Syms.Lookup("go.link.abihash."+modulename, 0)
			abihash.Attr |= sym.AttrReachable
			modulehashes.AddAddr(ctxt.Arch, abihash)
		}

		moduledata.AddAddr(ctxt.Arch, modulehashes)
		moduledata.AddUint(ctxt.Arch, uint64(len(ctxt.Shlibs)))
		moduledata.AddUint(ctxt.Arch, uint64(len(ctxt.Shlibs)))
	} else {
		moduledata.AddUint(ctxt.Arch, 0) // modulename
		moduledata.AddUint(ctxt.Arch, 0)
		moduledata.AddUint(ctxt.Arch, 0) // moduleshashes slice
		moduledata.AddUint(ctxt.Arch, 0)
		moduledata.AddUint(ctxt.Arch, 0)
	}

	hasmain := ctxt.BuildMode == BuildModeExe || ctxt.BuildMode == BuildModePIE
	if hasmain {
		moduledata.AddUint8(1)
	} else {
		moduledata.AddUint8(0)
	}

	// The rest of moduledata is zero initialized.
	// When linking an object that does not contain the runtime we are
	// creating the moduledata from scratch and it does not have a
	// compiler-provided size, so read it from the type data.
	moduledatatype := ctxt.Syms.ROLookup("type.runtime.moduledata", 0)
	moduledata.Size = decodetypeSize(ctxt.Arch, moduledatatype.P)
	moduledata.Grow(moduledata.Size)

	lastmoduledatap := ctxt.Syms.Lookup("runtime.lastmoduledatap", 0)
	if lastmoduledatap.Type != sym.SDYNIMPORT {
		lastmoduledatap.Type = sym.SNOPTRDATA
		lastmoduledatap.Size = 0 // overwrite existing value
		lastmoduledatap.AddAddr(ctxt.Arch, moduledata)
	}
}
// isStaticTemp reports whether name is the name of a compiler-generated
// static temporary: its last path element contains "..stmp_".
func isStaticTemp(name string) bool {
	base := name
	if i := strings.LastIndex(base, "/"); i >= 0 {
		base = base[i:] // keep only the last path element
	}
	return strings.Contains(base, "..stmp_")
}
|
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuseops
import (
"bytes"
"log"
"time"
"unsafe"
"github.com/jacobsa/fuse/internal/fusekernel"
"github.com/jacobsa/fuse/internal/fuseshim"
"golang.org/x/net/context"
)
// This function is an implementation detail of the fuse package, and must not
// be called by anyone else.
//
// Convert the supplied fuse kernel message to an Op. finished will be called
// with the error supplied to o.Respond when the user invokes that method,
// before a response is sent to the kernel. o.Respond will destroy the message.
//
// It is guaranteed that o != nil. If the op is unknown, a special unexported
// type will be used.
//
// The debug logging function and error logger may be nil.
func Convert(
opCtx context.Context,
m *fuseshim.Message,
protocol fusekernel.Protocol,
debugLogForOp func(int, string, ...interface{}),
errorLogger *log.Logger,
finished func(error)) (o Op) {
var co *commonOp
var io internalOp
switch m.Hdr.Opcode {
case fusekernel.OpLookup:
buf := m.Bytes()
n := len(buf)
if n == 0 || buf[n-1] != '\x00' {
goto corrupt
}
to := &LookUpInodeOp{
Parent: InodeID(m.Header().Node),
Name: string(buf[:n-1]),
}
io = to
co = &to.commonOp
case fusekernel.OpGetattr:
to := &GetInodeAttributesOp{
Inode: InodeID(m.Header().Node),
}
io = to
co = &to.commonOp
case fusekernel.OpSetattr:
in := (*fusekernel.SetattrIn)(m.Data())
if m.Len() < unsafe.Sizeof(*in) {
goto corrupt
}
to := &SetInodeAttributesOp{
Inode: InodeID(m.Header().Node),
}
valid := fusekernel.SetattrValid(in.Valid)
if valid&fusekernel.SetattrSize != 0 {
to.Size = &in.Size
}
if valid&fusekernel.SetattrMode != 0 {
mode := fuseshim.FileMode(in.Mode)
to.Mode = &mode
}
if valid&fusekernel.SetattrAtime != 0 {
t := time.Unix(int64(in.Atime), int64(in.AtimeNsec))
to.Atime = &t
}
if valid&fusekernel.SetattrMtime != 0 {
t := time.Unix(int64(in.Mtime), int64(in.MtimeNsec))
to.Mtime = &t
}
io = to
co = &to.commonOp
case fusekernel.OpForget:
in := (*fusekernel.ForgetIn)(m.Data())
if m.Len() < unsafe.Sizeof(*in) {
goto corrupt
}
to := &ForgetInodeOp{
Inode: InodeID(m.Header().Node),
N: in.Nlookup,
}
io = to
co = &to.commonOp
case fusekernel.OpMkdir:
size := fusekernel.MkdirInSize(protocol)
if m.Len() < size {
goto corrupt
}
in := (*fusekernel.MkdirIn)(m.Data())
name := m.Bytes()[size:]
i := bytes.IndexByte(name, '\x00')
if i < 0 {
goto corrupt
}
name = name[:i]
to := &MkDirOp{
Parent: InodeID(m.Header().Node),
Name: string(name),
Mode: fuseshim.FileMode(in.Mode),
}
io = to
co = &to.commonOp
case fusekernel.OpCreate:
size := fusekernel.CreateInSize(protocol)
if m.Len() < size {
goto corrupt
}
in := (*fusekernel.CreateIn)(m.Data())
name := m.Bytes()[size:]
i := bytes.IndexByte(name, '\x00')
if i < 0 {
goto corrupt
}
name = name[:i]
to := &CreateFileOp{
Parent: InodeID(m.Header().Node),
Name: string(name),
Mode: fuseshim.FileMode(in.Mode),
}
io = to
co = &to.commonOp
case fusekernel.OpSymlink:
// m.Bytes() is "newName\0target\0"
names := m.Bytes()
if len(names) == 0 || names[len(names)-1] != 0 {
goto corrupt
}
i := bytes.IndexByte(names, '\x00')
if i < 0 {
goto corrupt
}
newName, target := names[0:i], names[i+1:len(names)-1]
to := &CreateSymlinkOp{
Parent: InodeID(m.Header().Node),
Name: string(newName),
Target: string(target),
}
io = to
co = &to.commonOp
case fusekernel.OpRename:
in := (*fusekernel.RenameIn)(m.Data())
if m.Len() < unsafe.Sizeof(*in) {
goto corrupt
}
names := m.Bytes()[unsafe.Sizeof(*in):]
// names should be "old\x00new\x00"
if len(names) < 4 {
goto corrupt
}
if names[len(names)-1] != '\x00' {
goto corrupt
}
i := bytes.IndexByte(names, '\x00')
if i < 0 {
goto corrupt
}
oldName, newName := names[:i], names[i+1:len(names)-1]
to := &RenameOp{
OldParent: InodeID(m.Header().Node),
OldName: string(oldName),
NewParent: InodeID(in.Newdir),
NewName: string(newName),
}
io = to
co = &to.commonOp
case fusekernel.OpUnlink:
buf := m.Bytes()
n := len(buf)
if n == 0 || buf[n-1] != '\x00' {
goto corrupt
}
to := &UnlinkOp{
Parent: InodeID(m.Header().Node),
Name: string(buf[:n-1]),
}
io = to
co = &to.commonOp
case *fuseshim.RemoveRequest:
if typed.Dir {
to := &RmDirOp{
bfReq: typed,
Parent: InodeID(typed.Header.Node),
Name: typed.Name,
}
io = to
co = &to.commonOp
} else {
to := &UnlinkOp{
bfReq: typed,
Parent: InodeID(typed.Header.Node),
Name: typed.Name,
}
io = to
co = &to.commonOp
}
case *fuseshim.OpenRequest:
if typed.Dir {
to := &OpenDirOp{
bfReq: typed,
Inode: InodeID(typed.Header.Node),
}
io = to
co = &to.commonOp
} else {
to := &OpenFileOp{
bfReq: typed,
Inode: InodeID(typed.Header.Node),
}
io = to
co = &to.commonOp
}
case *fuseshim.ReadRequest:
if typed.Dir {
to := &ReadDirOp{
bfReq: typed,
Inode: InodeID(typed.Header.Node),
Handle: HandleID(typed.Handle),
Offset: DirOffset(typed.Offset),
Size: typed.Size,
}
io = to
co = &to.commonOp
} else {
to := &ReadFileOp{
bfReq: typed,
Inode: InodeID(typed.Header.Node),
Handle: HandleID(typed.Handle),
Offset: typed.Offset,
Size: typed.Size,
}
io = to
co = &to.commonOp
}
case *fuseshim.ReleaseRequest:
if typed.Dir {
to := &ReleaseDirHandleOp{
bfReq: typed,
Handle: HandleID(typed.Handle),
}
io = to
co = &to.commonOp
} else {
to := &ReleaseFileHandleOp{
bfReq: typed,
Handle: HandleID(typed.Handle),
}
io = to
co = &to.commonOp
}
case *fuseshim.WriteRequest:
to := &WriteFileOp{
bfReq: typed,
Inode: InodeID(typed.Header.Node),
Handle: HandleID(typed.Handle),
Data: typed.Data,
Offset: typed.Offset,
}
io = to
co = &to.commonOp
case *fuseshim.FsyncRequest:
// We don't currently support this for directories.
if typed.Dir {
to := &unknownOp{}
io = to
co = &to.commonOp
} else {
to := &SyncFileOp{
bfReq: typed,
Inode: InodeID(typed.Header.Node),
Handle: HandleID(typed.Handle),
}
io = to
co = &to.commonOp
}
case *fuseshim.FlushRequest:
to := &FlushFileOp{
bfReq: typed,
Inode: InodeID(typed.Header.Node),
Handle: HandleID(typed.Handle),
}
io = to
co = &to.commonOp
case *fuseshim.ReadlinkRequest:
to := &ReadSymlinkOp{
bfReq: typed,
Inode: InodeID(typed.Header.Node),
}
io = to
co = &to.commonOp
default:
to := &unknownOp{}
io = to
co = &to.commonOp
}
co.init(
opCtx,
io,
r,
debugLogForOp,
errorLogger,
finished)
o = io
return
}
// convertAttributes translates the supplied inode ID and internal attributes
// into the wire-level fuseshim.Attr representation, converting the absolute
// cache expiration time into a relative validity duration for fuse.
func convertAttributes(
	inode InodeID,
	attr InodeAttributes,
	expiration time.Time) fuseshim.Attr {
	var out fuseshim.Attr

	// Identity and size.
	out.Inode = uint64(inode)
	out.Size = attr.Size
	out.Mode = attr.Mode
	out.Nlink = uint32(attr.Nlink)

	// Timestamps.
	out.Atime = attr.Atime
	out.Mtime = attr.Mtime
	out.Ctime = attr.Ctime
	out.Crtime = attr.Crtime

	// Ownership.
	out.Uid = attr.Uid
	out.Gid = attr.Gid

	// How long the kernel may cache these attributes.
	out.Valid = convertExpirationTime(expiration)

	return out
}
// Convert an absolute cache expiration time to a relative time from now for
// consumption by fuse.
func convertExpirationTime(t time.Time) (d time.Duration) {
// Fuse represents durations as unsigned 64-bit counts of seconds and 32-bit
// counts of nanoseconds (cf. http://goo.gl/EJupJV). The bazil.org/fuse
// package converts time.Duration values to this form in a straightforward
// way (cf. http://goo.gl/FJhV8j).
//
// So negative durations are right out. There is no need to cap the positive
// magnitude, because 2^64 seconds is well longer than the 2^63 ns range of
// time.Duration.
d = t.Sub(time.Now())
if d < 0 {
d = 0
}
return
}
// convertChildInodeEntry copies the contents of the supplied child inode
// entry into a bazil.org-style lookup response.
func convertChildInodeEntry(
	in *ChildInodeEntry,
	out *fuseshim.LookupResponse) {
	// How long the kernel may cache this entry.
	out.EntryValid = convertExpirationTime(in.EntryExpiration)

	// The child's attributes, with their own expiration.
	out.Attr = convertAttributes(in.Child, in.Attributes, in.AttributesExpiration)

	// Identity of the child inode.
	out.Generation = uint64(in.Generation)
	out.Node = fuseshim.NodeID(in.Child)
}
RmDirOp
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuseops
import (
"bytes"
"log"
"time"
"unsafe"
"github.com/jacobsa/fuse/internal/fusekernel"
"github.com/jacobsa/fuse/internal/fuseshim"
"golang.org/x/net/context"
)
// This function is an implementation detail of the fuse package, and must not
// be called by anyone else.
//
// Convert the supplied fuse kernel message to an Op. finished will be called
// with the error supplied to o.Respond when the user invokes that method,
// before a response is sent to the kernel. o.Respond will destroy the message.
//
// It is guaranteed that o != nil. If the op is unknown, a special unexported
// type will be used.
//
// The debug logging function and error logger may be nil.
// NOTE(review): this function appears to be a partially-completed migration
// from bazil.org-style request types to raw fuse kernel messages. The switch
// below is a value switch on the opcode, yet the cases from
// *fuseshim.OpenRequest onward are type-switch patterns that reference an
// undefined identifier `typed`; `co.init` is passed an undefined `r`; and the
// `goto corrupt` statements target a label that does not exist in this
// function. Confirm against the original file before relying on this code.
func Convert(
	opCtx context.Context,
	m *fuseshim.Message,
	protocol fusekernel.Protocol,
	debugLogForOp func(int, string, ...interface{}),
	errorLogger *log.Logger,
	finished func(error)) (o Op) {
	// co will point at the embedded commonOp of whichever concrete op we
	// build; io holds the op as an internalOp.
	var co *commonOp
	var io internalOp

	switch m.Hdr.Opcode {
	case fusekernel.OpLookup:
		// Payload is a NUL-terminated name.
		buf := m.Bytes()
		n := len(buf)
		if n == 0 || buf[n-1] != '\x00' {
			goto corrupt
		}
		to := &LookUpInodeOp{
			Parent: InodeID(m.Header().Node),
			Name:   string(buf[:n-1]),
		}
		io = to
		co = &to.commonOp

	case fusekernel.OpGetattr:
		to := &GetInodeAttributesOp{
			Inode: InodeID(m.Header().Node),
		}
		io = to
		co = &to.commonOp

	case fusekernel.OpSetattr:
		in := (*fusekernel.SetattrIn)(m.Data())
		if m.Len() < unsafe.Sizeof(*in) {
			goto corrupt
		}
		to := &SetInodeAttributesOp{
			Inode: InodeID(m.Header().Node),
		}
		// Only the fields flagged valid by the kernel are filled in; the
		// rest stay nil so the file system knows not to touch them.
		valid := fusekernel.SetattrValid(in.Valid)
		if valid&fusekernel.SetattrSize != 0 {
			to.Size = &in.Size
		}
		if valid&fusekernel.SetattrMode != 0 {
			mode := fuseshim.FileMode(in.Mode)
			to.Mode = &mode
		}
		if valid&fusekernel.SetattrAtime != 0 {
			t := time.Unix(int64(in.Atime), int64(in.AtimeNsec))
			to.Atime = &t
		}
		if valid&fusekernel.SetattrMtime != 0 {
			t := time.Unix(int64(in.Mtime), int64(in.MtimeNsec))
			to.Mtime = &t
		}
		io = to
		co = &to.commonOp

	case fusekernel.OpForget:
		in := (*fusekernel.ForgetIn)(m.Data())
		if m.Len() < unsafe.Sizeof(*in) {
			goto corrupt
		}
		to := &ForgetInodeOp{
			Inode: InodeID(m.Header().Node),
			N:     in.Nlookup,
		}
		io = to
		co = &to.commonOp

	case fusekernel.OpMkdir:
		// The fixed-size struct is followed by a NUL-terminated name; the
		// struct's size depends on the negotiated protocol version.
		size := fusekernel.MkdirInSize(protocol)
		if m.Len() < size {
			goto corrupt
		}
		in := (*fusekernel.MkdirIn)(m.Data())
		name := m.Bytes()[size:]
		i := bytes.IndexByte(name, '\x00')
		if i < 0 {
			goto corrupt
		}
		name = name[:i]
		to := &MkDirOp{
			Parent: InodeID(m.Header().Node),
			Name:   string(name),
			Mode:   fuseshim.FileMode(in.Mode),
		}
		io = to
		co = &to.commonOp

	case fusekernel.OpCreate:
		// Same layout as mkdir: protocol-dependent struct, then the name.
		size := fusekernel.CreateInSize(protocol)
		if m.Len() < size {
			goto corrupt
		}
		in := (*fusekernel.CreateIn)(m.Data())
		name := m.Bytes()[size:]
		i := bytes.IndexByte(name, '\x00')
		if i < 0 {
			goto corrupt
		}
		name = name[:i]
		to := &CreateFileOp{
			Parent: InodeID(m.Header().Node),
			Name:   string(name),
			Mode:   fuseshim.FileMode(in.Mode),
		}
		io = to
		co = &to.commonOp

	case fusekernel.OpSymlink:
		// m.Bytes() is "newName\0target\0"
		names := m.Bytes()
		if len(names) == 0 || names[len(names)-1] != 0 {
			goto corrupt
		}
		i := bytes.IndexByte(names, '\x00')
		if i < 0 {
			goto corrupt
		}
		newName, target := names[0:i], names[i+1:len(names)-1]
		to := &CreateSymlinkOp{
			Parent: InodeID(m.Header().Node),
			Name:   string(newName),
			Target: string(target),
		}
		io = to
		co = &to.commonOp

	case fusekernel.OpRename:
		in := (*fusekernel.RenameIn)(m.Data())
		if m.Len() < unsafe.Sizeof(*in) {
			goto corrupt
		}
		names := m.Bytes()[unsafe.Sizeof(*in):]
		// names should be "old\x00new\x00"
		if len(names) < 4 {
			goto corrupt
		}
		if names[len(names)-1] != '\x00' {
			goto corrupt
		}
		i := bytes.IndexByte(names, '\x00')
		if i < 0 {
			goto corrupt
		}
		oldName, newName := names[:i], names[i+1:len(names)-1]
		to := &RenameOp{
			OldParent: InodeID(m.Header().Node),
			OldName:   string(oldName),
			NewParent: InodeID(in.Newdir),
			NewName:   string(newName),
		}
		io = to
		co = &to.commonOp

	case fusekernel.OpUnlink:
		// Payload is a NUL-terminated name.
		buf := m.Bytes()
		n := len(buf)
		if n == 0 || buf[n-1] != '\x00' {
			goto corrupt
		}
		to := &UnlinkOp{
			Parent: InodeID(m.Header().Node),
			Name:   string(buf[:n-1]),
		}
		io = to
		co = &to.commonOp

	case fusekernel.OpRmdir:
		// Payload is a NUL-terminated name.
		buf := m.Bytes()
		n := len(buf)
		if n == 0 || buf[n-1] != '\x00' {
			goto corrupt
		}
		to := &RmDirOp{
			Parent: InodeID(m.Header().Node),
			Name:   string(buf[:n-1]),
		}
		io = to
		co = &to.commonOp

	// NOTE(review): the remaining cases are bazil.org-style type-switch
	// patterns left over from before the migration; `typed` is not defined
	// anywhere in this function.
	case *fuseshim.OpenRequest:
		if typed.Dir {
			to := &OpenDirOp{
				bfReq: typed,
				Inode: InodeID(typed.Header.Node),
			}
			io = to
			co = &to.commonOp
		} else {
			to := &OpenFileOp{
				bfReq: typed,
				Inode: InodeID(typed.Header.Node),
			}
			io = to
			co = &to.commonOp
		}

	case *fuseshim.ReadRequest:
		if typed.Dir {
			to := &ReadDirOp{
				bfReq:  typed,
				Inode:  InodeID(typed.Header.Node),
				Handle: HandleID(typed.Handle),
				Offset: DirOffset(typed.Offset),
				Size:   typed.Size,
			}
			io = to
			co = &to.commonOp
		} else {
			to := &ReadFileOp{
				bfReq:  typed,
				Inode:  InodeID(typed.Header.Node),
				Handle: HandleID(typed.Handle),
				Offset: typed.Offset,
				Size:   typed.Size,
			}
			io = to
			co = &to.commonOp
		}

	case *fuseshim.ReleaseRequest:
		if typed.Dir {
			to := &ReleaseDirHandleOp{
				bfReq:  typed,
				Handle: HandleID(typed.Handle),
			}
			io = to
			co = &to.commonOp
		} else {
			to := &ReleaseFileHandleOp{
				bfReq:  typed,
				Handle: HandleID(typed.Handle),
			}
			io = to
			co = &to.commonOp
		}

	case *fuseshim.WriteRequest:
		to := &WriteFileOp{
			bfReq:  typed,
			Inode:  InodeID(typed.Header.Node),
			Handle: HandleID(typed.Handle),
			Data:   typed.Data,
			Offset: typed.Offset,
		}
		io = to
		co = &to.commonOp

	case *fuseshim.FsyncRequest:
		// We don't currently support this for directories.
		if typed.Dir {
			to := &unknownOp{}
			io = to
			co = &to.commonOp
		} else {
			to := &SyncFileOp{
				bfReq:  typed,
				Inode:  InodeID(typed.Header.Node),
				Handle: HandleID(typed.Handle),
			}
			io = to
			co = &to.commonOp
		}

	case *fuseshim.FlushRequest:
		to := &FlushFileOp{
			bfReq:  typed,
			Inode:  InodeID(typed.Header.Node),
			Handle: HandleID(typed.Handle),
		}
		io = to
		co = &to.commonOp

	case *fuseshim.ReadlinkRequest:
		to := &ReadSymlinkOp{
			bfReq: typed,
			Inode: InodeID(typed.Header.Node),
		}
		io = to
		co = &to.commonOp

	default:
		// Unrecognized opcodes get a placeholder op so the caller can
		// respond with an error.
		to := &unknownOp{}
		io = to
		co = &to.commonOp
	}

	// NOTE(review): `r` is not defined in this function; presumably this
	// should be `m` (or the request object from the pre-migration code).
	co.init(
		opCtx,
		io,
		r,
		debugLogForOp,
		errorLogger,
		finished)

	o = io
	return
}
// convertAttributes translates the supplied inode ID and internal attributes
// into the wire-level fuseshim.Attr representation, converting the absolute
// cache expiration time into a relative validity duration for fuse.
func convertAttributes(
	inode InodeID,
	attr InodeAttributes,
	expiration time.Time) fuseshim.Attr {
	var out fuseshim.Attr

	// Identity and size.
	out.Inode = uint64(inode)
	out.Size = attr.Size
	out.Mode = attr.Mode
	out.Nlink = uint32(attr.Nlink)

	// Timestamps.
	out.Atime = attr.Atime
	out.Mtime = attr.Mtime
	out.Ctime = attr.Ctime
	out.Crtime = attr.Crtime

	// Ownership.
	out.Uid = attr.Uid
	out.Gid = attr.Gid

	// How long the kernel may cache these attributes.
	out.Valid = convertExpirationTime(expiration)

	return out
}
// Convert an absolute cache expiration time to a relative time from now for
// consumption by fuse.
func convertExpirationTime(t time.Time) (d time.Duration) {
// Fuse represents durations as unsigned 64-bit counts of seconds and 32-bit
// counts of nanoseconds (cf. http://goo.gl/EJupJV). The bazil.org/fuse
// package converts time.Duration values to this form in a straightforward
// way (cf. http://goo.gl/FJhV8j).
//
// So negative durations are right out. There is no need to cap the positive
// magnitude, because 2^64 seconds is well longer than the 2^63 ns range of
// time.Duration.
d = t.Sub(time.Now())
if d < 0 {
d = 0
}
return
}
// convertChildInodeEntry copies the contents of the supplied child inode
// entry into a bazil.org-style lookup response.
func convertChildInodeEntry(
	in *ChildInodeEntry,
	out *fuseshim.LookupResponse) {
	// How long the kernel may cache this entry.
	out.EntryValid = convertExpirationTime(in.EntryExpiration)

	// The child's attributes, with their own expiration.
	out.Attr = convertAttributes(in.Child, in.Attributes, in.AttributesExpiration)

	// Identity of the child inode.
	out.Generation = uint64(in.Generation)
	out.Node = fuseshim.NodeID(in.Child)
}
|
package structs
import (
"bytes"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/hex"
"errors"
"fmt"
"io"
"net"
"os"
"path/filepath"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"golang.org/x/crypto/blake2b"
"github.com/gorhill/cronexpr"
"github.com/hashicorp/consul/api"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-version"
"github.com/hashicorp/nomad/acl"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/args"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/mitchellh/copystructure"
"github.com/ugorji/go/codec"
hcodec "github.com/hashicorp/go-msgpack/codec"
)
var (
	// ErrNoLeader is returned when no cluster leader is available to
	// service the request.
	ErrNoLeader = fmt.Errorf("No cluster leader")

	// ErrNoRegionPath is returned when no route to the target region
	// exists.
	ErrNoRegionPath = fmt.Errorf("No path to region")

	// ErrTokenNotFound indicates the supplied ACL token does not exist.
	ErrTokenNotFound = errors.New("ACL token not found")

	// ErrPermissionDenied indicates the caller's ACL token does not grant
	// the required capability.
	ErrPermissionDenied = errors.New("Permission denied")

	// validPolicyName is used to validate a policy name
	validPolicyName = regexp.MustCompile("^[a-zA-Z0-9-]{1,128}$")
)
// MessageType tags Raft log entries so the FSM knows how to decode and apply
// them. New values must be appended, never reordered, since the numeric
// values are persisted in the log.
type MessageType uint8

const (
	NodeRegisterRequestType MessageType = iota
	NodeDeregisterRequestType
	NodeUpdateStatusRequestType
	NodeUpdateDrainRequestType
	JobRegisterRequestType
	JobDeregisterRequestType
	EvalUpdateRequestType
	EvalDeleteRequestType
	AllocUpdateRequestType
	AllocClientUpdateRequestType
	ReconcileJobSummariesRequestType
	VaultAccessorRegisterRequestType
	// NOTE(review): "Degister" looks like a typo for "Deregister", but the
	// constant is exported and its value is persisted, so renaming it here
	// would break compatibility.
	VaultAccessorDegisterRequestType
	ApplyPlanResultsRequestType
	DeploymentStatusUpdateRequestType
	DeploymentPromoteRequestType
	DeploymentAllocHealthRequestType
	DeploymentDeleteRequestType
	JobStabilityRequestType
	ACLPolicyUpsertRequestType
	ACLPolicyDeleteRequestType
	ACLTokenUpsertRequestType
	ACLTokenDeleteRequestType
	ACLTokenBootstrapRequestType
)
const (
	// IgnoreUnknownTypeFlag is set along with a MessageType
	// to indicate that the message type can be safely ignored
	// if it is not recognized. This is for future proofing, so
	// that new commands can be added in a way that won't cause
	// old servers to crash when the FSM attempts to process them.
	IgnoreUnknownTypeFlag MessageType = 128

	// ApiMajorVersion is returned as part of the Status.Version request.
	// It should be incremented anytime the APIs are changed in a way
	// that would break clients for sane client versioning.
	ApiMajorVersion = 1

	// ApiMinorVersion is returned as part of the Status.Version request.
	// It should be incremented anytime the APIs are changed to allow
	// for sane client versioning. Minor changes should be compatible
	// within the major version.
	ApiMinorVersion = 1

	// NOTE(review): the string keys below (APIMajorVersion etc.) are easy
	// to confuse with the numeric ApiMajorVersion/ApiMinorVersion above;
	// they are map keys used in the version response, not version numbers.
	ProtocolVersion = "protocol"
	APIMajorVersion = "api.major"
	APIMinorVersion = "api.minor"

	// Getter modes controlling how artifacts are fetched.
	GetterModeAny  = "any"
	GetterModeFile = "file"
	GetterModeDir  = "dir"

	// maxPolicyDescriptionLength limits a policy description length
	maxPolicyDescriptionLength = 256

	// maxTokenNameLength limits a ACL token name length
	maxTokenNameLength = 64

	// ACLClientToken and ACLManagementToken are the only types of tokens
	ACLClientToken     = "client"
	ACLManagementToken = "management"

	// DefaultNamespace is the default namespace.
	DefaultNamespace            = "default"
	DefaultNamespaceDescription = "Default shared namespace"
)
// Context defines the scope in which a search for Nomad object operates, and
// is also used to query the matching index value for this context
type Context string

const (
	Allocs Context = "allocs"
	// NOTE(review): this value is singular ("deployment") while the other
	// contexts are plural — presumably intentional for compatibility;
	// confirm before "fixing".
	Deployments Context = "deployment"
	Evals       Context = "evals"
	Jobs        Context = "jobs"
	Nodes       Context = "nodes"
	Namespaces  Context = "namespaces"
	Quotas      Context = "quotas"
	All         Context = "all"
)
// NamespacedID is a tuple of an ID and a namespace
type NamespacedID struct {
	ID        string
	Namespace string
}

// RPCInfo is used to describe common information about query
type RPCInfo interface {
	// RequestRegion returns the region this request targets.
	RequestRegion() string
	// IsRead reports whether the request is a read (query) as opposed to a
	// write.
	IsRead() bool
	// AllowStaleRead reports whether the request may be serviced by a
	// follower with possibly stale data.
	AllowStaleRead() bool
}
// QueryOptions is used to specify various flags for read queries
type QueryOptions struct {
	// The target region for this query
	Region string

	// Namespace is the target namespace for the query.
	Namespace string

	// If set, wait until query exceeds given index. Must be provided
	// with MaxQueryTime.
	MinQueryIndex uint64

	// Provided with MinQueryIndex to wait for change.
	MaxQueryTime time.Duration

	// If set, any follower can service the request. Results
	// may be arbitrarily stale.
	AllowStale bool

	// If set, used as prefix for resource list searches
	Prefix string

	// AuthToken is secret portion of the ACL token used for the request
	AuthToken string
}

// RequestRegion returns the target region of the query, satisfying RPCInfo.
func (q QueryOptions) RequestRegion() string {
	return q.Region
}

// RequestNamespace returns the target namespace, falling back to the default
// namespace when none was set.
func (q QueryOptions) RequestNamespace() string {
	if q.Namespace == "" {
		return DefaultNamespace
	}
	return q.Namespace
}

// QueryOption only applies to reads, so always true
func (q QueryOptions) IsRead() bool {
	return true
}

// AllowStaleRead reports whether the caller permitted stale reads.
func (q QueryOptions) AllowStaleRead() bool {
	return q.AllowStale
}
// WriteRequest carries the common fields shared by all write RPCs.
type WriteRequest struct {
	// The target region for this write
	Region string

	// Namespace is the target namespace for the write.
	Namespace string

	// AuthToken is secret portion of the ACL token used for the request
	AuthToken string
}

// RequestRegion returns the target region of the write, satisfying RPCInfo.
func (w WriteRequest) RequestRegion() string {
	// The target region for this request
	return w.Region
}

// RequestNamespace returns the target namespace, falling back to the default
// namespace when none was set.
func (w WriteRequest) RequestNamespace() string {
	if w.Namespace == "" {
		return DefaultNamespace
	}
	return w.Namespace
}

// WriteRequest only applies to writes, always false
func (w WriteRequest) IsRead() bool {
	return false
}

// AllowStaleRead always returns false: writes must go through the leader.
func (w WriteRequest) AllowStaleRead() bool {
	return false
}
// QueryMeta allows a query response to include potentially
// useful metadata about a query
type QueryMeta struct {
	// This is the index associated with the read
	Index uint64

	// If AllowStale is used, this is time elapsed since
	// last contact between the follower and leader. This
	// can be used to gauge staleness.
	LastContact time.Duration

	// Used to indicate if there is a known leader node
	KnownLeader bool
}

// WriteMeta allows a write response to include potentially
// useful metadata about the write
type WriteMeta struct {
	// This is the index associated with the write
	Index uint64
}
// NodeRegisterRequest is used for Node.Register endpoint
// to register a node as being a schedulable entity.
type NodeRegisterRequest struct {
	Node *Node
	WriteRequest
}

// NodeDeregisterRequest is used for Node.Deregister endpoint
// to deregister a node as being a schedulable entity.
type NodeDeregisterRequest struct {
	NodeID string
	WriteRequest
}

// NodeServerInfo is used to in NodeUpdateResponse to return Nomad server
// information used in RPC server lists.
type NodeServerInfo struct {
	// RPCAdvertiseAddr is the IP endpoint that a Nomad Server wishes to
	// be contacted at for RPCs.
	RPCAdvertiseAddr string

	// RpcMajorVersion is the major version number the Nomad Server
	// supports
	RPCMajorVersion int32

	// RpcMinorVersion is the minor version number the Nomad Server
	// supports
	RPCMinorVersion int32

	// Datacenter is the datacenter that a Nomad server belongs to
	Datacenter string
}

// NodeUpdateStatusRequest is used for Node.UpdateStatus endpoint
// to update the status of a node.
type NodeUpdateStatusRequest struct {
	NodeID string
	Status string
	WriteRequest
}

// NodeUpdateDrainRequest is used for updating the drain status
type NodeUpdateDrainRequest struct {
	NodeID string
	Drain  bool
	WriteRequest
}

// NodeEvaluateRequest is used to re-evaluate the node
type NodeEvaluateRequest struct {
	NodeID string
	WriteRequest
}

// NodeSpecificRequest is used when we just need to specify a target node
type NodeSpecificRequest struct {
	NodeID   string
	SecretID string
	QueryOptions
}

// SearchResponse is used to return matches and information about whether
// the match list is truncated specific to each type of context.
type SearchResponse struct {
	// Map of context types to ids which match a specified prefix
	Matches map[Context][]string

	// Truncations indicates whether the matches for a particular context have
	// been truncated
	Truncations map[Context]bool

	QueryMeta
}

// SearchRequest is used to parameterize a request, and returns a
// list of matches made up of jobs, allocations, evaluations, and/or nodes,
// along with whether or not the information returned is truncated.
type SearchRequest struct {
	// Prefix is what ids are matched to. I.e, if the given prefix were
	// "a", potential matches might be "abcd" or "aabb"
	Prefix string

	// Context is the type that can be matched against. A context can be a job,
	// node, evaluation, allocation, or empty (indicated every context should be
	// matched)
	Context Context

	QueryOptions
}
// JobRegisterRequest is used for Job.Register endpoint
// to register a job as being a schedulable entity.
type JobRegisterRequest struct {
	Job *Job

	// If EnforceIndex is set then the job will only be registered if the passed
	// JobModifyIndex matches the current Jobs index. If the index is zero, the
	// register only occurs if the job is new.
	EnforceIndex   bool
	JobModifyIndex uint64

	// PolicyOverride is set when the user is attempting to override any policies
	PolicyOverride bool

	WriteRequest
}

// JobDeregisterRequest is used for Job.Deregister endpoint
// to deregister a job as being a schedulable entity.
type JobDeregisterRequest struct {
	JobID string

	// Purge controls whether the deregister purges the job from the system or
	// whether the job is just marked as stopped and will be removed by the
	// garbage collector
	Purge bool

	WriteRequest
}

// JobEvaluateRequest is used when we just need to re-evaluate a target job
type JobEvaluateRequest struct {
	JobID string
	WriteRequest
}

// JobSpecificRequest is used when we just need to specify a target job
type JobSpecificRequest struct {
	JobID string
	// AllAllocs, if set, includes allocations from all job versions.
	AllAllocs bool
	QueryOptions
}

// JobListRequest is used to parameterize a list request
type JobListRequest struct {
	QueryOptions
}

// JobPlanRequest is used for the Job.Plan endpoint to trigger a dry-run
// evaluation of the Job.
type JobPlanRequest struct {
	Job  *Job
	Diff bool // Toggles an annotated diff

	// PolicyOverride is set when the user is attempting to override any policies
	PolicyOverride bool

	WriteRequest
}

// JobSummaryRequest is used when we just need to get a specific job summary
type JobSummaryRequest struct {
	JobID string
	QueryOptions
}

// JobDispatchRequest is used to dispatch a job based on a parameterized job
type JobDispatchRequest struct {
	JobID   string
	Payload []byte
	Meta    map[string]string
	WriteRequest
}

// JobValidateRequest is used to validate a job
type JobValidateRequest struct {
	Job *Job
	WriteRequest
}

// JobRevertRequest is used to revert a job to a prior version.
type JobRevertRequest struct {
	// JobID is the ID of the job being reverted
	JobID string

	// JobVersion the version to revert to.
	JobVersion uint64

	// EnforcePriorVersion if set will enforce that the job is at the given
	// version before reverting.
	EnforcePriorVersion *uint64

	WriteRequest
}

// JobStabilityRequest is used to marked a job as stable.
type JobStabilityRequest struct {
	// Job to set the stability on
	JobID      string
	JobVersion uint64

	// Set the stability
	Stable bool

	WriteRequest
}

// JobStabilityResponse is the response when marking a job as stable.
type JobStabilityResponse struct {
	WriteMeta
}

// NodeListRequest is used to parameterize a list request
type NodeListRequest struct {
	QueryOptions
}
// EvalUpdateRequest is used for upserting evaluations.
type EvalUpdateRequest struct {
	Evals []*Evaluation
	// EvalToken authenticates that the eval is being updated by its holder.
	EvalToken string
	WriteRequest
}

// EvalDeleteRequest is used for deleting an evaluation.
type EvalDeleteRequest struct {
	Evals  []string
	Allocs []string
	WriteRequest
}

// EvalSpecificRequest is used when we just need to specify a target evaluation
type EvalSpecificRequest struct {
	EvalID string
	QueryOptions
}

// EvalAckRequest is used to Ack/Nack a specific evaluation
type EvalAckRequest struct {
	EvalID string
	Token  string
	WriteRequest
}

// EvalDequeueRequest is used when we want to dequeue an evaluation
type EvalDequeueRequest struct {
	Schedulers       []string
	Timeout          time.Duration
	SchedulerVersion uint16
	WriteRequest
}

// EvalListRequest is used to list the evaluations
type EvalListRequest struct {
	QueryOptions
}

// PlanRequest is used to submit an allocation plan to the leader
type PlanRequest struct {
	Plan *Plan
	WriteRequest
}

// ApplyPlanResultsRequest is used by the planner to apply a Raft transaction
// committing the result of a plan.
type ApplyPlanResultsRequest struct {
	// AllocUpdateRequest holds the allocation updates to be made by the
	// scheduler.
	AllocUpdateRequest

	// Deployment is the deployment created or updated as a result of a
	// scheduling event.
	Deployment *Deployment

	// DeploymentUpdates is a set of status updates to apply to the given
	// deployments. This allows the scheduler to cancel any unneeded deployment
	// because the job is stopped or the update block is removed.
	DeploymentUpdates []*DeploymentStatusUpdate
}
// AllocUpdateRequest is used to submit changes to allocations, either
// to cause evictions or to assign new allocations. Both can be done
// within a single transaction
type AllocUpdateRequest struct {
	// Alloc is the list of new allocations to assign
	Alloc []*Allocation

	// Job is the shared parent job of the allocations.
	// It is pulled out since it is common to reduce payload size.
	Job *Job

	WriteRequest
}

// AllocListRequest is used to request a list of allocations
type AllocListRequest struct {
	QueryOptions
}

// AllocSpecificRequest is used to query a specific allocation
type AllocSpecificRequest struct {
	AllocID string
	QueryOptions
}

// AllocsGetRequest is used to query a set of allocations
type AllocsGetRequest struct {
	AllocIDs []string
	QueryOptions
}

// PeriodicForceRequest is used to force a specific periodic job.
type PeriodicForceRequest struct {
	JobID string
	WriteRequest
}
// ServerMembersResponse has the list of servers in a cluster
type ServerMembersResponse struct {
	// Identity of the server answering the request.
	ServerName   string
	ServerRegion string
	ServerDC     string
	Members      []*ServerMember
}

// ServerMember holds information about a Nomad server agent in a cluster
type ServerMember struct {
	Name   string
	Addr   net.IP
	Port   uint16
	Tags   map[string]string
	Status string

	// Serf protocol version range supported/used by this member.
	ProtocolMin uint8
	ProtocolMax uint8
	ProtocolCur uint8
	DelegateMin uint8
	DelegateMax uint8
	DelegateCur uint8
}

// DeriveVaultTokenRequest is used to request wrapped Vault tokens for the
// following tasks in the given allocation
type DeriveVaultTokenRequest struct {
	NodeID   string
	SecretID string
	AllocID  string
	Tasks    []string
	QueryOptions
}

// VaultAccessorsRequest is used to operate on a set of Vault accessors
type VaultAccessorsRequest struct {
	Accessors []*VaultAccessor
}

// VaultAccessor is a reference to a created Vault token on behalf of
// an allocation's task.
type VaultAccessor struct {
	AllocID     string
	Task        string
	NodeID      string
	Accessor    string
	CreationTTL int

	// Raft Indexes
	CreateIndex uint64
}

// DeriveVaultTokenResponse returns the wrapped tokens for each requested task
type DeriveVaultTokenResponse struct {
	// Tasks is a mapping between the task name and the wrapped token
	Tasks map[string]string

	// Error stores any error that occurred. Errors are stored here so we can
	// communicate whether it is retriable
	Error *RecoverableError

	QueryMeta
}
// GenericRequest is used to request where no
// specific information is needed.
type GenericRequest struct {
	QueryOptions
}

// DeploymentListRequest is used to list the deployments
type DeploymentListRequest struct {
	QueryOptions
}

// DeploymentDeleteRequest is used for deleting deployments.
type DeploymentDeleteRequest struct {
	Deployments []string
	WriteRequest
}

// DeploymentStatusUpdateRequest is used to update the status of a deployment as
// well as optionally creating an evaluation atomically.
type DeploymentStatusUpdateRequest struct {
	// Eval, if set, is used to create an evaluation at the same time as
	// updating the status of a deployment.
	Eval *Evaluation

	// DeploymentUpdate is a status update to apply to the given
	// deployment.
	DeploymentUpdate *DeploymentStatusUpdate

	// Job is used to optionally upsert a job. This is used when setting the
	// allocation health results in a deployment failure and the deployment
	// auto-reverts to the latest stable job.
	Job *Job
}

// DeploymentAllocHealthRequest is used to set the health of a set of
// allocations as part of a deployment.
type DeploymentAllocHealthRequest struct {
	DeploymentID string

	// Marks these allocations as healthy, allow further allocations
	// to be rolled.
	HealthyAllocationIDs []string

	// Any unhealthy allocations fail the deployment
	UnhealthyAllocationIDs []string

	WriteRequest
}

// ApplyDeploymentAllocHealthRequest is used to apply an alloc health request via Raft
type ApplyDeploymentAllocHealthRequest struct {
	DeploymentAllocHealthRequest

	// An optional field to update the status of a deployment
	DeploymentUpdate *DeploymentStatusUpdate

	// Job is used to optionally upsert a job. This is used when setting the
	// allocation health results in a deployment failure and the deployment
	// auto-reverts to the latest stable job.
	Job *Job

	// An optional evaluation to create after promoting the canaries
	Eval *Evaluation
}

// DeploymentPromoteRequest is used to promote task groups in a deployment
type DeploymentPromoteRequest struct {
	DeploymentID string

	// All is to promote all task groups
	All bool

	// Groups is used to set the promotion status per task group
	Groups []string

	WriteRequest
}

// ApplyDeploymentPromoteRequest is used to apply a promotion request via Raft
type ApplyDeploymentPromoteRequest struct {
	DeploymentPromoteRequest

	// An optional evaluation to create after promoting the canaries
	Eval *Evaluation
}

// DeploymentPauseRequest is used to pause a deployment
type DeploymentPauseRequest struct {
	DeploymentID string

	// Pause sets the pause status
	Pause bool

	WriteRequest
}

// DeploymentSpecificRequest is used to make a request specific to a particular
// deployment
type DeploymentSpecificRequest struct {
	DeploymentID string
	QueryOptions
}

// DeploymentFailRequest is used to fail a particular deployment
type DeploymentFailRequest struct {
	DeploymentID string
	WriteRequest
}

// SingleDeploymentResponse is used to respond with a single deployment
type SingleDeploymentResponse struct {
	Deployment *Deployment
	QueryMeta
}

// GenericResponse is used to respond to a request where no
// specific response information is needed.
type GenericResponse struct {
	WriteMeta
}

// VersionResponse is used for the Status.Version response
type VersionResponse struct {
	Build    string
	Versions map[string]int
	QueryMeta
}
// JobRegisterResponse is used to respond to a job registration
type JobRegisterResponse struct {
EvalID string
EvalCreateIndex uint64
JobModifyIndex uint64
// Warnings contains any warnings about the given job. These may include
// deprecation warnings.
Warnings string
QueryMeta
}
// JobDeregisterResponse is used to respond to a job deregistration
type JobDeregisterResponse struct {
EvalID string
EvalCreateIndex uint64
JobModifyIndex uint64
QueryMeta
}
// JobValidateResponse is the response from validate request
type JobValidateResponse struct {
// DriverConfigValidated indicates whether the agent validated the driver
// config
DriverConfigValidated bool
// ValidationErrors is a list of validation errors
ValidationErrors []string
// Error is a string version of any error that may have occurred
Error string
// Warnings contains any warnings about the given job. These may include
// deprecation warnings.
Warnings string
}
// NodeUpdateResponse is used to respond to a node update
type NodeUpdateResponse struct {
HeartbeatTTL time.Duration
EvalIDs []string
EvalCreateIndex uint64
NodeModifyIndex uint64
// LeaderRPCAddr is the RPC address of the current Raft Leader. If
// empty, the current Nomad Server is in the minority of a partition.
LeaderRPCAddr string
// NumNodes is the number of Nomad nodes attached to this quorum of
// Nomad Servers at the time of the response. This value can
// fluctuate based on the health of the cluster between heartbeats.
NumNodes int32
// Servers is the full list of known Nomad servers in the local
// region.
Servers []*NodeServerInfo
QueryMeta
}
// NodeDrainUpdateResponse is used to respond to a node drain update
type NodeDrainUpdateResponse struct {
EvalIDs []string
EvalCreateIndex uint64
NodeModifyIndex uint64
QueryMeta
}
// NodeAllocsResponse is used to return allocs for a single node
type NodeAllocsResponse struct {
Allocs []*Allocation
QueryMeta
}
// NodeClientAllocsResponse is used to return allocs meta data for a single node
type NodeClientAllocsResponse struct {
Allocs map[string]uint64
// MigrateTokens are used when ACLs are enabled to allow cross node,
// authenticated access to sticky volumes
MigrateTokens map[string]string
QueryMeta
}
// SingleNodeResponse is used to return a single node
type SingleNodeResponse struct {
	Node *Node
	QueryMeta
}

// NodeListResponse is used for a list request
type NodeListResponse struct {
	Nodes []*NodeListStub
	QueryMeta
}

// SingleJobResponse is used to return a single job
type SingleJobResponse struct {
	Job *Job
	QueryMeta
}

// JobSummaryResponse is used to return a single job summary
type JobSummaryResponse struct {
	JobSummary *JobSummary
	QueryMeta
}

// JobDispatchResponse is used to respond to a job dispatch request
type JobDispatchResponse struct {
	DispatchedJobID string
	EvalID          string
	EvalCreateIndex uint64
	JobCreateIndex  uint64
	WriteMeta
}

// JobListResponse is used for a list request
type JobListResponse struct {
	Jobs []*JobListStub
	QueryMeta
}

// JobVersionsRequest is used to get a jobs versions
type JobVersionsRequest struct {
	JobID string
	Diffs bool
	QueryOptions
}

// JobVersionsResponse is used for a job get versions request
type JobVersionsResponse struct {
	Versions []*Job
	Diffs    []*JobDiff
	QueryMeta
}

// JobPlanResponse is used to respond to a job plan request
type JobPlanResponse struct {
	// Annotations stores annotations explaining decisions the scheduler made.
	Annotations *PlanAnnotations

	// FailedTGAllocs is the placement failures per task group.
	FailedTGAllocs map[string]*AllocMetric

	// JobModifyIndex is the modification index of the job. The value can be
	// used when running `nomad run` to ensure that the Job wasn’t modified
	// since the last plan. If the job is being created, the value is zero.
	JobModifyIndex uint64

	// CreatedEvals is the set of evaluations created by the scheduler. The
	// reasons for this can be rolling-updates or blocked evals.
	CreatedEvals []*Evaluation

	// Diff contains the diff of the job and annotations on whether the change
	// causes an in-place update or create/destroy
	Diff *JobDiff

	// NextPeriodicLaunch is the time duration till the job would be launched if
	// submitted.
	NextPeriodicLaunch time.Time

	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string

	WriteMeta
}
// SingleAllocResponse is used to return a single allocation
type SingleAllocResponse struct {
	Alloc *Allocation
	QueryMeta
}

// AllocsGetResponse is used to return a set of allocations
type AllocsGetResponse struct {
	Allocs []*Allocation
	QueryMeta
}

// JobAllocationsResponse is used to return the allocations for a job
type JobAllocationsResponse struct {
	Allocations []*AllocListStub
	QueryMeta
}

// JobEvaluationsResponse is used to return the evaluations for a job
type JobEvaluationsResponse struct {
	Evaluations []*Evaluation
	QueryMeta
}

// SingleEvalResponse is used to return a single evaluation
type SingleEvalResponse struct {
	Eval *Evaluation
	QueryMeta
}

// EvalDequeueResponse is used to return from a dequeue
type EvalDequeueResponse struct {
	Eval  *Evaluation
	Token string

	// WaitIndex is the Raft index the worker should wait until invoking the
	// scheduler.
	WaitIndex uint64

	QueryMeta
}
// GetWaitIndex is used to retrieve the Raft index in which state should be at
// or beyond before invoking the scheduler.
func (e *EvalDequeueResponse) GetWaitIndex() uint64 {
	// The explicit wait index is preferred; it is populated on all
	// responses from servers running 0.7.0 and above.
	switch {
	case e.WaitIndex != 0:
		return e.WaitIndex
	case e.Eval != nil:
		// Older servers: fall back to the evaluation's modify index.
		return e.Eval.ModifyIndex
	default:
		// This should never happen
		return 1
	}
}
// PlanResponse is used to return from a PlanRequest
type PlanResponse struct {
	Result *PlanResult
	WriteMeta
}

// AllocListResponse is used for a list request
type AllocListResponse struct {
	Allocations []*AllocListStub
	QueryMeta
}

// DeploymentListResponse is used for a list request
type DeploymentListResponse struct {
	Deployments []*Deployment
	QueryMeta
}

// EvalListResponse is used for a list request
type EvalListResponse struct {
	Evaluations []*Evaluation
	QueryMeta
}

// EvalAllocationsResponse is used to return the allocations for an evaluation
type EvalAllocationsResponse struct {
	Allocations []*AllocListStub
	QueryMeta
}

// PeriodicForceResponse is used to respond to a periodic job force launch
type PeriodicForceResponse struct {
	EvalID          string
	EvalCreateIndex uint64
	WriteMeta
}

// DeploymentUpdateResponse is used to respond to a deployment change. The
// response will include the modify index of the deployment as well as details
// of any triggered evaluation.
type DeploymentUpdateResponse struct {
	EvalID                string
	EvalCreateIndex       uint64
	DeploymentModifyIndex uint64

	// RevertedJobVersion is the version the job was reverted to. If unset, the
	// job wasn't reverted
	RevertedJobVersion *uint64

	WriteMeta
}

// Node status values reported by clients and stored by the servers.
const (
	NodeStatusInit  = "initializing"
	NodeStatusReady = "ready"
	NodeStatusDown  = "down"
)
// ShouldDrainNode checks if a given node status should trigger an
// evaluation. Some states don't require any further action.
func ShouldDrainNode(status string) bool {
	// Init and ready nodes need no action; down nodes must be drained.
	if status == NodeStatusInit || status == NodeStatusReady {
		return false
	}
	if status == NodeStatusDown {
		return true
	}
	// Unknown statuses indicate a programmer error upstream.
	panic(fmt.Sprintf("unhandled node status %s", status))
}
// ValidNodeStatus is used to check if a node status is valid
func ValidNodeStatus(status string) bool {
	// Only the three declared node statuses are accepted.
	return status == NodeStatusInit ||
		status == NodeStatusReady ||
		status == NodeStatusDown
}
// Node is a representation of a schedulable client node
type Node struct {
	// ID is a unique identifier for the node. It can be constructed
	// by doing a concatenation of the Name and Datacenter as a simple
	// approach. Alternatively a UUID may be used.
	ID string

	// SecretID is an ID that is only known by the Node and the set of Servers.
	// It is not accessible via the API and is used to authenticate nodes
	// conducting privileged activities.
	SecretID string

	// Datacenter for this node
	Datacenter string

	// Node name
	Name string

	// HTTPAddr is the address on which the Nomad client is listening for http
	// requests
	HTTPAddr string

	// TLSEnabled indicates if the Agent has TLS enabled for the HTTP API
	TLSEnabled bool

	// Attributes is an arbitrary set of key/value
	// data that can be used for constraints. Examples
	// include "kernel.name=linux", "arch=386", "driver.docker=1",
	// "docker.runtime=1.8.3"
	Attributes map[string]string

	// Resources is the available resources on the client.
	// For example 'cpu=2' 'memory=2048'
	Resources *Resources

	// Reserved is the set of resources that are reserved,
	// and should be subtracted from the total resources for
	// the purposes of scheduling. This may provide certain
	// high-watermark tolerances or because of external schedulers
	// consuming resources.
	Reserved *Resources

	// Links are used to 'link' this client to external
	// systems. For example 'consul=foo.dc1' 'aws=i-83212'
	// 'ami=ami-123'
	Links map[string]string

	// Meta is used to associate arbitrary metadata with this
	// client. This is opaque to Nomad.
	Meta map[string]string

	// NodeClass is an opaque identifier used to group nodes
	// together for the purpose of determining scheduling pressure.
	NodeClass string

	// ComputedClass is a unique id that identifies nodes with a common set of
	// attributes and capabilities.
	ComputedClass string

	// Drain is controlled by the servers, and not the client.
	// If true, no jobs will be scheduled to this node, and existing
	// allocations will be drained.
	Drain bool

	// Status of this node
	Status string

	// StatusDescription is meant to provide more human useful information
	StatusDescription string

	// StatusUpdatedAt is the time stamp at which the state of the node was
	// updated
	StatusUpdatedAt int64

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
// Ready returns if the node is ready for running allocations
func (n *Node) Ready() bool {
	// A draining node never accepts new work, regardless of status.
	if n.Drain {
		return false
	}
	return n.Status == NodeStatusReady
}
// Copy returns a deep copy of the node, or nil for a nil receiver.
func (n *Node) Copy() *Node {
	if n == nil {
		return nil
	}
	// Shallow-copy the value, then replace the reference-typed fields
	// with deep copies so the clone shares no mutable state.
	c := &Node{}
	*c = *n
	c.Attributes = helper.CopyMapStringString(c.Attributes)
	c.Resources = c.Resources.Copy()
	c.Reserved = c.Reserved.Copy()
	c.Links = helper.CopyMapStringString(c.Links)
	c.Meta = helper.CopyMapStringString(c.Meta)
	return c
}
// TerminalStatus returns if the current status is terminal and
// will no longer transition.
func (n *Node) TerminalStatus() bool {
	// Down is currently the only terminal node status.
	return n.Status == NodeStatusDown
}
// Stub returns a summarized version of the node suitable for list
// responses. The Nomad version is pulled from the node's attributes.
func (n *Node) Stub() *NodeListStub {
	return &NodeListStub{
		ID:                n.ID,
		Datacenter:        n.Datacenter,
		Name:              n.Name,
		NodeClass:         n.NodeClass,
		Version:           n.Attributes["nomad.version"],
		Drain:             n.Drain,
		Status:            n.Status,
		StatusDescription: n.StatusDescription,
		CreateIndex:       n.CreateIndex,
		ModifyIndex:       n.ModifyIndex,
	}
}
// NodeListStub is used to return a subset of node information
// for the node list
type NodeListStub struct {
	ID                string
	Datacenter        string
	Name              string
	NodeClass         string
	Version           string
	Drain             bool
	Status            string
	StatusDescription string
	CreateIndex       uint64
	ModifyIndex       uint64
}
// Networks defined for a task on the Resources struct.
type Networks []*NetworkResource

// Port assignment and IP for the given label or empty values.
func (ns Networks) Port(label string) (string, int) {
	// Reserved ports are checked before dynamic ports within each network,
	// and the first matching label wins.
	for _, net := range ns {
		for _, port := range net.ReservedPorts {
			if port.Label == label {
				return net.IP, port.Value
			}
		}
		for _, port := range net.DynamicPorts {
			if port.Label == label {
				return net.IP, port.Value
			}
		}
	}
	return "", 0
}
// Resources is used to define the resources available
// on a client
type Resources struct {
	CPU      int
	MemoryMB int
	DiskMB   int
	IOPS     int
	Networks Networks
}

// BytesInMegabyte is the conversion factor used by DiskInBytes.
const (
	BytesInMegabyte = 1024 * 1024
)
// DefaultResources is a small resources object that contains the
// default resources requests that we will provide to an object.
// --- THIS FUNCTION IS REPLICATED IN api/resources.go and should
// be kept in sync.
func DefaultResources() *Resources {
	r := new(Resources)
	r.CPU = 100
	r.MemoryMB = 300
	r.IOPS = 0
	return r
}
// MinResources is a small resources object that contains the
// absolute minimum resources that we will provide to an object.
// This should not be confused with the defaults which are
// provided in Canonicalize() --- THIS FUNCTION IS REPLICATED IN
// api/resources.go and should be kept in sync.
func MinResources() *Resources {
	r := new(Resources)
	r.CPU = 100
	r.MemoryMB = 10
	r.IOPS = 0
	return r
}
// DiskInBytes returns the amount of disk resources in bytes.
func (r *Resources) DiskInBytes() int64 {
	// Convert to int64 before multiplying: doing the multiplication in the
	// platform int first overflows on 32-bit builds once DiskMB reaches
	// 2048 (2048 * 1024 * 1024 == 2^31).
	return int64(r.DiskMB) * BytesInMegabyte
}
// Merge merges this resource with another resource. Only non-zero
// fields of other overwrite the receiver's fields.
func (r *Resources) Merge(other *Resources) {
	if c := other.CPU; c != 0 {
		r.CPU = c
	}
	if m := other.MemoryMB; m != 0 {
		r.MemoryMB = m
	}
	if d := other.DiskMB; d != 0 {
		r.DiskMB = d
	}
	if i := other.IOPS; i != 0 {
		r.IOPS = i
	}
	if nets := other.Networks; len(nets) != 0 {
		r.Networks = nets
	}
}
// Canonicalize normalizes the resources for scheduling comparisons.
func (r *Resources) Canonicalize() {
	// Ensure that an empty and nil slices are treated the same to avoid scheduling
	// problems since we use reflect DeepEquals.
	if len(r.Networks) == 0 {
		r.Networks = nil
		return
	}
	for _, net := range r.Networks {
		net.Canonicalize()
	}
}
// MeetsMinResources returns an error if the resources specified are less than
// the minimum allowed.
// This is based on the minimums defined in the Resources type
func (r *Resources) MeetsMinResources() error {
	var mErr multierror.Error
	minResources := MinResources()
	if r.CPU < minResources.CPU {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum CPU value is %d; got %d", minResources.CPU, r.CPU))
	}
	if r.MemoryMB < minResources.MemoryMB {
		// Fixed: previously reported minResources.CPU as the memory minimum.
		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MemoryMB value is %d; got %d", minResources.MemoryMB, r.MemoryMB))
	}
	if r.IOPS < minResources.IOPS {
		// Fixed: previously reported minResources.CPU as the IOPS minimum.
		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum IOPS value is %d; got %d", minResources.IOPS, r.IOPS))
	}
	for i, n := range r.Networks {
		if err := n.MeetsMinResources(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("network resource at index %d failed: %v", i, err))
		}
	}

	return mErr.ErrorOrNil()
}
// Copy returns a deep copy of the resources, or nil for a nil receiver.
func (r *Resources) Copy() *Resources {
	if r == nil {
		return nil
	}
	c := new(Resources)
	*c = *r
	// Deep-copy the network slice so the clone owns its own elements.
	if r.Networks != nil {
		c.Networks = make([]*NetworkResource, len(r.Networks))
		for i, net := range r.Networks {
			c.Networks[i] = net.Copy()
		}
	}
	return c
}
// NetIndex finds the matching net index using device name.
// It returns -1 when no network shares the device.
func (r *Resources) NetIndex(n *NetworkResource) int {
	for i := range r.Networks {
		if r.Networks[i].Device == n.Device {
			return i
		}
	}
	return -1
}
// Superset checks if one set of resources is a superset
// of another. This ignores network resources, and the NetworkIndex
// should be used for that. On failure the second return value names
// the first exhausted dimension.
func (r *Resources) Superset(other *Resources) (bool, string) {
	switch {
	case r.CPU < other.CPU:
		return false, "cpu exhausted"
	case r.MemoryMB < other.MemoryMB:
		return false, "memory exhausted"
	case r.DiskMB < other.DiskMB:
		return false, "disk exhausted"
	case r.IOPS < other.IOPS:
		return false, "iops exhausted"
	}
	return true, ""
}
// Add adds the resources of the delta to this, potentially
// returning an error if not possible. A nil delta is a no-op.
func (r *Resources) Add(delta *Resources) error {
	if delta == nil {
		return nil
	}
	r.CPU += delta.CPU
	r.MemoryMB += delta.MemoryMB
	r.DiskMB += delta.DiskMB
	r.IOPS += delta.IOPS

	for _, net := range delta.Networks {
		// Merge into the network on the same device if present,
		// otherwise append a copy.
		if idx := r.NetIndex(net); idx >= 0 {
			r.Networks[idx].Add(net)
		} else {
			r.Networks = append(r.Networks, net.Copy())
		}
	}
	return nil
}
// GoString returns a Go-syntax representation for %#v formatting.
func (r *Resources) GoString() string {
	return fmt.Sprintf("*%#v", *r)
}
// Port pairs a user-supplied label with an assigned port number.
type Port struct {
	Label string
	Value int
}

// NetworkResource is used to represent available network
// resources
type NetworkResource struct {
	Device        string // Name of the device
	CIDR          string // CIDR block of addresses
	IP            string // Host IP address
	MBits         int    // Throughput
	ReservedPorts []Port // Host Reserved ports
	DynamicPorts  []Port // Host Dynamically assigned ports
}
// Canonicalize normalizes the network resource for scheduling comparisons.
func (n *NetworkResource) Canonicalize() {
	// Ensure that an empty and nil slices are treated the same to avoid scheduling
	// problems since we use reflect DeepEquals.
	if len(n.ReservedPorts) == 0 {
		n.ReservedPorts = nil
	}
	if len(n.DynamicPorts) == 0 {
		n.DynamicPorts = nil
	}
}
// MeetsMinResources returns an error if the resources specified are less than
// the minimum allowed.
func (n *NetworkResource) MeetsMinResources() error {
	// The only enforced network minimum is 1 MBit of throughput.
	if n.MBits >= 1 {
		return nil
	}
	var mErr multierror.Error
	mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MBits value is 1; got %d", n.MBits))
	return mErr.ErrorOrNil()
}
// Copy returns a deep copy of the network resource, or nil for a nil
// receiver.
func (n *NetworkResource) Copy() *NetworkResource {
	if n == nil {
		return nil
	}
	c := new(NetworkResource)
	*c = *n
	// make+copy preserves the nil/empty distinction the scheduler's
	// DeepEqual comparisons rely on.
	if n.ReservedPorts != nil {
		c.ReservedPorts = make([]Port, len(n.ReservedPorts))
		copy(c.ReservedPorts, n.ReservedPorts)
	}
	if n.DynamicPorts != nil {
		c.DynamicPorts = make([]Port, len(n.DynamicPorts))
		copy(c.DynamicPorts, n.DynamicPorts)
	}
	return c
}
// Add adds the resources of the delta to this, potentially
// returning an error if not possible.
func (n *NetworkResource) Add(delta *NetworkResource) {
	n.MBits += delta.MBits
	if len(delta.ReservedPorts) > 0 {
		n.ReservedPorts = append(n.ReservedPorts, delta.ReservedPorts...)
	}
	n.DynamicPorts = append(n.DynamicPorts, delta.DynamicPorts...)
}
// GoString returns a Go-syntax representation for %#v formatting.
func (n *NetworkResource) GoString() string {
	return fmt.Sprintf("*%#v", *n)
}
// PortLabels returns a map of port labels to their assigned host ports.
func (n *NetworkResource) PortLabels() map[string]int {
	labels := make(map[string]int, len(n.ReservedPorts)+len(n.DynamicPorts))
	for _, p := range n.ReservedPorts {
		labels[p.Label] = p.Value
	}
	for _, p := range n.DynamicPorts {
		labels[p.Label] = p.Value
	}
	return labels
}
const (
	// JobTypeCore is reserved for internal system tasks and is
	// always handled by the CoreScheduler.
	JobTypeCore    = "_core"
	JobTypeService = "service"
	JobTypeBatch   = "batch"
	JobTypeSystem  = "system"
)

const (
	JobStatusPending = "pending" // Pending means the job is waiting on scheduling
	JobStatusRunning = "running" // Running means the job has non-terminal allocations
	JobStatusDead    = "dead"    // Dead means all evaluation's and allocations are terminal
)

const (
	// JobMinPriority is the minimum allowed priority
	JobMinPriority = 1

	// JobDefaultPriority is the default priority if not
	// specified.
	JobDefaultPriority = 50

	// JobMaxPriority is the maximum allowed priority
	JobMaxPriority = 100

	// Ensure CoreJobPriority is higher than any user
	// specified job so that it gets priority. This is important
	// for the system to remain healthy.
	CoreJobPriority = JobMaxPriority * 2

	// JobTrackedVersions is the number of historic job versions that are
	// kept.
	JobTrackedVersions = 6
)
// Job is the scope of a scheduling request to Nomad. It is the largest
// scoped object, and is a named collection of task groups. Each task group
// is further composed of tasks. A task group (TG) is the unit of scheduling
// however.
type Job struct {
	// Stop marks whether the user has stopped the job. A stopped job will
	// have all created allocations stopped and acts as a way to stop a job
	// without purging it from the system. This allows existing allocs to be
	// queried and the job to be inspected as it is being killed.
	Stop bool

	// Region is the Nomad region that handles scheduling this job
	Region string

	// Namespace is the namespace the job is submitted into.
	Namespace string

	// ID is a unique identifier for the job per region. It can be
	// specified hierarchically like LineOfBiz/OrgName/Team/Project
	ID string

	// ParentID is the unique identifier of the job that spawned this job.
	ParentID string

	// Name is the logical name of the job used to refer to it. This is unique
	// per region, but not unique globally.
	Name string

	// Type is used to control various behaviors about the job. Most jobs
	// are service jobs, meaning they are expected to be long lived.
	// Some jobs are batch oriented meaning they run and then terminate.
	// This can be extended in the future to support custom schedulers.
	Type string

	// Priority is used to control scheduling importance and if this job
	// can preempt other jobs.
	Priority int

	// AllAtOnce is used to control if incremental scheduling of task groups
	// is allowed or if we must do a gang scheduling of the entire job. This
	// can slow down larger jobs if resources are not available.
	AllAtOnce bool

	// Datacenters contains all the datacenters this job is allowed to span
	Datacenters []string

	// Constraints can be specified at a job level and apply to
	// all the task groups and tasks.
	Constraints []*Constraint

	// TaskGroups are the collections of task groups that this job needs
	// to run. Each task group is an atomic unit of scheduling and placement.
	TaskGroups []*TaskGroup

	// COMPAT: Remove in 0.7.0. Stagger is deprecated in 0.6.0.
	Update UpdateStrategy

	// Periodic is used to define the interval the job is run at.
	Periodic *PeriodicConfig

	// ParameterizedJob is used to specify the job as a parameterized job
	// for dispatching.
	ParameterizedJob *ParameterizedJobConfig

	// Payload is the payload supplied when the job was dispatched.
	Payload []byte

	// Meta is used to associate arbitrary metadata with this
	// job. This is opaque to Nomad.
	Meta map[string]string

	// VaultToken is the Vault token that proves the submitter of the job has
	// access to the specified Vault policies. This field is only used to
	// transfer the token and is not stored after Job submission.
	VaultToken string

	// Job status
	Status string

	// StatusDescription is meant to provide more human useful information
	StatusDescription string

	// Stable marks a job as stable. Stability is only defined on "service" and
	// "system" jobs. The stability of a job will be set automatically as part
	// of a deployment and can be manually set via APIs.
	Stable bool

	// Version is a monotonically increasing version number that is incremented
	// on each job register.
	Version uint64

	// SubmitTime is the time at which the job was submitted as a UnixNano in
	// UTC
	SubmitTime int64

	// Raft Indexes
	CreateIndex    uint64
	ModifyIndex    uint64
	JobModifyIndex uint64
}
// Canonicalize is used to canonicalize fields in the Job. This should be called
// when registering a Job. A set of warnings are returned if the job was changed
// in anyway that the user should be made aware of.
func (j *Job) Canonicalize() (warnings error) {
	if j == nil {
		return nil
	}

	var mErr multierror.Error
	// Ensure that an empty and nil map are treated the same to avoid scheduling
	// problems since we use reflect DeepEquals.
	if len(j.Meta) == 0 {
		j.Meta = nil
	}

	// Ensure the job is in a namespace.
	if j.Namespace == "" {
		j.Namespace = DefaultNamespace
	}

	for _, tg := range j.TaskGroups {
		tg.Canonicalize(j)
	}

	if j.ParameterizedJob != nil {
		j.ParameterizedJob.Canonicalize()
	}

	if j.Periodic != nil {
		j.Periodic.Canonicalize()
	}

	// COMPAT: Remove in 0.7.0
	// Rewrite any job that has an update block with pre 0.6.0 syntax.
	// Old syntax is detected by Stagger+MaxParallel both being set on the
	// job-level update block.
	jobHasOldUpdate := j.Update.Stagger > 0 && j.Update.MaxParallel > 0
	if jobHasOldUpdate && j.Type != JobTypeBatch {
		// Build an appropriate update block and copy it down to each task group
		base := DefaultUpdateStrategy.Copy()
		base.MaxParallel = j.Update.MaxParallel
		base.MinHealthyTime = j.Update.Stagger

		// Add to each task group, modifying as needed
		upgraded := false
		l := len(j.TaskGroups)
		for _, tg := range j.TaskGroups {
			// The task group doesn't need upgrading if it has an update block with the new syntax
			u := tg.Update
			if u != nil && u.Stagger > 0 && u.MaxParallel > 0 &&
				u.HealthCheck != "" && u.MinHealthyTime > 0 && u.HealthyDeadline > 0 {
				continue
			}

			upgraded = true

			// The MaxParallel for the job should be 10% of the total count
			// unless there is just one task group then we can infer the old
			// max parallel should be the new
			tgu := base.Copy()
			if l != 1 {
				// RoundTo 10%
				var percent float64 = float64(tg.Count) * 0.1
				tgu.MaxParallel = int(percent + 0.5)
			}

			// Safety guards
			if tgu.MaxParallel == 0 {
				tgu.MaxParallel = 1
			} else if tgu.MaxParallel > tg.Count {
				tgu.MaxParallel = tg.Count
			}

			tg.Update = tgu
		}

		if upgraded {
			w := "A best effort conversion to new update stanza introduced in v0.6.0 applied. " +
				"Please update upgrade stanza before v0.7.0."
			multierror.Append(&mErr, fmt.Errorf(w))
		}
	}

	// Ensure that the batch job doesn't have new style or old style update
	// stanza. Unfortunately we are scanning here because we have to deprecate over
	// a release so we can't check in the task group since that may be new style
	// but wouldn't capture the old style and we don't want to have duplicate
	// warnings.
	if j.Type == JobTypeBatch {
		displayWarning := jobHasOldUpdate
		j.Update.Stagger = 0
		j.Update.MaxParallel = 0
		j.Update.HealthCheck = ""
		j.Update.MinHealthyTime = 0
		j.Update.HealthyDeadline = 0
		j.Update.AutoRevert = false
		j.Update.Canary = 0

		// Remove any update spec from the task groups
		for _, tg := range j.TaskGroups {
			if tg.Update != nil {
				displayWarning = true
				tg.Update = nil
			}
		}

		if displayWarning {
			w := "Update stanza is disallowed for batch jobs since v0.6.0. " +
				"The update block has automatically been removed"
			multierror.Append(&mErr, fmt.Errorf(w))
		}
	}

	return mErr.ErrorOrNil()
}
// Copy returns a deep copy of the Job. It is expected that callers use recover.
// This job can panic if the deep copy failed as it uses reflection.
func (j *Job) Copy() *Job {
	if j == nil {
		return nil
	}
	// Shallow-copy the value, then deep-copy the reference-typed fields.
	c := new(Job)
	*c = *j
	c.Datacenters = helper.CopySliceString(c.Datacenters)
	c.Constraints = CopySliceConstraints(c.Constraints)

	if j.TaskGroups != nil {
		c.TaskGroups = make([]*TaskGroup, len(j.TaskGroups))
		for i, tg := range j.TaskGroups {
			c.TaskGroups[i] = tg.Copy()
		}
	}

	c.Periodic = c.Periodic.Copy()
	c.Meta = helper.CopyMapStringString(c.Meta)
	c.ParameterizedJob = c.ParameterizedJob.Copy()
	return c
}
// Validate is used to sanity check a job input. It aggregates every
// problem found into a single multierror rather than failing fast.
func (j *Job) Validate() error {
	var mErr multierror.Error
	if j.Region == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job region"))
	}
	if j.ID == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job ID"))
	} else if strings.Contains(j.ID, " ") {
		mErr.Errors = append(mErr.Errors, errors.New("Job ID contains a space"))
	}
	if j.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job name"))
	}
	if j.Namespace == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Job must be in a namespace"))
	}
	switch j.Type {
	case JobTypeCore, JobTypeService, JobTypeBatch, JobTypeSystem:
	case "":
		mErr.Errors = append(mErr.Errors, errors.New("Missing job type"))
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Invalid job type: %q", j.Type))
	}
	if j.Priority < JobMinPriority || j.Priority > JobMaxPriority {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Job priority must be between [%d, %d]", JobMinPriority, JobMaxPriority))
	}
	if len(j.Datacenters) == 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job datacenters"))
	}
	if len(j.TaskGroups) == 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job task groups"))
	}
	for idx, constr := range j.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	// Check for duplicate task groups
	taskGroups := make(map[string]int)
	for idx, tg := range j.TaskGroups {
		if tg.Name == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d missing name", idx+1))
		} else if existing, ok := taskGroups[tg.Name]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d redefines '%s' from group %d", idx+1, tg.Name, existing+1))
		} else {
			taskGroups[tg.Name] = idx
		}

		// Use the declared constant rather than the magic string "system"
		// for consistency with the job-type switch above.
		if j.Type == JobTypeSystem && tg.Count > 1 {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("Job task group %s has count %d. Count cannot exceed 1 with system scheduler",
					tg.Name, tg.Count))
		}
	}

	// Validate the task group
	for _, tg := range j.TaskGroups {
		if err := tg.Validate(j); err != nil {
			outer := fmt.Errorf("Task group %s validation failed: %v", tg.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	// Validate periodic is only used with batch jobs.
	if j.IsPeriodic() && j.Periodic.Enabled {
		if j.Type != JobTypeBatch {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("Periodic can only be used with %q scheduler", JobTypeBatch))
		}

		if err := j.Periodic.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}

	if j.IsParameterized() {
		if j.Type != JobTypeBatch {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("Parameterized job can only be used with %q scheduler", JobTypeBatch))
		}

		if err := j.ParameterizedJob.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}

	return mErr.ErrorOrNil()
}
// Warnings returns a list of warnings that may be from dubious settings or
// deprecation warnings.
func (j *Job) Warnings() error {
	var mErr multierror.Error

	// Collect warnings from every task group.
	for _, tg := range j.TaskGroups {
		err := tg.Warnings(j)
		if err == nil {
			continue
		}
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Group %q has warnings: %v", tg.Name, err))
	}

	return mErr.ErrorOrNil()
}
// LookupTaskGroup finds a task group by name, returning nil when no
// group matches.
func (j *Job) LookupTaskGroup(name string) *TaskGroup {
	for i := range j.TaskGroups {
		if j.TaskGroups[i].Name == name {
			return j.TaskGroups[i]
		}
	}
	return nil
}
// CombinedTaskMeta takes a TaskGroup and Task name and returns the combined
// meta data for the task. When joining Job, Group and Task Meta, the precedence
// is by deepest scope (Task > Group > Job). Returns nil when either the group
// or the task cannot be found.
func (j *Job) CombinedTaskMeta(groupName, taskName string) map[string]string {
	group := j.LookupTaskGroup(groupName)
	if group == nil {
		return nil
	}

	task := group.LookupTask(taskName)
	if task == nil {
		return nil
	}

	// Start from the task meta (deepest scope), then let lower-precedence
	// scopes fill only the keys the task did not set.
	merged := helper.CopyMapStringString(task.Meta)
	if merged == nil {
		merged = make(map[string]string, len(group.Meta)+len(j.Meta))
	}

	for _, scope := range []map[string]string{group.Meta, j.Meta} {
		for k, v := range scope {
			if _, ok := merged[k]; !ok {
				merged[k] = v
			}
		}
	}

	return merged
}
// Stopped returns if a job is stopped. A nil job counts as stopped.
func (j *Job) Stopped() bool {
	if j == nil {
		return true
	}
	return j.Stop
}
// HasUpdateStrategy returns if any task group in the job has an update strategy
func (j *Job) HasUpdateStrategy() bool {
	for _, tg := range j.TaskGroups {
		if tg.Update == nil {
			continue
		}
		return true
	}
	return false
}
// Stub is used to return a summary of the job suitable for list
// responses; the caller supplies the (possibly nil) summary to attach.
func (j *Job) Stub(summary *JobSummary) *JobListStub {
	return &JobListStub{
		ID:                j.ID,
		ParentID:          j.ParentID,
		Name:              j.Name,
		Type:              j.Type,
		Priority:          j.Priority,
		Periodic:          j.IsPeriodic(),
		ParameterizedJob:  j.IsParameterized(),
		Stop:              j.Stop,
		Status:            j.Status,
		StatusDescription: j.StatusDescription,
		CreateIndex:       j.CreateIndex,
		ModifyIndex:       j.ModifyIndex,
		JobModifyIndex:    j.JobModifyIndex,
		SubmitTime:        j.SubmitTime,
		JobSummary:        summary,
	}
}
// IsPeriodic returns whether a job is periodic.
func (j *Job) IsPeriodic() bool {
	return j.Periodic != nil
}

// IsParameterized returns whether a job is parameterized job.
func (j *Job) IsParameterized() bool {
	return j.ParameterizedJob != nil
}
// VaultPolicies returns the set of Vault policies per task group, per task.
// Task groups with no Vault-using tasks are omitted from the result.
func (j *Job) VaultPolicies() map[string]map[string]*Vault {
	policies := make(map[string]map[string]*Vault, len(j.TaskGroups))

	for _, tg := range j.TaskGroups {
		perTask := make(map[string]*Vault, len(tg.Tasks))
		for _, task := range tg.Tasks {
			if task.Vault != nil {
				perTask[task.Name] = task.Vault
			}
		}

		if len(perTask) > 0 {
			policies[tg.Name] = perTask
		}
	}

	return policies
}
// RequiredSignals returns a mapping of task groups to tasks to their required
// set of signals
func (j *Job) RequiredSignals() map[string]map[string][]string {
	signals := make(map[string]map[string][]string)

	for _, tg := range j.TaskGroups {
		for _, task := range tg.Tasks {
			// Use this local one as a set to deduplicate signals
			taskSignals := make(map[string]struct{})

			// Check if the Vault change mode uses signals
			if task.Vault != nil && task.Vault.ChangeMode == VaultChangeModeSignal {
				taskSignals[task.Vault.ChangeSignal] = struct{}{}
			}

			// Check if any template change mode uses signals
			for _, t := range task.Templates {
				if t.ChangeMode != TemplateChangeModeSignal {
					continue
				}

				taskSignals[t.ChangeSignal] = struct{}{}
			}

			// Flatten and sort the signals for deterministic output;
			// tasks with no signals are skipped entirely.
			l := len(taskSignals)
			if l == 0 {
				continue
			}

			flat := make([]string, 0, l)
			for sig := range taskSignals {
				flat = append(flat, sig)
			}

			sort.Strings(flat)
			// Lazily create the per-group map only when a task needs it.
			tgSignals, ok := signals[tg.Name]
			if !ok {
				tgSignals = make(map[string][]string)
				signals[tg.Name] = tgSignals
			}
			tgSignals[task.Name] = flat
		}
	}

	return signals
}
// SpecChanged determines if the functional specification has changed between
// two job versions. Parameter renamed from `new` to `other` to avoid
// shadowing the builtin new.
func (j *Job) SpecChanged(other *Job) bool {
	if j == nil {
		return other != nil
	}

	// Create a copy of the new job
	c := other.Copy()

	// Copy over the server-maintained bookkeeping fields so that
	// reflect.DeepEqual compares only the user-submitted specification.
	c.Status = j.Status
	c.StatusDescription = j.StatusDescription
	c.Stable = j.Stable
	c.Version = j.Version
	c.CreateIndex = j.CreateIndex
	c.ModifyIndex = j.ModifyIndex
	c.JobModifyIndex = j.JobModifyIndex
	c.SubmitTime = j.SubmitTime

	// Deep equals the jobs
	return !reflect.DeepEqual(j, c)
}
// SetSubmitTime records the current UTC time, in nanoseconds, as the
// job's submission time.
func (j *Job) SetSubmitTime() {
	now := time.Now().UTC()
	j.SubmitTime = now.UnixNano()
}
// JobListStub is used to return a subset of job information
// for the job list
type JobListStub struct {
	ID                string
	ParentID          string
	Name              string
	Type              string
	Priority          int
	Periodic          bool
	ParameterizedJob  bool
	Stop              bool
	Status            string
	StatusDescription string
	JobSummary        *JobSummary
	CreateIndex       uint64
	ModifyIndex       uint64
	JobModifyIndex    uint64
	SubmitTime        int64
}
// JobSummary summarizes the state of the allocations of a job
type JobSummary struct {
	// JobID is the ID of the job the summary is for
	JobID string

	// Namespace is the namespace of the job and its summary
	Namespace string

	// Summary contains the summary per task group for the Job
	Summary map[string]TaskGroupSummary

	// Children contains a summary for the children of this job.
	Children *JobChildrenSummary

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
// Copy returns a new copy of JobSummary. The per-task-group summary map is
// duplicated so mutations of the copy do not leak into the original. A nil
// receiver yields nil, consistent with the other Copy helpers in this package
// (the original implementation panicked on a nil receiver).
func (js *JobSummary) Copy() *JobSummary {
	if js == nil {
		return nil
	}
	newJobSummary := new(JobSummary)
	*newJobSummary = *js
	newTGSummary := make(map[string]TaskGroupSummary, len(js.Summary))
	for k, v := range js.Summary {
		newTGSummary[k] = v
	}
	newJobSummary.Summary = newTGSummary
	newJobSummary.Children = newJobSummary.Children.Copy()
	return newJobSummary
}
// JobChildrenSummary contains the summary of children job statuses. Counts
// cover the child jobs spawned by a periodic or parameterized parent.
type JobChildrenSummary struct {
	Pending int64 // children not yet running
	Running int64 // children currently running
	Dead    int64 // children that have finished or failed
}
// Copy returns a new copy of a JobChildrenSummary, or nil when the receiver
// is nil. The struct holds only scalar counters, so a value copy suffices.
func (jc *JobChildrenSummary) Copy() *JobChildrenSummary {
	if jc == nil {
		return nil
	}
	dup := *jc
	return &dup
}
// TaskGroupSummary summarizes the state of all the allocations of a
// particular TaskGroup, bucketed by allocation client/scheduling status.
type TaskGroupSummary struct {
	Queued   int
	Complete int
	Failed   int
	Running  int
	Starting int
	Lost     int
}
const (
	// Checks uses any registered health check state in combination with task
	// states to determine if an allocation is healthy.
	UpdateStrategyHealthCheck_Checks = "checks"

	// TaskStates uses the task states of an allocation to determine if the
	// allocation is healthy.
	UpdateStrategyHealthCheck_TaskStates = "task_states"

	// Manual allows the operator to manually signal to Nomad when an
	// allocation is healthy. This allows more advanced health checking that is
	// outside of the scope of Nomad.
	UpdateStrategyHealthCheck_Manual = "manual"
)
var (
	// DefaultUpdateStrategy provides a baseline that can be used to upgrade
	// jobs with the old policy or for populating field defaults. One
	// allocation at a time, health determined by Consul checks, no canaries,
	// no auto-revert.
	DefaultUpdateStrategy = &UpdateStrategy{
		Stagger:         30 * time.Second,
		MaxParallel:     1,
		HealthCheck:     UpdateStrategyHealthCheck_Checks,
		MinHealthyTime:  10 * time.Second,
		HealthyDeadline: 5 * time.Minute,
		AutoRevert:      false,
		Canary:          0,
	}
)
// UpdateStrategy is used to modify how updates are done
type UpdateStrategy struct {
	// Stagger is used to determine the rate at which allocations are migrated
	// due to down or draining nodes.
	Stagger time.Duration

	// MaxParallel is how many updates can be done in parallel
	MaxParallel int

	// HealthCheck specifies the mechanism in which allocations are marked
	// healthy or unhealthy as part of a deployment. One of the
	// UpdateStrategyHealthCheck_* constants.
	HealthCheck string

	// MinHealthyTime is the minimum time an allocation must be in the healthy
	// state before it is marked as healthy, unblocking more allocations to be
	// rolled.
	MinHealthyTime time.Duration

	// HealthyDeadline is the time in which an allocation must be marked as
	// healthy before it is automatically transitioned to unhealthy. This time
	// period doesn't count against the MinHealthyTime.
	HealthyDeadline time.Duration

	// AutoRevert declares that if a deployment fails because of unhealthy
	// allocations, there should be an attempt to auto-revert the job to a
	// stable version.
	AutoRevert bool

	// Canary is the number of canaries to deploy when a change to the task
	// group is detected.
	Canary int
}
// Copy returns a shallow copy of the update strategy (all fields are scalar),
// or nil for a nil receiver. The local is named c rather than copy to avoid
// shadowing the builtin copy function.
func (u *UpdateStrategy) Copy() *UpdateStrategy {
	if u == nil {
		return nil
	}
	c := new(UpdateStrategy)
	*c = *u
	return c
}
// Validate returns an error if any update strategy field is out of range or
// the fields are mutually inconsistent. A nil strategy is valid. All problems
// are accumulated into a single multierror.
func (u *UpdateStrategy) Validate() error {
	if u == nil {
		return nil
	}

	var mErr multierror.Error
	switch u.HealthCheck {
	case UpdateStrategyHealthCheck_Checks, UpdateStrategyHealthCheck_TaskStates, UpdateStrategyHealthCheck_Manual:
	default:
		multierror.Append(&mErr, fmt.Errorf("Invalid health check given: %q", u.HealthCheck))
	}

	if u.MaxParallel < 1 {
		multierror.Append(&mErr, fmt.Errorf("Max parallel can not be less than one: %d < 1", u.MaxParallel))
	}
	if u.Canary < 0 {
		multierror.Append(&mErr, fmt.Errorf("Canary count can not be less than zero: %d < 0", u.Canary))
	}
	if u.MinHealthyTime < 0 {
		multierror.Append(&mErr, fmt.Errorf("Minimum healthy time may not be less than zero: %v", u.MinHealthyTime))
	}
	if u.HealthyDeadline <= 0 {
		multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be greater than zero: %v", u.HealthyDeadline))
	}
	if u.MinHealthyTime >= u.HealthyDeadline {
		// The condition is >=, so report >= (the message previously claimed
		// ">", which is wrong when the two durations are equal).
		multierror.Append(&mErr, fmt.Errorf("Minimum healthy time must be less than healthy deadline: %v >= %v", u.MinHealthyTime, u.HealthyDeadline))
	}
	if u.Stagger <= 0 {
		multierror.Append(&mErr, fmt.Errorf("Stagger must be greater than zero: %v", u.Stagger))
	}

	return mErr.ErrorOrNil()
}
// TODO(alexdadgar): Remove once no longer used by the scheduler.
// Rolling returns if a rolling strategy should be used: both a positive
// stagger and a positive parallelism are required.
func (u *UpdateStrategy) Rolling() bool {
	return u.Stagger > 0 && u.MaxParallel > 0
}
const (
	// PeriodicSpecCron is used for a cron spec.
	PeriodicSpecCron = "cron"

	// PeriodicSpecTest is only used by unit tests. It is a sorted, comma
	// separated list of unix timestamps at which to launch.
	PeriodicSpecTest = "_internal_test"
)
// PeriodicConfig defines the interval a job should be run at.
type PeriodicConfig struct {
	// Enabled determines if the job should be run periodically.
	Enabled bool

	// Spec specifies the interval the job should be run as. It is parsed based
	// on the SpecType.
	Spec string

	// SpecType defines the format of the spec. One of the PeriodicSpec*
	// constants.
	SpecType string

	// ProhibitOverlap enforces that spawned jobs do not run in parallel.
	ProhibitOverlap bool

	// TimeZone is the user specified string that determines the time zone to
	// launch against. The time zones must be specified from IANA Time Zone
	// database, such as "America/New_York".
	// Reference: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
	// Reference: https://www.iana.org/time-zones
	TimeZone string

	// location is the time zone to evaluate the launch time against. Cached
	// by Canonicalize; read through GetLocation.
	location *time.Location
}
// Copy returns a shallow copy of the periodic configuration (the cached
// *time.Location is shared, which is safe since locations are immutable).
// A nil receiver yields nil.
func (p *PeriodicConfig) Copy() *PeriodicConfig {
	if p == nil {
		return nil
	}
	dup := *p
	return &dup
}
// Validate checks the periodic configuration for errors: a disabled config is
// always valid; otherwise the spec must be present and parseable for its
// SpecType, and any TimeZone must exist in the IANA database. All problems
// are accumulated into a single multierror.
func (p *PeriodicConfig) Validate() error {
	if !p.Enabled {
		return nil
	}
	var mErr multierror.Error
	if p.Spec == "" {
		multierror.Append(&mErr, fmt.Errorf("Must specify a spec"))
	}
	// Check if we got a valid time zone
	if p.TimeZone != "" {
		if _, err := time.LoadLocation(p.TimeZone); err != nil {
			multierror.Append(&mErr, fmt.Errorf("Invalid time zone %q: %v", p.TimeZone, err))
		}
	}
	switch p.SpecType {
	case PeriodicSpecCron:
		// Validate the cron spec
		if _, err := cronexpr.Parse(p.Spec); err != nil {
			multierror.Append(&mErr, fmt.Errorf("Invalid cron spec %q: %v", p.Spec, err))
		}
	case PeriodicSpecTest:
		// No-op: the test spec is validated lazily by Next.
	default:
		multierror.Append(&mErr, fmt.Errorf("Unknown periodic specification type %q", p.SpecType))
	}
	return mErr.ErrorOrNil()
}
// Canonicalize resolves the TimeZone string into the cached location used to
// compute launch times. An empty TimeZone loads as UTC per
// time.LoadLocation's contract; an invalid TimeZone falls back to UTC.
func (p *PeriodicConfig) Canonicalize() {
	// Load the location
	l, err := time.LoadLocation(p.TimeZone)
	if err != nil {
		// BUG FIX: previously the fallback was immediately overwritten by the
		// unconditional assignment below, leaving location nil on error.
		p.location = time.UTC
		return
	}
	p.location = l
}
// Next returns the closest time instant matching the spec that is after the
// passed time. If no matching instance exists, the zero value of time.Time is
// returned. The `time.Location` of the returned value matches that of the
// passed time.
func (p *PeriodicConfig) Next(fromTime time.Time) time.Time {
	switch p.SpecType {
	case PeriodicSpecCron:
		// Parse errors are swallowed here; Validate is expected to have
		// rejected bad cron specs already.
		if e, err := cronexpr.Parse(p.Spec); err == nil {
			return e.Next(fromTime)
		}
	case PeriodicSpecTest:
		split := strings.Split(p.Spec, ",")
		if len(split) == 1 && split[0] == "" {
			// Empty test spec: no launches.
			return time.Time{}
		}

		// Parse the times; any malformed entry aborts with the zero time.
		times := make([]time.Time, len(split))
		for i, s := range split {
			unix, err := strconv.Atoi(s)
			if err != nil {
				return time.Time{}
			}
			times[i] = time.Unix(int64(unix), 0)
		}

		// Find the next match: the spec is sorted, so the first timestamp
		// strictly after fromTime is the answer.
		for _, next := range times {
			if fromTime.Before(next) {
				return next
			}
		}
	}
	return time.Time{}
}
// GetLocation returns the location to use for determining the time zone to
// run the periodic job against.
func (p *PeriodicConfig) GetLocation() *time.Location {
	// Jobs created before 0.5.5 never had Canonicalize populate the cached
	// location; treat those as UTC.
	if p.location == nil {
		return time.UTC
	}
	return p.location
}
const (
	// PeriodicLaunchSuffix is the string appended to the periodic jobs ID
	// when launching derived instances of it.
	PeriodicLaunchSuffix = "/periodic-"
)
// PeriodicLaunch tracks the last launch time of a periodic job. Stored in
// state so overlap prohibition and catch-up launches can be computed.
type PeriodicLaunch struct {
	ID        string    // ID of the periodic job.
	Namespace string    // Namespace of the periodic job
	Launch    time.Time // The last launch time.

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
const (
	// Payload requirement modes for parameterized jobs.
	DispatchPayloadForbidden = "forbidden"
	DispatchPayloadOptional  = "optional"
	DispatchPayloadRequired  = "required"

	// DispatchLaunchSuffix is the string appended to the parameterized job's ID
	// when dispatching instances of it.
	DispatchLaunchSuffix = "/dispatch-"
)
// ParameterizedJobConfig is used to configure the parameterized job
type ParameterizedJobConfig struct {
	// Payload configures the payload requirements: one of the
	// DispatchPayload* constants.
	Payload string

	// MetaRequired is metadata keys that must be specified by the dispatcher
	MetaRequired []string

	// MetaOptional is metadata keys that may be specified by the dispatcher
	MetaOptional []string
}
// Validate checks that the payload mode is one of the known constants and
// that no metadata key is listed as both required and optional. Problems are
// accumulated into a single multierror.
func (d *ParameterizedJobConfig) Validate() error {
	var mErr multierror.Error
	switch d.Payload {
	case DispatchPayloadOptional, DispatchPayloadRequired, DispatchPayloadForbidden:
	default:
		multierror.Append(&mErr, fmt.Errorf("Unknown payload requirement: %q", d.Payload))
	}

	// Check that the meta configurations are disjoint sets
	disjoint, offending := helper.SliceSetDisjoint(d.MetaRequired, d.MetaOptional)
	if !disjoint {
		multierror.Append(&mErr, fmt.Errorf("Required and optional meta keys should be disjoint. Following keys exist in both: %v", offending))
	}

	return mErr.ErrorOrNil()
}
// Canonicalize defaults an unset payload requirement to "optional".
func (d *ParameterizedJobConfig) Canonicalize() {
	if d.Payload != "" {
		return
	}
	d.Payload = DispatchPayloadOptional
}
// Copy returns a deep copy of the parameterized job config: both metadata
// key slices are duplicated. A nil receiver yields nil.
func (d *ParameterizedJobConfig) Copy() *ParameterizedJobConfig {
	if d == nil {
		return nil
	}
	dup := *d
	dup.MetaOptional = helper.CopySliceString(d.MetaOptional)
	dup.MetaRequired = helper.CopySliceString(d.MetaRequired)
	return &dup
}
// DispatchedID returns an ID appropriate for a job dispatched against a
// particular parameterized job: the parent ID, the dispatch suffix, the
// dispatch time in unix seconds, and a short random component for uniqueness.
func DispatchedID(templateID string, t time.Time) string {
	suffix := uuid.Generate()[:8]
	return fmt.Sprintf("%s%s%d-%s", templateID, DispatchLaunchSuffix, t.Unix(), suffix)
}
// DispatchPayloadConfig configures how a task gets its input from a job dispatch
type DispatchPayloadConfig struct {
	// File specifies a relative path to where the input data should be written
	File string
}
// Copy returns a shallow copy of the payload config, or nil for a nil
// receiver.
func (d *DispatchPayloadConfig) Copy() *DispatchPayloadConfig {
	if d == nil {
		return nil
	}
	dup := *d
	return &dup
}
// Validate ensures the configured payload destination resolves inside the
// allocation's task directory and cannot escape via "..".
func (d *DispatchPayloadConfig) Validate() error {
	// Verify the destination doesn't escape
	escaped, err := PathEscapesAllocDir("task/local/", d.File)
	switch {
	case err != nil:
		return fmt.Errorf("invalid destination path: %v", err)
	case escaped:
		return fmt.Errorf("destination escapes allocation directory")
	}
	return nil
}
var (
	// defaultServiceJobRestartPolicy is applied to service and system jobs
	// without an explicit restart policy: 2 attempts per minute with a 15s
	// delay between restarts.
	defaultServiceJobRestartPolicy = RestartPolicy{
		Delay:    15 * time.Second,
		Attempts: 2,
		Interval: 1 * time.Minute,
		Mode:     RestartPolicyModeDelay,
	}
	// defaultBatchJobRestartPolicy is applied to batch jobs without an
	// explicit restart policy: 15 attempts per week with a 15s delay.
	defaultBatchJobRestartPolicy = RestartPolicy{
		Delay:    15 * time.Second,
		Attempts: 15,
		Interval: 7 * 24 * time.Hour,
		Mode:     RestartPolicyModeDelay,
	}
)
const (
	// RestartPolicyModeDelay causes an artificial delay till the next interval is
	// reached when the specified attempts have been reached in the interval.
	RestartPolicyModeDelay = "delay"

	// RestartPolicyModeFail causes a job to fail if the specified number of
	// attempts are reached within an interval.
	RestartPolicyModeFail = "fail"

	// RestartPolicyMinInterval is the minimum interval that is accepted for a
	// restart policy.
	RestartPolicyMinInterval = 5 * time.Second

	// ReasonWithinPolicy describes restart events that are within policy
	ReasonWithinPolicy = "Restart within policy"
)
// RestartPolicy configures how Tasks are restarted when they crash or fail.
type RestartPolicy struct {
	// Attempts is the number of restart that will occur in an interval.
	Attempts int

	// Interval is a duration in which we can limit the number of restarts
	// within.
	Interval time.Duration

	// Delay is the time between a failure and a restart.
	Delay time.Duration

	// Mode controls what happens when the task restarts more than attempt times
	// in an interval. One of the RestartPolicyMode* constants.
	Mode string
}
// Copy returns a shallow copy of the restart policy (all fields are scalar),
// or nil for a nil receiver.
func (r *RestartPolicy) Copy() *RestartPolicy {
	if r == nil {
		return nil
	}
	dup := *r
	return &dup
}
// Validate checks the restart policy for errors: a known mode, an
// unambiguous attempt count, an interval no shorter than
// RestartPolicyMinInterval, and an attempts*delay budget that fits inside the
// interval. Problems are accumulated into a single multierror.
func (r *RestartPolicy) Validate() error {
	var mErr multierror.Error
	switch r.Mode {
	case RestartPolicyModeDelay, RestartPolicyModeFail:
	default:
		multierror.Append(&mErr, fmt.Errorf("Unsupported restart mode: %q", r.Mode))
	}

	// Check for ambiguous/confusing settings: zero attempts in delay mode
	// would restart nothing while appearing to allow restarts.
	if r.Attempts == 0 && r.Mode != RestartPolicyModeFail {
		multierror.Append(&mErr, fmt.Errorf("Restart policy %q with %d attempts is ambiguous", r.Mode, r.Attempts))
	}

	if r.Interval.Nanoseconds() < RestartPolicyMinInterval.Nanoseconds() {
		multierror.Append(&mErr, fmt.Errorf("Interval can not be less than %v (got %v)", RestartPolicyMinInterval, r.Interval))
	}
	if time.Duration(r.Attempts)*r.Delay > r.Interval {
		multierror.Append(&mErr,
			fmt.Errorf("Nomad can't restart the TaskGroup %v times in an interval of %v with a delay of %v", r.Attempts, r.Interval, r.Delay))
	}
	return mErr.ErrorOrNil()
}
// NewRestartPolicy returns a fresh copy of the default restart policy for the
// given job type, or nil for unknown job types. A copy is returned so callers
// may mutate it without affecting the package defaults.
func NewRestartPolicy(jobType string) *RestartPolicy {
	var policy RestartPolicy
	switch jobType {
	case JobTypeService, JobTypeSystem:
		policy = defaultServiceJobRestartPolicy
	case JobTypeBatch:
		policy = defaultBatchJobRestartPolicy
	default:
		return nil
	}
	return &policy
}
// TaskGroup is an atomic unit of placement. Each task group belongs to
// a job and may contain any number of tasks. A task group supports running
// many replicas using the same configuration.
type TaskGroup struct {
	// Name of the task group
	Name string

	// Count is the number of replicas of this task group that should
	// be scheduled.
	Count int

	// Update is used to control the update strategy for this task group
	Update *UpdateStrategy

	// Constraints can be specified at a task group level and apply to
	// all the tasks contained.
	Constraints []*Constraint

	// RestartPolicy of a TaskGroup
	RestartPolicy *RestartPolicy

	// Tasks are the collection of tasks that this task group needs to run
	Tasks []*Task

	// EphemeralDisk is the disk resources that the task group requests
	EphemeralDisk *EphemeralDisk

	// Meta is used to associate arbitrary metadata with this
	// task group. This is opaque to Nomad.
	Meta map[string]string
}
// Copy returns a deep copy of the task group: the update strategy, restart
// policy, constraints, tasks, meta map and ephemeral disk are all duplicated.
// A nil receiver yields nil. Note the Tasks slice is only replaced when the
// original is non-nil, preserving nil-vs-empty semantics for DeepEqual.
func (tg *TaskGroup) Copy() *TaskGroup {
	if tg == nil {
		return nil
	}
	ntg := new(TaskGroup)
	*ntg = *tg
	ntg.Update = ntg.Update.Copy()
	ntg.Constraints = CopySliceConstraints(ntg.Constraints)
	ntg.RestartPolicy = ntg.RestartPolicy.Copy()

	if tg.Tasks != nil {
		tasks := make([]*Task, len(ntg.Tasks))
		for i, t := range ntg.Tasks {
			tasks[i] = t.Copy()
		}
		ntg.Tasks = tasks
	}

	ntg.Meta = helper.CopyMapStringString(ntg.Meta)

	if tg.EphemeralDisk != nil {
		ntg.EphemeralDisk = tg.EphemeralDisk.Copy()
	}
	return ntg
}
// Canonicalize is used to canonicalize fields in the TaskGroup: empty maps
// become nil, missing restart policy and ephemeral disk get job-type
// defaults, and each task is canonicalized in turn.
func (tg *TaskGroup) Canonicalize(job *Job) {
	// Ensure that an empty and nil map are treated the same to avoid scheduling
	// problems since we use reflect DeepEquals.
	if len(tg.Meta) == 0 {
		tg.Meta = nil
	}

	// Set the default restart policy.
	if tg.RestartPolicy == nil {
		tg.RestartPolicy = NewRestartPolicy(job.Type)
	}

	// Set a default ephemeral disk object if the user has not requested for one
	if tg.EphemeralDisk == nil {
		tg.EphemeralDisk = DefaultEphemeralDisk()
	}

	for _, task := range tg.Tasks {
		task.Canonicalize(job, tg)
	}

	// Add up the disk resources to EphemeralDisk. This is done so that users
	// are not required to move their disk attribute from resources to
	// EphemeralDisk section of the job spec in Nomad 0.5
	// COMPAT 0.4.1 -> 0.5
	// Remove in 0.6
	var diskMB int
	for _, task := range tg.Tasks {
		diskMB += task.Resources.DiskMB
	}
	// Only override the ephemeral disk size when some task still declares a
	// legacy per-task disk amount.
	if diskMB > 0 {
		tg.EphemeralDisk.SizeMB = diskMB
	}
}
// Validate is used to sanity check a task group: basic fields, constraints,
// restart policy, ephemeral disk, update strategy, per-task uniqueness
// (names, leader flag, static ports), and finally each task's own Validate.
// All problems are accumulated into a single multierror.
func (tg *TaskGroup) Validate(j *Job) error {
	var mErr multierror.Error
	if tg.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task group name"))
	}
	if tg.Count < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Task group count can't be negative"))
	}
	if len(tg.Tasks) == 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Missing tasks for task group"))
	}

	for idx, constr := range tg.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	if tg.RestartPolicy != nil {
		if err := tg.RestartPolicy.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	} else {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a restart policy", tg.Name))
	}

	if tg.EphemeralDisk != nil {
		if err := tg.EphemeralDisk.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	} else {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have an ephemeral disk object", tg.Name))
	}

	// Validate the update strategy
	if u := tg.Update; u != nil {
		switch j.Type {
		case JobTypeService, JobTypeSystem:
		default:
			// COMPAT: Enable in 0.7.0
			//mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow update block", j.Type))
		}
		if err := u.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}

	// Check for duplicate tasks, that there is only leader task if any,
	// and no duplicated static ports
	tasks := make(map[string]int)       // task name -> first index seen
	staticPorts := make(map[int]string) // port value -> "task:label" that reserved it
	leaderTasks := 0
	for idx, task := range tg.Tasks {
		if task.Name == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d missing name", idx+1))
		} else if existing, ok := tasks[task.Name]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d redefines '%s' from task %d", idx+1, task.Name, existing+1))
		} else {
			tasks[task.Name] = idx
		}

		if task.Leader {
			leaderTasks++
		}

		if task.Resources == nil {
			continue
		}

		for _, net := range task.Resources.Networks {
			for _, port := range net.ReservedPorts {
				if other, ok := staticPorts[port.Value]; ok {
					err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other)
					mErr.Errors = append(mErr.Errors, err)
				} else {
					staticPorts[port.Value] = fmt.Sprintf("%s:%s", task.Name, port.Label)
				}
			}
		}
	}

	if leaderTasks > 1 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Only one task may be marked as leader"))
	}

	// Validate the tasks
	for _, task := range tg.Tasks {
		if err := task.Validate(tg.EphemeralDisk); err != nil {
			outer := fmt.Errorf("Task %s validation failed: %v", task.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}
	return mErr.ErrorOrNil()
}
// Warnings returns a list of warnings that may be from dubious settings or
// deprecation warnings. Unlike Validate, these do not block job submission.
func (tg *TaskGroup) Warnings(j *Job) error {
	var mErr multierror.Error

	// Validate the update strategy
	if u := tg.Update; u != nil {
		// Check the counts are appropriate: rolling more allocations in
		// parallel than exist would replace everything at once.
		if u.MaxParallel > tg.Count {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("Update max parallel count is greater than task group count (%d > %d). "+
					"A destructive change would result in the simultaneous replacement of all allocations.", u.MaxParallel, tg.Count))
		}
	}

	return mErr.ErrorOrNil()
}
// LookupTask finds a task by name, returning nil when no task in the group
// matches.
func (tg *TaskGroup) LookupTask(name string) *Task {
	for i := range tg.Tasks {
		if tg.Tasks[i].Name == name {
			return tg.Tasks[i]
		}
	}
	return nil
}
// GoString returns a Go-syntax representation of the task group for
// debugging output.
func (tg *TaskGroup) GoString() string {
	return fmt.Sprintf("*%#v", *tg)
}
// CombinedResources returns the sum of every task's resources in the group,
// seeded with the group-level ephemeral disk size.
func (tg *TaskGroup) CombinedResources() *Resources {
	combined := &Resources{DiskMB: tg.EphemeralDisk.SizeMB}
	for _, task := range tg.Tasks {
		combined.Add(task.Resources)
	}
	return combined
}
// CheckRestart describes if and when a task should be restarted based on
// failing health checks.
type CheckRestart struct {
	Limit          int           // Restart task after this many unhealthy intervals
	Grace          time.Duration // Grace time to give tasks after starting to get healthy
	IgnoreWarnings bool          // If true treat checks in `warning` as passing
}
// Copy returns a shallow copy of the check restart config (all fields are
// scalar), or nil for a nil receiver.
func (c *CheckRestart) Copy() *CheckRestart {
	if c == nil {
		return nil
	}
	dup := *c
	return &dup
}
// Validate ensures the limit and grace period are non-negative. A nil config
// (no restart-on-check-failure) is valid.
func (c *CheckRestart) Validate() error {
	if c == nil {
		return nil
	}

	var mErr multierror.Error
	if c.Limit < 0 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("limit must be greater than or equal to 0 but found %d", c.Limit))
	}

	if c.Grace < 0 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("grace period must be greater than or equal to 0 but found %d", c.Grace))
	}

	return mErr.ErrorOrNil()
}
const (
	// Supported service check types.
	ServiceCheckHTTP   = "http"
	ServiceCheckTCP    = "tcp"
	ServiceCheckScript = "script"

	// minCheckInterval is the minimum check interval permitted. Consul
	// currently has its MinInterval set to 1s. Mirror that here for
	// consistency.
	minCheckInterval = 1 * time.Second

	// minCheckTimeout is the minimum check timeout permitted for Consul
	// script TTL checks.
	minCheckTimeout = 1 * time.Second
)
// The ServiceCheck data model represents the consul health check that
// Nomad registers for a Task
type ServiceCheck struct {
	Name          string              // Name of the check, defaults to id
	Type          string              // Type of the check - tcp, http, docker and script
	Command       string              // Command is the command to run for script checks
	Args          []string            // Args is a list of arguments for script checks
	Path          string              // path of the health check url for http type check
	Protocol      string              // Protocol to use if check is http, defaults to http
	PortLabel     string              // The port to use for tcp/http checks
	Interval      time.Duration       // Interval of the check
	Timeout       time.Duration       // Timeout of the response from the check before consul fails the check
	InitialStatus string              // Initial status of the check
	TLSSkipVerify bool                // Skip TLS verification when Protocol=https
	Method        string              // HTTP Method to use (GET by default)
	Header        map[string][]string // HTTP Headers for Consul to set when making HTTP checks
	CheckRestart  *CheckRestart       // If and when a task should be restarted based on checks
}
// Copy returns a deep copy of the service check: the args slice, header map
// and check-restart config are all duplicated. A nil receiver yields nil.
func (sc *ServiceCheck) Copy() *ServiceCheck {
	if sc == nil {
		return nil
	}
	nsc := new(ServiceCheck)
	*nsc = *sc
	nsc.Args = helper.CopySliceString(sc.Args)
	nsc.Header = helper.CopyMapStringSliceString(sc.Header)
	nsc.CheckRestart = sc.CheckRestart.Copy()
	return nsc
}
// Canonicalize normalizes the check: empty collections become nil (to keep
// DeepEqual-based scheduling comparisons stable) and a missing name is
// derived from the owning service's name.
func (sc *ServiceCheck) Canonicalize(serviceName string) {
	// Ensure empty maps/slices are treated as null to avoid scheduling
	// issues when using DeepEquals.
	if len(sc.Args) == 0 {
		sc.Args = nil
	}

	if len(sc.Header) == 0 {
		sc.Header = nil
	} else {
		// Normalize empty header value lists to nil for the same reason.
		for k, v := range sc.Header {
			if len(v) == 0 {
				sc.Header[k] = nil
			}
		}
	}

	if sc.Name == "" {
		sc.Name = fmt.Sprintf("service: %q check", serviceName)
	}
}
// validate a Service's ServiceCheck: the type-specific required fields, the
// interval/timeout minimums, the initial Consul status, and any nested
// check-restart configuration.
func (sc *ServiceCheck) validate() error {
	switch strings.ToLower(sc.Type) {
	case ServiceCheckTCP:
	case ServiceCheckHTTP:
		if sc.Path == "" {
			return fmt.Errorf("http type must have a valid http path")
		}

	case ServiceCheckScript:
		if sc.Command == "" {
			return fmt.Errorf("script type must have a valid script path")
		}
	default:
		return fmt.Errorf(`invalid type (%+q), must be one of "http", "tcp", or "script" type`, sc.Type)
	}

	if sc.Interval == 0 {
		return fmt.Errorf("missing required value interval. Interval cannot be less than %v", minCheckInterval)
	} else if sc.Interval < minCheckInterval {
		return fmt.Errorf("interval (%v) cannot be lower than %v", sc.Interval, minCheckInterval)
	}

	if sc.Timeout == 0 {
		// BUG FIX: these two messages previously printed minCheckInterval;
		// the timeout is constrained by minCheckTimeout.
		return fmt.Errorf("missing required value timeout. Timeout cannot be less than %v", minCheckTimeout)
	} else if sc.Timeout < minCheckTimeout {
		return fmt.Errorf("timeout (%v) is lower than required minimum timeout %v", sc.Timeout, minCheckTimeout)
	}

	switch sc.InitialStatus {
	case "":
		// case api.HealthUnknown: TODO: Add when Consul releases 0.7.1
	case api.HealthPassing:
	case api.HealthWarning:
	case api.HealthCritical:
	default:
		return fmt.Errorf(`invalid initial check state (%s), must be one of %q, %q, %q or empty`, sc.InitialStatus, api.HealthPassing, api.HealthWarning, api.HealthCritical)

	}

	return sc.CheckRestart.Validate()
}
// RequiresPort returns whether the service check requires the task has a port.
// Only network-level checks (http/tcp) need an addressable port; script
// checks run inside the task.
func (sc *ServiceCheck) RequiresPort() bool {
	return sc.Type == ServiceCheckHTTP || sc.Type == ServiceCheckTCP
}
// TriggersRestarts returns true if this check should be watched and trigger a
// restart on failure.
func (sc *ServiceCheck) TriggersRestarts() bool {
	if sc.CheckRestart == nil {
		return false
	}
	return sc.CheckRestart.Limit > 0
}
// Hash all ServiceCheck fields and the check's corresponding service ID to
// create an identifier. The identifier is not guaranteed to be unique as if
// the PortLabel is blank, the Service's PortLabel will be used after Hash is
// called.
//
// NOTE: the write order below is part of the check's identity — changing it
// would change existing check IDs.
func (sc *ServiceCheck) Hash(serviceID string) string {
	h := sha1.New()
	io.WriteString(h, serviceID)
	io.WriteString(h, sc.Name)
	io.WriteString(h, sc.Type)
	io.WriteString(h, sc.Command)
	io.WriteString(h, strings.Join(sc.Args, ""))
	io.WriteString(h, sc.Path)
	io.WriteString(h, sc.Protocol)
	io.WriteString(h, sc.PortLabel)
	io.WriteString(h, sc.Interval.String())
	io.WriteString(h, sc.Timeout.String())
	io.WriteString(h, sc.Method)
	// Only include TLSSkipVerify if set to maintain ID stability with Nomad <0.6
	if sc.TLSSkipVerify {
		io.WriteString(h, "true")
	}

	// Since map iteration order isn't stable we need to write k/v pairs to
	// a slice and sort it before hashing.
	if len(sc.Header) > 0 {
		headers := make([]string, 0, len(sc.Header))
		for k, v := range sc.Header {
			headers = append(headers, k+strings.Join(v, ""))
		}
		sort.Strings(headers)
		io.WriteString(h, strings.Join(headers, ""))
	}

	return fmt.Sprintf("%x", h.Sum(nil))
}
const (
	// Address modes for service registration.
	AddressModeAuto   = "auto"
	AddressModeHost   = "host"
	AddressModeDriver = "driver"
)
// Service represents a Consul service definition in Nomad
type Service struct {
	// Name of the service registered with Consul. Consul defaults the
	// Name to ServiceID if not specified. The Name if specified is used
	// as one of the seed values when generating a Consul ServiceID.
	Name string

	// PortLabel is either the numeric port number or the `host:port`.
	// To specify the port number using the host's Consul Advertise
	// address, specify an empty host in the PortLabel (e.g. `:port`).
	PortLabel string

	// AddressMode specifies whether or not to use the host ip:port for
	// this service. One of the AddressMode* constants.
	AddressMode string

	Tags   []string        // List of tags for the service
	Checks []*ServiceCheck // List of checks associated with the service
}
// Copy returns a deep copy of the service: tags and checks are duplicated.
// A nil receiver yields nil. The Checks slice is only replaced when the
// original is non-nil, preserving nil-vs-empty semantics for DeepEqual.
func (s *Service) Copy() *Service {
	if s == nil {
		return nil
	}
	ns := new(Service)
	*ns = *s
	ns.Tags = helper.CopySliceString(ns.Tags)

	if s.Checks != nil {
		checks := make([]*ServiceCheck, len(ns.Checks))
		for i, c := range ns.Checks {
			checks[i] = c.Copy()
		}
		ns.Checks = checks
	}

	return ns
}
// Canonicalize interpolates values of Job, Task Group and Task in the Service
// Name. This also generates check names, service id and check ids.
func (s *Service) Canonicalize(job string, taskGroup string, task string) {
	// Ensure empty lists are treated as null to avoid scheduler issues when
	// using DeepEquals
	if len(s.Tags) == 0 {
		s.Tags = nil
	}
	if len(s.Checks) == 0 {
		s.Checks = nil
	}

	// Expand the ${JOB}/${TASKGROUP}/${TASK}/${BASE} placeholders in the
	// service name.
	s.Name = args.ReplaceEnv(s.Name, map[string]string{
		"JOB":       job,
		"TASKGROUP": taskGroup,
		"TASK":      task,
		"BASE":      fmt.Sprintf("%s-%s-%s", job, taskGroup, task),
	},
	)

	// Propagate the interpolated service name into each check's default name.
	for _, check := range s.Checks {
		check.Canonicalize(s.Name)
	}
}
// Validate checks if the service definition is valid: an RFC-compliant name
// (with interpolation placeholders stripped first), a known address mode,
// and well-formed checks that have a port when their type requires one.
// All problems are accumulated into a single multierror.
func (s *Service) Validate() error {
	var mErr multierror.Error

	// Ensure the service name is valid per the below RFCs but make an exception
	// for our interpolation syntax by first stripping any environment variables from the name
	serviceNameStripped := args.ReplaceEnvWithPlaceHolder(s.Name, "ENV-VAR")

	if err := s.ValidateName(serviceNameStripped); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}

	switch s.AddressMode {
	case "", AddressModeAuto, AddressModeHost, AddressModeDriver:
		// OK
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("service address_mode must be %q, %q, or %q; not %q", AddressModeAuto, AddressModeHost, AddressModeDriver, s.AddressMode))
	}

	for _, c := range s.Checks {
		if s.PortLabel == "" && c.RequiresPort() {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: check requires a port but the service %+q has no port", c.Name, s.Name))
			continue
		}

		if err := c.validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: %v", c.Name, err))
		}
	}
	return mErr.ErrorOrNil()
}
// serviceNameRegexp validates service names per RFC-952 §1
// (https://tools.ietf.org/html/rfc952), RFC-1123 §2.1
// (https://tools.ietf.org/html/rfc1123), and RFC-2782
// (https://tools.ietf.org/html/rfc2782). Compiled once at package scope
// instead of on every call.
var serviceNameRegexp = regexp.MustCompile(`^(?i:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9])$`)

// ValidateName checks if the services Name is valid and should be called after
// the name has been interpolated
func (s *Service) ValidateName(name string) error {
	if !serviceNameRegexp.MatchString(name) {
		return fmt.Errorf("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes and must be no longer than 63 characters: %q", name)
	}
	return nil
}
// Hash calculates the hash of the check based on it's content and the service
// which owns it. The write order below is part of the service's identity —
// changing it would change existing service IDs.
func (s *Service) Hash() string {
	h := sha1.New()
	io.WriteString(h, s.Name)
	io.WriteString(h, strings.Join(s.Tags, ""))
	io.WriteString(h, s.PortLabel)
	io.WriteString(h, s.AddressMode)
	return fmt.Sprintf("%x", h.Sum(nil))
}
const (
	// DefaultKillTimeout is the default timeout between signaling a task it
	// will be killed and killing it.
	DefaultKillTimeout = 5 * time.Second
)
// LogConfig provides configuration for log rotation
type LogConfig struct {
	MaxFiles      int // maximum number of rotated files to retain per stream
	MaxFileSizeMB int // maximum size of each log file in megabytes
}
// DefaultLogConfig returns the default LogConfig values: ten files of ten
// megabytes each.
func DefaultLogConfig() *LogConfig {
	return &LogConfig{MaxFiles: 10, MaxFileSizeMB: 10}
}
// Validate returns an error if the log config specified are less than
// the minimum allowed (at least one file of at least 1MB). Problems are
// accumulated into a single multierror.
func (l *LogConfig) Validate() error {
	var mErr multierror.Error
	if l.MaxFiles < 1 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum number of files is 1; got %d", l.MaxFiles))
	}
	if l.MaxFileSizeMB < 1 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum file size is 1MB; got %d", l.MaxFileSizeMB))
	}
	return mErr.ErrorOrNil()
}
// Task is a single process typically that is executed as part of a task group.
type Task struct {
	// Name of the task
	Name string

	// Driver is used to control which driver is used
	Driver string

	// User is used to determine which user will run the task. It defaults to
	// the same user the Nomad client is being run as.
	User string

	// Config is provided to the driver to initialize
	Config map[string]interface{}

	// Map of environment variables to be used by the driver
	Env map[string]string

	// List of service definitions exposed by the Task
	Services []*Service

	// Vault is used to define the set of Vault policies that this task should
	// have access to.
	Vault *Vault

	// Templates are the set of templates to be rendered for the task.
	Templates []*Template

	// Constraints can be specified at a task level and apply only to
	// the particular task.
	Constraints []*Constraint

	// Resources is the resources needed by this task
	Resources *Resources

	// DispatchPayload configures how the task retrieves its input from a dispatch
	DispatchPayload *DispatchPayloadConfig

	// Meta is used to associate arbitrary metadata with this
	// task. This is opaque to Nomad.
	Meta map[string]string

	// KillTimeout is the time between signaling a task that it will be
	// killed and killing it.
	KillTimeout time.Duration

	// LogConfig provides configuration for log rotation
	LogConfig *LogConfig

	// Artifacts is a list of artifacts to download and extract before running
	// the task.
	Artifacts []*TaskArtifact

	// Leader marks the task as the leader within the group. When the leader
	// task exits, other tasks will be gracefully terminated.
	Leader bool

	// ShutdownDelay is the duration of the delay between deregistering a
	// task from Consul and sending it a signal to shutdown. See #2441
	ShutdownDelay time.Duration
}
// Copy returns a deep copy of the task, or nil for a nil receiver.
func (t *Task) Copy() *Task {
	if t == nil {
		return nil
	}

	// Start from a shallow copy and then replace the reference-typed
	// fields with deep copies.
	c := new(Task)
	*c = *t

	c.Env = helper.CopyMapStringString(c.Env)
	c.Meta = helper.CopyMapStringString(c.Meta)
	c.Constraints = CopySliceConstraints(c.Constraints)
	c.Vault = c.Vault.Copy()
	c.Resources = c.Resources.Copy()
	c.DispatchPayload = c.DispatchPayload.Copy()

	if t.Services != nil {
		c.Services = make([]*Service, len(t.Services))
		for i, svc := range t.Services {
			c.Services[i] = svc.Copy()
		}
	}

	if t.Artifacts != nil {
		c.Artifacts = make([]*TaskArtifact, 0, len(t.Artifacts))
		for _, artifact := range t.Artifacts {
			c.Artifacts = append(c.Artifacts, artifact.Copy())
		}
	}

	// The driver config may contain arbitrarily nested maps/slices;
	// copystructure performs a generic deep copy. A failure here is a
	// programmer error, so panic just as the original implementation did.
	raw, err := copystructure.Copy(c.Config)
	if err != nil {
		panic(err.Error())
	}
	c.Config = raw.(map[string]interface{})

	if t.Templates != nil {
		c.Templates = make([]*Template, len(t.Templates))
		for i, tmpl := range t.Templates {
			c.Templates[i] = tmpl.Copy()
		}
	}

	return c
}
// Canonicalize canonicalizes fields in the task: empty collections are
// normalized to nil, services and templates are canonicalized, and
// defaults are filled in for resources and the kill timeout.
func (t *Task) Canonicalize(job *Job, tg *TaskGroup) {
	// Normalize empty maps to nil; scheduling comparisons use
	// reflect.DeepEqual, which distinguishes nil from empty.
	if len(t.Env) == 0 {
		t.Env = nil
	}
	if len(t.Config) == 0 {
		t.Config = nil
	}
	if len(t.Meta) == 0 {
		t.Meta = nil
	}

	for _, svc := range t.Services {
		svc.Canonicalize(job.Name, tg.Name, t.Name)
	}

	// If Resources are nil initialize them to defaults, otherwise canonicalize.
	if t.Resources == nil {
		t.Resources = DefaultResources()
	} else {
		t.Resources.Canonicalize()
	}

	// Set the default timeout if it is not specified.
	if t.KillTimeout == 0 {
		t.KillTimeout = DefaultKillTimeout
	}

	if t.Vault != nil {
		t.Vault.Canonicalize()
	}
	for _, tmpl := range t.Templates {
		tmpl.Canonicalize()
	}
}
// GoString renders the dereferenced task in Go syntax, prefixed with
// "*" to indicate the receiver was a pointer.
func (t *Task) GoString() string {
	v := *t
	return fmt.Sprintf("*%#v", v)
}
// Validate is used to sanity check a task. ephemeralDisk is the enclosing
// task group's disk configuration; when both it and a LogConfig are
// present, the space reserved for logs must be strictly less than the
// requested disk capacity. All problems are accumulated into a
// multierror and returned together.
func (t *Task) Validate(ephemeralDisk *EphemeralDisk) error {
	var mErr multierror.Error
	if t.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task name"))
	}
	if strings.ContainsAny(t.Name, `/\`) {
		// We enforce this so that when creating the directory on disk it will
		// not have any slashes.
		mErr.Errors = append(mErr.Errors, errors.New("Task name cannot include slashes"))
	}
	if t.Driver == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task driver"))
	}
	if t.KillTimeout < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("KillTimeout must be a positive value"))
	}
	if t.ShutdownDelay < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("ShutdownDelay must be a positive value"))
	}

	// Validate the resources.
	if t.Resources == nil {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task resources"))
	} else {
		if err := t.Resources.MeetsMinResources(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}

		// Ensure the task isn't asking for disk resources
		if t.Resources.DiskMB > 0 {
			mErr.Errors = append(mErr.Errors, errors.New("Task can't ask for disk resources, they have to be specified at the task group level."))
		}
	}

	// Validate the log config
	if t.LogConfig == nil {
		mErr.Errors = append(mErr.Errors, errors.New("Missing Log Config"))
	} else if err := t.LogConfig.Validate(); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}

	// Validate constraints; distinct_hosts/distinct_property only make
	// sense across placements, so they are rejected at the task level.
	for idx, constr := range t.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}

		switch constr.Operand {
		case ConstraintDistinctHosts, ConstraintDistinctProperty:
			outer := fmt.Errorf("Constraint %d has disallowed Operand at task level: %s", idx+1, constr.Operand)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	// Validate Services
	if err := validateServices(t); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}

	// The log space (files * size) must leave room in the group's disk.
	if t.LogConfig != nil && ephemeralDisk != nil {
		logUsage := (t.LogConfig.MaxFiles * t.LogConfig.MaxFileSizeMB)
		if ephemeralDisk.SizeMB <= logUsage {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("log storage (%d MB) must be less than requested disk capacity (%d MB)",
					logUsage, ephemeralDisk.SizeMB))
		}
	}

	for idx, artifact := range t.Artifacts {
		if err := artifact.Validate(); err != nil {
			outer := fmt.Errorf("Artifact %d validation failed: %v", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	if t.Vault != nil {
		if err := t.Vault.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Vault validation failed: %v", err))
		}
	}

	// Templates must validate individually and must not share a
	// destination path; destinations maps DestPath -> 1-based index of
	// the first template that claimed it.
	destinations := make(map[string]int, len(t.Templates))
	for idx, tmpl := range t.Templates {
		if err := tmpl.Validate(); err != nil {
			outer := fmt.Errorf("Template %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}

		if other, ok := destinations[tmpl.DestPath]; ok {
			outer := fmt.Errorf("Template %d has same destination as %d", idx+1, other)
			mErr.Errors = append(mErr.Errors, outer)
		} else {
			destinations[tmpl.DestPath] = idx + 1
		}
	}

	// Validate the dispatch payload block if there
	if t.DispatchPayload != nil {
		if err := t.DispatchPayload.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Payload validation failed: %v", err))
		}
	}

	return mErr.ErrorOrNil()
}
// validateServices takes a task and validates the services within it are
// valid and reference ports that exist. Duplicate detection treats two
// services as duplicates when both their name and port label match; check
// names must be unique within each service.
func validateServices(t *Task) error {
	var mErr multierror.Error

	// Ensure that services don't ask for non-existent ports and their names are
	// unique. servicePorts maps a port label to the names of the services
	// that reference it, for the existence check below.
	servicePorts := make(map[string][]string)
	knownServices := make(map[string]struct{})
	for i, service := range t.Services {
		if err := service.Validate(); err != nil {
			outer := fmt.Errorf("service[%d] %+q validation failed: %s", i, service.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}

		// Ensure that services with the same name are not being registered for
		// the same port. The key is name+portLabel, so the same service name
		// on different ports is allowed.
		if _, ok := knownServices[service.Name+service.PortLabel]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q is duplicate", service.Name))
		}
		knownServices[service.Name+service.PortLabel] = struct{}{}

		if service.PortLabel != "" {
			servicePorts[service.PortLabel] = append(servicePorts[service.PortLabel], service.Name)
		}

		// Ensure that check names are unique (scoped per service).
		knownChecks := make(map[string]struct{})
		for _, check := range service.Checks {
			if _, ok := knownChecks[check.Name]; ok {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is duplicate", check.Name))
			}
			knownChecks[check.Name] = struct{}{}
		}
	}

	// Get the set of port labels declared by the task's network resources.
	portLabels := make(map[string]struct{})
	if t.Resources != nil {
		for _, network := range t.Resources.Networks {
			ports := network.PortLabels()
			for portLabel := range ports {
				portLabels[portLabel] = struct{}{}
			}
		}
	}

	// Ensure all ports referenced in services exist.
	for servicePort, services := range servicePorts {
		_, ok := portLabels[servicePort]
		if !ok {
			joined := strings.Join(services, ", ")
			err := fmt.Errorf("port label %q referenced by services %v does not exist", servicePort, joined)
			mErr.Errors = append(mErr.Errors, err)
		}
	}

	// Ensure address mode is valid
	return mErr.ErrorOrNil()
}
const (
	// TemplateChangeModeNoop marks that no action should be taken if the
	// template is re-rendered
	TemplateChangeModeNoop = "noop"

	// TemplateChangeModeSignal marks that the task should be signaled if the
	// template is re-rendered
	TemplateChangeModeSignal = "signal"

	// TemplateChangeModeRestart marks that the task should be restarted if the
	// template is re-rendered
	TemplateChangeModeRestart = "restart"
)

var (
	// TemplateChangeModeInvalidError is the error for when an invalid change
	// mode is given
	TemplateChangeModeInvalidError = errors.New("Invalid change mode. Must be one of the following: noop, signal, restart")
)

// Template represents a template configuration to be rendered for a given task
type Template struct {
	// SourcePath is the path to the template to be rendered
	SourcePath string

	// DestPath is the path to where the template should be rendered
	DestPath string

	// EmbeddedTmpl store the raw template. This is useful for smaller templates
	// where they are embedded in the job file rather than sent as an artifact
	EmbeddedTmpl string

	// ChangeMode indicates what should be done if the template is re-rendered
	ChangeMode string

	// ChangeSignal is the signal that should be sent if the change mode
	// requires it.
	ChangeSignal string

	// Splay is used to avoid coordinated restarts of processes by applying a
	// random wait between 0 and the given splay value before signalling the
	// application of a change
	Splay time.Duration

	// Perms is the permission the file should be written out with.
	Perms string

	// LeftDelim and RightDelim are optional configurations to control what
	// delimiter is utilized when parsing the template.
	LeftDelim  string
	RightDelim string

	// Envvars enables exposing the template as environment variables
	// instead of as a file. The template must be of the form:
	//
	//	VAR_NAME_1={{ key service/my-key }}
	//	VAR_NAME_2=raw string and {{ env "attr.kernel.name" }}
	//
	// Lines will be split on the initial "=" with the first part being the
	// key name and the second part the value.
	// Empty lines and lines starting with # will be ignored, but to avoid
	// escaping issues #s within lines will not be treated as comments.
	Envvars bool

	// VaultGrace is the grace duration between lease renewal and reacquiring a
	// secret. If the lease of a secret is less than the grace, a new secret is
	// acquired.
	VaultGrace time.Duration
}
// DefaultTemplate returns a template populated with the default change
// mode (restart), splay (5s), and file permissions (0644).
func DefaultTemplate() *Template {
	t := new(Template)
	t.ChangeMode = TemplateChangeModeRestart
	t.Splay = 5 * time.Second
	t.Perms = "0644"
	return t
}
// Copy returns a shallow copy of the template; all fields are value
// types or strings, so a struct copy is sufficient. Nil in, nil out.
func (t *Template) Copy() *Template {
	if t == nil {
		return nil
	}
	dup := *t
	return &dup
}
// Canonicalize upper-cases the change signal (e.g. "sighup" -> "SIGHUP")
// so comparisons elsewhere are case-insensitive.
func (t *Template) Canonicalize() {
	if sig := t.ChangeSignal; sig != "" {
		t.ChangeSignal = strings.ToUpper(sig)
	}
}
// Validate checks that the template block is well-formed: it must have a
// source (path or embedded content), a destination that stays inside the
// allocation directory, a recognized change mode, and sane splay,
// permission, and Vault-grace values. All problems are accumulated and
// returned together, or nil when the template is valid.
func (t *Template) Validate() error {
	var mErr multierror.Error

	// Verify we have something to render
	if t.SourcePath == "" && t.EmbeddedTmpl == "" {
		multierror.Append(&mErr, fmt.Errorf("Must specify a source path or have an embedded template"))
	}

	// Verify we can render somewhere
	if t.DestPath == "" {
		multierror.Append(&mErr, fmt.Errorf("Must specify a destination for the template"))
	}

	// Verify the destination doesn't escape the allocation directory.
	// (Use multierror.Append here as well, for consistency with the rest
	// of this method.)
	escaped, err := PathEscapesAllocDir("task", t.DestPath)
	if err != nil {
		multierror.Append(&mErr, fmt.Errorf("invalid destination path: %v", err))
	} else if escaped {
		multierror.Append(&mErr, fmt.Errorf("destination escapes allocation directory"))
	}

	// Verify a proper change mode. Signal mode additionally requires a
	// signal name and is incompatible with env-var templates (there is no
	// process-visible file to re-read on signal).
	switch t.ChangeMode {
	case TemplateChangeModeNoop, TemplateChangeModeRestart:
	case TemplateChangeModeSignal:
		if t.ChangeSignal == "" {
			multierror.Append(&mErr, fmt.Errorf("Must specify signal value when change mode is signal"))
		}
		if t.Envvars {
			multierror.Append(&mErr, fmt.Errorf("cannot use signals with env var templates"))
		}
	default:
		multierror.Append(&mErr, TemplateChangeModeInvalidError)
	}

	// Verify the splay is positive
	if t.Splay < 0 {
		multierror.Append(&mErr, fmt.Errorf("Must specify positive splay value"))
	}

	// Verify the permissions parse as an octal file mode; 12 bits allows
	// the setuid/setgid/sticky bits (up to 07777).
	if t.Perms != "" {
		if _, err := strconv.ParseUint(t.Perms, 8, 12); err != nil {
			multierror.Append(&mErr, fmt.Errorf("Failed to parse %q as octal: %v", t.Perms, err))
		}
	}

	if t.VaultGrace.Nanoseconds() < 0 {
		multierror.Append(&mErr, fmt.Errorf("Vault grace must be greater than zero: %v < 0", t.VaultGrace))
	}

	return mErr.ErrorOrNil()
}
// Set of possible states for a task.
const (
	TaskStatePending = "pending" // The task is waiting to be run.
	TaskStateRunning = "running" // The task is currently running.
	TaskStateDead    = "dead"    // Terminal state of task.
)

// TaskState tracks the current state of a task and events that caused state
// transitions.
type TaskState struct {
	// The current state of the task.
	State string

	// Failed marks a task as having failed
	Failed bool

	// Restarts is the number of times the task has restarted
	Restarts uint64

	// LastRestart is the time the task last restarted. It is updated each time the
	// task restarts
	LastRestart time.Time

	// StartedAt is the time the task is started. It is updated each time the
	// task starts
	StartedAt time.Time

	// FinishedAt is the time at which the task transitioned to dead and will
	// not be started again.
	FinishedAt time.Time

	// Series of task events that transition the state of the task.
	Events []*TaskEvent
}
// Copy returns a deep copy of the task state, including a fresh Events
// slice with each event copied. Nil in, nil out.
func (ts *TaskState) Copy() *TaskState {
	if ts == nil {
		return nil
	}
	ns := new(TaskState)
	*ns = *ts
	if ts.Events != nil {
		ns.Events = make([]*TaskEvent, len(ts.Events))
		for i, ev := range ts.Events {
			ns.Events[i] = ev.Copy()
		}
	}
	return ns
}
// Successful returns whether a task finished successfully: the task is
// dead and its most recent event is a termination with exit code zero.
func (ts *TaskState) Successful() bool {
	if ts.State != TaskStateDead {
		return false
	}
	n := len(ts.Events)
	if n == 0 {
		return false
	}
	last := ts.Events[n-1]
	return last.Type == TaskTerminated && last.ExitCode == 0
}
const (
	// TaskSetupFailure indicates that the task could not be started due to a
	// setup failure.
	TaskSetupFailure = "Setup Failure"

	// TaskDriverFailure indicates that the task could not be started due to a
	// failure in the driver.
	TaskDriverFailure = "Driver Failure"

	// TaskReceived signals that the task has been pulled by the client at the
	// given timestamp.
	TaskReceived = "Received"

	// TaskFailedValidation indicates the task was invalid and as such was not
	// run.
	TaskFailedValidation = "Failed Validation"

	// TaskStarted signals that the task was started and its timestamp can be
	// used to determine the running length of the task.
	TaskStarted = "Started"

	// TaskTerminated indicates that the task was started and exited.
	TaskTerminated = "Terminated"

	// TaskKilling indicates a kill signal has been sent to the task.
	TaskKilling = "Killing"

	// TaskKilled indicates a user has killed the task.
	TaskKilled = "Killed"

	// TaskRestarting indicates that task terminated and is being restarted.
	TaskRestarting = "Restarting"

	// TaskNotRestarting indicates that the task has failed and is not being
	// restarted because it has exceeded its restart policy.
	TaskNotRestarting = "Not Restarting"

	// TaskRestartSignal indicates that the task has been signalled to be
	// restarted
	TaskRestartSignal = "Restart Signaled"

	// TaskSignaling indicates that the task is being signalled.
	TaskSignaling = "Signaling"

	// TaskDownloadingArtifacts means the task is downloading the artifacts
	// specified in the task.
	TaskDownloadingArtifacts = "Downloading Artifacts"

	// TaskArtifactDownloadFailed indicates that downloading the artifacts
	// failed.
	TaskArtifactDownloadFailed = "Failed Artifact Download"

	// TaskBuildingTaskDir indicates that the task directory/chroot is being
	// built.
	TaskBuildingTaskDir = "Building Task Directory"

	// TaskSetup indicates the task runner is setting up the task environment
	TaskSetup = "Task Setup"

	// TaskDiskExceeded indicates that one of the tasks in a taskgroup has
	// exceeded the requested disk resources.
	TaskDiskExceeded = "Disk Resources Exceeded"

	// TaskSiblingFailed indicates that a sibling task in the task group has
	// failed.
	TaskSiblingFailed = "Sibling Task Failed"

	// TaskDriverMessage is an informational event message emitted by
	// drivers such as when they're performing a long running action like
	// downloading an image.
	TaskDriverMessage = "Driver"

	// TaskLeaderDead indicates that the leader task within the group has
	// finished.
	TaskLeaderDead = "Leader Task Dead"
)
// TaskEvent is an event that affects the state of a task and contains
// meta-data appropriate to the event's type.
type TaskEvent struct {
	Type    string
	Time    int64  // Unix Nanosecond timestamp
	Message string // A possible message explaining the termination of the task.

	// DisplayMessage is a human friendly message about the event
	DisplayMessage string

	// Details is a map with annotated info about the event
	Details map[string]string

	// DEPRECATION NOTICE: The following fields are deprecated and will be removed
	// in a future release. Field values are available in the Details map.

	// FailsTask marks whether this event fails the task.
	// Deprecated, use Details["fails_task"] to access this.
	FailsTask bool

	// Restart fields.
	// Deprecated, use Details["restart_reason"] to access this.
	RestartReason string

	// Setup Failure fields.
	// Deprecated, use Details["setup_error"] to access this.
	SetupError string

	// Driver Failure fields.
	// Deprecated, use Details["driver_error"] to access this.
	DriverError string // A driver error occurred while starting the task.

	// Task Terminated Fields.
	// Deprecated, use Details["exit_code"] to access this.
	ExitCode int // The exit code of the task.

	// Deprecated, use Details["signal"] to access this.
	Signal int // The signal that terminated the task.

	// Killing fields
	// Deprecated, use Details["kill_timeout"] to access this.
	KillTimeout time.Duration

	// Task Killed Fields.
	// Deprecated, use Details["kill_error"] to access this.
	KillError string // Error killing the task.

	// KillReason is the reason the task was killed
	// Deprecated, use Details["kill_reason"] to access this.
	KillReason string

	// TaskRestarting fields.
	// Deprecated, use Details["start_delay"] to access this.
	StartDelay int64 // The sleep period before restarting the task in unix nanoseconds.

	// Artifact Download fields
	// Deprecated, use Details["download_error"] to access this.
	DownloadError string // Error downloading artifacts

	// Validation fields
	// Deprecated, use Details["validation_error"] to access this.
	ValidationError string // Validation error

	// The maximum allowed task disk size.
	// Deprecated, use Details["disk_limit"] to access this.
	DiskLimit int64

	// Name of the sibling task that caused termination of the task that
	// the TaskEvent refers to.
	// Deprecated, use Details["failed_sibling"] to access this.
	FailedSibling string

	// VaultError is the error from token renewal
	// Deprecated, use Details["vault_renewal_error"] to access this.
	VaultError string

	// TaskSignalReason indicates the reason the task is being signalled.
	// Deprecated, use Details["task_signal_reason"] to access this.
	TaskSignalReason string

	// TaskSignal is the signal that was sent to the task
	// Deprecated, use Details["task_signal"] to access this.
	TaskSignal string

	// DriverMessage indicates a driver action being taken.
	// Deprecated, use Details["driver_message"] to access this.
	DriverMessage string

	// GenericSource is the source of a message.
	// Deprecated, is redundant with event type.
	GenericSource string
}
// PopulateEventDisplayMessage fills in DisplayMessage with a human
// friendly description derived from the event's type and the deprecated
// detail fields. It is a no-op when a DisplayMessage is already set, and
// it tolerates a nil receiver.
func (event *TaskEvent) PopulateEventDisplayMessage() {
	// Build up the description based on the event type.
	if event == nil { //TODO(preetha) needs investigation alloc_runner's Run method sends a nil event when sigterming nomad. Why?
		return
	}

	// Respect an explicitly supplied display message.
	if event.DisplayMessage != "" {
		return
	}

	var desc string
	switch event.Type {
	case TaskSetup:
		desc = event.Message
	case TaskStarted:
		desc = "Task started by client"
	case TaskReceived:
		desc = "Task received by client"
	case TaskFailedValidation:
		if event.ValidationError != "" {
			desc = event.ValidationError
		} else {
			desc = "Validation of task failed"
		}
	case TaskSetupFailure:
		if event.SetupError != "" {
			desc = event.SetupError
		} else {
			desc = "Task setup failed"
		}
	case TaskDriverFailure:
		if event.DriverError != "" {
			desc = event.DriverError
		} else {
			desc = "Failed to start task"
		}
	case TaskDownloadingArtifacts:
		desc = "Client is downloading artifacts"
	case TaskArtifactDownloadFailed:
		if event.DownloadError != "" {
			desc = event.DownloadError
		} else {
			desc = "Failed to download artifacts"
		}
	case TaskKilling:
		if event.KillReason != "" {
			desc = fmt.Sprintf("Killing task: %v", event.KillReason)
		} else if event.KillTimeout != 0 {
			desc = fmt.Sprintf("Sent interrupt. Waiting %v before force killing", event.KillTimeout)
		} else {
			desc = "Sent interrupt"
		}
	case TaskKilled:
		if event.KillError != "" {
			desc = event.KillError
		} else {
			desc = "Task successfully killed"
		}
	case TaskTerminated:
		// Always include the exit code; signal and message only when set.
		var parts []string
		parts = append(parts, fmt.Sprintf("Exit Code: %d", event.ExitCode))

		if event.Signal != 0 {
			parts = append(parts, fmt.Sprintf("Signal: %d", event.Signal))
		}

		if event.Message != "" {
			parts = append(parts, fmt.Sprintf("Exit Message: %q", event.Message))
		}
		desc = strings.Join(parts, ", ")
	case TaskRestarting:
		// StartDelay is stored as unix nanoseconds; render as a Duration.
		in := fmt.Sprintf("Task restarting in %v", time.Duration(event.StartDelay))
		if event.RestartReason != "" && event.RestartReason != ReasonWithinPolicy {
			desc = fmt.Sprintf("%s - %s", event.RestartReason, in)
		} else {
			desc = in
		}
	case TaskNotRestarting:
		if event.RestartReason != "" {
			desc = event.RestartReason
		} else {
			desc = "Task exceeded restart policy"
		}
	case TaskSiblingFailed:
		if event.FailedSibling != "" {
			desc = fmt.Sprintf("Task's sibling %q failed", event.FailedSibling)
		} else {
			desc = "Task's sibling failed"
		}
	case TaskSignaling:
		sig := event.TaskSignal
		reason := event.TaskSignalReason

		if sig == "" && reason == "" {
			desc = "Task being sent a signal"
		} else if sig == "" {
			desc = reason
		} else if reason == "" {
			desc = fmt.Sprintf("Task being sent signal %v", sig)
		} else {
			desc = fmt.Sprintf("Task being sent signal %v: %v", sig, reason)
		}
	case TaskRestartSignal:
		if event.RestartReason != "" {
			desc = event.RestartReason
		} else {
			desc = "Task signaled to restart"
		}
	case TaskDriverMessage:
		desc = event.DriverMessage
	case TaskLeaderDead:
		desc = "Leader Task in Group dead"
	default:
		desc = event.Message
	}

	event.DisplayMessage = desc
}
// GoString renders the event as "<time> - <type>".
func (te *TaskEvent) GoString() string {
	return fmt.Sprint(te.Time) + " - " + te.Type
}
// SetMessage records msg on both the deprecated Message field and the
// Details map, returning the event for chaining.
func (te *TaskEvent) SetMessage(msg string) *TaskEvent {
	te.Details["message"] = msg
	te.Message = msg
	return te
}
// Copy returns a shallow copy of the event. Note that the Details map
// header is copied, so the copy aliases the same underlying map as the
// original (this matches the prior behavior). Nil in, nil out.
func (te *TaskEvent) Copy() *TaskEvent {
	if te == nil {
		return nil
	}
	dup := *te
	return &dup
}
// NewTaskEvent creates an event of the given type, stamped with the
// current time and an initialized (empty) Details map.
func NewTaskEvent(event string) *TaskEvent {
	e := &TaskEvent{
		Type:    event,
		Details: make(map[string]string),
	}
	e.Time = time.Now().UnixNano()
	return e
}
// SetSetupError is used to store an error that occurred while setting up
// the task. A nil error leaves the event untouched.
func (e *TaskEvent) SetSetupError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	msg := err.Error()
	e.SetupError = msg
	e.Details["setup_error"] = msg
	return e
}
// SetFailsTask marks the event as one that fails its task.
func (e *TaskEvent) SetFailsTask() *TaskEvent {
	e.Details["fails_task"] = "true"
	e.FailsTask = true
	return e
}
// SetDriverError records a driver failure on the event; a nil error is a
// no-op.
func (e *TaskEvent) SetDriverError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	msg := err.Error()
	e.DriverError = msg
	e.Details["driver_error"] = msg
	return e
}
// SetExitCode records the task's exit code on the event.
func (e *TaskEvent) SetExitCode(c int) *TaskEvent {
	e.Details["exit_code"] = strconv.Itoa(c)
	e.ExitCode = c
	return e
}
// SetSignal records the signal number that terminated the task.
func (e *TaskEvent) SetSignal(s int) *TaskEvent {
	e.Details["signal"] = strconv.Itoa(s)
	e.Signal = s
	return e
}
// SetExitMessage records the task's exit error as the event message; a
// nil error is a no-op.
func (e *TaskEvent) SetExitMessage(err error) *TaskEvent {
	if err == nil {
		return e
	}
	msg := err.Error()
	e.Message = msg
	e.Details["exit_message"] = msg
	return e
}
// SetKillError records an error encountered while killing the task; a
// nil error is a no-op.
func (e *TaskEvent) SetKillError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	msg := err.Error()
	e.KillError = msg
	e.Details["kill_error"] = msg
	return e
}
// SetKillReason records why the task was killed.
func (e *TaskEvent) SetKillReason(r string) *TaskEvent {
	e.Details["kill_reason"] = r
	e.KillReason = r
	return e
}
// SetRestartDelay records the sleep period before the task restarts.
// Both the field and the detail value hold the delay in nanoseconds.
func (e *TaskEvent) SetRestartDelay(delay time.Duration) *TaskEvent {
	ns := int64(delay)
	e.StartDelay = ns
	e.Details["start_delay"] = strconv.FormatInt(ns, 10)
	return e
}
// SetRestartReason records why the task is restarting.
func (e *TaskEvent) SetRestartReason(reason string) *TaskEvent {
	e.Details["restart_reason"] = reason
	e.RestartReason = reason
	return e
}
// SetTaskSignalReason records why the task is being signalled.
func (e *TaskEvent) SetTaskSignalReason(r string) *TaskEvent {
	e.Details["task_signal_reason"] = r
	e.TaskSignalReason = r
	return e
}
// SetTaskSignal records the signal (by its string form) sent to the task.
func (e *TaskEvent) SetTaskSignal(s os.Signal) *TaskEvent {
	name := s.String()
	e.TaskSignal = name
	e.Details["task_signal"] = name
	return e
}
// SetDownloadError records an artifact-download failure; a nil error is
// a no-op.
func (e *TaskEvent) SetDownloadError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	msg := err.Error()
	e.DownloadError = msg
	e.Details["download_error"] = msg
	return e
}
// SetValidationError records a task validation failure; a nil error is a
// no-op.
func (e *TaskEvent) SetValidationError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	msg := err.Error()
	e.ValidationError = msg
	e.Details["validation_error"] = msg
	return e
}
// SetKillTimeout records how long the task is given to shut down before
// being force killed.
func (e *TaskEvent) SetKillTimeout(timeout time.Duration) *TaskEvent {
	e.Details["kill_timeout"] = timeout.String()
	e.KillTimeout = timeout
	return e
}
// SetDiskLimit records the maximum allowed task disk size.
func (e *TaskEvent) SetDiskLimit(limit int64) *TaskEvent {
	e.Details["disk_limit"] = strconv.FormatInt(limit, 10)
	e.DiskLimit = limit
	return e
}
// SetFailedSibling records the name of the sibling task whose failure
// caused this event.
func (e *TaskEvent) SetFailedSibling(sibling string) *TaskEvent {
	e.Details["failed_sibling"] = sibling
	e.FailedSibling = sibling
	return e
}
// SetVaultRenewalError records a Vault token renewal failure; a nil
// error is a no-op.
func (e *TaskEvent) SetVaultRenewalError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	msg := err.Error()
	e.VaultError = msg
	e.Details["vault_renewal_error"] = msg
	return e
}
// SetDriverMessage records an informational message from the driver.
func (e *TaskEvent) SetDriverMessage(m string) *TaskEvent {
	e.Details["driver_message"] = m
	e.DriverMessage = m
	return e
}
// TaskArtifact is an artifact to download before running the task.
type TaskArtifact struct {
	// GetterSource is the source to download an artifact using go-getter
	GetterSource string

	// GetterOptions are options to use when downloading the artifact using
	// go-getter.
	GetterOptions map[string]string

	// GetterMode is the go-getter.ClientMode for fetching resources.
	// Defaults to "any" but can be set to "file" or "dir".
	GetterMode string

	// RelativeDest is the download destination given relative to the task's
	// directory.
	RelativeDest string
}
// Copy returns a copy of the artifact with its own GetterOptions map.
// Nil in, nil out.
func (ta *TaskArtifact) Copy() *TaskArtifact {
	if ta == nil {
		return nil
	}
	dup := *ta
	dup.GetterOptions = helper.CopyMapStringString(ta.GetterOptions)
	return &dup
}
// GoString renders the artifact's fields via the default %+v formatting.
func (ta *TaskArtifact) GoString() string {
	s := fmt.Sprintf("%+v", ta)
	return s
}
// PathEscapesAllocDir returns if the given path escapes the allocation
// directory. The prefix allows adding a prefix if the path will be joined, for
// example a "task/local" prefix may be provided if the path will be joined
// against that prefix. The check works by resolving the path under a fake
// allocation root and testing whether the relative result climbs above it.
func PathEscapesAllocDir(prefix, path string) (bool, error) {
	// A stand-in allocation root to anchor the comparison.
	allocRoot, err := filepath.Abs(filepath.Join("/", "alloc-dir/", "alloc-id/"))
	if err != nil {
		return false, err
	}

	// Resolve the candidate path beneath the root (Join also cleans any
	// "." and ".." components).
	resolved, err := filepath.Abs(filepath.Join(allocRoot, prefix, path))
	if err != nil {
		return false, err
	}

	rel, err := filepath.Rel(allocRoot, resolved)
	if err != nil {
		return false, err
	}

	// Any relative path that starts with ".." points outside the root.
	return strings.HasPrefix(rel, ".."), nil
}
// Validate checks the artifact definition: the source must be set, the
// getter mode must be recognized, the destination must stay inside the
// allocation directory, and any "checksum" getter option must be of the
// form "type:value" with a hex digest of the correct length for its type.
// Checksum problems return early since later checks build on earlier ones.
func (ta *TaskArtifact) Validate() error {
	// Verify the source
	var mErr multierror.Error
	if ta.GetterSource == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("source must be specified"))
	}

	switch ta.GetterMode {
	case "":
		// Default to any
		ta.GetterMode = GetterModeAny
	case GetterModeAny, GetterModeFile, GetterModeDir:
		// Ok
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid artifact mode %q; must be one of: %s, %s, %s",
			ta.GetterMode, GetterModeAny, GetterModeFile, GetterModeDir))
	}

	escaped, err := PathEscapesAllocDir("task", ta.RelativeDest)
	if err != nil {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
	} else if escaped {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
	}

	// Verify the checksum
	if check, ok := ta.GetterOptions["checksum"]; ok {
		check = strings.TrimSpace(check)
		if check == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("checksum value cannot be empty"))
			return mErr.ErrorOrNil()
		}

		parts := strings.Split(check, ":")
		if l := len(parts); l != 2 {
			mErr.Errors = append(mErr.Errors, fmt.Errorf(`checksum must be given as "type:value"; got %q`, check))
			return mErr.ErrorOrNil()
		}

		checksumVal := parts[1]
		checksumBytes, err := hex.DecodeString(checksumVal)
		if err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid checksum: %v", err))
			return mErr.ErrorOrNil()
		}

		// Each supported hash type implies a fixed digest length.
		checksumType := parts[0]
		expectedLength := 0
		switch checksumType {
		case "md5":
			expectedLength = md5.Size
		case "sha1":
			expectedLength = sha1.Size
		case "sha256":
			expectedLength = sha256.Size
		case "sha512":
			expectedLength = sha512.Size
		default:
			mErr.Errors = append(mErr.Errors, fmt.Errorf("unsupported checksum type: %s", checksumType))
			return mErr.ErrorOrNil()
		}

		if len(checksumBytes) != expectedLength {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid %s checksum: %v", checksumType, checksumVal))
			return mErr.ErrorOrNil()
		}
	}

	return mErr.ErrorOrNil()
}
// Operands for constraints beyond the simple comparison operators.
const (
	ConstraintDistinctProperty = "distinct_property"
	ConstraintDistinctHosts    = "distinct_hosts"
	ConstraintRegex            = "regexp"
	ConstraintVersion          = "version"
	ConstraintSetContains      = "set_contains"
)

// Constraints are used to restrict placement options.
type Constraint struct {
	LTarget string // Left-hand target
	RTarget string // Right-hand target
	Operand string // Constraint operand (<=, <, =, !=, >, >=), contains, near
	str     string // Memoized string
}
// Equal checks if two constraints are equal, comparing the operand and
// both targets (the memoized string is ignored).
func (c *Constraint) Equal(o *Constraint) bool {
	if c.LTarget != o.LTarget {
		return false
	}
	return c.RTarget == o.RTarget && c.Operand == o.Operand
}
// Copy returns a copy of the constraint. Nil in, nil out.
func (c *Constraint) Copy() *Constraint {
	if c == nil {
		return nil
	}
	dup := *c
	return &dup
}
// String renders the constraint as "LTarget Operand RTarget", memoizing
// the result on first use.
func (c *Constraint) String() string {
	if c.str == "" {
		c.str = fmt.Sprintf("%s %s %s", c.LTarget, c.Operand, c.RTarget)
	}
	return c.str
}
// Validate checks the constraint: an operand is required, operand-specific
// requirements on RTarget must hold (compilable regexp, parseable version
// constraint, positive count for distinct_property, non-empty for the
// comparison operators), and an LTarget is required for every operand
// except distinct_hosts.
func (c *Constraint) Validate() error {
	var mErr multierror.Error
	if c.Operand == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing constraint operand"))
	}

	// requireLtarget specifies whether the constraint requires an LTarget to be
	// provided.
	requireLtarget := true

	// Perform additional validation based on operand
	switch c.Operand {
	case ConstraintDistinctHosts:
		requireLtarget = false
	case ConstraintSetContains:
		if c.RTarget == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Set contains constraint requires an RTarget"))
		}
	case ConstraintRegex:
		if _, err := regexp.Compile(c.RTarget); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Regular expression failed to compile: %v", err))
		}
	case ConstraintVersion:
		if _, err := version.NewConstraint(c.RTarget); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Version constraint is invalid: %v", err))
		}
	case ConstraintDistinctProperty:
		// If a count is set, make sure it is convertible to a uint64
		// and non-zero.
		if c.RTarget != "" {
			count, err := strconv.ParseUint(c.RTarget, 10, 64)
			if err != nil {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("Failed to convert RTarget %q to uint64: %v", c.RTarget, err))
			} else if count < 1 {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("Distinct Property must have an allowed count of 1 or greater: %d < 1", count))
			}
		}
	case "=", "==", "is", "!=", "not", "<", "<=", ">", ">=":
		if c.RTarget == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q requires an RTarget", c.Operand))
		}
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Unknown constraint type %q", c.Operand))
	}

	// Ensure we have an LTarget for the constraints that need one
	if requireLtarget && c.LTarget == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("No LTarget provided but is required by constraint"))
	}

	return mErr.ErrorOrNil()
}
// EphemeralDisk is an ephemeral disk object
type EphemeralDisk struct {
	// Sticky indicates whether the allocation is sticky to a node
	Sticky bool

	// SizeMB is the size of the local disk in megabytes.
	SizeMB int

	// Migrate determines if Nomad client should migrate the allocation dir for
	// sticky allocations
	Migrate bool
}
// DefaultEphemeralDisk returns an EphemeralDisk with the default size of
// 300 MB.
func DefaultEphemeralDisk() *EphemeralDisk {
	d := new(EphemeralDisk)
	d.SizeMB = 300
	return d
}
// Validate validates EphemeralDisk: the requested size must be at least
// 10 MB.
func (d *EphemeralDisk) Validate() error {
	if d.SizeMB >= 10 {
		return nil
	}
	return fmt.Errorf("minimum DiskMB value is 10; got %d", d.SizeMB)
}
// Copy copies the EphemeralDisk struct and returns a new one. A nil
// receiver yields nil, matching the Copy methods on the other structs in
// this package (Task, Template, Vault, Constraint, TaskState,
// TaskArtifact), so callers can copy an optional block without a nil
// check; previously a nil receiver panicked here.
func (d *EphemeralDisk) Copy() *EphemeralDisk {
	if d == nil {
		return nil
	}
	ld := new(EphemeralDisk)
	*ld = *d
	return ld
}
// VaultChangeMode* enumerate the ways a task can react when a new Vault
// token is retrieved.
const (
	// VaultChangeModeNoop takes no action when a new token is retrieved.
	VaultChangeModeNoop = "noop"

	// VaultChangeModeSignal signals the task when a new token is retrieved.
	VaultChangeModeSignal = "signal"

	// VaultChangeModeRestart restarts the task when a new token is retrieved.
	VaultChangeModeRestart = "restart"
)
// Vault stores the set of permissions a task needs access to from Vault.
type Vault struct {
	// Policies is the set of policies that the task needs access to.
	Policies []string

	// Env marks whether the Vault Token should be exposed as an environment
	// variable.
	Env bool

	// ChangeMode is used to configure the task's behavior when the Vault
	// token changes because the original token could not be renewed in time.
	// See the VaultChangeMode* constants for the accepted values.
	ChangeMode string

	// ChangeSignal is the signal sent to the task when a new token is
	// retrieved. This is only valid when using the signal change mode.
	ChangeSignal string
}
// DefaultVaultBlock returns a Vault block with the defaults: the token is
// exposed via the environment and the task restarts on token change.
func DefaultVaultBlock() *Vault {
	block := new(Vault)
	block.Env = true
	block.ChangeMode = VaultChangeModeRestart
	return block
}
// Copy returns a shallow copy of this Vault block; a nil receiver yields nil.
func (v *Vault) Copy() *Vault {
	if v == nil {
		return nil
	}
	dup := *v
	return &dup
}
// Canonicalize normalizes the Vault block by upper-casing any change signal.
func (v *Vault) Canonicalize() {
	if sig := v.ChangeSignal; sig != "" {
		v.ChangeSignal = strings.ToUpper(sig)
	}
}
// Validate returns an error if the Vault block is misconfigured: at least
// one policy must be requested, the "root" policy is never allowed, and the
// signal change mode requires a signal name.
func (v *Vault) Validate() error {
	if v == nil {
		return nil
	}

	var mErr multierror.Error

	if len(v.Policies) == 0 {
		multierror.Append(&mErr, fmt.Errorf("Policy list cannot be empty"))
	}
	for _, policy := range v.Policies {
		if policy == "root" {
			multierror.Append(&mErr, fmt.Errorf("Can not specify \"root\" policy"))
		}
	}

	switch v.ChangeMode {
	case VaultChangeModeNoop, VaultChangeModeRestart:
		// Nothing further to validate for these modes.
	case VaultChangeModeSignal:
		if v.ChangeSignal == "" {
			multierror.Append(&mErr, fmt.Errorf("Signal must be specified when using change mode %q", VaultChangeModeSignal))
		}
	default:
		multierror.Append(&mErr, fmt.Errorf("Unknown change mode %q", v.ChangeMode))
	}

	return mErr.ErrorOrNil()
}
const (
	// DeploymentStatuses are the various states a deployment can be in.
	DeploymentStatusRunning    = "running"
	DeploymentStatusPaused     = "paused"
	DeploymentStatusFailed     = "failed"
	DeploymentStatusSuccessful = "successful"
	DeploymentStatusCancelled  = "cancelled"

	// DeploymentStatusDescriptions are human-readable descriptions of the
	// states a deployment can be in.
	DeploymentStatusDescriptionRunning               = "Deployment is running"
	DeploymentStatusDescriptionRunningNeedsPromotion = "Deployment is running but requires promotion"
	DeploymentStatusDescriptionPaused                = "Deployment is paused"
	DeploymentStatusDescriptionSuccessful            = "Deployment completed successfully"
	DeploymentStatusDescriptionStoppedJob            = "Cancelled because job is stopped"
	DeploymentStatusDescriptionNewerJob              = "Cancelled due to newer version of job"
	DeploymentStatusDescriptionFailedAllocations     = "Failed due to unhealthy allocations"
	DeploymentStatusDescriptionFailedByUser          = "Deployment marked as failed"
)
// DeploymentStatusDescriptionRollback is used to get the status description of
// a deployment when rolling back to an older job version.
func DeploymentStatusDescriptionRollback(baseDescription string, jobVersion uint64) string {
	suffix := fmt.Sprintf(" - rolling back to job version %d", jobVersion)
	return baseDescription + suffix
}
// DeploymentStatusDescriptionRollbackNoop is used to get the status description
// of a deployment when a rollback is skipped because the stable version has the
// same specification as the current job.
func DeploymentStatusDescriptionRollbackNoop(baseDescription string, jobVersion uint64) string {
	suffix := fmt.Sprintf(" - not rolling back to stable job version %d as current job has same specification", jobVersion)
	return baseDescription + suffix
}
// DeploymentStatusDescriptionNoRollbackTarget is used to get the status
// description of a deployment when there is no target to roll back to but
// auto-revert is desired.
func DeploymentStatusDescriptionNoRollbackTarget(baseDescription string) string {
	return baseDescription + " - no stable job version to auto revert to"
}
// Deployment is the object that represents a job deployment which is used to
// transition a job between versions.
type Deployment struct {
	// ID is a generated UUID for the deployment.
	ID string

	// Namespace is the namespace the deployment is created in.
	Namespace string

	// JobID is the job the deployment is created for.
	JobID string

	// JobVersion is the version of the job the deployment is tracking.
	JobVersion uint64

	// JobModifyIndex is the modify index of the job the deployment is tracking.
	JobModifyIndex uint64

	// JobCreateIndex is the create index of the job which the deployment is
	// tracking. It is needed so that if the job gets stopped and reran we can
	// present the correct list of deployments for the job and not old ones.
	JobCreateIndex uint64

	// TaskGroups is the set of task groups effected by the deployment and their
	// current deployment status.
	TaskGroups map[string]*DeploymentState

	// The status of the deployment. See the DeploymentStatus* constants.
	Status string

	// StatusDescription allows a human readable description of the deployment
	// status.
	StatusDescription string

	// Raft indexes.
	CreateIndex uint64
	ModifyIndex uint64
}
// NewDeployment initializes a running deployment that tracks the given job's
// current version and Raft indexes.
func NewDeployment(job *Job) *Deployment {
	d := new(Deployment)
	d.ID = uuid.Generate()
	d.Namespace = job.Namespace
	d.JobID = job.ID
	d.JobVersion = job.Version
	d.JobModifyIndex = job.ModifyIndex
	d.JobCreateIndex = job.CreateIndex
	d.Status = DeploymentStatusRunning
	d.StatusDescription = DeploymentStatusDescriptionRunning
	d.TaskGroups = make(map[string]*DeploymentState, len(job.TaskGroups))
	return d
}
// Copy returns a deep copy of the deployment; a nil receiver yields nil.
func (d *Deployment) Copy() *Deployment {
	if d == nil {
		return nil
	}

	dup := *d
	if d.TaskGroups != nil {
		dup.TaskGroups = make(map[string]*DeploymentState, len(d.TaskGroups))
		for name, state := range d.TaskGroups {
			dup.TaskGroups[name] = state.Copy()
		}
	}
	return &dup
}
// Active returns true while the deployment is running or paused; any other
// status is terminal.
func (d *Deployment) Active() bool {
	return d.Status == DeploymentStatusRunning || d.Status == DeploymentStatusPaused
}
// GetID is a helper for getting the ID when the object may be nil.
func (d *Deployment) GetID() string {
	if d != nil {
		return d.ID
	}
	return ""
}
// HasPlacedCanaries reports whether any task group in the deployment has at
// least one placed canary allocation.
func (d *Deployment) HasPlacedCanaries() bool {
	if d == nil {
		return false
	}
	for _, state := range d.TaskGroups {
		if len(state.PlacedCanaries) > 0 {
			return true
		}
	}
	return false
}
// RequiresPromotion reports whether a running deployment has a task group
// with desired canaries that has not yet been promoted.
func (d *Deployment) RequiresPromotion() bool {
	if d == nil || d.Status != DeploymentStatusRunning {
		return false
	}
	for _, state := range d.TaskGroups {
		if state.DesiredCanaries > 0 && !state.Promoted {
			return true
		}
	}
	return false
}
// GoString renders the deployment and each of its task group states for
// debugging output.
func (d *Deployment) GoString() string {
	var sb strings.Builder
	fmt.Fprintf(&sb, "Deployment ID %q for job %q has status %q (%v):", d.ID, d.JobID, d.Status, d.StatusDescription)
	for group, state := range d.TaskGroups {
		fmt.Fprintf(&sb, "\nTask Group %q has state:\n%#v", group, state)
	}
	return sb.String()
}
// DeploymentState tracks the state of a deployment for a given task group.
type DeploymentState struct {
	// AutoRevert marks whether the task group has indicated the job should be
	// reverted on failure.
	AutoRevert bool

	// Promoted marks whether the canaries have been promoted.
	Promoted bool

	// PlacedCanaries is the set of placed canary allocations (allocation IDs).
	PlacedCanaries []string

	// DesiredCanaries is the number of canaries that should be created.
	DesiredCanaries int

	// DesiredTotal is the total number of allocations that should be created as
	// part of the deployment.
	DesiredTotal int

	// PlacedAllocs is the number of allocations that have been placed.
	PlacedAllocs int

	// HealthyAllocs is the number of allocations that have been marked healthy.
	HealthyAllocs int

	// UnhealthyAllocs are allocations that have been marked as unhealthy.
	UnhealthyAllocs int
}
// GoString renders the task group deployment state for debugging output.
func (d *DeploymentState) GoString() string {
	var sb strings.Builder
	fmt.Fprintf(&sb, "\tDesired Total: %d", d.DesiredTotal)
	fmt.Fprintf(&sb, "\n\tDesired Canaries: %d", d.DesiredCanaries)
	fmt.Fprintf(&sb, "\n\tPlaced Canaries: %#v", d.PlacedCanaries)
	fmt.Fprintf(&sb, "\n\tPromoted: %v", d.Promoted)
	fmt.Fprintf(&sb, "\n\tPlaced: %d", d.PlacedAllocs)
	fmt.Fprintf(&sb, "\n\tHealthy: %d", d.HealthyAllocs)
	fmt.Fprintf(&sb, "\n\tUnhealthy: %d", d.UnhealthyAllocs)
	fmt.Fprintf(&sb, "\n\tAutoRevert: %v", d.AutoRevert)
	return sb.String()
}
// Copy returns a deep copy of the deployment state. It is safe to call on a
// nil receiver, returning nil, which matches the behavior of the other Copy
// helpers in this package (Deployment.Copy, AllocMetric.Copy, etc.).
func (d *DeploymentState) Copy() *DeploymentState {
	if d == nil {
		return nil
	}
	c := &DeploymentState{}
	*c = *d
	c.PlacedCanaries = helper.CopySliceString(d.PlacedCanaries)
	return c
}
// DeploymentStatusUpdate is used to update the status of a given deployment.
type DeploymentStatusUpdate struct {
	// DeploymentID is the ID of the deployment to update.
	DeploymentID string

	// Status is the new status of the deployment.
	Status string

	// StatusDescription is the new status description of the deployment.
	StatusDescription string
}
// AllocDesiredStatus* describe what the server wants the allocation to do.
const (
	AllocDesiredStatusRun   = "run"   // Allocation should run
	AllocDesiredStatusStop  = "stop"  // Allocation should stop
	AllocDesiredStatusEvict = "evict" // Allocation should stop, and was evicted
)

// AllocClientStatus* report the allocation's state as observed by the client.
const (
	AllocClientStatusPending  = "pending"
	AllocClientStatusRunning  = "running"
	AllocClientStatusComplete = "complete"
	AllocClientStatusFailed   = "failed"
	AllocClientStatusLost     = "lost"
)
// Allocation is used to allocate the placement of a task group to a node.
type Allocation struct {
	// ID of the allocation (UUID).
	ID string

	// Namespace is the namespace the allocation is created in.
	Namespace string

	// ID of the evaluation that generated this allocation.
	EvalID string

	// Name is a logical name of the allocation.
	Name string

	// NodeID is the node this is being placed on.
	NodeID string

	// Job is the parent job of the task group being allocated.
	// This is copied at allocation time to avoid issues if the job
	// definition is updated.
	JobID string
	Job   *Job

	// TaskGroup is the name of the task group that should be run.
	TaskGroup string

	// Resources is the total set of resources allocated as part
	// of this allocation of the task group.
	Resources *Resources

	// SharedResources are the resources that are shared by all the tasks in an
	// allocation.
	SharedResources *Resources

	// TaskResources is the set of resources allocated to each
	// task. These should sum to the total Resources.
	TaskResources map[string]*Resources

	// Metrics associated with this allocation.
	Metrics *AllocMetric

	// DesiredStatus is the desired status of the allocation on the client.
	// See the AllocDesiredStatus* constants.
	DesiredStatus string

	// DesiredDescription is meant to provide more human useful information
	// about the desired status.
	DesiredDescription string

	// ClientStatus is the status of the allocation on the client. See the
	// AllocClientStatus* constants.
	ClientStatus string

	// ClientDescription is meant to provide more human useful information
	// about the client status.
	ClientDescription string

	// TaskStates stores the state of each task.
	TaskStates map[string]*TaskState

	// PreviousAllocation is the allocation that this allocation is replacing.
	PreviousAllocation string

	// DeploymentID identifies an allocation as being created from a
	// particular deployment.
	DeploymentID string

	// DeploymentStatus captures the status of the allocation as part of the
	// given deployment.
	DeploymentStatus *AllocDeploymentStatus

	// Raft Indexes.
	CreateIndex uint64
	ModifyIndex uint64

	// AllocModifyIndex is not updated when the client updates allocations. This
	// lets the client pull only the allocs updated by the server.
	AllocModifyIndex uint64

	// CreateTime is the time the allocation has finished scheduling and been
	// verified by the plan applier.
	CreateTime int64

	// ModifyTime is the time the allocation was last updated.
	ModifyTime int64
}
// Index returns the index of the allocation. If the allocation is from a task
// group with count greater than 1, there will be multiple allocations for it.
func (a *Allocation) Index() uint {
	nameLen := len(a.Name)
	// The name is expected to look like "<jobID>.<taskGroup>[<index>]";
	// the +2 accounts for the '.' and '[' separators.
	prefixLen := len(a.JobID) + len(a.TaskGroup) + 2
	if nameLen <= 3 || nameLen <= prefixLen {
		return uint(0)
	}

	// Strip the trailing ']' and parse the numeric suffix; a parse failure
	// falls back to index 0.
	digits := a.Name[prefixLen : nameLen-1]
	idx, _ := strconv.Atoi(digits)
	return uint(idx)
}
// Copy provides a deep copy of the allocation, including a deep copy of the
// embedded Job.
func (a *Allocation) Copy() *Allocation {
	return a.copyImpl(true)
}
// CopySkipJob provides a copy of the allocation but doesn't deep copy the
// job; the copy shares the original's Job pointer.
func (a *Allocation) CopySkipJob() *Allocation {
	return a.copyImpl(false)
}
// copyImpl duplicates the allocation, deep-copying the Job only when job is
// true. Maps of per-task resources and states are always rebuilt so the copy
// shares no mutable state with the original (besides the Job when skipped).
func (a *Allocation) copyImpl(job bool) *Allocation {
	if a == nil {
		return nil
	}

	dup := new(Allocation)
	*dup = *a

	if job {
		dup.Job = dup.Job.Copy()
	}
	dup.Resources = dup.Resources.Copy()
	dup.SharedResources = dup.SharedResources.Copy()

	if a.TaskResources != nil {
		resources := make(map[string]*Resources, len(a.TaskResources))
		for name, r := range a.TaskResources {
			resources[name] = r.Copy()
		}
		dup.TaskResources = resources
	}

	dup.Metrics = dup.Metrics.Copy()
	dup.DeploymentStatus = dup.DeploymentStatus.Copy()

	if a.TaskStates != nil {
		states := make(map[string]*TaskState, len(a.TaskStates))
		for name, s := range a.TaskStates {
			states[name] = s.Copy()
		}
		dup.TaskStates = states
	}
	return dup
}
// TerminalStatus returns if the desired or actual status is terminal and
// will no longer transition.
func (a *Allocation) TerminalStatus() bool {
	// A terminal desired state decides immediately; otherwise defer to the
	// client-observed state.
	if a.DesiredStatus == AllocDesiredStatusStop || a.DesiredStatus == AllocDesiredStatusEvict {
		return true
	}

	switch a.ClientStatus {
	case AllocClientStatusComplete, AllocClientStatusFailed, AllocClientStatusLost:
		return true
	}
	return false
}
// Terminated returns if the allocation is in a terminal state on a client.
func (a *Allocation) Terminated() bool {
	switch a.ClientStatus {
	case AllocClientStatusFailed, AllocClientStatusComplete, AllocClientStatusLost:
		return true
	default:
		return false
	}
}
// RanSuccessfully returns whether the client ran the allocation to
// completion, i.e. its client status is "complete".
func (a *Allocation) RanSuccessfully() bool {
	return a.ClientStatus == AllocClientStatusComplete
}
// ShouldMigrate returns if the allocation needs data migration: it must be
// replacing a previous allocation, must not itself be stopping, and its task
// group's ephemeral disk must be both sticky and marked for migration.
func (a *Allocation) ShouldMigrate() bool {
	if a.PreviousAllocation == "" {
		return false
	}

	switch a.DesiredStatus {
	case AllocDesiredStatusStop, AllocDesiredStatusEvict:
		return false
	}

	// Without a task group or an ephemeral disk block there is nothing to
	// migrate.
	tg := a.Job.LookupTaskGroup(a.TaskGroup)
	if tg == nil || tg.EphemeralDisk == nil {
		return false
	}

	return tg.EphemeralDisk.Migrate && tg.EphemeralDisk.Sticky
}
// Stub returns a list stub for the allocation. Note: it reads a.Job.Version,
// so the allocation's Job must be populated.
func (a *Allocation) Stub() *AllocListStub {
	return &AllocListStub{
		ID:                 a.ID,
		EvalID:             a.EvalID,
		Name:               a.Name,
		NodeID:             a.NodeID,
		JobID:              a.JobID,
		JobVersion:         a.Job.Version,
		TaskGroup:          a.TaskGroup,
		DesiredStatus:      a.DesiredStatus,
		DesiredDescription: a.DesiredDescription,
		ClientStatus:       a.ClientStatus,
		ClientDescription:  a.ClientDescription,
		TaskStates:         a.TaskStates,
		DeploymentStatus:   a.DeploymentStatus,
		CreateIndex:        a.CreateIndex,
		ModifyIndex:        a.ModifyIndex,
		CreateTime:         a.CreateTime,
		ModifyTime:         a.ModifyTime,
	}
}
// AllocListStub is used to return a subset of alloc information for list
// endpoints; see Allocation.Stub for how it is populated.
type AllocListStub struct {
	ID                 string
	EvalID             string
	Name               string
	NodeID             string
	JobID              string
	JobVersion         uint64
	TaskGroup          string
	DesiredStatus      string
	DesiredDescription string
	ClientStatus       string
	ClientDescription  string
	TaskStates         map[string]*TaskState
	DeploymentStatus   *AllocDeploymentStatus
	CreateIndex        uint64
	ModifyIndex        uint64
	CreateTime         int64
	ModifyTime         int64
}
// AllocMetric is used to track various metrics while attempting
// to make an allocation. These are used to debug a job, or to better
// understand the pressure within the system.
type AllocMetric struct {
	// NodesEvaluated is the number of nodes that were evaluated.
	NodesEvaluated int

	// NodesFiltered is the number of nodes filtered due to a constraint.
	NodesFiltered int

	// NodesAvailable is the number of nodes available for evaluation per DC.
	NodesAvailable map[string]int

	// ClassFiltered is the number of nodes filtered by class.
	ClassFiltered map[string]int

	// ConstraintFiltered is the number of failures caused by constraint.
	ConstraintFiltered map[string]int

	// NodesExhausted is the number of nodes skipped due to being
	// exhausted of at least one resource.
	NodesExhausted int

	// ClassExhausted is the number of nodes exhausted by class.
	ClassExhausted map[string]int

	// DimensionExhausted provides the count by dimension or reason.
	DimensionExhausted map[string]int

	// QuotaExhausted provides the exhausted dimensions.
	QuotaExhausted []string

	// Scores is the scores of the final few nodes remaining
	// for placement. The top score is typically selected.
	Scores map[string]float64

	// AllocationTime is a measure of how long the allocation
	// attempt took. This can affect performance and SLAs.
	AllocationTime time.Duration

	// CoalescedFailures indicates the number of other
	// allocations that were coalesced into this failed allocation.
	// This is to prevent creating many failed allocations for a
	// single task group.
	CoalescedFailures int
}
// Copy returns a deep copy of the metrics; a nil receiver yields nil.
func (a *AllocMetric) Copy() *AllocMetric {
	if a == nil {
		return nil
	}

	dup := *a
	dup.NodesAvailable = helper.CopyMapStringInt(a.NodesAvailable)
	dup.ClassFiltered = helper.CopyMapStringInt(a.ClassFiltered)
	dup.ConstraintFiltered = helper.CopyMapStringInt(a.ConstraintFiltered)
	dup.ClassExhausted = helper.CopyMapStringInt(a.ClassExhausted)
	dup.DimensionExhausted = helper.CopyMapStringInt(a.DimensionExhausted)
	dup.QuotaExhausted = helper.CopySliceString(a.QuotaExhausted)
	dup.Scores = helper.CopyMapStringFloat64(a.Scores)
	return &dup
}
// EvaluateNode increments the count of nodes the scheduler has examined.
func (a *AllocMetric) EvaluateNode() {
	a.NodesEvaluated++
}
// FilterNode records that a node was filtered, attributing the filter to the
// node's class and the violated constraint when either is known.
func (a *AllocMetric) FilterNode(node *Node, constraint string) {
	a.NodesFiltered++

	if node != nil && node.NodeClass != "" {
		if a.ClassFiltered == nil {
			a.ClassFiltered = map[string]int{}
		}
		a.ClassFiltered[node.NodeClass]++
	}

	if constraint != "" {
		if a.ConstraintFiltered == nil {
			a.ConstraintFiltered = map[string]int{}
		}
		a.ConstraintFiltered[constraint]++
	}
}
// ExhaustedNode records that a node was skipped for lacking resources,
// attributing the exhaustion to the node's class and the exhausted dimension
// when either is known.
func (a *AllocMetric) ExhaustedNode(node *Node, dimension string) {
	a.NodesExhausted++

	if node != nil && node.NodeClass != "" {
		if a.ClassExhausted == nil {
			a.ClassExhausted = map[string]int{}
		}
		a.ClassExhausted[node.NodeClass]++
	}

	if dimension != "" {
		if a.DimensionExhausted == nil {
			a.DimensionExhausted = map[string]int{}
		}
		a.DimensionExhausted[dimension]++
	}
}
// ExhaustQuota records the quota dimensions that were exhausted.
func (a *AllocMetric) ExhaustQuota(dimensions []string) {
	// Initialize lazily so even an empty dimension list leaves a non-nil
	// slice behind.
	if a.QuotaExhausted == nil {
		a.QuotaExhausted = make([]string, 0, len(dimensions))
	}

	a.QuotaExhausted = append(a.QuotaExhausted, dimensions...)
}
// ScoreNode records a placement score for the node, keyed by "<nodeID>.<name>".
func (a *AllocMetric) ScoreNode(node *Node, name string, score float64) {
	if a.Scores == nil {
		a.Scores = map[string]float64{}
	}
	a.Scores[node.ID+"."+name] = score
}
// AllocDeploymentStatus captures the status of the allocation as part of the
// deployment. This can include things like if the allocation has been marked
// as healthy.
type AllocDeploymentStatus struct {
	// Healthy marks whether the allocation has been marked healthy or unhealthy
	// as part of a deployment. It can be unset (nil) if it has neither been
	// marked healthy nor unhealthy.
	Healthy *bool

	// ModifyIndex is the raft index in which the deployment status was last
	// changed.
	ModifyIndex uint64
}
// IsHealthy returns if the allocation is explicitly marked healthy as part of
// a deployment; nil receivers and unset health report false.
func (a *AllocDeploymentStatus) IsHealthy() bool {
	return a != nil && a.Healthy != nil && *a.Healthy
}
// IsUnhealthy returns if the allocation is explicitly marked unhealthy as part
// of a deployment; nil receivers and unset health report false.
func (a *AllocDeploymentStatus) IsUnhealthy() bool {
	return a != nil && a.Healthy != nil && !*a.Healthy
}
// Copy returns a deep copy of the deployment status, duplicating the Healthy
// pointer; a nil receiver yields nil.
func (a *AllocDeploymentStatus) Copy() *AllocDeploymentStatus {
	if a == nil {
		return nil
	}

	dup := *a
	if a.Healthy != nil {
		dup.Healthy = helper.BoolToPtr(*a.Healthy)
	}
	return &dup
}
// EvalStatus* are the states an evaluation can be in. Note that the
// EvalStatusCancelled constant name uses the double-l spelling while its
// wire value is "canceled".
const (
	EvalStatusBlocked   = "blocked"
	EvalStatusPending   = "pending"
	EvalStatusComplete  = "complete"
	EvalStatusFailed    = "failed"
	EvalStatusCancelled = "canceled"
)

// EvalTrigger* record why an evaluation was created.
const (
	EvalTriggerJobRegister       = "job-register"
	EvalTriggerJobDeregister     = "job-deregister"
	EvalTriggerPeriodicJob       = "periodic-job"
	EvalTriggerNodeUpdate        = "node-update"
	EvalTriggerScheduled         = "scheduled"
	EvalTriggerRollingUpdate     = "rolling-update"
	EvalTriggerDeploymentWatcher = "deployment-watcher"
	EvalTriggerFailedFollowUp    = "failed-follow-up"
	EvalTriggerMaxPlans          = "max-plan-attempts"
)
// CoreJob* identify the internal garbage-collection jobs run by the core
// scheduler.
const (
	// CoreJobEvalGC is used for the garbage collection of evaluations
	// and allocations. We periodically scan evaluations in a terminal state,
	// in which all the corresponding allocations are also terminal. We
	// delete these out of the system to bound the state.
	CoreJobEvalGC = "eval-gc"

	// CoreJobNodeGC is used for the garbage collection of failed nodes.
	// We periodically scan nodes in a terminal state, and if they have no
	// corresponding allocations we delete these out of the system.
	CoreJobNodeGC = "node-gc"

	// CoreJobJobGC is used for the garbage collection of eligible jobs. We
	// periodically scan garbage collectible jobs and check if both their
	// evaluations and allocations are terminal. If so, we delete these out of
	// the system.
	CoreJobJobGC = "job-gc"

	// CoreJobDeploymentGC is used for the garbage collection of eligible
	// deployments. We periodically scan garbage collectible deployments and
	// check if they are terminal. If so, we delete these out of the system.
	CoreJobDeploymentGC = "deployment-gc"

	// CoreJobForceGC is used to force garbage collection of all GCable objects.
	CoreJobForceGC = "force-gc"
)
// Evaluation is used anytime we need to apply business logic as a result
// of a change to our desired state (job specification) or the emergent state
// (registered nodes). When the inputs change, we need to "evaluate" them,
// potentially taking action (allocation of work) or doing nothing if the state
// of the world does not require it.
type Evaluation struct {
	// ID is a randomly generated UUID used for this evaluation. This
	// is assigned upon the creation of the evaluation.
	ID string

	// Namespace is the namespace the evaluation is created in.
	Namespace string

	// Priority is used to control scheduling importance and if this job
	// can preempt other jobs.
	Priority int

	// Type is used to control which schedulers are available to handle
	// this evaluation.
	Type string

	// TriggeredBy is used to give some insight into why this Eval
	// was created. (Job change, node failure, alloc failure, etc).
	// See the EvalTrigger* constants.
	TriggeredBy string

	// JobID is the job this evaluation is scoped to. Evaluations cannot
	// be run in parallel for a given JobID, so we serialize on this.
	JobID string

	// JobModifyIndex is the modify index of the job at the time
	// the evaluation was created.
	JobModifyIndex uint64

	// NodeID is the node that was affected triggering the evaluation.
	NodeID string

	// NodeModifyIndex is the modify index of the node at the time
	// the evaluation was created.
	NodeModifyIndex uint64

	// DeploymentID is the ID of the deployment that triggered the evaluation.
	DeploymentID string

	// Status of the evaluation. See the EvalStatus* constants.
	Status string

	// StatusDescription is meant to provide more human useful information.
	StatusDescription string

	// Wait is a minimum wait time for running the eval. This is used to
	// support a rolling upgrade.
	Wait time.Duration

	// NextEval is the evaluation ID for the eval created to do a followup.
	// This is used to support rolling upgrades, where we need a chain of evaluations.
	NextEval string

	// PreviousEval is the evaluation ID for the eval creating this one to do a followup.
	// This is used to support rolling upgrades, where we need a chain of evaluations.
	PreviousEval string

	// BlockedEval is the evaluation ID for a created blocked eval. A
	// blocked eval will be created if all allocations could not be placed due
	// to constraints or lacking resources.
	BlockedEval string

	// FailedTGAllocs are task groups which have allocations that could not be
	// made, but the metrics are persisted so that the user can use the feedback
	// to determine the cause.
	FailedTGAllocs map[string]*AllocMetric

	// ClassEligibility tracks computed node classes that have been explicitly
	// marked as eligible or ineligible.
	ClassEligibility map[string]bool

	// QuotaLimitReached marks whether a quota limit was reached for the
	// evaluation.
	QuotaLimitReached string

	// EscapedComputedClass marks whether the job has constraints that are not
	// captured by computed node classes.
	EscapedComputedClass bool

	// AnnotatePlan triggers the scheduler to provide additional annotations
	// during the evaluation. This should not be set during normal operations.
	AnnotatePlan bool

	// QueuedAllocations is the number of unplaced allocations at the time the
	// evaluation was processed. The map is keyed by Task Group names.
	QueuedAllocations map[string]int

	// LeaderACL provides the ACL token to when issuing RPCs back to the
	// leader. This will be a valid management token as long as the leader is
	// active. This should not ever be exposed via the API.
	LeaderACL string

	// SnapshotIndex is the Raft index of the snapshot used to process the
	// evaluation. As such it will only be set once it has gone through the
	// scheduler.
	SnapshotIndex uint64

	// Raft Indexes.
	CreateIndex uint64
	ModifyIndex uint64
}
// TerminalStatus returns if the current status is terminal and
// will no longer transition.
func (e *Evaluation) TerminalStatus() bool {
	return e.Status == EvalStatusComplete ||
		e.Status == EvalStatusFailed ||
		e.Status == EvalStatusCancelled
}
// GoString renders a short debug representation of the evaluation.
func (e *Evaluation) GoString() string {
	return fmt.Sprintf("<Eval %q JobID: %q Namespace: %q>", e.ID, e.JobID, e.Namespace)
}
// Copy returns a deep copy of the evaluation, rebuilding the class
// eligibility, failed task group metrics, and queued allocation maps; a nil
// receiver yields nil.
func (e *Evaluation) Copy() *Evaluation {
	if e == nil {
		return nil
	}

	dup := new(Evaluation)
	*dup = *e

	if e.ClassEligibility != nil {
		eligibility := make(map[string]bool, len(e.ClassEligibility))
		for class, eligible := range e.ClassEligibility {
			eligibility[class] = eligible
		}
		dup.ClassEligibility = eligibility
	}

	if e.FailedTGAllocs != nil {
		failed := make(map[string]*AllocMetric, len(e.FailedTGAllocs))
		for group, metric := range e.FailedTGAllocs {
			failed[group] = metric.Copy()
		}
		dup.FailedTGAllocs = failed
	}

	if e.QueuedAllocations != nil {
		queued := make(map[string]int, len(e.QueuedAllocations))
		for group, count := range e.QueuedAllocations {
			queued[group] = count
		}
		dup.QueuedAllocations = queued
	}

	return dup
}
// ShouldEnqueue checks if a given evaluation should be enqueued into the
// eval_broker: only pending evaluations are enqueued. Panics on an
// unrecognized status.
func (e *Evaluation) ShouldEnqueue() bool {
	switch e.Status {
	case EvalStatusPending:
		return true
	case EvalStatusComplete, EvalStatusFailed, EvalStatusBlocked, EvalStatusCancelled:
		return false
	}
	panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
}
// ShouldBlock checks if a given evaluation should be entered into the blocked
// eval tracker: only blocked evaluations qualify. Panics on an unrecognized
// status.
func (e *Evaluation) ShouldBlock() bool {
	switch e.Status {
	case EvalStatusBlocked:
		return true
	case EvalStatusComplete, EvalStatusFailed, EvalStatusPending, EvalStatusCancelled:
		return false
	}
	panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
}
// MakePlan is used to make a plan from the given evaluation
// for a given Job. The job may be nil (e.g. on deregistration), in which
// case AllAtOnce is left at its zero value.
func (e *Evaluation) MakePlan(j *Job) *Plan {
	plan := new(Plan)
	plan.EvalID = e.ID
	plan.Priority = e.Priority
	plan.Job = j
	plan.NodeUpdate = make(map[string][]*Allocation)
	plan.NodeAllocation = make(map[string][]*Allocation)
	if j != nil {
		plan.AllAtOnce = j.AllAtOnce
	}
	return plan
}
// NextRollingEval creates an evaluation to follow up this eval for rolling
// updates. The new eval is pending, delayed by the given wait duration, and
// chained to this one via PreviousEval.
func (e *Evaluation) NextRollingEval(wait time.Duration) *Evaluation {
	return &Evaluation{
		ID:             uuid.Generate(),
		Namespace:      e.Namespace,
		Priority:       e.Priority,
		Type:           e.Type,
		TriggeredBy:    EvalTriggerRollingUpdate,
		JobID:          e.JobID,
		JobModifyIndex: e.JobModifyIndex,
		Status:         EvalStatusPending,
		Wait:           wait,
		PreviousEval:   e.ID,
	}
}
// CreateBlockedEval creates a blocked evaluation to follow up this eval to
// place any failed allocations. It takes the classes marked explicitly
// eligible or ineligible, whether the job has escaped computed node classes
// and whether the quota limit was reached.
func (e *Evaluation) CreateBlockedEval(classEligibility map[string]bool,
	escaped bool, quotaReached string) *Evaluation {

	return &Evaluation{
		ID:                   uuid.Generate(),
		Namespace:            e.Namespace,
		Priority:             e.Priority,
		Type:                 e.Type,
		TriggeredBy:          e.TriggeredBy,
		JobID:                e.JobID,
		JobModifyIndex:       e.JobModifyIndex,
		Status:               EvalStatusBlocked,
		PreviousEval:         e.ID,
		ClassEligibility:     classEligibility,
		EscapedComputedClass: escaped,
		QuotaLimitReached:    quotaReached,
	}
}
// CreateFailedFollowUpEval creates a follow up evaluation when the current one
// has been marked as failed because it has hit the delivery limit and will not
// be retried by the eval_broker. The follow-up is pending and delayed by the
// given wait duration.
func (e *Evaluation) CreateFailedFollowUpEval(wait time.Duration) *Evaluation {
	return &Evaluation{
		ID:             uuid.Generate(),
		Namespace:      e.Namespace,
		Priority:       e.Priority,
		Type:           e.Type,
		TriggeredBy:    EvalTriggerFailedFollowUp,
		JobID:          e.JobID,
		JobModifyIndex: e.JobModifyIndex,
		Status:         EvalStatusPending,
		Wait:           wait,
		PreviousEval:   e.ID,
	}
}
// Plan is used to submit a commit plan for task allocations. These
// are submitted to the leader which verifies that resources have
// not been overcommitted before admitting the plan.
type Plan struct {
	// EvalID is the evaluation ID this plan is associated with.
	EvalID string

	// EvalToken is used to prevent a split-brain processing of
	// an evaluation. There should only be a single scheduler running
	// an Eval at a time, but this could be violated after a leadership
	// transition. This unique token is used to reject plans that are
	// being submitted from a different leader.
	EvalToken string

	// Priority is the priority of the upstream job.
	Priority int

	// AllAtOnce is used to control if incremental scheduling of task groups
	// is allowed or if we must do a gang scheduling of the entire job.
	// If this is false, a plan may be partially applied. Otherwise, the
	// entire plan must be able to make progress.
	AllAtOnce bool

	// Job is the parent job of all the allocations in the Plan.
	// Since a Plan only involves a single Job, we can reduce the size
	// of the plan by only including it once.
	Job *Job

	// NodeUpdate contains all the allocations for each node. For each node,
	// this is a list of the allocations to update to either stop or evict.
	NodeUpdate map[string][]*Allocation

	// NodeAllocation contains all the allocations for each node.
	// The evicts must be considered prior to the allocations.
	NodeAllocation map[string][]*Allocation

	// Annotations contains annotations by the scheduler to be used by operators
	// to understand the decisions made by the scheduler.
	Annotations *PlanAnnotations

	// Deployment is the deployment created or updated by the scheduler that
	// should be applied by the planner.
	Deployment *Deployment

	// DeploymentUpdates is a set of status updates to apply to the given
	// deployments. This allows the scheduler to cancel any unneeded deployment
	// because the job is stopped or the update block is removed.
	DeploymentUpdates []*DeploymentStatusUpdate
}
// AppendUpdate marks the allocation for eviction under the given desired
// status. The clientStatus of the allocation may be optionally set by
// passing in a non-empty value.
func (p *Plan) AppendUpdate(alloc *Allocation, desiredStatus, desiredDesc, clientStatus string) {
	updated := new(Allocation)
	*updated = *alloc

	// When deregistering a job the plan carries no job, so recover it from
	// the allocation being evicted.
	if p.Job == nil && updated.Job != nil {
		p.Job = updated.Job
	}

	// Normalize: drop the job and the resources (the latter can be rebuilt)
	// to keep the plan small.
	updated.Job = nil
	updated.Resources = nil

	updated.DesiredStatus = desiredStatus
	updated.DesiredDescription = desiredDesc
	if clientStatus != "" {
		updated.ClientStatus = clientStatus
	}

	p.NodeUpdate[alloc.NodeID] = append(p.NodeUpdate[alloc.NodeID], updated)
}
// PopUpdate removes the update for the given allocation, provided it is the
// most recently appended entry for its node. Otherwise it is a no-op.
func (p *Plan) PopUpdate(alloc *Allocation) {
	updates := p.NodeUpdate[alloc.NodeID]
	last := len(updates) - 1
	if last < 0 || updates[last].ID != alloc.ID {
		return
	}
	// Drop the tail entry; remove the node key entirely once empty so the
	// map does not accumulate empty slices.
	if updates = updates[:last]; len(updates) == 0 {
		delete(p.NodeUpdate, alloc.NodeID)
	} else {
		p.NodeUpdate[alloc.NodeID] = updates
	}
}
// AppendAlloc queues the allocation for placement on its node.
func (p *Plan) AppendAlloc(alloc *Allocation) {
	nodeID := alloc.NodeID
	p.NodeAllocation[nodeID] = append(p.NodeAllocation[nodeID], alloc)
}
// IsNoOp checks if this plan would do nothing
func (p *Plan) IsNoOp() bool {
	if len(p.NodeUpdate) != 0 || len(p.NodeAllocation) != 0 {
		return false
	}
	return p.Deployment == nil && len(p.DeploymentUpdates) == 0
}
// PlanResult is the result of a plan submitted to the leader.
type PlanResult struct {
	// NodeUpdate contains all the updates that were committed.
	// Presumably keyed by node ID, mirroring Plan.NodeUpdate.
	NodeUpdate map[string][]*Allocation
	// NodeAllocation contains all the allocations that were committed.
	NodeAllocation map[string][]*Allocation
	// Deployment is the deployment that was committed.
	Deployment *Deployment
	// DeploymentUpdates is the set of deployment updates that were committed.
	DeploymentUpdates []*DeploymentStatusUpdate
	// RefreshIndex is the index the worker should refresh state up to.
	// This allows all evictions and allocations to be materialized.
	// If any allocations were rejected due to stale data (node state,
	// over committed) this can be used to force a worker refresh.
	RefreshIndex uint64
	// AllocIndex is the Raft index in which the evictions and
	// allocations took place. This is used for the write index.
	AllocIndex uint64
}
// IsNoOp checks if this plan result would do nothing
func (p *PlanResult) IsNoOp() bool {
	return p.Deployment == nil &&
		len(p.DeploymentUpdates) == 0 &&
		len(p.NodeUpdate) == 0 &&
		len(p.NodeAllocation) == 0
}
// FullCommit is used to check if all the allocations in a plan
// were committed as part of the result. Returns if there was
// a match, and the number of expected and actual allocations.
func (p *PlanResult) FullCommit(plan *Plan) (bool, int, int) {
	expected := 0
	actual := 0
	for name, allocList := range plan.NodeAllocation {
		// A missing key yields a nil slice with length zero, so the
		// two-value ("comma ok") form is unnecessary here.
		didAlloc := p.NodeAllocation[name]
		expected += len(allocList)
		actual += len(didAlloc)
	}
	return actual == expected, expected, actual
}
// PlanAnnotations holds annotations made by the scheduler to give further debug
// information to operators.
type PlanAnnotations struct {
	// DesiredTGUpdates is the set of desired updates per task group.
	// Presumably keyed by task group name — verify against callers.
	DesiredTGUpdates map[string]*DesiredUpdates
}
// DesiredUpdates is the set of changes the scheduler would like to make given
// sufficient resources and cluster capacity. Each field counts allocations
// in the corresponding category (see GoString for the rendered form).
type DesiredUpdates struct {
	Ignore            uint64 // allocations left untouched
	Place             uint64 // new placements
	Migrate           uint64 // allocations moved to other nodes
	Stop              uint64 // allocations stopped
	InPlaceUpdate     uint64 // allocations updated without re-placement
	DestructiveUpdate uint64 // allocations stopped and re-placed
	Canary            uint64 // canary placements
}
// GoString renders the update counts in a compact human-readable form.
func (d *DesiredUpdates) GoString() string {
	const layout = "(place %d) (inplace %d) (destructive %d) (stop %d) (migrate %d) (ignore %d) (canary %d)"
	return fmt.Sprintf(layout,
		d.Place, d.InPlaceUpdate, d.DestructiveUpdate,
		d.Stop, d.Migrate, d.Ignore, d.Canary)
}
// MsgpackHandle is a shared handle for encoding/decoding of structs.
var MsgpackHandle = func() *codec.MsgpackHandle {
	h := &codec.MsgpackHandle{RawToString: true}
	// Sets the default type for decoding a map into a nil interface{}.
	// This is necessary in particular because we store the driver configs as a
	// nil interface{}.
	h.MapType = reflect.TypeOf(map[string]interface{}(nil))
	return h
}()
var (
	// JsonHandle and JsonHandlePretty are the codec handles to JSON encode
	// structs. The pretty handle will add indents for easier human consumption.
	JsonHandle = &codec.JsonHandle{
		HTMLCharsAsIs: true,
	}
	// JsonHandlePretty indents output with 4 spaces.
	JsonHandlePretty = &codec.JsonHandle{
		HTMLCharsAsIs: true,
		Indent:        4,
	}
)
// HashiMsgpackHandle is the shared handle for the hashicorp fork of the
// msgpack codec (see the hcodec import).
var HashiMsgpackHandle = func() *hcodec.MsgpackHandle {
	h := &hcodec.MsgpackHandle{RawToString: true}
	// Sets the default type for decoding a map into a nil interface{}.
	// This is necessary in particular because we store the driver configs as a
	// nil interface{}.
	h.MapType = reflect.TypeOf(map[string]interface{}(nil))
	return h
}()
// Decode is used to decode a MsgPack encoded object
func Decode(buf []byte, out interface{}) error {
	r := bytes.NewReader(buf)
	return codec.NewDecoder(r, MsgpackHandle).Decode(out)
}
// Encode is used to encode a MsgPack object with type prefix
func Encode(t MessageType, msg interface{}) ([]byte, error) {
	// The single leading byte identifies the message type for decoders.
	var b bytes.Buffer
	b.WriteByte(uint8(t))
	if err := codec.NewEncoder(&b, MsgpackHandle).Encode(msg); err != nil {
		return b.Bytes(), err
	}
	return b.Bytes(), nil
}
// KeyringResponse is a unified key response and can be used for install,
// remove, use, as well as listing key queries.
type KeyringResponse struct {
	// Messages maps a node/member identifier to an error or status message.
	Messages map[string]string
	// Keys maps each known key to a usage count.
	Keys map[string]int
	// NumNodes is the number of nodes that responded.
	NumNodes int
}

// KeyringRequest is request objects for serf key operations.
type KeyringRequest struct {
	// Key is the serf encryption key the operation applies to.
	Key string
}
// RecoverableError wraps an error and marks whether it is recoverable and could
// be retried or it is fatal.
type RecoverableError struct {
	// Err is the string form of the wrapped error.
	Err string
	// Recoverable indicates whether the caller may retry the operation.
	Recoverable bool
}
// NewRecoverableError is used to wrap an error and mark it as recoverable or
// not. A nil error yields nil.
func NewRecoverableError(e error, recoverable bool) error {
	if e == nil {
		return nil
	}
	wrapped := &RecoverableError{
		Recoverable: recoverable,
	}
	wrapped.Err = e.Error()
	return wrapped
}
// WrapRecoverable wraps an existing error in a new RecoverableError with a new
// message. If the error was recoverable before the returned error is as well;
// otherwise it is unrecoverable.
func WrapRecoverable(msg string, err error) error {
	return &RecoverableError{
		Err:         msg,
		Recoverable: IsRecoverable(err),
	}
}
// Error implements the error interface, returning the wrapped message.
func (r *RecoverableError) Error() string {
	return r.Err
}

// IsRecoverable reports whether the wrapped error may be retried.
func (r *RecoverableError) IsRecoverable() bool {
	return r.Recoverable
}
// Recoverable is an interface for errors to implement to indicate whether or
// not they are fatal or recoverable.
type Recoverable interface {
	error
	// IsRecoverable reports whether the error may be retried.
	IsRecoverable() bool
}
// IsRecoverable returns true if error is a RecoverableError with
// Recoverable=true. Otherwise false is returned.
func IsRecoverable(e error) bool {
	re, ok := e.(Recoverable)
	return ok && re.IsRecoverable()
}
// ACLPolicy is used to represent an ACL policy
type ACLPolicy struct {
	Name        string // Unique name
	Description string // Human readable
	Rules       string // HCL or JSON format
	Hash        []byte // Computed by SetHash from Name, Description and Rules
	CreateIndex uint64 // Raft index of creation
	ModifyIndex uint64 // Raft index of last modification
}
// SetHash is used to compute and set the hash of the ACL policy.
// The receiver is named "a" for consistency with the other ACLPolicy
// methods (Stub, Validate).
func (a *ACLPolicy) SetHash() []byte {
	// Initialize a 256bit Blake2 hash (32 bytes)
	hash, err := blake2b.New256(nil)
	if err != nil {
		panic(err)
	}
	// Write all the user set fields
	hash.Write([]byte(a.Name))
	hash.Write([]byte(a.Description))
	hash.Write([]byte(a.Rules))
	// Finalize the hash
	hashVal := hash.Sum(nil)
	// Set and return the hash
	a.Hash = hashVal
	return hashVal
}
// Stub converts the policy into its list-stub representation, dropping
// the Rules payload.
func (a *ACLPolicy) Stub() *ACLPolicyListStub {
	stub := new(ACLPolicyListStub)
	stub.Name = a.Name
	stub.Description = a.Description
	stub.Hash = a.Hash
	stub.CreateIndex = a.CreateIndex
	stub.ModifyIndex = a.ModifyIndex
	return stub
}
// Validate checks the policy name against validPolicyName, parses the rules,
// and bounds the description length, accumulating all failures.
func (a *ACLPolicy) Validate() error {
	var mErr multierror.Error

	if !validPolicyName.MatchString(a.Name) {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("invalid name '%s'", a.Name))
	}

	if _, err := acl.Parse(a.Rules); err != nil {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("failed to parse rules: %v", err))
	}

	if len(a.Description) > maxPolicyDescriptionLength {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("description longer than %d", maxPolicyDescriptionLength))
	}

	return mErr.ErrorOrNil()
}
// ACLPolicyListStub is used for listing ACL policies
type ACLPolicyListStub struct {
	Name        string
	Description string
	Hash        []byte
	CreateIndex uint64
	ModifyIndex uint64
}

// ACLPolicyListRequest is used to request a list of policies
type ACLPolicyListRequest struct {
	QueryOptions
}

// ACLPolicySpecificRequest is used to query a specific policy
type ACLPolicySpecificRequest struct {
	Name string
	QueryOptions
}

// ACLPolicySetRequest is used to query a set of policies
type ACLPolicySetRequest struct {
	Names []string
	QueryOptions
}

// ACLPolicyListResponse is used for a list request
type ACLPolicyListResponse struct {
	Policies []*ACLPolicyListStub
	QueryMeta
}

// SingleACLPolicyResponse is used to return a single policy
type SingleACLPolicyResponse struct {
	Policy *ACLPolicy
	QueryMeta
}

// ACLPolicySetResponse is used to return a set of policies
type ACLPolicySetResponse struct {
	// Policies is keyed by policy name.
	Policies map[string]*ACLPolicy
	QueryMeta
}

// ACLPolicyDeleteRequest is used to delete a set of policies
type ACLPolicyDeleteRequest struct {
	Names []string
	WriteRequest
}

// ACLPolicyUpsertRequest is used to upsert a set of policies
type ACLPolicyUpsertRequest struct {
	Policies []*ACLPolicy
	WriteRequest
}
// ACLToken represents a client token which is used to Authenticate
type ACLToken struct {
	AccessorID  string    // Public Accessor ID (UUID)
	SecretID    string    // Secret ID, private (UUID)
	Name        string    // Human friendly name
	Type        string    // Client or Management
	Policies    []string  // Policies this token ties to
	Global      bool      // Global or Region local
	Hash        []byte    // Computed by SetHash from the user-settable fields
	CreateTime  time.Time // Time of creation
	CreateIndex uint64    // Raft index of creation
	ModifyIndex uint64    // Raft index of last modification
}
var (
	// AnonymousACLToken is used when no SecretID is provided, and the
	// request is made anonymously.
	AnonymousACLToken = &ACLToken{
		AccessorID: "anonymous",
		Name:       "Anonymous Token",
		Type:       ACLClientToken,
		Policies:   []string{"anonymous"},
		Global:     false,
	}
)
// ACLTokenListStub is the listing representation of an ACLToken; it omits
// the SecretID (see ACLToken.Stub).
type ACLTokenListStub struct {
	AccessorID  string
	Name        string
	Type        string
	Policies    []string
	Global      bool
	Hash        []byte
	CreateTime  time.Time
	CreateIndex uint64
	ModifyIndex uint64
}
// SetHash is used to compute and set the hash of the ACL token. The hash
// covers the user-settable fields in a fixed order: name, type, each policy
// name, then a global/local marker.
func (a *ACLToken) SetHash() []byte {
	// Initialize a 256bit Blake2 hash (32 bytes)
	hash, err := blake2b.New256(nil)
	if err != nil {
		panic(err)
	}

	hash.Write([]byte(a.Name))
	hash.Write([]byte(a.Type))
	for _, policyName := range a.Policies {
		hash.Write([]byte(policyName))
	}
	scope := "local"
	if a.Global {
		scope = "global"
	}
	hash.Write([]byte(scope))

	// Finalize, store, and return the digest.
	a.Hash = hash.Sum(nil)
	return a.Hash
}
// Stub converts the token into its listing representation, omitting the
// private SecretID.
func (a *ACLToken) Stub() *ACLTokenListStub {
	stub := new(ACLTokenListStub)
	stub.AccessorID = a.AccessorID
	stub.Name = a.Name
	stub.Type = a.Type
	stub.Policies = a.Policies
	stub.Global = a.Global
	stub.Hash = a.Hash
	stub.CreateTime = a.CreateTime
	stub.CreateIndex = a.CreateIndex
	stub.ModifyIndex = a.ModifyIndex
	return stub
}
// Validate is used to sanity check a token: its name must fit within
// maxTokenNameLength, client tokens require policies, and management
// tokens forbid them. All failures are accumulated.
func (a *ACLToken) Validate() error {
	var mErr multierror.Error

	if len(a.Name) > maxTokenNameLength {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("token name too long"))
	}

	if a.Type == ACLClientToken {
		if len(a.Policies) == 0 {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("client token missing policies"))
		}
	} else if a.Type == ACLManagementToken {
		if len(a.Policies) != 0 {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("management token cannot be associated with policies"))
		}
	} else {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("token type must be client or management"))
	}

	return mErr.ErrorOrNil()
}
// PolicySubset checks if a given set of policies is a subset of the token
func (a *ACLToken) PolicySubset(policies []string) bool {
	// Management tokens are a superset of every policy set.
	if a.Type == ACLManagementToken {
		return true
	}

	held := make(map[string]struct{}, len(a.Policies))
	for _, name := range a.Policies {
		held[name] = struct{}{}
	}

	for _, name := range policies {
		if _, ok := held[name]; !ok {
			return false
		}
	}
	return true
}
// ACLTokenListRequest is used to request a list of tokens
type ACLTokenListRequest struct {
	GlobalOnly bool
	QueryOptions
}

// ACLTokenSpecificRequest is used to query a specific token
type ACLTokenSpecificRequest struct {
	AccessorID string
	QueryOptions
}

// ACLTokenSetRequest is used to query a set of tokens
type ACLTokenSetRequest struct {
	// NOTE(review): "AccessorIDS" is inconsistent with "AccessorIDs" in
	// ACLTokenDeleteRequest, but the field is exported/serialized and is
	// kept as is for compatibility.
	AccessorIDS []string
	QueryOptions
}

// ACLTokenListResponse is used for a list request
type ACLTokenListResponse struct {
	Tokens []*ACLTokenListStub
	QueryMeta
}

// SingleACLTokenResponse is used to return a single token
type SingleACLTokenResponse struct {
	Token *ACLToken
	QueryMeta
}

// ACLTokenSetResponse is used to return a set of token
type ACLTokenSetResponse struct {
	Tokens map[string]*ACLToken // Keyed by Accessor ID
	QueryMeta
}

// ResolveACLTokenRequest is used to resolve a specific token
type ResolveACLTokenRequest struct {
	SecretID string
	QueryOptions
}

// ResolveACLTokenResponse is used to resolve a single token
type ResolveACLTokenResponse struct {
	Token *ACLToken
	QueryMeta
}

// ACLTokenDeleteRequest is used to delete a set of tokens
type ACLTokenDeleteRequest struct {
	AccessorIDs []string
	WriteRequest
}

// ACLTokenBootstrapRequest is used to bootstrap ACLs
type ACLTokenBootstrapRequest struct {
	Token      *ACLToken // Not client specifiable
	ResetIndex uint64    // Reset index is used to clear the bootstrap token
	WriteRequest
}

// ACLTokenUpsertRequest is used to upsert a set of tokens
type ACLTokenUpsertRequest struct {
	Tokens []*ACLToken
	WriteRequest
}

// ACLTokenUpsertResponse is used to return from an ACLTokenUpsertRequest
type ACLTokenUpsertResponse struct {
	Tokens []*ACLToken
	WriteMeta
}
// Change the error message to use the original name for clarity, rather than the name after substituting environment variables with a placeholder.
package structs
import (
"bytes"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/hex"
"errors"
"fmt"
"io"
"net"
"os"
"path/filepath"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"golang.org/x/crypto/blake2b"
"github.com/gorhill/cronexpr"
"github.com/hashicorp/consul/api"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-version"
"github.com/hashicorp/nomad/acl"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/args"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/mitchellh/copystructure"
"github.com/ugorji/go/codec"
hcodec "github.com/hashicorp/go-msgpack/codec"
)
var (
	// ErrNoLeader is returned when the cluster has no elected leader.
	ErrNoLeader = fmt.Errorf("No cluster leader")
	// ErrNoRegionPath is returned when no route to the target region exists.
	ErrNoRegionPath = fmt.Errorf("No path to region")
	// ErrTokenNotFound is returned when an ACL token cannot be resolved.
	ErrTokenNotFound = errors.New("ACL token not found")
	// ErrPermissionDenied is returned when an operation is not authorized.
	ErrPermissionDenied = errors.New("Permission denied")

	// validPolicyName is used to validate a policy name
	validPolicyName = regexp.MustCompile("^[a-zA-Z0-9-]{1,128}$")
)
// MessageType identifies the kind of payload in an encoded message; Encode
// writes it as a single prefix byte ahead of the msgpack body.
type MessageType uint8

const (
	NodeRegisterRequestType MessageType = iota
	NodeDeregisterRequestType
	NodeUpdateStatusRequestType
	NodeUpdateDrainRequestType
	JobRegisterRequestType
	JobDeregisterRequestType
	EvalUpdateRequestType
	EvalDeleteRequestType
	AllocUpdateRequestType
	AllocClientUpdateRequestType
	ReconcileJobSummariesRequestType
	VaultAccessorRegisterRequestType
	// NOTE(review): "Degister" looks like a typo for "Deregister", but
	// the identifier is exported and its iota position is persisted, so
	// it is kept as is for compatibility.
	VaultAccessorDegisterRequestType
	ApplyPlanResultsRequestType
	DeploymentStatusUpdateRequestType
	DeploymentPromoteRequestType
	DeploymentAllocHealthRequestType
	DeploymentDeleteRequestType
	JobStabilityRequestType
	ACLPolicyUpsertRequestType
	ACLPolicyDeleteRequestType
	ACLTokenUpsertRequestType
	ACLTokenDeleteRequestType
	ACLTokenBootstrapRequestType
)
const (
	// IgnoreUnknownTypeFlag is set along with a MessageType
	// to indicate that the message type can be safely ignored
	// if it is not recognized. This is for future proofing, so
	// that new commands can be added in a way that won't cause
	// old servers to crash when the FSM attempts to process them.
	IgnoreUnknownTypeFlag MessageType = 128

	// ApiMajorVersion is returned as part of the Status.Version request.
	// It should be incremented anytime the APIs are changed in a way
	// that would break clients for sane client versioning.
	ApiMajorVersion = 1

	// ApiMinorVersion is returned as part of the Status.Version request.
	// It should be incremented anytime the APIs are changed to allow
	// for sane client versioning. Minor changes should be compatible
	// within the major version.
	ApiMinorVersion = 1

	// Keys used in the Status.Version response map.
	ProtocolVersion = "protocol"
	APIMajorVersion = "api.major"
	APIMinorVersion = "api.minor"

	// Artifact getter modes.
	GetterModeAny  = "any"
	GetterModeFile = "file"
	GetterModeDir  = "dir"

	// maxPolicyDescriptionLength limits a policy description length
	maxPolicyDescriptionLength = 256

	// maxTokenNameLength limits a ACL token name length
	maxTokenNameLength = 64

	// ACLClientToken and ACLManagementToken are the only types of tokens
	ACLClientToken     = "client"
	ACLManagementToken = "management"

	// DefaultNamespace is the default namespace.
	DefaultNamespace            = "default"
	DefaultNamespaceDescription = "Default shared namespace"
)
// Context defines the scope in which a search for Nomad object operates, and
// is also used to query the matching index value for this context
type Context string

const (
	Allocs Context = "allocs"
	// NOTE(review): singular "deployment" while the other values are
	// plural; the string is part of the API surface and kept as is.
	Deployments Context = "deployment"
	Evals       Context = "evals"
	Jobs        Context = "jobs"
	Nodes       Context = "nodes"
	Namespaces  Context = "namespaces"
	Quotas      Context = "quotas"
	All         Context = "all"
)

// NamespacedID is a tuple of an ID and a namespace
type NamespacedID struct {
	ID        string
	Namespace string
}
// RPCInfo is used to describe common information about query
type RPCInfo interface {
	// RequestRegion returns the region the request targets.
	RequestRegion() string
	// IsRead reports whether the request is a read.
	IsRead() bool
	// AllowStaleRead reports whether the request may be served by a follower.
	AllowStaleRead() bool
}

// QueryOptions is used to specify various flags for read queries
type QueryOptions struct {
	// The target region for this query
	Region string

	// Namespace is the target namespace for the query.
	Namespace string

	// If set, wait until query exceeds given index. Must be provided
	// with MaxQueryTime.
	MinQueryIndex uint64

	// Provided with MinQueryIndex to wait for change.
	MaxQueryTime time.Duration

	// If set, any follower can service the request. Results
	// may be arbitrarily stale.
	AllowStale bool

	// If set, used as prefix for resource list searches
	Prefix string

	// AuthToken is secret portion of the ACL token used for the request
	AuthToken string
}
// RequestRegion returns the region this query targets.
func (q QueryOptions) RequestRegion() string {
	return q.Region
}

// RequestNamespace returns the target namespace, defaulting to
// DefaultNamespace when unset.
func (q QueryOptions) RequestNamespace() string {
	if q.Namespace == "" {
		return DefaultNamespace
	}
	return q.Namespace
}

// QueryOption only applies to reads, so always true
func (q QueryOptions) IsRead() bool {
	return true
}

// AllowStaleRead reports whether a follower may serve this query.
func (q QueryOptions) AllowStaleRead() bool {
	return q.AllowStale
}
// WriteRequest carries the common fields for all write RPCs and implements
// the RPCInfo interface for them.
type WriteRequest struct {
	// The target region for this write
	Region string

	// Namespace is the target namespace for the write.
	Namespace string

	// AuthToken is secret portion of the ACL token used for the request
	AuthToken string
}

// RequestRegion returns the region this write targets.
func (w WriteRequest) RequestRegion() string {
	// The target region for this request
	return w.Region
}

// RequestNamespace returns the target namespace, defaulting to
// DefaultNamespace when unset.
func (w WriteRequest) RequestNamespace() string {
	if w.Namespace == "" {
		return DefaultNamespace
	}
	return w.Namespace
}

// WriteRequest only applies to writes, always false
func (w WriteRequest) IsRead() bool {
	return false
}

// AllowStaleRead is always false for writes; they must go to the leader.
func (w WriteRequest) AllowStaleRead() bool {
	return false
}
// QueryMeta allows a query response to include potentially
// useful metadata about a query
type QueryMeta struct {
	// This is the index associated with the read
	Index uint64

	// If AllowStale is used, this is time elapsed since
	// last contact between the follower and leader. This
	// can be used to gauge staleness.
	LastContact time.Duration

	// Used to indicate if there is a known leader node
	KnownLeader bool
}

// WriteMeta allows a write response to include potentially
// useful metadata about the write
type WriteMeta struct {
	// This is the index associated with the write
	Index uint64
}
// NodeRegisterRequest is used for Node.Register endpoint
// to register a node as being a schedulable entity.
type NodeRegisterRequest struct {
	Node *Node
	WriteRequest
}

// NodeDeregisterRequest is used for Node.Deregister endpoint
// to deregister a node as being a schedulable entity.
type NodeDeregisterRequest struct {
	NodeID string
	WriteRequest
}

// NodeServerInfo is used to in NodeUpdateResponse to return Nomad server
// information used in RPC server lists.
type NodeServerInfo struct {
	// RPCAdvertiseAddr is the IP endpoint that a Nomad Server wishes to
	// be contacted at for RPCs.
	RPCAdvertiseAddr string

	// RPCMajorVersion is the major version number the Nomad Server
	// supports
	RPCMajorVersion int32

	// RPCMinorVersion is the minor version number the Nomad Server
	// supports
	RPCMinorVersion int32

	// Datacenter is the datacenter that a Nomad server belongs to
	Datacenter string
}

// NodeUpdateStatusRequest is used for Node.UpdateStatus endpoint
// to update the status of a node.
type NodeUpdateStatusRequest struct {
	NodeID string
	Status string
	WriteRequest
}

// NodeUpdateDrainRequest is used for updating the drain status
type NodeUpdateDrainRequest struct {
	NodeID string
	Drain  bool
	WriteRequest
}

// NodeEvaluateRequest is used to re-evaluate the node
type NodeEvaluateRequest struct {
	NodeID string
	WriteRequest
}

// NodeSpecificRequest is used when we just need to specify a target node
type NodeSpecificRequest struct {
	NodeID   string
	SecretID string
	QueryOptions
}
// SearchResponse is used to return matches and information about whether
// the match list is truncated specific to each type of context.
type SearchResponse struct {
	// Map of context types to ids which match a specified prefix
	Matches map[Context][]string

	// Truncations indicates whether the matches for a particular context have
	// been truncated
	Truncations map[Context]bool

	QueryMeta
}

// SearchRequest is used to parameterize a request, and returns a
// list of matches made up of jobs, allocations, evaluations, and/or nodes,
// along with whether or not the information returned is truncated.
type SearchRequest struct {
	// Prefix is what ids are matched to. I.e, if the given prefix were
	// "a", potential matches might be "abcd" or "aabb"
	Prefix string

	// Context is the type that can be matched against. A context can be a job,
	// node, evaluation, allocation, or empty (indicated every context should be
	// matched)
	Context Context

	QueryOptions
}
// JobRegisterRequest is used for Job.Register endpoint
// to register a job as being a schedulable entity.
type JobRegisterRequest struct {
	Job *Job

	// If EnforceIndex is set then the job will only be registered if the passed
	// JobModifyIndex matches the current Jobs index. If the index is zero, the
	// register only occurs if the job is new.
	EnforceIndex   bool
	JobModifyIndex uint64

	// PolicyOverride is set when the user is attempting to override any policies
	PolicyOverride bool

	WriteRequest
}

// JobDeregisterRequest is used for Job.Deregister endpoint
// to deregister a job as being a schedulable entity.
type JobDeregisterRequest struct {
	JobID string

	// Purge controls whether the deregister purges the job from the system or
	// whether the job is just marked as stopped and will be removed by the
	// garbage collector
	Purge bool

	WriteRequest
}

// JobEvaluateRequest is used when we just need to re-evaluate a target job
type JobEvaluateRequest struct {
	JobID string
	WriteRequest
}

// JobSpecificRequest is used when we just need to specify a target job
type JobSpecificRequest struct {
	JobID     string
	AllAllocs bool
	QueryOptions
}

// JobListRequest is used to parameterize a list request
type JobListRequest struct {
	QueryOptions
}

// JobPlanRequest is used for the Job.Plan endpoint to trigger a dry-run
// evaluation of the Job.
type JobPlanRequest struct {
	Job  *Job
	Diff bool // Toggles an annotated diff

	// PolicyOverride is set when the user is attempting to override any policies
	PolicyOverride bool

	WriteRequest
}

// JobSummaryRequest is used when we just need to get a specific job summary
type JobSummaryRequest struct {
	JobID string
	QueryOptions
}

// JobDispatchRequest is used to dispatch a job based on a parameterized job
type JobDispatchRequest struct {
	JobID   string
	Payload []byte
	Meta    map[string]string
	WriteRequest
}

// JobValidateRequest is used to validate a job
type JobValidateRequest struct {
	Job *Job
	WriteRequest
}

// JobRevertRequest is used to revert a job to a prior version.
type JobRevertRequest struct {
	// JobID is the ID of the job being reverted
	JobID string

	// JobVersion the version to revert to.
	JobVersion uint64

	// EnforcePriorVersion if set will enforce that the job is at the given
	// version before reverting.
	EnforcePriorVersion *uint64

	WriteRequest
}

// JobStabilityRequest is used to marked a job as stable.
type JobStabilityRequest struct {
	// Job to set the stability on
	JobID      string
	JobVersion uint64

	// Set the stability
	Stable bool

	WriteRequest
}

// JobStabilityResponse is the response when marking a job as stable.
type JobStabilityResponse struct {
	WriteMeta
}
// NodeListRequest is used to parameterize a list request
type NodeListRequest struct {
	QueryOptions
}

// EvalUpdateRequest is used for upserting evaluations.
type EvalUpdateRequest struct {
	Evals     []*Evaluation
	EvalToken string
	WriteRequest
}

// EvalDeleteRequest is used for deleting an evaluation.
type EvalDeleteRequest struct {
	Evals  []string
	Allocs []string
	WriteRequest
}

// EvalSpecificRequest is used when we just need to specify a target evaluation
type EvalSpecificRequest struct {
	EvalID string
	QueryOptions
}

// EvalAckRequest is used to Ack/Nack a specific evaluation
type EvalAckRequest struct {
	EvalID string
	Token  string
	WriteRequest
}

// EvalDequeueRequest is used when we want to dequeue an evaluation
type EvalDequeueRequest struct {
	Schedulers       []string
	Timeout          time.Duration
	SchedulerVersion uint16
	WriteRequest
}

// EvalListRequest is used to list the evaluations
type EvalListRequest struct {
	QueryOptions
}

// PlanRequest is used to submit an allocation plan to the leader
type PlanRequest struct {
	Plan *Plan
	WriteRequest
}
// ApplyPlanResultsRequest is used by the planner to apply a Raft transaction
// committing the result of a plan.
type ApplyPlanResultsRequest struct {
	// AllocUpdateRequest holds the allocation updates to be made by the
	// scheduler.
	AllocUpdateRequest

	// Deployment is the deployment created or updated as a result of a
	// scheduling event.
	Deployment *Deployment

	// DeploymentUpdates is a set of status updates to apply to the given
	// deployments. This allows the scheduler to cancel any unneeded deployment
	// because the job is stopped or the update block is removed.
	DeploymentUpdates []*DeploymentStatusUpdate
}

// AllocUpdateRequest is used to submit changes to allocations, either
// to cause evictions or to assign new allocations. Both can be done
// within a single transaction
type AllocUpdateRequest struct {
	// Alloc is the list of new allocations to assign
	Alloc []*Allocation

	// Job is the shared parent job of the allocations.
	// It is pulled out since it is common to reduce payload size.
	Job *Job

	WriteRequest
}

// AllocListRequest is used to request a list of allocations
type AllocListRequest struct {
	QueryOptions
}

// AllocSpecificRequest is used to query a specific allocation
type AllocSpecificRequest struct {
	AllocID string
	QueryOptions
}

// AllocsGetRequest is used to query a set of allocations
type AllocsGetRequest struct {
	AllocIDs []string
	QueryOptions
}

// PeriodicForceRequest is used to force a specific periodic job.
type PeriodicForceRequest struct {
	JobID string
	WriteRequest
}
// ServerMembersResponse has the list of servers in a cluster
type ServerMembersResponse struct {
	ServerName   string
	ServerRegion string
	ServerDC     string
	Members      []*ServerMember
}

// ServerMember holds information about a Nomad server agent in a cluster
type ServerMember struct {
	Name   string
	Addr   net.IP
	Port   uint16
	Tags   map[string]string
	Status string
	// Protocol and delegate version ranges advertised by the member.
	ProtocolMin uint8
	ProtocolMax uint8
	ProtocolCur uint8
	DelegateMin uint8
	DelegateMax uint8
	DelegateCur uint8
}

// DeriveVaultTokenRequest is used to request wrapped Vault tokens for the
// following tasks in the given allocation
type DeriveVaultTokenRequest struct {
	NodeID   string
	SecretID string
	AllocID  string
	Tasks    []string
	QueryOptions
}

// VaultAccessorsRequest is used to operate on a set of Vault accessors
type VaultAccessorsRequest struct {
	Accessors []*VaultAccessor
}
// VaultAccessor is a reference to a created Vault token on behalf of
// an allocation's task.
type VaultAccessor struct {
	AllocID     string
	Task        string
	NodeID      string
	Accessor    string
	CreationTTL int

	// Raft Indexes
	CreateIndex uint64
}

// DeriveVaultTokenResponse returns the wrapped tokens for each requested task
type DeriveVaultTokenResponse struct {
	// Tasks is a mapping between the task name and the wrapped token
	Tasks map[string]string

	// Error stores any error that occurred. Errors are stored here so we can
	// communicate whether it is retriable
	Error *RecoverableError

	QueryMeta
}

// GenericRequest is used to request where no
// specific information is needed.
type GenericRequest struct {
	QueryOptions
}

// DeploymentListRequest is used to list the deployments
type DeploymentListRequest struct {
	QueryOptions
}

// DeploymentDeleteRequest is used for deleting deployments.
type DeploymentDeleteRequest struct {
	Deployments []string
	WriteRequest
}

// DeploymentStatusUpdateRequest is used to update the status of a deployment as
// well as optionally creating an evaluation atomically.
type DeploymentStatusUpdateRequest struct {
	// Eval, if set, is used to create an evaluation at the same time as
	// updating the status of a deployment.
	Eval *Evaluation

	// DeploymentUpdate is a status update to apply to the given
	// deployment.
	DeploymentUpdate *DeploymentStatusUpdate

	// Job is used to optionally upsert a job. This is used when setting the
	// allocation health results in a deployment failure and the deployment
	// auto-reverts to the latest stable job.
	Job *Job
}
// DeploymentAllocHealthRequest is used to set the health of a set of
// allocations as part of a deployment.
type DeploymentAllocHealthRequest struct {
	DeploymentID string

	// Marks these allocations as healthy, allow further allocations
	// to be rolled.
	HealthyAllocationIDs []string

	// Any unhealthy allocations fail the deployment
	UnhealthyAllocationIDs []string

	WriteRequest
}

// ApplyDeploymentAllocHealthRequest is used to apply an alloc health request via Raft
type ApplyDeploymentAllocHealthRequest struct {
	DeploymentAllocHealthRequest

	// An optional field to update the status of a deployment
	DeploymentUpdate *DeploymentStatusUpdate

	// Job is used to optionally upsert a job. This is used when setting the
	// allocation health results in a deployment failure and the deployment
	// auto-reverts to the latest stable job.
	Job *Job

	// An optional evaluation to create after promoting the canaries
	Eval *Evaluation
}

// DeploymentPromoteRequest is used to promote task groups in a deployment
type DeploymentPromoteRequest struct {
	DeploymentID string

	// All is to promote all task groups
	All bool

	// Groups is used to set the promotion status per task group
	Groups []string

	WriteRequest
}

// ApplyDeploymentPromoteRequest is used to apply a promotion request via Raft
type ApplyDeploymentPromoteRequest struct {
	DeploymentPromoteRequest

	// An optional evaluation to create after promoting the canaries
	Eval *Evaluation
}

// DeploymentPauseRequest is used to pause a deployment
type DeploymentPauseRequest struct {
	DeploymentID string

	// Pause sets the pause status
	Pause bool

	WriteRequest
}

// DeploymentSpecificRequest is used to make a request specific to a particular
// deployment
type DeploymentSpecificRequest struct {
	DeploymentID string
	QueryOptions
}

// DeploymentFailRequest is used to fail a particular deployment
type DeploymentFailRequest struct {
	DeploymentID string
	WriteRequest
}
// SingleDeploymentResponse is used to respond with a single deployment
type SingleDeploymentResponse struct {
	Deployment *Deployment
	QueryMeta
}
// GenericResponse is used to respond to a request where no
// specific response information is needed.
type GenericResponse struct {
	WriteMeta
}
// VersionResponse is used for the Status.Version response
type VersionResponse struct {
	Build string
	Versions map[string]int
	QueryMeta
}
// JobRegisterResponse is used to respond to a job registration
type JobRegisterResponse struct {
	EvalID string
	EvalCreateIndex uint64
	JobModifyIndex uint64
	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string
	QueryMeta
}
// JobDeregisterResponse is used to respond to a job deregistration
type JobDeregisterResponse struct {
	EvalID string
	EvalCreateIndex uint64
	JobModifyIndex uint64
	QueryMeta
}
// JobValidateResponse is the response from validate request
type JobValidateResponse struct {
	// DriverConfigValidated indicates whether the agent validated the driver
	// config
	DriverConfigValidated bool
	// ValidationErrors is a list of validation errors
	ValidationErrors []string
	// Error is a string version of any error that may have occurred
	Error string
	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string
}
// NodeUpdateResponse is used to respond to a node update
type NodeUpdateResponse struct {
	HeartbeatTTL time.Duration
	EvalIDs []string
	EvalCreateIndex uint64
	NodeModifyIndex uint64
	// LeaderRPCAddr is the RPC address of the current Raft Leader. If
	// empty, the current Nomad Server is in the minority of a partition.
	LeaderRPCAddr string
	// NumNodes is the number of Nomad nodes attached to this quorum of
	// Nomad Servers at the time of the response. This value can
	// fluctuate based on the health of the cluster between heartbeats.
	NumNodes int32
	// Servers is the full list of known Nomad servers in the local
	// region.
	Servers []*NodeServerInfo
	QueryMeta
}
// NodeDrainUpdateResponse is used to respond to a node drain update
type NodeDrainUpdateResponse struct {
	EvalIDs []string
	EvalCreateIndex uint64
	NodeModifyIndex uint64
	QueryMeta
}
// NodeAllocsResponse is used to return allocs for a single node
type NodeAllocsResponse struct {
	Allocs []*Allocation
	QueryMeta
}
// NodeClientAllocsResponse is used to return allocs meta data for a single node
type NodeClientAllocsResponse struct {
	// Allocs maps allocation IDs to their allocation modify index
	Allocs map[string]uint64
	// MigrateTokens are used when ACLs are enabled to allow cross node,
	// authenticated access to sticky volumes
	MigrateTokens map[string]string
	QueryMeta
}
// SingleNodeResponse is used to return a single node
type SingleNodeResponse struct {
	Node *Node
	QueryMeta
}
// NodeListResponse is used for a list request
type NodeListResponse struct {
	Nodes []*NodeListStub
	QueryMeta
}
// SingleJobResponse is used to return a single job
type SingleJobResponse struct {
	Job *Job
	QueryMeta
}
// JobSummaryResponse is used to return a single job summary
type JobSummaryResponse struct {
	JobSummary *JobSummary
	QueryMeta
}
// JobDispatchResponse is used to respond to a parameterized job dispatch,
// returning the ID of the dispatched job and the evaluation created for it.
type JobDispatchResponse struct {
	DispatchedJobID string
	EvalID string
	EvalCreateIndex uint64
	JobCreateIndex uint64
	WriteMeta
}
// JobListResponse is used for a list request
type JobListResponse struct {
	Jobs []*JobListStub
	QueryMeta
}
// JobVersionsRequest is used to get a jobs versions
type JobVersionsRequest struct {
	JobID string
	// Diffs requests that a diff between consecutive versions be included
	Diffs bool
	QueryOptions
}
// JobVersionsResponse is used for a job get versions request
type JobVersionsResponse struct {
	Versions []*Job
	Diffs []*JobDiff
	QueryMeta
}
// JobPlanResponse is used to respond to a job plan request
type JobPlanResponse struct {
	// Annotations stores annotations explaining decisions the scheduler made.
	Annotations *PlanAnnotations
	// FailedTGAllocs is the placement failures per task group.
	FailedTGAllocs map[string]*AllocMetric
	// JobModifyIndex is the modification index of the job. The value can be
	// used when running `nomad run` to ensure that the Job wasn't modified
	// since the last plan. If the job is being created, the value is zero.
	JobModifyIndex uint64
	// CreatedEvals is the set of evaluations created by the scheduler. The
	// reasons for this can be rolling-updates or blocked evals.
	CreatedEvals []*Evaluation
	// Diff contains the diff of the job and annotations on whether the change
	// causes an in-place update or create/destroy
	Diff *JobDiff
	// NextPeriodicLaunch is the time duration till the job would be launched if
	// submitted.
	NextPeriodicLaunch time.Time
	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string
	WriteMeta
}
// SingleAllocResponse is used to return a single allocation
type SingleAllocResponse struct {
	Alloc *Allocation
	QueryMeta
}
// AllocsGetResponse is used to return a set of allocations
type AllocsGetResponse struct {
	Allocs []*Allocation
	QueryMeta
}
// JobAllocationsResponse is used to return the allocations for a job
type JobAllocationsResponse struct {
	Allocations []*AllocListStub
	QueryMeta
}
// JobEvaluationsResponse is used to return the evaluations for a job
type JobEvaluationsResponse struct {
	Evaluations []*Evaluation
	QueryMeta
}
// SingleEvalResponse is used to return a single evaluation
type SingleEvalResponse struct {
	Eval *Evaluation
	QueryMeta
}
// EvalDequeueResponse is used to return from a dequeue
type EvalDequeueResponse struct {
	Eval *Evaluation
	Token string
	// WaitIndex is the Raft index the worker should wait until invoking the
	// scheduler.
	WaitIndex uint64
	QueryMeta
}
// GetWaitIndex is used to retrieve the Raft index in which state should be at
// or beyond before invoking the scheduler.
func (e *EvalDequeueResponse) GetWaitIndex() uint64 {
	// Responses from 0.7.0 and above always populate WaitIndex, so prefer
	// it whenever it is set.
	if idx := e.WaitIndex; idx != 0 {
		return idx
	}
	// Fall back to the evaluation's own modify index for older servers.
	if e.Eval != nil {
		return e.Eval.ModifyIndex
	}
	// This should never happen
	return 1
}
// PlanResponse is used to return from a PlanRequest
type PlanResponse struct {
	Result *PlanResult
	WriteMeta
}
// AllocListResponse is used for a list request
type AllocListResponse struct {
	Allocations []*AllocListStub
	QueryMeta
}
// DeploymentListResponse is used for a list request
type DeploymentListResponse struct {
	Deployments []*Deployment
	QueryMeta
}
// EvalListResponse is used for a list request
type EvalListResponse struct {
	Evaluations []*Evaluation
	QueryMeta
}
// EvalAllocationsResponse is used to return the allocations for an evaluation
type EvalAllocationsResponse struct {
	Allocations []*AllocListStub
	QueryMeta
}
// PeriodicForceResponse is used to respond to a periodic job force launch
type PeriodicForceResponse struct {
	EvalID string
	EvalCreateIndex uint64
	WriteMeta
}
// DeploymentUpdateResponse is used to respond to a deployment change. The
// response will include the modify index of the deployment as well as details
// of any triggered evaluation.
type DeploymentUpdateResponse struct {
	EvalID string
	EvalCreateIndex uint64
	DeploymentModifyIndex uint64
	// RevertedJobVersion is the version the job was reverted to. If unset, the
	// job wasn't reverted
	RevertedJobVersion *uint64
	WriteMeta
}
// Node status values: the lifecycle states a client node can be in.
const (
	NodeStatusInit = "initializing"
	NodeStatusReady = "ready"
	NodeStatusDown = "down"
)
// ShouldDrainNode checks if a given node status should trigger an
// evaluation. Some states don't require any further action.
func ShouldDrainNode(status string) bool {
switch status {
case NodeStatusInit, NodeStatusReady:
return false
case NodeStatusDown:
return true
default:
panic(fmt.Sprintf("unhandled node status %s", status))
}
}
// ValidNodeStatus is used to check if a node status is valid
func ValidNodeStatus(status string) bool {
	return status == NodeStatusInit ||
		status == NodeStatusReady ||
		status == NodeStatusDown
}
// Node is a representation of a schedulable client node
type Node struct {
	// ID is a unique identifier for the node. It can be constructed
	// by doing a concatenation of the Name and Datacenter as a simple
	// approach. Alternatively a UUID may be used.
	ID string
	// SecretID is an ID that is only known by the Node and the set of Servers.
	// It is not accessible via the API and is used to authenticate nodes
	// conducting privileged activities.
	SecretID string
	// Datacenter for this node
	Datacenter string
	// Node name
	Name string
	// HTTPAddr is the address on which the Nomad client is listening for http
	// requests
	HTTPAddr string
	// TLSEnabled indicates if the Agent has TLS enabled for the HTTP API
	TLSEnabled bool
	// Attributes is an arbitrary set of key/value
	// data that can be used for constraints. Examples
	// include "kernel.name=linux", "arch=386", "driver.docker=1",
	// "docker.runtime=1.8.3"
	Attributes map[string]string
	// Resources is the available resources on the client.
	// For example 'cpu=2' 'memory=2048'
	Resources *Resources
	// Reserved is the set of resources that are reserved,
	// and should be subtracted from the total resources for
	// the purposes of scheduling. This may provide certain
	// high-watermark tolerances or because of external schedulers
	// consuming resources.
	Reserved *Resources
	// Links are used to 'link' this client to external
	// systems. For example 'consul=foo.dc1' 'aws=i-83212'
	// 'ami=ami-123'
	Links map[string]string
	// Meta is used to associate arbitrary metadata with this
	// client. This is opaque to Nomad.
	Meta map[string]string
	// NodeClass is an opaque identifier used to group nodes
	// together for the purpose of determining scheduling pressure.
	NodeClass string
	// ComputedClass is a unique id that identifies nodes with a common set of
	// attributes and capabilities.
	ComputedClass string
	// Drain is controlled by the servers, and not the client.
	// If true, no jobs will be scheduled to this node, and existing
	// allocations will be drained.
	Drain bool
	// Status of this node
	Status string
	// StatusDescription is meant to provide more human useful information
	StatusDescription string
	// StatusUpdatedAt is the time stamp at which the state of the node was
	// updated
	StatusUpdatedAt int64
	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
// Ready returns if the node is ready for running allocations
func (n *Node) Ready() bool {
	if n.Drain {
		return false
	}
	return n.Status == NodeStatusReady
}
// Copy returns a deep copy of the node, or nil for a nil receiver. The
// reference-typed fields (maps and nested Resources) are copied so mutations
// of the copy do not affect the original.
func (n *Node) Copy() *Node {
	if n == nil {
		return nil
	}
	nn := new(Node)
	*nn = *n
	nn.Attributes = helper.CopyMapStringString(nn.Attributes)
	nn.Resources = nn.Resources.Copy()
	nn.Reserved = nn.Reserved.Copy()
	nn.Links = helper.CopyMapStringString(nn.Links)
	nn.Meta = helper.CopyMapStringString(nn.Meta)
	return nn
}
// TerminalStatus returns if the current status is terminal and
// will no longer transition.
func (n *Node) TerminalStatus() bool {
	// Down is the only terminal node state.
	return n.Status == NodeStatusDown
}
// Stub returns a summarized version of the node suitable for list endpoints.
// The Version field is read from the node's "nomad.version" attribute.
func (n *Node) Stub() *NodeListStub {
	return &NodeListStub{
		ID: n.ID,
		Datacenter: n.Datacenter,
		Name: n.Name,
		NodeClass: n.NodeClass,
		Version: n.Attributes["nomad.version"],
		Drain: n.Drain,
		Status: n.Status,
		StatusDescription: n.StatusDescription,
		CreateIndex: n.CreateIndex,
		ModifyIndex: n.ModifyIndex,
	}
}
// NodeListStub is used to return a subset of node information
// for the node list
type NodeListStub struct {
	ID string
	Datacenter string
	Name string
	NodeClass string
	Version string
	Drain bool
	Status string
	StatusDescription string
	CreateIndex uint64
	ModifyIndex uint64
}
// Networks defined for a task on the Resources struct.
type Networks []*NetworkResource
// Port returns the IP and port value assigned to the given label, searching
// both reserved and dynamic ports across all networks. It returns empty
// values if the label is not found.
func (ns Networks) Port(label string) (string, int) {
	for _, net := range ns {
		// Reserved ports take search precedence over dynamic ones.
		for _, group := range [][]Port{net.ReservedPorts, net.DynamicPorts} {
			for _, p := range group {
				if p.Label == label {
					return net.IP, p.Value
				}
			}
		}
	}
	return "", 0
}
// Resources is used to define the resources available
// on a client
type Resources struct {
	CPU int
	MemoryMB int
	DiskMB int
	IOPS int
	Networks Networks
}
// BytesInMegabyte is the conversion factor between DiskMB and bytes.
const (
	BytesInMegabyte = 1024 * 1024
)
// DefaultResources is a small resources object that contains the
// default resources requests that we will provide to an object.
// --- THIS FUNCTION IS REPLICATED IN api/resources.go and should
// be kept in sync.
func DefaultResources() *Resources {
	return &Resources{
		CPU: 100,
		MemoryMB: 300,
		IOPS: 0,
	}
}
// MinResources is a small resources object that contains the
// absolute minimum resources that we will provide to an object.
// This should not be confused with the defaults which are
// provided in Canonicalize() --- THIS FUNCTION IS REPLICATED IN
// api/resources.go and should be kept in sync.
func MinResources() *Resources {
	return &Resources{
		CPU: 100,
		MemoryMB: 10,
		IOPS: 0,
	}
}
// DiskInBytes returns the amount of disk resources in bytes.
func (r *Resources) DiskInBytes() int64 {
	// Widen to int64 before multiplying: multiplying in int first would
	// overflow on 32-bit platforms for DiskMB >= 2048 (2 GiB).
	return int64(r.DiskMB) * BytesInMegabyte
}
// Merge merges this resource with another resource. Non-zero scalar fields
// of other overwrite the corresponding fields of r; a non-empty Networks
// slice replaces r.Networks wholesale (it is not merged element-wise).
func (r *Resources) Merge(other *Resources) {
	if other.CPU != 0 {
		r.CPU = other.CPU
	}
	if other.MemoryMB != 0 {
		r.MemoryMB = other.MemoryMB
	}
	if other.DiskMB != 0 {
		r.DiskMB = other.DiskMB
	}
	if other.IOPS != 0 {
		r.IOPS = other.IOPS
	}
	if len(other.Networks) != 0 {
		r.Networks = other.Networks
	}
}
// Canonicalize normalizes the resources so that semantically equal values
// compare equal under reflect.DeepEqual.
func (r *Resources) Canonicalize() {
	// Ensure that an empty and nil slices are treated the same to avoid scheduling
	// problems since we use reflect DeepEquals.
	if len(r.Networks) == 0 {
		r.Networks = nil
	}
	for _, n := range r.Networks {
		n.Canonicalize()
	}
}
// MeetsMinResources returns an error if the resources specified are less than
// the minimum allowed.
// This is based on the minimums defined in the Resources type
func (r *Resources) MeetsMinResources() error {
	var mErr multierror.Error
	minResources := MinResources()
	if r.CPU < minResources.CPU {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum CPU value is %d; got %d", minResources.CPU, r.CPU))
	}
	if r.MemoryMB < minResources.MemoryMB {
		// Report the MemoryMB minimum, not the CPU minimum (previous
		// copy-paste bug printed minResources.CPU here).
		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MemoryMB value is %d; got %d", minResources.MemoryMB, r.MemoryMB))
	}
	if r.IOPS < minResources.IOPS {
		// Report the IOPS minimum, not the CPU minimum.
		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum IOPS value is %d; got %d", minResources.IOPS, r.IOPS))
	}
	for i, n := range r.Networks {
		if err := n.MeetsMinResources(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("network resource at index %d failed: %v", i, err))
		}
	}
	return mErr.ErrorOrNil()
}
// Copy returns a deep copy of the resources, or nil for a nil receiver.
func (r *Resources) Copy() *Resources {
	if r == nil {
		return nil
	}
	c := new(Resources)
	*c = *r
	// Deep-copy the networks; a nil slice stays nil.
	if r.Networks != nil {
		c.Networks = make([]*NetworkResource, len(r.Networks))
		for i, net := range r.Networks {
			c.Networks[i] = net.Copy()
		}
	}
	return c
}
// NetIndex finds the matching net index using device name
func (r *Resources) NetIndex(n *NetworkResource) int {
	for i := range r.Networks {
		if r.Networks[i].Device == n.Device {
			return i
		}
	}
	return -1
}
// Superset checks if one set of resources is a superset
// of another. This ignores network resources, and the NetworkIndex
// should be used for that.
func (r *Resources) Superset(other *Resources) (bool, string) {
	// Report the first dimension that is exhausted, if any.
	switch {
	case r.CPU < other.CPU:
		return false, "cpu exhausted"
	case r.MemoryMB < other.MemoryMB:
		return false, "memory exhausted"
	case r.DiskMB < other.DiskMB:
		return false, "disk exhausted"
	case r.IOPS < other.IOPS:
		return false, "iops exhausted"
	}
	return true, ""
}
// Add adds the resources of the delta to this, potentially
// returning an error if not possible. A nil delta is a no-op; the error
// result is currently always nil and kept for interface stability.
func (r *Resources) Add(delta *Resources) error {
	if delta == nil {
		return nil
	}
	r.CPU += delta.CPU
	r.MemoryMB += delta.MemoryMB
	r.DiskMB += delta.DiskMB
	r.IOPS += delta.IOPS
	for _, n := range delta.Networks {
		// Find the matching interface by device name (see NetIndex)
		idx := r.NetIndex(n)
		if idx == -1 {
			r.Networks = append(r.Networks, n.Copy())
		} else {
			r.Networks[idx].Add(n)
		}
	}
	return nil
}
// GoString returns the Go-syntax representation of the dereferenced value,
// satisfying fmt.GoStringer for %#v formatting.
func (r *Resources) GoString() string {
	return fmt.Sprintf("*%#v", *r)
}
// Port is a label/value pair describing a single port assignment.
type Port struct {
	Label string
	Value int
}
// NetworkResource is used to represent available network
// resources
type NetworkResource struct {
	Device string // Name of the device
	CIDR string // CIDR block of addresses
	IP string // Host IP address
	MBits int // Throughput
	ReservedPorts []Port // Host Reserved ports
	DynamicPorts []Port // Host Dynamically assigned ports
}
// Canonicalize normalizes the network resource so that semantically equal
// values compare equal under reflect.DeepEqual.
func (n *NetworkResource) Canonicalize() {
	// Ensure that an empty and nil slices are treated the same to avoid scheduling
	// problems since we use reflect DeepEquals.
	if len(n.ReservedPorts) == 0 {
		n.ReservedPorts = nil
	}
	if len(n.DynamicPorts) == 0 {
		n.DynamicPorts = nil
	}
}
// MeetsMinResources returns an error if the resources specified are less than
// the minimum allowed. The only constraint checked here is MBits >= 1.
func (n *NetworkResource) MeetsMinResources() error {
	var mErr multierror.Error
	if n.MBits < 1 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MBits value is 1; got %d", n.MBits))
	}
	return mErr.ErrorOrNil()
}
// Copy returns a deep copy of the network resource, or nil for a nil
// receiver. Nil port slices stay nil so reflect.DeepEqual comparisons are
// unaffected by copying.
func (n *NetworkResource) Copy() *NetworkResource {
	if n == nil {
		return nil
	}
	c := new(NetworkResource)
	*c = *n
	if n.ReservedPorts != nil {
		c.ReservedPorts = append(make([]Port, 0, len(n.ReservedPorts)), n.ReservedPorts...)
	}
	if n.DynamicPorts != nil {
		c.DynamicPorts = append(make([]Port, 0, len(n.DynamicPorts)), n.DynamicPorts...)
	}
	return c
}
// Add adds the resources of the delta to this, potentially
// returning an error if not possible.
func (n *NetworkResource) Add(delta *NetworkResource) {
	n.MBits += delta.MBits
	if len(delta.ReservedPorts) > 0 {
		n.ReservedPorts = append(n.ReservedPorts, delta.ReservedPorts...)
	}
	n.DynamicPorts = append(n.DynamicPorts, delta.DynamicPorts...)
}
// GoString returns the Go-syntax representation of the dereferenced value,
// satisfying fmt.GoStringer for %#v formatting.
func (n *NetworkResource) GoString() string {
	return fmt.Sprintf("*%#v", *n)
}
// PortLabels returns a map of port labels to their assigned host ports.
func (n *NetworkResource) PortLabels() map[string]int {
	labels := make(map[string]int, len(n.ReservedPorts)+len(n.DynamicPorts))
	for _, group := range [][]Port{n.ReservedPorts, n.DynamicPorts} {
		for _, p := range group {
			labels[p.Label] = p.Value
		}
	}
	return labels
}
const (
	// JobTypeCore is reserved for internal system tasks and is
	// always handled by the CoreScheduler.
	JobTypeCore = "_core"
	JobTypeService = "service"
	JobTypeBatch = "batch"
	JobTypeSystem = "system"
)
const (
	JobStatusPending = "pending" // Pending means the job is waiting on scheduling
	JobStatusRunning = "running" // Running means the job has non-terminal allocations
	JobStatusDead = "dead" // Dead means all evaluations and allocations are terminal
)
const (
	// JobMinPriority is the minimum allowed priority
	JobMinPriority = 1
	// JobDefaultPriority is the default priority if not
	// specified.
	JobDefaultPriority = 50
	// JobMaxPriority is the maximum allowed priority
	JobMaxPriority = 100
	// Ensure CoreJobPriority is higher than any user
	// specified job so that it gets priority. This is important
	// for the system to remain healthy.
	CoreJobPriority = JobMaxPriority * 2
	// JobTrackedVersions is the number of historic job versions that are
	// kept.
	JobTrackedVersions = 6
)
// Job is the scope of a scheduling request to Nomad. It is the largest
// scoped object, and is a named collection of task groups. Each task group
// is further composed of tasks. A task group (TG) is the unit of scheduling
// however.
type Job struct {
	// Stop marks whether the user has stopped the job. A stopped job will
	// have all created allocations stopped and acts as a way to stop a job
	// without purging it from the system. This allows existing allocs to be
	// queried and the job to be inspected as it is being killed.
	Stop bool
	// Region is the Nomad region that handles scheduling this job
	Region string
	// Namespace is the namespace the job is submitted into.
	Namespace string
	// ID is a unique identifier for the job per region. It can be
	// specified hierarchically like LineOfBiz/OrgName/Team/Project
	ID string
	// ParentID is the unique identifier of the job that spawned this job.
	ParentID string
	// Name is the logical name of the job used to refer to it. This is unique
	// per region, but not unique globally.
	Name string
	// Type is used to control various behaviors about the job. Most jobs
	// are service jobs, meaning they are expected to be long lived.
	// Some jobs are batch oriented meaning they run and then terminate.
	// This can be extended in the future to support custom schedulers.
	Type string
	// Priority is used to control scheduling importance and if this job
	// can preempt other jobs.
	Priority int
	// AllAtOnce is used to control if incremental scheduling of task groups
	// is allowed or if we must do a gang scheduling of the entire job. This
	// can slow down larger jobs if resources are not available.
	AllAtOnce bool
	// Datacenters contains all the datacenters this job is allowed to span
	Datacenters []string
	// Constraints can be specified at a job level and apply to
	// all the task groups and tasks.
	Constraints []*Constraint
	// TaskGroups are the collections of task groups that this job needs
	// to run. Each task group is an atomic unit of scheduling and placement.
	TaskGroups []*TaskGroup
	// COMPAT: Remove in 0.7.0. Stagger is deprecated in 0.6.0.
	Update UpdateStrategy
	// Periodic is used to define the interval the job is run at.
	Periodic *PeriodicConfig
	// ParameterizedJob is used to specify the job as a parameterized job
	// for dispatching.
	ParameterizedJob *ParameterizedJobConfig
	// Payload is the payload supplied when the job was dispatched.
	Payload []byte
	// Meta is used to associate arbitrary metadata with this
	// job. This is opaque to Nomad.
	Meta map[string]string
	// VaultToken is the Vault token that proves the submitter of the job has
	// access to the specified Vault policies. This field is only used to
	// transfer the token and is not stored after Job submission.
	VaultToken string
	// Job status
	Status string
	// StatusDescription is meant to provide more human useful information
	StatusDescription string
	// Stable marks a job as stable. Stability is only defined on "service" and
	// "system" jobs. The stability of a job will be set automatically as part
	// of a deployment and can be manually set via APIs.
	Stable bool
	// Version is a monotonically increasing version number that is incremented
	// on each job register.
	Version uint64
	// SubmitTime is the time at which the job was submitted as a UnixNano in
	// UTC
	SubmitTime int64
	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
	JobModifyIndex uint64
}
// Canonicalize is used to canonicalize fields in the Job. This should be called
// when registering a Job. A set of warnings are returned if the job was changed
// in anyway that the user should be made aware of.
func (j *Job) Canonicalize() (warnings error) {
	if j == nil {
		return nil
	}
	var mErr multierror.Error
	// Ensure that an empty and nil map are treated the same to avoid scheduling
	// problems since we use reflect DeepEquals.
	if len(j.Meta) == 0 {
		j.Meta = nil
	}
	// Ensure the job is in a namespace.
	if j.Namespace == "" {
		j.Namespace = DefaultNamespace
	}
	for _, tg := range j.TaskGroups {
		tg.Canonicalize(j)
	}
	if j.ParameterizedJob != nil {
		j.ParameterizedJob.Canonicalize()
	}
	if j.Periodic != nil {
		j.Periodic.Canonicalize()
	}
	// COMPAT: Remove in 0.7.0
	// Rewrite any job that has an update block with pre 0.6.0 syntax.
	jobHasOldUpdate := j.Update.Stagger > 0 && j.Update.MaxParallel > 0
	if jobHasOldUpdate && j.Type != JobTypeBatch {
		// Build an appropriate update block and copy it down to each task group
		base := DefaultUpdateStrategy.Copy()
		base.MaxParallel = j.Update.MaxParallel
		base.MinHealthyTime = j.Update.Stagger
		// Add to each task group, modifying as needed
		upgraded := false
		l := len(j.TaskGroups)
		for _, tg := range j.TaskGroups {
			// The task group doesn't need upgrading if it has an update block with the new syntax
			u := tg.Update
			if u != nil && u.Stagger > 0 && u.MaxParallel > 0 &&
				u.HealthCheck != "" && u.MinHealthyTime > 0 && u.HealthyDeadline > 0 {
				continue
			}
			upgraded = true
			// The MaxParallel for the job should be 10% of the total count
			// unless there is just one task group then we can infer the old
			// max parallel should be the new
			tgu := base.Copy()
			if l != 1 {
				// RoundTo 10%
				var percent float64 = float64(tg.Count) * 0.1
				tgu.MaxParallel = int(percent + 0.5)
			}
			// Safety guards: at least one, and never more than the group count
			if tgu.MaxParallel == 0 {
				tgu.MaxParallel = 1
			} else if tgu.MaxParallel > tg.Count {
				tgu.MaxParallel = tg.Count
			}
			tg.Update = tgu
		}
		if upgraded {
			w := "A best effort conversion to new update stanza introduced in v0.6.0 applied. " +
				"Please update upgrade stanza before v0.7.0."
			multierror.Append(&mErr, fmt.Errorf(w))
		}
	}
	// Ensure that the batch job doesn't have new style or old style update
	// stanza. Unfortunately are scanning here because we have to deprecate over
	// a release so we can't check in the task group since that may be new style
	// but wouldn't capture the old style and we don't want to have duplicate
	// warnings.
	if j.Type == JobTypeBatch {
		displayWarning := jobHasOldUpdate
		j.Update.Stagger = 0
		j.Update.MaxParallel = 0
		j.Update.HealthCheck = ""
		j.Update.MinHealthyTime = 0
		j.Update.HealthyDeadline = 0
		j.Update.AutoRevert = false
		j.Update.Canary = 0
		// Remove any update spec from the task groups
		for _, tg := range j.TaskGroups {
			if tg.Update != nil {
				displayWarning = true
				tg.Update = nil
			}
		}
		if displayWarning {
			w := "Update stanza is disallowed for batch jobs since v0.6.0. " +
				"The update block has automatically been removed"
			multierror.Append(&mErr, fmt.Errorf(w))
		}
	}
	return mErr.ErrorOrNil()
}
// Copy returns a deep copy of the Job. It is expected that callers use recover.
// This job can panic if the deep copy failed as it uses reflection.
func (j *Job) Copy() *Job {
	if j == nil {
		return nil
	}
	nj := new(Job)
	*nj = *j
	nj.Datacenters = helper.CopySliceString(nj.Datacenters)
	nj.Constraints = CopySliceConstraints(nj.Constraints)
	if j.TaskGroups != nil {
		tgs := make([]*TaskGroup, len(nj.TaskGroups))
		for i, tg := range nj.TaskGroups {
			tgs[i] = tg.Copy()
		}
		nj.TaskGroups = tgs
	}
	nj.Periodic = nj.Periodic.Copy()
	nj.Meta = helper.CopyMapStringString(nj.Meta)
	nj.ParameterizedJob = nj.ParameterizedJob.Copy()
	return nj
}
// Validate is used to sanity check a job input. All detected problems are
// accumulated into a single multierror so the caller sees every issue at once.
func (j *Job) Validate() error {
	var mErr multierror.Error
	if j.Region == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job region"))
	}
	if j.ID == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job ID"))
	} else if strings.Contains(j.ID, " ") {
		mErr.Errors = append(mErr.Errors, errors.New("Job ID contains a space"))
	}
	if j.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job name"))
	}
	if j.Namespace == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Job must be in a namespace"))
	}
	switch j.Type {
	case JobTypeCore, JobTypeService, JobTypeBatch, JobTypeSystem:
	case "":
		mErr.Errors = append(mErr.Errors, errors.New("Missing job type"))
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Invalid job type: %q", j.Type))
	}
	if j.Priority < JobMinPriority || j.Priority > JobMaxPriority {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Job priority must be between [%d, %d]", JobMinPriority, JobMaxPriority))
	}
	if len(j.Datacenters) == 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job datacenters"))
	}
	if len(j.TaskGroups) == 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job task groups"))
	}
	for idx, constr := range j.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}
	// Check for duplicate task groups
	taskGroups := make(map[string]int)
	for idx, tg := range j.TaskGroups {
		if tg.Name == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d missing name", idx+1))
		} else if existing, ok := taskGroups[tg.Name]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d redefines '%s' from group %d", idx+1, tg.Name, existing+1))
		} else {
			taskGroups[tg.Name] = idx
		}
		// Use the JobTypeSystem constant rather than a bare "system"
		// literal for consistency with the type switch above.
		if j.Type == JobTypeSystem && tg.Count > 1 {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("Job task group %s has count %d. Count cannot exceed 1 with system scheduler",
					tg.Name, tg.Count))
		}
	}
	// Validate the task group
	for _, tg := range j.TaskGroups {
		if err := tg.Validate(j); err != nil {
			outer := fmt.Errorf("Task group %s validation failed: %v", tg.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}
	// Validate periodic is only used with batch jobs.
	if j.IsPeriodic() && j.Periodic.Enabled {
		if j.Type != JobTypeBatch {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("Periodic can only be used with %q scheduler", JobTypeBatch))
		}
		if err := j.Periodic.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}
	if j.IsParameterized() {
		if j.Type != JobTypeBatch {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("Parameterized job can only be used with %q scheduler", JobTypeBatch))
		}
		if err := j.ParameterizedJob.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}
	return mErr.ErrorOrNil()
}
// Warnings returns a list of warnings that may be from dubious settings or
// deprecation warnings.
func (j *Job) Warnings() error {
	var mErr multierror.Error
	// Collect warnings from each task group, tagging them with the group name.
	for _, tg := range j.TaskGroups {
		err := tg.Warnings(j)
		if err == nil {
			continue
		}
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Group %q has warnings: %v", tg.Name, err))
	}
	return mErr.ErrorOrNil()
}
// LookupTaskGroup finds a task group by name, returning nil if no group
// with that name exists.
func (j *Job) LookupTaskGroup(name string) *TaskGroup {
	for i := range j.TaskGroups {
		if j.TaskGroups[i].Name == name {
			return j.TaskGroups[i]
		}
	}
	return nil
}
// CombinedTaskMeta takes a TaskGroup and Task name and returns the combined
// meta data for the task. When joining Job, Group and Task Meta, the precedence
// is by deepest scope (Task > Group > Job). Returns nil if the group or task
// cannot be found.
func (j *Job) CombinedTaskMeta(groupName, taskName string) map[string]string {
	group := j.LookupTaskGroup(groupName)
	if group == nil {
		return nil
	}
	task := group.LookupTask(taskName)
	if task == nil {
		return nil
	}
	// Start from the task's meta (deepest scope wins).
	meta := helper.CopyMapStringString(task.Meta)
	if meta == nil {
		meta = make(map[string]string, len(group.Meta)+len(j.Meta))
	}
	// merge copies entries from src without overriding keys a deeper scope
	// has already set.
	merge := func(src map[string]string) {
		for k, v := range src {
			if _, ok := meta[k]; !ok {
				meta[k] = v
			}
		}
	}
	merge(group.Meta)
	merge(j.Meta)
	return meta
}
// Stopped returns if a job is stopped. A nil job counts as stopped.
func (j *Job) Stopped() bool {
	if j == nil {
		return true
	}
	return j.Stop
}
// HasUpdateStrategy returns if any task group in the job has an update strategy
func (j *Job) HasUpdateStrategy() bool {
	for i := range j.TaskGroups {
		if j.TaskGroups[i].Update != nil {
			return true
		}
	}
	return false
}
// Stub is used to return a summary of the job suitable for list endpoints.
// The caller supplies the (possibly nil) summary to embed.
func (j *Job) Stub(summary *JobSummary) *JobListStub {
	return &JobListStub{
		ID: j.ID,
		ParentID: j.ParentID,
		Name: j.Name,
		Type: j.Type,
		Priority: j.Priority,
		Periodic: j.IsPeriodic(),
		ParameterizedJob: j.IsParameterized(),
		Stop: j.Stop,
		Status: j.Status,
		StatusDescription: j.StatusDescription,
		CreateIndex: j.CreateIndex,
		ModifyIndex: j.ModifyIndex,
		JobModifyIndex: j.JobModifyIndex,
		SubmitTime: j.SubmitTime,
		JobSummary: summary,
	}
}
// IsPeriodic returns whether a job is periodic (has a periodic config).
func (j *Job) IsPeriodic() bool {
	return j.Periodic != nil
}
// IsParameterized returns whether a job is parameterized job (has a
// parameterized config).
func (j *Job) IsParameterized() bool {
	return j.ParameterizedJob != nil
}
// VaultPolicies returns the set of Vault policies per task group, per task.
// Task groups whose tasks declare no Vault block are omitted entirely.
func (j *Job) VaultPolicies() map[string]map[string]*Vault {
	policies := make(map[string]map[string]*Vault, len(j.TaskGroups))
	for _, tg := range j.TaskGroups {
		group := make(map[string]*Vault, len(tg.Tasks))
		for _, task := range tg.Tasks {
			if task.Vault != nil {
				group[task.Name] = task.Vault
			}
		}
		if len(group) > 0 {
			policies[tg.Name] = group
		}
	}
	return policies
}
// RequiredSignals returns a mapping of task groups to tasks to their required
// set of signals. Only tasks that actually require at least one signal appear
// in the result; each task's signal list is sorted and de-duplicated.
func (j *Job) RequiredSignals() map[string]map[string][]string {
	signals := make(map[string]map[string][]string)
	for _, tg := range j.TaskGroups {
		for _, task := range tg.Tasks {
			// Use this local one as a set
			taskSignals := make(map[string]struct{})
			// Check if the Vault change mode uses signals
			if task.Vault != nil && task.Vault.ChangeMode == VaultChangeModeSignal {
				taskSignals[task.Vault.ChangeSignal] = struct{}{}
			}
			// Check if any template change mode uses signals
			for _, t := range task.Templates {
				if t.ChangeMode != TemplateChangeModeSignal {
					continue
				}
				taskSignals[t.ChangeSignal] = struct{}{}
			}
			// Flatten and sort the signals
			l := len(taskSignals)
			if l == 0 {
				continue
			}
			flat := make([]string, 0, l)
			for sig := range taskSignals {
				flat = append(flat, sig)
			}
			sort.Strings(flat)
			tgSignals, ok := signals[tg.Name]
			if !ok {
				tgSignals = make(map[string][]string)
				signals[tg.Name] = tgSignals
			}
			tgSignals[task.Name] = flat
		}
	}
	return signals
}
// SpecChanged determines if the functional specification has changed between
// two job versions. Bookkeeping fields (status, version, Raft indexes,
// submit time) are excluded from the comparison.
func (j *Job) SpecChanged(new *Job) bool {
	if j == nil {
		return new != nil
	}
	// Work on a copy of the incoming job and neutralize the fields that do
	// not affect the functional specification, so DeepEqual only reflects
	// meaningful differences.
	cmp := new.Copy()
	cmp.Status = j.Status
	cmp.StatusDescription = j.StatusDescription
	cmp.Stable = j.Stable
	cmp.Version = j.Version
	cmp.CreateIndex = j.CreateIndex
	cmp.ModifyIndex = j.ModifyIndex
	cmp.JobModifyIndex = j.JobModifyIndex
	cmp.SubmitTime = j.SubmitTime
	return !reflect.DeepEqual(j, cmp)
}
// SetSubmitTime stamps the job with the current UTC time in nanoseconds.
func (j *Job) SetSubmitTime() {
	now := time.Now().UTC()
	j.SubmitTime = now.UnixNano()
}
// JobListStub is used to return a subset of job information
// for the job list
type JobListStub struct {
	ID                string
	ParentID          string
	Name              string
	Type              string
	Priority          int
	Periodic          bool
	ParameterizedJob  bool
	Stop              bool
	Status            string
	StatusDescription string
	JobSummary        *JobSummary
	CreateIndex       uint64
	ModifyIndex       uint64
	JobModifyIndex    uint64
	SubmitTime        int64
}
// JobSummary summarizes the state of the allocations of a job
type JobSummary struct {
	// JobID is the ID of the job the summary is for
	JobID string
	// Namespace is the namespace of the job and its summary
	Namespace string
	// Summary contains the summary per task group for the Job
	Summary map[string]TaskGroupSummary
	// Children contains a summary for the children of this job.
	Children *JobChildrenSummary
	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
// Copy returns a deep copy of the JobSummary (the Summary map and Children
// are duplicated). A nil receiver yields nil, matching the other Copy
// helpers in this package (JobChildrenSummary.Copy, UpdateStrategy.Copy, …),
// which previously this method alone did not guard against.
func (js *JobSummary) Copy() *JobSummary {
	if js == nil {
		return nil
	}
	newJobSummary := new(JobSummary)
	*newJobSummary = *js
	newTGSummary := make(map[string]TaskGroupSummary, len(js.Summary))
	for k, v := range js.Summary {
		newTGSummary[k] = v
	}
	newJobSummary.Summary = newTGSummary
	newJobSummary.Children = newJobSummary.Children.Copy()
	return newJobSummary
}
// JobChildrenSummary contains the summary of children job statuses.
// Each field is a count of child jobs in that state.
type JobChildrenSummary struct {
	Pending int64
	Running int64
	Dead    int64
}
// Copy returns a copy of the JobChildrenSummary; nil in, nil out.
func (jc *JobChildrenSummary) Copy() *JobChildrenSummary {
	if jc == nil {
		return nil
	}
	dup := *jc
	return &dup
}
// TaskGroupSummary summarizes the state of all the allocations of a
// particular TaskGroup. Each field is a count of allocations in that state.
type TaskGroupSummary struct {
	Queued   int
	Complete int
	Failed   int
	Running  int
	Starting int
	Lost     int
}
const (
	// Checks uses any registered health check state in combination with task
	// states to determine if an allocation is healthy.
	UpdateStrategyHealthCheck_Checks = "checks"
	// TaskStates uses the task states of an allocation to determine if the
	// allocation is healthy.
	UpdateStrategyHealthCheck_TaskStates = "task_states"
	// Manual allows the operator to manually signal to Nomad when an
	// allocation is healthy. This allows more advanced health checking that is
	// outside of the scope of Nomad.
	UpdateStrategyHealthCheck_Manual = "manual"
)
var (
	// DefaultUpdateStrategy provides a baseline that can be used to upgrade
	// jobs with the old policy or for populating field defaults.
	DefaultUpdateStrategy = &UpdateStrategy{
		Stagger:         30 * time.Second,
		MaxParallel:     1,
		HealthCheck:     UpdateStrategyHealthCheck_Checks,
		MinHealthyTime:  10 * time.Second,
		HealthyDeadline: 5 * time.Minute,
		AutoRevert:      false,
		Canary:          0,
	}
)
// UpdateStrategy is used to modify how updates are done
type UpdateStrategy struct {
	// Stagger is used to determine the rate at which allocations are migrated
	// due to down or draining nodes.
	Stagger time.Duration
	// MaxParallel is how many updates can be done in parallel
	MaxParallel int
	// HealthCheck specifies the mechanism in which allocations are marked
	// healthy or unhealthy as part of a deployment. See the
	// UpdateStrategyHealthCheck_* constants for the accepted values.
	HealthCheck string
	// MinHealthyTime is the minimum time an allocation must be in the healthy
	// state before it is marked as healthy, unblocking more allocations to be
	// rolled.
	MinHealthyTime time.Duration
	// HealthyDeadline is the time in which an allocation must be marked as
	// healthy before it is automatically transitioned to unhealthy. This time
	// period doesn't count against the MinHealthyTime.
	HealthyDeadline time.Duration
	// AutoRevert declares that if a deployment fails because of unhealthy
	// allocations, there should be an attempt to auto-revert the job to a
	// stable version.
	AutoRevert bool
	// Canary is the number of canaries to deploy when a change to the task
	// group is detected.
	Canary int
}
// Copy returns a shallow copy of the update strategy (all fields are value
// types). A nil receiver yields nil.
func (u *UpdateStrategy) Copy() *UpdateStrategy {
	if u == nil {
		return nil
	}
	// Named "c" rather than "copy" so the local does not shadow the copy
	// builtin.
	c := new(UpdateStrategy)
	*c = *u
	return c
}
// Validate checks the update strategy for an unknown health-check mode and
// for out-of-range timing/count fields, accumulating all problems into a
// single multierror. A nil strategy is considered valid.
func (u *UpdateStrategy) Validate() error {
	if u == nil {
		return nil
	}
	var mErr multierror.Error
	fail := func(format string, args ...interface{}) {
		multierror.Append(&mErr, fmt.Errorf(format, args...))
	}
	switch u.HealthCheck {
	case UpdateStrategyHealthCheck_Checks, UpdateStrategyHealthCheck_TaskStates, UpdateStrategyHealthCheck_Manual:
		// Recognized health check mechanism.
	default:
		fail("Invalid health check given: %q", u.HealthCheck)
	}
	if u.MaxParallel < 1 {
		fail("Max parallel can not be less than one: %d < 1", u.MaxParallel)
	}
	if u.Canary < 0 {
		fail("Canary count can not be less than zero: %d < 0", u.Canary)
	}
	if u.MinHealthyTime < 0 {
		fail("Minimum healthy time may not be less than zero: %v", u.MinHealthyTime)
	}
	if u.HealthyDeadline <= 0 {
		fail("Healthy deadline must be greater than zero: %v", u.HealthyDeadline)
	}
	if u.MinHealthyTime >= u.HealthyDeadline {
		fail("Minimum healthy time must be less than healthy deadline: %v > %v", u.MinHealthyTime, u.HealthyDeadline)
	}
	if u.Stagger <= 0 {
		fail("Stagger must be greater than zero: %v", u.Stagger)
	}
	return mErr.ErrorOrNil()
}
// TODO(alexdadgar): Remove once no longer used by the scheduler.
// Rolling reports whether a rolling update strategy is in effect, i.e. both
// a positive stagger and a positive parallelism.
func (u *UpdateStrategy) Rolling() bool {
	if u.Stagger <= 0 {
		return false
	}
	return u.MaxParallel > 0
}
const (
	// PeriodicSpecCron is used for a cron spec.
	PeriodicSpecCron = "cron"
	// PeriodicSpecTest is only used by unit tests. It is a sorted, comma
	// separated list of unix timestamps at which to launch.
	PeriodicSpecTest = "_internal_test"
)
// PeriodicConfig defines the interval a job should be run at.
type PeriodicConfig struct {
	// Enabled determines if the job should be run periodically.
	Enabled bool
	// Spec specifies the interval the job should be run as. It is parsed based
	// on the SpecType.
	Spec string
	// SpecType defines the format of the spec. See the PeriodicSpec*
	// constants for the accepted values.
	SpecType string
	// ProhibitOverlap enforces that spawned jobs do not run in parallel.
	ProhibitOverlap bool
	// TimeZone is the user specified string that determines the time zone to
	// launch against. The time zones must be specified from IANA Time Zone
	// database, such as "America/New_York".
	// Reference: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
	// Reference: https://www.iana.org/time-zones
	TimeZone string
	// location is the time zone to evaluate the launch time against,
	// resolved from TimeZone by Canonicalize.
	location *time.Location
}
// Copy returns a shallow copy of the periodic configuration; nil in, nil out.
func (p *PeriodicConfig) Copy() *PeriodicConfig {
	if p == nil {
		return nil
	}
	dup := *p
	return &dup
}
// Validate checks that an enabled periodic configuration has a spec, a
// loadable time zone, and a spec that parses under its declared SpecType.
// Disabled configurations are always valid.
func (p *PeriodicConfig) Validate() error {
	if !p.Enabled {
		return nil
	}
	var mErr multierror.Error
	if p.Spec == "" {
		multierror.Append(&mErr, fmt.Errorf("Must specify a spec"))
	}
	// Check if we got a valid time zone
	if tz := p.TimeZone; tz != "" {
		if _, err := time.LoadLocation(tz); err != nil {
			multierror.Append(&mErr, fmt.Errorf("Invalid time zone %q: %v", tz, err))
		}
	}
	switch p.SpecType {
	case PeriodicSpecTest:
		// Test specs are accepted as-is.
	case PeriodicSpecCron:
		// Validate the cron spec
		if _, err := cronexpr.Parse(p.Spec); err != nil {
			multierror.Append(&mErr, fmt.Errorf("Invalid cron spec %q: %v", p.Spec, err))
		}
	default:
		multierror.Append(&mErr, fmt.Errorf("Unknown periodic specification type %q", p.SpecType))
	}
	return mErr.ErrorOrNil()
}
// Canonicalize resolves the configured TimeZone into a *time.Location and
// caches it in p.location for later use by GetLocation/Next. An empty
// TimeZone resolves to UTC; an invalid one falls back to UTC.
func (p *PeriodicConfig) Canonicalize() {
	// Load the location
	l, err := time.LoadLocation(p.TimeZone)
	if err != nil {
		// BUGFIX: previously the UTC fallback was immediately overwritten
		// by the unconditional `p.location = l` below, leaving location nil
		// on an invalid time zone. Return after applying the fallback.
		p.location = time.UTC
		return
	}
	p.location = l
}
// Next returns the closest time instant matching the spec that is after the
// passed time. If no matching instance exists, the zero value of time.Time is
// returned. The `time.Location` of the returned value matches that of the
// passed time.
func (p *PeriodicConfig) Next(fromTime time.Time) time.Time {
	switch p.SpecType {
	case PeriodicSpecCron:
		e, err := cronexpr.Parse(p.Spec)
		if err == nil {
			return e.Next(fromTime)
		}
		// An unparseable spec falls through to the zero value below.
	case PeriodicSpecTest:
		fields := strings.Split(p.Spec, ",")
		if len(fields) == 1 && fields[0] == "" {
			return time.Time{}
		}
		// Parse the comma-separated unix timestamps.
		launches := make([]time.Time, len(fields))
		for i, f := range fields {
			secs, err := strconv.Atoi(f)
			if err != nil {
				return time.Time{}
			}
			launches[i] = time.Unix(int64(secs), 0)
		}
		// Return the first launch strictly after fromTime (the list is
		// sorted by convention of PeriodicSpecTest).
		for _, candidate := range launches {
			if fromTime.Before(candidate) {
				return candidate
			}
		}
	}
	return time.Time{}
}
// GetLocation returns the location to use for determining the time zone to
// run the periodic job against. Jobs created before 0.5.5 never had
// Canonicalize run, so a nil location defaults to UTC.
func (p *PeriodicConfig) GetLocation() *time.Location {
	if p.location == nil {
		return time.UTC
	}
	return p.location
}
const (
	// PeriodicLaunchSuffix is the string appended to the periodic jobs ID
	// when launching derived instances of it.
	PeriodicLaunchSuffix = "/periodic-"
)
// PeriodicLaunch tracks the last launch time of a periodic job.
type PeriodicLaunch struct {
	ID        string    // ID of the periodic job.
	Namespace string    // Namespace of the periodic job
	Launch    time.Time // The last launch time.
	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
const (
	// Accepted values for ParameterizedJobConfig.Payload.
	DispatchPayloadForbidden = "forbidden"
	DispatchPayloadOptional  = "optional"
	DispatchPayloadRequired  = "required"
	// DispatchLaunchSuffix is the string appended to the parameterized job's ID
	// when dispatching instances of it.
	DispatchLaunchSuffix = "/dispatch-"
)
// ParameterizedJobConfig is used to configure the parameterized job
type ParameterizedJobConfig struct {
	// Payload configures the payload requirements; one of the
	// DispatchPayload* constants.
	Payload string
	// MetaRequired is metadata keys that must be specified by the dispatcher
	MetaRequired []string
	// MetaOptional is metadata keys that may be specified by the dispatcher
	MetaOptional []string
}
// Validate checks that the payload requirement is a known value and that the
// required and optional meta key sets do not overlap.
func (d *ParameterizedJobConfig) Validate() error {
	var mErr multierror.Error
	switch d.Payload {
	case DispatchPayloadOptional, DispatchPayloadRequired, DispatchPayloadForbidden:
		// Recognized payload requirement.
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Unknown payload requirement: %q", d.Payload))
	}
	// Check that the meta configurations are disjoint sets
	if disjoint, offending := helper.SliceSetDisjoint(d.MetaRequired, d.MetaOptional); !disjoint {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Required and optional meta keys should be disjoint. Following keys exist in both: %v", offending))
	}
	return mErr.ErrorOrNil()
}
// Canonicalize defaults an unset payload requirement to "optional".
func (d *ParameterizedJobConfig) Canonicalize() {
	if len(d.Payload) == 0 {
		d.Payload = DispatchPayloadOptional
	}
}
// Copy returns a deep copy of the config (meta slices are duplicated);
// nil in, nil out.
func (d *ParameterizedJobConfig) Copy() *ParameterizedJobConfig {
	if d == nil {
		return nil
	}
	dup := *d
	dup.MetaOptional = helper.CopySliceString(d.MetaOptional)
	dup.MetaRequired = helper.CopySliceString(d.MetaRequired)
	return &dup
}
// DispatchedID returns an ID appropriate for a job dispatched against a
// particular parameterized job: the template ID, the dispatch suffix, the
// unix timestamp, and a short random component for uniqueness.
func DispatchedID(templateID string, t time.Time) string {
	random := uuid.Generate()[:8]
	return fmt.Sprintf("%s%s%d-%s", templateID, DispatchLaunchSuffix, t.Unix(), random)
}
// DispatchPayloadConfig configures how a task gets its input from a job dispatch
type DispatchPayloadConfig struct {
	// File specifies a relative path to where the input data should be written
	File string
}
// Copy returns a shallow copy of the payload config; nil in, nil out.
func (d *DispatchPayloadConfig) Copy() *DispatchPayloadConfig {
	if d == nil {
		return nil
	}
	dup := *d
	return &dup
}
// Validate ensures the payload destination stays inside the allocation's
// task/local directory.
func (d *DispatchPayloadConfig) Validate() error {
	escaped, err := PathEscapesAllocDir("task/local/", d.File)
	switch {
	case err != nil:
		return fmt.Errorf("invalid destination path: %v", err)
	case escaped:
		return fmt.Errorf("destination escapes allocation directory")
	}
	return nil
}
var (
	// defaultServiceJobRestartPolicy is applied to service and system jobs
	// by NewRestartPolicy.
	defaultServiceJobRestartPolicy = RestartPolicy{
		Delay:    15 * time.Second,
		Attempts: 2,
		Interval: 1 * time.Minute,
		Mode:     RestartPolicyModeDelay,
	}
	// defaultBatchJobRestartPolicy is applied to batch jobs by
	// NewRestartPolicy.
	defaultBatchJobRestartPolicy = RestartPolicy{
		Delay:    15 * time.Second,
		Attempts: 15,
		Interval: 7 * 24 * time.Hour,
		Mode:     RestartPolicyModeDelay,
	}
)
const (
	// RestartPolicyModeDelay causes an artificial delay till the next interval is
	// reached when the specified attempts have been reached in the interval.
	RestartPolicyModeDelay = "delay"
	// RestartPolicyModeFail causes a job to fail if the specified number of
	// attempts are reached within an interval.
	RestartPolicyModeFail = "fail"
	// RestartPolicyMinInterval is the minimum interval that is accepted for a
	// restart policy.
	RestartPolicyMinInterval = 5 * time.Second
	// ReasonWithinPolicy describes restart events that are within policy
	ReasonWithinPolicy = "Restart within policy"
)
// RestartPolicy configures how Tasks are restarted when they crash or fail.
type RestartPolicy struct {
	// Attempts is the number of restart that will occur in an interval.
	Attempts int
	// Interval is a duration in which we can limit the number of restarts
	// within.
	Interval time.Duration
	// Delay is the time between a failure and a restart.
	Delay time.Duration
	// Mode controls what happens when the task restarts more than attempt times
	// in an interval. See the RestartPolicyMode* constants.
	Mode string
}
// Copy returns a shallow copy of the restart policy; nil in, nil out.
func (r *RestartPolicy) Copy() *RestartPolicy {
	if r == nil {
		return nil
	}
	dup := *r
	return &dup
}
// Validate checks the restart policy: a known mode, an unambiguous
// attempts/mode combination, a sufficiently long interval, and that the
// attempts actually fit within the interval given the delay.
func (r *RestartPolicy) Validate() error {
	var mErr multierror.Error
	switch r.Mode {
	case RestartPolicyModeDelay, RestartPolicyModeFail:
		// Supported modes.
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Unsupported restart mode: %q", r.Mode))
	}
	// Zero attempts with a non-fail mode never restarts but also never
	// fails, which is almost certainly not what the operator intended.
	if r.Attempts == 0 && r.Mode != RestartPolicyModeFail {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Restart policy %q with %d attempts is ambiguous", r.Mode, r.Attempts))
	}
	if r.Interval < RestartPolicyMinInterval {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Interval can not be less than %v (got %v)", RestartPolicyMinInterval, r.Interval))
	}
	if time.Duration(r.Attempts)*r.Delay > r.Interval {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("Nomad can't restart the TaskGroup %v times in an interval of %v with a delay of %v", r.Attempts, r.Interval, r.Delay))
	}
	return mErr.ErrorOrNil()
}
// NewRestartPolicy returns a copy of the default restart policy for the
// given job type, or nil for unrecognized types.
func NewRestartPolicy(jobType string) *RestartPolicy {
	var rp RestartPolicy
	switch jobType {
	case JobTypeService, JobTypeSystem:
		rp = defaultServiceJobRestartPolicy
	case JobTypeBatch:
		rp = defaultBatchJobRestartPolicy
	default:
		return nil
	}
	return &rp
}
// TaskGroup is an atomic unit of placement. Each task group belongs to
// a job and may contain any number of tasks. A task group supports running
// in many replicas using the same configuration.
type TaskGroup struct {
	// Name of the task group
	Name string
	// Count is the number of replicas of this task group that should
	// be scheduled.
	Count int
	// Update is used to control the update strategy for this task group
	Update *UpdateStrategy
	// Constraints can be specified at a task group level and apply to
	// all the tasks contained.
	Constraints []*Constraint
	// RestartPolicy of a TaskGroup
	RestartPolicy *RestartPolicy
	// Tasks are the collection of tasks that this task group needs to run
	Tasks []*Task
	// EphemeralDisk is the disk resources that the task group requests
	EphemeralDisk *EphemeralDisk
	// Meta is used to associate arbitrary metadata with this
	// task group. This is opaque to Nomad.
	Meta map[string]string
}
// Copy returns a deep copy of the task group: the update strategy,
// constraints, restart policy, tasks, meta map, and ephemeral disk are all
// duplicated. A nil receiver yields nil.
func (tg *TaskGroup) Copy() *TaskGroup {
	if tg == nil {
		return nil
	}
	dup := *tg
	dup.Update = tg.Update.Copy()
	dup.Constraints = CopySliceConstraints(tg.Constraints)
	dup.RestartPolicy = tg.RestartPolicy.Copy()
	if tg.Tasks != nil {
		dup.Tasks = make([]*Task, len(tg.Tasks))
		for i, t := range tg.Tasks {
			dup.Tasks[i] = t.Copy()
		}
	}
	dup.Meta = helper.CopyMapStringString(tg.Meta)
	if tg.EphemeralDisk != nil {
		dup.EphemeralDisk = tg.EphemeralDisk.Copy()
	}
	return &dup
}
// Canonicalize is used to canonicalize fields in the TaskGroup: it
// normalizes the meta map, fills in default restart policy and ephemeral
// disk, canonicalizes each task, and applies the legacy per-task disk
// compatibility shim.
func (tg *TaskGroup) Canonicalize(job *Job) {
	// Ensure that an empty and nil map are treated the same to avoid scheduling
	// problems since we use reflect DeepEquals.
	if len(tg.Meta) == 0 {
		tg.Meta = nil
	}
	// Set the default restart policy.
	if tg.RestartPolicy == nil {
		tg.RestartPolicy = NewRestartPolicy(job.Type)
	}
	// Set a default ephemeral disk object if the user has not requested for one
	if tg.EphemeralDisk == nil {
		tg.EphemeralDisk = DefaultEphemeralDisk()
	}
	// Canonicalize each task in the context of this job and group.
	for _, task := range tg.Tasks {
		task.Canonicalize(job, tg)
	}
	// Add up the disk resources to EphemeralDisk. This is done so that users
	// are not required to move their disk attribute from resources to
	// EphemeralDisk section of the job spec in Nomad 0.5
	// COMPAT 0.4.1 -> 0.5
	// Remove in 0.6
	var diskMB int
	for _, task := range tg.Tasks {
		diskMB += task.Resources.DiskMB
	}
	// Only override the group-level size when tasks declared per-task disk;
	// otherwise the configured/default EphemeralDisk size is kept.
	if diskMB > 0 {
		tg.EphemeralDisk.SizeMB = diskMB
	}
}
// Validate is used to sanity check a task group: name/count/tasks presence,
// constraints, restart policy, ephemeral disk, update strategy, duplicate
// task names, leader uniqueness, static port collisions, and finally each
// task's own validation. All problems are accumulated into one multierror.
func (tg *TaskGroup) Validate(j *Job) error {
	var mErr multierror.Error
	if tg.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task group name"))
	}
	if tg.Count < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Task group count can't be negative"))
	}
	if len(tg.Tasks) == 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Missing tasks for task group"))
	}
	// Validate each group-level constraint, labeling errors 1-based.
	for idx, constr := range tg.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}
	// A restart policy is required (Canonicalize normally supplies one).
	if tg.RestartPolicy != nil {
		if err := tg.RestartPolicy.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	} else {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a restart policy", tg.Name))
	}
	// An ephemeral disk object is required (Canonicalize normally supplies one).
	if tg.EphemeralDisk != nil {
		if err := tg.EphemeralDisk.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	} else {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have an ephemeral disk object", tg.Name))
	}
	// Validate the update strategy
	if u := tg.Update; u != nil {
		switch j.Type {
		case JobTypeService, JobTypeSystem:
		default:
			// COMPAT: Enable in 0.7.0
			//mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow update block", j.Type))
		}
		if err := u.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}
	// Check for duplicate tasks, that there is only leader task if any,
	// and no duplicated static ports
	tasks := make(map[string]int)
	staticPorts := make(map[int]string)
	leaderTasks := 0
	for idx, task := range tg.Tasks {
		if task.Name == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d missing name", idx+1))
		} else if existing, ok := tasks[task.Name]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d redefines '%s' from task %d", idx+1, task.Name, existing+1))
		} else {
			tasks[task.Name] = idx
		}
		if task.Leader {
			leaderTasks++
		}
		if task.Resources == nil {
			continue
		}
		// Record each reserved static port; a repeat across tasks is an error.
		for _, net := range task.Resources.Networks {
			for _, port := range net.ReservedPorts {
				if other, ok := staticPorts[port.Value]; ok {
					err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other)
					mErr.Errors = append(mErr.Errors, err)
				} else {
					staticPorts[port.Value] = fmt.Sprintf("%s:%s", task.Name, port.Label)
				}
			}
		}
	}
	if leaderTasks > 1 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Only one task may be marked as leader"))
	}
	// Validate the tasks
	for _, task := range tg.Tasks {
		if err := task.Validate(tg.EphemeralDisk); err != nil {
			outer := fmt.Errorf("Task %s validation failed: %v", task.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}
	return mErr.ErrorOrNil()
}
// Warnings returns a list of warnings that may be from dubious settings or
// deprecation warnings. Currently it flags an update MaxParallel exceeding
// the group count.
func (tg *TaskGroup) Warnings(j *Job) error {
	var mErr multierror.Error
	if u := tg.Update; u != nil && u.MaxParallel > tg.Count {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("Update max parallel count is greater than task group count (%d > %d). "+
				"A destructive change would result in the simultaneous replacement of all allocations.", u.MaxParallel, tg.Count))
	}
	return mErr.ErrorOrNil()
}
// LookupTask finds a task by name, returning nil when no task matches.
func (tg *TaskGroup) LookupTask(name string) *Task {
	for i := range tg.Tasks {
		if tg.Tasks[i].Name == name {
			return tg.Tasks[i]
		}
	}
	return nil
}
// GoString implements fmt.GoStringer, rendering the dereferenced task group
// with Go-syntax (%#v) formatting for debug output.
func (tg *TaskGroup) GoString() string {
	return fmt.Sprintf("*%#v", *tg)
}
// CombinedResources returns the combined resources for the task group:
// the group's ephemeral disk size plus the sum of every task's resources.
func (tg *TaskGroup) CombinedResources() *Resources {
	combined := &Resources{DiskMB: tg.EphemeralDisk.SizeMB}
	for _, t := range tg.Tasks {
		combined.Add(t.Resources)
	}
	return combined
}
// CheckRestart describes if and when a task should be restarted based on
// failing health checks. A zero Limit disables restart-on-check-failure
// (see ServiceCheck.TriggersRestarts).
type CheckRestart struct {
	Limit          int           // Restart task after this many unhealthy intervals
	Grace          time.Duration // Grace time to give tasks after starting to get healthy
	IgnoreWarnings bool          // If true treat checks in `warning` as passing
}
// Copy returns a shallow copy of the check restart config; nil in, nil out.
func (c *CheckRestart) Copy() *CheckRestart {
	if c == nil {
		return nil
	}
	dup := *c
	return &dup
}
// Validate ensures Limit and Grace are non-negative. A nil config is valid.
func (c *CheckRestart) Validate() error {
	if c == nil {
		return nil
	}
	var mErr multierror.Error
	if c.Limit < 0 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("limit must be greater than or equal to 0 but found %d", c.Limit))
	}
	if c.Grace < 0 {
		// BUGFIX: use %v so the time.Duration renders readably (e.g. "-1s");
		// %d printed the raw nanosecond count.
		mErr.Errors = append(mErr.Errors, fmt.Errorf("grace period must be greater than or equal to 0 but found %v", c.Grace))
	}
	return mErr.ErrorOrNil()
}
const (
	// Accepted values for ServiceCheck.Type.
	ServiceCheckHTTP   = "http"
	ServiceCheckTCP    = "tcp"
	ServiceCheckScript = "script"
	// minCheckInterval is the minimum check interval permitted. Consul
	// currently has its MinInterval set to 1s. Mirror that here for
	// consistency.
	minCheckInterval = 1 * time.Second
	// minCheckTimeout is the minimum check timeout permitted for Consul
	// script TTL checks.
	minCheckTimeout = 1 * time.Second
)
// The ServiceCheck data model represents the consul health check that
// Nomad registers for a Task
type ServiceCheck struct {
	Name          string              // Name of the check, defaults to id
	Type          string              // Type of the check - tcp, http, docker and script
	Command       string              // Command is the command to run for script checks
	Args          []string            // Args is a list of arguments for script checks
	Path          string              // path of the health check url for http type check
	Protocol      string              // Protocol to use if check is http, defaults to http
	PortLabel     string              // The port to use for tcp/http checks
	Interval      time.Duration       // Interval of the check
	Timeout       time.Duration       // Timeout of the response from the check before consul fails the check
	InitialStatus string              // Initial status of the check
	TLSSkipVerify bool                // Skip TLS verification when Protocol=https
	Method        string              // HTTP Method to use (GET by default)
	Header        map[string][]string // HTTP Headers for Consul to set when making HTTP checks
	CheckRestart  *CheckRestart       // If and when a task should be restarted based on checks
}
// Copy returns a deep copy of the check (args, headers, and the restart
// config are duplicated); nil in, nil out.
func (sc *ServiceCheck) Copy() *ServiceCheck {
	if sc == nil {
		return nil
	}
	dup := *sc
	dup.Args = helper.CopySliceString(sc.Args)
	dup.Header = helper.CopyMapStringSliceString(sc.Header)
	dup.CheckRestart = sc.CheckRestart.Copy()
	return &dup
}
// Canonicalize normalizes empty collections to nil (so DeepEquals treats
// empty and nil alike during scheduling) and derives a default check name
// from the owning service.
func (sc *ServiceCheck) Canonicalize(serviceName string) {
	if len(sc.Args) == 0 {
		sc.Args = nil
	}
	switch len(sc.Header) {
	case 0:
		sc.Header = nil
	default:
		// Normalize empty header value slices to nil as well.
		for k, v := range sc.Header {
			if len(v) == 0 {
				sc.Header[k] = nil
			}
		}
	}
	if sc.Name == "" {
		sc.Name = fmt.Sprintf("service: %q check", serviceName)
	}
}
// validate a Service's ServiceCheck: type-specific requirements (http needs
// a path, script needs a command), minimum interval and timeout, a known
// initial Consul status, and the nested CheckRestart config.
func (sc *ServiceCheck) validate() error {
	switch strings.ToLower(sc.Type) {
	case ServiceCheckTCP:
	case ServiceCheckHTTP:
		if sc.Path == "" {
			return fmt.Errorf("http type must have a valid http path")
		}
	case ServiceCheckScript:
		if sc.Command == "" {
			return fmt.Errorf("script type must have a valid script path")
		}
	default:
		return fmt.Errorf(`invalid type (%+q), must be one of "http", "tcp", or "script" type`, sc.Type)
	}
	if sc.Interval == 0 {
		return fmt.Errorf("missing required value interval. Interval cannot be less than %v", minCheckInterval)
	} else if sc.Interval < minCheckInterval {
		return fmt.Errorf("interval (%v) cannot be lower than %v", sc.Interval, minCheckInterval)
	}
	// BUGFIX: the timeout messages previously printed minCheckInterval even
	// though the comparison is against minCheckTimeout; report the constant
	// the value is actually checked against.
	if sc.Timeout == 0 {
		return fmt.Errorf("missing required value timeout. Timeout cannot be less than %v", minCheckTimeout)
	} else if sc.Timeout < minCheckTimeout {
		return fmt.Errorf("timeout (%v) is lower than required minimum timeout %v", sc.Timeout, minCheckTimeout)
	}
	switch sc.InitialStatus {
	case "":
		// case api.HealthUnknown: TODO: Add when Consul releases 0.7.1
	case api.HealthPassing:
	case api.HealthWarning:
	case api.HealthCritical:
	default:
		return fmt.Errorf(`invalid initial check state (%s), must be one of %q, %q, %q or empty`, sc.InitialStatus, api.HealthPassing, api.HealthWarning, api.HealthCritical)
	}
	return sc.CheckRestart.Validate()
}
// RequiresPort returns whether the service check requires the task has a
// port. Only tcp and http checks connect to a port; script checks do not.
func (sc *ServiceCheck) RequiresPort() bool {
	switch sc.Type {
	case ServiceCheckHTTP, ServiceCheckTCP:
		return true
	}
	return false
}
// TriggersRestarts returns true if this check should be watched and trigger
// a restart on failure, i.e. it carries a CheckRestart with a positive Limit.
func (sc *ServiceCheck) TriggersRestarts() bool {
	if sc.CheckRestart == nil {
		return false
	}
	return sc.CheckRestart.Limit > 0
}
// Hash all ServiceCheck fields and the check's corresponding service ID to
// create an identifier. The identifier is not guaranteed to be unique as if
// the PortLabel is blank, the Service's PortLabel will be used after Hash is
// called.
//
// NOTE: the order of writes below is part of the identifier format — any
// reordering or addition changes existing check IDs.
func (sc *ServiceCheck) Hash(serviceID string) string {
	h := sha1.New()
	io.WriteString(h, serviceID)
	io.WriteString(h, sc.Name)
	io.WriteString(h, sc.Type)
	io.WriteString(h, sc.Command)
	io.WriteString(h, strings.Join(sc.Args, ""))
	io.WriteString(h, sc.Path)
	io.WriteString(h, sc.Protocol)
	io.WriteString(h, sc.PortLabel)
	io.WriteString(h, sc.Interval.String())
	io.WriteString(h, sc.Timeout.String())
	io.WriteString(h, sc.Method)
	// Only include TLSSkipVerify if set to maintain ID stability with Nomad <0.6
	if sc.TLSSkipVerify {
		io.WriteString(h, "true")
	}
	// Since map iteration order isn't stable we need to write k/v pairs to
	// a slice and sort it before hashing.
	if len(sc.Header) > 0 {
		headers := make([]string, 0, len(sc.Header))
		for k, v := range sc.Header {
			headers = append(headers, k+strings.Join(v, ""))
		}
		sort.Strings(headers)
		io.WriteString(h, strings.Join(headers, ""))
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}
const (
	// Accepted values for Service.AddressMode.
	AddressModeAuto   = "auto"
	AddressModeHost   = "host"
	AddressModeDriver = "driver"
)
// Service represents a Consul service definition in Nomad
type Service struct {
	// Name of the service registered with Consul. Consul defaults the
	// Name to ServiceID if not specified. The Name if specified is used
	// as one of the seed values when generating a Consul ServiceID.
	Name string
	// PortLabel is either the numeric port number or the `host:port`.
	// To specify the port number using the host's Consul Advertise
	// address, specify an empty host in the PortLabel (e.g. `:port`).
	PortLabel string
	// AddressMode specifies whether or not to use the host ip:port for
	// this service. See the AddressMode* constants.
	AddressMode string
	Tags   []string        // List of tags for the service
	Checks []*ServiceCheck // List of checks associated with the service
}
// Copy returns a deep copy of the service (tags and checks are duplicated);
// nil in, nil out.
func (s *Service) Copy() *Service {
	if s == nil {
		return nil
	}
	dup := *s
	dup.Tags = helper.CopySliceString(s.Tags)
	if s.Checks != nil {
		dup.Checks = make([]*ServiceCheck, len(s.Checks))
		for i, check := range s.Checks {
			dup.Checks[i] = check.Copy()
		}
	}
	return &dup
}
// Canonicalize interpolates values of Job, Task Group and Task in the
// Service Name and then canonicalizes each check against the interpolated
// service name. Empty tag/check lists are normalized to nil to avoid
// scheduler DeepEquals issues.
func (s *Service) Canonicalize(job string, taskGroup string, task string) {
	if len(s.Tags) == 0 {
		s.Tags = nil
	}
	if len(s.Checks) == 0 {
		s.Checks = nil
	}
	env := map[string]string{
		"JOB":       job,
		"TASKGROUP": taskGroup,
		"TASK":      task,
		"BASE":      fmt.Sprintf("%s-%s-%s", job, taskGroup, task),
	}
	s.Name = args.ReplaceEnv(s.Name, env)
	for _, check := range s.Checks {
		check.Canonicalize(s.Name)
	}
}
// Validate checks the service definition: name validity (after stripping
// interpolation syntax), a recognized address mode, and every attached
// check. All problems are accumulated into one multierror.
func (s *Service) Validate() error {
	var mErr multierror.Error
	// Ensure the service name is valid per the below RFCs but make an exception
	// for our interpolation syntax by first stripping any environment variables from the name
	stripped := args.ReplaceEnvWithPlaceHolder(s.Name, "ENV-VAR")
	if err := s.ValidateName(stripped); err != nil {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes: %q", s.Name))
	}
	switch s.AddressMode {
	case "", AddressModeAuto, AddressModeHost, AddressModeDriver:
		// OK
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("service address_mode must be %q, %q, or %q; not %q", AddressModeAuto, AddressModeHost, AddressModeDriver, s.AddressMode))
	}
	for _, check := range s.Checks {
		// A port-based check on a portless service can never succeed.
		if s.PortLabel == "" && check.RequiresPort() {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: check requires a port but the service %+q has no port", check.Name, s.Name))
			continue
		}
		if err := check.validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: %v", check.Name, err))
		}
	}
	return mErr.ErrorOrNil()
}
// serviceNameRegexp validates service names per RFC-952 §1
// (https://tools.ietf.org/html/rfc952), RFC-1123 §2.1
// (https://tools.ietf.org/html/rfc1123), and RFC-2782
// (https://tools.ietf.org/html/rfc2782). Compiled once at package scope
// instead of on every ValidateName call.
var serviceNameRegexp = regexp.MustCompile(`^(?i:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9])$`)

// ValidateName checks if the services Name is valid and should be called after
// the name has been interpolated
func (s *Service) ValidateName(name string) error {
	if !serviceNameRegexp.MatchString(name) {
		return fmt.Errorf("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes and must be no longer than 63 characters: %q", name)
	}
	return nil
}
// Hash calculates the hash of the service based on its content (name, tags,
// port label, and address mode).
//
// NOTE: the order of writes below is part of the identifier format — any
// reordering or addition changes existing service IDs.
func (s *Service) Hash() string {
	h := sha1.New()
	io.WriteString(h, s.Name)
	io.WriteString(h, strings.Join(s.Tags, ""))
	io.WriteString(h, s.PortLabel)
	io.WriteString(h, s.AddressMode)
	return fmt.Sprintf("%x", h.Sum(nil))
}
const (
	// DefaultKillTimeout is the default timeout between signaling a task it
	// will be killed and killing it.
	DefaultKillTimeout = 5 * time.Second
)
// LogConfig provides configuration for log rotation
type LogConfig struct {
	MaxFiles      int // Maximum number of rotated log files to keep
	MaxFileSizeMB int // Maximum size of each log file, in megabytes
}
// DefaultLogConfig returns the default LogConfig values: 10 files of 10MB.
func DefaultLogConfig() *LogConfig {
	return &LogConfig{MaxFiles: 10, MaxFileSizeMB: 10}
}
// Validate returns an error if either log rotation setting is below the
// minimum allowed value of 1.
func (l *LogConfig) Validate() error {
	var mErr multierror.Error
	checkMin := func(v int, format string) {
		if v < 1 {
			mErr.Errors = append(mErr.Errors, fmt.Errorf(format, v))
		}
	}
	checkMin(l.MaxFiles, "minimum number of files is 1; got %d")
	checkMin(l.MaxFileSizeMB, "minimum file size is 1MB; got %d")
	return mErr.ErrorOrNil()
}
// Task is a single process typically that is executed as part of a task group.
type Task struct {
	// Name of the task
	Name string
	// Driver is used to control which driver is used
	Driver string
	// User is used to determine which user will run the task. It defaults to
	// the same user the Nomad client is being run as.
	User string
	// Config is provided to the driver to initialize
	Config map[string]interface{}
	// Map of environment variables to be used by the driver
	Env map[string]string
	// List of service definitions exposed by the Task
	Services []*Service
	// Vault is used to define the set of Vault policies that this task should
	// have access to.
	Vault *Vault
	// Templates are the set of templates to be rendered for the task.
	Templates []*Template
	// Constraints can be specified at a task level and apply only to
	// the particular task.
	Constraints []*Constraint
	// Resources is the resources needed by this task
	Resources *Resources
	// DispatchPayload configures how the task retrieves its input from a dispatch
	DispatchPayload *DispatchPayloadConfig
	// Meta is used to associate arbitrary metadata with this
	// task. This is opaque to Nomad.
	Meta map[string]string
	// KillTimeout is the time between signaling a task that it will be
	// killed and killing it.
	KillTimeout time.Duration
	// LogConfig provides configuration for log rotation
	LogConfig *LogConfig
	// Artifacts is a list of artifacts to download and extract before running
	// the task.
	Artifacts []*TaskArtifact
	// Leader marks the task as the leader within the group. When the leader
	// task exits, other tasks will be gracefully terminated.
	Leader bool
	// ShutdownDelay is the duration of the delay between deregistering a
	// task from Consul and sending it a signal to shutdown. See #2441
	ShutdownDelay time.Duration
}
// Copy returns a deep copy of the task, or nil for a nil receiver. Maps,
// services, constraints, artifacts, templates, and the driver config are
// duplicated; panics if the driver config cannot be deep-copied.
//
// NOTE(review): LogConfig is shared via the shallow struct copy rather than
// duplicated — presumably intentional since it is never mutated after
// creation; confirm before mutating it through a copy.
func (t *Task) Copy() *Task {
	if t == nil {
		return nil
	}
	c := new(Task)
	*c = *t

	c.Env = helper.CopyMapStringString(c.Env)

	if t.Services != nil {
		svcs := make([]*Service, len(c.Services))
		for i, svc := range c.Services {
			svcs[i] = svc.Copy()
		}
		c.Services = svcs
	}

	c.Constraints = CopySliceConstraints(c.Constraints)
	c.Vault = c.Vault.Copy()
	c.Resources = c.Resources.Copy()
	c.Meta = helper.CopyMapStringString(c.Meta)
	c.DispatchPayload = c.DispatchPayload.Copy()

	if t.Artifacts != nil {
		arts := make([]*TaskArtifact, 0, len(t.Artifacts))
		for _, a := range c.Artifacts {
			arts = append(arts, a.Copy())
		}
		c.Artifacts = arts
	}

	// The driver config is an arbitrary nested map; deep-copy it so the
	// copy cannot alias the original. Failure here is a programmer error.
	if dup, err := copystructure.Copy(c.Config); err != nil {
		panic(err.Error())
	} else {
		c.Config = dup.(map[string]interface{})
	}

	if t.Templates != nil {
		tmpls := make([]*Template, len(t.Templates))
		for i, tmpl := range c.Templates {
			tmpls[i] = tmpl.Copy()
		}
		c.Templates = tmpls
	}

	return c
}
// Canonicalize canonicalizes fields in the task: empty maps become nil,
// nested services/templates/vault are canonicalized, and defaults are
// filled in for resources and the kill timeout.
func (t *Task) Canonicalize(job *Job, tg *TaskGroup) {
	// Treat empty and nil maps identically so reflect-DeepEquals-based
	// scheduling comparisons do not see spurious differences.
	if len(t.Env) == 0 {
		t.Env = nil
	}
	if len(t.Config) == 0 {
		t.Config = nil
	}
	if len(t.Meta) == 0 {
		t.Meta = nil
	}

	for _, svc := range t.Services {
		svc.Canonicalize(job.Name, tg.Name, t.Name)
	}

	// Fall back to default resources when unset; otherwise canonicalize
	// the provided ones.
	switch t.Resources {
	case nil:
		t.Resources = DefaultResources()
	default:
		t.Resources.Canonicalize()
	}

	// Set the default timeout if it is not specified.
	if t.KillTimeout == 0 {
		t.KillTimeout = DefaultKillTimeout
	}

	if t.Vault != nil {
		t.Vault.Canonicalize()
	}

	for _, tmpl := range t.Templates {
		tmpl.Canonicalize()
	}
}
// GoString implements fmt.GoStringer, rendering the task in Go syntax
// with a leading "*" to mark the pointer.
func (t *Task) GoString() string {
	return "*" + fmt.Sprintf("%#v", *t)
}
// Validate is used to sanity check a task. Every problem found is collected
// into a single multierror so callers can report all issues at once; the
// ephemeralDisk of the enclosing task group is used to check that log
// storage fits within the requested disk capacity.
func (t *Task) Validate(ephemeralDisk *EphemeralDisk) error {
	var mErr multierror.Error
	if t.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task name"))
	}
	if strings.ContainsAny(t.Name, `/\`) {
		// We enforce this so that when creating the directory on disk it will
		// not have any slashes.
		mErr.Errors = append(mErr.Errors, errors.New("Task name cannot include slashes"))
	}
	if t.Driver == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task driver"))
	}
	if t.KillTimeout < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("KillTimeout must be a positive value"))
	}
	if t.ShutdownDelay < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("ShutdownDelay must be a positive value"))
	}

	// Validate the resources.
	if t.Resources == nil {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task resources"))
	} else {
		if err := t.Resources.MeetsMinResources(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}

		// Ensure the task isn't asking for disk resources
		if t.Resources.DiskMB > 0 {
			mErr.Errors = append(mErr.Errors, errors.New("Task can't ask for disk resources, they have to be specified at the task group level."))
		}
	}

	// Validate the log config
	if t.LogConfig == nil {
		mErr.Errors = append(mErr.Errors, errors.New("Missing Log Config"))
	} else if err := t.LogConfig.Validate(); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}

	// Validate constraints; distinct_hosts/distinct_property only make
	// sense across task groups, so they are disallowed at task level.
	for idx, constr := range t.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}

		switch constr.Operand {
		case ConstraintDistinctHosts, ConstraintDistinctProperty:
			outer := fmt.Errorf("Constraint %d has disallowed Operand at task level: %s", idx+1, constr.Operand)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	// Validate Services
	if err := validateServices(t); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}

	// Worst-case log usage (all rotated files at max size) must fit in the
	// group's ephemeral disk.
	if t.LogConfig != nil && ephemeralDisk != nil {
		logUsage := (t.LogConfig.MaxFiles * t.LogConfig.MaxFileSizeMB)
		if ephemeralDisk.SizeMB <= logUsage {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("log storage (%d MB) must be less than requested disk capacity (%d MB)",
					logUsage, ephemeralDisk.SizeMB))
		}
	}

	for idx, artifact := range t.Artifacts {
		if err := artifact.Validate(); err != nil {
			outer := fmt.Errorf("Artifact %d validation failed: %v", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	if t.Vault != nil {
		if err := t.Vault.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Vault validation failed: %v", err))
		}
	}

	// Validate templates and reject two templates rendering to the same
	// destination path (1-based indices in the error for readability).
	destinations := make(map[string]int, len(t.Templates))
	for idx, tmpl := range t.Templates {
		if err := tmpl.Validate(); err != nil {
			outer := fmt.Errorf("Template %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}

		if other, ok := destinations[tmpl.DestPath]; ok {
			outer := fmt.Errorf("Template %d has same destination as %d", idx+1, other)
			mErr.Errors = append(mErr.Errors, outer)
		} else {
			destinations[tmpl.DestPath] = idx + 1
		}
	}

	// Validate the dispatch payload block if there
	if t.DispatchPayload != nil {
		if err := t.DispatchPayload.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Payload validation failed: %v", err))
		}
	}

	return mErr.ErrorOrNil()
}
// validateServices takes a task and validates the services within it are valid
// and reference ports that exist. All problems are accumulated into a single
// multierror.
func validateServices(t *Task) error {
	var mErr multierror.Error

	// Ensure that services don't ask for non-existent ports and their names are
	// unique.
	servicePorts := make(map[string][]string)

	// Key duplicate detection on the (name, port label) pair. The previous
	// implementation concatenated the two strings, which let distinct pairs
	// such as ("ab", "c") and ("a", "bc") collide and report a false
	// duplicate.
	type serviceKey struct {
		name      string
		portLabel string
	}
	knownServices := make(map[serviceKey]struct{})
	for i, service := range t.Services {
		if err := service.Validate(); err != nil {
			outer := fmt.Errorf("service[%d] %+q validation failed: %s", i, service.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}

		// Ensure that services with the same name are not being registered for
		// the same port
		key := serviceKey{service.Name, service.PortLabel}
		if _, ok := knownServices[key]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q is duplicate", service.Name))
		}
		knownServices[key] = struct{}{}

		if service.PortLabel != "" {
			servicePorts[service.PortLabel] = append(servicePorts[service.PortLabel], service.Name)
		}

		// Ensure that check names are unique within each service.
		knownChecks := make(map[string]struct{})
		for _, check := range service.Checks {
			if _, ok := knownChecks[check.Name]; ok {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is duplicate", check.Name))
			}
			knownChecks[check.Name] = struct{}{}
		}
	}

	// Get the set of port labels.
	portLabels := make(map[string]struct{})
	if t.Resources != nil {
		for _, network := range t.Resources.Networks {
			for portLabel := range network.PortLabels() {
				portLabels[portLabel] = struct{}{}
			}
		}
	}

	// Ensure all ports referenced in services exist.
	for servicePort, services := range servicePorts {
		if _, ok := portLabels[servicePort]; !ok {
			joined := strings.Join(services, ", ")
			err := fmt.Errorf("port label %q referenced by services %v does not exist", servicePort, joined)
			mErr.Errors = append(mErr.Errors, err)
		}
	}

	return mErr.ErrorOrNil()
}
// Valid values for Template.ChangeMode.
const (
	// TemplateChangeModeNoop marks that no action should be taken if the
	// template is re-rendered
	TemplateChangeModeNoop = "noop"

	// TemplateChangeModeSignal marks that the task should be signaled if the
	// template is re-rendered
	TemplateChangeModeSignal = "signal"

	// TemplateChangeModeRestart marks that the task should be restarted if the
	// template is re-rendered
	TemplateChangeModeRestart = "restart"
)
var (
	// TemplateChangeModeInvalidError is the error for when an invalid change
	// mode is given. Returned from Template.Validate.
	TemplateChangeModeInvalidError = errors.New("Invalid change mode. Must be one of the following: noop, signal, restart")
)
// Template represents a template configuration to be rendered for a given task
type Template struct {
	// SourcePath is the path to the template to be rendered
	SourcePath string

	// DestPath is the path to where the template should be rendered
	DestPath string

	// EmbeddedTmpl store the raw template. This is useful for smaller templates
	// where they are embedded in the job file rather than sent as an artifact
	EmbeddedTmpl string

	// ChangeMode indicates what should be done if the template is re-rendered
	ChangeMode string

	// ChangeSignal is the signal that should be sent if the change mode
	// requires it.
	ChangeSignal string

	// Splay is used to avoid coordinated restarts of processes by applying a
	// random wait between 0 and the given splay value before signalling the
	// application of a change
	Splay time.Duration

	// Perms is the permission the file should be written out with.
	Perms string

	// LeftDelim and RightDelim are optional configurations to control what
	// delimiter is utilized when parsing the template.
	LeftDelim  string
	RightDelim string

	// Envvars enables exposing the template as environment variables
	// instead of as a file. The template must be of the form:
	//
	//	VAR_NAME_1={{ key service/my-key }}
	//	VAR_NAME_2=raw string and {{ env "attr.kernel.name" }}
	//
	// Lines will be split on the initial "=" with the first part being the
	// key name and the second part the value.
	// Empty lines and lines starting with # will be ignored, but to avoid
	// escaping issues #s within lines will not be treated as comments.
	Envvars bool

	// VaultGrace is the grace duration between lease renewal and reacquiring a
	// secret. If the lease of a secret is less than the grace, a new secret is
	// acquired.
	VaultGrace time.Duration
}
// DefaultTemplate returns a default template: restart on re-render, a
// five-second splay, and world-readable file permissions.
func DefaultTemplate() *Template {
	tmpl := new(Template)
	tmpl.ChangeMode = TemplateChangeModeRestart
	tmpl.Splay = 5 * time.Second
	tmpl.Perms = "0644"
	return tmpl
}
// Copy returns a shallow copy of the template, or nil for a nil receiver.
// All fields are value types, so a shallow copy is a full copy.
func (t *Template) Copy() *Template {
	if t == nil {
		return nil
	}
	dup := *t
	return &dup
}
// Canonicalize upper-cases the change signal so it matches conventional
// signal names (e.g. "SIGHUP"); an empty signal stays empty.
func (t *Template) Canonicalize() {
	t.ChangeSignal = strings.ToUpper(t.ChangeSignal)
}
// Validate sanity checks the template block: a source must be given, the
// destination must stay inside the allocation directory, the change mode
// must be recognized, and splay/perms/grace must be well-formed. All
// problems are accumulated into a single multierror.
//
// The original mixed multierror.Append (with its return value discarded)
// and direct slice appends; this uses the direct-append style consistently,
// matching the rest of the file. Behavior is unchanged.
func (t *Template) Validate() error {
	var mErr multierror.Error

	// Verify we have something to render
	if t.SourcePath == "" && t.EmbeddedTmpl == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Must specify a source path or have an embedded template"))
	}

	// Verify we can render somewhere
	if t.DestPath == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Must specify a destination for the template"))
	}

	// Verify the destination doesn't escape the allocation directory
	escaped, err := PathEscapesAllocDir("task", t.DestPath)
	if err != nil {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
	} else if escaped {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
	}

	// Verify a proper change mode; signal mode needs a signal and is
	// incompatible with environment-variable templates.
	switch t.ChangeMode {
	case TemplateChangeModeNoop, TemplateChangeModeRestart:
	case TemplateChangeModeSignal:
		if t.ChangeSignal == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Must specify signal value when change mode is signal"))
		}
		if t.Envvars {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("cannot use signals with env var templates"))
		}
	default:
		mErr.Errors = append(mErr.Errors, TemplateChangeModeInvalidError)
	}

	// Verify the splay is positive
	if t.Splay < 0 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Must specify positive splay value"))
	}

	// Verify the permissions parse as an octal file mode; 12 bits allows
	// the setuid/setgid/sticky bits (up to 07777).
	if t.Perms != "" {
		if _, err := strconv.ParseUint(t.Perms, 8, 12); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Failed to parse %q as octal: %v", t.Perms, err))
		}
	}

	// Verify the Vault grace is non-negative
	if t.VaultGrace < 0 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Vault grace must be greater than zero: %v < 0", t.VaultGrace))
	}

	return mErr.ErrorOrNil()
}
// Set of possible states for a task.
const (
	TaskStatePending = "pending" // The task is waiting to be run.
	TaskStateRunning = "running" // The task is currently running.
	TaskStateDead    = "dead"    // Terminal state of task.
)
// TaskState tracks the current state of a task and events that caused state
// transitions.
type TaskState struct {
	// The current state of the task.
	State string

	// Failed marks a task as having failed
	Failed bool

	// Restarts is the number of times the task has restarted
	Restarts uint64

	// LastRestart is the time the task last restarted. It is updated each time the
	// task restarts
	LastRestart time.Time

	// StartedAt is the time the task is started. It is updated each time the
	// task starts
	StartedAt time.Time

	// FinishedAt is the time at which the task transitioned to dead and will
	// not be started again.
	FinishedAt time.Time

	// Series of task events that transition the state of the task.
	Events []*TaskEvent
}
// Copy returns a deep copy of the task state (events included), or nil for
// a nil receiver.
func (ts *TaskState) Copy() *TaskState {
	if ts == nil {
		return nil
	}
	dup := *ts
	if ts.Events != nil {
		dup.Events = make([]*TaskEvent, len(ts.Events))
		for i, ev := range ts.Events {
			dup.Events[i] = ev.Copy()
		}
	}
	return &dup
}
// Successful returns whether a task finished successfully: it must be dead
// and its most recent event must be a termination with exit code zero.
func (ts *TaskState) Successful() bool {
	if ts.State != TaskStateDead {
		return false
	}
	n := len(ts.Events)
	if n == 0 {
		return false
	}
	last := ts.Events[n-1]
	return last.Type == TaskTerminated && last.ExitCode == 0
}
// Task event types, stored in TaskEvent.Type.
const (
	// TaskSetupFailure indicates that the task could not be started due to a
	// setup failure.
	TaskSetupFailure = "Setup Failure"

	// TaskDriverFailure indicates that the task could not be started due to a
	// failure in the driver.
	TaskDriverFailure = "Driver Failure"

	// TaskReceived signals that the task has been pulled by the client at the
	// given timestamp.
	TaskReceived = "Received"

	// TaskFailedValidation indicates the task was invalid and as such was not
	// run.
	TaskFailedValidation = "Failed Validation"

	// TaskStarted signals that the task was started and its timestamp can be
	// used to determine the running length of the task.
	TaskStarted = "Started"

	// TaskTerminated indicates that the task was started and exited.
	TaskTerminated = "Terminated"

	// TaskKilling indicates a kill signal has been sent to the task.
	TaskKilling = "Killing"

	// TaskKilled indicates a user has killed the task.
	TaskKilled = "Killed"

	// TaskRestarting indicates that task terminated and is being restarted.
	TaskRestarting = "Restarting"

	// TaskNotRestarting indicates that the task has failed and is not being
	// restarted because it has exceeded its restart policy.
	TaskNotRestarting = "Not Restarting"

	// TaskRestartSignal indicates that the task has been signalled to be
	// restarted
	TaskRestartSignal = "Restart Signaled"

	// TaskSignaling indicates that the task is being signalled.
	TaskSignaling = "Signaling"

	// TaskDownloadingArtifacts means the task is downloading the artifacts
	// specified in the task.
	TaskDownloadingArtifacts = "Downloading Artifacts"

	// TaskArtifactDownloadFailed indicates that downloading the artifacts
	// failed.
	TaskArtifactDownloadFailed = "Failed Artifact Download"

	// TaskBuildingTaskDir indicates that the task directory/chroot is being
	// built.
	TaskBuildingTaskDir = "Building Task Directory"

	// TaskSetup indicates the task runner is setting up the task environment
	TaskSetup = "Task Setup"

	// TaskDiskExceeded indicates that one of the tasks in a taskgroup has
	// exceeded the requested disk resources.
	TaskDiskExceeded = "Disk Resources Exceeded"

	// TaskSiblingFailed indicates that a sibling task in the task group has
	// failed.
	TaskSiblingFailed = "Sibling Task Failed"

	// TaskDriverMessage is an informational event message emitted by
	// drivers such as when they're performing a long running action like
	// downloading an image.
	TaskDriverMessage = "Driver"

	// TaskLeaderDead indicates that the leader task within the group has
	// finished.
	TaskLeaderDead = "Leader Task Dead"
)
// TaskEvent is an event that affects the state of a task and contains meta-data
// appropriate to the events type.
type TaskEvent struct {
	Type    string
	Time    int64  // Unix Nanosecond timestamp
	Message string // A possible message explaining the termination of the task.

	// DisplayMessage is a human friendly message about the event
	DisplayMessage string

	// Details is a map with annotated info about the event
	Details map[string]string

	// DEPRECATION NOTICE: The following fields are deprecated and will be removed
	// in a future release. Field values are available in the Details map.

	// FailsTask marks whether this event fails the task.
	// Deprecated, use Details["fails_task"] to access this.
	FailsTask bool

	// Restart fields.
	// Deprecated, use Details["restart_reason"] to access this.
	RestartReason string

	// Setup Failure fields.
	// Deprecated, use Details["setup_error"] to access this.
	SetupError string

	// Driver Failure fields.
	// Deprecated, use Details["driver_error"] to access this.
	DriverError string // A driver error occurred while starting the task.

	// Task Terminated Fields.
	// Deprecated, use Details["exit_code"] to access this.
	ExitCode int // The exit code of the task.

	// Deprecated, use Details["signal"] to access this.
	Signal int // The signal that terminated the task.

	// Killing fields
	// Deprecated, use Details["kill_timeout"] to access this.
	KillTimeout time.Duration

	// Task Killed Fields.
	// Deprecated, use Details["kill_error"] to access this.
	KillError string // Error killing the task.

	// KillReason is the reason the task was killed
	// Deprecated, use Details["kill_reason"] to access this.
	KillReason string

	// TaskRestarting fields.
	// Deprecated, use Details["start_delay"] to access this.
	StartDelay int64 // The sleep period before restarting the task in unix nanoseconds.

	// Artifact Download fields
	// Deprecated, use Details["download_error"] to access this.
	DownloadError string // Error downloading artifacts

	// Validation fields
	// Deprecated, use Details["validation_error"] to access this.
	ValidationError string // Validation error

	// The maximum allowed task disk size.
	// Deprecated, use Details["disk_limit"] to access this.
	DiskLimit int64

	// Name of the sibling task that caused termination of the task that
	// the TaskEvent refers to.
	// Deprecated, use Details["failed_sibling"] to access this.
	FailedSibling string

	// VaultError is the error from token renewal
	// Deprecated, use Details["vault_renewal_error"] to access this.
	VaultError string

	// TaskSignalReason indicates the reason the task is being signalled.
	// Deprecated, use Details["task_signal_reason"] to access this.
	TaskSignalReason string

	// TaskSignal is the signal that was sent to the task
	// Deprecated, use Details["task_signal"] to access this.
	TaskSignal string

	// DriverMessage indicates a driver action being taken.
	// Deprecated, use Details["driver_message"] to access this.
	DriverMessage string

	// GenericSource is the source of a message.
	// Deprecated, is redundant with event type.
	GenericSource string
}
// PopulateEventDisplayMessage fills in DisplayMessage with a human-friendly
// description derived from the event type and its (deprecated) detail
// fields. It is a no-op when the event is nil or when DisplayMessage has
// already been set; unknown event types fall back to the raw Message.
func (event *TaskEvent) PopulateEventDisplayMessage() {
	// Build up the description based on the event type.
	if event == nil { //TODO(preetha) needs investigation alloc_runner's Run method sends a nil event when sigterming nomad. Why?
		return
	}

	if event.DisplayMessage != "" {
		return
	}

	var desc string
	switch event.Type {
	case TaskSetup:
		desc = event.Message
	case TaskStarted:
		desc = "Task started by client"
	case TaskReceived:
		desc = "Task received by client"
	case TaskFailedValidation:
		if event.ValidationError != "" {
			desc = event.ValidationError
		} else {
			desc = "Validation of task failed"
		}
	case TaskSetupFailure:
		if event.SetupError != "" {
			desc = event.SetupError
		} else {
			desc = "Task setup failed"
		}
	case TaskDriverFailure:
		if event.DriverError != "" {
			desc = event.DriverError
		} else {
			desc = "Failed to start task"
		}
	case TaskDownloadingArtifacts:
		desc = "Client is downloading artifacts"
	case TaskArtifactDownloadFailed:
		if event.DownloadError != "" {
			desc = event.DownloadError
		} else {
			desc = "Failed to download artifacts"
		}
	case TaskKilling:
		if event.KillReason != "" {
			desc = fmt.Sprintf("Killing task: %v", event.KillReason)
		} else if event.KillTimeout != 0 {
			desc = fmt.Sprintf("Sent interrupt. Waiting %v before force killing", event.KillTimeout)
		} else {
			desc = "Sent interrupt"
		}
	case TaskKilled:
		if event.KillError != "" {
			desc = event.KillError
		} else {
			desc = "Task successfully killed"
		}
	case TaskTerminated:
		// Always report the exit code; signal and message are optional.
		var parts []string
		parts = append(parts, fmt.Sprintf("Exit Code: %d", event.ExitCode))

		if event.Signal != 0 {
			parts = append(parts, fmt.Sprintf("Signal: %d", event.Signal))
		}

		if event.Message != "" {
			parts = append(parts, fmt.Sprintf("Exit Message: %q", event.Message))
		}
		desc = strings.Join(parts, ", ")
	case TaskRestarting:
		// StartDelay is stored in unix nanoseconds; render it as a Duration.
		in := fmt.Sprintf("Task restarting in %v", time.Duration(event.StartDelay))
		if event.RestartReason != "" && event.RestartReason != ReasonWithinPolicy {
			desc = fmt.Sprintf("%s - %s", event.RestartReason, in)
		} else {
			desc = in
		}
	case TaskNotRestarting:
		if event.RestartReason != "" {
			desc = event.RestartReason
		} else {
			desc = "Task exceeded restart policy"
		}
	case TaskSiblingFailed:
		if event.FailedSibling != "" {
			desc = fmt.Sprintf("Task's sibling %q failed", event.FailedSibling)
		} else {
			desc = "Task's sibling failed"
		}
	case TaskSignaling:
		sig := event.TaskSignal
		reason := event.TaskSignalReason

		if sig == "" && reason == "" {
			desc = "Task being sent a signal"
		} else if sig == "" {
			desc = reason
		} else if reason == "" {
			desc = fmt.Sprintf("Task being sent signal %v", sig)
		} else {
			desc = fmt.Sprintf("Task being sent signal %v: %v", sig, reason)
		}
	case TaskRestartSignal:
		if event.RestartReason != "" {
			desc = event.RestartReason
		} else {
			desc = "Task signaled to restart"
		}
	case TaskDriverMessage:
		desc = event.DriverMessage
	case TaskLeaderDead:
		desc = "Leader Task in Group dead"
	default:
		desc = event.Message
	}

	event.DisplayMessage = desc
}
// GoString renders the event as "<time> - <type>" for debug output.
func (te *TaskEvent) GoString() string {
	return fmt.Sprintf("%v - %v", te.Time, te.Type)
}
// SetMessage sets the message of the TaskEvent, mirroring it into the
// Details map, and returns the event for chaining. Like the other setters
// it writes to Details, so the event must come from NewTaskEvent (which
// allocates the map).
func (te *TaskEvent) SetMessage(msg string) *TaskEvent {
	te.Details["message"] = msg
	te.Message = msg
	return te
}
// Copy returns a shallow copy of the event, or nil for a nil receiver.
// Note the Details map header is copied, so both events share the same
// backing map.
func (te *TaskEvent) Copy() *TaskEvent {
	if te == nil {
		return nil
	}
	dup := *te
	return &dup
}
// NewTaskEvent creates an event of the given type stamped with the current
// time, with an empty (non-nil) Details map ready for the Set* helpers.
func NewTaskEvent(event string) *TaskEvent {
	e := new(TaskEvent)
	e.Type = event
	e.Time = time.Now().UnixNano()
	e.Details = map[string]string{}
	return e
}
// The following fluent setters populate both the deprecated struct field
// and the corresponding Details entry, returning the event for chaining.
// They all write to Details, so the event must have been created with
// NewTaskEvent (which allocates the map).

// SetSetupError is used to store an error that occurred while setting up the
// task
func (e *TaskEvent) SetSetupError(err error) *TaskEvent {
	if err != nil {
		e.SetupError = err.Error()
		e.Details["setup_error"] = err.Error()
	}
	return e
}

// SetFailsTask marks the event as one that fails the task.
func (e *TaskEvent) SetFailsTask() *TaskEvent {
	e.FailsTask = true
	e.Details["fails_task"] = "true"
	return e
}

// SetDriverError records a driver error that occurred while starting the task.
func (e *TaskEvent) SetDriverError(err error) *TaskEvent {
	if err != nil {
		e.DriverError = err.Error()
		e.Details["driver_error"] = err.Error()
	}
	return e
}

// SetExitCode records the task's exit code.
func (e *TaskEvent) SetExitCode(c int) *TaskEvent {
	e.ExitCode = c
	e.Details["exit_code"] = fmt.Sprintf("%d", c)
	return e
}

// SetSignal records the signal number that terminated the task.
func (e *TaskEvent) SetSignal(s int) *TaskEvent {
	e.Signal = s
	e.Details["signal"] = fmt.Sprintf("%d", s)
	return e
}

// SetExitMessage records the error message explaining the task's exit.
func (e *TaskEvent) SetExitMessage(err error) *TaskEvent {
	if err != nil {
		e.Message = err.Error()
		e.Details["exit_message"] = err.Error()
	}
	return e
}

// SetKillError records an error encountered while killing the task.
func (e *TaskEvent) SetKillError(err error) *TaskEvent {
	if err != nil {
		e.KillError = err.Error()
		e.Details["kill_error"] = err.Error()
	}
	return e
}

// SetKillReason records why the task was killed.
func (e *TaskEvent) SetKillReason(r string) *TaskEvent {
	e.KillReason = r
	e.Details["kill_reason"] = r
	return e
}

// SetRestartDelay records the sleep period before the task restarts; the
// duration is stored as nanoseconds.
func (e *TaskEvent) SetRestartDelay(delay time.Duration) *TaskEvent {
	e.StartDelay = int64(delay)
	e.Details["start_delay"] = fmt.Sprintf("%d", delay)
	return e
}

// SetRestartReason records why the task is being restarted.
func (e *TaskEvent) SetRestartReason(reason string) *TaskEvent {
	e.RestartReason = reason
	e.Details["restart_reason"] = reason
	return e
}

// SetTaskSignalReason records why the task is being signalled.
func (e *TaskEvent) SetTaskSignalReason(r string) *TaskEvent {
	e.TaskSignalReason = r
	e.Details["task_signal_reason"] = r
	return e
}

// SetTaskSignal records the signal sent to the task by name.
func (e *TaskEvent) SetTaskSignal(s os.Signal) *TaskEvent {
	e.TaskSignal = s.String()
	e.Details["task_signal"] = s.String()
	return e
}

// SetDownloadError records an error downloading artifacts.
func (e *TaskEvent) SetDownloadError(err error) *TaskEvent {
	if err != nil {
		e.DownloadError = err.Error()
		e.Details["download_error"] = err.Error()
	}
	return e
}

// SetValidationError records a task validation error.
func (e *TaskEvent) SetValidationError(err error) *TaskEvent {
	if err != nil {
		e.ValidationError = err.Error()
		e.Details["validation_error"] = err.Error()
	}
	return e
}

// SetKillTimeout records the grace period given before force-killing.
func (e *TaskEvent) SetKillTimeout(timeout time.Duration) *TaskEvent {
	e.KillTimeout = timeout
	e.Details["kill_timeout"] = timeout.String()
	return e
}

// SetDiskLimit records the maximum allowed task disk size.
func (e *TaskEvent) SetDiskLimit(limit int64) *TaskEvent {
	e.DiskLimit = limit
	e.Details["disk_limit"] = fmt.Sprintf("%d", limit)
	return e
}

// SetFailedSibling records the sibling task whose failure caused this event.
func (e *TaskEvent) SetFailedSibling(sibling string) *TaskEvent {
	e.FailedSibling = sibling
	e.Details["failed_sibling"] = sibling
	return e
}

// SetVaultRenewalError records an error renewing the Vault token.
func (e *TaskEvent) SetVaultRenewalError(err error) *TaskEvent {
	if err != nil {
		e.VaultError = err.Error()
		e.Details["vault_renewal_error"] = err.Error()
	}
	return e
}

// SetDriverMessage records an informational message from the driver.
func (e *TaskEvent) SetDriverMessage(m string) *TaskEvent {
	e.DriverMessage = m
	e.Details["driver_message"] = m
	return e
}
// TaskArtifact is an artifact to download before running the task.
type TaskArtifact struct {
	// GetterSource is the source to download an artifact using go-getter
	GetterSource string

	// GetterOptions are options to use when downloading the artifact using
	// go-getter.
	GetterOptions map[string]string

	// GetterMode is the go-getter.ClientMode for fetching resources.
	// Defaults to "any" but can be set to "file" or "dir".
	GetterMode string

	// RelativeDest is the download destination given relative to the task's
	// directory.
	RelativeDest string
}
// Copy returns a deep copy of the artifact (the getter options map is
// duplicated), or nil for a nil receiver.
func (ta *TaskArtifact) Copy() *TaskArtifact {
	if ta == nil {
		return nil
	}
	dup := *ta
	dup.GetterOptions = helper.CopyMapStringString(ta.GetterOptions)
	return &dup
}
// GoString renders the artifact with field names for debug output.
func (ta *TaskArtifact) GoString() string {
	return fmt.Sprintf("%+v", ta)
}
// PathEscapesAllocDir returns whether the given path escapes the allocation
// directory. The prefix allows adding a prefix if the path will be joined,
// for example a "task/local" prefix may be provided if the path will be
// joined against that prefix.
//
// The check is purely lexical: the path is joined under a placeholder
// allocation root and escapes if its relative form climbs above that root.
func PathEscapesAllocDir(prefix, path string) (bool, error) {
	// Placeholder allocation root; only its position in the tree matters.
	allocRoot, err := filepath.Abs(filepath.Join("/", "alloc-dir/", "alloc-id/"))
	if err != nil {
		return false, err
	}

	resolved, err := filepath.Abs(filepath.Join(allocRoot, prefix, path))
	if err != nil {
		return false, err
	}

	relative, err := filepath.Rel(allocRoot, resolved)
	if err != nil {
		return false, err
	}

	// Any relative form starting with ".." points above the root.
	return strings.HasPrefix(relative, ".."), nil
}
// Validate sanity checks the artifact: a source is required, the getter
// mode must be known (empty defaults to "any" — note this mutates the
// receiver), the destination must stay inside the allocation directory,
// and any "checksum" getter option must be a well-formed "type:value" pair
// whose decoded length matches the algorithm. Checksum problems return
// early since later checks depend on earlier ones parsing.
func (ta *TaskArtifact) Validate() error {
	// Verify the source
	var mErr multierror.Error
	if ta.GetterSource == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("source must be specified"))
	}

	switch ta.GetterMode {
	case "":
		// Default to any
		ta.GetterMode = GetterModeAny
	case GetterModeAny, GetterModeFile, GetterModeDir:
		// Ok
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid artifact mode %q; must be one of: %s, %s, %s",
			ta.GetterMode, GetterModeAny, GetterModeFile, GetterModeDir))
	}

	escaped, err := PathEscapesAllocDir("task", ta.RelativeDest)
	if err != nil {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
	} else if escaped {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
	}

	// Verify the checksum
	if check, ok := ta.GetterOptions["checksum"]; ok {
		check = strings.TrimSpace(check)
		if check == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("checksum value cannot be empty"))
			return mErr.ErrorOrNil()
		}

		parts := strings.Split(check, ":")
		if l := len(parts); l != 2 {
			mErr.Errors = append(mErr.Errors, fmt.Errorf(`checksum must be given as "type:value"; got %q`, check))
			return mErr.ErrorOrNil()
		}

		checksumVal := parts[1]
		checksumBytes, err := hex.DecodeString(checksumVal)
		if err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid checksum: %v", err))
			return mErr.ErrorOrNil()
		}

		// The decoded byte length must match the digest size of the
		// declared algorithm.
		checksumType := parts[0]
		expectedLength := 0
		switch checksumType {
		case "md5":
			expectedLength = md5.Size
		case "sha1":
			expectedLength = sha1.Size
		case "sha256":
			expectedLength = sha256.Size
		case "sha512":
			expectedLength = sha512.Size
		default:
			mErr.Errors = append(mErr.Errors, fmt.Errorf("unsupported checksum type: %s", checksumType))
			return mErr.ErrorOrNil()
		}

		if len(checksumBytes) != expectedLength {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid %s checksum: %v", checksumType, checksumVal))
			return mErr.ErrorOrNil()
		}
	}

	return mErr.ErrorOrNil()
}
// Special constraint operands beyond the comparison operators.
const (
	ConstraintDistinctProperty = "distinct_property"
	ConstraintDistinctHosts    = "distinct_hosts"
	ConstraintRegex            = "regexp"
	ConstraintVersion          = "version"
	ConstraintSetContains      = "set_contains"
)
// Constraints are used to restrict placement options.
type Constraint struct {
	LTarget string // Left-hand target
	RTarget string // Right-hand target
	Operand string // Constraint operand (<=, <, =, !=, >, >=), contains, near
	str     string // Memoized string form, built lazily by String
}
// Equal checks if two constraints are equal, comparing the target and
// operand fields (the memoized string form is ignored).
func (c *Constraint) Equal(o *Constraint) bool {
	if c.LTarget != o.LTarget {
		return false
	}
	if c.RTarget != o.RTarget {
		return false
	}
	return c.Operand == o.Operand
}
// Copy returns a shallow copy of the constraint, or nil for a nil receiver.
func (c *Constraint) Copy() *Constraint {
	if c == nil {
		return nil
	}
	dup := *c
	return &dup
}
// String renders the constraint as "LTarget Operand RTarget", memoizing
// the result in c.str after the first call.
func (c *Constraint) String() string {
	if c.str == "" {
		c.str = fmt.Sprintf("%s %s %s", c.LTarget, c.Operand, c.RTarget)
	}
	return c.str
}
// Validate sanity checks the constraint: the operand must be recognized,
// operand-specific requirements on RTarget must hold (regex compiles,
// version constraint parses, distinct_property count is a positive uint),
// and an LTarget is required for every operand except distinct_hosts.
func (c *Constraint) Validate() error {
	var mErr multierror.Error
	if c.Operand == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing constraint operand"))
	}

	// requireLtarget specifies whether the constraint requires an LTarget to be
	// provided.
	requireLtarget := true

	// Perform additional validation based on operand
	switch c.Operand {
	case ConstraintDistinctHosts:
		requireLtarget = false
	case ConstraintSetContains:
		if c.RTarget == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Set contains constraint requires an RTarget"))
		}
	case ConstraintRegex:
		if _, err := regexp.Compile(c.RTarget); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Regular expression failed to compile: %v", err))
		}
	case ConstraintVersion:
		if _, err := version.NewConstraint(c.RTarget); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Version constraint is invalid: %v", err))
		}
	case ConstraintDistinctProperty:
		// If a count is set, make sure it is convertible to a uint64
		if c.RTarget != "" {
			count, err := strconv.ParseUint(c.RTarget, 10, 64)
			if err != nil {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("Failed to convert RTarget %q to uint64: %v", c.RTarget, err))
			} else if count < 1 {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("Distinct Property must have an allowed count of 1 or greater: %d < 1", count))
			}
		}
	case "=", "==", "is", "!=", "not", "<", "<=", ">", ">=":
		if c.RTarget == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q requires an RTarget", c.Operand))
		}
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Unknown constraint type %q", c.Operand))
	}

	// Ensure we have an LTarget for the constraints that need one
	if requireLtarget && c.LTarget == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("No LTarget provided but is required by constraint"))
	}

	return mErr.ErrorOrNil()
}
// EphemeralDisk is an ephemeral disk object describing the local scratch
// storage requested for a task group.
type EphemeralDisk struct {
	// Sticky indicates whether the allocation is sticky to a node
	Sticky bool

	// SizeMB is the size of the local disk, in megabytes
	SizeMB int

	// Migrate determines if Nomad client should migrate the allocation dir for
	// sticky allocations
	Migrate bool
}
// DefaultEphemeralDisk returns an EphemeralDisk with the default 300MB size
// and all other fields left at their zero values.
func DefaultEphemeralDisk() *EphemeralDisk {
	d := new(EphemeralDisk)
	d.SizeMB = 300
	return d
}
// Validate returns an error unless the requested disk is at least 10MB.
func (d *EphemeralDisk) Validate() error {
	if d.SizeMB >= 10 {
		return nil
	}
	return fmt.Errorf("minimum DiskMB value is 10; got %d", d.SizeMB)
}
// Copy copies the EphemeralDisk struct and returns a new one. A nil receiver
// yields nil, matching the behavior of the other Copy methods in this file
// (Constraint.Copy, Vault.Copy, Deployment.Copy, AllocMetric.Copy).
func (d *EphemeralDisk) Copy() *EphemeralDisk {
	if d == nil {
		return nil
	}
	ld := new(EphemeralDisk)
	*ld = *d
	return ld
}
// Vault token change modes: how a task reacts when its Vault token is
// replaced (see Vault.ChangeMode).
const (
	// VaultChangeModeNoop takes no action when a new token is retrieved.
	VaultChangeModeNoop = "noop"

	// VaultChangeModeSignal signals the task when a new token is retrieved.
	VaultChangeModeSignal = "signal"

	// VaultChangeModeRestart restarts the task when a new token is retrieved.
	VaultChangeModeRestart = "restart"
)
// Vault stores the set of permissions a task needs access to from Vault.
type Vault struct {
	// Policies is the set of policies that the task needs access to
	Policies []string

	// Env marks whether the Vault Token should be exposed as an environment
	// variable
	Env bool

	// ChangeMode is used to configure the task's behavior when the Vault
	// token changes because the original token could not be renewed in time.
	// Must be one of the VaultChangeMode* constants.
	ChangeMode string

	// ChangeSignal is the signal sent to the task when a new token is
	// retrieved. This is only valid when using the signal change mode.
	// Canonicalize upper-cases it (e.g. "sighup" -> "SIGHUP").
	ChangeSignal string
}
// DefaultVaultBlock returns a Vault block with the default settings: the
// token exposed via the environment and a restart on token change.
func DefaultVaultBlock() *Vault {
	v := new(Vault)
	v.Env = true
	v.ChangeMode = VaultChangeModeRestart
	return v
}
// Copy returns a shallow copy of this Vault block, or nil for a nil receiver.
func (v *Vault) Copy() *Vault {
	if v == nil {
		return nil
	}
	dup := *v
	return &dup
}
// Canonicalize normalizes the Vault block in place, upper-casing ChangeSignal
// so signal names compare consistently (e.g. "sighup" -> "SIGHUP"). A nil
// receiver is a no-op, matching the nil tolerance of Vault.Validate.
func (v *Vault) Canonicalize() {
	if v == nil {
		return
	}
	if v.ChangeSignal != "" {
		v.ChangeSignal = strings.ToUpper(v.ChangeSignal)
	}
}
// Validate returns an error if the Vault block is invalid: an empty policy
// list, use of the reserved "root" policy, a signal change mode without a
// signal, or an unknown change mode. A nil block is considered valid.
func (v *Vault) Validate() error {
	if v == nil {
		return nil
	}

	var mErr multierror.Error
	if len(v.Policies) == 0 {
		// NOTE: multierror.Append mutates mErr in place because a
		// *multierror.Error is passed; the returned value is intentionally
		// discarded throughout this method.
		multierror.Append(&mErr, fmt.Errorf("Policy list cannot be empty"))
	}

	for _, p := range v.Policies {
		if p == "root" {
			multierror.Append(&mErr, fmt.Errorf("Can not specify \"root\" policy"))
		}
	}

	switch v.ChangeMode {
	case VaultChangeModeSignal:
		if v.ChangeSignal == "" {
			multierror.Append(&mErr, fmt.Errorf("Signal must be specified when using change mode %q", VaultChangeModeSignal))
		}
	case VaultChangeModeNoop, VaultChangeModeRestart:
		// Valid modes with no additional requirements.
	default:
		multierror.Append(&mErr, fmt.Errorf("Unknown change mode %q", v.ChangeMode))
	}

	return mErr.ErrorOrNil()
}
const (
	// DeploymentStatuses are the various states a deployment can be be in.
	// Running and paused are the active states (see Deployment.Active); the
	// remaining statuses are terminal.
	DeploymentStatusRunning    = "running"
	DeploymentStatusPaused     = "paused"
	DeploymentStatusFailed     = "failed"
	DeploymentStatusSuccessful = "successful"
	DeploymentStatusCancelled  = "cancelled"

	// DeploymentStatusDescriptions are the various descriptions of the states a
	// deployment can be in. They are human-readable companions to the status
	// constants above.
	DeploymentStatusDescriptionRunning               = "Deployment is running"
	DeploymentStatusDescriptionRunningNeedsPromotion = "Deployment is running but requires promotion"
	DeploymentStatusDescriptionPaused                = "Deployment is paused"
	DeploymentStatusDescriptionSuccessful            = "Deployment completed successfully"
	DeploymentStatusDescriptionStoppedJob            = "Cancelled because job is stopped"
	DeploymentStatusDescriptionNewerJob              = "Cancelled due to newer version of job"
	DeploymentStatusDescriptionFailedAllocations     = "Failed due to unhealthy allocations"
	DeploymentStatusDescriptionFailedByUser          = "Deployment marked as failed"
)
// DeploymentStatusDescriptionRollback is used to get the status description of
// a deployment when rolling back to an older job.
func DeploymentStatusDescriptionRollback(baseDescription string, jobVersion uint64) string {
	suffix := fmt.Sprintf(" - rolling back to job version %d", jobVersion)
	return baseDescription + suffix
}
// DeploymentStatusDescriptionRollbackNoop is used to get the status description of
// a deployment when rolling back is not possible because it has the same specification.
func DeploymentStatusDescriptionRollbackNoop(baseDescription string, jobVersion uint64) string {
	suffix := fmt.Sprintf(" - not rolling back to stable job version %d as current job has same specification", jobVersion)
	return baseDescription + suffix
}
// DeploymentStatusDescriptionNoRollbackTarget is used to get the status description of
// a deployment when there is no target to roll back to but auto-revert is desired.
func DeploymentStatusDescriptionNoRollbackTarget(baseDescription string) string {
	return baseDescription + " - no stable job version to auto revert to"
}
// Deployment is the object that represents a job deployment which is used to
// transition a job between versions.
type Deployment struct {
	// ID is a generated UUID for the deployment
	ID string

	// Namespace is the namespace the deployment is created in
	Namespace string

	// JobID is the job the deployment is created for
	JobID string

	// JobVersion is the version of the job at which the deployment is tracking
	JobVersion uint64

	// JobModifyIndex is the modify index of the job at which the deployment is tracking
	JobModifyIndex uint64

	// JobCreateIndex is the create index of the job which the deployment is
	// tracking. It is needed so that if the job gets stopped and reran we can
	// present the correct list of deployments for the job and not old ones.
	JobCreateIndex uint64

	// TaskGroups is the set of task groups effected by the deployment and their
	// current deployment status.
	TaskGroups map[string]*DeploymentState

	// The status of the deployment; one of the DeploymentStatus* constants.
	Status string

	// StatusDescription allows a human readable description of the deployment
	// status.
	StatusDescription string

	// CreateIndex and ModifyIndex are the Raft indexes at which the
	// deployment was created and last modified.
	CreateIndex uint64
	ModifyIndex uint64
}
// NewDeployment creates a new running deployment for the given job, tracking
// the job's current version and indexes.
func NewDeployment(job *Job) *Deployment {
	d := &Deployment{
		ID:                uuid.Generate(),
		Namespace:         job.Namespace,
		JobID:             job.ID,
		JobVersion:        job.Version,
		JobModifyIndex:    job.ModifyIndex,
		JobCreateIndex:    job.CreateIndex,
		Status:            DeploymentStatusRunning,
		StatusDescription: DeploymentStatusDescriptionRunning,
	}
	// One DeploymentState entry will be tracked per task group.
	d.TaskGroups = make(map[string]*DeploymentState, len(job.TaskGroups))
	return d
}
// Copy returns a copy of the deployment with its own TaskGroups map; the
// per-group states are deep-copied. A nil receiver yields nil.
func (d *Deployment) Copy() *Deployment {
	if d == nil {
		return nil
	}

	c := new(Deployment)
	*c = *d

	if d.TaskGroups == nil {
		c.TaskGroups = nil
		return c
	}

	c.TaskGroups = make(map[string]*DeploymentState, len(d.TaskGroups))
	for name, state := range d.TaskGroups {
		c.TaskGroups[name] = state.Copy()
	}
	return c
}
// Active returns whether the deployment is active or terminal. Running and
// paused are the only active states.
func (d *Deployment) Active() bool {
	return d.Status == DeploymentStatusRunning || d.Status == DeploymentStatusPaused
}
// GetID is a helper for getting the ID when the object may be nil; a nil
// receiver yields the empty string.
func (d *Deployment) GetID() string {
	if d != nil {
		return d.ID
	}
	return ""
}
// HasPlacedCanaries returns whether any task group in the deployment has
// placed at least one canary allocation. A nil receiver yields false.
func (d *Deployment) HasPlacedCanaries() bool {
	if d == nil {
		return false
	}
	for _, state := range d.TaskGroups {
		if len(state.PlacedCanaries) > 0 {
			return true
		}
	}
	return false
}
// RequiresPromotion returns whether the deployment requires promotion to
// continue: it is running and some task group wants canaries that have not
// yet been promoted.
func (d *Deployment) RequiresPromotion() bool {
	if d == nil || d.Status != DeploymentStatusRunning {
		return false
	}
	for _, state := range d.TaskGroups {
		if state.DesiredCanaries > 0 && !state.Promoted {
			return true
		}
	}
	return false
}
// GoString renders the deployment and each task group's state for debugging.
func (d *Deployment) GoString() string {
	var b strings.Builder
	fmt.Fprintf(&b, "Deployment ID %q for job %q has status %q (%v):", d.ID, d.JobID, d.Status, d.StatusDescription)
	for group, state := range d.TaskGroups {
		fmt.Fprintf(&b, "\nTask Group %q has state:\n%#v", group, state)
	}
	return b.String()
}
// DeploymentState tracks the state of a deployment for a given task group.
type DeploymentState struct {
	// AutoRevert marks whether the task group has indicated the job should be
	// reverted on failure
	AutoRevert bool

	// Promoted marks whether the canaries have been promoted
	Promoted bool

	// PlacedCanaries is the set of placed canary allocations (allocation IDs)
	PlacedCanaries []string

	// DesiredCanaries is the number of canaries that should be created.
	DesiredCanaries int

	// DesiredTotal is the total number of allocations that should be created as
	// part of the deployment.
	DesiredTotal int

	// PlacedAllocs is the number of allocations that have been placed
	PlacedAllocs int

	// HealthyAllocs is the number of allocations that have been marked healthy.
	HealthyAllocs int

	// UnhealthyAllocs are allocations that have been marked as unhealthy.
	UnhealthyAllocs int
}
// GoString renders the deployment state one field per line for debugging.
func (d *DeploymentState) GoString() string {
	var b strings.Builder
	fmt.Fprintf(&b, "\tDesired Total: %d", d.DesiredTotal)
	fmt.Fprintf(&b, "\n\tDesired Canaries: %d", d.DesiredCanaries)
	fmt.Fprintf(&b, "\n\tPlaced Canaries: %#v", d.PlacedCanaries)
	fmt.Fprintf(&b, "\n\tPromoted: %v", d.Promoted)
	fmt.Fprintf(&b, "\n\tPlaced: %d", d.PlacedAllocs)
	fmt.Fprintf(&b, "\n\tHealthy: %d", d.HealthyAllocs)
	fmt.Fprintf(&b, "\n\tUnhealthy: %d", d.UnhealthyAllocs)
	fmt.Fprintf(&b, "\n\tAutoRevert: %v", d.AutoRevert)
	return b.String()
}
// Copy returns a copy of the deployment state with its own PlacedCanaries
// slice. A nil receiver yields nil, matching the other Copy methods in this
// file (Constraint.Copy, Vault.Copy, Deployment.Copy, AllocMetric.Copy).
func (d *DeploymentState) Copy() *DeploymentState {
	if d == nil {
		return nil
	}
	c := &DeploymentState{}
	*c = *d
	c.PlacedCanaries = helper.CopySliceString(d.PlacedCanaries)
	return c
}
// DeploymentStatusUpdate is used to update the status of a given deployment;
// see Plan.DeploymentUpdates for how these are applied.
type DeploymentStatusUpdate struct {
	// DeploymentID is the ID of the deployment to update
	DeploymentID string

	// Status is the new status of the deployment.
	Status string

	// StatusDescription is the new status description of the deployment.
	StatusDescription string
}
// AllocDesiredStatus* are the server-side desired states of an allocation
// (Allocation.DesiredStatus).
const (
	AllocDesiredStatusRun   = "run"   // Allocation should run
	AllocDesiredStatusStop  = "stop"  // Allocation should stop
	AllocDesiredStatusEvict = "evict" // Allocation should stop, and was evicted
)
// AllocClientStatus* are the states the client reports for an allocation
// (Allocation.ClientStatus). Complete, failed, and lost are terminal; see
// Allocation.Terminated.
const (
	AllocClientStatusPending  = "pending"
	AllocClientStatusRunning  = "running"
	AllocClientStatusComplete = "complete"
	AllocClientStatusFailed   = "failed"
	AllocClientStatusLost     = "lost"
)
// Allocation is used to allocate the placement of a task group to a node.
type Allocation struct {
	// ID of the allocation (UUID)
	ID string

	// Namespace is the namespace the allocation is created in
	Namespace string

	// ID of the evaluation that generated this allocation
	EvalID string

	// Name is a logical name of the allocation.
	Name string

	// NodeID is the node this is being placed on
	NodeID string

	// Job is the parent job of the task group being allocated.
	// This is copied at allocation time to avoid issues if the job
	// definition is updated.
	JobID string
	Job   *Job

	// TaskGroup is the name of the task group that should be run
	TaskGroup string

	// Resources is the total set of resources allocated as part
	// of this allocation of the task group.
	Resources *Resources

	// SharedResources are the resources that are shared by all the tasks in an
	// allocation
	SharedResources *Resources

	// TaskResources is the set of resources allocated to each
	// task. These should sum to the total Resources.
	TaskResources map[string]*Resources

	// Metrics associated with this allocation
	Metrics *AllocMetric

	// Desired Status of the allocation on the client; one of the
	// AllocDesiredStatus* constants.
	DesiredStatus string

	// DesiredDescription is meant to provide more human useful information
	DesiredDescription string

	// Status of the allocation on the client; one of the AllocClientStatus*
	// constants.
	ClientStatus string

	// ClientDescription is meant to provide more human useful information
	ClientDescription string

	// TaskStates stores the state of each task.
	TaskStates map[string]*TaskState

	// PreviousAllocation is the allocation that this allocation is replacing
	PreviousAllocation string

	// DeploymentID identifies an allocation as being created from a
	// particular deployment
	DeploymentID string

	// DeploymentStatus captures the status of the allocation as part of the
	// given deployment
	DeploymentStatus *AllocDeploymentStatus

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64

	// AllocModifyIndex is not updated when the client updates allocations. This
	// lets the client pull only the allocs updated by the server.
	AllocModifyIndex uint64

	// CreateTime is the time the allocation has finished scheduling and been
	// verified by the plan applier.
	CreateTime int64

	// ModifyTime is the time the allocation was last updated.
	ModifyTime int64
}
// Index returns the index of the allocation. If the allocation is from a task
// group with count greater than 1, there will be multiple allocations for it.
//
// The index is parsed out of the allocation name, which has the form
// "<job>.<group>[<index>]".
func (a *Allocation) Index() uint {
	nameLen := len(a.Name)
	// The "+ 2" skips the '.' separator and the opening '['.
	prefix := len(a.JobID) + len(a.TaskGroup) + 2
	if nameLen <= 3 || nameLen <= prefix {
		return 0
	}

	// Drop the trailing ']' and parse; a parse failure yields 0.
	idx, _ := strconv.Atoi(a.Name[prefix : nameLen-1])
	return uint(idx)
}
// Copy returns a deep copy of the allocation, including its Job.
func (a *Allocation) Copy() *Allocation {
	return a.copyImpl(true)
}
// CopySkipJob provides a copy of the allocation but doesn't deep copy the job
func (a *Allocation) CopySkipJob() *Allocation {
	return a.copyImpl(false)
}
// copyImpl returns a copy of the allocation, deep-copying the Job only when
// job is true. A nil receiver yields nil.
func (a *Allocation) copyImpl(job bool) *Allocation {
	if a == nil {
		return nil
	}

	na := new(Allocation)
	*na = *a

	if job {
		na.Job = na.Job.Copy()
	}
	na.Resources = na.Resources.Copy()
	na.SharedResources = na.SharedResources.Copy()
	na.Metrics = na.Metrics.Copy()
	na.DeploymentStatus = na.DeploymentStatus.Copy()

	if a.TaskResources != nil {
		tr := make(map[string]*Resources, len(a.TaskResources))
		for name, r := range a.TaskResources {
			tr[name] = r.Copy()
		}
		na.TaskResources = tr
	}

	if a.TaskStates != nil {
		ts := make(map[string]*TaskState, len(a.TaskStates))
		for name, s := range a.TaskStates {
			ts[name] = s.Copy()
		}
		na.TaskStates = ts
	}
	return na
}
// TerminalStatus returns if the desired or actual status is terminal and
// will no longer transition.
func (a *Allocation) TerminalStatus() bool {
	// A stop or evict request is terminal regardless of what the client last
	// reported.
	if a.DesiredStatus == AllocDesiredStatusStop || a.DesiredStatus == AllocDesiredStatusEvict {
		return true
	}

	switch a.ClientStatus {
	case AllocClientStatusComplete, AllocClientStatusFailed, AllocClientStatusLost:
		return true
	}
	return false
}
// Terminated returns if the allocation is in a terminal state on a client:
// failed, complete, or lost.
func (a *Allocation) Terminated() bool {
	switch a.ClientStatus {
	case AllocClientStatusFailed, AllocClientStatusComplete, AllocClientStatusLost:
		return true
	default:
		return false
	}
}
// RanSuccessfully returns whether the client has ran the allocation and all
// tasks finished successfully, i.e. the client status is "complete".
func (a *Allocation) RanSuccessfully() bool {
	return a.ClientStatus == AllocClientStatusComplete
}
// ShouldMigrate returns if the allocation needs data migration: it replaces a
// previous allocation, is not being stopped or evicted, and its task group
// requests a sticky ephemeral disk with migration enabled.
func (a *Allocation) ShouldMigrate() bool {
	// Nothing to migrate from.
	if a.PreviousAllocation == "" {
		return false
	}

	// Stopping or evicted allocations don't carry data forward.
	if a.DesiredStatus == AllocDesiredStatusStop || a.DesiredStatus == AllocDesiredStatusEvict {
		return false
	}

	// If the task group is nil or the ephemeral disk block isn't present then
	// we won't migrate.
	tg := a.Job.LookupTaskGroup(a.TaskGroup)
	if tg == nil || tg.EphemeralDisk == nil {
		return false
	}

	// Migration requires both the migrate flag and a sticky disk.
	return tg.EphemeralDisk.Migrate && tg.EphemeralDisk.Sticky
}
// Stub returns a list stub for the allocation, a reduced view suitable for
// list endpoints. Note this reads a.Job.Version, so the allocation's Job must
// be set.
func (a *Allocation) Stub() *AllocListStub {
	return &AllocListStub{
		ID:                 a.ID,
		EvalID:             a.EvalID,
		Name:               a.Name,
		NodeID:             a.NodeID,
		JobID:              a.JobID,
		JobVersion:         a.Job.Version,
		TaskGroup:          a.TaskGroup,
		DesiredStatus:      a.DesiredStatus,
		DesiredDescription: a.DesiredDescription,
		ClientStatus:       a.ClientStatus,
		ClientDescription:  a.ClientDescription,
		TaskStates:         a.TaskStates,
		DeploymentStatus:   a.DeploymentStatus,
		CreateIndex:        a.CreateIndex,
		ModifyIndex:        a.ModifyIndex,
		CreateTime:         a.CreateTime,
		ModifyTime:         a.ModifyTime,
	}
}
// AllocListStub is used to return a subset of alloc information. The fields
// mirror their Allocation counterparts; see Allocation for their meanings.
type AllocListStub struct {
	ID                 string
	EvalID             string
	Name               string
	NodeID             string
	JobID              string
	JobVersion         uint64
	TaskGroup          string
	DesiredStatus      string
	DesiredDescription string
	ClientStatus       string
	ClientDescription  string
	TaskStates         map[string]*TaskState
	DeploymentStatus   *AllocDeploymentStatus
	CreateIndex        uint64
	ModifyIndex        uint64
	CreateTime         int64
	ModifyTime         int64
}
// AllocMetric is used to track various metrics while attempting
// to make an allocation. These are used to debug a job, or to better
// understand the pressure within the system. The map fields are lazily
// initialized by the recording helpers (FilterNode, ExhaustedNode, ScoreNode).
type AllocMetric struct {
	// NodesEvaluated is the number of nodes that were evaluated
	NodesEvaluated int

	// NodesFiltered is the number of nodes filtered due to a constraint
	NodesFiltered int

	// NodesAvailable is the number of nodes available for evaluation per DC.
	NodesAvailable map[string]int

	// ClassFiltered is the number of nodes filtered by class
	ClassFiltered map[string]int

	// ConstraintFiltered is the number of failures caused by constraint
	ConstraintFiltered map[string]int

	// NodesExhausted is the number of nodes skipped due to being
	// exhausted of at least one resource
	NodesExhausted int

	// ClassExhausted is the number of nodes exhausted by class
	ClassExhausted map[string]int

	// DimensionExhausted provides the count by dimension or reason
	DimensionExhausted map[string]int

	// QuotaExhausted provides the exhausted dimensions
	QuotaExhausted []string

	// Scores is the scores of the final few nodes remaining
	// for placement. The top score is typically selected.
	Scores map[string]float64

	// AllocationTime is a measure of how long the allocation
	// attempt took. This can affect performance and SLAs.
	AllocationTime time.Duration

	// CoalescedFailures indicates the number of other
	// allocations that were coalesced into this failed allocation.
	// This is to prevent creating many failed allocations for a
	// single task group.
	CoalescedFailures int
}
// Copy returns a deep copy of the metrics, duplicating every map and slice so
// the copy can be mutated independently. A nil receiver yields nil.
func (a *AllocMetric) Copy() *AllocMetric {
	if a == nil {
		return nil
	}
	c := new(AllocMetric)
	*c = *a
	c.NodesAvailable = helper.CopyMapStringInt(a.NodesAvailable)
	c.ClassFiltered = helper.CopyMapStringInt(a.ClassFiltered)
	c.ConstraintFiltered = helper.CopyMapStringInt(a.ConstraintFiltered)
	c.ClassExhausted = helper.CopyMapStringInt(a.ClassExhausted)
	c.DimensionExhausted = helper.CopyMapStringInt(a.DimensionExhausted)
	c.QuotaExhausted = helper.CopySliceString(a.QuotaExhausted)
	c.Scores = helper.CopyMapStringFloat64(a.Scores)
	return c
}
// EvaluateNode records that one more node was evaluated.
func (a *AllocMetric) EvaluateNode() {
	a.NodesEvaluated++
}
// FilterNode records that a node was filtered out, attributing the filter to
// the node's class and the given constraint when available.
func (a *AllocMetric) FilterNode(node *Node, constraint string) {
	a.NodesFiltered++

	if node != nil && node.NodeClass != "" {
		if a.ClassFiltered == nil {
			a.ClassFiltered = make(map[string]int)
		}
		a.ClassFiltered[node.NodeClass]++
	}

	if constraint != "" {
		if a.ConstraintFiltered == nil {
			a.ConstraintFiltered = make(map[string]int)
		}
		a.ConstraintFiltered[constraint]++
	}
}
// ExhaustedNode records that a node was skipped for lack of resources,
// attributing it to the node's class and the exhausted dimension when known.
func (a *AllocMetric) ExhaustedNode(node *Node, dimension string) {
	a.NodesExhausted++

	if node != nil && node.NodeClass != "" {
		if a.ClassExhausted == nil {
			a.ClassExhausted = make(map[string]int)
		}
		a.ClassExhausted[node.NodeClass]++
	}

	if dimension != "" {
		if a.DimensionExhausted == nil {
			a.DimensionExhausted = make(map[string]int)
		}
		a.DimensionExhausted[dimension]++
	}
}
// ExhaustQuota records the quota dimensions that were exhausted while
// attempting the allocation.
func (a *AllocMetric) ExhaustQuota(dimensions []string) {
	if a.QuotaExhausted == nil {
		// Explicit make (rather than appending to nil) so QuotaExhausted is
		// non-nil once this has been called, even with empty dimensions.
		a.QuotaExhausted = make([]string, 0, len(dimensions))
	}

	a.QuotaExhausted = append(a.QuotaExhausted, dimensions...)
}
// ScoreNode records a placement score for the node under the key
// "<node ID>.<name>", lazily creating the Scores map.
func (a *AllocMetric) ScoreNode(node *Node, name string, score float64) {
	if a.Scores == nil {
		a.Scores = make(map[string]float64)
	}
	a.Scores[node.ID+"."+name] = score
}
// AllocDeploymentStatus captures the status of the allocation as part of the
// deployment. This can include things like if the allocation has been marked as
// healthy.
type AllocDeploymentStatus struct {
	// Healthy marks whether the allocation has been marked healthy or unhealthy
	// as part of a deployment. It can be unset (nil) if it has neither been
	// marked healthy or unhealthy.
	Healthy *bool

	// ModifyIndex is the raft index in which the deployment status was last
	// changed.
	ModifyIndex uint64
}
// IsHealthy returns if the allocation has been explicitly marked healthy as
// part of a deployment; an unset status (nil receiver or nil Healthy) is not
// healthy.
func (a *AllocDeploymentStatus) IsHealthy() bool {
	if a == nil || a.Healthy == nil {
		return false
	}
	return *a.Healthy
}
// IsUnhealthy returns if the allocation has been explicitly marked unhealthy
// as part of a deployment; an unset status (nil receiver or nil Healthy) is
// not unhealthy.
func (a *AllocDeploymentStatus) IsUnhealthy() bool {
	if a == nil || a.Healthy == nil {
		return false
	}
	return !*a.Healthy
}
// Copy returns a copy of the status with its own Healthy pointer so the
// original cannot be mutated through the copy. A nil receiver yields nil.
func (a *AllocDeploymentStatus) Copy() *AllocDeploymentStatus {
	if a == nil {
		return nil
	}

	c := *a
	if a.Healthy != nil {
		c.Healthy = helper.BoolToPtr(*a.Healthy)
	}
	return &c
}
// EvalStatus* are the states of an evaluation. Complete, failed, and
// cancelled are terminal (see Evaluation.TerminalStatus). Note the constant
// value uses the single-l spelling "canceled", unlike
// DeploymentStatusCancelled ("cancelled"); the value must not be changed.
const (
	EvalStatusBlocked   = "blocked"
	EvalStatusPending   = "pending"
	EvalStatusComplete  = "complete"
	EvalStatusFailed    = "failed"
	EvalStatusCancelled = "canceled"
)
// EvalTrigger* name the events that cause an evaluation to be created
// (Evaluation.TriggeredBy).
const (
	EvalTriggerJobRegister       = "job-register"
	EvalTriggerJobDeregister     = "job-deregister"
	EvalTriggerPeriodicJob       = "periodic-job"
	EvalTriggerNodeUpdate        = "node-update"
	EvalTriggerScheduled         = "scheduled"
	EvalTriggerRollingUpdate     = "rolling-update"
	EvalTriggerDeploymentWatcher = "deployment-watcher"
	EvalTriggerFailedFollowUp    = "failed-follow-up"
	EvalTriggerMaxPlans          = "max-plan-attempts"
)
// CoreJob* name the internal garbage-collection job types.
const (
	// CoreJobEvalGC is used for the garbage collection of evaluations
	// and allocations. We periodically scan evaluations in a terminal state,
	// in which all the corresponding allocations are also terminal. We
	// delete these out of the system to bound the state.
	CoreJobEvalGC = "eval-gc"

	// CoreJobNodeGC is used for the garbage collection of failed nodes.
	// We periodically scan nodes in a terminal state, and if they have no
	// corresponding allocations we delete these out of the system.
	CoreJobNodeGC = "node-gc"

	// CoreJobJobGC is used for the garbage collection of eligible jobs. We
	// periodically scan garbage collectible jobs and check if both their
	// evaluations and allocations are terminal. If so, we delete these out of
	// the system.
	CoreJobJobGC = "job-gc"

	// CoreJobDeploymentGC is used for the garbage collection of eligible
	// deployments. We periodically scan garbage collectible deployments and
	// check if they are terminal. If so, we delete these out of the system.
	CoreJobDeploymentGC = "deployment-gc"

	// CoreJobForceGC is used to force garbage collection of all GCable objects.
	CoreJobForceGC = "force-gc"
)
// Evaluation is used anytime we need to apply business logic as a result
// of a change to our desired state (job specification) or the emergent state
// (registered nodes). When the inputs change, we need to "evaluate" them,
// potentially taking action (allocation of work) or doing nothing if the state
// of the world does not require it.
type Evaluation struct {
	// ID is a randomly generated UUID used for this evaluation. This
	// is assigned upon the creation of the evaluation.
	ID string

	// Namespace is the namespace the evaluation is created in
	Namespace string

	// Priority is used to control scheduling importance and if this job
	// can preempt other jobs.
	Priority int

	// Type is used to control which schedulers are available to handle
	// this evaluation.
	Type string

	// TriggeredBy is used to give some insight into why this Eval
	// was created. (Job change, node failure, alloc failure, etc).
	// One of the EvalTrigger* constants.
	TriggeredBy string

	// JobID is the job this evaluation is scoped to. Evaluations cannot
	// be run in parallel for a given JobID, so we serialize on this.
	JobID string

	// JobModifyIndex is the modify index of the job at the time
	// the evaluation was created
	JobModifyIndex uint64

	// NodeID is the node that was affected triggering the evaluation.
	NodeID string

	// NodeModifyIndex is the modify index of the node at the time
	// the evaluation was created
	NodeModifyIndex uint64

	// DeploymentID is the ID of the deployment that triggered the evaluation.
	DeploymentID string

	// Status of the evaluation; one of the EvalStatus* constants.
	Status string

	// StatusDescription is meant to provide more human useful information
	StatusDescription string

	// Wait is a minimum wait time for running the eval. This is used to
	// support a rolling upgrade.
	Wait time.Duration

	// NextEval is the evaluation ID for the eval created to do a followup.
	// This is used to support rolling upgrades, where we need a chain of evaluations.
	NextEval string

	// PreviousEval is the evaluation ID for the eval creating this one to do a followup.
	// This is used to support rolling upgrades, where we need a chain of evaluations.
	PreviousEval string

	// BlockedEval is the evaluation ID for a created blocked eval. A
	// blocked eval will be created if all allocations could not be placed due
	// to constraints or lacking resources.
	BlockedEval string

	// FailedTGAllocs are task groups which have allocations that could not be
	// made, but the metrics are persisted so that the user can use the feedback
	// to determine the cause.
	FailedTGAllocs map[string]*AllocMetric

	// ClassEligibility tracks computed node classes that have been explicitly
	// marked as eligible or ineligible.
	ClassEligibility map[string]bool

	// QuotaLimitReached marks whether a quota limit was reached for the
	// evaluation.
	QuotaLimitReached string

	// EscapedComputedClass marks whether the job has constraints that are not
	// captured by computed node classes.
	EscapedComputedClass bool

	// AnnotatePlan triggers the scheduler to provide additional annotations
	// during the evaluation. This should not be set during normal operations.
	AnnotatePlan bool

	// QueuedAllocations is the number of unplaced allocations at the time the
	// evaluation was processed. The map is keyed by Task Group names.
	QueuedAllocations map[string]int

	// LeaderACL provides the ACL token to when issuing RPCs back to the
	// leader. This will be a valid management token as long as the leader is
	// active. This should not ever be exposed via the API.
	LeaderACL string

	// SnapshotIndex is the Raft index of the snapshot used to process the
	// evaluation. As such it will only be set once it has gone through the
	// scheduler.
	SnapshotIndex uint64

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
// TerminalStatus returns if the current status is terminal and
// will no longer transition: complete, failed, or cancelled.
func (e *Evaluation) TerminalStatus() bool {
	return e.Status == EvalStatusComplete ||
		e.Status == EvalStatusFailed ||
		e.Status == EvalStatusCancelled
}
// GoString renders a short debug representation of the evaluation.
func (e *Evaluation) GoString() string {
	return fmt.Sprintf("<Eval %q JobID: %q Namespace: %q>", e.ID, e.JobID, e.Namespace)
}
// Copy returns a copy of the evaluation with its own ClassEligibility,
// FailedTGAllocs, and QueuedAllocations maps; the per-group AllocMetrics are
// deep-copied. A nil receiver yields nil.
func (e *Evaluation) Copy() *Evaluation {
	if e == nil {
		return nil
	}
	ne := new(Evaluation)
	*ne = *e

	// Copy ClassEligibility
	if e.ClassEligibility != nil {
		ne.ClassEligibility = make(map[string]bool, len(e.ClassEligibility))
		for class, eligible := range e.ClassEligibility {
			ne.ClassEligibility[class] = eligible
		}
	}

	// Copy FailedTGAllocs
	if e.FailedTGAllocs != nil {
		ne.FailedTGAllocs = make(map[string]*AllocMetric, len(e.FailedTGAllocs))
		for tg, metric := range e.FailedTGAllocs {
			ne.FailedTGAllocs[tg] = metric.Copy()
		}
	}

	// Copy queued allocations
	if e.QueuedAllocations != nil {
		ne.QueuedAllocations = make(map[string]int, len(e.QueuedAllocations))
		for tg, n := range e.QueuedAllocations {
			ne.QueuedAllocations[tg] = n
		}
	}

	return ne
}
// ShouldEnqueue checks if a given evaluation should be enqueued into the
// eval_broker. Only pending evaluations are enqueued; an unrecognized status
// panics, as it indicates a programming error.
func (e *Evaluation) ShouldEnqueue() bool {
	switch e.Status {
	case EvalStatusPending:
		return true
	case EvalStatusComplete, EvalStatusFailed, EvalStatusBlocked, EvalStatusCancelled:
		return false
	}
	panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
}
// ShouldBlock checks if a given evaluation should be entered into the blocked
// eval tracker. Only blocked evaluations qualify; an unrecognized status
// panics, as it indicates a programming error.
func (e *Evaluation) ShouldBlock() bool {
	switch e.Status {
	case EvalStatusBlocked:
		return true
	case EvalStatusComplete, EvalStatusFailed, EvalStatusPending, EvalStatusCancelled:
		return false
	}
	panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
}
// MakePlan is used to make a plan from the given evaluation
// for a given Job. The job may be nil (e.g. when deregistering).
func (e *Evaluation) MakePlan(j *Job) *Plan {
	p := new(Plan)
	p.EvalID = e.ID
	p.Priority = e.Priority
	p.Job = j
	p.NodeUpdate = make(map[string][]*Allocation)
	p.NodeAllocation = make(map[string][]*Allocation)
	if j != nil {
		p.AllAtOnce = j.AllAtOnce
	}
	return p
}
// NextRollingEval creates an evaluation to followup this eval for rolling
// updates. The new eval is pending, waits the given duration, and records
// this eval as its predecessor.
func (e *Evaluation) NextRollingEval(wait time.Duration) *Evaluation {
	return &Evaluation{
		ID:             uuid.Generate(),
		Namespace:      e.Namespace,
		Priority:       e.Priority,
		Type:           e.Type,
		TriggeredBy:    EvalTriggerRollingUpdate,
		JobID:          e.JobID,
		JobModifyIndex: e.JobModifyIndex,
		Status:         EvalStatusPending,
		Wait:           wait,
		PreviousEval:   e.ID,
	}
}
// CreateBlockedEval creates a blocked evaluation to followup this eval to place any
// failed allocations. It takes the classes marked explicitly eligible or
// ineligible, whether the job has escaped computed node classes and whether the
// quota limit was reached. The new eval starts in the blocked status and
// records this eval as its predecessor.
func (e *Evaluation) CreateBlockedEval(classEligibility map[string]bool,
	escaped bool, quotaReached string) *Evaluation {

	return &Evaluation{
		ID:                   uuid.Generate(),
		Namespace:            e.Namespace,
		Priority:             e.Priority,
		Type:                 e.Type,
		TriggeredBy:          e.TriggeredBy,
		JobID:                e.JobID,
		JobModifyIndex:       e.JobModifyIndex,
		Status:               EvalStatusBlocked,
		PreviousEval:         e.ID,
		ClassEligibility:     classEligibility,
		EscapedComputedClass: escaped,
		QuotaLimitReached:    quotaReached,
	}
}
// CreateFailedFollowUpEval creates a follow up evaluation when the current one
// has been marked as failed because it has hit the delivery limit and will not
// be retried by the eval_broker. The new eval is pending, waits the given
// duration, and records this eval as its predecessor.
func (e *Evaluation) CreateFailedFollowUpEval(wait time.Duration) *Evaluation {
	return &Evaluation{
		ID:             uuid.Generate(),
		Namespace:      e.Namespace,
		Priority:       e.Priority,
		Type:           e.Type,
		TriggeredBy:    EvalTriggerFailedFollowUp,
		JobID:          e.JobID,
		JobModifyIndex: e.JobModifyIndex,
		Status:         EvalStatusPending,
		Wait:           wait,
		PreviousEval:   e.ID,
	}
}
// Plan is used to submit a commit plan for task allocations. These
// are submitted to the leader which verifies that resources have
// not been overcommitted before admitting the plan.
type Plan struct {
	// EvalID is the evaluation ID this plan is associated with
	EvalID string

	// EvalToken is used to prevent a split-brain processing of
	// an evaluation. There should only be a single scheduler running
	// an Eval at a time, but this could be violated after a leadership
	// transition. This unique token is used to reject plans that are
	// being submitted from a different leader.
	EvalToken string

	// Priority is the priority of the upstream job
	Priority int

	// AllAtOnce is used to control if incremental scheduling of task groups
	// is allowed or if we must do a gang scheduling of the entire job.
	// If this is false, a plan may be partially applied. Otherwise, the
	// entire plan must be able to make progress.
	AllAtOnce bool

	// Job is the parent job of all the allocations in the Plan.
	// Since a Plan only involves a single Job, we can reduce the size
	// of the plan by only including it once.
	Job *Job

	// NodeUpdate contains all the allocations for each node. For each node,
	// this is a list of the allocations to update to either stop or evict.
	NodeUpdate map[string][]*Allocation

	// NodeAllocation contains all the allocations for each node.
	// The evicts must be considered prior to the allocations.
	NodeAllocation map[string][]*Allocation

	// Annotations contains annotations by the scheduler to be used by operators
	// to understand the decisions made by the scheduler.
	Annotations *PlanAnnotations

	// Deployment is the deployment created or updated by the scheduler that
	// should be applied by the planner.
	Deployment *Deployment

	// DeploymentUpdates is a set of status updates to apply to the given
	// deployments. This allows the scheduler to cancel any unneeded deployment
	// because the job is stopped or the update block is removed.
	DeploymentUpdates []*DeploymentStatusUpdate
}
// AppendUpdate marks the allocation for eviction. The clientStatus of the
// allocation may be optionally set by passing in a non-empty value.
func (p *Plan) AppendUpdate(alloc *Allocation, desiredStatus, desiredDesc, clientStatus string) {
	// Work on a shallow copy so the caller's allocation is not mutated.
	newAlloc := new(Allocation)
	*newAlloc = *alloc

	// If the job is not set in the plan we are deregistering a job so we
	// extract the job from the allocation.
	if p.Job == nil && newAlloc.Job != nil {
		p.Job = newAlloc.Job
	}

	// Normalize the job
	newAlloc.Job = nil

	// Strip the resources as it can be rebuilt.
	newAlloc.Resources = nil

	newAlloc.DesiredStatus = desiredStatus
	newAlloc.DesiredDescription = desiredDesc

	if clientStatus != "" {
		newAlloc.ClientStatus = clientStatus
	}

	// Lazily initialize the map so a zero-value Plan does not panic on the
	// assignment below.
	if p.NodeUpdate == nil {
		p.NodeUpdate = make(map[string][]*Allocation)
	}
	node := alloc.NodeID
	existing := p.NodeUpdate[node]
	p.NodeUpdate[node] = append(existing, newAlloc)
}
// PopUpdate removes the most recently appended eviction for the given
// allocation, pruning the node's entry entirely once it becomes empty.
// It is a no-op when the last queued update is not for this allocation.
func (p *Plan) PopUpdate(alloc *Allocation) {
	updates := p.NodeUpdate[alloc.NodeID]
	last := len(updates) - 1
	if last < 0 || updates[last].ID != alloc.ID {
		return
	}
	if last == 0 {
		delete(p.NodeUpdate, alloc.NodeID)
		return
	}
	p.NodeUpdate[alloc.NodeID] = updates[:last]
}
// AppendAlloc appends the allocation to the plan's per-node allocation list.
func (p *Plan) AppendAlloc(alloc *Allocation) {
	// Lazily initialize the map so a zero-value Plan does not panic on the
	// assignment below.
	if p.NodeAllocation == nil {
		p.NodeAllocation = make(map[string][]*Allocation)
	}
	node := alloc.NodeID
	existing := p.NodeAllocation[node]
	p.NodeAllocation[node] = append(existing, alloc)
}
// IsNoOp checks if this plan would do nothing
func (p *Plan) IsNoOp() bool {
	if len(p.NodeUpdate) != 0 || len(p.NodeAllocation) != 0 {
		return false
	}
	return p.Deployment == nil && len(p.DeploymentUpdates) == 0
}
// PlanResult is the result of a plan submitted to the leader. Field names
// mirror the corresponding fields on Plan, restricted to what was committed.
type PlanResult struct {
	// NodeUpdate contains all the updates that were committed.
	NodeUpdate map[string][]*Allocation

	// NodeAllocation contains all the allocations that were committed.
	NodeAllocation map[string][]*Allocation

	// Deployment is the deployment that was committed.
	Deployment *Deployment

	// DeploymentUpdates is the set of deployment updates that were committed.
	DeploymentUpdates []*DeploymentStatusUpdate

	// RefreshIndex is the index the worker should refresh state up to.
	// This allows all evictions and allocations to be materialized.
	// If any allocations were rejected due to stale data (node state,
	// over committed) this can be used to force a worker refresh.
	RefreshIndex uint64

	// AllocIndex is the Raft index in which the evictions and
	// allocations took place. This is used for the write index.
	AllocIndex uint64
}
// IsNoOp checks if this plan result would do nothing
func (p *PlanResult) IsNoOp() bool {
	if p.Deployment != nil || len(p.DeploymentUpdates) != 0 {
		return false
	}
	return len(p.NodeUpdate) == 0 && len(p.NodeAllocation) == 0
}
// FullCommit is used to check if all the allocations in a plan
// were committed as part of the result. Returns if there was
// a match, and the number of expected and actual allocations.
func (p *PlanResult) FullCommit(plan *Plan) (bool, int, int) {
	expected := 0
	actual := 0
	for name, allocList := range plan.NodeAllocation {
		// A missing key yields a nil slice, so the blank second return
		// value of the map index was redundant (gosimple S1005).
		didAlloc := p.NodeAllocation[name]
		expected += len(allocList)
		actual += len(didAlloc)
	}
	return actual == expected, expected, actual
}
// PlanAnnotations holds annotations made by the scheduler to give further debug
// information to operators.
type PlanAnnotations struct {
	// DesiredTGUpdates is the set of desired updates per task group.
	DesiredTGUpdates map[string]*DesiredUpdates
}

// DesiredUpdates is the set of changes the scheduler would like to make given
// sufficient resources and cluster capacity. Each field counts allocations
// per kind of scheduling decision (see GoString for the rendered form).
type DesiredUpdates struct {
	Ignore            uint64
	Place             uint64
	Migrate           uint64
	Stop              uint64
	InPlaceUpdate     uint64
	DestructiveUpdate uint64
	Canary            uint64
}
// GoString implements fmt.GoStringer, rendering the desired update counts
// in a compact single-line form for debug output.
func (d *DesiredUpdates) GoString() string {
	return fmt.Sprintf("(place %d) (inplace %d) (destructive %d) (stop %d) (migrate %d) (ignore %d) (canary %d)",
		d.Place, d.InPlaceUpdate, d.DestructiveUpdate, d.Stop, d.Migrate, d.Ignore, d.Canary)
}
// MsgpackHandle is a shared handle for encoding/decoding of structs.
var MsgpackHandle = func() *codec.MsgpackHandle {
	h := &codec.MsgpackHandle{RawToString: true}

	// Sets the default type for decoding a map into a nil interface{}.
	// This is necessary in particular because we store the driver configs as a
	// nil interface{}.
	h.MapType = reflect.TypeOf(map[string]interface{}(nil))
	return h
}()
var (
	// JsonHandle and JsonHandlePretty are the codec handles to JSON encode
	// structs. The pretty handle will add indents for easier human consumption.
	JsonHandle = &codec.JsonHandle{
		HTMLCharsAsIs: true,
	}
	JsonHandlePretty = &codec.JsonHandle{
		HTMLCharsAsIs: true,
		Indent:        4,
	}
)

// HashiMsgpackHandle is the shared handle used with the HashiCorp fork of the
// codec library; configured identically to MsgpackHandle.
var HashiMsgpackHandle = func() *hcodec.MsgpackHandle {
	h := &hcodec.MsgpackHandle{RawToString: true}

	// Sets the default type for decoding a map into a nil interface{}.
	// This is necessary in particular because we store the driver configs as a
	// nil interface{}.
	h.MapType = reflect.TypeOf(map[string]interface{}(nil))
	return h
}()
// Decode is used to decode a MsgPack encoded object
func Decode(buf []byte, out interface{}) error {
	r := bytes.NewReader(buf)
	return codec.NewDecoder(r, MsgpackHandle).Decode(out)
}
// Encode is used to encode a MsgPack object with type prefix
func Encode(t MessageType, msg interface{}) ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteByte(uint8(t))
	enc := codec.NewEncoder(&buf, MsgpackHandle)
	if err := enc.Encode(msg); err != nil {
		return buf.Bytes(), err
	}
	return buf.Bytes(), nil
}
// KeyringResponse is a unified key response and can be used for install,
// remove, use, as well as listing key queries.
type KeyringResponse struct {
	// Messages maps a responder to an informational or error message —
	// presumably keyed by node name; confirm against the serf keyring RPC.
	Messages map[string]string
	// Keys maps each known key to a count — presumably the number of nodes
	// holding that key; confirm against the serf keyring RPC.
	Keys map[string]int
	// NumNodes is the number of nodes covered by the query.
	NumNodes int
}

// KeyringRequest is the request object for serf key operations.
type KeyringRequest struct {
	// Key is the serf encryption key the operation applies to.
	Key string
}

// RecoverableError wraps an error and marks whether it is recoverable and could
// be retried or it is fatal.
type RecoverableError struct {
	Err         string
	Recoverable bool
}
// NewRecoverableError wraps the given error and records whether it is
// recoverable. A nil input yields a nil result.
func NewRecoverableError(e error, recoverable bool) error {
	if e == nil {
		return nil
	}
	r := new(RecoverableError)
	r.Err = e.Error()
	r.Recoverable = recoverable
	return r
}
// WrapRecoverable wraps an existing error in a new RecoverableError with a new
// message. If the error was recoverable before the returned error is as well;
// otherwise it is unrecoverable.
func WrapRecoverable(msg string, err error) error {
	return &RecoverableError{
		Err:         msg,
		Recoverable: IsRecoverable(err),
	}
}
// Error implements the error interface, returning the wrapped message.
func (r *RecoverableError) Error() string {
	return r.Err
}
// IsRecoverable reports whether the error was marked as recoverable.
func (r *RecoverableError) IsRecoverable() bool {
	return r.Recoverable
}
// Recoverable is an interface for errors to implement to indicate whether or
// not they are fatal or recoverable.
type Recoverable interface {
	error
	// IsRecoverable returns true if the error is recoverable (retryable).
	IsRecoverable() bool
}
// IsRecoverable returns true if error is a RecoverableError with
// Recoverable=true. Otherwise false is returned.
func IsRecoverable(e error) bool {
	re, ok := e.(Recoverable)
	return ok && re.IsRecoverable()
}
// ACLPolicy is used to represent an ACL policy
type ACLPolicy struct {
	Name        string // Unique name
	Description string // Human readable
	Rules       string // HCL or JSON format
	Hash        []byte // Hash of the user-set fields, computed by SetHash
	CreateIndex uint64 // Raft index of creation
	ModifyIndex uint64 // Raft index of last modification
}
// SetHash computes the Blake2b-256 digest of the policy's user-settable
// fields, stores it on the policy, and returns it.
func (c *ACLPolicy) SetHash() []byte {
	// A nil key gives an unkeyed 256-bit (32 byte) Blake2b hash; the
	// constructor only errors for bad key sizes, so this cannot fail.
	h, err := blake2b.New256(nil)
	if err != nil {
		panic(err)
	}

	// Fold in every user-settable field.
	for _, field := range []string{c.Name, c.Description, c.Rules} {
		h.Write([]byte(field))
	}

	digest := h.Sum(nil)
	c.Hash = digest
	return digest
}
// Stub converts the policy into its list-stub representation.
func (a *ACLPolicy) Stub() *ACLPolicyListStub {
	stub := new(ACLPolicyListStub)
	stub.Name = a.Name
	stub.Description = a.Description
	stub.Hash = a.Hash
	stub.CreateIndex = a.CreateIndex
	stub.ModifyIndex = a.ModifyIndex
	return stub
}
// Validate sanity checks the policy's name, rules, and description length,
// returning an accumulated multierror or nil.
func (a *ACLPolicy) Validate() error {
	var mErr multierror.Error
	if !validPolicyName.MatchString(a.Name) {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("invalid name '%s'", a.Name))
	}
	if _, parseErr := acl.Parse(a.Rules); parseErr != nil {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("failed to parse rules: %v", parseErr))
	}
	if len(a.Description) > maxPolicyDescriptionLength {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("description longer than %d", maxPolicyDescriptionLength))
	}
	return mErr.ErrorOrNil()
}
// ACLPolicyListStub is used for listing ACL policies
type ACLPolicyListStub struct {
	Name        string
	Description string
	Hash        []byte
	CreateIndex uint64
	ModifyIndex uint64
}

// ACLPolicyListRequest is used to request a list of policies
type ACLPolicyListRequest struct {
	QueryOptions
}

// ACLPolicySpecificRequest is used to query a specific policy
type ACLPolicySpecificRequest struct {
	Name string
	QueryOptions
}

// ACLPolicySetRequest is used to query a set of policies
type ACLPolicySetRequest struct {
	Names []string
	QueryOptions
}

// ACLPolicyListResponse is used for a list request
type ACLPolicyListResponse struct {
	Policies []*ACLPolicyListStub
	QueryMeta
}

// SingleACLPolicyResponse is used to return a single policy
type SingleACLPolicyResponse struct {
	Policy *ACLPolicy
	QueryMeta
}

// ACLPolicySetResponse is used to return a set of policies
type ACLPolicySetResponse struct {
	Policies map[string]*ACLPolicy // Keyed by policy name
	QueryMeta
}

// ACLPolicyDeleteRequest is used to delete a set of policies
type ACLPolicyDeleteRequest struct {
	Names []string
	WriteRequest
}

// ACLPolicyUpsertRequest is used to upsert a set of policies
type ACLPolicyUpsertRequest struct {
	Policies []*ACLPolicy
	WriteRequest
}
// ACLToken represents a client token which is used to Authenticate
type ACLToken struct {
	AccessorID  string    // Public Accessor ID (UUID)
	SecretID    string    // Secret ID, private (UUID)
	Name        string    // Human friendly name
	Type        string    // Client or Management
	Policies    []string  // Policies this token ties to
	Global      bool      // Global or Region local
	Hash        []byte    // Hash of the user-set fields, computed by SetHash
	CreateTime  time.Time // Time of creation
	CreateIndex uint64
	ModifyIndex uint64
}

var (
	// AnonymousACLToken is used when no SecretID is provided, and the
	// request is made anonymously.
	AnonymousACLToken = &ACLToken{
		AccessorID: "anonymous",
		Name:       "Anonymous Token",
		Type:       ACLClientToken,
		Policies:   []string{"anonymous"},
		Global:     false,
	}
)
// ACLTokenListStub is the listing-friendly subset of ACLToken fields,
// notably omitting the private SecretID. See ACLToken.Stub.
type ACLTokenListStub struct {
	AccessorID  string
	Name        string
	Type        string
	Policies    []string
	Global      bool
	Hash        []byte
	CreateTime  time.Time
	CreateIndex uint64
	ModifyIndex uint64
}
// SetHash computes the Blake2b-256 digest of the token's user-settable
// fields, stores it on the token, and returns it.
func (a *ACLToken) SetHash() []byte {
	// A nil key gives an unkeyed 256-bit (32 byte) Blake2b hash; the
	// constructor only errors for bad key sizes, so this cannot fail.
	h, err := blake2b.New256(nil)
	if err != nil {
		panic(err)
	}

	// Fold in every user-settable field.
	h.Write([]byte(a.Name))
	h.Write([]byte(a.Type))
	for _, policyName := range a.Policies {
		h.Write([]byte(policyName))
	}
	scope := "local"
	if a.Global {
		scope = "global"
	}
	h.Write([]byte(scope))

	digest := h.Sum(nil)
	a.Hash = digest
	return digest
}
// Stub converts the token into its list-stub representation, dropping the
// private SecretID.
func (a *ACLToken) Stub() *ACLTokenListStub {
	stub := new(ACLTokenListStub)
	stub.AccessorID = a.AccessorID
	stub.Name = a.Name
	stub.Type = a.Type
	stub.Policies = a.Policies
	stub.Global = a.Global
	stub.Hash = a.Hash
	stub.CreateTime = a.CreateTime
	stub.CreateIndex = a.CreateIndex
	stub.ModifyIndex = a.ModifyIndex
	return stub
}
// Validate is used to sanity check a token
func (a *ACLToken) Validate() error {
	var mErr multierror.Error
	addErr := func(format string, args ...interface{}) {
		mErr.Errors = append(mErr.Errors, fmt.Errorf(format, args...))
	}
	if len(a.Name) > maxTokenNameLength {
		addErr("token name too long")
	}
	switch a.Type {
	case ACLClientToken:
		if len(a.Policies) == 0 {
			addErr("client token missing policies")
		}
	case ACLManagementToken:
		if len(a.Policies) != 0 {
			addErr("management token cannot be associated with policies")
		}
	default:
		addErr("token type must be client or management")
	}
	return mErr.ErrorOrNil()
}
// PolicySubset checks if a given set of policies is a subset of the token
func (a *ACLToken) PolicySubset(policies []string) bool {
	// Management tokens are a superset of all policies.
	if a.Type == ACLManagementToken {
		return true
	}

	have := make(map[string]struct{}, len(a.Policies))
	for _, p := range a.Policies {
		have[p] = struct{}{}
	}
	for _, want := range policies {
		if _, ok := have[want]; !ok {
			return false
		}
	}
	return true
}
// ACLTokenListRequest is used to request a list of tokens
type ACLTokenListRequest struct {
	// GlobalOnly restricts the listing to global tokens.
	GlobalOnly bool
	QueryOptions
}

// ACLTokenSpecificRequest is used to query a specific token
type ACLTokenSpecificRequest struct {
	AccessorID string
	QueryOptions
}

// ACLTokenSetRequest is used to query a set of tokens
type ACLTokenSetRequest struct {
	// NOTE(review): spelled AccessorIDS here but AccessorIDs in
	// ACLTokenDeleteRequest; renaming would break callers and wire
	// encodings, so it is documented rather than changed.
	AccessorIDS []string
	QueryOptions
}

// ACLTokenListResponse is used for a list request
type ACLTokenListResponse struct {
	Tokens []*ACLTokenListStub
	QueryMeta
}

// SingleACLTokenResponse is used to return a single token
type SingleACLTokenResponse struct {
	Token *ACLToken
	QueryMeta
}

// ACLTokenSetResponse is used to return a set of tokens
type ACLTokenSetResponse struct {
	Tokens map[string]*ACLToken // Keyed by Accessor ID
	QueryMeta
}

// ResolveACLTokenRequest is used to resolve a specific token
type ResolveACLTokenRequest struct {
	SecretID string
	QueryOptions
}

// ResolveACLTokenResponse is used to resolve a single token
type ResolveACLTokenResponse struct {
	Token *ACLToken
	QueryMeta
}

// ACLTokenDeleteRequest is used to delete a set of tokens
type ACLTokenDeleteRequest struct {
	AccessorIDs []string
	WriteRequest
}

// ACLTokenBootstrapRequest is used to bootstrap ACLs
type ACLTokenBootstrapRequest struct {
	Token      *ACLToken // Not client specifiable
	ResetIndex uint64    // Reset index is used to clear the bootstrap token
	WriteRequest
}

// ACLTokenUpsertRequest is used to upsert a set of tokens
type ACLTokenUpsertRequest struct {
	Tokens []*ACLToken
	WriteRequest
}

// ACLTokenUpsertResponse is used to return from an ACLTokenUpsertRequest
type ACLTokenUpsertResponse struct {
	Tokens []*ACLToken
	WriteMeta
}
|
package cliTricks
import (
"encoding/json"
"errors"
"fmt"
"github.com/stretchr/testify/assert"
"reflect"
"testing"
)
// TestBreakupStringArray exercises BreakupStringArray against several
// comma- and bracket-delimited input formats.
func TestBreakupStringArray(t *testing.T) {
	cases := []struct {
		input  string
		output []string
	}{
		{input: "apple,banana,cherry", output: []string{"apple", "banana", "cherry"}},
		{input: "Dog, Eagle, Fox", output: []string{"Dog", "Eagle", "Fox"}},
		{input: "[Green Beans][Hot Tamales][Ice Cream]", output: []string{"Green Beans", "Hot Tamales", "Ice Cream"}},
		{input: "[JellyBean],[KitKat],[Marshmallow]", output: []string{"JellyBean", "KitKat", "Marshmallow"}},
		{input: `["Nutella"],["Oatmeal"],["Pie"]`, output: []string{"Nutella", "Oatmeal", "Pie"}},
	}
	for _, tc := range cases {
		assert.Equal(t, tc.output, BreakupStringArray(tc.input), "BreakupStringArray returned non-expected results")
	}
}
// ExampleGetItem demonstrates retrieving a nested value by its key path.
func ExampleGetItem() {
	testBytes := []byte(`{"Everything":"Awesome","Team":{"Everything":"Cool"}}`)
	var testData interface{}
	err := json.Unmarshal(testBytes, &testData)
	if err != nil {
		fmt.Printf("hit a snag unmarshalling the data - %v", err)
		// Bail out rather than calling GetItem on nil data; this also
		// matches the error handling in ExampleGetInt.
		return
	}
	item, err := GetItem(testData, []string{"Team", "Everything"})
	if err != nil {
		fmt.Printf("hit a snag retrieving the item - %v", err)
		return
	}
	fmt.Println(item)
	// Output:
	// Cool
}
// func TestGetInt(t *testing.T) {
// testData := []struct{
// input interface{}
// target []string
// output int
// }{
// {
// input: map[string]interface{}{
// "params": map[string]float64{
// "data": 63,
// },
// },
// target: []string{"params", "data",},
// output: 63,
// },
// }
// for _, oneTest := range testData {
// result, err := GetInt(oneTest.input, oneTest.target)
// assert.Equal(t, oneTest.output, result)
// assert.NoError(t, err)
// result2, err := GetItem(oneTest.input, []string{"params"})
// fmt.Println(oneTest.input)
// fmt.Println(oneTest.output)
// fmt.Println(result2)
// }
// }
// TestGetIntJSON exercises GetInt over unmarshaled JSON documents,
// covering integer values, float truncation, and error cases.
func TestGetIntJSON(t *testing.T) {
	testData := []struct {
		input  []byte   // raw JSON document
		target []string // key path to look up
		output int      // expected value (-1 on error)
		status error    // expected error, nil on success
	}{
		{
			input:  []byte(`{"params":{"data":63}}`),
			target: []string{"params", "data"},
			output: 63,
			status: nil,
		}, {
			// we always round down
			input:  []byte(`{"params":{"data":63.9}}`),
			target: []string{"params", "data"},
			output: 63,
			status: nil,
		}, {
			input:  []byte(`{"params":{"data":"potato"}}`),
			target: []string{"params", "data"},
			output: -1,
			status: errors.New("got non-float item - potato"),
		}, {
			input:  []byte(`{"params":{"data":"potato"}}`),
			target: []string{"bad", "address"},
			output: -1,
			status: errors.New("bad item - bad address - [address]"),
		},
	}
	for _, oneTest := range testData {
		// Use a distinct name for the decoded document; the original code
		// shadowed the outer testData table, which was confusing.
		var decoded interface{}
		err := json.Unmarshal(oneTest.input, &decoded)
		assert.Nil(t, err, "Problems unmarshaling the input")

		result, err := GetInt(decoded, oneTest.target)
		assert.Equal(t, oneTest.output, result)
		assert.Equal(t, oneTest.status, err)
	}
}
// TestGetItemJSON exercises GetItem over unmarshaled JSON documents:
// map traversal, numeric array indices, and bad-address error cases.
func TestGetItemJSON(t *testing.T) {
	testData := []struct {
		input  []byte   // raw JSON document
		target []string // key/index path to look up
		output []byte   // expected item, as raw JSON
		status error    // expected error, nil on success
	}{
		{
			input:  []byte(`{"params":{"data":63}}`),
			target: []string{"params", "data"},
			output: []byte(`63`),
			status: nil,
		}, {
			// we always round down
			input:  []byte(`{"params":{"data":63.9}}`),
			target: []string{"params", "data"},
			output: []byte(`63.9`),
			status: nil,
		}, {
			input:  []byte(`{"params":{"data":"potato"}}`),
			target: []string{"params", "data"},
			output: []byte(`"potato"`),
			status: nil,
		}, {
			// numeric string addresses index into arrays
			input:  []byte(`{"numbers":[4,8,15,16,23,42,63]}`),
			target: []string{"numbers", "3"},
			output: []byte(`16`),
			status: nil,
		}, {
			// a non-numeric address into an array is an error
			input:  []byte(`{"numbers":[4,8,15,16,23,42,63]}`),
			target: []string{"numbers", "potato"},
			output: []byte("null"),
			status: errors.New("got non-int address for []interface{}"),
		}, {
			input:  []byte(`[["apple","apricot","acorn"],"banana",["chestnut","cookie"]]`),
			target: []string{"0", "1"},
			output: []byte(`"apricot"`),
			status: nil,
		}, {
			// a missing key yields a bad-address error
			input:  []byte(`{"params":{"data":"potato"}}`),
			target: []string{"bad", "address"},
			output: []byte("null"),
			status: errors.New("bad address - [address]"),
		},
	}
	for _, oneTest := range testData {
		var inputData, outputData interface{}
		err := json.Unmarshal(oneTest.input, &inputData)
		assert.Nil(t, err, "Problems unmarshaling the input - %q", oneTest.input)
		err = json.Unmarshal(oneTest.output, &outputData)
		assert.Nil(t, err, "Problems unmarshaling the output - input was %q and output was %q", oneTest.input, oneTest.output)
		result, err := GetItem(inputData, oneTest.target)
		assert.Equal(t, outputData, result)
		assert.Equal(t, oneTest.status, err)
	}
}
// TestSetItemJSON exercises SetItem over unmarshaled JSON documents:
// replacing map values, creating new keys, appending to arrays, and
// bad-address error cases.
func TestSetItemJSON(t *testing.T) {
	testData := []struct {
		input  []byte   // raw JSON document to modify
		target []string // key/index path to set
		newVal []byte   // new value, as raw JSON
		output []byte   // expected document after the set
		status error    // expected error, nil on success
	}{
		{
			input:  []byte(`{"params":{"data":63}}`),
			target: []string{"params", "data"},
			newVal: []byte("63"),
			output: []byte(`{"params":{"data":63}}`),
			status: nil,
		}, {
			// we always round down
			input:  []byte(`{"params":{"data":42}}`),
			target: []string{"params", "data"},
			newVal: []byte(`63.9`),
			output: []byte(`{"params":{"data":63}}`),
			status: nil,
		}, {
			input:  []byte(`{"params":{"data":"potato"}}`),
			target: []string{"params", "data"},
			newVal: []byte(`"banana"`),
			output: []byte(`{"params":{"data":"banana"}}`),
			status: nil,
		}, {
			// NOTE(review): the expected output also changes "data", not just
			// the new "magic" key — confirm this is intended SetItem behavior.
			input:  []byte(`{"params":{"data":"potato"}}`),
			target: []string{"params", "magic"},
			newVal: []byte(`"banana"`),
			output: []byte(`{"params":{"data":"banana","magic":"banana"}}`),
			status: nil,
		}, {
			// setting one past the end appends to the array
			input:  []byte(`{"numbers":[4,8,15,16,23,42]}`),
			target: []string{"numbers", "6"},
			newVal: []byte(`63`),
			output: []byte(`{"numbers":[4,8,15,16,23,42,63]}`),
			status: nil,
		}, {
			input:  []byte(`[["apple","apricot"],"banana",["chestnut","cookie"]]`),
			target: []string{"0", "2"},
			newVal: []byte(`"acorn"`),
			output: []byte(`[["apple","apricot","acorn"],"banana",["chestnut","cookie"]]`),
			status: nil,
		}, {
			// NOTE(review): this expects an insert (the inner list grows)
			// rather than a replace at index 1 — confirm against SetItem.
			input:  []byte(`[["apple","acorn"],"banana",["chestnut","cookie"]]`),
			target: []string{"0", "1"},
			newVal: []byte(`"apricot"`),
			output: []byte(`[["apple","apricot","acorn"],"banana",["chestnut","cookie"]]`),
			status: nil,
		}, {
			input:  []byte(`{"params":{"data":"potato"}}`),
			target: []string{"bad", "address"},
			newVal: []byte(""),
			output: []byte(`{"params":{"data":"potato"}}`),
			status: errors.New("bad address - [address]"),
		},
	}
	for _, oneTest := range testData {
		var inputData, newData, outputData interface{}
		err := json.Unmarshal(oneTest.input, &inputData)
		assert.Nil(t, err, "Problems unmarshaling the input")
		err = json.Unmarshal(oneTest.newVal, &newData)
		assert.Nil(t, err, "Problems unmarshaling the newData")
		err = json.Unmarshal(oneTest.output, &outputData)
		assert.Nil(t, err, "Problems unmarshaling the output")
		err = SetItem(&inputData, newData, oneTest.target)
		assert.Equal(t, outputData, inputData)
		assert.Equal(t, oneTest.status, err)
	}
}
// ExampleGetInt demonstrates retrieving a nested numeric value as an int.
func ExampleGetInt() {
	raw := []byte(`{"Everything":"Awesome","Team":{"Everything":"Cool", "Solution": 63}}`)
	var doc interface{}
	if err := json.Unmarshal(raw, &doc); err != nil {
		fmt.Printf("hit a snag unmarshalling the data - %v", err)
	}
	item, err := GetInt(doc, []string{"Team", "Solution"})
	if err != nil {
		fmt.Printf("hit a snag retrieving the item - %v", err)
		return
	}
	fmt.Println(item)
	fmt.Println(reflect.TypeOf(item))
	// Output:
	// 63
	// int
}
Added tests for the improved BreakupStringArray.
package cliTricks
import (
"encoding/json"
"errors"
"fmt"
"github.com/stretchr/testify/assert"
"reflect"
"testing"
)
// TestBreakupStringArray exercises BreakupStringArray against several
// delimited input formats, including mixed string/int elements.
func TestBreakupStringArray(t *testing.T) {
	testData := []struct {
		input  string
		output []interface{}
	}{
		{
			input:  "apple,banana,cherry",
			output: []interface{}{"apple", "banana", "cherry"},
		}, {
			input:  "Dog, Eagle, Fox",
			output: []interface{}{"Dog", "Eagle", "Fox"},
		}, {
			input:  "[Green Beans][Hot Tamales][Ice Cream]",
			output: []interface{}{"Green Beans", "Hot Tamales", "Ice Cream"},
		}, {
			input:  "[JellyBean],[KitKat],[Marshmallow]",
			output: []interface{}{"JellyBean", "KitKat", "Marshmallow"},
		}, {
			input:  "[\"Nutella\"],[\"Oatmeal\"],[\"Pie\"]",
			output: []interface{}{"Nutella", "Oatmeal", "Pie"},
		}, {
			// numeric elements come back as ints
			input:  "apple,banana,cherry,4,5",
			output: []interface{}{"apple", "banana", "cherry", 4, 5},
		},
	}
	for _, oneTest := range testData {
		assert.Equal(t, oneTest.output, BreakupStringArray(oneTest.input), "BreakupStringArray returned non-expected results")
	}
}
// ExampleGetItem demonstrates retrieving a nested value by its key path.
func ExampleGetItem() {
	testBytes := []byte(`{"Everything":"Awesome","Team":{"Everything":"Cool"}}`)
	var testData interface{}
	err := json.Unmarshal(testBytes, &testData)
	if err != nil {
		fmt.Printf("hit a snag unmarshalling the data - %v", err)
		// Bail out rather than calling GetItem on nil data; this also
		// matches the error handling in ExampleGetInt.
		return
	}
	item, err := GetItem(testData, []string{"Team", "Everything"})
	if err != nil {
		fmt.Printf("hit a snag retrieving the item - %v", err)
		return
	}
	fmt.Println(item)
	// Output:
	// Cool
}
// func TestGetInt(t *testing.T) {
// testData := []struct{
// input interface{}
// target []string
// output int
// }{
// {
// input: map[string]interface{}{
// "params": map[string]float64{
// "data": 63,
// },
// },
// target: []string{"params", "data",},
// output: 63,
// },
// }
// for _, oneTest := range testData {
// result, err := GetInt(oneTest.input, oneTest.target)
// assert.Equal(t, oneTest.output, result)
// assert.NoError(t, err)
// result2, err := GetItem(oneTest.input, []string{"params"})
// fmt.Println(oneTest.input)
// fmt.Println(oneTest.output)
// fmt.Println(result2)
// }
// }
// TestGetIntJSON exercises GetInt over unmarshaled JSON documents,
// covering integer values, float truncation, and error cases.
func TestGetIntJSON(t *testing.T) {
	testData := []struct {
		input  []byte   // raw JSON document
		target []string // key path to look up
		output int      // expected value (-1 on error)
		status error    // expected error, nil on success
	}{
		{
			input:  []byte(`{"params":{"data":63}}`),
			target: []string{"params", "data"},
			output: 63,
			status: nil,
		}, {
			// we always round down
			input:  []byte(`{"params":{"data":63.9}}`),
			target: []string{"params", "data"},
			output: 63,
			status: nil,
		}, {
			input:  []byte(`{"params":{"data":"potato"}}`),
			target: []string{"params", "data"},
			output: -1,
			status: errors.New("got non-float item - potato"),
		}, {
			input:  []byte(`{"params":{"data":"potato"}}`),
			target: []string{"bad", "address"},
			output: -1,
			status: errors.New("bad item - bad address - [address]"),
		},
	}
	for _, oneTest := range testData {
		// Use a distinct name for the decoded document; the original code
		// shadowed the outer testData table, which was confusing.
		var decoded interface{}
		err := json.Unmarshal(oneTest.input, &decoded)
		assert.Nil(t, err, "Problems unmarshaling the input")

		result, err := GetInt(decoded, oneTest.target)
		assert.Equal(t, oneTest.output, result)
		assert.Equal(t, oneTest.status, err)
	}
}
// TestGetItemJSON exercises GetItem over unmarshaled JSON documents:
// map traversal, numeric array indices, and bad-address error cases.
func TestGetItemJSON(t *testing.T) {
	testData := []struct {
		input  []byte   // raw JSON document
		target []string // key/index path to look up
		output []byte   // expected item, as raw JSON
		status error    // expected error, nil on success
	}{
		{
			input:  []byte(`{"params":{"data":63}}`),
			target: []string{"params", "data"},
			output: []byte(`63`),
			status: nil,
		}, {
			// we always round down
			input:  []byte(`{"params":{"data":63.9}}`),
			target: []string{"params", "data"},
			output: []byte(`63.9`),
			status: nil,
		}, {
			input:  []byte(`{"params":{"data":"potato"}}`),
			target: []string{"params", "data"},
			output: []byte(`"potato"`),
			status: nil,
		}, {
			// numeric string addresses index into arrays
			input:  []byte(`{"numbers":[4,8,15,16,23,42,63]}`),
			target: []string{"numbers", "3"},
			output: []byte(`16`),
			status: nil,
		}, {
			// a non-numeric address into an array is an error
			input:  []byte(`{"numbers":[4,8,15,16,23,42,63]}`),
			target: []string{"numbers", "potato"},
			output: []byte("null"),
			status: errors.New("got non-int address for []interface{}"),
		}, {
			input:  []byte(`[["apple","apricot","acorn"],"banana",["chestnut","cookie"]]`),
			target: []string{"0", "1"},
			output: []byte(`"apricot"`),
			status: nil,
		}, {
			// a missing key yields a bad-address error
			input:  []byte(`{"params":{"data":"potato"}}`),
			target: []string{"bad", "address"},
			output: []byte("null"),
			status: errors.New("bad address - [address]"),
		},
	}
	for _, oneTest := range testData {
		var inputData, outputData interface{}
		err := json.Unmarshal(oneTest.input, &inputData)
		assert.Nil(t, err, "Problems unmarshaling the input - %q", oneTest.input)
		err = json.Unmarshal(oneTest.output, &outputData)
		assert.Nil(t, err, "Problems unmarshaling the output - input was %q and output was %q", oneTest.input, oneTest.output)
		result, err := GetItem(inputData, oneTest.target)
		assert.Equal(t, outputData, result)
		assert.Equal(t, oneTest.status, err)
	}
}
// TestSetItemJSON exercises SetItem over unmarshaled JSON documents:
// replacing map values, creating new keys, appending to arrays, and
// bad-address error cases.
func TestSetItemJSON(t *testing.T) {
	testData := []struct {
		input  []byte   // raw JSON document to modify
		target []string // key/index path to set
		newVal []byte   // new value, as raw JSON
		output []byte   // expected document after the set
		status error    // expected error, nil on success
	}{
		{
			input:  []byte(`{"params":{"data":63}}`),
			target: []string{"params", "data"},
			newVal: []byte("63"),
			output: []byte(`{"params":{"data":63}}`),
			status: nil,
		}, {
			// we always round down
			input:  []byte(`{"params":{"data":42}}`),
			target: []string{"params", "data"},
			newVal: []byte(`63.9`),
			output: []byte(`{"params":{"data":63}}`),
			status: nil,
		}, {
			input:  []byte(`{"params":{"data":"potato"}}`),
			target: []string{"params", "data"},
			newVal: []byte(`"banana"`),
			output: []byte(`{"params":{"data":"banana"}}`),
			status: nil,
		}, {
			// NOTE(review): the expected output also changes "data", not just
			// the new "magic" key — confirm this is intended SetItem behavior.
			input:  []byte(`{"params":{"data":"potato"}}`),
			target: []string{"params", "magic"},
			newVal: []byte(`"banana"`),
			output: []byte(`{"params":{"data":"banana","magic":"banana"}}`),
			status: nil,
		}, {
			// setting one past the end appends to the array
			input:  []byte(`{"numbers":[4,8,15,16,23,42]}`),
			target: []string{"numbers", "6"},
			newVal: []byte(`63`),
			output: []byte(`{"numbers":[4,8,15,16,23,42,63]}`),
			status: nil,
		}, {
			input:  []byte(`[["apple","apricot"],"banana",["chestnut","cookie"]]`),
			target: []string{"0", "2"},
			newVal: []byte(`"acorn"`),
			output: []byte(`[["apple","apricot","acorn"],"banana",["chestnut","cookie"]]`),
			status: nil,
		}, {
			// NOTE(review): this expects an insert (the inner list grows)
			// rather than a replace at index 1 — confirm against SetItem.
			input:  []byte(`[["apple","acorn"],"banana",["chestnut","cookie"]]`),
			target: []string{"0", "1"},
			newVal: []byte(`"apricot"`),
			output: []byte(`[["apple","apricot","acorn"],"banana",["chestnut","cookie"]]`),
			status: nil,
		}, {
			input:  []byte(`{"params":{"data":"potato"}}`),
			target: []string{"bad", "address"},
			newVal: []byte(""),
			output: []byte(`{"params":{"data":"potato"}}`),
			status: errors.New("bad address - [address]"),
		},
	}
	for _, oneTest := range testData {
		var inputData, newData, outputData interface{}
		err := json.Unmarshal(oneTest.input, &inputData)
		assert.Nil(t, err, "Problems unmarshaling the input")
		err = json.Unmarshal(oneTest.newVal, &newData)
		assert.Nil(t, err, "Problems unmarshaling the newData")
		err = json.Unmarshal(oneTest.output, &outputData)
		assert.Nil(t, err, "Problems unmarshaling the output")
		err = SetItem(&inputData, newData, oneTest.target)
		assert.Equal(t, outputData, inputData)
		assert.Equal(t, oneTest.status, err)
	}
}
// ExampleGetInt demonstrates retrieving a nested numeric value as an int.
func ExampleGetInt() {
	raw := []byte(`{"Everything":"Awesome","Team":{"Everything":"Cool", "Solution": 63}}`)
	var doc interface{}
	if err := json.Unmarshal(raw, &doc); err != nil {
		fmt.Printf("hit a snag unmarshalling the data - %v", err)
	}
	item, err := GetInt(doc, []string{"Team", "Solution"})
	if err != nil {
		fmt.Printf("hit a snag retrieving the item - %v", err)
		return
	}
	fmt.Println(item)
	fmt.Println(reflect.TypeOf(item))
	// Output:
	// 63
	// int
}
|
// Copyright 2020 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package main is the main entry point for the app.
package main
import (
"context"
"fmt"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"go.chromium.org/luci/common/data/rand/mathrand"
"go.chromium.org/luci/common/logging"
"go.chromium.org/luci/common/proto/access"
"go.chromium.org/luci/grpc/prpc"
"go.chromium.org/luci/server"
"go.chromium.org/luci/server/gaeemulation"
"go.chromium.org/luci/server/module"
"go.chromium.org/luci/server/router"
"go.chromium.org/luci/server/tq"
// Enable datastore transactional tasks support.
_ "go.chromium.org/luci/server/tq/txn/datastore"
"go.chromium.org/luci/buildbucket/appengine/rpc"
pb "go.chromium.org/luci/buildbucket/proto"
)
// isBeefy returns whether the request was intended for the beefy service.
func isBeefy(req *http.Request) bool {
return strings.Contains(req.Host, "beefy")
}
// isDev returns whether the request was intended for the dev instance.
func isDev(req *http.Request) bool {
return strings.HasSuffix(req.Host, "-dev.appspot.com")
}
// main wires up the buildbucket GAE app: it registers the pRPC services and
// installs a reverse proxy that splits traffic between this Go service and
// the legacy Python service by percentage.
func main() {
	mods := []module.Module{
		gaeemulation.NewModuleFromFlags(),
		tq.NewModuleFromFlags(),
	}

	server.Main(nil, mods, func(srv *server.Server) error {
		// Proxy buildbucket.v2.Builds pRPC requests back to the Python
		// service in order to achieve a programmatic traffic split.
		// Because of the way dispatch routes work, requests are proxied
		// to a copy of the Python service hosted at a different path.
		// TODO(crbug/1042991): Remove the proxy once the go service handles all traffic.
		pythonURL, err := url.Parse(fmt.Sprintf("https://default-dot-%s.appspot.com/python", srv.Options.CloudProject))
		if err != nil {
			panic(err)
		}
		beefyURL, err := url.Parse(fmt.Sprintf("https://beefy-dot-%s.appspot.com/python", srv.Options.CloudProject))
		if err != nil {
			panic(err)
		}
		prx := httputil.NewSingleHostReverseProxy(pythonURL)
		prx.Director = func(req *http.Request) {
			target := pythonURL
			if isBeefy(req) {
				target = beefyURL
			}
			// According to net.Request documentation, setting Host is unnecessary
			// because URL.Host is supposed to be used for outbound requests.
			// However, on GAE, it seems that req.Host is incorrectly used.
			req.Host = target.Host
			req.URL.Scheme = target.Scheme
			req.URL.Host = target.Host
			req.URL.Path = fmt.Sprintf("%s%s", target.Path, req.URL.Path)
		}

		// makeOverride returns a prpc.Override which allows the given percentage of requests
		// through to this service, proxying the remainder to Python.
		makeOverride := func(prodPct, devPct int) func(*router.Context) bool {
			return func(ctx *router.Context) bool {
				// NOTE(review): context.WithValue with a bare string key
				// triggers a go vet warning (collision-prone); tolerable only
				// because the key is slated for removal below.
				// TODO(crbug/1090540): remove env k-v
				ctx.Context = context.WithValue(ctx.Context, "env", "Prod")
				pct := prodPct
				if isDev(ctx.Request) {
					pct = devPct
					// TODO(crbug/1090540): remove env k-v
					ctx.Context = context.WithValue(ctx.Context, "env", "Dev")
				}
				// The Should-Proxy header forces the decision either way,
				// overriding the percentage split (useful for testing).
				switch val := ctx.Request.Header.Get("Should-Proxy"); val {
				case "true":
					pct = 0
					logging.Debugf(ctx.Context, "request demanded to be proxied")
				case "false":
					pct = 100
					logging.Debugf(ctx.Context, "request demanded not to be proxied")
				}
				// Returning false lets the request fall through to the Go
				// handler; true means it was handled here (by proxying).
				if mathrand.Intn(ctx.Context, 100) < pct {
					return false
				}
				target := pythonURL
				if isBeefy(ctx.Request) {
					target = beefyURL
				}
				logging.Debugf(ctx.Context, "proxying request to %s", target)
				prx.ServeHTTP(ctx.Writer, ctx.Request)
				return true
			}
		}
		srv.PRPC.AccessControl = prpc.AllowOriginAll
		access.RegisterAccessServer(srv.PRPC, &access.UnimplementedAccessServer{})
		pb.RegisterBuildsServer(srv.PRPC, rpc.NewBuilds())
		pb.RegisterBuildersServer(srv.PRPC, rpc.NewBuilders())
		// TODO(crbug/1082369): Remove this workaround once field masks can be decoded.
		srv.PRPC.HackFixFieldMasksForJSON = true
		// makeOverride(prod % -> Go, dev % -> Go).
		srv.PRPC.RegisterOverride("buildbucket.v2.Builds", "Batch", makeOverride(20, 50))
		srv.PRPC.RegisterOverride("buildbucket.v2.Builds", "CancelBuild", makeOverride(0, 0))
		srv.PRPC.RegisterOverride("buildbucket.v2.Builds", "ScheduleBuild", makeOverride(0, 0))
		srv.PRPC.RegisterOverride("buildbucket.v2.Builds", "UpdateBuild", makeOverride(0, 0))
		return nil
	})
}
[buildbucket] Enable 100% traffic for Batch
Bug: 1144958
Change-Id: I4abd039113490888e14c26817b5f99e06fd29804
Reviewed-on: https://chromium-review.googlesource.com/c/infra/luci/luci-go/+/2654425
Reviewed-by: Andrii Shyshkalov <a30c74fa30536fe7ea81ed6dec202e35e149e1fd@google.com>
Commit-Queue: Yuanjun Huang <a5f4b8445ce5e958d19c0a7caeadcd07c9d18770@google.com>
// Copyright 2020 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package main is the main entry point for the app.
package main
import (
"context"
"fmt"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"go.chromium.org/luci/common/data/rand/mathrand"
"go.chromium.org/luci/common/logging"
"go.chromium.org/luci/common/proto/access"
"go.chromium.org/luci/grpc/prpc"
"go.chromium.org/luci/server"
"go.chromium.org/luci/server/gaeemulation"
"go.chromium.org/luci/server/module"
"go.chromium.org/luci/server/router"
"go.chromium.org/luci/server/tq"
// Enable datastore transactional tasks support.
_ "go.chromium.org/luci/server/tq/txn/datastore"
"go.chromium.org/luci/buildbucket/appengine/rpc"
pb "go.chromium.org/luci/buildbucket/proto"
)
// isBeefy returns whether the request was intended for the beefy service.
func isBeefy(req *http.Request) bool {
return strings.Contains(req.Host, "beefy")
}
// isDev returns whether the request was intended for the dev instance.
func isDev(req *http.Request) bool {
return strings.HasSuffix(req.Host, "-dev.appspot.com")
}
// main starts the buildbucket Go service and installs per-method pRPC
// overrides that proxy a configurable share of traffic back to the
// legacy Python service. In this revision Batch is served 100% by Go.
func main() {
mods := []module.Module{
gaeemulation.NewModuleFromFlags(),
tq.NewModuleFromFlags(),
}
server.Main(nil, mods, func(srv *server.Server) error {
// Proxy buildbucket.v2.Builds pRPC requests back to the Python
// service in order to achieve a programmatic traffic split.
// Because of the way dispatch routes work, requests are proxied
// to a copy of the Python service hosted at a different path.
// TODO(crbug/1042991): Remove the proxy once the go service handles all traffic.
pythonURL, err := url.Parse(fmt.Sprintf("https://default-dot-%s.appspot.com/python", srv.Options.CloudProject))
if err != nil {
// A malformed URL here is a startup-time programming error, so
// panicking (rather than returning the error) is acceptable.
panic(err)
}
beefyURL, err := url.Parse(fmt.Sprintf("https://beefy-dot-%s.appspot.com/python", srv.Options.CloudProject))
if err != nil {
panic(err)
}
// The custom Director below replaces the default one installed by
// NewSingleHostReverseProxy, so target selection is fully manual.
prx := httputil.NewSingleHostReverseProxy(pythonURL)
prx.Director = func(req *http.Request) {
target := pythonURL
if isBeefy(req) {
target = beefyURL
}
// According to net.Request documentation, setting Host is unnecessary
// because URL.Host is supposed to be used for outbound requests.
// However, on GAE, it seems that req.Host is incorrectly used.
req.Host = target.Host
req.URL.Scheme = target.Scheme
req.URL.Host = target.Host
req.URL.Path = fmt.Sprintf("%s%s", target.Path, req.URL.Path)
}
// makeOverride returns a prpc.Override which allows the given percentage of requests
// through to this service, proxying the remainder to Python.
makeOverride := func(prodPct, devPct int) func(*router.Context) bool {
return func(ctx *router.Context) bool {
// TODO(crbug/1090540): remove env k-v
// NOTE(review): a plain string context key is flagged by static
// analysis (staticcheck SA1029); tolerated only because the
// key is temporary per the TODO above.
ctx.Context = context.WithValue(ctx.Context, "env", "Prod")
pct := prodPct
if isDev(ctx.Request) {
pct = devPct
// TODO(crbug/1090540): remove env k-v
ctx.Context = context.WithValue(ctx.Context, "env", "Dev")
}
// The Should-Proxy header forces the decision either way; useful
// for manual testing of either backend.
switch val := ctx.Request.Header.Get("Should-Proxy"); val {
case "true":
pct = 0
logging.Debugf(ctx.Context, "request demanded to be proxied")
case "false":
pct = 100
logging.Debugf(ctx.Context, "request demanded not to be proxied")
}
// Returning false means "no override": the request is handled by
// this Go service. pct is therefore the percentage kept in Go.
if mathrand.Intn(ctx.Context, 100) < pct {
return false
}
target := pythonURL
if isBeefy(ctx.Request) {
target = beefyURL
}
logging.Debugf(ctx.Context, "proxying request to %s", target)
prx.ServeHTTP(ctx.Writer, ctx.Request)
return true
}
}
srv.PRPC.AccessControl = prpc.AllowOriginAll
access.RegisterAccessServer(srv.PRPC, &access.UnimplementedAccessServer{})
pb.RegisterBuildsServer(srv.PRPC, rpc.NewBuilds())
pb.RegisterBuildersServer(srv.PRPC, rpc.NewBuilders())
// TODO(crbug/1082369): Remove this workaround once field masks can be decoded.
srv.PRPC.HackFixFieldMasksForJSON = true
// makeOverride(prod % -> Go, dev % -> Go).
srv.PRPC.RegisterOverride("buildbucket.v2.Builds", "Batch", makeOverride(100, 100))
srv.PRPC.RegisterOverride("buildbucket.v2.Builds", "CancelBuild", makeOverride(0, 0))
srv.PRPC.RegisterOverride("buildbucket.v2.Builds", "ScheduleBuild", makeOverride(0, 0))
srv.PRPC.RegisterOverride("buildbucket.v2.Builds", "UpdateBuild", makeOverride(0, 0))
return nil
})
}
|
package structs
import (
"bytes"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/hex"
"errors"
"fmt"
"io"
"net"
"os"
"path/filepath"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/gorhill/cronexpr"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-version"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/args"
"github.com/mitchellh/copystructure"
"github.com/ugorji/go/codec"
hcodec "github.com/hashicorp/go-msgpack/codec"
)
var (
	// ErrNoLeader is the sentinel error returned when an operation
	// requires a cluster leader and none is currently known.
	// errors.New is used because no formatting verbs are needed
	// (fmt.Errorf with a constant string is an anti-idiom).
	ErrNoLeader = errors.New("No cluster leader")

	// ErrNoRegionPath is the sentinel error returned when a request
	// cannot be routed to its target region.
	ErrNoRegionPath = errors.New("No path to region")
)
// MessageType identifies the kind of a Raft log entry so the FSM can
// dispatch it to the matching apply handler.
type MessageType uint8
// Raft log entry types. These values act as persistent identifiers
// for log entries, so treat the list as append-only: do not reorder
// or remove entries.
const (
NodeRegisterRequestType MessageType = iota
NodeDeregisterRequestType
NodeUpdateStatusRequestType
NodeUpdateDrainRequestType
JobRegisterRequestType
JobDeregisterRequestType
EvalUpdateRequestType
EvalDeleteRequestType
AllocUpdateRequestType
AllocClientUpdateRequestType
ReconcileJobSummariesRequestType
VaultAccessorRegisterRequestType
// NOTE(review): "Degister" looks like a typo for "Deregister", but the
// exported name cannot be renamed without breaking callers.
VaultAccessorDegisterRequestType
ApplyPlanResultsRequestType
DeploymentStatusUpdateRequestType
DeploymentPromoteRequestType
DeploymentAllocHealthRequestType
DeploymentDeleteRequestType
JobStabilityRequestType
)
const (
// IgnoreUnknownTypeFlag is set along with a MessageType
// to indicate that the message type can be safely ignored
// if it is not recognized. This is for future proofing, so
// that new commands can be added in a way that won't cause
// old servers to crash when the FSM attempts to process them.
IgnoreUnknownTypeFlag MessageType = 128
// ApiMajorVersion is returned as part of the Status.Version request.
// It should be incremented anytime the APIs are changed in a way
// that would break clients for sane client versioning.
ApiMajorVersion = 1
// ApiMinorVersion is returned as part of the Status.Version request.
// It should be incremented anytime the APIs are changed to allow
// for sane client versioning. Minor changes should be compatible
// within the major version.
ApiMinorVersion = 1
// Keys used to report version information in the Status.Version
// response (see VersionResponse.Versions).
ProtocolVersion = "protocol"
APIMajorVersion = "api.major"
APIMinorVersion = "api.minor"
// Artifact getter modes. NOTE(review): semantics inferred from the
// names (any target, single file, directory) — confirm against the
// getter implementation before relying on this.
GetterModeAny = "any"
GetterModeFile = "file"
GetterModeDir = "dir"
)
// RPCInfo is used to describe common information about query
// and write requests so they can be routed and validated uniformly.
type RPCInfo interface {
// RequestRegion returns the region the request targets.
RequestRegion() string
// IsRead reports whether the request is a read-only query.
IsRead() bool
// AllowStaleRead reports whether a follower may serve the request
// with potentially stale data.
AllowStaleRead() bool
}
// QueryOptions is used to specify various flags for read queries.
// It is embedded in query request structs and implements RPCInfo.
type QueryOptions struct {
// The target region for this query
Region string
// If set, wait until query exceeds given index. Must be provided
// with MaxQueryTime.
MinQueryIndex uint64
// Provided with MinQueryIndex to wait for change.
MaxQueryTime time.Duration
// If set, any follower can service the request. Results
// may be arbitrarily stale.
AllowStale bool
// If set, used as prefix for resource list searches
Prefix string
}
// RequestRegion returns the region this query targets (RPCInfo).
func (q QueryOptions) RequestRegion() string {
return q.Region
}
// QueryOption only applies to reads, so always true
func (q QueryOptions) IsRead() bool {
return true
}
// AllowStaleRead reports whether a follower may serve this query.
func (q QueryOptions) AllowStaleRead() bool {
return q.AllowStale
}
// WriteRequest is the common embedded struct for all mutating RPCs;
// it carries the routing information shared by every write and
// implements RPCInfo.
type WriteRequest struct {
// The target region for this write
Region string
}
// RequestRegion returns the region this write targets (RPCInfo).
func (w WriteRequest) RequestRegion() string {
// The target region for this request
return w.Region
}
// WriteRequest only applies to writes, always false
func (w WriteRequest) IsRead() bool {
return false
}
// AllowStaleRead is always false: writes must be served by the leader.
func (w WriteRequest) AllowStaleRead() bool {
return false
}
// QueryMeta allows a query response to include potentially
// useful metadata about a query
type QueryMeta struct {
// This is the index associated with the read
Index uint64
// If AllowStale is used, this is time elapsed since
// last contact between the follower and leader. This
// can be used to gauge staleness.
LastContact time.Duration
// Used to indicate if there is a known leader node
KnownLeader bool
}
// WriteMeta allows a write response to include potentially
// useful metadata about the write
type WriteMeta struct {
// This is the index associated with the write
Index uint64
}
// NodeRegisterRequest is used for Node.Register endpoint
// to register a node as being a schedulable entity.
type NodeRegisterRequest struct {
Node *Node
WriteRequest
}
// NodeDeregisterRequest is used for Node.Deregister endpoint
// to deregister a node as being a schedulable entity.
type NodeDeregisterRequest struct {
NodeID string
WriteRequest
}
// NodeServerInfo is used to in NodeUpdateResponse to return Nomad server
// information used in RPC server lists.
type NodeServerInfo struct {
// RPCAdvertiseAddr is the IP endpoint that a Nomad Server wishes to
// be contacted at for RPCs.
RPCAdvertiseAddr string
// RpcMajorVersion is the major version number the Nomad Server
// supports
RPCMajorVersion int32
// RpcMinorVersion is the minor version number the Nomad Server
// supports
RPCMinorVersion int32
// Datacenter is the datacenter that a Nomad server belongs to
Datacenter string
}
// NodeUpdateStatusRequest is used for Node.UpdateStatus endpoint
// to update the status of a node.
type NodeUpdateStatusRequest struct {
NodeID string
Status string
WriteRequest
}
// NodeUpdateDrainRequest is used for updating the drain status
type NodeUpdateDrainRequest struct {
NodeID string
Drain bool
WriteRequest
}
// NodeEvaluateRequest is used to re-evaluate the node
type NodeEvaluateRequest struct {
NodeID string
WriteRequest
}
// NodeSpecificRequest is used when we just need to specify a target node
type NodeSpecificRequest struct {
NodeID string
SecretID string
QueryOptions
}
// JobRegisterRequest is used for Job.Register endpoint
// to register a job as being a schedulable entity.
type JobRegisterRequest struct {
Job *Job
// If EnforceIndex is set then the job will only be registered if the passed
// JobModifyIndex matches the current Jobs index. If the index is zero, the
// register only occurs if the job is new.
EnforceIndex bool
JobModifyIndex uint64
WriteRequest
}
// JobDeregisterRequest is used for Job.Deregister endpoint
// to deregister a job as being a schedulable entity.
type JobDeregisterRequest struct {
JobID string
// Purge controls whether the deregister purges the job from the system or
// whether the job is just marked as stopped and will be removed by the
// garbage collector
Purge bool
WriteRequest
}
// JobEvaluateRequest is used when we just need to re-evaluate a target job
type JobEvaluateRequest struct {
JobID string
WriteRequest
}
// JobSpecificRequest is used when we just need to specify a target job
type JobSpecificRequest struct {
JobID string
AllAllocs bool
QueryOptions
}
// JobListRequest is used to parameterize a list request
type JobListRequest struct {
QueryOptions
}
// JobPlanRequest is used for the Job.Plan endpoint to trigger a dry-run
// evaluation of the Job.
type JobPlanRequest struct {
Job *Job
Diff bool // Toggles an annotated diff
WriteRequest
}
// JobSummaryRequest is used when we just need to get a specific job summary
type JobSummaryRequest struct {
JobID string
QueryOptions
}
// JobDispatchRequest is used to dispatch a job based on a parameterized job
type JobDispatchRequest struct {
JobID string
Payload []byte
Meta map[string]string
WriteRequest
}
// JobValidateRequest is used to validate a job
type JobValidateRequest struct {
Job *Job
WriteRequest
}
// JobRevertRequest is used to revert a job to a prior version.
type JobRevertRequest struct {
// JobID is the ID of the job being reverted
JobID string
// JobVersion the version to revert to.
JobVersion uint64
// EnforcePriorVersion if set will enforce that the job is at the given
// version before reverting.
EnforcePriorVersion *uint64
WriteRequest
}
// JobStabilityRequest is used to marked a job as stable.
type JobStabilityRequest struct {
// Job to set the stability on
JobID string
JobVersion uint64
// Set the stability
Stable bool
WriteRequest
}
// JobStabilityResponse is the response when marking a job as stable.
type JobStabilityResponse struct {
WriteMeta
}
// NodeListRequest is used to parameterize a list request
type NodeListRequest struct {
QueryOptions
}
// EvalUpdateRequest is used for upserting evaluations.
type EvalUpdateRequest struct {
Evals []*Evaluation
EvalToken string
WriteRequest
}
// EvalDeleteRequest is used for deleting an evaluation.
type EvalDeleteRequest struct {
Evals []string
Allocs []string
WriteRequest
}
// EvalSpecificRequest is used when we just need to specify a target evaluation
type EvalSpecificRequest struct {
EvalID string
QueryOptions
}
// EvalAckRequest is used to Ack/Nack a specific evaluation
type EvalAckRequest struct {
EvalID string
Token string
WriteRequest
}
// EvalDequeueRequest is used when we want to dequeue an evaluation
type EvalDequeueRequest struct {
Schedulers []string
Timeout time.Duration
SchedulerVersion uint16
WriteRequest
}
// EvalListRequest is used to list the evaluations
type EvalListRequest struct {
QueryOptions
}
// PlanRequest is used to submit an allocation plan to the leader
type PlanRequest struct {
Plan *Plan
WriteRequest
}
// ApplyPlanResultsRequest is used by the planner to apply a Raft transaction
// committing the result of a plan.
type ApplyPlanResultsRequest struct {
// AllocUpdateRequest holds the allocation updates to be made by the
// scheduler.
AllocUpdateRequest
// Deployment is the deployment created or updated as a result of a
// scheduling event.
Deployment *Deployment
// DeploymentUpdates is a set of status updates to apply to the given
// deployments. This allows the scheduler to cancel any unneeded deployment
// because the job is stopped or the update block is removed.
DeploymentUpdates []*DeploymentStatusUpdate
}
// AllocUpdateRequest is used to submit changes to allocations, either
// to cause evictions or to assign new allocations. Both can be done
// within a single transaction
type AllocUpdateRequest struct {
// Alloc is the list of new allocations to assign
Alloc []*Allocation
// Job is the shared parent job of the allocations.
// It is pulled out since it is common to reduce payload size.
Job *Job
WriteRequest
}
// AllocListRequest is used to request a list of allocations
type AllocListRequest struct {
QueryOptions
}
// AllocSpecificRequest is used to query a specific allocation
type AllocSpecificRequest struct {
AllocID string
QueryOptions
}
// AllocsGetRequest is used to query a set of allocations
type AllocsGetRequest struct {
AllocIDs []string
QueryOptions
}
// PeriodicForceRequest is used to force a specific periodic job.
type PeriodicForceRequest struct {
JobID string
WriteRequest
}
// ServerMembersResponse has the list of servers in a cluster
type ServerMembersResponse struct {
ServerName string
ServerRegion string
ServerDC string
Members []*ServerMember
}
// ServerMember holds information about a Nomad server agent in a cluster
type ServerMember struct {
Name string
Addr net.IP
Port uint16
Tags map[string]string
Status string
ProtocolMin uint8
ProtocolMax uint8
ProtocolCur uint8
DelegateMin uint8
DelegateMax uint8
DelegateCur uint8
}
// DeriveVaultTokenRequest is used to request wrapped Vault tokens for the
// following tasks in the given allocation
type DeriveVaultTokenRequest struct {
NodeID string
SecretID string
AllocID string
Tasks []string
QueryOptions
}
// VaultAccessorsRequest is used to operate on a set of Vault accessors
type VaultAccessorsRequest struct {
Accessors []*VaultAccessor
}
// VaultAccessor is a reference to a created Vault token on behalf of
// an allocation's task.
type VaultAccessor struct {
AllocID string
Task string
NodeID string
Accessor string
CreationTTL int
// Raft Indexes
CreateIndex uint64
}
// DeriveVaultTokenResponse returns the wrapped tokens for each requested task
type DeriveVaultTokenResponse struct {
// Tasks is a mapping between the task name and the wrapped token
Tasks map[string]string
// Error stores any error that occurred. Errors are stored here so we can
// communicate whether it is retriable
Error *RecoverableError
QueryMeta
}
// GenericRequest is used to request where no
// specific information is needed.
type GenericRequest struct {
QueryOptions
}
// DeploymentListRequest is used to list the deployments
type DeploymentListRequest struct {
QueryOptions
}
// DeploymentDeleteRequest is used for deleting deployments.
type DeploymentDeleteRequest struct {
Deployments []string
WriteRequest
}
// DeploymentStatusUpdateRequest is used to update the status of a deployment as
// well as optionally creating an evaluation atomically.
type DeploymentStatusUpdateRequest struct {
// Eval, if set, is used to create an evaluation at the same time as
// updating the status of a deployment.
Eval *Evaluation
// DeploymentUpdate is a status update to apply to the given
// deployment.
DeploymentUpdate *DeploymentStatusUpdate
// Job is used to optionally upsert a job. This is used when setting the
// allocation health results in a deployment failure and the deployment
// auto-reverts to the latest stable job.
Job *Job
}
// DeploymentAllocHealthRequest is used to set the health of a set of
// allocations as part of a deployment.
type DeploymentAllocHealthRequest struct {
DeploymentID string
// Marks these allocations as healthy, allow further allocations
// to be rolled.
HealthyAllocationIDs []string
// Any unhealthy allocations fail the deployment
UnhealthyAllocationIDs []string
WriteRequest
}
// ApplyDeploymentAllocHealthRequest is used to apply an alloc health request via Raft
type ApplyDeploymentAllocHealthRequest struct {
DeploymentAllocHealthRequest
// An optional field to update the status of a deployment
DeploymentUpdate *DeploymentStatusUpdate
// Job is used to optionally upsert a job. This is used when setting the
// allocation health results in a deployment failure and the deployment
// auto-reverts to the latest stable job.
Job *Job
// An optional evaluation to create after promoting the canaries
Eval *Evaluation
}
// DeploymentPromoteRequest is used to promote task groups in a deployment
type DeploymentPromoteRequest struct {
DeploymentID string
// All is to promote all task groups
All bool
// Groups is used to set the promotion status per task group
Groups []string
WriteRequest
}
// ApplyDeploymentPromoteRequest is used to apply a promotion request via Raft
type ApplyDeploymentPromoteRequest struct {
DeploymentPromoteRequest
// An optional evaluation to create after promoting the canaries
Eval *Evaluation
}
// DeploymentPauseRequest is used to pause a deployment
type DeploymentPauseRequest struct {
DeploymentID string
// Pause sets the pause status
Pause bool
WriteRequest
}
// DeploymentSpecificRequest is used to make a request specific to a particular
// deployment
type DeploymentSpecificRequest struct {
DeploymentID string
QueryOptions
}
// DeploymentFailRequest is used to fail a particular deployment
type DeploymentFailRequest struct {
DeploymentID string
WriteRequest
}
// SingleDeploymentResponse is used to respond with a single deployment
type SingleDeploymentResponse struct {
Deployment *Deployment
QueryMeta
}
// GenericResponse is used to respond to a request where no
// specific response information is needed.
type GenericResponse struct {
WriteMeta
}
// VersionResponse is used for the Status.Version response
type VersionResponse struct {
Build string
Versions map[string]int
QueryMeta
}
// JobRegisterResponse is used to respond to a job registration
type JobRegisterResponse struct {
EvalID string
EvalCreateIndex uint64
JobModifyIndex uint64
// Warnings contains any warnings about the given job. These may include
// deprecation warnings.
Warnings string
QueryMeta
}
// JobDeregisterResponse is used to respond to a job deregistration
type JobDeregisterResponse struct {
EvalID string
EvalCreateIndex uint64
JobModifyIndex uint64
QueryMeta
}
// JobValidateResponse is the response from validate request
type JobValidateResponse struct {
// DriverConfigValidated indicates whether the agent validated the driver
// config
DriverConfigValidated bool
// ValidationErrors is a list of validation errors
ValidationErrors []string
// Error is a string version of any error that may have occurred
Error string
// Warnings contains any warnings about the given job. These may include
// deprecation warnings.
Warnings string
}
// NodeUpdateResponse is used to respond to a node update
type NodeUpdateResponse struct {
HeartbeatTTL time.Duration
EvalIDs []string
EvalCreateIndex uint64
NodeModifyIndex uint64
// LeaderRPCAddr is the RPC address of the current Raft Leader. If
// empty, the current Nomad Server is in the minority of a partition.
LeaderRPCAddr string
// NumNodes is the number of Nomad nodes attached to this quorum of
// Nomad Servers at the time of the response. This value can
// fluctuate based on the health of the cluster between heartbeats.
NumNodes int32
// Servers is the full list of known Nomad servers in the local
// region.
Servers []*NodeServerInfo
QueryMeta
}
// NodeDrainUpdateResponse is used to respond to a node drain update
type NodeDrainUpdateResponse struct {
EvalIDs []string
EvalCreateIndex uint64
NodeModifyIndex uint64
QueryMeta
}
// NodeAllocsResponse is used to return allocs for a single node
type NodeAllocsResponse struct {
Allocs []*Allocation
QueryMeta
}
// NodeClientAllocsResponse is used to return allocs meta data for a single node
type NodeClientAllocsResponse struct {
Allocs map[string]uint64
QueryMeta
}
// SingleNodeResponse is used to return a single node
type SingleNodeResponse struct {
Node *Node
QueryMeta
}
// NodeListResponse is used for a list request
type NodeListResponse struct {
Nodes []*NodeListStub
QueryMeta
}
// SingleJobResponse is used to return a single job
type SingleJobResponse struct {
Job *Job
QueryMeta
}
// JobSummaryResponse is used to return a single job summary
type JobSummaryResponse struct {
JobSummary *JobSummary
QueryMeta
}
type JobDispatchResponse struct {
DispatchedJobID string
EvalID string
EvalCreateIndex uint64
JobCreateIndex uint64
WriteMeta
}
// JobListResponse is used for a list request
type JobListResponse struct {
Jobs []*JobListStub
QueryMeta
}
// JobVersionsRequest is used to get a jobs versions
type JobVersionsRequest struct {
JobID string
Diffs bool
QueryOptions
}
// JobVersionsResponse is used for a job get versions request
type JobVersionsResponse struct {
Versions []*Job
Diffs []*JobDiff
QueryMeta
}
// JobPlanResponse is used to respond to a job plan request
type JobPlanResponse struct {
// Annotations stores annotations explaining decisions the scheduler made.
Annotations *PlanAnnotations
// FailedTGAllocs is the placement failures per task group.
FailedTGAllocs map[string]*AllocMetric
// JobModifyIndex is the modification index of the job. The value can be
// used when running `nomad run` to ensure that the Job wasn’t modified
// since the last plan. If the job is being created, the value is zero.
JobModifyIndex uint64
// CreatedEvals is the set of evaluations created by the scheduler. The
// reasons for this can be rolling-updates or blocked evals.
CreatedEvals []*Evaluation
// Diff contains the diff of the job and annotations on whether the change
// causes an in-place update or create/destroy
Diff *JobDiff
// NextPeriodicLaunch is the time duration till the job would be launched if
// submitted.
NextPeriodicLaunch time.Time
// Warnings contains any warnings about the given job. These may include
// deprecation warnings.
Warnings string
WriteMeta
}
// SingleAllocResponse is used to return a single allocation
type SingleAllocResponse struct {
Alloc *Allocation
QueryMeta
}
// AllocsGetResponse is used to return a set of allocations
type AllocsGetResponse struct {
Allocs []*Allocation
QueryMeta
}
// JobAllocationsResponse is used to return the allocations for a job
type JobAllocationsResponse struct {
Allocations []*AllocListStub
QueryMeta
}
// JobEvaluationsResponse is used to return the evaluations for a job
type JobEvaluationsResponse struct {
Evaluations []*Evaluation
QueryMeta
}
// SingleEvalResponse is used to return a single evaluation
type SingleEvalResponse struct {
Eval *Evaluation
QueryMeta
}
// EvalDequeueResponse is used to return from a dequeue
type EvalDequeueResponse struct {
Eval *Evaluation
Token string
QueryMeta
}
// PlanResponse is used to return from a PlanRequest
type PlanResponse struct {
Result *PlanResult
WriteMeta
}
// AllocListResponse is used for a list request
type AllocListResponse struct {
Allocations []*AllocListStub
QueryMeta
}
// DeploymentListResponse is used for a list request
type DeploymentListResponse struct {
Deployments []*Deployment
QueryMeta
}
// EvalListResponse is used for a list request
type EvalListResponse struct {
Evaluations []*Evaluation
QueryMeta
}
// EvalAllocationsResponse is used to return the allocations for an evaluation
type EvalAllocationsResponse struct {
Allocations []*AllocListStub
QueryMeta
}
// PeriodicForceResponse is used to respond to a periodic job force launch
type PeriodicForceResponse struct {
EvalID string
EvalCreateIndex uint64
WriteMeta
}
// DeploymentUpdateResponse is used to respond to a deployment change. The
// response will include the modify index of the deployment as well as details
// of any triggered evaluation.
type DeploymentUpdateResponse struct {
EvalID string
EvalCreateIndex uint64
DeploymentModifyIndex uint64
// RevertedJobVersion is the version the job was reverted to. If unset, the
// job wasn't reverted
RevertedJobVersion *uint64
WriteMeta
}
// Possible values of Node.Status over the node lifecycle.
const (
NodeStatusInit = "initializing"
NodeStatusReady = "ready"
NodeStatusDown = "down"
)
// ShouldDrainNode checks if a given node status should trigger an
// evaluation. Some states don't require any further action.
func ShouldDrainNode(status string) bool {
switch status {
case NodeStatusInit, NodeStatusReady:
return false
case NodeStatusDown:
return true
default:
panic(fmt.Sprintf("unhandled node status %s", status))
}
}
// ValidNodeStatus is used to check if a node status is one of the
// recognized lifecycle values.
func ValidNodeStatus(status string) bool {
	return status == NodeStatusInit ||
		status == NodeStatusReady ||
		status == NodeStatusDown
}
// Node is a representation of a schedulable client node
type Node struct {
// ID is a unique identifier for the node. It can be constructed
// by doing a concatenation of the Name and Datacenter as a simple
// approach. Alternatively a UUID may be used.
ID string
// SecretID is an ID that is only known by the Node and the set of Servers.
// It is not accessible via the API and is used to authenticate nodes
// conducting privileged activities.
SecretID string
// Datacenter for this node
Datacenter string
// Node name
Name string
// HTTPAddr is the address on which the Nomad client is listening for http
// requests
HTTPAddr string
// TLSEnabled indicates if the Agent has TLS enabled for the HTTP API
TLSEnabled bool
// Attributes is an arbitrary set of key/value
// data that can be used for constraints. Examples
// include "kernel.name=linux", "arch=386", "driver.docker=1",
// "docker.runtime=1.8.3"
Attributes map[string]string
// Resources is the available resources on the client.
// For example 'cpu=2' 'memory=2048'
Resources *Resources
// Reserved is the set of resources that are reserved,
// and should be subtracted from the total resources for
// the purposes of scheduling. This may provide certain
// high-watermark tolerances or account for external schedulers
// consuming resources.
Reserved *Resources
// Links are used to 'link' this client to external
// systems. For example 'consul=foo.dc1' 'aws=i-83212'
// 'ami=ami-123'
Links map[string]string
// Meta is used to associate arbitrary metadata with this
// client. This is opaque to Nomad.
Meta map[string]string
// NodeClass is an opaque identifier used to group nodes
// together for the purpose of determining scheduling pressure.
NodeClass string
// ComputedClass is a unique id that identifies nodes with a common set of
// attributes and capabilities.
ComputedClass string
// Drain is controlled by the servers, and not the client.
// If true, no jobs will be scheduled to this node, and existing
// allocations will be drained.
Drain bool
// Status of this node
Status string
// StatusDescription is meant to provide more human useful information
StatusDescription string
// StatusUpdatedAt is the time stamp at which the state of the node was
// updated
StatusUpdatedAt int64
// Raft Indexes
CreateIndex uint64
ModifyIndex uint64
}
// Ready returns if the node is ready for running allocations: it
// must have status "ready" and not be draining.
func (n *Node) Ready() bool {
	if n.Drain {
		return false
	}
	return n.Status == NodeStatusReady
}
// Copy returns a deep copy of the node, or nil for a nil receiver.
// Scalar fields are copied by the struct assignment; maps and the
// resource pointers are duplicated explicitly so the copy shares no
// mutable state with the original.
func (n *Node) Copy() *Node {
	if n == nil {
		return nil
	}
	dup := *n
	dup.Attributes = helper.CopyMapStringString(n.Attributes)
	dup.Resources = n.Resources.Copy()
	dup.Reserved = n.Reserved.Copy()
	dup.Links = helper.CopyMapStringString(n.Links)
	dup.Meta = helper.CopyMapStringString(n.Meta)
	return &dup
}
// TerminalStatus returns if the current status is terminal and
// will no longer transition. Only "down" is terminal today.
func (n *Node) TerminalStatus() bool {
	return n.Status == NodeStatusDown
}
// Stub returns a summarized version of the node suitable for list
// responses; only the identifying and status fields are carried over.
func (n *Node) Stub() *NodeListStub {
	stub := new(NodeListStub)
	stub.ID = n.ID
	stub.Datacenter = n.Datacenter
	stub.Name = n.Name
	stub.NodeClass = n.NodeClass
	stub.Drain = n.Drain
	stub.Status = n.Status
	stub.StatusDescription = n.StatusDescription
	stub.CreateIndex = n.CreateIndex
	stub.ModifyIndex = n.ModifyIndex
	return stub
}
// NodeListStub is used to return a subset of job information
// for the job list
type NodeListStub struct {
ID string
Datacenter string
Name string
NodeClass string
Drain bool
Status string
StatusDescription string
CreateIndex uint64
ModifyIndex uint64
}
// Networks defined for a task on the Resources struct.
type Networks []*NetworkResource
// Port returns the IP and port assignment for the given label,
// searching reserved ports first and then dynamic ports; empty
// values are returned when no network carries the label.
func (ns Networks) Port(label string) (string, int) {
	for _, network := range ns {
		for _, reserved := range network.ReservedPorts {
			if reserved.Label != label {
				continue
			}
			return network.IP, reserved.Value
		}
		for _, dynamic := range network.DynamicPorts {
			if dynamic.Label != label {
				continue
			}
			return network.IP, dynamic.Value
		}
	}
	return "", 0
}
// Resources is used to define the resources available
// on a client. CPU is in MHz, MemoryMB and DiskMB in megabytes.
type Resources struct {
	CPU      int
	MemoryMB int
	DiskMB   int
	IOPS     int
	Networks Networks
}
const (
	// BytesInMegabyte is the conversion factor used by DiskInBytes.
	BytesInMegabyte = 1024 * 1024
)
// DefaultResources returns the default resources for a task: 100 MHz of
// CPU, 10 MB of memory, and no IOPS. Disk and networks are left zero.
func DefaultResources() *Resources {
	r := new(Resources)
	r.CPU = 100
	r.MemoryMB = 10
	r.IOPS = 0
	return r
}
// DiskInBytes returns the amount of disk resources in bytes.
//
// The conversion to int64 happens before the multiplication so that large
// DiskMB values cannot overflow the platform-dependent int type (int is
// 32 bits on some platforms, where DiskMB > ~2047 previously wrapped).
func (r *Resources) DiskInBytes() int64 {
	return int64(r.DiskMB) * BytesInMegabyte
}
// Merge overlays the non-zero fields of other onto r. Zero-valued fields
// in other leave r untouched; a non-empty network list replaces r's
// networks wholesale rather than merging per network.
func (r *Resources) Merge(other *Resources) {
	if v := other.CPU; v != 0 {
		r.CPU = v
	}
	if v := other.MemoryMB; v != 0 {
		r.MemoryMB = v
	}
	if v := other.DiskMB; v != 0 {
		r.DiskMB = v
	}
	if v := other.IOPS; v != 0 {
		r.IOPS = v
	}
	if v := other.Networks; len(v) != 0 {
		r.Networks = v
	}
}
// Canonicalize normalizes an empty network list to nil so that nil and
// empty slices compare equal under reflect.DeepEqual (which the scheduler
// relies on), then canonicalizes each remaining network.
func (r *Resources) Canonicalize() {
	if len(r.Networks) == 0 {
		r.Networks = nil
		return
	}
	for _, net := range r.Networks {
		net.Canonicalize()
	}
}
// MeetsMinResources returns an error if the resources specified are less than
// the minimum allowed. All violations are accumulated into a single
// multierror so the caller sees every problem at once; nil is returned when
// all minimums (CPU >= 20, MemoryMB >= 10, IOPS >= 0) are satisfied.
func (r *Resources) MeetsMinResources() error {
	var mErr multierror.Error
	if r.CPU < 20 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum CPU value is 20; got %d", r.CPU))
	}
	if r.MemoryMB < 10 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MemoryMB value is 10; got %d", r.MemoryMB))
	}
	if r.IOPS < 0 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum IOPS value is 0; got %d", r.IOPS))
	}
	// Each network is validated independently so the index can be reported.
	for i, n := range r.Networks {
		if err := n.MeetsMinResources(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("network resource at index %d failed: %v", i, err))
		}
	}
	return mErr.ErrorOrNil()
}
// Copy returns a deep copy of the resources, or nil when r is nil. The
// network slice and each network inside it are duplicated.
func (r *Resources) Copy() *Resources {
	if r == nil {
		return nil
	}
	c := *r
	if r.Networks != nil {
		c.Networks = make([]*NetworkResource, len(r.Networks))
		for i, net := range r.Networks {
			c.Networks[i] = net.Copy()
		}
	}
	return &c
}
// NetIndex returns the index of the network whose device name matches n,
// or -1 when no network uses that device.
func (r *Resources) NetIndex(n *NetworkResource) int {
	for i := range r.Networks {
		if r.Networks[i].Device == n.Device {
			return i
		}
	}
	return -1
}
// Superset checks if one set of resources is a superset of another,
// returning false plus the name of the first exhausted dimension. Network
// resources are ignored here; use the NetworkIndex for those.
func (r *Resources) Superset(other *Resources) (bool, string) {
	switch {
	case r.CPU < other.CPU:
		return false, "cpu exhausted"
	case r.MemoryMB < other.MemoryMB:
		return false, "memory exhausted"
	case r.DiskMB < other.DiskMB:
		return false, "disk exhausted"
	case r.IOPS < other.IOPS:
		return false, "iops exhausted"
	}
	return true, ""
}
// Add adds the resources of the delta to this, potentially
// returning an error if not possible. (The error is currently always nil;
// the signature is kept for API stability.) A nil delta is a no-op.
func (r *Resources) Add(delta *Resources) error {
	if delta == nil {
		return nil
	}
	r.CPU += delta.CPU
	r.MemoryMB += delta.MemoryMB
	r.DiskMB += delta.DiskMB
	r.IOPS += delta.IOPS
	for _, n := range delta.Networks {
		// Find the matching network by device name (NetIndex matches on
		// Device); unmatched networks are appended as independent copies.
		idx := r.NetIndex(n)
		if idx == -1 {
			r.Networks = append(r.Networks, n.Copy())
		} else {
			r.Networks[idx].Add(n)
		}
	}
	return nil
}
// GoString renders the resources with %#v semantics on the dereferenced
// value so field contents, not the pointer address, are printed.
func (r *Resources) GoString() string {
	return fmt.Sprintf("*%#v", *r)
}
// Port is a label/value pair describing a single host port assignment.
type Port struct {
	Label string
	Value int
}
// NetworkResource is used to represent available network
// resources on a single device.
type NetworkResource struct {
	Device        string // Name of the device
	CIDR          string // CIDR block of addresses
	IP            string // Host IP address
	MBits         int    // Throughput
	ReservedPorts []Port // Host Reserved ports
	DynamicPorts  []Port // Host Dynamically assigned ports
}
// Canonicalize normalizes empty port slices to nil.
func (n *NetworkResource) Canonicalize() {
	// Ensure that an empty and nil slices are treated the same to avoid scheduling
	// problems since we use reflect DeepEquals.
	if len(n.ReservedPorts) == 0 {
		n.ReservedPorts = nil
	}
	if len(n.DynamicPorts) == 0 {
		n.DynamicPorts = nil
	}
}
// MeetsMinResources returns an error if the resources specified are less
// than the minimum allowed; a network must request at least 1 MBit.
func (n *NetworkResource) MeetsMinResources() error {
	var merr multierror.Error
	if n.MBits < 1 {
		merr.Errors = append(merr.Errors,
			fmt.Errorf("minimum MBits value is 1; got %d", n.MBits))
	}
	return merr.ErrorOrNil()
}
// Copy returns a deep copy of the network resource, or nil when n is nil.
// Port slices are duplicated; a non-nil empty slice stays non-nil so
// DeepEqual comparisons are unaffected.
func (n *NetworkResource) Copy() *NetworkResource {
	if n == nil {
		return nil
	}
	c := *n
	if n.ReservedPorts != nil {
		c.ReservedPorts = make([]Port, len(n.ReservedPorts))
		copy(c.ReservedPorts, n.ReservedPorts)
	}
	if n.DynamicPorts != nil {
		c.DynamicPorts = make([]Port, len(n.DynamicPorts))
		copy(c.DynamicPorts, n.DynamicPorts)
	}
	return &c
}
// Add accumulates the delta's bandwidth and ports into this network
// resource: MBits are summed, reserved and dynamic ports are appended.
// Unlike Resources.Add, no error is returned.
func (n *NetworkResource) Add(delta *NetworkResource) {
	if len(delta.ReservedPorts) > 0 {
		n.ReservedPorts = append(n.ReservedPorts, delta.ReservedPorts...)
	}
	n.MBits += delta.MBits
	n.DynamicPorts = append(n.DynamicPorts, delta.DynamicPorts...)
}
// GoString renders the network resource with %#v semantics on the
// dereferenced value.
func (n *NetworkResource) GoString() string {
	return fmt.Sprintf("*%#v", *n)
}
// PortLabels returns a map of port labels to their assigned host ports,
// covering both reserved and dynamic ports. Dynamic entries win when a
// label appears in both lists, mirroring insertion order.
func (n *NetworkResource) PortLabels() map[string]int {
	labels := make(map[string]int, len(n.ReservedPorts)+len(n.DynamicPorts))
	for _, p := range n.ReservedPorts {
		labels[p.Label] = p.Value
	}
	for _, p := range n.DynamicPorts {
		labels[p.Label] = p.Value
	}
	return labels
}
const (
	// JobTypeCore is reserved for internal system tasks and is
	// always handled by the CoreScheduler.
	JobTypeCore    = "_core"
	JobTypeService = "service"
	JobTypeBatch   = "batch"
	JobTypeSystem  = "system"
)
const (
	JobStatusPending = "pending" // Pending means the job is waiting on scheduling
	JobStatusRunning = "running" // Running means the job has non-terminal allocations
	JobStatusDead    = "dead"    // Dead means all evaluation's and allocations are terminal
)
const (
	// JobMinPriority is the minimum allowed priority
	JobMinPriority = 1
	// JobDefaultPriority is the default priority if not
	// specified.
	JobDefaultPriority = 50
	// JobMaxPriority is the maximum allowed priority
	JobMaxPriority = 100
	// Ensure CoreJobPriority is higher than any user
	// specified job so that it gets priority. This is important
	// for the system to remain healthy.
	CoreJobPriority = JobMaxPriority * 2
	// JobTrackedVersions is the number of historic job versions that are
	// kept.
	JobTrackedVersions = 6
)
// Job is the scope of a scheduling request to Nomad. It is the largest
// scoped object, and is a named collection of task groups. Each task group
// is further composed of tasks. A task group (TG) is the unit of scheduling
// however.
type Job struct {
	// Stop marks whether the user has stopped the job. A stopped job will
	// have all created allocations stopped and acts as a way to stop a job
	// without purging it from the system. This allows existing allocs to be
	// queried and the job to be inspected as it is being killed.
	Stop bool
	// Region is the Nomad region that handles scheduling this job
	Region string
	// ID is a unique identifier for the job per region. It can be
	// specified hierarchically like LineOfBiz/OrgName/Team/Project
	ID string
	// ParentID is the unique identifier of the job that spawned this job.
	ParentID string
	// Name is the logical name of the job used to refer to it. This is unique
	// per region, but not unique globally.
	Name string
	// Type is used to control various behaviors about the job. Most jobs
	// are service jobs, meaning they are expected to be long lived.
	// Some jobs are batch oriented meaning they run and then terminate.
	// This can be extended in the future to support custom schedulers.
	Type string
	// Priority is used to control scheduling importance and if this job
	// can preempt other jobs.
	Priority int
	// AllAtOnce is used to control if incremental scheduling of task groups
	// is allowed or if we must do a gang scheduling of the entire job. This
	// can slow down larger jobs if resources are not available.
	AllAtOnce bool
	// Datacenters contains all the datacenters this job is allowed to span
	Datacenters []string
	// Constraints can be specified at a job level and apply to
	// all the task groups and tasks.
	Constraints []*Constraint
	// TaskGroups are the collections of task groups that this job needs
	// to run. Each task group is an atomic unit of scheduling and placement.
	TaskGroups []*TaskGroup
	// COMPAT: Remove in 0.7.0. Stagger is deprecated in 0.6.0.
	Update UpdateStrategy
	// Periodic is used to define the interval the job is run at.
	Periodic *PeriodicConfig
	// ParameterizedJob is used to specify the job as a parameterized job
	// for dispatching.
	ParameterizedJob *ParameterizedJobConfig
	// Payload is the payload supplied when the job was dispatched.
	Payload []byte
	// Meta is used to associate arbitrary metadata with this
	// job. This is opaque to Nomad.
	Meta map[string]string
	// VaultToken is the Vault token that proves the submitter of the job has
	// access to the specified Vault policies. This field is only used to
	// transfer the token and is not stored after Job submission.
	VaultToken string
	// Job status
	Status string
	// StatusDescription is meant to provide more human useful information
	StatusDescription string
	// Stable marks a job as stable. Stability is only defined on "service" and
	// "system" jobs. The stability of a job will be set automatically as part
	// of a deployment and can be manually set via APIs.
	Stable bool
	// Version is a monotonically increasing version number that is incremented
	// on each job register.
	Version uint64
	// SubmitTime is the time at which the job was submitted as a UnixNano in
	// UTC
	SubmitTime int64
	// Raft Indexes
	CreateIndex    uint64
	ModifyIndex    uint64
	JobModifyIndex uint64
}
// Canonicalize is used to canonicalize fields in the Job. This should be called
// when registering a Job. A set of warnings are returned if the job was changed
// in anyway that the user should be made aware of.
//
// Besides normalizing nil/empty fields, this performs the 0.6.0 migration of
// the deprecated job-level update stanza into per-task-group update blocks,
// and strips update stanzas from batch jobs entirely.
func (j *Job) Canonicalize() (warnings error) {
	var mErr multierror.Error
	// Ensure that an empty and nil map are treated the same to avoid scheduling
	// problems since we use reflect DeepEquals.
	if len(j.Meta) == 0 {
		j.Meta = nil
	}
	for _, tg := range j.TaskGroups {
		tg.Canonicalize(j)
	}
	if j.ParameterizedJob != nil {
		j.ParameterizedJob.Canonicalize()
	}
	if j.Periodic != nil {
		j.Periodic.Canonicalize()
	}
	// COMPAT: Remove in 0.7.0
	// Rewrite any job that has an update block with pre 0.6.0 syntax.
	jobHasOldUpdate := j.Update.Stagger > 0 && j.Update.MaxParallel > 0
	if jobHasOldUpdate && j.Type != JobTypeBatch {
		// Build an appropriate update block and copy it down to each task group
		base := DefaultUpdateStrategy.Copy()
		base.MaxParallel = j.Update.MaxParallel
		base.MinHealthyTime = j.Update.Stagger
		// Add to each task group, modifying as needed
		upgraded := false
		l := len(j.TaskGroups)
		for _, tg := range j.TaskGroups {
			// The task group doesn't need upgrading if it has an update block with the new syntax
			u := tg.Update
			if u != nil && u.Stagger > 0 && u.MaxParallel > 0 &&
				u.HealthCheck != "" && u.MinHealthyTime > 0 && u.HealthyDeadline > 0 {
				continue
			}
			upgraded = true
			// The MaxParallel for the job should be 10% of the total count
			// unless there is just one task group then we can infer the old
			// max parallel should be the new
			tgu := base.Copy()
			if l != 1 {
				// RoundTo 10%
				var percent float64 = float64(tg.Count) * 0.1
				tgu.MaxParallel = int(percent + 0.5)
			}
			// Safety guards: at least one allocation updates at a time, and
			// never more than the group's total count.
			if tgu.MaxParallel == 0 {
				tgu.MaxParallel = 1
			} else if tgu.MaxParallel > tg.Count {
				tgu.MaxParallel = tg.Count
			}
			tg.Update = tgu
		}
		if upgraded {
			w := "A best effort conversion to new update stanza introduced in v0.6.0 applied. " +
				"Please update upgrade stanza before v0.7.0."
			multierror.Append(&mErr, fmt.Errorf(w))
		}
	}
	// Ensure that the batch job doesn't have new style or old style update
	// stanza. Unfortunately are scanning here because we have to deprecate over
	// a release so we can't check in the task group since that may be new style
	// but wouldn't capture the old style and we don't want to have duplicate
	// warnings.
	if j.Type == JobTypeBatch {
		displayWarning := jobHasOldUpdate
		j.Update.Stagger = 0
		j.Update.MaxParallel = 0
		j.Update.HealthCheck = ""
		j.Update.MinHealthyTime = 0
		j.Update.HealthyDeadline = 0
		j.Update.AutoRevert = false
		j.Update.Canary = 0
		// Remove any update spec from the task groups
		for _, tg := range j.TaskGroups {
			if tg.Update != nil {
				displayWarning = true
				tg.Update = nil
			}
		}
		if displayWarning {
			w := "Update stanza is disallowed for batch jobs since v0.6.0. " +
				"The update block has automatically been removed"
			multierror.Append(&mErr, fmt.Errorf(w))
		}
	}
	return mErr.ErrorOrNil()
}
// Copy returns a deep copy of the Job, or nil when j is nil. It is expected
// that callers use recover: this can panic if the deep copy fails, as it
// uses reflection through the nested Copy helpers.
func (j *Job) Copy() *Job {
	if j == nil {
		return nil
	}
	c := *j
	c.Datacenters = helper.CopySliceString(c.Datacenters)
	c.Constraints = CopySliceConstraints(c.Constraints)
	if j.TaskGroups != nil {
		c.TaskGroups = make([]*TaskGroup, len(j.TaskGroups))
		for i, tg := range j.TaskGroups {
			c.TaskGroups[i] = tg.Copy()
		}
	}
	c.Periodic = c.Periodic.Copy()
	c.Meta = helper.CopyMapStringString(c.Meta)
	c.ParameterizedJob = c.ParameterizedJob.Copy()
	return &c
}
// Validate is used to sanity check a job input. Every detected problem is
// accumulated into a single multierror so the caller sees all issues at
// once; nil is returned when the job is valid.
func (j *Job) Validate() error {
	var mErr multierror.Error
	if j.Region == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job region"))
	}
	if j.ID == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job ID"))
	} else if strings.Contains(j.ID, " ") {
		mErr.Errors = append(mErr.Errors, errors.New("Job ID contains a space"))
	}
	if j.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job name"))
	}
	switch j.Type {
	case JobTypeCore, JobTypeService, JobTypeBatch, JobTypeSystem:
	case "":
		mErr.Errors = append(mErr.Errors, errors.New("Missing job type"))
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Invalid job type: %q", j.Type))
	}
	if j.Priority < JobMinPriority || j.Priority > JobMaxPriority {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Job priority must be between [%d, %d]", JobMinPriority, JobMaxPriority))
	}
	if len(j.Datacenters) == 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job datacenters"))
	}
	if len(j.TaskGroups) == 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job task groups"))
	}
	for idx, constr := range j.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}
	// Check for duplicate task groups
	taskGroups := make(map[string]int)
	for idx, tg := range j.TaskGroups {
		if tg.Name == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d missing name", idx+1))
		} else if existing, ok := taskGroups[tg.Name]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d redefines '%s' from group %d", idx+1, tg.Name, existing+1))
		} else {
			taskGroups[tg.Name] = idx
		}
		// Use the JobTypeSystem constant rather than a raw "system"
		// literal, consistent with the type switch above.
		if j.Type == JobTypeSystem && tg.Count > 1 {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("Job task group %s has count %d. Count cannot exceed 1 with system scheduler",
					tg.Name, tg.Count))
		}
	}
	// Validate the task group
	for _, tg := range j.TaskGroups {
		if err := tg.Validate(j); err != nil {
			outer := fmt.Errorf("Task group %s validation failed: %v", tg.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}
	// Validate periodic is only used with batch jobs.
	if j.IsPeriodic() && j.Periodic.Enabled {
		if j.Type != JobTypeBatch {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("Periodic can only be used with %q scheduler", JobTypeBatch))
		}
		if err := j.Periodic.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}
	if j.IsParameterized() {
		if j.Type != JobTypeBatch {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("Parameterized job can only be used with %q scheduler", JobTypeBatch))
		}
		if err := j.ParameterizedJob.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}
	return mErr.ErrorOrNil()
}
// Warnings returns a list of warnings that may be from dubious settings or
// deprecation warnings, gathered from every task group in the job.
func (j *Job) Warnings() error {
	var merr multierror.Error
	for _, tg := range j.TaskGroups {
		err := tg.Warnings(j)
		if err == nil {
			continue
		}
		merr.Errors = append(merr.Errors,
			fmt.Errorf("Group %q has warnings: %v", tg.Name, err))
	}
	return merr.ErrorOrNil()
}
// LookupTaskGroup returns the task group with the given name, or nil when
// no group matches.
func (j *Job) LookupTaskGroup(name string) *TaskGroup {
	for i := range j.TaskGroups {
		if j.TaskGroups[i].Name == name {
			return j.TaskGroups[i]
		}
	}
	return nil
}
// CombinedTaskMeta takes a TaskGroup and Task name and returns the combined
// meta data for the task. When joining Job, Group and Task Meta, the
// precedence is by deepest scope (Task > Group > Job). Returns nil when the
// group or task cannot be found.
func (j *Job) CombinedTaskMeta(groupName, taskName string) map[string]string {
	group := j.LookupTaskGroup(groupName)
	if group == nil {
		return nil
	}
	task := group.LookupTask(taskName)
	if task == nil {
		return nil
	}
	merged := helper.CopyMapStringString(task.Meta)
	if merged == nil {
		merged = make(map[string]string, len(group.Meta)+len(j.Meta))
	}
	// Lower-precedence scopes only fill in keys the deeper scopes left unset.
	for _, scope := range []map[string]string{group.Meta, j.Meta} {
		for k, v := range scope {
			if _, exists := merged[k]; !exists {
				merged[k] = v
			}
		}
	}
	return merged
}
// Stopped reports whether the job is stopped; a nil job counts as stopped.
func (j *Job) Stopped() bool {
	if j == nil {
		return true
	}
	return j.Stop
}
// HasUpdateStrategy reports whether any task group in the job carries an
// update strategy.
func (j *Job) HasUpdateStrategy() bool {
	for i := range j.TaskGroups {
		if j.TaskGroups[i].Update != nil {
			return true
		}
	}
	return false
}
// Stub is used to return a summary of the job suitable for job list
// responses; the supplied JobSummary is embedded as-is.
func (j *Job) Stub(summary *JobSummary) *JobListStub {
	return &JobListStub{
		ID:                j.ID,
		ParentID:          j.ParentID,
		Name:              j.Name,
		Type:              j.Type,
		Priority:          j.Priority,
		Periodic:          j.IsPeriodic(),
		ParameterizedJob:  j.IsParameterized(),
		Stop:              j.Stop,
		Status:            j.Status,
		StatusDescription: j.StatusDescription,
		CreateIndex:       j.CreateIndex,
		ModifyIndex:       j.ModifyIndex,
		JobModifyIndex:    j.JobModifyIndex,
		SubmitTime:        j.SubmitTime,
		JobSummary:        summary,
	}
}
// IsPeriodic returns whether a job is periodic, i.e. has a periodic
// configuration attached (enabled or not).
func (j *Job) IsPeriodic() bool {
	return j.Periodic != nil
}
// IsParameterized returns whether a job is a parameterized job, i.e. has a
// parameterized configuration attached.
func (j *Job) IsParameterized() bool {
	return j.ParameterizedJob != nil
}
// VaultPolicies returns the set of Vault policies per task group, per task.
// Groups whose tasks declare no Vault stanza are omitted entirely.
func (j *Job) VaultPolicies() map[string]map[string]*Vault {
	policies := make(map[string]map[string]*Vault, len(j.TaskGroups))
	for _, tg := range j.TaskGroups {
		group := make(map[string]*Vault, len(tg.Tasks))
		for _, task := range tg.Tasks {
			if task.Vault != nil {
				group[task.Name] = task.Vault
			}
		}
		if len(group) > 0 {
			policies[tg.Name] = group
		}
	}
	return policies
}
// RequiredSignals returns a mapping of task groups to tasks to their required
// set of signals. A task requires a signal when its Vault stanza or any of
// its templates uses the signal change mode. The per-task signal lists are
// deduplicated and sorted; tasks with no required signals are omitted.
func (j *Job) RequiredSignals() map[string]map[string][]string {
	signals := make(map[string]map[string][]string)
	for _, tg := range j.TaskGroups {
		for _, task := range tg.Tasks {
			// Use this local one as a set
			taskSignals := make(map[string]struct{})
			// Check if the Vault change mode uses signals
			if task.Vault != nil && task.Vault.ChangeMode == VaultChangeModeSignal {
				taskSignals[task.Vault.ChangeSignal] = struct{}{}
			}
			// Check if any template change mode uses signals
			for _, t := range task.Templates {
				if t.ChangeMode != TemplateChangeModeSignal {
					continue
				}
				taskSignals[t.ChangeSignal] = struct{}{}
			}
			// Flatten and sort the signals
			l := len(taskSignals)
			if l == 0 {
				continue
			}
			flat := make([]string, 0, l)
			for sig := range taskSignals {
				flat = append(flat, sig)
			}
			sort.Strings(flat)
			tgSignals, ok := signals[tg.Name]
			if !ok {
				tgSignals = make(map[string][]string)
				signals[tg.Name] = tgSignals
			}
			tgSignals[task.Name] = flat
		}
	}
	return signals
}
// SpecChanged determines if the functional specification has changed between
// two job versions. Mutable server-managed fields (status, stability,
// version, indexes, submit time) are copied from j onto a copy of new so
// that only user-specified fields influence the DeepEqual comparison.
func (j *Job) SpecChanged(new *Job) bool {
	if j == nil {
		return new != nil
	}
	// Create a copy of the new job
	c := new.Copy()
	// Update the new job so we can do a reflect
	c.Status = j.Status
	c.StatusDescription = j.StatusDescription
	c.Stable = j.Stable
	c.Version = j.Version
	c.CreateIndex = j.CreateIndex
	c.ModifyIndex = j.ModifyIndex
	c.JobModifyIndex = j.JobModifyIndex
	c.SubmitTime = j.SubmitTime
	// Deep equals the jobs
	return !reflect.DeepEqual(j, c)
}
// SetSubmitTime stamps the job with the current UTC time in nanoseconds.
func (j *Job) SetSubmitTime() {
	j.SubmitTime = time.Now().UTC().UnixNano()
}
// JobListStub is used to return a subset of job information
// for the job list.
type JobListStub struct {
	ID                string
	ParentID          string
	Name              string
	Type              string
	Priority          int
	Periodic          bool
	ParameterizedJob  bool
	Stop              bool
	Status            string
	StatusDescription string
	JobSummary        *JobSummary
	CreateIndex       uint64
	ModifyIndex       uint64
	JobModifyIndex    uint64
	SubmitTime        int64
}
// JobSummary summarizes the state of the allocations of a job
type JobSummary struct {
	JobID string
	// Summary contains the summary per task group for the Job
	Summary map[string]TaskGroupSummary
	// Children contains a summary for the children of this job.
	Children *JobChildrenSummary
	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
// Copy returns a new deep copy of the JobSummary, or nil when js is nil.
// The per-task-group summary map is duplicated; the nil guard is added for
// consistency with every other Copy helper in this package.
func (js *JobSummary) Copy() *JobSummary {
	if js == nil {
		return nil
	}
	newJobSummary := new(JobSummary)
	*newJobSummary = *js
	newTGSummary := make(map[string]TaskGroupSummary, len(js.Summary))
	for k, v := range js.Summary {
		newTGSummary[k] = v
	}
	newJobSummary.Summary = newTGSummary
	newJobSummary.Children = newJobSummary.Children.Copy()
	return newJobSummary
}
// JobChildrenSummary contains the summary of children job statuses,
// counting children by their job status.
type JobChildrenSummary struct {
	Pending int64
	Running int64
	Dead    int64
}
// Copy returns a copy of the children summary, or nil when jc is nil. All
// fields are value types, so a shallow copy suffices.
func (jc *JobChildrenSummary) Copy() *JobChildrenSummary {
	if jc == nil {
		return nil
	}
	c := *jc
	return &c
}
// TaskGroupSummary summarizes the state of all the allocations of a
// particular TaskGroup, counting allocations per client status.
type TaskGroupSummary struct {
	Queued   int
	Complete int
	Failed   int
	Running  int
	Starting int
	Lost     int
}
const (
	// Checks uses any registered health check state in combination with task
	// states to determine if an allocation is healthy.
	UpdateStrategyHealthCheck_Checks = "checks"
	// TaskStates uses the task states of an allocation to determine if the
	// allocation is healthy.
	UpdateStrategyHealthCheck_TaskStates = "task_states"
	// Manual allows the operator to manually signal to Nomad when an
	// allocation is healthy. This allows more advanced health checking that is
	// outside of the scope of Nomad.
	UpdateStrategyHealthCheck_Manual = "manual"
)
var (
	// DefaultUpdateStrategy provides a baseline that can be used to upgrade
	// jobs with the old policy or for populating field defaults.
	DefaultUpdateStrategy = &UpdateStrategy{
		Stagger:         30 * time.Second,
		MaxParallel:     0,
		HealthCheck:     UpdateStrategyHealthCheck_Checks,
		MinHealthyTime:  10 * time.Second,
		HealthyDeadline: 5 * time.Minute,
		AutoRevert:      false,
		Canary:          0,
	}
)
// UpdateStrategy is used to modify how updates are done
type UpdateStrategy struct {
	// Stagger is used to determine the rate at which allocations are migrated
	// due to down or draining nodes.
	Stagger time.Duration
	// MaxParallel is how many updates can be done in parallel
	MaxParallel int
	// HealthCheck specifies the mechanism in which allocations are marked
	// healthy or unhealthy as part of a deployment.
	HealthCheck string
	// MinHealthyTime is the minimum time an allocation must be in the healthy
	// state before it is marked as healthy, unblocking more allocations to be
	// rolled.
	MinHealthyTime time.Duration
	// HealthyDeadline is the time in which an allocation must be marked as
	// healthy before it is automatically transitioned to unhealthy. This time
	// period doesn't count against the MinHealthyTime.
	HealthyDeadline time.Duration
	// AutoRevert declares that if a deployment fails because of unhealthy
	// allocations, there should be an attempt to auto-revert the job to a
	// stable version.
	AutoRevert bool
	// Canary is the number of canaries to deploy when a change to the task
	// group is detected.
	Canary int
}
// Copy returns a copy of the update strategy, or nil when u is nil. All
// fields are value types, so a shallow copy is a full copy. The local is
// deliberately not named "copy" (as before) to avoid shadowing the copy
// built-in.
func (u *UpdateStrategy) Copy() *UpdateStrategy {
	if u == nil {
		return nil
	}
	c := new(UpdateStrategy)
	*c = *u
	return c
}
// Validate sanity checks the update strategy: the health check mechanism
// must be one of the known constants, counts must be non-negative, and the
// deadline and stagger must be strictly positive. A nil strategy is valid.
func (u *UpdateStrategy) Validate() error {
	if u == nil {
		return nil
	}
	var mErr multierror.Error
	switch u.HealthCheck {
	case UpdateStrategyHealthCheck_Checks, UpdateStrategyHealthCheck_TaskStates, UpdateStrategyHealthCheck_Manual:
	default:
		multierror.Append(&mErr, fmt.Errorf("Invalid health check given: %q", u.HealthCheck))
	}
	if u.MaxParallel < 0 {
		multierror.Append(&mErr, fmt.Errorf("Max parallel can not be less than zero: %d < 0", u.MaxParallel))
	}
	if u.Canary < 0 {
		multierror.Append(&mErr, fmt.Errorf("Canary count can not be less than zero: %d < 0", u.Canary))
	}
	if u.MinHealthyTime < 0 {
		multierror.Append(&mErr, fmt.Errorf("Minimum healthy time may not be less than zero: %v", u.MinHealthyTime))
	}
	if u.HealthyDeadline <= 0 {
		multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be greater than zero: %v", u.HealthyDeadline))
	}
	if u.Stagger <= 0 {
		multierror.Append(&mErr, fmt.Errorf("Stagger must be greater than zero: %v", u.Stagger))
	}
	return mErr.ErrorOrNil()
}
// TODO(alexdadgar): Remove once no longer used by the scheduler.
// Rolling returns if a rolling strategy should be used, i.e. both a
// stagger and a parallelism limit are set (the pre-0.6.0 update syntax).
func (u *UpdateStrategy) Rolling() bool {
	return u.Stagger > 0 && u.MaxParallel > 0
}
const (
	// PeriodicSpecCron is used for a cron spec.
	PeriodicSpecCron = "cron"
	// PeriodicSpecTest is only used by unit tests. It is a sorted, comma
	// separated list of unix timestamps at which to launch.
	PeriodicSpecTest = "_internal_test"
)
// PeriodicConfig defines the interval a job should be run at.
type PeriodicConfig struct {
	// Enabled determines if the job should be run periodically.
	Enabled bool
	// Spec specifies the interval the job should be run as. It is parsed based
	// on the SpecType.
	Spec string
	// SpecType defines the format of the spec.
	SpecType string
	// ProhibitOverlap enforces that spawned jobs do not run in parallel.
	ProhibitOverlap bool
	// TimeZone is the user specified string that determines the time zone to
	// launch against. The time zones must be specified from IANA Time Zone
	// database, such as "America/New_York".
	// Reference: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
	// Reference: https://www.iana.org/time-zones
	TimeZone string
	// location is the time zone to evaluate the launch time against;
	// populated by Canonicalize from TimeZone.
	location *time.Location
}
// Copy returns a copy of the periodic config, or nil when p is nil. A
// shallow copy suffices since the location pointer is shared intentionally.
func (p *PeriodicConfig) Copy() *PeriodicConfig {
	if p == nil {
		return nil
	}
	c := *p
	return &c
}
// Validate sanity checks an enabled periodic config: a spec must be
// present, the time zone (if given) must resolve, and cron specs must
// parse. A disabled config is always valid.
func (p *PeriodicConfig) Validate() error {
	if !p.Enabled {
		return nil
	}
	var mErr multierror.Error
	if p.Spec == "" {
		multierror.Append(&mErr, fmt.Errorf("Must specify a spec"))
	}
	// Check if we got a valid time zone
	if p.TimeZone != "" {
		if _, err := time.LoadLocation(p.TimeZone); err != nil {
			multierror.Append(&mErr, fmt.Errorf("Invalid time zone %q: %v", p.TimeZone, err))
		}
	}
	switch p.SpecType {
	case PeriodicSpecCron:
		// Validate the cron spec
		if _, err := cronexpr.Parse(p.Spec); err != nil {
			multierror.Append(&mErr, fmt.Errorf("Invalid cron spec %q: %v", p.Spec, err))
		}
	case PeriodicSpecTest:
		// No-op
	default:
		multierror.Append(&mErr, fmt.Errorf("Unknown periodic specification type %q", p.SpecType))
	}
	return mErr.ErrorOrNil()
}
// Canonicalize resolves the configured TimeZone into the cached location
// used when evaluating launch times. An empty TimeZone resolves to UTC via
// time.LoadLocation; an invalid TimeZone falls back to UTC.
func (p *PeriodicConfig) Canonicalize() {
	l, err := time.LoadLocation(p.TimeZone)
	if err != nil {
		// Previously the UTC fallback was immediately overwritten with the
		// nil location returned on error; return early so it sticks.
		p.location = time.UTC
		return
	}
	p.location = l
}
// Next returns the closest time instant matching the spec that is after the
// passed time. If no matching instance exists, the zero value of time.Time is
// returned. The `time.Location` of the returned value matches that of the
// passed time.
func (p *PeriodicConfig) Next(fromTime time.Time) time.Time {
	switch p.SpecType {
	case PeriodicSpecCron:
		// Parse errors fall through to the zero time below.
		if e, err := cronexpr.Parse(p.Spec); err == nil {
			return e.Next(fromTime)
		}
	case PeriodicSpecTest:
		split := strings.Split(p.Spec, ",")
		if len(split) == 1 && split[0] == "" {
			return time.Time{}
		}
		// Parse the times
		times := make([]time.Time, len(split))
		for i, s := range split {
			unix, err := strconv.Atoi(s)
			if err != nil {
				return time.Time{}
			}
			times[i] = time.Unix(int64(unix), 0)
		}
		// Find the next match; the spec is documented to be sorted, so the
		// first time after fromTime is the closest.
		for _, next := range times {
			if fromTime.Before(next) {
				return next
			}
		}
	}
	return time.Time{}
}
// GetLocation returns the location to use for determining the time zone to
// run the periodic job against. Jobs created before 0.5.5 never had the
// location populated, so UTC is used as the fallback.
func (p *PeriodicConfig) GetLocation() *time.Location {
	if p.location == nil {
		return time.UTC
	}
	return p.location
}
const (
	// PeriodicLaunchSuffix is the string appended to the periodic jobs ID
	// when launching derived instances of it.
	PeriodicLaunchSuffix = "/periodic-"
)
// PeriodicLaunch tracks the last launch time of a periodic job.
type PeriodicLaunch struct {
	ID     string    // ID of the periodic job.
	Launch time.Time // The last launch time.
	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
const (
	// Payload requirement modes for parameterized jobs.
	DispatchPayloadForbidden = "forbidden"
	DispatchPayloadOptional  = "optional"
	DispatchPayloadRequired  = "required"
	// DispatchLaunchSuffix is the string appended to the parameterized job's ID
	// when dispatching instances of it.
	DispatchLaunchSuffix = "/dispatch-"
)
// ParameterizedJobConfig is used to configure the parameterized job
type ParameterizedJobConfig struct {
	// Payload configure the payload requirements
	Payload string
	// MetaRequired is metadata keys that must be specified by the dispatcher
	MetaRequired []string
	// MetaOptional is metadata keys that may be specified by the dispatcher
	MetaOptional []string
}
// Validate checks that the payload requirement is a known mode and that the
// required and optional meta key sets do not overlap.
func (d *ParameterizedJobConfig) Validate() error {
	var merr multierror.Error
	switch d.Payload {
	case DispatchPayloadOptional, DispatchPayloadRequired, DispatchPayloadForbidden:
		// Known payload mode.
	default:
		multierror.Append(&merr, fmt.Errorf("Unknown payload requirement: %q", d.Payload))
	}
	// Check that the meta configurations are disjoint sets
	if disjoint, offending := helper.SliceSetDisjoint(d.MetaRequired, d.MetaOptional); !disjoint {
		multierror.Append(&merr, fmt.Errorf("Required and optional meta keys should be disjoint. Following keys exist in both: %v", offending))
	}
	return merr.ErrorOrNil()
}
// Canonicalize defaults an unspecified payload requirement to optional.
func (d *ParameterizedJobConfig) Canonicalize() {
	if d.Payload != "" {
		return
	}
	d.Payload = DispatchPayloadOptional
}
// Copy returns a deep copy of the parameterized job config, or nil when d
// is nil. The meta key slices are duplicated.
func (d *ParameterizedJobConfig) Copy() *ParameterizedJobConfig {
	if d == nil {
		return nil
	}
	c := *d
	c.MetaOptional = helper.CopySliceString(c.MetaOptional)
	c.MetaRequired = helper.CopySliceString(c.MetaRequired)
	return &c
}
// DispatchedID returns an ID appropriate for a job dispatched against a
// particular parameterized job. The ID combines the template ID, the
// dispatch timestamp, and an 8-character random suffix for uniqueness.
func DispatchedID(templateID string, t time.Time) string {
	u := GenerateUUID()[:8]
	return fmt.Sprintf("%s%s%d-%s", templateID, DispatchLaunchSuffix, t.Unix(), u)
}
// DispatchPayloadConfig configures how a task gets its input from a job dispatch
type DispatchPayloadConfig struct {
	// File specifies a relative path to where the input data should be written
	File string
}
// Copy returns a copy of the dispatch payload config, or nil when d is nil.
func (d *DispatchPayloadConfig) Copy() *DispatchPayloadConfig {
	if d == nil {
		return nil
	}
	c := *d
	return &c
}
// Validate ensures the payload destination stays inside the task's local
// directory within the allocation dir.
func (d *DispatchPayloadConfig) Validate() error {
	escaped, err := PathEscapesAllocDir("task/local/", d.File)
	switch {
	case err != nil:
		return fmt.Errorf("invalid destination path: %v", err)
	case escaped:
		return fmt.Errorf("destination escapes allocation directory")
	}
	return nil
}
var (
	// defaultServiceJobRestartPolicy is the restart policy applied to
	// service and system jobs that do not specify one (see NewRestartPolicy).
	defaultServiceJobRestartPolicy = RestartPolicy{
		Delay:    15 * time.Second,
		Attempts: 2,
		Interval: 1 * time.Minute,
		Mode:     RestartPolicyModeDelay,
	}

	// defaultBatchJobRestartPolicy is the restart policy applied to batch
	// jobs that do not specify one; it allows more attempts over a much
	// longer interval.
	defaultBatchJobRestartPolicy = RestartPolicy{
		Delay:    15 * time.Second,
		Attempts: 15,
		Interval: 7 * 24 * time.Hour,
		Mode:     RestartPolicyModeDelay,
	}
)
const (
	// RestartPolicyModeDelay causes an artificial delay till the next interval is
	// reached when the specified attempts have been reached in the interval.
	RestartPolicyModeDelay = "delay"

	// RestartPolicyModeFail causes a job to fail if the specified number of
	// attempts are reached within an interval.
	RestartPolicyModeFail = "fail"

	// RestartPolicyMinInterval is the minimum interval that is accepted for a
	// restart policy (enforced by RestartPolicy.Validate).
	RestartPolicyMinInterval = 5 * time.Second
)
// RestartPolicy configures how Tasks are restarted when they crash or fail.
type RestartPolicy struct {
	// Attempts is the number of restarts that will occur in an interval.
	Attempts int

	// Interval is a duration in which we can limit the number of restarts
	// within.
	Interval time.Duration

	// Delay is the time between a failure and a restart.
	Delay time.Duration

	// Mode controls what happens when the task restarts more than Attempts
	// times in an interval; one of RestartPolicyModeDelay or
	// RestartPolicyModeFail.
	Mode string
}
// Copy returns a copy of the restart policy; a nil receiver yields nil.
func (r *RestartPolicy) Copy() *RestartPolicy {
	if r == nil {
		return nil
	}
	c := *r
	return &c
}
// Validate sanity checks the restart policy: the mode must be known, the
// interval must meet the minimum, and all attempts plus their delays must
// be schedulable within one interval.
func (r *RestartPolicy) Validate() error {
	var mErr multierror.Error

	if r.Mode != RestartPolicyModeDelay && r.Mode != RestartPolicyModeFail {
		multierror.Append(&mErr, fmt.Errorf("Unsupported restart mode: %q", r.Mode))
	}

	// Zero attempts with a non-fail mode never restarts and never fails.
	if r.Attempts == 0 && r.Mode != RestartPolicyModeFail {
		multierror.Append(&mErr, fmt.Errorf("Restart policy %q with %d attempts is ambiguous", r.Mode, r.Attempts))
	}

	if r.Interval < RestartPolicyMinInterval {
		multierror.Append(&mErr, fmt.Errorf("Interval can not be less than %v (got %v)", RestartPolicyMinInterval, r.Interval))
	}

	// The attempt budget must fit inside the interval.
	if time.Duration(r.Attempts)*r.Delay > r.Interval {
		multierror.Append(&mErr,
			fmt.Errorf("Nomad can't restart the TaskGroup %v times in an interval of %v with a delay of %v", r.Attempts, r.Interval, r.Delay))
	}
	return mErr.ErrorOrNil()
}
// NewRestartPolicy returns a copy of the default restart policy for the
// given job type, or nil for unknown types.
func NewRestartPolicy(jobType string) *RestartPolicy {
	var def RestartPolicy
	switch jobType {
	case JobTypeService, JobTypeSystem:
		def = defaultServiceJobRestartPolicy
	case JobTypeBatch:
		def = defaultBatchJobRestartPolicy
	default:
		return nil
	}
	return &def
}
// TaskGroup is an atomic unit of placement. Each task group belongs to
// a job and may contain any number of tasks. A task group supports running
// in many replicas using the same configuration.
type TaskGroup struct {
	// Name of the task group
	Name string

	// Count is the number of replicas of this task group that should
	// be scheduled.
	Count int

	// Update is used to control the update strategy for this task group
	Update *UpdateStrategy

	// Constraints can be specified at a task group level and apply to
	// all the tasks contained.
	Constraints []*Constraint

	// RestartPolicy of a TaskGroup
	RestartPolicy *RestartPolicy

	// Tasks are the collection of tasks that this task group needs to run
	Tasks []*Task

	// EphemeralDisk is the disk resources that the task group requests
	EphemeralDisk *EphemeralDisk

	// Meta is used to associate arbitrary metadata with this
	// task group. This is opaque to Nomad.
	Meta map[string]string
}
// Copy returns a deep copy of the task group; a nil receiver yields nil.
func (tg *TaskGroup) Copy() *TaskGroup {
	if tg == nil {
		return nil
	}
	c := *tg
	ntg := &c

	// Deep-copy the pointer and reference fields.
	ntg.Update = tg.Update.Copy()
	ntg.Constraints = CopySliceConstraints(tg.Constraints)
	ntg.RestartPolicy = tg.RestartPolicy.Copy()

	if tg.Tasks != nil {
		tasks := make([]*Task, len(tg.Tasks))
		for i, t := range tg.Tasks {
			tasks[i] = t.Copy()
		}
		ntg.Tasks = tasks
	}

	ntg.Meta = helper.CopyMapStringString(tg.Meta)

	if tg.EphemeralDisk != nil {
		ntg.EphemeralDisk = tg.EphemeralDisk.Copy()
	}
	return ntg
}
// Canonicalize is used to canonicalize fields in the TaskGroup.
func (tg *TaskGroup) Canonicalize(job *Job) {
	// Treat an empty map as nil so reflect.DeepEqual-based scheduling
	// comparisons are stable.
	if len(tg.Meta) == 0 {
		tg.Meta = nil
	}

	// Fall back to the job-type default restart policy.
	if tg.RestartPolicy == nil {
		tg.RestartPolicy = NewRestartPolicy(job.Type)
	}

	// Provide a default ephemeral disk when the user requested none.
	if tg.EphemeralDisk == nil {
		tg.EphemeralDisk = DefaultEphemeralDisk()
	}

	for _, t := range tg.Tasks {
		t.Canonicalize(job, tg)
	}

	// COMPAT 0.4.1 -> 0.5: sum per-task disk requests into EphemeralDisk so
	// users are not required to move their disk attribute from resources to
	// the EphemeralDisk section of the job spec in Nomad 0.5.
	// Remove in 0.6.
	total := 0
	for _, t := range tg.Tasks {
		total += t.Resources.DiskMB
	}
	if total > 0 {
		tg.EphemeralDisk.SizeMB = total
	}
}
// Validate is used to sanity check a task group: basic fields, constraints,
// restart policy, ephemeral disk, update strategy, per-task uniqueness
// invariants (names, leader count, static ports), and finally each task.
func (tg *TaskGroup) Validate(j *Job) error {
	var mErr multierror.Error
	// Basic required fields.
	if tg.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task group name"))
	}
	if tg.Count < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Task group count can't be negative"))
	}
	if len(tg.Tasks) == 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Missing tasks for task group"))
	}
	for idx, constr := range tg.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	// A restart policy is mandatory (Canonicalize installs a default).
	if tg.RestartPolicy != nil {
		if err := tg.RestartPolicy.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	} else {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a restart policy", tg.Name))
	}

	// An ephemeral disk object is mandatory (Canonicalize installs a default).
	if tg.EphemeralDisk != nil {
		if err := tg.EphemeralDisk.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	} else {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have an ephemeral disk object", tg.Name))
	}

	// Validate the update strategy
	if u := tg.Update; u != nil {
		switch j.Type {
		case JobTypeService, JobTypeSystem:
		default:
			// COMPAT: Enable in 0.7.0
			//mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow update block", j.Type))
		}
		if err := u.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}

	// Check for duplicate tasks, that there is only leader task if any,
	// and no duplicated static ports
	tasks := make(map[string]int)
	staticPorts := make(map[int]string)
	leaderTasks := 0
	for idx, task := range tg.Tasks {
		if task.Name == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d missing name", idx+1))
		} else if existing, ok := tasks[task.Name]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d redefines '%s' from task %d", idx+1, task.Name, existing+1))
		} else {
			tasks[task.Name] = idx
		}

		if task.Leader {
			leaderTasks++
		}

		// Tasks without resources cannot reserve ports.
		if task.Resources == nil {
			continue
		}

		// A static (reserved) port may be claimed by at most one task in
		// the group; record the first claimant for the error message.
		for _, net := range task.Resources.Networks {
			for _, port := range net.ReservedPorts {
				if other, ok := staticPorts[port.Value]; ok {
					err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other)
					mErr.Errors = append(mErr.Errors, err)
				} else {
					staticPorts[port.Value] = fmt.Sprintf("%s:%s", task.Name, port.Label)
				}
			}
		}
	}

	if leaderTasks > 1 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Only one task may be marked as leader"))
	}

	// Validate the tasks
	for _, task := range tg.Tasks {
		if err := task.Validate(tg.EphemeralDisk); err != nil {
			outer := fmt.Errorf("Task %s validation failed: %v", task.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}
	return mErr.ErrorOrNil()
}
// Warnings returns a list of warnings that may be from dubious settings or
// deprecation warnings.
func (tg *TaskGroup) Warnings(j *Job) error {
	var mErr multierror.Error

	// An update max_parallel larger than the group count means any
	// destructive change replaces every allocation at once.
	if u := tg.Update; u != nil && u.MaxParallel > tg.Count {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("Update max parallel count is greater than task group count (%d > %d). "+
				"A destructive change would result in the replacement of all allocations.", u.MaxParallel, tg.Count))
	}

	return mErr.ErrorOrNil()
}
// LookupTask returns the task with the given name, or nil if none matches.
func (tg *TaskGroup) LookupTask(name string) *Task {
	for i := range tg.Tasks {
		if tg.Tasks[i].Name == name {
			return tg.Tasks[i]
		}
	}
	return nil
}
// GoString renders the dereferenced group with Go syntax for %#v output.
func (tg *TaskGroup) GoString() string {
	s := fmt.Sprintf("*%#v", *tg)
	return s
}
const (
	// Supported Consul health check types (see ServiceCheck.validate).
	ServiceCheckHTTP   = "http"
	ServiceCheckTCP    = "tcp"
	ServiceCheckScript = "script"

	// minCheckInterval is the minimum check interval permitted. Consul
	// currently has its MinInterval set to 1s. Mirror that here for
	// consistency.
	minCheckInterval = 1 * time.Second

	// minCheckTimeout is the minimum check timeout permitted for Consul
	// script TTL checks.
	minCheckTimeout = 1 * time.Second
)
// The ServiceCheck data model represents the consul health check that
// Nomad registers for a Task
type ServiceCheck struct {
	Name          string        // Name of the check, defaults to id
	Type          string        // Type of the check - tcp, http, docker and script
	Command       string        // Command is the command to run for script checks
	Args          []string      // Args is a list of arguments for script checks
	Path          string        // path of the health check url for http type check
	Protocol      string        // Protocol to use if check is http, defaults to http
	PortLabel     string        // The port to use for tcp/http checks
	Interval      time.Duration // Interval of the check
	Timeout       time.Duration // Timeout of the response from the check before consul fails the check
	InitialStatus string        // Initial status of the check
	TLSSkipVerify bool          // Skip TLS verification when Protocol=https
}
// Copy returns a copy of the check; a nil receiver yields nil.
// NOTE(review): Args is a slice and is not cloned here, so the copy shares
// its backing array with the original.
func (sc *ServiceCheck) Copy() *ServiceCheck {
	if sc == nil {
		return nil
	}
	c := *sc
	return &c
}
// Canonicalize normalizes the check: empty Args become nil so DeepEqual
// comparisons are stable, and a missing name defaults to one derived from
// the owning service.
func (sc *ServiceCheck) Canonicalize(serviceName string) {
	if len(sc.Args) == 0 {
		sc.Args = nil
	}
	if len(sc.Name) == 0 {
		sc.Name = fmt.Sprintf("service: %q check", serviceName)
	}
}
// validate a Service's ServiceCheck: per-type required fields and minimum
// timeout, the minimum interval, and a recognized initial Consul status.
//
// Fix: the timeout error messages previously interpolated minCheckInterval
// instead of minCheckTimeout — the constant actually being enforced by the
// comparison. Both are currently 1s, so the printed text is unchanged
// today, but the messages would silently become wrong if the constants
// ever diverged.
func (sc *ServiceCheck) validate() error {
	switch strings.ToLower(sc.Type) {
	case ServiceCheckTCP:
		if sc.Timeout == 0 {
			return fmt.Errorf("missing required value timeout. Timeout cannot be less than %v", minCheckTimeout)
		} else if sc.Timeout < minCheckTimeout {
			return fmt.Errorf("timeout (%v) is lower than required minimum timeout %v", sc.Timeout, minCheckTimeout)
		}
	case ServiceCheckHTTP:
		if sc.Path == "" {
			return fmt.Errorf("http type must have a valid http path")
		}

		if sc.Timeout == 0 {
			return fmt.Errorf("missing required value timeout. Timeout cannot be less than %v", minCheckTimeout)
		} else if sc.Timeout < minCheckTimeout {
			return fmt.Errorf("timeout (%v) is lower than required minimum timeout %v", sc.Timeout, minCheckTimeout)
		}
	case ServiceCheckScript:
		if sc.Command == "" {
			return fmt.Errorf("script type must have a valid script path")
		}
		// TODO: enforce timeout on the Client side and reenable
		// validation.
	default:
		return fmt.Errorf(`invalid type (%+q), must be one of "http", "tcp", or "script" type`, sc.Type)
	}

	// The interval minimum applies to every check type.
	if sc.Interval == 0 {
		return fmt.Errorf("missing required value interval. Interval cannot be less than %v", minCheckInterval)
	} else if sc.Interval < minCheckInterval {
		return fmt.Errorf("interval (%v) cannot be lower than %v", sc.Interval, minCheckInterval)
	}

	// Empty means "let Consul pick"; otherwise it must be a known state.
	switch sc.InitialStatus {
	case "":
		// case api.HealthUnknown: TODO: Add when Consul releases 0.7.1
	case api.HealthPassing:
	case api.HealthWarning:
	case api.HealthCritical:
	default:
		return fmt.Errorf(`invalid initial check state (%s), must be one of %q, %q, %q or empty`, sc.InitialStatus, api.HealthPassing, api.HealthWarning, api.HealthCritical)
	}

	return nil
}
// RequiresPort returns whether the service check requires the task has a port.
func (sc *ServiceCheck) RequiresPort() bool {
	// Only network-level checks need an address/port to dial.
	return sc.Type == ServiceCheckHTTP || sc.Type == ServiceCheckTCP
}
// Hash all ServiceCheck fields and the check's corresponding service ID to
// create an identifier. The identifier is not guaranteed to be unique as if
// the PortLabel is blank, the Service's PortLabel will be used after Hash is
// called.
func (sc *ServiceCheck) Hash(serviceID string) string {
	h := sha1.New()
	fields := []string{
		serviceID,
		sc.Name,
		sc.Type,
		sc.Command,
		strings.Join(sc.Args, ""),
		sc.Path,
		sc.Protocol,
		sc.PortLabel,
		sc.Interval.String(),
		sc.Timeout.String(),
	}
	for _, f := range fields {
		io.WriteString(h, f)
	}

	// Only include TLSSkipVerify if set to maintain ID stability with Nomad <0.6
	if sc.TLSSkipVerify {
		io.WriteString(h, "true")
	}

	return fmt.Sprintf("%x", h.Sum(nil))
}
const (
	// Valid values for Service.AddressMode (see Service.Validate).
	AddressModeAuto   = "auto"
	AddressModeHost   = "host"
	AddressModeDriver = "driver"
)
// Service represents a Consul service definition in Nomad
type Service struct {
	// Name of the service registered with Consul. Consul defaults the
	// Name to ServiceID if not specified. The Name if specified is used
	// as one of the seed values when generating a Consul ServiceID.
	Name string

	// PortLabel is either the numeric port number or the `host:port`.
	// To specify the port number using the host's Consul Advertise
	// address, specify an empty host in the PortLabel (e.g. `:port`).
	PortLabel string

	// AddressMode specifies whether or not to use the host ip:port for
	// this service; one of AddressModeAuto, AddressModeHost, or
	// AddressModeDriver (empty means auto, see Validate).
	AddressMode string

	Tags   []string        // List of tags for the service
	Checks []*ServiceCheck // List of checks associated with the service
}
// Copy returns a deep copy of the service; a nil receiver yields nil.
func (s *Service) Copy() *Service {
	if s == nil {
		return nil
	}
	c := *s
	ns := &c
	ns.Tags = helper.CopySliceString(s.Tags)

	if s.Checks != nil {
		checks := make([]*ServiceCheck, len(s.Checks))
		for i, chk := range s.Checks {
			checks[i] = chk.Copy()
		}
		ns.Checks = checks
	}
	return ns
}
// Canonicalize interpolates values of Job, Task Group and Task in the Service
// Name. This also generates check names, service id and check ids.
func (s *Service) Canonicalize(job string, taskGroup string, task string) {
	// Nil out empty lists so DeepEqual-based scheduler comparisons treat
	// empty and unset identically.
	if len(s.Tags) == 0 {
		s.Tags = nil
	}
	if len(s.Checks) == 0 {
		s.Checks = nil
	}

	env := map[string]string{
		"JOB":       job,
		"TASKGROUP": taskGroup,
		"TASK":      task,
		"BASE":      fmt.Sprintf("%s-%s-%s", job, taskGroup, task),
	}
	s.Name = args.ReplaceEnv(s.Name, env)

	for _, check := range s.Checks {
		check.Canonicalize(s.Name)
	}
}
// serviceNameRegexp validates (possibly uninterpolated) service names per
// RFC-952 §1 (https://tools.ietf.org/html/rfc952), RFC-1123 §2.1
// (https://tools.ietf.org/html/rfc1123), and RFC-2782
// (https://tools.ietf.org/html/rfc2782), with an exception for Nomad's
// ${...} interpolation syntax. Compiled once at package scope instead of
// on every Validate call.
var serviceNameRegexp = regexp.MustCompile(`^(?i:[a-z0-9]|[a-z0-9\$][a-zA-Z0-9\-\$\{\}\_\.]*[a-z0-9\}])$`)

// Validate checks the service definition: the service name, the address
// mode, and every attached check.
func (s *Service) Validate() error {
	var mErr multierror.Error

	if !serviceNameRegexp.MatchString(s.Name) {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes: %q", s.Name))
	}

	switch s.AddressMode {
	case "", AddressModeAuto, AddressModeHost, AddressModeDriver:
		// OK
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("service address_mode must be %q, %q, or %q; not %q", AddressModeAuto, AddressModeHost, AddressModeDriver, s.AddressMode))
	}

	for _, c := range s.Checks {
		// Checks that dial the service need a port to dial.
		if s.PortLabel == "" && c.RequiresPort() {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: check requires a port but the service %+q has no port", c.Name, s.Name))
			continue
		}

		if err := c.validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: %v", c.Name, err))
		}
	}
	return mErr.ErrorOrNil()
}
// rfc1123NameRegexp validates fully-interpolated service names per
// RFC-952 §1 (https://tools.ietf.org/html/rfc952), RFC-1123 §2.1
// (https://tools.ietf.org/html/rfc1123), and RFC-2782
// (https://tools.ietf.org/html/rfc2782): alphanumerics and dashes, at most
// 63 characters. Compiled once at package scope instead of per call.
var rfc1123NameRegexp = regexp.MustCompile(`^(?i:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9])$`)

// ValidateName checks if the services Name is valid and should be called after
// the name has been interpolated
func (s *Service) ValidateName(name string) error {
	if !rfc1123NameRegexp.MatchString(name) {
		return fmt.Errorf("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes and must be no longer than 63 characters: %q", name)
	}
	return nil
}
// Hash calculates the hash of the check based on it's content and the service
// which owns it
func (s *Service) Hash() string {
	h := sha1.New()
	for _, field := range []string{s.Name, strings.Join(s.Tags, ""), s.PortLabel, s.AddressMode} {
		io.WriteString(h, field)
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}
const (
	// DefaultKillTimeout is the default timeout between signaling a task it
	// will be killed and killing it (applied in Task.Canonicalize).
	DefaultKillTimeout = 5 * time.Second
)
// LogConfig provides configuration for log rotation
type LogConfig struct {
	// MaxFiles is the number of rotated log files to retain (minimum 1,
	// see Validate).
	MaxFiles int
	// MaxFileSizeMB is the size at which a log file is rotated, in MB
	// (minimum 1, see Validate).
	MaxFileSizeMB int
}
// DefaultLogConfig returns the default LogConfig values.
func DefaultLogConfig() *LogConfig {
	cfg := LogConfig{
		MaxFiles:      10,
		MaxFileSizeMB: 10,
	}
	return &cfg
}
// Validate returns an error if the log config specified are less than
// the minimum allowed.
func (l *LogConfig) Validate() error {
	var mErr multierror.Error
	if l.MaxFiles < 1 {
		multierror.Append(&mErr, fmt.Errorf("minimum number of files is 1; got %d", l.MaxFiles))
	}
	if l.MaxFileSizeMB < 1 {
		multierror.Append(&mErr, fmt.Errorf("minimum file size is 1MB; got %d", l.MaxFileSizeMB))
	}
	return mErr.ErrorOrNil()
}
// Task is a single process typically that is executed as part of a task group.
type Task struct {
	// Name of the task
	Name string

	// Driver is used to control which driver is used
	Driver string

	// User is used to determine which user will run the task. It defaults to
	// the same user the Nomad client is being run as.
	User string

	// Config is provided to the driver to initialize
	Config map[string]interface{}

	// Env is a map of environment variables to be used by the driver
	Env map[string]string

	// Services is the list of service definitions exposed by the Task
	Services []*Service

	// Vault is used to define the set of Vault policies that this task should
	// have access to.
	Vault *Vault

	// Templates are the set of templates to be rendered for the task.
	Templates []*Template

	// Constraints can be specified at a task level and apply only to
	// the particular task.
	Constraints []*Constraint

	// Resources is the resources needed by this task
	Resources *Resources

	// DispatchPayload configures how the task retrieves its input from a dispatch
	DispatchPayload *DispatchPayloadConfig

	// Meta is used to associate arbitrary metadata with this
	// task. This is opaque to Nomad.
	Meta map[string]string

	// KillTimeout is the time between signaling a task that it will be
	// killed and killing it.
	KillTimeout time.Duration

	// LogConfig provides configuration for log rotation
	LogConfig *LogConfig

	// Artifacts is a list of artifacts to download and extract before running
	// the task.
	Artifacts []*TaskArtifact

	// Leader marks the task as the leader within the group. When the leader
	// task exits, other tasks will be gracefully terminated.
	Leader bool
}
// Copy returns a deep copy of the task; a nil receiver yields nil.
//
// Fix: the driver Config map was previously deep-copied only when
// copystructure.Copy returned an error (`err != nil`), so on success the
// copy silently shared the Config map with the original (and the error
// branch would have panicked on a nil type assertion). The condition is
// now the right way around, with an explicit panic on copy failure since
// a partially-aliased Task copy is unsafe to hand out.
func (t *Task) Copy() *Task {
	if t == nil {
		return nil
	}
	nt := new(Task)
	*nt = *t
	nt.Env = helper.CopyMapStringString(nt.Env)

	if t.Services != nil {
		services := make([]*Service, len(nt.Services))
		for i, s := range nt.Services {
			services[i] = s.Copy()
		}
		nt.Services = services
	}

	nt.Constraints = CopySliceConstraints(nt.Constraints)

	nt.Vault = nt.Vault.Copy()
	nt.Resources = nt.Resources.Copy()
	nt.Meta = helper.CopyMapStringString(nt.Meta)
	nt.DispatchPayload = nt.DispatchPayload.Copy()

	if t.Artifacts != nil {
		artifacts := make([]*TaskArtifact, 0, len(t.Artifacts))
		for _, a := range nt.Artifacts {
			artifacts = append(artifacts, a.Copy())
		}
		nt.Artifacts = artifacts
	}

	if i, err := copystructure.Copy(nt.Config); err != nil {
		panic(err.Error())
	} else {
		nt.Config = i.(map[string]interface{})
	}

	if t.Templates != nil {
		templates := make([]*Template, len(t.Templates))
		for i, tmpl := range nt.Templates {
			templates[i] = tmpl.Copy()
		}
		nt.Templates = templates
	}

	return nt
}
// Canonicalize canonicalizes fields in the task.
func (t *Task) Canonicalize(job *Job, tg *TaskGroup) {
	// Nil out empty maps so reflect.DeepEqual-based scheduling comparisons
	// treat empty and unset identically.
	if len(t.Meta) == 0 {
		t.Meta = nil
	}
	if len(t.Config) == 0 {
		t.Config = nil
	}
	if len(t.Env) == 0 {
		t.Env = nil
	}

	for _, service := range t.Services {
		service.Canonicalize(job.Name, tg.Name, t.Name)
	}

	// Default the resources when absent, otherwise canonicalize them.
	if t.Resources == nil {
		t.Resources = DefaultResources()
	} else {
		t.Resources.Canonicalize()
	}

	// Apply the default kill timeout when unset.
	if t.KillTimeout == 0 {
		t.KillTimeout = DefaultKillTimeout
	}

	if v := t.Vault; v != nil {
		v.Canonicalize()
	}

	for _, tmpl := range t.Templates {
		tmpl.Canonicalize()
	}
}
// GoString renders the dereferenced task with Go syntax for %#v output.
func (t *Task) GoString() string {
	s := fmt.Sprintf("*%#v", *t)
	return s
}
// Validate is used to sanity check a task: basic fields, resources, log
// config (including that log storage fits inside the group's ephemeral
// disk), constraints, services, artifacts, Vault config, templates, and
// the dispatch payload block.
func (t *Task) Validate(ephemeralDisk *EphemeralDisk) error {
	var mErr multierror.Error
	if t.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task name"))
	}
	if strings.ContainsAny(t.Name, `/\`) {
		// We enforce this so that when creating the directory on disk it will
		// not have any slashes.
		mErr.Errors = append(mErr.Errors, errors.New("Task name cannot include slashes"))
	}
	if t.Driver == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task driver"))
	}
	if t.KillTimeout.Nanoseconds() < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("KillTimeout must be a positive value"))
	}

	// Validate the resources.
	if t.Resources == nil {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task resources"))
	} else {
		if err := t.Resources.MeetsMinResources(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}

		// Ensure the task isn't asking for disk resources
		if t.Resources.DiskMB > 0 {
			mErr.Errors = append(mErr.Errors, errors.New("Task can't ask for disk resources, they have to be specified at the task group level."))
		}
	}

	// Validate the log config
	if t.LogConfig == nil {
		mErr.Errors = append(mErr.Errors, errors.New("Missing Log Config"))
	} else if err := t.LogConfig.Validate(); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}

	for idx, constr := range t.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}

		// distinct_hosts / distinct_property only make sense at the job or
		// group level, not per task.
		switch constr.Operand {
		case ConstraintDistinctHosts, ConstraintDistinctProperty:
			outer := fmt.Errorf("Constraint %d has disallowed Operand at task level: %s", idx+1, constr.Operand)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	// Validate Services
	if err := validateServices(t); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}

	// Worst-case log usage must fit inside the group's ephemeral disk.
	if t.LogConfig != nil && ephemeralDisk != nil {
		logUsage := (t.LogConfig.MaxFiles * t.LogConfig.MaxFileSizeMB)
		if ephemeralDisk.SizeMB <= logUsage {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("log storage (%d MB) must be less than requested disk capacity (%d MB)",
					logUsage, ephemeralDisk.SizeMB))
		}
	}

	for idx, artifact := range t.Artifacts {
		if err := artifact.Validate(); err != nil {
			outer := fmt.Errorf("Artifact %d validation failed: %v", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	if t.Vault != nil {
		if err := t.Vault.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Vault validation failed: %v", err))
		}
	}

	// Each template must render to a distinct destination path.
	destinations := make(map[string]int, len(t.Templates))
	for idx, tmpl := range t.Templates {
		if err := tmpl.Validate(); err != nil {
			outer := fmt.Errorf("Template %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}

		if other, ok := destinations[tmpl.DestPath]; ok {
			outer := fmt.Errorf("Template %d has same destination as %d", idx+1, other)
			mErr.Errors = append(mErr.Errors, outer)
		} else {
			destinations[tmpl.DestPath] = idx + 1
		}
	}

	// Validate the dispatch payload block if there
	if t.DispatchPayload != nil {
		if err := t.DispatchPayload.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Payload validation failed: %v", err))
		}
	}

	return mErr.ErrorOrNil()
}
// validateServices takes a task and validates the services within it are valid
// and reference ports that exist.
//
// Fixes: (1) dropped the redundant `, _` in the range over port labels
// (go vet / gofmt idiom); (2) duplicate-service detection now keys on a
// (name, port label) struct instead of raw string concatenation, so pairs
// like ("ab","c") and ("a","bc") are no longer falsely flagged as
// duplicates.
func validateServices(t *Task) error {
	var mErr multierror.Error

	// Ensure that services don't ask for non-existent ports and their
	// (name, port label) pairs are unique.
	type nameAndPort struct {
		name string
		port string
	}
	servicePorts := make(map[string][]string)
	knownServices := make(map[nameAndPort]struct{})
	for i, service := range t.Services {
		if err := service.Validate(); err != nil {
			outer := fmt.Errorf("service[%d] %+q validation failed: %s", i, service.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}

		// Ensure that services with the same name are not being registered for
		// the same port
		key := nameAndPort{service.Name, service.PortLabel}
		if _, ok := knownServices[key]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q is duplicate", service.Name))
		}
		knownServices[key] = struct{}{}

		if service.PortLabel != "" {
			servicePorts[service.PortLabel] = append(servicePorts[service.PortLabel], service.Name)
		}

		// Ensure that check names are unique.
		knownChecks := make(map[string]struct{})
		for _, check := range service.Checks {
			if _, ok := knownChecks[check.Name]; ok {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is duplicate", check.Name))
			}
			knownChecks[check.Name] = struct{}{}
		}
	}

	// Get the set of port labels.
	portLabels := make(map[string]struct{})
	if t.Resources != nil {
		for _, network := range t.Resources.Networks {
			for portLabel := range network.PortLabels() {
				portLabels[portLabel] = struct{}{}
			}
		}
	}

	// Ensure all ports referenced in services exist.
	for servicePort, services := range servicePorts {
		if _, ok := portLabels[servicePort]; !ok {
			joined := strings.Join(services, ", ")
			err := fmt.Errorf("port label %q referenced by services %v does not exist", servicePort, joined)
			mErr.Errors = append(mErr.Errors, err)
		}
	}
	return mErr.ErrorOrNil()
}
const (
	// TemplateChangeModeNoop marks that no action should be taken if the
	// template is re-rendered
	TemplateChangeModeNoop = "noop"

	// TemplateChangeModeSignal marks that the task should be signaled if the
	// template is re-rendered (requires Template.ChangeSignal to be set).
	TemplateChangeModeSignal = "signal"

	// TemplateChangeModeRestart marks that the task should be restarted if the
	// template is re-rendered
	TemplateChangeModeRestart = "restart"
)
var (
	// TemplateChangeModeInvalidError is the error for when an invalid change
	// mode is given (returned via Template.Validate).
	TemplateChangeModeInvalidError = errors.New("Invalid change mode. Must be one of the following: noop, signal, restart")
)
// Template represents a template configuration to be rendered for a given task
type Template struct {
	// SourcePath is the path to the template to be rendered
	SourcePath string

	// DestPath is the path to where the template should be rendered
	DestPath string

	// EmbeddedTmpl store the raw template. This is useful for smaller templates
	// where they are embedded in the job file rather than sent as an artifact
	EmbeddedTmpl string

	// ChangeMode indicates what should be done if the template is re-rendered;
	// one of the TemplateChangeMode* constants.
	ChangeMode string

	// ChangeSignal is the signal that should be sent if the change mode
	// requires it.
	ChangeSignal string

	// Splay is used to avoid coordinated restarts of processes by applying a
	// random wait between 0 and the given splay value before signalling the
	// application of a change
	Splay time.Duration

	// Perms is the permission the file should be written out with.
	Perms string

	// LeftDelim and RightDelim are optional configurations to control what
	// delimiter is utilized when parsing the template.
	LeftDelim  string
	RightDelim string

	// Envvars enables exposing the template as environment variables
	// instead of as a file. The template must be of the form:
	//
	//	VAR_NAME_1={{ key service/my-key }}
	//	VAR_NAME_2=raw string and {{ env "attr.kernel.name" }}
	//
	// Lines will be split on the initial "=" with the first part being the
	// key name and the second part the value.
	// Empty lines and lines starting with # will be ignored, but to avoid
	// escaping issues #s within lines will not be treated as comments.
	Envvars bool
}
// DefaultTemplate returns a default template.
func DefaultTemplate() *Template {
	t := Template{
		ChangeMode: TemplateChangeModeRestart,
		Splay:      5 * time.Second,
		Perms:      "0644",
	}
	return &t
}
// Copy returns a copy of the template; a nil receiver yields nil.
// (Local renamed to avoid shadowing the builtin copy.)
func (t *Template) Copy() *Template {
	if t == nil {
		return nil
	}
	nt := *t
	return &nt
}
// Canonicalize upper-cases the change signal so later comparisons see a
// single canonical spelling.
func (t *Template) Canonicalize() {
	if s := t.ChangeSignal; s != "" {
		t.ChangeSignal = strings.ToUpper(s)
	}
}
// Validate checks the template's source, destination, change mode, splay,
// and permissions.
func (t *Template) Validate() error {
	var mErr multierror.Error

	// Need something to render...
	if t.SourcePath == "" && t.EmbeddedTmpl == "" {
		multierror.Append(&mErr, fmt.Errorf("Must specify a source path or have an embedded template"))
	}

	// ...and somewhere to render it.
	if t.DestPath == "" {
		multierror.Append(&mErr, fmt.Errorf("Must specify a destination for the template"))
	}

	// The destination must stay inside the allocation directory.
	if escaped, err := PathEscapesAllocDir("task", t.DestPath); err != nil {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
	} else if escaped {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
	}

	// Verify a proper change mode; signal mode additionally needs a signal.
	switch t.ChangeMode {
	case TemplateChangeModeNoop, TemplateChangeModeRestart:
	case TemplateChangeModeSignal:
		if t.ChangeSignal == "" {
			multierror.Append(&mErr, fmt.Errorf("Must specify signal value when change mode is signal"))
		}
	default:
		multierror.Append(&mErr, TemplateChangeModeInvalidError)
	}

	// Splay must be non-negative.
	if t.Splay < 0 {
		multierror.Append(&mErr, fmt.Errorf("Must specify positive splay value"))
	}

	// Perms, when given, must parse as an octal file mode.
	if t.Perms != "" {
		if _, err := strconv.ParseUint(t.Perms, 8, 12); err != nil {
			multierror.Append(&mErr, fmt.Errorf("Failed to parse %q as octal: %v", t.Perms, err))
		}
	}

	return mErr.ErrorOrNil()
}
// Set of possible states for a task.
const (
	TaskStatePending = "pending" // The task is waiting to be run.
	TaskStateRunning = "running" // The task is currently running.
	TaskStateDead    = "dead"    // Terminal state of task.
)
// TaskState tracks the current state of a task and events that caused state
// transitions.
type TaskState struct {
	// State is the current state of the task; one of the TaskState*
	// constants.
	State string

	// Failed marks a task as having failed
	Failed bool

	// Restarts is the number of times the task has restarted
	Restarts uint64

	// LastRestart is the time the task last restarted. It is updated each time the
	// task restarts
	LastRestart time.Time

	// StartedAt is the time the task is started. It is updated each time the
	// task starts
	StartedAt time.Time

	// FinishedAt is the time at which the task transitioned to dead and will
	// not be started again.
	FinishedAt time.Time

	// Events is the series of task events that transition the state of the task.
	Events []*TaskEvent
}
// Copy returns a deep copy of the task state; a nil receiver yields nil.
// (Local renamed to avoid shadowing the builtin copy.)
func (ts *TaskState) Copy() *TaskState {
	if ts == nil {
		return nil
	}
	nts := new(TaskState)
	*nts = *ts

	if ts.Events != nil {
		nts.Events = make([]*TaskEvent, len(ts.Events))
		for i, e := range ts.Events {
			nts.Events[i] = e.Copy()
		}
	}
	return nts
}
// Successful returns whether a task finished successfully.
func (ts *TaskState) Successful() bool {
	// Only a dead task with at least one event can have succeeded.
	if ts.State != TaskStateDead || len(ts.Events) == 0 {
		return false
	}

	// Success means the final event is a termination with exit code zero.
	last := ts.Events[len(ts.Events)-1]
	return last.Type == TaskTerminated && last.ExitCode == 0
}
// Event types recorded in TaskEvent.Type describing a task's lifecycle.
const (
	// TaskSetupFailure indicates that the task could not be started due to a
	// setup failure.
	TaskSetupFailure = "Setup Failure"

	// TaskDriverFailure indicates that the task could not be started due to a
	// failure in the driver.
	TaskDriverFailure = "Driver Failure"

	// TaskReceived signals that the task has been pulled by the client at the
	// given timestamp.
	TaskReceived = "Received"

	// TaskFailedValidation indicates the task was invalid and as such was not
	// run.
	TaskFailedValidation = "Failed Validation"

	// TaskStarted signals that the task was started and its timestamp can be
	// used to determine the running length of the task.
	TaskStarted = "Started"

	// TaskTerminated indicates that the task was started and exited.
	TaskTerminated = "Terminated"

	// TaskKilling indicates a kill signal has been sent to the task.
	TaskKilling = "Killing"

	// TaskKilled indicates a user has killed the task.
	TaskKilled = "Killed"

	// TaskRestarting indicates that task terminated and is being restarted.
	TaskRestarting = "Restarting"

	// TaskNotRestarting indicates that the task has failed and is not being
	// restarted because it has exceeded its restart policy.
	TaskNotRestarting = "Not Restarting"

	// TaskRestartSignal indicates that the task has been signalled to be
	// restarted
	TaskRestartSignal = "Restart Signaled"

	// TaskSignaling indicates that the task is being signalled.
	TaskSignaling = "Signaling"

	// TaskDownloadingArtifacts means the task is downloading the artifacts
	// specified in the task.
	TaskDownloadingArtifacts = "Downloading Artifacts"

	// TaskArtifactDownloadFailed indicates that downloading the artifacts
	// failed.
	TaskArtifactDownloadFailed = "Failed Artifact Download"

	// TaskBuildingTaskDir indicates that the task directory/chroot is being
	// built.
	TaskBuildingTaskDir = "Building Task Directory"

	// TaskSetup indicates the task runner is setting up the task environment
	TaskSetup = "Task Setup"

	// TaskDiskExceeded indicates that one of the tasks in a taskgroup has
	// exceeded the requested disk resources.
	TaskDiskExceeded = "Disk Resources Exceeded"

	// TaskSiblingFailed indicates that a sibling task in the task group has
	// failed.
	TaskSiblingFailed = "Sibling Task Failed"

	// TaskDriverMessage is an informational event message emitted by
	// drivers such as when they're performing a long running action like
	// downloading an image.
	TaskDriverMessage = "Driver"

	// TaskLeaderDead indicates that the leader task within the task group has
	// finished.
	TaskLeaderDead = "Leader Task Dead"
)
// TaskEvent is an event that effects the state of a task and contains meta-data
// appropriate to the events type. Only the fields relevant to the event's Type
// are populated; the Set* builder methods fill them in.
type TaskEvent struct {
	// Type is one of the Task* event type constants above.
	Type string
	Time int64 // Unix Nanosecond timestamp

	// FailsTask marks whether this event fails the task
	FailsTask bool

	// Restart fields.
	RestartReason string

	// Setup Failure fields.
	SetupError string

	// Driver Failure fields.
	DriverError string // A driver error occurred while starting the task.

	// Task Terminated Fields.
	ExitCode int    // The exit code of the task.
	Signal   int    // The signal that terminated the task.
	Message  string // A possible message explaining the termination of the task.

	// Killing fields
	KillTimeout time.Duration

	// Task Killed Fields.
	KillError string // Error killing the task.

	// KillReason is the reason the task was killed
	KillReason string

	// TaskRestarting fields.
	StartDelay int64 // The sleep period before restarting the task in unix nanoseconds.

	// Artifact Download fields
	DownloadError string // Error downloading artifacts

	// Validation fields
	ValidationError string // Validation error

	// The maximum allowed task disk size.
	DiskLimit int64

	// Name of the sibling task that caused termination of the task that
	// the TaskEvent refers to.
	FailedSibling string

	// VaultError is the error from token renewal
	VaultError string

	// TaskSignalReason indicates the reason the task is being signalled.
	TaskSignalReason string

	// TaskSignal is the signal that was sent to the task
	TaskSignal string

	// DriverMessage indicates a driver action being taken.
	DriverMessage string
}
// GoString renders the event as its type and timestamp for debug output.
func (te *TaskEvent) GoString() string {
	return fmt.Sprintf("%s at %d", te.Type, te.Time)
}
// SetMessage stores the given human-readable message on the event and
// returns the event to allow call chaining.
func (te *TaskEvent) SetMessage(message string) *TaskEvent {
	te.Message = message
	return te
}
// Copy returns a shallow copy of the event (all fields are values, so this
// is a full copy). A nil receiver yields nil. The local is named ne rather
// than "copy" to avoid shadowing the builtin.
func (te *TaskEvent) Copy() *TaskEvent {
	if te == nil {
		return nil
	}
	ne := new(TaskEvent)
	*ne = *te
	return ne
}
// NewTaskEvent creates an event of the given type stamped with the current
// wall-clock time in Unix nanoseconds.
func NewTaskEvent(event string) *TaskEvent {
	e := &TaskEvent{Type: event}
	e.Time = time.Now().UnixNano()
	return e
}
// SetSetupError records an error that occurred while setting up the task;
// a nil error leaves the event untouched. Returns the event for chaining.
func (e *TaskEvent) SetSetupError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	e.SetupError = err.Error()
	return e
}
// SetFailsTask marks this event as one that fails the whole task and
// returns the event for chaining.
func (e *TaskEvent) SetFailsTask() *TaskEvent {
	e.FailsTask = true
	return e
}
// SetDriverError records a driver error on the event; nil errors are
// ignored. Returns the event for chaining.
func (e *TaskEvent) SetDriverError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	e.DriverError = err.Error()
	return e
}
// SetExitCode records the task's exit code and returns the event for chaining.
func (e *TaskEvent) SetExitCode(code int) *TaskEvent {
	e.ExitCode = code
	return e
}
// SetSignal records the signal that terminated the task and returns the
// event for chaining.
func (e *TaskEvent) SetSignal(sig int) *TaskEvent {
	e.Signal = sig
	return e
}
// SetExitMessage stores the error's text as the event message; nil errors
// are ignored. Returns the event for chaining.
func (e *TaskEvent) SetExitMessage(err error) *TaskEvent {
	if err == nil {
		return e
	}
	e.Message = err.Error()
	return e
}
// SetKillError records an error encountered while killing the task; nil
// errors are ignored. Returns the event for chaining.
func (e *TaskEvent) SetKillError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	e.KillError = err.Error()
	return e
}
// SetKillReason records why the task was killed and returns the event for
// chaining.
func (e *TaskEvent) SetKillReason(reason string) *TaskEvent {
	e.KillReason = reason
	return e
}
// SetRestartDelay records the sleep period before the task restarts, stored
// as Unix nanoseconds. Returns the event for chaining.
func (e *TaskEvent) SetRestartDelay(delay time.Duration) *TaskEvent {
	e.StartDelay = delay.Nanoseconds()
	return e
}
// SetRestartReason records why the task is being restarted and returns the
// event for chaining.
func (e *TaskEvent) SetRestartReason(r string) *TaskEvent {
	e.RestartReason = r
	return e
}
// SetTaskSignalReason records why the task is being signalled and returns
// the event for chaining.
func (e *TaskEvent) SetTaskSignalReason(reason string) *TaskEvent {
	e.TaskSignalReason = reason
	return e
}
// SetTaskSignal records the signal sent to the task, stored by its string
// name. Returns the event for chaining.
func (e *TaskEvent) SetTaskSignal(sig os.Signal) *TaskEvent {
	e.TaskSignal = sig.String()
	return e
}
// SetDownloadError records an artifact download error; nil errors are
// ignored. Returns the event for chaining.
func (e *TaskEvent) SetDownloadError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	e.DownloadError = err.Error()
	return e
}
// SetValidationError records a task validation error; nil errors are
// ignored. Returns the event for chaining.
func (e *TaskEvent) SetValidationError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	e.ValidationError = err.Error()
	return e
}
// SetKillTimeout records the kill timeout in effect for the task and
// returns the event for chaining.
func (e *TaskEvent) SetKillTimeout(d time.Duration) *TaskEvent {
	e.KillTimeout = d
	return e
}
// SetDiskLimit records the maximum allowed task disk size and returns the
// event for chaining.
func (e *TaskEvent) SetDiskLimit(mb int64) *TaskEvent {
	e.DiskLimit = mb
	return e
}
// SetFailedSibling records the name of the sibling task whose failure
// caused this event. Returns the event for chaining.
func (e *TaskEvent) SetFailedSibling(name string) *TaskEvent {
	e.FailedSibling = name
	return e
}
// SetVaultRenewalError records a Vault token renewal error; nil errors are
// ignored. Returns the event for chaining.
func (e *TaskEvent) SetVaultRenewalError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	e.VaultError = err.Error()
	return e
}
// SetDriverMessage records an informational message from the driver and
// returns the event for chaining.
func (e *TaskEvent) SetDriverMessage(msg string) *TaskEvent {
	e.DriverMessage = msg
	return e
}
// TaskArtifact is an artifact to download before running the task.
type TaskArtifact struct {
	// GetterSource is the source to download an artifact using go-getter
	GetterSource string

	// GetterOptions are options to use when downloading the artifact using
	// go-getter.
	GetterOptions map[string]string

	// GetterMode is the go-getter.ClientMode for fetching resources.
	// Defaults to "any" but can be set to "file" or "dir".
	GetterMode string

	// RelativeDest is the download destination given relative to the task's
	// directory.
	RelativeDest string
}
// Copy returns a deep copy of the artifact, duplicating the getter options
// map. A nil receiver yields nil.
func (ta *TaskArtifact) Copy() *TaskArtifact {
	if ta == nil {
		return nil
	}
	c := *ta
	c.GetterOptions = helper.CopyMapStringString(ta.GetterOptions)
	return &c
}
// GoString renders the artifact with field names for debug output.
func (ta *TaskArtifact) GoString() string {
	s := fmt.Sprintf("%+v", ta)
	return s
}
// PathEscapesAllocDir returns if the given path escapes the allocation
// directory. The prefix allows adding a prefix if the path will be joined, for
// example a "task/local" prefix may be provided if the path will be joined
// against that prefix.
func PathEscapesAllocDir(prefix, path string) (bool, error) {
	// Verify the destination doesn't escape the task's directory. The
	// placeholder alloc dir stands in for the real one; only relative
	// placement matters.
	alloc, err := filepath.Abs(filepath.Join("/", "alloc-dir/", "alloc-id/"))
	if err != nil {
		return false, err
	}
	abs, err := filepath.Abs(filepath.Join(alloc, prefix, path))
	if err != nil {
		return false, err
	}
	rel, err := filepath.Rel(alloc, abs)
	if err != nil {
		return false, err
	}

	// Only a leading ".." path element indicates escape. A plain
	// strings.HasPrefix(rel, "..") check would misclassify in-directory
	// names that merely start with two dots, such as "..foo".
	return rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)), nil
}
// Validate checks the artifact definition: a source must be given, the
// getter mode must be valid (defaulting to "any" as a side effect), the
// destination must not escape the allocation directory, and any "checksum"
// getter option must be a well-formed "type:value" pair with a digest of
// the correct length. Returns an accumulated multierror or nil.
func (ta *TaskArtifact) Validate() error {
	// Verify the source
	var mErr multierror.Error
	if ta.GetterSource == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("source must be specified"))
	}

	switch ta.GetterMode {
	case "":
		// Default to any
		ta.GetterMode = GetterModeAny
	case GetterModeAny, GetterModeFile, GetterModeDir:
		// Ok
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid artifact mode %q; must be one of: %s, %s, %s",
			ta.GetterMode, GetterModeAny, GetterModeFile, GetterModeDir))
	}

	escaped, err := PathEscapesAllocDir("task", ta.RelativeDest)
	if err != nil {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
	} else if escaped {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
	}

	// Verify the checksum. Each malformed stage returns early since later
	// checks would be meaningless on bad input.
	if check, ok := ta.GetterOptions["checksum"]; ok {
		check = strings.TrimSpace(check)
		if check == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("checksum value cannot be empty"))
			return mErr.ErrorOrNil()
		}

		parts := strings.Split(check, ":")
		if l := len(parts); l != 2 {
			mErr.Errors = append(mErr.Errors, fmt.Errorf(`checksum must be given as "type:value"; got %q`, check))
			return mErr.ErrorOrNil()
		}

		checksumVal := parts[1]
		checksumBytes, err := hex.DecodeString(checksumVal)
		if err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid checksum: %v", err))
			return mErr.ErrorOrNil()
		}

		checksumType := parts[0]
		expectedLength := 0
		switch checksumType {
		case "md5":
			expectedLength = md5.Size
		case "sha1":
			expectedLength = sha1.Size
		case "sha256":
			expectedLength = sha256.Size
		case "sha512":
			expectedLength = sha512.Size
		default:
			mErr.Errors = append(mErr.Errors, fmt.Errorf("unsupported checksum type: %s", checksumType))
			return mErr.ErrorOrNil()
		}

		// The decoded digest must match the chosen algorithm's size.
		if len(checksumBytes) != expectedLength {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid %s checksum: %v", checksumType, checksumVal))
			return mErr.ErrorOrNil()
		}
	}

	return mErr.ErrorOrNil()
}
// Operand values for Constraint beyond the comparison operators.
const (
	ConstraintDistinctProperty = "distinct_property"
	ConstraintDistinctHosts    = "distinct_hosts"
	ConstraintRegex            = "regexp"
	ConstraintVersion          = "version"
	ConstraintSetContains      = "set_contains"
)
// Constraints are used to restrict placement options.
type Constraint struct {
	LTarget string // Left-hand target
	RTarget string // Right-hand target
	Operand string // Constraint operand (<=, <, =, !=, >, >=), contains, near
	str     string // Memoized string form; lazily filled by String()
}
// Equal reports whether both constraints have identical targets and operand.
// The memoized string form is deliberately ignored.
func (c *Constraint) Equal(o *Constraint) bool {
	if c.LTarget != o.LTarget {
		return false
	}
	if c.RTarget != o.RTarget {
		return false
	}
	return c.Operand == o.Operand
}
// Copy returns a copy of the constraint; nil in, nil out.
func (c *Constraint) Copy() *Constraint {
	if c == nil {
		return nil
	}
	dup := *c
	return &dup
}
// String returns a memoized "LTarget Operand RTarget" rendering of the
// constraint, computing it on first use.
func (c *Constraint) String() string {
	if c.str == "" {
		c.str = fmt.Sprintf("%s %s %s", c.LTarget, c.Operand, c.RTarget)
	}
	return c.str
}
// Validate checks that an operand is present and, for regexp and version
// operands, that the right-hand target parses. Other operands need no
// RTarget validation here.
func (c *Constraint) Validate() error {
	var mErr multierror.Error
	if c.Operand == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing constraint operand"))
	}

	// Perform additional validation based on operand
	switch c.Operand {
	case ConstraintRegex:
		_, err := regexp.Compile(c.RTarget)
		if err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Regular expression failed to compile: %v", err))
		}
	case ConstraintVersion:
		_, err := version.NewConstraint(c.RTarget)
		if err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Version constraint is invalid: %v", err))
		}
	}
	return mErr.ErrorOrNil()
}
// EphemeralDisk is an ephemeral disk object
type EphemeralDisk struct {
	// Sticky indicates whether the allocation is sticky to a node
	Sticky bool

	// SizeMB is the size of the local disk
	SizeMB int

	// Migrate determines if Nomad client should migrate the allocation dir for
	// sticky allocations
	Migrate bool
}
// DefaultEphemeralDisk returns an EphemeralDisk with the default 300 MB size.
func DefaultEphemeralDisk() *EphemeralDisk {
	d := new(EphemeralDisk)
	d.SizeMB = 300
	return d
}
// Validate enforces the 10 MB minimum disk size.
func (d *EphemeralDisk) Validate() error {
	if d.SizeMB >= 10 {
		return nil
	}
	return fmt.Errorf("minimum DiskMB value is 10; got %d", d.SizeMB)
}
// Copy copies the EphemeralDisk struct and returns a new one. A nil
// receiver yields nil, matching the other Copy methods in this file.
func (d *EphemeralDisk) Copy() *EphemeralDisk {
	if d == nil {
		return nil
	}
	ld := new(EphemeralDisk)
	*ld = *d
	return ld
}
// Change modes controlling how a task reacts to a replaced Vault token.
const (
	// VaultChangeModeNoop takes no action when a new token is retrieved.
	VaultChangeModeNoop = "noop"

	// VaultChangeModeSignal signals the task when a new token is retrieved.
	VaultChangeModeSignal = "signal"

	// VaultChangeModeRestart restarts the task when a new token is retrieved.
	VaultChangeModeRestart = "restart"
)
// Vault stores the set of permissions a task needs access to from Vault.
type Vault struct {
	// Policies is the set of policies that the task needs access to
	Policies []string

	// Env marks whether the Vault Token should be exposed as an environment
	// variable
	Env bool

	// ChangeMode is used to configure the task's behavior when the Vault
	// token changes because the original token could not be renewed in time.
	ChangeMode string

	// ChangeSignal is the signal sent to the task when a new token is
	// retrieved. This is only valid when using the signal change mode.
	ChangeSignal string
}
// DefaultVaultBlock returns a Vault block with the token exposed via the
// environment and the restart change mode.
func DefaultVaultBlock() *Vault {
	v := new(Vault)
	v.Env = true
	v.ChangeMode = VaultChangeModeRestart
	return v
}
// Copy returns a shallow copy of this Vault block (the Policies slice is
// shared, as before); nil in, nil out.
func (v *Vault) Copy() *Vault {
	if v == nil {
		return nil
	}
	nv := *v
	return &nv
}
// Canonicalize upper-cases the change signal so signal names compare
// consistently.
func (v *Vault) Canonicalize() {
	if sig := v.ChangeSignal; sig != "" {
		v.ChangeSignal = strings.ToUpper(sig)
	}
}
// Validate returns if the Vault block is valid: the policy list must be
// non-empty, the "root" policy is disallowed, and the change mode must be a
// known mode (with a signal required for signal mode). A nil block is valid.
func (v *Vault) Validate() error {
	if v == nil {
		return nil
	}

	var mErr multierror.Error
	if len(v.Policies) == 0 {
		multierror.Append(&mErr, fmt.Errorf("Policy list cannot be empty"))
	}

	for _, p := range v.Policies {
		if p == "root" {
			// Fixed typo in user-facing message ("specifiy" -> "specify").
			multierror.Append(&mErr, fmt.Errorf("Can not specify \"root\" policy"))
		}
	}

	switch v.ChangeMode {
	case VaultChangeModeSignal:
		if v.ChangeSignal == "" {
			multierror.Append(&mErr, fmt.Errorf("Signal must be specified when using change mode %q", VaultChangeModeSignal))
		}
	case VaultChangeModeNoop, VaultChangeModeRestart:
		// Valid modes that need no extra configuration.
	default:
		multierror.Append(&mErr, fmt.Errorf("Unknown change mode %q", v.ChangeMode))
	}

	return mErr.ErrorOrNil()
}
const (
	// DeploymentStatuses are the various states a deployment can be be in
	DeploymentStatusRunning    = "running"
	DeploymentStatusPaused     = "paused"
	DeploymentStatusFailed     = "failed"
	DeploymentStatusSuccessful = "successful"
	DeploymentStatusCancelled  = "cancelled"

	// DeploymentStatusDescriptions are the various descriptions of the states a
	// deployment can be in.
	DeploymentStatusDescriptionRunning               = "Deployment is running"
	DeploymentStatusDescriptionRunningNeedsPromotion = "Deployment is running but requires promotion"
	DeploymentStatusDescriptionPaused                = "Deployment is paused"
	DeploymentStatusDescriptionSuccessful            = "Deployment completed successfully"
	DeploymentStatusDescriptionStoppedJob            = "Cancelled because job is stopped"
	DeploymentStatusDescriptionNewerJob              = "Cancelled due to newer version of job"
	DeploymentStatusDescriptionFailedAllocations     = "Failed due to unhealthy allocations"
	DeploymentStatusDescriptionFailedByUser          = "Deployment marked as failed"
)
// DeploymentStatusDescriptionRollback is used to get the status description of
// a deployment when rolling back to an older job.
func DeploymentStatusDescriptionRollback(baseDescription string, jobVersion uint64) string {
	return baseDescription + " - rolling back to job version " + strconv.FormatUint(jobVersion, 10)
}
// Deployment is the object that represents a job deployment which is used to
// transition a job between versions.
type Deployment struct {
	// ID is a generated UUID for the deployment
	ID string

	// JobID is the job the deployment is created for
	JobID string

	// JobVersion is the version of the job at which the deployment is tracking
	JobVersion uint64

	// JobModifyIndex is the modify index of the job at which the deployment is tracking
	JobModifyIndex uint64

	// JobCreateIndex is the create index of the job which the deployment is
	// tracking. It is needed so that if the job gets stopped and reran we can
	// present the correct list of deployments for the job and not old ones.
	JobCreateIndex uint64

	// TaskGroups is the set of task groups affected by the deployment and their
	// current deployment status.
	TaskGroups map[string]*DeploymentState

	// The status of the deployment
	Status string

	// StatusDescription allows a human readable description of the deployment
	// status.
	StatusDescription string

	// Raft indexes.
	CreateIndex uint64
	ModifyIndex uint64
}
// NewDeployment creates a running deployment for the given job, seeded with
// a fresh UUID and the job's version and raft indexes.
func NewDeployment(job *Job) *Deployment {
	d := &Deployment{
		ID:                GenerateUUID(),
		JobID:             job.ID,
		JobVersion:        job.Version,
		JobModifyIndex:    job.ModifyIndex,
		JobCreateIndex:    job.CreateIndex,
		TaskGroups:        make(map[string]*DeploymentState, len(job.TaskGroups)),
		Status:            DeploymentStatusRunning,
		StatusDescription: DeploymentStatusDescriptionRunning,
	}
	return d
}
// Copy returns a deep copy of the deployment, duplicating the TaskGroups
// map and each of its states. A nil receiver yields nil.
func (d *Deployment) Copy() *Deployment {
	if d == nil {
		return nil
	}

	c := new(Deployment)
	*c = *d
	c.TaskGroups = nil

	if d.TaskGroups != nil {
		c.TaskGroups = make(map[string]*DeploymentState, len(d.TaskGroups))
		for name, state := range d.TaskGroups {
			c.TaskGroups[name] = state.Copy()
		}
	}

	return c
}
// Active returns whether the deployment is still in flight (running or
// paused) as opposed to a terminal status.
func (d *Deployment) Active() bool {
	return d.Status == DeploymentStatusRunning || d.Status == DeploymentStatusPaused
}
// GetID is a helper for getting the ID when the object may be nil.
func (d *Deployment) GetID() string {
	if d != nil {
		return d.ID
	}
	return ""
}
// HasPlacedCanaries returns whether any task group in the deployment has
// placed at least one canary allocation. Nil deployments have none.
func (d *Deployment) HasPlacedCanaries() bool {
	if d == nil {
		return false
	}
	for _, state := range d.TaskGroups {
		if len(state.PlacedCanaries) > 0 {
			return true
		}
	}
	return false
}
// RequiresPromotion returns whether the deployment requires promotion to
// continue: it must be running and have a task group with unpromoted
// canaries.
func (d *Deployment) RequiresPromotion() bool {
	if d == nil || d.Status != DeploymentStatusRunning {
		return false
	}
	for _, state := range d.TaskGroups {
		if state.DesiredCanaries > 0 && !state.Promoted {
			return true
		}
	}
	return false
}
// GoString renders the deployment and each task group's state for debug
// output. Task group order follows map iteration and is not deterministic.
func (d *Deployment) GoString() string {
	out := fmt.Sprintf("Deployment ID %q for job %q has status %q (%v):", d.ID, d.JobID, d.Status, d.StatusDescription)
	for group, state := range d.TaskGroups {
		out += fmt.Sprintf("\nTask Group %q has state:\n%#v", group, state)
	}
	return out
}
// DeploymentState tracks the state of a deployment for a given task group.
type DeploymentState struct {
	// AutoRevert marks whether the task group has indicated the job should be
	// reverted on failure
	AutoRevert bool

	// Promoted marks whether the canaries have been promoted
	Promoted bool

	// PlacedCanaries is the set of placed canary allocations
	PlacedCanaries []string

	// DesiredCanaries is the number of canaries that should be created.
	DesiredCanaries int

	// DesiredTotal is the total number of allocations that should be created as
	// part of the deployment.
	DesiredTotal int

	// PlacedAllocs is the number of allocations that have been placed
	PlacedAllocs int

	// HealthyAllocs is the number of allocations that have been marked healthy.
	HealthyAllocs int

	// UnhealthyAllocs are allocations that have been marked as unhealthy.
	UnhealthyAllocs int
}
// GoString renders the deployment state as one tab-indented line per field
// for debug output.
func (d *DeploymentState) GoString() string {
	parts := []string{
		fmt.Sprintf("\tDesired Total: %d", d.DesiredTotal),
		fmt.Sprintf("\tDesired Canaries: %d", d.DesiredCanaries),
		fmt.Sprintf("\tPlaced Canaries: %#v", d.PlacedCanaries),
		fmt.Sprintf("\tPromoted: %v", d.Promoted),
		fmt.Sprintf("\tPlaced: %d", d.PlacedAllocs),
		fmt.Sprintf("\tHealthy: %d", d.HealthyAllocs),
		fmt.Sprintf("\tUnhealthy: %d", d.UnhealthyAllocs),
		fmt.Sprintf("\tAutoRevert: %v", d.AutoRevert),
	}
	return strings.Join(parts, "\n")
}
// Copy returns a deep copy of the deployment state, duplicating the
// PlacedCanaries slice. A nil receiver yields nil, matching Deployment.Copy,
// which calls this on each task group state.
func (d *DeploymentState) Copy() *DeploymentState {
	if d == nil {
		return nil
	}
	c := &DeploymentState{}
	*c = *d
	c.PlacedCanaries = helper.CopySliceString(d.PlacedCanaries)
	return c
}
// DeploymentStatusUpdate is used to update the status of a given deployment
type DeploymentStatusUpdate struct {
	// DeploymentID is the ID of the deployment to update
	DeploymentID string

	// Status is the new status of the deployment.
	Status string

	// StatusDescription is the new status description of the deployment.
	StatusDescription string
}
// Desired (server-side) statuses for an allocation.
const (
	AllocDesiredStatusRun   = "run"   // Allocation should run
	AllocDesiredStatusStop  = "stop"  // Allocation should stop
	AllocDesiredStatusEvict = "evict" // Allocation should stop, and was evicted
)
// Client-reported statuses for an allocation; complete, failed and lost are
// terminal.
const (
	AllocClientStatusPending  = "pending"
	AllocClientStatusRunning  = "running"
	AllocClientStatusComplete = "complete"
	AllocClientStatusFailed   = "failed"
	AllocClientStatusLost     = "lost"
)
// Allocation is used to allocate the placement of a task group to a node.
type Allocation struct {
	// ID of the allocation (UUID)
	ID string

	// ID of the evaluation that generated this allocation
	EvalID string

	// Name is a logical name of the allocation.
	Name string

	// NodeID is the node this is being placed on
	NodeID string

	// Job is the parent job of the task group being allocated.
	// This is copied at allocation time to avoid issues if the job
	// definition is updated.
	JobID string
	Job   *Job

	// TaskGroup is the name of the task group that should be run
	TaskGroup string

	// Resources is the total set of resources allocated as part
	// of this allocation of the task group.
	Resources *Resources

	// SharedResources are the resources that are shared by all the tasks in an
	// allocation
	SharedResources *Resources

	// TaskResources is the set of resources allocated to each
	// task. These should sum to the total Resources.
	TaskResources map[string]*Resources

	// Metrics associated with this allocation
	Metrics *AllocMetric

	// Desired Status of the allocation on the client
	DesiredStatus string

	// DesiredStatusDescription is meant to provide more human useful information
	DesiredDescription string

	// Status of the allocation on the client
	ClientStatus string

	// ClientStatusDescription is meant to provide more human useful information
	ClientDescription string

	// TaskStates stores the state of each task.
	TaskStates map[string]*TaskState

	// PreviousAllocation is the allocation that this allocation is replacing
	PreviousAllocation string

	// DeploymentID identifies an allocation as being created from a
	// particular deployment
	DeploymentID string

	// DeploymentStatus captures the status of the allocation as part of the
	// given deployment
	DeploymentStatus *AllocDeploymentStatus

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64

	// AllocModifyIndex is not updated when the client updates allocations. This
	// lets the client pull only the allocs updated by the server.
	AllocModifyIndex uint64

	// CreateTime is the time the allocation has finished scheduling and been
	// verified by the plan applier.
	CreateTime int64
}
// Index returns the index of the allocation. If the allocation is from a task
// group with count greater than 1, there will be multiple allocations for it.
// The index is parsed from the allocation name, which appears to end in
// "[<index>]" after the job and task group names; a name too short or a
// non-numeric index yields 0.
func (a *Allocation) Index() uint {
	nameLen := len(a.Name)
	prefixLen := len(a.JobID) + len(a.TaskGroup) + 2
	if nameLen <= 3 || nameLen <= prefixLen {
		return 0
	}

	// Strip the prefix and the trailing ']'; Atoi failure falls back to 0.
	idx, _ := strconv.Atoi(a.Name[prefixLen : nameLen-1])
	return uint(idx)
}
// Copy returns a deep copy of the allocation, including its Job.
func (a *Allocation) Copy() *Allocation {
	return a.copyImpl(true)
}
// CopySkipJob provides a copy of the allocation but doesn't deep copy the job
// (the Job pointer is shared with the original).
func (a *Allocation) CopySkipJob() *Allocation {
	return a.copyImpl(false)
}
// copyImpl deep copies the allocation; when job is true the Job is deep
// copied as well, otherwise the Job pointer is shared. A nil receiver
// yields nil.
func (a *Allocation) copyImpl(job bool) *Allocation {
	if a == nil {
		return nil
	}
	na := new(Allocation)
	*na = *a

	if job {
		na.Job = na.Job.Copy()
	}

	na.Resources = na.Resources.Copy()
	na.SharedResources = na.SharedResources.Copy()

	// Rebuild the per-task resource map with copied values.
	if a.TaskResources != nil {
		tr := make(map[string]*Resources, len(na.TaskResources))
		for task, resource := range na.TaskResources {
			tr[task] = resource.Copy()
		}
		na.TaskResources = tr
	}

	na.Metrics = na.Metrics.Copy()
	na.DeploymentStatus = na.DeploymentStatus.Copy()

	// Rebuild the per-task state map with copied values.
	if a.TaskStates != nil {
		ts := make(map[string]*TaskState, len(na.TaskStates))
		for task, state := range na.TaskStates {
			ts[task] = state.Copy()
		}
		na.TaskStates = ts
	}
	return na
}
// TerminalStatus returns if the desired or actual status is terminal and
// will no longer transition. The desired status is checked first, then the
// client status.
func (a *Allocation) TerminalStatus() bool {
	if a.DesiredStatus == AllocDesiredStatusStop || a.DesiredStatus == AllocDesiredStatusEvict {
		return true
	}

	switch a.ClientStatus {
	case AllocClientStatusComplete, AllocClientStatusFailed, AllocClientStatusLost:
		return true
	}
	return false
}
// Terminated returns if the allocation is in a terminal state on a client
// (failed, complete, or lost).
func (a *Allocation) Terminated() bool {
	switch a.ClientStatus {
	case AllocClientStatusFailed, AllocClientStatusComplete, AllocClientStatusLost:
		return true
	default:
		return false
	}
}
// RanSuccessfully returns whether the client has run the allocation and all
// tasks finished successfully.
func (a *Allocation) RanSuccessfully() bool {
	// Handle the case the client hasn't started the allocation.
	if len(a.TaskStates) == 0 {
		return false
	}

	// Short-circuit as soon as any task is found to have failed instead of
	// evaluating every remaining task state.
	for _, state := range a.TaskStates {
		if !state.Successful() {
			return false
		}
	}
	return true
}
// ShouldMigrate returns if the allocation needs data migration: it must not
// be stopping, its task group must declare an ephemeral disk, and that disk
// must be both sticky and marked for migration.
func (a *Allocation) ShouldMigrate() bool {
	switch a.DesiredStatus {
	case AllocDesiredStatusStop, AllocDesiredStatusEvict:
		return false
	}

	tg := a.Job.LookupTaskGroup(a.TaskGroup)

	// No task group or no ephemeral disk block means nothing to migrate.
	if tg == nil || tg.EphemeralDisk == nil {
		return false
	}

	return tg.EphemeralDisk.Migrate && tg.EphemeralDisk.Sticky
}
// Stub returns a list stub for the allocation: a flattened subset of fields
// suitable for list endpoints. Maps and pointers are shared, not copied.
func (a *Allocation) Stub() *AllocListStub {
	return &AllocListStub{
		ID:                 a.ID,
		EvalID:             a.EvalID,
		Name:               a.Name,
		NodeID:             a.NodeID,
		JobID:              a.JobID,
		JobVersion:         a.Job.Version,
		TaskGroup:          a.TaskGroup,
		DesiredStatus:      a.DesiredStatus,
		DesiredDescription: a.DesiredDescription,
		ClientStatus:       a.ClientStatus,
		ClientDescription:  a.ClientDescription,
		TaskStates:         a.TaskStates,
		DeploymentStatus:   a.DeploymentStatus,
		CreateIndex:        a.CreateIndex,
		ModifyIndex:        a.ModifyIndex,
		CreateTime:         a.CreateTime,
	}
}
// AllocListStub is used to return a subset of alloc information. Fields
// mirror those of Allocation; see Allocation for their meaning.
type AllocListStub struct {
	ID                 string
	EvalID             string
	Name               string
	NodeID             string
	JobID              string
	JobVersion         uint64
	TaskGroup          string
	DesiredStatus      string
	DesiredDescription string
	ClientStatus       string
	ClientDescription  string
	TaskStates         map[string]*TaskState
	DeploymentStatus   *AllocDeploymentStatus
	CreateIndex        uint64
	ModifyIndex        uint64
	CreateTime         int64
}
// AllocMetric is used to track various metrics while attempting
// to make an allocation. These are used to debug a job, or to better
// understand the pressure within the system.
type AllocMetric struct {
	// NodesEvaluated is the number of nodes that were evaluated
	NodesEvaluated int

	// NodesFiltered is the number of nodes filtered due to a constraint
	NodesFiltered int

	// NodesAvailable is the number of nodes available for evaluation per DC.
	NodesAvailable map[string]int

	// ClassFiltered is the number of nodes filtered by class
	ClassFiltered map[string]int

	// ConstraintFiltered is the number of failures caused by constraint
	ConstraintFiltered map[string]int

	// NodesExhausted is the number of nodes skipped due to being
	// exhausted of at least one resource
	NodesExhausted int

	// ClassExhausted is the number of nodes exhausted by class
	ClassExhausted map[string]int

	// DimensionExhausted provides the count by dimension or reason
	DimensionExhausted map[string]int

	// Scores is the scores of the final few nodes remaining
	// for placement. The top score is typically selected.
	Scores map[string]float64

	// AllocationTime is a measure of how long the allocation
	// attempt took. This can affect performance and SLAs.
	AllocationTime time.Duration

	// CoalescedFailures indicates the number of other
	// allocations that were coalesced into this failed allocation.
	// This is to prevent creating many failed allocations for a
	// single task group.
	CoalescedFailures int
}
// Copy returns a deep copy of the metrics, duplicating every map. A nil
// receiver yields nil.
func (a *AllocMetric) Copy() *AllocMetric {
	if a == nil {
		return nil
	}
	c := new(AllocMetric)
	*c = *a
	c.NodesAvailable = helper.CopyMapStringInt(a.NodesAvailable)
	c.ClassFiltered = helper.CopyMapStringInt(a.ClassFiltered)
	c.ConstraintFiltered = helper.CopyMapStringInt(a.ConstraintFiltered)
	c.ClassExhausted = helper.CopyMapStringInt(a.ClassExhausted)
	c.DimensionExhausted = helper.CopyMapStringInt(a.DimensionExhausted)
	c.Scores = helper.CopyMapStringFloat64(a.Scores)
	return c
}
// EvaluateNode bumps the count of nodes evaluated.
func (a *AllocMetric) EvaluateNode() {
	a.NodesEvaluated++
}
// FilterNode records that a node was filtered, attributing the filtering to
// the node's class and the failing constraint when they are known. The
// per-class and per-constraint maps are allocated lazily.
func (a *AllocMetric) FilterNode(node *Node, constraint string) {
	a.NodesFiltered++

	if node != nil && node.NodeClass != "" {
		if a.ClassFiltered == nil {
			a.ClassFiltered = map[string]int{}
		}
		a.ClassFiltered[node.NodeClass]++
	}

	if constraint != "" {
		if a.ConstraintFiltered == nil {
			a.ConstraintFiltered = map[string]int{}
		}
		a.ConstraintFiltered[constraint]++
	}
}
// ExhaustedNode records that a node was skipped for resource exhaustion,
// attributing it to the node's class and the exhausted dimension when
// known. The per-class and per-dimension maps are allocated lazily.
func (a *AllocMetric) ExhaustedNode(node *Node, dimension string) {
	a.NodesExhausted++

	if node != nil && node.NodeClass != "" {
		if a.ClassExhausted == nil {
			a.ClassExhausted = map[string]int{}
		}
		a.ClassExhausted[node.NodeClass]++
	}

	if dimension != "" {
		if a.DimensionExhausted == nil {
			a.DimensionExhausted = map[string]int{}
		}
		a.DimensionExhausted[dimension]++
	}
}
// ScoreNode records a placement score for a node under the key
// "<nodeID>.<name>", allocating the Scores map lazily.
func (a *AllocMetric) ScoreNode(node *Node, name string, score float64) {
	if a.Scores == nil {
		a.Scores = map[string]float64{}
	}
	a.Scores[node.ID+"."+name] = score
}
// AllocDeploymentStatus captures the status of the allocation as part of the
// deployment. This can include things like if the allocation has been marked as
// healthy.
type AllocDeploymentStatus struct {
	// Healthy marks whether the allocation has been marked healthy or unhealthy
	// as part of a deployment. It can be unset (nil) if it has neither been
	// marked healthy nor unhealthy.
	Healthy *bool

	// ModifyIndex is the raft index in which the deployment status was last
	// changed.
	ModifyIndex uint64
}
// IsHealthy returns if the allocation has been explicitly marked healthy as
// part of a deployment; nil receivers and unset Healthy are not healthy.
func (a *AllocDeploymentStatus) IsHealthy() bool {
	return a != nil && a.Healthy != nil && *a.Healthy
}
// IsUnhealthy returns if the allocation has been explicitly marked
// unhealthy as part of a deployment; nil receivers and unset Healthy are
// not unhealthy.
func (a *AllocDeploymentStatus) IsUnhealthy() bool {
	return a != nil && a.Healthy != nil && !*a.Healthy
}
// Copy returns a deep copy of the status, duplicating the Healthy pointer
// so the copy can be flipped independently. A nil receiver yields nil.
func (a *AllocDeploymentStatus) Copy() *AllocDeploymentStatus {
	if a == nil {
		return nil
	}
	c := *a
	if a.Healthy != nil {
		c.Healthy = helper.BoolToPtr(*a.Healthy)
	}
	return &c
}
// Statuses an evaluation can be in. Note the value of EvalStatusCancelled
// is spelled "canceled" (single l), unlike DeploymentStatusCancelled.
const (
	EvalStatusBlocked   = "blocked"
	EvalStatusPending   = "pending"
	EvalStatusComplete  = "complete"
	EvalStatusFailed    = "failed"
	EvalStatusCancelled = "canceled"
)
// Reasons an evaluation can be triggered, stored in Evaluation.TriggeredBy.
const (
	EvalTriggerJobRegister       = "job-register"
	EvalTriggerJobDeregister     = "job-deregister"
	EvalTriggerPeriodicJob       = "periodic-job"
	EvalTriggerNodeUpdate        = "node-update"
	EvalTriggerScheduled         = "scheduled"
	EvalTriggerRollingUpdate     = "rolling-update"
	EvalTriggerDeploymentWatcher = "deployment-watcher"
	EvalTriggerFailedFollowUp    = "failed-follow-up"
	EvalTriggerMaxPlans          = "max-plan-attempts"
)
// Core (internal housekeeping) job identifiers.
const (
	// CoreJobEvalGC is used for the garbage collection of evaluations
	// and allocations. We periodically scan evaluations in a terminal state,
	// in which all the corresponding allocations are also terminal. We
	// delete these out of the system to bound the state.
	CoreJobEvalGC = "eval-gc"

	// CoreJobNodeGC is used for the garbage collection of failed nodes.
	// We periodically scan nodes in a terminal state, and if they have no
	// corresponding allocations we delete these out of the system.
	CoreJobNodeGC = "node-gc"

	// CoreJobJobGC is used for the garbage collection of eligible jobs. We
	// periodically scan garbage collectible jobs and check if both their
	// evaluations and allocations are terminal. If so, we delete these out of
	// the system.
	CoreJobJobGC = "job-gc"

	// CoreJobDeploymentGC is used for the garbage collection of eligible
	// deployments. We periodically scan garbage collectible deployments and
	// check if they are terminal. If so, we delete these out of the system.
	CoreJobDeploymentGC = "deployment-gc"

	// CoreJobForceGC is used to force garbage collection of all GCable objects.
	CoreJobForceGC = "force-gc"
)
// Evaluation is used anytime we need to apply business logic as a result
// of a change to our desired state (job specification) or the emergent state
// (registered nodes). When the inputs change, we need to "evaluate" them,
// potentially taking action (allocation of work) or doing nothing if the state
// of the world does not require it.
type Evaluation struct {
	// ID is a randomly generated UUID used for this evaluation. This
	// is assigned upon the creation of the evaluation.
	ID string

	// Priority is used to control scheduling importance and if this job
	// can preempt other jobs.
	Priority int

	// Type is used to control which schedulers are available to handle
	// this evaluation.
	Type string

	// TriggeredBy is used to give some insight into why this Eval
	// was created. (Job change, node failure, alloc failure, etc).
	TriggeredBy string

	// JobID is the job this evaluation is scoped to. Evaluations cannot
	// be run in parallel for a given JobID, so we serialize on this.
	JobID string

	// JobModifyIndex is the modify index of the job at the time
	// the evaluation was created
	JobModifyIndex uint64

	// NodeID is the node that was affected triggering the evaluation.
	NodeID string

	// NodeModifyIndex is the modify index of the node at the time
	// the evaluation was created
	NodeModifyIndex uint64

	// DeploymentID is the ID of the deployment that triggered the evaluation.
	DeploymentID string

	// Status of the evaluation
	Status string

	// StatusDescription is meant to provide more human useful information
	StatusDescription string

	// Wait is a minimum wait time for running the eval. This is used to
	// support a rolling upgrade.
	Wait time.Duration

	// NextEval is the evaluation ID for the eval created to do a followup.
	// This is used to support rolling upgrades, where we need a chain of evaluations.
	NextEval string

	// PreviousEval is the evaluation ID for the eval creating this one to do a followup.
	// This is used to support rolling upgrades, where we need a chain of evaluations.
	PreviousEval string

	// BlockedEval is the evaluation ID for a created blocked eval. A
	// blocked eval will be created if all allocations could not be placed due
	// to constraints or lacking resources.
	BlockedEval string

	// FailedTGAllocs are task groups which have allocations that could not be
	// made, but the metrics are persisted so that the user can use the feedback
	// to determine the cause.
	FailedTGAllocs map[string]*AllocMetric

	// ClassEligibility tracks computed node classes that have been explicitly
	// marked as eligible or ineligible.
	ClassEligibility map[string]bool

	// EscapedComputedClass marks whether the job has constraints that are not
	// captured by computed node classes.
	EscapedComputedClass bool

	// AnnotatePlan triggers the scheduler to provide additional annotations
	// during the evaluation. This should not be set during normal operations.
	AnnotatePlan bool

	// QueuedAllocations is the number of unplaced allocations at the time the
	// evaluation was processed. The map is keyed by Task Group names.
	QueuedAllocations map[string]int

	// SnapshotIndex is the Raft index of the snapshot used to process the
	// evaluation. As such it will only be set once it has gone through the
	// scheduler.
	SnapshotIndex uint64

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
// TerminalStatus reports whether the evaluation has reached a status from
// which it will no longer transition (complete, failed, or cancelled).
func (e *Evaluation) TerminalStatus() bool {
	switch e.Status {
	case EvalStatusComplete, EvalStatusFailed, EvalStatusCancelled:
		return true
	}
	return false
}
// GoString returns a short human-readable representation of the evaluation
// for debugging output.
func (e *Evaluation) GoString() string {
	return "<Eval '" + e.ID + "' JobID: '" + e.JobID + "'>"
}
// Copy returns a deep copy of the evaluation, or nil for a nil receiver.
// The three map fields are duplicated so mutations of the copy do not
// affect the original; a nil map stays nil in the copy.
func (e *Evaluation) Copy() *Evaluation {
	if e == nil {
		return nil
	}
	dup := *e

	// Duplicate ClassEligibility.
	if e.ClassEligibility != nil {
		dup.ClassEligibility = make(map[string]bool, len(e.ClassEligibility))
		for class, elig := range e.ClassEligibility {
			dup.ClassEligibility[class] = elig
		}
	}

	// Duplicate FailedTGAllocs, deep-copying each metric.
	if e.FailedTGAllocs != nil {
		dup.FailedTGAllocs = make(map[string]*AllocMetric, len(e.FailedTGAllocs))
		for tg, metric := range e.FailedTGAllocs {
			dup.FailedTGAllocs[tg] = metric.Copy()
		}
	}

	// Duplicate QueuedAllocations.
	if e.QueuedAllocations != nil {
		dup.QueuedAllocations = make(map[string]int, len(e.QueuedAllocations))
		for tg, num := range e.QueuedAllocations {
			dup.QueuedAllocations[tg] = num
		}
	}
	return &dup
}
// ShouldEnqueue checks if a given evaluation should be enqueued into the
// eval_broker. Only pending evaluations are enqueued; an unrecognized
// status is a programmer error and panics.
func (e *Evaluation) ShouldEnqueue() bool {
	switch e.Status {
	case EvalStatusComplete, EvalStatusFailed, EvalStatusBlocked, EvalStatusCancelled:
		return false
	case EvalStatusPending:
		return true
	}
	panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
}
// ShouldBlock checks if a given evaluation should be entered into the
// blocked eval tracker. Only blocked evaluations qualify; an unrecognized
// status is a programmer error and panics.
func (e *Evaluation) ShouldBlock() bool {
	switch e.Status {
	case EvalStatusComplete, EvalStatusFailed, EvalStatusPending, EvalStatusCancelled:
		return false
	case EvalStatusBlocked:
		return true
	}
	panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
}
// MakePlan is used to make a plan from the given evaluation
// for a given Job. The node maps are initialized empty; AllAtOnce is
// inherited from the job when one is supplied.
func (e *Evaluation) MakePlan(j *Job) *Plan {
	plan := new(Plan)
	plan.EvalID = e.ID
	plan.Priority = e.Priority
	plan.Job = j
	plan.NodeUpdate = make(map[string][]*Allocation)
	plan.NodeAllocation = make(map[string][]*Allocation)
	if j != nil {
		plan.AllAtOnce = j.AllAtOnce
	}
	return plan
}
// NextRollingEval creates a follow-up evaluation for this eval as part of a
// rolling update, chained via PreviousEval and delayed by the given wait.
func (e *Evaluation) NextRollingEval(wait time.Duration) *Evaluation {
	next := new(Evaluation)
	next.ID = GenerateUUID()
	next.Priority = e.Priority
	next.Type = e.Type
	next.TriggeredBy = EvalTriggerRollingUpdate
	next.JobID = e.JobID
	next.JobModifyIndex = e.JobModifyIndex
	next.Status = EvalStatusPending
	next.Wait = wait
	next.PreviousEval = e.ID
	return next
}
// CreateBlockedEval creates a blocked evaluation to follow up this eval and
// place any failed allocations. It takes the classes marked explicitly
// eligible or ineligible and whether the job has escaped computed node
// classes.
func (e *Evaluation) CreateBlockedEval(classEligibility map[string]bool, escaped bool) *Evaluation {
	blocked := new(Evaluation)
	blocked.ID = GenerateUUID()
	blocked.Priority = e.Priority
	blocked.Type = e.Type
	blocked.TriggeredBy = e.TriggeredBy
	blocked.JobID = e.JobID
	blocked.JobModifyIndex = e.JobModifyIndex
	blocked.Status = EvalStatusBlocked
	blocked.PreviousEval = e.ID
	blocked.ClassEligibility = classEligibility
	blocked.EscapedComputedClass = escaped
	return blocked
}
// CreateFailedFollowUpEval creates a follow up evaluation when the current
// one has been marked as failed because it has hit the delivery limit and
// will not be retried by the eval_broker.
func (e *Evaluation) CreateFailedFollowUpEval(wait time.Duration) *Evaluation {
	followUp := new(Evaluation)
	followUp.ID = GenerateUUID()
	followUp.Priority = e.Priority
	followUp.Type = e.Type
	followUp.TriggeredBy = EvalTriggerFailedFollowUp
	followUp.JobID = e.JobID
	followUp.JobModifyIndex = e.JobModifyIndex
	followUp.Status = EvalStatusPending
	followUp.Wait = wait
	followUp.PreviousEval = e.ID
	return followUp
}
// Plan is used to submit a commit plan for task allocations. These
// are submitted to the leader which verifies that resources have
// not been overcommitted before admitting the plan.
type Plan struct {
	// EvalID is the evaluation ID this plan is associated with
	EvalID string

	// EvalToken is used to prevent a split-brain processing of
	// an evaluation. There should only be a single scheduler running
	// an Eval at a time, but this could be violated after a leadership
	// transition. This unique token is used to reject plans that are
	// being submitted from a different leader.
	EvalToken string

	// Priority is the priority of the upstream job
	Priority int

	// AllAtOnce is used to control if incremental scheduling of task groups
	// is allowed or if we must do a gang scheduling of the entire job.
	// If this is false, a plan may be partially applied. Otherwise, the
	// entire plan must be able to make progress.
	AllAtOnce bool

	// Job is the parent job of all the allocations in the Plan.
	// Since a Plan only involves a single Job, we can reduce the size
	// of the plan by only including it once.
	Job *Job

	// NodeUpdate contains all the allocations for each node. For each node,
	// this is a list of the allocations to update to either stop or evict.
	NodeUpdate map[string][]*Allocation

	// NodeAllocation contains all the allocations for each node.
	// The evicts must be considered prior to the allocations.
	NodeAllocation map[string][]*Allocation

	// Annotations contains annotations by the scheduler to be used by operators
	// to understand the decisions made by the scheduler.
	Annotations *PlanAnnotations

	// Deployment is the deployment created or updated by the scheduler that
	// should be applied by the planner.
	Deployment *Deployment

	// DeploymentUpdates is a set of status updates to apply to the given
	// deployments. This allows the scheduler to cancel any unneeded deployment
	// because the job is stopped or the update block is removed.
	DeploymentUpdates []*DeploymentStatusUpdate
}
// AppendUpdate marks the allocation for eviction. The clientStatus of the
// allocation may be optionally set by passing in a non-empty value.
func (p *Plan) AppendUpdate(alloc *Allocation, desiredStatus, desiredDesc, clientStatus string) {
	// Work on a shallow copy so the caller's allocation is untouched.
	copyAlloc := *alloc
	updated := &copyAlloc

	// If the job is not set in the plan we are deregistering a job so we
	// extract the job from the allocation.
	if p.Job == nil && updated.Job != nil {
		p.Job = updated.Job
	}

	// Normalize the job and strip the resources as they can be rebuilt.
	updated.Job = nil
	updated.Resources = nil

	updated.DesiredStatus = desiredStatus
	updated.DesiredDescription = desiredDesc
	if clientStatus != "" {
		updated.ClientStatus = clientStatus
	}

	p.NodeUpdate[alloc.NodeID] = append(p.NodeUpdate[alloc.NodeID], updated)
}
// PopUpdate removes the given allocation from the plan's NodeUpdate set,
// but only when it is the most recently appended entry for its node. The
// node's entry is deleted entirely when the last update is popped.
func (p *Plan) PopUpdate(alloc *Allocation) {
	node := alloc.NodeID
	updates := p.NodeUpdate[node]
	n := len(updates)
	if n == 0 || updates[n-1].ID != alloc.ID {
		return
	}
	if n == 1 {
		delete(p.NodeUpdate, node)
		return
	}
	p.NodeUpdate[node] = updates[:n-1]
}
// AppendAlloc adds the allocation to the plan's NodeAllocation set under
// its node ID.
func (p *Plan) AppendAlloc(alloc *Allocation) {
	p.NodeAllocation[alloc.NodeID] = append(p.NodeAllocation[alloc.NodeID], alloc)
}
// IsNoOp checks if this plan would do nothing: no updates, no allocations,
// no deployment changes.
func (p *Plan) IsNoOp() bool {
	if len(p.NodeUpdate) > 0 || len(p.NodeAllocation) > 0 {
		return false
	}
	return p.Deployment == nil && len(p.DeploymentUpdates) == 0
}
// PlanResult is the result of a plan submitted to the leader.
type PlanResult struct {
	// NodeUpdate contains all the updates that were committed.
	NodeUpdate map[string][]*Allocation

	// NodeAllocation contains all the allocations that were committed.
	NodeAllocation map[string][]*Allocation

	// Deployment is the deployment that was committed.
	Deployment *Deployment

	// DeploymentUpdates is the set of deployment updates that were committed.
	DeploymentUpdates []*DeploymentStatusUpdate

	// RefreshIndex is the index the worker should refresh state up to.
	// This allows all evictions and allocations to be materialized.
	// If any allocations were rejected due to stale data (node state,
	// over committed) this can be used to force a worker refresh.
	RefreshIndex uint64

	// AllocIndex is the Raft index in which the evictions and
	// allocations took place. This is used for the write index.
	AllocIndex uint64
}
// IsNoOp checks if this plan result would do nothing: nothing committed
// and no deployment changes.
func (p *PlanResult) IsNoOp() bool {
	if len(p.NodeUpdate) > 0 || len(p.NodeAllocation) > 0 {
		return false
	}
	return len(p.DeploymentUpdates) == 0 && p.Deployment == nil
}
// FullCommit is used to check if all the allocations in a plan
// were committed as part of the result. Returns if there was
// a match, and the number of expected and actual allocations.
func (p *PlanResult) FullCommit(plan *Plan) (bool, int, int) {
	expected := 0
	actual := 0
	for name, allocList := range plan.NodeAllocation {
		// A direct index is sufficient here: a missing key yields a nil
		// slice of length zero, so the two-value form with a blank second
		// variable was redundant (staticcheck S1005).
		didAlloc := p.NodeAllocation[name]
		expected += len(allocList)
		actual += len(didAlloc)
	}
	return actual == expected, expected, actual
}
// PlanAnnotations holds annotations made by the scheduler to give further debug
// information to operators.
type PlanAnnotations struct {
	// DesiredTGUpdates is the set of desired updates per task group.
	DesiredTGUpdates map[string]*DesiredUpdates
}

// DesiredUpdates is the set of changes the scheduler would like to make given
// sufficient resources and cluster capacity. Each field counts allocations
// slated for the corresponding scheduling action (see GoString for the
// rendered form).
type DesiredUpdates struct {
	Ignore            uint64
	Place             uint64
	Migrate           uint64
	Stop              uint64
	InPlaceUpdate     uint64
	DestructiveUpdate uint64
	Canary            uint64
}
// GoString renders the desired-update counters in a compact single-line
// form for debug output.
func (d *DesiredUpdates) GoString() string {
	const layout = "(place %d) (inplace %d) (destructive %d) (stop %d) (migrate %d) (ignore %d) (canary %d)"
	return fmt.Sprintf(layout,
		d.Place, d.InPlaceUpdate, d.DestructiveUpdate, d.Stop, d.Migrate, d.Ignore, d.Canary)
}
// MsgpackHandle is a shared handle for encoding/decoding of structs.
type-initialized once at package load.
var MsgpackHandle = func() *codec.MsgpackHandle {
	h := &codec.MsgpackHandle{RawToString: true}

	// Sets the default type for decoding a map into a nil interface{}.
	// This is necessary in particular because we store the driver configs as a
	// nil interface{}.
	h.MapType = reflect.TypeOf(map[string]interface{}(nil))
	return h
}()
var (
	// JsonHandle and JsonHandlePretty are the codec handles to JSON encode
	// structs. The pretty handle will add indents for easier human consumption.
	JsonHandle = &codec.JsonHandle{
		HTMLCharsAsIs: true,
	}
	JsonHandlePretty = &codec.JsonHandle{
		HTMLCharsAsIs: true,
		Indent:        4,
	}
)

// HashiMsgpackHandle is the shared handle for the hashicorp fork of the
// msgpack codec, configured identically to MsgpackHandle.
var HashiMsgpackHandle = func() *hcodec.MsgpackHandle {
	h := &hcodec.MsgpackHandle{RawToString: true}

	// Sets the default type for decoding a map into a nil interface{}.
	// This is necessary in particular because we store the driver configs as a
	// nil interface{}.
	h.MapType = reflect.TypeOf(map[string]interface{}(nil))
	return h
}()
// Decode is used to decode a MsgPack encoded object.
func Decode(buf []byte, out interface{}) error {
	r := bytes.NewReader(buf)
	return codec.NewDecoder(r, MsgpackHandle).Decode(out)
}
// Encode is used to encode a MsgPack object with type prefix: the first
// byte of the output is the MessageType, followed by the msgpack payload.
func Encode(t MessageType, msg interface{}) ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteByte(uint8(t))
	if err := codec.NewEncoder(&buf, MsgpackHandle).Encode(msg); err != nil {
		return buf.Bytes(), err
	}
	return buf.Bytes(), nil
}
// KeyringResponse is a unified key response and can be used for install,
// remove, use, as well as listing key queries.
type KeyringResponse struct {
	Messages map[string]string
	Keys     map[string]int
	NumNodes int
}

// KeyringRequest is request objects for serf key operations.
type KeyringRequest struct {
	Key string
}

// RecoverableError wraps an error and marks whether it is recoverable and could
// be retried or it is fatal.
type RecoverableError struct {
	Err         string
	Recoverable bool
}
// NewRecoverableError is used to wrap an error and mark it as recoverable
// or not. A nil input error yields a nil result.
func NewRecoverableError(e error, recoverable bool) error {
	if e == nil {
		return nil
	}
	re := new(RecoverableError)
	re.Err = e.Error()
	re.Recoverable = recoverable
	return re
}
// WrapRecoverable wraps an existing error in a new RecoverableError with a
// new message. If the error was recoverable before the returned error is
// as well; otherwise it is unrecoverable.
func WrapRecoverable(msg string, err error) error {
	wrapped := &RecoverableError{
		Err:         msg,
		Recoverable: IsRecoverable(err),
	}
	return wrapped
}
// Error returns the wrapped error message, satisfying the error interface.
func (r *RecoverableError) Error() string {
	return r.Err
}

// IsRecoverable returns whether the wrapped error may be retried.
func (r *RecoverableError) IsRecoverable() bool {
	return r.Recoverable
}

// Recoverable is an interface for errors to implement to indicate whether or
// not they are fatal or recoverable.
type Recoverable interface {
	error
	IsRecoverable() bool
}
// IsRecoverable returns true if the error implements Recoverable and
// reports itself as recoverable. Otherwise false is returned.
func IsRecoverable(e error) bool {
	re, ok := e.(Recoverable)
	return ok && re.IsRecoverable()
}
// NOTE(review): the line "Address feedback" appears to be a stray code-review
// or commit message fused into the file during extraction; preserved here as a
// comment so it cannot be mistaken for code.
package structs
import (
"bytes"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/hex"
"errors"
"fmt"
"io"
"net"
"os"
"path/filepath"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/gorhill/cronexpr"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-version"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/args"
"github.com/mitchellh/copystructure"
"github.com/ugorji/go/codec"
hcodec "github.com/hashicorp/go-msgpack/codec"
)
var (
	// ErrNoLeader is returned when no cluster leader is available.
	ErrNoLeader = fmt.Errorf("No cluster leader")
	// ErrNoRegionPath is returned when a request cannot be routed to its
	// target region.
	ErrNoRegionPath = fmt.Errorf("No path to region")
)

// MessageType identifies the kind of an FSM message so it can be
// dispatched to the appropriate apply handler.
type MessageType uint8

// MessageType values. These are positional (iota-based), so new entries
// must only be appended and existing ones never reordered or removed.
const (
	NodeRegisterRequestType MessageType = iota
	NodeDeregisterRequestType
	NodeUpdateStatusRequestType
	NodeUpdateDrainRequestType
	JobRegisterRequestType
	JobDeregisterRequestType
	EvalUpdateRequestType
	EvalDeleteRequestType
	AllocUpdateRequestType
	AllocClientUpdateRequestType
	ReconcileJobSummariesRequestType
	VaultAccessorRegisterRequestType
	// NOTE(review): "Degister" is a typo for "Deregister", but the name is
	// exported and the value positional, so it is kept for compatibility.
	VaultAccessorDegisterRequestType
	ApplyPlanResultsRequestType
	DeploymentStatusUpdateRequestType
	DeploymentPromoteRequestType
	DeploymentAllocHealthRequestType
	DeploymentDeleteRequestType
	JobStabilityRequestType
)

const (
	// IgnoreUnknownTypeFlag is set along with a MessageType
	// to indicate that the message type can be safely ignored
	// if it is not recognized. This is for future proofing, so
	// that new commands can be added in a way that won't cause
	// old servers to crash when the FSM attempts to process them.
	IgnoreUnknownTypeFlag MessageType = 128

	// ApiMajorVersion is returned as part of the Status.Version request.
	// It should be incremented anytime the APIs are changed in a way
	// that would break clients for sane client versioning.
	ApiMajorVersion = 1

	// ApiMinorVersion is returned as part of the Status.Version request.
	// It should be incremented anytime the APIs are changed to allow
	// for sane client versioning. Minor changes should be compatible
	// within the major version.
	ApiMinorVersion = 1

	ProtocolVersion = "protocol"
	APIMajorVersion = "api.major"
	APIMinorVersion = "api.minor"

	// Artifact getter modes.
	GetterModeAny  = "any"
	GetterModeFile = "file"
	GetterModeDir  = "dir"
)

// RPCInfo is used to describe common information about query
type RPCInfo interface {
	RequestRegion() string
	IsRead() bool
	AllowStaleRead() bool
}
// QueryOptions is used to specify various flags for read queries
type QueryOptions struct {
	// The target region for this query
	Region string

	// If set, wait until query exceeds given index. Must be provided
	// with MaxQueryTime.
	MinQueryIndex uint64

	// Provided with MinQueryIndex to wait for change.
	MaxQueryTime time.Duration

	// If set, any follower can service the request. Results
	// may be arbitrarily stale.
	AllowStale bool

	// If set, used as prefix for resource list searches
	Prefix string
}

// RequestRegion returns the target region of the query.
func (q QueryOptions) RequestRegion() string {
	return q.Region
}

// QueryOption only applies to reads, so always true
func (q QueryOptions) IsRead() bool {
	return true
}

// AllowStaleRead reports whether a follower may service the query.
func (q QueryOptions) AllowStaleRead() bool {
	return q.AllowStale
}

// WriteRequest carries the common fields of all write RPCs.
type WriteRequest struct {
	// The target region for this write
	Region string
}

// RequestRegion returns the target region of the write.
func (w WriteRequest) RequestRegion() string {
	// The target region for this request
	return w.Region
}

// WriteRequest only applies to writes, always false
func (w WriteRequest) IsRead() bool {
	return false
}

// AllowStaleRead is always false for writes, which must go to the leader.
func (w WriteRequest) AllowStaleRead() bool {
	return false
}

// QueryMeta allows a query response to include potentially
// useful metadata about a query
type QueryMeta struct {
	// This is the index associated with the read
	Index uint64

	// If AllowStale is used, this is time elapsed since
	// last contact between the follower and leader. This
	// can be used to gauge staleness.
	LastContact time.Duration

	// Used to indicate if there is a known leader node
	KnownLeader bool
}

// WriteMeta allows a write response to include potentially
// useful metadata about the write
type WriteMeta struct {
	// This is the index associated with the write
	Index uint64
}
// NodeRegisterRequest is used for Node.Register endpoint
// to register a node as being a schedulable entity.
type NodeRegisterRequest struct {
	Node *Node
	WriteRequest
}

// NodeDeregisterRequest is used for Node.Deregister endpoint
// to deregister a node as being a schedulable entity.
type NodeDeregisterRequest struct {
	NodeID string
	WriteRequest
}

// NodeServerInfo is used to in NodeUpdateResponse to return Nomad server
// information used in RPC server lists.
type NodeServerInfo struct {
	// RPCAdvertiseAddr is the IP endpoint that a Nomad Server wishes to
	// be contacted at for RPCs.
	RPCAdvertiseAddr string

	// RPCMajorVersion is the major version number the Nomad Server
	// supports
	RPCMajorVersion int32

	// RPCMinorVersion is the minor version number the Nomad Server
	// supports
	RPCMinorVersion int32

	// Datacenter is the datacenter that a Nomad server belongs to
	Datacenter string
}

// NodeUpdateStatusRequest is used for Node.UpdateStatus endpoint
// to update the status of a node.
type NodeUpdateStatusRequest struct {
	NodeID string
	Status string
	WriteRequest
}

// NodeUpdateDrainRequest is used for updating the drain status
type NodeUpdateDrainRequest struct {
	NodeID string
	Drain  bool
	WriteRequest
}

// NodeEvaluateRequest is used to re-evaluate the node
type NodeEvaluateRequest struct {
	NodeID string
	WriteRequest
}

// NodeSpecificRequest is used when we just need to specify a target node
type NodeSpecificRequest struct {
	NodeID   string
	SecretID string
	QueryOptions
}
// JobRegisterRequest is used for Job.Register endpoint
// to register a job as being a schedulable entity.
type JobRegisterRequest struct {
	Job *Job

	// If EnforceIndex is set then the job will only be registered if the passed
	// JobModifyIndex matches the current Jobs index. If the index is zero, the
	// register only occurs if the job is new.
	EnforceIndex   bool
	JobModifyIndex uint64

	WriteRequest
}

// JobDeregisterRequest is used for Job.Deregister endpoint
// to deregister a job as being a schedulable entity.
type JobDeregisterRequest struct {
	JobID string

	// Purge controls whether the deregister purges the job from the system or
	// whether the job is just marked as stopped and will be removed by the
	// garbage collector
	Purge bool

	WriteRequest
}

// JobEvaluateRequest is used when we just need to re-evaluate a target job
type JobEvaluateRequest struct {
	JobID string
	WriteRequest
}

// JobSpecificRequest is used when we just need to specify a target job
type JobSpecificRequest struct {
	JobID     string
	AllAllocs bool
	QueryOptions
}

// JobListRequest is used to parameterize a list request
type JobListRequest struct {
	QueryOptions
}

// JobPlanRequest is used for the Job.Plan endpoint to trigger a dry-run
// evaluation of the Job.
type JobPlanRequest struct {
	Job  *Job
	Diff bool // Toggles an annotated diff
	WriteRequest
}

// JobSummaryRequest is used when we just need to get a specific job summary
type JobSummaryRequest struct {
	JobID string
	QueryOptions
}

// JobDispatchRequest is used to dispatch a job based on a parameterized job
type JobDispatchRequest struct {
	JobID   string
	Payload []byte
	Meta    map[string]string
	WriteRequest
}

// JobValidateRequest is used to validate a job
type JobValidateRequest struct {
	Job *Job
	WriteRequest
}

// JobRevertRequest is used to revert a job to a prior version.
type JobRevertRequest struct {
	// JobID is the ID of the job being reverted
	JobID string

	// JobVersion the version to revert to.
	JobVersion uint64

	// EnforcePriorVersion if set will enforce that the job is at the given
	// version before reverting.
	EnforcePriorVersion *uint64

	WriteRequest
}

// JobStabilityRequest is used to marked a job as stable.
type JobStabilityRequest struct {
	// Job to set the stability on
	JobID      string
	JobVersion uint64

	// Set the stability
	Stable bool
	WriteRequest
}

// JobStabilityResponse is the response when marking a job as stable.
type JobStabilityResponse struct {
	WriteMeta
}

// NodeListRequest is used to parameterize a list request
type NodeListRequest struct {
	QueryOptions
}
// EvalUpdateRequest is used for upserting evaluations.
type EvalUpdateRequest struct {
	Evals     []*Evaluation
	EvalToken string
	WriteRequest
}

// EvalDeleteRequest is used for deleting an evaluation.
type EvalDeleteRequest struct {
	Evals  []string
	Allocs []string
	WriteRequest
}

// EvalSpecificRequest is used when we just need to specify a target evaluation
type EvalSpecificRequest struct {
	EvalID string
	QueryOptions
}

// EvalAckRequest is used to Ack/Nack a specific evaluation
type EvalAckRequest struct {
	EvalID string
	Token  string
	WriteRequest
}

// EvalDequeueRequest is used when we want to dequeue an evaluation
type EvalDequeueRequest struct {
	Schedulers       []string
	Timeout          time.Duration
	SchedulerVersion uint16
	WriteRequest
}

// EvalListRequest is used to list the evaluations
type EvalListRequest struct {
	QueryOptions
}

// PlanRequest is used to submit an allocation plan to the leader
type PlanRequest struct {
	Plan *Plan
	WriteRequest
}
// ApplyPlanResultsRequest is used by the planner to apply a Raft transaction
// committing the result of a plan.
type ApplyPlanResultsRequest struct {
	// AllocUpdateRequest holds the allocation updates to be made by the
	// scheduler.
	AllocUpdateRequest

	// Deployment is the deployment created or updated as a result of a
	// scheduling event.
	Deployment *Deployment

	// DeploymentUpdates is a set of status updates to apply to the given
	// deployments. This allows the scheduler to cancel any unneeded deployment
	// because the job is stopped or the update block is removed.
	DeploymentUpdates []*DeploymentStatusUpdate
}

// AllocUpdateRequest is used to submit changes to allocations, either
// to cause evictions or to assign new allocations. Both can be done
// within a single transaction
type AllocUpdateRequest struct {
	// Alloc is the list of new allocations to assign
	Alloc []*Allocation

	// Job is the shared parent job of the allocations.
	// It is pulled out since it is common to reduce payload size.
	Job *Job

	WriteRequest
}

// AllocListRequest is used to request a list of allocations
type AllocListRequest struct {
	QueryOptions
}

// AllocSpecificRequest is used to query a specific allocation
type AllocSpecificRequest struct {
	AllocID string
	QueryOptions
}

// AllocsGetRequest is used to query a set of allocations
type AllocsGetRequest struct {
	AllocIDs []string
	QueryOptions
}

// PeriodicForceRequest is used to force a specific periodic job.
type PeriodicForceRequest struct {
	JobID string
	WriteRequest
}
// ServerMembersResponse has the list of servers in a cluster
type ServerMembersResponse struct {
	ServerName   string
	ServerRegion string
	ServerDC     string
	Members      []*ServerMember
}

// ServerMember holds information about a Nomad server agent in a cluster
type ServerMember struct {
	Name        string
	Addr        net.IP
	Port        uint16
	Tags        map[string]string
	Status      string
	ProtocolMin uint8
	ProtocolMax uint8
	ProtocolCur uint8
	DelegateMin uint8
	DelegateMax uint8
	DelegateCur uint8
}

// DeriveVaultTokenRequest is used to request wrapped Vault tokens for the
// following tasks in the given allocation
type DeriveVaultTokenRequest struct {
	NodeID   string
	SecretID string
	AllocID  string
	Tasks    []string
	QueryOptions
}

// VaultAccessorsRequest is used to operate on a set of Vault accessors
type VaultAccessorsRequest struct {
	Accessors []*VaultAccessor
}

// VaultAccessor is a reference to a created Vault token on behalf of
// an allocation's task.
type VaultAccessor struct {
	AllocID     string
	Task        string
	NodeID      string
	Accessor    string
	CreationTTL int

	// Raft Indexes
	CreateIndex uint64
}

// DeriveVaultTokenResponse returns the wrapped tokens for each requested task
type DeriveVaultTokenResponse struct {
	// Tasks is a mapping between the task name and the wrapped token
	Tasks map[string]string

	// Error stores any error that occurred. Errors are stored here so we can
	// communicate whether it is retriable
	Error *RecoverableError

	QueryMeta
}
// GenericRequest is used to request where no
// specific information is needed.
type GenericRequest struct {
	QueryOptions
}

// DeploymentListRequest is used to list the deployments
type DeploymentListRequest struct {
	QueryOptions
}

// DeploymentDeleteRequest is used for deleting deployments.
type DeploymentDeleteRequest struct {
	Deployments []string
	WriteRequest
}

// DeploymentStatusUpdateRequest is used to update the status of a deployment as
// well as optionally creating an evaluation atomically.
type DeploymentStatusUpdateRequest struct {
	// Eval, if set, is used to create an evaluation at the same time as
	// updating the status of a deployment.
	Eval *Evaluation

	// DeploymentUpdate is a status update to apply to the given
	// deployment.
	DeploymentUpdate *DeploymentStatusUpdate

	// Job is used to optionally upsert a job. This is used when setting the
	// allocation health results in a deployment failure and the deployment
	// auto-reverts to the latest stable job.
	Job *Job
}

// DeploymentAllocHealthRequest is used to set the health of a set of
// allocations as part of a deployment.
type DeploymentAllocHealthRequest struct {
	DeploymentID string

	// Marks these allocations as healthy, allow further allocations
	// to be rolled.
	HealthyAllocationIDs []string

	// Any unhealthy allocations fail the deployment
	UnhealthyAllocationIDs []string

	WriteRequest
}

// ApplyDeploymentAllocHealthRequest is used to apply an alloc health request via Raft
type ApplyDeploymentAllocHealthRequest struct {
	DeploymentAllocHealthRequest

	// An optional field to update the status of a deployment
	DeploymentUpdate *DeploymentStatusUpdate

	// Job is used to optionally upsert a job. This is used when setting the
	// allocation health results in a deployment failure and the deployment
	// auto-reverts to the latest stable job.
	Job *Job

	// An optional evaluation to create after promoting the canaries
	Eval *Evaluation
}

// DeploymentPromoteRequest is used to promote task groups in a deployment
type DeploymentPromoteRequest struct {
	DeploymentID string

	// All is to promote all task groups
	All bool

	// Groups is used to set the promotion status per task group
	Groups []string

	WriteRequest
}

// ApplyDeploymentPromoteRequest is used to apply a promotion request via Raft
type ApplyDeploymentPromoteRequest struct {
	DeploymentPromoteRequest

	// An optional evaluation to create after promoting the canaries
	Eval *Evaluation
}

// DeploymentPauseRequest is used to pause a deployment
type DeploymentPauseRequest struct {
	DeploymentID string

	// Pause sets the pause status
	Pause bool

	WriteRequest
}

// DeploymentSpecificRequest is used to make a request specific to a particular
// deployment
type DeploymentSpecificRequest struct {
	DeploymentID string
	QueryOptions
}

// DeploymentFailRequest is used to fail a particular deployment
type DeploymentFailRequest struct {
	DeploymentID string
	WriteRequest
}

// SingleDeploymentResponse is used to respond with a single deployment
type SingleDeploymentResponse struct {
	Deployment *Deployment
	QueryMeta
}
// GenericResponse is used to respond to a request where no
// specific response information is needed.
type GenericResponse struct {
WriteMeta
}
// VersionResponse is used for the Status.Version response
type VersionResponse struct {
	// Build is the build identifier of the responding server
	Build    string
	Versions map[string]int
	QueryMeta
}

// JobRegisterResponse is used to respond to a job registration
type JobRegisterResponse struct {
	EvalID          string
	EvalCreateIndex uint64
	JobModifyIndex  uint64

	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string

	QueryMeta
}

// JobDeregisterResponse is used to respond to a job deregistration
type JobDeregisterResponse struct {
	EvalID          string
	EvalCreateIndex uint64
	JobModifyIndex  uint64
	QueryMeta
}

// JobValidateResponse is the response from validate request
type JobValidateResponse struct {
	// DriverConfigValidated indicates whether the agent validated the driver
	// config
	DriverConfigValidated bool

	// ValidationErrors is a list of validation errors
	ValidationErrors []string

	// Error is a string version of any error that may have occurred
	Error string

	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string
}
// NodeUpdateResponse is used to respond to a node update
type NodeUpdateResponse struct {
	// HeartbeatTTL is the interval within which the node must heartbeat again
	HeartbeatTTL    time.Duration
	EvalIDs         []string
	EvalCreateIndex uint64
	NodeModifyIndex uint64

	// LeaderRPCAddr is the RPC address of the current Raft Leader. If
	// empty, the current Nomad Server is in the minority of a partition.
	LeaderRPCAddr string

	// NumNodes is the number of Nomad nodes attached to this quorum of
	// Nomad Servers at the time of the response. This value can
	// fluctuate based on the health of the cluster between heartbeats.
	NumNodes int32

	// Servers is the full list of known Nomad servers in the local
	// region.
	Servers []*NodeServerInfo

	QueryMeta
}

// NodeDrainUpdateResponse is used to respond to a node drain update
type NodeDrainUpdateResponse struct {
	EvalIDs         []string
	EvalCreateIndex uint64
	NodeModifyIndex uint64
	QueryMeta
}

// NodeAllocsResponse is used to return allocs for a single node
type NodeAllocsResponse struct {
	Allocs []*Allocation
	QueryMeta
}

// NodeClientAllocsResponse is used to return allocs meta data for a single node
type NodeClientAllocsResponse struct {
	// Allocs maps allocation IDs to their allocation modify index
	Allocs map[string]uint64
	QueryMeta
}

// SingleNodeResponse is used to return a single node
type SingleNodeResponse struct {
	Node *Node
	QueryMeta
}

// NodeListResponse is used for a list request
type NodeListResponse struct {
	Nodes []*NodeListStub
	QueryMeta
}

// SingleJobResponse is used to return a single job
type SingleJobResponse struct {
	Job *Job
	QueryMeta
}

// JobSummaryResponse is used to return a single job summary
type JobSummaryResponse struct {
	JobSummary *JobSummary
	QueryMeta
}

// JobDispatchResponse is used to respond to a parameterized job dispatch
type JobDispatchResponse struct {
	DispatchedJobID string
	EvalID          string
	EvalCreateIndex uint64
	JobCreateIndex  uint64
	WriteMeta
}
// JobListResponse is used for a list request
type JobListResponse struct {
	Jobs []*JobListStub
	QueryMeta
}

// JobVersionsRequest is used to get a jobs versions
type JobVersionsRequest struct {
	JobID string
	// Diffs requests that pairwise diffs between versions be computed
	Diffs bool
	QueryOptions
}

// JobVersionsResponse is used for a job get versions request
type JobVersionsResponse struct {
	Versions []*Job
	Diffs    []*JobDiff
	QueryMeta
}

// JobPlanResponse is used to respond to a job plan request
type JobPlanResponse struct {
	// Annotations stores annotations explaining decisions the scheduler made.
	Annotations *PlanAnnotations

	// FailedTGAllocs is the placement failures per task group.
	FailedTGAllocs map[string]*AllocMetric

	// JobModifyIndex is the modification index of the job. The value can be
	// used when running `nomad run` to ensure that the Job wasn’t modified
	// since the last plan. If the job is being created, the value is zero.
	JobModifyIndex uint64

	// CreatedEvals is the set of evaluations created by the scheduler. The
	// reasons for this can be rolling-updates or blocked evals.
	CreatedEvals []*Evaluation

	// Diff contains the diff of the job and annotations on whether the change
	// causes an in-place update or create/destroy
	Diff *JobDiff

	// NextPeriodicLaunch is the time duration till the job would be launched if
	// submitted.
	NextPeriodicLaunch time.Time

	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string

	WriteMeta
}
// SingleAllocResponse is used to return a single allocation
type SingleAllocResponse struct {
	Alloc *Allocation
	QueryMeta
}

// AllocsGetResponse is used to return a set of allocations
type AllocsGetResponse struct {
	Allocs []*Allocation
	QueryMeta
}

// JobAllocationsResponse is used to return the allocations for a job
type JobAllocationsResponse struct {
	Allocations []*AllocListStub
	QueryMeta
}

// JobEvaluationsResponse is used to return the evaluations for a job
type JobEvaluationsResponse struct {
	Evaluations []*Evaluation
	QueryMeta
}

// SingleEvalResponse is used to return a single evaluation
type SingleEvalResponse struct {
	Eval *Evaluation
	QueryMeta
}

// EvalDequeueResponse is used to return from a dequeue
type EvalDequeueResponse struct {
	Eval *Evaluation
	// Token is the lease token associated with the dequeued evaluation
	Token string
	QueryMeta
}

// PlanResponse is used to return from a PlanRequest
type PlanResponse struct {
	Result *PlanResult
	WriteMeta
}

// AllocListResponse is used for a list request
type AllocListResponse struct {
	Allocations []*AllocListStub
	QueryMeta
}

// DeploymentListResponse is used for a list request
type DeploymentListResponse struct {
	Deployments []*Deployment
	QueryMeta
}

// EvalListResponse is used for a list request
type EvalListResponse struct {
	Evaluations []*Evaluation
	QueryMeta
}

// EvalAllocationsResponse is used to return the allocations for an evaluation
type EvalAllocationsResponse struct {
	Allocations []*AllocListStub
	QueryMeta
}

// PeriodicForceResponse is used to respond to a periodic job force launch
type PeriodicForceResponse struct {
	EvalID          string
	EvalCreateIndex uint64
	WriteMeta
}

// DeploymentUpdateResponse is used to respond to a deployment change. The
// response will include the modify index of the deployment as well as details
// of any triggered evaluation.
type DeploymentUpdateResponse struct {
	EvalID                string
	EvalCreateIndex       uint64
	DeploymentModifyIndex uint64

	// RevertedJobVersion is the version the job was reverted to. If unset, the
	// job wasn't reverted
	RevertedJobVersion *uint64

	WriteMeta
}
// Valid values for Node.Status.
const (
	NodeStatusInit  = "initializing"
	NodeStatusReady = "ready"
	NodeStatusDown  = "down"
)
// ShouldDrainNode checks if a given node status should trigger an
// evaluation. Some states don't require any further action.
// Unknown statuses are a programmer error and cause a panic.
func ShouldDrainNode(status string) bool {
	if status == NodeStatusInit || status == NodeStatusReady {
		// Nothing to drain while the node is coming up or healthy.
		return false
	}
	if status == NodeStatusDown {
		return true
	}
	panic(fmt.Sprintf("unhandled node status %s", status))
}
// ValidNodeStatus is used to check if a node status is valid
func ValidNodeStatus(status string) bool {
	// The only accepted values are the three NodeStatus* constants.
	return status == NodeStatusInit ||
		status == NodeStatusReady ||
		status == NodeStatusDown
}
// Node is a representation of a schedulable client node
type Node struct {
	// ID is a unique identifier for the node. It can be constructed
	// by doing a concatenation of the Name and Datacenter as a simple
	// approach. Alternatively a UUID may be used.
	ID string

	// SecretID is an ID that is only known by the Node and the set of Servers.
	// It is not accessible via the API and is used to authenticate nodes
	// conducting privileged activities.
	SecretID string

	// Datacenter for this node
	Datacenter string

	// Node name
	Name string

	// HTTPAddr is the address on which the Nomad client is listening for http
	// requests
	HTTPAddr string

	// TLSEnabled indicates if the Agent has TLS enabled for the HTTP API
	TLSEnabled bool

	// Attributes is an arbitrary set of key/value
	// data that can be used for constraints. Examples
	// include "kernel.name=linux", "arch=386", "driver.docker=1",
	// "docker.runtime=1.8.3"
	Attributes map[string]string

	// Resources is the available resources on the client.
	// For example 'cpu=2' 'memory=2048'
	Resources *Resources

	// Reserved is the set of resources that are reserved,
	// and should be subtracted from the total resources for
	// the purposes of scheduling. This may provide certain
	// high-watermark tolerances or because of external schedulers
	// consuming resources.
	Reserved *Resources

	// Links are used to 'link' this client to external
	// systems. For example 'consul=foo.dc1' 'aws=i-83212'
	// 'ami=ami-123'
	Links map[string]string

	// Meta is used to associate arbitrary metadata with this
	// client. This is opaque to Nomad.
	Meta map[string]string

	// NodeClass is an opaque identifier used to group nodes
	// together for the purpose of determining scheduling pressure.
	NodeClass string

	// ComputedClass is a unique id that identifies nodes with a common set of
	// attributes and capabilities.
	ComputedClass string

	// Drain is controlled by the servers, and not the client.
	// If true, no jobs will be scheduled to this node, and existing
	// allocations will be drained.
	Drain bool

	// Status of this node
	Status string

	// StatusDescription is meant to provide more human useful information
	StatusDescription string

	// StatusUpdatedAt is the time stamp at which the state of the node was
	// updated
	StatusUpdatedAt int64

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
// Ready returns if the node is ready for running allocations
func (n *Node) Ready() bool {
	// A draining node never accepts new work, regardless of status.
	if n.Drain {
		return false
	}
	return n.Status == NodeStatusReady
}
// Copy returns a deep copy of the node; a nil receiver yields nil.
func (n *Node) Copy() *Node {
	if n == nil {
		return nil
	}

	// Shallow-copy the struct, then replace the reference-typed fields
	// with deep copies so the clone shares no mutable state.
	clone := *n
	clone.Attributes = helper.CopyMapStringString(n.Attributes)
	clone.Links = helper.CopyMapStringString(n.Links)
	clone.Meta = helper.CopyMapStringString(n.Meta)
	clone.Resources = n.Resources.Copy()
	clone.Reserved = n.Reserved.Copy()
	return &clone
}
// TerminalStatus returns if the current status is terminal and
// will no longer transition.
func (n *Node) TerminalStatus() bool {
	// Down is the only terminal node state.
	return n.Status == NodeStatusDown
}
// Stub returns a summarized version of the node suitable for list endpoints.
func (n *Node) Stub() *NodeListStub {
	return &NodeListStub{
		ID:                n.ID,
		Datacenter:        n.Datacenter,
		Name:              n.Name,
		NodeClass:         n.NodeClass,
		Drain:             n.Drain,
		Status:            n.Status,
		StatusDescription: n.StatusDescription,
		CreateIndex:       n.CreateIndex,
		ModifyIndex:       n.ModifyIndex,
	}
}
// NodeListStub is used to return a subset of node information
// for the node list
type NodeListStub struct {
	ID                string
	Datacenter        string
	Name              string
	NodeClass         string
	Drain             bool
	Status            string
	StatusDescription string
	CreateIndex       uint64
	ModifyIndex       uint64
}
// Networks defined for a task on the Resources struct.
type Networks []*NetworkResource
// Port returns the IP and port assignment for the given label, searching
// both reserved and dynamic ports, or empty values when not found.
func (ns Networks) Port(label string) (string, int) {
	for _, net := range ns {
		for _, port := range net.ReservedPorts {
			if port.Label == label {
				return net.IP, port.Value
			}
		}
		for _, port := range net.DynamicPorts {
			if port.Label == label {
				return net.IP, port.Value
			}
		}
	}
	return "", 0
}
// Resources is used to define the resources available
// on a client
type Resources struct {
	CPU      int
	MemoryMB int
	DiskMB   int
	IOPS     int
	Networks Networks
}

// BytesInMegabyte is the conversion factor from megabytes to bytes.
const (
	BytesInMegabyte = 1024 * 1024
)
// DefaultResources returns the default resources for a task.
// DiskMB and Networks are left at their zero values.
func DefaultResources() *Resources {
	return &Resources{
		CPU:      100,
		MemoryMB: 10,
		IOPS:     0,
	}
}
// DiskInBytes returns the amount of disk resources in bytes.
//
// The multiplication is performed in int64: the previous form
// int64(r.DiskMB * BytesInMegabyte) multiplied in the platform int type
// first, which overflows on 32-bit platforms for DiskMB values above
// roughly 2047 (2047 * 1024 * 1024 > math.MaxInt32).
func (r *Resources) DiskInBytes() int64 {
	return int64(r.DiskMB) * BytesInMegabyte
}
// Merge overlays every non-zero field of other onto the receiver,
// leaving fields that other does not set untouched.
func (r *Resources) Merge(other *Resources) {
	if c := other.CPU; c != 0 {
		r.CPU = c
	}
	if m := other.MemoryMB; m != 0 {
		r.MemoryMB = m
	}
	if d := other.DiskMB; d != 0 {
		r.DiskMB = d
	}
	if i := other.IOPS; i != 0 {
		r.IOPS = i
	}
	if nets := other.Networks; len(nets) != 0 {
		// Networks replace wholesale rather than merging element-wise.
		r.Networks = nets
	}
}
// Canonicalize normalizes the resources so that empty and nil slices
// compare equal; the scheduler relies on reflect.DeepEqual comparisons.
func (r *Resources) Canonicalize() {
	if len(r.Networks) == 0 {
		r.Networks = nil
		return
	}
	for _, net := range r.Networks {
		net.Canonicalize()
	}
}
// MeetsMinResources returns an error if the resources specified are less than
// the minimum allowed.
func (r *Resources) MeetsMinResources() error {
	var errs multierror.Error
	if r.CPU < 20 {
		errs.Errors = append(errs.Errors, fmt.Errorf("minimum CPU value is 20; got %d", r.CPU))
	}
	if r.MemoryMB < 10 {
		errs.Errors = append(errs.Errors, fmt.Errorf("minimum MemoryMB value is 10; got %d", r.MemoryMB))
	}
	if r.IOPS < 0 {
		errs.Errors = append(errs.Errors, fmt.Errorf("minimum IOPS value is 0; got %d", r.IOPS))
	}
	// Each network resource enforces its own minimums.
	for idx, net := range r.Networks {
		if err := net.MeetsMinResources(); err != nil {
			errs.Errors = append(errs.Errors, fmt.Errorf("network resource at index %d failed: %v", idx, err))
		}
	}
	return errs.ErrorOrNil()
}
// Copy returns a deep copy of the resources
func (r *Resources) Copy() *Resources {
	if r == nil {
		return nil
	}

	// Shallow-copy the scalars, then deep-copy the network slice.
	clone := *r
	if r.Networks != nil {
		clone.Networks = make([]*NetworkResource, len(r.Networks))
		for i, net := range r.Networks {
			clone.Networks[i] = net.Copy()
		}
	}
	return &clone
}
// NetIndex finds the matching net index using device name
func (r *Resources) NetIndex(n *NetworkResource) int {
	for i := range r.Networks {
		if r.Networks[i].Device == n.Device {
			return i
		}
	}
	// No network with a matching device.
	return -1
}
// Superset checks if one set of resources is a superset
// of another. This ignores network resources, and the NetworkIndex
// should be used for that. On failure the second return value names
// the first exhausted dimension.
func (r *Resources) Superset(other *Resources) (bool, string) {
	switch {
	case r.CPU < other.CPU:
		return false, "cpu exhausted"
	case r.MemoryMB < other.MemoryMB:
		return false, "memory exhausted"
	case r.DiskMB < other.DiskMB:
		return false, "disk exhausted"
	case r.IOPS < other.IOPS:
		return false, "iops exhausted"
	}
	return true, ""
}
// Add adds the resources of the delta to this, potentially
// returning an error if not possible. A nil delta is a no-op.
// NOTE: the error return is currently always nil.
func (r *Resources) Add(delta *Resources) error {
	if delta == nil {
		return nil
	}
	r.CPU += delta.CPU
	r.MemoryMB += delta.MemoryMB
	r.DiskMB += delta.DiskMB
	r.IOPS += delta.IOPS

	for _, n := range delta.Networks {
		// Find the matching interface by device name (see NetIndex);
		// append a copy when no existing network matches.
		idx := r.NetIndex(n)
		if idx == -1 {
			r.Networks = append(r.Networks, n.Copy())
		} else {
			r.Networks[idx].Add(n)
		}
	}
	return nil
}
// GoString implements fmt.GoStringer for %#v formatting.
func (r *Resources) GoString() string {
	return fmt.Sprintf("*%#v", *r)
}

// Port is a label/value pair describing a single port assignment.
type Port struct {
	Label string
	Value int
}
// NetworkResource is used to represent available network
// resources
type NetworkResource struct {
	Device        string // Name of the device
	CIDR          string // CIDR block of addresses
	IP            string // Host IP address
	MBits         int    // Throughput
	ReservedPorts []Port // Host Reserved ports
	DynamicPorts  []Port // Host Dynamically assigned ports
}
// Canonicalize normalizes the network resource for comparison.
func (n *NetworkResource) Canonicalize() {
	// Ensure that empty and nil slices are treated the same to avoid scheduling
	// problems since we use reflect DeepEquals.
	if len(n.ReservedPorts) == 0 {
		n.ReservedPorts = nil
	}
	if len(n.DynamicPorts) == 0 {
		n.DynamicPorts = nil
	}
}
// MeetsMinResources returns an error if the resources specified are less than
// the minimum allowed.
func (n *NetworkResource) MeetsMinResources() error {
	var errs multierror.Error
	if n.MBits < 1 {
		errs.Errors = append(errs.Errors, fmt.Errorf("minimum MBits value is 1; got %d", n.MBits))
	}
	return errs.ErrorOrNil()
}
// Copy returns a deep copy of the network resource
func (n *NetworkResource) Copy() *NetworkResource {
	if n == nil {
		return nil
	}

	// Copy the struct and then detach the port slices so the clone
	// does not alias the original backing arrays.
	clone := *n
	if n.ReservedPorts != nil {
		clone.ReservedPorts = make([]Port, len(n.ReservedPorts))
		copy(clone.ReservedPorts, n.ReservedPorts)
	}
	if n.DynamicPorts != nil {
		clone.DynamicPorts = make([]Port, len(n.DynamicPorts))
		copy(clone.DynamicPorts, n.DynamicPorts)
	}
	return &clone
}
// Add folds delta's bandwidth and port assignments into the receiver.
func (n *NetworkResource) Add(delta *NetworkResource) {
	n.MBits += delta.MBits
	if len(delta.ReservedPorts) > 0 {
		n.ReservedPorts = append(n.ReservedPorts, delta.ReservedPorts...)
	}
	n.DynamicPorts = append(n.DynamicPorts, delta.DynamicPorts...)
}
// GoString implements fmt.GoStringer for %#v formatting.
func (n *NetworkResource) GoString() string {
	return fmt.Sprintf("*%#v", *n)
}
// PortLabels returns a map of port labels to their assigned host ports.
func (n *NetworkResource) PortLabels() map[string]int {
	labels := make(map[string]int, len(n.ReservedPorts)+len(n.DynamicPorts))
	for _, p := range n.ReservedPorts {
		labels[p.Label] = p.Value
	}
	for _, p := range n.DynamicPorts {
		labels[p.Label] = p.Value
	}
	return labels
}
const (
	// JobTypeCore is reserved for internal system tasks and is
	// always handled by the CoreScheduler.
	JobTypeCore    = "_core"
	JobTypeService = "service"
	JobTypeBatch   = "batch"
	JobTypeSystem  = "system"
)

const (
	JobStatusPending = "pending" // Pending means the job is waiting on scheduling
	JobStatusRunning = "running" // Running means the job has non-terminal allocations
	JobStatusDead    = "dead"    // Dead means all evaluations and allocations are terminal
)

const (
	// JobMinPriority is the minimum allowed priority
	JobMinPriority = 1

	// JobDefaultPriority is the default priority if not
	// specified.
	JobDefaultPriority = 50

	// JobMaxPriority is the maximum allowed priority
	JobMaxPriority = 100

	// Ensure CoreJobPriority is higher than any user
	// specified job so that it gets priority. This is important
	// for the system to remain healthy.
	CoreJobPriority = JobMaxPriority * 2

	// JobTrackedVersions is the number of historic job versions that are
	// kept.
	JobTrackedVersions = 6
)
// Job is the scope of a scheduling request to Nomad. It is the largest
// scoped object, and is a named collection of task groups. Each task group
// is further composed of tasks. A task group (TG) is the unit of scheduling
// however.
type Job struct {
	// Stop marks whether the user has stopped the job. A stopped job will
	// have all created allocations stopped and acts as a way to stop a job
	// without purging it from the system. This allows existing allocs to be
	// queried and the job to be inspected as it is being killed.
	Stop bool

	// Region is the Nomad region that handles scheduling this job
	Region string

	// ID is a unique identifier for the job per region. It can be
	// specified hierarchically like LineOfBiz/OrgName/Team/Project
	ID string

	// ParentID is the unique identifier of the job that spawned this job.
	ParentID string

	// Name is the logical name of the job used to refer to it. This is unique
	// per region, but not unique globally.
	Name string

	// Type is used to control various behaviors about the job. Most jobs
	// are service jobs, meaning they are expected to be long lived.
	// Some jobs are batch oriented meaning they run and then terminate.
	// This can be extended in the future to support custom schedulers.
	Type string

	// Priority is used to control scheduling importance and if this job
	// can preempt other jobs.
	Priority int

	// AllAtOnce is used to control if incremental scheduling of task groups
	// is allowed or if we must do a gang scheduling of the entire job. This
	// can slow down larger jobs if resources are not available.
	AllAtOnce bool

	// Datacenters contains all the datacenters this job is allowed to span
	Datacenters []string

	// Constraints can be specified at a job level and apply to
	// all the task groups and tasks.
	Constraints []*Constraint

	// TaskGroups are the collections of task groups that this job needs
	// to run. Each task group is an atomic unit of scheduling and placement.
	TaskGroups []*TaskGroup

	// COMPAT: Remove in 0.7.0. Stagger is deprecated in 0.6.0.
	Update UpdateStrategy

	// Periodic is used to define the interval the job is run at.
	Periodic *PeriodicConfig

	// ParameterizedJob is used to specify the job as a parameterized job
	// for dispatching.
	ParameterizedJob *ParameterizedJobConfig

	// Payload is the payload supplied when the job was dispatched.
	Payload []byte

	// Meta is used to associate arbitrary metadata with this
	// job. This is opaque to Nomad.
	Meta map[string]string

	// VaultToken is the Vault token that proves the submitter of the job has
	// access to the specified Vault policies. This field is only used to
	// transfer the token and is not stored after Job submission.
	VaultToken string

	// Job status
	Status string

	// StatusDescription is meant to provide more human useful information
	StatusDescription string

	// Stable marks a job as stable. Stability is only defined on "service" and
	// "system" jobs. The stability of a job will be set automatically as part
	// of a deployment and can be manually set via APIs.
	Stable bool

	// Version is a monotonically increasing version number that is incremented
	// on each job register.
	Version uint64

	// SubmitTime is the time at which the job was submitted as a UnixNano in
	// UTC
	SubmitTime int64

	// Raft Indexes
	CreateIndex    uint64
	ModifyIndex    uint64
	JobModifyIndex uint64
}
// Canonicalize is used to canonicalize fields in the Job. This should be called
// when registering a Job. A set of warnings are returned if the job was changed
// in anyway that the user should be made aware of.
func (j *Job) Canonicalize() (warnings error) {
	var mErr multierror.Error

	// Ensure that an empty and nil map are treated the same to avoid scheduling
	// problems since we use reflect DeepEquals.
	if len(j.Meta) == 0 {
		j.Meta = nil
	}

	for _, tg := range j.TaskGroups {
		tg.Canonicalize(j)
	}

	if j.ParameterizedJob != nil {
		j.ParameterizedJob.Canonicalize()
	}

	if j.Periodic != nil {
		j.Periodic.Canonicalize()
	}

	// COMPAT: Remove in 0.7.0
	// Rewrite any job that has an update block with pre 0.6.0 syntax.
	jobHasOldUpdate := j.Update.Stagger > 0 && j.Update.MaxParallel > 0
	if jobHasOldUpdate && j.Type != JobTypeBatch {
		// Build an appropriate update block and copy it down to each task group
		base := DefaultUpdateStrategy.Copy()
		base.MaxParallel = j.Update.MaxParallel
		base.MinHealthyTime = j.Update.Stagger

		// Add to each task group, modifying as needed
		upgraded := false
		l := len(j.TaskGroups)
		for _, tg := range j.TaskGroups {
			// The task group doesn't need upgrading if it has an update block with the new syntax
			u := tg.Update
			if u != nil && u.Stagger > 0 && u.MaxParallel > 0 &&
				u.HealthCheck != "" && u.MinHealthyTime > 0 && u.HealthyDeadline > 0 {
				continue
			}

			upgraded = true

			// The MaxParallel for the job should be 10% of the total count
			// unless there is just one task group then we can infer the old
			// max parallel should be the new
			tgu := base.Copy()
			if l != 1 {
				// Round to the nearest integer of 10% of the group count.
				var percent float64 = float64(tg.Count) * 0.1
				tgu.MaxParallel = int(percent + 0.5)
			}

			// Safety guards: keep MaxParallel within [1, tg.Count].
			if tgu.MaxParallel == 0 {
				tgu.MaxParallel = 1
			} else if tgu.MaxParallel > tg.Count {
				tgu.MaxParallel = tg.Count
			}

			tg.Update = tgu
		}

		if upgraded {
			w := "A best effort conversion to new update stanza introduced in v0.6.0 applied. " +
				"Please update upgrade stanza before v0.7.0."
			multierror.Append(&mErr, fmt.Errorf(w))
		}
	}

	// Ensure that the batch job doesn't have new style or old style update
	// stanza. Unfortunately we are scanning here because we have to deprecate over
	// a release so we can't check in the task group since that may be new style
	// but wouldn't capture the old style and we don't want to have duplicate
	// warnings.
	if j.Type == JobTypeBatch {
		displayWarning := jobHasOldUpdate
		j.Update.Stagger = 0
		j.Update.MaxParallel = 0
		j.Update.HealthCheck = ""
		j.Update.MinHealthyTime = 0
		j.Update.HealthyDeadline = 0
		j.Update.AutoRevert = false
		j.Update.Canary = 0

		// Remove any update spec from the task groups
		for _, tg := range j.TaskGroups {
			if tg.Update != nil {
				displayWarning = true
				tg.Update = nil
			}
		}

		if displayWarning {
			w := "Update stanza is disallowed for batch jobs since v0.6.0. " +
				"The update block has automatically been removed"
			multierror.Append(&mErr, fmt.Errorf(w))
		}
	}

	return mErr.ErrorOrNil()
}
// Copy returns a deep copy of the Job. It is expected that callers use recover.
// This job can panic if the deep copy failed as it uses reflection.
func (j *Job) Copy() *Job {
	if j == nil {
		return nil
	}

	// Start from a shallow copy and deep-copy each reference field.
	clone := *j
	clone.Datacenters = helper.CopySliceString(j.Datacenters)
	clone.Constraints = CopySliceConstraints(j.Constraints)
	if j.TaskGroups != nil {
		clone.TaskGroups = make([]*TaskGroup, len(j.TaskGroups))
		for i, tg := range j.TaskGroups {
			clone.TaskGroups[i] = tg.Copy()
		}
	}
	clone.Periodic = j.Periodic.Copy()
	clone.Meta = helper.CopyMapStringString(j.Meta)
	clone.ParameterizedJob = j.ParameterizedJob.Copy()
	return &clone
}
// Validate is used to sanity check a job input. All detected problems are
// accumulated and returned as a single multierror (nil when valid).
func (j *Job) Validate() error {
	var mErr multierror.Error

	if j.Region == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job region"))
	}
	if j.ID == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job ID"))
	} else if strings.Contains(j.ID, " ") {
		mErr.Errors = append(mErr.Errors, errors.New("Job ID contains a space"))
	}
	if j.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job name"))
	}
	switch j.Type {
	case JobTypeCore, JobTypeService, JobTypeBatch, JobTypeSystem:
	case "":
		mErr.Errors = append(mErr.Errors, errors.New("Missing job type"))
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Invalid job type: %q", j.Type))
	}
	if j.Priority < JobMinPriority || j.Priority > JobMaxPriority {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Job priority must be between [%d, %d]", JobMinPriority, JobMaxPriority))
	}
	if len(j.Datacenters) == 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job datacenters"))
	}
	if len(j.TaskGroups) == 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job task groups"))
	}
	for idx, constr := range j.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	// Check for duplicate task groups
	taskGroups := make(map[string]int)
	for idx, tg := range j.TaskGroups {
		if tg.Name == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d missing name", idx+1))
		} else if existing, ok := taskGroups[tg.Name]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d redefines '%s' from group %d", idx+1, tg.Name, existing+1))
		} else {
			taskGroups[tg.Name] = idx
		}

		// Use the JobTypeSystem constant rather than a raw "system"
		// literal, consistent with the job type switch above.
		if j.Type == JobTypeSystem && tg.Count > 1 {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("Job task group %s has count %d. Count cannot exceed 1 with system scheduler",
					tg.Name, tg.Count))
		}
	}

	// Validate the task group
	for _, tg := range j.TaskGroups {
		if err := tg.Validate(j); err != nil {
			outer := fmt.Errorf("Task group %s validation failed: %v", tg.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	// Validate periodic is only used with batch jobs.
	if j.IsPeriodic() && j.Periodic.Enabled {
		if j.Type != JobTypeBatch {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("Periodic can only be used with %q scheduler", JobTypeBatch))
		}

		if err := j.Periodic.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}

	if j.IsParameterized() {
		if j.Type != JobTypeBatch {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("Parameterized job can only be used with %q scheduler", JobTypeBatch))
		}

		if err := j.ParameterizedJob.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}

	return mErr.ErrorOrNil()
}
// Warnings returns a list of warnings that may be from dubious settings or
// deprecation warnings.
func (j *Job) Warnings() error {
	var mErr multierror.Error

	// Collect warnings from every task group, wrapping each with its
	// group name for context.
	for _, tg := range j.TaskGroups {
		err := tg.Warnings(j)
		if err == nil {
			continue
		}
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Group %q has warnings: %v", tg.Name, err))
	}

	return mErr.ErrorOrNil()
}
// LookupTaskGroup finds a task group by name, returning nil when absent.
func (j *Job) LookupTaskGroup(name string) *TaskGroup {
	for i := range j.TaskGroups {
		if j.TaskGroups[i].Name == name {
			return j.TaskGroups[i]
		}
	}
	return nil
}
// CombinedTaskMeta takes a TaskGroup and Task name and returns the combined
// meta data for the task. When joining Job, Group and Task Meta, the precedence
// is by deepest scope (Task > Group > Job). Returns nil when the group or
// task cannot be found.
func (j *Job) CombinedTaskMeta(groupName, taskName string) map[string]string {
	group := j.LookupTaskGroup(groupName)
	if group == nil {
		return nil
	}

	task := group.LookupTask(taskName)
	if task == nil {
		return nil
	}

	// Start from the task-level meta (deepest scope wins).
	merged := helper.CopyMapStringString(task.Meta)
	if merged == nil {
		merged = make(map[string]string, len(group.Meta)+len(j.Meta))
	}

	// Layer in group meta, then job meta, never overriding an
	// existing deeper-scoped key.
	for _, scope := range []map[string]string{group.Meta, j.Meta} {
		for k, v := range scope {
			if _, exists := merged[k]; !exists {
				merged[k] = v
			}
		}
	}

	return merged
}
// Stopped returns if a job is stopped. A nil job counts as stopped.
func (j *Job) Stopped() bool {
	if j == nil {
		return true
	}
	return j.Stop
}

// HasUpdateStrategy returns if any task group in the job has an update strategy
func (j *Job) HasUpdateStrategy() bool {
	for i := range j.TaskGroups {
		if j.TaskGroups[i].Update != nil {
			return true
		}
	}
	return false
}
// Stub is used to return a summary of the job suitable for job list
// endpoints; the caller supplies the (possibly nil) summary to embed.
func (j *Job) Stub(summary *JobSummary) *JobListStub {
	return &JobListStub{
		ID:                j.ID,
		ParentID:          j.ParentID,
		Name:              j.Name,
		Type:              j.Type,
		Priority:          j.Priority,
		Periodic:          j.IsPeriodic(),
		ParameterizedJob:  j.IsParameterized(),
		Stop:              j.Stop,
		Status:            j.Status,
		StatusDescription: j.StatusDescription,
		CreateIndex:       j.CreateIndex,
		ModifyIndex:       j.ModifyIndex,
		JobModifyIndex:    j.JobModifyIndex,
		SubmitTime:        j.SubmitTime,
		JobSummary:        summary,
	}
}
// IsPeriodic returns whether a job is periodic (has a Periodic config).
func (j *Job) IsPeriodic() bool {
	return j.Periodic != nil
}

// IsParameterized returns whether a job is a parameterized job.
func (j *Job) IsParameterized() bool {
	return j.ParameterizedJob != nil
}
// VaultPolicies returns the set of Vault policies per task group, per task.
// Groups with no Vault-enabled tasks are omitted from the result.
func (j *Job) VaultPolicies() map[string]map[string]*Vault {
	out := make(map[string]map[string]*Vault, len(j.TaskGroups))

	for _, tg := range j.TaskGroups {
		perTask := make(map[string]*Vault, len(tg.Tasks))
		for _, task := range tg.Tasks {
			if task.Vault != nil {
				perTask[task.Name] = task.Vault
			}
		}

		if len(perTask) > 0 {
			out[tg.Name] = perTask
		}
	}

	return out
}
// RequiredSignals returns a mapping of task groups to tasks to their required
// set of signals. Tasks needing no signals are omitted.
func (j *Job) RequiredSignals() map[string]map[string][]string {
	signals := make(map[string]map[string][]string)

	for _, tg := range j.TaskGroups {
		for _, task := range tg.Tasks {
			// Collect this task's signals as a set to dedupe.
			set := make(map[string]struct{})

			// Vault change mode may deliver a signal.
			if v := task.Vault; v != nil && v.ChangeMode == VaultChangeModeSignal {
				set[v.ChangeSignal] = struct{}{}
			}

			// Templates may also deliver signals on change.
			for _, tmpl := range task.Templates {
				if tmpl.ChangeMode == TemplateChangeModeSignal {
					set[tmpl.ChangeSignal] = struct{}{}
				}
			}

			if len(set) == 0 {
				continue
			}

			// Flatten the set into a sorted slice.
			flat := make([]string, 0, len(set))
			for sig := range set {
				flat = append(flat, sig)
			}
			sort.Strings(flat)

			if signals[tg.Name] == nil {
				signals[tg.Name] = make(map[string][]string)
			}
			signals[tg.Name][task.Name] = flat
		}
	}

	return signals
}
// SpecChanged determines if the functional specification has changed between
// two job versions. Server-managed bookkeeping fields (status, version,
// indexes, submit time) are ignored in the comparison.
//
// The parameter was renamed from `new` to `other` since `new` shadows the
// Go builtin; the call interface is unchanged (Go arguments are positional).
func (j *Job) SpecChanged(other *Job) bool {
	if j == nil {
		return other != nil
	}

	// Create a copy of the other job so the bookkeeping fields can be
	// aligned without mutating the caller's value.
	c := other.Copy()

	// Update the copy so we can do a reflect.DeepEqual on just the spec.
	c.Status = j.Status
	c.StatusDescription = j.StatusDescription
	c.Stable = j.Stable
	c.Version = j.Version
	c.CreateIndex = j.CreateIndex
	c.ModifyIndex = j.ModifyIndex
	c.JobModifyIndex = j.JobModifyIndex
	c.SubmitTime = j.SubmitTime

	// Deep equals the jobs
	return !reflect.DeepEqual(j, c)
}
// SetSubmitTime records the current UTC time, in UnixNano, as the job's
// submission time.
func (j *Job) SetSubmitTime() {
	j.SubmitTime = time.Now().UTC().UnixNano()
}
// JobListStub is used to return a subset of job information
// for the job list
type JobListStub struct {
	ID                string
	ParentID          string
	Name              string
	Type              string
	Priority          int
	Periodic          bool
	ParameterizedJob  bool
	Stop              bool
	Status            string
	StatusDescription string
	// JobSummary is optionally attached by the caller (see Job.Stub)
	JobSummary     *JobSummary
	CreateIndex    uint64
	ModifyIndex    uint64
	JobModifyIndex uint64
	SubmitTime     int64
}
// JobSummary summarizes the state of the allocations of a job
type JobSummary struct {
	JobID string

	// Summary contains the summary per task group for the Job
	Summary map[string]TaskGroupSummary

	// Children contains a summary for the children of this job.
	Children *JobChildrenSummary

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
// Copy returns a new copy of JobSummary. A nil receiver returns nil,
// matching the other Copy helpers in this package (Node.Copy,
// Resources.Copy, JobChildrenSummary.Copy); previously a nil receiver
// panicked here.
func (js *JobSummary) Copy() *JobSummary {
	if js == nil {
		return nil
	}
	newJobSummary := new(JobSummary)
	*newJobSummary = *js
	newTGSummary := make(map[string]TaskGroupSummary, len(js.Summary))
	for k, v := range js.Summary {
		newTGSummary[k] = v
	}
	newJobSummary.Summary = newTGSummary
	newJobSummary.Children = newJobSummary.Children.Copy()
	return newJobSummary
}
// JobChildrenSummary contains the summary of children job statuses.
// Each field counts child jobs currently in that state.
type JobChildrenSummary struct {
	Pending int64
	Running int64
	Dead int64
}
// Copy returns a new copy of a JobChildrenSummary, or nil when the
// receiver is nil. All fields are plain values, so a shallow copy suffices.
func (jc *JobChildrenSummary) Copy() *JobChildrenSummary {
	if jc == nil {
		return nil
	}
	dup := *jc
	return &dup
}
// TaskGroupSummary summarizes the state of all the allocations of a
// particular TaskGroup. Each field counts allocations in that state.
type TaskGroupSummary struct {
	Queued int
	Complete int
	Failed int
	Running int
	Starting int
	Lost int
}
const (
	// Checks uses any registered health check state in combination with task
	// states to determine if an allocation is healthy.
	UpdateStrategyHealthCheck_Checks = "checks"
	// TaskStates uses the task states of an allocation to determine if the
	// allocation is healthy.
	UpdateStrategyHealthCheck_TaskStates = "task_states"
	// Manual allows the operator to manually signal to Nomad when an
	// allocation is healthy. This allows more advanced health checking that is
	// outside of the scope of Nomad.
	UpdateStrategyHealthCheck_Manual = "manual"
)
var (
	// DefaultUpdateStrategy provides a baseline that can be used to upgrade
	// jobs with the old policy or for populating field defaults.
	// NOTE(review): MaxParallel of 0 makes Rolling() return false for the
	// default strategy — confirm this is intended and not meant to be 1.
	DefaultUpdateStrategy = &UpdateStrategy{
		Stagger: 30 * time.Second,
		MaxParallel: 0,
		HealthCheck: UpdateStrategyHealthCheck_Checks,
		MinHealthyTime: 10 * time.Second,
		HealthyDeadline: 5 * time.Minute,
		AutoRevert: false,
		Canary: 0,
	}
)
// UpdateStrategy is used to modify how updates are done
type UpdateStrategy struct {
	// Stagger is used to determine the rate at which allocations are migrated
	// due to down or draining nodes.
	Stagger time.Duration
	// MaxParallel is how many updates can be done in parallel
	MaxParallel int
	// HealthCheck specifies the mechanism in which allocations are marked
	// healthy or unhealthy as part of a deployment. One of the
	// UpdateStrategyHealthCheck_* constants.
	HealthCheck string
	// MinHealthyTime is the minimum time an allocation must be in the healthy
	// state before it is marked as healthy, unblocking more allocations to be
	// rolled.
	MinHealthyTime time.Duration
	// HealthyDeadline is the time in which an allocation must be marked as
	// healthy before it is automatically transitioned to unhealthy. This time
	// period doesn't count against the MinHealthyTime.
	HealthyDeadline time.Duration
	// AutoRevert declares that if a deployment fails because of unhealthy
	// allocations, there should be an attempt to auto-revert the job to a
	// stable version.
	AutoRevert bool
	// Canary is the number of canaries to deploy when a change to the task
	// group is detected.
	Canary int
}
// Copy returns a copy of the update strategy, or nil when u is nil.
// Every field is a value type, so a shallow copy is a complete copy.
// (Also avoids shadowing the builtin copy, unlike the previous local name.)
func (u *UpdateStrategy) Copy() *UpdateStrategy {
	if u == nil {
		return nil
	}
	dup := *u
	return &dup
}
// Validate checks the update strategy for internal consistency and returns
// every violation folded into a single multierror. A nil strategy is valid.
func (u *UpdateStrategy) Validate() error {
	if u == nil {
		return nil
	}

	var mErr multierror.Error

	// The health-check mechanism must be one of the known modes.
	validCheck := u.HealthCheck == UpdateStrategyHealthCheck_Checks ||
		u.HealthCheck == UpdateStrategyHealthCheck_TaskStates ||
		u.HealthCheck == UpdateStrategyHealthCheck_Manual
	if !validCheck {
		multierror.Append(&mErr, fmt.Errorf("Invalid health check given: %q", u.HealthCheck))
	}
	if u.MaxParallel < 0 {
		multierror.Append(&mErr, fmt.Errorf("Max parallel can not be less than zero: %d < 0", u.MaxParallel))
	}
	if u.Canary < 0 {
		multierror.Append(&mErr, fmt.Errorf("Canary count can not be less than zero: %d < 0", u.Canary))
	}
	if u.MinHealthyTime < 0 {
		multierror.Append(&mErr, fmt.Errorf("Minimum healthy time may not be less than zero: %v", u.MinHealthyTime))
	}
	if u.HealthyDeadline <= 0 {
		multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be greater than zero: %v", u.HealthyDeadline))
	}
	if u.Stagger <= 0 {
		multierror.Append(&mErr, fmt.Errorf("Stagger must be greater than zero: %v", u.Stagger))
	}

	return mErr.ErrorOrNil()
}
// TODO(alexdadgar): Remove once no longer used by the scheduler.
// Rolling returns if a rolling strategy should be used: both a positive
// stagger and a positive parallelism are required.
func (u *UpdateStrategy) Rolling() bool {
	if u.Stagger <= 0 {
		return false
	}
	return u.MaxParallel > 0
}
const (
	// PeriodicSpecCron is used for a cron spec (parsed with cronexpr; see
	// PeriodicConfig.Validate).
	PeriodicSpecCron = "cron"
	// PeriodicSpecTest is only used by unit tests. It is a sorted, comma
	// separated list of unix timestamps at which to launch.
	PeriodicSpecTest = "_internal_test"
)
// PeriodicConfig defines the interval a job should be run at.
type PeriodicConfig struct {
	// Enabled determines if the job should be run periodically.
	Enabled bool
	// Spec specifies the interval the job should be run as. It is parsed based
	// on the SpecType.
	Spec string
	// SpecType defines the format of the spec; one of the PeriodicSpec*
	// constants.
	SpecType string
	// ProhibitOverlap enforces that spawned jobs do not run in parallel.
	ProhibitOverlap bool
	// TimeZone is the user specified string that determines the time zone to
	// launch against. The time zones must be specified from IANA Time Zone
	// database, such as "America/New_York".
	// Reference: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
	// Reference: https://www.iana.org/time-zones
	TimeZone string
	// location is the time zone to evaluate the launch time against
	location *time.Location
}
// Copy returns a copy of the periodic config, or nil when p is nil.
// The shared *time.Location is intentionally not duplicated (locations
// are immutable), matching the original shallow copy.
func (p *PeriodicConfig) Copy() *PeriodicConfig {
	if p == nil {
		return nil
	}
	dup := *p
	return &dup
}
// Validate verifies the periodic configuration, accumulating every problem
// into a multierror. A disabled configuration is always valid.
func (p *PeriodicConfig) Validate() error {
	if !p.Enabled {
		return nil
	}

	var mErr multierror.Error
	if p.Spec == "" {
		multierror.Append(&mErr, fmt.Errorf("Must specify a spec"))
	}

	// A non-empty time zone must resolve in the IANA database.
	if p.TimeZone != "" {
		if _, err := time.LoadLocation(p.TimeZone); err != nil {
			multierror.Append(&mErr, fmt.Errorf("Invalid time zone %q: %v", p.TimeZone, err))
		}
	}

	// The spec itself is validated according to its declared type.
	if p.SpecType == PeriodicSpecCron {
		if _, err := cronexpr.Parse(p.Spec); err != nil {
			multierror.Append(&mErr, fmt.Errorf("Invalid cron spec %q: %v", p.Spec, err))
		}
	} else if p.SpecType != PeriodicSpecTest {
		// PeriodicSpecTest needs no validation; anything else is unknown.
		multierror.Append(&mErr, fmt.Errorf("Unknown periodic specification type %q", p.SpecType))
	}

	return mErr.ErrorOrNil()
}
// Canonicalize resolves the configured TimeZone into p.location, falling
// back to UTC when the zone cannot be loaded.
//
// BUG FIX: the previous version set p.location = time.UTC on error and then
// unconditionally overwrote it with the result of the failed LoadLocation,
// leaving an invalid (nil) location. The fallback must return/branch so the
// UTC default survives.
func (p *PeriodicConfig) Canonicalize() {
	// Load the location; an empty TimeZone loads UTC per time.LoadLocation.
	if l, err := time.LoadLocation(p.TimeZone); err != nil {
		p.location = time.UTC
	} else {
		p.location = l
	}
}
// Next returns the closest time instant matching the spec that is after the
// passed time. If no matching instance exists, the zero value of time.Time is
// returned. The `time.Location` of the returned value matches that of the
// passed time.
func (p *PeriodicConfig) Next(fromTime time.Time) time.Time {
	var zero time.Time

	switch p.SpecType {
	case PeriodicSpecCron:
		expr, err := cronexpr.Parse(p.Spec)
		if err != nil {
			return zero
		}
		return expr.Next(fromTime)

	case PeriodicSpecTest:
		entries := strings.Split(p.Spec, ",")
		if len(entries) == 1 && entries[0] == "" {
			return zero
		}

		// Parse every timestamp up front; a single malformed entry
		// invalidates the whole spec (matching the original behavior).
		parsed := make([]time.Time, len(entries))
		for i, entry := range entries {
			secs, err := strconv.Atoi(entry)
			if err != nil {
				return zero
			}
			parsed[i] = time.Unix(int64(secs), 0)
		}

		// The list is sorted, so the first instant after fromTime wins.
		for _, candidate := range parsed {
			if fromTime.Before(candidate) {
				return candidate
			}
		}
	}

	return zero
}
// GetLocation returns the location to use for determining the time zone to
// run the periodic job against, defaulting to UTC when none was resolved.
func (p *PeriodicConfig) GetLocation() *time.Location {
	// Jobs pre 0.5.5 will not have this
	if p.location == nil {
		return time.UTC
	}
	return p.location
}
const (
	// PeriodicLaunchSuffix is the string appended to the periodic job's ID
	// when launching derived instances of it.
	PeriodicLaunchSuffix = "/periodic-"
)
// PeriodicLaunch tracks the last launch time of a periodic job.
type PeriodicLaunch struct {
	ID string // ID of the periodic job.
	Launch time.Time // The last launch time.
	// Raft Indexes at which this record was created / last modified.
	CreateIndex uint64
	ModifyIndex uint64
}
const (
	// Valid values for ParameterizedJobConfig.Payload, controlling whether a
	// dispatch may/must/must-not supply a payload.
	DispatchPayloadForbidden = "forbidden"
	DispatchPayloadOptional = "optional"
	DispatchPayloadRequired = "required"
	// DispatchLaunchSuffix is the string appended to the parameterized job's ID
	// when dispatching instances of it.
	DispatchLaunchSuffix = "/dispatch-"
)
// ParameterizedJobConfig is used to configure the parameterized job
type ParameterizedJobConfig struct {
	// Payload configures the payload requirement; one of the
	// DispatchPayload* constants.
	Payload string
	// MetaRequired is metadata keys that must be specified by the dispatcher
	MetaRequired []string
	// MetaOptional is metadata keys that may be specified by the dispatcher
	MetaOptional []string
}
// Validate checks that the parameterized job configuration is well formed:
// a known payload requirement and disjoint required/optional meta key sets.
func (d *ParameterizedJobConfig) Validate() error {
	var mErr multierror.Error

	// Payload must be one of the known dispatch payload requirements.
	if d.Payload != DispatchPayloadOptional &&
		d.Payload != DispatchPayloadRequired &&
		d.Payload != DispatchPayloadForbidden {
		multierror.Append(&mErr, fmt.Errorf("Unknown payload requirement: %q", d.Payload))
	}

	// Check that the meta configurations are disjoint sets
	if disjoint, offending := helper.SliceSetDisjoint(d.MetaRequired, d.MetaOptional); !disjoint {
		multierror.Append(&mErr, fmt.Errorf("Required and optional meta keys should be disjoint. Following keys exist in both: %v", offending))
	}

	return mErr.ErrorOrNil()
}
// Canonicalize defaults an unset payload requirement to "optional".
func (d *ParameterizedJobConfig) Canonicalize() {
	if len(d.Payload) == 0 {
		d.Payload = DispatchPayloadOptional
	}
}
// Copy returns a deep copy of the config (meta key slices included), or nil
// when d is nil.
func (d *ParameterizedJobConfig) Copy() *ParameterizedJobConfig {
	if d == nil {
		return nil
	}
	dup := *d
	dup.MetaOptional = helper.CopySliceString(d.MetaOptional)
	dup.MetaRequired = helper.CopySliceString(d.MetaRequired)
	return &dup
}
// DispatchedID returns an ID appropriate for a job dispatched against a
// particular parameterized job: <template>/dispatch-<unix-time>-<uuid8>.
func DispatchedID(templateID string, t time.Time) string {
	suffix := GenerateUUID()[:8]
	return fmt.Sprintf("%s%s%d-%s", templateID, DispatchLaunchSuffix, t.Unix(), suffix)
}
// DispatchPayloadConfig configures how a task gets its input from a job dispatch
type DispatchPayloadConfig struct {
	// File specifies a relative path to where the input data should be written
	// (validated against the task's local directory; see Validate).
	File string
}
// Copy returns a copy of the payload config, or nil when d is nil.
func (d *DispatchPayloadConfig) Copy() *DispatchPayloadConfig {
	if d == nil {
		return nil
	}
	dup := *d
	return &dup
}
// Validate ensures the payload destination stays inside the allocation's
// task-local directory.
func (d *DispatchPayloadConfig) Validate() error {
	// Verify the destination doesn't escape
	escaped, err := PathEscapesAllocDir("task/local/", d.File)
	switch {
	case err != nil:
		return fmt.Errorf("invalid destination path: %v", err)
	case escaped:
		return fmt.Errorf("destination escapes allocation directory")
	default:
		return nil
	}
}
var (
	// Default restart policies keyed by job type; selected by
	// NewRestartPolicy.
	defaultServiceJobRestartPolicy = RestartPolicy{
		Delay: 15 * time.Second,
		Attempts: 2,
		Interval: 1 * time.Minute,
		Mode: RestartPolicyModeDelay,
	}
	defaultBatchJobRestartPolicy = RestartPolicy{
		Delay: 15 * time.Second,
		Attempts: 15,
		Interval: 7 * 24 * time.Hour,
		Mode: RestartPolicyModeDelay,
	}
)
const (
	// RestartPolicyModeDelay causes an artificial delay till the next interval
	// is reached when the specified attempts have been reached in the
	// interval.
	RestartPolicyModeDelay = "delay"
	// RestartPolicyModeFail causes a job to fail if the specified number of
	// attempts are reached within an interval.
	RestartPolicyModeFail = "fail"
	// RestartPolicyMinInterval is the minimum interval that is accepted for a
	// restart policy.
	RestartPolicyMinInterval = 5 * time.Second
)
// RestartPolicy configures how Tasks are restarted when they crash or fail.
type RestartPolicy struct {
	// Attempts is the number of restarts that will occur in an interval.
	Attempts int
	// Interval is a duration in which we can limit the number of restarts
	// within.
	Interval time.Duration
	// Delay is the time between a failure and a restart.
	Delay time.Duration
	// Mode controls what happens when the task restarts more than attempt
	// times in an interval; one of the RestartPolicyMode* constants.
	Mode string
}
// Copy returns a copy of the restart policy, or nil when r is nil.
// All fields are value types, so a shallow copy is complete.
func (r *RestartPolicy) Copy() *RestartPolicy {
	if r == nil {
		return nil
	}
	dup := *r
	return &dup
}
// Validate checks the restart policy for unsupported modes, ambiguous
// attempt counts, too-small intervals, and delay/interval combinations that
// cannot be satisfied.
func (r *RestartPolicy) Validate() error {
	var mErr multierror.Error

	// Mode must be one of the supported restart modes.
	if r.Mode != RestartPolicyModeDelay && r.Mode != RestartPolicyModeFail {
		multierror.Append(&mErr, fmt.Errorf("Unsupported restart mode: %q", r.Mode))
	}

	// Check for ambiguous/confusing settings: zero attempts only makes
	// sense in fail mode.
	if r.Attempts == 0 && r.Mode != RestartPolicyModeFail {
		multierror.Append(&mErr, fmt.Errorf("Restart policy %q with %d attempts is ambiguous", r.Mode, r.Attempts))
	}

	// Durations compare directly; no need to go through Nanoseconds().
	if r.Interval < RestartPolicyMinInterval {
		multierror.Append(&mErr, fmt.Errorf("Interval can not be less than %v (got %v)", RestartPolicyMinInterval, r.Interval))
	}

	// All attempts, each separated by Delay, must fit inside the interval.
	if time.Duration(r.Attempts)*r.Delay > r.Interval {
		multierror.Append(&mErr,
			fmt.Errorf("Nomad can't restart the TaskGroup %v times in an interval of %v with a delay of %v", r.Attempts, r.Interval, r.Delay))
	}

	return mErr.ErrorOrNil()
}
// NewRestartPolicy returns a fresh copy of the default restart policy for
// the given job type, or nil for unknown job types.
func NewRestartPolicy(jobType string) *RestartPolicy {
	var tpl RestartPolicy
	switch jobType {
	case JobTypeService, JobTypeSystem:
		tpl = defaultServiceJobRestartPolicy
	case JobTypeBatch:
		tpl = defaultBatchJobRestartPolicy
	default:
		return nil
	}
	// Return a pointer to the local copy so callers can't mutate the
	// package-level defaults.
	return &tpl
}
// TaskGroup is an atomic unit of placement. Each task group belongs to
// a job and may contain any number of tasks. A task group supports running
// in many replicas using the same configuration.
type TaskGroup struct {
	// Name of the task group
	Name string
	// Count is the number of replicas of this task group that should
	// be scheduled.
	Count int
	// Update is used to control the update strategy for this task group
	Update *UpdateStrategy
	// Constraints can be specified at a task group level and apply to
	// all the tasks contained.
	Constraints []*Constraint
	// RestartPolicy of a TaskGroup
	RestartPolicy *RestartPolicy
	// Tasks are the collection of tasks that this task group needs to run
	Tasks []*Task
	// EphemeralDisk is the disk resources that the task group requests
	EphemeralDisk *EphemeralDisk
	// Meta is used to associate arbitrary metadata with this
	// task group. This is opaque to Nomad.
	Meta map[string]string
}
// Copy returns a deep copy of the task group, or nil when tg is nil.
func (tg *TaskGroup) Copy() *TaskGroup {
	if tg == nil {
		return nil
	}

	ntg := new(TaskGroup)
	*ntg = *tg
	ntg.Update = tg.Update.Copy()
	ntg.Constraints = CopySliceConstraints(tg.Constraints)
	ntg.RestartPolicy = tg.RestartPolicy.Copy()

	// Deep-copy the tasks; a nil slice stays nil.
	if tg.Tasks != nil {
		copied := make([]*Task, len(tg.Tasks))
		for i, task := range tg.Tasks {
			copied[i] = task.Copy()
		}
		ntg.Tasks = copied
	}

	ntg.Meta = helper.CopyMapStringString(tg.Meta)

	if tg.EphemeralDisk != nil {
		ntg.EphemeralDisk = tg.EphemeralDisk.Copy()
	}
	return ntg
}
// Canonicalize is used to canonicalize fields in the TaskGroup.
func (tg *TaskGroup) Canonicalize(job *Job) {
	// Ensure that an empty and nil map are treated the same to avoid scheduling
	// problems since we use reflect DeepEquals.
	if len(tg.Meta) == 0 {
		tg.Meta = nil
	}
	// Set the default restart policy.
	if tg.RestartPolicy == nil {
		tg.RestartPolicy = NewRestartPolicy(job.Type)
	}
	// Set a default ephemeral disk object if the user has not requested for one
	if tg.EphemeralDisk == nil {
		tg.EphemeralDisk = DefaultEphemeralDisk()
	}
	// Canonicalize tasks first: Task.Canonicalize initializes nil Resources,
	// which the disk summation below depends on.
	for _, task := range tg.Tasks {
		task.Canonicalize(job, tg)
	}
	// Add up the disk resources to EphemeralDisk. This is done so that users
	// are not required to move their disk attribute from resources to
	// EphemeralDisk section of the job spec in Nomad 0.5
	// COMPAT 0.4.1 -> 0.5
	// Remove in 0.6
	var diskMB int
	for _, task := range tg.Tasks {
		diskMB += task.Resources.DiskMB
	}
	// Only override the ephemeral disk size when tasks still declare disk.
	if diskMB > 0 {
		tg.EphemeralDisk.SizeMB = diskMB
	}
}
// Validate is used to sanity check a task group: basic field checks,
// constraint/restart-policy/disk/update validation, duplicate task names,
// at most one leader task, no duplicated static ports, and finally
// per-task validation. All problems are accumulated into one multierror.
func (tg *TaskGroup) Validate(j *Job) error {
	var mErr multierror.Error
	if tg.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task group name"))
	}
	if tg.Count < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Task group count can't be negative"))
	}
	if len(tg.Tasks) == 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Missing tasks for task group"))
	}
	for idx, constr := range tg.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}
	if tg.RestartPolicy != nil {
		if err := tg.RestartPolicy.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	} else {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a restart policy", tg.Name))
	}
	if tg.EphemeralDisk != nil {
		if err := tg.EphemeralDisk.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	} else {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have an ephemeral disk object", tg.Name))
	}
	// Validate the update strategy
	if u := tg.Update; u != nil {
		switch j.Type {
		case JobTypeService, JobTypeSystem:
		default:
			// COMPAT: Enable in 0.7.0
			//mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow update block", j.Type))
		}
		if err := u.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}
	// Check for duplicate tasks, that there is only leader task if any,
	// and no duplicated static ports
	tasks := make(map[string]int)
	staticPorts := make(map[int]string)
	leaderTasks := 0
	for idx, task := range tg.Tasks {
		if task.Name == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d missing name", idx+1))
		} else if existing, ok := tasks[task.Name]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d redefines '%s' from task %d", idx+1, task.Name, existing+1))
		} else {
			tasks[task.Name] = idx
		}
		if task.Leader {
			leaderTasks++
		}
		// Tasks without resources have no networks/ports to check.
		if task.Resources == nil {
			continue
		}
		for _, net := range task.Resources.Networks {
			for _, port := range net.ReservedPorts {
				if other, ok := staticPorts[port.Value]; ok {
					err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other)
					mErr.Errors = append(mErr.Errors, err)
				} else {
					staticPorts[port.Value] = fmt.Sprintf("%s:%s", task.Name, port.Label)
				}
			}
		}
	}
	if leaderTasks > 1 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Only one task may be marked as leader"))
	}
	// Validate the tasks
	for _, task := range tg.Tasks {
		if err := task.Validate(tg.EphemeralDisk); err != nil {
			outer := fmt.Errorf("Task %s validation failed: %v", task.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}
	return mErr.ErrorOrNil()
}
// Warnings returns a list of warnings that may be from dubious settings or
// deprecation warnings.
func (tg *TaskGroup) Warnings(j *Job) error {
	var mErr multierror.Error

	// Warn when a destructive update would replace every allocation at once.
	if u := tg.Update; u != nil && u.MaxParallel > tg.Count {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("Update max parallel count is greater than task group count (%d > %d). "+
				"A destructive change would result in the simultaneous replacement of all allocations.", u.MaxParallel, tg.Count))
	}

	return mErr.ErrorOrNil()
}
// LookupTask finds a task by name, returning nil when no task matches.
func (tg *TaskGroup) LookupTask(name string) *Task {
	for i := range tg.Tasks {
		if tg.Tasks[i].Name == name {
			return tg.Tasks[i]
		}
	}
	return nil
}
// GoString renders the task group in Go syntax for debugging output.
func (tg *TaskGroup) GoString() string {
	v := *tg
	return fmt.Sprintf("*%#v", v)
}
const (
	// Supported values for ServiceCheck.Type.
	ServiceCheckHTTP = "http"
	ServiceCheckTCP = "tcp"
	ServiceCheckScript = "script"
	// minCheckInterval is the minimum check interval permitted. Consul
	// currently has its MinInterval set to 1s. Mirror that here for
	// consistency.
	minCheckInterval = 1 * time.Second
	// minCheckTimeout is the minimum check timeout permitted for Consul
	// script TTL checks.
	minCheckTimeout = 1 * time.Second
)
// The ServiceCheck data model represents the consul health check that
// Nomad registers for a Task
type ServiceCheck struct {
	Name string // Name of the check, defaults to id
	Type string // Type of the check - one of tcp, http or script (see validate)
	Command string // Command is the command to run for script checks
	Args []string // Args is a list of arguments for script checks
	Path string // path of the health check url for http type check
	Protocol string // Protocol to use if check is http, defaults to http
	PortLabel string // The port to use for tcp/http checks
	Interval time.Duration // Interval of the check
	Timeout time.Duration // Timeout of the response from the check before consul fails the check
	InitialStatus string // Initial status of the check
	TLSSkipVerify bool // Skip TLS verification when Protocol=https
}
// Copy returns a copy of the service check, or nil when sc is nil.
// Note: like the original, Args is shared (shallow) between the copies.
func (sc *ServiceCheck) Copy() *ServiceCheck {
	if sc == nil {
		return nil
	}
	dup := *sc
	return &dup
}
// Canonicalize defaults the check name from the owning service and nulls
// out empty slices so nil and empty compare equal under DeepEquals.
func (sc *ServiceCheck) Canonicalize(serviceName string) {
	if sc.Name == "" {
		sc.Name = fmt.Sprintf("service: %q check", serviceName)
	}
	if len(sc.Args) == 0 {
		sc.Args = nil
	}
}
// validate a Service's ServiceCheck: the per-type requirements, the shared
// interval minimum, and the initial Consul status.
//
// FIX: the timeout error messages previously printed minCheckInterval even
// though the comparison is against minCheckTimeout; the messages now report
// the constant actually enforced. The duplicated tcp/http timeout check is
// also factored into one helper.
func (sc *ServiceCheck) validate() error {
	// checkTimeout enforces the timeout requirement shared by tcp and
	// http checks.
	checkTimeout := func() error {
		if sc.Timeout == 0 {
			return fmt.Errorf("missing required value timeout. Timeout cannot be less than %v", minCheckTimeout)
		} else if sc.Timeout < minCheckTimeout {
			return fmt.Errorf("timeout (%v) is lower than required minimum timeout %v", sc.Timeout, minCheckTimeout)
		}
		return nil
	}

	switch strings.ToLower(sc.Type) {
	case ServiceCheckTCP:
		if err := checkTimeout(); err != nil {
			return err
		}
	case ServiceCheckHTTP:
		if sc.Path == "" {
			return fmt.Errorf("http type must have a valid http path")
		}
		if err := checkTimeout(); err != nil {
			return err
		}
	case ServiceCheckScript:
		if sc.Command == "" {
			return fmt.Errorf("script type must have a valid script path")
		}
		// TODO: enforce timeout on the Client side and reenable
		// validation.
	default:
		return fmt.Errorf(`invalid type (%+q), must be one of "http", "tcp", or "script" type`, sc.Type)
	}

	// All check types share the interval minimum.
	if sc.Interval == 0 {
		return fmt.Errorf("missing required value interval. Interval cannot be less than %v", minCheckInterval)
	} else if sc.Interval < minCheckInterval {
		return fmt.Errorf("interval (%v) cannot be lower than %v", sc.Interval, minCheckInterval)
	}

	// The initial status must be empty or a Consul health state.
	switch sc.InitialStatus {
	case "":
		// case api.HealthUnknown: TODO: Add when Consul releases 0.7.1
	case api.HealthPassing:
	case api.HealthWarning:
	case api.HealthCritical:
	default:
		return fmt.Errorf(`invalid initial check state (%s), must be one of %q, %q, %q or empty`, sc.InitialStatus, api.HealthPassing, api.HealthWarning, api.HealthCritical)
	}

	return nil
}
// RequiresPort returns whether the service check requires the task has a port.
func (sc *ServiceCheck) RequiresPort() bool {
	t := sc.Type
	return t == ServiceCheckHTTP || t == ServiceCheckTCP
}
// Hash all ServiceCheck fields and the check's corresponding service ID to
// create an identifier. The identifier is not guaranteed to be unique as if
// the PortLabel is blank, the Service's PortLabel will be used after Hash is
// called.
func (sc *ServiceCheck) Hash(serviceID string) string {
	h := sha1.New()
	// Field order must stay stable: it defines the resulting check ID.
	parts := []string{
		serviceID,
		sc.Name,
		sc.Type,
		sc.Command,
		strings.Join(sc.Args, ""),
		sc.Path,
		sc.Protocol,
		sc.PortLabel,
		sc.Interval.String(),
		sc.Timeout.String(),
	}
	for _, p := range parts {
		io.WriteString(h, p)
	}
	// Only include TLSSkipVerify if set to maintain ID stability with Nomad <0.6
	if sc.TLSSkipVerify {
		io.WriteString(h, "true")
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}
const (
	// Valid values for Service.AddressMode. The exact advertise semantics of
	// each mode are resolved by the client — see Service.AddressMode docs.
	AddressModeAuto = "auto"
	AddressModeHost = "host"
	AddressModeDriver = "driver"
)
// Service represents a Consul service definition in Nomad
type Service struct {
	// Name of the service registered with Consul. Consul defaults the
	// Name to ServiceID if not specified. The Name if specified is used
	// as one of the seed values when generating a Consul ServiceID.
	Name string
	// PortLabel is either the numeric port number or the `host:port`.
	// To specify the port number using the host's Consul Advertise
	// address, specify an empty host in the PortLabel (e.g. `:port`).
	PortLabel string
	// AddressMode specifies whether or not to use the host ip:port for
	// this service; one of the AddressMode* constants.
	AddressMode string
	Tags []string // List of tags for the service
	Checks []*ServiceCheck // List of checks associated with the service
}
// Copy returns a deep copy of the service, or nil when s is nil.
func (s *Service) Copy() *Service {
	if s == nil {
		return nil
	}
	dup := *s
	dup.Tags = helper.CopySliceString(s.Tags)
	// Deep-copy the checks; a nil slice stays nil.
	if s.Checks != nil {
		dup.Checks = make([]*ServiceCheck, len(s.Checks))
		for i, c := range s.Checks {
			dup.Checks[i] = c.Copy()
		}
	}
	return &dup
}
// Canonicalize interpolates values of Job, Task Group and Task in the Service
// Name. This also generates check names, service id and check ids.
func (s *Service) Canonicalize(job string, taskGroup string, task string) {
	// Null out empty lists so nil and empty compare equal in the
	// scheduler's DeepEquals checks.
	if len(s.Tags) == 0 {
		s.Tags = nil
	}
	if len(s.Checks) == 0 {
		s.Checks = nil
	}

	// Interpolate the well-known placeholders into the service name.
	env := map[string]string{
		"JOB":       job,
		"TASKGROUP": taskGroup,
		"TASK":      task,
		"BASE":      fmt.Sprintf("%s-%s-%s", job, taskGroup, task),
	}
	s.Name = args.ReplaceEnv(s.Name, env)

	// Propagate the (now interpolated) name into each check.
	for _, check := range s.Checks {
		check.Canonicalize(s.Name)
	}
}
// serviceNameInterpolationRe validates service names per RFC-952 §1
// (https://tools.ietf.org/html/rfc952), RFC-1123 §2.1
// (https://tools.ietf.org/html/rfc1123), and RFC-2782
// (https://tools.ietf.org/html/rfc2782), with an exception for Nomad's
// ${...} interpolation syntax. Compiled once at package init instead of on
// every Validate call (the pattern is constant, so MustCompile cannot
// panic at runtime).
var serviceNameInterpolationRe = regexp.MustCompile(`^(?i:[a-z0-9]|[a-z0-9\$][a-zA-Z0-9\-\$\{\}\_\.]*[a-z0-9\}])$`)

// Validate checks if the Check definition is valid: an RFC-conformant
// (pre-interpolation) name, a known address mode, and valid checks. All
// problems are accumulated into a multierror.
func (s *Service) Validate() error {
	var mErr multierror.Error

	if !serviceNameInterpolationRe.MatchString(s.Name) {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes: %q", s.Name))
	}

	// The address mode must be empty (defaulted) or one of the known modes.
	switch s.AddressMode {
	case "", AddressModeAuto, AddressModeHost, AddressModeDriver:
		// OK
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("service address_mode must be %q, %q, or %q; not %q", AddressModeAuto, AddressModeHost, AddressModeDriver, s.AddressMode))
	}

	for _, c := range s.Checks {
		// A check that probes a port is invalid when the service has none.
		if s.PortLabel == "" && c.RequiresPort() {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: check requires a port but the service %+q has no port", c.Name, s.Name))
			continue
		}

		if err := c.validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: %v", c.Name, err))
		}
	}

	return mErr.ErrorOrNil()
}
// rfc1123ServiceNameRe validates interpolated service names per RFC-952 §1
// (https://tools.ietf.org/html/rfc952), RFC-1123 §2.1
// (https://tools.ietf.org/html/rfc1123), and RFC-2782
// (https://tools.ietf.org/html/rfc2782). Compiled once at package init
// instead of on every ValidateName call.
var rfc1123ServiceNameRe = regexp.MustCompile(`^(?i:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9])$`)

// ValidateName checks if the services Name is valid and should be called after
// the name has been interpolated
func (s *Service) ValidateName(name string) error {
	if !rfc1123ServiceNameRe.MatchString(name) {
		return fmt.Errorf("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes and must be no longer than 63 characters: %q", name)
	}
	return nil
}
// Hash calculates the hash of the check based on it's content and the service
// which owns it
func (s *Service) Hash() string {
	h := sha1.New()
	// Field order must stay stable: it defines the resulting service ID.
	for _, part := range []string{s.Name, strings.Join(s.Tags, ""), s.PortLabel, s.AddressMode} {
		io.WriteString(h, part)
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}
const (
	// DefaultKillTimeout is the default timeout between signaling a task it
	// will be killed and actually killing it (see Task.Canonicalize).
	DefaultKillTimeout = 5 * time.Second
)
// LogConfig provides configuration for log rotation
type LogConfig struct {
	// MaxFiles is the number of rotated log files to keep (minimum 1).
	MaxFiles int
	// MaxFileSizeMB is the size of each log file in MB (minimum 1).
	MaxFileSizeMB int
}
// DefaultLogConfig returns the default LogConfig values.
func DefaultLogConfig() *LogConfig {
	cfg := LogConfig{
		MaxFiles:      10,
		MaxFileSizeMB: 10,
	}
	return &cfg
}
// Validate returns an error if the log config specified are less than
// the minimum allowed.
func (l *LogConfig) Validate() error {
	var mErr multierror.Error
	if l.MaxFiles < 1 {
		multierror.Append(&mErr, fmt.Errorf("minimum number of files is 1; got %d", l.MaxFiles))
	}
	if l.MaxFileSizeMB < 1 {
		multierror.Append(&mErr, fmt.Errorf("minimum file size is 1MB; got %d", l.MaxFileSizeMB))
	}
	return mErr.ErrorOrNil()
}
// Task is a single process, typically executed as part of a task group.
type Task struct {
	// Name of the task
	Name string
	// Driver is used to control which driver is used
	Driver string
	// User is used to determine which user will run the task. It defaults to
	// the same user the Nomad client is being run as.
	User string
	// Config is provided to the driver to initialize
	Config map[string]interface{}
	// Map of environment variables to be used by the driver
	Env map[string]string
	// List of service definitions exposed by the Task
	Services []*Service
	// Vault is used to define the set of Vault policies that this task should
	// have access to.
	Vault *Vault
	// Templates are the set of templates to be rendered for the task.
	Templates []*Template
	// Constraints can be specified at a task level and apply only to
	// the particular task.
	Constraints []*Constraint
	// Resources is the resources needed by this task
	Resources *Resources
	// DispatchPayload configures how the task retrieves its input from a dispatch
	DispatchPayload *DispatchPayloadConfig
	// Meta is used to associate arbitrary metadata with this
	// task. This is opaque to Nomad.
	Meta map[string]string
	// KillTimeout is the time between signaling a task that it will be
	// killed and killing it.
	KillTimeout time.Duration
	// LogConfig provides configuration for log rotation
	LogConfig *LogConfig
	// Artifacts is a list of artifacts to download and extract before running
	// the task.
	Artifacts []*TaskArtifact
	// Leader marks the task as the leader within the group. When the leader
	// task exits, other tasks will be gracefully terminated.
	Leader bool
}
// Copy returns a deep copy of the task, or nil when t is nil.
//
// BUG FIX: the deep copy of Config previously ran only when
// copystructure.Copy returned an error (`err != nil`) — on success the new
// task silently shared the original's Config map, and on an actual error
// the `i.(map[string]interface{})` assertion would panic on a nil value.
// The condition is now `err == nil`, so Config is deep-copied on success
// and left as the shallow copy only in the (practically unreachable)
// failure case.
func (t *Task) Copy() *Task {
	if t == nil {
		return nil
	}
	nt := new(Task)
	*nt = *t
	nt.Env = helper.CopyMapStringString(nt.Env)

	if t.Services != nil {
		services := make([]*Service, len(nt.Services))
		for i, s := range nt.Services {
			services[i] = s.Copy()
		}
		nt.Services = services
	}

	nt.Constraints = CopySliceConstraints(nt.Constraints)
	nt.Vault = nt.Vault.Copy()
	nt.Resources = nt.Resources.Copy()
	nt.Meta = helper.CopyMapStringString(nt.Meta)
	nt.DispatchPayload = nt.DispatchPayload.Copy()

	if t.Artifacts != nil {
		artifacts := make([]*TaskArtifact, 0, len(t.Artifacts))
		for _, a := range nt.Artifacts {
			artifacts = append(artifacts, a.Copy())
		}
		nt.Artifacts = artifacts
	}

	// Deep-copy the opaque driver config on success (see bug note above).
	if i, err := copystructure.Copy(nt.Config); err == nil {
		nt.Config = i.(map[string]interface{})
	}

	if t.Templates != nil {
		templates := make([]*Template, len(t.Templates))
		for i, tmpl := range nt.Templates {
			templates[i] = tmpl.Copy()
		}
		nt.Templates = templates
	}

	return nt
}
// Canonicalize canonicalizes fields in the task: empty collections are
// nulled out, services are interpolated with job/group/task names, and
// defaults are applied for resources, kill timeout, vault, and templates.
func (t *Task) Canonicalize(job *Job, tg *TaskGroup) {
	// Ensure that an empty and nil map are treated the same to avoid scheduling
	// problems since we use reflect DeepEquals.
	if len(t.Meta) == 0 {
		t.Meta = nil
	}
	if len(t.Config) == 0 {
		t.Config = nil
	}
	if len(t.Env) == 0 {
		t.Env = nil
	}
	// Interpolate job/group/task names into each service definition.
	for _, service := range t.Services {
		service.Canonicalize(job.Name, tg.Name, t.Name)
	}
	// If Resources are nil initialize them to defaults, otherwise canonicalize
	if t.Resources == nil {
		t.Resources = DefaultResources()
	} else {
		t.Resources.Canonicalize()
	}
	// Set the default timeout if it is not specified.
	if t.KillTimeout == 0 {
		t.KillTimeout = DefaultKillTimeout
	}
	if t.Vault != nil {
		t.Vault.Canonicalize()
	}
	for _, template := range t.Templates {
		template.Canonicalize()
	}
}
// GoString renders the task in Go syntax for debugging output.
func (t *Task) GoString() string {
	v := *t
	return fmt.Sprintf("*%#v", v)
}
// Validate is used to sanity check a task. It accumulates every problem
// found (name, driver, resources, log config, constraints, services, log
// storage vs. disk, artifacts, vault, templates, dispatch payload) into a
// single multierror rather than stopping at the first failure.
// ephemeralDisk may be nil; it is only consulted for the log-storage check.
func (t *Task) Validate(ephemeralDisk *EphemeralDisk) error {
	var mErr multierror.Error
	if t.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task name"))
	}
	if strings.ContainsAny(t.Name, `/\`) {
		// We enforce this so that when creating the directory on disk it will
		// not have any slashes.
		mErr.Errors = append(mErr.Errors, errors.New("Task name cannot include slashes"))
	}
	if t.Driver == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task driver"))
	}
	// NOTE(review): only negative timeouts are rejected; zero is allowed here
	// and later defaulted by Canonicalize.
	if t.KillTimeout.Nanoseconds() < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("KillTimeout must be a positive value"))
	}

	// Validate the resources.
	if t.Resources == nil {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task resources"))
	} else {
		if err := t.Resources.MeetsMinResources(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}

		// Ensure the task isn't asking for disk resources
		if t.Resources.DiskMB > 0 {
			mErr.Errors = append(mErr.Errors, errors.New("Task can't ask for disk resources, they have to be specified at the task group level."))
		}
	}

	// Validate the log config
	if t.LogConfig == nil {
		mErr.Errors = append(mErr.Errors, errors.New("Missing Log Config"))
	} else if err := t.LogConfig.Validate(); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}

	// Validate constraints; distinct_hosts/distinct_property only make sense
	// at the job or group level, so they are disallowed on tasks.
	for idx, constr := range t.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}

		switch constr.Operand {
		case ConstraintDistinctHosts, ConstraintDistinctProperty:
			outer := fmt.Errorf("Constraint %d has disallowed Operand at task level: %s", idx+1, constr.Operand)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	// Validate Services
	if err := validateServices(t); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}

	// The worst-case log usage (max files * max size) must fit strictly
	// inside the group's ephemeral disk.
	if t.LogConfig != nil && ephemeralDisk != nil {
		logUsage := (t.LogConfig.MaxFiles * t.LogConfig.MaxFileSizeMB)
		if ephemeralDisk.SizeMB <= logUsage {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("log storage (%d MB) must be less than requested disk capacity (%d MB)",
					logUsage, ephemeralDisk.SizeMB))
		}
	}

	for idx, artifact := range t.Artifacts {
		if err := artifact.Validate(); err != nil {
			outer := fmt.Errorf("Artifact %d validation failed: %v", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	if t.Vault != nil {
		if err := t.Vault.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Vault validation failed: %v", err))
		}
	}

	// Templates must not share a destination path; destinations maps the
	// path to the 1-based index of the first template that claimed it.
	destinations := make(map[string]int, len(t.Templates))
	for idx, tmpl := range t.Templates {
		if err := tmpl.Validate(); err != nil {
			outer := fmt.Errorf("Template %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}

		if other, ok := destinations[tmpl.DestPath]; ok {
			outer := fmt.Errorf("Template %d has same destination as %d", idx+1, other)
			mErr.Errors = append(mErr.Errors, outer)
		} else {
			destinations[tmpl.DestPath] = idx + 1
		}
	}

	// Validate the dispatch payload block if there
	if t.DispatchPayload != nil {
		if err := t.DispatchPayload.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Payload validation failed: %v", err))
		}
	}

	return mErr.ErrorOrNil()
}
// validateServices takes a task and validates that the services within it are
// valid and reference ports that exist on one of the task's networks. All
// problems are accumulated into a single multierror.
func validateServices(t *Task) error {
	var mErr multierror.Error

	// Ensure that services don't ask for non-existent ports and their names are
	// unique.
	servicePorts := make(map[string][]string)
	knownServices := make(map[string]struct{})
	for i, service := range t.Services {
		if err := service.Validate(); err != nil {
			outer := fmt.Errorf("service[%d] %+q validation failed: %s", i, service.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}

		// Ensure that services with the same name are not being registered for
		// the same port.
		if _, ok := knownServices[service.Name+service.PortLabel]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q is duplicate", service.Name))
		}
		knownServices[service.Name+service.PortLabel] = struct{}{}

		if service.PortLabel != "" {
			servicePorts[service.PortLabel] = append(servicePorts[service.PortLabel], service.Name)
		}

		// Ensure that check names are unique within each service.
		knownChecks := make(map[string]struct{})
		for _, check := range service.Checks {
			if _, ok := knownChecks[check.Name]; ok {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is duplicate", check.Name))
			}
			knownChecks[check.Name] = struct{}{}
		}
	}

	// Get the set of port labels declared by the task's networks.
	portLabels := make(map[string]struct{})
	if t.Resources != nil {
		for _, network := range t.Resources.Networks {
			// Idiom fix: `for k := range m` instead of `for k, _ := range m`.
			for portLabel := range network.PortLabels() {
				portLabels[portLabel] = struct{}{}
			}
		}
	}

	// Ensure all ports referenced in services exist.
	for servicePort, services := range servicePorts {
		if _, ok := portLabels[servicePort]; !ok {
			joined := strings.Join(services, ", ")
			err := fmt.Errorf("port label %q referenced by services %v does not exist", servicePort, joined)
			mErr.Errors = append(mErr.Errors, err)
		}
	}

	// Ensure address mode is valid
	return mErr.ErrorOrNil()
}
// Valid values for Template.ChangeMode.
const (
	// TemplateChangeModeNoop marks that no action should be taken if the
	// template is re-rendered.
	TemplateChangeModeNoop = "noop"

	// TemplateChangeModeSignal marks that the task should be signaled if the
	// template is re-rendered.
	TemplateChangeModeSignal = "signal"

	// TemplateChangeModeRestart marks that the task should be restarted if the
	// template is re-rendered.
	TemplateChangeModeRestart = "restart"
)
var (
	// TemplateChangeModeInvalidError is the error returned when an invalid
	// change mode is given to Template.Validate.
	TemplateChangeModeInvalidError = errors.New("Invalid change mode. Must be one of the following: noop, signal, restart")
)
// Template represents a template configuration to be rendered for a given task.
type Template struct {
	// SourcePath is the path to the template to be rendered.
	SourcePath string

	// DestPath is the path to where the template should be rendered.
	DestPath string

	// EmbeddedTmpl store the raw template. This is useful for smaller templates
	// where they are embedded in the job file rather than sent as an artifact.
	EmbeddedTmpl string

	// ChangeMode indicates what should be done if the template is re-rendered.
	// One of the TemplateChangeMode* constants.
	ChangeMode string

	// ChangeSignal is the signal that should be sent if the change mode
	// requires it.
	ChangeSignal string

	// Splay is used to avoid coordinated restarts of processes by applying a
	// random wait between 0 and the given splay value before signalling the
	// application of a change.
	Splay time.Duration

	// Perms is the permission the file should be written out with,
	// expressed as an octal string (e.g. "0644").
	Perms string

	// LeftDelim and RightDelim are optional configurations to control what
	// delimiter is utilized when parsing the template.
	LeftDelim  string
	RightDelim string

	// Envvars enables exposing the template as environment variables
	// instead of as a file. The template must be of the form:
	//
	//	VAR_NAME_1={{ key service/my-key }}
	//	VAR_NAME_2=raw string and {{ env "attr.kernel.name" }}
	//
	// Lines will be split on the initial "=" with the first part being the
	// key name and the second part the value.
	// Empty lines and lines starting with # will be ignored, but to avoid
	// escaping issues #s within lines will not be treated as comments.
	Envvars bool
}
// DefaultTemplate returns a default template: restart on change, five-second
// splay, and 0644 file permissions.
func DefaultTemplate() *Template {
	t := new(Template)
	t.ChangeMode = TemplateChangeModeRestart
	t.Splay = 5 * time.Second
	t.Perms = "0644"
	return t
}
// Copy returns a shallow copy of the template. A nil receiver yields nil.
func (t *Template) Copy() *Template {
	if t == nil {
		return nil
	}
	// Renamed local: the original shadowed the builtin copy function.
	nt := new(Template)
	*nt = *t
	return nt
}
// Canonicalize normalizes the template, upper-casing any change signal so
// signal names compare consistently.
func (t *Template) Canonicalize() {
	if sig := t.ChangeSignal; sig != "" {
		t.ChangeSignal = strings.ToUpper(sig)
	}
}
// Validate checks the template configuration: something to render, a safe
// destination inside the allocation directory, a valid change mode (with a
// signal when required), a non-negative splay, and parseable octal perms.
// All problems are accumulated into a single multierror.
//
// Consistency fix: the original mixed multierror.Append with direct
// mErr.Errors appends; both have the same effect, so this unifies on the
// direct-append style used throughout the rest of the file.
func (t *Template) Validate() error {
	var mErr multierror.Error

	// Verify we have something to render.
	if t.SourcePath == "" && t.EmbeddedTmpl == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Must specify a source path or have an embedded template"))
	}

	// Verify we can render somewhere.
	if t.DestPath == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Must specify a destination for the template"))
	}

	// Verify the destination doesn't escape the allocation directory.
	escaped, err := PathEscapesAllocDir("task", t.DestPath)
	if err != nil {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
	} else if escaped {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
	}

	// Verify a proper change mode; signal mode additionally requires a signal.
	switch t.ChangeMode {
	case TemplateChangeModeNoop, TemplateChangeModeRestart:
	case TemplateChangeModeSignal:
		if t.ChangeSignal == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Must specify signal value when change mode is signal"))
		}
	default:
		mErr.Errors = append(mErr.Errors, TemplateChangeModeInvalidError)
	}

	// Verify the splay is positive.
	if t.Splay < 0 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Must specify positive splay value"))
	}

	// Verify the permissions parse as an octal file mode.
	if t.Perms != "" {
		if _, err := strconv.ParseUint(t.Perms, 8, 12); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Failed to parse %q as octal: %v", t.Perms, err))
		}
	}

	return mErr.ErrorOrNil()
}
// Set of possible states for a task.
const (
	TaskStatePending = "pending" // The task is waiting to be run.
	TaskStateRunning = "running" // The task is currently running.
	TaskStateDead    = "dead"    // Terminal state of task.
)
// TaskState tracks the current state of a task and events that caused state
// transitions.
type TaskState struct {
	// The current state of the task. One of the TaskState* constants.
	State string

	// Failed marks a task as having failed.
	Failed bool

	// Restarts is the number of times the task has restarted.
	Restarts uint64

	// LastRestart is the time the task last restarted. It is updated each time the
	// task restarts.
	LastRestart time.Time

	// StartedAt is the time the task is started. It is updated each time the
	// task starts.
	StartedAt time.Time

	// FinishedAt is the time at which the task transitioned to dead and will
	// not be started again.
	FinishedAt time.Time

	// Series of task events that transition the state of the task.
	Events []*TaskEvent
}
// Copy returns a deep copy of the task state, cloning the event slice and
// each event in it. A nil receiver yields nil.
func (ts *TaskState) Copy() *TaskState {
	if ts == nil {
		return nil
	}
	// Renamed local: the original shadowed the builtin copy function.
	nts := new(TaskState)
	*nts = *ts

	if ts.Events != nil {
		nts.Events = make([]*TaskEvent, len(ts.Events))
		for i, e := range ts.Events {
			nts.Events[i] = e.Copy()
		}
	}
	return nts
}
// Successful returns whether a task finished successfully: the task must be
// dead and its final event must be a termination with exit code zero.
func (ts *TaskState) Successful() bool {
	n := len(ts.Events)
	if ts.State != TaskStateDead || n == 0 {
		return false
	}
	last := ts.Events[n-1]
	return last.Type == TaskTerminated && last.ExitCode == 0
}
// Set of TaskEvent.Type values.
const (
	// TaskSetupFailure indicates that the task could not be started due to a
	// setup failure.
	TaskSetupFailure = "Setup Failure"

	// TaskDriverFailure indicates that the task could not be started due to a
	// failure in the driver.
	TaskDriverFailure = "Driver Failure"

	// TaskReceived signals that the task has been pulled by the client at the
	// given timestamp.
	TaskReceived = "Received"

	// TaskFailedValidation indicates the task was invalid and as such was not
	// run.
	TaskFailedValidation = "Failed Validation"

	// TaskStarted signals that the task was started and its timestamp can be
	// used to determine the running length of the task.
	TaskStarted = "Started"

	// TaskTerminated indicates that the task was started and exited.
	TaskTerminated = "Terminated"

	// TaskKilling indicates a kill signal has been sent to the task.
	TaskKilling = "Killing"

	// TaskKilled indicates a user has killed the task.
	TaskKilled = "Killed"

	// TaskRestarting indicates that task terminated and is being restarted.
	TaskRestarting = "Restarting"

	// TaskNotRestarting indicates that the task has failed and is not being
	// restarted because it has exceeded its restart policy.
	TaskNotRestarting = "Not Restarting"

	// TaskRestartSignal indicates that the task has been signalled to be
	// restarted.
	TaskRestartSignal = "Restart Signaled"

	// TaskSignaling indicates that the task is being signalled.
	TaskSignaling = "Signaling"

	// TaskDownloadingArtifacts means the task is downloading the artifacts
	// specified in the task.
	TaskDownloadingArtifacts = "Downloading Artifacts"

	// TaskArtifactDownloadFailed indicates that downloading the artifacts
	// failed.
	TaskArtifactDownloadFailed = "Failed Artifact Download"

	// TaskBuildingTaskDir indicates that the task directory/chroot is being
	// built.
	TaskBuildingTaskDir = "Building Task Directory"

	// TaskSetup indicates the task runner is setting up the task environment.
	TaskSetup = "Task Setup"

	// TaskDiskExceeded indicates that one of the tasks in a taskgroup has
	// exceeded the requested disk resources.
	TaskDiskExceeded = "Disk Resources Exceeded"

	// TaskSiblingFailed indicates that a sibling task in the task group has
	// failed.
	TaskSiblingFailed = "Sibling Task Failed"

	// TaskDriverMessage is an informational event message emitted by
	// drivers such as when they're performing a long running action like
	// downloading an image.
	TaskDriverMessage = "Driver"

	// TaskLeaderDead indicates that the leader task within the task group has
	// finished.
	TaskLeaderDead = "Leader Task Dead"
)
// TaskEvent is an event that effects the state of a task and contains meta-data
// appropriate to the events type. Only the fields relevant to the event's
// Type are populated; the rest remain at their zero values.
type TaskEvent struct {
	// Type is one of the Task* event constants.
	Type string
	Time int64 // Unix Nanosecond timestamp

	// FailsTask marks whether this event fails the task.
	FailsTask bool

	// Restart fields.
	RestartReason string

	// Setup Failure fields.
	SetupError string

	// Driver Failure fields.
	DriverError string // A driver error occurred while starting the task.

	// Task Terminated Fields.
	ExitCode int    // The exit code of the task.
	Signal   int    // The signal that terminated the task.
	Message  string // A possible message explaining the termination of the task.

	// Killing fields
	KillTimeout time.Duration

	// Task Killed Fields.
	KillError string // Error killing the task.

	// KillReason is the reason the task was killed.
	KillReason string

	// TaskRestarting fields.
	StartDelay int64 // The sleep period before restarting the task in unix nanoseconds.

	// Artifact Download fields
	DownloadError string // Error downloading artifacts

	// Validation fields
	ValidationError string // Validation error

	// The maximum allowed task disk size.
	DiskLimit int64

	// Name of the sibling task that caused termination of the task that
	// the TaskEvent refers to.
	FailedSibling string

	// VaultError is the error from token renewal.
	VaultError string

	// TaskSignalReason indicates the reason the task is being signalled.
	TaskSignalReason string

	// TaskSignal is the signal that was sent to the task.
	TaskSignal string

	// DriverMessage indicates a driver action being taken.
	DriverMessage string
}
// GoString returns a compact "<type> at <time>" debug representation.
func (te *TaskEvent) GoString() string {
	return fmt.Sprintf("%s at %d", te.Type, te.Time)
}
// SetMessage sets the message of the TaskEvent and returns the event for
// chaining.
func (te *TaskEvent) SetMessage(m string) *TaskEvent {
	te.Message = m
	return te
}
// Copy returns a shallow copy of the event. A nil receiver yields nil.
func (te *TaskEvent) Copy() *TaskEvent {
	if te == nil {
		return nil
	}
	// Renamed local: the original shadowed the builtin copy function.
	nte := new(TaskEvent)
	*nte = *te
	return nte
}
// NewTaskEvent returns a new event of the given type, stamped with the
// current wall-clock time in Unix nanoseconds.
func NewTaskEvent(event string) *TaskEvent {
	e := &TaskEvent{Type: event}
	e.Time = time.Now().UnixNano()
	return e
}
// SetSetupError is used to store an error that occurred while setting up the
// task. A nil error leaves the event unchanged.
func (e *TaskEvent) SetSetupError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	e.SetupError = err.Error()
	return e
}

// SetFailsTask marks the event as one that fails the task.
func (e *TaskEvent) SetFailsTask() *TaskEvent {
	e.FailsTask = true
	return e
}

// SetDriverError records a driver error on the event, if any.
func (e *TaskEvent) SetDriverError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	e.DriverError = err.Error()
	return e
}

// SetExitCode records the exit code of the terminated task.
func (e *TaskEvent) SetExitCode(code int) *TaskEvent {
	e.ExitCode = code
	return e
}

// SetSignal records the signal number that terminated the task.
func (e *TaskEvent) SetSignal(sig int) *TaskEvent {
	e.Signal = sig
	return e
}

// SetExitMessage stores the error text as the event message, if any.
func (e *TaskEvent) SetExitMessage(err error) *TaskEvent {
	if err == nil {
		return e
	}
	e.Message = err.Error()
	return e
}

// SetKillError records an error encountered while killing the task, if any.
func (e *TaskEvent) SetKillError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	e.KillError = err.Error()
	return e
}

// SetKillReason records why the task was killed.
func (e *TaskEvent) SetKillReason(reason string) *TaskEvent {
	e.KillReason = reason
	return e
}

// SetRestartDelay records the sleep period before the task restarts,
// stored in Unix nanoseconds.
func (e *TaskEvent) SetRestartDelay(delay time.Duration) *TaskEvent {
	e.StartDelay = int64(delay)
	return e
}

// SetRestartReason records why the task is restarting.
func (e *TaskEvent) SetRestartReason(reason string) *TaskEvent {
	e.RestartReason = reason
	return e
}

// SetTaskSignalReason records why the task is being signalled.
func (e *TaskEvent) SetTaskSignalReason(reason string) *TaskEvent {
	e.TaskSignalReason = reason
	return e
}

// SetTaskSignal records the name of the signal sent to the task.
func (e *TaskEvent) SetTaskSignal(sig os.Signal) *TaskEvent {
	e.TaskSignal = sig.String()
	return e
}

// SetDownloadError records an artifact download failure, if any.
func (e *TaskEvent) SetDownloadError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	e.DownloadError = err.Error()
	return e
}

// SetValidationError records a validation failure, if any.
func (e *TaskEvent) SetValidationError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	e.ValidationError = err.Error()
	return e
}

// SetKillTimeout records the kill timeout applied to the task.
func (e *TaskEvent) SetKillTimeout(timeout time.Duration) *TaskEvent {
	e.KillTimeout = timeout
	return e
}

// SetDiskLimit records the maximum allowed task disk size.
func (e *TaskEvent) SetDiskLimit(limit int64) *TaskEvent {
	e.DiskLimit = limit
	return e
}

// SetFailedSibling records the sibling task whose failure produced this event.
func (e *TaskEvent) SetFailedSibling(sibling string) *TaskEvent {
	e.FailedSibling = sibling
	return e
}

// SetVaultRenewalError records an error from Vault token renewal, if any.
func (e *TaskEvent) SetVaultRenewalError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	e.VaultError = err.Error()
	return e
}

// SetDriverMessage records an informational message from the driver.
func (e *TaskEvent) SetDriverMessage(msg string) *TaskEvent {
	e.DriverMessage = msg
	return e
}
// TaskArtifact is an artifact to download before running the task.
type TaskArtifact struct {
	// GetterSource is the source to download an artifact using go-getter.
	GetterSource string

	// GetterOptions are options to use when downloading the artifact using
	// go-getter.
	GetterOptions map[string]string

	// GetterMode is the go-getter.ClientMode for fetching resources.
	// Defaults to "any" but can be set to "file" or "dir".
	GetterMode string

	// RelativeDest is the download destination given relative to the task's
	// directory.
	RelativeDest string
}
// Copy returns a deep copy of the artifact, cloning the getter options map.
// A nil receiver yields nil.
func (ta *TaskArtifact) Copy() *TaskArtifact {
	if ta == nil {
		return nil
	}
	c := new(TaskArtifact)
	*c = *ta
	c.GetterOptions = helper.CopyMapStringString(ta.GetterOptions)
	return c
}
// GoString returns a debug representation of the artifact. Note the pointer
// receiver is formatted directly, so a nil receiver prints as "<nil>".
func (ta *TaskArtifact) GoString() string {
	return fmt.Sprintf("%+v", ta)
}
// PathEscapesAllocDir returns if the given path escapes the allocation
// directory. The prefix allows adding a prefix if the path will be joined, for
// example a "task/local" prefix may be provided if the path will be joined
// against that prefix.
func PathEscapesAllocDir(prefix, path string) (bool, error) {
	// Resolve the candidate path against a synthetic allocation root.
	allocDir, err := filepath.Abs(filepath.Join("/", "alloc-dir/", "alloc-id/"))
	if err != nil {
		return false, err
	}
	resolved, err := filepath.Abs(filepath.Join(allocDir, prefix, path))
	if err != nil {
		return false, err
	}
	rel, err := filepath.Rel(allocDir, resolved)
	if err != nil {
		return false, err
	}
	// A relative path beginning with ".." points above the alloc root.
	return strings.HasPrefix(rel, ".."), nil
}
// Validate checks the artifact: a source must be given, the getter mode must
// be known (defaulting to "any" — note this mutates the receiver), the
// destination must stay inside the allocation directory, and any "checksum"
// getter option must be a well-formed "type:value" pair with the correct
// digest length. Checksum problems return early; other errors accumulate.
func (ta *TaskArtifact) Validate() error {
	// Verify the source
	var mErr multierror.Error
	if ta.GetterSource == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("source must be specified"))
	}

	switch ta.GetterMode {
	case "":
		// Default to any
		ta.GetterMode = GetterModeAny
	case GetterModeAny, GetterModeFile, GetterModeDir:
		// Ok
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid artifact mode %q; must be one of: %s, %s, %s",
			ta.GetterMode, GetterModeAny, GetterModeFile, GetterModeDir))
	}

	escaped, err := PathEscapesAllocDir("task", ta.RelativeDest)
	if err != nil {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
	} else if escaped {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
	}

	// Verify the checksum
	if check, ok := ta.GetterOptions["checksum"]; ok {
		check = strings.TrimSpace(check)
		if check == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("checksum value cannot be empty"))
			return mErr.ErrorOrNil()
		}

		// Expected form is "type:value", e.g. "sha256:abc...".
		parts := strings.Split(check, ":")
		if l := len(parts); l != 2 {
			mErr.Errors = append(mErr.Errors, fmt.Errorf(`checksum must be given as "type:value"; got %q`, check))
			return mErr.ErrorOrNil()
		}

		checksumVal := parts[1]
		checksumBytes, err := hex.DecodeString(checksumVal)
		if err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid checksum: %v", err))
			return mErr.ErrorOrNil()
		}

		// The decoded digest must match the algorithm's output size.
		checksumType := parts[0]
		expectedLength := 0
		switch checksumType {
		case "md5":
			expectedLength = md5.Size
		case "sha1":
			expectedLength = sha1.Size
		case "sha256":
			expectedLength = sha256.Size
		case "sha512":
			expectedLength = sha512.Size
		default:
			mErr.Errors = append(mErr.Errors, fmt.Errorf("unsupported checksum type: %s", checksumType))
			return mErr.ErrorOrNil()
		}

		if len(checksumBytes) != expectedLength {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid %s checksum: %v", checksumType, checksumVal))
			return mErr.ErrorOrNil()
		}
	}

	return mErr.ErrorOrNil()
}
// Constraint operands beyond the plain comparison operators.
const (
	ConstraintDistinctProperty = "distinct_property"
	ConstraintDistinctHosts    = "distinct_hosts"
	ConstraintRegex            = "regexp"
	ConstraintVersion          = "version"
	ConstraintSetContains      = "set_contains"
)
// Constraints are used to restrict placement options.
type Constraint struct {
	LTarget string // Left-hand target
	RTarget string // Right-hand target
	Operand string // Constraint operand (<=, <, =, !=, >, >=), contains, near
	str     string // Memoized result of String(); not part of equality.
}
// Equal checks if two constraints are equal, comparing targets and operand
// (the memoized string form is ignored).
func (c *Constraint) Equal(o *Constraint) bool {
	if c.LTarget != o.LTarget {
		return false
	}
	if c.RTarget != o.RTarget {
		return false
	}
	return c.Operand == o.Operand
}
// Copy returns a shallow copy of the constraint. A nil receiver yields nil.
func (c *Constraint) Copy() *Constraint {
	if c == nil {
		return nil
	}
	dup := new(Constraint)
	*dup = *c
	return dup
}
// String returns the human-readable "<ltarget> <operand> <rtarget>" form,
// memoizing the result on first use.
func (c *Constraint) String() string {
	if c.str == "" {
		c.str = fmt.Sprintf("%s %s %s", c.LTarget, c.Operand, c.RTarget)
	}
	return c.str
}
// Validate checks that the constraint has an operand and that
// operand-specific arguments (regexp source, version constraint) parse.
func (c *Constraint) Validate() error {
	var mErr multierror.Error
	addErr := func(err error) { mErr.Errors = append(mErr.Errors, err) }

	if c.Operand == "" {
		addErr(errors.New("Missing constraint operand"))
	}

	// Perform additional validation based on operand.
	switch c.Operand {
	case ConstraintRegex:
		if _, err := regexp.Compile(c.RTarget); err != nil {
			addErr(fmt.Errorf("Regular expression failed to compile: %v", err))
		}
	case ConstraintVersion:
		if _, err := version.NewConstraint(c.RTarget); err != nil {
			addErr(fmt.Errorf("Version constraint is invalid: %v", err))
		}
	}
	return mErr.ErrorOrNil()
}
// EphemeralDisk is an ephemeral disk object.
type EphemeralDisk struct {
	// Sticky indicates whether the allocation is sticky to a node.
	Sticky bool

	// SizeMB is the size of the local disk in megabytes.
	SizeMB int

	// Migrate determines if Nomad client should migrate the allocation dir for
	// sticky allocations.
	Migrate bool
}
// DefaultEphemeralDisk returns a EphemeralDisk with default configurations
// (300 MB, not sticky, no migration).
func DefaultEphemeralDisk() *EphemeralDisk {
	d := new(EphemeralDisk)
	d.SizeMB = 300
	return d
}
// Validate validates EphemeralDisk, enforcing the 10 MB minimum size.
func (d *EphemeralDisk) Validate() error {
	if d.SizeMB >= 10 {
		return nil
	}
	return fmt.Errorf("minimum DiskMB value is 10; got %d", d.SizeMB)
}
// Copy copies the EphemeralDisk struct and returns a new one.
func (d *EphemeralDisk) Copy() *EphemeralDisk {
	c := new(EphemeralDisk)
	*c = *d
	return c
}
// Valid values for Vault.ChangeMode.
const (
	// VaultChangeModeNoop takes no action when a new token is retrieved.
	VaultChangeModeNoop = "noop"

	// VaultChangeModeSignal signals the task when a new token is retrieved.
	VaultChangeModeSignal = "signal"

	// VaultChangeModeRestart restarts the task when a new token is retrieved.
	VaultChangeModeRestart = "restart"
)
// Vault stores the set of permissions a task needs access to from Vault.
type Vault struct {
	// Policies is the set of policies that the task needs access to.
	Policies []string

	// Env marks whether the Vault Token should be exposed as an environment
	// variable.
	Env bool

	// ChangeMode is used to configure the task's behavior when the Vault
	// token changes because the original token could not be renewed in time.
	// One of the VaultChangeMode* constants.
	ChangeMode string

	// ChangeSignal is the signal sent to the task when a new token is
	// retrieved. This is only valid when using the signal change mode.
	ChangeSignal string
}
// DefaultVaultBlock returns a Vault block with defaults: token exposed via
// the environment and restart-on-change behavior.
func DefaultVaultBlock() *Vault {
	v := new(Vault)
	v.Env = true
	v.ChangeMode = VaultChangeModeRestart
	return v
}
// Copy returns a copy of this Vault block. A nil receiver yields nil.
// NOTE: the Policies slice header is shared with the original.
func (v *Vault) Copy() *Vault {
	if v == nil {
		return nil
	}
	c := new(Vault)
	*c = *v
	return c
}
// Canonicalize upper-cases the change signal, if one is set.
func (v *Vault) Canonicalize() {
	if sig := v.ChangeSignal; sig != "" {
		v.ChangeSignal = strings.ToUpper(sig)
	}
}
// Validate returns if the Vault block is valid: at least one policy, no use
// of the reserved "root" policy, and a coherent change mode (signal mode
// requires a signal). A nil block is valid.
func (v *Vault) Validate() error {
	if v == nil {
		return nil
	}

	var mErr multierror.Error
	if len(v.Policies) == 0 {
		multierror.Append(&mErr, fmt.Errorf("Policy list cannot be empty"))
	}

	for _, p := range v.Policies {
		if p == "root" {
			// Fixed misspelling in the user-facing message ("specifiy").
			multierror.Append(&mErr, fmt.Errorf("Can not specify \"root\" policy"))
		}
	}

	switch v.ChangeMode {
	case VaultChangeModeSignal:
		if v.ChangeSignal == "" {
			multierror.Append(&mErr, fmt.Errorf("Signal must be specified when using change mode %q", VaultChangeModeSignal))
		}
	case VaultChangeModeNoop, VaultChangeModeRestart:
	default:
		multierror.Append(&mErr, fmt.Errorf("Unknown change mode %q", v.ChangeMode))
	}

	return mErr.ErrorOrNil()
}
const (
	// DeploymentStatuses are the various states a deployment can be in.
	DeploymentStatusRunning    = "running"
	DeploymentStatusPaused     = "paused"
	DeploymentStatusFailed     = "failed"
	DeploymentStatusSuccessful = "successful"
	DeploymentStatusCancelled  = "cancelled"

	// DeploymentStatusDescriptions are the various descriptions of the states a
	// deployment can be in.
	DeploymentStatusDescriptionRunning               = "Deployment is running"
	DeploymentStatusDescriptionRunningNeedsPromotion = "Deployment is running but requires promotion"
	DeploymentStatusDescriptionPaused                = "Deployment is paused"
	DeploymentStatusDescriptionSuccessful            = "Deployment completed successfully"
	DeploymentStatusDescriptionStoppedJob            = "Cancelled because job is stopped"
	DeploymentStatusDescriptionNewerJob              = "Cancelled due to newer version of job"
	DeploymentStatusDescriptionFailedAllocations     = "Failed due to unhealthy allocations"
	DeploymentStatusDescriptionFailedByUser          = "Deployment marked as failed"
)
// DeploymentStatusDescriptionRollback is used to get the status description of
// a deployment when rolling back to an older job.
func DeploymentStatusDescriptionRollback(baseDescription string, jobVersion uint64) string {
	suffix := fmt.Sprintf(" - rolling back to job version %d", jobVersion)
	return baseDescription + suffix
}
// Deployment is the object that represents a job deployment which is used to
// transition a job between versions.
type Deployment struct {
	// ID is a generated UUID for the deployment.
	ID string

	// JobID is the job the deployment is created for.
	JobID string

	// JobVersion is the version of the job at which the deployment is tracking.
	JobVersion uint64

	// JobModifyIndex is the modify index of the job at which the deployment is tracking.
	JobModifyIndex uint64

	// JobCreateIndex is the create index of the job which the deployment is
	// tracking. It is needed so that if the job gets stopped and reran we can
	// present the correct list of deployments for the job and not old ones.
	JobCreateIndex uint64

	// TaskGroups is the set of task groups affected by the deployment and their
	// current deployment status, keyed by task group name.
	TaskGroups map[string]*DeploymentState

	// The status of the deployment. One of the DeploymentStatus* constants.
	Status string

	// StatusDescription allows a human readable description of the deployment
	// status.
	StatusDescription string

	// Raft indexes.
	CreateIndex uint64
	ModifyIndex uint64
}
// NewDeployment creates a new running deployment for the given job, copying
// the job's identifying indexes and pre-sizing the task group state map.
func NewDeployment(job *Job) *Deployment {
	d := &Deployment{
		ID:                GenerateUUID(),
		JobID:             job.ID,
		JobVersion:        job.Version,
		JobModifyIndex:    job.ModifyIndex,
		JobCreateIndex:    job.CreateIndex,
		Status:            DeploymentStatusRunning,
		StatusDescription: DeploymentStatusDescriptionRunning,
	}
	d.TaskGroups = make(map[string]*DeploymentState, len(job.TaskGroups))
	return d
}
// Copy returns a copy of the deployment, deep-copying the per-task-group
// state map. A nil receiver yields nil.
func (d *Deployment) Copy() *Deployment {
	if d == nil {
		return nil
	}

	c := new(Deployment)
	*c = *d
	c.TaskGroups = nil

	if d.TaskGroups != nil {
		c.TaskGroups = make(map[string]*DeploymentState, len(d.TaskGroups))
		for name, state := range d.TaskGroups {
			c.TaskGroups[name] = state.Copy()
		}
	}

	return c
}
// Active returns whether the deployment is active (running or paused) as
// opposed to terminal.
func (d *Deployment) Active() bool {
	return d.Status == DeploymentStatusRunning || d.Status == DeploymentStatusPaused
}
// GetID is a helper for getting the ID when the object may be nil.
func (d *Deployment) GetID() string {
	if d != nil {
		return d.ID
	}
	return ""
}
// HasPlacedCanaries returns whether any task group in the deployment has
// placed canary allocations. Safe on a nil receiver.
func (d *Deployment) HasPlacedCanaries() bool {
	if d == nil {
		return false
	}
	for _, state := range d.TaskGroups {
		if len(state.PlacedCanaries) > 0 {
			return true
		}
	}
	return false
}
// RequiresPromotion returns whether the deployment requires promotion to
// continue: it must be running and have a task group with unpromoted
// desired canaries. Safe on a nil receiver.
func (d *Deployment) RequiresPromotion() bool {
	if d == nil || d.Status != DeploymentStatusRunning {
		return false
	}
	for _, state := range d.TaskGroups {
		if state.DesiredCanaries > 0 && !state.Promoted {
			return true
		}
	}
	return false
}
// GoString returns a multi-line debug description of the deployment and the
// state of each of its task groups (group order follows map iteration).
func (d *Deployment) GoString() string {
	out := fmt.Sprintf("Deployment ID %q for job %q has status %q (%v):", d.ID, d.JobID, d.Status, d.StatusDescription)
	for group, state := range d.TaskGroups {
		out += fmt.Sprintf("\nTask Group %q has state:\n%#v", group, state)
	}
	return out
}
// DeploymentState tracks the state of a deployment for a given task group.
type DeploymentState struct {
	// AutoRevert marks whether the task group has indicated the job should be
	// reverted on failure.
	AutoRevert bool

	// Promoted marks whether the canaries have been promoted.
	Promoted bool

	// PlacedCanaries is the set of placed canary allocations (allocation IDs).
	PlacedCanaries []string

	// DesiredCanaries is the number of canaries that should be created.
	DesiredCanaries int

	// DesiredTotal is the total number of allocations that should be created as
	// part of the deployment.
	DesiredTotal int

	// PlacedAllocs is the number of allocations that have been placed.
	PlacedAllocs int

	// HealthyAllocs is the number of allocations that have been marked healthy.
	HealthyAllocs int

	// UnhealthyAllocs are allocations that have been marked as unhealthy.
	UnhealthyAllocs int
}
// GoString returns a tab-indented, multi-line debug summary of the state.
func (d *DeploymentState) GoString() string {
	return fmt.Sprintf(
		"\tDesired Total: %d\n\tDesired Canaries: %d\n\tPlaced Canaries: %#v\n\tPromoted: %v\n\tPlaced: %d\n\tHealthy: %d\n\tUnhealthy: %d\n\tAutoRevert: %v",
		d.DesiredTotal, d.DesiredCanaries, d.PlacedCanaries, d.Promoted,
		d.PlacedAllocs, d.HealthyAllocs, d.UnhealthyAllocs, d.AutoRevert)
}
// Copy returns a copy of the state, cloning the placed-canaries slice.
func (d *DeploymentState) Copy() *DeploymentState {
	c := new(DeploymentState)
	*c = *d
	c.PlacedCanaries = helper.CopySliceString(d.PlacedCanaries)
	return c
}
// DeploymentStatusUpdate is used to update the status of a given deployment.
type DeploymentStatusUpdate struct {
	// DeploymentID is the ID of the deployment to update.
	DeploymentID string

	// Status is the new status of the deployment.
	Status string

	// StatusDescription is the new status description of the deployment.
	StatusDescription string
}
// Desired statuses the server can assign to an allocation.
const (
	AllocDesiredStatusRun   = "run"   // Allocation should run
	AllocDesiredStatusStop  = "stop"  // Allocation should stop
	AllocDesiredStatusEvict = "evict" // Allocation should stop, and was evicted
)

// Statuses reported by the client for an allocation.
const (
	AllocClientStatusPending  = "pending"
	AllocClientStatusRunning  = "running"
	AllocClientStatusComplete = "complete"
	AllocClientStatusFailed   = "failed"
	AllocClientStatusLost     = "lost"
)
// Allocation is used to allocate the placement of a task group to a node.
type Allocation struct {
	// ID of the allocation (UUID).
	ID string

	// ID of the evaluation that generated this allocation.
	EvalID string

	// Name is a logical name of the allocation.
	Name string

	// NodeID is the node this is being placed on.
	NodeID string

	// Job is the parent job of the task group being allocated.
	// This is copied at allocation time to avoid issues if the job
	// definition is updated.
	JobID string
	Job   *Job

	// TaskGroup is the name of the task group that should be run.
	TaskGroup string

	// Resources is the total set of resources allocated as part
	// of this allocation of the task group.
	Resources *Resources

	// SharedResources are the resources that are shared by all the tasks in an
	// allocation.
	SharedResources *Resources

	// TaskResources is the set of resources allocated to each
	// task. These should sum to the total Resources.
	TaskResources map[string]*Resources

	// Metrics associated with this allocation.
	Metrics *AllocMetric

	// Desired Status of the allocation on the client.
	DesiredStatus string

	// DesiredStatusDescription is meant to provide more human useful information.
	DesiredDescription string

	// Status of the allocation on the client.
	ClientStatus string

	// ClientStatusDescription is meant to provide more human useful information.
	ClientDescription string

	// TaskStates stores the state of each task, keyed by task name.
	TaskStates map[string]*TaskState

	// PreviousAllocation is the allocation that this allocation is replacing.
	PreviousAllocation string

	// DeploymentID identifies an allocation as being created from a
	// particular deployment.
	DeploymentID string

	// DeploymentStatus captures the status of the allocation as part of the
	// given deployment.
	DeploymentStatus *AllocDeploymentStatus

	// Raft Indexes.
	CreateIndex uint64
	ModifyIndex uint64

	// AllocModifyIndex is not updated when the client updates allocations. This
	// lets the client pull only the allocs updated by the server.
	AllocModifyIndex uint64

	// CreateTime is the time the allocation has finished scheduling and been
	// verified by the plan applier.
	CreateTime int64
}
// Index returns the index of the allocation. If the allocation is from a task
// group with count greater than 1, there will be multiple allocations for it.
//
// The name is expected to look like "<jobID>.<taskGroup>[<index>]"; the
// digits between the brackets are parsed out. Names that are too short or
// unparsable yield 0.
func (a *Allocation) Index() uint {
	nameLen := len(a.Name)
	// Offset of the first index digit: job ID, task group, plus the
	// separating '.' and the opening '['.
	start := len(a.JobID) + len(a.TaskGroup) + 2
	if nameLen <= 3 || nameLen <= start {
		return 0
	}

	// Trim the trailing ']' and parse; a parse failure intentionally
	// falls back to zero (best-effort, matching historical behavior).
	num, _ := strconv.Atoi(a.Name[start : nameLen-1])
	return uint(num)
}
// Copy provides a deep copy of the allocation, including the job definition.
func (a *Allocation) Copy() *Allocation {
	return a.copyImpl(true)
}

// CopySkipJob provides a copy of the allocation but doesn't deep copy the job
func (a *Allocation) CopySkipJob() *Allocation {
	return a.copyImpl(false)
}
// copyImpl returns a deep copy of the allocation. The Job pointer is only
// deep-copied when deepCopyJob is true; otherwise the copy shares the
// original's *Job. A nil receiver yields nil.
func (a *Allocation) copyImpl(deepCopyJob bool) *Allocation {
	if a == nil {
		return nil
	}

	// Start with a shallow copy and then replace the pointer-valued
	// fields with deep copies.
	c := new(Allocation)
	*c = *a

	if deepCopyJob {
		c.Job = a.Job.Copy()
	}
	c.Resources = a.Resources.Copy()
	c.SharedResources = a.SharedResources.Copy()
	c.Metrics = a.Metrics.Copy()
	c.DeploymentStatus = a.DeploymentStatus.Copy()

	if a.TaskResources != nil {
		c.TaskResources = make(map[string]*Resources, len(a.TaskResources))
		for task, res := range a.TaskResources {
			c.TaskResources[task] = res.Copy()
		}
	}
	if a.TaskStates != nil {
		c.TaskStates = make(map[string]*TaskState, len(a.TaskStates))
		for task, state := range a.TaskStates {
			c.TaskStates[task] = state.Copy()
		}
	}
	return c
}
// TerminalStatus returns if the desired or actual status is terminal and
// will no longer transition. The desired status is consulted first; only
// if it is non-terminal is the client-reported status checked.
func (a *Allocation) TerminalStatus() bool {
	if a.DesiredStatus == AllocDesiredStatusStop ||
		a.DesiredStatus == AllocDesiredStatusEvict {
		return true
	}

	return a.ClientStatus == AllocClientStatusComplete ||
		a.ClientStatus == AllocClientStatusFailed ||
		a.ClientStatus == AllocClientStatusLost
}
// Terminated returns if the allocation is in a terminal state on a client.
// Unlike TerminalStatus, only the client-reported status is considered.
func (a *Allocation) Terminated() bool {
	switch a.ClientStatus {
	case AllocClientStatusComplete,
		AllocClientStatusFailed,
		AllocClientStatusLost:
		return true
	default:
		return false
	}
}
// RanSuccessfully returns whether the client has run the allocation and all
// tasks finished successfully.
func (a *Allocation) RanSuccessfully() bool {
	// No task states means the client hasn't started the allocation.
	if len(a.TaskStates) == 0 {
		return false
	}

	// Every task must have finished successfully.
	for _, state := range a.TaskStates {
		if !state.Successful() {
			return false
		}
	}
	return true
}
// ShouldMigrate returns if the allocation needs data migration: the
// allocation must not be stopping, its task group must exist with an
// ephemeral disk block, and that disk must be both sticky and marked for
// migration.
func (a *Allocation) ShouldMigrate() bool {
	switch a.DesiredStatus {
	case AllocDesiredStatusStop, AllocDesiredStatusEvict:
		return false
	}

	// Without a task group or an ephemeral disk block there is nothing
	// to migrate.
	tg := a.Job.LookupTaskGroup(a.TaskGroup)
	if tg == nil || tg.EphemeralDisk == nil {
		return false
	}

	return tg.EphemeralDisk.Migrate && tg.EphemeralDisk.Sticky
}
// Stub returns a list stub for the allocation: a trimmed-down view that
// omits the large embedded Job and Resources fields. Note the TaskStates
// and DeploymentStatus maps/pointers are shared, not copied.
func (a *Allocation) Stub() *AllocListStub {
	return &AllocListStub{
		ID:                 a.ID,
		EvalID:             a.EvalID,
		Name:               a.Name,
		NodeID:             a.NodeID,
		JobID:              a.JobID,
		JobVersion:         a.Job.Version,
		TaskGroup:          a.TaskGroup,
		DesiredStatus:      a.DesiredStatus,
		DesiredDescription: a.DesiredDescription,
		ClientStatus:       a.ClientStatus,
		ClientDescription:  a.ClientDescription,
		TaskStates:         a.TaskStates,
		DeploymentStatus:   a.DeploymentStatus,
		CreateIndex:        a.CreateIndex,
		ModifyIndex:        a.ModifyIndex,
		CreateTime:         a.CreateTime,
	}
}
// AllocListStub is used to return a subset of alloc information
// (see Allocation for the meaning of the individual fields).
type AllocListStub struct {
	ID                 string
	EvalID             string
	Name               string
	NodeID             string
	JobID              string
	JobVersion         uint64
	TaskGroup          string
	DesiredStatus      string
	DesiredDescription string
	ClientStatus       string
	ClientDescription  string
	TaskStates         map[string]*TaskState
	DeploymentStatus   *AllocDeploymentStatus
	CreateIndex        uint64
	ModifyIndex        uint64
	CreateTime         int64
}
// AllocMetric is used to track various metrics while attempting
// to make an allocation. These are used to debug a job, or to better
// understand the pressure within the system.
type AllocMetric struct {
	// NodesEvaluated is the number of nodes that were evaluated
	NodesEvaluated int

	// NodesFiltered is the number of nodes filtered due to a constraint
	NodesFiltered int

	// NodesAvailable is the number of nodes available for evaluation per DC.
	NodesAvailable map[string]int

	// ClassFiltered is the number of nodes filtered by class
	ClassFiltered map[string]int

	// ConstraintFiltered is the number of failures caused by constraint
	ConstraintFiltered map[string]int

	// NodesExhausted is the number of nodes skipped due to being
	// exhausted of at least one resource
	NodesExhausted int

	// ClassExhausted is the number of nodes exhausted by class
	ClassExhausted map[string]int

	// DimensionExhausted provides the count by dimension or reason
	DimensionExhausted map[string]int

	// Scores is the scores of the final few nodes remaining
	// for placement. The top score is typically selected.
	// Keys have the form "<nodeID>.<scorerName>" (see ScoreNode).
	Scores map[string]float64

	// AllocationTime is a measure of how long the allocation
	// attempt took. This can affect performance and SLAs.
	AllocationTime time.Duration

	// CoalescedFailures indicates the number of other
	// allocations that were coalesced into this failed allocation.
	// This is to prevent creating many failed allocations for a
	// single task group.
	CoalescedFailures int
}
// Copy returns a deep copy of the metric; the scalar counters are copied by
// the struct assignment and each map is duplicated. A nil receiver yields
// nil.
func (a *AllocMetric) Copy() *AllocMetric {
	if a == nil {
		return nil
	}
	na := new(AllocMetric)
	*na = *a
	na.NodesAvailable = helper.CopyMapStringInt(na.NodesAvailable)
	na.ClassFiltered = helper.CopyMapStringInt(na.ClassFiltered)
	na.ConstraintFiltered = helper.CopyMapStringInt(na.ConstraintFiltered)
	na.ClassExhausted = helper.CopyMapStringInt(na.ClassExhausted)
	na.DimensionExhausted = helper.CopyMapStringInt(na.DimensionExhausted)
	na.Scores = helper.CopyMapStringFloat64(na.Scores)
	return na
}
// EvaluateNode increments the count of nodes the scheduler has evaluated.
func (a *AllocMetric) EvaluateNode() {
	a.NodesEvaluated++
}
// FilterNode records that a node was filtered out during placement. When the
// node has a class, the rejection is also attributed to that class, and when
// a non-empty constraint is given, to that constraint. The attribution maps
// are lazily initialized on first use.
func (a *AllocMetric) FilterNode(node *Node, constraint string) {
	a.NodesFiltered++
	if node != nil && node.NodeClass != "" {
		if a.ClassFiltered == nil {
			a.ClassFiltered = make(map[string]int)
		}
		a.ClassFiltered[node.NodeClass]++
	}
	if constraint != "" {
		if a.ConstraintFiltered == nil {
			a.ConstraintFiltered = make(map[string]int)
		}
		a.ConstraintFiltered[constraint]++
	}
}
// ExhaustedNode records that a node was skipped because at least one of its
// resources was exhausted. When the node has a class, the exhaustion is also
// attributed to that class, and when a non-empty dimension is given, to that
// dimension. The attribution maps are lazily initialized on first use.
func (a *AllocMetric) ExhaustedNode(node *Node, dimension string) {
	a.NodesExhausted++
	if node != nil && node.NodeClass != "" {
		if a.ClassExhausted == nil {
			a.ClassExhausted = make(map[string]int)
		}
		a.ClassExhausted[node.NodeClass]++
	}
	if dimension != "" {
		if a.DimensionExhausted == nil {
			a.DimensionExhausted = make(map[string]int)
		}
		a.DimensionExhausted[dimension]++
	}
}
// ScoreNode records the placement score assigned to a node by the named
// scoring component, keyed as "<nodeID>.<name>". The Scores map is lazily
// initialized on first use.
func (a *AllocMetric) ScoreNode(node *Node, name string, score float64) {
	if a.Scores == nil {
		a.Scores = make(map[string]float64)
	}
	// Plain string concatenation avoids the interface boxing and
	// reflection cost of fmt.Sprintf on this per-node scheduling path.
	a.Scores[node.ID+"."+name] = score
}
// AllocDeploymentStatus captures the status of the allocation as part of the
// deployment. This can include things like if the allocation has been marked as
// healthy.
type AllocDeploymentStatus struct {
	// Healthy marks whether the allocation has been marked healthy or unhealthy
	// as part of a deployment. It can be unset (nil) if it has neither been
	// marked healthy nor unhealthy.
	Healthy *bool

	// ModifyIndex is the raft index in which the deployment status was last
	// changed.
	ModifyIndex uint64
}
// IsHealthy returns if the allocation has been explicitly marked healthy as
// part of a deployment. An unset (nil) health flag counts as not healthy.
func (a *AllocDeploymentStatus) IsHealthy() bool {
	if a == nil || a.Healthy == nil {
		return false
	}
	return *a.Healthy
}
// IsUnhealthy returns if the allocation has been explicitly marked unhealthy
// as part of a deployment. An unset (nil) health flag counts as not
// unhealthy.
func (a *AllocDeploymentStatus) IsUnhealthy() bool {
	if a == nil || a.Healthy == nil {
		return false
	}
	return !*a.Healthy
}
// Copy returns a deep copy of the deployment status; the Healthy pointer is
// duplicated so the copy can be mutated independently. A nil receiver yields
// nil.
func (a *AllocDeploymentStatus) Copy() *AllocDeploymentStatus {
	if a == nil {
		return nil
	}

	n := new(AllocDeploymentStatus)
	*n = *a
	if a.Healthy != nil {
		healthy := *a.Healthy
		n.Healthy = &healthy
	}
	return n
}
// EvalStatus* are the possible states of an evaluation. Complete, failed and
// cancelled are terminal (see Evaluation.TerminalStatus). Note the constant
// name uses the double-l spelling while the wire value is "canceled".
const (
	EvalStatusBlocked   = "blocked"
	EvalStatusPending   = "pending"
	EvalStatusComplete  = "complete"
	EvalStatusFailed    = "failed"
	EvalStatusCancelled = "canceled"
)

// EvalTrigger* enumerate the events that can cause an evaluation to be
// created (stored in Evaluation.TriggeredBy).
const (
	EvalTriggerJobRegister       = "job-register"
	EvalTriggerJobDeregister     = "job-deregister"
	EvalTriggerPeriodicJob       = "periodic-job"
	EvalTriggerNodeUpdate        = "node-update"
	EvalTriggerScheduled         = "scheduled"
	EvalTriggerRollingUpdate     = "rolling-update"
	EvalTriggerDeploymentWatcher = "deployment-watcher"
	EvalTriggerFailedFollowUp    = "failed-follow-up"
	EvalTriggerMaxPlans          = "max-plan-attempts"
)
// CoreJob* identify the internal housekeeping jobs run by the core
// scheduler.
const (
	// CoreJobEvalGC is used for the garbage collection of evaluations
	// and allocations. We periodically scan evaluations in a terminal state,
	// in which all the corresponding allocations are also terminal. We
	// delete these out of the system to bound the state.
	CoreJobEvalGC = "eval-gc"

	// CoreJobNodeGC is used for the garbage collection of failed nodes.
	// We periodically scan nodes in a terminal state, and if they have no
	// corresponding allocations we delete these out of the system.
	CoreJobNodeGC = "node-gc"

	// CoreJobJobGC is used for the garbage collection of eligible jobs. We
	// periodically scan garbage collectible jobs and check if both their
	// evaluations and allocations are terminal. If so, we delete these out of
	// the system.
	CoreJobJobGC = "job-gc"

	// CoreJobDeploymentGC is used for the garbage collection of eligible
	// deployments. We periodically scan garbage collectible deployments and
	// check if they are terminal. If so, we delete these out of the system.
	CoreJobDeploymentGC = "deployment-gc"

	// CoreJobForceGC is used to force garbage collection of all GCable objects.
	CoreJobForceGC = "force-gc"
)
// Evaluation is used anytime we need to apply business logic as a result
// of a change to our desired state (job specification) or the emergent state
// (registered nodes). When the inputs change, we need to "evaluate" them,
// potentially taking action (allocation of work) or doing nothing if the state
// of the world does not require it.
type Evaluation struct {
	// ID is a randomly generated UUID used for this evaluation. This
	// is assigned upon the creation of the evaluation.
	ID string

	// Priority is used to control scheduling importance and if this job
	// can preempt other jobs.
	Priority int

	// Type is used to control which schedulers are available to handle
	// this evaluation.
	Type string

	// TriggeredBy is used to give some insight into why this Eval
	// was created. (Job change, node failure, alloc failure, etc).
	// See the EvalTrigger* constants.
	TriggeredBy string

	// JobID is the job this evaluation is scoped to. Evaluations cannot
	// be run in parallel for a given JobID, so we serialize on this.
	JobID string

	// JobModifyIndex is the modify index of the job at the time
	// the evaluation was created
	JobModifyIndex uint64

	// NodeID is the node that was affected triggering the evaluation.
	NodeID string

	// NodeModifyIndex is the modify index of the node at the time
	// the evaluation was created
	NodeModifyIndex uint64

	// DeploymentID is the ID of the deployment that triggered the evaluation.
	DeploymentID string

	// Status of the evaluation; see the EvalStatus* constants.
	Status string

	// StatusDescription is meant to provide more human useful information
	StatusDescription string

	// Wait is a minimum wait time for running the eval. This is used to
	// support a rolling upgrade.
	Wait time.Duration

	// NextEval is the evaluation ID for the eval created to do a followup.
	// This is used to support rolling upgrades, where we need a chain of evaluations.
	NextEval string

	// PreviousEval is the evaluation ID for the eval creating this one to do a followup.
	// This is used to support rolling upgrades, where we need a chain of evaluations.
	PreviousEval string

	// BlockedEval is the evaluation ID for a created blocked eval. A
	// blocked eval will be created if all allocations could not be placed due
	// to constraints or lacking resources.
	BlockedEval string

	// FailedTGAllocs are task groups which have allocations that could not be
	// made, but the metrics are persisted so that the user can use the feedback
	// to determine the cause.
	FailedTGAllocs map[string]*AllocMetric

	// ClassEligibility tracks computed node classes that have been explicitly
	// marked as eligible or ineligible.
	ClassEligibility map[string]bool

	// EscapedComputedClass marks whether the job has constraints that are not
	// captured by computed node classes.
	EscapedComputedClass bool

	// AnnotatePlan triggers the scheduler to provide additional annotations
	// during the evaluation. This should not be set during normal operations.
	AnnotatePlan bool

	// QueuedAllocations is the number of unplaced allocations at the time the
	// evaluation was processed. The map is keyed by Task Group names.
	QueuedAllocations map[string]int

	// SnapshotIndex is the Raft index of the snapshot used to process the
	// evaluation. As such it will only be set once it has gone through the
	// scheduler.
	SnapshotIndex uint64

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
// TerminalStatus returns if the current status is terminal and
// will no longer transition.
func (e *Evaluation) TerminalStatus() bool {
	return e.Status == EvalStatusComplete ||
		e.Status == EvalStatusFailed ||
		e.Status == EvalStatusCancelled
}
// GoString implements fmt.GoStringer, giving evals a compact debug
// representation.
func (e *Evaluation) GoString() string {
	return fmt.Sprintf("<Eval '%s' JobID: '%s'>", e.ID, e.JobID)
}
// Copy returns a deep copy of the evaluation: scalar fields come from the
// struct assignment and each of the three maps is duplicated (AllocMetrics
// are themselves deep-copied). A nil receiver yields nil.
func (e *Evaluation) Copy() *Evaluation {
	if e == nil {
		return nil
	}

	c := new(Evaluation)
	*c = *e

	if e.ClassEligibility != nil {
		m := make(map[string]bool, len(e.ClassEligibility))
		for class, eligible := range e.ClassEligibility {
			m[class] = eligible
		}
		c.ClassEligibility = m
	}

	if e.FailedTGAllocs != nil {
		m := make(map[string]*AllocMetric, len(e.FailedTGAllocs))
		for tg, metric := range e.FailedTGAllocs {
			m[tg] = metric.Copy()
		}
		c.FailedTGAllocs = m
	}

	if e.QueuedAllocations != nil {
		m := make(map[string]int, len(e.QueuedAllocations))
		for tg, count := range e.QueuedAllocations {
			m[tg] = count
		}
		c.QueuedAllocations = m
	}

	return c
}
// ShouldEnqueue checks if a given evaluation should be enqueued into the
// eval_broker. Only pending evals are enqueued; any status outside the known
// set is a programmer error and panics.
func (e *Evaluation) ShouldEnqueue() bool {
	switch e.Status {
	case EvalStatusPending:
		return true
	case EvalStatusComplete, EvalStatusFailed, EvalStatusBlocked, EvalStatusCancelled:
		return false
	}
	panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
}
// ShouldBlock checks if a given evaluation should be entered into the blocked
// eval tracker. Only blocked evals qualify; any status outside the known set
// is a programmer error and panics.
func (e *Evaluation) ShouldBlock() bool {
	switch e.Status {
	case EvalStatusBlocked:
		return true
	case EvalStatusComplete, EvalStatusFailed, EvalStatusPending, EvalStatusCancelled:
		return false
	}
	panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
}
// MakePlan is used to make a plan from the given evaluation
// for a given Job
func (e *Evaluation) MakePlan(j *Job) *Plan {
	p := &Plan{
		EvalID:         e.ID,
		Priority:       e.Priority,
		Job:            j,
		NodeUpdate:     make(map[string][]*Allocation),
		NodeAllocation: make(map[string][]*Allocation),
	}
	// j may be nil (e.g. when deregistering a job); only then is
	// AllAtOnce left at its zero value.
	if j != nil {
		p.AllAtOnce = j.AllAtOnce
	}
	return p
}
// NextRollingEval creates an evaluation to followup this eval for rolling
// updates. The new eval is pending, chained to this one via PreviousEval,
// and carries the given minimum wait time.
func (e *Evaluation) NextRollingEval(wait time.Duration) *Evaluation {
	return &Evaluation{
		ID:             GenerateUUID(),
		Priority:       e.Priority,
		Type:           e.Type,
		TriggeredBy:    EvalTriggerRollingUpdate,
		JobID:          e.JobID,
		JobModifyIndex: e.JobModifyIndex,
		Status:         EvalStatusPending,
		Wait:           wait,
		PreviousEval:   e.ID,
	}
}

// CreateBlockedEval creates a blocked evaluation to followup this eval to place any
// failed allocations. It takes the classes marked explicitly eligible or
// ineligible and whether the job has escaped computed node classes.
func (e *Evaluation) CreateBlockedEval(classEligibility map[string]bool, escaped bool) *Evaluation {
	return &Evaluation{
		ID:                   GenerateUUID(),
		Priority:             e.Priority,
		Type:                 e.Type,
		TriggeredBy:          e.TriggeredBy,
		JobID:                e.JobID,
		JobModifyIndex:       e.JobModifyIndex,
		Status:               EvalStatusBlocked,
		PreviousEval:         e.ID,
		ClassEligibility:     classEligibility,
		EscapedComputedClass: escaped,
	}
}

// CreateFailedFollowUpEval creates a follow up evaluation when the current one
// has been marked as failed because it has hit the delivery limit and will not
// be retried by the eval_broker.
func (e *Evaluation) CreateFailedFollowUpEval(wait time.Duration) *Evaluation {
	return &Evaluation{
		ID:             GenerateUUID(),
		Priority:       e.Priority,
		Type:           e.Type,
		TriggeredBy:    EvalTriggerFailedFollowUp,
		JobID:          e.JobID,
		JobModifyIndex: e.JobModifyIndex,
		Status:         EvalStatusPending,
		Wait:           wait,
		PreviousEval:   e.ID,
	}
}
// Plan is used to submit a commit plan for task allocations. These
// are submitted to the leader which verifies that resources have
// not been overcommitted before admitting the plan.
type Plan struct {
	// EvalID is the evaluation ID this plan is associated with
	EvalID string

	// EvalToken is used to prevent a split-brain processing of
	// an evaluation. There should only be a single scheduler running
	// an Eval at a time, but this could be violated after a leadership
	// transition. This unique token is used to reject plans that are
	// being submitted from a different leader.
	EvalToken string

	// Priority is the priority of the upstream job
	Priority int

	// AllAtOnce is used to control if incremental scheduling of task groups
	// is allowed or if we must do a gang scheduling of the entire job.
	// If this is false, a plan may be partially applied. Otherwise, the
	// entire plan must be able to make progress.
	AllAtOnce bool

	// Job is the parent job of all the allocations in the Plan.
	// Since a Plan only involves a single Job, we can reduce the size
	// of the plan by only including it once.
	Job *Job

	// NodeUpdate contains all the allocations for each node. For each node,
	// this is a list of the allocations to update to either stop or evict.
	NodeUpdate map[string][]*Allocation

	// NodeAllocation contains all the allocations for each node.
	// The evicts must be considered prior to the allocations.
	NodeAllocation map[string][]*Allocation

	// Annotations contains annotations by the scheduler to be used by operators
	// to understand the decisions made by the scheduler.
	Annotations *PlanAnnotations

	// Deployment is the deployment created or updated by the scheduler that
	// should be applied by the planner.
	Deployment *Deployment

	// DeploymentUpdates is a set of status updates to apply to the given
	// deployments. This allows the scheduler to cancel any unneeded deployment
	// because the job is stopped or the update block is removed.
	DeploymentUpdates []*DeploymentStatusUpdate
}
// AppendUpdate marks the allocation for eviction. The clientStatus of the
// allocation may be optionally set by passing in a non-empty value.
func (p *Plan) AppendUpdate(alloc *Allocation, desiredStatus, desiredDesc, clientStatus string) {
	updated := new(Allocation)
	*updated = *alloc

	// When deregistering a job the plan has no job attached; recover it
	// from the allocation so the plan applier still has access to it.
	if p.Job == nil && updated.Job != nil {
		p.Job = updated.Job
	}

	// Normalize: the job reference and the resources can be rebuilt, so
	// strip them from the per-node entry to keep the plan small.
	updated.Job = nil
	updated.Resources = nil

	updated.DesiredStatus = desiredStatus
	updated.DesiredDescription = desiredDesc
	if clientStatus != "" {
		updated.ClientStatus = clientStatus
	}

	nodeID := alloc.NodeID
	p.NodeUpdate[nodeID] = append(p.NodeUpdate[nodeID], updated)
}
// PopUpdate removes the most recently appended update for the allocation's
// node, but only if that entry refers to this very allocation; otherwise it
// is a no-op. When the last entry for a node is removed, the node's key is
// deleted from the map entirely.
func (p *Plan) PopUpdate(alloc *Allocation) {
	updates := p.NodeUpdate[alloc.NodeID]
	last := len(updates) - 1
	if last < 0 || updates[last].ID != alloc.ID {
		return
	}
	if last == 0 {
		delete(p.NodeUpdate, alloc.NodeID)
		return
	}
	p.NodeUpdate[alloc.NodeID] = updates[:last]
}
// AppendAlloc adds the allocation to the plan's per-node placement list.
func (p *Plan) AppendAlloc(alloc *Allocation) {
	nodeID := alloc.NodeID
	p.NodeAllocation[nodeID] = append(p.NodeAllocation[nodeID], alloc)
}
// IsNoOp checks if this plan would do nothing: no updates, no placements,
// no deployment to apply and no deployment status updates.
func (p *Plan) IsNoOp() bool {
	if len(p.NodeUpdate) > 0 || len(p.NodeAllocation) > 0 {
		return false
	}
	return p.Deployment == nil && len(p.DeploymentUpdates) == 0
}
// PlanResult is the result of a plan submitted to the leader.
type PlanResult struct {
	// NodeUpdate contains all the updates that were committed.
	NodeUpdate map[string][]*Allocation

	// NodeAllocation contains all the allocations that were committed.
	NodeAllocation map[string][]*Allocation

	// Deployment is the deployment that was committed.
	Deployment *Deployment

	// DeploymentUpdates is the set of deployment updates that were committed.
	DeploymentUpdates []*DeploymentStatusUpdate

	// RefreshIndex is the index the worker should refresh state up to.
	// This allows all evictions and allocations to be materialized.
	// If any allocations were rejected due to stale data (node state,
	// over committed) this can be used to force a worker refresh.
	RefreshIndex uint64

	// AllocIndex is the Raft index in which the evictions and
	// allocations took place. This is used for the write index.
	AllocIndex uint64
}
// IsNoOp checks if this plan result would do nothing: nothing was committed
// and no deployment or deployment updates were applied.
func (p *PlanResult) IsNoOp() bool {
	if p.Deployment != nil {
		return false
	}
	return len(p.NodeUpdate) == 0 &&
		len(p.NodeAllocation) == 0 &&
		len(p.DeploymentUpdates) == 0
}
// FullCommit is used to check if all the allocations in a plan
// were committed as part of the result. Returns if there was
// a match, and the number of expected and actual allocations.
func (p *PlanResult) FullCommit(plan *Plan) (bool, int, int) {
	expected := 0
	actual := 0
	for name, allocList := range plan.NodeAllocation {
		expected += len(allocList)
		// A missing key yields a nil slice, whose len is zero, so no
		// comma-ok lookup is needed here.
		actual += len(p.NodeAllocation[name])
	}
	return actual == expected, expected, actual
}
// PlanAnnotations holds annotations made by the scheduler to give further debug
// information to operators.
type PlanAnnotations struct {
	// DesiredTGUpdates is the set of desired updates per task group.
	DesiredTGUpdates map[string]*DesiredUpdates
}

// DesiredUpdates is the set of changes the scheduler would like to make given
// sufficient resources and cluster capacity. Each field counts allocations
// that fall into the corresponding action bucket.
type DesiredUpdates struct {
	Ignore            uint64
	Place             uint64
	Migrate           uint64
	Stop              uint64
	InPlaceUpdate     uint64
	DestructiveUpdate uint64
	Canary            uint64
}

// GoString implements fmt.GoStringer, rendering the update counts in a
// compact debug form.
func (d *DesiredUpdates) GoString() string {
	return fmt.Sprintf("(place %d) (inplace %d) (destructive %d) (stop %d) (migrate %d) (ignore %d) (canary %d)",
		d.Place, d.InPlaceUpdate, d.DestructiveUpdate, d.Stop, d.Migrate, d.Ignore, d.Canary)
}
// MsgpackHandle is a shared handle for encoding/decoding of structs
var MsgpackHandle = func() *codec.MsgpackHandle {
	h := &codec.MsgpackHandle{RawToString: true}

	// Sets the default type for decoding a map into a nil interface{}.
	// This is necessary in particular because we store the driver configs as a
	// nil interface{}.
	h.MapType = reflect.TypeOf(map[string]interface{}(nil))
	return h
}()

var (
	// JsonHandle and JsonHandlePretty are the codec handles to JSON encode
	// structs. The pretty handle will add indents for easier human consumption.
	JsonHandle = &codec.JsonHandle{
		HTMLCharsAsIs: true,
	}
	JsonHandlePretty = &codec.JsonHandle{
		HTMLCharsAsIs: true,
		Indent:        4,
	}
)

// HashiMsgpackHandle is the shared handle for the hashicorp fork of the
// codec package, configured identically to MsgpackHandle.
var HashiMsgpackHandle = func() *hcodec.MsgpackHandle {
	h := &hcodec.MsgpackHandle{RawToString: true}

	// Sets the default type for decoding a map into a nil interface{}.
	// This is necessary in particular because we store the driver configs as a
	// nil interface{}.
	h.MapType = reflect.TypeOf(map[string]interface{}(nil))
	return h
}()
// Decode is used to decode a MsgPack encoded object
func Decode(buf []byte, out interface{}) error {
	return codec.NewDecoder(bytes.NewReader(buf), MsgpackHandle).Decode(out)
}

// Encode is used to encode a MsgPack object with type prefix
func Encode(t MessageType, msg interface{}) ([]byte, error) {
	var buf bytes.Buffer
	// The message type is written as a single leading byte so readers can
	// dispatch on it before decoding the payload.
	buf.WriteByte(uint8(t))
	err := codec.NewEncoder(&buf, MsgpackHandle).Encode(msg)
	return buf.Bytes(), err
}
// KeyringResponse is a unified key response and can be used for install,
// remove, use, as well as listing key queries.
type KeyringResponse struct {
	// Messages maps a node to its response message.
	Messages map[string]string
	// Keys maps a key to a count (e.g. how many members hold it).
	Keys map[string]int
	// NumNodes is the number of nodes that responded.
	NumNodes int
}

// KeyringRequest is request objects for serf key operations.
type KeyringRequest struct {
	Key string
}

// RecoverableError wraps an error and marks whether it is recoverable and could
// be retried or it is fatal.
type RecoverableError struct {
	// Err is the wrapped error's message.
	Err string
	// Recoverable is true when the operation may be retried.
	Recoverable bool
}
// NewRecoverableError is used to wrap an error and mark it as recoverable or
// not. A nil input error yields nil so callers can pass results through
// unconditionally.
func NewRecoverableError(e error, recoverable bool) error {
	if e == nil {
		return nil
	}

	wrapped := &RecoverableError{
		Err:         e.Error(),
		Recoverable: recoverable,
	}
	return wrapped
}
// WrapRecoverable wraps an existing error in a new RecoverableError with a new
// message. If the error was recoverable before the returned error is as well;
// otherwise it is unrecoverable.
func WrapRecoverable(msg string, err error) error {
	return &RecoverableError{Err: msg, Recoverable: IsRecoverable(err)}
}

// Error returns the wrapped message, satisfying the error interface.
func (r *RecoverableError) Error() string {
	return r.Err
}

// IsRecoverable returns whether the wrapped error may be retried.
func (r *RecoverableError) IsRecoverable() bool {
	return r.Recoverable
}
// Recoverable is an interface for errors to implement to indicate whether or
// not they are fatal or recoverable.
type Recoverable interface {
	error
	IsRecoverable() bool
}
// IsRecoverable returns true if error implements Recoverable and reports
// itself as recoverable. Errors that do not implement the interface
// (including nil) are treated as unrecoverable.
func IsRecoverable(e error) bool {
	re, ok := e.(Recoverable)
	return ok && re.IsRecoverable()
}
|
package lnwire
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"math"
"net"
"github.com/go-errors/errors"
"github.com/roasbeef/btcd/btcec"
"github.com/roasbeef/btcd/chaincfg/chainhash"
"github.com/roasbeef/btcd/wire"
"github.com/roasbeef/btcutil"
)
// MaxSliceLength is the maximum allowed length for any opaque byte slices in
// the wire protocol.
const MaxSliceLength = 65535

// PkScript is simple type definition which represents a raw serialized public
// key script.
type PkScript []byte

// addressType specifies the network protocol and version that should be used
// when connecting to a node at a particular address.
type addressType uint8
const (
	// noAddr denotes a blank address. An address of this type indicates
	// that a node doesn't have any advertised addresses.
	noAddr addressType = 0

	// tcp4Addr denotes an IPv4 TCP address.
	tcp4Addr addressType = 1

	// tcp6Addr denotes an IPv6 TCP address.
	tcp6Addr addressType = 2

	// v2OnionAddr denotes a version 2 Tor onion service address.
	v2OnionAddr addressType = 3

	// v3OnionAddr denotes a version 3 Tor (prop224) onion service
	// addresses
	v3OnionAddr addressType = 4
)
// AddrLen returns the number of bytes that it takes to encode the target
// address. Unknown address types report zero, the same as noAddr.
func (a addressType) AddrLen() uint16 {
	switch a {
	case tcp4Addr:
		// 4-byte IPv4 address plus a 2-byte port.
		return 6
	case tcp6Addr:
		// 16-byte IPv6 address plus a 2-byte port.
		return 18
	case v2OnionAddr:
		return 12
	case v3OnionAddr:
		return 37
	default:
		// Covers noAddr and any unrecognized type.
		return 0
	}
}
// writeElement is a one-stop shop to write the big endian representation of
// any element which is to be serialized for the wire protocol. The passed
// io.Writer should be backed by an appropriately sized byte slice, or be able
// to dynamically expand to accommodate additional data.
//
// TODO(roasbeef): this should eventually draw from a buffer pool for
// serialization.
// TODO(roasbeef): switch to var-ints for all?
func writeElement(w io.Writer, element interface{}) error {
switch e := element.(type) {
case uint8:
var b [1]byte
b[0] = e
if _, err := w.Write(b[:]); err != nil {
return err
}
case uint16:
var b [2]byte
binary.BigEndian.PutUint16(b[:], e)
if _, err := w.Write(b[:]); err != nil {
return err
}
case ErrorCode:
var b [2]byte
binary.BigEndian.PutUint16(b[:], uint16(e))
if _, err := w.Write(b[:]); err != nil {
return err
}
case MilliSatoshi:
var b [8]byte
binary.BigEndian.PutUint64(b[:], uint64(e))
if _, err := w.Write(b[:]); err != nil {
return err
}
case btcutil.Amount:
var b [8]byte
binary.BigEndian.PutUint64(b[:], uint64(e))
if _, err := w.Write(b[:]); err != nil {
return err
}
case uint32:
var b [4]byte
binary.BigEndian.PutUint32(b[:], e)
if _, err := w.Write(b[:]); err != nil {
return err
}
case uint64:
var b [8]byte
binary.BigEndian.PutUint64(b[:], e)
if _, err := w.Write(b[:]); err != nil {
return err
}
case *btcec.PublicKey:
if e == nil {
return fmt.Errorf("cannot write nil pubkey")
}
var b [33]byte
serializedPubkey := e.SerializeCompressed()
copy(b[:], serializedPubkey)
if _, err := w.Write(b[:]); err != nil {
return err
}
case []*btcec.Signature:
var b [2]byte
numSigs := uint16(len(e))
binary.BigEndian.PutUint16(b[:], numSigs)
if _, err := w.Write(b[:]); err != nil {
return err
}
for _, sig := range e {
if err := writeElement(w, sig); err != nil {
return err
}
}
case *btcec.Signature:
if e == nil {
return fmt.Errorf("cannot write nil signature")
}
var b [64]byte
err := serializeSigToWire(&b, e)
if err != nil {
return err
}
// Write buffer
if _, err = w.Write(b[:]); err != nil {
return err
}
case PingPayload:
var l [2]byte
binary.BigEndian.PutUint16(l[:], uint16(len(e)))
if _, err := w.Write(l[:]); err != nil {
return err
}
if _, err := w.Write(e[:]); err != nil {
return err
}
case PongPayload:
var l [2]byte
binary.BigEndian.PutUint16(l[:], uint16(len(e)))
if _, err := w.Write(l[:]); err != nil {
return err
}
if _, err := w.Write(e[:]); err != nil {
return err
}
case ErrorData:
var l [2]byte
binary.BigEndian.PutUint16(l[:], uint16(len(e)))
if _, err := w.Write(l[:]); err != nil {
return err
}
if _, err := w.Write(e[:]); err != nil {
return err
}
case OpaqueReason:
var l [2]byte
binary.BigEndian.PutUint16(l[:], uint16(len(e)))
if _, err := w.Write(l[:]); err != nil {
return err
}
if _, err := w.Write(e[:]); err != nil {
return err
}
case []byte:
if _, err := w.Write(e[:]); err != nil {
return err
}
case PkScript:
// The largest script we'll accept is a p2wsh which is exactly
// 34 bytes long.
scriptLength := len(e)
if scriptLength > 34 {
return fmt.Errorf("'PkScript' too long")
}
if err := wire.WriteVarBytes(w, 0, e); err != nil {
return err
}
case *FeatureVector:
if e == nil {
return fmt.Errorf("cannot write nil feature vector")
}
if err := e.Encode(w); err != nil {
return err
}
case wire.OutPoint:
var h [32]byte
copy(h[:], e.Hash[:])
if _, err := w.Write(h[:]); err != nil {
return err
}
if e.Index > math.MaxUint16 {
return fmt.Errorf("index for outpoint (%v) is "+
"greater than max index of %v", e.Index,
math.MaxUint16)
}
var idx [2]byte
binary.BigEndian.PutUint16(idx[:], uint16(e.Index))
if _, err := w.Write(idx[:]); err != nil {
return err
}
case ChannelID:
if _, err := w.Write(e[:]); err != nil {
return err
}
case FailCode:
if err := writeElement(w, uint16(e)); err != nil {
return err
}
case ShortChannelID:
// Check that field fit in 3 bytes and write the blockHeight
if e.BlockHeight > ((1 << 24) - 1) {
return errors.New("block height should fit in 3 bytes")
}
var blockHeight [4]byte
binary.BigEndian.PutUint32(blockHeight[:], e.BlockHeight)
if _, err := w.Write(blockHeight[1:]); err != nil {
return err
}
// Check that field fit in 3 bytes and write the txIndex
if e.TxIndex > ((1 << 24) - 1) {
return errors.New("tx index should fit in 3 bytes")
}
var txIndex [4]byte
binary.BigEndian.PutUint32(txIndex[:], e.TxIndex)
if _, err := w.Write(txIndex[1:]); err != nil {
return err
}
// Write the txPosition
var txPosition [2]byte
binary.BigEndian.PutUint16(txPosition[:], e.TxPosition)
if _, err := w.Write(txPosition[:]); err != nil {
return err
}
case *net.TCPAddr:
if e == nil {
return fmt.Errorf("cannot write nil TCPAddr")
}
// TODO(roasbeef): account for onion types too
if e.IP.To4() != nil {
var descriptor [1]byte
descriptor[0] = uint8(tcp4Addr)
if _, err := w.Write(descriptor[:]); err != nil {
return err
}
var ip [4]byte
copy(ip[:], e.IP.To4())
if _, err := w.Write(ip[:]); err != nil {
return err
}
} else {
var descriptor [1]byte
descriptor[0] = uint8(tcp6Addr)
if _, err := w.Write(descriptor[:]); err != nil {
return err
}
var ip [16]byte
copy(ip[:], e.IP.To16())
if _, err := w.Write(ip[:]); err != nil {
return err
}
}
var port [2]byte
binary.BigEndian.PutUint16(port[:], uint16(e.Port))
if _, err := w.Write(port[:]); err != nil {
return err
}
case []net.Addr:
// First, we'll encode all the addresses into an intermediate
// buffer. We need to do this in order to compute the total
// length of the addresses.
var addrBuf bytes.Buffer
for _, address := range e {
if err := writeElement(&addrBuf, address); err != nil {
return err
}
}
// With the addresses fully encoded, we can now write out the
// number of bytes needed to encode them.
addrLen := addrBuf.Len()
if err := writeElement(w, uint16(addrLen)); err != nil {
return err
}
// Finally, we'll write out the raw addresses themselves, but
// only if we have any bytes to write.
if addrLen > 0 {
if _, err := w.Write(addrBuf.Bytes()); err != nil {
return err
}
}
case RGB:
if err := writeElements(w, e.red, e.green, e.blue); err != nil {
return err
}
case DeliveryAddress:
var length [2]byte
binary.BigEndian.PutUint16(length[:], uint16(len(e)))
if _, err := w.Write(length[:]); err != nil {
return err
}
if _, err := w.Write(e[:]); err != nil {
return err
}
default:
return fmt.Errorf("Unknown type in writeElement: %T", e)
}
return nil
}
// writeElements serializes each element of the variadic argument list to the
// passed io.Writer in order, delegating to writeElement for each one.
// Serialization stops at the first element that fails to encode.
func writeElements(w io.Writer, elements ...interface{}) error {
	for _, item := range elements {
		if err := writeElement(w, item); err != nil {
			return err
		}
	}
	return nil
}
// readElement is a one-stop utility function to deserialize any datastructure
// encoded using the serialization format of lnwire. The passed element must
// be a pointer (or pointer-to-pointer) to one of the supported wire types,
// and is populated in place from the bytes read off of r.
func readElement(r io.Reader, element interface{}) error {
	var err error
	switch e := element.(type) {
	case *uint8:
		var b [1]uint8
		// Use io.ReadFull rather than r.Read directly: the io.Reader
		// contract permits a short (even zero-byte) read with a nil
		// error, which would leave *e unset without reporting failure.
		// All other cases below already use io.ReadFull.
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}
		*e = b[0]
	case *uint16:
		var b [2]byte
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}
		*e = binary.BigEndian.Uint16(b[:])
	case *ErrorCode:
		var b [2]byte
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}
		*e = ErrorCode(binary.BigEndian.Uint16(b[:]))
	case *uint32:
		var b [4]byte
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}
		*e = binary.BigEndian.Uint32(b[:])
	case *uint64:
		var b [8]byte
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}
		*e = binary.BigEndian.Uint64(b[:])
	case *MilliSatoshi:
		var b [8]byte
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}
		*e = MilliSatoshi(int64(binary.BigEndian.Uint64(b[:])))
	case *btcutil.Amount:
		var b [8]byte
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}
		*e = btcutil.Amount(int64(binary.BigEndian.Uint64(b[:])))
	case **btcec.PublicKey:
		// Public keys arrive in their 33-byte compressed form.
		var b [btcec.PubKeyBytesLenCompressed]byte
		if _, err = io.ReadFull(r, b[:]); err != nil {
			return err
		}

		pubKey, err := btcec.ParsePubKey(b[:], btcec.S256())
		if err != nil {
			return err
		}
		*e = pubKey
	case **FeatureVector:
		f, err := NewFeatureVectorFromReader(r)
		if err != nil {
			return err
		}

		*e = f
	case *[]*btcec.Signature:
		// Signature slices are prefixed with a 16-bit count of the
		// signatures that follow.
		var l [2]byte
		if _, err := io.ReadFull(r, l[:]); err != nil {
			return err
		}
		numSigs := binary.BigEndian.Uint16(l[:])

		var sigs []*btcec.Signature
		if numSigs > 0 {
			sigs = make([]*btcec.Signature, numSigs)
			for i := 0; i < int(numSigs); i++ {
				if err := readElement(r, &sigs[i]); err != nil {
					return err
				}
			}
		}

		*e = sigs
	case **btcec.Signature:
		// Signatures are read in their fixed 64-byte wire form.
		var b [64]byte
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}

		err = deserializeSigFromWire(e, b)
		if err != nil {
			return err
		}
	case *OpaqueReason:
		// Variable-length payloads carry a 16-bit length prefix.
		var l [2]byte
		if _, err := io.ReadFull(r, l[:]); err != nil {
			return err
		}
		reasonLen := binary.BigEndian.Uint16(l[:])

		*e = OpaqueReason(make([]byte, reasonLen))
		if _, err := io.ReadFull(r, *e); err != nil {
			return err
		}
	case *ErrorData:
		var l [2]byte
		if _, err := io.ReadFull(r, l[:]); err != nil {
			return err
		}
		errorLen := binary.BigEndian.Uint16(l[:])

		*e = ErrorData(make([]byte, errorLen))
		if _, err := io.ReadFull(r, *e); err != nil {
			return err
		}
	case *PingPayload:
		var l [2]byte
		if _, err := io.ReadFull(r, l[:]); err != nil {
			return err
		}
		pingLen := binary.BigEndian.Uint16(l[:])

		*e = PingPayload(make([]byte, pingLen))
		if _, err := io.ReadFull(r, *e); err != nil {
			return err
		}
	case *PongPayload:
		var l [2]byte
		if _, err := io.ReadFull(r, l[:]); err != nil {
			return err
		}
		pongLen := binary.BigEndian.Uint16(l[:])

		*e = PongPayload(make([]byte, pongLen))
		if _, err := io.ReadFull(r, *e); err != nil {
			return err
		}
	case []byte:
		// Raw byte slices are filled exactly; the caller dictates the
		// length via the slice it passes in.
		if _, err := io.ReadFull(r, e); err != nil {
			return err
		}
	case *PkScript:
		// Scripts are var-bytes encoded with a 34-byte cap (p2wsh).
		pkScript, err := wire.ReadVarBytes(r, 0, 34, "pkscript")
		if err != nil {
			return err
		}
		*e = pkScript
	case *wire.OutPoint:
		var h [32]byte
		if _, err = io.ReadFull(r, h[:]); err != nil {
			return err
		}
		hash, err := chainhash.NewHash(h[:])
		if err != nil {
			return err
		}

		// The output index is serialized as a 16-bit integer on the
		// wire, then widened back to the uint32 the struct carries.
		var idxBytes [2]byte
		_, err = io.ReadFull(r, idxBytes[:])
		if err != nil {
			return err
		}
		index := binary.BigEndian.Uint16(idxBytes[:])

		*e = wire.OutPoint{
			Hash:  *hash,
			Index: uint32(index),
		}
	case *FailCode:
		if err := readElement(r, (*uint16)(e)); err != nil {
			return err
		}
	case *ChannelID:
		if _, err := io.ReadFull(r, e[:]); err != nil {
			return err
		}
	case *ShortChannelID:
		// Block height and tx index are 3-byte fields; read each into
		// the low 3 bytes of a 4-byte buffer so the big-endian decode
		// yields the right value.
		var blockHeight [4]byte
		if _, err = io.ReadFull(r, blockHeight[1:]); err != nil {
			return err
		}

		var txIndex [4]byte
		if _, err = io.ReadFull(r, txIndex[1:]); err != nil {
			return err
		}

		var txPosition [2]byte
		if _, err = io.ReadFull(r, txPosition[:]); err != nil {
			return err
		}

		*e = ShortChannelID{
			BlockHeight: binary.BigEndian.Uint32(blockHeight[:]),
			TxIndex:     binary.BigEndian.Uint32(txIndex[:]),
			TxPosition:  binary.BigEndian.Uint16(txPosition[:]),
		}
	case *[]net.Addr:
		// First, we'll read the number of total bytes that have been
		// used to encode the set of addresses.
		var numAddrsBytes [2]byte
		if _, err = io.ReadFull(r, numAddrsBytes[:]); err != nil {
			return err
		}
		addrsLen := binary.BigEndian.Uint16(numAddrsBytes[:])

		// With the number of addresses, read, we'll now pull in the
		// buffer of the encoded addresses into memory.
		addrs := make([]byte, addrsLen)
		if _, err := io.ReadFull(r, addrs[:]); err != nil {
			return err
		}
		addrBuf := bytes.NewReader(addrs)

		// Finally, we'll parse the remaining address payload in
		// series, using the first byte to denote how to decode the
		// address itself.
		var (
			addresses     []net.Addr
			addrBytesRead uint16
		)

		for addrBytesRead < addrsLen {
			var descriptor [1]byte
			if _, err = io.ReadFull(addrBuf, descriptor[:]); err != nil {
				return err
			}

			// Idiomatic increment (was `addrBytesRead += 1`); this
			// accounts for the descriptor byte just consumed, so
			// the loop always makes progress even for zero-length
			// address types.
			addrBytesRead++

			address := &net.TCPAddr{}
			aType := addressType(descriptor[0])

			switch aType {
			case noAddr:
				addrBytesRead += aType.AddrLen()
				continue

			case tcp4Addr:
				var ip [4]byte
				if _, err = io.ReadFull(addrBuf, ip[:]); err != nil {
					return err
				}

				address.IP = (net.IP)(ip[:])

				var port [2]byte
				if _, err = io.ReadFull(addrBuf, port[:]); err != nil {
					return err
				}

				address.Port = int(binary.BigEndian.Uint16(port[:]))

				addrBytesRead += aType.AddrLen()

			case tcp6Addr:
				var ip [16]byte
				if _, err = io.ReadFull(addrBuf, ip[:]); err != nil {
					return err
				}

				address.IP = (net.IP)(ip[:])

				var port [2]byte
				if _, err = io.ReadFull(addrBuf, port[:]); err != nil {
					return err
				}

				address.Port = int(binary.BigEndian.Uint16(port[:]))

				addrBytesRead += aType.AddrLen()

			case v2OnionAddr:
				// Onion addresses are skipped over, not yet
				// surfaced to the caller.
				addrBytesRead += aType.AddrLen()
				continue

			case v3OnionAddr:
				addrBytesRead += aType.AddrLen()
				continue

			default:
				return fmt.Errorf("unknown address type: %v", aType)
			}

			addresses = append(addresses, address)
		}

		*e = addresses
	case *RGB:
		err := readElements(r,
			&e.red,
			&e.green,
			&e.blue,
		)
		if err != nil {
			return err
		}
	case *DeliveryAddress:
		var addrLen [2]byte
		if _, err = io.ReadFull(r, addrLen[:]); err != nil {
			return err
		}
		length := binary.BigEndian.Uint16(addrLen[:])

		// Bound the read by the fixed 34-byte scratch buffer.
		var addrBytes [34]byte
		if length > 34 {
			return fmt.Errorf("Cannot read %d bytes into addrBytes", length)
		}
		if _, err = io.ReadFull(r, addrBytes[:length]); err != nil {
			return err
		}
		*e = addrBytes[:length]
	default:
		return fmt.Errorf("Unknown type in readElement: %T", e)
	}

	return nil
}
// readElements deserializes a variable number of elements from the passed
// io.Reader, decoding each one via readElement and aborting on the first
// failure.
func readElements(r io.Reader, elements ...interface{}) error {
	for _, target := range elements {
		if err := readElement(r, target); err != nil {
			return err
		}
	}
	return nil
}
lnwire: fix linter error
package lnwire
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"math"
"net"
"github.com/go-errors/errors"
"github.com/roasbeef/btcd/btcec"
"github.com/roasbeef/btcd/chaincfg/chainhash"
"github.com/roasbeef/btcd/wire"
"github.com/roasbeef/btcutil"
)
// MaxSliceLength is the maximum allowed length for any opaque byte slices in
// the wire protocol.
const MaxSliceLength = 65535
// PkScript is a simple type definition which represents a raw serialized
// public key script.
type PkScript []byte
// addressType specifies the network protocol and version that should be used
// when connecting to a node at a particular address.
type addressType uint8

const (
	// noAddr denotes a blank address. An address of this type indicates
	// that a node doesn't have any advertised addresses.
	noAddr addressType = 0

	// tcp4Addr denotes an IPv4 TCP address.
	tcp4Addr addressType = 1

	// tcp6Addr denotes an IPv6 TCP address.
	tcp6Addr addressType = 2

	// v2OnionAddr denotes a version 2 Tor onion service address.
	v2OnionAddr addressType = 3

	// v3OnionAddr denotes a version 3 Tor (prop224) onion service
	// address.
	v3OnionAddr addressType = 4
)
// AddrLen returns the number of bytes that it takes to encode the target
// address type on the wire (excluding the one-byte descriptor itself).
// Unknown address types report a length of zero.
func (a addressType) AddrLen() uint16 {
	// Encoded payload sizes keyed by descriptor type. Types absent from
	// the table (unknown descriptors) fall through to the map's zero
	// value, matching the original switch's default of 0.
	addrLengths := map[addressType]uint16{
		noAddr:      0,
		tcp4Addr:    6,
		tcp6Addr:    18,
		v2OnionAddr: 12,
		v3OnionAddr: 37,
	}
	return addrLengths[a]
}
// writeElement is a one-stop shop to write the big endian representation of
// any element which is to be serialized for the wire protocol. The passed
// io.Writer should be backed by an appropriately sized byte slice, or be able
// to dynamically expand to accommodate additional data.
//
// TODO(roasbeef): this should eventually draw from a buffer pool for
// serialization.
// TODO(roasbeef): switch to var-ints for all?
func writeElement(w io.Writer, element interface{}) error {
	switch e := element.(type) {
	case uint8:
		var b [1]byte
		b[0] = e
		if _, err := w.Write(b[:]); err != nil {
			return err
		}
	case uint16:
		// Fixed-width integers are always written big endian.
		var b [2]byte
		binary.BigEndian.PutUint16(b[:], e)
		if _, err := w.Write(b[:]); err != nil {
			return err
		}
	case ErrorCode:
		var b [2]byte
		binary.BigEndian.PutUint16(b[:], uint16(e))
		if _, err := w.Write(b[:]); err != nil {
			return err
		}
	case MilliSatoshi:
		var b [8]byte
		binary.BigEndian.PutUint64(b[:], uint64(e))
		if _, err := w.Write(b[:]); err != nil {
			return err
		}
	case btcutil.Amount:
		var b [8]byte
		binary.BigEndian.PutUint64(b[:], uint64(e))
		if _, err := w.Write(b[:]); err != nil {
			return err
		}
	case uint32:
		var b [4]byte
		binary.BigEndian.PutUint32(b[:], e)
		if _, err := w.Write(b[:]); err != nil {
			return err
		}
	case uint64:
		var b [8]byte
		binary.BigEndian.PutUint64(b[:], e)
		if _, err := w.Write(b[:]); err != nil {
			return err
		}
	case *btcec.PublicKey:
		if e == nil {
			return fmt.Errorf("cannot write nil pubkey")
		}

		// Public keys are written in their 33-byte compressed form.
		var b [33]byte
		serializedPubkey := e.SerializeCompressed()
		copy(b[:], serializedPubkey)
		if _, err := w.Write(b[:]); err != nil {
			return err
		}
	case []*btcec.Signature:
		// A 16-bit count prefix is followed by each signature in turn.
		var b [2]byte
		numSigs := uint16(len(e))
		binary.BigEndian.PutUint16(b[:], numSigs)
		if _, err := w.Write(b[:]); err != nil {
			return err
		}

		for _, sig := range e {
			if err := writeElement(w, sig); err != nil {
				return err
			}
		}
	case *btcec.Signature:
		if e == nil {
			return fmt.Errorf("cannot write nil signature")
		}

		// Signatures are serialized in their fixed 64-byte wire form.
		var b [64]byte
		err := serializeSigToWire(&b, e)
		if err != nil {
			return err
		}

		// Write buffer
		if _, err = w.Write(b[:]); err != nil {
			return err
		}
	case PingPayload:
		// Variable-length payloads carry a 16-bit length prefix
		// followed by the raw bytes.
		var l [2]byte
		binary.BigEndian.PutUint16(l[:], uint16(len(e)))
		if _, err := w.Write(l[:]); err != nil {
			return err
		}

		if _, err := w.Write(e[:]); err != nil {
			return err
		}
	case PongPayload:
		var l [2]byte
		binary.BigEndian.PutUint16(l[:], uint16(len(e)))
		if _, err := w.Write(l[:]); err != nil {
			return err
		}

		if _, err := w.Write(e[:]); err != nil {
			return err
		}
	case ErrorData:
		var l [2]byte
		binary.BigEndian.PutUint16(l[:], uint16(len(e)))
		if _, err := w.Write(l[:]); err != nil {
			return err
		}

		if _, err := w.Write(e[:]); err != nil {
			return err
		}
	case OpaqueReason:
		var l [2]byte
		binary.BigEndian.PutUint16(l[:], uint16(len(e)))
		if _, err := w.Write(l[:]); err != nil {
			return err
		}

		if _, err := w.Write(e[:]); err != nil {
			return err
		}
	case []byte:
		// Raw byte slices are written as-is, with no length prefix.
		if _, err := w.Write(e[:]); err != nil {
			return err
		}
	case PkScript:
		// The largest script we'll accept is a p2wsh which is exactly
		// 34 bytes long.
		scriptLength := len(e)
		if scriptLength > 34 {
			return fmt.Errorf("'PkScript' too long")
		}

		if err := wire.WriteVarBytes(w, 0, e); err != nil {
			return err
		}
	case *FeatureVector:
		if e == nil {
			return fmt.Errorf("cannot write nil feature vector")
		}

		if err := e.Encode(w); err != nil {
			return err
		}
	case wire.OutPoint:
		var h [32]byte
		copy(h[:], e.Hash[:])
		if _, err := w.Write(h[:]); err != nil {
			return err
		}

		// The output index is serialized as a 16-bit integer on the
		// wire, so reject any index that would not round-trip.
		if e.Index > math.MaxUint16 {
			return fmt.Errorf("index for outpoint (%v) is "+
				"greater than max index of %v", e.Index,
				math.MaxUint16)
		}

		var idx [2]byte
		binary.BigEndian.PutUint16(idx[:], uint16(e.Index))
		if _, err := w.Write(idx[:]); err != nil {
			return err
		}
	case ChannelID:
		if _, err := w.Write(e[:]); err != nil {
			return err
		}
	case FailCode:
		if err := writeElement(w, uint16(e)); err != nil {
			return err
		}
	case ShortChannelID:
		// Check that field fit in 3 bytes and write the blockHeight
		if e.BlockHeight > ((1 << 24) - 1) {
			return errors.New("block height should fit in 3 bytes")
		}

		var blockHeight [4]byte
		binary.BigEndian.PutUint32(blockHeight[:], e.BlockHeight)
		if _, err := w.Write(blockHeight[1:]); err != nil {
			return err
		}

		// Check that field fit in 3 bytes and write the txIndex
		if e.TxIndex > ((1 << 24) - 1) {
			return errors.New("tx index should fit in 3 bytes")
		}

		var txIndex [4]byte
		binary.BigEndian.PutUint32(txIndex[:], e.TxIndex)
		if _, err := w.Write(txIndex[1:]); err != nil {
			return err
		}

		// Write the txPosition
		var txPosition [2]byte
		binary.BigEndian.PutUint16(txPosition[:], e.TxPosition)
		if _, err := w.Write(txPosition[:]); err != nil {
			return err
		}
	case *net.TCPAddr:
		if e == nil {
			return fmt.Errorf("cannot write nil TCPAddr")
		}

		// TODO(roasbeef): account for onion types too
		// A one-byte descriptor precedes the raw IP bytes so the
		// reader knows whether a 4-byte or 16-byte address follows.
		if e.IP.To4() != nil {
			var descriptor [1]byte
			descriptor[0] = uint8(tcp4Addr)
			if _, err := w.Write(descriptor[:]); err != nil {
				return err
			}

			var ip [4]byte
			copy(ip[:], e.IP.To4())
			if _, err := w.Write(ip[:]); err != nil {
				return err
			}
		} else {
			var descriptor [1]byte
			descriptor[0] = uint8(tcp6Addr)
			if _, err := w.Write(descriptor[:]); err != nil {
				return err
			}

			var ip [16]byte
			copy(ip[:], e.IP.To16())
			if _, err := w.Write(ip[:]); err != nil {
				return err
			}
		}

		var port [2]byte
		binary.BigEndian.PutUint16(port[:], uint16(e.Port))
		if _, err := w.Write(port[:]); err != nil {
			return err
		}
	case []net.Addr:
		// First, we'll encode all the addresses into an intermediate
		// buffer. We need to do this in order to compute the total
		// length of the addresses.
		var addrBuf bytes.Buffer
		for _, address := range e {
			if err := writeElement(&addrBuf, address); err != nil {
				return err
			}
		}

		// With the addresses fully encoded, we can now write out the
		// number of bytes needed to encode them.
		addrLen := addrBuf.Len()
		if err := writeElement(w, uint16(addrLen)); err != nil {
			return err
		}

		// Finally, we'll write out the raw addresses themselves, but
		// only if we have any bytes to write.
		if addrLen > 0 {
			if _, err := w.Write(addrBuf.Bytes()); err != nil {
				return err
			}
		}
	case RGB:
		if err := writeElements(w, e.red, e.green, e.blue); err != nil {
			return err
		}
	case DeliveryAddress:
		// A 16-bit length prefix followed by the raw address bytes.
		var length [2]byte
		binary.BigEndian.PutUint16(length[:], uint16(len(e)))
		if _, err := w.Write(length[:]); err != nil {
			return err
		}
		if _, err := w.Write(e[:]); err != nil {
			return err
		}
	default:
		return fmt.Errorf("Unknown type in writeElement: %T", e)
	}

	return nil
}
// writeElements serializes each element of the variadic argument list to the
// passed io.Writer in order, delegating to writeElement for each one.
// Serialization stops at the first element that fails to encode.
func writeElements(w io.Writer, elements ...interface{}) error {
	for _, item := range elements {
		if err := writeElement(w, item); err != nil {
			return err
		}
	}
	return nil
}
// readElement is a one-stop utility function to deserialize any datastructure
// encoded using the serialization format of lnwire. The passed element must
// be a pointer (or pointer-to-pointer) to one of the supported wire types,
// and is populated in place from the bytes read off of r.
func readElement(r io.Reader, element interface{}) error {
	var err error
	switch e := element.(type) {
	case *uint8:
		var b [1]uint8
		// Use io.ReadFull rather than r.Read directly: the io.Reader
		// contract permits a short (even zero-byte) read with a nil
		// error, which would leave *e unset without reporting failure.
		// All other cases below already use io.ReadFull.
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}
		*e = b[0]
	case *uint16:
		var b [2]byte
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}
		*e = binary.BigEndian.Uint16(b[:])
	case *ErrorCode:
		var b [2]byte
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}
		*e = ErrorCode(binary.BigEndian.Uint16(b[:]))
	case *uint32:
		var b [4]byte
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}
		*e = binary.BigEndian.Uint32(b[:])
	case *uint64:
		var b [8]byte
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}
		*e = binary.BigEndian.Uint64(b[:])
	case *MilliSatoshi:
		var b [8]byte
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}
		*e = MilliSatoshi(int64(binary.BigEndian.Uint64(b[:])))
	case *btcutil.Amount:
		var b [8]byte
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}
		*e = btcutil.Amount(int64(binary.BigEndian.Uint64(b[:])))
	case **btcec.PublicKey:
		// Public keys arrive in their 33-byte compressed form.
		var b [btcec.PubKeyBytesLenCompressed]byte
		if _, err = io.ReadFull(r, b[:]); err != nil {
			return err
		}

		pubKey, err := btcec.ParsePubKey(b[:], btcec.S256())
		if err != nil {
			return err
		}
		*e = pubKey
	case **FeatureVector:
		f, err := NewFeatureVectorFromReader(r)
		if err != nil {
			return err
		}

		*e = f
	case *[]*btcec.Signature:
		// Signature slices are prefixed with a 16-bit count of the
		// signatures that follow.
		var l [2]byte
		if _, err := io.ReadFull(r, l[:]); err != nil {
			return err
		}
		numSigs := binary.BigEndian.Uint16(l[:])

		var sigs []*btcec.Signature
		if numSigs > 0 {
			sigs = make([]*btcec.Signature, numSigs)
			for i := 0; i < int(numSigs); i++ {
				if err := readElement(r, &sigs[i]); err != nil {
					return err
				}
			}
		}

		*e = sigs
	case **btcec.Signature:
		// Signatures are read in their fixed 64-byte wire form.
		var b [64]byte
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}

		err = deserializeSigFromWire(e, b)
		if err != nil {
			return err
		}
	case *OpaqueReason:
		// Variable-length payloads carry a 16-bit length prefix.
		var l [2]byte
		if _, err := io.ReadFull(r, l[:]); err != nil {
			return err
		}
		reasonLen := binary.BigEndian.Uint16(l[:])

		*e = OpaqueReason(make([]byte, reasonLen))
		if _, err := io.ReadFull(r, *e); err != nil {
			return err
		}
	case *ErrorData:
		var l [2]byte
		if _, err := io.ReadFull(r, l[:]); err != nil {
			return err
		}
		errorLen := binary.BigEndian.Uint16(l[:])

		*e = ErrorData(make([]byte, errorLen))
		if _, err := io.ReadFull(r, *e); err != nil {
			return err
		}
	case *PingPayload:
		var l [2]byte
		if _, err := io.ReadFull(r, l[:]); err != nil {
			return err
		}
		pingLen := binary.BigEndian.Uint16(l[:])

		*e = PingPayload(make([]byte, pingLen))
		if _, err := io.ReadFull(r, *e); err != nil {
			return err
		}
	case *PongPayload:
		var l [2]byte
		if _, err := io.ReadFull(r, l[:]); err != nil {
			return err
		}
		pongLen := binary.BigEndian.Uint16(l[:])

		*e = PongPayload(make([]byte, pongLen))
		if _, err := io.ReadFull(r, *e); err != nil {
			return err
		}
	case []byte:
		// Raw byte slices are filled exactly; the caller dictates the
		// length via the slice it passes in.
		if _, err := io.ReadFull(r, e); err != nil {
			return err
		}
	case *PkScript:
		// Scripts are var-bytes encoded with a 34-byte cap (p2wsh).
		pkScript, err := wire.ReadVarBytes(r, 0, 34, "pkscript")
		if err != nil {
			return err
		}
		*e = pkScript
	case *wire.OutPoint:
		var h [32]byte
		if _, err = io.ReadFull(r, h[:]); err != nil {
			return err
		}
		hash, err := chainhash.NewHash(h[:])
		if err != nil {
			return err
		}

		// The output index is serialized as a 16-bit integer on the
		// wire, then widened back to the uint32 the struct carries.
		var idxBytes [2]byte
		_, err = io.ReadFull(r, idxBytes[:])
		if err != nil {
			return err
		}
		index := binary.BigEndian.Uint16(idxBytes[:])

		*e = wire.OutPoint{
			Hash:  *hash,
			Index: uint32(index),
		}
	case *FailCode:
		if err := readElement(r, (*uint16)(e)); err != nil {
			return err
		}
	case *ChannelID:
		if _, err := io.ReadFull(r, e[:]); err != nil {
			return err
		}
	case *ShortChannelID:
		// Block height and tx index are 3-byte fields; read each into
		// the low 3 bytes of a 4-byte buffer so the big-endian decode
		// yields the right value.
		var blockHeight [4]byte
		if _, err = io.ReadFull(r, blockHeight[1:]); err != nil {
			return err
		}

		var txIndex [4]byte
		if _, err = io.ReadFull(r, txIndex[1:]); err != nil {
			return err
		}

		var txPosition [2]byte
		if _, err = io.ReadFull(r, txPosition[:]); err != nil {
			return err
		}

		*e = ShortChannelID{
			BlockHeight: binary.BigEndian.Uint32(blockHeight[:]),
			TxIndex:     binary.BigEndian.Uint32(txIndex[:]),
			TxPosition:  binary.BigEndian.Uint16(txPosition[:]),
		}
	case *[]net.Addr:
		// First, we'll read the number of total bytes that have been
		// used to encode the set of addresses.
		var numAddrsBytes [2]byte
		if _, err = io.ReadFull(r, numAddrsBytes[:]); err != nil {
			return err
		}
		addrsLen := binary.BigEndian.Uint16(numAddrsBytes[:])

		// With the number of addresses, read, we'll now pull in the
		// buffer of the encoded addresses into memory.
		addrs := make([]byte, addrsLen)
		if _, err := io.ReadFull(r, addrs[:]); err != nil {
			return err
		}
		addrBuf := bytes.NewReader(addrs)

		// Finally, we'll parse the remaining address payload in
		// series, using the first byte to denote how to decode the
		// address itself.
		var (
			addresses     []net.Addr
			addrBytesRead uint16
		)

		for addrBytesRead < addrsLen {
			var descriptor [1]byte
			if _, err = io.ReadFull(addrBuf, descriptor[:]); err != nil {
				return err
			}

			// Account for the descriptor byte just consumed; this
			// guarantees forward progress even for zero-length
			// address types.
			addrBytesRead++

			address := &net.TCPAddr{}
			aType := addressType(descriptor[0])

			switch aType {
			case noAddr:
				addrBytesRead += aType.AddrLen()
				continue

			case tcp4Addr:
				var ip [4]byte
				if _, err = io.ReadFull(addrBuf, ip[:]); err != nil {
					return err
				}

				address.IP = (net.IP)(ip[:])

				var port [2]byte
				if _, err = io.ReadFull(addrBuf, port[:]); err != nil {
					return err
				}

				address.Port = int(binary.BigEndian.Uint16(port[:]))

				addrBytesRead += aType.AddrLen()

			case tcp6Addr:
				var ip [16]byte
				if _, err = io.ReadFull(addrBuf, ip[:]); err != nil {
					return err
				}

				address.IP = (net.IP)(ip[:])

				var port [2]byte
				if _, err = io.ReadFull(addrBuf, port[:]); err != nil {
					return err
				}

				address.Port = int(binary.BigEndian.Uint16(port[:]))

				addrBytesRead += aType.AddrLen()

			case v2OnionAddr:
				// Onion addresses are skipped over, not yet
				// surfaced to the caller.
				addrBytesRead += aType.AddrLen()
				continue

			case v3OnionAddr:
				addrBytesRead += aType.AddrLen()
				continue

			default:
				return fmt.Errorf("unknown address type: %v", aType)
			}

			addresses = append(addresses, address)
		}

		*e = addresses
	case *RGB:
		err := readElements(r,
			&e.red,
			&e.green,
			&e.blue,
		)
		if err != nil {
			return err
		}
	case *DeliveryAddress:
		var addrLen [2]byte
		if _, err = io.ReadFull(r, addrLen[:]); err != nil {
			return err
		}
		length := binary.BigEndian.Uint16(addrLen[:])

		// Bound the read by the fixed 34-byte scratch buffer.
		var addrBytes [34]byte
		if length > 34 {
			return fmt.Errorf("Cannot read %d bytes into addrBytes", length)
		}
		if _, err = io.ReadFull(r, addrBytes[:length]); err != nil {
			return err
		}
		*e = addrBytes[:length]
	default:
		return fmt.Errorf("Unknown type in readElement: %T", e)
	}

	return nil
}
// readElements deserializes a variable number of elements from the passed
// io.Reader, decoding each one via readElement and aborting on the first
// failure.
func readElements(r io.Reader, elements ...interface{}) error {
	for _, target := range elements {
		if err := readElement(r, target); err != nil {
			return err
		}
	}
	return nil
}
|
package structs
import (
"bytes"
"container/heap"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/base32"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"hash"
"hash/crc32"
"math"
"net"
"os"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/hashicorp/nomad/helper/escapingfs"
"golang.org/x/crypto/blake2b"
"github.com/hashicorp/cronexpr"
"github.com/hashicorp/go-msgpack/codec"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-version"
"github.com/hashicorp/nomad/acl"
"github.com/hashicorp/nomad/command/agent/host"
"github.com/hashicorp/nomad/command/agent/pprof"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/args"
"github.com/hashicorp/nomad/helper/constraints/semver"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/lib/cpuset"
"github.com/hashicorp/nomad/lib/kheap"
psstructs "github.com/hashicorp/nomad/plugins/shared/structs"
"github.com/miekg/dns"
"github.com/mitchellh/copystructure"
)
var (
	// validPolicyName is used to validate a policy name: 1-128
	// characters drawn from letters, digits, and hyphens.
	validPolicyName = regexp.MustCompile("^[a-zA-Z0-9-]{1,128}$")

	// b32 is a lowercase base32 encoding for use in URL friendly service
	// hashes.
	b32 = base32.NewEncoding(strings.ToLower("abcdefghijklmnopqrstuvwxyz234567"))
)
// MessageType is the type tag applied to each Raft log entry so the FSM
// knows how to decode and apply it.
type MessageType uint8

// note: new raft message types need to be added to the end of this
// list of contents
const (
	NodeRegisterRequestType                      MessageType = 0
	NodeDeregisterRequestType                    MessageType = 1
	NodeUpdateStatusRequestType                  MessageType = 2
	NodeUpdateDrainRequestType                   MessageType = 3
	JobRegisterRequestType                       MessageType = 4
	JobDeregisterRequestType                     MessageType = 5
	EvalUpdateRequestType                        MessageType = 6
	EvalDeleteRequestType                        MessageType = 7
	AllocUpdateRequestType                       MessageType = 8
	AllocClientUpdateRequestType                 MessageType = 9
	ReconcileJobSummariesRequestType             MessageType = 10
	VaultAccessorRegisterRequestType             MessageType = 11
	VaultAccessorDeregisterRequestType           MessageType = 12
	ApplyPlanResultsRequestType                  MessageType = 13
	DeploymentStatusUpdateRequestType            MessageType = 14
	DeploymentPromoteRequestType                 MessageType = 15
	DeploymentAllocHealthRequestType             MessageType = 16
	DeploymentDeleteRequestType                  MessageType = 17
	JobStabilityRequestType                      MessageType = 18
	ACLPolicyUpsertRequestType                   MessageType = 19
	ACLPolicyDeleteRequestType                   MessageType = 20
	ACLTokenUpsertRequestType                    MessageType = 21
	ACLTokenDeleteRequestType                    MessageType = 22
	ACLTokenBootstrapRequestType                 MessageType = 23
	AutopilotRequestType                         MessageType = 24
	UpsertNodeEventsType                         MessageType = 25
	JobBatchDeregisterRequestType                MessageType = 26
	AllocUpdateDesiredTransitionRequestType      MessageType = 27
	NodeUpdateEligibilityRequestType             MessageType = 28
	BatchNodeUpdateDrainRequestType              MessageType = 29
	SchedulerConfigRequestType                   MessageType = 30
	NodeBatchDeregisterRequestType               MessageType = 31
	ClusterMetadataRequestType                   MessageType = 32
	ServiceIdentityAccessorRegisterRequestType   MessageType = 33
	ServiceIdentityAccessorDeregisterRequestType MessageType = 34
	CSIVolumeRegisterRequestType                 MessageType = 35
	CSIVolumeDeregisterRequestType               MessageType = 36
	CSIVolumeClaimRequestType                    MessageType = 37
	ScalingEventRegisterRequestType              MessageType = 38
	CSIVolumeClaimBatchRequestType               MessageType = 39
	CSIPluginDeleteRequestType                   MessageType = 40
	EventSinkUpsertRequestType                   MessageType = 41
	EventSinkDeleteRequestType                   MessageType = 42
	BatchEventSinkUpdateProgressType             MessageType = 43
	OneTimeTokenUpsertRequestType                MessageType = 44
	OneTimeTokenDeleteRequestType                MessageType = 45
	OneTimeTokenExpireRequestType                MessageType = 46
	ServiceRegistrationUpsertRequestType         MessageType = 47
	ServiceRegistrationDeleteByIDRequestType     MessageType = 48
	ServiceRegistrationDeleteByNodeIDRequestType MessageType = 49

	// Namespace types were moved from enterprise and therefore start at 64
	NamespaceUpsertRequestType MessageType = 64
	NamespaceDeleteRequestType MessageType = 65
)
const (
	// IgnoreUnknownTypeFlag is set along with a MessageType
	// to indicate that the message type can be safely ignored
	// if it is not recognized. This is for future proofing, so
	// that new commands can be added in a way that won't cause
	// old servers to crash when the FSM attempts to process them.
	IgnoreUnknownTypeFlag MessageType = 128

	// MsgTypeTestSetup is used during testing when calling state store
	// methods directly that require an FSM MessageType
	MsgTypeTestSetup MessageType = IgnoreUnknownTypeFlag

	// Artifact getter modes.
	GetterModeAny  = "any"
	GetterModeFile = "file"
	GetterModeDir  = "dir"

	// maxPolicyDescriptionLength limits a policy description length
	maxPolicyDescriptionLength = 256

	// maxTokenNameLength limits an ACL token name length
	maxTokenNameLength = 256

	// ACLClientToken and ACLManagementToken are the only types of tokens
	ACLClientToken     = "client"
	ACLManagementToken = "management"

	// DefaultNamespace is the default namespace.
	DefaultNamespace            = "default"
	DefaultNamespaceDescription = "Default shared namespace"

	// AllNamespacesSentinel is the value used as a namespace RPC value
	// to indicate that endpoints must search in all namespaces
	AllNamespacesSentinel = "*"

	// maxNamespaceDescriptionLength limits a namespace description length
	maxNamespaceDescriptionLength = 256

	// JitterFraction is the limit to the amount of jitter we apply
	// to a user specified MaxQueryTime. We divide the specified time by
	// the fraction. So 16 == 6.25% limit of jitter. This jitter is also
	// applied to RPCHoldTimeout.
	JitterFraction = 16

	// MaxRetainedNodeEvents is the maximum number of node events that will be
	// retained for a single node
	MaxRetainedNodeEvents = 10

	// MaxRetainedNodeScores is the number of top scoring nodes for which we
	// retain scoring metadata
	MaxRetainedNodeScores = 5

	// Normalized scorer name
	NormScorerName = "normalized-score"

	// MaxBlockingRPCQueryTime is used to bound the limit of a blocking query
	MaxBlockingRPCQueryTime = 300 * time.Second

	// DefaultBlockingRPCQueryTime is the amount of time we block waiting for a change
	// if no time is specified. Previously we would wait the MaxBlockingRPCQueryTime.
	DefaultBlockingRPCQueryTime = 300 * time.Second
)
var (
	// validNamespaceName is used to validate a namespace name: 1-128
	// characters drawn from letters, digits, and hyphens.
	validNamespaceName = regexp.MustCompile("^[a-zA-Z0-9-]{1,128}$")
)
// NamespacedID is a tuple of an ID and a namespace.
type NamespacedID struct {
	// ID is the object's unique identifier within its namespace.
	ID string

	// Namespace is the namespace that scopes the ID.
	Namespace string
}
// NewNamespacedID constructs a NamespacedID tuple from the given ID and
// namespace.
func NewNamespacedID(id, ns string) NamespacedID {
	nsID := NamespacedID{}
	nsID.ID = id
	nsID.Namespace = ns
	return nsID
}
// String renders the tuple in the form <ns: "namespace", id: "id">.
func (n NamespacedID) String() string {
	rendered := fmt.Sprintf("<ns: %q, id: %q>", n.Namespace, n.ID)
	return rendered
}
// RPCInfo is used to describe common information about an RPC request.
type RPCInfo interface {
	// RequestRegion returns the target region of the request.
	RequestRegion() string

	// IsRead reports whether the RPC is a read operation.
	IsRead() bool

	// AllowStaleRead reports whether the request may be served with
	// stale data (i.e. by any follower).
	AllowStaleRead() bool

	// IsForwarded reports whether the RPC has already been forwarded
	// from another server.
	IsForwarded() bool

	// SetForwarded marks the RPC as having been forwarded.
	SetForwarded()

	// TimeToBlock returns how long this request is allowed to block.
	TimeToBlock() time.Duration

	// SetTimeToBlock sets how long this request can block. The requested time may not be possible,
	// so Callers should readback TimeToBlock. E.g. you cannot set time to block at all on WriteRequests
	// and it cannot exceed MaxBlockingRPCQueryTime
	SetTimeToBlock(t time.Duration)
}
// InternalRpcInfo allows adding internal RPC metadata to an RPC. This struct
// should NOT be replicated in the API package as it is internal only.
type InternalRpcInfo struct {
	// Forwarded marks whether the RPC has been forwarded from another
	// server.
	Forwarded bool
}
// IsForwarded returns whether the RPC is forwarded from another server.
// It satisfies part of the RPCInfo interface.
func (i *InternalRpcInfo) IsForwarded() bool {
	return i.Forwarded
}
// SetForwarded marks that the RPC is being forwarded from another server.
// It satisfies part of the RPCInfo interface.
func (i *InternalRpcInfo) SetForwarded() {
	i.Forwarded = true
}
// QueryOptions is used to specify various flags for read queries.
type QueryOptions struct {
	// Region is the target region for this query.
	Region string

	// Namespace is the target namespace for the query.
	//
	// Since handlers do not have a default value set they should access
	// the Namespace via the RequestNamespace method.
	//
	// Requests accessing specific namespaced objects must check ACLs
	// against the namespace of the object, not the namespace in the
	// request.
	Namespace string

	// MinQueryIndex, if set, causes the query to wait until it exceeds
	// the given index. Must be provided with MaxQueryTime.
	MinQueryIndex uint64

	// MaxQueryTime is provided with MinQueryIndex to bound how long the
	// query waits for a change.
	MaxQueryTime time.Duration

	// AllowStale, if set, lets any follower service the request. Results
	// may be arbitrarily stale.
	AllowStale bool

	// Prefix, if set, is used as a prefix for resource list searches.
	Prefix string

	// AuthToken is the secret portion of the ACL token used for the
	// request.
	AuthToken string

	// Filter specifies the go-bexpr filter expression to be used for
	// filtering the data prior to returning a response
	Filter string

	// PerPage is the number of entries to be returned in queries that support
	// paginated lists.
	PerPage int32

	// NextToken is the token used to indicate where to start paging
	// for queries that support paginated lists. This token should be
	// the ID of the next object after the last one seen in the
	// previous response.
	NextToken string

	// Reverse is used to reverse the default order of list results.
	Reverse bool

	// InternalRpcInfo carries internal forwarding metadata.
	InternalRpcInfo
}
// TimeToBlock returns the effective blocking duration for this query:
// MaxQueryTime capped at MaxBlockingRPCQueryTime, defaulting to
// DefaultBlockingRPCQueryTime when unset. Non-blocking queries
// (MinQueryIndex == 0) always return 0.
func (q QueryOptions) TimeToBlock() time.Duration {
	if q.MinQueryIndex == 0 {
		return 0
	}
	switch {
	case q.MaxQueryTime > MaxBlockingRPCQueryTime:
		return MaxBlockingRPCQueryTime
	case q.MaxQueryTime <= 0:
		return DefaultBlockingRPCQueryTime
	default:
		return q.MaxQueryTime
	}
}
// SetTimeToBlock stores the requested blocking duration in MaxQueryTime.
// The stored value may be adjusted when read back through TimeToBlock,
// which applies the default and the maximum cap.
func (q *QueryOptions) SetTimeToBlock(t time.Duration) {
	q.MaxQueryTime = t
}
// RequestRegion returns the region this query is targeting.
func (q QueryOptions) RequestRegion() string {
	return q.Region
}
// RequestNamespace returns the request's namespace, falling back to the
// default namespace when none was sent explicitly.
//
// Requests accessing specific namespaced objects must check ACLs against
// the namespace of the object, not the namespace in the request.
func (q QueryOptions) RequestNamespace() string {
	if ns := q.Namespace; ns != "" {
		return ns
	}
	return DefaultNamespace
}
// IsRead reports whether this request is a read. Query options always
// describe reads, so this is unconditionally true.
func (q QueryOptions) IsRead() bool {
	return true
}
// AllowStaleRead reports whether any follower may service this query,
// potentially returning arbitrarily stale results.
func (q QueryOptions) AllowStaleRead() bool {
	return q.AllowStale
}
// AgentPprofRequest is used to request a pprof report for a given node.
type AgentPprofRequest struct {
	// ReqType specifies the profile to use
	ReqType pprof.ReqType
	// Profile specifies the runtime/pprof profile to lookup and generate.
	Profile string
	// Seconds is the number of seconds to capture a profile
	Seconds int
	// Debug specifies if pprof profile should include debug output
	Debug int
	// GC specifies if the profile should call runtime.GC() before
	// running its profile. This is only used for "heap" profiles
	GC int
	// NodeID is the node we want to track the logs of
	NodeID string
	// ServerID is the server we want to track the logs of
	ServerID string
	QueryOptions
}
// AgentPprofResponse is used to return a generated pprof profile
type AgentPprofResponse struct {
	// ID of the agent that fulfilled the request
	AgentID string
	// Payload is the generated pprof profile
	Payload []byte
	// HTTPHeaders are a set of key value pairs to be applied as
	// HTTP headers for a specific runtime profile
	HTTPHeaders map[string]string
}
// WriteRequest is the common set of options for write RPCs.
type WriteRequest struct {
	// The target region for this write
	Region string
	// Namespace is the target namespace for the write.
	//
	// Since RPC handlers do not have a default value set they should
	// access the Namespace via the RequestNamespace method.
	//
	// Requests accessing specific namespaced objects must check ACLs
	// against the namespace of the object, not the namespace in the
	// request.
	Namespace string
	// AuthToken is secret portion of the ACL token used for the request
	AuthToken string
	// IdempotencyToken can be used to ensure the write is idempotent.
	IdempotencyToken string
	InternalRpcInfo
}
// TimeToBlock always returns 0: write requests never block.
func (w WriteRequest) TimeToBlock() time.Duration {
	return 0
}
// SetTimeToBlock is a no-op: writes cannot block, so the requested
// duration is discarded and TimeToBlock continues to report 0.
func (w WriteRequest) SetTimeToBlock(_ time.Duration) {
}
// RequestRegion returns the target region of this write request.
func (w WriteRequest) RequestRegion() string {
	return w.Region
}
// RequestNamespace returns the request's namespace, falling back to the
// default namespace when none was sent explicitly.
//
// Requests accessing specific namespaced objects must check ACLs against
// the namespace of the object, not the namespace in the request.
func (w WriteRequest) RequestNamespace() string {
	if ns := w.Namespace; ns != "" {
		return ns
	}
	return DefaultNamespace
}
// IsRead reports whether this request is a read. Writes never are, so
// this is unconditionally false.
func (w WriteRequest) IsRead() bool {
	return false
}
// AllowStaleRead reports whether stale reads are permitted. Writes must
// always be handled by the leader, so this is unconditionally false.
func (w WriteRequest) AllowStaleRead() bool {
	return false
}
// QueryMeta allows a query response to include potentially
// useful metadata about a query
type QueryMeta struct {
	// This is the index associated with the read
	Index uint64
	// If AllowStale is used, this is time elapsed since
	// last contact between the follower and leader. This
	// can be used to gauge staleness.
	LastContact time.Duration
	// Used to indicate if there is a known leader node
	KnownLeader bool
	// NextToken is the token returned with queries that support
	// paginated lists. To resume paging from this point, pass
	// this token in the next request's QueryOptions.
	NextToken string
}
// WriteMeta allows a write response to include potentially
// useful metadata about the write
type WriteMeta struct {
	// This is the index associated with the write
	Index uint64
}
// NodeRegisterRequest is used for Node.Register endpoint
// to register a node as being a schedulable entity.
type NodeRegisterRequest struct {
	// Node is the node being registered.
	Node *Node
	// NodeEvent is an optional event to record against the node.
	NodeEvent *NodeEvent
	WriteRequest
}
// NodeDeregisterRequest is used for Node.Deregister endpoint
// to deregister a node as being a schedulable entity.
type NodeDeregisterRequest struct {
	// NodeID is the ID of the node to deregister.
	NodeID string
	WriteRequest
}
// NodeBatchDeregisterRequest is used for Node.BatchDeregister endpoint
// to deregister a batch of nodes from being schedulable entities.
type NodeBatchDeregisterRequest struct {
	// NodeIDs are the IDs of the nodes to deregister.
	NodeIDs []string
	WriteRequest
}
// NodeServerInfo is used in NodeUpdateResponse to return Nomad server
// information used in RPC server lists.
type NodeServerInfo struct {
	// RPCAdvertiseAddr is the IP endpoint that a Nomad Server wishes to
	// be contacted at for RPCs.
	RPCAdvertiseAddr string
	// RPCMajorVersion is the major version number the Nomad Server
	// supports
	RPCMajorVersion int32
	// RPCMinorVersion is the minor version number the Nomad Server
	// supports
	RPCMinorVersion int32
	// Datacenter is the datacenter that a Nomad server belongs to
	Datacenter string
}
// NodeUpdateStatusRequest is used for Node.UpdateStatus endpoint
// to update the status of a node.
type NodeUpdateStatusRequest struct {
	// NodeID is the ID of the node being updated.
	NodeID string
	// Status is the new status for the node.
	Status string
	// NodeEvent is an optional event to record against the node.
	NodeEvent *NodeEvent
	// UpdatedAt represents server time of receiving request
	UpdatedAt int64
	WriteRequest
}
// NodeUpdateDrainRequest is used for updating the drain strategy
type NodeUpdateDrainRequest struct {
	// NodeID is the ID of the node whose drain strategy is updated.
	NodeID string
	// DrainStrategy is the new strategy; nil removes the drain.
	DrainStrategy *DrainStrategy
	// MarkEligible marks the node as eligible if removing the drain strategy.
	MarkEligible bool
	// NodeEvent is the event added to the node
	NodeEvent *NodeEvent
	// UpdatedAt represents server time of receiving request
	UpdatedAt int64
	// Meta is user-provided metadata relating to the drain operation
	Meta map[string]string
	WriteRequest
}
// BatchNodeUpdateDrainRequest is used for updating the drain strategy for a
// batch of nodes
type BatchNodeUpdateDrainRequest struct {
	// Updates is a mapping of nodes to their updated drain strategy
	Updates map[string]*DrainUpdate
	// NodeEvents is a mapping of the node to the event to add to the node
	NodeEvents map[string]*NodeEvent
	// UpdatedAt represents server time of receiving request
	UpdatedAt int64
	WriteRequest
}
// DrainUpdate is used to update the drain of a node
type DrainUpdate struct {
	// DrainStrategy is the new strategy for the node
	DrainStrategy *DrainStrategy
	// MarkEligible marks the node as eligible if removing the drain strategy.
	MarkEligible bool
}
// NodeUpdateEligibilityRequest is used for updating the scheduling eligibility
type NodeUpdateEligibilityRequest struct {
	// NodeID is the ID of the node whose eligibility is updated.
	NodeID string
	// Eligibility is the new scheduling eligibility value.
	Eligibility string
	// NodeEvent is the event added to the node
	NodeEvent *NodeEvent
	// UpdatedAt represents server time of receiving request
	UpdatedAt int64
	WriteRequest
}
// NodeEvaluateRequest is used to re-evaluate the node
type NodeEvaluateRequest struct {
	// NodeID is the ID of the node to re-evaluate.
	NodeID string
	WriteRequest
}
// NodeSpecificRequest is used when we just need to specify a target node
type NodeSpecificRequest struct {
	// NodeID is the ID of the target node.
	NodeID string
	// SecretID authenticates the node for client-originated requests.
	SecretID string
	QueryOptions
}
// JobRegisterRequest is used for Job.Register endpoint
// to register a job as being a schedulable entity.
type JobRegisterRequest struct {
	// Job is the job specification to register.
	Job *Job
	// If EnforceIndex is set then the job will only be registered if the passed
	// JobModifyIndex matches the current Jobs index. If the index is zero, the
	// register only occurs if the job is new.
	EnforceIndex bool
	// JobModifyIndex is the index to check when EnforceIndex is set.
	JobModifyIndex uint64
	// PreserveCounts indicates that during job update, existing task group
	// counts should be preserved, over those specified in the new job spec
	// PreserveCounts is ignored for newly created jobs.
	PreserveCounts bool
	// PolicyOverride is set when the user is attempting to override any policies
	PolicyOverride bool
	// EvalPriority is an optional priority to use on any evaluation created as
	// a result on this job registration. This value must be between 1-100
	// inclusively, where a larger value corresponds to a higher priority. This
	// is useful when an operator wishes to push through a job registration in
	// busy clusters with a large evaluation backlog. This avoids needing to
	// change the job priority which also impacts preemption.
	EvalPriority int
	// Eval is the evaluation that is associated with the job registration
	Eval *Evaluation
	WriteRequest
}
// JobDeregisterRequest is used for Job.Deregister endpoint
// to deregister a job as being a schedulable entity.
type JobDeregisterRequest struct {
	// JobID is the ID of the job to deregister.
	JobID string
	// Purge controls whether the deregister purges the job from the system or
	// whether the job is just marked as stopped and will be removed by the
	// garbage collector
	Purge bool
	// Global controls whether all regions of a multi-region job are
	// deregistered. It is ignored for single-region jobs.
	Global bool
	// EvalPriority is an optional priority to use on any evaluation created as
	// a result on this job deregistration. This value must be between 1-100
	// inclusively, where a larger value corresponds to a higher priority. This
	// is useful when an operator wishes to push through a job deregistration
	// in busy clusters with a large evaluation backlog.
	EvalPriority int
	// NoShutdownDelay, if set to true, will override the group and
	// task shutdown_delay configuration and ignore the delay for any
	// allocations stopped as a result of this Deregister call.
	NoShutdownDelay bool
	// Eval is the evaluation to create that's associated with job deregister
	Eval *Evaluation
	WriteRequest
}
// JobBatchDeregisterRequest is used to batch deregister jobs and upsert
// evaluations.
type JobBatchDeregisterRequest struct {
	// Jobs is the set of jobs to deregister
	Jobs map[NamespacedID]*JobDeregisterOptions
	// Evals is the set of evaluations to create.
	Evals []*Evaluation
	WriteRequest
}
// JobDeregisterOptions configures how a job is deregistered.
type JobDeregisterOptions struct {
	// Purge controls whether the deregister purges the job from the system or
	// whether the job is just marked as stopped and will be removed by the
	// garbage collector
	Purge bool
}
// JobEvaluateRequest is used when we just need to re-evaluate a target job
type JobEvaluateRequest struct {
	// JobID is the ID of the job to evaluate.
	JobID string
	// EvalOptions holds flags controlling the forced evaluation.
	EvalOptions EvalOptions
	WriteRequest
}
// EvalOptions is used to encapsulate options when forcing a job evaluation
type EvalOptions struct {
	// ForceReschedule forces rescheduling of failed allocations.
	ForceReschedule bool
}
// JobSpecificRequest is used when we just need to specify a target job
type JobSpecificRequest struct {
	// JobID is the ID of the target job.
	JobID string
	// All includes all versions of the job when set.
	All bool
	QueryOptions
}
// JobListRequest is used to parameterize a list request
type JobListRequest struct {
	QueryOptions
}
// JobPlanRequest is used for the Job.Plan endpoint to trigger a dry-run
// evaluation of the Job.
type JobPlanRequest struct {
	// Job is the job specification to plan against.
	Job *Job
	Diff bool // Toggles an annotated diff
	// PolicyOverride is set when the user is attempting to override any policies
	PolicyOverride bool
	WriteRequest
}
// JobScaleRequest is used for the Job.Scale endpoint to scale one of the
// scaling targets in a job
type JobScaleRequest struct {
	// JobID is the ID of the job being scaled.
	JobID string
	// Target identifies the scaling target (namespace/job/group).
	Target map[string]string
	// Count is the new count for the task group; nil when only recording
	// a scaling event.
	Count *int64
	// Message is a human-readable description of the scaling event.
	Message string
	// Error marks the scaling event as an error state report.
	Error bool
	// Meta is arbitrary user-provided metadata for the scaling event.
	Meta map[string]interface{}
	// PolicyOverride is set when the user is attempting to override any policies
	PolicyOverride bool
	WriteRequest
}
// Validate ensures the scaling request is internally consistent: the
// namespace and job ID in the scaling target (when present) must match the
// request itself, a task group must be named, and any count must be
// non-negative and representable as an int. All failures are reported as
// 400-coded RPC errors.
func (r *JobScaleRequest) Validate() error {
	namespace := r.Target[ScalingTargetNamespace]
	if namespace != "" && namespace != r.RequestNamespace() {
		return NewErrRPCCoded(400, "namespace in payload did not match header")
	}
	// Use a coded error here so a job-ID mismatch surfaces as a 400 like
	// the other validation failures, instead of an uncoded (500-class)
	// error from fmt.Errorf.
	jobID := r.Target[ScalingTargetJob]
	if jobID != "" && jobID != r.JobID {
		return NewErrRPCCoded(400, "job ID in payload did not match URL")
	}
	groupName := r.Target[ScalingTargetGroup]
	if groupName == "" {
		return NewErrRPCCoded(400, "missing task group name for scaling action")
	}
	if r.Count != nil {
		if *r.Count < 0 {
			return NewErrRPCCoded(400, "scaling action count can't be negative")
		}
		if r.Error {
			return NewErrRPCCoded(400, "scaling action should not contain count if error is true")
		}
		// TaskGroup.Count is an int; reject values that do not survive the
		// int64 -> int round trip (relevant on 32-bit platforms).
		truncCount := int(*r.Count)
		if int64(truncCount) != *r.Count {
			return NewErrRPCCoded(400,
				fmt.Sprintf("new scaling count is too large for TaskGroup.Count (int): %v", r.Count))
		}
	}
	return nil
}
// JobSummaryRequest is used when we just need to get a specific job summary
type JobSummaryRequest struct {
	// JobID is the ID of the job whose summary is requested.
	JobID string
	QueryOptions
}
// JobScaleStatusRequest is used to get the scale status for a job
type JobScaleStatusRequest struct {
	// JobID is the ID of the job whose scale status is requested.
	JobID string
	QueryOptions
}
// JobDispatchRequest is used to dispatch a job based on a parameterized job
type JobDispatchRequest struct {
	// JobID is the ID of the parameterized job to dispatch.
	JobID string
	// Payload is the opaque payload passed to the dispatched job.
	Payload []byte
	// Meta is the metadata merged into the dispatched job.
	Meta map[string]string
	WriteRequest
}
// JobValidateRequest is used to validate a job
type JobValidateRequest struct {
	// Job is the job specification to validate.
	Job *Job
	WriteRequest
}
// JobRevertRequest is used to revert a job to a prior version.
type JobRevertRequest struct {
	// JobID is the ID of the job being reverted
	JobID string
	// JobVersion the version to revert to.
	JobVersion uint64
	// EnforcePriorVersion if set will enforce that the job is at the given
	// version before reverting.
	EnforcePriorVersion *uint64
	// ConsulToken is the Consul token that proves the submitter of the job revert
	// has access to the Service Identity policies associated with the job's
	// Consul Connect enabled services. This field is only used to transfer the
	// token and is not stored after the Job revert.
	ConsulToken string
	// VaultToken is the Vault token that proves the submitter of the job revert
	// has access to any Vault policies specified in the targeted job version. This
	// field is only used to transfer the token and is not stored after the Job
	// revert.
	VaultToken string
	WriteRequest
}
// JobStabilityRequest is used to marked a job as stable.
type JobStabilityRequest struct {
	// Job to set the stability on
	JobID string
	// JobVersion is the version of the job whose stability is set.
	JobVersion uint64
	// Set the stability
	Stable bool
	WriteRequest
}
// JobStabilityResponse is the response when marking a job as stable.
type JobStabilityResponse struct {
	WriteMeta
}
// NodeListRequest is used to parameterize a list request
type NodeListRequest struct {
	QueryOptions
	// Fields optionally restricts which node fields are returned.
	Fields *NodeStubFields
}
// EvalUpdateRequest is used for upserting evaluations.
type EvalUpdateRequest struct {
	// Evals is the set of evaluations to upsert.
	Evals []*Evaluation
	// EvalToken authenticates the update against the dequeued evaluation.
	EvalToken string
	WriteRequest
}
// EvalDeleteRequest is used for deleting an evaluation.
type EvalDeleteRequest struct {
	// Evals are the IDs of the evaluations to delete.
	Evals []string
	// Allocs are the IDs of the allocations to delete.
	Allocs []string
	WriteRequest
}
// EvalSpecificRequest is used when we just need to specify a target evaluation
type EvalSpecificRequest struct {
	// EvalID is the ID of the target evaluation.
	EvalID string
	// IncludeRelated also returns evaluations related to this one.
	IncludeRelated bool
	QueryOptions
}
// EvalAckRequest is used to Ack/Nack a specific evaluation
type EvalAckRequest struct {
	// EvalID is the ID of the evaluation being acked/nacked.
	EvalID string
	// Token is the dequeue token proving ownership of the evaluation.
	Token string
	WriteRequest
}
// EvalDequeueRequest is used when we want to dequeue an evaluation
type EvalDequeueRequest struct {
	// Schedulers are the scheduler types this worker can service.
	Schedulers []string
	// Timeout bounds how long the dequeue may block.
	Timeout time.Duration
	// SchedulerVersion is the version of the dequeuing scheduler.
	SchedulerVersion uint16
	WriteRequest
}
// EvalListRequest is used to list the evaluations
type EvalListRequest struct {
	// FilterJobID, when set, restricts results to this job.
	FilterJobID string
	// FilterEvalStatus, when set, restricts results to this status.
	FilterEvalStatus string
	QueryOptions
}
// ShouldBeFiltered indicates that the eval should be filtered (that is,
// removed) from the results: it does not match a non-empty job-ID or
// eval-status filter on the request.
func (req *EvalListRequest) ShouldBeFiltered(e *Evaluation) bool {
	jobMismatch := req.FilterJobID != "" && req.FilterJobID != e.JobID
	statusMismatch := req.FilterEvalStatus != "" && req.FilterEvalStatus != e.Status
	return jobMismatch || statusMismatch
}
// PlanRequest is used to submit an allocation plan to the leader
type PlanRequest struct {
	// Plan is the allocation plan to apply.
	Plan *Plan
	WriteRequest
}
// ApplyPlanResultsRequest is used by the planner to apply a Raft transaction
// committing the result of a plan.
type ApplyPlanResultsRequest struct {
	// AllocUpdateRequest holds the allocation updates to be made by the
	// scheduler.
	AllocUpdateRequest
	// Deployment is the deployment created or updated as a result of a
	// scheduling event.
	Deployment *Deployment
	// DeploymentUpdates is a set of status updates to apply to the given
	// deployments. This allows the scheduler to cancel any unneeded deployment
	// because the job is stopped or the update block is removed.
	DeploymentUpdates []*DeploymentStatusUpdate
	// EvalID is the eval ID of the plan being applied. The modify index of the
	// evaluation is updated as part of applying the plan to ensure that subsequent
	// scheduling events for the same job will wait for the index that last produced
	// state changes. This is necessary for blocked evaluations since they can be
	// processed many times, potentially making state updates, without the state of
	// the evaluation itself being updated.
	EvalID string
	// COMPAT 0.11
	// NodePreemptions is a slice of allocations from other lower priority jobs
	// that are preempted. Preempted allocations are marked as evicted.
	// Deprecated: Replaced with AllocsPreempted which contains only the diff
	NodePreemptions []*Allocation
	// AllocsPreempted is a slice of allocation diffs from other lower priority jobs
	// that are preempted. Preempted allocations are marked as evicted.
	AllocsPreempted []*AllocationDiff
	// PreemptionEvals is a slice of follow up evals for jobs whose allocations
	// have been preempted to place allocs in this plan
	PreemptionEvals []*Evaluation
}
// AllocUpdateRequest is used to submit changes to allocations, either
// to cause evictions or to assign new allocations. Both can be done
// within a single transaction
type AllocUpdateRequest struct {
	// COMPAT 0.11
	// Alloc is the list of new allocations to assign
	// Deprecated: Replaced with two separate slices, one containing stopped allocations
	// and another containing updated allocations
	Alloc []*Allocation
	// Allocations to stop. Contains only the diff, not the entire allocation
	AllocsStopped []*AllocationDiff
	// New or updated allocations
	AllocsUpdated []*Allocation
	// Evals is the list of new evaluations to create
	// Evals are valid only when used in the Raft RPC
	Evals []*Evaluation
	// Job is the shared parent job of the allocations.
	// It is pulled out since it is common to reduce payload size.
	Job *Job
	WriteRequest
}
// AllocUpdateDesiredTransitionRequest is used to submit changes to allocations
// desired transition state.
type AllocUpdateDesiredTransitionRequest struct {
	// Allocs is the mapping of allocation ids to their desired state
	// transition
	Allocs map[string]*DesiredTransition
	// Evals is the set of evaluations to create
	Evals []*Evaluation
	WriteRequest
}
// AllocStopRequest is used to stop and reschedule a running Allocation.
type AllocStopRequest struct {
	// AllocID is the ID of the allocation to stop.
	AllocID string
	// NoShutdownDelay ignores any configured shutdown_delay when stopping.
	NoShutdownDelay bool
	WriteRequest
}
// AllocStopResponse is the response to an `AllocStopRequest`
type AllocStopResponse struct {
	// EvalID is the id of the follow up evaluation for the rescheduled alloc.
	EvalID string
	WriteMeta
}
// AllocListRequest is used to request a list of allocations
type AllocListRequest struct {
	QueryOptions
	// Fields optionally restricts which allocation fields are returned.
	Fields *AllocStubFields
}
// AllocSpecificRequest is used to query a specific allocation
type AllocSpecificRequest struct {
	// AllocID is the ID of the target allocation.
	AllocID string
	QueryOptions
}
// AllocSignalRequest is used to signal a specific allocation
type AllocSignalRequest struct {
	// AllocID is the ID of the allocation to signal.
	AllocID string
	// Task, when set, restricts the signal to a single task.
	Task string
	// Signal is the name of the signal to send.
	Signal string
	QueryOptions
}
// AllocsGetRequest is used to query a set of allocations
type AllocsGetRequest struct {
	// AllocIDs are the IDs of the allocations to fetch.
	AllocIDs []string
	QueryOptions
}
// AllocRestartRequest is used to restart a specific allocations tasks.
type AllocRestartRequest struct {
	// AllocID is the ID of the allocation whose tasks are restarted.
	AllocID string
	// TaskName, when set, restricts the restart to a single task.
	TaskName string
	QueryOptions
}
// PeriodicForceRequest is used to force a specific periodic job.
type PeriodicForceRequest struct {
	// JobID is the ID of the periodic job to force.
	JobID string
	WriteRequest
}
// ServerMembersResponse has the list of servers in a cluster
type ServerMembersResponse struct {
	// ServerName is the name of the responding server.
	ServerName string
	// ServerRegion is the region of the responding server.
	ServerRegion string
	// ServerDC is the datacenter of the responding server.
	ServerDC string
	// Members are the known serf members of the cluster.
	Members []*ServerMember
}
// ServerMember holds information about a Nomad server agent in a cluster
type ServerMember struct {
	// Name is the server's serf member name.
	Name string
	// Addr and Port form the server's serf address.
	Addr net.IP
	Port uint16
	// Tags are the serf tags advertised by the member.
	Tags map[string]string
	// Status is the member's serf status (e.g. alive, failed).
	Status string
	// Protocol and delegate min/max/current version numbers advertised
	// by serf for compatibility negotiation.
	ProtocolMin uint8
	ProtocolMax uint8
	ProtocolCur uint8
	DelegateMin uint8
	DelegateMax uint8
	DelegateCur uint8
}
// ClusterMetadata is used to store per-cluster metadata.
type ClusterMetadata struct {
	// ClusterID uniquely identifies the cluster.
	ClusterID string
	// CreateTime is when the cluster metadata was created.
	CreateTime int64
}
// DeriveVaultTokenRequest is used to request wrapped Vault tokens for the
// following tasks in the given allocation
type DeriveVaultTokenRequest struct {
	// NodeID and SecretID identify and authenticate the requesting node.
	NodeID string
	SecretID string
	// AllocID is the allocation the tasks belong to.
	AllocID string
	// Tasks are the task names to derive tokens for.
	Tasks []string
	QueryOptions
}
// VaultAccessorsRequest is used to operate on a set of Vault accessors
type VaultAccessorsRequest struct {
	// Accessors are the Vault accessors to operate on.
	Accessors []*VaultAccessor
}
// VaultAccessor is a reference to a created Vault token on behalf of
// an allocation's task.
type VaultAccessor struct {
	// AllocID, Task, and NodeID identify what the token was created for.
	AllocID string
	Task string
	NodeID string
	// Accessor is the Vault accessor of the created token.
	Accessor string
	// CreationTTL is the TTL the token was created with.
	CreationTTL int
	// Raft Indexes
	CreateIndex uint64
}
// DeriveVaultTokenResponse returns the wrapped tokens for each requested task
type DeriveVaultTokenResponse struct {
	// Tasks is a mapping between the task name and the wrapped token
	Tasks map[string]string
	// Error stores any error that occurred. Errors are stored here so we can
	// communicate whether it is retryable
	Error *RecoverableError
	QueryMeta
}
// GenericRequest is used to request where no
// specific information is needed.
type GenericRequest struct {
	QueryOptions
}
// DeploymentListRequest is used to list the deployments
type DeploymentListRequest struct {
	QueryOptions
}
// DeploymentDeleteRequest is used for deleting deployments.
type DeploymentDeleteRequest struct {
	// Deployments are the IDs of the deployments to delete.
	Deployments []string
	WriteRequest
}
// DeploymentStatusUpdateRequest is used to update the status of a deployment as
// well as optionally creating an evaluation atomically.
type DeploymentStatusUpdateRequest struct {
	// Eval, if set, is used to create an evaluation at the same time as
	// updating the status of a deployment.
	Eval *Evaluation
	// DeploymentUpdate is a status update to apply to the given
	// deployment.
	DeploymentUpdate *DeploymentStatusUpdate
	// Job is used to optionally upsert a job. This is used when setting the
	// allocation health results in a deployment failure and the deployment
	// auto-reverts to the latest stable job.
	Job *Job
}
// DeploymentAllocHealthRequest is used to set the health of a set of
// allocations as part of a deployment.
type DeploymentAllocHealthRequest struct {
	// DeploymentID is the deployment the allocations belong to.
	DeploymentID string
	// Marks these allocations as healthy, allow further allocations
	// to be rolled.
	HealthyAllocationIDs []string
	// Any unhealthy allocations fail the deployment
	UnhealthyAllocationIDs []string
	WriteRequest
}
// ApplyDeploymentAllocHealthRequest is used to apply an alloc health request via Raft
type ApplyDeploymentAllocHealthRequest struct {
	DeploymentAllocHealthRequest
	// Timestamp is the timestamp to use when setting the allocations health.
	Timestamp time.Time
	// An optional field to update the status of a deployment
	DeploymentUpdate *DeploymentStatusUpdate
	// Job is used to optionally upsert a job. This is used when setting the
	// allocation health results in a deployment failure and the deployment
	// auto-reverts to the latest stable job.
	Job *Job
	// An optional evaluation to create after promoting the canaries
	Eval *Evaluation
}
// DeploymentPromoteRequest is used to promote task groups in a deployment
type DeploymentPromoteRequest struct {
	// DeploymentID is the deployment whose canaries are promoted.
	DeploymentID string
	// All is to promote all task groups
	All bool
	// Groups is used to set the promotion status per task group
	Groups []string
	WriteRequest
}
// ApplyDeploymentPromoteRequest is used to apply a promotion request via Raft
type ApplyDeploymentPromoteRequest struct {
	DeploymentPromoteRequest
	// An optional evaluation to create after promoting the canaries
	Eval *Evaluation
}
// DeploymentPauseRequest is used to pause a deployment
type DeploymentPauseRequest struct {
	// DeploymentID is the deployment to pause or resume.
	DeploymentID string
	// Pause sets the pause status
	Pause bool
	WriteRequest
}
// DeploymentRunRequest is used to remotely start a pending deployment.
// Used only for multiregion deployments.
type DeploymentRunRequest struct {
	// DeploymentID is the pending deployment to start.
	DeploymentID string
	WriteRequest
}
// DeploymentUnblockRequest is used to remotely unblock a deployment.
// Used only for multiregion deployments.
type DeploymentUnblockRequest struct {
	// DeploymentID is the deployment to unblock.
	DeploymentID string
	WriteRequest
}
// DeploymentCancelRequest is used to remotely cancel a deployment.
// Used only for multiregion deployments.
type DeploymentCancelRequest struct {
	// DeploymentID is the deployment to cancel.
	DeploymentID string
	WriteRequest
}
// DeploymentSpecificRequest is used to make a request specific to a particular
// deployment
type DeploymentSpecificRequest struct {
	// DeploymentID is the target deployment.
	DeploymentID string
	QueryOptions
}
// DeploymentFailRequest is used to fail a particular deployment
type DeploymentFailRequest struct {
	// DeploymentID is the deployment to mark as failed.
	DeploymentID string
	WriteRequest
}
// ScalingPolicySpecificRequest is used when we just need to specify a target scaling policy
type ScalingPolicySpecificRequest struct {
	// ID is the ID of the target scaling policy.
	ID string
	QueryOptions
}
// SingleScalingPolicyResponse is used to return a single scaling policy
type SingleScalingPolicyResponse struct {
	// Policy is the requested scaling policy.
	Policy *ScalingPolicy
	QueryMeta
}
// ScalingPolicyListRequest is used to parameterize a scaling policy list request
type ScalingPolicyListRequest struct {
	// Job, when set, restricts results to policies of this job.
	Job string
	// Type, when set, restricts results to policies of this type.
	Type string
	QueryOptions
}
// ScalingPolicyListResponse is used for a list request
type ScalingPolicyListResponse struct {
	// Policies are the matching scaling policy stubs.
	Policies []*ScalingPolicyListStub
	QueryMeta
}
// SingleDeploymentResponse is used to respond with a single deployment
type SingleDeploymentResponse struct {
	// Deployment is the requested deployment.
	Deployment *Deployment
	QueryMeta
}
// GenericResponse is used to respond to a request where no
// specific response information is needed.
type GenericResponse struct {
	WriteMeta
}
// VersionResponse is used for the Status.Version response
type VersionResponse struct {
	// Build is the server's build version string.
	Build string
	// Versions maps version component names to their values.
	Versions map[string]int
	QueryMeta
}
// JobRegisterResponse is used to respond to a job registration
type JobRegisterResponse struct {
	// EvalID is the evaluation created by the registration, if any.
	EvalID string
	// EvalCreateIndex is the Raft index at which the eval was created.
	EvalCreateIndex uint64
	// JobModifyIndex is the Raft index at which the job was modified.
	JobModifyIndex uint64
	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string
	QueryMeta
}
// JobDeregisterResponse is used to respond to a job deregistration
type JobDeregisterResponse struct {
	// EvalID is the evaluation created by the deregistration, if any.
	EvalID string
	// EvalCreateIndex is the Raft index at which the eval was created.
	EvalCreateIndex uint64
	// JobModifyIndex is the Raft index at which the job was modified.
	JobModifyIndex uint64
	// VolumeEvalID and VolumeEvalIndex describe the evaluation created
	// for associated volumes, if any.
	VolumeEvalID string
	VolumeEvalIndex uint64
	QueryMeta
}
// JobBatchDeregisterResponse is used to respond to a batch job deregistration
type JobBatchDeregisterResponse struct {
	// JobEvals maps the job to its created evaluation
	JobEvals map[NamespacedID]string
	QueryMeta
}
// JobValidateResponse is the response from validate request
type JobValidateResponse struct {
	// DriverConfigValidated indicates whether the agent validated the driver
	// config
	DriverConfigValidated bool
	// ValidationErrors is a list of validation errors
	ValidationErrors []string
	// Error is a string version of any error that may have occurred
	Error string
	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string
}
// NodeUpdateResponse is used to respond to a node update
type NodeUpdateResponse struct {
	// HeartbeatTTL is how long the node has until its next heartbeat.
	HeartbeatTTL time.Duration
	// EvalIDs are evaluations created as a result of the update.
	EvalIDs []string
	// EvalCreateIndex is the Raft index at which the evals were created.
	EvalCreateIndex uint64
	// NodeModifyIndex is the Raft index at which the node was modified.
	NodeModifyIndex uint64
	// Features informs clients what enterprise features are allowed
	Features uint64
	// LeaderRPCAddr is the RPC address of the current Raft Leader. If
	// empty, the current Nomad Server is in the minority of a partition.
	LeaderRPCAddr string
	// NumNodes is the number of Nomad nodes attached to this quorum of
	// Nomad Servers at the time of the response. This value can
	// fluctuate based on the health of the cluster between heartbeats.
	NumNodes int32
	// Servers is the full list of known Nomad servers in the local
	// region.
	Servers []*NodeServerInfo
	QueryMeta
}
// NodeDrainUpdateResponse is used to respond to a node drain update
type NodeDrainUpdateResponse struct {
	// NodeModifyIndex is the Raft index at which the node was modified.
	NodeModifyIndex uint64
	// EvalIDs are evaluations created as a result of the update.
	EvalIDs []string
	// EvalCreateIndex is the Raft index at which the evals were created.
	EvalCreateIndex uint64
	WriteMeta
}
// NodeEligibilityUpdateResponse is used to respond to a node eligibility update
type NodeEligibilityUpdateResponse struct {
	// NodeModifyIndex is the Raft index at which the node was modified.
	NodeModifyIndex uint64
	// EvalIDs are evaluations created as a result of the update.
	EvalIDs []string
	// EvalCreateIndex is the Raft index at which the evals were created.
	EvalCreateIndex uint64
	WriteMeta
}
// NodeAllocsResponse is used to return allocs for a single node
type NodeAllocsResponse struct {
	// Allocs are the node's allocations.
	Allocs []*Allocation
	QueryMeta
}
// NodeClientAllocsResponse is used to return allocs meta data for a single node
type NodeClientAllocsResponse struct {
	// Allocs maps allocation IDs to their modify indexes.
	Allocs map[string]uint64
	// MigrateTokens are used when ACLs are enabled to allow cross node,
	// authenticated access to sticky volumes
	MigrateTokens map[string]string
	QueryMeta
}
// SingleNodeResponse is used to return a single node
type SingleNodeResponse struct {
	// Node is the requested node.
	Node *Node
	QueryMeta
}
// NodeListResponse is used for a list request
type NodeListResponse struct {
	// Nodes are the matching node stubs.
	Nodes []*NodeListStub
	QueryMeta
}
// SingleJobResponse is used to return a single job
type SingleJobResponse struct {
	// Job is the requested job.
	Job *Job
	QueryMeta
}
// JobSummaryResponse is used to return a single job summary
type JobSummaryResponse struct {
	// JobSummary is the requested summary.
	JobSummary *JobSummary
	QueryMeta
}
// JobScaleStatusResponse is used to return the scale status for a job
type JobScaleStatusResponse struct {
	// JobScaleStatus is the requested scale status.
	JobScaleStatus *JobScaleStatus
	QueryMeta
}
// JobScaleStatus describes the scale status of a job and its task groups.
type JobScaleStatus struct {
	// JobID and Namespace identify the job.
	JobID string
	Namespace string
	// JobCreateIndex and JobModifyIndex are the job's Raft indexes.
	JobCreateIndex uint64
	JobModifyIndex uint64
	// JobStopped indicates whether the job is stopped.
	JobStopped bool
	// TaskGroups maps task group names to their scale status.
	TaskGroups map[string]*TaskGroupScaleStatus
}
// TaskGroupScaleStatus is used to return the scale status for a given task group
type TaskGroupScaleStatus struct {
	// Desired is the desired allocation count.
	Desired int
	// Placed is the number of allocations placed.
	Placed int
	// Running is the number of allocations running.
	Running int
	// Healthy and Unhealthy count allocations by reported health.
	Healthy int
	Unhealthy int
	// Events are the scaling events recorded for the group.
	Events []*ScalingEvent
}
// JobDispatchResponse is used to respond to a parameterized job dispatch.
type JobDispatchResponse struct {
	// DispatchedJobID is the ID of the newly dispatched child job.
	DispatchedJobID string
	// EvalID is the evaluation created for the dispatched job, if any.
	EvalID string
	// EvalCreateIndex is the Raft index at which the eval was created.
	EvalCreateIndex uint64
	// JobCreateIndex is the Raft index at which the job was created.
	JobCreateIndex uint64
	WriteMeta
}
// JobListResponse is used for a list request
type JobListResponse struct {
	// Jobs are the matching job stubs.
	Jobs []*JobListStub
	QueryMeta
}
// JobVersionsRequest is used to get a jobs versions
type JobVersionsRequest struct {
	// JobID is the ID of the target job.
	JobID string
	// Diffs requests diffs between consecutive versions.
	Diffs bool
	QueryOptions
}
// JobVersionsResponse is used for a job get versions request
type JobVersionsResponse struct {
	// Versions are the job's historical versions.
	Versions []*Job
	// Diffs are the diffs between consecutive versions, when requested.
	Diffs []*JobDiff
	QueryMeta
}
// JobPlanResponse is used to respond to a job plan request
type JobPlanResponse struct {
	// Annotations stores annotations explaining decisions the scheduler made.
	Annotations *PlanAnnotations
	// FailedTGAllocs is the placement failures per task group.
	FailedTGAllocs map[string]*AllocMetric
	// JobModifyIndex is the modification index of the job. The value can be
	// used when running `nomad run` to ensure that the Job wasn’t modified
	// since the last plan. If the job is being created, the value is zero.
	JobModifyIndex uint64
	// CreatedEvals is the set of evaluations created by the scheduler. The
	// reasons for this can be rolling-updates or blocked evals.
	CreatedEvals []*Evaluation
	// Diff contains the diff of the job and annotations on whether the change
	// causes an in-place update or create/destroy
	Diff *JobDiff
	// NextPeriodicLaunch is the time duration till the job would be launched if
	// submitted.
	NextPeriodicLaunch time.Time
	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string
	WriteMeta
}
// SingleAllocResponse is used to return a single allocation
type SingleAllocResponse struct {
	// Alloc is the requested allocation.
	Alloc *Allocation
	QueryMeta
}
// AllocsGetResponse is used to return a set of allocations
type AllocsGetResponse struct {
	// Allocs are the requested allocations.
	Allocs []*Allocation
	QueryMeta
}
// JobAllocationsResponse is used to return the allocations for a job
type JobAllocationsResponse struct {
	// Allocations are the job's allocation stubs.
	Allocations []*AllocListStub
	QueryMeta
}
// JobEvaluationsResponse is used to return the evaluations for a job
type JobEvaluationsResponse struct {
	// Evaluations are the job's evaluations.
	Evaluations []*Evaluation
	QueryMeta
}
// SingleEvalResponse is used to return a single evaluation
type SingleEvalResponse struct {
	// Eval is the requested evaluation.
	Eval *Evaluation
	QueryMeta
}
// EvalDequeueResponse is used to return from a dequeue
type EvalDequeueResponse struct {
	// Eval is the dequeued evaluation.
	Eval *Evaluation
	// Token proves ownership of the dequeued evaluation.
	Token string
	// WaitIndex is the Raft index the worker should wait until invoking the
	// scheduler.
	WaitIndex uint64
	QueryMeta
}
// GetWaitIndex returns the Raft index that state must reach before the
// scheduler may be invoked for the dequeued evaluation.
func (e *EvalDequeueResponse) GetWaitIndex() uint64 {
	switch {
	case e.WaitIndex != 0:
		// Servers at 0.7.0 and above always populate WaitIndex, so trust it
		// first.
		return e.WaitIndex
	case e.Eval != nil:
		// Older servers: fall back to the evaluation's own modify index.
		return e.Eval.ModifyIndex
	default:
		// Should never be reached; 1 is a safe non-zero floor.
		return 1
	}
}
// PlanResponse is used to return from a PlanRequest
type PlanResponse struct {
	Result *PlanResult
	WriteMeta
}

// AllocListResponse is used for a list request
type AllocListResponse struct {
	Allocations []*AllocListStub
	QueryMeta
}

// DeploymentListResponse is used for a list request
type DeploymentListResponse struct {
	Deployments []*Deployment
	QueryMeta
}

// EvalListResponse is used for a list request
type EvalListResponse struct {
	Evaluations []*Evaluation
	QueryMeta
}

// EvalAllocationsResponse is used to return the allocations for an evaluation
type EvalAllocationsResponse struct {
	Allocations []*AllocListStub
	QueryMeta
}

// PeriodicForceResponse is used to respond to a periodic job force launch
type PeriodicForceResponse struct {
	EvalID          string
	EvalCreateIndex uint64
	WriteMeta
}

// DeploymentUpdateResponse is used to respond to a deployment change. The
// response will include the modify index of the deployment as well as details
// of any triggered evaluation.
type DeploymentUpdateResponse struct {
	EvalID                string
	EvalCreateIndex       uint64
	DeploymentModifyIndex uint64

	// RevertedJobVersion is the version the job was reverted to. If unset, the
	// job wasn't reverted
	RevertedJobVersion *uint64

	WriteMeta
}

// NodeConnQueryResponse is used to respond to a query of whether a server has
// a connection to a specific Node
type NodeConnQueryResponse struct {
	// Connected indicates whether a connection to the Client exists
	Connected bool

	// Established marks the time at which the connection was established
	Established time.Time

	QueryMeta
}

// HostDataRequest is used by /agent/host to retrieve data about the agent's host system. If
// ServerID or NodeID is specified, the request is forwarded to the remote agent
type HostDataRequest struct {
	ServerID string
	NodeID   string
	QueryOptions
}

// HostDataResponse contains the HostData content
type HostDataResponse struct {
	AgentID  string
	HostData *host.HostData
}

// EmitNodeEventsRequest is a request to update the node events source
// with a new client-side event
type EmitNodeEventsRequest struct {
	// NodeEvents are a map where the key is a node id, and value is a list of
	// events for that node
	NodeEvents map[string][]*NodeEvent

	WriteRequest
}

// EmitNodeEventsResponse is a response to the client about the status of
// the node event source update.
type EmitNodeEventsResponse struct {
	WriteMeta
}

// Subsystems that may emit node events (NodeEvent.Subsystem).
const (
	NodeEventSubsystemDrain     = "Drain"
	NodeEventSubsystemDriver    = "Driver"
	NodeEventSubsystemHeartbeat = "Heartbeat"
	NodeEventSubsystemCluster   = "Cluster"
	NodeEventSubsystemStorage   = "Storage"
)
// NodeEvent is a single unit representing a node’s state change
type NodeEvent struct {
Message string
Subsystem string
Details map[string]string
Timestamp time.Time
CreateIndex uint64
}
func (ne *NodeEvent) String() string {
var details []string
for k, v := range ne.Details {
details = append(details, fmt.Sprintf("%s: %s", k, v))
}
return fmt.Sprintf("Message: %s, Subsystem: %s, Details: %s, Timestamp: %s", ne.Message, ne.Subsystem, strings.Join(details, ","), ne.Timestamp.String())
}
func (ne *NodeEvent) Copy() *NodeEvent {
c := new(NodeEvent)
*c = *ne
c.Details = helper.CopyMapStringString(ne.Details)
return c
}
// NewNodeEvent generates a new node event storing the current time as the
// timestamp
func NewNodeEvent() *NodeEvent {
return &NodeEvent{Timestamp: time.Now()}
}
// SetMessage is used to set the message on the node event
func (ne *NodeEvent) SetMessage(msg string) *NodeEvent {
ne.Message = msg
return ne
}
// SetSubsystem is used to set the subsystem on the node event
func (ne *NodeEvent) SetSubsystem(sys string) *NodeEvent {
ne.Subsystem = sys
return ne
}
// SetTimestamp is used to set the timestamp on the node event
func (ne *NodeEvent) SetTimestamp(ts time.Time) *NodeEvent {
ne.Timestamp = ts
return ne
}
// AddDetail is used to add a detail to the node event
func (ne *NodeEvent) AddDetail(k, v string) *NodeEvent {
if ne.Details == nil {
ne.Details = make(map[string]string, 1)
}
ne.Details[k] = v
return ne
}
const (
	NodeStatusInit         = "initializing"
	NodeStatusReady        = "ready"
	NodeStatusDown         = "down"
	NodeStatusDisconnected = "disconnected"
)

// ShouldDrainNode checks if a given node status should trigger an
// evaluation. Some states don't require any further action.
func ShouldDrainNode(status string) bool {
	if status == NodeStatusDown {
		return true
	}
	if status == NodeStatusInit || status == NodeStatusReady || status == NodeStatusDisconnected {
		return false
	}
	// An unknown status is a programmer error, not recoverable input.
	panic(fmt.Sprintf("unhandled node status %s", status))
}

// ValidNodeStatus is used to check if a node status is valid
func ValidNodeStatus(status string) bool {
	switch status {
	case NodeStatusInit, NodeStatusReady, NodeStatusDown, NodeStatusDisconnected:
		return true
	}
	return false
}
const (
	// NodeSchedulingEligible and Ineligible marks the node as eligible or not,
	// respectively, for receiving allocations. This is orthogonal to the node
	// status being ready. These values are stored in
	// Node.SchedulingEligibility and maintained by Node.Canonicalize.
	NodeSchedulingEligible   = "eligible"
	NodeSchedulingIneligible = "ineligible"
)
// DrainSpec describes a Node's desired drain behavior.
type DrainSpec struct {
// Deadline is the duration after StartTime when the remaining
// allocations on a draining Node should be told to stop.
Deadline time.Duration
// IgnoreSystemJobs allows systems jobs to remain on the node even though it
// has been marked for draining.
IgnoreSystemJobs bool
}
// DrainStrategy describes a Node's drain behavior.
type DrainStrategy struct {
// DrainSpec is the user declared drain specification
DrainSpec
// ForceDeadline is the deadline time for the drain after which drains will
// be forced
ForceDeadline time.Time
// StartedAt is the time the drain process started
StartedAt time.Time
}
func (d *DrainStrategy) Copy() *DrainStrategy {
if d == nil {
return nil
}
nd := new(DrainStrategy)
*nd = *d
return nd
}
// DeadlineTime returns a boolean whether the drain strategy allows an infinite
// duration or otherwise the deadline time. The force drain is captured by the
// deadline time being in the past.
func (d *DrainStrategy) DeadlineTime() (infinite bool, deadline time.Time) {
// Treat the nil case as a force drain so during an upgrade where a node may
// not have a drain strategy but has Drain set to true, it is treated as a
// force to mimick old behavior.
if d == nil {
return false, time.Time{}
}
ns := d.Deadline.Nanoseconds()
switch {
case ns < 0: // Force
return false, time.Time{}
case ns == 0: // Infinite
return true, time.Time{}
default:
return false, d.ForceDeadline
}
}
func (d *DrainStrategy) Equal(o *DrainStrategy) bool {
if d == nil && o == nil {
return true
} else if o != nil && d == nil {
return false
} else if d != nil && o == nil {
return false
}
// Compare values
if d.ForceDeadline != o.ForceDeadline {
return false
} else if d.Deadline != o.Deadline {
return false
} else if d.IgnoreSystemJobs != o.IgnoreSystemJobs {
return false
}
return true
}
const (
	// DrainStatuses are the various states a drain can be in, as reflect in DrainMetadata
	DrainStatusDraining DrainStatus = "draining"
	DrainStatusComplete DrainStatus = "complete"
	DrainStatusCanceled DrainStatus = "canceled"
)

// DrainStatus is the state of a drain operation; see the DrainStatus*
// constants above for the valid values.
type DrainStatus string

// DrainMetadata contains information about the most recent drain operation for a given Node.
type DrainMetadata struct {
	// StartedAt is the time that the drain operation started. This is equal to Node.DrainStrategy.StartedAt,
	// if it exists
	StartedAt time.Time

	// UpdatedAt is the time that this struct was most recently updated, either via API action
	// or drain completion
	UpdatedAt time.Time

	// Status reflects the status of the drain operation.
	Status DrainStatus

	// AccessorID is the accessor ID of the ACL token used in the most recent API operation against this drain
	AccessorID string

	// Meta includes the operator-submitted metadata about this drain operation
	Meta map[string]string
}
// Copy returns a deep copy of the drain metadata; nil in, nil out. The Meta
// map is cloned so the copy never aliases the original.
func (m *DrainMetadata) Copy() *DrainMetadata {
	if m == nil {
		return nil
	}
	clone := *m
	clone.Meta = helper.CopyMapStringString(m.Meta)
	return &clone
}
// Node is a representation of a schedulable client node
type Node struct {
	// ID is a unique identifier for the node. It can be constructed
	// by doing a concatenation of the Name and Datacenter as a simple
	// approach. Alternatively a UUID may be used.
	ID string

	// SecretID is an ID that is only known by the Node and the set of Servers.
	// It is not accessible via the API and is used to authenticate nodes
	// conducting privileged activities. Blanked by Sanitize before the node
	// leaves the server.
	SecretID string

	// Datacenter for this node
	Datacenter string

	// Node name
	Name string

	// CgroupParent for this node (linux only)
	CgroupParent string

	// HTTPAddr is the address on which the Nomad client is listening for http
	// requests
	HTTPAddr string

	// TLSEnabled indicates if the Agent has TLS enabled for the HTTP API
	TLSEnabled bool

	// Attributes is an arbitrary set of key/value
	// data that can be used for constraints. Examples
	// include "kernel.name=linux", "arch=386", "driver.docker=1",
	// "docker.runtime=1.8.3"
	Attributes map[string]string

	// NodeResources captures the available resources on the client.
	NodeResources *NodeResources

	// ReservedResources captures the set resources on the client that are
	// reserved from scheduling.
	ReservedResources *NodeReservedResources

	// Resources is the available resources on the client.
	// For example 'cpu=2' 'memory=2048'
	// COMPAT(0.10): Remove after 0.10
	Resources *Resources

	// Reserved is the set of resources that are reserved,
	// and should be subtracted from the total resources for
	// the purposes of scheduling. This may be provide certain
	// high-watermark tolerances or because of external schedulers
	// consuming resources.
	// COMPAT(0.10): Remove after 0.10
	Reserved *Resources

	// Links are used to 'link' this client to external
	// systems. For example 'consul=foo.dc1' 'aws=i-83212'
	// 'ami=ami-123'
	Links map[string]string

	// Meta is used to associate arbitrary metadata with this
	// client. This is opaque to Nomad.
	Meta map[string]string

	// NodeClass is an opaque identifier used to group nodes
	// together for the purpose of determining scheduling pressure.
	NodeClass string

	// ComputedClass is a unique id that identifies nodes with a common set of
	// attributes and capabilities.
	ComputedClass string

	// DrainStrategy determines the node's draining behavior.
	// Will be non-nil only while draining.
	DrainStrategy *DrainStrategy

	// SchedulingEligibility determines whether this node will receive new
	// placements. See the NodeScheduling* constants; kept in sync with the
	// drain state by Canonicalize.
	SchedulingEligibility string

	// Status of this node
	Status string

	// StatusDescription is meant to provide more human useful information
	StatusDescription string

	// StatusUpdatedAt is the time stamp at which the state of the node was
	// updated
	StatusUpdatedAt int64

	// Events is the most recent set of events generated for the node,
	// retaining only MaxRetainedNodeEvents number at a time
	Events []*NodeEvent

	// Drivers is a map of driver names to current driver information
	Drivers map[string]*DriverInfo

	// CSIControllerPlugins is a map of plugin names to current CSI Plugin info
	CSIControllerPlugins map[string]*CSIInfo
	// CSINodePlugins is a map of plugin names to current CSI Plugin info
	CSINodePlugins map[string]*CSIInfo

	// HostVolumes is a map of host volume names to their configuration
	HostVolumes map[string]*ClientHostVolumeConfig

	// HostNetworks is a map of host host_network names to their configuration
	HostNetworks map[string]*ClientHostNetworkConfig

	// LastDrain contains metadata about the most recent drain operation
	LastDrain *DrainMetadata

	// Raft Indexes: CreateIndex and ModifyIndex record when the node was
	// created and last modified in the state store.
	CreateIndex uint64
	ModifyIndex uint64
}
// GetID returns the node's ID, tolerating a nil receiver so pagination
// helpers do not have to nil-check first.
func (n *Node) GetID() string {
	if n != nil {
		return n.ID
	}
	return ""
}

// Sanitize returns a copy of the Node with confidential fields blanked.
// The receiver itself is returned untouched when there is nothing to strip.
func (n *Node) Sanitize() *Node {
	switch {
	case n == nil:
		return nil
	case n.SecretID == "":
		// Nothing confidential is set; avoid an unnecessary deep copy.
		return n
	}
	clean := n.Copy()
	clean.SecretID = ""
	return clean
}

// Ready returns true if the node is ready for running allocations
func (n *Node) Ready() bool {
	if n.Status != NodeStatusReady {
		return false
	}
	return n.DrainStrategy == nil && n.SchedulingEligibility == NodeSchedulingEligible
}
// Canonicalize normalizes a node: it derives SchedulingEligibility from the
// drain state and upgrades pre-0.12 network fingerprints into NodeNetworks.
// Safe to call on a nil node.
func (n *Node) Canonicalize() {
	if n == nil {
		return
	}

	// Ensure SchedulingEligibility is correctly set whenever draining so the plan applier and other scheduling logic
	// only need to check SchedulingEligibility when determining whether a placement is feasible on a node.
	if n.DrainStrategy != nil {
		n.SchedulingEligibility = NodeSchedulingIneligible
	} else if n.SchedulingEligibility == "" {
		n.SchedulingEligibility = NodeSchedulingEligible
	}

	// COMPAT remove in 1.0
	// In v0.12.0 we introduced a separate node specific network resource struct
	// so we need to convert any pre 0.12 clients to the correct struct
	if n.NodeResources != nil && n.NodeResources.NodeNetworks == nil {
		if n.NodeResources.Networks != nil {
			// Translate each legacy NetworkResource into a NodeNetworkResource,
			// carrying the IP over as the "default" alias when present.
			for _, nr := range n.NodeResources.Networks {
				nnr := &NodeNetworkResource{
					Mode:   nr.Mode,
					Speed:  nr.MBits,
					Device: nr.Device,
				}
				if nr.IP != "" {
					nnr.Addresses = []NodeNetworkAddress{
						{
							Alias:   "default",
							Address: nr.IP,
						},
					}
				}
				n.NodeResources.NodeNetworks = append(n.NodeResources.NodeNetworks, nnr)
			}
		}
	}
}
// Copy returns a deep copy of the node. Every reference-typed field is
// replaced by its own clone so the result never aliases the original's maps
// and slices.
func (n *Node) Copy() *Node {
	if n == nil {
		return nil
	}
	nn := new(Node)
	*nn = *n
	nn.Attributes = helper.CopyMapStringString(n.Attributes)
	nn.NodeResources = n.NodeResources.Copy()
	nn.ReservedResources = n.ReservedResources.Copy()
	nn.Resources = n.Resources.Copy()
	nn.Reserved = n.Reserved.Copy()
	nn.Links = helper.CopyMapStringString(n.Links)
	nn.Meta = helper.CopyMapStringString(n.Meta)
	nn.DrainStrategy = n.DrainStrategy.Copy()
	nn.Events = copyNodeEvents(n.Events)
	nn.Drivers = copyNodeDrivers(n.Drivers)
	nn.CSIControllerPlugins = copyNodeCSI(n.CSIControllerPlugins)
	nn.CSINodePlugins = copyNodeCSI(n.CSINodePlugins)
	nn.HostVolumes = copyNodeHostVolumes(n.HostVolumes)
	nn.HostNetworks = copyNodeHostNetworks(n.HostNetworks)
	nn.LastDrain = n.LastDrain.Copy()
	return nn
}
// copyNodeEvents deep-copies a slice of node events; empty in, nil out.
func copyNodeEvents(events []*NodeEvent) []*NodeEvent {
	if len(events) == 0 {
		return nil
	}
	out := make([]*NodeEvent, len(events))
	for i := range events {
		out[i] = events[i].Copy()
	}
	return out
}

// copyNodeCSI deep-copies a map of CSI plugin info; empty in, nil out.
func copyNodeCSI(plugins map[string]*CSIInfo) map[string]*CSIInfo {
	if len(plugins) == 0 {
		return nil
	}
	out := make(map[string]*CSIInfo, len(plugins))
	for name, info := range plugins {
		out[name] = info.Copy()
	}
	return out
}

// copyNodeDrivers deep-copies a map of driver info; empty in, nil out.
func copyNodeDrivers(drivers map[string]*DriverInfo) map[string]*DriverInfo {
	if len(drivers) == 0 {
		return nil
	}
	out := make(map[string]*DriverInfo, len(drivers))
	for name, info := range drivers {
		out[name] = info.Copy()
	}
	return out
}

// copyNodeHostVolumes deep-copies a map of host volume configs; empty in,
// nil out.
func copyNodeHostVolumes(volumes map[string]*ClientHostVolumeConfig) map[string]*ClientHostVolumeConfig {
	if len(volumes) == 0 {
		return nil
	}
	out := make(map[string]*ClientHostVolumeConfig, len(volumes))
	for name, cfg := range volumes {
		out[name] = cfg.Copy()
	}
	return out
}

// copyNodeHostNetworks deep-copies a map of host network configs; empty in,
// nil out.
func copyNodeHostNetworks(networks map[string]*ClientHostNetworkConfig) map[string]*ClientHostNetworkConfig {
	if len(networks) == 0 {
		return nil
	}
	out := make(map[string]*ClientHostNetworkConfig, len(networks))
	for name, cfg := range networks {
		out[name] = cfg.Copy()
	}
	return out
}
// TerminalStatus returns if the current status is terminal and
// will no longer transition. Only NodeStatusDown is terminal.
func (n *Node) TerminalStatus() bool {
	return n.Status == NodeStatusDown
}
// ComparableReservedResources returns the reserved resources on the node
// handling upgrade paths. Reserved networks must be handled separately. After
// 0.11 calls to this should be replaced with:
// node.ReservedResources.Comparable()
//
// COMPAT(0.11): Remove in 0.11
func (n *Node) ComparableReservedResources() *ComparableResources {
	// See if we can no-op
	if n.Reserved == nil && n.ReservedResources == nil {
		return nil
	}

	// Node already has 0.9+ behavior
	if n.ReservedResources != nil {
		return n.ReservedResources.Comparable()
	}

	// Upgrade path: synthesize comparable resources from the legacy
	// Reserved struct.
	return &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: int64(n.Reserved.CPU),
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: int64(n.Reserved.MemoryMB),
			},
		},
		Shared: AllocatedSharedResources{
			DiskMB: int64(n.Reserved.DiskMB),
		},
	}
}
// ComparableResources returns the resources on the node
// handling upgrade paths. Networking must be handled separately. After 0.11
// calls to this should be replaced with: node.NodeResources.Comparable()
//
// COMPAT(0.11): Remove in 0.11
func (n *Node) ComparableResources() *ComparableResources {
	// Node already has 0.9+ behavior
	if n.NodeResources != nil {
		return n.NodeResources.Comparable()
	}

	// Upgrade path: synthesize comparable resources from the legacy
	// Resources struct.
	// NOTE(review): unlike ComparableReservedResources there is no nil guard
	// here, so a node with both NodeResources and Resources nil would panic —
	// presumably callers guarantee legacy nodes always carry Resources; verify.
	return &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: int64(n.Resources.CPU),
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: int64(n.Resources.MemoryMB),
			},
		},
		Shared: AllocatedSharedResources{
			DiskMB: int64(n.Resources.DiskMB),
		},
	}
}
// Stub returns a summarized version of the node, suitable for node list
// responses. fields optionally selects extra payload (resources, OS
// attribute) to include; a nil fields yields the default stub.
func (n *Node) Stub(fields *NodeStubFields) *NodeListStub {
	// HTTPAddr is host:port; the stub carries only the host part. The error
	// is deliberately discarded — a malformed address leaves Address empty.
	addr, _, _ := net.SplitHostPort(n.HTTPAddr)

	s := &NodeListStub{
		Address:               addr,
		ID:                    n.ID,
		Datacenter:            n.Datacenter,
		Name:                  n.Name,
		NodeClass:             n.NodeClass,
		Version:               n.Attributes["nomad.version"],
		Drain:                 n.DrainStrategy != nil,
		SchedulingEligibility: n.SchedulingEligibility,
		Status:                n.Status,
		StatusDescription:     n.StatusDescription,
		Drivers:               n.Drivers,
		HostVolumes:           n.HostVolumes,
		LastDrain:             n.LastDrain,
		CreateIndex:           n.CreateIndex,
		ModifyIndex:           n.ModifyIndex,
	}

	if fields != nil {
		if fields.Resources {
			s.NodeResources = n.NodeResources
			s.ReservedResources = n.ReservedResources
		}

		// Fetch key attributes from the main Attributes map.
		if fields.OS {
			m := make(map[string]string)
			m["os.name"] = n.Attributes["os.name"]
			s.Attributes = m
		}
	}

	return s
}
// NodeListStub is used to return a subset of node information
// for the node list
type NodeListStub struct {
	Address               string
	ID                    string
	Attributes            map[string]string `json:",omitempty"`
	Datacenter            string
	Name                  string
	NodeClass             string
	Version               string
	Drain                 bool
	SchedulingEligibility string
	Status                string
	StatusDescription     string
	Drivers               map[string]*DriverInfo
	HostVolumes           map[string]*ClientHostVolumeConfig
	NodeResources         *NodeResources         `json:",omitempty"`
	ReservedResources     *NodeReservedResources `json:",omitempty"`
	LastDrain             *DrainMetadata
	CreateIndex           uint64
	ModifyIndex           uint64
}

// NodeStubFields defines which fields are included in the NodeListStub.
type NodeStubFields struct {
	// Resources includes NodeResources/ReservedResources in the stub.
	Resources bool
	// OS includes the "os.name" attribute in the stub.
	OS bool
}
// Resources is used to define the resources available
// on a client
type Resources struct {
	CPU         int
	Cores       int
	MemoryMB    int
	MemoryMaxMB int
	DiskMB      int
	IOPS        int // COMPAT(0.10): Only being used to issue warnings
	Networks    Networks
	Devices     ResourceDevices
}

const (
	// BytesInMegabyte converts the *MB fields above to bytes.
	BytesInMegabyte = 1024 * 1024
)

// DefaultResources is a small resources object that contains the
// default resources requests that we will provide to an object.
// --- THIS FUNCTION IS REPLICATED IN api/resources.go and should
// be kept in sync.
func DefaultResources() *Resources {
	return &Resources{
		CPU:      100,
		Cores:    0,
		MemoryMB: 300,
	}
}

// MinResources is a small resources object that contains the
// absolute minimum resources that we will provide to an object.
// This should not be confused with the defaults which are
// provided in Canonicalize() --- THIS FUNCTION IS REPLICATED IN
// api/resources.go and should be kept in sync.
func MinResources() *Resources {
	return &Resources{
		CPU:      1,
		Cores:    0,
		MemoryMB: 10,
	}
}
// DiskInBytes returns the amount of disk resources in bytes.
//
// The MB value is widened to int64 before multiplying: the previous form
// int64(r.DiskMB * BytesInMegabyte) performed the multiplication in the
// platform int, which overflows on 32-bit builds once DiskMB >= 2048.
func (r *Resources) DiskInBytes() int64 {
	return int64(r.DiskMB) * BytesInMegabyte
}
// Validate checks that the resource request is internally consistent:
// cpu/cores are mutually exclusive, minimums are met, disk is not requested
// at the task level, devices validate, and MemoryMaxMB (when set) is not
// below MemoryMB. All violations are accumulated into a single multierror.
func (r *Resources) Validate() error {
	var mErr multierror.Error

	if r.Cores > 0 && r.CPU > 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Task can only ask for 'cpu' or 'cores' resource, not both."))
	}

	if err := r.MeetsMinResources(); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}

	// Ensure the task isn't asking for disk resources
	if r.DiskMB > 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Task can't ask for disk resources, they have to be specified at the task group level."))
	}

	for i, d := range r.Devices {
		if err := d.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("device %d failed validation: %v", i+1, err))
		}
	}

	if r.MemoryMaxMB != 0 && r.MemoryMaxMB < r.MemoryMB {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("MemoryMaxMB value (%d) should be larger than MemoryMB value (%d)", r.MemoryMaxMB, r.MemoryMB))
	}

	return mErr.ErrorOrNil()
}
// Merge merges this resource with another resource. Only non-zero fields of
// other override the receiver; zero-valued fields leave it untouched.
// COMPAT(0.10): Remove in 0.10
func (r *Resources) Merge(other *Resources) {
	if other.CPU != 0 {
		r.CPU = other.CPU
	}
	if other.Cores != 0 {
		r.Cores = other.Cores
	}
	if other.MemoryMB != 0 {
		r.MemoryMB = other.MemoryMB
	}
	if other.MemoryMaxMB != 0 {
		r.MemoryMaxMB = other.MemoryMaxMB
	}
	if other.DiskMB != 0 {
		r.DiskMB = other.DiskMB
	}
	// Networks and Devices are replaced wholesale, not element-merged.
	if len(other.Networks) != 0 {
		r.Networks = other.Networks
	}
	if len(other.Devices) != 0 {
		r.Devices = other.Devices
	}
}

// Equals compares Resources field-by-field. Identical pointers (including
// two nils) are equal; a nil and a non-nil pointer are not.
//
// COMPAT(0.10): Remove in 0.10
func (r *Resources) Equals(o *Resources) bool {
	if r == o {
		return true
	}
	if r == nil || o == nil {
		return false
	}
	return r.CPU == o.CPU &&
		r.Cores == o.Cores &&
		r.MemoryMB == o.MemoryMB &&
		r.MemoryMaxMB == o.MemoryMaxMB &&
		r.DiskMB == o.DiskMB &&
		r.IOPS == o.IOPS &&
		r.Networks.Equals(&o.Networks) &&
		r.Devices.Equals(&o.Devices)
}
// ResourceDevices are part of Resources.
//
// COMPAT(0.10): Remove in 0.10.
type ResourceDevices []*RequestedDevice

// Equals compares ResourceDevices as sets keyed by device Name.
//
// COMPAT(0.10): Remove in 0.10
func (d *ResourceDevices) Equals(o *ResourceDevices) bool {
	if d == o {
		return true
	}
	if d == nil || o == nil {
		return false
	}

	ours, theirs := *d, *o
	if len(ours) != len(theirs) {
		return false
	}

	// Index our devices by name, then require every one of theirs to match.
	byName := make(map[string]*RequestedDevice, len(ours))
	for _, dev := range ours {
		byName[dev.Name] = dev
	}
	for _, dev := range theirs {
		match, ok := byName[dev.Name]
		if !ok || !match.Equals(dev) {
			return false
		}
	}
	return true
}
// Canonicalize the Resources struct.
//
// COMPAT(0.10): Remove in 0.10
func (r *Resources) Canonicalize() {
	// Normalize empty slices to nil so reflect.DeepEqual-based scheduling
	// comparisons treat "empty" and "unset" identically.
	if len(r.Networks) == 0 {
		r.Networks = nil
	}
	if len(r.Devices) == 0 {
		r.Devices = nil
	}

	for _, network := range r.Networks {
		network.Canonicalize()
	}
}

// MeetsMinResources returns an error if the resources specified are less than
// the minimum allowed.
// This is based on the minimums defined in the Resources type
// COMPAT(0.10): Remove in 0.10
func (r *Resources) MeetsMinResources() error {
	var mErr multierror.Error
	floor := MinResources()

	// The CPU floor only applies when the request is expressed in MHz; a
	// cores-based request bypasses it.
	if r.CPU < floor.CPU && r.Cores == 0 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum CPU value is %d; got %d", floor.CPU, r.CPU))
	}
	if r.MemoryMB < floor.MemoryMB {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MemoryMB value is %d; got %d", floor.MemoryMB, r.MemoryMB))
	}

	return mErr.ErrorOrNil()
}
// Copy returns a deep copy of the resources
func (r *Resources) Copy() *Resources {
	if r == nil {
		return nil
	}
	clone := *r

	// Deep-copy the reference-typed fields so the clone never aliases the
	// original's networks or devices.
	clone.Networks = r.Networks.Copy()
	if r.Devices != nil {
		clone.Devices = make([]*RequestedDevice, len(r.Devices))
		for i, dev := range r.Devices {
			clone.Devices[i] = dev.Copy()
		}
	}

	return &clone
}

// NetIndex finds the matching net index using device name
// COMPAT(0.10): Remove in 0.10
func (r *Resources) NetIndex(n *NetworkResource) int {
	return r.Networks.NetIndex(n)
}
// Add adds the resources of the delta to this. Memory has a subtlety: when
// the delta sets no explicit MemoryMaxMB, its plain MemoryMB is added to
// MemoryMaxMB instead, so the max always tracks at least the summed base
// memory.
// COMPAT(0.10): Remove in 0.10
func (r *Resources) Add(delta *Resources) {
	if delta == nil {
		return
	}

	r.CPU += delta.CPU
	r.MemoryMB += delta.MemoryMB
	if delta.MemoryMaxMB > 0 {
		r.MemoryMaxMB += delta.MemoryMaxMB
	} else {
		r.MemoryMaxMB += delta.MemoryMB
	}
	r.DiskMB += delta.DiskMB

	for _, n := range delta.Networks {
		// Find the matching interface by IP or CIDR
		idx := r.NetIndex(n)
		if idx == -1 {
			r.Networks = append(r.Networks, n.Copy())
		} else {
			r.Networks[idx].Add(n)
		}
	}
}

// GoString returns the string representation of the Resources struct.
//
// COMPAT(0.10): Remove in 0.10
func (r *Resources) GoString() string {
	return fmt.Sprintf("*%#v", *r)
}
// NodeNetworkResource is used to describe a fingerprinted network of a node
type NodeNetworkResource struct {
	Mode string // host for physical networks, cni/<name> for cni networks

	// The following apply only to host networks
	Device     string // interface name
	MacAddress string
	Speed      int

	Addresses []NodeNetworkAddress // not valid for cni, for bridge there will only be 1 ip
}

// Equals reports deep equality with another node network resource.
func (n *NodeNetworkResource) Equals(o *NodeNetworkResource) bool {
	return reflect.DeepEqual(n, o)
}

// Copy returns a deep copy of the network; nil in, nil out.
func (n *NodeNetworkResource) Copy() *NodeNetworkResource {
	if n == nil {
		return nil
	}
	clone := *n
	if n.Addresses != nil {
		clone.Addresses = make([]NodeNetworkAddress, len(n.Addresses))
		copy(clone.Addresses, n.Addresses)
	}
	return &clone
}

// HasAlias reports whether any address on this network carries the alias.
func (n *NodeNetworkResource) HasAlias(alias string) bool {
	for _, address := range n.Addresses {
		if address.Alias == alias {
			return true
		}
	}
	return false
}

// NodeNetworkAF is the address family of a node network address.
type NodeNetworkAF string

const (
	NodeNetworkAF_IPv4 NodeNetworkAF = "ipv4"
	NodeNetworkAF_IPv6 NodeNetworkAF = "ipv6"
)

// NodeNetworkAddress is a single fingerprinted address on a node network.
type NodeNetworkAddress struct {
	Family        NodeNetworkAF
	Alias         string
	Address       string
	ReservedPorts string
	Gateway       string // default route for this address
}
// AllocatedPortMapping describes a single port allocated to a task, including
// where it lands on the host.
type AllocatedPortMapping struct {
	Label  string
	Value  int
	To     int
	HostIP string
}

// AllocatedPorts is the set of port mappings allocated to a task.
type AllocatedPorts []AllocatedPortMapping

// Get returns the mapping with the given label and whether one was found.
func (p AllocatedPorts) Get(label string) (AllocatedPortMapping, bool) {
	for i := range p {
		if p[i].Label == label {
			return p[i], true
		}
	}

	var zero AllocatedPortMapping
	return zero, false
}
// Port is a single port requested by a task, either statically assigned in
// the jobspec or allocated dynamically by the scheduler.
type Port struct {
	// Label is the key for HCL port stanzas: port "foo" {}
	Label string

	// Value is the static or dynamic port value. For dynamic ports this
	// will be 0 in the jobspec and set by the scheduler.
	Value int

	// To is the port inside a network namespace where this port is
	// forwarded. -1 is an internal sentinel value used by Consul Connect
	// to mean "same as the host port."
	To int

	// HostNetwork is the name of the network this port should be assigned
	// to. Jobs with a HostNetwork set can only be placed on nodes with
	// that host network available.
	HostNetwork string
}
// DNSConfig describes the DNS servers, search domains, and resolver options
// applied to a task's network namespace.
type DNSConfig struct {
	Servers  []string
	Searches []string
	Options  []string
}

// Copy returns a deep copy of the DNS config; nil in, nil out.
func (d *DNSConfig) Copy() *DNSConfig {
	if d == nil {
		return nil
	}

	// Every field is freshly allocated so the clone never aliases the
	// original's slices.
	cloneStrings := func(src []string) []string {
		dst := make([]string, len(src))
		copy(dst, src)
		return dst
	}

	return &DNSConfig{
		Servers:  cloneStrings(d.Servers),
		Searches: cloneStrings(d.Searches),
		Options:  cloneStrings(d.Options),
	}
}
// NetworkResource is used to represent available network
// resources. Equality is determined by Hash/Equals; ports are normalized by
// Canonicalize.
type NetworkResource struct {
	Mode          string     // Mode of the network
	Device        string     // Name of the device
	CIDR          string     // CIDR block of addresses
	IP            string     // Host IP address
	Hostname      string     `json:",omitempty"` // Hostname of the network namespace
	MBits         int        // Throughput
	DNS           *DNSConfig // DNS Configuration
	ReservedPorts []Port     // Host Reserved ports
	DynamicPorts  []Port     // Host Dynamically assigned ports
}
// Hash returns a CRC32 checksum over the network's identifying fields (mode,
// device, CIDR, IP, hostname, throughput) and each reserved and dynamic
// port's position, label, value, and to-port. Used by Equals; keep the byte
// layout stable.
func (n *NetworkResource) Hash() uint32 {
	var data []byte
	data = append(data, []byte(fmt.Sprintf("%s%s%s%s%s%d", n.Mode, n.Device, n.CIDR, n.IP, n.Hostname, n.MBits))...)

	for i, port := range n.ReservedPorts {
		data = append(data, []byte(fmt.Sprintf("r%d%s%d%d", i, port.Label, port.Value, port.To))...)
	}

	for i, port := range n.DynamicPorts {
		data = append(data, []byte(fmt.Sprintf("d%d%s%d%d", i, port.Label, port.Value, port.To))...)
	}

	return crc32.ChecksumIEEE(data)
}

// Equals reports whether two networks hash identically. Note that fields not
// covered by Hash (e.g. DNS and port HostNetwork) do not affect equality.
func (n *NetworkResource) Equals(other *NetworkResource) bool {
	return n.Hash() == other.Hash()
}
// Canonicalize normalizes the network's port slices: empty slices become nil
// (so reflect.DeepEqual-based comparisons are stable) and every port without
// an explicit host network is assigned to "default".
func (n *NetworkResource) Canonicalize() {
	if len(n.ReservedPorts) == 0 {
		n.ReservedPorts = nil
	}
	if len(n.DynamicPorts) == 0 {
		n.DynamicPorts = nil
	}

	for i := range n.DynamicPorts {
		if n.DynamicPorts[i].HostNetwork == "" {
			n.DynamicPorts[i].HostNetwork = "default"
		}
	}
	for i := range n.ReservedPorts {
		if n.ReservedPorts[i].HostNetwork == "" {
			n.ReservedPorts[i].HostNetwork = "default"
		}
	}
}

// Copy returns a deep copy of the network resource
func (n *NetworkResource) Copy() *NetworkResource {
	if n == nil {
		return nil
	}
	clone := *n
	clone.DNS = n.DNS.Copy()
	if n.ReservedPorts != nil {
		clone.ReservedPorts = make([]Port, len(n.ReservedPorts))
		copy(clone.ReservedPorts, n.ReservedPorts)
	}
	if n.DynamicPorts != nil {
		clone.DynamicPorts = make([]Port, len(n.DynamicPorts))
		copy(clone.DynamicPorts, n.DynamicPorts)
	}
	return &clone
}
// Add appends the delta's reserved and dynamic ports to this network and
// accumulates its throughput.
func (n *NetworkResource) Add(delta *NetworkResource) {
	if len(delta.ReservedPorts) > 0 {
		n.ReservedPorts = append(n.ReservedPorts, delta.ReservedPorts...)
	}
	n.MBits += delta.MBits
	n.DynamicPorts = append(n.DynamicPorts, delta.DynamicPorts...)
}

// GoString returns the string representation of the NetworkResource struct.
func (n *NetworkResource) GoString() string {
	return fmt.Sprintf("*%#v", *n)
}
// PortLabels returns a map of port labels to their assigned host ports.
func (n *NetworkResource) PortLabels() map[string]int {
	labels := make(map[string]int, len(n.ReservedPorts)+len(n.DynamicPorts))
	for _, p := range n.ReservedPorts {
		labels[p.Label] = p.Value
	}
	for _, p := range n.DynamicPorts {
		labels[p.Label] = p.Value
	}
	return labels
}
// Networks defined for a task on the Resources struct.
type Networks []*NetworkResource
func (ns Networks) Copy() Networks {
if len(ns) == 0 {
return nil
}
out := make([]*NetworkResource, len(ns))
for i := range ns {
out[i] = ns[i].Copy()
}
return out
}
// Port returns the assignment and IP for the given label, or zero
// values when no reserved or dynamic port carries that label.
func (ns Networks) Port(label string) AllocatedPortMapping {
	for _, network := range ns {
		// Reserved ports are searched before dynamic ports, matching the
		// original lookup order.
		for _, group := range [][]Port{network.ReservedPorts, network.DynamicPorts} {
			for _, p := range group {
				if p.Label != label {
					continue
				}
				return AllocatedPortMapping{
					Label:  label,
					Value:  p.Value,
					To:     p.To,
					HostIP: network.IP,
				}
			}
		}
	}
	return AllocatedPortMapping{}
}
// NetIndex returns the index of the first network whose device name
// matches n's, or -1 when no such network exists.
func (ns Networks) NetIndex(n *NetworkResource) int {
	for i := range ns {
		if ns[i].Device == n.Device {
			return i
		}
	}
	return -1
}
// RequestedDevice is used to request a device for a task.
type RequestedDevice struct {
	// Name is the request name. The possible values are as follows:
	// * <type>: A single value only specifies the type of request.
	// * <vendor>/<type>: A single slash delimiter assumes the vendor and type of device is specified.
	// * <vendor>/<type>/<name>: Two slash delimiters assume vendor, type and specific model are specified.
	//
	// Examples are as follows:
	// * "gpu"
	// * "nvidia/gpu"
	// * "nvidia/gpu/GTX2080Ti"
	Name string
	// Count is the number of requested devices
	Count uint64
	// Constraints are a set of constraints to apply when selecting the device
	// to use.
	Constraints Constraints
	// Affinities are a set of affinities to apply when selecting the device
	// to use.
	Affinities Affinities
}
// Equals reports whether r and o request the same device with the same
// count, constraints, and affinities. Two nil pointers are equal.
func (r *RequestedDevice) Equals(o *RequestedDevice) bool {
	switch {
	case r == o:
		return true
	case r == nil, o == nil:
		return false
	}
	if r.Name != o.Name || r.Count != o.Count {
		return false
	}
	return r.Constraints.Equals(&o.Constraints) && r.Affinities.Equals(&o.Affinities)
}
// Copy returns a deep copy of the request, or nil for a nil receiver.
func (r *RequestedDevice) Copy() *RequestedDevice {
	if r == nil {
		return nil
	}
	dup := *r
	dup.Constraints = CopySliceConstraints(r.Constraints)
	dup.Affinities = CopySliceAffinities(r.Affinities)
	return &dup
}
// ID parses the request Name into its DeviceIdTuple components
// (type, vendor/type, or vendor/type/name). Returns nil when the
// receiver is nil or has no name.
func (r *RequestedDevice) ID() *DeviceIdTuple {
	if r == nil || r.Name == "" {
		return nil
	}
	id := &DeviceIdTuple{}
	parts := strings.SplitN(r.Name, "/", 3)
	switch len(parts) {
	case 1:
		id.Type = parts[0]
	case 2:
		id.Vendor, id.Type = parts[0], parts[1]
	default:
		id.Vendor, id.Type, id.Name = parts[0], parts[1], parts[2]
	}
	return id
}
// Validate checks that the request names a device and that each
// constraint and affinity is individually valid. Constraint operands
// distinct_hosts and distinct_property are rejected outright.
func (r *RequestedDevice) Validate() error {
	if r == nil {
		return nil
	}
	var mErr multierror.Error
	if r.Name == "" {
		_ = multierror.Append(&mErr, errors.New("device name must be given as one of the following: type, vendor/type, or vendor/type/name"))
	}
	for idx, constr := range r.Constraints {
		// Ensure that the constraint doesn't use an operand we do not allow
		switch constr.Operand {
		case ConstraintDistinctHosts, ConstraintDistinctProperty:
			_ = multierror.Append(&mErr, fmt.Errorf("Constraint %d validation failed: using unsupported operand %q", idx+1, constr.Operand))
		default:
			if err := constr.Validate(); err != nil {
				_ = multierror.Append(&mErr, fmt.Errorf("Constraint %d validation failed: %s", idx+1, err))
			}
		}
	}
	for idx, affinity := range r.Affinities {
		if err := affinity.Validate(); err != nil {
			_ = multierror.Append(&mErr, fmt.Errorf("Affinity %d validation failed: %s", idx+1, err))
		}
	}
	return mErr.ErrorOrNil()
}
// NodeResources is used to define the resources available on a client node.
type NodeResources struct {
	// Cpu captures the node's CPU capacity.
	Cpu NodeCpuResources
	// Memory captures the node's memory capacity.
	Memory NodeMemoryResources
	// Disk captures the node's disk capacity.
	Disk NodeDiskResources
	// Networks are the per-device network resources.
	Networks Networks
	// NodeNetworks describe the node's network interfaces, keyed by Device.
	NodeNetworks []*NodeNetworkResource
	// Devices are the device groups present on the node.
	Devices []*NodeDeviceResource
	// MinDynamicPort and MaxDynamicPort bound the node's dynamic port range.
	MinDynamicPort int
	MaxDynamicPort int
}
// Copy returns a deep copy of the node resources; nil in, nil out.
func (n *NodeResources) Copy() *NodeResources {
	if n == nil {
		return nil
	}
	dup := *n
	dup.Cpu = n.Cpu.Copy()
	dup.Networks = n.Networks.Copy()
	if n.NodeNetworks != nil {
		dup.NodeNetworks = make([]*NodeNetworkResource, len(n.NodeNetworks))
		for i, nn := range n.NodeNetworks {
			dup.NodeNetworks[i] = nn.Copy()
		}
	}
	if n.Devices != nil {
		dup.Devices = make([]*NodeDeviceResource, len(n.Devices))
		for i, d := range n.Devices {
			dup.Devices[i] = d.Copy()
		}
	}
	return &dup
}
// Comparable returns a comparable version of the nodes resources. This
// conversion can be lossy so care must be taken when using it.
func (n *NodeResources) Comparable() *ComparableResources {
	if n == nil {
		return nil
	}
	return &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares:     n.Cpu.CpuShares,
				ReservedCores: n.Cpu.ReservableCpuCores,
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: n.Memory.MemoryMB,
			},
			Networks: n.Networks,
		},
		Shared: AllocatedSharedResources{
			DiskMB: n.Disk.DiskMB,
		},
	}
}
// Merge overlays o onto n: CPU/memory/disk fields merge per their own
// Merge rules, networks are concatenated, devices are replaced
// wholesale, and node networks are matched by device name (matching
// entries replaced, others appended).
func (n *NodeResources) Merge(o *NodeResources) {
	if o == nil {
		return
	}
	n.Cpu.Merge(&o.Cpu)
	n.Memory.Merge(&o.Memory)
	n.Disk.Merge(&o.Disk)
	if len(o.Networks) != 0 {
		n.Networks = append(n.Networks, o.Networks...)
	}
	if len(o.Devices) != 0 {
		n.Devices = o.Devices
	}
	for _, nw := range o.NodeNetworks {
		idx := -1
		for i, existing := range n.NodeNetworks {
			if existing.Device == nw.Device {
				idx = i
				break
			}
		}
		if idx >= 0 {
			n.NodeNetworks[idx] = nw
		} else {
			n.NodeNetworks = append(n.NodeNetworks, nw)
		}
	}
}
// Equals reports whether two NodeResources describe identical capacity.
// Devices and node networks are compared as sets. Note that the dynamic
// port bounds are not part of the comparison.
func (n *NodeResources) Equals(o *NodeResources) bool {
	if n == nil || o == nil {
		return n == o
	}
	switch {
	case !n.Cpu.Equals(&o.Cpu):
		return false
	case !n.Memory.Equals(&o.Memory):
		return false
	case !n.Disk.Equals(&o.Disk):
		return false
	case !n.Networks.Equals(&o.Networks):
		return false
	case !DevicesEquals(n.Devices, o.Devices):
		return false
	case !NodeNetworksEquals(n.NodeNetworks, o.NodeNetworks):
		return false
	}
	return true
}
// Equals equates Networks as a set: the lists must have equal length
// and every element of ns must have an Equals match somewhere in o.
func (ns *Networks) Equals(o *Networks) bool {
	if ns == o {
		return true
	}
	if ns == nil || o == nil {
		return false
	}
	if len(*ns) != len(*o) {
		return false
	}
	contains := func(want *NetworkResource) bool {
		for _, have := range *o {
			if want.Equals(have) {
				return true
			}
		}
		return false
	}
	for _, ne := range *ns {
		if !contains(ne) {
			return false
		}
	}
	return true
}
// DevicesEquals returns true if the two device arrays are set equal,
// matching entries by their vendor/type/name ID tuple.
func DevicesEquals(d1, d2 []*NodeDeviceResource) bool {
	if len(d1) != len(d2) {
		return false
	}
	byID := make(map[DeviceIdTuple]*NodeDeviceResource, len(d1))
	for _, dev := range d1 {
		byID[*dev.ID()] = dev
	}
	for _, other := range d2 {
		mine, ok := byID[*other.ID()]
		if !ok || !mine.Equals(other) {
			return false
		}
	}
	return true
}
// NodeNetworksEquals returns true when the two node-network lists are
// set equal, matching entries by device name.
func NodeNetworksEquals(n1, n2 []*NodeNetworkResource) bool {
	if len(n1) != len(n2) {
		return false
	}
	byDevice := make(map[string]*NodeNetworkResource, len(n1))
	for _, nw := range n1 {
		byDevice[nw.Device] = nw
	}
	for _, other := range n2 {
		mine, ok := byDevice[other.Device]
		if !ok || !mine.Equals(other) {
			return false
		}
	}
	return true
}
// NodeCpuResources captures the CPU resources of the node.
type NodeCpuResources struct {
	// CpuShares is the CPU shares available. This is calculated by number of
	// cores multiplied by the core frequency.
	CpuShares int64
	// TotalCpuCores is the total number of cores on the machine. This includes cores not in
	// the agent's cpuset if on a linux platform
	TotalCpuCores uint16
	// ReservableCpuCores is the set of cpus which are available to be reserved on the Node.
	// This value is currently only reported on Linux platforms which support cgroups and is
	// discovered by inspecting the cpuset of the agent's cgroup.
	ReservableCpuCores []uint16
}
// Copy returns a value copy with its own ReservableCpuCores slice so
// that mutations on the copy do not alias the original. A nil slice
// stays nil.
func (n NodeCpuResources) Copy() NodeCpuResources {
	dup := n
	if n.ReservableCpuCores != nil {
		dup.ReservableCpuCores = make([]uint16, len(n.ReservableCpuCores))
		copy(dup.ReservableCpuCores, n.ReservableCpuCores)
	}
	return dup
}
// Merge overlays the non-zero fields of o onto n; the reservable core
// slice is adopted by reference, not copied.
func (n *NodeCpuResources) Merge(o *NodeCpuResources) {
	if o == nil {
		return
	}
	if shares := o.CpuShares; shares != 0 {
		n.CpuShares = shares
	}
	if cores := o.TotalCpuCores; cores != 0 {
		n.TotalCpuCores = cores
	}
	if reservable := o.ReservableCpuCores; len(reservable) != 0 {
		n.ReservableCpuCores = reservable
	}
}
// Equals reports whether the two CPU resource descriptions match,
// including an element-wise (order-sensitive) comparison of the
// reservable core lists.
func (n *NodeCpuResources) Equals(o *NodeCpuResources) bool {
	if n == nil || o == nil {
		return n == o
	}
	if n.CpuShares != o.CpuShares || n.TotalCpuCores != o.TotalCpuCores {
		return false
	}
	if len(n.ReservableCpuCores) != len(o.ReservableCpuCores) {
		return false
	}
	for i, core := range n.ReservableCpuCores {
		if core != o.ReservableCpuCores[i] {
			return false
		}
	}
	return true
}
// SharesPerCore returns the CPU shares available per core. It returns 0
// when TotalCpuCores is zero to avoid an integer divide-by-zero panic
// for nodes that reported no cores.
func (n *NodeCpuResources) SharesPerCore() int64 {
	if n.TotalCpuCores == 0 {
		return 0
	}
	return n.CpuShares / int64(n.TotalCpuCores)
}
// NodeMemoryResources captures the memory resources of the node
type NodeMemoryResources struct {
	// MemoryMB is the total available memory on the node
	MemoryMB int64
}
// Merge overlays o's memory total onto n when it is non-zero.
func (n *NodeMemoryResources) Merge(o *NodeMemoryResources) {
	if o == nil {
		return
	}
	if mb := o.MemoryMB; mb != 0 {
		n.MemoryMB = mb
	}
}
// Equals reports whether both descriptions carry the same memory total;
// two nil pointers compare equal.
func (n *NodeMemoryResources) Equals(o *NodeMemoryResources) bool {
	if n == nil || o == nil {
		return n == o
	}
	return n.MemoryMB == o.MemoryMB
}
// NodeDiskResources captures the disk resources of the node
type NodeDiskResources struct {
	// DiskMB is the total available disk space on the node
	DiskMB int64
}
// Merge overlays o's disk total onto n when it is non-zero.
func (n *NodeDiskResources) Merge(o *NodeDiskResources) {
	if o == nil {
		return
	}
	if mb := o.DiskMB; mb != 0 {
		n.DiskMB = mb
	}
}
// Equals reports whether both descriptions carry the same disk total;
// two nil pointers compare equal.
func (n *NodeDiskResources) Equals(o *NodeDiskResources) bool {
	if n == nil || o == nil {
		return n == o
	}
	return n.DiskMB == o.DiskMB
}
// DeviceIdTuple is the tuple that identifies a device. All three fields
// are plain strings, so values are comparable and usable as map keys.
type DeviceIdTuple struct {
	Vendor string
	Type   string
	Name   string
}
// String renders the tuple as "vendor/type/name"; a nil tuple renders
// as the empty string.
func (id *DeviceIdTuple) String() string {
	if id == nil {
		return ""
	}
	return id.Vendor + "/" + id.Type + "/" + id.Name
}
// Matches returns if this Device ID is a superset of the passed ID:
// every non-empty field of other must equal the corresponding field of
// id. A nil other never matches.
func (id *DeviceIdTuple) Matches(other *DeviceIdTuple) bool {
	if other == nil {
		return false
	}
	switch {
	case other.Name != "" && other.Name != id.Name:
		return false
	case other.Vendor != "" && other.Vendor != id.Vendor:
		return false
	case other.Type != "" && other.Type != id.Type:
		return false
	}
	return true
}
// Equals returns if this Device ID is the same as the passed ID; two
// nil pointers compare equal.
func (id *DeviceIdTuple) Equals(o *DeviceIdTuple) bool {
	if id == nil || o == nil {
		return id == nil && o == nil
	}
	// DeviceIdTuple is comparable, so a value comparison covers all fields.
	return *id == *o
}
// NodeDeviceResource captures a set of devices sharing a common
// vendor/type/device_name tuple.
type NodeDeviceResource struct {
	Vendor string
	Type   string
	Name   string
	// Instances are the individual devices in this group.
	Instances []*NodeDevice
	// Attributes are plugin-reported attributes shared by the group.
	Attributes map[string]*psstructs.Attribute
}
// ID returns the identifying vendor/type/name tuple, or nil for a nil
// receiver.
func (n *NodeDeviceResource) ID() *DeviceIdTuple {
	if n == nil {
		return nil
	}
	id := DeviceIdTuple{Vendor: n.Vendor, Type: n.Type, Name: n.Name}
	return &id
}
// Copy returns a deep copy of the device group, including its instances
// and attribute map.
func (n *NodeDeviceResource) Copy() *NodeDeviceResource {
	if n == nil {
		return nil
	}
	dup := *n
	if count := len(n.Instances); count != 0 {
		dup.Instances = make([]*NodeDevice, 0, count)
		for _, inst := range n.Instances {
			dup.Instances = append(dup.Instances, inst.Copy())
		}
	}
	dup.Attributes = psstructs.CopyMapStringAttribute(n.Attributes)
	return &dup
}
// Equals reports whether two device groups share the same identity,
// attributes, and instance set (instances matched by ID).
func (n *NodeDeviceResource) Equals(o *NodeDeviceResource) bool {
	if n == nil || o == nil {
		return n == o
	}
	if n.Vendor != o.Vendor || n.Type != o.Type || n.Name != o.Name {
		return false
	}
	// Compare the attribute maps. NOTE(review): values are compared by
	// pointer identity, not by attribute contents — confirm this is the
	// intended semantic for *psstructs.Attribute.
	if len(n.Attributes) != len(o.Attributes) {
		return false
	}
	for k, v := range n.Attributes {
		if otherV, ok := o.Attributes[k]; !ok || v != otherV {
			return false
		}
	}
	// Compare the instances as a set keyed by device ID.
	if len(n.Instances) != len(o.Instances) {
		return false
	}
	byID := make(map[string]*NodeDevice, len(n.Instances))
	for _, inst := range n.Instances {
		byID[inst.ID] = inst
	}
	for _, other := range o.Instances {
		if mine, ok := byID[other.ID]; !ok || !mine.Equals(other) {
			return false
		}
	}
	return true
}
// NodeDevice is an instance of a particular device.
type NodeDevice struct {
	// ID is the ID of the device.
	ID string
	// Healthy captures whether the device is healthy.
	Healthy bool
	// HealthDescription is used to provide a human readable description of why
	// the device may be unhealthy.
	HealthDescription string
	// Locality stores HW locality information for the node to optionally be
	// used when making placement decisions.
	Locality *NodeDeviceLocality
}
// Equals reports whether two device instances are identical in ID,
// health, health description, and locality.
func (n *NodeDevice) Equals(o *NodeDevice) bool {
	if o == nil && n == nil {
		return true
	} else if o == nil {
		return false
	} else if n == nil {
		return false
	}
	if n.ID != o.ID {
		return false
	} else if n.Healthy != o.Healthy {
		return false
	} else if n.HealthDescription != o.HealthDescription {
		return false
	} else if !n.Locality.Equals(o.Locality) {
		return false
	}
	// All fields matched, so the devices are equal. The previous
	// unconditional "return false" here made Equals always report
	// inequality, breaking the set comparison in NodeDeviceResource.Equals.
	return true
}
// Copy returns a deep copy of the device instance, including its
// locality; nil in, nil out.
func (n *NodeDevice) Copy() *NodeDevice {
	if n == nil {
		return nil
	}
	dup := *n
	dup.Locality = n.Locality.Copy()
	return &dup
}
// NodeDeviceLocality stores information about the devices hardware locality on
// the node.
type NodeDeviceLocality struct {
	// PciBusID is the PCI Bus ID for the device.
	PciBusID string
}
// Equals reports whether two localities reference the same PCI bus ID;
// two nil pointers compare equal.
func (n *NodeDeviceLocality) Equals(o *NodeDeviceLocality) bool {
	if n == nil || o == nil {
		return n == o
	}
	return n.PciBusID == o.PciBusID
}
// Copy returns a copy of the locality, or nil for a nil receiver.
func (n *NodeDeviceLocality) Copy() *NodeDeviceLocality {
	if n == nil {
		return nil
	}
	dup := *n
	return &dup
}
// NodeReservedResources is used to capture the resources on a client node that
// should be reserved and not made available to jobs.
type NodeReservedResources struct {
	Cpu      NodeReservedCpuResources
	Memory   NodeReservedMemoryResources
	Disk     NodeReservedDiskResources
	Networks NodeReservedNetworkResources
}
// Copy returns a deep copy of the reserved resources; nil in, nil out.
func (n *NodeReservedResources) Copy() *NodeReservedResources {
	if n == nil {
		return nil
	}
	newN := new(NodeReservedResources)
	*newN = *n
	// The struct copy above aliases the Cpu.ReservedCpuCores slice; clone
	// it so mutations through the copy cannot leak into the original.
	if n.Cpu.ReservedCpuCores != nil {
		newN.Cpu.ReservedCpuCores = make([]uint16, len(n.Cpu.ReservedCpuCores))
		copy(newN.Cpu.ReservedCpuCores, n.Cpu.ReservedCpuCores)
	}
	return newN
}
// Comparable returns a comparable version of the node's reserved resources. The
// returned resources doesn't contain any network information. This conversion
// can be lossy so care must be taken when using it.
func (n *NodeReservedResources) Comparable() *ComparableResources {
	if n == nil {
		return nil
	}
	return &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares:     n.Cpu.CpuShares,
				ReservedCores: n.Cpu.ReservedCpuCores,
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: n.Memory.MemoryMB,
			},
		},
		Shared: AllocatedSharedResources{
			DiskMB: n.Disk.DiskMB,
		},
	}
}
// NodeReservedCpuResources captures the reserved CPU resources of the node.
type NodeReservedCpuResources struct {
	// CpuShares is the amount of CPU shares withheld from jobs.
	CpuShares int64
	// ReservedCpuCores lists specific cores withheld from jobs.
	ReservedCpuCores []uint16
}
// NodeReservedMemoryResources captures the reserved memory resources of the node.
type NodeReservedMemoryResources struct {
	// MemoryMB is the amount of memory withheld from jobs.
	MemoryMB int64
}
// NodeReservedDiskResources captures the reserved disk resources of the node.
type NodeReservedDiskResources struct {
	// DiskMB is the amount of disk space withheld from jobs.
	DiskMB int64
}
// NodeReservedNetworkResources captures the reserved network resources of the node.
type NodeReservedNetworkResources struct {
	// ReservedHostPorts is the set of ports reserved on all host network
	// interfaces. Its format is a comma separate list of integers or integer
	// ranges. (80,443,1000-2000,2005)
	ReservedHostPorts string
}
// ParseReservedHostPorts returns the reserved host ports expanded from
// the ReservedHostPorts range expression (e.g. "80,443,1000-2000").
func (n *NodeReservedNetworkResources) ParseReservedHostPorts() ([]uint64, error) {
	return ParsePortRanges(n.ReservedHostPorts)
}
// AllocatedResources is the set of resources to be used by an allocation.
type AllocatedResources struct {
	// Tasks is a mapping of task name to the resources for the task.
	Tasks map[string]*AllocatedTaskResources
	// TaskLifecycles maps task names to their lifecycle configuration.
	TaskLifecycles map[string]*TaskLifecycleConfig
	// Shared is the set of resource that are shared by all tasks in the group.
	Shared AllocatedSharedResources
}
// Copy returns a deep copy of the allocated resources; nil in, nil out.
func (a *AllocatedResources) Copy() *AllocatedResources {
	if a == nil {
		return nil
	}
	out := AllocatedResources{
		Shared: a.Shared.Copy(),
	}
	if a.Tasks != nil {
		// Size the maps from the source; the previous len(out.Tasks) and
		// len(out.TaskLifecycles) hints were always zero because the
		// destination maps had not been populated yet.
		out.Tasks = make(map[string]*AllocatedTaskResources, len(a.Tasks))
		for task, resource := range a.Tasks {
			out.Tasks[task] = resource.Copy()
		}
	}
	if a.TaskLifecycles != nil {
		out.TaskLifecycles = make(map[string]*TaskLifecycleConfig, len(a.TaskLifecycles))
		for task, lifecycle := range a.TaskLifecycles {
			out.TaskLifecycles[task] = lifecycle.Copy()
		}
	}
	return &out
}
// Comparable returns a comparable version of the allocations allocated
// resources. This conversion can be lossy so care must be taken when using it.
func (a *AllocatedResources) Comparable() *ComparableResources {
	if a == nil {
		return nil
	}
	c := &ComparableResources{
		Shared: a.Shared,
	}
	// Bucket each task's resources by lifecycle phase so that phases can
	// be combined with max() rather than sum() below.
	prestartSidecarTasks := &AllocatedTaskResources{}
	prestartEphemeralTasks := &AllocatedTaskResources{}
	main := &AllocatedTaskResources{}
	poststopTasks := &AllocatedTaskResources{}
	for taskName, r := range a.Tasks {
		lc := a.TaskLifecycles[taskName]
		if lc == nil {
			main.Add(r)
		} else if lc.Hook == TaskLifecycleHookPrestart {
			if lc.Sidecar {
				prestartSidecarTasks.Add(r)
			} else {
				prestartEphemeralTasks.Add(r)
			}
		} else if lc.Hook == TaskLifecycleHookPoststop {
			poststopTasks.Add(r)
		}
	}
	// Take the max across ephemeral-prestart, main, and poststop buckets
	// (they are folded with Max, not Add), then add sidecar prestart
	// tasks on top before flattening into the result.
	// update this loop to account for lifecycle hook
	prestartEphemeralTasks.Max(main)
	prestartEphemeralTasks.Max(poststopTasks)
	prestartSidecarTasks.Add(prestartEphemeralTasks)
	c.Flattened.Add(prestartSidecarTasks)
	// Add network resources that are at the task group level
	for _, network := range a.Shared.Networks {
		c.Flattened.Add(&AllocatedTaskResources{
			Networks: []*NetworkResource{network},
		})
	}
	return c
}
// OldTaskResources returns the pre-0.9.0 map of task resources
func (a *AllocatedResources) OldTaskResources() map[string]*Resources {
	oldStyle := make(map[string]*Resources, len(a.Tasks))
	for name, res := range a.Tasks {
		oldStyle[name] = &Resources{
			CPU:         int(res.Cpu.CpuShares),
			MemoryMB:    int(res.Memory.MemoryMB),
			MemoryMaxMB: int(res.Memory.MemoryMaxMB),
			Networks:    res.Networks,
		}
	}
	return oldStyle
}
// Canonicalize normalizes the shared resources and mirrors every task
// network port (dynamic first, then reserved) into the shared Ports list.
func (a *AllocatedResources) Canonicalize() {
	a.Shared.Canonicalize()
	for _, r := range a.Tasks {
		for _, nw := range r.Networks {
			// Iterate the two port slices directly rather than via
			// append(nw.DynamicPorts, nw.ReservedPorts...): that append
			// can write into spare capacity of DynamicPorts' backing
			// array and corrupt any slice sharing it.
			for _, ports := range [][]Port{nw.DynamicPorts, nw.ReservedPorts} {
				for _, port := range ports {
					a.Shared.Ports = append(a.Shared.Ports, AllocatedPortMapping{
						Label:  port.Label,
						Value:  port.Value,
						To:     port.To,
						HostIP: nw.IP,
					})
				}
			}
		}
	}
}
// AllocatedTaskResources are the set of resources allocated to a task.
type AllocatedTaskResources struct {
	Cpu      AllocatedCpuResources
	Memory   AllocatedMemoryResources
	Networks Networks
	// Devices are the device allocations made for the task.
	Devices []*AllocatedDeviceResource
}
// Copy returns a deep copy of the task resources; nil in, nil out.
func (a *AllocatedTaskResources) Copy() *AllocatedTaskResources {
	if a == nil {
		return nil
	}
	dup := *a
	dup.Networks = a.Networks.Copy()
	if a.Devices != nil {
		dup.Devices = make([]*AllocatedDeviceResource, len(a.Devices))
		for i, d := range a.Devices {
			dup.Devices[i] = d.Copy()
		}
	}
	return &dup
}
// NetIndex finds the matching net index using device name, delegating
// to Networks.NetIndex; returns -1 when no network matches.
func (a *AllocatedTaskResources) NetIndex(n *NetworkResource) int {
	return a.Networks.NetIndex(n)
}
// Add accumulates delta's CPU, memory, network, and device resources
// into a. Networks merge by device name and devices by ID tuple;
// unmatched entries are appended as copies.
func (a *AllocatedTaskResources) Add(delta *AllocatedTaskResources) {
	if delta == nil {
		return
	}
	a.Cpu.Add(&delta.Cpu)
	a.Memory.Add(&delta.Memory)
	for _, network := range delta.Networks {
		if idx := a.NetIndex(network); idx >= 0 {
			a.Networks[idx].Add(network)
		} else {
			a.Networks = append(a.Networks, network.Copy())
		}
	}
	for _, device := range delta.Devices {
		if idx := AllocatedDevices(a.Devices).Index(device); idx >= 0 {
			a.Devices[idx].Add(device)
		} else {
			a.Devices = append(a.Devices, device.Copy())
		}
	}
}
// Max raises a's CPU and memory to at least other's values. Networks
// and devices from other are folded in the same way as Add: merged when
// a matching entry exists, otherwise appended as copies.
func (a *AllocatedTaskResources) Max(other *AllocatedTaskResources) {
	if other == nil {
		return
	}
	a.Cpu.Max(&other.Cpu)
	a.Memory.Max(&other.Memory)
	for _, network := range other.Networks {
		if idx := a.NetIndex(network); idx >= 0 {
			a.Networks[idx].Add(network)
		} else {
			a.Networks = append(a.Networks, network.Copy())
		}
	}
	for _, device := range other.Devices {
		if idx := AllocatedDevices(a.Devices).Index(device); idx >= 0 {
			a.Devices[idx].Add(device)
		} else {
			a.Devices = append(a.Devices, device.Copy())
		}
	}
}
// Comparable turns AllocatedTaskResources into ComparableResources
// as a helper step in preemption
func (a *AllocatedTaskResources) Comparable() *ComparableResources {
	flat := AllocatedTaskResources{
		Cpu: AllocatedCpuResources{
			CpuShares:     a.Cpu.CpuShares,
			ReservedCores: a.Cpu.ReservedCores,
		},
		Memory: AllocatedMemoryResources{
			MemoryMB:    a.Memory.MemoryMB,
			MemoryMaxMB: a.Memory.MemoryMaxMB,
		},
	}
	flat.Networks = append(flat.Networks, a.Networks...)
	return &ComparableResources{Flattened: flat}
}
// Subtract only subtracts CPU and Memory resources. Network utilization
// is managed separately in NetworkIndex
func (a *AllocatedTaskResources) Subtract(delta *AllocatedTaskResources) {
	if delta == nil {
		return
	}
	a.Cpu.Subtract(&delta.Cpu)
	a.Memory.Subtract(&delta.Memory)
}
// AllocatedSharedResources are the set of resources allocated to a task group.
type AllocatedSharedResources struct {
	Networks Networks
	DiskMB   int64
	// Ports mirrors the group's allocated port mappings.
	Ports AllocatedPorts
}
// Copy returns a deep copy of the shared resources. The Ports slice is
// cloned as well: it was previously aliased, so appends or writes
// through the copy could have been visible in the original.
func (a AllocatedSharedResources) Copy() AllocatedSharedResources {
	out := AllocatedSharedResources{
		Networks: a.Networks.Copy(),
		DiskMB:   a.DiskMB,
	}
	if a.Ports != nil {
		out.Ports = make(AllocatedPorts, len(a.Ports))
		copy(out.Ports, a.Ports)
	}
	return out
}
// Add folds delta's networks and disk into a; the Ports list is not
// touched.
func (a *AllocatedSharedResources) Add(delta *AllocatedSharedResources) {
	if delta == nil {
		return
	}
	a.DiskMB += delta.DiskMB
	a.Networks = append(a.Networks, delta.Networks...)
}
// Subtract removes delta's networks (matched by pointer identity) and
// disk from a.
func (a *AllocatedSharedResources) Subtract(delta *AllocatedSharedResources) {
	if delta == nil {
		return
	}
	remove := make(map[*NetworkResource]bool, len(delta.Networks))
	for _, n := range delta.Networks {
		remove[n] = true
	}
	var kept Networks
	for _, n := range a.Networks {
		if !remove[n] {
			kept = append(kept, n)
		}
	}
	a.Networks = kept
	a.DiskMB -= delta.DiskMB
}
// Canonicalize backfills the Ports list from the first network's port
// definitions (dynamic first, then reserved) when that network defines
// ports and Ports has not been populated yet.
func (a *AllocatedSharedResources) Canonicalize() {
	if len(a.Networks) == 0 {
		return
	}
	nw := a.Networks[0]
	if len(a.Ports) != 0 || len(nw.DynamicPorts)+len(nw.ReservedPorts) == 0 {
		return
	}
	for _, ports := range [][]Port{nw.DynamicPorts, nw.ReservedPorts} {
		for _, p := range ports {
			a.Ports = append(a.Ports, AllocatedPortMapping{
				Label:  p.Label,
				Value:  p.Value,
				To:     p.To,
				HostIP: nw.IP,
			})
		}
	}
}
// AllocatedCpuResources captures the allocated CPU resources.
type AllocatedCpuResources struct {
	CpuShares int64
	// ReservedCores are the specific cores reserved for the allocation.
	ReservedCores []uint16
}
// Add sums delta's CPU shares into a and unions its reserved cores.
func (a *AllocatedCpuResources) Add(delta *AllocatedCpuResources) {
	if delta == nil {
		return
	}
	a.CpuShares += delta.CpuShares
	merged := cpuset.New(a.ReservedCores...).Union(cpuset.New(delta.ReservedCores...))
	a.ReservedCores = merged.ToSlice()
}
// Subtract removes delta's CPU shares and reserved cores from a.
func (a *AllocatedCpuResources) Subtract(delta *AllocatedCpuResources) {
	if delta == nil {
		return
	}
	a.CpuShares -= delta.CpuShares
	remaining := cpuset.New(a.ReservedCores...).Difference(cpuset.New(delta.ReservedCores...))
	a.ReservedCores = remaining.ToSlice()
}
// Max raises a's shares to other's when larger, and adopts other's
// reserved core slice when it contains more entries (the comparison is
// on count only, not on set contents).
func (a *AllocatedCpuResources) Max(other *AllocatedCpuResources) {
	if other == nil {
		return
	}
	if a.CpuShares < other.CpuShares {
		a.CpuShares = other.CpuShares
	}
	if len(a.ReservedCores) < len(other.ReservedCores) {
		a.ReservedCores = other.ReservedCores
	}
}
// AllocatedMemoryResources captures the allocated memory resources.
type AllocatedMemoryResources struct {
	MemoryMB int64
	// MemoryMaxMB is the optional upper memory bound; 0 means unset.
	MemoryMaxMB int64
}
// Add sums delta's memory into a. When delta carries no explicit max,
// its MemoryMB counts toward the max instead.
func (a *AllocatedMemoryResources) Add(delta *AllocatedMemoryResources) {
	if delta == nil {
		return
	}
	a.MemoryMB += delta.MemoryMB
	extraMax := delta.MemoryMaxMB
	if extraMax == 0 {
		extraMax = delta.MemoryMB
	}
	a.MemoryMaxMB += extraMax
}
// Subtract removes delta's memory from a. When delta carries no
// explicit max, its MemoryMB is removed from the max instead.
func (a *AllocatedMemoryResources) Subtract(delta *AllocatedMemoryResources) {
	if delta == nil {
		return
	}
	a.MemoryMB -= delta.MemoryMB
	lessMax := delta.MemoryMaxMB
	if lessMax == 0 {
		lessMax = delta.MemoryMB
	}
	a.MemoryMaxMB -= lessMax
}
// Max raises a's memory values to at least other's.
func (a *AllocatedMemoryResources) Max(other *AllocatedMemoryResources) {
	if other == nil {
		return
	}
	if a.MemoryMB < other.MemoryMB {
		a.MemoryMB = other.MemoryMB
	}
	if a.MemoryMaxMB < other.MemoryMaxMB {
		a.MemoryMaxMB = other.MemoryMaxMB
	}
}
type AllocatedDevices []*AllocatedDeviceResource
// Index finds the matching index using the passed device. If not found, -1 is
// returned. A nil device also yields -1.
func (a AllocatedDevices) Index(d *AllocatedDeviceResource) int {
	if d == nil {
		return -1
	}
	want := d.ID()
	for i, candidate := range a {
		if candidate.ID().Equals(want) {
			return i
		}
	}
	return -1
}
// AllocatedDeviceResource captures a set of allocated devices.
type AllocatedDeviceResource struct {
	// Vendor, Type, and Name are used to select the plugin to request the
	// device IDs from.
	Vendor string
	Type   string
	Name   string
	// DeviceIDs is the set of allocated devices
	DeviceIDs []string
}
// ID returns the vendor/type/name tuple identifying this allocation, or
// nil for a nil receiver.
func (a *AllocatedDeviceResource) ID() *DeviceIdTuple {
	if a == nil {
		return nil
	}
	id := DeviceIdTuple{Vendor: a.Vendor, Type: a.Type, Name: a.Name}
	return &id
}
// Add appends delta's device IDs onto a's allocation; a nil delta is a
// no-op. No de-duplication is performed.
func (a *AllocatedDeviceResource) Add(delta *AllocatedDeviceResource) {
	if delta == nil {
		return
	}
	a.DeviceIDs = append(a.DeviceIDs, delta.DeviceIDs...)
}
// Copy returns a deep copy with its own DeviceIDs slice; nil in, nil out.
func (a *AllocatedDeviceResource) Copy() *AllocatedDeviceResource {
	if a == nil {
		return a
	}
	dup := *a
	dup.DeviceIDs = make([]string, len(a.DeviceIDs))
	copy(dup.DeviceIDs, a.DeviceIDs)
	return &dup
}
// ComparableResources is the set of resources allocated to a task group but
// not keyed by Task, making it easier to compare.
type ComparableResources struct {
	Flattened AllocatedTaskResources
	Shared    AllocatedSharedResources
}
// Add folds delta's flattened and shared resources into c; a nil delta
// is a no-op.
func (c *ComparableResources) Add(delta *ComparableResources) {
	if delta == nil {
		return
	}
	c.Flattened.Add(&delta.Flattened)
	c.Shared.Add(&delta.Shared)
}
// Subtract removes delta's flattened and shared resources from c; a nil
// delta is a no-op.
func (c *ComparableResources) Subtract(delta *ComparableResources) {
	if delta == nil {
		return
	}
	c.Flattened.Subtract(&delta.Flattened)
	c.Shared.Subtract(&delta.Shared)
}
// Copy returns a copy of the comparable resources. NOTE(review): this
// is a shallow struct copy — slices inside Flattened/Shared (networks,
// ports, devices) remain shared with the original; confirm callers
// treat the copy as read-only.
func (c *ComparableResources) Copy() *ComparableResources {
	if c == nil {
		return nil
	}
	newR := new(ComparableResources)
	*newR = *c
	return newR
}
// Superset checks if one set of resources is a superset of another. This
// ignores network resources, and the NetworkIndex should be used for that.
// On failure it also returns the first dimension that fell short:
// "cpu", "cores", "memory", or "disk".
func (c *ComparableResources) Superset(other *ComparableResources) (bool, string) {
	if c.Flattened.Cpu.CpuShares < other.Flattened.Cpu.CpuShares {
		return false, "cpu"
	}
	if cores := c.Flattened.Cpu.ReservedCores; len(cores) > 0 {
		if !cpuset.New(cores...).IsSupersetOf(cpuset.New(other.Flattened.Cpu.ReservedCores...)) {
			return false, "cores"
		}
	}
	if c.Flattened.Memory.MemoryMB < other.Flattened.Memory.MemoryMB {
		return false, "memory"
	}
	if c.Shared.DiskMB < other.Shared.DiskMB {
		return false, "disk"
	}
	return true, ""
}
// NetIndex finds the matching net index using device name, delegating
// to the flattened Networks list; returns -1 when no network matches.
func (c *ComparableResources) NetIndex(n *NetworkResource) int {
	return c.Flattened.Networks.NetIndex(n)
}
// Job types determine which scheduler handles a job.
const (
	// JobTypeCore is reserved for internal system tasks and is
	// always handled by the CoreScheduler.
	JobTypeCore = "_core"
	JobTypeService = "service"
	JobTypeBatch = "batch"
	JobTypeSystem = "system"
	JobTypeSysBatch = "sysbatch"
)
// Job statuses describe the overall scheduling state of a job.
const (
	JobStatusPending = "pending" // Pending means the job is waiting on scheduling
	JobStatusRunning = "running" // Running means the job has non-terminal allocations
	JobStatusDead = "dead" // Dead means all evaluation's and allocations are terminal
)
const (
	// JobMinPriority is the minimum allowed priority
	JobMinPriority = 1
	// JobDefaultPriority is the default priority if not specified.
	JobDefaultPriority = 50
	// JobMaxPriority is the maximum allowed priority
	JobMaxPriority = 100
	// CoreJobPriority should be higher than any user
	// specified job so that it gets priority. This is important
	// for the system to remain healthy.
	CoreJobPriority = JobMaxPriority * 2
	// JobTrackedVersions is the number of historic job versions that are
	// kept.
	JobTrackedVersions = 6
	// JobTrackedScalingEvents is the number of scaling events that are
	// kept for a single task group.
	JobTrackedScalingEvents = 20
)
// Job is the scope of a scheduling request to Nomad. It is the largest
// scoped object, and is a named collection of task groups. Each task group
// is further composed of tasks. A task group (TG) is the unit of scheduling
// however.
type Job struct {
	// Stop marks whether the user has stopped the job. A stopped job will
	// have all created allocations stopped and acts as a way to stop a job
	// without purging it from the system. This allows existing allocs to be
	// queried and the job to be inspected as it is being killed.
	Stop bool
	// Region is the Nomad region that handles scheduling this job
	Region string
	// Namespace is the namespace the job is submitted into.
	Namespace string
	// ID is a unique identifier for the job per region. It can be
	// specified hierarchically like LineOfBiz/OrgName/Team/Project
	ID string
	// ParentID is the unique identifier of the job that spawned this job.
	ParentID string
	// Name is the logical name of the job used to refer to it. This is unique
	// per region, but not unique globally.
	Name string
	// Type is used to control various behaviors about the job. Most jobs
	// are service jobs, meaning they are expected to be long lived.
	// Some jobs are batch oriented meaning they run and then terminate.
	// This can be extended in the future to support custom schedulers.
	Type string
	// Priority is used to control scheduling importance and if this job
	// can preempt other jobs.
	Priority int
	// AllAtOnce is used to control if incremental scheduling of task groups
	// is allowed or if we must do a gang scheduling of the entire job. This
	// can slow down larger jobs if resources are not available.
	AllAtOnce bool
	// Datacenters contains all the datacenters this job is allowed to span
	Datacenters []string
	// Constraints can be specified at a job level and apply to
	// all the task groups and tasks.
	Constraints []*Constraint
	// Affinities can be specified at the job level to express
	// scheduling preferences that apply to all groups and tasks
	Affinities []*Affinity
	// Spread can be specified at the job level to express spreading
	// allocations across a desired attribute, such as datacenter
	Spreads []*Spread
	// TaskGroups are the collections of task groups that this job needs
	// to run. Each task group is an atomic unit of scheduling and placement.
	TaskGroups []*TaskGroup
	// See agent.ApiJobToStructJob
	// Update provides defaults for the TaskGroup Update stanzas
	Update UpdateStrategy
	// Multiregion, when set, holds the job's multiregion configuration.
	Multiregion *Multiregion
	// Periodic is used to define the interval the job is run at.
	Periodic *PeriodicConfig
	// ParameterizedJob is used to specify the job as a parameterized job
	// for dispatching.
	ParameterizedJob *ParameterizedJobConfig
	// Dispatched is used to identify if the Job has been dispatched from a
	// parameterized job.
	Dispatched bool
	// DispatchIdempotencyToken is optionally used to ensure that a dispatched job does not have any
	// non-terminal siblings which have the same token value.
	DispatchIdempotencyToken string
	// Payload is the payload supplied when the job was dispatched.
	Payload []byte
	// Meta is used to associate arbitrary metadata with this
	// job. This is opaque to Nomad.
	Meta map[string]string
	// ConsulToken is the Consul token that proves the submitter of the job has
	// access to the Service Identity policies associated with the job's
	// Consul Connect enabled services. This field is only used to transfer the
	// token and is not stored after Job submission.
	ConsulToken string
	// ConsulNamespace is the Consul namespace
	ConsulNamespace string
	// VaultToken is the Vault token that proves the submitter of the job has
	// access to the specified Vault policies. This field is only used to
	// transfer the token and is not stored after Job submission.
	VaultToken string
	// VaultNamespace is the Vault namespace
	VaultNamespace string
	// NomadTokenID is the Accessor ID of the ACL token (if any)
	// used to register this version of the job. Used by deploymentwatcher.
	NomadTokenID string
	// Status is the current scheduling status of the job.
	Status string
	// StatusDescription is meant to provide more human useful information
	StatusDescription string
	// Stable marks a job as stable. Stability is only defined on "service" and
	// "system" jobs. The stability of a job will be set automatically as part
	// of a deployment and can be manually set via APIs. This field is updated
	// when the status of a corresponding deployment transitions to Failed
	// or Successful. This field is not meaningful for jobs that don't have an
	// update stanza.
	Stable bool
	// Version is a monotonically increasing version number that is incremented
	// on each job register.
	Version uint64
	// SubmitTime is the time at which the job was submitted as a UnixNano in
	// UTC
	SubmitTime int64
	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
	JobModifyIndex uint64
}
// NamespacedID returns the job's ID scoped by its namespace, which is
// useful for logging.
func (j *Job) NamespacedID() NamespacedID {
	id := NamespacedID{
		Namespace: j.Namespace,
		ID:        j.ID,
	}
	return id
}
// GetID implements the IDGetter interface, required for pagination.
// A nil job yields the empty string.
func (j *Job) GetID() string {
	if j != nil {
		return j.ID
	}
	return ""
}
// GetNamespace implements the NamespaceGetter interface, required for
// pagination and filtering namespaces in endpoints that support glob namespace
// requests using tokens with limited access. A nil job yields the empty string.
func (j *Job) GetNamespace() string {
	if j != nil {
		return j.Namespace
	}
	return ""
}
// GetCreateIndex implements the CreateIndexGetter interface, required for
// pagination. A nil job yields zero.
func (j *Job) GetCreateIndex() uint64 {
	if j != nil {
		return j.CreateIndex
	}
	return 0
}
// Canonicalize is used to canonicalize fields in the Job. This should be
// called when registering a Job.
func (j *Job) Canonicalize() {
	if j == nil {
		return
	}

	// Normalize an empty Meta map to nil so empty and unset maps compare
	// equal under reflect.DeepEqual and don't trigger spurious scheduling.
	if len(j.Meta) == 0 {
		j.Meta = nil
	}

	// Default the namespace when none was supplied.
	if j.Namespace == "" {
		j.Namespace = DefaultNamespace
	}

	// Canonicalize each task group with the job as context.
	for _, group := range j.TaskGroups {
		group.Canonicalize(j)
	}

	// Canonicalize the optional sub-configurations when present.
	if pj := j.ParameterizedJob; pj != nil {
		pj.Canonicalize()
	}
	if mr := j.Multiregion; mr != nil {
		mr.Canonicalize()
	}
	if p := j.Periodic; p != nil {
		p.Canonicalize()
	}
}
// Copy returns a deep copy of the Job. It is expected that callers use recover.
// This job can panic if the deep copy failed as it uses reflection.
func (j *Job) Copy() *Job {
	if j == nil {
		return nil
	}

	// Start from a shallow copy, then replace each reference-typed field
	// with its own deep copy.
	c := new(Job)
	*c = *j
	c.Datacenters = helper.CopySliceString(c.Datacenters)
	c.Constraints = CopySliceConstraints(c.Constraints)
	c.Affinities = CopySliceAffinities(c.Affinities)
	c.Multiregion = c.Multiregion.Copy()

	if j.TaskGroups != nil {
		groups := make([]*TaskGroup, len(c.TaskGroups))
		for i, tg := range c.TaskGroups {
			groups[i] = tg.Copy()
		}
		c.TaskGroups = groups
	}

	c.Periodic = c.Periodic.Copy()
	c.Meta = helper.CopyMapStringString(c.Meta)
	c.ParameterizedJob = c.ParameterizedJob.Copy()
	return c
}
// Validate is used to check a job for reasonable configuration.
// All problems are accumulated into a single multierror so the caller can
// report every issue at once; ordering of the checks determines the
// ordering of the reported errors.
func (j *Job) Validate() error {
	var mErr multierror.Error

	// Required identity fields: region (unless multiregion), ID, name,
	// and namespace. IDs and names may not contain spaces or NUL bytes.
	if j.Region == "" && j.Multiregion == nil {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job region"))
	}
	if j.ID == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job ID"))
	} else if strings.Contains(j.ID, " ") {
		mErr.Errors = append(mErr.Errors, errors.New("Job ID contains a space"))
	} else if strings.Contains(j.ID, "\000") {
		mErr.Errors = append(mErr.Errors, errors.New("Job ID contains a null character"))
	}
	if j.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job name"))
	} else if strings.Contains(j.Name, "\000") {
		mErr.Errors = append(mErr.Errors, errors.New("Job Name contains a null character"))
	}
	if j.Namespace == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Job must be in a namespace"))
	}

	// The type must be one of the known scheduler types.
	switch j.Type {
	case JobTypeCore, JobTypeService, JobTypeBatch, JobTypeSystem, JobTypeSysBatch:
	case "":
		mErr.Errors = append(mErr.Errors, errors.New("Missing job type"))
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Invalid job type: %q", j.Type))
	}
	if j.Priority < JobMinPriority || j.Priority > JobMaxPriority {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Job priority must be between [%d, %d]", JobMinPriority, JobMaxPriority))
	}

	// Datacenters are required unless the job is multiregion, and every
	// listed datacenter must be a non-empty string.
	if len(j.Datacenters) == 0 && !j.IsMultiregion() {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job datacenters"))
	} else {
		for _, v := range j.Datacenters {
			if v == "" {
				mErr.Errors = append(mErr.Errors, errors.New("Job datacenter must be non-empty string"))
			}
		}
	}
	if len(j.TaskGroups) == 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job task groups"))
	}
	for idx, constr := range j.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	// Affinity and spread stanzas are rejected outright for system jobs;
	// for other job types each entry is validated individually.
	if j.Type == JobTypeSystem {
		if j.Affinities != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza"))
		}
	} else {
		for idx, affinity := range j.Affinities {
			if err := affinity.Validate(); err != nil {
				outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err)
				mErr.Errors = append(mErr.Errors, outer)
			}
		}
	}

	if j.Type == JobTypeSystem {
		if j.Spreads != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread stanza"))
		}
	} else {
		for idx, spread := range j.Spreads {
			if err := spread.Validate(); err != nil {
				outer := fmt.Errorf("Spread %d validation failed: %s", idx+1, err)
				mErr.Errors = append(mErr.Errors, outer)
			}
		}
	}

	// Check for duplicate task groups; taskGroups maps a group name to the
	// index where it was first seen.
	taskGroups := make(map[string]int)
	for idx, tg := range j.TaskGroups {
		if tg.Name == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d missing name", idx+1))
		} else if existing, ok := taskGroups[tg.Name]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d redefines '%s' from group %d", idx+1, tg.Name, existing+1))
		} else {
			taskGroups[tg.Name] = idx
		}

		if tg.ShutdownDelay != nil && *tg.ShutdownDelay < 0 {
			mErr.Errors = append(mErr.Errors, errors.New("ShutdownDelay must be a positive value"))
		}

		// stop_after_client_disconnect is only valid on batch and service
		// jobs, and must be positive when set.
		if tg.StopAfterClientDisconnect != nil && *tg.StopAfterClientDisconnect != 0 {
			if *tg.StopAfterClientDisconnect > 0 &&
				!(j.Type == JobTypeBatch || j.Type == JobTypeService) {
				mErr.Errors = append(mErr.Errors, errors.New("stop_after_client_disconnect can only be set in batch and service jobs"))
			} else if *tg.StopAfterClientDisconnect < 0 {
				mErr.Errors = append(mErr.Errors, errors.New("stop_after_client_disconnect must be a positive value"))
			}
		}

		// The system scheduler places at most one allocation per node, so a
		// count above one is meaningless.
		if j.Type == "system" && tg.Count > 1 {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("Job task group %s has count %d. Count cannot exceed 1 with system scheduler",
					tg.Name, tg.Count))
		}
	}

	// Validate each task group in the context of this job.
	for _, tg := range j.TaskGroups {
		if err := tg.Validate(j); err != nil {
			outer := fmt.Errorf("Task group %s validation failed: %v", tg.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	// Validate periodic is only used with batch or sysbatch jobs.
	if j.IsPeriodic() && j.Periodic.Enabled {
		if j.Type != JobTypeBatch && j.Type != JobTypeSysBatch {
			mErr.Errors = append(mErr.Errors, fmt.Errorf(
				"Periodic can only be used with %q or %q scheduler", JobTypeBatch, JobTypeSysBatch,
			))
		}

		if err := j.Periodic.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}

	// Parameterized jobs are likewise limited to batch-family schedulers.
	if j.IsParameterized() {
		if j.Type != JobTypeBatch && j.Type != JobTypeSysBatch {
			mErr.Errors = append(mErr.Errors, fmt.Errorf(
				"Parameterized job can only be used with %q or %q scheduler", JobTypeBatch, JobTypeSysBatch,
			))
		}

		if err := j.ParameterizedJob.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}

	if j.IsMultiregion() {
		if err := j.Multiregion.Validate(j.Type, j.Datacenters); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}

	return mErr.ErrorOrNil()
}
// Warnings returns a list of warnings that may be from dubious settings or
// deprecation warnings.
func (j *Job) Warnings() error {
	var mErr multierror.Error

	// Collect warnings from each task group while tracking auto_promote
	// usage across the update stanzas.
	hasAutoPromote := false
	allAutoPromote := true
	for _, tg := range j.TaskGroups {
		if err := tg.Warnings(j); err != nil {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("Group %q has warnings: %v", tg.Name, err))
		}

		u := tg.Update
		if u == nil {
			continue
		}
		if u.AutoPromote {
			hasAutoPromote = true
		}
		// A group without canaries is implicitly auto-promoted since there
		// is nothing to promote.
		if u.Canary != 0 && !u.AutoPromote {
			allAutoPromote = false
		}
	}

	// auto_promote must be enabled on every group or on none of them.
	if hasAutoPromote && !allAutoPromote {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("auto_promote must be true for all groups to enable automatic promotion"))
	}

	return mErr.ErrorOrNil()
}
// LookupTaskGroup finds a task group by name, returning nil when no group
// with that name exists on the job.
func (j *Job) LookupTaskGroup(name string) *TaskGroup {
	for i := range j.TaskGroups {
		if j.TaskGroups[i].Name == name {
			return j.TaskGroups[i]
		}
	}
	return nil
}
// CombinedTaskMeta takes a TaskGroup and Task name and returns the combined
// meta data for the task. When joining Job, Group and Task Meta, the precedence
// is by deepest scope (Task > Group > Job).
func (j *Job) CombinedTaskMeta(groupName, taskName string) map[string]string {
	group := j.LookupTaskGroup(groupName)
	if group == nil {
		return j.Meta
	}

	// Seed with the task-level meta; deepest scope wins on conflicts.
	var merged map[string]string
	if task := group.LookupTask(taskName); task != nil {
		merged = helper.CopyMapStringString(task.Meta)
	}
	if merged == nil {
		merged = make(map[string]string, len(group.Meta)+len(j.Meta))
	}

	// Fill in group-level values the task did not set.
	for k, v := range group.Meta {
		if _, exists := merged[k]; !exists {
			merged[k] = v
		}
	}

	// Finally fill in job-level values.
	for k, v := range j.Meta {
		if _, exists := merged[k]; !exists {
			merged[k] = v
		}
	}

	return merged
}
// Stopped returns if a job is stopped. A nil job counts as stopped.
func (j *Job) Stopped() bool {
	if j == nil {
		return true
	}
	return j.Stop
}
// HasUpdateStrategy returns if any task group in the job has a non-empty
// update strategy.
func (j *Job) HasUpdateStrategy() bool {
	for i := range j.TaskGroups {
		if !j.TaskGroups[i].Update.IsEmpty() {
			return true
		}
	}
	return false
}
// Stub is used to return a summary of the job. The provided summary is
// attached as-is; callers own producing it (it may be nil).
func (j *Job) Stub(summary *JobSummary) *JobListStub {
	return &JobListStub{
		ID:                j.ID,
		Namespace:         j.Namespace,
		ParentID:          j.ParentID,
		Name:              j.Name,
		Datacenters:       j.Datacenters,
		Multiregion:       j.Multiregion,
		Type:              j.Type,
		Priority:          j.Priority,
		Periodic:          j.IsPeriodic(),
		ParameterizedJob:  j.IsParameterized(),
		Stop:              j.Stop,
		Status:            j.Status,
		StatusDescription: j.StatusDescription,
		CreateIndex:       j.CreateIndex,
		ModifyIndex:       j.ModifyIndex,
		JobModifyIndex:    j.JobModifyIndex,
		SubmitTime:        j.SubmitTime,
		JobSummary:        summary,
	}
}
// IsPeriodic returns whether a job is periodic.
func (j *Job) IsPeriodic() bool {
	if j.Periodic == nil {
		return false
	}
	return true
}
// IsPeriodicActive returns whether the job is an active periodic job that will
// create child jobs.
func (j *Job) IsPeriodicActive() bool {
	if !j.IsPeriodic() || !j.Periodic.Enabled {
		return false
	}
	return !j.Stopped() && !j.IsParameterized()
}
// IsParameterized returns whether a job is a parameterized job. A job that
// was already dispatched from a parameterized job is not itself parameterized.
func (j *Job) IsParameterized() bool {
	if j.ParameterizedJob == nil {
		return false
	}
	return !j.Dispatched
}
// IsMultiregion returns whether a job is a multiregion job, i.e. it has a
// multiregion stanza with at least one region.
func (j *Job) IsMultiregion() bool {
	// len() of a nil slice is 0, so an explicit Regions != nil check
	// is redundant.
	return j.Multiregion != nil && len(j.Multiregion.Regions) > 0
}
// IsPlugin returns whether any task in the job implements a plugin
// (currently just CSI).
func (j *Job) IsPlugin() bool {
	for _, group := range j.TaskGroups {
		for _, t := range group.Tasks {
			if t.CSIPluginConfig != nil {
				return true
			}
		}
	}
	return false
}
// Vault returns the set of Vault blocks per task group, per task. Groups
// with no Vault-enabled tasks are omitted from the result.
func (j *Job) Vault() map[string]map[string]*Vault {
	out := make(map[string]map[string]*Vault, len(j.TaskGroups))

	for _, tg := range j.TaskGroups {
		perTask := make(map[string]*Vault, len(tg.Tasks))
		for _, task := range tg.Tasks {
			if task.Vault != nil {
				perTask[task.Name] = task.Vault
			}
		}
		if len(perTask) > 0 {
			out[tg.Name] = perTask
		}
	}

	return out
}
// ConnectTasks returns the set of Consul Connect enabled tasks defined on the
// job that will require a Service Identity token in the case that Consul ACLs
// are enabled. The TaskKind.Value is the name of the Consul service.
//
// This method is meaningful only after the Job has passed through the job
// submission Mutator functions.
func (j *Job) ConnectTasks() []TaskKind {
	var kinds []TaskKind
	for _, tg := range j.TaskGroups {
		for _, task := range tg.Tasks {
			k := task.Kind
			if k.IsConnectProxy() || k.IsConnectNative() || k.IsAnyConnectGateway() {
				kinds = append(kinds, k)
			}
		}
	}
	return kinds
}
// RequiredSignals returns a mapping of task groups to tasks to their required
// set of signals. Tasks with no required signals are omitted entirely.
func (j *Job) RequiredSignals() map[string]map[string][]string {
	signals := make(map[string]map[string][]string)

	for _, tg := range j.TaskGroups {
		for _, task := range tg.Tasks {
			// Use this local map as a set to deduplicate signals.
			taskSignals := make(map[string]struct{})

			// Check if the Vault change mode uses signals
			if task.Vault != nil && task.Vault.ChangeMode == VaultChangeModeSignal {
				taskSignals[task.Vault.ChangeSignal] = struct{}{}
			}

			// If a user has specified a KillSignal, add it to required signals
			if task.KillSignal != "" {
				taskSignals[task.KillSignal] = struct{}{}
			}

			// Check if any template change mode uses signals
			for _, t := range task.Templates {
				if t.ChangeMode != TemplateChangeModeSignal {
					continue
				}

				taskSignals[t.ChangeSignal] = struct{}{}
			}

			// Flatten the set into a sorted slice for a deterministic result.
			l := len(taskSignals)
			if l == 0 {
				continue
			}

			flat := make([]string, 0, l)
			for sig := range taskSignals {
				flat = append(flat, sig)
			}

			sort.Strings(flat)
			// Lazily create the per-group map on first task with signals.
			tgSignals, ok := signals[tg.Name]
			if !ok {
				tgSignals = make(map[string][]string)
				signals[tg.Name] = tgSignals
			}
			tgSignals[task.Name] = flat
		}
	}

	return signals
}
// SpecChanged determines if the functional specification has changed between
// two job versions. Server-managed bookkeeping fields are excluded from the
// comparison by copying them from the old job onto a copy of the new one.
func (j *Job) SpecChanged(new *Job) bool {
	if j == nil {
		return new != nil
	}

	// Create a copy of the new job
	c := new.Copy()

	// Overwrite the server-managed fields so reflect.DeepEqual only compares
	// the user-supplied specification.
	c.Status = j.Status
	c.StatusDescription = j.StatusDescription
	c.Stable = j.Stable
	c.Version = j.Version
	c.CreateIndex = j.CreateIndex
	c.ModifyIndex = j.ModifyIndex
	c.JobModifyIndex = j.JobModifyIndex
	c.SubmitTime = j.SubmitTime

	// cgbaker: FINISH: probably need some consideration of scaling policy ID here

	// Deep equals the jobs
	return !reflect.DeepEqual(j, c)
}
// SetSubmitTime records the current wall-clock time, as UTC UnixNano, as the
// job's submission time.
func (j *Job) SetSubmitTime() {
	now := time.Now().UTC()
	j.SubmitTime = now.UnixNano()
}
// JobListStub is used to return a subset of job information
// for the job list. Fields mirror the corresponding fields on Job; the
// Periodic and ParameterizedJob booleans are derived via IsPeriodic and
// IsParameterized rather than copied directly.
type JobListStub struct {
	ID                string
	ParentID          string
	Name              string
	Namespace         string `json:",omitempty"`
	Datacenters       []string
	Multiregion       *Multiregion
	Type              string
	Priority          int
	Periodic          bool
	ParameterizedJob  bool
	Stop              bool
	Status            string
	StatusDescription string
	JobSummary        *JobSummary
	CreateIndex       uint64
	ModifyIndex       uint64
	JobModifyIndex    uint64
	SubmitTime        int64
}
// JobSummary summarizes the state of the allocations of a job
type JobSummary struct {
	// JobID is the ID of the job the summary is for
	JobID string

	// Namespace is the namespace of the job and its summary
	Namespace string

	// Summary contains the summary per task group for the Job
	Summary map[string]TaskGroupSummary

	// Children contains a summary for the children of this job.
	Children *JobChildrenSummary

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
// Copy returns a new copy of JobSummary. A nil receiver yields nil, matching
// the behavior of the other Copy helpers in this package (previously a nil
// receiver would panic on the dereference).
func (js *JobSummary) Copy() *JobSummary {
	if js == nil {
		return nil
	}
	newJobSummary := new(JobSummary)
	*newJobSummary = *js
	// Deep-copy the per-group summary map; the values are plain structs.
	newTGSummary := make(map[string]TaskGroupSummary, len(js.Summary))
	for k, v := range js.Summary {
		newTGSummary[k] = v
	}
	newJobSummary.Summary = newTGSummary
	newJobSummary.Children = newJobSummary.Children.Copy()
	return newJobSummary
}
// JobChildrenSummary contains the summary of children job statuses,
// counting child jobs by their current state.
type JobChildrenSummary struct {
	Pending int64 // children not yet running
	Running int64 // children currently running
	Dead    int64 // children that have finished or failed
}
// Copy returns a new copy of a JobChildrenSummary, or nil for a nil receiver.
// The struct contains only value fields, so a shallow copy suffices.
func (jc *JobChildrenSummary) Copy() *JobChildrenSummary {
	if jc == nil {
		return nil
	}
	out := *jc
	return &out
}
// TaskGroupSummary summarizes the state of all the allocations of a
// particular TaskGroup, counting allocations by client status.
type TaskGroupSummary struct {
	Queued   int
	Complete int
	Failed   int
	Running  int
	Starting int
	Lost     int
	Unknown  int
}
// Health-check mechanisms available to UpdateStrategy.HealthCheck.
const (
	// Checks uses any registered health check state in combination with task
	// states to determine if an allocation is healthy.
	UpdateStrategyHealthCheck_Checks = "checks"

	// TaskStates uses the task states of an allocation to determine if the
	// allocation is healthy.
	UpdateStrategyHealthCheck_TaskStates = "task_states"

	// Manual allows the operator to manually signal to Nomad when an
	// allocations is healthy. This allows more advanced health checking that is
	// outside of the scope of Nomad.
	UpdateStrategyHealthCheck_Manual = "manual"
)
var (
	// DefaultUpdateStrategy provides a baseline that can be used to upgrade
	// jobs with the old policy or for populating field defaults.
	DefaultUpdateStrategy = &UpdateStrategy{
		Stagger:          30 * time.Second,
		MaxParallel:      1,
		HealthCheck:      UpdateStrategyHealthCheck_Checks,
		MinHealthyTime:   10 * time.Second,
		HealthyDeadline:  5 * time.Minute,
		ProgressDeadline: 10 * time.Minute,
		AutoRevert:       false,
		AutoPromote:      false,
		Canary:           0,
	}
)
// UpdateStrategy is used to modify how updates are done
type UpdateStrategy struct {
	// Stagger is used to determine the rate at which allocations are migrated
	// due to down or draining nodes.
	Stagger time.Duration

	// MaxParallel is how many updates can be done in parallel
	MaxParallel int

	// HealthCheck specifies the mechanism in which allocations are marked
	// healthy or unhealthy as part of a deployment. One of the
	// UpdateStrategyHealthCheck_* constants.
	HealthCheck string

	// MinHealthyTime is the minimum time an allocation must be in the healthy
	// state before it is marked as healthy, unblocking more allocations to be
	// rolled.
	MinHealthyTime time.Duration

	// HealthyDeadline is the time in which an allocation must be marked as
	// healthy before it is automatically transitioned to unhealthy. This time
	// period doesn't count against the MinHealthyTime.
	HealthyDeadline time.Duration

	// ProgressDeadline is the time in which an allocation as part of the
	// deployment must transition to healthy. If no allocation becomes healthy
	// after the deadline, the deployment is marked as failed. If the deadline
	// is zero, the first failure causes the deployment to fail.
	ProgressDeadline time.Duration

	// AutoRevert declares that if a deployment fails because of unhealthy
	// allocations, there should be an attempt to auto-revert the job to a
	// stable version.
	AutoRevert bool

	// AutoPromote declares that the deployment should be promoted when all canaries are
	// healthy
	AutoPromote bool

	// Canary is the number of canaries to deploy when a change to the task
	// group is detected.
	Canary int
}
// Copy returns a shallow copy of the update strategy, or nil for a nil
// receiver. All fields are value types, so a shallow copy is a full copy.
func (u *UpdateStrategy) Copy() *UpdateStrategy {
	if u == nil {
		return nil
	}

	// Named "c" rather than "copy" to avoid shadowing the builtin copy().
	c := new(UpdateStrategy)
	*c = *u
	return c
}
// Validate checks the update strategy for invalid or inconsistent settings
// and returns all problems as a single multierror. A nil strategy is valid.
func (u *UpdateStrategy) Validate() error {
	if u == nil {
		return nil
	}

	var mErr multierror.Error
	switch u.HealthCheck {
	case UpdateStrategyHealthCheck_Checks, UpdateStrategyHealthCheck_TaskStates, UpdateStrategyHealthCheck_Manual:
	default:
		_ = multierror.Append(&mErr, fmt.Errorf("Invalid health check given: %q", u.HealthCheck))
	}

	if u.MaxParallel < 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("Max parallel can not be less than zero: %d < 0", u.MaxParallel))
	}
	if u.Canary < 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("Canary count can not be less than zero: %d < 0", u.Canary))
	}
	// AutoPromote is meaningless without canaries to promote.
	if u.Canary == 0 && u.AutoPromote {
		_ = multierror.Append(&mErr, fmt.Errorf("Auto Promote requires a Canary count greater than zero"))
	}
	if u.MinHealthyTime < 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("Minimum healthy time may not be less than zero: %v", u.MinHealthyTime))
	}
	if u.HealthyDeadline <= 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be greater than zero: %v", u.HealthyDeadline))
	}
	if u.ProgressDeadline < 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("Progress deadline must be zero or greater: %v", u.ProgressDeadline))
	}
	// NOTE(review): the condition is >= but the message renders "%v > %v";
	// an equal pair reports a misleading comparison — confirm intent.
	if u.MinHealthyTime >= u.HealthyDeadline {
		_ = multierror.Append(&mErr, fmt.Errorf("Minimum healthy time must be less than healthy deadline: %v > %v", u.MinHealthyTime, u.HealthyDeadline))
	}
	// A zero ProgressDeadline means "fail on first unhealthy" and is exempt
	// from the ordering constraint below.
	if u.ProgressDeadline != 0 && u.HealthyDeadline >= u.ProgressDeadline {
		_ = multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be less than progress deadline: %v > %v", u.HealthyDeadline, u.ProgressDeadline))
	}
	if u.Stagger <= 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("Stagger must be greater than zero: %v", u.Stagger))
	}

	return mErr.ErrorOrNil()
}
// IsEmpty reports whether the update strategy is unset: either nil or with
// a zero MaxParallel.
func (u *UpdateStrategy) IsEmpty() bool {
	return u == nil || u.MaxParallel == 0
}
// Rolling returns if a rolling strategy should be used.
// TODO(alexdadgar): Remove once no longer used by the scheduler.
func (u *UpdateStrategy) Rolling() bool {
	if u.Stagger <= 0 {
		return false
	}
	return u.MaxParallel > 0
}
// Multiregion is the job-level multiregion deployment configuration,
// pairing an optional rollout strategy with the set of target regions.
type Multiregion struct {
	Strategy *MultiregionStrategy
	Regions  []*MultiregionRegion
}
// Canonicalize replaces nil sub-fields with empty values so later code can
// use them without nil checks.
func (m *Multiregion) Canonicalize() {
	if m.Strategy == nil {
		m.Strategy = new(MultiregionStrategy)
	}
	if m.Regions == nil {
		m.Regions = make([]*MultiregionRegion, 0)
	}
}
// Diff indicates whether the multiregion config has changed
func (m *Multiregion) Diff(m2 *Multiregion) bool {
	same := reflect.DeepEqual(m, m2)
	return !same
}
// Copy returns a deep copy of the multiregion config, or nil for a nil
// receiver. Strategy, region slices, datacenter slices, and meta maps are
// all duplicated so the copy shares no mutable state with the original.
func (m *Multiregion) Copy() *Multiregion {
	if m == nil {
		return nil
	}
	// Named "out" rather than "copy" to avoid shadowing the builtin copy().
	out := new(Multiregion)
	if m.Strategy != nil {
		out.Strategy = &MultiregionStrategy{
			MaxParallel: m.Strategy.MaxParallel,
			OnFailure:   m.Strategy.OnFailure,
		}
	}
	for _, region := range m.Regions {
		copyRegion := &MultiregionRegion{
			Name:        region.Name,
			Count:       region.Count,
			Datacenters: []string{},
			Meta:        map[string]string{},
		}
		copyRegion.Datacenters = append(copyRegion.Datacenters, region.Datacenters...)
		for k, v := range region.Meta {
			copyRegion.Meta[k] = v
		}
		out.Regions = append(out.Regions, copyRegion)
	}
	return out
}
// MultiregionStrategy controls how a multiregion deployment is rolled out
// across regions.
type MultiregionStrategy struct {
	// MaxParallel is how many regions may be deployed at once.
	MaxParallel int
	// OnFailure names the failure-handling behavior for the rollout.
	OnFailure string
}
// MultiregionRegion is a single target region in a multiregion job,
// with per-region overrides for count, datacenters, and meta.
type MultiregionRegion struct {
	Name        string
	Count       int
	Datacenters []string
	Meta        map[string]string
}
// Namespace allows logically grouping jobs and their associated objects.
type Namespace struct {
	// Name is the name of the namespace
	Name string

	// Description is a human readable description of the namespace
	Description string

	// Quota is the quota specification that the namespace should account
	// against.
	Quota string

	// Capabilities is the set of capabilities allowed for this namespace
	Capabilities *NamespaceCapabilities

	// Meta is the set of metadata key/value pairs that attached to the namespace
	Meta map[string]string

	// Hash is the hash of the namespace which is used to efficiently replicate
	// cross-regions. Computed by SetHash.
	Hash []byte

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
// NamespaceCapabilities represents a set of capabilities allowed for this
// namespace, to be checked at job submission time.
type NamespaceCapabilities struct {
	EnabledTaskDrivers  []string
	DisabledTaskDrivers []string
}
// Validate checks the namespace's name against the allowed pattern and its
// description against the maximum length, accumulating all problems.
func (n *Namespace) Validate() error {
	var mErr multierror.Error

	if !validNamespaceName.MatchString(n.Name) {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("invalid name %q. Must match regex %s", n.Name, validNamespaceName))
	}

	if len(n.Description) > maxNamespaceDescriptionLength {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("description longer than %d", maxNamespaceDescriptionLength))
	}

	return mErr.ErrorOrNil()
}
// SetHash is used to compute and set the hash of the namespace. The order of
// writes into the hash is fixed; changing it would change the hash of
// existing namespaces and break cross-region replication comparisons.
func (n *Namespace) SetHash() []byte {
	// Initialize a 256bit Blake2 hash (32 bytes)
	hash, err := blake2b.New256(nil)
	if err != nil {
		// blake2b.New256 only fails for an invalid key; nil is always valid.
		panic(err)
	}

	// Write all the user set fields
	_, _ = hash.Write([]byte(n.Name))
	_, _ = hash.Write([]byte(n.Description))
	_, _ = hash.Write([]byte(n.Quota))
	if n.Capabilities != nil {
		for _, driver := range n.Capabilities.EnabledTaskDrivers {
			_, _ = hash.Write([]byte(driver))
		}
		for _, driver := range n.Capabilities.DisabledTaskDrivers {
			_, _ = hash.Write([]byte(driver))
		}
	}

	// sort keys to ensure hash stability when meta is stored later
	var keys []string
	for k := range n.Meta {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		_, _ = hash.Write([]byte(k))
		_, _ = hash.Write([]byte(n.Meta[k]))
	}

	// Finalize the hash
	hashVal := hash.Sum(nil)

	// Set and return the hash
	n.Hash = hashVal
	return hashVal
}
// Copy returns a deep copy of the Namespace. A nil receiver yields nil,
// matching the other Copy helpers in this package (previously a nil receiver
// would panic on the dereference).
func (n *Namespace) Copy() *Namespace {
	if n == nil {
		return nil
	}
	nc := new(Namespace)
	*nc = *n
	// Duplicate the hash bytes immediately after allocating the destination
	// so the two steps can't drift apart.
	nc.Hash = make([]byte, len(n.Hash))
	copy(nc.Hash, n.Hash)
	if n.Capabilities != nil {
		c := new(NamespaceCapabilities)
		*c = *n.Capabilities
		c.EnabledTaskDrivers = helper.CopySliceString(n.Capabilities.EnabledTaskDrivers)
		c.DisabledTaskDrivers = helper.CopySliceString(n.Capabilities.DisabledTaskDrivers)
		nc.Capabilities = c
	}
	if n.Meta != nil {
		nc.Meta = make(map[string]string, len(n.Meta))
		for k, v := range n.Meta {
			nc.Meta[k] = v
		}
	}
	return nc
}
// NamespaceListRequest is used to request a list of namespaces. It embeds
// QueryOptions for pagination/consistency controls.
type NamespaceListRequest struct {
	QueryOptions
}
// NamespaceListResponse is used for a list request, carrying the matching
// namespaces plus standard query metadata.
type NamespaceListResponse struct {
	Namespaces []*Namespace
	QueryMeta
}
// NamespaceSpecificRequest is used to query a specific namespace by name.
type NamespaceSpecificRequest struct {
	Name string
	QueryOptions
}
// SingleNamespaceResponse is used to return a single namespace.
type SingleNamespaceResponse struct {
	Namespace *Namespace
	QueryMeta
}
// NamespaceSetRequest is used to query a set of namespaces by name.
type NamespaceSetRequest struct {
	Namespaces []string
	QueryOptions
}
// NamespaceSetResponse is used to return a set of namespaces
type NamespaceSetResponse struct {
	Namespaces map[string]*Namespace // Keyed by namespace Name
	QueryMeta
}
// NamespaceDeleteRequest is used to delete a set of namespaces by name.
type NamespaceDeleteRequest struct {
	Namespaces []string
	WriteRequest
}
// NamespaceUpsertRequest is used to create or update a set of namespaces.
type NamespaceUpsertRequest struct {
	Namespaces []*Namespace
	WriteRequest
}
// Supported values for PeriodicConfig.SpecType.
const (
	// PeriodicSpecCron is used for a cron spec.
	PeriodicSpecCron = "cron"

	// PeriodicSpecTest is only used by unit tests. It is a sorted, comma
	// separated list of unix timestamps at which to launch.
	PeriodicSpecTest = "_internal_test"
)
// PeriodicConfig defines the interval a job should be run at.
type PeriodicConfig struct {
	// Enabled determines if the job should be run periodically.
	Enabled bool

	// Spec specifies the interval the job should be run as. It is parsed based
	// on the SpecType.
	Spec string

	// SpecType defines the format of the spec. One of the PeriodicSpec*
	// constants.
	SpecType string

	// ProhibitOverlap enforces that spawned jobs do not run in parallel.
	ProhibitOverlap bool

	// TimeZone is the user specified string that determines the time zone to
	// launch against. The time zones must be specified from IANA Time Zone
	// database, such as "America/New_York".
	// Reference: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
	// Reference: https://www.iana.org/time-zones
	TimeZone string

	// location is the time zone to evaluate the launch time against;
	// populated by Canonicalize and read via GetLocation.
	location *time.Location
}
// Copy returns a shallow copy of the periodic config, or nil for a nil
// receiver. The cached location pointer is shared, which is safe because
// *time.Location values are immutable.
func (p *PeriodicConfig) Copy() *PeriodicConfig {
	if p == nil {
		return nil
	}
	out := *p
	return &out
}
// Validate checks the periodic configuration: a spec must be present, any
// time zone must exist in the IANA database, and a cron spec must parse.
// Disabled configs are always valid.
func (p *PeriodicConfig) Validate() error {
	if !p.Enabled {
		return nil
	}

	var mErr multierror.Error
	if p.Spec == "" {
		_ = multierror.Append(&mErr, fmt.Errorf("Must specify a spec"))
	}

	// Check if we got a valid time zone
	if p.TimeZone != "" {
		if _, err := time.LoadLocation(p.TimeZone); err != nil {
			_ = multierror.Append(&mErr, fmt.Errorf("Invalid time zone %q: %v", p.TimeZone, err))
		}
	}

	switch p.SpecType {
	case PeriodicSpecCron:
		// Validate the cron spec
		if _, err := cronexpr.Parse(p.Spec); err != nil {
			_ = multierror.Append(&mErr, fmt.Errorf("Invalid cron spec %q: %v", p.Spec, err))
		}
	case PeriodicSpecTest:
		// No-op
	default:
		_ = multierror.Append(&mErr, fmt.Errorf("Unknown periodic specification type %q", p.SpecType))
	}

	return mErr.ErrorOrNil()
}
// Canonicalize caches the time.Location for the configured TimeZone, falling
// back to UTC when the zone cannot be loaded. Previously the UTC fallback was
// immediately clobbered by the unconditional assignment of the nil location
// returned on error; returning early preserves the fallback. (GetLocation
// also treats a nil location as UTC, so the observable behavior is the same.)
func (p *PeriodicConfig) Canonicalize() {
	// Load the location
	l, err := time.LoadLocation(p.TimeZone)
	if err != nil {
		p.location = time.UTC
		return
	}

	p.location = l
}
// CronParseNext is a helper that parses the next time for the given expression
// but captures any panic that may occur in the underlying library.
// The named returns are required so the deferred recover can overwrite them.
func CronParseNext(e *cronexpr.Expression, fromTime time.Time, spec string) (t time.Time, err error) {
	defer func() {
		if recover() != nil {
			// cronexpr.Expression.Next can panic on pathological specs;
			// convert that into a zero time plus an error.
			t = time.Time{}
			err = fmt.Errorf("failed parsing cron expression: %q", spec)
		}
	}()

	return e.Next(fromTime), nil
}
// Next returns the closest time instant matching the spec that is after the
// passed time. If no matching instance exists, the zero value of time.Time is
// returned. The `time.Location` of the returned value matches that of the
// passed time.
func (p *PeriodicConfig) Next(fromTime time.Time) (time.Time, error) {
	switch p.SpecType {
	case PeriodicSpecCron:
		e, err := cronexpr.Parse(p.Spec)
		if err != nil {
			return time.Time{}, fmt.Errorf("failed parsing cron expression: %q: %v", p.Spec, err)
		}
		return CronParseNext(e, fromTime, p.Spec)
	case PeriodicSpecTest:
		// The test spec is a comma-separated list of unix timestamps.
		split := strings.Split(p.Spec, ",")
		if len(split) == 1 && split[0] == "" {
			return time.Time{}, nil
		}

		// Parse the times
		times := make([]time.Time, len(split))
		for i, s := range split {
			unix, err := strconv.Atoi(s)
			if err != nil {
				// NOTE(review): a malformed timestamp silently yields a zero
				// time with a nil error — confirm callers expect this rather
				// than an error return (test-only spec type).
				return time.Time{}, nil
			}

			times[i] = time.Unix(int64(unix), 0)
		}

		// Find the next match (the spec is documented as sorted).
		for _, next := range times {
			if fromTime.Before(next) {
				return next, nil
			}
		}
	}

	return time.Time{}, nil
}
// GetLocation returns the location to use for determining the time zone to run
// the periodic job against, defaulting to UTC when no location was cached
// (jobs pre 0.5.5 will not have one).
func (p *PeriodicConfig) GetLocation() *time.Location {
	if p.location == nil {
		return time.UTC
	}
	return p.location
}
const (
	// PeriodicLaunchSuffix is the string appended to the periodic jobs ID
	// when launching derived instances of it.
	PeriodicLaunchSuffix = "/periodic-"
)
// PeriodicLaunch tracks the last launch time of a periodic job.
type PeriodicLaunch struct {
	ID        string    // ID of the periodic job.
	Namespace string    // Namespace of the periodic job
	Launch    time.Time // The last launch time.

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
// Payload requirement modes for ParameterizedJobConfig.Payload, plus the
// suffix used to build dispatched-job IDs.
const (
	DispatchPayloadForbidden = "forbidden"
	DispatchPayloadOptional  = "optional"
	DispatchPayloadRequired  = "required"

	// DispatchLaunchSuffix is the string appended to the parameterized job's ID
	// when dispatching instances of it.
	DispatchLaunchSuffix = "/dispatch-"
)
// ParameterizedJobConfig is used to configure the parameterized job
type ParameterizedJobConfig struct {
	// Payload configure the payload requirements; one of the
	// DispatchPayload* constants.
	Payload string

	// MetaRequired is metadata keys that must be specified by the dispatcher
	MetaRequired []string

	// MetaOptional is metadata keys that may be specified by the dispatcher
	MetaOptional []string
}
// Validate checks that the payload mode is a known constant and that the
// required and optional meta key sets do not overlap.
func (d *ParameterizedJobConfig) Validate() error {
	var mErr multierror.Error
	switch d.Payload {
	case DispatchPayloadOptional, DispatchPayloadRequired, DispatchPayloadForbidden:
	default:
		_ = multierror.Append(&mErr, fmt.Errorf("Unknown payload requirement: %q", d.Payload))
	}

	// Check that the meta configurations are disjoint sets
	disjoint, offending := helper.SliceSetDisjoint(d.MetaRequired, d.MetaOptional)
	if !disjoint {
		_ = multierror.Append(&mErr, fmt.Errorf("Required and optional meta keys should be disjoint. Following keys exist in both: %v", offending))
	}

	return mErr.ErrorOrNil()
}
// Canonicalize defaults an unset payload mode to optional.
func (d *ParameterizedJobConfig) Canonicalize() {
	if d.Payload != "" {
		return
	}
	d.Payload = DispatchPayloadOptional
}
// Copy returns a deep copy of the parameterized job config, or nil for a nil
// receiver.
func (d *ParameterizedJobConfig) Copy() *ParameterizedJobConfig {
	if d == nil {
		return nil
	}
	out := *d
	out.MetaOptional = helper.CopySliceString(d.MetaOptional)
	out.MetaRequired = helper.CopySliceString(d.MetaRequired)
	return &out
}
// DispatchedID returns an ID appropriate for a job dispatched against a
// particular parameterized job. The ID combines the parent's ID, the dispatch
// time, and a short random suffix to avoid collisions within one second.
func DispatchedID(templateID string, t time.Time) string {
	u := uuid.Generate()[:8]
	return fmt.Sprintf("%s%s%d-%s", templateID, DispatchLaunchSuffix, t.Unix(), u)
}
// DispatchPayloadConfig configures how a task gets its input from a job dispatch
type DispatchPayloadConfig struct {
	// File specifies a relative path to where the input data should be written
	File string
}
// Copy returns a shallow copy of the config, or nil for a nil receiver.
func (d *DispatchPayloadConfig) Copy() *DispatchPayloadConfig {
	if d == nil {
		return nil
	}
	c := *d
	return &c
}
// Validate ensures the payload destination cannot escape the allocation's
// task/local directory.
func (d *DispatchPayloadConfig) Validate() error {
	escaped, err := escapingfs.PathEscapesAllocViaRelative("task/local/", d.File)
	switch {
	case err != nil:
		return fmt.Errorf("invalid destination path: %v", err)
	case escaped:
		return fmt.Errorf("destination escapes allocation directory")
	default:
		return nil
	}
}
const (
	// TaskLifecycleHookPrestart runs the task before the main tasks start.
	TaskLifecycleHookPrestart = "prestart"
	// TaskLifecycleHookPoststart runs the task after the main tasks start.
	TaskLifecycleHookPoststart = "poststart"
	// TaskLifecycleHookPoststop runs the task after the main tasks stop.
	TaskLifecycleHookPoststop = "poststop"
)
// TaskLifecycleConfig describes when a task runs relative to the main tasks
// of its group.
type TaskLifecycleConfig struct {
	// Hook is one of the TaskLifecycleHook* constants (see Validate).
	Hook string
	// Sidecar indicates the task keeps running alongside the main tasks.
	Sidecar bool
}
// Copy returns a shallow copy of the lifecycle config, or nil for a nil
// receiver.
func (d *TaskLifecycleConfig) Copy() *TaskLifecycleConfig {
	if d == nil {
		return nil
	}
	c := *d
	return &c
}
// Validate checks that a non-nil lifecycle config names one of the known
// hooks. A nil config is valid (the task has no lifecycle block).
func (d *TaskLifecycleConfig) Validate() error {
	if d == nil {
		return nil
	}
	switch d.Hook {
	case TaskLifecycleHookPrestart,
		TaskLifecycleHookPoststart,
		TaskLifecycleHookPoststop:
		return nil
	case "":
		return fmt.Errorf("no lifecycle hook provided")
	}
	return fmt.Errorf("invalid hook: %v", d.Hook)
}
var (
	// These default restart policies need to be in sync with
	// Canonicalize in api/tasks.go.
	// DefaultServiceJobRestartPolicy applies to service and system jobs.
	DefaultServiceJobRestartPolicy = RestartPolicy{
		Delay:    15 * time.Second,
		Attempts: 2,
		Interval: 30 * time.Minute,
		Mode:     RestartPolicyModeFail,
	}
	// DefaultBatchJobRestartPolicy applies to batch jobs.
	DefaultBatchJobRestartPolicy = RestartPolicy{
		Delay:    15 * time.Second,
		Attempts: 3,
		Interval: 24 * time.Hour,
		Mode:     RestartPolicyModeFail,
	}
)
var (
	// These default reschedule policies need to be in sync with
	// NewDefaultReschedulePolicy in api/tasks.go.
	// DefaultServiceJobReschedulePolicy reschedules service jobs forever with
	// exponential backoff.
	DefaultServiceJobReschedulePolicy = ReschedulePolicy{
		Delay:         30 * time.Second,
		DelayFunction: "exponential",
		MaxDelay:      1 * time.Hour,
		Unlimited:     true,
	}
	// DefaultBatchJobReschedulePolicy retries batch jobs once per day with a
	// constant delay.
	DefaultBatchJobReschedulePolicy = ReschedulePolicy{
		Attempts:      1,
		Interval:      24 * time.Hour,
		Delay:         5 * time.Second,
		DelayFunction: "constant",
	}
)
const (
	// RestartPolicyModeDelay causes an artificial delay till the next interval is
	// reached when the specified attempts have been reached in the interval.
	RestartPolicyModeDelay = "delay"
	// RestartPolicyModeFail causes a job to fail if the specified number of
	// attempts are reached within an interval.
	RestartPolicyModeFail = "fail"
	// RestartPolicyMinInterval is the minimum interval that is accepted for a
	// restart policy.
	RestartPolicyMinInterval = 5 * time.Second
	// ReasonWithinPolicy describes restart events that are within policy
	ReasonWithinPolicy = "Restart within policy"
)
// JobScalingEvents contains the scaling events for a given job
type JobScalingEvents struct {
	// Namespace and JobID identify the job the events belong to.
	Namespace string
	JobID     string
	// This map is indexed by target; currently, this is just task group
	// the indexed array is sorted from newest to oldest event
	// the array should have less than JobTrackedScalingEvents entries
	ScalingEvents map[string][]*ScalingEvent
	// Raft index
	ModifyIndex uint64
}
// NewScalingEvent returns a ScalingEvent carrying the given message,
// timestamped with the current Unix time (seconds).
func NewScalingEvent(message string) *ScalingEvent {
	event := &ScalingEvent{
		Message: message,
		Time:    time.Now().Unix(),
	}
	return event
}
// ScalingEvent describes a scaling event against a Job
type ScalingEvent struct {
	// Time is the Unix timestamp of the scaling event.
	// NOTE(review): the original comment said "Unix Nanosecond timestamp",
	// but NewScalingEvent stores time.Now().Unix() (seconds) — confirm the
	// intended unit.
	Time int64
	// Count is the new scaling count, if provided
	Count *int64
	// PreviousCount is the count at the time of the scaling event
	PreviousCount int64
	// Message is the message describing a scaling event
	Message string
	// Error indicates an error state for this scaling event
	Error bool
	// Meta is a map of metadata returned during a scaling event
	Meta map[string]interface{}
	// EvalID is the ID for an evaluation if one was created as part of a scaling event
	EvalID *string
	// Raft index
	CreateIndex uint64
}
// SetError marks whether the event represents an error state and returns the
// event for call chaining.
func (e *ScalingEvent) SetError(error bool) *ScalingEvent {
	e.Error = error
	return e
}
// SetMeta attaches metadata to the event and returns the event for call
// chaining. The map is stored by reference, not copied.
func (e *ScalingEvent) SetMeta(meta map[string]interface{}) *ScalingEvent {
	e.Meta = meta
	return e
}
// SetEvalID records the evaluation created for this scaling event and
// returns the event for call chaining.
func (e *ScalingEvent) SetEvalID(evalID string) *ScalingEvent {
	e.EvalID = &evalID
	return e
}
// ScalingEventRequest is used by the Job.Scale endpoint
// to register scaling events
type ScalingEventRequest struct {
	// Namespace, JobID and TaskGroup identify the target of the event.
	Namespace string
	JobID     string
	TaskGroup string
	// ScalingEvent is the event to record.
	ScalingEvent *ScalingEvent
}
// ScalingPolicy specifies the scaling policy for a scaling target
type ScalingPolicy struct {
	// ID is a generated UUID used for looking up the scaling policy
	ID string
	// Type is the type of scaling performed by the policy
	Type string
	// Target contains information about the target of the scaling policy, like job and group
	Target map[string]string
	// Policy is an opaque description of the scaling policy, passed to the autoscaler
	Policy map[string]interface{}
	// Min is the minimum allowable scaling count for this target
	Min int64
	// Max is the maximum allowable scaling count for this target
	Max int64
	// Enabled indicates whether this policy has been enabled/disabled
	Enabled bool
	// Raft indexes.
	CreateIndex uint64
	ModifyIndex uint64
}
// JobKey returns a key that is unique to a job-scoped target, useful as a map
// key. This uses the policy type, plus target (group and task), joined with
// null bytes so the parts cannot collide.
func (p *ScalingPolicy) JobKey() string {
	parts := []string{p.Type, p.Target[ScalingTargetGroup], p.Target[ScalingTargetTask]}
	return strings.Join(parts, "\000")
}
const (
	// Keys used in ScalingPolicy.Target to identify what is being scaled.
	ScalingTargetNamespace = "Namespace"
	ScalingTargetJob       = "Job"
	ScalingTargetGroup     = "Group"
	ScalingTargetTask      = "Task"
	// ScalingPolicyTypeHorizontal is the default policy type (count scaling).
	ScalingPolicyTypeHorizontal = "horizontal"
)
// Canonicalize defaults the policy type to horizontal scaling when the user
// did not specify one.
func (p *ScalingPolicy) Canonicalize() {
	if len(p.Type) == 0 {
		p.Type = ScalingPolicyTypeHorizontal
	}
}
// Copy returns a deep copy of the scaling policy, or nil for a nil receiver.
// The opaque policy document and the target map are both deep-copied.
func (p *ScalingPolicy) Copy() *ScalingPolicy {
	if p == nil {
		return nil
	}
	// The opaque policy may contain nested maps/slices, so use a structural
	// deep copy. Failure here indicates a programming error.
	opaque, err := copystructure.Copy(p.Policy)
	if err != nil {
		panic(err.Error())
	}
	np := &ScalingPolicy{
		ID:          p.ID,
		Type:        p.Type,
		Policy:      opaque.(map[string]interface{}),
		Min:         p.Min,
		Max:         p.Max,
		Enabled:     p.Enabled,
		CreateIndex: p.CreateIndex,
		ModifyIndex: p.ModifyIndex,
		Target:      make(map[string]string, len(p.Target)),
	}
	for k, v := range p.Target {
		np.Target[k] = v
	}
	return np
}
// Validate checks the policy type and the Min/Max bounds. A nil policy is
// valid (no scaling configured).
func (p *ScalingPolicy) Validate() error {
	if p == nil {
		return nil
	}
	var mErr multierror.Error
	// The type must be present; type-specific checks are delegated.
	if p.Type == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("missing scaling policy type"))
	} else {
		mErr.Errors = append(mErr.Errors, p.validateType().Errors...)
	}
	// Max is required, non-negative, and must dominate Min.
	switch {
	case p.Max < 0:
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("maximum count must be specified and non-negative"))
	case p.Max < p.Min:
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("maximum count must not be less than minimum count"))
	}
	if p.Min < 0 {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("minimum count must be specified and non-negative"))
	}
	return mErr.ErrorOrNil()
}
// validateTargetHorizontal checks that a horizontal policy's target carries
// the Namespace, Job, and Group keys Nomad requires. An empty target is
// assumed to belong to a non-Nomad policy and is accepted as-is.
func (p *ScalingPolicy) validateTargetHorizontal() (mErr multierror.Error) {
	if len(p.Target) == 0 {
		return
	}
	checks := []struct {
		key string
		msg string
	}{
		{ScalingTargetNamespace, "missing target namespace"},
		{ScalingTargetJob, "missing target job"},
		{ScalingTargetGroup, "missing target group"},
	}
	for _, c := range checks {
		if p.Target[c.key] == "" {
			mErr.Errors = append(mErr.Errors, errors.New(c.msg))
		}
	}
	return
}
// Diff indicates whether the specification for a given scaling policy has
// changed relative to p2, ignoring the identity and Raft bookkeeping fields
// (ID, CreateIndex, ModifyIndex).
func (p *ScalingPolicy) Diff(p2 *ScalingPolicy) bool {
	// Compare against a copy of p2 whose bookkeeping fields are normalized
	// to p's so only the specification participates in the comparison.
	// Renamed the local from "copy" to avoid shadowing the builtin.
	other := *p2
	other.ID = p.ID
	other.CreateIndex = p.CreateIndex
	other.ModifyIndex = p.ModifyIndex
	return !reflect.DeepEqual(*p, other)
}
// TargetTaskGroup updates a ScalingPolicy target to specify a given task group
// and returns the policy for call chaining.
func (p *ScalingPolicy) TargetTaskGroup(job *Job, tg *TaskGroup) *ScalingPolicy {
	target := make(map[string]string, 3)
	target[ScalingTargetNamespace] = job.Namespace
	target[ScalingTargetJob] = job.ID
	target[ScalingTargetGroup] = tg.Name
	p.Target = target
	return p
}
// TargetTask updates a ScalingPolicy target to specify a given task, building
// on the task group target, and returns the policy for call chaining.
func (p *ScalingPolicy) TargetTask(job *Job, tg *TaskGroup, task *Task) *ScalingPolicy {
	p.TargetTaskGroup(job, tg).Target[ScalingTargetTask] = task.Name
	return p
}
// Stub returns the list-view summary of the policy, with its own copy of the
// target map.
func (p *ScalingPolicy) Stub() *ScalingPolicyListStub {
	target := make(map[string]string)
	for k, v := range p.Target {
		target[k] = v
	}
	return &ScalingPolicyListStub{
		ID:          p.ID,
		Type:        p.Type,
		Target:      target,
		Enabled:     p.Enabled,
		CreateIndex: p.CreateIndex,
		ModifyIndex: p.ModifyIndex,
	}
}
// GetScalingPolicies returns a slice of all scaling policies for this job:
// every task group's policy plus any enterprise policies.
func (j *Job) GetScalingPolicies() []*ScalingPolicy {
	policies := make([]*ScalingPolicy, 0)
	for _, tg := range j.TaskGroups {
		if tg.Scaling == nil {
			continue
		}
		policies = append(policies, tg.Scaling)
	}
	return append(policies, j.GetEntScalingPolicies()...)
}
// ScalingPolicyListStub is used to return a subset of scaling policy information
// for the scaling policy list
type ScalingPolicyListStub struct {
	ID      string
	Enabled bool
	Type    string
	Target  map[string]string
	// Raft indexes.
	CreateIndex uint64
	ModifyIndex uint64
}
// RestartPolicy configures how Tasks are restarted when they crash or fail.
type RestartPolicy struct {
	// Attempts is the number of restart that will occur in an interval.
	Attempts int
	// Interval is a duration in which we can limit the number of restarts
	// within.
	Interval time.Duration
	// Delay is the time between a failure and a restart.
	Delay time.Duration
	// Mode controls what happens when the task restarts more than attempt times
	// in an interval; one of RestartPolicyModeDelay or RestartPolicyModeFail.
	Mode string
}
// Copy returns a copy of the restart policy, or nil for a nil receiver. All
// fields are value types so a shallow copy suffices.
func (r *RestartPolicy) Copy() *RestartPolicy {
	if r == nil {
		return nil
	}
	c := *r
	return &c
}
// Validate checks the restart policy for a supported mode, an unambiguous
// attempts/mode combination, a minimum interval, and that the configured
// restarts actually fit within the interval.
func (r *RestartPolicy) Validate() error {
	var mErr multierror.Error
	if r.Mode != RestartPolicyModeDelay && r.Mode != RestartPolicyModeFail {
		_ = multierror.Append(&mErr, fmt.Errorf("Unsupported restart mode: %q", r.Mode))
	}
	// Zero attempts with a non-fail mode would neither restart nor fail.
	if r.Attempts == 0 && r.Mode != RestartPolicyModeFail {
		_ = multierror.Append(&mErr, fmt.Errorf("Restart policy %q with %d attempts is ambiguous", r.Mode, r.Attempts))
	}
	if r.Interval < RestartPolicyMinInterval {
		_ = multierror.Append(&mErr, fmt.Errorf("Interval can not be less than %v (got %v)", RestartPolicyMinInterval, r.Interval))
	}
	// The total delay across all attempts must fit inside the interval.
	if time.Duration(r.Attempts)*r.Delay > r.Interval {
		_ = multierror.Append(&mErr,
			fmt.Errorf("Nomad can't restart the TaskGroup %v times in an interval of %v with a delay of %v", r.Attempts, r.Interval, r.Delay))
	}
	return mErr.ErrorOrNil()
}
// NewRestartPolicy returns a pointer to a fresh copy of the default restart
// policy for the given job type, or nil for unknown job types.
func NewRestartPolicy(jobType string) *RestartPolicy {
	var rp RestartPolicy
	switch jobType {
	case JobTypeService, JobTypeSystem:
		rp = DefaultServiceJobRestartPolicy
	case JobTypeBatch:
		rp = DefaultBatchJobRestartPolicy
	default:
		return nil
	}
	return &rp
}
// ReschedulePolicyMinInterval is the smallest interval accepted for a
// reschedule policy with limited attempts.
const ReschedulePolicyMinInterval = 15 * time.Second

// ReschedulePolicyMinDelay is the smallest delay accepted between reschedule
// attempts.
const ReschedulePolicyMinDelay = 5 * time.Second

// RescheduleDelayFunctions enumerates the supported delay progression
// functions for rescheduling.
var RescheduleDelayFunctions = [...]string{"constant", "exponential", "fibonacci"}
// ReschedulePolicy configures how Tasks are rescheduled when they crash or fail.
type ReschedulePolicy struct {
	// Attempts limits the number of rescheduling attempts that can occur in an interval.
	Attempts int
	// Interval is a duration in which we can limit the number of reschedule attempts.
	Interval time.Duration
	// Delay is a minimum duration to wait between reschedule attempts.
	// The delay function determines how much subsequent reschedule attempts are delayed by.
	Delay time.Duration
	// DelayFunction determines how the delay progressively changes on subsequent reschedule
	// attempts. Valid values are "exponential", "constant", and "fibonacci".
	DelayFunction string
	// MaxDelay is an upper bound on the delay.
	MaxDelay time.Duration
	// Unlimited allows infinite rescheduling attempts. Only allowed when delay is set
	// between reschedule attempts.
	Unlimited bool
}
// Copy returns a copy of the reschedule policy, or nil for a nil receiver.
// All fields are value types so a shallow copy suffices.
func (r *ReschedulePolicy) Copy() *ReschedulePolicy {
	if r == nil {
		return nil
	}
	c := *r
	return &c
}
// Enabled reports whether this policy will ever reschedule an allocation:
// either a positive attempt budget or unlimited attempts. A nil policy is
// disabled.
func (r *ReschedulePolicy) Enabled() bool {
	if r == nil {
		return false
	}
	return r.Attempts > 0 || r.Unlimited
}
// Validate uses different criteria to validate the reschedule policy
// Delay must be a minimum of 5 seconds
// Delay Ceiling is ignored if Delay Function is "constant"
// Number of possible attempts is validated, given the interval, delay and delay function
func (r *ReschedulePolicy) Validate() error {
	// A disabled policy needs no validation.
	if !r.Enabled() {
		return nil
	}
	var mErr multierror.Error
	// Check for ambiguous/confusing settings
	if r.Attempts > 0 {
		if r.Interval <= 0 {
			_ = multierror.Append(&mErr, fmt.Errorf("Interval must be a non zero value if Attempts > 0"))
		}
		if r.Unlimited {
			_ = multierror.Append(&mErr, fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, "+
				"and Unlimited = %v is ambiguous", r.Attempts, r.Interval, r.Unlimited))
			_ = multierror.Append(&mErr, errors.New("If Attempts >0, Unlimited cannot also be set to true"))
		}
	}
	// delayPreCheck gates the cross-validation below: if the basic delay
	// settings are invalid, computing viable attempts would be meaningless.
	delayPreCheck := true
	// Delay should be bigger than the default
	if r.Delay.Nanoseconds() < ReschedulePolicyMinDelay.Nanoseconds() {
		_ = multierror.Append(&mErr, fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, r.Delay))
		delayPreCheck = false
	}
	// Must use a valid delay function
	if !isValidDelayFunction(r.DelayFunction) {
		_ = multierror.Append(&mErr, fmt.Errorf("Invalid delay function %q, must be one of %q", r.DelayFunction, RescheduleDelayFunctions))
		delayPreCheck = false
	}
	// Validate MaxDelay if not using linear delay progression
	if r.DelayFunction != "constant" {
		if r.MaxDelay.Nanoseconds() < ReschedulePolicyMinDelay.Nanoseconds() {
			// Fix: report the offending MaxDelay value; the original
			// message printed r.Delay even though r.MaxDelay was checked.
			_ = multierror.Append(&mErr, fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, r.MaxDelay))
			delayPreCheck = false
		}
		if r.MaxDelay < r.Delay {
			_ = multierror.Append(&mErr, fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)", r.Delay, r.MaxDelay))
			delayPreCheck = false
		}
	}
	// Validate Interval and other delay parameters if attempts are limited
	if !r.Unlimited {
		if r.Interval.Nanoseconds() < ReschedulePolicyMinInterval.Nanoseconds() {
			_ = multierror.Append(&mErr, fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, r.Interval))
		}
		if !delayPreCheck {
			// We can't cross validate the rest of the delay params if delayPreCheck fails, so return early
			return mErr.ErrorOrNil()
		}
		crossValidationErr := r.validateDelayParams()
		if crossValidationErr != nil {
			_ = multierror.Append(&mErr, crossValidationErr)
		}
	}
	return mErr.ErrorOrNil()
}
// isValidDelayFunction reports whether delayFunc is one of the supported
// reschedule delay functions (see RescheduleDelayFunctions).
func isValidDelayFunction(delayFunc string) bool {
	for _, valid := range RescheduleDelayFunctions {
		if delayFunc == valid {
			return true
		}
	}
	return false
}
// validateDelayParams cross-validates Attempts, Interval, Delay, and
// DelayFunction: it checks that the configured interval is long enough to
// accommodate the requested number of attempts given the delay progression,
// and returns descriptive errors (including a recommended interval) if not.
func (r *ReschedulePolicy) validateDelayParams() error {
	ok, possibleAttempts, recommendedInterval := r.viableAttempts()
	if ok {
		return nil
	}
	var mErr multierror.Error
	// The "constant" message omits the delay ceiling since MaxDelay is
	// ignored for linear progression.
	if r.DelayFunction == "constant" {
		_ = multierror.Append(&mErr, fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and "+
			"delay function %q", possibleAttempts, r.Interval, r.Delay, r.DelayFunction))
	} else {
		_ = multierror.Append(&mErr, fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
			"delay function %q, and delay ceiling %v", possibleAttempts, r.Interval, r.Delay, r.DelayFunction, r.MaxDelay))
	}
	_ = multierror.Append(&mErr, fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", recommendedInterval.Round(time.Second), r.Attempts))
	return mErr.ErrorOrNil()
}
// viableAttempts computes whether the configured Interval can accommodate
// Attempts reschedules under the configured delay progression. It returns
// (valid, possibleAttempts, recommendedInterval): possibleAttempts is how
// many attempts fit when the config is invalid, and recommendedInterval is
// an interval large enough for the requested attempts. An unknown delay
// function yields (false, 0, 0).
func (r *ReschedulePolicy) viableAttempts() (bool, int, time.Duration) {
	var possibleAttempts int
	var recommendedInterval time.Duration
	valid := true
	switch r.DelayFunction {
	case "constant":
		// Each attempt costs exactly Delay, so the requirement is linear.
		recommendedInterval = time.Duration(r.Attempts) * r.Delay
		if r.Interval < recommendedInterval {
			possibleAttempts = int(r.Interval / r.Delay)
			valid = false
		}
	case "exponential":
		// Delays double each attempt, capped at MaxDelay.
		// NOTE(review): below the cap the accumulator is *assigned* the next
		// delay, while at the cap it is *incremented* — an asymmetry kept
		// as-is; confirm against upstream intent before changing.
		for i := 0; i < r.Attempts; i++ {
			nextDelay := time.Duration(math.Pow(2, float64(i))) * r.Delay
			if nextDelay > r.MaxDelay {
				nextDelay = r.MaxDelay
				recommendedInterval += nextDelay
			} else {
				recommendedInterval = nextDelay
			}
			if recommendedInterval < r.Interval {
				possibleAttempts++
			}
		}
		if possibleAttempts < r.Attempts {
			valid = false
		}
	case "fibonacci":
		// Delays follow a Fibonacci progression seeded with (Delay, Delay);
		// once the ceiling is hit, growth continues linearly by MaxDelay.
		var slots []time.Duration
		slots = append(slots, r.Delay)
		slots = append(slots, r.Delay)
		reachedCeiling := false
		for i := 2; i < r.Attempts; i++ {
			var nextDelay time.Duration
			if reachedCeiling {
				//switch to linear
				nextDelay = slots[i-1] + r.MaxDelay
			} else {
				nextDelay = slots[i-1] + slots[i-2]
				if nextDelay > r.MaxDelay {
					nextDelay = r.MaxDelay
					reachedCeiling = true
				}
			}
			slots = append(slots, nextDelay)
		}
		recommendedInterval = slots[len(slots)-1]
		if r.Interval < recommendedInterval {
			valid = false
			// calculate possible attempts
			for i := 0; i < len(slots); i++ {
				if slots[i] > r.Interval {
					possibleAttempts = i
					break
				}
			}
		}
	default:
		return false, 0, 0
	}
	if possibleAttempts < 0 { // can happen if delay is bigger than interval
		possibleAttempts = 0
	}
	return valid, possibleAttempts, recommendedInterval
}
// NewReschedulePolicy returns a pointer to a fresh copy of the default
// reschedule policy for the given job type, or nil for job types (such as
// system) that are never rescheduled.
func NewReschedulePolicy(jobType string) *ReschedulePolicy {
	var rp ReschedulePolicy
	switch jobType {
	case JobTypeService:
		rp = DefaultServiceJobReschedulePolicy
	case JobTypeBatch:
		rp = DefaultBatchJobReschedulePolicy
	default:
		return nil
	}
	return &rp
}
const (
	// MigrateStrategyHealthChecks gates migration on service health checks.
	MigrateStrategyHealthChecks = "checks"
	// MigrateStrategyHealthStates gates migration on task states only.
	MigrateStrategyHealthStates = "task_states"
)
// MigrateStrategy controls how allocations are migrated off draining nodes.
type MigrateStrategy struct {
	// MaxParallel is how many allocations may be migrated at once.
	MaxParallel int
	// HealthCheck is one of the MigrateStrategyHealth* constants.
	HealthCheck string
	// MinHealthyTime is how long an allocation must be healthy before
	// migration continues.
	MinHealthyTime time.Duration
	// HealthyDeadline is the deadline for an allocation to become healthy.
	HealthyDeadline time.Duration
}
// DefaultMigrateStrategy is used for backwards compat with pre-0.8 Allocations
// that lack an update strategy.
//
// This function should match its counterpart in api/tasks.go
func DefaultMigrateStrategy() *MigrateStrategy {
	strategy := MigrateStrategy{
		MaxParallel:     1,
		HealthCheck:     MigrateStrategyHealthChecks,
		MinHealthyTime:  10 * time.Second,
		HealthyDeadline: 5 * time.Minute,
	}
	return &strategy
}
// Validate checks a migrate strategy for non-negative parallelism, a known
// health-check mode, and consistent healthy-time/deadline settings.
func (m *MigrateStrategy) Validate() error {
	var mErr multierror.Error
	if m.MaxParallel < 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("MaxParallel must be >= 0 but found %d", m.MaxParallel))
	}
	switch m.HealthCheck {
	case MigrateStrategyHealthChecks, MigrateStrategyHealthStates:
		// Known health-check mode; nothing to do.
	case "":
		// An empty mode is only acceptable when migration is disabled.
		if m.MaxParallel > 0 {
			_ = multierror.Append(&mErr, fmt.Errorf("Missing HealthCheck"))
		}
	default:
		_ = multierror.Append(&mErr, fmt.Errorf("Invalid HealthCheck: %q", m.HealthCheck))
	}
	if m.MinHealthyTime < 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("MinHealthyTime is %s and must be >= 0", m.MinHealthyTime))
	}
	if m.HealthyDeadline < 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("HealthyDeadline is %s and must be >= 0", m.HealthyDeadline))
	}
	if m.MinHealthyTime > m.HealthyDeadline {
		_ = multierror.Append(&mErr, fmt.Errorf("MinHealthyTime must be less than HealthyDeadline"))
	}
	return mErr.ErrorOrNil()
}
// TaskGroup is an atomic unit of placement. Each task group belongs to
// a job and may contain any number of tasks. A task group supports running
// in many replicas using the same configuration.
type TaskGroup struct {
	// Name of the task group
	Name string
	// Count is the number of replicas of this task group that should
	// be scheduled.
	Count int
	// Update is used to control the update strategy for this task group
	Update *UpdateStrategy
	// Migrate is used to control the migration strategy for this task group
	Migrate *MigrateStrategy
	// Constraints can be specified at a task group level and apply to
	// all the tasks contained.
	Constraints []*Constraint
	// Scaling is the list of autoscaling policies for the TaskGroup
	Scaling *ScalingPolicy
	// RestartPolicy of a TaskGroup
	RestartPolicy *RestartPolicy
	// Tasks are the collection of tasks that this task group needs to run
	Tasks []*Task
	// EphemeralDisk is the disk resources that the task group requests
	EphemeralDisk *EphemeralDisk
	// Meta is used to associate arbitrary metadata with this
	// task group. This is opaque to Nomad.
	Meta map[string]string
	// ReschedulePolicy is used to configure how the scheduler should
	// retry failed allocations.
	ReschedulePolicy *ReschedulePolicy
	// Affinities can be specified at the task group level to express
	// scheduling preferences.
	Affinities []*Affinity
	// Spread can be specified at the task group level to express spreading
	// allocations across a desired attribute, such as datacenter
	Spreads []*Spread
	// Networks are the network configuration for the task group. This can be
	// overridden in the task.
	Networks Networks
	// Consul configuration specific to this task group
	Consul *Consul
	// Services this group provides
	Services []*Service
	// Volumes is a map of volumes that have been requested by the task group.
	Volumes map[string]*VolumeRequest
	// ShutdownDelay is the amount of time to wait between deregistering
	// group services in consul and stopping tasks.
	ShutdownDelay *time.Duration
	// StopAfterClientDisconnect, if set, configures the client to stop the task group
	// after this duration since the last known good heartbeat
	StopAfterClientDisconnect *time.Duration
	// MaxClientDisconnect, if set, configures the client to allow placed
	// allocations for tasks in this group to attempt to resume running without a restart.
	MaxClientDisconnect *time.Duration
}
// Copy returns a deep copy of the task group, or nil for a nil receiver.
// Nested policies, constraints, networks, tasks, services, and volumes are
// copied; the *time.Duration fields remain shared pointers as before.
func (tg *TaskGroup) Copy() *TaskGroup {
	if tg == nil {
		return nil
	}
	ntg := new(TaskGroup)
	*ntg = *tg
	// Replace shallow-copied reference fields with deep copies.
	ntg.Update = ntg.Update.Copy()
	ntg.Constraints = CopySliceConstraints(ntg.Constraints)
	ntg.RestartPolicy = ntg.RestartPolicy.Copy()
	ntg.ReschedulePolicy = ntg.ReschedulePolicy.Copy()
	ntg.Affinities = CopySliceAffinities(ntg.Affinities)
	ntg.Spreads = CopySliceSpreads(ntg.Spreads)
	ntg.Volumes = CopyMapVolumeRequest(ntg.Volumes)
	ntg.Scaling = ntg.Scaling.Copy()
	ntg.Consul = ntg.Consul.Copy()
	// Copy the network objects
	if tg.Networks != nil {
		ntg.Networks = make([]*NetworkResource, 0, len(tg.Networks))
		for _, net := range tg.Networks {
			ntg.Networks = append(ntg.Networks, net.Copy())
		}
	}
	if tg.Tasks != nil {
		copied := make([]*Task, 0, len(tg.Tasks))
		for _, task := range tg.Tasks {
			copied = append(copied, task.Copy())
		}
		ntg.Tasks = copied
	}
	ntg.Meta = helper.CopyMapStringString(ntg.Meta)
	if tg.EphemeralDisk != nil {
		ntg.EphemeralDisk = tg.EphemeralDisk.Copy()
	}
	if tg.Services != nil {
		ntg.Services = make([]*Service, 0, len(tg.Services))
		for _, svc := range tg.Services {
			ntg.Services = append(ntg.Services, svc.Copy())
		}
	}
	// The duration pointers below are intentionally shared with the source
	// (same behavior as the struct-level shallow copy above).
	if tg.ShutdownDelay != nil {
		ntg.ShutdownDelay = tg.ShutdownDelay
	}
	if tg.StopAfterClientDisconnect != nil {
		ntg.StopAfterClientDisconnect = tg.StopAfterClientDisconnect
	}
	if tg.MaxClientDisconnect != nil {
		ntg.MaxClientDisconnect = tg.MaxClientDisconnect
	}
	return ntg
}
// Canonicalize is used to canonicalize fields in the TaskGroup.
func (tg *TaskGroup) Canonicalize(job *Job) {
	// Treat an empty meta map as nil so reflect.DeepEqual comparisons in the
	// scheduler do not see a difference between the two.
	if len(tg.Meta) == 0 {
		tg.Meta = nil
	}
	// Fill in defaults derived from the job type.
	if tg.RestartPolicy == nil {
		tg.RestartPolicy = NewRestartPolicy(job.Type)
	}
	if tg.ReschedulePolicy == nil {
		tg.ReschedulePolicy = NewReschedulePolicy(job.Type)
	}
	// Canonicalize Migrate for service jobs
	if tg.Migrate == nil && job.Type == JobTypeService {
		tg.Migrate = DefaultMigrateStrategy()
	}
	// Set a default ephemeral disk object if the user has not requested for one
	if tg.EphemeralDisk == nil {
		tg.EphemeralDisk = DefaultEphemeralDisk()
	}
	if tg.Scaling != nil {
		tg.Scaling.Canonicalize()
	}
	// Recurse into nested objects.
	for _, svc := range tg.Services {
		svc.Canonicalize(job.Name, tg.Name, "group", job.Namespace)
	}
	for _, net := range tg.Networks {
		net.Canonicalize()
	}
	for _, task := range tg.Tasks {
		task.Canonicalize(job, tg)
	}
}
// Validate is used to check a task group for reasonable configuration
func (tg *TaskGroup) Validate(j *Job) error {
	var mErr multierror.Error
	// Basic identity and sizing checks.
	if tg.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task group name"))
	} else if strings.Contains(tg.Name, "\000") {
		// Null bytes are rejected because group names participate in
		// null-delimited map keys (see ScalingPolicy.JobKey).
		mErr.Errors = append(mErr.Errors, errors.New("Task group name contains null character"))
	}
	if tg.Count < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Task group count can't be negative"))
	}
	if len(tg.Tasks) == 0 {
		// could be a lone consul gateway inserted by the connect mutator
		mErr.Errors = append(mErr.Errors, errors.New("Missing tasks for task group"))
	}
	// max_client_disconnect and stop_after_client_disconnect are mutually
	// exclusive ways of handling a lost client.
	if tg.MaxClientDisconnect != nil && tg.StopAfterClientDisconnect != nil {
		mErr.Errors = append(mErr.Errors, errors.New("Task group cannot be configured with both max_client_disconnect and stop_after_client_disconnect"))
	}
	if tg.MaxClientDisconnect != nil && *tg.MaxClientDisconnect < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("max_client_disconnect cannot be negative"))
	}
	// Validate each constraint.
	for idx, constr := range tg.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}
	// Affinities are not allowed for system jobs.
	if j.Type == JobTypeSystem {
		if tg.Affinities != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza"))
		}
	} else {
		for idx, affinity := range tg.Affinities {
			if err := affinity.Validate(); err != nil {
				outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err)
				mErr.Errors = append(mErr.Errors, outer)
			}
		}
	}
	// A restart policy is required (Canonicalize normally supplies one).
	if tg.RestartPolicy != nil {
		if err := tg.RestartPolicy.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	} else {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a restart policy", tg.Name))
	}
	// Spreads are not allowed for system jobs.
	if j.Type == JobTypeSystem {
		if tg.Spreads != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread stanza"))
		}
	} else {
		for idx, spread := range tg.Spreads {
			if err := spread.Validate(); err != nil {
				outer := fmt.Errorf("Spread %d validation failed: %s", idx+1, err)
				mErr.Errors = append(mErr.Errors, outer)
			}
		}
	}
	// System jobs are never rescheduled; every other job type must carry a
	// reschedule policy (Canonicalize normally supplies one).
	if j.Type == JobTypeSystem {
		if tg.ReschedulePolicy != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs should not have a reschedule policy"))
		}
	} else {
		if tg.ReschedulePolicy != nil {
			if err := tg.ReschedulePolicy.Validate(); err != nil {
				mErr.Errors = append(mErr.Errors, err)
			}
		} else {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a reschedule policy", tg.Name))
		}
	}
	// An ephemeral disk object is required (Canonicalize supplies a default).
	if tg.EphemeralDisk != nil {
		if err := tg.EphemeralDisk.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	} else {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have an ephemeral disk object", tg.Name))
	}
	// Validate the update strategy
	if u := tg.Update; u != nil {
		switch j.Type {
		case JobTypeService, JobTypeSystem:
		default:
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow update block", j.Type))
		}
		if err := u.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}
	// Validate the migration strategy
	switch j.Type {
	case JobTypeService:
		if tg.Migrate != nil {
			if err := tg.Migrate.Validate(); err != nil {
				mErr.Errors = append(mErr.Errors, err)
			}
		}
	default:
		if tg.Migrate != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow migrate block", j.Type))
		}
	}
	// Check that there is only one leader task if any
	tasks := make(map[string]int)
	leaderTasks := 0
	for idx, task := range tg.Tasks {
		if task.Name == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d missing name", idx+1))
		} else if existing, ok := tasks[task.Name]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d redefines '%s' from task %d", idx+1, task.Name, existing+1))
		} else {
			tasks[task.Name] = idx
		}
		if task.Leader {
			leaderTasks++
		}
	}
	if leaderTasks > 1 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Only one task may be marked as leader"))
	}
	// Validate the volume requests
	var canaries int
	if tg.Update != nil {
		canaries = tg.Update.Canary
	}
	for name, volReq := range tg.Volumes {
		if err := volReq.Validate(tg.Count, canaries); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf(
				"Task group volume validation for %s failed: %v", name, err))
		}
	}
	// Validate task group and task network resources
	if err := tg.validateNetworks(); err != nil {
		outer := fmt.Errorf("Task group network validation failed: %v", err)
		mErr.Errors = append(mErr.Errors, outer)
	}
	// Validate task group and task services
	if err := tg.validateServices(); err != nil {
		outer := fmt.Errorf("Task group service validation failed: %v", err)
		mErr.Errors = append(mErr.Errors, outer)
	}
	// Validate group service script-checks
	if err := tg.validateScriptChecksInGroupServices(); err != nil {
		outer := fmt.Errorf("Task group service check validation failed: %v", err)
		mErr.Errors = append(mErr.Errors, outer)
	}
	// Validate the scaling policy
	if err := tg.validateScalingPolicy(j); err != nil {
		outer := fmt.Errorf("Task group scaling policy validation failed: %v", err)
		mErr.Errors = append(mErr.Errors, outer)
	}
	// Validate the tasks
	for _, task := range tg.Tasks {
		// Validate the task does not reference undefined volume mounts
		for i, mnt := range task.VolumeMounts {
			if mnt.Volume == "" {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %s has a volume mount (%d) referencing an empty volume", task.Name, i))
				continue
			}
			if _, ok := tg.Volumes[mnt.Volume]; !ok {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %s has a volume mount (%d) referencing undefined volume %s", task.Name, i, mnt.Volume))
				continue
			}
		}
		if err := task.Validate(tg.EphemeralDisk, j.Type, tg.Services, tg.Networks); err != nil {
			outer := fmt.Errorf("Task %s validation failed: %v", task.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}
	return mErr.ErrorOrNil()
}
// validateNetworks checks group-level and task-level network blocks for
// duplicate port labels, duplicate static port reservations per host
// network, out-of-range port values, and (for group networks) a valid
// hostname. Group networks are processed first so their labels win conflict
// reports against task networks.
func (tg *TaskGroup) validateNetworks() error {
	var mErr multierror.Error
	// port label -> owner description, used to detect duplicate labels.
	portLabels := make(map[string]string)
	// host_network -> static port tracking
	staticPortsIndex := make(map[string]map[int]string)
	for _, net := range tg.Networks {
		for _, port := range append(net.ReservedPorts, net.DynamicPorts...) {
			if other, ok := portLabels[port.Label]; ok {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("Port label %s already in use by %s", port.Label, other))
			} else {
				portLabels[port.Label] = "taskgroup network"
			}
			if port.Value != 0 {
				// Static reservations are tracked per host network; an
				// unspecified host network falls back to "default".
				hostNetwork := port.HostNetwork
				if hostNetwork == "" {
					hostNetwork = "default"
				}
				staticPorts, ok := staticPortsIndex[hostNetwork]
				if !ok {
					staticPorts = make(map[int]string)
				}
				// static port
				if other, ok := staticPorts[port.Value]; ok {
					err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other)
					mErr.Errors = append(mErr.Errors, err)
				} else if port.Value > math.MaxUint16 {
					err := fmt.Errorf("Port %s (%d) cannot be greater than %d", port.Label, port.Value, math.MaxUint16)
					mErr.Errors = append(mErr.Errors, err)
				} else {
					staticPorts[port.Value] = fmt.Sprintf("taskgroup network:%s", port.Label)
					staticPortsIndex[hostNetwork] = staticPorts
				}
			}
			// To == -1 means "map to the same port"; anything lower is invalid.
			if port.To < -1 {
				err := fmt.Errorf("Port %q cannot be mapped to negative value %d", port.Label, port.To)
				mErr.Errors = append(mErr.Errors, err)
			} else if port.To > math.MaxUint16 {
				err := fmt.Errorf("Port %q cannot be mapped to a port (%d) greater than %d", port.Label, port.To, math.MaxUint16)
				mErr.Errors = append(mErr.Errors, err)
			}
		}
		// Validate the hostname field to be a valid DNS name. If the parameter
		// looks like it includes an interpolation value, we skip this. It
		// would be nice to validate additional parameters, but this isn't the
		// right place.
		if net.Hostname != "" && !strings.Contains(net.Hostname, "${") {
			if _, ok := dns.IsDomainName(net.Hostname); !ok {
				mErr.Errors = append(mErr.Errors, errors.New("Hostname is not a valid DNS name"))
			}
		}
	}
	// Check for duplicate tasks or port labels, and no duplicated static ports
	for _, task := range tg.Tasks {
		if task.Resources == nil {
			continue
		}
		for _, net := range task.Resources.Networks {
			for _, port := range append(net.ReservedPorts, net.DynamicPorts...) {
				if other, ok := portLabels[port.Label]; ok {
					mErr.Errors = append(mErr.Errors, fmt.Errorf("Port label %s already in use by %s", port.Label, other))
				}
				if port.Value != 0 {
					hostNetwork := port.HostNetwork
					if hostNetwork == "" {
						hostNetwork = "default"
					}
					staticPorts, ok := staticPortsIndex[hostNetwork]
					if !ok {
						staticPorts = make(map[int]string)
					}
					if other, ok := staticPorts[port.Value]; ok {
						err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other)
						mErr.Errors = append(mErr.Errors, err)
					} else if port.Value > math.MaxUint16 {
						err := fmt.Errorf("Port %s (%d) cannot be greater than %d", port.Label, port.Value, math.MaxUint16)
						mErr.Errors = append(mErr.Errors, err)
					} else {
						staticPorts[port.Value] = fmt.Sprintf("%s:%s", task.Name, port.Label)
						staticPortsIndex[hostNetwork] = staticPorts
					}
				}
			}
		}
	}
	return mErr.ErrorOrNil()
}
// validateServices runs Service.Validate() on group-level services and checks
// that group service checks which refer to tasks only refer to tasks that
// exist. It also verifies that all services in the group — task-level and
// group-level — use a single service provider.
func (tg *TaskGroup) validateServices() error {
	var mErr multierror.Error
	knownTasks := make(map[string]struct{})
	// Track the providers used for this task group. Currently, Nomad only
	// allows the use of a single service provider within a task group.
	configuredProviders := make(map[string]struct{})
	// Create a map of known tasks and their services so we can compare
	// vs the group-level services and checks
	for _, task := range tg.Tasks {
		knownTasks[task.Name] = struct{}{}
		if task.Services == nil {
			continue
		}
		for _, service := range task.Services {
			// Only group-level service checks may target a task.
			for _, check := range service.Checks {
				if check.TaskName != "" {
					mErr.Errors = append(mErr.Errors, fmt.Errorf("Check %s is invalid: only task group service checks can be assigned tasks", check.Name))
				}
			}
			// Add the service provider to the tracking, if it has not already
			// been seen.
			if _, ok := configuredProviders[service.Provider]; !ok {
				configuredProviders[service.Provider] = struct{}{}
			}
		}
	}
	for i, service := range tg.Services {
		// Add the service provider to the tracking, if it has not already been
		// seen.
		if _, ok := configuredProviders[service.Provider]; !ok {
			configuredProviders[service.Provider] = struct{}{}
		}
		if err := service.Validate(); err != nil {
			outer := fmt.Errorf("Service[%d] %s validation failed: %s", i, service.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
			// we skip the rest of this service's validation to avoid the risk
			// of crashing on null-pointer access in a later step, accepting
			// that we might miss out on error messages to provide the user.
			continue
		}
		if service.AddressMode == AddressModeDriver {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q cannot use address_mode=\"driver\", only services defined in a \"task\" block can use this mode", service.Name))
		}
		for _, check := range service.Checks {
			if check.TaskName != "" {
				if check.Type != ServiceCheckScript && check.Type != ServiceCheckGRPC {
					mErr.Errors = append(mErr.Errors,
						fmt.Errorf("Check %s invalid: only script and gRPC checks should have tasks", check.Name))
				}
				// Bug fix: this error previously reported the service's name
				// even though the message refers to the check.
				if check.AddressMode == AddressModeDriver {
					mErr.Errors = append(mErr.Errors, fmt.Errorf("Check %q invalid: cannot use address_mode=\"driver\", only checks defined in a \"task\" service block can use this mode", check.Name))
				}
				if _, ok := knownTasks[check.TaskName]; !ok {
					mErr.Errors = append(mErr.Errors,
						fmt.Errorf("Check %s invalid: refers to non-existent task %s", check.Name, check.TaskName))
				}
			}
		}
	}
	// The initial feature release of native service discovery only allows for
	// a single service provider to be used across all services in a task
	// group.
	if len(configuredProviders) > 1 {
		mErr.Errors = append(mErr.Errors,
			errors.New("Multiple service providers used: task group services must use the same provider"))
	}
	return mErr.ErrorOrNil()
}
// validateScriptChecksInGroupServices ensures group-level services with script
// checks know what task driver to use. Either the service.task or service.check.task
// parameter must be configured.
func (tg *TaskGroup) validateScriptChecksInGroupServices() error {
	var mErr multierror.Error
	for _, service := range tg.Services {
		if service.TaskName != "" {
			// The service-level task parameter covers every check.
			continue
		}
		for _, check := range service.Checks {
			if check.Type != "script" || check.TaskName != "" {
				continue
			}
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("Service [%s]->%s or Check %s must specify task parameter",
					tg.Name, service.Name, check.Name,
				))
		}
	}
	return mErr.ErrorOrNil()
}
// validateScalingPolicy ensures that the scaling policy has consistent
// min and max, not in conflict with the task group count
func (tg *TaskGroup) validateScalingPolicy(j *Job) error {
	if tg.Scaling == nil {
		return nil
	}
	var mErr multierror.Error
	err := tg.Scaling.Validate()
	if err != nil {
		// prefix scaling policy errors
		if me, ok := err.(*multierror.Error); ok {
			for _, e := range me.Errors {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("Scaling policy invalid: %s", e))
			}
		} else {
			// Bug fix: a plain (non-multierror) validation error used to be
			// silently discarded; surface it with the same prefix.
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Scaling policy invalid: %s", err))
		}
	}
	if tg.Scaling.Max < int64(tg.Count) {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("Scaling policy invalid: task group count must not be greater than maximum count in scaling policy"))
	}
	// Multiregion jobs may legitimately carry count 0 in the "global" region,
	// so the min-count check is skipped in that case.
	if int64(tg.Count) < tg.Scaling.Min && !(j.IsMultiregion() && tg.Count == 0 && j.Region == "global") {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("Scaling policy invalid: task group count must not be less than minimum count in scaling policy"))
	}
	return mErr.ErrorOrNil()
}
// Warnings returns a list of warnings that may be from dubious settings or
// deprecation warnings.
func (tg *TaskGroup) Warnings(j *Job) error {
	var mErr multierror.Error
	// Warn when a destructive update would replace every allocation at once.
	// Multiregion jobs with a zero count are exempt.
	if u := tg.Update; u != nil && u.MaxParallel > tg.Count && !(j.IsMultiregion() && tg.Count == 0) {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("Update max parallel count is greater than task group count (%d > %d). "+
				"A destructive change would result in the simultaneous replacement of all allocations.", u.MaxParallel, tg.Count))
	}
	// The mbits field on the group network is deprecated.
	if len(tg.Networks) > 0 && tg.Networks[0].MBits > 0 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("mbits has been deprecated as of Nomad 0.12.0. Please remove mbits from the network block"))
	}
	// Collect per-task warnings, each prefixed with the task name.
	for _, task := range tg.Tasks {
		if warn := task.Warnings(); warn != nil {
			mErr.Errors = append(mErr.Errors, multierror.Prefix(warn, fmt.Sprintf("Task %q:", task.Name)))
		}
	}
	return mErr.ErrorOrNil()
}
// LookupTask finds a task by name, returning nil when no task in the group
// carries that name.
func (tg *TaskGroup) LookupTask(name string) *Task {
	for i := range tg.Tasks {
		if task := tg.Tasks[i]; task.Name == name {
			return task
		}
	}
	return nil
}
// UsesConnect for convenience returns true if the TaskGroup contains at least
// one service that makes use of Consul Connect features.
//
// Currently used for validating that the task group contains one or more connect
// aware services before generating a service identity token.
func (tg *TaskGroup) UsesConnect() bool {
	for _, svc := range tg.Services {
		connect := svc.Connect
		if connect == nil {
			continue
		}
		if connect.IsNative() || connect.HasSidecar() || connect.IsGateway() {
			return true
		}
	}
	return false
}
// UsesConnectGateway for convenience returns true if the TaskGroup contains at
// least one service that makes use of Consul Connect Gateway features.
func (tg *TaskGroup) UsesConnectGateway() bool {
	for _, svc := range tg.Services {
		if connect := svc.Connect; connect != nil && connect.IsGateway() {
			return true
		}
	}
	return false
}
// GoString implements fmt.GoStringer, printing the dereferenced struct in Go
// syntax for debugging output.
func (tg *TaskGroup) GoString() string {
	return fmt.Sprintf("*%#v", *tg)
}
// CheckRestart describes if and when a task should be restarted based on
// failing health checks. A nil *CheckRestart is valid and is treated as
// "no check-based restarts" by Copy, Equals, and Validate.
type CheckRestart struct {
	Limit          int           // Restart task after this many unhealthy intervals
	Grace          time.Duration // Grace time to give tasks after starting to get healthy
	IgnoreWarnings bool          // If true treat checks in `warning` as passing
}
// Copy returns a duplicate of the CheckRestart, or nil when the receiver is
// nil. All fields are value types, so a struct copy is a complete copy.
func (c *CheckRestart) Copy() *CheckRestart {
	if c == nil {
		return nil
	}
	dup := *c
	return &dup
}
// Equals reports whether two CheckRestart values are identical. Two nil
// pointers compare equal; nil never equals non-nil.
func (c *CheckRestart) Equals(o *CheckRestart) bool {
	if c == nil || o == nil {
		return c == o
	}
	return c.Limit == o.Limit &&
		c.Grace == o.Grace &&
		c.IgnoreWarnings == o.IgnoreWarnings
}
// Validate ensures the CheckRestart configuration contains no negative
// values. A nil CheckRestart is valid.
func (c *CheckRestart) Validate() error {
	if c == nil {
		return nil
	}
	var mErr multierror.Error
	if c.Limit < 0 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("limit must be greater than or equal to 0 but found %d", c.Limit))
	}
	if c.Grace < 0 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("grace period must be greater than or equal to 0 but found %d", c.Grace))
	}
	return mErr.ErrorOrNil()
}
const (
	// DefaultKillTimeout is the default timeout between signaling a task that
	// it will be killed and forcefully killing it. Applied in
	// Task.Canonicalize when KillTimeout is unset.
	DefaultKillTimeout = 5 * time.Second
)
// LogConfig provides configuration for log rotation.
// Validate requires both fields to be at least 1; DefaultLogConfig supplies
// the defaults (10 files of 10MB each).
type LogConfig struct {
	MaxFiles      int // maximum number of rotated log files to keep
	MaxFileSizeMB int // maximum size of each log file, in megabytes
}
// Equals reports whether two LogConfig values are identical. Two nil
// pointers compare equal; nil never equals non-nil.
func (l *LogConfig) Equals(o *LogConfig) bool {
	if l == nil || o == nil {
		return l == o
	}
	return l.MaxFiles == o.MaxFiles &&
		l.MaxFileSizeMB == o.MaxFileSizeMB
}
// Copy returns a duplicate of the LogConfig, or nil when the receiver is nil.
// Both fields are value types, so a struct copy is a complete copy.
func (l *LogConfig) Copy() *LogConfig {
	if l == nil {
		return nil
	}
	dup := *l
	return &dup
}
// DefaultLogConfig returns the default LogConfig values:
// ten rotated files of 10MB each (100MB total log storage).
func DefaultLogConfig() *LogConfig {
	return &LogConfig{
		MaxFiles:      10,
		MaxFileSizeMB: 10,
	}
}
// Validate returns an error if the log config specified are less than
// the minimum allowed. At least one file of at least 1MB is required so the
// task always has somewhere to write logs.
func (l *LogConfig) Validate() error {
	var mErr multierror.Error
	if l.MaxFiles < 1 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum number of files is 1; got %d", l.MaxFiles))
	}
	if l.MaxFileSizeMB < 1 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum file size is 1MB; got %d", l.MaxFileSizeMB))
	}
	return mErr.ErrorOrNil()
}
// Task is a single process typically that is executed as part of a task group.
type Task struct {
	// Name of the task
	Name string
	// Driver is used to control which driver is used
	Driver string
	// User is used to determine which user will run the task. It defaults to
	// the same user the Nomad client is being run as.
	User string
	// Config is provided to the driver to initialize
	Config map[string]interface{}
	// Map of environment variables to be used by the driver
	Env map[string]string
	// List of service definitions exposed by the Task
	Services []*Service
	// Vault is used to define the set of Vault policies that this task should
	// have access to.
	Vault *Vault
	// Templates are the set of templates to be rendered for the task.
	Templates []*Template
	// Constraints can be specified at a task level and apply only to
	// the particular task.
	Constraints []*Constraint
	// Affinities can be specified at the task level to express
	// scheduling preferences
	Affinities []*Affinity
	// Resources is the resources needed by this task
	Resources *Resources
	// RestartPolicy of a TaskGroup. When unset, Canonicalize inherits the
	// group's restart policy.
	RestartPolicy *RestartPolicy
	// DispatchPayload configures how the task retrieves its input from a dispatch
	DispatchPayload *DispatchPayloadConfig
	// Lifecycle, if set, configures when the task runs within the group's
	// lifecycle; validated via TaskLifecycleConfig.Validate.
	Lifecycle *TaskLifecycleConfig
	// Meta is used to associate arbitrary metadata with this
	// task. This is opaque to Nomad.
	Meta map[string]string
	// KillTimeout is the time between signaling a task that it will be
	// killed and killing it. Defaults to DefaultKillTimeout in Canonicalize.
	KillTimeout time.Duration
	// LogConfig provides configuration for log rotation
	LogConfig *LogConfig
	// Artifacts is a list of artifacts to download and extract before running
	// the task.
	Artifacts []*TaskArtifact
	// Leader marks the task as the leader within the group. When the leader
	// task exits, other tasks will be gracefully terminated.
	Leader bool
	// ShutdownDelay is the duration of the delay between deregistering a
	// task from Consul and sending it a signal to shutdown. See #2441
	ShutdownDelay time.Duration
	// VolumeMounts is a list of Volume name <-> mount configurations that will be
	// attached to this task.
	VolumeMounts []*VolumeMount
	// ScalingPolicies is a list of scaling policies scoped to this task
	ScalingPolicies []*ScalingPolicy
	// KillSignal is the kill signal to use for the task. This is an optional
	// specification and defaults to SIGINT
	KillSignal string
	// Used internally to manage tasks according to their TaskKind. Initial use case
	// is for Consul Connect
	Kind TaskKind
	// CSIPluginConfig is used to configure the plugin supervisor for the task.
	CSIPluginConfig *TaskCSIPluginConfig
}
// UsesConnect is for conveniently detecting if the Task is able to make use
// of Consul Connect features. This will be indicated in the TaskKind of the
// Task, which exports known types of Tasks. UsesConnect will be true if the
// task is a connect proxy, connect native, or is a connect gateway.
// See also UsesConnectSidecar.
func (t *Task) UsesConnect() bool {
	return t.Kind.IsConnectNative() || t.UsesConnectSidecar()
}
// UsesConnectSidecar returns true if the task is a Connect proxy or any of
// the Connect gateway kinds, as indicated by its TaskKind.
func (t *Task) UsesConnectSidecar() bool {
	return t.Kind.IsConnectProxy() || t.Kind.IsAnyConnectGateway()
}
// Copy returns a deep copy of the task, or nil when the receiver is nil.
func (t *Task) Copy() *Task {
	if t == nil {
		return nil
	}
	// Shallow-copy first, then replace every reference-typed field with a
	// deep copy so the clone shares no mutable state with the original.
	nt := new(Task)
	*nt = *t
	nt.Env = helper.CopyMapStringString(nt.Env)
	if t.Services != nil {
		services := make([]*Service, len(nt.Services))
		for i, s := range nt.Services {
			services[i] = s.Copy()
		}
		nt.Services = services
	}
	nt.Constraints = CopySliceConstraints(nt.Constraints)
	nt.Affinities = CopySliceAffinities(nt.Affinities)
	nt.VolumeMounts = CopySliceVolumeMount(nt.VolumeMounts)
	nt.CSIPluginConfig = nt.CSIPluginConfig.Copy()
	nt.Vault = nt.Vault.Copy()
	nt.Resources = nt.Resources.Copy()
	nt.LogConfig = nt.LogConfig.Copy()
	nt.Meta = helper.CopyMapStringString(nt.Meta)
	nt.DispatchPayload = nt.DispatchPayload.Copy()
	nt.Lifecycle = nt.Lifecycle.Copy()
	if t.Artifacts != nil {
		artifacts := make([]*TaskArtifact, 0, len(t.Artifacts))
		for _, a := range nt.Artifacts {
			artifacts = append(artifacts, a.Copy())
		}
		nt.Artifacts = artifacts
	}
	// Config can hold arbitrarily nested driver configuration, so use a
	// reflection-based deep copy. A failure indicates an uncopyable value
	// (programmer error), hence the panic.
	if i, err := copystructure.Copy(nt.Config); err != nil {
		panic(err.Error())
	} else {
		nt.Config = i.(map[string]interface{})
	}
	if t.Templates != nil {
		templates := make([]*Template, len(t.Templates))
		for i, tmpl := range nt.Templates {
			templates[i] = tmpl.Copy()
		}
		nt.Templates = templates
	}
	return nt
}
// Canonicalize canonicalizes fields in the task: nil-ing empty maps, filling
// in inherited and default values, and canonicalizing nested structures.
func (t *Task) Canonicalize(job *Job, tg *TaskGroup) {
	// Ensure that an empty and nil map are treated the same to avoid scheduling
	// problems since we use reflect DeepEquals.
	if len(t.Meta) == 0 {
		t.Meta = nil
	}
	if len(t.Config) == 0 {
		t.Config = nil
	}
	if len(t.Env) == 0 {
		t.Env = nil
	}
	// Services inherit the job/group/task naming context and namespace.
	for _, service := range t.Services {
		service.Canonicalize(job.Name, tg.Name, t.Name, job.Namespace)
	}
	// If Resources are nil initialize them to defaults, otherwise canonicalize
	if t.Resources == nil {
		t.Resources = DefaultResources()
	} else {
		t.Resources.Canonicalize()
	}
	// A task without its own restart policy inherits the group's.
	if t.RestartPolicy == nil {
		t.RestartPolicy = tg.RestartPolicy
	}
	// Set the default timeout if it is not specified.
	if t.KillTimeout == 0 {
		t.KillTimeout = DefaultKillTimeout
	}
	if t.Vault != nil {
		t.Vault.Canonicalize()
	}
	for _, template := range t.Templates {
		template.Canonicalize()
	}
}
// GoString implements fmt.GoStringer, printing the dereferenced struct in Go
// syntax for debugging output.
func (t *Task) GoString() string {
	return fmt.Sprintf("*%#v", *t)
}
// Validate is used to check a task for reasonable configuration: name and
// driver presence, non-negative timeouts, valid resources, logs, constraints,
// affinities, services, artifacts, Vault, templates, dispatch payload,
// lifecycle, Connect proxy requirements, volume mounts, and CSI plugin config.
// All problems found are accumulated into a single multierror.
func (t *Task) Validate(ephemeralDisk *EphemeralDisk, jobType string, tgServices []*Service, tgNetworks Networks) error {
	var mErr multierror.Error
	if t.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task name"))
	}
	if strings.ContainsAny(t.Name, `/\`) {
		// We enforce this so that when creating the directory on disk it will
		// not have any slashes.
		mErr.Errors = append(mErr.Errors, errors.New("Task name cannot include slashes"))
	}
	// Bug fix: this was previously an "else if", so a name containing both a
	// slash and a null byte only reported the slash problem.
	if strings.Contains(t.Name, "\000") {
		mErr.Errors = append(mErr.Errors, errors.New("Task name cannot include null characters"))
	}
	if t.Driver == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task driver"))
	}
	if t.KillTimeout < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("KillTimeout must be a positive value"))
	}
	if t.ShutdownDelay < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("ShutdownDelay must be a positive value"))
	}
	// Validate the resources.
	if t.Resources == nil {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task resources"))
	} else if err := t.Resources.Validate(); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}
	// Validate the log config
	if t.LogConfig == nil {
		mErr.Errors = append(mErr.Errors, errors.New("Missing Log Config"))
	} else if err := t.LogConfig.Validate(); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}
	// Task-level constraints may not use operands that only make sense
	// across multiple allocations.
	for idx, constr := range t.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
		switch constr.Operand {
		case ConstraintDistinctHosts, ConstraintDistinctProperty:
			outer := fmt.Errorf("Constraint %d has disallowed Operand at task level: %s", idx+1, constr.Operand)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}
	if jobType == JobTypeSystem {
		if t.Affinities != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza"))
		}
	} else {
		for idx, affinity := range t.Affinities {
			if err := affinity.Validate(); err != nil {
				outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err)
				mErr.Errors = append(mErr.Errors, outer)
			}
		}
	}
	// Validate Services
	if err := validateServices(t, tgNetworks); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}
	// Log storage lives on the ephemeral disk, so it must fit within it.
	if t.LogConfig != nil && ephemeralDisk != nil {
		logUsage := (t.LogConfig.MaxFiles * t.LogConfig.MaxFileSizeMB)
		if ephemeralDisk.SizeMB <= logUsage {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("log storage (%d MB) must be less than requested disk capacity (%d MB)",
					logUsage, ephemeralDisk.SizeMB))
		}
	}
	for idx, artifact := range t.Artifacts {
		if err := artifact.Validate(); err != nil {
			outer := fmt.Errorf("Artifact %d validation failed: %v", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}
	if t.Vault != nil {
		if err := t.Vault.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Vault validation failed: %v", err))
		}
	}
	// Each template must render to a unique destination path.
	destinations := make(map[string]int, len(t.Templates))
	for idx, tmpl := range t.Templates {
		if err := tmpl.Validate(); err != nil {
			outer := fmt.Errorf("Template %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
		if other, ok := destinations[tmpl.DestPath]; ok {
			outer := fmt.Errorf("Template %d has same destination as %d", idx+1, other)
			mErr.Errors = append(mErr.Errors, outer)
		} else {
			destinations[tmpl.DestPath] = idx + 1
		}
	}
	// Validate the dispatch payload block if there
	if t.DispatchPayload != nil {
		if err := t.DispatchPayload.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Payload validation failed: %v", err))
		}
	}
	// Validate the Lifecycle block if there
	if t.Lifecycle != nil {
		if err := t.Lifecycle.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Lifecycle validation failed: %v", err))
		}
	}
	// Validation for TaskKind field which is used for Consul Connect integration
	if t.Kind.IsConnectProxy() {
		// This task is a Connect proxy so it should not have service stanzas
		if len(t.Services) > 0 {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have a service stanza"))
		}
		if t.Leader {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have leader set"))
		}
		// Ensure the proxy task has a corresponding service entry
		serviceErr := ValidateConnectProxyService(t.Kind.Value(), tgServices)
		if serviceErr != nil {
			mErr.Errors = append(mErr.Errors, serviceErr)
		}
	}
	// Validation for volumes
	for idx, vm := range t.VolumeMounts {
		if !MountPropagationModeIsValid(vm.PropagationMode) {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Volume Mount (%d) has an invalid propagation mode: \"%s\"", idx, vm.PropagationMode))
		}
	}
	// Validate CSI Plugin Config
	if t.CSIPluginConfig != nil {
		if t.CSIPluginConfig.ID == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("CSIPluginConfig must have a non-empty PluginID"))
		}
		if !CSIPluginTypeIsValid(t.CSIPluginConfig.Type) {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("CSIPluginConfig PluginType must be one of 'node', 'controller', or 'monolith', got: \"%s\"", t.CSIPluginConfig.Type))
		}
		// TODO: Investigate validation of the PluginMountDir. Not much we can do apart from check IsAbs until after we understand its execution environment though :(
	}
	return mErr.ErrorOrNil()
}
// validateServices takes a task and validates the services within it are valid
// and reference ports that exist. It also rejects group-only settings
// (address_mode="alloc", connect blocks) and duplicate service/check names.
func validateServices(t *Task, tgNetworks Networks) error {
	var mErr multierror.Error
	// Ensure that services don't ask for nonexistent ports and their names are
	// unique. servicePorts maps port label -> set of service names using it.
	servicePorts := make(map[string]map[string]struct{})
	addServicePort := func(label, service string) {
		if _, ok := servicePorts[label]; !ok {
			servicePorts[label] = map[string]struct{}{}
		}
		servicePorts[label][service] = struct{}{}
	}
	knownServices := make(map[string]struct{})
	for i, service := range t.Services {
		if err := service.Validate(); err != nil {
			outer := fmt.Errorf("service[%d] %+q validation failed: %s", i, service.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
		if service.AddressMode == AddressModeAlloc {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q cannot use address_mode=\"alloc\", only services defined in a \"group\" block can use this mode", service.Name))
		}
		// Ensure that services with the same name are not being registered for
		// the same port
		if _, ok := knownServices[service.Name+service.PortLabel]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q is duplicate", service.Name))
		}
		knownServices[service.Name+service.PortLabel] = struct{}{}
		if service.PortLabel != "" {
			if service.AddressMode == "driver" {
				// Numeric port labels are valid for address_mode=driver
				_, err := strconv.Atoi(service.PortLabel)
				if err != nil {
					// Not a numeric port label, add it to list to check
					addServicePort(service.PortLabel, service.Name)
				}
			} else {
				addServicePort(service.PortLabel, service.Name)
			}
		}
		// connect block is only allowed on group level
		if service.Connect != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q cannot have \"connect\" block, only services defined in a \"group\" block can", service.Name))
		}
		// Ensure that check names are unique and have valid ports
		knownChecks := make(map[string]struct{})
		for _, check := range service.Checks {
			if _, ok := knownChecks[check.Name]; ok {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is duplicate", check.Name))
			}
			knownChecks[check.Name] = struct{}{}
			// Bug fix: this error previously interpolated the service name
			// even though the message refers to the check.
			if check.AddressMode == AddressModeAlloc {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q cannot use address_mode=\"alloc\", only checks defined in a \"group\" service block can use this mode", check.Name))
			}
			if !check.RequiresPort() {
				// No need to continue validating check if it doesn't need a port
				continue
			}
			effectivePort := check.PortLabel
			if effectivePort == "" {
				// Inherits from service
				effectivePort = service.PortLabel
			}
			if effectivePort == "" {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is missing a port", check.Name))
				continue
			}
			isNumeric := false
			portNumber, err := strconv.Atoi(effectivePort)
			if err == nil {
				isNumeric = true
			}
			// Numeric ports are fine for address_mode = "driver"
			if check.AddressMode == "driver" && isNumeric {
				if portNumber <= 0 {
					mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q has invalid numeric port %d", check.Name, portNumber))
				}
				continue
			}
			if isNumeric {
				mErr.Errors = append(mErr.Errors, fmt.Errorf(`check %q cannot use a numeric port %d without setting address_mode="driver"`, check.Name, portNumber))
				continue
			}
			// PortLabel must exist, report errors by its parent service
			addServicePort(effectivePort, service.Name)
		}
	}
	// Get the set of group port labels.
	portLabels := make(map[string]struct{})
	if len(tgNetworks) > 0 {
		ports := tgNetworks[0].PortLabels()
		for portLabel := range ports {
			portLabels[portLabel] = struct{}{}
		}
	}
	// COMPAT(0.13)
	// Append the set of task port labels. (Note that network resources on the
	// task resources are deprecated, but we must let them continue working; a
	// warning will be emitted on job submission).
	if t.Resources != nil {
		for _, network := range t.Resources.Networks {
			for portLabel := range network.PortLabels() {
				portLabels[portLabel] = struct{}{}
			}
		}
	}
	// Iterate over a sorted list of keys to make error listings stable
	keys := make([]string, 0, len(servicePorts))
	for p := range servicePorts {
		keys = append(keys, p)
	}
	sort.Strings(keys)
	// Ensure all ports referenced in services exist.
	for _, servicePort := range keys {
		services := servicePorts[servicePort]
		_, ok := portLabels[servicePort]
		if !ok {
			names := make([]string, 0, len(services))
			for name := range services {
				names = append(names, name)
			}
			// Keep order deterministic
			sort.Strings(names)
			joined := strings.Join(names, ", ")
			err := fmt.Errorf("port label %q referenced by services %v does not exist", servicePort, joined)
			mErr.Errors = append(mErr.Errors, err)
		}
	}
	return mErr.ErrorOrNil()
}
// Warnings returns deprecation warnings for the task: IOPS and task-level
// network resources, plus any warnings from the task's templates.
func (t *Task) Warnings() error {
	var mErr multierror.Error
	if res := t.Resources; res != nil {
		if res.IOPS != 0 {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("IOPS has been deprecated as of Nomad 0.9.0. Please remove IOPS from resource stanza."))
		}
		if len(res.Networks) != 0 {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("task network resources have been deprecated as of Nomad 0.12.0. Please configure networking via group network block."))
		}
	}
	for idx, tmpl := range t.Templates {
		if warn := tmpl.Warnings(); warn != nil {
			mErr.Errors = append(mErr.Errors, multierror.Prefix(warn, fmt.Sprintf("Template[%d]", idx)))
		}
	}
	return mErr.ErrorOrNil()
}
// TaskKind identifies the special kinds of tasks using the following format:
// `<kind_name>(:<identifier>)`. The TaskKind can optionally include an identifier that
// is opaque to the Task. This identifier can be used to relate the task to some
// other entity based on the kind.
//
// For example, a task may have the TaskKind of `connect-proxy:service` where
// 'connect-proxy' is the kind name and 'service' is the identifier that relates the
// task to the service name of which it is a connect proxy for.
type TaskKind string
// NewTaskKind builds a TaskKind from a kind name and its identifier, joined
// by a colon (e.g. "connect-proxy:web").
func NewTaskKind(name, identifier string) TaskKind {
	return TaskKind(fmt.Sprintf("%s:%s", name, identifier))
}
// Name returns the kind name portion of the TaskKind: everything before the
// first colon, or the whole string when no colon is present.
func (k TaskKind) Name() string {
	s := string(k)
	if i := strings.Index(s, ":"); i >= 0 {
		return s[:i]
	}
	return s
}
// Value returns the identifier of the TaskKind or an empty string if it doesn't
// include one. Only the first colon splits; the identifier may itself contain
// colons.
func (k TaskKind) Value() string {
	s := string(k)
	if i := strings.Index(s, ":"); i >= 0 {
		return s[i+1:]
	}
	return ""
}
// hasPrefix reports whether the TaskKind has the form "<prefix>:<identifier>"
// with a non-empty identifier (the length check rules out a bare "<prefix>:").
func (k TaskKind) hasPrefix(prefix string) bool {
	return strings.HasPrefix(string(k), prefix+":") && len(k) > len(prefix)+1
}
// IsConnectProxy returns true if the TaskKind is connect-proxy, i.e. has the
// form "connect-proxy:<service>".
func (k TaskKind) IsConnectProxy() bool {
	return k.hasPrefix(ConnectProxyPrefix)
}
// IsConnectNative returns true if the TaskKind is connect-native, i.e. has
// the form "connect-native:<service>".
func (k TaskKind) IsConnectNative() bool {
	return k.hasPrefix(ConnectNativePrefix)
}
// IsConnectIngress returns true if the TaskKind is connect-ingress, i.e. has
// the form "connect-ingress:<service>".
func (k TaskKind) IsConnectIngress() bool {
	return k.hasPrefix(ConnectIngressPrefix)
}
// IsConnectTerminating returns true if the TaskKind is connect-terminating,
// i.e. has the form "connect-terminating:<service>".
func (k TaskKind) IsConnectTerminating() bool {
	return k.hasPrefix(ConnectTerminatingPrefix)
}
// IsConnectMesh returns true if the TaskKind is connect-mesh, i.e. has the
// form "connect-mesh:<service>".
func (k TaskKind) IsConnectMesh() bool {
	return k.hasPrefix(ConnectMeshPrefix)
}
// IsAnyConnectGateway returns true if the TaskKind represents any one of the
// supported connect gateway types: ingress, terminating, or mesh.
func (k TaskKind) IsAnyConnectGateway() bool {
	return k.IsConnectIngress() ||
		k.IsConnectTerminating() ||
		k.IsConnectMesh()
}
// Prefixes used in TaskKind values for Consul Connect tasks; see
// TaskKind.hasPrefix and the IsConnect* predicates.
const (
	// ConnectProxyPrefix is the prefix used for fields referencing a Consul Connect
	// Proxy
	ConnectProxyPrefix = "connect-proxy"
	// ConnectNativePrefix is the prefix used for fields referencing a Connect
	// Native Task
	ConnectNativePrefix = "connect-native"
	// ConnectIngressPrefix is the prefix used for fields referencing a Consul
	// Connect Ingress Gateway Proxy.
	ConnectIngressPrefix = "connect-ingress"
	// ConnectTerminatingPrefix is the prefix used for fields referencing a Consul
	// Connect Terminating Gateway Proxy.
	ConnectTerminatingPrefix = "connect-terminating"
	// ConnectMeshPrefix is the prefix used for fields referencing a Consul Connect
	// Mesh Gateway Proxy.
	ConnectMeshPrefix = "connect-mesh"
)
// ValidateConnectProxyService checks that the service that is being
// proxied by this task exists in the task group and contains
// valid Connect config.
func ValidateConnectProxyService(serviceName string, tgServices []*Service) error {
	// Track every Connect sidecar service seen so a helpful error can be
	// produced when none of them match.
	candidates := make([]string, 0, len(tgServices))
	for _, svc := range tgServices {
		// Only services with a Connect sidecar are eligible proxy targets.
		if svc.Connect == nil || svc.Connect.SidecarService == nil {
			continue
		}
		if svc.Name == serviceName {
			return nil
		}
		// Build up list of mismatched Connect service names for error
		// reporting.
		candidates = append(candidates, svc.Name)
	}
	if len(candidates) == 0 {
		return fmt.Errorf("No Connect services in task group with Connect proxy (%q)", serviceName)
	}
	return fmt.Errorf("Connect proxy service name (%q) not found in Connect services from task group: %s", serviceName, candidates)
}
// Valid values for Template.ChangeMode.
const (
	// TemplateChangeModeNoop marks that no action should be taken if the
	// template is re-rendered
	TemplateChangeModeNoop = "noop"
	// TemplateChangeModeSignal marks that the task should be signaled if the
	// template is re-rendered
	TemplateChangeModeSignal = "signal"
	// TemplateChangeModeRestart marks that the task should be restarted if the
	// template is re-rendered
	TemplateChangeModeRestart = "restart"
)
var (
	// TemplateChangeModeInvalidError is the error for when an invalid change
	// mode is given. Returned by Template.Validate.
	TemplateChangeModeInvalidError = errors.New("Invalid change mode. Must be one of the following: noop, signal, restart")
)
// Template represents a template configuration to be rendered for a given task
type Template struct {
	// SourcePath is the path to the template to be rendered
	SourcePath string
	// DestPath is the path to where the template should be rendered; it must
	// be unique within a task (see Task.Validate) and must not escape the
	// allocation directory.
	DestPath string
	// EmbeddedTmpl store the raw template. This is useful for smaller templates
	// where they are embedded in the job file rather than sent as an artifact
	EmbeddedTmpl string
	// ChangeMode indicates what should be done if the template is re-rendered;
	// one of the TemplateChangeMode* constants.
	ChangeMode string
	// ChangeSignal is the signal that should be sent if the change mode
	// requires it. Normalized to upper case in Canonicalize.
	ChangeSignal string
	// Splay is used to avoid coordinated restarts of processes by applying a
	// random wait between 0 and the given splay value before signalling the
	// application of a change
	Splay time.Duration
	// Perms is the permission the file should be written out with, as an
	// octal string (e.g. "0644").
	Perms string
	// LeftDelim and RightDelim are optional configurations to control what
	// delimiter is utilized when parsing the template.
	LeftDelim  string
	RightDelim string
	// Envvars enables exposing the template as environment variables
	// instead of as a file. The template must be of the form:
	//
	//	VAR_NAME_1={{ key service/my-key }}
	//	VAR_NAME_2=raw string and {{ env "attr.kernel.name" }}
	//
	// Lines will be split on the initial "=" with the first part being the
	// key name and the second part the value.
	// Empty lines and lines starting with # will be ignored, but to avoid
	// escaping issues #s within lines will not be treated as comments.
	Envvars bool
	// VaultGrace is the grace duration between lease renewal and reacquiring a
	// secret. If the lease of a secret is less than the grace, a new secret is
	// acquired.
	// COMPAT(0.12) VaultGrace has been ignored by Vault since Vault v0.5.
	VaultGrace time.Duration
	// WaitConfig is used to override the global WaitConfig on a per-template basis
	Wait *WaitConfig
}
// DefaultTemplate returns a default template: restart the task on re-render,
// a 5 second splay, and 0644 file permissions.
func DefaultTemplate() *Template {
	return &Template{
		ChangeMode: TemplateChangeModeRestart,
		Splay:      5 * time.Second,
		Perms:      "0644",
	}
}
// Copy returns a deep copy of the template, or nil when the receiver is nil.
func (t *Template) Copy() *Template {
	if t == nil {
		return nil
	}
	// Struct copy covers all value fields; Wait is the only pointer field
	// that needs its own deep copy.
	clone := *t
	if t.Wait != nil {
		clone.Wait = t.Wait.Copy()
	}
	return &clone
}
// Canonicalize normalizes the template's fields; currently this upper-cases
// the change signal (e.g. "sighup" -> "SIGHUP").
func (t *Template) Canonicalize() {
	if t.ChangeSignal != "" {
		t.ChangeSignal = strings.ToUpper(t.ChangeSignal)
	}
}
// Validate checks the template for problems: a missing source, a missing or
// escaping destination, an invalid change mode, a negative splay, malformed
// permissions, or an invalid wait config. All problems are accumulated into
// a single multierror.
func (t *Template) Validate() error {
	var mErr multierror.Error
	// Verify we have something to render
	if t.SourcePath == "" && t.EmbeddedTmpl == "" {
		_ = multierror.Append(&mErr, fmt.Errorf("Must specify a source path or have an embedded template"))
	}
	// Verify we can render somewhere
	if t.DestPath == "" {
		_ = multierror.Append(&mErr, fmt.Errorf("Must specify a destination for the template"))
	}
	// Verify the destination doesn't escape the allocation directory.
	escaped, err := escapingfs.PathEscapesAllocViaRelative("task", t.DestPath)
	if err != nil {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
	} else if escaped {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
	}
	// Verify a proper change mode; signal mode additionally requires a signal
	// name and is incompatible with env var templates.
	switch t.ChangeMode {
	case TemplateChangeModeNoop, TemplateChangeModeRestart:
	case TemplateChangeModeSignal:
		if t.ChangeSignal == "" {
			_ = multierror.Append(&mErr, fmt.Errorf("Must specify signal value when change mode is signal"))
		}
		if t.Envvars {
			_ = multierror.Append(&mErr, fmt.Errorf("cannot use signals with env var templates"))
		}
	default:
		_ = multierror.Append(&mErr, TemplateChangeModeInvalidError)
	}
	// Verify the splay is positive
	if t.Splay < 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("Must specify positive splay value"))
	}
	// Verify the permissions parse as octal; 12 bits leaves room for the
	// setuid/setgid/sticky bits in addition to the rwx triplets.
	if t.Perms != "" {
		if _, err := strconv.ParseUint(t.Perms, 8, 12); err != nil {
			_ = multierror.Append(&mErr, fmt.Errorf("Failed to parse %q as octal: %v", t.Perms, err))
		}
	}
	// NOTE(review): t.Wait may be nil here; Wait.Validate is assumed to be
	// nil-safe — confirm against WaitConfig.Validate.
	if err = t.Wait.Validate(); err != nil {
		_ = multierror.Append(&mErr, err)
	}
	return mErr.ErrorOrNil()
}
// Warnings returns non-fatal advisories for the template; currently only a
// deprecation notice for the ignored vault_grace setting.
func (t *Template) Warnings() error {
	var warnings multierror.Error
	// Deprecation notice for vault_grace
	if t.VaultGrace != 0 {
		warn := fmt.Errorf("VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template stanza.")
		warnings.Errors = append(warnings.Errors, warn)
	}
	return warnings.ErrorOrNil()
}
// DiffID fulfills the DiffableWithID interface. The destination path is the
// identity used to pair up templates when diffing two job versions.
func (t *Template) DiffID() string {
	return t.DestPath
}
// WaitConfig is the Min/Max duration used by the Consul Template Watcher. Consul
// Template relies on pointer based business logic. This struct uses pointers so
// that we tell the difference between zero values and unset values.
type WaitConfig struct {
	// Min is the lower bound of the watcher's wait; nil means unset.
	Min *time.Duration
	// Max is the upper bound of the watcher's wait; nil means unset.
	Max *time.Duration
}
// Copy returns a deep copy of this configuration. A nil receiver yields nil.
// The Min/Max durations are duplicated so mutating the copy's values cannot
// affect the original.
func (wc *WaitConfig) Copy() *WaitConfig {
	if wc == nil {
		return nil
	}
	nwc := new(WaitConfig)
	if wc.Min != nil {
		// Note: the previous `&*wc.Min` form is identical to `wc.Min` in Go
		// and aliased the original pointer; copy the value instead.
		min := *wc.Min
		nwc.Min = &min
	}
	if wc.Max != nil {
		max := *wc.Max
		nwc.Max = &max
	}
	return nwc
}
// Equals reports whether two wait configurations carry the same Min/Max
// values, treating nil and set pointers as distinct. Unlike the previous
// version this is safe to call with a nil receiver or argument: two nils
// are equal, a nil and a non-nil are not.
func (wc *WaitConfig) Equals(o *WaitConfig) bool {
	if wc == nil || o == nil {
		// Equal only when both are nil.
		return wc == o
	}
	if wc.Min == nil && o.Min != nil {
		return false
	}
	if wc.Max == nil && o.Max != nil {
		return false
	}
	if wc.Min != nil && (o.Min == nil || *wc.Min != *o.Min) {
		return false
	}
	if wc.Max != nil && (o.Max == nil || *wc.Max != *o.Max) {
		return false
	}
	return true
}
// Validate checks that Min does not exceed Max. A nil receiver, or a config
// where either bound is unset, is trivially valid.
func (wc *WaitConfig) Validate() error {
	// If either one is nil, they aren't comparable, so they can't be invalid.
	if wc == nil || wc.Min == nil || wc.Max == nil {
		return nil
	}
	if *wc.Min <= *wc.Max {
		return nil
	}
	return fmt.Errorf("wait min %s is greater than max %s", wc.Min, wc.Max)
}
// AllocStateField identifies which attribute of the allocation an
// AllocState event changed.
type AllocStateField uint8
const (
	// AllocStateFieldClientStatus marks a change to the client status.
	AllocStateFieldClientStatus AllocStateField = iota
)
// AllocState records a single event that changes the state of the whole
// allocation: which field changed, its new value, and when.
type AllocState struct {
	Field AllocStateField
	Value string
	Time  time.Time
}
// TaskHandle is optional handle to a task propagated to the servers for use
// by remote tasks. Since remote tasks are not implicitly lost when the node
// they are assigned to is down, their state is migrated to the replacement
// allocation.
//
// Minimal set of fields from plugins/drivers/task_handle.go:TaskHandle
type TaskHandle struct {
	// Version of driver state. Used by the driver to gracefully handle
	// plugin upgrades.
	Version int
	// Driver-specific state containing a handle to the remote task.
	DriverState []byte
}
// Copy returns a deep copy of the handle, duplicating the driver state
// bytes. A nil receiver yields nil.
func (h *TaskHandle) Copy() *TaskHandle {
	if h == nil {
		return nil
	}
	state := make([]byte, len(h.DriverState))
	copy(state, h.DriverState)
	return &TaskHandle{
		Version:     h.Version,
		DriverState: state,
	}
}
// Set of possible states for a task.
const (
	TaskStatePending = "pending" // The task is waiting to be run.
	TaskStateRunning = "running" // The task is currently running.
	TaskStateDead    = "dead"    // Terminal state of task.
)
// TaskState tracks the current state of a task and events that caused state
// transitions.
type TaskState struct {
	// The current state of the task; one of the TaskState* constants above.
	State string
	// Failed marks a task as having failed
	Failed bool
	// Restarts is the number of times the task has restarted
	Restarts uint64
	// LastRestart is the time the task last restarted. It is updated each time the
	// task restarts
	LastRestart time.Time
	// StartedAt is the time the task is started. It is updated each time the
	// task starts
	StartedAt time.Time
	// FinishedAt is the time at which the task transitioned to dead and will
	// not be started again.
	FinishedAt time.Time
	// Series of task events that transition the state of the task.
	Events []*TaskEvent
	// Experimental - TaskHandle is based on drivers.TaskHandle and used
	// by remote task drivers to migrate task handles between allocations.
	TaskHandle *TaskHandle
}
// NewTaskState returns a TaskState initialized in the Pending state.
func NewTaskState() *TaskState {
	ts := new(TaskState)
	ts.State = TaskStatePending
	return ts
}
// Canonicalize ensures the TaskState has a State set, defaulting an empty
// state to Pending.
func (ts *TaskState) Canonicalize() {
	if len(ts.State) == 0 {
		ts.State = TaskStatePending
	}
}
// Copy returns a deep copy of the task state, including the event slice and
// the optional task handle. A nil receiver yields nil.
func (ts *TaskState) Copy() *TaskState {
	if ts == nil {
		return nil
	}
	clone := *ts
	if ts.Events != nil {
		events := make([]*TaskEvent, len(ts.Events))
		for i := range ts.Events {
			events[i] = ts.Events[i].Copy()
		}
		clone.Events = events
	}
	clone.TaskHandle = ts.TaskHandle.Copy()
	return &clone
}
// Successful returns whether a task finished successfully: it reached the
// dead state without being marked failed. Only meaningful for batch
// allocations or ephemeral (non-sidecar) lifecycle tasks part of a service
// or system allocation.
func (ts *TaskState) Successful() bool {
	if ts.State != TaskStateDead {
		return false
	}
	return !ts.Failed
}
// Task event types recorded in TaskEvent.Type.
const (
	// TaskSetupFailure indicates that the task could not be started due to a
	// setup failure.
	TaskSetupFailure = "Setup Failure"
	// TaskDriverFailure indicates that the task could not be started due to a
	// failure in the driver. TaskDriverFailure is considered Recoverable.
	TaskDriverFailure = "Driver Failure"
	// TaskReceived signals that the task has been pulled by the client at the
	// given timestamp.
	TaskReceived = "Received"
	// TaskFailedValidation indicates the task was invalid and as such was not run.
	// TaskFailedValidation is not considered Recoverable.
	TaskFailedValidation = "Failed Validation"
	// TaskStarted signals that the task was started and its timestamp can be
	// used to determine the running length of the task.
	TaskStarted = "Started"
	// TaskTerminated indicates that the task was started and exited.
	TaskTerminated = "Terminated"
	// TaskKilling indicates a kill signal has been sent to the task.
	TaskKilling = "Killing"
	// TaskKilled indicates a user has killed the task.
	TaskKilled = "Killed"
	// TaskRestarting indicates that task terminated and is being restarted.
	TaskRestarting = "Restarting"
	// TaskNotRestarting indicates that the task has failed and is not being
	// restarted because it has exceeded its restart policy.
	TaskNotRestarting = "Not Restarting"
	// TaskRestartSignal indicates that the task has been signalled to be
	// restarted
	TaskRestartSignal = "Restart Signaled"
	// TaskSignaling indicates that the task is being signalled.
	TaskSignaling = "Signaling"
	// TaskDownloadingArtifacts means the task is downloading the artifacts
	// specified in the task.
	TaskDownloadingArtifacts = "Downloading Artifacts"
	// TaskArtifactDownloadFailed indicates that downloading the artifacts
	// failed.
	TaskArtifactDownloadFailed = "Failed Artifact Download"
	// TaskBuildingTaskDir indicates that the task directory/chroot is being
	// built.
	TaskBuildingTaskDir = "Building Task Directory"
	// TaskSetup indicates the task runner is setting up the task environment
	TaskSetup = "Task Setup"
	// TaskDiskExceeded indicates that one of the tasks in a taskgroup has
	// exceeded the requested disk resources.
	TaskDiskExceeded = "Disk Resources Exceeded"
	// TaskSiblingFailed indicates that a sibling task in the task group has
	// failed.
	TaskSiblingFailed = "Sibling Task Failed"
	// TaskDriverMessage is an informational event message emitted by
	// drivers such as when they're performing a long running action like
	// downloading an image.
	TaskDriverMessage = "Driver"
	// TaskLeaderDead indicates that the leader task within the task group
	// has finished.
	TaskLeaderDead = "Leader Task Dead"
	// TaskMainDead indicates that the main tasks have dead
	TaskMainDead = "Main Tasks Dead"
	// TaskHookFailed indicates that one of the hooks for a task failed.
	TaskHookFailed = "Task hook failed"
	// TaskRestoreFailed indicates Nomad was unable to reattach to a
	// restored task.
	TaskRestoreFailed = "Failed Restoring Task"
	// TaskPluginUnhealthy indicates that a plugin managed by Nomad became unhealthy
	TaskPluginUnhealthy = "Plugin became unhealthy"
	// TaskPluginHealthy indicates that a plugin managed by Nomad became healthy
	TaskPluginHealthy = "Plugin became healthy"
	// TaskClientReconnected indicates that the client running the task
	// reconnected.
	TaskClientReconnected = "Reconnected"
)
// TaskEvent is an event that affects the state of a task and contains meta-data
// appropriate to the events type.
type TaskEvent struct {
	Type    string
	Time    int64  // Unix Nanosecond timestamp
	Message string // A possible message explaining the termination of the task.
	// DisplayMessage is a human friendly message about the event
	DisplayMessage string
	// Details is a map with annotated info about the event
	Details map[string]string
	// DEPRECATION NOTICE: The following fields are deprecated and will be removed
	// in a future release. Field values are available in the Details map.
	// FailsTask marks whether this event fails the task.
	// Deprecated, use Details["fails_task"] to access this.
	FailsTask bool
	// Restart fields.
	// Deprecated, use Details["restart_reason"] to access this.
	RestartReason string
	// Setup Failure fields.
	// Deprecated, use Details["setup_error"] to access this.
	SetupError string
	// Driver Failure fields.
	// Deprecated, use Details["driver_error"] to access this.
	DriverError string // A driver error occurred while starting the task.
	// Task Terminated Fields.
	// Deprecated, use Details["exit_code"] to access this.
	ExitCode int // The exit code of the task.
	// Deprecated, use Details["signal"] to access this.
	Signal int // The signal that terminated the task.
	// Killing fields
	// Deprecated, use Details["kill_timeout"] to access this.
	KillTimeout time.Duration
	// Task Killed Fields.
	// Deprecated, use Details["kill_error"] to access this.
	KillError string // Error killing the task.
	// KillReason is the reason the task was killed
	// Deprecated, use Details["kill_reason"] to access this.
	KillReason string
	// TaskRestarting fields.
	// Deprecated, use Details["start_delay"] to access this.
	StartDelay int64 // The sleep period before restarting the task in unix nanoseconds.
	// Artifact Download fields
	// Deprecated, use Details["download_error"] to access this.
	DownloadError string // Error downloading artifacts
	// Validation fields
	// Deprecated, use Details["validation_error"] to access this.
	ValidationError string // Validation error
	// The maximum allowed task disk size.
	// Deprecated, use Details["disk_limit"] to access this.
	DiskLimit int64
	// Name of the sibling task that caused termination of the task that
	// the TaskEvent refers to.
	// Deprecated, use Details["failed_sibling"] to access this.
	FailedSibling string
	// VaultError is the error from token renewal
	// Deprecated, use Details["vault_renewal_error"] to access this.
	VaultError string
	// TaskSignalReason indicates the reason the task is being signalled.
	// Deprecated, use Details["task_signal_reason"] to access this.
	TaskSignalReason string
	// TaskSignal is the signal that was sent to the task
	// Deprecated, use Details["task_signal"] to access this.
	TaskSignal string
	// DriverMessage indicates a driver action being taken.
	// Deprecated, use Details["driver_message"] to access this.
	DriverMessage string
	// GenericSource is the source of a message.
	// Deprecated, is redundant with event type.
	GenericSource string
}
// PopulateEventDisplayMessage derives a human-friendly DisplayMessage from
// the event's type and deprecated detail fields. It is a no-op when the
// event is nil or a display message was already set; unknown event types
// fall back to the raw Message.
func (e *TaskEvent) PopulateEventDisplayMessage() {
	// Build up the description based on the event type.
	if e == nil { //TODO(preetha) needs investigation alloc_runner's Run method sends a nil event when sigterming nomad. Why?
		return
	}
	if e.DisplayMessage != "" {
		return
	}
	var desc string
	switch e.Type {
	case TaskSetup:
		desc = e.Message
	case TaskStarted:
		desc = "Task started by client"
	case TaskReceived:
		desc = "Task received by client"
	case TaskFailedValidation:
		if e.ValidationError != "" {
			desc = e.ValidationError
		} else {
			desc = "Validation of task failed"
		}
	case TaskSetupFailure:
		if e.SetupError != "" {
			desc = e.SetupError
		} else {
			desc = "Task setup failed"
		}
	case TaskDriverFailure:
		if e.DriverError != "" {
			desc = e.DriverError
		} else {
			desc = "Failed to start task"
		}
	case TaskDownloadingArtifacts:
		desc = "Client is downloading artifacts"
	case TaskArtifactDownloadFailed:
		if e.DownloadError != "" {
			desc = e.DownloadError
		} else {
			desc = "Failed to download artifacts"
		}
	case TaskKilling:
		// Prefer the explicit reason, then the timeout, then a generic note.
		if e.KillReason != "" {
			desc = e.KillReason
		} else if e.KillTimeout != 0 {
			desc = fmt.Sprintf("Sent interrupt. Waiting %v before force killing", e.KillTimeout)
		} else {
			desc = "Sent interrupt"
		}
	case TaskKilled:
		if e.KillError != "" {
			desc = e.KillError
		} else {
			desc = "Task successfully killed"
		}
	case TaskTerminated:
		// Always show the exit code; signal and message only when present.
		var parts []string
		parts = append(parts, fmt.Sprintf("Exit Code: %d", e.ExitCode))
		if e.Signal != 0 {
			parts = append(parts, fmt.Sprintf("Signal: %d", e.Signal))
		}
		if e.Message != "" {
			parts = append(parts, fmt.Sprintf("Exit Message: %q", e.Message))
		}
		desc = strings.Join(parts, ", ")
	case TaskRestarting:
		// StartDelay is stored in nanoseconds; render as a Duration.
		in := fmt.Sprintf("Task restarting in %v", time.Duration(e.StartDelay))
		if e.RestartReason != "" && e.RestartReason != ReasonWithinPolicy {
			desc = fmt.Sprintf("%s - %s", e.RestartReason, in)
		} else {
			desc = in
		}
	case TaskNotRestarting:
		if e.RestartReason != "" {
			desc = e.RestartReason
		} else {
			desc = "Task exceeded restart policy"
		}
	case TaskSiblingFailed:
		if e.FailedSibling != "" {
			desc = fmt.Sprintf("Task's sibling %q failed", e.FailedSibling)
		} else {
			desc = "Task's sibling failed"
		}
	case TaskSignaling:
		sig := e.TaskSignal
		reason := e.TaskSignalReason
		if sig == "" && reason == "" {
			desc = "Task being sent a signal"
		} else if sig == "" {
			desc = reason
		} else if reason == "" {
			desc = fmt.Sprintf("Task being sent signal %v", sig)
		} else {
			desc = fmt.Sprintf("Task being sent signal %v: %v", sig, reason)
		}
	case TaskRestartSignal:
		if e.RestartReason != "" {
			desc = e.RestartReason
		} else {
			desc = "Task signaled to restart"
		}
	case TaskDriverMessage:
		desc = e.DriverMessage
	case TaskLeaderDead:
		desc = "Leader Task in Group dead"
	case TaskMainDead:
		desc = "Main tasks in the group died"
	case TaskClientReconnected:
		desc = "Client reconnected"
	default:
		desc = e.Message
	}
	e.DisplayMessage = desc
}
// GoString implements fmt.GoStringer, rendering the event as "<time> - <type>".
func (e *TaskEvent) GoString() string {
	return fmt.Sprintf("%v - %v", e.Time, e.Type)
}
// SetDisplayMessage sets the display message of TaskEvent and returns the
// event for chaining. Unlike the other setters it does not mirror the value
// into the Details map.
func (e *TaskEvent) SetDisplayMessage(msg string) *TaskEvent {
	e.DisplayMessage = msg
	return e
}
// SetMessage sets the message of TaskEvent and mirrors it into the Details
// map under "message". Details must be non-nil (as set up by NewTaskEvent).
func (e *TaskEvent) SetMessage(msg string) *TaskEvent {
	e.Message = msg
	e.Details["message"] = msg
	return e
}
// Copy returns a shallow copy of the event; note the Details map header is
// copied as-is, so the copy shares the same underlying map. A nil receiver
// yields nil.
func (e *TaskEvent) Copy() *TaskEvent {
	if e == nil {
		return nil
	}
	// Renamed from `copy` to avoid shadowing the builtin copy function.
	ne := new(TaskEvent)
	*ne = *e
	return ne
}
// NewTaskEvent returns an event of the given type stamped with the current
// time and an initialized (empty) Details map.
func NewTaskEvent(event string) *TaskEvent {
	e := &TaskEvent{Details: make(map[string]string)}
	e.Type = event
	e.Time = time.Now().UnixNano()
	return e
}
// SetSetupError is used to store an error that occurred while setting up the
// task. A nil error is a no-op.
func (e *TaskEvent) SetSetupError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	msg := err.Error()
	e.SetupError = msg
	e.Details["setup_error"] = msg
	return e
}
// SetFailsTask marks the event as one that fails the task, both in the
// deprecated field and the Details map.
func (e *TaskEvent) SetFailsTask() *TaskEvent {
	e.Details["fails_task"] = "true"
	e.FailsTask = true
	return e
}
// SetDriverError records a driver failure on the event. A nil error is a
// no-op.
func (e *TaskEvent) SetDriverError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	msg := err.Error()
	e.DriverError = msg
	e.Details["driver_error"] = msg
	return e
}
// SetExitCode records the task's exit code on the event and in Details.
func (e *TaskEvent) SetExitCode(c int) *TaskEvent {
	e.Details["exit_code"] = strconv.Itoa(c)
	e.ExitCode = c
	return e
}
// SetSignal records the numeric signal that terminated the task.
func (e *TaskEvent) SetSignal(s int) *TaskEvent {
	e.Details["signal"] = strconv.Itoa(s)
	e.Signal = s
	return e
}
// SetSignalText records a textual signal name in the Details map only;
// unlike SetSignal it does not populate the deprecated Signal field.
func (e *TaskEvent) SetSignalText(s string) *TaskEvent {
	e.Details["signal"] = s
	return e
}
// SetExitMessage stores the task's exit error as the event message and in
// Details. A nil error is a no-op.
func (e *TaskEvent) SetExitMessage(err error) *TaskEvent {
	if err == nil {
		return e
	}
	msg := err.Error()
	e.Message = msg
	e.Details["exit_message"] = msg
	return e
}
// SetKillError records an error encountered while killing the task. A nil
// error is a no-op.
func (e *TaskEvent) SetKillError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	msg := err.Error()
	e.KillError = msg
	e.Details["kill_error"] = msg
	return e
}
// SetKillReason records why the task was killed.
func (e *TaskEvent) SetKillReason(r string) *TaskEvent {
	e.Details["kill_reason"] = r
	e.KillReason = r
	return e
}
// SetRestartDelay records the sleep period before the task restarts, stored
// as nanoseconds in both the field and the Details map.
func (e *TaskEvent) SetRestartDelay(delay time.Duration) *TaskEvent {
	ns := int64(delay)
	e.StartDelay = ns
	e.Details["start_delay"] = strconv.FormatInt(ns, 10)
	return e
}
// SetRestartReason records why the task is being restarted.
func (e *TaskEvent) SetRestartReason(reason string) *TaskEvent {
	e.Details["restart_reason"] = reason
	e.RestartReason = reason
	return e
}
// SetTaskSignalReason records why the task is being signalled.
func (e *TaskEvent) SetTaskSignalReason(r string) *TaskEvent {
	e.Details["task_signal_reason"] = r
	e.TaskSignalReason = r
	return e
}
// SetTaskSignal records the signal sent to the task, stringified once and
// stored in both the field and the Details map.
func (e *TaskEvent) SetTaskSignal(s os.Signal) *TaskEvent {
	sig := s.String()
	e.TaskSignal = sig
	e.Details["task_signal"] = sig
	return e
}
// SetDownloadError records an artifact download failure. A nil error is a
// no-op.
func (e *TaskEvent) SetDownloadError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	msg := err.Error()
	e.DownloadError = msg
	e.Details["download_error"] = msg
	return e
}
// SetValidationError records a task validation failure. A nil error is a
// no-op.
func (e *TaskEvent) SetValidationError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	msg := err.Error()
	e.ValidationError = msg
	e.Details["validation_error"] = msg
	return e
}
// SetKillTimeout records how long the client waits before force-killing.
func (e *TaskEvent) SetKillTimeout(timeout time.Duration) *TaskEvent {
	e.Details["kill_timeout"] = timeout.String()
	e.KillTimeout = timeout
	return e
}
// SetDiskLimit records the maximum allowed task disk size.
func (e *TaskEvent) SetDiskLimit(limit int64) *TaskEvent {
	e.Details["disk_limit"] = strconv.FormatInt(limit, 10)
	e.DiskLimit = limit
	return e
}
// SetFailedSibling records the sibling task whose failure terminated this
// task.
func (e *TaskEvent) SetFailedSibling(sibling string) *TaskEvent {
	e.Details["failed_sibling"] = sibling
	e.FailedSibling = sibling
	return e
}
// SetVaultRenewalError records a Vault token renewal failure. A nil error is
// a no-op.
func (e *TaskEvent) SetVaultRenewalError(err error) *TaskEvent {
	if err == nil {
		return e
	}
	msg := err.Error()
	e.VaultError = msg
	e.Details["vault_renewal_error"] = msg
	return e
}
// SetDriverMessage records an informational message from the driver.
func (e *TaskEvent) SetDriverMessage(m string) *TaskEvent {
	e.Details["driver_message"] = m
	e.DriverMessage = m
	return e
}
// SetOOMKilled records whether the task was OOM-killed. The value lives only
// in the Details map; there is no corresponding deprecated struct field.
func (e *TaskEvent) SetOOMKilled(oom bool) *TaskEvent {
	e.Details["oom_killed"] = strconv.FormatBool(oom)
	return e
}
// TaskArtifact is an artifact to download before running the task.
type TaskArtifact struct {
	// GetterSource is the source to download an artifact using go-getter
	GetterSource string
	// GetterOptions are options to use when downloading the artifact using
	// go-getter.
	GetterOptions map[string]string
	// GetterHeaders are headers to use when downloading the artifact using
	// go-getter.
	GetterHeaders map[string]string
	// GetterMode is the go-getter.ClientMode for fetching resources.
	// Defaults to "any" but can be set to "file" or "dir".
	GetterMode string
	// RelativeDest is the download destination given relative to the task's
	// directory.
	RelativeDest string
}
// Copy returns a deep copy of the artifact; the getter option and header
// maps are duplicated so the copy is independent. A nil receiver yields nil.
func (ta *TaskArtifact) Copy() *TaskArtifact {
	if ta == nil {
		return nil
	}
	nta := *ta
	nta.GetterOptions = helper.CopyMapStringString(ta.GetterOptions)
	nta.GetterHeaders = helper.CopyMapStringString(ta.GetterHeaders)
	return &nta
}
// GoString implements fmt.GoStringer using the default %+v rendering of the
// artifact's fields.
func (ta *TaskArtifact) GoString() string {
	return fmt.Sprintf("%+v", ta)
}
// DiffID fulfills the DiffableWithID interface. The relative destination is
// the identity used to pair up artifacts when diffing two job versions.
func (ta *TaskArtifact) DiffID() string {
	return ta.RelativeDest
}
// hashStringMap appends a deterministic hash of m onto h.
func hashStringMap(h hash.Hash, m map[string]string) {
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
_, _ = h.Write([]byte(k))
_, _ = h.Write([]byte(m[k]))
}
}
// Hash creates a unique identifier for a TaskArtifact as the same GetterSource
// may be specified multiple times with different destinations. All fields,
// including the option/header maps (in sorted key order), feed a BLAKE2b-256
// digest that is returned base64-encoded without padding.
func (ta *TaskArtifact) Hash() string {
	h, err := blake2b.New256(nil)
	// blake2b.New256 only fails for an invalid key; nil is always valid.
	if err != nil {
		panic(err)
	}
	_, _ = h.Write([]byte(ta.GetterSource))
	hashStringMap(h, ta.GetterOptions)
	hashStringMap(h, ta.GetterHeaders)
	_, _ = h.Write([]byte(ta.GetterMode))
	_, _ = h.Write([]byte(ta.RelativeDest))
	return base64.RawStdEncoding.EncodeToString(h.Sum(nil))
}
// Validate checks the artifact's source, getter mode, destination path, and
// checksum option, accumulating problems into a multierror. Note the side
// effect: an empty GetterMode is canonicalized to GetterModeAny in place.
func (ta *TaskArtifact) Validate() error {
	// Verify the source
	var mErr multierror.Error
	if ta.GetterSource == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("source must be specified"))
	}
	switch ta.GetterMode {
	case "":
		// Default to any
		ta.GetterMode = GetterModeAny
	case GetterModeAny, GetterModeFile, GetterModeDir:
		// Ok
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid artifact mode %q; must be one of: %s, %s, %s",
			ta.GetterMode, GetterModeAny, GetterModeFile, GetterModeDir))
	}
	// The destination must stay inside the allocation directory.
	escaped, err := escapingfs.PathEscapesAllocViaRelative("task", ta.RelativeDest)
	if err != nil {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
	} else if escaped {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
	}
	if err := ta.validateChecksum(); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}
	return mErr.ErrorOrNil()
}
// validateChecksum verifies the optional go-getter "checksum" option: it
// must be "type:hexvalue" with a supported type (md5/sha1/sha256/sha512)
// and a hex digest of the matching length. A missing option, or one that
// still contains environment interpolation, is accepted.
func (ta *TaskArtifact) validateChecksum() error {
	check, ok := ta.GetterOptions["checksum"]
	if !ok {
		return nil
	}
	// Job struct validation occurs before interpolation resolution can be effective.
	// Skip checking if checksum contain variable reference, and artifacts fetching will
	// eventually fail, if checksum is indeed invalid.
	if args.ContainsEnv(check) {
		return nil
	}
	check = strings.TrimSpace(check)
	if check == "" {
		return fmt.Errorf("checksum value cannot be empty")
	}
	parts := strings.Split(check, ":")
	if l := len(parts); l != 2 {
		return fmt.Errorf(`checksum must be given as "type:value"; got %q`, check)
	}
	checksumVal := parts[1]
	checksumBytes, err := hex.DecodeString(checksumVal)
	if err != nil {
		return fmt.Errorf("invalid checksum: %v", err)
	}
	checksumType := parts[0]
	// Each supported algorithm implies a fixed digest length in bytes.
	expectedLength := 0
	switch checksumType {
	case "md5":
		expectedLength = md5.Size
	case "sha1":
		expectedLength = sha1.Size
	case "sha256":
		expectedLength = sha256.Size
	case "sha512":
		expectedLength = sha512.Size
	default:
		return fmt.Errorf("unsupported checksum type: %s", checksumType)
	}
	if len(checksumBytes) != expectedLength {
		return fmt.Errorf("invalid %s checksum: %v", checksumType, checksumVal)
	}
	return nil
}
// Operands accepted by Constraint (and, for a subset, Affinity).
const (
	ConstraintDistinctProperty  = "distinct_property"
	ConstraintDistinctHosts     = "distinct_hosts"
	ConstraintRegex             = "regexp"
	ConstraintVersion           = "version"
	ConstraintSemver            = "semver"
	ConstraintSetContains       = "set_contains"
	ConstraintSetContainsAll    = "set_contains_all"
	ConstraintSetContainsAny    = "set_contains_any"
	ConstraintAttributeIsSet    = "is_set"
	ConstraintAttributeIsNotSet = "is_not_set"
)
// A Constraint is used to restrict placement options.
type Constraint struct {
	LTarget string // Left-hand target
	RTarget string // Right-hand target
	Operand string // Constraint operand (<=, <, =, !=, >, >=), contains, near
}
// Equals checks if two constraints are equal: same pointer, or all three
// fields match. Both arguments are assumed non-nil unless identical.
func (c *Constraint) Equals(o *Constraint) bool {
	if c == o {
		return true
	}
	return c.LTarget == o.LTarget &&
		c.RTarget == o.RTarget &&
		c.Operand == o.Operand
}
// Equal is like Equals but with one less s; it provides the conventional Go
// spelling expected by generic equality helpers.
func (c *Constraint) Equal(o *Constraint) bool {
	return c.Equals(o)
}
// Copy returns a copy of the constraint (all fields are plain strings, so a
// value copy is a full copy). A nil receiver yields nil.
func (c *Constraint) Copy() *Constraint {
	if c == nil {
		return nil
	}
	nc := *c
	return &nc
}
// String renders the constraint as "<LTarget> <Operand> <RTarget>".
func (c *Constraint) String() string {
	return fmt.Sprintf("%s %s %s", c.LTarget, c.Operand, c.RTarget)
}
// Validate checks that the constraint's operand is known and that its
// L/RTarget requirements for that operand are met, accumulating problems
// into a multierror.
func (c *Constraint) Validate() error {
	var mErr multierror.Error
	if c.Operand == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing constraint operand"))
	}
	// requireLtarget specifies whether the constraint requires an LTarget to be
	// provided.
	requireLtarget := true
	// Perform additional validation based on operand
	switch c.Operand {
	case ConstraintDistinctHosts:
		requireLtarget = false
	case ConstraintSetContainsAll, ConstraintSetContainsAny, ConstraintSetContains:
		if c.RTarget == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Set contains constraint requires an RTarget"))
		}
	case ConstraintRegex:
		if _, err := regexp.Compile(c.RTarget); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Regular expression failed to compile: %v", err))
		}
	case ConstraintVersion:
		if _, err := version.NewConstraint(c.RTarget); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Version constraint is invalid: %v", err))
		}
	case ConstraintSemver:
		if _, err := semver.NewConstraint(c.RTarget); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Semver constraint is invalid: %v", err))
		}
	case ConstraintDistinctProperty:
		// If a count is set, make sure it is convertible to a uint64
		if c.RTarget != "" {
			count, err := strconv.ParseUint(c.RTarget, 10, 64)
			if err != nil {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("Failed to convert RTarget %q to uint64: %v", c.RTarget, err))
			} else if count < 1 {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("Distinct Property must have an allowed count of 1 or greater: %d < 1", count))
			}
		}
	case ConstraintAttributeIsSet, ConstraintAttributeIsNotSet:
		if c.RTarget != "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q does not support an RTarget", c.Operand))
		}
	case "=", "==", "is", "!=", "not", "<", "<=", ">", ">=":
		if c.RTarget == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q requires an RTarget", c.Operand))
		}
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Unknown constraint type %q", c.Operand))
	}
	// Ensure we have an LTarget for the constraints that need one
	if requireLtarget && c.LTarget == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("No LTarget provided but is required by constraint"))
	}
	return mErr.ErrorOrNil()
}
// Constraints is a list of constraints.
type Constraints []*Constraint
// Equals compares Constraints as a set: same length and every element of xs
// has an equal element in ys. Note: with duplicate entries this can report
// two different multisets as equal, since each x only needs some match in ys.
func (xs *Constraints) Equals(ys *Constraints) bool {
	if xs == ys {
		return true
	}
	if xs == nil || ys == nil {
		return false
	}
	if len(*xs) != len(*ys) {
		return false
	}
SETEQUALS:
	for _, x := range *xs {
		for _, y := range *ys {
			if x.Equals(y) {
				continue SETEQUALS
			}
		}
		return false
	}
	return true
}
// Affinity is used to score placement options based on a weight
type Affinity struct {
	LTarget string // Left-hand target
	RTarget string // Right-hand target
	Operand string // Affinity operand (<=, <, =, !=, >, >=), set_contains_all, set_contains_any
	Weight  int8   // Weight applied to nodes that match the affinity. Can be negative
}
// Equals checks if two affinities are equal: same pointer, or all four
// fields match. Both arguments are assumed non-nil unless identical.
func (a *Affinity) Equals(o *Affinity) bool {
	if a == o {
		return true
	}
	return a.LTarget == o.LTarget &&
		a.RTarget == o.RTarget &&
		a.Operand == o.Operand &&
		a.Weight == o.Weight
}
// Equal is the conventional Go spelling of Equals.
func (a *Affinity) Equal(o *Affinity) bool {
	return a.Equals(o)
}
// Copy returns a copy of the affinity (all fields are scalar, so a value
// copy is a full copy). A nil receiver yields nil.
func (a *Affinity) Copy() *Affinity {
	if a == nil {
		return nil
	}
	na := *a
	return &na
}
// String renders the affinity as "<LTarget> <Operand> <RTarget> <Weight>".
func (a *Affinity) String() string {
	return fmt.Sprintf("%s %s %s %v", a.LTarget, a.Operand, a.RTarget, a.Weight)
}
// Validate checks the affinity's operand, its RTarget requirements, that an
// LTarget is present, and that the weight is a non-zero value in [-100,100],
// accumulating problems into a multierror.
func (a *Affinity) Validate() error {
	var mErr multierror.Error
	if a.Operand == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing affinity operand"))
	}
	// Perform additional validation based on operand
	switch a.Operand {
	case ConstraintSetContainsAll, ConstraintSetContainsAny, ConstraintSetContains:
		if a.RTarget == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Set contains operators require an RTarget"))
		}
	case ConstraintRegex:
		if _, err := regexp.Compile(a.RTarget); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Regular expression failed to compile: %v", err))
		}
	case ConstraintVersion:
		if _, err := version.NewConstraint(a.RTarget); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Version affinity is invalid: %v", err))
		}
	case ConstraintSemver:
		if _, err := semver.NewConstraint(a.RTarget); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Semver affinity is invalid: %v", err))
		}
	case "=", "==", "is", "!=", "not", "<", "<=", ">", ">=":
		if a.RTarget == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q requires an RTarget", a.Operand))
		}
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Unknown affinity operator %q", a.Operand))
	}
	// Ensure we have an LTarget
	if a.LTarget == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("No LTarget provided but is required"))
	}
	// Ensure that weight is between -100 and 100, and not zero
	if a.Weight == 0 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Affinity weight cannot be zero"))
	}
	if a.Weight > 100 || a.Weight < -100 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Affinity weight must be within the range [-100,100]"))
	}
	return mErr.ErrorOrNil()
}
// Spread is used to specify desired distribution of allocations according to weight
type Spread struct {
	// Attribute is the node attribute used as the spread criteria
	Attribute string
	// Weight is the relative weight of this spread, useful when there are multiple
	// spread and affinities
	Weight int8
	// SpreadTarget is used to describe desired percentages for each attribute value
	SpreadTarget []*SpreadTarget
	// Memoized string representation; populated lazily by String.
	str string
}
// Affinities is a list of affinities.
type Affinities []*Affinity
// Equals compares Affinities as a set: same length and every element of xs
// has an equal element in ys. Note: with duplicate entries this can report
// two different multisets as equal, since each x only needs some match in ys.
func (xs *Affinities) Equals(ys *Affinities) bool {
	if xs == ys {
		return true
	}
	if xs == nil || ys == nil {
		return false
	}
	if len(*xs) != len(*ys) {
		return false
	}
SETEQUALS:
	for _, x := range *xs {
		for _, y := range *ys {
			if x.Equals(y) {
				continue SETEQUALS
			}
		}
		return false
	}
	return true
}
// Copy returns a deep copy of the spread, duplicating the target slice. A
// nil receiver yields nil.
func (s *Spread) Copy() *Spread {
	if s == nil {
		return nil
	}
	clone := *s
	clone.SpreadTarget = CopySliceSpreadTarget(s.SpreadTarget)
	return &clone
}
// String renders the spread as "<attribute> <targets> <weight>", memoizing
// the result in s.str. The memoization writes the field without
// synchronization, so concurrent first calls are not safe.
func (s *Spread) String() string {
	if s.str != "" {
		return s.str
	}
	s.str = fmt.Sprintf("%s %s %v", s.Attribute, s.SpreadTarget, s.Weight)
	return s.str
}
// Validate checks the spread has an attribute, a weight in (0,100], unique
// target values, and target percentages that individually and in sum stay
// within 100, accumulating problems into a multierror.
func (s *Spread) Validate() error {
	var mErr multierror.Error
	if s.Attribute == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing spread attribute"))
	}
	if s.Weight <= 0 || s.Weight > 100 {
		mErr.Errors = append(mErr.Errors, errors.New("Spread stanza must have a positive weight from 0 to 100"))
	}
	seen := make(map[string]struct{})
	// uint32 so the running total cannot overflow while summing uint8 percents.
	sumPercent := uint32(0)
	for _, target := range s.SpreadTarget {
		// Make sure there are no duplicates
		_, ok := seen[target.Value]
		if !ok {
			seen[target.Value] = struct{}{}
		} else {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Spread target value %q already defined", target.Value))
		}
		if target.Percent > 100 {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Spread target percentage for value %q must be between 0 and 100", target.Value))
		}
		sumPercent += uint32(target.Percent)
	}
	if sumPercent > 100 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Sum of spread target percentages must not be greater than 100%%; got %d%%", sumPercent))
	}
	return mErr.ErrorOrNil()
}
// SpreadTarget is used to specify desired percentages for each attribute value
type SpreadTarget struct {
	// Value is a single attribute value, like "dc1"
	Value string
	// Percent is the desired percentage of allocs
	Percent uint8
	// Memoized string representation; populated lazily by String.
	str string
}
// Copy returns a copy of the spread target (all fields are scalar). A nil
// receiver yields nil.
func (s *SpreadTarget) Copy() *SpreadTarget {
	if s == nil {
		return nil
	}
	clone := *s
	return &clone
}
// String renders the target as `"<value>" <percent>%`, memoizing the result
// in s.str. The memoization writes the field without synchronization, so
// concurrent first calls are not safe.
func (s *SpreadTarget) String() string {
	if s.str != "" {
		return s.str
	}
	s.str = fmt.Sprintf("%q %v%%", s.Value, s.Percent)
	return s.str
}
// EphemeralDisk is an ephemeral disk object
type EphemeralDisk struct {
	// Sticky indicates whether the allocation is sticky to a node
	Sticky bool
	// SizeMB is the size of the local disk
	SizeMB int
	// Migrate determines if Nomad client should migrate the allocation dir for
	// sticky allocations
	Migrate bool
}
// DefaultEphemeralDisk returns a EphemeralDisk with default configurations
// (a 300 MB, non-sticky, non-migrating disk).
func DefaultEphemeralDisk() *EphemeralDisk {
	d := new(EphemeralDisk)
	d.SizeMB = 300
	return d
}
// Validate validates EphemeralDisk: the requested size must be at least
// 10 MB.
func (d *EphemeralDisk) Validate() error {
	if d.SizeMB >= 10 {
		return nil
	}
	// NOTE(review): the message says "DiskMB" while the field is SizeMB —
	// confirm the wording is intentional before changing it, since callers
	// or tests may match on it.
	return fmt.Errorf("minimum DiskMB value is 10; got %d", d.SizeMB)
}
// Copy copies the EphemeralDisk struct and returns a new one.
//
// A nil receiver returns nil, matching the behavior of the other Copy
// methods in this file (e.g. SpreadTarget.Copy, Vault.Copy); previously a
// nil receiver would panic.
func (d *EphemeralDisk) Copy() *EphemeralDisk {
	if d == nil {
		return nil
	}
	ld := new(EphemeralDisk)
	*ld = *d
	return ld
}
var (
	// VaultUnrecoverableError matches unrecoverable errors returned by a Vault
	// server. It looks for "Code: 400", "Code: 403", or "Code: 404" embedded
	// in the error text.
	VaultUnrecoverableError = regexp.MustCompile(`Code:\s+40(0|3|4)`)
)
// Vault token change modes: how a task reacts when its Vault token is
// replaced (see Vault.ChangeMode).
const (
	// VaultChangeModeNoop takes no action when a new token is retrieved.
	VaultChangeModeNoop = "noop"

	// VaultChangeModeSignal signals the task when a new token is retrieved.
	VaultChangeModeSignal = "signal"

	// VaultChangeModeRestart restarts the task when a new token is retrieved.
	VaultChangeModeRestart = "restart"
)
// Vault stores the set of permissions a task needs access to from Vault.
type Vault struct {
	// Policies is the set of policies that the task needs access to
	// (must be non-empty and must not include "root"; see Validate).
	Policies []string

	// Namespace is the vault namespace that should be used.
	Namespace string

	// Env marks whether the Vault Token should be exposed as an environment
	// variable
	Env bool

	// ChangeMode is used to configure the task's behavior when the Vault
	// token changes because the original token could not be renewed in time.
	// One of the VaultChangeMode* constants; defaults to restart (see
	// Canonicalize).
	ChangeMode string

	// ChangeSignal is the signal sent to the task when a new token is
	// retrieved. This is only valid when using the signal change mode.
	ChangeSignal string
}
// DefaultVaultBlock returns a Vault block with the defaults applied: the
// token is exposed in the environment and the change mode is restart.
func DefaultVaultBlock() *Vault {
	v := new(Vault)
	v.Env = true
	v.ChangeMode = VaultChangeModeRestart
	return v
}
// Copy returns a shallow copy of this Vault block; nil in, nil out.
func (v *Vault) Copy() *Vault {
	if v == nil {
		return nil
	}
	dup := *v
	return &dup
}
// Canonicalize normalizes the block in place: the change signal is
// upper-cased, and an unset change mode falls back to restart.
func (v *Vault) Canonicalize() {
	if v.ChangeMode == "" {
		v.ChangeMode = VaultChangeModeRestart
	}
	if sig := v.ChangeSignal; sig != "" {
		v.ChangeSignal = strings.ToUpper(sig)
	}
}
// Validate returns if the Vault block is valid: at least one policy must be
// listed, "root" may not be among them, and the change mode must be a known
// mode (with a signal required for signal mode). A nil block is valid.
func (v *Vault) Validate() error {
	if v == nil {
		return nil
	}

	var mErr multierror.Error
	if len(v.Policies) == 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("Policy list cannot be empty"))
	}
	for _, policy := range v.Policies {
		if policy == "root" {
			_ = multierror.Append(&mErr, fmt.Errorf("Can not specify \"root\" policy"))
		}
	}

	switch v.ChangeMode {
	case VaultChangeModeNoop, VaultChangeModeRestart:
		// Nothing further to check for these modes.
	case VaultChangeModeSignal:
		if v.ChangeSignal == "" {
			_ = multierror.Append(&mErr, fmt.Errorf("Signal must be specified when using change mode %q", VaultChangeModeSignal))
		}
	default:
		_ = multierror.Append(&mErr, fmt.Errorf("Unknown change mode %q", v.ChangeMode))
	}

	return mErr.ErrorOrNil()
}
const (
	// DeploymentStatuses are the various states a deployment can be be in
	DeploymentStatusRunning    = "running"
	DeploymentStatusPaused     = "paused"
	DeploymentStatusFailed     = "failed"
	DeploymentStatusSuccessful = "successful"
	DeploymentStatusCancelled  = "cancelled"
	DeploymentStatusPending    = "pending"
	DeploymentStatusBlocked    = "blocked"
	DeploymentStatusUnblocking = "unblocking"

	// TODO Statuses and Descriptions do not match 1:1 and we sometimes use the Description as a status flag

	// DeploymentStatusDescriptions are the various descriptions of the states a
	// deployment can be in.
	DeploymentStatusDescriptionRunning               = "Deployment is running"
	DeploymentStatusDescriptionRunningNeedsPromotion = "Deployment is running but requires manual promotion"
	DeploymentStatusDescriptionRunningAutoPromotion  = "Deployment is running pending automatic promotion"
	DeploymentStatusDescriptionPaused                = "Deployment is paused"
	DeploymentStatusDescriptionSuccessful            = "Deployment completed successfully"
	DeploymentStatusDescriptionStoppedJob            = "Cancelled because job is stopped"
	DeploymentStatusDescriptionNewerJob              = "Cancelled due to newer version of job"
	DeploymentStatusDescriptionFailedAllocations     = "Failed due to unhealthy allocations"
	DeploymentStatusDescriptionProgressDeadline      = "Failed due to progress deadline"
	DeploymentStatusDescriptionFailedByUser          = "Deployment marked as failed"

	// Descriptions used only in multiregion deployments.
	DeploymentStatusDescriptionFailedByPeer   = "Failed because of an error in peer region"
	DeploymentStatusDescriptionBlocked        = "Deployment is complete but waiting for peer region"
	DeploymentStatusDescriptionUnblocking     = "Deployment is unblocking remaining regions"
	DeploymentStatusDescriptionPendingForPeer = "Deployment is pending, waiting for peer region"
)
// DeploymentStatusDescriptionRollback is used to get the status description of
// a deployment when rolling back to an older job.
func DeploymentStatusDescriptionRollback(baseDescription string, jobVersion uint64) string {
	suffix := fmt.Sprintf(" - rolling back to job version %d", jobVersion)
	return baseDescription + suffix
}
// DeploymentStatusDescriptionRollbackNoop is used to get the status description of
// a deployment when rolling back is not possible because it has the same specification
func DeploymentStatusDescriptionRollbackNoop(baseDescription string, jobVersion uint64) string {
	suffix := fmt.Sprintf(" - not rolling back to stable job version %d as current job has same specification", jobVersion)
	return baseDescription + suffix
}
// DeploymentStatusDescriptionNoRollbackTarget is used to get the status description of
// a deployment when there is no target to rollback to but autorevert is desired.
func DeploymentStatusDescriptionNoRollbackTarget(baseDescription string) string {
	return baseDescription + " - no stable job version to auto revert to"
}
// Deployment is the object that represents a job deployment which is used to
// transition a job between versions.
type Deployment struct {
	// ID is a generated UUID for the deployment
	ID string

	// Namespace is the namespace the deployment is created in
	Namespace string

	// JobID is the job the deployment is created for
	JobID string

	// JobVersion is the version of the job at which the deployment is tracking
	JobVersion uint64

	// JobModifyIndex is the ModifyIndex of the job which the deployment is
	// tracking.
	JobModifyIndex uint64

	// JobSpecModifyIndex is the JobModifyIndex of the job which the
	// deployment is tracking.
	JobSpecModifyIndex uint64

	// JobCreateIndex is the create index of the job which the deployment is
	// tracking. It is needed so that if the job gets stopped and reran we can
	// present the correct list of deployments for the job and not old ones.
	JobCreateIndex uint64

	// IsMultiregion specifies if deployment is part of multiregion deployment
	IsMultiregion bool

	// TaskGroups is the set of task groups effected by the deployment and their
	// current deployment status, keyed by task group name.
	TaskGroups map[string]*DeploymentState

	// Status is the status of the deployment, one of the DeploymentStatus*
	// constants.
	Status string

	// StatusDescription allows a human readable description of the deployment
	// status.
	StatusDescription string

	// EvalPriority tracks the priority of the evaluation which lead to the
	// creation of this Deployment object. Any additional evaluations created
	// as a result of this deployment can therefore inherit this value, which
	// is not guaranteed to be that of the job priority parameter.
	EvalPriority int

	// Raft indexes.
	CreateIndex uint64
	ModifyIndex uint64
}
// NewDeployment creates a new deployment given the job. The deployment
// starts in the running state with an empty per-task-group state map sized
// for the job's task groups.
func NewDeployment(job *Job, evalPriority int) *Deployment {
	return &Deployment{
		ID:                 uuid.Generate(),
		Namespace:          job.Namespace,
		JobID:              job.ID,
		JobVersion:         job.Version,
		JobModifyIndex:     job.ModifyIndex,
		JobSpecModifyIndex: job.JobModifyIndex,
		JobCreateIndex:     job.CreateIndex,
		IsMultiregion:      job.IsMultiregion(),
		Status:             DeploymentStatusRunning,
		StatusDescription:  DeploymentStatusDescriptionRunning,
		TaskGroups:         make(map[string]*DeploymentState, len(job.TaskGroups)),
		EvalPriority:       evalPriority,
	}
}
// Copy returns a deep copy of the deployment: the TaskGroups map and each
// DeploymentState in it are duplicated. A nil receiver yields nil.
func (d *Deployment) Copy() *Deployment {
	if d == nil {
		return nil
	}

	c := new(Deployment)
	*c = *d

	c.TaskGroups = nil
	if d.TaskGroups != nil {
		c.TaskGroups = make(map[string]*DeploymentState, len(d.TaskGroups))
		for name, state := range d.TaskGroups {
			c.TaskGroups[name] = state.Copy()
		}
	}

	return c
}
// Active returns whether the deployment is active or terminal. The running,
// paused, blocked, unblocking, and pending states are active; everything
// else is terminal.
func (d *Deployment) Active() bool {
	switch d.Status {
	case DeploymentStatusRunning, DeploymentStatusPaused, DeploymentStatusBlocked,
		DeploymentStatusUnblocking, DeploymentStatusPending:
		return true
	}
	return false
}
// GetID is a helper for getting the ID when the object may be nil
func (d *Deployment) GetID() string {
	if d != nil {
		return d.ID
	}
	return ""
}
// GetCreateIndex implements the CreateIndexGetter interface, required for
// pagination. A nil deployment reports index 0.
func (d *Deployment) GetCreateIndex() uint64 {
	if d != nil {
		return d.CreateIndex
	}
	return 0
}
// HasPlacedCanaries returns whether the deployment has placed canaries in
// any of its task groups.
func (d *Deployment) HasPlacedCanaries() bool {
	if d == nil {
		return false
	}
	for _, state := range d.TaskGroups {
		if len(state.PlacedCanaries) > 0 {
			return true
		}
	}
	return false
}
// RequiresPromotion returns whether the deployment requires promotion to
// continue: it is running and some task group has unpromoted canaries.
func (d *Deployment) RequiresPromotion() bool {
	if d == nil || d.Status != DeploymentStatusRunning {
		return false
	}
	for _, state := range d.TaskGroups {
		if state.DesiredCanaries > 0 && !state.Promoted {
			return true
		}
	}
	return false
}
// HasAutoPromote determines if all taskgroups are marked auto_promote. The
// deployment must be running and have at least one task group; any group
// with canaries but without auto-promote disqualifies it.
func (d *Deployment) HasAutoPromote() bool {
	if d == nil || len(d.TaskGroups) == 0 || d.Status != DeploymentStatusRunning {
		return false
	}
	for _, state := range d.TaskGroups {
		if state.DesiredCanaries > 0 && !state.AutoPromote {
			return false
		}
	}
	return true
}
// GoString renders the deployment and each task group's state for debugging
// output (%#v).
func (d *Deployment) GoString() string {
	var b strings.Builder
	fmt.Fprintf(&b, "Deployment ID %q for job %q has status %q (%v):", d.ID, d.JobID, d.Status, d.StatusDescription)
	for group, state := range d.TaskGroups {
		fmt.Fprintf(&b, "\nTask Group %q has state:\n%#v", group, state)
	}
	return b.String()
}
// DeploymentState tracks the state of a deployment for a given task group.
type DeploymentState struct {
	// AutoRevert marks whether the task group has indicated the job should be
	// reverted on failure
	AutoRevert bool

	// AutoPromote marks promotion triggered automatically by healthy canaries
	// copied from TaskGroup UpdateStrategy in scheduler.reconcile
	AutoPromote bool

	// ProgressDeadline is the deadline by which an allocation must transition
	// to healthy before the deployment is considered failed. This value is set
	// by the jobspec `update.progress_deadline` field.
	ProgressDeadline time.Duration

	// RequireProgressBy is the time by which an allocation must transition to
	// healthy before the deployment is considered failed. This value is reset
	// to "now" + ProgressDeadline when an allocation updates the deployment.
	RequireProgressBy time.Time

	// Promoted marks whether the canaries have been promoted
	Promoted bool

	// PlacedCanaries is the set of placed canary allocations (alloc IDs)
	PlacedCanaries []string

	// DesiredCanaries is the number of canaries that should be created.
	DesiredCanaries int

	// DesiredTotal is the total number of allocations that should be created as
	// part of the deployment.
	DesiredTotal int

	// PlacedAllocs is the number of allocations that have been placed
	PlacedAllocs int

	// HealthyAllocs is the number of allocations that have been marked healthy.
	HealthyAllocs int

	// UnhealthyAllocs are allocations that have been marked as unhealthy.
	UnhealthyAllocs int
}
// GoString renders the task group deployment state as an indented,
// multi-line summary for debugging output (%#v).
func (d *DeploymentState) GoString() string {
	var b strings.Builder
	fmt.Fprintf(&b, "\tDesired Total: %d", d.DesiredTotal)
	fmt.Fprintf(&b, "\n\tDesired Canaries: %d", d.DesiredCanaries)
	fmt.Fprintf(&b, "\n\tPlaced Canaries: %#v", d.PlacedCanaries)
	fmt.Fprintf(&b, "\n\tPromoted: %v", d.Promoted)
	fmt.Fprintf(&b, "\n\tPlaced: %d", d.PlacedAllocs)
	fmt.Fprintf(&b, "\n\tHealthy: %d", d.HealthyAllocs)
	fmt.Fprintf(&b, "\n\tUnhealthy: %d", d.UnhealthyAllocs)
	fmt.Fprintf(&b, "\n\tAutoRevert: %v", d.AutoRevert)
	fmt.Fprintf(&b, "\n\tAutoPromote: %v", d.AutoPromote)
	return b.String()
}
// Copy returns a copy of the deployment state with its own PlacedCanaries
// slice.
func (d *DeploymentState) Copy() *DeploymentState {
	dup := *d
	dup.PlacedCanaries = helper.CopySliceString(d.PlacedCanaries)
	return &dup
}
// DeploymentStatusUpdate is used to update the status of a given deployment
type DeploymentStatusUpdate struct {
	// DeploymentID is the ID of the deployment to update
	DeploymentID string

	// Status is the new status of the deployment (a DeploymentStatus*
	// constant).
	Status string

	// StatusDescription is the new status description of the deployment.
	StatusDescription string
}
// RescheduleTracker encapsulates previous reschedule events
type RescheduleTracker struct {
	// Events is the history of reschedule attempts for an allocation.
	Events []*RescheduleEvent
}
// Copy returns a deep copy of the tracker, duplicating each event. A nil
// receiver yields nil.
func (rt *RescheduleTracker) Copy() *RescheduleTracker {
	if rt == nil {
		return nil
	}
	dup := *rt
	dup.Events = make([]*RescheduleEvent, 0, len(rt.Events))
	for _, event := range rt.Events {
		dup.Events = append(dup.Events, event.Copy())
	}
	return &dup
}
// RescheduleEvent is used to keep track of previous attempts at rescheduling an allocation
type RescheduleEvent struct {
	// RescheduleTime is the timestamp of a reschedule attempt
	// (Unix nanoseconds; compared against time.Time.UnixNano elsewhere).
	RescheduleTime int64

	// PrevAllocID is the ID of the previous allocation being restarted
	PrevAllocID string

	// PrevNodeID is the node ID of the previous allocation
	PrevNodeID string

	// Delay is the reschedule delay associated with the attempt
	Delay time.Duration
}
// NewRescheduleEvent constructs a reschedule event from its component
// fields.
func NewRescheduleEvent(rescheduleTime int64, prevAllocID string, prevNodeID string, delay time.Duration) *RescheduleEvent {
	return &RescheduleEvent{
		RescheduleTime: rescheduleTime,
		PrevAllocID:    prevAllocID,
		PrevNodeID:     prevNodeID,
		Delay:          delay,
	}
}
// Copy returns a shallow copy of the event; nil in, nil out.
//
// The local was renamed from "copy", which shadowed the builtin copy
// function within this scope.
func (re *RescheduleEvent) Copy() *RescheduleEvent {
	if re == nil {
		return nil
	}
	ne := new(RescheduleEvent)
	*ne = *re
	return ne
}
// DesiredTransition is used to mark an allocation as having a desired state
// transition. This information can be used by the scheduler to make the
// correct decision. All fields are tri-state pointers: nil means "no
// opinion", so a Merge only overrides explicitly-set values.
type DesiredTransition struct {
	// Migrate is used to indicate that this allocation should be stopped and
	// migrated to another node.
	Migrate *bool

	// Reschedule is used to indicate that this allocation is eligible to be
	// rescheduled. Most allocations are automatically eligible for
	// rescheduling, so this field is only required when an allocation is not
	// automatically eligible. An example is an allocation that is part of a
	// deployment.
	Reschedule *bool

	// ForceReschedule is used to indicate that this allocation must be rescheduled.
	// This field is only used when operators want to force a placement even if
	// a failed allocation is not eligible to be rescheduled
	ForceReschedule *bool

	// NoShutdownDelay, if set to true, will override the group and
	// task shutdown_delay configuration and ignore the delay for any
	// allocations stopped as a result of this Deregister call.
	NoShutdownDelay *bool
}
// Merge merges the two desired transitions, preferring the values from the
// passed in object. Only fields explicitly set (non-nil) in o override d.
func (d *DesiredTransition) Merge(o *DesiredTransition) {
	if m := o.Migrate; m != nil {
		d.Migrate = m
	}
	if r := o.Reschedule; r != nil {
		d.Reschedule = r
	}
	if f := o.ForceReschedule; f != nil {
		d.ForceReschedule = f
	}
	if n := o.NoShutdownDelay; n != nil {
		d.NoShutdownDelay = n
	}
}
// ShouldMigrate returns whether the transition object dictates a migration.
//
// A nil receiver returns false, for consistency with ShouldForceReschedule
// and ShouldIgnoreShutdownDelay; previously a nil receiver would panic.
func (d *DesiredTransition) ShouldMigrate() bool {
	if d == nil {
		return false
	}
	return d.Migrate != nil && *d.Migrate
}
// ShouldReschedule returns whether the transition object dictates a
// rescheduling.
//
// A nil receiver returns false, for consistency with ShouldForceReschedule
// and ShouldIgnoreShutdownDelay; previously a nil receiver would panic.
func (d *DesiredTransition) ShouldReschedule() bool {
	if d == nil {
		return false
	}
	return d.Reschedule != nil && *d.Reschedule
}
// ShouldForceReschedule returns whether the transition object dictates a
// forced rescheduling. A nil transition forces nothing.
func (d *DesiredTransition) ShouldForceReschedule() bool {
	if d == nil || d.ForceReschedule == nil {
		return false
	}
	return *d.ForceReschedule
}
// ShouldIgnoreShutdownDelay returns whether the transition object dictates
// that shutdown skip any shutdown delays. A nil transition skips nothing.
func (d *DesiredTransition) ShouldIgnoreShutdownDelay() bool {
	if d == nil || d.NoShutdownDelay == nil {
		return false
	}
	return *d.NoShutdownDelay
}
// Server-side desired statuses for an allocation.
const (
	AllocDesiredStatusRun   = "run"   // Allocation should run
	AllocDesiredStatusStop  = "stop"  // Allocation should stop
	AllocDesiredStatusEvict = "evict" // Allocation should stop, and was evicted
)
// Client-reported statuses for an allocation. Complete, failed, and lost are
// terminal (see Allocation.ClientTerminalStatus).
const (
	AllocClientStatusPending  = "pending"
	AllocClientStatusRunning  = "running"
	AllocClientStatusComplete = "complete"
	AllocClientStatusFailed   = "failed"
	AllocClientStatusLost     = "lost"
	AllocClientStatusUnknown  = "unknown"
)
// Allocation is used to allocate the placement of a task group to a node.
type Allocation struct {
	// msgpack omit empty fields during serialization
	_struct bool `codec:",omitempty"` // nolint: structcheck

	// ID of the allocation (UUID)
	ID string

	// Namespace is the namespace the allocation is created in
	Namespace string

	// ID of the evaluation that generated this allocation
	EvalID string

	// Name is a logical name of the allocation.
	// Parsed by Index() as "<JobID>.<TaskGroup>[<index>]".
	Name string

	// NodeID is the node this is being placed on
	NodeID string

	// NodeName is the name of the node this is being placed on.
	NodeName string

	// Job is the parent job of the task group being allocated.
	// This is copied at allocation time to avoid issues if the job
	// definition is updated.
	JobID string
	Job   *Job

	// TaskGroup is the name of the task group that should be run
	TaskGroup string

	// COMPAT(0.11): Remove in 0.11
	// Resources is the total set of resources allocated as part
	// of this allocation of the task group. Dynamic ports will be set by
	// the scheduler.
	Resources *Resources

	// SharedResources are the resources that are shared by all the tasks in an
	// allocation
	// Deprecated: use AllocatedResources.Shared instead.
	// Keep field to allow us to handle upgrade paths from old versions
	SharedResources *Resources

	// TaskResources is the set of resources allocated to each
	// task. These should sum to the total Resources. Dynamic ports will be
	// set by the scheduler.
	// Deprecated: use AllocatedResources.Tasks instead.
	// Keep field to allow us to handle upgrade paths from old versions
	TaskResources map[string]*Resources

	// AllocatedResources is the total resources allocated for the task group.
	AllocatedResources *AllocatedResources

	// Metrics associated with this allocation
	Metrics *AllocMetric

	// DesiredStatus of the allocation on the client (an AllocDesiredStatus*
	// constant).
	DesiredStatus string

	// DesiredDescription is meant to provide more human useful information
	DesiredDescription string

	// DesiredTransition is used to indicate that a state transition
	// is desired for a given reason.
	DesiredTransition DesiredTransition

	// ClientStatus of the allocation on the client (an AllocClientStatus*
	// constant).
	ClientStatus string

	// ClientDescription is meant to provide more human useful information
	ClientDescription string

	// TaskStates stores the state of each task,
	TaskStates map[string]*TaskState

	// AllocStates track meta data associated with changes to the state of the whole allocation, like becoming lost
	AllocStates []*AllocState

	// PreviousAllocation is the allocation that this allocation is replacing
	PreviousAllocation string

	// NextAllocation is the allocation that this allocation is being replaced by
	NextAllocation string

	// DeploymentID identifies an allocation as being created from a
	// particular deployment
	DeploymentID string

	// DeploymentStatus captures the status of the allocation as part of the
	// given deployment
	DeploymentStatus *AllocDeploymentStatus

	// RescheduleTracker captures details of previous reschedule attempts of the allocation
	RescheduleTracker *RescheduleTracker

	// NetworkStatus captures networking details of an allocation known at runtime
	NetworkStatus *AllocNetworkStatus

	// FollowupEvalID captures a follow up evaluation created to handle a failed allocation
	// that can be rescheduled in the future
	FollowupEvalID string

	// PreemptedAllocations captures IDs of any allocations that were preempted
	// in order to place this allocation
	PreemptedAllocations []string

	// PreemptedByAllocation tracks the alloc ID of the allocation that caused this allocation
	// to stop running because it got preempted
	PreemptedByAllocation string

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64

	// AllocModifyIndex is not updated when the client updates allocations. This
	// lets the client pull only the allocs updated by the server.
	AllocModifyIndex uint64

	// CreateTime is the time the allocation has finished scheduling and been
	// verified by the plan applier. Unix nanoseconds.
	CreateTime int64

	// ModifyTime is the time the allocation was last updated. Unix
	// nanoseconds.
	ModifyTime int64
}
// GetID implements the IDGetter interface, required for pagination. A nil
// allocation reports an empty ID.
func (a *Allocation) GetID() string {
	if a != nil {
		return a.ID
	}
	return ""
}
// GetNamespace implements the NamespaceGetter interface, required for
// pagination and filtering namespaces in endpoints that support glob namespace
// requests using tokens with limited access. A nil allocation reports an
// empty namespace.
func (a *Allocation) GetNamespace() string {
	if a != nil {
		return a.Namespace
	}
	return ""
}
// GetCreateIndex implements the CreateIndexGetter interface, required for
// pagination. A nil allocation reports index 0.
func (a *Allocation) GetCreateIndex() uint64 {
	if a != nil {
		return a.CreateIndex
	}
	return 0
}
// ConsulNamespace returns the Consul namespace of the task group associated
// with this allocation.
//
// NOTE(review): this assumes a.Job is non-nil and that LookupTaskGroup finds
// the group (it can return nil) and that the group's Consul block tolerates
// the chained GetNamespace call — confirm callers guarantee this.
func (a *Allocation) ConsulNamespace() string {
	return a.Job.LookupTaskGroup(a.TaskGroup).Consul.GetNamespace()
}
// JobNamespacedID returns the namespaced ID (job ID plus namespace) of the
// job this allocation belongs to.
func (a *Allocation) JobNamespacedID() NamespacedID {
	return NewNamespacedID(a.JobID, a.Namespace)
}
// Index returns the index of the allocation. If the allocation is from a task
// group with count greater than 1, there will be multiple allocations for it.
//
// Allocation names have the shape "<JobID>.<TaskGroup>[<index>]"; the index
// is parsed out of the trailing bracketed number.
func (a *Allocation) Index() uint {
	l := len(a.Name)
	// prefix is the length of "<JobID>.<TaskGroup>[" — the +2 accounts for
	// the '.' separator and the opening '['.
	prefix := len(a.JobID) + len(a.TaskGroup) + 2
	if l <= 3 || l <= prefix {
		return uint(0)
	}

	// Strip the trailing ']' and parse the digits; a parse failure yields 0.
	strNum := a.Name[prefix : len(a.Name)-1]
	num, _ := strconv.Atoi(strNum)
	return uint(num)
}
// Copy provides a copy of the allocation and deep copies the job
func (a *Allocation) Copy() *Allocation {
	return a.copyImpl(true)
}
// CopySkipJob provides a copy of the allocation but doesn't deep copy the job
// (the copy shares the original's *Job pointer).
func (a *Allocation) CopySkipJob() *Allocation {
	return a.copyImpl(false)
}
// Canonicalize Allocation to ensure fields are initialized to the expectations
// of this version of Nomad. Should be called when restoring persisted
// Allocations or receiving Allocations from Nomad agents potentially on an
// older version of Nomad.
func (a *Allocation) Canonicalize() {
	// Upgrade path: older versions populated the deprecated TaskResources /
	// SharedResources fields instead of AllocatedResources. Synthesize
	// AllocatedResources from them when it is absent.
	if a.AllocatedResources == nil && a.TaskResources != nil {
		ar := AllocatedResources{}

		tasks := make(map[string]*AllocatedTaskResources, len(a.TaskResources))
		for name, tr := range a.TaskResources {
			atr := AllocatedTaskResources{}
			atr.Cpu.CpuShares = int64(tr.CPU)
			atr.Memory.MemoryMB = int64(tr.MemoryMB)
			atr.Networks = tr.Networks.Copy()
			tasks[name] = &atr
		}
		ar.Tasks = tasks

		if a.SharedResources != nil {
			ar.Shared.DiskMB = int64(a.SharedResources.DiskMB)
			ar.Shared.Networks = a.SharedResources.Networks.Copy()
		}
		a.AllocatedResources = &ar
	}

	a.Job.Canonicalize()
}
// copyImpl copies the allocation, deep-copying the Job only when job is
// true. All other pointer/reference fields (resources, metrics, deployment
// status, task states, reschedule tracker, preempted alloc IDs) are always
// deep-copied. A nil receiver yields nil.
func (a *Allocation) copyImpl(job bool) *Allocation {
	if a == nil {
		return nil
	}
	na := new(Allocation)
	*na = *a

	if job {
		na.Job = na.Job.Copy()
	}

	na.AllocatedResources = na.AllocatedResources.Copy()
	na.Resources = na.Resources.Copy()
	na.SharedResources = na.SharedResources.Copy()

	if a.TaskResources != nil {
		tr := make(map[string]*Resources, len(na.TaskResources))
		for task, resource := range na.TaskResources {
			tr[task] = resource.Copy()
		}
		na.TaskResources = tr
	}

	na.Metrics = na.Metrics.Copy()
	na.DeploymentStatus = na.DeploymentStatus.Copy()

	if a.TaskStates != nil {
		ts := make(map[string]*TaskState, len(na.TaskStates))
		for task, state := range na.TaskStates {
			ts[task] = state.Copy()
		}
		na.TaskStates = ts
	}

	na.RescheduleTracker = a.RescheduleTracker.Copy()
	na.PreemptedAllocations = helper.CopySliceString(a.PreemptedAllocations)
	return na
}
// TerminalStatus returns if the desired or actual status is terminal and
// will no longer transition.
func (a *Allocation) TerminalStatus() bool {
	// The desired state is checked first; if it isn't terminal, fall back to
	// the client-reported state.
	if a.ServerTerminalStatus() {
		return true
	}
	return a.ClientTerminalStatus()
}
// ServerTerminalStatus returns true if the desired state of the allocation is terminal
// (stop or evict).
func (a *Allocation) ServerTerminalStatus() bool {
	return a.DesiredStatus == AllocDesiredStatusStop ||
		a.DesiredStatus == AllocDesiredStatusEvict
}
// ClientTerminalStatus returns if the client status is terminal and will no longer transition
// (complete, failed, or lost).
func (a *Allocation) ClientTerminalStatus() bool {
	return a.ClientStatus == AllocClientStatusComplete ||
		a.ClientStatus == AllocClientStatusFailed ||
		a.ClientStatus == AllocClientStatusLost
}
// ShouldReschedule returns if the allocation is eligible to be rescheduled according
// to its status and ReschedulePolicy given its failure time: the server must
// not have marked it stopped or evicted, the client must report it failed,
// and the reschedule policy must still permit an attempt.
func (a *Allocation) ShouldReschedule(reschedulePolicy *ReschedulePolicy, failTime time.Time) bool {
	if a.DesiredStatus == AllocDesiredStatusStop || a.DesiredStatus == AllocDesiredStatusEvict {
		return false
	}
	if a.ClientStatus != AllocClientStatusFailed {
		return false
	}
	return a.RescheduleEligible(reschedulePolicy, failTime)
}
// RescheduleEligible returns if the allocation is eligible to be rescheduled according
// to its ReschedulePolicy and the current state of its reschedule trackers.
// A nil policy, or a policy with neither Unlimited nor a positive Attempts,
// is never eligible; an Unlimited policy always is. Otherwise eligibility
// depends on how many attempts fall within the policy interval ending at
// failTime.
func (a *Allocation) RescheduleEligible(reschedulePolicy *ReschedulePolicy, failTime time.Time) bool {
	if reschedulePolicy == nil {
		return false
	}
	attempts := reschedulePolicy.Attempts
	enabled := attempts > 0 || reschedulePolicy.Unlimited
	if !enabled {
		return false
	}
	if reschedulePolicy.Unlimited {
		return true
	}
	// Early return true if there are no attempts yet and the number of allowed attempts is > 0
	if (a.RescheduleTracker == nil || len(a.RescheduleTracker.Events) == 0) && attempts > 0 {
		return true
	}
	attempted, _ := a.rescheduleInfo(reschedulePolicy, failTime)
	return attempted < attempts
}
// rescheduleInfo reports how many reschedule attempts fall inside the policy
// interval ending at failTime, together with the total attempts the policy
// allows. A nil policy reports (0, 0).
func (a *Allocation) rescheduleInfo(reschedulePolicy *ReschedulePolicy, failTime time.Time) (int, int) {
	if reschedulePolicy == nil {
		return 0, 0
	}
	attempts := reschedulePolicy.Attempts

	attempted := 0
	if a.RescheduleTracker != nil && attempts > 0 {
		// An event counts when it happened strictly after failTime minus the
		// policy interval.
		cutoff := failTime.UTC().UnixNano() - reschedulePolicy.Interval.Nanoseconds()
		for _, event := range a.RescheduleTracker.Events {
			if event.RescheduleTime > cutoff {
				attempted++
			}
		}
	}
	return attempted, attempts
}
// RescheduleInfo returns the attempts consumed within the policy interval and
// the total attempts allowed, evaluated with the allocation's own policy and
// last event time.
func (a *Allocation) RescheduleInfo() (int, int) {
	return a.rescheduleInfo(a.ReschedulePolicy(), a.LastEventTime())
}
// LastEventTime is the time of the last task event in the allocation.
// It is used to determine allocation failure time. If the FinishedAt field
// is not set, the alloc's modify time is used
func (a *Allocation) LastEventTime() time.Time {
	var last time.Time
	for _, state := range a.TaskStates {
		if last.IsZero() || state.FinishedAt.After(last) {
			last = state.FinishedAt
		}
	}
	if last.IsZero() {
		last = time.Unix(0, a.ModifyTime).UTC()
	}
	return last
}
// ReschedulePolicy returns the reschedule policy based on the task group;
// nil when the task group cannot be found.
func (a *Allocation) ReschedulePolicy() *ReschedulePolicy {
	if tg := a.Job.LookupTaskGroup(a.TaskGroup); tg != nil {
		return tg.ReschedulePolicy
	}
	return nil
}
// MigrateStrategy returns the migrate strategy based on the task group; nil
// when the task group cannot be found.
func (a *Allocation) MigrateStrategy() *MigrateStrategy {
	if tg := a.Job.LookupTaskGroup(a.TaskGroup); tg != nil {
		return tg.Migrate
	}
	return nil
}
// NextRescheduleTime returns a time on or after which the allocation is eligible to be rescheduled,
// and whether the next reschedule time is within policy's interval if the policy doesn't allow unlimited reschedules.
// The zero time and false are returned when the allocation is stopped, has
// not failed on the client, has no usable failure time, or has no policy.
func (a *Allocation) NextRescheduleTime() (time.Time, bool) {
	failTime := a.LastEventTime()
	reschedulePolicy := a.ReschedulePolicy()
	if a.DesiredStatus == AllocDesiredStatusStop || a.ClientStatus != AllocClientStatusFailed || failTime.IsZero() || reschedulePolicy == nil {
		return time.Time{}, false
	}

	return a.nextRescheduleTime(failTime, reschedulePolicy)
}
// nextRescheduleTime computes failTime plus the policy's next delay, and
// whether rescheduling at that time is still permitted: always for an
// unlimited policy; for a bounded policy, only while attempts remain and the
// delay has not outgrown the policy interval.
func (a *Allocation) nextRescheduleTime(failTime time.Time, reschedulePolicy *ReschedulePolicy) (time.Time, bool) {
	nextDelay := a.NextDelay()
	nextRescheduleTime := failTime.Add(nextDelay)
	rescheduleEligible := reschedulePolicy.Unlimited || (reschedulePolicy.Attempts > 0 && a.RescheduleTracker == nil)
	if reschedulePolicy.Attempts > 0 && a.RescheduleTracker != nil && a.RescheduleTracker.Events != nil {
		// Check for eligibility based on the interval if max attempts is set
		attempted, attempts := a.rescheduleInfo(reschedulePolicy, failTime)
		rescheduleEligible = attempted < attempts && nextDelay < reschedulePolicy.Interval
	}
	return nextRescheduleTime, rescheduleEligible
}
// NextRescheduleTimeByFailTime works like NextRescheduleTime but allows callers
// specify a failure time. Useful for things like determining whether to reschedule
// an alloc on a disconnected node.
func (a *Allocation) NextRescheduleTimeByFailTime(failTime time.Time) (time.Time, bool) {
	policy := a.ReschedulePolicy()
	if policy == nil {
		var never time.Time
		return never, false
	}
	return a.nextRescheduleTime(failTime, policy)
}
// ShouldClientStop tests an alloc for StopAfterClientDisconnect configuration:
// true only when the task group exists and has a non-zero
// StopAfterClientDisconnect set.
func (a *Allocation) ShouldClientStop() bool {
	tg := a.Job.LookupTaskGroup(a.TaskGroup)
	return tg != nil &&
		tg.StopAfterClientDisconnect != nil &&
		*tg.StopAfterClientDisconnect != 0
}
// WaitClientStop uses the reschedule delay mechanism to block rescheduling until
// StopAfterClientDisconnect's block interval passes: it returns the time of
// the first lost transition (or now, when the alloc has not been marked lost
// yet) plus StopAfterClientDisconnect plus the group's largest kill timeout.
//
// The task-loop variable was renamed from t to task; it previously shadowed
// the outer time.Time variable t.
func (a *Allocation) WaitClientStop() time.Time {
	tg := a.Job.LookupTaskGroup(a.TaskGroup)

	// An alloc can only be marked lost once, so use the first lost transition
	var t time.Time
	for _, s := range a.AllocStates {
		if s.Field == AllocStateFieldClientStatus &&
			s.Value == AllocClientStatusLost {
			t = s.Time
			break
		}
	}

	// On the first pass, the alloc hasn't been marked lost yet, and so we start
	// counting from now
	if t.IsZero() {
		t = time.Now().UTC()
	}

	// Find the max kill timeout
	kill := DefaultKillTimeout
	for _, task := range tg.Tasks {
		if task.KillTimeout > kill {
			kill = task.KillTimeout
		}
	}

	return t.Add(*tg.StopAfterClientDisconnect + kill)
}
// DisconnectTimeout uses the MaxClientDisconnect to compute when the allocation
// should transition to lost. It returns now unchanged when the allocation,
// its job, its task group, or the group's MaxClientDisconnect is absent.
//
// The nil check on tg is new: LookupTaskGroup returns nil for an unknown
// group (as the sibling ReschedulePolicy/MigrateStrategy methods guard
// against), and this method previously dereferenced it unconditionally.
func (a *Allocation) DisconnectTimeout(now time.Time) time.Time {
	if a == nil || a.Job == nil {
		return now
	}

	tg := a.Job.LookupTaskGroup(a.TaskGroup)
	if tg == nil || tg.MaxClientDisconnect == nil {
		return now
	}

	return now.Add(*tg.MaxClientDisconnect)
}
// SupportsDisconnectedClients determines whether both the server and the task group
// are configured to allow the allocation to reconnect after network connectivity
// has been lost and then restored.
func (a *Allocation) SupportsDisconnectedClients(serverSupportsDisconnectedClients bool) bool {
	if !serverSupportsDisconnectedClients || a.Job == nil {
		return false
	}
	tg := a.Job.LookupTaskGroup(a.TaskGroup)
	return tg != nil && tg.MaxClientDisconnect != nil
}
// NextDelay returns a duration after which the allocation can be rescheduled.
// It is calculated according to the delay function and previous reschedule attempts.
// With no prior attempts (or no policy delay function match) the policy's
// base Delay is returned; "exponential" doubles the last attempt's delay and
// "fibonacci" sums the last two, both capped at MaxDelay. After the cap, the
// delay resets to the base Delay once enough time has passed since the last
// attempt.
func (a *Allocation) NextDelay() time.Duration {
	policy := a.ReschedulePolicy()
	// Can be nil if the task group was updated to remove its reschedule policy
	if policy == nil {
		return 0
	}
	delayDur := policy.Delay
	if a.RescheduleTracker == nil || a.RescheduleTracker.Events == nil || len(a.RescheduleTracker.Events) == 0 {
		return delayDur
	}
	events := a.RescheduleTracker.Events
	switch policy.DelayFunction {
	case "exponential":
		delayDur = a.RescheduleTracker.Events[len(a.RescheduleTracker.Events)-1].Delay * 2
	case "fibonacci":
		if len(events) >= 2 {
			fibN1Delay := events[len(events)-1].Delay
			fibN2Delay := events[len(events)-2].Delay
			// Handle reset of delay ceiling which should cause
			// a new series to start
			if fibN2Delay == policy.MaxDelay && fibN1Delay == policy.Delay {
				delayDur = fibN1Delay
			} else {
				delayDur = fibN1Delay + fibN2Delay
			}
		}
	default:
		return delayDur
	}
	if policy.MaxDelay > 0 && delayDur > policy.MaxDelay {
		delayDur = policy.MaxDelay
		// check if delay needs to be reset
		lastRescheduleEvent := a.RescheduleTracker.Events[len(a.RescheduleTracker.Events)-1]
		timeDiff := a.LastEventTime().UTC().UnixNano() - lastRescheduleEvent.RescheduleTime
		if timeDiff > delayDur.Nanoseconds() {
			delayDur = policy.Delay
		}
	}

	return delayDur
}
// Terminated returns if the allocation is in a terminal state on a client.
func (a *Allocation) Terminated() bool {
	switch a.ClientStatus {
	case AllocClientStatusFailed,
		AllocClientStatusComplete,
		AllocClientStatusLost:
		return true
	default:
		return false
	}
}
// SetStop updates the allocation in place to a DesiredStatus stop, with the ClientStatus
// and client description provided, and records the client-status transition in
// AllocStates.
func (a *Allocation) SetStop(clientStatus, clientDesc string) {
	a.DesiredStatus = AllocDesiredStatusStop
	a.ClientStatus = clientStatus
	a.ClientDescription = clientDesc
	// Record the transition so it can be inspected later (see AppendState).
	a.AppendState(AllocStateFieldClientStatus, clientStatus)
}
// AppendState creates and appends an AllocState entry recording the time of the state
// transition. Used to mark the transition to lost.
// The timestamp is always captured in UTC.
func (a *Allocation) AppendState(field AllocStateField, value string) {
	a.AllocStates = append(a.AllocStates, &AllocState{
		Field: field,
		Value: value,
		Time:  time.Now().UTC(),
	})
}
// RanSuccessfully returns whether the client has ran the allocation and all
// tasks finished successfully. Critically this function returns whether the
// allocation has ran to completion and not just that the alloc has converged to
// its desired state. That is to say that a batch allocation must have finished
// with exit code 0 on all task groups. This doesn't really have meaning on a
// non-batch allocation because a service and system allocation should not
// finish.
func (a *Allocation) RanSuccessfully() bool {
	// No task states means the client never started the allocation.
	if len(a.TaskStates) == 0 {
		return false
	}

	// Any unsuccessful task makes the whole allocation unsuccessful.
	for _, state := range a.TaskStates {
		if !state.Successful() {
			return false
		}
	}
	return true
}
// ShouldMigrate returns if the allocation needs data migration from its
// previous allocation's ephemeral disk.
func (a *Allocation) ShouldMigrate() bool {
	// Nothing to migrate from without a previous allocation.
	if a.PreviousAllocation == "" {
		return false
	}

	// Stopped or evicted allocations do not migrate data.
	if a.DesiredStatus == AllocDesiredStatusStop || a.DesiredStatus == AllocDesiredStatusEvict {
		return false
	}

	// Guard against a nil job, consistent with the other Allocation helpers
	// (e.g. Expired, DisconnectTimeout); previously this panicked below.
	if a.Job == nil {
		return false
	}

	tg := a.Job.LookupTaskGroup(a.TaskGroup)

	// if the task group is nil or the ephemeral disk block isn't present then
	// we won't migrate
	if tg == nil || tg.EphemeralDisk == nil {
		return false
	}

	// We won't migrate any data if the user hasn't enabled migration or the
	// disk is not marked as sticky
	if !tg.EphemeralDisk.Migrate || !tg.EphemeralDisk.Sticky {
		return false
	}

	return true
}
// SetEventDisplayMessages populates the display message if its not already set,
// a temporary fix to handle old allocations that don't have it.
// This method will be removed in a future release.
func (a *Allocation) SetEventDisplayMessages() {
	// Delegates to the shared helper also used by AllocListStub.
	setDisplayMsg(a.TaskStates)
}
// ComparableResources returns the resources on the allocation
// handling upgrade paths. After 0.11 calls to this should be replaced with:
// alloc.AllocatedResources.Comparable()
//
// Returns nil if the allocation carries no resource information at all.
//
// COMPAT(0.11): Remove in 0.11
func (a *Allocation) ComparableResources() *ComparableResources {
	// Alloc already has 0.9+ behavior
	if a.AllocatedResources != nil {
		return a.AllocatedResources.Comparable()
	}

	var resources *Resources
	if a.Resources != nil {
		resources = a.Resources
	} else if a.TaskResources != nil {
		// Sum shared and per-task resources into one view.
		resources = new(Resources)
		resources.Add(a.SharedResources)
		for _, taskResource := range a.TaskResources {
			resources.Add(taskResource)
		}
	}

	// Guard against allocations with neither legacy nor task resources set;
	// previously this dereferenced a nil *Resources below.
	if resources == nil {
		return nil
	}

	// Upgrade path
	return &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: int64(resources.CPU),
			},
			Memory: AllocatedMemoryResources{
				MemoryMB:    int64(resources.MemoryMB),
				MemoryMaxMB: int64(resources.MemoryMaxMB),
			},
			Networks: resources.Networks,
		},
		Shared: AllocatedSharedResources{
			DiskMB: int64(resources.DiskMB),
		},
	}
}
// LookupTask by name from the Allocation. Returns nil if the Job is not set, the
// TaskGroup does not exist, or the task name cannot be found.
func (a *Allocation) LookupTask(name string) *Task {
	if a.Job == nil {
		return nil
	}

	if tg := a.Job.LookupTaskGroup(a.TaskGroup); tg != nil {
		return tg.LookupTask(name)
	}
	return nil
}
// Stub returns a list stub for the allocation. The fields argument selects
// optional payloads: a nil fields keeps the default shape (task states
// included, allocated resources omitted).
func (a *Allocation) Stub(fields *AllocStubFields) *AllocListStub {
	s := &AllocListStub{
		ID:                    a.ID,
		EvalID:                a.EvalID,
		Name:                  a.Name,
		Namespace:             a.Namespace,
		NodeID:                a.NodeID,
		NodeName:              a.NodeName,
		JobID:                 a.JobID,
		JobType:               a.Job.Type,
		JobVersion:            a.Job.Version,
		TaskGroup:             a.TaskGroup,
		DesiredStatus:         a.DesiredStatus,
		DesiredDescription:    a.DesiredDescription,
		ClientStatus:          a.ClientStatus,
		ClientDescription:     a.ClientDescription,
		DesiredTransition:     a.DesiredTransition,
		TaskStates:            a.TaskStates,
		DeploymentStatus:      a.DeploymentStatus,
		FollowupEvalID:        a.FollowupEvalID,
		RescheduleTracker:     a.RescheduleTracker,
		PreemptedAllocations:  a.PreemptedAllocations,
		PreemptedByAllocation: a.PreemptedByAllocation,
		CreateIndex:           a.CreateIndex,
		ModifyIndex:           a.ModifyIndex,
		CreateTime:            a.CreateTime,
		ModifyTime:            a.ModifyTime,
	}

	if fields != nil {
		// Resources opts in; TaskStates opts out (kept by default above).
		if fields.Resources {
			s.AllocatedResources = a.AllocatedResources
		}
		if !fields.TaskStates {
			s.TaskStates = nil
		}
	}

	return s
}
// AllocationDiff converts an Allocation type to an AllocationDiff type
// If at any time, modification are made to AllocationDiff so that an
// Allocation can no longer be safely converted to AllocationDiff,
// this method should be changed accordingly.
// This is a zero-cost pointer conversion, not a copy.
func (a *Allocation) AllocationDiff() *AllocationDiff {
	return (*AllocationDiff)(a)
}
// Expired determines whether an allocation has exceeded its MaxClientDisconnect
// duration relative to the passed time stamp.
func (a *Allocation) Expired(now time.Time) bool {
	if a == nil || a.Job == nil {
		return false
	}

	// If alloc is not Unknown it cannot be expired.
	if a.ClientStatus != AllocClientStatusUnknown {
		return false
	}

	// Without a recorded transition to unknown there is no reference point.
	lastUnknown := a.LastUnknown()
	if lastUnknown.IsZero() {
		return false
	}

	tg := a.Job.LookupTaskGroup(a.TaskGroup)
	if tg == nil {
		return false
	}

	// An unset MaxClientDisconnect means the alloc never expires this way.
	if tg.MaxClientDisconnect == nil {
		return false
	}

	// The expiry instant itself counts as expired.
	expiry := lastUnknown.Add(*tg.MaxClientDisconnect)
	return now.UTC().After(expiry) || now.UTC().Equal(expiry)
}
// LastUnknown returns the timestamp for the last time the allocation
// transitioned into the unknown client status. The zero time is returned when
// no such transition was recorded.
func (a *Allocation) LastUnknown() time.Time {
	var last time.Time

	for _, state := range a.AllocStates {
		if state.Field != AllocStateFieldClientStatus ||
			state.Value != AllocClientStatusUnknown {
			continue
		}
		// Keep the most recent transition seen so far.
		if last.IsZero() || last.Before(state.Time) {
			last = state.Time
		}
	}

	return last.UTC()
}
// Reconnected determines whether a reconnect event has occurred for any task
// and whether that event occurred within the allowable duration specified by MaxClientDisconnect.
// The first result reports that a reconnect happened at all; the second
// reports whether the allocation had already expired at that moment.
func (a *Allocation) Reconnected() (bool, bool) {
	var latest time.Time

	// Scan every task's events for the most recent client-reconnected event.
	for _, taskState := range a.TaskStates {
		for _, event := range taskState.Events {
			if event.Type != TaskClientReconnected {
				continue
			}
			if when := time.Unix(0, event.Time).UTC(); latest.IsZero() || latest.Before(when) {
				latest = when
			}
		}
	}

	if latest.IsZero() {
		return false, false
	}
	return true, a.Expired(latest)
}
// AllocationDiff is another named type for Allocation (to use the same fields),
// which is used to represent the delta for an Allocation. Note that methods
// defined on Allocation are not inherited by this named type; if you need a
// method on the diff, define it for AllocationDiff as well (or convert via
// Allocation.AllocationDiff).
type AllocationDiff Allocation
// AllocListStub is used to return a subset of alloc information
// (see Allocation.Stub). AllocatedResources is only populated when requested
// via AllocStubFields, and TaskStates may be nil when excluded the same way.
type AllocListStub struct {
	ID                    string
	EvalID                string
	Name                  string
	Namespace             string
	NodeID                string
	NodeName              string
	JobID                 string
	JobType               string
	JobVersion            uint64
	TaskGroup             string
	AllocatedResources    *AllocatedResources `json:",omitempty"`
	DesiredStatus         string
	DesiredDescription    string
	ClientStatus          string
	ClientDescription     string
	DesiredTransition     DesiredTransition
	TaskStates            map[string]*TaskState
	DeploymentStatus      *AllocDeploymentStatus
	FollowupEvalID        string
	RescheduleTracker     *RescheduleTracker
	PreemptedAllocations  []string
	PreemptedByAllocation string
	CreateIndex           uint64
	ModifyIndex           uint64
	CreateTime            int64
	ModifyTime            int64
}
// SetEventDisplayMessages populates the display message if its not already
// set, a temporary fix to handle old allocations that don't have it. This
// method will be removed in a future release.
func (a *AllocListStub) SetEventDisplayMessages() {
	// Delegates to the shared helper also used by Allocation.
	setDisplayMsg(a.TaskStates)
}
// setDisplayMsg backfills the human-readable display message on every task
// event in the given task states. Shared by Allocation and AllocListStub.
func setDisplayMsg(taskStates map[string]*TaskState) {
	for _, taskState := range taskStates {
		for _, event := range taskState.Events {
			event.PopulateEventDisplayMessage()
		}
	}
}
// AllocStubFields defines which fields are included in the AllocListStub.
// See Allocation.Stub for how these selections are applied.
type AllocStubFields struct {
	// Resources includes resource-related fields if true.
	Resources bool

	// TaskStates removes the TaskStates field if false (default is to
	// include TaskStates).
	TaskStates bool
}
// NewAllocStubFields returns the default field selection for allocation list
// stubs.
func NewAllocStubFields() *AllocStubFields {
	fields := new(AllocStubFields)
	// Maintain backward compatibility by retaining task states by default.
	fields.TaskStates = true
	return fields
}
// AllocMetric is used to track various metrics while attempting
// to make an allocation. These are used to debug a job, or to better
// understand the pressure within the system.
type AllocMetric struct {
	// NodesEvaluated is the number of nodes that were evaluated
	NodesEvaluated int

	// NodesFiltered is the number of nodes filtered due to a constraint
	NodesFiltered int

	// NodesAvailable is the number of nodes available for evaluation per DC.
	NodesAvailable map[string]int

	// ClassFiltered is the number of nodes filtered by class
	ClassFiltered map[string]int

	// ConstraintFiltered is the number of failures caused by constraint
	ConstraintFiltered map[string]int

	// NodesExhausted is the number of nodes skipped due to being
	// exhausted of at least one resource
	NodesExhausted int

	// ClassExhausted is the number of nodes exhausted by class
	ClassExhausted map[string]int

	// DimensionExhausted provides the count by dimension or reason
	DimensionExhausted map[string]int

	// QuotaExhausted provides the exhausted dimensions
	QuotaExhausted []string

	// ResourcesExhausted provides the amount of resources exhausted by task
	// during the allocation placement
	ResourcesExhausted map[string]*Resources

	// Scores is the scores of the final few nodes remaining
	// for placement. The top score is typically selected.
	// Deprecated: Replaced by ScoreMetaData in Nomad 0.9
	Scores map[string]float64

	// ScoreMetaData is a slice of top scoring nodes displayed in the CLI
	ScoreMetaData []*NodeScoreMeta

	// nodeScoreMeta is used to keep scores for a single node id. It is cleared out after
	// we receive normalized score during the last step of the scoring stack.
	nodeScoreMeta *NodeScoreMeta

	// topScores is used to maintain a heap of the top K nodes with
	// the highest normalized score
	topScores *kheap.ScoreHeap

	// AllocationTime is a measure of how long the allocation
	// attempt took. This can affect performance and SLAs.
	AllocationTime time.Duration

	// CoalescedFailures indicates the number of other
	// allocations that were coalesced into this failed allocation.
	// This is to prevent creating many failed allocations for a
	// single task group.
	CoalescedFailures int
}
// Copy returns a deep copy of the metric. Map and slice fields are duplicated
// so the copy can be mutated independently; the unexported scoring scratch
// fields (nodeScoreMeta, topScores) are shared via the shallow struct copy.
func (a *AllocMetric) Copy() *AllocMetric {
	if a == nil {
		return nil
	}
	na := new(AllocMetric)
	*na = *a
	na.NodesAvailable = helper.CopyMapStringInt(na.NodesAvailable)
	na.ClassFiltered = helper.CopyMapStringInt(na.ClassFiltered)
	na.ConstraintFiltered = helper.CopyMapStringInt(na.ConstraintFiltered)
	na.ClassExhausted = helper.CopyMapStringInt(na.ClassExhausted)
	na.DimensionExhausted = helper.CopyMapStringInt(na.DimensionExhausted)
	na.QuotaExhausted = helper.CopySliceString(na.QuotaExhausted)
	na.Scores = helper.CopyMapStringFloat64(na.Scores)
	na.ScoreMetaData = CopySliceNodeScoreMeta(na.ScoreMetaData)
	return na
}
// EvaluateNode records that one more node was evaluated for placement.
func (a *AllocMetric) EvaluateNode() {
	a.NodesEvaluated++
}
// FilterNode records that a node was filtered out, additionally bucketing the
// count by node class and by the failing constraint when those are known.
func (a *AllocMetric) FilterNode(node *Node, constraint string) {
	a.NodesFiltered++

	// Per-class count, lazily allocating the map.
	if node != nil && node.NodeClass != "" {
		if a.ClassFiltered == nil {
			a.ClassFiltered = map[string]int{}
		}
		a.ClassFiltered[node.NodeClass]++
	}

	// Per-constraint count, lazily allocating the map.
	if constraint != "" {
		if a.ConstraintFiltered == nil {
			a.ConstraintFiltered = map[string]int{}
		}
		a.ConstraintFiltered[constraint]++
	}
}
// ExhaustedNode records that a node was skipped for lacking capacity,
// additionally bucketing the count by node class and by the exhausted
// dimension when those are known.
func (a *AllocMetric) ExhaustedNode(node *Node, dimension string) {
	a.NodesExhausted++

	// Per-class count, lazily allocating the map.
	if node != nil && node.NodeClass != "" {
		if a.ClassExhausted == nil {
			a.ClassExhausted = map[string]int{}
		}
		a.ClassExhausted[node.NodeClass]++
	}

	// Per-dimension count, lazily allocating the map.
	if dimension != "" {
		if a.DimensionExhausted == nil {
			a.DimensionExhausted = map[string]int{}
		}
		a.DimensionExhausted[dimension]++
	}
}
// ExhaustQuota appends the given exhausted quota dimensions to the metric,
// lazily allocating the slice on first use.
func (a *AllocMetric) ExhaustQuota(dimensions []string) {
	if a.QuotaExhausted == nil {
		a.QuotaExhausted = make([]string, 0, len(dimensions))
	}

	a.QuotaExhausted = append(a.QuotaExhausted, dimensions...)
}
// ExhaustResources updates the amount of resources exhausted for the
// allocation because of the given task group.
func (a *AllocMetric) ExhaustResources(tg *TaskGroup) {
	// Only meaningful if ExhaustedNode has already recorded dimensions.
	if a.DimensionExhausted == nil {
		return
	}

	if a.ResourcesExhausted == nil {
		a.ResourcesExhausted = make(map[string]*Resources)
	}

	// Accumulate the exhausted memory/cpu per task, keyed by task name.
	for _, t := range tg.Tasks {
		exhaustedResources := a.ResourcesExhausted[t.Name]
		if exhaustedResources == nil {
			exhaustedResources = &Resources{}
		}
		if a.DimensionExhausted["memory"] > 0 {
			exhaustedResources.MemoryMB += t.Resources.MemoryMB
		}
		if a.DimensionExhausted["cpu"] > 0 {
			exhaustedResources.CPU += t.Resources.CPU
		}
		a.ResourcesExhausted[t.Name] = exhaustedResources
	}
}
// ScoreNode is used to gather top K scoring nodes in a heap.
// Per-scorer scores are accumulated in nodeScoreMeta until the normalized
// score arrives, at which point the entry is pushed onto the topScores heap.
func (a *AllocMetric) ScoreNode(node *Node, name string, score float64) {
	// Create nodeScoreMeta lazily if its the first time or if its a new node
	if a.nodeScoreMeta == nil || a.nodeScoreMeta.NodeID != node.ID {
		a.nodeScoreMeta = &NodeScoreMeta{
			NodeID: node.ID,
			Scores: make(map[string]float64),
		}
	}
	if name == NormScorerName {
		a.nodeScoreMeta.NormScore = score
		// Once we have the normalized score we can push to the heap
		// that tracks top K by normalized score

		// Create the heap if its not there already
		if a.topScores == nil {
			a.topScores = kheap.NewScoreHeap(MaxRetainedNodeScores)
		}
		heap.Push(a.topScores, a.nodeScoreMeta)

		// Clear out this entry because its now in the heap
		a.nodeScoreMeta = nil
	} else {
		// Intermediate scorer: just record the score under the scorer's name.
		a.nodeScoreMeta.Scores[name] = score
	}
}
// PopulateScoreMetaData populates a map of scorer to scoring metadata
// The map is populated by popping elements from a heap of top K scores
// maintained per scorer
func (a *AllocMetric) PopulateScoreMetaData() {
	if a.topScores == nil {
		return
	}

	if a.ScoreMetaData == nil {
		a.ScoreMetaData = make([]*NodeScoreMeta, a.topScores.Len())
	}
	// NOTE(review): assumes a pre-existing ScoreMetaData slice is at least as
	// long as the heap; a shorter one would panic on the indexed write below —
	// confirm callers invoke this at most once per metric.
	heapItems := a.topScores.GetItemsReverse()
	for i, item := range heapItems {
		a.ScoreMetaData[i] = item.(*NodeScoreMeta)
	}
}
// MaxNormScore returns the ScoreMetaData entry with the highest normalized
// score, or nil when the metric is nil or has no score metadata.
func (a *AllocMetric) MaxNormScore() *NodeScoreMeta {
	if a != nil && len(a.ScoreMetaData) > 0 {
		return a.ScoreMetaData[0]
	}
	return nil
}
// NodeScoreMeta captures scoring meta data derived from
// different scoring factors.
type NodeScoreMeta struct {
	// NodeID identifies the scored node.
	NodeID string
	// Scores holds per-scorer scores keyed by scorer name.
	Scores map[string]float64
	// NormScore is the final normalized score for the node.
	NormScore float64
}
// Copy returns a shallow copy of the score metadata (the Scores map is
// shared with the original), or nil for a nil receiver.
func (s *NodeScoreMeta) Copy() *NodeScoreMeta {
	if s == nil {
		return nil
	}
	dup := *s
	return &dup
}
// String renders the node id, normalized score, and per-scorer scores for
// debugging output.
func (s *NodeScoreMeta) String() string {
	return fmt.Sprintf("%s %f %v", s.NodeID, s.NormScore, s.Scores)
}
// Score returns the normalized score (used when ordering entries, e.g. in the
// top-K score heap).
func (s *NodeScoreMeta) Score() float64 {
	return s.NormScore
}
// Data returns the metadata itself as an opaque value (heap-item accessor).
func (s *NodeScoreMeta) Data() interface{} {
	return s
}
// AllocNetworkStatus captures the status of an allocation's network during runtime.
// Depending on the network mode, an allocation's address may need to be known to other
// systems in Nomad such as service registration.
type AllocNetworkStatus struct {
	// InterfaceName is the network interface the allocation uses.
	InterfaceName string
	// Address is the allocation's network address.
	Address string
	// DNS holds the allocation's DNS configuration, if any.
	DNS *DNSConfig
}
// Copy returns a deep copy of the network status, or nil for a nil receiver.
func (a *AllocNetworkStatus) Copy() *AllocNetworkStatus {
	if a == nil {
		return nil
	}
	dup := new(AllocNetworkStatus)
	dup.InterfaceName = a.InterfaceName
	dup.Address = a.Address
	dup.DNS = a.DNS.Copy()
	return dup
}
// AllocDeploymentStatus captures the status of the allocation as part of the
// deployment. This can include things like if the allocation has been marked as
// healthy.
type AllocDeploymentStatus struct {
	// Healthy marks whether the allocation has been marked healthy or unhealthy
	// as part of a deployment. It can be unset if it has neither been marked
	// healthy or unhealthy.
	Healthy *bool

	// Timestamp is the time at which the health status was set.
	Timestamp time.Time

	// Canary marks whether the allocation is a canary or not. A canary that has
	// been promoted will have this field set to false.
	Canary bool

	// ModifyIndex is the raft index in which the deployment status was last
	// changed.
	ModifyIndex uint64
}
// HasHealth returns true if the allocation has its health set.
// Safe to call on a nil receiver.
func (a *AllocDeploymentStatus) HasHealth() bool {
	return a != nil && a.Healthy != nil
}
// IsHealthy returns if the allocation is marked as healthy as part of a
// deployment. Safe to call on a nil receiver.
func (a *AllocDeploymentStatus) IsHealthy() bool {
	return a != nil && a.Healthy != nil && *a.Healthy
}
// IsUnhealthy returns if the allocation is marked as unhealthy as part of a
// deployment. Safe to call on a nil receiver.
func (a *AllocDeploymentStatus) IsUnhealthy() bool {
	return a != nil && a.Healthy != nil && !*a.Healthy
}
// IsCanary returns if the allocation is marked as a canary.
// Safe to call on a nil receiver.
func (a *AllocDeploymentStatus) IsCanary() bool {
	return a != nil && a.Canary
}
// Copy returns a deep copy of the deployment status (the Healthy pointer is
// duplicated), or nil for a nil receiver.
func (a *AllocDeploymentStatus) Copy() *AllocDeploymentStatus {
	if a == nil {
		return nil
	}

	c := new(AllocDeploymentStatus)
	*c = *a

	// Give the copy its own Healthy pointer so the original is isolated.
	if a.Healthy != nil {
		healthy := *a.Healthy
		c.Healthy = &healthy
	}

	return c
}
// Evaluation status values.
const (
	EvalStatusBlocked   = "blocked"
	EvalStatusPending   = "pending"
	EvalStatusComplete  = "complete"
	EvalStatusFailed    = "failed"
	EvalStatusCancelled = "canceled"
)
// Reasons an evaluation can be triggered (recorded in Evaluation.TriggeredBy).
const (
	EvalTriggerJobRegister          = "job-register"
	EvalTriggerJobDeregister        = "job-deregister"
	EvalTriggerPeriodicJob          = "periodic-job"
	EvalTriggerNodeDrain            = "node-drain"
	EvalTriggerNodeUpdate           = "node-update"
	EvalTriggerAllocStop            = "alloc-stop"
	EvalTriggerScheduled            = "scheduled"
	EvalTriggerRollingUpdate        = "rolling-update"
	EvalTriggerDeploymentWatcher    = "deployment-watcher"
	EvalTriggerFailedFollowUp       = "failed-follow-up"
	EvalTriggerMaxPlans             = "max-plan-attempts"
	EvalTriggerRetryFailedAlloc     = "alloc-failure"
	EvalTriggerQueuedAllocs         = "queued-allocs"
	EvalTriggerPreemption           = "preemption"
	EvalTriggerScaling              = "job-scaling"
	EvalTriggerMaxDisconnectTimeout = "max-disconnect-timeout"
	EvalTriggerReconnect            = "reconnect"
)
// Core (internal housekeeping) job identifiers.
const (
	// CoreJobEvalGC is used for the garbage collection of evaluations
	// and allocations. We periodically scan evaluations in a terminal state,
	// in which all the corresponding allocations are also terminal. We
	// delete these out of the system to bound the state.
	CoreJobEvalGC = "eval-gc"

	// CoreJobNodeGC is used for the garbage collection of failed nodes.
	// We periodically scan nodes in a terminal state, and if they have no
	// corresponding allocations we delete these out of the system.
	CoreJobNodeGC = "node-gc"

	// CoreJobJobGC is used for the garbage collection of eligible jobs. We
	// periodically scan garbage collectible jobs and check if both their
	// evaluations and allocations are terminal. If so, we delete these out of
	// the system.
	CoreJobJobGC = "job-gc"

	// CoreJobDeploymentGC is used for the garbage collection of eligible
	// deployments. We periodically scan garbage collectible deployments and
	// check if they are terminal. If so, we delete these out of the system.
	CoreJobDeploymentGC = "deployment-gc"

	// CoreJobCSIVolumeClaimGC is used for the garbage collection of CSI
	// volume claims. We periodically scan volumes to see if no allocs are
	// claiming them. If so, we unclaim the volume.
	CoreJobCSIVolumeClaimGC = "csi-volume-claim-gc"

	// CoreJobCSIPluginGC is used for the garbage collection of CSI plugins.
	// We periodically scan plugins to see if they have no associated volumes
	// or allocs running them. If so, we delete the plugin.
	CoreJobCSIPluginGC = "csi-plugin-gc"

	// CoreJobOneTimeTokenGC is used for the garbage collection of one-time
	// tokens. We periodically scan for expired tokens and delete them.
	CoreJobOneTimeTokenGC = "one-time-token-gc"

	// CoreJobForceGC is used to force garbage collection of all GCable objects.
	CoreJobForceGC = "force-gc"
)
// Evaluation is used anytime we need to apply business logic as a result
// of a change to our desired state (job specification) or the emergent state
// (registered nodes). When the inputs change, we need to "evaluate" them,
// potentially taking action (allocation of work) or doing nothing if the state
// of the world does not require it.
type Evaluation struct {
	// msgpack omit empty fields during serialization
	_struct bool `codec:",omitempty"` // nolint: structcheck

	// ID is a randomly generated UUID used for this evaluation. This
	// is assigned upon the creation of the evaluation.
	ID string

	// Namespace is the namespace the evaluation is created in
	Namespace string

	// Priority is used to control scheduling importance and if this job
	// can preempt other jobs.
	Priority int

	// Type is used to control which schedulers are available to handle
	// this evaluation.
	Type string

	// TriggeredBy is used to give some insight into why this Eval
	// was created. (Job change, node failure, alloc failure, etc).
	TriggeredBy string

	// JobID is the job this evaluation is scoped to. Evaluations cannot
	// be run in parallel for a given JobID, so we serialize on this.
	JobID string

	// JobModifyIndex is the modify index of the job at the time
	// the evaluation was created
	JobModifyIndex uint64

	// NodeID is the node that was affected triggering the evaluation.
	NodeID string

	// NodeModifyIndex is the modify index of the node at the time
	// the evaluation was created
	NodeModifyIndex uint64

	// DeploymentID is the ID of the deployment that triggered the evaluation.
	DeploymentID string

	// Status of the evaluation
	Status string

	// StatusDescription is meant to provide more human useful information
	StatusDescription string

	// Wait is a minimum wait time for running the eval. This is used to
	// support a rolling upgrade in versions prior to 0.7.0
	// Deprecated
	Wait time.Duration

	// WaitUntil is the time when this eval should be run. This is used to
	// supported delayed rescheduling of failed allocations, and delayed
	// stopping of allocations that are configured with max_client_disconnect.
	WaitUntil time.Time

	// NextEval is the evaluation ID for the eval created to do a followup.
	// This is used to support rolling upgrades and failed-follow-up evals, where
	// we need a chain of evaluations.
	NextEval string

	// PreviousEval is the evaluation ID for the eval creating this one to do a followup.
	// This is used to support rolling upgrades and failed-follow-up evals, where
	// we need a chain of evaluations.
	PreviousEval string

	// BlockedEval is the evaluation ID for a created blocked eval. A
	// blocked eval will be created if all allocations could not be placed due
	// to constraints or lacking resources.
	BlockedEval string

	// RelatedEvals is a list of all the evaluations that are related (next,
	// previous, or blocked) to this one. It may be nil if not requested.
	RelatedEvals []*EvaluationStub

	// FailedTGAllocs are task groups which have allocations that could not be
	// made, but the metrics are persisted so that the user can use the feedback
	// to determine the cause.
	FailedTGAllocs map[string]*AllocMetric

	// ClassEligibility tracks computed node classes that have been explicitly
	// marked as eligible or ineligible.
	ClassEligibility map[string]bool

	// QuotaLimitReached marks whether a quota limit was reached for the
	// evaluation.
	QuotaLimitReached string

	// EscapedComputedClass marks whether the job has constraints that are not
	// captured by computed node classes.
	EscapedComputedClass bool

	// AnnotatePlan triggers the scheduler to provide additional annotations
	// during the evaluation. This should not be set during normal operations.
	AnnotatePlan bool

	// QueuedAllocations is the number of unplaced allocations at the time the
	// evaluation was processed. The map is keyed by Task Group names.
	QueuedAllocations map[string]int

	// LeaderACL provides the ACL token to use when issuing RPCs back to the
	// leader. This will be a valid management token as long as the leader is
	// active. This should not ever be exposed via the API.
	LeaderACL string

	// SnapshotIndex is the Raft index of the snapshot used to process the
	// evaluation. The index will either be set when it has gone through the
	// scheduler or if a blocked evaluation is being created. The index is set
	// in this case so we can determine if an early unblocking is required since
	// capacity has changed since the evaluation was created. This can result in
	// the SnapshotIndex being less than the CreateIndex.
	SnapshotIndex uint64

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64

	CreateTime int64
	ModifyTime int64
}
// EvaluationStub is a lighter-weight subset of an Evaluation's fields;
// see Evaluation.Stub for how it is populated.
type EvaluationStub struct {
	ID                string
	Namespace         string
	Priority          int
	Type              string
	TriggeredBy       string
	JobID             string
	NodeID            string
	DeploymentID      string
	Status            string
	StatusDescription string
	WaitUntil         time.Time
	NextEval          string
	PreviousEval      string
	BlockedEval       string
	CreateIndex       uint64
	ModifyIndex       uint64
	CreateTime        int64
	ModifyTime        int64
}
// GetID implements the IDGetter interface, required for pagination.
// A nil receiver yields the empty string.
func (e *Evaluation) GetID() string {
	if e != nil {
		return e.ID
	}
	return ""
}
// GetCreateIndex implements the CreateIndexGetter interface, required for
// pagination. A nil receiver yields zero.
func (e *Evaluation) GetCreateIndex() uint64 {
	if e != nil {
		return e.CreateIndex
	}
	return 0
}
// TerminalStatus returns if the current status is terminal and
// will no longer transition.
func (e *Evaluation) TerminalStatus() bool {
	return e.Status == EvalStatusComplete ||
		e.Status == EvalStatusFailed ||
		e.Status == EvalStatusCancelled
}
// GoString renders a concise identifier for the evaluation (used by %#v).
func (e *Evaluation) GoString() string {
	return fmt.Sprintf("<Eval %q JobID: %q Namespace: %q>", e.ID, e.JobID, e.Namespace)
}
// RelatedIDs returns the non-empty IDs of the evaluations directly linked to
// this one (next, previous, and blocked). A nil receiver yields nil.
func (e *Evaluation) RelatedIDs() []string {
	if e == nil {
		return nil
	}

	related := make([]string, 0, 3)
	for _, id := range []string{e.NextEval, e.PreviousEval, e.BlockedEval} {
		if id != "" {
			related = append(related, id)
		}
	}

	return related
}
// Stub returns an EvaluationStub populated from this evaluation's scalar
// fields, or nil for a nil receiver.
func (e *Evaluation) Stub() *EvaluationStub {
	if e == nil {
		return nil
	}

	return &EvaluationStub{
		ID:                e.ID,
		Namespace:         e.Namespace,
		Priority:          e.Priority,
		Type:              e.Type,
		TriggeredBy:       e.TriggeredBy,
		JobID:             e.JobID,
		NodeID:            e.NodeID,
		DeploymentID:      e.DeploymentID,
		Status:            e.Status,
		StatusDescription: e.StatusDescription,
		WaitUntil:         e.WaitUntil,
		NextEval:          e.NextEval,
		PreviousEval:      e.PreviousEval,
		BlockedEval:       e.BlockedEval,
		CreateIndex:       e.CreateIndex,
		ModifyIndex:       e.ModifyIndex,
		CreateTime:        e.CreateTime,
		ModifyTime:        e.ModifyTime,
	}
}
// Copy returns a copy of the evaluation whose map fields are duplicated, so
// the copy can be mutated without affecting the original. Nil maps stay nil.
func (e *Evaluation) Copy() *Evaluation {
	if e == nil {
		return nil
	}
	dup := new(Evaluation)
	*dup = *e

	// Duplicate ClassEligibility.
	if e.ClassEligibility != nil {
		eligibility := make(map[string]bool, len(e.ClassEligibility))
		for class, eligible := range e.ClassEligibility {
			eligibility[class] = eligible
		}
		dup.ClassEligibility = eligibility
	}

	// Duplicate FailedTGAllocs, deep-copying each metric.
	if e.FailedTGAllocs != nil {
		failed := make(map[string]*AllocMetric, len(e.FailedTGAllocs))
		for name, metric := range e.FailedTGAllocs {
			failed[name] = metric.Copy()
		}
		dup.FailedTGAllocs = failed
	}

	// Duplicate QueuedAllocations.
	if e.QueuedAllocations != nil {
		queued := make(map[string]int, len(e.QueuedAllocations))
		for name, count := range e.QueuedAllocations {
			queued[name] = count
		}
		dup.QueuedAllocations = queued
	}

	return dup
}
// ShouldEnqueue checks if a given evaluation should be enqueued into the
// eval_broker. Panics on an unrecognized status.
func (e *Evaluation) ShouldEnqueue() bool {
	switch e.Status {
	case EvalStatusComplete, EvalStatusFailed, EvalStatusBlocked, EvalStatusCancelled:
		return false
	case EvalStatusPending:
		return true
	default:
		panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
	}
}
// ShouldBlock checks if a given evaluation should be entered into the blocked
// eval tracker. Panics on an unrecognized status.
func (e *Evaluation) ShouldBlock() bool {
	switch e.Status {
	case EvalStatusComplete, EvalStatusFailed, EvalStatusPending, EvalStatusCancelled:
		return false
	case EvalStatusBlocked:
		return true
	default:
		panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
	}
}
// MakePlan is used to make a plan from the given evaluation
// for a given Job. The job may be nil, in which case AllAtOnce stays false.
func (e *Evaluation) MakePlan(j *Job) *Plan {
	plan := &Plan{
		EvalID:          e.ID,
		Priority:        e.Priority,
		Job:             j,
		NodeUpdate:      make(map[string][]*Allocation),
		NodeAllocation:  make(map[string][]*Allocation),
		NodePreemptions: make(map[string][]*Allocation),
	}
	if j != nil {
		plan.AllAtOnce = j.AllAtOnce
	}
	return plan
}
// NextRollingEval creates an evaluation to followup this eval for rolling updates.
// The new eval is pending, chained to this one via PreviousEval, and carries
// the (deprecated) minimum Wait duration.
func (e *Evaluation) NextRollingEval(wait time.Duration) *Evaluation {
	now := time.Now().UTC().UnixNano()
	return &Evaluation{
		ID:             uuid.Generate(),
		Namespace:      e.Namespace,
		Priority:       e.Priority,
		Type:           e.Type,
		TriggeredBy:    EvalTriggerRollingUpdate,
		JobID:          e.JobID,
		JobModifyIndex: e.JobModifyIndex,
		Status:         EvalStatusPending,
		Wait:           wait,
		PreviousEval:   e.ID,
		CreateTime:     now,
		ModifyTime:     now,
	}
}
// CreateBlockedEval creates a blocked evaluation to followup this eval to place any
// failed allocations. It takes the classes marked explicitly eligible or
// ineligible, whether the job has escaped computed node classes and whether the
// quota limit was reached.
func (e *Evaluation) CreateBlockedEval(classEligibility map[string]bool,
	escaped bool, quotaReached string, failedTGAllocs map[string]*AllocMetric) *Evaluation {
	now := time.Now().UTC().UnixNano()
	// The new eval starts blocked and is chained to this one via PreviousEval.
	return &Evaluation{
		ID:                   uuid.Generate(),
		Namespace:            e.Namespace,
		Priority:             e.Priority,
		Type:                 e.Type,
		TriggeredBy:          EvalTriggerQueuedAllocs,
		JobID:                e.JobID,
		JobModifyIndex:       e.JobModifyIndex,
		Status:               EvalStatusBlocked,
		PreviousEval:         e.ID,
		FailedTGAllocs:       failedTGAllocs,
		ClassEligibility:     classEligibility,
		EscapedComputedClass: escaped,
		QuotaLimitReached:    quotaReached,
		CreateTime:           now,
		ModifyTime:           now,
	}
}
// CreateFailedFollowUpEval creates a follow up evaluation when the current one
// has been marked as failed because it has hit the delivery limit and will not
// be retried by the eval_broker. Callers should copy the created eval's ID
// into the old eval's NextEval field.
func (e *Evaluation) CreateFailedFollowUpEval(wait time.Duration) *Evaluation {
	now := time.Now().UTC().UnixNano()
	return &Evaluation{
		ID:             uuid.Generate(),
		Namespace:      e.Namespace,
		Priority:       e.Priority,
		Type:           e.Type,
		TriggeredBy:    EvalTriggerFailedFollowUp,
		JobID:          e.JobID,
		JobModifyIndex: e.JobModifyIndex,
		Status:         EvalStatusPending,
		Wait:           wait,
		PreviousEval:   e.ID,
		CreateTime:     now,
		ModifyTime:     now,
	}
}
// UpdateModifyTime takes into account that clocks on different servers may be
// slightly out of sync. Even in case of a leader change, this method will
// guarantee that ModifyTime will always be after CreateTime.
func (e *Evaluation) UpdateModifyTime() {
	now := time.Now().UTC().UnixNano()
	e.ModifyTime = now
	// Clock skew (or a leader change) can make "now" lag CreateTime; bump
	// past it so ordering is preserved.
	if now <= e.CreateTime {
		e.ModifyTime = e.CreateTime + 1
	}
}
// Plan is used to submit a commit plan for task allocations. These
// are submitted to the leader which verifies that resources have
// not been overcommitted before admitting the plan.
type Plan struct {
	// msgpack omit empty fields during serialization
	_struct bool `codec:",omitempty"` // nolint: structcheck

	// EvalID is the evaluation ID this plan is associated with
	EvalID string

	// EvalToken is used to prevent a split-brain processing of
	// an evaluation. There should only be a single scheduler running
	// an Eval at a time, but this could be violated after a leadership
	// transition. This unique token is used to reject plans that are
	// being submitted from a different leader.
	EvalToken string

	// Priority is the priority of the upstream job
	Priority int

	// AllAtOnce is used to control if incremental scheduling of task groups
	// is allowed or if we must do a gang scheduling of the entire job.
	// If this is false, a plan may be partially applied. Otherwise, the
	// entire plan must be able to make progress.
	AllAtOnce bool

	// Job is the parent job of all the allocations in the Plan.
	// Since a Plan only involves a single Job, we can reduce the size
	// of the plan by only including it once.
	Job *Job

	// NodeUpdate contains all the allocations to be stopped or evicted for
	// each node.
	NodeUpdate map[string][]*Allocation

	// NodeAllocation contains all the allocations for each node.
	// The evicts must be considered prior to the allocations.
	NodeAllocation map[string][]*Allocation

	// Annotations contains annotations by the scheduler to be used by operators
	// to understand the decisions made by the scheduler.
	Annotations *PlanAnnotations

	// Deployment is the deployment created or updated by the scheduler that
	// should be applied by the planner.
	Deployment *Deployment

	// DeploymentUpdates is a set of status updates to apply to the given
	// deployments. This allows the scheduler to cancel any unneeded deployment
	// because the job is stopped or the update block is removed.
	DeploymentUpdates []*DeploymentStatusUpdate

	// NodePreemptions is a map from node id to a set of allocations from other
	// lower priority jobs that are preempted. Preempted allocations are marked
	// as evicted.
	NodePreemptions map[string][]*Allocation

	// SnapshotIndex is the Raft index of the snapshot used to create the
	// Plan. The leader will wait to evaluate the plan until its StateStore
	// has reached at least this index.
	SnapshotIndex uint64
}
// GoString renders a compact, human-readable summary of the plan for
// debugging. IDs are truncated to their first 8 characters.
func (p *Plan) GoString() string {
	var b strings.Builder
	fmt.Fprintf(&b, "(eval %s", p.EvalID[:8])
	if p.Job != nil {
		fmt.Fprintf(&b, ", job %s", p.Job.ID)
	}
	if p.Deployment != nil {
		fmt.Fprintf(&b, ", deploy %s", p.Deployment.ID[:8])
	}

	// NodeAllocation and NodePreemptions share the same per-alloc format.
	writeNodeAllocs := func(label string, byNode map[string][]*Allocation) {
		if len(byNode) == 0 {
			return
		}
		b.WriteString(", " + label + ": ")
		for node, allocs := range byNode {
			fmt.Fprintf(&b, "(node[%s]", node[:8])
			for _, a := range allocs {
				fmt.Fprintf(&b, " (%s %s %s)", a.ID[:8], a.Name, a.DesiredStatus)
			}
			b.WriteString(")")
		}
	}

	if len(p.NodeUpdate) > 0 {
		b.WriteString(", NodeUpdates: ")
		for node, allocs := range p.NodeUpdate {
			fmt.Fprintf(&b, "(node[%s]", node[:8])
			for _, a := range allocs {
				fmt.Fprintf(&b, " (%s stop/evict)", a.ID[:8])
			}
			b.WriteString(")")
		}
	}

	writeNodeAllocs("NodeAllocations", p.NodeAllocation)
	writeNodeAllocs("NodePreemptions", p.NodePreemptions)

	if len(p.DeploymentUpdates) > 0 {
		b.WriteString(", DeploymentUpdates: ")
		for _, du := range p.DeploymentUpdates {
			fmt.Fprintf(&b, "(%s %s)", du.DeploymentID[:8], du.Status)
		}
	}

	if p.Annotations != nil {
		b.WriteString(", Annotations: ")
		for tg, updates := range p.Annotations.DesiredTGUpdates {
			fmt.Fprintf(&b, "(update[%s] %v)", tg, updates)
		}
		for _, preempted := range p.Annotations.PreemptedAllocs {
			fmt.Fprintf(&b, "(preempt %s)", preempted.ID[:8])
		}
	}

	b.WriteString(")")
	return b.String()
}
// AppendStoppedAlloc marks an allocation to be stopped. The clientStatus of the
// allocation may be optionally set by passing in a non-empty value. A shallow
// copy of alloc is appended to the plan's NodeUpdate map; the caller's alloc
// is not mutated.
func (p *Plan) AppendStoppedAlloc(alloc *Allocation, desiredDesc, clientStatus, followupEvalID string) {
	newAlloc := new(Allocation)
	*newAlloc = *alloc

	// If the job is not set in the plan we are deregistering a job so we
	// extract the job from the allocation.
	if p.Job == nil && newAlloc.Job != nil {
		p.Job = newAlloc.Job
	}

	// Normalize the job
	newAlloc.Job = nil

	// Strip the resources as it can be rebuilt.
	newAlloc.Resources = nil

	newAlloc.DesiredStatus = AllocDesiredStatusStop
	newAlloc.DesiredDescription = desiredDesc

	if clientStatus != "" {
		newAlloc.ClientStatus = clientStatus
	}

	// NOTE(review): AppendState is invoked even when clientStatus is empty,
	// which records an empty client-status transition — confirm this is
	// intended rather than belonging inside the guard above.
	newAlloc.AppendState(AllocStateFieldClientStatus, clientStatus)

	if followupEvalID != "" {
		newAlloc.FollowupEvalID = followupEvalID
	}

	node := alloc.NodeID
	existing := p.NodeUpdate[node]
	p.NodeUpdate[node] = append(existing, newAlloc)
}
// AppendPreemptedAlloc is used to append an allocation that's being preempted to the plan.
// To minimize the size of the plan, this only sets a minimal set of fields in the allocation
func (p *Plan) AppendPreemptedAlloc(alloc *Allocation, preemptingAllocID string) {
	stripped := &Allocation{
		ID:                    alloc.ID,
		JobID:                 alloc.JobID,
		Namespace:             alloc.Namespace,
		DesiredStatus:         AllocDesiredStatusEvict,
		PreemptedByAllocation: preemptingAllocID,
		DesiredDescription:    fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocID),
	}

	// TaskResources are needed by the plan applier to check if allocations fit
	// after removing preempted allocations
	if alloc.AllocatedResources != nil {
		stripped.AllocatedResources = alloc.AllocatedResources
	} else {
		// COMPAT Remove in version 0.11
		stripped.TaskResources = alloc.TaskResources
		stripped.SharedResources = alloc.SharedResources
	}

	// Append this alloc to slice for this node
	p.NodePreemptions[alloc.NodeID] = append(p.NodePreemptions[alloc.NodeID], stripped)
}
// AppendUnknownAlloc marks an allocation as unknown.
func (p *Plan) AppendUnknownAlloc(alloc *Allocation) {
	// Resources can be rebuilt server-side, so drop them from the wire format.
	alloc.Resources = nil
	p.NodeAllocation[alloc.NodeID] = append(p.NodeAllocation[alloc.NodeID], alloc)
}
// PopUpdate removes alloc from the plan's NodeUpdate map, but only when it is
// the most recently appended update for its node. No-op otherwise.
func (p *Plan) PopUpdate(alloc *Allocation) {
	updates := p.NodeUpdate[alloc.NodeID]
	last := len(updates) - 1
	if last < 0 || updates[last].ID != alloc.ID {
		return
	}
	if last == 0 {
		// Removing the only entry; drop the node key entirely.
		delete(p.NodeUpdate, alloc.NodeID)
		return
	}
	p.NodeUpdate[alloc.NodeID] = updates[:last]
}
// AppendAlloc appends the alloc to the plan allocations.
// Uses the passed job if explicitly passed, otherwise
// it is assumed the alloc will use the plan Job version.
func (p *Plan) AppendAlloc(alloc *Allocation, job *Job) {
	alloc.Job = job
	p.NodeAllocation[alloc.NodeID] = append(p.NodeAllocation[alloc.NodeID], alloc)
}
// IsNoOp checks if this plan would do nothing
func (p *Plan) IsNoOp() bool {
	if len(p.NodeUpdate) > 0 || len(p.NodeAllocation) > 0 {
		return false
	}
	return p.Deployment == nil && len(p.DeploymentUpdates) == 0
}
// NormalizeAllocations normalizes allocations to remove fields that can
// be fetched from the MemDB instead of sending over the wire
func (p *Plan) NormalizeAllocations() {
	// Stop/evict updates only need the fields the plan applier writes back.
	for _, allocs := range p.NodeUpdate {
		for i := range allocs {
			full := allocs[i]
			allocs[i] = &Allocation{
				ID:                 full.ID,
				DesiredDescription: full.DesiredDescription,
				ClientStatus:       full.ClientStatus,
				FollowupEvalID:     full.FollowupEvalID,
			}
		}
	}

	// Preemptions only need the ID and the preempting alloc reference.
	for _, allocs := range p.NodePreemptions {
		for i := range allocs {
			full := allocs[i]
			allocs[i] = &Allocation{
				ID:                    full.ID,
				PreemptedByAllocation: full.PreemptedByAllocation,
			}
		}
	}
}
// PlanResult is the result of a plan submitted to the leader.
type PlanResult struct {
	// NodeUpdate contains all the stop/evict updates that were committed,
	// keyed by node ID.
	NodeUpdate map[string][]*Allocation

	// NodeAllocation contains all the allocations that were committed.
	NodeAllocation map[string][]*Allocation

	// Deployment is the deployment that was committed.
	Deployment *Deployment

	// DeploymentUpdates is the set of deployment updates that were committed.
	DeploymentUpdates []*DeploymentStatusUpdate

	// NodePreemptions is a map from node id to a set of allocations from other
	// lower priority jobs that are preempted. Preempted allocations are marked
	// as stopped.
	NodePreemptions map[string][]*Allocation

	// RefreshIndex is the index the worker should refresh state up to.
	// This allows all evictions and allocations to be materialized.
	// If any allocations were rejected due to stale data (node state,
	// over committed) this can be used to force a worker refresh.
	RefreshIndex uint64

	// AllocIndex is the Raft index in which the evictions and
	// allocations took place. This is used for the write index.
	AllocIndex uint64
}
// IsNoOp checks if this plan result would do nothing
func (p *PlanResult) IsNoOp() bool {
	if p.Deployment != nil || len(p.DeploymentUpdates) > 0 {
		return false
	}
	return len(p.NodeUpdate) == 0 && len(p.NodeAllocation) == 0
}
// FullCommit is used to check if all the allocations in a plan
// were committed as part of the result. Returns if there was
// a match, and the number of expected and actual allocations.
func (p *PlanResult) FullCommit(plan *Plan) (bool, int, int) {
	var expected, actual int
	for node, wanted := range plan.NodeAllocation {
		expected += len(wanted)
		actual += len(p.NodeAllocation[node])
	}
	return actual == expected, expected, actual
}
// PlanAnnotations holds annotations made by the scheduler to give further debug
// information to operators.
type PlanAnnotations struct {
	// DesiredTGUpdates is the set of desired updates per task group.
	DesiredTGUpdates map[string]*DesiredUpdates

	// PreemptedAllocs is the set of allocations to be preempted to make the placement successful.
	PreemptedAllocs []*AllocListStub
}
// DesiredUpdates is the set of changes the scheduler would like to make given
// sufficient resources and cluster capacity.
type DesiredUpdates struct {
	Ignore            uint64
	Place             uint64
	Migrate           uint64
	Stop              uint64
	InPlaceUpdate     uint64
	DestructiveUpdate uint64
	Canary            uint64
	Preemptions       uint64
}

// GoString renders the counters in a compact debug form.
// NOTE(review): Preemptions is not included in the output — confirm whether
// that omission is intentional.
func (d *DesiredUpdates) GoString() string {
	const layout = "(place %d) (inplace %d) (destructive %d) (stop %d) (migrate %d) (ignore %d) (canary %d)"
	return fmt.Sprintf(layout,
		d.Place, d.InPlaceUpdate, d.DestructiveUpdate, d.Stop, d.Migrate, d.Ignore, d.Canary)
}
// MsgpackHandle is a shared handle for msgpack encoding/decoding of structs.
var MsgpackHandle = func() *codec.MsgpackHandle {
	h := &codec.MsgpackHandle{}
	// Decode msgpack raw/bin payloads into Go strings.
	h.RawToString = true

	// maintain binary format from time prior to upgrading latest ugorji
	h.BasicHandle.TimeNotBuiltin = true

	// Sets the default type for decoding a map into a nil interface{}.
	// This is necessary in particular because we store the driver configs as a
	// nil interface{}.
	h.MapType = reflect.TypeOf(map[string]interface{}(nil))

	// only review struct codec tags
	h.TypeInfos = codec.NewTypeInfos([]string{"codec"})

	return h
}()
// Decode is used to decode a MsgPack encoded object
func Decode(buf []byte, out interface{}) error {
	dec := codec.NewDecoder(bytes.NewReader(buf), MsgpackHandle)
	return dec.Decode(out)
}
// Encode is used to encode a MsgPack object with type prefix
func Encode(t MessageType, msg interface{}) ([]byte, error) {
	var out bytes.Buffer
	// The single leading byte identifies the message type for the FSM.
	out.WriteByte(uint8(t))
	enc := codec.NewEncoder(&out, MsgpackHandle)
	err := enc.Encode(msg)
	return out.Bytes(), err
}
// KeyringResponse is a unified key response and can be used for install,
// remove, use, as well as listing key queries.
type KeyringResponse struct {
	// Messages maps member names to error/status messages.
	Messages map[string]string
	// Keys maps each known key to the number of members holding it.
	Keys     map[string]int
	NumNodes int
}

// KeyringRequest is the request object for serf key operations.
type KeyringRequest struct {
	Key string
}
// RecoverableError wraps an error message and records whether the failure is
// transient (may be retried) or fatal.
type RecoverableError struct {
	Err         string
	Recoverable bool
}

// NewRecoverableError wraps e and marks it as recoverable or not. A nil
// input yields a nil result.
func NewRecoverableError(e error, recoverable bool) error {
	if e == nil {
		return nil
	}
	return &RecoverableError{Err: e.Error(), Recoverable: recoverable}
}

// WrapRecoverable wraps an existing error in a new RecoverableError with a new
// message. If the error was recoverable before the returned error is as well;
// otherwise it is unrecoverable.
func WrapRecoverable(msg string, err error) error {
	return &RecoverableError{Err: msg, Recoverable: IsRecoverable(err)}
}

func (r *RecoverableError) Error() string { return r.Err }

func (r *RecoverableError) IsRecoverable() bool { return r.Recoverable }

func (r *RecoverableError) IsUnrecoverable() bool { return !r.Recoverable }

// Recoverable is an interface for errors to implement to indicate whether or
// not they are fatal or recoverable.
type Recoverable interface {
	error
	IsRecoverable() bool
}

// IsRecoverable returns true if error is a RecoverableError with
// Recoverable=true. Otherwise false is returned.
func IsRecoverable(e error) bool {
	re, ok := e.(Recoverable)
	return ok && re.IsRecoverable()
}

// WrappedServerError wraps an error and satisfies
// both the Recoverable and the ServerSideError interfaces
type WrappedServerError struct {
	Err error
}

// NewWrappedServerError is used to create a wrapped server side error
func NewWrappedServerError(e error) error {
	return &WrappedServerError{Err: e}
}

// IsRecoverable delegates to the wrapped error's recoverability.
func (r *WrappedServerError) IsRecoverable() bool {
	return IsRecoverable(r.Err)
}

func (r *WrappedServerError) Error() string {
	return r.Err.Error()
}

// IsServerSide always reports true: the wrapped error occurred on a server.
func (r *WrappedServerError) IsServerSide() bool {
	return true
}

// ServerSideError is an interface for errors to implement to indicate
// errors occurring after the request makes it to a server
type ServerSideError interface {
	error
	IsServerSide() bool
}

// IsServerSide returns true if error is a wrapped
// server side error
func IsServerSide(e error) bool {
	se, ok := e.(ServerSideError)
	return ok && se.IsServerSide()
}
// ACLPolicy is used to represent an ACL policy
type ACLPolicy struct {
	Name        string      // Unique name
	Description string      // Human readable
	Rules       string      // HCL or JSON format
	RulesJSON   *acl.Policy // Generated from Rules on read
	Hash        []byte      // Hash of user-set fields; see SetHash
	CreateIndex uint64      // Raft index at creation
	ModifyIndex uint64      // Raft index of last modification
}
// SetHash is used to compute and set the hash of the ACL policy
func (a *ACLPolicy) SetHash() []byte {
	// Initialize a 256bit Blake2 hash (32 bytes); New256 with a nil key
	// only errors on invalid key sizes, so a failure is a programmer bug.
	h, err := blake2b.New256(nil)
	if err != nil {
		panic(err)
	}

	// Write all the user set fields
	for _, field := range []string{a.Name, a.Description, a.Rules} {
		_, _ = h.Write([]byte(field))
	}

	// Finalize, store, and return the digest.
	digest := h.Sum(nil)
	a.Hash = digest
	return digest
}
// Stub converts the policy into its list-friendly summary form, which omits
// the rule text.
func (a *ACLPolicy) Stub() *ACLPolicyListStub {
	stub := &ACLPolicyListStub{
		Name:        a.Name,
		Description: a.Description,
		Hash:        a.Hash,
		CreateIndex: a.CreateIndex,
		ModifyIndex: a.ModifyIndex,
	}
	return stub
}
// Validate checks the policy for well-formedness: a valid name, parseable
// rules, and a bounded description. All failures are accumulated and
// returned together; nil means the policy is valid.
func (a *ACLPolicy) Validate() error {
	var mErr multierror.Error
	if !validPolicyName.MatchString(a.Name) {
		err := fmt.Errorf("invalid name '%s'", a.Name)
		mErr.Errors = append(mErr.Errors, err)
	}
	if _, err := acl.Parse(a.Rules); err != nil {
		err = fmt.Errorf("failed to parse rules: %v", err)
		mErr.Errors = append(mErr.Errors, err)
	}
	if len(a.Description) > maxPolicyDescriptionLength {
		err := fmt.Errorf("description longer than %d", maxPolicyDescriptionLength)
		mErr.Errors = append(mErr.Errors, err)
	}
	return mErr.ErrorOrNil()
}
// ACLPolicyListStub is used for listing ACL policies; it omits the rule text
// to keep list responses small.
type ACLPolicyListStub struct {
	Name        string
	Description string
	Hash        []byte
	CreateIndex uint64
	ModifyIndex uint64
}

// ACLPolicyListRequest is used to request a list of policies
type ACLPolicyListRequest struct {
	QueryOptions
}

// ACLPolicySpecificRequest is used to query a specific policy by name
type ACLPolicySpecificRequest struct {
	Name string
	QueryOptions
}

// ACLPolicySetRequest is used to query a set of policies by name
type ACLPolicySetRequest struct {
	Names []string
	QueryOptions
}

// ACLPolicyListResponse is used for a list request
type ACLPolicyListResponse struct {
	Policies []*ACLPolicyListStub
	QueryMeta
}

// SingleACLPolicyResponse is used to return a single policy
type SingleACLPolicyResponse struct {
	Policy *ACLPolicy
	QueryMeta
}

// ACLPolicySetResponse is used to return a set of policies keyed by name
type ACLPolicySetResponse struct {
	Policies map[string]*ACLPolicy
	QueryMeta
}

// ACLPolicyDeleteRequest is used to delete a set of policies by name
type ACLPolicyDeleteRequest struct {
	Names []string
	WriteRequest
}

// ACLPolicyUpsertRequest is used to upsert a set of policies
type ACLPolicyUpsertRequest struct {
	Policies []*ACLPolicy
	WriteRequest
}
// ACLToken represents a client token which is used to Authenticate
type ACLToken struct {
	AccessorID  string    // Public Accessor ID (UUID)
	SecretID    string    // Secret ID, private (UUID)
	Name        string    // Human friendly name
	Type        string    // Client or Management
	Policies    []string  // Policies this token ties to
	Global      bool      // Global or Region local
	Hash        []byte    // Hash of user-set fields; see SetHash
	CreateTime  time.Time // Time of creation
	CreateIndex uint64
	ModifyIndex uint64
}

// GetID implements the IDGetter interface, required for pagination.
// Safe on a nil receiver, returning the empty string.
func (a *ACLToken) GetID() string {
	if a != nil {
		return a.AccessorID
	}
	return ""
}

// GetCreateIndex implements the CreateIndexGetter interface, required for
// pagination. Safe on a nil receiver, returning zero.
func (a *ACLToken) GetCreateIndex() uint64 {
	if a != nil {
		return a.CreateIndex
	}
	return 0
}

// Copy returns a deep copy of the token: the Policies and Hash slices are
// duplicated so mutations of the copy cannot alias the original.
func (a *ACLToken) Copy() *ACLToken {
	dup := new(ACLToken)
	*dup = *a

	dup.Policies = make([]string, len(a.Policies))
	copy(dup.Policies, a.Policies)
	dup.Hash = make([]byte, len(a.Hash))
	copy(dup.Hash, a.Hash)

	return dup
}
var (
	// AnonymousACLToken is used when no SecretID is provided, and the
	// request is made anonymously.
	AnonymousACLToken = &ACLToken{
		AccessorID: "anonymous",
		Name:       "Anonymous Token",
		Type:       ACLClientToken,
		Policies:   []string{"anonymous"},
		Global:     false,
	}
)
// ACLTokenListStub is a summary of an ACLToken used for list endpoints; it
// omits the SecretID.
type ACLTokenListStub struct {
	AccessorID  string
	Name        string
	Type        string
	Policies    []string
	Global      bool
	Hash        []byte
	CreateTime  time.Time
	CreateIndex uint64
	ModifyIndex uint64
}
// SetHash is used to compute and set the hash of the ACL token
func (a *ACLToken) SetHash() []byte {
	// Initialize a 256bit Blake2 hash (32 bytes); New256 with a nil key
	// only errors on invalid key sizes, so a failure is a programmer bug.
	h, err := blake2b.New256(nil)
	if err != nil {
		panic(err)
	}

	// Write all the user set fields
	_, _ = h.Write([]byte(a.Name))
	_, _ = h.Write([]byte(a.Type))
	for _, policyName := range a.Policies {
		_, _ = h.Write([]byte(policyName))
	}

	scope := "local"
	if a.Global {
		scope = "global"
	}
	_, _ = h.Write([]byte(scope))

	// Finalize, store, and return the digest.
	digest := h.Sum(nil)
	a.Hash = digest
	return digest
}
// Stub converts the token into its list-friendly summary form, which omits
// the SecretID.
func (a *ACLToken) Stub() *ACLTokenListStub {
	stub := &ACLTokenListStub{
		AccessorID:  a.AccessorID,
		Name:        a.Name,
		Type:        a.Type,
		Policies:    a.Policies,
		Global:      a.Global,
		Hash:        a.Hash,
		CreateTime:  a.CreateTime,
		CreateIndex: a.CreateIndex,
		ModifyIndex: a.ModifyIndex,
	}
	return stub
}
// Validate is used to check a token for reasonableness: the name must fit
// within the limit, client tokens require at least one policy, and
// management tokens must not carry policies. All failures are accumulated;
// nil means the token is valid.
func (a *ACLToken) Validate() error {
	var mErr multierror.Error
	if len(a.Name) > maxTokenNameLength {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("token name too long"))
	}
	switch a.Type {
	case ACLClientToken:
		if len(a.Policies) == 0 {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("client token missing policies"))
		}
	case ACLManagementToken:
		if len(a.Policies) != 0 {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("management token cannot be associated with policies"))
		}
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("token type must be client or management"))
	}
	return mErr.ErrorOrNil()
}
// PolicySubset checks if a given set of policies is a subset of the token
func (a *ACLToken) PolicySubset(policies []string) bool {
	// Hot-path the management tokens, superset of all policies.
	if a.Type == ACLManagementToken {
		return true
	}

	held := make(map[string]struct{}, len(a.Policies))
	for _, name := range a.Policies {
		held[name] = struct{}{}
	}

	for _, want := range policies {
		if _, ok := held[want]; !ok {
			return false
		}
	}
	return true
}
// ACLTokenListRequest is used to request a list of tokens
type ACLTokenListRequest struct {
	// GlobalOnly restricts results to globally-replicated tokens.
	GlobalOnly bool
	QueryOptions
}

// ACLTokenSpecificRequest is used to query a specific token by accessor ID
type ACLTokenSpecificRequest struct {
	AccessorID string
	QueryOptions
}

// ACLTokenSetRequest is used to query a set of tokens by accessor ID
type ACLTokenSetRequest struct {
	AccessorIDS []string
	QueryOptions
}

// ACLTokenListResponse is used for a list request
type ACLTokenListResponse struct {
	Tokens []*ACLTokenListStub
	QueryMeta
}

// SingleACLTokenResponse is used to return a single token
type SingleACLTokenResponse struct {
	Token *ACLToken
	QueryMeta
}

// ACLTokenSetResponse is used to return a set of tokens
type ACLTokenSetResponse struct {
	Tokens map[string]*ACLToken // Keyed by Accessor ID
	QueryMeta
}

// ResolveACLTokenRequest is used to resolve a specific token from its secret
type ResolveACLTokenRequest struct {
	SecretID string
	QueryOptions
}

// ResolveACLTokenResponse is used to resolve a single token
type ResolveACLTokenResponse struct {
	Token *ACLToken
	QueryMeta
}

// ACLTokenDeleteRequest is used to delete a set of tokens by accessor ID
type ACLTokenDeleteRequest struct {
	AccessorIDs []string
	WriteRequest
}

// ACLTokenBootstrapRequest is used to bootstrap ACLs
type ACLTokenBootstrapRequest struct {
	Token           *ACLToken // Not client specifiable
	ResetIndex      uint64    // Reset index is used to clear the bootstrap token
	BootstrapSecret string
	WriteRequest
}

// ACLTokenUpsertRequest is used to upsert a set of tokens
type ACLTokenUpsertRequest struct {
	Tokens []*ACLToken
	WriteRequest
}

// ACLTokenUpsertResponse is used to return from an ACLTokenUpsertRequest
type ACLTokenUpsertResponse struct {
	Tokens []*ACLToken
	WriteMeta
}
// OneTimeToken is used to log into the web UI using a token provided by the
// command line.
type OneTimeToken struct {
	OneTimeSecretID string
	AccessorID      string // Accessor ID of the backing ACL token
	ExpiresAt       time.Time
	CreateIndex     uint64
	ModifyIndex     uint64
}

// OneTimeTokenUpsertRequest is the request for a UpsertOneTimeToken RPC
type OneTimeTokenUpsertRequest struct {
	WriteRequest
}

// OneTimeTokenUpsertResponse is the response to a UpsertOneTimeToken RPC.
type OneTimeTokenUpsertResponse struct {
	OneTimeToken *OneTimeToken
	WriteMeta
}

// OneTimeTokenExchangeRequest is a request to swap the one-time token with
// the backing ACL token
type OneTimeTokenExchangeRequest struct {
	OneTimeSecretID string
	WriteRequest
}

// OneTimeTokenExchangeResponse is the response to swapping the one-time token
// with the backing ACL token
type OneTimeTokenExchangeResponse struct {
	Token *ACLToken
	WriteMeta
}

// OneTimeTokenDeleteRequest is a request to delete a group of one-time tokens
type OneTimeTokenDeleteRequest struct {
	AccessorIDs []string
	WriteRequest
}

// OneTimeTokenExpireRequest is a request to delete all expired one-time tokens
type OneTimeTokenExpireRequest struct {
	WriteRequest
}
// RpcError is used for serializing errors with a potential error code
type RpcError struct {
	Message string
	Code    *int64 // optional numeric code; nil when none applies
}

// NewRpcError builds an RpcError from err's message and an optional code.
func NewRpcError(err error, code *int64) *RpcError {
	return &RpcError{Message: err.Error(), Code: code}
}

// Error satisfies the error interface.
func (r *RpcError) Error() string { return r.Message }
docs: fix Plan{,Result}.NodeUpdate comment (#13534)
It appears way back when this was first implemented in
9a917281af9c0a97a6c59575eaa52c5c86ffc60d, it was renamed from
NodeEvict (with a correct comment) to NodeUpdate. The comment was
changed from referring to only evictions to referring to "all allocs" in
the first sentence and "stop or evict" in the second.
This confuses me every time I see it because I read the name (NodeUpdate)
and first sentence ("all the allocs") and assume this represents *all*
allocations... which isn't true.
I'm going to assume I'm the only one who doesn't read the 2nd sentence
and that's why this suboptimal wording has lasted 7 years, but can we
change it for my sake?
package structs
import (
"bytes"
"container/heap"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/base32"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"hash"
"hash/crc32"
"math"
"net"
"os"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/hashicorp/nomad/helper/escapingfs"
"golang.org/x/crypto/blake2b"
"github.com/hashicorp/cronexpr"
"github.com/hashicorp/go-msgpack/codec"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-version"
"github.com/hashicorp/nomad/acl"
"github.com/hashicorp/nomad/command/agent/host"
"github.com/hashicorp/nomad/command/agent/pprof"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/args"
"github.com/hashicorp/nomad/helper/constraints/semver"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/lib/cpuset"
"github.com/hashicorp/nomad/lib/kheap"
psstructs "github.com/hashicorp/nomad/plugins/shared/structs"
"github.com/miekg/dns"
"github.com/mitchellh/copystructure"
)
var (
	// validPolicyName is used to validate a policy name: 1-128 characters
	// drawn from letters, digits, and hyphens.
	validPolicyName = regexp.MustCompile("^[a-zA-Z0-9-]{1,128}$")

	// b32 is a lowercase base32 encoding for use in URL friendly service hashes
	b32 = base32.NewEncoding(strings.ToLower("abcdefghijklmnopqrstuvwxyz234567"))
)
// MessageType identifies the kind of Raft log entry so the FSM can dispatch
// it to the right apply handler.
type MessageType uint8

// note: new raft message types need to be added to the end of this list
const (
	NodeRegisterRequestType                      MessageType = 0
	NodeDeregisterRequestType                    MessageType = 1
	NodeUpdateStatusRequestType                  MessageType = 2
	NodeUpdateDrainRequestType                   MessageType = 3
	JobRegisterRequestType                       MessageType = 4
	JobDeregisterRequestType                     MessageType = 5
	EvalUpdateRequestType                        MessageType = 6
	EvalDeleteRequestType                        MessageType = 7
	AllocUpdateRequestType                       MessageType = 8
	AllocClientUpdateRequestType                 MessageType = 9
	ReconcileJobSummariesRequestType             MessageType = 10
	VaultAccessorRegisterRequestType             MessageType = 11
	VaultAccessorDeregisterRequestType           MessageType = 12
	ApplyPlanResultsRequestType                  MessageType = 13
	DeploymentStatusUpdateRequestType            MessageType = 14
	DeploymentPromoteRequestType                 MessageType = 15
	DeploymentAllocHealthRequestType             MessageType = 16
	DeploymentDeleteRequestType                  MessageType = 17
	JobStabilityRequestType                      MessageType = 18
	ACLPolicyUpsertRequestType                   MessageType = 19
	ACLPolicyDeleteRequestType                   MessageType = 20
	ACLTokenUpsertRequestType                    MessageType = 21
	ACLTokenDeleteRequestType                    MessageType = 22
	ACLTokenBootstrapRequestType                 MessageType = 23
	AutopilotRequestType                         MessageType = 24
	UpsertNodeEventsType                         MessageType = 25
	JobBatchDeregisterRequestType                MessageType = 26
	AllocUpdateDesiredTransitionRequestType      MessageType = 27
	NodeUpdateEligibilityRequestType             MessageType = 28
	BatchNodeUpdateDrainRequestType              MessageType = 29
	SchedulerConfigRequestType                   MessageType = 30
	NodeBatchDeregisterRequestType               MessageType = 31
	ClusterMetadataRequestType                   MessageType = 32
	ServiceIdentityAccessorRegisterRequestType   MessageType = 33
	ServiceIdentityAccessorDeregisterRequestType MessageType = 34
	CSIVolumeRegisterRequestType                 MessageType = 35
	CSIVolumeDeregisterRequestType               MessageType = 36
	CSIVolumeClaimRequestType                    MessageType = 37
	ScalingEventRegisterRequestType              MessageType = 38
	CSIVolumeClaimBatchRequestType               MessageType = 39
	CSIPluginDeleteRequestType                   MessageType = 40
	EventSinkUpsertRequestType                   MessageType = 41
	EventSinkDeleteRequestType                   MessageType = 42
	BatchEventSinkUpdateProgressType             MessageType = 43
	OneTimeTokenUpsertRequestType                MessageType = 44
	OneTimeTokenDeleteRequestType                MessageType = 45
	OneTimeTokenExpireRequestType                MessageType = 46
	ServiceRegistrationUpsertRequestType         MessageType = 47
	ServiceRegistrationDeleteByIDRequestType     MessageType = 48
	ServiceRegistrationDeleteByNodeIDRequestType MessageType = 49

	// Namespace types were moved from enterprise and therefore start at 64
	NamespaceUpsertRequestType MessageType = 64
	NamespaceDeleteRequestType MessageType = 65
)
const (
	// IgnoreUnknownTypeFlag is set along with a MessageType
	// to indicate that the message type can be safely ignored
	// if it is not recognized. This is for future proofing, so
	// that new commands can be added in a way that won't cause
	// old servers to crash when the FSM attempts to process them.
	IgnoreUnknownTypeFlag MessageType = 128

	// MsgTypeTestSetup is used during testing when calling state store
	// methods directly that require an FSM MessageType
	MsgTypeTestSetup MessageType = IgnoreUnknownTypeFlag

	// Artifact getter modes.
	GetterModeAny  = "any"
	GetterModeFile = "file"
	GetterModeDir  = "dir"

	// maxPolicyDescriptionLength limits a policy description length
	maxPolicyDescriptionLength = 256

	// maxTokenNameLength limits an ACL token name length
	maxTokenNameLength = 256

	// ACLClientToken and ACLManagementToken are the only types of tokens
	ACLClientToken     = "client"
	ACLManagementToken = "management"

	// DefaultNamespace is the default namespace.
	DefaultNamespace            = "default"
	DefaultNamespaceDescription = "Default shared namespace"

	// AllNamespacesSentinel is the value used as a namespace RPC value
	// to indicate that endpoints must search in all namespaces
	AllNamespacesSentinel = "*"

	// maxNamespaceDescriptionLength limits a namespace description length
	maxNamespaceDescriptionLength = 256

	// JitterFraction is the limit to the amount of jitter we apply
	// to a user specified MaxQueryTime. We divide the specified time by
	// the fraction. So 16 == 6.25% limit of jitter. This jitter is also
	// applied to RPCHoldTimeout.
	JitterFraction = 16

	// MaxRetainedNodeEvents is the maximum number of node events that will be
	// retained for a single node
	MaxRetainedNodeEvents = 10

	// MaxRetainedNodeScores is the number of top scoring nodes for which we
	// retain scoring metadata
	MaxRetainedNodeScores = 5

	// Normalized scorer name
	NormScorerName = "normalized-score"

	// MaxBlockingRPCQueryTime is used to bound the limit of a blocking query
	MaxBlockingRPCQueryTime = 300 * time.Second

	// DefaultBlockingRPCQueryTime is the amount of time we block waiting for a change
	// if no time is specified. Previously we would wait the MaxBlockingRPCQueryTime.
	DefaultBlockingRPCQueryTime = 300 * time.Second
)
var (
	// validNamespaceName is used to validate a namespace name: 1-128
	// characters drawn from letters, digits, and hyphens.
	validNamespaceName = regexp.MustCompile("^[a-zA-Z0-9-]{1,128}$")
)
// NamespacedID is a tuple of an ID and a namespace
type NamespacedID struct {
	ID        string
	Namespace string
}

// NewNamespacedID returns a new namespaced ID given the ID and namespace
func NewNamespacedID(id, ns string) NamespacedID {
	return NamespacedID{ID: id, Namespace: ns}
}

// String renders the pair in a quoted, human-readable form.
func (n NamespacedID) String() string {
	return fmt.Sprintf("<ns: %q, id: %q>", n.Namespace, n.ID)
}
// RPCInfo is used to describe common information about query and write
// requests so generic RPC plumbing (forwarding, blocking, staleness) can
// treat them uniformly.
type RPCInfo interface {
	RequestRegion() string
	IsRead() bool
	AllowStaleRead() bool
	IsForwarded() bool
	SetForwarded()
	TimeToBlock() time.Duration
	// SetTimeToBlock sets how long this request can block. The requested time may not be possible,
	// so Callers should readback TimeToBlock. E.g. you cannot set time to block at all on WriteRequests
	// and it cannot exceed MaxBlockingRPCQueryTime
	SetTimeToBlock(t time.Duration)
}
// InternalRpcInfo allows adding internal RPC metadata to an RPC. This struct
// should NOT be replicated in the API package as it is internal only.
type InternalRpcInfo struct {
	// Forwarded marks whether the RPC has been forwarded.
	Forwarded bool
}

// IsForwarded reports whether the RPC already passed through another server.
func (i *InternalRpcInfo) IsForwarded() bool { return i.Forwarded }

// SetForwarded records that the RPC is being forwarded from another server.
func (i *InternalRpcInfo) SetForwarded() { i.Forwarded = true }
// QueryOptions is used to specify various flags for read queries
type QueryOptions struct {
	// The target region for this query
	Region string

	// Namespace is the target namespace for the query.
	//
	// Since handlers do not have a default value set they should access
	// the Namespace via the RequestNamespace method.
	//
	// Requests accessing specific namespaced objects must check ACLs
	// against the namespace of the object, not the namespace in the
	// request.
	Namespace string

	// If set, wait until query exceeds given index. Must be provided
	// with MaxQueryTime.
	MinQueryIndex uint64

	// Provided with MinQueryIndex to wait for change.
	MaxQueryTime time.Duration

	// If set, any follower can service the request. Results
	// may be arbitrarily stale.
	AllowStale bool

	// If set, used as prefix for resource list searches
	Prefix string

	// AuthToken is secret portion of the ACL token used for the request
	AuthToken string

	// Filter specifies the go-bexpr filter expression to be used for
	// filtering the data prior to returning a response
	Filter string

	// PerPage is the number of entries to be returned in queries that support
	// paginated lists.
	PerPage int32

	// NextToken is the token used to indicate where to start paging
	// for queries that support paginated lists. This token should be
	// the ID of the next object after the last one seen in the
	// previous response.
	NextToken string

	// Reverse is used to reverse the default order of list results.
	Reverse bool

	InternalRpcInfo
}
// TimeToBlock returns the effective blocking duration for this query.
// Non-blocking queries (MinQueryIndex == 0) never block. Otherwise the
// requested MaxQueryTime is clamped to MaxBlockingRPCQueryTime, and an
// unset or negative value falls back to DefaultBlockingRPCQueryTime.
func (q QueryOptions) TimeToBlock() time.Duration {
	if q.MinQueryIndex == 0 {
		return 0
	}
	switch {
	case q.MaxQueryTime > MaxBlockingRPCQueryTime:
		return MaxBlockingRPCQueryTime
	case q.MaxQueryTime <= 0:
		return DefaultBlockingRPCQueryTime
	default:
		return q.MaxQueryTime
	}
}
// SetTimeToBlock sets the maximum time the query may block for. Callers
// should read back TimeToBlock, which applies clamping and defaults.
func (q *QueryOptions) SetTimeToBlock(t time.Duration) {
	q.MaxQueryTime = t
}

// RequestRegion returns the target region of the query.
func (q QueryOptions) RequestRegion() string {
	return q.Region
}

// RequestNamespace returns the request's namespace or the default namespace if
// no explicit namespace was sent.
//
// Requests accessing specific namespaced objects must check ACLs against the
// namespace of the object, not the namespace in the request.
func (q QueryOptions) RequestNamespace() string {
	if q.Namespace == "" {
		return DefaultNamespace
	}
	return q.Namespace
}

// IsRead only applies to reads, so always true.
func (q QueryOptions) IsRead() bool {
	return true
}

// AllowStaleRead reports whether the query may be serviced by a follower
// with potentially stale results.
func (q QueryOptions) AllowStaleRead() bool {
	return q.AllowStale
}
// AgentPprofRequest is used to request a pprof report for a given node.
type AgentPprofRequest struct {
	// ReqType specifies the profile to use
	ReqType pprof.ReqType

	// Profile specifies the runtime/pprof profile to lookup and generate.
	Profile string

	// Seconds is the number of seconds to capture a profile
	Seconds int

	// Debug specifies if pprof profile should include debug output
	Debug int

	// GC specifies if the profile should call runtime.GC() before
	// running its profile. This is only used for "heap" profiles
	GC int

	// NodeID is the node we want to track the logs of
	NodeID string

	// ServerID is the server we want to track the logs of
	ServerID string

	QueryOptions
}

// AgentPprofResponse is used to return a generated pprof profile
type AgentPprofResponse struct {
	// ID of the agent that fulfilled the request
	AgentID string

	// Payload is the generated pprof profile
	Payload []byte

	// HTTPHeaders are a set of key value pairs to be applied as
	// HTTP headers for a specific runtime profile
	HTTPHeaders map[string]string
}
// WriteRequest is used to specify common flags for write operations.
type WriteRequest struct {
	// The target region for this write
	Region string

	// Namespace is the target namespace for the write.
	//
	// Since RPC handlers do not have a default value set they should
	// access the Namespace via the RequestNamespace method.
	//
	// Requests accessing specific namespaced objects must check ACLs
	// against the namespace of the object, not the namespace in the
	// request.
	Namespace string

	// AuthToken is secret portion of the ACL token used for the request
	AuthToken string

	// IdempotencyToken can be used to ensure the write is idempotent.
	IdempotencyToken string

	// InternalRpcInfo carries internal forwarding metadata for the RPC.
	InternalRpcInfo
}
// TimeToBlock returns 0; write requests never block.
func (w WriteRequest) TimeToBlock() time.Duration {
	return 0
}

// SetTimeToBlock is a no-op; write requests cannot block.
func (w WriteRequest) SetTimeToBlock(_ time.Duration) {
}

// RequestRegion returns the target region of the write.
func (w WriteRequest) RequestRegion() string {
	// The target region for this request
	return w.Region
}

// RequestNamespace returns the request's namespace or the default namespace if
// no explicit namespace was sent.
//
// Requests accessing specific namespaced objects must check ACLs against the
// namespace of the object, not the namespace in the request.
func (w WriteRequest) RequestNamespace() string {
	if w.Namespace == "" {
		return DefaultNamespace
	}
	return w.Namespace
}

// IsRead only applies to reads; a write request is never a read, so always false.
func (w WriteRequest) IsRead() bool {
	return false
}

// AllowStaleRead returns false; writes must always go to the leader.
func (w WriteRequest) AllowStaleRead() bool {
	return false
}
// QueryMeta allows a query response to include potentially
// useful metadata about a query
type QueryMeta struct {
	// This is the index associated with the read
	Index uint64

	// If AllowStale is used, this is time elapsed since
	// last contact between the follower and leader. This
	// can be used to gauge staleness.
	LastContact time.Duration

	// Used to indicate if there is a known leader node
	KnownLeader bool

	// NextToken is the token returned with queries that support
	// paginated lists. To resume paging from this point, pass
	// this token in the next request's QueryOptions.
	NextToken string
}

// WriteMeta allows a write response to include potentially
// useful metadata about the write
type WriteMeta struct {
	// This is the index associated with the write
	Index uint64
}

// NodeRegisterRequest is used for Node.Register endpoint
// to register a node as being a schedulable entity.
type NodeRegisterRequest struct {
	// Node is the node being registered.
	Node *Node
	// NodeEvent is an optional event to record against the node.
	NodeEvent *NodeEvent
	WriteRequest
}

// NodeDeregisterRequest is used for Node.Deregister endpoint
// to deregister a node as being a schedulable entity.
type NodeDeregisterRequest struct {
	// NodeID is the ID of the node to deregister.
	NodeID string
	WriteRequest
}

// NodeBatchDeregisterRequest is used for Node.BatchDeregister endpoint
// to deregister a batch of nodes from being schedulable entities.
type NodeBatchDeregisterRequest struct {
	// NodeIDs is the set of node IDs to deregister.
	NodeIDs []string
	WriteRequest
}
// NodeServerInfo is used to in NodeUpdateResponse to return Nomad server
// information used in RPC server lists.
type NodeServerInfo struct {
	// RPCAdvertiseAddr is the IP endpoint that a Nomad Server wishes to
	// be contacted at for RPCs.
	RPCAdvertiseAddr string

	// RPCMajorVersion is the major version number the Nomad Server
	// supports
	RPCMajorVersion int32

	// RPCMinorVersion is the minor version number the Nomad Server
	// supports
	RPCMinorVersion int32

	// Datacenter is the datacenter that a Nomad server belongs to
	Datacenter string
}

// NodeUpdateStatusRequest is used for Node.UpdateStatus endpoint
// to update the status of a node.
type NodeUpdateStatusRequest struct {
	// NodeID is the ID of the node whose status is being updated.
	NodeID string
	// Status is the new status for the node.
	Status string
	// NodeEvent is an optional event to record against the node.
	NodeEvent *NodeEvent
	// UpdatedAt represents server time of receiving the request.
	UpdatedAt int64
	WriteRequest
}

// NodeUpdateDrainRequest is used for updating the drain strategy
type NodeUpdateDrainRequest struct {
	// NodeID is the ID of the node being drained.
	NodeID string
	// DrainStrategy is the new drain strategy; nil removes draining.
	DrainStrategy *DrainStrategy

	// MarkEligible marks the node as eligible if removing the drain strategy.
	MarkEligible bool

	// NodeEvent is the event added to the node
	NodeEvent *NodeEvent

	// UpdatedAt represents server time of receiving request
	UpdatedAt int64

	// Meta is user-provided metadata relating to the drain operation
	Meta map[string]string

	WriteRequest
}

// BatchNodeUpdateDrainRequest is used for updating the drain strategy for a
// batch of nodes
type BatchNodeUpdateDrainRequest struct {
	// Updates is a mapping of nodes to their updated drain strategy
	Updates map[string]*DrainUpdate

	// NodeEvents is a mapping of the node to the event to add to the node
	NodeEvents map[string]*NodeEvent

	// UpdatedAt represents server time of receiving request
	UpdatedAt int64

	WriteRequest
}

// DrainUpdate is used to update the drain of a node
type DrainUpdate struct {
	// DrainStrategy is the new strategy for the node
	DrainStrategy *DrainStrategy

	// MarkEligible marks the node as eligible if removing the drain strategy.
	MarkEligible bool
}

// NodeUpdateEligibilityRequest is used for updating the scheduling eligibility
type NodeUpdateEligibilityRequest struct {
	// NodeID is the ID of the node whose eligibility is being updated.
	NodeID string
	// Eligibility is the new scheduling eligibility value.
	Eligibility string

	// NodeEvent is the event added to the node
	NodeEvent *NodeEvent

	// UpdatedAt represents server time of receiving request
	UpdatedAt int64

	WriteRequest
}

// NodeEvaluateRequest is used to re-evaluate the node
type NodeEvaluateRequest struct {
	// NodeID is the ID of the node to re-evaluate.
	NodeID string
	WriteRequest
}

// NodeSpecificRequest is used when we just need to specify a target node
type NodeSpecificRequest struct {
	// NodeID is the ID of the target node.
	NodeID string
	// SecretID is the node's secret used to authenticate the request.
	SecretID string
	QueryOptions
}
// JobRegisterRequest is used for Job.Register endpoint
// to register a job as being a schedulable entity.
type JobRegisterRequest struct {
	// Job is the job being registered.
	Job *Job

	// If EnforceIndex is set then the job will only be registered if the passed
	// JobModifyIndex matches the current Jobs index. If the index is zero, the
	// register only occurs if the job is new.
	EnforceIndex   bool
	JobModifyIndex uint64

	// PreserveCounts indicates that during job update, existing task group
	// counts should be preserved, over those specified in the new job spec
	// PreserveCounts is ignored for newly created jobs.
	PreserveCounts bool

	// PolicyOverride is set when the user is attempting to override any policies
	PolicyOverride bool

	// EvalPriority is an optional priority to use on any evaluation created as
	// a result on this job registration. This value must be between 1-100
	// inclusively, where a larger value corresponds to a higher priority. This
	// is useful when an operator wishes to push through a job registration in
	// busy clusters with a large evaluation backlog. This avoids needing to
	// change the job priority which also impacts preemption.
	EvalPriority int

	// Eval is the evaluation that is associated with the job registration
	Eval *Evaluation

	WriteRequest
}

// JobDeregisterRequest is used for Job.Deregister endpoint
// to deregister a job as being a schedulable entity.
type JobDeregisterRequest struct {
	// JobID is the ID of the job being deregistered.
	JobID string

	// Purge controls whether the deregister purges the job from the system or
	// whether the job is just marked as stopped and will be removed by the
	// garbage collector
	Purge bool

	// Global controls whether all regions of a multi-region job are
	// deregistered. It is ignored for single-region jobs.
	Global bool

	// EvalPriority is an optional priority to use on any evaluation created as
	// a result on this job deregistration. This value must be between 1-100
	// inclusively, where a larger value corresponds to a higher priority. This
	// is useful when an operator wishes to push through a job deregistration
	// in busy clusters with a large evaluation backlog.
	EvalPriority int

	// NoShutdownDelay, if set to true, will override the group and
	// task shutdown_delay configuration and ignore the delay for any
	// allocations stopped as a result of this Deregister call.
	NoShutdownDelay bool

	// Eval is the evaluation to create that's associated with job deregister
	Eval *Evaluation

	WriteRequest
}

// JobBatchDeregisterRequest is used to batch deregister jobs and upsert
// evaluations.
type JobBatchDeregisterRequest struct {
	// Jobs is the set of jobs to deregister
	Jobs map[NamespacedID]*JobDeregisterOptions

	// Evals is the set of evaluations to create.
	Evals []*Evaluation

	WriteRequest
}

// JobDeregisterOptions configures how a job is deregistered.
type JobDeregisterOptions struct {
	// Purge controls whether the deregister purges the job from the system or
	// whether the job is just marked as stopped and will be removed by the
	// garbage collector
	Purge bool
}

// JobEvaluateRequest is used when we just need to re-evaluate a target job
type JobEvaluateRequest struct {
	// JobID is the ID of the job to re-evaluate.
	JobID string
	// EvalOptions configures how the evaluation is forced.
	EvalOptions EvalOptions
	WriteRequest
}

// EvalOptions is used to encapsulate options when forcing a job evaluation
type EvalOptions struct {
	// ForceReschedule forces rescheduling of failed allocations.
	ForceReschedule bool
}

// JobSpecificRequest is used when we just need to specify a target job
type JobSpecificRequest struct {
	// JobID is the ID of the target job.
	JobID string
	// All indicates whether all versions of the job should be returned.
	All bool
	QueryOptions
}

// JobListRequest is used to parameterize a list request
type JobListRequest struct {
	QueryOptions
}

// JobPlanRequest is used for the Job.Plan endpoint to trigger a dry-run
// evaluation of the Job.
type JobPlanRequest struct {
	// Job is the job to dry-run.
	Job  *Job
	Diff bool // Toggles an annotated diff
	// PolicyOverride is set when the user is attempting to override any policies
	PolicyOverride bool
	WriteRequest
}

// JobScaleRequest is used for the Job.Scale endpoint to scale one of the
// scaling targets in a job
type JobScaleRequest struct {
	// JobID is the ID of the job being scaled.
	JobID string
	// Target identifies the scaling target (namespace, job, group).
	Target map[string]string
	// Count is the new desired count; nil when only recording an event.
	Count *int64
	// Message is a human-readable description of the scaling action.
	Message string
	// Error indicates the scaling event represents an error state.
	Error bool
	// Meta is arbitrary user-supplied metadata for the scaling event.
	Meta map[string]interface{}
	// PolicyOverride is set when the user is attempting to override any policies
	PolicyOverride bool
	WriteRequest
}
// Validate is used to validate the arguments in the request. It checks that
// the optional namespace and job fields in Target agree with the request
// header and URL, that a task group is named, and that any Count is a
// non-negative value that fits in an int.
func (r *JobScaleRequest) Validate() error {
	namespace := r.Target[ScalingTargetNamespace]
	if namespace != "" && namespace != r.RequestNamespace() {
		return NewErrRPCCoded(400, "namespace in payload did not match header")
	}

	jobID := r.Target[ScalingTargetJob]
	if jobID != "" && jobID != r.JobID {
		// Use a coded 400 error like the sibling validation failures so the
		// HTTP layer reports a client error rather than a generic server error.
		return NewErrRPCCoded(400, "job ID in payload did not match URL")
	}

	groupName := r.Target[ScalingTargetGroup]
	if groupName == "" {
		return NewErrRPCCoded(400, "missing task group name for scaling action")
	}

	if r.Count != nil {
		if *r.Count < 0 {
			return NewErrRPCCoded(400, "scaling action count can't be negative")
		}

		if r.Error {
			return NewErrRPCCoded(400, "scaling action should not contain count if error is true")
		}

		// Reject counts that would be truncated when stored in
		// TaskGroup.Count, which is a platform-sized int.
		truncCount := int(*r.Count)
		if int64(truncCount) != *r.Count {
			return NewErrRPCCoded(400,
				fmt.Sprintf("new scaling count is too large for TaskGroup.Count (int): %v", r.Count))
		}
	}

	return nil
}
// JobSummaryRequest is used when we just need to get a specific job summary
type JobSummaryRequest struct {
	// JobID is the ID of the job whose summary is requested.
	JobID string
	QueryOptions
}

// JobScaleStatusRequest is used to get the scale status for a job
type JobScaleStatusRequest struct {
	// JobID is the ID of the job whose scale status is requested.
	JobID string
	QueryOptions
}

// JobDispatchRequest is used to dispatch a job based on a parameterized job
type JobDispatchRequest struct {
	// JobID is the ID of the parameterized job to dispatch.
	JobID string
	// Payload is the opaque payload passed to the dispatched job.
	Payload []byte
	// Meta is the key/value metadata merged into the dispatched job.
	Meta map[string]string
	WriteRequest
}

// JobValidateRequest is used to validate a job
type JobValidateRequest struct {
	// Job is the job to validate.
	Job *Job
	WriteRequest
}

// JobRevertRequest is used to revert a job to a prior version.
type JobRevertRequest struct {
	// JobID is the ID of the job  being reverted
	JobID string

	// JobVersion the version to revert to.
	JobVersion uint64

	// EnforcePriorVersion if set will enforce that the job is at the given
	// version before reverting.
	EnforcePriorVersion *uint64

	// ConsulToken is the Consul token that proves the submitter of the job revert
	// has access to the Service Identity policies associated with the job's
	// Consul Connect enabled services. This field is only used to transfer the
	// token and is not stored after the Job revert.
	ConsulToken string

	// VaultToken is the Vault token that proves the submitter of the job revert
	// has access to any Vault policies specified in the targeted job version. This
	// field is only used to transfer the token and is not stored after the Job
	// revert.
	VaultToken string

	WriteRequest
}

// JobStabilityRequest is used to marked a job as stable.
type JobStabilityRequest struct {
	// Job to set the stability on
	JobID      string
	JobVersion uint64

	// Set the stability
	Stable bool
	WriteRequest
}

// JobStabilityResponse is the response when marking a job as stable.
type JobStabilityResponse struct {
	WriteMeta
}

// NodeListRequest is used to parameterize a list request
type NodeListRequest struct {
	QueryOptions

	// Fields selects which node stub fields to include in the response.
	Fields *NodeStubFields
}

// EvalUpdateRequest is used for upserting evaluations.
type EvalUpdateRequest struct {
	// Evals is the set of evaluations to upsert.
	Evals []*Evaluation
	// EvalToken authenticates the worker making the update.
	EvalToken string
	WriteRequest
}

// EvalDeleteRequest is used for deleting an evaluation.
type EvalDeleteRequest struct {
	// Evals is the set of evaluation IDs to delete.
	Evals []string
	// Allocs is the set of allocation IDs to delete.
	Allocs []string
	WriteRequest
}

// EvalSpecificRequest is used when we just need to specify a target evaluation
type EvalSpecificRequest struct {
	// EvalID is the ID of the target evaluation.
	EvalID string
	// IncludeRelated indicates related evaluations should be returned too.
	IncludeRelated bool
	QueryOptions
}

// EvalAckRequest is used to Ack/Nack a specific evaluation
type EvalAckRequest struct {
	// EvalID is the ID of the evaluation being acknowledged.
	EvalID string
	// Token authenticates the worker that dequeued the evaluation.
	Token string
	WriteRequest
}

// EvalDequeueRequest is used when we want to dequeue an evaluation
type EvalDequeueRequest struct {
	// Schedulers lists the scheduler types the worker can service.
	Schedulers []string
	// Timeout bounds how long the dequeue may block waiting for work.
	Timeout time.Duration
	// SchedulerVersion is the version of the dequeuing scheduler.
	SchedulerVersion uint16
	WriteRequest
}

// EvalListRequest is used to list the evaluations
type EvalListRequest struct {
	// FilterJobID restricts results to evaluations of this job.
	FilterJobID string
	// FilterEvalStatus restricts results to evaluations with this status.
	FilterEvalStatus string
	QueryOptions
}
// ShouldBeFiltered reports whether the evaluation should be removed from
// the list results because it fails the request's job-ID or status filter.
func (req *EvalListRequest) ShouldBeFiltered(e *Evaluation) bool {
	wrongJob := req.FilterJobID != "" && req.FilterJobID != e.JobID
	wrongStatus := req.FilterEvalStatus != "" && req.FilterEvalStatus != e.Status
	return wrongJob || wrongStatus
}
// PlanRequest is used to submit an allocation plan to the leader
type PlanRequest struct {
	// Plan is the allocation plan to apply.
	Plan *Plan
	WriteRequest
}

// ApplyPlanResultsRequest is used by the planner to apply a Raft transaction
// committing the result of a plan.
type ApplyPlanResultsRequest struct {
	// AllocUpdateRequest holds the allocation updates to be made by the
	// scheduler.
	AllocUpdateRequest

	// Deployment is the deployment created or updated as a result of a
	// scheduling event.
	Deployment *Deployment

	// DeploymentUpdates is a set of status updates to apply to the given
	// deployments. This allows the scheduler to cancel any unneeded deployment
	// because the job is stopped or the update block is removed.
	DeploymentUpdates []*DeploymentStatusUpdate

	// EvalID is the eval ID of the plan being applied. The modify index of the
	// evaluation is updated as part of applying the plan to ensure that subsequent
	// scheduling events for the same job will wait for the index that last produced
	// state changes. This is necessary for blocked evaluations since they can be
	// processed many times, potentially making state updates, without the state of
	// the evaluation itself being updated.
	EvalID string

	// COMPAT 0.11
	// NodePreemptions is a slice of allocations from other lower priority jobs
	// that are preempted. Preempted allocations are marked as evicted.
	// Deprecated: Replaced with AllocsPreempted which contains only the diff
	NodePreemptions []*Allocation

	// AllocsPreempted is a slice of allocation diffs from other lower priority jobs
	// that are preempted. Preempted allocations are marked as evicted.
	AllocsPreempted []*AllocationDiff

	// PreemptionEvals is a slice of follow up evals for jobs whose allocations
	// have been preempted to place allocs in this plan
	PreemptionEvals []*Evaluation
}

// AllocUpdateRequest is used to submit changes to allocations, either
// to cause evictions or to assign new allocations. Both can be done
// within a single transaction
type AllocUpdateRequest struct {
	// COMPAT 0.11
	// Alloc is the list of new allocations to assign
	// Deprecated: Replaced with two separate slices, one containing stopped allocations
	// and another containing updated allocations
	Alloc []*Allocation

	// Allocations to stop. Contains only the diff, not the entire allocation
	AllocsStopped []*AllocationDiff

	// New or updated allocations
	AllocsUpdated []*Allocation

	// Evals is the list of new evaluations to create
	// Evals are valid only when used in the Raft RPC
	Evals []*Evaluation

	// Job is the shared parent job of the allocations.
	// It is pulled out since it is common to reduce payload size.
	Job *Job

	WriteRequest
}

// AllocUpdateDesiredTransitionRequest is used to submit changes to allocations
// desired transition state.
type AllocUpdateDesiredTransitionRequest struct {
	// Allocs is the mapping of allocation ids to their desired state
	// transition
	Allocs map[string]*DesiredTransition

	// Evals is the set of evaluations to create
	Evals []*Evaluation

	WriteRequest
}

// AllocStopRequest is used to stop and reschedule a running Allocation.
type AllocStopRequest struct {
	// AllocID is the ID of the allocation to stop.
	AllocID string
	// NoShutdownDelay, if true, ignores the configured shutdown_delay.
	NoShutdownDelay bool

	WriteRequest
}

// AllocStopResponse is the response to an `AllocStopRequest`
type AllocStopResponse struct {
	// EvalID is the id of the follow up evaluation for the rescheduled alloc.
	EvalID string

	WriteMeta
}
// AllocListRequest is used to request a list of allocations
type AllocListRequest struct {
	QueryOptions

	// Fields selects which allocation stub fields to include in the response.
	Fields *AllocStubFields
}

// AllocSpecificRequest is used to query a specific allocation
type AllocSpecificRequest struct {
	// AllocID is the ID of the target allocation.
	AllocID string
	QueryOptions
}

// AllocSignalRequest is used to signal a specific allocation
type AllocSignalRequest struct {
	// AllocID is the ID of the allocation to signal.
	AllocID string
	// Task optionally names a single task to signal; empty means all tasks.
	Task string
	// Signal is the name of the signal to send.
	Signal string
	QueryOptions
}

// AllocsGetRequest is used to query a set of allocations
type AllocsGetRequest struct {
	// AllocIDs is the set of allocation IDs to fetch.
	AllocIDs []string
	QueryOptions
}

// AllocRestartRequest is used to restart a specific allocations tasks.
type AllocRestartRequest struct {
	// AllocID is the ID of the allocation to restart.
	AllocID string
	// TaskName optionally names a single task to restart.
	TaskName string
	QueryOptions
}

// PeriodicForceRequest is used to force a specific periodic job.
type PeriodicForceRequest struct {
	// JobID is the ID of the periodic job to force.
	JobID string
	WriteRequest
}

// ServerMembersResponse has the list of servers in a cluster
type ServerMembersResponse struct {
	// ServerName is the name of the server answering the request.
	ServerName string
	// ServerRegion is the region of the answering server.
	ServerRegion string
	// ServerDC is the datacenter of the answering server.
	ServerDC string
	// Members is the full set of known cluster members.
	Members []*ServerMember
}

// ServerMember holds information about a Nomad server agent in a cluster
type ServerMember struct {
	Name        string
	Addr        net.IP
	Port        uint16
	Tags        map[string]string
	Status      string
	ProtocolMin uint8
	ProtocolMax uint8
	ProtocolCur uint8
	DelegateMin uint8
	DelegateMax uint8
	DelegateCur uint8
}

// ClusterMetadata is used to store per-cluster metadata.
type ClusterMetadata struct {
	// ClusterID uniquely identifies the cluster.
	ClusterID string
	// CreateTime is when the cluster metadata was created.
	CreateTime int64
}

// DeriveVaultTokenRequest is used to request wrapped Vault tokens for the
// following tasks in the given allocation
type DeriveVaultTokenRequest struct {
	// NodeID is the node making the request.
	NodeID string
	// SecretID authenticates the requesting node.
	SecretID string
	// AllocID is the allocation whose tasks need tokens.
	AllocID string
	// Tasks lists the task names requiring Vault tokens.
	Tasks []string
	QueryOptions
}

// VaultAccessorsRequest is used to operate on a set of Vault accessors
type VaultAccessorsRequest struct {
	Accessors []*VaultAccessor
}

// VaultAccessor is a reference to a created Vault token on behalf of
// an allocation's task.
type VaultAccessor struct {
	AllocID     string
	Task        string
	NodeID      string
	Accessor    string
	CreationTTL int

	// Raft Indexes
	CreateIndex uint64
}
// DeriveVaultTokenResponse returns the wrapped tokens for each requested task
type DeriveVaultTokenResponse struct {
	// Tasks is a mapping between the task name and the wrapped token
	Tasks map[string]string

	// Error stores any error that occurred. Errors are stored here so we can
	// communicate whether it is retryable
	Error *RecoverableError

	QueryMeta
}

// GenericRequest is used to request where no
// specific information is needed.
type GenericRequest struct {
	QueryOptions
}

// DeploymentListRequest is used to list the deployments
type DeploymentListRequest struct {
	QueryOptions
}

// DeploymentDeleteRequest is used for deleting deployments.
type DeploymentDeleteRequest struct {
	// Deployments is the set of deployment IDs to delete.
	Deployments []string
	WriteRequest
}

// DeploymentStatusUpdateRequest is used to update the status of a deployment as
// well as optionally creating an evaluation atomically.
type DeploymentStatusUpdateRequest struct {
	// Eval, if set, is used to create an evaluation at the same time as
	// updating the status of a deployment.
	Eval *Evaluation

	// DeploymentUpdate is a status update to apply to the given
	// deployment.
	DeploymentUpdate *DeploymentStatusUpdate

	// Job is used to optionally upsert a job. This is used when setting the
	// allocation health results in a deployment failure and the deployment
	// auto-reverts to the latest stable job.
	Job *Job
}

// DeploymentAllocHealthRequest is used to set the health of a set of
// allocations as part of a deployment.
type DeploymentAllocHealthRequest struct {
	// DeploymentID is the deployment the allocations belong to.
	DeploymentID string

	// Marks these allocations as healthy, allow further allocations
	// to be rolled.
	HealthyAllocationIDs []string

	// Any unhealthy allocations fail the deployment
	UnhealthyAllocationIDs []string

	WriteRequest
}

// ApplyDeploymentAllocHealthRequest is used to apply an alloc health request via Raft
type ApplyDeploymentAllocHealthRequest struct {
	DeploymentAllocHealthRequest

	// Timestamp is the timestamp to use when setting the allocations health.
	Timestamp time.Time

	// An optional field to update the status of a deployment
	DeploymentUpdate *DeploymentStatusUpdate

	// Job is used to optionally upsert a job. This is used when setting the
	// allocation health results in a deployment failure and the deployment
	// auto-reverts to the latest stable job.
	Job *Job

	// An optional evaluation to create after promoting the canaries
	Eval *Evaluation
}

// DeploymentPromoteRequest is used to promote task groups in a deployment
type DeploymentPromoteRequest struct {
	// DeploymentID is the deployment being promoted.
	DeploymentID string

	// All is to promote all task groups
	All bool

	// Groups is used to set the promotion status per task group
	Groups []string

	WriteRequest
}

// ApplyDeploymentPromoteRequest is used to apply a promotion request via Raft
type ApplyDeploymentPromoteRequest struct {
	DeploymentPromoteRequest

	// An optional evaluation to create after promoting the canaries
	Eval *Evaluation
}

// DeploymentPauseRequest is used to pause a deployment
type DeploymentPauseRequest struct {
	// DeploymentID is the deployment being paused or resumed.
	DeploymentID string

	// Pause sets the pause status
	Pause bool

	WriteRequest
}

// DeploymentRunRequest is used to remotely start a pending deployment.
// Used only for multiregion deployments.
type DeploymentRunRequest struct {
	// DeploymentID is the pending deployment to start.
	DeploymentID string

	WriteRequest
}

// DeploymentUnblockRequest is used to remotely unblock a deployment.
// Used only for multiregion deployments.
type DeploymentUnblockRequest struct {
	// DeploymentID is the deployment to unblock.
	DeploymentID string

	WriteRequest
}

// DeploymentCancelRequest is used to remotely cancel a deployment.
// Used only for multiregion deployments.
type DeploymentCancelRequest struct {
	// DeploymentID is the deployment to cancel.
	DeploymentID string

	WriteRequest
}
// DeploymentSpecificRequest is used to make a request specific to a particular
// deployment
type DeploymentSpecificRequest struct {
	// DeploymentID is the ID of the target deployment.
	DeploymentID string
	QueryOptions
}

// DeploymentFailRequest is used to fail a particular deployment
type DeploymentFailRequest struct {
	// DeploymentID is the deployment to mark as failed.
	DeploymentID string
	WriteRequest
}

// ScalingPolicySpecificRequest is used when we just need to specify a target scaling policy
type ScalingPolicySpecificRequest struct {
	// ID is the ID of the target scaling policy.
	ID string
	QueryOptions
}

// SingleScalingPolicyResponse is used to return a single scaling policy
type SingleScalingPolicyResponse struct {
	Policy *ScalingPolicy
	QueryMeta
}

// ScalingPolicyListRequest is used to parameterize a scaling policy list request
type ScalingPolicyListRequest struct {
	// Job restricts results to policies of this job.
	Job string
	// Type restricts results to policies of this type.
	Type string
	QueryOptions
}

// ScalingPolicyListResponse is used for a list request
type ScalingPolicyListResponse struct {
	Policies []*ScalingPolicyListStub
	QueryMeta
}

// SingleDeploymentResponse is used to respond with a single deployment
type SingleDeploymentResponse struct {
	Deployment *Deployment
	QueryMeta
}

// GenericResponse is used to respond to a request where no
// specific response information is needed.
type GenericResponse struct {
	WriteMeta
}

// VersionResponse is used for the Status.Version response
type VersionResponse struct {
	Build    string
	Versions map[string]int
	QueryMeta
}

// JobRegisterResponse is used to respond to a job registration
type JobRegisterResponse struct {
	// EvalID is the evaluation created for the registration, if any.
	EvalID string
	// EvalCreateIndex is the Raft index at which the eval was created.
	EvalCreateIndex uint64
	// JobModifyIndex is the Raft index at which the job was modified.
	JobModifyIndex uint64

	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string

	QueryMeta
}

// JobDeregisterResponse is used to respond to a job deregistration
type JobDeregisterResponse struct {
	// EvalID is the evaluation created for the deregistration, if any.
	EvalID string
	// EvalCreateIndex is the Raft index at which the eval was created.
	EvalCreateIndex uint64
	// JobModifyIndex is the Raft index at which the job was modified.
	JobModifyIndex uint64
	// VolumeEvalID is the evaluation created for associated volumes, if any.
	VolumeEvalID string
	// VolumeEvalIndex is the Raft index of the volume evaluation.
	VolumeEvalIndex uint64
	QueryMeta
}

// JobBatchDeregisterResponse is used to respond to a batch job deregistration
type JobBatchDeregisterResponse struct {
	// JobEvals maps the job to its created evaluation
	JobEvals map[NamespacedID]string
	QueryMeta
}

// JobValidateResponse is the response from validate request
type JobValidateResponse struct {
	// DriverConfigValidated indicates whether the agent validated the driver
	// config
	DriverConfigValidated bool

	// ValidationErrors is a list of validation errors
	ValidationErrors []string

	// Error is a string version of any error that may have occurred
	Error string

	// Warnings contains any warnings about the given job. These may include
	// deprecation warnings.
	Warnings string
}
// NodeUpdateResponse is used to respond to a node update
type NodeUpdateResponse struct {
	// HeartbeatTTL is how long the node has before it must heartbeat again.
	HeartbeatTTL time.Duration
	// EvalIDs are the evaluations created as a result of the update.
	EvalIDs []string
	// EvalCreateIndex is the Raft index at which the evals were created.
	EvalCreateIndex uint64
	// NodeModifyIndex is the Raft index at which the node was modified.
	NodeModifyIndex uint64

	// Features informs clients what enterprise features are allowed
	Features uint64

	// LeaderRPCAddr is the RPC address of the current Raft Leader. If
	// empty, the current Nomad Server is in the minority of a partition.
	LeaderRPCAddr string

	// NumNodes is the number of Nomad nodes attached to this quorum of
	// Nomad Servers at the time of the response. This value can
	// fluctuate based on the health of the cluster between heartbeats.
	NumNodes int32

	// Servers is the full list of known Nomad servers in the local
	// region.
	Servers []*NodeServerInfo

	QueryMeta
}

// NodeDrainUpdateResponse is used to respond to a node drain update
type NodeDrainUpdateResponse struct {
	NodeModifyIndex uint64
	EvalIDs         []string
	EvalCreateIndex uint64
	WriteMeta
}

// NodeEligibilityUpdateResponse is used to respond to a node eligibility update
type NodeEligibilityUpdateResponse struct {
	NodeModifyIndex uint64
	EvalIDs         []string
	EvalCreateIndex uint64
	WriteMeta
}

// NodeAllocsResponse is used to return allocs for a single node
type NodeAllocsResponse struct {
	Allocs []*Allocation
	QueryMeta
}

// NodeClientAllocsResponse is used to return allocs meta data for a single node
type NodeClientAllocsResponse struct {
	Allocs map[string]uint64

	// MigrateTokens are used when ACLs are enabled to allow cross node,
	// authenticated access to sticky volumes
	MigrateTokens map[string]string

	QueryMeta
}

// SingleNodeResponse is used to return a single node
type SingleNodeResponse struct {
	Node *Node
	QueryMeta
}

// NodeListResponse is used for a list request
type NodeListResponse struct {
	Nodes []*NodeListStub
	QueryMeta
}

// SingleJobResponse is used to return a single job
type SingleJobResponse struct {
	Job *Job
	QueryMeta
}

// JobSummaryResponse is used to return a single job summary
type JobSummaryResponse struct {
	JobSummary *JobSummary
	QueryMeta
}

// JobScaleStatusResponse is used to return the scale status for a job
type JobScaleStatusResponse struct {
	JobScaleStatus *JobScaleStatus
	QueryMeta
}

// JobScaleStatus holds the aggregate scale status for a job.
type JobScaleStatus struct {
	JobID          string
	Namespace      string
	JobCreateIndex uint64
	JobModifyIndex uint64
	JobStopped     bool
	TaskGroups     map[string]*TaskGroupScaleStatus
}

// TaskGroupScaleStatus is used to return the scale status for a given task group
type TaskGroupScaleStatus struct {
	Desired   int
	Placed    int
	Running   int
	Healthy   int
	Unhealthy int
	Events    []*ScalingEvent
}

// JobDispatchResponse is used to respond to a parameterized job dispatch.
type JobDispatchResponse struct {
	// DispatchedJobID is the ID of the newly created dispatched job.
	DispatchedJobID string
	// EvalID is the evaluation created for the dispatched job, if any.
	EvalID string
	// EvalCreateIndex is the Raft index at which the eval was created.
	EvalCreateIndex uint64
	// JobCreateIndex is the Raft index at which the job was created.
	JobCreateIndex uint64
	WriteMeta
}

// JobListResponse is used for a list request
type JobListResponse struct {
	Jobs []*JobListStub
	QueryMeta
}

// JobVersionsRequest is used to get a jobs versions
type JobVersionsRequest struct {
	// JobID is the ID of the job whose versions are requested.
	JobID string
	// Diffs indicates diffs between versions should be returned.
	Diffs bool
	QueryOptions
}

// JobVersionsResponse is used for a job get versions request
type JobVersionsResponse struct {
	Versions []*Job
	Diffs    []*JobDiff
	QueryMeta
}
// JobPlanResponse is used to respond to a job plan request
type JobPlanResponse struct {
// Annotations stores annotations explaining decisions the scheduler made.
Annotations *PlanAnnotations
// FailedTGAllocs is the placement failures per task group.
FailedTGAllocs map[string]*AllocMetric
// JobModifyIndex is the modification index of the job. The value can be
// used when running `nomad run` to ensure that the Job wasn’t modified
// since the last plan. If the job is being created, the value is zero.
JobModifyIndex uint64
// CreatedEvals is the set of evaluations created by the scheduler. The
// reasons for this can be rolling-updates or blocked evals.
CreatedEvals []*Evaluation
// Diff contains the diff of the job and annotations on whether the change
// causes an in-place update or create/destroy
Diff *JobDiff
// NextPeriodicLaunch is the time duration till the job would be launched if
// submitted.
NextPeriodicLaunch time.Time
// Warnings contains any warnings about the given job. These may include
// deprecation warnings.
Warnings string
WriteMeta
}
// SingleAllocResponse is used to return a single allocation
type SingleAllocResponse struct {
Alloc *Allocation
QueryMeta
}
// AllocsGetResponse is used to return a set of allocations
type AllocsGetResponse struct {
	Allocs []*Allocation
	QueryMeta
}
// JobAllocationsResponse is used to return the allocations for a job
type JobAllocationsResponse struct {
	// Allocations are returned as summarized stubs, not full allocations.
	Allocations []*AllocListStub
	QueryMeta
}
// JobEvaluationsResponse is used to return the evaluations for a job
type JobEvaluationsResponse struct {
	Evaluations []*Evaluation
	QueryMeta
}
// SingleEvalResponse is used to return a single evaluation
type SingleEvalResponse struct {
	Eval *Evaluation
	QueryMeta
}
// EvalDequeueResponse is used to return from a dequeue
type EvalDequeueResponse struct {
	// Eval is the dequeued evaluation, if any was pending.
	Eval *Evaluation
	// Token is used to prove ownership of the dequeued evaluation.
	Token string
	// WaitIndex is the Raft index the worker should wait until invoking the
	// scheduler.
	WaitIndex uint64
	QueryMeta
}
// GetWaitIndex is used to retrieve the Raft index in which state should be at
// or beyond before invoking the scheduler.
func (e *EvalDequeueResponse) GetWaitIndex() uint64 {
	// Prefer the explicit wait index; it is populated on all responses from
	// 0.7.0 and above.
	if idx := e.WaitIndex; idx != 0 {
		return idx
	}
	// Fall back to the evaluation's modify index for older servers.
	if e.Eval != nil {
		return e.Eval.ModifyIndex
	}
	// This should never happen.
	return 1
}
// PlanResponse is used to return from a PlanRequest
type PlanResponse struct {
	Result *PlanResult
	WriteMeta
}
// AllocListResponse is used for a list request
type AllocListResponse struct {
	Allocations []*AllocListStub
	QueryMeta
}
// DeploymentListResponse is used for a list request
type DeploymentListResponse struct {
	Deployments []*Deployment
	QueryMeta
}
// EvalListResponse is used for a list request
type EvalListResponse struct {
	Evaluations []*Evaluation
	QueryMeta
}
// EvalAllocationsResponse is used to return the allocations for an evaluation
type EvalAllocationsResponse struct {
	Allocations []*AllocListStub
	QueryMeta
}
// PeriodicForceResponse is used to respond to a periodic job force launch
type PeriodicForceResponse struct {
	EvalID          string
	EvalCreateIndex uint64
	WriteMeta
}
// DeploymentUpdateResponse is used to respond to a deployment change. The
// response will include the modify index of the deployment as well as details
// of any triggered evaluation.
type DeploymentUpdateResponse struct {
	EvalID                string
	EvalCreateIndex       uint64
	DeploymentModifyIndex uint64
	// RevertedJobVersion is the version the job was reverted to. If unset, the
	// job wasn't reverted
	RevertedJobVersion *uint64
	WriteMeta
}
// NodeConnQueryResponse is used to respond to a query of whether a server has
// a connection to a specific Node
type NodeConnQueryResponse struct {
	// Connected indicates whether a connection to the Client exists
	Connected bool
	// Established marks the time at which the connection was established
	Established time.Time
	QueryMeta
}
// HostDataRequest is used by /agent/host to retrieve data about the agent's host system. If
// ServerID or NodeID is specified, the request is forwarded to the remote agent
type HostDataRequest struct {
	// ServerID optionally targets a specific server agent.
	ServerID string
	// NodeID optionally targets a specific client agent.
	NodeID string
	QueryOptions
}
// HostDataResponse contains the HostData content
type HostDataResponse struct {
	// AgentID identifies the agent the data was collected from.
	AgentID  string
	HostData *host.HostData
}
// EmitNodeEventsRequest is a request to update the node events source
// with a new client-side event
type EmitNodeEventsRequest struct {
	// NodeEvents are a map where the key is a node id, and value is a list of
	// events for that node
	NodeEvents map[string][]*NodeEvent
	WriteRequest
}
// EmitNodeEventsResponse is a response to the client about the status of
// the node event source update.
type EmitNodeEventsResponse struct {
	WriteMeta
}
// Node event subsystems identify which part of the client produced a
// NodeEvent.
const (
	NodeEventSubsystemDrain     = "Drain"
	NodeEventSubsystemDriver    = "Driver"
	NodeEventSubsystemHeartbeat = "Heartbeat"
	NodeEventSubsystemCluster   = "Cluster"
	NodeEventSubsystemStorage   = "Storage"
)
// NodeEvent is a single unit representing a node's state change
type NodeEvent struct {
	// Message is a human-readable description of the event.
	Message string
	// Subsystem is one of the NodeEventSubsystem* constants.
	Subsystem string
	// Details holds arbitrary key/value context for the event.
	Details map[string]string
	// Timestamp records when the event occurred.
	Timestamp time.Time
	// CreateIndex is the Raft index at which the event was recorded.
	CreateIndex uint64
}
// String renders the event, including all detail key/value pairs, for
// human-readable output. Detail ordering follows map iteration order and is
// therefore not stable between calls.
func (ne *NodeEvent) String() string {
	pairs := make([]string, 0, len(ne.Details))
	for key, value := range ne.Details {
		pairs = append(pairs, fmt.Sprintf("%s: %s", key, value))
	}
	return fmt.Sprintf("Message: %s, Subsystem: %s, Details: %s, Timestamp: %s", ne.Message, ne.Subsystem, strings.Join(pairs, ","), ne.Timestamp.String())
}
// Copy returns a deep copy of the node event, including its Details map.
func (ne *NodeEvent) Copy() *NodeEvent {
	dup := new(NodeEvent)
	*dup = *ne
	dup.Details = helper.CopyMapStringString(ne.Details)
	return dup
}
// NewNodeEvent generates a new node event storing the current time as the
// timestamp
func NewNodeEvent() *NodeEvent {
	event := new(NodeEvent)
	event.Timestamp = time.Now()
	return event
}
// SetMessage is used to set the message on the node event.
// It returns the receiver to allow call chaining.
func (ne *NodeEvent) SetMessage(msg string) *NodeEvent {
	ne.Message = msg
	return ne
}
// SetSubsystem is used to set the subsystem on the node event.
// It returns the receiver to allow call chaining.
func (ne *NodeEvent) SetSubsystem(sys string) *NodeEvent {
	ne.Subsystem = sys
	return ne
}
// SetTimestamp is used to set the timestamp on the node event.
// It returns the receiver to allow call chaining.
func (ne *NodeEvent) SetTimestamp(ts time.Time) *NodeEvent {
	ne.Timestamp = ts
	return ne
}
// AddDetail is used to add a detail to the node event.
// It returns the receiver to allow call chaining.
func (ne *NodeEvent) AddDetail(k, v string) *NodeEvent {
	// Allocate the map lazily so zero-value events stay cheap.
	if ne.Details == nil {
		ne.Details = map[string]string{k: v}
		return ne
	}
	ne.Details[k] = v
	return ne
}
const (
	NodeStatusInit         = "initializing"
	NodeStatusReady        = "ready"
	NodeStatusDown         = "down"
	NodeStatusDisconnected = "disconnected"
)
// ShouldDrainNode checks if a given node status should trigger an
// evaluation. Some states don't require any further action.
func ShouldDrainNode(status string) bool {
	switch status {
	case NodeStatusDown:
		return true
	case NodeStatusInit, NodeStatusReady, NodeStatusDisconnected:
		return false
	default:
		// Unknown statuses indicate a programming error.
		panic(fmt.Sprintf("unhandled node status %s", status))
	}
}
// ValidNodeStatus is used to check if a node status is valid
func ValidNodeStatus(status string) bool {
	switch status {
	case NodeStatusInit, NodeStatusReady, NodeStatusDown, NodeStatusDisconnected:
		return true
	}
	return false
}
const (
	// NodeSchedulingEligible and Ineligible marks the node as eligible or not,
	// respectively, for receiving allocations. This is orthogonal to the node
	// status being ready.
	NodeSchedulingEligible   = "eligible"
	NodeSchedulingIneligible = "ineligible"
)
// DrainSpec describes a Node's desired drain behavior.
type DrainSpec struct {
	// Deadline is the duration after StartTime when the remaining
	// allocations on a draining Node should be told to stop.
	Deadline time.Duration
	// IgnoreSystemJobs allows systems jobs to remain on the node even though it
	// has been marked for draining.
	IgnoreSystemJobs bool
}
// DrainStrategy describes a Node's drain behavior.
type DrainStrategy struct {
	// DrainSpec is the user declared drain specification
	DrainSpec
	// ForceDeadline is the deadline time for the drain after which drains will
	// be forced
	ForceDeadline time.Time
	// StartedAt is the time the drain process started
	StartedAt time.Time
}
// Copy returns a shallow copy of the drain strategy, or nil for a nil
// receiver. All fields are value types, so a shallow copy is a full copy.
func (d *DrainStrategy) Copy() *DrainStrategy {
	if d == nil {
		return nil
	}
	dup := *d
	return &dup
}
// DeadlineTime returns a boolean whether the drain strategy allows an infinite
// duration or otherwise the deadline time. The force drain is captured by the
// deadline time being in the past.
func (d *DrainStrategy) DeadlineTime() (infinite bool, deadline time.Time) {
	// Treat the nil case as a force drain so during an upgrade where a node may
	// not have a drain strategy but has Drain set to true, it is treated as a
	// force to mimic old behavior.
	if d == nil {
		return false, time.Time{}
	}
	ns := d.Deadline.Nanoseconds()
	if ns < 0 {
		// Negative deadline: force drain.
		return false, time.Time{}
	}
	if ns == 0 {
		// Zero deadline: drain without a time limit.
		return true, time.Time{}
	}
	return false, d.ForceDeadline
}
// Equal reports whether two drain strategies are field-for-field identical.
// Two nil strategies are considered equal.
func (d *DrainStrategy) Equal(o *DrainStrategy) bool {
	if d == nil || o == nil {
		// Equal only when both are nil.
		return d == o
	}
	// Compare values; ForceDeadline uses struct equality as in the original.
	return d.ForceDeadline == o.ForceDeadline &&
		d.Deadline == o.Deadline &&
		d.IgnoreSystemJobs == o.IgnoreSystemJobs
}
const (
	// DrainStatuses are the various states a drain can be in, as reflected in DrainMetadata
	DrainStatusDraining DrainStatus = "draining"
	DrainStatusComplete DrainStatus = "complete"
	DrainStatusCanceled DrainStatus = "canceled"
)
// DrainStatus describes the state of a node drain operation.
type DrainStatus string
// DrainMetadata contains information about the most recent drain operation for a given Node.
type DrainMetadata struct {
	// StartedAt is the time that the drain operation started. This is equal to Node.DrainStrategy.StartedAt,
	// if it exists
	StartedAt time.Time
	// UpdatedAt is the time that this struct was most recently updated, either via API action
	// or drain completion
	UpdatedAt time.Time
	// Status reflects the status of the drain operation.
	Status DrainStatus
	// AccessorID is the accessor ID of the ACL token used in the most recent API operation against this drain
	AccessorID string
	// Meta includes the operator-submitted metadata about this drain operation
	Meta map[string]string
}
// Copy returns a deep copy of the drain metadata, or nil for a nil receiver.
func (m *DrainMetadata) Copy() *DrainMetadata {
	if m == nil {
		return nil
	}
	dup := *m
	// Meta is the only reference-typed field and needs its own copy.
	dup.Meta = helper.CopyMapStringString(m.Meta)
	return &dup
}
// Node is a representation of a schedulable client node
type Node struct {
	// ID is a unique identifier for the node. It can be constructed
	// by doing a concatenation of the Name and Datacenter as a simple
	// approach. Alternatively a UUID may be used.
	ID string
	// SecretID is an ID that is only known by the Node and the set of Servers.
	// It is not accessible via the API and is used to authenticate nodes
	// conducting privileged activities.
	SecretID string
	// Datacenter for this node
	Datacenter string
	// Node name
	Name string
	// CgroupParent for this node (linux only)
	CgroupParent string
	// HTTPAddr is the address on which the Nomad client is listening for http
	// requests
	HTTPAddr string
	// TLSEnabled indicates if the Agent has TLS enabled for the HTTP API
	TLSEnabled bool
	// Attributes is an arbitrary set of key/value
	// data that can be used for constraints. Examples
	// include "kernel.name=linux", "arch=386", "driver.docker=1",
	// "docker.runtime=1.8.3"
	Attributes map[string]string
	// NodeResources captures the available resources on the client.
	NodeResources *NodeResources
	// ReservedResources captures the set resources on the client that are
	// reserved from scheduling.
	ReservedResources *NodeReservedResources
	// Resources is the available resources on the client.
	// For example 'cpu=2' 'memory=2048'
	// COMPAT(0.10): Remove after 0.10
	Resources *Resources
	// Reserved is the set of resources that are reserved,
	// and should be subtracted from the total resources for
	// the purposes of scheduling. This may provide certain
	// high-watermark tolerances or because of external schedulers
	// consuming resources.
	// COMPAT(0.10): Remove after 0.10
	Reserved *Resources
	// Links are used to 'link' this client to external
	// systems. For example 'consul=foo.dc1' 'aws=i-83212'
	// 'ami=ami-123'
	Links map[string]string
	// Meta is used to associate arbitrary metadata with this
	// client. This is opaque to Nomad.
	Meta map[string]string
	// NodeClass is an opaque identifier used to group nodes
	// together for the purpose of determining scheduling pressure.
	NodeClass string
	// ComputedClass is a unique id that identifies nodes with a common set of
	// attributes and capabilities.
	ComputedClass string
	// DrainStrategy determines the node's draining behavior.
	// Will be non-nil only while draining.
	DrainStrategy *DrainStrategy
	// SchedulingEligibility determines whether this node will receive new
	// placements.
	SchedulingEligibility string
	// Status of this node
	Status string
	// StatusDescription is meant to provide more human useful information
	StatusDescription string
	// StatusUpdatedAt is the time stamp at which the state of the node was
	// updated
	StatusUpdatedAt int64
	// Events is the most recent set of events generated for the node,
	// retaining only MaxRetainedNodeEvents number at a time
	Events []*NodeEvent
	// Drivers is a map of driver names to current driver information
	Drivers map[string]*DriverInfo
	// CSIControllerPlugins is a map of plugin names to current CSI Plugin info
	CSIControllerPlugins map[string]*CSIInfo
	// CSINodePlugins is a map of plugin names to current CSI Plugin info
	CSINodePlugins map[string]*CSIInfo
	// HostVolumes is a map of host volume names to their configuration
	HostVolumes map[string]*ClientHostVolumeConfig
	// HostNetworks is a map of host_network names to their configuration
	HostNetworks map[string]*ClientHostNetworkConfig
	// LastDrain contains metadata about the most recent drain operation
	LastDrain *DrainMetadata
	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
// GetID is a helper for getting the ID when the object may be nil and is
// required for pagination.
func (n *Node) GetID() string {
	if n != nil {
		return n.ID
	}
	return ""
}
// Sanitize returns a copy of the Node omitting confidential fields
// It only returns a copy if the Node contains the confidential fields
func (n *Node) Sanitize() *Node {
	switch {
	case n == nil:
		return nil
	case n.SecretID == "":
		// Nothing confidential present; avoid the copy.
		return n
	}
	clean := n.Copy()
	clean.SecretID = ""
	return clean
}
// Ready returns true if the node is ready for running allocations
func (n *Node) Ready() bool {
	if n.Status != NodeStatusReady {
		return false
	}
	if n.DrainStrategy != nil {
		return false
	}
	return n.SchedulingEligibility == NodeSchedulingEligible
}
// Canonicalize normalizes a node before it is stored or scheduled against.
func (n *Node) Canonicalize() {
	if n == nil {
		return
	}
	// Ensure SchedulingEligibility is correctly set whenever draining so the plan applier and other scheduling logic
	// only need to check SchedulingEligibility when determining whether a placement is feasible on a node.
	if n.DrainStrategy != nil {
		n.SchedulingEligibility = NodeSchedulingIneligible
	} else if n.SchedulingEligibility == "" {
		n.SchedulingEligibility = NodeSchedulingEligible
	}
	// COMPAT remove in 1.0
	// In v0.12.0 we introduced a separate node specific network resource struct
	// so we need to convert any pre 0.12 clients to the correct struct
	if n.NodeResources != nil && n.NodeResources.NodeNetworks == nil {
		// Ranging over a nil Networks slice is a no-op, so no separate nil
		// check is needed.
		for _, nr := range n.NodeResources.Networks {
			nnr := &NodeNetworkResource{
				Mode:   nr.Mode,
				Speed:  nr.MBits,
				Device: nr.Device,
			}
			if nr.IP != "" {
				nnr.Addresses = []NodeNetworkAddress{
					{
						Alias:   "default",
						Address: nr.IP,
					},
				}
			}
			n.NodeResources.NodeNetworks = append(n.NodeResources.NodeNetworks, nnr)
		}
	}
}
// Copy returns a deep copy of the node, or nil for a nil receiver.
func (n *Node) Copy() *Node {
	if n == nil {
		return nil
	}
	// Start from a shallow copy, then replace every reference-typed field
	// with its own deep copy so the result shares no mutable state with n.
	c := new(Node)
	*c = *n
	c.Attributes = helper.CopyMapStringString(n.Attributes)
	c.NodeResources = n.NodeResources.Copy()
	c.ReservedResources = n.ReservedResources.Copy()
	c.Resources = n.Resources.Copy()
	c.Reserved = n.Reserved.Copy()
	c.Links = helper.CopyMapStringString(n.Links)
	c.Meta = helper.CopyMapStringString(n.Meta)
	c.DrainStrategy = n.DrainStrategy.Copy()
	c.Events = copyNodeEvents(n.Events)
	c.Drivers = copyNodeDrivers(n.Drivers)
	c.CSIControllerPlugins = copyNodeCSI(n.CSIControllerPlugins)
	c.CSINodePlugins = copyNodeCSI(n.CSINodePlugins)
	c.HostVolumes = copyNodeHostVolumes(n.HostVolumes)
	c.HostNetworks = copyNodeHostNetworks(n.HostNetworks)
	c.LastDrain = n.LastDrain.Copy()
	return c
}
// copyNodeEvents is a helper to deep-copy a list of NodeEvent's; an empty or
// nil input yields nil.
func copyNodeEvents(events []*NodeEvent) []*NodeEvent {
	if len(events) == 0 {
		return nil
	}
	out := make([]*NodeEvent, len(events))
	for i := range events {
		out[i] = events[i].Copy()
	}
	return out
}
// copyNodeCSI is a helper to deep-copy a map of CSIInfo; an empty or nil
// input yields nil.
func copyNodeCSI(plugins map[string]*CSIInfo) map[string]*CSIInfo {
	if len(plugins) == 0 {
		return nil
	}
	out := make(map[string]*CSIInfo, len(plugins))
	for name, info := range plugins {
		out[name] = info.Copy()
	}
	return out
}
// copyNodeDrivers is a helper to deep-copy a map of DriverInfo; an empty or
// nil input yields nil.
func copyNodeDrivers(drivers map[string]*DriverInfo) map[string]*DriverInfo {
	if len(drivers) == 0 {
		return nil
	}
	out := make(map[string]*DriverInfo, len(drivers))
	for name, info := range drivers {
		out[name] = info.Copy()
	}
	return out
}
// copyNodeHostVolumes is a helper to deep-copy a map of host volume
// configurations; an empty or nil input yields nil.
func copyNodeHostVolumes(volumes map[string]*ClientHostVolumeConfig) map[string]*ClientHostVolumeConfig {
	if len(volumes) == 0 {
		return nil
	}
	out := make(map[string]*ClientHostVolumeConfig, len(volumes))
	for name, cfg := range volumes {
		out[name] = cfg.Copy()
	}
	return out
}
// copyNodeHostNetworks is a helper to deep-copy a map of host network
// configurations; an empty or nil input yields nil.
func copyNodeHostNetworks(networks map[string]*ClientHostNetworkConfig) map[string]*ClientHostNetworkConfig {
	if len(networks) == 0 {
		return nil
	}
	out := make(map[string]*ClientHostNetworkConfig, len(networks))
	for name, cfg := range networks {
		out[name] = cfg.Copy()
	}
	return out
}
// TerminalStatus returns if the current status is terminal and
// will no longer transition.
func (n *Node) TerminalStatus() bool {
	// "down" is the only terminal node status.
	return n.Status == NodeStatusDown
}
// ComparableReservedResources returns the reserved resources on the node
// handling upgrade paths. Reserved networks must be handled separately. After
// 0.11 calls to this should be replaced with:
// node.ReservedResources.Comparable()
//
// COMPAT(0.11): Remove in 0.11
func (n *Node) ComparableReservedResources() *ComparableResources {
	// Node already has 0.9+ behavior
	if n.ReservedResources != nil {
		return n.ReservedResources.Comparable()
	}
	// Nothing reserved at all: no-op.
	if n.Reserved == nil {
		return nil
	}
	// Upgrade path: synthesize from the legacy Reserved fields.
	return &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: int64(n.Reserved.CPU),
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: int64(n.Reserved.MemoryMB),
			},
		},
		Shared: AllocatedSharedResources{
			DiskMB: int64(n.Reserved.DiskMB),
		},
	}
}
// ComparableResources returns the resources on the node
// handling upgrade paths. Networking must be handled separately. After 0.11
// calls to this should be replaced with: node.NodeResources.Comparable()
//
// COMPAT(0.11): Remove in 0.11
func (n *Node) ComparableResources() *ComparableResources {
	// Node already has 0.9+ behavior
	if nr := n.NodeResources; nr != nil {
		return nr.Comparable()
	}
	// Upgrade path: synthesize from the legacy Resources fields.
	legacy := n.Resources
	return &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: int64(legacy.CPU),
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: int64(legacy.MemoryMB),
			},
		},
		Shared: AllocatedSharedResources{
			DiskMB: int64(legacy.DiskMB),
		},
	}
}
// Stub returns a summarized version of the node. The optional fields
// parameter selects extra data to include in the stub.
func (n *Node) Stub(fields *NodeStubFields) *NodeListStub {
	// Only the host part of the HTTP address is surfaced in list output; an
	// unparsable address yields an empty string.
	addr, _, _ := net.SplitHostPort(n.HTTPAddr)
	stub := &NodeListStub{
		Address:               addr,
		ID:                    n.ID,
		Datacenter:            n.Datacenter,
		Name:                  n.Name,
		NodeClass:             n.NodeClass,
		Version:               n.Attributes["nomad.version"],
		Drain:                 n.DrainStrategy != nil,
		SchedulingEligibility: n.SchedulingEligibility,
		Status:                n.Status,
		StatusDescription:     n.StatusDescription,
		Drivers:               n.Drivers,
		HostVolumes:           n.HostVolumes,
		LastDrain:             n.LastDrain,
		CreateIndex:           n.CreateIndex,
		ModifyIndex:           n.ModifyIndex,
	}
	if fields == nil {
		return stub
	}
	if fields.Resources {
		stub.NodeResources = n.NodeResources
		stub.ReservedResources = n.ReservedResources
	}
	// Fetch key attributes from the main Attributes map.
	if fields.OS {
		stub.Attributes = map[string]string{"os.name": n.Attributes["os.name"]}
	}
	return stub
}
// NodeListStub is used to return a subset of node information
// for the node list
type NodeListStub struct {
	Address               string
	ID                    string
	Attributes            map[string]string `json:",omitempty"`
	Datacenter            string
	Name                  string
	NodeClass             string
	Version               string
	Drain                 bool
	SchedulingEligibility string
	Status                string
	StatusDescription     string
	Drivers               map[string]*DriverInfo
	HostVolumes           map[string]*ClientHostVolumeConfig
	NodeResources         *NodeResources         `json:",omitempty"`
	ReservedResources     *NodeReservedResources `json:",omitempty"`
	LastDrain             *DrainMetadata
	CreateIndex           uint64
	ModifyIndex           uint64
}
// NodeStubFields defines which fields are included in the NodeListStub.
type NodeStubFields struct {
	// Resources includes NodeResources/ReservedResources in the stub.
	Resources bool
	// OS includes the "os.name" attribute in the stub.
	OS bool
}
// Resources is used to define the resources available
// on a client
type Resources struct {
	CPU         int
	Cores       int
	MemoryMB    int
	MemoryMaxMB int
	DiskMB      int
	IOPS        int // COMPAT(0.10): Only being used to issue warnings
	Networks    Networks
	Devices     ResourceDevices
}
const (
	// BytesInMegabyte is the number of bytes in one megabyte (MiB).
	BytesInMegabyte = 1024 * 1024
)
// DefaultResources is a small resources object that contains the
// default resources requests that we will provide to an object.
// --- THIS FUNCTION IS REPLICATED IN api/resources.go and should
// be kept in sync.
func DefaultResources() *Resources {
	defaults := Resources{
		CPU:      100,
		Cores:    0,
		MemoryMB: 300,
	}
	return &defaults
}
// MinResources is a small resources object that contains the
// absolute minimum resources that we will provide to an object.
// This should not be confused with the defaults which are
// provided in Canonicalize() --- THIS FUNCTION IS REPLICATED IN
// api/resources.go and should be kept in sync.
func MinResources() *Resources {
	minimum := Resources{
		CPU:      1,
		Cores:    0,
		MemoryMB: 10,
	}
	return &minimum
}
// DiskInBytes returns the amount of disk resources in bytes.
func (r *Resources) DiskInBytes() int64 {
	// Convert to int64 before multiplying: the original expression
	// r.DiskMB * BytesInMegabyte is evaluated in int, which overflows for
	// DiskMB >= 2048 on platforms where int is 32 bits.
	return int64(r.DiskMB) * BytesInMegabyte
}
// Validate checks the resource request for internal consistency and minimums,
// accumulating all violations into a single multierror.
func (r *Resources) Validate() error {
	var mErr multierror.Error
	// 'cpu' and 'cores' are mutually exclusive ways to request compute.
	if r.Cores > 0 && r.CPU > 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Task can only ask for 'cpu' or 'cores' resource, not both."))
	}
	if err := r.MeetsMinResources(); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}
	// Ensure the task isn't asking for disk resources
	if r.DiskMB > 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Task can't ask for disk resources, they have to be specified at the task group level."))
	}
	for i, d := range r.Devices {
		if err := d.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("device %d failed validation: %v", i+1, err))
		}
	}
	// A zero MemoryMaxMB is treated as unset here and skips the check.
	if r.MemoryMaxMB != 0 && r.MemoryMaxMB < r.MemoryMB {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("MemoryMaxMB value (%d) should be larger than MemoryMB value (%d)", r.MemoryMaxMB, r.MemoryMB))
	}
	return mErr.ErrorOrNil()
}
// Merge merges this resource with another resource. Non-zero (or non-empty)
// fields of other override the corresponding fields of r.
// COMPAT(0.10): Remove in 0.10
func (r *Resources) Merge(other *Resources) {
	if other.CPU != 0 {
		r.CPU = other.CPU
	}
	if other.Cores != 0 {
		r.Cores = other.Cores
	}
	if other.MemoryMB != 0 {
		r.MemoryMB = other.MemoryMB
	}
	if other.MemoryMaxMB != 0 {
		r.MemoryMaxMB = other.MemoryMaxMB
	}
	if other.DiskMB != 0 {
		r.DiskMB = other.DiskMB
	}
	if len(other.Networks) != 0 {
		r.Networks = other.Networks
	}
	if len(other.Devices) != 0 {
		r.Devices = other.Devices
	}
	// NOTE(review): IOPS is not merged here — presumably intentional since it
	// is only used to issue warnings; confirm against callers.
}
// Equals reports whether two Resources are field-for-field equal. Two nil
// pointers compare equal; a nil and non-nil pointer do not.
//
// COMPAT(0.10): Remove in 0.10
func (r *Resources) Equals(o *Resources) bool {
	if r == o {
		return true
	}
	if r == nil || o == nil {
		return false
	}
	// Cheap scalar comparisons first, preserving the original evaluation
	// order before the slice comparisons.
	if r.CPU != o.CPU ||
		r.Cores != o.Cores ||
		r.MemoryMB != o.MemoryMB ||
		r.MemoryMaxMB != o.MemoryMaxMB ||
		r.DiskMB != o.DiskMB ||
		r.IOPS != o.IOPS {
		return false
	}
	return r.Networks.Equals(&o.Networks) && r.Devices.Equals(&o.Devices)
}
// ResourceDevices are part of Resources.
//
// COMPAT(0.10): Remove in 0.10.
type ResourceDevices []*RequestedDevice
// Equals compares ResourceDevices as sets keyed by Name.
//
// COMPAT(0.10): Remove in 0.10
func (d *ResourceDevices) Equals(o *ResourceDevices) bool {
	if d == o {
		return true
	}
	if d == nil || o == nil {
		return false
	}
	ours, theirs := *d, *o
	if len(ours) != len(theirs) {
		return false
	}
	// Index our devices by name, then require every one of theirs to match.
	byName := make(map[string]*RequestedDevice, len(ours))
	for _, dev := range ours {
		byName[dev.Name] = dev
	}
	for _, dev := range theirs {
		match, ok := byName[dev.Name]
		if !ok || !match.Equals(dev) {
			return false
		}
	}
	return true
}
// Canonicalize the Resources struct.
//
// COMPAT(0.10): Remove in 0.10
func (r *Resources) Canonicalize() {
	// Nil out empty slices so nil and empty compare equal under
	// reflect.DeepEqual, which scheduling relies on.
	if len(r.Networks) == 0 {
		r.Networks = nil
	}
	if len(r.Devices) == 0 {
		r.Devices = nil
	}
	for _, network := range r.Networks {
		network.Canonicalize()
	}
}
// MeetsMinResources returns an error if the resources specified are less than
// the minimum allowed.
// This is based on the minimums defined in the Resources type
// COMPAT(0.10): Remove in 0.10
func (r *Resources) MeetsMinResources() error {
	var mErr multierror.Error
	minResources := MinResources()
	// The CPU minimum is skipped when compute is requested via 'cores'.
	if r.CPU < minResources.CPU && r.Cores == 0 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum CPU value is %d; got %d", minResources.CPU, r.CPU))
	}
	if r.MemoryMB < minResources.MemoryMB {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MemoryMB value is %d; got %d", minResources.MemoryMB, r.MemoryMB))
	}
	return mErr.ErrorOrNil()
}
// Copy returns a deep copy of the resources, or nil for a nil receiver.
func (r *Resources) Copy() *Resources {
	if r == nil {
		return nil
	}
	// Shallow copy first, then deep-copy the reference-typed fields.
	c := *r
	c.Networks = r.Networks.Copy()
	if r.Devices != nil {
		c.Devices = make([]*RequestedDevice, len(r.Devices))
		for i, dev := range r.Devices {
			c.Devices[i] = dev.Copy()
		}
	}
	return &c
}
// NetIndex finds the matching net index using device name.
// It delegates to Networks.NetIndex and returns -1 when no match exists.
// COMPAT(0.10): Remove in 0.10
func (r *Resources) NetIndex(n *NetworkResource) int {
	return r.Networks.NetIndex(n)
}
// Add adds the resources of the delta to this, potentially
// returning an error if not possible.
// COMPAT(0.10): Remove in 0.10
func (r *Resources) Add(delta *Resources) {
	if delta == nil {
		return
	}
	r.CPU += delta.CPU
	r.MemoryMB += delta.MemoryMB
	// When the delta carries no explicit memory_max, its regular memory
	// counts against the max as well.
	if delta.MemoryMaxMB > 0 {
		r.MemoryMaxMB += delta.MemoryMaxMB
	} else {
		r.MemoryMaxMB += delta.MemoryMB
	}
	r.DiskMB += delta.DiskMB
	for _, n := range delta.Networks {
		// Find the matching interface by device name (see Networks.NetIndex).
		idx := r.NetIndex(n)
		if idx == -1 {
			r.Networks = append(r.Networks, n.Copy())
		} else {
			r.Networks[idx].Add(n)
		}
	}
}
// GoString returns the string representation of the Resources struct.
//
// COMPAT(0.10): Remove in 0.10
func (r *Resources) GoString() string {
	return fmt.Sprintf("*%#v", *r)
}
// NodeNetworkResource is used to describe a fingerprinted network of a node
type NodeNetworkResource struct {
	Mode string // host for physical networks, cni/<name> for cni networks
	// The following apply only to host networks
	Device     string // interface name
	MacAddress string
	Speed      int
	Addresses  []NodeNetworkAddress // not valid for cni, for bridge there will only be 1 ip
}
// Equals reports deep equality of two node network resources via
// reflect.DeepEqual.
func (n *NodeNetworkResource) Equals(o *NodeNetworkResource) bool {
	return reflect.DeepEqual(n, o)
}
// Copy returns a deep copy of the node network resource, or nil for a nil
// receiver.
func (n *NodeNetworkResource) Copy() *NodeNetworkResource {
	if n == nil {
		return nil
	}
	dup := *n
	if n.Addresses != nil {
		dup.Addresses = make([]NodeNetworkAddress, len(n.Addresses))
		copy(dup.Addresses, n.Addresses)
	}
	return &dup
}
// HasAlias reports whether any of the network's addresses carries the given
// alias.
func (n *NodeNetworkResource) HasAlias(alias string) bool {
	for i := range n.Addresses {
		if n.Addresses[i].Alias == alias {
			return true
		}
	}
	return false
}
// NodeNetworkAF is the address family of a node network address.
type NodeNetworkAF string
const (
	NodeNetworkAF_IPv4 NodeNetworkAF = "ipv4"
	NodeNetworkAF_IPv6 NodeNetworkAF = "ipv6"
)
// NodeNetworkAddress is a single fingerprinted address on a node network.
type NodeNetworkAddress struct {
	Family        NodeNetworkAF
	Alias         string
	Address       string
	ReservedPorts string
	Gateway       string // default route for this address
}
// AllocatedPortMapping describes a single port allocated to a task, including
// the host value, the optional mapped-to port, and the host IP it binds to.
type AllocatedPortMapping struct {
	Label  string
	Value  int
	To     int
	HostIP string
}
// AllocatedPorts is the list of port mappings allocated to a task.
type AllocatedPorts []AllocatedPortMapping
// Get returns the mapping with the given label and reports whether one was
// found.
func (p AllocatedPorts) Get(label string) (AllocatedPortMapping, bool) {
	for i := range p {
		if p[i].Label == label {
			return p[i], true
		}
	}
	var zero AllocatedPortMapping
	return zero, false
}
// Port is a single port request within a network stanza.
type Port struct {
	// Label is the key for HCL port stanzas: port "foo" {}
	Label string
	// Value is the static or dynamic port value. For dynamic ports this
	// will be 0 in the jobspec and set by the scheduler.
	Value int
	// To is the port inside a network namespace where this port is
	// forwarded. -1 is an internal sentinel value used by Consul Connect
	// to mean "same as the host port."
	To int
	// HostNetwork is the name of the network this port should be assigned
	// to. Jobs with a HostNetwork set can only be placed on nodes with
	// that host network available.
	HostNetwork string
}
// DNSConfig describes DNS resolver settings for a task's network namespace.
type DNSConfig struct {
	Servers  []string
	Searches []string
	Options  []string
}
// Copy returns a deep copy of the DNS configuration, or nil for a nil
// receiver. The copied slices are always non-nil, matching the original
// implementation.
func (d *DNSConfig) Copy() *DNSConfig {
	if d == nil {
		return nil
	}
	out := &DNSConfig{
		Servers:  make([]string, len(d.Servers)),
		Searches: make([]string, len(d.Searches)),
		Options:  make([]string, len(d.Options)),
	}
	copy(out.Servers, d.Servers)
	copy(out.Searches, d.Searches)
	copy(out.Options, d.Options)
	return out
}
// NetworkResource is used to represent available network
// resources
type NetworkResource struct {
	Mode          string     // Mode of the network
	Device        string     // Name of the device
	CIDR          string     // CIDR block of addresses
	IP            string     // Host IP address
	Hostname      string     `json:",omitempty"` // Hostname of the network namespace
	MBits         int        // Throughput
	DNS           *DNSConfig // DNS Configuration
	ReservedPorts []Port     // Host Reserved ports
	DynamicPorts  []Port     // Host Dynamically assigned ports
}
// Hash returns a CRC32 checksum over the identifying fields of the network
// resource and its port lists. The exact byte rendering below is part of the
// hash identity and must not change.
func (n *NetworkResource) Hash() uint32 {
	var data []byte
	data = append(data, []byte(fmt.Sprintf("%s%s%s%s%s%d", n.Mode, n.Device, n.CIDR, n.IP, n.Hostname, n.MBits))...)
	// Port position (index) is included, so reordering ports changes the hash.
	for i, port := range n.ReservedPorts {
		data = append(data, []byte(fmt.Sprintf("r%d%s%d%d", i, port.Label, port.Value, port.To))...)
	}
	for i, port := range n.DynamicPorts {
		data = append(data, []byte(fmt.Sprintf("d%d%s%d%d", i, port.Label, port.Value, port.To))...)
	}
	return crc32.ChecksumIEEE(data)
}
// Equals reports equality by comparing the CRC32 hashes of both resources.
// Note: fields not covered by Hash (e.g. DNS, HostNetwork) do not affect
// equality.
func (n *NetworkResource) Equals(other *NetworkResource) bool {
	return n.Hash() == other.Hash()
}
// Canonicalize normalizes the network resource prior to comparison and
// scheduling.
func (n *NetworkResource) Canonicalize() {
	// Nil out empty slices so nil and empty compare equal under
	// reflect.DeepEqual, which scheduling relies on.
	if len(n.ReservedPorts) == 0 {
		n.ReservedPorts = nil
	}
	if len(n.DynamicPorts) == 0 {
		n.DynamicPorts = nil
	}
	// Default the host network for any port that does not name one.
	for i := range n.DynamicPorts {
		if n.DynamicPorts[i].HostNetwork == "" {
			n.DynamicPorts[i].HostNetwork = "default"
		}
	}
	for i := range n.ReservedPorts {
		if n.ReservedPorts[i].HostNetwork == "" {
			n.ReservedPorts[i].HostNetwork = "default"
		}
	}
}
// Copy returns a deep copy of the network resource, or nil for a nil
// receiver.
func (n *NetworkResource) Copy() *NetworkResource {
	if n == nil {
		return nil
	}
	dup := *n
	dup.DNS = n.DNS.Copy()
	if n.ReservedPorts != nil {
		dup.ReservedPorts = make([]Port, len(n.ReservedPorts))
		copy(dup.ReservedPorts, n.ReservedPorts)
	}
	if n.DynamicPorts != nil {
		dup.DynamicPorts = make([]Port, len(n.DynamicPorts))
		copy(dup.DynamicPorts, n.DynamicPorts)
	}
	return &dup
}
// Add adds the resources of the delta to this, potentially
// returning an error if not possible.
func (n *NetworkResource) Add(delta *NetworkResource) {
	if len(delta.ReservedPorts) > 0 {
		n.ReservedPorts = append(n.ReservedPorts, delta.ReservedPorts...)
	}
	n.MBits += delta.MBits
	// Appending an empty slice is a no-op, so DynamicPorts needs no guard.
	n.DynamicPorts = append(n.DynamicPorts, delta.DynamicPorts...)
}
// GoString returns the Go-syntax representation of the network resource.
func (n *NetworkResource) GoString() string {
	return fmt.Sprintf("*%#v", *n)
}
// PortLabels returns a map of port labels to their assigned host ports.
func (n *NetworkResource) PortLabels() map[string]int {
	out := make(map[string]int, len(n.ReservedPorts)+len(n.DynamicPorts))
	for _, p := range n.ReservedPorts {
		out[p.Label] = p.Value
	}
	for _, p := range n.DynamicPorts {
		out[p.Label] = p.Value
	}
	return out
}
// Networks defined for a task on the Resources struct.
type Networks []*NetworkResource
// Copy returns a deep copy of the network list, or nil when it is empty.
func (ns Networks) Copy() Networks {
	if len(ns) == 0 {
		return nil
	}
	dup := make(Networks, len(ns))
	for i, n := range ns {
		dup[i] = n.Copy()
	}
	return dup
}
// Port returns the port assignment and IP for the given label, or zero
// values when no network carries that label. Reserved ports are consulted
// before dynamic ports on each network.
func (ns Networks) Port(label string) AllocatedPortMapping {
	for _, n := range ns {
		// find scans one port list for the label, mapping a hit to the
		// enclosing network's IP.
		find := func(ports []Port) (AllocatedPortMapping, bool) {
			for _, p := range ports {
				if p.Label == label {
					return AllocatedPortMapping{
						Label:  label,
						Value:  p.Value,
						To:     p.To,
						HostIP: n.IP,
					}, true
				}
			}
			return AllocatedPortMapping{}, false
		}
		if m, ok := find(n.ReservedPorts); ok {
			return m
		}
		if m, ok := find(n.DynamicPorts); ok {
			return m
		}
	}
	return AllocatedPortMapping{}
}
// NetIndex returns the index of the network whose device matches n, or -1
// when there is no match.
func (ns Networks) NetIndex(n *NetworkResource) int {
	// Index loop also avoids shadowing the imported "net" package name.
	for i := range ns {
		if ns[i].Device == n.Device {
			return i
		}
	}
	return -1
}
// RequestedDevice is used to request a device for a task.
type RequestedDevice struct {
	// Name is the request name. The possible values are as follows:
	// * <type>: A single value only specifies the type of request.
	// * <vendor>/<type>: A single slash delimiter assumes the vendor and type of device is specified.
	// * <vendor>/<type>/<name>: Two slash delimiters assume vendor, type and specific model are specified.
	//
	// Examples are as follows:
	// * "gpu"
	// * "nvidia/gpu"
	// * "nvidia/gpu/GTX2080Ti"
	Name string
	// Count is the number of requested devices
	Count uint64
	// Constraints are a set of constraints to apply when selecting the device
	// to use.
	Constraints Constraints
	// Affinities are a set of affinities to apply when selecting the device
	// to use.
	Affinities Affinities
}
// Equals reports whether r and o request the same device with identical
// count, constraints, and affinities. Two nil requests are equal.
func (r *RequestedDevice) Equals(o *RequestedDevice) bool {
	if r == o {
		return true
	}
	if r == nil || o == nil {
		return false
	}
	if r.Name != o.Name || r.Count != o.Count {
		return false
	}
	return r.Constraints.Equals(&o.Constraints) &&
		r.Affinities.Equals(&o.Affinities)
}
// Copy returns a deep copy of the device request, or nil for a nil
// receiver.
func (r *RequestedDevice) Copy() *RequestedDevice {
	if r == nil {
		return nil
	}
	out := *r
	out.Constraints = CopySliceConstraints(out.Constraints)
	out.Affinities = CopySliceAffinities(out.Affinities)
	return &out
}
// ID parses the request name into a device ID tuple. Depending on how
// many slash-separated parts the name has, it fills in type only,
// vendor/type, or vendor/type/name. Returns nil for a nil or unnamed
// request.
func (r *RequestedDevice) ID() *DeviceIdTuple {
	if r == nil || r.Name == "" {
		return nil
	}
	id := &DeviceIdTuple{}
	parts := strings.SplitN(r.Name, "/", 3)
	switch len(parts) {
	case 1:
		id.Type = parts[0]
	case 2:
		id.Vendor, id.Type = parts[0], parts[1]
	default:
		id.Vendor, id.Type, id.Name = parts[0], parts[1], parts[2]
	}
	return id
}
// Validate checks that the device request has a name and that each of its
// constraints and affinities is itself valid. Distinct-hosts and
// distinct-property constraint operands are rejected for device requests.
func (r *RequestedDevice) Validate() error {
	if r == nil {
		return nil
	}
	var mErr multierror.Error
	if r.Name == "" {
		_ = multierror.Append(&mErr, errors.New("device name must be given as one of the following: type, vendor/type, or vendor/type/name"))
	}
	for idx, constr := range r.Constraints {
		// Ensure that the constraint doesn't use an operand we do not allow
		switch constr.Operand {
		case ConstraintDistinctHosts, ConstraintDistinctProperty:
			_ = multierror.Append(&mErr, fmt.Errorf("Constraint %d validation failed: using unsupported operand %q", idx+1, constr.Operand))
		default:
			if err := constr.Validate(); err != nil {
				_ = multierror.Append(&mErr, fmt.Errorf("Constraint %d validation failed: %s", idx+1, err))
			}
		}
	}
	for idx, affinity := range r.Affinities {
		if err := affinity.Validate(); err != nil {
			_ = multierror.Append(&mErr, fmt.Errorf("Affinity %d validation failed: %s", idx+1, err))
		}
	}
	return mErr.ErrorOrNil()
}
// NodeResources is used to define the resources available on a client node.
type NodeResources struct {
	// Cpu captures the node's CPU capacity.
	Cpu NodeCpuResources
	// Memory captures the node's memory capacity.
	Memory NodeMemoryResources
	// Disk captures the node's disk capacity.
	Disk NodeDiskResources
	// Networks are the legacy per-task networks available on the node.
	Networks Networks
	// NodeNetworks describe the node's network interfaces.
	NodeNetworks []*NodeNetworkResource
	// Devices are the device groups fingerprinted on the node.
	Devices []*NodeDeviceResource

	// MinDynamicPort and MaxDynamicPort bound the range from which
	// dynamic ports are allocated.
	MinDynamicPort int
	MaxDynamicPort int
}
// Copy returns a deep copy of the node resources, or nil for a nil
// receiver.
func (n *NodeResources) Copy() *NodeResources {
	if n == nil {
		return nil
	}
	out := *n
	out.Cpu = n.Cpu.Copy()
	out.Networks = n.Networks.Copy()
	if n.NodeNetworks != nil {
		out.NodeNetworks = make([]*NodeNetworkResource, len(n.NodeNetworks))
		for i, nn := range n.NodeNetworks {
			out.NodeNetworks[i] = nn.Copy()
		}
	}
	if n.Devices != nil {
		out.Devices = make([]*NodeDeviceResource, len(n.Devices))
		for i, d := range n.Devices {
			out.Devices[i] = d.Copy()
		}
	}
	return &out
}
// Comparable returns a comparable version of the nodes resources. This
// conversion can be lossy so care must be taken when using it.
func (n *NodeResources) Comparable() *ComparableResources {
	if n == nil {
		return nil
	}
	comparable := &ComparableResources{}
	comparable.Flattened.Cpu = AllocatedCpuResources{
		CpuShares:     n.Cpu.CpuShares,
		ReservedCores: n.Cpu.ReservableCpuCores,
	}
	comparable.Flattened.Memory = AllocatedMemoryResources{
		MemoryMB: n.Memory.MemoryMB,
	}
	comparable.Flattened.Networks = n.Networks
	comparable.Shared.DiskMB = n.Disk.DiskMB
	return comparable
}
// Merge overlays the non-empty fields of o onto n. Networks are appended,
// devices are replaced wholesale, and node networks are merged by device
// name with o's entries replacing existing matches.
func (n *NodeResources) Merge(o *NodeResources) {
	if o == nil {
		return
	}
	n.Cpu.Merge(&o.Cpu)
	n.Memory.Merge(&o.Memory)
	n.Disk.Merge(&o.Disk)
	if len(o.Networks) > 0 {
		n.Networks = append(n.Networks, o.Networks...)
	}
	if len(o.Devices) > 0 {
		n.Devices = o.Devices
	}
	for _, nw := range o.NodeNetworks {
		replaced := false
		for i := range n.NodeNetworks {
			if n.NodeNetworks[i].Device == nw.Device {
				n.NodeNetworks[i] = nw
				replaced = true
				break
			}
		}
		if !replaced {
			n.NodeNetworks = append(n.NodeNetworks, nw)
		}
	}
}
// Equals reports whether two node resource sets are identical. Devices
// and node networks are compared as sets.
func (n *NodeResources) Equals(o *NodeResources) bool {
	if n == nil || o == nil {
		return n == o
	}
	return n.Cpu.Equals(&o.Cpu) &&
		n.Memory.Equals(&o.Memory) &&
		n.Disk.Equals(&o.Disk) &&
		n.Networks.Equals(&o.Networks) &&
		DevicesEquals(n.Devices, o.Devices) &&
		NodeNetworksEquals(n.NodeNetworks, o.NodeNetworks)
}
// Equals equates Networks as a set: the lengths must match and every
// element of ns must have an equal element in o.
func (ns *Networks) Equals(o *Networks) bool {
	if ns == o {
		return true
	}
	if ns == nil || o == nil {
		return false
	}
	if len(*ns) != len(*o) {
		return false
	}
	hasMatch := func(nw *NetworkResource) bool {
		for _, candidate := range *o {
			if nw.Equals(candidate) {
				return true
			}
		}
		return false
	}
	for _, nw := range *ns {
		if !hasMatch(nw) {
			return false
		}
	}
	return true
}
// DevicesEquals returns true if the two device arrays are set equal,
// keyed by each device group's vendor/type/name ID tuple.
func DevicesEquals(d1, d2 []*NodeDeviceResource) bool {
	if len(d1) != len(d2) {
		return false
	}
	byID := make(map[DeviceIdTuple]*NodeDeviceResource, len(d1))
	for _, dev := range d1 {
		byID[*dev.ID()] = dev
	}
	for _, other := range d2 {
		mine, ok := byID[*other.ID()]
		if !ok || !mine.Equals(other) {
			return false
		}
	}
	return true
}
// NodeNetworksEquals returns true if the two node network arrays are set
// equal, keyed by device name.
func NodeNetworksEquals(n1, n2 []*NodeNetworkResource) bool {
	if len(n1) != len(n2) {
		return false
	}
	byDevice := make(map[string]*NodeNetworkResource, len(n1))
	for _, nw := range n1 {
		byDevice[nw.Device] = nw
	}
	for _, other := range n2 {
		mine, ok := byDevice[other.Device]
		if !ok || !mine.Equals(other) {
			return false
		}
	}
	return true
}
// NodeCpuResources captures the CPU resources of the node.
type NodeCpuResources struct {
	// CpuShares is the CPU shares available. This is calculated by number of
	// cores multiplied by the core frequency.
	CpuShares int64

	// TotalCpuCores is the total number of cores on the machine. This includes cores not in
	// the agent's cpuset if on a linux platform
	TotalCpuCores uint16

	// ReservableCpuCores is the set of cpus which are available to be reserved on the Node.
	// This value is currently only reported on Linux platforms which support cgroups and is
	// discovered by inspecting the cpuset of the agent's cgroup.
	// It may therefore be empty on other platforms.
	ReservableCpuCores []uint16
}
// Copy returns a copy whose reservable-core list has its own backing
// storage, so mutating the copy cannot affect the original.
func (n NodeCpuResources) Copy() NodeCpuResources {
	out := n
	if n.ReservableCpuCores != nil {
		out.ReservableCpuCores = make([]uint16, len(n.ReservableCpuCores))
		copy(out.ReservableCpuCores, n.ReservableCpuCores)
	}
	return out
}
// Merge overlays the non-zero fields of o onto n.
func (n *NodeCpuResources) Merge(o *NodeCpuResources) {
	if o == nil {
		return
	}
	if shares := o.CpuShares; shares != 0 {
		n.CpuShares = shares
	}
	if cores := o.TotalCpuCores; cores != 0 {
		n.TotalCpuCores = cores
	}
	if len(o.ReservableCpuCores) > 0 {
		n.ReservableCpuCores = o.ReservableCpuCores
	}
}
// Equals reports whether two CPU resource descriptions are identical,
// including an element-wise comparison of the reservable core lists.
func (n *NodeCpuResources) Equals(o *NodeCpuResources) bool {
	if n == nil || o == nil {
		return n == o
	}
	if n.CpuShares != o.CpuShares || n.TotalCpuCores != o.TotalCpuCores {
		return false
	}
	if len(n.ReservableCpuCores) != len(o.ReservableCpuCores) {
		return false
	}
	for i, core := range n.ReservableCpuCores {
		if core != o.ReservableCpuCores[i] {
			return false
		}
	}
	return true
}
// SharesPerCore returns the average CPU shares contributed by a single
// core. It returns 0 when TotalCpuCores is zero (e.g. a node whose
// fingerprint has not populated the core count) instead of panicking
// with a divide-by-zero.
func (n *NodeCpuResources) SharesPerCore() int64 {
	if n.TotalCpuCores == 0 {
		return 0
	}
	return n.CpuShares / int64(n.TotalCpuCores)
}
// NodeMemoryResources captures the memory resources of the node
type NodeMemoryResources struct {
	// MemoryMB is the total available memory on the node
	MemoryMB int64
}
// Merge overlays o's memory figure onto n when it is non-zero.
func (n *NodeMemoryResources) Merge(o *NodeMemoryResources) {
	if o == nil {
		return
	}
	if mb := o.MemoryMB; mb != 0 {
		n.MemoryMB = mb
	}
}
// Equals reports whether two memory resource descriptions are identical.
func (n *NodeMemoryResources) Equals(o *NodeMemoryResources) bool {
	if n == nil || o == nil {
		return n == o
	}
	return n.MemoryMB == o.MemoryMB
}
// NodeDiskResources captures the disk resources of the node
type NodeDiskResources struct {
	// DiskMB is the total available disk space on the node
	DiskMB int64
}
// Merge overlays o's disk figure onto n when it is non-zero.
func (n *NodeDiskResources) Merge(o *NodeDiskResources) {
	if o == nil {
		return
	}
	if mb := o.DiskMB; mb != 0 {
		n.DiskMB = mb
	}
}
// Equals reports whether two disk resource descriptions are identical.
func (n *NodeDiskResources) Equals(o *NodeDiskResources) bool {
	if n == nil || o == nil {
		return n == o
	}
	return n.DiskMB == o.DiskMB
}
// DeviceIdTuple is the tuple that identifies a device. All fields are
// comparable strings, so the tuple may be used directly as a map key.
type DeviceIdTuple struct {
	Vendor string
	Type   string
	Name   string
}
// String renders the tuple as "vendor/type/name"; a nil tuple renders as
// the empty string.
func (id *DeviceIdTuple) String() string {
	if id == nil {
		return ""
	}
	return id.Vendor + "/" + id.Type + "/" + id.Name
}
// Matches returns if this Device ID is a superset of the passed ID: every
// non-empty field of other must equal the corresponding field of id.
func (id *DeviceIdTuple) Matches(other *DeviceIdTuple) bool {
	if other == nil {
		return false
	}
	switch {
	case other.Name != "" && other.Name != id.Name:
		return false
	case other.Vendor != "" && other.Vendor != id.Vendor:
		return false
	case other.Type != "" && other.Type != id.Type:
		return false
	}
	return true
}
// Equals returns if this Device ID is the same as the passed ID. Two nil
// tuples are equal; a nil and non-nil tuple are not.
func (id *DeviceIdTuple) Equals(o *DeviceIdTuple) bool {
	if id == nil || o == nil {
		return id == o
	}
	// All fields are strings, so struct comparison is exact.
	return *id == *o
}
// NodeDeviceResource captures a set of devices sharing a common
// vendor/type/device_name tuple.
type NodeDeviceResource struct {
	// Vendor, Type, and Name together form the group's DeviceIdTuple.
	Vendor string
	Type   string
	Name   string

	// Instances are the individual devices in this group.
	Instances []*NodeDevice

	// Attributes are plugin-reported attributes shared by the group.
	Attributes map[string]*psstructs.Attribute
}
// ID returns the identifying tuple for this device group, or nil for a
// nil receiver.
func (n *NodeDeviceResource) ID() *DeviceIdTuple {
	if n == nil {
		return nil
	}
	id := DeviceIdTuple{
		Vendor: n.Vendor,
		Type:   n.Type,
		Name:   n.Name,
	}
	return &id
}
// Copy returns a deep copy of the device group, including its instances
// and attributes.
func (n *NodeDeviceResource) Copy() *NodeDeviceResource {
	if n == nil {
		return nil
	}
	out := *n
	if count := len(n.Instances); count > 0 {
		out.Instances = make([]*NodeDevice, count)
		for i, inst := range n.Instances {
			out.Instances[i] = inst.Copy()
		}
	}
	out.Attributes = psstructs.CopyMapStringAttribute(out.Attributes)
	return &out
}
// Equals reports whether two device groups are identical in identity,
// attributes, and instances. Instances are compared as a set keyed by
// device ID.
func (n *NodeDeviceResource) Equals(o *NodeDeviceResource) bool {
	if n == nil || o == nil {
		return n == o
	}
	if n.Vendor != o.Vendor || n.Type != o.Type || n.Name != o.Name {
		return false
	}
	// Check the attributes.
	// NOTE(review): this compares attribute *pointers*, not attribute
	// values, mirroring the original code — confirm that is intended
	// before changing it.
	if len(n.Attributes) != len(o.Attributes) {
		return false
	}
	for k, v := range n.Attributes {
		other, ok := o.Attributes[k]
		if !ok || v != other {
			return false
		}
	}
	// Check the instances as a set keyed by ID.
	if len(n.Instances) != len(o.Instances) {
		return false
	}
	byID := make(map[string]*NodeDevice, len(n.Instances))
	for _, inst := range n.Instances {
		byID[inst.ID] = inst
	}
	for _, other := range o.Instances {
		inst, ok := byID[other.ID]
		if !ok || !inst.Equals(other) {
			return false
		}
	}
	return true
}
// NodeDevice is an instance of a particular device.
type NodeDevice struct {
	// ID is the ID of the device.
	ID string

	// Healthy captures whether the device is healthy.
	Healthy bool

	// HealthDescription is used to provide a human readable description of why
	// the device may be unhealthy.
	HealthDescription string

	// Locality stores HW locality information for the node to optionally be
	// used when making placement decisions. May be nil.
	Locality *NodeDeviceLocality
}
// Equals reports whether two node device instances carry the same ID,
// health state, health description, and locality.
//
// Fix: the original function ended with `return false`, so it reported
// inequality even when every field matched — for non-nil receivers it
// could never return true.
func (n *NodeDevice) Equals(o *NodeDevice) bool {
	if o == nil && n == nil {
		return true
	} else if o == nil {
		return false
	} else if n == nil {
		return false
	}
	if n.ID != o.ID {
		return false
	} else if n.Healthy != o.Healthy {
		return false
	} else if n.HealthDescription != o.HealthDescription {
		return false
	} else if !n.Locality.Equals(o.Locality) {
		return false
	}
	return true
}
// Copy returns a deep copy of the device instance, including its
// locality information.
func (n *NodeDevice) Copy() *NodeDevice {
	if n == nil {
		return nil
	}
	out := *n
	out.Locality = out.Locality.Copy()
	return &out
}
// NodeDeviceLocality stores information about the devices hardware locality on
// the node.
type NodeDeviceLocality struct {
	// PciBusID is the PCI Bus ID for the device.
	PciBusID string
}
// Equals reports whether two locality descriptions are identical. Two nil
// localities are equal.
func (n *NodeDeviceLocality) Equals(o *NodeDeviceLocality) bool {
	if n == nil || o == nil {
		return n == o
	}
	return n.PciBusID == o.PciBusID
}
// Copy returns a copy of the locality information. The struct holds only
// value types, so a plain struct copy suffices.
func (n *NodeDeviceLocality) Copy() *NodeDeviceLocality {
	if n == nil {
		return nil
	}
	out := *n
	return &out
}
// NodeReservedResources is used to capture the resources on a client node that
// should be reserved and not made available to jobs.
type NodeReservedResources struct {
	Cpu      NodeReservedCpuResources
	Memory   NodeReservedMemoryResources
	Disk     NodeReservedDiskResources
	Networks NodeReservedNetworkResources
}
// Copy returns a copy of the reserved resources. The struct is copied by
// value, so the Cpu.ReservedCpuCores slice header still shares its
// backing array with the original.
func (n *NodeReservedResources) Copy() *NodeReservedResources {
	if n == nil {
		return nil
	}
	clone := *n
	return &clone
}
// Comparable returns a comparable version of the node's reserved resources. The
// returned resources doesn't contain any network information. This conversion
// can be lossy so care must be taken when using it.
func (n *NodeReservedResources) Comparable() *ComparableResources {
	if n == nil {
		return nil
	}
	out := &ComparableResources{}
	out.Flattened.Cpu = AllocatedCpuResources{
		CpuShares:     n.Cpu.CpuShares,
		ReservedCores: n.Cpu.ReservedCpuCores,
	}
	out.Flattened.Memory = AllocatedMemoryResources{
		MemoryMB: n.Memory.MemoryMB,
	}
	out.Shared = AllocatedSharedResources{
		DiskMB: n.Disk.DiskMB,
	}
	return out
}
// NodeReservedCpuResources captures the reserved CPU resources of the node.
type NodeReservedCpuResources struct {
	// CpuShares is the amount of CPU shares withheld from scheduling.
	CpuShares int64
	// ReservedCpuCores are specific cores withheld from scheduling.
	ReservedCpuCores []uint16
}
// NodeReservedMemoryResources captures the reserved memory resources of the node.
type NodeReservedMemoryResources struct {
	// MemoryMB is the amount of memory (in MB) withheld from scheduling.
	MemoryMB int64
}
// NodeReservedDiskResources captures the reserved disk resources of the node.
type NodeReservedDiskResources struct {
	// DiskMB is the amount of disk (in MB) withheld from scheduling.
	DiskMB int64
}
// NodeReservedNetworkResources captures the reserved network resources of the node.
type NodeReservedNetworkResources struct {
	// ReservedHostPorts is the set of ports reserved on all host network
	// interfaces. Its format is a comma separate list of integers or integer
	// ranges. (80,443,1000-2000,2005)
	ReservedHostPorts string
}
// ParseReservedHostPorts returns the reserved host ports.
func (n *NodeReservedNetworkResources) ParseReservedHostPorts() ([]uint64, error) {
	// Delegates parsing of the comma-separated list/range syntax
	// (e.g. "80,443,1000-2000") to ParsePortRanges.
	return ParsePortRanges(n.ReservedHostPorts)
}
// AllocatedResources is the set of resources to be used by an allocation.
type AllocatedResources struct {
	// Tasks is a mapping of task name to the resources for the task.
	Tasks map[string]*AllocatedTaskResources

	// TaskLifecycles maps task names to their lifecycle configuration,
	// keyed the same way as Tasks. A missing entry means the task has no
	// lifecycle hook.
	TaskLifecycles map[string]*TaskLifecycleConfig

	// Shared is the set of resource that are shared by all tasks in the group.
	Shared AllocatedSharedResources
}
// Copy returns a deep copy of the allocated resources.
func (a *AllocatedResources) Copy() *AllocatedResources {
	if a == nil {
		return nil
	}
	out := AllocatedResources{
		Shared: a.Shared.Copy(),
	}
	if a.Tasks != nil {
		// Fix: size the maps by the source maps; the original sized them
		// by the still-empty destination maps (always zero), defeating
		// the pre-allocation.
		out.Tasks = make(map[string]*AllocatedTaskResources, len(a.Tasks))
		for task, resource := range a.Tasks {
			out.Tasks[task] = resource.Copy()
		}
	}
	if a.TaskLifecycles != nil {
		out.TaskLifecycles = make(map[string]*TaskLifecycleConfig, len(a.TaskLifecycles))
		for task, lifecycle := range a.TaskLifecycles {
			out.TaskLifecycles[task] = lifecycle.Copy()
		}
	}
	return &out
}
// Comparable returns a comparable version of the allocations allocated
// resources. This conversion can be lossy so care must be taken when using it.
func (a *AllocatedResources) Comparable() *ComparableResources {
	if a == nil {
		return nil
	}
	c := &ComparableResources{
		Shared: a.Shared,
	}
	// Bucket each task's resources by its lifecycle hook so the flattened
	// total can account for tasks that never run concurrently.
	prestartSidecarTasks := &AllocatedTaskResources{}
	prestartEphemeralTasks := &AllocatedTaskResources{}
	main := &AllocatedTaskResources{}
	poststopTasks := &AllocatedTaskResources{}
	for taskName, r := range a.Tasks {
		lc := a.TaskLifecycles[taskName]
		if lc == nil {
			main.Add(r)
		} else if lc.Hook == TaskLifecycleHookPrestart {
			if lc.Sidecar {
				prestartSidecarTasks.Add(r)
			} else {
				prestartEphemeralTasks.Add(r)
			}
		} else if lc.Hook == TaskLifecycleHookPoststop {
			poststopTasks.Add(r)
		}
	}
	// Ephemeral prestart tasks are folded in with Max against main and
	// poststop — presumably because they never run concurrently with
	// those phases — while sidecars, which do run alongside them, are
	// added in full. NOTE(review): confirm this reasoning; order here is
	// significant.
	// update this loop to account for lifecycle hook
	prestartEphemeralTasks.Max(main)
	prestartEphemeralTasks.Max(poststopTasks)
	prestartSidecarTasks.Add(prestartEphemeralTasks)
	c.Flattened.Add(prestartSidecarTasks)
	// Add network resources that are at the task group level
	for _, network := range a.Shared.Networks {
		c.Flattened.Add(&AllocatedTaskResources{
			Networks: []*NetworkResource{network},
		})
	}
	return c
}
// OldTaskResources returns the pre-0.9.0 map of task resources
func (a *AllocatedResources) OldTaskResources() map[string]*Resources {
	out := make(map[string]*Resources, len(a.Tasks))
	for name, res := range a.Tasks {
		legacy := &Resources{
			CPU:         int(res.Cpu.CpuShares),
			MemoryMB:    int(res.Memory.MemoryMB),
			MemoryMaxMB: int(res.Memory.MemoryMaxMB),
			Networks:    res.Networks,
		}
		out[name] = legacy
	}
	return out
}
// Canonicalize lifts per-task port assignments into the shared port list
// so allocations expose a single flattened view of their ports. Dynamic
// ports are listed before reserved ports for each network, matching the
// original traversal order.
func (a *AllocatedResources) Canonicalize() {
	a.Shared.Canonicalize()
	for _, task := range a.Tasks {
		for _, nw := range task.Networks {
			for _, ports := range [][]Port{nw.DynamicPorts, nw.ReservedPorts} {
				for _, port := range ports {
					a.Shared.Ports = append(a.Shared.Ports, AllocatedPortMapping{
						Label:  port.Label,
						Value:  port.Value,
						To:     port.To,
						HostIP: nw.IP,
					})
				}
			}
		}
	}
}
// AllocatedTaskResources are the set of resources allocated to a task.
type AllocatedTaskResources struct {
	Cpu      AllocatedCpuResources
	Memory   AllocatedMemoryResources
	Networks Networks
	Devices  []*AllocatedDeviceResource
}
// Copy returns a deep copy of the task resources, or nil for a nil
// receiver.
func (a *AllocatedTaskResources) Copy() *AllocatedTaskResources {
	if a == nil {
		return nil
	}
	out := *a
	out.Networks = a.Networks.Copy()
	if a.Devices != nil {
		out.Devices = make([]*AllocatedDeviceResource, len(a.Devices))
		for i, d := range a.Devices {
			out.Devices[i] = d.Copy()
		}
	}
	return &out
}
// NetIndex finds the matching net index using device name
func (a *AllocatedTaskResources) NetIndex(n *NetworkResource) int {
	// Delegates to Networks.NetIndex, which matches on the Device field
	// and returns -1 when no network matches.
	return a.Networks.NetIndex(n)
}
// Add accumulates delta's resources into a. Networks are merged by device
// name and devices by ID tuple; unmatched entries are deep-copied in.
func (a *AllocatedTaskResources) Add(delta *AllocatedTaskResources) {
	if delta == nil {
		return
	}
	a.Cpu.Add(&delta.Cpu)
	a.Memory.Add(&delta.Memory)
	for _, network := range delta.Networks {
		if idx := a.NetIndex(network); idx >= 0 {
			a.Networks[idx].Add(network)
		} else {
			a.Networks = append(a.Networks, network.Copy())
		}
	}
	for _, device := range delta.Devices {
		if idx := AllocatedDevices(a.Devices).Index(device); idx >= 0 {
			a.Devices[idx].Add(device)
		} else {
			a.Devices = append(a.Devices, device.Copy())
		}
	}
}
// Max raises a's CPU and memory to at least other's values. Networks and
// devices from other are merged the same way Add merges them (appended or
// added, not maxed), mirroring the original behavior.
func (a *AllocatedTaskResources) Max(other *AllocatedTaskResources) {
	if other == nil {
		return
	}
	a.Cpu.Max(&other.Cpu)
	a.Memory.Max(&other.Memory)
	for _, network := range other.Networks {
		if idx := a.NetIndex(network); idx >= 0 {
			a.Networks[idx].Add(network)
		} else {
			a.Networks = append(a.Networks, network.Copy())
		}
	}
	for _, device := range other.Devices {
		if idx := AllocatedDevices(a.Devices).Index(device); idx >= 0 {
			a.Devices[idx].Add(device)
		} else {
			a.Devices = append(a.Devices, device.Copy())
		}
	}
}
// Comparable turns AllocatedTaskResources into ComparableResources
// as a helper step in preemption
func (a *AllocatedTaskResources) Comparable() *ComparableResources {
	c := &ComparableResources{}
	c.Flattened.Cpu = AllocatedCpuResources{
		CpuShares:     a.Cpu.CpuShares,
		ReservedCores: a.Cpu.ReservedCores,
	}
	c.Flattened.Memory = AllocatedMemoryResources{
		MemoryMB:    a.Memory.MemoryMB,
		MemoryMaxMB: a.Memory.MemoryMaxMB,
	}
	c.Flattened.Networks = append(c.Flattened.Networks, a.Networks...)
	return c
}
// Subtract only subtracts CPU and Memory resources. Network utilization
// is managed separately in NetworkIndex
func (a *AllocatedTaskResources) Subtract(delta *AllocatedTaskResources) {
	if delta == nil {
		return
	}
	a.Memory.Subtract(&delta.Memory)
	a.Cpu.Subtract(&delta.Cpu)
}
// AllocatedSharedResources are the set of resources allocated to a task group.
type AllocatedSharedResources struct {
	// Networks are the group-level networks.
	Networks Networks
	// DiskMB is the ephemeral disk allocated to the group.
	DiskMB int64
	// Ports is the flattened list of port mappings (see Canonicalize).
	Ports AllocatedPorts
}
// Copy returns a copy of the shared resources. Networks are deep-copied;
// the Ports slice header is copied as-is and therefore shares backing
// storage with the original — NOTE(review): confirm callers never mutate
// Ports through a copy.
func (a AllocatedSharedResources) Copy() AllocatedSharedResources {
	out := a
	out.Networks = a.Networks.Copy()
	return out
}
// Add accumulates delta's disk usage and appends its networks.
func (a *AllocatedSharedResources) Add(delta *AllocatedSharedResources) {
	if delta == nil {
		return
	}
	a.DiskMB += delta.DiskMB
	a.Networks = append(a.Networks, delta.Networks...)
}
// Subtract removes delta's disk usage and drops any networks that appear
// in delta from a's network list. Networks are matched by pointer
// identity, mirroring the original behavior.
func (a *AllocatedSharedResources) Subtract(delta *AllocatedSharedResources) {
	if delta == nil {
		return
	}
	remove := make(map[*NetworkResource]struct{}, len(delta.Networks))
	for _, nw := range delta.Networks {
		remove[nw] = struct{}{}
	}
	var kept Networks
	for _, nw := range a.Networks {
		if _, drop := remove[nw]; !drop {
			kept = append(kept, nw)
		}
	}
	a.Networks = kept
	a.DiskMB -= delta.DiskMB
}
// Canonicalize populates the flattened Ports list from the first
// network's ports when Ports has not yet been set (dynamic ports first,
// then reserved, matching the original order).
func (a *AllocatedSharedResources) Canonicalize() {
	if len(a.Networks) == 0 {
		return
	}
	nw := a.Networks[0]
	if len(a.Ports) != 0 || len(nw.DynamicPorts)+len(nw.ReservedPorts) == 0 {
		return
	}
	for _, ports := range [][]Port{nw.DynamicPorts, nw.ReservedPorts} {
		for _, p := range ports {
			a.Ports = append(a.Ports, AllocatedPortMapping{
				Label:  p.Label,
				Value:  p.Value,
				To:     p.To,
				HostIP: nw.IP,
			})
		}
	}
}
// AllocatedCpuResources captures the allocated CPU resources.
type AllocatedCpuResources struct {
	// CpuShares is the amount of allocated CPU shares.
	CpuShares int64
	// ReservedCores are the specific cores reserved for the allocation.
	ReservedCores []uint16
}
// Add accumulates delta's CPU shares and unions its reserved cores into a.
func (a *AllocatedCpuResources) Add(delta *AllocatedCpuResources) {
	if delta == nil {
		return
	}
	a.CpuShares += delta.CpuShares
	merged := cpuset.New(a.ReservedCores...).Union(cpuset.New(delta.ReservedCores...))
	a.ReservedCores = merged.ToSlice()
}
// Subtract removes delta's CPU shares and takes the set difference of the
// reserved cores.
func (a *AllocatedCpuResources) Subtract(delta *AllocatedCpuResources) {
	if delta == nil {
		return
	}
	a.CpuShares -= delta.CpuShares
	remaining := cpuset.New(a.ReservedCores...).Difference(cpuset.New(delta.ReservedCores...))
	a.ReservedCores = remaining.ToSlice()
}
// Max raises a's CPU shares to other's when larger, and adopts other's
// reserved-core list when it is longer. Note the core comparison is by
// length, not by set contents, mirroring the original behavior.
func (a *AllocatedCpuResources) Max(other *AllocatedCpuResources) {
	if other == nil {
		return
	}
	if a.CpuShares < other.CpuShares {
		a.CpuShares = other.CpuShares
	}
	if len(a.ReservedCores) < len(other.ReservedCores) {
		a.ReservedCores = other.ReservedCores
	}
}
// AllocatedMemoryResources captures the allocated memory resources.
type AllocatedMemoryResources struct {
	// MemoryMB is the guaranteed memory in MB.
	MemoryMB int64
	// MemoryMaxMB is the memory limit in MB; Add/Subtract treat a zero
	// value as "same as MemoryMB".
	MemoryMaxMB int64
}
// Add accumulates delta's memory into a. When delta carries no explicit
// max, its MemoryMB counts against the max as well.
func (a *AllocatedMemoryResources) Add(delta *AllocatedMemoryResources) {
	if delta == nil {
		return
	}
	a.MemoryMB += delta.MemoryMB
	deltaMax := delta.MemoryMaxMB
	if deltaMax == 0 {
		deltaMax = delta.MemoryMB
	}
	a.MemoryMaxMB += deltaMax
}
// Subtract removes delta's memory from a. When delta carries no explicit
// max, its MemoryMB counts against the max as well.
func (a *AllocatedMemoryResources) Subtract(delta *AllocatedMemoryResources) {
	if delta == nil {
		return
	}
	a.MemoryMB -= delta.MemoryMB
	deltaMax := delta.MemoryMaxMB
	if deltaMax == 0 {
		deltaMax = delta.MemoryMB
	}
	a.MemoryMaxMB -= deltaMax
}
// Max raises a's memory figures to at least other's values.
func (a *AllocatedMemoryResources) Max(other *AllocatedMemoryResources) {
	if other == nil {
		return
	}
	if a.MemoryMB < other.MemoryMB {
		a.MemoryMB = other.MemoryMB
	}
	if a.MemoryMaxMB < other.MemoryMaxMB {
		a.MemoryMaxMB = other.MemoryMaxMB
	}
}
// AllocatedDevices is a list of allocated device resources with lookup helpers.
type AllocatedDevices []*AllocatedDeviceResource
// Index finds the matching index using the passed device. If not found, -1 is
// returned. Matching is by the device's vendor/type/name ID tuple.
func (a AllocatedDevices) Index(d *AllocatedDeviceResource) int {
	if d == nil {
		return -1
	}
	want := d.ID()
	for i := range a {
		if a[i].ID().Equals(want) {
			return i
		}
	}
	return -1
}
// AllocatedDeviceResource captures a set of allocated devices.
type AllocatedDeviceResource struct {
	// Vendor, Type, and Name are used to select the plugin to request the
	// device IDs from.
	Vendor string
	Type   string
	Name   string

	// DeviceIDs is the set of allocated devices
	DeviceIDs []string
}
// ID returns the identifying tuple for the allocated devices, or nil for
// a nil receiver.
func (a *AllocatedDeviceResource) ID() *DeviceIdTuple {
	if a == nil {
		return nil
	}
	id := DeviceIdTuple{
		Vendor: a.Vendor,
		Type:   a.Type,
		Name:   a.Name,
	}
	return &id
}
// Add appends delta's device IDs to a.
func (a *AllocatedDeviceResource) Add(delta *AllocatedDeviceResource) {
	if delta == nil {
		return
	}
	a.DeviceIDs = append(a.DeviceIDs, delta.DeviceIDs...)
}
// Copy returns a deep copy, duplicating the device ID list so the copy
// does not share backing storage with the original.
func (a *AllocatedDeviceResource) Copy() *AllocatedDeviceResource {
	if a == nil {
		return a
	}
	out := *a
	out.DeviceIDs = make([]string, len(a.DeviceIDs))
	copy(out.DeviceIDs, a.DeviceIDs)
	return &out
}
// ComparableResources is the set of resources allocated to a task group but
// not keyed by Task, making it easier to compare.
type ComparableResources struct {
	Flattened AllocatedTaskResources
	Shared    AllocatedSharedResources
}
// Add accumulates delta's flattened and shared resources into c.
func (c *ComparableResources) Add(delta *ComparableResources) {
	if delta == nil {
		return
	}
	c.Flattened.Add(&delta.Flattened)
	c.Shared.Add(&delta.Shared)
}
// Subtract removes delta's flattened and shared resources from c.
func (c *ComparableResources) Subtract(delta *ComparableResources) {
	if delta == nil {
		return
	}
	c.Flattened.Subtract(&delta.Flattened)
	c.Shared.Subtract(&delta.Shared)
}
// Copy returns a shallow (struct-level) copy of the comparable resources,
// or nil for a nil receiver.
func (c *ComparableResources) Copy() *ComparableResources {
	if c == nil {
		return nil
	}
	out := *c
	return &out
}
// Superset checks if one set of resources is a superset of another. This
// ignores network resources, and the NetworkIndex should be used for that.
// On failure it also returns the name of the exhausted dimension.
func (c *ComparableResources) Superset(other *ComparableResources) (bool, string) {
	switch {
	case c.Flattened.Cpu.CpuShares < other.Flattened.Cpu.CpuShares:
		return false, "cpu"
	case len(c.Flattened.Cpu.ReservedCores) > 0 &&
		!cpuset.New(c.Flattened.Cpu.ReservedCores...).IsSupersetOf(cpuset.New(other.Flattened.Cpu.ReservedCores...)):
		return false, "cores"
	case c.Flattened.Memory.MemoryMB < other.Flattened.Memory.MemoryMB:
		return false, "memory"
	case c.Shared.DiskMB < other.Shared.DiskMB:
		return false, "disk"
	}
	return true, ""
}
// NetIndex finds the matching net index using device name
func (c *ComparableResources) NetIndex(n *NetworkResource) int {
	// Delegates to Networks.NetIndex, which matches on the Device field
	// and returns -1 when no network matches.
	return c.Flattened.Networks.NetIndex(n)
}
const (
	// JobTypeCore is reserved for internal system tasks and is
	// always handled by the CoreScheduler.
	JobTypeCore = "_core"

	// User-facing job types; Job.Type selects scheduling behavior.
	JobTypeService  = "service"
	JobTypeBatch    = "batch"
	JobTypeSystem   = "system"
	JobTypeSysBatch = "sysbatch"
)
// Job status values reported in Job.Status.
const (
	JobStatusPending = "pending" // Pending means the job is waiting on scheduling
	JobStatusRunning = "running" // Running means the job has non-terminal allocations
	JobStatusDead    = "dead"    // Dead means all evaluation's and allocations are terminal
)
const (
	// JobMinPriority is the minimum allowed priority
	JobMinPriority = 1

	// JobDefaultPriority is the default priority if not specified.
	JobDefaultPriority = 50

	// JobMaxPriority is the maximum allowed priority
	JobMaxPriority = 100

	// CoreJobPriority should be higher than any user
	// specified job so that it gets priority. This is important
	// for the system to remain healthy.
	CoreJobPriority = JobMaxPriority * 2

	// JobTrackedVersions is the number of historic job versions that are
	// kept.
	JobTrackedVersions = 6

	// JobTrackedScalingEvents is the number of scaling events that are
	// kept for a single task group.
	JobTrackedScalingEvents = 20
)
// Job is the scope of a scheduling request to Nomad. It is the largest
// scoped object, and is a named collection of task groups. Each task group
// is further composed of tasks. A task group (TG) is the unit of scheduling
// however.
type Job struct {
	// Stop marks whether the user has stopped the job. A stopped job will
	// have all created allocations stopped and acts as a way to stop a job
	// without purging it from the system. This allows existing allocs to be
	// queried and the job to be inspected as it is being killed.
	Stop bool

	// Region is the Nomad region that handles scheduling this job
	Region string

	// Namespace is the namespace the job is submitted into.
	Namespace string

	// ID is a unique identifier for the job per region. It can be
	// specified hierarchically like LineOfBiz/OrgName/Team/Project
	ID string

	// ParentID is the unique identifier of the job that spawned this job.
	ParentID string

	// Name is the logical name of the job used to refer to it. This is unique
	// per region, but not unique globally.
	Name string

	// Type is used to control various behaviors about the job. Most jobs
	// are service jobs, meaning they are expected to be long lived.
	// Some jobs are batch oriented meaning they run and then terminate.
	// This can be extended in the future to support custom schedulers.
	Type string

	// Priority is used to control scheduling importance and if this job
	// can preempt other jobs.
	Priority int

	// AllAtOnce is used to control if incremental scheduling of task groups
	// is allowed or if we must do a gang scheduling of the entire job. This
	// can slow down larger jobs if resources are not available.
	AllAtOnce bool

	// Datacenters contains all the datacenters this job is allowed to span
	Datacenters []string

	// Constraints can be specified at a job level and apply to
	// all the task groups and tasks.
	Constraints []*Constraint

	// Affinities can be specified at the job level to express
	// scheduling preferences that apply to all groups and tasks
	Affinities []*Affinity

	// Spread can be specified at the job level to express spreading
	// allocations across a desired attribute, such as datacenter
	Spreads []*Spread

	// TaskGroups are the collections of task groups that this job needs
	// to run. Each task group is an atomic unit of scheduling and placement.
	TaskGroups []*TaskGroup

	// See agent.ApiJobToStructJob
	// Update provides defaults for the TaskGroup Update stanzas
	Update UpdateStrategy

	// Multiregion, when set, describes the job's multi-region deployment.
	Multiregion *Multiregion

	// Periodic is used to define the interval the job is run at.
	Periodic *PeriodicConfig

	// ParameterizedJob is used to specify the job as a parameterized job
	// for dispatching.
	ParameterizedJob *ParameterizedJobConfig

	// Dispatched is used to identify if the Job has been dispatched from a
	// parameterized job.
	Dispatched bool

	// DispatchIdempotencyToken is optionally used to ensure that a dispatched job does not have any
	// non-terminal siblings which have the same token value.
	DispatchIdempotencyToken string

	// Payload is the payload supplied when the job was dispatched.
	Payload []byte

	// Meta is used to associate arbitrary metadata with this
	// job. This is opaque to Nomad.
	Meta map[string]string

	// ConsulToken is the Consul token that proves the submitter of the job has
	// access to the Service Identity policies associated with the job's
	// Consul Connect enabled services. This field is only used to transfer the
	// token and is not stored after Job submission.
	ConsulToken string

	// ConsulNamespace is the Consul namespace
	ConsulNamespace string

	// VaultToken is the Vault token that proves the submitter of the job has
	// access to the specified Vault policies. This field is only used to
	// transfer the token and is not stored after Job submission.
	VaultToken string

	// VaultNamespace is the Vault namespace
	VaultNamespace string

	// NomadTokenID is the Accessor ID of the ACL token (if any)
	// used to register this version of the job. Used by deploymentwatcher.
	NomadTokenID string

	// Status is the current scheduling status of the job.
	Status string

	// StatusDescription is meant to provide more human useful information
	StatusDescription string

	// Stable marks a job as stable. Stability is only defined on "service" and
	// "system" jobs. The stability of a job will be set automatically as part
	// of a deployment and can be manually set via APIs. This field is updated
	// when the status of a corresponding deployment transitions to Failed
	// or Successful. This field is not meaningful for jobs that don't have an
	// update stanza.
	Stable bool

	// Version is a monotonically increasing version number that is incremented
	// on each job register.
	Version uint64

	// SubmitTime is the time at which the job was submitted as a UnixNano in
	// UTC
	SubmitTime int64

	// Raft Indexes recording when the job was created and last modified.
	CreateIndex    uint64
	ModifyIndex    uint64
	JobModifyIndex uint64
}
// NamespacedID returns the namespaced id useful for logging
func (j *Job) NamespacedID() NamespacedID {
	nsID := NamespacedID{}
	nsID.ID = j.ID
	nsID.Namespace = j.Namespace
	return nsID
}
// GetID implements the IDGetter interface, required for pagination.
// A nil receiver yields the empty string.
func (j *Job) GetID() string {
	if j != nil {
		return j.ID
	}
	return ""
}
// GetNamespace implements the NamespaceGetter interface, required for
// pagination and filtering namespaces in endpoints that support glob namespace
// requests using tokens with limited access. A nil receiver yields "".
func (j *Job) GetNamespace() string {
	if j != nil {
		return j.Namespace
	}
	return ""
}
// GetCreateIndex implements the CreateIndexGetter interface, required for
// pagination. A nil receiver yields 0.
func (j *Job) GetCreateIndex() uint64 {
	if j != nil {
		return j.CreateIndex
	}
	return 0
}
// Canonicalize is used to canonicalize fields in the Job. This should be
// called when registering a Job. It normalizes empty collections, fills in
// defaults, and recursively canonicalizes each task group and the optional
// parameterized/multiregion/periodic blocks. It is a no-op on a nil receiver.
func (j *Job) Canonicalize() {
	if j == nil {
		return
	}

	// Ensure that an empty and nil map are treated the same to avoid scheduling
	// problems since we use reflect DeepEquals.
	if len(j.Meta) == 0 {
		j.Meta = nil
	}

	// Ensure the job is in a namespace.
	if j.Namespace == "" {
		j.Namespace = DefaultNamespace
	}

	// Each group is canonicalized with the job as context (the group may
	// derive defaults from job-level settings).
	for _, tg := range j.TaskGroups {
		tg.Canonicalize(j)
	}

	// Optional blocks are only canonicalized when present.
	if j.ParameterizedJob != nil {
		j.ParameterizedJob.Canonicalize()
	}

	if j.Multiregion != nil {
		j.Multiregion.Canonicalize()
	}

	if j.Periodic != nil {
		j.Periodic.Canonicalize()
	}
}
// Copy returns a deep copy of the Job. It is expected that callers use recover.
// This job can panic if the deep copy failed as it uses reflection.
// A nil receiver yields nil. Note this starts from a shallow struct copy and
// then replaces only the reference-typed fields, so any field not explicitly
// re-copied below is shared by value.
func (j *Job) Copy() *Job {
	if j == nil {
		return nil
	}
	nj := new(Job)
	*nj = *j
	nj.Datacenters = helper.CopySliceString(nj.Datacenters)
	nj.Constraints = CopySliceConstraints(nj.Constraints)
	nj.Affinities = CopySliceAffinities(nj.Affinities)
	nj.Multiregion = nj.Multiregion.Copy()

	// Deep-copy each group; a nil TaskGroups slice is left nil rather than
	// replaced with an empty slice.
	if j.TaskGroups != nil {
		tgs := make([]*TaskGroup, len(nj.TaskGroups))
		for i, tg := range nj.TaskGroups {
			tgs[i] = tg.Copy()
		}
		nj.TaskGroups = tgs
	}

	nj.Periodic = nj.Periodic.Copy()
	nj.Meta = helper.CopyMapStringString(nj.Meta)
	nj.ParameterizedJob = nj.ParameterizedJob.Copy()
	return nj
}
// Validate is used to check a job for reasonable configuration.
// All checks append to a multierror and continue, so the caller receives every
// problem at once rather than just the first.
func (j *Job) Validate() error {
	var mErr multierror.Error

	if j.Region == "" && j.Multiregion == nil {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job region"))
	}
	if j.ID == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job ID"))
	} else if strings.Contains(j.ID, " ") {
		mErr.Errors = append(mErr.Errors, errors.New("Job ID contains a space"))
	} else if strings.Contains(j.ID, "\000") {
		mErr.Errors = append(mErr.Errors, errors.New("Job ID contains a null character"))
	}
	if j.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job name"))
	} else if strings.Contains(j.Name, "\000") {
		mErr.Errors = append(mErr.Errors, errors.New("Job Name contains a null character"))
	}
	if j.Namespace == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Job must be in a namespace"))
	}
	switch j.Type {
	case JobTypeCore, JobTypeService, JobTypeBatch, JobTypeSystem, JobTypeSysBatch:
	case "":
		mErr.Errors = append(mErr.Errors, errors.New("Missing job type"))
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Invalid job type: %q", j.Type))
	}
	if j.Priority < JobMinPriority || j.Priority > JobMaxPriority {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Job priority must be between [%d, %d]", JobMinPriority, JobMaxPriority))
	}
	// Multiregion jobs may omit datacenters; single-region jobs must list at
	// least one non-empty datacenter.
	if len(j.Datacenters) == 0 && !j.IsMultiregion() {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job datacenters"))
	} else {
		for _, v := range j.Datacenters {
			if v == "" {
				mErr.Errors = append(mErr.Errors, errors.New("Job datacenter must be non-empty string"))
			}
		}
	}
	if len(j.TaskGroups) == 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Missing job task groups"))
	}
	for idx, constr := range j.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	// Affinity and spread stanzas are rejected outright for system jobs and
	// validated individually for all other job types.
	if j.Type == JobTypeSystem {
		if j.Affinities != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza"))
		}
	} else {
		for idx, affinity := range j.Affinities {
			if err := affinity.Validate(); err != nil {
				outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err)
				mErr.Errors = append(mErr.Errors, outer)
			}
		}
	}

	if j.Type == JobTypeSystem {
		if j.Spreads != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread stanza"))
		}
	} else {
		for idx, spread := range j.Spreads {
			if err := spread.Validate(); err != nil {
				outer := fmt.Errorf("Spread %d validation failed: %s", idx+1, err)
				mErr.Errors = append(mErr.Errors, outer)
			}
		}
	}

	// Check for duplicate task groups
	taskGroups := make(map[string]int)
	for idx, tg := range j.TaskGroups {
		if tg.Name == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d missing name", idx+1))
		} else if existing, ok := taskGroups[tg.Name]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d redefines '%s' from group %d", idx+1, tg.Name, existing+1))
		} else {
			taskGroups[tg.Name] = idx
		}

		if tg.ShutdownDelay != nil && *tg.ShutdownDelay < 0 {
			mErr.Errors = append(mErr.Errors, errors.New("ShutdownDelay must be a positive value"))
		}

		if tg.StopAfterClientDisconnect != nil && *tg.StopAfterClientDisconnect != 0 {
			if *tg.StopAfterClientDisconnect > 0 &&
				!(j.Type == JobTypeBatch || j.Type == JobTypeService) {
				mErr.Errors = append(mErr.Errors, errors.New("stop_after_client_disconnect can only be set in batch and service jobs"))
			} else if *tg.StopAfterClientDisconnect < 0 {
				mErr.Errors = append(mErr.Errors, errors.New("stop_after_client_disconnect must be a positive value"))
			}
		}

		// Use the JobTypeSystem constant (previously a raw "system" literal)
		// so this check stays in sync with the other type checks above.
		if j.Type == JobTypeSystem && tg.Count > 1 {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("Job task group %s has count %d. Count cannot exceed 1 with system scheduler",
					tg.Name, tg.Count))
		}
	}

	// Validate the task group
	for _, tg := range j.TaskGroups {
		if err := tg.Validate(j); err != nil {
			outer := fmt.Errorf("Task group %s validation failed: %v", tg.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	// Validate periodic is only used with batch or sysbatch jobs.
	if j.IsPeriodic() && j.Periodic.Enabled {
		if j.Type != JobTypeBatch && j.Type != JobTypeSysBatch {
			mErr.Errors = append(mErr.Errors, fmt.Errorf(
				"Periodic can only be used with %q or %q scheduler", JobTypeBatch, JobTypeSysBatch,
			))
		}

		if err := j.Periodic.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}

	// Parameterized (dispatch) jobs are likewise batch/sysbatch only.
	if j.IsParameterized() {
		if j.Type != JobTypeBatch && j.Type != JobTypeSysBatch {
			mErr.Errors = append(mErr.Errors, fmt.Errorf(
				"Parameterized job can only be used with %q or %q scheduler", JobTypeBatch, JobTypeSysBatch,
			))
		}

		if err := j.ParameterizedJob.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}

	if j.IsMultiregion() {
		if err := j.Multiregion.Validate(j.Type, j.Datacenters); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}

	return mErr.ErrorOrNil()
}
// Warnings returns a list of warnings that may be from dubious settings or
// deprecation warnings.
func (j *Job) Warnings() error {
	var mErr multierror.Error

	// Collect per-group warnings and track canary auto-promotion usage.
	hasAutoPromote := false
	allAutoPromote := true

	for _, group := range j.TaskGroups {
		if err := group.Warnings(j); err != nil {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("Group %q has warnings: %v", group.Name, err))
		}

		update := group.Update
		if update == nil {
			continue
		}
		if update.AutoPromote {
			hasAutoPromote = true
		}
		// Having no canaries implies auto-promotion since there are no
		// canaries to promote.
		if update.Canary != 0 && !update.AutoPromote {
			allAutoPromote = false
		}
	}

	// AutoPromote must be all-or-none across the groups.
	if hasAutoPromote && !allAutoPromote {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("auto_promote must be true for all groups to enable automatic promotion"))
	}

	return mErr.ErrorOrNil()
}
// LookupTaskGroup finds a task group by name, returning nil when no group
// with that name exists.
func (j *Job) LookupTaskGroup(name string) *TaskGroup {
	for i := range j.TaskGroups {
		if j.TaskGroups[i].Name == name {
			return j.TaskGroups[i]
		}
	}
	return nil
}
// CombinedTaskMeta takes a TaskGroup and Task name and returns the combined
// meta data for the task. When joining Job, Group and Task Meta, the precedence
// is by deepest scope (Task > Group > Job).
func (j *Job) CombinedTaskMeta(groupName, taskName string) map[string]string {
	group := j.LookupTaskGroup(groupName)
	if group == nil {
		return j.Meta
	}

	// Start from a copy of the task's meta (the deepest scope).
	var meta map[string]string
	if task := group.LookupTask(taskName); task != nil {
		meta = helper.CopyMapStringString(task.Meta)
	}
	if meta == nil {
		meta = make(map[string]string, len(group.Meta)+len(j.Meta))
	}

	// Layer in group meta, then job meta, never overwriting a key set by a
	// deeper scope.
	for _, scope := range []map[string]string{group.Meta, j.Meta} {
		for k, v := range scope {
			if _, exists := meta[k]; !exists {
				meta[k] = v
			}
		}
	}

	return meta
}
// Stopped returns if a job is stopped. A nil job counts as stopped.
func (j *Job) Stopped() bool {
	if j == nil {
		return true
	}
	return j.Stop
}
// HasUpdateStrategy returns if any task group in the job has an update strategy
func (j *Job) HasUpdateStrategy() bool {
	for i := range j.TaskGroups {
		if !j.TaskGroups[i].Update.IsEmpty() {
			return true
		}
	}
	return false
}
// Stub is used to return a summary of the job. The Periodic and
// ParameterizedJob blocks are collapsed to booleans via IsPeriodic and
// IsParameterized, and the caller-provided summary is embedded as-is.
func (j *Job) Stub(summary *JobSummary) *JobListStub {
	return &JobListStub{
		ID:                j.ID,
		Namespace:         j.Namespace,
		ParentID:          j.ParentID,
		Name:              j.Name,
		Datacenters:       j.Datacenters,
		Multiregion:       j.Multiregion,
		Type:              j.Type,
		Priority:          j.Priority,
		Periodic:          j.IsPeriodic(),
		ParameterizedJob:  j.IsParameterized(),
		Stop:              j.Stop,
		Status:            j.Status,
		StatusDescription: j.StatusDescription,
		CreateIndex:       j.CreateIndex,
		ModifyIndex:       j.ModifyIndex,
		JobModifyIndex:    j.JobModifyIndex,
		SubmitTime:        j.SubmitTime,
		JobSummary:        summary,
	}
}
// IsPeriodic returns whether a job is periodic, i.e. has a Periodic block.
// Note this does not consider whether the periodic config is Enabled; see
// IsPeriodicActive for that.
func (j *Job) IsPeriodic() bool {
	return j.Periodic != nil
}
// IsPeriodicActive returns whether the job is an active periodic job that will
// create child jobs
func (j *Job) IsPeriodicActive() bool {
	if !j.IsPeriodic() || !j.Periodic.Enabled {
		return false
	}
	// A stopped or parameterized job never launches children.
	return !j.Stopped() && !j.IsParameterized()
}
// IsParameterized returns whether a job is parameterized job. A job that was
// itself created by a dispatch (Dispatched == true) is not considered
// parameterized even though it retains the ParameterizedJob block.
func (j *Job) IsParameterized() bool {
	return j.ParameterizedJob != nil && !j.Dispatched
}
// IsMultiregion returns whether a job is multiregion, i.e. has a Multiregion
// block with at least one region.
func (j *Job) IsMultiregion() bool {
	// len on a nil slice is 0, so the previous explicit `Regions != nil`
	// check was redundant; behavior is unchanged.
	return j.Multiregion != nil && len(j.Multiregion.Regions) > 0
}
// IsPlugin returns whether a job is implements a plugin (currently just CSI):
// true when any task in any group carries a CSIPluginConfig.
func (j *Job) IsPlugin() bool {
	for _, group := range j.TaskGroups {
		for _, t := range group.Tasks {
			if t.CSIPluginConfig != nil {
				return true
			}
		}
	}
	return false
}
// Vault returns the set of Vault blocks per task group, per task. Groups
// without any Vault-enabled tasks are omitted from the result.
func (j *Job) Vault() map[string]map[string]*Vault {
	out := make(map[string]map[string]*Vault, len(j.TaskGroups))

	for _, group := range j.TaskGroups {
		perTask := make(map[string]*Vault, len(group.Tasks))
		for _, task := range group.Tasks {
			if task.Vault != nil {
				perTask[task.Name] = task.Vault
			}
		}
		if len(perTask) > 0 {
			out[group.Name] = perTask
		}
	}

	return out
}
// ConnectTasks returns the set of Consul Connect enabled tasks defined on the
// job that will require a Service Identity token in the case that Consul ACLs
// are enabled. The TaskKind.Value is the name of the Consul service.
//
// This method is meaningful only after the Job has passed through the job
// submission Mutator functions.
func (j *Job) ConnectTasks() []TaskKind {
	var kinds []TaskKind
	for _, group := range j.TaskGroups {
		for _, task := range group.Tasks {
			kind := task.Kind
			if kind.IsConnectProxy() || kind.IsConnectNative() || kind.IsAnyConnectGateway() {
				kinds = append(kinds, kind)
			}
		}
	}
	return kinds
}
// RequiredSignals returns a mapping of task groups to tasks to their required
// set of signals. Signals come from Vault change modes, KillSignal settings,
// and template change modes; each task's list is deduplicated and sorted.
func (j *Job) RequiredSignals() map[string]map[string][]string {
	signals := make(map[string]map[string][]string)

	for _, group := range j.TaskGroups {
		for _, task := range group.Tasks {
			// Gather this task's signals into a set to dedupe.
			set := map[string]struct{}{}

			if v := task.Vault; v != nil && v.ChangeMode == VaultChangeModeSignal {
				set[v.ChangeSignal] = struct{}{}
			}

			// If a user has specified a KillSignal, it is required too.
			if task.KillSignal != "" {
				set[task.KillSignal] = struct{}{}
			}

			for _, tmpl := range task.Templates {
				if tmpl.ChangeMode == TemplateChangeModeSignal {
					set[tmpl.ChangeSignal] = struct{}{}
				}
			}

			if len(set) == 0 {
				continue
			}

			// Flatten to a sorted list for deterministic output.
			list := make([]string, 0, len(set))
			for sig := range set {
				list = append(list, sig)
			}
			sort.Strings(list)

			if _, ok := signals[group.Name]; !ok {
				signals[group.Name] = make(map[string][]string)
			}
			signals[group.Name][task.Name] = list
		}
	}

	return signals
}
// SpecChanged determines if the functional specification has changed between
// two job versions. Server-managed bookkeeping fields (status, version, raft
// indexes, submit time) are excluded from the comparison by copying them from
// the receiver onto a copy of the candidate before a reflect.DeepEqual.
func (j *Job) SpecChanged(new *Job) bool {
	if j == nil {
		return new != nil
	}

	// Create a copy of the new job
	c := new.Copy()

	// Update the new job so we can do a reflect
	c.Status = j.Status
	c.StatusDescription = j.StatusDescription
	c.Stable = j.Stable
	c.Version = j.Version
	c.CreateIndex = j.CreateIndex
	c.ModifyIndex = j.ModifyIndex
	c.JobModifyIndex = j.JobModifyIndex
	c.SubmitTime = j.SubmitTime

	// cgbaker: FINISH: probably need some consideration of scaling policy ID here

	// Deep equals the jobs
	return !reflect.DeepEqual(j, c)
}
// SetSubmitTime records the current wall-clock time on the job as a UnixNano
// timestamp in UTC (see the SubmitTime field).
func (j *Job) SetSubmitTime() {
	j.SubmitTime = time.Now().UTC().UnixNano()
}
// JobListStub is used to return a subset of job information
// for the job list. Periodic and ParameterizedJob are booleans derived from
// the job's corresponding blocks (see Job.Stub).
type JobListStub struct {
	ID                string
	ParentID          string
	Name              string
	Namespace         string `json:",omitempty"`
	Datacenters       []string
	Multiregion       *Multiregion
	Type              string
	Priority          int
	Periodic          bool
	ParameterizedJob  bool
	Stop              bool
	Status            string
	StatusDescription string
	JobSummary        *JobSummary
	CreateIndex       uint64
	ModifyIndex       uint64
	JobModifyIndex    uint64
	SubmitTime        int64
}
// JobSummary summarizes the state of the allocations of a job
type JobSummary struct {
	// JobID is the ID of the job the summary is for
	JobID string

	// Namespace is the namespace of the job and its summary
	Namespace string

	// Summary contains the summary per task group for the Job
	// (presumably keyed by task group name — confirm against the state store)
	Summary map[string]TaskGroupSummary

	// Children contains a summary for the children of this job.
	Children *JobChildrenSummary

	// Raft Indexes at which the summary was created/last modified
	CreateIndex uint64
	ModifyIndex uint64
}
// Copy returns a new copy of JobSummary. A nil receiver yields nil, matching
// the other Copy helpers in this package (e.g. JobChildrenSummary.Copy),
// instead of panicking on the dereference.
func (js *JobSummary) Copy() *JobSummary {
	if js == nil {
		return nil
	}
	newJobSummary := new(JobSummary)
	*newJobSummary = *js
	// Replace the shared map with a fresh one; TaskGroupSummary values are
	// plain structs and copy by value.
	newTGSummary := make(map[string]TaskGroupSummary, len(js.Summary))
	for k, v := range js.Summary {
		newTGSummary[k] = v
	}
	newJobSummary.Summary = newTGSummary
	newJobSummary.Children = newJobSummary.Children.Copy()
	return newJobSummary
}
// JobChildrenSummary contains the summary of children job statuses
type JobChildrenSummary struct {
	// Pending is the number of child jobs in the pending state.
	Pending int64
	// Running is the number of child jobs in the running state.
	Running int64
	// Dead is the number of child jobs in the dead state.
	Dead int64
}
// Copy returns a new copy of a JobChildrenSummary; nil in, nil out.
func (jc *JobChildrenSummary) Copy() *JobChildrenSummary {
	if jc == nil {
		return nil
	}
	// All fields are plain integers, so a value copy is a deep copy.
	out := *jc
	return &out
}
// TaskGroupSummary summarizes the state of all the allocations of a particular
// TaskGroup. Each field counts allocations in the corresponding state.
type TaskGroupSummary struct {
	Queued   int
	Complete int
	Failed   int
	Running  int
	Starting int
	Lost     int
	Unknown  int
}
// Valid values for UpdateStrategy.HealthCheck.
const (
	// Checks uses any registered health check state in combination with task
	// states to determine if an allocation is healthy.
	UpdateStrategyHealthCheck_Checks = "checks"

	// TaskStates uses the task states of an allocation to determine if the
	// allocation is healthy.
	UpdateStrategyHealthCheck_TaskStates = "task_states"

	// Manual allows the operator to manually signal to Nomad when an
	// allocation is healthy. This allows more advanced health checking that is
	// outside of the scope of Nomad.
	UpdateStrategyHealthCheck_Manual = "manual"
)
var (
	// DefaultUpdateStrategy provides a baseline that can be used to upgrade
	// jobs with the old policy or for populating field defaults. Note it
	// disables auto-revert/auto-promote and deploys no canaries.
	DefaultUpdateStrategy = &UpdateStrategy{
		Stagger:          30 * time.Second,
		MaxParallel:      1,
		HealthCheck:      UpdateStrategyHealthCheck_Checks,
		MinHealthyTime:   10 * time.Second,
		HealthyDeadline:  5 * time.Minute,
		ProgressDeadline: 10 * time.Minute,
		AutoRevert:       false,
		AutoPromote:      false,
		Canary:           0,
	}
)
// UpdateStrategy is used to modify how updates are done
type UpdateStrategy struct {
	// Stagger is used to determine the rate at which allocations are migrated
	// due to down or draining nodes.
	Stagger time.Duration

	// MaxParallel is how many updates can be done in parallel. A zero value
	// marks the whole strategy as empty (see IsEmpty).
	MaxParallel int

	// HealthCheck specifies the mechanism in which allocations are marked
	// healthy or unhealthy as part of a deployment; one of the
	// UpdateStrategyHealthCheck_* constants.
	HealthCheck string

	// MinHealthyTime is the minimum time an allocation must be in the healthy
	// state before it is marked as healthy, unblocking more allocations to be
	// rolled.
	MinHealthyTime time.Duration

	// HealthyDeadline is the time in which an allocation must be marked as
	// healthy before it is automatically transitioned to unhealthy. This time
	// period doesn't count against the MinHealthyTime.
	HealthyDeadline time.Duration

	// ProgressDeadline is the time in which an allocation as part of the
	// deployment must transition to healthy. If no allocation becomes healthy
	// after the deadline, the deployment is marked as failed. If the deadline
	// is zero, the first failure causes the deployment to fail.
	ProgressDeadline time.Duration

	// AutoRevert declares that if a deployment fails because of unhealthy
	// allocations, there should be an attempt to auto-revert the job to a
	// stable version.
	AutoRevert bool

	// AutoPromote declares that the deployment should be promoted when all canaries are
	// healthy
	AutoPromote bool

	// Canary is the number of canaries to deploy when a change to the task
	// group is detected.
	Canary int
}
// Copy returns a copy of the update strategy; nil in, nil out. All fields are
// scalars, so a shallow copy is a deep copy.
func (u *UpdateStrategy) Copy() *UpdateStrategy {
	if u == nil {
		return nil
	}

	// Named "c" rather than "copy" so the predeclared builtin copy is not
	// shadowed.
	c := new(UpdateStrategy)
	*c = *u
	return c
}
// Validate checks the update strategy for invalid settings. A nil strategy is
// valid. All checks are accumulated into a multierror so the caller sees
// every violation at once.
func (u *UpdateStrategy) Validate() error {
	if u == nil {
		return nil
	}

	var mErr multierror.Error
	switch u.HealthCheck {
	case UpdateStrategyHealthCheck_Checks, UpdateStrategyHealthCheck_TaskStates, UpdateStrategyHealthCheck_Manual:
	default:
		_ = multierror.Append(&mErr, fmt.Errorf("Invalid health check given: %q", u.HealthCheck))
	}

	if u.MaxParallel < 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("Max parallel can not be less than zero: %d < 0", u.MaxParallel))
	}
	if u.Canary < 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("Canary count can not be less than zero: %d < 0", u.Canary))
	}
	if u.Canary == 0 && u.AutoPromote {
		_ = multierror.Append(&mErr, fmt.Errorf("Auto Promote requires a Canary count greater than zero"))
	}
	if u.MinHealthyTime < 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("Minimum healthy time may not be less than zero: %v", u.MinHealthyTime))
	}
	if u.HealthyDeadline <= 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be greater than zero: %v", u.HealthyDeadline))
	}
	// A zero ProgressDeadline is allowed and means "fail on first failure"
	// (see the field docs); only negative values are rejected.
	if u.ProgressDeadline < 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("Progress deadline must be zero or greater: %v", u.ProgressDeadline))
	}
	if u.MinHealthyTime >= u.HealthyDeadline {
		_ = multierror.Append(&mErr, fmt.Errorf("Minimum healthy time must be less than healthy deadline: %v > %v", u.MinHealthyTime, u.HealthyDeadline))
	}
	if u.ProgressDeadline != 0 && u.HealthyDeadline >= u.ProgressDeadline {
		_ = multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be less than progress deadline: %v > %v", u.HealthyDeadline, u.ProgressDeadline))
	}
	if u.Stagger <= 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("Stagger must be greater than zero: %v", u.Stagger))
	}

	return mErr.ErrorOrNil()
}
// IsEmpty reports whether the strategy is unset: either nil or with a zero
// MaxParallel.
func (u *UpdateStrategy) IsEmpty() bool {
	return u == nil || u.MaxParallel == 0
}
// Rolling returns if a rolling strategy should be used: both a positive
// Stagger and a positive MaxParallel are required.
// TODO(alexdadgar): Remove once no longer used by the scheduler.
func (u *UpdateStrategy) Rolling() bool {
	return u.Stagger > 0 && u.MaxParallel > 0
}
// Multiregion is a job's multiregion deployment configuration. A nil value
// means the job is single-region (see Job.IsMultiregion).
type Multiregion struct {
	// Strategy holds the cross-region rollout settings (MaxParallel,
	// OnFailure); defaulted to an empty strategy by Canonicalize.
	Strategy *MultiregionStrategy
	// Regions lists the regions the job is deployed to; defaulted to an
	// empty slice by Canonicalize.
	Regions []*MultiregionRegion
}
// Canonicalize replaces nil sub-fields with empty values so later code can
// range/dereference without nil checks.
func (m *Multiregion) Canonicalize() {
	// The two defaults are independent, so order does not matter.
	if m.Regions == nil {
		m.Regions = []*MultiregionRegion{}
	}
	if m.Strategy == nil {
		m.Strategy = &MultiregionStrategy{}
	}
}
// Diff indicates whether the multiregion config has changed, using a full
// reflect.DeepEqual comparison of the two configs.
func (m *Multiregion) Diff(m2 *Multiregion) bool {
	return !reflect.DeepEqual(m, m2)
}
// Copy returns a deep copy of the multiregion config; nil in, nil out. Each
// region's Datacenters slice and Meta map are re-created (as empty, non-nil
// values) so the copy shares no mutable state with the original.
func (m *Multiregion) Copy() *Multiregion {
	if m == nil {
		return nil
	}
	// Named "c" rather than "copy" so the predeclared builtin copy is not
	// shadowed.
	c := new(Multiregion)
	if m.Strategy != nil {
		c.Strategy = &MultiregionStrategy{
			MaxParallel: m.Strategy.MaxParallel,
			OnFailure:   m.Strategy.OnFailure,
		}
	}
	for _, region := range m.Regions {
		copyRegion := &MultiregionRegion{
			Name:        region.Name,
			Count:       region.Count,
			Datacenters: []string{},
			Meta:        map[string]string{},
		}
		copyRegion.Datacenters = append(copyRegion.Datacenters, region.Datacenters...)
		for k, v := range region.Meta {
			copyRegion.Meta[k] = v
		}
		c.Regions = append(c.Regions, copyRegion)
	}
	return c
}
// MultiregionStrategy controls how a multiregion deployment is rolled out.
type MultiregionStrategy struct {
	// MaxParallel bounds concurrency of the rollout — presumably the number
	// of regions deployed at once; confirm against the deployment watcher.
	MaxParallel int
	// OnFailure names the behavior when a region's deployment fails —
	// semantics live in the multiregion deployment code; confirm there.
	OnFailure string
}
// MultiregionRegion is a per-region override block within a multiregion job.
type MultiregionRegion struct {
	// Name of the target region.
	Name string
	// Count is the region-specific group count.
	Count int
	// Datacenters are the region-specific datacenters to run in.
	Datacenters []string
	// Meta is region-specific metadata.
	Meta map[string]string
}
// Namespace allows logically grouping jobs and their associated objects.
type Namespace struct {
	// Name is the name of the namespace
	Name string

	// Description is a human readable description of the namespace
	Description string

	// Quota is the quota specification that the namespace should account
	// against.
	Quota string

	// Capabilities is the set of capabilities allowed for this namespace
	Capabilities *NamespaceCapabilities

	// Meta is the set of metadata key/value pairs that are attached to the
	// namespace
	Meta map[string]string

	// Hash is the hash of the namespace which is used to efficiently replicate
	// cross-regions. It is computed from the user-set fields by SetHash.
	Hash []byte

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
// NamespaceCapabilities represents a set of capabilities allowed for this
// namespace, to be checked at job submission time.
type NamespaceCapabilities struct {
	// EnabledTaskDrivers lists the task drivers jobs in this namespace may use.
	EnabledTaskDrivers []string
	// DisabledTaskDrivers lists the task drivers jobs in this namespace may
	// not use.
	DisabledTaskDrivers []string
}
// Validate checks the namespace's name and description for validity.
func (n *Namespace) Validate() error {
	var mErr multierror.Error

	// The name must match the allowed namespace-name pattern.
	if !validNamespaceName.MatchString(n.Name) {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("invalid name %q. Must match regex %s", n.Name, validNamespaceName))
	}

	// The description is length-limited.
	if len(n.Description) > maxNamespaceDescriptionLength {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("description longer than %d", maxNamespaceDescriptionLength))
	}

	return mErr.ErrorOrNil()
}
// SetHash is used to compute and set the hash of the namespace.
// The write order below is semantically load-bearing: changing it changes the
// hash of every existing namespace, which would force cross-region
// re-replication, so do not reorder.
func (n *Namespace) SetHash() []byte {
	// Initialize a 256bit Blake2 hash (32 bytes)
	hash, err := blake2b.New256(nil)
	if err != nil {
		// blake2b.New256 only fails on bad key input; nil is always valid,
		// so this is unreachable in practice.
		panic(err)
	}

	// Write all the user set fields
	_, _ = hash.Write([]byte(n.Name))
	_, _ = hash.Write([]byte(n.Description))
	_, _ = hash.Write([]byte(n.Quota))
	if n.Capabilities != nil {
		for _, driver := range n.Capabilities.EnabledTaskDrivers {
			_, _ = hash.Write([]byte(driver))
		}
		for _, driver := range n.Capabilities.DisabledTaskDrivers {
			_, _ = hash.Write([]byte(driver))
		}
	}

	// sort keys to ensure hash stability when meta is stored later
	// (map iteration order is random in Go)
	var keys []string
	for k := range n.Meta {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		_, _ = hash.Write([]byte(k))
		_, _ = hash.Write([]byte(n.Meta[k]))
	}

	// Finalize the hash
	hashVal := hash.Sum(nil)

	// Set and return the hash
	n.Hash = hashVal
	return hashVal
}
// Copy returns a deep copy of the namespace: the hash, capabilities, and meta
// map are all duplicated rather than shared.
func (n *Namespace) Copy() *Namespace {
	nc := new(Namespace)
	*nc = *n

	// Duplicate the hash bytes.
	nc.Hash = make([]byte, len(n.Hash))
	copy(nc.Hash, n.Hash)

	// Duplicate the capabilities block when present.
	if caps := n.Capabilities; caps != nil {
		nc.Capabilities = &NamespaceCapabilities{
			EnabledTaskDrivers:  helper.CopySliceString(caps.EnabledTaskDrivers),
			DisabledTaskDrivers: helper.CopySliceString(caps.DisabledTaskDrivers),
		}
	}

	// Duplicate the meta map when present; a nil map stays nil.
	if n.Meta != nil {
		nc.Meta = make(map[string]string, len(n.Meta))
		for k, v := range n.Meta {
			nc.Meta[k] = v
		}
	}

	return nc
}
// Request/response envelopes for the namespace RPC endpoints. Each embeds the
// standard QueryOptions/QueryMeta or WriteRequest metadata.

// NamespaceListRequest is used to request a list of namespaces
type NamespaceListRequest struct {
	QueryOptions
}

// NamespaceListResponse is used for a list request
type NamespaceListResponse struct {
	Namespaces []*Namespace
	QueryMeta
}

// NamespaceSpecificRequest is used to query a specific namespace by name
type NamespaceSpecificRequest struct {
	Name string
	QueryOptions
}

// SingleNamespaceResponse is used to return a single namespace
type SingleNamespaceResponse struct {
	Namespace *Namespace
	QueryMeta
}

// NamespaceSetRequest is used to query a set of namespaces
type NamespaceSetRequest struct {
	Namespaces []string
	QueryOptions
}

// NamespaceSetResponse is used to return a set of namespaces
type NamespaceSetResponse struct {
	Namespaces map[string]*Namespace // Keyed by namespace Name
	QueryMeta
}

// NamespaceDeleteRequest is used to delete a set of namespaces
type NamespaceDeleteRequest struct {
	Namespaces []string
	WriteRequest
}

// NamespaceUpsertRequest is used to upsert a set of namespaces
type NamespaceUpsertRequest struct {
	Namespaces []*Namespace
	WriteRequest
}
// Valid values for PeriodicConfig.SpecType.
const (
	// PeriodicSpecCron is used for a cron spec (parsed with cronexpr; see
	// PeriodicConfig.Validate and PeriodicConfig.Next).
	PeriodicSpecCron = "cron"

	// PeriodicSpecTest is only used by unit tests. It is a sorted, comma
	// separated list of unix timestamps at which to launch.
	PeriodicSpecTest = "_internal_test"
)
// PeriodicConfig defines the interval a job should be run at.
type PeriodicConfig struct {
	// Enabled determines if the job should be run periodically.
	Enabled bool

	// Spec specifies the interval the job should be run as. It is parsed based
	// on the SpecType.
	Spec string

	// SpecType defines the format of the spec; one of the PeriodicSpec*
	// constants.
	SpecType string

	// ProhibitOverlap enforces that spawned jobs do not run in parallel.
	ProhibitOverlap bool

	// TimeZone is the user specified string that determines the time zone to
	// launch against. The time zones must be specified from IANA Time Zone
	// database, such as "America/New_York".
	// Reference: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
	// Reference: https://www.iana.org/time-zones
	TimeZone string

	// location is the time zone to evaluate the launch time against,
	// resolved from TimeZone by Canonicalize; read through GetLocation.
	location *time.Location
}
// Copy returns a copy of the periodic config; nil in, nil out. A value copy
// suffices since every field (including the cached location pointer) may be
// shared read-only.
func (p *PeriodicConfig) Copy() *PeriodicConfig {
	if p == nil {
		return nil
	}
	np := *p
	return &np
}
// Validate checks the periodic configuration. A disabled config is always
// valid; otherwise the spec, time zone, and spec type are checked and all
// problems are accumulated into a multierror.
func (p *PeriodicConfig) Validate() error {
	if !p.Enabled {
		return nil
	}

	var mErr multierror.Error
	if p.Spec == "" {
		_ = multierror.Append(&mErr, fmt.Errorf("Must specify a spec"))
	}

	// Check if we got a valid time zone
	if p.TimeZone != "" {
		if _, err := time.LoadLocation(p.TimeZone); err != nil {
			_ = multierror.Append(&mErr, fmt.Errorf("Invalid time zone %q: %v", p.TimeZone, err))
		}
	}

	switch p.SpecType {
	case PeriodicSpecCron:
		// Validate the cron spec
		if _, err := cronexpr.Parse(p.Spec); err != nil {
			_ = multierror.Append(&mErr, fmt.Errorf("Invalid cron spec %q: %v", p.Spec, err))
		}
	case PeriodicSpecTest:
		// No-op
	default:
		_ = multierror.Append(&mErr, fmt.Errorf("Unknown periodic specification type %q", p.SpecType))
	}

	return mErr.ErrorOrNil()
}
// Canonicalize resolves TimeZone into the cached location field so later
// calls (GetLocation, Next) do not have to re-parse it. An empty TimeZone
// loads as UTC.
func (p *PeriodicConfig) Canonicalize() {
	l, err := time.LoadLocation(p.TimeZone)
	if err != nil {
		// Fall back to UTC on an invalid zone. The previous code set UTC and
		// then unconditionally overwrote it with the nil `l`, leaving
		// p.location nil on error (masked externally by GetLocation's nil
		// fallback). Return early so the fallback actually sticks.
		p.location = time.UTC
		return
	}

	p.location = l
}
// CronParseNext is a helper that parses the next time for the given expression
// but captures any panic that may occur in the underlying library.
// The named results let the deferred recover replace the return values with a
// zero time and a descriptive error.
func CronParseNext(e *cronexpr.Expression, fromTime time.Time, spec string) (t time.Time, err error) {
	defer func() {
		if recover() != nil {
			t = time.Time{}
			err = fmt.Errorf("failed parsing cron expression: %q", spec)
		}
	}()

	return e.Next(fromTime), nil
}
// Next returns the closest time instant matching the spec that is after the
// passed time. If no matching instance exists, the zero value of time.Time is
// returned. The `time.Location` of the returned value matches that of the
// passed time. An unknown SpecType also yields the zero time with a nil error.
func (p *PeriodicConfig) Next(fromTime time.Time) (time.Time, error) {
	switch p.SpecType {
	case PeriodicSpecCron:
		e, err := cronexpr.Parse(p.Spec)
		if err != nil {
			return time.Time{}, fmt.Errorf("failed parsing cron expression: %q: %v", p.Spec, err)
		}
		return CronParseNext(e, fromTime, p.Spec)
	case PeriodicSpecTest:
		split := strings.Split(p.Spec, ",")
		if len(split) == 1 && split[0] == "" {
			return time.Time{}, nil
		}

		// Parse the times
		times := make([]time.Time, len(split))
		for i, s := range split {
			unix, err := strconv.Atoi(s)
			if err != nil {
				// NOTE(review): a malformed timestamp silently yields the
				// zero time with a nil error — presumably acceptable for
				// this test-only spec type, but confirm before relying on it.
				return time.Time{}, nil
			}

			times[i] = time.Unix(int64(unix), 0)
		}

		// Find the next match; the spec is documented to be sorted, so the
		// first entry after fromTime is the closest.
		for _, next := range times {
			if fromTime.Before(next) {
				return next, nil
			}
		}
	}

	return time.Time{}, nil
}
// GetLocation returns the location to use for determining the time zone to run
// the periodic job against, defaulting to UTC when no cached location exists.
func (p *PeriodicConfig) GetLocation() *time.Location {
	// Jobs pre 0.5.5 will not have this
	if p.location == nil {
		return time.UTC
	}
	return p.location
}
const (
	// PeriodicLaunchSuffix is the string appended to the periodic jobs ID
	// when launching derived instances of it. (Compare DispatchLaunchSuffix
	// for parameterized jobs.)
	PeriodicLaunchSuffix = "/periodic-"
)
// PeriodicLaunch tracks the last launch time of a periodic job.
type PeriodicLaunch struct {
	ID        string    // ID of the periodic job.
	Namespace string    // Namespace of the periodic job.
	Launch    time.Time // The last launch time.

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64
}
const (
	// Valid values for ParameterizedJobConfig.Payload, controlling whether a
	// dispatch may/must carry a payload.
	DispatchPayloadForbidden = "forbidden"
	DispatchPayloadOptional  = "optional"
	DispatchPayloadRequired  = "required"

	// DispatchLaunchSuffix is the string appended to the parameterized job's ID
	// when dispatching instances of it.
	DispatchLaunchSuffix = "/dispatch-"
)
// ParameterizedJobConfig is used to configure the parameterized job
type ParameterizedJobConfig struct {
	// Payload configure the payload requirements; one of the
	// DispatchPayload* constants (defaulted by Canonicalize).
	Payload string

	// MetaRequired is metadata keys that must be specified by the dispatcher
	MetaRequired []string

	// MetaOptional is metadata keys that may be specified by the dispatcher
	MetaOptional []string
}
// Validate checks the parameterized job configuration: the payload mode must
// be a known value and the required/optional meta key sets must not overlap.
func (d *ParameterizedJobConfig) Validate() error {
	var mErr multierror.Error

	switch d.Payload {
	case DispatchPayloadOptional, DispatchPayloadRequired, DispatchPayloadForbidden:
	default:
		_ = multierror.Append(&mErr, fmt.Errorf("Unknown payload requirement: %q", d.Payload))
	}

	// Check that the meta configurations are disjoint sets
	if disjoint, offending := helper.SliceSetDisjoint(d.MetaRequired, d.MetaOptional); !disjoint {
		_ = multierror.Append(&mErr, fmt.Errorf("Required and optional meta keys should be disjoint. Following keys exist in both: %v", offending))
	}

	return mErr.ErrorOrNil()
}
// Canonicalize defaults an unset payload mode to DispatchPayloadOptional.
func (d *ParameterizedJobConfig) Canonicalize() {
	if d.Payload == "" {
		d.Payload = DispatchPayloadOptional
	}
}
// Copy returns a deep copy of the parameterized job config; nil in, nil out.
func (d *ParameterizedJobConfig) Copy() *ParameterizedJobConfig {
	if d == nil {
		return nil
	}
	nd := *d
	// Duplicate the slices so the copy shares no backing arrays.
	nd.MetaOptional = helper.CopySliceString(d.MetaOptional)
	nd.MetaRequired = helper.CopySliceString(d.MetaRequired)
	return &nd
}
// DispatchedID returns an ID appropriate for a job dispatched against a
// particular parameterized job: the template's ID, the dispatch suffix, the
// dispatch time as unix seconds, and an 8-character uuid fragment for
// uniqueness.
func DispatchedID(templateID string, t time.Time) string {
	u := uuid.Generate()[:8]
	return fmt.Sprintf("%s%s%d-%s", templateID, DispatchLaunchSuffix, t.Unix(), u)
}
// DispatchPayloadConfig configures how a task gets its input from a job dispatch
type DispatchPayloadConfig struct {
	// File specifies a relative path to where the input data should be written;
	// it must stay within the task's local directory (enforced by Validate).
	File string
}
// Copy returns a copy of the dispatch payload config; nil in, nil out.
func (d *DispatchPayloadConfig) Copy() *DispatchPayloadConfig {
	if d == nil {
		return nil
	}
	out := *d
	return &out
}
// Validate ensures the payload destination path stays inside the allocation's
// task/local directory.
func (d *DispatchPayloadConfig) Validate() error {
	// Verify the destination doesn't escape
	escaped, err := escapingfs.PathEscapesAllocViaRelative("task/local/", d.File)
	if err != nil {
		return fmt.Errorf("invalid destination path: %v", err)
	} else if escaped {
		return fmt.Errorf("destination escapes allocation directory")
	}

	return nil
}
// Valid values for TaskLifecycleConfig.Hook (see TaskLifecycleConfig.Validate).
const (
	TaskLifecycleHookPrestart = "prestart"
	TaskLifecycleHookPoststart = "poststart"
	TaskLifecycleHookPoststop = "poststop"
)
// TaskLifecycleConfig describes when a task runs within an allocation's
// lifecycle.
type TaskLifecycleConfig struct {
	// Hook names the lifecycle phase; must be one of the
	// TaskLifecycleHook* constants (enforced by Validate).
	Hook string
	// Sidecar presumably marks the task as long-running alongside the main
	// tasks rather than run-to-completion — NOTE(review): inferred from the
	// name; confirm against callers.
	Sidecar bool
}
// Copy returns a copy of the lifecycle config; nil-safe.
func (d *TaskLifecycleConfig) Copy() *TaskLifecycleConfig {
	if d == nil {
		return nil
	}
	dup := *d
	return &dup
}
// Validate checks that a lifecycle hook was provided and is one of the
// recognized hook names. A nil config is valid.
func (d *TaskLifecycleConfig) Validate() error {
	if d == nil {
		return nil
	}
	switch d.Hook {
	case TaskLifecycleHookPrestart, TaskLifecycleHookPoststart, TaskLifecycleHookPoststop:
		return nil
	case "":
		return fmt.Errorf("no lifecycle hook provided")
	default:
		return fmt.Errorf("invalid hook: %v", d.Hook)
	}
}
var (
	// These default restart policies need to be kept in sync with
	// Canonicalize in api/tasks.go

	// DefaultServiceJobRestartPolicy is applied to service and system jobs
	// that do not specify a restart policy (see NewRestartPolicy).
	DefaultServiceJobRestartPolicy = RestartPolicy{
		Delay: 15 * time.Second,
		Attempts: 2,
		Interval: 30 * time.Minute,
		Mode: RestartPolicyModeFail,
	}
	// DefaultBatchJobRestartPolicy is applied to batch jobs that do not
	// specify a restart policy (see NewRestartPolicy).
	DefaultBatchJobRestartPolicy = RestartPolicy{
		Delay: 15 * time.Second,
		Attempts: 3,
		Interval: 24 * time.Hour,
		Mode: RestartPolicyModeFail,
	}
)
var (
	// These default reschedule policies need to be kept in sync with
	// NewDefaultReschedulePolicy in api/tasks.go

	// DefaultServiceJobReschedulePolicy is applied to service jobs that do
	// not specify a reschedule policy (see NewReschedulePolicy).
	DefaultServiceJobReschedulePolicy = ReschedulePolicy{
		Delay: 30 * time.Second,
		DelayFunction: "exponential",
		MaxDelay: 1 * time.Hour,
		Unlimited: true,
	}
	// DefaultBatchJobReschedulePolicy is applied to batch jobs that do not
	// specify a reschedule policy (see NewReschedulePolicy).
	DefaultBatchJobReschedulePolicy = ReschedulePolicy{
		Attempts: 1,
		Interval: 24 * time.Hour,
		Delay: 5 * time.Second,
		DelayFunction: "constant",
	}
)
const (
	// RestartPolicyModeDelay causes an artificial delay till the next interval is
	// reached when the specified attempts have been reached in the interval.
	RestartPolicyModeDelay = "delay"

	// RestartPolicyModeFail causes a job to fail if the specified number of
	// attempts are reached within an interval.
	RestartPolicyModeFail = "fail"

	// RestartPolicyMinInterval is the minimum interval that is accepted for a
	// restart policy (enforced by RestartPolicy.Validate).
	RestartPolicyMinInterval = 5 * time.Second

	// ReasonWithinPolicy describes restart events that are within policy
	ReasonWithinPolicy = "Restart within policy"
)
// JobScalingEvents contains the scaling events for a given job
type JobScalingEvents struct {
	// Namespace and JobID identify the job the events belong to.
	Namespace string
	JobID string

	// ScalingEvents is indexed by target; currently, this is just task group.
	// The indexed array is sorted from newest to oldest event and should
	// have less than JobTrackedScalingEvents entries.
	ScalingEvents map[string][]*ScalingEvent

	// Raft index
	ModifyIndex uint64
}
// NewScalingEvent returns a ScalingEvent carrying the given message and
// stamped with the current time in Unix seconds.
func NewScalingEvent(message string) *ScalingEvent {
	e := &ScalingEvent{
		Message: message,
		Time:    time.Now().Unix(),
	}
	return e
}
// ScalingEvent describes a scaling event against a Job
type ScalingEvent struct {
	// Unix timestamp for the scaling event. NOTE(review): this comment
	// previously claimed nanoseconds, but NewScalingEvent populates the
	// field with time.Now().Unix() (seconds) — confirm which unit consumers
	// expect.
	Time int64

	// Count is the new scaling count, if provided
	Count *int64

	// PreviousCount is the count at the time of the scaling event
	PreviousCount int64

	// Message is the message describing a scaling event
	Message string

	// Error indicates an error state for this scaling event
	Error bool

	// Meta is a map of metadata returned during a scaling event
	Meta map[string]interface{}

	// EvalID is the ID for an evaluation if one was created as part of a scaling event
	EvalID *string

	// Raft index
	CreateIndex uint64
}
// SetError marks whether this event represents an error state and returns
// the event to allow call chaining. The parameter was renamed from
// "error", which shadowed the builtin error type inside this method.
func (e *ScalingEvent) SetError(isError bool) *ScalingEvent {
	e.Error = isError
	return e
}
// SetMeta attaches the given metadata map to the event and returns the
// event to allow call chaining.
func (e *ScalingEvent) SetMeta(meta map[string]interface{}) *ScalingEvent {
	e.Meta = meta
	return e
}
// SetEvalID records the evaluation created for this scaling event and
// returns the event to allow call chaining.
func (e *ScalingEvent) SetEvalID(evalID string) *ScalingEvent {
	id := evalID
	e.EvalID = &id
	return e
}
// ScalingEventRequest is used by the Job.Scale endpoint
// to register scaling events
type ScalingEventRequest struct {
	// Namespace, JobID, and TaskGroup identify the target of the event.
	Namespace string
	JobID string
	TaskGroup string

	// ScalingEvent is the event to record against the target.
	ScalingEvent *ScalingEvent
}
// ScalingPolicy specifies the scaling policy for a scaling target
type ScalingPolicy struct {
	// ID is a generated UUID used for looking up the scaling policy
	ID string

	// Type is the type of scaling performed by the policy; defaults to
	// ScalingPolicyTypeHorizontal (see Canonicalize).
	Type string

	// Target contains information about the target of the scaling policy, like job and group
	Target map[string]string

	// Policy is an opaque description of the scaling policy, passed to the autoscaler
	Policy map[string]interface{}

	// Min is the minimum allowable scaling count for this target
	Min int64

	// Max is the maximum allowable scaling count for this target
	Max int64

	// Enabled indicates whether this policy has been enabled/disabled
	Enabled bool

	// Raft indexes
	CreateIndex uint64
	ModifyIndex uint64
}
// JobKey returns a key that is unique to a job-scoped target, useful as a
// map key. It joins the policy type with the target group and task,
// separated by NUL bytes so the parts cannot collide.
func (p *ScalingPolicy) JobKey() string {
	parts := []string{
		p.Type,
		p.Target[ScalingTargetGroup],
		p.Target[ScalingTargetTask],
	}
	return strings.Join(parts, "\000")
}
// Keys used in ScalingPolicy.Target, plus the default policy type.
const (
	ScalingTargetNamespace = "Namespace"
	ScalingTargetJob = "Job"
	ScalingTargetGroup = "Group"
	ScalingTargetTask = "Task"

	// ScalingPolicyTypeHorizontal is the default policy type applied by
	// ScalingPolicy.Canonicalize.
	ScalingPolicyTypeHorizontal = "horizontal"
)
// Canonicalize defaults the policy type to horizontal scaling when unset.
func (p *ScalingPolicy) Canonicalize() {
	if len(p.Type) == 0 {
		p.Type = ScalingPolicyTypeHorizontal
	}
}
// Copy returns a deep copy of the scaling policy; nil-safe. The opaque
// Policy document is deep-copied via copystructure; a copy failure panics
// since it indicates an unserializable policy value.
func (p *ScalingPolicy) Copy() *ScalingPolicy {
	if p == nil {
		return nil
	}

	opaquePolicyConfig, err := copystructure.Copy(p.Policy)
	if err != nil {
		panic(err.Error())
	}

	c := &ScalingPolicy{
		ID:          p.ID,
		Type:        p.Type,
		Policy:      opaquePolicyConfig.(map[string]interface{}),
		Min:         p.Min,
		Max:         p.Max,
		Enabled:     p.Enabled,
		CreateIndex: p.CreateIndex,
		ModifyIndex: p.ModifyIndex,
		Target:      make(map[string]string, len(p.Target)),
	}
	for k, v := range p.Target {
		c.Target[k] = v
	}
	return c
}
// Validate checks the scaling policy for a known type and sane Min/Max
// bounds. A nil policy is considered valid.
func (p *ScalingPolicy) Validate() error {
	if p == nil {
		return nil
	}
	var mErr multierror.Error

	// The type must be present and pass type-specific validation.
	if p.Type == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("missing scaling policy type"))
	} else {
		mErr.Errors = append(mErr.Errors, p.validateType().Errors...)
	}

	// Max must be present and non-negative, and no smaller than Min.
	switch {
	case p.Max < 0:
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("maximum count must be specified and non-negative"))
	case p.Max < p.Min:
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("maximum count must not be less than minimum count"))
	}

	// Min must be non-negative.
	if p.Min < 0 {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("minimum count must be specified and non-negative"))
	}

	return mErr.ErrorOrNil()
}
// validateTargetHorizontal checks that a Nomad horizontal policy's target
// identifies a namespace, job, and group. An empty target is accepted
// since it is probably not a Nomad horizontal policy.
func (p *ScalingPolicy) validateTargetHorizontal() (mErr multierror.Error) {
	if len(p.Target) == 0 {
		// This is probably not a Nomad horizontal policy
		return
	}

	// Nomad horizontal policies should have Namespace, Job and TaskGroup.
	required := []struct {
		key string
		msg string
	}{
		{ScalingTargetNamespace, "missing target namespace"},
		{ScalingTargetJob, "missing target job"},
		{ScalingTargetGroup, "missing target group"},
	}
	for _, req := range required {
		if p.Target[req.key] == "" {
			mErr.Errors = append(mErr.Errors, errors.New(req.msg))
		}
	}
	return
}
// Diff indicates whether the specification for a given scaling policy has
// changed, ignoring identity (ID) and Raft bookkeeping (Create/ModifyIndex)
// fields.
func (p *ScalingPolicy) Diff(p2 *ScalingPolicy) bool {
	// Compare against a copy of p2 whose identity/index fields are
	// normalized to p's, so only the specification contributes to the diff.
	// (The local was renamed from "copy", which shadowed the builtin.)
	other := *p2
	other.ID = p.ID
	other.CreateIndex = p.CreateIndex
	other.ModifyIndex = p.ModifyIndex
	return !reflect.DeepEqual(*p, other)
}
// TargetTaskGroup updates a ScalingPolicy target to specify a given task group
func (p *ScalingPolicy) TargetTaskGroup(job *Job, tg *TaskGroup) *ScalingPolicy {
	target := make(map[string]string, 3)
	target[ScalingTargetNamespace] = job.Namespace
	target[ScalingTargetJob] = job.ID
	target[ScalingTargetGroup] = tg.Name
	p.Target = target
	return p
}
// TargetTask updates a ScalingPolicy target to specify a given task
func (p *ScalingPolicy) TargetTask(job *Job, tg *TaskGroup, task *Task) *ScalingPolicy {
	// Establish the group-level target first, then narrow it to the task.
	p.TargetTaskGroup(job, tg).Target[ScalingTargetTask] = task.Name
	return p
}
// Stub returns the list-stub view of the policy, deep-copying the target
// map so the stub is independent of the policy.
func (p *ScalingPolicy) Stub() *ScalingPolicyListStub {
	target := make(map[string]string, len(p.Target))
	for k, v := range p.Target {
		target[k] = v
	}
	return &ScalingPolicyListStub{
		ID:          p.ID,
		Type:        p.Type,
		Target:      target,
		Enabled:     p.Enabled,
		CreateIndex: p.CreateIndex,
		ModifyIndex: p.ModifyIndex,
	}
}
// GetScalingPolicies returns a slice of all scaling policies for this job,
// combining group-level policies with any enterprise-defined ones.
func (j *Job) GetScalingPolicies() []*ScalingPolicy {
	policies := make([]*ScalingPolicy, 0)
	for _, group := range j.TaskGroups {
		if group.Scaling != nil {
			policies = append(policies, group.Scaling)
		}
	}
	return append(policies, j.GetEntScalingPolicies()...)
}
// ScalingPolicyListStub is used to return a subset of scaling policy information
// for the scaling policy list
type ScalingPolicyListStub struct {
	ID string
	Enabled bool
	Type string
	Target map[string]string
	// Raft indexes
	CreateIndex uint64
	ModifyIndex uint64
}
// RestartPolicy configures how Tasks are restarted when they crash or fail.
type RestartPolicy struct {
	// Attempts is the number of restarts that will occur in an interval.
	Attempts int

	// Interval is a duration in which we can limit the number of restarts
	// within.
	Interval time.Duration

	// Delay is the time between a failure and a restart.
	Delay time.Duration

	// Mode controls what happens when the task restarts more than attempt times
	// in an interval; one of RestartPolicyModeDelay or RestartPolicyModeFail.
	Mode string
}
// Copy returns a copy of the restart policy; nil-safe.
func (r *RestartPolicy) Copy() *RestartPolicy {
	if r == nil {
		return nil
	}
	c := *r
	return &c
}
// Validate checks that the restart mode is supported, the interval is at
// least the minimum, and the attempt/delay settings fit in the interval.
func (r *RestartPolicy) Validate() error {
	var mErr multierror.Error

	if r.Mode != RestartPolicyModeDelay && r.Mode != RestartPolicyModeFail {
		_ = multierror.Append(&mErr, fmt.Errorf("Unsupported restart mode: %q", r.Mode))
	}

	// Zero attempts only makes sense with fail mode; anything else is ambiguous.
	if r.Attempts == 0 && r.Mode != RestartPolicyModeFail {
		_ = multierror.Append(&mErr, fmt.Errorf("Restart policy %q with %d attempts is ambiguous", r.Mode, r.Attempts))
	}

	if r.Interval < RestartPolicyMinInterval {
		_ = multierror.Append(&mErr, fmt.Errorf("Interval can not be less than %v (got %v)", RestartPolicyMinInterval, r.Interval))
	}

	// All attempts together with their delays must be schedulable within
	// the interval.
	if time.Duration(r.Attempts)*r.Delay > r.Interval {
		_ = multierror.Append(&mErr,
			fmt.Errorf("Nomad can't restart the TaskGroup %v times in an interval of %v with a delay of %v", r.Attempts, r.Interval, r.Delay))
	}

	return mErr.ErrorOrNil()
}
// NewRestartPolicy returns a fresh copy of the default restart policy for
// the given job type, or nil for unrecognized types.
func NewRestartPolicy(jobType string) *RestartPolicy {
	var base RestartPolicy
	switch jobType {
	case JobTypeService, JobTypeSystem:
		base = DefaultServiceJobRestartPolicy
	case JobTypeBatch:
		base = DefaultBatchJobRestartPolicy
	default:
		return nil
	}
	return &base
}
// ReschedulePolicyMinInterval is the smallest interval accepted for a
// reschedule policy (enforced by ReschedulePolicy.Validate).
const ReschedulePolicyMinInterval = 15 * time.Second

// ReschedulePolicyMinDelay is the smallest delay accepted between
// reschedule attempts (enforced by ReschedulePolicy.Validate).
const ReschedulePolicyMinDelay = 5 * time.Second

// RescheduleDelayFunctions lists the valid DelayFunction values for a
// reschedule policy.
var RescheduleDelayFunctions = [...]string{"constant", "exponential", "fibonacci"}
// ReschedulePolicy configures how Tasks are rescheduled when they crash or fail.
type ReschedulePolicy struct {
	// Attempts limits the number of rescheduling attempts that can occur in an interval.
	Attempts int

	// Interval is a duration in which we can limit the number of reschedule attempts.
	Interval time.Duration

	// Delay is a minimum duration to wait between reschedule attempts.
	// The delay function determines how much subsequent reschedule attempts are delayed by.
	Delay time.Duration

	// DelayFunction determines how the delay progressively changes on subsequent reschedule
	// attempts. Valid values are "exponential", "constant", and "fibonacci"
	// (see RescheduleDelayFunctions).
	DelayFunction string

	// MaxDelay is an upper bound on the delay.
	MaxDelay time.Duration

	// Unlimited allows infinite rescheduling attempts. Only allowed when delay is set
	// between reschedule attempts.
	Unlimited bool
}
// Copy returns a copy of the reschedule policy; nil-safe.
func (r *ReschedulePolicy) Copy() *ReschedulePolicy {
	if r == nil {
		return nil
	}
	c := *r
	return &c
}
// Enabled reports whether the policy allows any rescheduling: a non-nil
// policy with positive attempts or unlimited retries.
func (r *ReschedulePolicy) Enabled() bool {
	if r == nil {
		return false
	}
	return r.Attempts > 0 || r.Unlimited
}
// Validate uses different criteria to validate the reschedule policy:
// Delay must be a minimum of 5 seconds;
// Delay Ceiling is ignored if Delay Function is "constant";
// Number of possible attempts is validated, given the interval, delay and
// delay function.
func (r *ReschedulePolicy) Validate() error {
	if !r.Enabled() {
		return nil
	}
	var mErr multierror.Error
	// Check for ambiguous/confusing settings
	if r.Attempts > 0 {
		if r.Interval <= 0 {
			_ = multierror.Append(&mErr, fmt.Errorf("Interval must be a non zero value if Attempts > 0"))
		}
		if r.Unlimited {
			_ = multierror.Append(&mErr, fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, "+
				"and Unlimited = %v is ambiguous", r.Attempts, r.Interval, r.Unlimited))
			_ = multierror.Append(&mErr, errors.New("If Attempts >0, Unlimited cannot also be set to true"))
		}
	}

	delayPreCheck := true
	// Delay should be bigger than the default
	if r.Delay.Nanoseconds() < ReschedulePolicyMinDelay.Nanoseconds() {
		_ = multierror.Append(&mErr, fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, r.Delay))
		delayPreCheck = false
	}

	// Must use a valid delay function
	if !isValidDelayFunction(r.DelayFunction) {
		_ = multierror.Append(&mErr, fmt.Errorf("Invalid delay function %q, must be one of %q", r.DelayFunction, RescheduleDelayFunctions))
		delayPreCheck = false
	}

	// Validate MaxDelay if not using linear delay progression
	if r.DelayFunction != "constant" {
		// Bug fix: this message previously reported r.Delay even though the
		// check is against r.MaxDelay; report the offending MaxDelay value.
		if r.MaxDelay.Nanoseconds() < ReschedulePolicyMinDelay.Nanoseconds() {
			_ = multierror.Append(&mErr, fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, r.MaxDelay))
			delayPreCheck = false
		}
		if r.MaxDelay < r.Delay {
			_ = multierror.Append(&mErr, fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)", r.Delay, r.MaxDelay))
			delayPreCheck = false
		}
	}

	// Validate Interval and other delay parameters if attempts are limited
	if !r.Unlimited {
		if r.Interval.Nanoseconds() < ReschedulePolicyMinInterval.Nanoseconds() {
			_ = multierror.Append(&mErr, fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, r.Interval))
		}
		if !delayPreCheck {
			// We can't cross validate the rest of the delay params if delayPreCheck fails, so return early
			return mErr.ErrorOrNil()
		}
		if crossValidationErr := r.validateDelayParams(); crossValidationErr != nil {
			_ = multierror.Append(&mErr, crossValidationErr)
		}
	}
	return mErr.ErrorOrNil()
}
// isValidDelayFunction reports whether delayFunc is one of the supported
// reschedule delay functions.
func isValidDelayFunction(delayFunc string) bool {
	for _, fn := range RescheduleDelayFunctions {
		if delayFunc == fn {
			return true
		}
	}
	return false
}
// validateDelayParams cross-checks attempts, interval, delay and delay
// function; when the configured attempts cannot fit, it returns an error
// describing how many attempts fit and what interval would be needed.
func (r *ReschedulePolicy) validateDelayParams() error {
	ok, possibleAttempts, recommendedInterval := r.viableAttempts()
	if ok {
		return nil
	}

	var mErr multierror.Error
	// The constant delay function has no ceiling, so its message omits it.
	if r.DelayFunction == "constant" {
		_ = multierror.Append(&mErr, fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and "+
			"delay function %q", possibleAttempts, r.Interval, r.Delay, r.DelayFunction))
	} else {
		_ = multierror.Append(&mErr, fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
			"delay function %q, and delay ceiling %v", possibleAttempts, r.Interval, r.Delay, r.DelayFunction, r.MaxDelay))
	}
	_ = multierror.Append(&mErr, fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", recommendedInterval.Round(time.Second), r.Attempts))
	return mErr.ErrorOrNil()
}
// viableAttempts computes whether r.Attempts reschedule attempts fit
// within r.Interval under the configured delay function. It returns
// (valid, possibleAttempts, recommendedInterval): possibleAttempts is how
// many attempts actually fit when invalid, and recommendedInterval is an
// interval large enough to accommodate the configured attempts. An
// unrecognized delay function yields (false, 0, 0).
func (r *ReschedulePolicy) viableAttempts() (bool, int, time.Duration) {
	var possibleAttempts int
	var recommendedInterval time.Duration
	valid := true
	switch r.DelayFunction {
	case "constant":
		// Every attempt waits exactly r.Delay.
		recommendedInterval = time.Duration(r.Attempts) * r.Delay
		if r.Interval < recommendedInterval {
			possibleAttempts = int(r.Interval / r.Delay)
			valid = false
		}
	case "exponential":
		// Delay doubles each attempt (2^i * Delay) until capped at MaxDelay.
		for i := 0; i < r.Attempts; i++ {
			nextDelay := time.Duration(math.Pow(2, float64(i))) * r.Delay
			if nextDelay > r.MaxDelay {
				nextDelay = r.MaxDelay
				recommendedInterval += nextDelay
			} else {
				// NOTE(review): this branch assigns rather than
				// accumulates, so below the ceiling recommendedInterval
				// tracks only the latest delay — confirm the asymmetry
				// with the ceiling branch above is intentional.
				recommendedInterval = nextDelay
			}
			if recommendedInterval < r.Interval {
				possibleAttempts++
			}
		}
		if possibleAttempts < r.Attempts {
			valid = false
		}
	case "fibonacci":
		// Each delay is the sum of the previous two, seeded with r.Delay
		// twice, until the ceiling is reached.
		var slots []time.Duration
		slots = append(slots, r.Delay)
		slots = append(slots, r.Delay)
		reachedCeiling := false
		for i := 2; i < r.Attempts; i++ {
			var nextDelay time.Duration
			if reachedCeiling {
				//switch to linear
				nextDelay = slots[i-1] + r.MaxDelay
			} else {
				nextDelay = slots[i-1] + slots[i-2]
				if nextDelay > r.MaxDelay {
					nextDelay = r.MaxDelay
					reachedCeiling = true
				}
			}
			slots = append(slots, nextDelay)
		}
		recommendedInterval = slots[len(slots)-1]
		if r.Interval < recommendedInterval {
			valid = false
			// calculate possible attempts
			for i := 0; i < len(slots); i++ {
				if slots[i] > r.Interval {
					possibleAttempts = i
					break
				}
			}
		}
	default:
		return false, 0, 0
	}
	if possibleAttempts < 0 { // can happen if delay is bigger than interval
		possibleAttempts = 0
	}
	return valid, possibleAttempts, recommendedInterval
}
// NewReschedulePolicy returns a fresh copy of the default reschedule
// policy for the given job type, or nil for unrecognized types.
func NewReschedulePolicy(jobType string) *ReschedulePolicy {
	var base ReschedulePolicy
	switch jobType {
	case JobTypeService:
		base = DefaultServiceJobReschedulePolicy
	case JobTypeBatch:
		base = DefaultBatchJobReschedulePolicy
	default:
		return nil
	}
	return &base
}
// Valid values for MigrateStrategy.HealthCheck.
const (
	MigrateStrategyHealthChecks = "checks"
	MigrateStrategyHealthStates = "task_states"
)
// MigrateStrategy controls how allocations are migrated off draining
// nodes; NOTE(review): purpose inferred from the name and
// DefaultMigrateStrategy — confirm against callers.
type MigrateStrategy struct {
	// MaxParallel bounds concurrent migrations; must be >= 0 (see Validate).
	MaxParallel int
	// HealthCheck is one of the MigrateStrategyHealth* constants.
	HealthCheck string
	// MinHealthyTime and HealthyDeadline must be non-negative and
	// MinHealthyTime must not exceed HealthyDeadline (see Validate).
	MinHealthyTime time.Duration
	HealthyDeadline time.Duration
}
// DefaultMigrateStrategy is used for backwards compat with pre-0.8 Allocations
// that lack an update strategy.
//
// This function should match its counterpart in api/tasks.go
func DefaultMigrateStrategy() *MigrateStrategy {
	m := new(MigrateStrategy)
	m.MaxParallel = 1
	m.HealthCheck = MigrateStrategyHealthChecks
	m.MinHealthyTime = 10 * time.Second
	m.HealthyDeadline = 5 * time.Minute
	return m
}
// Validate checks the migrate strategy's parallelism bound, health-check
// mode, and timing fields for consistency.
func (m *MigrateStrategy) Validate() error {
	var mErr multierror.Error

	if m.MaxParallel < 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("MaxParallel must be >= 0 but found %d", m.MaxParallel))
	}

	// The health-check mode may only be empty when migrations are disabled
	// (MaxParallel == 0).
	switch m.HealthCheck {
	case MigrateStrategyHealthChecks, MigrateStrategyHealthStates:
		// recognized modes
	case "":
		if m.MaxParallel > 0 {
			_ = multierror.Append(&mErr, fmt.Errorf("Missing HealthCheck"))
		}
	default:
		_ = multierror.Append(&mErr, fmt.Errorf("Invalid HealthCheck: %q", m.HealthCheck))
	}

	// Durations must be non-negative and mutually consistent.
	if m.MinHealthyTime < 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("MinHealthyTime is %s and must be >= 0", m.MinHealthyTime))
	}
	if m.HealthyDeadline < 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("HealthyDeadline is %s and must be >= 0", m.HealthyDeadline))
	}
	if m.MinHealthyTime > m.HealthyDeadline {
		_ = multierror.Append(&mErr, fmt.Errorf("MinHealthyTime must be less than HealthyDeadline"))
	}

	return mErr.ErrorOrNil()
}
// TaskGroup is an atomic unit of placement. Each task group belongs to
// a job and may contain any number of tasks. A task group supports running
// many replicas of the same configuration.
type TaskGroup struct {
	// Name of the task group
	Name string

	// Count is the number of replicas of this task group that should
	// be scheduled.
	Count int

	// Update is used to control the update strategy for this task group
	Update *UpdateStrategy

	// Migrate is used to control the migration strategy for this task group
	Migrate *MigrateStrategy

	// Constraints can be specified at a task group level and apply to
	// all the tasks contained.
	Constraints []*Constraint

	// Scaling is the autoscaling policy for the TaskGroup
	Scaling *ScalingPolicy

	// RestartPolicy of a TaskGroup
	RestartPolicy *RestartPolicy

	// Tasks are the collection of tasks that this task group needs to run
	Tasks []*Task

	// EphemeralDisk is the disk resources that the task group requests
	EphemeralDisk *EphemeralDisk

	// Meta is used to associate arbitrary metadata with this
	// task group. This is opaque to Nomad.
	Meta map[string]string

	// ReschedulePolicy is used to configure how the scheduler should
	// retry failed allocations.
	ReschedulePolicy *ReschedulePolicy

	// Affinities can be specified at the task group level to express
	// scheduling preferences.
	Affinities []*Affinity

	// Spreads can be specified at the task group level to express spreading
	// allocations across a desired attribute, such as datacenter
	Spreads []*Spread

	// Networks are the network configuration for the task group. This can be
	// overridden in the task.
	Networks Networks

	// Consul configuration specific to this task group
	Consul *Consul

	// Services this group provides
	Services []*Service

	// Volumes is a map of volumes that have been requested by the task group.
	Volumes map[string]*VolumeRequest

	// ShutdownDelay is the amount of time to wait between deregistering
	// group services in consul and stopping tasks.
	ShutdownDelay *time.Duration

	// StopAfterClientDisconnect, if set, configures the client to stop the task group
	// after this duration since the last known good heartbeat
	StopAfterClientDisconnect *time.Duration

	// MaxClientDisconnect, if set, configures the client to allow placed
	// allocations for tasks in this group to attempt to resume running without a restart.
	MaxClientDisconnect *time.Duration
}
// Copy returns a deep copy of the task group; a nil receiver yields nil.
// It starts from a shallow struct copy, then replaces every reference-typed
// field with its own deep copy so the result shares no mutable state with
// the original.
func (tg *TaskGroup) Copy() *TaskGroup {
	if tg == nil {
		return nil
	}
	ntg := new(TaskGroup)
	*ntg = *tg
	// Deep-copy the nested policy/config objects (each Copy is nil-safe).
	ntg.Update = ntg.Update.Copy()
	ntg.Constraints = CopySliceConstraints(ntg.Constraints)
	ntg.RestartPolicy = ntg.RestartPolicy.Copy()
	ntg.ReschedulePolicy = ntg.ReschedulePolicy.Copy()
	ntg.Affinities = CopySliceAffinities(ntg.Affinities)
	ntg.Spreads = CopySliceSpreads(ntg.Spreads)
	ntg.Volumes = CopyMapVolumeRequest(ntg.Volumes)
	ntg.Scaling = ntg.Scaling.Copy()
	ntg.Consul = ntg.Consul.Copy()

	// Copy the network objects
	if tg.Networks != nil {
		n := len(tg.Networks)
		ntg.Networks = make([]*NetworkResource, n)
		for i := 0; i < n; i++ {
			ntg.Networks[i] = tg.Networks[i].Copy()
		}
	}

	// Copy each task individually.
	if tg.Tasks != nil {
		tasks := make([]*Task, len(ntg.Tasks))
		for i, t := range ntg.Tasks {
			tasks[i] = t.Copy()
		}
		ntg.Tasks = tasks
	}

	ntg.Meta = helper.CopyMapStringString(ntg.Meta)

	if tg.EphemeralDisk != nil {
		ntg.EphemeralDisk = tg.EphemeralDisk.Copy()
	}

	if tg.Services != nil {
		ntg.Services = make([]*Service, len(tg.Services))
		for i, s := range tg.Services {
			ntg.Services[i] = s.Copy()
		}
	}

	// NOTE(review): the duration pointers below are reassigned to the same
	// pointers already installed by the shallow copy, so the copies alias
	// the original durations — confirm whether that sharing is intended.
	if tg.ShutdownDelay != nil {
		ntg.ShutdownDelay = tg.ShutdownDelay
	}

	if tg.StopAfterClientDisconnect != nil {
		ntg.StopAfterClientDisconnect = tg.StopAfterClientDisconnect
	}

	if tg.MaxClientDisconnect != nil {
		ntg.MaxClientDisconnect = tg.MaxClientDisconnect
	}

	return ntg
}
// Canonicalize is used to canonicalize fields in the TaskGroup.
func (tg *TaskGroup) Canonicalize(job *Job) {
	// Ensure that an empty and nil map are treated the same to avoid scheduling
	// problems since we use reflect DeepEquals.
	if len(tg.Meta) == 0 {
		tg.Meta = nil
	}

	// Fill in defaults for any policy the user left unset.
	if tg.RestartPolicy == nil {
		tg.RestartPolicy = NewRestartPolicy(job.Type)
	}
	if tg.ReschedulePolicy == nil {
		tg.ReschedulePolicy = NewReschedulePolicy(job.Type)
	}
	// Service jobs additionally get a default migrate strategy.
	if job.Type == JobTypeService && tg.Migrate == nil {
		tg.Migrate = DefaultMigrateStrategy()
	}
	// Set a default ephemeral disk object if the user has not requested one.
	if tg.EphemeralDisk == nil {
		tg.EphemeralDisk = DefaultEphemeralDisk()
	}
	if tg.Scaling != nil {
		tg.Scaling.Canonicalize()
	}

	// Canonicalize the nested objects.
	for _, svc := range tg.Services {
		svc.Canonicalize(job.Name, tg.Name, "group", job.Namespace)
	}
	for _, net := range tg.Networks {
		net.Canonicalize()
	}
	for _, t := range tg.Tasks {
		t.Canonicalize(job, tg)
	}
}
// Validate is used to check a task group for reasonable configuration.
// It accumulates every problem found into a multierror rather than
// stopping at the first, covering: naming, counts, disconnect settings,
// constraints/affinities/spreads (with system-job restrictions), restart,
// reschedule, disk, update and migrate strategies, leader uniqueness,
// volumes, networks, services, script checks, scaling, and per-task
// validation.
func (tg *TaskGroup) Validate(j *Job) error {
	var mErr multierror.Error
	if tg.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task group name"))
	} else if strings.Contains(tg.Name, "\000") {
		// NUL would break composite keys built with "\000" separators.
		mErr.Errors = append(mErr.Errors, errors.New("Task group name contains null character"))
	}
	if tg.Count < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("Task group count can't be negative"))
	}
	if len(tg.Tasks) == 0 {
		// could be a lone consul gateway inserted by the connect mutator
		mErr.Errors = append(mErr.Errors, errors.New("Missing tasks for task group"))
	}

	// The two client-disconnect behaviors are mutually exclusive.
	if tg.MaxClientDisconnect != nil && tg.StopAfterClientDisconnect != nil {
		mErr.Errors = append(mErr.Errors, errors.New("Task group cannot be configured with both max_client_disconnect and stop_after_client_disconnect"))
	}
	if tg.MaxClientDisconnect != nil && *tg.MaxClientDisconnect < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("max_client_disconnect cannot be negative"))
	}

	for idx, constr := range tg.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	// Affinities are forbidden on system jobs; validated individually otherwise.
	if j.Type == JobTypeSystem {
		if tg.Affinities != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza"))
		}
	} else {
		for idx, affinity := range tg.Affinities {
			if err := affinity.Validate(); err != nil {
				outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err)
				mErr.Errors = append(mErr.Errors, outer)
			}
		}
	}

	// A restart policy must be present (Canonicalize installs a default).
	if tg.RestartPolicy != nil {
		if err := tg.RestartPolicy.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	} else {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a restart policy", tg.Name))
	}

	// Spreads are forbidden on system jobs; validated individually otherwise.
	if j.Type == JobTypeSystem {
		if tg.Spreads != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread stanza"))
		}
	} else {
		for idx, spread := range tg.Spreads {
			if err := spread.Validate(); err != nil {
				outer := fmt.Errorf("Spread %d validation failed: %s", idx+1, err)
				mErr.Errors = append(mErr.Errors, outer)
			}
		}
	}

	// System jobs must not have a reschedule policy; all other types must.
	if j.Type == JobTypeSystem {
		if tg.ReschedulePolicy != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs should not have a reschedule policy"))
		}
	} else {
		if tg.ReschedulePolicy != nil {
			if err := tg.ReschedulePolicy.Validate(); err != nil {
				mErr.Errors = append(mErr.Errors, err)
			}
		} else {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a reschedule policy", tg.Name))
		}
	}

	// An ephemeral disk must be present (Canonicalize installs a default).
	if tg.EphemeralDisk != nil {
		if err := tg.EphemeralDisk.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	} else {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have an ephemeral disk object", tg.Name))
	}

	// Validate the update strategy
	if u := tg.Update; u != nil {
		switch j.Type {
		case JobTypeService, JobTypeSystem:
		default:
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow update block", j.Type))
		}
		if err := u.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, err)
		}
	}

	// Validate the migration strategy (only service jobs may have one).
	switch j.Type {
	case JobTypeService:
		if tg.Migrate != nil {
			if err := tg.Migrate.Validate(); err != nil {
				mErr.Errors = append(mErr.Errors, err)
			}
		}
	default:
		if tg.Migrate != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow migrate block", j.Type))
		}
	}

	// Check that there is only one leader task if any, and that task names
	// are present and unique within the group.
	tasks := make(map[string]int)
	leaderTasks := 0
	for idx, task := range tg.Tasks {
		if task.Name == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d missing name", idx+1))
		} else if existing, ok := tasks[task.Name]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d redefines '%s' from task %d", idx+1, task.Name, existing+1))
		} else {
			tasks[task.Name] = idx
		}

		if task.Leader {
			leaderTasks++
		}
	}

	if leaderTasks > 1 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Only one task may be marked as leader"))
	}

	// Validate the volume requests against the group count and canary count.
	var canaries int
	if tg.Update != nil {
		canaries = tg.Update.Canary
	}
	for name, volReq := range tg.Volumes {
		if err := volReq.Validate(tg.Count, canaries); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf(
				"Task group volume validation for %s failed: %v", name, err))
		}
	}

	// Validate task group and task network resources
	if err := tg.validateNetworks(); err != nil {
		outer := fmt.Errorf("Task group network validation failed: %v", err)
		mErr.Errors = append(mErr.Errors, outer)
	}

	// Validate task group and task services
	if err := tg.validateServices(); err != nil {
		outer := fmt.Errorf("Task group service validation failed: %v", err)
		mErr.Errors = append(mErr.Errors, outer)
	}

	// Validate group service script-checks
	if err := tg.validateScriptChecksInGroupServices(); err != nil {
		outer := fmt.Errorf("Task group service check validation failed: %v", err)
		mErr.Errors = append(mErr.Errors, outer)
	}

	// Validate the scaling policy
	if err := tg.validateScalingPolicy(j); err != nil {
		outer := fmt.Errorf("Task group scaling policy validation failed: %v", err)
		mErr.Errors = append(mErr.Errors, outer)
	}

	// Validate the tasks
	for _, task := range tg.Tasks {
		// Validate the task does not reference undefined volume mounts
		for i, mnt := range task.VolumeMounts {
			if mnt.Volume == "" {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %s has a volume mount (%d) referencing an empty volume", task.Name, i))
				continue
			}

			if _, ok := tg.Volumes[mnt.Volume]; !ok {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %s has a volume mount (%d) referencing undefined volume %s", task.Name, i, mnt.Volume))
				continue
			}
		}

		if err := task.Validate(tg.EphemeralDisk, j.Type, tg.Services, tg.Networks); err != nil {
			outer := fmt.Errorf("Task %s validation failed: %v", task.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}

	return mErr.ErrorOrNil()
}
// validateNetworks checks group- and task-level network blocks for
// duplicate port labels, duplicate static port reservations (tracked per
// host network), out-of-range port values and mappings, and (for group
// networks) a well-formed hostname.
func (tg *TaskGroup) validateNetworks() error {
	var mErr multierror.Error
	// Every port label seen so far, mapped to a description of its owner.
	portLabels := make(map[string]string)
	// host_network -> static port tracking
	staticPortsIndex := make(map[string]map[int]string)

	// First pass: group-level networks.
	for _, net := range tg.Networks {
		for _, port := range append(net.ReservedPorts, net.DynamicPorts...) {
			if other, ok := portLabels[port.Label]; ok {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("Port label %s already in use by %s", port.Label, other))
			} else {
				portLabels[port.Label] = "taskgroup network"
			}

			// A non-zero Value is a static port reservation; track it per
			// host network so duplicates are caught.
			if port.Value != 0 {
				hostNetwork := port.HostNetwork
				if hostNetwork == "" {
					hostNetwork = "default"
				}
				staticPorts, ok := staticPortsIndex[hostNetwork]
				if !ok {
					staticPorts = make(map[int]string)
				}
				// static port
				if other, ok := staticPorts[port.Value]; ok {
					err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other)
					mErr.Errors = append(mErr.Errors, err)
				} else if port.Value > math.MaxUint16 {
					err := fmt.Errorf("Port %s (%d) cannot be greater than %d", port.Label, port.Value, math.MaxUint16)
					mErr.Errors = append(mErr.Errors, err)
				} else {
					staticPorts[port.Value] = fmt.Sprintf("taskgroup network:%s", port.Label)
					staticPortsIndex[hostNetwork] = staticPorts
				}
			}

			// The mapped-to port must be in [-1, MaxUint16].
			if port.To < -1 {
				err := fmt.Errorf("Port %q cannot be mapped to negative value %d", port.Label, port.To)
				mErr.Errors = append(mErr.Errors, err)
			} else if port.To > math.MaxUint16 {
				err := fmt.Errorf("Port %q cannot be mapped to a port (%d) greater than %d", port.Label, port.To, math.MaxUint16)
				mErr.Errors = append(mErr.Errors, err)
			}
		}
		// Validate the hostname field to be a valid DNS name. If the parameter
		// looks like it includes an interpolation value, we skip this. It
		// would be nice to validate additional parameters, but this isn't the
		// right place.
		if net.Hostname != "" && !strings.Contains(net.Hostname, "${") {
			if _, ok := dns.IsDomainName(net.Hostname); !ok {
				mErr.Errors = append(mErr.Errors, errors.New("Hostname is not a valid DNS name"))
			}
		}
	}

	// Check for duplicate tasks or port labels, and no duplicated static ports
	for _, task := range tg.Tasks {
		if task.Resources == nil {
			continue
		}

		for _, net := range task.Resources.Networks {
			for _, port := range append(net.ReservedPorts, net.DynamicPorts...) {
				if other, ok := portLabels[port.Label]; ok {
					mErr.Errors = append(mErr.Errors, fmt.Errorf("Port label %s already in use by %s", port.Label, other))
				}

				if port.Value != 0 {
					hostNetwork := port.HostNetwork
					if hostNetwork == "" {
						hostNetwork = "default"
					}
					staticPorts, ok := staticPortsIndex[hostNetwork]
					if !ok {
						staticPorts = make(map[int]string)
					}
					if other, ok := staticPorts[port.Value]; ok {
						err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other)
						mErr.Errors = append(mErr.Errors, err)
					} else if port.Value > math.MaxUint16 {
						err := fmt.Errorf("Port %s (%d) cannot be greater than %d", port.Label, port.Value, math.MaxUint16)
						mErr.Errors = append(mErr.Errors, err)
					} else {
						staticPorts[port.Value] = fmt.Sprintf("%s:%s", task.Name, port.Label)
						staticPortsIndex[hostNetwork] = staticPorts
					}
				}
			}
		}
	}
	return mErr.ErrorOrNil()
}
// validateServices runs Service.Validate() on group-level services, checks
// group service checks that refer to tasks only refer to tasks that exist,
// and enforces that a single service provider is used across the whole group.
func (tg *TaskGroup) validateServices() error {
	var mErr multierror.Error
	knownTasks := make(map[string]struct{})
	// Track the providers used for this task group. Currently, Nomad only
	// allows the use of a single service provider within a task group.
	configuredProviders := make(map[string]struct{})
	// Create a map of known tasks and their services so we can compare
	// vs the group-level services and checks
	for _, task := range tg.Tasks {
		knownTasks[task.Name] = struct{}{}
		if task.Services == nil {
			continue
		}
		for _, service := range task.Services {
			for _, check := range service.Checks {
				// Assigning a task is only meaningful for group-level checks;
				// task-level checks already belong to a task.
				if check.TaskName != "" {
					mErr.Errors = append(mErr.Errors, fmt.Errorf("Check %s is invalid: only task group service checks can be assigned tasks", check.Name))
				}
			}
			// Add the service provider to the tracking, if it has not already
			// been seen.
			if _, ok := configuredProviders[service.Provider]; !ok {
				configuredProviders[service.Provider] = struct{}{}
			}
		}
	}
	for i, service := range tg.Services {
		// Add the service provider to the tracking, if it has not already been
		// seen.
		if _, ok := configuredProviders[service.Provider]; !ok {
			configuredProviders[service.Provider] = struct{}{}
		}
		if err := service.Validate(); err != nil {
			outer := fmt.Errorf("Service[%d] %s validation failed: %s", i, service.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
			// we continue (skip the rest of this service) to avoid the risk of
			// crashing on null-pointer access in a later step, accepting that
			// we might miss out on error messages to provide the user.
			continue
		}
		if service.AddressMode == AddressModeDriver {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q cannot use address_mode=\"driver\", only services defined in a \"task\" block can use this mode", service.Name))
		}
		for _, check := range service.Checks {
			if check.TaskName != "" {
				// Only checks that execute inside a task need a task handle.
				if check.Type != ServiceCheckScript && check.Type != ServiceCheckGRPC {
					mErr.Errors = append(mErr.Errors,
						fmt.Errorf("Check %s invalid: only script and gRPC checks should have tasks", check.Name))
				}
				if check.AddressMode == AddressModeDriver {
					mErr.Errors = append(mErr.Errors, fmt.Errorf("Check %q invalid: cannot use address_mode=\"driver\", only checks defined in a \"task\" service block can use this mode", service.Name))
				}
				// The referenced task must exist in this group.
				if _, ok := knownTasks[check.TaskName]; !ok {
					mErr.Errors = append(mErr.Errors,
						fmt.Errorf("Check %s invalid: refers to non-existent task %s", check.Name, check.TaskName))
				}
			}
		}
	}
	// The initial feature release of native service discovery only allows for
	// a single service provider to be used across all services in a task
	// group.
	if len(configuredProviders) > 1 {
		mErr.Errors = append(mErr.Errors,
			errors.New("Multiple service providers used: task group services must use the same provider"))
	}
	return mErr.ErrorOrNil()
}
// validateScriptChecksInGroupServices ensures group-level services with script
// checks know what task driver to use. Either the service.task or the
// service.check.task parameter must be configured.
func (tg *TaskGroup) validateScriptChecksInGroupServices() error {
	var mErr multierror.Error
	for _, service := range tg.Services {
		if service.TaskName == "" {
			for _, check := range service.Checks {
				// Use the ServiceCheckScript constant rather than a raw
				// "script" literal, for consistency with the other check-type
				// comparisons in this file (e.g. TaskGroup.validateServices).
				if check.Type == ServiceCheckScript && check.TaskName == "" {
					mErr.Errors = append(mErr.Errors,
						fmt.Errorf("Service [%s]->%s or Check %s must specify task parameter",
							tg.Name, service.Name, check.Name,
						))
				}
			}
		}
	}
	return mErr.ErrorOrNil()
}
// validateScalingPolicy ensures that the scaling policy has consistent
// min and max, not in conflict with the task group count.
func (tg *TaskGroup) validateScalingPolicy(j *Job) error {
	if tg.Scaling == nil {
		return nil
	}
	var mErr multierror.Error
	if err := tg.Scaling.Validate(); err != nil {
		// prefix scaling policy errors
		if me, ok := err.(*multierror.Error); ok {
			for _, e := range me.Errors {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("Scaling policy invalid: %s", e))
			}
		} else {
			// Previously a plain (non-multierror) error was silently dropped
			// here; surface it with the same prefix instead.
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Scaling policy invalid: %s", err))
		}
	}
	// The group count must not exceed the policy maximum.
	if tg.Scaling.Max < int64(tg.Count) {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("Scaling policy invalid: task group count must not be greater than maximum count in scaling policy"))
	}
	// The group count must meet the policy minimum, except that multiregion
	// jobs registered in the "global" region are allowed a zero count.
	if int64(tg.Count) < tg.Scaling.Min && !(j.IsMultiregion() && tg.Count == 0 && j.Region == "global") {
		mErr.Errors = append(mErr.Errors,
			fmt.Errorf("Scaling policy invalid: task group count must not be less than minimum count in scaling policy"))
	}
	return mErr.ErrorOrNil()
}
// Warnings returns a list of warnings that may be from dubious settings or
// deprecation warnings.
func (tg *TaskGroup) Warnings(j *Job) error {
	var warnings multierror.Error

	// Warn when a destructive update would replace every allocation at once.
	if update := tg.Update; update != nil {
		zeroCountMultiregion := j.IsMultiregion() && tg.Count == 0
		if update.MaxParallel > tg.Count && !zeroCountMultiregion {
			warnings.Errors = append(warnings.Errors,
				fmt.Errorf("Update max parallel count is greater than task group count (%d > %d). "+
					"A destructive change would result in the simultaneous replacement of all allocations.", update.MaxParallel, tg.Count))
		}
	}

	// Warn about the deprecated mbits network field.
	if len(tg.Networks) > 0 && tg.Networks[0].MBits > 0 {
		warnings.Errors = append(warnings.Errors, fmt.Errorf("mbits has been deprecated as of Nomad 0.12.0. Please remove mbits from the network block"))
	}

	// Collect per-task warnings, prefixed by the task name.
	for _, task := range tg.Tasks {
		if warning := task.Warnings(); warning != nil {
			warnings.Errors = append(warnings.Errors,
				multierror.Prefix(warning, fmt.Sprintf("Task %q:", task.Name)))
		}
	}

	return warnings.ErrorOrNil()
}
// LookupTask finds a task by name, returning nil when no task in the group
// has the given name.
func (tg *TaskGroup) LookupTask(name string) *Task {
	for i := range tg.Tasks {
		if task := tg.Tasks[i]; task.Name == name {
			return task
		}
	}
	return nil
}
// UsesConnect for convenience returns true if the TaskGroup contains at least
// one service that makes use of Consul Connect features.
//
// Currently used for validating that the task group contains one or more connect
// aware services before generating a service identity token.
func (tg *TaskGroup) UsesConnect() bool {
	for _, svc := range tg.Services {
		connect := svc.Connect
		if connect == nil {
			continue
		}
		if connect.IsNative() || connect.HasSidecar() || connect.IsGateway() {
			return true
		}
	}
	return false
}
// UsesConnectGateway for convenience returns true if the TaskGroup contains at
// least one service that makes use of Consul Connect Gateway features.
func (tg *TaskGroup) UsesConnectGateway() bool {
	for _, svc := range tg.Services {
		if c := svc.Connect; c != nil && c.IsGateway() {
			return true
		}
	}
	return false
}
// GoString implements fmt.GoStringer, expanding the pointed-to task group's
// fields (rather than just the pointer value) in %#v output.
func (tg *TaskGroup) GoString() string {
	return fmt.Sprintf("*%#v", *tg)
}
// CheckRestart describes if and when a task should be restarted based on
// failing health checks.
type CheckRestart struct {
	Limit          int           // Restart task after this many unhealthy intervals
	Grace          time.Duration // Grace time to give tasks after starting to get healthy
	IgnoreWarnings bool          // If true treat checks in `warning` as passing
}
// Copy returns a copy of the CheckRestart, or nil for a nil receiver. The
// struct holds only value fields, so a plain dereference-copy suffices.
func (c *CheckRestart) Copy() *CheckRestart {
	if c == nil {
		return nil
	}
	copied := *c
	return &copied
}
// Equals reports whether two CheckRestart configurations are identical.
// When either side is nil, they are equal only if both are nil.
func (c *CheckRestart) Equals(o *CheckRestart) bool {
	if c == nil || o == nil {
		return c == o
	}
	return c.Limit == o.Limit &&
		c.Grace == o.Grace &&
		c.IgnoreWarnings == o.IgnoreWarnings
}
// Validate checks that Limit and Grace are non-negative. A nil receiver is
// valid (no check_restart configured).
func (c *CheckRestart) Validate() error {
	if c == nil {
		return nil
	}
	var errs multierror.Error
	if c.Limit < 0 {
		errs.Errors = append(errs.Errors, fmt.Errorf("limit must be greater than or equal to 0 but found %d", c.Limit))
	}
	if c.Grace < 0 {
		errs.Errors = append(errs.Errors, fmt.Errorf("grace period must be greater than or equal to 0 but found %d", c.Grace))
	}
	return errs.ErrorOrNil()
}
const (
	// DefaultKillTimeout is the default timeout between signaling a task it
	// will be killed and killing it. Applied by Task.Canonicalize when
	// KillTimeout is unset.
	DefaultKillTimeout = 5 * time.Second
)
// LogConfig provides configuration for log rotation
type LogConfig struct {
	MaxFiles      int // number of rotated files to retain
	MaxFileSizeMB int // size cap per file, in megabytes
}

// Equals reports whether two log configurations are identical. When either
// side is nil, they are equal only if both are nil.
func (l *LogConfig) Equals(o *LogConfig) bool {
	if l == nil || o == nil {
		return l == o
	}
	return l.MaxFiles == o.MaxFiles && l.MaxFileSizeMB == o.MaxFileSizeMB
}

// Copy returns a copy of the log configuration, or nil for a nil receiver.
func (l *LogConfig) Copy() *LogConfig {
	if l == nil {
		return nil
	}
	clone := *l
	return &clone
}

// DefaultLogConfig returns the default LogConfig values.
func DefaultLogConfig() *LogConfig {
	return &LogConfig{
		MaxFiles:      10,
		MaxFileSizeMB: 10,
	}
}
// Validate returns an error if the log config specified are less than
// the minimum allowed (at least one file of at least one megabyte).
func (l *LogConfig) Validate() error {
	var errs multierror.Error
	if l.MaxFiles < 1 {
		errs.Errors = append(errs.Errors, fmt.Errorf("minimum number of files is 1; got %d", l.MaxFiles))
	}
	if l.MaxFileSizeMB < 1 {
		errs.Errors = append(errs.Errors, fmt.Errorf("minimum file size is 1MB; got %d", l.MaxFileSizeMB))
	}
	return errs.ErrorOrNil()
}
// Task is a single process typically that is executed as part of a task group.
type Task struct {
	// Name of the task
	Name string

	// Driver is used to control which driver is used
	Driver string

	// User is used to determine which user will run the task. It defaults to
	// the same user the Nomad client is being run as.
	User string

	// Config is provided to the driver to initialize
	Config map[string]interface{}

	// Map of environment variables to be used by the driver
	Env map[string]string

	// List of service definitions exposed by the Task
	Services []*Service

	// Vault is used to define the set of Vault policies that this task should
	// have access to.
	Vault *Vault

	// Templates are the set of templates to be rendered for the task.
	Templates []*Template

	// Constraints can be specified at a task level and apply only to
	// the particular task.
	Constraints []*Constraint

	// Affinities can be specified at the task level to express
	// scheduling preferences
	Affinities []*Affinity

	// Resources is the resources needed by this task
	Resources *Resources

	// RestartPolicy of a TaskGroup
	RestartPolicy *RestartPolicy

	// DispatchPayload configures how the task retrieves its input from a dispatch
	DispatchPayload *DispatchPayloadConfig

	// Lifecycle configures the task's lifecycle hook; see TaskLifecycleConfig.
	Lifecycle *TaskLifecycleConfig

	// Meta is used to associate arbitrary metadata with this
	// task. This is opaque to Nomad.
	Meta map[string]string

	// KillTimeout is the time between signaling a task that it will be
	// killed and killing it.
	KillTimeout time.Duration

	// LogConfig provides configuration for log rotation
	LogConfig *LogConfig

	// Artifacts is a list of artifacts to download and extract before running
	// the task.
	Artifacts []*TaskArtifact

	// Leader marks the task as the leader within the group. When the leader
	// task exits, other tasks will be gracefully terminated.
	Leader bool

	// ShutdownDelay is the duration of the delay between deregistering a
	// task from Consul and sending it a signal to shutdown. See #2441
	ShutdownDelay time.Duration

	// VolumeMounts is a list of Volume name <-> mount configurations that will be
	// attached to this task.
	VolumeMounts []*VolumeMount

	// ScalingPolicies is a list of scaling policies scoped to this task
	ScalingPolicies []*ScalingPolicy

	// KillSignal is the kill signal to use for the task. This is an optional
	// specification and defaults to SIGINT
	KillSignal string

	// Used internally to manage tasks according to their TaskKind. Initial use case
	// is for Consul Connect
	Kind TaskKind

	// CSIPluginConfig is used to configure the plugin supervisor for the task.
	CSIPluginConfig *TaskCSIPluginConfig
}
// UsesConnect is for conveniently detecting if the Task is able to make use
// of Consul Connect features. This will be indicated in the TaskKind of the
// Task, which exports known types of Tasks. UsesConnect will be true if the
// task is a connect proxy, connect native, or is a connect gateway.
func (t *Task) UsesConnect() bool {
	if t.Kind.IsConnectNative() {
		return true
	}
	return t.UsesConnectSidecar()
}

// UsesConnectSidecar reports whether the task is a Connect sidecar: either a
// connect proxy or any of the supported connect gateway kinds.
func (t *Task) UsesConnectSidecar() bool {
	switch {
	case t.Kind.IsConnectProxy():
		return true
	case t.Kind.IsAnyConnectGateway():
		return true
	default:
		return false
	}
}
// Copy returns a deep copy of the task, or nil for a nil receiver. The
// opaque Config map is deep-copied via copystructure, which panics if it
// contains an uncopyable value.
func (t *Task) Copy() *Task {
	if t == nil {
		return nil
	}
	// Start from a shallow copy, then replace each reference-typed field
	// with its own copy so the two tasks share no mutable state.
	nt := new(Task)
	*nt = *t
	nt.Env = helper.CopyMapStringString(nt.Env)
	if t.Services != nil {
		services := make([]*Service, len(nt.Services))
		for i, s := range nt.Services {
			services[i] = s.Copy()
		}
		nt.Services = services
	}
	nt.Constraints = CopySliceConstraints(nt.Constraints)
	nt.Affinities = CopySliceAffinities(nt.Affinities)
	nt.VolumeMounts = CopySliceVolumeMount(nt.VolumeMounts)
	nt.CSIPluginConfig = nt.CSIPluginConfig.Copy()
	nt.Vault = nt.Vault.Copy()
	nt.Resources = nt.Resources.Copy()
	nt.LogConfig = nt.LogConfig.Copy()
	nt.Meta = helper.CopyMapStringString(nt.Meta)
	nt.DispatchPayload = nt.DispatchPayload.Copy()
	nt.Lifecycle = nt.Lifecycle.Copy()
	if t.Artifacts != nil {
		artifacts := make([]*TaskArtifact, 0, len(t.Artifacts))
		for _, a := range nt.Artifacts {
			artifacts = append(artifacts, a.Copy())
		}
		nt.Artifacts = artifacts
	}
	// Deep copy the driver config; its shape is driver-defined and opaque.
	if i, err := copystructure.Copy(nt.Config); err != nil {
		panic(err.Error())
	} else {
		nt.Config = i.(map[string]interface{})
	}
	if t.Templates != nil {
		templates := make([]*Template, len(t.Templates))
		for i, tmpl := range nt.Templates {
			templates[i] = tmpl.Copy()
		}
		nt.Templates = templates
	}
	return nt
}
// Canonicalize canonicalizes fields in the task: empty maps become nil,
// services are canonicalized against the job/group/task names, and unset
// resources, restart policy, and kill timeout receive their defaults.
func (t *Task) Canonicalize(job *Job, tg *TaskGroup) {
	// Normalize empty maps to nil so scheduling comparisons done via
	// reflect.DeepEqual treat "empty" and "unset" identically.
	if len(t.Meta) == 0 {
		t.Meta = nil
	}
	if len(t.Config) == 0 {
		t.Config = nil
	}
	if len(t.Env) == 0 {
		t.Env = nil
	}

	for _, svc := range t.Services {
		svc.Canonicalize(job.Name, tg.Name, t.Name, job.Namespace)
	}

	// If Resources are nil initialize them to defaults, otherwise canonicalize.
	if t.Resources != nil {
		t.Resources.Canonicalize()
	} else {
		t.Resources = DefaultResources()
	}

	// Inherit the group's restart policy when the task defines none.
	if t.RestartPolicy == nil {
		t.RestartPolicy = tg.RestartPolicy
	}

	// Set the default timeout if it is not specified.
	if t.KillTimeout == 0 {
		t.KillTimeout = DefaultKillTimeout
	}

	if t.Vault != nil {
		t.Vault.Canonicalize()
	}

	for _, tmpl := range t.Templates {
		tmpl.Canonicalize()
	}
}
// GoString implements fmt.GoStringer, expanding the pointed-to task's fields
// (rather than just the pointer value) in %#v output.
func (t *Task) GoString() string {
	return fmt.Sprintf("*%#v", *t)
}
// Validate is used to check a task for reasonable configuration: a usable
// name and driver, non-negative timeouts, valid resources, logs, constraints,
// affinities, services, artifacts, vault, templates, dispatch payload,
// lifecycle, Connect kind, volume mounts, and CSI plugin config. All findings
// are accumulated and returned as one multierror.
func (t *Task) Validate(ephemeralDisk *EphemeralDisk, jobType string, tgServices []*Service, tgNetworks Networks) error {
	var mErr multierror.Error
	if t.Name == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task name"))
	}
	if strings.ContainsAny(t.Name, `/\`) {
		// We enforce this so that when creating the directory on disk it will
		// not have any slashes.
		mErr.Errors = append(mErr.Errors, errors.New("Task name cannot include slashes"))
	} else if strings.Contains(t.Name, "\000") {
		mErr.Errors = append(mErr.Errors, errors.New("Task name cannot include null characters"))
	}
	if t.Driver == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task driver"))
	}
	if t.KillTimeout < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("KillTimeout must be a positive value"))
	}
	if t.ShutdownDelay < 0 {
		mErr.Errors = append(mErr.Errors, errors.New("ShutdownDelay must be a positive value"))
	}
	// Validate the resources.
	if t.Resources == nil {
		mErr.Errors = append(mErr.Errors, errors.New("Missing task resources"))
	} else if err := t.Resources.Validate(); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}
	// Validate the log config
	if t.LogConfig == nil {
		mErr.Errors = append(mErr.Errors, errors.New("Missing Log Config"))
	} else if err := t.LogConfig.Validate(); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}
	// Task-level constraints may not use operands reserved for the group level.
	for idx, constr := range t.Constraints {
		if err := constr.Validate(); err != nil {
			outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
		switch constr.Operand {
		case ConstraintDistinctHosts, ConstraintDistinctProperty:
			outer := fmt.Errorf("Constraint %d has disallowed Operand at task level: %s", idx+1, constr.Operand)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}
	// System jobs may not use affinities; validate them for all other types.
	if jobType == JobTypeSystem {
		if t.Affinities != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza"))
		}
	} else {
		for idx, affinity := range t.Affinities {
			if err := affinity.Validate(); err != nil {
				outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err)
				mErr.Errors = append(mErr.Errors, outer)
			}
		}
	}
	// Validate Services
	if err := validateServices(t, tgNetworks); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}
	// Log storage must fit within the requested ephemeral disk.
	if t.LogConfig != nil && ephemeralDisk != nil {
		logUsage := (t.LogConfig.MaxFiles * t.LogConfig.MaxFileSizeMB)
		if ephemeralDisk.SizeMB <= logUsage {
			mErr.Errors = append(mErr.Errors,
				fmt.Errorf("log storage (%d MB) must be less than requested disk capacity (%d MB)",
					logUsage, ephemeralDisk.SizeMB))
		}
	}
	for idx, artifact := range t.Artifacts {
		if err := artifact.Validate(); err != nil {
			outer := fmt.Errorf("Artifact %d validation failed: %v", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
	}
	if t.Vault != nil {
		if err := t.Vault.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Vault validation failed: %v", err))
		}
	}
	// Templates must validate individually and must not share a destination;
	// destinations maps DestPath -> 1-based template index for error reporting.
	destinations := make(map[string]int, len(t.Templates))
	for idx, tmpl := range t.Templates {
		if err := tmpl.Validate(); err != nil {
			outer := fmt.Errorf("Template %d validation failed: %s", idx+1, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
		if other, ok := destinations[tmpl.DestPath]; ok {
			outer := fmt.Errorf("Template %d has same destination as %d", idx+1, other)
			mErr.Errors = append(mErr.Errors, outer)
		} else {
			destinations[tmpl.DestPath] = idx + 1
		}
	}
	// Validate the dispatch payload block if there
	if t.DispatchPayload != nil {
		if err := t.DispatchPayload.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Payload validation failed: %v", err))
		}
	}
	// Validate the Lifecycle block if there
	if t.Lifecycle != nil {
		if err := t.Lifecycle.Validate(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Lifecycle validation failed: %v", err))
		}
	}
	// Validation for TaskKind field which is used for Consul Connect integration
	if t.Kind.IsConnectProxy() {
		// This task is a Connect proxy so it should not have service stanzas
		if len(t.Services) > 0 {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have a service stanza"))
		}
		if t.Leader {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have leader set"))
		}
		// Ensure the proxy task has a corresponding service entry
		serviceErr := ValidateConnectProxyService(t.Kind.Value(), tgServices)
		if serviceErr != nil {
			mErr.Errors = append(mErr.Errors, serviceErr)
		}
	}
	// Validation for volumes
	for idx, vm := range t.VolumeMounts {
		if !MountPropagationModeIsValid(vm.PropagationMode) {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Volume Mount (%d) has an invalid propagation mode: \"%s\"", idx, vm.PropagationMode))
		}
	}
	// Validate CSI Plugin Config
	if t.CSIPluginConfig != nil {
		if t.CSIPluginConfig.ID == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("CSIPluginConfig must have a non-empty PluginID"))
		}
		if !CSIPluginTypeIsValid(t.CSIPluginConfig.Type) {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("CSIPluginConfig PluginType must be one of 'node', 'controller', or 'monolith', got: \"%s\"", t.CSIPluginConfig.Type))
		}
		// TODO: Investigate validation of the PluginMountDir. Not much we can do apart from check IsAbs until after we understand its execution environment though :(
	}
	return mErr.ErrorOrNil()
}
// validateServices takes a task and validates the services within it are valid
// and reference ports that exist (either on the group's first network or, for
// backward compatibility, on the task's deprecated resource networks).
func validateServices(t *Task, tgNetworks Networks) error {
	var mErr multierror.Error

	// Ensure that services don't ask for nonexistent ports and their names are
	// unique. servicePorts maps port label -> set of service names using it.
	servicePorts := make(map[string]map[string]struct{})
	addServicePort := func(label, service string) {
		if _, ok := servicePorts[label]; !ok {
			servicePorts[label] = map[string]struct{}{}
		}
		servicePorts[label][service] = struct{}{}
	}
	knownServices := make(map[string]struct{})
	for i, service := range t.Services {
		if err := service.Validate(); err != nil {
			outer := fmt.Errorf("service[%d] %+q validation failed: %s", i, service.Name, err)
			mErr.Errors = append(mErr.Errors, outer)
		}
		if service.AddressMode == AddressModeAlloc {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q cannot use address_mode=\"alloc\", only services defined in a \"group\" block can use this mode", service.Name))
		}

		// Ensure that services with the same name are not being registered for
		// the same port
		if _, ok := knownServices[service.Name+service.PortLabel]; ok {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q is duplicate", service.Name))
		}
		knownServices[service.Name+service.PortLabel] = struct{}{}

		if service.PortLabel != "" {
			if service.AddressMode == "driver" {
				// Numeric port labels are valid for address_mode=driver
				_, err := strconv.Atoi(service.PortLabel)
				if err != nil {
					// Not a numeric port label, add it to list to check
					addServicePort(service.PortLabel, service.Name)
				}
			} else {
				addServicePort(service.PortLabel, service.Name)
			}
		}

		// connect block is only allowed on group level
		if service.Connect != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q cannot have \"connect\" block, only services defined in a \"group\" block can", service.Name))
		}

		// Ensure that check names are unique and have valid ports
		knownChecks := make(map[string]struct{})
		for _, check := range service.Checks {
			if _, ok := knownChecks[check.Name]; ok {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is duplicate", check.Name))
			}
			knownChecks[check.Name] = struct{}{}

			if check.AddressMode == AddressModeAlloc {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q cannot use address_mode=\"alloc\", only checks defined in a \"group\" service block can use this mode", service.Name))
			}

			if !check.RequiresPort() {
				// No need to continue validating check if it doesn't need a port
				continue
			}

			effectivePort := check.PortLabel
			if effectivePort == "" {
				// Inherits from service
				effectivePort = service.PortLabel
			}

			if effectivePort == "" {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is missing a port", check.Name))
				continue
			}

			isNumeric := false
			portNumber, err := strconv.Atoi(effectivePort)
			if err == nil {
				isNumeric = true
			}

			// Numeric ports are fine for address_mode = "driver"
			if check.AddressMode == "driver" && isNumeric {
				if portNumber <= 0 {
					mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q has invalid numeric port %d", check.Name, portNumber))
				}
				continue
			}

			if isNumeric {
				mErr.Errors = append(mErr.Errors, fmt.Errorf(`check %q cannot use a numeric port %d without setting address_mode="driver"`, check.Name, portNumber))
				continue
			}

			// PortLabel must exist, report errors by its parent service
			addServicePort(effectivePort, service.Name)
		}
	}

	// Get the set of group port labels.
	portLabels := make(map[string]struct{})
	if len(tgNetworks) > 0 {
		ports := tgNetworks[0].PortLabels()
		for portLabel := range ports {
			portLabels[portLabel] = struct{}{}
		}
	}

	// COMPAT(0.13)
	// Append the set of task port labels. (Note that network resources on the
	// task resources are deprecated, but we must let them continue working; a
	// warning will be emitted on job submission).
	if t.Resources != nil {
		for _, network := range t.Resources.Networks {
			for portLabel := range network.PortLabels() {
				portLabels[portLabel] = struct{}{}
			}
		}
	}

	// Iterate over a sorted list of keys to make error listings stable
	keys := make([]string, 0, len(servicePorts))
	for p := range servicePorts {
		keys = append(keys, p)
	}
	sort.Strings(keys)

	// Ensure all ports referenced in services exist.
	for _, servicePort := range keys {
		services := servicePorts[servicePort]
		_, ok := portLabels[servicePort]
		if !ok {
			names := make([]string, 0, len(services))
			for name := range services {
				names = append(names, name)
			}
			// Keep order deterministic
			sort.Strings(names)
			joined := strings.Join(names, ", ")
			err := fmt.Errorf("port label %q referenced by services %v does not exist", servicePort, joined)
			mErr.Errors = append(mErr.Errors, err)
		}
	}
	return mErr.ErrorOrNil()
}
// Warnings returns deprecation warnings for the task: deprecated resource
// fields (IOPS, task-level networks) and any template warnings, prefixed by
// the template index.
func (t *Task) Warnings() error {
	var warnings multierror.Error

	// Deprecated resource fields.
	if t.Resources != nil {
		if t.Resources.IOPS != 0 {
			warnings.Errors = append(warnings.Errors, fmt.Errorf("IOPS has been deprecated as of Nomad 0.9.0. Please remove IOPS from resource stanza."))
		}
		if len(t.Resources.Networks) != 0 {
			warnings.Errors = append(warnings.Errors, fmt.Errorf("task network resources have been deprecated as of Nomad 0.12.0. Please configure networking via group network block."))
		}
	}

	for i, tmpl := range t.Templates {
		if warning := tmpl.Warnings(); warning != nil {
			warnings.Errors = append(warnings.Errors,
				multierror.Prefix(warning, fmt.Sprintf("Template[%d]", i)))
		}
	}

	return warnings.ErrorOrNil()
}
// TaskKind identifies the special kinds of tasks using the following format:
// '<kind_name>(:<identifier>)`. The TaskKind can optionally include an identifier that
// is opaque to the Task. This identifier can be used to relate the task to some
// other entity based on the kind.
//
// For example, a task may have the TaskKind of `connect-proxy:service` where
// 'connect-proxy' is the kind name and 'service' is the identifier that relates the
// task to the service name of which it is a connect proxy for.
type TaskKind string

// NewTaskKind builds a TaskKind by joining the kind name and identifier with
// a colon.
func NewTaskKind(name, identifier string) TaskKind {
	return TaskKind(name + ":" + identifier)
}

// Name returns the kind name portion of the TaskKind (everything before the
// first colon, or the whole string when there is no colon).
func (k TaskKind) Name() string {
	s := string(k)
	if i := strings.Index(s, ":"); i != -1 {
		return s[:i]
	}
	return s
}

// Value returns the identifier of the TaskKind or an empty string if it doesn't
// include one.
func (k TaskKind) Value() string {
	s := string(k)
	if i := strings.Index(s, ":"); i != -1 {
		return s[i+1:]
	}
	return ""
}

// hasPrefix reports whether the kind is "<prefix>:<identifier>" with a
// non-empty identifier.
func (k TaskKind) hasPrefix(prefix string) bool {
	return len(k) > len(prefix)+1 && strings.HasPrefix(string(k), prefix+":")
}

// IsConnectProxy returns true if the TaskKind is connect-proxy.
func (k TaskKind) IsConnectProxy() bool {
	return k.hasPrefix(ConnectProxyPrefix)
}

// IsConnectNative returns true if the TaskKind is connect-native.
func (k TaskKind) IsConnectNative() bool {
	return k.hasPrefix(ConnectNativePrefix)
}

// IsConnectIngress returns true if the TaskKind is connect-ingress.
func (k TaskKind) IsConnectIngress() bool {
	return k.hasPrefix(ConnectIngressPrefix)
}

// IsConnectTerminating returns true if the TaskKind is connect-terminating.
func (k TaskKind) IsConnectTerminating() bool {
	return k.hasPrefix(ConnectTerminatingPrefix)
}

// IsConnectMesh returns true if the TaskKind is connect-mesh.
func (k TaskKind) IsConnectMesh() bool {
	return k.hasPrefix(ConnectMeshPrefix)
}

// IsAnyConnectGateway returns true if the TaskKind represents any one of the
// supported connect gateway types.
func (k TaskKind) IsAnyConnectGateway() bool {
	return k.IsConnectIngress() || k.IsConnectTerminating() || k.IsConnectMesh()
}

const (
	// ConnectProxyPrefix is the prefix used for fields referencing a Consul Connect
	// Proxy
	ConnectProxyPrefix = "connect-proxy"

	// ConnectNativePrefix is the prefix used for fields referencing a Connect
	// Native Task
	ConnectNativePrefix = "connect-native"

	// ConnectIngressPrefix is the prefix used for fields referencing a Consul
	// Connect Ingress Gateway Proxy.
	ConnectIngressPrefix = "connect-ingress"

	// ConnectTerminatingPrefix is the prefix used for fields referencing a Consul
	// Connect Terminating Gateway Proxy.
	ConnectTerminatingPrefix = "connect-terminating"

	// ConnectMeshPrefix is the prefix used for fields referencing a Consul Connect
	// Mesh Gateway Proxy.
	ConnectMeshPrefix = "connect-mesh"
)
// ValidateConnectProxyService checks that the service that is being
// proxied by this task exists in the task group and contains
// valid Connect config.
func ValidateConnectProxyService(serviceName string, tgServices []*Service) error {
	// Collect the names of Connect sidecar services seen before a match, for
	// error reporting.
	connectNames := make([]string, 0, len(tgServices))
	for _, svc := range tgServices {
		// Only services with a Connect sidecar are candidates.
		if svc.Connect == nil || svc.Connect.SidecarService == nil {
			continue
		}
		if svc.Name == serviceName {
			return nil
		}
		connectNames = append(connectNames, svc.Name)
	}
	if len(connectNames) == 0 {
		return fmt.Errorf("No Connect services in task group with Connect proxy (%q)", serviceName)
	}
	return fmt.Errorf("Connect proxy service name (%q) not found in Connect services from task group: %s", serviceName, connectNames)
}
const (
	// TemplateChangeModeNoop marks that no action should be taken if the
	// template is re-rendered
	TemplateChangeModeNoop = "noop"

	// TemplateChangeModeSignal marks that the task should be signaled if the
	// template is re-rendered
	TemplateChangeModeSignal = "signal"

	// TemplateChangeModeRestart marks that the task should be restarted if the
	// template is re-rendered
	TemplateChangeModeRestart = "restart"
)

var (
	// TemplateChangeModeInvalidError is the error for when an invalid change
	// mode is given; returned by Template.Validate.
	TemplateChangeModeInvalidError = errors.New("Invalid change mode. Must be one of the following: noop, signal, restart")
)
// Template represents a template configuration to be rendered for a given task
type Template struct {
	// SourcePath is the path to the template to be rendered
	SourcePath string

	// DestPath is the path to where the template should be rendered
	DestPath string

	// EmbeddedTmpl store the raw template. This is useful for smaller templates
	// where they are embedded in the job file rather than sent as an artifact
	EmbeddedTmpl string

	// ChangeMode indicates what should be done if the template is re-rendered;
	// one of the TemplateChangeMode* constants.
	ChangeMode string

	// ChangeSignal is the signal that should be sent if the change mode
	// requires it. Upper-cased by Canonicalize.
	ChangeSignal string

	// Splay is used to avoid coordinated restarts of processes by applying a
	// random wait between 0 and the given splay value before signalling the
	// application of a change
	Splay time.Duration

	// Perms is the permission the file should be written out with; parsed as
	// an octal string by Validate.
	Perms string

	// LeftDelim and RightDelim are optional configurations to control what
	// delimiter is utilized when parsing the template.
	LeftDelim  string
	RightDelim string

	// Envvars enables exposing the template as environment variables
	// instead of as a file. The template must be of the form:
	//
	//	VAR_NAME_1={{ key service/my-key }}
	//	VAR_NAME_2=raw string and {{ env "attr.kernel.name" }}
	//
	// Lines will be split on the initial "=" with the first part being the
	// key name and the second part the value.
	// Empty lines and lines starting with # will be ignored, but to avoid
	// escaping issues #s within lines will not be treated as comments.
	Envvars bool

	// VaultGrace is the grace duration between lease renewal and reacquiring a
	// secret. If the lease of a secret is less than the grace, a new secret is
	// acquired.
	// COMPAT(0.12) VaultGrace has been ignored by Vault since Vault v0.5.
	VaultGrace time.Duration

	// Wait is used to override the global WaitConfig on a per-template basis
	Wait *WaitConfig
}
// DefaultTemplate returns a default template: restart on re-render, a
// five-second splay, and 0644 file permissions.
func DefaultTemplate() *Template {
	tmpl := new(Template)
	tmpl.ChangeMode = TemplateChangeModeRestart
	tmpl.Splay = 5 * time.Second
	tmpl.Perms = "0644"
	return tmpl
}
// Copy returns a copy of the template, or nil for a nil receiver. The Wait
// override is copied as well so the two templates share no mutable state.
func (t *Template) Copy() *Template {
	if t == nil {
		return nil
	}
	clone := *t
	clone.Wait = t.Wait.Copy()
	return &clone
}
// Canonicalize normalizes the template's fields: the change signal, when
// set, is upper-cased.
func (t *Template) Canonicalize() {
	if sig := t.ChangeSignal; sig != "" {
		t.ChangeSignal = strings.ToUpper(sig)
	}
}
// Validate checks the template for a renderable source, a safe destination
// inside the allocation directory, a recognized change mode, a non-negative
// splay, octal permissions, and a valid wait config.
func (t *Template) Validate() error {
	var mErr multierror.Error

	// Verify we have something to render
	if t.SourcePath == "" && t.EmbeddedTmpl == "" {
		_ = multierror.Append(&mErr, fmt.Errorf("Must specify a source path or have an embedded template"))
	}

	// Verify we can render somewhere
	if t.DestPath == "" {
		_ = multierror.Append(&mErr, fmt.Errorf("Must specify a destination for the template"))
	}

	// Verify the destination doesn't escape
	escaped, err := escapingfs.PathEscapesAllocViaRelative("task", t.DestPath)
	if err != nil {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
	} else if escaped {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
	}

	// Verify a proper change mode
	switch t.ChangeMode {
	case TemplateChangeModeNoop, TemplateChangeModeRestart:
	case TemplateChangeModeSignal:
		if t.ChangeSignal == "" {
			_ = multierror.Append(&mErr, fmt.Errorf("Must specify signal value when change mode is signal"))
		}
		if t.Envvars {
			_ = multierror.Append(&mErr, fmt.Errorf("cannot use signals with env var templates"))
		}
	default:
		_ = multierror.Append(&mErr, TemplateChangeModeInvalidError)
	}

	// Verify the splay is positive
	if t.Splay < 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("Must specify positive splay value"))
	}

	// Verify the permissions
	if t.Perms != "" {
		if _, err := strconv.ParseUint(t.Perms, 8, 12); err != nil {
			_ = multierror.Append(&mErr, fmt.Errorf("Failed to parse %q as octal: %v", t.Perms, err))
		}
	}

	// NOTE(review): reuses the `err` variable declared above and calls
	// Validate on t.Wait even when it is nil — presumably
	// (*WaitConfig).Validate handles a nil receiver; confirm.
	if err = t.Wait.Validate(); err != nil {
		_ = multierror.Append(&mErr, err)
	}

	return mErr.ErrorOrNil()
}
// Warnings returns non-fatal deprecation warnings for the template;
// currently only the obsolete vault_grace field triggers one.
func (t *Template) Warnings() error {
	var mErr multierror.Error

	// vault_grace has been ignored since Vault 0.5 and deprecated in Nomad 0.11.
	if t.VaultGrace != 0 {
		_ = multierror.Append(&mErr, fmt.Errorf("VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template stanza."))
	}

	return mErr.ErrorOrNil()
}
// DiffID fulfills the DiffableWithID interface by identifying the template
// by its destination path when computing job diffs.
func (t *Template) DiffID() string {
	return t.DestPath
}
// WaitConfig is the Min/Max duration used by the Consul Template Watcher. Consul
// Template relies on pointer based business logic. This struct uses pointers so
// that we can tell the difference between zero values and unset values.
type WaitConfig struct {
	// Min is the lower wait bound; nil means unset.
	Min *time.Duration
	// Max is the upper wait bound; nil means unset.
	Max *time.Duration
}
// Copy returns a deep copy of this configuration, or nil for a nil
// receiver.
func (wc *WaitConfig) Copy() *WaitConfig {
	if wc == nil {
		return nil
	}

	nwc := new(WaitConfig)
	if wc.Min != nil {
		// Copy the pointed-to duration. The previous `&*wc.Min` is, per
		// the Go spec, identical to wc.Min itself, so the "deep" copy
		// was sharing the original's pointers.
		d := *wc.Min
		nwc.Min = &d
	}
	if wc.Max != nil {
		d := *wc.Max
		nwc.Max = &d
	}

	return nwc
}
// Equals reports whether both configs agree on Min and Max, comparing the
// pointed-to durations rather than the pointers. Two nil configs are
// equal; a nil config never equals a non-nil one (previously a nil
// receiver or argument caused a panic).
func (wc *WaitConfig) Equals(o *WaitConfig) bool {
	if wc == o {
		return true
	}
	if wc == nil || o == nil {
		return false
	}
	// Min/Max must match in both presence and value.
	if (wc.Min == nil) != (o.Min == nil) {
		return false
	}
	if wc.Min != nil && *wc.Min != *o.Min {
		return false
	}
	if (wc.Max == nil) != (o.Max == nil) {
		return false
	}
	if wc.Max != nil && *wc.Max != *o.Max {
		return false
	}
	return true
}
// Validate checks that the configured min is not greater than the max.
// A nil config, or one with either bound unset, is considered valid.
func (wc *WaitConfig) Validate() error {
	if wc == nil {
		return nil
	}

	// Both bounds must be present for the comparison to be meaningful.
	if wc.Min != nil && wc.Max != nil && *wc.Min > *wc.Max {
		return fmt.Errorf("wait min %s is greater than max %s", wc.Min, wc.Max)
	}

	return nil
}
// AllocStateField identifies which allocation-wide field an AllocState
// event records a change to.
type AllocStateField uint8

const (
	// AllocStateFieldClientStatus tracks changes to the allocation's
	// client status; currently the only alloc state field.
	AllocStateFieldClientStatus AllocStateField = iota
)

// AllocState records a single event that changes the state of the whole
// allocation: which field changed, its new value, and when.
type AllocState struct {
	// Field identifies the changed allocation field.
	Field AllocStateField
	// Value is the field's new value.
	Value string
	// Time is when the change was recorded.
	Time time.Time
}
// TaskHandle is an optional handle to a task propagated to the servers for use
// by remote tasks. Since remote tasks are not implicitly lost when the node
// they are assigned to is down, their state is migrated to the replacement
// allocation.
//
// Minimal set of fields from plugins/drivers/task_handle.go:TaskHandle
type TaskHandle struct {
	// Version of driver state. Used by the driver to gracefully handle
	// plugin upgrades.
	Version int

	// Driver-specific state containing a handle to the remote task.
	// Opaque to Nomad; only the driver interprets these bytes.
	DriverState []byte
}
// Copy returns a duplicate of the handle with its own DriverState buffer,
// or nil for a nil receiver.
func (h *TaskHandle) Copy() *TaskHandle {
	if h == nil {
		return nil
	}
	state := make([]byte, len(h.DriverState))
	copy(state, h.DriverState)
	return &TaskHandle{
		Version:     h.Version,
		DriverState: state,
	}
}
// Set of possible states for a task.
const (
	TaskStatePending = "pending" // The task is waiting to be run.
	TaskStateRunning = "running" // The task is currently running.
	TaskStateDead    = "dead"    // Terminal state of task; it will not start again.
)
// TaskState tracks the current state of a task and events that caused state
// transitions.
type TaskState struct {
	// The current state of the task: one of the TaskState* constants.
	State string

	// Failed marks a task as having failed
	Failed bool

	// Restarts is the number of times the task has restarted
	Restarts uint64

	// LastRestart is the time the task last restarted. It is updated each time the
	// task restarts
	LastRestart time.Time

	// StartedAt is the time the task is started. It is updated each time the
	// task starts
	StartedAt time.Time

	// FinishedAt is the time at which the task transitioned to dead and will
	// not be started again.
	FinishedAt time.Time

	// Series of task events that transition the state of the task.
	Events []*TaskEvent

	// Experimental - TaskHandle is based on drivers.TaskHandle and used
	// by remote task drivers to migrate task handles between allocations.
	TaskHandle *TaskHandle
}
// NewTaskState returns a TaskState initialized in the Pending state.
func NewTaskState() *TaskState {
	ts := new(TaskState)
	ts.State = TaskStatePending
	return ts
}
// Canonicalize ensures the TaskState has a State set, defaulting to
// Pending when empty.
func (ts *TaskState) Canonicalize() {
	if ts.State != "" {
		return
	}
	ts.State = TaskStatePending
}
// Copy returns a deep copy of the task state, duplicating the event slice
// and the task handle. A nil receiver yields nil.
func (ts *TaskState) Copy() *TaskState {
	if ts == nil {
		return nil
	}
	clone := *ts
	if ts.Events != nil {
		clone.Events = make([]*TaskEvent, len(ts.Events))
		for i, ev := range ts.Events {
			clone.Events[i] = ev.Copy()
		}
	}
	clone.TaskHandle = ts.TaskHandle.Copy()
	return &clone
}
// Successful returns whether a task finished successfully. Only meaningful
// for batch allocations or ephemeral (non-sidecar) lifecycle tasks part of a
// service or system allocation.
func (ts *TaskState) Successful() bool {
	return ts.State == TaskStateDead && !ts.Failed
}
const (
	// TaskSetupFailure indicates that the task could not be started due to a
	// setup failure.
	TaskSetupFailure = "Setup Failure"

	// TaskDriverFailure indicates that the task could not be started due to a
	// failure in the driver. TaskDriverFailure is considered Recoverable.
	TaskDriverFailure = "Driver Failure"

	// TaskReceived signals that the task has been pulled by the client at the
	// given timestamp.
	TaskReceived = "Received"

	// TaskFailedValidation indicates the task was invalid and as such was not run.
	// TaskFailedValidation is not considered Recoverable.
	TaskFailedValidation = "Failed Validation"

	// TaskStarted signals that the task was started and its timestamp can be
	// used to determine the running length of the task.
	TaskStarted = "Started"

	// TaskTerminated indicates that the task was started and exited.
	TaskTerminated = "Terminated"

	// TaskKilling indicates a kill signal has been sent to the task.
	TaskKilling = "Killing"

	// TaskKilled indicates a user has killed the task.
	TaskKilled = "Killed"

	// TaskRestarting indicates that task terminated and is being restarted.
	TaskRestarting = "Restarting"

	// TaskNotRestarting indicates that the task has failed and is not being
	// restarted because it has exceeded its restart policy.
	TaskNotRestarting = "Not Restarting"

	// TaskRestartSignal indicates that the task has been signalled to be
	// restarted
	TaskRestartSignal = "Restart Signaled"

	// TaskSignaling indicates that the task is being signalled.
	TaskSignaling = "Signaling"

	// TaskDownloadingArtifacts means the task is downloading the artifacts
	// specified in the task.
	TaskDownloadingArtifacts = "Downloading Artifacts"

	// TaskArtifactDownloadFailed indicates that downloading the artifacts
	// failed.
	TaskArtifactDownloadFailed = "Failed Artifact Download"

	// TaskBuildingTaskDir indicates that the task directory/chroot is being
	// built.
	TaskBuildingTaskDir = "Building Task Directory"

	// TaskSetup indicates the task runner is setting up the task environment
	TaskSetup = "Task Setup"

	// TaskDiskExceeded indicates that one of the tasks in a taskgroup has
	// exceeded the requested disk resources.
	TaskDiskExceeded = "Disk Resources Exceeded"

	// TaskSiblingFailed indicates that a sibling task in the task group has
	// failed.
	TaskSiblingFailed = "Sibling Task Failed"

	// TaskDriverMessage is an informational event message emitted by
	// drivers such as when they're performing a long running action like
	// downloading an image.
	TaskDriverMessage = "Driver"

	// TaskLeaderDead indicates that the leader task within the task group
	// has finished.
	TaskLeaderDead = "Leader Task Dead"

	// TaskMainDead indicates that the main tasks have died.
	TaskMainDead = "Main Tasks Dead"

	// TaskHookFailed indicates that one of the hooks for a task failed.
	TaskHookFailed = "Task hook failed"

	// TaskRestoreFailed indicates Nomad was unable to reattach to a
	// restored task.
	TaskRestoreFailed = "Failed Restoring Task"

	// TaskPluginUnhealthy indicates that a plugin managed by Nomad became unhealthy
	TaskPluginUnhealthy = "Plugin became unhealthy"

	// TaskPluginHealthy indicates that a plugin managed by Nomad became healthy
	TaskPluginHealthy = "Plugin became healthy"

	// TaskClientReconnected indicates that the client running the task
	// reconnected after a disconnect.
	TaskClientReconnected = "Reconnected"
)
// TaskEvent is an event that affects the state of a task and contains meta-data
// appropriate to the event's type.
type TaskEvent struct {
	Type    string // One of the Task* event type constants.
	Time    int64  // Unix Nanosecond timestamp
	Message string // A possible message explaining the termination of the task.

	// DisplayMessage is a human friendly message about the event
	DisplayMessage string

	// Details is a map with annotated info about the event
	Details map[string]string

	// DEPRECATION NOTICE: The following fields are deprecated and will be removed
	// in a future release. Field values are available in the Details map.

	// FailsTask marks whether this event fails the task.
	// Deprecated, use Details["fails_task"] to access this.
	FailsTask bool

	// Restart fields.
	// Deprecated, use Details["restart_reason"] to access this.
	RestartReason string

	// Setup Failure fields.
	// Deprecated, use Details["setup_error"] to access this.
	SetupError string

	// Driver Failure fields.
	// Deprecated, use Details["driver_error"] to access this.
	DriverError string // A driver error occurred while starting the task.

	// Task Terminated Fields.
	// Deprecated, use Details["exit_code"] to access this.
	ExitCode int // The exit code of the task.

	// Deprecated, use Details["signal"] to access this.
	Signal int // The signal that terminated the task.

	// Killing fields
	// Deprecated, use Details["kill_timeout"] to access this.
	KillTimeout time.Duration

	// Task Killed Fields.
	// Deprecated, use Details["kill_error"] to access this.
	KillError string // Error killing the task.

	// KillReason is the reason the task was killed
	// Deprecated, use Details["kill_reason"] to access this.
	KillReason string

	// TaskRestarting fields.
	// Deprecated, use Details["start_delay"] to access this.
	StartDelay int64 // The sleep period before restarting the task in unix nanoseconds.

	// Artifact Download fields
	// Deprecated, use Details["download_error"] to access this.
	DownloadError string // Error downloading artifacts

	// Validation fields
	// Deprecated, use Details["validation_error"] to access this.
	ValidationError string // Validation error

	// The maximum allowed task disk size.
	// Deprecated, use Details["disk_limit"] to access this.
	DiskLimit int64

	// Name of the sibling task that caused termination of the task that
	// the TaskEvent refers to.
	// Deprecated, use Details["failed_sibling"] to access this.
	FailedSibling string

	// VaultError is the error from token renewal
	// Deprecated, use Details["vault_renewal_error"] to access this.
	VaultError string

	// TaskSignalReason indicates the reason the task is being signalled.
	// Deprecated, use Details["task_signal_reason"] to access this.
	TaskSignalReason string

	// TaskSignal is the signal that was sent to the task
	// Deprecated, use Details["task_signal"] to access this.
	TaskSignal string

	// DriverMessage indicates a driver action being taken.
	// Deprecated, use Details["driver_message"] to access this.
	DriverMessage string

	// GenericSource is the source of a message.
	// Deprecated, is redundant with event type.
	GenericSource string
}
// PopulateEventDisplayMessage sets DisplayMessage to a human friendly
// description derived from the event type and its detail fields. It is a
// no-op when the event is nil or a display message is already present.
func (e *TaskEvent) PopulateEventDisplayMessage() {
	//TODO(preetha) needs investigation alloc_runner's Run method sends a nil event when sigterming nomad. Why?
	if e == nil {
		return
	}

	// Never overwrite an explicitly set display message.
	if e.DisplayMessage != "" {
		return
	}

	// or returns the preferred string when non-empty, else the fallback.
	or := func(preferred, fallback string) string {
		if preferred != "" {
			return preferred
		}
		return fallback
	}

	// Build up the description based on the event type.
	var desc string
	switch e.Type {
	case TaskSetup:
		desc = e.Message
	case TaskStarted:
		desc = "Task started by client"
	case TaskReceived:
		desc = "Task received by client"
	case TaskFailedValidation:
		desc = or(e.ValidationError, "Validation of task failed")
	case TaskSetupFailure:
		desc = or(e.SetupError, "Task setup failed")
	case TaskDriverFailure:
		desc = or(e.DriverError, "Failed to start task")
	case TaskDownloadingArtifacts:
		desc = "Client is downloading artifacts"
	case TaskArtifactDownloadFailed:
		desc = or(e.DownloadError, "Failed to download artifacts")
	case TaskKilling:
		switch {
		case e.KillReason != "":
			desc = e.KillReason
		case e.KillTimeout != 0:
			desc = fmt.Sprintf("Sent interrupt. Waiting %v before force killing", e.KillTimeout)
		default:
			desc = "Sent interrupt"
		}
	case TaskKilled:
		desc = or(e.KillError, "Task successfully killed")
	case TaskTerminated:
		// Exit code is always reported; signal and message only when set.
		parts := []string{fmt.Sprintf("Exit Code: %d", e.ExitCode)}
		if e.Signal != 0 {
			parts = append(parts, fmt.Sprintf("Signal: %d", e.Signal))
		}
		if e.Message != "" {
			parts = append(parts, fmt.Sprintf("Exit Message: %q", e.Message))
		}
		desc = strings.Join(parts, ", ")
	case TaskRestarting:
		// StartDelay is stored as unix nanoseconds.
		in := fmt.Sprintf("Task restarting in %v", time.Duration(e.StartDelay))
		if e.RestartReason != "" && e.RestartReason != ReasonWithinPolicy {
			desc = fmt.Sprintf("%s - %s", e.RestartReason, in)
		} else {
			desc = in
		}
	case TaskNotRestarting:
		desc = or(e.RestartReason, "Task exceeded restart policy")
	case TaskSiblingFailed:
		if e.FailedSibling != "" {
			desc = fmt.Sprintf("Task's sibling %q failed", e.FailedSibling)
		} else {
			desc = "Task's sibling failed"
		}
	case TaskSignaling:
		sig, reason := e.TaskSignal, e.TaskSignalReason
		switch {
		case sig == "" && reason == "":
			desc = "Task being sent a signal"
		case sig == "":
			desc = reason
		case reason == "":
			desc = fmt.Sprintf("Task being sent signal %v", sig)
		default:
			desc = fmt.Sprintf("Task being sent signal %v: %v", sig, reason)
		}
	case TaskRestartSignal:
		desc = or(e.RestartReason, "Task signaled to restart")
	case TaskDriverMessage:
		desc = e.DriverMessage
	case TaskLeaderDead:
		desc = "Leader Task in Group dead"
	case TaskMainDead:
		desc = "Main tasks in the group died"
	case TaskClientReconnected:
		desc = "Client reconnected"
	default:
		desc = e.Message
	}

	e.DisplayMessage = desc
}
// GoString implements fmt.GoStringer, rendering the event's timestamp and type.
func (e *TaskEvent) GoString() string {
	return fmt.Sprintf("%v - %v", e.Time, e.Type)
}
// SetDisplayMessage sets the display message of TaskEvent and returns the
// event to allow call chaining.
func (e *TaskEvent) SetDisplayMessage(msg string) *TaskEvent {
	e.DisplayMessage = msg
	return e
}
// SetMessage sets the message of TaskEvent, mirroring it into the Details
// map, and returns the event for chaining. Assumes Details is non-nil, as
// guaranteed for events created via NewTaskEvent.
func (e *TaskEvent) SetMessage(msg string) *TaskEvent {
	e.Message = msg
	e.Details["message"] = msg
	return e
}
// Copy returns a copy of the event, or nil for a nil receiver. The Details
// map is duplicated so that later Set* calls on the copy cannot mutate the
// original (the previous implementation shared the map, and also shadowed
// the `copy` builtin).
func (e *TaskEvent) Copy() *TaskEvent {
	if e == nil {
		return nil
	}
	ne := new(TaskEvent)
	*ne = *e
	if e.Details != nil {
		ne.Details = make(map[string]string, len(e.Details))
		for k, v := range e.Details {
			ne.Details[k] = v
		}
	}
	return ne
}
// NewTaskEvent returns a TaskEvent of the given type, timestamped with the
// current time and with an empty Details map ready for the Set* helpers.
func NewTaskEvent(event string) *TaskEvent {
	return &TaskEvent{
		Type:    event,
		Time:    time.Now().UnixNano(),
		Details: make(map[string]string),
	}
}
// SetSetupError is used to store an error that occurred while setting up the
// task. No-op for a nil error. Returns the event to allow chaining.
func (e *TaskEvent) SetSetupError(err error) *TaskEvent {
	if err != nil {
		e.SetupError = err.Error()
		e.Details["setup_error"] = err.Error()
	}
	return e
}

// SetFailsTask marks this event as one that fails the whole task, recording
// it in both the deprecated FailsTask field and the Details map.
func (e *TaskEvent) SetFailsTask() *TaskEvent {
	e.FailsTask = true
	e.Details["fails_task"] = "true"
	return e
}
// SetDriverError stores a driver error on the event; no-op for nil.
// Returns the event to allow chaining, like all Set* helpers below.
func (e *TaskEvent) SetDriverError(err error) *TaskEvent {
	if err != nil {
		e.DriverError = err.Error()
		e.Details["driver_error"] = err.Error()
	}
	return e
}

// SetExitCode records the task's exit code.
func (e *TaskEvent) SetExitCode(c int) *TaskEvent {
	e.ExitCode = c
	e.Details["exit_code"] = fmt.Sprintf("%d", c)
	return e
}

// SetSignal records the numeric signal that terminated the task.
func (e *TaskEvent) SetSignal(s int) *TaskEvent {
	e.Signal = s
	e.Details["signal"] = fmt.Sprintf("%d", s)
	return e
}

// SetSignalText records the signal as text, only in the Details map.
func (e *TaskEvent) SetSignalText(s string) *TaskEvent {
	e.Details["signal"] = s
	return e
}

// SetExitMessage records an exit error as the event message; no-op for nil.
func (e *TaskEvent) SetExitMessage(err error) *TaskEvent {
	if err != nil {
		e.Message = err.Error()
		e.Details["exit_message"] = err.Error()
	}
	return e
}
// SetKillError records an error encountered while killing the task;
// no-op for nil. Returns the event to allow chaining.
func (e *TaskEvent) SetKillError(err error) *TaskEvent {
	if err != nil {
		e.KillError = err.Error()
		e.Details["kill_error"] = err.Error()
	}
	return e
}

// SetKillReason records why the task was killed.
func (e *TaskEvent) SetKillReason(r string) *TaskEvent {
	e.KillReason = r
	e.Details["kill_reason"] = r
	return e
}

// SetRestartDelay records the restart delay; both the field and the
// Details entry hold the duration as integer nanoseconds.
func (e *TaskEvent) SetRestartDelay(delay time.Duration) *TaskEvent {
	e.StartDelay = int64(delay)
	e.Details["start_delay"] = fmt.Sprintf("%d", delay)
	return e
}

// SetRestartReason records why the task is restarting.
func (e *TaskEvent) SetRestartReason(reason string) *TaskEvent {
	e.RestartReason = reason
	e.Details["restart_reason"] = reason
	return e
}

// SetTaskSignalReason records why the task is being signalled.
func (e *TaskEvent) SetTaskSignalReason(r string) *TaskEvent {
	e.TaskSignalReason = r
	e.Details["task_signal_reason"] = r
	return e
}

// SetTaskSignal records the OS signal sent to the task by name.
func (e *TaskEvent) SetTaskSignal(s os.Signal) *TaskEvent {
	e.TaskSignal = s.String()
	e.Details["task_signal"] = s.String()
	return e
}
// SetDownloadError records an artifact download error; no-op for nil.
// Returns the event to allow chaining.
func (e *TaskEvent) SetDownloadError(err error) *TaskEvent {
	if err != nil {
		e.DownloadError = err.Error()
		e.Details["download_error"] = err.Error()
	}
	return e
}

// SetValidationError records a task validation error; no-op for nil.
func (e *TaskEvent) SetValidationError(err error) *TaskEvent {
	if err != nil {
		e.ValidationError = err.Error()
		e.Details["validation_error"] = err.Error()
	}
	return e
}

// SetKillTimeout records the kill timeout; the Details entry uses the
// human-readable Duration string form.
func (e *TaskEvent) SetKillTimeout(timeout time.Duration) *TaskEvent {
	e.KillTimeout = timeout
	e.Details["kill_timeout"] = timeout.String()
	return e
}

// SetDiskLimit records the maximum allowed task disk size.
func (e *TaskEvent) SetDiskLimit(limit int64) *TaskEvent {
	e.DiskLimit = limit
	e.Details["disk_limit"] = fmt.Sprintf("%d", limit)
	return e
}

// SetFailedSibling records the sibling task whose failure caused this event.
func (e *TaskEvent) SetFailedSibling(sibling string) *TaskEvent {
	e.FailedSibling = sibling
	e.Details["failed_sibling"] = sibling
	return e
}

// SetVaultRenewalError records a Vault token renewal error; no-op for nil.
func (e *TaskEvent) SetVaultRenewalError(err error) *TaskEvent {
	if err != nil {
		e.VaultError = err.Error()
		e.Details["vault_renewal_error"] = err.Error()
	}
	return e
}

// SetDriverMessage records an informational message from the driver.
func (e *TaskEvent) SetDriverMessage(m string) *TaskEvent {
	e.DriverMessage = m
	e.Details["driver_message"] = m
	return e
}

// SetOOMKilled records whether the task was OOM-killed; stored only in the
// Details map (there is no deprecated struct field for it).
func (e *TaskEvent) SetOOMKilled(oom bool) *TaskEvent {
	e.Details["oom_killed"] = strconv.FormatBool(oom)
	return e
}
// TaskArtifact is an artifact to download before running the task.
type TaskArtifact struct {
	// GetterSource is the source to download an artifact using go-getter
	GetterSource string

	// GetterOptions are options to use when downloading the artifact using
	// go-getter. The optional "checksum" entry is validated by
	// validateChecksum.
	GetterOptions map[string]string

	// GetterHeaders are headers to use when downloading the artifact using
	// go-getter.
	GetterHeaders map[string]string

	// GetterMode is the go-getter.ClientMode for fetching resources.
	// Defaults to "any" but can be set to "file" or "dir".
	GetterMode string

	// RelativeDest is the download destination given relative to the task's
	// directory.
	RelativeDest string
}
// Copy returns a duplicate of the artifact with its own option and header
// maps. A nil receiver yields nil.
func (ta *TaskArtifact) Copy() *TaskArtifact {
	if ta == nil {
		return nil
	}
	nta := *ta
	nta.GetterOptions = helper.CopyMapStringString(ta.GetterOptions)
	nta.GetterHeaders = helper.CopyMapStringString(ta.GetterHeaders)
	return &nta
}
// GoString implements fmt.GoStringer for debug formatting of the artifact.
func (ta *TaskArtifact) GoString() string {
	return fmt.Sprintf("%+v", ta)
}

// DiffID fulfills the DiffableWithID interface by identifying the artifact
// by its download destination when computing job diffs.
func (ta *TaskArtifact) DiffID() string {
	return ta.RelativeDest
}
// hashStringMap appends a deterministic hash of m onto h.
func hashStringMap(h hash.Hash, m map[string]string) {
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
_, _ = h.Write([]byte(k))
_, _ = h.Write([]byte(m[k]))
}
}
// Hash creates a unique identifier for a TaskArtifact as the same GetterSource
// may be specified multiple times with different destinations.
// The BLAKE2b-256 digest covers the source, options, headers, mode and
// destination; map entries are folded in sorted-key order (hashStringMap)
// so the result is deterministic.
func (ta *TaskArtifact) Hash() string {
	h, err := blake2b.New256(nil)
	if err != nil {
		// blake2b.New256 only errors on an invalid key; nil is valid,
		// so this is effectively unreachable.
		panic(err)
	}

	_, _ = h.Write([]byte(ta.GetterSource))

	hashStringMap(h, ta.GetterOptions)
	hashStringMap(h, ta.GetterHeaders)

	_, _ = h.Write([]byte(ta.GetterMode))
	_, _ = h.Write([]byte(ta.RelativeDest))
	return base64.RawStdEncoding.EncodeToString(h.Sum(nil))
}
// Validate checks the artifact for a source, a known getter mode, a
// destination that stays inside the allocation directory, and a well-formed
// checksum option. NOTE: it also canonicalizes in place, defaulting an
// empty GetterMode to "any".
func (ta *TaskArtifact) Validate() error {
	// Verify the source
	var mErr multierror.Error
	if ta.GetterSource == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("source must be specified"))
	}

	switch ta.GetterMode {
	case "":
		// Default to any
		ta.GetterMode = GetterModeAny
	case GetterModeAny, GetterModeFile, GetterModeDir:
		// Ok
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid artifact mode %q; must be one of: %s, %s, %s",
			ta.GetterMode, GetterModeAny, GetterModeFile, GetterModeDir))
	}

	// Verify the destination does not escape the allocation directory.
	escaped, err := escapingfs.PathEscapesAllocViaRelative("task", ta.RelativeDest)
	if err != nil {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err))
	} else if escaped {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory"))
	}

	if err := ta.validateChecksum(); err != nil {
		mErr.Errors = append(mErr.Errors, err)
	}

	return mErr.ErrorOrNil()
}
// validateChecksum verifies the optional GetterOptions["checksum"] entry:
// it must be "type:value" with a supported digest type and a hex value of
// the correct length. Absent or interpolated checksums are accepted.
func (ta *TaskArtifact) validateChecksum() error {
	check, ok := ta.GetterOptions["checksum"]
	if !ok {
		return nil
	}

	// Job struct validation occurs before interpolation resolution can be effective.
	// Skip checking if checksum contain variable reference, and artifacts fetching will
	// eventually fail, if checksum is indeed invalid.
	if args.ContainsEnv(check) {
		return nil
	}

	check = strings.TrimSpace(check)
	if check == "" {
		return fmt.Errorf("checksum value cannot be empty")
	}

	parts := strings.Split(check, ":")
	if l := len(parts); l != 2 {
		return fmt.Errorf(`checksum must be given as "type:value"; got %q`, check)
	}

	checksumType, checksumVal := parts[0], parts[1]
	checksumBytes, err := hex.DecodeString(checksumVal)
	if err != nil {
		return fmt.Errorf("invalid checksum: %v", err)
	}

	// Supported digest types mapped to their byte lengths.
	digestSizes := map[string]int{
		"md5":    md5.Size,
		"sha1":   sha1.Size,
		"sha256": sha256.Size,
		"sha512": sha512.Size,
	}
	expectedLength, supported := digestSizes[checksumType]
	if !supported {
		return fmt.Errorf("unsupported checksum type: %s", checksumType)
	}

	if len(checksumBytes) != expectedLength {
		return fmt.Errorf("invalid %s checksum: %v", checksumType, checksumVal)
	}

	return nil
}
// Constraint operands understood by the scheduler, beyond the plain
// comparison operators. Their RTarget requirements are enforced in
// Constraint.Validate.
const (
	ConstraintDistinctProperty  = "distinct_property"
	ConstraintDistinctHosts     = "distinct_hosts"
	ConstraintRegex             = "regexp"
	ConstraintVersion           = "version"
	ConstraintSemver            = "semver"
	ConstraintSetContains       = "set_contains"
	ConstraintSetContainsAll    = "set_contains_all"
	ConstraintSetContainsAny    = "set_contains_any"
	ConstraintAttributeIsSet    = "is_set"
	ConstraintAttributeIsNotSet = "is_not_set"
)
// A Constraint is used to restrict placement options.
type Constraint struct {
	LTarget string // Left-hand target
	RTarget string // Right-hand target
	Operand string // Constraint operand (<=, <, =, !=, >, >=), contains, near
}
// Equals checks if two constraints are equal. Two nil constraints are
// equal; a nil constraint never equals a non-nil one (previously a
// nil/non-nil pair dereferenced nil and panicked).
func (c *Constraint) Equals(o *Constraint) bool {
	if c == o {
		return true
	}
	if c == nil || o == nil {
		return false
	}
	return c.LTarget == o.LTarget &&
		c.RTarget == o.RTarget &&
		c.Operand == o.Operand
}
// Equal is like Equals but with one less s, matching the convention used
// elsewhere in the standard library and this codebase.
func (c *Constraint) Equal(o *Constraint) bool {
	return c.Equals(o)
}
// Copy returns a duplicate of the constraint, or nil for a nil receiver.
// All fields are plain strings, so a value copy is a deep copy.
func (c *Constraint) Copy() *Constraint {
	if c == nil {
		return nil
	}
	nc := *c
	return &nc
}
// String renders the constraint as "LTarget Operand RTarget".
func (c *Constraint) String() string {
	return fmt.Sprintf("%s %s %s", c.LTarget, c.Operand, c.RTarget)
}
// Validate checks that the constraint has an operand, that the RTarget
// satisfies the operand's requirements (compilable regex, parseable
// version/semver constraint, etc.), and that an LTarget is present for
// operands that need one. All problems are accumulated into a multierror.
func (c *Constraint) Validate() error {
	var mErr multierror.Error
	if c.Operand == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing constraint operand"))
	}

	// requireLtarget specifies whether the constraint requires an LTarget to be
	// provided.
	requireLtarget := true

	// Perform additional validation based on operand
	switch c.Operand {
	case ConstraintDistinctHosts:
		// distinct_hosts applies to the whole node, not an attribute.
		requireLtarget = false
	case ConstraintSetContainsAll, ConstraintSetContainsAny, ConstraintSetContains:
		if c.RTarget == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Set contains constraint requires an RTarget"))
		}
	case ConstraintRegex:
		if _, err := regexp.Compile(c.RTarget); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Regular expression failed to compile: %v", err))
		}
	case ConstraintVersion:
		if _, err := version.NewConstraint(c.RTarget); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Version constraint is invalid: %v", err))
		}
	case ConstraintSemver:
		if _, err := semver.NewConstraint(c.RTarget); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Semver constraint is invalid: %v", err))
		}
	case ConstraintDistinctProperty:
		// If a count is set, make sure it is convertible to a uint64
		if c.RTarget != "" {
			count, err := strconv.ParseUint(c.RTarget, 10, 64)
			if err != nil {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("Failed to convert RTarget %q to uint64: %v", c.RTarget, err))
			} else if count < 1 {
				mErr.Errors = append(mErr.Errors, fmt.Errorf("Distinct Property must have an allowed count of 1 or greater: %d < 1", count))
			}
		}
	case ConstraintAttributeIsSet, ConstraintAttributeIsNotSet:
		// Presence checks are unary; an RTarget is meaningless.
		if c.RTarget != "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q does not support an RTarget", c.Operand))
		}
	case "=", "==", "is", "!=", "not", "<", "<=", ">", ">=":
		if c.RTarget == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q requires an RTarget", c.Operand))
		}
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Unknown constraint type %q", c.Operand))
	}

	// Ensure we have an LTarget for the constraints that need one
	if requireLtarget && c.LTarget == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("No LTarget provided but is required by constraint"))
	}

	return mErr.ErrorOrNil()
}
// Constraints is a list of placement constraints.
type Constraints []*Constraint

// Equals compares Constraints as a set: lengths must match and every
// element of xs must have an equal element in ys.
// NOTE(review): with duplicate entries this is not a strict multiset
// comparison (e.g. {a,a,b} and {a,b,b} compare equal) — confirm callers
// never hold duplicates.
func (xs *Constraints) Equals(ys *Constraints) bool {
	if xs == ys {
		return true
	}
	if xs == nil || ys == nil {
		return false
	}
	if len(*xs) != len(*ys) {
		return false
	}
SETEQUALS:
	for _, x := range *xs {
		// Look for a match for x anywhere in ys.
		for _, y := range *ys {
			if x.Equals(y) {
				continue SETEQUALS
			}
		}
		return false
	}
	return true
}
// Affinity is used to score placement options based on a weight
type Affinity struct {
	LTarget string // Left-hand target
	RTarget string // Right-hand target
	Operand string // Affinity operand (<=, <, =, !=, >, >=), set_contains_all, set_contains_any
	Weight  int8   // Weight applied to nodes that match the affinity. Can be negative
}
// Equals checks if two affinities are equal. Two nil affinities are
// equal; a nil affinity never equals a non-nil one (previously a
// nil/non-nil pair dereferenced nil and panicked).
func (a *Affinity) Equals(o *Affinity) bool {
	if a == o {
		return true
	}
	if a == nil || o == nil {
		return false
	}
	return a.LTarget == o.LTarget &&
		a.RTarget == o.RTarget &&
		a.Operand == o.Operand &&
		a.Weight == o.Weight
}
// Equal is an alias of Equals, matching the single-s naming convention.
func (a *Affinity) Equal(o *Affinity) bool {
	return a.Equals(o)
}
// Copy returns a duplicate of the affinity, or nil for a nil receiver.
// All fields are scalar, so a value copy is a deep copy.
func (a *Affinity) Copy() *Affinity {
	if a == nil {
		return nil
	}
	na := *a
	return &na
}
// String renders the affinity as "LTarget Operand RTarget Weight".
func (a *Affinity) String() string {
	return fmt.Sprintf("%s %s %s %v", a.LTarget, a.Operand, a.RTarget, a.Weight)
}
// Validate checks that the affinity has an operand with a satisfiable
// RTarget, an LTarget, and a non-zero weight in [-100, 100]. All problems
// are accumulated into a multierror.
func (a *Affinity) Validate() error {
	var mErr multierror.Error
	if a.Operand == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing affinity operand"))
	}

	// Perform additional validation based on operand
	switch a.Operand {
	case ConstraintSetContainsAll, ConstraintSetContainsAny, ConstraintSetContains:
		if a.RTarget == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Set contains operators require an RTarget"))
		}
	case ConstraintRegex:
		if _, err := regexp.Compile(a.RTarget); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Regular expression failed to compile: %v", err))
		}
	case ConstraintVersion:
		if _, err := version.NewConstraint(a.RTarget); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Version affinity is invalid: %v", err))
		}
	case ConstraintSemver:
		if _, err := semver.NewConstraint(a.RTarget); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Semver affinity is invalid: %v", err))
		}
	case "=", "==", "is", "!=", "not", "<", "<=", ">", ">=":
		if a.RTarget == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q requires an RTarget", a.Operand))
		}
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Unknown affinity operator %q", a.Operand))
	}

	// Ensure we have an LTarget
	if a.LTarget == "" {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("No LTarget provided but is required"))
	}

	// Ensure that weight is between -100 and 100, and not zero
	if a.Weight == 0 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Affinity weight cannot be zero"))
	}

	if a.Weight > 100 || a.Weight < -100 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Affinity weight must be within the range [-100,100]"))
	}

	return mErr.ErrorOrNil()
}
// Spread is used to specify desired distribution of allocations according to weight
type Spread struct {
	// Attribute is the node attribute used as the spread criteria
	Attribute string

	// Weight is the relative weight of this spread, useful when there are multiple
	// spread and affinities
	Weight int8

	// SpreadTarget is used to describe desired percentages for each attribute value
	SpreadTarget []*SpreadTarget

	// Memoized string representation, lazily filled by String.
	str string
}
// Affinities is a list of placement affinities.
type Affinities []*Affinity

// Equals compares Affinities as a set: lengths must match and every
// element of xs must have an equal element in ys.
// NOTE(review): same caveat as Constraints.Equals — duplicates make this
// weaker than a multiset comparison.
func (xs *Affinities) Equals(ys *Affinities) bool {
	if xs == ys {
		return true
	}
	if xs == nil || ys == nil {
		return false
	}
	if len(*xs) != len(*ys) {
		return false
	}
SETEQUALS:
	for _, x := range *xs {
		// Look for a match for x anywhere in ys.
		for _, y := range *ys {
			if x.Equals(y) {
				continue SETEQUALS
			}
		}
		return false
	}
	return true
}
// Copy returns a deep copy of the spread, duplicating the target slice.
// A nil receiver yields nil.
func (s *Spread) Copy() *Spread {
	if s == nil {
		return nil
	}
	clone := *s
	clone.SpreadTarget = CopySliceSpreadTarget(s.SpreadTarget)
	return &clone
}
// String renders the spread as "Attribute Targets Weight", memoizing the
// result in s.str. NOTE(review): the memoization writes the receiver
// without synchronization — presumably callers never call String
// concurrently; confirm before sharing Spread values across goroutines.
func (s *Spread) String() string {
	if s.str != "" {
		return s.str
	}
	s.str = fmt.Sprintf("%s %s %v", s.Attribute, s.SpreadTarget, s.Weight)
	return s.str
}
// Validate checks that the spread has an attribute, a weight in (0, 100],
// unique target values, and target percentages that individually and in
// total do not exceed 100. All problems are accumulated into a multierror.
// NOTE(review): the weight message says "from 0 to 100" but the check
// rejects 0 itself; kept as-is since consumers may match on the message.
func (s *Spread) Validate() error {
	var mErr multierror.Error
	if s.Attribute == "" {
		mErr.Errors = append(mErr.Errors, errors.New("Missing spread attribute"))
	}

	if s.Weight <= 0 || s.Weight > 100 {
		mErr.Errors = append(mErr.Errors, errors.New("Spread stanza must have a positive weight from 0 to 100"))
	}

	seen := make(map[string]struct{})
	// Percent is uint8; sum in a wider type to avoid overflow.
	sumPercent := uint32(0)

	for _, target := range s.SpreadTarget {
		// Make sure there are no duplicates
		_, ok := seen[target.Value]
		if !ok {
			seen[target.Value] = struct{}{}
		} else {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Spread target value %q already defined", target.Value))
		}
		if target.Percent > 100 {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Spread target percentage for value %q must be between 0 and 100", target.Value))
		}
		sumPercent += uint32(target.Percent)
	}
	if sumPercent > 100 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Sum of spread target percentages must not be greater than 100%%; got %d%%", sumPercent))
	}

	return mErr.ErrorOrNil()
}
// SpreadTarget is used to specify desired percentages for each attribute value
type SpreadTarget struct {
	// Value is a single attribute value, like "dc1"
	Value string

	// Percent is the desired percentage of allocs
	Percent uint8

	// Memoized string representation, lazily filled by String.
	str string
}
// Copy returns a duplicate of the spread target, or nil for a nil
// receiver. All fields are scalar, so a value copy suffices.
func (s *SpreadTarget) Copy() *SpreadTarget {
	if s == nil {
		return nil
	}
	dup := *s
	return &dup
}
// String renders the target as `"Value" Percent%`, memoizing the result
// in s.str (unsynchronized, like Spread.String).
func (s *SpreadTarget) String() string {
	if s.str != "" {
		return s.str
	}
	s.str = fmt.Sprintf("%q %v%%", s.Value, s.Percent)
	return s.str
}
// EphemeralDisk is an ephemeral disk object
type EphemeralDisk struct {
	// Sticky indicates whether the allocation is sticky to a node
	Sticky bool

	// SizeMB is the size of the local disk
	SizeMB int

	// Migrate determines if Nomad client should migrate the allocation dir for
	// sticky allocations
	Migrate bool
}
// DefaultEphemeralDisk returns an EphemeralDisk with the default 300 MB size.
func DefaultEphemeralDisk() *EphemeralDisk {
	d := new(EphemeralDisk)
	d.SizeMB = 300
	return d
}
// Validate validates EphemeralDisk, requiring at least 10 MB.
// NOTE(review): the error text says "DiskMB" while the field is SizeMB;
// kept as-is since consumers may match on the message.
func (d *EphemeralDisk) Validate() error {
	if d.SizeMB < 10 {
		return fmt.Errorf("minimum DiskMB value is 10; got %d", d.SizeMB)
	}
	return nil
}
// Copy copies the EphemeralDisk struct and returns a new one. A nil receiver
// yields nil, matching the convention of the other Copy methods in this file
// (SpreadTarget.Copy, Vault.Copy, Deployment.Copy, ...).
func (d *EphemeralDisk) Copy() *EphemeralDisk {
	if d == nil {
		return nil
	}
	ld := new(EphemeralDisk)
	*ld = *d
	return ld
}
var (
	// VaultUnrecoverableError matches unrecoverable errors returned by a Vault
	// server; it matches "Code: 400", "Code: 403" and "Code: 404" in the
	// error text (client errors that a retry cannot fix).
	VaultUnrecoverableError = regexp.MustCompile(`Code:\s+40(0|3|4)`)
)
// Vault token change modes: how a task reacts when its Vault token is
// replaced because the original could not be renewed.
const (
	// VaultChangeModeNoop takes no action when a new token is retrieved.
	VaultChangeModeNoop = "noop"

	// VaultChangeModeSignal signals the task when a new token is retrieved.
	VaultChangeModeSignal = "signal"

	// VaultChangeModeRestart restarts the task when a new token is retrieved.
	VaultChangeModeRestart = "restart"
)
// Vault stores the set of permissions a task needs access to from Vault.
type Vault struct {
	// Policies is the set of policies that the task needs access to
	Policies []string

	// Namespace is the vault namespace that should be used.
	Namespace string

	// Env marks whether the Vault Token should be exposed as an environment
	// variable
	Env bool

	// ChangeMode is used to configure the task's behavior when the Vault
	// token changes because the original token could not be renewed in time.
	// One of the VaultChangeMode* constants.
	ChangeMode string

	// ChangeSignal is the signal sent to the task when a new token is
	// retrieved. This is only valid when using the signal change mode.
	ChangeSignal string
}
// DefaultVaultBlock returns a Vault block with the default settings: the
// token is exposed via the environment and the task restarts on token change.
func DefaultVaultBlock() *Vault {
	return &Vault{
		ChangeMode: VaultChangeModeRestart,
		Env:        true,
	}
}
// Copy returns a shallow copy of this Vault block, or nil for a nil receiver.
func (v *Vault) Copy() *Vault {
	if v == nil {
		return nil
	}
	out := &Vault{}
	*out = *v
	return out
}
// Canonicalize normalizes the Vault block in place: the change signal is
// upper-cased and an unset change mode defaults to restart. A nil receiver is
// a no-op, matching the nil-tolerance of Vault.Validate and Vault.Copy.
func (v *Vault) Canonicalize() {
	if v == nil {
		return
	}
	if v.ChangeSignal != "" {
		v.ChangeSignal = strings.ToUpper(v.ChangeSignal)
	}
	if v.ChangeMode == "" {
		v.ChangeMode = VaultChangeModeRestart
	}
}
// Validate returns an error if the Vault block is misconfigured: an empty
// policy list, use of the "root" policy, a signal change mode without a
// signal, or an unknown change mode. A nil block is considered valid.
func (v *Vault) Validate() error {
	if v == nil {
		return nil
	}

	var mErr multierror.Error
	if len(v.Policies) == 0 {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Policy list cannot be empty"))
	}
	for _, policy := range v.Policies {
		if policy == "root" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Can not specify \"root\" policy"))
		}
	}

	switch v.ChangeMode {
	case VaultChangeModeSignal:
		if v.ChangeSignal == "" {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("Signal must be specified when using change mode %q", VaultChangeModeSignal))
		}
	case VaultChangeModeNoop, VaultChangeModeRestart:
		// Valid modes that need no additional configuration.
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("Unknown change mode %q", v.ChangeMode))
	}

	return mErr.ErrorOrNil()
}
const (
	// DeploymentStatuses are the various states a deployment can be be in
	DeploymentStatusRunning    = "running"
	DeploymentStatusPaused     = "paused"
	DeploymentStatusFailed     = "failed"
	DeploymentStatusSuccessful = "successful"
	DeploymentStatusCancelled  = "cancelled"
	DeploymentStatusPending    = "pending"
	DeploymentStatusBlocked    = "blocked"
	DeploymentStatusUnblocking = "unblocking"

	// TODO Statuses and Descriptions do not match 1:1 and we sometimes use the Description as a status flag

	// DeploymentStatusDescriptions are the various descriptions of the states a
	// deployment can be in.
	DeploymentStatusDescriptionRunning               = "Deployment is running"
	DeploymentStatusDescriptionRunningNeedsPromotion = "Deployment is running but requires manual promotion"
	DeploymentStatusDescriptionRunningAutoPromotion  = "Deployment is running pending automatic promotion"
	DeploymentStatusDescriptionPaused                = "Deployment is paused"
	DeploymentStatusDescriptionSuccessful            = "Deployment completed successfully"
	DeploymentStatusDescriptionStoppedJob            = "Cancelled because job is stopped"
	DeploymentStatusDescriptionNewerJob              = "Cancelled due to newer version of job"
	DeploymentStatusDescriptionFailedAllocations     = "Failed due to unhealthy allocations"
	DeploymentStatusDescriptionProgressDeadline      = "Failed due to progress deadline"
	DeploymentStatusDescriptionFailedByUser          = "Deployment marked as failed"

	// used only in multiregion deployments
	DeploymentStatusDescriptionFailedByPeer   = "Failed because of an error in peer region"
	DeploymentStatusDescriptionBlocked        = "Deployment is complete but waiting for peer region"
	DeploymentStatusDescriptionUnblocking     = "Deployment is unblocking remaining regions"
	DeploymentStatusDescriptionPendingForPeer = "Deployment is pending, waiting for peer region"
)
// DeploymentStatusDescriptionRollback is used to get the status description of
// a deployment when rolling back to an older job.
func DeploymentStatusDescriptionRollback(baseDescription string, jobVersion uint64) string {
	suffix := fmt.Sprintf(" - rolling back to job version %d", jobVersion)
	return baseDescription + suffix
}
// DeploymentStatusDescriptionRollbackNoop is used to get the status description of
// a deployment when rolling back is not possible because it has the same specification
func DeploymentStatusDescriptionRollbackNoop(baseDescription string, jobVersion uint64) string {
	suffix := fmt.Sprintf(" - not rolling back to stable job version %d as current job has same specification", jobVersion)
	return baseDescription + suffix
}
// DeploymentStatusDescriptionNoRollbackTarget is used to get the status description of
// a deployment when there is no target to rollback to but autorevert is desired.
func DeploymentStatusDescriptionNoRollbackTarget(baseDescription string) string {
	return baseDescription + " - no stable job version to auto revert to"
}
// Deployment is the object that represents a job deployment which is used to
// transition a job between versions.
type Deployment struct {
	// ID is a generated UUID for the deployment
	ID string

	// Namespace is the namespace the deployment is created in
	Namespace string

	// JobID is the job the deployment is created for
	JobID string

	// JobVersion is the version of the job at which the deployment is tracking
	JobVersion uint64

	// JobModifyIndex is the ModifyIndex of the job which the deployment is
	// tracking.
	JobModifyIndex uint64

	// JobSpecModifyIndex is the JobModifyIndex of the job which the
	// deployment is tracking.
	JobSpecModifyIndex uint64

	// JobCreateIndex is the create index of the job which the deployment is
	// tracking. It is needed so that if the job gets stopped and reran we can
	// present the correct list of deployments for the job and not old ones.
	JobCreateIndex uint64

	// Multiregion specifies if deployment is part of multiregion deployment
	IsMultiregion bool

	// TaskGroups is the set of task groups effected by the deployment and their
	// current deployment status.
	TaskGroups map[string]*DeploymentState

	// The status of the deployment; one of the DeploymentStatus* constants.
	Status string

	// StatusDescription allows a human readable description of the deployment
	// status.
	StatusDescription string

	// EvalPriority tracks the priority of the evaluation which lead to the
	// creation of this Deployment object. Any additional evaluations created
	// as a result of this deployment can therefore inherit this value, which
	// is not guaranteed to be that of the job priority parameter.
	EvalPriority int

	// Raft indexes tracking when the deployment was created/last modified.
	CreateIndex uint64
	ModifyIndex uint64
}
// NewDeployment creates a new, running deployment tracking the given job at
// its current version, inheriting the priority of the triggering evaluation.
func NewDeployment(job *Job, evalPriority int) *Deployment {
	d := &Deployment{
		ID:                 uuid.Generate(),
		Namespace:          job.Namespace,
		JobID:              job.ID,
		JobVersion:         job.Version,
		JobModifyIndex:     job.ModifyIndex,
		JobSpecModifyIndex: job.JobModifyIndex,
		JobCreateIndex:     job.CreateIndex,
		IsMultiregion:      job.IsMultiregion(),
		EvalPriority:       evalPriority,
		Status:             DeploymentStatusRunning,
		StatusDescription:  DeploymentStatusDescriptionRunning,
		TaskGroups:         make(map[string]*DeploymentState, len(job.TaskGroups)),
	}
	return d
}
// Copy returns a deep copy of the deployment: the TaskGroups map and each
// DeploymentState are duplicated. A nil receiver yields nil.
func (d *Deployment) Copy() *Deployment {
	if d == nil {
		return nil
	}

	c := new(Deployment)
	*c = *d

	c.TaskGroups = nil
	if d.TaskGroups != nil {
		c.TaskGroups = make(map[string]*DeploymentState, len(d.TaskGroups))
		for name, state := range d.TaskGroups {
			c.TaskGroups[name] = state.Copy()
		}
	}

	return c
}
// Active returns whether the deployment is active (running, paused, blocked,
// unblocking or pending) as opposed to terminal.
func (d *Deployment) Active() bool {
	active := false
	switch d.Status {
	case DeploymentStatusRunning, DeploymentStatusPaused, DeploymentStatusBlocked, DeploymentStatusUnblocking, DeploymentStatusPending:
		active = true
	}
	return active
}
// GetID is a helper for getting the ID when the object may be nil
func (d *Deployment) GetID() string {
	if d != nil {
		return d.ID
	}
	return ""
}
// GetCreateIndex implements the CreateIndexGetter interface, required for
// pagination. A nil deployment reports index 0.
func (d *Deployment) GetCreateIndex() uint64 {
	if d != nil {
		return d.CreateIndex
	}
	return 0
}
// HasPlacedCanaries returns whether any task group of the deployment has
// placed at least one canary allocation.
func (d *Deployment) HasPlacedCanaries() bool {
	if d == nil {
		return false
	}
	for _, state := range d.TaskGroups {
		if len(state.PlacedCanaries) > 0 {
			return true
		}
	}
	return false
}
// RequiresPromotion returns whether the deployment requires promotion to
// continue: it is running and some task group has unpromoted canaries.
func (d *Deployment) RequiresPromotion() bool {
	if d == nil || d.Status != DeploymentStatusRunning || len(d.TaskGroups) == 0 {
		return false
	}
	for _, state := range d.TaskGroups {
		if state.DesiredCanaries > 0 && !state.Promoted {
			return true
		}
	}
	return false
}
// HasAutoPromote determines if all task groups with canaries are marked
// auto_promote; only meaningful while the deployment is running.
func (d *Deployment) HasAutoPromote() bool {
	if d == nil || d.Status != DeploymentStatusRunning || len(d.TaskGroups) == 0 {
		return false
	}
	for _, state := range d.TaskGroups {
		if state.DesiredCanaries > 0 && !state.AutoPromote {
			return false
		}
	}
	return true
}
// GoString renders a human-readable summary of the deployment and the state
// of each of its task groups.
func (d *Deployment) GoString() string {
	out := fmt.Sprintf("Deployment ID %q for job %q has status %q (%v):", d.ID, d.JobID, d.Status, d.StatusDescription)
	for name, state := range d.TaskGroups {
		out += fmt.Sprintf("\nTask Group %q has state:\n%#v", name, state)
	}
	return out
}
// DeploymentState tracks the state of a deployment for a given task group.
type DeploymentState struct {
	// AutoRevert marks whether the task group has indicated the job should be
	// reverted on failure
	AutoRevert bool

	// AutoPromote marks promotion triggered automatically by healthy canaries
	// copied from TaskGroup UpdateStrategy in scheduler.reconcile
	AutoPromote bool

	// ProgressDeadline is the deadline by which an allocation must transition
	// to healthy before the deployment is considered failed. This value is set
	// by the jobspec `update.progress_deadline` field.
	ProgressDeadline time.Duration

	// RequireProgressBy is the time by which an allocation must transition to
	// healthy before the deployment is considered failed. This value is reset
	// to "now" + ProgressDeadline when an allocation updates the deployment.
	RequireProgressBy time.Time

	// Promoted marks whether the canaries have been promoted
	Promoted bool

	// PlacedCanaries is the set of placed canary allocations (alloc IDs)
	PlacedCanaries []string

	// DesiredCanaries is the number of canaries that should be created.
	DesiredCanaries int

	// DesiredTotal is the total number of allocations that should be created as
	// part of the deployment.
	DesiredTotal int

	// PlacedAllocs is the number of allocations that have been placed
	PlacedAllocs int

	// HealthyAllocs is the number of allocations that have been marked healthy.
	HealthyAllocs int

	// UnhealthyAllocs are allocations that have been marked as unhealthy.
	UnhealthyAllocs int
}
// GoString renders a multi-line, tab-indented summary of the task group's
// deployment state.
func (d *DeploymentState) GoString() string {
	var b strings.Builder
	fmt.Fprintf(&b, "\tDesired Total: %d", d.DesiredTotal)
	fmt.Fprintf(&b, "\n\tDesired Canaries: %d", d.DesiredCanaries)
	fmt.Fprintf(&b, "\n\tPlaced Canaries: %#v", d.PlacedCanaries)
	fmt.Fprintf(&b, "\n\tPromoted: %v", d.Promoted)
	fmt.Fprintf(&b, "\n\tPlaced: %d", d.PlacedAllocs)
	fmt.Fprintf(&b, "\n\tHealthy: %d", d.HealthyAllocs)
	fmt.Fprintf(&b, "\n\tUnhealthy: %d", d.UnhealthyAllocs)
	fmt.Fprintf(&b, "\n\tAutoRevert: %v", d.AutoRevert)
	fmt.Fprintf(&b, "\n\tAutoPromote: %v", d.AutoPromote)
	return b.String()
}
// Copy returns a deep copy of the deployment state, duplicating the
// PlacedCanaries slice. A nil receiver yields nil, matching the convention of
// the other Copy methods in this file.
func (d *DeploymentState) Copy() *DeploymentState {
	if d == nil {
		return nil
	}
	c := &DeploymentState{}
	*c = *d
	c.PlacedCanaries = helper.CopySliceString(d.PlacedCanaries)
	return c
}
// DeploymentStatusUpdate is used to update the status of a given deployment
type DeploymentStatusUpdate struct {
	// DeploymentID is the ID of the deployment to update
	DeploymentID string

	// Status is the new status of the deployment; one of the
	// DeploymentStatus* constants.
	Status string

	// StatusDescription is the new status description of the deployment.
	StatusDescription string
}
// RescheduleTracker encapsulates previous reschedule events
type RescheduleTracker struct {
	// Events is the list of past reschedule attempts for an allocation.
	Events []*RescheduleEvent
}
// Copy returns a deep copy of the tracker, duplicating every event. A nil
// receiver yields nil.
func (rt *RescheduleTracker) Copy() *RescheduleTracker {
	if rt == nil {
		return nil
	}
	nt := &RescheduleTracker{}
	*nt = *rt
	events := make([]*RescheduleEvent, 0, len(rt.Events))
	for _, ev := range rt.Events {
		events = append(events, ev.Copy())
	}
	nt.Events = events
	return nt
}
// RescheduleEvent is used to keep track of previous attempts at rescheduling an allocation
type RescheduleEvent struct {
	// RescheduleTime is the timestamp of a reschedule attempt (Unix nanos)
	RescheduleTime int64

	// PrevAllocID is the ID of the previous allocation being restarted
	PrevAllocID string

	// PrevNodeID is the node ID of the previous allocation
	PrevNodeID string

	// Delay is the reschedule delay associated with the attempt
	Delay time.Duration
}
// NewRescheduleEvent builds a RescheduleEvent recording one reschedule
// attempt of an allocation.
func NewRescheduleEvent(rescheduleTime int64, prevAllocID string, prevNodeID string, delay time.Duration) *RescheduleEvent {
	return &RescheduleEvent{
		RescheduleTime: rescheduleTime,
		PrevAllocID:    prevAllocID,
		PrevNodeID:     prevNodeID,
		Delay:          delay,
	}
}
// Copy returns a shallow copy of the event, or nil for a nil receiver.
func (re *RescheduleEvent) Copy() *RescheduleEvent {
	if re == nil {
		return nil
	}
	// Use a name that does not shadow the builtin copy function.
	ne := new(RescheduleEvent)
	*ne = *re
	return ne
}
// DesiredTransition is used to mark an allocation as having a desired state
// transition. This information can be used by the scheduler to make the
// correct decision. All fields are tri-state pointers: nil means "unset".
type DesiredTransition struct {
	// Migrate is used to indicate that this allocation should be stopped and
	// migrated to another node.
	Migrate *bool

	// Reschedule is used to indicate that this allocation is eligible to be
	// rescheduled. Most allocations are automatically eligible for
	// rescheduling, so this field is only required when an allocation is not
	// automatically eligible. An example is an allocation that is part of a
	// deployment.
	Reschedule *bool

	// ForceReschedule is used to indicate that this allocation must be rescheduled.
	// This field is only used when operators want to force a placement even if
	// a failed allocation is not eligible to be rescheduled
	ForceReschedule *bool

	// NoShutdownDelay, if set to true, will override the group and
	// task shutdown_delay configuration and ignore the delay for any
	// allocations stopped as a result of this Deregister call.
	NoShutdownDelay *bool
}
// Merge merges the two desired transitions in place, preferring any field
// that is set (non-nil) on the passed-in object.
func (d *DesiredTransition) Merge(o *DesiredTransition) {
	if o.NoShutdownDelay != nil {
		d.NoShutdownDelay = o.NoShutdownDelay
	}
	if o.ForceReschedule != nil {
		d.ForceReschedule = o.ForceReschedule
	}
	if o.Reschedule != nil {
		d.Reschedule = o.Reschedule
	}
	if o.Migrate != nil {
		d.Migrate = o.Migrate
	}
}
// ShouldMigrate returns whether the transition object dictates a migration.
// A nil receiver reports false, matching ShouldForceReschedule and
// ShouldIgnoreShutdownDelay.
func (d *DesiredTransition) ShouldMigrate() bool {
	if d == nil {
		return false
	}
	return d.Migrate != nil && *d.Migrate
}
// ShouldReschedule returns whether the transition object dictates a
// rescheduling. A nil receiver reports false, matching ShouldForceReschedule
// and ShouldIgnoreShutdownDelay.
func (d *DesiredTransition) ShouldReschedule() bool {
	if d == nil {
		return false
	}
	return d.Reschedule != nil && *d.Reschedule
}
// ShouldForceReschedule returns whether the transition object dictates a
// forced rescheduling; nil receivers and unset fields report false.
func (d *DesiredTransition) ShouldForceReschedule() bool {
	return d != nil && d.ForceReschedule != nil && *d.ForceReschedule
}
// ShouldIgnoreShutdownDelay returns whether the transition object dictates
// that shutdown skip any shutdown delays; nil receivers and unset fields
// report false.
func (d *DesiredTransition) ShouldIgnoreShutdownDelay() bool {
	return d != nil && d.NoShutdownDelay != nil && *d.NoShutdownDelay
}
// Desired (server-side) allocation statuses.
const (
	AllocDesiredStatusRun   = "run"   // Allocation should run
	AllocDesiredStatusStop  = "stop"  // Allocation should stop
	AllocDesiredStatusEvict = "evict" // Allocation should stop, and was evicted
)
// Client-side allocation statuses as reported by the Nomad client.
const (
	AllocClientStatusPending  = "pending"
	AllocClientStatusRunning  = "running"
	AllocClientStatusComplete = "complete"
	AllocClientStatusFailed   = "failed"
	AllocClientStatusLost     = "lost"
	AllocClientStatusUnknown  = "unknown"
)
// Allocation is used to allocate the placement of a task group to a node.
type Allocation struct {
	// msgpack omit empty fields during serialization
	_struct bool `codec:",omitempty"` // nolint: structcheck

	// ID of the allocation (UUID)
	ID string

	// Namespace is the namespace the allocation is created in
	Namespace string

	// ID of the evaluation that generated this allocation
	EvalID string

	// Name is a logical name of the allocation.
	Name string

	// NodeID is the node this is being placed on
	NodeID string

	// NodeName is the name of the node this is being placed on.
	NodeName string

	// Job is the parent job of the task group being allocated.
	// This is copied at allocation time to avoid issues if the job
	// definition is updated.
	JobID string
	Job   *Job

	// TaskGroup is the name of the task group that should be run
	TaskGroup string

	// COMPAT(0.11): Remove in 0.11
	// Resources is the total set of resources allocated as part
	// of this allocation of the task group. Dynamic ports will be set by
	// the scheduler.
	Resources *Resources

	// SharedResources are the resources that are shared by all the tasks in an
	// allocation
	// Deprecated: use AllocatedResources.Shared instead.
	// Keep field to allow us to handle upgrade paths from old versions
	SharedResources *Resources

	// TaskResources is the set of resources allocated to each
	// task. These should sum to the total Resources. Dynamic ports will be
	// set by the scheduler.
	// Deprecated: use AllocatedResources.Tasks instead.
	// Keep field to allow us to handle upgrade paths from old versions
	TaskResources map[string]*Resources

	// AllocatedResources is the total resources allocated for the task group.
	AllocatedResources *AllocatedResources

	// Metrics associated with this allocation
	Metrics *AllocMetric

	// Desired Status of the allocation on the client; one of the
	// AllocDesiredStatus* constants.
	DesiredStatus string

	// DesiredStatusDescription is meant to provide more human useful information
	DesiredDescription string

	// DesiredTransition is used to indicate that a state transition
	// is desired for a given reason.
	DesiredTransition DesiredTransition

	// Status of the allocation on the client; one of the AllocClientStatus*
	// constants.
	ClientStatus string

	// ClientStatusDescription is meant to provide more human useful information
	ClientDescription string

	// TaskStates stores the state of each task,
	TaskStates map[string]*TaskState

	// AllocStates track meta data associated with changes to the state of the whole allocation, like becoming lost
	AllocStates []*AllocState

	// PreviousAllocation is the allocation that this allocation is replacing
	PreviousAllocation string

	// NextAllocation is the allocation that this allocation is being replaced by
	NextAllocation string

	// DeploymentID identifies an allocation as being created from a
	// particular deployment
	DeploymentID string

	// DeploymentStatus captures the status of the allocation as part of the
	// given deployment
	DeploymentStatus *AllocDeploymentStatus

	// RescheduleTrackers captures details of previous reschedule attempts of the allocation
	RescheduleTracker *RescheduleTracker

	// NetworkStatus captures networking details of an allocation known at runtime
	NetworkStatus *AllocNetworkStatus

	// FollowupEvalID captures a follow up evaluation created to handle a failed allocation
	// that can be rescheduled in the future
	FollowupEvalID string

	// PreemptedAllocations captures IDs of any allocations that were preempted
	// in order to place this allocation
	PreemptedAllocations []string

	// PreemptedByAllocation tracks the alloc ID of the allocation that caused this allocation
	// to stop running because it got preempted
	PreemptedByAllocation string

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64

	// AllocModifyIndex is not updated when the client updates allocations. This
	// lets the client pull only the allocs updated by the server.
	AllocModifyIndex uint64

	// CreateTime is the time the allocation has finished scheduling and been
	// verified by the plan applier.
	CreateTime int64

	// ModifyTime is the time the allocation was last updated.
	ModifyTime int64
}
// GetID implements the IDGetter interface, required for pagination. A nil
// allocation reports an empty ID.
func (a *Allocation) GetID() string {
	if a != nil {
		return a.ID
	}
	return ""
}
// GetNamespace implements the NamespaceGetter interface, required for
// pagination and filtering namespaces in endpoints that support glob namespace
// requests using tokens with limited access. A nil allocation reports "".
func (a *Allocation) GetNamespace() string {
	if a != nil {
		return a.Namespace
	}
	return ""
}
// GetCreateIndex implements the CreateIndexGetter interface, required for
// pagination. A nil allocation reports index 0.
func (a *Allocation) GetCreateIndex() uint64 {
	if a != nil {
		return a.CreateIndex
	}
	return 0
}
// ConsulNamespace returns the Consul namespace of the task group associated
// with this allocation.
//
// NOTE(review): this assumes a.Job is non-nil and that LookupTaskGroup finds
// the group — a missing group would panic here; confirm callers guarantee it.
func (a *Allocation) ConsulNamespace() string {
	return a.Job.LookupTaskGroup(a.TaskGroup).Consul.GetNamespace()
}
// JobNamespacedID returns the (JobID, Namespace) pair identifying the
// allocation's parent job.
func (a *Allocation) JobNamespacedID() NamespacedID {
	return NewNamespacedID(a.JobID, a.Namespace)
}
// Index returns the index of the allocation. If the allocation is from a task
// group with count greater than 1, there will be multiple allocations for it.
//
// The index is parsed out of the allocation name, which is expected to end in
// "[<index>]" after the job ID and task group name (the +2 accounts for the
// separator and the opening bracket). Malformed names yield index 0 because
// the Atoi error is deliberately ignored.
func (a *Allocation) Index() uint {
	l := len(a.Name)
	// Length of "<jobID>.<taskGroup>[" — everything before the index digits.
	prefix := len(a.JobID) + len(a.TaskGroup) + 2
	if l <= 3 || l <= prefix {
		return uint(0)
	}

	// Strip the prefix and the trailing "]".
	strNum := a.Name[prefix : len(a.Name)-1]
	num, _ := strconv.Atoi(strNum)
	return uint(num)
}
// Copy provides a copy of the allocation and deep copies the job
func (a *Allocation) Copy() *Allocation {
	return a.copyImpl(true)
}
// CopySkipJob provides a copy of the allocation but doesn't deep copy the job;
// the copy shares the original's *Job pointer.
func (a *Allocation) CopySkipJob() *Allocation {
	return a.copyImpl(false)
}
// Canonicalize Allocation to ensure fields are initialized to the expectations
// of this version of Nomad. Should be called when restoring persisted
// Allocations or receiving Allocations from Nomad agents potentially on an
// older version of Nomad.
func (a *Allocation) Canonicalize() {
	// Upgrade path: older allocs carry per-task Resources but no
	// AllocatedResources; synthesize the new structure from the old fields.
	if a.AllocatedResources == nil && a.TaskResources != nil {
		ar := AllocatedResources{}

		tasks := make(map[string]*AllocatedTaskResources, len(a.TaskResources))
		for name, tr := range a.TaskResources {
			atr := AllocatedTaskResources{}
			atr.Cpu.CpuShares = int64(tr.CPU)
			atr.Memory.MemoryMB = int64(tr.MemoryMB)
			atr.Networks = tr.Networks.Copy()
			tasks[name] = &atr
		}
		ar.Tasks = tasks

		// Shared (group-level) disk and networks, when present in the old format.
		if a.SharedResources != nil {
			ar.Shared.DiskMB = int64(a.SharedResources.DiskMB)
			ar.Shared.Networks = a.SharedResources.Networks.Copy()
		}
		a.AllocatedResources = &ar
	}

	a.Job.Canonicalize()
}
// copyImpl copies the allocation, deep-copying every nested structure; the
// job flag controls whether the embedded Job is deep-copied as well.
func (a *Allocation) copyImpl(job bool) *Allocation {
	if a == nil {
		return nil
	}

	n := new(Allocation)
	*n = *a

	if job {
		n.Job = n.Job.Copy()
	}

	n.AllocatedResources = n.AllocatedResources.Copy()
	n.Resources = n.Resources.Copy()
	n.SharedResources = n.SharedResources.Copy()

	if a.TaskResources != nil {
		resources := make(map[string]*Resources, len(a.TaskResources))
		for name, res := range a.TaskResources {
			resources[name] = res.Copy()
		}
		n.TaskResources = resources
	}

	n.Metrics = n.Metrics.Copy()
	n.DeploymentStatus = n.DeploymentStatus.Copy()

	if a.TaskStates != nil {
		states := make(map[string]*TaskState, len(a.TaskStates))
		for name, state := range a.TaskStates {
			states[name] = state.Copy()
		}
		n.TaskStates = states
	}

	n.RescheduleTracker = a.RescheduleTracker.Copy()
	n.PreemptedAllocations = helper.CopySliceString(a.PreemptedAllocations)
	return n
}
// TerminalStatus returns if the desired or actual status is terminal and
// will no longer transition.
func (a *Allocation) TerminalStatus() bool {
	// The desired state wins; otherwise fall back to the client state.
	if a.ServerTerminalStatus() {
		return true
	}
	return a.ClientTerminalStatus()
}
// ServerTerminalStatus returns true if the desired state of the allocation is
// terminal (stop or evict).
func (a *Allocation) ServerTerminalStatus() bool {
	return a.DesiredStatus == AllocDesiredStatusStop ||
		a.DesiredStatus == AllocDesiredStatusEvict
}
// ClientTerminalStatus returns if the client status is terminal and will no
// longer transition (complete, failed or lost).
func (a *Allocation) ClientTerminalStatus() bool {
	return a.ClientStatus == AllocClientStatusComplete ||
		a.ClientStatus == AllocClientStatusFailed ||
		a.ClientStatus == AllocClientStatusLost
}
// ShouldReschedule returns if the allocation is eligible to be rescheduled according
// to its status and ReschedulePolicy given its failure time
func (a *Allocation) ShouldReschedule(reschedulePolicy *ReschedulePolicy, failTime time.Time) bool {
	// Allocations the server wants stopped or evicted are never rescheduled.
	if a.DesiredStatus == AllocDesiredStatusStop || a.DesiredStatus == AllocDesiredStatusEvict {
		return false
	}
	// Only client-failed allocations are candidates.
	if a.ClientStatus != AllocClientStatusFailed {
		return false
	}
	return a.RescheduleEligible(reschedulePolicy, failTime)
}
// RescheduleEligible returns if the allocation is eligible to be rescheduled according
// to its ReschedulePolicy and the current state of its reschedule trackers
func (a *Allocation) RescheduleEligible(reschedulePolicy *ReschedulePolicy, failTime time.Time) bool {
	if reschedulePolicy == nil {
		return false
	}
	if reschedulePolicy.Unlimited {
		return true
	}

	attempts := reschedulePolicy.Attempts
	if attempts <= 0 {
		// Rescheduling is disabled entirely.
		return false
	}

	// No attempts recorded yet and attempts are allowed.
	if a.RescheduleTracker == nil || len(a.RescheduleTracker.Events) == 0 {
		return true
	}

	attempted, _ := a.rescheduleInfo(reschedulePolicy, failTime)
	return attempted < attempts
}
// rescheduleInfo returns (attempted, allowed): the number of reschedule
// events that fall within the policy's interval ending at failTime, and the
// policy's configured attempt limit.
func (a *Allocation) rescheduleInfo(reschedulePolicy *ReschedulePolicy, failTime time.Time) (int, int) {
	if reschedulePolicy == nil {
		return 0, 0
	}

	attempts := reschedulePolicy.Attempts
	attempted := 0
	if attempts > 0 && a.RescheduleTracker != nil {
		// Events strictly after this instant count against the limit.
		windowStart := failTime.UTC().UnixNano() - reschedulePolicy.Interval.Nanoseconds()
		for _, ev := range a.RescheduleTracker.Events {
			if ev.RescheduleTime > windowStart {
				attempted++
			}
		}
	}
	return attempted, attempts
}
// RescheduleInfo returns (attempted, allowed) reschedule counts using the
// allocation's own policy and last event time.
func (a *Allocation) RescheduleInfo() (int, int) {
	return a.rescheduleInfo(a.ReschedulePolicy(), a.LastEventTime())
}
// LastEventTime is the time of the last task event in the allocation.
// It is used to determine allocation failure time. If the FinishedAt field
// is not set, the alloc's modify time is used
func (a *Allocation) LastEventTime() time.Time {
	var last time.Time
	// Ranging over a nil map is a no-op, so no nil check is needed.
	for _, state := range a.TaskStates {
		if last.IsZero() || state.FinishedAt.After(last) {
			last = state.FinishedAt
		}
	}

	if last.IsZero() {
		return time.Unix(0, a.ModifyTime).UTC()
	}
	return last
}
// ReschedulePolicy returns the reschedule policy based on the task group;
// nil when the task group cannot be found.
func (a *Allocation) ReschedulePolicy() *ReschedulePolicy {
	if tg := a.Job.LookupTaskGroup(a.TaskGroup); tg != nil {
		return tg.ReschedulePolicy
	}
	return nil
}
// MigrateStrategy returns the migrate strategy based on the task group;
// nil when the task group cannot be found.
func (a *Allocation) MigrateStrategy() *MigrateStrategy {
	if tg := a.Job.LookupTaskGroup(a.TaskGroup); tg != nil {
		return tg.Migrate
	}
	return nil
}
// NextRescheduleTime returns a time on or after which the allocation is eligible to be rescheduled,
// and whether the next reschedule time is within policy's interval if the policy doesn't allow unlimited reschedules
func (a *Allocation) NextRescheduleTime() (time.Time, bool) {
	failTime := a.LastEventTime()
	reschedulePolicy := a.ReschedulePolicy()

	// Only failed, non-stopped allocations with a known failure time and a
	// policy can be rescheduled.
	if reschedulePolicy == nil || failTime.IsZero() ||
		a.ClientStatus != AllocClientStatusFailed ||
		a.DesiredStatus == AllocDesiredStatusStop {
		return time.Time{}, false
	}

	return a.nextRescheduleTime(failTime, reschedulePolicy)
}
// nextRescheduleTime computes the next reschedule instant (failTime plus the
// policy delay) and whether the allocation is still eligible under the
// policy's attempt limit and interval.
func (a *Allocation) nextRescheduleTime(failTime time.Time, reschedulePolicy *ReschedulePolicy) (time.Time, bool) {
	delay := a.NextDelay()
	next := failTime.Add(delay)

	eligible := reschedulePolicy.Unlimited ||
		(reschedulePolicy.Attempts > 0 && a.RescheduleTracker == nil)
	if reschedulePolicy.Attempts > 0 && a.RescheduleTracker != nil && a.RescheduleTracker.Events != nil {
		// With a max-attempts policy, eligibility depends on attempts already
		// made inside the interval and the delay staying within it.
		attempted, attempts := a.rescheduleInfo(reschedulePolicy, failTime)
		eligible = attempted < attempts && delay < reschedulePolicy.Interval
	}
	return next, eligible
}
// NextRescheduleTimeByFailTime works like NextRescheduleTime but allows callers
// specify a failure time. Useful for things like determining whether to reschedule
// an alloc on a disconnected node.
func (a *Allocation) NextRescheduleTimeByFailTime(failTime time.Time) (time.Time, bool) {
	policy := a.ReschedulePolicy()
	if policy == nil {
		return time.Time{}, false
	}
	return a.nextRescheduleTime(failTime, policy)
}
// ShouldClientStop tests an alloc for StopAfterClientDisconnect configuration:
// the task group must exist and have a non-zero value set.
func (a *Allocation) ShouldClientStop() bool {
	tg := a.Job.LookupTaskGroup(a.TaskGroup)
	if tg == nil {
		return false
	}
	sacd := tg.StopAfterClientDisconnect
	return sacd != nil && *sacd != 0
}
// WaitClientStop uses the reschedule delay mechanism to block rescheduling until
// StopAfterClientDisconnect's block interval passes
func (a *Allocation) WaitClientStop() time.Time {
	tg := a.Job.LookupTaskGroup(a.TaskGroup)

	// An alloc can only be marked lost once, so use the first lost transition
	var lostAt time.Time
	for _, state := range a.AllocStates {
		if state.Field == AllocStateFieldClientStatus &&
			state.Value == AllocClientStatusLost {
			lostAt = state.Time
			break
		}
	}

	// On the first pass, the alloc hasn't been marked lost yet, and so we start
	// counting from now
	if lostAt.IsZero() {
		lostAt = time.Now().UTC()
	}

	// Find the max kill timeout across the group's tasks
	kill := DefaultKillTimeout
	for _, task := range tg.Tasks {
		if task.KillTimeout > kill {
			kill = task.KillTimeout
		}
	}

	return lostAt.Add(*tg.StopAfterClientDisconnect + kill)
}
// DisconnectTimeout uses the MaxClientDisconnect to compute when the allocation
// should transition to lost. If the allocation, its job, its task group, or
// the group's MaxClientDisconnect is unset, the allocation is already
// considered expired and "now" is returned.
func (a *Allocation) DisconnectTimeout(now time.Time) time.Time {
	if a == nil || a.Job == nil {
		return now
	}

	// LookupTaskGroup can return nil (see ReschedulePolicy/MigrateStrategy);
	// guard before dereferencing to avoid a panic on a bad group name.
	tg := a.Job.LookupTaskGroup(a.TaskGroup)
	if tg == nil || tg.MaxClientDisconnect == nil {
		return now
	}

	return now.Add(*tg.MaxClientDisconnect)
}
// SupportsDisconnectedClients determines whether both the server and the task group
// are configured to allow the allocation to reconnect after network connectivity
// has been lost and then restored.
func (a *Allocation) SupportsDisconnectedClients(serverSupportsDisconnectedClients bool) bool {
	if !serverSupportsDisconnectedClients || a.Job == nil {
		return false
	}
	tg := a.Job.LookupTaskGroup(a.TaskGroup)
	return tg != nil && tg.MaxClientDisconnect != nil
}
// NextDelay returns a duration after which the allocation can be rescheduled.
// It is calculated according to the delay function and previous reschedule attempts.
func (a *Allocation) NextDelay() time.Duration {
	policy := a.ReschedulePolicy()
	// Can be nil if the task group was updated to remove its reschedule policy
	if policy == nil {
		return 0
	}
	// With no prior attempts, the base delay applies.
	delayDur := policy.Delay
	if a.RescheduleTracker == nil || a.RescheduleTracker.Events == nil || len(a.RescheduleTracker.Events) == 0 {
		return delayDur
	}
	events := a.RescheduleTracker.Events
	switch policy.DelayFunction {
	case "exponential":
		// Double the most recent attempt's delay.
		delayDur = a.RescheduleTracker.Events[len(a.RescheduleTracker.Events)-1].Delay * 2
	case "fibonacci":
		if len(events) >= 2 {
			fibN1Delay := events[len(events)-1].Delay
			fibN2Delay := events[len(events)-2].Delay
			// Handle reset of delay ceiling which should cause
			// a new series to start
			if fibN2Delay == policy.MaxDelay && fibN1Delay == policy.Delay {
				delayDur = fibN1Delay
			} else {
				delayDur = fibN1Delay + fibN2Delay
			}
		}
	default:
		// "constant" (or unknown) delay function: always the base delay.
		return delayDur
	}
	if policy.MaxDelay > 0 && delayDur > policy.MaxDelay {
		// Clamp to the ceiling.
		delayDur = policy.MaxDelay
		// check if delay needs to be reset: if more than the clamped delay
		// has already elapsed since the last attempt, restart the series.
		lastRescheduleEvent := a.RescheduleTracker.Events[len(a.RescheduleTracker.Events)-1]
		timeDiff := a.LastEventTime().UTC().UnixNano() - lastRescheduleEvent.RescheduleTime
		if timeDiff > delayDur.Nanoseconds() {
			delayDur = policy.Delay
		}

	}

	return delayDur
}
// Terminated returns if the allocation is in a terminal state on a client.
func (a *Allocation) Terminated() bool {
	switch a.ClientStatus {
	case AllocClientStatusFailed,
		AllocClientStatusComplete,
		AllocClientStatusLost:
		return true
	default:
		return false
	}
}
// SetStop updates the allocation in place to a DesiredStatus stop, with the ClientStatus
// and ClientDescription supplied by the caller. It also records the client-status
// transition (with a timestamp) in AllocStates via AppendState.
func (a *Allocation) SetStop(clientStatus, clientDesc string) {
	a.DesiredStatus = AllocDesiredStatusStop
	a.ClientStatus = clientStatus
	a.ClientDescription = clientDesc
	// Track when the client status changed, e.g. for marking the alloc lost.
	a.AppendState(AllocStateFieldClientStatus, clientStatus)
}
// AppendState creates and appends an AllocState entry recording the time of the state
// transition. Used to mark the transition to lost.
func (a *Allocation) AppendState(field AllocStateField, value string) {
	state := &AllocState{
		Field: field,
		Value: value,
		Time:  time.Now().UTC(),
	}
	a.AllocStates = append(a.AllocStates, state)
}
// RanSuccessfully returns whether the client has ran the allocation and all
// tasks finished successfully. Critically this function returns whether the
// allocation has ran to completion and not just that the alloc has converged to
// its desired state. That is to say that a batch allocation must have finished
// with exit code 0 on all task groups. This doesn't really have meaning on a
// non-batch allocation because a service and system allocation should not
// finish.
func (a *Allocation) RanSuccessfully() bool {
	// No task states means the client never started the allocation.
	if len(a.TaskStates) == 0 {
		return false
	}

	// Any unsuccessful task makes the whole allocation unsuccessful.
	for _, state := range a.TaskStates {
		if !state.Successful() {
			return false
		}
	}
	return true
}
// ShouldMigrate returns if the allocation needs data migration.
func (a *Allocation) ShouldMigrate() bool {
	// Without a previous allocation there is nothing to migrate from.
	if a.PreviousAllocation == "" {
		return false
	}

	// Allocations being stopped or evicted do not migrate.
	switch a.DesiredStatus {
	case AllocDesiredStatusStop, AllocDesiredStatusEvict:
		return false
	}

	// If the task group is missing or has no ephemeral disk block, we
	// won't migrate.
	tg := a.Job.LookupTaskGroup(a.TaskGroup)
	if tg == nil || tg.EphemeralDisk == nil {
		return false
	}

	// Migration requires both migrate and sticky to be enabled.
	return tg.EphemeralDisk.Migrate && tg.EphemeralDisk.Sticky
}
// SetEventDisplayMessages populates the display message if it's not already set,
// a temporary fix to handle old allocations that don't have it.
// This method will be removed in a future release.
func (a *Allocation) SetEventDisplayMessages() {
	// Delegates to the shared helper used by both Allocation and AllocListStub.
	setDisplayMsg(a.TaskStates)
}
// ComparableResources returns the resources on the allocation
// handling upgrade paths. After 0.11 calls to this should be replaced with:
// alloc.AllocatedResources.Comparable()
//
// Returns nil when the allocation carries no resource information at all.
//
// COMPAT(0.11): Remove in 0.11
func (a *Allocation) ComparableResources() *ComparableResources {
	// Alloc already has 0.9+ behavior
	if a.AllocatedResources != nil {
		return a.AllocatedResources.Comparable()
	}

	var resources *Resources
	if a.Resources != nil {
		resources = a.Resources
	} else if a.TaskResources != nil {
		// Aggregate the shared and per-task resources.
		resources = new(Resources)
		resources.Add(a.SharedResources)
		for _, taskResource := range a.TaskResources {
			resources.Add(taskResource)
		}
	}

	// Guard against dereferencing a nil *Resources below when neither
	// Resources nor TaskResources is populated.
	if resources == nil {
		return nil
	}

	// Upgrade path
	return &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: int64(resources.CPU),
			},
			Memory: AllocatedMemoryResources{
				MemoryMB:    int64(resources.MemoryMB),
				MemoryMaxMB: int64(resources.MemoryMaxMB),
			},
			Networks: resources.Networks,
		},
		Shared: AllocatedSharedResources{
			DiskMB: int64(resources.DiskMB),
		},
	}
}
// LookupTask by name from the Allocation. Returns nil if the Job is not set, the
// TaskGroup does not exist, or the task name cannot be found.
func (a *Allocation) LookupTask(name string) *Task {
	if a.Job == nil {
		return nil
	}

	if tg := a.Job.LookupTaskGroup(a.TaskGroup); tg != nil {
		return tg.LookupTask(name)
	}
	return nil
}
// Stub returns a list stub for the allocation. The optional fields argument
// controls which of the larger, normally-omitted fields are included.
// NOTE(review): assumes a.Job is non-nil (a.Job.Type / a.Job.Version are read
// unconditionally) — confirm callers guarantee this.
func (a *Allocation) Stub(fields *AllocStubFields) *AllocListStub {
	s := &AllocListStub{
		ID:                    a.ID,
		EvalID:                a.EvalID,
		Name:                  a.Name,
		Namespace:             a.Namespace,
		NodeID:                a.NodeID,
		NodeName:              a.NodeName,
		JobID:                 a.JobID,
		JobType:               a.Job.Type,
		JobVersion:            a.Job.Version,
		TaskGroup:             a.TaskGroup,
		DesiredStatus:         a.DesiredStatus,
		DesiredDescription:    a.DesiredDescription,
		ClientStatus:          a.ClientStatus,
		ClientDescription:     a.ClientDescription,
		DesiredTransition:     a.DesiredTransition,
		TaskStates:            a.TaskStates,
		DeploymentStatus:      a.DeploymentStatus,
		FollowupEvalID:        a.FollowupEvalID,
		RescheduleTracker:     a.RescheduleTracker,
		PreemptedAllocations:  a.PreemptedAllocations,
		PreemptedByAllocation: a.PreemptedByAllocation,
		CreateIndex:           a.CreateIndex,
		ModifyIndex:           a.ModifyIndex,
		CreateTime:            a.CreateTime,
		ModifyTime:            a.ModifyTime,
	}

	if fields != nil {
		// AllocatedResources is only attached on explicit request.
		if fields.Resources {
			s.AllocatedResources = a.AllocatedResources
		}
		// TaskStates are included by default; drop them if the caller opted out.
		if !fields.TaskStates {
			s.TaskStates = nil
		}
	}

	return s
}
// AllocationDiff converts an Allocation type to an AllocationDiff type
// If at any time, modification are made to AllocationDiff so that an
// Allocation can no longer be safely converted to AllocationDiff,
// this method should be changed accordingly.
func (a *Allocation) AllocationDiff() *AllocationDiff {
	// Safe pointer conversion: AllocationDiff is a named type with the
	// identical underlying struct as Allocation, so no data is copied.
	return (*AllocationDiff)(a)
}
// Expired determines whether an allocation has exceeded its MaxClientDisconnect
// duration relative to the passed time stamp.
func (a *Allocation) Expired(now time.Time) bool {
	if a == nil || a.Job == nil {
		return false
	}

	// Only allocations in the unknown client state can expire.
	if a.ClientStatus != AllocClientStatusUnknown {
		return false
	}

	// Without a recorded transition into unknown there is no reference point.
	lastUnknown := a.LastUnknown()
	if lastUnknown.IsZero() {
		return false
	}

	tg := a.Job.LookupTaskGroup(a.TaskGroup)
	if tg == nil || tg.MaxClientDisconnect == nil {
		return false
	}

	// Expired at or after the deadline (i.e. not strictly before it).
	expiry := lastUnknown.Add(*tg.MaxClientDisconnect)
	return !now.UTC().Before(expiry)
}
// LastUnknown returns the timestamp for the last time the allocation
// transitioned into the unknown client status.
func (a *Allocation) LastUnknown() time.Time {
	var last time.Time

	for _, s := range a.AllocStates {
		// Only client-status transitions into "unknown" are relevant.
		if s.Field != AllocStateFieldClientStatus ||
			s.Value != AllocClientStatusUnknown {
			continue
		}
		if last.IsZero() || last.Before(s.Time) {
			last = s.Time
		}
	}

	return last.UTC()
}
// Reconnected determines whether a reconnect event has occurred for any task
// and whether that event occurred within the allowable duration specified by MaxClientDisconnect.
func (a *Allocation) Reconnected() (bool, bool) {
	// Find the most recent TaskClientReconnected event across all tasks.
	var lastReconnect time.Time
	for _, ts := range a.TaskStates {
		for _, ev := range ts.Events {
			if ev.Type != TaskClientReconnected {
				continue
			}
			when := time.Unix(0, ev.Time).UTC()
			if lastReconnect.IsZero() || lastReconnect.Before(when) {
				lastReconnect = when
			}
		}
	}

	// No reconnect event recorded.
	if lastReconnect.IsZero() {
		return false, false
	}

	return true, a.Expired(lastReconnect)
}
// AllocationDiff is another named type for Allocation (to use the same fields),
// which is used to represent the delta for an Allocation. If you need a method
// defined on the allocation, consider whether it should also be defined for
// AllocationDiff (see Allocation.AllocationDiff for the conversion).
type AllocationDiff Allocation
// AllocListStub is used to return a subset of alloc information
type AllocListStub struct {
	ID                 string
	EvalID             string
	Name               string
	Namespace          string
	NodeID             string
	NodeName           string
	JobID              string
	JobType            string
	JobVersion         uint64
	TaskGroup          string
	// AllocatedResources is only populated when explicitly requested via
	// AllocStubFields.Resources (see Allocation.Stub).
	AllocatedResources *AllocatedResources `json:",omitempty"`
	DesiredStatus      string
	DesiredDescription string
	ClientStatus       string
	ClientDescription  string
	DesiredTransition  DesiredTransition
	// TaskStates is included by default but cleared when
	// AllocStubFields.TaskStates is false (see Allocation.Stub).
	TaskStates            map[string]*TaskState
	DeploymentStatus      *AllocDeploymentStatus
	FollowupEvalID        string
	RescheduleTracker     *RescheduleTracker
	PreemptedAllocations  []string
	PreemptedByAllocation string
	// Raft indexes and wall-clock timestamps copied from the Allocation.
	CreateIndex uint64
	ModifyIndex uint64
	CreateTime  int64
	ModifyTime  int64
}
// SetEventDisplayMessages populates the display message if it's not already
// set, a temporary fix to handle old allocations that don't have it. This
// method will be removed in a future release.
func (a *AllocListStub) SetEventDisplayMessages() {
	// Delegates to the shared helper used by both Allocation and AllocListStub.
	setDisplayMsg(a.TaskStates)
}
// setDisplayMsg fills in the display message on every task event of every
// task state in the given map.
func setDisplayMsg(taskStates map[string]*TaskState) {
	for _, state := range taskStates {
		for _, ev := range state.Events {
			ev.PopulateEventDisplayMessage()
		}
	}
}
// AllocStubFields defines which fields are included in the AllocListStub.
type AllocStubFields struct {
	// Resources includes resource-related fields if true.
	Resources bool

	// TaskStates removes the TaskStates field if false (default is to
	// include TaskStates).
	TaskStates bool
}
// NewAllocStubFields returns the default field selection for allocation stubs.
func NewAllocStubFields() *AllocStubFields {
	fields := new(AllocStubFields)
	// Retain task states by default to maintain backward compatibility.
	fields.TaskStates = true
	return fields
}
// AllocMetric is used to track various metrics while attempting
// to make an allocation. These are used to debug a job, or to better
// understand the pressure within the system.
type AllocMetric struct {
	// NodesEvaluated is the number of nodes that were evaluated
	NodesEvaluated int

	// NodesFiltered is the number of nodes filtered due to a constraint
	NodesFiltered int

	// NodesAvailable is the number of nodes available for evaluation per DC.
	NodesAvailable map[string]int

	// ClassFiltered is the number of nodes filtered by class
	ClassFiltered map[string]int

	// ConstraintFiltered is the number of failures caused by constraint
	ConstraintFiltered map[string]int

	// NodesExhausted is the number of nodes skipped due to being
	// exhausted of at least one resource
	NodesExhausted int

	// ClassExhausted is the number of nodes exhausted by class
	ClassExhausted map[string]int

	// DimensionExhausted provides the count by dimension or reason
	DimensionExhausted map[string]int

	// QuotaExhausted provides the exhausted dimensions
	QuotaExhausted []string

	// ResourcesExhausted provides the amount of resources exhausted by task
	// during the allocation placement
	ResourcesExhausted map[string]*Resources

	// Scores is the scores of the final few nodes remaining
	// for placement. The top score is typically selected.
	// Deprecated: Replaced by ScoreMetaData in Nomad 0.9
	Scores map[string]float64

	// ScoreMetaData is a slice of top scoring nodes displayed in the CLI
	ScoreMetaData []*NodeScoreMeta

	// nodeScoreMeta is used to keep scores for a single node id. It is cleared
	// out after we receive normalized score during the last step of the
	// scoring stack. (Unexported: internal scratch state for ScoreNode.)
	nodeScoreMeta *NodeScoreMeta

	// topScores is used to maintain a heap of the top K nodes with
	// the highest normalized score. (Unexported: see ScoreNode /
	// PopulateScoreMetaData.)
	topScores *kheap.ScoreHeap

	// AllocationTime is a measure of how long the allocation
	// attempt took. This can affect performance and SLAs.
	AllocationTime time.Duration

	// CoalescedFailures indicates the number of other
	// allocations that were coalesced into this failed allocation.
	// This is to prevent creating many failed allocations for a
	// single task group.
	CoalescedFailures int
}
// Copy returns a deep copy of the metric. The unexported scratch fields
// (nodeScoreMeta, topScores) are shared via the shallow struct copy;
// map/slice fields are duplicated so the copy can be mutated independently.
func (a *AllocMetric) Copy() *AllocMetric {
	if a == nil {
		return nil
	}
	na := new(AllocMetric)
	*na = *a
	na.NodesAvailable = helper.CopyMapStringInt(na.NodesAvailable)
	na.ClassFiltered = helper.CopyMapStringInt(na.ClassFiltered)
	na.ConstraintFiltered = helper.CopyMapStringInt(na.ConstraintFiltered)
	na.ClassExhausted = helper.CopyMapStringInt(na.ClassExhausted)
	na.DimensionExhausted = helper.CopyMapStringInt(na.DimensionExhausted)
	na.QuotaExhausted = helper.CopySliceString(na.QuotaExhausted)
	na.Scores = helper.CopyMapStringFloat64(na.Scores)
	na.ScoreMetaData = CopySliceNodeScoreMeta(na.ScoreMetaData)
	return na
}
// EvaluateNode increments the count of nodes that have been evaluated.
func (a *AllocMetric) EvaluateNode() {
	a.NodesEvaluated++
}
// FilterNode records that a node was filtered out, attributing the filtering
// to the node's class and to the failing constraint when they are known.
func (a *AllocMetric) FilterNode(node *Node, constraint string) {
	a.NodesFiltered++

	// Per-class counter, lazily allocated.
	if node != nil && node.NodeClass != "" {
		if a.ClassFiltered == nil {
			a.ClassFiltered = make(map[string]int)
		}
		a.ClassFiltered[node.NodeClass]++
	}

	// Per-constraint counter, lazily allocated.
	if constraint != "" {
		if a.ConstraintFiltered == nil {
			a.ConstraintFiltered = make(map[string]int)
		}
		a.ConstraintFiltered[constraint]++
	}
}
// ExhaustedNode records that a node was skipped due to resource exhaustion,
// attributing the exhaustion to the node's class and the dimension when known.
func (a *AllocMetric) ExhaustedNode(node *Node, dimension string) {
	a.NodesExhausted++

	// Per-class counter, lazily allocated.
	if node != nil && node.NodeClass != "" {
		if a.ClassExhausted == nil {
			a.ClassExhausted = make(map[string]int)
		}
		a.ClassExhausted[node.NodeClass]++
	}

	// Per-dimension counter, lazily allocated.
	if dimension != "" {
		if a.DimensionExhausted == nil {
			a.DimensionExhausted = make(map[string]int)
		}
		a.DimensionExhausted[dimension]++
	}
}
// ExhaustQuota appends the given quota dimensions to the exhausted set.
// NOTE(review): the explicit make keeps QuotaExhausted non-nil once this is
// called, even for an empty dimensions list — presumably so it serializes as
// [] rather than null; confirm before simplifying to a bare append.
func (a *AllocMetric) ExhaustQuota(dimensions []string) {
	if a.QuotaExhausted == nil {
		a.QuotaExhausted = make([]string, 0, len(dimensions))
	}

	a.QuotaExhausted = append(a.QuotaExhausted, dimensions...)
}
// ExhaustResources updates the amount of resources exhausted for the
// allocation because of the given task group.
func (a *AllocMetric) ExhaustResources(tg *TaskGroup) {
	// Nothing to attribute if no dimension has been recorded as exhausted.
	if a.DimensionExhausted == nil {
		return
	}

	if a.ResourcesExhausted == nil {
		a.ResourcesExhausted = make(map[string]*Resources)
	}

	// The exhausted dimensions are the same for every task; look them up once.
	memoryExhausted := a.DimensionExhausted["memory"] > 0
	cpuExhausted := a.DimensionExhausted["cpu"] > 0

	for _, task := range tg.Tasks {
		res := a.ResourcesExhausted[task.Name]
		if res == nil {
			res = &Resources{}
		}

		if memoryExhausted {
			res.MemoryMB += task.Resources.MemoryMB
		}
		if cpuExhausted {
			res.CPU += task.Resources.CPU
		}

		a.ResourcesExhausted[task.Name] = res
	}
}
// ScoreNode is used to gather top K scoring nodes in a heap. Individual scorer
// results are accumulated per node; when the normalized score arrives (the
// last step of the scoring stack) the accumulated entry is pushed onto the
// top-K heap and the scratch entry is cleared.
func (a *AllocMetric) ScoreNode(node *Node, name string, score float64) {
	// Create nodeScoreMeta lazily if it's the first time or if it's a new node.
	// NOTE(review): this assumes scores for a single node arrive contiguously;
	// an interleaved node ID would silently discard the partial entry.
	if a.nodeScoreMeta == nil || a.nodeScoreMeta.NodeID != node.ID {
		a.nodeScoreMeta = &NodeScoreMeta{
			NodeID: node.ID,
			Scores: make(map[string]float64),
		}
	}
	if name == NormScorerName {
		a.nodeScoreMeta.NormScore = score
		// Once we have the normalized score we can push to the heap
		// that tracks top K by normalized score

		// Create the heap if its not there already
		if a.topScores == nil {
			a.topScores = kheap.NewScoreHeap(MaxRetainedNodeScores)
		}
		heap.Push(a.topScores, a.nodeScoreMeta)

		// Clear out this entry because its now in the heap
		a.nodeScoreMeta = nil
	} else {
		a.nodeScoreMeta.Scores[name] = score
	}
}
// PopulateScoreMetaData populates a map of scorer to scoring metadata
// The map is populated by popping elements from a heap of top K scores
// maintained per scorer
func (a *AllocMetric) PopulateScoreMetaData() {
	// No scores were pushed (ScoreNode never saw a normalized score).
	if a.topScores == nil {
		return
	}

	if a.ScoreMetaData == nil {
		a.ScoreMetaData = make([]*NodeScoreMeta, a.topScores.Len())
	}
	// NOTE(review): if ScoreMetaData is already non-nil but shorter than the
	// heap contents, the indexed write below would panic — confirm callers
	// only invoke this once per metric.
	heapItems := a.topScores.GetItemsReverse()
	for i, item := range heapItems {
		a.ScoreMetaData[i] = item.(*NodeScoreMeta)
	}
}
// MaxNormScore returns the ScoreMetaData entry with the highest normalized
// score.
func (a *AllocMetric) MaxNormScore() *NodeScoreMeta {
	// ScoreMetaData is ordered best-first, so the head holds the max.
	if a != nil && len(a.ScoreMetaData) > 0 {
		return a.ScoreMetaData[0]
	}
	return nil
}
// NodeScoreMeta captures scoring meta data derived from
// different scoring factors.
type NodeScoreMeta struct {
	// NodeID is the node the scores apply to.
	NodeID string
	// Scores maps scorer name to the raw score it produced.
	Scores map[string]float64
	// NormScore is the final normalized score (see ScoreNode).
	NormScore float64
}
// Copy returns a shallow copy of the score metadata (the Scores map is
// shared with the original).
func (s *NodeScoreMeta) Copy() *NodeScoreMeta {
	if s == nil {
		return nil
	}
	dup := *s
	return &dup
}
// String renders the node ID, normalized score, and per-scorer scores
// for human-readable output.
func (s *NodeScoreMeta) String() string {
	return fmt.Sprintf("%s %f %v", s.NodeID, s.NormScore, s.Scores)
}
// Score returns the normalized score; used by the top-K score heap to order
// entries.
func (s *NodeScoreMeta) Score() float64 {
	return s.NormScore
}
// Data returns the metadata itself; used by the top-K score heap to recover
// the stored value.
func (s *NodeScoreMeta) Data() interface{} {
	return s
}
// AllocNetworkStatus captures the status of an allocation's network during runtime.
// Depending on the network mode, an allocation's address may need to be known to other
// systems in Nomad such as service registration.
type AllocNetworkStatus struct {
	// InterfaceName is the name of the network interface used by the alloc.
	InterfaceName string
	// Address is the allocation's network address.
	Address string
	// DNS holds the DNS configuration, if any.
	DNS *DNSConfig
}
// Copy returns a deep copy of the network status.
func (a *AllocNetworkStatus) Copy() *AllocNetworkStatus {
	if a == nil {
		return nil
	}
	// Shallow-copy the value fields, then deep-copy the DNS config.
	dup := *a
	dup.DNS = a.DNS.Copy()
	return &dup
}
// AllocDeploymentStatus captures the status of the allocation as part of the
// deployment. This can include things like if the allocation has been marked as
// healthy.
type AllocDeploymentStatus struct {
	// Healthy marks whether the allocation has been marked healthy or unhealthy
	// as part of a deployment. It can be unset (nil) if it has neither been
	// marked healthy or unhealthy.
	Healthy *bool

	// Timestamp is the time at which the health status was set.
	Timestamp time.Time

	// Canary marks whether the allocation is a canary or not. A canary that has
	// been promoted will have this field set to false.
	Canary bool

	// ModifyIndex is the raft index in which the deployment status was last
	// changed.
	ModifyIndex uint64
}
// HasHealth returns true if the allocation has its health set.
func (a *AllocDeploymentStatus) HasHealth() bool {
	if a == nil {
		return false
	}
	return a.Healthy != nil
}
// IsHealthy returns if the allocation is marked as healthy as part of a
// deployment. A nil receiver or unset health reads as unhealthy.
func (a *AllocDeploymentStatus) IsHealthy() bool {
	return a != nil && a.Healthy != nil && *a.Healthy
}
// IsUnhealthy returns if the allocation is marked as unhealthy as part of a
// deployment. A nil receiver or unset health reads as not-unhealthy.
func (a *AllocDeploymentStatus) IsUnhealthy() bool {
	return a != nil && a.Healthy != nil && !*a.Healthy
}
// IsCanary returns if the allocation is marked as a canary.
func (a *AllocDeploymentStatus) IsCanary() bool {
	return a != nil && a.Canary
}
// Copy returns a deep copy of the deployment status, duplicating the
// Healthy pointer so the copy can be mutated independently.
func (a *AllocDeploymentStatus) Copy() *AllocDeploymentStatus {
	if a == nil {
		return nil
	}

	c := *a
	if a.Healthy != nil {
		healthy := *a.Healthy
		c.Healthy = &healthy
	}
	return &c
}
// Evaluation status values used in Evaluation.Status (see TerminalStatus,
// ShouldEnqueue, and ShouldBlock for how each status is interpreted).
const (
	EvalStatusBlocked   = "blocked"
	EvalStatusPending   = "pending"
	EvalStatusComplete  = "complete"
	EvalStatusFailed    = "failed"
	EvalStatusCancelled = "canceled"
)
// EvalTrigger* values describe what caused an evaluation to be created;
// they are stored in Evaluation.TriggeredBy.
const (
	EvalTriggerJobRegister          = "job-register"
	EvalTriggerJobDeregister        = "job-deregister"
	EvalTriggerPeriodicJob          = "periodic-job"
	EvalTriggerNodeDrain            = "node-drain"
	EvalTriggerNodeUpdate           = "node-update"
	EvalTriggerAllocStop            = "alloc-stop"
	EvalTriggerScheduled            = "scheduled"
	EvalTriggerRollingUpdate        = "rolling-update"
	EvalTriggerDeploymentWatcher    = "deployment-watcher"
	EvalTriggerFailedFollowUp       = "failed-follow-up"
	EvalTriggerMaxPlans             = "max-plan-attempts"
	EvalTriggerRetryFailedAlloc     = "alloc-failure"
	EvalTriggerQueuedAllocs         = "queued-allocs"
	EvalTriggerPreemption           = "preemption"
	EvalTriggerScaling              = "job-scaling"
	EvalTriggerMaxDisconnectTimeout = "max-disconnect-timeout"
	EvalTriggerReconnect            = "reconnect"
)
// CoreJob* values identify the internal garbage-collection jobs.
const (
	// CoreJobEvalGC is used for the garbage collection of evaluations
	// and allocations. We periodically scan evaluations in a terminal state,
	// in which all the corresponding allocations are also terminal. We
	// delete these out of the system to bound the state.
	CoreJobEvalGC = "eval-gc"

	// CoreJobNodeGC is used for the garbage collection of failed nodes.
	// We periodically scan nodes in a terminal state, and if they have no
	// corresponding allocations we delete these out of the system.
	CoreJobNodeGC = "node-gc"

	// CoreJobJobGC is used for the garbage collection of eligible jobs. We
	// periodically scan garbage collectible jobs and check if both their
	// evaluations and allocations are terminal. If so, we delete these out of
	// the system.
	CoreJobJobGC = "job-gc"

	// CoreJobDeploymentGC is used for the garbage collection of eligible
	// deployments. We periodically scan garbage collectible deployments and
	// check if they are terminal. If so, we delete these out of the system.
	CoreJobDeploymentGC = "deployment-gc"

	// CoreJobCSIVolumeClaimGC is used for the garbage collection of CSI
	// volume claims. We periodically scan volumes to see if no allocs are
	// claiming them. If so, we unclaim the volume.
	CoreJobCSIVolumeClaimGC = "csi-volume-claim-gc"

	// CoreJobCSIPluginGC is used for the garbage collection of CSI plugins.
	// We periodically scan plugins to see if they have no associated volumes
	// or allocs running them. If so, we delete the plugin.
	CoreJobCSIPluginGC = "csi-plugin-gc"

	// CoreJobOneTimeTokenGC is used for the garbage collection of one-time
	// tokens. We periodically scan for expired tokens and delete them.
	CoreJobOneTimeTokenGC = "one-time-token-gc"

	// CoreJobForceGC is used to force garbage collection of all GCable objects.
	CoreJobForceGC = "force-gc"
)
// Evaluation is used anytime we need to apply business logic as a result
// of a change to our desired state (job specification) or the emergent state
// (registered nodes). When the inputs change, we need to "evaluate" them,
// potentially taking action (allocation of work) or doing nothing if the state
// of the world does not require it.
type Evaluation struct {
	// msgpack omit empty fields during serialization
	_struct bool `codec:",omitempty"` // nolint: structcheck

	// ID is a randomly generated UUID used for this evaluation. This
	// is assigned upon the creation of the evaluation.
	ID string

	// Namespace is the namespace the evaluation is created in
	Namespace string

	// Priority is used to control scheduling importance and if this job
	// can preempt other jobs.
	Priority int

	// Type is used to control which schedulers are available to handle
	// this evaluation.
	Type string

	// TriggeredBy is used to give some insight into why this Eval
	// was created. (Job change, node failure, alloc failure, etc).
	// See the EvalTrigger* constants.
	TriggeredBy string

	// JobID is the job this evaluation is scoped to. Evaluations cannot
	// be run in parallel for a given JobID, so we serialize on this.
	JobID string

	// JobModifyIndex is the modify index of the job at the time
	// the evaluation was created
	JobModifyIndex uint64

	// NodeID is the node that was affected triggering the evaluation.
	NodeID string

	// NodeModifyIndex is the modify index of the node at the time
	// the evaluation was created
	NodeModifyIndex uint64

	// DeploymentID is the ID of the deployment that triggered the evaluation.
	DeploymentID string

	// Status of the evaluation. See the EvalStatus* constants.
	Status string

	// StatusDescription is meant to provide more human useful information
	StatusDescription string

	// Wait is a minimum wait time for running the eval. This is used to
	// support a rolling upgrade in versions prior to 0.7.0
	// Deprecated
	Wait time.Duration

	// WaitUntil is the time when this eval should be run. This is used to
	// supported delayed rescheduling of failed allocations, and delayed
	// stopping of allocations that are configured with max_client_disconnect.
	WaitUntil time.Time

	// NextEval is the evaluation ID for the eval created to do a followup.
	// This is used to support rolling upgrades and failed-follow-up evals, where
	// we need a chain of evaluations.
	NextEval string

	// PreviousEval is the evaluation ID for the eval creating this one to do a followup.
	// This is used to support rolling upgrades and failed-follow-up evals, where
	// we need a chain of evaluations.
	PreviousEval string

	// BlockedEval is the evaluation ID for a created blocked eval. A
	// blocked eval will be created if all allocations could not be placed due
	// to constraints or lacking resources.
	BlockedEval string

	// RelatedEvals is a list of all the evaluations that are related (next,
	// previous, or blocked) to this one. It may be nil if not requested.
	RelatedEvals []*EvaluationStub

	// FailedTGAllocs are task groups which have allocations that could not be
	// made, but the metrics are persisted so that the user can use the feedback
	// to determine the cause.
	FailedTGAllocs map[string]*AllocMetric

	// ClassEligibility tracks computed node classes that have been explicitly
	// marked as eligible or ineligible.
	ClassEligibility map[string]bool

	// QuotaLimitReached marks whether a quota limit was reached for the
	// evaluation.
	QuotaLimitReached string

	// EscapedComputedClass marks whether the job has constraints that are not
	// captured by computed node classes.
	EscapedComputedClass bool

	// AnnotatePlan triggers the scheduler to provide additional annotations
	// during the evaluation. This should not be set during normal operations.
	AnnotatePlan bool

	// QueuedAllocations is the number of unplaced allocations at the time the
	// evaluation was processed. The map is keyed by Task Group names.
	QueuedAllocations map[string]int

	// LeaderACL provides the ACL token to use when issuing RPCs back to the
	// leader. This will be a valid management token as long as the leader is
	// active. This should not ever be exposed via the API.
	LeaderACL string

	// SnapshotIndex is the Raft index of the snapshot used to process the
	// evaluation. The index will either be set when it has gone through the
	// scheduler or if a blocked evaluation is being created. The index is set
	// in this case so we can determine if an early unblocking is required since
	// capacity has changed since the evaluation was created. This can result in
	// the SnapshotIndex being less than the CreateIndex.
	SnapshotIndex uint64

	// Raft Indexes
	CreateIndex uint64
	ModifyIndex uint64

	// Wall-clock timestamps in Unix nanoseconds.
	CreateTime int64
	ModifyTime int64
}
// EvaluationStub is a subset of Evaluation fields, returned by
// Evaluation.Stub (e.g. for RelatedEvals).
type EvaluationStub struct {
	ID                string
	Namespace         string
	Priority          int
	Type              string
	TriggeredBy       string
	JobID             string
	NodeID            string
	DeploymentID      string
	Status            string
	StatusDescription string
	WaitUntil         time.Time
	NextEval          string
	PreviousEval      string
	BlockedEval       string
	CreateIndex       uint64
	ModifyIndex       uint64
	CreateTime        int64
	ModifyTime        int64
}
// GetID implements the IDGetter interface, required for pagination.
// A nil evaluation yields the empty string.
func (e *Evaluation) GetID() string {
	if e != nil {
		return e.ID
	}
	return ""
}
// GetCreateIndex implements the CreateIndexGetter interface, required for
// pagination. A nil evaluation yields zero.
func (e *Evaluation) GetCreateIndex() uint64 {
	if e != nil {
		return e.CreateIndex
	}
	return 0
}
// TerminalStatus returns if the current status is terminal and
// will no longer transition.
func (e *Evaluation) TerminalStatus() bool {
	switch e.Status {
	case EvalStatusComplete, EvalStatusFailed, EvalStatusCancelled:
		return true
	}
	return false
}
// GoString renders a concise debug representation of the evaluation.
func (e *Evaluation) GoString() string {
	return fmt.Sprintf("<Eval %q JobID: %q Namespace: %q>", e.ID, e.JobID, e.Namespace)
}
// RelatedIDs returns the non-empty IDs of the evaluations directly related
// to this one (next, previous, and blocked).
func (e *Evaluation) RelatedIDs() []string {
	if e == nil {
		return nil
	}

	related := make([]string, 0, 3)
	for _, id := range []string{e.NextEval, e.PreviousEval, e.BlockedEval} {
		if id != "" {
			related = append(related, id)
		}
	}
	return related
}
// Stub returns an EvaluationStub containing the subset of this evaluation's
// fields suitable for list responses. Returns nil for a nil evaluation.
func (e *Evaluation) Stub() *EvaluationStub {
	if e == nil {
		return nil
	}

	return &EvaluationStub{
		ID:                e.ID,
		Namespace:         e.Namespace,
		Priority:          e.Priority,
		Type:              e.Type,
		TriggeredBy:       e.TriggeredBy,
		JobID:             e.JobID,
		NodeID:            e.NodeID,
		DeploymentID:      e.DeploymentID,
		Status:            e.Status,
		StatusDescription: e.StatusDescription,
		WaitUntil:         e.WaitUntil,
		NextEval:          e.NextEval,
		PreviousEval:      e.PreviousEval,
		BlockedEval:       e.BlockedEval,
		CreateIndex:       e.CreateIndex,
		ModifyIndex:       e.ModifyIndex,
		CreateTime:        e.CreateTime,
		ModifyTime:        e.ModifyTime,
	}
}
// Copy returns a copy of the evaluation with its own ClassEligibility,
// FailedTGAllocs, and QueuedAllocations maps. Other reference fields
// (e.g. RelatedEvals) are shared via the shallow struct copy.
func (e *Evaluation) Copy() *Evaluation {
	if e == nil {
		return nil
	}

	ne := new(Evaluation)
	*ne = *e

	// Duplicate ClassEligibility.
	if e.ClassEligibility != nil {
		ne.ClassEligibility = make(map[string]bool, len(e.ClassEligibility))
		for class, elig := range e.ClassEligibility {
			ne.ClassEligibility[class] = elig
		}
	}

	// Duplicate FailedTGAllocs, deep-copying each metric.
	if e.FailedTGAllocs != nil {
		ne.FailedTGAllocs = make(map[string]*AllocMetric, len(e.FailedTGAllocs))
		for tg, metric := range e.FailedTGAllocs {
			ne.FailedTGAllocs[tg] = metric.Copy()
		}
	}

	// Duplicate QueuedAllocations.
	if e.QueuedAllocations != nil {
		ne.QueuedAllocations = make(map[string]int, len(e.QueuedAllocations))
		for tg, num := range e.QueuedAllocations {
			ne.QueuedAllocations[tg] = num
		}
	}

	return ne
}
// ShouldEnqueue checks if a given evaluation should be enqueued into the
// eval_broker. Panics on an unknown status so new statuses cannot be
// silently mishandled.
func (e *Evaluation) ShouldEnqueue() bool {
	switch e.Status {
	case EvalStatusPending:
		return true
	case EvalStatusComplete, EvalStatusFailed, EvalStatusBlocked, EvalStatusCancelled:
		return false
	}
	panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
}
// ShouldBlock checks if a given evaluation should be entered into the blocked
// eval tracker. Panics on an unknown status so new statuses cannot be
// silently mishandled.
func (e *Evaluation) ShouldBlock() bool {
	switch e.Status {
	case EvalStatusBlocked:
		return true
	case EvalStatusComplete, EvalStatusFailed, EvalStatusPending, EvalStatusCancelled:
		return false
	}
	panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
}
// MakePlan is used to make a plan from the given evaluation
// for a given Job. The node maps are pre-initialized so callers can append
// without nil checks; AllAtOnce is copied from the job when one is provided.
func (e *Evaluation) MakePlan(j *Job) *Plan {
	p := &Plan{
		EvalID:          e.ID,
		Priority:        e.Priority,
		Job:             j,
		NodeUpdate:      make(map[string][]*Allocation),
		NodeAllocation:  make(map[string][]*Allocation),
		NodePreemptions: make(map[string][]*Allocation),
	}
	if j != nil {
		p.AllAtOnce = j.AllAtOnce
	}
	return p
}
// NextRollingEval creates an evaluation to followup this eval for rolling updates.
// The new eval inherits the namespace, priority, type, and job identity, is
// chained via PreviousEval, and starts pending with the given minimum wait.
func (e *Evaluation) NextRollingEval(wait time.Duration) *Evaluation {
	now := time.Now().UTC().UnixNano()
	return &Evaluation{
		ID:             uuid.Generate(),
		Namespace:      e.Namespace,
		Priority:       e.Priority,
		Type:           e.Type,
		TriggeredBy:    EvalTriggerRollingUpdate,
		JobID:          e.JobID,
		JobModifyIndex: e.JobModifyIndex,
		Status:         EvalStatusPending,
		Wait:           wait,
		PreviousEval:   e.ID,
		CreateTime:     now,
		ModifyTime:     now,
	}
}
// CreateBlockedEval creates a blocked evaluation to followup this eval to place any
// failed allocations. It takes the classes marked explicitly eligible or
// ineligible, whether the job has escaped computed node classes and whether the
// quota limit was reached. The new eval is chained via PreviousEval and starts
// in the blocked status.
func (e *Evaluation) CreateBlockedEval(classEligibility map[string]bool,
	escaped bool, quotaReached string, failedTGAllocs map[string]*AllocMetric) *Evaluation {
	now := time.Now().UTC().UnixNano()
	return &Evaluation{
		ID:                   uuid.Generate(),
		Namespace:            e.Namespace,
		Priority:             e.Priority,
		Type:                 e.Type,
		TriggeredBy:          EvalTriggerQueuedAllocs,
		JobID:                e.JobID,
		JobModifyIndex:       e.JobModifyIndex,
		Status:               EvalStatusBlocked,
		PreviousEval:         e.ID,
		FailedTGAllocs:       failedTGAllocs,
		ClassEligibility:     classEligibility,
		EscapedComputedClass: escaped,
		QuotaLimitReached:    quotaReached,
		CreateTime:           now,
		ModifyTime:           now,
	}
}
// CreateFailedFollowUpEval creates a follow up evaluation when the current one
// has been marked as failed because it has hit the delivery limit and will not
// be retried by the eval_broker. Callers should copy the created eval's ID to
// into the old eval's NextEval field. The new eval is chained via PreviousEval
// and starts pending with the given minimum wait.
func (e *Evaluation) CreateFailedFollowUpEval(wait time.Duration) *Evaluation {
	now := time.Now().UTC().UnixNano()
	return &Evaluation{
		ID:             uuid.Generate(),
		Namespace:      e.Namespace,
		Priority:       e.Priority,
		Type:           e.Type,
		TriggeredBy:    EvalTriggerFailedFollowUp,
		JobID:          e.JobID,
		JobModifyIndex: e.JobModifyIndex,
		Status:         EvalStatusPending,
		Wait:           wait,
		PreviousEval:   e.ID,
		CreateTime:     now,
		ModifyTime:     now,
	}
}
// UpdateModifyTime takes into account that clocks on different servers may be
// slightly out of sync. Even in case of a leader change, this method will
// guarantee that ModifyTime will always be after CreateTime.
func (e *Evaluation) UpdateModifyTime() {
	now := time.Now().UTC().UnixNano()
	if now > e.CreateTime {
		e.ModifyTime = now
	} else {
		// Clock skew: force a strictly increasing timestamp.
		e.ModifyTime = e.CreateTime + 1
	}
}
// Plan is used to submit a commit plan for task allocations. These
// are submitted to the leader which verifies that resources have
// not been overcommitted before admitting the plan.
type Plan struct {
	// msgpack omit empty fields during serialization
	_struct bool `codec:",omitempty"` // nolint: structcheck

	// EvalID is the evaluation ID this plan is associated with
	EvalID string

	// EvalToken is used to prevent a split-brain processing of
	// an evaluation. There should only be a single scheduler running
	// an Eval at a time, but this could be violated after a leadership
	// transition. This unique token is used to reject plans that are
	// being submitted from a different leader.
	EvalToken string

	// Priority is the priority of the upstream job
	Priority int

	// AllAtOnce is used to control if incremental scheduling of task groups
	// is allowed or if we must do a gang scheduling of the entire job.
	// If this is false, a plan may be partially applied. Otherwise, the
	// entire plan must be able to make progress.
	AllAtOnce bool

	// Job is the parent job of all the allocations in the Plan.
	// Since a Plan only involves a single Job, we can reduce the size
	// of the plan by only including it once.
	Job *Job

	// NodeUpdate contains all the allocations to be stopped or evicted for
	// each node.
	NodeUpdate map[string][]*Allocation

	// NodeAllocation contains all the allocations for each node.
	// The evicts must be considered prior to the allocations.
	NodeAllocation map[string][]*Allocation

	// Annotations contains annotations by the scheduler to be used by operators
	// to understand the decisions made by the scheduler.
	Annotations *PlanAnnotations

	// Deployment is the deployment created or updated by the scheduler that
	// should be applied by the planner.
	Deployment *Deployment

	// DeploymentUpdates is a set of status updates to apply to the given
	// deployments. This allows the scheduler to cancel any unneeded deployment
	// because the job is stopped or the update block is removed.
	DeploymentUpdates []*DeploymentStatusUpdate

	// NodePreemptions is a map from node id to a set of allocations from other
	// lower priority jobs that are preempted. Preempted allocations are marked
	// as evicted.
	NodePreemptions map[string][]*Allocation

	// SnapshotIndex is the Raft index of the snapshot used to create the
	// Plan. The leader will wait to evaluate the plan until its StateStore
	// has reached at least this index.
	SnapshotIndex uint64
}
// GoString renders a compact human-readable summary of the plan for debug and
// log output, truncating IDs to their first 8 characters.
// NOTE(review): the [:8] slicing assumes IDs are at least 8 characters
// (UUID-length); a shorter ID would panic — confirm before reusing elsewhere.
func (p *Plan) GoString() string {
	out := fmt.Sprintf("(eval %s", p.EvalID[:8])
	if p.Job != nil {
		out += fmt.Sprintf(", job %s", p.Job.ID)
	}

	if p.Deployment != nil {
		out += fmt.Sprintf(", deploy %s", p.Deployment.ID[:8])
	}

	// Allocations to be stopped or evicted, grouped per node.
	if len(p.NodeUpdate) > 0 {
		out += ", NodeUpdates: "
		for node, allocs := range p.NodeUpdate {
			out += fmt.Sprintf("(node[%s]", node[:8])
			for _, alloc := range allocs {
				out += fmt.Sprintf(" (%s stop/evict)", alloc.ID[:8])
			}
			out += ")"
		}
	}

	// New or updated allocations, grouped per node.
	if len(p.NodeAllocation) > 0 {
		out += ", NodeAllocations: "
		for node, allocs := range p.NodeAllocation {
			out += fmt.Sprintf("(node[%s]", node[:8])
			for _, alloc := range allocs {
				out += fmt.Sprintf(" (%s %s %s)",
					alloc.ID[:8], alloc.Name, alloc.DesiredStatus,
				)
			}
			out += ")"
		}
	}

	// Lower-priority allocations preempted by this plan, grouped per node.
	if len(p.NodePreemptions) > 0 {
		out += ", NodePreemptions: "
		for node, allocs := range p.NodePreemptions {
			out += fmt.Sprintf("(node[%s]", node[:8])
			for _, alloc := range allocs {
				out += fmt.Sprintf(" (%s %s %s)",
					alloc.ID[:8], alloc.Name, alloc.DesiredStatus,
				)
			}
			out += ")"
		}
	}

	if len(p.DeploymentUpdates) > 0 {
		out += ", DeploymentUpdates: "
		for _, dupdate := range p.DeploymentUpdates {
			out += fmt.Sprintf("(%s %s)",
				dupdate.DeploymentID[:8], dupdate.Status)
		}
	}

	if p.Annotations != nil {
		out += ", Annotations: "
		for tg, updates := range p.Annotations.DesiredTGUpdates {
			out += fmt.Sprintf("(update[%s] %v)", tg, updates)
		}
		for _, preempted := range p.Annotations.PreemptedAllocs {
			out += fmt.Sprintf("(preempt %s)", preempted.ID[:8])
		}
	}

	out += ")"
	return out
}
// AppendStoppedAlloc marks an allocation to be stopped. The clientStatus of the
// allocation may be optionally set by passing in a non-empty value.
func (p *Plan) AppendStoppedAlloc(alloc *Allocation, desiredDesc, clientStatus, followupEvalID string) {
	// Shallow copy so the caller's allocation is not mutated.
	newAlloc := new(Allocation)
	*newAlloc = *alloc

	// If the job is not set in the plan we are deregistering a job so we
	// extract the job from the allocation.
	if p.Job == nil && newAlloc.Job != nil {
		p.Job = newAlloc.Job
	}

	// Normalize the job
	newAlloc.Job = nil

	// Strip the resources as it can be rebuilt.
	newAlloc.Resources = nil

	newAlloc.DesiredStatus = AllocDesiredStatusStop
	newAlloc.DesiredDescription = desiredDesc

	if clientStatus != "" {
		newAlloc.ClientStatus = clientStatus
	}

	// NOTE(review): AppendState runs even when clientStatus is "" —
	// presumably intentional so the transition is always recorded; confirm.
	newAlloc.AppendState(AllocStateFieldClientStatus, clientStatus)

	if followupEvalID != "" {
		newAlloc.FollowupEvalID = followupEvalID
	}

	// Queue the stop under the allocation's node.
	node := alloc.NodeID
	existing := p.NodeUpdate[node]
	p.NodeUpdate[node] = append(existing, newAlloc)
}
// AppendPreemptedAlloc is used to append an allocation that's being preempted to the plan.
// To minimize the size of the plan, this only sets a minimal set of fields in the allocation
func (p *Plan) AppendPreemptedAlloc(alloc *Allocation, preemptingAllocID string) {
	// Build a stripped-down allocation carrying only what the plan
	// applier needs to process the preemption.
	newAlloc := &Allocation{
		ID:                    alloc.ID,
		JobID:                 alloc.JobID,
		Namespace:             alloc.Namespace,
		DesiredStatus:         AllocDesiredStatusEvict,
		PreemptedByAllocation: preemptingAllocID,
		DesiredDescription:    fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocID),
	}

	// TaskResources are needed by the plan applier to check if allocations fit
	// after removing preempted allocations
	if alloc.AllocatedResources != nil {
		newAlloc.AllocatedResources = alloc.AllocatedResources
	} else {
		// COMPAT Remove in version 0.11
		newAlloc.TaskResources = alloc.TaskResources
		newAlloc.SharedResources = alloc.SharedResources
	}

	// Append this alloc to the preemption slice for its node.
	p.NodePreemptions[alloc.NodeID] = append(p.NodePreemptions[alloc.NodeID], newAlloc)
}
// AppendUnknownAlloc marks an allocation as unknown.
// NOTE(review): unlike AppendStoppedAlloc, this mutates the caller's alloc
// in place (Resources is nilled) rather than copying — confirm callers
// expect that.
func (p *Plan) AppendUnknownAlloc(alloc *Allocation) {
	// Strip the resources as they can be rebuilt.
	alloc.Resources = nil

	existing := p.NodeAllocation[alloc.NodeID]
	p.NodeAllocation[alloc.NodeID] = append(existing, alloc)
}
// PopUpdate removes the most recently appended stop/evict entry for the
// allocation's node, but only when that entry refers to the same alloc ID.
func (p *Plan) PopUpdate(alloc *Allocation) {
	updates := p.NodeUpdate[alloc.NodeID]
	last := len(updates) - 1
	if last < 0 || updates[last].ID != alloc.ID {
		return
	}

	if updates = updates[:last]; len(updates) == 0 {
		// Drop the key entirely instead of keeping an empty slice.
		delete(p.NodeUpdate, alloc.NodeID)
		return
	}
	p.NodeUpdate[alloc.NodeID] = updates
}
// AppendAlloc appends the alloc to the plan allocations.
// Uses the passed job if explicitly passed, otherwise
// it is assumed the alloc will use the plan Job version.
func (p *Plan) AppendAlloc(alloc *Allocation, job *Job) {
	alloc.Job = job
	p.NodeAllocation[alloc.NodeID] = append(p.NodeAllocation[alloc.NodeID], alloc)
}
// IsNoOp checks if this plan would do nothing
func (p *Plan) IsNoOp() bool {
	// A plan is a no-op only when it carries no updates, no allocations,
	// and no deployment work of any kind.
	return p.Deployment == nil &&
		len(p.NodeUpdate) == 0 &&
		len(p.NodeAllocation) == 0 &&
		len(p.DeploymentUpdates) == 0
}
// NormalizeAllocations normalizes allocations to remove fields that can
// be fetched from the MemDB instead of sending over the wire
func (p *Plan) NormalizeAllocations() {
	// Stops/evicts only need the ID plus the fields being changed.
	for _, allocs := range p.NodeUpdate {
		for i, alloc := range allocs {
			allocs[i] = &Allocation{
				ID:                 alloc.ID,
				DesiredDescription: alloc.DesiredDescription,
				ClientStatus:       alloc.ClientStatus,
				FollowupEvalID:     alloc.FollowupEvalID,
			}
		}
	}

	// Preemptions only need the ID and the preempting alloc's ID.
	for _, allocs := range p.NodePreemptions {
		for i, alloc := range allocs {
			allocs[i] = &Allocation{
				ID:                    alloc.ID,
				PreemptedByAllocation: alloc.PreemptedByAllocation,
			}
		}
	}
}
// PlanResult is the result of a plan submitted to the leader.
type PlanResult struct {
	// NodeUpdate contains all the evictions and stops that were committed.
	NodeUpdate map[string][]*Allocation

	// NodeAllocation contains all the allocations that were committed.
	NodeAllocation map[string][]*Allocation

	// Deployment is the deployment that was committed.
	Deployment *Deployment

	// DeploymentUpdates is the set of deployment updates that were committed.
	DeploymentUpdates []*DeploymentStatusUpdate

	// NodePreemptions is a map from node id to a set of allocations from other
	// lower priority jobs that are preempted. Preempted allocations are marked
	// as stopped.
	NodePreemptions map[string][]*Allocation

	// RefreshIndex is the index the worker should refresh state up to.
	// This allows all evictions and allocations to be materialized.
	// If any allocations were rejected due to stale data (node state,
	// over committed) this can be used to force a worker refresh.
	RefreshIndex uint64

	// AllocIndex is the Raft index in which the evictions and
	// allocations took place. This is used for the write index.
	AllocIndex uint64
}
// IsNoOp checks if this plan result would do nothing
func (p *PlanResult) IsNoOp() bool {
	// Nothing committed: no stops, no placements, no deployment changes.
	return p.Deployment == nil &&
		len(p.NodeUpdate) == 0 &&
		len(p.NodeAllocation) == 0 &&
		len(p.DeploymentUpdates) == 0
}
// FullCommit is used to check if all the allocations in a plan
// were committed as part of the result. Returns if there was
// a match, and the number of expected and actual allocations.
func (p *PlanResult) FullCommit(plan *Plan) (bool, int, int) {
	var expected, actual int
	for node, want := range plan.NodeAllocation {
		expected += len(want)
		actual += len(p.NodeAllocation[node])
	}
	return expected == actual, expected, actual
}
// PlanAnnotations holds annotations made by the scheduler to give further debug
// information to operators.
type PlanAnnotations struct {
	// DesiredTGUpdates is the set of desired updates per task group.
	DesiredTGUpdates map[string]*DesiredUpdates

	// PreemptedAllocs is the set of allocations to be preempted to make the placement successful.
	PreemptedAllocs []*AllocListStub
}

// DesiredUpdates is the set of changes the scheduler would like to make given
// sufficient resources and cluster capacity. All counts are per task group.
type DesiredUpdates struct {
	Ignore            uint64 // allocations left untouched
	Place             uint64 // new placements
	Migrate           uint64 // allocations moved to another node
	Stop              uint64 // allocations stopped
	InPlaceUpdate     uint64 // updates applied without rescheduling
	DestructiveUpdate uint64 // updates requiring stop + replace
	Canary            uint64 // canary placements
	Preemptions       uint64 // lower-priority allocations preempted
}
// GoString renders a one-line summary of the desired updates for debug output.
// NOTE(review): Preemptions is not included in the output — confirm whether
// that omission is intentional.
func (d *DesiredUpdates) GoString() string {
	return fmt.Sprintf("(place %d) (inplace %d) (destructive %d) (stop %d) (migrate %d) (ignore %d) (canary %d)",
		d.Place, d.InPlaceUpdate, d.DestructiveUpdate, d.Stop, d.Migrate, d.Ignore, d.Canary)
}
// MsgpackHandle is a shared handle for encoding/decoding of structs.
var MsgpackHandle = func() *codec.MsgpackHandle {
	h := &codec.MsgpackHandle{}
	h.RawToString = true

	// maintain binary format from time prior to upgrading latest ugorji
	h.BasicHandle.TimeNotBuiltin = true

	// Sets the default type for decoding a map into a nil interface{}.
	// This is necessary in particular because we store the driver configs as a
	// nil interface{}.
	h.MapType = reflect.TypeOf(map[string]interface{}(nil))

	// only review struct codec tags
	h.TypeInfos = codec.NewTypeInfos([]string{"codec"})

	return h
}()
// Decode is used to decode a MsgPack encoded object
func Decode(buf []byte, out interface{}) error {
	dec := codec.NewDecoder(bytes.NewReader(buf), MsgpackHandle)
	return dec.Decode(out)
}

// Encode is used to encode a MsgPack object with type prefix
func Encode(t MessageType, msg interface{}) ([]byte, error) {
	// First byte is the message type, followed by the msgpack payload.
	var buf bytes.Buffer
	buf.WriteByte(uint8(t))
	enc := codec.NewEncoder(&buf, MsgpackHandle)
	err := enc.Encode(msg)
	return buf.Bytes(), err
}
// KeyringResponse is a unified key response and can be used for install,
// remove, use, as well as listing key queries.
type KeyringResponse struct {
	// Messages maps a member to its error message, if any.
	Messages map[string]string
	// Keys maps each known key to the number of members holding it.
	Keys map[string]int
	NumNodes int
}

// KeyringRequest is request objects for serf key operations.
type KeyringRequest struct {
	Key string
}

// RecoverableError wraps an error and marks whether it is recoverable and could
// be retried or it is fatal.
type RecoverableError struct {
	// Err holds the original error's message (the error itself is not
	// retained, only its string form).
	Err         string
	Recoverable bool
}
// NewRecoverableError is used to wrap an error and mark it as recoverable or
// not.
func NewRecoverableError(e error, recoverable bool) error {
	if e == nil {
		return nil
	}

	wrapped := RecoverableError{
		Err:         e.Error(),
		Recoverable: recoverable,
	}
	return &wrapped
}

// WrapRecoverable wraps an existing error in a new RecoverableError with a new
// message. If the error was recoverable before the returned error is as well;
// otherwise it is unrecoverable.
func WrapRecoverable(msg string, err error) error {
	return &RecoverableError{
		Err:         msg,
		Recoverable: IsRecoverable(err),
	}
}
// Error implements the error interface, returning the wrapped message.
func (r *RecoverableError) Error() string {
	return r.Err
}

// IsRecoverable reports whether the error may be retried.
func (r *RecoverableError) IsRecoverable() bool {
	return r.Recoverable
}

// IsUnrecoverable reports the inverse of IsRecoverable.
func (r *RecoverableError) IsUnrecoverable() bool {
	return !r.Recoverable
}

// Recoverable is an interface for errors to implement to indicate whether or
// not they are fatal or recoverable.
type Recoverable interface {
	error
	IsRecoverable() bool
}
// IsRecoverable returns true if error is a RecoverableError with
// Recoverable=true. Otherwise false is returned.
func IsRecoverable(e error) bool {
	re, ok := e.(Recoverable)
	return ok && re.IsRecoverable()
}
// WrappedServerError wraps an error and satisfies
// both the Recoverable and the ServerSideError interfaces
type WrappedServerError struct {
	Err error
}

// NewWrappedServerError is used to create a wrapped server side error
func NewWrappedServerError(e error) error {
	return &WrappedServerError{
		Err: e,
	}
}

// IsRecoverable delegates to the wrapped error's recoverability.
func (r *WrappedServerError) IsRecoverable() bool {
	return IsRecoverable(r.Err)
}

// Error implements the error interface via the wrapped error.
func (r *WrappedServerError) Error() string {
	return r.Err.Error()
}

// IsServerSide marks this error as having occurred server side.
func (r *WrappedServerError) IsServerSide() bool {
	return true
}
// ServerSideError is an interface for errors to implement to indicate
// errors occurring after the request makes it to a server
type ServerSideError interface {
	error
	IsServerSide() bool
}
// IsServerSide returns true if error is a wrapped
// server side error
func IsServerSide(e error) bool {
	se, ok := e.(ServerSideError)
	return ok && se.IsServerSide()
}
// ACLPolicy is used to represent an ACL policy
type ACLPolicy struct {
	Name        string      // Unique name
	Description string      // Human readable
	Rules       string      // HCL or JSON format
	RulesJSON   *acl.Policy // Generated from Rules on read
	// Hash is a Blake2b-256 digest of the user-set fields; see SetHash.
	Hash        []byte
	CreateIndex uint64
	ModifyIndex uint64
}
// SetHash is used to compute and set the hash of the ACL policy
func (a *ACLPolicy) SetHash() []byte {
	// Initialize a 256bit Blake2 hash (32 bytes)
	hash, err := blake2b.New256(nil)
	if err != nil {
		panic(err)
	}

	// Feed every user-settable field into the digest, in a fixed order.
	for _, field := range []string{a.Name, a.Description, a.Rules} {
		_, _ = hash.Write([]byte(field))
	}

	// Finalize, store, and return the digest.
	a.Hash = hash.Sum(nil)
	return a.Hash
}
// Stub returns a list-friendly projection of the policy, omitting the
// (potentially large) Rules body.
func (a *ACLPolicy) Stub() *ACLPolicyListStub {
	return &ACLPolicyListStub{
		Name:        a.Name,
		Description: a.Description,
		Hash:        a.Hash,
		CreateIndex: a.CreateIndex,
		ModifyIndex: a.ModifyIndex,
	}
}
// Validate checks the policy for a well-formed name, parseable rules, and a
// description within the length limit, accumulating all problems into one
// multierror.
func (a *ACLPolicy) Validate() error {
	var mErr multierror.Error
	if !validPolicyName.MatchString(a.Name) {
		err := fmt.Errorf("invalid name '%s'", a.Name)
		mErr.Errors = append(mErr.Errors, err)
	}

	// Rules must parse as an ACL policy (HCL or JSON).
	if _, err := acl.Parse(a.Rules); err != nil {
		err = fmt.Errorf("failed to parse rules: %v", err)
		mErr.Errors = append(mErr.Errors, err)
	}

	if len(a.Description) > maxPolicyDescriptionLength {
		err := fmt.Errorf("description longer than %d", maxPolicyDescriptionLength)
		mErr.Errors = append(mErr.Errors, err)
	}

	return mErr.ErrorOrNil()
}
// ACLPolicyListStub is used for listing ACL policies
type ACLPolicyListStub struct {
	Name        string
	Description string
	Hash        []byte
	CreateIndex uint64
	ModifyIndex uint64
}

// ACLPolicyListRequest is used to request a list of policies
type ACLPolicyListRequest struct {
	QueryOptions
}

// ACLPolicySpecificRequest is used to query a specific policy
type ACLPolicySpecificRequest struct {
	Name string
	QueryOptions
}

// ACLPolicySetRequest is used to query a set of policies
type ACLPolicySetRequest struct {
	Names []string
	QueryOptions
}

// ACLPolicyListResponse is used for a list request
type ACLPolicyListResponse struct {
	Policies []*ACLPolicyListStub
	QueryMeta
}

// SingleACLPolicyResponse is used to return a single policy
type SingleACLPolicyResponse struct {
	Policy *ACLPolicy
	QueryMeta
}

// ACLPolicySetResponse is used to return a set of policies
type ACLPolicySetResponse struct {
	Policies map[string]*ACLPolicy
	QueryMeta
}

// ACLPolicyDeleteRequest is used to delete a set of policies
type ACLPolicyDeleteRequest struct {
	Names []string
	WriteRequest
}

// ACLPolicyUpsertRequest is used to upsert a set of policies
type ACLPolicyUpsertRequest struct {
	Policies []*ACLPolicy
	WriteRequest
}
// ACLToken represents a client token which is used to Authenticate
type ACLToken struct {
	AccessorID string    // Public Accessor ID (UUID)
	SecretID   string    // Secret ID, private (UUID)
	Name       string    // Human friendly name
	Type       string    // Client or Management
	Policies   []string  // Policies this token ties to
	Global     bool      // Global or Region local
	// Hash is a Blake2b-256 digest of the user-set fields; see SetHash.
	Hash        []byte
	CreateTime  time.Time // Time of creation
	CreateIndex uint64
	ModifyIndex uint64
}
// GetID implements the IDGetter interface, required for pagination.
func (a *ACLToken) GetID() string {
	if a == nil {
		return ""
	}
	return a.AccessorID
}

// GetCreateIndex implements the CreateIndexGetter interface, required for
// pagination.
func (a *ACLToken) GetCreateIndex() uint64 {
	if a == nil {
		return 0
	}
	return a.CreateIndex
}

// Copy returns a copy of the token. The Policies and Hash slices are cloned
// so mutations of the copy do not alias the original.
func (a *ACLToken) Copy() *ACLToken {
	c := new(ACLToken)
	*c = *a

	c.Policies = make([]string, len(a.Policies))
	copy(c.Policies, a.Policies)
	c.Hash = make([]byte, len(a.Hash))
	copy(c.Hash, a.Hash)

	return c
}
var (
	// AnonymousACLToken is used when no SecretID is provided, and the
	// request is made anonymously.
	AnonymousACLToken = &ACLToken{
		AccessorID: "anonymous",
		Name:       "Anonymous Token",
		Type:       ACLClientToken,
		Policies:   []string{"anonymous"},
		Global:     false,
	}
)
// ACLTokenListStub is a list-friendly projection of an ACLToken; it omits the
// SecretID so listings never expose the private credential.
type ACLTokenListStub struct {
	AccessorID  string
	Name        string
	Type        string
	Policies    []string
	Global      bool
	Hash        []byte
	CreateTime  time.Time
	CreateIndex uint64
	ModifyIndex uint64
}
// SetHash is used to compute and set the hash of the ACL token
func (a *ACLToken) SetHash() []byte {
	// Initialize a 256bit Blake2 hash (32 bytes)
	hash, err := blake2b.New256(nil)
	if err != nil {
		panic(err)
	}

	// Hash the user-set fields in a fixed order.
	_, _ = hash.Write([]byte(a.Name))
	_, _ = hash.Write([]byte(a.Type))
	for _, policyName := range a.Policies {
		_, _ = hash.Write([]byte(policyName))
	}

	// Fold the scope flag into the digest.
	scope := "local"
	if a.Global {
		scope = "global"
	}
	_, _ = hash.Write([]byte(scope))

	// Finalize, store, and return the digest.
	a.Hash = hash.Sum(nil)
	return a.Hash
}
// Stub returns the list-friendly projection of the token (no SecretID).
func (a *ACLToken) Stub() *ACLTokenListStub {
	return &ACLTokenListStub{
		AccessorID:  a.AccessorID,
		Name:        a.Name,
		Type:        a.Type,
		Policies:    a.Policies,
		Global:      a.Global,
		Hash:        a.Hash,
		CreateTime:  a.CreateTime,
		CreateIndex: a.CreateIndex,
		ModifyIndex: a.ModifyIndex,
	}
}
// Validate is used to check a token for reasonableness
func (a *ACLToken) Validate() error {
	var mErr multierror.Error
	if len(a.Name) > maxTokenNameLength {
		mErr.Errors = append(mErr.Errors, fmt.Errorf("token name too long"))
	}

	// Client tokens must carry at least one policy; management tokens must
	// carry none (they implicitly grant everything).
	switch a.Type {
	case ACLClientToken:
		if len(a.Policies) == 0 {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("client token missing policies"))
		}
	case ACLManagementToken:
		if len(a.Policies) != 0 {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("management token cannot be associated with policies"))
		}
	default:
		mErr.Errors = append(mErr.Errors, fmt.Errorf("token type must be client or management"))
	}

	return mErr.ErrorOrNil()
}
// PolicySubset checks if a given set of policies is a subset of the token
func (a *ACLToken) PolicySubset(policies []string) bool {
	// Hot-path the management tokens, superset of all policies.
	if a.Type == ACLManagementToken {
		return true
	}

	// Build a set of the policies this token holds, then verify that every
	// requested policy is present.
	held := make(map[string]struct{}, len(a.Policies))
	for _, name := range a.Policies {
		held[name] = struct{}{}
	}
	for _, name := range policies {
		if _, ok := held[name]; !ok {
			return false
		}
	}
	return true
}
// ACLTokenListRequest is used to request a list of tokens
type ACLTokenListRequest struct {
	GlobalOnly bool
	QueryOptions
}

// ACLTokenSpecificRequest is used to query a specific token
type ACLTokenSpecificRequest struct {
	AccessorID string
	QueryOptions
}

// ACLTokenSetRequest is used to query a set of tokens
type ACLTokenSetRequest struct {
	// NOTE(review): field is spelled AccessorIDS while the delete request
	// uses AccessorIDs — kept as-is since renaming would break callers.
	AccessorIDS []string
	QueryOptions
}

// ACLTokenListResponse is used for a list request
type ACLTokenListResponse struct {
	Tokens []*ACLTokenListStub
	QueryMeta
}

// SingleACLTokenResponse is used to return a single token
type SingleACLTokenResponse struct {
	Token *ACLToken
	QueryMeta
}

// ACLTokenSetResponse is used to return a set of tokens
type ACLTokenSetResponse struct {
	Tokens map[string]*ACLToken // Keyed by Accessor ID
	QueryMeta
}

// ResolveACLTokenRequest is used to resolve a specific token
type ResolveACLTokenRequest struct {
	SecretID string
	QueryOptions
}

// ResolveACLTokenResponse is used to resolve a single token
type ResolveACLTokenResponse struct {
	Token *ACLToken
	QueryMeta
}

// ACLTokenDeleteRequest is used to delete a set of tokens
type ACLTokenDeleteRequest struct {
	AccessorIDs []string
	WriteRequest
}

// ACLTokenBootstrapRequest is used to bootstrap ACLs
type ACLTokenBootstrapRequest struct {
	Token           *ACLToken // Not client specifiable
	ResetIndex      uint64    // Reset index is used to clear the bootstrap token
	BootstrapSecret string
	WriteRequest
}

// ACLTokenUpsertRequest is used to upsert a set of tokens
type ACLTokenUpsertRequest struct {
	Tokens []*ACLToken
	WriteRequest
}

// ACLTokenUpsertResponse is used to return from an ACLTokenUpsertRequest
type ACLTokenUpsertResponse struct {
	Tokens []*ACLToken
	WriteMeta
}

// OneTimeToken is used to log into the web UI using a token provided by the
// command line.
type OneTimeToken struct {
	OneTimeSecretID string
	AccessorID      string
	ExpiresAt       time.Time
	CreateIndex     uint64
	ModifyIndex     uint64
}

// OneTimeTokenUpsertRequest is the request for a UpsertOneTimeToken RPC.
// It carries no fields of its own: the one-time token is generated server
// side for the ACL token on the WriteRequest.
type OneTimeTokenUpsertRequest struct {
	WriteRequest
}

// OneTimeTokenUpsertResponse is the response to a UpsertOneTimeToken RPC.
type OneTimeTokenUpsertResponse struct {
	OneTimeToken *OneTimeToken
	WriteMeta
}

// OneTimeTokenExchangeRequest is a request to swap the one-time token with
// the backing ACL token
type OneTimeTokenExchangeRequest struct {
	OneTimeSecretID string
	WriteRequest
}

// OneTimeTokenExchangeResponse is the response to swapping the one-time token
// with the backing ACL token
type OneTimeTokenExchangeResponse struct {
	Token *ACLToken
	WriteMeta
}

// OneTimeTokenDeleteRequest is a request to delete a group of one-time tokens
type OneTimeTokenDeleteRequest struct {
	AccessorIDs []string
	WriteRequest
}

// OneTimeTokenExpireRequest is a request to delete all expired one-time tokens
type OneTimeTokenExpireRequest struct {
	WriteRequest
}
// RpcError is used for serializing errors with a potential error code
type RpcError struct {
	Message string
	// Code is optional; nil means no code was supplied.
	Code *int64
}

// NewRpcError wraps err's message and an optional code into an RpcError.
func NewRpcError(err error, code *int64) *RpcError {
	return &RpcError{
		Message: err.Error(),
		Code:    code,
	}
}

// Error implements the error interface.
func (r *RpcError) Error() string {
	return r.Message
}
|
package common
import (
"io/ioutil"
"os"
"strings"
"testing"
. "github.com/onsi/gomega"
)
// Fixture data: two throwaway apps under /home/dokku, each with an ENV file
// containing a single exported variable.
var (
	testAppName = "test-app-1"
	testAppDir  = strings.Join([]string{"/home/dokku/", testAppName}, "")
	testEnvFile = strings.Join([]string{testAppDir, "/ENV"}, "")
	testEnvLine = "export testKey=TESTING"

	// Second app name starts with digits to exercise name validation.
	testAppName2 = "01-test-app-1"
	testAppDir2  = strings.Join([]string{"/home/dokku/", testAppName2}, "")
	testEnvFile2 = strings.Join([]string{testAppDir2, "/ENV"}, "")
	testEnvLine2 = "export testKey=TESTING"
)
// setupTestApp creates the first fixture app directory with a minimal ENV
// file. Directories need the execute (search) bit to be traversable, so the
// mode is 0755 — the previous 0644 made the WriteFile below fail with
// "permission denied" for any non-root user.
func setupTestApp() (err error) {
	Expect(os.MkdirAll(testAppDir, 0755)).To(Succeed())
	b := []byte(testEnvLine + "\n")
	if err = ioutil.WriteFile(testEnvFile, b, 0644); err != nil {
		return
	}
	return
}

// setupTestApp2 creates the second fixture app (digit-prefixed name),
// mirroring setupTestApp. Same 0755 rationale as above.
func setupTestApp2() (err error) {
	Expect(os.MkdirAll(testAppDir2, 0755)).To(Succeed())
	b := []byte(testEnvLine2 + "\n")
	if err = ioutil.WriteFile(testEnvFile2, b, 0644); err != nil {
		return
	}
	return
}

// teardownTestApp removes the first fixture app directory.
func teardownTestApp() {
	os.RemoveAll(testAppDir)
}

// teardownTestApp2 removes the second fixture app directory.
func teardownTestApp2() {
	os.RemoveAll(testAppDir2)
}
// TestCommonGetEnv verifies MustGetEnv returns DOKKU_ROOT from the environment.
func TestCommonGetEnv(t *testing.T) {
	RegisterTestingT(t)
	Expect(MustGetEnv("DOKKU_ROOT")).To(Equal("/home/dokku"))
}

// TestCommonGetAppImageRepo verifies the dokku/<app> repo naming.
func TestCommonGetAppImageRepo(t *testing.T) {
	RegisterTestingT(t)
	Expect(GetAppImageRepo("testapp")).To(Equal("dokku/testapp"))
}

// TestCommonVerifyImageInvalid expects an unknown image to fail verification.
func TestCommonVerifyImageInvalid(t *testing.T) {
	RegisterTestingT(t)
	Expect(VerifyImage("testapp")).To(Equal(false))
}

// TestCommonVerifyAppNameInvalid: names starting with digits followed by
// letters are rejected.
func TestCommonVerifyAppNameInvalid(t *testing.T) {
	RegisterTestingT(t)
	err := VerifyAppName("1994testApp")
	Expect(err).To(HaveOccurred())
}

// TestCommonVerifyAppName accepts both fixture app names once the app
// directories exist on disk.
func TestCommonVerifyAppName(t *testing.T) {
	RegisterTestingT(t)
	Expect(setupTestApp()).To(Succeed())
	Expect(VerifyAppName(testAppName)).To(Succeed())
	teardownTestApp()

	RegisterTestingT(t)
	Expect(setupTestApp2()).To(Succeed())
	Expect(VerifyAppName(testAppName2)).To(Succeed())
	teardownTestApp2()
}

// TestCommonDokkuAppsError expects DokkuApps to fail when no apps exist.
// NOTE(review): this appears to rely on PLUGIN_ENABLED_PATH being unset, which
// depends on test execution order relative to TestCommonDokkuApps — fragile;
// confirm.
func TestCommonDokkuAppsError(t *testing.T) {
	RegisterTestingT(t)
	_, err := DokkuApps()
	Expect(err).To(HaveOccurred())
}

// TestCommonDokkuApps lists exactly the one fixture app after setup.
func TestCommonDokkuApps(t *testing.T) {
	RegisterTestingT(t)
	os.Setenv("PLUGIN_ENABLED_PATH", "/var/lib/dokku/plugins/enabled")
	Expect(setupTestApp()).To(Succeed())
	apps, err := DokkuApps()
	Expect(err).NotTo(HaveOccurred())
	Expect(apps).To(HaveLen(1))
	Expect(apps[0]).To(Equal(testAppName))
	teardownTestApp()
}

// TestCommonStripInlineComments drops a trailing "# ..." comment from a line.
func TestCommonStripInlineComments(t *testing.T) {
	RegisterTestingT(t)
	text := StripInlineComments(strings.Join([]string{testEnvLine, "# testing comment"}, " "))
	Expect(text).To(Equal(testEnvLine))
}
fix: ensure go tests have PLUGIN_ENABLED_PATH set
package common
import (
"io/ioutil"
"os"
"strings"
"testing"
. "github.com/onsi/gomega"
)
// Fixture data: two throwaway apps under /home/dokku, each with an ENV file
// containing a single exported variable.
var (
	testAppName = "test-app-1"
	testAppDir  = strings.Join([]string{"/home/dokku/", testAppName}, "")
	testEnvFile = strings.Join([]string{testAppDir, "/ENV"}, "")
	testEnvLine = "export testKey=TESTING"

	// Second app name starts with digits to exercise name validation.
	testAppName2 = "01-test-app-1"
	testAppDir2  = strings.Join([]string{"/home/dokku/", testAppName2}, "")
	testEnvFile2 = strings.Join([]string{testAppDir2, "/ENV"}, "")
	testEnvLine2 = "export testKey=TESTING"
)
// setupTests seeds the environment every test depends on so individual tests
// do not rely on execution order.
func setupTests() (err error) {
	return os.Setenv("PLUGIN_ENABLED_PATH", "/var/lib/dokku/plugins/enabled")
}

// setupTestApp creates the first fixture app directory with a minimal ENV
// file. Directories need the execute (search) bit to be traversable, so the
// mode is 0755 — the previous 0644 made the WriteFile below fail with
// "permission denied" for any non-root user.
func setupTestApp() (err error) {
	Expect(os.MkdirAll(testAppDir, 0755)).To(Succeed())
	b := []byte(testEnvLine + "\n")
	if err = ioutil.WriteFile(testEnvFile, b, 0644); err != nil {
		return
	}
	return
}

// setupTestApp2 creates the second fixture app (digit-prefixed name),
// mirroring setupTestApp. Same 0755 rationale as above.
func setupTestApp2() (err error) {
	Expect(os.MkdirAll(testAppDir2, 0755)).To(Succeed())
	b := []byte(testEnvLine2 + "\n")
	if err = ioutil.WriteFile(testEnvFile2, b, 0644); err != nil {
		return
	}
	return
}

// teardownTestApp removes the first fixture app directory.
func teardownTestApp() {
	os.RemoveAll(testAppDir)
}

// teardownTestApp2 removes the second fixture app directory.
func teardownTestApp2() {
	os.RemoveAll(testAppDir2)
}
// TestCommonGetEnv verifies MustGetEnv returns DOKKU_ROOT from the environment.
func TestCommonGetEnv(t *testing.T) {
	RegisterTestingT(t)
	Expect(setupTests()).To(Succeed())
	Expect(MustGetEnv("DOKKU_ROOT")).To(Equal("/home/dokku"))
}

// TestCommonGetAppImageRepo verifies the dokku/<app> repo naming.
func TestCommonGetAppImageRepo(t *testing.T) {
	RegisterTestingT(t)
	Expect(setupTests()).To(Succeed())
	Expect(GetAppImageRepo("testapp")).To(Equal("dokku/testapp"))
}

// TestCommonVerifyImageInvalid expects an unknown image to fail verification.
func TestCommonVerifyImageInvalid(t *testing.T) {
	RegisterTestingT(t)
	Expect(setupTests()).To(Succeed())
	Expect(VerifyImage("testapp")).To(Equal(false))
}

// TestCommonVerifyAppNameInvalid: names starting with digits followed by
// letters are rejected.
func TestCommonVerifyAppNameInvalid(t *testing.T) {
	RegisterTestingT(t)
	Expect(setupTests()).To(Succeed())
	Expect(VerifyAppName("1994testApp")).To(HaveOccurred())
}

// TestCommonVerifyAppName accepts both fixture app names once the app
// directories exist on disk.
func TestCommonVerifyAppName(t *testing.T) {
	RegisterTestingT(t)
	Expect(setupTests()).To(Succeed())
	Expect(setupTestApp()).To(Succeed())
	Expect(VerifyAppName(testAppName)).To(Succeed())
	teardownTestApp()

	RegisterTestingT(t)
	Expect(setupTests()).To(Succeed())
	Expect(setupTestApp2()).To(Succeed())
	Expect(VerifyAppName(testAppName2)).To(Succeed())
	teardownTestApp2()
}

// TestCommonDokkuAppsError expects DokkuApps to fail when no apps exist.
func TestCommonDokkuAppsError(t *testing.T) {
	RegisterTestingT(t)
	Expect(setupTests()).To(Succeed())
	_, err := DokkuApps()
	Expect(err).To(HaveOccurred())
}

// TestCommonDokkuApps lists exactly the one fixture app after setup.
func TestCommonDokkuApps(t *testing.T) {
	RegisterTestingT(t)
	Expect(setupTests()).To(Succeed())
	Expect(setupTestApp()).To(Succeed())
	apps, err := DokkuApps()
	Expect(err).NotTo(HaveOccurred())
	Expect(apps).To(HaveLen(1))
	Expect(apps[0]).To(Equal(testAppName))
	teardownTestApp()
}

// TestCommonStripInlineComments drops a trailing "# ..." comment from a line.
func TestCommonStripInlineComments(t *testing.T) {
	RegisterTestingT(t)
	Expect(setupTests()).To(Succeed())
	text := StripInlineComments(strings.Join([]string{testEnvLine, "# testing comment"}, " "))
	Expect(text).To(Equal(testEnvLine))
}
|
package common
import (
"fmt"
"log"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/hashicorp/packer/template/interpolate"
)
// AccessConfig is for common configuration related to AWS access
type AccessConfig struct {
	AccessKey         string `mapstructure:"access_key"`
	CustomEndpointEc2 string `mapstructure:"custom_endpoint_ec2"`
	MFACode           string `mapstructure:"mfa_code"`
	ProfileName       string `mapstructure:"profile"`
	RawRegion         string `mapstructure:"region"`
	SecretKey         string `mapstructure:"secret_key"`
	SkipValidation    bool   `mapstructure:"skip_region_validation"`
	Token             string `mapstructure:"token"`
	// session caches the constructed AWS session across calls to Session().
	session *session.Session
}
// Session returns a valid *session.Session for access to AWS services, or
// an error if the authentication and region couldn't be resolved. The
// session is cached after the first successful call.
func (c *AccessConfig) Session() (*session.Session, error) {
	if c.session != nil {
		return c.session, nil
	}

	region, err := c.region()
	if err != nil {
		return nil, err
	}

	if c.ProfileName != "" {
		if err := os.Setenv("AWS_PROFILE", c.ProfileName); err != nil {
			log.Printf("Set env error: %s", err)
		}
	}

	config := aws.NewConfig().WithMaxRetries(11).WithCredentialsChainVerboseErrors(true)
	// region() deliberately returns "" with a nil error when the region
	// cannot be determined (e.g. not running on EC2). Only pin the region
	// when we actually have one, so the SDK's own resolution (environment,
	// shared config) still applies instead of an explicit empty region.
	if region != "" {
		config = config.WithRegion(region)
	}

	if c.CustomEndpointEc2 != "" {
		config = config.WithEndpoint(c.CustomEndpointEc2)
	}

	// Static credentials take precedence when an access key is configured.
	if c.AccessKey != "" {
		creds := credentials.NewChainCredentials(
			[]credentials.Provider{
				&credentials.StaticProvider{
					Value: credentials.Value{
						AccessKeyID:     c.AccessKey,
						SecretAccessKey: c.SecretKey,
						SessionToken:    c.Token,
					},
				},
			})
		config = config.WithCredentials(creds)
	}

	opts := session.Options{
		SharedConfigState: session.SharedConfigEnable,
		Config:            *config,
	}

	if c.MFACode != "" {
		opts.AssumeRoleTokenProvider = func() (string, error) {
			return c.MFACode, nil
		}
	}

	c.session, err = session.NewSessionWithOptions(opts)
	if err != nil {
		return nil, err
	}
	return c.session, nil
}
// region returns either the region from config or region from metadata service
func (c *AccessConfig) region() (string, error) {
	if c.RawRegion != "" {
		if !c.SkipValidation {
			if valid := ValidateRegion(c.RawRegion); !valid {
				return "", fmt.Errorf("Not a valid region: %s", c.RawRegion)
			}
		}
		return c.RawRegion, nil
	}

	// Fall back to the EC2 instance metadata service.
	// NOTE(review): session.New() is deprecated in newer SDK versions in
	// favor of session.NewSession — confirm the vendored SDK version before
	// changing.
	sess := session.New()
	ec2meta := ec2metadata.New(sess)
	identity, err := ec2meta.GetInstanceIdentityDocument()
	if err != nil {
		// Deliberately best-effort: off EC2 this fails, and we return an
		// empty region with a nil error rather than aborting.
		log.Println("Error getting region from metadata service, "+
			"probably because we're not running on AWS.", err)
		return "", nil
	}
	return identity.Region, nil
}
// Prepare validates the configured region unless validation is skipped,
// returning nil when everything checks out.
func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error {
	if c.RawRegion == "" || c.SkipValidation {
		return nil
	}
	if ValidateRegion(c.RawRegion) {
		return nil
	}
	return []error{fmt.Errorf("Unknown region: %s", c.RawRegion)}
}
clean up
package common
import (
"fmt"
"log"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/hashicorp/packer/template/interpolate"
)
// AccessConfig is for common configuration related to AWS access
type AccessConfig struct {
	AccessKey         string `mapstructure:"access_key"`
	CustomEndpointEc2 string `mapstructure:"custom_endpoint_ec2"`
	MFACode           string `mapstructure:"mfa_code"`
	ProfileName       string `mapstructure:"profile"`
	RawRegion         string `mapstructure:"region"`
	SecretKey         string `mapstructure:"secret_key"`
	SkipValidation    bool   `mapstructure:"skip_region_validation"`
	Token             string `mapstructure:"token"`
	// session caches the constructed AWS session across calls to Session().
	session *session.Session
}
// Session returns a valid *session.Session for access to AWS services, or
// an error if the authentication and region couldn't be resolved. The
// session is cached after the first successful call.
func (c *AccessConfig) Session() (*session.Session, error) {
	if c.session != nil {
		return c.session, nil
	}

	if c.ProfileName != "" {
		if err := os.Setenv("AWS_PROFILE", c.ProfileName); err != nil {
			log.Printf("Set env error: %s", err)
		}
	}

	config := aws.NewConfig().WithMaxRetries(11).WithCredentialsChainVerboseErrors(true)
	// Only pin the region when one could be determined; otherwise leave it
	// to the SDK's own resolution (environment, shared config).
	if region := c.region(); region != "" {
		config = config.WithRegion(region)
	}

	if c.CustomEndpointEc2 != "" {
		config = config.WithEndpoint(c.CustomEndpointEc2)
	}

	// Static credentials take precedence when an access key is configured.
	if c.AccessKey != "" {
		creds := credentials.NewChainCredentials(
			[]credentials.Provider{
				&credentials.StaticProvider{
					Value: credentials.Value{
						AccessKeyID:     c.AccessKey,
						SecretAccessKey: c.SecretKey,
						SessionToken:    c.Token,
					},
				},
			})
		config = config.WithCredentials(creds)
	}

	opts := session.Options{
		SharedConfigState: session.SharedConfigEnable,
		Config:            *config,
	}

	if c.MFACode != "" {
		opts.AssumeRoleTokenProvider = func() (string, error) {
			return c.MFACode, nil
		}
	}

	var err error
	c.session, err = session.NewSessionWithOptions(opts)
	if err != nil {
		return nil, err
	}
	return c.session, nil
}
// region returns either the region from config or region from metadata service
func (c *AccessConfig) region() string {
	// An explicitly configured region wins.
	if c.RawRegion != "" {
		return c.RawRegion
	}
	// Fall back to the EC2 instance metadata service (best effort only).
	// session.New is deprecated because it silently drops configuration
	// errors; session.NewSession surfaces them instead.
	sess, err := session.NewSession()
	if err != nil {
		log.Println("Error creating session for metadata lookup.", err)
		return ""
	}
	region, err := ec2metadata.New(sess).Region()
	if err != nil {
		// Not running on EC2 is not fatal; an empty region keeps the
		// original best-effort behavior.
		log.Println("Error getting region from metadata service, "+
			"probably because we're not running on AWS.", err)
		return ""
	}
	return region
}
// Prepare checks the configured region (unless validation is skipped)
// and reports any problems; a nil result means the config is valid.
func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error {
	var errs []error
	if c.RawRegion != "" && !c.SkipValidation {
		// ValidateRegion knows the set of legal AWS regions.
		if ok := ValidateRegion(c.RawRegion); !ok {
			errs = append(errs, fmt.Errorf("Unknown region: %s", c.RawRegion))
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errs
}
|
package logging
import (
"fmt"
"path"
"runtime"
"strings"
"github.com/Sirupsen/logrus"
)
const (
	// prefix is this project's import path; only stack frames inside it
	// are considered when locating the log call site.
	prefix = "github.com/control-center/serviced/"
	// vendorprefix marks vendored third-party code, which is skipped.
	vendorprefix = prefix + "vendor/"
)
// ContextHook is a logrus hook that adds the calling file and line number
// (as the "location" field) to each log entry.
type ContextHook struct{}
// Levels satisfies the logrus.Hook interface; this hook fires for all levels.
func (hook ContextHook) Levels() []logrus.Level {
	return logrus.AllLevels
}
// Fire satisfies the logrus.Hook interface. It walks up the call stack to
// find the first frame inside this project (excluding vendored code) and
// records its file:line as the entry's "location" field.
func (hook ContextHook) Fire(entry *logrus.Entry) error {
	pc := make([]uintptr, 3)
	// Skip 6 frames to jump over runtime.Callers, this hook, and the
	// logrus call machinery — NOTE(review): confirm against the logrus
	// version in use.
	count := runtime.Callers(6, pc)
	for i := 0; i < count; i++ {
		// Subtract 1 so the PC points into the call instruction.
		fu := runtime.FuncForPC(pc[i] - 1)
		if fu == nil {
			// No function info for this PC; skip it rather than
			// dereference a nil *runtime.Func.
			continue
		}
		name := fu.Name()
		if strings.HasPrefix(name, prefix) && !strings.HasPrefix(name, vendorprefix) {
			file, line := fu.FileLine(pc[i] - 1)
			entry.SetField("location", fmt.Sprintf("%s:%d", path.Base(file), line))
			break
		}
	}
	return nil
}
Added some comments.
package logging
import (
"fmt"
"path"
"runtime"
"strings"
"github.com/Sirupsen/logrus"
)
const (
	// prefix is this project's import path; only stack frames inside it
	// are considered when locating the log call site.
	prefix = "github.com/control-center/serviced/"
	// vendorprefix marks vendored third-party code, which is skipped.
	vendorprefix = prefix + "vendor/"
)
// ContextHook is a hook to provide context in log messages: it records the
// caller's file and line number as the "location" field of each entry.
type ContextHook struct{}
// Levels satisfies the logrus.Hook interface. This hook applies to all levels.
func (hook ContextHook) Levels() []logrus.Level {
	return logrus.AllLevels
}
// Fire satisfies the logrus.Hook interface. This impl figures out the file and
// line number of the caller and adds them to the data as "location". Frames
// outside the project (or in vendored code) are skipped.
func (hook ContextHook) Fire(entry *logrus.Entry) error {
	pc := make([]uintptr, 3)
	// Skip 6 frames to jump over runtime.Callers, this hook, and the
	// logrus call machinery — NOTE(review): confirm against the logrus
	// version in use.
	count := runtime.Callers(6, pc)
	for i := 0; i < count; i++ {
		// Subtract 1 so the PC points into the call instruction.
		fu := runtime.FuncForPC(pc[i] - 1)
		if fu == nil {
			// No function info for this PC; skip it rather than
			// dereference a nil *runtime.Func.
			continue
		}
		name := fu.Name()
		if strings.HasPrefix(name, prefix) && !strings.HasPrefix(name, vendorprefix) {
			file, line := fu.FileLine(pc[i] - 1)
			entry.SetField("location", fmt.Sprintf("%s:%d", path.Base(file), line))
			break
		}
	}
	return nil
}
|
package fuse
// all of the code for DirEntryList.
import (
"bytes"
"log"
"unsafe"
"github.com/hanwen/go-fuse/raw"
)
// Keep the log import referenced even when no log calls are compiled in.
var _ = log.Print

// eightPadding is a zeroed scratch buffer used to 8-byte-align dirent records.
var eightPadding [8]byte
// DirEntry is a type for PathFileSystem and NodeFileSystem to return
// directory contents in.
type DirEntry struct {
	Mode uint32 // file mode; the dirent type is derived from it
	Name string // entry name, without directory components
}
// DirEntryList accumulates the serialized dirents for one ReadDir reply.
type DirEntryList struct {
	buf     *bytes.Buffer // serialized dirent records
	offset  uint64        // directory offset of the last entry written
	maxSize int           // capacity of the kernel-supplied read buffer
}
// NewDirEntryList creates a list that serializes dirents into data,
// starting at directory offset off and never growing past len(data).
func NewDirEntryList(data []byte, off uint64) *DirEntryList {
	l := new(DirEntryList)
	l.buf = bytes.NewBuffer(data[:0])
	l.maxSize = len(data)
	l.offset = off
	return l
}
// AddDirEntry appends e with an unknown inode number; it returns false
// when the entry did not fit in the buffer.
func (l *DirEntryList) AddDirEntry(e DirEntry) bool {
	return l.Add(e.Name, uint64(raw.FUSE_UNKNOWN_INO), e.Mode)
}
// Add serializes one dirent (fixed header, name, alignment padding) into the
// buffer. It returns false without writing anything when the record would
// exceed maxSize, signalling the caller to stop adding entries.
func (l *DirEntryList) Add(name string, inode uint64, mode uint32) bool {
	dirent := raw.Dirent{
		Off:     l.offset + 1, // offset of the *next* entry, per dirent convention
		Ino:     inode,
		NameLen: uint32(len(name)),
		Typ:     ModeToType(mode),
	}
	// Pad the name so the next record starts on an 8-byte boundary.
	padding := 8 - len(name)&7
	if padding == 8 {
		padding = 0
	}
	delta := padding + int(unsafe.Sizeof(raw.Dirent{})) + len(name)
	newLen := delta + l.buf.Len()
	if newLen > l.maxSize {
		return false
	}
	// Copy the raw struct bytes directly; the Go struct layout must match
	// the kernel's dirent wire format.
	_, err := l.buf.Write(asSlice(unsafe.Pointer(&dirent), unsafe.Sizeof(raw.Dirent{})))
	if err != nil {
		panic("Serialization of Dirent failed")
	}
	l.buf.WriteString(name)
	if padding > 0 {
		l.buf.Write(eightPadding[:padding])
	}
	l.offset = dirent.Off
	// Sanity check: actual growth must equal the computed record size.
	if l.buf.Len() != newLen {
		log.Panicf("newLen mismatch %d %d", l.buf.Len(), newLen)
	}
	return true
}
// Bytes returns the serialized dirent data accumulated so far.
func (l *DirEntryList) Bytes() []byte {
	return l.buf.Bytes()
}
////////////////////////////////////////////////////////////////
// rawDir is the interface used to serve directory reads to the kernel.
type rawDir interface {
	// ReadDir serializes entries into out, honoring input.Offset.
	ReadDir(out *DirEntryList, input *ReadIn) Status
	// Release is called when the directory handle is closed.
	Release()
}
// connectorDir implements rawDir on top of an FsNode's directory stream.
type connectorDir struct {
	node       FsNode
	stream     []DirEntry // entries from OpenDir, indexed by directory offset
	lastOffset uint64     // offset after the last entry handed to the kernel
}
// ReadDir copies directory entries into list, starting at input.Offset,
// until the list is full or the stream is exhausted.
func (d *connectorDir) ReadDir(list *DirEntryList, input *ReadIn) (code Status) {
	if d.stream == nil {
		return OK
	}
	// rewinddir() should be as if reopening directory.
	// TODO - test this.
	if d.lastOffset > 0 && input.Offset == 0 {
		d.stream, code = d.node.OpenDir(nil)
		if !code.Ok() {
			return code
		}
	}
	// An offset past the end of the stream would panic when slicing
	// (e.g. after the stream shrank on reopen); treat it as EOF.
	if input.Offset > uint64(len(d.stream)) {
		return OK
	}
	todo := d.stream[input.Offset:]
	for _, e := range todo {
		if !list.AddDirEntry(e) {
			// Entry did not fit; the kernel will ask again with a
			// larger offset.
			break
		}
	}
	d.lastOffset = list.offset
	return OK
}
// Release satisfies rawDir. The connector holds no per-directory resources,
// so there is nothing to free when the handle is closed.
func (d *connectorDir) Release() {
}
Simplify dirent serialization.
package fuse
// all of the code for DirEntryList.
import (
"log"
"unsafe"
"github.com/hanwen/go-fuse/raw"
)
// Keep the log import referenced even when no log calls are compiled in.
var _ = log.Print

// eightPadding is a zeroed scratch buffer used to 8-byte-align dirent records.
var eightPadding [8]byte

// direntSize is the size of the fixed dirent header preceding each name.
const direntSize = int(unsafe.Sizeof(raw.Dirent{}))
// DirEntry is a type for PathFileSystem and NodeFileSystem to return
// directory contents in.
type DirEntry struct {
	Mode uint32 // file mode; the dirent type is derived from it
	Name string // entry name, without directory components
}
// DirEntryList accumulates the serialized dirents for one ReadDir reply.
// Records are written in place into buf's spare capacity.
type DirEntryList struct {
	buf    []byte // serialized records; cap(buf) bounds the total size
	offset uint64 // directory offset of the last entry written
}
// NewDirEntryList creates a list that serializes dirents in place into
// data, starting at directory offset off; cap(data) bounds its size.
func NewDirEntryList(data []byte, off uint64) *DirEntryList {
	l := &DirEntryList{offset: off}
	l.buf = data[:0]
	return l
}
// AddDirEntry appends e with an unknown inode number; it returns false
// when the entry did not fit in the buffer.
func (l *DirEntryList) AddDirEntry(e DirEntry) bool {
	return l.Add(e.Name, uint64(raw.FUSE_UNKNOWN_INO), e.Mode)
}
// Add serializes one dirent (fixed header, name, alignment padding) directly
// into the backing buffer. It returns false without modifying the buffer
// when the record would exceed its capacity, signalling the caller to stop.
func (l *DirEntryList) Add(name string, inode uint64, mode uint32) bool {
	// Pad the name so the next record starts on an 8-byte boundary.
	padding := (8 - len(name)&7) & 7
	delta := padding + direntSize + len(name)
	oldLen := len(l.buf)
	newLen := delta + oldLen
	if newLen > cap(l.buf) {
		return false
	}
	l.buf = l.buf[:newLen]
	// Write the header in place by casting the buffer tail to a
	// raw.Dirent; the Go struct layout must match the kernel wire format.
	dirent := (*raw.Dirent)(unsafe.Pointer(&l.buf[oldLen]))
	dirent.Off = l.offset + 1 // offset of the *next* entry, per dirent convention
	dirent.Ino = inode
	dirent.NameLen = uint32(len(name))
	dirent.Typ = ModeToType(mode)
	oldLen += direntSize
	copy(l.buf[oldLen:], name)
	oldLen += len(name)
	if padding > 0 {
		copy(l.buf[oldLen:], eightPadding[:padding])
	}
	l.offset = dirent.Off
	return true
}
// Bytes returns the serialized dirent data accumulated so far.
func (l *DirEntryList) Bytes() []byte {
	return l.buf
}
////////////////////////////////////////////////////////////////
// rawDir is the interface used to serve directory reads to the kernel.
type rawDir interface {
	// ReadDir serializes entries into out, honoring input.Offset.
	ReadDir(out *DirEntryList, input *ReadIn) Status
	// Release is called when the directory handle is closed.
	Release()
}
// connectorDir implements rawDir on top of an FsNode's directory stream.
type connectorDir struct {
	node       FsNode
	stream     []DirEntry // entries from OpenDir, indexed by directory offset
	lastOffset uint64     // offset after the last entry handed to the kernel
}
// ReadDir copies directory entries into list, starting at input.Offset,
// until the list is full or the stream is exhausted.
func (d *connectorDir) ReadDir(list *DirEntryList, input *ReadIn) (code Status) {
	if d.stream == nil {
		return OK
	}
	// rewinddir() should be as if reopening directory.
	// TODO - test this.
	if d.lastOffset > 0 && input.Offset == 0 {
		d.stream, code = d.node.OpenDir(nil)
		if !code.Ok() {
			return code
		}
	}
	// An offset past the end of the stream would panic when slicing
	// (e.g. after the stream shrank on reopen); treat it as EOF.
	if input.Offset > uint64(len(d.stream)) {
		return OK
	}
	todo := d.stream[input.Offset:]
	for _, e := range todo {
		if !list.AddDirEntry(e) {
			// Entry did not fit; the kernel will ask again with a
			// larger offset.
			break
		}
	}
	d.lastOffset = list.offset
	return OK
}
// Release satisfies rawDir. The connector holds no per-directory resources,
// so there is nothing to free when the handle is closed.
func (d *connectorDir) Release() {
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.