CombinedText stringlengths 4 3.42M |
|---|
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Import docs from a git branch and format them for gh-pages.
// usage: go run _tools/release_docs/main.go _tools/release_docs/api-reference-process.go --branch release-1.0 --output-dir v1.0 --version v1.0
package main
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"strings"
)
// Command-line flags controlling the import, plus the repository
// subdirectories whose docs are copied over.
var (
	branch       = flag.String("branch", "", "The git branch from which to pull docs. (e.g. release-1.0, master).")
	outputDir    = flag.String("output-dir", "", "The directory in which to save results.")
	version      = flag.String("version", "", "The release version. It should be the same as the version segment in the URL, e.g., when importing docs that will be hosted at k8s.io/v1.0/, the version flag should be \"v1.0\".")
	remote       = flag.String("remote", "upstream", "Optional: The name of the remote repo from which to pull docs.")
	apiReference = flag.Bool("apiReference", true, "Optional: Whether update api reference")
	subdirs      = []string{"docs", "examples"}
)
// fileExistsInBranch reports whether path exists in the remote branch the
// docs are imported from, by checking "git ls-tree" output.
func fileExistsInBranch(path string) bool {
	out, err := exec.Command("git", "ls-tree", fmt.Sprintf("%s/%s", *remote, *branch), path).Output()
	return err == nil && len(out) != 0
}
// fixURL rewrites site-relative links found in filename so they work on
// the published gh-pages site. It returns true if u was modified.
//
// Three cases are handled:
//   - the target was not copied into the output dir: point at the
//     releases.k8s.io redirector for the branch being imported
//   - the target is a directory without a README.md: also redirect to
//     releases.k8s.io so the repo directory view is shown
//   - the target is a local .md file: link to the rendered .html instead
func fixURL(filename string, u *url.URL) bool {
	if u.Host != "" || u.Path == "" {
		// Absolute URLs and fragment-only links are left untouched.
		return false
	}
	target := filepath.Join(filepath.Dir(filename), u.Path)
	if fi, err := os.Stat(target); os.IsNotExist(err) {
		// We're linking to something we didn't copy over. Send
		// it through the redirector.
		rel, err := filepath.Rel(*outputDir, target)
		if err != nil {
			return false
		}
		if fileExistsInBranch(rel) {
			// Link to the branch the docs were imported from rather than
			// HEAD: HEAD drifts over time, breaking the links. Use
			// path.Join (not filepath.Join): URL paths are always
			// slash-separated regardless of OS.
			u.Path = path.Join(*branch, rel)
			u.Host = "releases.k8s.io"
			u.Scheme = "https"
			return true
		}
	} else if fi.IsDir() {
		// If there's no README.md in the directory, redirect to github
		// for the directory view.
		files, err := filepath.Glob(target + "/*")
		if err != nil {
			return false
		}
		hasReadme := false
		for _, f := range files {
			if strings.ToLower(filepath.Base(f)) == "readme.md" {
				hasReadme = true
				break
			}
		}
		if !hasReadme {
			rel, err := filepath.Rel(*outputDir, target)
			if err != nil {
				return false
			}
			u.Path = path.Join(*branch, rel)
			u.Host = "releases.k8s.io"
			u.Scheme = "https"
			return true
		}
	} else if strings.HasSuffix(u.Path, ".md") {
		// Markdown pages are rendered to .html on the site.
		u.Path = u.Path[:len(u.Path)-3] + ".html"
		return true
	}
	return false
}
// processFile rewrites a single markdown file in place: links, code
// blocks, HTML-preview links and download links are munged for gh-pages,
// and a Jekyll front-matter header carrying the page title is prepended.
func processFile(prefix, filename string) (err error) {
	fileBytes, err := ioutil.ReadFile(filename)
	if err != nil {
		return err
	}
	title := getTitle(fileBytes)
	if len(title) == 0 {
		// Fall back to the path relative to prefix, without ".md".
		title = filename[len(prefix)+1 : len(filename)-len(".md")]
	}
	output := rewriteLinks(filename, fileBytes)
	output = rewriteCodeBlocks(output)
	output = rewriteHTMLPreviewLinks(output)
	output = rewriteDownloadLinks(output)
	f, err := os.Create(filename)
	if err != nil {
		return err
	}
	// The original leaked the file handle: close it on every path, and
	// surface the Close error when nothing else failed first.
	defer func() {
		if cerr := f.Close(); err == nil {
			err = cerr
		}
	}()
	if _, err = f.WriteString(fmt.Sprintf("---\nlayout: docwithnav\ntitle: %q\n---\n", title)); err != nil {
		return err
	}
	_, err = f.Write(output)
	return err
}
var (
	// Finds markdown links of the form [foo](bar "alt-text").
	linkRE = regexp.MustCompile(`\[([^]]*)\]\(([^)]*)\)`)
	// Finds markdown reference style link definitions.
	refRE = regexp.MustCompile(`(?m)^\[([^]]*)\]:\s+(.*)$`)
	// Splits the link target into link target and alt-text.
	altTextRE = regexp.MustCompile(`(.*)( ".*")`)
)
// rewriteLinks rewrites markdown links (both inline links and
// reference-style definitions) in fileBytes, passing each target through
// fixURL so it works on the published site. Unchanged links are returned
// byte-for-byte intact.
func rewriteLinks(filename string, fileBytes []byte) []byte {
	// getParts splits one regexp match into display text, link target and
	// optional alt-text caption; changed reports whether fixURL modified
	// the target.
	getParts := func(re *regexp.Regexp, in []byte) (text, link, caption string, changed bool) {
		match := re.FindSubmatch(in)
		text = string(match[1])
		link = strings.TrimSpace(string(match[2]))
		if parts := altTextRE.FindStringSubmatch(link); parts != nil {
			link = parts[1]
			caption = parts[2]
		}
		u, err := url.Parse(link)
		if err != nil || !fixURL(filename, u) {
			return "", "", "", false
		}
		return text, u.String(), caption, true
	}
	// Apply the same fix to inline links and reference definitions; the
	// format string re-assembles the match in its original style.
	for _, conversion := range []struct {
		re     *regexp.Regexp
		format string
	}{
		{linkRE, "[%s](%s)"},
		{refRE, "[%s]: %s"},
	} {
		fileBytes = conversion.re.ReplaceAllFunc(fileBytes, func(in []byte) (out []byte) {
			text, link, caption, changed := getParts(conversion.re, in)
			if !changed {
				return in
			}
			return []byte(fmt.Sprintf(conversion.format, text, link+caption))
		})
	}
	return fileBytes
}
var (
	// Matches an opening/closing code fence line. Allow more than 3 ticks
	// because people write this stuff.
	ticticticRE = regexp.MustCompile("^`{3,}\\s*(.*)$")
	// Matches a one-line ```span``` that is not a block fence.
	notTicticticRE = regexp.MustCompile("^```(.*)```")
	// Maps fence language codes to ones the highlighter understands.
	languageFixups = map[string]string{
		"shell":  "sh",
		"golang": "go",
	}
)
// rewriteCodeBlocks converts fenced markdown code blocks (```lang ... ```)
// into Jekyll {% highlight %} / {% raw %} blocks so kramdown renders them
// with syntax highlighting and without interpreting Liquid tags inside.
func rewriteCodeBlocks(fileBytes []byte) []byte {
	lines := strings.Split(string(fileBytes), "\n")
	// inside tracks whether we are between an opening and closing fence;
	// highlight tracks whether the current block emitted {% highlight %}.
	inside := false
	highlight := false
	output := []string{}
	for i := range lines {
		trimmed := []byte(strings.TrimLeft(lines[i], " "))
		// Pass through non-fence lines and single-line ```foo``` spans.
		if !ticticticRE.Match(trimmed) || notTicticticRE.Match(trimmed) {
			output = append(output, lines[i])
			continue
		}
		if !inside {
			out := ticticticRE.FindSubmatch(trimmed)
			lang := strings.ToLower(string(out[1]))
			// Can't syntax highlight unknown language.
			if fixedLang := languageFixups[lang]; fixedLang != "" {
				lang = fixedLang
			}
			if lang != "" {
				// The "redcarpet" markdown renderer will accept ```lang, but
				// "kramdown" will not. They both accept this format, and we
				// need a hook to fixup language codes anyway (until we have a
				// munger in master).
				output = append(output, fmt.Sprintf("{%% highlight %s %%}", lang))
				highlight = true
			} else {
				output = append(output, lines[i])
			}
			output = append(output, `{% raw %}`)
		} else {
			output = append(output, `{% endraw %}`)
			if highlight {
				output = append(output, `{% endhighlight %}`)
				highlight = false
			} else {
				// No highlight block was opened, so keep the literal fence.
				output = append(output, lines[i])
			}
		}
		inside = !inside
	}
	return []byte(strings.Join(output, "\n") + "\n")
}
var (
htmlPreviewRE = regexp.MustCompile(`https://htmlpreview.github.io/\?https://github.com/kubernetes/kubernetes/[^/]*`)
)
// For example, this rewrites
// https://htmlpreview.github.io/?https://github.com/kubernetes/kubernetes/v1.1.0/docs/api-reference/v1/definitions.html#_v1_pod
// To
// http://kubernetes.io/v1.1/docs/api-reference/v1/definitions.html#_v1_pod
func rewriteHTMLPreviewLinks(fileBytes []byte) []byte {
return htmlPreviewRE.ReplaceAll(fileBytes, []byte(fmt.Sprintf("http://kubernetes.io/%s", *version)))
}
var (
	// Matches "[Download example](<name>.yaml?raw=true)" (or .json),
	// capturing the base name and the extension separately.
	downloadLinkRE = regexp.MustCompile("\\[Download example\\]\\((.*\\.)(yaml|json)\\?raw=true\\)")
)

// Drops "?raw=true" from download example links. For example this rewrites
// "[Download example](pod.yaml?raw=true)"
// to
// "[Download example](pod.yaml)"
func rewriteDownloadLinks(fileBytes []byte) []byte {
	// The replacement template re-emits the captured name and extension,
	// dropping the "?raw=true" suffix.
	return downloadLinkRE.ReplaceAll(fileBytes, []byte("[Download example](${1}${2})"))
}
var (
	// matches "# headers" and "## headers"
	atxTitleRE = regexp.MustCompile(`(?m)^\s*##?\s+(?P<title>.*)$`)
	// matches
	//   Headers
	//   =======
	// and
	//   Headers
	//   --
	setextTitleRE = regexp.MustCompile("(?m)^(?P<title>.+)\n((=+)|(-+))$")
	// Characters stripped from titles (emphasis markers and escapes).
	ignoredRE = regexp.MustCompile(`[*_\\]`)
)
// removeLinks removes markdown links from the input leaving only the
// display text. Bytes outside links are passed through unchanged.
func removeLinks(input []byte) []byte {
	indices := linkRE.FindAllSubmatchIndex(input, -1)
	if len(indices) == 0 {
		return input
	}
	out := make([]byte, 0, len(input))
	cur := 0
	for _, index := range indices {
		linkStart, linkEnd, textStart, textEnd := index[0], index[1], index[2], index[3]
		// Append the bytes between the previous match and this one, then
		// only the link's display text.
		out = append(out, input[cur:linkStart]...)
		out = append(out, input[textStart:textEnd]...)
		cur = linkEnd
	}
	// Pick up everything after the final link. (input[cur:] is the
	// idiomatic form of input[cur:len(input)].)
	return append(out, input[cur:]...)
}
// findTitleMatch returns the start of the match and the "title" subgroup of
// bytes. If the regexp doesn't match, it will return -1 and nil.
func findTitleMatch(titleRE *regexp.Regexp, input []byte) (start int, title []byte) {
indices := titleRE.FindSubmatchIndex(input)
if len(indices) == 0 {
return -1, nil
}
for i, name := range titleRE.SubexpNames() {
if name == "title" {
start, end := indices[2*i], indices[2*i+1]
return indices[0], input[start:end]
}
}
// there was no grouped named title
return -1, nil
}
// getTitle extracts the document title from markdown content: whichever
// of an ATX ("# foo") or setext (underlined) header appears first in the
// file wins. Links and markdown stylings are stripped from the result.
// Returns "" when no header is found.
func getTitle(fileBytes []byte) string {
	atxStart, atxTitle := findTitleMatch(atxTitleRE, fileBytes)
	setextStart, setextTitle := findTitleMatch(setextTitleRE, fileBytes)
	if atxStart == -1 && setextStart == -1 {
		return ""
	}
	// Prefer whichever header style matched earliest; a style that did
	// not match at all is never chosen.
	chosen := atxTitle
	if atxStart == -1 || (setextStart != -1 && setextStart < atxStart) {
		chosen = setextTitle
	}
	// Handle the case where there's a link in the header.
	chosen = removeLinks(chosen)
	// Take out all markdown stylings.
	return string(ignoredRE.ReplaceAll(chosen, nil))
}
// runGitUpdate fetches the given remote so its branch refs are current.
func runGitUpdate(remote string) error {
	out, err := exec.Command("git", "fetch", remote).CombinedOutput()
	if err != nil {
		return fmt.Errorf("git fetch failed: %v\n%s", err, out)
	}
	return nil
}
// copyFiles exports the docs/examples subdirectories of remoteRepo/branch
// into directory by piping "git archive" output into "tar -x".
func copyFiles(remoteRepo, directory, branch string) error {
	if err := runGitUpdate(remoteRepo); err != nil {
		return err
	}
	// git archive's --prefix needs a trailing slash to be treated as a dir.
	if !strings.HasSuffix(directory, "/") {
		directory += "/"
	}
	prefix := fmt.Sprintf("--prefix=%s", directory)
	tagRef := fmt.Sprintf("%s/%s", remoteRepo, branch)
	gitArgs := append([]string{"archive", "--format=tar", prefix, tagRef}, subdirs...)
	gitCmd := exec.Command("git", gitArgs...)
	tarCmd := exec.Command("tar", "-x")
	var err error
	// Wire git's stdout directly into tar's stdin.
	tarCmd.Stdin, err = gitCmd.StdoutPipe()
	if err != nil {
		return err
	}
	gitStderr, err := gitCmd.StderrPipe()
	if err != nil {
		return err
	}
	gitErrs := bytes.Buffer{}
	fmt.Printf("Copying docs and examples from %s to %s\n", tagRef, directory)
	if err = tarCmd.Start(); err != nil {
		return fmt.Errorf("tar command failed: %v", err)
	}
	if err = gitCmd.Run(); err != nil {
		// NOTE(review): reading the stderr pipe after Run has returned may
		// yield nothing, since Run closes its pipes on exit — confirm.
		gitErrs.ReadFrom(gitStderr)
		return fmt.Errorf("git command failed: %v\n%s", err, gitErrs.String())
	}
	return tarCmd.Wait()
}
// copySingleFile copies src to dst, creating or truncating dst. A Close
// error on the destination is reported when the copy itself succeeded.
func copySingleFile(src, dst string) (err error) {
	if _, err = os.Stat(dst); err != nil && !os.IsNotExist(err) {
		// Anything other than "does not exist" is a real problem.
		return err
	}
	var in, out *os.File
	if in, err = os.Open(src); err != nil {
		return err
	}
	defer in.Close()
	if out, err = os.Create(dst); err != nil {
		return err
	}
	defer func() {
		if cerr := out.Close(); err == nil {
			err = cerr
		}
	}()
	_, err = io.Copy(out, in)
	return
}
// main drives the import: validate flags, cd to the repository root, copy
// the docs/examples trees from the requested branch, then munge every
// markdown (and, optionally, API-reference HTML) file in place.
func main() {
	flag.Parse()
	if len(*branch) == 0 {
		fmt.Println("You must specify a branch with --branch.")
		os.Exit(1)
	}
	if len(*outputDir) == 0 {
		fmt.Println("You must specify an output dir with --output-dir.")
		os.Exit(1)
	}
	if len(*version) == 0 {
		fmt.Println("You must specify the release version with --version.")
		os.Exit(1)
	}
	if err := checkCWD(); err != nil {
		fmt.Printf("Could not find the kubernetes root: %v\n", err)
		os.Exit(1)
	}
	if err := copyFiles(*remote, *outputDir, *branch); err != nil {
		fmt.Printf("Error copying files: %v\n", err)
		os.Exit(1)
	}
	for _, subDir := range subdirs {
		prefix := path.Join(*outputDir, subDir)
		err := filepath.Walk(prefix, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if !info.IsDir() && strings.HasSuffix(info.Name(), ".md") {
				fmt.Printf("Processing %s\n", path)
				if err = processFile(prefix, path); err != nil {
					return err
				}
				// READMEs also become the directory's index page.
				if strings.ToLower(info.Name()) == "readme.md" {
					newpath := path[0:len(path)-len("readme.md")] + "index.md"
					fmt.Printf("Copying %s to %s\n", path, newpath)
					if err = copySingleFile(path, newpath); err != nil {
						return err
					}
				}
			}
			// API-reference HTML pages get their own munger (processHTML,
			// defined in the companion source file).
			if *apiReference && !info.IsDir() && (info.Name() == "definitions.html" || info.Name() == "operations.html") {
				fmt.Printf("Processing %s\n", path)
				err := processHTML(path, *version, *outputDir)
				if err != nil {
					return err
				}
			}
			return nil
		})
		if err != nil {
			fmt.Printf("Error while processing markdown and html files: %v\n", err)
			os.Exit(1)
		}
	}
}
// checkCWD changes the working directory to the git repository root so
// all relative paths used by the tool resolve consistently.
func checkCWD() error {
	dir, err := exec.Command("git", "rev-parse", "--show-toplevel").Output()
	if err != nil {
		return err
	}
	return os.Chdir(strings.TrimSpace(string(dir)))
}
Fix the gh-pages munger to link to the branch from which the docs are fetched, rather than HEAD (HEAD drifts, so redirector links would break over time).
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Import docs from a git branch and format them for gh-pages.
// usage: go run _tools/release_docs/main.go _tools/release_docs/api-reference-process.go --branch release-1.0 --output-dir v1.0 --version v1.0
package main
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"strings"
)
// Command-line flags controlling the import, plus the repository
// subdirectories whose docs are copied over.
var (
	branch       = flag.String("branch", "", "The git branch from which to pull docs. (e.g. release-1.0, master).")
	outputDir    = flag.String("output-dir", "", "The directory in which to save results.")
	version      = flag.String("version", "", "The release version. It should be the same as the version segment in the URL, e.g., when importing docs that will be hosted at k8s.io/v1.0/, the version flag should be \"v1.0\".")
	remote       = flag.String("remote", "upstream", "Optional: The name of the remote repo from which to pull docs.")
	apiReference = flag.Bool("apiReference", true, "Optional: Whether update api reference")
	subdirs      = []string{"docs", "examples"}
)
// fileExistsInBranch reports whether path exists in the remote branch the
// docs are imported from, by checking "git ls-tree" output.
func fileExistsInBranch(path string) bool {
	out, err := exec.Command("git", "ls-tree", fmt.Sprintf("%s/%s", *remote, *branch), path).Output()
	return err == nil && len(out) != 0
}
// fixURL rewrites site-relative links found in filename so they work on
// the published gh-pages site; it returns true if u was modified.
// Missing targets and README-less directories are redirected to
// releases.k8s.io/<branch>/..., and local .md links become .html.
func fixURL(filename string, u *url.URL) bool {
	if u.Host != "" || u.Path == "" {
		// Absolute URLs and fragment-only links are left untouched.
		return false
	}
	target := filepath.Join(filepath.Dir(filename), u.Path)
	if fi, err := os.Stat(target); os.IsNotExist(err) {
		// We're linking to something we didn't copy over. Send
		// it through the redirector.
		rel, err := filepath.Rel(*outputDir, target)
		if err != nil {
			return false
		}
		if fileExistsInBranch(rel) {
			// Link to the imported branch, not HEAD, so links stay stable.
			u.Path = filepath.Join(*branch, rel)
			u.Host = "releases.k8s.io"
			u.Scheme = "https"
			return true
		}
	} else if fi.IsDir() {
		// If there's no README.md in the directory, redirect to github
		// for the directory view.
		files, err := filepath.Glob(target + "/*")
		if err != nil {
			return false
		}
		hasReadme := false
		for _, f := range files {
			if strings.ToLower(filepath.Base(f)) == "readme.md" {
				hasReadme = true
			}
		}
		if !hasReadme {
			rel, err := filepath.Rel(*outputDir, target)
			if err != nil {
				return false
			}
			u.Path = filepath.Join(*branch, rel)
			u.Host = "releases.k8s.io"
			u.Scheme = "https"
			return true
		}
	} else if strings.HasSuffix(u.Path, ".md") {
		// Markdown pages are rendered to .html on the site.
		u.Path = u.Path[:len(u.Path)-3] + ".html"
		return true
	}
	return false
}
// processFile rewrites a single markdown file in place: links, code
// blocks, HTML-preview links and download links are munged for gh-pages,
// and a Jekyll front-matter header carrying the page title is prepended.
func processFile(prefix, filename string) (err error) {
	fileBytes, err := ioutil.ReadFile(filename)
	if err != nil {
		return err
	}
	title := getTitle(fileBytes)
	if len(title) == 0 {
		// Fall back to the path relative to prefix, without ".md".
		title = filename[len(prefix)+1 : len(filename)-len(".md")]
	}
	output := rewriteLinks(filename, fileBytes)
	output = rewriteCodeBlocks(output)
	output = rewriteHTMLPreviewLinks(output)
	output = rewriteDownloadLinks(output)
	f, err := os.Create(filename)
	if err != nil {
		return err
	}
	// The original leaked the file handle: close it on every path, and
	// surface the Close error when nothing else failed first.
	defer func() {
		if cerr := f.Close(); err == nil {
			err = cerr
		}
	}()
	if _, err = f.WriteString(fmt.Sprintf("---\nlayout: docwithnav\ntitle: %q\n---\n", title)); err != nil {
		return err
	}
	_, err = f.Write(output)
	return err
}
var (
	// Finds markdown links of the form [foo](bar "alt-text").
	linkRE = regexp.MustCompile(`\[([^]]*)\]\(([^)]*)\)`)
	// Finds markdown reference style link definitions.
	refRE = regexp.MustCompile(`(?m)^\[([^]]*)\]:\s+(.*)$`)
	// Splits the link target into link target and alt-text.
	altTextRE = regexp.MustCompile(`(.*)( ".*")`)
)
// rewriteLinks rewrites markdown links (both inline links and
// reference-style definitions) in fileBytes, passing each target through
// fixURL so it works on the published site. Unchanged links are returned
// byte-for-byte intact.
func rewriteLinks(filename string, fileBytes []byte) []byte {
	// getParts splits one regexp match into display text, link target and
	// optional alt-text caption; changed reports whether fixURL modified
	// the target.
	getParts := func(re *regexp.Regexp, in []byte) (text, link, caption string, changed bool) {
		match := re.FindSubmatch(in)
		text = string(match[1])
		link = strings.TrimSpace(string(match[2]))
		if parts := altTextRE.FindStringSubmatch(link); parts != nil {
			link = parts[1]
			caption = parts[2]
		}
		u, err := url.Parse(link)
		if err != nil || !fixURL(filename, u) {
			return "", "", "", false
		}
		return text, u.String(), caption, true
	}
	// Apply the same fix to inline links and reference definitions; the
	// format string re-assembles the match in its original style.
	for _, conversion := range []struct {
		re     *regexp.Regexp
		format string
	}{
		{linkRE, "[%s](%s)"},
		{refRE, "[%s]: %s"},
	} {
		fileBytes = conversion.re.ReplaceAllFunc(fileBytes, func(in []byte) (out []byte) {
			text, link, caption, changed := getParts(conversion.re, in)
			if !changed {
				return in
			}
			return []byte(fmt.Sprintf(conversion.format, text, link+caption))
		})
	}
	return fileBytes
}
var (
	// Matches an opening/closing code fence line. Allow more than 3 ticks
	// because people write this stuff.
	ticticticRE = regexp.MustCompile("^`{3,}\\s*(.*)$")
	// Matches a one-line ```span``` that is not a block fence.
	notTicticticRE = regexp.MustCompile("^```(.*)```")
	// Maps fence language codes to ones the highlighter understands.
	languageFixups = map[string]string{
		"shell":  "sh",
		"golang": "go",
	}
)
// rewriteCodeBlocks converts fenced markdown code blocks (```lang ... ```)
// into Jekyll {% highlight %} / {% raw %} blocks so kramdown renders them
// with syntax highlighting and without interpreting Liquid tags inside.
func rewriteCodeBlocks(fileBytes []byte) []byte {
	lines := strings.Split(string(fileBytes), "\n")
	// inside tracks whether we are between an opening and closing fence;
	// highlight tracks whether the current block emitted {% highlight %}.
	inside := false
	highlight := false
	output := []string{}
	for i := range lines {
		trimmed := []byte(strings.TrimLeft(lines[i], " "))
		// Pass through non-fence lines and single-line ```foo``` spans.
		if !ticticticRE.Match(trimmed) || notTicticticRE.Match(trimmed) {
			output = append(output, lines[i])
			continue
		}
		if !inside {
			out := ticticticRE.FindSubmatch(trimmed)
			lang := strings.ToLower(string(out[1]))
			// Can't syntax highlight unknown language.
			if fixedLang := languageFixups[lang]; fixedLang != "" {
				lang = fixedLang
			}
			if lang != "" {
				// The "redcarpet" markdown renderer will accept ```lang, but
				// "kramdown" will not. They both accept this format, and we
				// need a hook to fixup language codes anyway (until we have a
				// munger in master).
				output = append(output, fmt.Sprintf("{%% highlight %s %%}", lang))
				highlight = true
			} else {
				output = append(output, lines[i])
			}
			output = append(output, `{% raw %}`)
		} else {
			output = append(output, `{% endraw %}`)
			if highlight {
				output = append(output, `{% endhighlight %}`)
				highlight = false
			} else {
				// No highlight block was opened, so keep the literal fence.
				output = append(output, lines[i])
			}
		}
		inside = !inside
	}
	return []byte(strings.Join(output, "\n") + "\n")
}
var (
htmlPreviewRE = regexp.MustCompile(`https://htmlpreview.github.io/\?https://github.com/kubernetes/kubernetes/[^/]*`)
)
// For example, this rewrites
// https://htmlpreview.github.io/?https://github.com/kubernetes/kubernetes/v1.1.0/docs/api-reference/v1/definitions.html#_v1_pod
// To
// http://kubernetes.io/v1.1/docs/api-reference/v1/definitions.html#_v1_pod
func rewriteHTMLPreviewLinks(fileBytes []byte) []byte {
return htmlPreviewRE.ReplaceAll(fileBytes, []byte(fmt.Sprintf("http://kubernetes.io/%s", *version)))
}
var (
	// Matches "[Download example](<name>.yaml?raw=true)" (or .json),
	// capturing the base name and the extension separately.
	downloadLinkRE = regexp.MustCompile("\\[Download example\\]\\((.*\\.)(yaml|json)\\?raw=true\\)")
)

// Drops "?raw=true" from download example links. For example this rewrites
// "[Download example](pod.yaml?raw=true)"
// to
// "[Download example](pod.yaml)"
func rewriteDownloadLinks(fileBytes []byte) []byte {
	// The replacement template re-emits the captured name and extension,
	// dropping the "?raw=true" suffix.
	return downloadLinkRE.ReplaceAll(fileBytes, []byte("[Download example](${1}${2})"))
}
var (
	// matches "# headers" and "## headers"
	atxTitleRE = regexp.MustCompile(`(?m)^\s*##?\s+(?P<title>.*)$`)
	// matches
	//   Headers
	//   =======
	// and
	//   Headers
	//   --
	setextTitleRE = regexp.MustCompile("(?m)^(?P<title>.+)\n((=+)|(-+))$")
	// Characters stripped from titles (emphasis markers and escapes).
	ignoredRE = regexp.MustCompile(`[*_\\]`)
)
// removeLinks removes markdown links from the input leaving only the
// display text. Bytes outside links are passed through unchanged.
func removeLinks(input []byte) []byte {
	indices := linkRE.FindAllSubmatchIndex(input, -1)
	if len(indices) == 0 {
		return input
	}
	out := make([]byte, 0, len(input))
	cur := 0
	for _, index := range indices {
		linkStart, linkEnd, textStart, textEnd := index[0], index[1], index[2], index[3]
		// Append the bytes between the previous match and this one, then
		// only the link's display text.
		out = append(out, input[cur:linkStart]...)
		out = append(out, input[textStart:textEnd]...)
		cur = linkEnd
	}
	// Pick up everything after the final link. (input[cur:] is the
	// idiomatic form of input[cur:len(input)].)
	return append(out, input[cur:]...)
}
// findTitleMatch returns the start of the match and the "title" subgroup of
// bytes. If the regexp doesn't match, it will return -1 and nil.
func findTitleMatch(titleRE *regexp.Regexp, input []byte) (start int, title []byte) {
indices := titleRE.FindSubmatchIndex(input)
if len(indices) == 0 {
return -1, nil
}
for i, name := range titleRE.SubexpNames() {
if name == "title" {
start, end := indices[2*i], indices[2*i+1]
return indices[0], input[start:end]
}
}
// there was no grouped named title
return -1, nil
}
// getTitle extracts the document title from markdown content: whichever
// of an ATX ("# foo") or setext (underlined) header appears first in the
// file wins. Links and markdown stylings are stripped from the result.
// Returns "" when no header is found.
func getTitle(fileBytes []byte) string {
	atxStart, atxTitle := findTitleMatch(atxTitleRE, fileBytes)
	setextStart, setextTitle := findTitleMatch(setextTitleRE, fileBytes)
	if atxStart == -1 && setextStart == -1 {
		return ""
	}
	// Prefer whichever header style matched earliest; a style that did
	// not match at all is never chosen.
	chosen := atxTitle
	if atxStart == -1 || (setextStart != -1 && setextStart < atxStart) {
		chosen = setextTitle
	}
	// Handle the case where there's a link in the header.
	chosen = removeLinks(chosen)
	// Take out all markdown stylings.
	return string(ignoredRE.ReplaceAll(chosen, nil))
}
// runGitUpdate fetches the given remote so its branch refs are current.
func runGitUpdate(remote string) error {
	out, err := exec.Command("git", "fetch", remote).CombinedOutput()
	if err != nil {
		return fmt.Errorf("git fetch failed: %v\n%s", err, out)
	}
	return nil
}
// copyFiles exports the docs/examples subdirectories of remoteRepo/branch
// into directory by piping "git archive" output into "tar -x".
func copyFiles(remoteRepo, directory, branch string) error {
	if err := runGitUpdate(remoteRepo); err != nil {
		return err
	}
	// git archive's --prefix needs a trailing slash to be treated as a dir.
	if !strings.HasSuffix(directory, "/") {
		directory += "/"
	}
	prefix := fmt.Sprintf("--prefix=%s", directory)
	tagRef := fmt.Sprintf("%s/%s", remoteRepo, branch)
	gitArgs := append([]string{"archive", "--format=tar", prefix, tagRef}, subdirs...)
	gitCmd := exec.Command("git", gitArgs...)
	tarCmd := exec.Command("tar", "-x")
	var err error
	// Wire git's stdout directly into tar's stdin.
	tarCmd.Stdin, err = gitCmd.StdoutPipe()
	if err != nil {
		return err
	}
	gitStderr, err := gitCmd.StderrPipe()
	if err != nil {
		return err
	}
	gitErrs := bytes.Buffer{}
	fmt.Printf("Copying docs and examples from %s to %s\n", tagRef, directory)
	if err = tarCmd.Start(); err != nil {
		return fmt.Errorf("tar command failed: %v", err)
	}
	if err = gitCmd.Run(); err != nil {
		// NOTE(review): reading the stderr pipe after Run has returned may
		// yield nothing, since Run closes its pipes on exit — confirm.
		gitErrs.ReadFrom(gitStderr)
		return fmt.Errorf("git command failed: %v\n%s", err, gitErrs.String())
	}
	return tarCmd.Wait()
}
// copySingleFile copies src to dst, creating or truncating dst. A Close
// error on the destination is reported when the copy itself succeeded.
func copySingleFile(src, dst string) (err error) {
	if _, err = os.Stat(dst); err != nil && !os.IsNotExist(err) {
		// Anything other than "does not exist" is a real problem.
		return err
	}
	var in, out *os.File
	if in, err = os.Open(src); err != nil {
		return err
	}
	defer in.Close()
	if out, err = os.Create(dst); err != nil {
		return err
	}
	defer func() {
		if cerr := out.Close(); err == nil {
			err = cerr
		}
	}()
	_, err = io.Copy(out, in)
	return
}
// main drives the import: validate flags, cd to the repository root, copy
// the docs/examples trees from the requested branch, then munge every
// markdown (and, optionally, API-reference HTML) file in place.
func main() {
	flag.Parse()
	if len(*branch) == 0 {
		fmt.Println("You must specify a branch with --branch.")
		os.Exit(1)
	}
	if len(*outputDir) == 0 {
		fmt.Println("You must specify an output dir with --output-dir.")
		os.Exit(1)
	}
	if len(*version) == 0 {
		fmt.Println("You must specify the release version with --version.")
		os.Exit(1)
	}
	if err := checkCWD(); err != nil {
		fmt.Printf("Could not find the kubernetes root: %v\n", err)
		os.Exit(1)
	}
	if err := copyFiles(*remote, *outputDir, *branch); err != nil {
		fmt.Printf("Error copying files: %v\n", err)
		os.Exit(1)
	}
	for _, subDir := range subdirs {
		prefix := path.Join(*outputDir, subDir)
		err := filepath.Walk(prefix, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if !info.IsDir() && strings.HasSuffix(info.Name(), ".md") {
				fmt.Printf("Processing %s\n", path)
				if err = processFile(prefix, path); err != nil {
					return err
				}
				// READMEs also become the directory's index page.
				if strings.ToLower(info.Name()) == "readme.md" {
					newpath := path[0:len(path)-len("readme.md")] + "index.md"
					fmt.Printf("Copying %s to %s\n", path, newpath)
					if err = copySingleFile(path, newpath); err != nil {
						return err
					}
				}
			}
			// API-reference HTML pages get their own munger (processHTML,
			// defined in the companion source file).
			if *apiReference && !info.IsDir() && (info.Name() == "definitions.html" || info.Name() == "operations.html") {
				fmt.Printf("Processing %s\n", path)
				err := processHTML(path, *version, *outputDir)
				if err != nil {
					return err
				}
			}
			return nil
		})
		if err != nil {
			fmt.Printf("Error while processing markdown and html files: %v\n", err)
			os.Exit(1)
		}
	}
}
// checkCWD changes the working directory to the git repository root so
// all relative paths used by the tool resolve consistently.
func checkCWD() error {
	dir, err := exec.Command("git", "rev-parse", "--show-toplevel").Output()
	if err != nil {
		return err
	}
	return os.Chdir(strings.TrimSpace(string(dir)))
}
|
package bot
import "fmt"
// NewBot builds a Bot wired to the channels in cfg and starts its read
// loop. NOTE(review): init loops forever, so NewBot never returns and the
// constructed bot is not handed back to the caller — confirm intended.
func NewBot(cfg BotConfig) {
	bot := &Bot{}
	bot.Read = cfg.Readchan
	bot.Send = cfg.Sendchan
	bot.Channel = cfg.Channel
	bot.init()
}
// init logs the bot's channel, then loops forever: each message received
// on bot.Read is logged (subscriptions get an extra line) and handed to
// bot.Handle in its own goroutine.
func (bot *Bot) init() {
	fmt.Printf("new bot in %s\n", bot.Channel)
	for {
		m := <-bot.Read
		fmt.Printf("#%s %s : %s\n", m.Channel, m.Username, m.Message)
		if m.MessageType == "sub" {
			fmt.Printf("%s subbed for %d months in a row\n", m.Username, m.Length)
		}
		go bot.Handle(m)
	}
}
// Say queues an IRC PRIVMSG for the bot's channel on the send channel.
func (bot *Bot) Say(message string) {
	line := "PRIVMSG #" + bot.Channel + " : " + message
	bot.Send <- line
}
Another fix: ensure outgoing messages start with ". " and drop the extra space after the ":" in the PRIVMSG line.
package bot
import (
"fmt"
"strings"
)
// NewBot builds a Bot wired to the channels in cfg and starts its read
// loop. NOTE(review): init loops forever, so NewBot never returns and the
// constructed bot is not handed back to the caller — confirm intended.
func NewBot(cfg BotConfig) {
	bot := &Bot{}
	bot.Read = cfg.Readchan
	bot.Send = cfg.Sendchan
	bot.Channel = cfg.Channel
	bot.init()
}
// init logs the bot's channel, then loops forever: each message received
// on bot.Read is logged (subscriptions get an extra line) and handed to
// bot.Handle in its own goroutine.
func (bot *Bot) init() {
	fmt.Printf("new bot in %s\n", bot.Channel)
	for {
		m := <-bot.Read
		fmt.Printf("#%s %s :%s\n", m.Channel, m.Username, m.Message)
		if m.MessageType == "sub" {
			fmt.Printf("%s subbed for %d months in a row\n", m.Username, m.Length)
		}
		go bot.Handle(m)
	}
}
// Say queues a PRIVMSG for the bot's channel, first prefixing the message
// with ". " when it does not already start with a dot.
func (bot *Bot) Say(message string) {
	if !strings.HasPrefix(message, ".") {
		message = ". " + message
	}
	bot.Send <- "PRIVMSG #" + bot.Channel + " :" + message
}
|
package schema
import "errors"
// AnyOf validates if any of the sub field validators validates.
type AnyOf []FieldValidator

// Compile implements Compiler interface. Every sub-validator that is
// itself a Compiler is compiled; the first compile error aborts and is
// returned.
func (v *AnyOf) Compile() (err error) {
	for _, sv := range *v {
		if c, ok := sv.(Compiler); ok {
			if err = c.Compile(); err != nil {
				return
			}
		}
	}
	return
}
// Validate ensures that at least one sub-validator validates; the first
// sub-validator to accept the value wins, and its (possibly transformed)
// result is returned.
func (v AnyOf) Validate(value interface{}) (interface{}, error) {
	for _, validator := range v {
		// Shadow value and err inside the if: with the original outer
		// assignment, a *failing* validator overwrote value, corrupting
		// the input handed to the remaining validators.
		if value, err := validator.Validate(value); err == nil {
			return value, nil
		}
	}
	// TODO: combine errors.
	return nil, errors.New("invalid")
}
Fix incorrect behaviour in the AnyOf validator: shadow the loop-local result so a failing sub-validator no longer overwrites the value passed to the remaining sub-validators.
package schema
import "errors"
// AnyOf validates if any of the sub field validators validates.
type AnyOf []FieldValidator

// Compile implements Compiler interface. Every sub-validator that is
// itself a Compiler is compiled; the first compile error aborts and is
// returned.
func (v *AnyOf) Compile() (err error) {
	for _, sv := range *v {
		if c, ok := sv.(Compiler); ok {
			if err = c.Compile(); err != nil {
				return
			}
		}
	}
	return
}
// Validate ensures that at least one sub-validator validates. value and
// err are deliberately shadowed inside the loop so a failing validator
// cannot clobber the value handed to the remaining validators.
func (v AnyOf) Validate(value interface{}) (interface{}, error) {
	for _, validator := range v {
		if value, err := validator.Validate(value); err == nil {
			return value, nil
		}
	}
	// TODO: combine errors.
	return nil, errors.New("invalid")
}
|
package deployer
import (
"fmt"
"reflect"
"strings"
"time"
v32 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
v33 "github.com/rancher/rancher/pkg/apis/project.cattle.io/v3"
"github.com/rancher/norman/controller"
"github.com/rancher/rancher/pkg/catalog/manager"
alertutil "github.com/rancher/rancher/pkg/controllers/managementuserlegacy/alert/common"
"github.com/rancher/rancher/pkg/controllers/managementuserlegacy/helm/common"
v1 "github.com/rancher/rancher/pkg/generated/norman/core/v1"
v3 "github.com/rancher/rancher/pkg/generated/norman/management.cattle.io/v3"
projectv3 "github.com/rancher/rancher/pkg/generated/norman/project.cattle.io/v3"
monitorutil "github.com/rancher/rancher/pkg/monitoring"
"github.com/rancher/rancher/pkg/namespace"
"github.com/rancher/rancher/pkg/ref"
"github.com/rancher/rancher/pkg/types/config"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
)
const (
	// initVersion is the placeholder version reported before the first
	// successful upgrade.
	initVersion = "initializing"
)

var (
	// ServiceName identifies this system service.
	ServiceName = "alerting"
	// waitCatalogSyncInterval — presumably the pause between catalog sync
	// checks; usage not visible in this file — TODO confirm.
	waitCatalogSyncInterval = 60 * time.Second
)

const (
	// defaultGroupIntervalSeconds — presumably the default alert-group
	// interval; usage not visible in this file — TODO confirm.
	defaultGroupIntervalSeconds = 180
)
// AlertService deploys and upgrades the cluster alerting app and migrates
// legacy cluster/project alert resources. All fields are clients and
// listers wired up by Init from the user cluster context.
type AlertService struct {
	clusterName           string
	clusterLister         v3.ClusterLister
	catalogLister         v3.CatalogLister
	catalogManager        manager.CatalogManager
	apps                  projectv3.AppInterface
	appLister             projectv3.AppLister
	oldClusterAlerts      v3.ClusterAlertInterface
	oldProjectAlerts      v3.ProjectAlertInterface
	oldProjectAlertLister v3.ProjectAlertLister
	clusterAlertGroups    v3.ClusterAlertGroupInterface
	projectAlertGroups    v3.ProjectAlertGroupInterface
	clusterAlertRules     v3.ClusterAlertRuleInterface
	projectAlertRules     v3.ProjectAlertRuleInterface
	projectLister         v3.ProjectLister
	namespaces            v1.NamespaceInterface
	templateLister        v3.CatalogTemplateLister
}
// NewService returns an empty AlertService; Init must be called before use.
func NewService() *AlertService {
	return &AlertService{}
}
// Init wires the AlertService to the given user cluster context,
// populating all clients and listers from the management plane.
func (l *AlertService) Init(cluster *config.UserContext) {
	l.clusterName = cluster.ClusterName
	l.clusterLister = cluster.Management.Management.Clusters("").Controller().Lister()
	l.catalogLister = cluster.Management.Management.Catalogs(metav1.NamespaceAll).Controller().Lister()
	l.oldClusterAlerts = cluster.Management.Management.ClusterAlerts(cluster.ClusterName)
	l.oldProjectAlerts = cluster.Management.Management.ProjectAlerts(metav1.NamespaceAll)
	l.oldProjectAlertLister = cluster.Management.Management.ProjectAlerts("").Controller().Lister()
	l.clusterAlertGroups = cluster.Management.Management.ClusterAlertGroups(cluster.ClusterName)
	l.projectAlertGroups = cluster.Management.Management.ProjectAlertGroups(metav1.NamespaceAll)
	l.clusterAlertRules = cluster.Management.Management.ClusterAlertRules(cluster.ClusterName)
	l.projectAlertRules = cluster.Management.Management.ProjectAlertRules(metav1.NamespaceAll)
	l.projectLister = cluster.Management.Management.Projects(cluster.ClusterName).Controller().Lister()
	l.apps = cluster.Management.Project.Apps(metav1.NamespaceAll)
	l.appLister = cluster.Management.Project.Apps("").Controller().Lister()
	l.namespaces = cluster.Core.Namespaces(metav1.NamespaceAll)
	l.templateLister = cluster.Management.Management.CatalogTemplates(metav1.NamespaceAll).Controller().Lister()
	l.catalogManager = cluster.Management.CatalogManager
}
// Version reports the service version string: the monitoring template
// name suffixed with the initializing marker.
func (l *AlertService) Version() (string, error) {
	return monitorutil.RancherMonitoringTemplateName + "-" + initVersion, nil
}
// Upgrade migrates legacy alerting resources when coming from a version
// that predates the monitoring template naming scheme, strips finalizers
// from legacy project alerts, and upgrades the alertmanager app in the
// cluster's system project to the latest available template version. It
// returns the new chart version on success.
func (l *AlertService) Upgrade(currentVersion string) (string, error) {
	template, err := l.templateLister.Get(namespace.GlobalNamespace, monitorutil.RancherMonitoringTemplateName)
	if err != nil {
		return "", fmt.Errorf("get template %s:%s failed, %v", namespace.GlobalNamespace, monitorutil.RancherMonitoringTemplateName, err)
	}
	templateVersion, err := l.catalogManager.LatestAvailableTemplateVersion(template, l.clusterName)
	if err != nil {
		return "", err
	}
	systemCatalogName := template.Spec.CatalogID
	newExternalID := templateVersion.ExternalID
	newVersion, _, err := common.ParseExternalID(newExternalID)
	if err != nil {
		return "", err
	}
	appName, _ := monitorutil.ClusterAlertManagerInfo()
	// Migrate legacy resources when the recorded version predates the
	// monitoring template naming scheme.
	if !strings.Contains(currentVersion, monitorutil.RancherMonitoringTemplateName) {
		if err := l.migrateLegacyClusterAlert(); err != nil {
			return "", err
		}
		if err := l.migrateLegacyProjectAlert(); err != nil {
			return "", err
		}
		if err := l.removeLegacyAlerting(); err != nil {
			return "", err
		}
	}
	// Remove finalizer from legacy ProjectAlert.
	if err := l.removeFinalizerFromLegacyAlerting(); err != nil {
		return "", err
	}
	// Upgrade the existing app, if any.
	defaultSystemProjects, err := l.projectLister.List(metav1.NamespaceAll, labels.Set(systemProjectLabel).AsSelector())
	if err != nil {
		return "", fmt.Errorf("list system project failed, %v", err)
	}
	if len(defaultSystemProjects) == 0 {
		return "", fmt.Errorf("get system project failed")
	}
	systemProject := defaultSystemProjects[0]
	if systemProject == nil {
		return "", fmt.Errorf("get system project failed")
	}
	app, err := l.appLister.Get(systemProject.Name, appName)
	if err != nil {
		if apierrors.IsNotFound(err) {
			// No app deployed yet: nothing to upgrade.
			return newVersion, nil
		}
		return "", fmt.Errorf("get app %s:%s failed, %v", systemProject.Name, appName, err)
	}
	newApp := app.DeepCopy()
	newApp.Spec.ExternalID = newExternalID
	newApp.Spec.Answers["operator.enabled"] = "false"
	if !reflect.DeepEqual(newApp, app) {
		// check cluster ready before upgrade, because helm will not retry if got cluster not ready error
		cluster, err := l.clusterLister.Get(metav1.NamespaceAll, l.clusterName)
		if err != nil {
			return "", fmt.Errorf("get cluster %s failed, %v", l.clusterName, err)
		}
		if !v32.ClusterConditionReady.IsTrue(cluster) {
			return "", fmt.Errorf("cluster %v not ready", l.clusterName)
		}
		systemCatalog, err := l.catalogLister.Get(metav1.NamespaceAll, systemCatalogName)
		if err != nil {
			return "", fmt.Errorf("get catalog %s failed, %v", systemCatalogName, err)
		}
		if !v32.CatalogConditionUpgraded.IsTrue(systemCatalog) || !v32.CatalogConditionRefreshed.IsTrue(systemCatalog) || !v32.CatalogConditionDiskCached.IsTrue(systemCatalog) {
			return "", fmt.Errorf("catalog %v not ready", systemCatalogName)
		}
		// add force upgrade to handle chart compatibility in different version
		v33.AppConditionForceUpgrade.Unknown(newApp)
		if _, err = l.apps.Update(newApp); err != nil {
			return "", fmt.Errorf("update app %s:%s failed, %v", app.Namespace, app.Name, err)
		}
	}
	return newVersion, nil
}
// migrateLegacyClusterAlert converts each legacy v3 ClusterAlert in this
// cluster into a ClusterAlertRule ("migrate-<name>") attached to a dedicated
// ClusterAlertGroup ("migrate-group-<name>") that carries the original
// recipients. The migration is idempotent: existing rules are updated in
// place and AlreadyExists errors on creation are ignored.
func (l *AlertService) migrateLegacyClusterAlert() error {
	oldClusterAlert, err := l.oldClusterAlerts.List(metav1.ListOptions{})
	if err != nil {
		return fmt.Errorf("get old cluster alert failed, %s", err)
	}
	for _, v := range oldClusterAlert.Items {
		migrationGroupName := fmt.Sprintf("migrate-group-%s", v.Name)
		groupID := alertutil.GetGroupID(l.clusterName, migrationGroupName)
		name := fmt.Sprintf("migrate-%s", v.Name)
		// Build the new rule from the legacy alert's common fields; the
		// group interval is not present on legacy alerts and gets the
		// migration default.
		newClusterRule := &v3.ClusterAlertRule{
			ObjectMeta: metav1.ObjectMeta{
				Name:      name,
				Namespace: l.clusterName,
			},
			Spec: v32.ClusterAlertRuleSpec{
				ClusterName: l.clusterName,
				GroupName:   groupID,
				CommonRuleField: v32.CommonRuleField{
					DisplayName: v.Spec.DisplayName,
					Severity:    v.Spec.Severity,
					TimingField: v32.TimingField{
						GroupWaitSeconds:      v.Spec.InitialWaitSeconds,
						GroupIntervalSeconds:  defaultGroupIntervalSeconds,
						RepeatIntervalSeconds: v.Spec.RepeatIntervalSeconds,
					},
				},
			},
		}
		// Carry over whichever legacy target type was set (node, event,
		// or system service).
		if v.Spec.TargetNode != nil {
			newClusterRule.Spec.NodeRule = &v32.NodeRule{
				NodeName:     v.Spec.TargetNode.NodeName,
				Selector:     v.Spec.TargetNode.Selector,
				Condition:    v.Spec.TargetNode.Condition,
				MemThreshold: v.Spec.TargetNode.MemThreshold,
				CPUThreshold: v.Spec.TargetNode.CPUThreshold,
			}
		}
		if v.Spec.TargetEvent != nil {
			newClusterRule.Spec.EventRule = &v32.EventRule{
				EventType:    v.Spec.TargetEvent.EventType,
				ResourceKind: v.Spec.TargetEvent.ResourceKind,
			}
		}
		if v.Spec.TargetSystemService != nil {
			newClusterRule.Spec.SystemServiceRule = &v32.SystemServiceRule{
				Condition: v.Spec.TargetSystemService.Condition,
			}
		}
		// Create the rule if it does not exist yet; otherwise overwrite
		// its spec so repeated migrations converge on the same state.
		oldClusterRule, err := l.clusterAlertRules.Get(newClusterRule.Name, metav1.GetOptions{})
		if err != nil {
			if !apierrors.IsNotFound(err) {
				return fmt.Errorf("migrate %s:%s failed, get alert rule failed, %v", v.Namespace, v.Name, err)
			}
			if _, err = l.clusterAlertRules.Create(newClusterRule); err != nil && !apierrors.IsAlreadyExists(err) {
				return fmt.Errorf("migrate %s:%s failed, create alert rule failed, %v", v.Namespace, v.Name, err)
			}
		} else {
			updatedClusterRule := oldClusterRule.DeepCopy()
			updatedClusterRule.Spec = newClusterRule.Spec
			if _, err := l.clusterAlertRules.Update(updatedClusterRule); err != nil {
				return fmt.Errorf("migrate %s:%s failed, update alert rule failed, %v", v.Namespace, v.Name, err)
			}
		}
		// Group holding the migrated rule, preserving the original recipients.
		legacyGroup := &v32.ClusterAlertGroup{
			ObjectMeta: metav1.ObjectMeta{
				Name:      migrationGroupName,
				Namespace: l.clusterName,
			},
			Spec: v32.ClusterGroupSpec{
				ClusterName: l.clusterName,
				CommonGroupField: v32.CommonGroupField{
					DisplayName: "Migrate group",
					Description: "Migrate alert from last version",
					TimingField: v32.TimingField{
						GroupWaitSeconds:      v.Spec.InitialWaitSeconds,
						GroupIntervalSeconds:  defaultGroupIntervalSeconds,
						RepeatIntervalSeconds: v.Spec.RepeatIntervalSeconds,
					},
				},
				Recipients: v.Spec.Recipients,
			},
		}
		_, err = l.clusterAlertGroups.Create(legacyGroup)
		if err != nil && !apierrors.IsAlreadyExists(err) {
			return fmt.Errorf("migrate failed, create alert group %s:%s failed, %v", l.clusterName, migrationGroupName, err)
		}
	}
	return nil
}
// migrateLegacyProjectAlert converts legacy v3 ProjectAlerts belonging to
// this cluster into ProjectAlertRule/ProjectAlertGroup pairs, bucketed by
// owning project. Idempotent in the same way as the cluster-alert migration.
func (l *AlertService) migrateLegacyProjectAlert() error {
	oldProjectAlert, err := l.oldProjectAlertLister.List("", labels.NewSelector())
	if err != nil {
		return fmt.Errorf("get old project alert failed, %s", err)
	}
	// Bucket legacy alerts by project ID, keeping only those that belong
	// to this cluster.
	oldProjectAlertGroup := make(map[string][]*v3.ProjectAlert)
	for _, v := range oldProjectAlert {
		if controller.ObjectInCluster(l.clusterName, v) {
			oldProjectAlertGroup[v.Spec.ProjectName] = append(oldProjectAlertGroup[v.Spec.ProjectName], v)
		}
	}
	for projectID, oldAlerts := range oldProjectAlertGroup {
		// ref.Parse splits the composite project ID; the second part is
		// used as the namespace for the migrated objects.
		_, projectName := ref.Parse(projectID)
		for _, v := range oldAlerts {
			migrationGroupName := fmt.Sprintf("migrate-group-%s", v.Name)
			groupID := alertutil.GetGroupID(projectName, migrationGroupName)
			migrationRuleName := fmt.Sprintf("migrate-rule-%s", v.Name)
			newProjectRule := &v3.ProjectAlertRule{
				ObjectMeta: metav1.ObjectMeta{
					Name:      migrationRuleName,
					Namespace: projectName,
				},
				Spec: v32.ProjectAlertRuleSpec{
					ProjectName: projectID,
					GroupName:   groupID,
					CommonRuleField: v32.CommonRuleField{
						DisplayName: v.Spec.DisplayName,
						Severity:    v.Spec.Severity,
						TimingField: v32.TimingField{
							GroupWaitSeconds:      v.Spec.InitialWaitSeconds,
							GroupIntervalSeconds:  defaultGroupIntervalSeconds,
							RepeatIntervalSeconds: v.Spec.RepeatIntervalSeconds,
						},
					},
				},
			}
			// Carry over the legacy pod or workload target, if any.
			if v.Spec.TargetPod != nil {
				newProjectRule.Spec.PodRule = &v32.PodRule{
					PodName:                v.Spec.TargetPod.PodName,
					Condition:              v.Spec.TargetPod.Condition,
					RestartTimes:           v.Spec.TargetPod.RestartTimes,
					RestartIntervalSeconds: v.Spec.TargetPod.RestartIntervalSeconds,
				}
			}
			if v.Spec.TargetWorkload != nil {
				newProjectRule.Spec.WorkloadRule = &v32.WorkloadRule{
					WorkloadID:          v.Spec.TargetWorkload.WorkloadID,
					Selector:            v.Spec.TargetWorkload.Selector,
					AvailablePercentage: v.Spec.TargetWorkload.AvailablePercentage,
				}
			}
			// Create the rule if absent, otherwise overwrite its spec.
			oldProjectRule, err := l.projectAlertRules.GetNamespaced(projectName, newProjectRule.Name, metav1.GetOptions{})
			if err != nil {
				if !apierrors.IsNotFound(err) {
					return fmt.Errorf("migrate %s:%s failed, get alert rule failed, %v", v.Namespace, v.Name, err)
				}
				if _, err = l.projectAlertRules.Create(newProjectRule); err != nil && !apierrors.IsAlreadyExists(err) {
					return fmt.Errorf("migrate %s:%s failed, create alert rule failed, %v", v.Namespace, v.Name, err)
				}
			} else {
				updatedProjectRule := oldProjectRule.DeepCopy()
				updatedProjectRule.Spec = newProjectRule.Spec
				if _, err := l.projectAlertRules.Update(updatedProjectRule); err != nil {
					return fmt.Errorf("migrate %s:%s failed, update alert rule failed, %v", v.Namespace, v.Name, err)
				}
			}
			// Group holding the migrated rule, preserving the original
			// recipients.
			legacyGroup := &v3.ProjectAlertGroup{
				ObjectMeta: metav1.ObjectMeta{
					Name:      migrationGroupName,
					Namespace: projectName,
				},
				Spec: v32.ProjectGroupSpec{
					ProjectName: projectID,
					CommonGroupField: v32.CommonGroupField{
						DisplayName: "Migrate group",
						Description: "Migrate alert from last version",
						TimingField: v32.TimingField{
							GroupWaitSeconds:      v.Spec.InitialWaitSeconds,
							GroupIntervalSeconds:  defaultGroupIntervalSeconds,
							RepeatIntervalSeconds: v.Spec.RepeatIntervalSeconds,
						},
					},
					Recipients: v.Spec.Recipients,
				},
			}
			legacyGroup, err = l.projectAlertGroups.Create(legacyGroup)
			if err != nil && !apierrors.IsAlreadyExists(err) {
				return fmt.Errorf("create migrate alert group %s:%s failed, %v", legacyGroup.Namespace, legacyGroup.Name, err)
			}
		}
	}
	return nil
}
// removeLegacyAlerting deletes the "cattle-alerting" namespace that hosted
// the legacy alerting deployment. A missing namespace counts as success so
// the upgrade stays idempotent.
func (l *AlertService) removeLegacyAlerting() error {
	const legacyAlertmanagerNamespace = "cattle-alerting"
	err := l.namespaces.Delete(legacyAlertmanagerNamespace, &metav1.DeleteOptions{})
	if err == nil || apierrors.IsNotFound(err) {
		return nil
	}
	return errors.Wrap(err, "failed to remove legacy alerting namespace when upgrade")
}
// removeFinalizerFromLegacyAlerting clears the finalizer list on every
// legacy ProjectAlert that still has one, allowing those objects to be
// garbage-collected. Alerts without finalizers are left untouched.
func (l *AlertService) removeFinalizerFromLegacyAlerting() error {
	legacyAlerts, err := l.oldProjectAlertLister.List("", labels.NewSelector())
	if err != nil {
		return errors.Wrap(err, "list legacy projectAlerts failed")
	}
	for _, alert := range legacyAlerts {
		if len(alert.Finalizers) == 0 {
			continue
		}
		// Update a copy; never mutate objects owned by the lister cache.
		updated := alert.DeepCopy()
		updated.SetFinalizers([]string{})
		if _, err := l.oldProjectAlerts.Update(updated); err != nil {
			return errors.Wrapf(err, "remove finalizer from legacy projectAlert %s:%s failed", updated.Namespace, updated.Name)
		}
	}
	return nil
}
fix v1 monitoring app panic (#37532)
package deployer
import (
"fmt"
"reflect"
"strings"
"time"
v32 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
v33 "github.com/rancher/rancher/pkg/apis/project.cattle.io/v3"
"github.com/rancher/norman/controller"
"github.com/rancher/rancher/pkg/catalog/manager"
alertutil "github.com/rancher/rancher/pkg/controllers/managementuserlegacy/alert/common"
"github.com/rancher/rancher/pkg/controllers/managementuserlegacy/helm/common"
v1 "github.com/rancher/rancher/pkg/generated/norman/core/v1"
v3 "github.com/rancher/rancher/pkg/generated/norman/management.cattle.io/v3"
projectv3 "github.com/rancher/rancher/pkg/generated/norman/project.cattle.io/v3"
monitorutil "github.com/rancher/rancher/pkg/monitoring"
"github.com/rancher/rancher/pkg/namespace"
"github.com/rancher/rancher/pkg/ref"
"github.com/rancher/rancher/pkg/types/config"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
)
const (
	// initVersion marks a service whose chart version is not yet known.
	initVersion = "initializing"
)

var (
	// ServiceName identifies this system service as "alerting".
	ServiceName = "alerting"
	// waitCatalogSyncInterval — presumably the polling period for catalog
	// sync; its callers are not visible in this file chunk (verify usage).
	waitCatalogSyncInterval = 60 * time.Second
)

const (
	// defaultGroupIntervalSeconds is the group interval applied to alert
	// rules and groups created by the legacy-alert migration.
	defaultGroupIntervalSeconds = 180
)
// AlertService deploys and upgrades the cluster alerting app and migrates
// legacy alert resources during upgrades. Init must be called before any
// other method to wire the cluster-scoped clients below.
type AlertService struct {
	clusterName           string                      // name of the user cluster this service manages
	clusterLister         v3.ClusterLister            // read cluster readiness conditions
	catalogLister         v3.CatalogLister            // read catalog readiness conditions
	catalogManager        manager.CatalogManager      // resolve latest available template versions
	apps                  projectv3.AppInterface      // update the alerting app
	appLister             projectv3.AppLister         // read the alerting app
	oldClusterAlerts      v3.ClusterAlertInterface    // legacy cluster alerts (source of migration)
	oldProjectAlerts      v3.ProjectAlertInterface    // legacy project alerts (finalizer cleanup)
	oldProjectAlertLister v3.ProjectAlertLister       // list legacy project alerts
	clusterAlertGroups    v3.ClusterAlertGroupInterface
	projectAlertGroups    v3.ProjectAlertGroupInterface
	clusterAlertRules     v3.ClusterAlertRuleInterface
	projectAlertRules     v3.ProjectAlertRuleInterface
	projectLister         v3.ProjectLister            // locate the system project
	namespaces            v1.NamespaceInterface       // delete the legacy alerting namespace
	templateLister        v3.CatalogTemplateLister    // read the rancher-monitoring template
}
// NewService constructs an empty AlertService. Init must be called to wire
// its cluster-scoped clients before the service is used.
func NewService() *AlertService {
	svc := &AlertService{}
	return svc
}
// Init wires the service to the given user cluster context, caching the
// clients and listers used by Version and Upgrade. Cluster-scoped resources
// are bound to this cluster's namespace; project-scoped ones list across
// all namespaces.
func (l *AlertService) Init(cluster *config.UserContext) {
	l.clusterName = cluster.ClusterName
	l.clusterLister = cluster.Management.Management.Clusters("").Controller().Lister()
	l.catalogLister = cluster.Management.Management.Catalogs(metav1.NamespaceAll).Controller().Lister()
	l.oldClusterAlerts = cluster.Management.Management.ClusterAlerts(cluster.ClusterName)
	l.oldProjectAlerts = cluster.Management.Management.ProjectAlerts(metav1.NamespaceAll)
	l.oldProjectAlertLister = cluster.Management.Management.ProjectAlerts("").Controller().Lister()
	l.clusterAlertGroups = cluster.Management.Management.ClusterAlertGroups(cluster.ClusterName)
	l.projectAlertGroups = cluster.Management.Management.ProjectAlertGroups(metav1.NamespaceAll)
	l.clusterAlertRules = cluster.Management.Management.ClusterAlertRules(cluster.ClusterName)
	l.projectAlertRules = cluster.Management.Management.ProjectAlertRules(metav1.NamespaceAll)
	l.projectLister = cluster.Management.Management.Projects(cluster.ClusterName).Controller().Lister()
	l.apps = cluster.Management.Project.Apps(metav1.NamespaceAll)
	l.appLister = cluster.Management.Project.Apps("").Controller().Lister()
	l.namespaces = cluster.Core.Namespaces(metav1.NamespaceAll)
	l.templateLister = cluster.Management.Management.CatalogTemplates(metav1.NamespaceAll).Controller().Lister()
	l.catalogManager = cluster.Management.CatalogManager
}
// Version reports the identifier this service is keyed on, built from the
// monitoring template name and the static init marker; the error is always
// nil.
func (l *AlertService) Version() (string, error) {
	version := monitorutil.RancherMonitoringTemplateName + "-" + initVersion
	return version, nil
}
// Upgrade migrates legacy alerting resources (when coming from a
// pre-monitoring-template version) and then upgrades the alerting app in
// the cluster's system project to the latest available chart version.
// It returns the new chart version, or the error that stopped the upgrade.
func (l *AlertService) Upgrade(currentVersion string) (string, error) {
	template, err := l.templateLister.Get(namespace.GlobalNamespace, monitorutil.RancherMonitoringTemplateName)
	if err != nil {
		return "", fmt.Errorf("get template %s:%s failed, %v", namespace.GlobalNamespace, monitorutil.RancherMonitoringTemplateName, err)
	}
	templateVersion, err := l.catalogManager.LatestAvailableTemplateVersion(template, l.clusterName)
	if err != nil {
		return "", err
	}
	systemCatalogName := template.Spec.CatalogID
	newExternalID := templateVersion.ExternalID
	newVersion, _, err := common.ParseExternalID(newExternalID)
	if err != nil {
		return "", err
	}
	appName, _ := monitorutil.ClusterAlertManagerInfo()
	// Migrate legacy alerts only on the first upgrade from a version that
	// predates the monitoring template naming scheme.
	if !strings.Contains(currentVersion, monitorutil.RancherMonitoringTemplateName) {
		if err := l.migrateLegacyClusterAlert(); err != nil {
			return "", err
		}
		if err := l.migrateLegacyProjectAlert(); err != nil {
			return "", err
		}
		if err := l.removeLegacyAlerting(); err != nil {
			return "", err
		}
	}
	// Remove finalizers from legacy ProjectAlerts so they can be deleted.
	if err := l.removeFinalizerFromLegacyAlerting(); err != nil {
		return "", err
	}
	// Locate the cluster's system project, which hosts the alerting app.
	defaultSystemProjects, err := l.projectLister.List(metav1.NamespaceAll, labels.Set(systemProjectLabel).AsSelector())
	if err != nil {
		return "", fmt.Errorf("list system project failed, %v", err)
	}
	if len(defaultSystemProjects) == 0 {
		return "", fmt.Errorf("get system project failed")
	}
	systemProject := defaultSystemProjects[0]
	if systemProject == nil {
		return "", fmt.Errorf("get system project failed")
	}
	app, err := l.appLister.Get(systemProject.Name, appName)
	if err != nil {
		if apierrors.IsNotFound(err) {
			// No app deployed yet; nothing to upgrade.
			return newVersion, nil
		}
		return "", fmt.Errorf("get app %s:%s failed, %v", systemProject.Name, appName, err)
	}
	newApp := app.DeepCopy()
	newApp.Spec.ExternalID = newExternalID
	// Guard against a nil Answers map: assigning into a nil map panics.
	if newApp.Spec.Answers == nil {
		newApp.Spec.Answers = make(map[string]string)
	}
	newApp.Spec.Answers["operator.enabled"] = "false"
	if !reflect.DeepEqual(newApp, app) {
		// check cluster ready before upgrade, because helm will not retry if got cluster not ready error
		cluster, err := l.clusterLister.Get(metav1.NamespaceAll, l.clusterName)
		if err != nil {
			return "", fmt.Errorf("get cluster %s failed, %v", l.clusterName, err)
		}
		if !v32.ClusterConditionReady.IsTrue(cluster) {
			return "", fmt.Errorf("cluster %v not ready", l.clusterName)
		}
		systemCatalog, err := l.catalogLister.Get(metav1.NamespaceAll, systemCatalogName)
		if err != nil {
			return "", fmt.Errorf("get catalog %s failed, %v", systemCatalogName, err)
		}
		if !v32.CatalogConditionUpgraded.IsTrue(systemCatalog) || !v32.CatalogConditionRefreshed.IsTrue(systemCatalog) || !v32.CatalogConditionDiskCached.IsTrue(systemCatalog) {
			return "", fmt.Errorf("catalog %v not ready", systemCatalogName)
		}
		// add force upgrade to handle chart compatibility in different version
		v33.AppConditionForceUpgrade.Unknown(newApp)
		if _, err = l.apps.Update(newApp); err != nil {
			return "", fmt.Errorf("update app %s:%s failed, %v", app.Namespace, app.Name, err)
		}
	}
	return newVersion, nil
}
// migrateLegacyClusterAlert converts each legacy v3 ClusterAlert in this
// cluster into a ClusterAlertRule ("migrate-<name>") attached to a dedicated
// ClusterAlertGroup ("migrate-group-<name>") that carries the original
// recipients. The migration is idempotent: existing rules are updated in
// place and AlreadyExists errors on creation are ignored.
func (l *AlertService) migrateLegacyClusterAlert() error {
	oldClusterAlert, err := l.oldClusterAlerts.List(metav1.ListOptions{})
	if err != nil {
		return fmt.Errorf("get old cluster alert failed, %s", err)
	}
	for _, v := range oldClusterAlert.Items {
		migrationGroupName := fmt.Sprintf("migrate-group-%s", v.Name)
		groupID := alertutil.GetGroupID(l.clusterName, migrationGroupName)
		name := fmt.Sprintf("migrate-%s", v.Name)
		// Build the new rule from the legacy alert's common fields; the
		// group interval is not present on legacy alerts and gets the
		// migration default.
		newClusterRule := &v3.ClusterAlertRule{
			ObjectMeta: metav1.ObjectMeta{
				Name:      name,
				Namespace: l.clusterName,
			},
			Spec: v32.ClusterAlertRuleSpec{
				ClusterName: l.clusterName,
				GroupName:   groupID,
				CommonRuleField: v32.CommonRuleField{
					DisplayName: v.Spec.DisplayName,
					Severity:    v.Spec.Severity,
					TimingField: v32.TimingField{
						GroupWaitSeconds:      v.Spec.InitialWaitSeconds,
						GroupIntervalSeconds:  defaultGroupIntervalSeconds,
						RepeatIntervalSeconds: v.Spec.RepeatIntervalSeconds,
					},
				},
			},
		}
		// Carry over whichever legacy target type was set (node, event,
		// or system service).
		if v.Spec.TargetNode != nil {
			newClusterRule.Spec.NodeRule = &v32.NodeRule{
				NodeName:     v.Spec.TargetNode.NodeName,
				Selector:     v.Spec.TargetNode.Selector,
				Condition:    v.Spec.TargetNode.Condition,
				MemThreshold: v.Spec.TargetNode.MemThreshold,
				CPUThreshold: v.Spec.TargetNode.CPUThreshold,
			}
		}
		if v.Spec.TargetEvent != nil {
			newClusterRule.Spec.EventRule = &v32.EventRule{
				EventType:    v.Spec.TargetEvent.EventType,
				ResourceKind: v.Spec.TargetEvent.ResourceKind,
			}
		}
		if v.Spec.TargetSystemService != nil {
			newClusterRule.Spec.SystemServiceRule = &v32.SystemServiceRule{
				Condition: v.Spec.TargetSystemService.Condition,
			}
		}
		// Create the rule if it does not exist yet; otherwise overwrite
		// its spec so repeated migrations converge on the same state.
		oldClusterRule, err := l.clusterAlertRules.Get(newClusterRule.Name, metav1.GetOptions{})
		if err != nil {
			if !apierrors.IsNotFound(err) {
				return fmt.Errorf("migrate %s:%s failed, get alert rule failed, %v", v.Namespace, v.Name, err)
			}
			if _, err = l.clusterAlertRules.Create(newClusterRule); err != nil && !apierrors.IsAlreadyExists(err) {
				return fmt.Errorf("migrate %s:%s failed, create alert rule failed, %v", v.Namespace, v.Name, err)
			}
		} else {
			updatedClusterRule := oldClusterRule.DeepCopy()
			updatedClusterRule.Spec = newClusterRule.Spec
			if _, err := l.clusterAlertRules.Update(updatedClusterRule); err != nil {
				return fmt.Errorf("migrate %s:%s failed, update alert rule failed, %v", v.Namespace, v.Name, err)
			}
		}
		// Group holding the migrated rule, preserving the original recipients.
		legacyGroup := &v32.ClusterAlertGroup{
			ObjectMeta: metav1.ObjectMeta{
				Name:      migrationGroupName,
				Namespace: l.clusterName,
			},
			Spec: v32.ClusterGroupSpec{
				ClusterName: l.clusterName,
				CommonGroupField: v32.CommonGroupField{
					DisplayName: "Migrate group",
					Description: "Migrate alert from last version",
					TimingField: v32.TimingField{
						GroupWaitSeconds:      v.Spec.InitialWaitSeconds,
						GroupIntervalSeconds:  defaultGroupIntervalSeconds,
						RepeatIntervalSeconds: v.Spec.RepeatIntervalSeconds,
					},
				},
				Recipients: v.Spec.Recipients,
			},
		}
		_, err = l.clusterAlertGroups.Create(legacyGroup)
		if err != nil && !apierrors.IsAlreadyExists(err) {
			return fmt.Errorf("migrate failed, create alert group %s:%s failed, %v", l.clusterName, migrationGroupName, err)
		}
	}
	return nil
}
// migrateLegacyProjectAlert converts legacy v3 ProjectAlerts belonging to
// this cluster into ProjectAlertRule/ProjectAlertGroup pairs, bucketed by
// owning project. Idempotent in the same way as the cluster-alert migration.
func (l *AlertService) migrateLegacyProjectAlert() error {
	oldProjectAlert, err := l.oldProjectAlertLister.List("", labels.NewSelector())
	if err != nil {
		return fmt.Errorf("get old project alert failed, %s", err)
	}
	// Bucket legacy alerts by project ID, keeping only those that belong
	// to this cluster.
	oldProjectAlertGroup := make(map[string][]*v3.ProjectAlert)
	for _, v := range oldProjectAlert {
		if controller.ObjectInCluster(l.clusterName, v) {
			oldProjectAlertGroup[v.Spec.ProjectName] = append(oldProjectAlertGroup[v.Spec.ProjectName], v)
		}
	}
	for projectID, oldAlerts := range oldProjectAlertGroup {
		// ref.Parse splits the composite project ID; the second part is
		// used as the namespace for the migrated objects.
		_, projectName := ref.Parse(projectID)
		for _, v := range oldAlerts {
			migrationGroupName := fmt.Sprintf("migrate-group-%s", v.Name)
			groupID := alertutil.GetGroupID(projectName, migrationGroupName)
			migrationRuleName := fmt.Sprintf("migrate-rule-%s", v.Name)
			newProjectRule := &v3.ProjectAlertRule{
				ObjectMeta: metav1.ObjectMeta{
					Name:      migrationRuleName,
					Namespace: projectName,
				},
				Spec: v32.ProjectAlertRuleSpec{
					ProjectName: projectID,
					GroupName:   groupID,
					CommonRuleField: v32.CommonRuleField{
						DisplayName: v.Spec.DisplayName,
						Severity:    v.Spec.Severity,
						TimingField: v32.TimingField{
							GroupWaitSeconds:      v.Spec.InitialWaitSeconds,
							GroupIntervalSeconds:  defaultGroupIntervalSeconds,
							RepeatIntervalSeconds: v.Spec.RepeatIntervalSeconds,
						},
					},
				},
			}
			// Carry over the legacy pod or workload target, if any.
			if v.Spec.TargetPod != nil {
				newProjectRule.Spec.PodRule = &v32.PodRule{
					PodName:                v.Spec.TargetPod.PodName,
					Condition:              v.Spec.TargetPod.Condition,
					RestartTimes:           v.Spec.TargetPod.RestartTimes,
					RestartIntervalSeconds: v.Spec.TargetPod.RestartIntervalSeconds,
				}
			}
			if v.Spec.TargetWorkload != nil {
				newProjectRule.Spec.WorkloadRule = &v32.WorkloadRule{
					WorkloadID:          v.Spec.TargetWorkload.WorkloadID,
					Selector:            v.Spec.TargetWorkload.Selector,
					AvailablePercentage: v.Spec.TargetWorkload.AvailablePercentage,
				}
			}
			// Create the rule if absent, otherwise overwrite its spec.
			oldProjectRule, err := l.projectAlertRules.GetNamespaced(projectName, newProjectRule.Name, metav1.GetOptions{})
			if err != nil {
				if !apierrors.IsNotFound(err) {
					return fmt.Errorf("migrate %s:%s failed, get alert rule failed, %v", v.Namespace, v.Name, err)
				}
				if _, err = l.projectAlertRules.Create(newProjectRule); err != nil && !apierrors.IsAlreadyExists(err) {
					return fmt.Errorf("migrate %s:%s failed, create alert rule failed, %v", v.Namespace, v.Name, err)
				}
			} else {
				updatedProjectRule := oldProjectRule.DeepCopy()
				updatedProjectRule.Spec = newProjectRule.Spec
				if _, err := l.projectAlertRules.Update(updatedProjectRule); err != nil {
					return fmt.Errorf("migrate %s:%s failed, update alert rule failed, %v", v.Namespace, v.Name, err)
				}
			}
			// Group holding the migrated rule, preserving the original
			// recipients.
			legacyGroup := &v3.ProjectAlertGroup{
				ObjectMeta: metav1.ObjectMeta{
					Name:      migrationGroupName,
					Namespace: projectName,
				},
				Spec: v32.ProjectGroupSpec{
					ProjectName: projectID,
					CommonGroupField: v32.CommonGroupField{
						DisplayName: "Migrate group",
						Description: "Migrate alert from last version",
						TimingField: v32.TimingField{
							GroupWaitSeconds:      v.Spec.InitialWaitSeconds,
							GroupIntervalSeconds:  defaultGroupIntervalSeconds,
							RepeatIntervalSeconds: v.Spec.RepeatIntervalSeconds,
						},
					},
					Recipients: v.Spec.Recipients,
				},
			}
			legacyGroup, err = l.projectAlertGroups.Create(legacyGroup)
			if err != nil && !apierrors.IsAlreadyExists(err) {
				return fmt.Errorf("create migrate alert group %s:%s failed, %v", legacyGroup.Namespace, legacyGroup.Name, err)
			}
		}
	}
	return nil
}
// removeLegacyAlerting deletes the "cattle-alerting" namespace that hosted
// the legacy alerting deployment. A missing namespace counts as success so
// the upgrade stays idempotent.
func (l *AlertService) removeLegacyAlerting() error {
	const legacyAlertmanagerNamespace = "cattle-alerting"
	err := l.namespaces.Delete(legacyAlertmanagerNamespace, &metav1.DeleteOptions{})
	if err == nil || apierrors.IsNotFound(err) {
		return nil
	}
	return errors.Wrap(err, "failed to remove legacy alerting namespace when upgrade")
}
// removeFinalizerFromLegacyAlerting clears the finalizer list on every
// legacy ProjectAlert that still has one, allowing those objects to be
// garbage-collected. Alerts without finalizers are skipped.
func (l *AlertService) removeFinalizerFromLegacyAlerting() error {
	oldProjectAlert, err := l.oldProjectAlertLister.List("", labels.NewSelector())
	if err != nil {
		return errors.Wrap(err, "list legacy projectAlerts failed")
	}
	for _, v := range oldProjectAlert {
		if len(v.Finalizers) == 0 {
			continue
		}
		// Update a copy; never mutate objects owned by the lister cache.
		newObj := v.DeepCopy()
		newObj.SetFinalizers([]string{})
		if _, err = l.oldProjectAlerts.Update(newObj); err != nil {
			return errors.Wrapf(err, "remove finalizer from legacy projectAlert %s:%s failed", newObj.Namespace, newObj.Name)
		}
	}
	return nil
}
|
// Copyright (c) 2017, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package interp
import (
"fmt"
"os"
"regexp"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"mvdan.cc/sh/syntax"
)
// anyOfLit reports which of vals, if any, v equals as a single-literal
// word. It returns the matching value, or "" when v is not a one-part
// literal *syntax.Word or matches none of vals.
func anyOfLit(v interface{}, vals ...string) string {
	w, ok := v.(*syntax.Word)
	if !ok || w == nil || len(w.Parts) != 1 {
		return ""
	}
	lit, ok := w.Parts[0].(*syntax.Lit)
	if !ok {
		return ""
	}
	for _, want := range vals {
		if want == lit.Value {
			return want
		}
	}
	return ""
}
// quotedElems returns the separate elements of a quoted expansion such as
// "$@" or "${arr[@]}", which must not be joined into one field. It returns
// nil when pe is not one of those forms or the variable is not an indexed
// array.
func (r *Runner) quotedElems(pe *syntax.ParamExp) []string {
	if pe == nil {
		return nil
	}
	if pe.Param.Value == "@" {
		return r.Params
	}
	// Only the "@" index keeps elements separate; anything else is joined.
	if anyOfLit(pe.Index, "@") == "" {
		return nil
	}
	val, _ := r.lookupVar(pe.Param.Value)
	switch x := val.Value.(type) {
	case IndexArray:
		return x
	}
	return nil
}
// paramExp evaluates a parameter expansion such as ${foo}, ${#foo},
// ${foo:-bar}, ${foo/a/b} or ${foo:1:2}, returning the resulting string.
// Note the operators are applied in a fixed sequence here (Length, then
// Excl, Slice, Repl, Exp), so this version relies on that ordering.
func (r *Runner) paramExp(pe *syntax.ParamExp) string {
	name := pe.Param.Value
	var vr Variable
	set := false
	index := pe.Index
	// Resolve special parameters first; anything else is a positional
	// parameter (numeric name) or a regular variable lookup.
	switch name {
	case "#":
		vr.Value = StringVal(strconv.Itoa(len(r.Params)))
	case "@", "*":
		vr.Value = IndexArray(r.Params)
		// Treat $@/$* like an array indexed by the same special name.
		index = &syntax.Word{Parts: []syntax.WordPart{
			&syntax.Lit{Value: name},
		}}
	case "?":
		vr.Value = StringVal(strconv.Itoa(r.exit))
	case "$":
		vr.Value = StringVal(strconv.Itoa(os.Getpid()))
	case "PPID":
		vr.Value = StringVal(strconv.Itoa(os.Getppid()))
	case "LINENO":
		line := uint64(pe.Pos().Line())
		vr.Value = StringVal(strconv.FormatUint(line, 10))
	default:
		if n, err := strconv.Atoi(name); err == nil {
			// Positional parameter $1, $2, ... (1-based).
			if i := n - 1; i < len(r.Params) {
				vr.Value, set = StringVal(r.Params[i]), true
			}
		} else {
			vr, set = r.lookupVar(name)
		}
	}
	str := r.varStr(vr, 0)
	if index != nil {
		str = r.varInd(vr, index, 0)
	}
	// ${#name}: element count for array expansions, rune count otherwise.
	if pe.Length {
		n := 1
		if anyOfLit(index, "@", "*") != "" {
			switch x := vr.Value.(type) {
			case IndexArray:
				n = len(x)
			case AssocArray:
				n = len(x)
			}
		} else {
			n = utf8.RuneCountInString(str)
		}
		str = strconv.Itoa(n)
	}
	switch {
	case pe.Excl:
		// ${!name}: prefix listing, nameref, or indirect expansion.
		if pe.Names != 0 {
			str = strings.Join(r.namesByPrefix(pe.Param.Value), " ")
		} else if vr.NameRef {
			str = string(vr.Value.(StringVal))
		} else if str != "" {
			vr, set = r.lookupVar(str)
			str = r.varStr(vr, 0)
		}
	}
	// slicePos clamps an arithmetic slice index into [0, len(str)];
	// negative offsets count from the end of the string.
	slicePos := func(expr syntax.ArithmExpr) int {
		p := r.arithm(expr)
		if p < 0 {
			p = len(str) + p
			if p < 0 {
				p = len(str)
			}
		} else if p > len(str) {
			p = len(str)
		}
		return p
	}
	// ${name:off:len} substring expansion.
	if pe.Slice != nil {
		if pe.Slice.Offset != nil {
			offset := slicePos(pe.Slice.Offset)
			str = str[offset:]
		}
		if pe.Slice.Length != nil {
			length := slicePos(pe.Slice.Length)
			str = str[:length]
		}
	}
	// ${name/pat/repl} (first match) or ${name//pat/repl} (all matches).
	if pe.Repl != nil {
		orig := r.lonePattern(pe.Repl.Orig)
		with := r.loneWord(pe.Repl.With)
		n := 1
		if pe.Repl.All {
			n = -1
		}
		locs := findAllIndex(orig, str, n)
		buf := r.strBuilder()
		last := 0
		// Stitch together the unmatched spans with the replacement text.
		for _, loc := range locs {
			buf.WriteString(str[last:loc[0]])
			buf.WriteString(with)
			last = loc[1]
		}
		buf.WriteString(str[last:])
		str = buf.String()
	}
	// Substitution, assignment, trim and case operators (:-, :=, %%, ^^ ...).
	if pe.Exp != nil {
		arg := r.loneWord(pe.Exp.Word)
		switch pe.Exp.Op {
		case syntax.SubstColPlus:
			if str == "" {
				break
			}
			fallthrough
		case syntax.SubstPlus:
			if set {
				str = arg
			}
		case syntax.SubstMinus:
			if set {
				break
			}
			fallthrough
		case syntax.SubstColMinus:
			if str == "" {
				str = arg
			}
		case syntax.SubstQuest:
			if set {
				break
			}
			fallthrough
		case syntax.SubstColQuest:
			if str == "" {
				// ${name:?msg}: print the message and fail.
				r.errf("%s\n", arg)
				r.exit = 1
				r.lastExit()
			}
		case syntax.SubstAssgn:
			if set {
				break
			}
			fallthrough
		case syntax.SubstColAssgn:
			if str == "" {
				r.setVarString(name, arg)
				str = arg
			}
		case syntax.RemSmallPrefix:
			str = removePattern(str, arg, false, false)
		case syntax.RemLargePrefix:
			str = removePattern(str, arg, false, true)
		case syntax.RemSmallSuffix:
			str = removePattern(str, arg, true, false)
		case syntax.RemLargeSuffix:
			str = removePattern(str, arg, true, true)
		case syntax.UpperFirst:
			rs := []rune(str)
			if len(rs) > 0 {
				rs[0] = unicode.ToUpper(rs[0])
			}
			str = string(rs)
		case syntax.UpperAll:
			str = strings.ToUpper(str)
		case syntax.LowerFirst:
			rs := []rune(str)
			if len(rs) > 0 {
				rs[0] = unicode.ToLower(rs[0])
			}
			str = string(rs)
		case syntax.LowerAll:
			str = strings.ToLower(str)
		case syntax.OtherParamOps:
			// ${name@op} transformations.
			switch arg {
			case "Q":
				str = strconv.Quote(str)
			case "E":
				// Expand backslash escapes via UnquoteChar.
				tail := str
				var rns []rune
				for tail != "" {
					var rn rune
					rn, _, tail, _ = strconv.UnquoteChar(tail, 0)
					rns = append(rns, rn)
				}
				str = string(rns)
			case "P", "A", "a":
				panic(fmt.Sprintf("unhandled @%s param expansion", arg))
			default:
				panic(fmt.Sprintf("unexpected @%s param expansion", arg))
			}
		}
	}
	return str
}
// removePattern deletes the shortest or longest (greedy) match of the
// translated shell pattern from the start of str — or from the end when
// fromEnd is true. If the pattern cannot be translated, str is returned
// unchanged.
func removePattern(str, pattern string, fromEnd, greedy bool) string {
	expr, err := syntax.TranslatePattern(pattern, greedy)
	if err != nil {
		return str
	}
	if fromEnd {
		if greedy {
			// simple suffix
			expr = "(" + expr + ")$"
		} else {
			// a leading .* grabs the slack, leaving the right-most
			// (shortest) match for the capture group
			expr = ".*(" + expr + ")$"
		}
	} else {
		// simple prefix
		expr = "^(" + expr + ")"
	}
	// TranslatePattern already validated the pattern, so compiling is safe.
	rx := regexp.MustCompile(expr)
	loc := rx.FindStringSubmatchIndex(str)
	if loc == nil {
		return str
	}
	// Cut out the captured submatch, i.e. the original pattern's match.
	return str[:loc[2]] + str[loc[3]:]
}
interp: simplify parameter expansion operator flow
Now that we enforce only one of these at a time, we can put them all in
a big switch instead of caring about the order in which they run.
// Copyright (c) 2017, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package interp
import (
"fmt"
"os"
"regexp"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"mvdan.cc/sh/syntax"
)
// anyOfLit reports which of vals, if any, v equals as a single-literal
// word. It returns the matching value, or "" when v is not a one-part
// literal *syntax.Word or matches none of vals.
func anyOfLit(v interface{}, vals ...string) string {
	word, ok := v.(*syntax.Word)
	if !ok || word == nil || len(word.Parts) != 1 {
		return ""
	}
	if lit, ok := word.Parts[0].(*syntax.Lit); ok {
		for _, candidate := range vals {
			if candidate == lit.Value {
				return candidate
			}
		}
	}
	return ""
}
// quotedElems returns the separate elements of a quoted expansion such as
// "$@" or "${arr[@]}", which must not be joined into one field. It returns
// nil when pe is not one of those forms or the variable is not an indexed
// array.
func (r *Runner) quotedElems(pe *syntax.ParamExp) []string {
	switch {
	case pe == nil:
		return nil
	case pe.Param.Value == "@":
		return r.Params
	case anyOfLit(pe.Index, "@") == "":
		// Only the "@" index keeps elements separate.
		return nil
	}
	val, _ := r.lookupVar(pe.Param.Value)
	if arr, ok := val.Value.(IndexArray); ok {
		return arr
	}
	return nil
}
// paramExp evaluates a parameter expansion such as ${foo}, ${#foo},
// ${foo:-bar}, ${foo/a/b} or ${foo:1:2}, returning the resulting string.
// The Length/Excl/Slice/Repl/Exp operator forms are handled as mutually
// exclusive cases of a single switch, so at most one applies per expansion.
func (r *Runner) paramExp(pe *syntax.ParamExp) string {
	name := pe.Param.Value
	var vr Variable
	set := false
	index := pe.Index
	// Resolve special parameters first; anything else is a positional
	// parameter (numeric name) or a regular variable lookup.
	switch name {
	case "#":
		vr.Value = StringVal(strconv.Itoa(len(r.Params)))
	case "@", "*":
		vr.Value = IndexArray(r.Params)
		// Treat $@/$* like an array indexed by the same special name.
		index = &syntax.Word{Parts: []syntax.WordPart{
			&syntax.Lit{Value: name},
		}}
	case "?":
		vr.Value = StringVal(strconv.Itoa(r.exit))
	case "$":
		vr.Value = StringVal(strconv.Itoa(os.Getpid()))
	case "PPID":
		vr.Value = StringVal(strconv.Itoa(os.Getppid()))
	case "LINENO":
		line := uint64(pe.Pos().Line())
		vr.Value = StringVal(strconv.FormatUint(line, 10))
	default:
		if n, err := strconv.Atoi(name); err == nil {
			// Positional parameter $1, $2, ... (1-based).
			if i := n - 1; i < len(r.Params) {
				vr.Value, set = StringVal(r.Params[i]), true
			}
		} else {
			vr, set = r.lookupVar(name)
		}
	}
	str := r.varStr(vr, 0)
	if index != nil {
		str = r.varInd(vr, index, 0)
	}
	// slicePos clamps an arithmetic slice index into [0, len(str)];
	// negative offsets count from the end of the string.
	slicePos := func(expr syntax.ArithmExpr) int {
		p := r.arithm(expr)
		if p < 0 {
			p = len(str) + p
			if p < 0 {
				p = len(str)
			}
		} else if p > len(str) {
			p = len(str)
		}
		return p
	}
	switch {
	case pe.Length:
		// ${#name}: element count for array expansions, rune count otherwise.
		n := 1
		if anyOfLit(index, "@", "*") != "" {
			switch x := vr.Value.(type) {
			case IndexArray:
				n = len(x)
			case AssocArray:
				n = len(x)
			}
		} else {
			n = utf8.RuneCountInString(str)
		}
		str = strconv.Itoa(n)
	case pe.Excl:
		// ${!name}: prefix listing, nameref, or indirect expansion.
		if pe.Names != 0 {
			str = strings.Join(r.namesByPrefix(pe.Param.Value), " ")
		} else if vr.NameRef {
			str = string(vr.Value.(StringVal))
		} else if str != "" {
			vr, _ = r.lookupVar(str)
			str = r.varStr(vr, 0)
		}
	case pe.Slice != nil:
		// ${name:off:len} substring expansion.
		if pe.Slice.Offset != nil {
			offset := slicePos(pe.Slice.Offset)
			str = str[offset:]
		}
		if pe.Slice.Length != nil {
			length := slicePos(pe.Slice.Length)
			str = str[:length]
		}
	case pe.Repl != nil:
		// ${name/pat/repl} (first match) or ${name//pat/repl} (all matches).
		orig := r.lonePattern(pe.Repl.Orig)
		with := r.loneWord(pe.Repl.With)
		n := 1
		if pe.Repl.All {
			n = -1
		}
		locs := findAllIndex(orig, str, n)
		buf := r.strBuilder()
		last := 0
		// Stitch together the unmatched spans with the replacement text.
		for _, loc := range locs {
			buf.WriteString(str[last:loc[0]])
			buf.WriteString(with)
			last = loc[1]
		}
		buf.WriteString(str[last:])
		str = buf.String()
	case pe.Exp != nil:
		// Substitution, assignment, trim and case operators (:-, :=, %%, ^^ ...).
		arg := r.loneWord(pe.Exp.Word)
		switch pe.Exp.Op {
		case syntax.SubstColPlus:
			if str == "" {
				break
			}
			fallthrough
		case syntax.SubstPlus:
			if set {
				str = arg
			}
		case syntax.SubstMinus:
			if set {
				break
			}
			fallthrough
		case syntax.SubstColMinus:
			if str == "" {
				str = arg
			}
		case syntax.SubstQuest:
			if set {
				break
			}
			fallthrough
		case syntax.SubstColQuest:
			if str == "" {
				// ${name:?msg}: print the message and fail.
				r.errf("%s\n", arg)
				r.exit = 1
				r.lastExit()
			}
		case syntax.SubstAssgn:
			if set {
				break
			}
			fallthrough
		case syntax.SubstColAssgn:
			if str == "" {
				r.setVarString(name, arg)
				str = arg
			}
		case syntax.RemSmallPrefix:
			str = removePattern(str, arg, false, false)
		case syntax.RemLargePrefix:
			str = removePattern(str, arg, false, true)
		case syntax.RemSmallSuffix:
			str = removePattern(str, arg, true, false)
		case syntax.RemLargeSuffix:
			str = removePattern(str, arg, true, true)
		case syntax.UpperFirst:
			rs := []rune(str)
			if len(rs) > 0 {
				rs[0] = unicode.ToUpper(rs[0])
			}
			str = string(rs)
		case syntax.UpperAll:
			str = strings.ToUpper(str)
		case syntax.LowerFirst:
			rs := []rune(str)
			if len(rs) > 0 {
				rs[0] = unicode.ToLower(rs[0])
			}
			str = string(rs)
		case syntax.LowerAll:
			str = strings.ToLower(str)
		case syntax.OtherParamOps:
			// ${name@op} transformations.
			switch arg {
			case "Q":
				str = strconv.Quote(str)
			case "E":
				// Expand backslash escapes via UnquoteChar.
				tail := str
				var rns []rune
				for tail != "" {
					var rn rune
					rn, _, tail, _ = strconv.UnquoteChar(tail, 0)
					rns = append(rns, rn)
				}
				str = string(rns)
			case "P", "A", "a":
				panic(fmt.Sprintf("unhandled @%s param expansion", arg))
			default:
				panic(fmt.Sprintf("unexpected @%s param expansion", arg))
			}
		}
	}
	return str
}
// removePattern strips a match of the shell pattern from str, mirroring the
// ${var#pat}/${var%pat} expansion family. fromEnd selects suffix removal,
// greedy selects the longest match. If the pattern cannot be translated to
// a regular expression, str is returned unchanged.
func removePattern(str, pattern string, fromEnd, greedy bool) string {
	expr, err := syntax.TranslatePattern(pattern, greedy)
	if err != nil {
		return str
	}
	switch {
	case !fromEnd:
		// simple prefix
		expr = "^(" + expr + ")"
	case greedy:
		// simple suffix
		expr = "(" + expr + ")$"
	default:
		// shortest suffix: a leading .* consumes as much as possible,
		// leaving the right-most (shortest) match for the capture group
		expr = ".*(" + expr + ")$"
	}
	// TranslatePattern already validated the pattern, so compiling cannot fail.
	rx := regexp.MustCompile(expr)
	if loc := rx.FindStringSubmatchIndex(str); loc != nil {
		// cut out the submatch (the translated pattern itself)
		str = str[:loc[2]] + str[loc[3]:]
	}
	return str
}
|
package main
import (
"fmt"
"net"
"bytes"
"io/ioutil"
"os/exec"
"flag"
"github.com/writeas/writeas-telnet/store"
)
// Package-level state shared by all connection handlers; set once in main.
var (
	banner    []byte // contents of static/banner.txt, shown on connect
	outDir    string // directory where published posts are written
	staticDir string // directory holding required static assets
	debugging bool   // enables verbose per-read logging
	rsyncHost string // optional host to rsync saved posts to ("" disables)
)

// ANSI escape sequences used to colorize terminal output, plus a
// horizontal rule for visual separation.
const (
	colBlue   = "\033[0;34m"
	colGreen  = "\033[0;32m"
	colBGreen = "\033[1;32m"
	colCyan   = "\033[0;36m"
	colBRed   = "\033[1;31m"
	colBold   = "\033[1;37m"
	noCol     = "\033[0m" // reset to the terminal's default color
	hr        = "————————————————————————————————————————————————————————————————————————————————"
)
// main parses command-line flags, loads the banner asset, and serves the
// telnet interface forever, one goroutine per accepted connection.
func main() {
	// Bind flags directly onto the package-level settings.
	var port int
	flag.StringVar(&outDir, "o", "/var/write", "Directory where text files will be stored.")
	flag.StringVar(&staticDir, "s", "./static", "Directory where required static files exist.")
	flag.StringVar(&rsyncHost, "h", "", "Hostname of the server to rsync saved files to.")
	flag.IntVar(&port, "p", 2323, "Port to listen on.")
	flag.BoolVar(&debugging, "debug", false, "Enables garrulous debug logging.")
	flag.Parse()

	fmt.Print("\nCONFIG:\n")
	fmt.Printf("Output directory : %s\n", outDir)
	fmt.Printf("Static directory : %s\n", staticDir)
	fmt.Printf("rsync host : %s\n", rsyncHost)
	fmt.Printf("Debugging enabled : %t\n\n", debugging)

	fmt.Print("Initializing...")
	var err error
	// A missing banner is logged but not fatal; the server still runs.
	banner, err = ioutil.ReadFile(staticDir + "/banner.txt")
	if err != nil {
		fmt.Println(err)
	}
	fmt.Println("DONE")

	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		panic(err)
	}
	fmt.Printf("Listening on localhost:%d\n", port)

	// Accept loop: transient accept errors are logged and skipped.
	for {
		conn, err := ln.Accept()
		if err != nil {
			fmt.Println(err)
			continue
		}
		go handleConnection(conn)
	}
}
// output writes the string m to the connection. On a write error the
// connection is closed and false is returned; otherwise true.
func output(c net.Conn, m string) bool {
	if _, err := c.Write([]byte(m)); err != nil {
		c.Close()
		return false
	}
	return true
}
// outputBytes writes raw bytes to the connection. On a write error the
// connection is closed and false is returned; otherwise true.
func outputBytes(c net.Conn, m []byte) bool {
	if _, err := c.Write(m); err != nil {
		c.Close()
		return false
	}
	return true
}
// handleConnection greets a new client with the banner and welcome text,
// hands the session to waitForEnter, and logs when the connection ends.
// Runs as one goroutine per connection.
func handleConnection(c net.Conn) {
	outputBytes(c, banner)
	output(c, fmt.Sprintf("\n%sWelcome to write.as!%s\n", colBGreen, noCol))
	output(c, fmt.Sprintf("If this is freaking you out, you can get notified of the %sbrowser-based%s launch\ninstead at https://write.as.\n\n", colBold, noCol))
	waitForEnter(c)
	c.Close()
	fmt.Printf("Connection from %v closed.\n", c.RemoteAddr())
}
// waitForEnter blocks until the client presses Enter (a newline arrives),
// then shows the editor prompt and hands the connection to readInput.
// The loop also exits (closing the connection) on read error or EOF.
func waitForEnter(c net.Conn) {
	// Small scratch buffer: we only need to spot a newline byte.
	b := make([]byte, 4)
	output(c, fmt.Sprintf("%sPress Enter to continue...%s\n", colBRed, noCol))
	for {
		n, err := c.Read(b)
		if debugging {
			fmt.Print(b[0:n])
			fmt.Printf("\n%d: %s\n", n, b[0:n])
		}
		if bytes.IndexRune(b[0:n], '\n') > -1 {
			break
		}
		if err != nil || n == 0 {
			c.Close()
			break
		}
	}
	output(c, fmt.Sprintf("Enter anything you like.\nPress %sCtrl-D%s to publish and quit.\n%s\n", colBold, noCol, hr))
	readInput(c)
}
// checkExit reports whether the latest read ends the editing session:
// true when some bytes arrived and none of them is a newline (e.g. the
// client sent a lone Ctrl-D without pressing Enter).
func checkExit(b []byte, n int) bool {
	if n == 0 {
		return false
	}
	return bytes.IndexRune(b[:n], '\n') == -1
}
// readInput consumes the client's post until checkExit detects end of input
// (bytes with no newline, e.g. a lone Ctrl-D), saves it to outDir, and
// reports the post's URL back to the client. Optionally rsyncs the saved
// file to rsyncHost. The connection is always closed on return.
func readInput(c net.Conn) {
	defer c.Close()
	b := make([]byte, 4096)
	var post bytes.Buffer
	for {
		n, err := c.Read(b)
		post.Write(b[0:n])
		if debugging {
			fmt.Print(b[0:n])
			fmt.Printf("\n%d: %s\n", n, b[0:n])
		}
		if checkExit(b, n) {
			file, err := store.SavePost(outDir, post.Bytes())
			if err != nil {
				fmt.Printf("There was an error saving: %s\n", err)
				output(c, "Something went terribly wrong, sorry. Try again later?\n\n")
				break
			}
			output(c, fmt.Sprintf("\n%s\nPosted to %shttp://nerds.write.as/%s%s", hr, colBlue, file, noCol))
			if rsyncHost != "" {
				output(c, "\nPosting to secure site...")
				// The rsync error was previously discarded; log it so failed
				// uploads to the secure site are at least visible server-side.
				if err := exec.Command("rsync", "-ptgou", outDir+"/"+file, rsyncHost+":").Run(); err != nil {
					fmt.Printf("rsync of %s to %s failed: %s\n", file, rsyncHost, err)
				}
				output(c, fmt.Sprintf("\nPosted! View at %shttps://write.as/%s%s", colBlue, file, noCol))
			}
			output(c, "\nSee you later.\n\n")
			break
		}
		if err != nil || n == 0 {
			break
		}
	}
}
Store posts in database
package main
import (
"fmt"
"net"
"bytes"
"io/ioutil"
"os"
"flag"
"database/sql"
_ "github.com/go-sql-driver/mysql"
"github.com/writeas/writeas-telnet/store"
)
// Package-level state shared by all connection handlers; set once in main.
var (
	banner    []byte  // contents of static/banner.txt, shown on connect
	staticDir string  // directory holding required static assets
	debugging bool    // enables verbose per-read logging
	db        *sql.DB // shared MySQL handle, opened in main
)

// ANSI escape sequences used to colorize terminal output, plus a
// horizontal rule for visual separation.
const (
	colBlue   = "\033[0;34m"
	colGreen  = "\033[0;32m"
	colBGreen = "\033[1;32m"
	colCyan   = "\033[0;36m"
	colBRed   = "\033[1;31m"
	colBold   = "\033[1;37m"
	noCol     = "\033[0m" // reset to the terminal's default color
	hr        = "————————————————————————————————————————————————————————————————————————————————"
)
// main wires up flags, loads static assets, connects to MySQL (credentials
// from WA_USER/WA_PASSWORD/WA_HOST), and then accepts telnet connections
// forever, one goroutine per client.
func main() {
	// Get any arguments
	staticDirPtr := flag.String("s", "./static", "Directory where required static files exist.")
	portPtr := flag.Int("p", 2323, "Port to listen on.")
	debugPtr := flag.Bool("debug", false, "Enables garrulous debug logging.")
	flag.Parse()
	staticDir = *staticDirPtr
	debugging = *debugPtr

	fmt.Print("\nCONFIG:\n")
	fmt.Printf("Static directory : %s\n", staticDir)
	fmt.Printf("Debugging enabled : %t\n\n", debugging)

	fmt.Print("Initializing...")
	var err error
	// A missing banner is logged but not fatal; the server still runs.
	banner, err = ioutil.ReadFile(staticDir + "/banner.txt")
	if err != nil {
		fmt.Println(err)
	}
	fmt.Println("DONE")

	// Connect to database. Credentials come from the environment so they
	// never appear on the command line. The host was previously unchecked,
	// which produced a confusing DSN error later.
	dbUser := os.Getenv("WA_USER")
	dbPassword := os.Getenv("WA_PASSWORD")
	dbHost := os.Getenv("WA_HOST")
	if dbUser == "" || dbPassword == "" || dbHost == "" {
		fmt.Println("Database user, password, or host not set.")
		return
	}
	fmt.Print("Connecting to database...")
	db, err = sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s:3306)/writeas?charset=utf8mb4", dbUser, dbPassword, dbHost))
	if err != nil {
		fmt.Printf("\n%s\n", err)
		return
	}
	defer db.Close()
	// sql.Open only validates its arguments; Ping forces a real connection
	// so bad credentials or host fail fast instead of on the first post.
	if err = db.Ping(); err != nil {
		fmt.Printf("\n%s\n", err)
		return
	}
	fmt.Println("CONNECTED")

	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", *portPtr))
	if err != nil {
		panic(err)
	}
	fmt.Printf("Listening on localhost:%d\n", *portPtr)
	for {
		conn, err := ln.Accept()
		if err != nil {
			fmt.Println(err)
			continue
		}
		go handleConnection(conn)
	}
}
// output writes the string m to the connection. On a write error the
// connection is closed and false is returned; otherwise true.
func output(c net.Conn, m string) bool {
	if _, err := c.Write([]byte(m)); err != nil {
		c.Close()
		return false
	}
	return true
}
// outputBytes writes raw bytes to the connection. On a write error the
// connection is closed and false is returned; otherwise true.
func outputBytes(c net.Conn, m []byte) bool {
	if _, err := c.Write(m); err != nil {
		c.Close()
		return false
	}
	return true
}
// handleConnection greets a new client with the banner and welcome text,
// hands the session to waitForEnter, and logs when the connection ends.
// Runs as one goroutine per connection.
func handleConnection(c net.Conn) {
	outputBytes(c, banner)
	output(c, fmt.Sprintf("\n%sWelcome to write.as!%s\n", colBGreen, noCol))
	output(c, fmt.Sprintf("If this is freaking you out, you can get notified of the %sbrowser-based%s launch\ninstead at https://write.as.\n\n", colBold, noCol))
	waitForEnter(c)
	c.Close()
	fmt.Printf("Connection from %v closed.\n", c.RemoteAddr())
}
// waitForEnter blocks until the client presses Enter (a newline arrives),
// then shows the editor prompt and hands the connection to readInput.
// The loop also exits (closing the connection) on read error or EOF.
func waitForEnter(c net.Conn) {
	// Small scratch buffer: we only need to spot a newline byte.
	b := make([]byte, 4)
	output(c, fmt.Sprintf("%sPress Enter to continue...%s\n", colBRed, noCol))
	for {
		n, err := c.Read(b)
		if debugging {
			fmt.Print(b[0:n])
			fmt.Printf("\n%d: %s\n", n, b[0:n])
		}
		if bytes.IndexRune(b[0:n], '\n') > -1 {
			break
		}
		if err != nil || n == 0 {
			c.Close()
			break
		}
	}
	output(c, fmt.Sprintf("Enter anything you like.\nPress %sCtrl-D%s to publish and quit.\n%s\n", colBold, noCol, hr))
	readInput(c)
}
// checkExit reports whether the latest read ends the editing session:
// true when some bytes arrived and none of them is a newline (e.g. the
// client sent a lone Ctrl-D without pressing Enter).
func checkExit(b []byte, n int) bool {
	if n == 0 {
		return false
	}
	return bytes.IndexRune(b[:n], '\n') == -1
}
// readInput consumes the client's post until checkExit detects end of input
// (bytes with no newline, e.g. a lone Ctrl-D), inserts the post into the
// database under a fresh friendly ID, and reports the post URL back to the
// client. The connection is always closed on return.
func readInput(c net.Conn) {
	defer c.Close()
	b := make([]byte, 4096)
	var post bytes.Buffer
	for {
		n, err := c.Read(b)
		post.Write(b[0:n])
		if debugging {
			fmt.Print(b[0:n])
			fmt.Printf("\n%d: %s\n", n, b[0:n])
		}
		if checkExit(b, n) {
			// Random public ID plus a secret token that authorizes later edits.
			friendlyId := store.GenerateFriendlyRandomString(store.FriendlyIdLen)
			editToken := store.Generate62RandomString(32)
			_, err := db.Exec("INSERT INTO posts (id, content, modify_token) VALUES (?, ?, ?)", friendlyId, post.Bytes(), editToken)
			if err != nil {
				fmt.Printf("There was an error saving: %s\n", err)
				output(c, "Something went terribly wrong, sorry. Try again later?\n\n")
				break
			}
			output(c, fmt.Sprintf("\n%s\nPosted! View at %shttps://write.as/%s%s", hr, colBlue, friendlyId, noCol))
			output(c, "\nSee you later.\n\n")
			break
		}
		if err != nil || n == 0 {
			break
		}
	}
}
|
package wsmux
import (
"encoding/binary"
"io"
"net"
"sync"
"time"
)
const (
	// DefaultCapacity of read buffer of stream
	DefaultCapacity = 1024
)

/*Stream States:
created = stream has been created. Buffer is empty. Has not been accepted.
accepted = stream has been accepted. read write operations permitted.
closed = stream has been closed.
remoteClosed = remote side has been closed.
dead = closed & remoteClosed. Buffer may still have data.
*/

// streamState tracks where a stream is in its lifecycle; all transitions
// happen while holding the stream's mutex.
type streamState int

const (
	created streamState = iota
	accepted
	closed
	remoteClosed
	dead
)
// stream is a single multiplexed, flow-controlled byte stream carried over
// a shared session connection. It implements the net.Conn read/write/
// deadline surface.
type stream struct {
	id                    uint32        // id of the stream. Used for logging.
	m                     sync.Mutex    // mutex for state transitions
	c                     *sync.Cond    // used for broadcasting when closed, data read, or data pushed to buffer
	b                     *buffer       // read buffer
	unblocked             uint32        // number of bytes that can be sent to remote
	endErr                error         // error causes stream to close
	state                 streamState   // current state of the stream
	accepted              chan struct{} // closed when stream is accepted. Used in session.Open()
	session               *Session      // associated session. used for sending frames and logging
	readTimer             *time.Timer   // timer for read operations
	writeTimer            *time.Timer   // timer for write operations
	readDeadlineExceeded  bool          // true when readTimer fires
	writeDeadlineExceeded bool          // true when writeTimer fires
}
// newStream returns a stream in the created state wired to its parent
// session; numeric, error, timer, and flag fields rely on their zero values.
func newStream(id uint32, session *Session) *stream {
	str := &stream{
		id:       id,
		b:        newBuffer(session.streamBufferSize),
		state:    created,
		accepted: make(chan struct{}),
		session:  session,
	}
	str.c = sync.NewCond(&str.m)
	return str
}
// HandleFrame processes frames received by the stream
func (s *stream) HandleFrame(fr frame) {
	switch fr.msg {
	case msgACK:
		// ACK carries the byte count the remote consumed. The first ACK on
		// a locally-opened stream doubles as the accept signal; later ACKs
		// unblock write capacity.
		read := binary.LittleEndian.Uint32(fr.payload)
		select {
		case <-s.accepted:
			s.UnblockAndBroadcast(read)
		default:
			s.AcceptStream(read)
		}
	case msgDAT:
		s.session.logger.Printf("stream %d received DAT frame: %v", s.id, fr)
		s.PushAndBroadcast(fr.payload)
	case msgFIN:
		s.session.logger.Printf("remote stream %d closed connection", s.id)
		s.setRemoteClosed()
	}
}

// onExpired is an internal helper method which sets val = true and broadcasts
// so that any Read/Write blocked on the condition variable re-checks its
// deadline flag. Used as the callback for the deadline timers.
func (s *stream) onExpired(val *bool) func() {
	return func() {
		s.m.Lock()
		defer s.m.Unlock()
		defer s.c.Broadcast()
		*val = true
	}
}
// SetReadDeadline sets the read timer. A zero time clears any existing
// deadline. Always returns nil; part of the net.Conn surface.
func (s *stream) SetReadDeadline(t time.Time) error {
	s.m.Lock()
	defer s.m.Unlock()
	// Stop and drop any previous timer so it cannot fire spuriously.
	if s.readTimer != nil {
		_ = s.readTimer.Stop()
		s.readTimer = nil
	}
	// The old deadline no longer applies; clear its expired flag.
	s.readDeadlineExceeded = false
	if !t.IsZero() {
		// time.Until is the idiomatic form of t.Sub(time.Now()).
		s.readTimer = time.AfterFunc(time.Until(t), s.onExpired(&s.readDeadlineExceeded))
	}
	return nil
}
// SetWriteDeadline sets the write timer. A zero time clears any existing
// deadline. Always returns nil; part of the net.Conn surface.
func (s *stream) SetWriteDeadline(t time.Time) error {
	s.m.Lock()
	defer s.m.Unlock()
	// Stop and drop any previous timer so it cannot fire spuriously.
	if s.writeTimer != nil {
		_ = s.writeTimer.Stop()
		s.writeTimer = nil
	}
	// The old deadline no longer applies; clear its expired flag.
	s.writeDeadlineExceeded = false
	if !t.IsZero() {
		// time.Until is the idiomatic form of t.Sub(time.Now()).
		s.writeTimer = time.AfterFunc(time.Until(t), s.onExpired(&s.writeDeadlineExceeded))
	}
	return nil
}
// UnblockAndBroadcast unblocks bytes and broadcasts so that writes can
// continue. Called when the remote ACKs `read` consumed bytes.
func (s *stream) UnblockAndBroadcast(read uint32) {
	s.m.Lock()
	defer s.m.Unlock()
	defer s.c.Broadcast()
	defer s.session.logger.Printf("unblock broadcasted : stream %d", s.id)
	s.unblocked += read
}
// PushAndBroadcast adds data to the read buffer and broadcasts so that
// reads can continue. A buffer write failure becomes the stream's terminal
// error.
func (s *stream) PushAndBroadcast(buf []byte) {
	s.m.Lock()
	defer s.m.Unlock()
	defer s.c.Broadcast()
	defer s.session.logger.Printf("push broadcasted : stream %d", s.id)
	// Only record failures: the previous unconditional assignment cleared
	// an earlier terminal error back to nil on the next successful push.
	if _, err := s.b.Write(buf); err != nil {
		s.endErr = err
	}
}
// AcceptStream accepts the current stream by closing the accepted channel,
// records the remote's initial write capacity, and wakes any waiters.
func (s *stream) AcceptStream(read uint32) {
	s.m.Lock()
	defer s.m.Unlock()
	defer s.c.Broadcast()
	s.unblocked += read
	s.state = accepted
	close(s.accepted)
}
// SetDeadline sets the read and write deadlines for the stream.
// Part of the net.Conn surface.
func (s *stream) SetDeadline(t time.Time) error {
	// endErr is protected by s.m everywhere else; the previous code wrote
	// it without the lock. (Set*Deadline currently always return nil, but
	// keep the error paths correct.)
	if err := s.SetReadDeadline(t); err != nil {
		s.m.Lock()
		s.endErr = err
		s.m.Unlock()
		return err
	}
	if err := s.SetWriteDeadline(t); err != nil {
		s.m.Lock()
		s.endErr = err
		s.m.Unlock()
		return err
	}
	return nil
}
// IsRemovable reports whether the session may discard this stream: both
// sides are closed (dead) and all buffered data has been read.
func (s *stream) IsRemovable() bool {
	s.m.Lock()
	defer s.m.Unlock()
	return s.state == dead && s.b.Len() == 0
}

// setRemoteClosed records that the remote sent a fin packet. If the local
// side already closed, the stream becomes dead; otherwise only the remote
// side is marked closed. Waiters are woken so reads can return EOF.
func (s *stream) setRemoteClosed() {
	s.m.Lock()
	defer s.m.Unlock()
	defer s.c.Broadcast()
	if s.state == closed {
		s.state = dead
	} else {
		s.state = remoteClosed
	}
}

// LocalAddr returns the local address of the underlying connection.
func (s *stream) LocalAddr() net.Addr {
	return s.session.conn.LocalAddr()
}

// RemoteAddr returns the remote address of the underlying connection.
func (s *stream) RemoteAddr() net.Addr {
	return s.session.conn.RemoteAddr()
}
// Close closes the local side of the stream and notifies the remote with a
// FIN frame. Closing an already-closed stream is a no-op.
func (s *stream) Close() error {
	s.m.Lock()
	defer s.m.Unlock()
	defer s.c.Broadcast()
	switch s.state {
	case dead, closed:
		// already closed locally; nothing to do
		return nil
	case remoteClosed:
		// both directions are now shut
		s.state = dead
	default:
		s.state = closed
	}
	// tell the remote no more data will be sent
	return s.session.send(newFinFrame(s.id))
}
// Read reads bytes from the stream into buf, blocking until data arrives,
// the stream ends, or the read deadline expires. Each successful read ACKs
// the consumed byte count back to the remote for flow control.
func (s *stream) Read(buf []byte) (int, error) {
	s.m.Lock()
	defer s.m.Unlock()
	defer s.c.Broadcast()
	s.session.logger.Printf("stream %d: read requested", s.id)
	// Block while there is nothing to read and no reason to give up.
	for s.b.Len() == 0 && s.endErr == nil && !s.readDeadlineExceeded && s.state != remoteClosed && s.state != dead {
		s.session.logger.Printf("stream %d: read waiting", s.id)
		// wait
		s.c.Wait()
	}
	// Remote finished and the buffer is drained: clean EOF.
	if s.b.Len() == 0 && (s.state == remoteClosed || s.state == dead) {
		return 0, io.EOF
	}
	if s.readDeadlineExceeded {
		return 0, ErrReadTimeout
	}
	if s.endErr != nil {
		return 0, s.endErr
	}
	n, _ := s.b.Read(buf)
	// Tell the remote how much buffer capacity we just freed.
	if err := s.session.send(newAckFrame(s.id, uint32(n))); err != nil {
		return n, err
	}
	s.session.logger.Printf("stream %d: read completed", s.id)
	return n, nil
}
// Write writes bytes to the stream, blocking until the remote has
// acknowledged capacity (flow control), the stream ends, or the write
// deadline expires. Returns the number of bytes written.
func (s *stream) Write(buf []byte) (int, error) {
	s.m.Lock()
	defer s.m.Unlock()
	defer s.c.Broadcast()
	l, w := len(buf), 0
	for w < l {
		// Wait until the remote unblocks capacity or the stream ends.
		for s.unblocked == 0 && s.endErr == nil && !s.writeDeadlineExceeded && s.state != closed && s.state != dead {
			s.session.logger.Printf("stream %d: write waiting", s.id)
			s.c.Wait()
		}
		// if stream is closed or waiting to be empty then abort
		// unblocked not checked as stream can be closed, but bytes may be unblocked by remote
		if s.state == closed || s.state == dead {
			return w, ErrBrokenPipe
		}
		if s.writeDeadlineExceeded {
			return w, ErrWriteTimeout
		}
		if s.endErr != nil {
			return w, s.endErr
		}
		// renamed from "cap", which shadowed the builtin cap()
		chunk := min(len(buf), int(s.unblocked))
		if err := s.session.send(newDataFrame(s.id, buf[:chunk])); err != nil {
			return w, err
		}
		buf = buf[chunk:]
		s.unblocked -= uint32(chunk)
		w += chunk
	}
	return w, nil
}
// Kill forces the stream into the dead state without notifying the remote;
// waiters are woken so pending reads/writes can bail out.
func (s *stream) Kill() {
	s.m.Lock()
	defer s.m.Unlock()
	defer s.c.Broadcast()
	s.session.logger.Printf("stream %d killed", s.id)
	s.state = dead
}
changed stream state names
package wsmux
import (
"encoding/binary"
"io"
"net"
"sync"
"time"
)
const (
	// DefaultCapacity of read buffer of stream
	DefaultCapacity = 1024
)

/*Stream States:
streamCreated = stream has been created. Buffer is empty. Has not been accepted.
streamAccepted = stream has been accepted. read/write operations permitted.
streamClosed = the local side has been closed.
streamRemoteClosed = the remote side has been closed.
streamDead = closed & remoteClosed. Buffer may still have data.
*/

// streamState tracks where a stream is in its lifecycle; all transitions
// happen while holding the stream's mutex.
type streamState int

const (
	streamCreated streamState = iota
	streamAccepted
	streamClosed
	streamRemoteClosed
	streamDead
)
// stream is a single multiplexed, flow-controlled byte stream carried over
// a shared session connection. It implements the net.Conn read/write/
// deadline surface. (Field comments restored after the mechanical
// state-name rename garbled them.)
type stream struct {
	id                    uint32        // id of the stream. Used for logging.
	m                     sync.Mutex    // mutex for state transitions
	c                     *sync.Cond    // used for broadcasting when closed, data read, or data pushed to buffer
	b                     *buffer       // read buffer
	unblocked             uint32        // number of bytes that can be sent to remote
	endErr                error         // error causes stream to close
	state                 streamState   // current state of the stream
	accepted              chan struct{} // closed when stream is accepted. Used in session.Open()
	session               *Session      // associated session. used for sending frames and logging
	readTimer             *time.Timer   // timer for read operations
	writeTimer            *time.Timer   // timer for write operations
	readDeadlineExceeded  bool          // true when readTimer fires
	writeDeadlineExceeded bool          // true when writeTimer fires
}
// newStream returns a stream in the streamCreated state wired to its parent
// session; numeric, error, timer, and flag fields rely on their zero values.
func newStream(id uint32, session *Session) *stream {
	str := &stream{
		id:       id,
		b:        newBuffer(session.streamBufferSize),
		state:    streamCreated,
		accepted: make(chan struct{}),
		session:  session,
	}
	str.c = sync.NewCond(&str.m)
	return str
}
// HandleFrame processes frames received by the stream.
func (s *stream) HandleFrame(fr frame) {
	switch fr.msg {
	case msgACK:
		// ACK carries the byte count the remote consumed. The first ACK on
		// a locally-opened stream doubles as the accept signal; later ACKs
		// unblock write capacity.
		read := binary.LittleEndian.Uint32(fr.payload)
		select {
		case <-s.accepted:
			s.UnblockAndBroadcast(read)
		default:
			s.AcceptStream(read)
		}
	case msgDAT:
		s.session.logger.Printf("stream %d received DAT frame: %v", s.id, fr)
		s.PushAndBroadcast(fr.payload)
	case msgFIN:
		// Restore the log text mangled by the state-name rename
		// ("streamClosed connection" -> "closed connection").
		s.session.logger.Printf("remote stream %d closed connection", s.id)
		s.setRemoteClosed()
	}
}
// onExpired is an internal helper method which sets val = true and broadcasts
// so that any Read/Write blocked on the condition variable re-checks its
// deadline flag. Used as the callback for the deadline timers.
func (s *stream) onExpired(val *bool) func() {
	return func() {
		s.m.Lock()
		defer s.m.Unlock()
		defer s.c.Broadcast()
		*val = true
	}
}
// SetReadDeadline sets the read timer. A zero time clears any existing
// deadline. Always returns nil; part of the net.Conn surface.
func (s *stream) SetReadDeadline(t time.Time) error {
	s.m.Lock()
	defer s.m.Unlock()
	// Stop and drop any previous timer so it cannot fire spuriously.
	if s.readTimer != nil {
		_ = s.readTimer.Stop()
		s.readTimer = nil
	}
	// The old deadline no longer applies; clear its expired flag.
	s.readDeadlineExceeded = false
	if !t.IsZero() {
		// time.Until is the idiomatic form of t.Sub(time.Now()).
		s.readTimer = time.AfterFunc(time.Until(t), s.onExpired(&s.readDeadlineExceeded))
	}
	return nil
}
// SetWriteDeadline sets the write timer. A zero time clears any existing
// deadline. Always returns nil; part of the net.Conn surface.
func (s *stream) SetWriteDeadline(t time.Time) error {
	s.m.Lock()
	defer s.m.Unlock()
	// Stop and drop any previous timer so it cannot fire spuriously.
	if s.writeTimer != nil {
		_ = s.writeTimer.Stop()
		s.writeTimer = nil
	}
	// The old deadline no longer applies; clear its expired flag.
	s.writeDeadlineExceeded = false
	if !t.IsZero() {
		// time.Until is the idiomatic form of t.Sub(time.Now()).
		s.writeTimer = time.AfterFunc(time.Until(t), s.onExpired(&s.writeDeadlineExceeded))
	}
	return nil
}
// UnblockAndBroadcast unblocks bytes and broadcasts so that writes can
// continue. Called when the remote ACKs `read` consumed bytes.
func (s *stream) UnblockAndBroadcast(read uint32) {
	s.m.Lock()
	defer s.m.Unlock()
	defer s.c.Broadcast()
	defer s.session.logger.Printf("unblock broadcasted : stream %d", s.id)
	s.unblocked += read
}
// PushAndBroadcast adds data to the read buffer and broadcasts so that
// reads can continue. A buffer write failure becomes the stream's terminal
// error.
func (s *stream) PushAndBroadcast(buf []byte) {
	s.m.Lock()
	defer s.m.Unlock()
	defer s.c.Broadcast()
	defer s.session.logger.Printf("push broadcasted : stream %d", s.id)
	// Only record failures: the previous unconditional assignment cleared
	// an earlier terminal error back to nil on the next successful push.
	if _, err := s.b.Write(buf); err != nil {
		s.endErr = err
	}
}
// AcceptStream accepts the current stream by closing the accepted channel,
// records the remote's initial write capacity, and wakes any waiters.
func (s *stream) AcceptStream(read uint32) {
	s.m.Lock()
	defer s.m.Unlock()
	defer s.c.Broadcast()
	s.unblocked += read
	s.state = streamAccepted
	close(s.accepted)
}
// SetDeadline sets the read and write deadlines for the stream.
// Part of the net.Conn surface.
func (s *stream) SetDeadline(t time.Time) error {
	// endErr is protected by s.m everywhere else; the previous code wrote
	// it without the lock. (Set*Deadline currently always return nil, but
	// keep the error paths correct.)
	if err := s.SetReadDeadline(t); err != nil {
		s.m.Lock()
		s.endErr = err
		s.m.Unlock()
		return err
	}
	if err := s.SetWriteDeadline(t); err != nil {
		s.m.Lock()
		s.endErr = err
		s.m.Unlock()
		return err
	}
	return nil
}
// IsRemovable reports whether the session may discard this stream: both
// sides are closed (streamDead) and all buffered data has been read.
func (s *stream) IsRemovable() bool {
	s.m.Lock()
	defer s.m.Unlock()
	return s.state == streamDead && s.b.Len() == 0
}

// setRemoteClosed records that the remote sent a fin packet. If the local
// side already closed, the stream becomes dead; otherwise only the remote
// side is marked closed. Waiters are woken so reads can return EOF.
func (s *stream) setRemoteClosed() {
	s.m.Lock()
	defer s.m.Unlock()
	defer s.c.Broadcast()
	if s.state == streamClosed {
		s.state = streamDead
	} else {
		s.state = streamRemoteClosed
	}
}

// LocalAddr returns the local address of the underlying connection.
func (s *stream) LocalAddr() net.Addr {
	return s.session.conn.LocalAddr()
}

// RemoteAddr returns the remote address of the underlying connection.
func (s *stream) RemoteAddr() net.Addr {
	return s.session.conn.RemoteAddr()
}
// Close closes the local side of the stream and notifies the remote with a
// FIN frame. Closing an already-closed stream is a no-op.
func (s *stream) Close() error {
	s.m.Lock()
	defer s.m.Unlock()
	defer s.c.Broadcast()
	switch s.state {
	case streamDead, streamClosed:
		// already closed locally; nothing to do
		return nil
	case streamRemoteClosed:
		// both directions are now shut
		s.state = streamDead
	default:
		s.state = streamClosed
	}
	// tell the remote no more data will be sent
	return s.session.send(newFinFrame(s.id))
}
// Read reads bytes from the stream into buf, blocking until data arrives,
// the stream ends, or the read deadline expires. Each successful read ACKs
// the consumed byte count back to the remote for flow control.
func (s *stream) Read(buf []byte) (int, error) {
	s.m.Lock()
	defer s.m.Unlock()
	defer s.c.Broadcast()
	s.session.logger.Printf("stream %d: read requested", s.id)
	// Block while there is nothing to read and no reason to give up.
	for s.b.Len() == 0 && s.endErr == nil && !s.readDeadlineExceeded && s.state != streamRemoteClosed && s.state != streamDead {
		s.session.logger.Printf("stream %d: read waiting", s.id)
		// wait
		s.c.Wait()
	}
	// Remote finished and the buffer is drained: clean EOF.
	if s.b.Len() == 0 && (s.state == streamRemoteClosed || s.state == streamDead) {
		return 0, io.EOF
	}
	if s.readDeadlineExceeded {
		return 0, ErrReadTimeout
	}
	if s.endErr != nil {
		return 0, s.endErr
	}
	n, _ := s.b.Read(buf)
	// Tell the remote how much buffer capacity we just freed.
	if err := s.session.send(newAckFrame(s.id, uint32(n))); err != nil {
		return n, err
	}
	s.session.logger.Printf("stream %d: read completed", s.id)
	return n, nil
}
// Write writes bytes to the stream, blocking until the remote has
// acknowledged capacity (flow control), the stream ends, or the write
// deadline expires. Returns the number of bytes written.
func (s *stream) Write(buf []byte) (int, error) {
	s.m.Lock()
	defer s.m.Unlock()
	defer s.c.Broadcast()
	l, w := len(buf), 0
	for w < l {
		// Wait until the remote unblocks capacity or the stream ends.
		for s.unblocked == 0 && s.endErr == nil && !s.writeDeadlineExceeded && s.state != streamClosed && s.state != streamDead {
			s.session.logger.Printf("stream %d: write waiting", s.id)
			s.c.Wait()
		}
		// if stream is closed or waiting to be empty then abort
		// unblocked not checked as stream can be closed, but bytes may be unblocked by remote
		if s.state == streamClosed || s.state == streamDead {
			return w, ErrBrokenPipe
		}
		if s.writeDeadlineExceeded {
			return w, ErrWriteTimeout
		}
		if s.endErr != nil {
			return w, s.endErr
		}
		// renamed from "cap", which shadowed the builtin cap()
		chunk := min(len(buf), int(s.unblocked))
		if err := s.session.send(newDataFrame(s.id, buf[:chunk])); err != nil {
			return w, err
		}
		buf = buf[chunk:]
		s.unblocked -= uint32(chunk)
		w += chunk
	}
	return w, nil
}
// Kill forces the stream into the streamDead state without notifying the
// remote; waiters are woken so pending reads/writes can bail out.
func (s *stream) Kill() {
	s.m.Lock()
	defer s.m.Unlock()
	defer s.c.Broadcast()
	s.session.logger.Printf("stream %d killed", s.id)
	s.state = streamDead
}
|
package wuxia
//Config is the settings needed by the static generator. It is inspired by the
//jekyll configuration options.
//
// The format can either be json, yaml or toml
// TODO: add yaml and toml support.
type Config struct {
	Source      string `json:"source"`
	Destination string `json:"destination"`
	// NOTE(review): tag "statc_dir" looks like a typo for "static_dir", but
	// existing config files may depend on it — confirm before renaming.
	StaticDir    string `json:"statc_dir"`
	TemplatesDir string `json:"templates_dir"`
	ThemeDir     string `json:"theme_dir"`
	DefaultTheme string `json:"default_theme"`
	Safe         bool   `json:"safe"`
	// NOTE(review): "Excluede" is a typo for "Exclude"; renaming would break
	// external users of this exported field.
	Excluede []string `json:"exclude"`
	Include  []string `json:"include"`
}
//DefaultConfig returns *Config with default settings.
func DefaultConfig() *Config {
	c := &Config{
		Source:       "src",
		Destination:  "dest",
		StaticDir:    "static",
		TemplatesDir: "templates",
		ThemeDir:     "themes",
		DefaultTheme: "doxsey",
		Safe:         true,
	}
	c.Excluede = []string{".git/*", "CONTRIBUTING.md"}
	c.Include = []string{"LICENCE.md"}
	return c
}
//System configuration for the whole static generator system.
type System struct {
	Boot   *Boot   `json:"boot"`
	Config *Config `json:"config"`
	Plan   *Plan   `json:"plan"`
}

//Boot necessary info to bootstrap the Generator.
type Boot struct {
	// NOTE(review): "ConfigiFile" looks like a typo for "ConfigFile";
	// renaming would break external users of this exported field.
	ConfigiFile string            `json:"config_file"`
	PlanFile    string            `json:"plan_file"`
	ENV         map[string]string `json:"env"`
}

//Theme description of a theme.
type Theme struct {
	Name   string   `json:"name"`
	Author []Author `json:"author"`
}

//Author description of the author of the project being built.
type Author struct {
	Name     string `json:"name"`
	Github   string `json:"github"`
	Twitter  string `json:"twitter"`
	Linkedin string `json:"linkedin"`
	Email    string `json:"email"`
	Website  string `json:"website"`
}

// Plan is the execution planner object. It states the steps and stages on which
// the execution process should take place.
type Plan struct {
	Title string `json:"title"`
	// Modules that are supposed to be loaded before the execution starts. The
	// execution process won't start if one of the dependencies is missing.
	Dependency     []string `json:"dependencies"`
	TemplateEngine string   `json:"template_engine"`
	Before         []string `json:"before"`
	Exec           []string `json:"exec"`
	After          []string `json:"after"`
}

//File is a representation of a file unit as it is passed around for
//processing.
// File content is passed as a string so as to allow easy transition between Go
// and javascript boundary.
type File struct {
	Name     string                 `json:"name"`
	Meta     map[string]interface{} `json:"meta"`
	Contents string                 `json:"contents"`
}
Add plugin_dir config setting
package wuxia
//Config is the settings needed by the static generator. It is inspired by the
//jekyll configuration options.
//
// The format can either be json, yaml or toml
// TODO: add yaml and toml support.
type Config struct {
	Source      string `json:"source"`
	Destination string `json:"destination"`
	// NOTE(review): tag "statc_dir" looks like a typo for "static_dir", but
	// existing config files may depend on it — confirm before renaming.
	StaticDir    string `json:"statc_dir"`
	TemplatesDir string `json:"templates_dir"`
	ThemeDir     string `json:"theme_dir"`
	DefaultTheme string `json:"default_theme"`
	PluginDir    string `json:"plugin_dir"`
	Safe         bool   `json:"safe"`
	// NOTE(review): "Excluede" is a typo for "Exclude"; renaming would break
	// external users of this exported field.
	Excluede []string `json:"exclude"`
	Include  []string `json:"include"`
}
//DefaultConfig returns *Config with default settings.
func DefaultConfig() *Config {
	c := &Config{
		Source:       "src",
		Destination:  "dest",
		StaticDir:    "static",
		TemplatesDir: "templates",
		ThemeDir:     "themes",
		DefaultTheme: "doxsey",
		PluginDir:    "plugins",
		Safe:         true,
	}
	c.Excluede = []string{"CONTRIBUTING.md"}
	c.Include = []string{"LICENCE.md"}
	return c
}
//System configuration for the whole static generator system.
type System struct {
	Boot   *Boot   `json:"boot"`
	Config *Config `json:"config"`
	Plan   *Plan   `json:"plan"`
}

//Boot necessary info to bootstrap the Generator.
type Boot struct {
	// NOTE(review): "ConfigiFile" looks like a typo for "ConfigFile";
	// renaming would break external users of this exported field.
	ConfigiFile string            `json:"config_file"`
	PlanFile    string            `json:"plan_file"`
	ENV         map[string]string `json:"env"`
}

//Theme description of a theme.
type Theme struct {
	Name   string   `json:"name"`
	Author []Author `json:"author"`
}

//Author description of the author of the project being built.
type Author struct {
	Name     string `json:"name"`
	Github   string `json:"github"`
	Twitter  string `json:"twitter"`
	Linkedin string `json:"linkedin"`
	Email    string `json:"email"`
	Website  string `json:"website"`
}

// Plan is the execution planner object. It states the steps and stages on which
// the execution process should take place.
type Plan struct {
	Title string `json:"title"`
	// Modules that are supposed to be loaded before the execution starts. The
	// execution process won't start if one of the dependencies is missing.
	Dependency     []string `json:"dependencies"`
	TemplateEngine string   `json:"template_engine"`
	Before         []string `json:"before"`
	Exec           []string `json:"exec"`
	After          []string `json:"after"`
}

//File is a representation of a file unit as it is passed around for
//processing.
// File content is passed as a string so as to allow easy transition between Go
// and javascript boundary.
type File struct {
	Name     string                 `json:"name"`
	Meta     map[string]interface{} `json:"meta"`
	Contents string                 `json:"contents"`
}
|
/* Copyright (c) 2016 Jason Ish
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package elasticsearch
import (
"encoding/json"
"fmt"
"github.com/jasonish/evebox/eve"
"github.com/jasonish/evebox/log"
"github.com/oklog/ulid"
"github.com/pkg/errors"
"math/rand"
"net/http"
"sync"
"time"
)
// AtTimestampFormat is the layout used for the @timestamp field
// (millisecond precision, Zulu suffix).
const AtTimestampFormat = "2006-01-02T15:04:05.999Z"

// templateCheckLock serializes the template check/installation in Commit so
// only one goroutine performs it at a time.
var templateCheckLock sync.Mutex

// entropyLock guards lastSeed so concurrently created indexers always get
// distinct seeds.
var entropyLock sync.Mutex
var lastSeed int64 = 0

// BulkEveIndexer accumulates eve events into an Elastic Search _bulk
// request body. buf is appended without synchronization, so a single
// indexer should not be shared across goroutines without external locking.
type BulkEveIndexer struct {
	es     *ElasticSearch
	queued uint   // number of events currently buffered
	buf    []byte // accumulated newline-delimited _bulk body
	// The entropy source for ulid generation.
	entropy *rand.Rand
}
// NewIndexer constructs a BulkEveIndexer bound to the given Elastic Search
// client, with a freshly seeded entropy source for ulid generation.
func NewIndexer(es *ElasticSearch) *BulkEveIndexer {
	indexer := &BulkEveIndexer{es: es}
	indexer.initEntropy()
	return indexer
}
// initEntropy seeds this indexer's ulid entropy source with a nanosecond
// timestamp that (under entropyLock) is guaranteed to differ from the
// previous indexer's seed, so two indexers never share a rand stream.
func (i *BulkEveIndexer) initEntropy() {
	entropyLock.Lock()
	defer entropyLock.Unlock()
	seed := time.Now().UnixNano()
	for seed == lastSeed {
		seed = time.Now().UnixNano()
	}
	lastSeed = seed
	i.entropy = rand.New(rand.NewSource(seed))
}
// DecodeResponse decodes an Elastic Search HTTP response into a Response;
// thin method wrapper over the package-level DecodeResponse.
func (i *BulkEveIndexer) DecodeResponse(response *http.Response) (*Response, error) {
	return DecodeResponse(response)
}
// Submit queues an event in the pending bulk buffer as a "create" action.
// The event gains an @timestamp field, is routed to a daily index
// (<prefix>-YYYY.MM.DD), and gets a ulid document ID derived from its
// timestamp. Nothing is sent until Commit.
func (i *BulkEveIndexer) Submit(event eve.EveEvent) error {
	timestamp := event.Timestamp()
	event["@timestamp"] = timestamp.UTC().Format(AtTimestampFormat)
	index := fmt.Sprintf("%s-%s", i.es.EventIndexPrefix,
		timestamp.UTC().Format("2006.01.02"))
	header := BulkCreateHeader{}
	header.Create.Index = index
	header.Create.Type = "log"
	id := ulid.MustNew(ulid.Timestamp(timestamp), i.entropy).String()
	header.Create.Id = id
	// Marshal errors were previously discarded; an unmarshalable event
	// would silently corrupt the whole bulk body, so fail the submit.
	rheader, err := json.Marshal(header)
	if err != nil {
		return err
	}
	revent, err := json.Marshal(event)
	if err != nil {
		return err
	}
	i.buf = append(i.buf, rheader...)
	i.buf = append(i.buf, '\n')
	i.buf = append(i.buf, revent...)
	i.buf = append(i.buf, '\n')
	i.queued++
	return nil
}
// Commit flushes the buffered bulk body to Elastic Search, first ensuring
// the index template is installed. Returns the decoded bulk response, or
// (nil, nil) when nothing was queued.
func (i *BulkEveIndexer) Commit() (interface{}, error) {
	// Probably need to rethink this, perhaps do it on startup. But periodic
	// checks are required in case Elastic Search was re-installed or
	// something and the templates lost.
	if err := i.ensureTemplate(); err != nil {
		return nil, err
	}
	if len(i.buf) == 0 {
		return nil, nil
	}
	response, err := i.es.httpClient.PostBytes("_bulk",
		"application/json", i.buf)
	if err != nil {
		return nil, err
	}
	// Reset the buffer/counter only after a successful POST.
	i.buf = i.buf[:0]
	i.queued = 0
	return i.DecodeResponse(response)
}

// ensureTemplate checks that the event index template exists and installs
// it if missing. The whole check-and-install runs under templateCheckLock
// so only one goroutine does the work; defer guarantees the lock is
// released on every path (the old code unlocked manually in three places).
func (i *BulkEveIndexer) ensureTemplate() error {
	templateCheckLock.Lock()
	defer templateCheckLock.Unlock()
	exists, err := i.es.TemplateExists(i.es.EventIndexPrefix)
	if err != nil {
		log.Error("Failed to check if template %s exists: %v",
			i.es.EventIndexPrefix, err)
		return errors.Errorf("no template installed for configured index")
	}
	if !exists {
		log.Warning("Template %s does not exist, will create.",
			i.es.EventIndexPrefix)
		if err := i.es.LoadTemplate(i.es.EventIndexPrefix, 0); err != nil {
			log.Error("Failed to install template: %v", err)
			return errors.Errorf("failed to install template for configured index")
		}
	}
	return nil
}
es: use doc type "doc"
for better compatibility with Logstash and Filebeat
/* Copyright (c) 2016 Jason Ish
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package elasticsearch
import (
"encoding/json"
"fmt"
"github.com/jasonish/evebox/eve"
"github.com/jasonish/evebox/log"
"github.com/oklog/ulid"
"github.com/pkg/errors"
"math/rand"
"net/http"
"sync"
"time"
)
// AtTimestampFormat is the layout used for the "@timestamp" field added to
// each event (UTC, sub-second precision, "Z" suffix).
const AtTimestampFormat = "2006-01-02T15:04:05.999Z"

// templateCheckLock serializes the template existence check in Commit so
// only one goroutine performs it at a time.
var templateCheckLock sync.Mutex

// entropyLock guards lastSeed during indexer initialization.
var entropyLock sync.Mutex

// lastSeed is the seed handed to the most recently created indexer, used to
// guarantee each indexer receives a distinct seed.
var lastSeed int64 = 0
// BulkEveIndexer buffers eve events and indexes them into Elastic Search
// using the bulk API.
type BulkEveIndexer struct {
	// The Elastic Search connection events are indexed into.
	es *ElasticSearch

	// Number of events currently buffered but not yet committed.
	queued uint

	// Accumulated bulk request body (header/document line pairs).
	buf []byte

	// The entropy source for ulid generation.
	entropy *rand.Rand
}
// NewIndexer creates a BulkEveIndexer bound to the given Elastic Search
// connection, with its ULID entropy source already initialized.
func NewIndexer(es *ElasticSearch) *BulkEveIndexer {
	bulk := &BulkEveIndexer{es: es}
	bulk.initEntropy()
	return bulk
}
// initEntropy seeds this indexer's ULID entropy source with a seed that is
// guaranteed to differ from the one given to the previously created
// indexer. entropyLock serializes access to the shared lastSeed variable.
func (i *BulkEveIndexer) initEntropy() {
	entropyLock.Lock()
	defer entropyLock.Unlock()

	// Spin until the clock yields a seed distinct from the last one handed
	// out, so concurrently created indexers never share a seed.
	seed := lastSeed
	for seed == lastSeed {
		seed = time.Now().UnixNano()
	}
	lastSeed = seed

	i.entropy = rand.New(rand.NewSource(seed))
}
// DecodeResponse decodes an Elastic Search HTTP response body into a
// *Response by delegating to the package-level DecodeResponse function.
func (i *BulkEveIndexer) DecodeResponse(response *http.Response) (*Response, error) {
	return DecodeResponse(response)
}
// Submit queues a single eve event for bulk indexing. The event is stamped
// with an "@timestamp" field, serialized together with its bulk "create"
// action header into the internal buffer, and counted as queued. Nothing is
// sent to Elastic Search until Commit is called.
//
// Returns an error if either the action header or the event itself fails to
// serialize to JSON; in that case nothing is appended to the buffer.
func (i *BulkEveIndexer) Submit(event eve.EveEvent) error {
	timestamp := event.Timestamp()
	event["@timestamp"] = timestamp.UTC().Format(AtTimestampFormat)

	// Events go to a daily index: <prefix>-YYYY.MM.DD.
	index := fmt.Sprintf("%s-%s", i.es.EventIndexPrefix,
		timestamp.UTC().Format("2006.01.02"))

	header := BulkCreateHeader{}
	header.Create.Index = index
	header.Create.Type = "doc"

	// Document ID is a ULID derived from the event timestamp so IDs sort
	// roughly by time.
	header.Create.Id = ulid.MustNew(ulid.Timestamp(timestamp), i.entropy).String()

	// Fix: the original discarded these marshal errors with "_" while still
	// declaring an error return; report them instead of silently queueing
	// nothing or a half-written pair.
	rheader, err := json.Marshal(header)
	if err != nil {
		return err
	}
	revent, err := json.Marshal(event)
	if err != nil {
		return err
	}

	// Bulk API body format: action header line, then the document, each
	// newline terminated.
	i.buf = append(i.buf, rheader...)
	i.buf = append(i.buf, '\n')
	i.buf = append(i.buf, revent...)
	i.buf = append(i.buf, '\n')
	i.queued++

	return nil
}
// ensureTemplate verifies that the index template for the configured event
// index exists, installing it if missing.
//
// This is wrapped in a lock so only one go-routine ends up doing the check;
// using defer guarantees the lock is released on every return path.
//
// Probably need to rethink this, perhaps do it on startup. But periodic
// checks are required in case Elastic Search was re-installed or something
// and the templates lost.
func (i *BulkEveIndexer) ensureTemplate() error {
	templateCheckLock.Lock()
	defer templateCheckLock.Unlock()

	exists, err := i.es.TemplateExists(i.es.EventIndexPrefix)
	if err != nil {
		log.Error("Failed to check if template %s exists: %v",
			i.es.EventIndexPrefix, err)
		return errors.Errorf("no template installed for configured index")
	}
	if !exists {
		log.Warning("Template %s does not exist, will create.",
			i.es.EventIndexPrefix)
		if err := i.es.LoadTemplate(i.es.EventIndexPrefix, 0); err != nil {
			log.Error("Failed to install template: %v", err)
			return errors.Errorf("failed to install template for configured index")
		}
	}
	return nil
}

// Commit flushes all buffered events to Elastic Search with a single bulk
// request, after making sure the index template is installed. On success the
// internal buffer is reset and the decoded bulk response is returned; if no
// events are buffered it returns (nil, nil).
func (i *BulkEveIndexer) Commit() (interface{}, error) {
	if err := i.ensureTemplate(); err != nil {
		return nil, err
	}

	// Nothing queued, nothing to do.
	if len(i.buf) == 0 {
		return nil, nil
	}

	response, err := i.es.httpClient.PostBytes("_bulk",
		"application/json", i.buf)
	if err != nil {
		return nil, err
	}

	// Reset the buffer (keeping its capacity) and the queued count.
	i.buf = i.buf[:0]
	i.queued = 0

	return i.DecodeResponse(response)
}
|
package wuxia
// Config is the settings needed by the static generator. It is inspired by
// the jekyll configuration options.
//
// The format can either be json, yaml or toml.
// TODO: add yaml and toml support.
type Config struct {
	Source      string `json:"source"`
	Destination string `json:"destination"`
	Safe        bool   `json:"safe"`

	// NOTE(review): field name is misspelled ("Excluede" for "Exclude");
	// it is exported, so renaming it would break callers. The JSON key is
	// spelled correctly.
	Excluede  []string `json:"exclude"`
	Include   []string `json:"include"`
	KeepFiles []string `json:"keep_files"`
	TimeZone  string   `json:"timezone"`
	Encoding  string   `json:"encoding"`
	Port      int      `json:"port"`
	Host      string   `json:"host"`
	BaseURL   string   `json:"base_url"`
}
// System is the configuration for the whole static generator system.
type System struct {
	Boot   *Boot   `json:"boot"`
	Config *Config `json:"config"`
	Plan   *Plan   `json:"plan"`
}

// Boot holds the info necessary to bootstrap the Generator.
type Boot struct {
	// NOTE(review): field name is misspelled ("ConfigiFile"); it is
	// exported, so renaming it would break callers.
	ConfigiFile string            `json:"config_file"`
	PlanFile    string            `json:"plan_file"`
	ENV         map[string]string `json:"env"`
}

// Theme is a description of a theme.
type Theme struct {
	Name   string   `json:"name"`
	Author []Author `json:"author"`
}

// Author describes the author of the project being built.
type Author struct {
	Name     string `json:"name"`
	Github   string `json:"github"`
	Twitter  string `json:"twitter"`
	Linkedin string `json:"linkedin"`
	Email    string `json:"email"`
	Website  string `json:"website"`
}

// Plan is the execution planner object. It states the steps and stages
// through which the execution process should go.
type Plan struct {
	Title string `json:"title"`

	// Modules that are supposed to be loaded before the execution starts.
	// The execution process won't start if one of the dependencies is
	// missing.
	Dependency     []string `json:"dependencies"`
	TemplateEngine string   `json:"template_engine"`
	Before         []string `json:"before"`
	Exec           []string `json:"exec"`
	After          []string `json:"after"`
}

// File is a representation of a file unit as it is passed around for
// processing.
//
// File content is passed as a string so as to allow easy transition across
// the Go and javascript boundary.
type File struct {
	Name     string                 `json:"name"`
	Meta     map[string]interface{} `json:"meta"`
	Contents string                 `json:"contents"`
}
Add DefaultConfig
package wuxia
// Config is the settings needed by the static generator. It is inspired by
// the jekyll configuration options.
//
// The format can either be json, yaml or toml.
// TODO: add yaml and toml support.
type Config struct {
	Source      string `json:"source"`
	Destination string `json:"destination"`
	Safe        bool   `json:"safe"`

	// NOTE(review): field name is misspelled ("Excluede" for "Exclude");
	// it is exported, so renaming it would break callers. The JSON key is
	// spelled correctly.
	Excluede  []string `json:"exclude"`
	Include   []string `json:"include"`
	KeepFiles []string `json:"keep_files"`
	TimeZone  string   `json:"timezone"`
	Encoding  string   `json:"encoding"`
	Port      int      `json:"port"`
	Host      string   `json:"host"`
	BaseURL   string   `json:"base_url"`
}
// DefaultConfig returns a *Config populated with default settings: sources
// are read from "src", output goes to "dest", safe mode is enabled, the
// contributor guidelines are excluded from the build, and the licence file
// is force-included.
func DefaultConfig() *Config {
	return &Config{
		Source:      "src",
		Destination: "dest",
		Safe:        true,
		Excluede: []string{
			// Fix: was "CONTRITUTING" (typo), which never matched the
			// extensionless CONTRIBUTING file it was meant to exclude.
			"CONTRIBUTING", "CONTRIBUTING.md",
		},
		Include: []string{
			"LICENCE.md",
		},
	}
}
// System is the configuration for the whole static generator system.
type System struct {
	Boot   *Boot   `json:"boot"`
	Config *Config `json:"config"`
	Plan   *Plan   `json:"plan"`
}

// Boot holds the info necessary to bootstrap the Generator.
type Boot struct {
	// NOTE(review): field name is misspelled ("ConfigiFile"); it is
	// exported, so renaming it would break callers.
	ConfigiFile string            `json:"config_file"`
	PlanFile    string            `json:"plan_file"`
	ENV         map[string]string `json:"env"`
}

// Theme is a description of a theme.
type Theme struct {
	Name   string   `json:"name"`
	Author []Author `json:"author"`
}

// Author describes the author of the project being built.
type Author struct {
	Name     string `json:"name"`
	Github   string `json:"github"`
	Twitter  string `json:"twitter"`
	Linkedin string `json:"linkedin"`
	Email    string `json:"email"`
	Website  string `json:"website"`
}

// Plan is the execution planner object. It states the steps and stages
// through which the execution process should go.
type Plan struct {
	Title string `json:"title"`

	// Modules that are supposed to be loaded before the execution starts.
	// The execution process won't start if one of the dependencies is
	// missing.
	Dependency     []string `json:"dependencies"`
	TemplateEngine string   `json:"template_engine"`
	Before         []string `json:"before"`
	Exec           []string `json:"exec"`
	After          []string `json:"after"`
}

// File is a representation of a file unit as it is passed around for
// processing.
//
// File content is passed as a string so as to allow easy transition across
// the Go and javascript boundary.
type File struct {
	Name     string                 `json:"name"`
	Meta     map[string]interface{} `json:"meta"`
	Contents string                 `json:"contents"`
}
|
// Package errors provides common error handling tools
// NOTE: Subject to change, do not rely on this package from outside git-lfs source
package errors
// The LFS error system provides a simple wrapper around Go errors and the
// ability to inspect errors. It is strongly influenced by Dave Cheney's post
// at http://dave.cheney.net/2014/12/24/inspecting-errors.
//
// When passing errors out of lfs package functions, the return type should
// always be `error`. The wrappedError details are not exported. If an error is
// the kind of error a caller should need to investigate, an IsXError()
// function is provided that tells the caller if the error is of that type.
// There should only be a handfull of cases where a simple `error` is
// insufficient.
//
// The error behaviors can be nested when created. For example, the not
// implemented error can also be marked as a fatal error:
//
// func LfsFunction() error {
// err := functionCall()
// if err != nil {
// return newFatalError(newNotImplementedError(err))
// }
// return nil
// }
//
// Then in the caller:
//
// err := lfs.LfsFunction()
// if lfs.IsNotImplementedError(err) {
// log.Print("feature not implemented")
// }
// if lfs.IsFatalError(err) {
// os.Exit(1)
// }
//
// Wrapped errors contain a context, which is a map[string]string. These
// contexts can be accessed through the Error*Context functions. Calling these
// functions on a regular Go error will have no effect.
//
// Example:
//
// err := lfs.SomeFunction()
// errors.ErrorSetContext(err, "foo", "bar")
// errors.ErrorGetContext(err, "foo") // => "bar"
// errors.ErrorDelContext(err, "foo")
//
// Wrapped errors also contain the stack from the point at which they are
// called. The stack is accessed via ErrorStack(). Calling ErrorStack() on a
// regular Go error will return an empty byte slice.
import (
"fmt"
"runtime"
"github.com/pkg/errors"
)
// New returns an error with the supplied message. New also records the stack
// trace at the point it was called.
func New(message string) error {
	return errors.New(message)
}

// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
// Errorf also records the stack trace at the point it was called.
func Errorf(format string, args ...interface{}) error {
	return errors.Errorf(format, args...)
}

// Wrap wraps an error with an additional message.
func Wrap(err error, msg string) error {
	return newWrappedError(err, msg)
}

// Wrapf wraps an error with an additional formatted message. A nil err is
// replaced with an empty error so the wrapper always has a cause.
func Wrapf(err error, format string, args ...interface{}) error {
	if err == nil {
		err = errors.New("")
	}

	message := fmt.Sprintf(format, args...)

	return newWrappedError(err, message)
}
// errorWithCause is the minimal interface satisfied by wrapped errors that
// carry an underlying cause (as produced by github.com/pkg/errors).
type errorWithCause interface {
	Error() string
	Cause() error
}

// parentOf returns the direct cause of err, or nil if err does not carry
// one. Used by the Is*Error predicates to walk the wrap chain.
func parentOf(err error) error {
	if c, ok := err.(errorWithCause); ok {
		return c.Cause()
	}

	return nil
}
// The Is*Error predicates below all follow the same pattern: check whether
// the error itself advertises the behavior via a marker method and, if not,
// recurse into its cause (if any).

// IsFatalError indicates that the error is fatal and the process should exit
// immediately after handling the error.
func IsFatalError(err error) bool {
	if e, ok := err.(interface {
		Fatal() bool
	}); ok {
		return e.Fatal()
	}
	if parent := parentOf(err); parent != nil {
		return IsFatalError(parent)
	}
	return false
}

// IsNotImplementedError indicates the client attempted to use a feature the
// server has not implemented (e.g. the batch endpoint).
func IsNotImplementedError(err error) bool {
	if e, ok := err.(interface {
		NotImplemented() bool
	}); ok {
		return e.NotImplemented()
	}
	if parent := parentOf(err); parent != nil {
		return IsNotImplementedError(parent)
	}
	return false
}

// IsAuthError indicates the client provided a request with invalid or no
// authentication credentials when credentials are required (e.g. HTTP 401).
func IsAuthError(err error) bool {
	if e, ok := err.(interface {
		AuthError() bool
	}); ok {
		return e.AuthError()
	}
	if parent := parentOf(err); parent != nil {
		return IsAuthError(parent)
	}
	return false
}

// IsSmudgeError indicates an error while smudging a file.
func IsSmudgeError(err error) bool {
	if e, ok := err.(interface {
		SmudgeError() bool
	}); ok {
		return e.SmudgeError()
	}
	if parent := parentOf(err); parent != nil {
		return IsSmudgeError(parent)
	}
	return false
}

// IsCleanPointerError indicates an error while cleaning a file.
func IsCleanPointerError(err error) bool {
	if e, ok := err.(interface {
		CleanPointerError() bool
	}); ok {
		return e.CleanPointerError()
	}
	if parent := parentOf(err); parent != nil {
		return IsCleanPointerError(parent)
	}
	return false
}

// IsNotAPointerError indicates the parsed data is not an LFS pointer.
func IsNotAPointerError(err error) bool {
	if e, ok := err.(interface {
		NotAPointerError() bool
	}); ok {
		return e.NotAPointerError()
	}
	if parent := parentOf(err); parent != nil {
		return IsNotAPointerError(parent)
	}
	return false
}

// IsBadPointerKeyError indicates that the parsed data has an invalid key.
func IsBadPointerKeyError(err error) bool {
	if e, ok := err.(interface {
		BadPointerKeyError() bool
	}); ok {
		return e.BadPointerKeyError()
	}
	if parent := parentOf(err); parent != nil {
		return IsBadPointerKeyError(parent)
	}
	return false
}

// StandardizeBadPointerError converts a bad pointer error about a missing
// "version" key into a NotAPointerError; any other error passes through
// unchanged.
func StandardizeBadPointerError(err error) error {
	if IsBadPointerKeyError(err) {
		badErr := err.(badPointerKeyError)
		if badErr.Expected == "version" {
			return NewNotAPointerError(err)
		}
	}
	return err
}

// IsDownloadDeclinedError indicates that the smudge operation should not download.
// TODO: I don't really like using errors to control that flow, it should be refactored.
func IsDownloadDeclinedError(err error) bool {
	if e, ok := err.(interface {
		DownloadDeclinedError() bool
	}); ok {
		return e.DownloadDeclinedError()
	}
	if parent := parentOf(err); parent != nil {
		return IsDownloadDeclinedError(parent)
	}
	return false
}

// IsRetriableError indicates the low level transfer had an error but the
// caller may retry the operation.
func IsRetriableError(err error) bool {
	if e, ok := err.(interface {
		RetriableError() bool
	}); ok {
		return e.RetriableError()
	}
	if parent := parentOf(err); parent != nil {
		return IsRetriableError(parent)
	}
	return false
}
// ErrorSetContext sets a value in the error's context. If the error has not
// been wrapped, it does nothing.
func ErrorSetContext(err error, key string, value interface{}) {
	if e, ok := err.(errorWrapper); ok {
		e.Set(key, value)
	}
}

// ErrorGetContext gets a value from the error's context. If the error has not
// been wrapped, it returns an empty string.
//
// NOTE(review): the not-wrapped fallback is "" (not nil) despite the
// interface{} return type; callers may depend on that.
func ErrorGetContext(err error, key string) interface{} {
	if e, ok := err.(errorWrapper); ok {
		return e.Get(key)
	}
	return ""
}

// ErrorDelContext removes a value from the error's context. If the error has
// not been wrapped, it does nothing.
func ErrorDelContext(err error, key string) {
	if e, ok := err.(errorWrapper); ok {
		e.Del(key)
	}
}

// ErrorContext returns the context map for an error if it is a wrappedError.
// If it is not a wrappedError it returns nil.
func ErrorContext(err error) map[string]interface{} {
	if e, ok := err.(errorWrapper); ok {
		return e.Context()
	}
	return nil
}

// errorWrapper is the interface satisfied by wrappedError: an error with a
// cause plus a mutable key/value context.
type errorWrapper interface {
	errorWithCause

	Set(string, interface{})
	Get(string) interface{}
	Del(string)
	Context() map[string]interface{}
}
// wrappedError is the base error wrapper. It provides a Message string, a
// stack, and a context map around a regular Go error.
type wrappedError struct {
	errorWithCause
	context map[string]interface{}
}

// newWrappedError creates a wrappedError around err annotated with message.
// A nil err becomes a generic "Error"; with an empty message an existing
// errorWithCause is kept as-is, while a plain error is wrapped under "LFS".
func newWrappedError(err error, message string) errorWrapper {
	if err == nil {
		err = errors.New("Error")
	}

	var errWithCause errorWithCause

	if len(message) > 0 {
		errWithCause = errors.Wrap(err, message).(errorWithCause)
	} else if ewc, ok := err.(errorWithCause); ok {
		errWithCause = ewc
	} else {
		errWithCause = errors.Wrap(err, "LFS").(errorWithCause)
	}

	return &wrappedError{
		context:        make(map[string]interface{}),
		errorWithCause: errWithCause,
	}
}

// Set sets the value for the key in the context.
func (e wrappedError) Set(key string, val interface{}) {
	e.context[key] = val
}

// Get gets the value for a key in the context.
func (e wrappedError) Get(key string) interface{} {
	return e.context[key]
}

// Del removes a key from the context.
func (e wrappedError) Del(key string) {
	delete(e.context, key)
}

// Context returns the underlying context map.
func (e wrappedError) Context() map[string]interface{} {
	return e.context
}
// Definitions for IsFatalError()

type fatalError struct {
	errorWrapper
}

func (e fatalError) Fatal() bool {
	return true
}

// NewFatalError wraps err as a fatal error.
func NewFatalError(err error) error {
	return fatalError{newWrappedError(err, "Fatal error")}
}

// Definitions for IsNotImplementedError()

type notImplementedError struct {
	errorWrapper
}

func (e notImplementedError) NotImplemented() bool {
	return true
}

// NewNotImplementedError wraps err as a not-implemented error.
func NewNotImplementedError(err error) error {
	return notImplementedError{newWrappedError(err, "Not implemented")}
}

// Definitions for IsAuthError()

type authError struct {
	errorWrapper
}

func (e authError) AuthError() bool {
	return true
}

// NewAuthError wraps err as an authentication error.
func NewAuthError(err error) error {
	return authError{newWrappedError(err, "Authentication required")}
}

// Definitions for IsSmudgeError()

type smudgeError struct {
	errorWrapper
}

func (e smudgeError) SmudgeError() bool {
	return true
}

// NewSmudgeError wraps err as a smudge error, recording the object ID and
// file name in the error context.
func NewSmudgeError(err error, oid, filename string) error {
	e := smudgeError{newWrappedError(err, "Smudge error")}
	ErrorSetContext(e, "OID", oid)
	ErrorSetContext(e, "FileName", filename)
	return e
}

// Definitions for IsCleanPointerError()

type cleanPointerError struct {
	errorWrapper
}

func (e cleanPointerError) CleanPointerError() bool {
	return true
}

// NewCleanPointerError creates a clean pointer error, recording the parsed
// pointer and the raw bytes in the error context.
func NewCleanPointerError(pointer interface{}, bytes []byte) error {
	err := New("pointer error")
	e := cleanPointerError{newWrappedError(err, "clean")}
	ErrorSetContext(e, "pointer", pointer)
	ErrorSetContext(e, "bytes", bytes)
	return e
}

// Definitions for IsNotAPointerError()

type notAPointerError struct {
	errorWrapper
}

func (e notAPointerError) NotAPointerError() bool {
	return true
}

// NewNotAPointerError wraps err as a pointer-file error.
func NewNotAPointerError(err error) error {
	return notAPointerError{newWrappedError(err, "Pointer file error")}
}

// badPointerKeyError records the expected and actual keys encountered while
// parsing a pointer file.
type badPointerKeyError struct {
	Expected string
	Actual   string

	errorWrapper
}

func (e badPointerKeyError) BadPointerKeyError() bool {
	return true
}

// NewBadPointerKeyError creates an error for an unexpected key encountered
// while parsing a pointer file.
func NewBadPointerKeyError(expected, actual string) error {
	err := Errorf("Expected key %s, got %s", expected, actual)
	return badPointerKeyError{expected, actual, newWrappedError(err, "pointer parsing")}
}

// Definitions for IsDownloadDeclinedError()

type downloadDeclinedError struct {
	errorWrapper
}

func (e downloadDeclinedError) DownloadDeclinedError() bool {
	return true
}

// NewDownloadDeclinedError wraps err as a download-declined error with the
// given message.
func NewDownloadDeclinedError(err error, msg string) error {
	return downloadDeclinedError{newWrappedError(err, msg)}
}

// Definitions for IsRetriableError()

type retriableError struct {
	errorWrapper
}

func (e retriableError) RetriableError() bool {
	return true
}

// NewRetriableError wraps err as a retriable error.
func NewRetriableError(err error) error {
	return retriableError{newWrappedError(err, "")}
}

// Stack returns a byte slice containing the current goroutine's stack trace
// as produced by runtime.Stack, truncated to the bytes actually written.
func Stack() []byte {
	stackBuf := make([]byte, 1024*1024)
	written := runtime.Stack(stackBuf, false)
	return stackBuf[:written]
}
errors: implement fmt.Formatter
// Package errors provides common error handling tools
// NOTE: Subject to change, do not rely on this package from outside git-lfs source
package errors
// The LFS error system provides a simple wrapper around Go errors and the
// ability to inspect errors. It is strongly influenced by Dave Cheney's post
// at http://dave.cheney.net/2014/12/24/inspecting-errors.
//
// When passing errors out of lfs package functions, the return type should
// always be `error`. The wrappedError details are not exported. If an error is
// the kind of error a caller should need to investigate, an IsXError()
// function is provided that tells the caller if the error is of that type.
// There should only be a handfull of cases where a simple `error` is
// insufficient.
//
// The error behaviors can be nested when created. For example, the not
// implemented error can also be marked as a fatal error:
//
// func LfsFunction() error {
// err := functionCall()
// if err != nil {
// return newFatalError(newNotImplementedError(err))
// }
// return nil
// }
//
// Then in the caller:
//
// err := lfs.LfsFunction()
// if lfs.IsNotImplementedError(err) {
// log.Print("feature not implemented")
// }
// if lfs.IsFatalError(err) {
// os.Exit(1)
// }
//
// Wrapped errors contain a context, which is a map[string]string. These
// contexts can be accessed through the Error*Context functions. Calling these
// functions on a regular Go error will have no effect.
//
// Example:
//
// err := lfs.SomeFunction()
// errors.ErrorSetContext(err, "foo", "bar")
// errors.ErrorGetContext(err, "foo") // => "bar"
// errors.ErrorDelContext(err, "foo")
//
// Wrapped errors also contain the stack from the point at which they are
// called. The stack is accessed via ErrorStack(). Calling ErrorStack() on a
// regular Go error will return an empty byte slice.
import (
"fmt"
"runtime"
"github.com/pkg/errors"
)
// New returns an error with the supplied message. New also records the stack
// trace at the point it was called.
func New(message string) error {
	return errors.New(message)
}

// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
// Errorf also records the stack trace at the point it was called.
func Errorf(format string, args ...interface{}) error {
	return errors.Errorf(format, args...)
}

// Wrap wraps an error with an additional message.
func Wrap(err error, msg string) error {
	return newWrappedError(err, msg)
}
// Wrapf wraps an error with an additional formatted message. A nil err is
// replaced with an empty error so the wrapper always has a cause.
func Wrapf(err error, format string, args ...interface{}) error {
	cause := err
	if cause == nil {
		cause = errors.New("")
	}
	return newWrappedError(cause, fmt.Sprintf(format, args...))
}
// errorWithCause is the interface satisfied by errors produced by
// github.com/pkg/errors: an error with a cause, a captured stack trace, and
// fmt.Formatter support (so %+v prints the stack).
type errorWithCause interface {
	Cause() error
	StackTrace() errors.StackTrace
	error
	fmt.Formatter
}

// parentOf returns the direct cause of err, or nil if err does not carry
// one. Used by the Is*Error predicates to walk the wrap chain.
func parentOf(err error) error {
	if c, ok := err.(errorWithCause); ok {
		return c.Cause()
	}

	return nil
}
// The Is*Error predicates below all follow the same pattern: check whether
// the error itself advertises the behavior via a marker method and, if not,
// recurse into its cause (if any).

// IsFatalError indicates that the error is fatal and the process should exit
// immediately after handling the error.
func IsFatalError(err error) bool {
	if e, ok := err.(interface {
		Fatal() bool
	}); ok {
		return e.Fatal()
	}
	if parent := parentOf(err); parent != nil {
		return IsFatalError(parent)
	}
	return false
}

// IsNotImplementedError indicates the client attempted to use a feature the
// server has not implemented (e.g. the batch endpoint).
func IsNotImplementedError(err error) bool {
	if e, ok := err.(interface {
		NotImplemented() bool
	}); ok {
		return e.NotImplemented()
	}
	if parent := parentOf(err); parent != nil {
		return IsNotImplementedError(parent)
	}
	return false
}

// IsAuthError indicates the client provided a request with invalid or no
// authentication credentials when credentials are required (e.g. HTTP 401).
func IsAuthError(err error) bool {
	if e, ok := err.(interface {
		AuthError() bool
	}); ok {
		return e.AuthError()
	}
	if parent := parentOf(err); parent != nil {
		return IsAuthError(parent)
	}
	return false
}

// IsSmudgeError indicates an error while smudging a file.
func IsSmudgeError(err error) bool {
	if e, ok := err.(interface {
		SmudgeError() bool
	}); ok {
		return e.SmudgeError()
	}
	if parent := parentOf(err); parent != nil {
		return IsSmudgeError(parent)
	}
	return false
}

// IsCleanPointerError indicates an error while cleaning a file.
func IsCleanPointerError(err error) bool {
	if e, ok := err.(interface {
		CleanPointerError() bool
	}); ok {
		return e.CleanPointerError()
	}
	if parent := parentOf(err); parent != nil {
		return IsCleanPointerError(parent)
	}
	return false
}

// IsNotAPointerError indicates the parsed data is not an LFS pointer.
func IsNotAPointerError(err error) bool {
	if e, ok := err.(interface {
		NotAPointerError() bool
	}); ok {
		return e.NotAPointerError()
	}
	if parent := parentOf(err); parent != nil {
		return IsNotAPointerError(parent)
	}
	return false
}

// IsBadPointerKeyError indicates that the parsed data has an invalid key.
func IsBadPointerKeyError(err error) bool {
	if e, ok := err.(interface {
		BadPointerKeyError() bool
	}); ok {
		return e.BadPointerKeyError()
	}
	if parent := parentOf(err); parent != nil {
		return IsBadPointerKeyError(parent)
	}
	return false
}

// StandardizeBadPointerError converts a bad pointer error about a missing
// "version" key into a NotAPointerError; any other error passes through
// unchanged.
func StandardizeBadPointerError(err error) error {
	if IsBadPointerKeyError(err) {
		badErr := err.(badPointerKeyError)
		if badErr.Expected == "version" {
			return NewNotAPointerError(err)
		}
	}
	return err
}

// IsDownloadDeclinedError indicates that the smudge operation should not download.
// TODO: I don't really like using errors to control that flow, it should be refactored.
func IsDownloadDeclinedError(err error) bool {
	if e, ok := err.(interface {
		DownloadDeclinedError() bool
	}); ok {
		return e.DownloadDeclinedError()
	}
	if parent := parentOf(err); parent != nil {
		return IsDownloadDeclinedError(parent)
	}
	return false
}

// IsRetriableError indicates the low level transfer had an error but the
// caller may retry the operation.
func IsRetriableError(err error) bool {
	if e, ok := err.(interface {
		RetriableError() bool
	}); ok {
		return e.RetriableError()
	}
	if parent := parentOf(err); parent != nil {
		return IsRetriableError(parent)
	}
	return false
}
// ErrorSetContext sets a value in the error's context. If the error has not
// been wrapped, it does nothing.
func ErrorSetContext(err error, key string, value interface{}) {
	if e, ok := err.(errorWrapper); ok {
		e.Set(key, value)
	}
}

// ErrorGetContext gets a value from the error's context. If the error has not
// been wrapped, it returns an empty string.
//
// NOTE(review): the not-wrapped fallback is "" (not nil) despite the
// interface{} return type; callers may depend on that.
func ErrorGetContext(err error, key string) interface{} {
	if e, ok := err.(errorWrapper); ok {
		return e.Get(key)
	}
	return ""
}

// ErrorDelContext removes a value from the error's context. If the error has
// not been wrapped, it does nothing.
func ErrorDelContext(err error, key string) {
	if e, ok := err.(errorWrapper); ok {
		e.Del(key)
	}
}

// ErrorContext returns the context map for an error if it is a wrappedError.
// If it is not a wrappedError it returns nil.
func ErrorContext(err error) map[string]interface{} {
	if e, ok := err.(errorWrapper); ok {
		return e.Context()
	}
	return nil
}

// errorWrapper is the interface satisfied by wrappedError: an error with a
// cause plus a mutable key/value context.
type errorWrapper interface {
	errorWithCause

	Set(string, interface{})
	Get(string) interface{}
	Del(string)
	Context() map[string]interface{}
}
// wrappedError is the base error wrapper. It provides a Message string, a
// stack, and a context map around a regular Go error.
type wrappedError struct {
	errorWithCause
	context map[string]interface{}
}

// newWrappedError creates a wrappedError around err annotated with message.
// A nil err becomes a generic "Error"; with an empty message an existing
// errorWithCause is kept as-is, while a plain error is wrapped under "LFS".
func newWrappedError(err error, message string) errorWrapper {
	if err == nil {
		err = errors.New("Error")
	}

	var errWithCause errorWithCause

	if len(message) > 0 {
		errWithCause = errors.Wrap(err, message).(errorWithCause)
	} else if ewc, ok := err.(errorWithCause); ok {
		errWithCause = ewc
	} else {
		errWithCause = errors.Wrap(err, "LFS").(errorWithCause)
	}

	return &wrappedError{
		context:        make(map[string]interface{}),
		errorWithCause: errWithCause,
	}
}

// Set sets the value for the key in the context.
func (e wrappedError) Set(key string, val interface{}) {
	e.context[key] = val
}

// Get gets the value for a key in the context.
func (e wrappedError) Get(key string) interface{} {
	return e.context[key]
}

// Del removes a key from the context.
func (e wrappedError) Del(key string) {
	delete(e.context, key)
}

// Context returns the underlying context map.
func (e wrappedError) Context() map[string]interface{} {
	return e.context
}
// Definitions for IsFatalError()

type fatalError struct {
	errorWrapper
}

func (e fatalError) Fatal() bool {
	return true
}

// NewFatalError wraps err as a fatal error.
func NewFatalError(err error) error {
	return fatalError{newWrappedError(err, "Fatal error")}
}

// Definitions for IsNotImplementedError()

type notImplementedError struct {
	errorWrapper
}

func (e notImplementedError) NotImplemented() bool {
	return true
}

// NewNotImplementedError wraps err as a not-implemented error.
func NewNotImplementedError(err error) error {
	return notImplementedError{newWrappedError(err, "Not implemented")}
}

// Definitions for IsAuthError()

type authError struct {
	errorWrapper
}

func (e authError) AuthError() bool {
	return true
}

// NewAuthError wraps err as an authentication error.
func NewAuthError(err error) error {
	return authError{newWrappedError(err, "Authentication required")}
}

// Definitions for IsSmudgeError()

type smudgeError struct {
	errorWrapper
}

func (e smudgeError) SmudgeError() bool {
	return true
}

// NewSmudgeError wraps err as a smudge error, recording the object ID and
// file name in the error context.
func NewSmudgeError(err error, oid, filename string) error {
	e := smudgeError{newWrappedError(err, "Smudge error")}
	ErrorSetContext(e, "OID", oid)
	ErrorSetContext(e, "FileName", filename)
	return e
}

// Definitions for IsCleanPointerError()

type cleanPointerError struct {
	errorWrapper
}

func (e cleanPointerError) CleanPointerError() bool {
	return true
}

// NewCleanPointerError creates a clean pointer error, recording the parsed
// pointer and the raw bytes in the error context.
func NewCleanPointerError(pointer interface{}, bytes []byte) error {
	err := New("pointer error")
	e := cleanPointerError{newWrappedError(err, "clean")}
	ErrorSetContext(e, "pointer", pointer)
	ErrorSetContext(e, "bytes", bytes)
	return e
}

// Definitions for IsNotAPointerError()

type notAPointerError struct {
	errorWrapper
}

func (e notAPointerError) NotAPointerError() bool {
	return true
}

// NewNotAPointerError wraps err as a pointer-file error.
func NewNotAPointerError(err error) error {
	return notAPointerError{newWrappedError(err, "Pointer file error")}
}

// badPointerKeyError records the expected and actual keys encountered while
// parsing a pointer file.
type badPointerKeyError struct {
	Expected string
	Actual   string

	errorWrapper
}

func (e badPointerKeyError) BadPointerKeyError() bool {
	return true
}

// NewBadPointerKeyError creates an error for an unexpected key encountered
// while parsing a pointer file.
func NewBadPointerKeyError(expected, actual string) error {
	err := Errorf("Expected key %s, got %s", expected, actual)
	return badPointerKeyError{expected, actual, newWrappedError(err, "pointer parsing")}
}

// Definitions for IsDownloadDeclinedError()

type downloadDeclinedError struct {
	errorWrapper
}

func (e downloadDeclinedError) DownloadDeclinedError() bool {
	return true
}

// NewDownloadDeclinedError wraps err as a download-declined error with the
// given message.
func NewDownloadDeclinedError(err error, msg string) error {
	return downloadDeclinedError{newWrappedError(err, msg)}
}

// Definitions for IsRetriableError()

type retriableError struct {
	errorWrapper
}

func (e retriableError) RetriableError() bool {
	return true
}

// NewRetriableError wraps err as a retriable error.
func NewRetriableError(err error) error {
	return retriableError{newWrappedError(err, "")}
}
// Stack captures and returns the current goroutine's stack trace, as
// formatted by runtime.Stack, truncated to the bytes actually written.
func Stack() []byte {
	const bufSize = 1024 * 1024
	trace := make([]byte, bufSize)
	n := runtime.Stack(trace, false)
	return trace[:n]
}
|
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errors
import "fmt"
// Code mirrors gRPC's codes.Code, so Trillian errors can be translated
// 1:1 into gRPC status codes by outer layers.
type Code uint32
// Descriptions are copied from
// https://github.com/grpc/grpc-go/blob/master/codes/codes.go for developer
// convenience.
const (
// OK is returned on success.
OK Code = 0
// Canceled indicates the operation was cancelled (typically by the caller).
Canceled Code = 1
// Unknown error. An example of where this error may be returned is
// if a Status value received from another address space belongs to
// an error-space that is not known in this address space. Also
// errors raised by APIs that do not return enough error information
// may be converted to this error.
Unknown Code = 2
// InvalidArgument indicates client specified an invalid argument.
// Note that this differs from FailedPrecondition. It indicates arguments
// that are problematic regardless of the state of the system
// (e.g., a malformed file name).
InvalidArgument Code = 3
// DeadlineExceeded means operation expired before completion.
// For operations that change the state of the system, this error may be
// returned even if the operation has completed successfully. For
// example, a successful response from a server could have been delayed
// long enough for the deadline to expire.
DeadlineExceeded Code = 4
// NotFound means some requested entity (e.g., file or directory) was
// not found.
NotFound Code = 5
// AlreadyExists means an attempt to create an entity failed because one
// already exists.
AlreadyExists Code = 6
// PermissionDenied indicates the caller does not have permission to
// execute the specified operation. It must not be used for rejections
// caused by exhausting some resource (use ResourceExhausted
// instead for those errors). It must not be
// used if the caller cannot be identified (use Unauthenticated
// instead for those errors).
PermissionDenied Code = 7
// Unauthenticated indicates the request does not have valid
// authentication credentials for the operation.
Unauthenticated Code = 16
// ResourceExhausted indicates some resource has been exhausted, perhaps
// a per-user quota, or perhaps the entire file system is out of space.
ResourceExhausted Code = 8
// FailedPrecondition indicates operation was rejected because the
// system is not in a state required for the operation's execution.
// For example, directory to be deleted may be non-empty, an rmdir
// operation is applied to a non-directory, etc.
//
// A litmus test that may help a service implementor in deciding
// between FailedPrecondition, Aborted, and Unavailable:
// (a) Use Unavailable if the client can retry just the failing call.
// (b) Use Aborted if the client should retry at a higher-level
// (e.g., restarting a read-modify-write sequence).
// (c) Use FailedPrecondition if the client should not retry until
// the system state has been explicitly fixed. E.g., if an "rmdir"
// fails because the directory is non-empty, FailedPrecondition
// should be returned since the client should not retry unless
// they have first fixed up the directory by deleting files from it.
// (d) Use FailedPrecondition if the client performs conditional
// REST Get/Update/Delete on a resource and the resource on the
// server does not match the condition. E.g., conflicting
// read-modify-write on the same resource.
FailedPrecondition Code = 9
// Aborted indicates the operation was aborted, typically due to a
// concurrency issue like sequencer check failures, transaction aborts,
// etc.
//
// See litmus test above for deciding between FailedPrecondition,
// Aborted, and Unavailable.
Aborted Code = 10
// OutOfRange means operation was attempted past the valid range.
// E.g., seeking or reading past end of file.
//
// Unlike InvalidArgument, this error indicates a problem that may
// be fixed if the system state changes. For example, a 32-bit file
// system will generate InvalidArgument if asked to read at an
// offset that is not in the range [0,2^32-1], but it will generate
// OutOfRange if asked to read from an offset past the current
// file size.
//
// There is a fair bit of overlap between FailedPrecondition and
// OutOfRange. We recommend using OutOfRange (the more specific
// error) when it applies so that callers who are iterating through
// a space can easily look for an OutOfRange error to detect when
// they are done.
OutOfRange Code = 11
// Unimplemented indicates operation is not implemented or not
// supported/enabled in this service.
Unimplemented Code = 12
// Internal errors. Means some invariants expected by underlying
// system has been broken. If you see one of these errors,
// something is very broken.
Internal Code = 13
// Unavailable indicates the service is currently unavailable.
// This is a most likely a transient condition and may be corrected
// by retrying with a backoff.
//
// See litmus test above for deciding between FailedPrecondition,
// Aborted, and Unavailable.
Unavailable Code = 14
// DataLoss indicates unrecoverable data loss or corruption.
DataLoss Code = 15
// Codes are expected to map 1:1 to gRPC codes. If you want to add a new
// value that's not on gRPC consider carefully the implications before
// you do so.
)
// TrillianError associates an error message with a failure code in order to
// make error translation possible by other layers (e.g., TrillianError to
// gRPC).
//
// TrillianErrors contain user-visible messages and codes, both of which should
// be chosen from the perspective of the RPC caller.
type TrillianError interface {
error
// Code returns the corresponding code to the error.
Code() Code
}
// trillianError is the concrete TrillianError implementation used by
// this package's constructors (New and Errorf).
type trillianError struct {
	code    Code   // gRPC-style status code, chosen from the caller's perspective
	message string // user-visible message
}

// Error implements the error interface by returning the user-visible
// message.
func (e *trillianError) Error() string {
	return e.message
}

// Code returns the status code assigned to this error.
func (e *trillianError) Code() Code {
	return e.code
}
// ErrorCode maps an error to its status Code.
// A nil error yields OK, a TrillianError yields its assigned Code, and
// any other error yields Unknown.
func ErrorCode(err error) Code {
	switch e := err.(type) {
	case nil:
		return OK
	case TrillianError:
		return e.Code()
	default:
		return Unknown
	}
}
// Errorf creates a TrillianError from the given code and a Printf-style
// formatted message.
//
// Errors created by this package are meant to be user-visible, so both
// the code and the message should be chosen from the perspective of the
// RPC caller.
func Errorf(code Code, format string, a ...interface{}) error {
	msg := fmt.Sprintf(format, a...)
	return &trillianError{code: code, message: msg}
}
// New creates a TrillianError carrying the given code and message.
//
// Errors created by this package are meant to be user-visible, so both
// the code and the message should be chosen from the perspective of the
// RPC caller.
func New(code Code, msg string) error {
	return &trillianError{code: code, message: msg}
}
Tweak comment on TrillianErrors (#561)
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errors
import "fmt"
// Code mirrors gRPC's codes.Code.
type Code uint32
// Descriptions are copied from
// https://github.com/grpc/grpc-go/blob/master/codes/codes.go for developer
// convenience.
const (
// OK is returned on success.
OK Code = 0
// Canceled indicates the operation was cancelled (typically by the caller).
Canceled Code = 1
// Unknown error. An example of where this error may be returned is
// if a Status value received from another address space belongs to
// an error-space that is not known in this address space. Also
// errors raised by APIs that do not return enough error information
// may be converted to this error.
Unknown Code = 2
// InvalidArgument indicates client specified an invalid argument.
// Note that this differs from FailedPrecondition. It indicates arguments
// that are problematic regardless of the state of the system
// (e.g., a malformed file name).
InvalidArgument Code = 3
// DeadlineExceeded means operation expired before completion.
// For operations that change the state of the system, this error may be
// returned even if the operation has completed successfully. For
// example, a successful response from a server could have been delayed
// long enough for the deadline to expire.
DeadlineExceeded Code = 4
// NotFound means some requested entity (e.g., file or directory) was
// not found.
NotFound Code = 5
// AlreadyExists means an attempt to create an entity failed because one
// already exists.
AlreadyExists Code = 6
// PermissionDenied indicates the caller does not have permission to
// execute the specified operation. It must not be used for rejections
// caused by exhausting some resource (use ResourceExhausted
// instead for those errors). It must not be
// used if the caller cannot be identified (use Unauthenticated
// instead for those errors).
PermissionDenied Code = 7
// Unauthenticated indicates the request does not have valid
// authentication credentials for the operation.
Unauthenticated Code = 16
// ResourceExhausted indicates some resource has been exhausted, perhaps
// a per-user quota, or perhaps the entire file system is out of space.
ResourceExhausted Code = 8
// FailedPrecondition indicates operation was rejected because the
// system is not in a state required for the operation's execution.
// For example, directory to be deleted may be non-empty, an rmdir
// operation is applied to a non-directory, etc.
//
// A litmus test that may help a service implementor in deciding
// between FailedPrecondition, Aborted, and Unavailable:
// (a) Use Unavailable if the client can retry just the failing call.
// (b) Use Aborted if the client should retry at a higher-level
// (e.g., restarting a read-modify-write sequence).
// (c) Use FailedPrecondition if the client should not retry until
// the system state has been explicitly fixed. E.g., if an "rmdir"
// fails because the directory is non-empty, FailedPrecondition
// should be returned since the client should not retry unless
// they have first fixed up the directory by deleting files from it.
// (d) Use FailedPrecondition if the client performs conditional
// REST Get/Update/Delete on a resource and the resource on the
// server does not match the condition. E.g., conflicting
// read-modify-write on the same resource.
FailedPrecondition Code = 9
// Aborted indicates the operation was aborted, typically due to a
// concurrency issue like sequencer check failures, transaction aborts,
// etc.
//
// See litmus test above for deciding between FailedPrecondition,
// Aborted, and Unavailable.
Aborted Code = 10
// OutOfRange means operation was attempted past the valid range.
// E.g., seeking or reading past end of file.
//
// Unlike InvalidArgument, this error indicates a problem that may
// be fixed if the system state changes. For example, a 32-bit file
// system will generate InvalidArgument if asked to read at an
// offset that is not in the range [0,2^32-1], but it will generate
// OutOfRange if asked to read from an offset past the current
// file size.
//
// There is a fair bit of overlap between FailedPrecondition and
// OutOfRange. We recommend using OutOfRange (the more specific
// error) when it applies so that callers who are iterating through
// a space can easily look for an OutOfRange error to detect when
// they are done.
OutOfRange Code = 11
// Unimplemented indicates operation is not implemented or not
// supported/enabled in this service.
Unimplemented Code = 12
// Internal errors. Means some invariants expected by underlying
// system has been broken. If you see one of these errors,
// something is very broken.
Internal Code = 13
// Unavailable indicates the service is currently unavailable.
// This is a most likely a transient condition and may be corrected
// by retrying with a backoff.
//
// See litmus test above for deciding between FailedPrecondition,
// Aborted, and Unavailable.
Unavailable Code = 14
// DataLoss indicates unrecoverable data loss or corruption.
DataLoss Code = 15
// Codes are expected to map 1:1 to gRPC codes. If you want to add a new
// value that's not on gRPC consider carefully the implications before
// you do so.
)
// TrillianError associates an error message with a failure code in order to
// make error translation possible by other layers (e.g., TrillianError to
// gRPC).
//
// TrillianError instances contain user-visible messages and codes, both of
// which should be chosen from the perspective of the RPC caller.
type TrillianError interface {
error
// Code returns the corresponding code to the error.
Code() Code
}
type trillianError struct {
code Code
message string
}
func (e *trillianError) Error() string {
return e.message
}
func (e *trillianError) Code() Code {
return e.code
}
// ErrorCode returns the assigned Code if err is a TrillianError.
// If err is nil, OK is returned.
// If err is not a TrillianError, Unknown is returned.
func ErrorCode(err error) Code {
if err == nil {
return OK
}
if err, ok := err.(TrillianError); ok {
return err.Code()
}
return Unknown
}
// Errorf creates a TrillianError from the specified code and message.
//
// Note that errors created by this package are meant to be user-visible,
// therefore both code and message should be chosen from the perspective of the
// RPC caller.
func Errorf(code Code, format string, a ...interface{}) error {
return &trillianError{code, fmt.Sprintf(format, a...)}
}
// New creates a TrillianError from the specified code and message.
//
// Note that errors created by this package are meant to be user-visible,
// therefore both code and message should be chosen from the perspective of the
// RPC caller.
func New(code Code, msg string) error {
return &trillianError{code, msg}
}
|
package main
import (
"fmt"
"log"
"time"
"code.google.com/p/go.crypto/bcrypt"
"github.com/dchest/uniuri"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/jinzhu/gorm"
_ "github.com/mattn/go-sqlite3"
)
var db gorm.DB
var err error
// User is a registered account. Password holds a bcrypt hash once the
// account has been created (see postUserCollection).
type User struct {
	Id          int64
	Username    string `form:"username" binding:"required"`
	Password    string `form:"password" binding:"required"`
	Tokens      []Token      // auth tokens issued to this user
	Collections []Collection // collections owned by this user
}

// Token is an opaque session credential tied to a user. UserId is
// hidden from JSON responses.
type Token struct {
	Id        int64     `json:"id"`
	UserId    int64     `json:"-"`
	Token     string    `json:"token"`
	Timestamp time.Time `json:"timestamp"`
}

// Collection is a named grouping owned by a single user. Names are
// enforced unique per user at the handler level, not by the schema.
type Collection struct {
	Id     int64  `json:"id"`
	UserId int64  `json:"-"`
	Name   string `form:"name" binding:"required" json:"name"`
}
// GenerateToken produces a random 20-character token drawn from an
// alphabet of upper-case letters, digits, and punctuation.
func GenerateToken() string {
	const alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%&()+=-_?"
	return uniuri.NewLenChars(20, []byte(alphabet))
}
// postUserCollection handles POST /users/: it registers a new user,
// storing the password as a bcrypt hash (cost 10).
// Responses: 400 incomplete form, 409 username taken, 200 on success.
func postUserCollection(c *gin.Context) {
	type Response struct {
		Success bool   `json:"success"`
		Error   string `json:"error,omitempty"`
	}
	var user User
	var resp Response
	// NOTE(review): BindWith's error is ignored; malformed bodies fall
	// through to the empty-field check below.
	c.BindWith(&user, binding.Form)
	if user.Username == "" || user.Password == "" {
		resp.Success = false
		resp.Error = "Incomplete form submission."
		c.JSON(400, resp)
	} else {
		var count int
		db.Model(User{}).Where("username = ?", user.Username).Count(&count)
		// NOTE(review): leftover debug output — consider removing.
		fmt.Println(count)
		if count == 0 {
			var hashedPassword []byte
			// NOTE(review): this assigns the shared package-level err;
			// under concurrent requests that is a data race. Also,
			// log.Fatal terminates the whole server on a bcrypt
			// failure — a 500 response would be safer.
			hashedPassword, err = bcrypt.GenerateFromPassword([]byte(user.Password), 10)
			if err != nil {
				log.Fatal(err)
			}
			user.Password = string(hashedPassword)
			db.Create(&user)
			resp.Success = true
			c.JSON(200, resp)
		} else {
			resp.Success = false
			resp.Error = fmt.Sprintf("The username '%s' is already in use.", user.Username)
			c.JSON(409, resp)
		}
	}
}
// postTokenCollection handles POST /tokens/: it performs a login. Given
// a valid username/password pair it mints a new unique token, persists
// it, and returns it to the caller.
// Responses: 400 incomplete form, 404 unknown user, 403 bad password,
// 200 with the token on success.
func postTokenCollection(c *gin.Context) {
	type Response struct {
		Success bool   `json:"success"`
		Error   string `json:"error,omitempty"`
		Token   string `json:"token,omitempty"`
	}
	var user User
	var resp Response
	// NOTE(review): BindWith's error is ignored; malformed bodies fall
	// through to the empty-field check below.
	c.BindWith(&user, binding.Form)
	if user.Username == "" || user.Password == "" {
		resp.Success = false
		resp.Error = "Incomplete form submission."
		c.JSON(400, resp)
	} else {
		var count int
		db.Model(User{}).Where("username = ?", user.Username).Count(&count)
		if count == 0 {
			resp.Success = false
			resp.Error = "User not found."
			c.JSON(404, resp)
		} else {
			// Load the stored record and verify the submitted password
			// against its bcrypt hash.
			var eUser User
			db.Where(&User{Username: user.Username}).First(&eUser)
			// NOTE(review): assigns the shared package-level err — a
			// data race under concurrent requests; use a local instead.
			err = bcrypt.CompareHashAndPassword([]byte(eUser.Password), []byte(user.Password))
			if err != nil {
				resp.Success = false
				resp.Error = "Incorrect username/password combination."
				c.JSON(403, resp)
			} else {
				// Generate candidate tokens until one is not already
				// present in the tokens table.
				contender := GenerateToken()
				for {
					var count int
					db.Model(Token{}).Where("token = ?", contender).Count(&count)
					if count == 0 {
						break
					} else {
						contender = GenerateToken()
					}
				}
				var token Token
				token.Token = contender
				token.Timestamp = time.Now()
				db.Create(&token)
				// Attach the token to the user; presumably the Save
				// fills in the token's UserId via the gorm association
				// — confirm against this gorm version's behavior.
				eUser.Tokens = append(eUser.Tokens, token)
				db.Save(&eUser)
				resp.Success = true
				resp.Token = token.Token
				c.JSON(200, resp)
			}
		}
	}
}
// getTokenCollection handles GET /tokens/: authenticated by a token, it
// returns every token belonging to that token's owner.
// Responses: 403 missing token, 404 unknown token, 200 with the list.
func getTokenCollection(c *gin.Context) {
	type Response struct {
		Success bool    `json:"success"`
		Error   string  `json:"error,omitempty"`
		Tokens  []Token `json:"tokens,omitempty"`
	}
	var resp Response
	token := c.Request.FormValue("token")
	if token == "" {
		resp.Success = false
		resp.Error = "Failed to authenticate with a token."
		c.JSON(403, resp)
	} else {
		var count int
		db.Model(Token{}).Where("token = ?", token).Count(&count)
		if count == 0 {
			resp.Success = false
			resp.Error = "Token not found."
			c.JSON(404, resp)
		} else {
			var tokenRecord Token
			db.Model(Token{}).Where("token = ?", token).First(&tokenRecord)
			var tokens []Token
			// BUG FIX: this lookup previously ran against the User
			// model (users table) even though it fills a []Token; the
			// owner's tokens live in the tokens table, so query the
			// Token model instead.
			db.Model(Token{}).Where("user_id = ?", tokenRecord.UserId).Find(&tokens)
			resp.Success = true
			resp.Tokens = tokens
			c.JSON(200, resp)
		}
	}
}
// postCollectionCollection handles POST /collections/: authenticated by
// a token, it creates a new collection for the token's owner, rejecting
// duplicate names within that user's collections.
// Responses: 403 missing token, 404 unknown token, 400 incomplete form,
// 409 duplicate name, 200 on success.
func postCollectionCollection(c *gin.Context) {
	type Response struct {
		Success bool   `json:"success"`
		Error   string `json:"error,omitempty"`
	}
	var resp Response
	var collection Collection
	token := c.Request.FormValue("token")
	if token == "" {
		resp.Success = false
		resp.Error = "Failed to authenticate with a token."
		c.JSON(403, resp)
	} else {
		var count int
		db.Model(Token{}).Where("token = ?", token).Count(&count)
		if count == 0 {
			resp.Success = false
			resp.Error = "Token not found."
			c.JSON(404, resp)
		} else {
			// NOTE(review): BindWith's error is ignored; malformed
			// bodies fall through to the empty-name check below.
			c.BindWith(&collection, binding.Form)
			if collection.Name == "" {
				resp.Success = false
				resp.Error = "Incomplete form submission."
				c.JSON(400, resp)
			} else {
				// Resolve token -> owning user.
				var tokenRecord Token
				db.Model(Token{}).Where("token = ?", token).First(&tokenRecord)
				var user User
				db.Model(User{}).Where("id = ?", tokenRecord.UserId).First(&user)
				// Enforce per-user name uniqueness in application code
				// (the schema has no unique constraint).
				var collections []Collection
				db.Model(Collection{}).Where("user_id = ?", user.Id).Find(&collections)
				duplicate := false
				for _, item := range collections {
					if item.Name == collection.Name {
						duplicate = true
						break
					}
				}
				if duplicate {
					resp.Success = false
					resp.Error = fmt.Sprintf("A collection named '%s' already exists.", collection.Name)
					c.JSON(409, resp)
				} else {
					// NOTE(review): collection.UserId is never set
					// explicitly; presumably the Save below fills it
					// via the gorm association — confirm.
					db.Create(&collection)
					user.Collections = append(user.Collections, collection)
					db.Save(&user)
					resp.Success = true
					c.JSON(200, resp)
				}
			}
		}
	}
}
// getCollectionCollection handles GET /collections/: authenticated by a
// token, it returns every collection belonging to the token's owner.
// Responses: 403 missing token, 404 unknown token, 200 with the list.
func getCollectionCollection(c *gin.Context) {
	type Response struct {
		Success     bool         `json:"success"`
		Error       string       `json:"error,omitempty"`
		Collections []Collection `json:"collections,omitempty"`
	}
	var resp Response
	token := c.Request.FormValue("token")
	if token == "" {
		resp.Success = false
		resp.Error = "Failed to authenticate with a token."
		c.JSON(403, resp)
	} else {
		var count int
		db.Model(Token{}).Where("token = ?", token).Count(&count)
		if count == 0 {
			resp.Success = false
			resp.Error = "Token not found."
			c.JSON(404, resp)
		} else {
			// Resolve token -> owner, then list that user's collections.
			var tokenRecord Token
			db.Model(Token{}).Where("token = ?", token).First(&tokenRecord)
			var collections []Collection
			db.Model(Collection{}).Where("user_id = ?", tokenRecord.UserId).Find(&collections)
			resp.Success = true
			resp.Collections = collections
			c.JSON(200, resp)
		}
	}
}
// main opens (or creates) the SQLite database, ensures the tables
// exist, registers the REST routes, and serves the API on :8000.
func main() {
	r := gin.Default()
	db, err = gorm.Open("sqlite3", "nestor_server.db")
	if err != nil {
		log.Fatal(err)
	}
	db.DB()
	// NOTE(review): Ping's error is discarded, so a dead database only
	// surfaces on the first real query.
	db.DB().Ping()
	// NOTE(review): CreateTable errors (e.g. table already exists) are
	// ignored — confirm that is acceptable for this gorm version.
	db.CreateTable(User{})
	db.CreateTable(Token{})
	db.CreateTable(Collection{})
	r.POST("/users/", postUserCollection)
	r.GET("/tokens/", getTokenCollection)
	r.POST("/tokens/", postTokenCollection)
	r.GET("/collections/", getCollectionCollection)
	r.POST("/collections/", postCollectionCollection)
	r.Run(":8000")
}
Added a route for uploading keys
package main
import (
"fmt"
"log"
"time"
"code.google.com/p/go.crypto/bcrypt"
"github.com/dchest/uniuri"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/jinzhu/gorm"
_ "github.com/mattn/go-sqlite3"
)
var db gorm.DB
var err error
type User struct {
Id int64
Username string `form:"username" binding:"required"`
Password string `form:"password" binding:"required"`
Tokens []Token
Collections []Collection
Keys []Key
}
type Token struct {
Id int64 `json:"id"`
UserId int64 `json:"-"`
Token string `json:"token"`
Timestamp time.Time `json:"timestamp"`
}
type Collection struct {
Id int64 `json:"id"`
UserId int64 `json:"-"`
Name string `form:"name" binding:"required" json:"name"`
Keys []Key
}
type Key struct {
Id int64
UserId int64
CollectionId int64
Name string `form:"name" binding:"required"`
Key string `form:"key" binding:"required"`
Timestamp time.Time
}
func GenerateToken() string {
return uniuri.NewLenChars(20, []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%&()+=-_?"))
}
func postUserCollection(c *gin.Context) {
type Response struct {
Success bool `json:"success"`
Error string `json:"error,omitempty"`
}
var user User
var resp Response
c.BindWith(&user, binding.Form)
if user.Username == "" || user.Password == "" {
resp.Success = false
resp.Error = "Incomplete form submission."
c.JSON(400, resp)
} else {
var count int
db.Model(User{}).Where("username = ?", user.Username).Count(&count)
fmt.Println(count)
if count == 0 {
var hashedPassword []byte
hashedPassword, err = bcrypt.GenerateFromPassword([]byte(user.Password), 10)
if err != nil {
log.Fatal(err)
}
user.Password = string(hashedPassword)
db.Create(&user)
resp.Success = true
c.JSON(200, resp)
} else {
resp.Success = false
resp.Error = fmt.Sprintf("The username '%s' is already in use.", user.Username)
c.JSON(409, resp)
}
}
}
func postTokenCollection(c *gin.Context) {
type Response struct {
Success bool `json:"success"`
Error string `json:"error,omitempty"`
Token string `json:"token,omitempty"`
}
var user User
var resp Response
c.BindWith(&user, binding.Form)
if user.Username == "" || user.Password == "" {
resp.Success = false
resp.Error = "Incomplete form submission."
c.JSON(400, resp)
} else {
var count int
db.Model(User{}).Where("username = ?", user.Username).Count(&count)
if count == 0 {
resp.Success = false
resp.Error = "User not found."
c.JSON(404, resp)
} else {
var eUser User
db.Where(&User{Username: user.Username}).First(&eUser)
err = bcrypt.CompareHashAndPassword([]byte(eUser.Password), []byte(user.Password))
if err != nil {
resp.Success = false
resp.Error = "Incorrect username/password combination."
c.JSON(403, resp)
} else {
contender := GenerateToken()
for {
var count int
db.Model(Token{}).Where("token = ?", contender).Count(&count)
if count == 0 {
break
} else {
contender = GenerateToken()
}
}
var token Token
token.Token = contender
token.Timestamp = time.Now()
db.Create(&token)
eUser.Tokens = append(eUser.Tokens, token)
db.Save(&eUser)
resp.Success = true
resp.Token = token.Token
c.JSON(200, resp)
}
}
}
}
// getTokenCollection handles GET /tokens/: authenticated by a token, it
// returns every token belonging to that token's owner.
// Responses: 403 missing token, 404 unknown token, 200 with the list.
func getTokenCollection(c *gin.Context) {
	type Response struct {
		Success bool    `json:"success"`
		Error   string  `json:"error,omitempty"`
		Tokens  []Token `json:"tokens,omitempty"`
	}
	var resp Response
	token := c.Request.FormValue("token")
	if token == "" {
		resp.Success = false
		resp.Error = "Failed to authenticate with a token."
		c.JSON(403, resp)
	} else {
		var count int
		db.Model(Token{}).Where("token = ?", token).Count(&count)
		if count == 0 {
			resp.Success = false
			resp.Error = "Token not found."
			c.JSON(404, resp)
		} else {
			var tokenRecord Token
			db.Model(Token{}).Where("token = ?", token).First(&tokenRecord)
			var tokens []Token
			// BUG FIX: this lookup previously ran against the User
			// model (users table) even though it fills a []Token; the
			// owner's tokens live in the tokens table, so query the
			// Token model instead.
			db.Model(Token{}).Where("user_id = ?", tokenRecord.UserId).Find(&tokens)
			resp.Success = true
			resp.Tokens = tokens
			c.JSON(200, resp)
		}
	}
}
func postCollectionCollection(c *gin.Context) {
type Response struct {
Success bool `json:"success"`
Error string `json:"error,omitempty"`
}
var resp Response
var collection Collection
token := c.Request.FormValue("token")
if token == "" {
resp.Success = false
resp.Error = "Failed to authenticate with a token."
c.JSON(403, resp)
} else {
var count int
db.Model(Token{}).Where("token = ?", token).Count(&count)
if count == 0 {
resp.Success = false
resp.Error = "Token not found."
c.JSON(404, resp)
} else {
c.BindWith(&collection, binding.Form)
if collection.Name == "" {
resp.Success = false
resp.Error = "Incomplete form submission."
c.JSON(400, resp)
} else {
var tokenRecord Token
db.Model(Token{}).Where("token = ?", token).First(&tokenRecord)
var user User
db.Model(User{}).Where("id = ?", tokenRecord.UserId).First(&user)
var collections []Collection
db.Model(Collection{}).Where("user_id = ?", user.Id).Find(&collections)
duplicate := false
for _, item := range collections {
if item.Name == collection.Name {
duplicate = true
break
}
}
if duplicate {
resp.Success = false
resp.Error = fmt.Sprintf("A collection named '%s' already exists.", collection.Name)
c.JSON(409, resp)
} else {
db.Create(&collection)
user.Collections = append(user.Collections, collection)
db.Save(&user)
resp.Success = true
c.JSON(200, resp)
}
}
}
}
}
func getCollectionCollection(c *gin.Context) {
type Response struct {
Success bool `json:"success"`
Error string `json:"error,omitempty"`
Collections []Collection `json:"collections,omitempty"`
}
var resp Response
token := c.Request.FormValue("token")
if token == "" {
resp.Success = false
resp.Error = "Failed to authenticate with a token."
c.JSON(403, resp)
} else {
var count int
db.Model(Token{}).Where("token = ?", token).Count(&count)
if count == 0 {
resp.Success = false
resp.Error = "Token not found."
c.JSON(404, resp)
} else {
var tokenRecord Token
db.Model(Token{}).Where("token = ?", token).First(&tokenRecord)
var collections []Collection
db.Model(Collection{}).Where("user_id = ?", tokenRecord.UserId).Find(&collections)
resp.Success = true
resp.Collections = collections
c.JSON(200, resp)
}
}
}
// postKeyCollection handles POST /keys/: authenticated by a token, it
// stores a named key inside one of the caller's collections. The target
// collection id arrives in the "collection" form field.
// Responses: 403 missing token or foreign collection, 404 unknown
// token, 400 incomplete form, 409 duplicate key name, 200 on success.
func postKeyCollection(c *gin.Context) {
	type Response struct {
		Success bool   `json:"success"`
		Error   string `json:"error,omitempty"`
	}
	var resp Response
	var key Key
	token := c.Request.FormValue("token")
	collection := c.Request.FormValue("collection")
	if token == "" {
		resp.Success = false
		resp.Error = "Failed to authenticate with a token."
		c.JSON(403, resp)
	} else {
		var count int
		db.Model(Token{}).Where("token = ?", token).Count(&count)
		if count == 0 {
			resp.Success = false
			resp.Error = "Token not found."
			c.JSON(404, resp)
		} else {
			// NOTE(review): BindWith's error is ignored; malformed
			// bodies fall through to the empty-field check below.
			c.BindWith(&key, binding.Form)
			if key.Name == "" || key.Key == "" || collection == "" {
				resp.Success = false
				resp.Error = "Incomplete form submission."
				c.JSON(400, resp)
			} else {
				// Resolve token -> user, then load the target
				// collection and verify ownership before writing.
				var tokenRecord Token
				db.Model(Token{}).Where("token = ?", token).First(&tokenRecord)
				var user User
				db.Model(User{}).Where("id = ?", tokenRecord.UserId).First(&user)
				var collectionRecord Collection
				db.Model(Collection{}).Where("id = ?", collection).First(&collectionRecord)
				if collectionRecord.UserId != user.Id {
					resp.Success = false
					resp.Error = "You aren't authorized to access this collection."
					c.JSON(403, resp)
				} else {
					// Key names must be unique across ALL of the
					// user's keys, not just within this collection.
					var keys []Key
					db.Model(Key{}).Where("user_id = ?", user.Id).Find(&keys)
					duplicate := false
					for _, item := range keys {
						if item.Name == key.Name {
							duplicate = true
							break
						}
					}
					if duplicate {
						resp.Success = false
						resp.Error = fmt.Sprintf("A key named '%s' already exists.", key.Name)
						c.JSON(409, resp)
					} else {
						key.UserId = user.Id
						key.Timestamp = time.Now()
						db.Create(&key)
						// NOTE(review): key.CollectionId is never set
						// explicitly; presumably the Save of
						// collectionRecord fills it via the gorm
						// association — confirm.
						user.Keys = append(user.Keys, key)
						collectionRecord.Keys = append(collectionRecord.Keys, key)
						db.Save(&user)
						db.Save(&collectionRecord)
						resp.Success = true
						c.JSON(200, resp)
					}
				}
			}
		}
	}
}
func main() {
r := gin.Default()
db, err = gorm.Open("sqlite3", "nestor_server.db")
if err != nil {
log.Fatal(err)
}
db.DB()
db.DB().Ping()
db.CreateTable(User{})
db.CreateTable(Token{})
db.CreateTable(Collection{})
db.CreateTable(Key{})
r.POST("/users/", postUserCollection)
r.GET("/tokens/", getTokenCollection)
r.POST("/tokens/", postTokenCollection)
r.GET("/collections/", getCollectionCollection)
r.POST("/collections/", postCollectionCollection)
r.POST("/keys/", postKeyCollection)
r.Run(":8000")
}
|
package main
import (
"context"
"fmt"
"os"
"strconv"
"sync"
"time"
"github.com/rs/zerolog/log"
"github.com/wzhliang/xing"
"github.com/wzhliang/xing/examples/hello"
)
var success = 0
var m sync.Mutex
// _assert logs err (if non-nil) as a client-side error. It does not
// abort the test run — callers check err themselves afterwards.
func _assert(err error) {
	if err != nil {
		log.Error().Msgf("Client: %v", err)
	}
}
// _assertReturn compares the expected reply req with the received reply
// resp, logging any mismatch, and bumps the global success counter.
//
// NOTE(review): success is incremented even when req != resp, so the
// counter tracks completed calls rather than correct replies — confirm
// that is intentional before trusting the final success check in main.
func _assertReturn(req, resp string) {
	if req != resp {
		log.Error().Msgf("Client: %s != %s", req, resp)
	}
	success++
}
// main drives a load test against the hello greeter service over AMQP:
// for each of the n iterations (n comes from os.Args[1]) it issues
// three Hello RPCs with canned names and checks the canned replies,
// then prints how many calls completed.
func main() {
	url := "amqp://guest:guest@localhost:5672/"
	// RABBITMQ env var overrides the default local broker URL.
	mq := os.Getenv("RABBITMQ")
	if mq == "" {
		mq = url
	}
	producer, err := xing.NewClient("orchestration.controller", mq,
		xing.SetIdentifier(&xing.RandomIdentifier{}),
		xing.SetSerializer(&xing.JSONSerializer{}),
	)
	if err != nil {
		log.Error().Msg("failed to create new client")
		return
	}
	cli := hello.NewGreeterClient("host.server", producer)
	// NOTE(review): panics if no argument is given, and on a non-numeric
	// argument it only logs and continues with n == 0.
	n, err := strconv.Atoi(os.Args[1])
	if err != nil {
		log.Error().Str("#", os.Args[1]).Msg("Wrong argument")
	}
	for i := 0; i < n; i++ {
		// Each RPC gets its own 5s deadline.
		ctx, cancel := context.WithTimeout(context.Background(), 5000*time.Millisecond)
		ret, err := cli.Hello(ctx, &hello.HelloRequest{
			Name: "鸠摩智",
		})
		_assert(err)
		if err == nil {
			_assertReturn("yo", ret.Greeting)
		}
		cancel()
		ctx, cancel = context.WithTimeout(context.Background(), 5000*time.Millisecond)
		ret, err = cli.Hello(ctx, &hello.HelloRequest{
			Name: "王语嫣",
		})
		_assert(err)
		if err == nil {
			_assertReturn("美女好", ret.Greeting)
		}
		cancel()
		ctx, cancel = context.WithTimeout(context.Background(), 5000*time.Millisecond)
		ret, err = cli.Hello(ctx, &hello.HelloRequest{
			Name: "段誉",
		})
		_assert(err)
		if err == nil {
			_assertReturn("陛下好", ret.Greeting)
		}
		cancel()
		time.Sleep(1 * time.Millisecond)
	}
	fmt.Printf("success=%d\n", success)
	// Three calls per iteration, so a clean run counts n*3.
	// NOTE(review): the failure message lacks a trailing newline.
	if success != n*3 {
		fmt.Printf("test failed")
	}
	producer.Close()
}
make this a test for worker reconnect
package main
import (
"context"
"fmt"
"os"
"strconv"
"sync"
"time"
"github.com/rs/zerolog/log"
"github.com/wzhliang/xing"
"github.com/wzhliang/xing/examples/hello"
)
var success = 0
var m sync.Mutex
func _assert(err error) {
if err != nil {
log.Error().Msgf("Client: %v", err)
}
}
func _assertReturn(req, resp string) {
if req != resp {
log.Error().Msgf("Client: %s != %s", req, resp)
}
success++
}
// main drives a worker-reconnect test against the hello greeter service
// over AMQP: each of the n iterations (n comes from os.Args[1]) issues
// a single Hello RPC, then sleeps 20s so the worker's connection can be
// bounced before the next call.
func main() {
	url := "amqp://guest:guest@localhost:5672/"
	// RABBITMQ env var overrides the default local broker URL.
	mq := os.Getenv("RABBITMQ")
	if mq == "" {
		mq = url
	}
	producer, err := xing.NewClient("orchestration.controller", mq,
		xing.SetIdentifier(&xing.RandomIdentifier{}),
		xing.SetSerializer(&xing.JSONSerializer{}),
	)
	if err != nil {
		log.Error().Msg("failed to create new client")
		return
	}
	cli := hello.NewGreeterClient("host.server", producer)
	n, err := strconv.Atoi(os.Args[1])
	if err != nil {
		log.Error().Str("#", os.Args[1]).Msg("Wrong argument")
		// BUG FIX: previously execution continued with n == 0 after a
		// bad argument; bail out instead of reporting a vacuous result.
		return
	}
	for i := 0; i < n; i++ {
		// Each RPC gets its own 5s deadline.
		ctx, cancel := context.WithTimeout(context.Background(), 5000*time.Millisecond)
		ret, err := cli.Hello(ctx, &hello.HelloRequest{
			Name: "鸠摩智",
		})
		_assert(err)
		if err == nil {
			_assertReturn("yo", ret.Greeting)
		}
		cancel()
		time.Sleep(20 * time.Second)
	}
	fmt.Printf("success=%d\n", success)
	// BUG FIX: this loop now makes exactly one call per iteration (the
	// original three-call sequence was trimmed for the reconnect test),
	// so the expected count is n, not n*3.
	if success != n {
		fmt.Println("test failed")
	}
	producer.Close()
}
|
// Copyright 2017-2021 Jeff Foley. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package http
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/cookiejar"
"net/http/httputil"
"net/url"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/OWASP/Amass/v3/filter"
amassnet "github.com/OWASP/Amass/v3/net"
"github.com/OWASP/Amass/v3/net/dns"
"github.com/PuerkitoBio/goquery"
"github.com/caffix/stringset"
"github.com/geziyor/geziyor"
"github.com/geziyor/geziyor/client"
)
const (
// Accept is the default HTTP Accept header value used by Amass.
Accept = "text/html,application/json,application/xhtml+xml,application/xml;q=0.5,*/*;q=0.2"
// AcceptLang is the default HTTP Accept-Language header value used by Amass.
AcceptLang = "en-US,en;q=0.5"
httpTimeout = 30 * time.Second
handshakeTimeout = 10 * time.Second
)
var (
// UserAgent is the default user agent used by Amass during HTTP requests.
UserAgent string
subRE = dns.AnySubdomainRegex()
crawlRE = regexp.MustCompile(`\.\w{2,6}($|\?|#)`)
crawlFileEnds = []string{"html", "do", "action", "cgi"}
crawlFileStarts = []string{"js", "htm", "as", "php", "inc"}
nameStripRE = regexp.MustCompile(`^u[0-9a-f]{4}|20|22|25|27|2b|2f|3d|3a|40`)
)
// DefaultClient is the same HTTP client used by the package methods.
var DefaultClient *http.Client
// BasicAuth contains the data used for HTTP basic authentication.
type BasicAuth struct {
Username string
Password string
}
func init() {
jar, _ := cookiejar.New(nil)
DefaultClient = &http.Client{
Timeout: httpTimeout,
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: amassnet.DialContext,
MaxIdleConns: 200,
MaxConnsPerHost: 50,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: handshakeTimeout,
ExpectContinueTimeout: 5 * time.Second,
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
Jar: jar,
}
switch runtime.GOOS {
case "windows":
UserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36"
case "darwin":
UserAgent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36"
default:
UserAgent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36"
}
}
// CopyCookies copies cookies from one domain to another. Some of our data
// sources rely on shared auth tokens and this avoids sending extra requests
// to have the site reissue cookies for the other domains.
func CopyCookies(src string, dest string) {
srcURL, _ := url.Parse(src)
destURL, _ := url.Parse(dest)
DefaultClient.Jar.SetCookies(destURL, DefaultClient.Jar.Cookies(srcURL))
}
// CheckCookie checks if a cookie exists in the cookie jar for a given host
func CheckCookie(urlString string, cookieName string) bool {
cookieURL, _ := url.Parse(urlString)
found := false
for _, cookie := range DefaultClient.Jar.Cookies(cookieURL) {
if cookie.Name == cookieName {
found = true
break
}
}
return found
}
// RequestWebPage returns a string containing the entire response for the provided URL when successful.
func RequestWebPage(ctx context.Context, u string, body io.Reader, hvals map[string]string, auth *BasicAuth) (string, error) {
method := "GET"
if body != nil {
method = "POST"
}
req, err := http.NewRequestWithContext(ctx, method, u, body)
if err != nil {
return "", err
}
req.Close = true
if auth != nil && auth.Username != "" && auth.Password != "" {
req.SetBasicAuth(auth.Username, auth.Password)
}
req.Header.Set("User-Agent", UserAgent)
req.Header.Set("Accept", Accept)
req.Header.Set("Accept-Language", AcceptLang)
for k, v := range hvals {
req.Header.Set(k, v)
}
resp, err := DefaultClient.Do(req)
if err != nil {
return "", err
}
in, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
err = fmt.Errorf("%d: %s", resp.StatusCode, resp.Status)
}
return string(in), err
}
// Crawl will spider the web page at the URL argument looking for DNS names within the scope argument.
func Crawl(ctx context.Context, u string, scope []string, max int, f filter.Filter) ([]string, error) {
select {
case <-ctx.Done():
return nil, fmt.Errorf("The context expired")
default:
}
newScope := append([]string{}, scope...)
target := subRE.FindString(u)
if target != "" {
var found bool
for _, domain := range newScope {
if target == domain {
found = true
break
}
}
if !found {
newScope = append(newScope, target)
}
}
if f == nil {
f = filter.NewStringFilter()
defer f.Close()
}
var count int
var m sync.Mutex
results := stringset.New()
defer results.Close()
g := geziyor.NewGeziyor(&geziyor.Options{
AllowedDomains: newScope,
StartURLs: []string{u},
Timeout: 5 * time.Minute,
RobotsTxtDisabled: true,
UserAgent: UserAgent,
LogDisabled: true,
ConcurrentRequests: 5,
RequestDelay: 750 * time.Millisecond,
RequestDelayRandomize: true,
ParseFunc: func(g *geziyor.Geziyor, r *client.Response) {
resp, err := httputil.DumpResponse(interface{}(r).(*http.Response), true)
if err == nil {
for _, n := range subRE.FindAllString(string(resp), -1) {
if name := CleanName(n); whichDomain(name, scope) != "" {
m.Lock()
results.Insert(name)
m.Unlock()
}
}
}
processURL := func(u *url.URL) {
// Attempt to save the name in our results
m.Lock()
results.Insert(u.Hostname())
m.Unlock()
if s := crawlFilterURLs(u, f); s != "" {
// Be sure the crawl has not exceeded the maximum links to be followed
m.Lock()
count++
current := count
m.Unlock()
if max <= 0 || current < max {
g.Get(s, g.Opt.ParseFunc)
}
}
}
r.HTMLDoc.Find("a").Each(func(i int, s *goquery.Selection) {
if href, ok := s.Attr("href"); ok {
if u, err := r.JoinURL(href); err == nil && whichDomain(u.Hostname(), newScope) != "" {
processURL(u)
}
}
})
r.HTMLDoc.Find("script").Each(func(i int, s *goquery.Selection) {
if src, ok := s.Attr("src"); ok {
if u, err := r.JoinURL(src); err == nil && whichDomain(u.Hostname(), newScope) != "" {
processURL(u)
}
}
})
},
})
options := &client.Options{
MaxBodySize: 100 * 1024 * 1024, // 100MB
RetryTimes: 2,
RetryHTTPCodes: []int{408, 500, 502, 503, 504, 522, 524},
}
g.Client = client.NewClient(options)
g.Client.Client = http.DefaultClient
done := make(chan struct{}, 2)
go func() {
g.Start()
done <- struct{}{}
}()
var err error
select {
case <-ctx.Done():
err = fmt.Errorf("The context expired during the crawl of %s", u)
case <-done:
if len(results.Slice()) == 0 {
err = fmt.Errorf("No DNS names were discovered during the crawl of %s", u)
}
}
return results.Slice(), err
}
func crawlFilterURLs(p *url.URL, f filter.Filter) string {
// Check that the URL has an appropriate scheme for scraping
if !p.IsAbs() || (p.Scheme != "http" && p.Scheme != "https") {
return ""
}
// If the URL path has a file extension, check that it's of interest
if ext := crawlRE.FindString(p.Path); ext != "" {
ext = strings.ToLower(ext)
var found bool
for _, s := range crawlFileStarts {
if strings.HasPrefix(ext, "." + s) {
found = true
break
}
}
for _, e := range crawlFileEnds {
if strings.HasSuffix(ext, e) {
found = true
break
}
}
if !found {
return ""
}
}
// Remove fragments and check if we've seen this URL before
p.Fragment = ""
p.RawFragment = ""
if f.Duplicate(p.String()) {
return ""
}
return p.String()
}
func whichDomain(name string, scope []string) string {
n := strings.TrimSpace(name)
for _, d := range scope {
if strings.HasSuffix(n, d) {
// fork made me do it :>
nlen := len(n)
dlen := len(d)
// Check for exact match first to guard against out of bound index
if nlen == dlen || n[nlen-dlen-1] == '.' {
return d
}
}
}
return ""
}
// PullCertificateNames attempts to pull a cert from one or more ports on an IP.
func PullCertificateNames(ctx context.Context, addr string, ports []int) []string {
var names []string
// Check hosts for certificates that contain subdomain names
for _, port := range ports {
select {
case <-ctx.Done():
return names
default:
}
c, err := TLSConn(ctx, addr, port)
if err != nil {
continue
}
// Get the correct certificate in the chain
certChain := c.ConnectionState().PeerCertificates
cert := certChain[0]
// Create the new requests from names found within the cert
names = append(names, namesFromCert(cert)...)
}
return names
}
// TLSConn attempts to make a TLS connection with the host on given port
func TLSConn(ctx context.Context, host string, port int) (*tls.Conn, error) {
// Set the maximum time allowed for making the connection
tCtx, cancel := context.WithTimeout(ctx, handshakeTimeout)
defer cancel()
// Obtain the connection
conn, err := amassnet.DialContext(tCtx, "tcp", net.JoinHostPort(host, strconv.Itoa(port)))
if err != nil {
return nil, err
}
defer conn.Close()
c := tls.Client(conn, &tls.Config{InsecureSkipVerify: true})
// Attempt to acquire the certificate chain
errChan := make(chan error, 2)
go func() {
errChan <- c.Handshake()
}()
t := time.NewTimer(handshakeTimeout)
select {
case <-t.C:
err = errors.New("Handshake timeout")
case e := <-errChan:
err = e
}
t.Stop()
return c, err
}
func namesFromCert(cert *x509.Certificate) []string {
var cn string
for _, name := range cert.Subject.Names {
oid := name.Type
if len(oid) == 4 && oid[0] == 2 && oid[1] == 5 && oid[2] == 4 {
if oid[3] == 3 {
cn = fmt.Sprintf("%s", name.Value)
break
}
}
}
subdomains := stringset.New()
defer subdomains.Close()
// Add the subject common name to the list of subdomain names
commonName := dns.RemoveAsteriskLabel(cn)
if commonName != "" {
subdomains.Insert(commonName)
}
// Add the cert DNS names to the list of subdomain names
for _, name := range cert.DNSNames {
n := dns.RemoveAsteriskLabel(name)
if n != "" {
subdomains.Insert(n)
}
}
return subdomains.Slice()
}
// ClientCountryCode returns the country code for the public-facing IP address for the host of the process.
func ClientCountryCode(ctx context.Context) string {
headers := map[string]string{"Content-Type": "application/json"}
page, err := RequestWebPage(ctx, "https://ipapi.co/json", nil, headers, nil)
if err != nil {
return ""
}
// Extract the country code from the REST API results
var ipinfo struct {
CountryCode string `json:"country"`
}
if err := json.Unmarshal([]byte(page), &ipinfo); err != nil {
return ""
}
return strings.ToLower(ipinfo.CountryCode)
}
// CleanName will clean up the names scraped from the web.
func CleanName(name string) string {
var err error
name, err = strconv.Unquote("\"" + strings.TrimSpace(name) + "\"")
if err == nil {
name = subRE.FindString(name)
}
name = strings.ToLower(name)
for {
name = strings.Trim(name, "-.")
if i := nameStripRE.FindStringIndex(name); i != nil {
name = name[i[1]:]
} else {
break
}
}
return name
}
Geziyor deprecated the use of JoinURL; resolve relative references against the request URL instead.
// Copyright 2017-2021 Jeff Foley. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package http
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/cookiejar"
"net/http/httputil"
"net/url"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/OWASP/Amass/v3/filter"
amassnet "github.com/OWASP/Amass/v3/net"
"github.com/OWASP/Amass/v3/net/dns"
"github.com/PuerkitoBio/goquery"
"github.com/caffix/stringset"
"github.com/geziyor/geziyor"
"github.com/geziyor/geziyor/client"
)
const (
// Accept is the default HTTP Accept header value used by Amass.
Accept = "text/html,application/json,application/xhtml+xml,application/xml;q=0.5,*/*;q=0.2"
// AcceptLang is the default HTTP Accept-Language header value used by Amass.
AcceptLang = "en-US,en;q=0.5"
httpTimeout = 30 * time.Second
handshakeTimeout = 10 * time.Second
)
var (
// UserAgent is the default user agent used by Amass during HTTP requests.
UserAgent string
subRE = dns.AnySubdomainRegex()
crawlRE = regexp.MustCompile(`\.\w{2,6}($|\?|#)`)
crawlFileEnds = []string{"html", "do", "action", "cgi"}
crawlFileStarts = []string{"js", "htm", "as", "php", "inc"}
nameStripRE = regexp.MustCompile(`^u[0-9a-f]{4}|20|22|25|27|2b|2f|3d|3a|40`)
)
// DefaultClient is the same HTTP client used by the package methods.
var DefaultClient *http.Client
// BasicAuth contains the data used for HTTP basic authentication.
type BasicAuth struct {
Username string
Password string
}
func init() {
jar, _ := cookiejar.New(nil)
DefaultClient = &http.Client{
Timeout: httpTimeout,
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: amassnet.DialContext,
MaxIdleConns: 200,
MaxConnsPerHost: 50,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: handshakeTimeout,
ExpectContinueTimeout: 5 * time.Second,
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
Jar: jar,
}
switch runtime.GOOS {
case "windows":
UserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36"
case "darwin":
UserAgent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36"
default:
UserAgent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36"
}
}
// CopyCookies copies cookies from one domain to another. Some of our data
// sources rely on shared auth tokens and this avoids sending extra requests
// to have the site reissue cookies for the other domains.
func CopyCookies(src string, dest string) {
srcURL, _ := url.Parse(src)
destURL, _ := url.Parse(dest)
DefaultClient.Jar.SetCookies(destURL, DefaultClient.Jar.Cookies(srcURL))
}
// CheckCookie checks if a cookie exists in the cookie jar for a given host
func CheckCookie(urlString string, cookieName string) bool {
cookieURL, _ := url.Parse(urlString)
found := false
for _, cookie := range DefaultClient.Jar.Cookies(cookieURL) {
if cookie.Name == cookieName {
found = true
break
}
}
return found
}
// RequestWebPage returns a string containing the entire response for the provided URL when successful.
func RequestWebPage(ctx context.Context, u string, body io.Reader, hvals map[string]string, auth *BasicAuth) (string, error) {
method := "GET"
if body != nil {
method = "POST"
}
req, err := http.NewRequestWithContext(ctx, method, u, body)
if err != nil {
return "", err
}
req.Close = true
if auth != nil && auth.Username != "" && auth.Password != "" {
req.SetBasicAuth(auth.Username, auth.Password)
}
req.Header.Set("User-Agent", UserAgent)
req.Header.Set("Accept", Accept)
req.Header.Set("Accept-Language", AcceptLang)
for k, v := range hvals {
req.Header.Set(k, v)
}
resp, err := DefaultClient.Do(req)
if err != nil {
return "", err
}
in, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
err = fmt.Errorf("%d: %s", resp.StatusCode, resp.Status)
}
return string(in), err
}
// Crawl will spider the web page at the URL argument looking for DNS names within the scope argument.
func Crawl(ctx context.Context, u string, scope []string, max int, f filter.Filter) ([]string, error) {
select {
case <-ctx.Done():
return nil, fmt.Errorf("the context expired")
default:
}
newScope := append([]string{}, scope...)
target := subRE.FindString(u)
if target != "" {
var found bool
for _, domain := range newScope {
if target == domain {
found = true
break
}
}
if !found {
newScope = append(newScope, target)
}
}
if f == nil {
f = filter.NewStringFilter()
defer f.Close()
}
var count int
var m sync.Mutex
results := stringset.New()
defer results.Close()
g := geziyor.NewGeziyor(&geziyor.Options{
AllowedDomains: newScope,
StartURLs: []string{u},
Timeout: 5 * time.Minute,
RobotsTxtDisabled: true,
UserAgent: UserAgent,
LogDisabled: true,
ConcurrentRequests: 5,
RequestDelay: 750 * time.Millisecond,
RequestDelayRandomize: true,
ParseFunc: func(g *geziyor.Geziyor, r *client.Response) {
resp, err := httputil.DumpResponse(interface{}(r).(*http.Response), true)
if err == nil {
for _, n := range subRE.FindAllString(string(resp), -1) {
if name := CleanName(n); whichDomain(name, scope) != "" {
m.Lock()
results.Insert(name)
m.Unlock()
}
}
}
processURL := func(u *url.URL) {
// Attempt to save the name in our results
m.Lock()
results.Insert(u.Hostname())
m.Unlock()
if s := crawlFilterURLs(u, f); s != "" {
// Be sure the crawl has not exceeded the maximum links to be followed
m.Lock()
count++
current := count
m.Unlock()
if max <= 0 || current < max {
g.Get(s, g.Opt.ParseFunc)
}
}
}
r.HTMLDoc.Find("a").Each(func(i int, s *goquery.Selection) {
if href, ok := s.Attr("href"); ok {
if u, err := r.Request.URL.Parse(href); err == nil && whichDomain(u.Hostname(), newScope) != "" {
processURL(u)
}
}
})
r.HTMLDoc.Find("script").Each(func(i int, s *goquery.Selection) {
if src, ok := s.Attr("src"); ok {
if u, err := r.Request.URL.Parse(src); err == nil && whichDomain(u.Hostname(), newScope) != "" {
processURL(u)
}
}
})
},
})
options := &client.Options{
MaxBodySize: 100 * 1024 * 1024, // 100MB
RetryTimes: 2,
RetryHTTPCodes: []int{408, 500, 502, 503, 504, 522, 524},
}
g.Client = client.NewClient(options)
g.Client.Client = http.DefaultClient
done := make(chan struct{}, 2)
go func() {
g.Start()
done <- struct{}{}
}()
var err error
select {
case <-ctx.Done():
err = fmt.Errorf("the context expired during the crawl of %s", u)
case <-done:
if len(results.Slice()) == 0 {
err = fmt.Errorf("no DNS names were discovered during the crawl of %s", u)
}
}
return results.Slice(), err
}
func crawlFilterURLs(p *url.URL, f filter.Filter) string {
// Check that the URL has an appropriate scheme for scraping
if !p.IsAbs() || (p.Scheme != "http" && p.Scheme != "https") {
return ""
}
// If the URL path has a file extension, check that it's of interest
if ext := crawlRE.FindString(p.Path); ext != "" {
ext = strings.ToLower(ext)
var found bool
for _, s := range crawlFileStarts {
if strings.HasPrefix(ext, "."+s) {
found = true
break
}
}
for _, e := range crawlFileEnds {
if strings.HasSuffix(ext, e) {
found = true
break
}
}
if !found {
return ""
}
}
// Remove fragments and check if we've seen this URL before
p.Fragment = ""
p.RawFragment = ""
if f.Duplicate(p.String()) {
return ""
}
return p.String()
}
func whichDomain(name string, scope []string) string {
n := strings.TrimSpace(name)
for _, d := range scope {
if strings.HasSuffix(n, d) {
// fork made me do it :>
nlen := len(n)
dlen := len(d)
// Check for exact match first to guard against out of bound index
if nlen == dlen || n[nlen-dlen-1] == '.' {
return d
}
}
}
return ""
}
// PullCertificateNames attempts to pull a cert from one or more ports on an IP.
func PullCertificateNames(ctx context.Context, addr string, ports []int) []string {
var names []string
// Check hosts for certificates that contain subdomain names
for _, port := range ports {
select {
case <-ctx.Done():
return names
default:
}
c, err := TLSConn(ctx, addr, port)
if err != nil {
continue
}
// Get the correct certificate in the chain
certChain := c.ConnectionState().PeerCertificates
cert := certChain[0]
// Create the new requests from names found within the cert
names = append(names, namesFromCert(cert)...)
}
return names
}
// TLSConn attempts to make a TLS connection with the host on given port
func TLSConn(ctx context.Context, host string, port int) (*tls.Conn, error) {
// Set the maximum time allowed for making the connection
tCtx, cancel := context.WithTimeout(ctx, handshakeTimeout)
defer cancel()
// Obtain the connection
conn, err := amassnet.DialContext(tCtx, "tcp", net.JoinHostPort(host, strconv.Itoa(port)))
if err != nil {
return nil, err
}
defer conn.Close()
c := tls.Client(conn, &tls.Config{InsecureSkipVerify: true})
// Attempt to acquire the certificate chain
errChan := make(chan error, 2)
go func() {
errChan <- c.Handshake()
}()
t := time.NewTimer(handshakeTimeout)
select {
case <-t.C:
err = errors.New("handshake timeout")
case e := <-errChan:
err = e
}
t.Stop()
return c, err
}
func namesFromCert(cert *x509.Certificate) []string {
var cn string
for _, name := range cert.Subject.Names {
oid := name.Type
if len(oid) == 4 && oid[0] == 2 && oid[1] == 5 && oid[2] == 4 {
if oid[3] == 3 {
cn = fmt.Sprintf("%s", name.Value)
break
}
}
}
subdomains := stringset.New()
defer subdomains.Close()
// Add the subject common name to the list of subdomain names
commonName := dns.RemoveAsteriskLabel(cn)
if commonName != "" {
subdomains.Insert(commonName)
}
// Add the cert DNS names to the list of subdomain names
for _, name := range cert.DNSNames {
n := dns.RemoveAsteriskLabel(name)
if n != "" {
subdomains.Insert(n)
}
}
return subdomains.Slice()
}
// ClientCountryCode returns the country code for the public-facing IP address for the host of the process.
func ClientCountryCode(ctx context.Context) string {
headers := map[string]string{"Content-Type": "application/json"}
page, err := RequestWebPage(ctx, "https://ipapi.co/json", nil, headers, nil)
if err != nil {
return ""
}
// Extract the country code from the REST API results
var ipinfo struct {
CountryCode string `json:"country"`
}
if err := json.Unmarshal([]byte(page), &ipinfo); err != nil {
return ""
}
return strings.ToLower(ipinfo.CountryCode)
}
// CleanName will clean up the names scraped from the web.
func CleanName(name string) string {
var err error
name, err = strconv.Unquote("\"" + strings.TrimSpace(name) + "\"")
if err == nil {
name = subRE.FindString(name)
}
name = strings.ToLower(name)
for {
name = strings.Trim(name, "-.")
if i := nameStripRE.FindStringIndex(name); i != nil {
name = name[i[1]:]
} else {
break
}
}
return name
}
|
package network
//----------------------------------------------------------------------
// This file is part of Gospel.
// Copyright (C) 2011-2020 Bernd Fix
//
// Gospel is free software: you can redistribute it and/or modify it
// under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Gospel is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// SPDX-License-Identifier: AGPL3.0-or-later
//----------------------------------------------------------------------
import (
"errors"
"github.com/bfix/gospel/logger"
"net"
"net/url"
"strconv"
"strings"
"time"
)
var socksState = []string{
"succeeded",
"general SOCKS server failure",
"connection not allowed by ruleset",
"Network unreachable",
"Host unreachable",
"Connection refused",
"TTL expired",
"Command not supported",
"Address type not supported",
"to X'FF' unassigned",
}
// Socks5Connect connects to a SOCKS5 proxy.
func Socks5Connect(proto string, addr string, port int, proxy string) (net.Conn, error) {
return Socks5ConnectTimeout(proto, addr, port, proxy, 0)
}
// Socks5ConnectTimeout connects to a SOCKS5 proxy with timeout.
func Socks5ConnectTimeout(proto string, addr string, port int, proxy string, timeout time.Duration) (net.Conn, error) {
var (
conn net.Conn
err error
)
if proto != "tcp" {
logger.Printf(logger.ERROR, "[network] Unsupported protocol '%s'.\n", proto)
return nil, errors.New("Unsupported protocol (TCP only)")
}
p, err := url.Parse(proxy)
if err != nil {
return nil, err
}
if len(p.Scheme) > 0 && p.Scheme != "socks5" {
logger.Printf(logger.ERROR, "[network] Invalid proxy scheme '%s'.\n", p.Scheme)
return nil, errors.New("Invalid proxy scheme")
}
idx := strings.Index(p.Host, ":")
if idx == -1 {
logger.Printf(logger.ERROR, "[network] Invalid host definition '%s'.\n", p.Host)
return nil, errors.New("Invalid host definition (missing port)")
}
pPort, err := strconv.Atoi(p.Host[idx+1:])
if err != nil || port < 1 || port > 65535 {
logger.Printf(logger.ERROR, "[network] Invalid port definition '%d'.\n", pPort)
return nil, errors.New("Invalid host definition (port out of range)")
}
if timeout == 0 {
conn, err = net.Dial("tcp", p.Host)
} else {
conn, err = net.DialTimeout("tcp", p.Host, timeout)
}
if err != nil {
logger.Printf(logger.ERROR, "[network] failed to connect to proxy server: %s\n", err.Error())
return nil, err
}
data := make([]byte, 1024)
//-----------------------------------------------------------------
// negotiate authentication
//-----------------------------------------------------------------
data[0] = 5 // SOCKS version
data[1] = 1 // One available authentication method
data[2] = 0 // No authentication required
if timeout > 0 {
conn.SetDeadline(time.Now().Add(timeout))
}
if n, err := conn.Write(data[:3]); n != 3 {
logger.Printf(logger.ERROR, "[network] failed to write to proxy server: %s\n", err.Error())
conn.Close()
return nil, err
}
if timeout > 0 {
conn.SetDeadline(time.Now().Add(timeout))
}
if n, err := conn.Read(data); n != 2 {
logger.Printf(logger.ERROR, "[network] failed to read from proxy server: %s\n", err.Error())
conn.Close()
return nil, err
}
if data[0] != 5 || data[1] == 0xFF {
logger.Println(logger.ERROR, "[network] proxy server refuses non-authenticated connection.")
conn.Close()
return nil, err
}
//-----------------------------------------------------------------
// connect to target (request/reply processing)
//-----------------------------------------------------------------
dn := []byte(addr)
size := len(dn)
data[0] = 5 // SOCKS versions
data[1] = 1 // connect to target
data[2] = 0 // reserved
data[3] = 3 // domain name specified
data[4] = byte(size) // length of domain name
for i, v := range dn {
data[5+i] = v
}
data[5+size] = (byte)(port / 256)
data[6+size] = (byte)(port % 256)
if timeout > 0 {
conn.SetDeadline(time.Now().Add(timeout))
}
if n, err := conn.Write(data[:7+size]); n != (7 + size) {
logger.Printf(logger.ERROR, "[network] failed to write to proxy server: %s\n", err.Error())
conn.Close()
return nil, err
}
if timeout > 0 {
conn.SetDeadline(time.Now().Add(timeout))
}
_, err = conn.Read(data)
if err != nil {
conn.Close()
return nil, err
}
if data[1] != 0 {
err = errors.New(socksState[data[1]])
logger.Printf(logger.ERROR, "[network] proxy server failed: %s\n", err.Error())
conn.Close()
return nil, err
}
// remove timeout from connection
conn.SetDeadline(0)
// return connection
return conn, nil
}
Fixed reset of connection deadline.
package network
//----------------------------------------------------------------------
// This file is part of Gospel.
// Copyright (C) 2011-2020 Bernd Fix
//
// Gospel is free software: you can redistribute it and/or modify it
// under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// Gospel is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// SPDX-License-Identifier: AGPL3.0-or-later
//----------------------------------------------------------------------
import (
"errors"
"github.com/bfix/gospel/logger"
"net"
"net/url"
"strconv"
"strings"
"time"
)
var socksState = []string{
"succeeded",
"general SOCKS server failure",
"connection not allowed by ruleset",
"Network unreachable",
"Host unreachable",
"Connection refused",
"TTL expired",
"Command not supported",
"Address type not supported",
"to X'FF' unassigned",
}
// Socks5Connect connects to a SOCKS5 proxy.
func Socks5Connect(proto string, addr string, port int, proxy string) (net.Conn, error) {
return Socks5ConnectTimeout(proto, addr, port, proxy, 0)
}
// Socks5ConnectTimeout connects to a SOCKS5 proxy with timeout.
func Socks5ConnectTimeout(proto string, addr string, port int, proxy string, timeout time.Duration) (net.Conn, error) {
var (
conn net.Conn
err error
)
if proto != "tcp" {
logger.Printf(logger.ERROR, "[network] Unsupported protocol '%s'.\n", proto)
return nil, errors.New("Unsupported protocol (TCP only)")
}
p, err := url.Parse(proxy)
if err != nil {
return nil, err
}
if len(p.Scheme) > 0 && p.Scheme != "socks5" {
logger.Printf(logger.ERROR, "[network] Invalid proxy scheme '%s'.\n", p.Scheme)
return nil, errors.New("Invalid proxy scheme")
}
idx := strings.Index(p.Host, ":")
if idx == -1 {
logger.Printf(logger.ERROR, "[network] Invalid host definition '%s'.\n", p.Host)
return nil, errors.New("Invalid host definition (missing port)")
}
pPort, err := strconv.Atoi(p.Host[idx+1:])
if err != nil || port < 1 || port > 65535 {
logger.Printf(logger.ERROR, "[network] Invalid port definition '%d'.\n", pPort)
return nil, errors.New("Invalid host definition (port out of range)")
}
if timeout == 0 {
conn, err = net.Dial("tcp", p.Host)
} else {
conn, err = net.DialTimeout("tcp", p.Host, timeout)
}
if err != nil {
logger.Printf(logger.ERROR, "[network] failed to connect to proxy server: %s\n", err.Error())
return nil, err
}
data := make([]byte, 1024)
//-----------------------------------------------------------------
// negotiate authentication
//-----------------------------------------------------------------
data[0] = 5 // SOCKS version
data[1] = 1 // One available authentication method
data[2] = 0 // No authentication required
if timeout > 0 {
conn.SetDeadline(time.Now().Add(timeout))
}
if n, err := conn.Write(data[:3]); n != 3 {
logger.Printf(logger.ERROR, "[network] failed to write to proxy server: %s\n", err.Error())
conn.Close()
return nil, err
}
if timeout > 0 {
conn.SetDeadline(time.Now().Add(timeout))
}
if n, err := conn.Read(data); n != 2 {
logger.Printf(logger.ERROR, "[network] failed to read from proxy server: %s\n", err.Error())
conn.Close()
return nil, err
}
if data[0] != 5 || data[1] == 0xFF {
logger.Println(logger.ERROR, "[network] proxy server refuses non-authenticated connection.")
conn.Close()
return nil, err
}
//-----------------------------------------------------------------
// connect to target (request/reply processing)
//-----------------------------------------------------------------
dn := []byte(addr)
size := len(dn)
data[0] = 5 // SOCKS versions
data[1] = 1 // connect to target
data[2] = 0 // reserved
data[3] = 3 // domain name specified
data[4] = byte(size) // length of domain name
for i, v := range dn {
data[5+i] = v
}
data[5+size] = (byte)(port / 256)
data[6+size] = (byte)(port % 256)
if timeout > 0 {
conn.SetDeadline(time.Now().Add(timeout))
}
if n, err := conn.Write(data[:7+size]); n != (7 + size) {
logger.Printf(logger.ERROR, "[network] failed to write to proxy server: %s\n", err.Error())
conn.Close()
return nil, err
}
if timeout > 0 {
conn.SetDeadline(time.Now().Add(timeout))
}
_, err = conn.Read(data)
if err != nil {
conn.Close()
return nil, err
}
if data[1] != 0 {
err = errors.New(socksState[data[1]])
logger.Printf(logger.ERROR, "[network] proxy server failed: %s\n", err.Error())
conn.Close()
return nil, err
}
// remove timeout from connection
var zero time.Time
conn.SetDeadline(zero)
// return connection
return conn, nil
}
|
package gce
import (
"bytes"
"fmt"
log "github.com/Sirupsen/logrus"
"github.com/emccode/rexray/core"
"github.com/emccode/rexray/core/config"
"github.com/emccode/rexray/core/errors"
"golang.org/x/net/context"
"golang.org/x/oauth2/google"
"google.golang.org/api/compute/v1"
"io/ioutil"
"net"
"net/http"
"regexp"
"strconv"
"strings"
"time"
)
const providerName = "gce"
// The GCE storage driver.
type driver struct {
currentInstanceId string
client *compute.Service
r *core.RexRay
zone string
project string
}
func ef() errors.Fields {
return errors.Fields{
"provider": providerName,
}
}
func eff(fields errors.Fields) map[string]interface{} {
errFields := map[string]interface{}{
"provider": providerName,
}
if fields != nil {
for k, v := range fields {
errFields[k] = v
}
}
return errFields
}
func init() {
core.RegisterDriver(providerName, newDriver)
config.Register(configRegistration())
}
func newDriver() core.Driver {
return &driver{}
}
// Init prepares the driver for use: it loads configuration, builds an
// authenticated GCE compute client from the service-account key file, and
// resolves the numeric ID of the instance this process runs on.
func (d *driver) Init(r *core.RexRay) error {
	d.r = r
	var err error
	d.zone = d.r.Config.GetString("gce.zone")
	d.project = d.r.Config.GetString("gce.project")
	serviceAccountJSON, err := ioutil.ReadFile(d.r.Config.GetString("gce.keyfile"))
	if err != nil {
		log.WithField("provider", providerName).Fatalf("Could not read service account credentials file, %s => {%s}", d.r.Config.GetString("gce.keyfile"), err)
		return err
	}
	config, err := google.JWTConfigFromJSON(serviceAccountJSON,
		compute.ComputeScope,
	)
	// This error was previously ignored; a malformed key file left config
	// nil and config.Client below dereferenced a nil pointer.
	if err != nil {
		log.WithField("provider", providerName).Fatalf("Could not create JWT config from credentials => {%s}", err)
		return err
	}
	client, err := compute.New(config.Client(context.Background()))
	if err != nil {
		log.WithField("provider", providerName).Fatalf("Could not create compute client => {%s}", err)
		return err
	}
	d.client = client
	instanceId, err := getCurrentInstanceId()
	if err != nil {
		return err
	}
	d.currentInstanceId = instanceId
	log.WithField("provider", providerName).Info("storage driver initialized")
	return nil
}
// getCurrentInstanceId queries the GCE metadata server for the numeric ID
// of the instance this process is running on. The initial dial is a fast
// reachability probe so we fail quickly when not running on GCE.
func getCurrentInstanceId() (string, error) {
	conn, err := net.DialTimeout("tcp", "metadata.google.internal:80", 50*time.Millisecond)
	if err != nil {
		return "", err
	}
	defer conn.Close()
	url := "http://metadata.google.internal/computeMetadata/v1/instance/id"
	client := &http.Client{}
	req, _ := http.NewRequest("GET", url, nil)
	// Required header; the metadata server rejects requests without it.
	req.Header.Set("Metadata-Flavor", "Google")
	resp, err := client.Do(req)
	if err != nil {
		return "", fmt.Errorf("Error: %v\n", err)
	}
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	// Trim at the first NUL byte if present. bytes.Index returns -1 when
	// the byte is absent, which previously caused a slice-bounds panic on
	// data[:n].
	if n := bytes.Index(data, []byte{0}); n >= 0 {
		data = data[:n]
	}
	return string(data), nil
}
// Name returns the registered name of this driver ("gce").
func (d *driver) Name() string {
	return providerName
}
// GetVolumeMapping returns one BlockDevice per disk attached to any
// instance in the configured project/zone, joined against the zone's disk
// list for volume ID, zone, and status details.
func (d *driver) GetVolumeMapping() ([]*core.BlockDevice, error) {
	log.WithField("provider", providerName).Debug("GetVolumeMapping")
	diskMap := make(map[string]*compute.Disk)
	disks, err := d.client.Disks.List(d.project, d.zone).Do()
	if err != nil {
		return []*core.BlockDevice{}, err
	}
	for _, disk := range disks.Items {
		diskMap[disk.SelfLink] = disk
	}
	instances, err := d.client.Instances.List(d.project, d.zone).Do()
	if err != nil {
		return []*core.BlockDevice{}, err
	}
	var ret []*core.BlockDevice
	for _, instance := range instances.Items {
		for _, disk := range instance.Disks {
			// An attached disk may not appear in this zone's disk list;
			// previously the nil map entry was dereferenced and panicked.
			src, ok := diskMap[disk.Source]
			if !ok {
				continue
			}
			ret = append(ret, &core.BlockDevice{
				ProviderName: "gce",
				InstanceID:   strconv.FormatUint(instance.Id, 10),
				VolumeID:     strconv.FormatUint(src.Id, 10),
				DeviceName:   disk.DeviceName,
				Region:       src.Zone,
				Status:       src.Status,
				NetworkName:  disk.Source,
			})
		}
	}
	return ret, nil
}
// GetInstance looks up the instance this driver is running on by its
// numeric ID and returns it as a core.Instance. Returns (nil, nil) when
// no matching instance exists.
func (d *driver) GetInstance() (*core.Instance, error) {
	log.WithField("provider", providerName).Debug("GetInstance")
	query := d.client.Instances.List(d.project, d.zone)
	query.Filter(fmt.Sprintf("id eq %s", d.currentInstanceId))
	instances, err := query.Do()
	if err != nil {
		return nil, err
	}
	if len(instances.Items) == 0 {
		return nil, nil
	}
	// Only the first (and expectedly only) match is reported.
	found := instances.Items[0]
	return &core.Instance{
		ProviderName: "gce",
		InstanceID:   strconv.FormatUint(found.Id, 10),
		Region:       found.Zone,
		Name:         found.Name,
	}, nil
}
// CreateSnapshot is not implemented for GCE; it logs the call and returns
// no snapshots.
func (d *driver) CreateSnapshot(
	runAsync bool,
	snapshotName, volumeID, description string) ([]*core.Snapshot, error) {
	log.WithField("provider", providerName).Debug("CreateSnapshot")
	return nil, nil
}

// GetSnapshot is not implemented for GCE; it logs the call and returns no
// snapshots.
func (d *driver) GetSnapshot(
	volumeID, snapshotID, snapshotName string) ([]*core.Snapshot, error) {
	log.WithField("provider", providerName).Debug("GetSnapshot")
	return nil, nil
}

// RemoveSnapshot is not implemented for GCE; it logs the call and reports
// success.
func (d *driver) RemoveSnapshot(snapshotID string) error {
	log.WithField("provider", providerName).Debug("RemoveSnapshot")
	return nil
}
// GetDeviceNextAvailable returns the first unused device path of the form
// /dev/xvd[a-p], considering both devices reported by the cloud provider
// and devices present locally in /proc/partitions.
func (d *driver) GetDeviceNextAvailable() (string, error) {
	letters := []string{
		"a", "b", "c", "d", "e", "f", "g", "h",
		"i", "j", "k", "l", "m", "n", "o", "p"}
	// Compile the constant patterns once instead of once per loop
	// iteration; the previous per-iteration Compile also discarded its
	// error.
	attachedRE := regexp.MustCompile(`^/dev/xvd([a-z])`)
	localRE := regexp.MustCompile(`^xvd([a-z])`)
	blockDeviceNames := make(map[string]bool)
	blockDeviceMapping, err := d.GetVolumeMapping()
	if err != nil {
		return "", err
	}
	for _, blockDevice := range blockDeviceMapping {
		res := attachedRE.FindStringSubmatch(blockDevice.DeviceName)
		if len(res) > 0 {
			blockDeviceNames[res[1]] = true
		}
	}
	localDevices, err := getLocalDevices()
	if err != nil {
		return "", err
	}
	for _, localDevice := range localDevices {
		res := localRE.FindStringSubmatch(localDevice)
		if len(res) > 0 {
			blockDeviceNames[res[1]] = true
		}
	}
	for _, letter := range letters {
		if !blockDeviceNames[letter] {
			nextDeviceName := "/dev/xvd" + letter
			log.Println("Got next device name: " + nextDeviceName)
			return nextDeviceName, nil
		}
	}
	return "", errors.New("No available device")
}
// getLocalDevices parses /proc/partitions and returns the device names in
// its fourth column, skipping the two header lines.
func getLocalDevices() (deviceNames []string, err error) {
	file := "/proc/partitions"
	contentBytes, err := ioutil.ReadFile(file)
	if err != nil {
		return []string{}, err
	}
	content := string(contentBytes)
	lines := strings.Split(content, "\n")
	// Guard against a malformed/short file: with fewer than three lines
	// there is nothing past the header to parse (and lines[2:] would
	// panic when fewer than two lines are present).
	if len(lines) < 3 {
		return deviceNames, nil
	}
	for _, line := range lines[2:] {
		fields := strings.Fields(line)
		if len(fields) == 4 {
			deviceNames = append(deviceNames, fields[3])
		}
	}
	return deviceNames, nil
}
// CreateVolume is not implemented for GCE; it logs the call and returns
// no volume.
func (d *driver) CreateVolume(
	runAsync bool, volumeName, volumeID, snapshotID, volumeType string,
	IOPS, size int64, availabilityZone string) (*core.Volume, error) {
	log.WithField("provider", providerName).Debug("CreateVolume")
	return nil, nil
}

// createVolumeCreateSnapshot is not implemented for GCE; it logs the call
// and returns an empty snapshot ID.
func (d *driver) createVolumeCreateSnapshot(
	volumeID string, snapshotID string) (string, error) {
	log.WithField("provider", providerName).Debug("CreateVolumeCreateSnapshot")
	return "", nil
}
// GetVolume lists disks in the configured project/zone, optionally
// filtered by volume ID and/or name, and returns them as core.Volumes
// with their current attachments resolved from the zone's instances.
//
// NOTE(review): both Filter calls are applied to the same query; when
// volumeID and volumeName are both set, confirm against the compute API
// client whether the second Filter replaces or combines with the first.
func (d *driver) GetVolume(
	volumeID, volumeName string) ([]*core.Volume, error) {
	log.WithField("provider", providerName).Debugf("GetVolume :%s %s", volumeID, volumeName)
	query := d.client.Disks.List(d.project, d.zone)
	if volumeID != "" {
		query.Filter(fmt.Sprintf("id eq %s", volumeID))
	}
	if volumeName != "" {
		query.Filter(fmt.Sprintf("name eq %s", volumeName))
	}
	// Build the attachment list for every instance in the zone; each
	// attachment is keyed by the disk's source URL for the join below.
	var attachments []*core.VolumeAttachment
	instances, err := d.client.Instances.List(d.project, d.zone).Do()
	if err != nil {
		return []*core.Volume{}, err
	}
	for _, instance := range instances.Items {
		for _, disk := range instance.Disks {
			attachment := &core.VolumeAttachment{
				InstanceID: strconv.FormatUint(instance.Id, 10),
				DeviceName: disk.DeviceName,
				Status:     disk.Mode,
				VolumeID:   disk.Source,
			}
			attachments = append(attachments, attachment)
		}
	}
	disks, err := query.Do()
	if err != nil {
		return []*core.Volume{}, err
	}
	var volumesSD []*core.Volume
	for _, disk := range disks.Items {
		// Collect this disk's attachments, rewriting the volume ID from
		// the source URL to the numeric disk ID.
		var diskAttachments []*core.VolumeAttachment
		for _, attachment := range attachments {
			if attachment.VolumeID == disk.SelfLink {
				diskAttachments = append(diskAttachments, &core.VolumeAttachment{
					InstanceID: attachment.InstanceID,
					DeviceName: attachment.DeviceName,
					Status:     attachment.Status,
					VolumeID:   strconv.FormatUint(disk.Id, 10),
				})
			}
		}
		volumeSD := &core.Volume{
			Name:             disk.Name,
			VolumeID:         strconv.FormatUint(disk.Id, 10),
			AvailabilityZone: disk.Zone,
			Status:           disk.Status,
			VolumeType:       disk.Kind,
			NetworkName:      disk.SelfLink,
			IOPS:             0,
			Size:             strconv.FormatInt(disk.SizeGb, 10),
			Attachments:      diskAttachments,
		}
		volumesSD = append(volumesSD, volumeSD)
	}
	return volumesSD, nil
}
// GetVolumeAttach lists current disk attachments for instances in the
// configured project/zone, optionally filtered to a single instance ID.
// The volumeID parameter is only logged; filtering by volume is not
// performed here.
func (d *driver) GetVolumeAttach(
	volumeID, instanceID string) ([]*core.VolumeAttachment, error) {
	log.WithField("provider", providerName).Debugf("GetVolumeAttach :%s %s", volumeID, instanceID)
	var attachments []*core.VolumeAttachment
	query := d.client.Instances.List(d.project, d.zone)
	if instanceID != "" {
		query.Filter(fmt.Sprintf("id eq %s", instanceID))
	}
	instances, err := query.Do()
	if err != nil {
		return []*core.VolumeAttachment{}, err
	}
	for _, instance := range instances.Items {
		for _, disk := range instance.Disks {
			attachment := &core.VolumeAttachment{
				InstanceID: strconv.FormatUint(instance.Id, 10),
				DeviceName: disk.DeviceName,
				Status:     disk.Mode,
				VolumeID:   disk.Source,
			}
			attachments = append(attachments, attachment)
		}
	}
	return attachments, nil
}
// waitSnapshotComplete is a no-op placeholder; snapshot waiting is not
// implemented for GCE.
func (d *driver) waitSnapshotComplete(snapshotID string) error {
	return nil
}

// waitVolumeComplete is a no-op placeholder.
func (d *driver) waitVolumeComplete(volumeID string) error {
	return nil
}

// waitVolumeAttach is a no-op placeholder.
func (d *driver) waitVolumeAttach(volumeID, instanceID string) error {
	return nil
}

// waitVolumeDetach is a no-op placeholder.
func (d *driver) waitVolumeDetach(volumeID string) error {
	return nil
}

// RemoveVolume is not implemented for GCE; it always reports success.
func (d *driver) RemoveVolume(volumeID string) error {
	return nil
}
// AttachVolume is not implemented for GCE; it logs the call and returns
// no attachments.
func (d *driver) AttachVolume(
	runAsync bool,
	volumeID, instanceID string) ([]*core.VolumeAttachment, error) {
	log.WithField("provider", providerName).Debug("AttachVolume")
	return nil, nil
}

// DetachVolume is not implemented for GCE; it logs the call and reports
// success.
func (d *driver) DetachVolume(
	runAsync bool,
	volumeID, blank string) error {
	log.WithField("provider", providerName).Debug("DetachVolume")
	return nil
}

// CopySnapshot is not implemented for GCE; it logs the call and returns
// no snapshot.
func (d *driver) CopySnapshot(runAsync bool,
	volumeID, snapshotID, snapshotName, destinationSnapshotName,
	destinationRegion string) (*core.Snapshot, error) {
	log.WithField("provider", providerName).Debug("CopySnapshot")
	return nil, nil
}
// configRegistration declares the configuration keys this driver consumes
// (zone, project, and service-account key file path).
func configRegistration() *config.Registration {
	r := config.NewRegistration("Google GCE")
	r.Key(config.String, "", "", "", "gce.zone")
	r.Key(config.String, "", "", "", "gce.project")
	r.Key(config.String, "", "", "", "gce.keyfile")
	return r
}
Another method to read the body
package gce
import (
"bytes"
"fmt"
log "github.com/Sirupsen/logrus"
"github.com/emccode/rexray/core"
"github.com/emccode/rexray/core/config"
"github.com/emccode/rexray/core/errors"
"golang.org/x/net/context"
"golang.org/x/oauth2/google"
"google.golang.org/api/compute/v1"
"io/ioutil"
"net"
"net/http"
"regexp"
"strconv"
"strings"
"time"
)
// providerName identifies this driver in logs, driver registration, and
// configuration keys.
const providerName = "gce"

// The GCE storage driver.
type driver struct {
	currentInstanceId string           // numeric GCE instance ID of this host
	client            *compute.Service // GCE Compute API client
	r                 *core.RexRay     // owning RexRay core (config access)
	zone              string           // from config key "gce.zone"
	project           string           // from config key "gce.project"
}
// ef returns a fresh errors.Fields containing this driver's provider name.
func ef() errors.Fields {
	return errors.Fields{
		"provider": providerName,
	}
}

// eff merges caller-supplied fields over a base map containing the
// provider name; caller keys win on collision.
func eff(fields errors.Fields) map[string]interface{} {
	errFields := map[string]interface{}{
		"provider": providerName,
	}
	if fields != nil {
		for k, v := range fields {
			errFields[k] = v
		}
	}
	return errFields
}

// init registers the driver and its config keys with the RexRay core at
// package load time.
func init() {
	core.RegisterDriver(providerName, newDriver)
	config.Register(configRegistration())
}

// newDriver is the factory for core.RegisterDriver; Init must be called
// before the returned driver is used.
func newDriver() core.Driver {
	return &driver{}
}
// Init prepares the driver for use: it loads configuration, builds an
// authenticated GCE compute client from the service-account key file, and
// resolves the numeric ID of the instance this process runs on.
func (d *driver) Init(r *core.RexRay) error {
	d.r = r
	var err error
	d.zone = d.r.Config.GetString("gce.zone")
	d.project = d.r.Config.GetString("gce.project")
	serviceAccountJSON, err := ioutil.ReadFile(d.r.Config.GetString("gce.keyfile"))
	if err != nil {
		log.WithField("provider", providerName).Fatalf("Could not read service account credentials file, %s => {%s}", d.r.Config.GetString("gce.keyfile"), err)
		return err
	}
	config, err := google.JWTConfigFromJSON(serviceAccountJSON,
		compute.ComputeScope,
	)
	// This error was previously ignored; a malformed key file left config
	// nil and config.Client below dereferenced a nil pointer.
	if err != nil {
		log.WithField("provider", providerName).Fatalf("Could not create JWT config from credentials => {%s}", err)
		return err
	}
	client, err := compute.New(config.Client(context.Background()))
	if err != nil {
		log.WithField("provider", providerName).Fatalf("Could not create compute client => {%s}", err)
		return err
	}
	d.client = client
	instanceId, err := getCurrentInstanceId()
	if err != nil {
		return err
	}
	d.currentInstanceId = instanceId
	log.WithField("provider", providerName).Info("storage driver initialized")
	return nil
}
// getCurrentInstanceId queries the GCE metadata server for the numeric ID
// of the instance this process is running on, reading the response through
// a bytes.Buffer. The initial dial is a fast reachability probe so we fail
// quickly when not running on GCE.
func getCurrentInstanceId() (string, error) {
	conn, err := net.DialTimeout("tcp", "metadata.google.internal:80", 50*time.Millisecond)
	if err != nil {
		return "", err
	}
	defer conn.Close()
	url := "http://metadata.google.internal/computeMetadata/v1/instance/id"
	client := &http.Client{}
	req, _ := http.NewRequest("GET", url, nil)
	// Required header; the metadata server rejects requests without it.
	req.Header.Set("Metadata-Flavor", "Google")
	resp, err := client.Do(req)
	if err != nil {
		return "", fmt.Errorf("Error: %v\n", err)
	}
	defer resp.Body.Close()
	buf := new(bytes.Buffer)
	// The read error was previously discarded, which could silently return
	// a truncated instance ID.
	if _, err := buf.ReadFrom(resp.Body); err != nil {
		return "", err
	}
	s := buf.String()
	return s, nil
}
// Name returns the registered name of this driver ("gce").
func (d *driver) Name() string {
	return providerName
}
// GetVolumeMapping returns one BlockDevice per disk attached to any
// instance in the configured project/zone, joined against the zone's disk
// list for volume ID, zone, and status details.
func (d *driver) GetVolumeMapping() ([]*core.BlockDevice, error) {
	log.WithField("provider", providerName).Debug("GetVolumeMapping")
	diskMap := make(map[string]*compute.Disk)
	disks, err := d.client.Disks.List(d.project, d.zone).Do()
	if err != nil {
		return []*core.BlockDevice{}, err
	}
	for _, disk := range disks.Items {
		diskMap[disk.SelfLink] = disk
	}
	instances, err := d.client.Instances.List(d.project, d.zone).Do()
	if err != nil {
		return []*core.BlockDevice{}, err
	}
	var ret []*core.BlockDevice
	for _, instance := range instances.Items {
		for _, disk := range instance.Disks {
			// An attached disk may not appear in this zone's disk list;
			// previously the nil map entry was dereferenced and panicked.
			src, ok := diskMap[disk.Source]
			if !ok {
				continue
			}
			ret = append(ret, &core.BlockDevice{
				ProviderName: "gce",
				InstanceID:   strconv.FormatUint(instance.Id, 10),
				VolumeID:     strconv.FormatUint(src.Id, 10),
				DeviceName:   disk.DeviceName,
				Region:       src.Zone,
				Status:       src.Status,
				NetworkName:  disk.Source,
			})
		}
	}
	return ret, nil
}
// GetInstance looks up the current instance by its numeric ID and returns
// it as a core.Instance; (nil, nil) when no match is found.
func (d *driver) GetInstance() (*core.Instance, error) {
	log.WithField("provider", providerName).Debug("GetInstance")
	query := d.client.Instances.List(d.project, d.zone)
	query.Filter(fmt.Sprintf("id eq %s", d.currentInstanceId))
	instances, err := query.Do()
	if err != nil {
		return nil, err
	}
	// Only the first (and expectedly only) match is reported.
	for _, instance := range instances.Items {
		return &core.Instance{
			ProviderName: "gce",
			InstanceID:   strconv.FormatUint(instance.Id, 10),
			Region:       instance.Zone,
			Name:         instance.Name,
		}, nil
	}
	return nil, nil
}

// CreateSnapshot is not implemented for GCE; it logs the call and returns
// no snapshots.
func (d *driver) CreateSnapshot(
	runAsync bool,
	snapshotName, volumeID, description string) ([]*core.Snapshot, error) {
	log.WithField("provider", providerName).Debug("CreateSnapshot")
	return nil, nil
}

// GetSnapshot is not implemented for GCE; it logs the call and returns no
// snapshots.
func (d *driver) GetSnapshot(
	volumeID, snapshotID, snapshotName string) ([]*core.Snapshot, error) {
	log.WithField("provider", providerName).Debug("GetSnapshot")
	return nil, nil
}

// RemoveSnapshot is not implemented for GCE; it logs the call and reports
// success.
func (d *driver) RemoveSnapshot(snapshotID string) error {
	log.WithField("provider", providerName).Debug("RemoveSnapshot")
	return nil
}
// GetDeviceNextAvailable returns the first unused device path of the form
// /dev/xvd[a-p], considering both devices reported by the cloud provider
// and devices present locally in /proc/partitions.
func (d *driver) GetDeviceNextAvailable() (string, error) {
	letters := []string{
		"a", "b", "c", "d", "e", "f", "g", "h",
		"i", "j", "k", "l", "m", "n", "o", "p"}
	// Compile the constant patterns once instead of once per loop
	// iteration; the previous per-iteration Compile also discarded its
	// error.
	attachedRE := regexp.MustCompile(`^/dev/xvd([a-z])`)
	localRE := regexp.MustCompile(`^xvd([a-z])`)
	blockDeviceNames := make(map[string]bool)
	blockDeviceMapping, err := d.GetVolumeMapping()
	if err != nil {
		return "", err
	}
	for _, blockDevice := range blockDeviceMapping {
		res := attachedRE.FindStringSubmatch(blockDevice.DeviceName)
		if len(res) > 0 {
			blockDeviceNames[res[1]] = true
		}
	}
	localDevices, err := getLocalDevices()
	if err != nil {
		return "", err
	}
	for _, localDevice := range localDevices {
		res := localRE.FindStringSubmatch(localDevice)
		if len(res) > 0 {
			blockDeviceNames[res[1]] = true
		}
	}
	for _, letter := range letters {
		if !blockDeviceNames[letter] {
			nextDeviceName := "/dev/xvd" + letter
			log.Println("Got next device name: " + nextDeviceName)
			return nextDeviceName, nil
		}
	}
	return "", errors.New("No available device")
}
// getLocalDevices parses /proc/partitions and returns the device names in
// its fourth column, skipping the two header lines.
func getLocalDevices() (deviceNames []string, err error) {
	file := "/proc/partitions"
	contentBytes, err := ioutil.ReadFile(file)
	if err != nil {
		return []string{}, err
	}
	content := string(contentBytes)
	lines := strings.Split(content, "\n")
	// Guard against a malformed/short file: with fewer than three lines
	// there is nothing past the header to parse (and lines[2:] would
	// panic when fewer than two lines are present).
	if len(lines) < 3 {
		return deviceNames, nil
	}
	for _, line := range lines[2:] {
		fields := strings.Fields(line)
		if len(fields) == 4 {
			deviceNames = append(deviceNames, fields[3])
		}
	}
	return deviceNames, nil
}
// CreateVolume is not implemented for GCE; it logs the call and returns
// no volume.
func (d *driver) CreateVolume(
	runAsync bool, volumeName, volumeID, snapshotID, volumeType string,
	IOPS, size int64, availabilityZone string) (*core.Volume, error) {
	log.WithField("provider", providerName).Debug("CreateVolume")
	return nil, nil
}

// createVolumeCreateSnapshot is not implemented for GCE; it logs the call
// and returns an empty snapshot ID.
func (d *driver) createVolumeCreateSnapshot(
	volumeID string, snapshotID string) (string, error) {
	log.WithField("provider", providerName).Debug("CreateVolumeCreateSnapshot")
	return "", nil
}
// GetVolume lists disks in the configured project/zone, optionally
// filtered by volume ID and/or name, and returns them as core.Volumes
// with their current attachments resolved from the zone's instances.
//
// NOTE(review): both Filter calls are applied to the same query; when
// volumeID and volumeName are both set, confirm against the compute API
// client whether the second Filter replaces or combines with the first.
func (d *driver) GetVolume(
	volumeID, volumeName string) ([]*core.Volume, error) {
	log.WithField("provider", providerName).Debugf("GetVolume :%s %s", volumeID, volumeName)
	query := d.client.Disks.List(d.project, d.zone)
	if volumeID != "" {
		query.Filter(fmt.Sprintf("id eq %s", volumeID))
	}
	if volumeName != "" {
		query.Filter(fmt.Sprintf("name eq %s", volumeName))
	}
	// Build the attachment list for every instance in the zone; each
	// attachment is keyed by the disk's source URL for the join below.
	var attachments []*core.VolumeAttachment
	instances, err := d.client.Instances.List(d.project, d.zone).Do()
	if err != nil {
		return []*core.Volume{}, err
	}
	for _, instance := range instances.Items {
		for _, disk := range instance.Disks {
			attachment := &core.VolumeAttachment{
				InstanceID: strconv.FormatUint(instance.Id, 10),
				DeviceName: disk.DeviceName,
				Status:     disk.Mode,
				VolumeID:   disk.Source,
			}
			attachments = append(attachments, attachment)
		}
	}
	disks, err := query.Do()
	if err != nil {
		return []*core.Volume{}, err
	}
	var volumesSD []*core.Volume
	for _, disk := range disks.Items {
		// Collect this disk's attachments, rewriting the volume ID from
		// the source URL to the numeric disk ID.
		var diskAttachments []*core.VolumeAttachment
		for _, attachment := range attachments {
			if attachment.VolumeID == disk.SelfLink {
				diskAttachments = append(diskAttachments, &core.VolumeAttachment{
					InstanceID: attachment.InstanceID,
					DeviceName: attachment.DeviceName,
					Status:     attachment.Status,
					VolumeID:   strconv.FormatUint(disk.Id, 10),
				})
			}
		}
		volumeSD := &core.Volume{
			Name:             disk.Name,
			VolumeID:         strconv.FormatUint(disk.Id, 10),
			AvailabilityZone: disk.Zone,
			Status:           disk.Status,
			VolumeType:       disk.Kind,
			NetworkName:      disk.SelfLink,
			IOPS:             0,
			Size:             strconv.FormatInt(disk.SizeGb, 10),
			Attachments:      diskAttachments,
		}
		volumesSD = append(volumesSD, volumeSD)
	}
	return volumesSD, nil
}
// GetVolumeAttach lists current disk attachments for instances in the
// configured project/zone, optionally filtered to a single instance ID.
// The volumeID parameter is only logged; filtering by volume is not
// performed here.
func (d *driver) GetVolumeAttach(
	volumeID, instanceID string) ([]*core.VolumeAttachment, error) {
	log.WithField("provider", providerName).Debugf("GetVolumeAttach :%s %s", volumeID, instanceID)
	var attachments []*core.VolumeAttachment
	query := d.client.Instances.List(d.project, d.zone)
	if instanceID != "" {
		query.Filter(fmt.Sprintf("id eq %s", instanceID))
	}
	instances, err := query.Do()
	if err != nil {
		return []*core.VolumeAttachment{}, err
	}
	for _, instance := range instances.Items {
		for _, disk := range instance.Disks {
			attachment := &core.VolumeAttachment{
				InstanceID: strconv.FormatUint(instance.Id, 10),
				DeviceName: disk.DeviceName,
				Status:     disk.Mode,
				VolumeID:   disk.Source,
			}
			attachments = append(attachments, attachment)
		}
	}
	return attachments, nil
}

// waitSnapshotComplete is a no-op placeholder; snapshot waiting is not
// implemented for GCE.
func (d *driver) waitSnapshotComplete(snapshotID string) error {
	return nil
}

// waitVolumeComplete is a no-op placeholder.
func (d *driver) waitVolumeComplete(volumeID string) error {
	return nil
}

// waitVolumeAttach is a no-op placeholder.
func (d *driver) waitVolumeAttach(volumeID, instanceID string) error {
	return nil
}

// waitVolumeDetach is a no-op placeholder.
func (d *driver) waitVolumeDetach(volumeID string) error {
	return nil
}

// RemoveVolume is not implemented for GCE; it always reports success.
func (d *driver) RemoveVolume(volumeID string) error {
	return nil
}

// AttachVolume is not implemented for GCE; it logs the call and returns
// no attachments.
func (d *driver) AttachVolume(
	runAsync bool,
	volumeID, instanceID string) ([]*core.VolumeAttachment, error) {
	log.WithField("provider", providerName).Debug("AttachVolume")
	return nil, nil
}

// DetachVolume is not implemented for GCE; it logs the call and reports
// success.
func (d *driver) DetachVolume(
	runAsync bool,
	volumeID, blank string) error {
	log.WithField("provider", providerName).Debug("DetachVolume")
	return nil
}

// CopySnapshot is not implemented for GCE; it logs the call and returns
// no snapshot.
func (d *driver) CopySnapshot(runAsync bool,
	volumeID, snapshotID, snapshotName, destinationSnapshotName,
	destinationRegion string) (*core.Snapshot, error) {
	log.WithField("provider", providerName).Debug("CopySnapshot")
	return nil, nil
}
// configRegistration declares the configuration keys this driver consumes
// (zone, project, and service-account key file path).
func configRegistration() *config.Registration {
	r := config.NewRegistration("Google GCE")
	r.Key(config.String, "", "", "", "gce.zone")
	r.Key(config.String, "", "", "", "gce.project")
	r.Key(config.String, "", "", "", "gce.keyfile")
	return r
}
|
package ethutil
import (
"bytes"
"encoding/binary"
"encoding/hex"
"fmt"
"math/big"
"strings"
)
// Number to bytes
//
// Returns the number in bytes with the specified base
// NumberToBytes encodes num (a fixed-size numeric type) in big-endian
// order and returns only the low bits/8 bytes of the encoding. A write
// failure is reported to stdout and yields whatever was buffered.
func NumberToBytes(num interface{}, bits int) []byte {
	var scratch bytes.Buffer
	if err := binary.Write(&scratch, binary.BigEndian, num); err != nil {
		fmt.Println("NumberToBytes failed:", err)
	}
	encoded := scratch.Bytes()
	return encoded[len(encoded)-(bits/8):]
}
// Bytes to number
//
// Attempts to cast a byte slice to a unsigned integer
// BytesToNumber interprets b as a big-endian unsigned integer.
//
// The previous implementation built its padded buffer with
// append(data[:len(b)], b...), which mis-padded the input: bytes were
// dropped for inputs of length 5-7, 8-byte inputs decoded as 0, and
// inputs longer than 8 bytes panicked. Inputs are now left-padded
// correctly, and only the last 8 bytes of an over-long input are used.
func BytesToNumber(b []byte) uint64 {
	if len(b) > 8 {
		b = b[len(b)-8:]
	}
	data := make([]byte, 8)
	copy(data[8-len(b):], b)
	return binary.BigEndian.Uint64(data)
}
// Read variable int
//
// Read a variable length number in big endian byte order
func ReadVarint(reader *bytes.Reader) (ret uint64) {
if reader.Len() == 8 {
var num uint64
binary.Read(reader, binary.BigEndian, &num)
ret = uint64(num)
} else if reader.Len() == 4 {
var num uint32
binary.Read(reader, binary.BigEndian, &num)
ret = uint64(num)
} else if reader.Len() == 2 {
var num uint16
binary.Read(reader, binary.BigEndian, &num)
ret = uint64(num)
} else {
var num uint8
binary.Read(reader, binary.BigEndian, &num)
ret = uint64(num)
}
return ret
}
// Binary length
//
// Returns the true binary length of the given number
// BinaryLength returns how many bytes are needed to represent num: 0 for
// zero, otherwise one byte per non-zero 8-bit group. Intended for
// non-negative input (negative values never reach zero when shifted).
func BinaryLength(num int) int {
	length := 0
	for num != 0 {
		length++
		num >>= 8
	}
	return length
}
// Copy bytes
//
// Returns an exact copy of the provided bytes
func CopyBytes(b []byte) (copiedBytes []byte) {
copiedBytes = make([]byte, len(b))
copy(copiedBytes, b)
return
}
// IsHex reports whether str looks like a 0x-prefixed hex string with at
// least one full byte and an even number of characters.
func IsHex(str string) bool {
	if len(str) < 4 || len(str)%2 != 0 {
		return false
	}
	return strings.HasPrefix(str, "0x")
}
// Bytes2Hex returns the lowercase hex encoding of d, without a 0x prefix.
func Bytes2Hex(d []byte) string {
	return hex.EncodeToString(d)
}

// Hex2Bytes decodes a hex string (no 0x prefix) into bytes. Decode errors
// are deliberately ignored; invalid input yields an empty or partial
// result.
func Hex2Bytes(str string) []byte {
	h, _ := hex.DecodeString(str)
	return h
}
// StringToByteFunc converts str to bytes: a single-line 0x-prefixed
// string is hex-decoded, anything else is passed through the supplied
// callback.
func StringToByteFunc(str string, cb func(str string) []byte) (ret []byte) {
	isHexLiteral := len(str) > 1 && str[0:2] == "0x" && !strings.Contains(str, "\n")
	if isHexLiteral {
		return Hex2Bytes(str[2:])
	}
	return cb(str)
}
// FormatData converts a textual value into a 256-bit word:
//   - a double-quoted string is right-padded to 32 bytes,
//   - a 0x-prefixed string is hex-decoded,
//   - anything else is parsed as a number (base inferred by big.Int).
// Returns nil for empty input.
//
// NOTE(review): in the quoted branch the surrounding quote characters are
// kept in the padded output — confirm that is intended.
func FormatData(data string) []byte {
	if len(data) == 0 {
		return nil
	}
	// Simple stupid
	d := new(big.Int)
	if data[0:1] == "\"" && data[len(data)-1:] == "\"" {
		return RightPadBytes([]byte(data), 32)
	} else if len(data) > 1 && data[:2] == "0x" {
		d.SetBytes(Hex2Bytes(data[2:]))
	} else {
		d.SetString(data, 0)
	}
	return BigToBytes(d, 256)
}
func RightPadBytes(slice []byte, l int) []byte {
if l < len(slice) {
return slice
}
padded := make([]byte, l)
copy(padded[0:len(slice)], slice)
return padded
}
func LeftPadBytes(slice []byte, l int) []byte {
if l < len(slice) {
return slice
}
padded := make([]byte, l)
copy(padded[l-len(slice):], slice)
return padded
}
Convert a byte slice to an address
package ethutil
import (
"bytes"
"encoding/binary"
"encoding/hex"
"fmt"
"math/big"
"strings"
)
// Number to bytes
//
// Returns the number in bytes with the specified base
// NumberToBytes encodes num (a fixed-size numeric type) in big-endian
// order and returns only the low bits/8 bytes of the encoding.
// NOTE(review): assumes bits/8 does not exceed the encoded width, else
// the final slice expression panics — confirm callers.
func NumberToBytes(num interface{}, bits int) []byte {
	buf := new(bytes.Buffer)
	err := binary.Write(buf, binary.BigEndian, num)
	if err != nil {
		fmt.Println("NumberToBytes failed:", err)
	}
	return buf.Bytes()[buf.Len()-(bits/8):]
}
// Bytes to number
//
// Attempts to cast a byte slice to a unsigned integer
// BytesToNumber interprets b as a big-endian unsigned integer.
//
// The previous implementation built its padded buffer with
// append(data[:len(b)], b...), which mis-padded the input: bytes were
// dropped for inputs of length 5-7, 8-byte inputs decoded as 0, and
// inputs longer than 8 bytes panicked. Inputs are now left-padded
// correctly, and only the last 8 bytes of an over-long input are used.
func BytesToNumber(b []byte) uint64 {
	if len(b) > 8 {
		b = b[len(b)-8:]
	}
	data := make([]byte, 8)
	copy(data[8-len(b):], b)
	return binary.BigEndian.Uint64(data)
}
// Read variable int
//
// Read a variable length number in big endian byte order
// ReadVarint reads a big-endian unsigned integer whose width is implied
// by the number of bytes remaining in reader: 8, 4, or 2 bytes for the
// usual fixed widths, and a single byte for any other length.
func ReadVarint(reader *bytes.Reader) (ret uint64) {
	if reader.Len() == 8 {
		var num uint64
		binary.Read(reader, binary.BigEndian, &num)
		ret = uint64(num)
	} else if reader.Len() == 4 {
		var num uint32
		binary.Read(reader, binary.BigEndian, &num)
		ret = uint64(num)
	} else if reader.Len() == 2 {
		var num uint16
		binary.Read(reader, binary.BigEndian, &num)
		ret = uint64(num)
	} else {
		// Any other remaining length: read a single byte.
		var num uint8
		binary.Read(reader, binary.BigEndian, &num)
		ret = uint64(num)
	}
	return ret
}
// Binary length
//
// Returns the true binary length of the given number
// BinaryLength returns how many bytes are needed to represent num: 0 for
// zero, otherwise one byte per non-zero 8-bit group. Intended for
// non-negative input (negative values never reach zero when shifted).
func BinaryLength(num int) int {
	if num == 0 {
		return 0
	}
	return 1 + BinaryLength(num>>8)
}

// Copy bytes
//
// Returns an exact copy of the provided bytes; the result never aliases
// the input. A nil input yields an empty, non-nil slice.
func CopyBytes(b []byte) (copiedBytes []byte) {
	copiedBytes = make([]byte, len(b))
	copy(copiedBytes, b)
	return
}
// IsHex reports whether str looks like a 0x-prefixed hex string with at
// least one full byte and an even number of characters.
func IsHex(str string) bool {
	l := len(str)
	return l >= 4 && l%2 == 0 && str[0:2] == "0x"
}

// Bytes2Hex returns the lowercase hex encoding of d, without a 0x prefix.
func Bytes2Hex(d []byte) string {
	return hex.EncodeToString(d)
}

// Hex2Bytes decodes a hex string (no 0x prefix) into bytes. Decode errors
// are deliberately ignored; invalid input yields an empty or partial
// result.
func Hex2Bytes(str string) []byte {
	h, _ := hex.DecodeString(str)
	return h
}

// StringToByteFunc converts str to bytes: a single-line 0x-prefixed
// string is hex-decoded, anything else is passed through the supplied
// callback.
func StringToByteFunc(str string, cb func(str string) []byte) (ret []byte) {
	if len(str) > 1 && str[0:2] == "0x" && !strings.Contains(str, "\n") {
		ret = Hex2Bytes(str[2:])
	} else {
		ret = cb(str)
	}
	return
}
// FormatData converts a textual value into a 256-bit word:
//   - a double-quoted string is right-padded to 32 bytes,
//   - a 0x-prefixed string is hex-decoded,
//   - anything else is parsed as a number (base inferred by big.Int).
// Returns nil for empty input.
//
// NOTE(review): in the quoted branch the surrounding quote characters are
// kept in the padded output — confirm that is intended.
func FormatData(data string) []byte {
	if len(data) == 0 {
		return nil
	}
	// Simple stupid
	d := new(big.Int)
	if data[0:1] == "\"" && data[len(data)-1:] == "\"" {
		return RightPadBytes([]byte(data), 32)
	} else if len(data) > 1 && data[:2] == "0x" {
		d.SetBytes(Hex2Bytes(data[2:]))
	} else {
		d.SetString(data, 0)
	}
	return BigToBytes(d, 256)
}
// RightPadBytes grows slice to length l by appending zero bytes. Input
// strictly longer than l is handed back untouched (no copy).
func RightPadBytes(slice []byte, l int) []byte {
	if l < len(slice) {
		return slice
	}
	padded := make([]byte, l)
	copy(padded[0:len(slice)], slice)
	return padded
}

// LeftPadBytes grows slice to length l by prepending zero bytes. Input
// strictly longer than l is handed back untouched (no copy).
func LeftPadBytes(slice []byte, l int) []byte {
	if l < len(slice) {
		return slice
	}
	padded := make([]byte, l)
	copy(padded[l-len(slice):], slice)
	return padded
}
// Address normalizes slice to exactly 20 bytes: shorter input is
// zero-padded on the left, longer input keeps only its last 20 bytes, and
// input of exactly 20 bytes is returned as-is.
func Address(slice []byte) []byte {
	switch {
	case len(slice) < 20:
		return LeftPadBytes(slice, 20)
	case len(slice) > 20:
		return slice[len(slice)-20:]
	default:
		return slice
	}
}
|
package event
import (
"sync"
"time"
"github.com/oakmound/oak/timing"
)
// Oak uses the following built in events:
//
// - EnterFrame: the beginning of every logical frame.
// Payload: (int) frames passed since this scene started
//
// - CollisionStart/Stop: when a PhaseCollision entity starts/stops touching some label.
// Payload: (collision.Label) the label the entity has started/stopped touching
//
// - MouseCollisionStart/Stop: as above, for mouse collision
// Payload: (mouse.Event)
//
// - KeyDown/Up: when a key is pressed down / lifted up.
// Payload: (string) the key pressed
//
// - Mouse events: MousePress, MouseRelease, MouseScrollDown, MouseScrollUp, MouseDrag
// Payload: (mouse.Event) details on the mouse event
//
// - AnimationEnd: Triggered on animations CIDs when they loop from the last to the first frame
// Payload: nil
//
// - ViewportUpdate: Triggered when the viewport changes.
// Payload: []float64{viewportX, viewportY}
// Trigger an event, but only
// for one ID. Use case example:
// on onHit event
// Trigger fires eventName with data for this single CID, asynchronously.
// Bindings run in priority order: high priorities top-down, then the
// default priority, then low priorities bottom-up.
func (id CID) Trigger(eventName string, data interface{}) {
	go func(eventName string, data interface{}) {
		eb := GetBus()
		mutex.RLock()
		iid := int(id)
		if idMap, ok := eb.bindingMap[eventName]; ok {
			if bs, ok := idMap[iid]; ok {
				for i := bs.highIndex - 1; i >= 0; i-- {
					// Distant priority bindings can leave nil slots in the
					// priority lists; skip them instead of dereferencing
					// nil (previously a panic).
					lst := bs.highPriority[i]
					if lst != nil {
						triggerDefault((*lst).sl, iid, eventName, data)
					}
				}
				// Mirror ebtrigger's defaultPriority nil guard.
				if bs.defaultPriority != nil {
					triggerDefault((bs.defaultPriority).sl, iid, eventName, data)
				}
				for i := 0; i < bs.lowIndex; i++ {
					lst := bs.lowPriority[i]
					if lst != nil {
						triggerDefault((*lst).sl, iid, eventName, data)
					}
				}
			}
		}
		mutex.RUnlock()
	}(eventName, data)
}
// TriggerAfter will trigger the given event after d time.
func (id CID) TriggerAfter(d time.Duration, eventName string, data interface{}) {
	go func() {
		timing.DoAfter(d, func() {
			id.Trigger(eventName, data)
		})
	}()
}

// Trigger is equivalent to bus.Trigger(...) on the package-level bus.
// Todo: move this to legacy.go, see mouse or collision
func Trigger(eventName string, data interface{}) {
	thisBus.Trigger(eventName, data)
}

// TriggerBack is equivalent to bus.TriggerBack(...) on the package-level
// bus.
func TriggerBack(eventName string, data interface{}) chan bool {
	return thisBus.TriggerBack(eventName, data)
}
// TriggerBack is a version of Trigger which returns a channel that
// informs on when all bindables have been called and returned from
// the input event. It is dangerous to use this unless you have a
// very good idea how things will synchronize, as if a triggered
// bindable itself makes a TriggerBack call, this will cause the engine to freeze,
// as the function will never end because the first TriggerBack has control of
// the lock for the event bus, and the first TriggerBack won't give up that lock
// until the function ends.
//
// This inherently means that when you call Trigger, the event will almost
// almost never be immediately triggered but rather will be triggered sometime
// soon in the future.
//
// TriggerBack is right now used by the primary logic loop to dictate logical
// framerate, so EnterFrame events are called through TriggerBack.
// TriggerBack runs ebtrigger on a new goroutine and returns a channel
// that receives true once every bindable has been called and returned
// (see the warning in the comment above about deadlocks).
func (eb *Bus) TriggerBack(eventName string, data interface{}) chan bool {
	ch := make(chan bool)
	go func(ch chan bool, eb *Bus, eventName string, data interface{}) {
		ebtrigger(eb, eventName, data)
		ch <- true
	}(ch, eb, eventName, data)
	return ch
}

// Trigger will scan through the event bus and call all bindables found attached
// to the given event, with the passed in data. It is fire-and-forget: the
// work happens on a new goroutine and completion is not reported.
func (eb *Bus) Trigger(eventName string, data interface{}) {
	go func(eb *Bus, eventName string, data interface{}) {
		ebtrigger(eb, eventName, data)
	}(eb, eventName, data)
}
// ebtrigger synchronously runs every binding attached to eventName on eb,
// in three passes: high priorities (top-down), default priority, then low
// priorities (bottom-up). It holds the bus read lock for the duration.
func ebtrigger(eb *Bus, eventName string, data interface{}) {
	mutex.RLock()
	// Loop through all bindableStores for this eventName
	for id, bs := range (*eb).bindingMap[eventName] {
		// Top to bottom, high priority
		for i := bs.highIndex - 1; i >= 0; i-- {
			// Distant priority bindings can leave nil slots in the
			// priority lists; skip them instead of dereferencing nil
			// (previously a panic).
			lst := bs.highPriority[i]
			if lst != nil {
				triggerDefault((*lst).sl, id, eventName, data)
			}
		}
	}
	for id, bs := range (*eb).bindingMap[eventName] {
		if bs != nil && bs.defaultPriority != nil {
			triggerDefault((bs.defaultPriority).sl, id, eventName, data)
		}
	}
	for id, bs := range (*eb).bindingMap[eventName] {
		// Bottom to top, low priority
		for i := 0; i < bs.lowIndex; i++ {
			lst := bs.lowPriority[i]
			if lst != nil {
				triggerDefault((*lst).sl, id, eventName, data)
			}
		}
	}
	mutex.RUnlock()
}
// triggerDefault invokes every bindable in sl concurrently (one goroutine
// each) and blocks until all of them have finished.
func triggerDefault(sl []Bindable, id int, eventName string, data interface{}) {
	prog := &sync.WaitGroup{}
	prog.Add(len(sl))
	for i, bnd := range sl {
		// Loop variables are passed explicitly so each goroutine gets its
		// own copy.
		go func(bnd Bindable, id int, eventName string, data interface{}, prog *sync.WaitGroup, index int) {
			handleBindable(bnd, id, data, index, eventName)
			prog.Done()
		}(bnd, id, eventName, data, prog, i)
	}
	prog.Wait()
}
// handleBindable runs a single bindable for entity id and applies the
// binding's requested follow-up: UnbindEvent removes every binding on
// this event/id pair, UnbindSingle removes only this one. Nil bindables
// and bindables whose entity no longer exists are skipped.
func handleBindable(bnd Bindable, id int, data interface{}, index int, eventName string) {
	if bnd != nil {
		// id 0 is treated as a non-entity binding; otherwise the entity
		// must still be registered.
		if id == 0 || GetEntity(id) != nil {
			response := bnd(id, data)
			switch response {
			case UnbindEvent:
				UnbindAll(BindingOption{
					Event{
						eventName,
						id,
					},
					0,
				})
			case UnbindSingle:
				binding{
					BindingOption{
						Event{
							eventName,
							id,
						},
						0,
					},
					index,
				}.unbind()
			}
		}
	}
}
When using distant priority bindings, this fixes a potential nil pointer dereference
package event
import (
"sync"
"time"
"github.com/oakmound/oak/timing"
)
// Oak uses the following built in events:
//
// - EnterFrame: the beginning of every logical frame.
// Payload: (int) frames passed since this scene started
//
// - CollisionStart/Stop: when a PhaseCollision entity starts/stops touching some label.
// Payload: (collision.Label) the label the entity has started/stopped touching
//
// - MouseCollisionStart/Stop: as above, for mouse collision
// Payload: (mouse.Event)
//
// - KeyDown/Up: when a key is pressed down / lifted up.
// Payload: (string) the key pressed
//
// - Mouse events: MousePress, MouseRelease, MouseScrollDown, MouseScrollUp, MouseDrag
// Payload: (mouse.Event) details on the mouse event
//
// - AnimationEnd: Triggered on animations CIDs when they loop from the last to the first frame
// Payload: nil
//
// - ViewportUpdate: Triggered when the viewport changes.
// Payload: []float64{viewportX, viewportY}
// Trigger an event, but only
// for one ID. Use case example:
// on onHit event
// Trigger fires eventName with data for this single CID, asynchronously.
// Bindings run in priority order: high priorities top-down, then the
// default priority, then low priorities bottom-up. Nil priority slots
// (possible with distant priority bindings) are skipped.
func (id CID) Trigger(eventName string, data interface{}) {
	go func(eventName string, data interface{}) {
		eb := GetBus()
		mutex.RLock()
		iid := int(id)
		if idMap, ok := eb.bindingMap[eventName]; ok {
			if bs, ok := idMap[iid]; ok {
				for i := bs.highIndex - 1; i >= 0; i-- {
					lst := bs.highPriority[i]
					if lst != nil {
						triggerDefault((*lst).sl, iid, eventName, data)
					}
				}
				// ebtrigger guards defaultPriority against nil; mirror
				// that here for consistency and to avoid a panic when it
				// is unset.
				if bs.defaultPriority != nil {
					triggerDefault((bs.defaultPriority).sl, iid, eventName, data)
				}
				for i := 0; i < bs.lowIndex; i++ {
					lst := bs.lowPriority[i]
					if lst != nil {
						triggerDefault((*lst).sl, iid, eventName, data)
					}
				}
			}
		}
		mutex.RUnlock()
	}(eventName, data)
}
// TriggerAfter will trigger the given event after d time.
func (id CID) TriggerAfter(d time.Duration, eventName string, data interface{}) {
	go func() {
		timing.DoAfter(d, func() {
			id.Trigger(eventName, data)
		})
	}()
}

// Trigger is equivalent to bus.Trigger(...) on the package-level bus.
// Todo: move this to legacy.go, see mouse or collision
func Trigger(eventName string, data interface{}) {
	thisBus.Trigger(eventName, data)
}

// TriggerBack is equivalent to bus.TriggerBack(...) on the package-level
// bus.
func TriggerBack(eventName string, data interface{}) chan bool {
	return thisBus.TriggerBack(eventName, data)
}
// TriggerBack is a version of Trigger which returns a channel that
// informs on when all bindables have been called and returned from
// the input event. It is dangerous to use this unless you have a
// very good idea how things will synchronize: if a triggered
// bindable itself makes a TriggerBack call, this will cause the engine
// to freeze, as the function will never end because the first
// TriggerBack has control of the lock for the event bus, and the first
// TriggerBack won't give up that lock until the function ends.
//
// This inherently means that when you call Trigger, the event will
// almost never be immediately triggered but rather will be triggered
// sometime soon in the future.
//
// TriggerBack is right now used by the primary logic loop to dictate
// logical framerate, so EnterFrame events are called through TriggerBack.
func (eb *Bus) TriggerBack(eventName string, data interface{}) chan bool {
	// Buffer of 1 lets the worker goroutine deliver its completion
	// signal and exit even when the caller never reads the channel;
	// the original unbuffered send leaked one goroutine per dropped
	// channel.
	ch := make(chan bool, 1)
	go func(ch chan bool, eb *Bus, eventName string, data interface{}) {
		ebtrigger(eb, eventName, data)
		ch <- true
	}(ch, eb, eventName, data)
	return ch
}
// Trigger will scan through the event bus and call all bindables found
// attached to the given event, with the passed in data.
// The scan happens asynchronously on its own goroutine.
func (eb *Bus) Trigger(eventName string, data interface{}) {
	// The closure captures the call's own parameters directly; no
	// explicit argument passing is needed.
	go func() {
		ebtrigger(eb, eventName, data)
	}()
}
// ebtrigger calls every bindable attached to eventName on eb, for every
// CID, in three priority passes: high priority (highest index first),
// then default priority, then low priority (lowest index first). Each
// pass visits all CIDs before the next pass starts, so priority is
// respected across entities.
func ebtrigger(eb *Bus, eventName string, data interface{}) {
	mutex.RLock()
	// Loop through all bindableStores for this eventName.
	// The original only nil-checked bs in the default-priority pass;
	// a nil store in the map would have panicked in the other two.
	for id, bs := range eb.bindingMap[eventName] {
		if bs == nil {
			continue
		}
		// Top to bottom, high priority
		for i := bs.highIndex - 1; i >= 0; i-- {
			lst := bs.highPriority[i]
			if lst != nil {
				triggerDefault(lst.sl, id, eventName, data)
			}
		}
	}
	for id, bs := range eb.bindingMap[eventName] {
		if bs != nil && bs.defaultPriority != nil {
			triggerDefault(bs.defaultPriority.sl, id, eventName, data)
		}
	}
	for id, bs := range eb.bindingMap[eventName] {
		if bs == nil {
			continue
		}
		// Bottom to top, low priority
		for i := 0; i < bs.lowIndex; i++ {
			lst := bs.lowPriority[i]
			if lst != nil {
				triggerDefault(lst.sl, id, eventName, data)
			}
		}
	}
	mutex.RUnlock()
}
// triggerDefault runs every bindable in sl concurrently and blocks
// until all of them have returned.
func triggerDefault(sl []Bindable, id int, eventName string, data interface{}) {
	var wg sync.WaitGroup
	wg.Add(len(sl))
	for i, bnd := range sl {
		go func(index int, b Bindable) {
			defer wg.Done()
			handleBindable(b, id, data, index, eventName)
		}(i, bnd)
	}
	wg.Wait()
}
// handleBindable invokes a single bindable and applies its unbinding
// response. Bindables on CID 0 are always called; bindables attached
// to an entity CID are skipped when GetEntity reports the entity gone.
// An UnbindEvent response removes every binding for this (event, id)
// pair; UnbindSingle removes only the binding at this index.
func handleBindable(bnd Bindable, id int, data interface{}, index int, eventName string) {
	if bnd != nil {
		if id == 0 || GetEntity(id) != nil {
			response := bnd(id, data)
			switch response {
			case UnbindEvent:
				UnbindAll(BindingOption{
					Event{
						eventName,
						id,
					},
					0,
				})
			case UnbindSingle:
				binding{
					BindingOption{
						Event{
							eventName,
							id,
						},
						0,
					},
					index,
				}.unbind()
			}
		}
	}
}
|
package morningStar
import (
"../jsonHttp"
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"regexp"
"strconv"
"sync"
"time"
)
const IDS_URL = `https://elasticsearch.vibioh.fr/funds/morningStarId/_search?size=8000`
const PERFORMANCE_URL = `http://www.morningstar.fr/fr/funds/snapshot/snapshot.aspx?tab=1&id=`
const VOLATILITE_URL = `http://www.morningstar.fr/fr/funds/snapshot/snapshot.aspx?tab=2&id=`
const REFRESH_DELAY_HOURS = 12
const CONCURRENT_FETCHER = 32
var EMPTY_BYTE = []byte(``)
var ZERO_BYTE = []byte(`0`)
var PERIOD_BYTE = []byte(`.`)
var COMMA_BYTE = []byte(`,`)
var PERCENT_BYTE = []byte(`%`)
var AMP_BYTE = []byte(`&`)
var HTML_AMP_BYTE = []byte(`&`)
var LIST_REQUEST = regexp.MustCompile(`^/list$`)
var PERF_REQUEST = regexp.MustCompile(`^/(.+?)$`)
var ID = regexp.MustCompile(`"_id":"(.*?)"`)
var ISIN = regexp.MustCompile(`ISIN.:(\S+)`)
var LABEL = regexp.MustCompile(`<h1[^>]*?>((?:.|\n)*?)</h1>`)
var RATING = regexp.MustCompile(`<span\sclass=".*?stars([0-9]).*?">`)
var CATEGORY = regexp.MustCompile(`<span[^>]*?>Catégorie</span>.*?<span[^>]*?>(.*?)</span>`)
var PERF_ONE_MONTH = regexp.MustCompile(`<td[^>]*?>1 mois</td><td[^>]*?>(.*?)</td>`)
var PERF_THREE_MONTH = regexp.MustCompile(`<td[^>]*?>3 mois</td><td[^>]*?>(.*?)</td>`)
var PERF_SIX_MONTH = regexp.MustCompile(`<td[^>]*?>6 mois</td><td[^>]*?>(.*?)</td>`)
var PERF_ONE_YEAR = regexp.MustCompile(`<td[^>]*?>1 an</td><td[^>]*?>(.*?)</td>`)
var VOL_3_YEAR = regexp.MustCompile(`<td[^>]*?>Ecart-type 3 ans.?</td><td[^>]*?>(.*?)</td>`)
type Performance struct {
Id string `json:"id"`
Isin string `json:"isin"`
Label string `json:"label"`
Category string `json:"category"`
Rating string `json:"rating"`
OneMonth float64 `json:"1m"`
ThreeMonth float64 `json:"3m"`
SixMonth float64 `json:"6m"`
OneYear float64 `json:"1y"`
VolThreeYears float64 `json:"v3y"`
Score float64 `json:"score"`
Update time.Time `json:"ts"`
}
// SyncedMap is a Performance cache that is safe for concurrent use by
// multiple readers and writers.
type SyncedMap struct {
	sync.RWMutex
	performances map[string]*Performance
}

// get returns the cached Performance for key and whether it was present.
func (m *SyncedMap) get(key string) (*Performance, bool) {
	m.RLock()
	perf, found := m.performances[key]
	m.RUnlock()
	return perf, found
}

// push stores performance under key, replacing any previous entry.
func (m *SyncedMap) push(key string, performance *Performance) {
	m.Lock()
	m.performances[key] = performance
	m.Unlock()
}
var PERFORMANCE_CACHE = SyncedMap{performances: make(map[string]*Performance)}
type Results struct {
Results interface{} `json:"results"`
}
// init starts a background goroutine that fills the performance cache
// once at startup and then refreshes it every REFRESH_DELAY_HOURS hours.
func init() {
	go func() {
		refreshCache()
		// time.Tick is fine here: the ticker must live for the whole
		// process lifetime, so it is deliberately never stopped.
		c := time.Tick(REFRESH_DELAY_HOURS * time.Hour)
		for range c {
			refreshCache()
		}
	}()
}
// refreshCache re-fetches every known fund id from morningstar and
// stores the resulting performances in PERFORMANCE_CACHE, keyed by the
// cleaned fund id.
func refreshCache() {
	log.Print(`Cache refresh - start`)
	defer log.Print(`Cache refresh - end`)
	for _, performance := range retrievePerformances(fetchIds(), fetchPerformance) {
		PERFORMANCE_CACHE.push(performance.Id, performance)
	}
}
func readBody(body io.ReadCloser) ([]byte, error) {
defer body.Close()
return ioutil.ReadAll(body)
}
func getBody(url string) ([]byte, error) {
response, err := http.Get(url)
if err != nil {
return nil, fmt.Errorf(`Error while retrieving data from %s: %v`, url, err)
}
if response.StatusCode >= 400 {
return nil, fmt.Errorf(`Got error %d while getting %s`, response.StatusCode, url)
}
body, err := readBody(response.Body)
if err != nil {
return nil, fmt.Errorf(`Error while reading body of %s: %v`, url, err)
}
return body, nil
}
func extractLabel(extract *regexp.Regexp, body []byte, defaultValue []byte) []byte {
match := extract.FindSubmatch(body)
if match == nil {
return defaultValue
}
return bytes.Replace(match[1], HTML_AMP_BYTE, AMP_BYTE, -1)
}
func extractPerformance(extract *regexp.Regexp, body []byte) float64 {
dotResult := bytes.Replace(extractLabel(extract, body, EMPTY_BYTE), COMMA_BYTE, PERIOD_BYTE, -1)
percentageResult := bytes.Replace(dotResult, PERCENT_BYTE, EMPTY_BYTE, -1)
trimResult := bytes.TrimSpace(percentageResult)
result, err := strconv.ParseFloat(string(trimResult), 64)
if err != nil {
return 0.0
}
return result
}
// cleanId normalizes a raw morningstar identifier to a lowercase string.
func cleanId(morningStarId []byte) string {
	lowered := bytes.ToLower(morningStarId)
	return string(lowered)
}
// fetchPerformance scrapes the morningstar performance and volatility
// pages for the given id and assembles a Performance record.
// The score is a weighted blend of period returns minus a volatility
// penalty, truncated (not rounded) to two decimals.
func fetchPerformance(morningStarId []byte) (*Performance, error) {
	// Note: this local shadows the cleanId function for the rest of
	// the scope.
	cleanId := cleanId(morningStarId)
	performanceBody, err := getBody(PERFORMANCE_URL + cleanId)
	if err != nil {
		return nil, err
	}
	volatiliteBody, err := getBody(VOLATILITE_URL + cleanId)
	if err != nil {
		return nil, err
	}
	isin := string(extractLabel(ISIN, performanceBody, EMPTY_BYTE))
	label := string(extractLabel(LABEL, performanceBody, EMPTY_BYTE))
	rating := string(extractLabel(RATING, performanceBody, ZERO_BYTE))
	category := string(extractLabel(CATEGORY, performanceBody, EMPTY_BYTE))
	oneMonth := extractPerformance(PERF_ONE_MONTH, performanceBody)
	threeMonths := extractPerformance(PERF_THREE_MONTH, performanceBody)
	sixMonths := extractPerformance(PERF_SIX_MONTH, performanceBody)
	oneYear := extractPerformance(PERF_ONE_YEAR, performanceBody)
	volThreeYears := extractPerformance(VOL_3_YEAR, volatiliteBody)
	// Weighted score; weights sum to 1.0 over returns, volatility is a
	// 0.1-weighted penalty.
	score := (0.25 * oneMonth) + (0.3 * threeMonths) + (0.25 * sixMonths) + (0.2 * oneYear) - (0.1 * volThreeYears)
	scoreTruncated := float64(int(score*100)) / 100
	return &Performance{cleanId, isin, label, category, rating, oneMonth, threeMonths, sixMonths, oneYear, volThreeYears, scoreTruncated, time.Now()}, nil
}
// fetchIds downloads the fund id list from elasticsearch and returns
// every matched morningstar id. It logs and returns nil when the
// request fails.
func fetchIds() [][]byte {
	// Early return instead of the original else-after-return block.
	idsBody, err := getBody(IDS_URL)
	if err != nil {
		log.Print(err)
		return nil
	}
	idsMatch := ID.FindAllSubmatch(idsBody, -1)
	ids := make([][]byte, 0, len(idsMatch))
	for _, match := range idsMatch {
		ids = append(ids, match[1])
	}
	return ids
}
// retrievePerformance returns the Performance for the given id, served
// from PERFORMANCE_CACHE when the cached entry is younger than the
// refresh delay (plus one hour of slack), fetching and caching it
// otherwise.
func retrievePerformance(morningStarId []byte) (*Performance, error) {
	// Renamed from the original's `cleanId :=` which shadowed the
	// cleanId function.
	id := cleanId(morningStarId)
	if cached, ok := PERFORMANCE_CACHE.get(id); ok && time.Now().Add(time.Hour*-(REFRESH_DELAY_HOURS+1)).Before(cached.Update) {
		return cached, nil
	}
	performance, err := fetchPerformance(morningStarId)
	if err != nil {
		return nil, err
	}
	PERFORMANCE_CACHE.push(id, performance)
	return performance, nil
}
// concurrentRetrievePerformances fetches each id with method, running
// at most CONCURRENT_FETCHER fetches in flight, and writes successful
// results to performances. Failed fetches are dropped silently (the
// caller simply sees fewer results). wg must already have been sized
// to len(ids) by the caller; every goroutine calls wg.Done exactly once.
func concurrentRetrievePerformances(ids [][]byte, wg *sync.WaitGroup, performances chan<- *Performance, method func([]byte) (*Performance, error)) {
	// tokens is a counting semaphore bounding in-flight fetches.
	tokens := make(chan int, CONCURRENT_FETCHER)
	clearSemaphores := func() {
		wg.Done()
		<-tokens
	}
	for _, id := range ids {
		tokens <- 1 // blocks once CONCURRENT_FETCHER fetches are running
		go func(morningStarId []byte) {
			defer clearSemaphores()
			if performance, err := method(morningStarId); err == nil {
				performances <- performance
			}
		}(id)
	}
}
// retrievePerformances fetches every id via method and collects the
// successful results. Result order is not deterministic; failures are
// simply absent from the returned slice.
func retrievePerformances(ids [][]byte, method func([]byte) (*Performance, error)) []*Performance {
	var wg sync.WaitGroup
	wg.Add(len(ids))
	performances := make(chan *Performance, CONCURRENT_FETCHER)
	go concurrentRetrievePerformances(ids, &wg, performances, method)
	// Close the channel once every fetch (success or failure) has
	// finished, so the collection loop below terminates.
	go func() {
		wg.Wait()
		close(performances)
	}()
	results := make([]*Performance, 0, len(ids))
	for performance := range performances {
		results = append(results, performance)
	}
	return results
}
// performanceHandler writes the performance for the requested fund id
// as JSON, or a 500 with the error message when retrieval fails.
func performanceHandler(w http.ResponseWriter, morningStarId []byte) {
	performance, err := retrievePerformance(morningStarId)
	if err != nil {
		// Named constant instead of the magic 500; early return keeps
		// the happy path unindented.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	jsonHttp.ResponseJson(w, *performance)
}
// listHandler writes the performances of every known fund id as a JSON
// Results envelope. Cached entries are served where fresh.
func listHandler(w http.ResponseWriter, r *http.Request) {
	jsonHttp.ResponseJson(w, Results{retrievePerformances(fetchIds(), retrievePerformance)})
}
// Handler routes morningstar API requests.
type Handler struct {
}

// ServeHTTP dispatches /list to the list handler and any other
// non-empty path to the single-fund performance handler.
// Note: PERF_REQUEST also matches /list, so the /list check must run
// first.
func (handler Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// CORS and content-sniffing headers for browser clients.
	w.Header().Add(`Access-Control-Allow-Origin`, `*`)
	w.Header().Add(`Access-Control-Allow-Headers`, `Content-Type`)
	w.Header().Add(`Access-Control-Allow-Methods`, `GET, POST`)
	w.Header().Add(`X-Content-Type-Options`, `nosniff`)
	urlPath := []byte(r.URL.Path)
	if LIST_REQUEST.Match(urlPath) {
		listHandler(w, r)
	} else if PERF_REQUEST.Match(urlPath) {
		performanceHandler(w, PERF_REQUEST.FindSubmatch(urlPath)[1])
	}
}
Update morningStar.go: restrict CORS Access-Control-Allow-Methods from "GET, POST" to "GET"
package morningStar
import (
"../jsonHttp"
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"regexp"
"strconv"
"sync"
"time"
)
const IDS_URL = `https://elasticsearch.vibioh.fr/funds/morningStarId/_search?size=8000`
const PERFORMANCE_URL = `http://www.morningstar.fr/fr/funds/snapshot/snapshot.aspx?tab=1&id=`
const VOLATILITE_URL = `http://www.morningstar.fr/fr/funds/snapshot/snapshot.aspx?tab=2&id=`
const REFRESH_DELAY_HOURS = 12
const CONCURRENT_FETCHER = 32
var EMPTY_BYTE = []byte(``)
var ZERO_BYTE = []byte(`0`)
var PERIOD_BYTE = []byte(`.`)
var COMMA_BYTE = []byte(`,`)
var PERCENT_BYTE = []byte(`%`)
var AMP_BYTE = []byte(`&`)
var HTML_AMP_BYTE = []byte(`&`)
var LIST_REQUEST = regexp.MustCompile(`^/list$`)
var PERF_REQUEST = regexp.MustCompile(`^/(.+?)$`)
var ID = regexp.MustCompile(`"_id":"(.*?)"`)
var ISIN = regexp.MustCompile(`ISIN.:(\S+)`)
var LABEL = regexp.MustCompile(`<h1[^>]*?>((?:.|\n)*?)</h1>`)
var RATING = regexp.MustCompile(`<span\sclass=".*?stars([0-9]).*?">`)
var CATEGORY = regexp.MustCompile(`<span[^>]*?>Catégorie</span>.*?<span[^>]*?>(.*?)</span>`)
var PERF_ONE_MONTH = regexp.MustCompile(`<td[^>]*?>1 mois</td><td[^>]*?>(.*?)</td>`)
var PERF_THREE_MONTH = regexp.MustCompile(`<td[^>]*?>3 mois</td><td[^>]*?>(.*?)</td>`)
var PERF_SIX_MONTH = regexp.MustCompile(`<td[^>]*?>6 mois</td><td[^>]*?>(.*?)</td>`)
var PERF_ONE_YEAR = regexp.MustCompile(`<td[^>]*?>1 an</td><td[^>]*?>(.*?)</td>`)
var VOL_3_YEAR = regexp.MustCompile(`<td[^>]*?>Ecart-type 3 ans.?</td><td[^>]*?>(.*?)</td>`)
type Performance struct {
Id string `json:"id"`
Isin string `json:"isin"`
Label string `json:"label"`
Category string `json:"category"`
Rating string `json:"rating"`
OneMonth float64 `json:"1m"`
ThreeMonth float64 `json:"3m"`
SixMonth float64 `json:"6m"`
OneYear float64 `json:"1y"`
VolThreeYears float64 `json:"v3y"`
Score float64 `json:"score"`
Update time.Time `json:"ts"`
}
type SyncedMap struct {
sync.RWMutex
performances map[string]*Performance
}
func (m *SyncedMap) get(key string) (*Performance, bool) {
m.RLock()
defer m.RUnlock()
performance, ok := m.performances[key]
return performance, ok
}
func (m *SyncedMap) push(key string, performance *Performance) {
m.Lock()
defer m.Unlock()
m.performances[key] = performance
}
var PERFORMANCE_CACHE = SyncedMap{performances: make(map[string]*Performance)}
type Results struct {
Results interface{} `json:"results"`
}
func init() {
go func() {
refreshCache()
c := time.Tick(REFRESH_DELAY_HOURS * time.Hour)
for range c {
refreshCache()
}
}()
}
func refreshCache() {
log.Print(`Cache refresh - start`)
defer log.Print(`Cache refresh - end`)
for _, performance := range retrievePerformances(fetchIds(), fetchPerformance) {
PERFORMANCE_CACHE.push(performance.Id, performance)
}
}
func readBody(body io.ReadCloser) ([]byte, error) {
defer body.Close()
return ioutil.ReadAll(body)
}
func getBody(url string) ([]byte, error) {
response, err := http.Get(url)
if err != nil {
return nil, fmt.Errorf(`Error while retrieving data from %s: %v`, url, err)
}
if response.StatusCode >= 400 {
return nil, fmt.Errorf(`Got error %d while getting %s`, response.StatusCode, url)
}
body, err := readBody(response.Body)
if err != nil {
return nil, fmt.Errorf(`Error while reading body of %s: %v`, url, err)
}
return body, nil
}
func extractLabel(extract *regexp.Regexp, body []byte, defaultValue []byte) []byte {
match := extract.FindSubmatch(body)
if match == nil {
return defaultValue
}
return bytes.Replace(match[1], HTML_AMP_BYTE, AMP_BYTE, -1)
}
func extractPerformance(extract *regexp.Regexp, body []byte) float64 {
dotResult := bytes.Replace(extractLabel(extract, body, EMPTY_BYTE), COMMA_BYTE, PERIOD_BYTE, -1)
percentageResult := bytes.Replace(dotResult, PERCENT_BYTE, EMPTY_BYTE, -1)
trimResult := bytes.TrimSpace(percentageResult)
result, err := strconv.ParseFloat(string(trimResult), 64)
if err != nil {
return 0.0
}
return result
}
func cleanId(morningStarId []byte) string {
return string(bytes.ToLower(morningStarId))
}
func fetchPerformance(morningStarId []byte) (*Performance, error) {
cleanId := cleanId(morningStarId)
performanceBody, err := getBody(PERFORMANCE_URL + cleanId)
if err != nil {
return nil, err
}
volatiliteBody, err := getBody(VOLATILITE_URL + cleanId)
if err != nil {
return nil, err
}
isin := string(extractLabel(ISIN, performanceBody, EMPTY_BYTE))
label := string(extractLabel(LABEL, performanceBody, EMPTY_BYTE))
rating := string(extractLabel(RATING, performanceBody, ZERO_BYTE))
category := string(extractLabel(CATEGORY, performanceBody, EMPTY_BYTE))
oneMonth := extractPerformance(PERF_ONE_MONTH, performanceBody)
threeMonths := extractPerformance(PERF_THREE_MONTH, performanceBody)
sixMonths := extractPerformance(PERF_SIX_MONTH, performanceBody)
oneYear := extractPerformance(PERF_ONE_YEAR, performanceBody)
volThreeYears := extractPerformance(VOL_3_YEAR, volatiliteBody)
score := (0.25 * oneMonth) + (0.3 * threeMonths) + (0.25 * sixMonths) + (0.2 * oneYear) - (0.1 * volThreeYears)
scoreTruncated := float64(int(score*100)) / 100
return &Performance{cleanId, isin, label, category, rating, oneMonth, threeMonths, sixMonths, oneYear, volThreeYears, scoreTruncated, time.Now()}, nil
}
// fetchIds downloads the fund id list from elasticsearch and returns
// every matched morningstar id. It logs and returns nil when the
// request fails.
func fetchIds() [][]byte {
	// Early return instead of the original else-after-return block.
	idsBody, err := getBody(IDS_URL)
	if err != nil {
		log.Print(err)
		return nil
	}
	idsMatch := ID.FindAllSubmatch(idsBody, -1)
	ids := make([][]byte, 0, len(idsMatch))
	for _, match := range idsMatch {
		ids = append(ids, match[1])
	}
	return ids
}
// retrievePerformance returns the Performance for the given id, served
// from PERFORMANCE_CACHE when the cached entry is younger than the
// refresh delay (plus one hour of slack), fetching and caching it
// otherwise.
func retrievePerformance(morningStarId []byte) (*Performance, error) {
	// Renamed from the original's `cleanId :=` which shadowed the
	// cleanId function.
	id := cleanId(morningStarId)
	if cached, ok := PERFORMANCE_CACHE.get(id); ok && time.Now().Add(time.Hour*-(REFRESH_DELAY_HOURS+1)).Before(cached.Update) {
		return cached, nil
	}
	performance, err := fetchPerformance(morningStarId)
	if err != nil {
		return nil, err
	}
	PERFORMANCE_CACHE.push(id, performance)
	return performance, nil
}
func concurrentRetrievePerformances(ids [][]byte, wg *sync.WaitGroup, performances chan<- *Performance, method func([]byte) (*Performance, error)) {
tokens := make(chan int, CONCURRENT_FETCHER)
clearSemaphores := func() {
wg.Done()
<-tokens
}
for _, id := range ids {
tokens <- 1
go func(morningStarId []byte) {
defer clearSemaphores()
if performance, err := method(morningStarId); err == nil {
performances <- performance
}
}(id)
}
}
func retrievePerformances(ids [][]byte, method func([]byte) (*Performance, error)) []*Performance {
var wg sync.WaitGroup
wg.Add(len(ids))
performances := make(chan *Performance, CONCURRENT_FETCHER)
go concurrentRetrievePerformances(ids, &wg, performances, method)
go func() {
wg.Wait()
close(performances)
}()
results := make([]*Performance, 0, len(ids))
for performance := range performances {
results = append(results, performance)
}
return results
}
// performanceHandler writes the performance for the requested fund id
// as JSON, or a 500 with the error message when retrieval fails.
func performanceHandler(w http.ResponseWriter, morningStarId []byte) {
	performance, err := retrievePerformance(morningStarId)
	if err != nil {
		// Named constant instead of the magic 500; early return keeps
		// the happy path unindented.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	jsonHttp.ResponseJson(w, *performance)
}
func listHandler(w http.ResponseWriter, r *http.Request) {
jsonHttp.ResponseJson(w, Results{retrievePerformances(fetchIds(), retrievePerformance)})
}
type Handler struct {
}
func (handler Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Add(`Access-Control-Allow-Origin`, `*`)
w.Header().Add(`Access-Control-Allow-Headers`, `Content-Type`)
w.Header().Add(`Access-Control-Allow-Methods`, `GET`)
w.Header().Add(`X-Content-Type-Options`, `nosniff`)
urlPath := []byte(r.URL.Path)
if LIST_REQUEST.Match(urlPath) {
listHandler(w, r)
} else if PERF_REQUEST.Match(urlPath) {
performanceHandler(w, PERF_REQUEST.FindSubmatch(urlPath)[1])
}
}
|
package main
import (
"encoding/json"
"fmt"
"go.net/websocket"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"runtime"
"strings"
"text/template"
"time"
"uuid"
)
// ServerConfig holds the settings loaded from config.json.
type ServerConfig struct {
	Hostname     string `json:"hostname"`
	Port         string `json:"port"`
	NotifyPrefix string `json:"notifyPrefix"` // URL prefix for app-server notification PUTs
	UseTLS       bool   `json:"useTLS"`
	CertFilename string `json:"certFilename"`
	KeyFilename  string `json:"keyFilename"`
}

// gServerConfig is the process-wide configuration, populated by readConfig.
var gServerConfig ServerConfig

// Client is one connected (or recently connected) user agent.
type Client struct {
	Websocket   *websocket.Conn `json:"-"`    // nil when the UA is reachable only via UDP wakeup
	UAID        string          `json:"uaid"` // user-agent id
	Ip          string          `json:"ip"`   // UDP wakeup address, "" when none supplied
	Port        float64         `json:"port"` // UDP wakeup port
	LastContact time.Time       `json:"-"`
}

// Channel is one push channel owned by a UAID.
type Channel struct {
	UAID      string `json:"uaid"`
	ChannelID string `json:"channelID"`
	Version   uint64 `json:"version"`
}

// ChannelIDSet maps channelIDs to their Channel records.
type ChannelIDSet map[string]*Channel

// ServerState is the full persistent + transient server state.
type ServerState struct {
	// Mapping from a UAID to the Client object
	// json field is "-" to prevent serialization
	// since the connectedness of a client means nothing
	// across sessions
	ConnectedClients map[string]*Client `json:"-"`
	// Mapping from a UAID to all channelIDs owned by that UAID
	// where channelIDs are represented as a map-backed set
	UAIDToChannelIDs map[string]ChannelIDSet `json:"uaidToChannels"`
	// Mapping from a ChannelID to the cooresponding Channel
	ChannelIDToChannel ChannelIDSet `json:"channelIDToChannel"`
}

// gServerState is the process-wide state, populated by openState.
var gServerState ServerState
// readConfig loads config.json into gServerConfig, exiting the process
// when the file is missing or malformed.
func readConfig() {
	data, err := ioutil.ReadFile("config.json")
	if err != nil {
		log.Println("Not configured. Could not find config.json")
		os.Exit(-1)
	}
	if err = json.Unmarshal(data, &gServerConfig); err != nil {
		log.Println("Could not unmarshal config.json", err)
		// The original had an unreachable `return` after os.Exit.
		os.Exit(-1)
	}
}
// openState loads persisted server state from serverstate.json; on any
// read or parse failure it falls back to a fresh, empty state.
// ConnectedClients is always rebuilt empty: connectedness is not
// meaningful across restarts (the field is json:"-" on ServerState).
func openState() {
	var data []byte
	var err error
	data, err = ioutil.ReadFile("serverstate.json")
	if err == nil {
		err = json.Unmarshal(data, &gServerState)
		if err == nil {
			gServerState.ConnectedClients = make(map[string]*Client)
			return
		}
	}
	log.Println(" -> creating new server state")
	gServerState.UAIDToChannelIDs = make(map[string]ChannelIDSet)
	gServerState.ChannelIDToChannel = make(ChannelIDSet)
	gServerState.ConnectedClients = make(map[string]*Client)
}
// saveState serializes gServerState to serverstate.json.
// Errors are logged but otherwise ignored; the in-memory state stays
// authoritative. The original swallowed both errors silently.
func saveState() {
	log.Println(" -> saving state..")
	data, err := json.Marshal(gServerState)
	if err != nil {
		log.Println("Could not marshal server state", err)
		return
	}
	if err = ioutil.WriteFile("serverstate.json", data, 0644); err != nil {
		log.Println("Could not write serverstate.json", err)
	}
}
// makeNotifyURL builds the push endpoint URL for the given suffix,
// honoring the configured scheme, host, port and notify prefix.
func makeNotifyURL(suffix string) string {
	scheme := "http://"
	if gServerConfig.UseTLS {
		scheme = "https://"
	}
	return scheme + gServerConfig.Hostname + ":" + gServerConfig.Port + gServerConfig.NotifyPrefix + suffix
}
// handleRegister registers a new channelID for the client's UAID and
// replies with the push endpoint URL for that channel. Status 409 is
// returned when the channel already belongs to a different UAID.
func handleRegister(client *Client, f map[string]interface{}) {
	type RegisterResponse struct {
		Name         string `json:"messageType"`
		Status       int    `json:"status"`
		PushEndpoint string `json:"pushEndpoint"`
		ChannelID    string `json:"channelID"`
	}
	if f["channelID"] == nil {
		log.Println("channelID is missing!")
		return
	}
	var channelID = f["channelID"].(string)
	register := RegisterResponse{"register", 0, "", channelID}
	prevEntry, exists := gServerState.ChannelIDToChannel[channelID]
	if exists && prevEntry.UAID != client.UAID {
		register.Status = 409
	} else {
		channel := &Channel{client.UAID, channelID, 0}
		if gServerState.UAIDToChannelIDs[client.UAID] == nil {
			gServerState.UAIDToChannelIDs[client.UAID] = make(ChannelIDSet)
		}
		gServerState.UAIDToChannelIDs[client.UAID][channelID] = channel
		gServerState.ChannelIDToChannel[channelID] = channel
		register.Status = 200
		register.PushEndpoint = makeNotifyURL(channelID)
	}
	if register.Status == 0 {
		panic("Register(): status field was left unset when replying to client")
	}
	j, err := json.Marshal(register)
	if err != nil {
		// Printf, not Println: the original passed a %s verb to
		// Println, which does not interpret format verbs.
		log.Printf("Could not convert register response to json %s", err)
		return
	}
	if err = websocket.Message.Send(client.Websocket, string(j)); err != nil {
		// we could not send the message to a peer
		log.Println("Could not send message to ", client.Websocket, err.Error())
	}
}
// handleUnregister removes a channelID from the server state (only when
// the requesting client's UAID owns it) and always replies with a
// 200 unregister response, matching the original's at-most-once delete.
func handleUnregister(client *Client, f map[string]interface{}) {
	if f["channelID"] == nil {
		log.Println("channelID is missing!")
		return
	}
	var channelID = f["channelID"].(string)
	_, ok := gServerState.ChannelIDToChannel[channelID]
	if ok {
		// only delete if UA owns this channel
		_, owns := gServerState.UAIDToChannelIDs[client.UAID][channelID]
		if owns {
			// remove ownership
			delete(gServerState.UAIDToChannelIDs[client.UAID], channelID)
			// delete the channel itself
			delete(gServerState.ChannelIDToChannel, channelID)
		}
	}
	type UnregisterResponse struct {
		Name      string `json:"messageType"`
		Status    int    `json:"status"`
		ChannelID string `json:"channelID"`
	}
	unregister := UnregisterResponse{"unregister", 200, channelID}
	j, err := json.Marshal(unregister)
	if err != nil {
		// Printf, not Println: the original passed a %s verb to
		// Println, which does not interpret format verbs.
		log.Printf("Could not convert unregister response to json %s", err)
		return
	}
	if err = websocket.Message.Send(client.Websocket, string(j)); err != nil {
		// we could not send the message to a peer
		log.Println("Could not send message to ", client.Websocket, err.Error())
	}
}
// handleHello establishes the client's UAID (generating one when the
// client did not supply its own), re-registers any channelIDs the
// client claims to own, records the client's UDP wake-up interface,
// and replies with a hello message carrying the final UAID and status.
func handleHello(client *Client, f map[string]interface{}) {
	status := 200
	if f["uaid"] == nil {
		uaid, err := uuid.GenUUID()
		if err != nil {
			status = 400
			// Printf, not Println: the original passed a %s verb to
			// Println, which does not interpret format verbs.
			log.Printf("GenUUID error %s", err)
		}
		// NOTE(review): on GenUUID failure the client is still stored
		// below under an empty UAID while receiving status 400 —
		// confirm this is intended.
		client.UAID = uaid
	} else {
		client.UAID = f["uaid"].(string)
		// BUG(nikhilm): Does not deal with sending
		// a new UAID if their is a channel that was sent
		// by the UA which the server does not know about.
		// Which means in the case of this memory only server
		// it should actually always send a new UAID when it was
		// restarted
		if f["channelIDs"] != nil {
			for _, foo := range f["channelIDs"].([]interface{}) {
				channelID := foo.(string)
				if gServerState.UAIDToChannelIDs[client.UAID] == nil {
					gServerState.UAIDToChannelIDs[client.UAID] = make(ChannelIDSet)
				}
				c := &Channel{client.UAID, channelID, 0}
				gServerState.UAIDToChannelIDs[client.UAID][channelID] = c
				gServerState.ChannelIDToChannel[channelID] = c
			}
		}
	}
	gServerState.ConnectedClients[client.UAID] = client
	if f["interface"] != nil {
		m := f["interface"].(map[string]interface{})
		client.Ip = m["ip"].(string)
		client.Port = m["port"].(float64)
	}
	type HelloResponse struct {
		Name   string `json:"messageType"`
		Status int    `json:"status"`
		UAID   string `json:"uaid"`
	}
	hello := HelloResponse{"hello", status, client.UAID}
	j, err := json.Marshal(hello)
	if err != nil {
		log.Printf("Could not convert hello response to json %s", err)
		return
	}
	if err = websocket.Message.Send(client.Websocket, string(j)); err != nil {
		log.Println("Could not send message to ", client.Websocket, err.Error())
	}
}
// handleAck handles an "ack" message from the client. Intentionally a
// no-op here: this server keeps no pending-delivery state to clear.
// NOTE(review): confirm acks are meant to be ignored rather than
// unimplemented.
func handleAck(client *Client, f map[string]interface{}) {
}
// pushHandler is the per-connection websocket loop. It decodes one JSON
// message at a time, dispatches it by messageType, and persists state
// after each message. When the socket drops, the client's stored
// Websocket pointer is cleared so notifications fall back to UDP wakeup.
func pushHandler(ws *websocket.Conn) {
	client := &Client{ws, "", "", 0, time.Now()}
	for {
		var f map[string]interface{}
		if err := websocket.JSON.Receive(ws, &f); err != nil {
			log.Println("Websocket Disconnected.", err.Error())
			break
		}
		client.LastContact = time.Now()
		log.Println("pushHandler msg: ", f["messageType"])
		// Go switch cases do not fall through; the original's explicit
		// `break` statements were redundant.
		switch f["messageType"] {
		case "hello":
			handleHello(client, f)
		case "register":
			handleRegister(client, f)
		case "unregister":
			handleUnregister(client, f)
		case "ack":
			handleAck(client, f)
		default:
			log.Println(" -> Unknown", f)
		}
		saveState()
	}
	log.Println("Closing Websocket!")
	ws.Close()
	// Guard against clients that disconnect before a successful hello:
	// their UAID was never stored, and the original dereferenced a nil
	// map entry here.
	if c, ok := gServerState.ConnectedClients[client.UAID]; ok {
		c.Websocket = nil
	}
}
// notifyHandler receives version-update PUTs from application servers
// at NotifyPrefix/<channelID>, bumps the channel version, persists
// state, and forwards the notification to the owning client — over its
// open websocket when connected, otherwise via a UDP wakeup.
func notifyHandler(w http.ResponseWriter, r *http.Request) {
	log.Println("Got notification from app server ", r.URL)
	if r.Method != "PUT" {
		log.Println("NOT A PUT")
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("Method must be PUT."))
		return
	}
	channelID := strings.Replace(r.URL.Path, gServerConfig.NotifyPrefix, "", 1)
	if strings.Contains(channelID, "/") {
		log.Println("Could not find a valid channelID")
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("Could not find a valid channelID."))
		return
	}
	channel, found := gServerState.ChannelIDToChannel[channelID]
	if !found {
		log.Println("Could not find channel " + channelID)
		// NOTE(review): returns without writing a status, so the app
		// server sees an implicit 200 for an unknown channel — confirm
		// this is intended.
		return
	}
	channel.Version++
	client := gServerState.ConnectedClients[channel.UAID]
	saveState()
	if client == nil {
		log.Println("no known client for the channel.")
	} else if client.Websocket == nil {
		// Known client with no live socket: poke it over UDP.
		wakeupClient(client)
	} else {
		sendNotificationToClient(client, channel)
	}
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("OK"))
}
// wakeupClient sends a single "push" datagram to the client's
// registered UDP interface so it reconnects and picks up notifications.
func wakeupClient(client *Client) {
	// TODO probably want to do this a few times before
	// giving up.
	log.Println("wakeupClient: ", client)
	service := fmt.Sprintf("%s:%g", client.Ip, client.Port)
	udpAddr, err := net.ResolveUDPAddr("udp4", service)
	if err != nil {
		log.Println("ResolveUDPAddr error ", err.Error())
		return
	}
	conn, err := net.DialUDP("udp", nil, udpAddr)
	if err != nil {
		log.Println("DialUDP error ", err.Error())
		return
	}
	// The original never closed the socket, leaking a file descriptor
	// per wakeup.
	defer conn.Close()
	if _, err = conn.Write([]byte("push")); err != nil {
		log.Println("UDP Write error ", err.Error())
	}
}
// sendNotificationToClient pushes a notification message carrying the
// channel's new version over the client's open websocket.
func sendNotificationToClient(client *Client, channel *Channel) {
	type NotificationResponse struct {
		Name     string    `json:"messageType"`
		Channels []Channel `json:"updates"`
	}
	var channels []Channel
	channels = append(channels, *channel)
	notification := NotificationResponse{"notification", channels}
	j, err := json.Marshal(notification)
	if err != nil {
		// Fixed: the original said "hello response" (copy-paste) and
		// passed a %s verb to Println, which ignores format verbs.
		log.Printf("Could not convert notification response to json %s", err)
		return
	}
	if err = websocket.Message.Send(client.Websocket, string(j)); err != nil {
		log.Println("Could not send message to ", channel, err.Error())
	}
}
// disconnectUDPClient closes the websocket of the given UAID with the
// custom 4774 status (telling the UA to stay reachable via UDP) and
// clears the stored socket. No-op when the UAID is unknown or already
// socketless.
func disconnectUDPClient(uaid string) {
	// Single lookup with a nil guard: the original indexed the map
	// three times and panicked on an unknown uaid.
	client, ok := gServerState.ConnectedClients[uaid]
	if !ok || client == nil || client.Websocket == nil {
		return
	}
	client.Websocket.CloseWithStatus(4774)
	client.Websocket = nil
}
// admin renders the users.template page listing every known UAID, its
// connectedness, and its channels, plus the server's allocated memory.
func admin(w http.ResponseWriter, r *http.Request) {
	memstats := new(runtime.MemStats)
	runtime.ReadMemStats(memstats)
	totalMemory := memstats.Alloc
	type User struct {
		UAID      string
		Connected bool
		Channels  []*Channel
	}
	type Arguments struct {
		PushEndpointPrefix string
		TotalMemory        uint64
		Users              []User
	}
	arguments := Arguments{makeNotifyURL(""), totalMemory, nil}
	for uaid, channelIDSet := range gServerState.UAIDToChannelIDs {
		connected := gServerState.ConnectedClients[uaid] != nil
		var channels []*Channel
		for _, channel := range channelIDSet {
			channels = append(channels, channel)
		}
		u := User{uaid, connected, channels}
		arguments.Users = append(arguments.Users, u)
	}
	// The original discarded the ParseFiles error and would have
	// panicked executing a nil template when the file was missing.
	t, err := template.ParseFiles("templates/users.template")
	if err != nil {
		log.Println("Could not parse users.template", err)
		http.Error(w, "template error", http.StatusInternalServerError)
		return
	}
	if err = t.Execute(w, arguments); err != nil {
		log.Println("Could not render users.template", err)
	}
}
// main wires up the HTTP handlers (admin page, websocket push endpoint,
// app-server notification endpoint), starts a watchdog that closes idle
// UDP-wakeable connections, and serves with or without TLS per config.
func main() {
	readConfig()
	openState()
	http.HandleFunc("/admin", admin)
	http.Handle("/", websocket.Handler(pushHandler))
	http.HandleFunc(gServerConfig.NotifyPrefix, notifyHandler)
	// Every 10s, drop connections silent for >15s that registered a UDP
	// wake-up interface; they will be woken over UDP when a
	// notification arrives.
	go func() {
		c := time.Tick(10 * time.Second)
		for now := range c {
			for uaid, client := range gServerState.ConnectedClients {
				if now.Sub(client.LastContact).Seconds() > 15 && client.Ip != "" {
					log.Println("Will wake up ", client.Ip, ". closing connection")
					disconnectUDPClient(uaid)
				}
			}
		}
	}()
	log.Println("Listening on", gServerConfig.Hostname+":"+gServerConfig.Port)
	var err error
	if gServerConfig.UseTLS {
		err = http.ListenAndServeTLS(gServerConfig.Hostname+":"+gServerConfig.Port,
			gServerConfig.CertFilename,
			gServerConfig.KeyFilename,
			nil)
	} else {
		for i := 0; i < 5; i++ {
			log.Println("This is a really unsafe way to run the push server. Really. Don't do this in production.")
		}
		err = http.ListenAndServe(gServerConfig.Hostname+":"+gServerConfig.Port, nil)
	}
	log.Println("Exiting... ", err)
}
Use Websocket as the key that tells us if we are connected or not in the Admin view
package main
import (
"encoding/json"
"fmt"
"go.net/websocket"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"runtime"
"strings"
"text/template"
"time"
"uuid"
)
type ServerConfig struct {
Hostname string `json:"hostname"`
Port string `json:"port"`
NotifyPrefix string `json:"notifyPrefix"`
UseTLS bool `json:"useTLS"`
CertFilename string `json:"certFilename"`
KeyFilename string `json:"keyFilename"`
}
var gServerConfig ServerConfig
type Client struct {
Websocket *websocket.Conn `json:"-"`
UAID string `json:"uaid"`
Ip string `json:"ip"`
Port float64 `json:"port"`
LastContact time.Time `json:"-"`
}
type Channel struct {
UAID string `json:"uaid"`
ChannelID string `json:"channelID"`
Version uint64 `json:"version"`
}
type ChannelIDSet map[string]*Channel
type ServerState struct {
// Mapping from a UAID to the Client object
// json field is "-" to prevent serialization
// since the connectedness of a client means nothing
// across sessions
ConnectedClients map[string]*Client `json:"-"`
// Mapping from a UAID to all channelIDs owned by that UAID
// where channelIDs are represented as a map-backed set
UAIDToChannelIDs map[string]ChannelIDSet `json:"uaidToChannels"`
// Mapping from a ChannelID to the cooresponding Channel
ChannelIDToChannel ChannelIDSet `json:"channelIDToChannel"`
}
var gServerState ServerState
// readConfig loads config.json into gServerConfig, exiting the process
// when the file is missing or malformed.
func readConfig() {
	data, err := ioutil.ReadFile("config.json")
	if err != nil {
		log.Println("Not configured. Could not find config.json")
		os.Exit(-1)
	}
	if err = json.Unmarshal(data, &gServerConfig); err != nil {
		log.Println("Could not unmarshal config.json", err)
		// The original had an unreachable `return` after os.Exit.
		os.Exit(-1)
	}
}
func openState() {
var data []byte
var err error
data, err = ioutil.ReadFile("serverstate.json")
if err == nil {
err = json.Unmarshal(data, &gServerState)
if err == nil {
gServerState.ConnectedClients = make(map[string]*Client)
return
}
}
log.Println(" -> creating new server state")
gServerState.UAIDToChannelIDs = make(map[string]ChannelIDSet)
gServerState.ChannelIDToChannel = make(ChannelIDSet)
gServerState.ConnectedClients = make(map[string]*Client)
}
// saveState serializes gServerState to serverstate.json.
// Errors are logged but otherwise ignored; the in-memory state stays
// authoritative. The original swallowed both errors silently.
func saveState() {
	log.Println(" -> saving state..")
	data, err := json.Marshal(gServerState)
	if err != nil {
		log.Println("Could not marshal server state", err)
		return
	}
	if err = ioutil.WriteFile("serverstate.json", data, 0644); err != nil {
		log.Println("Could not write serverstate.json", err)
	}
}
func makeNotifyURL(suffix string) string {
var scheme string
if gServerConfig.UseTLS {
scheme = "https://"
} else {
scheme = "http://"
}
return scheme + gServerConfig.Hostname + ":" + gServerConfig.Port + gServerConfig.NotifyPrefix + suffix
}
// handleRegister creates (or re-validates) a channel registration for the
// client's UAID and replies with a "register" message carrying the push
// endpoint URL. A channel owned by a different UAID yields status 409.
func handleRegister(client *Client, f map[string]interface{}) {
	type RegisterResponse struct {
		Name         string `json:"messageType"`
		Status       int    `json:"status"`
		PushEndpoint string `json:"pushEndpoint"`
		ChannelID    string `json:"channelID"`
	}
	if f["channelID"] == nil {
		log.Println("channelID is missing!")
		return
	}
	var channelID = f["channelID"].(string)
	register := RegisterResponse{"register", 0, "", channelID}
	prevEntry, exists := gServerState.ChannelIDToChannel[channelID]
	if exists && prevEntry.UAID != client.UAID {
		// Conflict: someone else already owns this channel.
		register.Status = 409
	} else {
		channel := &Channel{client.UAID, channelID, 0}
		if gServerState.UAIDToChannelIDs[client.UAID] == nil {
			gServerState.UAIDToChannelIDs[client.UAID] = make(ChannelIDSet)
		}
		gServerState.UAIDToChannelIDs[client.UAID][channelID] = channel
		gServerState.ChannelIDToChannel[channelID] = channel
		register.Status = 200
		register.PushEndpoint = makeNotifyURL(channelID)
	}
	if register.Status == 0 {
		panic("Register(): status field was left unset when replying to client")
	}
	j, err := json.Marshal(register)
	if err != nil {
		// Fix: log.Println does not interpret format verbs; use Printf.
		log.Printf("Could not convert register response to json %s", err)
		return
	}
	if err = websocket.Message.Send(client.Websocket, string(j)); err != nil {
		// we could not send the message to a peer
		log.Println("Could not send message to ", client.Websocket, err.Error())
	}
}
// handleUnregister removes a channel registration if the requesting UA owns
// it, then always replies with status 200 (unregistering an unknown or
// foreign channel is treated as success).
func handleUnregister(client *Client, f map[string]interface{}) {
	if f["channelID"] == nil {
		log.Println("channelID is missing!")
		return
	}
	var channelID = f["channelID"].(string)
	_, ok := gServerState.ChannelIDToChannel[channelID]
	if ok {
		// only delete if UA owns this channel
		_, owns := gServerState.UAIDToChannelIDs[client.UAID][channelID]
		if owns {
			// remove ownership
			delete(gServerState.UAIDToChannelIDs[client.UAID], channelID)
			// delete the channel itself
			delete(gServerState.ChannelIDToChannel, channelID)
		}
	}
	type UnregisterResponse struct {
		Name      string `json:"messageType"`
		Status    int    `json:"status"`
		ChannelID string `json:"channelID"`
	}
	unregister := UnregisterResponse{"unregister", 200, channelID}
	j, err := json.Marshal(unregister)
	if err != nil {
		// Fix: log.Println does not interpret format verbs; use Printf.
		log.Printf("Could not convert unregister response to json %s", err)
		return
	}
	if err = websocket.Message.Send(client.Websocket, string(j)); err != nil {
		// we could not send the message to a peer
		log.Println("Could not send message to ", client.Websocket, err.Error())
	}
}
// handleHello binds the websocket to a UAID (generating one when the UA
// supplied none), re-registers any channelIDs the UA claims, records an
// optional UDP wakeup interface, and replies with a "hello" message.
func handleHello(client *Client, f map[string]interface{}) {
	status := 200
	if f["uaid"] == nil {
		uaid, err := uuid.GenUUID()
		if err != nil {
			status = 400
			// Fix: log.Println does not interpret format verbs; use Printf.
			log.Printf("GenUUID error %s", err)
		}
		client.UAID = uaid
	} else {
		client.UAID = f["uaid"].(string)
		// BUG(nikhilm): Does not deal with sending
		// a new UAID if there is a channel that was sent
		// by the UA which the server does not know about.
		// Which means in the case of this memory only server
		// it should actually always send a new UAID when it was
		// restarted
		if f["channelIDs"] != nil {
			for _, rawChannelID := range f["channelIDs"].([]interface{}) {
				channelID := rawChannelID.(string)
				if gServerState.UAIDToChannelIDs[client.UAID] == nil {
					gServerState.UAIDToChannelIDs[client.UAID] = make(ChannelIDSet)
				}
				c := &Channel{client.UAID, channelID, 0}
				gServerState.UAIDToChannelIDs[client.UAID][channelID] = c
				gServerState.ChannelIDToChannel[channelID] = c
			}
		}
	}
	gServerState.ConnectedClients[client.UAID] = client
	if f["interface"] != nil {
		// UDP wakeup address advertised by the UA (e.g. a mobile device).
		m := f["interface"].(map[string]interface{})
		client.Ip = m["ip"].(string)
		client.Port = m["port"].(float64)
	}
	type HelloResponse struct {
		Name   string `json:"messageType"`
		Status int    `json:"status"`
		UAID   string `json:"uaid"`
	}
	hello := HelloResponse{"hello", status, client.UAID}
	j, err := json.Marshal(hello)
	if err != nil {
		// Fix: log.Println does not interpret format verbs; use Printf.
		log.Printf("Could not convert hello response to json %s", err)
		return
	}
	if err = websocket.Message.Send(client.Websocket, string(j)); err != nil {
		log.Println("Could not send message to ", client.Websocket, err.Error())
	}
}
// handleAck processes an "ack" message from the UA. Intentionally a no-op:
// this in-memory server does not track delivery of individual updates.
func handleAck(client *Client, f map[string]interface{}) {
}
// pushHandler is the websocket entry point: it reads JSON messages in a
// loop, dispatches on messageType, persists state after each message, and
// clears the client's websocket reference when the connection drops.
func pushHandler(ws *websocket.Conn) {
	client := &Client{ws, "", "", 0, time.Now()}
	for {
		var f map[string]interface{}
		if err := websocket.JSON.Receive(ws, &f); err != nil {
			log.Println("Websocket Disconnected.", err.Error())
			break
		}
		client.LastContact = time.Now()
		log.Println("pushHandler msg: ", f["messageType"])
		// Go switch cases do not fall through; the explicit `break`s were
		// redundant and have been removed.
		switch f["messageType"] {
		case "hello":
			handleHello(client, f)
		case "register":
			handleRegister(client, f)
		case "unregister":
			handleUnregister(client, f)
		case "ack":
			handleAck(client, f)
		default:
			log.Println(" -> Unknown", f)
		}
		saveState()
	}
	log.Println("Closing Websocket!")
	ws.Close()
	// Fix: if the UA disconnected before a successful "hello", its UAID was
	// never added to ConnectedClients and the old unconditional access
	// dereferenced a nil *Client.
	if c, ok := gServerState.ConnectedClients[client.UAID]; ok && c != nil {
		c.Websocket = nil
	}
}
// notifyHandler accepts PUT requests from application servers on a
// channel's push endpoint, bumps the channel version, persists state, and
// delivers the update over the websocket or via UDP wakeup.
func notifyHandler(w http.ResponseWriter, r *http.Request) {
	log.Println("Got notification from app server ", r.URL)
	if r.Method != "PUT" {
		log.Println("NOT A PUT")
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("Method must be PUT."))
		return
	}
	channelID := strings.Replace(r.URL.Path, gServerConfig.NotifyPrefix, "", 1)
	if strings.Contains(channelID, "/") {
		log.Println("Could not find a valid channelID")
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("Could not find a valid channelID."))
		return
	}
	channel, found := gServerState.ChannelIDToChannel[channelID]
	if !found {
		// Fix: previously returned with no status at all, so the client saw
		// an implicit 200 for an unknown channel.
		log.Println("Could not find channel " + channelID)
		w.WriteHeader(http.StatusNotFound)
		w.Write([]byte("Could not find channel."))
		return
	}
	channel.Version++
	client := gServerState.ConnectedClients[channel.UAID]
	saveState()
	if client == nil {
		log.Println("no known client for the channel.")
	} else if client.Websocket == nil {
		// Client registered a UDP wakeup interface and has been disconnected.
		wakeupClient(client)
	} else {
		sendNotificationToClient(client, channel)
	}
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("OK"))
}
// wakeupClient sends a single "push" UDP datagram to the client's
// advertised wakeup address so the device re-establishes its websocket.
func wakeupClient(client *Client) {
	// TODO probably want to do this a few times before
	// giving up.
	log.Println("wakeupClient: ", client)
	service := fmt.Sprintf("%s:%g", client.Ip, client.Port)
	udpAddr, err := net.ResolveUDPAddr("udp4", service)
	if err != nil {
		log.Println("ResolveUDPAddr error ", err.Error())
		return
	}
	conn, err := net.DialUDP("udp", nil, udpAddr)
	if err != nil {
		log.Println("DialUDP error ", err.Error())
		return
	}
	// Fix: the socket was never closed, leaking a file descriptor per wakeup.
	defer conn.Close()
	if _, err = conn.Write([]byte("push")); err != nil {
		log.Println("UDP Write error ", err.Error())
		return
	}
}
// sendNotificationToClient pushes a "notification" message containing the
// updated channel (with its new version) over the client's websocket.
func sendNotificationToClient(client *Client, channel *Channel) {
	type NotificationResponse struct {
		Name     string    `json:"messageType"`
		Channels []Channel `json:"updates"`
	}
	notification := NotificationResponse{"notification", []Channel{*channel}}
	j, err := json.Marshal(notification)
	if err != nil {
		// Fix: message previously said "hello response" (copy-paste) and
		// misused log.Println with a format verb.
		log.Printf("Could not convert notification response to json %s", err)
		return
	}
	if err = websocket.Message.Send(client.Websocket, string(j)); err != nil {
		log.Println("Could not send message to ", channel, err.Error())
	}
}
// disconnectUDPClient closes the websocket of a UDP-wakeable client (status
// 4774) and clears the reference so notifyHandler takes the wakeup path.
func disconnectUDPClient(uaid string) {
	// Fix: guard the map lookup — an unknown UAID previously caused a nil
	// *Client dereference.
	client, ok := gServerState.ConnectedClients[uaid]
	if !ok || client == nil || client.Websocket == nil {
		return
	}
	client.Websocket.CloseWithStatus(4774)
	client.Websocket = nil
}
// admin renders templates/users.template with a summary of memory usage and
// every known UAID, its connection state, and its channels.
func admin(w http.ResponseWriter, r *http.Request) {
	memstats := new(runtime.MemStats)
	runtime.ReadMemStats(memstats)
	totalMemory := memstats.Alloc
	type User struct {
		UAID      string
		Connected bool
		Channels  []*Channel
	}
	type Arguments struct {
		PushEndpointPrefix string
		TotalMemory        uint64
		Users              []User
	}
	arguments := Arguments{makeNotifyURL(""), totalMemory, nil}
	for uaid, channelIDSet := range gServerState.UAIDToChannelIDs {
		// Fix: a UAID restored from disk may have no ConnectedClients entry;
		// the old code dereferenced the resulting nil *Client.
		c, ok := gServerState.ConnectedClients[uaid]
		connected := ok && c != nil && c.Websocket != nil
		var channels []*Channel
		for _, channel := range channelIDSet {
			channels = append(channels, channel)
		}
		arguments.Users = append(arguments.Users, User{uaid, connected, channels})
	}
	t := template.New("users.template")
	// Fix: template errors were silently discarded.
	s1, err := t.ParseFiles("templates/users.template")
	if err != nil {
		log.Println("Could not parse users template ", err.Error())
		http.Error(w, "template error", http.StatusInternalServerError)
		return
	}
	if err = s1.Execute(w, arguments); err != nil {
		log.Println("Could not execute users template ", err.Error())
	}
}
// main wires up the admin page, the websocket push endpoint and the
// app-server notification endpoint, starts a background sweep that closes
// idle UDP-wakeable connections, then serves HTTP(S) forever.
func main() {
	readConfig()
	openState()
	http.HandleFunc("/admin", admin)
	http.Handle("/", websocket.Handler(pushHandler))
	http.HandleFunc(gServerConfig.NotifyPrefix, notifyHandler)
	go func() {
		// NOTE(review): this goroutine ranges over ConnectedClients while the
		// HTTP/websocket handlers mutate it concurrently, with no lock —
		// confirm whether a mutex is needed.
		c := time.Tick(10 * time.Second)
		for now := range c {
			for uaid, client := range gServerState.ConnectedClients {
				// Idle >15s with a UDP wakeup address: close the socket so
				// future notifications use the UDP wakeup path instead.
				if now.Sub(client.LastContact).Seconds() > 15 && client.Ip != "" {
					log.Println("Will wake up ", client.Ip, ". closing connection")
					disconnectUDPClient(uaid)
				}
			}
		}
	}()
	log.Println("Listening on", gServerConfig.Hostname+":"+gServerConfig.Port)
	var err error
	if gServerConfig.UseTLS {
		err = http.ListenAndServeTLS(gServerConfig.Hostname+":"+gServerConfig.Port,
			gServerConfig.CertFilename,
			gServerConfig.KeyFilename,
			nil)
	} else {
		for i := 0; i < 5; i++ {
			log.Println("This is a really unsafe way to run the push server. Really. Don't do this in production.")
		}
		err = http.ListenAndServe(gServerConfig.Hostname+":"+gServerConfig.Port, nil)
	}
	log.Println("Exiting... ", err)
}
// ---- (concatenation artifact: boundary between source files) ----
// Copyright (c) 2018 Shivaram Lingamneni <slingamn@cs.stanford.edu>
// released under the MIT license
package irc
import (
"errors"
"fmt"
"regexp"
"time"
"github.com/oragono/oragono/irc/sno"
)
// hostservHelp is the long-form help text shown for the HostServ service.
const hostservHelp = `HostServ lets you manage your vhost (i.e., the string displayed
in place of your client's hostname/IP).`

var (
	// errVHostBadCharacters is returned by validateVhost when the proposed
	// vhost fails the allowed-character regexp.
	errVHostBadCharacters = errors.New("Vhost contains prohibited characters")
	// errVHostTooLong is returned by validateVhost when the proposed vhost
	// exceeds the configured maximum length.
	errVHostTooLong = errors.New("Vhost is too long")
	// ascii only for now
	defaultValidVhostRegex = regexp.MustCompile(`^[0-9A-Za-z.\-_/]+$`)
)
// hostservEnabled reports whether the vhost subsystem is enabled at all.
func hostservEnabled(config *Config) bool {
	return config.Accounts.VHosts.Enabled
}

// hostservRequestsEnabled reports whether users may file their own vhost
// requests (requires the vhost subsystem itself to be enabled too).
func hostservRequestsEnabled(config *Config) bool {
	return config.Accounts.VHosts.Enabled && config.Accounts.VHosts.UserRequests.Enabled
}
// hostservCommands maps lowercase HostServ subcommand names to their
// handlers, help text, required oper capabilities ("vhosts"), parameter
// bounds, and enablement predicates.
var (
	hostservCommands = map[string]*serviceCommand{
		"on": {
			handler: hsOnOffHandler,
			help: `Syntax: $bON$b
ON enables your vhost, if you have one approved.`,
			helpShort:    `$bON$b enables your vhost, if you have one approved.`,
			authRequired: true,
			enabled:      hostservEnabled,
		},
		"off": {
			handler: hsOnOffHandler,
			help: `Syntax: $bOFF$b
OFF disables your vhost, if you have one approved.`,
			helpShort:    `$bOFF$b disables your vhost, if you have one approved.`,
			authRequired: true,
			enabled:      hostservEnabled,
		},
		"request": {
			handler: hsRequestHandler,
			help: `Syntax: $bREQUEST <vhost>$b
REQUEST requests that a new vhost by assigned to your account. The request must
then be approved by a server operator.`,
			helpShort:    `$bREQUEST$b requests a new vhost, pending operator approval.`,
			authRequired: true,
			enabled:      hostservRequestsEnabled,
			minParams:    1,
		},
		"status": {
			handler: hsStatusHandler,
			help: `Syntax: $bSTATUS [user]$b
STATUS displays your current vhost, if any, and the status of your most recent
request for a new one. A server operator can view someone else's status.`,
			helpShort: `$bSTATUS$b shows your vhost and request status.`,
			enabled:   hostservEnabled,
		},
		"set": {
			handler: hsSetHandler,
			help: `Syntax: $bSET <user> <vhost>$b
SET sets a user's vhost, bypassing the request system.`,
			helpShort: `$bSET$b sets a user's vhost.`,
			capabs:    []string{"vhosts"},
			enabled:   hostservEnabled,
			minParams: 2,
		},
		"del": {
			handler: hsSetHandler,
			help: `Syntax: $bDEL <user>$b
DEL deletes a user's vhost.`,
			helpShort: `$bDEL$b deletes a user's vhost.`,
			capabs:    []string{"vhosts"},
			enabled:   hostservEnabled,
			minParams: 1,
		},
		"waiting": {
			handler: hsWaitingHandler,
			help: `Syntax: $bWAITING$b
WAITING shows a list of pending vhost requests, which can then be approved
or rejected.`,
			helpShort: `$bWAITING$b shows a list of pending vhost requests.`,
			capabs:    []string{"vhosts"},
			enabled:   hostservEnabled,
		},
		"approve": {
			handler: hsApproveHandler,
			help: `Syntax: $bAPPROVE <user>$b
APPROVE approves a user's vhost request.`,
			helpShort: `$bAPPROVE$b approves a user's vhost request.`,
			capabs:    []string{"vhosts"},
			enabled:   hostservEnabled,
			minParams: 1,
		},
		"reject": {
			handler: hsRejectHandler,
			help: `Syntax: $bREJECT <user> [<reason>]$b
REJECT rejects a user's vhost request, optionally giving them a reason
for the rejection.`,
			helpShort:         `$bREJECT$b rejects a user's vhost request.`,
			capabs:            []string{"vhosts"},
			enabled:           hostservEnabled,
			minParams:         1,
			maxParams:         2,
			unsplitFinalParam: true,
		},
		"forbid": {
			handler: hsForbidHandler,
			help: `Syntax: $bFORBID <user>$b
FORBID prevents a user from using any vhost, including ones on the offer list.`,
			helpShort: `$bFORBID$b prevents a user from using vhosts.`,
			capabs:    []string{"vhosts"},
			enabled:   hostservEnabled,
			minParams: 1,
			maxParams: 1,
		},
		"permit": {
			handler: hsForbidHandler,
			help: `Syntax: $bPERMIT <user>$b
PERMIT undoes FORBID, allowing the user to TAKE vhosts again.`,
			helpShort: `$bPERMIT$b allows a user to use vhosts again.`,
			capabs:    []string{"vhosts"},
			enabled:   hostservEnabled,
			minParams: 1,
			maxParams: 1,
		},
		"offerlist": {
			handler: hsOfferListHandler,
			help: `Syntax: $bOFFERLIST$b
OFFERLIST lists vhosts that can be chosen without requiring operator approval;
to use one of the listed vhosts, take it with /HOSTSERV TAKE.`,
			helpShort: `$bOFFERLIST$b lists vhosts that can be taken without operator approval.`,
			enabled:   hostservEnabled,
			minParams: 0,
			maxParams: 0,
		},
		"take": {
			handler: hsTakeHandler,
			help: `Syntax: $bTAKE$b <vhost>
TAKE sets your vhost to one of the vhosts in the server's offer list; to see
the offered vhosts, use /HOSTSERV OFFERLIST.`,
			helpShort:    `$bTAKE$b sets your vhost to one of the options from the offer list.`,
			enabled:      hostservEnabled,
			authRequired: true,
			minParams:    1,
			maxParams:    1,
		},
	}
)
// hsNotice sends the client a notice from HostServ, using the full service
// nickmask as the message source.
func hsNotice(rb *ResponseBuffer, text string) {
	rb.Add(nil, "HostServ!HostServ@localhost", "NOTICE", rb.target.Nick(), text)
}
// hsNotifyChannel notifies the designated channel of new vhost activity;
// it is a no-op when no such channel exists.
func hsNotifyChannel(server *Server, message string) {
	chname := server.Config().Accounts.VHosts.UserRequests.Channel
	channel := server.channels.Get(chname)
	if channel == nil {
		return
	}
	chname = channel.Name()
	for _, client := range channel.Members() {
		// Fix: use the full HostServ nickmask as the source, consistent with
		// hsNotice (previously a bare "HostServ" source was sent here).
		client.Send(nil, "HostServ!HostServ@localhost", "PRIVMSG", chname, message)
	}
}
// hsOnOffHandler implements /HOSTSERV ON and OFF, toggling whether the
// caller's approved vhost is applied.
func hsOnOffHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	enable := command == "on"
	_, err := server.accounts.VHostSetEnabled(client, enable)
	switch {
	case err == errNoVhost:
		hsNotice(rb, client.t(err.Error()))
	case err != nil:
		hsNotice(rb, client.t("An error occurred"))
	case enable:
		hsNotice(rb, client.t("Successfully enabled your vhost"))
	default:
		hsNotice(rb, client.t("Successfully disabled your vhost"))
	}
}
// hsRequestHandler implements /HOSTSERV REQUEST: validates the proposed
// vhost, files a request for the caller's account (subject to a cooldown),
// and notifies operators via the configured channel and snomasks.
func hsRequestHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	vhost := params[0]
	if validateVhost(server, vhost, false) != nil {
		hsNotice(rb, client.t("Invalid vhost"))
		return
	}
	accountName := client.Account()
	_, err := server.accounts.VHostRequest(accountName, vhost, time.Duration(server.Config().Accounts.VHosts.UserRequests.Cooldown))
	if err != nil {
		// Distinguish throttling and forbidden accounts from generic failures.
		if throttled, ok := err.(*vhostThrottleExceeded); ok {
			hsNotice(rb, fmt.Sprintf(client.t("You must wait an additional %v before making another request"), throttled.timeRemaining))
		} else if err == errVhostsForbidden {
			hsNotice(rb, client.t("An administrator has denied you the ability to use vhosts"))
		} else {
			hsNotice(rb, client.t("An error occurred"))
		}
	} else {
		hsNotice(rb, client.t("Your vhost request will be reviewed by an administrator"))
		chanMsg := fmt.Sprintf("Account %s requests vhost %s", accountName, vhost)
		hsNotifyChannel(server, chanMsg)
		server.snomasks.Send(sno.LocalVhosts, chanMsg)
	}
}
// hsStatusHandler implements /HOSTSERV STATUS: shows the approved vhost,
// pending request, and last rejection for the caller's account, or — for
// opers with the "vhosts" capability — for an arbitrary account.
func hsStatusHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	var accountName string
	if len(params) > 0 {
		// Viewing someone else's status requires the "vhosts" capability.
		if !client.HasRoleCapabs("vhosts") {
			hsNotice(rb, client.t("Command restricted"))
			return
		}
		accountName = params[0]
	} else {
		accountName = client.Account()
		if accountName == "" {
			hsNotice(rb, client.t("You're not logged into an account"))
			return
		}
	}
	account, err := server.accounts.LoadAccount(accountName)
	if err != nil {
		// Only unexpected storage errors are logged; either way the caller
		// just sees "No such account".
		if err != errAccountDoesNotExist {
			server.logger.Warning("internal", "error loading account info", accountName, err.Error())
		}
		hsNotice(rb, client.t("No such account"))
		return
	}
	if account.VHost.Forbidden {
		hsNotice(rb, client.t("An administrator has denied you the ability to use vhosts"))
		return
	}
	if account.VHost.ApprovedVHost != "" {
		hsNotice(rb, fmt.Sprintf(client.t("Account %[1]s has vhost: %[2]s"), accountName, account.VHost.ApprovedVHost))
		if !account.VHost.Enabled {
			hsNotice(rb, client.t("This vhost is currently disabled, but can be enabled with /HS ON"))
		}
	} else {
		hsNotice(rb, fmt.Sprintf(client.t("Account %s has no vhost"), accountName))
	}
	if account.VHost.RequestedVHost != "" {
		hsNotice(rb, fmt.Sprintf(client.t("A request is pending for vhost: %s"), account.VHost.RequestedVHost))
	}
	if account.VHost.RejectedVHost != "" {
		hsNotice(rb, fmt.Sprintf(client.t("A request was previously made for vhost: %s"), account.VHost.RejectedVHost))
		hsNotice(rb, fmt.Sprintf(client.t("It was rejected for reason: %s"), account.VHost.RejectionReason))
	}
}
// validateVhost checks a proposed vhost against the configured maximum
// length and allowed-character regexp. The oper parameter is currently
// unused — presumably reserved for looser oper-only rules; verify.
func validateVhost(server *Server, vhost string, oper bool) error {
	config := server.Config()
	if len(vhost) > config.Accounts.VHosts.MaxLength {
		return errVHostTooLong
	}
	if !config.Accounts.VHosts.ValidRegexp.MatchString(vhost) {
		return errVHostBadCharacters
	}
	return nil
}
// hsSetHandler implements both /HOSTSERV SET (assign a vhost directly,
// bypassing the request system) and /HOSTSERV DEL (clear it); for "del"
// the vhost stays empty, which is reported as a successful clear.
func hsSetHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	user := params[0]
	var vhost string
	if command == "set" {
		vhost = params[1]
		if validateVhost(server, vhost, true) != nil {
			hsNotice(rb, client.t("Invalid vhost"))
			return
		}
	}
	// else: command == "del", vhost == ""
	_, err := server.accounts.VHostSet(user, vhost)
	if err != nil {
		hsNotice(rb, client.t("An error occurred"))
	} else if vhost != "" {
		hsNotice(rb, client.t("Successfully set vhost"))
	} else {
		hsNotice(rb, client.t("Successfully cleared vhost"))
	}
}
// hsWaitingHandler implements /HOSTSERV WAITING: lists up to 10 pending
// vhost requests along with the total pending count.
func hsWaitingHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	requests, total := server.accounts.VHostListRequests(10)
	hsNotice(rb, fmt.Sprintf(client.t("There are %[1]d pending requests for vhosts (%[2]d displayed)"), total, len(requests)))
	for i := range requests {
		hsNotice(rb, fmt.Sprintf(client.t("%[1]d. User %[2]s requests vhost: %[3]s"), i+1, requests[i].Account, requests[i].RequestedVHost))
	}
}
// hsApproveHandler implements /HOSTSERV APPROVE: grants a pending vhost
// request, then notifies the oper channel, snomask subscribers, and every
// connected client of the approved account.
func hsApproveHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	user := params[0]
	vhostInfo, err := server.accounts.VHostApprove(user)
	if err != nil {
		hsNotice(rb, client.t("An error occurred"))
	} else {
		hsNotice(rb, fmt.Sprintf(client.t("Successfully approved vhost request for %s"), user))
		chanMsg := fmt.Sprintf("Oper %[1]s approved vhost %[2]s for account %[3]s", client.Nick(), vhostInfo.ApprovedVHost, user)
		hsNotifyChannel(server, chanMsg)
		server.snomasks.Send(sno.LocalVhosts, chanMsg)
		// NOTE: the loop variable shadows the oper's `client`; inside the
		// loop, `client` is each connected client of the approved account,
		// so the notice is translated per recipient.
		for _, client := range server.accounts.AccountToClients(user) {
			client.Notice(client.t("Your vhost request was approved by an administrator"))
		}
	}
}
// hsRejectHandler implements /HOSTSERV REJECT: denies a pending vhost
// request, optionally recording a reason that is relayed to the requesting
// account's connected clients.
func hsRejectHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	var reason string
	user := params[0]
	if len(params) > 1 {
		reason = params[1]
	}
	vhostInfo, err := server.accounts.VHostReject(user, reason)
	if err != nil {
		hsNotice(rb, client.t("An error occurred"))
		return
	}
	hsNotice(rb, fmt.Sprintf(client.t("Successfully rejected vhost request for %s"), user))
	chanMsg := fmt.Sprintf("Oper %s rejected vhost %s for account %s, with the reason: %v", client.Nick(), vhostInfo.RejectedVHost, user, reason)
	hsNotifyChannel(server, chanMsg)
	server.snomasks.Send(sno.LocalVhosts, chanMsg)
	// `target` avoids shadowing the oper's `client` variable.
	for _, target := range server.accounts.AccountToClients(user) {
		if reason == "" {
			// Fix: this message was the only user-facing notice not passed
			// through the translation layer; wrap it like its sibling below.
			target.Notice(target.t("Your vhost request was rejected by an administrator"))
		} else {
			target.Notice(fmt.Sprintf(target.t("Your vhost request was rejected by an administrator. The reason given was: %s"), reason))
		}
	}
}
// hsForbidHandler implements both /HOSTSERV FORBID and /HOSTSERV PERMIT;
// the command name determines whether the account's forbidden flag is set
// or cleared.
func hsForbidHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	user := params[0]
	forbidden := command == "forbid"
	_, err := server.accounts.VHostForbid(user, forbidden)
	if err == errAccountDoesNotExist {
		hsNotice(rb, client.t("No such account"))
	} else if err != nil {
		hsNotice(rb, client.t("An error occurred"))
	} else {
		if forbidden {
			hsNotice(rb, fmt.Sprintf(client.t("User %s is no longer allowed to use vhosts"), user))
		} else {
			hsNotice(rb, fmt.Sprintf(client.t("User %s is now allowed to use vhosts"), user))
		}
	}
}
// hsOfferListHandler implements /HOSTSERV OFFERLIST: prints the vhosts a
// user may TAKE without oper approval, or an explanation when none exist.
func hsOfferListHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	vhostConfig := server.Config().Accounts.VHosts
	if len(vhostConfig.OfferList) == 0 {
		if vhostConfig.UserRequests.Enabled {
			hsNotice(rb, client.t("The server does not offer any vhosts, but you can request one with /HOSTSERV REQUEST"))
		} else {
			hsNotice(rb, client.t("The server does not offer any vhosts"))
		}
		return
	}
	hsNotice(rb, client.t("The following vhosts are available and can be chosen with /HOSTSERV TAKE:"))
	for _, offered := range vhostConfig.OfferList {
		hsNotice(rb, offered)
	}
}
// hsTakeHandler implements /HOSTSERV TAKE: assigns the caller one of the
// vhosts on the server's offer list, subject to the same cooldown and
// forbidden checks as REQUEST.
func hsTakeHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	config := server.Config()
	vhost := params[0]
	found := false
	for _, offered := range config.Accounts.VHosts.OfferList {
		if offered == vhost {
			found = true
			break // fix: no need to scan the rest of the list
		}
	}
	if !found {
		hsNotice(rb, client.t("That vhost isn't being offered by the server"))
		return
	}
	account := client.Account()
	_, err := server.accounts.VHostTake(account, vhost, time.Duration(config.Accounts.VHosts.UserRequests.Cooldown))
	if err != nil {
		if throttled, ok := err.(*vhostThrottleExceeded); ok {
			hsNotice(rb, fmt.Sprintf(client.t("You must wait an additional %v before taking a vhost"), throttled.timeRemaining))
		} else if err == errVhostsForbidden {
			hsNotice(rb, client.t("An administrator has denied you the ability to use vhosts"))
		} else {
			hsNotice(rb, client.t("An error occurred"))
		}
	} else {
		hsNotice(rb, client.t("Successfully set vhost"))
		server.snomasks.Send(sno.LocalVhosts, fmt.Sprintf("Client %s (account %s) took vhost %s", client.Nick(), account, vhost))
	}
}
// fix #805 (concatenation artifact: commit message preceding a later revision of this file)
// Copyright (c) 2018 Shivaram Lingamneni <slingamn@cs.stanford.edu>
// released under the MIT license
package irc
import (
"errors"
"fmt"
"regexp"
"time"
"github.com/oragono/oragono/irc/sno"
)
const (
	// hostservHelp is the long-form help text shown for the HostServ service.
	hostservHelp = `HostServ lets you manage your vhost (i.e., the string displayed
in place of your client's hostname/IP).`
	// hsNickMask is the source nickmask used for every message HostServ sends.
	hsNickMask = "HostServ!HostServ@localhost"
)

var (
	// errVHostBadCharacters is returned by validateVhost when the proposed
	// vhost fails the allowed-character regexp.
	errVHostBadCharacters = errors.New("Vhost contains prohibited characters")
	// errVHostTooLong is returned by validateVhost when the proposed vhost
	// exceeds the configured maximum length.
	errVHostTooLong = errors.New("Vhost is too long")
	// ascii only for now
	defaultValidVhostRegex = regexp.MustCompile(`^[0-9A-Za-z.\-_/]+$`)
)
// hostservEnabled reports whether the vhost subsystem is enabled at all.
func hostservEnabled(config *Config) bool {
	return config.Accounts.VHosts.Enabled
}

// hostservRequestsEnabled reports whether users may file their own vhost
// requests (requires the vhost subsystem itself to be enabled too).
func hostservRequestsEnabled(config *Config) bool {
	return config.Accounts.VHosts.Enabled && config.Accounts.VHosts.UserRequests.Enabled
}
// hostservCommands maps lowercase HostServ subcommand names to their
// handlers, help text, required oper capabilities ("vhosts"), parameter
// bounds, and enablement predicates.
var (
	hostservCommands = map[string]*serviceCommand{
		"on": {
			handler: hsOnOffHandler,
			help: `Syntax: $bON$b
ON enables your vhost, if you have one approved.`,
			helpShort:    `$bON$b enables your vhost, if you have one approved.`,
			authRequired: true,
			enabled:      hostservEnabled,
		},
		"off": {
			handler: hsOnOffHandler,
			help: `Syntax: $bOFF$b
OFF disables your vhost, if you have one approved.`,
			helpShort:    `$bOFF$b disables your vhost, if you have one approved.`,
			authRequired: true,
			enabled:      hostservEnabled,
		},
		"request": {
			handler: hsRequestHandler,
			help: `Syntax: $bREQUEST <vhost>$b
REQUEST requests that a new vhost by assigned to your account. The request must
then be approved by a server operator.`,
			helpShort:    `$bREQUEST$b requests a new vhost, pending operator approval.`,
			authRequired: true,
			enabled:      hostservRequestsEnabled,
			minParams:    1,
		},
		"status": {
			handler: hsStatusHandler,
			help: `Syntax: $bSTATUS [user]$b
STATUS displays your current vhost, if any, and the status of your most recent
request for a new one. A server operator can view someone else's status.`,
			helpShort: `$bSTATUS$b shows your vhost and request status.`,
			enabled:   hostservEnabled,
		},
		"set": {
			handler: hsSetHandler,
			help: `Syntax: $bSET <user> <vhost>$b
SET sets a user's vhost, bypassing the request system.`,
			helpShort: `$bSET$b sets a user's vhost.`,
			capabs:    []string{"vhosts"},
			enabled:   hostservEnabled,
			minParams: 2,
		},
		"del": {
			handler: hsSetHandler,
			help: `Syntax: $bDEL <user>$b
DEL deletes a user's vhost.`,
			helpShort: `$bDEL$b deletes a user's vhost.`,
			capabs:    []string{"vhosts"},
			enabled:   hostservEnabled,
			minParams: 1,
		},
		"waiting": {
			handler: hsWaitingHandler,
			help: `Syntax: $bWAITING$b
WAITING shows a list of pending vhost requests, which can then be approved
or rejected.`,
			helpShort: `$bWAITING$b shows a list of pending vhost requests.`,
			capabs:    []string{"vhosts"},
			enabled:   hostservEnabled,
		},
		"approve": {
			handler: hsApproveHandler,
			help: `Syntax: $bAPPROVE <user>$b
APPROVE approves a user's vhost request.`,
			helpShort: `$bAPPROVE$b approves a user's vhost request.`,
			capabs:    []string{"vhosts"},
			enabled:   hostservEnabled,
			minParams: 1,
		},
		"reject": {
			handler: hsRejectHandler,
			help: `Syntax: $bREJECT <user> [<reason>]$b
REJECT rejects a user's vhost request, optionally giving them a reason
for the rejection.`,
			helpShort:         `$bREJECT$b rejects a user's vhost request.`,
			capabs:            []string{"vhosts"},
			enabled:           hostservEnabled,
			minParams:         1,
			maxParams:         2,
			unsplitFinalParam: true,
		},
		"forbid": {
			handler: hsForbidHandler,
			help: `Syntax: $bFORBID <user>$b
FORBID prevents a user from using any vhost, including ones on the offer list.`,
			helpShort: `$bFORBID$b prevents a user from using vhosts.`,
			capabs:    []string{"vhosts"},
			enabled:   hostservEnabled,
			minParams: 1,
			maxParams: 1,
		},
		"permit": {
			handler: hsForbidHandler,
			help: `Syntax: $bPERMIT <user>$b
PERMIT undoes FORBID, allowing the user to TAKE vhosts again.`,
			helpShort: `$bPERMIT$b allows a user to use vhosts again.`,
			capabs:    []string{"vhosts"},
			enabled:   hostservEnabled,
			minParams: 1,
			maxParams: 1,
		},
		"offerlist": {
			handler: hsOfferListHandler,
			help: `Syntax: $bOFFERLIST$b
OFFERLIST lists vhosts that can be chosen without requiring operator approval;
to use one of the listed vhosts, take it with /HOSTSERV TAKE.`,
			helpShort: `$bOFFERLIST$b lists vhosts that can be taken without operator approval.`,
			enabled:   hostservEnabled,
			minParams: 0,
			maxParams: 0,
		},
		"take": {
			handler: hsTakeHandler,
			help: `Syntax: $bTAKE$b <vhost>
TAKE sets your vhost to one of the vhosts in the server's offer list; to see
the offered vhosts, use /HOSTSERV OFFERLIST.`,
			helpShort:    `$bTAKE$b sets your vhost to one of the options from the offer list.`,
			enabled:      hostservEnabled,
			authRequired: true,
			minParams:    1,
			maxParams:    1,
		},
	}
)
// hsNotice sends the client a notice from HostServ, using the full service
// nickmask (hsNickMask) as the message source.
func hsNotice(rb *ResponseBuffer, text string) {
	rb.Add(nil, hsNickMask, "NOTICE", rb.target.Nick(), text)
}
// hsNotifyChannel broadcasts vhost activity to the configured oper channel,
// doing nothing when that channel does not exist.
func hsNotifyChannel(server *Server, message string) {
	channel := server.channels.Get(server.Config().Accounts.VHosts.UserRequests.Channel)
	if channel == nil {
		return
	}
	name := channel.Name()
	for _, member := range channel.Members() {
		member.Send(nil, hsNickMask, "PRIVMSG", name, message)
	}
}
// hsOnOffHandler implements /HOSTSERV ON and OFF, toggling whether the
// caller's approved vhost is applied.
func hsOnOffHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	enable := command == "on"
	_, err := server.accounts.VHostSetEnabled(client, enable)
	var message string
	if err == errNoVhost {
		message = client.t(err.Error())
	} else if err != nil {
		message = client.t("An error occurred")
	} else if enable {
		message = client.t("Successfully enabled your vhost")
	} else {
		message = client.t("Successfully disabled your vhost")
	}
	hsNotice(rb, message)
}
// hsRequestHandler implements /HOSTSERV REQUEST: validates the proposed
// vhost, files a request for the caller's account (subject to a cooldown),
// and notifies operators via the configured channel and snomasks.
func hsRequestHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	vhost := params[0]
	if validateVhost(server, vhost, false) != nil {
		hsNotice(rb, client.t("Invalid vhost"))
		return
	}
	accountName := client.Account()
	_, err := server.accounts.VHostRequest(accountName, vhost, time.Duration(server.Config().Accounts.VHosts.UserRequests.Cooldown))
	if err != nil {
		// Distinguish throttling and forbidden accounts from generic failures.
		if throttled, ok := err.(*vhostThrottleExceeded); ok {
			hsNotice(rb, fmt.Sprintf(client.t("You must wait an additional %v before making another request"), throttled.timeRemaining))
		} else if err == errVhostsForbidden {
			hsNotice(rb, client.t("An administrator has denied you the ability to use vhosts"))
		} else {
			hsNotice(rb, client.t("An error occurred"))
		}
	} else {
		hsNotice(rb, client.t("Your vhost request will be reviewed by an administrator"))
		chanMsg := fmt.Sprintf("Account %s requests vhost %s", accountName, vhost)
		hsNotifyChannel(server, chanMsg)
		server.snomasks.Send(sno.LocalVhosts, chanMsg)
	}
}
// hsStatusHandler implements /HOSTSERV STATUS: shows the approved vhost,
// pending request, and last rejection for the caller's account, or — for
// opers with the "vhosts" capability — for an arbitrary account.
func hsStatusHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	var accountName string
	if len(params) > 0 {
		// Viewing someone else's status requires the "vhosts" capability.
		if !client.HasRoleCapabs("vhosts") {
			hsNotice(rb, client.t("Command restricted"))
			return
		}
		accountName = params[0]
	} else {
		accountName = client.Account()
		if accountName == "" {
			hsNotice(rb, client.t("You're not logged into an account"))
			return
		}
	}
	account, err := server.accounts.LoadAccount(accountName)
	if err != nil {
		// Only unexpected storage errors are logged; either way the caller
		// just sees "No such account".
		if err != errAccountDoesNotExist {
			server.logger.Warning("internal", "error loading account info", accountName, err.Error())
		}
		hsNotice(rb, client.t("No such account"))
		return
	}
	if account.VHost.Forbidden {
		hsNotice(rb, client.t("An administrator has denied you the ability to use vhosts"))
		return
	}
	if account.VHost.ApprovedVHost != "" {
		hsNotice(rb, fmt.Sprintf(client.t("Account %[1]s has vhost: %[2]s"), accountName, account.VHost.ApprovedVHost))
		if !account.VHost.Enabled {
			hsNotice(rb, client.t("This vhost is currently disabled, but can be enabled with /HS ON"))
		}
	} else {
		hsNotice(rb, fmt.Sprintf(client.t("Account %s has no vhost"), accountName))
	}
	if account.VHost.RequestedVHost != "" {
		hsNotice(rb, fmt.Sprintf(client.t("A request is pending for vhost: %s"), account.VHost.RequestedVHost))
	}
	if account.VHost.RejectedVHost != "" {
		hsNotice(rb, fmt.Sprintf(client.t("A request was previously made for vhost: %s"), account.VHost.RejectedVHost))
		hsNotice(rb, fmt.Sprintf(client.t("It was rejected for reason: %s"), account.VHost.RejectionReason))
	}
}
// validateVhost checks a proposed vhost against the configured maximum
// length and allowed-character regexp. The oper parameter is currently
// unused — presumably reserved for looser oper-only rules; verify.
func validateVhost(server *Server, vhost string, oper bool) error {
	config := server.Config()
	if len(vhost) > config.Accounts.VHosts.MaxLength {
		return errVHostTooLong
	}
	if !config.Accounts.VHosts.ValidRegexp.MatchString(vhost) {
		return errVHostBadCharacters
	}
	return nil
}
// hsSetHandler implements both /HOSTSERV SET (assign a vhost directly,
// bypassing the request system) and /HOSTSERV DEL (clear it); for "del"
// the vhost stays empty, which is reported as a successful clear.
func hsSetHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	user := params[0]
	var vhost string
	if command == "set" {
		vhost = params[1]
		if validateVhost(server, vhost, true) != nil {
			hsNotice(rb, client.t("Invalid vhost"))
			return
		}
	}
	// else: command == "del", vhost == ""
	_, err := server.accounts.VHostSet(user, vhost)
	if err != nil {
		hsNotice(rb, client.t("An error occurred"))
	} else if vhost != "" {
		hsNotice(rb, client.t("Successfully set vhost"))
	} else {
		hsNotice(rb, client.t("Successfully cleared vhost"))
	}
}
// hsWaitingHandler implements /HOSTSERV WAITING: lists up to 10 pending
// vhost requests along with the total pending count.
func hsWaitingHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	requests, total := server.accounts.VHostListRequests(10)
	hsNotice(rb, fmt.Sprintf(client.t("There are %[1]d pending requests for vhosts (%[2]d displayed)"), total, len(requests)))
	for i := range requests {
		hsNotice(rb, fmt.Sprintf(client.t("%[1]d. User %[2]s requests vhost: %[3]s"), i+1, requests[i].Account, requests[i].RequestedVHost))
	}
}
// hsApproveHandler implements /HOSTSERV APPROVE: grants a pending vhost
// request, then notifies the oper channel, snomask subscribers, and every
// connected client of the approved account.
func hsApproveHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	user := params[0]
	vhostInfo, err := server.accounts.VHostApprove(user)
	if err != nil {
		hsNotice(rb, client.t("An error occurred"))
	} else {
		hsNotice(rb, fmt.Sprintf(client.t("Successfully approved vhost request for %s"), user))
		chanMsg := fmt.Sprintf("Oper %[1]s approved vhost %[2]s for account %[3]s", client.Nick(), vhostInfo.ApprovedVHost, user)
		hsNotifyChannel(server, chanMsg)
		server.snomasks.Send(sno.LocalVhosts, chanMsg)
		// NOTE: the loop variable shadows the oper's `client`; inside the
		// loop, `client` is each connected client of the approved account,
		// so the notice is translated per recipient.
		for _, client := range server.accounts.AccountToClients(user) {
			client.Send(nil, hsNickMask, "NOTICE", client.Nick(), client.t("Your vhost request was approved by an administrator"))
		}
	}
}
// hsRejectHandler implements /HOSTSERV REJECT: denies a pending vhost
// request, optionally recording a reason that is relayed to the requesting
// account's connected clients.
func hsRejectHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	var reason string
	user := params[0]
	if len(params) > 1 {
		// With unsplitFinalParam set on the command, this is the full
		// remaining text of the line.
		reason = params[1]
	}
	vhostInfo, err := server.accounts.VHostReject(user, reason)
	if err != nil {
		hsNotice(rb, client.t("An error occurred"))
	} else {
		hsNotice(rb, fmt.Sprintf(client.t("Successfully rejected vhost request for %s"), user))
		chanMsg := fmt.Sprintf("Oper %s rejected vhost %s for account %s, with the reason: %v", client.Nick(), vhostInfo.RejectedVHost, user, reason)
		hsNotifyChannel(server, chanMsg)
		server.snomasks.Send(sno.LocalVhosts, chanMsg)
		// NOTE: the loop variable shadows the oper's `client`; inside the
		// loop, `client` is each connected client of the rejected account.
		for _, client := range server.accounts.AccountToClients(user) {
			if reason == "" {
				client.Send(nil, hsNickMask, "NOTICE", client.Nick(), client.t("Your vhost request was rejected by an administrator"))
			} else {
				client.Send(nil, hsNickMask, "NOTICE", client.Nick(), fmt.Sprintf(client.t("Your vhost request was rejected by an administrator. The reason given was: %s"), reason))
			}
		}
	}
}
// hsForbidHandler handles the FORBID ("forbid") and UNFORBID commands,
// toggling whether the given account may use vhosts at all.
func hsForbidHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	account := params[0]
	shouldForbid := command == "forbid"
	_, err := server.accounts.VHostForbid(account, shouldForbid)
	switch {
	case err == errAccountDoesNotExist:
		hsNotice(rb, client.t("No such account"))
	case err != nil:
		hsNotice(rb, client.t("An error occurred"))
	case shouldForbid:
		hsNotice(rb, fmt.Sprintf(client.t("User %s is no longer allowed to use vhosts"), account))
	default:
		hsNotice(rb, fmt.Sprintf(client.t("User %s is now allowed to use vhosts"), account))
	}
}
// hsOfferListHandler lists the vhosts the server offers for self-service
// (/HOSTSERV TAKE), or explains how to request one if none are offered.
func hsOfferListHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	vhostConfig := server.Config().Accounts.VHosts
	if len(vhostConfig.OfferList) == 0 {
		if vhostConfig.UserRequests.Enabled {
			hsNotice(rb, client.t("The server does not offer any vhosts, but you can request one with /HOSTSERV REQUEST"))
		} else {
			hsNotice(rb, client.t("The server does not offer any vhosts"))
		}
	} else {
		hsNotice(rb, client.t("The following vhosts are available and can be chosen with /HOSTSERV TAKE:"))
		for _, vhost := range vhostConfig.OfferList {
			hsNotice(rb, vhost)
		}
	}
}

// hsTakeHandler lets a client claim one of the offered vhosts for their
// account, subject to the user-request cooldown throttle.
func hsTakeHandler(server *Server, client *Client, command string, params []string, rb *ResponseBuffer) {
	config := server.Config()
	vhost := params[0]
	// The requested vhost must be one the server explicitly offers.
	found := false
	for _, offered := range config.Accounts.VHosts.OfferList {
		if offered == vhost {
			found = true
		}
	}
	if !found {
		hsNotice(rb, client.t("That vhost isn't being offered by the server"))
		return
	}
	account := client.Account()
	_, err := server.accounts.VHostTake(account, vhost, time.Duration(config.Accounts.VHosts.UserRequests.Cooldown))
	if err != nil {
		if throttled, ok := err.(*vhostThrottleExceeded); ok {
			hsNotice(rb, fmt.Sprintf(client.t("You must wait an additional %v before taking a vhost"), throttled.timeRemaining))
		} else if err == errVhostsForbidden {
			hsNotice(rb, client.t("An administrator has denied you the ability to use vhosts"))
		} else {
			hsNotice(rb, client.t("An error occurred"))
		}
	} else {
		hsNotice(rb, client.t("Successfully set vhost"))
		server.snomasks.Send(sno.LocalVhosts, fmt.Sprintf("Client %s (account %s) took vhost %s", client.Nick(), account, vhost))
	}
}
|
package itsdangerous
import (
"bytes"
"crypto/hmac"
"crypto/sha1"
"crypto/subtle"
"encoding/base64"
"encoding/binary"
"errors"
"fmt"
"hash"
"strings"
"time"
)
// EPOCH is 2011/01/01 00:00:00 UTC as a Unix timestamp. Timestamps are
// stored as seconds since this epoch to fit more range into 32 bits.
const EPOCH = 1293840000
// base64Encode encodes src with URL-safe base64 and strips the trailing
// '=' padding so the result is safe for putting into URLs.
func base64Encode(src []byte) string {
	encoded := base64.URLEncoding.EncodeToString(src)
	return strings.TrimRight(encoded, "=")
}
// base64Decode decodes a string produced by base64Encode, restoring the
// '=' padding that base64Encode stripped.
func base64Decode(s string) ([]byte, error) {
	// The number of missing pad characters is (4 - len%4) % 4, not
	// len%4: a trimmed string of length 3 (mod 4) needs one '=', but the
	// old formula appended three, making the input invalid.
	padding := (4 - len(s)%4) % 4
	b, err := base64.URLEncoding.DecodeString(s + strings.Repeat("=", padding))
	if err != nil {
		return []byte(""), err
	}
	return b, nil
}
// getTimestamp returns the current timestamp as seconds since
// 1/1/2011 UTC (EPOCH), truncated to uint32.
func getTimestamp() uint32 {
	return uint32(time.Now().Unix() - EPOCH)
}
// SigningAlgorithm is implemented by signature backends: it generates a
// signature for a key/value pair and verifies a given signature.
type SigningAlgorithm interface {
	GetSignature(key, value string) []byte
	VerifySignature(key, value string, sig []byte) bool
}

// This struct provides signature generation using HMACs.
type HMACAlgorithm struct {
	// DigestMethod is the hash instance used for the HMAC.
	DigestMethod hash.Hash
}

// Returns the signature for the given key and value.
func (a *HMACAlgorithm) GetSignature(key, value string) []byte {
	a.DigestMethod.Reset()
	// NOTE(review): hmac.New calls this constructor more than once, but
	// it always returns the same shared hash.Hash instance, so the inner
	// and outer HMAC hashes alias each other — verify against the
	// crypto/hmac documentation (it expects a fresh hash per call).
	h := hmac.New(func() hash.Hash { return a.DigestMethod }, []byte(key))
	h.Write([]byte(value))
	return h.Sum(nil)
}

// Verifies the given signature matches the expected signature,
// using a constant-time comparison.
func (a *HMACAlgorithm) VerifySignature(key, value string, sig []byte) bool {
	eq := subtle.ConstantTimeCompare(sig, []byte(a.GetSignature(key, value)))
	return eq == 1
}

// This struct provides an algorithm that does not perform any
// signing and returns an empty signature.
type NoneAlgorithm struct {
	HMACAlgorithm
}

// Returns the signature for the given key and value
// (always the empty signature here).
func (a *NoneAlgorithm) GetSignature(key, value string) []byte {
	return []byte("")
}
// Signer signs strings and unsigns/validates signed strings.
// Salt namespaces the derived key: a signed value is only valid for the
// salt it was produced with.
type Signer struct {
	SecretKey     string           // secret used to derive the signing key
	Sep           string           // separator between value and signature
	Salt          string           // namespace for the derived key
	KeyDerivation string           // "concat", "django-concat", "hmac" or "none"
	DigestMethod  hash.Hash        // hash used for key derivation
	Algorithm     SigningAlgorithm // signature backend
}
// DeriveKey derives the actual signing key from SecretKey and Salt
// according to KeyDerivation. Keep in mind that the key derivation in
// itsdangerous is not intended to be used as a security method to make
// a complex key out of a short password. Instead you should use large
// random secret keys.
func (s *Signer) DeriveKey() (string, error) {
	var key string
	var err error
	s.DigestMethod.Reset()
	switch s.KeyDerivation {
	case "concat":
		// hash(salt + secret)
		h := s.DigestMethod
		h.Write([]byte(s.Salt + s.SecretKey))
		key = string(h.Sum(nil))
	case "django-concat":
		// hash(salt + "signer" + secret), Django's convention.
		h := s.DigestMethod
		h.Write([]byte(s.Salt + "signer" + s.SecretKey))
		key = string(h.Sum(nil))
	case "hmac":
		// HMAC(secret, salt)
		h := hmac.New(func() hash.Hash { return s.DigestMethod }, []byte(s.SecretKey))
		h.Write([]byte(s.Salt))
		key = string(h.Sum(nil))
	case "none":
		key = s.SecretKey
	default:
		// golint/ST1005: error strings should not be capitalised.
		key, err = "", errors.New("unknown key derivation method")
	}
	return key, err
}
// Returns the base64-encoded signature for the given value.
func (s *Signer) GetSignature(value string) (string, error) {
	key, err := s.DeriveKey()
	if err != nil {
		return "", err
	}
	sig := s.Algorithm.GetSignature(key, value)
	// err is nil at this point; returned for symmetry.
	return base64Encode(sig), err
}

// Verifies the base64-encoded signature for the given value.
func (s *Signer) VerifySignature(value, sig string) (bool, error) {
	key, err := s.DeriveKey()
	if err != nil {
		return false, err
	}
	signed, err := base64Decode(sig)
	if err != nil {
		return false, err
	}
	return s.Algorithm.VerifySignature(key, value, signed), nil
}
// Sign appends the value's signature, separated by Sep.
func (s *Signer) Sign(value string) (string, error) {
	signature, err := s.GetSignature(value)
	if err != nil {
		return "", err
	}
	return strings.Join([]string{value, signature}, s.Sep), nil
}
// Unsign splits the signed string at the last separator and returns the
// value if its signature verifies; otherwise an error is returned.
func (s *Signer) Unsign(signed string) (string, error) {
	if !strings.Contains(signed, s.Sep) {
		// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028).
		return "", fmt.Errorf("No %s found in value", s.Sep)
	}
	li := strings.LastIndex(signed, s.Sep)
	value, sig := signed[:li], signed[li+len(s.Sep):]
	// Verification errors (e.g. undecodable base64) count as a mismatch.
	if ok, _ := s.VerifySignature(value, sig); ok {
		return value, nil
	}
	return "", fmt.Errorf("Signature %s does not match", sig)
}
// NewSigner constructs a Signer, substituting library defaults for any
// zero-valued argument: salt "itsdangerous.Signer", separator ".",
// key derivation "django-concat", SHA-1 digest and the HMAC algorithm.
func NewSigner(secret, salt, sep, derivation string, digest hash.Hash, algo SigningAlgorithm) *Signer {
	s := &Signer{
		SecretKey:     secret,
		Salt:          salt,
		Sep:           sep,
		KeyDerivation: derivation,
		DigestMethod:  digest,
		Algorithm:     algo,
	}
	if s.Salt == "" {
		s.Salt = "itsdangerous.Signer"
	}
	if s.Sep == "" {
		s.Sep = "."
	}
	if s.KeyDerivation == "" {
		s.KeyDerivation = "django-concat"
	}
	if s.DigestMethod == nil {
		s.DigestMethod = sha1.New()
	}
	if s.Algorithm == nil {
		s.Algorithm = &HMACAlgorithm{DigestMethod: s.DigestMethod}
	}
	return s
}
// TimestampSigner works like Signer but also embeds the signing time so
// signatures can be expired on Unsign.
type TimestampSigner struct {
	Signer
}

// Sign appends a base64-encoded big-endian timestamp (seconds since
// EPOCH) to the value before signing: value<sep>timestamp<sep>signature.
func (s *TimestampSigner) Sign(value string) (string, error) {
	buf := new(bytes.Buffer)
	if err := binary.Write(buf, binary.BigEndian, getTimestamp()); err != nil {
		return "", err
	}
	ts := base64Encode(buf.Bytes())
	val := value + s.Sep + ts
	sig, err := s.GetSignature(val)
	if err != nil {
		return "", err
	}
	return val + s.Sep + sig, nil
}
// Unsigns the given string.
func (s *TimestampSigner) Unsign(value string, maxAge uint32) (string, error) {
var timestamp uint32
result, err := s.Signer.Unsign(value)
if err != nil {
return "", err
}
// If there is no timestamp in the result there is something seriously wrong.
if !strings.Contains(result, s.Sep) {
return "", errors.New("Timestamp missing")
}
li := strings.LastIndex(result, s.Sep)
val, ts := result[:li], result[li+len(s.Sep):]
sig, err := base64Decode(ts)
if err != nil {
return "", err
}
buf := bytes.NewReader([]byte(sig))
if err = binary.Read(buf, binary.BigEndian, ×tamp); err != nil {
return "", err
}
if maxAge > 0 {
if age := getTimestamp() - timestamp; age > maxAge {
return "", errors.New(fmt.Sprintf("Signature age %d > %d seconds", age, maxAge))
}
}
return val, nil
}
// NewTimestampSigner constructs a TimestampSigner with the same
// defaulting rules as NewSigner.
func NewTimestampSigner(secret, salt, sep, derivation string, digest hash.Hash, algo SigningAlgorithm) *TimestampSigner {
	return &TimestampSigner{Signer: *NewSigner(secret, salt, sep, derivation, digest, algo)}
}
Conform to golint.
/*
Package itsdangerous implements various functions to deal with untrusted sources.
Mainly useful for web applications.
This package exists purely as a port of https://github.com/mitsuhiko/itsdangerous,
where the original version is written in Python.
*/
package itsdangerous
import (
"bytes"
"crypto/hmac"
"crypto/sha1"
"crypto/subtle"
"encoding/base64"
"encoding/binary"
"errors"
"fmt"
"hash"
"strings"
"time"
)
// EPOCH is 2011/01/01 00:00:00 UTC as a Unix timestamp. Timestamps are
// stored as seconds since this epoch to fit more range into 32 bits.
const EPOCH = 1293840000
// base64Encode encodes src with URL-safe base64 and strips the trailing
// '=' padding so the result is safe for putting into URLs.
func base64Encode(src []byte) string {
	encoded := base64.URLEncoding.EncodeToString(src)
	return strings.TrimRight(encoded, "=")
}
// base64Decode decodes a string produced by base64Encode, restoring the
// '=' padding that base64Encode stripped.
func base64Decode(s string) ([]byte, error) {
	// The number of missing pad characters is (4 - len%4) % 4, not
	// len%4: a trimmed string of length 3 (mod 4) needs one '=', but the
	// old formula appended three, making the input invalid.
	padding := (4 - len(s)%4) % 4
	b, err := base64.URLEncoding.DecodeString(s + strings.Repeat("=", padding))
	if err != nil {
		return []byte(""), err
	}
	return b, nil
}
// getTimestamp returns the current timestamp as seconds since
// 1/1/2011 UTC (EPOCH), truncated to uint32.
func getTimestamp() uint32 {
	return uint32(time.Now().Unix() - EPOCH)
}
// SigningAlgorithm provides interfaces to generate and verify signature
// for a key/value pair.
type SigningAlgorithm interface {
	GetSignature(key, value string) []byte
	VerifySignature(key, value string, sig []byte) bool
}

// HMACAlgorithm provides signature generation using HMACs.
type HMACAlgorithm struct {
	// DigestMethod is the hash instance used for the HMAC.
	DigestMethod hash.Hash
}

// GetSignature returns the signature for the given key and value.
func (a *HMACAlgorithm) GetSignature(key, value string) []byte {
	a.DigestMethod.Reset()
	// NOTE(review): hmac.New calls this constructor more than once, but
	// it always returns the same shared hash.Hash instance, so the inner
	// and outer HMAC hashes alias each other — verify against the
	// crypto/hmac documentation (it expects a fresh hash per call).
	h := hmac.New(func() hash.Hash { return a.DigestMethod }, []byte(key))
	h.Write([]byte(value))
	return h.Sum(nil)
}

// VerifySignature verifies the given signature matches the expected
// signature, using a constant-time comparison.
func (a *HMACAlgorithm) VerifySignature(key, value string, sig []byte) bool {
	eq := subtle.ConstantTimeCompare(sig, []byte(a.GetSignature(key, value)))
	return eq == 1
}

// NoneAlgorithm provides an algorithm that does not perform any
// signing and returns an empty signature.
type NoneAlgorithm struct {
	HMACAlgorithm
}

// GetSignature returns the signature for the given key and value
// (always the empty signature here).
func (a *NoneAlgorithm) GetSignature(key, value string) []byte {
	return []byte("")
}
// Signer can sign bytes and unsign it and validate the signature
// provided.
//
// Salt can be used to namespace the hash, so that a signed string is only
// valid for a given namespace. Leaving this at the default value or re-using
// a salt value across different parts of your application where the same
// signed value in one part can mean something different in another part
// is a security risk.
type Signer struct {
	SecretKey     string           // secret used to derive the signing key
	Sep           string           // separator between value and signature
	Salt          string           // namespace for the derived key
	KeyDerivation string           // "concat", "django-concat", "hmac" or "none"
	DigestMethod  hash.Hash        // hash used for key derivation
	Algorithm     SigningAlgorithm // signature backend
}
// DeriveKey generates a key derivation. Keep in mind that the key derivation in itsdangerous
// is not intended to be used as a security method to make a complex key out of a short password.
// Instead you should use large random secret keys.
func (s *Signer) DeriveKey() (string, error) {
	var key string
	var err error
	s.DigestMethod.Reset()
	switch s.KeyDerivation {
	case "concat":
		// hash(salt + secret)
		h := s.DigestMethod
		h.Write([]byte(s.Salt + s.SecretKey))
		key = string(h.Sum(nil))
	case "django-concat":
		// hash(salt + "signer" + secret), Django's convention.
		h := s.DigestMethod
		h.Write([]byte(s.Salt + "signer" + s.SecretKey))
		key = string(h.Sum(nil))
	case "hmac":
		// HMAC(secret, salt)
		h := hmac.New(func() hash.Hash { return s.DigestMethod }, []byte(s.SecretKey))
		h.Write([]byte(s.Salt))
		key = string(h.Sum(nil))
	case "none":
		key = s.SecretKey
	default:
		key, err = "", errors.New("unknown key derivation method")
	}
	return key, err
}
// GetSignature returns the base64-encoded signature for the given value.
func (s *Signer) GetSignature(value string) (string, error) {
	key, err := s.DeriveKey()
	if err != nil {
		return "", err
	}
	sig := s.Algorithm.GetSignature(key, value)
	// err is nil at this point; returned for symmetry.
	return base64Encode(sig), err
}

// VerifySignature verifies the base64-encoded signature for the given value.
func (s *Signer) VerifySignature(value, sig string) (bool, error) {
	key, err := s.DeriveKey()
	if err != nil {
		return false, err
	}
	signed, err := base64Decode(sig)
	if err != nil {
		return false, err
	}
	return s.Algorithm.VerifySignature(key, value, signed), nil
}
// Sign appends the value's signature, separated by Sep.
func (s *Signer) Sign(value string) (string, error) {
	signature, err := s.GetSignature(value)
	if err != nil {
		return "", err
	}
	return strings.Join([]string{value, signature}, s.Sep), nil
}
// Unsign splits the signed string at the last separator and returns the
// value if its signature verifies; otherwise an error is returned.
func (s *Signer) Unsign(signed string) (string, error) {
	if !strings.Contains(signed, s.Sep) {
		return "", fmt.Errorf("No %s found in value", s.Sep)
	}
	li := strings.LastIndex(signed, s.Sep)
	value, sig := signed[:li], signed[li+len(s.Sep):]
	// Verification errors (e.g. undecodable base64) count as a mismatch.
	// staticcheck S1002: compare booleans directly, not with "== true".
	if ok, _ := s.VerifySignature(value, sig); ok {
		return value, nil
	}
	return "", fmt.Errorf("Signature %s does not match", sig)
}
// NewSigner constructs a Signer, substituting library defaults for any
// zero-valued argument: salt "itsdangerous.Signer", separator ".",
// key derivation "django-concat", SHA-1 digest and the HMAC algorithm.
func NewSigner(secret, salt, sep, derivation string, digest hash.Hash, algo SigningAlgorithm) *Signer {
	s := &Signer{
		SecretKey:     secret,
		Salt:          salt,
		Sep:           sep,
		KeyDerivation: derivation,
		DigestMethod:  digest,
		Algorithm:     algo,
	}
	if s.Salt == "" {
		s.Salt = "itsdangerous.Signer"
	}
	if s.Sep == "" {
		s.Sep = "."
	}
	if s.KeyDerivation == "" {
		s.KeyDerivation = "django-concat"
	}
	if s.DigestMethod == nil {
		s.DigestMethod = sha1.New()
	}
	if s.Algorithm == nil {
		s.Algorithm = &HMACAlgorithm{DigestMethod: s.DigestMethod}
	}
	return s
}
// TimestampSigner works like the regular Signer but also records the time
// of the signing and can be used to expire signatures.
type TimestampSigner struct {
	Signer
}

// Sign appends a base64-encoded big-endian timestamp (seconds since
// EPOCH) to the value before signing: value<sep>timestamp<sep>signature.
func (s *TimestampSigner) Sign(value string) (string, error) {
	buf := new(bytes.Buffer)
	if err := binary.Write(buf, binary.BigEndian, getTimestamp()); err != nil {
		return "", err
	}
	ts := base64Encode(buf.Bytes())
	val := value + s.Sep + ts
	sig, err := s.GetSignature(val)
	if err != nil {
		return "", err
	}
	return val + s.Sep + sig, nil
}
// Unsign the given string.
func (s *TimestampSigner) Unsign(value string, maxAge uint32) (string, error) {
var timestamp uint32
result, err := s.Signer.Unsign(value)
if err != nil {
return "", err
}
// If there is no timestamp in the result there is something seriously wrong.
if !strings.Contains(result, s.Sep) {
return "", errors.New("timestamp missing")
}
li := strings.LastIndex(result, s.Sep)
val, ts := result[:li], result[li+len(s.Sep):]
sig, err := base64Decode(ts)
if err != nil {
return "", err
}
buf := bytes.NewReader([]byte(sig))
if err = binary.Read(buf, binary.BigEndian, ×tamp); err != nil {
return "", err
}
if maxAge > 0 {
if age := getTimestamp() - timestamp; age > maxAge {
return "", fmt.Errorf("Signature age %d > %d seconds", age, maxAge)
}
}
return val, nil
}
// NewTimestampSigner constructs a TimestampSigner with the same
// defaulting rules as NewSigner.
func NewTimestampSigner(secret, salt, sep, derivation string, digest hash.Hash, algo SigningAlgorithm) *TimestampSigner {
	return &TimestampSigner{Signer: *NewSigner(secret, salt, sep, derivation, digest, algo)}
}
|
package main
import (
"fmt"
"net/http"
"os"
"strconv"
"github.com/gocql/gocql"
)
// main connects to the Cassandra cluster and serves the /status and
// /search HTTP endpoints.
func main() {
	cluster := gocql.NewCluster("gophr.dev")
	cluster.ProtoVersion = 4
	cluster.Keyspace = "gophr"
	cluster.Consistency = gocql.One
	// Fail fast if the database is unreachable rather than ignoring the
	// error and panicking later on a nil session.
	session, err := cluster.CreateSession()
	if err != nil {
		fmt.Printf("Could not connect to the database: %v.\n", err)
		os.Exit(1)
	}
	defer session.Close()
	http.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "OK")
	})
	http.HandleFunc("/search", SearchHandler(session))
	// Resolve the port from $PORT, defaulting to 3000.
	portStr := os.Getenv("PORT")
	var port int
	if len(portStr) == 0 {
		fmt.Println("Port left unspecified; setting port to 3000.")
		port = 3000
	} else if portNum, err := strconv.Atoi(portStr); err == nil {
		fmt.Printf("Port was specified as %d.\n", portNum)
		port = portNum
	} else {
		fmt.Println("Port was invalid; setting port to 3000.")
		port = 3000
	}
	// ListenAndServe only returns on failure; report it instead of
	// silently exiting.
	if err := http.ListenAndServe(fmt.Sprintf(":%d", port), nil); err != nil {
		fmt.Printf("Server failed: %v.\n", err)
		os.Exit(1)
	}
}
Change database host gophr.dev => gophr-db.
package main
import (
"fmt"
"net/http"
"os"
"strconv"
"github.com/gocql/gocql"
)
// main connects to the Cassandra cluster (at the gophr-db service) and
// serves the /status and /search HTTP endpoints.
func main() {
	cluster := gocql.NewCluster("gophr-db")
	cluster.ProtoVersion = 4
	cluster.Keyspace = "gophr"
	cluster.Consistency = gocql.One
	// Fail fast if the database is unreachable rather than ignoring the
	// error and panicking later on a nil session.
	session, err := cluster.CreateSession()
	if err != nil {
		fmt.Printf("Could not connect to the database: %v.\n", err)
		os.Exit(1)
	}
	defer session.Close()
	http.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "OK")
	})
	http.HandleFunc("/search", SearchHandler(session))
	// Resolve the port from $PORT, defaulting to 3000.
	portStr := os.Getenv("PORT")
	var port int
	if len(portStr) == 0 {
		fmt.Println("Port left unspecified; setting port to 3000.")
		port = 3000
	} else if portNum, err := strconv.Atoi(portStr); err == nil {
		fmt.Printf("Port was specified as %d.\n", portNum)
		port = portNum
	} else {
		fmt.Println("Port was invalid; setting port to 3000.")
		port = 3000
	}
	// ListenAndServe only returns on failure; report it instead of
	// silently exiting.
	if err := http.ListenAndServe(fmt.Sprintf(":%d", port), nil); err != nil {
		fmt.Printf("Server failed: %v.\n", err)
		os.Exit(1)
	}
}
|
// Package zip implements functions for jarcat that manipulate .zip files.
package zip
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"strings"
"time"
"gopkg.in/op/go-logging.v1"
"third_party/go/zip"
)
// log is the package-level logger.
var log = logging.MustGetLogger("zip")

// modTime is the fixed timestamp written for every entry so that output
// zipfiles are byte-for-byte deterministic.
var modTime = time.Date(2001, time.January, 1, 0, 0, 0, 0, time.UTC)

// fileHeaderLen is the length of a file header in a zipfile.
// We need to know this to adjust alignment.
const fileHeaderLen = 30
// A File represents an output zipfile.
type File struct {
	f        io.WriteCloser // underlying output stream
	w        *zip.Writer    // zip writer wrapping f
	filename string         // path of the output zipfile itself
	input    string         // root passed to AddFiles; consulted by walk
	// Include and Exclude are prefixes of filenames to include or exclude from the zipfile.
	Include, Exclude []string
	// Strict controls whether we deny duplicate files or not.
	// Zipfiles can readily contain duplicates, if this is true we reject them unless they are identical.
	// If false we allow duplicates and leave it to someone else to handle.
	Strict bool
	// RenameDirs is a map of directories to rename, from the old name to the new one.
	RenameDirs map[string]string
	// StripPrefix is a prefix that is stripped off any files added with AddFiles.
	StripPrefix string
	// Suffix is the suffix of files that we include while scanning.
	Suffix []string
	// ExcludeSuffix is a list of suffixes that are excluded from the file scan.
	ExcludeSuffix []string
	// StoreSuffix is a list of file suffixes that will be stored instead of deflated.
	StoreSuffix []string
	// IncludeOther will make the file scan include other files that are not part of a zip file.
	IncludeOther bool
	// AddInitPy will make the writer add __init__.py files to all directories that don't already have one on close.
	AddInitPy bool
	// DirEntries makes the writer add empty directory entries.
	DirEntries bool
	// Align aligns entries to a multiple of this many bytes.
	Align int
	// files tracks the files that we've written so far.
	files map[string]fileRecord
	// concatenatedFiles tracks the files that are built up as we go.
	concatenatedFiles map[string][]byte
}

// A fileRecord records some information about a file that we use to check if they're exact duplicates.
type fileRecord struct {
	ZipFile            string // zipfile the entry was originally added from
	CompressedSize64   uint64
	UncompressedSize64 uint64
	CRC32              uint32
}
// NewFile constructs and returns a new File writing to the given output
// path. Failure to create the output file is fatal.
func NewFile(output string, strict bool) *File {
	f, err := os.Create(output)
	if err != nil {
		log.Fatalf("Failed to open output file: %s", err)
	}
	return &File{
		f:                 f,
		w:                 zip.NewWriter(f),
		filename:          output,
		Strict:            strict,
		files:             map[string]fileRecord{},
		concatenatedFiles: map[string][]byte{},
	}
}
// Close flushes deferred work (concatenated files and, if requested,
// __init__.py entries) and finalises the zipfile. It must be called
// before the File is destroyed; any failure is fatal, consistent with
// the other error handling in this writer.
func (f *File) Close() {
	// Previously the error from handleConcatenatedFiles was silently
	// discarded; a failed write would produce a corrupt archive.
	if err := f.handleConcatenatedFiles(); err != nil {
		log.Fatalf("Failed to write concatenated files: %s", err)
	}
	if f.AddInitPy {
		if err := f.AddInitPyFiles(); err != nil {
			log.Fatalf("%s", err)
		}
	}
	if err := f.w.Close(); err != nil {
		log.Fatalf("Failed to finalise zip file: %s", err)
	}
	if err := f.f.Close(); err != nil {
		log.Fatalf("Failed to close file: %s", err)
	}
}
// AddZipFile copies the contents of a zip file into the new zipfile.
// Entries are copied in their already-compressed form (no re-deflate);
// duplicate handling depends on f.Strict.
func (f *File) AddZipFile(filepath string) error {
	r, err := zip.OpenReader(filepath)
	if err != nil {
		return err
	}
	defer r.Close()
	// Reopen file to get a directly readable version without decompression.
	r2, err := os.Open(filepath)
	if err != nil {
		return err
	}
	defer r2.Close()
	for _, rf := range r.File {
		log.Debug("Found file %s (from %s)", rf.Name, filepath)
		// This directory is very awkward. We need to merge the contents by concatenating them,
		// we can't replace them or leave them out.
		if strings.HasPrefix(rf.Name, "META-INF/services/") ||
			strings.HasPrefix(rf.Name, "META-INF/spring") ||
			rf.Name == "META-INF/please_sourcemap" {
			if err := f.concatenateFile(rf); err != nil {
				return err
			}
			continue
		}
		if !f.shouldInclude(rf.Name) {
			continue
		}
		// Normalise directory entries to always carry a trailing slash.
		hasTrailingSlash := strings.HasSuffix(rf.Name, "/")
		isDir := hasTrailingSlash || rf.FileInfo().IsDir()
		if isDir && !hasTrailingSlash {
			rf.Name = rf.Name + "/"
		}
		if existing, present := f.files[rf.Name]; present {
			// Allow duplicates of directories. Seemingly the best way to identify them is that
			// they end in a trailing slash.
			if isDir {
				continue
			}
			// Allow skipping existing files that are exactly the same as the added ones.
			// It's unnecessarily awkward to insist on not ever doubling up on a dependency.
			// TODO(pebers): Bit of a hack ignoring it when CRC is 0, would be better to add
			// the correct CRC when added through WriteFile.
			if existing.CRC32 == rf.CRC32 || existing.CRC32 == 0 {
				log.Info("Skipping %s / %s: already added (from %s)", filepath, rf.Name, existing.ZipFile)
				continue
			}
			if f.Strict {
				log.Error("Duplicate file %s (from %s, already added from %s); crc %d / %d", rf.Name, filepath, existing.ZipFile, rf.CRC32, existing.CRC32)
				return fmt.Errorf("File %s already added to destination zip file (from %s)", rf.Name, existing.ZipFile)
			}
			continue
		}
		// Apply any requested directory renames and prefix stripping.
		for before, after := range f.RenameDirs {
			if strings.HasPrefix(rf.Name, before) {
				rf.Name = path.Join(after, strings.TrimPrefix(rf.Name, before))
				if isDir {
					rf.Name = rf.Name + "/"
				}
				break
			}
		}
		if f.StripPrefix != "" {
			rf.Name = strings.TrimPrefix(rf.Name, f.StripPrefix)
		}
		// Java tools don't seem to like writing a data descriptor for stored items.
		// Unsure if this is a limitation of the format or a problem of those tools.
		rf.Flags = 0
		f.addExistingFile(rf.Name, filepath, rf.CompressedSize64, rf.UncompressedSize64, rf.CRC32)
		// Seek the raw reader to this entry's data and copy the
		// compressed bytes straight across.
		start, err := rf.DataOffset()
		if err != nil {
			return err
		}
		if _, err := r2.Seek(start, 0); err != nil {
			return err
		}
		if err := f.addFile(&rf.FileHeader, r2, rf.CRC32); err != nil {
			return err
		}
	}
	return nil
}
// walk is a filepath.WalkFunc-compatible function which walks a file tree,
// adding all the files it finds within it.
func (f *File) walk(path string, info os.FileInfo, err error) error {
	if err != nil {
		return err
	} else if path != f.input && (info.Mode()&os.ModeSymlink) != 0 {
		// Resolve symlinks (other than the root input itself); if one
		// points at a directory, walk into its target.
		if resolved, err := filepath.EvalSymlinks(path); err != nil {
			return err
		} else if stat, err := os.Stat(resolved); err != nil {
			return err
		} else if stat.IsDir() {
			// TODO(peterebden): Is this case still needed?
			return filepath.Walk(resolved, f.walk)
		}
	}
	if path == f.filename {
		// Never try to add the output zipfile to itself.
		return nil
	} else if !info.IsDir() {
		if !f.matchesSuffix(path, f.ExcludeSuffix) {
			if f.matchesSuffix(path, f.Suffix) {
				log.Debug("Adding zip file %s", path)
				if err := f.AddZipFile(path); err != nil {
					return fmt.Errorf("Error adding %s to zipfile: %s", path, err)
				}
			} else if f.IncludeOther && !f.HasExistingFile(path) {
				log.Debug("Including existing non-zip file %s", path)
				if b, err := ioutil.ReadFile(path); err != nil {
					return fmt.Errorf("Error reading %s to zipfile: %s", path, err)
				} else if err := f.StripBytecodeTimestamp(path, b); err != nil {
					return err
				} else if err := f.WriteFile(path, b); err != nil {
					return err
				}
			}
		}
	} else if (len(f.Suffix) == 0 || f.AddInitPy) && path != "." && f.DirEntries { // Only add directory entries in "dumb" mode.
		log.Debug("Adding directory entry %s/", path)
		if err := f.WriteDir(path); err != nil {
			return err
		}
	}
	return nil
}

// AddFiles walks the given directory and adds any zip files (determined by suffix) that it finds within.
// Non-zip files may also be included, per the IncludeOther flag.
func (f *File) AddFiles(in string) error {
	f.input = in
	return filepath.Walk(in, f.walk)
}
// matchesSuffix reports whether path ends with any of the given
// suffixes. Empty suffixes never match. (The previous comment referred
// to this function by a stale name, shouldExcludeSuffix.)
func (f *File) matchesSuffix(path string, suffixes []string) bool {
	for _, s := range suffixes {
		if s == "" {
			continue
		}
		if strings.HasSuffix(path, s) {
			return true
		}
	}
	return false
}
// shouldInclude returns true if the given filename should be included according to the include / exclude sets of this File.
// Exclude patterns are matched against both the full name and its
// basename; if no includes are configured, everything else passes.
func (f *File) shouldInclude(name string) bool {
	for _, excl := range f.Exclude {
		if matched, _ := filepath.Match(excl, name); matched {
			log.Debug("Skipping %s (excluded by %s)", name, excl)
			return false
		} else if matched, _ := filepath.Match(excl, filepath.Base(name)); matched {
			log.Debug("Skipping %s (excluded by %s)", name, excl)
			return false
		}
	}
	if len(f.Include) == 0 {
		return true
	}
	for _, incl := range f.Include {
		if matched, _ := filepath.Match(incl, name); matched || strings.HasPrefix(name, incl) {
			return true
		}
	}
	log.Debug("Skipping %s (didn't match any includes)", name)
	return false
}
// AddInitPyFiles adds an __init__.py file to every directory in the zip file that doesn't already have one.
// Files are visited in sorted order so the additions are deterministic.
func (f *File) AddInitPyFiles() error {
	s := make([]string, 0, len(f.files))
	for p := range f.files {
		s = append(s, p)
	}
	sort.Strings(s)
	for _, p := range s {
		// Walk up each file's directory chain towards the root.
		for d := filepath.Dir(p); d != "."; d = filepath.Dir(d) {
			if filepath.Base(d) == "__pycache__" {
				break // Don't need to add an __init__.py here.
			}
			initPyPath := path.Join(d, "__init__.py")
			// Don't write one at the root, it's not necessary.
			if _, present := f.files[initPyPath]; present || initPyPath == "__init__.py" {
				break
			} else if _, present := f.files[initPyPath+"c"]; present {
				// If we already have a pyc / pyo we don't need the __init__.py as well.
				break
			} else if _, present := f.files[initPyPath+"o"]; present {
				break
			}
			log.Debug("Adding %s", initPyPath)
			f.files[initPyPath] = fileRecord{}
			if err := f.WriteFile(initPyPath, []byte{}); err != nil {
				return err
			}
		}
	}
	return nil
}
// AddManifest adds a manifest to the given zip writer with a Main-Class entry (and a couple of others)
func (f *File) AddManifest(mainClass string) error {
	manifest := fmt.Sprintf("Manifest-Version: 1.0\nMain-Class: %s\n", mainClass)
	return f.WriteFile("META-INF/MANIFEST.MF", []byte(manifest))
}

// HasExistingFile returns true if the writer has already written the given file.
func (f *File) HasExistingFile(name string) bool {
	_, present := f.files[name]
	return present
}

// addExistingFile adds a record for an existing file, although doesn't write any contents.
// The record is used both for duplicate detection and __init__.py insertion.
func (f *File) addExistingFile(name, file string, compressedSize, uncompressedSize uint64, crc uint32) {
	f.files[name] = fileRecord{file, compressedSize, uncompressedSize, crc}
}
// concatenateFile adds a file to the zip which is concatenated with any existing content with the same name.
// Writing is deferred since we obviously can't append to it later.
func (f *File) concatenateFile(zf *zip.File) error {
	r, err := zf.Open()
	if err != nil {
		return err
	}
	defer r.Close()
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, r); err != nil {
		return err
	}
	contents := buf.Bytes()
	// Ensure each concatenated chunk ends with a newline so entries from
	// different source zips don't run together.
	if !bytes.HasSuffix(contents, []byte{'\n'}) {
		contents = append(contents, '\n')
	}
	f.concatenatedFiles[zf.Name] = append(f.concatenatedFiles[zf.Name], contents...)
	return nil
}

// handleConcatenatedFiles appends concatenated files to the archive's directory for writing.
func (f *File) handleConcatenatedFiles() error {
	// Must do it in a deterministic order
	files := make([]string, 0, len(f.concatenatedFiles))
	for name := range f.concatenatedFiles {
		files = append(files, name)
	}
	sort.Strings(files)
	for _, name := range files {
		if err := f.WriteFile(name, f.concatenatedFiles[name]); err != nil {
			return err
		}
	}
	return nil
}
// addFile writes a file to the new writer, passing its (already
// compressed) bytes straight through rather than recompressing.
func (f *File) addFile(fh *zip.FileHeader, r io.Reader, crc uint32) error {
	f.align(fh)
	fh.Flags = 0 // we're not writing a data descriptor after the file
	// Identity "compressor": the data is already in its final form.
	comp := func(w io.Writer) (io.WriteCloser, error) { return nopCloser{w}, nil }
	fh.SetModTime(modTime)
	fw, err := f.w.CreateHeaderWithCompressor(fh, comp, fixedCrc32{value: crc})
	if err == nil {
		_, err = io.CopyN(fw, r, int64(fh.CompressedSize64))
	}
	return err
}

// WriteFile writes a complete file to the writer.
// Deflate is used unless the name matches one of StoreSuffix.
func (f *File) WriteFile(filename string, data []byte) error {
	fh := zip.FileHeader{
		Name:   filename,
		Method: zip.Deflate,
	}
	fh.SetModTime(modTime)
	for _, ext := range f.StoreSuffix {
		if strings.HasSuffix(filename, ext) {
			fh.Method = zip.Store
			break
		}
	}
	f.align(&fh)
	if fw, err := f.w.CreateHeader(&fh); err != nil {
		return err
	} else if _, err := fw.Write(data); err != nil {
		return err
	}
	// Sizes/CRC recorded as zero; see the duplicate-handling note in AddZipFile.
	f.addExistingFile(filename, filename, 0, 0, 0)
	return nil
}
// align writes any necessary bytes to align the next file.
// Only stored (uncompressed) entries are aligned.
func (f *File) align(h *zip.FileHeader) {
	if f.Align != 0 && h.Method == zip.Store {
		// We have to allow space for writing the header, so we predict what the offset will be after it.
		fileStart := f.w.Offset() + fileHeaderLen + len(h.Name) + len(h.Extra)
		if overlap := fileStart % f.Align; overlap != 0 {
			if err := f.w.WriteRaw(bytes.Repeat([]byte{0}, f.Align-overlap)); err != nil {
				log.Error("Failed to pad file: %s", err)
			}
		}
	}
}

// WriteDir writes a directory entry to the writer.
func (f *File) WriteDir(filename string) error {
	filename += "/" // Must have trailing slash to tell it it's a directory.
	fh := zip.FileHeader{
		Name:   filename,
		Method: zip.Store,
	}
	fh.SetModTime(modTime)
	if _, err := f.w.CreateHeader(&fh); err != nil {
		return err
	}
	f.addExistingFile(filename, filename, 0, 0, 0)
	return nil
}

// WritePreamble writes a preamble to the zipfile, i.e. raw bytes that
// precede the zip data proper.
func (f *File) WritePreamble(preamble []byte) error {
	return f.w.WriteRaw(preamble)
}
// StripBytecodeTimestamp overwrites the timestamp embedded in a .pyc or
// .pyo file with our fixed mtime so the output is deterministic. It is
// important that it also matches the mtime in the zipfile directory,
// otherwise zipimport complains.
func (f *File) StripBytecodeTimestamp(filename string, contents []byte) error {
	if !strings.HasSuffix(filename, ".pyc") && !strings.HasSuffix(filename, ".pyo") {
		return nil
	}
	if len(contents) < 8 {
		log.Warning("Invalid bytecode file, will not strip timestamp")
		return nil
	}
	// The .pyc format starts with a two-byte magic number and a \r\n,
	// then a four-byte timestamp; bytes 4-7 are the timestamp we patch.
	var buf bytes.Buffer
	binary.Write(&buf, binary.LittleEndian, modTime.Unix())
	copy(contents[4:8], buf.Bytes())
	return nil
}
// nopCloser wraps an io.Writer with a no-op Close so it satisfies
// io.WriteCloser; used as the identity "compressor" in addFile.
type nopCloser struct {
	io.Writer
}

// Close does nothing.
func (w nopCloser) Close() error {
	return nil
}
// fixedCrc32 implements a Hash32 interface that just writes out a predetermined value.
// this is really cheating of course but serves our purposes here: when
// copying an already-compressed entry we replay its original CRC.
type fixedCrc32 struct {
	value uint32
}

// Write discards its input; the CRC is fixed in advance.
func (crc fixedCrc32) Write(p []byte) (n int, err error) {
	return len(p), nil
}

// Sum appends the fixed CRC (little-endian) to b, per the hash.Hash
// contract. Previously this computed the bytes into a scratch buffer and
// then returned b unchanged, discarding the checksum entirely.
func (crc fixedCrc32) Sum(b []byte) []byte {
	buf := make([]byte, 4)
	binary.LittleEndian.PutUint32(buf, crc.value)
	return append(b, buf...)
}

// Sum32 returns the predetermined CRC value.
func (crc fixedCrc32) Sum32() uint32 {
	return crc.value
}

// Reset does nothing; the value is fixed.
func (crc fixedCrc32) Reset() {
}

// NOTE(review): hash.Hash.Size is the digest length in bytes (4 for
// CRC32); returning 32 looks wrong but is preserved in case something
// relies on it — confirm before changing.
func (crc fixedCrc32) Size() int {
	return 32
}

func (crc fixedCrc32) BlockSize() int {
	return 32
}
Minor reordering of jarcat logic
// Package zip implements functions for jarcat that manipulate .zip files.
package zip
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"strings"
"time"
"gopkg.in/op/go-logging.v1"
"third_party/go/zip"
)
var log = logging.MustGetLogger("zip")

// modTime is the fixed timestamp stamped on every entry we write, so output
// archives are byte-for-byte reproducible regardless of when they are built.
var modTime = time.Date(2001, time.January, 1, 0, 0, 0, 0, time.UTC)

// fileHeaderLen is the length of a file header in a zipfile.
// We need to know this to adjust alignment.
const fileHeaderLen = 30
// A File represents an output zipfile.
type File struct {
	// f is the underlying output stream; w is the zip writer layered on top of it.
	f io.WriteCloser
	w *zip.Writer
	// filename is the path of the archive being written (skipped during scans).
	filename string
	// input is the directory passed to AddFiles; used by walk to tell the
	// walk root apart from symlinks encountered inside it.
	input string
	// Include and Exclude are prefixes of filenames to include or exclude from the zipfile.
	Include, Exclude []string
	// Strict controls whether we deny duplicate files or not.
	// Zipfiles can readily contain duplicates, if this is true we reject them unless they are identical.
	// If false we allow duplicates and leave it to someone else to handle.
	Strict bool
	// RenameDirs is a map of directories to rename, from the old name to the new one.
	RenameDirs map[string]string
	// StripPrefix is a prefix that is stripped off any files added with AddFiles.
	StripPrefix string
	// Suffix is the suffix of files that we include while scanning.
	Suffix []string
	// ExcludeSuffix is a list of suffixes that are excluded from the file scan.
	ExcludeSuffix []string
	// StoreSuffix is a list of file suffixes that will be stored instead of deflated.
	StoreSuffix []string
	// IncludeOther will make the file scan include other files that are not part of a zip file.
	IncludeOther bool
	// AddInitPy will make the writer add __init__.py files to all directories that don't already have one on close.
	AddInitPy bool
	// DirEntries makes the writer add empty directory entries.
	DirEntries bool
	// Align aligns entries to a multiple of this many bytes.
	Align int
	// files tracks the files that we've written so far, keyed by entry name.
	files map[string]fileRecord
	// concatenatedFiles tracks the files that are built up as we go.
	// They are flushed to the archive on Close.
	concatenatedFiles map[string][]byte
}
// A fileRecord records some information about a file that we use to check if they're exact duplicates.
// Records created via WriteFile carry zero sizes and CRC (see the TODO in AddZipFile).
type fileRecord struct {
	ZipFile            string // source archive (or filename) the entry came from
	CompressedSize64   uint64
	UncompressedSize64 uint64
	CRC32              uint32
}
// NewFile constructs and returns a new File writing to the given path.
// A failure to create the output file is fatal (the process exits).
func NewFile(output string, strict bool) *File {
	f, err := os.Create(output)
	if err != nil {
		log.Fatalf("Failed to open output file: %s", err)
	}
	return &File{
		f:                 f,
		w:                 zip.NewWriter(f),
		filename:          output,
		Strict:            strict,
		files:             map[string]fileRecord{},
		concatenatedFiles: map[string][]byte{},
	}
}
// Close must be called before the File is destroyed.
// It flushes the deferred concatenated files, optionally adds __init__.py
// entries, then finalises the zip directory and closes the output stream.
// Any failure is fatal since the archive would be unusable.
func (f *File) Close() {
	// Previously this error was silently dropped, which could truncate
	// META-INF concatenated entries without any diagnostic.
	if err := f.handleConcatenatedFiles(); err != nil {
		log.Fatalf("Failed to write concatenated files: %s", err)
	}
	if f.AddInitPy {
		if err := f.AddInitPyFiles(); err != nil {
			log.Fatalf("%s", err)
		}
	}
	if err := f.w.Close(); err != nil {
		log.Fatalf("Failed to finalise zip file: %s", err)
	}
	if err := f.f.Close(); err != nil {
		log.Fatalf("Failed to close file: %s", err)
	}
}
// AddZipFile copies the contents of a zip file into the new zipfile.
// Entries are filtered through Include/Exclude, renamed via RenameDirs and
// StripPrefix, and duplicates are tolerated or rejected per Strict.
// Compressed data is copied byte-for-byte without being re-deflated.
func (f *File) AddZipFile(filepath string) error {
	r, err := zip.OpenReader(filepath)
	if err != nil {
		return err
	}
	defer r.Close()

	// Reopen file to get a directly readable version without decompression.
	r2, err := os.Open(filepath)
	if err != nil {
		return err
	}
	defer r2.Close()

	for _, rf := range r.File {
		log.Debug("Found file %s (from %s)", rf.Name, filepath)
		if !f.shouldInclude(rf.Name) {
			continue
		}
		// This directory is very awkward. We need to merge the contents by concatenating them,
		// we can't replace them or leave them out.
		if strings.HasPrefix(rf.Name, "META-INF/services/") ||
			strings.HasPrefix(rf.Name, "META-INF/spring") ||
			rf.Name == "META-INF/please_sourcemap" {
			if err := f.concatenateFile(rf); err != nil {
				return err
			}
			continue
		}
		// Normalise directory entries so they always carry a trailing slash.
		hasTrailingSlash := strings.HasSuffix(rf.Name, "/")
		isDir := hasTrailingSlash || rf.FileInfo().IsDir()
		if isDir && !hasTrailingSlash {
			rf.Name = rf.Name + "/"
		}
		if existing, present := f.files[rf.Name]; present {
			// Allow duplicates of directories. Seemingly the best way to identify them is that
			// they end in a trailing slash.
			if isDir {
				continue
			}
			// Allow skipping existing files that are exactly the same as the added ones.
			// It's unnecessarily awkward to insist on not ever doubling up on a dependency.
			// TODO(pebers): Bit of a hack ignoring it when CRC is 0, would be better to add
			// the correct CRC when added through WriteFile.
			if existing.CRC32 == rf.CRC32 || existing.CRC32 == 0 {
				log.Info("Skipping %s / %s: already added (from %s)", filepath, rf.Name, existing.ZipFile)
				continue
			}
			if f.Strict {
				log.Error("Duplicate file %s (from %s, already added from %s); crc %d / %d", rf.Name, filepath, existing.ZipFile, rf.CRC32, existing.CRC32)
				return fmt.Errorf("File %s already added to destination zip file (from %s)", rf.Name, existing.ZipFile)
			}
			continue
		}
		for before, after := range f.RenameDirs {
			if strings.HasPrefix(rf.Name, before) {
				rf.Name = path.Join(after, strings.TrimPrefix(rf.Name, before))
				// path.Join drops the trailing slash; restore it for directories.
				if isDir {
					rf.Name = rf.Name + "/"
				}
				break
			}
		}
		if f.StripPrefix != "" {
			rf.Name = strings.TrimPrefix(rf.Name, f.StripPrefix)
		}
		// Java tools don't seem to like writing a data descriptor for stored items.
		// Unsure if this is a limitation of the format or a problem of those tools.
		rf.Flags = 0
		f.addExistingFile(rf.Name, filepath, rf.CompressedSize64, rf.UncompressedSize64, rf.CRC32)

		// Seek the raw reader to this entry's data and copy it across verbatim.
		start, err := rf.DataOffset()
		if err != nil {
			return err
		}
		if _, err := r2.Seek(start, 0); err != nil {
			return err
		}
		if err := f.addFile(&rf.FileHeader, r2, rf.CRC32); err != nil {
			return err
		}
	}
	return nil
}
// walk is a filepath.WalkFunc-compatible function which walks a file tree,
// adding all the files it finds within it.
// Zip files (matching Suffix) are merged in; with IncludeOther set, other
// files are added verbatim (after bytecode timestamp stripping). Directory
// entries are only written in "dumb" mode (see condition below).
func (f *File) walk(path string, info os.FileInfo, err error) error {
	if err != nil {
		return err
	} else if path != f.input && (info.Mode()&os.ModeSymlink) != 0 {
		// Follow symlinked directories encountered below the root.
		if resolved, err := filepath.EvalSymlinks(path); err != nil {
			return err
		} else if stat, err := os.Stat(resolved); err != nil {
			return err
		} else if stat.IsDir() {
			// TODO(peterebden): Is this case still needed?
			return filepath.Walk(resolved, f.walk)
		}
	}
	// Never try to add our own output into ourselves.
	if path == f.filename {
		return nil
	} else if !info.IsDir() {
		if !f.matchesSuffix(path, f.ExcludeSuffix) {
			if f.matchesSuffix(path, f.Suffix) {
				log.Debug("Adding zip file %s", path)
				if err := f.AddZipFile(path); err != nil {
					return fmt.Errorf("Error adding %s to zipfile: %s", path, err)
				}
			} else if f.IncludeOther && !f.HasExistingFile(path) {
				log.Debug("Including existing non-zip file %s", path)
				if b, err := ioutil.ReadFile(path); err != nil {
					return fmt.Errorf("Error reading %s to zipfile: %s", path, err)
				} else if err := f.StripBytecodeTimestamp(path, b); err != nil {
					return err
				} else if err := f.WriteFile(path, b); err != nil {
					return err
				}
			}
		}
	} else if (len(f.Suffix) == 0 || f.AddInitPy) && path != "." && f.DirEntries { // Only add directory entries in "dumb" mode.
		log.Debug("Adding directory entry %s/", path)
		if err := f.WriteDir(path); err != nil {
			return err
		}
	}
	return nil
}
// AddFiles walks the given directory and adds any zip files (determined by suffix) that it finds within.
// The root is remembered so walk can distinguish it from symlinks inside it.
func (f *File) AddFiles(in string) error {
	f.input = in
	return filepath.Walk(in, f.walk)
}
// matchesSuffix returns true if the given path ends with any one of the given suffixes.
// Empty suffixes are ignored (they would otherwise match everything).
func (f *File) matchesSuffix(path string, suffixes []string) bool {
	for _, suffix := range suffixes {
		if suffix != "" && strings.HasSuffix(path, suffix) {
			return true
		}
	}
	return false
}
// shouldInclude returns true if the given filename should be included according to the include / exclude sets of this File.
// Excludes match either the whole name or its basename (glob patterns);
// includes match the whole name as a glob or as a literal prefix.
// With no includes configured, everything not excluded is included.
func (f *File) shouldInclude(name string) bool {
	for _, excl := range f.Exclude {
		if matched, _ := filepath.Match(excl, name); matched {
			log.Debug("Skipping %s (excluded by %s)", name, excl)
			return false
		} else if matched, _ := filepath.Match(excl, filepath.Base(name)); matched {
			log.Debug("Skipping %s (excluded by %s)", name, excl)
			return false
		}
	}
	if len(f.Include) == 0 {
		return true
	}
	for _, incl := range f.Include {
		if matched, _ := filepath.Match(incl, name); matched || strings.HasPrefix(name, incl) {
			return true
		}
	}
	log.Debug("Skipping %s (didn't match any includes)", name)
	return false
}
// AddInitPyFiles adds an __init__.py file to every directory in the zip file that doesn't already have one.
// It walks each written path's ancestor directories (in sorted order for
// determinism), stopping at the root, at __pycache__, or once a directory
// already has an __init__.py / .pyc / .pyo.
func (f *File) AddInitPyFiles() error {
	s := make([]string, 0, len(f.files))
	for p := range f.files {
		s = append(s, p)
	}
	sort.Strings(s)
	for _, p := range s {
		for d := filepath.Dir(p); d != "."; d = filepath.Dir(d) {
			if filepath.Base(d) == "__pycache__" {
				break // Don't need to add an __init__.py here.
			}
			initPyPath := path.Join(d, "__init__.py")
			// Don't write one at the root, it's not necessary.
			if _, present := f.files[initPyPath]; present || initPyPath == "__init__.py" {
				break
			} else if _, present := f.files[initPyPath+"c"]; present {
				// If we already have a pyc / pyo we don't need the __init__.py as well.
				break
			} else if _, present := f.files[initPyPath+"o"]; present {
				break
			}
			log.Debug("Adding %s", initPyPath)
			f.files[initPyPath] = fileRecord{}
			if err := f.WriteFile(initPyPath, []byte{}); err != nil {
				return err
			}
		}
	}
	return nil
}
// AddManifest writes a META-INF/MANIFEST.MF entry containing a
// Manifest-Version header and the given Main-Class.
func (f *File) AddManifest(mainClass string) error {
	manifest := fmt.Sprintf("Manifest-Version: 1.0\nMain-Class: %s\n", mainClass)
	return f.WriteFile("META-INF/MANIFEST.MF", []byte(manifest))
}
// HasExistingFile returns true if the writer has already written the given file.
func (f *File) HasExistingFile(name string) bool {
	_, present := f.files[name]
	return present
}

// addExistingFile adds a record for an existing file, although doesn't write any contents.
// The record is what duplicate detection in AddZipFile compares against.
func (f *File) addExistingFile(name, file string, compressedSize, uncompressedSize uint64, crc uint32) {
	f.files[name] = fileRecord{file, compressedSize, uncompressedSize, crc}
}
// concatenateFile adds a file to the zip which is concatenated with any existing content with the same name.
// Writing is deferred since we obviously can't append to it later; the
// accumulated contents are flushed by handleConcatenatedFiles on Close.
func (f *File) concatenateFile(zf *zip.File) error {
	r, err := zf.Open()
	if err != nil {
		return err
	}
	defer r.Close()
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, r); err != nil {
		return err
	}
	contents := buf.Bytes()
	// Ensure each fragment ends with a newline so entries don't run together.
	if !bytes.HasSuffix(contents, []byte{'\n'}) {
		contents = append(contents, '\n')
	}
	f.concatenatedFiles[zf.Name] = append(f.concatenatedFiles[zf.Name], contents...)
	return nil
}
// handleConcatenatedFiles appends concatenated files to the archive's directory for writing.
// Called from Close, once all inputs have been merged.
func (f *File) handleConcatenatedFiles() error {
	// Must do it in a deterministic order
	files := make([]string, 0, len(f.concatenatedFiles))
	for name := range f.concatenatedFiles {
		files = append(files, name)
	}
	sort.Strings(files)
	for _, name := range files {
		if err := f.WriteFile(name, f.concatenatedFiles[name]); err != nil {
			return err
		}
	}
	return nil
}
// addFile writes a file to the new writer, copying its already-compressed
// bytes straight from r (a no-op "compressor" is used) and emitting the
// caller-supplied CRC via fixedCrc32 instead of recomputing it.
func (f *File) addFile(fh *zip.FileHeader, r io.Reader, crc uint32) error {
	f.align(fh)
	fh.Flags = 0 // we're not writing a data descriptor after the file
	comp := func(w io.Writer) (io.WriteCloser, error) { return nopCloser{w}, nil }
	fh.SetModTime(modTime)
	fw, err := f.w.CreateHeaderWithCompressor(fh, comp, fixedCrc32{value: crc})
	if err == nil {
		// Copy exactly the compressed byte count; r is positioned at the data start.
		_, err = io.CopyN(fw, r, int64(fh.CompressedSize64))
	}
	return err
}
// WriteFile writes a complete file to the writer.
// The entry is deflated unless its name matches one of StoreSuffix.
// The record added for it carries zero sizes/CRC (see TODO in AddZipFile).
func (f *File) WriteFile(filename string, data []byte) error {
	fh := zip.FileHeader{
		Name:   filename,
		Method: zip.Deflate,
	}
	fh.SetModTime(modTime)

	for _, ext := range f.StoreSuffix {
		if strings.HasSuffix(filename, ext) {
			fh.Method = zip.Store
			break
		}
	}

	f.align(&fh)
	if fw, err := f.w.CreateHeader(&fh); err != nil {
		return err
	} else if _, err := fw.Write(data); err != nil {
		return err
	}
	f.addExistingFile(filename, filename, 0, 0, 0)
	return nil
}
// align writes any necessary bytes to align the next file.
// Padding (zero bytes) is only emitted when Align is set and the entry is
// stored rather than deflated.
func (f *File) align(h *zip.FileHeader) {
	if f.Align != 0 && h.Method == zip.Store {
		// We have to allow space for writing the header, so we predict what the offset will be after it.
		fileStart := f.w.Offset() + fileHeaderLen + len(h.Name) + len(h.Extra)
		if overlap := fileStart % f.Align; overlap != 0 {
			if err := f.w.WriteRaw(bytes.Repeat([]byte{0}, f.Align-overlap)); err != nil {
				log.Error("Failed to pad file: %s", err)
			}
		}
	}
}
// WriteDir writes a directory entry to the writer.
// The entry is stored (not deflated) and given the fixed deterministic mtime.
func (f *File) WriteDir(filename string) error {
	filename += "/" // Must have trailing slash to tell it it's a directory.
	fh := zip.FileHeader{
		Name:   filename,
		Method: zip.Store,
	}
	fh.SetModTime(modTime)
	if _, err := f.w.CreateHeader(&fh); err != nil {
		return err
	}
	f.addExistingFile(filename, filename, 0, 0, 0)
	return nil
}

// WritePreamble writes a preamble to the zipfile.
// The bytes are emitted raw, before any zip entries; nothing is recorded in
// the archive's directory about them.
func (f *File) WritePreamble(preamble []byte) error {
	return f.w.WriteRaw(preamble)
}
// StripBytecodeTimestamp strips a timestamp from a .pyc or .pyo file.
// This is important so our output is deterministic. The timestamp is
// overwritten in place in contents; other file types are left alone and the
// return value is always nil.
func (f *File) StripBytecodeTimestamp(filename string, contents []byte) error {
	if !strings.HasSuffix(filename, ".pyc") && !strings.HasSuffix(filename, ".pyo") {
		return nil
	}
	if len(contents) < 8 {
		log.Warning("Invalid bytecode file, will not strip timestamp")
		return nil
	}
	// Bytecode files begin with a two-byte magic number and a \r\n, followed
	// by a four-byte little-endian modification timestamp. We overwrite that
	// timestamp with the same fixed mtime used for the zipfile directory
	// (it must be deterministic, but must also match the directory entry,
	// otherwise zipimport complains).
	binary.LittleEndian.PutUint32(contents[4:8], uint32(modTime.Unix()))
	return nil
}
// nopCloser adapts an io.Writer into an io.WriteCloser whose Close is a
// harmless no-op; writes pass straight through to the wrapped Writer.
type nopCloser struct {
	io.Writer
}

// Close implements io.Closer; there is nothing to release, so it always
// succeeds.
func (nopCloser) Close() error {
	return nil
}
// fixedCrc32 implements a Hash32 interface that just writes out a predetermined value.
// this is really cheating of course but serves our purposes here: it lets us
// re-emit an entry whose checksum is already known without recomputing it.
type fixedCrc32 struct {
	value uint32
}

// Write discards the data (the checksum is fixed) and reports it all consumed.
func (crc fixedCrc32) Write(p []byte) (n int, err error) {
	return len(p), nil
}

// Sum appends the fixed checksum, little-endian encoded, to b.
// (Previously the encoding was built but b was returned unchanged, which
// violates the hash.Hash contract.)
func (crc fixedCrc32) Sum(b []byte) []byte {
	buf := make([]byte, 4)
	binary.LittleEndian.PutUint32(buf, crc.value)
	return append(b, buf...)
}

// Sum32 returns the predetermined checksum.
func (crc fixedCrc32) Sum32() uint32 {
	return crc.value
}

// Reset is a no-op since the checksum never changes.
func (crc fixedCrc32) Reset() {
}

// Size returns the number of bytes Sum appends; a CRC32 is 4 bytes.
// (Previously this returned 32, confusing bits with bytes.)
func (crc fixedCrc32) Size() int {
	return 4
}

// BlockSize returns a nominal block size; nothing here depends on it.
func (crc fixedCrc32) BlockSize() int {
	return 32
}
|
package main
import (
"testing"
"github.com/fsouza/go-dockerclient"
)
// TestDockerClient is an in-memory stand-in for a Docker API client,
// holding a canned list of containers for tests.
type TestDockerClient struct {
	containers []docker.APIContainers
}

// ListContainers returns the canned container list; it never fails.
func (rdc *TestDockerClient) ListContainers() ([]docker.APIContainers, error) {
	return rdc.containers, nil
}

// AddContainer appends a container to the canned list; it never fails.
func (rdc *TestDockerClient) AddContainer(container docker.APIContainers) error {
	rdc.containers = append(rdc.containers, container)
	return nil
}

// NewTestDockerClient returns an empty fake client. The error is always nil;
// presumably the signature mirrors a real constructor used in production code.
func NewTestDockerClient() (*TestDockerClient, error) {
	dockerClient := TestDockerClient{}
	return &dockerClient, nil
}
// TestSomething only exercises the fake client wiring: it adds one container
// and asserts nothing (the failure/debug lines are left commented out).
func TestSomething(t *testing.T) {
	tdc, _ := NewTestDockerClient()
	tdc.AddContainer(
		docker.APIContainers{
			ID: "foo",
		},
	)
	// t.Fail()
	// fmt.Println(tdc)
}
first real test of Environments
package main
import (
"io/ioutil"
"os"
"testing"
"github.com/fsouza/go-dockerclient"
"github.com/stretchr/testify/assert"
)
// TestDockerClient is an in-memory stand-in for a Docker API client,
// holding a canned list of containers for tests.
type TestDockerClient struct {
	containers []docker.APIContainers
}

// ListContainers returns the canned container list; it never fails.
func (rdc *TestDockerClient) ListContainers() ([]docker.APIContainers, error) {
	return rdc.containers, nil
}

// AddContainer appends a container to the canned list; it never fails.
func (rdc *TestDockerClient) AddContainer(container docker.APIContainers) error {
	rdc.containers = append(rdc.containers, container)
	return nil
}

// NewTestDockerClient returns an empty fake client. The error is always nil;
// presumably the signature mirrors a real constructor used in production code.
func NewTestDockerClient() (*TestDockerClient, error) {
	dockerClient := TestDockerClient{}
	return &dockerClient, nil
}
// TestEnvironments verifies that Environments merges a running container
// (from the fake Docker client) with an environment directory (from the
// system client rooted at a temp dir) into the expected Environment map.
// NOTE(review): setup errors (TempDir, client constructors,
// EnsureEnvironmentDir) are ignored; a failure there would surface as a
// confusing assertion failure instead.
func TestEnvironments(t *testing.T) {
	assert := assert.New(t)
	tempdir, _ := ioutil.TempDir("", "ddc")
	defer os.RemoveAll(tempdir)
	sc, _ := NewSystemClientWithBase(tempdir)
	dc, _ := NewTestDockerClient()
	dc.AddContainer(
		docker.APIContainers{
			ID:     "foo",
			Names:  []string{"/ddc_foo"},
			Image:  "nate/clojuredev:latest",
			Status: "Up 12 hours",
			Ports: []docker.APIPort{
				{32768, 22, "tcp", "0.0.0.0"},
			},
		},
	)
	sc.EnsureEnvironmentDir("foo")
	envs, err := Environments(dc, sc)
	assert.Nil(err)
	assert.Equal(
		envs,
		map[string]Environment{
			"foo": Environment{
				"foo",
				&Container{
					"ddc_foo",
					"nate/clojuredev:latest",
					true,
					[]Port{{"0.0.0.0", 22, 32768, "tcp"}},
				},
				"clojure",
			},
		},
	)
}
|
package session
import (
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"log"
"time"
"github.com/gofly/saml"
"github.com/vanackere/ldap"
)
var (
	// ErrBindFailed is returned when the LDAP bind (authentication) is rejected.
	ErrBindFailed = errors.New("ldap: bind failed")
	// ErrSessionNotFound is returned when no stored session matches an ID.
	ErrSessionNotFound = errors.New("session is not found")
)

// randomBytes returns n bytes read from saml.RandReader.
// NOTE(review): the Read error and byte count are ignored; if the reader can
// fail, this silently returns (partially) zeroed bytes — confirm RandReader
// cannot fail.
func randomBytes(n int) []byte {
	rv := make([]byte, n)
	saml.RandReader.Read(rv)
	return rv
}
// LDAPSessProvider authenticates users against an LDAP server and keeps
// issued sessions in an in-memory map.
// NOTE(review): the sessions map is accessed without any synchronisation;
// concurrent calls to the Get/Set/Destroy methods will race — see the
// lock-protected variant of this type.
type LDAPSessProvider struct {
	ldapAddr      string
	bindDN        string
	sessionMaxAge time.Duration
	sessions      map[string]*saml.Session
}
// NewLDAPSessionProvider returns a provider that authenticates against the
// LDAP server at ldapAddr, binding users under bindDN, with sessions valid
// for sessionMaxAge.
func NewLDAPSessionProvider(ldapAddr, bindDN string, sessionMaxAge time.Duration) *LDAPSessProvider {
	return &LDAPSessProvider{
		ldapAddr:      ldapAddr,
		bindDN:        bindDN,
		sessionMaxAge: sessionMaxAge,
		sessions:      make(map[string]*saml.Session),
	}
}
// GetSessionByUsernameAndPassword authenticates the user by binding to LDAP
// as cn=<username>,<bindDN> and, on success, builds a fresh session with a
// random ID and index, expiring after sessionMaxAge.
// The session is NOT stored here; callers must use SetSession to make it
// retrievable by ID later.
func (p *LDAPSessProvider) GetSessionByUsernameAndPassword(username, password string) (*saml.Session, error) {
	ldapConn, err := ldap.DialTLS("tcp", p.ldapAddr, nil)
	if err != nil {
		log.Printf("ldap.DialTLS %s with error: %s \n", p.ldapAddr, err)
		return nil, err
	}
	defer ldapConn.Close()
	err = ldapConn.Bind(fmt.Sprintf("cn=%s,%s", username, p.bindDN), password)
	if err != nil {
		// A failed bind is reported as ErrBindFailed rather than the raw
		// LDAP error, so callers can treat it as "wrong credentials".
		log.Printf("ldapConn.Bind(%s) with error: %s\n", username, err)
		return nil, ErrBindFailed
	}
	sessID := base64.StdEncoding.EncodeToString(randomBytes(32))
	return &saml.Session{
		ID:         sessID,
		NameID:     username,
		CreateTime: saml.TimeNow(),
		ExpireTime: saml.TimeNow().Add(p.sessionMaxAge),
		Index:      hex.EncodeToString(randomBytes(32)),
		UserName:   username,
	}, nil
}
// GetSessionBySessionID looks up a previously stored session by its ID,
// returning ErrSessionNotFound if it was never stored or has been destroyed.
// NOTE(review): reads p.sessions without locking while SetSession /
// DestroySession write it — concurrent use will panic; confirm callers are
// single-goroutine or add a mutex.
func (p *LDAPSessProvider) GetSessionBySessionID(sessID string) (*saml.Session, error) {
	if session, ok := p.sessions[sessID]; ok {
		return session, nil
	}
	return nil, ErrSessionNotFound
}

// SetSession stores the session keyed by its ID, overwriting any previous
// entry. Subject to the same concurrency caveat as GetSessionBySessionID.
func (p *LDAPSessProvider) SetSession(session *saml.Session) error {
	p.sessions[session.ID] = session
	return nil
}

// DestroySession removes the session with the given ID; removing an unknown
// ID is a no-op. Subject to the same concurrency caveat as above.
func (p *LDAPSessProvider) DestroySession(sessID string) error {
	delete(p.sessions, sessID)
	return nil
}
fix read and write concurrently panic
package session
import (
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"log"
"time"
"sync"
"github.com/gofly/saml"
"github.com/vanackere/ldap"
)
var (
	// ErrBindFailed is returned when the LDAP bind (authentication) is rejected.
	ErrBindFailed = errors.New("ldap: bind failed")
	// ErrSessionNotFound is returned when no stored session matches an ID.
	ErrSessionNotFound = errors.New("session is not found")
)

// randomBytes returns n bytes read from saml.RandReader.
// NOTE(review): the Read error and byte count are ignored; if the reader can
// fail, this silently returns (partially) zeroed bytes — confirm RandReader
// cannot fail.
func randomBytes(n int) []byte {
	rv := make([]byte, n)
	saml.RandReader.Read(rv)
	return rv
}
// LDAPSessProvider authenticates users against an LDAP server and keeps
// issued sessions in an in-memory map.
// sessLock guards all access to sessions, making the Get/Set/Destroy
// methods safe for concurrent use.
type LDAPSessProvider struct {
	ldapAddr      string
	bindDN        string
	sessionMaxAge time.Duration
	sessions      map[string]*saml.Session
	sessLock      *sync.RWMutex
}
// NewLDAPSessionProvider returns a provider that authenticates against the
// LDAP server at ldapAddr, binding users under bindDN, with sessions valid
// for sessionMaxAge. The returned provider is safe for concurrent use.
func NewLDAPSessionProvider(ldapAddr, bindDN string, sessionMaxAge time.Duration) *LDAPSessProvider {
	return &LDAPSessProvider{
		ldapAddr:      ldapAddr,
		bindDN:        bindDN,
		sessionMaxAge: sessionMaxAge,
		sessions:      make(map[string]*saml.Session),
		sessLock:      &sync.RWMutex{},
	}
}
// GetSessionByUsernameAndPassword authenticates the user by binding to LDAP
// as cn=<username>,<bindDN> and, on success, builds a fresh session with a
// random ID and index, expiring after sessionMaxAge.
// The session is NOT stored here; callers must use SetSession to make it
// retrievable by ID later.
func (p *LDAPSessProvider) GetSessionByUsernameAndPassword(username, password string) (*saml.Session, error) {
	ldapConn, err := ldap.DialTLS("tcp", p.ldapAddr, nil)
	if err != nil {
		log.Printf("ldap.DialTLS %s with error: %s \n", p.ldapAddr, err)
		return nil, err
	}
	defer ldapConn.Close()
	err = ldapConn.Bind(fmt.Sprintf("cn=%s,%s", username, p.bindDN), password)
	if err != nil {
		// A failed bind is reported as ErrBindFailed rather than the raw
		// LDAP error, so callers can treat it as "wrong credentials".
		log.Printf("ldapConn.Bind(%s) with error: %s\n", username, err)
		return nil, ErrBindFailed
	}
	sessID := base64.StdEncoding.EncodeToString(randomBytes(32))
	return &saml.Session{
		ID:         sessID,
		NameID:     username,
		CreateTime: saml.TimeNow(),
		ExpireTime: saml.TimeNow().Add(p.sessionMaxAge),
		Index:      hex.EncodeToString(randomBytes(32)),
		UserName:   username,
	}, nil
}
// GetSessionBySessionID looks up a previously stored session by its ID under
// a read lock, returning ErrSessionNotFound if it was never stored or has
// been destroyed.
func (p *LDAPSessProvider) GetSessionBySessionID(sessID string) (*saml.Session, error) {
	p.sessLock.RLock()
	defer p.sessLock.RUnlock()
	session, found := p.sessions[sessID]
	if !found {
		return nil, ErrSessionNotFound
	}
	return session, nil
}
// SetSession stores the session keyed by its ID under the write lock,
// overwriting any previous entry. It never fails.
func (p *LDAPSessProvider) SetSession(session *saml.Session) error {
	p.sessLock.Lock()
	defer p.sessLock.Unlock()
	p.sessions[session.ID] = session
	return nil
}

// DestroySession removes the session with the given ID under the write lock;
// removing an unknown ID is a no-op. It never fails.
func (p *LDAPSessProvider) DestroySession(sessID string) error {
	p.sessLock.Lock()
	defer p.sessLock.Unlock()
	delete(p.sessions, sessID)
	return nil
}
|
/*
* Copyright (C) 2015 Red Hat, Inc.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package probes
import (
"errors"
"math"
"net"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/safchain/ethtool"
"github.com/vishvananda/netlink"
"github.com/vishvananda/netlink/nl"
"github.com/skydive-project/skydive/common"
"github.com/skydive-project/skydive/config"
"github.com/skydive-project/skydive/logging"
"github.com/skydive-project/skydive/topology/graph"
)
const (
	// maxEpollEvents bounds how many events are processed per epoll wait.
	// NOTE(review): not referenced in this portion of the file — presumably
	// used by the netlink socket event loop elsewhere; confirm.
	maxEpollEvents = 32
)

// ownershipMetadata marks graph edges from the root (host) node to the
// interfaces it owns.
var ownershipMetadata = graph.Metadata{"RelationType": "ownership"}
// NetLinkProbe listens to netlink link/address events and keeps the topology
// graph in sync with the host's interfaces.
// The embedded RWMutex guards indexToChildrenQueue and links; state is
// read atomically (see isRunning).
type NetLinkProbe struct {
	sync.RWMutex
	Graph   *graph.Graph
	Root    *graph.Node
	state   int64
	ethtool *ethtool.Ethtool
	netlink *netlink.Handle
	// indexToChildrenQueue queues children whose parent (keyed by ifindex)
	// has not appeared in the graph yet; they are linked once it does.
	indexToChildrenQueue map[int64][]graph.Identifier
	// links maps interface name to its graph node, for cleanup on delete.
	links map[string]*graph.Node
	wg    sync.WaitGroup
}
// linkPendingChildren attaches to intf any children that were queued while
// waiting for an interface with this ifindex to appear, then drops the queue
// entry. Children deleted in the meantime (GetNode returns nil) are skipped.
func (u *NetLinkProbe) linkPendingChildren(intf *graph.Node, index int64) {
	// add children of this interface that was previously added
	if children, ok := u.indexToChildrenQueue[index]; ok {
		for _, id := range children {
			child := u.Graph.GetNode(id)
			if child != nil {
				u.Graph.Link(intf, child, graph.Metadata{"RelationType": "layer2"})
			}
		}
		delete(u.indexToChildrenQueue, index)
	}
}
// linkIntfToIndex creates a layer2 edge from the interface with the given
// ifindex (its "master", e.g. a bridge) to intf. If no such parent exists in
// the graph yet, intf is queued until the parent shows up.
// layer2Metadata is declared elsewhere in this file.
func (u *NetLinkProbe) linkIntfToIndex(intf *graph.Node, index int64) {
	// assuming we have only one master with this index
	parent := u.Graph.LookupFirstChild(u.Root, graph.Metadata{"IfIndex": index})
	if parent != nil {
		// ignore ovs-system interface as it doesn't make any sense according to
		// the following thread:
		// http://openvswitch.org/pipermail/discuss/2013-October/011657.html
		if name, _ := parent.GetFieldString("Name"); name == "ovs-system" {
			return
		}
		if !u.Graph.AreLinked(parent, intf, layer2Metadata) {
			u.Graph.Link(parent, intf, layer2Metadata)
		}
	} else {
		// not yet the bridge so, enqueue for a later add
		u.indexToChildrenQueue[index] = append(u.indexToChildrenQueue[index], intf.ID)
	}
}
// handleIntfIsChild wires up parent/child relationships for intf: it flushes
// any children queued against intf's own ifindex, links intf under its
// bridge master (if any), and links vlan interfaces under their parent.
func (u *NetLinkProbe) handleIntfIsChild(intf *graph.Node, link netlink.Link) {
	// handle pending relationship
	u.linkPendingChildren(intf, int64(link.Attrs().Index))

	// interface being a part of a bridge
	if link.Attrs().MasterIndex != 0 {
		u.linkIntfToIndex(intf, int64(link.Attrs().MasterIndex))
	}

	if link.Attrs().ParentIndex != 0 {
		// Only vlan interfaces (which carry a "Vlan" field) are linked to
		// their parent here. (Dropped a redundant double int64 conversion.)
		if _, err := intf.GetFieldString("Vlan"); err == nil {
			u.linkIntfToIndex(intf, int64(link.Attrs().ParentIndex))
		}
	}
}
// handleIntfIsVeth links a veth interface to its peer with a layer2 edge.
// The peer is identified by the PeerIfIndex metadata (filled from ethtool's
// peer_ifindex stat in addLinkToTopology). Only the end whose peer index is
// greater than its own initiates the link, so each pair is linked exactly
// once. Resolution runs asynchronously with retries because the peer may not
// be in the graph yet; the local namespace is tried before the whole graph.
func (u *NetLinkProbe) handleIntfIsVeth(intf *graph.Node, link netlink.Link) {
	if link.Type() != "veth" {
		return
	}

	ifIndex, err := intf.GetFieldInt64("IfIndex")
	if err != nil {
		return
	}

	linkMetadata := graph.Metadata{"RelationType": "layer2", "Type": "veth"}

	if peerIndex, err := intf.GetFieldInt64("PeerIfIndex"); err == nil {
		peerResolver := func(root *graph.Node) error {
			u.Graph.Lock()
			defer u.Graph.Unlock()

			// re get the interface from the graph since the interface could have been deleted
			if u.Graph.GetNode(intf.ID) == nil {
				return errors.New("Node not found")
			}

			var peer *graph.Node
			if root == nil {
				peer = u.Graph.LookupFirstNode(graph.Metadata{"IfIndex": peerIndex, "Type": "veth"})
			} else {
				peer = u.Graph.LookupFirstChild(root, graph.Metadata{"IfIndex": peerIndex, "Type": "veth"})
			}
			if peer == nil {
				return errors.New("Peer not found")
			}
			if !u.Graph.AreLinked(peer, intf, linkMetadata) {
				u.Graph.Link(peer, intf, linkMetadata)
			}
			return nil
		}

		if peerIndex > ifIndex {
			go func() {
				// lookup first in the local namespace then in the whole graph
				// since we can have the exact same interface (name/index) in different namespaces
				// we always take first the closer one.
				localFnc := func() error {
					if u.isRunning() == false {
						return nil
					}
					return peerResolver(u.Root)
				}
				if err := common.Retry(localFnc, 10, 100*time.Millisecond); err != nil {
					peerResolver(nil)
				}
			}()
		}
	}
}
// addGenericLinkToTopology returns the graph node for an ordinary (non-bridge,
// non-ovs) link, creating it when it doesn't exist yet and ensuring an
// ownership edge from the root node. An existing node carrying a UUID (i.e.
// one created by the ovs probe) is preferred over a plain ifindex match.
// (The previous trailing "ovs-system" check was dead code: both branches
// returned intf unchanged.)
func (u *NetLinkProbe) addGenericLinkToTopology(link netlink.Link, m graph.Metadata) *graph.Node {
	name := link.Attrs().Name
	index := int64(link.Attrs().Index)

	intf := u.Graph.LookupFirstChild(u.Root, graph.Metadata{
		"IfIndex": index,
	})

	// could be a member of ovs
	intfs := u.Graph.GetNodes(graph.Metadata{
		"Name":    name,
		"IfIndex": index,
	})
	for _, i := range intfs {
		if uuid, _ := i.GetFieldString("UUID"); uuid != "" {
			intf = i
			break
		}
	}

	if intf == nil {
		intf = u.Graph.NewNode(graph.GenID(), m)
	}

	if !u.Graph.AreLinked(u.Root, intf, ownershipMetadata) {
		u.Graph.Link(u.Root, intf, ownershipMetadata)
	}

	return intf
}
// addBridgeLinkToTopology returns the graph node for a bridge link, creating
// it if needed, ensuring an ownership edge from the root, and immediately
// attaching any children that were queued while waiting for this bridge.
func (u *NetLinkProbe) addBridgeLinkToTopology(link netlink.Link, m graph.Metadata) *graph.Node {
	name := link.Attrs().Name
	index := int64(link.Attrs().Index)

	intf := u.Graph.LookupFirstChild(u.Root, graph.Metadata{
		"Name":    name,
		"IfIndex": index,
	})
	if intf == nil {
		intf = u.Graph.NewNode(graph.GenID(), m)
	}

	if !u.Graph.AreLinked(u.Root, intf, ownershipMetadata) {
		u.Graph.Link(u.Root, intf, ownershipMetadata)
	}

	u.linkPendingChildren(intf, index)

	return intf
}
// addOvsLinkToTopology returns the graph node for an openvswitch link,
// matching by name and driver anywhere in the graph (the ovs probe may have
// created it first), creating it if needed, and ensuring an ownership edge.
func (u *NetLinkProbe) addOvsLinkToTopology(link netlink.Link, m graph.Metadata) *graph.Node {
	name := link.Attrs().Name

	intf := u.Graph.LookupFirstNode(graph.Metadata{"Name": name, "Driver": "openvswitch"})
	if intf == nil {
		intf = u.Graph.NewNode(graph.GenID(), m)
	}

	if !u.Graph.AreLinked(u.Root, intf, ownershipMetadata) {
		u.Graph.Link(u.Root, intf, ownershipMetadata)
	}

	return intf
}
// getLinkIPs returns the link's addresses for the given family (IPv4/IPv6)
// in CIDR notation, comma-joined. Errors are swallowed and reported as "".
func (u *NetLinkProbe) getLinkIPs(link netlink.Link, family int) string {
	var ips []string

	addrs, err := u.netlink.AddrList(link, family)
	if err != nil {
		return ""
	}

	for _, addr := range addrs {
		ips = append(ips, addr.IPNet.String())
	}

	return strings.Join(ips, ",")
}
// updateMetadataStatistics copies every netlink link-statistics counter into
// metadata under "<prefix>/<CounterName>" keys, widened to uint64.
func (u *NetLinkProbe) updateMetadataStatistics(statistics *netlink.LinkStatistics, metadata graph.Metadata, prefix string) {
	metadata[prefix+"/Collisions"] = uint64(statistics.Collisions)
	metadata[prefix+"/Multicast"] = uint64(statistics.Multicast)
	metadata[prefix+"/RxBytes"] = uint64(statistics.RxBytes)
	metadata[prefix+"/RxCompressed"] = uint64(statistics.RxCompressed)
	metadata[prefix+"/RxCrcErrors"] = uint64(statistics.RxCrcErrors)
	metadata[prefix+"/RxDropped"] = uint64(statistics.RxDropped)
	metadata[prefix+"/RxErrors"] = uint64(statistics.RxErrors)
	metadata[prefix+"/RxFifoErrors"] = uint64(statistics.RxFifoErrors)
	metadata[prefix+"/RxFrameErrors"] = uint64(statistics.RxFrameErrors)
	metadata[prefix+"/RxLengthErrors"] = uint64(statistics.RxLengthErrors)
	metadata[prefix+"/RxMissedErrors"] = uint64(statistics.RxMissedErrors)
	metadata[prefix+"/RxOverErrors"] = uint64(statistics.RxOverErrors)
	metadata[prefix+"/RxPackets"] = uint64(statistics.RxPackets)
	metadata[prefix+"/TxAbortedErrors"] = uint64(statistics.TxAbortedErrors)
	metadata[prefix+"/TxBytes"] = uint64(statistics.TxBytes)
	metadata[prefix+"/TxCarrierErrors"] = uint64(statistics.TxCarrierErrors)
	metadata[prefix+"/TxCompressed"] = uint64(statistics.TxCompressed)
	metadata[prefix+"/TxDropped"] = uint64(statistics.TxDropped)
	metadata[prefix+"/TxErrors"] = uint64(statistics.TxErrors)
	metadata[prefix+"/TxFifoErrors"] = uint64(statistics.TxFifoErrors)
	metadata[prefix+"/TxHeartbeatErrors"] = uint64(statistics.TxHeartbeatErrors)
	metadata[prefix+"/TxPackets"] = uint64(statistics.TxPackets)
	metadata[prefix+"/TxWindowErrors"] = uint64(statistics.TxWindowErrors)
}
// addLinkToTopology builds the metadata for a netlink link (driver, MAC, MTU,
// speed, statistics, addresses, vlan/bond details, admin state), finds or
// creates its graph node via the driver-specific helper, records it in
// u.links, merges the metadata in one transaction, and wires up parent/veth
// relationships. Both the probe lock and the graph lock are held throughout.
func (u *NetLinkProbe) addLinkToTopology(link netlink.Link) {
	u.Lock()
	defer u.Unlock()

	u.Graph.Lock()
	defer u.Graph.Unlock()

	logging.GetLogger().Debugf("Netlink ADD event for %s(%d,%s) within %s", link.Attrs().Name, link.Attrs().Index, link.Type(), u.Root.String())

	driver, _ := u.ethtool.DriverName(link.Attrs().Name)
	if driver == "" && link.Type() == "bridge" {
		driver = "bridge"
	}

	metadata := graph.Metadata{
		"Name":      link.Attrs().Name,
		"Type":      link.Type(),
		"EncapType": link.Attrs().EncapType,
		"IfIndex":   int64(link.Attrs().Index),
		"MAC":       link.Attrs().HardwareAddr.String(),
		"MTU":       int64(link.Attrs().MTU),
		"Driver":    driver,
	}

	// MaxUint32 is ethtool's "unknown speed" sentinel; don't record it.
	if speed, err := u.ethtool.CmdGet(&ethtool.EthtoolCmd{}, link.Attrs().Name); err == nil {
		if speed != math.MaxUint32 {
			metadata["Speed"] = speed
		}
	}

	if statistics := link.Attrs().Statistics; statistics != nil {
		u.updateMetadataStatistics(statistics, metadata, "Statistics")
	}

	// For veths, record the peer's ifindex so handleIntfIsVeth can pair them.
	if link.Type() == "veth" {
		stats, err := u.ethtool.Stats(link.Attrs().Name)
		if err != nil && err != syscall.ENODEV {
			logging.GetLogger().Errorf("Unable get stats from ethtool (%s): %s", link.Attrs().Name, err.Error())
		} else if index, ok := stats["peer_ifindex"]; ok {
			metadata["PeerIfIndex"] = int64(index)
		}
	}

	ipv4 := u.getLinkIPs(link, netlink.FAMILY_V4)
	if len(ipv4) > 0 {
		metadata["IPV4"] = ipv4
	}
	ipv6 := u.getLinkIPs(link, netlink.FAMILY_V6)
	if len(ipv6) > 0 {
		metadata["IPV6"] = ipv6
	}

	if vlan, ok := link.(*netlink.Vlan); ok {
		metadata["Vlan"] = vlan.VlanId
	}

	if (link.Attrs().Flags & net.FlagUp) > 0 {
		metadata["State"] = "UP"
	} else {
		metadata["State"] = "DOWN"
	}

	if link.Type() == "bond" {
		metadata["BondMode"] = link.(*netlink.Bond).Mode.String()
	}

	var intf *graph.Node
	switch driver {
	case "bridge":
		intf = u.addBridgeLinkToTopology(link, metadata)
	case "openvswitch":
		intf = u.addOvsLinkToTopology(link, metadata)
		// always prefer Type from ovs
		if tp, _ := intf.GetFieldString("Type"); tp != "" {
			metadata["Type"] = tp
		}
	default:
		intf = u.addGenericLinkToTopology(link, metadata)
	}
	if intf == nil {
		return
	}

	u.links[link.Attrs().Name] = intf

	// merge metadata
	tr := u.Graph.StartMetadataTransaction(intf)
	for k, v := range metadata {
		tr.AddMetadata(k, v)
	}
	tr.Commit()

	u.handleIntfIsChild(intf, link)
	u.handleIntfIsVeth(intf, link)
}
// onLinkAdded handles a netlink ADD event, updating the graph only while the
// probe is running so events arriving around shutdown are dropped.
func (u *NetLinkProbe) onLinkAdded(link netlink.Link) {
	// isRunning already returns a bool; comparing it to true was redundant.
	if u.isRunning() {
		u.addLinkToTopology(link)
	}
}
// onLinkDeleted handles a netlink DEL event. The kernel also emits DEL when
// an interface merely leaves a bridge, so the bridge edge is always removed
// but the node itself is only deleted once the kernel no longer knows the
// ifindex (ovs-owned nodes are just unlinked; the ovs probe removes them).
func (u *NetLinkProbe) onLinkDeleted(link netlink.Link) {
	index := link.Attrs().Index

	u.Graph.Lock()
	defer u.Graph.Unlock()

	logging.GetLogger().Debugf("Netlink DEL event for %s(%d) within %s", link.Attrs().Name, link.Attrs().Index, u.Root.String())

	intf := u.Graph.LookupFirstChild(u.Root, graph.Metadata{"IfIndex": index})

	// case of removing the interface from a bridge
	if intf != nil {
		parents := u.Graph.LookupParents(intf, graph.Metadata{"Type": "bridge"}, graph.Metadata{})
		for _, parent := range parents {
			u.Graph.Unlink(parent, intf)
		}
	}

	// check whether the interface has been deleted or not
	// we get a delete event when an interface is removed from a bridge
	_, err := u.netlink.LinkByIndex(index)
	if err != nil && intf != nil {
		// if openvswitch do not remove let's do the job by ovs piece of code
		driver, _ := intf.GetFieldString("Driver")
		uuid, _ := intf.GetFieldString("UUID")
		if driver == "openvswitch" && uuid != "" {
			u.Graph.Unlink(u.Root, intf)
		} else {
			u.Graph.DelNode(intf)
		}
	}

	u.Lock()
	delete(u.indexToChildrenQueue, int64(index))
	delete(u.links, link.Attrs().Name)
	u.Unlock()
}
// getFamilyKey maps a netlink address family to the metadata key used for it
// ("IPV4"/"IPV6"); unknown families yield "".
func getFamilyKey(family int) string {
	switch family {
	case netlink.FAMILY_V4:
		return "IPV4"
	case netlink.FAMILY_V6:
		return "IPV6"
	}
	return ""
}
// onAddressAdded appends the address to the comma-separated IPV4/IPV6
// metadata list of the interface matching index, unless it is already
// present.
func (u *NetLinkProbe) onAddressAdded(addr netlink.Addr, family int, index int) {
	u.Graph.Lock()
	defer u.Graph.Unlock()

	intf := u.Graph.LookupFirstChild(u.Root, graph.Metadata{"IfIndex": index})
	if intf == nil {
		logging.GetLogger().Errorf("No interface with index %d for new address %s", index, addr.IPNet.String())
		return
	}

	key := getFamilyKey(family)
	ipStr := addr.IPNet.String()
	if v, err := intf.GetFieldString(key); err == nil {
		// Compare each stored address exactly. The previous substring
		// check (strings.Contains(v+",", ip+",")) wrongly matched when
		// the new address was a suffix of a stored one (e.g. "0.0.0.1/24"
		// against "10.0.0.1/24") and silently dropped the new address.
		for _, ip := range strings.Split(v, ",") {
			if ip == ipStr {
				return
			}
		}
		ipStr = v + "," + ipStr
	}
	u.Graph.AddMetadata(intf, key, ipStr)
}
// onAddressDeleted removes the given address from the comma-separated
// IPV4/IPV6 metadata list of the interface matching index, deleting the
// metadata key entirely when the last address is gone.
func (u *NetLinkProbe) onAddressDeleted(addr netlink.Addr, family int, index int) {
	u.Graph.Lock()
	defer u.Graph.Unlock()
	intf := u.Graph.LookupFirstChild(u.Root, graph.Metadata{"IfIndex": index})
	if intf == nil {
		// NOTE(review): message says "new address" although this is a delete event
		logging.GetLogger().Errorf("No interface with index %d for new address %s", index, addr.IPNet.String())
		return
	}
	key := getFamilyKey(family)
	if v, err := intf.GetFieldString(key); err == nil {
		ips := strings.Split(v, ",")
		// drop the first occurrence of the deleted address
		for i, ip := range ips {
			if ip == addr.IPNet.String() {
				ips = append(ips[:i], ips[i+1:]...)
				break
			}
		}
		if len(ips) == 0 {
			u.Graph.DelMetadata(intf, key)
		} else {
			u.Graph.AddMetadata(intf, key, strings.Join(ips, ","))
		}
	}
}
// initialize seeds the graph with the interfaces that already exist
// when the probe starts, before any netlink event is received.
func (u *NetLinkProbe) initialize() {
	links, err := u.netlink.LinkList()
	if err != nil {
		logging.GetLogger().Errorf("Unable to list interfaces: %s", err.Error())
		return
	}
	for _, link := range links {
		u.addLinkToTopology(link)
	}
}
// isRunning reports whether the probe event loop is currently active.
func (u *NetLinkProbe) isRunning() bool {
	return atomic.LoadInt64(&u.state) == common.RunningState
}
// parseAddr decodes a RTM_NEWADDR/RTM_DELADDR netlink payload into a
// netlink.Addr plus the address family and interface index it refers
// to. family and index are -1 when the route attributes cannot be
// parsed.
func parseAddr(m []byte) (addr netlink.Addr, family, index int, err error) {
	msg := nl.DeserializeIfAddrmsg(m)
	family = -1
	index = -1
	attrs, err1 := nl.ParseRouteAttr(m[msg.Len():])
	if err1 != nil {
		err = err1
		return
	}
	family = int(msg.Family)
	index = int(msg.Index)
	var local, dst *net.IPNet
	for _, attr := range attrs {
		switch attr.Attr.Type {
		case syscall.IFA_ADDRESS:
			// peer address; differs from IFA_LOCAL on point-to-point links
			dst = &net.IPNet{
				IP:   attr.Value,
				Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),
			}
			addr.Peer = dst
		case syscall.IFA_LOCAL:
			local = &net.IPNet{
				IP:   attr.Value,
				Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),
			}
			addr.IPNet = local
		}
	}
	// IFA_LOCAL should be there but if not, fall back to IFA_ADDRESS
	if local != nil {
		addr.IPNet = local
	} else {
		addr.IPNet = dst
	}
	addr.Scope = int(msg.Scope)
	return
}
// start runs the netlink probe event loop, optionally inside the
// network namespace identified by nsPath (empty string means the
// current one). It subscribes to link/address netlink events, dumps
// the existing links once, spawns a goroutine that periodically
// refreshes per-interface statistics, then polls the netlink socket
// until the probe is stopped.
func (u *NetLinkProbe) start(nsPath string) {
	var context *common.NetNSContext
	var err error

	// Enter the network namespace if necessary.
	if nsPath != "" {
		context, err = common.NewNetNsContext(nsPath)
		if err != nil {
			logging.GetLogger().Errorf("Failed to switch namespace: %s", err.Error())
			return
		}
	}

	// Both NewHandle and Subscribe need to be done in the network namespace.
	// NOTE(review): context is nil when nsPath is empty; the Close calls
	// below assume (*common.NetNSContext).Close is nil-receiver safe — confirm.
	h, err := netlink.NewHandle(syscall.NETLINK_ROUTE)
	if err != nil {
		logging.GetLogger().Errorf("Failed to create netlink handle: %s", err.Error())
		context.Close()
		return
	}
	defer h.Delete()

	s, err := nl.Subscribe(syscall.NETLINK_ROUTE, syscall.RTNLGRP_LINK, syscall.RTNLGRP_IPV4_IFADDR, syscall.RTNLGRP_IPV6_IFADDR)
	if err != nil {
		logging.GetLogger().Errorf("Failed to subscribe to netlink messages: %s", err.Error())
		context.Close()
		return
	}
	defer s.Close()

	u.ethtool, err = ethtool.NewEthtool()
	if err != nil {
		logging.GetLogger().Errorf("Failed to create ethtool object: %s", err.Error())
		context.Close()
		return
	}
	defer u.ethtool.Close()

	epfd, e := syscall.EpollCreate1(0)
	if e != nil {
		// Bug fix: log the epoll error itself; the previous code logged
		// the stale err value left over from the ethtool step.
		logging.GetLogger().Errorf("Failed to create epoll: %s", e.Error())
		return
	}
	defer syscall.Close(epfd)

	// Leave the network namespace.
	context.Close()

	u.wg.Add(1)
	defer u.wg.Done()

	atomic.StoreInt64(&u.state, common.RunningState)
	defer atomic.StoreInt64(&u.state, common.StoppedState)

	u.netlink = h
	u.initialize()

	fd := s.GetFd()
	if err = syscall.SetNonblock(fd, true); err != nil {
		logging.GetLogger().Errorf("Failed to set the netlink fd as non-blocking: %s", err.Error())
		return
	}

	event := syscall.EpollEvent{Events: syscall.EPOLLIN, Fd: int32(fd)}
	if err = syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, fd, &event); err != nil {
		logging.GetLogger().Errorf("Failed to control epoll: %s", err.Error())
		return
	}
	events := make([]syscall.EpollEvent, maxEpollEvents)

	u.wg.Add(1)

	seconds := config.GetConfig().GetInt("agent.topology.netlink.metrics_update")
	ticker := time.NewTicker(time.Duration(seconds) * time.Second)
	done := make(chan struct{})
	defer func() {
		ticker.Stop()
		done <- struct{}{}
	}()

	// Goroutine updating the per-interface statistics metadata.
	go func() {
		defer u.wg.Done()

		last := time.Now().UTC()
		for {
			select {
			case <-ticker.C:
				now := time.Now().UTC()

				// Deadlock fix: iterate over a copy of the links map so the
				// probe lock is not held while taking the graph lock.
				// onLinkDeleted acquires the graph lock and then the probe
				// lock; holding both here in the opposite order can deadlock.
				u.RLock()
				links := make(map[string]*graph.Node, len(u.links))
				for k, v := range u.links {
					links[k] = v
				}
				u.RUnlock()

				for name, node := range links {
					link, err := h.LinkByName(name)
					if err != nil {
						continue
					}
					stats := link.Attrs().Statistics
					if stats == nil {
						continue
					}

					u.Graph.Lock()
					tr := u.Graph.StartMetadataTransaction(node)
					// get and update the metadata transaction instance
					m := tr.Metadata
					// deltas since the previous tick, computed against the
					// absolute counters stored under "Statistics/"
					metric := netlink.LinkStatistics{
						Collisions:        stats.Collisions - m["Statistics/Collisions"].(uint64),
						Multicast:         stats.Multicast - m["Statistics/Multicast"].(uint64),
						RxBytes:           stats.RxBytes - m["Statistics/RxBytes"].(uint64),
						RxCompressed:      stats.RxCompressed - m["Statistics/RxCompressed"].(uint64),
						RxCrcErrors:       stats.RxCrcErrors - m["Statistics/RxCrcErrors"].(uint64),
						RxDropped:         stats.RxDropped - m["Statistics/RxDropped"].(uint64),
						RxErrors:          stats.RxErrors - m["Statistics/RxErrors"].(uint64),
						RxFifoErrors:      stats.RxFifoErrors - m["Statistics/RxFifoErrors"].(uint64),
						RxFrameErrors:     stats.RxFrameErrors - m["Statistics/RxFrameErrors"].(uint64),
						RxLengthErrors:    stats.RxLengthErrors - m["Statistics/RxLengthErrors"].(uint64),
						RxMissedErrors:    stats.RxMissedErrors - m["Statistics/RxMissedErrors"].(uint64),
						RxOverErrors:      stats.RxOverErrors - m["Statistics/RxOverErrors"].(uint64),
						RxPackets:         stats.RxPackets - m["Statistics/RxPackets"].(uint64),
						TxAbortedErrors:   stats.TxAbortedErrors - m["Statistics/TxAbortedErrors"].(uint64),
						TxBytes:           stats.TxBytes - m["Statistics/TxBytes"].(uint64),
						TxCarrierErrors:   stats.TxCarrierErrors - m["Statistics/TxCarrierErrors"].(uint64),
						TxCompressed:      stats.TxCompressed - m["Statistics/TxCompressed"].(uint64),
						TxDropped:         stats.TxDropped - m["Statistics/TxDropped"].(uint64),
						TxErrors:          stats.TxErrors - m["Statistics/TxErrors"].(uint64),
						TxFifoErrors:      stats.TxFifoErrors - m["Statistics/TxFifoErrors"].(uint64),
						TxHeartbeatErrors: stats.TxHeartbeatErrors - m["Statistics/TxHeartbeatErrors"].(uint64),
						TxPackets:         stats.TxPackets - m["Statistics/TxPackets"].(uint64),
						TxWindowErrors:    stats.TxWindowErrors - m["Statistics/TxWindowErrors"].(uint64),
					}
					u.updateMetadataStatistics(stats, m, "Statistics")
					u.updateMetadataStatistics(&metric, m, "LastMetric")
					m["LastMetric/Start"] = common.UnixMillis(last)
					m["LastMetric/Last"] = common.UnixMillis(now)
					tr.Commit()
					u.Graph.Unlock()
				}
				last = now
			case <-done:
				return
			}
		}
	}()

	for atomic.LoadInt64(&u.state) == common.RunningState {
		n, err := syscall.EpollWait(epfd, events[:], 1000)
		if err != nil {
			// EINTR is expected; other errnos are logged but the loop keeps going
			errno, ok := err.(syscall.Errno)
			if ok && errno != syscall.EINTR {
				logging.GetLogger().Errorf("Failed to receive from events from netlink: %s", err.Error())
			}
			continue
		}
		if n == 0 {
			continue
		}

		msgs, err := s.Receive()
		if err != nil {
			if errno, ok := err.(syscall.Errno); !ok || !errno.Temporary() {
				logging.GetLogger().Errorf("Failed to receive from netlink messages: %s", err.Error())
				return
			}
			time.Sleep(1 * time.Second)
			continue
		}

		for _, msg := range msgs {
			switch msg.Header.Type {
			case syscall.RTM_NEWLINK:
				link, err := netlink.LinkDeserialize(&msg.Header, msg.Data)
				if err != nil {
					logging.GetLogger().Warningf("Failed to deserialize netlink message: %s", err.Error())
					continue
				}
				u.onLinkAdded(link)
			case syscall.RTM_DELLINK:
				link, err := netlink.LinkDeserialize(&msg.Header, msg.Data)
				if err != nil {
					logging.GetLogger().Warningf("Failed to deserialize netlink message: %s", err.Error())
					continue
				}
				u.onLinkDeleted(link)
			case syscall.RTM_NEWADDR:
				addr, family, ifindex, err := parseAddr(msg.Data)
				if err != nil {
					// log-message fix: this is an address message, not newlink
					logging.GetLogger().Warningf("Failed to parse newaddr message: %s", err.Error())
					continue
				}
				u.onAddressAdded(addr, family, ifindex)
			case syscall.RTM_DELADDR:
				addr, family, ifindex, err := parseAddr(msg.Data)
				if err != nil {
					logging.GetLogger().Warningf("Failed to parse deladdr message: %s", err.Error())
					continue
				}
				u.onAddressDeleted(addr, family, ifindex)
			}
		}
	}
}
// Start launches the probe event loop asynchronously in the host
// network namespace.
func (u *NetLinkProbe) Start() {
	go u.start("")
}
// Run executes the probe event loop, blocking, within the network
// namespace identified by nsPath.
func (u *NetLinkProbe) Run(nsPath string) {
	u.start(nsPath)
}
// Stop requests the event loop to stop and waits for the main loop and
// the statistics goroutine to terminate. It is a no-op when the probe
// is not running.
func (u *NetLinkProbe) Stop() {
	if atomic.CompareAndSwapInt64(&u.state, common.RunningState, common.StoppingState) {
		u.wg.Wait()
	}
}
// NewNetLinkProbe creates a netlink probe that populates graph g under
// the root node n. The probe starts in the stopped state.
func NewNetLinkProbe(g *graph.Graph, n *graph.Node) *NetLinkProbe {
	probe := new(NetLinkProbe)
	probe.Graph = g
	probe.Root = n
	probe.indexToChildrenQueue = make(map[int64][]graph.Identifier)
	probe.links = make(map[string]*graph.Node)
	probe.state = common.StoppedState
	return probe
}
netlink: avoid dead lock in statistics and delete
Change-Id: I6d3ba2a806f5fabe810766ae7cdf501806037a66
Reviewed-on: https://softwarefactory-project.io/r/7165
Reviewed-by: Sylvain Baubeau <b0a7dc08a3f38d713e107103b0a85b0badc14b84@redhat.com>
Tested-by: Sylvain Baubeau <b0a7dc08a3f38d713e107103b0a85b0badc14b84@redhat.com>
Workflow: Sylvain Baubeau <b0a7dc08a3f38d713e107103b0a85b0badc14b84@redhat.com>
Tested-by: Jenkins CI <d95b56ce41a2e1ac4cecdd398defd7414407cc08@softwarefactory-project.io>
/*
* Copyright (C) 2015 Red Hat, Inc.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package probes
import (
"errors"
"math"
"net"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/safchain/ethtool"
"github.com/vishvananda/netlink"
"github.com/vishvananda/netlink/nl"
"github.com/skydive-project/skydive/common"
"github.com/skydive-project/skydive/config"
"github.com/skydive-project/skydive/logging"
"github.com/skydive-project/skydive/topology/graph"
)
const (
	// maxEpollEvents bounds the number of epoll events fetched per
	// EpollWait call in the probe event loop.
	maxEpollEvents = 32
)

// ownershipMetadata marks edges between the root node and the
// interfaces it owns.
var ownershipMetadata = graph.Metadata{"RelationType": "ownership"}
// NetLinkProbe feeds the topology graph from netlink link and address
// events. The embedded RWMutex protects indexToChildrenQueue and links.
type NetLinkProbe struct {
	sync.RWMutex
	Graph   *graph.Graph // topology graph being populated
	Root    *graph.Node  // node under which all interfaces are attached
	state   int64        // lifecycle state (common.*State), accessed atomically
	ethtool *ethtool.Ethtool
	netlink *netlink.Handle
	// children seen before their master interface, keyed by the master ifindex
	indexToChildrenQueue map[int64][]graph.Identifier
	// graph node per interface name, consumed by the statistics goroutine
	links map[string]*graph.Node
	wg    sync.WaitGroup
}
// linkPendingChildren attaches to intf any children that were observed
// before this interface (their master, identified by index) appeared
// in the graph, then clears the queue for that index.
func (u *NetLinkProbe) linkPendingChildren(intf *graph.Node, index int64) {
	children, ok := u.indexToChildrenQueue[index]
	if !ok {
		return
	}
	for _, id := range children {
		if child := u.Graph.GetNode(id); child != nil {
			u.Graph.Link(intf, child, graph.Metadata{"RelationType": "layer2"})
		}
	}
	delete(u.indexToChildrenQueue, index)
}
// linkIntfToIndex creates a layer2 link from the interface whose
// ifindex is index to intf, or enqueues the relationship in
// indexToChildrenQueue until that parent appears in the graph.
func (u *NetLinkProbe) linkIntfToIndex(intf *graph.Node, index int64) {
	// assuming we have only one master with this index
	parent := u.Graph.LookupFirstChild(u.Root, graph.Metadata{"IfIndex": index})
	if parent != nil {
		// ignore ovs-system interface as it doesn't make any sense according to
		// the following thread:
		// http://openvswitch.org/pipermail/discuss/2013-October/011657.html
		if name, _ := parent.GetFieldString("Name"); name == "ovs-system" {
			return
		}
		// layer2Metadata is declared elsewhere in this file
		if !u.Graph.AreLinked(parent, intf, layer2Metadata) {
			u.Graph.Link(parent, intf, layer2Metadata)
		}
	} else {
		// not yet the bridge so, enqueue for a later add
		u.indexToChildrenQueue[index] = append(u.indexToChildrenQueue[index], intf.ID)
	}
}
// handleIntfIsChild wires intf to its master/parent interfaces: it
// first resolves children queued before this interface appeared, then
// links it under its bridge master and, for vlan interfaces, under
// the parent device.
func (u *NetLinkProbe) handleIntfIsChild(intf *graph.Node, link netlink.Link) {
	// handle pending relationship
	u.linkPendingChildren(intf, int64(link.Attrs().Index))

	// interface being a part of a bridge
	if link.Attrs().MasterIndex != 0 {
		u.linkIntfToIndex(intf, int64(link.Attrs().MasterIndex))
	}

	if link.Attrs().ParentIndex != 0 {
		if _, err := intf.GetFieldString("Vlan"); err == nil {
			// redundant int64(int64(...)) double conversion collapsed
			u.linkIntfToIndex(intf, int64(link.Attrs().ParentIndex))
		}
	}
}
// handleIntfIsVeth links a veth interface to its peer with a layer2
// edge. Only the side whose PeerIfIndex is greater than its own IfIndex
// initiates the link, so each pair is linked exactly once. The peer
// lookup is retried in the background since the peer may not be in the
// graph yet.
func (u *NetLinkProbe) handleIntfIsVeth(intf *graph.Node, link netlink.Link) {
	if link.Type() != "veth" {
		return
	}
	ifIndex, err := intf.GetFieldInt64("IfIndex")
	if err != nil {
		return
	}
	linkMetadata := graph.Metadata{"RelationType": "layer2", "Type": "veth"}
	if peerIndex, err := intf.GetFieldInt64("PeerIfIndex"); err == nil {
		// peerResolver looks the peer up under root (or globally when root
		// is nil) and links it to intf if not already linked
		peerResolver := func(root *graph.Node) error {
			u.Graph.Lock()
			defer u.Graph.Unlock()
			// re get the interface from the graph since the interface could have been deleted
			if u.Graph.GetNode(intf.ID) == nil {
				return errors.New("Node not found")
			}
			var peer *graph.Node
			if root == nil {
				peer = u.Graph.LookupFirstNode(graph.Metadata{"IfIndex": peerIndex, "Type": "veth"})
			} else {
				peer = u.Graph.LookupFirstChild(root, graph.Metadata{"IfIndex": peerIndex, "Type": "veth"})
			}
			if peer == nil {
				return errors.New("Peer not found")
			}
			if !u.Graph.AreLinked(peer, intf, linkMetadata) {
				u.Graph.Link(peer, intf, linkMetadata)
			}
			return nil
		}
		if peerIndex > ifIndex {
			go func() {
				// lookup first in the local namespace then in the whole graph
				// since we can have the exact same interface (name/index) in different namespaces
				// we always take first the closer one.
				localFnc := func() error {
					if u.isRunning() == false {
						return nil
					}
					return peerResolver(u.Root)
				}
				if err := common.Retry(localFnc, 10, 100*time.Millisecond); err != nil {
					peerResolver(nil)
				}
			}()
		}
	}
}
// addGenericLinkToTopology returns the graph node for a link handled by
// no specific driver, creating the node and its ownership edge to the
// root when missing. A node carrying an ovs UUID with the same
// name/ifindex is preferred so netlink and ovs data merge on one node.
func (u *NetLinkProbe) addGenericLinkToTopology(link netlink.Link, m graph.Metadata) *graph.Node {
	name := link.Attrs().Name
	index := int64(link.Attrs().Index)

	intf := u.Graph.LookupFirstChild(u.Root, graph.Metadata{
		"IfIndex": index,
	})

	// could be a member of ovs
	intfs := u.Graph.GetNodes(graph.Metadata{
		"Name":    name,
		"IfIndex": index,
	})
	for _, i := range intfs {
		if uuid, _ := i.GetFieldString("UUID"); uuid != "" {
			intf = i
			break
		}
	}

	if intf == nil {
		intf = u.Graph.NewNode(graph.GenID(), m)
	}

	if !u.Graph.AreLinked(u.Root, intf, ownershipMetadata) {
		u.Graph.Link(u.Root, intf, ownershipMetadata)
	}

	// The former special-case early return for "ovs-system" produced the
	// exact same result as the general path, so the dead branch was removed.
	return intf
}
// addBridgeLinkToTopology returns the graph node for a bridge link,
// creating the node and its ownership edge to the root when missing,
// then attaches any children enqueued while waiting for this bridge.
func (u *NetLinkProbe) addBridgeLinkToTopology(link netlink.Link, m graph.Metadata) *graph.Node {
	attrs := link.Attrs()
	index := int64(attrs.Index)

	intf := u.Graph.LookupFirstChild(u.Root, graph.Metadata{
		"Name":    attrs.Name,
		"IfIndex": index,
	})
	if intf == nil {
		intf = u.Graph.NewNode(graph.GenID(), m)
	}

	if !u.Graph.AreLinked(u.Root, intf, ownershipMetadata) {
		u.Graph.Link(u.Root, intf, ownershipMetadata)
	}

	u.linkPendingChildren(intf, index)
	return intf
}
// addOvsLinkToTopology returns the graph node of an openvswitch-driven
// link, creating the node and its ownership edge to the root when
// needed. The lookup is global (not under root) so the node created by
// the ovs probe is reused.
func (u *NetLinkProbe) addOvsLinkToTopology(link netlink.Link, m graph.Metadata) *graph.Node {
	lookup := graph.Metadata{"Name": link.Attrs().Name, "Driver": "openvswitch"}

	intf := u.Graph.LookupFirstNode(lookup)
	if intf == nil {
		intf = u.Graph.NewNode(graph.GenID(), m)
	}

	if !u.Graph.AreLinked(u.Root, intf, ownershipMetadata) {
		u.Graph.Link(u.Root, intf, ownershipMetadata)
	}
	return intf
}
// getLinkIPs returns the link's addresses of the given family as a
// comma-separated CIDR list, or the empty string on lookup error or
// when the link has no address of that family.
func (u *NetLinkProbe) getLinkIPs(link netlink.Link, family int) string {
	addrs, err := u.netlink.AddrList(link, family)
	if err != nil {
		return ""
	}
	ips := make([]string, 0, len(addrs))
	for _, addr := range addrs {
		ips = append(ips, addr.IPNet.String())
	}
	return strings.Join(ips, ",")
}
// updateMetadataStatistics copies every counter of statistics into
// metadata under "<prefix>/<Counter>" keys as uint64 values. It is used
// both for absolute counters ("Statistics") and per-interval deltas
// ("LastMetric").
func (u *NetLinkProbe) updateMetadataStatistics(statistics *netlink.LinkStatistics, metadata graph.Metadata, prefix string) {
	metadata[prefix+"/Collisions"] = uint64(statistics.Collisions)
	metadata[prefix+"/Multicast"] = uint64(statistics.Multicast)
	metadata[prefix+"/RxBytes"] = uint64(statistics.RxBytes)
	metadata[prefix+"/RxCompressed"] = uint64(statistics.RxCompressed)
	metadata[prefix+"/RxCrcErrors"] = uint64(statistics.RxCrcErrors)
	metadata[prefix+"/RxDropped"] = uint64(statistics.RxDropped)
	metadata[prefix+"/RxErrors"] = uint64(statistics.RxErrors)
	metadata[prefix+"/RxFifoErrors"] = uint64(statistics.RxFifoErrors)
	metadata[prefix+"/RxFrameErrors"] = uint64(statistics.RxFrameErrors)
	metadata[prefix+"/RxLengthErrors"] = uint64(statistics.RxLengthErrors)
	metadata[prefix+"/RxMissedErrors"] = uint64(statistics.RxMissedErrors)
	metadata[prefix+"/RxOverErrors"] = uint64(statistics.RxOverErrors)
	metadata[prefix+"/RxPackets"] = uint64(statistics.RxPackets)
	metadata[prefix+"/TxAbortedErrors"] = uint64(statistics.TxAbortedErrors)
	metadata[prefix+"/TxBytes"] = uint64(statistics.TxBytes)
	metadata[prefix+"/TxCarrierErrors"] = uint64(statistics.TxCarrierErrors)
	metadata[prefix+"/TxCompressed"] = uint64(statistics.TxCompressed)
	metadata[prefix+"/TxDropped"] = uint64(statistics.TxDropped)
	metadata[prefix+"/TxErrors"] = uint64(statistics.TxErrors)
	metadata[prefix+"/TxFifoErrors"] = uint64(statistics.TxFifoErrors)
	metadata[prefix+"/TxHeartbeatErrors"] = uint64(statistics.TxHeartbeatErrors)
	metadata[prefix+"/TxPackets"] = uint64(statistics.TxPackets)
	metadata[prefix+"/TxWindowErrors"] = uint64(statistics.TxWindowErrors)
}
// addLinkToTopology creates or updates the graph node for a netlink
// link: it gathers name/type/MAC/MTU/address/statistics metadata,
// dispatches on the driver name to attach the node under the right
// parent, records the node in u.links, and finally wires master/child
// and veth-peer relationships.
func (u *NetLinkProbe) addLinkToTopology(link netlink.Link) {
	u.Graph.Lock()
	defer u.Graph.Unlock()
	logging.GetLogger().Debugf("Netlink ADD event for %s(%d,%s) within %s", link.Attrs().Name, link.Attrs().Index, link.Type(), u.Root.String())
	driver, _ := u.ethtool.DriverName(link.Attrs().Name)
	if driver == "" && link.Type() == "bridge" {
		driver = "bridge"
	}
	metadata := graph.Metadata{
		"Name":      link.Attrs().Name,
		"Type":      link.Type(),
		"EncapType": link.Attrs().EncapType,
		"IfIndex":   int64(link.Attrs().Index),
		"MAC":       link.Attrs().HardwareAddr.String(),
		"MTU":       int64(link.Attrs().MTU),
		"Driver":    driver,
	}
	// math.MaxUint32 is treated as "speed unknown" and not recorded
	if speed, err := u.ethtool.CmdGet(&ethtool.EthtoolCmd{}, link.Attrs().Name); err == nil {
		if speed != math.MaxUint32 {
			metadata["Speed"] = speed
		}
	}
	if statistics := link.Attrs().Statistics; statistics != nil {
		u.updateMetadataStatistics(statistics, metadata, "Statistics")
	}
	if link.Type() == "veth" {
		// the veth peer ifindex comes from ethtool stats; ENODEV is not
		// reported as an error since the interface may already be gone
		stats, err := u.ethtool.Stats(link.Attrs().Name)
		if err != nil && err != syscall.ENODEV {
			logging.GetLogger().Errorf("Unable get stats from ethtool (%s): %s", link.Attrs().Name, err.Error())
		} else if index, ok := stats["peer_ifindex"]; ok {
			metadata["PeerIfIndex"] = int64(index)
		}
	}
	ipv4 := u.getLinkIPs(link, netlink.FAMILY_V4)
	if len(ipv4) > 0 {
		metadata["IPV4"] = ipv4
	}
	ipv6 := u.getLinkIPs(link, netlink.FAMILY_V6)
	if len(ipv6) > 0 {
		metadata["IPV6"] = ipv6
	}
	if vlan, ok := link.(*netlink.Vlan); ok {
		metadata["Vlan"] = vlan.VlanId
	}
	if (link.Attrs().Flags & net.FlagUp) > 0 {
		metadata["State"] = "UP"
	} else {
		metadata["State"] = "DOWN"
	}
	if link.Type() == "bond" {
		metadata["BondMode"] = link.(*netlink.Bond).Mode.String()
	}
	var intf *graph.Node
	switch driver {
	case "bridge":
		intf = u.addBridgeLinkToTopology(link, metadata)
	case "openvswitch":
		intf = u.addOvsLinkToTopology(link, metadata)
		// always prefer Type from ovs
		if tp, _ := intf.GetFieldString("Type"); tp != "" {
			metadata["Type"] = tp
		}
	default:
		intf = u.addGenericLinkToTopology(link, metadata)
	}
	if intf == nil {
		return
	}
	// the probe lock guards only the map update; it is never held
	// together with a graph operation
	u.Lock()
	u.links[link.Attrs().Name] = intf
	u.Unlock()
	// merge metadata
	tr := u.Graph.StartMetadataTransaction(intf)
	for k, v := range metadata {
		tr.AddMetadata(k, v)
	}
	tr.Commit()
	u.handleIntfIsChild(intf, link)
	u.handleIntfIsVeth(intf, link)
}
// onLinkAdded handles a RTM_NEWLINK event by adding the link to the
// topology, ignoring events received once the probe is stopping.
func (u *NetLinkProbe) onLinkAdded(link netlink.Link) {
	// idiomatic form: compare the bool directly instead of "== true"
	if u.isRunning() {
		u.addLinkToTopology(link)
	}
}
// onLinkDeleted handles a RTM_DELLINK event: it unlinks the interface
// from any bridge parents and, when the link is really gone from the
// kernel, removes (or for openvswitch, only unlinks) the node. The
// probe bookkeeping maps are cleaned up only after the graph lock has
// been released, so the graph lock and the probe lock are never held
// at the same time (the statistics goroutine takes them in the
// opposite order).
func (u *NetLinkProbe) onLinkDeleted(link netlink.Link) {
	index := link.Attrs().Index
	u.Graph.Lock()
	logging.GetLogger().Debugf("Netlink DEL event for %s(%d) within %s", link.Attrs().Name, link.Attrs().Index, u.Root.String())
	intf := u.Graph.LookupFirstChild(u.Root, graph.Metadata{"IfIndex": index})
	// case of removing the interface from a bridge
	if intf != nil {
		parents := u.Graph.LookupParents(intf, graph.Metadata{"Type": "bridge"}, graph.Metadata{})
		for _, parent := range parents {
			u.Graph.Unlink(parent, intf)
		}
	}
	// check whether the interface has been deleted or not
	// we get a delete event when an interface is removed from a bridge
	_, err := u.netlink.LinkByIndex(index)
	if err != nil && intf != nil {
		// if openvswitch do not remove let's do the job by ovs piece of code
		driver, _ := intf.GetFieldString("Driver")
		uuid, _ := intf.GetFieldString("UUID")
		if driver == "openvswitch" && uuid != "" {
			u.Graph.Unlink(u.Root, intf)
		} else {
			u.Graph.DelNode(intf)
		}
	}
	u.Graph.Unlock()
	u.Lock()
	delete(u.indexToChildrenQueue, int64(index))
	delete(u.links, link.Attrs().Name)
	u.Unlock()
}
// getFamilyKey maps a netlink address family to the metadata key used
// to store addresses of that family ("IPV4" or "IPV6"). Any other
// family yields an empty string.
func getFamilyKey(family int) string {
	if family == netlink.FAMILY_V4 {
		return "IPV4"
	}
	if family == netlink.FAMILY_V6 {
		return "IPV6"
	}
	return ""
}
// onAddressAdded appends the address to the comma-separated IPV4/IPV6
// metadata list of the interface matching index, unless it is already
// present.
func (u *NetLinkProbe) onAddressAdded(addr netlink.Addr, family int, index int) {
	u.Graph.Lock()
	defer u.Graph.Unlock()

	intf := u.Graph.LookupFirstChild(u.Root, graph.Metadata{"IfIndex": index})
	if intf == nil {
		logging.GetLogger().Errorf("No interface with index %d for new address %s", index, addr.IPNet.String())
		return
	}

	key := getFamilyKey(family)
	ipStr := addr.IPNet.String()
	if v, err := intf.GetFieldString(key); err == nil {
		// Compare each stored address exactly. The previous substring
		// check (strings.Contains(v+",", ip+",")) wrongly matched when
		// the new address was a suffix of a stored one (e.g. "0.0.0.1/24"
		// against "10.0.0.1/24") and silently dropped the new address.
		for _, ip := range strings.Split(v, ",") {
			if ip == ipStr {
				return
			}
		}
		ipStr = v + "," + ipStr
	}
	u.Graph.AddMetadata(intf, key, ipStr)
}
// onAddressDeleted removes the given address from the comma-separated
// IPV4/IPV6 metadata list of the interface matching index, deleting the
// metadata key entirely when the last address is gone.
func (u *NetLinkProbe) onAddressDeleted(addr netlink.Addr, family int, index int) {
	u.Graph.Lock()
	defer u.Graph.Unlock()
	intf := u.Graph.LookupFirstChild(u.Root, graph.Metadata{"IfIndex": index})
	if intf == nil {
		// NOTE(review): message says "new address" although this is a delete event
		logging.GetLogger().Errorf("No interface with index %d for new address %s", index, addr.IPNet.String())
		return
	}
	key := getFamilyKey(family)
	if v, err := intf.GetFieldString(key); err == nil {
		ips := strings.Split(v, ",")
		// drop the first occurrence of the deleted address
		for i, ip := range ips {
			if ip == addr.IPNet.String() {
				ips = append(ips[:i], ips[i+1:]...)
				break
			}
		}
		if len(ips) == 0 {
			u.Graph.DelMetadata(intf, key)
		} else {
			u.Graph.AddMetadata(intf, key, strings.Join(ips, ","))
		}
	}
}
// initialize seeds the graph with the interfaces that already exist
// when the probe starts, before any netlink event is received.
func (u *NetLinkProbe) initialize() {
	links, err := u.netlink.LinkList()
	if err != nil {
		logging.GetLogger().Errorf("Unable to list interfaces: %s", err.Error())
		return
	}
	for _, link := range links {
		u.addLinkToTopology(link)
	}
}
// isRunning reports whether the probe event loop is currently active.
func (u *NetLinkProbe) isRunning() bool {
	return atomic.LoadInt64(&u.state) == common.RunningState
}
// parseAddr decodes a RTM_NEWADDR/RTM_DELADDR netlink payload into a
// netlink.Addr plus the address family and interface index it refers
// to. family and index are -1 when the route attributes cannot be
// parsed.
func parseAddr(m []byte) (addr netlink.Addr, family, index int, err error) {
	msg := nl.DeserializeIfAddrmsg(m)
	family = -1
	index = -1
	attrs, err1 := nl.ParseRouteAttr(m[msg.Len():])
	if err1 != nil {
		err = err1
		return
	}
	family = int(msg.Family)
	index = int(msg.Index)
	var local, dst *net.IPNet
	for _, attr := range attrs {
		switch attr.Attr.Type {
		case syscall.IFA_ADDRESS:
			// peer address; differs from IFA_LOCAL on point-to-point links
			dst = &net.IPNet{
				IP:   attr.Value,
				Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),
			}
			addr.Peer = dst
		case syscall.IFA_LOCAL:
			local = &net.IPNet{
				IP:   attr.Value,
				Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),
			}
			addr.IPNet = local
		}
	}
	// IFA_LOCAL should be there but if not, fall back to IFA_ADDRESS
	if local != nil {
		addr.IPNet = local
	} else {
		addr.IPNet = dst
	}
	addr.Scope = int(msg.Scope)
	return
}
// start runs the netlink probe event loop, optionally inside the
// network namespace identified by nsPath (empty string means the
// current one). It subscribes to link/address netlink events, dumps
// the existing links once, spawns a goroutine that periodically
// refreshes per-interface statistics, then polls the netlink socket
// until the probe is stopped.
func (u *NetLinkProbe) start(nsPath string) {
	var context *common.NetNSContext
	var err error

	// Enter the network namespace if necessary.
	if nsPath != "" {
		context, err = common.NewNetNsContext(nsPath)
		if err != nil {
			logging.GetLogger().Errorf("Failed to switch namespace: %s", err.Error())
			return
		}
	}

	// Both NewHandle and Subscribe need to be done in the network namespace.
	// NOTE(review): context is nil when nsPath is empty; the Close calls
	// below assume (*common.NetNSContext).Close is nil-receiver safe — confirm.
	h, err := netlink.NewHandle(syscall.NETLINK_ROUTE)
	if err != nil {
		logging.GetLogger().Errorf("Failed to create netlink handle: %s", err.Error())
		context.Close()
		return
	}
	defer h.Delete()

	s, err := nl.Subscribe(syscall.NETLINK_ROUTE, syscall.RTNLGRP_LINK, syscall.RTNLGRP_IPV4_IFADDR, syscall.RTNLGRP_IPV6_IFADDR)
	if err != nil {
		logging.GetLogger().Errorf("Failed to subscribe to netlink messages: %s", err.Error())
		context.Close()
		return
	}
	defer s.Close()

	u.ethtool, err = ethtool.NewEthtool()
	if err != nil {
		logging.GetLogger().Errorf("Failed to create ethtool object: %s", err.Error())
		context.Close()
		return
	}
	defer u.ethtool.Close()

	epfd, e := syscall.EpollCreate1(0)
	if e != nil {
		// Bug fix: log the epoll error itself; the previous code logged
		// the stale err value left over from the ethtool step.
		logging.GetLogger().Errorf("Failed to create epoll: %s", e.Error())
		return
	}
	defer syscall.Close(epfd)

	// Leave the network namespace.
	context.Close()

	u.wg.Add(1)
	defer u.wg.Done()

	atomic.StoreInt64(&u.state, common.RunningState)
	defer atomic.StoreInt64(&u.state, common.StoppedState)

	u.netlink = h
	u.initialize()

	fd := s.GetFd()
	if err = syscall.SetNonblock(fd, true); err != nil {
		logging.GetLogger().Errorf("Failed to set the netlink fd as non-blocking: %s", err.Error())
		return
	}

	event := syscall.EpollEvent{Events: syscall.EPOLLIN, Fd: int32(fd)}
	if err = syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, fd, &event); err != nil {
		logging.GetLogger().Errorf("Failed to control epoll: %s", err.Error())
		return
	}
	events := make([]syscall.EpollEvent, maxEpollEvents)

	u.wg.Add(1)

	seconds := config.GetConfig().GetInt("agent.topology.netlink.metrics_update")
	ticker := time.NewTicker(time.Duration(seconds) * time.Second)
	done := make(chan struct{})
	defer func() {
		ticker.Stop()
		done <- struct{}{}
	}()

	// Goroutine updating the per-interface statistics metadata.
	go func() {
		defer u.wg.Done()

		last := time.Now().UTC()
		for {
			select {
			case <-ticker.C:
				now := time.Now().UTC()

				// do a copy of the original in order to avoid inter locks
				// between graph lock and netlink lock while iterating
				u.RLock()
				links := make(map[string]*graph.Node, len(u.links))
				for k, v := range u.links {
					links[k] = v
				}
				u.RUnlock()

				for name, node := range links {
					link, err := h.LinkByName(name)
					if err != nil {
						continue
					}
					stats := link.Attrs().Statistics
					if stats == nil {
						continue
					}

					u.Graph.Lock()
					tr := u.Graph.StartMetadataTransaction(node)
					// get and update the metadata transaction instance
					m := tr.Metadata
					// deltas since the previous tick, computed against the
					// absolute counters stored under "Statistics/"
					metric := netlink.LinkStatistics{
						Collisions:        stats.Collisions - m["Statistics/Collisions"].(uint64),
						Multicast:         stats.Multicast - m["Statistics/Multicast"].(uint64),
						RxBytes:           stats.RxBytes - m["Statistics/RxBytes"].(uint64),
						RxCompressed:      stats.RxCompressed - m["Statistics/RxCompressed"].(uint64),
						RxCrcErrors:       stats.RxCrcErrors - m["Statistics/RxCrcErrors"].(uint64),
						RxDropped:         stats.RxDropped - m["Statistics/RxDropped"].(uint64),
						RxErrors:          stats.RxErrors - m["Statistics/RxErrors"].(uint64),
						RxFifoErrors:      stats.RxFifoErrors - m["Statistics/RxFifoErrors"].(uint64),
						RxFrameErrors:     stats.RxFrameErrors - m["Statistics/RxFrameErrors"].(uint64),
						RxLengthErrors:    stats.RxLengthErrors - m["Statistics/RxLengthErrors"].(uint64),
						RxMissedErrors:    stats.RxMissedErrors - m["Statistics/RxMissedErrors"].(uint64),
						RxOverErrors:      stats.RxOverErrors - m["Statistics/RxOverErrors"].(uint64),
						RxPackets:         stats.RxPackets - m["Statistics/RxPackets"].(uint64),
						TxAbortedErrors:   stats.TxAbortedErrors - m["Statistics/TxAbortedErrors"].(uint64),
						TxBytes:           stats.TxBytes - m["Statistics/TxBytes"].(uint64),
						TxCarrierErrors:   stats.TxCarrierErrors - m["Statistics/TxCarrierErrors"].(uint64),
						TxCompressed:      stats.TxCompressed - m["Statistics/TxCompressed"].(uint64),
						TxDropped:         stats.TxDropped - m["Statistics/TxDropped"].(uint64),
						TxErrors:          stats.TxErrors - m["Statistics/TxErrors"].(uint64),
						TxFifoErrors:      stats.TxFifoErrors - m["Statistics/TxFifoErrors"].(uint64),
						TxHeartbeatErrors: stats.TxHeartbeatErrors - m["Statistics/TxHeartbeatErrors"].(uint64),
						TxPackets:         stats.TxPackets - m["Statistics/TxPackets"].(uint64),
						TxWindowErrors:    stats.TxWindowErrors - m["Statistics/TxWindowErrors"].(uint64),
					}
					u.updateMetadataStatistics(stats, m, "Statistics")
					u.updateMetadataStatistics(&metric, m, "LastMetric")
					m["LastMetric/Start"] = common.UnixMillis(last)
					m["LastMetric/Last"] = common.UnixMillis(now)
					tr.Commit()
					u.Graph.Unlock()
				}
				last = now
			case <-done:
				return
			}
		}
	}()

	for atomic.LoadInt64(&u.state) == common.RunningState {
		n, err := syscall.EpollWait(epfd, events[:], 1000)
		if err != nil {
			// EINTR is expected; other errnos are logged but the loop keeps going
			errno, ok := err.(syscall.Errno)
			if ok && errno != syscall.EINTR {
				logging.GetLogger().Errorf("Failed to receive from events from netlink: %s", err.Error())
			}
			continue
		}
		if n == 0 {
			continue
		}

		msgs, err := s.Receive()
		if err != nil {
			if errno, ok := err.(syscall.Errno); !ok || !errno.Temporary() {
				logging.GetLogger().Errorf("Failed to receive from netlink messages: %s", err.Error())
				return
			}
			time.Sleep(1 * time.Second)
			continue
		}

		for _, msg := range msgs {
			switch msg.Header.Type {
			case syscall.RTM_NEWLINK:
				link, err := netlink.LinkDeserialize(&msg.Header, msg.Data)
				if err != nil {
					logging.GetLogger().Warningf("Failed to deserialize netlink message: %s", err.Error())
					continue
				}
				u.onLinkAdded(link)
			case syscall.RTM_DELLINK:
				link, err := netlink.LinkDeserialize(&msg.Header, msg.Data)
				if err != nil {
					logging.GetLogger().Warningf("Failed to deserialize netlink message: %s", err.Error())
					continue
				}
				u.onLinkDeleted(link)
			case syscall.RTM_NEWADDR:
				addr, family, ifindex, err := parseAddr(msg.Data)
				if err != nil {
					// log-message fix: this is an address message, not newlink
					logging.GetLogger().Warningf("Failed to parse newaddr message: %s", err.Error())
					continue
				}
				u.onAddressAdded(addr, family, ifindex)
			case syscall.RTM_DELADDR:
				addr, family, ifindex, err := parseAddr(msg.Data)
				if err != nil {
					logging.GetLogger().Warningf("Failed to parse deladdr message: %s", err.Error())
					continue
				}
				u.onAddressDeleted(addr, family, ifindex)
			}
		}
	}
}
// Start launches the probe event loop asynchronously in the host
// network namespace.
func (u *NetLinkProbe) Start() {
	go u.start("")
}
// Run executes the probe event loop, blocking, within the network
// namespace identified by nsPath.
func (u *NetLinkProbe) Run(nsPath string) {
	u.start(nsPath)
}
// Stop requests the event loop to stop and waits for the main loop and
// the statistics goroutine to terminate. It is a no-op when the probe
// is not running.
func (u *NetLinkProbe) Stop() {
	if atomic.CompareAndSwapInt64(&u.state, common.RunningState, common.StoppingState) {
		u.wg.Wait()
	}
}
// NewNetLinkProbe creates a netlink probe that populates graph g under
// the root node n. The probe starts in the stopped state.
func NewNetLinkProbe(g *graph.Graph, n *graph.Node) *NetLinkProbe {
	probe := new(NetLinkProbe)
	probe.Graph = g
	probe.Root = n
	probe.indexToChildrenQueue = make(map[int64][]graph.Identifier)
	probe.links = make(map[string]*graph.Node)
	probe.state = common.StoppedState
	return probe
}
|
// Package tsm1 provides a TSDB in the Time Structured Merge tree format.
package tsm1 // import "github.com/influxdata/influxdb/tsdb/engine/tsm1"
import (
"archive/tar"
"bytes"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/influxdata/influxdb/query"
"github.com/influxdata/influxdb/tsdb/index/inmem"
"github.com/influxdata/influxdb/influxql"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/pkg/bytesutil"
"github.com/influxdata/influxdb/pkg/estimator"
"github.com/influxdata/influxdb/pkg/limiter"
"github.com/influxdata/influxdb/tsdb"
_ "github.com/influxdata/influxdb/tsdb/index"
"github.com/uber-go/zap"
)
//go:generate tmpl -data=@iterator.gen.go.tmpldata iterator.gen.go.tmpl
//go:generate tmpl -data=@file_store.gen.go.tmpldata file_store.gen.go.tmpl
//go:generate tmpl -data=@encoding.gen.go.tmpldata encoding.gen.go.tmpl
//go:generate tmpl -data=@compact.gen.go.tmpldata compact.gen.go.tmpl
// init registers the TSM1 engine with the tsdb engine registry so it
// can be selected by name ("tsm1") when a shard is opened.
func init() {
	tsdb.RegisterEngine("tsm1", NewEngine)
}
var (
	// Ensure Engine implements the interface.
	_ tsdb.Engine = &Engine{}

	// Static objects to prevent small allocs.
	timeBytes              = []byte("time")
	keyFieldSeparatorBytes = []byte(keyFieldSeparator)
)
const (
	// keyFieldSeparator separates the series key from the field name in the composite key
	// that identifies a specific field in series
	keyFieldSeparator = "#!~#"
)
// Statistics gathered by the engine. These are the keys under which
// cache and per-level TSM compaction counters, activity gauges, error
// counts and durations are reported.
const (
	statCacheCompactions        = "cacheCompactions"
	statCacheCompactionsActive  = "cacheCompactionsActive"
	statCacheCompactionError    = "cacheCompactionErr"
	statCacheCompactionDuration = "cacheCompactionDuration"

	statTSMLevel1Compactions        = "tsmLevel1Compactions"
	statTSMLevel1CompactionsActive  = "tsmLevel1CompactionsActive"
	statTSMLevel1CompactionError    = "tsmLevel1CompactionErr"
	statTSMLevel1CompactionDuration = "tsmLevel1CompactionDuration"

	statTSMLevel2Compactions        = "tsmLevel2Compactions"
	statTSMLevel2CompactionsActive  = "tsmLevel2CompactionsActive"
	statTSMLevel2CompactionError    = "tsmLevel2CompactionErr"
	statTSMLevel2CompactionDuration = "tsmLevel2CompactionDuration"

	statTSMLevel3Compactions        = "tsmLevel3Compactions"
	statTSMLevel3CompactionsActive  = "tsmLevel3CompactionsActive"
	statTSMLevel3CompactionError    = "tsmLevel3CompactionErr"
	statTSMLevel3CompactionDuration = "tsmLevel3CompactionDuration"

	statTSMOptimizeCompactions        = "tsmOptimizeCompactions"
	statTSMOptimizeCompactionsActive  = "tsmOptimizeCompactionsActive"
	statTSMOptimizeCompactionError    = "tsmOptimizeCompactionErr"
	statTSMOptimizeCompactionDuration = "tsmOptimizeCompactionDuration"

	statTSMFullCompactions        = "tsmFullCompactions"
	statTSMFullCompactionsActive  = "tsmFullCompactionsActive"
	statTSMFullCompactionError    = "tsmFullCompactionErr"
	statTSMFullCompactionDuration = "tsmFullCompactionDuration"
)
// Engine represents a storage engine with compressed blocks.
type Engine struct {
	mu sync.RWMutex

	// The following group of fields is used to track the state of level compactions within the
	// Engine. The WaitGroup is used to monitor the compaction goroutines, the 'done' channel is
	// used to signal those goroutines to shutdown. Every request to disable level compactions will
	// call 'Wait' on 'wg', with the first goroutine to arrive (levelWorkers == 0 while holding the
	// lock) will close the done channel and re-assign 'nil' to the variable. Re-enabling will
	// decrease 'levelWorkers', and when it decreases to zero, level compactions will be started
	// back up again.
	wg           sync.WaitGroup // waitgroup for active level compaction goroutines
	done         chan struct{}  // channel to signal level compactions to stop
	levelWorkers int            // Number of "workers" that expect compactions to be in a disabled state

	snapDone chan struct{}  // channel to signal snapshot compactions to stop
	snapWG   sync.WaitGroup // waitgroup for running snapshot compactions

	id           uint64
	database     string
	path         string
	logger       zap.Logger // Logger to be used for important messages
	traceLogger  zap.Logger // Logger to be used when trace-logging is on.
	traceLogging bool

	index    tsdb.Index
	fieldset *tsdb.MeasurementFieldSet

	WAL            *WAL
	Cache          *Cache
	Compactor      *Compactor
	CompactionPlan CompactionPlanner
	FileStore      *FileStore

	MaxPointsPerBlock int

	// CacheFlushMemorySizeThreshold specifies the minimum size threshold for
	// the cache when the engine should write a snapshot to a TSM file
	CacheFlushMemorySizeThreshold uint64

	// CacheFlushWriteColdDuration specifies the length of time after which if
	// no writes have been committed to the WAL, the engine will write
	// a snapshot of the cache to a TSM file
	CacheFlushWriteColdDuration time.Duration

	// Controls whether to enable compactions when the engine is open
	enableCompactionsOnOpen bool

	stats *EngineStatistics

	// The limiter for concurrent compactions
	compactionLimiter limiter.Fixed
}
// NewEngine returns a new instance of Engine. It wires together the WAL,
// cache, file store, compactor and compaction planner from the provided
// options; loggers default to a null encoder until WithLogger is called.
func NewEngine(id uint64, idx tsdb.Index, database, path string, walPath string, opt tsdb.EngineOptions) tsdb.Engine {
	w := NewWAL(walPath)
	w.syncDelay = time.Duration(opt.Config.WALFsyncDelay)

	fs := NewFileStore(path)
	cache := NewCache(uint64(opt.Config.CacheMaxMemorySize), path)

	c := &Compactor{
		Dir:       path,
		FileStore: fs,
	}

	logger := zap.New(zap.NullEncoder())
	e := &Engine{
		id:           id,
		database:     database,
		path:         path,
		index:        idx,
		logger:       logger,
		traceLogger:  logger,
		traceLogging: opt.Config.TraceLoggingEnabled,

		fieldset: tsdb.NewMeasurementFieldSet(),

		WAL:   w,
		Cache: cache,

		FileStore:      fs,
		Compactor:      c,
		CompactionPlan: NewDefaultPlanner(fs, time.Duration(opt.Config.CompactFullWriteColdDuration)),

		CacheFlushMemorySizeThreshold: opt.Config.CacheSnapshotMemorySize,
		CacheFlushWriteColdDuration:   time.Duration(opt.Config.CacheSnapshotWriteColdDuration),
		enableCompactionsOnOpen:       true,
		stats:                         &EngineStatistics{},
		compactionLimiter:             opt.CompactionLimiter,
	}

	// Attach fieldset to index.
	e.index.SetFieldSet(e.fieldset)

	// Trace logging on the file store and WAL follows the engine setting.
	if e.traceLogging {
		fs.enableTraceLogging(true)
		w.enableTraceLogging(true)
	}

	return e
}
// SetEnabled sets whether the engine is enabled. It records the setting
// so Open honors it, and applies it to compactions immediately.
func (e *Engine) SetEnabled(enabled bool) {
	e.enableCompactionsOnOpen = enabled
	e.SetCompactionsEnabled(enabled)
}
// SetCompactionsEnabled enables or disables compactions on the engine.
// When disabled, all running compactions are aborted and new compactions
// stop running.
func (e *Engine) SetCompactionsEnabled(enabled bool) {
	if !enabled {
		e.disableSnapshotCompactions()
		e.disableLevelCompactions(false)
		return
	}
	e.enableSnapshotCompactions()
	e.enableLevelCompactions(false)
}
// enableLevelCompactions will request that level compactions start back up again
//
// 'wait' signifies that a corresponding call to disableLevelCompactions(true) was made at some
// point, and the associated task that required disabled compactions is now complete
func (e *Engine) enableLevelCompactions(wait bool) {
	// If we don't need to wait, see if we're already enabled
	if !wait {
		e.mu.RLock()
		if e.done != nil {
			e.mu.RUnlock()
			return
		}
		e.mu.RUnlock()
	}

	e.mu.Lock()
	if wait {
		e.levelWorkers -= 1
	}
	if e.levelWorkers != 0 || e.done != nil {
		// still waiting on more workers or already enabled
		e.mu.Unlock()
		return
	}

	// last one to enable, start things back up
	e.Compactor.EnableCompactions()
	quit := make(chan struct{})
	e.done = quit

	// wg.Add must happen before Unlock so a concurrent disable's
	// wg.Wait observes the four compaction goroutines.
	e.wg.Add(4)
	e.mu.Unlock()

	go func() { defer e.wg.Done(); e.compactTSMFull(quit) }()
	go func() { defer e.wg.Done(); e.compactTSMLevel(true, 1, quit) }()
	go func() { defer e.wg.Done(); e.compactTSMLevel(true, 2, quit) }()
	go func() { defer e.wg.Done(); e.compactTSMLevel(false, 3, quit) }()
}
// disableLevelCompactions will stop level compactions before returning.
//
// If 'wait' is set to true, then a corresponding call to enableLevelCompactions(true) will be
// required before level compactions will start back up again.
func (e *Engine) disableLevelCompactions(wait bool) {
	e.mu.Lock()
	old := e.levelWorkers
	if wait {
		e.levelWorkers += 1
	}

	// Only the first disabler (levelWorkers was 0) with compactions
	// running closes the done channel.
	if old == 0 && e.done != nil {
		// Prevent new compactions from starting
		e.Compactor.DisableCompactions()

		// Stop all background compaction goroutines
		close(e.done)
		e.done = nil
	}
	e.mu.Unlock()

	// Wait for the compaction goroutines outside the lock so they can
	// make progress while shutting down.
	e.wg.Wait()
}
// enableSnapshotCompactions starts the background cache-snapshot goroutine
// if it is not already running.
func (e *Engine) enableSnapshotCompactions() {
	// Check if already enabled under read lock
	e.mu.RLock()
	if e.snapDone != nil {
		e.mu.RUnlock()
		return
	}
	e.mu.RUnlock()

	// Check again under write lock; another goroutine may have enabled
	// snapshots between the RUnlock and Lock above.
	e.mu.Lock()
	if e.snapDone != nil {
		e.mu.Unlock()
		return
	}

	e.Compactor.EnableSnapshots()
	quit := make(chan struct{})
	e.snapDone = quit
	e.snapWG.Add(1)
	e.mu.Unlock()

	go func() { defer e.snapWG.Done(); e.compactCache(quit) }()
}
// disableSnapshotCompactions signals the cache-snapshot goroutine to stop
// and waits for it to exit. It is a no-op if snapshots are not running.
func (e *Engine) disableSnapshotCompactions() {
	e.mu.Lock()

	if e.snapDone != nil {
		close(e.snapDone)
		e.snapDone = nil
		e.Compactor.DisableSnapshots()
	}

	e.mu.Unlock()
	// Wait outside the lock so compactCache can finish its current pass.
	e.snapWG.Wait()
}
// Path returns the path the engine was opened with.
func (e *Engine) Path() string { return e.path }
// SetFieldName delegates to the underlying index.
func (e *Engine) SetFieldName(measurement []byte, name string) {
	e.index.SetFieldName(measurement, name)
}
// MeasurementExists reports, via the underlying index, whether the
// measurement exists.
func (e *Engine) MeasurementExists(name []byte) (bool, error) {
	return e.index.MeasurementExists(name)
}
// MeasurementNamesByExpr delegates to the underlying index.
func (e *Engine) MeasurementNamesByExpr(expr influxql.Expr) ([][]byte, error) {
	return e.index.MeasurementNamesByExpr(expr)
}
// MeasurementNamesByRegex delegates to the underlying index.
func (e *Engine) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
	return e.index.MeasurementNamesByRegex(re)
}
// MeasurementFields returns the measurement fields for a measurement,
// creating the entry in the engine's fieldset if it does not exist yet.
func (e *Engine) MeasurementFields(measurement []byte) *tsdb.MeasurementFields {
	return e.fieldset.CreateFieldsIfNotExists(measurement)
}
// HasTagKey delegates to the underlying index.
func (e *Engine) HasTagKey(name, key []byte) (bool, error) {
	return e.index.HasTagKey(name, key)
}
// MeasurementTagKeysByExpr delegates to the underlying index.
func (e *Engine) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) {
	return e.index.MeasurementTagKeysByExpr(name, expr)
}
// MeasurementTagKeyValuesByExpr returns a set of tag values filtered by an expression.
//
// MeasurementTagKeyValuesByExpr relies on the provided tag keys being sorted.
// The caller can indicate the tag keys have been sorted by setting the
// keysSorted argument appropriately. Tag values are returned in a slice that
// is indexible according to the sorted order of the tag keys, e.g., the values
// for the earliest tag k will be available in index 0 of the returned values
// slice.
//
func (e *Engine) MeasurementTagKeyValuesByExpr(name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error) {
	// The engine simply forwards to the index implementation.
	return e.index.MeasurementTagKeyValuesByExpr(name, keys, expr, keysSorted)
}
// ForEachMeasurementTagKey delegates to the underlying index.
func (e *Engine) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error {
	return e.index.ForEachMeasurementTagKey(name, fn)
}
// TagKeyCardinality delegates to the underlying index.
func (e *Engine) TagKeyCardinality(name, key []byte) int {
	return e.index.TagKeyCardinality(name, key)
}
// SeriesN returns the unique number of series in the index.
func (e *Engine) SeriesN() int64 {
	return e.index.SeriesN()
}
// SeriesSketches delegates to the underlying index.
func (e *Engine) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {
	return e.index.SeriesSketches()
}
// MeasurementsSketches delegates to the underlying index.
func (e *Engine) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {
	return e.index.MeasurementsSketches()
}
// LastModified returns the time when this shard was last modified: the
// later of the WAL's last write time and the file store's last
// modification time.
func (e *Engine) LastModified() time.Time {
	walTime := e.WAL.LastWriteTime()
	if fsTime := e.FileStore.LastModified(); !walTime.After(fsTime) {
		return fsTime
	}
	return walTime
}
// EngineStatistics maintains statistics for the engine.
// All fields are updated with sync/atomic operations.
type EngineStatistics struct {
	CacheCompactions        int64 // Counter of cache compactions that have ever run.
	CacheCompactionsActive  int64 // Gauge of cache compactions currently running.
	CacheCompactionErrors   int64 // Counter of cache compactions that have failed due to error.
	CacheCompactionDuration int64 // Counter of number of wall nanoseconds spent in cache compactions.

	TSMCompactions        [3]int64 // Counter of TSM compactions (by level) that have ever run.
	TSMCompactionsActive  [3]int64 // Gauge of TSM compactions (by level) currently running.
	TSMCompactionErrors   [3]int64 // Counter of TSM compactions (by level) that have failed due to error.
	TSMCompactionDuration [3]int64 // Counter of number of wall nanoseconds spent in TSM compactions (by level).

	TSMOptimizeCompactions        int64 // Counter of optimize compactions that have ever run.
	TSMOptimizeCompactionsActive  int64 // Gauge of optimize compactions currently running.
	TSMOptimizeCompactionErrors   int64 // Counter of optimize compactions that have failed due to error.
	TSMOptimizeCompactionDuration int64 // Counter of number of wall nanoseconds spent in optimize compactions.

	TSMFullCompactions        int64 // Counter of full compactions that have ever run.
	TSMFullCompactionsActive  int64 // Gauge of full compactions currently running.
	TSMFullCompactionErrors   int64 // Counter of full compactions that have failed due to error.
	TSMFullCompactionDuration int64 // Counter of number of wall nanoseconds spent in full compactions.
}
// Statistics returns statistics for periodic monitoring. The engine's own
// counters are reported under the "tsm1_engine" name; statistics from the
// cache, file store and WAL are appended to the result.
func (e *Engine) Statistics(tags map[string]string) []models.Statistic {
	statistics := make([]models.Statistic, 0, 4)
	statistics = append(statistics, models.Statistic{
		Name: "tsm1_engine",
		Tags: tags,
		Values: map[string]interface{}{
			statCacheCompactions:        atomic.LoadInt64(&e.stats.CacheCompactions),
			statCacheCompactionsActive:  atomic.LoadInt64(&e.stats.CacheCompactionsActive),
			statCacheCompactionError:    atomic.LoadInt64(&e.stats.CacheCompactionErrors),
			statCacheCompactionDuration: atomic.LoadInt64(&e.stats.CacheCompactionDuration),

			statTSMLevel1Compactions:        atomic.LoadInt64(&e.stats.TSMCompactions[0]),
			statTSMLevel1CompactionsActive:  atomic.LoadInt64(&e.stats.TSMCompactionsActive[0]),
			statTSMLevel1CompactionError:    atomic.LoadInt64(&e.stats.TSMCompactionErrors[0]),
			statTSMLevel1CompactionDuration: atomic.LoadInt64(&e.stats.TSMCompactionDuration[0]),

			statTSMLevel2Compactions:        atomic.LoadInt64(&e.stats.TSMCompactions[1]),
			statTSMLevel2CompactionsActive:  atomic.LoadInt64(&e.stats.TSMCompactionsActive[1]),
			statTSMLevel2CompactionError:    atomic.LoadInt64(&e.stats.TSMCompactionErrors[1]),
			statTSMLevel2CompactionDuration: atomic.LoadInt64(&e.stats.TSMCompactionDuration[1]),

			statTSMLevel3Compactions:        atomic.LoadInt64(&e.stats.TSMCompactions[2]),
			statTSMLevel3CompactionsActive:  atomic.LoadInt64(&e.stats.TSMCompactionsActive[2]),
			statTSMLevel3CompactionError:    atomic.LoadInt64(&e.stats.TSMCompactionErrors[2]),
			statTSMLevel3CompactionDuration: atomic.LoadInt64(&e.stats.TSMCompactionDuration[2]),

			statTSMOptimizeCompactions:        atomic.LoadInt64(&e.stats.TSMOptimizeCompactions),
			statTSMOptimizeCompactionsActive:  atomic.LoadInt64(&e.stats.TSMOptimizeCompactionsActive),
			statTSMOptimizeCompactionError:    atomic.LoadInt64(&e.stats.TSMOptimizeCompactionErrors),
			statTSMOptimizeCompactionDuration: atomic.LoadInt64(&e.stats.TSMOptimizeCompactionDuration),

			statTSMFullCompactions:        atomic.LoadInt64(&e.stats.TSMFullCompactions),
			statTSMFullCompactionsActive:  atomic.LoadInt64(&e.stats.TSMFullCompactionsActive),
			statTSMFullCompactionError:    atomic.LoadInt64(&e.stats.TSMFullCompactionErrors),
			statTSMFullCompactionDuration: atomic.LoadInt64(&e.stats.TSMFullCompactionDuration),
		},
	})

	statistics = append(statistics, e.Cache.Statistics(tags)...)
	statistics = append(statistics, e.FileStore.Statistics(tags)...)
	statistics = append(statistics, e.WAL.Statistics(tags)...)
	return statistics
}
// DiskSize returns the total size in bytes of all TSM and WAL segments on disk.
func (e *Engine) DiskSize() int64 {
	return e.FileStore.DiskSizeBytes() + e.WAL.DiskSizeBytes()
}
// Open opens and initializes the engine: it creates the data directory,
// cleans up stale state, opens the WAL and file store, reloads the cache
// from the WAL, and (if enabled) starts compactions.
func (e *Engine) Open() error {
	// 0777 is masked by the process umask.
	if err := os.MkdirAll(e.path, 0777); err != nil {
		return err
	}

	// cleanup() is defined elsewhere in this package; presumably it
	// removes leftover temporary files from a prior run.
	if err := e.cleanup(); err != nil {
		return err
	}

	if err := e.WAL.Open(); err != nil {
		return err
	}

	if err := e.FileStore.Open(); err != nil {
		return err
	}

	// Replay WAL segments into the in-memory cache.
	if err := e.reloadCache(); err != nil {
		return err
	}

	e.Compactor.Open()

	if e.enableCompactionsOnOpen {
		e.SetCompactionsEnabled(true)
	}

	return nil
}
// Close closes the engine. Subsequent calls to Close are a nop.
func (e *Engine) Close() error {
	// Stop all compaction goroutines before tearing anything down.
	e.SetCompactionsEnabled(false)

	// Lock now and close everything else down.
	e.mu.Lock()
	defer e.mu.Unlock()
	e.done = nil // Ensures that the channel will not be closed again.

	if err := e.FileStore.Close(); err != nil {
		return err
	}
	return e.WAL.Close()
}
// WithLogger sets the logger for the engine, tagging it with the engine
// name and propagating it to the WAL and file store. The trace logger is
// only upgraded when trace logging is enabled.
func (e *Engine) WithLogger(log zap.Logger) {
	e.logger = log.With(zap.String("engine", "tsm1"))

	if e.traceLogging {
		e.traceLogger = e.logger
	}

	e.WAL.WithLogger(e.logger)
	e.FileStore.WithLogger(e.logger)
}
// LoadMetadataIndex loads the shard metadata into memory by walking every
// key in the file store and the cache and adding it to the index.
func (e *Engine) LoadMetadataIndex(shardID uint64, index tsdb.Index) error {
	now := time.Now()

	// Save reference to index for iterator creation.
	e.index = index

	if err := e.FileStore.WalkKeys(func(key []byte, typ byte) error {
		fieldType, err := tsmFieldTypeToInfluxQLDataType(typ)
		if err != nil {
			return err
		}

		if err := e.addToIndexFromKey(key, fieldType); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return err
	}

	// load metadata from the Cache
	if err := e.Cache.ApplyEntryFn(func(key []byte, entry *entry) error {
		fieldType, err := entry.values.InfluxQLType()
		if err != nil {
			// NOTE: the error is logged but not returned, so the key is
			// still indexed below with the zero-value data type.
			e.logger.Info(fmt.Sprintf("error getting the data type of values for key %s: %s", key, err.Error()))
		}

		if err := e.addToIndexFromKey(key, fieldType); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return err
	}

	e.traceLogger.Info(fmt.Sprintf("Meta data index for shard %d loaded in %v", shardID, time.Since(now)))
	return nil
}
// IsIdle returns true if the cache is empty, there are no running
// compactions and the shard is fully compacted.
func (e *Engine) IsIdle() bool {
	if e.Cache.Size() != 0 {
		return false
	}

	// Sum every "active" gauge; any non-zero value means a compaction
	// is in flight.
	active := atomic.LoadInt64(&e.stats.CacheCompactionsActive)
	for i := range e.stats.TSMCompactionsActive {
		active += atomic.LoadInt64(&e.stats.TSMCompactionsActive[i])
	}
	active += atomic.LoadInt64(&e.stats.TSMFullCompactionsActive)
	active += atomic.LoadInt64(&e.stats.TSMOptimizeCompactionsActive)

	return active == 0 && e.CompactionPlan.FullyCompacted()
}
// Backup writes a tar archive of any TSM files modified since the passed
// in time to the passed in writer. The basePath will be prepended to the names
// of the files in the archive. It will force a snapshot of the WAL first
// then perform the backup with a read lock against the file store. This means
// that new TSM files will not be able to be created in this shard while the
// backup is running. For shards that are still actively getting writes, this
// could cause the WAL to back up, increasing memory usage and eventually rejecting writes.
func (e *Engine) Backup(w io.Writer, basePath string, since time.Time) error {
	path, err := e.CreateSnapshot()
	if err != nil {
		return err
	}

	if err := e.index.SnapshotTo(path); err != nil {
		return err
	}

	tw := tar.NewWriter(w)
	defer tw.Close()

	// Remove the temporary snapshot dir (runs before tw.Close due to
	// LIFO defer order; the archive data has already been written).
	defer os.RemoveAll(path)

	// Recursively read all files from path.
	files, err := readDir(path, "")
	if err != nil {
		return err
	}

	// Filter paths to only changed files.
	var filtered []string
	for _, file := range files {
		fi, err := os.Stat(filepath.Join(path, file))
		if err != nil {
			return err
		} else if !fi.ModTime().After(since) {
			continue
		}
		filtered = append(filtered, file)
	}
	if len(filtered) == 0 {
		return nil
	}

	for _, f := range filtered {
		if err := e.writeFileToBackup(f, basePath, filepath.Join(path, f), tw); err != nil {
			return err
		}
	}

	return nil
}
// writeFileToBackup copies the file into the tar archive. Files will use the shardRelativePath
// in their names. This should be the <db>/<retention policy>/<id> part of the path.
func (e *Engine) writeFileToBackup(name string, shardRelativePath, fullPath string, tw *tar.Writer) error {
	f, err := os.Stat(fullPath)
	if err != nil {
		return err
	}

	h := &tar.Header{
		// ToSlash keeps archive entry names portable across OSes.
		Name:    filepath.ToSlash(filepath.Join(shardRelativePath, name)),
		ModTime: f.ModTime(),
		Size:    f.Size(),
		Mode:    int64(f.Mode()),
	}
	if err := tw.WriteHeader(h); err != nil {
		return err
	}
	fr, err := os.Open(fullPath)
	if err != nil {
		return err
	}

	defer fr.Close()

	// Copy exactly the size recorded in the header.
	_, err = io.CopyN(tw, fr, h.Size)

	return err
}
// Restore reads a tar archive generated by Backup().
// Only files that match basePath will be copied into the directory. This obtains
// a write lock so no operations can be performed while restoring.
func (e *Engine) Restore(r io.Reader, basePath string) error {
	return e.overlay(r, basePath, false)
}
// Import reads a tar archive generated by Backup() and adds each
// file matching basePath as a new TSM file. This obtains
// a write lock so no operations can be performed while Importing.
func (e *Engine) Import(r io.Reader, basePath string) error {
	return e.overlay(r, basePath, true)
}
// overlay reads a tar archive generated by Backup() and adds each file
// from the archive matching basePath to the shard.
// If asNew is true, each file will be installed as a new TSM file even if an
// existing file with the same name in the backup exists.
func (e *Engine) overlay(r io.Reader, basePath string, asNew bool) error {
	// Copy files from archive while under lock to prevent reopening.
	newFiles, err := func() ([]string, error) {
		e.mu.Lock()
		defer e.mu.Unlock()

		var newFiles []string
		tr := tar.NewReader(r)
		for {
			if fileName, err := e.readFileFromBackup(tr, basePath, asNew); err == io.EOF {
				break
			} else if err != nil {
				return nil, err
			} else if fileName != "" {
				newFiles = append(newFiles, fileName)
			}
		}

		if err := syncDir(e.path); err != nil {
			return nil, err
		}

		if err := e.FileStore.Replace(nil, newFiles); err != nil {
			return nil, err
		}
		return newFiles, nil
	}()

	if err != nil {
		return err
	}

	// Load any new series keys to the index
	readers := make([]chan seriesKey, 0, len(newFiles))
	for _, f := range newFiles {
		ch := make(chan seriesKey, 1)
		readers = append(readers, ch)

		// If asNew is true, the files created from readFileFromBackup will be new ones
		// having a temp extension.
		f = strings.TrimSuffix(f, ".tmp")

		fd, err := os.Open(f)
		if err != nil {
			return err
		}

		r, err := NewTSMReader(fd)
		if err != nil {
			return err
		}
		// NOTE: deliberately deferred inside the loop — every reader must
		// stay open until the merge below has consumed all of its keys;
		// they are all closed when overlay returns.
		defer r.Close()

		go func(c chan seriesKey, r *TSMReader) {
			n := r.KeyCount()

			// Stream all keys from this reader into its channel.
			for i := 0; i < n; i++ {
				key, typ := r.KeyAt(i)
				c <- seriesKey{key, typ}
			}
			close(c)
		}(ch, r)
	}

	// Merge and dedup all the series keys across each reader to reduce
	// lock contention on the index.
	merged := merge(readers...)
	for v := range merged {
		fieldType, err := tsmFieldTypeToInfluxQLDataType(v.typ)
		if err != nil {
			return err
		}

		if err := e.addToIndexFromKey(v.key, fieldType); err != nil {
			return err
		}
	}
	return nil
}
// readFileFromBackup copies the next file from the archive into the shard.
// The file is skipped if it does not have a matching shardRelativePath prefix.
// If asNew is true, each file will be installed as a new TSM file even if an
// existing file with the same name in the backup exists.
// It returns the path of the temporary (.tmp) file written, or "" if the
// entry was skipped.
func (e *Engine) readFileFromBackup(tr *tar.Reader, shardRelativePath string, asNew bool) (string, error) {
	// Read next archive file.
	hdr, err := tr.Next()
	if err != nil {
		return "", err
	}

	nativeFileName := filepath.FromSlash(hdr.Name)

	// Skip file if it does not have a matching prefix.
	// NOTE(review): filepath.HasPrefix is deprecated (it does not respect
	// path boundaries) — confirm whether a boundary-aware check is wanted.
	if !filepath.HasPrefix(nativeFileName, shardRelativePath) {
		return "", nil
	}
	filename, err := filepath.Rel(shardRelativePath, nativeFileName)
	if err != nil {
		return "", err
	}

	if asNew {
		// Allocate a fresh generation so the file cannot collide with an
		// existing TSM file.
		filename = fmt.Sprintf("%09d-%09d.%s", e.FileStore.NextGeneration(), 1, TSMFileExtension)
	}

	destPath := filepath.Join(e.path, filename)
	tmp := destPath + ".tmp"

	// Create new file on disk.
	f, err := os.OpenFile(tmp, os.O_CREATE|os.O_RDWR, 0666)
	if err != nil {
		return "", err
	}
	defer f.Close()

	// Copy from archive to the file.
	if _, err := io.CopyN(f, tr, hdr.Size); err != nil {
		return "", err
	}

	// Sync to disk & close.
	if err := f.Sync(); err != nil {
		return "", err
	}

	return tmp, nil
}
// addToIndexFromKey will pull the measurement name, series key, and field name from a composite key and add it to the
// database index and measurement fields
func (e *Engine) addToIndexFromKey(key []byte, fieldType influxql.DataType) error {
	seriesKey, field := SeriesAndFieldFromCompositeKey(key)
	name := tsdb.MeasurementFromSeriesKey(seriesKey)

	mf := e.fieldset.CreateFieldsIfNotExists(name)
	if err := mf.CreateFieldIfNotExists(field, fieldType, false); err != nil {
		return err
	}

	// Build in-memory index, if necessary. Disk-backed indexes are
	// populated elsewhere.
	if e.index.Type() == inmem.IndexName {
		tags, _ := models.ParseTags(seriesKey)
		if err := e.index.InitializeSeries(seriesKey, name, tags); err != nil {
			return err
		}
	}

	return nil
}
// WritePoints writes metadata and point data into the engine.
// It returns an error if new points are added to an existing key.
func (e *Engine) WritePoints(points []models.Point) error {
	values := make(map[string][]Value, len(points))
	var keyBuf []byte
	var baseLen int

	for _, p := range points {
		// Reuse keyBuf across points: series key + separator is the
		// common prefix for all of this point's fields.
		keyBuf = append(keyBuf[:0], p.Key()...)
		keyBuf = append(keyBuf, keyFieldSeparator...)
		baseLen = len(keyBuf)
		iter := p.FieldIterator()
		t := p.Time().UnixNano()
		for iter.Next() {
			// Skip fields named "time"; they are illegal.
			if bytes.Equal(iter.FieldKey(), timeBytes) {
				continue
			}

			keyBuf = append(keyBuf[:baseLen], iter.FieldKey()...)
			var v Value
			switch iter.Type() {
			case models.Float:
				fv, err := iter.FloatValue()
				if err != nil {
					return err
				}
				v = NewFloatValue(t, fv)
			case models.Integer:
				iv, err := iter.IntegerValue()
				if err != nil {
					return err
				}
				v = NewIntegerValue(t, iv)
			case models.Unsigned:
				iv, err := iter.UnsignedValue()
				if err != nil {
					return err
				}
				v = NewUnsignedValue(t, iv)
			case models.String:
				v = NewStringValue(t, iter.StringValue())
			case models.Boolean:
				bv, err := iter.BooleanValue()
				if err != nil {
					return err
				}
				v = NewBooleanValue(t, bv)
			default:
				return fmt.Errorf("unknown field type for %s: %s", string(iter.FieldKey()), p.String())
			}
			values[string(keyBuf)] = append(values[string(keyBuf)], v)
		}
	}

	e.mu.RLock()
	defer e.mu.RUnlock()

	// first try to write to the cache
	err := e.Cache.WriteMulti(values)
	if err != nil {
		return err
	}

	// then persist to the WAL
	_, err = e.WAL.WriteMulti(values)
	return err
}
// containsSeries returns a map of keys indicating whether the key exists and
// has values or not.
func (e *Engine) containsSeries(keys [][]byte) (map[string]bool, error) {
	// keyMap is used to see if a given key exists. keys
	// are the measurement + tagset (minus separator & field)
	keyMap := map[string]bool{}
	for _, k := range keys {
		keyMap[string(k)] = false
	}

	// Mark any series present in the cache.
	for _, k := range e.Cache.unsortedKeys() {
		seriesKey, _ := SeriesAndFieldFromCompositeKey([]byte(k))
		keyMap[string(seriesKey)] = true
	}

	// Mark any series present in the file store. Only keys already in
	// keyMap are flipped; other series are ignored.
	if err := e.FileStore.WalkKeys(func(k []byte, _ byte) error {
		seriesKey, _ := SeriesAndFieldFromCompositeKey(k)
		if _, ok := keyMap[string(seriesKey)]; ok {
			keyMap[string(seriesKey)] = true
		}
		return nil
	}); err != nil {
		return nil, err
	}

	return keyMap, nil
}
// deleteSeries removes all series keys from the engine by deleting the
// full time range.
func (e *Engine) deleteSeries(seriesKeys [][]byte) error {
	return e.DeleteSeriesRange(seriesKeys, math.MinInt64, math.MaxInt64)
}
// DeleteSeriesRange removes the values between min and max (inclusive) from all series.
func (e *Engine) DeleteSeriesRange(seriesKeys [][]byte, min, max int64) error {
	if len(seriesKeys) == 0 {
		return nil
	}

	// Ensure keys are sorted since lower layers require them to be.
	if !bytesutil.IsSorted(seriesKeys) {
		bytesutil.Sort(seriesKeys)
	}

	// Disable and abort running compactions so that tombstones added existing tsm
	// files don't get removed. This would cause deleted measurements/series to
	// re-appear once the compaction completed. We only disable the level compactions
	// so that snapshotting does not stop while writing out tombstones. If it is stopped,
	// and writing tombstones takes a long time, writes can get rejected due to the cache
	// filling up.
	e.disableLevelCompactions(true)
	defer e.enableLevelCompactions(true)

	tempKeys := seriesKeys[:]
	deleteKeys := make([][]byte, 0, len(seriesKeys))
	// go through the keys in the file store
	if err := e.FileStore.WalkKeys(func(k []byte, _ byte) error {
		seriesKey, _ := SeriesAndFieldFromCompositeKey(k)

		// Both tempKeys and keys walked are sorted, skip any passed in keys
		// that don't exist in our key set.
		for len(tempKeys) > 0 && bytes.Compare(tempKeys[0], seriesKey) < 0 {
			tempKeys = tempKeys[1:]
		}

		// Keys match, add the full series key to delete.
		if len(tempKeys) > 0 && bytes.Equal(tempKeys[0], seriesKey) {
			deleteKeys = append(deleteKeys, k)
		}

		return nil
	}); err != nil {
		return err
	}

	if err := e.FileStore.DeleteRange(deleteKeys, min, max); err != nil {
		return err
	}

	// find the keys in the cache and remove them
	// (reuses deleteKeys' backing array; DeleteRange above has already
	// consumed its contents)
	walKeys := deleteKeys[:0]

	// ApplySerialEntryFn cannot return an error in this invocation.
	_ = e.Cache.ApplyEntryFn(func(k []byte, _ *entry) error {
		seriesKey, _ := SeriesAndFieldFromCompositeKey([]byte(k))

		// Cache does not walk keys in sorted order, so search the sorted
		// series we need to delete to see if any of the cache keys match.
		i := bytesutil.SearchBytes(seriesKeys, seriesKey)
		if i < len(seriesKeys) && bytes.Equal(seriesKey, seriesKeys[i]) {
			// k is the measurement + tags + sep + field
			walKeys = append(walKeys, k)
		}
		return nil
	})

	e.Cache.DeleteRange(walKeys, min, max)

	// delete from the WAL
	if _, err := e.WAL.DeleteRange(walKeys, min, max); err != nil {
		return err
	}

	// Have we deleted all points for the series? If so, we need to remove
	// the series from the index.
	existing, err := e.containsSeries(seriesKeys)
	if err != nil {
		return err
	}

	for k, exists := range existing {
		if !exists {
			if err := e.index.UnassignShard(k, e.id); err != nil {
				return err
			}
		}
	}

	return nil
}
// DeleteMeasurement deletes a measurement and all related series.
func (e *Engine) DeleteMeasurement(name []byte) error {
	// Delete the bulk of data outside of the fields lock.
	if err := e.deleteMeasurement(name); err != nil {
		return err
	}

	// Under lock, delete any series created during the first deletion pass,
	// then remove the measurement from the fieldset.
	if err := e.fieldset.DeleteWithLock(string(name), func() error {
		return e.deleteMeasurement(name)
	}); err != nil {
		return err
	}

	return nil
}
// deleteMeasurement deletes a measurement and all related series.
func (e *Engine) deleteMeasurement(name []byte) error {
	// Attempt to find the series keys.
	keys, err := e.index.MeasurementSeriesKeysByExpr(name, nil)
	if err != nil {
		return err
	} else if len(keys) > 0 {
		if err := e.deleteSeries(keys); err != nil {
			return err
		}
	}
	return nil
}
// ForEachMeasurementName iterates over each measurement name in the engine.
func (e *Engine) ForEachMeasurementName(fn func(name []byte) error) error {
	return e.index.ForEachMeasurementName(fn)
}
// MeasurementSeriesKeysByExpr returns a list of series keys matching expr.
func (e *Engine) MeasurementSeriesKeysByExpr(name []byte, expr influxql.Expr) ([][]byte, error) {
	return e.index.MeasurementSeriesKeysByExpr(name, expr)
}
// CreateSeriesListIfNotExists delegates to the underlying index.
func (e *Engine) CreateSeriesListIfNotExists(keys, names [][]byte, tagsSlice []models.Tags) error {
	return e.index.CreateSeriesListIfNotExists(keys, names, tagsSlice)
}
// CreateSeriesIfNotExists delegates to the underlying index.
func (e *Engine) CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error {
	return e.index.CreateSeriesIfNotExists(key, name, tags)
}
// WriteTo is not implemented.
func (e *Engine) WriteTo(w io.Writer) (n int64, err error) { panic("not implemented") }
// WriteSnapshot will snapshot the cache and write a new TSM file with its contents, releasing the snapshot when done.
func (e *Engine) WriteSnapshot() error {
	// Lock and grab the cache snapshot along with all the closed WAL
	// filenames associated with the snapshot

	var started *time.Time

	defer func() {
		// started is only non-nil once the locked section below has run.
		if started != nil {
			e.Cache.UpdateCompactTime(time.Since(*started))
			e.logger.Info(fmt.Sprintf("Snapshot for path %s written in %v", e.path, time.Since(*started)))
		}
	}()

	closedFiles, snapshot, err := func() ([]string, *Cache, error) {
		e.mu.Lock()
		defer e.mu.Unlock()

		now := time.Now()
		started = &now

		if err := e.WAL.CloseSegment(); err != nil {
			return nil, nil, err
		}

		segments, err := e.WAL.ClosedSegments()
		if err != nil {
			return nil, nil, err
		}

		snapshot, err := e.Cache.Snapshot()
		if err != nil {
			return nil, nil, err
		}

		return segments, snapshot, nil
	}()

	if err != nil {
		return err
	}

	if snapshot.Size() == 0 {
		// Nothing to write; release the (empty) snapshot.
		e.Cache.ClearSnapshot(true)
		return nil
	}

	// The snapshotted cache may have duplicate points and unsorted data. We need to deduplicate
	// it before writing the snapshot. This can be very expensive so it's done while we are not
	// holding the engine write lock.
	dedup := time.Now()
	snapshot.Deduplicate()
	e.traceLogger.Info(fmt.Sprintf("Snapshot for path %s deduplicated in %v", e.path, time.Since(dedup)))

	return e.writeSnapshotAndCommit(closedFiles, snapshot)
}
// CreateSnapshot will create a temp directory that holds
// temporary hardlinks to the underlying shard files.
// The cache is flushed to TSM first so the snapshot is complete.
func (e *Engine) CreateSnapshot() (string, error) {
	if err := e.WriteSnapshot(); err != nil {
		return "", err
	}

	e.mu.RLock()
	defer e.mu.RUnlock()

	return e.FileStore.CreateSnapshot()
}
// writeSnapshotAndCommit will write the passed cache to a new TSM file and remove the closed WAL segments.
// On error the cache snapshot is released without discarding its data.
func (e *Engine) writeSnapshotAndCommit(closedFiles []string, snapshot *Cache) (err error) {
	defer func() {
		if err != nil {
			// false: keep the snapshotted data so it can be retried.
			e.Cache.ClearSnapshot(false)
		}
	}()
	// write the new snapshot files
	newFiles, err := e.Compactor.WriteSnapshot(snapshot)
	if err != nil {
		e.logger.Info(fmt.Sprintf("error writing snapshot from compactor: %v", err))
		return err
	}

	e.mu.RLock()
	defer e.mu.RUnlock()

	// update the file store with these new files
	if err := e.FileStore.Replace(nil, newFiles); err != nil {
		e.logger.Info(fmt.Sprintf("error adding new TSM files from snapshot: %v", err))
		return err
	}

	// clear the snapshot from the in-memory cache, then the old WAL files
	e.Cache.ClearSnapshot(true)

	if err := e.WAL.Remove(closedFiles); err != nil {
		// Log only: the snapshot has already been committed at this point.
		e.logger.Info(fmt.Sprintf("error removing closed wal segments: %v", err))
	}

	return nil
}
// compactCache continually checks if the WAL cache should be written to disk.
func (e *Engine) compactCache(quit <-chan struct{}) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-quit:
			return

		case <-ticker.C:
			e.Cache.UpdateAge()
			if !e.ShouldCompactCache(e.WAL.LastWriteTime()) {
				continue
			}

			start := time.Now()
			e.traceLogger.Info(fmt.Sprintf("Compacting cache for %s", e.path))

			// errCompactionsDisabled is expected while compactions are
			// paused and is not counted as an error.
			if err := e.WriteSnapshot(); err != nil && err != errCompactionsDisabled {
				e.logger.Info(fmt.Sprintf("error writing snapshot: %v", err))
				atomic.AddInt64(&e.stats.CacheCompactionErrors, 1)
			} else {
				atomic.AddInt64(&e.stats.CacheCompactions, 1)
			}
			atomic.AddInt64(&e.stats.CacheCompactionDuration, time.Since(start).Nanoseconds())
		}
	}
}
// ShouldCompactCache returns true if the Cache is over its flush threshold
// or if the passed in lastWriteTime is older than the write cold threshold.
func (e *Engine) ShouldCompactCache(lastWriteTime time.Time) bool {
	size := e.Cache.Size()
	if size == 0 {
		// An empty cache never needs snapshotting.
		return false
	}
	if size > e.CacheFlushMemorySizeThreshold {
		return true
	}
	return time.Since(lastWriteTime) > e.CacheFlushWriteColdDuration
}
// compactTSMLevel runs level compactions for the given level on a one-second
// cadence until quit is closed.
func (e *Engine) compactTSMLevel(fast bool, level int, quit <-chan struct{}) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-quit:
			return

		case <-ticker.C:
			strategy := e.levelCompactionStrategy(fast, level)
			if strategy == nil {
				// No eligible TSM files for this level right now.
				continue
			}
			strategy.Apply()
			// Release the files in the compaction plan
			e.CompactionPlan.Release(strategy.compactionGroups)
		}
	}
}
// compactTSMFull runs full/optimize compactions on a one-second cadence
// until quit is closed.
func (e *Engine) compactTSMFull(quit <-chan struct{}) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-quit:
			return

		case <-ticker.C:
			strategy := e.fullCompactionStrategy()
			if strategy == nil {
				// Nothing planned for full or optimize compaction.
				continue
			}
			strategy.Apply()
			// Release the files in the compaction plan
			e.CompactionPlan.Release(strategy.compactionGroups)
		}
	}
}
// onFileStoreReplace is callback handler invoked when the FileStore
// has replaced one set of TSM files with a new set.
// It re-registers all series keys from the new files (and the cache)
// with the index.
func (e *Engine) onFileStoreReplace(newFiles []TSMFile) {
	// Load any new series keys to the index. One producer goroutine per
	// file streams its keys into a buffered channel.
	readers := make([]chan seriesKey, 0, len(newFiles))
	for _, r := range newFiles {
		ch := make(chan seriesKey, 1)
		readers = append(readers, ch)

		go func(c chan seriesKey, r TSMFile) {
			n := r.KeyCount()
			for i := 0; i < n; i++ {
				key, typ := r.KeyAt(i)
				c <- seriesKey{key, typ}
			}
			// Close so the merged consumer below can drain and finish.
			close(c)
		}(ch, r)
	}

	// Merge and dedup all the series keys across each reader to reduce
	// lock contention on the index.
	merged := merge(readers...)
	for v := range merged {
		fieldType, err := tsmFieldTypeToInfluxQLDataType(v.typ)
		if err != nil {
			// Skip keys with unknown block types instead of aborting the refresh.
			e.logger.Error(fmt.Sprintf("refresh index (1): %v", err))
			continue
		}

		if err := e.addToIndexFromKey(v.key, fieldType); err != nil {
			e.logger.Error(fmt.Sprintf("refresh index (2): %v", err))
			continue
		}
	}

	// load metadata from the Cache
	e.Cache.ApplyEntryFn(func(key []byte, entry *entry) error {
		fieldType, err := entry.values.InfluxQLType()
		if err != nil {
			// Returning nil keeps ApplyEntryFn iterating over the
			// remaining cache entries.
			e.logger.Error(fmt.Sprintf("refresh index (3): %v", err))
			return nil
		}

		if err := e.addToIndexFromKey(key, fieldType); err != nil {
			e.logger.Error(fmt.Sprintf("refresh index (4): %v", err))
			return nil
		}

		return nil
	})
}
// compactionStrategy holds the details of what to do in a compaction.
type compactionStrategy struct {
	// compactionGroups is the set of TSM file groups to compact.
	compactionGroups []CompactionGroup

	// concurrency determines how many compactions groups will be started
	// concurrently.  These groups may be limited by the global limiter if
	// enabled.
	concurrency int

	// fast selects Compactor.CompactFast over CompactFull when true.
	fast bool

	// description labels log messages (e.g. "level 1", "full", "optimize").
	description string

	// Stat counters, updated atomically while groups run.
	durationStat *int64
	activeStat   *int64
	successStat  *int64
	errorStat    *int64

	logger    zap.Logger
	compactor *Compactor
	fileStore *FileStore

	// limiter is the engine-wide compaction limiter; a zero-capacity
	// limiter disables this additional throttling.
	limiter limiter.Fixed

	engine *Engine
}
// Apply concurrently compacts all the groups in a compaction strategy.
func (s *compactionStrategy) Apply() {
	start := time.Now()

	// Cap concurrent compaction groups; default to no more than 4 at a time.
	n := s.concurrency
	if n == 0 {
		n = 4
	}
	throttle := limiter.NewFixed(n)

	var wg sync.WaitGroup
	wg.Add(len(s.compactionGroups))
	for i := range s.compactionGroups {
		go func(groupNum int) {
			defer wg.Done()

			// Block until a slot is free, then compact this group.
			throttle.Take()
			defer throttle.Release()

			s.compactGroup(groupNum)
		}(i)
	}
	wg.Wait()

	atomic.AddInt64(s.durationStat, time.Since(start).Nanoseconds())
}
// compactGroup executes the compaction strategy against a single CompactionGroup.
func (s *compactionStrategy) compactGroup(groupNum int) {
	// Limit concurrent compactions if we have a limiter
	// (a zero-capacity limiter means no additional throttling).
	if cap(s.limiter) > 0 {
		s.limiter.Take()
		defer s.limiter.Release()
	}

	group := s.compactionGroups[groupNum]

	start := time.Now()
	s.logger.Info(fmt.Sprintf("beginning %s compaction of group %d, %d TSM files", s.description, groupNum, len(group)))
	for i, f := range group {
		s.logger.Info(fmt.Sprintf("compacting %s group (%d) %s (#%d)", s.description, groupNum, f, i))
	}

	// Run the compaction in a closure so the active counter brackets
	// exactly the compaction work, not the bookkeeping around it.
	files, err := func() ([]string, error) {
		// Count the compaction as active only while the compaction is actually running.
		atomic.AddInt64(s.activeStat, 1)
		defer atomic.AddInt64(s.activeStat, -1)

		if s.fast {
			return s.compactor.CompactFast(group)
		} else {
			return s.compactor.CompactFull(group)
		}
	}()

	if err != nil {
		_, inProgress := err.(errCompactionInProgress)
		if err == errCompactionsDisabled || inProgress {
			// Expected conditions (disabled or already running); not
			// counted against errorStat.
			s.logger.Info(fmt.Sprintf("aborted %s compaction group (%d). %v", s.description, groupNum, err))

			// Back off briefly when another compaction holds these files.
			if _, ok := err.(errCompactionInProgress); ok {
				time.Sleep(time.Second)
			}
			return
		}

		// Real failure: record it and back off before the next attempt.
		s.logger.Info(fmt.Sprintf("error compacting TSM files: %v", err))
		atomic.AddInt64(s.errorStat, 1)
		time.Sleep(time.Second)
		return
	}

	// Swap the compacted files into the file store; onFileStoreReplace
	// refreshes the index from the new files.
	if err := s.fileStore.ReplaceWithCallback(group, files, s.engine.onFileStoreReplace); err != nil {
		s.logger.Info(fmt.Sprintf("error replacing new TSM files: %v", err))
		atomic.AddInt64(s.errorStat, 1)
		time.Sleep(time.Second)
		return
	}

	for i, f := range files {
		s.logger.Info(fmt.Sprintf("compacted %s group (%d) into %s (#%d)", s.description, groupNum, f, i))
	}
	s.logger.Info(fmt.Sprintf("compacted %s %d files into %d files in %s", s.description, len(group), len(files), time.Since(start)))
	atomic.AddInt64(s.successStat, 1)
}
// levelCompactionStrategy returns a compactionStrategy for the given level.
// It returns nil if there are no TSM files to compact.
//
// level is 1-based: the per-level stat arrays are indexed with level-1.
func (e *Engine) levelCompactionStrategy(fast bool, level int) *compactionStrategy {
	compactionGroups := e.CompactionPlan.PlanLevel(level)

	if len(compactionGroups) == 0 {
		return nil
	}

	return &compactionStrategy{
		concurrency:      4,
		compactionGroups: compactionGroups,
		logger:           e.logger,
		fileStore:        e.FileStore,
		compactor:        e.Compactor,
		fast:             fast,
		limiter:          e.compactionLimiter,
		engine:           e,

		description:  fmt.Sprintf("level %d", level),
		activeStat:   &e.stats.TSMCompactionsActive[level-1],
		successStat:  &e.stats.TSMCompactions[level-1],
		errorStat:    &e.stats.TSMCompactionErrors[level-1],
		durationStat: &e.stats.TSMCompactionDuration[level-1],
	}
}
// fullCompactionStrategy returns a compactionStrategy for higher level generations of TSM files.
// It returns nil if there are no TSM files to compact.
//
// When the planner has no full-compaction work, it falls back to an
// "optimize" plan, which uses the fast compaction path and separate stats.
func (e *Engine) fullCompactionStrategy() *compactionStrategy {
	optimize := false
	compactionGroups := e.CompactionPlan.Plan(e.WAL.LastWriteTime())

	if len(compactionGroups) == 0 {
		optimize = true
		compactionGroups = e.CompactionPlan.PlanOptimize()
	}

	if len(compactionGroups) == 0 {
		return nil
	}

	s := &compactionStrategy{
		concurrency:      1,
		compactionGroups: compactionGroups,
		logger:           e.logger,
		fileStore:        e.FileStore,
		compactor:        e.Compactor,
		fast:             optimize,
		limiter:          e.compactionLimiter,
		engine:           e,
	}

	if optimize {
		s.description = "optimize"
		s.activeStat = &e.stats.TSMOptimizeCompactionsActive
		s.successStat = &e.stats.TSMOptimizeCompactions
		s.errorStat = &e.stats.TSMOptimizeCompactionErrors
		s.durationStat = &e.stats.TSMOptimizeCompactionDuration
	} else {
		s.description = "full"
		s.activeStat = &e.stats.TSMFullCompactionsActive
		s.successStat = &e.stats.TSMFullCompactions
		s.errorStat = &e.stats.TSMFullCompactionErrors
		s.durationStat = &e.stats.TSMFullCompactionDuration
	}

	return s
}
// reloadCache reads the WAL segment files and loads them into the cache.
func (e *Engine) reloadCache() error {
	now := time.Now()

	files, err := segmentFileNames(e.WAL.Path())
	if err != nil {
		return err
	}

	// Disable the cache size limit while loading and restore it afterwards.
	limit := e.Cache.MaxSize()
	defer e.Cache.SetMaxSize(limit)
	e.Cache.SetMaxSize(0)

	loader := NewCacheLoader(files)
	loader.WithLogger(e.logger)
	if err := loader.Load(e.Cache); err != nil {
		return err
	}

	e.traceLogger.Info(fmt.Sprintf("Reloaded WAL cache %s in %v", e.WAL.Path(), time.Since(now)))
	return nil
}
// cleanup removes all temp files and dirs that exist on disk. This is should only be run at startup to avoid
// removing tmp files that are still in use.
func (e *Engine) cleanup() error {
	entries, err := ioutil.ReadDir(e.path)
	if os.IsNotExist(err) {
		// No shard directory yet; nothing to clean up.
		return nil
	} else if err != nil {
		return err
	}

	for _, entry := range entries {
		// Remove `.tmp` directories left over from failed shard snapshots.
		if !entry.IsDir() || !strings.HasSuffix(entry.Name(), ".tmp") {
			continue
		}
		if err := os.RemoveAll(filepath.Join(e.path, entry.Name())); err != nil {
			return fmt.Errorf("error removing tmp snapshot directory %q: %s", entry.Name(), err)
		}
	}

	return e.cleanupTempTSMFiles()
}
// cleanupTempTSMFiles deletes any temporary compaction files left in the
// shard directory by interrupted compactions.
func (e *Engine) cleanupTempTSMFiles() error {
	pattern := filepath.Join(e.path, fmt.Sprintf("*.%s", CompactionTempExtension))
	files, err := filepath.Glob(pattern)
	if err != nil {
		return fmt.Errorf("error getting compaction temp files: %s", err.Error())
	}

	for _, file := range files {
		if err := os.Remove(file); err != nil {
			return fmt.Errorf("error removing temp compaction files: %v", err)
		}
	}
	return nil
}
// KeyCursor returns a KeyCursor for the given key starting at time t.
// It simply delegates to the underlying FileStore.
func (e *Engine) KeyCursor(key []byte, t int64, ascending bool) *KeyCursor {
	return e.FileStore.KeyCursor(key, t, ascending)
}
// CreateIterator returns an iterator for the measurement based on opt.
func (e *Engine) CreateIterator(measurement string, opt query.IteratorOptions) (query.Iterator, error) {
	if call, ok := opt.Expr.(*influxql.Call); ok {
		// NOTE(review): Interval.IsZero presumably means no GROUP BY time
		// interval — confirm against query.IteratorOptions.
		if opt.Interval.IsZero() {
			if call.Name == "first" || call.Name == "last" {
				// first()/last() without an interval can be answered by a
				// LIMIT 1 ordered scan per series instead of a call iterator.
				refOpt := opt
				refOpt.Limit = 1
				refOpt.Ascending = call.Name == "first"
				refOpt.Ordered = true
				refOpt.Expr = call.Args[0]

				itrs, err := e.createVarRefIterator(measurement, refOpt)
				if err != nil {
					return nil, err
				}
				return newMergeGuardIterator(itrs, opt)
			}
		}

		inputs, err := e.createCallIterator(measurement, call, opt)
		if err != nil {
			return nil, err
		} else if len(inputs) == 0 {
			return nil, nil
		}
		return newMergeGuardIterator(inputs, opt)
	}

	// Not a function call: plain variable reference iterator.
	itrs, err := e.createVarRefIterator(measurement, opt)
	if err != nil {
		return nil, err
	}
	return newMergeGuardIterator(itrs, opt)
}
// createCallIterator creates one merged iterator per tagset for a function
// call expression. It returns nil iterators (and nil error) when the
// measurement does not exist.
func (e *Engine) createCallIterator(measurement string, call *influxql.Call, opt query.IteratorOptions) ([]query.Iterator, error) {
	ref, _ := call.Args[0].(*influxql.VarRef)

	if exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil {
		return nil, err
	} else if !exists {
		return nil, nil
	}

	// Determine tagsets for this measurement based on dimensions and filters.
	tagSets, err := e.index.TagSets([]byte(measurement), opt)
	if err != nil {
		return nil, err
	}

	// Reverse the tag sets if we are ordering by descending.
	if !opt.Ascending {
		for _, t := range tagSets {
			t.Reverse()
		}
	}

	// Calculate tag sets and apply SLIMIT/SOFFSET.
	tagSets = query.LimitTagSets(tagSets, opt.SLimit, opt.SOffset)

	itrs := make([]query.Iterator, 0, len(tagSets))
	if err := func() error {
		for _, t := range tagSets {
			// Abort if the query was killed.
			//
			// BUG FIX: this previously returned the outer `err`, which is
			// always nil at this point, so an interrupted query returned
			// closed iterators with a nil error. Return an explicit
			// interrupt error and let the error path below close any
			// iterators created so far (closing here as well would close
			// them twice).
			select {
			case <-opt.InterruptCh:
				return query.ErrQueryInterrupted
			default:
			}

			inputs, err := e.createTagSetIterators(ref, measurement, t, opt)
			if err != nil {
				return err
			} else if len(inputs) == 0 {
				continue
			}

			// Wrap each series in a call iterator.
			for i, input := range inputs {
				if opt.InterruptCh != nil {
					input = query.NewInterruptIterator(input, opt.InterruptCh)
				}

				itr, err := query.NewCallIterator(input, opt)
				if err != nil {
					query.Iterators(inputs).Close()
					return err
				}
				inputs[i] = itr
			}

			// Merge the per-series call iterators for this tagset.
			itr := query.NewParallelMergeIterator(inputs, opt, runtime.GOMAXPROCS(0))
			itrs = append(itrs, itr)
		}
		return nil
	}(); err != nil {
		query.Iterators(itrs).Close()
		return nil, err
	}

	return itrs, nil
}
// createVarRefIterator creates an iterator for a variable reference.
// It returns one merged iterator per tagset, or nil when the measurement
// does not exist.
//
// NOTE(review): unlike createCallIterator, this loop performs no
// opt.InterruptCh check per tagset — confirm whether interruption is
// handled upstream for raw queries.
func (e *Engine) createVarRefIterator(measurement string, opt query.IteratorOptions) ([]query.Iterator, error) {
	ref, _ := opt.Expr.(*influxql.VarRef)

	if exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil {
		return nil, err
	} else if !exists {
		return nil, nil
	}

	// Determine tagsets for this measurement based on dimensions and filters.
	tagSets, err := e.index.TagSets([]byte(measurement), opt)
	if err != nil {
		return nil, err
	}

	// Reverse the tag sets if we are ordering by descending.
	if !opt.Ascending {
		for _, t := range tagSets {
			t.Reverse()
		}
	}

	// Calculate tag sets and apply SLIMIT/SOFFSET.
	tagSets = query.LimitTagSets(tagSets, opt.SLimit, opt.SOffset)

	itrs := make([]query.Iterator, 0, len(tagSets))
	if err := func() error {
		for _, t := range tagSets {
			inputs, err := e.createTagSetIterators(ref, measurement, t, opt)
			if err != nil {
				return err
			} else if len(inputs) == 0 {
				continue
			}

			// If we have a LIMIT or OFFSET and the grouping of the outer query
			// is different than the current grouping, we need to perform the
			// limit on each of the individual series keys instead to improve
			// performance.
			if (opt.Limit > 0 || opt.Offset > 0) && len(opt.Dimensions) != len(opt.GroupBy) {
				for i, input := range inputs {
					inputs[i] = newLimitIterator(input, opt)
				}
			}

			itr, err := query.Iterators(inputs).Merge(opt)
			if err != nil {
				query.Iterators(inputs).Close()
				return err
			}

			// Apply a limit on the merged iterator.
			if opt.Limit > 0 || opt.Offset > 0 {
				if len(opt.Dimensions) == len(opt.GroupBy) {
					// When the final dimensions and the current grouping are
					// the same, we will only produce one series so we can use
					// the faster limit iterator.
					itr = newLimitIterator(itr, opt)
				} else {
					// When the dimensions are different than the current
					// grouping, we need to account for the possibility there
					// will be multiple series. The limit iterator in the
					// influxql package handles that scenario.
					itr = query.NewLimitIterator(itr, opt)
				}
			}
			itrs = append(itrs, itr)
		}
		return nil
	}(); err != nil {
		// Close everything created so far before reporting the failure.
		query.Iterators(itrs).Close()
		return nil, err
	}

	return itrs, nil
}
// createTagSetIterators creates a set of iterators for a tagset.
// The tagset's series keys are split into contiguous groups, one per
// logical CPU, and each group is read by its own goroutine.
func (e *Engine) createTagSetIterators(ref *influxql.VarRef, name string, t *query.TagSet, opt query.IteratorOptions) ([]query.Iterator, error) {
	// Nothing to do for an empty tagset. This also guards the division
	// below: with zero series keys, parallelism would be clamped to zero
	// and len(t.SeriesKeys)/parallelism would panic.
	if len(t.SeriesKeys) == 0 {
		return nil, nil
	}

	// Set parallelism by number of logical cpus.
	parallelism := runtime.GOMAXPROCS(0)
	if parallelism > len(t.SeriesKeys) {
		parallelism = len(t.SeriesKeys)
	}

	// Create series key groupings w/ return error.
	groups := make([]struct {
		keys    []string
		filters []influxql.Expr
		itrs    []query.Iterator
		err     error
	}, parallelism)

	// Group series keys.
	n := len(t.SeriesKeys) / parallelism
	for i := 0; i < parallelism; i++ {
		group := &groups[i]

		if i < parallelism-1 {
			group.keys = t.SeriesKeys[i*n : (i+1)*n]
			group.filters = t.Filters[i*n : (i+1)*n]
		} else {
			// The last group absorbs the remainder of an uneven split.
			group.keys = t.SeriesKeys[i*n:]
			group.filters = t.Filters[i*n:]
		}
		group.itrs = make([]query.Iterator, 0, len(group.keys))
	}

	// Read series groups in parallel.
	var wg sync.WaitGroup
	for i := range groups {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			groups[i].itrs, groups[i].err = e.createTagSetGroupIterators(ref, name, groups[i].keys, t, groups[i].filters, opt)
		}(i)
	}
	wg.Wait()

	// Determine total number of iterators so we can allocate only once.
	var itrN int
	for _, group := range groups {
		itrN += len(group.itrs)
	}

	// Combine all iterators together and check for errors.
	var err error
	itrs := make([]query.Iterator, 0, itrN)
	for _, group := range groups {
		if group.err != nil {
			err = group.err
		}
		itrs = append(itrs, group.itrs...)
	}

	// If an error occurred, make sure we close all created iterators.
	if err != nil {
		query.Iterators(itrs).Close()
		return nil, err
	}

	return itrs, nil
}
// createTagSetGroupIterators creates a set of iterators for a subset of a tagset's series.
func (e *Engine) createTagSetGroupIterators(ref *influxql.VarRef, name string, seriesKeys []string, t *query.TagSet, filters []influxql.Expr, opt query.IteratorOptions) ([]query.Iterator, error) {
	// Scratch buffer reused across series; sized for the worst case where a
	// series filter references every name in the overall condition.
	conditionFields := make([]influxql.VarRef, len(influxql.ExprNames(opt.Condition)))

	itrs := make([]query.Iterator, 0, len(seriesKeys))
	for i, seriesKey := range seriesKeys {
		fields := 0
		if filters[i] != nil {
			// Retrieve non-time fields from this series filter and filter out tags.
			for _, f := range influxql.ExprNames(filters[i]) {
				conditionFields[fields] = f
				fields++
			}
		}

		itr, err := e.createVarRefSeriesIterator(ref, name, seriesKey, t, filters[i], conditionFields[:fields], opt)
		if err != nil {
			// Return the iterators built so far along with the error so the
			// caller can close them.
			return itrs, err
		} else if itr == nil {
			continue
		}
		itrs = append(itrs, itr)

		// Abort if the query was killed.
		//
		// BUG FIX: this previously returned `nil, err` where err is always
		// nil at this point, making an interrupted query indistinguishable
		// from an empty result. Surface an explicit interrupt error.
		select {
		case <-opt.InterruptCh:
			query.Iterators(itrs).Close()
			return nil, query.ErrQueryInterrupted
		default:
		}

		// Enforce series limit at creation time.
		if opt.MaxSeriesN > 0 && len(itrs) > opt.MaxSeriesN {
			query.Iterators(itrs).Close()
			return nil, fmt.Errorf("max-select-series limit exceeded: (%d/%d)", len(itrs), opt.MaxSeriesN)
		}
	}
	return itrs, nil
}
// createVarRefSeriesIterator creates an iterator for a variable reference for a series.
//
// It returns (nil, nil) when the referenced field does not exist for this
// series. When ref is nil (auxiliary-fields-only query), a float iterator is
// used as a carrier since the main cursor type does not matter.
func (e *Engine) createVarRefSeriesIterator(ref *influxql.VarRef, name string, seriesKey string, t *query.TagSet, filter influxql.Expr, conditionFields []influxql.VarRef, opt query.IteratorOptions) (query.Iterator, error) {
	_, tfs := models.ParseKey([]byte(seriesKey))
	tags := query.NewTags(tfs.Map())

	// Create options specific for this series.
	itrOpt := opt
	itrOpt.Condition = filter

	// Build auxiliary cursors.
	// Tag values should be returned if the field doesn't exist.
	var aux []cursorAt
	if len(opt.Aux) > 0 {
		aux = make([]cursorAt, len(opt.Aux))
		for i, ref := range opt.Aux {
			// Create cursor from field if a tag wasn't requested.
			if ref.Type != influxql.Tag {
				cur := e.buildCursor(name, seriesKey, tfs, &ref, opt)
				if cur != nil {
					aux[i] = newBufCursor(cur, opt.Ascending)
					continue
				}

				// If a field was requested, use a nil cursor of the requested type.
				switch ref.Type {
				case influxql.Float, influxql.AnyField:
					aux[i] = nilFloatLiteralValueCursor
					continue
				case influxql.Integer:
					aux[i] = nilIntegerLiteralValueCursor
					continue
				case influxql.Unsigned:
					aux[i] = nilUnsignedLiteralValueCursor
					continue
				case influxql.String:
					aux[i] = nilStringLiteralValueCursor
					continue
				case influxql.Boolean:
					aux[i] = nilBooleanLiteralValueCursor
					continue
				}
			}

			// If field doesn't exist, use the tag value.
			if v := tags.Value(ref.Val); v == "" {
				// However, if the tag value is blank then return a null.
				aux[i] = nilStringLiteralValueCursor
			} else {
				aux[i] = &literalValueCursor{value: v}
			}
		}
	}

	// Remove _tagKey condition field.
	// We can't search on it because we can't join it to _tagValue based on time.
	if varRefSliceContains(conditionFields, "_tagKey") {
		conditionFields = varRefSliceRemove(conditionFields, "_tagKey")

		// Remove _tagKey conditional references from iterator.
		// Each _tagKey comparison is replaced with `true` so the rest of
		// the condition still evaluates normally.
		itrOpt.Condition = influxql.RewriteExpr(influxql.CloneExpr(itrOpt.Condition), func(expr influxql.Expr) influxql.Expr {
			switch expr := expr.(type) {
			case *influxql.BinaryExpr:
				if ref, ok := expr.LHS.(*influxql.VarRef); ok && ref.Val == "_tagKey" {
					return &influxql.BooleanLiteral{Val: true}
				}
				if ref, ok := expr.RHS.(*influxql.VarRef); ok && ref.Val == "_tagKey" {
					return &influxql.BooleanLiteral{Val: true}
				}
			}
			return expr
		})
	}

	// Build conditional field cursors.
	// If a conditional field doesn't exist then ignore the series.
	// (Same cursor-or-fallback scheme as the auxiliary cursors above.)
	var conds []cursorAt
	if len(conditionFields) > 0 {
		conds = make([]cursorAt, len(conditionFields))
		for i, ref := range conditionFields {
			// Create cursor from field if a tag wasn't requested.
			if ref.Type != influxql.Tag {
				cur := e.buildCursor(name, seriesKey, tfs, &ref, opt)
				if cur != nil {
					conds[i] = newBufCursor(cur, opt.Ascending)
					continue
				}

				// If a field was requested, use a nil cursor of the requested type.
				switch ref.Type {
				case influxql.Float, influxql.AnyField:
					conds[i] = nilFloatLiteralValueCursor
					continue
				case influxql.Integer:
					conds[i] = nilIntegerLiteralValueCursor
					continue
				case influxql.Unsigned:
					conds[i] = nilUnsignedLiteralValueCursor
					continue
				case influxql.String:
					conds[i] = nilStringLiteralValueCursor
					continue
				case influxql.Boolean:
					conds[i] = nilBooleanLiteralValueCursor
					continue
				}
			}

			// If field doesn't exist, use the tag value.
			if v := tags.Value(ref.Val); v == "" {
				// However, if the tag value is blank then return a null.
				conds[i] = nilStringLiteralValueCursor
			} else {
				conds[i] = &literalValueCursor{value: v}
			}
		}
	}
	condNames := influxql.VarRefs(conditionFields).Strings()

	// Limit tags to only the dimensions selected.
	dimensions := opt.GetDimensions()
	tags = tags.Subset(dimensions)

	// If it's only auxiliary fields then it doesn't matter what type of iterator we use.
	if ref == nil {
		return newFloatIterator(name, tags, itrOpt, nil, aux, conds, condNames), nil
	}

	// Build main cursor.
	cur := e.buildCursor(name, seriesKey, tfs, ref, opt)

	// If the field doesn't exist then don't build an iterator.
	if cur == nil {
		// Release any cursors already built for this series.
		cursorsAt(aux).close()
		cursorsAt(conds).close()
		return nil, nil
	}

	// Remove measurement name if we are selecting the name.
	if ref.Val == "_name" {
		name = ""
	}

	// Dispatch on the concrete cursor type produced by buildCursor.
	switch cur := cur.(type) {
	case floatCursor:
		return newFloatIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
	case integerCursor:
		return newIntegerIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
	case unsignedCursor:
		return newUnsignedIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
	case stringCursor:
		return newStringIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
	case booleanCursor:
		return newBooleanIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
	default:
		panic("unreachable")
	}
}
// buildCursor creates an untyped cursor for a field.
// It returns nil when the measurement or field is unknown, or when a
// requested cast between field types is unsupported.
func (e *Engine) buildCursor(measurement, seriesKey string, tags models.Tags, ref *influxql.VarRef, opt query.IteratorOptions) cursor {
	// Check if this is a system field cursor.
	switch ref.Val {
	case "_name":
		return &stringSliceCursor{values: []string{measurement}}
	case "_tagKey":
		return &stringSliceCursor{values: tags.Keys()}
	case "_tagValue":
		return &stringSliceCursor{values: matchTagValues(tags, opt.Condition)}
	case "_seriesKey":
		return &stringSliceCursor{values: []string{seriesKey}}
	}

	// Look up fields for measurement.
	mf := e.fieldset.Fields(measurement)
	if mf == nil {
		return nil
	}

	// Check for system field for field keys.
	if ref.Val == "_fieldKey" {
		return &stringSliceCursor{values: mf.FieldKeys()}
	}

	// Find individual field.
	f := mf.Field(ref.Val)
	if f == nil {
		return nil
	}

	// Check if we need to perform a cast. Performing a cast in the
	// engine (if it is possible) is much more efficient than an automatic cast.
	// Only numeric casts (float/integer/unsigned) are supported; any other
	// requested cast yields nil.
	if ref.Type != influxql.Unknown && ref.Type != influxql.AnyField && ref.Type != f.Type {
		switch ref.Type {
		case influxql.Float:
			switch f.Type {
			case influxql.Integer:
				cur := e.buildIntegerCursor(measurement, seriesKey, ref.Val, opt)
				return &floatCastIntegerCursor{cursor: cur}
			case influxql.Unsigned:
				cur := e.buildUnsignedCursor(measurement, seriesKey, ref.Val, opt)
				return &floatCastUnsignedCursor{cursor: cur}
			}
		case influxql.Integer:
			switch f.Type {
			case influxql.Float:
				cur := e.buildFloatCursor(measurement, seriesKey, ref.Val, opt)
				return &integerCastFloatCursor{cursor: cur}
			case influxql.Unsigned:
				cur := e.buildUnsignedCursor(measurement, seriesKey, ref.Val, opt)
				return &integerCastUnsignedCursor{cursor: cur}
			}
		case influxql.Unsigned:
			switch f.Type {
			case influxql.Float:
				cur := e.buildFloatCursor(measurement, seriesKey, ref.Val, opt)
				return &unsignedCastFloatCursor{cursor: cur}
			case influxql.Integer:
				cur := e.buildIntegerCursor(measurement, seriesKey, ref.Val, opt)
				return &unsignedCastIntegerCursor{cursor: cur}
			}
		}
		return nil
	}

	// Return appropriate cursor based on type.
	switch f.Type {
	case influxql.Float:
		return e.buildFloatCursor(measurement, seriesKey, ref.Val, opt)
	case influxql.Integer:
		return e.buildIntegerCursor(measurement, seriesKey, ref.Val, opt)
	case influxql.Unsigned:
		return e.buildUnsignedCursor(measurement, seriesKey, ref.Val, opt)
	case influxql.String:
		return e.buildStringCursor(measurement, seriesKey, ref.Val, opt)
	case influxql.Boolean:
		return e.buildBooleanCursor(measurement, seriesKey, ref.Val, opt)
	default:
		panic("unreachable")
	}
}
// matchTagValues returns the values of tags whose key/value pair satisfies
// the given condition. A nil condition matches every tag.
func matchTagValues(tags models.Tags, condition influxql.Expr) []string {
	if condition == nil {
		return tags.Values()
	}

	// Build the evaluation scope from the series' tag key/value pairs.
	data := make(map[string]interface{}, len(tags)+1)
	for _, tag := range tags {
		data[string(tag.Key)] = string(tag.Value)
	}

	// Evaluate the condition once per tag, exposing the key as _tagKey.
	var values []string
	for _, tag := range tags {
		data["_tagKey"] = string(tag.Key)
		if influxql.EvalBool(condition, data) {
			values = append(values, string(tag.Value))
		}
	}
	return values
}
// buildFloatCursor creates a cursor for a float field.
func (e *Engine) buildFloatCursor(measurement, seriesKey, field string, opt query.IteratorOptions) floatCursor {
	// Both the cache and the file store are addressed by the composite
	// series+field key.
	key := SeriesFieldKeyBytes(seriesKey, field)
	values := e.Cache.Values(key)
	fileCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending)
	return newFloatCursor(opt.SeekTime(), opt.Ascending, values, fileCursor)
}
// buildIntegerCursor creates a cursor for an integer field.
func (e *Engine) buildIntegerCursor(measurement, seriesKey, field string, opt query.IteratorOptions) integerCursor {
	// Both the cache and the file store are addressed by the composite
	// series+field key.
	key := SeriesFieldKeyBytes(seriesKey, field)
	values := e.Cache.Values(key)
	fileCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending)
	return newIntegerCursor(opt.SeekTime(), opt.Ascending, values, fileCursor)
}
// buildUnsignedCursor creates a cursor for an unsigned field.
func (e *Engine) buildUnsignedCursor(measurement, seriesKey, field string, opt query.IteratorOptions) unsignedCursor {
	// Both the cache and the file store are addressed by the composite
	// series+field key.
	key := SeriesFieldKeyBytes(seriesKey, field)
	values := e.Cache.Values(key)
	fileCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending)
	return newUnsignedCursor(opt.SeekTime(), opt.Ascending, values, fileCursor)
}
// buildStringCursor creates a cursor for a string field.
func (e *Engine) buildStringCursor(measurement, seriesKey, field string, opt query.IteratorOptions) stringCursor {
	// Both the cache and the file store are addressed by the composite
	// series+field key.
	key := SeriesFieldKeyBytes(seriesKey, field)
	values := e.Cache.Values(key)
	fileCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending)
	return newStringCursor(opt.SeekTime(), opt.Ascending, values, fileCursor)
}
// buildBooleanCursor creates a cursor for a boolean field.
func (e *Engine) buildBooleanCursor(measurement, seriesKey, field string, opt query.IteratorOptions) booleanCursor {
	// Both the cache and the file store are addressed by the composite
	// series+field key.
	key := SeriesFieldKeyBytes(seriesKey, field)
	values := e.Cache.Values(key)
	fileCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending)
	return newBooleanCursor(opt.SeekTime(), opt.Ascending, values, fileCursor)
}
// SeriesPointIterator returns an iterator over the series in this shard,
// delegating directly to the underlying index implementation.
func (e *Engine) SeriesPointIterator(opt query.IteratorOptions) (query.Iterator, error) {
	return e.index.SeriesPointIterator(opt)
}
// SeriesFieldKey combine a series key and field name for a unique string to be hashed to a numeric ID.
// The two parts are joined with keyFieldSeparator; see
// SeriesAndFieldFromCompositeKey for the inverse operation.
func SeriesFieldKey(seriesKey, field string) string {
	return seriesKey + keyFieldSeparator + field
}
// SeriesFieldKeyBytes is the []byte form of SeriesFieldKey: it joins the
// series key and field name with keyFieldSeparator in a single allocation.
func SeriesFieldKeyBytes(seriesKey, field string) []byte {
	b := make([]byte, 0, len(seriesKey)+len(keyFieldSeparator)+len(field))
	b = append(b, seriesKey...)
	b = append(b, keyFieldSeparatorBytes...)
	b = append(b, field...)
	return b
}
// tsmFieldTypeToInfluxQLDataType maps a TSM block type byte to its influxql
// data type. It returns influxql.Unknown and an error for unrecognized
// block types.
func tsmFieldTypeToInfluxQLDataType(typ byte) (influxql.DataType, error) {
	switch typ {
	case BlockFloat64:
		return influxql.Float, nil
	case BlockInteger:
		return influxql.Integer, nil
	case BlockUnsigned:
		return influxql.Unsigned, nil
	case BlockBoolean:
		return influxql.Boolean, nil
	case BlockString:
		return influxql.String, nil
	default:
		return influxql.Unknown, fmt.Errorf("unknown block type: %v", typ)
	}
}
// SeriesAndFieldFromCompositeKey returns the series key and the field key extracted from the composite key.
// A key without the separator is returned unchanged with a nil field.
func SeriesAndFieldFromCompositeKey(key []byte) ([]byte, []byte) {
	sep := bytes.Index(key, keyFieldSeparatorBytes)
	if sep < 0 {
		// No separator present, so there is no field portion.
		return key, nil
	}
	series, field := key[:sep], key[sep+len(keyFieldSeparatorBytes):]
	return series, field
}
// readDir recursively reads all files from a path.
func readDir(root, rel string) ([]string, error) {
// Open root.
f, err := os.Open(filepath.Join(root, rel))
if err != nil {
return nil, err
}
defer f.Close()
// Read all files.
fis, err := f.Readdir(-1)
if err != nil {
return nil, err
}
// Read all subdirectories and append to the end.
var paths []string
for _, fi := range fis {
// Simply append if it's a file.
if !fi.IsDir() {
paths = append(paths, filepath.Join(rel, fi.Name()))
continue
}
// Read and append nested file paths.
children, err := readDir(root, filepath.Join(rel, fi.Name()))
if err != nil {
return nil, err
}
paths = append(paths, children...)
}
return paths, nil
}
// varRefSliceContains reports whether any VarRef in a has the value v.
func varRefSliceContains(a []influxql.VarRef, v string) bool {
	for i := range a {
		if a[i].Val == v {
			return true
		}
	}
	return false
}
// varRefSliceRemove returns a with every VarRef whose value equals v removed.
// The input slice is returned unchanged (no allocation) when v is absent.
func varRefSliceRemove(a []influxql.VarRef, v string) []influxql.VarRef {
	if !varRefSliceContains(a, v) {
		return a
	}

	filtered := make([]influxql.VarRef, 0, len(a))
	for _, ref := range a {
		if ref.Val == v {
			continue
		}
		filtered = append(filtered, ref)
	}
	return filtered
}
// NOTE(review): stray non-Go text found here — "redundant allocation is
// overwritten by line 1769". It appears to be a leftover review annotation;
// converted to a comment so the file parses. Verify the claim and remove.
// Package tsm1 provides a TSDB in the Time Structured Merge tree format.
package tsm1 // import "github.com/influxdata/influxdb/tsdb/engine/tsm1"
import (
"archive/tar"
"bytes"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/influxdata/influxdb/query"
"github.com/influxdata/influxdb/tsdb/index/inmem"
"github.com/influxdata/influxdb/influxql"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/pkg/bytesutil"
"github.com/influxdata/influxdb/pkg/estimator"
"github.com/influxdata/influxdb/pkg/limiter"
"github.com/influxdata/influxdb/tsdb"
_ "github.com/influxdata/influxdb/tsdb/index"
"github.com/uber-go/zap"
)
//go:generate tmpl -data=@iterator.gen.go.tmpldata iterator.gen.go.tmpl
//go:generate tmpl -data=@file_store.gen.go.tmpldata file_store.gen.go.tmpl
//go:generate tmpl -data=@encoding.gen.go.tmpldata encoding.gen.go.tmpl
//go:generate tmpl -data=@compact.gen.go.tmpldata compact.gen.go.tmpl
// init registers the tsm1 engine constructor with the tsdb package so
// shards can instantiate it by name.
func init() {
	tsdb.RegisterEngine("tsm1", NewEngine)
}
var (
	// Ensure Engine implements the interface.
	_ tsdb.Engine = &Engine{}

	// Static objects to prevent small allocs.
	timeBytes              = []byte("time")
	keyFieldSeparatorBytes = []byte(keyFieldSeparator)
)
const (
	// keyFieldSeparator separates the series key from the field name in the
	// composite key that identifies a specific field in a series.
	keyFieldSeparator = "#!~#"
)
// Statistics gathered by the engine.
// These are the stat keys under which compaction metrics are reported.
const (
	// Cache snapshot compaction counters.
	statCacheCompactions        = "cacheCompactions"
	statCacheCompactionsActive  = "cacheCompactionsActive"
	statCacheCompactionError    = "cacheCompactionErr"
	statCacheCompactionDuration = "cacheCompactionDuration"

	// Level 1 TSM compaction counters.
	statTSMLevel1Compactions        = "tsmLevel1Compactions"
	statTSMLevel1CompactionsActive  = "tsmLevel1CompactionsActive"
	statTSMLevel1CompactionError    = "tsmLevel1CompactionErr"
	statTSMLevel1CompactionDuration = "tsmLevel1CompactionDuration"

	// Level 2 TSM compaction counters.
	statTSMLevel2Compactions        = "tsmLevel2Compactions"
	statTSMLevel2CompactionsActive  = "tsmLevel2CompactionsActive"
	statTSMLevel2CompactionError    = "tsmLevel2CompactionErr"
	statTSMLevel2CompactionDuration = "tsmLevel2CompactionDuration"

	// Level 3 TSM compaction counters.
	statTSMLevel3Compactions        = "tsmLevel3Compactions"
	statTSMLevel3CompactionsActive  = "tsmLevel3CompactionsActive"
	statTSMLevel3CompactionError    = "tsmLevel3CompactionErr"
	statTSMLevel3CompactionDuration = "tsmLevel3CompactionDuration"

	// Optimize compaction counters.
	statTSMOptimizeCompactions        = "tsmOptimizeCompactions"
	statTSMOptimizeCompactionsActive  = "tsmOptimizeCompactionsActive"
	statTSMOptimizeCompactionError    = "tsmOptimizeCompactionErr"
	statTSMOptimizeCompactionDuration = "tsmOptimizeCompactionDuration"

	// Full compaction counters.
	statTSMFullCompactions        = "tsmFullCompactions"
	statTSMFullCompactionsActive  = "tsmFullCompactionsActive"
	statTSMFullCompactionError    = "tsmFullCompactionErr"
	statTSMFullCompactionDuration = "tsmFullCompactionDuration"
)
// Engine represents a storage engine with compressed blocks.
type Engine struct {
	mu sync.RWMutex

	// The following group of fields is used to track the state of level compactions within the
	// Engine. The WaitGroup is used to monitor the compaction goroutines, the 'done' channel is
	// used to signal those goroutines to shutdown. Every request to disable level compactions will
	// call 'Wait' on 'wg', with the first goroutine to arrive (levelWorkers == 0 while holding the
	// lock) will close the done channel and re-assign 'nil' to the variable. Re-enabling will
	// decrease 'levelWorkers', and when it decreases to zero, level compactions will be started
	// back up again.

	wg           sync.WaitGroup // waitgroup for active level compaction goroutines
	done         chan struct{}  // channel to signal level compactions to stop
	levelWorkers int            // Number of "workers" that expect compactions to be in a disabled state

	snapDone chan struct{}  // channel to signal snapshot compactions to stop
	snapWG   sync.WaitGroup // waitgroup for running snapshot compactions

	id           uint64
	database     string
	path         string
	logger       zap.Logger // Logger to be used for important messages
	traceLogger  zap.Logger // Logger to be used when trace-logging is on.
	traceLogging bool

	index    tsdb.Index
	fieldset *tsdb.MeasurementFieldSet

	WAL            *WAL
	Cache          *Cache
	Compactor      *Compactor
	CompactionPlan CompactionPlanner
	FileStore      *FileStore

	MaxPointsPerBlock int

	// CacheFlushMemorySizeThreshold specifies the minimum size threshold for
	// the cache when the engine should write a snapshot to a TSM file
	CacheFlushMemorySizeThreshold uint64

	// CacheFlushWriteColdDuration specifies the length of time after which if
	// no writes have been committed to the WAL, the engine will write
	// a snapshot of the cache to a TSM file
	CacheFlushWriteColdDuration time.Duration

	// Controls whether to enable compactions when the engine is open
	enableCompactionsOnOpen bool

	stats *EngineStatistics

	// The limiter for concurrent compactions
	compactionLimiter limiter.Fixed
}
// NewEngine returns a new instance of Engine.
//
// The engine owns a WAL rooted at walPath and a FileStore/Cache/Compactor
// rooted at path. Configuration values (fsync delay, cache sizes, cold
// durations, trace logging) are taken from opt.Config. The returned engine
// starts with a no-op logger; callers install a real one via WithLogger.
func NewEngine(id uint64, idx tsdb.Index, database, path string, walPath string, opt tsdb.EngineOptions) tsdb.Engine {
	w := NewWAL(walPath)
	w.syncDelay = time.Duration(opt.Config.WALFsyncDelay)

	fs := NewFileStore(path)
	cache := NewCache(uint64(opt.Config.CacheMaxMemorySize), path)

	c := &Compactor{
		Dir:       path,
		FileStore: fs,
	}

	logger := zap.New(zap.NullEncoder())
	e := &Engine{
		id:           id,
		database:     database,
		path:         path,
		index:        idx,
		logger:       logger,
		traceLogger:  logger,
		traceLogging: opt.Config.TraceLoggingEnabled,

		fieldset: tsdb.NewMeasurementFieldSet(),

		WAL:   w,
		Cache: cache,

		FileStore:      fs,
		Compactor:      c,
		CompactionPlan: NewDefaultPlanner(fs, time.Duration(opt.Config.CompactFullWriteColdDuration)),

		CacheFlushMemorySizeThreshold: opt.Config.CacheSnapshotMemorySize,
		CacheFlushWriteColdDuration:   time.Duration(opt.Config.CacheSnapshotWriteColdDuration),
		enableCompactionsOnOpen:       true,
		stats:                         &EngineStatistics{},
		compactionLimiter:             opt.CompactionLimiter,
	}

	// Attach fieldset to index.
	e.index.SetFieldSet(e.fieldset)

	if e.traceLogging {
		fs.enableTraceLogging(true)
		w.enableTraceLogging(true)
	}

	return e
}
// SetEnabled sets whether the engine is enabled.
// It records the desired state so a later Open honors it, and also applies
// it immediately via SetCompactionsEnabled.
func (e *Engine) SetEnabled(enabled bool) {
	e.enableCompactionsOnOpen = enabled
	e.SetCompactionsEnabled(enabled)
}
// SetCompactionsEnabled enables compactions on the engine. When disabled
// all running compactions are aborted and new compactions stop running.
func (e *Engine) SetCompactionsEnabled(enabled bool) {
	if !enabled {
		e.disableSnapshotCompactions()
		e.disableLevelCompactions(false)
		return
	}
	e.enableSnapshotCompactions()
	e.enableLevelCompactions(false)
}
// enableLevelCompactions will request that level compactions start back up again.
//
// 'wait' signifies that a corresponding call to disableLevelCompactions(true) was made at some
// point, and the associated task that required disabled compactions is now complete.
func (e *Engine) enableLevelCompactions(wait bool) {
	// If we don't need to wait, see if we're already enabled
	if !wait {
		e.mu.RLock()
		if e.done != nil {
			e.mu.RUnlock()
			return
		}
		e.mu.RUnlock()
	}

	e.mu.Lock()
	if wait {
		e.levelWorkers--
	}
	if e.levelWorkers != 0 || e.done != nil {
		// still waiting on more workers or already enabled
		e.mu.Unlock()
		return
	}

	// last one to enable, start things back up
	e.Compactor.EnableCompactions()
	quit := make(chan struct{})
	e.done = quit

	// One goroutine for full compactions plus one per TSM level (1-3).
	e.wg.Add(4)
	e.mu.Unlock()

	go func() { defer e.wg.Done(); e.compactTSMFull(quit) }()
	go func() { defer e.wg.Done(); e.compactTSMLevel(true, 1, quit) }()
	go func() { defer e.wg.Done(); e.compactTSMLevel(true, 2, quit) }()
	go func() { defer e.wg.Done(); e.compactTSMLevel(false, 3, quit) }()
}
// disableLevelCompactions will stop level compactions before returning.
//
// If 'wait' is set to true, then a corresponding call to enableLevelCompactions(true) will be
// required before level compactions will start back up again.
func (e *Engine) disableLevelCompactions(wait bool) {
	e.mu.Lock()
	old := e.levelWorkers
	if wait {
		e.levelWorkers++
	}

	// Only the first caller to disable (no workers registered before this
	// call and compactions currently running) actually tears things down.
	if old == 0 && e.done != nil {
		// Prevent new compactions from starting
		e.Compactor.DisableCompactions()

		// Stop all background compaction goroutines
		close(e.done)
		e.done = nil
	}

	e.mu.Unlock()
	// Block until the compaction goroutines started by enableLevelCompactions exit.
	e.wg.Wait()
}
// enableSnapshotCompactions starts the background cache-snapshot goroutine if
// it is not already running. Uses double-checked locking: a cheap read-lock
// check first, then a re-check under the write lock before starting.
func (e *Engine) enableSnapshotCompactions() {
	// Check if already enabled under read lock
	e.mu.RLock()
	if e.snapDone != nil {
		e.mu.RUnlock()
		return
	}
	e.mu.RUnlock()

	// Check again under write lock
	e.mu.Lock()
	if e.snapDone != nil {
		e.mu.Unlock()
		return
	}

	e.Compactor.EnableSnapshots()
	quit := make(chan struct{})
	e.snapDone = quit
	e.snapWG.Add(1)
	e.mu.Unlock()

	go func() { defer e.snapWG.Done(); e.compactCache(quit) }()
}
// disableSnapshotCompactions signals the cache-snapshot goroutine to stop and
// waits for it to exit. Safe to call when snapshots are already disabled.
func (e *Engine) disableSnapshotCompactions() {
	e.mu.Lock()

	if e.snapDone != nil {
		close(e.snapDone)
		e.snapDone = nil
		e.Compactor.DisableSnapshots()
	}

	e.mu.Unlock()
	// Wait outside the lock so the goroutine can finish its current iteration.
	e.snapWG.Wait()
}
// Path returns the path the engine was opened with.
func (e *Engine) Path() string { return e.path }
// SetFieldName records a field name for a measurement in the underlying index.
func (e *Engine) SetFieldName(measurement []byte, name string) {
	e.index.SetFieldName(measurement, name)
}
// MeasurementExists reports whether the named measurement exists in the index.
func (e *Engine) MeasurementExists(name []byte) (bool, error) {
	return e.index.MeasurementExists(name)
}
// MeasurementNamesByExpr returns measurement names matching expr, delegating
// to the underlying index.
func (e *Engine) MeasurementNamesByExpr(expr influxql.Expr) ([][]byte, error) {
	return e.index.MeasurementNamesByExpr(expr)
}
// MeasurementNamesByRegex returns measurement names matching re, delegating
// to the underlying index.
func (e *Engine) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
	return e.index.MeasurementNamesByRegex(re)
}
// MeasurementFields returns the measurement fields for a measurement,
// creating the field set entry if it does not already exist.
func (e *Engine) MeasurementFields(measurement []byte) *tsdb.MeasurementFields {
	return e.fieldset.CreateFieldsIfNotExists(measurement)
}
// HasTagKey reports whether the measurement name has the tag key, delegating
// to the underlying index.
func (e *Engine) HasTagKey(name, key []byte) (bool, error) {
	return e.index.HasTagKey(name, key)
}
// MeasurementTagKeysByExpr returns the set of tag keys for the measurement
// that match expr, delegating to the underlying index.
func (e *Engine) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) {
	return e.index.MeasurementTagKeysByExpr(name, expr)
}
// MeasurementTagKeyValuesByExpr returns a set of tag values filtered by an expression.
//
// MeasurementTagKeyValuesByExpr relies on the provided tag keys being sorted.
// The caller can indicate the tag keys have been sorted by setting the
// keysSorted argument appropriately. Tag values are returned in a slice that
// is indexable according to the sorted order of the tag keys, e.g., the values
// for the earliest tag key will be available in index 0 of the returned values
// slice.
func (e *Engine) MeasurementTagKeyValuesByExpr(name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error) {
	return e.index.MeasurementTagKeyValuesByExpr(name, keys, expr, keysSorted)
}
// ForEachMeasurementTagKey invokes fn for each tag key of the measurement,
// delegating to the underlying index.
func (e *Engine) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error {
	return e.index.ForEachMeasurementTagKey(name, fn)
}
// TagKeyCardinality returns the number of values for the given tag key of the
// measurement, delegating to the underlying index.
func (e *Engine) TagKeyCardinality(name, key []byte) int {
	return e.index.TagKeyCardinality(name, key)
}
// SeriesN returns the unique number of series in the index.
func (e *Engine) SeriesN() int64 {
	return e.index.SeriesN()
}
// SeriesSketches returns the index's cardinality sketches for series.
func (e *Engine) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {
	return e.index.SeriesSketches()
}
// MeasurementsSketches returns the index's cardinality sketches for measurements.
func (e *Engine) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {
	return e.index.MeasurementsSketches()
}
// LastModified returns the time when this shard was last modified.
// It is the later of the WAL's last write time and the file store's
// last modification time.
func (e *Engine) LastModified() time.Time {
	walTime, fsTime := e.WAL.LastWriteTime(), e.FileStore.LastModified()
	if !walTime.After(fsTime) {
		return fsTime
	}
	return walTime
}
// EngineStatistics maintains statistics for the engine.
// All fields are updated with sync/atomic and read via Statistics().
type EngineStatistics struct {
	CacheCompactions        int64 // Counter of cache compactions that have ever run.
	CacheCompactionsActive  int64 // Gauge of cache compactions currently running.
	CacheCompactionErrors   int64 // Counter of cache compactions that have failed due to error.
	CacheCompactionDuration int64 // Counter of number of wall nanoseconds spent in cache compactions.

	TSMCompactions        [3]int64 // Counter of TSM compactions (by level) that have ever run.
	TSMCompactionsActive  [3]int64 // Gauge of TSM compactions (by level) currently running.
	TSMCompactionErrors   [3]int64 // Counter of TSM compactions (by level) that have failed due to error.
	TSMCompactionDuration [3]int64 // Counter of number of wall nanoseconds spent in TSM compactions (by level).

	TSMOptimizeCompactions        int64 // Counter of optimize compactions that have ever run.
	TSMOptimizeCompactionsActive  int64 // Gauge of optimize compactions currently running.
	TSMOptimizeCompactionErrors   int64 // Counter of optimize compactions that have failed due to error.
	TSMOptimizeCompactionDuration int64 // Counter of number of wall nanoseconds spent in optimize compactions.

	TSMFullCompactions        int64 // Counter of full compactions that have ever run.
	TSMFullCompactionsActive  int64 // Gauge of full compactions currently running.
	TSMFullCompactionErrors   int64 // Counter of full compactions that have failed due to error.
	TSMFullCompactionDuration int64 // Counter of number of wall nanoseconds spent in full compactions.
}
// Statistics returns statistics for periodic monitoring.
// It reports the engine's own counters under "tsm1_engine" and appends the
// statistics of the Cache, FileStore and WAL subsystems.
func (e *Engine) Statistics(tags map[string]string) []models.Statistic {
	statistics := make([]models.Statistic, 0, 4)
	statistics = append(statistics, models.Statistic{
		Name: "tsm1_engine",
		Tags: tags,
		Values: map[string]interface{}{
			statCacheCompactions:        atomic.LoadInt64(&e.stats.CacheCompactions),
			statCacheCompactionsActive:  atomic.LoadInt64(&e.stats.CacheCompactionsActive),
			statCacheCompactionError:    atomic.LoadInt64(&e.stats.CacheCompactionErrors),
			statCacheCompactionDuration: atomic.LoadInt64(&e.stats.CacheCompactionDuration),

			statTSMLevel1Compactions:        atomic.LoadInt64(&e.stats.TSMCompactions[0]),
			statTSMLevel1CompactionsActive:  atomic.LoadInt64(&e.stats.TSMCompactionsActive[0]),
			statTSMLevel1CompactionError:    atomic.LoadInt64(&e.stats.TSMCompactionErrors[0]),
			statTSMLevel1CompactionDuration: atomic.LoadInt64(&e.stats.TSMCompactionDuration[0]),

			statTSMLevel2Compactions:        atomic.LoadInt64(&e.stats.TSMCompactions[1]),
			statTSMLevel2CompactionsActive:  atomic.LoadInt64(&e.stats.TSMCompactionsActive[1]),
			statTSMLevel2CompactionError:    atomic.LoadInt64(&e.stats.TSMCompactionErrors[1]),
			statTSMLevel2CompactionDuration: atomic.LoadInt64(&e.stats.TSMCompactionDuration[1]),

			statTSMLevel3Compactions:        atomic.LoadInt64(&e.stats.TSMCompactions[2]),
			statTSMLevel3CompactionsActive:  atomic.LoadInt64(&e.stats.TSMCompactionsActive[2]),
			statTSMLevel3CompactionError:    atomic.LoadInt64(&e.stats.TSMCompactionErrors[2]),
			statTSMLevel3CompactionDuration: atomic.LoadInt64(&e.stats.TSMCompactionDuration[2]),

			statTSMOptimizeCompactions:        atomic.LoadInt64(&e.stats.TSMOptimizeCompactions),
			statTSMOptimizeCompactionsActive:  atomic.LoadInt64(&e.stats.TSMOptimizeCompactionsActive),
			statTSMOptimizeCompactionError:    atomic.LoadInt64(&e.stats.TSMOptimizeCompactionErrors),
			statTSMOptimizeCompactionDuration: atomic.LoadInt64(&e.stats.TSMOptimizeCompactionDuration),

			statTSMFullCompactions:        atomic.LoadInt64(&e.stats.TSMFullCompactions),
			statTSMFullCompactionsActive:  atomic.LoadInt64(&e.stats.TSMFullCompactionsActive),
			statTSMFullCompactionError:    atomic.LoadInt64(&e.stats.TSMFullCompactionErrors),
			statTSMFullCompactionDuration: atomic.LoadInt64(&e.stats.TSMFullCompactionDuration),
		},
	})

	statistics = append(statistics, e.Cache.Statistics(tags)...)
	statistics = append(statistics, e.FileStore.Statistics(tags)...)
	statistics = append(statistics, e.WAL.Statistics(tags)...)
	return statistics
}
// DiskSize returns the total size in bytes of all TSM and WAL segments on disk.
func (e *Engine) DiskSize() int64 {
	return e.FileStore.DiskSizeBytes() + e.WAL.DiskSizeBytes()
}
// Open opens and initializes the engine.
// Startup order matters: cleanup of stale files, then WAL, then FileStore,
// then replaying the WAL into the cache, and finally the compactor. Compaction
// goroutines are only started if SetEnabled has not disabled them.
func (e *Engine) Open() error {
	if err := os.MkdirAll(e.path, 0777); err != nil {
		return err
	}

	if err := e.cleanup(); err != nil {
		return err
	}

	if err := e.WAL.Open(); err != nil {
		return err
	}

	if err := e.FileStore.Open(); err != nil {
		return err
	}

	if err := e.reloadCache(); err != nil {
		return err
	}

	e.Compactor.Open()

	if e.enableCompactionsOnOpen {
		e.SetCompactionsEnabled(true)
	}

	return nil
}
// Close closes the engine. Subsequent calls to Close are a nop.
// Compactions are stopped (and waited for) before the file store and WAL
// are shut down.
func (e *Engine) Close() error {
	e.SetCompactionsEnabled(false)

	// Lock now and close everything else down.
	e.mu.Lock()
	defer e.mu.Unlock()
	e.done = nil // Ensures that the channel will not be closed again.

	if err := e.FileStore.Close(); err != nil {
		return err
	}
	return e.WAL.Close()
}
// WithLogger sets the logger for the engine.
// The trace logger only follows the main logger when trace logging was
// enabled at construction time; it is also propagated to the WAL and FileStore.
func (e *Engine) WithLogger(log zap.Logger) {
	e.logger = log.With(zap.String("engine", "tsm1"))

	if e.traceLogging {
		e.traceLogger = e.logger
	}

	e.WAL.WithLogger(e.logger)
	e.FileStore.WithLogger(e.logger)
}
// LoadMetadataIndex loads the shard metadata into memory.
//
// Every series/field key found in the TSM file store and in the cache is
// added to the index and measurement field set. A failure to determine a
// cache entry's type is logged but does not abort the load.
func (e *Engine) LoadMetadataIndex(shardID uint64, index tsdb.Index) error {
	now := time.Now()

	// Save reference to index for iterator creation.
	e.index = index

	if err := e.FileStore.WalkKeys(func(key []byte, typ byte) error {
		fieldType, err := tsmFieldTypeToInfluxQLDataType(typ)
		if err != nil {
			return err
		}

		if err := e.addToIndexFromKey(key, fieldType); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return err
	}

	// load metadata from the Cache
	if err := e.Cache.ApplyEntryFn(func(key []byte, entry *entry) error {
		fieldType, err := entry.values.InfluxQLType()
		if err != nil {
			// Log and continue with the (zero-valued) fieldType rather than failing the load.
			e.logger.Info(fmt.Sprintf("error getting the data type of values for key %s: %s", key, err.Error()))
		}

		if err := e.addToIndexFromKey(key, fieldType); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return err
	}

	e.traceLogger.Info(fmt.Sprintf("Meta data index for shard %d loaded in %v", shardID, time.Since(now)))
	return nil
}
// IsIdle returns true if the cache is empty, there are no running compactions and the
// shard is fully compacted.
func (e *Engine) IsIdle() bool {
	if e.Cache.Size() != 0 {
		return false
	}

	// Sum every compaction gauge; any non-zero value means work is in flight.
	active := atomic.LoadInt64(&e.stats.CacheCompactionsActive) +
		atomic.LoadInt64(&e.stats.TSMFullCompactionsActive) +
		atomic.LoadInt64(&e.stats.TSMOptimizeCompactionsActive)
	for i := range e.stats.TSMCompactionsActive {
		active += atomic.LoadInt64(&e.stats.TSMCompactionsActive[i])
	}

	return active == 0 && e.CompactionPlan.FullyCompacted()
}
// Backup writes a tar archive of any TSM files modified since the passed
// in time to the passed in writer. The basePath will be prepended to the names
// of the files in the archive. It will force a snapshot of the WAL first
// then perform the backup with a read lock against the file store. This means
// that new TSM files will not be able to be created in this shard while the
// backup is running. For shards that are still actively getting writes, this
// could cause the WAL to back up, increasing memory usage and eventually rejecting writes.
func (e *Engine) Backup(w io.Writer, basePath string, since time.Time) error {
	path, err := e.CreateSnapshot()
	if err != nil {
		return err
	}

	if err := e.index.SnapshotTo(path); err != nil {
		return err
	}

	tw := tar.NewWriter(w)
	defer tw.Close()

	// Remove the temporary snapshot dir
	defer os.RemoveAll(path)

	// Recursively read all files from path.
	files, err := readDir(path, "")
	if err != nil {
		return err
	}

	// Filter paths to only changed files.
	var filtered []string
	for _, file := range files {
		fi, err := os.Stat(filepath.Join(path, file))
		if err != nil {
			return err
		} else if !fi.ModTime().After(since) {
			continue
		}
		filtered = append(filtered, file)
	}
	if len(filtered) == 0 {
		return nil
	}

	for _, f := range filtered {
		if err := e.writeFileToBackup(f, basePath, filepath.Join(path, f), tw); err != nil {
			return err
		}
	}

	return nil
}
// writeFileToBackup copies the file into the tar archive. Files will use the shardRelativePath
// in their names. This should be the <db>/<retention policy>/<id> part of the path.
func (e *Engine) writeFileToBackup(name string, shardRelativePath, fullPath string, tw *tar.Writer) error {
	f, err := os.Stat(fullPath)
	if err != nil {
		return err
	}

	h := &tar.Header{
		// tar headers are slash-separated regardless of OS.
		Name:    filepath.ToSlash(filepath.Join(shardRelativePath, name)),
		ModTime: f.ModTime(),
		Size:    f.Size(),
		Mode:    int64(f.Mode()),
	}
	if err := tw.WriteHeader(h); err != nil {
		return err
	}
	fr, err := os.Open(fullPath)
	if err != nil {
		return err
	}

	defer fr.Close()

	// Copy exactly the size recorded in the header.
	_, err = io.CopyN(tw, fr, h.Size)

	return err
}
// Restore reads a tar archive generated by Backup().
// Only files that match basePath will be copied into the directory. This obtains
// a write lock so no operations can be performed while restoring.
func (e *Engine) Restore(r io.Reader, basePath string) error {
	return e.overlay(r, basePath, false)
}
// Import reads a tar archive generated by Backup() and adds each
// file matching basePath as a new TSM file. This obtains
// a write lock so no operations can be performed while Importing.
func (e *Engine) Import(r io.Reader, basePath string) error {
	return e.overlay(r, basePath, true)
}
// overlay reads a tar archive generated by Backup() and adds each file
// from the archive matching basePath to the shard.
// If asNew is true, each file will be installed as a new TSM file even if an
// existing file with the same name in the backup exists.
func (e *Engine) overlay(r io.Reader, basePath string, asNew bool) error {
	// Copy files from archive while under lock to prevent reopening.
	// The closure scopes the write lock to just the file-copy phase.
	newFiles, err := func() ([]string, error) {
		e.mu.Lock()
		defer e.mu.Unlock()

		var newFiles []string
		tr := tar.NewReader(r)
		for {
			if fileName, err := e.readFileFromBackup(tr, basePath, asNew); err == io.EOF {
				break
			} else if err != nil {
				return nil, err
			} else if fileName != "" {
				newFiles = append(newFiles, fileName)
			}
		}

		if err := syncDir(e.path); err != nil {
			return nil, err
		}

		if err := e.FileStore.Replace(nil, newFiles); err != nil {
			return nil, err
		}
		return newFiles, nil
	}()

	if err != nil {
		return err
	}

	// Load any new series keys to the index
	readers := make([]chan seriesKey, 0, len(newFiles))
	for _, f := range newFiles {
		ch := make(chan seriesKey, 1)
		readers = append(readers, ch)

		// If asNew is true, the files created from readFileFromBackup will be new ones
		// having a temp extension.
		f = strings.TrimSuffix(f, ".tmp")

		fd, err := os.Open(f)
		if err != nil {
			return err
		}

		r, err := NewTSMReader(fd)
		if err != nil {
			return err
		}
		// NOTE(review): defer in a loop keeps every reader open until overlay
		// returns; acceptable while the key merge below is short-lived, but
		// worth confirming for archives with many files.
		defer r.Close()

		go func(c chan seriesKey, r *TSMReader) {
			n := r.KeyCount()
			for i := 0; i < n; i++ {
				key, typ := r.KeyAt(i)
				c <- seriesKey{key, typ}
			}
			close(c)
		}(ch, r)
	}

	// Merge and dedup all the series keys across each reader to reduce
	// lock contention on the index.
	merged := merge(readers...)
	for v := range merged {
		fieldType, err := tsmFieldTypeToInfluxQLDataType(v.typ)
		if err != nil {
			return err
		}

		if err := e.addToIndexFromKey(v.key, fieldType); err != nil {
			return err
		}
	}
	return nil
}
// readFileFromBackup copies the next file from the archive into the shard.
// The file is skipped if it does not have a matching shardRelativePath prefix.
// If asNew is true, each file will be installed as a new TSM file even if an
// existing file with the same name in the backup exists.
//
// Returns the path of the written .tmp file (empty if skipped), or io.EOF
// when the archive is exhausted.
func (e *Engine) readFileFromBackup(tr *tar.Reader, shardRelativePath string, asNew bool) (string, error) {
	// Read next archive file.
	hdr, err := tr.Next()
	if err != nil {
		return "", err
	}

	nativeFileName := filepath.FromSlash(hdr.Name)

	// Skip file if it does not have a matching prefix.
	if !filepath.HasPrefix(nativeFileName, shardRelativePath) {
		return "", nil
	}
	filename, err := filepath.Rel(shardRelativePath, nativeFileName)
	if err != nil {
		return "", err
	}

	if asNew {
		// Allocate a fresh generation so the imported file cannot collide
		// with an existing TSM file of the same name.
		filename = fmt.Sprintf("%09d-%09d.%s", e.FileStore.NextGeneration(), 1, TSMFileExtension)
	}

	destPath := filepath.Join(e.path, filename)
	tmp := destPath + ".tmp"

	// Create new file on disk.
	f, err := os.OpenFile(tmp, os.O_CREATE|os.O_RDWR, 0666)
	if err != nil {
		return "", err
	}
	defer f.Close()

	// Copy from archive to the file.
	if _, err := io.CopyN(f, tr, hdr.Size); err != nil {
		return "", err
	}

	// Sync to disk & close.
	if err := f.Sync(); err != nil {
		return "", err
	}

	return tmp, nil
}
// addToIndexFromKey will pull the measurement name, series key, and field name from a composite key and add it to the
// database index and measurement fields.
func (e *Engine) addToIndexFromKey(key []byte, fieldType influxql.DataType) error {
	seriesKey, field := SeriesAndFieldFromCompositeKey(key)
	name := tsdb.MeasurementFromSeriesKey(seriesKey)

	mf := e.fieldset.CreateFieldsIfNotExists(name)
	if err := mf.CreateFieldIfNotExists(field, fieldType, false); err != nil {
		return err
	}

	// Build in-memory index, if necessary.
	if e.index.Type() == inmem.IndexName {
		tags, _ := models.ParseTags(seriesKey)
		if err := e.index.InitializeSeries(seriesKey, name, tags); err != nil {
			return err
		}
	}

	return nil
}
// WritePoints writes metadata and point data into the engine.
// It returns an error if new points are added to an existing key.
//
// Points are exploded into per-field values keyed by the composite
// series+separator+field key, written to the cache first and then to the WAL.
func (e *Engine) WritePoints(points []models.Point) error {
	values := make(map[string][]Value, len(points))
	var keyBuf []byte
	var baseLen int

	for _, p := range points {
		// Reuse keyBuf across points/fields: series key + separator is the
		// stable prefix; each field name is appended at baseLen.
		keyBuf = append(keyBuf[:0], p.Key()...)
		keyBuf = append(keyBuf, keyFieldSeparator...)
		baseLen = len(keyBuf)
		iter := p.FieldIterator()
		t := p.Time().UnixNano()
		for iter.Next() {
			// Skip fields named "time"; they are illegal.
			if bytes.Equal(iter.FieldKey(), timeBytes) {
				continue
			}

			keyBuf = append(keyBuf[:baseLen], iter.FieldKey()...)
			var v Value
			switch iter.Type() {
			case models.Float:
				fv, err := iter.FloatValue()
				if err != nil {
					return err
				}
				v = NewFloatValue(t, fv)
			case models.Integer:
				iv, err := iter.IntegerValue()
				if err != nil {
					return err
				}
				v = NewIntegerValue(t, iv)
			case models.Unsigned:
				iv, err := iter.UnsignedValue()
				if err != nil {
					return err
				}
				v = NewUnsignedValue(t, iv)
			case models.String:
				v = NewStringValue(t, iter.StringValue())
			case models.Boolean:
				bv, err := iter.BooleanValue()
				if err != nil {
					return err
				}
				v = NewBooleanValue(t, bv)
			default:
				return fmt.Errorf("unknown field type for %s: %s", string(iter.FieldKey()), p.String())
			}
			values[string(keyBuf)] = append(values[string(keyBuf)], v)
		}
	}

	e.mu.RLock()
	defer e.mu.RUnlock()

	// first try to write to the cache
	err := e.Cache.WriteMulti(values)
	if err != nil {
		return err
	}

	_, err = e.WAL.WriteMulti(values)
	return err
}
// containsSeries returns a map of keys indicating whether the key exists and
// has values or not.
func (e *Engine) containsSeries(keys [][]byte) (map[string]bool, error) {
	// keyMap is used to see if a given key exists. keys
	// are the measurement + tagset (minus separator & field)
	keyMap := map[string]bool{}
	for _, k := range keys {
		keyMap[string(k)] = false
	}

	// Mark any series present in the cache...
	for _, k := range e.Cache.unsortedKeys() {
		seriesKey, _ := SeriesAndFieldFromCompositeKey([]byte(k))
		keyMap[string(seriesKey)] = true
	}

	// ...and any present in the TSM file store. Only keys already requested
	// are marked, so no extra entries are introduced.
	if err := e.FileStore.WalkKeys(func(k []byte, _ byte) error {
		seriesKey, _ := SeriesAndFieldFromCompositeKey(k)
		if _, ok := keyMap[string(seriesKey)]; ok {
			keyMap[string(seriesKey)] = true
		}
		return nil
	}); err != nil {
		return nil, err
	}

	return keyMap, nil
}
// deleteSeries removes all series keys from the engine by deleting over the
// full time range.
func (e *Engine) deleteSeries(seriesKeys [][]byte) error {
	return e.DeleteSeriesRange(seriesKeys, math.MinInt64, math.MaxInt64)
}
// DeleteSeriesRange removes the values between min and max (inclusive) from all series.
//
// Deletion proceeds in order: TSM files (via tombstones), then the cache,
// then the WAL. Any series left with no data at all is finally unassigned
// from the index.
func (e *Engine) DeleteSeriesRange(seriesKeys [][]byte, min, max int64) error {
	if len(seriesKeys) == 0 {
		return nil
	}

	// Ensure keys are sorted since lower layers require them to be.
	if !bytesutil.IsSorted(seriesKeys) {
		bytesutil.Sort(seriesKeys)
	}

	// Disable and abort running compactions so that tombstones added existing tsm
	// files don't get removed. This would cause deleted measurements/series to
	// re-appear once the compaction completed. We only disable the level compactions
	// so that snapshotting does not stop while writing out tombstones. If it is stopped,
	// and writing tombstones takes a long time, writes can get rejected due to the cache
	// filling up.
	e.disableLevelCompactions(true)
	defer e.enableLevelCompactions(true)

	tempKeys := seriesKeys[:]
	deleteKeys := make([][]byte, 0, len(seriesKeys))
	// go through the keys in the file store
	if err := e.FileStore.WalkKeys(func(k []byte, _ byte) error {
		seriesKey, _ := SeriesAndFieldFromCompositeKey(k)

		// Both tempKeys and keys walked are sorted, skip any passed in keys
		// that don't exist in our key set.
		for len(tempKeys) > 0 && bytes.Compare(tempKeys[0], seriesKey) < 0 {
			tempKeys = tempKeys[1:]
		}

		// Keys match, add the full series key to delete.
		if len(tempKeys) > 0 && bytes.Equal(tempKeys[0], seriesKey) {
			deleteKeys = append(deleteKeys, k)
		}

		return nil
	}); err != nil {
		return err
	}

	if err := e.FileStore.DeleteRange(deleteKeys, min, max); err != nil {
		return err
	}

	// find the keys in the cache and remove them
	// walKeys reuses deleteKeys' backing array; deleteKeys is not read again.
	walKeys := deleteKeys[:0]

	// ApplySerialEntryFn cannot return an error in this invocation.
	_ = e.Cache.ApplyEntryFn(func(k []byte, _ *entry) error {
		seriesKey, _ := SeriesAndFieldFromCompositeKey([]byte(k))

		// Cache does not walk keys in sorted order, so search the sorted
		// series we need to delete to see if any of the cache keys match.
		i := bytesutil.SearchBytes(seriesKeys, seriesKey)
		if i < len(seriesKeys) && bytes.Equal(seriesKey, seriesKeys[i]) {
			// k is the measurement + tags + sep + field
			walKeys = append(walKeys, k)
		}
		return nil
	})

	e.Cache.DeleteRange(walKeys, min, max)

	// delete from the WAL
	if _, err := e.WAL.DeleteRange(walKeys, min, max); err != nil {
		return err
	}

	// Have we deleted all points for the series? If so, we need to remove
	// the series from the index.
	existing, err := e.containsSeries(seriesKeys)
	if err != nil {
		return err
	}

	for k, exists := range existing {
		if !exists {
			if err := e.index.UnassignShard(k, e.id); err != nil {
				return err
			}
		}
	}

	return nil
}
// DeleteMeasurement deletes a measurement and all related series.
func (e *Engine) DeleteMeasurement(name []byte) error {
	// Delete the bulk of data outside of the fields lock.
	if err := e.deleteMeasurement(name); err != nil {
		return err
	}

	// Under lock, delete any series created during the deletion.
	if err := e.fieldset.DeleteWithLock(string(name), func() error {
		return e.deleteMeasurement(name)
	}); err != nil {
		return err
	}

	return nil
}
// deleteMeasurement deletes a measurement and all related series.
func (e *Engine) deleteMeasurement(name []byte) error {
	// Attempt to find the series keys.
	keys, err := e.index.MeasurementSeriesKeysByExpr(name, nil)
	if err != nil {
		return err
	} else if len(keys) > 0 {
		if err := e.deleteSeries(keys); err != nil {
			return err
		}
	}
	return nil
}
// ForEachMeasurementName iterates over each measurement name in the engine.
func (e *Engine) ForEachMeasurementName(fn func(name []byte) error) error {
	return e.index.ForEachMeasurementName(fn)
}
// MeasurementSeriesKeysByExpr returns a list of series keys matching expr.
func (e *Engine) MeasurementSeriesKeysByExpr(name []byte, expr influxql.Expr) ([][]byte, error) {
	return e.index.MeasurementSeriesKeysByExpr(name, expr)
}
// CreateSeriesListIfNotExists creates a list of series in the index if they
// don't already exist, delegating to the underlying index.
func (e *Engine) CreateSeriesListIfNotExists(keys, names [][]byte, tagsSlice []models.Tags) error {
	return e.index.CreateSeriesListIfNotExists(keys, names, tagsSlice)
}
// CreateSeriesIfNotExists creates a single series in the index if it doesn't
// already exist, delegating to the underlying index.
func (e *Engine) CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error {
	return e.index.CreateSeriesIfNotExists(key, name, tags)
}
// WriteTo is not implemented.
func (e *Engine) WriteTo(w io.Writer) (n int64, err error) { panic("not implemented") }
// WriteSnapshot will snapshot the cache and write a new TSM file with its contents, releasing the snapshot when done.
func (e *Engine) WriteSnapshot() error {
	// Lock and grab the cache snapshot along with all the closed WAL
	// filenames associated with the snapshot

	var started *time.Time

	defer func() {
		// Only log/record timing if the locked section below actually ran.
		if started != nil {
			e.Cache.UpdateCompactTime(time.Since(*started))
			e.logger.Info(fmt.Sprintf("Snapshot for path %s written in %v", e.path, time.Since(*started)))
		}
	}()

	closedFiles, snapshot, err := func() ([]string, *Cache, error) {
		e.mu.Lock()
		defer e.mu.Unlock()

		now := time.Now()
		started = &now

		if err := e.WAL.CloseSegment(); err != nil {
			return nil, nil, err
		}

		segments, err := e.WAL.ClosedSegments()
		if err != nil {
			return nil, nil, err
		}

		snapshot, err := e.Cache.Snapshot()
		if err != nil {
			return nil, nil, err
		}

		return segments, snapshot, nil
	}()

	if err != nil {
		return err
	}

	if snapshot.Size() == 0 {
		// Nothing to write; release the (empty) snapshot immediately.
		e.Cache.ClearSnapshot(true)
		return nil
	}

	// The snapshotted cache may have duplicate points and unsorted data. We need to deduplicate
	// it before writing the snapshot. This can be very expensive so it's done while we are not
	// holding the engine write lock.
	dedup := time.Now()
	snapshot.Deduplicate()
	e.traceLogger.Info(fmt.Sprintf("Snapshot for path %s deduplicated in %v", e.path, time.Since(dedup)))

	return e.writeSnapshotAndCommit(closedFiles, snapshot)
}
// CreateSnapshot will create a temp directory that holds
// temporary hardlinks to the underlying shard files.
// The cache is flushed to TSM first so the snapshot captures all data.
func (e *Engine) CreateSnapshot() (string, error) {
	if err := e.WriteSnapshot(); err != nil {
		return "", err
	}

	e.mu.RLock()
	defer e.mu.RUnlock()

	return e.FileStore.CreateSnapshot()
}
// writeSnapshotAndCommit will write the passed cache to a new TSM file and remove the closed WAL segments.
func (e *Engine) writeSnapshotAndCommit(closedFiles []string, snapshot *Cache) (err error) {
	defer func() {
		// On any failure, release the snapshot without marking it flushed so
		// its data stays in the cache for a later retry.
		if err != nil {
			e.Cache.ClearSnapshot(false)
		}
	}()

	// write the new snapshot files
	newFiles, err := e.Compactor.WriteSnapshot(snapshot)
	if err != nil {
		e.logger.Info(fmt.Sprintf("error writing snapshot from compactor: %v", err))
		return err
	}

	e.mu.RLock()
	defer e.mu.RUnlock()

	// update the file store with these new files
	if err := e.FileStore.Replace(nil, newFiles); err != nil {
		e.logger.Info(fmt.Sprintf("error adding new TSM files from snapshot: %v", err))
		return err
	}

	// clear the snapshot from the in-memory cache, then the old WAL files
	e.Cache.ClearSnapshot(true)

	if err := e.WAL.Remove(closedFiles); err != nil {
		// Non-fatal: leftover WAL segments are logged but do not fail the commit.
		e.logger.Info(fmt.Sprintf("error removing closed wal segments: %v", err))
	}

	return nil
}
// compactCache continually checks if the WAL cache should be written to disk.
// Runs in its own goroutine (started by enableSnapshotCompactions) on a
// one-second tick until quit is closed.
func (e *Engine) compactCache(quit <-chan struct{}) {
	t := time.NewTicker(time.Second)
	defer t.Stop()
	for {
		select {
		case <-quit:
			return

		case <-t.C:
			e.Cache.UpdateAge()
			if e.ShouldCompactCache(e.WAL.LastWriteTime()) {
				start := time.Now()
				e.traceLogger.Info(fmt.Sprintf("Compacting cache for %s", e.path))
				err := e.WriteSnapshot()
				// errCompactionsDisabled is an expected shutdown race, not an error.
				if err != nil && err != errCompactionsDisabled {
					e.logger.Info(fmt.Sprintf("error writing snapshot: %v", err))
					atomic.AddInt64(&e.stats.CacheCompactionErrors, 1)
				} else {
					atomic.AddInt64(&e.stats.CacheCompactions, 1)
				}
				atomic.AddInt64(&e.stats.CacheCompactionDuration, time.Since(start).Nanoseconds())
			}
		}
	}
}
// ShouldCompactCache returns true if the Cache is over its flush threshold
// or if the passed in lastWriteTime is older than the write cold threshold.
func (e *Engine) ShouldCompactCache(lastWriteTime time.Time) bool {
	sz := e.Cache.Size()
	switch {
	case sz == 0:
		// An empty cache never needs flushing.
		return false
	case sz > e.CacheFlushMemorySizeThreshold:
		return true
	default:
		return time.Since(lastWriteTime) > e.CacheFlushWriteColdDuration
	}
}
// compactTSMLevel runs level compactions for the given level once per second
// until quit is closed. Each planned strategy is applied and its file
// references released back to the compaction planner.
func (e *Engine) compactTSMLevel(fast bool, level int, quit <-chan struct{}) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-quit:
			return

		case <-ticker.C:
			strategy := e.levelCompactionStrategy(fast, level)
			if strategy == nil {
				continue
			}
			strategy.Apply()
			// Release the files in the compaction plan
			e.CompactionPlan.Release(strategy.compactionGroups)
		}
	}
}
// compactTSMFull runs full/optimize compactions once per second until quit is
// closed. Each planned strategy is applied and its file references released
// back to the compaction planner.
func (e *Engine) compactTSMFull(quit <-chan struct{}) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-quit:
			return

		case <-ticker.C:
			strategy := e.fullCompactionStrategy()
			if strategy == nil {
				continue
			}
			strategy.Apply()
			// Release the files in the compaction plan
			e.CompactionPlan.Release(strategy.compactionGroups)
		}
	}
}
// onFileStoreReplace is callback handler invoked when the FileStore
// has replaced one set of TSM files with a new set. It refreshes the index
// with the series keys from the new files and from the in-memory cache.
// All refresh failures are best-effort: logged and skipped.
func (e *Engine) onFileStoreReplace(newFiles []TSMFile) {
	// Load any new series keys to the index. One goroutine per new file
	// streams that file's keys into a dedicated channel.
	readers := make([]chan seriesKey, 0, len(newFiles))
	for _, r := range newFiles {
		ch := make(chan seriesKey, 1)
		readers = append(readers, ch)

		go func(c chan seriesKey, r TSMFile) {
			n := r.KeyCount()
			for i := 0; i < n; i++ {
				key, typ := r.KeyAt(i)
				c <- seriesKey{key, typ}
			}
			close(c)
		}(ch, r)
	}

	// Merge and dedup all the series keys across each reader to reduce
	// lock contention on the index.
	merged := merge(readers...)
	for v := range merged {
		fieldType, err := tsmFieldTypeToInfluxQLDataType(v.typ)
		if err != nil {
			e.logger.Error(fmt.Sprintf("refresh index (1): %v", err))
			continue
		}

		if err := e.addToIndexFromKey(v.key, fieldType); err != nil {
			e.logger.Error(fmt.Sprintf("refresh index (2): %v", err))
			continue
		}
	}

	// load metadata from the Cache
	e.Cache.ApplyEntryFn(func(key []byte, entry *entry) error {
		fieldType, err := entry.values.InfluxQLType()
		if err != nil {
			e.logger.Error(fmt.Sprintf("refresh index (3): %v", err))
			// Return nil so iteration over the remaining entries continues.
			return nil
		}

		if err := e.addToIndexFromKey(key, fieldType); err != nil {
			e.logger.Error(fmt.Sprintf("refresh index (4): %v", err))
			return nil
		}

		return nil
	})
}
// compactionStrategy holds the details of what to do in a compaction.
type compactionStrategy struct {
	// compactionGroups are the sets of TSM files to compact together.
	compactionGroups []CompactionGroup

	// concurrency determines how many compactions groups will be started
	// concurrently.  These groups may be limited by the global limiter if
	// enabled.
	concurrency int
	// fast selects the fast compaction path (CompactFast) when true,
	// otherwise CompactFull is used.
	fast bool
	// description is interpolated into log messages (e.g. "level 2", "full").
	description string

	// Stat counters updated as groups run; each points at a counter on the
	// engine's stats struct.
	durationStat *int64
	activeStat   *int64
	successStat  *int64
	errorStat    *int64

	logger    zap.Logger
	compactor *Compactor
	fileStore *FileStore
	// limiter optionally bounds compactions globally across strategies.
	limiter limiter.Fixed
	engine  *Engine
}
// Apply concurrently compacts all the groups in a compaction strategy.
// It blocks until every group has finished, then records the total elapsed
// time in durationStat.
func (s *compactionStrategy) Apply() {
	start := time.Now()

	// cap concurrent compaction groups to no more than 4 at a time.
	concurrency := s.concurrency
	if concurrency == 0 {
		concurrency = 4
	}

	// Local throttle for this run only; s.limiter (checked inside
	// compactGroup) additionally bounds compactions globally.
	throttle := limiter.NewFixed(concurrency)

	var wg sync.WaitGroup
	for i := range s.compactionGroups {
		wg.Add(1)
		go func(groupNum int) {
			defer wg.Done()

			// limit concurrent compaction groups
			throttle.Take()
			defer throttle.Release()

			s.compactGroup(groupNum)
		}(i)
	}
	wg.Wait()

	atomic.AddInt64(s.durationStat, time.Since(start).Nanoseconds())
}
// compactGroup executes the compaction strategy against a single CompactionGroup.
// Aborted compactions (disabled or already in progress) are logged but not
// counted as errors; genuine failures bump errorStat and back off for a second.
func (s *compactionStrategy) compactGroup(groupNum int) {
	// Limit concurrent compactions if we have a limiter
	if cap(s.limiter) > 0 {
		s.limiter.Take()
		defer s.limiter.Release()
	}

	group := s.compactionGroups[groupNum]
	start := time.Now()
	s.logger.Info(fmt.Sprintf("beginning %s compaction of group %d, %d TSM files", s.description, groupNum, len(group)))
	for i, f := range group {
		s.logger.Info(fmt.Sprintf("compacting %s group (%d) %s (#%d)", s.description, groupNum, f, i))
	}

	// Run the compaction in a closure so the active-stat gauge covers only
	// the time actually spent compacting.
	files, err := func() ([]string, error) {
		// Count the compaction as active only while the compaction is actually running.
		atomic.AddInt64(s.activeStat, 1)
		defer atomic.AddInt64(s.activeStat, -1)

		if s.fast {
			return s.compactor.CompactFast(group)
		} else {
			return s.compactor.CompactFull(group)
		}
	}()

	if err != nil {
		_, inProgress := err.(errCompactionInProgress)
		if err == errCompactionsDisabled || inProgress {
			s.logger.Info(fmt.Sprintf("aborted %s compaction group (%d). %v", s.description, groupNum, err))

			// Back off briefly when another compaction holds these files.
			if _, ok := err.(errCompactionInProgress); ok {
				time.Sleep(time.Second)
			}
			return
		}

		s.logger.Info(fmt.Sprintf("error compacting TSM files: %v", err))
		atomic.AddInt64(s.errorStat, 1)
		time.Sleep(time.Second)
		return
	}

	// Swap the compacted files into the file store; onFileStoreReplace
	// refreshes the index from the new files.
	if err := s.fileStore.ReplaceWithCallback(group, files, s.engine.onFileStoreReplace); err != nil {
		s.logger.Info(fmt.Sprintf("error replacing new TSM files: %v", err))
		atomic.AddInt64(s.errorStat, 1)
		time.Sleep(time.Second)
		return
	}

	for i, f := range files {
		s.logger.Info(fmt.Sprintf("compacted %s group (%d) into %s (#%d)", s.description, groupNum, f, i))
	}
	s.logger.Info(fmt.Sprintf("compacted %s %d files into %d files in %s", s.description, len(group), len(files), time.Since(start)))
	atomic.AddInt64(s.successStat, 1)
}
// levelCompactionStrategy returns a compactionStrategy for the given level.
// It returns nil if there are no TSM files to compact.
func (e *Engine) levelCompactionStrategy(fast bool, level int) *compactionStrategy {
	compactionGroups := e.CompactionPlan.PlanLevel(level)
	if len(compactionGroups) == 0 {
		return nil
	}

	return &compactionStrategy{
		concurrency:      4,
		compactionGroups: compactionGroups,
		logger:           e.logger,
		fileStore:        e.FileStore,
		compactor:        e.Compactor,
		fast:             fast,
		limiter:          e.compactionLimiter,
		engine:           e,

		description: fmt.Sprintf("level %d", level),
		// Per-level stat slices are zero-indexed, so level N uses slot N-1.
		activeStat:   &e.stats.TSMCompactionsActive[level-1],
		successStat:  &e.stats.TSMCompactions[level-1],
		errorStat:    &e.stats.TSMCompactionErrors[level-1],
		durationStat: &e.stats.TSMCompactionDuration[level-1],
	}
}
// fullCompactionStrategy returns a compactionStrategy for higher level generations of TSM files.
// It returns nil if there are no TSM files to compact. When the regular full
// plan is empty it falls back to the optimize plan (and then uses the fast
// compaction path plus the optimize stat counters).
func (e *Engine) fullCompactionStrategy() *compactionStrategy {
	optimize := false
	compactionGroups := e.CompactionPlan.Plan(e.WAL.LastWriteTime())

	if len(compactionGroups) == 0 {
		optimize = true
		compactionGroups = e.CompactionPlan.PlanOptimize()
	}

	if len(compactionGroups) == 0 {
		return nil
	}

	s := &compactionStrategy{
		concurrency:      1,
		compactionGroups: compactionGroups,
		logger:           e.logger,
		fileStore:        e.FileStore,
		compactor:        e.Compactor,
		fast:             optimize,
		limiter:          e.compactionLimiter,
		engine:           e,
	}

	if optimize {
		s.description = "optimize"
		s.activeStat = &e.stats.TSMOptimizeCompactionsActive
		s.successStat = &e.stats.TSMOptimizeCompactions
		s.errorStat = &e.stats.TSMOptimizeCompactionErrors
		s.durationStat = &e.stats.TSMOptimizeCompactionDuration
	} else {
		s.description = "full"
		s.activeStat = &e.stats.TSMFullCompactionsActive
		s.successStat = &e.stats.TSMFullCompactions
		s.errorStat = &e.stats.TSMFullCompactionErrors
		s.durationStat = &e.stats.TSMFullCompactionDuration
	}

	return s
}
// reloadCache reads the WAL segment files and loads them into the cache.
func (e *Engine) reloadCache() error {
	now := time.Now()
	files, err := segmentFileNames(e.WAL.Path())
	if err != nil {
		return err
	}

	// Remember the configured limit so it can be restored after loading.
	limit := e.Cache.MaxSize()
	defer func() {
		e.Cache.SetMaxSize(limit)
	}()

	// Disable the max size during loading (restored by the deferred call above)
	e.Cache.SetMaxSize(0)

	loader := NewCacheLoader(files)
	loader.WithLogger(e.logger)
	if err := loader.Load(e.Cache); err != nil {
		return err
	}

	e.traceLogger.Info(fmt.Sprintf("Reloaded WAL cache %s in %v", e.WAL.Path(), time.Since(now)))
	return nil
}
// cleanup removes all temp files and dirs that exist on disk. This is should only be run at startup to avoid
// removing tmp files that are still in use.
func (e *Engine) cleanup() error {
	entries, err := ioutil.ReadDir(e.path)
	if err != nil {
		// A missing shard directory means there is nothing to clean up.
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}

	for _, entry := range entries {
		// Remove any `.tmp` directories left over from failed shard snapshots.
		if !entry.IsDir() || !strings.HasSuffix(entry.Name(), ".tmp") {
			continue
		}
		if err := os.RemoveAll(filepath.Join(e.path, entry.Name())); err != nil {
			return fmt.Errorf("error removing tmp snapshot directory %q: %s", entry.Name(), err)
		}
	}

	return e.cleanupTempTSMFiles()
}
// cleanupTempTSMFiles deletes any in-progress compaction output files
// (files with the compaction temp extension) left in the shard directory.
func (e *Engine) cleanupTempTSMFiles() error {
	pattern := filepath.Join(e.path, fmt.Sprintf("*.%s", CompactionTempExtension))
	tmpFiles, err := filepath.Glob(pattern)
	if err != nil {
		return fmt.Errorf("error getting compaction temp files: %s", err.Error())
	}

	for _, name := range tmpFiles {
		if err := os.Remove(name); err != nil {
			return fmt.Errorf("error removing temp compaction files: %v", err)
		}
	}
	return nil
}
// KeyCursor returns a KeyCursor for the given key starting at time t.
// It delegates directly to the engine's FileStore; ascending controls the
// traversal direction.
func (e *Engine) KeyCursor(key []byte, t int64, ascending bool) *KeyCursor {
	return e.FileStore.KeyCursor(key, t, ascending)
}
// CreateIterator returns an iterator for the measurement based on opt.
func (e *Engine) CreateIterator(measurement string, opt query.IteratorOptions) (query.Iterator, error) {
	if call, ok := opt.Expr.(*influxql.Call); ok {
		if opt.Interval.IsZero() {
			// Fast path: bare first()/last() with no GROUP BY time() is
			// answered with one ordered point per series (Limit=1).
			if call.Name == "first" || call.Name == "last" {
				refOpt := opt
				refOpt.Limit = 1
				refOpt.Ascending = call.Name == "first"
				refOpt.Ordered = true
				refOpt.Expr = call.Args[0]

				itrs, err := e.createVarRefIterator(measurement, refOpt)
				if err != nil {
					return nil, err
				}
				return newMergeGuardIterator(itrs, opt)
			}
		}

		inputs, err := e.createCallIterator(measurement, call, opt)
		if err != nil {
			return nil, err
		} else if len(inputs) == 0 {
			return nil, nil
		}
		return newMergeGuardIterator(inputs, opt)
	}

	// Non-call expression: plain variable reference iterators.
	itrs, err := e.createVarRefIterator(measurement, opt)
	if err != nil {
		return nil, err
	}
	return newMergeGuardIterator(itrs, opt)
}
// createCallIterator creates one iterator per tag set for the given function
// call. It returns (nil, nil) when the measurement does not exist. When the
// query is interrupted via opt.InterruptCh, all created iterators are closed
// and query.ErrQueryInterrupted is returned.
func (e *Engine) createCallIterator(measurement string, call *influxql.Call, opt query.IteratorOptions) ([]query.Iterator, error) {
	ref, _ := call.Args[0].(*influxql.VarRef)

	if exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil {
		return nil, err
	} else if !exists {
		return nil, nil
	}

	// Determine tagsets for this measurement based on dimensions and filters.
	tagSets, err := e.index.TagSets([]byte(measurement), opt)
	if err != nil {
		return nil, err
	}

	// Reverse the tag sets if we are ordering by descending.
	if !opt.Ascending {
		for _, t := range tagSets {
			t.Reverse()
		}
	}

	// Calculate tag sets and apply SLIMIT/SOFFSET.
	tagSets = query.LimitTagSets(tagSets, opt.SLimit, opt.SOffset)

	itrs := make([]query.Iterator, 0, len(tagSets))
	if err := func() error {
		for _, t := range tagSets {
			// Abort if the query was killed
			select {
			case <-opt.InterruptCh:
				query.Iterators(itrs).Close()
				// BUG FIX: this previously returned the outer `err`, which is
				// always nil here (checked after TagSets above), so the caller
				// received closed iterators with no error. Surface the
				// interruption explicitly instead.
				return query.ErrQueryInterrupted
			default:
			}

			inputs, err := e.createTagSetIterators(ref, measurement, t, opt)
			if err != nil {
				return err
			} else if len(inputs) == 0 {
				continue
			}

			// Wrap each series in a call iterator.
			for i, input := range inputs {
				if opt.InterruptCh != nil {
					input = query.NewInterruptIterator(input, opt.InterruptCh)
				}

				itr, err := query.NewCallIterator(input, opt)
				if err != nil {
					query.Iterators(inputs).Close()
					return err
				}
				inputs[i] = itr
			}

			itr := query.NewParallelMergeIterator(inputs, opt, runtime.GOMAXPROCS(0))
			itrs = append(itrs, itr)
		}
		return nil
	}(); err != nil {
		query.Iterators(itrs).Close()
		return nil, err
	}

	return itrs, nil
}
// createVarRefIterator creates an iterator for a variable reference.
// opt.Expr is expected to be a *influxql.VarRef (ref is nil otherwise, which
// yields aux-only iterators downstream). Returns (nil, nil) when the
// measurement does not exist.
func (e *Engine) createVarRefIterator(measurement string, opt query.IteratorOptions) ([]query.Iterator, error) {
	ref, _ := opt.Expr.(*influxql.VarRef)

	if exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil {
		return nil, err
	} else if !exists {
		return nil, nil
	}

	// Determine tagsets for this measurement based on dimensions and filters.
	tagSets, err := e.index.TagSets([]byte(measurement), opt)
	if err != nil {
		return nil, err
	}

	// Reverse the tag sets if we are ordering by descending.
	if !opt.Ascending {
		for _, t := range tagSets {
			t.Reverse()
		}
	}

	// Calculate tag sets and apply SLIMIT/SOFFSET.
	tagSets = query.LimitTagSets(tagSets, opt.SLimit, opt.SOffset)

	itrs := make([]query.Iterator, 0, len(tagSets))
	if err := func() error {
		for _, t := range tagSets {
			inputs, err := e.createTagSetIterators(ref, measurement, t, opt)
			if err != nil {
				return err
			} else if len(inputs) == 0 {
				continue
			}

			// If we have a LIMIT or OFFSET and the grouping of the outer query
			// is different than the current grouping, we need to perform the
			// limit on each of the individual series keys instead to improve
			// performance.
			if (opt.Limit > 0 || opt.Offset > 0) && len(opt.Dimensions) != len(opt.GroupBy) {
				for i, input := range inputs {
					inputs[i] = newLimitIterator(input, opt)
				}
			}

			itr, err := query.Iterators(inputs).Merge(opt)
			if err != nil {
				query.Iterators(inputs).Close()
				return err
			}

			// Apply a limit on the merged iterator.
			if opt.Limit > 0 || opt.Offset > 0 {
				if len(opt.Dimensions) == len(opt.GroupBy) {
					// When the final dimensions and the current grouping are
					// the same, we will only produce one series so we can use
					// the faster limit iterator.
					itr = newLimitIterator(itr, opt)
				} else {
					// When the dimensions are different than the current
					// grouping, we need to account for the possibility there
					// will be multiple series. The limit iterator in the
					// influxql package handles that scenario.
					itr = query.NewLimitIterator(itr, opt)
				}
			}
			itrs = append(itrs, itr)
		}
		return nil
	}(); err != nil {
		// Close anything already built before propagating the failure.
		query.Iterators(itrs).Close()
		return nil, err
	}

	return itrs, nil
}
// createTagSetIterators creates a set of iterators for a tagset. The tagset's
// series are partitioned across up to GOMAXPROCS goroutines and read in
// parallel; the resulting iterators are concatenated in group order.
func (e *Engine) createTagSetIterators(ref *influxql.VarRef, name string, t *query.TagSet, opt query.IteratorOptions) ([]query.Iterator, error) {
	// BUG FIX: an empty tagset previously produced parallelism == 0, making
	// the division below panic with a divide-by-zero. There is nothing to
	// read, so return early.
	if len(t.SeriesKeys) == 0 {
		return nil, nil
	}

	// Set parallelism by number of logical cpus.
	parallelism := runtime.GOMAXPROCS(0)
	if parallelism > len(t.SeriesKeys) {
		parallelism = len(t.SeriesKeys)
	}

	// Create series key groupings w/ return error.
	groups := make([]struct {
		keys    []string
		filters []influxql.Expr
		itrs    []query.Iterator
		err     error
	}, parallelism)

	// Group series keys; the final group takes any remainder.
	n := len(t.SeriesKeys) / parallelism
	for i := 0; i < parallelism; i++ {
		group := &groups[i]

		if i < parallelism-1 {
			group.keys = t.SeriesKeys[i*n : (i+1)*n]
			group.filters = t.Filters[i*n : (i+1)*n]
		} else {
			group.keys = t.SeriesKeys[i*n:]
			group.filters = t.Filters[i*n:]
		}
	}

	// Read series groups in parallel.
	var wg sync.WaitGroup
	for i := range groups {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			groups[i].itrs, groups[i].err = e.createTagSetGroupIterators(ref, name, groups[i].keys, t, groups[i].filters, opt)
		}(i)
	}
	wg.Wait()

	// Determine total number of iterators so we can allocate only once.
	var itrN int
	for _, group := range groups {
		itrN += len(group.itrs)
	}

	// Combine all iterators together and check for errors.
	var err error
	itrs := make([]query.Iterator, 0, itrN)
	for _, group := range groups {
		if group.err != nil {
			err = group.err
		}
		itrs = append(itrs, group.itrs...)
	}

	// If an error occurred, make sure we close all created iterators.
	if err != nil {
		query.Iterators(itrs).Close()
		return nil, err
	}

	return itrs, nil
}
// createTagSetGroupIterators creates a set of iterators for a subset of a tagset's series.
// filters[i] is the per-series condition for seriesKeys[i]. On interruption or
// when the max-select-series limit is exceeded, all created iterators are
// closed and an error is returned.
func (e *Engine) createTagSetGroupIterators(ref *influxql.VarRef, name string, seriesKeys []string, t *query.TagSet, filters []influxql.Expr, opt query.IteratorOptions) ([]query.Iterator, error) {
	conditionFields := make([]influxql.VarRef, len(influxql.ExprNames(opt.Condition)))

	itrs := make([]query.Iterator, 0, len(seriesKeys))
	for i, seriesKey := range seriesKeys {
		fields := 0
		if filters[i] != nil {
			// Retrieve non-time fields from this series filter and filter out tags.
			for _, f := range influxql.ExprNames(filters[i]) {
				conditionFields[fields] = f
				fields++
			}
		}

		itr, err := e.createVarRefSeriesIterator(ref, name, seriesKey, t, filters[i], conditionFields[:fields], opt)
		if err != nil {
			return itrs, err
		} else if itr == nil {
			continue
		}
		itrs = append(itrs, itr)

		// Abort if the query was killed
		select {
		case <-opt.InterruptCh:
			query.Iterators(itrs).Close()
			// BUG FIX: `err` is always nil at this point, so this previously
			// returned (nil, nil) and the interruption was silently swallowed.
			return nil, query.ErrQueryInterrupted
		default:
		}

		// Enforce series limit at creation time.
		if opt.MaxSeriesN > 0 && len(itrs) > opt.MaxSeriesN {
			query.Iterators(itrs).Close()
			return nil, fmt.Errorf("max-select-series limit exceeded: (%d/%d)", len(itrs), opt.MaxSeriesN)
		}
	}
	return itrs, nil
}
// createVarRefSeriesIterator creates an iterator for a variable reference for a series.
// ref may be nil when only auxiliary fields are selected; in that case a float
// iterator with a nil main cursor is returned. A (nil, nil) result means the
// referenced field does not exist for this series.
func (e *Engine) createVarRefSeriesIterator(ref *influxql.VarRef, name string, seriesKey string, t *query.TagSet, filter influxql.Expr, conditionFields []influxql.VarRef, opt query.IteratorOptions) (query.Iterator, error) {
	_, tfs := models.ParseKey([]byte(seriesKey))
	tags := query.NewTags(tfs.Map())

	// Create options specific for this series.
	itrOpt := opt
	itrOpt.Condition = filter

	// Build auxiliary cursors.
	// Tag values should be returned if the field doesn't exist.
	var aux []cursorAt
	if len(opt.Aux) > 0 {
		aux = make([]cursorAt, len(opt.Aux))
		for i, ref := range opt.Aux {
			// Create cursor from field if a tag wasn't requested.
			if ref.Type != influxql.Tag {
				cur := e.buildCursor(name, seriesKey, tfs, &ref, opt)
				if cur != nil {
					aux[i] = newBufCursor(cur, opt.Ascending)
					continue
				}

				// If a field was requested, use a nil cursor of the requested type.
				switch ref.Type {
				case influxql.Float, influxql.AnyField:
					aux[i] = nilFloatLiteralValueCursor
					continue
				case influxql.Integer:
					aux[i] = nilIntegerLiteralValueCursor
					continue
				case influxql.Unsigned:
					aux[i] = nilUnsignedLiteralValueCursor
					continue
				case influxql.String:
					aux[i] = nilStringLiteralValueCursor
					continue
				case influxql.Boolean:
					aux[i] = nilBooleanLiteralValueCursor
					continue
				}
			}

			// If field doesn't exist, use the tag value.
			if v := tags.Value(ref.Val); v == "" {
				// However, if the tag value is blank then return a null.
				aux[i] = nilStringLiteralValueCursor
			} else {
				aux[i] = &literalValueCursor{value: v}
			}
		}
	}

	// Remove _tagKey condition field.
	// We can't search on it because we can't join it to _tagValue based on time.
	if varRefSliceContains(conditionFields, "_tagKey") {
		conditionFields = varRefSliceRemove(conditionFields, "_tagKey")

		// Remove _tagKey conditional references from iterator.
		itrOpt.Condition = influxql.RewriteExpr(influxql.CloneExpr(itrOpt.Condition), func(expr influxql.Expr) influxql.Expr {
			switch expr := expr.(type) {
			case *influxql.BinaryExpr:
				// Replace any comparison that touches _tagKey with TRUE.
				if ref, ok := expr.LHS.(*influxql.VarRef); ok && ref.Val == "_tagKey" {
					return &influxql.BooleanLiteral{Val: true}
				}
				if ref, ok := expr.RHS.(*influxql.VarRef); ok && ref.Val == "_tagKey" {
					return &influxql.BooleanLiteral{Val: true}
				}
			}
			return expr
		})
	}

	// Build conditional field cursors.
	// If a conditional field doesn't exist then ignore the series.
	var conds []cursorAt
	if len(conditionFields) > 0 {
		conds = make([]cursorAt, len(conditionFields))
		for i, ref := range conditionFields {
			// Create cursor from field if a tag wasn't requested.
			if ref.Type != influxql.Tag {
				cur := e.buildCursor(name, seriesKey, tfs, &ref, opt)
				if cur != nil {
					conds[i] = newBufCursor(cur, opt.Ascending)
					continue
				}

				// If a field was requested, use a nil cursor of the requested type.
				switch ref.Type {
				case influxql.Float, influxql.AnyField:
					conds[i] = nilFloatLiteralValueCursor
					continue
				case influxql.Integer:
					conds[i] = nilIntegerLiteralValueCursor
					continue
				case influxql.Unsigned:
					conds[i] = nilUnsignedLiteralValueCursor
					continue
				case influxql.String:
					conds[i] = nilStringLiteralValueCursor
					continue
				case influxql.Boolean:
					conds[i] = nilBooleanLiteralValueCursor
					continue
				}
			}

			// If field doesn't exist, use the tag value.
			if v := tags.Value(ref.Val); v == "" {
				// However, if the tag value is blank then return a null.
				conds[i] = nilStringLiteralValueCursor
			} else {
				conds[i] = &literalValueCursor{value: v}
			}
		}
	}
	condNames := influxql.VarRefs(conditionFields).Strings()

	// Limit tags to only the dimensions selected.
	dimensions := opt.GetDimensions()
	tags = tags.Subset(dimensions)

	// If it's only auxiliary fields then it doesn't matter what type of iterator we use.
	if ref == nil {
		return newFloatIterator(name, tags, itrOpt, nil, aux, conds, condNames), nil
	}

	// Build main cursor.
	cur := e.buildCursor(name, seriesKey, tfs, ref, opt)

	// If the field doesn't exist then don't build an iterator.
	if cur == nil {
		// Release the cursors already built for this series.
		cursorsAt(aux).close()
		cursorsAt(conds).close()
		return nil, nil
	}

	// Remove measurement name if we are selecting the name.
	if ref.Val == "_name" {
		name = ""
	}

	switch cur := cur.(type) {
	case floatCursor:
		return newFloatIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
	case integerCursor:
		return newIntegerIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
	case unsignedCursor:
		return newUnsignedIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
	case stringCursor:
		return newStringIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
	case booleanCursor:
		return newBooleanIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
	default:
		// buildCursor only produces the typed cursors handled above.
		panic("unreachable")
	}
}
// buildCursor creates an untyped cursor for a field. It returns nil when the
// measurement or field does not exist, or when a requested cast between the
// field's type and ref.Type is not supported.
func (e *Engine) buildCursor(measurement, seriesKey string, tags models.Tags, ref *influxql.VarRef, opt query.IteratorOptions) cursor {
	// Check if this is a system field cursor.
	switch ref.Val {
	case "_name":
		return &stringSliceCursor{values: []string{measurement}}
	case "_tagKey":
		return &stringSliceCursor{values: tags.Keys()}
	case "_tagValue":
		return &stringSliceCursor{values: matchTagValues(tags, opt.Condition)}
	case "_seriesKey":
		return &stringSliceCursor{values: []string{seriesKey}}
	}

	// Look up fields for measurement.
	mf := e.fieldset.Fields(measurement)
	if mf == nil {
		return nil
	}

	// Check for system field for field keys.
	if ref.Val == "_fieldKey" {
		return &stringSliceCursor{values: mf.FieldKeys()}
	}

	// Find individual field.
	f := mf.Field(ref.Val)
	if f == nil {
		return nil
	}

	// Check if we need to perform a cast. Performing a cast in the
	// engine (if it is possible) is much more efficient than an automatic cast.
	if ref.Type != influxql.Unknown && ref.Type != influxql.AnyField && ref.Type != f.Type {
		switch ref.Type {
		case influxql.Float:
			switch f.Type {
			case influxql.Integer:
				cur := e.buildIntegerCursor(measurement, seriesKey, ref.Val, opt)
				return &floatCastIntegerCursor{cursor: cur}
			case influxql.Unsigned:
				cur := e.buildUnsignedCursor(measurement, seriesKey, ref.Val, opt)
				return &floatCastUnsignedCursor{cursor: cur}
			}
		case influxql.Integer:
			switch f.Type {
			case influxql.Float:
				cur := e.buildFloatCursor(measurement, seriesKey, ref.Val, opt)
				return &integerCastFloatCursor{cursor: cur}
			case influxql.Unsigned:
				cur := e.buildUnsignedCursor(measurement, seriesKey, ref.Val, opt)
				return &integerCastUnsignedCursor{cursor: cur}
			}
		case influxql.Unsigned:
			switch f.Type {
			case influxql.Float:
				cur := e.buildFloatCursor(measurement, seriesKey, ref.Val, opt)
				return &unsignedCastFloatCursor{cursor: cur}
			case influxql.Integer:
				cur := e.buildIntegerCursor(measurement, seriesKey, ref.Val, opt)
				return &unsignedCastIntegerCursor{cursor: cur}
			}
		}
		// Unsupported cast combination (e.g. string<->numeric): no cursor.
		return nil
	}

	// Return appropriate cursor based on type.
	switch f.Type {
	case influxql.Float:
		return e.buildFloatCursor(measurement, seriesKey, ref.Val, opt)
	case influxql.Integer:
		return e.buildIntegerCursor(measurement, seriesKey, ref.Val, opt)
	case influxql.Unsigned:
		return e.buildUnsignedCursor(measurement, seriesKey, ref.Val, opt)
	case influxql.String:
		return e.buildStringCursor(measurement, seriesKey, ref.Val, opt)
	case influxql.Boolean:
		return e.buildBooleanCursor(measurement, seriesKey, ref.Val, opt)
	default:
		panic("unreachable")
	}
}
// matchTagValues returns the values of the tags whose key satisfies the given
// condition. A nil condition matches every tag.
func matchTagValues(tags models.Tags, condition influxql.Expr) []string {
	if condition == nil {
		return tags.Values()
	}

	// Build the evaluation context once: every tag key maps to its value.
	ctx := make(map[string]interface{})
	for _, tag := range tags {
		ctx[string(tag.Key)] = string(tag.Value)
	}

	// Evaluate the condition per tag, exposing the current key as "_tagKey".
	var matched []string
	for _, tag := range tags {
		ctx["_tagKey"] = string(tag.Key)
		if influxql.EvalBool(condition, ctx) {
			matched = append(matched, string(tag.Value))
		}
	}
	return matched
}
// buildFloatCursor creates a cursor for a float field.
// The cursor merges in-memory cache values with TSM data from the file store.
// The measurement parameter is currently unused.
func (e *Engine) buildFloatCursor(measurement, seriesKey, field string, opt query.IteratorOptions) floatCursor {
	key := SeriesFieldKeyBytes(seriesKey, field)
	cacheValues := e.Cache.Values(key)
	keyCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending)
	return newFloatCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor)
}
// buildIntegerCursor creates a cursor for an integer field.
// The cursor merges in-memory cache values with TSM data from the file store.
// The measurement parameter is currently unused.
func (e *Engine) buildIntegerCursor(measurement, seriesKey, field string, opt query.IteratorOptions) integerCursor {
	key := SeriesFieldKeyBytes(seriesKey, field)
	cacheValues := e.Cache.Values(key)
	keyCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending)
	return newIntegerCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor)
}
// buildUnsignedCursor creates a cursor for an unsigned field.
// The cursor merges in-memory cache values with TSM data from the file store.
// The measurement parameter is currently unused.
func (e *Engine) buildUnsignedCursor(measurement, seriesKey, field string, opt query.IteratorOptions) unsignedCursor {
	key := SeriesFieldKeyBytes(seriesKey, field)
	cacheValues := e.Cache.Values(key)
	keyCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending)
	return newUnsignedCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor)
}
// buildStringCursor creates a cursor for a string field.
// The cursor merges in-memory cache values with TSM data from the file store.
// The measurement parameter is currently unused.
func (e *Engine) buildStringCursor(measurement, seriesKey, field string, opt query.IteratorOptions) stringCursor {
	key := SeriesFieldKeyBytes(seriesKey, field)
	cacheValues := e.Cache.Values(key)
	keyCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending)
	return newStringCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor)
}
// buildBooleanCursor creates a cursor for a boolean field.
// The cursor merges in-memory cache values with TSM data from the file store.
// The measurement parameter is currently unused.
func (e *Engine) buildBooleanCursor(measurement, seriesKey, field string, opt query.IteratorOptions) booleanCursor {
	key := SeriesFieldKeyBytes(seriesKey, field)
	cacheValues := e.Cache.Values(key)
	keyCursor := e.KeyCursor(key, opt.SeekTime(), opt.Ascending)
	return newBooleanCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor)
}
// SeriesPointIterator returns an iterator over the series in the engine's
// index, delegating directly to the underlying index implementation.
func (e *Engine) SeriesPointIterator(opt query.IteratorOptions) (query.Iterator, error) {
	return e.index.SeriesPointIterator(opt)
}
// SeriesFieldKey combine a series key and field name for a unique string to be hashed to a numeric ID.
// Layout: <series key><keyFieldSeparator><field name>; see also
// SeriesAndFieldFromCompositeKey for the inverse operation.
func SeriesFieldKey(seriesKey, field string) string {
	return seriesKey + keyFieldSeparator + field
}
// SeriesFieldKeyBytes returns the composite series/field key as a byte slice,
// equivalent to []byte(SeriesFieldKey(seriesKey, field)) but built with a
// single allocation.
func SeriesFieldKeyBytes(seriesKey, field string) []byte {
	buf := make([]byte, 0, len(seriesKey)+len(keyFieldSeparator)+len(field))
	buf = append(buf, seriesKey...)
	buf = append(buf, keyFieldSeparatorBytes...)
	buf = append(buf, field...)
	return buf
}
// tsmFieldTypeToInfluxQLDataType maps a TSM block type byte to the
// corresponding influxql data type. Unrecognized block types yield
// influxql.Unknown and a non-nil error.
func tsmFieldTypeToInfluxQLDataType(typ byte) (influxql.DataType, error) {
	switch typ {
	case BlockFloat64:
		return influxql.Float, nil
	case BlockInteger:
		return influxql.Integer, nil
	case BlockUnsigned:
		return influxql.Unsigned, nil
	case BlockBoolean:
		return influxql.Boolean, nil
	case BlockString:
		return influxql.String, nil
	default:
		return influxql.Unknown, fmt.Errorf("unknown block type: %v", typ)
	}
}
// SeriesAndFieldFromCompositeKey returns the series key and the field key extracted from the composite key.
// It is the inverse of SeriesFieldKeyBytes. If the separator is absent the
// whole input is returned as the series key with a nil field.
func SeriesAndFieldFromCompositeKey(key []byte) ([]byte, []byte) {
	sep := bytes.Index(key, keyFieldSeparatorBytes)
	if sep == -1 {
		// No field???
		return key, nil
	}
	return key[:sep], key[sep+len(keyFieldSeparator):]
}
// readDir recursively reads all files from a path.
func readDir(root, rel string) ([]string, error) {
// Open root.
f, err := os.Open(filepath.Join(root, rel))
if err != nil {
return nil, err
}
defer f.Close()
// Read all files.
fis, err := f.Readdir(-1)
if err != nil {
return nil, err
}
// Read all subdirectories and append to the end.
var paths []string
for _, fi := range fis {
// Simply append if it's a file.
if !fi.IsDir() {
paths = append(paths, filepath.Join(rel, fi.Name()))
continue
}
// Read and append nested file paths.
children, err := readDir(root, filepath.Join(rel, fi.Name()))
if err != nil {
return nil, err
}
paths = append(paths, children...)
}
return paths, nil
}
// varRefSliceContains reports whether any VarRef in a has the value v.
func varRefSliceContains(a []influxql.VarRef, v string) bool {
	for i := range a {
		if a[i].Val == v {
			return true
		}
	}
	return false
}
// varRefSliceRemove returns a with every VarRef whose value equals v removed.
// When v is absent the input slice is returned unchanged (no copy is made).
func varRefSliceRemove(a []influxql.VarRef, v string) []influxql.VarRef {
	if !varRefSliceContains(a, v) {
		return a
	}

	filtered := make([]influxql.VarRef, 0, len(a))
	for _, ref := range a {
		if ref.Val == v {
			continue
		}
		filtered = append(filtered, ref)
	}
	return filtered
}
|
// Copyright 2016 yati authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"github.com/fsouza/go-dockerclient"
)

// createContainer connects to the Docker daemon at address, creates a
// container from the given image, and starts it.
//
// NOTE(review): the ca, cert and key parameters are accepted but never used —
// the client is created without TLS. Confirm whether docker.NewTLSClient was
// intended here.
func createContainer(address, ca, cert, key, image string) error {
	client, err := docker.NewClient(address)
	if err != nil {
		return err
	}
	config := docker.Config{Image: image}
	opts := docker.CreateContainerOptions{Config: &config}
	container, err := client.CreateContainer(opts)
	if err != nil {
		return err
	}
	// Start with no extra host configuration.
	return client.StartContainer(container.ID, nil)
}
installer: add command to pull images
// Copyright 2016 yati authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"os"

	"github.com/fsouza/go-dockerclient"
)

// createContainer connects to the Docker daemon at address, pulls the image,
// then creates and starts a container from it.
//
// NOTE(review): the ca, cert and key parameters are accepted but never used —
// the client is created without TLS. Confirm whether docker.NewTLSClient was
// intended here. The pull always requests the "latest" tag regardless of any
// tag embedded in image.
func createContainer(address, ca, cert, key, image string) error {
	client, err := docker.NewClient(address)
	if err != nil {
		return err
	}
	// Pull progress is streamed to stdout; no registry auth is supplied.
	pullOpts := docker.PullImageOptions{
		Repository:   image,
		OutputStream: os.Stdout,
		Tag:          "latest",
	}
	err = client.PullImage(pullOpts, docker.AuthConfiguration{})
	if err != nil {
		return err
	}
	config := docker.Config{Image: image}
	opts := docker.CreateContainerOptions{Config: &config}
	container, err := client.CreateContainer(opts)
	if err != nil {
		return err
	}
	// Start with no extra host configuration.
	return client.StartContainer(container.ID, nil)
}
|
package cluster
import (
"bytes"
"context"
"encoding/json"
"fmt"
"math/rand"
"net/http"
"sync"
"time"
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/db/cluster"
"github.com/lxc/lxd/lxd/db/query"
"github.com/lxc/lxd/lxd/node"
"github.com/lxc/lxd/lxd/task"
"github.com/lxc/lxd/lxd/warnings"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/logger"
"github.com/pkg/errors"
)
// heartbeatMode indicates how urgently a heartbeat round should be performed.
type heartbeatMode int

const (
	// NOTE(review): "hearbeat" is a typo for "heartbeat" in these names;
	// renaming here would break other references in this package, so it
	// should be fixed in a dedicated change.
	hearbeatNormal heartbeatMode = iota
	hearbeatImmediate
	hearbeatInitial
)
// APIHeartbeatMember contains specific cluster node info.
type APIHeartbeatMember struct {
	ID            int64     // ID field value in nodes table.
	Address       string    // Host and Port of node.
	Name          string    // Name of cluster member.
	RaftID        uint64    // ID field value in raft_nodes table, zero if non-raft node.
	RaftRole      int       // Node role in the raft cluster, from the raft_nodes table
	Raft          bool      // Deprecated, use non-zero RaftID instead to indicate raft node.
	LastHeartbeat time.Time // Last time we received a successful response from node.
	Online        bool      // Calculated from offline threshold and LastHeartbeat time.
	updated       bool      // Has node been updated during this heartbeat run. Not sent to nodes.
}
// APIHeartbeatVersion contains max versions for all nodes in cluster.
type APIHeartbeatVersion struct {
	Schema        int // Highest schema version seen across the cluster.
	APIExtensions int // Highest API extension count seen across the cluster.
}
// APIHeartbeat contains data sent to nodes in heartbeat.
type APIHeartbeat struct {
	sync.Mutex // Used to control access to Members maps.
	cluster    *db.Cluster
	Members    map[int64]APIHeartbeatMember // Keyed by node ID from the nodes table.
	Version    APIHeartbeatVersion
	Time       time.Time

	// Indicates if heartbeat contains a fresh set of node states.
	// This can be used to indicate to the receiving node that the state is fresh enough to
	// trigger node refresh activities (such as forkdns).
	FullStateList bool
}
// Update updates an existing APIHeartbeat struct with the raft and all node states supplied.
// If allNodes provided is an empty set then this is considered a non-full state list.
// Any raft node without a matching entry in the nodes table is logged for
// diagnostics via a read-only cluster transaction.
func (hbState *APIHeartbeat) Update(fullStateList bool, raftNodes []db.RaftNode, allNodes []db.NodeInfo, offlineThreshold time.Duration) {
	var maxSchemaVersion, maxAPIExtensionsVersion int

	if hbState.Members == nil {
		hbState.Members = make(map[int64]APIHeartbeatMember)
	}

	// If we've been supplied a fresh set of node states, this is a full state list.
	hbState.FullStateList = fullStateList

	// Convert raftNodes to a map keyed on address for lookups later.
	raftNodeMap := make(map[string]db.RaftNode, len(raftNodes))
	for _, raftNode := range raftNodes {
		raftNodeMap[raftNode.Address] = raftNode
	}

	// Add nodes (overwrites any nodes with same ID in map with fresh data).
	for _, node := range allNodes {
		member := APIHeartbeatMember{
			ID:            node.ID,
			Address:       node.Address,
			Name:          node.Name,
			LastHeartbeat: node.Heartbeat,
			Online:        !node.Heartbeat.Before(time.Now().Add(-offlineThreshold)),
		}

		if raftNode, exists := raftNodeMap[member.Address]; exists {
			member.Raft = true // Deprecated
			member.RaftID = raftNode.ID
			member.RaftRole = int(raftNode.Role)
			delete(raftNodeMap, member.Address) // Used to check any remaining later.
		}

		// Add to the members map using the node ID (not the Raft Node ID).
		hbState.Members[node.ID] = member

		// Keep a record of highest APIExtensions and Schema version seen in all nodes.
		if node.APIExtensions > maxAPIExtensionsVersion {
			maxAPIExtensionsVersion = node.APIExtensions
		}

		if node.Schema > maxSchemaVersion {
			maxSchemaVersion = node.Schema
		}
	}

	hbState.Version = APIHeartbeatVersion{
		Schema:        maxSchemaVersion,
		APIExtensions: maxAPIExtensionsVersion,
	}

	if len(raftNodeMap) > 0 && hbState.cluster != nil {
		// BUG FIX: the transaction's error was previously discarded, so a
		// database failure made this diagnostic check silently do nothing.
		err := hbState.cluster.Transaction(func(tx *db.ClusterTx) error {
			for addr, raftNode := range raftNodeMap {
				if _, err := tx.GetPendingNodeByAddress(addr); err != nil {
					logger.Errorf("Unaccounted raft node(s) not found in 'nodes' table for heartbeat: %+v", raftNode)
				}
			}

			return nil
		})
		if err != nil {
			logger.Errorf("Failed checking for unaccounted raft nodes: %v", err)
		}
	}
}
// Send sends heartbeat requests to the nodes supplied and updates heartbeat state.
// The local node (matching localAddress) is not contacted over the network; its state
// is refreshed in place. Remote requests run concurrently and this function blocks
// until all of them have completed.
func (hbState *APIHeartbeat) Send(ctx context.Context, networkCert *shared.CertInfo, serverCert *shared.CertInfo, localAddress string, nodes []db.NodeInfo, spreadDuration time.Duration) {
	heartbeatsWg := sync.WaitGroup{}
	sendHeartbeat := func(nodeID int64, address string, spreadDuration time.Duration, heartbeatData *APIHeartbeat) {
		defer heartbeatsWg.Done()

		if spreadDuration > 0 {
			// Spread in time by waiting up to 3s less than the interval.
			spreadDurationMs := int(spreadDuration.Milliseconds())
			spreadRange := spreadDurationMs - 3000

			if spreadRange > 0 {
				time.Sleep(time.Duration(rand.Intn(spreadRange)) * time.Millisecond)
			}
		}

		// Update timestamp to current, used for time skew detection.
		// NOTE(review): this write is not performed under the mutex even though other
		// sender goroutines may be encoding heartbeatData concurrently — confirm intended.
		heartbeatData.Time = time.Now().UTC()

		err := HeartbeatNode(ctx, address, networkCert, serverCert, heartbeatData)
		if err == nil {
			heartbeatData.Lock()

			// Ensure only update nodes that exist in Members already.
			hbNode, existing := hbState.Members[nodeID]
			if !existing {
				// Fix: the original returned here while still holding the lock,
				// which would deadlock all subsequent heartbeat processing.
				heartbeatData.Unlock()
				return
			}

			hbNode.LastHeartbeat = time.Now()
			hbNode.Online = true
			hbNode.updated = true
			heartbeatData.Members[nodeID] = hbNode
			heartbeatData.Unlock()
			logger.Debug("Successful heartbeat", log.Ctx{"address": address})

			// Member responded, so resolve any outstanding offline warning for it.
			err = warnings.ResolveWarningsByLocalNodeAndProjectAndTypeAndEntity(hbState.cluster, "", db.WarningOfflineClusterMember, cluster.TypeNode, int(nodeID))
			if err != nil {
				logger.Warn("Failed to resolve warning", log.Ctx{"err": err})
			}
		} else {
			logger.Warn("Failed heartbeat", log.Ctx{"address": address, "err": err})

			// Record (or refresh) an offline warning for the unreachable member.
			err = hbState.cluster.UpsertWarningLocalNode("", cluster.TypeNode, int(nodeID), db.WarningOfflineClusterMember, err.Error())
			if err != nil {
				logger.Warn("Failed to create warning", log.Ctx{"err": err})
			}
		}
	}

	for _, node := range nodes {
		// Special case for the local node - just record the time now.
		if node.Address == localAddress {
			hbState.Lock()
			hbNode := hbState.Members[node.ID]
			hbNode.LastHeartbeat = time.Now()
			hbNode.Online = true
			hbNode.updated = true
			hbState.Members[node.ID] = hbNode
			hbState.Unlock()
			continue
		}

		// Parallelize the rest.
		heartbeatsWg.Add(1)
		go sendHeartbeat(node.ID, node.Address, spreadDuration, hbState)
	}

	heartbeatsWg.Wait()
}
// HeartbeatTask returns a task function that performs leader-initiated heartbeat
// checks against all LXD nodes in the cluster.
//
// It will update the heartbeat timestamp column of the nodes table
// accordingly, and also notify them of the current list of database nodes.
func HeartbeatTask(gateway *Gateway) (task.Func, task.Schedule) {
	// The database APIs are blocking, so run the round in its own goroutine and
	// stop waiting for it as soon as the context expires.
	run := func(ctx context.Context) {
		done := make(chan struct{})

		go func() {
			gateway.heartbeat(ctx, hearbeatNormal)
			done <- struct{}{}
		}()

		select {
		case <-done:
		case <-ctx.Done():
		}
	}

	every := func() (time.Duration, error) {
		return task.Every(gateway.heartbeatInterval())()
	}

	return run, every
}
// heartbeatInterval returns the delay between heartbeat rounds: half of the
// configured offline threshold, falling back to db.DefaultOfflineThreshold
// when no (positive) threshold is configured.
func (g *Gateway) heartbeatInterval() time.Duration {
	if g.HeartbeatOfflineThreshold > 0 {
		return g.HeartbeatOfflineThreshold / 2
	}

	return time.Duration(db.DefaultOfflineThreshold) * time.Second / 2
}
// heartbeatRestart restarts cancels any ongoing heartbeat and restarts it.
// If there is no ongoing heartbeat then this is a no-op.
// Returns true if new heartbeat round was started.
func (g *Gateway) heartbeatRestart() bool {
	g.heartbeatCancelLock.Lock() // Make sure we're the only ones inspecting the g.heartbeatCancel var.

	// There is a cancellable heartbeat round ongoing.
	if g.heartbeatCancel != nil {
		g.heartbeatCancel()            // Request ongoing hearbeat round cancel itself.
		g.heartbeatCancel = nil        // Indicate there is no further cancellable heartbeat round.
		g.heartbeatCancelLock.Unlock() // Release lock ready for g.heartbeat to acquire it.

		// Start a new heartbeat round async that will run as soon as ongoing heartbeat round exits.
		// The new round blocks on g.heartbeatLock inside g.heartbeat until the cancelled one returns.
		go g.heartbeat(g.ctx, hearbeatImmediate)

		return true
	}

	// No cancellable heartbeat round, release lock.
	g.heartbeatCancelLock.Unlock()

	return false
}
// heartbeat performs a single heartbeat round as the cluster leader: it refreshes the
// local raft_nodes table, sends the current member state to the other members, records
// their responses in the database, and finally runs the optional HeartbeatNodeHook.
// mode controls scheduling: normal rounds are spread over the heartbeat interval,
// while immediate/initial rounds run as fast as possible.
func (g *Gateway) heartbeat(ctx context.Context, mode heartbeatMode) {
	// Avoid concurent heartbeat loops.
	// This is possible when both the regular task and the out of band heartbeat round from a dqlite
	// connection or notification restart both kick in at the same time.
	g.heartbeatLock.Lock()
	defer g.heartbeatLock.Unlock()

	// Acquire the cancellation lock and populate it so that this heartbeat round can be cancelled if a
	// notification cancellation request arrives during the round. Also setup a defer so that the cancellation
	// function is set to nil when this function ends to indicate there is no ongoing heartbeat round.
	g.heartbeatCancelLock.Lock()
	ctx, g.heartbeatCancel = context.WithCancel(ctx)
	defer func() {
		g.heartbeatCancelLock.Lock()
		if g.heartbeatCancel != nil {
			g.heartbeatCancel()
			g.heartbeatCancel = nil
		}
		g.heartbeatCancelLock.Unlock()
	}()
	g.heartbeatCancelLock.Unlock()

	if g.Cluster == nil || g.server == nil || g.memoryDial != nil {
		// We're not a raft node or we're not clustered
		return
	}

	raftNodes, err := g.currentRaftNodes()
	if err != nil {
		// Not being the leader is expected on most members; exit quietly.
		if errors.Cause(err) == ErrNotLeader {
			return
		}

		logger.Error("Failed to get current raft members", log.Ctx{"err": err})
		return
	}

	// Address of this node.
	// NOTE(review): an error here is only logged, so the round proceeds with an empty
	// localAddress until the explicit check further down — confirm this is intended.
	localAddress, err := node.ClusterAddress(g.db)
	if err != nil {
		logger.Error("Failed to fetch local cluster address", log.Ctx{"err": err})
	}

	// Fetch the full list of cluster members from the global database.
	var allNodes []db.NodeInfo
	err = g.Cluster.Transaction(func(tx *db.ClusterTx) error {
		var err error
		allNodes, err = tx.GetNodes()
		if err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		logger.Warn("Failed to get current cluster members", log.Ctx{"err": err})
		return
	}

	modeStr := "normal"
	switch mode {
	case hearbeatImmediate:
		modeStr = "immediate"
	case hearbeatInitial:
		modeStr = "initial"
	}

	if mode != hearbeatNormal {
		// Log unscheduled heartbeats with a higher level than normal heartbeats.
		logger.Info("Starting heartbeat round", log.Ctx{"mode": modeStr, "address": localAddress})
	} else {
		// Don't spam the normal log with regular heartbeat messages.
		logger.Debug("Starting heartbeat round", log.Ctx{"mode": modeStr, "address": localAddress})
	}

	// Replace the local raft_nodes table immediately because it
	// might miss a row containing ourselves, since we might have
	// been elected leader before the former leader had chance to
	// send us a fresh update through the heartbeat pool.
	logger.Debug("Heartbeat updating local raft members", log.Ctx{"members": raftNodes})
	err = g.db.Transaction(func(tx *db.NodeTx) error {
		return tx.ReplaceRaftNodes(raftNodes)
	})
	if err != nil {
		logger.Warn("Failed to replace local raft members", log.Ctx{"err": err})
		return
	}

	if localAddress == "" {
		logger.Warn("No local address set, aborting heartbeat round")
		return
	}

	startTime := time.Now()
	heartbeatInterval := g.heartbeatInterval()

	// Cumulative set of node states (will be written back to database once done).
	hbState := &APIHeartbeat{cluster: g.Cluster}

	// If we are doing a normal heartbeat round then spread the requests over the heartbeatInterval in order
	// to reduce load on the cluster.
	spreadDuration := time.Duration(0)
	if mode == hearbeatNormal {
		spreadDuration = heartbeatInterval
	}

	// If this leader node hasn't sent a heartbeat recently, then its node state records
	// are likely out of date, this can happen when a node becomes a leader.
	// Send stale set to all nodes in database to get a fresh set of active nodes.
	if mode == hearbeatInitial {
		hbState.Update(false, raftNodes, allNodes, g.HeartbeatOfflineThreshold)
		hbState.Send(ctx, g.networkCert, g.serverCert(), localAddress, allNodes, spreadDuration)

		// We have the latest set of node states now, lets send that state set to all nodes.
		hbState.FullStateList = true
		hbState.Send(ctx, g.networkCert, g.serverCert(), localAddress, allNodes, spreadDuration)
	} else {
		hbState.Update(true, raftNodes, allNodes, g.HeartbeatOfflineThreshold)
		hbState.Send(ctx, g.networkCert, g.serverCert(), localAddress, allNodes, spreadDuration)
	}

	// Look for any new node which appeared since sending last heartbeat.
	var currentNodes []db.NodeInfo
	err = g.Cluster.Transaction(func(tx *db.ClusterTx) error {
		var err error
		currentNodes, err = tx.GetNodes()
		if err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		logger.Warn("Failed to get current cluster members", log.Ctx{"err": err})
		return
	}

	newNodes := []db.NodeInfo{}
	for _, currentNode := range currentNodes {
		existing := false
		for _, node := range allNodes {
			if node.Address == currentNode.Address && node.ID == currentNode.ID {
				existing = true
				break
			}
		}

		if !existing {
			// We found a new node
			allNodes = append(allNodes, currentNode)
			newNodes = append(newNodes, currentNode)
		}
	}

	// If any new nodes found, send heartbeat to just them (with full node state).
	if len(newNodes) > 0 {
		hbState.Update(true, raftNodes, allNodes, g.HeartbeatOfflineThreshold)
		hbState.Send(ctx, g.networkCert, g.serverCert(), localAddress, newNodes, 0)
	}

	// If the context has been cancelled, return immediately.
	err = ctx.Err()
	if err != nil {
		logger.Warn("Aborting heartbeat round", log.Ctx{"err": err})
		return
	}

	// Write the refreshed heartbeat times back to the database, retrying on transient
	// failures, while collecting the addresses of members that never responded.
	var unavailableMembers []string
	err = query.Retry(func() error {
		return g.Cluster.Transaction(func(tx *db.ClusterTx) error {
			for _, node := range hbState.Members {
				if !node.updated {
					// If member has not been updated during this heartbeat round it means
					// they are currently unreachable or rejecting heartbeats due to being
					// in the process of shutting down. Eitherway we do not want to use this
					// member as a candidate for role promotion.
					unavailableMembers = append(unavailableMembers, node.Address)
					continue
				}

				err := tx.SetNodeHeartbeat(node.Address, node.LastHeartbeat)
				if err != nil && errors.Cause(err) != db.ErrNoSuchObject {
					return errors.Wrapf(err, "Failed updating heartbeat time for member %q", node.Address)
				}
			}

			return nil
		})
	})
	if err != nil {
		logger.Error("Failed updating cluster heartbeats", log.Ctx{"err": err})
		return
	}

	// If full node state was sent and node refresh task is specified, run it async.
	if g.HeartbeatNodeHook != nil {
		g.HeartbeatNodeHook(hbState, true, unavailableMembers)
	}

	duration := time.Now().Sub(startTime)
	if duration > heartbeatInterval {
		logger.Warn("Heartbeat round duration greater than heartbeat interval", log.Ctx{"duration": duration, "interval": heartbeatInterval})
	}

	// Update last leader heartbeat time so next time a full node state list can be sent (if not this time).
	logger.Debug("Completed heartbeat round", log.Ctx{"duration": duration, "address": localAddress})
}
// HeartbeatNode performs a single heartbeat request against the node with the given address.
// The heartbeat state is JSON encoded under its lock and PUT to the node's internal
// database endpoint; a non-200 response is converted into an error.
func HeartbeatNode(taskCtx context.Context, address string, networkCert *shared.CertInfo, serverCert *shared.CertInfo, heartbeatData *APIHeartbeat) error {
	logger.Debug("Sending heartbeat request", log.Ctx{"address": address})

	config, err := tlsClientConfig(networkCert, serverCert)
	if err != nil {
		return err
	}

	timeout := 2 * time.Second
	url := fmt.Sprintf("https://%s%s", address, databaseEndpoint)
	transport, cleanup := tlsTransport(config)
	defer cleanup()
	client := &http.Client{
		Transport: transport,
		Timeout:   timeout,
	}

	// Encode the heartbeat state under the lock, as sender goroutines mutate it concurrently.
	buffer := bytes.Buffer{}
	heartbeatData.Lock()
	err = json.NewEncoder(&buffer).Encode(heartbeatData)
	heartbeatData.Unlock()
	if err != nil {
		return err
	}

	// Use 1s later timeout to give HTTP client chance timeout with more useful info.
	ctx, cancel := context.WithTimeout(taskCtx, timeout+time.Second)
	defer cancel()

	// Bind the request to the context at construction time rather than via WithContext.
	request, err := http.NewRequestWithContext(ctx, "PUT", url, bytes.NewReader(buffer.Bytes()))
	if err != nil {
		return err
	}

	setDqliteVersionHeader(request)
	request.Close = true // Immediately close the connection after the request is done

	response, err := client.Do(request)
	if err != nil {
		return errors.Wrap(err, "Failed to send heartbeat request")
	}
	defer response.Body.Close()

	if response.StatusCode != http.StatusOK {
		// Fix: pass the status text as an argument rather than as the format string,
		// so any "%" characters in it cannot corrupt the message.
		return fmt.Errorf("Heartbeat request failed with status: %w", api.StatusErrorf(response.StatusCode, "%s", response.Status))
	}

	return nil
}
lxd/cluster/heartbeat: Use node.IsOffline in heartbeat member data
Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com>
package cluster
import (
"bytes"
"context"
"encoding/json"
"fmt"
"math/rand"
"net/http"
"sync"
"time"
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/db/cluster"
"github.com/lxc/lxd/lxd/db/query"
"github.com/lxc/lxd/lxd/node"
"github.com/lxc/lxd/lxd/task"
"github.com/lxc/lxd/lxd/warnings"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/logger"
"github.com/pkg/errors"
)
// heartbeatMode controls how a heartbeat round is scheduled and logged.
type heartbeatMode int

const (
	// NOTE(review): the "hearbeat" spelling below is a pre-existing typo in the
	// identifiers; kept as-is because the names are used throughout this file.
	hearbeatNormal    heartbeatMode = iota // Regular scheduled round, spread over the heartbeat interval.
	hearbeatImmediate                      // Out-of-band round triggered by a restart/notification.
	hearbeatInitial                        // First round after becoming leader; local node states may be stale.
)

// APIHeartbeatMember contains specific cluster node info.
type APIHeartbeatMember struct {
	ID            int64     // ID field value in nodes table.
	Address       string    // Host and Port of node.
	Name          string    // Name of cluster member.
	RaftID        uint64    // ID field value in raft_nodes table, zero if non-raft node.
	RaftRole      int       // Node role in the raft cluster, from the raft_nodes table
	Raft          bool      // Deprecated, use non-zero RaftID instead to indicate raft node.
	LastHeartbeat time.Time // Last time we received a successful response from node.
	Online        bool      // Calculated from offline threshold and LastHeartbeat time.
	updated       bool      // Has node been updated during this heartbeat run. Not sent to nodes.
}

// APIHeartbeatVersion contains max versions for all nodes in cluster.
type APIHeartbeatVersion struct {
	Schema        int // Highest database schema version seen across the cluster.
	APIExtensions int // Highest API extensions count seen across the cluster.
}

// APIHeartbeat contains data sent to nodes in heartbeat.
type APIHeartbeat struct {
	sync.Mutex // Used to control access to Members maps.
	cluster    *db.Cluster
	Members    map[int64]APIHeartbeatMember // Keyed by node ID from the nodes table.
	Version    APIHeartbeatVersion
	Time       time.Time // Leader's wall-clock time at send, used for skew detection.

	// Indicates if heartbeat contains a fresh set of node states.
	// This can be used to indicate to the receiving node that the state is fresh enough to
	// trigger node refresh activies (such as forkdns).
	FullStateList bool
}
// Update updates an existing APIHeartbeat struct with the raft and all node states supplied.
// If allNodes provided is an empty set then this is considered a non-full state list.
// offlineThreshold is used to derive each member's Online flag from its last heartbeat time.
func (hbState *APIHeartbeat) Update(fullStateList bool, raftNodes []db.RaftNode, allNodes []db.NodeInfo, offlineThreshold time.Duration) {
	var maxSchemaVersion, maxAPIExtensionsVersion int

	if hbState.Members == nil {
		hbState.Members = make(map[int64]APIHeartbeatMember)
	}

	// If we've been supplied a fresh set of node states, this is a full state list.
	hbState.FullStateList = fullStateList

	// Convert raftNodes to a map keyed on address for lookups later.
	raftNodeMap := make(map[string]db.RaftNode, len(raftNodes))
	for _, raftNode := range raftNodes {
		raftNodeMap[raftNode.Address] = raftNode
	}

	// Add nodes (overwrites any nodes with same ID in map with fresh data).
	for _, node := range allNodes {
		member := APIHeartbeatMember{
			ID:            node.ID,
			Address:       node.Address,
			Name:          node.Name,
			LastHeartbeat: node.Heartbeat,
			Online:        !node.IsOffline(offlineThreshold),
		}

		if raftNode, exists := raftNodeMap[member.Address]; exists {
			member.Raft = true // Deprecated
			member.RaftID = raftNode.ID
			member.RaftRole = int(raftNode.Role)
			delete(raftNodeMap, member.Address) // Used to check any remaining later.
		}

		// Add to the members map using the node ID (not the Raft Node ID).
		hbState.Members[node.ID] = member

		// Keep a record of highest APIExtensions and Schema version seen in all nodes.
		if node.APIExtensions > maxAPIExtensionsVersion {
			maxAPIExtensionsVersion = node.APIExtensions
		}

		if node.Schema > maxSchemaVersion {
			maxSchemaVersion = node.Schema
		}
	}

	hbState.Version = APIHeartbeatVersion{
		Schema:        maxSchemaVersion,
		APIExtensions: maxAPIExtensionsVersion,
	}

	// Any raft addresses left over are not accounted for in the nodes table; check
	// whether they are pending joins and log an error if they are missing entirely.
	if len(raftNodeMap) > 0 && hbState.cluster != nil {
		// Fix: the transaction error was previously discarded silently; surface it.
		err := hbState.cluster.Transaction(func(tx *db.ClusterTx) error {
			for addr, raftNode := range raftNodeMap {
				_, err := tx.GetPendingNodeByAddress(addr)
				if err != nil {
					logger.Errorf("Unaccounted raft node(s) not found in 'nodes' table for heartbeat: %+v", raftNode)
				}
			}

			return nil
		})
		if err != nil {
			logger.Errorf("Failed checking pending nodes during heartbeat: %v", err)
		}
	}
}
// Send sends heartbeat requests to the nodes supplied and updates heartbeat state.
// The local node (matching localAddress) is not contacted over the network; its state
// is refreshed in place. Remote requests run concurrently and this function blocks
// until all of them have completed.
func (hbState *APIHeartbeat) Send(ctx context.Context, networkCert *shared.CertInfo, serverCert *shared.CertInfo, localAddress string, nodes []db.NodeInfo, spreadDuration time.Duration) {
	heartbeatsWg := sync.WaitGroup{}
	sendHeartbeat := func(nodeID int64, address string, spreadDuration time.Duration, heartbeatData *APIHeartbeat) {
		defer heartbeatsWg.Done()

		if spreadDuration > 0 {
			// Spread in time by waiting up to 3s less than the interval.
			spreadDurationMs := int(spreadDuration.Milliseconds())
			spreadRange := spreadDurationMs - 3000

			if spreadRange > 0 {
				time.Sleep(time.Duration(rand.Intn(spreadRange)) * time.Millisecond)
			}
		}

		// Update timestamp to current, used for time skew detection.
		// NOTE(review): this write is not performed under the mutex even though other
		// sender goroutines may be encoding heartbeatData concurrently — confirm intended.
		heartbeatData.Time = time.Now().UTC()

		err := HeartbeatNode(ctx, address, networkCert, serverCert, heartbeatData)
		if err == nil {
			heartbeatData.Lock()

			// Ensure only update nodes that exist in Members already.
			hbNode, existing := hbState.Members[nodeID]
			if !existing {
				// Fix: the original returned here while still holding the lock,
				// which would deadlock all subsequent heartbeat processing.
				heartbeatData.Unlock()
				return
			}

			hbNode.LastHeartbeat = time.Now()
			hbNode.Online = true
			hbNode.updated = true
			heartbeatData.Members[nodeID] = hbNode
			heartbeatData.Unlock()
			logger.Debug("Successful heartbeat", log.Ctx{"address": address})

			// Member responded, so resolve any outstanding offline warning for it.
			err = warnings.ResolveWarningsByLocalNodeAndProjectAndTypeAndEntity(hbState.cluster, "", db.WarningOfflineClusterMember, cluster.TypeNode, int(nodeID))
			if err != nil {
				logger.Warn("Failed to resolve warning", log.Ctx{"err": err})
			}
		} else {
			logger.Warn("Failed heartbeat", log.Ctx{"address": address, "err": err})

			// Record (or refresh) an offline warning for the unreachable member.
			err = hbState.cluster.UpsertWarningLocalNode("", cluster.TypeNode, int(nodeID), db.WarningOfflineClusterMember, err.Error())
			if err != nil {
				logger.Warn("Failed to create warning", log.Ctx{"err": err})
			}
		}
	}

	for _, node := range nodes {
		// Special case for the local node - just record the time now.
		if node.Address == localAddress {
			hbState.Lock()
			hbNode := hbState.Members[node.ID]
			hbNode.LastHeartbeat = time.Now()
			hbNode.Online = true
			hbNode.updated = true
			hbState.Members[node.ID] = hbNode
			hbState.Unlock()
			continue
		}

		// Parallelize the rest.
		heartbeatsWg.Add(1)
		go sendHeartbeat(node.ID, node.Address, spreadDuration, hbState)
	}

	heartbeatsWg.Wait()
}
// HeartbeatTask returns a task function that performs leader-initiated heartbeat
// checks against all LXD nodes in the cluster.
//
// It will update the heartbeat timestamp column of the nodes table
// accordingly, and also notify them of the current list of database nodes.
func HeartbeatTask(gateway *Gateway) (task.Func, task.Schedule) {
	// The database APIs are blocking, so run the round in its own goroutine and
	// stop waiting for it as soon as the context expires.
	run := func(ctx context.Context) {
		done := make(chan struct{})

		go func() {
			gateway.heartbeat(ctx, hearbeatNormal)
			done <- struct{}{}
		}()

		select {
		case <-done:
		case <-ctx.Done():
		}
	}

	every := func() (time.Duration, error) {
		return task.Every(gateway.heartbeatInterval())()
	}

	return run, every
}
// heartbeatInterval returns the delay between heartbeat rounds: half of the
// configured offline threshold, falling back to db.DefaultOfflineThreshold
// when no (positive) threshold is configured.
func (g *Gateway) heartbeatInterval() time.Duration {
	if g.HeartbeatOfflineThreshold > 0 {
		return g.HeartbeatOfflineThreshold / 2
	}

	return time.Duration(db.DefaultOfflineThreshold) * time.Second / 2
}
// heartbeatRestart restarts cancels any ongoing heartbeat and restarts it.
// If there is no ongoing heartbeat then this is a no-op.
// Returns true if new heartbeat round was started.
func (g *Gateway) heartbeatRestart() bool {
	g.heartbeatCancelLock.Lock() // Make sure we're the only ones inspecting the g.heartbeatCancel var.

	// There is a cancellable heartbeat round ongoing.
	if g.heartbeatCancel != nil {
		g.heartbeatCancel()            // Request ongoing hearbeat round cancel itself.
		g.heartbeatCancel = nil        // Indicate there is no further cancellable heartbeat round.
		g.heartbeatCancelLock.Unlock() // Release lock ready for g.heartbeat to acquire it.

		// Start a new heartbeat round async that will run as soon as ongoing heartbeat round exits.
		// The new round blocks on g.heartbeatLock inside g.heartbeat until the cancelled one returns.
		go g.heartbeat(g.ctx, hearbeatImmediate)

		return true
	}

	// No cancellable heartbeat round, release lock.
	g.heartbeatCancelLock.Unlock()

	return false
}
// heartbeat performs a single heartbeat round as the cluster leader: it refreshes the
// local raft_nodes table, sends the current member state to the other members, records
// their responses in the database, and finally runs the optional HeartbeatNodeHook.
// mode controls scheduling: normal rounds are spread over the heartbeat interval,
// while immediate/initial rounds run as fast as possible.
func (g *Gateway) heartbeat(ctx context.Context, mode heartbeatMode) {
	// Avoid concurent heartbeat loops.
	// This is possible when both the regular task and the out of band heartbeat round from a dqlite
	// connection or notification restart both kick in at the same time.
	g.heartbeatLock.Lock()
	defer g.heartbeatLock.Unlock()

	// Acquire the cancellation lock and populate it so that this heartbeat round can be cancelled if a
	// notification cancellation request arrives during the round. Also setup a defer so that the cancellation
	// function is set to nil when this function ends to indicate there is no ongoing heartbeat round.
	g.heartbeatCancelLock.Lock()
	ctx, g.heartbeatCancel = context.WithCancel(ctx)
	defer func() {
		g.heartbeatCancelLock.Lock()
		if g.heartbeatCancel != nil {
			g.heartbeatCancel()
			g.heartbeatCancel = nil
		}
		g.heartbeatCancelLock.Unlock()
	}()
	g.heartbeatCancelLock.Unlock()

	if g.Cluster == nil || g.server == nil || g.memoryDial != nil {
		// We're not a raft node or we're not clustered
		return
	}

	raftNodes, err := g.currentRaftNodes()
	if err != nil {
		// Not being the leader is expected on most members; exit quietly.
		if errors.Cause(err) == ErrNotLeader {
			return
		}

		logger.Error("Failed to get current raft members", log.Ctx{"err": err})
		return
	}

	// Address of this node.
	// NOTE(review): an error here is only logged, so the round proceeds with an empty
	// localAddress until the explicit check further down — confirm this is intended.
	localAddress, err := node.ClusterAddress(g.db)
	if err != nil {
		logger.Error("Failed to fetch local cluster address", log.Ctx{"err": err})
	}

	// Fetch the full list of cluster members from the global database.
	var allNodes []db.NodeInfo
	err = g.Cluster.Transaction(func(tx *db.ClusterTx) error {
		var err error
		allNodes, err = tx.GetNodes()
		if err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		logger.Warn("Failed to get current cluster members", log.Ctx{"err": err})
		return
	}

	modeStr := "normal"
	switch mode {
	case hearbeatImmediate:
		modeStr = "immediate"
	case hearbeatInitial:
		modeStr = "initial"
	}

	if mode != hearbeatNormal {
		// Log unscheduled heartbeats with a higher level than normal heartbeats.
		logger.Info("Starting heartbeat round", log.Ctx{"mode": modeStr, "address": localAddress})
	} else {
		// Don't spam the normal log with regular heartbeat messages.
		logger.Debug("Starting heartbeat round", log.Ctx{"mode": modeStr, "address": localAddress})
	}

	// Replace the local raft_nodes table immediately because it
	// might miss a row containing ourselves, since we might have
	// been elected leader before the former leader had chance to
	// send us a fresh update through the heartbeat pool.
	logger.Debug("Heartbeat updating local raft members", log.Ctx{"members": raftNodes})
	err = g.db.Transaction(func(tx *db.NodeTx) error {
		return tx.ReplaceRaftNodes(raftNodes)
	})
	if err != nil {
		logger.Warn("Failed to replace local raft members", log.Ctx{"err": err})
		return
	}

	if localAddress == "" {
		logger.Warn("No local address set, aborting heartbeat round")
		return
	}

	startTime := time.Now()
	heartbeatInterval := g.heartbeatInterval()

	// Cumulative set of node states (will be written back to database once done).
	hbState := &APIHeartbeat{cluster: g.Cluster}

	// If we are doing a normal heartbeat round then spread the requests over the heartbeatInterval in order
	// to reduce load on the cluster.
	spreadDuration := time.Duration(0)
	if mode == hearbeatNormal {
		spreadDuration = heartbeatInterval
	}

	// If this leader node hasn't sent a heartbeat recently, then its node state records
	// are likely out of date, this can happen when a node becomes a leader.
	// Send stale set to all nodes in database to get a fresh set of active nodes.
	if mode == hearbeatInitial {
		hbState.Update(false, raftNodes, allNodes, g.HeartbeatOfflineThreshold)
		hbState.Send(ctx, g.networkCert, g.serverCert(), localAddress, allNodes, spreadDuration)

		// We have the latest set of node states now, lets send that state set to all nodes.
		hbState.FullStateList = true
		hbState.Send(ctx, g.networkCert, g.serverCert(), localAddress, allNodes, spreadDuration)
	} else {
		hbState.Update(true, raftNodes, allNodes, g.HeartbeatOfflineThreshold)
		hbState.Send(ctx, g.networkCert, g.serverCert(), localAddress, allNodes, spreadDuration)
	}

	// Look for any new node which appeared since sending last heartbeat.
	var currentNodes []db.NodeInfo
	err = g.Cluster.Transaction(func(tx *db.ClusterTx) error {
		var err error
		currentNodes, err = tx.GetNodes()
		if err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		logger.Warn("Failed to get current cluster members", log.Ctx{"err": err})
		return
	}

	newNodes := []db.NodeInfo{}
	for _, currentNode := range currentNodes {
		existing := false
		for _, node := range allNodes {
			if node.Address == currentNode.Address && node.ID == currentNode.ID {
				existing = true
				break
			}
		}

		if !existing {
			// We found a new node
			allNodes = append(allNodes, currentNode)
			newNodes = append(newNodes, currentNode)
		}
	}

	// If any new nodes found, send heartbeat to just them (with full node state).
	if len(newNodes) > 0 {
		hbState.Update(true, raftNodes, allNodes, g.HeartbeatOfflineThreshold)
		hbState.Send(ctx, g.networkCert, g.serverCert(), localAddress, newNodes, 0)
	}

	// If the context has been cancelled, return immediately.
	err = ctx.Err()
	if err != nil {
		logger.Warn("Aborting heartbeat round", log.Ctx{"err": err})
		return
	}

	// Write the refreshed heartbeat times back to the database, retrying on transient
	// failures, while collecting the addresses of members that never responded.
	var unavailableMembers []string
	err = query.Retry(func() error {
		return g.Cluster.Transaction(func(tx *db.ClusterTx) error {
			for _, node := range hbState.Members {
				if !node.updated {
					// If member has not been updated during this heartbeat round it means
					// they are currently unreachable or rejecting heartbeats due to being
					// in the process of shutting down. Eitherway we do not want to use this
					// member as a candidate for role promotion.
					unavailableMembers = append(unavailableMembers, node.Address)
					continue
				}

				err := tx.SetNodeHeartbeat(node.Address, node.LastHeartbeat)
				if err != nil && errors.Cause(err) != db.ErrNoSuchObject {
					return errors.Wrapf(err, "Failed updating heartbeat time for member %q", node.Address)
				}
			}

			return nil
		})
	})
	if err != nil {
		logger.Error("Failed updating cluster heartbeats", log.Ctx{"err": err})
		return
	}

	// If full node state was sent and node refresh task is specified, run it async.
	if g.HeartbeatNodeHook != nil {
		g.HeartbeatNodeHook(hbState, true, unavailableMembers)
	}

	duration := time.Now().Sub(startTime)
	if duration > heartbeatInterval {
		logger.Warn("Heartbeat round duration greater than heartbeat interval", log.Ctx{"duration": duration, "interval": heartbeatInterval})
	}

	// Update last leader heartbeat time so next time a full node state list can be sent (if not this time).
	logger.Debug("Completed heartbeat round", log.Ctx{"duration": duration, "address": localAddress})
}
// HeartbeatNode performs a single heartbeat request against the node with the given address.
// The heartbeat state is JSON encoded under its lock and PUT to the node's internal
// database endpoint; a non-200 response is converted into an error.
func HeartbeatNode(taskCtx context.Context, address string, networkCert *shared.CertInfo, serverCert *shared.CertInfo, heartbeatData *APIHeartbeat) error {
	logger.Debug("Sending heartbeat request", log.Ctx{"address": address})

	config, err := tlsClientConfig(networkCert, serverCert)
	if err != nil {
		return err
	}

	timeout := 2 * time.Second
	url := fmt.Sprintf("https://%s%s", address, databaseEndpoint)
	transport, cleanup := tlsTransport(config)
	defer cleanup()
	client := &http.Client{
		Transport: transport,
		Timeout:   timeout,
	}

	// Encode the heartbeat state under the lock, as sender goroutines mutate it concurrently.
	buffer := bytes.Buffer{}
	heartbeatData.Lock()
	err = json.NewEncoder(&buffer).Encode(heartbeatData)
	heartbeatData.Unlock()
	if err != nil {
		return err
	}

	// Use 1s later timeout to give HTTP client chance timeout with more useful info.
	ctx, cancel := context.WithTimeout(taskCtx, timeout+time.Second)
	defer cancel()

	// Bind the request to the context at construction time rather than via WithContext.
	request, err := http.NewRequestWithContext(ctx, "PUT", url, bytes.NewReader(buffer.Bytes()))
	if err != nil {
		return err
	}

	setDqliteVersionHeader(request)
	request.Close = true // Immediately close the connection after the request is done

	response, err := client.Do(request)
	if err != nil {
		return errors.Wrap(err, "Failed to send heartbeat request")
	}
	defer response.Body.Close()

	if response.StatusCode != http.StatusOK {
		// Fix: pass the status text as an argument rather than as the format string,
		// so any "%" characters in it cannot corrupt the message.
		return fmt.Errorf("Heartbeat request failed with status: %w", api.StatusErrorf(response.StatusCode, "%s", response.Status))
	}

	return nil
}
|
package emitter_test
import (
"code.google.com/p/gogoprotobuf/proto"
"github.com/cloudfoundry-incubator/dropsonde/emitter"
"github.com/cloudfoundry-incubator/dropsonde/events"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"net"
"os"
)
// UdpEmitter behavioral specs: Emit() delivery with and without a listening agent,
// and NewUdpEmitter() construction under various environment configurations.
var _ = Describe("UdpEmitter", func() {
	Describe("Emit()", func() {
		var udpEmitter emitter.Emitter
		var testEvent *events.DropsondeStatus

		BeforeEach(func() {
			// The emitter requires BOSH job identity from the environment.
			os.Setenv("BOSH_JOB_NAME", "awesome_job")
			os.Setenv("BOSH_JOB_INSTANCE", "1")
			testEvent = &events.DropsondeStatus{SentCount: proto.Uint64(1), ErrorCount: proto.Uint64(0)}
			udpEmitter, _ = emitter.NewUdpEmitter()
		})

		Context("when the agent is listening", func() {
			var agentListener net.PacketConn

			BeforeEach(func() {
				// Listen on the UDP port the emitter targets, acting as the agent.
				agentListener, _ = net.ListenPacket("udp", ":42420")
			})

			AfterEach(func() {
				agentListener.Close()
			})

			It("should send the envelope as a []byte", func(done Done) {
				err := udpEmitter.Emit(testEvent)
				Expect(err).To(BeNil())

				// Read the datagram the emitter sent and decode it as an Envelope.
				buffer := make([]byte, 4096)
				readCount, _, err := agentListener.ReadFrom(buffer)
				Expect(err).To(BeNil())

				var envelope events.Envelope
				err = proto.Unmarshal(buffer[:readCount], &envelope)
				Expect(err).To(BeNil())
				Expect(envelope.GetEventType()).To(Equal(events.Envelope_DropsondeStatus))
				Expect(envelope.GetDropsondeStatus()).To(Equal(testEvent))

				close(done)
			})
		})

		Context("when the agent is not listening", func() {
			It("should attempt to send the envelope", func() {
				// UDP is connectionless, so Emit reports no error with no receiver.
				err := udpEmitter.Emit(testEvent)
				Expect(err).To(BeNil())
			})

			Context("then the agent starts Listening", func() {
				It("should eventually send envelopes as a []byte", func(done Done) {
					// First emit with no listener: the datagram is lost but no error occurs.
					err := udpEmitter.Emit(testEvent)
					Expect(err).To(BeNil())

					agentListener, err := net.ListenPacket("udp", ":42420")
					Expect(err).To(BeNil())

					// Second emit should reach the now-listening agent.
					err = udpEmitter.Emit(testEvent)
					Expect(err).To(BeNil())

					buffer := make([]byte, 4096)
					readCount, _, err := agentListener.ReadFrom(buffer)
					Expect(err).To(BeNil())

					var envelope events.Envelope
					err = proto.Unmarshal(buffer[:readCount], &envelope)
					Expect(err).To(BeNil())
					Expect(envelope.GetEventType()).To(Equal(events.Envelope_DropsondeStatus))
					Expect(envelope.GetDropsondeStatus()).To(Equal(testEvent))

					close(done)
				})
			})
		})
	})

	Describe("NewUdpEmitter()", func() {
		Context("with missing environment variables", func() {
			BeforeEach(func() {
				os.Setenv("BOSH_JOB_NAME", "")
				os.Setenv("BOSH_JOB_INSTANCE", "")
			})

			It("returns an error", func() {
				emitter, err := emitter.NewUdpEmitter()
				Expect(emitter).To(BeNil())
				Expect(err).ToNot(BeNil())
				Expect(err.Error()).To(Equal("BOSH_JOB_NAME or BOSH_JOB_INSTANCE not set"))
			})
		})

		Context("when ResolveUDPAddr fails", func() {
			var originalDefaultAddress string

			BeforeEach(func() {
				os.Setenv("BOSH_JOB_NAME", "test-job-name")
				os.Setenv("BOSH_JOB_INSTANCE", "0")

				// Point the emitter at an unresolvable address; restored in AfterEach.
				originalDefaultAddress = emitter.DefaultAddress
				emitter.DefaultAddress = "invalid-address:"
			})

			AfterEach(func() {
				emitter.DefaultAddress = originalDefaultAddress
			})

			It("returns an error", func() {
				emitter, err := emitter.NewUdpEmitter()
				Expect(emitter).To(BeNil())
				Expect(err).ToNot(BeNil())
				Expect(err.Error()).To(ContainSubstring("unknown port"))
			})
		})

		Context("when all is good", func() {
			BeforeEach(func() {
				os.Setenv("BOSH_JOB_NAME", "test-job-name")
				os.Setenv("BOSH_JOB_INSTANCE", "0")
			})

			It("creates an emitter", func() {
				emitter, err := emitter.NewUdpEmitter()
				Expect(emitter).ToNot(BeNil())
				Expect(err).To(BeNil())
			})
		})
	})
})
[#67663654] Don't care about the error message when ResolveUDPAddr
fails.
Signed-off-by: caleb miles <906dbe5b21c7025e252f807d6724eb4d0fd7fb4c@pivotallabs.com>
package emitter_test
import (
"code.google.com/p/gogoprotobuf/proto"
"github.com/cloudfoundry-incubator/dropsonde/emitter"
"github.com/cloudfoundry-incubator/dropsonde/events"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"net"
"os"
)
// Ginkgo spec for the UDP emitter. It verifies two things:
//   - Emit() marshals a DropsondeStatus event into an events.Envelope and
//     sends it as a protobuf-encoded []byte over UDP, whether or not the
//     agent happens to be listening at send time.
//   - NewUdpEmitter() fails when the required BOSH_JOB_NAME /
//     BOSH_JOB_INSTANCE environment variables are unset or when
//     emitter.DefaultAddress cannot be resolved. In the resolve-failure
//     case only the presence of an error is asserted, not its message.
var _ = Describe("UdpEmitter", func() {
Describe("Emit()", func() {
var udpEmitter emitter.Emitter
var testEvent *events.DropsondeStatus
BeforeEach(func() {
// NewUdpEmitter reads these env vars; give it valid values so
// construction succeeds (its error is deliberately discarded here).
os.Setenv("BOSH_JOB_NAME", "awesome_job")
os.Setenv("BOSH_JOB_INSTANCE", "1")
testEvent = &events.DropsondeStatus{SentCount: proto.Uint64(1), ErrorCount: proto.Uint64(0)}
udpEmitter, _ = emitter.NewUdpEmitter()
})
Context("when the agent is listening", func() {
var agentListener net.PacketConn
BeforeEach(func() {
// Port 42420 is presumably where emitter.DefaultAddress points —
// TODO confirm against the emitter package.
agentListener, _ = net.ListenPacket("udp", ":42420")
})
AfterEach(func() {
agentListener.Close()
})
// func(done Done) makes this an async Ginkgo spec: the blocking
// ReadFrom below fails the test via timeout instead of hanging.
It("should send the envelope as a []byte", func(done Done) {
err := udpEmitter.Emit(testEvent)
Expect(err).To(BeNil())
buffer := make([]byte, 4096)
readCount, _, err := agentListener.ReadFrom(buffer)
Expect(err).To(BeNil())
// The datagram must decode back into an Envelope wrapping the
// exact event that was emitted.
var envelope events.Envelope
err = proto.Unmarshal(buffer[:readCount], &envelope)
Expect(err).To(BeNil())
Expect(envelope.GetEventType()).To(Equal(events.Envelope_DropsondeStatus))
Expect(envelope.GetDropsondeStatus()).To(Equal(testEvent))
close(done)
})
})
Context("when the agent is not listening", func() {
// UDP is connectionless, so Emit should succeed even with no receiver.
It("should attempt to send the envelope", func() {
err := udpEmitter.Emit(testEvent)
Expect(err).To(BeNil())
})
Context("then the agent starts Listening", func() {
It("should eventually send envelopes as a []byte", func(done Done) {
// First emit goes nowhere (no listener yet) but must not error.
err := udpEmitter.Emit(testEvent)
Expect(err).To(BeNil())
agentListener, err := net.ListenPacket("udp", ":42420")
Expect(err).To(BeNil())
// Second emit should reach the newly started listener.
err = udpEmitter.Emit(testEvent)
Expect(err).To(BeNil())
buffer := make([]byte, 4096)
readCount, _, err := agentListener.ReadFrom(buffer)
Expect(err).To(BeNil())
var envelope events.Envelope
err = proto.Unmarshal(buffer[:readCount], &envelope)
Expect(err).To(BeNil())
Expect(envelope.GetEventType()).To(Equal(events.Envelope_DropsondeStatus))
Expect(envelope.GetDropsondeStatus()).To(Equal(testEvent))
close(done)
})
})
})
})
Describe("NewUdpEmitter()", func() {
Context("with missing environment variables", func() {
BeforeEach(func() {
os.Setenv("BOSH_JOB_NAME", "")
os.Setenv("BOSH_JOB_INSTANCE", "")
})
It("returns an error", func() {
// Local `emitter` shadows the package name inside this closure.
emitter, err := emitter.NewUdpEmitter()
Expect(emitter).To(BeNil())
Expect(err).ToNot(BeNil())
Expect(err.Error()).To(Equal("BOSH_JOB_NAME or BOSH_JOB_INSTANCE not set"))
})
})
Context("when ResolveUDPAddr fails", func() {
var originalDefaultAddress string
BeforeEach(func() {
os.Setenv("BOSH_JOB_NAME", "test-job-name")
os.Setenv("BOSH_JOB_INSTANCE", "0")
// Swap in an unresolvable address (empty port); restored in AfterEach
// so later specs are unaffected.
originalDefaultAddress = emitter.DefaultAddress
emitter.DefaultAddress = "invalid-address:"
})
AfterEach(func() {
emitter.DefaultAddress = originalDefaultAddress
})
// Only the presence of an error matters here; the message text is
// deliberately not asserted.
It("returns an error", func() {
emitter, err := emitter.NewUdpEmitter()
Expect(emitter).To(BeNil())
Expect(err).ToNot(BeNil())
})
})
Context("when all is good", func() {
BeforeEach(func() {
os.Setenv("BOSH_JOB_NAME", "test-job-name")
os.Setenv("BOSH_JOB_INSTANCE", "0")
})
It("creates an emitter", func() {
emitter, err := emitter.NewUdpEmitter()
Expect(emitter).ToNot(BeNil())
Expect(err).To(BeNil())
})
})
})
})
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
)
// The comments for the structs and fields can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored and not exported to the SwaggerAPI.
//
// The aforementioned methods can be generated by hack/update-generated-swagger-docs.sh
// Common string formats
// ---------------------
// Many fields in this API have formatting requirements. The commonly used
// formats are defined here.
//
// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier"
// in the C language. This is captured by the following regex:
// [A-Za-z_][A-Za-z0-9_]*
// This defines the format, but not the length restriction, which should be
// specified at the definition of any field of this type.
//
// DNS_LABEL: This is a string, no more than 63 characters long, that conforms
// to the definition of a "label" in RFCs 1035 and 1123. This is captured
// by the following regex:
// [a-z0-9]([-a-z0-9]*[a-z0-9])?
//
// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms
// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured
// by the following regex:
// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
// or more simply:
// DNS_LABEL(\.DNS_LABEL)*
//
// IANA_SVC_NAME: This is a string, no more than 15 characters long, that
// conforms to the definition of IANA service name in RFC 6335.
// It must contain at least one letter [a-z] and it must contain only [a-z0-9-].
// Hyphens ('-') cannot be leading or trailing character of the string
// and cannot be adjacent to other hyphens.
// ObjectMeta is metadata that all persisted resources must have, which includes all objects
// users must create.
// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon.
// +k8s:openapi-gen=false
type ObjectMeta struct {
// Name must be unique within a namespace. Is required when creating resources, although
// some resources may allow a client to request the generation of an appropriate name
// automatically. Name is primarily intended for creation idempotence and configuration
// definition.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// GenerateName is an optional prefix, used by the server, to generate a unique
// name ONLY IF the Name field has not been provided.
// If this field is used, the name returned to the client will be different
// than the name passed. This value will also be combined with a unique suffix.
// The provided value has the same validation rules as the Name field,
// and may be truncated by the length of the suffix required to make the value
// unique on the server.
//
// If this field is specified and the generated name exists, the server will
// NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
// ServerTimeout indicating a unique name could not be found in the time allotted, and the client
// should retry (optionally after the time indicated in the Retry-After header).
//
// Applied only if Name is not specified.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency
// +optional
GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"`
// Namespace defines the space within each name must be unique. An empty namespace is
// equivalent to the "default" namespace, but "default" is the canonical representation.
// Not all objects are required to be scoped to a namespace - the value of this field for
// those objects will be empty.
//
// Must be a DNS_LABEL.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
// +optional
Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
// SelfLink is a URL representing this object.
// Populated by the system.
// Read-only.
// +optional
SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"`
// UID is the unique in time and space value for this object. It is typically generated by
// the server on successful creation of a resource and is not allowed to change on PUT
// operations.
//
// Populated by the system.
// Read-only.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
// +optional
UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
// An opaque value that represents the internal version of this object that can
// be used by clients to determine when objects have changed. May be used for optimistic
// concurrency, change detection, and the watch operation on a resource or set of resources.
// Clients must treat these values as opaque and passed unmodified back to the server.
// They may only be valid for a particular resource or set of resources.
//
// Populated by the system.
// Read-only.
// Value must be treated as opaque by clients.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
// +optional
ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
// A sequence number representing a specific generation of the desired state.
// Populated by the system. Read-only.
// +optional
Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"`
// CreationTimestamp is a timestamp representing the server time when this object was
// created. It is not guaranteed to be set in happens-before order across separate operations.
// Clients may not set this value. It is represented in RFC3339 form and is in UTC.
//
// Populated by the system.
// Read-only.
// Null for lists.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
CreationTimestamp metav1.Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"`
// DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
// field is set by the server when a graceful deletion is requested by the user, and is not
// directly settable by a client. The resource is expected to be deleted (no longer visible
// from resource lists, and not reachable by name) after the time in this field. Once set,
// this value may not be unset or be set further into the future, although it may be shortened
// or the resource may be deleted prior to this time. For example, a user may request that
// a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination
// signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard
// termination signal (SIGKILL) to the container and after cleanup, remove the pod from the
// API. In the presence of network partitions, this object may still exist after this
// timestamp, until an administrator or automated process can determine the resource is
// fully terminated.
// If not set, graceful deletion of the object has not been requested.
//
// Populated by the system when a graceful deletion is requested.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
DeletionTimestamp *metav1.Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"`
// Number of seconds allowed for this object to gracefully terminate before
// it will be removed from the system. Only set when deletionTimestamp is also set.
// May only be shortened.
// Read-only.
// +optional
DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"`
// Map of string keys and values that can be used to organize and categorize
// (scope and select) objects. May match selectors of replication controllers
// and services.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
// +optional
Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"`
// Annotations is an unstructured key value map stored with a resource that may be
// set by external tools to store and retrieve arbitrary metadata. They are not
// queryable and should be preserved when modifying objects.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
// +optional
Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"`
// List of objects depended by this object. If ALL objects in the list have
// been deleted, this object will be garbage collected. If this object is managed by a controller,
// then an entry in this list will point to this controller, with the controller field set to true.
// There cannot be more than one managing controller.
// +optional
// +patchMergeKey=uid
// +patchStrategy=merge
OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"`
// An initializer is a controller which enforces some system invariant at object creation time.
// This field is a list of initializers that have not yet acted on this object. If nil or empty,
// this object has been completely initialized. Otherwise, the object is considered uninitialized
// and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to
// observe uninitialized objects.
//
// When an object is created, the system will populate this list with the current set of initializers.
// Only privileged users may set or modify this list. Once it is empty, it may not be modified further
// by any user.
// +optional
Initializers *metav1.Initializers `json:"initializers,omitempty" patchStrategy:"merge" protobuf:"bytes,16,rep,name=initializers"`
// Must be empty before the object is deleted from the registry. Each entry
// is an identifier for the responsible component that will remove the entry
// from the list. If the deletionTimestamp of the object is non-nil, entries
// in this list can only be removed.
// +optional
// +patchStrategy=merge
Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"`
// The name of the cluster which the object belongs to.
// This is used to distinguish resources with same name and namespace in different clusters.
// This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
// +optional
ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"`
}
// NamespaceDefault means the object is in the default namespace which is
// applied when not specified by clients.
const NamespaceDefault string = "default"

// NamespaceAll is the default argument to specify on a context when you want
// to list or filter resources across all namespaces.
const NamespaceAll string = ""
// Volume represents a named volume in a pod that may be accessed by any container in the pod.
type Volume struct {
// Volume's name.
// Must be a DNS_LABEL and unique within the pod.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// VolumeSource represents the location and type of the mounted volume.
// If not specified, the Volume is implied to be an EmptyDir.
// This implied behavior is deprecated and will be removed in a future version.
// Embedded inline: the VolumeSource's fields serialize as siblings of
// "name" in JSON rather than under a nested key.
VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"`
}
// Represents the source of a volume to mount.
// Only one of its members may be specified.
// Note: protobuf field numbers are append-only and therefore do not follow
// declaration order (e.g. Projected is 26 but declared before PortworxVolume's 24).
type VolumeSource struct {
// HostPath represents a pre-existing file or directory on the host
// machine that is directly exposed to the container. This is generally
// used for system agents or other privileged things that are allowed
// to see the host machine. Most containers will NOT need this.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
// ---
// TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
// mount host directories as read/write.
// +optional
HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"`
// EmptyDir represents a temporary directory that shares a pod's lifetime.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
// +optional
EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"`
// GCEPersistentDisk represents a GCE Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"`
// AWSElasticBlockStore represents an AWS Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"`
// GitRepo represents a git repository at a particular revision.
// +optional
GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"`
// Secret represents a secret that should populate this volume.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
// +optional
Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"`
// NFS represents an NFS mount on the host that shares a pod's lifetime
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
// +optional
NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"`
// ISCSI represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md
// +optional
ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"`
// Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
// +optional
Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"`
// PersistentVolumeClaimVolumeSource represents a reference to a
// PersistentVolumeClaim in the same namespace.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
// +optional
PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"`
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
// +optional
RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"`
// FlexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin. This is an
// alpha feature and may change in future.
// +optional
FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
// Cinder represents a cinder volume attached and mounted on kubelets host machine
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
// +optional
Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"`
// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
// +optional
CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"`
// Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
// +optional
Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"`
// DownwardAPI represents downward API about the pod that should populate this volume
// +optional
DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"`
// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
// +optional
FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"`
// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
// +optional
AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"`
// ConfigMap represents a configMap that should populate this volume
// +optional
ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"`
// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
// +optional
VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"`
// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
// +optional
Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"`
// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
// +optional
AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"`
// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
// +optional
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"`
// Items for all in one resources secrets, configmaps, and downward API
// +optional
Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"`
// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
// +optional
PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"`
// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
// +optional
ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"`
// StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
// +optional
StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"`
}
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
// This volume finds the bound PV and mounts that volume for the pod. A
// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
// type of volume that is owned by someone else (the system).
type PersistentVolumeClaimVolumeSource struct {
// ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"`
// Will force the ReadOnly setting in VolumeMounts.
// Default false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
}
// PersistentVolumeSource is similar to VolumeSource but meant for the
// administrator who creates PVs. Exactly one of its members must be set.
type PersistentVolumeSource struct {
// GCEPersistentDisk represents a GCE Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"`
// AWSElasticBlockStore represents an AWS Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"`
// HostPath represents a directory on the host.
// Provisioned by a developer or tester.
// This is useful for single-node development and testing only!
// On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
// +optional
HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"`
// Glusterfs represents a Glusterfs volume that is attached to a host and
// exposed to the pod. Provisioned by an admin.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
// +optional
Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"`
// NFS represents an NFS mount on the host. Provisioned by an admin.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
// +optional
NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"`
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
// +optional
RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"`
// ISCSI represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
// +optional
ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"`
// Cinder represents a cinder volume attached and mounted on kubelets host machine
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
// +optional
Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"`
// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
// +optional
CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"`
// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
// +optional
FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"`
// Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running
// +optional
Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"`
// FlexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin. This is an
// alpha feature and may change in future.
// +optional
FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
// +optional
AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"`
// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
// +optional
VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"`
// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
// +optional
Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"`
// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
// +optional
AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"`
// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
// +optional
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"`
// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
// +optional
PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"`
// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
// +optional
ScaleIO *ScaleIOPersistentVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"`
// Local represents directly-attached storage with node affinity
// +optional
Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"`
// StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
// More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md
// +optional
StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"`
// CSI represents storage that handled by an external CSI driver
// +optional
CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"`
}
// BetaStorageClassAnnotation represents the beta/previous StorageClass
// annotation. It's currently still used and will be held for backwards
// compatibility.
const BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"

// MountOptionAnnotation defines the mount-option annotation used on
// PersistentVolumes.
const MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options"

// AlphaStorageNodeAffinityAnnotation defines node affinity policies for a
// PersistentVolume. Its value is the JSON representation of type NodeAffinity.
const AlphaStorageNodeAffinityAnnotation = "volume.alpha.kubernetes.io/node-affinity"
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PersistentVolume (PV) is a storage resource provisioned by an administrator.
// It is analogous to a node.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
type PersistentVolume struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines a specification of a persistent volume owned by the cluster.
// Provisioned by an administrator.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
// +optional
Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Status represents the current information/status for the persistent volume.
// Populated by the system.
// Read-only.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
// +optional
Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// PersistentVolumeSpec is the specification of a persistent volume.
type PersistentVolumeSpec struct {
	// A description of the persistent volume's resources and capacity.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
	// +optional
	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
	// The actual volume backing the persistent volume.
	PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"`
	// AccessModes contains all ways the volume can be mounted.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
	// Expected to be non-nil when bound.
	// claim.VolumeName is the authoritative bind between PV and PVC.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding
	// +optional
	ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"`
	// What happens to a persistent volume when released from its claim.
	// Valid options are Retain (default), Delete, and Recycle.
	// Recycling must be supported by the volume plugin underlying this persistent volume.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming
	// +optional
	PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"`
	// Name of StorageClass to which this persistent volume belongs. Empty value
	// means that this volume does not belong to any StorageClass.
	// +optional
	StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"`
	// A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will
	// simply fail if one is invalid.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options
	// +optional
	MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"`
	// volumeMode defines if a volume is intended to be used with a formatted filesystem
	// or to remain in raw block state. Value of Filesystem is implied when not included in spec.
	// This is an alpha feature and may change in the future.
	// +optional
	VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"`
}
// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes.
// Valid values are Recycle, Delete, and Retain (the default).
type PersistentVolumeReclaimPolicy string

const (
	// PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
	// The volume plugin must support Recycling.
	PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle"
	// PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim.
	// The volume plugin must support Deletion.
	PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete"
	// PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator.
	// The default policy is Retain.
	PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain"
)
// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem.
// Valid values are Block and Filesystem.
type PersistentVolumeMode string

const (
	// PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device.
	PersistentVolumeBlock PersistentVolumeMode = "Block"
	// PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem.
	PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem"
)
// PersistentVolumeStatus is the current status of a persistent volume.
type PersistentVolumeStatus struct {
	// Phase indicates if a volume is available, bound to a claim, or released by a claim.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase
	// +optional
	Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"`
	// A human-readable message indicating details about why the volume is in this state.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
	// Reason is a brief CamelCase string that describes any failure and is meant
	// for machine parsing and tidy display in the CLI.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolumeList is a list of PersistentVolume items.
type PersistentVolumeList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of persistent volumes.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
	Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolumeClaim is a user's request for and claim to a persistent volume.
type PersistentVolumeClaim struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Spec defines the desired characteristics of a volume requested by a pod author.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`

	// Status represents the current information/status of a persistent volume claim.
	// Read-only.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
type PersistentVolumeClaimList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// A list of persistent volume claims.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// PersistentVolumeClaimSpec describes the common attributes of storage devices
// and allows a Source for provider-specific attributes.
// Note: protobuf field numbers reflect the order fields were added, not their
// declaration order (e.g. Selector is field 4, Resources is field 2).
type PersistentVolumeClaimSpec struct {
	// AccessModes contains the desired access modes the volume should have.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// A label query over volumes to consider for binding.
	// +optional
	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
	// Resources represents the minimum resources the volume should have.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
	// +optional
	Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"`
	// VolumeName is the binding reference to the PersistentVolume backing this claim.
	// +optional
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"`
	// Name of the StorageClass required by the claim.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
	// +optional
	StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"`
	// volumeMode defines what type of volume is required by the claim.
	// Value of Filesystem is implied when not included in claim spec.
	// This is an alpha feature and may change in the future.
	// +optional
	VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"`
}
// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type.
type PersistentVolumeClaimConditionType string

const (
	// PersistentVolumeClaimResizing means a user-triggered resize of the PVC has been started.
	PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
)
// PersistentVolumeClaimCondition contains details about the state of a PVC.
type PersistentVolumeClaimCondition struct {
	// Type is the type of the condition.
	Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"`
	// Status is the status of the condition.
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// Last time we probed the condition.
	// +optional
	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
	// Last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
	// Unique, this should be a short, machine understandable string that gives the reason
	// for condition's last transition. If it reports "ResizeStarted" that means the underlying
	// persistent volume is being resized.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
	// Human-readable message indicating details about last transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
type PersistentVolumeClaimStatus struct {
	// Phase represents the current phase of PersistentVolumeClaim.
	// +optional
	Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"`
	// AccessModes contains the actual access modes the volume backing the PVC has.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// Represents the actual resources of the underlying volume.
	// +optional
	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
	// Current Condition of persistent volume claim. If underlying persistent volume is being
	// resized then the Condition will be set to 'ResizeStarted'.
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
}
// PersistentVolumeAccessMode describes how a volume can be mounted:
// read/write by a single host, or read-only/read-write by many hosts.
type PersistentVolumeAccessMode string

const (
	// ReadWriteOnce - can be mounted in read/write mode to exactly 1 host.
	ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
	// ReadOnlyMany - can be mounted in read-only mode to many hosts.
	ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
	// ReadWriteMany - can be mounted in read/write mode to many hosts.
	ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
)
// PersistentVolumePhase describes the lifecycle phase of a PersistentVolume:
// Pending, Available, Bound, Released, or Failed.
type PersistentVolumePhase string

const (
	// VolumePending is used for PersistentVolumes that are not available.
	VolumePending PersistentVolumePhase = "Pending"
	// VolumeAvailable is used for PersistentVolumes that are not yet bound.
	// Available volumes are held by the binder and matched to PersistentVolumeClaims.
	VolumeAvailable PersistentVolumePhase = "Available"
	// VolumeBound is used for PersistentVolumes that are bound.
	VolumeBound PersistentVolumePhase = "Bound"
	// VolumeReleased is used for PersistentVolumes where the bound PersistentVolumeClaim was deleted.
	// Released volumes must be recycled before becoming available again.
	// This phase is used by the persistent volume claim binder to signal to another process to reclaim the resource.
	VolumeReleased PersistentVolumePhase = "Released"
	// VolumeFailed is used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim.
	VolumeFailed PersistentVolumePhase = "Failed"
)
// PersistentVolumeClaimPhase describes the lifecycle phase of a
// PersistentVolumeClaim: Pending, Bound, or Lost.
type PersistentVolumeClaimPhase string

const (
	// ClaimPending is used for PersistentVolumeClaims that are not yet bound.
	ClaimPending PersistentVolumeClaimPhase = "Pending"
	// ClaimBound is used for PersistentVolumeClaims that are bound.
	ClaimBound PersistentVolumeClaimPhase = "Bound"
	// ClaimLost is used for PersistentVolumeClaims that lost their underlying
	// PersistentVolume. The claim was bound to a PersistentVolume and this
	// volume does not exist any longer and all data on it was lost.
	ClaimLost PersistentVolumeClaimPhase = "Lost"
)
// HostPathType defines the type of a HostPath volume: what kind of object
// (directory, file, socket, device) must or may exist at the given path.
type HostPathType string

const (
	// HostPathUnset is the empty value, kept for backwards compatibility; leave it empty if unset.
	HostPathUnset HostPathType = ""
	// HostPathDirectoryOrCreate - if nothing exists at the given path, an empty directory will be created there
	// as needed with file mode 0755, having the same group and ownership with Kubelet.
	HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate"
	// HostPathDirectory - a directory must exist at the given path.
	HostPathDirectory HostPathType = "Directory"
	// HostPathFileOrCreate - if nothing exists at the given path, an empty file will be created there
	// as needed with file mode 0644, having the same group and ownership with Kubelet.
	HostPathFileOrCreate HostPathType = "FileOrCreate"
	// HostPathFile - a file must exist at the given path.
	HostPathFile HostPathType = "File"
	// HostPathSocket - a UNIX socket must exist at the given path.
	HostPathSocket HostPathType = "Socket"
	// HostPathCharDev - a character device must exist at the given path.
	HostPathCharDev HostPathType = "CharDevice"
	// HostPathBlockDev - a block device must exist at the given path.
	HostPathBlockDev HostPathType = "BlockDevice"
)
// HostPathVolumeSource represents a host path mapped into a pod.
// Host path volumes do not support ownership management or SELinux relabeling.
type HostPathVolumeSource struct {
	// Path of the directory on the host.
	// If the path is a symlink, it will follow the link to the real path.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
	// Type for HostPath Volume.
	// Defaults to "" (HostPathUnset).
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	// +optional
	Type *HostPathType `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"`
}
// EmptyDirVolumeSource represents an empty directory for a pod.
// Empty directory volumes support ownership management and SELinux relabeling.
type EmptyDirVolumeSource struct {
	// What type of storage medium should back this directory.
	// The default is "" which means to use the node's default medium.
	// Must be an empty string (default) or Memory.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
	// +optional
	Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"`
	// Total amount of local storage required for this EmptyDir volume.
	// The size limit is also applicable for the memory medium.
	// The maximum usage on the memory medium EmptyDir would be the minimum value between
	// the SizeLimit specified here and the sum of memory limits of all containers in a pod.
	// The default is nil which means that the limit is undefined.
	// More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
	// +optional
	SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"`
}
// GlusterfsVolumeSource represents a Glusterfs mount that lasts the lifetime of a pod.
// Glusterfs volumes do not support ownership management or SELinux relabeling.
type GlusterfsVolumeSource struct {
	// EndpointsName is the endpoint name that details Glusterfs topology.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
	EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
	// Path is the Glusterfs volume path.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
	// ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
	// Defaults to false.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// RBDVolumeSource represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling.
// Secrets are referenced by name only (LocalObjectReference); compare
// RBDPersistentVolumeSource, which allows a namespaced SecretReference.
type RBDVolumeSource struct {
	// A collection of Ceph monitors.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
	// The rados image name.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
	// Filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// The rados pool name.
	// Default is rbd.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
	// The rados user name.
	// Default is admin.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
	// Keyring is the path to the key ring for RadosUser.
	// Default is /etc/ceph/keyring.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
	// SecretRef is the name of the authentication secret for RadosUser. If provided,
	// it overrides Keyring.
	// Default is nil.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
	// ReadOnly here will force the ReadOnly setting in VolumeMounts.
	// Defaults to false.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
}
// RBDPersistentVolumeSource represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling.
// Unlike RBDVolumeSource, the secret reference here is a SecretReference,
// which may point to a secret in any namespace.
type RBDPersistentVolumeSource struct {
	// A collection of Ceph monitors.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
	// The rados image name.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
	// Filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// The rados pool name.
	// Default is rbd.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
	// The rados user name.
	// Default is admin.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
	// Keyring is the path to the key ring for RadosUser.
	// Default is /etc/ceph/keyring.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
	// SecretRef is the name of the authentication secret for RadosUser. If provided,
	// it overrides Keyring.
	// Default is nil.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
	// ReadOnly here will force the ReadOnly setting in VolumeMounts.
	// Defaults to false.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
}
// CinderVolumeSource represents a cinder volume resource in OpenStack.
// A Cinder volume must exist before mounting to a container.
// The volume must also be in the same region as the kubelet.
// Cinder volumes support ownership management and SELinux relabeling.
type CinderVolumeSource struct {
	// Volume ID used to identify the volume in cinder.
	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// CephFSVolumeSource represents a Ceph Filesystem mount that lasts the lifetime of a pod.
// Cephfs volumes do not support ownership management or SELinux relabeling.
// Secrets are referenced by name only (LocalObjectReference); compare
// CephFSPersistentVolumeSource, which allows a namespaced SecretReference.
type CephFSVolumeSource struct {
	// Required: Monitors is a collection of Ceph monitors.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
	// Optional: Path is used as the mounted root, rather than the full Ceph tree; default is /.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
	// Optional: User is the rados user name, default is admin.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
	// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
	// Optional: SecretRef is a reference to the authentication secret for User, default is empty.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
	// Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
}
// SecretReference represents a Secret Reference. It has enough information to retrieve a secret
// in any namespace.
type SecretReference struct {
	// Name is unique within a namespace to reference a secret resource.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// Namespace defines the space within which the secret name must be unique.
	// +optional
	Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}
// CephFSPersistentVolumeSource represents a Ceph Filesystem mount that lasts the lifetime of a pod.
// Cephfs volumes do not support ownership management or SELinux relabeling.
// Unlike CephFSVolumeSource, the secret reference here is a SecretReference,
// which may point to a secret in any namespace.
type CephFSPersistentVolumeSource struct {
	// Required: Monitors is a collection of Ceph monitors.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
	// Optional: Path is used as the mounted root, rather than the full Ceph tree; default is /.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
	// Optional: User is the rados user name, default is admin.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
	// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
	// Optional: SecretRef is a reference to the authentication secret for User, default is empty.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
	// Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
}
// FlockerVolumeSource represents a Flocker volume mounted by the Flocker agent.
// One and only one of DatasetName and DatasetUUID should be set.
// Flocker volumes do not support ownership management or SELinux relabeling.
type FlockerVolumeSource struct {
	// Name of the dataset stored as metadata -> name on the dataset for Flocker.
	// Should be considered as deprecated.
	// +optional
	DatasetName string `json:"datasetName,omitempty" protobuf:"bytes,1,opt,name=datasetName"`
	// UUID of the dataset. This is the unique identifier of a Flocker dataset.
	// +optional
	DatasetUUID string `json:"datasetUUID,omitempty" protobuf:"bytes,2,opt,name=datasetUUID"`
}
// StorageMedium defines ways that storage can be allocated to a volume.
type StorageMedium string

const (
	// StorageMediumDefault means to use whatever the default is for the node.
	StorageMediumDefault StorageMedium = ""
	// StorageMediumMemory means to use memory (tmpfs).
	StorageMediumMemory StorageMedium = "Memory"
	// StorageMediumHugePages means to use hugepages.
	StorageMediumHugePages StorageMedium = "HugePages"
)
// Protocol defines network protocols supported for things like container ports.
// Valid values are TCP and UDP.
type Protocol string

const (
	// ProtocolTCP is the TCP protocol.
	ProtocolTCP Protocol = "TCP"
	// ProtocolUDP is the UDP protocol.
	ProtocolUDP Protocol = "UDP"
)
// GCEPersistentDiskVolumeSource represents a Persistent Disk resource in Google Compute Engine.
//
// A GCE PD must exist before mounting to a container. The disk must
// also be in the same GCE project and zone as the kubelet. A GCE PD
// can only be mounted as read/write once or read-only many times. GCE
// PDs support ownership management and SELinux relabeling.
type GCEPersistentDiskVolumeSource struct {
	// Unique name of the PD resource in GCE. Used to identify the disk in GCE.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"`
	// Filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// The partition in the volume that you want to mount.
	// If omitted, the default is to mount by volume name.
	// Examples: For volume /dev/sda1, you specify the partition as "1".
	// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// +optional
	Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
	// ReadOnly here will force the ReadOnly setting in VolumeMounts.
	// Defaults to false.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
}
// QuobyteVolumeSource represents a Quobyte mount that lasts the lifetime of a pod.
// Quobyte volumes do not support ownership management or SELinux relabeling.
type QuobyteVolumeSource struct {
	// Registry represents a single or multiple Quobyte Registry services
	// specified as a string as host:port pair (multiple entries are separated with commas)
	// which acts as the central registry for volumes.
	Registry string `json:"registry" protobuf:"bytes,1,opt,name=registry"`
	// Volume is a string that references an already created Quobyte volume by name.
	Volume string `json:"volume" protobuf:"bytes,2,opt,name=volume"`
	// ReadOnly here will force the Quobyte volume to be mounted with read-only permissions.
	// Defaults to false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
	// User to map volume access to.
	// Defaults to serviceaccount user.
	// +optional
	User string `json:"user,omitempty" protobuf:"bytes,4,opt,name=user"`
	// Group to map volume access to.
	// Default is no group.
	// +optional
	Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"`
}
// FlexVolumeSource represents a generic volume resource that is
// provisioned/attached using an exec based plugin. This is an alpha feature and may change in the future.
type FlexVolumeSource struct {
	// Driver is the name of the driver to use for this volume.
	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on the FlexVolume script.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// Optional: SecretRef is a reference to the secret object containing
	// sensitive information to pass to the plugin scripts. This may be
	// empty if no secret object is specified. If the secret object
	// contains more than one secret, all secrets are passed to the plugin
	// scripts.
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
	// Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
	// Optional: Extra command options if any.
	// +optional
	Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"`
}
// AWSElasticBlockStoreVolumeSource represents a Persistent Disk resource in AWS.
//
// An AWS EBS disk must exist before mounting to a container. The disk
// must also be in the same AWS zone as the kubelet. An AWS EBS disk
// can only be mounted as read/write once. AWS EBS volumes support
// ownership management and SELinux relabeling.
type AWSElasticBlockStoreVolumeSource struct {
// Unique ID of the persistent disk resource in AWS (Amazon EBS volume).
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
// Filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// The partition in the volume that you want to mount.
// If omitted, the default is to mount by volume name.
// Examples: For volume /dev/sda1, you specify the partition as "1".
// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
// +optional
Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
// Specify "true" to force and set the ReadOnly property in VolumeMounts to "true".
// If omitted, the default is "false".
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
}
// GitRepoVolumeSource represents a volume that is populated with the contents of a git repository.
// Git repo volumes do not support ownership management.
// Git repo volumes support SELinux relabeling.
type GitRepoVolumeSource struct {
// Repository URL
Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"`
// Commit hash for the specified revision.
// +optional
Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
// Target directory name.
// Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
// git repository. Otherwise, if specified, the volume will contain the git repository in
// the subdirectory with the given name.
// +optional
Directory string `json:"directory,omitempty" protobuf:"bytes,3,opt,name=directory"`
}
// SecretVolumeSource adapts a Secret into a volume.
//
// The contents of the target Secret's Data field will be presented in a volume
// as files using the keys in the Data field as the file names.
// Secret volumes support ownership management and SELinux relabeling.
type SecretVolumeSource struct {
// Name of the secret in the pod's namespace to use.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
// +optional
SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"`
// If unspecified, each key-value pair in the Data field of the referenced
// Secret will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the Secret,
// the volume setup will error unless it is marked optional. Paths must be
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// Optional: mode bits to use on created files by default. Must be a
// value between 0 and 0777. Defaults to 0644.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"`
// Specify whether the Secret or its keys must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// SecretVolumeSourceDefaultMode is the default file mode (octal 0644) used for
// files projected from a Secret volume when DefaultMode is not specified.
const (
SecretVolumeSourceDefaultMode int32 = 0644
)
// SecretProjection adapts a secret into a projected volume.
//
// The contents of the target Secret's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names.
// Note that this is identical to a secret volume source without the default
// mode.
type SecretProjection struct {
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// If unspecified, each key-value pair in the Data field of the referenced
// Secret will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the Secret,
// the volume setup will error unless it is marked optional. Paths must be
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// Specify whether the Secret or its key must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// NFSVolumeSource represents an NFS mount that lasts the lifetime of a pod.
// NFS volumes do not support ownership management or SELinux relabeling.
type NFSVolumeSource struct {
// Server is the hostname or IP address of the NFS server.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
Server string `json:"server" protobuf:"bytes,1,opt,name=server"`
// Path that is exported by the NFS server.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
// ReadOnly here will force
// the NFS export to be mounted with read-only permissions.
// Defaults to false.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// ISCSIVolumeSource represents an ISCSI disk.
// ISCSI volumes can only be mounted as read/write once.
// ISCSI volumes support ownership management and SELinux relabeling.
type ISCSIVolumeSource struct {
// iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
// is other than default (typically TCP ports 860 and 3260).
TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
// Target iSCSI Qualified Name.
IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
// iSCSI Target Lun number.
Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
// iSCSI Interface Name that uses an iSCSI transport.
// Defaults to 'default' (tcp).
// +optional
ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
// Filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
// ReadOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
// iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
// is other than default (typically TCP ports 860 and 3260).
// +optional
Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
// whether support iSCSI Discovery CHAP authentication
// +optional
DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
// whether support iSCSI Session CHAP authentication
// +optional
SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
// CHAP Secret for iSCSI target and initiator authentication.
// The secret is referenced by name in the pod's own namespace (LocalObjectReference).
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
// Custom iSCSI Initiator Name.
// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
// <target portal>:<volume name> will be created for the connection.
// +optional
InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
}
// ISCSIPersistentVolumeSource represents an ISCSI disk.
// ISCSI volumes can only be mounted as read/write once.
// ISCSI volumes support ownership management and SELinux relabeling.
// Unlike ISCSIVolumeSource, the CHAP SecretRef here is a SecretReference
// (name plus namespace) rather than a LocalObjectReference.
type ISCSIPersistentVolumeSource struct {
// iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
// is other than default (typically TCP ports 860 and 3260).
TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
// Target iSCSI Qualified Name.
IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
// iSCSI Target Lun number.
Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
// iSCSI Interface Name that uses an iSCSI transport.
// Defaults to 'default' (tcp).
// +optional
ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
// Filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
// ReadOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
// iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port
// is other than default (typically TCP ports 860 and 3260).
// +optional
Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
// whether support iSCSI Discovery CHAP authentication
// +optional
DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
// whether support iSCSI Session CHAP authentication
// +optional
SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
// CHAP Secret for iSCSI target and initiator authentication
// +optional
SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
// Custom iSCSI Initiator Name.
// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
// <target portal>:<volume name> will be created for the connection.
// +optional
InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
}
// FCVolumeSource represents a Fibre Channel volume.
// Fibre Channel volumes can only be mounted as read/write once.
// Fibre Channel volumes support ownership management and SELinux relabeling.
type FCVolumeSource struct {
// Optional: FC target worldwide names (WWNs)
// +optional
TargetWWNs []string `json:"targetWWNs,omitempty" protobuf:"bytes,1,rep,name=targetWWNs"`
// Optional: FC target lun number
// +optional
Lun *int32 `json:"lun,omitempty" protobuf:"varint,2,opt,name=lun"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// Optional: FC volume world wide identifiers (wwids)
// Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
// +optional
WWIDs []string `json:"wwids,omitempty" protobuf:"bytes,5,rep,name=wwids"`
}
// AzureFileVolumeSource represents an Azure File Service mount on the host and bind mount to the pod.
type AzureFileVolumeSource struct {
// the name of secret that contains Azure Storage Account Name and Key
SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
// Share Name
ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// AzureFilePersistentVolumeSource represents an Azure File Service mount on the host
// and bind mount to the pod. Unlike AzureFileVolumeSource, it also carries the
// namespace of the credentials secret.
type AzureFilePersistentVolumeSource struct {
// the name of secret that contains Azure Storage Account Name and Key
SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
// Share Name
ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
// the namespace of the secret that contains Azure Storage Account Name and Key
// default is the same as the Pod
// +optional
SecretNamespace *string `json:"secretNamespace" protobuf:"bytes,4,opt,name=secretNamespace"`
}
// VsphereVirtualDiskVolumeSource represents a vSphere volume resource.
type VsphereVirtualDiskVolumeSource struct {
// Path that identifies vSphere volume vmdk
VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// Storage Policy Based Management (SPBM) profile name.
// +optional
StoragePolicyName string `json:"storagePolicyName,omitempty" protobuf:"bytes,3,opt,name=storagePolicyName"`
// Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
// +optional
StoragePolicyID string `json:"storagePolicyID,omitempty" protobuf:"bytes,4,opt,name=storagePolicyID"`
}
// PhotonPersistentDiskVolumeSource represents a Photon Controller persistent disk resource.
type PhotonPersistentDiskVolumeSource struct {
// ID that identifies Photon Controller persistent disk
PdID string `json:"pdID" protobuf:"bytes,1,opt,name=pdID"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// NOTE(review): field has omitempty but no +optional marker — TODO confirm intended.
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
}
// AzureDataDiskCachingMode is the host caching mode to use for an Azure data disk.
type AzureDataDiskCachingMode string
// AzureDataDiskKind is the kind of backing used for an Azure data disk.
type AzureDataDiskKind string
// Supported Azure data disk caching modes and disk kinds.
const (
// Host caching disabled.
AzureDataDiskCachingNone AzureDataDiskCachingMode = "None"
// Host caching for reads only.
AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly"
// Host caching for reads and writes.
AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"
// Multiple blob disks per storage account.
AzureSharedBlobDisk AzureDataDiskKind = "Shared"
// Single blob disk per storage account.
AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
// Azure managed data disk.
AzureManagedDisk AzureDataDiskKind = "Managed"
)
// AzureDiskVolumeSource represents an Azure Data Disk mount on the host and bind mount to the pod.
type AzureDiskVolumeSource struct {
// The Name of the data disk in the blob storage
DiskName string `json:"diskName" protobuf:"bytes,1,opt,name=diskName"`
// The URI the data disk in the blob storage
DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"`
// Host Caching mode: None, Read Only, Read Write.
// +optional
CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
// Expected values:
// Shared: multiple blob disks per storage account
// Dedicated: single blob disk per storage account
// Managed: azure managed data disk (only in managed availability set).
// Defaults to Shared.
Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"`
}
// PortworxVolumeSource represents a Portworx volume resource.
type PortworxVolumeSource struct {
// VolumeID uniquely identifies a Portworx volume
VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
// FSType represents the filesystem type to mount
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
// NOTE(review): field has omitempty but no +optional marker — TODO confirm intended.
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// ScaleIOVolumeSource represents a persistent ScaleIO volume
type ScaleIOVolumeSource struct {
// The host address of the ScaleIO API Gateway.
Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
// The name of the storage system as configured in ScaleIO.
System string `json:"system" protobuf:"bytes,2,opt,name=system"`
// SecretRef references to the secret for ScaleIO user and other
// sensitive information. If this is not provided, Login operation will fail.
// The secret is referenced by name in the pod's own namespace (LocalObjectReference).
SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
// Flag to enable/disable SSL communication with Gateway, default false
// +optional
SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
// The name of the ScaleIO Protection Domain for the configured storage.
// +optional
ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
// The ScaleIO Storage Pool associated with the protection domain.
// +optional
StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
// +optional
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
// The name of a volume already created in the ScaleIO system
// that is associated with this volume source.
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
}
// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume.
// Unlike ScaleIOVolumeSource, the SecretRef here is a SecretReference
// (name plus namespace) rather than a LocalObjectReference.
type ScaleIOPersistentVolumeSource struct {
// The host address of the ScaleIO API Gateway.
Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
// The name of the storage system as configured in ScaleIO.
System string `json:"system" protobuf:"bytes,2,opt,name=system"`
// SecretRef references to the secret for ScaleIO user and other
// sensitive information. If this is not provided, Login operation will fail.
SecretRef *SecretReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
// Flag to enable/disable SSL communication with Gateway, default false
// +optional
SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
// The name of the ScaleIO Protection Domain for the configured storage.
// +optional
ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
// The ScaleIO Storage Pool associated with the protection domain.
// +optional
StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
// +optional
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
// The name of a volume already created in the ScaleIO system
// that is associated with this volume source.
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
}
// StorageOSVolumeSource represents a StorageOS persistent volume resource.
type StorageOSVolumeSource struct {
// VolumeName is the human-readable name of the StorageOS volume. Volume
// names are only unique within a namespace.
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
// VolumeNamespace specifies the scope of the volume within StorageOS. If no
// namespace is specified then the Pod's namespace will be used. This allows the
// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
// Set VolumeName to any name to override the default behaviour.
// Set to "default" if you are not using namespaces within StorageOS.
// Namespaces that do not pre-exist within StorageOS will be created.
// +optional
VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// SecretRef specifies the secret to use for obtaining the StorageOS API
// credentials. If not specified, default values will be attempted.
// The secret is referenced by name in the pod's own namespace (LocalObjectReference).
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
}
// StorageOSPersistentVolumeSource represents a StorageOS persistent volume resource.
// Unlike StorageOSVolumeSource, the SecretRef here is a full ObjectReference
// rather than a LocalObjectReference.
type StorageOSPersistentVolumeSource struct {
// VolumeName is the human-readable name of the StorageOS volume. Volume
// names are only unique within a namespace.
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
// VolumeNamespace specifies the scope of the volume within StorageOS. If no
// namespace is specified then the Pod's namespace will be used. This allows the
// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
// Set VolumeName to any name to override the default behaviour.
// Set to "default" if you are not using namespaces within StorageOS.
// Namespaces that do not pre-exist within StorageOS will be created.
// +optional
VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// SecretRef specifies the secret to use for obtaining the StorageOS API
// credentials. If not specified, default values will be attempted.
// +optional
SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
}
// ConfigMapVolumeSource adapts a ConfigMap into a volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
// volume as files using the keys in the Data field as the file names, unless
// the items element is populated with specific mappings of keys to paths.
// ConfigMap volumes support ownership management and SELinux relabeling.
type ConfigMapVolumeSource struct {
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// If unspecified, each key-value pair in the Data field of the referenced
// ConfigMap will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the ConfigMap,
// the volume setup will error unless it is marked optional. Paths must be
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// Optional: mode bits to use on created files by default. Must be a
// value between 0 and 0777. Defaults to 0644.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"`
// Specify whether the ConfigMap or its keys must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// ConfigMapVolumeSourceDefaultMode is the default file mode (octal 0644) used for
// files projected from a ConfigMap volume when DefaultMode is not specified.
const (
ConfigMapVolumeSourceDefaultMode int32 = 0644
)
// ConfigMapProjection adapts a ConfigMap into a projected volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names,
// unless the items element is populated with specific mappings of keys to paths.
// Note that this is identical to a configmap volume source without the default
// mode.
type ConfigMapProjection struct {
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// If unspecified, each key-value pair in the Data field of the referenced
// ConfigMap will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the ConfigMap,
// the volume setup will error unless it is marked optional. Paths must be
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// Specify whether the ConfigMap or its keys must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// ProjectedVolumeSource represents a projected volume source.
type ProjectedVolumeSource struct {
// list of volume projections
Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"`
// Mode bits to use on created files by default. Must be a value between
// 0 and 0777.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
}
// VolumeProjection is a projection that may be projected along with other supported volume types.
type VolumeProjection struct {
// all types below are the supported types for projection into the same volume
// information about the secret data to project
Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
// information about the downwardAPI data to project
DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"`
// information about the configMap data to project
ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"`
}
// ProjectedVolumeSourceDefaultMode is the default file mode (octal 0644) used for
// files in a projected volume when DefaultMode is not specified.
const (
ProjectedVolumeSourceDefaultMode int32 = 0644
)
// KeyToPath maps a string key to a path within a volume.
type KeyToPath struct {
// The key to project.
Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
// The relative path of the file to map the key to.
// May not be an absolute path.
// May not contain the path element '..'.
// May not start with the string '..'.
Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
// Optional: mode bits to use on this file, must be a value between 0
// and 0777. If not specified, the volume defaultMode will be used.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"`
}
// LocalVolumeSource represents directly-attached storage with node affinity.
type LocalVolumeSource struct {
// The full path to the volume on the node
// For alpha, this path must be a directory
// Once block as a source is supported, then this path can point to a block device
Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
}
// CSIPersistentVolumeSource represents storage that is managed by an external CSI volume driver.
type CSIPersistentVolumeSource struct {
// Driver is the name of the driver to use for this volume.
// Required.
Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
// VolumeHandle is the unique volume name returned by the CSI volume
// plugin’s CreateVolume to refer to the volume on all subsequent calls.
// Required.
VolumeHandle string `json:"volumeHandle" protobuf:"bytes,2,opt,name=volumeHandle"`
// Optional: The value to pass to ControllerPublishVolumeRequest.
// Defaults to false (read/write).
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// ContainerPort represents a network port in a single container,
// optionally exposed on the host via HostPort/HostIP.
type ContainerPort struct {
// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
// named port in a pod must have a unique name. Name for the port that can be
// referred to by services.
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// Number of port to expose on the host.
// If specified, this must be a valid port number, 0 < x < 65536.
// If HostNetwork is specified, this must match ContainerPort.
// Most containers do not need this.
// +optional
HostPort int32 `json:"hostPort,omitempty" protobuf:"varint,2,opt,name=hostPort"`
// Number of port to expose on the pod's IP address.
// This must be a valid port number, 0 < x < 65536.
ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"`
// Protocol for port. Must be UDP or TCP.
// Defaults to "TCP".
// +optional
Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"`
// What host IP to bind the external port to.
// +optional
HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
}
// VolumeMount describes a mounting of a Volume within a container.
type VolumeMount struct {
	// This must match the Name of a Volume.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Mounted read-only if true, read-write otherwise (false or unspecified).
	// Defaults to false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
	// Path within the container at which the volume should be mounted. Must
	// not contain ':'.
	MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"`
	// Path within the volume from which the container's volume should be mounted.
	// Defaults to "" (volume's root).
	// +optional
	SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"`
	// mountPropagation determines how mounts are propagated from the host
	// to the container and the other way around.
	// When not set, MountPropagationHostToContainer is used.
	// This field is alpha in 1.8 and can be reworked or removed in a future
	// release.
	// +optional
	MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"`
}
// MountPropagationMode describes mount propagation.
type MountPropagationMode string

const (
	// MountPropagationHostToContainer means that the volume in a container will
	// receive new mounts from the host or other containers, but filesystems
	// mounted inside the container won't be propagated to the host or other
	// containers.
	// Note that this mode is recursively applied to all mounts in the volume
	// ("rslave" in Linux terminology).
	MountPropagationHostToContainer MountPropagationMode = "HostToContainer"
	// MountPropagationBidirectional means that the volume in a container will
	// receive new mounts from the host or other containers, and its own mounts
	// will be propagated from the container to the host or other containers.
	// Note that this mode is recursively applied to all mounts in the volume
	// ("rshared" in Linux terminology).
	MountPropagationBidirectional MountPropagationMode = "Bidirectional"
)
// VolumeDevice describes a mapping of a raw block device within a container.
type VolumeDevice struct {
	// name must match the name of a persistentVolumeClaim in the pod
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// devicePath is the path inside of the container that the device will be mapped to.
	DevicePath string `json:"devicePath" protobuf:"bytes,2,opt,name=devicePath"`
}
// EnvVar represents an environment variable present in a Container.
type EnvVar struct {
	// Name of the environment variable. Must be a C_IDENTIFIER.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Optional: no more than one of the following may be specified.
	// Variable references $(VAR_NAME) are expanded
	// using the previously defined environment variables in the container and
	// any service environment variables. If a variable cannot be resolved,
	// the reference in the input string will be unchanged. The $(VAR_NAME)
	// syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
	// references will never be expanded, regardless of whether the variable
	// exists or not.
	// Defaults to "".
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
	// Source for the environment variable's value. Cannot be used if value is not empty.
	// +optional
	ValueFrom *EnvVarSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"`
}
// EnvVarSource represents a source for the value of an EnvVar.
// At most one of its fields may be set.
type EnvVarSource struct {
	// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
	// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
	// +optional
	FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"`
	// Selects a resource of the container: only resources limits and requests
	// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
	// +optional
	ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"`
	// Selects a key of a ConfigMap.
	// +optional
	ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,3,opt,name=configMapKeyRef"`
	// Selects a key of a secret in the pod's namespace.
	// +optional
	SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"`
}
// ObjectFieldSelector selects an APIVersioned field of an object.
type ObjectFieldSelector struct {
	// Version of the schema the FieldPath is written in terms of, defaults to "v1".
	// +optional
	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"`
	// Path of the field to select in the specified API version.
	FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"`
}
// ResourceFieldSelector represents container resources (cpu, memory) and their
// output format.
type ResourceFieldSelector struct {
	// Container name: required for volumes, optional for env vars.
	// +optional
	ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"`
	// Required: resource to select.
	Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"`
	// Specifies the output format of the exposed resources, defaults to "1".
	// +optional
	Divisor resource.Quantity `json:"divisor,omitempty" protobuf:"bytes,3,opt,name=divisor"`
}
// ConfigMapKeySelector selects a key from a ConfigMap.
type ConfigMapKeySelector struct {
	// The ConfigMap to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// The key to select.
	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
	// Specify whether the ConfigMap or its key must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
// SecretKeySelector selects a key of a Secret.
type SecretKeySelector struct {
	// The name of the secret in the pod's namespace to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// The key of the secret to select from. Must be a valid secret key.
	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
	// Specify whether the Secret or its key must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
// EnvFromSource represents the source of a set of ConfigMaps
type EnvFromSource struct {
	// An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
	// +optional
	Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"`
	// The ConfigMap to select from
	// +optional
	ConfigMapRef *ConfigMapEnvSource `json:"configMapRef,omitempty" protobuf:"bytes,2,opt,name=configMapRef"`
	// The Secret to select from
	// +optional
	SecretRef *SecretEnvSource `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
}
// ConfigMapEnvSource selects a ConfigMap to populate the environment
// variables with.
//
// The contents of the target ConfigMap's Data field will represent the
// key-value pairs as environment variables.
type ConfigMapEnvSource struct {
	// The ConfigMap to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// Specify whether the ConfigMap must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
}
// SecretEnvSource selects a Secret to populate the environment
// variables with.
//
// The contents of the target Secret's Data field will represent the
// key-value pairs as environment variables.
type SecretEnvSource struct {
	// The Secret to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// Specify whether the Secret must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
}
// HTTPHeader describes a custom header to be used in HTTP probes.
type HTTPHeader struct {
	// The header field name.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// The header field value.
	Value string `json:"value" protobuf:"bytes,2,opt,name=value"`
}
// HTTPGetAction describes an action based on HTTP Get requests.
type HTTPGetAction struct {
	// Path to access on the HTTP server.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
	// Name or number of the port to access on the container.
	// Number must be in the range 1 to 65535.
	// Name must be an IANA_SVC_NAME.
	Port intstr.IntOrString `json:"port" protobuf:"bytes,2,opt,name=port"`
	// Host name to connect to, defaults to the pod IP. You probably want to set
	// "Host" in httpHeaders instead.
	// +optional
	Host string `json:"host,omitempty" protobuf:"bytes,3,opt,name=host"`
	// Scheme to use for connecting to the host.
	// Defaults to HTTP.
	// +optional
	Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"`
	// Custom headers to set in the request. HTTP allows repeated headers.
	// +optional
	HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"`
}
// URIScheme identifies the scheme used for connection to a host for Get actions.
type URIScheme string

const (
	// URISchemeHTTP means that the scheme used will be http://
	URISchemeHTTP URIScheme = "HTTP"
	// URISchemeHTTPS means that the scheme used will be https://
	URISchemeHTTPS URIScheme = "HTTPS"
)
// TCPSocketAction describes an action based on opening a socket.
type TCPSocketAction struct {
	// Number or name of the port to access on the container.
	// Number must be in the range 1 to 65535.
	// Name must be an IANA_SVC_NAME.
	Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"`
	// Optional: Host name to connect to, defaults to the pod IP.
	// +optional
	Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}
// ExecAction describes a "run in container" action.
type ExecAction struct {
	// Command is the command line to execute inside the container, the working directory for the
	// command is root ('/') in the container's filesystem. The command is simply exec'd, it is
	// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
	// a shell, you need to explicitly call out to that shell.
	// Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
	// +optional
	Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"`
}
// Probe describes a health check to be performed against a container to determine whether it is
// alive or ready to receive traffic.
type Probe struct {
	// The action taken to determine the health of a container.
	Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"`
	// Number of seconds after the container has started before liveness probes are initiated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"`
	// Number of seconds after which the probe times out.
	// Defaults to 1 second. Minimum value is 1.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"`
	// How often (in seconds) to perform the probe.
	// Default to 10 seconds. Minimum value is 1.
	// +optional
	PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"`
	// Minimum consecutive successes for the probe to be considered successful after having failed.
	// Defaults to 1. Must be 1 for liveness. Minimum value is 1.
	// +optional
	SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"`
	// Minimum consecutive failures for the probe to be considered failed after having succeeded.
	// Defaults to 3. Minimum value is 1.
	// +optional
	FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"`
}
// PullPolicy describes a policy for if/when to pull a container image.
type PullPolicy string

const (
	// PullAlways means that kubelet always attempts to pull the latest image. Container will fail if the pull fails.
	PullAlways PullPolicy = "Always"
	// PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present.
	PullNever PullPolicy = "Never"
	// PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
	PullIfNotPresent PullPolicy = "IfNotPresent"
)
// TerminationMessagePolicy describes how termination messages are retrieved from a container.
type TerminationMessagePolicy string

const (
	// TerminationMessageReadFile is the default behavior and will set the container status message to
	// the contents of the container's terminationMessagePath when the container exits.
	TerminationMessageReadFile TerminationMessagePolicy = "File"
	// TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs
	// for the container status message when the container exits with an error and the
	// terminationMessagePath has no contents.
	TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError"
)
// Capability represents a POSIX capabilities type.
type Capability string

// Capabilities adds and removes POSIX capabilities from running containers.
type Capabilities struct {
	// Added capabilities.
	// +optional
	Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"`
	// Removed capabilities.
	// +optional
	Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"`
}
// ResourceRequirements describes the compute resource requirements.
type ResourceRequirements struct {
	// Limits describes the maximum amount of compute resources allowed.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
	// +optional
	Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"`
	// Requests describes the minimum amount of compute resources required.
	// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
	// otherwise to an implementation-defined value.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
	// +optional
	Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"`
}
const (
	// TerminationMessagePathDefault means the default path to capture the application termination message running in a container.
	TerminationMessagePathDefault string = "/dev/termination-log"
)
// Container is a single application container that you want to run within a pod.
type Container struct {
	// Name of the container specified as a DNS_LABEL.
	// Each container in a pod must have a unique name (DNS_LABEL).
	// Cannot be updated.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Docker image name.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	// This field is optional to allow higher level config management to default or override
	// container images in workload controllers like Deployments and StatefulSets.
	// +optional
	Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
	// Entrypoint array. Not executed within a shell.
	// The docker image's ENTRYPOINT is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
	// can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
	// regardless of whether the variable exists or not.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
	// Arguments to the entrypoint.
	// The docker image's CMD is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
	// can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
	// regardless of whether the variable exists or not.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
	// Container's working directory.
	// If not specified, the container runtime's default will be used, which
	// might be configured in the container image.
	// Cannot be updated.
	// +optional
	WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
	// List of ports to expose from the container. Exposing a port here gives
	// the system additional information about the network connections a
	// container uses, but is primarily informational. Not specifying a port here
	// DOES NOT prevent that port from being exposed. Any port which is
	// listening on the default "0.0.0.0" address inside a container will be
	// accessible from the network.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=containerPort
	// +patchStrategy=merge
	Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
	// List of sources to populate environment variables in the container.
	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
	// will be reported as an event when the container is starting. When a key exists in multiple
	// sources, the value associated with the last source will take precedence.
	// Values defined by an Env with a duplicate key will take precedence.
	// Cannot be updated.
	// +optional
	EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
	// List of environment variables to set in the container.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
	// Compute Resources required by this container.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
	// +optional
	Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
	// Pod volumes to mount into the container's filesystem.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=mountPath
	// +patchStrategy=merge
	VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
	// volumeDevices is the list of block devices to be used by the container.
	// This is an alpha feature and may change in the future.
	// +patchMergeKey=devicePath
	// +patchStrategy=merge
	// +optional
	VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"`
	// Periodic probe of container liveness.
	// Container will be restarted if the probe fails.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
	// Periodic probe of container service readiness.
	// Container will be removed from service endpoints if the probe fails.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
	// Actions that the management system should take in response to container lifecycle events.
	// Cannot be updated.
	// +optional
	Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
	// Optional: Path at which the file to which the container's termination message
	// will be written is mounted into the container's filesystem.
	// Message written is intended to be brief final status, such as an assertion failure message.
	// Will be truncated by the node if greater than 4096 bytes. The total message length across
	// all containers will be limited to 12kb.
	// Defaults to /dev/termination-log.
	// Cannot be updated.
	// +optional
	TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
	// Indicate how the termination message should be populated. File will use the contents of
	// terminationMessagePath to populate the container status message on both success and failure.
	// FallbackToLogsOnError will use the last chunk of container log output if the termination
	// message file is empty and the container exited with an error.
	// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
	// Defaults to File.
	// Cannot be updated.
	// +optional
	TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
	// Image pull policy.
	// One of Always, Never, IfNotPresent.
	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
	// +optional
	ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
	// Security options the pod should run with.
	// More info: https://kubernetes.io/docs/concepts/policy/security-context/
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
	// +optional
	SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
	// Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
	// and shouldn't be used for general purpose containers.
	// Whether this container should allocate a buffer for stdin in the container runtime. If this
	// is not set, reads from stdin in the container will always result in EOF.
	// Default is false.
	// +optional
	Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
	// Whether the container runtime should close the stdin channel after it has been opened by
	// a single attach. When stdin is true the stdin stream will remain open across multiple attach
	// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
	// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
	// at which time stdin is closed and remains closed until the container is restarted. If this
	// flag is false, a container process that reads from stdin will never receive an EOF.
	// Default is false.
	// +optional
	StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
	// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
	// Default is false.
	// +optional
	TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
}
// Handler defines a specific action that should be taken.
// TODO: pass structured data to these actions, and document that data here.
type Handler struct {
	// One and only one of the following should be specified.
	// Exec specifies the action to take.
	// +optional
	Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
	// HTTPGet specifies the http request to perform.
	// +optional
	HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
	// TCPSocket specifies an action involving a TCP port.
	// TCP hooks not yet supported.
	// TODO: implement a realistic TCP lifecycle hook
	// +optional
	TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
}
// Lifecycle describes actions that the management system should take in response to container lifecycle
// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
// until the action is complete, unless the container process fails, in which case the handler is aborted.
type Lifecycle struct {
	// PostStart is called immediately after a container is created. If the handler fails,
	// the container is terminated and restarted according to its restart policy.
	// Other management of the container blocks until the hook completes.
	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
	// +optional
	PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"`
	// PreStop is called immediately before a container is terminated.
	// The container is terminated after the handler completes.
	// The reason for termination is passed to the handler.
	// Regardless of the outcome of the handler, the container is eventually terminated.
	// Other management of the container blocks until the hook completes.
	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
	// +optional
	PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
}
// ConditionStatus is the status of a condition: "True", "False", or "Unknown".
type ConditionStatus string

// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
// can't decide if a resource is in the condition or not. In the future, we could add other
// intermediate conditions, e.g. ConditionDegraded.
const (
	ConditionTrue    ConditionStatus = "True"
	ConditionFalse   ConditionStatus = "False"
	ConditionUnknown ConditionStatus = "Unknown"
)
// ContainerStateWaiting is a waiting state of a container.
type ContainerStateWaiting struct {
	// (brief) reason the container is not yet running.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason"`
	// Message regarding why the container is not yet running.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
}
// ContainerStateRunning is a running state of a container.
type ContainerStateRunning struct {
	// Time at which the container was last (re-)started.
	// +optional
	StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"`
}
// ContainerStateTerminated is a terminated state of a container.
type ContainerStateTerminated struct {
	// Exit status from the last termination of the container.
	ExitCode int32 `json:"exitCode" protobuf:"varint,1,opt,name=exitCode"`
	// Signal from the last termination of the container.
	// +optional
	Signal int32 `json:"signal,omitempty" protobuf:"varint,2,opt,name=signal"`
	// (brief) reason from the last termination of the container.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
	// Message regarding the last termination of the container.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
	// Time at which previous execution of the container started.
	// +optional
	StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"`
	// Time at which the container last terminated.
	// +optional
	FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"`
	// Container's ID in the format 'docker://<container_id>'.
	// +optional
	ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"`
}
// ContainerState holds a possible state of container.
// Only one of its members may be specified.
// If none of them is specified, the default one is ContainerStateWaiting.
type ContainerState struct {
	// Details about a waiting container.
	// +optional
	Waiting *ContainerStateWaiting `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"`
	// Details about a running container.
	// +optional
	Running *ContainerStateRunning `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"`
	// Details about a terminated container.
	// +optional
	Terminated *ContainerStateTerminated `json:"terminated,omitempty" protobuf:"bytes,3,opt,name=terminated"`
}
// ContainerStatus contains details for the current status of this container.
type ContainerStatus struct {
	// This must be a DNS_LABEL. Each container in a pod must have a unique name.
	// Cannot be updated.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Details about the container's current condition.
	// +optional
	State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"`
	// Details about the container's last termination condition.
	// +optional
	LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"`
	// Specifies whether the container has passed its readiness probe.
	Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"`
	// The number of times the container has been restarted, currently based on
	// the number of dead containers that have not yet been removed.
	// Note that this is calculated from dead containers. But those containers are subject to
	// garbage collection. This value will get capped at 5 by GC.
	RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"`
	// The image the container is running.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	// TODO(dchen1107): Which image the container is running with?
	Image string `json:"image" protobuf:"bytes,6,opt,name=image"`
	// ImageID of the container's image.
	ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"`
	// Container's ID in the format 'docker://<container_id>'.
	// +optional
	ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"`
}
// PodPhase is a label for the condition of a pod at the current time.
type PodPhase string

// These are the valid statuses of pods.
const (
	// PodPending means the pod has been accepted by the system, but one or more of the containers
	// has not been started. This includes time before being bound to a node, as well as time spent
	// pulling images onto the host.
	PodPending PodPhase = "Pending"
	// PodRunning means the pod has been bound to a node and all of the containers have been started.
	// At least one container is still running or is in the process of being restarted.
	PodRunning PodPhase = "Running"
	// PodSucceeded means that all containers in the pod have voluntarily terminated
	// with a container exit code of 0, and the system is not going to restart any of these containers.
	PodSucceeded PodPhase = "Succeeded"
	// PodFailed means that all containers in the pod have terminated, and at least one container has
	// terminated in a failure (exited with a non-zero exit code or was stopped by the system).
	PodFailed PodPhase = "Failed"
	// PodUnknown means that for some reason the state of the pod could not be obtained, typically due
	// to an error in communicating with the host of the pod.
	PodUnknown PodPhase = "Unknown"
)
// PodConditionType is a valid value for PodCondition.Type
type PodConditionType string

// These are valid conditions of pod.
const (
	// PodScheduled represents status of the scheduling process for this pod.
	PodScheduled PodConditionType = "PodScheduled"
	// PodReady means the pod is able to service requests and should be added to the
	// load balancing pools of all matching services.
	PodReady PodConditionType = "Ready"
	// PodInitialized means that all init containers in the pod have started successfully.
	PodInitialized PodConditionType = "Initialized"
	// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
	// can't schedule the pod right now, for example due to insufficient resources in the cluster.
	PodReasonUnschedulable = "Unschedulable"
)
// PodCondition contains details for the current condition of this pod.
type PodCondition struct {
	// Type is the type of the condition.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
	Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"`
	// Status is the status of the condition.
	// Can be True, False, Unknown.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// Last time we probed the condition.
	// +optional
	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
	// Last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
	// Unique, one-word, CamelCase reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
	// Human-readable message indicating details about last transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// RestartPolicy describes how the container should be restarted.
// Only one of the following restart policies may be specified.
// If none of the following policies is specified, the default one
// is RestartPolicyAlways.
type RestartPolicy string

// These are the valid restart policies.
const (
	// RestartPolicyAlways indicates the container should always be restarted.
	RestartPolicyAlways RestartPolicy = "Always"
	// RestartPolicyOnFailure indicates the container should be restarted only on failure.
	RestartPolicyOnFailure RestartPolicy = "OnFailure"
	// RestartPolicyNever indicates the container should never be restarted.
	RestartPolicyNever RestartPolicy = "Never"
)
// DNSPolicy defines how a pod's DNS will be configured.
type DNSPolicy string

const (
	// DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS
	// first, if it is available, then fall back on the default
	// (as determined by kubelet) DNS settings.
	DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet"
	// DNSClusterFirst indicates that the pod should use cluster DNS
	// first unless hostNetwork is true, if it is available, then
	// fall back on the default (as determined by kubelet) DNS settings.
	DNSClusterFirst DNSPolicy = "ClusterFirst"
	// DNSDefault indicates that the pod should use the default (as
	// determined by kubelet) DNS settings.
	DNSDefault DNSPolicy = "Default"
	// DNSNone indicates that the pod should use empty DNS settings. DNS
	// parameters such as nameservers and search paths should be defined via
	// DNSConfig.
	DNSNone DNSPolicy = "None"

	// DefaultTerminationGracePeriodSeconds indicates the default duration in
	// seconds a pod needs to terminate gracefully.
	DefaultTerminationGracePeriodSeconds = 30
)
// A node selector represents the union of the results of one or more label queries
// over a set of nodes; that is, it represents the OR of the selectors represented
// by the node selector terms.
type NodeSelector struct {
	// Required. A list of node selector terms. The terms are ORed.
	NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"`
}

// A null or empty node selector term matches no objects.
type NodeSelectorTerm struct {
	// Required. A list of node selector requirements. The requirements are ANDed.
	MatchExpressions []NodeSelectorRequirement `json:"matchExpressions" protobuf:"bytes,1,rep,name=matchExpressions"`
}
// A node selector requirement is a selector that contains values, a key, and an operator
// that relates the key and values.
type NodeSelectorRequirement struct {
	// The label key that the selector applies to.
	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
	// Represents a key's relationship to a set of values.
	// Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
	Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"`
	// An array of string values. If the operator is In or NotIn,
	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
	// the values array must be empty. If the operator is Gt or Lt, the values
	// array must have a single element, which will be interpreted as an integer.
	// This array is replaced during a strategic merge patch.
	// +optional
	Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
}

// A node selector operator is the set of operators that can be used in
// a node selector requirement.
type NodeSelectorOperator string

const (
	// NodeSelectorOpIn requires the label value to be one of the entries in Values.
	NodeSelectorOpIn NodeSelectorOperator = "In"
	// NodeSelectorOpNotIn requires the label value to not be any of the entries in Values.
	NodeSelectorOpNotIn NodeSelectorOperator = "NotIn"
	// NodeSelectorOpExists requires the label key to be present; Values must be empty.
	NodeSelectorOpExists NodeSelectorOperator = "Exists"
	// NodeSelectorOpDoesNotExist requires the label key to be absent; Values must be empty.
	NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
	// NodeSelectorOpGt requires the label value, interpreted as an integer, to be
	// greater than the single entry in Values.
	NodeSelectorOpGt NodeSelectorOperator = "Gt"
	// NodeSelectorOpLt requires the label value, interpreted as an integer, to be
	// less than the single entry in Values.
	NodeSelectorOpLt NodeSelectorOperator = "Lt"
)
// Affinity is a group of affinity scheduling rules.
type Affinity struct {
	// Describes node affinity scheduling rules for the pod.
	// +optional
	NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"`
	// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
	// +optional
	PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"`
	// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
	// +optional
	PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"`
}
// Pod affinity is a group of inter pod affinity scheduling rules.
type PodAffinity struct {
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system will try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`

	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system may or may not try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
	// The scheduler will prefer to schedule pods to nodes that satisfy
	// the affinity expressions specified by this field, but it may choose
	// a node that violates one or more of the expressions. The node that is
	// most preferred is the one with the greatest sum of weights, i.e.
	// for each node that meets all of the scheduling requirements (resource
	// request, requiredDuringScheduling affinity expressions, etc.),
	// compute a sum by iterating through the elements of this field and adding
	// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
	// node(s) with the highest sum are the most preferred.
	// +optional
	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// Pod anti affinity is a group of inter pod anti affinity scheduling rules.
type PodAntiAffinity struct {
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// If the anti-affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the anti-affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system will try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`

	// If the anti-affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the anti-affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system may or may not try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
	// The scheduler will prefer to schedule pods to nodes that satisfy
	// the anti-affinity expressions specified by this field, but it may choose
	// a node that violates one or more of the expressions. The node that is
	// most preferred is the one with the greatest sum of weights, i.e.
	// for each node that meets all of the scheduling requirements (resource
	// request, requiredDuringScheduling anti-affinity expressions, etc.),
	// compute a sum by iterating through the elements of this field and adding
	// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
	// node(s) with the highest sum are the most preferred.
	// +optional
	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).
type WeightedPodAffinityTerm struct {
	// weight associated with matching the corresponding podAffinityTerm,
	// in the range 1-100.
	Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
	// Required. A pod affinity term, associated with the corresponding weight.
	PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"`
}
// Defines a set of pods (namely those matching the labelSelector
// relative to the given namespace(s)) that this pod should be
// co-located (affinity) or not co-located (anti-affinity) with,
// where co-located is defined as running on a node whose value of
// the label with key <topologyKey> matches that of any node on which
// a pod of the set of pods is running.
type PodAffinityTerm struct {
	// A label query over a set of resources, in this case pods.
	// +optional
	LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
	// namespaces specifies which namespaces the labelSelector applies to (matches against);
	// null or empty list means "this pod's namespace"
	Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"`
	// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
	// the labelSelector in the specified namespaces, where co-located is defined as running on a node
	// whose value of the label with key topologyKey matches that of any node on which any of the
	// selected pods is running.
	// Empty topologyKey is not allowed.
	TopologyKey string `json:"topologyKey" protobuf:"bytes,3,opt,name=topologyKey"`
}
// Node affinity is a group of node affinity scheduling rules.
type NodeAffinity struct {
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to an update), the system
	// will try to eventually evict the pod from its node.
	// +optional
	// RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`

	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to an update), the system
	// may or may not try to eventually evict the pod from its node.
	// +optional
	RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=requiredDuringSchedulingIgnoredDuringExecution"`
	// The scheduler will prefer to schedule pods to nodes that satisfy
	// the affinity expressions specified by this field, but it may choose
	// a node that violates one or more of the expressions. The node that is
	// most preferred is the one with the greatest sum of weights, i.e.
	// for each node that meets all of the scheduling requirements (resource
	// request, requiredDuringScheduling affinity expressions, etc.),
	// compute a sum by iterating through the elements of this field and adding
	// "weight" to the sum if the node matches the corresponding matchExpressions; the
	// node(s) with the highest sum are the most preferred.
	// +optional
	PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// An empty preferred scheduling term matches all objects with implicit weight 0
// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
type PreferredSchedulingTerm struct {
	// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
	Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
	// A node selector term, associated with the corresponding weight.
	Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"`
}
// The node this Taint is attached to has the "effect" on
// any pod that does not tolerate the Taint.
type Taint struct {
	// Required. The taint key to be applied to a node.
	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
	// Required. The taint value corresponding to the taint key.
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
	// Required. The effect of the taint on pods
	// that do not tolerate the taint.
	// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
	Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"`
	// TimeAdded represents the time at which the taint was added.
	// It is only written for NoExecute taints.
	// +optional
	TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"`
}

// TaintEffect describes the effect a Taint has on pods that do not tolerate it.
type TaintEffect string

const (
	// Do not allow new pods to schedule onto the node unless they tolerate the taint,
	// but allow all pods submitted to Kubelet without going through the scheduler
	// to start, and allow all already-running pods to continue running.
	// Enforced by the scheduler.
	TaintEffectNoSchedule TaintEffect = "NoSchedule"
	// Like TaintEffectNoSchedule, but the scheduler tries not to schedule
	// new pods onto the node, rather than prohibiting new pods from scheduling
	// onto the node entirely. Enforced by the scheduler.
	TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
	// Kubelet without going through the scheduler to start.
	// Enforced by Kubelet and the scheduler.
	// TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"

	// Evict any already-running pods that do not tolerate the taint.
	// Currently enforced by NodeController.
	TaintEffectNoExecute TaintEffect = "NoExecute"
)
// The pod this Toleration is attached to tolerates any taint that matches
// the triple <key,value,effect> using the matching operator <operator>.
type Toleration struct {
	// Key is the taint key that the toleration applies to. Empty means match all taint keys.
	// If the key is empty, operator must be Exists; this combination means to match all values and all keys.
	// +optional
	Key string `json:"key,omitempty" protobuf:"bytes,1,opt,name=key"`
	// Operator represents a key's relationship to the value.
	// Valid operators are Exists and Equal. Defaults to Equal.
	// Exists is equivalent to wildcard for value, so that a pod can
	// tolerate all taints of a particular category.
	// +optional
	Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"`
	// Value is the taint value the toleration matches to.
	// If the operator is Exists, the value should be empty, otherwise just a regular string.
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"`
	// Effect indicates the taint effect to match. Empty means match all taint effects.
	// When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
	// +optional
	Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"`
	// TolerationSeconds represents the period of time the toleration (which must be
	// of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
	// it is not set, which means tolerate the taint forever (do not evict). Zero and
	// negative values will be treated as 0 (evict immediately) by the system.
	// +optional
	TolerationSeconds *int64 `json:"tolerationSeconds,omitempty" protobuf:"varint,5,opt,name=tolerationSeconds"`
}

// A toleration operator is the set of operators that can be used in a toleration.
type TolerationOperator string

const (
	// TolerationOpExists matches any taint value; the toleration's value should be empty.
	TolerationOpExists TolerationOperator = "Exists"
	// TolerationOpEqual requires the taint value to equal the toleration's value.
	TolerationOpEqual TolerationOperator = "Equal"
)
// PodSpec is a description of a pod.
type PodSpec struct {
	// List of volumes that can be mounted by containers belonging to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge,retainKeys
	Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"`
	// List of initialization containers belonging to the pod.
	// Init containers are executed in order prior to containers being started. If any
	// init container fails, the pod is considered to have failed and is handled according
	// to its restartPolicy. The name for an init container or normal container must be
	// unique among all containers.
	// Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes.
	// The resourceRequirements of an init container are taken into account during scheduling
	// by finding the highest request/limit for each resource type, and then using the max of
	// that value or the sum of the normal containers. Limits are applied to init containers
	// in a similar fashion.
	// Init containers cannot currently be added or removed.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
	// +patchMergeKey=name
	// +patchStrategy=merge
	InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"`
	// List of containers belonging to the pod.
	// Containers cannot currently be added or removed.
	// There must be at least one container in a Pod.
	// Cannot be updated.
	// +patchMergeKey=name
	// +patchStrategy=merge
	Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"`
	// Restart policy for all containers within the pod.
	// One of Always, OnFailure, Never.
	// Default to Always.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
	// +optional
	RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"`
	// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
	// Value must be non-negative integer. The value zero indicates delete immediately.
	// If this value is nil, the default grace period will be used instead.
	// The grace period is the duration in seconds after the processes running in the pod are sent
	// a termination signal and the time when the processes are forcibly halted with a kill signal.
	// Set this value longer than the expected cleanup time for your process.
	// Defaults to 30 seconds.
	// +optional
	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"`
	// Optional duration in seconds the pod may be active on the node relative to
	// StartTime before the system will actively try to mark it failed and kill associated containers.
	// Value must be a positive integer.
	// +optional
	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"`
	// Set DNS policy for the pod.
	// Defaults to "ClusterFirst".
	// Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
	// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
	// To have DNS options set along with hostNetwork, you have to specify DNS policy
	// explicitly to 'ClusterFirstWithHostNet'.
	// +optional
	DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"`
	// NodeSelector is a selector which must be true for the pod to fit on a node.
	// Selector which must match a node's labels for the pod to be scheduled on that node.
	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
	// +optional
	NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"`
	// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
	// +optional
	ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"`
	// DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
	// Deprecated: Use serviceAccountName instead.
	// +k8s:conversion-gen=false
	// +optional
	DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"`
	// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
	// +optional
	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
	// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
	// the scheduler simply schedules this pod onto that node, assuming that it fits resource
	// requirements.
	// +optional
	NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
	// Host networking requested for this pod. Use the host's network namespace.
	// If this option is set, the ports that will be used must be specified.
	// Default to false.
	// +k8s:conversion-gen=false
	// +optional
	HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"`
	// Use the host's pid namespace.
	// Optional: Default to false.
	// +k8s:conversion-gen=false
	// +optional
	HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"`
	// Use the host's ipc namespace.
	// Optional: Default to false.
	// +k8s:conversion-gen=false
	// +optional
	HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"`
	// SecurityContext holds pod-level security attributes and common container settings.
	// Optional: Defaults to empty. See type description for default values of each field.
	// +optional
	SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"`
	// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
	// If specified, these secrets will be passed to individual puller implementations for them to use. For example,
	// in the case of docker, only DockerConfig type secrets are honored.
	// More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"`
	// Specifies the hostname of the Pod.
	// If not specified, the pod's hostname will be set to a system-defined value.
	// +optional
	Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"`
	// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
	// If not specified, the pod will not have a domainname at all.
	// +optional
	Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"`
	// If specified, the pod's scheduling constraints.
	// +optional
	Affinity *Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"`
	// If specified, the pod will be dispatched by specified scheduler.
	// If not specified, the pod will be dispatched by default scheduler.
	// +optional
	SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"`
	// If specified, the pod's tolerations.
	// +optional
	Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"`
	// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
	// file if specified. This is only valid for non-hostNetwork pods.
	// +optional
	// +patchMergeKey=ip
	// +patchStrategy=merge
	HostAliases []HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"`
	// If specified, indicates the pod's priority. "SYSTEM" is a special keyword
	// which indicates the highest priority. Any other name must be defined by
	// creating a PriorityClass object with that name.
	// If not specified, the pod priority will be default or zero if there is no
	// default.
	// +optional
	PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,24,opt,name=priorityClassName"`
	// The priority value. Various system components use this field to find the
	// priority of the pod. When Priority Admission Controller is enabled, it
	// prevents users from setting this field. The admission controller populates
	// this field from PriorityClassName.
	// The higher the value, the higher the priority.
	// +optional
	Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"`
	// Specifies the DNS parameters of a pod.
	// Parameters specified here will be merged to the generated DNS
	// configuration based on DNSPolicy.
	// +optional
	DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"`
}
// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
// pod's hosts file.
type HostAlias struct {
	// IP address of the host file entry.
	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
	// Hostnames for the above IP address.
	Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"`
}
// PodSecurityContext holds pod-level security attributes and common container settings.
// Some fields are also present in container.securityContext. Field values of
// container.securityContext take precedence over field values of PodSecurityContext.
type PodSecurityContext struct {
	// The SELinux context to be applied to all containers.
	// If unspecified, the container runtime will allocate a random SELinux context for each
	// container. May also be set in SecurityContext. If set in
	// both SecurityContext and PodSecurityContext, the value specified in SecurityContext
	// takes precedence for that container.
	// +optional
	SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"`
	// The UID to run the entrypoint of the container process.
	// Defaults to user specified in image metadata if unspecified.
	// May also be set in SecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence
	// for that container.
	// +optional
	RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"`
	// Indicates that the container must run as a non-root user.
	// If true, the Kubelet will validate the image at runtime to ensure that it
	// does not run as UID 0 (root) and fail to start the container if it does.
	// If unset or false, no such validation will be performed.
	// May also be set in SecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"`
	// A list of groups applied to the first process run in each container, in addition
	// to the container's primary GID. If unspecified, no groups will be added to
	// any container.
	// +optional
	SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"`
	// A special supplemental group that applies to all containers in a pod.
	// Some volume types allow the Kubelet to change the ownership of that volume
	// to be owned by the pod:
	//
	// 1. The owning GID will be the FSGroup
	// 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
	// 3. The permission bits are OR'd with rw-rw----
	//
	// If unset, the Kubelet will not modify the ownership and permissions of any volume.
	// +optional
	FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"`
}
// PodQOSClass defines the supported qos classes of Pods.
type PodQOSClass string

const (
	// PodQOSGuaranteed is the Guaranteed qos class.
	PodQOSGuaranteed PodQOSClass = "Guaranteed"
	// PodQOSBurstable is the Burstable qos class.
	PodQOSBurstable PodQOSClass = "Burstable"
	// PodQOSBestEffort is the BestEffort qos class.
	PodQOSBestEffort PodQOSClass = "BestEffort"
)
// PodDNSConfig defines the DNS parameters of a pod in addition to
// those generated from DNSPolicy. All fields are optional.
type PodDNSConfig struct {
	// A list of DNS name server IP addresses.
	// This will be appended to the base nameservers generated from DNSPolicy.
	// Duplicated nameservers will be removed.
	// +optional
	Nameservers []string `json:"nameservers,omitempty" protobuf:"bytes,1,rep,name=nameservers"`
	// A list of DNS search domains for host-name lookup.
	// This will be appended to the base search paths generated from DNSPolicy.
	// Duplicated search paths will be removed.
	// +optional
	Searches []string `json:"searches,omitempty" protobuf:"bytes,2,rep,name=searches"`
	// A list of DNS resolver options.
	// This will be merged with the base options generated from DNSPolicy.
	// Duplicated entries will be removed. Resolution options given in Options
	// will override those that appear in the base DNSPolicy.
	// +optional
	Options []PodDNSConfigOption `json:"options,omitempty" protobuf:"bytes,3,rep,name=options"`
}
// PodDNSConfigOption defines DNS resolver options of a pod.
type PodDNSConfigOption struct {
	// Name is the resolver option name. Required.
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// Value is the resolver option value, if any.
	// +optional
	Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
}
// PodStatus represents information about the status of a pod. Status may trail the actual
// state of a system.
type PodStatus struct {
	// Current condition of the pod.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
	// +optional
	Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"`
	// Current service state of pod.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
	// A human readable message indicating details about why the pod is in this condition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
	// A brief CamelCase message indicating details about why the pod is in this state.
	// e.g. 'Evicted'
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
	// IP address of the host to which the pod is assigned. Empty if not yet scheduled.
	// +optional
	HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
	// IP address allocated to the pod. Routable at least within the cluster.
	// Empty if not yet allocated.
	// +optional
	PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"`
	// RFC 3339 date and time at which the object was acknowledged by the Kubelet.
	// This is before the Kubelet pulled the container image(s) for the pod.
	// +optional
	StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"`
	// The list has one entry per init container in the manifest. The most recent successful
	// init container will have ready = true, the most recently started container will have
	// startTime set.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
	// NOTE(review): unlike the sibling list fields this one carries no +optional marker
	// and uses protobuf field 10, out of declaration order — confirm this is intentional.
	InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"`
	// The list has one entry per container in the manifest. Each entry is currently the output
	// of `docker inspect`.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
	// +optional
	ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"`
	// The Quality of Service (QOS) classification assigned to the pod based on resource requirements
	// See PodQOSClass type for available QOS classes
	// More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md
	// +optional
	QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded and decoded.
type PodStatusResult struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Most recently observed status of the pod.
	// This data may not be up to date.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Pod is a collection of containers that can run on a host. This resource is created
// by clients and scheduled onto hosts.
type Pod struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Specification of the desired behavior of the pod.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Most recently observed status of the pod.
	// This data may not be up to date.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodList is a list of Pods.
type PodList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of pods.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md
	Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// PodTemplateSpec describes the data a pod should have when created from a template.
type PodTemplateSpec struct {
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Specification of the desired behavior of the pod.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodTemplate describes a template for creating copies of a predefined pod.
type PodTemplate struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Template defines the pods that will be created from this pod template.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodTemplateList is a list of PodTemplates.
type PodTemplateList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of pod templates.
	Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ReplicationControllerSpec is the specification of a replication controller.
type ReplicationControllerSpec struct {
	// Replicas is the number of desired replicas.
	// This is a pointer to distinguish between explicit zero and unspecified.
	// Defaults to 1.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
	// +optional
	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
	// Minimum number of seconds for which a newly created pod should be ready
	// without any of its container crashing, for it to be considered available.
	// Defaults to 0 (pod will be considered available as soon as it is ready).
	// +optional
	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
	// Selector is a label query over pods that should match the Replicas count.
	// If Selector is empty, it is defaulted to the labels present on the Pod template.
	// Label keys and values that must match in order to be controlled by this replication
	// controller, if empty defaulted to labels on Pod template.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
	// +optional
	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`

	// TemplateRef is a reference to an object that describes the pod that will be created if
	// insufficient replicas are detected. This field is intentionally disabled (kept here as a
	// commented-out declaration); use Template instead.
	// +optional
	// TemplateRef *ObjectReference `json:"templateRef,omitempty"`

	// Template is the object that describes the pod that will be created if
	// insufficient replicas are detected. This takes precedence over a TemplateRef.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
	// +optional
	Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
}
// ReplicationControllerStatus represents the current status of a replication
// controller.
type ReplicationControllerStatus struct {
	// Replicas is the most recently observed number of replicas.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
	// The number of pods that have labels matching the labels of the pod template of the replication controller.
	// +optional
	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
	// The number of ready replicas for this replication controller.
	// +optional
	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
	// The number of available replicas (ready for at least minReadySeconds) for this replication controller.
	// +optional
	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
	// ObservedGeneration reflects the generation of the most recently observed replication controller.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
	// Represents the latest available observations of a replication controller's current state.
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
}
// ReplicationControllerConditionType is the type of a ReplicationControllerCondition.
type ReplicationControllerConditionType string

// These are valid conditions of a replication controller.
const (
	// ReplicationControllerReplicaFailure is added in a replication controller when one of its pods
	// fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors,
	// etc. or deleted due to kubelet being down or finalizers are failing.
	ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure"
)
// ReplicationControllerCondition describes the state of a replication controller at a certain point.
type ReplicationControllerCondition struct {
	// Type of replication controller condition.
	Type ReplicationControllerConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicationControllerConditionType"`
	// Status of the condition, one of True, False, Unknown.
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// The last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
	// The reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
	// A human readable message indicating details about the transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
}
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ReplicationController represents the configuration of a replication controller.
type ReplicationController struct {
	metav1.TypeMeta `json:",inline"`
	// If the Labels of a ReplicationController are empty, they are defaulted to
	// be the same as the Pod(s) that the replication controller manages.
	// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the specification of the desired behavior of the replication controller.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status is the most recently observed status of the replication controller.
	// This data may be out of date by some window of time.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ReplicationControllerList is a collection of replication controllers.
type ReplicationControllerList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of replication controllers.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
	Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ServiceAffinity is the session affinity type for a Service
// ("ClientIP" or "None"; see the constants below).
type ServiceAffinity string

const (
	// ServiceAffinityClientIP is the Client IP based.
	ServiceAffinityClientIP ServiceAffinity = "ClientIP"
	// ServiceAffinityNone - no session affinity.
	ServiceAffinityNone ServiceAffinity = "None"
)
// DefaultClientIPServiceAffinitySeconds is the default session sticky time
// (10800 seconds, i.e. 3 hours) used for ClientIP-based session affinity
// when ClientIPConfig.TimeoutSeconds is not set.
const DefaultClientIPServiceAffinitySeconds int32 = 10800
// SessionAffinityConfig represents the configurations of session affinity.
type SessionAffinityConfig struct {
	// clientIP contains the configurations of Client IP based session affinity.
	// +optional
	ClientIP *ClientIPConfig `json:"clientIP,omitempty" protobuf:"bytes,1,opt,name=clientIP"`
}
// ClientIPConfig represents the configurations of Client IP based session affinity.
type ClientIPConfig struct {
	// timeoutSeconds specifies the seconds of ClientIP type session sticky time.
	// The value must be >0 && <=86400 (for 1 day) if ServiceAffinity == "ClientIP".
	// Default value is 10800 (for 3 hours); see DefaultClientIPServiceAffinitySeconds.
	// +optional
	TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"`
}
// ServiceType describes ingress methods for a service
// (see the constants below for the valid values).
type ServiceType string

const (
	// ServiceTypeClusterIP means a service will only be accessible inside the
	// cluster, via the cluster IP.
	ServiceTypeClusterIP ServiceType = "ClusterIP"
	// ServiceTypeNodePort means a service will be exposed on one port of
	// every node, in addition to 'ClusterIP' type.
	ServiceTypeNodePort ServiceType = "NodePort"
	// ServiceTypeLoadBalancer means a service will be exposed via an
	// external load balancer (if the cloud provider supports it), in addition
	// to 'NodePort' type.
	ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
	// ServiceTypeExternalName means a service consists of only a reference to
	// an external name that kubedns or equivalent will return as a CNAME
	// record, with no exposing or proxying of any pods involved.
	ServiceTypeExternalName ServiceType = "ExternalName"
)
// ServiceExternalTrafficPolicyType is the external traffic policy of a Service
// ("Local" or "Cluster"; see the constants below).
type ServiceExternalTrafficPolicyType string

const (
	// ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior.
	ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local"
	// ServiceExternalTrafficPolicyTypeCluster specifies node-global (legacy) behavior.
	ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster"
)
// ServiceStatus represents the current status of a service.
type ServiceStatus struct {
	// LoadBalancer contains the current status of the load-balancer,
	// if one is present.
	// +optional
	LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"`
}
// LoadBalancerStatus represents the status of a load-balancer.
type LoadBalancerStatus struct {
	// Ingress is a list containing ingress points for the load-balancer.
	// Traffic intended for the service should be sent to these ingress points.
	// +optional
	Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"`
}
// LoadBalancerIngress represents the status of a load-balancer ingress point:
// traffic intended for the service should be sent to an ingress point.
// Exactly which of IP or Hostname is set depends on the load-balancer provider.
type LoadBalancerIngress struct {
	// IP is set for load-balancer ingress points that are IP based
	// (typically GCE or OpenStack load-balancers).
	// +optional
	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
	// Hostname is set for load-balancer ingress points that are DNS based
	// (typically AWS load-balancers).
	// +optional
	Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"`
}
// ServiceSpec describes the attributes that a user creates on a service.
type ServiceSpec struct {
	// The list of ports that are exposed by this service.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +patchMergeKey=port
	// +patchStrategy=merge
	Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"`
	// Route service traffic to pods with label keys and values matching this
	// selector. If empty or not present, the service is assumed to have an
	// external process managing its endpoints, which Kubernetes will not
	// modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
	// Ignored if type is ExternalName.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/
	// +optional
	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
	// clusterIP is the IP address of the service and is usually assigned
	// randomly by the master. If an address is specified manually and is not in
	// use by others, it will be allocated to the service; otherwise, creation
	// of the service will fail. This field can not be changed through updates.
	// Valid values are "None", empty string (""), or a valid IP address. "None"
	// can be specified for headless services when proxying is not required.
	// Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if
	// type is ExternalName.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +optional
	ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"`
	// type determines how the Service is exposed. Defaults to ClusterIP. Valid
	// options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
	// "ExternalName" maps to the specified externalName.
	// "ClusterIP" allocates a cluster-internal IP address for load-balancing to
	// endpoints. Endpoints are determined by the selector or if that is not
	// specified, by manual construction of an Endpoints object. If clusterIP is
	// "None", no virtual IP is allocated and the endpoints are published as a
	// set of endpoints rather than a stable IP.
	// "NodePort" builds on ClusterIP and allocates a port on every node which
	// routes to the clusterIP.
	// "LoadBalancer" builds on NodePort and creates an
	// external load-balancer (if supported in the current cloud) which routes
	// to the clusterIP.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types
	// +optional
	Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"`
	// externalIPs is a list of IP addresses for which nodes in the cluster
	// will also accept traffic for this service. These IPs are not managed by
	// Kubernetes. The user is responsible for ensuring that traffic arrives
	// at a node with this IP. A common example is external load-balancers
	// that are not part of the Kubernetes system.
	// +optional
	ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"`
	// Supports "ClientIP" and "None". Used to maintain session affinity.
	// Enable client IP based session affinity.
	// Must be ClientIP or None.
	// Defaults to None.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +optional
	SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"`
	// Only applies to Service Type: LoadBalancer.
	// LoadBalancer will get created with the IP specified in this field.
	// This feature depends on whether the underlying cloud-provider supports specifying
	// the loadBalancerIP when a load balancer is created.
	// This field will be ignored if the cloud-provider does not support the feature.
	// +optional
	LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"`
	// If specified and supported by the platform, traffic through the cloud-provider
	// load-balancer will be restricted to the specified client IPs. This field will be
	// ignored if the cloud-provider does not support the feature.
	// More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
	// +optional
	LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"`
	// externalName is the external reference that kubedns or equivalent will
	// return as a CNAME record for this service. No proxying will be involved.
	// Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123)
	// and requires Type to be ExternalName.
	// +optional
	ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"`
	// externalTrafficPolicy denotes if this Service desires to route external
	// traffic to node-local or cluster-wide endpoints. "Local" preserves the
	// client source IP and avoids a second hop for LoadBalancer and Nodeport
	// type services, but risks potentially imbalanced traffic spreading.
	// "Cluster" obscures the client source IP and may cause a second hop to
	// another node, but should have good overall load-spreading.
	// +optional
	ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"`
	// healthCheckNodePort specifies the healthcheck nodePort for the service.
	// If not specified, HealthCheckNodePort is created by the service api
	// backend with the allocated nodePort. Will use user-specified nodePort value
	// if specified by the client. Only has effect when Type is set to LoadBalancer
	// and ExternalTrafficPolicy is set to Local.
	// +optional
	HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"`
	// publishNotReadyAddresses, when set to true, indicates that DNS implementations
	// must publish the notReadyAddresses of subsets for the Endpoints associated with
	// the Service. The default value is false.
	// The primary use case for setting this field is to use a StatefulSet's Headless Service
	// to propagate SRV records for its Pods without respect to their readiness for purpose
	// of peer discovery.
	// This field will replace the service.alpha.kubernetes.io/tolerate-unready-endpoints
	// when that annotation is deprecated and all clients have been converted to use this
	// field.
	// +optional
	PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"`
	// sessionAffinityConfig contains the configurations of session affinity.
	// +optional
	SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"`
}
// ServicePort contains information on service's port.
type ServicePort struct {
	// The name of this port within the service. This must be a DNS_LABEL.
	// All ports within a ServiceSpec must have unique names. This maps to
	// the 'Name' field in EndpointPort objects.
	// Optional if only one ServicePort is defined on this service.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// The IP protocol for this port. Supports "TCP" and "UDP".
	// Default is TCP.
	// +optional
	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
	// The port that will be exposed by this service.
	Port int32 `json:"port" protobuf:"varint,3,opt,name=port"`
	// Number or name of the port to access on the pods targeted by the service.
	// Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
	// If this is a string, it will be looked up as a named port in the
	// target Pod's container ports. If this is not specified, the value
	// of the 'port' field is used (an identity map).
	// This field is ignored for services with clusterIP=None, and should be
	// omitted or set equal to the 'port' field.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
	// +optional
	TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"`
	// The port on each node on which this service is exposed when type=NodePort or LoadBalancer.
	// Usually assigned by the system. If specified, it will be allocated to the service
	// if unused or else creation of the service will fail.
	// Default is to auto-allocate a port if the ServiceType of this Service requires one.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
	// +optional
	NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Service is a named abstraction of software service (for example, mysql) consisting of local port
// (for example 3306) that the proxy listens on, and the selector that determines which pods
// will answer requests sent through the proxy.
type Service struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the behavior of a service.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Most recently observed status of the service.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
const (
	// ClusterIPNone - do not assign a cluster IP;
	// no proxying required and no environment variables should be created for pods.
	ClusterIPNone = "None"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ServiceList holds a list of services.
type ServiceList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of services.
	Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ServiceAccount binds together:
// * a name, understood by users, and perhaps by peripheral systems, for an identity
// * a principal that can be authenticated and authorized
// * a set of secrets
type ServiceAccount struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount.
	// More info: https://kubernetes.io/docs/concepts/configuration/secret
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"`
	// ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
	// in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
	// can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
	// More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
	// +optional
	ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"`
	// AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.
	// Can be overridden at the pod level.
	// +optional
	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,4,opt,name=automountServiceAccountToken"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ServiceAccountList is a list of ServiceAccount objects.
type ServiceAccountList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of ServiceAccounts.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
	Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Endpoints is a collection of endpoints that implement the actual service. Example:
// Name: "mysvc",
// Subsets: [
// {
// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
// },
// {
// Addresses: [{"ip": "10.10.3.3"}],
// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
// },
// ]
type Endpoints struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// The set of all endpoints is the union of all subsets. Addresses are placed into
// subsets according to the IPs they share. A single address with multiple ports,
// some of which are ready and some of which are not (because they come from
// different containers) will result in the address being displayed in different
// subsets for the different ports. No address will appear in both Addresses and
// NotReadyAddresses in the same subset.
// Sets of addresses and ports that comprise a service.
Subsets []EndpointSubset `json:"subsets" protobuf:"bytes,2,rep,name=subsets"`
}
// EndpointSubset is a group of addresses with a common set of ports. The
// expanded set of endpoints is the Cartesian product of Addresses x Ports.
// For example, given:
// {
// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
// }
// The resulting set of endpoints can be viewed as:
// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
// b: [ 10.10.1.1:309, 10.10.2.2:309 ]
type EndpointSubset struct {
// IP addresses which offer the related ports that are marked as ready. These endpoints
// should be considered safe for load balancers and clients to utilize.
// +optional
Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"`
// IP addresses which offer the related ports but are not currently marked as ready
// because they have not yet finished starting, have recently failed a readiness check,
// or have recently failed a liveness check.
// +optional
NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"`
// Port numbers available on the related IP addresses.
// +optional
Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"`
}
// EndpointAddress is a tuple that describes single IP address.
type EndpointAddress struct {
// The IP of this endpoint.
// May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16),
// or link-local multicast (224.0.0.0/24).
// IPv6 is also accepted but not fully supported on all platforms. Also, certain
// kubernetes components, like kube-proxy, are not IPv6 ready.
// TODO: This should allow hostname or IP, See #4447.
IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
// The Hostname of this endpoint
// +optional
Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"`
// Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
// +optional
NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,4,opt,name=nodeName"`
// Reference to object providing the endpoint.
// Note: protobuf field 2 (Hostname and NodeName were added later with fields 3 and 4).
// +optional
TargetRef *ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"`
}
// EndpointPort is a tuple that describes a single port.
type EndpointPort struct {
// The name of this port (corresponds to ServicePort.Name).
// Must be a DNS_LABEL.
// Optional only if one port is defined.
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// The port number of the endpoint.
Port int32 `json:"port" protobuf:"varint,2,opt,name=port"`
// The IP protocol for this port.
// Must be UDP or TCP.
// Default is TCP.
// +optional
Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EndpointsList is a list of endpoints.
type EndpointsList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of endpoints.
Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// NodeSpec describes the attributes that a node is created with.
type NodeSpec struct {
// PodCIDR represents the pod IP range assigned to the node.
// +optional
PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"`
// External ID of the node assigned by some machine database (e.g. a cloud provider).
// Deprecated.
// +optional
ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"`
// ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
// +optional
ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"`
// Unschedulable controls node schedulability of new pods. By default, node is schedulable.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration
// +optional
Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"`
// If specified, the node's taints.
// NOTE(review): the protobuf tag says "opt" although this is a repeated (slice)
// field — confirm against the generated protobuf marshaler before changing.
// +optional
Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"`
// If specified, the source to get node configuration from
// The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field
// +optional
ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
type NodeConfigSource struct {
metav1.TypeMeta `json:",inline"`
// ConfigMapRef is a reference to a ConfigMap holding the node configuration.
ConfigMapRef *ObjectReference `json:"configMapRef,omitempty" protobuf:"bytes,1,opt,name=configMapRef"`
}
// DaemonEndpoint contains information about a single Daemon endpoint.
type DaemonEndpoint struct {
/*
The port tag was not properly in quotes in earlier releases, so it must be
uppercased for backwards compat (since it was falling back to var name of
'Port').
*/
// Port number of the given endpoint.
// Deliberately serialized as "Port" (capitalized) — see the compat note above.
Port int32 `json:"Port" protobuf:"varint,1,opt,name=Port"`
}
// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
type NodeDaemonEndpoints struct {
// Endpoint on which Kubelet is listening.
// +optional
KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"`
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
type NodeSystemInfo struct {
// MachineID reported by the node. For unique machine identification
// in the cluster this field is preferred. Learn more from man(5)
// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"`
// SystemUUID reported by the node. For unique machine identification
// MachineID is preferred. This field is specific to Red Hat hosts
// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"`
// Boot ID reported by the node.
BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"`
// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"`
// ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"`
// Kubelet Version reported by the node.
KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"`
// KubeProxy Version reported by the node.
KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"`
// The Operating System reported by the node
OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
// The Architecture reported by the node
Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"`
}
// NodeStatus is information about the current status of a node.
type NodeStatus struct {
// Capacity represents the total resources of a node.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
// +optional
Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
// Allocatable represents the resources of a node that are available for scheduling.
// Defaults to Capacity.
// +optional
Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"`
// NodePhase is the recently observed lifecycle phase of the node.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#phase
// The field is never populated, and now is deprecated.
// +optional
Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"`
// Conditions is an array of current observed node conditions.
// Merged on strategic-merge patch keyed by "type" (see the patch tags below).
// More info: https://kubernetes.io/docs/concepts/nodes/node/#condition
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
// List of addresses reachable to the node.
// Queried from cloud provider, if available.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"`
// Endpoints of daemons running on the Node.
// +optional
DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"`
// Set of ids/uuids to uniquely identify the node.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#info
// +optional
NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
// List of container images on this node
// +optional
Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"`
// List of attachable volumes in use (mounted) by the node.
// +optional
VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"`
// List of volumes that are attached to the node.
// +optional
VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"`
}
// UniqueVolumeName identifies an attachable volume on a node.
type UniqueVolumeName string
// AttachedVolume describes a volume attached to a node
type AttachedVolume struct {
// Name of the attached volume
// NOTE(review): the protobuf tags on both fields say "rep" although the fields
// are scalars — confirm against the generated protobuf marshaler before changing.
Name UniqueVolumeName `json:"name" protobuf:"bytes,1,rep,name=name"`
// DevicePath represents the device path where the volume should be available
DevicePath string `json:"devicePath" protobuf:"bytes,2,rep,name=devicePath"`
}
// AvoidPods describes pods that should avoid this node. This is the value for a
// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and
// will eventually become a field of NodeStatus.
type AvoidPods struct {
// Bounded-sized list of signatures of pods that should avoid this node, sorted
// in timestamp order from oldest to newest. Size of the slice is unspecified.
// +optional
PreferAvoidPods []PreferAvoidPodsEntry `json:"preferAvoidPods,omitempty" protobuf:"bytes,1,rep,name=preferAvoidPods"`
}
// PreferAvoidPodsEntry describes a class of pods that should avoid this node.
type PreferAvoidPodsEntry struct {
// The class of pods.
PodSignature PodSignature `json:"podSignature" protobuf:"bytes,1,opt,name=podSignature"`
// Time at which this entry was added to the list.
// +optional
EvictionTime metav1.Time `json:"evictionTime,omitempty" protobuf:"bytes,2,opt,name=evictionTime"`
// (brief) reason why this entry was added to the list.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
// Human readable message indicating why this entry was added to the list.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
}
// PodSignature describes the class of pods that should avoid this node.
// Exactly one field should be set.
type PodSignature struct {
// Reference to controller whose pods should avoid this node.
// +optional
PodController *metav1.OwnerReference `json:"podController,omitempty" protobuf:"bytes,1,opt,name=podController"`
}
// ContainerImage describes a container image present on a node.
type ContainerImage struct {
// Names by which this image is known.
// e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]
Names []string `json:"names" protobuf:"bytes,1,rep,name=names"`
// The size of the image in bytes.
// +optional
SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"`
}
// NodePhase is the lifecycle phase of a node. See NodeStatus.Phase (deprecated, never populated).
type NodePhase string
// These are the valid phases of node.
const (
// NodePending means the node has been created/added by the system, but not configured.
NodePending NodePhase = "Pending"
// NodeRunning means the node has been configured and has Kubernetes components running.
NodeRunning NodePhase = "Running"
// NodeTerminated means the node has been removed from the cluster.
NodeTerminated NodePhase = "Terminated"
)
// NodeConditionType names a condition reported in NodeCondition.Type.
type NodeConditionType string
// These are valid conditions of node. Currently, we don't have enough information to decide
// node condition. In the future, we will add more. The proposed set of conditions are:
// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable.
const (
// NodeReady means kubelet is healthy and ready to accept pods.
NodeReady NodeConditionType = "Ready"
// NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk
// space on the node.
NodeOutOfDisk NodeConditionType = "OutOfDisk"
// NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
NodeMemoryPressure NodeConditionType = "MemoryPressure"
// NodeDiskPressure means the kubelet is under pressure due to insufficient available disk.
NodeDiskPressure NodeConditionType = "DiskPressure"
// NodeNetworkUnavailable means that network for the node is not correctly configured.
NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
// NodeConfigOK indicates whether the kubelet is correctly configured
NodeConfigOK NodeConditionType = "ConfigOK"
)
// NodeCondition contains condition information for a node.
type NodeCondition struct {
// Type of node condition.
Type NodeConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeConditionType"`
// Status of the condition, one of True, False, Unknown.
Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
// Last time we got an update on a given condition.
// +optional
LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"`
// Last time the condition transit from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
// (brief) reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
// Human readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// NodeAddressType names the kind of address reported in NodeAddress.Type.
type NodeAddressType string
// These are valid address type of node.
const (
NodeHostName NodeAddressType = "Hostname"
NodeExternalIP NodeAddressType = "ExternalIP"
NodeInternalIP NodeAddressType = "InternalIP"
NodeExternalDNS NodeAddressType = "ExternalDNS"
NodeInternalDNS NodeAddressType = "InternalDNS"
)
// NodeAddress contains information for the node's address.
type NodeAddress struct {
// Node address type, one of Hostname, ExternalIP or InternalIP.
Type NodeAddressType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeAddressType"`
// The node address.
Address string `json:"address" protobuf:"bytes,2,opt,name=address"`
}
// ResourceName is the name identifying various resources in a ResourceList.
type ResourceName string
// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters,
// with the -, _, and . characters allowed anywhere, except the first or last character.
// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than
// camel case, separating compound words.
// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name.
const (
// CPU, in cores. (500m = .5 cores)
ResourceCPU ResourceName = "cpu"
// Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceMemory ResourceName = "memory"
// Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
ResourceStorage ResourceName = "storage"
// Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
// The resource name for ResourceEphemeralStorage is alpha and it can change across releases.
ResourceEphemeralStorage ResourceName = "ephemeral-storage"
// NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned.
ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu"
)
const (
// Default namespace prefix.
ResourceDefaultNamespacePrefix = "kubernetes.io/"
// Name prefix for huge page resources (alpha).
ResourceHugePagesPrefix = "hugepages-"
)
// ResourceList is a set of (resource name, quantity) pairs.
type ResourceList map[ResourceName]resource.Quantity
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Node is a worker node in Kubernetes.
// Each node will have a unique identifier in the cache (i.e. in etcd).
type Node struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the behavior of a node.
// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Most recently observed status of the node.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeList is the whole list of all Nodes which have been registered with master.
type NodeList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of nodes
Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// FinalizerName is the name identifying a finalizer during namespace lifecycle.
type FinalizerName string
// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or
// in metav1.
const (
FinalizerKubernetes FinalizerName = "kubernetes"
)
// NamespaceSpec describes the attributes on a Namespace.
type NamespaceSpec struct {
// Finalizers is an opaque list of values that must be empty to permanently remove object from storage.
// More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
// +optional
Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"`
}
// NamespaceStatus is information about the current status of a Namespace.
type NamespaceStatus struct {
// Phase is the current lifecycle phase of the namespace.
// More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
// +optional
Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"`
}
// NamespacePhase is the lifecycle phase of a namespace, reported in NamespaceStatus.Phase.
type NamespacePhase string
// These are the valid phases of a namespace.
const (
// NamespaceActive means the namespace is available for use in the system
NamespaceActive NamespacePhase = "Active"
// NamespaceTerminating means the namespace is undergoing graceful termination
NamespaceTerminating NamespacePhase = "Terminating"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Namespace provides a scope for Names.
// Use of multiple namespaces is optional.
type Namespace struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the behavior of the Namespace.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Status describes the current status of a Namespace.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NamespaceList is a list of Namespaces.
type NamespaceList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of Namespace objects in the list.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
// Deprecated in 1.7, please use the bindings subresource of pods instead.
type Binding struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// The target object that you want to bind to the standard object.
Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"`
}
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
// +k8s:openapi-gen=false
type Preconditions struct {
// Specifies the target UID.
// +optional
UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
}
// DeletionPropagation decides if a deletion will propagate to the dependents of the object, and how the garbage collector will handle the propagation.
type DeletionPropagation string
// These are the valid deletion-propagation policies.
const (
// Orphans the dependents.
DeletePropagationOrphan DeletionPropagation = "Orphan"
// Deletes the object from the key-value store, the garbage collector will delete the dependents in the background.
DeletePropagationBackground DeletionPropagation = "Background"
// The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store.
// API sever will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp.
// This policy is cascading, i.e., the dependents will be deleted with Foreground.
DeletePropagationForeground DeletionPropagation = "Foreground"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DeleteOptions may be provided when deleting an API object
// DEPRECATED: This type has been moved to meta/v1 and will be removed soon.
// +k8s:openapi-gen=false
type DeleteOptions struct {
metav1.TypeMeta `json:",inline"`
// The duration in seconds before the object should be deleted. Value must be non-negative integer.
// The value zero indicates delete immediately. If this value is nil, the default grace period for the
// specified type will be used.
// Defaults to a per object value if not specified. zero means delete immediately.
// +optional
GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"`
// Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
// returned.
// +optional
Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"`
// Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.
// Should the dependent objects be orphaned. If true/false, the "orphan"
// finalizer will be added to/removed from the object's finalizers list.
// Either this field or PropagationPolicy may be set, but not both.
// +optional
OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"`
// Whether and how garbage collection will be performed.
// Either this field or OrphanDependents may be set, but not both.
// The default policy is decided by the existing finalizer set in the
// metadata.finalizers and the resource-specific default policy.
// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
// allow the garbage collector to delete the dependents in the background;
// 'Foreground' - a cascading policy that deletes all dependents in the
// foreground.
// NOTE(review): this field has no json tag, so encoding/json would use the Go
// field name "PropagationPolicy" as the key — confirm whether that is intended
// for this deprecated type before adding a tag.
// +optional
PropagationPolicy *DeletionPropagation `protobuf:"bytes,4,opt,name=propagationPolicy,casttype=DeletionPropagation"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ListOptions is the query options to a standard REST list call.
// DEPRECATED: This type has been moved to meta/v1 and will be removed soon.
// +k8s:openapi-gen=false
type ListOptions struct {
metav1.TypeMeta `json:",inline"`
// A selector to restrict the list of returned objects by their labels.
// Defaults to everything.
// +optional
LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
// A selector to restrict the list of returned objects by their fields.
// Defaults to everything.
// +optional
FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"`
// If true, partially initialized resources are included in the response.
// (protobuf field 6: added after the fields below.)
// +optional
IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,6,opt,name=includeUninitialized"`
// Watch for changes to the described resources and return them as a stream of
// add, update, and remove notifications. Specify resourceVersion.
// +optional
Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"`
// When specified with a watch call, shows changes that occur after that particular version of a resource.
// Defaults to changes from the beginning of history.
// When specified for list:
// - if unset, then the result is returned from remote storage based on quorum-read flag;
// - if it's 0, then we simply return what we currently have in cache, no guarantee;
// - if set to non zero, then the result is at least as fresh as given rv.
// +optional
ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"`
// Timeout for the list/watch call.
// +optional
TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodLogOptions is the query options for a Pod's logs REST call.
type PodLogOptions struct {
metav1.TypeMeta `json:",inline"`
// The container for which to stream logs. Defaults to only container if there is one container in the pod.
// +optional
Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
// Follow the log stream of the pod. Defaults to false.
// +optional
Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
// Return previous terminated container logs. Defaults to false.
// +optional
Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
// A relative time in seconds before the current time from which to show logs. If this value
// precedes the time a pod was started, only logs since the pod start will be returned.
// If this value is in the future, no logs will be returned.
// Only one of sinceSeconds or sinceTime may be specified.
// +optional
SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
// An RFC3339 timestamp from which to show logs. If this value
// precedes the time a pod was started, only logs since the pod start will be returned.
// If this value is in the future, no logs will be returned.
// Only one of sinceSeconds or sinceTime may be specified.
// +optional
SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
// If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
// of log output. Defaults to false.
// +optional
Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
// If set, the number of lines from the end of the logs to show. If not specified,
// logs are shown from the creation of the container or sinceSeconds or sinceTime
// +optional
TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
// If set, the number of bytes to read from the server before terminating the
// log output. This may not display a complete final line of logging, and may return
// slightly more or slightly less than the specified limit.
// +optional
LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodAttachOptions is the query options to a Pod's remote attach call.
// ---
// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY
type PodAttachOptions struct {
metav1.TypeMeta `json:",inline"`
// Stdin if true, redirects the standard input stream of the pod for this call.
// Defaults to false.
// +optional
Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
// Stdout if true indicates that stdout is to be redirected for the attach call.
// Defaults to true.
// +optional
Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
// Stderr if true indicates that stderr is to be redirected for the attach call.
// Defaults to true.
// +optional
Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
// TTY if true indicates that a tty will be allocated for the attach call.
// This is passed through the container runtime so the tty
// is allocated on the worker node by the container runtime.
// Defaults to false.
// +optional
TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
// The container in which to execute the command.
// Defaults to only container if there is only one container in the pod.
// +optional
Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodExecOptions is the query options to a Pod's remote exec call.
// ---
// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging
// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
type PodExecOptions struct {
metav1.TypeMeta `json:",inline"`
// Redirect the standard input stream of the pod for this call.
// Defaults to false.
// +optional
Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
// Redirect the standard output stream of the pod for this call.
// Defaults to true.
// +optional
Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
// Redirect the standard error stream of the pod for this call.
// Defaults to true.
// +optional
Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
// TTY if true indicates that a tty will be allocated for the exec call.
// Defaults to false.
// +optional
TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
// Container in which to execute the command.
// Defaults to only container if there is only one container in the pod.
// +optional
Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
// Command is the remote command to execute. argv array. Not executed within a shell.
Command []string `json:"command" protobuf:"bytes,6,rep,name=command"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodPortForwardOptions is the query options to a Pod's port forward call
// when using WebSockets.
// The `port` query parameter must specify the port or
// ports (comma separated) to forward over.
// Port forwarding over SPDY does not use these options. It requires the port
// to be passed in the `port` header as part of the request.
type PodPortForwardOptions struct {
metav1.TypeMeta `json:",inline"`
// List of ports to forward
// Required when using WebSockets
// +optional
Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodProxyOptions is the query options to a Pod's proxy call.
type PodProxyOptions struct {
metav1.TypeMeta `json:",inline"`
// Path is the URL path to use for the current proxy request to pod.
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeProxyOptions is the query options to a Node's proxy call.
type NodeProxyOptions struct {
metav1.TypeMeta `json:",inline"`
// Path is the URL path to use for the current proxy request to node.
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceProxyOptions is the query options to a Service's proxy call.
type ServiceProxyOptions struct {
metav1.TypeMeta `json:",inline"`
// Path is the part of URLs that include service endpoints, suffixes,
// and parameters to use for the current proxy request to service.
// For example, the whole request URL is
// http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy.
// Path is _search?q=user:kimchy.
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// ObjectReference contains enough information to let you inspect or modify the referred object.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ObjectReference struct {
// Kind of the referent.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
// Namespace of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
// +optional
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
// Name of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
// UID of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
// +optional
UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
// API version of the referent.
// +optional
APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"`
// Specific resourceVersion to which this reference is made, if any.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
// +optional
ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
// If referring to a piece of an object instead of an entire object, this string
// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
// For example, if the object reference is to a container within a pod, this would take on a value like:
// "spec.containers{name}" (where "name" refers to the name of the container that triggered
// the event) or if no container name is specified "spec.containers[2]" (container with
// index 2 in this pod). This syntax is chosen only to have some well-defined way of
// referencing a part of an object.
// TODO: this design is not final and this field is subject to change in the future.
// +optional
FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,7,opt,name=fieldPath"`
}
// LocalObjectReference contains enough information to let you locate the
// referenced object inside the same namespace.
type LocalObjectReference struct {
// Name of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// TODO: Add other useful fields. apiVersion, kind, uid?
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SerializedReference is a reference to serialized object.
type SerializedReference struct {
metav1.TypeMeta `json:",inline"`
// The reference to an object in the system.
// +optional
Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"`
}
// EventSource contains information for an event.
type EventSource struct {
// Component from which the event is generated.
// +optional
Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"`
// Node name on which the event is generated.
// +optional
Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}
// Valid values for event types (new types could be added in future)
const (
// EventTypeNormal is for information only and will not cause any problems
EventTypeNormal string = "Normal"
// EventTypeWarning is for events that warn that something might go wrong
EventTypeWarning string = "Warning"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Event is a report of an event somewhere in the cluster.
type Event struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
// The object that this event is about.
InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"`
// This should be a short, machine understandable string that gives the reason
// for the transition into the object's current status.
// TODO: provide exact specification for format.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
// A human-readable description of the status of this operation.
// TODO: decide on maximum length.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
// The component reporting this event. Should be a short machine understandable string.
// +optional
Source EventSource `json:"source,omitempty" protobuf:"bytes,5,opt,name=source"`
// The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
// +optional
FirstTimestamp metav1.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"`
// The time at which the most recent occurrence of this event was recorded.
// +optional
LastTimestamp metav1.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"`
// The number of times this event has occurred.
// +optional
Count int32 `json:"count,omitempty" protobuf:"varint,8,opt,name=count"`
// Type of this event (Normal, Warning), new types could be added in the future
// +optional
Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"`
// Time when this Event was first observed.
// +optional
EventTime metav1.MicroTime `json:"eventTime,omitempty" protobuf:"bytes,10,opt,name=eventTime"`
// Data about the Event series this event represents or nil if it's a singleton Event.
// +optional
Series *EventSeries `json:"series,omitempty" protobuf:"bytes,11,opt,name=series"`
// What action was taken/failed regarding the Regarding object.
// +optional
Action string `json:"action,omitempty" protobuf:"bytes,12,opt,name=action"`
// Optional secondary object for more complex actions.
// +optional
Related *ObjectReference `json:"related,omitempty" protobuf:"bytes,13,opt,name=related"`
// Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
// +optional
ReportingController string `json:"reportingComponent" protobuf:"bytes,14,opt,name=reportingComponent"`
// ID of the controller instance, e.g. `kubelet-xyzf`.
// +optional
ReportingInstance string `json:"reportingInstance" protobuf:"bytes,15,opt,name=reportingInstance"`
}
// EventSeries contain information on series of events, i.e. thing that was/is happening
// continuously for some time.
type EventSeries struct {
// Number of occurrences in this series up to the last heartbeat time
Count int32 `json:"count,omitempty" protobuf:"varint,1,name=count"`
// Time of the last occurrence observed
LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"`
// State of this Series: Ongoing or Finished
State EventSeriesState `json:"state,omitempty" protobuf:"bytes,3,name=state"`
}
// EventSeriesState is the state of an event series.
type EventSeriesState string
// Valid values of EventSeriesState.
const (
EventSeriesStateOngoing EventSeriesState = "Ongoing"
EventSeriesStateFinished EventSeriesState = "Finished"
EventSeriesStateUnknown EventSeriesState = "Unknown"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EventList is a list of events.
type EventList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of events
Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// List holds a list of objects, which may not be known by the server.
type List metav1.List
// LimitType is a type of object that is limited
type LimitType string
const (
// Limit that applies to all pods in a namespace
LimitTypePod LimitType = "Pod"
// Limit that applies to all containers in a namespace
LimitTypeContainer LimitType = "Container"
// Limit that applies to all persistent volume claims in a namespace
LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim"
)
// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
type LimitRangeItem struct {
// Type of resource that this limit applies to.
// +optional
Type LimitType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=LimitType"`
// Max usage constraints on this kind by resource name.
// +optional
Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"`
// Min usage constraints on this kind by resource name.
// +optional
Min ResourceList `json:"min,omitempty" protobuf:"bytes,3,rep,name=min,casttype=ResourceList,castkey=ResourceName"`
// Default resource requirement limit value by resource name if resource limit is omitted.
// +optional
Default ResourceList `json:"default,omitempty" protobuf:"bytes,4,rep,name=default,casttype=ResourceList,castkey=ResourceName"`
// DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
// +optional
DefaultRequest ResourceList `json:"defaultRequest,omitempty" protobuf:"bytes,5,rep,name=defaultRequest,casttype=ResourceList,castkey=ResourceName"`
// MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
// +optional
MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty" protobuf:"bytes,6,rep,name=maxLimitRequestRatio,casttype=ResourceList,castkey=ResourceName"`
}
// LimitRangeSpec defines a min/max usage limit for resources that match on kind.
type LimitRangeSpec struct {
// Limits is the list of LimitRangeItem objects that are enforced.
Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// LimitRange sets resource usage limits for each kind of resource in a Namespace.
type LimitRange struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the limits enforced.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// LimitRangeList is a list of LimitRange items.
type LimitRangeList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is a list of LimitRange objects.
// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// The following identify resource constants for Kubernetes object types
const (
// Pods, number
ResourcePods ResourceName = "pods"
// Services, number
ResourceServices ResourceName = "services"
// ReplicationControllers, number
ResourceReplicationControllers ResourceName = "replicationcontrollers"
// ResourceQuotas, number
ResourceQuotas ResourceName = "resourcequotas"
// ResourceSecrets, number
ResourceSecrets ResourceName = "secrets"
// ResourceConfigMaps, number
ResourceConfigMaps ResourceName = "configmaps"
// ResourcePersistentVolumeClaims, number
ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims"
// ResourceServicesNodePorts, number
ResourceServicesNodePorts ResourceName = "services.nodeports"
// ResourceServicesLoadBalancers, number
ResourceServicesLoadBalancers ResourceName = "services.loadbalancers"
// CPU request, in cores. (500m = .5 cores)
ResourceRequestsCPU ResourceName = "requests.cpu"
// Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceRequestsMemory ResourceName = "requests.memory"
// Storage request, in bytes
ResourceRequestsStorage ResourceName = "requests.storage"
// Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage"
// CPU limit, in cores. (500m = .5 cores)
ResourceLimitsCPU ResourceName = "limits.cpu"
// Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceLimitsMemory ResourceName = "limits.memory"
// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
)
// The following identify resource prefix for Kubernetes object types
const (
// HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
// As burst is not supported for HugePages, we would only quota its request, and ignore the limit.
ResourceRequestsHugePagesPrefix = "requests.hugepages-"
)
// A ResourceQuotaScope defines a filter that must match each object tracked by a quota
type ResourceQuotaScope string
const (
// Match all pod objects where spec.activeDeadlineSeconds is set
ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating"
// Match all pod objects where spec.activeDeadlineSeconds is not set (!spec.activeDeadlineSeconds)
ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating"
// Match all pod objects that have best effort quality of service
ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort"
// Match all pod objects that do not have best effort quality of service
ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort"
)
// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
type ResourceQuotaSpec struct {
// Hard is the set of desired hard limits for each named resource.
// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
// +optional
Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
// A collection of filters that must match each object tracked by a quota.
// If not specified, the quota matches all objects.
// +optional
Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"`
}
// ResourceQuotaStatus defines the enforced hard limits and observed use.
type ResourceQuotaStatus struct {
// Hard is the set of enforced hard limits for each named resource.
// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
// +optional
Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
// Used is the current observed total usage of the resource in the namespace.
// +optional
Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceQuota sets aggregate quota restrictions enforced per namespace
type ResourceQuota struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the desired quota.
// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Status defines the actual enforced quota and its current usage.
// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceQuotaList is a list of ResourceQuota items.
type ResourceQuotaList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is a list of ResourceQuota objects.
// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Secret holds secret data of a certain type. The total bytes of the values in
// the Data field must be less than MaxSecretSize bytes.
type Secret struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Data contains the secret data. Each key must consist of alphanumeric
// characters, '-', '_' or '.'. The serialized form of the secret data is a
// base64 encoded string, representing the arbitrary (possibly non-string)
// data value here. Described in https://tools.ietf.org/html/rfc4648#section-4
// +optional
Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
// stringData allows specifying non-binary secret data in string form.
// It is provided as a write-only convenience method.
// All keys and values are merged into the data field on write, overwriting any existing values.
// It is never output when reading from the API.
// +k8s:conversion-gen=false
// +optional
StringData map[string]string `json:"stringData,omitempty" protobuf:"bytes,4,rep,name=stringData"`
// Used to facilitate programmatic handling of secret data.
// +optional
Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"`
}
// MaxSecretSize is the maximum allowed total size, in bytes, of the values in a Secret's Data field (1 MiB).
const MaxSecretSize = 1 * 1024 * 1024
// SecretType identifies the kind of data stored in a Secret, to facilitate programmatic handling of secret data.
type SecretType string
const (
// SecretTypeOpaque is the default. Arbitrary user-defined data
SecretTypeOpaque SecretType = "Opaque"
// SecretTypeServiceAccountToken contains a token that identifies a service account to the API
//
// Required fields:
// - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies
// - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies
// - Secret.Data["token"] - a token that identifies the service account to the API
SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token"
// ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
ServiceAccountNameKey = "kubernetes.io/service-account.name"
// ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
ServiceAccountUIDKey = "kubernetes.io/service-account.uid"
// ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets
ServiceAccountTokenKey = "token"
// ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets
ServiceAccountKubeconfigKey = "kubernetes.kubeconfig"
// ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets
ServiceAccountRootCAKey = "ca.crt"
// ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls
ServiceAccountNamespaceKey = "namespace"
// SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg
//
// Required fields:
// - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file
SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg"
// DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets
DockerConfigKey = ".dockercfg"
// SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json
//
// Required fields:
// - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file
SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson"
// DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets
DockerConfigJsonKey = ".dockerconfigjson"
// SecretTypeBasicAuth contains data needed for basic authentication.
//
// Required at least one of fields:
// - Secret.Data["username"] - username used for authentication
// - Secret.Data["password"] - password or token needed for authentication
SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth"
// BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets
BasicAuthUsernameKey = "username"
// BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets
BasicAuthPasswordKey = "password"
// SecretTypeSSHAuth contains data needed for SSH authentication.
//
// Required field:
// - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication
SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth"
// SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets
SSHAuthPrivateKey = "ssh-privatekey"
// SecretTypeTLS contains information about a TLS client or server secret. It
// is primarily used with TLS termination of the Ingress resource, but may be
// used in other types.
//
// Required fields:
// - Secret.Data["tls.key"] - TLS private key.
// - Secret.Data["tls.crt"] - TLS certificate.
// TODO: Consider supporting different formats, specifying CA/destinationCA.
SecretTypeTLS SecretType = "kubernetes.io/tls"
// TLSCertKey is the key for tls certificates in a TLS secret.
TLSCertKey = "tls.crt"
// TLSPrivateKeyKey is the key for the private key field in a TLS secret.
TLSPrivateKeyKey = "tls.key"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SecretList is a list of Secret.
type SecretList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is a list of secret objects.
// More info: https://kubernetes.io/docs/concepts/configuration/secret
Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ConfigMap holds configuration data for pods to consume.
type ConfigMap struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Data contains the configuration data.
// Each key must consist of alphanumeric characters, '-', '_' or '.'.
// +optional
Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ConfigMapList is a resource containing a list of ConfigMap objects.
type ConfigMapList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of ConfigMaps.
Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ComponentConditionType is the type of a condition for component health validation.
type ComponentConditionType string
// These are the valid conditions for the component.
const (
ComponentHealthy ComponentConditionType = "Healthy"
)
// ComponentCondition holds information about the condition of a component.
type ComponentCondition struct {
// Type of condition for a component.
// Valid value: "Healthy"
Type ComponentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ComponentConditionType"`
// Status of the condition for a component.
// Valid values for "Healthy": "True", "False", or "Unknown".
Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
// Message about the condition for a component.
// For example, information about a health check.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
// Condition error code for a component.
// For example, a health check error code.
// +optional
Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
type ComponentStatus struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of component conditions observed
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComponentStatusList holds the status of all the conditions for the component as a list of ComponentStatus objects.
type ComponentStatusList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of ComponentStatus objects.
Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// DownwardAPIVolumeSource represents a volume containing downward API info.
// Downward API volumes support ownership management and SELinux relabeling.
type DownwardAPIVolumeSource struct {
// Items is a list of downward API volume file
// +optional
Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
// Optional: mode bits to use on created files by default. Must be a
// value between 0 and 0777. Defaults to 0644.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
}
const (
DownwardAPIVolumeSourceDefaultMode int32 = 0644
)
// DownwardAPIVolumeFile represents information to create the file containing the pod field
type DownwardAPIVolumeFile struct {
// Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
// Selects a field of the pod: only annotations, labels, name and namespace are supported.
// +optional
FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"`
// Selects a resource of the container: only resources limits and requests
// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
// +optional
ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"`
// Optional: mode bits to use on this file, must be a value between 0
// and 0777. If not specified, the volume defaultMode will be used.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
Mode *int32 `json:"mode,omitempty" protobuf:"varint,4,opt,name=mode"`
}
// Represents downward API info for projecting into a projected volume.
// Note that this is identical to a downwardAPI volume source without the default
// mode.
type DownwardAPIProjection struct {
// Items is a list of DownwardAPIVolume files to project into the volume.
// +optional
Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
}
// SecurityContext holds security configuration that will be applied to a container.
// Some fields are present in both SecurityContext and PodSecurityContext. When both
// are set, the values in SecurityContext take precedence.
type SecurityContext struct {
// The capabilities to add/drop when running containers.
// Defaults to the default set of capabilities granted by the container runtime.
// +optional
Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"`
// Run container in privileged mode.
// Processes in privileged containers are essentially equivalent to root on the host.
// Defaults to false.
// +optional
Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"`
// The SELinux context to be applied to the container.
// If unspecified, the container runtime will allocate a random SELinux context for each
// container. May also be set in PodSecurityContext. If set in both SecurityContext and
// PodSecurityContext, the value specified in SecurityContext takes precedence.
// +optional
SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"`
// The UID to run the entrypoint of the container process.
// Defaults to user specified in image metadata if unspecified.
// May also be set in PodSecurityContext. If set in both SecurityContext and
// PodSecurityContext, the value specified in SecurityContext takes precedence.
// +optional
RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"`
// Indicates that the container must run as a non-root user.
// If true, the Kubelet will validate the image at runtime to ensure that it
// does not run as UID 0 (root) and fail to start the container if it does.
// If unset or false, no such validation will be performed.
// May also be set in PodSecurityContext. If set in both SecurityContext and
// PodSecurityContext, the value specified in SecurityContext takes precedence.
// +optional
RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"`
// Whether this container has a read-only root filesystem.
// Default is false.
// +optional
ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"`
// AllowPrivilegeEscalation controls whether a process can gain more
// privileges than its parent process. This bool directly controls if
// the no_new_privs flag will be set on the container process.
// AllowPrivilegeEscalation is true always when the container is:
// 1) run as Privileged
// 2) has CAP_SYS_ADMIN
// +optional
AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"`
}
// SELinuxOptions are the labels to be applied to the container
type SELinuxOptions struct {
// User is a SELinux user label that applies to the container.
// +optional
User string `json:"user,omitempty" protobuf:"bytes,1,opt,name=user"`
// Role is a SELinux role label that applies to the container.
// +optional
Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"`
// Type is a SELinux type label that applies to the container.
// +optional
Type string `json:"type,omitempty" protobuf:"bytes,3,opt,name=type"`
// Level is the SELinux level label that applies to the container.
// +optional
Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RangeAllocation is not a public type.
type RangeAllocation struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Range is a string that identifies the range represented by 'data'.
Range string `json:"range" protobuf:"bytes,2,opt,name=range"`
// Data is a bit array containing all allocated addresses in the previous segment.
Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"`
}
const (
// DefaultSchedulerName is the name of the default scheduler ("default-scheduler").
DefaultSchedulerName = "default-scheduler"
// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
// corresponding to every RequiredDuringScheduling affinity rule.
// When the --hard-pod-affinity-weight scheduler flag is not specified,
// DefaultHardPodAffinitySymmetricWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
DefaultHardPodAffinitySymmetricWeight int32 = 1
)
// Sysctl defines a kernel parameter to be set.
// Note: fields carry protobuf tags only (no json tags), unlike most types in this file.
type Sysctl struct {
// Name of a property to set
Name string `protobuf:"bytes,1,opt,name=name"`
// Value of a property to set
Value string `protobuf:"bytes,2,opt,name=value"`
}
// NodeResources is an object for conveying resource information about a node.
// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.
type NodeResources struct {
// Capacity represents the available resources of a node
Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
}
// Parameter and header names used by the remote command execution and
// port-forwarding subprotocols (exec/attach/port-forward streaming API).
const (
// Enable stdin for remote command execution
ExecStdinParam = "input"
// Enable stdout for remote command execution
ExecStdoutParam = "output"
// Enable stderr for remote command execution
ExecStderrParam = "error"
// Enable TTY for remote command execution
ExecTTYParam = "tty"
// Command to run for remote command execution
ExecCommandParam = "command"
// Name of header that specifies stream type
StreamType = "streamType"
// Value for streamType header for stdin stream
StreamTypeStdin = "stdin"
// Value for streamType header for stdout stream
StreamTypeStdout = "stdout"
// Value for streamType header for stderr stream
StreamTypeStderr = "stderr"
// Value for streamType header for data stream
StreamTypeData = "data"
// Value for streamType header for error stream
StreamTypeError = "error"
// Value for streamType header for terminal resize stream
StreamTypeResize = "resize"
// Name of header that specifies the port being forwarded
PortHeader = "port"
// Name of header that specifies a request ID used to associate the error
// and data streams for a single forwarded connection
PortForwardRequestIDHeader = "requestID"
)
Document CustomPodDNS feature gates for DNSConfig and None dnsPolicy
Kubernetes-commit: 6dc9eeb3dd7cdf227f1750ee9c2518ffe3af491e
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
)
// The comments for the structs and fields can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored and not exported to the SwaggerAPI.
//
// The aforementioned methods can be generated by hack/update-generated-swagger-docs.sh
// Common string formats
// ---------------------
// Many fields in this API have formatting requirements. The commonly used
// formats are defined here.
//
// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier"
// in the C language. This is captured by the following regex:
// [A-Za-z_][A-Za-z0-9_]*
// This defines the format, but not the length restriction, which should be
// specified at the definition of any field of this type.
//
// DNS_LABEL: This is a string, no more than 63 characters long, that conforms
// to the definition of a "label" in RFCs 1035 and 1123. This is captured
// by the following regex:
// [a-z0-9]([-a-z0-9]*[a-z0-9])?
//
// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms
// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured
// by the following regex:
// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
// or more simply:
// DNS_LABEL(\.DNS_LABEL)*
//
// IANA_SVC_NAME: This is a string, no more than 15 characters long, that
// conforms to the definition of IANA service name in RFC 6335.
// It must contain at least one letter [a-z] and it must contain only [a-z0-9-].
// Hyphens ('-') cannot be the leading or trailing character of the string
// and cannot be adjacent to other hyphens.
// ObjectMeta is metadata that all persisted resources must have, which includes all objects
// users must create.
// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon.
// +k8s:openapi-gen=false
type ObjectMeta struct {
// Name must be unique within a namespace. Is required when creating resources, although
// some resources may allow a client to request the generation of an appropriate name
// automatically. Name is primarily intended for creation idempotence and configuration
// definition.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// GenerateName is an optional prefix, used by the server, to generate a unique
// name ONLY IF the Name field has not been provided.
// If this field is used, the name returned to the client will be different
// than the name passed. This value will also be combined with a unique suffix.
// The provided value has the same validation rules as the Name field,
// and may be truncated by the length of the suffix required to make the value
// unique on the server.
//
// If this field is specified and the generated name exists, the server will
// NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
// ServerTimeout indicating a unique name could not be found in the time allotted, and the client
// should retry (optionally after the time indicated in the Retry-After header).
//
// Applied only if Name is not specified.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency
// +optional
GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"`
// Namespace defines the space within which each name must be unique. An empty namespace is
// equivalent to the "default" namespace, but "default" is the canonical representation.
// Not all objects are required to be scoped to a namespace - the value of this field for
// those objects will be empty.
//
// Must be a DNS_LABEL.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
// +optional
Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
// SelfLink is a URL representing this object.
// Populated by the system.
// Read-only.
// +optional
SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"`
// UID is the unique in time and space value for this object. It is typically generated by
// the server on successful creation of a resource and is not allowed to change on PUT
// operations.
//
// Populated by the system.
// Read-only.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
// +optional
UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
// An opaque value that represents the internal version of this object that can
// be used by clients to determine when objects have changed. May be used for optimistic
// concurrency, change detection, and the watch operation on a resource or set of resources.
// Clients must treat these values as opaque and passed unmodified back to the server.
// They may only be valid for a particular resource or set of resources.
//
// Populated by the system.
// Read-only.
// Value must be treated as opaque by clients and passed unmodified back to the server.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
// +optional
ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
// A sequence number representing a specific generation of the desired state.
// Populated by the system. Read-only.
// +optional
Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"`
// CreationTimestamp is a timestamp representing the server time when this object was
// created. It is not guaranteed to be set in happens-before order across separate operations.
// Clients may not set this value. It is represented in RFC3339 form and is in UTC.
//
// Populated by the system.
// Read-only.
// Null for lists.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
CreationTimestamp metav1.Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"`
// DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
// field is set by the server when a graceful deletion is requested by the user, and is not
// directly settable by a client. The resource is expected to be deleted (no longer visible
// from resource lists, and not reachable by name) after the time in this field. Once set,
// this value may not be unset or be set further into the future, although it may be shortened
// or the resource may be deleted prior to this time. For example, a user may request that
// a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination
// signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard
// termination signal (SIGKILL) to the container and after cleanup, remove the pod from the
// API. In the presence of network partitions, this object may still exist after this
// timestamp, until an administrator or automated process can determine the resource is
// fully terminated.
// If not set, graceful deletion of the object has not been requested.
//
// Populated by the system when a graceful deletion is requested.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
DeletionTimestamp *metav1.Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"`
// Number of seconds allowed for this object to gracefully terminate before
// it will be removed from the system. Only set when deletionTimestamp is also set.
// May only be shortened.
// Read-only.
// +optional
DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"`
// Map of string keys and values that can be used to organize and categorize
// (scope and select) objects. May match selectors of replication controllers
// and services.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
// +optional
Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"`
// Annotations is an unstructured key value map stored with a resource that may be
// set by external tools to store and retrieve arbitrary metadata. They are not
// queryable and should be preserved when modifying objects.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
// +optional
Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"`
// List of objects depended by this object. If ALL objects in the list have
// been deleted, this object will be garbage collected. If this object is managed by a controller,
// then an entry in this list will point to this controller, with the controller field set to true.
// There cannot be more than one managing controller.
// +optional
// +patchMergeKey=uid
// +patchStrategy=merge
OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"`
// An initializer is a controller which enforces some system invariant at object creation time.
// This field is a list of initializers that have not yet acted on this object. If nil or empty,
// this object has been completely initialized. Otherwise, the object is considered uninitialized
// and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to
// observe uninitialized objects.
//
// When an object is created, the system will populate this list with the current set of initializers.
// Only privileged users may set or modify this list. Once it is empty, it may not be modified further
// by any user.
Initializers *metav1.Initializers `json:"initializers,omitempty" patchStrategy:"merge" protobuf:"bytes,16,rep,name=initializers"`
// Must be empty before the object is deleted from the registry. Each entry
// is an identifier for the responsible component that will remove the entry
// from the list. If the deletionTimestamp of the object is non-nil, entries
// in this list can only be removed.
// +optional
// +patchStrategy=merge
Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"`
// The name of the cluster which the object belongs to.
// This is used to distinguish resources with same name and namespace in different clusters.
// This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
// +optional
ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"`
}
const (
// NamespaceDefault means the object is in the default namespace which is applied when not specified by clients
NamespaceDefault string = "default"
// NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces
NamespaceAll string = ""
)
// Volume represents a named volume in a pod that may be accessed by any container in the pod.
type Volume struct {
// Volume's name.
// Must be a DNS_LABEL and unique within the pod.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// VolumeSource represents the location and type of the mounted volume.
// If not specified, the Volume is implied to be an EmptyDir.
// This implied behavior is deprecated and will be removed in a future version.
VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"`
}
// Represents the source of a volume to mount.
// Only one of its members may be specified.
type VolumeSource struct {
// HostPath represents a pre-existing file or directory on the host
// machine that is directly exposed to the container. This is generally
// used for system agents or other privileged things that are allowed
// to see the host machine. Most containers will NOT need this.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
// ---
// TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
// mount host directories as read/write.
// +optional
HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"`
// EmptyDir represents a temporary directory that shares a pod's lifetime.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
// +optional
EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"`
// GCEPersistentDisk represents a GCE Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"`
// AWSElasticBlockStore represents an AWS Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"`
// GitRepo represents a git repository at a particular revision.
// +optional
GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"`
// Secret represents a secret that should populate this volume.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
// +optional
Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"`
// NFS represents an NFS mount on the host that shares a pod's lifetime
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
// +optional
NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"`
// ISCSI represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md
// +optional
ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"`
// Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
// +optional
Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"`
// PersistentVolumeClaimVolumeSource represents a reference to a
// PersistentVolumeClaim in the same namespace.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
// +optional
PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"`
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
// +optional
RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"`
// FlexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin. This is an
// alpha feature and may change in future.
// +optional
FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
// Cinder represents a cinder volume attached and mounted on kubelets host machine
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
// +optional
Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"`
// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
// +optional
CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"`
// Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
// +optional
Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"`
// DownwardAPI represents downward API about the pod that should populate this volume
// +optional
DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"`
// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
// +optional
FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"`
// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
// +optional
AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"`
// ConfigMap represents a configMap that should populate this volume
// +optional
ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"`
// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
// +optional
VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"`
// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
// +optional
Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"`
// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
// +optional
AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"`
// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
// +optional
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"`
// Items for all in one resources secrets, configmaps, and downward API
// +optional
Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"`
// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
// +optional
PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"`
// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
// +optional
ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"`
// StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
// +optional
StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"`
}
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
// This volume finds the bound PV and mounts that volume for the pod. A
// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
// type of volume that is owned by someone else (the system).
type PersistentVolumeClaimVolumeSource struct {
// ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"`
// Will force the ReadOnly setting in VolumeMounts.
// Default false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
}
// PersistentVolumeSource is similar to VolumeSource but meant for the
// administrator who creates PVs. Exactly one of its members must be set.
type PersistentVolumeSource struct {
// GCEPersistentDisk represents a GCE Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"`
// AWSElasticBlockStore represents an AWS Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"`
// HostPath represents a directory on the host.
// Provisioned by a developer or tester.
// This is useful for single-node development and testing only!
// On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
// +optional
HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"`
// Glusterfs represents a Glusterfs volume that is attached to a host and
// exposed to the pod. Provisioned by an admin.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
// +optional
Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"`
// NFS represents an NFS mount on the host. Provisioned by an admin.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
// +optional
NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"`
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
// +optional
RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"`
// ISCSI represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
// +optional
ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"`
// Cinder represents a cinder volume attached and mounted on kubelets host machine
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
// +optional
Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"`
// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
// +optional
CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"`
// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
// +optional
FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"`
// Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running
// +optional
Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"`
// FlexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin. This is an
// alpha feature and may change in future.
// +optional
FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
// +optional
AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"`
// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
// +optional
VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"`
// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
// +optional
Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"`
// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
// +optional
AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"`
// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
// +optional
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"`
// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
// +optional
PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"`
// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
// +optional
ScaleIO *ScaleIOPersistentVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"`
// Local represents directly-attached storage with node affinity
// +optional
Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"`
// StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
// More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md
// +optional
StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"`
// CSI represents storage that is handled by an external CSI driver
// +optional
CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"`
}
// Annotation keys recognized on PersistentVolume / PersistentVolumeClaim objects.
const (
	// BetaStorageClassAnnotation represents the beta/previous StorageClass annotation.
	// It's currently still used and will be held for backwards compatibility
	BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"
	// MountOptionAnnotation defines mount option annotation used in PVs
	MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options"
	// AlphaStorageNodeAffinityAnnotation defines node affinity policies for a PersistentVolume.
	// Value is a string of the json representation of type NodeAffinity
	AlphaStorageNodeAffinityAnnotation = "volume.alpha.kubernetes.io/node-affinity"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolume (PV) is a storage resource provisioned by an administrator.
// It is analogous to a node.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
type PersistentVolume struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines a specification of a persistent volume owned by the cluster.
	// Provisioned by an administrator.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
	// +optional
	Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status represents the current information/status for the persistent volume.
	// Populated by the system.
	// Read-only.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
	// +optional
	Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// PersistentVolumeSpec is the specification of a persistent volume.
type PersistentVolumeSpec struct {
	// A description of the persistent volume's resources and capacity.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
	// +optional
	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
	// The actual volume backing the persistent volume.
	PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"`
	// AccessModes contains all ways the volume can be mounted.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
	// Expected to be non-nil when bound.
	// claim.VolumeName is the authoritative bind between PV and PVC.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding
	// +optional
	ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"`
	// What happens to a persistent volume when released from its claim.
	// Valid options are Retain (default), Delete, and Recycle; see the
	// PersistentVolumeReclaimPolicy constants.
	// Recycling must be supported by the volume plugin underlying this persistent volume.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming
	// +optional
	PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"`
	// Name of StorageClass to which this persistent volume belongs. Empty value
	// means that this volume does not belong to any StorageClass.
	// +optional
	StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"`
	// A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will
	// simply fail if one is invalid.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options
	// +optional
	MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"`
	// volumeMode defines if a volume is intended to be used with a formatted filesystem
	// or to remain in raw block state. Value of Filesystem is implied when not included in spec.
	// This is an alpha feature and may change in the future.
	// +optional
	VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"`
}
// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes.
type PersistentVolumeReclaimPolicy string

const (
	// PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
	// The volume plugin must support Recycling.
	PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle"
	// PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim.
	// The volume plugin must support Deletion.
	PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete"
	// PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator.
	// The default policy is Retain.
	PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain"
)
// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem.
type PersistentVolumeMode string

const (
	// PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device.
	PersistentVolumeBlock PersistentVolumeMode = "Block"
	// PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem.
	PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem"
)
// PersistentVolumeStatus is the current status of a persistent volume.
type PersistentVolumeStatus struct {
	// Phase indicates if a volume is available, bound to a claim, or released by a claim.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase
	// +optional
	Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"`
	// A human-readable message indicating details about why the volume is in this state.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
	// Reason is a brief CamelCase string that describes any failure and is meant
	// for machine parsing and tidy display in the CLI.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolumeList is a list of PersistentVolume items.
type PersistentVolumeList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of persistent volumes.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
	Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolumeClaim is a user's request for and claim to a persistent volume
type PersistentVolumeClaim struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the desired characteristics of a volume requested by a pod author.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status represents the current information/status of a persistent volume claim.
	// Read-only.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
type PersistentVolumeClaimList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// A list of persistent volume claims.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// PersistentVolumeClaimSpec describes the common attributes of storage devices
// and allows a Source for provider-specific attributes
type PersistentVolumeClaimSpec struct {
	// AccessModes contains the desired access modes the volume should have.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// A label query over volumes to consider for binding.
	// +optional
	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
	// Resources represents the minimum resources the volume should have.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
	// +optional
	Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"`
	// VolumeName is the binding reference to the PersistentVolume backing this claim.
	// +optional
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"`
	// Name of the StorageClass required by the claim.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
	// +optional
	StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"`
	// volumeMode defines what type of volume is required by the claim.
	// Value of Filesystem is implied when not included in claim spec.
	// This is an alpha feature and may change in the future.
	// +optional
	VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"`
}
// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
type PersistentVolumeClaimConditionType string

const (
	// PersistentVolumeClaimResizing - a user trigger resize of pvc has been started
	PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
)
// PersistentVolumeClaimCondition contains details about state of pvc
type PersistentVolumeClaimCondition struct {
	// Type of the condition, e.g. Resizing.
	Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"`
	// Status of the condition.
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// Last time we probed the condition.
	// +optional
	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
	// Last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
	// Unique, this should be a short, machine understandable string that gives the reason
	// for condition's last transition. If it reports "ResizeStarted" that means the underlying
	// persistent volume is being resized.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
	// Human-readable message indicating details about last transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
type PersistentVolumeClaimStatus struct {
	// Phase represents the current phase of PersistentVolumeClaim.
	// +optional
	Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"`
	// AccessModes contains the actual access modes the volume backing the PVC has.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// Represents the actual resources of the underlying volume.
	// +optional
	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
	// Current Condition of persistent volume claim. If underlying persistent volume is being
	// resized then the Condition will be set to 'ResizeStarted'.
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
}
// PersistentVolumeAccessMode defines the ways a volume can be mounted.
type PersistentVolumeAccessMode string

const (
	// can be mounted read/write mode to exactly 1 host
	ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
	// can be mounted in read-only mode to many hosts
	ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
	// can be mounted in read/write mode to many hosts
	ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
)
// PersistentVolumePhase describes the lifecycle phase of a PersistentVolume.
type PersistentVolumePhase string

const (
	// used for PersistentVolumes that are not available
	VolumePending PersistentVolumePhase = "Pending"
	// used for PersistentVolumes that are not yet bound
	// Available volumes are held by the binder and matched to PersistentVolumeClaims
	VolumeAvailable PersistentVolumePhase = "Available"
	// used for PersistentVolumes that are bound
	VolumeBound PersistentVolumePhase = "Bound"
	// used for PersistentVolumes where the bound PersistentVolumeClaim was deleted
	// released volumes must be recycled before becoming available again
	// this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource
	VolumeReleased PersistentVolumePhase = "Released"
	// used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim
	VolumeFailed PersistentVolumePhase = "Failed"
)
// PersistentVolumeClaimPhase describes the lifecycle phase of a PersistentVolumeClaim.
type PersistentVolumeClaimPhase string

const (
	// used for PersistentVolumeClaims that are not yet bound
	ClaimPending PersistentVolumeClaimPhase = "Pending"
	// used for PersistentVolumeClaims that are bound
	ClaimBound PersistentVolumeClaimPhase = "Bound"
	// used for PersistentVolumeClaims that lost their underlying
	// PersistentVolume. The claim was bound to a PersistentVolume and this
	// volume does not exist any longer and all data on it was lost.
	ClaimLost PersistentVolumeClaimPhase = "Lost"
)
// HostPathType describes what must or may exist at the path referenced by a HostPathVolumeSource.
type HostPathType string

const (
	// For backwards compatibility, leave it empty if unset
	HostPathUnset HostPathType = ""
	// If nothing exists at the given path, an empty directory will be created there
	// as needed with file mode 0755, having the same group and ownership with Kubelet.
	HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate"
	// A directory must exist at the given path
	HostPathDirectory HostPathType = "Directory"
	// If nothing exists at the given path, an empty file will be created there
	// as needed with file mode 0644, having the same group and ownership with Kubelet.
	HostPathFileOrCreate HostPathType = "FileOrCreate"
	// A file must exist at the given path
	HostPathFile HostPathType = "File"
	// A UNIX socket must exist at the given path
	HostPathSocket HostPathType = "Socket"
	// A character device must exist at the given path
	HostPathCharDev HostPathType = "CharDevice"
	// A block device must exist at the given path
	HostPathBlockDev HostPathType = "BlockDevice"
)
// Represents a host path mapped into a pod.
// Host path volumes do not support ownership management or SELinux relabeling.
type HostPathVolumeSource struct {
	// Path of the directory on the host.
	// If the path is a symlink, it will follow the link to the real path.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
	// Type for HostPath Volume
	// Defaults to ""
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	// +optional
	Type *HostPathType `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"`
}
// Represents an empty directory for a pod.
// Empty directory volumes support ownership management and SELinux relabeling.
type EmptyDirVolumeSource struct {
	// What type of storage medium should back this directory.
	// The default is "" which means to use the node's default medium.
	// Must be an empty string (default) or Memory.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
	// +optional
	Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"`
	// Total amount of local storage required for this EmptyDir volume.
	// The size limit is also applicable for memory medium.
	// The maximum usage on memory medium EmptyDir would be the minimum value between
	// the SizeLimit specified here and the sum of memory limits of all containers in a pod.
	// The default is nil which means that the limit is undefined.
	// More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
	// +optional
	SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"`
}
// Represents a Glusterfs mount that lasts the lifetime of a pod.
// Glusterfs volumes do not support ownership management or SELinux relabeling.
type GlusterfsVolumeSource struct {
	// EndpointsName is the endpoint name that details Glusterfs topology.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
	EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
	// Path is the Glusterfs volume path.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
	// ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
	// Defaults to false.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// Represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling.
type RBDVolumeSource struct {
	// A collection of Ceph monitors.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
	// The rados image name.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
	// Filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// The rados pool name.
	// Default is rbd.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
	// The rados user name.
	// Default is admin.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
	// Keyring is the path to key ring for RBDUser.
	// Default is /etc/ceph/keyring.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
	// SecretRef is name of the authentication secret for RBDUser. If provided
	// overrides keyring.
	// Default is nil.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
	// ReadOnly here will force the ReadOnly setting in VolumeMounts.
	// Defaults to false.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
}
// Represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling.
// Unlike RBDVolumeSource, the SecretRef here is a namespaced SecretReference,
// suitable for cluster-scoped PersistentVolume objects.
type RBDPersistentVolumeSource struct {
	// A collection of Ceph monitors.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
	// The rados image name.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
	// Filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// The rados pool name.
	// Default is rbd.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
	// The rados user name.
	// Default is admin.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
	// Keyring is the path to key ring for RBDUser.
	// Default is /etc/ceph/keyring.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
	// SecretRef is name of the authentication secret for RBDUser. If provided
	// overrides keyring.
	// Default is nil.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
	// ReadOnly here will force the ReadOnly setting in VolumeMounts.
	// Defaults to false.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
}
// Represents a cinder volume resource in Openstack.
// A Cinder volume must exist before mounting to a container.
// The volume must also be in the same region as the kubelet.
// Cinder volumes support ownership management and SELinux relabeling.
type CinderVolumeSource struct {
	// volume id used to identify the volume in cinder
	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
// Cephfs volumes do not support ownership management or SELinux relabeling.
type CephFSVolumeSource struct {
	// Required: Monitors is a collection of Ceph monitors
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
	// Optional: Used as the mounted root, rather than the full Ceph tree, default is /
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
	// Optional: User is the rados user name, default is admin
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
	// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
	// Optional: SecretRef is reference to the authentication secret for User, default is empty.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
	// Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
}
// SecretReference represents a Secret Reference. It has enough information to retrieve secret
// in any namespace
type SecretReference struct {
	// Name is unique within a namespace to reference a secret resource.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// Namespace defines the space within which the secret name must be unique.
	// +optional
	Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}
// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
// Cephfs volumes do not support ownership management or SELinux relabeling.
// Unlike CephFSVolumeSource, the SecretRef here is a namespaced SecretReference,
// suitable for cluster-scoped PersistentVolume objects.
type CephFSPersistentVolumeSource struct {
	// Required: Monitors is a collection of Ceph monitors
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
	// Optional: Used as the mounted root, rather than the full Ceph tree, default is /
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
	// Optional: User is the rados user name, default is admin
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
	// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
	// Optional: SecretRef is reference to the authentication secret for User, default is empty.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
	// Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
}
// Represents a Flocker volume mounted by the Flocker agent.
// One and only one of datasetName and datasetUUID should be set.
// Flocker volumes do not support ownership management or SELinux relabeling.
type FlockerVolumeSource struct {
	// Name of the dataset stored as metadata -> name on the dataset for Flocker
	// should be considered as deprecated
	// +optional
	DatasetName string `json:"datasetName,omitempty" protobuf:"bytes,1,opt,name=datasetName"`
	// UUID of the dataset. This is unique identifier of a Flocker dataset
	// +optional
	DatasetUUID string `json:"datasetUUID,omitempty" protobuf:"bytes,2,opt,name=datasetUUID"`
}
// StorageMedium defines ways that storage can be allocated to a volume.
type StorageMedium string

const (
	StorageMediumDefault   StorageMedium = ""          // use whatever the default is for the node
	StorageMediumMemory    StorageMedium = "Memory"    // use memory (tmpfs)
	StorageMediumHugePages StorageMedium = "HugePages" // use hugepages
)
// Protocol defines network protocols supported for things like container ports.
type Protocol string

const (
	// ProtocolTCP is the TCP protocol.
	ProtocolTCP Protocol = "TCP"
	// ProtocolUDP is the UDP protocol.
	ProtocolUDP Protocol = "UDP"
)
// Represents a Persistent Disk resource in Google Compute Engine.
//
// A GCE PD must exist before mounting to a container. The disk must
// also be in the same GCE project and zone as the kubelet. A GCE PD
// can only be mounted as read/write once or read-only many times. GCE
// PDs support ownership management and SELinux relabeling.
type GCEPersistentDiskVolumeSource struct {
	// Unique name of the PD resource in GCE. Used to identify the disk in GCE.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"`
	// Filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// The partition in the volume that you want to mount.
	// If omitted, the default is to mount by volume name.
	// Examples: For volume /dev/sda1, you specify the partition as "1".
	// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// +optional
	Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
	// ReadOnly here will force the ReadOnly setting in VolumeMounts.
	// Defaults to false.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
}
// QuobyteVolumeSource represents a Quobyte mount that lasts the lifetime of a pod.
// Quobyte volumes do not support ownership management or SELinux relabeling.
type QuobyteVolumeSource struct {
// Registry represents a single or multiple Quobyte Registry services
// specified as a string as host:port pair (multiple entries are separated with commas)
// which acts as the central registry for volumes
Registry string `json:"registry" protobuf:"bytes,1,opt,name=registry"`
// Volume is a string that references an already created Quobyte volume by name.
Volume string `json:"volume" protobuf:"bytes,2,opt,name=volume"`
// ReadOnly here will force the Quobyte volume to be mounted with read-only permissions.
// Defaults to false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
// User to map volume access to
// Defaults to serviceaccount user
// +optional
User string `json:"user,omitempty" protobuf:"bytes,4,opt,name=user"`
// Group to map volume access to
// Default is no group
// +optional
Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"`
}
// FlexVolumeSource represents a generic volume resource that is
// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.
type FlexVolumeSource struct {
// Driver is the name of the driver to use for this volume.
Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// Optional: SecretRef is reference to the secret object containing
// sensitive information to pass to the plugin scripts. This may be
// empty if no secret object is specified. If the secret object
// contains more than one secret, all secrets are passed to the plugin
// scripts.
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// Optional: Extra command options if any.
// +optional
Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"`
}
// AWSElasticBlockStoreVolumeSource represents a Persistent Disk resource in AWS.
//
// An AWS EBS disk must exist before mounting to a container. The disk
// must also be in the same AWS zone as the kubelet. An AWS EBS disk
// can only be mounted as read/write once. AWS EBS volumes support
// ownership management and SELinux relabeling.
type AWSElasticBlockStoreVolumeSource struct {
// Unique ID of the persistent disk resource in AWS (Amazon EBS volume).
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
// Filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// The partition in the volume that you want to mount.
// If omitted, the default is to mount by volume name.
// Examples: For volume /dev/sda1, you specify the partition as "1".
// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
// +optional
Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
// Specify "true" to force and set the ReadOnly property in VolumeMounts to "true".
// If omitted, the default is "false".
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
}
// GitRepoVolumeSource represents a volume that is populated with the contents of a git repository.
// Git repo volumes do not support ownership management.
// Git repo volumes support SELinux relabeling.
type GitRepoVolumeSource struct {
// Repository URL
Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"`
// Commit hash for the specified revision.
// +optional
Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
// Target directory name.
// Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
// git repository. Otherwise, if specified, the volume will contain the git repository in
// the subdirectory with the given name.
// +optional
Directory string `json:"directory,omitempty" protobuf:"bytes,3,opt,name=directory"`
}
// SecretVolumeSource adapts a Secret into a volume.
//
// The contents of the target Secret's Data field will be presented in a volume
// as files using the keys in the Data field as the file names.
// Secret volumes support ownership management and SELinux relabeling.
type SecretVolumeSource struct {
// Name of the secret in the pod's namespace to use.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
// +optional
SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"`
// If unspecified, each key-value pair in the Data field of the referenced
// Secret will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the Secret,
// the volume setup will error unless it is marked optional. Paths must be
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// Optional: mode bits to use on created files by default. Must be a
// value between 0 and 0777. Defaults to 0644.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"`
// Specify whether the Secret or its keys must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
const (
// SecretVolumeSourceDefaultMode is the default file mode used for files
// projected from a Secret volume when DefaultMode is not specified.
SecretVolumeSourceDefaultMode int32 = 0644
)
// SecretProjection adapts a secret into a projected volume.
//
// The contents of the target Secret's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names.
// Note that this is identical to a secret volume source without the default
// mode.
type SecretProjection struct {
// The embedded reference names the Secret to project.
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// If unspecified, each key-value pair in the Data field of the referenced
// Secret will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the Secret,
// the volume setup will error unless it is marked optional. Paths must be
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// Specify whether the Secret or its key must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// NFSVolumeSource represents an NFS mount that lasts the lifetime of a pod.
// NFS volumes do not support ownership management or SELinux relabeling.
type NFSVolumeSource struct {
// Server is the hostname or IP address of the NFS server.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
Server string `json:"server" protobuf:"bytes,1,opt,name=server"`
// Path that is exported by the NFS server.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
// ReadOnly here will force
// the NFS export to be mounted with read-only permissions.
// Defaults to false.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// ISCSIVolumeSource represents an ISCSI disk.
// ISCSI volumes can only be mounted as read/write once.
// ISCSI volumes support ownership management and SELinux relabeling.
type ISCSIVolumeSource struct {
// iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
// is other than default (typically TCP ports 860 and 3260).
TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
// Target iSCSI Qualified Name.
IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
// iSCSI Target Lun number.
Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
// iSCSI Interface Name that uses an iSCSI transport.
// Defaults to 'default' (tcp).
// +optional
ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
// Filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
// ReadOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
// iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
// is other than default (typically TCP ports 860 and 3260).
// +optional
Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
// whether support iSCSI Discovery CHAP authentication
// +optional
DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
// whether support iSCSI Session CHAP authentication
// +optional
SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
// CHAP Secret for iSCSI target and initiator authentication.
// The secret is resolved in the pod's namespace (LocalObjectReference).
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
// Custom iSCSI Initiator Name.
// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
// <target portal>:<volume name> will be created for the connection.
// +optional
InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
}
// ISCSIPersistentVolumeSource represents an ISCSI disk.
// ISCSI volumes can only be mounted as read/write once.
// ISCSI volumes support ownership management and SELinux relabeling.
// Unlike ISCSIVolumeSource, the CHAP secret is referenced via a
// SecretReference rather than a pod-namespace LocalObjectReference.
type ISCSIPersistentVolumeSource struct {
// iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
// is other than default (typically TCP ports 860 and 3260).
TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
// Target iSCSI Qualified Name.
IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
// iSCSI Target Lun number.
Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
// iSCSI Interface Name that uses an iSCSI transport.
// Defaults to 'default' (tcp).
// +optional
ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
// Filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
// ReadOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
// iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port
// is other than default (typically TCP ports 860 and 3260).
// +optional
Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
// whether support iSCSI Discovery CHAP authentication
// +optional
DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
// whether support iSCSI Session CHAP authentication
// +optional
SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
// CHAP Secret for iSCSI target and initiator authentication
// +optional
SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
// Custom iSCSI Initiator Name.
// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
// <target portal>:<volume name> will be created for the connection.
// +optional
InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
}
// FCVolumeSource represents a Fibre Channel volume.
// Fibre Channel volumes can only be mounted as read/write once.
// Fibre Channel volumes support ownership management and SELinux relabeling.
type FCVolumeSource struct {
// Optional: FC target worldwide names (WWNs)
// +optional
TargetWWNs []string `json:"targetWWNs,omitempty" protobuf:"bytes,1,rep,name=targetWWNs"`
// Optional: FC target lun number
// +optional
Lun *int32 `json:"lun,omitempty" protobuf:"varint,2,opt,name=lun"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// Optional: FC volume world wide identifiers (wwids)
// Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
// +optional
WWIDs []string `json:"wwids,omitempty" protobuf:"bytes,5,rep,name=wwids"`
}
// AzureFileVolumeSource represents an Azure File Service mount on the host and bind mount to the pod.
type AzureFileVolumeSource struct {
// the name of secret that contains Azure Storage Account Name and Key
SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
// Share Name
ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// AzureFilePersistentVolumeSource represents an Azure File Service mount on the host and bind mount to the pod.
type AzureFilePersistentVolumeSource struct {
// the name of secret that contains Azure Storage Account Name and Key
SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
// Share Name
ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
// the namespace of the secret that contains Azure Storage Account Name and Key
// default is the same as the Pod
// +optional
SecretNamespace *string `json:"secretNamespace" protobuf:"bytes,4,opt,name=secretNamespace"`
}
// VsphereVirtualDiskVolumeSource represents a vSphere volume resource.
type VsphereVirtualDiskVolumeSource struct {
// Path that identifies vSphere volume vmdk
VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// Storage Policy Based Management (SPBM) profile name.
// +optional
StoragePolicyName string `json:"storagePolicyName,omitempty" protobuf:"bytes,3,opt,name=storagePolicyName"`
// Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
// +optional
StoragePolicyID string `json:"storagePolicyID,omitempty" protobuf:"bytes,4,opt,name=storagePolicyID"`
}
// PhotonPersistentDiskVolumeSource represents a Photon Controller persistent disk resource.
type PhotonPersistentDiskVolumeSource struct {
// ID that identifies Photon Controller persistent disk
PdID string `json:"pdID" protobuf:"bytes,1,opt,name=pdID"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
}
// AzureDataDiskCachingMode specifies the host caching mode of an Azure data disk.
type AzureDataDiskCachingMode string
// AzureDataDiskKind specifies how an Azure data disk is provisioned.
type AzureDataDiskKind string
const (
// AzureDataDiskCachingNone disables host caching.
AzureDataDiskCachingNone AzureDataDiskCachingMode = "None"
// AzureDataDiskCachingReadOnly enables read-only host caching.
AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly"
// AzureDataDiskCachingReadWrite enables read/write host caching.
AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"
// AzureSharedBlobDisk: multiple blob disks per storage account.
AzureSharedBlobDisk AzureDataDiskKind = "Shared"
// AzureDedicatedBlobDisk: single blob disk per storage account.
AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
// AzureManagedDisk: Azure managed data disk.
AzureManagedDisk AzureDataDiskKind = "Managed"
)
// AzureDiskVolumeSource represents an Azure Data Disk mount on the host and bind mount to the pod.
type AzureDiskVolumeSource struct {
// The Name of the data disk in the blob storage
DiskName string `json:"diskName" protobuf:"bytes,1,opt,name=diskName"`
// The URI of the data disk in the blob storage
DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"`
// Host Caching mode: None, Read Only, Read Write.
// +optional
CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
// Expected values: Shared (multiple blob disks per storage account), Dedicated (single blob
// disk per storage account), Managed (azure managed data disk, only in managed availability
// set). Defaults to Shared.
Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"`
}
// PortworxVolumeSource represents a Portworx volume resource.
type PortworxVolumeSource struct {
// VolumeID uniquely identifies a Portworx volume
VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
// FSType represents the filesystem type to mount
// Must be a filesystem type supported by the host operating system.
// Examples: "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// ScaleIOVolumeSource represents a persistent ScaleIO volume
type ScaleIOVolumeSource struct {
// The host address of the ScaleIO API Gateway.
Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
// The name of the storage system as configured in ScaleIO.
System string `json:"system" protobuf:"bytes,2,opt,name=system"`
// SecretRef references the secret for ScaleIO user and other
// sensitive information. If this is not provided, Login operation will fail.
SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
// Flag to enable/disable SSL communication with Gateway, default false
// +optional
SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
// The name of the ScaleIO Protection Domain for the configured storage.
// +optional
ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
// The ScaleIO Storage Pool associated with the protection domain.
// +optional
StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
// +optional
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
// The name of a volume already created in the ScaleIO system
// that is associated with this volume source.
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
}
// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume
type ScaleIOPersistentVolumeSource struct {
// The host address of the ScaleIO API Gateway.
Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
// The name of the storage system as configured in ScaleIO.
System string `json:"system" protobuf:"bytes,2,opt,name=system"`
// SecretRef references the secret for ScaleIO user and other
// sensitive information. If this is not provided, Login operation will fail.
SecretRef *SecretReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
// Flag to enable/disable SSL communication with Gateway, default false
// +optional
SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
// The name of the ScaleIO Protection Domain for the configured storage.
// +optional
ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
// The ScaleIO Storage Pool associated with the protection domain.
// +optional
StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
// +optional
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
// The name of a volume already created in the ScaleIO system
// that is associated with this volume source.
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
}
// StorageOSVolumeSource represents a StorageOS volume resource scoped to a pod.
// The API credentials secret, if given, is resolved in the pod's namespace
// (LocalObjectReference); compare StorageOSPersistentVolumeSource.
type StorageOSVolumeSource struct {
// VolumeName is the human-readable name of the StorageOS volume. Volume
// names are only unique within a namespace.
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
// VolumeNamespace specifies the scope of the volume within StorageOS. If no
// namespace is specified then the Pod's namespace will be used. This allows the
// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
// Set VolumeName to any name to override the default behaviour.
// Set to "default" if you are not using namespaces within StorageOS.
// Namespaces that do not pre-exist within StorageOS will be created.
// +optional
VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// SecretRef specifies the secret to use for obtaining the StorageOS API
// credentials. If not specified, default values will be attempted.
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
}
// StorageOSPersistentVolumeSource represents a StorageOS persistent volume resource.
type StorageOSPersistentVolumeSource struct {
// VolumeName is the human-readable name of the StorageOS volume. Volume
// names are only unique within a namespace.
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
// VolumeNamespace specifies the scope of the volume within StorageOS. If no
// namespace is specified then the Pod's namespace will be used. This allows the
// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
// Set VolumeName to any name to override the default behaviour.
// Set to "default" if you are not using namespaces within StorageOS.
// Namespaces that do not pre-exist within StorageOS will be created.
// +optional
VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// SecretRef specifies the secret to use for obtaining the StorageOS API
// credentials. If not specified, default values will be attempted.
// +optional
SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
}
// ConfigMapVolumeSource adapts a ConfigMap into a volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
// volume as files using the keys in the Data field as the file names, unless
// the items element is populated with specific mappings of keys to paths.
// ConfigMap volumes support ownership management and SELinux relabeling.
type ConfigMapVolumeSource struct {
// The embedded reference names the ConfigMap to use.
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// If unspecified, each key-value pair in the Data field of the referenced
// ConfigMap will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the ConfigMap,
// the volume setup will error unless it is marked optional. Paths must be
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// Optional: mode bits to use on created files by default. Must be a
// value between 0 and 0777. Defaults to 0644.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"`
// Specify whether the ConfigMap or its keys must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
const (
// ConfigMapVolumeSourceDefaultMode is the default file mode used for files
// projected from a ConfigMap volume when DefaultMode is not specified.
ConfigMapVolumeSourceDefaultMode int32 = 0644
)
// ConfigMapProjection adapts a ConfigMap into a projected volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names,
// unless the items element is populated with specific mappings of keys to paths.
// Note that this is identical to a configmap volume source without the default
// mode.
type ConfigMapProjection struct {
// The embedded reference names the ConfigMap to project.
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// If unspecified, each key-value pair in the Data field of the referenced
// ConfigMap will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the ConfigMap,
// the volume setup will error unless it is marked optional. Paths must be
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// Specify whether the ConfigMap or its keys must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// ProjectedVolumeSource represents a projected volume source.
type ProjectedVolumeSource struct {
// list of volume projections
Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"`
// Mode bits to use on created files by default. Must be a value between
// 0 and 0777.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
}
// VolumeProjection is a projection that may be projected along with other supported volume types.
// NOTE(review): presumably exactly one of the fields below should be set per
// projection — confirm against the validation code.
type VolumeProjection struct {
// all types below are the supported types for projection into the same volume
// information about the secret data to project
Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
// information about the downwardAPI data to project
DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"`
// information about the configMap data to project
ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"`
}
const (
// ProjectedVolumeSourceDefaultMode is the default file mode used for files in
// a projected volume when DefaultMode is not specified.
ProjectedVolumeSourceDefaultMode int32 = 0644
)
// KeyToPath maps a string key to a path within a volume.
type KeyToPath struct {
// The key to project.
Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
// The relative path of the file to map the key to.
// May not be an absolute path.
// May not contain the path element '..'.
// May not start with the string '..'.
Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
// Optional: mode bits to use on this file, must be a value between 0
// and 0777. If not specified, the volume defaultMode will be used.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"`
}
// LocalVolumeSource represents directly-attached storage with node affinity.
type LocalVolumeSource struct {
	// The full path to the volume on the node
	// For alpha, this path must be a directory
	// Once block as a source is supported, then this path can point to a block device
	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
}
// CSIPersistentVolumeSource represents storage that is managed by an external
// CSI volume driver.
type CSIPersistentVolumeSource struct {
	// Driver is the name of the driver to use for this volume.
	// Required.
	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
	// VolumeHandle is the unique volume name returned by the CSI volume
	// plugin's CreateVolume to refer to the volume on all subsequent calls.
	// Required.
	VolumeHandle string `json:"volumeHandle" protobuf:"bytes,2,opt,name=volumeHandle"`
	// Optional: The value to pass to ControllerPublishVolumeRequest.
	// Defaults to false (read/write).
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// ContainerPort represents a network port in a single container.
type ContainerPort struct {
	// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
	// named port in a pod must have a unique name. Name for the port that can be
	// referred to by services.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// Number of port to expose on the host.
	// If specified, this must be a valid port number, 0 < x < 65536.
	// If HostNetwork is specified, this must match ContainerPort.
	// Most containers do not need this.
	// +optional
	HostPort int32 `json:"hostPort,omitempty" protobuf:"varint,2,opt,name=hostPort"`
	// Number of port to expose on the pod's IP address.
	// This must be a valid port number, 0 < x < 65536.
	ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"`
	// Protocol for port. Must be UDP or TCP.
	// Defaults to "TCP".
	// +optional
	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"`
	// What host IP to bind the external port to.
	// +optional
	HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
}
// VolumeMount describes a mounting of a Volume within a container.
type VolumeMount struct {
	// This must match the Name of a Volume.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Mounted read-only if true, read-write otherwise (false or unspecified).
	// Defaults to false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
	// Path within the container at which the volume should be mounted. Must
	// not contain ':'.
	MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"`
	// Path within the volume from which the container's volume should be mounted.
	// Defaults to "" (volume's root).
	// +optional
	SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"`
	// mountPropagation determines how mounts are propagated from the host
	// to container and the other way around.
	// When not set, MountPropagationHostToContainer is used.
	// This field is alpha in 1.8 and can be reworked or removed in a future
	// release.
	// +optional
	MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"`
}
// MountPropagationMode describes mount propagation. See the MountPropagation*
// constants for the supported values.
type MountPropagationMode string
const (
	// MountPropagationHostToContainer means that the volume in a container will
	// receive new mounts from the host or other containers, but filesystems
	// mounted inside the container won't be propagated to the host or other
	// containers.
	// Note that this mode is recursively applied to all mounts in the volume
	// ("rslave" in Linux terminology).
	MountPropagationHostToContainer MountPropagationMode = "HostToContainer"
	// MountPropagationBidirectional means that the volume in a container will
	// receive new mounts from the host or other containers, and its own mounts
	// will be propagated from the container to the host or other containers.
	// Note that this mode is recursively applied to all mounts in the volume
	// ("rshared" in Linux terminology).
	MountPropagationBidirectional MountPropagationMode = "Bidirectional"
)
// VolumeDevice describes a mapping of a raw block device within a container.
type VolumeDevice struct {
	// name must match the name of a persistentVolumeClaim in the pod
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// devicePath is the path inside of the container that the device will be mapped to.
	DevicePath string `json:"devicePath" protobuf:"bytes,2,opt,name=devicePath"`
}
// EnvVar represents an environment variable present in a Container.
type EnvVar struct {
	// Name of the environment variable. Must be a C_IDENTIFIER.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Optional: no more than one of the following may be specified.

	// Variable references $(VAR_NAME) are expanded
	// using the previously defined environment variables in the container and
	// any service environment variables. If a variable cannot be resolved,
	// the reference in the input string will be unchanged. The $(VAR_NAME)
	// syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
	// references will never be expanded, regardless of whether the variable
	// exists or not.
	// Defaults to "".
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
	// Source for the environment variable's value. Cannot be used if value is not empty.
	// +optional
	ValueFrom *EnvVarSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"`
}
// EnvVarSource represents a source for the value of an EnvVar.
// At most one of its fields may be set.
type EnvVarSource struct {
	// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
	// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
	// +optional
	FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"`
	// Selects a resource of the container: only resources limits and requests
	// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
	// +optional
	ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"`
	// Selects a key of a ConfigMap.
	// +optional
	ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,3,opt,name=configMapKeyRef"`
	// Selects a key of a secret in the pod's namespace
	// +optional
	SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"`
}
// ObjectFieldSelector selects an APIVersioned field of an object.
type ObjectFieldSelector struct {
	// Version of the schema the FieldPath is written in terms of, defaults to "v1".
	// +optional
	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"`
	// Path of the field to select in the specified API version.
	FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"`
}
// ResourceFieldSelector represents container resources (cpu, memory) and
// their output format.
type ResourceFieldSelector struct {
	// Container name: required for volumes, optional for env vars
	// +optional
	ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"`
	// Required: resource to select
	Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"`
	// Specifies the output format of the exposed resources, defaults to "1"
	// +optional
	Divisor resource.Quantity `json:"divisor,omitempty" protobuf:"bytes,3,opt,name=divisor"`
}
// ConfigMapKeySelector selects a key from a ConfigMap.
type ConfigMapKeySelector struct {
	// The ConfigMap to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// The key to select.
	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
	// Specify whether the ConfigMap or its key must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
// SecretKeySelector selects a key of a Secret.
type SecretKeySelector struct {
	// The name of the secret in the pod's namespace to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// The key of the secret to select from. Must be a valid secret key.
	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
	// Specify whether the Secret or its key must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
// EnvFromSource represents the source of a set of ConfigMaps or Secrets to
// populate environment variables from.
type EnvFromSource struct {
	// An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
	// +optional
	Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"`
	// The ConfigMap to select from
	// +optional
	ConfigMapRef *ConfigMapEnvSource `json:"configMapRef,omitempty" protobuf:"bytes,2,opt,name=configMapRef"`
	// The Secret to select from
	// +optional
	SecretRef *SecretEnvSource `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
}
// ConfigMapEnvSource selects a ConfigMap to populate the environment
// variables with.
//
// The contents of the target ConfigMap's Data field will represent the
// key-value pairs as environment variables.
type ConfigMapEnvSource struct {
	// The ConfigMap to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// Specify whether the ConfigMap must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
}
// SecretEnvSource selects a Secret to populate the environment
// variables with.
//
// The contents of the target Secret's Data field will represent the
// key-value pairs as environment variables.
type SecretEnvSource struct {
	// The Secret to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// Specify whether the Secret must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
}
// HTTPHeader describes a custom header to be used in HTTP probes.
type HTTPHeader struct {
	// The header field name
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// The header field value
	Value string `json:"value" protobuf:"bytes,2,opt,name=value"`
}
// HTTPGetAction describes an action based on HTTP Get requests.
type HTTPGetAction struct {
	// Path to access on the HTTP server.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
	// Name or number of the port to access on the container.
	// Number must be in the range 1 to 65535.
	// Name must be an IANA_SVC_NAME.
	Port intstr.IntOrString `json:"port" protobuf:"bytes,2,opt,name=port"`
	// Host name to connect to, defaults to the pod IP. You probably want to set
	// "Host" in httpHeaders instead.
	// +optional
	Host string `json:"host,omitempty" protobuf:"bytes,3,opt,name=host"`
	// Scheme to use for connecting to the host.
	// Defaults to HTTP.
	// +optional
	Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"`
	// Custom headers to set in the request. HTTP allows repeated headers.
	// +optional
	HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"`
}
// URIScheme identifies the scheme used for connection to a host for Get actions.
type URIScheme string
const (
	// URISchemeHTTP means that the scheme used will be http://
	URISchemeHTTP URIScheme = "HTTP"
	// URISchemeHTTPS means that the scheme used will be https://
	URISchemeHTTPS URIScheme = "HTTPS"
)
// TCPSocketAction describes an action based on opening a socket.
type TCPSocketAction struct {
	// Number or name of the port to access on the container.
	// Number must be in the range 1 to 65535.
	// Name must be an IANA_SVC_NAME.
	Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"`
	// Optional: Host name to connect to, defaults to the pod IP.
	// +optional
	Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}
// ExecAction describes a "run in container" action.
type ExecAction struct {
	// Command is the command line to execute inside the container, the working directory for the
	// command is root ('/') in the container's filesystem. The command is simply exec'd, it is
	// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
	// a shell, you need to explicitly call out to that shell.
	// Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
	// +optional
	Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"`
}
// Probe describes a health check to be performed against a container to determine whether it is
// alive or ready to receive traffic.
type Probe struct {
	// The action taken to determine the health of a container
	Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"`
	// Number of seconds after the container has started before liveness probes are initiated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"`
	// Number of seconds after which the probe times out.
	// Defaults to 1 second. Minimum value is 1.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"`
	// How often (in seconds) to perform the probe.
	// Default to 10 seconds. Minimum value is 1.
	// +optional
	PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"`
	// Minimum consecutive successes for the probe to be considered successful after having failed.
	// Defaults to 1. Must be 1 for liveness. Minimum value is 1.
	// +optional
	SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"`
	// Minimum consecutive failures for the probe to be considered failed after having succeeded.
	// Defaults to 3. Minimum value is 1.
	// +optional
	FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"`
}
// PullPolicy describes a policy for if/when to pull a container image.
type PullPolicy string
const (
	// PullAlways means that kubelet always attempts to pull the latest image. Container will fail if the pull fails.
	PullAlways PullPolicy = "Always"
	// PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present
	PullNever PullPolicy = "Never"
	// PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
	PullIfNotPresent PullPolicy = "IfNotPresent"
)
// TerminationMessagePolicy describes how termination messages are retrieved from a container.
type TerminationMessagePolicy string
const (
	// TerminationMessageReadFile is the default behavior and will set the container status message to
	// the contents of the container's terminationMessagePath when the container exits.
	TerminationMessageReadFile TerminationMessagePolicy = "File"
	// TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs
	// for the container status message when the container exits with an error and the
	// terminationMessagePath has no contents.
	TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError"
)
// Capability represents a POSIX capabilities type.
type Capability string
// Capabilities adds and removes POSIX capabilities from running containers.
type Capabilities struct {
	// Added capabilities
	// +optional
	Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"`
	// Removed capabilities
	// +optional
	Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"`
}
// ResourceRequirements describes the compute resource requirements.
type ResourceRequirements struct {
	// Limits describes the maximum amount of compute resources allowed.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
	// +optional
	Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"`
	// Requests describes the minimum amount of compute resources required.
	// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
	// otherwise to an implementation-defined value.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
	// +optional
	Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"`
}
const (
	// TerminationMessagePathDefault means the default path to capture the application termination message running in a container
	TerminationMessagePathDefault string = "/dev/termination-log"
)
// Container is a single application container that you want to run within a pod.
type Container struct {
	// Name of the container specified as a DNS_LABEL.
	// Each container in a pod must have a unique name (DNS_LABEL).
	// Cannot be updated.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Docker image name.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	// This field is optional to allow higher level config management to default or override
	// container images in workload controllers like Deployments and StatefulSets.
	// +optional
	Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
	// Entrypoint array. Not executed within a shell.
	// The docker image's ENTRYPOINT is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
	// can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
	// regardless of whether the variable exists or not.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
	// Arguments to the entrypoint.
	// The docker image's CMD is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
	// can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
	// regardless of whether the variable exists or not.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
	// Container's working directory.
	// If not specified, the container runtime's default will be used, which
	// might be configured in the container image.
	// Cannot be updated.
	// +optional
	WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
	// List of ports to expose from the container. Exposing a port here gives
	// the system additional information about the network connections a
	// container uses, but is primarily informational. Not specifying a port here
	// DOES NOT prevent that port from being exposed. Any port which is
	// listening on the default "0.0.0.0" address inside a container will be
	// accessible from the network.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=containerPort
	// +patchStrategy=merge
	Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
	// List of sources to populate environment variables in the container.
	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
	// will be reported as an event when the container is starting. When a key exists in multiple
	// sources, the value associated with the last source will take precedence.
	// Values defined by an Env with a duplicate key will take precedence.
	// Cannot be updated.
	// +optional
	EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
	// List of environment variables to set in the container.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
	// Compute Resources required by this container.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
	// +optional
	Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
	// Pod volumes to mount into the container's filesystem.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=mountPath
	// +patchStrategy=merge
	VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
	// volumeDevices is the list of block devices to be used by the container.
	// This is an alpha feature and may change in the future.
	// +patchMergeKey=devicePath
	// +patchStrategy=merge
	// +optional
	VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"`
	// Periodic probe of container liveness.
	// Container will be restarted if the probe fails.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
	// Periodic probe of container service readiness.
	// Container will be removed from service endpoints if the probe fails.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
	// Actions that the management system should take in response to container lifecycle events.
	// Cannot be updated.
	// +optional
	Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
	// Optional: Path at which the file to which the container's termination message
	// will be written is mounted into the container's filesystem.
	// Message written is intended to be brief final status, such as an assertion failure message.
	// Will be truncated by the node if greater than 4096 bytes. The total message length across
	// all containers will be limited to 12kb.
	// Defaults to /dev/termination-log.
	// Cannot be updated.
	// +optional
	TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
	// Indicate how the termination message should be populated. File will use the contents of
	// terminationMessagePath to populate the container status message on both success and failure.
	// FallbackToLogsOnError will use the last chunk of container log output if the termination
	// message file is empty and the container exited with an error.
	// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
	// Defaults to File.
	// Cannot be updated.
	// +optional
	TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
	// Image pull policy.
	// One of Always, Never, IfNotPresent.
	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
	// +optional
	ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
	// Security options the pod should run with.
	// More info: https://kubernetes.io/docs/concepts/policy/security-context/
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
	// +optional
	SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`

	// Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
	// and shouldn't be used for general purpose containers.

	// Whether this container should allocate a buffer for stdin in the container runtime. If this
	// is not set, reads from stdin in the container will always result in EOF.
	// Default is false.
	// +optional
	Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
	// Whether the container runtime should close the stdin channel after it has been opened by
	// a single attach. When stdin is true the stdin stream will remain open across multiple attach
	// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
	// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
	// at which time stdin is closed and remains closed until the container is restarted. If this
	// flag is false, a container process that reads from stdin will never receive an EOF.
	// Default is false
	// +optional
	StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
	// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
	// Default is false.
	// +optional
	TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
}
// Handler defines a specific action that should be taken.
// TODO: pass structured data to these actions, and document that data here.
type Handler struct {
	// One and only one of the following should be specified.

	// Exec specifies the action to take.
	// +optional
	Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
	// HTTPGet specifies the http request to perform.
	// +optional
	HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
	// TCPSocket specifies an action involving a TCP port.
	// TCP hooks not yet supported
	// TODO: implement a realistic TCP lifecycle hook
	// +optional
	TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
}
// Lifecycle describes actions that the management system should take in response to container lifecycle
// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
// until the action is complete, unless the container process fails, in which case the handler is aborted.
type Lifecycle struct {
	// PostStart is called immediately after a container is created. If the handler fails,
	// the container is terminated and restarted according to its restart policy.
	// Other management of the container blocks until the hook completes.
	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
	// +optional
	PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"`
	// PreStop is called immediately before a container is terminated.
	// The container is terminated after the handler completes.
	// The reason for termination is passed to the handler.
	// Regardless of the outcome of the handler, the container is eventually terminated.
	// Other management of the container blocks until the hook completes.
	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
	// +optional
	PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
}
// ConditionStatus is the status of a condition: "True", "False", or "Unknown".
type ConditionStatus string

// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
// can't decide if a resource is in the condition or not. In the future, we could add other
// intermediate conditions, e.g. ConditionDegraded.
const (
	ConditionTrue    ConditionStatus = "True"
	ConditionFalse   ConditionStatus = "False"
	ConditionUnknown ConditionStatus = "Unknown"
)
// ContainerStateWaiting is a waiting state of a container.
type ContainerStateWaiting struct {
	// (brief) reason the container is not yet running.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason"`
	// Message regarding why the container is not yet running.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
}
// ContainerStateRunning is a running state of a container.
type ContainerStateRunning struct {
	// Time at which the container was last (re-)started
	// +optional
	StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"`
}
// ContainerStateTerminated is a terminated state of a container.
type ContainerStateTerminated struct {
	// Exit status from the last termination of the container
	ExitCode int32 `json:"exitCode" protobuf:"varint,1,opt,name=exitCode"`
	// Signal from the last termination of the container
	// +optional
	Signal int32 `json:"signal,omitempty" protobuf:"varint,2,opt,name=signal"`
	// (brief) reason from the last termination of the container
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
	// Message regarding the last termination of the container
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
	// Time at which previous execution of the container started
	// +optional
	StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"`
	// Time at which the container last terminated
	// +optional
	FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"`
	// Container's ID in the format 'docker://<container_id>'
	// +optional
	ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"`
}
// ContainerState holds a possible state of container.
// Only one of its members may be specified.
// If none of them is specified, the default one is ContainerStateWaiting.
type ContainerState struct {
	// Details about a waiting container
	// +optional
	Waiting *ContainerStateWaiting `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"`
	// Details about a running container
	// +optional
	Running *ContainerStateRunning `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"`
	// Details about a terminated container
	// +optional
	Terminated *ContainerStateTerminated `json:"terminated,omitempty" protobuf:"bytes,3,opt,name=terminated"`
}
// ContainerStatus contains details for the current status of this container.
type ContainerStatus struct {
	// This must be a DNS_LABEL. Each container in a pod must have a unique name.
	// Cannot be updated.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Details about the container's current condition.
	// +optional
	State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"`
	// Details about the container's last termination condition.
	// +optional
	LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"`
	// Specifies whether the container has passed its readiness probe.
	Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"`
	// The number of times the container has been restarted, currently based on
	// the number of dead containers that have not yet been removed.
	// Note that this is calculated from dead containers. But those containers are subject to
	// garbage collection. This value will get capped at 5 by GC.
	RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"`
	// The image the container is running.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	// TODO(dchen1107): Which image the container is running with?
	Image string `json:"image" protobuf:"bytes,6,opt,name=image"`
	// ImageID of the container's image.
	ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"`
	// Container's ID in the format 'docker://<container_id>'.
	// +optional
	ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"`
}
// PodPhase is a label for the condition of a pod at the current time.
type PodPhase string
// These are the valid statuses of pods.
const (
	// PodPending means the pod has been accepted by the system, but one or more of the containers
	// has not been started. This includes time before being bound to a node, as well as time spent
	// pulling images onto the host.
	PodPending PodPhase = "Pending"
	// PodRunning means the pod has been bound to a node and all of the containers have been started.
	// At least one container is still running or is in the process of being restarted.
	PodRunning PodPhase = "Running"
	// PodSucceeded means that all containers in the pod have voluntarily terminated
	// with a container exit code of 0, and the system is not going to restart any of these containers.
	PodSucceeded PodPhase = "Succeeded"
	// PodFailed means that all containers in the pod have terminated, and at least one container has
	// terminated in a failure (exited with a non-zero exit code or was stopped by the system).
	PodFailed PodPhase = "Failed"
	// PodUnknown means that for some reason the state of the pod could not be obtained, typically due
	// to an error in communicating with the host of the pod.
	PodUnknown PodPhase = "Unknown"
)
// PodConditionType is a valid value for PodCondition.Type
type PodConditionType string
// These are valid conditions of pod.
const (
	// PodScheduled represents status of the scheduling process for this pod.
	PodScheduled PodConditionType = "PodScheduled"
	// PodReady means the pod is able to service requests and should be added to the
	// load balancing pools of all matching services.
	PodReady PodConditionType = "Ready"
	// PodInitialized means that all init containers in the pod have started successfully.
	PodInitialized PodConditionType = "Initialized"
	// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
	// can't schedule the pod right now, for example due to insufficient resources in the cluster.
	PodReasonUnschedulable = "Unschedulable"
)
// PodCondition contains details for the current condition of this pod.
type PodCondition struct {
	// Type is the type of the condition.
	// Currently only Ready.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
	Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"`
	// Status is the status of the condition.
	// Can be True, False, Unknown.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// Last time we probed the condition.
	// +optional
	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
	// Last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
	// Unique, one-word, CamelCase reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
	// Human-readable message indicating details about last transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// RestartPolicy describes how the container should be restarted.
// Only one of the following restart policies may be specified.
// If none of the following policies is specified, the default one
// is RestartPolicyAlways.
type RestartPolicy string
const (
	// RestartPolicyAlways means the container is restarted regardless of its exit status.
	RestartPolicyAlways RestartPolicy = "Always"
	// RestartPolicyOnFailure means the container is restarted only if it exits with failure.
	RestartPolicyOnFailure RestartPolicy = "OnFailure"
	// RestartPolicyNever means the container is never restarted after it exits.
	RestartPolicyNever RestartPolicy = "Never"
)
// DNSPolicy defines how a pod's DNS will be configured.
type DNSPolicy string
const (
	// DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS
	// first, if it is available, then fall back on the default
	// (as determined by kubelet) DNS settings.
	DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet"
	// DNSClusterFirst indicates that the pod should use cluster DNS
	// first unless hostNetwork is true, if it is available, then
	// fall back on the default (as determined by kubelet) DNS settings.
	DNSClusterFirst DNSPolicy = "ClusterFirst"
	// DNSDefault indicates that the pod should use the default (as
	// determined by kubelet) DNS settings.
	DNSDefault DNSPolicy = "Default"
	// DNSNone indicates that the pod should use empty DNS settings. DNS
	// parameters such as nameservers and search paths should be defined via
	// DNSConfig.
	DNSNone DNSPolicy = "None"
	// DefaultTerminationGracePeriodSeconds is the default grace period, in seconds,
	// applied when PodSpec.TerminationGracePeriodSeconds is nil (see its field docs:
	// "Defaults to 30 seconds").
	DefaultTerminationGracePeriodSeconds = 30
)
// A node selector represents the union of the results of one or more label queries
// over a set of nodes; that is, it represents the OR of the selectors represented
// by the node selector terms.
type NodeSelector struct {
	// Required. A list of node selector terms. The terms are ORed.
	NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"`
}
// A null or empty node selector term matches no objects.
type NodeSelectorTerm struct {
	// Required. A list of node selector requirements. The requirements are ANDed.
	MatchExpressions []NodeSelectorRequirement `json:"matchExpressions" protobuf:"bytes,1,rep,name=matchExpressions"`
}
// A node selector requirement is a selector that contains values, a key, and an operator
// that relates the key and values.
type NodeSelectorRequirement struct {
	// The label key that the selector applies to.
	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
	// Represents a key's relationship to a set of values.
	// Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
	Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"`
	// An array of string values. If the operator is In or NotIn,
	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
	// the values array must be empty. If the operator is Gt or Lt, the values
	// array must have a single element, which will be interpreted as an integer.
	// This array is replaced during a strategic merge patch.
	// +optional
	Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
}
// A node selector operator is the set of operators that can be used in
// a node selector requirement.
type NodeSelectorOperator string
const (
	NodeSelectorOpIn           NodeSelectorOperator = "In"
	NodeSelectorOpNotIn        NodeSelectorOperator = "NotIn"
	NodeSelectorOpExists       NodeSelectorOperator = "Exists"
	NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
	NodeSelectorOpGt           NodeSelectorOperator = "Gt"
	NodeSelectorOpLt           NodeSelectorOperator = "Lt"
)
// Affinity is a group of affinity scheduling rules.
type Affinity struct {
	// Describes node affinity scheduling rules for the pod.
	// +optional
	NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"`
	// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
	// +optional
	PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"`
	// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
	// +optional
	PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"`
}
// Pod affinity is a group of inter pod affinity scheduling rules.
type PodAffinity struct {
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system will try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system may or may not try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
	// The scheduler will prefer to schedule pods to nodes that satisfy
	// the affinity expressions specified by this field, but it may choose
	// a node that violates one or more of the expressions. The node that is
	// most preferred is the one with the greatest sum of weights, i.e.
	// for each node that meets all of the scheduling requirements (resource
	// request, requiredDuringScheduling affinity expressions, etc.),
	// compute a sum by iterating through the elements of this field and adding
	// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
	// node(s) with the highest sum are the most preferred.
	// +optional
	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// Pod anti affinity is a group of inter pod anti affinity scheduling rules.
type PodAntiAffinity struct {
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// If the anti-affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the anti-affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system will try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
	// If the anti-affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the anti-affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system may or may not try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
	// The scheduler will prefer to schedule pods to nodes that satisfy
	// the anti-affinity expressions specified by this field, but it may choose
	// a node that violates one or more of the expressions. The node that is
	// most preferred is the one with the greatest sum of weights, i.e.
	// for each node that meets all of the scheduling requirements (resource
	// request, requiredDuringScheduling anti-affinity expressions, etc.),
	// compute a sum by iterating through the elements of this field and adding
	// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
	// node(s) with the highest sum are the most preferred.
	// +optional
	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).
type WeightedPodAffinityTerm struct {
	// weight associated with matching the corresponding podAffinityTerm,
	// in the range 1-100.
	Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
	// Required. A pod affinity term, associated with the corresponding weight.
	PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"`
}
// Defines a set of pods (namely those matching the labelSelector
// relative to the given namespace(s)) that this pod should be
// co-located (affinity) or not co-located (anti-affinity) with,
// where co-located is defined as running on a node whose value of
// the label with key <topologyKey> matches that of any node on which
// a pod of the set of pods is running.
type PodAffinityTerm struct {
	// A label query over a set of resources, in this case pods.
	// +optional
	LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
	// namespaces specifies which namespaces the labelSelector applies to (matches against);
	// null or empty list means "this pod's namespace"
	Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"`
	// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
	// the labelSelector in the specified namespaces, where co-located is defined as running on a node
	// whose value of the label with key topologyKey matches that of any node on which any of the
	// selected pods is running.
	// Empty topologyKey is not allowed.
	TopologyKey string `json:"topologyKey" protobuf:"bytes,3,opt,name=topologyKey"`
}
// Node affinity is a group of node affinity scheduling rules.
type NodeAffinity struct {
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to an update), the system
	// will try to eventually evict the pod from its node.
	// +optional
	// RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to an update), the system
	// may or may not try to eventually evict the pod from its node.
	// +optional
	RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=requiredDuringSchedulingIgnoredDuringExecution"`
	// The scheduler will prefer to schedule pods to nodes that satisfy
	// the affinity expressions specified by this field, but it may choose
	// a node that violates one or more of the expressions. The node that is
	// most preferred is the one with the greatest sum of weights, i.e.
	// for each node that meets all of the scheduling requirements (resource
	// request, requiredDuringScheduling affinity expressions, etc.),
	// compute a sum by iterating through the elements of this field and adding
	// "weight" to the sum if the node matches the corresponding matchExpressions; the
	// node(s) with the highest sum are the most preferred.
	// +optional
	PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// An empty preferred scheduling term matches all objects with implicit weight 0
// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
type PreferredSchedulingTerm struct {
	// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
	Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
	// A node selector term, associated with the corresponding weight.
	Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"`
}
// The node this Taint is attached to has the "effect" on
// any pod that does not tolerate the Taint.
type Taint struct {
	// Required. The taint key to be applied to a node.
	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
	// Required. The taint value corresponding to the taint key.
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
	// Required. The effect of the taint on pods
	// that do not tolerate the taint.
	// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
	Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"`
	// TimeAdded represents the time at which the taint was added.
	// It is only written for NoExecute taints.
	// +optional
	TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"`
}
// TaintEffect describes the effect a Taint has on pods that do not
// tolerate it. Valid effects are defined in the const block below.
type TaintEffect string
const (
	// Do not allow new pods to schedule onto the node unless they tolerate the taint,
	// but allow all pods submitted to Kubelet without going through the scheduler
	// to start, and allow all already-running pods to continue running.
	// Enforced by the scheduler.
	TaintEffectNoSchedule TaintEffect = "NoSchedule"
	// Like TaintEffectNoSchedule, but the scheduler tries not to schedule
	// new pods onto the node, rather than prohibiting new pods from scheduling
	// onto the node entirely. Enforced by the scheduler.
	TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
	// Kubelet without going through the scheduler to start.
	// Enforced by Kubelet and the scheduler.
	// TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
	// Evict any already-running pods that do not tolerate the taint.
	// Currently enforced by NodeController.
	TaintEffectNoExecute TaintEffect = "NoExecute"
)
// The pod this Toleration is attached to tolerates any taint that matches
// the triple <key,value,effect> using the matching operator <operator>.
type Toleration struct {
	// Key is the taint key that the toleration applies to. Empty means match all taint keys.
	// If the key is empty, operator must be Exists; this combination means to match all values and all keys.
	// +optional
	Key string `json:"key,omitempty" protobuf:"bytes,1,opt,name=key"`
	// Operator represents a key's relationship to the value.
	// Valid operators are Exists and Equal. Defaults to Equal.
	// Exists is equivalent to wildcard for value, so that a pod can
	// tolerate all taints of a particular category.
	// +optional
	Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"`
	// Value is the taint value the toleration matches to.
	// If the operator is Exists, the value should be empty, otherwise just a regular string.
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"`
	// Effect indicates the taint effect to match. Empty means match all taint effects.
	// When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
	// +optional
	Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"`
	// TolerationSeconds represents the period of time the toleration (which must be
	// of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
	// it is not set, which means tolerate the taint forever (do not evict). Zero and
	// negative values will be treated as 0 (evict immediately) by the system.
	// +optional
	TolerationSeconds *int64 `json:"tolerationSeconds,omitempty" protobuf:"varint,5,opt,name=tolerationSeconds"`
}
// A toleration operator is the set of operators that can be used in a toleration.
type TolerationOperator string
const (
	TolerationOpExists TolerationOperator = "Exists"
	TolerationOpEqual  TolerationOperator = "Equal"
)
// PodSpec is a description of a pod.
type PodSpec struct {
	// List of volumes that can be mounted by containers belonging to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge,retainKeys
	Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"`
	// List of initialization containers belonging to the pod.
	// Init containers are executed in order prior to containers being started. If any
	// init container fails, the pod is considered to have failed and is handled according
	// to its restartPolicy. The name for an init container or normal container must be
	// unique among all containers.
	// Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes.
	// The resourceRequirements of an init container are taken into account during scheduling
	// by finding the highest request/limit for each resource type, and then using the max of
	// that value or the sum of the normal containers. Limits are applied to init containers
	// in a similar fashion.
	// Init containers cannot currently be added or removed.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
	// +patchMergeKey=name
	// +patchStrategy=merge
	InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"`
	// List of containers belonging to the pod.
	// Containers cannot currently be added or removed.
	// There must be at least one container in a Pod.
	// Cannot be updated.
	// +patchMergeKey=name
	// +patchStrategy=merge
	Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"`
	// Restart policy for all containers within the pod.
	// One of Always, OnFailure, Never.
	// Default to Always.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
	// +optional
	RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"`
	// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
	// Value must be non-negative integer. The value zero indicates delete immediately.
	// If this value is nil, the default grace period will be used instead.
	// The grace period is the duration in seconds after the processes running in the pod are sent
	// a termination signal and the time when the processes are forcibly halted with a kill signal.
	// Set this value longer than the expected cleanup time for your process.
	// Defaults to 30 seconds.
	// +optional
	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"`
	// Optional duration in seconds the pod may be active on the node relative to
	// StartTime before the system will actively try to mark it failed and kill associated containers.
	// Value must be a positive integer.
	// +optional
	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"`
	// Set DNS policy for the pod.
	// Defaults to "ClusterFirst".
	// Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
	// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
	// To have DNS options set along with hostNetwork, you have to specify DNS policy
	// explicitly to 'ClusterFirstWithHostNet'.
	// Note that 'None' policy is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it.
	// +optional
	DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"`
	// NodeSelector is a selector which must be true for the pod to fit on a node.
	// Selector which must match a node's labels for the pod to be scheduled on that node.
	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
	// +optional
	NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"`
	// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
	// +optional
	ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"`
	// DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
	// Deprecated: Use serviceAccountName instead.
	// +k8s:conversion-gen=false
	// +optional
	DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"`
	// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
	// +optional
	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
	// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
	// the scheduler simply schedules this pod onto that node, assuming that it fits resource
	// requirements.
	// +optional
	NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
	// Host networking requested for this pod. Use the host's network namespace.
	// If this option is set, the ports that will be used must be specified.
	// Default to false.
	// +k8s:conversion-gen=false
	// +optional
	HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"`
	// Use the host's pid namespace.
	// Optional: Default to false.
	// +k8s:conversion-gen=false
	// +optional
	HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"`
	// Use the host's ipc namespace.
	// Optional: Default to false.
	// +k8s:conversion-gen=false
	// +optional
	HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"`
	// SecurityContext holds pod-level security attributes and common container settings.
	// Optional: Defaults to empty. See type description for default values of each field.
	// +optional
	SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"`
	// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
	// If specified, these secrets will be passed to individual puller implementations for them to use. For example,
	// in the case of docker, only DockerConfig type secrets are honored.
	// More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"`
	// Specifies the hostname of the Pod
	// If not specified, the pod's hostname will be set to a system-defined value.
	// +optional
	Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"`
	// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
	// If not specified, the pod will not have a domainname at all.
	// +optional
	Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"`
	// If specified, the pod's scheduling constraints
	// +optional
	Affinity *Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"`
	// If specified, the pod will be dispatched by specified scheduler.
	// If not specified, the pod will be dispatched by default scheduler.
	// +optional
	SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"`
	// If specified, the pod's tolerations.
	// +optional
	Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"`
	// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
	// file if specified. This is only valid for non-hostNetwork pods.
	// +optional
	// +patchMergeKey=ip
	// +patchStrategy=merge
	HostAliases []HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"`
	// If specified, indicates the pod's priority. "SYSTEM" is a special keyword
	// which indicates the highest priority. Any other name must be defined by
	// creating a PriorityClass object with that name.
	// If not specified, the pod priority will be default or zero if there is no
	// default.
	// +optional
	PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,24,opt,name=priorityClassName"`
	// The priority value. Various system components use this field to find the
	// priority of the pod. When Priority Admission Controller is enabled, it
	// prevents users from setting this field. The admission controller populates
	// this field from PriorityClassName.
	// The higher the value, the higher the priority.
	// +optional
	Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"`
	// Specifies the DNS parameters of a pod.
	// Parameters specified here will be merged to the generated DNS
	// configuration based on DNSPolicy.
	// This is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it.
	// +optional
	DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"`
}
// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
// pod's hosts file.
type HostAlias struct {
	// IP address of the host file entry.
	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
	// Hostnames for the above IP address.
	Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"`
}
// PodSecurityContext holds pod-level security attributes and common container settings.
// Some fields are also present in container.securityContext. Field values of
// container.securityContext take precedence over field values of PodSecurityContext.
type PodSecurityContext struct {
	// The SELinux context to be applied to all containers.
	// If unspecified, the container runtime will allocate a random SELinux context for each
	// container. May also be set in SecurityContext. If set in
	// both SecurityContext and PodSecurityContext, the value specified in SecurityContext
	// takes precedence for that container.
	// +optional
	SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"`
	// The UID to run the entrypoint of the container process.
	// Defaults to user specified in image metadata if unspecified.
	// May also be set in SecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence
	// for that container.
	// +optional
	RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"`
	// Indicates that the container must run as a non-root user.
	// If true, the Kubelet will validate the image at runtime to ensure that it
	// does not run as UID 0 (root) and fail to start the container if it does.
	// If unset or false, no such validation will be performed.
	// May also be set in SecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"`
	// A list of groups applied to the first process run in each container, in addition
	// to the container's primary GID. If unspecified, no groups will be added to
	// any container.
	// +optional
	SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"`
	// A special supplemental group that applies to all containers in a pod.
	// Some volume types allow the Kubelet to change the ownership of that volume
	// to be owned by the pod:
	//
	// 1. The owning GID will be the FSGroup
	// 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
	// 3. The permission bits are OR'd with rw-rw----
	//
	// If unset, the Kubelet will not modify the ownership and permissions of any volume.
	// +optional
	FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"`
}
// PodQOSClass defines the supported QoS (Quality of Service) classes of Pods.
type PodQOSClass string

const (
	// PodQOSGuaranteed is the Guaranteed QoS class.
	PodQOSGuaranteed PodQOSClass = "Guaranteed"
	// PodQOSBurstable is the Burstable QoS class.
	PodQOSBurstable PodQOSClass = "Burstable"
	// PodQOSBestEffort is the BestEffort QoS class.
	PodQOSBestEffort PodQOSClass = "BestEffort"
)
// PodDNSConfig defines the DNS parameters of a pod in addition to
// those generated from DNSPolicy.
type PodDNSConfig struct {
	// A list of DNS name server IP addresses.
	// This will be appended to the base nameservers generated from DNSPolicy.
	// Duplicated nameservers will be removed.
	// +optional
	Nameservers []string `json:"nameservers,omitempty" protobuf:"bytes,1,rep,name=nameservers"`
	// A list of DNS search domains for host-name lookup.
	// This will be appended to the base search paths generated from DNSPolicy.
	// Duplicated search paths will be removed.
	// +optional
	Searches []string `json:"searches,omitempty" protobuf:"bytes,2,rep,name=searches"`
	// A list of DNS resolver options.
	// This will be merged with the base options generated from DNSPolicy.
	// Duplicated entries will be removed. Resolution options given in Options
	// will override those that appear in the base DNSPolicy.
	// +optional
	Options []PodDNSConfigOption `json:"options,omitempty" protobuf:"bytes,3,rep,name=options"`
}
// PodDNSConfigOption defines DNS resolver options of a pod.
type PodDNSConfigOption struct {
	// Name of the resolver option. Required.
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// Value of the resolver option, if any.
	// +optional
	Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
}
// PodStatus represents information about the status of a pod. Status may trail the actual
// state of a system.
type PodStatus struct {
	// Current phase of the pod.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
	// +optional
	Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"`
	// Current service state of pod.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
	// A human readable message indicating details about why the pod is in this condition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
	// A brief CamelCase message indicating details about why the pod is in this state.
	// e.g. 'Evicted'
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
	// IP address of the host to which the pod is assigned. Empty if not yet scheduled.
	// +optional
	HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
	// IP address allocated to the pod. Routable at least within the cluster.
	// Empty if not yet allocated.
	// +optional
	PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"`
	// RFC 3339 date and time at which the object was acknowledged by the Kubelet.
	// This is before the Kubelet pulled the container image(s) for the pod.
	// +optional
	StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"`
	// The list has one entry per init container in the manifest. The most recent successful
	// init container will have ready = true, the most recently started container will have
	// startTime set.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
	InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"`
	// The list has one entry per container in the manifest. Each entry is currently the output
	// of `docker inspect`.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
	// +optional
	ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"`
	// The Quality of Service (QOS) classification assigned to the pod based on resource requirements.
	// See PodQOSClass type for available QOS classes.
	// More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md
	// +optional
	QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded
type PodStatusResult struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Most recently observed status of the pod.
	// This data may not be up to date.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Pod is a collection of containers that can run on a host. This resource is created
// by clients and scheduled onto hosts.
type Pod struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Specification of the desired behavior of the pod.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Most recently observed status of the pod.
	// This data may not be up to date.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodList is a list of Pods.
type PodList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of pods.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md
	Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// PodTemplateSpec describes the data a pod should have when created from a template.
type PodTemplateSpec struct {
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Specification of the desired behavior of the pod.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodTemplate describes a template for creating copies of a predefined pod.
type PodTemplate struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Template defines the pods that will be created from this pod template.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodTemplateList is a list of PodTemplates.
type PodTemplateList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of pod templates.
	Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ReplicationControllerSpec is the specification of a replication controller.
type ReplicationControllerSpec struct {
	// Replicas is the number of desired replicas.
	// This is a pointer to distinguish between explicit zero and unspecified.
	// Defaults to 1.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
	// +optional
	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
	// Minimum number of seconds for which a newly created pod should be ready
	// without any of its containers crashing, for it to be considered available.
	// Defaults to 0 (pod will be considered available as soon as it is ready).
	// +optional
	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
	// Selector is a label query over pods that should match the Replicas count.
	// If Selector is empty, it is defaulted to the labels present on the Pod template.
	// Label keys and values that must match in order to be controlled by this replication
	// controller, if empty defaulted to labels on Pod template.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
	// +optional
	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
	// TemplateRef is a reference to an object that describes the pod that will be created if
	// insufficient replicas are detected.
	// Reference to an object that describes the pod that will be created if insufficient replicas are detected.
	// +optional
	// TemplateRef *ObjectReference `json:"templateRef,omitempty"`
	// Template is the object that describes the pod that will be created if
	// insufficient replicas are detected. This takes precedence over a TemplateRef.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
	// +optional
	Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
}
// ReplicationControllerStatus represents the current status of a replication
// controller.
type ReplicationControllerStatus struct {
	// Replicas is the most recently observed number of replicas.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
	// The number of pods that have labels matching the labels of the pod template of the replication controller.
	// +optional
	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
	// The number of ready replicas for this replication controller.
	// +optional
	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
	// The number of available replicas (ready for at least minReadySeconds) for this replication controller.
	// +optional
	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
	// ObservedGeneration reflects the generation of the most recently observed replication controller.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
	// Represents the latest available observations of a replication controller's current state.
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
}
// ReplicationControllerConditionType is the type of a replication controller condition.
type ReplicationControllerConditionType string

// These are valid conditions of a replication controller.
const (
	// ReplicationControllerReplicaFailure is added in a replication controller when one of its pods
	// fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors,
	// etc. or deleted due to kubelet being down or finalizers are failing.
	ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure"
)
// ReplicationControllerCondition describes the state of a replication controller at a certain point.
type ReplicationControllerCondition struct {
	// Type of replication controller condition.
	Type ReplicationControllerConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicationControllerConditionType"`
	// Status of the condition, one of True, False, Unknown.
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// The last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
	// The reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
	// A human readable message indicating details about the transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
}
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ReplicationController represents the configuration of a replication controller.
type ReplicationController struct {
	metav1.TypeMeta `json:",inline"`
	// If the Labels of a ReplicationController are empty, they are defaulted to
	// be the same as the Pod(s) that the replication controller manages.
	// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the specification of the desired behavior of the replication controller.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status is the most recently observed status of the replication controller.
	// This data may be out of date by some window of time.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ReplicationControllerList is a collection of replication controllers.
type ReplicationControllerList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of replication controllers.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
	Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ServiceAffinity is the session affinity type string ("ClientIP" or "None").
type ServiceAffinity string

const (
	// ServiceAffinityClientIP is the Client IP based.
	ServiceAffinityClientIP ServiceAffinity = "ClientIP"

	// ServiceAffinityNone - no session affinity.
	ServiceAffinityNone ServiceAffinity = "None"
)

// DefaultClientIPServiceAffinitySeconds is the default ClientIP session sticky
// time in seconds: 10800 (3 hours).
const DefaultClientIPServiceAffinitySeconds int32 = 10800
// SessionAffinityConfig represents the configurations of session affinity.
type SessionAffinityConfig struct {
	// clientIP contains the configurations of Client IP based session affinity.
	// +optional
	ClientIP *ClientIPConfig `json:"clientIP,omitempty" protobuf:"bytes,1,opt,name=clientIP"`
}
// ClientIPConfig represents the configurations of Client IP based session affinity.
type ClientIPConfig struct {
	// timeoutSeconds specifies the seconds of ClientIP type session sticky time.
	// The value must be >0 && <=86400 (for 1 day) if ServiceAffinity == "ClientIP".
	// Default value is 10800 (for 3 hours).
	// +optional
	TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"`
}
// ServiceType describes the ingress methods for a service.
type ServiceType string

const (
	// ServiceTypeClusterIP means a service will only be accessible inside the
	// cluster, via the cluster IP.
	ServiceTypeClusterIP ServiceType = "ClusterIP"

	// ServiceTypeNodePort means a service will be exposed on one port of
	// every node, in addition to 'ClusterIP' type.
	ServiceTypeNodePort ServiceType = "NodePort"

	// ServiceTypeLoadBalancer means a service will be exposed via an
	// external load balancer (if the cloud provider supports it), in addition
	// to 'NodePort' type.
	ServiceTypeLoadBalancer ServiceType = "LoadBalancer"

	// ServiceTypeExternalName means a service consists of only a reference to
	// an external name that kubedns or equivalent will return as a CNAME
	// record, with no exposing or proxying of any pods involved.
	ServiceTypeExternalName ServiceType = "ExternalName"
)
// ServiceExternalTrafficPolicyType is the external traffic policy type string
// for a service ("Local" or "Cluster").
type ServiceExternalTrafficPolicyType string

const (
	// ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior.
	ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local"
	// ServiceExternalTrafficPolicyTypeCluster specifies node-global (legacy) behavior.
	ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster"
)
// ServiceStatus represents the current status of a service.
type ServiceStatus struct {
	// LoadBalancer contains the current status of the load-balancer,
	// if one is present.
	// +optional
	LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"`
}
// LoadBalancerStatus represents the status of a load-balancer.
type LoadBalancerStatus struct {
	// Ingress is a list containing ingress points for the load-balancer.
	// Traffic intended for the service should be sent to these ingress points.
	// +optional
	Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"`
}
// LoadBalancerIngress represents the status of a load-balancer ingress point:
// traffic intended for the service should be sent to an ingress point.
type LoadBalancerIngress struct {
	// IP is set for load-balancer ingress points that are IP based
	// (typically GCE or OpenStack load-balancers).
	// +optional
	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
	// Hostname is set for load-balancer ingress points that are DNS based
	// (typically AWS load-balancers).
	// +optional
	Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"`
}
// ServiceSpec describes the attributes that a user creates on a service.
type ServiceSpec struct {
	// The list of ports that are exposed by this service.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +patchMergeKey=port
	// +patchStrategy=merge
	Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"`
	// Route service traffic to pods with label keys and values matching this
	// selector. If empty or not present, the service is assumed to have an
	// external process managing its endpoints, which Kubernetes will not
	// modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
	// Ignored if type is ExternalName.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/
	// +optional
	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
	// clusterIP is the IP address of the service and is usually assigned
	// randomly by the master. If an address is specified manually and is not in
	// use by others, it will be allocated to the service; otherwise, creation
	// of the service will fail. This field can not be changed through updates.
	// Valid values are "None", empty string (""), or a valid IP address. "None"
	// can be specified for headless services when proxying is not required.
	// Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if
	// type is ExternalName.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +optional
	ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"`
	// type determines how the Service is exposed. Defaults to ClusterIP. Valid
	// options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
	// "ExternalName" maps to the specified externalName.
	// "ClusterIP" allocates a cluster-internal IP address for load-balancing to
	// endpoints. Endpoints are determined by the selector or if that is not
	// specified, by manual construction of an Endpoints object. If clusterIP is
	// "None", no virtual IP is allocated and the endpoints are published as a
	// set of endpoints rather than a stable IP.
	// "NodePort" builds on ClusterIP and allocates a port on every node which
	// routes to the clusterIP.
	// "LoadBalancer" builds on NodePort and creates an
	// external load-balancer (if supported in the current cloud) which routes
	// to the clusterIP.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types
	// +optional
	Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"`
	// externalIPs is a list of IP addresses for which nodes in the cluster
	// will also accept traffic for this service. These IPs are not managed by
	// Kubernetes. The user is responsible for ensuring that traffic arrives
	// at a node with this IP. A common example is external load-balancers
	// that are not part of the Kubernetes system.
	// +optional
	ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"`
	// Supports "ClientIP" and "None". Used to maintain session affinity.
	// Enable client IP based session affinity.
	// Must be ClientIP or None.
	// Defaults to None.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +optional
	SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"`
	// Only applies to Service Type: LoadBalancer
	// LoadBalancer will get created with the IP specified in this field.
	// This feature depends on whether the underlying cloud-provider supports specifying
	// the loadBalancerIP when a load balancer is created.
	// This field will be ignored if the cloud-provider does not support the feature.
	// +optional
	LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"`
	// If specified and supported by the platform, traffic through the cloud-provider
	// load-balancer will be restricted to the specified client IPs. This field will be
	// ignored if the cloud-provider does not support the feature.
	// More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
	// +optional
	LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"`
	// externalName is the external reference that kubedns or equivalent will
	// return as a CNAME record for this service. No proxying will be involved.
	// Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123)
	// and requires Type to be ExternalName.
	// +optional
	ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"`
	// externalTrafficPolicy denotes if this Service desires to route external
	// traffic to node-local or cluster-wide endpoints. "Local" preserves the
	// client source IP and avoids a second hop for LoadBalancer and Nodeport
	// type services, but risks potentially imbalanced traffic spreading.
	// "Cluster" obscures the client source IP and may cause a second hop to
	// another node, but should have good overall load-spreading.
	// +optional
	ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"`
	// healthCheckNodePort specifies the healthcheck nodePort for the service.
	// If not specified, HealthCheckNodePort is created by the service api
	// backend with the allocated nodePort. Will use user-specified nodePort value
	// if specified by the client. Only takes effect when Type is set to LoadBalancer
	// and ExternalTrafficPolicy is set to Local.
	// +optional
	HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"`
	// publishNotReadyAddresses, when set to true, indicates that DNS implementations
	// must publish the notReadyAddresses of subsets for the Endpoints associated with
	// the Service. The default value is false.
	// The primary use case for setting this field is to use a StatefulSet's Headless Service
	// to propagate SRV records for its Pods without respect to their readiness for purpose
	// of peer discovery.
	// This field will replace the service.alpha.kubernetes.io/tolerate-unready-endpoints
	// when that annotation is deprecated and all clients have been converted to use this
	// field.
	// +optional
	PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"`
	// sessionAffinityConfig contains the configurations of session affinity.
	// +optional
	SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"`
}
// ServicePort contains information on a service's port.
type ServicePort struct {
	// The name of this port within the service. This must be a DNS_LABEL.
	// All ports within a ServiceSpec must have unique names. This maps to
	// the 'Name' field in EndpointPort objects.
	// Optional if only one ServicePort is defined on this service.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// The IP protocol for this port. Supports "TCP" and "UDP".
	// Default is TCP.
	// +optional
	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
	// The port that will be exposed by this service.
	Port int32 `json:"port" protobuf:"varint,3,opt,name=port"`
	// Number or name of the port to access on the pods targeted by the service.
	// Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
	// If this is a string, it will be looked up as a named port in the
	// target Pod's container ports. If this is not specified, the value
	// of the 'port' field is used (an identity map).
	// This field is ignored for services with clusterIP=None, and should be
	// omitted or set equal to the 'port' field.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
	// +optional
	TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"`
	// The port on each node on which this service is exposed when type=NodePort or LoadBalancer.
	// Usually assigned by the system. If specified, it will be allocated to the service
	// if unused or else creation of the service will fail.
	// Default is to auto-allocate a port if the ServiceType of this Service requires one.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
	// +optional
	NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Service is a named abstraction of software service (for example, mysql) consisting of local port
// (for example 3306) that the proxy listens on, and the selector that determines which pods
// will answer requests sent through the proxy.
type Service struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the behavior of a service.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Most recently observed status of the service.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
const (
	// ClusterIPNone - do not assign a cluster IP:
	// no proxying required and no environment variables should be created for pods.
	ClusterIPNone = "None"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ServiceList holds a list of services.
type ServiceList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of services.
	Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ServiceAccount binds together:
// * a name, understood by users, and perhaps by peripheral systems, for an identity
// * a principal that can be authenticated and authorized
// * a set of secrets
type ServiceAccount struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount.
	// More info: https://kubernetes.io/docs/concepts/configuration/secret
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"`
	// ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
	// in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
	// can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
	// More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
	// +optional
	ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"`
	// AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.
	// Can be overridden at the pod level.
	// +optional
	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,4,opt,name=automountServiceAccountToken"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceAccountList is a list of ServiceAccount objects
type ServiceAccountList struct {
	// Standard type metadata.
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of ServiceAccounts.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
	Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Endpoints is a collection of endpoints that implement the actual service. Example:
//   Name: "mysvc",
//   Subsets: [
//     {
//       Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
//       Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
//     },
//     {
//       Addresses: [{"ip": "10.10.3.3"}],
//       Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
//     },
//  ]
type Endpoints struct {
	// Standard type metadata.
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Subsets is the sets of addresses and ports that comprise a service.
	// The set of all endpoints is the union of all subsets. Addresses are placed into
	// subsets according to the IPs they share. A single address with multiple ports,
	// some of which are ready and some of which are not (because they come from
	// different containers) will result in the address being displayed in different
	// subsets for the different ports. No address will appear in both Addresses and
	// NotReadyAddresses in the same subset.
	Subsets []EndpointSubset `json:"subsets" protobuf:"bytes,2,rep,name=subsets"`
}
// EndpointSubset is a group of addresses with a common set of ports. The
// expanded set of endpoints is the Cartesian product of Addresses x Ports.
// For example, given:
//   {
//     Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
//     Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
//   }
// The resulting set of endpoints can be viewed as:
//     a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
//     b: [ 10.10.1.1:309, 10.10.2.2:309 ]
// (four address:port pairs in total).
type EndpointSubset struct {
	// IP addresses which offer the related ports that are marked as ready. These endpoints
	// should be considered safe for load balancers and clients to utilize.
	// +optional
	Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"`
	// IP addresses which offer the related ports but are not currently marked as ready
	// because they have not yet finished starting, have recently failed a readiness check,
	// or have recently failed a liveness check.
	// +optional
	NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"`
	// Port numbers available on the related IP addresses.
	// +optional
	Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"`
}
// EndpointAddress is a tuple that describes single IP address.
type EndpointAddress struct {
	// The IP of this endpoint.
	// May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16),
	// or link-local multicast (224.0.0.0/24).
	// IPv6 is also accepted but not fully supported on all platforms. Also, certain
	// kubernetes components, like kube-proxy, are not IPv6 ready.
	// TODO: This should allow hostname or IP, See #4447.
	IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
	// The Hostname of this endpoint.
	// +optional
	Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"`
	// Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
	// +optional
	NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,4,opt,name=nodeName"`
	// Reference to object providing the endpoint.
	// +optional
	TargetRef *ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"`
}
// EndpointPort is a tuple that describes a single port.
type EndpointPort struct {
	// Name is the name of this port (corresponds to ServicePort.Name).
	// Must be a DNS_LABEL.
	// Optional only if one port is defined.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// Port is the port number of the endpoint.
	Port int32 `json:"port" protobuf:"varint,2,opt,name=port"`
	// Protocol is the IP protocol for this port.
	// Must be UDP or TCP.
	// Default is TCP.
	// +optional
	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EndpointsList is a list of endpoints.
type EndpointsList struct {
	// Standard type metadata.
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of endpoints.
	Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// NodeSpec describes the attributes that a node is created with.
type NodeSpec struct {
	// PodCIDR represents the pod IP range assigned to the node.
	// +optional
	PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"`
	// External ID of the node assigned by some machine database (e.g. a cloud provider).
	// Deprecated.
	// +optional
	ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"`
	// ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
	// +optional
	ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"`
	// Unschedulable controls node schedulability of new pods. By default, node is schedulable.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration
	// +optional
	Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"`
	// If specified, the node's taints.
	// NOTE(review): the protobuf tag says "opt" even though this field is
	// repeated; do not "fix" the tag without regenerating the marshalers —
	// it is part of the established wire format.
	// +optional
	Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"`
	// If specified, the source to get node configuration from.
	// The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field.
	// +optional
	ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
type NodeConfigSource struct {
	// Standard type metadata.
	metav1.TypeMeta `json:",inline"`
	// ConfigMapRef is a reference to the object holding the node configuration
	// (per the field name, presumably a ConfigMap — semantics are enforced by the
	// consumer, not visible here).
	ConfigMapRef *ObjectReference `json:"configMapRef,omitempty" protobuf:"bytes,1,opt,name=configMapRef"`
}
// DaemonEndpoint contains information about a single Daemon endpoint.
type DaemonEndpoint struct {
	// The port tag was not properly in quotes in earlier releases, so it must be
	// uppercased for backwards compat (since it was falling back to var name of
	// 'Port').

	// Port number of the given endpoint.
	Port int32 `json:"Port" protobuf:"varint,1,opt,name=Port"`
}
// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
type NodeDaemonEndpoints struct {
	// KubeletEndpoint is the endpoint on which the Kubelet is listening.
	// +optional
	KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"`
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
type NodeSystemInfo struct {
	// MachineID reported by the node. For unique machine identification
	// in the cluster this field is preferred. Learn more from man(5)
	// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
	MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"`
	// SystemUUID reported by the node. For unique machine identification
	// MachineID is preferred. This field is specific to Red Hat hosts:
	// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
	SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"`
	// Boot ID reported by the node.
	BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
	// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
	KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"`
	// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
	OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"`
	// ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
	ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"`
	// Kubelet Version reported by the node.
	KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"`
	// KubeProxy Version reported by the node.
	KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"`
	// The Operating System reported by the node.
	OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
	// The Architecture reported by the node.
	Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"`
}
// NodeStatus is information about the current status of a node.
type NodeStatus struct {
	// Capacity represents the total resources of a node.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
	// +optional
	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
	// Allocatable represents the resources of a node that are available for scheduling.
	// Defaults to Capacity.
	// +optional
	Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"`
	// NodePhase is the recently observed lifecycle phase of the node.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#phase
	// The field is never populated, and now is deprecated.
	// +optional
	Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"`
	// Conditions is an array of current observed node conditions.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#condition
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
	// List of addresses reachable to the node.
	// Queried from cloud provider, if available.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"`
	// Endpoints of daemons running on the Node.
	// +optional
	DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"`
	// Set of ids/uuids to uniquely identify the node.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#info
	// +optional
	NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
	// List of container images on this node.
	// +optional
	Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"`
	// List of attachable volumes in use (mounted) by the node.
	// +optional
	VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"`
	// List of volumes that are attached to the node.
	// +optional
	VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"`
}
// UniqueVolumeName identifies a volume attached to (or in use by) a node;
// see NodeStatus.VolumesInUse and AttachedVolume.Name.
type UniqueVolumeName string
// AttachedVolume describes a volume attached to a node.
type AttachedVolume struct {
	// Name of the attached volume.
	Name UniqueVolumeName `json:"name" protobuf:"bytes,1,rep,name=name"`
	// DevicePath represents the device path where the volume should be available.
	DevicePath string `json:"devicePath" protobuf:"bytes,2,rep,name=devicePath"`
}
// AvoidPods describes pods that should avoid this node. This is the value for a
// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and
// will eventually become a field of NodeStatus.
type AvoidPods struct {
	// Bounded-size list of signatures of pods that should avoid this node, sorted
	// in timestamp order from oldest to newest. Size of the slice is unspecified.
	// +optional
	PreferAvoidPods []PreferAvoidPodsEntry `json:"preferAvoidPods,omitempty" protobuf:"bytes,1,rep,name=preferAvoidPods"`
}
// PreferAvoidPodsEntry describes a class of pods that should avoid this node.
type PreferAvoidPodsEntry struct {
	// The class of pods.
	PodSignature PodSignature `json:"podSignature" protobuf:"bytes,1,opt,name=podSignature"`
	// Time at which this entry was added to the list.
	// +optional
	EvictionTime metav1.Time `json:"evictionTime,omitempty" protobuf:"bytes,2,opt,name=evictionTime"`
	// (brief) reason why this entry was added to the list.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
	// Human readable message indicating why this entry was added to the list.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
}
// PodSignature describes the class of pods that should avoid this node.
// Exactly one field should be set.
type PodSignature struct {
	// Reference to controller whose pods should avoid this node.
	// +optional
	PodController *metav1.OwnerReference `json:"podController,omitempty" protobuf:"bytes,1,opt,name=podController"`
}
// ContainerImage describes a container image present on a node.
type ContainerImage struct {
	// Names by which this image is known.
	// e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]
	Names []string `json:"names" protobuf:"bytes,1,rep,name=names"`
	// The size of the image in bytes.
	// +optional
	SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"`
}
// NodePhase is the lifecycle phase of a node; see the constants below for the
// valid values.
type NodePhase string
// These are the valid phases of node.
const (
	// NodePending means the node has been created/added by the system, but not configured.
	NodePending NodePhase = "Pending"
	// NodeRunning means the node has been configured and has Kubernetes components running.
	NodeRunning NodePhase = "Running"
	// NodeTerminated means the node has been removed from the cluster.
	NodeTerminated NodePhase = "Terminated"
)
// NodeConditionType identifies a kind of node condition; see the constants
// below for the currently defined values.
type NodeConditionType string
// These are valid conditions of node. Currently, we don't have enough information to decide
// node condition. In the future, we will add more. The proposed set of conditions are:
// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable.
const (
	// NodeReady means kubelet is healthy and ready to accept pods.
	NodeReady NodeConditionType = "Ready"
	// NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk
	// space on the node.
	NodeOutOfDisk NodeConditionType = "OutOfDisk"
	// NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
	NodeMemoryPressure NodeConditionType = "MemoryPressure"
	// NodeDiskPressure means the kubelet is under pressure due to insufficient available disk.
	NodeDiskPressure NodeConditionType = "DiskPressure"
	// NodeNetworkUnavailable means that network for the node is not correctly configured.
	NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
	// NodeConfigOK indicates whether the kubelet is correctly configured.
	NodeConfigOK NodeConditionType = "ConfigOK"
)
// NodeCondition contains condition information for a node.
type NodeCondition struct {
	// Type of node condition.
	Type NodeConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeConditionType"`
	// Status of the condition, one of True, False, Unknown.
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// Last time we got an update on a given condition.
	// +optional
	LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"`
	// Last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
	// (brief) reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
	// Human readable message indicating details about last transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// NodeAddressType identifies the kind of a node address; see the constants
// below for the valid values.
type NodeAddressType string
// These are valid address type of node.
const (
	NodeHostName    NodeAddressType = "Hostname"
	NodeExternalIP  NodeAddressType = "ExternalIP"
	NodeInternalIP  NodeAddressType = "InternalIP"
	NodeExternalDNS NodeAddressType = "ExternalDNS"
	NodeInternalDNS NodeAddressType = "InternalDNS"
)
// NodeAddress contains information for the node's address.
type NodeAddress struct {
	// Node address type, one of Hostname, ExternalIP, InternalIP, ExternalDNS or InternalDNS.
	Type NodeAddressType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeAddressType"`
	// The node address.
	Address string `json:"address" protobuf:"bytes,2,opt,name=address"`
}
// ResourceName is the name identifying various resources in a ResourceList.
type ResourceName string
// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters,
// with the -, _, and . characters allowed anywhere, except the first or last character.
// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than
// camel case, separating compound words.
// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name.
const (
	// CPU, in cores. (500m = .5 cores)
	ResourceCPU ResourceName = "cpu"
	// Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceMemory ResourceName = "memory"
	// Volume size, in bytes (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
	ResourceStorage ResourceName = "storage"
	// Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	// The resource name for ResourceEphemeralStorage is alpha and it can change across releases.
	ResourceEphemeralStorage ResourceName = "ephemeral-storage"
	// NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned.
	ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu"
)
const (
	// Default namespace prefix.
	ResourceDefaultNamespacePrefix = "kubernetes.io/"
	// Name prefix for huge page resources (alpha).
	ResourceHugePagesPrefix = "hugepages-"
)
// ResourceList is a set of (resource name, quantity) pairs.
type ResourceList map[ResourceName]resource.Quantity
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Node is a worker node in Kubernetes.
// Each node will have a unique identifier in the cache (i.e. in etcd).
type Node struct {
	// Standard type metadata.
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the behavior of a node.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Most recently observed status of the node.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeList is the whole list of all Nodes which have been registered with master.
type NodeList struct {
	// Standard type metadata.
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of nodes.
	Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// FinalizerName is the name identifying a finalizer during namespace lifecycle.
type FinalizerName string
// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or
// in metav1.
const (
	// FinalizerKubernetes is the built-in namespace finalizer value.
	FinalizerKubernetes FinalizerName = "kubernetes"
)
// NamespaceSpec describes the attributes of a Namespace.
type NamespaceSpec struct {
	// Finalizers is an opaque list of values that must be empty to permanently remove object from storage.
	// More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
	// +optional
	Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"`
}
// NamespaceStatus is information about the current status of a Namespace.
type NamespaceStatus struct {
	// Phase is the current lifecycle phase of the namespace (see NamespacePhase).
	// More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
	// +optional
	Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"`
}
// NamespacePhase is the lifecycle phase of a namespace; see the constants
// below for the valid values.
type NamespacePhase string
// These are the valid phases of a namespace.
const (
	// NamespaceActive means the namespace is available for use in the system
	NamespaceActive NamespacePhase = "Active"
	// NamespaceTerminating means the namespace is undergoing graceful termination
	NamespaceTerminating NamespacePhase = "Terminating"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Namespace provides a scope for Names.
// Use of multiple namespaces is optional.
type Namespace struct {
	// Standard type metadata.
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the behavior of the Namespace.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status describes the current status of a Namespace.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NamespaceList is a list of Namespaces.
type NamespaceList struct {
	// Standard type metadata.
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of Namespace objects in the list.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
	Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
// Deprecated in 1.7, please use the bindings subresource of pods instead.
type Binding struct {
	// Standard type metadata.
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// The target object that you want to bind to the standard object.
	Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"`
}
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
// +k8s:openapi-gen=false
type Preconditions struct {
	// UID, when set, specifies the target UID the operation must match.
	// +optional
	UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
}
// DeletionPropagation decides if a deletion will propagate to the dependents of the object, and how the garbage collector will handle the propagation.
type DeletionPropagation string
const (
	// Orphans the dependents.
	DeletePropagationOrphan DeletionPropagation = "Orphan"
	// Deletes the object from the key-value store, the garbage collector will delete the dependents in the background.
	DeletePropagationBackground DeletionPropagation = "Background"
	// The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store.
	// API server will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp.
	// This policy is cascading, i.e., the dependents will be deleted with Foreground.
	DeletePropagationForeground DeletionPropagation = "Foreground"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DeleteOptions may be provided when deleting an API object
// DEPRECATED: This type has been moved to meta/v1 and will be removed soon.
// +k8s:openapi-gen=false
type DeleteOptions struct {
	// Standard type metadata.
	metav1.TypeMeta `json:",inline"`
	// The duration in seconds before the object should be deleted. Value must be non-negative integer.
	// The value zero indicates delete immediately. If this value is nil, the default grace period for the
	// specified type will be used.
	// Defaults to a per object value if not specified. zero means delete immediately.
	// +optional
	GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"`
	// Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
	// returned.
	// +optional
	Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"`
	// Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.
	// Should the dependent objects be orphaned. If true/false, the "orphan"
	// finalizer will be added to/removed from the object's finalizers list.
	// Either this field or PropagationPolicy may be set, but not both.
	// +optional
	OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"`
	// Whether and how garbage collection will be performed.
	// Either this field or OrphanDependents may be set, but not both.
	// The default policy is decided by the existing finalizer set in the
	// metadata.finalizers and the resource-specific default policy.
	// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
	// allow the garbage collector to delete the dependents in the background;
	// 'Foreground' - a cascading policy that deletes all dependents in the
	// foreground.
	// BUGFIX: the json tag was missing, so encoding/json serialized this field
	// as "PropagationPolicy" instead of the camelCase name declared in its own
	// protobuf tag (and used by every sibling field and by metav1.DeleteOptions).
	// +optional
	PropagationPolicy *DeletionPropagation `json:"propagationPolicy,omitempty" protobuf:"bytes,4,opt,name=propagationPolicy,casttype=DeletionPropagation"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ListOptions is the query options to a standard REST list call.
// DEPRECATED: This type has been moved to meta/v1 and will be removed soon.
// +k8s:openapi-gen=false
type ListOptions struct {
	// Standard type metadata.
	metav1.TypeMeta `json:",inline"`
	// A selector to restrict the list of returned objects by their labels.
	// Defaults to everything.
	// +optional
	LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
	// A selector to restrict the list of returned objects by their fields.
	// Defaults to everything.
	// +optional
	FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"`
	// If true, partially initialized resources are included in the response.
	// (Declared out of protobuf-field order; the wire numbers, not the Go
	// declaration order, are authoritative for proto.)
	// +optional
	IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,6,opt,name=includeUninitialized"`
	// Watch for changes to the described resources and return them as a stream of
	// add, update, and remove notifications. Specify resourceVersion.
	// +optional
	Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"`
	// When specified with a watch call, shows changes that occur after that particular version of a resource.
	// Defaults to changes from the beginning of history.
	// When specified for list:
	// - if unset, then the result is returned from remote storage based on quorum-read flag;
	// - if it's 0, then we simply return what we currently have in cache, no guarantee;
	// - if set to non zero, then the result is at least as fresh as given rv.
	// +optional
	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"`
	// Timeout for the list/watch call.
	// +optional
	TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodLogOptions is the query options for a Pod's logs REST call.
type PodLogOptions struct {
	// Standard type metadata.
	metav1.TypeMeta `json:",inline"`
	// The container for which to stream logs. Defaults to only container if there is one container in the pod.
	// +optional
	Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
	// Follow the log stream of the pod. Defaults to false.
	// +optional
	Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
	// Return previous terminated container logs. Defaults to false.
	// +optional
	Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
	// A relative time in seconds before the current time from which to show logs. If this value
	// precedes the time a pod was started, only logs since the pod start will be returned.
	// If this value is in the future, no logs will be returned.
	// Only one of sinceSeconds or sinceTime may be specified.
	// +optional
	SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
	// An RFC3339 timestamp from which to show logs. If this value
	// precedes the time a pod was started, only logs since the pod start will be returned.
	// If this value is in the future, no logs will be returned.
	// Only one of sinceSeconds or sinceTime may be specified.
	// +optional
	SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
	// If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
	// of log output. Defaults to false.
	// +optional
	Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
	// If set, the number of lines from the end of the logs to show. If not specified,
	// logs are shown from the creation of the container or sinceSeconds or sinceTime.
	// +optional
	TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
	// If set, the number of bytes to read from the server before terminating the
	// log output. This may not display a complete final line of logging, and may return
	// slightly more or slightly less than the specified limit.
	// +optional
	LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodAttachOptions is the query options to a Pod's remote attach call.
// ---
// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
type PodAttachOptions struct {
metav1.TypeMeta `json:",inline"`
// Stdin if true, redirects the standard input stream of the pod for this call.
// Defaults to false.
// +optional
Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
// Stdout if true indicates that stdout is to be redirected for the attach call.
// Defaults to true.
// +optional
Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
// Stderr if true indicates that stderr is to be redirected for the attach call.
// Defaults to true.
// +optional
Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
// TTY if true indicates that a tty will be allocated for the attach call.
// This is passed through the container runtime so the tty
// is allocated on the worker node by the container runtime.
// Defaults to false.
// +optional
TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
// The container in which to execute the command.
// Defaults to only container if there is only one container in the pod.
// +optional
Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodExecOptions is the query options to a Pod's remote exec call.
// ---
// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging
// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
type PodExecOptions struct {
metav1.TypeMeta `json:",inline"`
// Redirect the standard input stream of the pod for this call.
// Defaults to false.
// +optional
Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
// Redirect the standard output stream of the pod for this call.
// Defaults to true.
// +optional
Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
// Redirect the standard error stream of the pod for this call.
// Defaults to true.
// +optional
Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
// TTY if true indicates that a tty will be allocated for the exec call.
// Defaults to false.
// +optional
TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
// Container in which to execute the command.
// Defaults to only container if there is only one container in the pod.
// +optional
Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
// Command is the remote command to execute. argv array. Not executed within a shell.
Command []string `json:"command" protobuf:"bytes,6,rep,name=command"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodPortForwardOptions is the query options to a Pod's port forward call
// when using WebSockets.
// The `port` query parameter must specify the port or
// ports (comma separated) to forward over.
// Port forwarding over SPDY does not use these options. It requires the port
// to be passed in the `port` header as part of the request.
type PodPortForwardOptions struct {
metav1.TypeMeta `json:",inline"`
// List of ports to forward.
// Required when using WebSockets.
// +optional
Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodProxyOptions is the query options to a Pod's proxy call.
type PodProxyOptions struct {
metav1.TypeMeta `json:",inline"`
// Path is the URL path to use for the current proxy request to the pod.
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeProxyOptions is the query options to a Node's proxy call.
type NodeProxyOptions struct {
metav1.TypeMeta `json:",inline"`
// Path is the URL path to use for the current proxy request to the node.
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceProxyOptions is the query options to a Service's proxy call.
type ServiceProxyOptions struct {
metav1.TypeMeta `json:",inline"`
// Path is the part of URLs that include service endpoints, suffixes,
// and parameters to use for the current proxy request to the service.
// For example, if the whole request URL is
// http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy,
// then Path is _search?q=user:kimchy.
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// ObjectReference contains enough information to let you inspect or modify the referred object.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ObjectReference struct {
// Kind of the referent.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
// Namespace of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
// +optional
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
// Name of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
// UID of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
// +optional
UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
// API version of the referent.
// +optional
APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"`
// Specific resourceVersion to which this reference is made, if any.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
// +optional
ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
// If referring to a piece of an object instead of an entire object, this string
// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
// For example, if the object reference is to a container within a pod, this would take on a value like:
// "spec.containers{name}" (where "name" refers to the name of the container that triggered
// the event) or if no container name is specified "spec.containers[2]" (container with
// index 2 in this pod). This syntax is chosen only to have some well-defined way of
// referencing a part of an object.
// TODO: this design is not final and this field is subject to change in the future.
// +optional
FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,7,opt,name=fieldPath"`
}
// LocalObjectReference contains enough information to let you locate the
// referenced object inside the same namespace.
type LocalObjectReference struct {
// Name of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// TODO: Add other useful fields. apiVersion, kind, uid?
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SerializedReference is a reference to a serialized object.
type SerializedReference struct {
metav1.TypeMeta `json:",inline"`
// The reference to an object in the system.
// +optional
Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"`
}
// EventSource contains information about the reporter of an event.
type EventSource struct {
// Component from which the event is generated.
// +optional
Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"`
// Node name on which the event is generated.
// +optional
Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}
// Valid values for event types (new types could be added in future).
const (
// EventTypeNormal is for information only and will not cause any problems.
EventTypeNormal string = "Normal"
// EventTypeWarning marks events that warn that something might go wrong.
EventTypeWarning string = "Warning"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Event is a report of an event somewhere in the cluster.
type Event struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
// The object that this event is about.
InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"`
// This should be a short, machine understandable string that gives the reason
// for the transition into the object's current status.
// TODO: provide exact specification for format.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
// A human-readable description of the status of this operation.
// TODO: decide on maximum length.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
// The component reporting this event. Should be a short machine understandable string.
// +optional
Source EventSource `json:"source,omitempty" protobuf:"bytes,5,opt,name=source"`
// The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
// +optional
FirstTimestamp metav1.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"`
// The time at which the most recent occurrence of this event was recorded.
// +optional
LastTimestamp metav1.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"`
// The number of times this event has occurred.
// +optional
Count int32 `json:"count,omitempty" protobuf:"varint,8,opt,name=count"`
// Type of this event (Normal, Warning); new types could be added in the future.
// +optional
Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"`
// Time when this Event was first observed.
// +optional
EventTime metav1.MicroTime `json:"eventTime,omitempty" protobuf:"bytes,10,opt,name=eventTime"`
// Data about the Event series this event represents or nil if it's a singleton Event.
// +optional
Series *EventSeries `json:"series,omitempty" protobuf:"bytes,11,opt,name=series"`
// What action was taken/failed regarding the Regarding object.
// +optional
Action string `json:"action,omitempty" protobuf:"bytes,12,opt,name=action"`
// Optional secondary object for more complex actions.
// +optional
Related *ObjectReference `json:"related,omitempty" protobuf:"bytes,13,opt,name=related"`
// Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
// +optional
ReportingController string `json:"reportingComponent" protobuf:"bytes,14,opt,name=reportingComponent"`
// ID of the controller instance, e.g. `kubelet-xyzf`.
// +optional
ReportingInstance string `json:"reportingInstance" protobuf:"bytes,15,opt,name=reportingInstance"`
}
// EventSeries contains information on a series of events, i.e. a thing that was/is happening
// continuously for some time.
type EventSeries struct {
// Number of occurrences in this series up to the last heartbeat time.
Count int32 `json:"count,omitempty" protobuf:"varint,1,name=count"`
// Time of the last occurrence observed.
LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"`
// State of this Series: Ongoing or Finished.
State EventSeriesState `json:"state,omitempty" protobuf:"bytes,3,name=state"`
}
// EventSeriesState denotes whether an event series is still being observed.
type EventSeriesState string
// Valid values of EventSeriesState.
const (
EventSeriesStateOngoing EventSeriesState = "Ongoing"
EventSeriesStateFinished EventSeriesState = "Finished"
EventSeriesStateUnknown EventSeriesState = "Unknown"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EventList is a list of events.
type EventList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of events.
Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// List holds a list of objects, which may not be known by the server.
type List metav1.List
// LimitType is a type of object that is limited.
type LimitType string
// Valid values of LimitType.
const (
// LimitTypePod is a limit that applies to all pods in a namespace.
LimitTypePod LimitType = "Pod"
// LimitTypeContainer is a limit that applies to all containers in a namespace.
LimitTypeContainer LimitType = "Container"
// LimitTypePersistentVolumeClaim is a limit that applies to all persistent volume claims in a namespace.
LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim"
)
// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
type LimitRangeItem struct {
// Type of resource that this limit applies to.
// +optional
Type LimitType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=LimitType"`
// Max usage constraints on this kind by resource name.
// +optional
Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"`
// Min usage constraints on this kind by resource name.
// +optional
Min ResourceList `json:"min,omitempty" protobuf:"bytes,3,rep,name=min,casttype=ResourceList,castkey=ResourceName"`
// Default resource requirement limit value by resource name if resource limit is omitted.
// +optional
Default ResourceList `json:"default,omitempty" protobuf:"bytes,4,rep,name=default,casttype=ResourceList,castkey=ResourceName"`
// DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
// +optional
DefaultRequest ResourceList `json:"defaultRequest,omitempty" protobuf:"bytes,5,rep,name=defaultRequest,casttype=ResourceList,castkey=ResourceName"`
// MaxLimitRequestRatio, if specified, requires the named resource to have a request and limit that are both non-zero,
// where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
// +optional
MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty" protobuf:"bytes,6,rep,name=maxLimitRequestRatio,casttype=ResourceList,castkey=ResourceName"`
}
// LimitRangeSpec defines a min/max usage limit for resources that match on kind.
type LimitRangeSpec struct {
// Limits is the list of LimitRangeItem objects that are enforced.
Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// LimitRange sets resource usage limits for each kind of resource in a Namespace.
type LimitRange struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the limits enforced.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// LimitRangeList is a list of LimitRange items.
type LimitRangeList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is a list of LimitRange objects.
// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// The following identify resource constants for Kubernetes object types.
const (
// Pods, number
ResourcePods ResourceName = "pods"
// Services, number
ResourceServices ResourceName = "services"
// ReplicationControllers, number
ResourceReplicationControllers ResourceName = "replicationcontrollers"
// ResourceQuotas, number
ResourceQuotas ResourceName = "resourcequotas"
// ResourceSecrets, number
ResourceSecrets ResourceName = "secrets"
// ResourceConfigMaps, number
ResourceConfigMaps ResourceName = "configmaps"
// ResourcePersistentVolumeClaims, number
ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims"
// ResourceServicesNodePorts, number
ResourceServicesNodePorts ResourceName = "services.nodeports"
// ResourceServicesLoadBalancers, number
ResourceServicesLoadBalancers ResourceName = "services.loadbalancers"
// CPU request, in cores. (500m = .5 cores)
ResourceRequestsCPU ResourceName = "requests.cpu"
// Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceRequestsMemory ResourceName = "requests.memory"
// Storage request, in bytes
ResourceRequestsStorage ResourceName = "requests.storage"
// Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage"
// CPU limit, in cores. (500m = .5 cores)
ResourceLimitsCPU ResourceName = "limits.cpu"
// Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceLimitsMemory ResourceName = "limits.memory"
// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
)
// The following identify resource prefixes for Kubernetes object types.
const (
// HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
// As burst is not supported for HugePages, we only quota its request, and ignore the limit.
ResourceRequestsHugePagesPrefix = "requests.hugepages-"
)
// A ResourceQuotaScope defines a filter that must match each object tracked by a quota.
type ResourceQuotaScope string
// Valid values of ResourceQuotaScope.
const (
// ResourceQuotaScopeTerminating matches all pod objects where spec.activeDeadlineSeconds is set.
ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating"
// ResourceQuotaScopeNotTerminating matches all pod objects where spec.activeDeadlineSeconds is not set.
ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating"
// ResourceQuotaScopeBestEffort matches all pod objects that have best effort quality of service.
ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort"
// ResourceQuotaScopeNotBestEffort matches all pod objects that do not have best effort quality of service.
ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort"
)
// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
type ResourceQuotaSpec struct {
// Hard is the set of desired hard limits for each named resource.
// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
// +optional
Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
// A collection of filters that must match each object tracked by a quota.
// If not specified, the quota matches all objects.
// +optional
Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"`
}
// ResourceQuotaStatus defines the enforced hard limits and observed use.
type ResourceQuotaStatus struct {
// Hard is the set of enforced hard limits for each named resource.
// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
// +optional
Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
// Used is the current observed total usage of the resource in the namespace.
// +optional
Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceQuota sets aggregate quota restrictions enforced per namespace.
type ResourceQuota struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the desired quota.
// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Status defines the actual enforced quota and its current usage.
// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceQuotaList is a list of ResourceQuota items.
type ResourceQuotaList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is a list of ResourceQuota objects.
// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Secret holds secret data of a certain type. The total bytes of the values in
// the Data field must be less than MaxSecretSize bytes.
type Secret struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Data contains the secret data. Each key must consist of alphanumeric
// characters, '-', '_' or '.'. The serialized form of the secret data is a
// base64 encoded string, representing the arbitrary (possibly non-string)
// data value here. Described in https://tools.ietf.org/html/rfc4648#section-4
// +optional
Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
// stringData allows specifying non-binary secret data in string form.
// It is provided as a write-only convenience method.
// All keys and values are merged into the data field on write, overwriting any existing values.
// It is never output when reading from the API.
// +k8s:conversion-gen=false
// +optional
StringData map[string]string `json:"stringData,omitempty" protobuf:"bytes,4,rep,name=stringData"`
// Used to facilitate programmatic handling of secret data.
// +optional
Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"`
}
// MaxSecretSize is the maximum total size, in bytes, of a Secret's Data values (1 MiB).
const MaxSecretSize = 1 * 1024 * 1024
// SecretType facilitates programmatic handling of secret data.
type SecretType string
const (
// SecretTypeOpaque is the default. Arbitrary user-defined data
SecretTypeOpaque SecretType = "Opaque"
// SecretTypeServiceAccountToken contains a token that identifies a service account to the API
//
// Required fields:
// - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies
// - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies
// - Secret.Data["token"] - a token that identifies the service account to the API
SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token"
// ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
ServiceAccountNameKey = "kubernetes.io/service-account.name"
// ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
ServiceAccountUIDKey = "kubernetes.io/service-account.uid"
// ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets
ServiceAccountTokenKey = "token"
// ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets
ServiceAccountKubeconfigKey = "kubernetes.kubeconfig"
// ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets
ServiceAccountRootCAKey = "ca.crt"
// ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls
ServiceAccountNamespaceKey = "namespace"
// SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg
//
// Required fields:
// - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file
SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg"
// DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets
DockerConfigKey = ".dockercfg"
// SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json
//
// Required fields:
// - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file
SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson"
// DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets
DockerConfigJsonKey = ".dockerconfigjson"
// SecretTypeBasicAuth contains data needed for basic authentication.
//
// Required at least one of fields:
// - Secret.Data["username"] - username used for authentication
// - Secret.Data["password"] - password or token needed for authentication
SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth"
// BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets
BasicAuthUsernameKey = "username"
// BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets
BasicAuthPasswordKey = "password"
// SecretTypeSSHAuth contains data needed for SSH authentication.
//
// Required field:
// - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication
SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth"
// SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets
SSHAuthPrivateKey = "ssh-privatekey"
// SecretTypeTLS contains information about a TLS client or server secret. It
// is primarily used with TLS termination of the Ingress resource, but may be
// used in other types.
//
// Required fields:
// - Secret.Data["tls.key"] - TLS private key.
// - Secret.Data["tls.crt"] - TLS certificate.
// TODO: Consider supporting different formats, specifying CA/destinationCA.
SecretTypeTLS SecretType = "kubernetes.io/tls"
// TLSCertKey is the key for tls certificates in a TLS secret.
TLSCertKey = "tls.crt"
// TLSPrivateKeyKey is the key for the private key field in a TLS secret.
TLSPrivateKeyKey = "tls.key"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SecretList is a list of Secret.
type SecretList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is a list of secret objects.
// More info: https://kubernetes.io/docs/concepts/configuration/secret
Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ConfigMap holds configuration data for pods to consume.
type ConfigMap struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Data contains the configuration data.
// Each key must consist of alphanumeric characters, '-', '_' or '.'.
// +optional
Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ConfigMapList is a resource containing a list of ConfigMap objects.
type ConfigMapList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of ConfigMaps.
Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ComponentConditionType is the type used for component health validation conditions.
type ComponentConditionType string
// These are the valid conditions for the component.
const (
ComponentHealthy ComponentConditionType = "Healthy"
)
// ComponentCondition holds information about the condition of a component.
type ComponentCondition struct {
// Type of condition for a component.
// Valid value: "Healthy"
Type ComponentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ComponentConditionType"`
// Status of the condition for a component.
// Valid values for "Healthy": "True", "False", or "Unknown".
Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
// Message about the condition for a component.
// For example, information about a health check.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
// Condition error code for a component.
// For example, a health check error code.
// +optional
Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
type ComponentStatus struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of component conditions observed.
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComponentStatusList holds the status of all the conditions for the components as a list of ComponentStatus objects.
type ComponentStatusList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of ComponentStatus objects.
Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// DownwardAPIVolumeSource represents a volume containing downward API info.
// Downward API volumes support ownership management and SELinux relabeling.
type DownwardAPIVolumeSource struct {
	// Items is a list of downward API volume file descriptors.
	// +optional
	Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
	// Optional: mode bits to use on created files by default. Must be a
	// value between 0 and 0777. Defaults to 0644.
	// Directories within the path are not affected by this setting.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
}
const (
	// DownwardAPIVolumeSourceDefaultMode is the default mode bits (0644,
	// i.e. rw-r--r--) applied when DefaultMode is not specified.
	DownwardAPIVolumeSourceDefaultMode int32 = 0644
)
// DownwardAPIVolumeFile represents information to create the file containing the pod field.
type DownwardAPIVolumeFile struct {
	// Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
	// Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
	// +optional
	FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"`
	// Selects a resource of the container: only resources limits and requests
	// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
	// +optional
	ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"`
	// Optional: mode bits to use on this file, must be a value between 0
	// and 0777. If not specified, the volume defaultMode will be used.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	Mode *int32 `json:"mode,omitempty" protobuf:"varint,4,opt,name=mode"`
}
// DownwardAPIProjection represents downward API info for projecting into a projected volume.
// Note that this is identical to a downwardAPI volume source without the default
// mode.
type DownwardAPIProjection struct {
	// Items is a list of DownwardAPIVolume file descriptors.
	// +optional
	Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
}
// SecurityContext holds security configuration that will be applied to a container.
// Some fields are present in both SecurityContext and PodSecurityContext. When both
// are set, the values in SecurityContext take precedence.
type SecurityContext struct {
	// The capabilities to add/drop when running containers.
	// Defaults to the default set of capabilities granted by the container runtime.
	// +optional
	Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"`
	// Run container in privileged mode.
	// Processes in privileged containers are essentially equivalent to root on the host.
	// Defaults to false.
	// +optional
	Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"`
	// The SELinux context to be applied to the container.
	// If unspecified, the container runtime will allocate a random SELinux context for each
	// container. May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"`
	// The UID to run the entrypoint of the container process.
	// Defaults to user specified in image metadata if unspecified.
	// May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"`
	// Indicates that the container must run as a non-root user.
	// If true, the Kubelet will validate the image at runtime to ensure that it
	// does not run as UID 0 (root) and fail to start the container if it does.
	// If unset or false, no such validation will be performed.
	// May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"`
	// Whether this container has a read-only root filesystem.
	// Default is false.
	// +optional
	ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"`
	// AllowPrivilegeEscalation controls whether a process can gain more
	// privileges than its parent process. This bool directly controls if
	// the no_new_privs flag will be set on the container process.
	// AllowPrivilegeEscalation is true always when the container is:
	// 1) run as Privileged
	// 2) has CAP_SYS_ADMIN
	// +optional
	AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"`
}
// SELinuxOptions are the labels to be applied to the container.
type SELinuxOptions struct {
	// User is a SELinux user label that applies to the container.
	// +optional
	User string `json:"user,omitempty" protobuf:"bytes,1,opt,name=user"`
	// Role is a SELinux role label that applies to the container.
	// +optional
	Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"`
	// Type is a SELinux type label that applies to the container.
	// +optional
	Type string `json:"type,omitempty" protobuf:"bytes,3,opt,name=type"`
	// Level is a SELinux level label that applies to the container.
	// +optional
	Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RangeAllocation is not a public type.
type RangeAllocation struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Range is a string that identifies the range represented by 'data'.
	Range string `json:"range" protobuf:"bytes,2,opt,name=range"`
	// Data is a bit array containing all allocated addresses in the previous segment.
	Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"`
}
const (
	// DefaultSchedulerName is the name of the default scheduler.
	DefaultSchedulerName = "default-scheduler"
	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
	// corresponding to every RequiredDuringScheduling affinity rule.
	// When the --hard-pod-affinity-weight scheduler flag is not specified,
	// DefaultHardPodAffinitySymmetricWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
	DefaultHardPodAffinitySymmetricWeight int32 = 1
)
// Sysctl defines a kernel parameter to be set.
// Note: these fields carry only protobuf tags (no JSON tags).
type Sysctl struct {
	// Name of the kernel property to set.
	Name string `protobuf:"bytes,1,opt,name=name"`
	// Value of the kernel property to set.
	Value string `protobuf:"bytes,2,opt,name=value"`
}
// NodeResources is an object for conveying resource information about a node.
// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.
type NodeResources struct {
	// Capacity represents the available resources of a node.
	Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
}
// Query-parameter and stream-header names used by the remote command
// (exec) and port-forward streaming subprotocols.
const (
	// Enable stdin for remote command execution
	ExecStdinParam = "input"
	// Enable stdout for remote command execution
	ExecStdoutParam = "output"
	// Enable stderr for remote command execution
	ExecStderrParam = "error"
	// Enable TTY for remote command execution
	ExecTTYParam = "tty"
	// Command to run for remote command execution
	ExecCommandParam = "command"

	// Name of header that specifies stream type
	StreamType = "streamType"
	// Value for streamType header for stdin stream
	StreamTypeStdin = "stdin"
	// Value for streamType header for stdout stream
	StreamTypeStdout = "stdout"
	// Value for streamType header for stderr stream
	StreamTypeStderr = "stderr"
	// Value for streamType header for data stream
	StreamTypeData = "data"
	// Value for streamType header for error stream
	StreamTypeError = "error"
	// Value for streamType header for terminal resize stream
	StreamTypeResize = "resize"

	// Name of header that specifies the port being forwarded
	PortHeader = "port"
	// Name of header that specifies a request ID used to associate the error
	// and data streams for a single forwarded connection
	PortForwardRequestIDHeader = "requestID"
)
|
// Package jlexer contains a JSON lexer implementation.
//
// It is expected that it is mostly used with generated parser code, so the interface is tuned
// for a parser that knows what kind of data is expected.
package jlexer
import (
"encoding/base64"
"fmt"
"io"
"reflect"
"strconv"
"unicode/utf8"
"unsafe"
)
// tokenKind determines type of a token.
type tokenKind byte
const (
	tokenUndef  tokenKind = iota // No token.
	tokenDelim                   // Delimiter: one of '{', '}', '[' or ']'.
	tokenString                  // A string literal, e.g. "abc\u1234"
	tokenNumber                  // Number literal, e.g. 1.5e5
	tokenBool                    // Boolean literal: true or false.
	tokenNull                    // null keyword.
)
// token describes a single token: type, position in the input and value.
type token struct {
	kind       tokenKind // Type of a token.
	boolValue  bool      // Value if a boolean literal token.
	byteValue  []byte    // Raw value of a token.
	delimValue byte      // Delimiter character when kind == tokenDelim.
}
// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice.
type Lexer struct {
	Data []byte // Input data given to the lexer.

	start int   // Start of the current token.
	pos   int   // Current unscanned position in the input stream.
	token token // Last scanned token, if token.kind != tokenUndef.

	firstElement bool // Whether current element is the first in array or an object.
	wantSep      byte // A comma or a colon character, which need to occur before a token.

	UseMultipleErrors bool          // If we want to use multiple errors.
	fatalError        error         // Fatal error occurred during lexing. It is usually a syntax error.
	nowSem            bool          // Whether a semantic error occurred during parsing.
	multipleErrors    []*LexerError // Semantic errors occurred during lexing. Marshalling will be continued after finding these errors.
}
// fetchToken scans the input for the next token, storing it in r.token.
// On end of input it sets r.fatalError to io.EOF.
func (r *Lexer) fetchToken() {
	r.token.kind = tokenUndef
	r.start = r.pos

	// Check if r.Data still has an r.pos element; if it doesn't, the
	// input data is corrupted.
	if len(r.Data) < r.pos {
		r.errParse("Unexpected end of data")
		return
	}
	// Determine the type of a token by skipping whitespace and reading the
	// first character.
	for _, c := range r.Data[r.pos:] {
		switch c {
		case ':', ',':
			// A separator is only consumed when one is expected
			// (set via WantColon/WantComma).
			if r.wantSep == c {
				r.pos++
				r.start++
				r.wantSep = 0
			} else {
				r.errSyntax()
			}
		case ' ', '\t', '\r', '\n':
			// Skip whitespace.
			r.pos++
			r.start++
		case '"':
			if r.wantSep != 0 {
				r.errSyntax()
			}
			r.token.kind = tokenString
			r.fetchString()
			return
		case '{', '[':
			if r.wantSep != 0 {
				r.errSyntax()
			}
			r.firstElement = true
			r.token.kind = tokenDelim
			r.token.delimValue = r.Data[r.pos]
			r.pos++
			return
		case '}', ']':
			// A closing delimiter is valid immediately after the
			// opening one (empty container) or when a comma
			// separator is pending, i.e. after a complete value.
			if !r.firstElement && (r.wantSep != ',') {
				r.errSyntax()
			}
			r.wantSep = 0
			r.token.kind = tokenDelim
			r.token.delimValue = r.Data[r.pos]
			r.pos++
			return
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-':
			if r.wantSep != 0 {
				r.errSyntax()
			}
			r.token.kind = tokenNumber
			r.fetchNumber()
			return
		case 'n':
			if r.wantSep != 0 {
				r.errSyntax()
			}
			r.token.kind = tokenNull
			r.fetchNull()
			return
		case 't':
			if r.wantSep != 0 {
				r.errSyntax()
			}
			r.token.kind = tokenBool
			r.token.boolValue = true
			r.fetchTrue()
			return
		case 'f':
			if r.wantSep != 0 {
				r.errSyntax()
			}
			r.token.kind = tokenBool
			r.token.boolValue = false
			r.fetchFalse()
			return
		default:
			r.errSyntax()
			return
		}
	}
	// Ran out of input without finding a token.
	r.fatalError = io.EOF
	return
}
// isTokenEnd reports whether c may legally follow a non-delimiter token:
// JSON whitespace, a structural delimiter, or a separator character.
func isTokenEnd(c byte) bool {
	switch c {
	case ' ', '\t', '\r', '\n', '[', ']', '{', '}', ',', ':':
		return true
	}
	return false
}
// fetchNull fetches and checks remaining bytes of null keyword.
// The first byte ('n') was already consumed by fetchToken.
func (r *Lexer) fetchNull() {
	r.pos += 4
	if r.pos > len(r.Data) ||
		r.Data[r.pos-3] != 'u' ||
		r.Data[r.pos-2] != 'l' ||
		r.Data[r.pos-1] != 'l' ||
		(r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
		// Roll back so the error position points at the keyword start.
		r.pos -= 4
		r.errSyntax()
	}
}
// fetchTrue fetches and checks remaining bytes of true keyword.
// The first byte ('t') was already consumed by fetchToken.
func (r *Lexer) fetchTrue() {
	r.pos += 4
	if r.pos > len(r.Data) ||
		r.Data[r.pos-3] != 'r' ||
		r.Data[r.pos-2] != 'u' ||
		r.Data[r.pos-1] != 'e' ||
		(r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
		// Roll back so the error position points at the keyword start.
		r.pos -= 4
		r.errSyntax()
	}
}
// fetchFalse fetches and checks remaining bytes of false keyword.
// The first byte ('f') was already consumed by fetchToken.
func (r *Lexer) fetchFalse() {
	r.pos += 5
	if r.pos > len(r.Data) ||
		r.Data[r.pos-4] != 'a' ||
		r.Data[r.pos-3] != 'l' ||
		r.Data[r.pos-2] != 's' ||
		r.Data[r.pos-1] != 'e' ||
		(r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
		// Roll back so the error position points at the keyword start.
		r.pos -= 5
		r.errSyntax()
	}
}
// bytesToStr creates a string pointing at the slice to avoid copying.
//
// Warning: the string returned by the function should be used with care, as the whole input data
// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data
// may be garbage-collected even when the string exists.
//
// NOTE(review): reflect.SliceHeader/StringHeader are deprecated in modern
// Go (unsafe.String is the current idiom) — confirm the module's minimum
// Go version before changing.
func bytesToStr(data []byte) string {
	h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
	shdr := reflect.StringHeader{h.Data, h.Len}
	return *(*string)(unsafe.Pointer(&shdr))
}
// fetchNumber scans a number literal token. The first character (digit
// or '-') was already validated by fetchToken, so scanning starts at the
// second byte.
func (r *Lexer) fetchNumber() {
	hasE := false
	afterE := false
	hasDot := false

	r.pos++
	for i, c := range r.Data[r.pos:] {
		switch {
		case c >= '0' && c <= '9':
			afterE = false
		case c == '.' && !hasDot:
			hasDot = true
		case (c == 'e' || c == 'E') && !hasE:
			hasE = true
			hasDot = true // no '.' may appear after the exponent
			afterE = true
		case (c == '+' || c == '-') && afterE:
			// A sign is only valid immediately after 'e'/'E'.
			afterE = false
		default:
			// First non-number character terminates the literal.
			r.pos += i
			if !isTokenEnd(c) {
				r.errSyntax()
			} else {
				r.token.byteValue = r.Data[r.start:r.pos]
			}
			return
		}
	}
	// The number runs to the end of the input.
	r.pos = len(r.Data)
	r.token.byteValue = r.Data[r.start:]
}
// findStringLen scans ahead in data (which begins just past an opening
// quote) for the terminating '"' to estimate the decoded string size.
// hasEscapes reports whether any backslash escape was seen; length is
// exact when hasEscapes is false and an estimate otherwise. If no closing
// quote is found, (false, len(data)) is returned.
func findStringLen(data []byte) (hasEscapes bool, length int) {
	escaped := 0
	i := 0
	for i < len(data) {
		c := data[i]
		if c == '\\' {
			// Skip the escaped character; \uXXXX escapes shrink
			// by one extra byte on decoding.
			escaped++
			i++
			if i < len(data) && data[i] == 'u' {
				escaped++
			}
			i++
			continue
		}
		if c == '"' {
			return escaped > 0, i - escaped
		}
		i++
	}
	return false, len(data)
}
// processEscape decodes a single backslash escape sequence beginning at
// data[0] (the backslash), appends the decoded bytes to r.token.byteValue,
// and returns the number of input bytes consumed, or an error for a
// malformed escape.
func (r *Lexer) processEscape(data []byte) (int, error) {
	if len(data) < 2 {
		return 0, fmt.Errorf("syntax error at %v", string(data))
	}
	c := data[1]
	switch c {
	case '"', '/', '\\':
		r.token.byteValue = append(r.token.byteValue, c)
		return 2, nil
	case 'b':
		r.token.byteValue = append(r.token.byteValue, '\b')
		return 2, nil
	case 'f':
		r.token.byteValue = append(r.token.byteValue, '\f')
		return 2, nil
	case 'n':
		r.token.byteValue = append(r.token.byteValue, '\n')
		return 2, nil
	case 'r':
		r.token.byteValue = append(r.token.byteValue, '\r')
		return 2, nil
	case 't':
		r.token.byteValue = append(r.token.byteValue, '\t')
		return 2, nil
	case 'u':
		// Fall through to the \uXXXX handling below.
	default:
		return 0, fmt.Errorf("syntax error")
	}
	// A \uXXXX escape requires exactly four hex digits. The previous code
	// silently accepted a truncated escape at end of input and reported
	// 6 consumed bytes even when fewer were available, producing a bogus
	// rune and an out-of-range offset for the caller.
	if len(data) < 6 {
		return 0, fmt.Errorf("syntax error")
	}
	var val rune
	for i := 2; i < 6; i++ {
		var v byte
		c = data[i]
		switch c {
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			v = c - '0'
		case 'a', 'b', 'c', 'd', 'e', 'f':
			v = c - 'a' + 10
		case 'A', 'B', 'C', 'D', 'E', 'F':
			v = c - 'A' + 10
		default:
			return 0, fmt.Errorf("syntax error")
		}
		val <<= 4
		val |= rune(v)
	}
	l := utf8.RuneLen(val)
	if l == -1 {
		return 0, fmt.Errorf("invalid unicode escape")
	}
	var d [4]byte
	utf8.EncodeRune(d[:], val)
	r.token.byteValue = append(r.token.byteValue, d[:l]...)
	return 6, nil
}
// fetchString scans a string literal token, decoding escape sequences
// into r.token.byteValue as needed.
func (r *Lexer) fetchString() {
	r.pos++
	data := r.Data[r.pos:]

	// Fast path: a literal without escapes can alias the input buffer
	// directly instead of being copied.
	hasEscapes, length := findStringLen(data)
	if !hasEscapes {
		r.token.byteValue = data[:length]
		r.pos += length + 1
		return
	}

	// Slow path: copy the literal piecewise, expanding each escape.
	r.token.byteValue = make([]byte, 0, length)
	p := 0
	for i := 0; i < len(data); {
		switch data[i] {
		case '"':
			r.pos += i + 1
			r.token.byteValue = append(r.token.byteValue, data[p:i]...)
			i++
			return
		case '\\':
			r.token.byteValue = append(r.token.byteValue, data[p:i]...)
			off, err := r.processEscape(data[i:])
			if err != nil {
				r.errParse(err.Error())
				return
			}
			i += off
			p = i
		default:
			i++
		}
	}
	r.errParse("unterminated string literal")
}
// scanToken scans the next token if no token is currently available in the
// lexer and no fatal error has been recorded.
func (r *Lexer) scanToken() {
	if r.token.kind != tokenUndef || r.fatalError != nil {
		return
	}
	r.fetchToken()
}
// consume resets the current token to allow scanning the next one.
func (r *Lexer) consume() {
	r.token.kind = tokenUndef
	r.token.delimValue = 0
}
// Ok returns true if no error (including io.EOF) was encountered during scanning.
func (r *Lexer) Ok() bool {
	return r.fatalError == nil
}
// maxErrorContextLen caps how much of the input is echoed in error messages.
const maxErrorContextLen = 13
// errParse records a fatal parse error at the current position. Only the
// first fatal error is kept.
func (r *Lexer) errParse(what string) {
	if r.fatalError == nil {
		var str string
		if len(r.Data)-r.pos <= maxErrorContextLen {
			str = string(r.Data)
		} else {
			// Truncate long context, reserving room for "...".
			str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..."
		}
		r.fatalError = &LexerError{
			Reason: what,
			Offset: r.pos,
			Data:   str,
		}
	}
}
// errSyntax records a generic fatal syntax error.
func (r *Lexer) errSyntax() {
	r.errParse("syntax error")
}
// errInvalidToken records an "expected X" error. In multiple-errors mode
// the offending value is skipped and the error is recorded as non-fatal
// so parsing can continue; otherwise it becomes the fatal error.
func (r *Lexer) errInvalidToken(expected string) {
	if r.fatalError != nil {
		return
	}
	if r.UseMultipleErrors {
		// Pretend the error-causing token was consumed and synthesize
		// a matching closing delimiter so generated parsers can keep going.
		r.pos = r.start
		r.consume()
		r.SkipRecursive()
		switch expected {
		case "[":
			r.token.delimValue = ']'
			r.token.kind = tokenDelim
		case "{":
			r.token.delimValue = '}'
			r.token.kind = tokenDelim
		case "]":
			r.token.delimValue = ']'
			r.token.kind = tokenDelim
			return
		case "}":
			r.token.delimValue = '}'
			r.token.kind = tokenDelim
			return
		}
		// NOTE: the "[" and "{" cases intentionally fall through to
		// record a non-fatal error here, while "]" and "}" return
		// above without recording one.
		r.addNonfatalError(&LexerError{
			Reason: fmt.Sprintf("expected %s", expected),
			Offset: r.start,
			Data:   string(r.Data[r.start:]),
		})
		return
	}

	var str string
	if len(r.token.byteValue) <= maxErrorContextLen {
		str = string(r.token.byteValue)
	} else {
		str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..."
	}
	r.fatalError = &LexerError{
		Reason: fmt.Sprintf("expected %s", expected),
		Offset: r.pos,
		Data:   str,
	}
}
// Delim consumes a token and verifies that it is the given delimiter.
func (r *Lexer) Delim(c byte) {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.delimValue != c {
		r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled.
		r.errInvalidToken(string([]byte{c}))
	} else {
		r.consume()
	}
}
// IsDelim returns true if there was no scanning error and next token is the given delimiter.
func (r *Lexer) IsDelim(c byte) bool {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	// Also true after an error, so parse loops keyed on IsDelim terminate.
	return !r.Ok() || r.token.delimValue == c
}
// Null verifies that the next token is null and consumes it.
func (r *Lexer) Null() {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.kind != tokenNull {
		r.errInvalidToken("null")
	}
	r.consume()
}
// IsNull returns true if the next token is a null keyword.
func (r *Lexer) IsNull() bool {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	return r.Ok() && r.token.kind == tokenNull
}
// Skip skips a single token (without descending into nested values; see
// SkipRecursive for that).
func (r *Lexer) Skip() {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	r.consume()
}
// SkipRecursive skips next array or object completely, or just skips a single token if not
// an array/object.
//
// Note: no syntax validation is performed on the skipped data.
func (r *Lexer) SkipRecursive() {
	r.scanToken()
	var start, end byte

	if r.token.delimValue == '{' {
		start, end = '{', '}'
	} else if r.token.delimValue == '[' {
		start, end = '[', ']'
	} else {
		// Not a container: skipping the single token is enough.
		r.consume()
		return
	}

	r.consume()

	// Track nesting depth, ignoring delimiters that appear inside string
	// literals; wasEscape handles backslash-escaped quotes.
	level := 1
	inQuotes := false
	wasEscape := false

	for i, c := range r.Data[r.pos:] {
		switch {
		case c == start && !inQuotes:
			level++
		case c == end && !inQuotes:
			level--
			if level == 0 {
				r.pos += i + 1
				return
			}
		case c == '\\' && inQuotes:
			wasEscape = !wasEscape
			continue
		case c == '"' && inQuotes:
			// A quote preceded by an unconsumed backslash stays
			// inside the string.
			inQuotes = wasEscape
		case c == '"':
			inQuotes = true
		}
		wasEscape = false
	}
	r.pos = len(r.Data)
	r.fatalError = &LexerError{
		Reason: "EOF reached while skipping array/object or token",
		Offset: r.pos,
		Data:   string(r.Data[r.pos:]),
	}
}
// Raw fetches the next item recursively as a data slice aliasing the input.
func (r *Lexer) Raw() []byte {
	r.SkipRecursive()
	if !r.Ok() {
		return nil
	}
	return r.Data[r.start:r.pos]
}
// IsStart returns whether the lexer is positioned at the start
// of an input string.
func (r *Lexer) IsStart() bool {
	return r.pos == 0
}
// Consumed reads all remaining bytes from the input, publishing an error if
// there is anything but whitespace remaining.
func (r *Lexer) Consumed() {
	if r.pos > len(r.Data) {
		return
	}
	for _, c := range r.Data[r.pos:] {
		if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
			r.fatalError = &LexerError{
				Reason: "invalid character '" + string(c) + "' after top-level value",
				Offset: r.pos,
				Data:   string(r.Data[r.pos:]),
			}
			return
		}
		r.pos++
		r.start++
	}
}
// UnsafeString returns the string value if the token is a string literal.
//
// Warning: returned string may point to the input buffer, so the string should not outlive
// the input buffer. Intended pattern of usage is as an argument to a switch statement.
func (r *Lexer) UnsafeString() string {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.kind != tokenString {
		r.errInvalidToken("string")
		return ""
	}
	// Zero-copy view over the token bytes; see bytesToStr's caveats.
	ret := bytesToStr(r.token.byteValue)
	r.consume()
	return ret
}
// String reads a string literal and returns it as an owned copy.
func (r *Lexer) String() string {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.kind != tokenString {
		r.errInvalidToken("string")
		return ""
	}
	ret := string(r.token.byteValue)
	r.consume()
	return ret
}
// Bytes reads a string literal and base64 decodes it into a byte slice.
// On a malformed literal or base64 payload it records an error and
// returns nil.
func (r *Lexer) Bytes() []byte {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.kind != tokenString {
		r.errInvalidToken("string")
		return nil
	}
	ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue)))
	// Renamed from `len`, which shadowed the builtin of the same name.
	n, err := base64.StdEncoding.Decode(ret, r.token.byteValue)
	if err != nil {
		r.fatalError = &LexerError{
			Reason: err.Error(),
		}
		return nil
	}
	r.consume()
	return ret[:n]
}
// Bool reads a true or false boolean keyword.
func (r *Lexer) Bool() bool {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.kind != tokenBool {
		r.errInvalidToken("bool")
		return false
	}
	ret := r.token.boolValue
	r.consume()
	return ret
}
// number ensures the current token is a number literal and returns its
// raw text as a zero-copy string (see bytesToStr's caveats).
func (r *Lexer) number() string {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.kind != tokenNumber {
		r.errInvalidToken("number")
		return ""
	}
	ret := bytesToStr(r.token.byteValue)
	r.consume()
	return ret
}
// Uint8 reads a number literal as a uint8; a parse/range failure is
// recorded as a non-fatal error and 0 is returned.
func (r *Lexer) Uint8() uint8 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseUint(s, 10, 8)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return uint8(n)
}
// Uint16 reads a number literal as a uint16.
func (r *Lexer) Uint16() uint16 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseUint(s, 10, 16)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return uint16(n)
}
// Uint32 reads a number literal as a uint32.
func (r *Lexer) Uint32() uint32 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseUint(s, 10, 32)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return uint32(n)
}
// Uint64 reads a number literal as a uint64.
func (r *Lexer) Uint64() uint64 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return n
}
// Uint reads a number literal as a uint.
func (r *Lexer) Uint() uint {
	return uint(r.Uint64())
}
// Int8 reads a number literal as an int8.
func (r *Lexer) Int8() int8 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseInt(s, 10, 8)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return int8(n)
}
// Int16 reads a number literal as an int16.
func (r *Lexer) Int16() int16 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseInt(s, 10, 16)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return int16(n)
}
// Int32 reads a number literal as an int32.
func (r *Lexer) Int32() int32 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseInt(s, 10, 32)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return int32(n)
}
// Int64 reads a number literal as an int64.
func (r *Lexer) Int64() int64 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return n
}
// Int reads a number literal as an int.
func (r *Lexer) Int() int {
	return int(r.Int64())
}
// Uint8Str reads a string-quoted number as a uint8; a parse/range failure
// is recorded as a non-fatal error and 0 is returned.
func (r *Lexer) Uint8Str() uint8 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseUint(s, 10, 8)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return uint8(n)
}
// Uint16Str reads a string-quoted number as a uint16.
func (r *Lexer) Uint16Str() uint16 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseUint(s, 10, 16)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return uint16(n)
}
// Uint32Str reads a string-quoted number as a uint32.
func (r *Lexer) Uint32Str() uint32 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseUint(s, 10, 32)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return uint32(n)
}
// Uint64Str reads a string-quoted number as a uint64.
func (r *Lexer) Uint64Str() uint64 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return n
}
// UintStr reads a string-quoted number as a uint.
func (r *Lexer) UintStr() uint {
	return uint(r.Uint64Str())
}
// Int8Str reads a string-quoted number as an int8.
func (r *Lexer) Int8Str() int8 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseInt(s, 10, 8)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return int8(n)
}
// Int16Str reads a string-quoted number as an int16.
func (r *Lexer) Int16Str() int16 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseInt(s, 10, 16)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return int16(n)
}
// Int32Str reads a string-quoted number as an int32.
func (r *Lexer) Int32Str() int32 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseInt(s, 10, 32)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return int32(n)
}
// Int64Str reads a string-quoted number as an int64.
func (r *Lexer) Int64Str() int64 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return n
}
// IntStr reads a string-quoted number as an int.
func (r *Lexer) IntStr() int {
	return int(r.Int64Str())
}
// Float32 reads a number literal as a float32; a parse failure is
// recorded as a non-fatal error and 0 is returned.
func (r *Lexer) Float32() float32 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseFloat(s, 32)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return float32(n)
}
// Float64 reads a number literal as a float64.
func (r *Lexer) Float64() float64 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseFloat(s, 64)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return n
}
// Error returns the first fatal error recorded during lexing, if any.
func (r *Lexer) Error() error {
	return r.fatalError
}
// AddError records e as the fatal error unless one is already set.
func (r *Lexer) AddError(e error) {
	if r.fatalError == nil {
		r.fatalError = e
	}
}
// addNonfatalError records err as a non-fatal error in multiple-errors
// mode (skipping a duplicate at the same offset as the previous one);
// otherwise it becomes the fatal error.
func (r *Lexer) addNonfatalError(err *LexerError) {
	if r.UseMultipleErrors {
		if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset {
			return
		}
		r.multipleErrors = append(r.multipleErrors, err)
		return
	}
	r.fatalError = err
}
// GetNonFatalErrors returns the errors collected in multiple-errors mode.
func (r *Lexer) GetNonFatalErrors() []*LexerError {
	return r.multipleErrors
}
// Interface fetches an interface{} analogous to the 'encoding/json' package:
// strings, float64 numbers, bools, nil, map[string]interface{} for objects
// and []interface{} for arrays.
func (r *Lexer) Interface() interface{} {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() {
		return nil
	}
	// Scalar tokens delegate to the typed readers.
	switch r.token.kind {
	case tokenString:
		return r.String()
	case tokenNumber:
		return r.Float64()
	case tokenBool:
		return r.Bool()
	case tokenNull:
		r.Null()
		return nil
	}

	if r.token.delimValue == '{' {
		r.consume()
		ret := map[string]interface{}{}
		for !r.IsDelim('}') {
			key := r.String()
			r.WantColon()
			ret[key] = r.Interface()
			r.WantComma()
		}
		r.Delim('}')

		if r.Ok() {
			return ret
		} else {
			return nil
		}
	} else if r.token.delimValue == '[' {
		r.consume()
		var ret []interface{}
		for !r.IsDelim(']') {
			ret = append(ret, r.Interface())
			r.WantComma()
		}
		r.Delim(']')

		if r.Ok() {
			return ret
		} else {
			return nil
		}
	}
	r.errSyntax()
	return nil
}
// WantComma requires a comma to be present before fetching next token
// (enforced by fetchToken via wantSep).
func (r *Lexer) WantComma() {
	r.wantSep = ','
	r.firstElement = false
}
// WantColon requires a colon to be present before fetching next token
// (enforced by fetchToken via wantSep).
func (r *Lexer) WantColon() {
	r.wantSep = ':'
	r.firstElement = false
}
Removed unused code
// Package jlexer contains a JSON lexer implementation.
//
// It is expected that it is mostly used with generated parser code, so the interface is tuned
// for a parser that knows what kind of data is expected.
package jlexer
import (
"encoding/base64"
"fmt"
"io"
"reflect"
"strconv"
"unicode/utf8"
"unsafe"
)
// tokenKind determines type of a token.
type tokenKind byte
const (
	tokenUndef  tokenKind = iota // No token.
	tokenDelim                   // Delimiter: one of '{', '}', '[' or ']'.
	tokenString                  // A string literal, e.g. "abc\u1234"
	tokenNumber                  // Number literal, e.g. 1.5e5
	tokenBool                    // Boolean literal: true or false.
	tokenNull                    // null keyword.
)
// token describes a single token: type, position in the input and value.
type token struct {
	kind       tokenKind // Type of a token.
	boolValue  bool      // Value if a boolean literal token.
	byteValue  []byte    // Raw value of a token.
	delimValue byte      // Delimiter character when kind == tokenDelim.
}
// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice.
type Lexer struct {
	Data []byte // Input data given to the lexer.

	start int   // Start of the current token.
	pos   int   // Current unscanned position in the input stream.
	token token // Last scanned token, if token.kind != tokenUndef.

	firstElement bool // Whether current element is the first in array or an object.
	wantSep      byte // A comma or a colon character, which need to occur before a token.

	UseMultipleErrors bool          // If we want to use multiple errors.
	fatalError        error         // Fatal error occurred during lexing. It is usually a syntax error.
	nowSem            bool          // Whether a semantic error occurred during parsing.
	multipleErrors    []*LexerError // Semantic errors occurred during lexing. Marshalling will be continued after finding these errors.
}
// fetchToken scans the input for the next token, storing it in r.token.
// On end of input it sets r.fatalError to io.EOF.
func (r *Lexer) fetchToken() {
	r.token.kind = tokenUndef
	r.start = r.pos

	// Check if r.Data still has an r.pos element; if it doesn't, the
	// input data is corrupted.
	if len(r.Data) < r.pos {
		r.errParse("Unexpected end of data")
		return
	}
	// Determine the type of a token by skipping whitespace and reading the
	// first character.
	for _, c := range r.Data[r.pos:] {
		switch c {
		case ':', ',':
			// A separator is only consumed when one is expected
			// (set via WantColon/WantComma).
			if r.wantSep == c {
				r.pos++
				r.start++
				r.wantSep = 0
			} else {
				r.errSyntax()
			}
		case ' ', '\t', '\r', '\n':
			// Skip whitespace.
			r.pos++
			r.start++
		case '"':
			if r.wantSep != 0 {
				r.errSyntax()
			}
			r.token.kind = tokenString
			r.fetchString()
			return
		case '{', '[':
			if r.wantSep != 0 {
				r.errSyntax()
			}
			r.firstElement = true
			r.token.kind = tokenDelim
			r.token.delimValue = r.Data[r.pos]
			r.pos++
			return
		case '}', ']':
			// A closing delimiter is valid immediately after the
			// opening one (empty container) or when a comma
			// separator is pending, i.e. after a complete value.
			if !r.firstElement && (r.wantSep != ',') {
				r.errSyntax()
			}
			r.wantSep = 0
			r.token.kind = tokenDelim
			r.token.delimValue = r.Data[r.pos]
			r.pos++
			return
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-':
			if r.wantSep != 0 {
				r.errSyntax()
			}
			r.token.kind = tokenNumber
			r.fetchNumber()
			return
		case 'n':
			if r.wantSep != 0 {
				r.errSyntax()
			}
			r.token.kind = tokenNull
			r.fetchNull()
			return
		case 't':
			if r.wantSep != 0 {
				r.errSyntax()
			}
			r.token.kind = tokenBool
			r.token.boolValue = true
			r.fetchTrue()
			return
		case 'f':
			if r.wantSep != 0 {
				r.errSyntax()
			}
			r.token.kind = tokenBool
			r.token.boolValue = false
			r.fetchFalse()
			return
		default:
			r.errSyntax()
			return
		}
	}
	// Ran out of input without finding a token.
	r.fatalError = io.EOF
	return
}
// isTokenEnd reports whether c may legally follow a non-delimiter token:
// JSON whitespace, a structural delimiter, or a separator character.
func isTokenEnd(c byte) bool {
	switch c {
	case ' ', '\t', '\r', '\n', '[', ']', '{', '}', ',', ':':
		return true
	}
	return false
}
// fetchNull fetches and checks remaining bytes of null keyword.
// r.pos is advanced past "null"; on any mismatch it is restored and a syntax
// error is recorded. The keyword must be followed by a token-ending byte or
// end of input, so e.g. "nullx" is rejected.
func (r *Lexer) fetchNull() {
	r.pos += 4
	if r.pos > len(r.Data) ||
		r.Data[r.pos-3] != 'u' ||
		r.Data[r.pos-2] != 'l' ||
		r.Data[r.pos-1] != 'l' ||
		(r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
		r.pos -= 4
		r.errSyntax()
	}
}

// fetchTrue fetches and checks remaining bytes of true keyword.
// Same contract as fetchNull: position restored and a syntax error recorded
// on mismatch.
func (r *Lexer) fetchTrue() {
	r.pos += 4
	if r.pos > len(r.Data) ||
		r.Data[r.pos-3] != 'r' ||
		r.Data[r.pos-2] != 'u' ||
		r.Data[r.pos-1] != 'e' ||
		(r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
		r.pos -= 4
		r.errSyntax()
	}
}

// fetchFalse fetches and checks remaining bytes of false keyword.
// Same contract as fetchNull, but "false" is five bytes.
func (r *Lexer) fetchFalse() {
	r.pos += 5
	if r.pos > len(r.Data) ||
		r.Data[r.pos-4] != 'a' ||
		r.Data[r.pos-3] != 'l' ||
		r.Data[r.pos-2] != 's' ||
		r.Data[r.pos-1] != 'e' ||
		(r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
		r.pos -= 5
		r.errSyntax()
	}
}
// bytesToStr creates a string pointing at the slice to avoid copying.
//
// Warning: the string returned by the function should be used with care, as the whole input data
// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data
// may be garbage-collected even when the string exists.
//
// NOTE(review): reflect.SliceHeader/StringHeader are deprecated in recent Go
// releases; if the minimum supported toolchain permits, unsafe.String is the
// sanctioned replacement — confirm the supported Go version before changing.
func bytesToStr(data []byte) string {
	h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
	shdr := reflect.StringHeader{h.Data, h.Len}
	return *(*string)(unsafe.Pointer(&shdr))
}
// fetchNumber scans a number literal token.
// Validation is shape-only: digits with at most one '.', one exponent marker
// and an optional sign immediately after it. The raw bytes are stored in
// r.token.byteValue; strconv does the real parsing later. Setting hasDot
// alongside hasE forbids a '.' inside the exponent part.
func (r *Lexer) fetchNumber() {
	hasE := false
	afterE := false
	hasDot := false
	r.pos++
	for i, c := range r.Data[r.pos:] {
		switch {
		case c >= '0' && c <= '9':
			afterE = false
		case c == '.' && !hasDot:
			hasDot = true
		case (c == 'e' || c == 'E') && !hasE:
			hasE = true
			hasDot = true
			afterE = true
		case (c == '+' || c == '-') && afterE:
			afterE = false
		default:
			// First byte that cannot belong to the number: it must be a
			// valid token terminator, otherwise the literal is malformed.
			r.pos += i
			if !isTokenEnd(c) {
				r.errSyntax()
			} else {
				r.token.byteValue = r.Data[r.start:r.pos]
			}
			return
		}
	}
	// The number runs to the very end of the input.
	r.pos = len(r.Data)
	r.token.byteValue = r.Data[r.start:]
}
// findStringLen scans ahead in a string literal for the terminating quote to
// estimate the unescaped size. The returned length is exact when no escapes
// occur; with escapes it is only a capacity hint (each backslash counts for
// one saved byte, plus one more for \u escapes).
func findStringLen(data []byte) (hasEscapes bool, length int) {
	saved := 0
	i := 0
	for i < len(data) {
		switch data[i] {
		case '"':
			return saved > 0, i - saved
		case '\\':
			saved++
			i++
			if i < len(data) && data[i] == 'u' {
				saved++
			}
		}
		i++
	}
	// No closing quote found.
	return false, len(data)
}
// processEscape processes a single escape sequence and returns the number of
// input bytes consumed. On success the unescaped bytes are appended to
// r.token.byteValue. Supported escapes are the JSON single-character escapes
// and \uXXXX.
//
// NOTE(review): UTF-16 surrogate pairs (\uD800..\uDFFF) are not combined;
// each half is encoded independently — confirm whether callers need full
// surrogate handling.
func (r *Lexer) processEscape(data []byte) (int, error) {
	if len(data) < 2 {
		return 0, fmt.Errorf("syntax error at %v", string(data))
	}
	c := data[1]
	switch c {
	case '"', '/', '\\':
		r.token.byteValue = append(r.token.byteValue, c)
		return 2, nil
	case 'b':
		r.token.byteValue = append(r.token.byteValue, '\b')
		return 2, nil
	case 'f':
		r.token.byteValue = append(r.token.byteValue, '\f')
		return 2, nil
	case 'n':
		r.token.byteValue = append(r.token.byteValue, '\n')
		return 2, nil
	case 'r':
		r.token.byteValue = append(r.token.byteValue, '\r')
		return 2, nil
	case 't':
		r.token.byteValue = append(r.token.byteValue, '\t')
		return 2, nil
	case 'u':
		// \u must be followed by exactly four hex digits. The previous code
		// accepted a truncated escape at the end of input and still reported
		// 6 bytes consumed, over-advancing the caller; reject it explicitly.
		if len(data) < 6 {
			return 0, fmt.Errorf("syntax error")
		}
	default:
		return 0, fmt.Errorf("syntax error")
	}
	// Decode the four hex digits of a \uXXXX escape.
	var val rune
	for i := 2; i < 6; i++ {
		var v byte
		c = data[i]
		switch c {
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			v = c - '0'
		case 'a', 'b', 'c', 'd', 'e', 'f':
			v = c - 'a' + 10
		case 'A', 'B', 'C', 'D', 'E', 'F':
			v = c - 'A' + 10
		default:
			return 0, fmt.Errorf("syntax error")
		}
		val <<= 4
		val |= rune(v)
	}
	l := utf8.RuneLen(val)
	if l == -1 {
		return 0, fmt.Errorf("invalid unicode escape")
	}
	var d [4]byte
	utf8.EncodeRune(d[:], val)
	r.token.byteValue = append(r.token.byteValue, d[:l]...)
	return 6, nil
}
// fetchString scans a string literal token.
// Fast path: no escapes, so the token aliases the input buffer directly.
// Slow path: unescape into a freshly allocated buffer, copying the plain
// runs between escapes in bulk.
func (r *Lexer) fetchString() {
	r.pos++
	data := r.Data[r.pos:]
	hasEscapes, length := findStringLen(data)
	if !hasEscapes {
		// Zero-copy: byteValue points into r.Data until the next fetch.
		r.token.byteValue = data[:length]
		r.pos += length + 1
		return
	}
	r.token.byteValue = make([]byte, 0, length)
	p := 0 // start of the current run of unescaped bytes
	for i := 0; i < len(data); {
		switch data[i] {
		case '"':
			r.pos += i + 1
			r.token.byteValue = append(r.token.byteValue, data[p:i]...)
			i++
			return
		case '\\':
			r.token.byteValue = append(r.token.byteValue, data[p:i]...)
			off, err := r.processEscape(data[i:])
			if err != nil {
				r.errParse(err.Error())
				return
			}
			i += off
			p = i
		default:
			i++
		}
	}
	r.errParse("unterminated string literal")
}
// scanToken scans the next token if no token is currently available in the lexer.
func (r *Lexer) scanToken() {
	if r.token.kind != tokenUndef || r.fatalError != nil {
		return
	}
	r.fetchToken()
}

// consume resets the current token to allow scanning the next one.
func (r *Lexer) consume() {
	r.token.kind = tokenUndef
	r.token.delimValue = 0
}

// Ok returns true if no error (including io.EOF) was encountered during scanning.
func (r *Lexer) Ok() bool {
	return r.fatalError == nil
}

// maxErrorContextLen caps how much of the input is echoed in error messages.
const maxErrorContextLen = 13

// errParse records a fatal parse error at the current position, keeping only
// the first error encountered. A short excerpt of the input is attached.
// NOTE(review): when the remaining input is short, the excerpt is the WHOLE
// buffer (string(r.Data)), not the tail from r.pos — confirm this is intended.
func (r *Lexer) errParse(what string) {
	if r.fatalError == nil {
		var str string
		if len(r.Data)-r.pos <= maxErrorContextLen {
			str = string(r.Data)
		} else {
			str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..."
		}
		r.fatalError = &LexerError{
			Reason: what,
			Offset: r.pos,
			Data:   str,
		}
	}
}

// errSyntax records a generic fatal syntax error at the current position.
func (r *Lexer) errSyntax() {
	r.errParse("syntax error")
}
// errInvalidToken reports that the current token is not what the caller
// expected.
//
// In multiple-errors mode the error is recorded as non-fatal and the lexer
// resynchronizes: the offending value is skipped (SkipRecursive) and, when a
// container opener was expected, a matching closer is synthesized so the
// caller's decode loop terminates cleanly. Otherwise a fatal error carrying
// a short excerpt of the offending token is stored.
func (r *Lexer) errInvalidToken(expected string) {
	if r.fatalError != nil {
		return
	}
	if r.UseMultipleErrors {
		r.pos = r.start
		r.consume()
		r.SkipRecursive()
		switch expected {
		case "[":
			r.token.delimValue = ']'
			r.token.kind = tokenDelim
		case "{":
			r.token.delimValue = '}'
			r.token.kind = tokenDelim
		}
		r.addNonfatalError(&LexerError{
			Reason: fmt.Sprintf("expected %s", expected),
			Offset: r.start,
			Data:   string(r.Data[r.start:]),
		})
		return
	}
	var str string
	if len(r.token.byteValue) <= maxErrorContextLen {
		str = string(r.token.byteValue)
	} else {
		str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..."
	}
	r.fatalError = &LexerError{
		Reason: fmt.Sprintf("expected %s", expected),
		Offset: r.pos,
		Data:   str,
	}
}
// Delim consumes a token and verifies that it is the given delimiter.
// On mismatch the token is consumed first and errInvalidToken performs the
// error handling (fatal, or resynchronization in multiple-errors mode).
func (r *Lexer) Delim(c byte) {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.delimValue != c {
		r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled.
		r.errInvalidToken(string([]byte{c}))
	} else {
		r.consume()
	}
}
// IsDelim returns true if the next token is the given delimiter.
// It also returns true when the lexer is in an error state, so loops of the
// form `for !r.IsDelim('}')` terminate once an error occurs.
func (r *Lexer) IsDelim(c byte) bool {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	return !r.Ok() || r.token.delimValue == c
}

// Null verifies that the next token is null and consumes it.
// The token is consumed even on mismatch so decoding can continue in
// multiple-errors mode.
func (r *Lexer) Null() {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.kind != tokenNull {
		r.errInvalidToken("null")
	}
	r.consume()
}

// IsNull returns true if the next token is a null keyword.
func (r *Lexer) IsNull() bool {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	return r.Ok() && r.token.kind == tokenNull
}

// Skip skips a single token.
func (r *Lexer) Skip() {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	r.consume()
}
// SkipRecursive skips next array or object completely, or just skips a single token if not
// an array/object.
//
// Note: no syntax validation is performed on the skipped data.
func (r *Lexer) SkipRecursive() {
	r.scanToken()
	var start, end byte
	if r.token.delimValue == '{' {
		start, end = '{', '}'
	} else if r.token.delimValue == '[' {
		start, end = '[', ']'
	} else {
		// Not a container: skipping the single token is enough.
		r.consume()
		return
	}
	r.consume()
	// Track nesting depth and string state; delimiters inside string
	// literals must not affect the depth.
	level := 1
	inQuotes := false
	wasEscape := false
	for i, c := range r.Data[r.pos:] {
		switch {
		case c == start && !inQuotes:
			level++
		case c == end && !inQuotes:
			level--
			if level == 0 {
				r.pos += i + 1
				return
			}
		case c == '\\' && inQuotes:
			// Toggle (rather than set) so a run like `\\"` still closes the
			// string; `continue` skips the reset at the bottom of the loop.
			wasEscape = !wasEscape
			continue
		case c == '"' && inQuotes:
			// The quote closes the string only when it is not escaped.
			inQuotes = wasEscape
		case c == '"':
			inQuotes = true
		}
		wasEscape = false
	}
	r.pos = len(r.Data)
	r.fatalError = &LexerError{
		Reason: "EOF reached while skipping array/object or token",
		Offset: r.pos,
		Data:   string(r.Data[r.pos:]),
	}
}
// Raw fetches the next item recursively as a data slice
// The returned slice aliases r.Data and spans the whole skipped value.
func (r *Lexer) Raw() []byte {
	r.SkipRecursive()
	if !r.Ok() {
		return nil
	}
	return r.Data[r.start:r.pos]
}

// IsStart returns whether the lexer is positioned at the start
// of an input string.
func (r *Lexer) IsStart() bool {
	return r.pos == 0
}

// Consumed reads all remaining bytes from the input, publishing an error if
// there is anything but whitespace remaining.
func (r *Lexer) Consumed() {
	if r.pos > len(r.Data) {
		return
	}
	for _, c := range r.Data[r.pos:] {
		if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
			r.fatalError = &LexerError{
				Reason: "invalid character '" + string(c) + "' after top-level value",
				Offset: r.pos,
				Data:   string(r.Data[r.pos:]),
			}
			return
		}
		r.pos++
		r.start++
	}
}
// UnsafeString returns the string value if the token is a string literal.
//
// Warning: returned string may point to the input buffer, so the string should not outlive
// the input buffer. Intended pattern of usage is as an argument to a switch statement.
func (r *Lexer) UnsafeString() string {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.kind != tokenString {
		r.errInvalidToken("string")
		return ""
	}
	ret := bytesToStr(r.token.byteValue)
	r.consume()
	return ret
}

// String reads a string literal.
// Unlike UnsafeString, the result is a copy and is safe to retain after the
// lexer moves on.
func (r *Lexer) String() string {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.kind != tokenString {
		r.errInvalidToken("string")
		return ""
	}
	ret := string(r.token.byteValue)
	r.consume()
	return ret
}
// Bytes reads a string literal and base64 decodes it into a byte slice.
// Decode failures are fatal; the token is consumed only on success.
func (r *Lexer) Bytes() []byte {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.kind != tokenString {
		r.errInvalidToken("string")
		return nil
	}
	// Allocate for the worst case; n is the actual decoded length.
	// (The previous code named this variable "len", shadowing the builtin.)
	ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue)))
	n, err := base64.StdEncoding.Decode(ret, r.token.byteValue)
	if err != nil {
		r.fatalError = &LexerError{
			Reason: err.Error(),
		}
		return nil
	}
	r.consume()
	return ret[:n]
}
// Bool reads a true or false boolean keyword.
func (r *Lexer) Bool() bool {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.kind != tokenBool {
		r.errInvalidToken("bool")
		return false
	}
	ret := r.token.boolValue
	r.consume()
	return ret
}

// number returns the raw bytes of the next number token as a string.
// The result aliases the input buffer (bytesToStr), so it must be parsed
// before the next token is fetched.
func (r *Lexer) number() string {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() || r.token.kind != tokenNumber {
		r.errInvalidToken("number")
		return ""
	}
	ret := bytesToStr(r.token.byteValue)
	r.consume()
	return ret
}
// Uint8 reads a number token and parses it as a uint8; parse/range failures
// are reported through addNonfatalError so decoding can continue in
// multiple-errors mode.
func (r *Lexer) Uint8() uint8 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseUint(s, 10, 8)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return uint8(n)
}

// Uint16 reads a number token and parses it as a uint16.
func (r *Lexer) Uint16() uint16 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseUint(s, 10, 16)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return uint16(n)
}

// Uint32 reads a number token and parses it as a uint32.
func (r *Lexer) Uint32() uint32 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseUint(s, 10, 32)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return uint32(n)
}

// Uint64 reads a number token and parses it as a uint64.
func (r *Lexer) Uint64() uint64 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return n
}

// Uint reads a number token and parses it as a uint.
func (r *Lexer) Uint() uint {
	return uint(r.Uint64())
}
// Int8 reads a number token and parses it as an int8; parse/range failures
// are reported through addNonfatalError.
func (r *Lexer) Int8() int8 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseInt(s, 10, 8)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return int8(n)
}

// Int16 reads a number token and parses it as an int16.
func (r *Lexer) Int16() int16 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseInt(s, 10, 16)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return int16(n)
}

// Int32 reads a number token and parses it as an int32.
func (r *Lexer) Int32() int32 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseInt(s, 10, 32)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return int32(n)
}

// Int64 reads a number token and parses it as an int64.
func (r *Lexer) Int64() int64 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return n
}

// Int reads a number token and parses it as an int.
func (r *Lexer) Int() int {
	return int(r.Int64())
}
// Uint8Str reads a STRING token and parses its contents as a uint8
// (JSON numbers quoted as strings); failures go through addNonfatalError.
func (r *Lexer) Uint8Str() uint8 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseUint(s, 10, 8)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return uint8(n)
}

// Uint16Str reads a string token and parses its contents as a uint16.
func (r *Lexer) Uint16Str() uint16 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseUint(s, 10, 16)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return uint16(n)
}

// Uint32Str reads a string token and parses its contents as a uint32.
func (r *Lexer) Uint32Str() uint32 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseUint(s, 10, 32)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return uint32(n)
}

// Uint64Str reads a string token and parses its contents as a uint64.
func (r *Lexer) Uint64Str() uint64 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return n
}

// UintStr reads a string token and parses its contents as a uint.
func (r *Lexer) UintStr() uint {
	return uint(r.Uint64Str())
}
// Int8Str reads a STRING token and parses its contents as an int8
// (JSON numbers quoted as strings); failures go through addNonfatalError.
func (r *Lexer) Int8Str() int8 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseInt(s, 10, 8)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return int8(n)
}

// Int16Str reads a string token and parses its contents as an int16.
func (r *Lexer) Int16Str() int16 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseInt(s, 10, 16)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return int16(n)
}

// Int32Str reads a string token and parses its contents as an int32.
func (r *Lexer) Int32Str() int32 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseInt(s, 10, 32)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return int32(n)
}

// Int64Str reads a string token and parses its contents as an int64.
func (r *Lexer) Int64Str() int64 {
	s := r.UnsafeString()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return n
}

// IntStr reads a string token and parses its contents as an int.
func (r *Lexer) IntStr() int {
	return int(r.Int64Str())
}
// Float32 reads a number token and parses it as a float32; parse failures
// are reported through addNonfatalError.
func (r *Lexer) Float32() float32 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseFloat(s, 32)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return float32(n)
}

// Float64 reads a number token and parses it as a float64.
func (r *Lexer) Float64() float64 {
	s := r.number()
	if !r.Ok() {
		return 0
	}
	n, err := strconv.ParseFloat(s, 64)
	if err != nil {
		r.addNonfatalError(&LexerError{
			Offset: r.start,
			Reason: err.Error(),
		})
	}
	return n
}
// Error returns the first fatal error encountered, or nil.
func (r *Lexer) Error() error {
	return r.fatalError
}

// AddError records an external error as fatal, keeping only the first one.
func (r *Lexer) AddError(e error) {
	if r.fatalError == nil {
		r.fatalError = e
	}
}

// addNonfatalError records err without stopping the lexer when
// UseMultipleErrors is set; otherwise it is treated as fatal. Consecutive
// errors at the same offset are deduplicated.
func (r *Lexer) addNonfatalError(err *LexerError) {
	if r.UseMultipleErrors {
		if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset {
			return
		}
		r.multipleErrors = append(r.multipleErrors, err)
		return
	}
	r.fatalError = err
}

// GetNonFatalErrors returns the errors collected in multiple-errors mode.
func (r *Lexer) GetNonFatalErrors() []*LexerError {
	return r.multipleErrors
}
// Interface fetches an interface{} analogous to the 'encoding/json' package.
// Scalars decode to string/float64/bool/nil; objects and arrays decode
// recursively into map[string]interface{} and []interface{}.
func (r *Lexer) Interface() interface{} {
	if r.token.kind == tokenUndef && r.Ok() {
		r.fetchToken()
	}
	if !r.Ok() {
		return nil
	}
	switch r.token.kind {
	case tokenString:
		return r.String()
	case tokenNumber:
		return r.Float64()
	case tokenBool:
		return r.Bool()
	case tokenNull:
		r.Null()
		return nil
	}
	if r.token.delimValue == '{' {
		r.consume()
		ret := map[string]interface{}{}
		for !r.IsDelim('}') {
			key := r.String()
			r.WantColon()
			ret[key] = r.Interface()
			r.WantComma()
		}
		r.Delim('}')
		// Return nil rather than a half-built map when decoding failed.
		if r.Ok() {
			return ret
		} else {
			return nil
		}
	} else if r.token.delimValue == '[' {
		r.consume()
		var ret []interface{}
		for !r.IsDelim(']') {
			ret = append(ret, r.Interface())
			r.WantComma()
		}
		r.Delim(']')
		// Return nil rather than a half-built slice when decoding failed.
		if r.Ok() {
			return ret
		} else {
			return nil
		}
	}
	r.errSyntax()
	return nil
}
// WantComma requires a comma to be present before fetching next token.
// fetchToken enforces this via r.wantSep.
func (r *Lexer) WantComma() {
	r.wantSep = ','
	r.firstElement = false
}

// WantColon requires a colon to be present before fetching next token.
func (r *Lexer) WantColon() {
	r.wantSep = ':'
	r.firstElement = false
}
|
package csvutil
import (
"encoding/csv"
"os"
"github.com/grokify/gotilla/strings/stringsutil"
)
/*
For a file with a UTF-8 BOM, csv.Reader.Read() will return the error "line 1, column 1: bare \" in non-quoted-field".
If you encounter this, close the file and call NewReader again with stripBom = true.
*/
// NewReader will create a csv.Reader and optionally strip off the
// byte order mark (BOM) if requested. The returned *os.File must be
// closed by the caller when reading is done.
//
// NOTE(review): when stripBom is set, three bytes are read and discarded
// without verifying they actually are EF BB BF, and a short read is not
// handled — confirm inputs always carry a 3-byte BOM when this flag is used.
func NewReader(path string, comma rune, stripBom bool) (*csv.Reader, *os.File, error) {
	var csvReader *csv.Reader
	var file *os.File
	file, err := os.Open(path)
	if err != nil {
		return csvReader, file, err
	}
	if stripBom {
		// Skip the UTF-8 byte order mark so the first field parses cleanly.
		b3 := make([]byte, 3)
		_, err := file.Read(b3)
		if err != nil {
			return csvReader, file, err
		}
	}
	csvReader = csv.NewReader(file)
	csvReader.Comma = comma
	return csvReader, file, nil
}
// Writer is a struct for a CSV/TSV writer.
type Writer struct {
	Separator        string   // column separator written between cells
	StripRepeatedSep bool     // collapse repeated separators (always false via NewWriter)
	ReplaceSeparator bool     // replace separator chars found inside cell values
	SeparatorAlt     string   // replacement text used when ReplaceSeparator is set
	File             *os.File // destination file, opened by NewWriter
}

// NewWriter returns a Writer with the separator params set.
// The file at filepath is created (or truncated) immediately.
func NewWriter(filepath, sep string, replaceSeparator bool, alt string) (Writer, error) {
	w := Writer{
		Separator:        sep,
		StripRepeatedSep: false,
		ReplaceSeparator: replaceSeparator,
		SeparatorAlt:     alt}
	return w, w.open(filepath)
}

// open creates (truncating if present) the file at filepath and attaches it
// to the Writer.
func (w *Writer) open(filepath string) error {
	f, err := os.Create(filepath)
	if err != nil {
		return err
	}
	w.File = f
	return nil
}

// AddLine adds an []interface{} to the file.
// Each call writes one separator-joined line and syncs to disk, trading
// throughput for durability.
func (w *Writer) AddLine(cells []interface{}) error {
	_, err := w.File.WriteString(
		stringsutil.JoinInterface(
			cells, w.Separator, false, w.ReplaceSeparator, w.SeparatorAlt) + "\n")
	if err != nil {
		return err
	}
	return w.File.Sync()
}

// Close closes the file.
func (w *Writer) Close() error {
	return w.File.Close()
}
Update csvutil documentation.
package csvutil
import (
"encoding/csv"
"os"
"github.com/grokify/gotilla/strings/stringsutil"
)
/*
For a file with a UTF-8 BOM, csv.Reader.Read() will return the error "line 1, column 1: bare \" in non-quoted-field".
If you encounter this, close the file and call NewReader again with stripBom = true.
*/
// NewReader will create a csv.Reader and optionally strip off the
// byte order mark (BOM) if requested. Close file reader with
// `defer f.Close()`.
//
// NOTE(review): when stripBom is set, three bytes are read and discarded
// without verifying they actually are EF BB BF, and a short read is not
// handled — confirm inputs always carry a 3-byte BOM when this flag is used.
func NewReader(path string, comma rune, stripBom bool) (*csv.Reader, *os.File, error) {
	var csvReader *csv.Reader
	var file *os.File
	file, err := os.Open(path)
	if err != nil {
		return csvReader, file, err
	}
	if stripBom {
		// Skip the UTF-8 byte order mark so the first field parses cleanly.
		b3 := make([]byte, 3)
		_, err := file.Read(b3)
		if err != nil {
			return csvReader, file, err
		}
	}
	csvReader = csv.NewReader(file)
	csvReader.Comma = comma
	return csvReader, file, nil
}
// Writer is a struct for a CSV/TSV writer.
type Writer struct {
	Separator        string   // column separator written between cells
	StripRepeatedSep bool     // collapse repeated separators (always false via NewWriter)
	ReplaceSeparator bool     // replace separator chars found inside cell values
	SeparatorAlt     string   // replacement text used when ReplaceSeparator is set
	File             *os.File // destination file, opened by NewWriter
}

// NewWriter returns a Writer with the separator params set.
// The file at filepath is created (or truncated) immediately.
func NewWriter(filepath, sep string, replaceSeparator bool, alt string) (Writer, error) {
	w := Writer{
		Separator:        sep,
		StripRepeatedSep: false,
		ReplaceSeparator: replaceSeparator,
		SeparatorAlt:     alt}
	return w, w.open(filepath)
}

// open creates (truncating if present) the file at filepath and attaches it
// to the Writer.
func (w *Writer) open(filepath string) error {
	f, err := os.Create(filepath)
	if err != nil {
		return err
	}
	w.File = f
	return nil
}

// AddLine adds an []interface{} to the file.
// Each call writes one separator-joined line and syncs to disk, trading
// throughput for durability.
func (w *Writer) AddLine(cells []interface{}) error {
	_, err := w.File.WriteString(
		stringsutil.JoinInterface(
			cells, w.Separator, false, w.ReplaceSeparator, w.SeparatorAlt) + "\n")
	if err != nil {
		return err
	}
	return w.File.Sync()
}

// Close closes the file.
func (w *Writer) Close() error {
	return w.File.Close()
}
|
package main
import (
"net/http"
"github.com/apexskier/httpauth"
"github.com/gorilla/mux"
"fmt"
)
var (
	backend httpauth.GobFileAuthBackend // persistent gob-file user store
	aaa     httpauth.Authorizer         // cookie-based authorizer shared by all handlers
)
// main wires the gob-file auth backend and the authorizer to the HTTP routes
// for the example server, then serves on :8080.
func main() {
	backend = httpauth.NewGobFileAuthBackend("auth.gob")
	// err must be declared up front: aaa is a package-level variable, so
	// ":=" would shadow it, and plain "=" requires err to already exist.
	var err error
	aaa, err = httpauth.NewAuthorizer(backend, []byte("cookie-encryption-key"))
	if err != nil {
		panic(err)
	}
	// set up routers and route handlers
	r := mux.NewRouter()
	r.HandleFunc("/login", getLogin).Methods("GET")
	r.HandleFunc("/register", postRegister).Methods("POST")
	r.HandleFunc("/login", postLogin).Methods("POST")
	r.HandleFunc("/change", postChange).Methods("POST")
	r.HandleFunc("/", handlePage).Methods("GET") // authorized page
	r.HandleFunc("/logout", handleLogout)
	http.Handle("/", r)
	// ListenAndServe always returns a non-nil error; don't drop it.
	if err = http.ListenAndServe(":8080", nil); err != nil {
		panic(err)
	}
}
// getLogin renders the combined login/registration entry page, including any
// flash messages httpauth has queued for this session.
func getLogin(rw http.ResponseWriter, req *http.Request) {
	messages := aaa.Messages(rw, req)
	fmt.Fprintf(rw, `
    <html>
        <head><title>Login</title></head>
        <body>
            <h1>Httpauth example</h1>
            <h2>Entry Page</h2>
            <p><b>Messages: %v</b></p>
            <h3>Login</h3>
            <form action="/login" method="post" id="login">
                <input type="text" name="username" placeholder="username"><br>
                <input type="password" name="password" placeholder="password"></br>
                <button type="submit">Login</button>
            </form>
            <h3>Register</h3>
            <form action="/register" method="post" id="register">
                <input type="text" name="username" placeholder="username"><br>
                <input type="password" name="password" placeholder="password"></br>
                <input type="email" name="email" placeholder="email@example.com"></br>
                <button type="submit">Register</button>
            </form>
        </body>
    </html>
    `, messages)
}
// postLogin attempts to authenticate the posted credentials; on success
// httpauth redirects to "/", on failure the user is sent back to /login.
func postLogin(rw http.ResponseWriter, req *http.Request) {
	username := req.PostFormValue("username")
	password := req.PostFormValue("password")
	if err := aaa.Login(rw, req, username, password, "/"); err != nil {
		fmt.Println(err)
		http.Redirect(rw, req, "/login", http.StatusSeeOther)
	}
}
// postRegister creates a new account from the posted form and, on success,
// logs the user straight in; otherwise it bounces back to /login.
func postRegister(rw http.ResponseWriter, req *http.Request) {
	username := req.PostFormValue("username")
	password := req.PostFormValue("password")
	email := req.PostFormValue("email")
	if err := aaa.Register(rw, req, username, password, email); err == nil {
		postLogin(rw, req)
	} else {
		http.Redirect(rw, req, "/login", http.StatusSeeOther)
	}
}
// postChange updates the logged-in user's email address.
// NOTE(review): the error from aaa.Update is ignored — the user is
// redirected to "/" even when the update failed; confirm httpauth's flash
// messages cover that case.
func postChange(rw http.ResponseWriter, req *http.Request) {
	email := req.PostFormValue("new_email")
	aaa.Update(rw, req, "", email)
	http.Redirect(rw, req, "/", http.StatusSeeOther)
}
// handlePage renders the authorized front page for the logged-in user, or
// bounces unauthenticated visitors to /login.
// NOTE(review): the rendered markup lacks a closing </html> tag — confirm
// and fix alongside the template.
func handlePage(rw http.ResponseWriter, req *http.Request) {
	if err := aaa.Authorize(rw, req, true); err != nil {
		fmt.Println(err)
		http.Redirect(rw, req, "/login", http.StatusSeeOther)
		return
	}
	if user, ok := aaa.CurrentUser(rw, req); ok {
		fmt.Fprintf(rw, `
    <html>
        <head><title>Secret page</title></head>
        <body>
            <h1>Httpauth example<h1>
            <h2>Hello %v</h2>
            <p>Your email is %v. <a href="/logout">Logout</a></p>
            <form action="/change" method="post" id="change">
                <h3>Change email</h3>
                <p><input type="email" name="new_email" placeholder="new email"></p>
                <button type="submit">Submit</button>
            </form>
        </body>
    `, user.Username, user.Email)
	}
}
// handleLogout ends the current session and sends the user back to the
// front page.
func handleLogout(rw http.ResponseWriter, req *http.Request) {
	if err := aaa.Logout(rw, req); err != nil {
		fmt.Println(err)
		// this shouldn't happen
		return
	}
	// Redirecting back to "/logout" would re-enter this handler; send the
	// user to the root page instead (it bounces unauthenticated visitors
	// to /login).
	http.Redirect(rw, req, "/", http.StatusSeeOther)
}
Add role-based authorization support to the example server.
package main
import (
"net/http"
"github.com/apexskier/httpauth"
"github.com/gorilla/mux"
"html/template"
"fmt"
)
var (
	backend httpauth.GobFileAuthBackend // persistent gob-file user store
	aaa     httpauth.Authorizer         // cookie-based authorizer shared by all handlers
	roles   map[string]httpauth.Role    // role name -> privilege level, populated in main
)
// main wires the gob-file auth backend, the role table, and the authorizer
// to the HTTP routes for the example server, then serves on :8080.
func main() {
	var err error
	backend = httpauth.NewGobFileAuthBackend("auth.gob")
	// Role names mapped to httpauth.Role privilege levels; "admin" outranks
	// the default "user" role passed to NewAuthorizer below.
	roles = make(map[string]httpauth.Role)
	roles["user"] = 30
	roles["admin"] = 80
	aaa, err = httpauth.NewAuthorizer(backend, []byte("cookie-encryption-key"), "user", roles)
	if err != nil {
		panic(err)
	}
	// set up routers and route handlers
	r := mux.NewRouter()
	r.HandleFunc("/login", getLogin).Methods("GET")
	r.HandleFunc("/register", postRegister).Methods("POST")
	r.HandleFunc("/login", postLogin).Methods("POST")
	r.HandleFunc("/admin", handleAdmin).Methods("GET")
	r.HandleFunc("/add_user", postAddUser).Methods("POST")
	r.HandleFunc("/change", postChange).Methods("POST")
	r.HandleFunc("/", handlePage).Methods("GET") // authorized page
	r.HandleFunc("/logout", handleLogout)
	http.Handle("/", r)
	// ListenAndServe always returns a non-nil error; don't drop it.
	if err = http.ListenAndServe(":8080", nil); err != nil {
		panic(err)
	}
}
// getLogin renders the combined login/registration entry page, including any
// flash messages httpauth has queued for this session.
func getLogin(rw http.ResponseWriter, req *http.Request) {
	messages := aaa.Messages(rw, req)
	fmt.Fprintf(rw, `
    <html>
        <head><title>Login</title></head>
        <body>
            <h1>Httpauth example</h1>
            <h2>Entry Page</h2>
            <p><b>Messages: %v</b></p>
            <h3>Login</h3>
            <form action="/login" method="post" id="login">
                <input type="text" name="username" placeholder="username"><br>
                <input type="password" name="password" placeholder="password"></br>
                <button type="submit">Login</button>
            </form>
            <h3>Register</h3>
            <form action="/register" method="post" id="register">
                <input type="text" name="username" placeholder="username"><br>
                <input type="password" name="password" placeholder="password"></br>
                <input type="email" name="email" placeholder="email@example.com"></br>
                <button type="submit">Register</button>
            </form>
        </body>
    </html>
    `, messages)
}
// postLogin attempts to authenticate the posted credentials. An
// already-authenticated user is sent to "/"; any other failure bounces back
// to /login. On success httpauth itself redirects to "/".
// NOTE(review): matching on err.Error() text is brittle — confirm whether
// httpauth exposes a sentinel error for this case.
func postLogin(rw http.ResponseWriter, req *http.Request) {
	username := req.PostFormValue("username")
	password := req.PostFormValue("password")
	if err := aaa.Login(rw, req, username, password, "/"); err != nil && err.Error() == "already authenticated" {
		http.Redirect(rw, req, "/", http.StatusSeeOther)
	} else if err != nil {
		fmt.Println(err)
		http.Redirect(rw, req, "/login", http.StatusSeeOther)
	}
}
// postRegister creates a new account (default role) from the posted form
// and, on success, logs the user straight in; otherwise back to /login.
func postRegister(rw http.ResponseWriter, req *http.Request) {
	var user httpauth.UserData
	user.Username = req.PostFormValue("username")
	user.Email = req.PostFormValue("email")
	password := req.PostFormValue("password")
	if err := aaa.Register(rw, req, user, password); err == nil {
		postLogin(rw, req)
	} else {
		http.Redirect(rw, req, "/login", http.StatusSeeOther)
	}
}
// postAddUser lets an admin create a user with an explicit role, then
// returns to the admin page.
//
// NOTE(review): this handler performs no AuthorizeRole check of its own; it
// relies on the form living behind /admin — confirm whether user creation
// should itself be role-gated.
func postAddUser(rw http.ResponseWriter, req *http.Request) {
	var user httpauth.UserData
	user.Username = req.PostFormValue("username")
	user.Email = req.PostFormValue("email")
	password := req.PostFormValue("password")
	user.Role = req.PostFormValue("role")
	if err := aaa.Register(rw, req, user, password); err != nil {
		// Don't swallow the failure silently; surface it in the server log
		// (the previous code had an empty branch here).
		fmt.Println(err)
	}
	http.Redirect(rw, req, "/admin", http.StatusSeeOther)
}
// postChange updates the logged-in user's email address.
// NOTE(review): the error from aaa.Update is ignored — the user is
// redirected to "/" even when the update failed; confirm httpauth's flash
// messages cover that case.
func postChange(rw http.ResponseWriter, req *http.Request) {
	email := req.PostFormValue("new_email")
	aaa.Update(rw, req, "", email)
	http.Redirect(rw, req, "/", http.StatusSeeOther)
}
// handlePage renders the authorized front page via html/template, showing
// the current user's name, role and email, with an /admin link for admins.
// NOTE(review): the template is re-parsed on every request and the
// t.Execute error is discarded; the markup also lacks a closing </html>.
func handlePage(rw http.ResponseWriter, req *http.Request) {
	if err := aaa.Authorize(rw, req, true); err != nil {
		fmt.Println(err)
		http.Redirect(rw, req, "/login", http.StatusSeeOther)
		return
	}
	if user, ok := aaa.CurrentUser(rw, req); ok {
		type data struct {
			User httpauth.UserData
		}
		d := data{User:user}
		t, err := template.New("page").Parse(`
    <html>
        <head><title>Secret page</title></head>
        <body>
            <h1>Httpauth example<h1>
            {{ with .User }}
                <h2>Hello {{ .Username }}</h2>
                <p>Your role is '{{ .Role }}'. Your email is {{ .Email }}.</p>
                <p>{{ if .Role | eq "admin" }}<a href="/admin">Admin page</a> {{ end }}<a href="/logout">Logout</a></p>
            {{ end }}
            <form action="/change" method="post" id="change">
                <h3>Change email</h3>
                <p><input type="email" name="new_email" placeholder="new email"></p>
                <button type="submit">Submit</button>
            </form>
        </body>
    `)
		if err != nil {
			panic(err)
		}
		t.Execute(rw, d)
	}
}
// handleAdmin renders the admin page (user list plus an add-user form) for
// users holding the "admin" role; everyone else is bounced to /login.
// NOTE(review): the template is re-parsed on every request and the
// t.Execute error is discarded.
func handleAdmin(rw http.ResponseWriter, req *http.Request) {
	if err := aaa.AuthorizeRole(rw, req, "admin", true); err != nil {
		fmt.Println(err)
		http.Redirect(rw, req, "/login", http.StatusSeeOther)
		return
	}
	if user, ok := aaa.CurrentUser(rw, req); ok {
		type data struct {
			User httpauth.UserData
			Roles map[string]httpauth.Role
			Users []httpauth.UserData
			Msg []string
		}
		messages := aaa.Messages(rw, req)
		d := data{User:user, Roles:roles, Users:backend.Users(), Msg:messages}
		t, err := template.New("admin").Parse(`
    <html>
        <head><title>Admin page</title></head>
        <body>
            <h1>Httpauth example<h1>
            <h2>Admin Page</h2>
            <p>{{.Msg}}</p>
            {{ with .User }}<p>Hello {{ .Username }}, your role is '{{ .Role }}'. Your email is {{ .Email }}.</p>{{ end }}
            <p><a href="/">Back</a> <a href="/logout">Logout</a></p>
            <h3>Users</h3>
            <ul>{{ range .Users }}<li>{{.Username}}</li>{{ end }}</ul>
            <form action="/add_user" method="post" id="add_user">
                <h3>Add user</h3>
                <p><input type="text" name="username" placeholder="username"><br>
                <input type="password" name="password" placeholder="password"><br>
                <input type="email" name="email" placeholder="email"><br>
                <select name="role">
                    <option value="">role<option>
                    {{ range $key, $val := .Roles }}<option value="{{$key}}">{{$key}} - {{$val}}</option>{{ end }}
                </select></p>
                <button type="submit">Submit</button>
            </form>
        </body>
    `)
		if err != nil {
			panic(err)
		}
		t.Execute(rw, d)
	}
}
// handleLogout ends the session and redirects to the front page, which in
// turn bounces now-unauthenticated visitors to /login.
func handleLogout(rw http.ResponseWriter, req *http.Request) {
	if err := aaa.Logout(rw, req); err != nil {
		fmt.Println(err)
		// this shouldn't happen
		return
	}
	http.Redirect(rw, req, "/", http.StatusSeeOther)
}
|
// Package methdecl tests code facts for a method declaration.
package methdecl
type w struct{}
//- @LessThan defines/binding LT
//- LT code LTCode
//-
//- LTCode child.0 LTFunc
//- LTCode child.1 LTRecv
//- LTCode child.2 LTName
//- LTCode child.3 LTParams
//- LTCode child.4 LTResult
//-
//- LTFunc.pre_text "func "
//-
//- LTRecv.kind "PARAMETER"
//- LTRecv.pre_text "("
//- LTRecv.post_text ") "
//- LTRecv child.0 LTRType
//-
//- LTName child.0 LTContext
//- LTName child.1 LTIdent
//-
//- LTParams.kind "PARAMETER_LOOKUP_BY_PARAM"
//- LTParams.lookup_index 1
//- LTParams.pre_text "("
//- LTParams.post_text ")"
//- LTParams.post_child_text ", "
//-
//- LTResult.pre_text " "
//- LTResult.kind "TYPE"
//- LTResult child.0 LTReturn
//- LTReturn.pre_text "bool"
//-
//- LTRType.kind "TYPE"
//- LTRType.pre_text "*w"
//-
//- LTContext.kind "CONTEXT"
//- LTContext.post_child_text "."
//- LTContext child.0 LTPkg
//- LTContext child.1 LTCType
//- LTPkg.pre_text "methdecl"
//- LTCType.pre_text "w"
//- LTIdent.pre_text "LessThan"
//-
//- @x defines/binding LTX
//- LTX code XCode
//- XCode child.0 XName
//- XName child.0 XCtx
//- XCtx.kind "CONTEXT"
//- XCtx child.0 XPkg
//- XCtx child.1 XRec
//- XCtx child.2 XFun
//- XName child.1 XId
//- XPkg.kind "IDENTIFIER"
//- XPkg.pre_text "methdecl"
//- XRec.kind "IDENTIFIER"
//- XRec.pre_text "w"
//- XFun.kind "IDENTIFIER"
//- XFun.pre_text "LessThan"
//- XId.kind "IDENTIFIER"
//- XId.pre_text "x"
//-
func (rec *w) LessThan(x int) bool {
	// Trivial body; this method exists to exercise the marked-source facts
	// asserted in the //- comments above.
	return x < 0
}
chore(go_indexer): Add a marked source test for methods (#4747)
This doesn't change any functionality; it records the current marked-source
behavior for Go methods.
// Package methdecl tests code facts for a method declaration.
package methdecl
type w struct{}
//- @LessThan defines/binding LT
//- LT code LTCode
//-
//- LTCode child.0 LTFunc
//- LTCode child.1 LTRecv
//- LTCode child.2 LTName
//- LTCode child.3 LTParams
//- LTCode child.4 LTResult
//-
//- LTFunc.pre_text "func "
//-
//- LTRecv.kind "PARAMETER"
//- LTRecv.pre_text "("
//- LTRecv.post_text ") "
//- LTRecv child.0 LTRType
//-
//- LTName child.0 LTContext
//- LTName child.1 LTIdent
//-
//- LTParams.kind "PARAMETER_LOOKUP_BY_PARAM"
//- LTParams.lookup_index 1
//- LTParams.pre_text "("
//- LTParams.post_text ")"
//- LTParams.post_child_text ", "
//-
//- LTResult.pre_text " "
//- LTResult.kind "TYPE"
//- LTResult child.0 LTReturn
//- LTReturn.pre_text "bool"
//-
//- LTRType.kind "TYPE"
//- LTRType.pre_text "*w"
//-
//- LTContext.kind "CONTEXT"
//- LTContext.post_child_text "."
//- LTContext child.0 LTPkg
//- LTContext child.1 LTCType
//- LTPkg.pre_text "methdecl"
//- LTCType.pre_text "w"
//- LTIdent.pre_text "LessThan"
//-
//- @x defines/binding LTX
//- LTX code XCode
//- XCode child.0 XName
//- XName child.0 XCtx
//- XCtx.kind "CONTEXT"
//- XCtx child.0 XPkg
//- XCtx child.1 XRec
//- XCtx child.2 XFun
//- XName child.1 XId
//- XPkg.kind "IDENTIFIER"
//- XPkg.pre_text "methdecl"
//- XRec.kind "IDENTIFIER"
//- XRec.pre_text "w"
//- XFun.kind "IDENTIFIER"
//- XFun.pre_text "LessThan"
//- XId.kind "IDENTIFIER"
//- XId.pre_text "x"
//-
// LessThan reports whether x is negative; it is the method exercised by the
// marked-source verifier annotations above (do not edit the //- lines).
func (rec *w) LessThan(x int) bool {
	return x < 0
}
// decorCommand is a stub receiver type for the Run marked-source test below.
type decorCommand struct{}

// Context is a stub parameter type for the Run marked-source test.
type Context interface{}

// FlagSet is a stub parameter type for the Run marked-source test.
type FlagSet struct{}

// API is a stub parameter type for the Run marked-source test.
type API struct{}
//- @Run defines/binding RunFunc
//- RunFunc code RFCode
//-
//- RFCode child.0 RFFunc
//- RFCode child.1 RFRecv
//- RFCode child.2 RFName
//- RFCode child.3 RFParams
//- RFCode child.4 RFResult
//-
//- RFFunc.pre_text "func "
//-
//- RFRecv.kind "PARAMETER"
//- RFRecv.pre_text "("
//- RFRecv.post_text ") "
//- RFRecv child.0 RFRType
//-
//- RFName child.0 RFContext
//- RFName child.1 RFIdent
//-
//- RFParams.kind "PARAMETER_LOOKUP_BY_PARAM"
//- RFParams.lookup_index 1
//- RFParams.pre_text "("
//- RFParams.post_text ")"
//- RFParams.post_child_text ", "
//-
//- RFResult.pre_text " "
//- RFResult.kind "TYPE"
//- RFResult child.0 RFReturn
//- RFReturn.pre_text "error"
//-
//- RFRType.kind "TYPE"
//- RFRType.pre_text "decorCommand"
//-
//- RFContext.kind "CONTEXT"
//- RFContext.post_child_text "."
//- RFContext child.0 RFPkg
//- RFContext child.1 RFCType
//- RFPkg.pre_text "methdecl"
//- RFCType.pre_text "decorCommand"
//- RFIdent.pre_text "Run"
//-
//- @ctx defines/binding RFCtx
//- RFCtx code CtxCode
//- CtxCode child.0 CtxName
//- CtxCode child.1 CtxType
//- CtxName child.0 CtxCtx
//- CtxCtx.kind "CONTEXT"
//- CtxCtx child.0 CtxPkg
//- CtxCtx child.1 CtxRec
//- CtxCtx child.2 CtxFun
//- CtxName child.1 CtxId
//- CtxPkg.kind "IDENTIFIER"
//- CtxPkg.pre_text "methdecl"
//- CtxRec.kind "IDENTIFIER"
//- CtxRec.pre_text "decorCommand"
//- CtxFun.kind "IDENTIFIER"
//- CtxFun.pre_text "Run"
//- CtxId.kind "IDENTIFIER"
//- CtxId.pre_text "ctx"
//- CtxType.kind "LOOKUP_BY_TYPED"
//- CtxType.lookup_index 0
//-
//- @Context ref CtxTypeValue
//- CtxTypeValue code CtxTypeValueCode
//- CtxTypeValueCode child.0 CtxTypeValueCodeCtx
//- CtxTypeValueCodeCtx.kind "CONTEXT"
//- CtxTypeValueCodeCtx.post_child_text "."
//- CtxTypeValueCodeCtx child.0 CtxTypeValueCodeCtxChild
//- CtxTypeValueCodeCtxChild.kind "IDENTIFIER"
//- CtxTypeValueCodeCtxChild.pre_text "methdecl"
//- CtxTypeValueCode child.1 CtxTypeValueCodeID
//- CtxTypeValueCodeID.kind "IDENTIFIER"
//- CtxTypeValueCodeID.pre_text "Context"
//-
//- @flag defines/binding RFFlag
//- RFFlag code FlagCode
//- FlagCode child.0 FlagName
//- FlagName child.0 FlagCtx
//- FlagCtx.kind "CONTEXT"
//- FlagCtx child.0 FlagPkg
//- FlagCtx child.1 FlagRec
//- FlagCtx child.2 FlagFun
//- FlagName child.1 FlagId
//- FlagPkg.kind "IDENTIFIER"
//- FlagPkg.pre_text "methdecl"
//- FlagRec.kind "IDENTIFIER"
//- FlagRec.pre_text "decorCommand"
//- FlagFun.kind "IDENTIFIER"
//- FlagFun.pre_text "Run"
//- FlagId.kind "IDENTIFIER"
//- FlagId.pre_text "flag"
//-
//- @FlagSet ref FlagTypeValue
//- FlagTypeValue code FlagTypeValueCode
//- FlagTypeValueCode child.0 FlagTypeValueCodeCtx
//- FlagTypeValueCode child.1 FlagTypeValueCodeID
//- FlagTypeValueCodeCtx.kind "CONTEXT"
//- FlagTypeValueCodeCtx.post_child_text "."
//- FlagTypeValueCodeCtx child.0 FlagTypeValueCodeCtxChild
//- FlagTypeValueCodeCtxChild.kind "IDENTIFIER"
//- FlagTypeValueCodeCtxChild.pre_text "methdecl"
//- FlagTypeValueCodeID.kind "IDENTIFIER"
//- FlagTypeValueCodeID.pre_text "FlagSet"
//-
//- @api defines/binding RFApi
//- RFApi code ApiCode
//- ApiCode child.0 ApiName
//- ApiName child.0 ApiCtx
//- ApiCtx.kind "CONTEXT"
//- ApiCtx child.0 ApiPkg
//- ApiCtx child.1 ApiRec
//- ApiCtx child.2 ApiFun
//- ApiName child.1 ApiId
//- ApiPkg.kind "IDENTIFIER"
//- ApiPkg.pre_text "methdecl"
//- ApiRec.kind "IDENTIFIER"
//- ApiRec.pre_text "decorCommand"
//- ApiFun.kind "IDENTIFIER"
//- ApiFun.pre_text "Run"
//- ApiId.kind "IDENTIFIER"
//- ApiId.pre_text "api"
//-
//- @API ref ApiTypeValue
//- ApiTypeValue code ApiTypeValueCode
//- ApiTypeValueCode child.0 ApiTypeValueCodeCtx
//- ApiTypeValueCode child.1 ApiTypeValueCodeID
//- ApiTypeValueCodeCtx.kind "CONTEXT"
//- ApiTypeValueCodeCtx.post_child_text "."
//- ApiTypeValueCodeCtx child.0 ApiTypeValueCodeCtxChild
//- ApiTypeValueCodeCtxChild.kind "IDENTIFIER"
//- ApiTypeValueCodeCtxChild.pre_text "methdecl"
//- ApiTypeValueCodeID.kind "IDENTIFIER"
//- ApiTypeValueCodeID.pre_text "API"
// Run is the method exercised by the marked-source verifier annotations
// above; it ignores its arguments and always returns nil.
func (c decorCommand) Run(ctx Context, flag *FlagSet, api API) error {
	return nil
}
|
package net2
import (
"net"
"strings"
"time"
rp "github.com/dropbox/godropbox/resource_pool"
)
// defaultDialTimeout bounds the dialer installed when ConnectionOptions.Dial
// is unset.
const defaultDialTimeout = 500 * time.Millisecond
// parseResourceLocation splits a resource location of the form
// "<network> <address>" on its first space. If the string contains no
// space, the whole string is the address and the network is empty.
func parseResourceLocation(resourceLocation string) (
	network string,
	address string) {

	parts := strings.SplitN(resourceLocation, " ", 2)
	if len(parts) == 2 {
		return parts[0], parts[1]
	}
	return "", resourceLocation
}
// A thin wrapper around the underlying resource pool.
type BaseConnectionPool struct {
	options ConnectionOptions // connection settings (dial func, limits, clock)
	pool    rp.ResourcePool   // backing pool managing the raw handles
}
// This returns a connection pool where all connections are connected
// to the same (network, address).
//
// If options.Dial is nil, a default dialer bounded by defaultDialTimeout
// is installed.
func newBaseConnectionPool(
	options ConnectionOptions,
	createPool func(rp.Options) rp.ResourcePool) ConnectionPool {

	dial := options.Dial
	if dial == nil {
		dial = func(network string, address string) (net.Conn, error) {
			// BUG FIX: the original closure dropped the result of
			// net.DialTimeout and had no return statement, which does
			// not compile; return the connection and error to the caller.
			return net.DialTimeout(network, address, defaultDialTimeout)
		}
	}

	// Adapt the (network, address) dialer to the pool's string-keyed
	// open function; keys are "<network> <address>".
	openFunc := func(loc string) (interface{}, error) {
		network, address := parseResourceLocation(loc)
		return dial(network, address)
	}

	closeFunc := func(handle interface{}) error {
		return handle.(net.Conn).Close()
	}

	poolOptions := rp.Options{
		MaxActiveHandles: options.MaxActiveConnections,
		MaxIdleHandles:   options.MaxIdleConnections,
		MaxIdleTime:      options.MaxIdleTime,
		Open:             openFunc,
		Close:            closeFunc,
		NowFunc:          options.NowFunc,
	}

	return &BaseConnectionPool{
		options: options,
		pool:    createPool(poolOptions),
	}
}
// NewSimpleConnectionPool returns a connection pool where all connections
// are connected to the same (network, address).
func NewSimpleConnectionPool(options ConnectionOptions) ConnectionPool {
	return newBaseConnectionPool(options, rp.NewSimpleResourcePool)
}

// NewMultiConnectionPool returns a connection pool that manages multiple
// (network, address) entries. The connections to each (network, address)
// entry act independently. For example ("tcp", "localhost:11211") could act
// as memcache shard 0 and ("tcp", "localhost:11212") could act as memcache
// shard 1.
func NewMultiConnectionPool(options ConnectionOptions) ConnectionPool {
	return newBaseConnectionPool(
		options,
		func(poolOptions rp.Options) rp.ResourcePool {
			// nil: no per-shard pool option overrides.
			return rp.NewMultiResourcePool(poolOptions, nil)
		})
}
// NumActive returns the number of active connections. See ConnectionPool
// for documentation.
func (p *BaseConnectionPool) NumActive() int32 {
	return p.pool.NumActive()
}

// NumIdle returns the number of alive idle connections. This method is not
// part of ConnectionPool's API. It is used only for testing.
func (p *BaseConnectionPool) NumIdle() int {
	return p.pool.NumIdle()
}

// Register records the single (network, address) entry this pool connects
// to, encoded as "<network> <address>". BaseConnectionPool can only
// register a single entry; Register should be called before any Get calls.
func (p *BaseConnectionPool) Register(network string, address string) error {
	return p.pool.Register(network + " " + address)
}

// Unregister is a no-op: BaseConnectionPool has nothing to do on Unregister.
func (p *BaseConnectionPool) Unregister(network string, address string) error {
	return nil
}
// ListRegistered returns the (network, address) entries registered with the
// underlying pool, decoded from their "<network> <address>" string form.
func (p *BaseConnectionPool) ListRegistered() []NetworkAddress {
	result := make([]NetworkAddress, 0, 1)
	for _, location := range p.pool.ListRegistered() {
		network, address := parseResourceLocation(location)
		result = append(
			result,
			NetworkAddress{
				Network: network,
				Address: address,
			})
	}
	return result
}

// Get returns an active connection from the connection pool. network and
// address are combined into the pool's lookup key, so they should match
// the entry provided by the first Register call.
func (p *BaseConnectionPool) Get(
	network string,
	address string) (ManagedConn, error) {
	handle, err := p.pool.Get(network + " " + address)
	if err != nil {
		return nil, err
	}
	return NewManagedConn(network, address, handle, p, p.options), nil
}
// Release delegates to conn.ReleaseConnection(). See ConnectionPool for
// documentation.
func (p *BaseConnectionPool) Release(conn ManagedConn) error {
	return conn.ReleaseConnection()
}

// Discard delegates to conn.DiscardConnection(). See ConnectionPool for
// documentation.
func (p *BaseConnectionPool) Discard(conn ManagedConn) error {
	return conn.DiscardConnection()
}

// EnterLameDuckMode forwards to the underlying pool. See ConnectionPool
// for documentation.
func (p *BaseConnectionPool) EnterLameDuckMode() {
	p.pool.EnterLameDuckMode()
}
shrink default timeout
package net2
import (
"net"
"strings"
"time"
rp "github.com/dropbox/godropbox/resource_pool"
)
// defaultDialTimeout bounds the dialer installed when ConnectionOptions.Dial
// is unset.
const defaultDialTimeout = 100 * time.Millisecond
// parseResourceLocation decodes a "<network> <address>" resource location.
// A string with no space is interpreted as a bare address with an empty
// network.
func parseResourceLocation(resourceLocation string) (
	network string,
	address string) {

	if n, a, found := strings.Cut(resourceLocation, " "); found {
		return n, a
	}
	return "", resourceLocation
}
// A thin wrapper around the underlying resource pool.
type BaseConnectionPool struct {
	options ConnectionOptions // connection settings (dial func, limits, clock)
	pool    rp.ResourcePool   // backing pool managing the raw handles
}
// This returns a connection pool where all connections are connected
// to the same (network, address). When options.Dial is unset, a default
// dialer bounded by defaultDialTimeout is installed.
func newBaseConnectionPool(
	options ConnectionOptions,
	createPool func(rp.Options) rp.ResourcePool) ConnectionPool {

	dialFunc := options.Dial
	if dialFunc == nil {
		dialFunc = func(network string, address string) (net.Conn, error) {
			return net.DialTimeout(network, address, defaultDialTimeout)
		}
	}

	return &BaseConnectionPool{
		options: options,
		pool: createPool(rp.Options{
			MaxActiveHandles: options.MaxActiveConnections,
			MaxIdleHandles:   options.MaxIdleConnections,
			MaxIdleTime:      options.MaxIdleTime,
			// Open decodes the "<network> <address>" pool key and dials it.
			Open: func(loc string) (interface{}, error) {
				network, address := parseResourceLocation(loc)
				return dialFunc(network, address)
			},
			Close: func(handle interface{}) error {
				return handle.(net.Conn).Close()
			},
			NowFunc: options.NowFunc,
		}),
	}
}
// NewSimpleConnectionPool returns a connection pool where all connections
// are connected to the same (network, address).
func NewSimpleConnectionPool(options ConnectionOptions) ConnectionPool {
	return newBaseConnectionPool(options, rp.NewSimpleResourcePool)
}

// NewMultiConnectionPool returns a connection pool that manages multiple
// (network, address) entries. The connections to each (network, address)
// entry act independently. For example ("tcp", "localhost:11211") could act
// as memcache shard 0 and ("tcp", "localhost:11212") could act as memcache
// shard 1.
func NewMultiConnectionPool(options ConnectionOptions) ConnectionPool {
	return newBaseConnectionPool(
		options,
		func(poolOptions rp.Options) rp.ResourcePool {
			// nil: no per-shard pool option overrides.
			return rp.NewMultiResourcePool(poolOptions, nil)
		})
}
// NumActive returns the number of active connections. See ConnectionPool
// for documentation.
func (p *BaseConnectionPool) NumActive() int32 {
	return p.pool.NumActive()
}

// NumIdle returns the number of alive idle connections. This method is not
// part of ConnectionPool's API. It is used only for testing.
func (p *BaseConnectionPool) NumIdle() int {
	return p.pool.NumIdle()
}

// Register records the single (network, address) entry this pool connects
// to, encoded as "<network> <address>". BaseConnectionPool can only
// register a single entry; Register should be called before any Get calls.
func (p *BaseConnectionPool) Register(network string, address string) error {
	return p.pool.Register(network + " " + address)
}

// Unregister is a no-op: BaseConnectionPool has nothing to do on Unregister.
func (p *BaseConnectionPool) Unregister(network string, address string) error {
	return nil
}
// ListRegistered returns the (network, address) entries registered with the
// underlying pool, decoded from their "<network> <address>" string form.
func (p *BaseConnectionPool) ListRegistered() []NetworkAddress {
	result := make([]NetworkAddress, 0, 1)
	for _, location := range p.pool.ListRegistered() {
		network, address := parseResourceLocation(location)
		result = append(
			result,
			NetworkAddress{
				Network: network,
				Address: address,
			})
	}
	return result
}

// Get returns an active connection from the connection pool. network and
// address are combined into the pool's lookup key, so they should match
// the entry provided by the first Register call.
func (p *BaseConnectionPool) Get(
	network string,
	address string) (ManagedConn, error) {
	handle, err := p.pool.Get(network + " " + address)
	if err != nil {
		return nil, err
	}
	return NewManagedConn(network, address, handle, p, p.options), nil
}
// Release delegates to conn.ReleaseConnection(). See ConnectionPool for
// documentation.
func (p *BaseConnectionPool) Release(conn ManagedConn) error {
	return conn.ReleaseConnection()
}

// Discard delegates to conn.DiscardConnection(). See ConnectionPool for
// documentation.
func (p *BaseConnectionPool) Discard(conn ManagedConn) error {
	return conn.DiscardConnection()
}

// EnterLameDuckMode forwards to the underlying pool. See ConnectionPool
// for documentation.
func (p *BaseConnectionPool) EnterLameDuckMode() {
	p.pool.EnterLameDuckMode()
}
|
package protobuf
import (
"encoding/binary"
"errors"
"fmt"
"github.com/golang/protobuf/proto"
"github.com/name5566/leaf/log"
"github.com/name5566/leaf/util"
"math"
"reflect"
)
// Wire format: a 2-byte message id followed by the protobuf-encoded body.
// -------------------------
// | id | protobuf message |
// -------------------------

// Processor maps protobuf message types to 16-bit ids and routes decoded
// messages to their registered router or handler.
type Processor struct {
	littleEndian bool                    // byte order of the 2-byte id prefix
	msgInfo      []*MsgInfo              // indexed by message id
	msgID        map[reflect.Type]uint16 // message pointer type -> id
}

// MsgInfo holds the per-message routing configuration.
type MsgInfo struct {
	msgType    reflect.Type     // pointer type of the registered message
	msgRouter  *util.CallRouter // optional async router (see SetRouter)
	msgHandler MsgHandler       // optional synchronous handler (see SetHandler)
}

// MsgHandler is invoked with the pair [msg, userData] for a routed message.
type MsgHandler func([]interface{})
// NewProcessor creates an empty Processor with big-endian id encoding
// (littleEndian == false) and no registered messages.
func NewProcessor() *Processor {
	return &Processor{
		littleEndian: false,
		msgID:        make(map[reflect.Type]uint16),
	}
}
// SetByteOrder selects the byte order used for the 2-byte id prefix.
// It's dangerous to call the method on routing or marshaling (unmarshaling).
func (p *Processor) SetByteOrder(littleEndian bool) {
	p.littleEndian = littleEndian
}
// Register assigns the next free 16-bit id to msg's pointer type.
// It's dangerous to call the method on routing or marshaling (unmarshaling).
// log.Fatal is invoked on a non-pointer message, a duplicate registration,
// or when the 16-bit id space is exhausted.
func (p *Processor) Register(msg proto.Message) {
	msgType := reflect.TypeOf(msg)
	if msgType == nil || msgType.Kind() != reflect.Ptr {
		log.Fatal("protobuf message pointer required")
	}
	if _, ok := p.msgID[msgType]; ok {
		log.Fatal("message %s is already registered", msgType)
	}
	if len(p.msgInfo) >= math.MaxUint16 {
		log.Fatal("too many protobuf messages (max = %v)", math.MaxUint16)
	}
	// The id is the message's index in msgInfo.
	i := new(MsgInfo)
	i.msgType = msgType
	p.msgInfo = append(p.msgInfo, i)
	p.msgID[msgType] = uint16(len(p.msgInfo) - 1)
}
// SetRouter directs msg's routed calls to msgRouter (used asynchronously by
// Route). It's dangerous to call the method on routing or marshaling
// (unmarshaling). log.Fatal is invoked if msg was never registered.
func (p *Processor) SetRouter(msg proto.Message, msgRouter *util.CallRouter) {
	msgType := reflect.TypeOf(msg)
	id, ok := p.msgID[msgType]
	if !ok {
		log.Fatal("message %s not registered", msgType)
	}
	p.msgInfo[id].msgRouter = msgRouter
}

// SetHandler installs a synchronous handler for msg (invoked by Route).
// It's dangerous to call the method on routing or marshaling (unmarshaling).
// log.Fatal is invoked if msg was never registered.
func (p *Processor) SetHandler(msg proto.Message, msgHandler MsgHandler) {
	msgType := reflect.TypeOf(msg)
	id, ok := p.msgID[msgType]
	if !ok {
		log.Fatal("message %s not registered", msgType)
	}
	p.msgInfo[id].msgHandler = msgHandler
}
// Route dispatches msg: the handler (if set) is invoked synchronously with
// [msg, userData], then the router (if set) is invoked via AsynCall0. An
// error is returned for unregistered message types.
// goroutine safe
func (p *Processor) Route(msg proto.Message, userData interface{}) error {
	msgType := reflect.TypeOf(msg)
	id, ok := p.msgID[msgType]
	if !ok {
		// Idiom fix: fmt.Errorf instead of errors.New(fmt.Sprintf(...)).
		return fmt.Errorf("message %s not registered", msgType)
	}
	i := p.msgInfo[id]
	if i.msgHandler != nil {
		i.msgHandler([]interface{}{msg, userData})
	}
	if i.msgRouter != nil {
		i.msgRouter.AsynCall0(msgType, msg, userData)
	}
	return nil
}
// Unmarshal decodes a [2-byte id | protobuf body] frame into a freshly
// allocated message of the registered type.
// goroutine safe
func (p *Processor) Unmarshal(data []byte) (proto.Message, error) {
	if len(data) < 2 {
		return nil, errors.New("protobuf data too short")
	}
	// id: first two bytes, in the byte order configured by SetByteOrder.
	var id uint16
	if p.littleEndian {
		id = binary.LittleEndian.Uint16(data)
	} else {
		id = binary.BigEndian.Uint16(data)
	}
	// msg: allocate a new instance of the registered pointer type's element.
	if id >= uint16(len(p.msgInfo)) {
		// Idiom fix: fmt.Errorf instead of errors.New(fmt.Sprintf(...)).
		return nil, fmt.Errorf("message id %v not registered", id)
	}
	msg := reflect.New(p.msgInfo[id].msgType.Elem()).Interface().(proto.Message)
	return msg, proto.UnmarshalMerge(data[2:], msg)
}
// Marshal encodes msg as two byte slices: the 2-byte id prefix and the
// protobuf-encoded body (the caller writes them back to back).
// goroutine safe
func (p *Processor) Marshal(msg proto.Message) (id []byte, data []byte, err error) {
	msgType := reflect.TypeOf(msg)
	// id
	_id, ok := p.msgID[msgType]
	if !ok {
		// Idiom fix: fmt.Errorf instead of errors.New(fmt.Sprintf(...)).
		err = fmt.Errorf("message %s not registered", msgType)
		return
	}
	id = make([]byte, 2)
	if p.littleEndian {
		binary.LittleEndian.PutUint16(id, _id)
	} else {
		binary.BigEndian.PutUint16(id, _id)
	}
	// data
	data, err = proto.Marshal(msg)
	return
}
add range method
package protobuf
import (
"encoding/binary"
"errors"
"fmt"
"github.com/golang/protobuf/proto"
"github.com/name5566/leaf/log"
"github.com/name5566/leaf/util"
"math"
"reflect"
)
// Wire format: a 2-byte message id followed by the protobuf-encoded body.
// -------------------------
// | id | protobuf message |
// -------------------------

// Processor maps protobuf message types to 16-bit ids and routes decoded
// messages to their registered router or handler.
type Processor struct {
	littleEndian bool                    // byte order of the 2-byte id prefix
	msgInfo      []*MsgInfo              // indexed by message id
	msgID        map[reflect.Type]uint16 // message pointer type -> id
}

// MsgInfo holds the per-message routing configuration.
type MsgInfo struct {
	msgType    reflect.Type     // pointer type of the registered message
	msgRouter  *util.CallRouter // optional async router (see SetRouter)
	msgHandler MsgHandler       // optional synchronous handler (see SetHandler)
}

// MsgHandler is invoked with the pair [msg, userData] for a routed message.
type MsgHandler func([]interface{})
// NewProcessor creates an empty Processor with big-endian id encoding
// (littleEndian == false) and no registered messages.
func NewProcessor() *Processor {
	return &Processor{
		littleEndian: false,
		msgID:        make(map[reflect.Type]uint16),
	}
}
// SetByteOrder selects the byte order used for the 2-byte id prefix.
// It's dangerous to call the method on routing or marshaling (unmarshaling).
func (p *Processor) SetByteOrder(littleEndian bool) {
	p.littleEndian = littleEndian
}
// Register assigns the next free 16-bit id to msg's pointer type.
// It's dangerous to call the method on routing or marshaling (unmarshaling).
// log.Fatal is invoked on a non-pointer message, a duplicate registration,
// or when the 16-bit id space is exhausted.
func (p *Processor) Register(msg proto.Message) {
	msgType := reflect.TypeOf(msg)
	if msgType == nil || msgType.Kind() != reflect.Ptr {
		log.Fatal("protobuf message pointer required")
	}
	if _, ok := p.msgID[msgType]; ok {
		log.Fatal("message %s is already registered", msgType)
	}
	if len(p.msgInfo) >= math.MaxUint16 {
		log.Fatal("too many protobuf messages (max = %v)", math.MaxUint16)
	}
	// The id is the message's index in msgInfo.
	i := new(MsgInfo)
	i.msgType = msgType
	p.msgInfo = append(p.msgInfo, i)
	p.msgID[msgType] = uint16(len(p.msgInfo) - 1)
}
// SetRouter directs msg's routed calls to msgRouter (used asynchronously by
// Route). It's dangerous to call the method on routing or marshaling
// (unmarshaling). log.Fatal is invoked if msg was never registered.
func (p *Processor) SetRouter(msg proto.Message, msgRouter *util.CallRouter) {
	msgType := reflect.TypeOf(msg)
	id, ok := p.msgID[msgType]
	if !ok {
		log.Fatal("message %s not registered", msgType)
	}
	p.msgInfo[id].msgRouter = msgRouter
}

// SetHandler installs a synchronous handler for msg (invoked by Route).
// It's dangerous to call the method on routing or marshaling (unmarshaling).
// log.Fatal is invoked if msg was never registered.
func (p *Processor) SetHandler(msg proto.Message, msgHandler MsgHandler) {
	msgType := reflect.TypeOf(msg)
	id, ok := p.msgID[msgType]
	if !ok {
		log.Fatal("message %s not registered", msgType)
	}
	p.msgInfo[id].msgHandler = msgHandler
}
// Route dispatches msg: the handler (if set) is invoked synchronously with
// [msg, userData], then the router (if set) is invoked via AsynCall0. An
// error is returned for unregistered message types.
// goroutine safe
func (p *Processor) Route(msg proto.Message, userData interface{}) error {
	msgType := reflect.TypeOf(msg)
	id, ok := p.msgID[msgType]
	if !ok {
		// Idiom fix: fmt.Errorf instead of errors.New(fmt.Sprintf(...)).
		return fmt.Errorf("message %s not registered", msgType)
	}
	i := p.msgInfo[id]
	if i.msgHandler != nil {
		i.msgHandler([]interface{}{msg, userData})
	}
	if i.msgRouter != nil {
		i.msgRouter.AsynCall0(msgType, msg, userData)
	}
	return nil
}
// Unmarshal decodes a [2-byte id | protobuf body] frame into a freshly
// allocated message of the registered type.
// goroutine safe
func (p *Processor) Unmarshal(data []byte) (proto.Message, error) {
	if len(data) < 2 {
		return nil, errors.New("protobuf data too short")
	}
	// id: first two bytes, in the byte order configured by SetByteOrder.
	var id uint16
	if p.littleEndian {
		id = binary.LittleEndian.Uint16(data)
	} else {
		id = binary.BigEndian.Uint16(data)
	}
	// msg: allocate a new instance of the registered pointer type's element.
	if id >= uint16(len(p.msgInfo)) {
		// Idiom fix: fmt.Errorf instead of errors.New(fmt.Sprintf(...)).
		return nil, fmt.Errorf("message id %v not registered", id)
	}
	msg := reflect.New(p.msgInfo[id].msgType.Elem()).Interface().(proto.Message)
	return msg, proto.UnmarshalMerge(data[2:], msg)
}
// Marshal encodes msg as two byte slices: the 2-byte id prefix and the
// protobuf-encoded body (the caller writes them back to back).
// goroutine safe
func (p *Processor) Marshal(msg proto.Message) (id []byte, data []byte, err error) {
	msgType := reflect.TypeOf(msg)
	// id
	_id, ok := p.msgID[msgType]
	if !ok {
		// Idiom fix: fmt.Errorf instead of errors.New(fmt.Sprintf(...)).
		err = fmt.Errorf("message %s not registered", msgType)
		return
	}
	id = make([]byte, 2)
	if p.littleEndian {
		binary.LittleEndian.PutUint16(id, _id)
	} else {
		binary.BigEndian.PutUint16(id, _id)
	}
	// data
	data, err = proto.Marshal(msg)
	return
}
// Range calls f for every registered message, passing its id and type.
// goroutine safe
//
// NOTE(review): iteration reads p.msgInfo without synchronization, so the
// "goroutine safe" claim holds only while no concurrent Register runs —
// confirm against callers.
func (p *Processor) Range(f func(id uint16, t reflect.Type)) {
	for id, i := range p.msgInfo {
		f(uint16(id), i.msgType)
	}
}
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package builder
import (
"os"
"path/filepath"
"regexp"
"mynewt.apache.org/newt/newt/newtutil"
"mynewt.apache.org/newt/newt/pkg"
"mynewt.apache.org/newt/newt/resolve"
"mynewt.apache.org/newt/newt/syscfg"
"mynewt.apache.org/newt/newt/toolchain"
"mynewt.apache.org/newt/util"
)
// BuildPackage wraps a resolved package with its build-time state.
type BuildPackage struct {
	rpkg *resolve.ResolvePackage
	// SourceDirectories comes from "pkg.src_dirs"; populated by CompilerInfo.
	SourceDirectories []string
	// ci caches the result of CompilerInfo.
	ci *toolchain.CompilerInfo
}

// NewBuildPackage creates a BuildPackage for the given resolved package.
func NewBuildPackage(rpkg *resolve.ResolvePackage) *BuildPackage {
	bpkg := &BuildPackage{
		rpkg: rpkg,
	}
	return bpkg
}
// collectDepsAux recursively iterates through a pkg's dependencies, adding
// each pkg encountered to the supplied set. The set doubles as the visited
// marker, so dependency cycles terminate.
func (bpkg *BuildPackage) collectDepsAux(b *Builder,
	set *map[*BuildPackage]bool) error {
	if (*set)[bpkg] {
		return nil
	}
	(*set)[bpkg] = true
	for _, dep := range bpkg.rpkg.Deps {
		dbpkg := b.PkgMap[dep.Rpkg]
		if dbpkg == nil {
			return util.FmtNewtError("Package not found %s; required by %s",
				dep.Rpkg.Lpkg.Name(), bpkg.rpkg.Lpkg.Name())
		}
		if err := dbpkg.collectDepsAux(b, set); err != nil {
			return err
		}
	}
	return nil
}
// collectDeps recursively iterates through a pkg's dependencies. The
// resulting array contains a pointer to each encountered pkg, in no
// particular order (map iteration).
func (bpkg *BuildPackage) collectDeps(b *Builder) ([]*BuildPackage, error) {
	set := map[*BuildPackage]bool{}
	err := bpkg.collectDepsAux(b, &set)
	if err != nil {
		return nil, err
	}
	// Idiom fix: range over keys directly ("for p := range", not
	// "for p, _ :="), and pre-size the result slice.
	arr := make([]*BuildPackage, 0, len(set))
	for p := range set {
		arr = append(arr, p)
	}
	return arr, nil
}
// recursiveIncludePaths calculates the include paths exported by the
// specified pkg and all of its recursive dependencies.
func (bpkg *BuildPackage) recursiveIncludePaths(
	b *Builder) ([]string, error) {
	deps, err := bpkg.collectDeps(b)
	if err != nil {
		return nil, err
	}
	incls := []string{}
	for _, p := range deps {
		incls = append(incls, p.publicIncludeDirs(b.targetBuilder.bspPkg)...)
	}
	return incls, nil
}
// expandFlags replaces instances of "@<repo-name>" in each flag with repo
// paths, modifying the slice in place.
func expandFlags(flags []string) {
	for i, f := range flags {
		newFlag, changed := newtutil.ReplaceRepoDesignators(f)
		if changed {
			flags[i] = newFlag
		}
	}
}
// BuildProfile retrieves the build package's build profile override, as
// specified in its `pkg.yml` file ("pkg.build_profile"). If the package
// does not override the build profile, "" is returned.
func (bpkg *BuildPackage) BuildProfile(b *Builder) string {
	settings := b.cfg.AllSettingsForLpkg(bpkg.rpkg.Lpkg)
	profile, err := bpkg.rpkg.Lpkg.PkgY.GetValString("pkg.build_profile", settings)
	util.OneTimeWarningError(err)
	return profile
}
// CompilerInfo returns the compiler settings for this package — flags,
// ignore patterns, source dirs, and include paths — read from the package's
// pkg.yml with the target's settings applied. The result is computed once
// and cached on the BuildPackage.
func (bpkg *BuildPackage) CompilerInfo(
	b *Builder) (*toolchain.CompilerInfo, error) {
	// If this package's compiler info has already been generated, return the
	// cached copy.
	if bpkg.ci != nil {
		return bpkg.ci, nil
	}
	ci := toolchain.NewCompilerInfo()
	settings := b.cfg.AllSettingsForLpkg(bpkg.rpkg.Lpkg)
	var err error
	// Read each set of flags and expand repo designators ("@<repo-name>") into
	// paths.
	ci.Cflags, err = bpkg.rpkg.Lpkg.PkgY.GetValStringSlice("pkg.cflags", settings)
	util.OneTimeWarningError(err)
	expandFlags(ci.Cflags)
	ci.CXXflags, err = bpkg.rpkg.Lpkg.PkgY.GetValStringSlice("pkg.cxxflags", settings)
	util.OneTimeWarningError(err)
	expandFlags(ci.CXXflags)
	ci.Lflags, err = bpkg.rpkg.Lpkg.PkgY.GetValStringSlice("pkg.lflags", settings)
	util.OneTimeWarningError(err)
	expandFlags(ci.Lflags)
	ci.Aflags, err = bpkg.rpkg.Lpkg.PkgY.GetValStringSlice("pkg.aflags", settings)
	util.OneTimeWarningError(err)
	expandFlags(ci.Aflags)
	// Package-specific injected settings get specified as C flags on the
	// command line. (Idiom fix: "for k := range" instead of "for k, _ :=".)
	for k := range bpkg.rpkg.Lpkg.InjectedSettings() {
		ci.Cflags = append(ci.Cflags, syscfg.FeatureToCflag(k))
	}
	ci.IgnoreFiles = []*regexp.Regexp{}
	ignPats, err := bpkg.rpkg.Lpkg.PkgY.GetValStringSlice(
		"pkg.ign_files", settings)
	util.OneTimeWarningError(err)
	for _, str := range ignPats {
		re, err := regexp.Compile(str)
		if err != nil {
			return nil, util.NewNewtError(
				"Ignore files, unable to compile re: " + err.Error())
		}
		ci.IgnoreFiles = append(ci.IgnoreFiles, re)
	}
	ci.IgnoreDirs = []*regexp.Regexp{}
	ignPats, err = bpkg.rpkg.Lpkg.PkgY.GetValStringSlice(
		"pkg.ign_dirs", settings)
	util.OneTimeWarningError(err)
	for _, str := range ignPats {
		re, err := regexp.Compile(str)
		if err != nil {
			return nil, util.NewNewtError(
				"Ignore dirs, unable to compile re: " + err.Error())
		}
		ci.IgnoreDirs = append(ci.IgnoreDirs, re)
	}
	bpkg.SourceDirectories, err = bpkg.rpkg.Lpkg.PkgY.GetValStringSlice(
		"pkg.src_dirs", settings)
	util.OneTimeWarningError(err)
	// Private include dirs first, then everything exported by dependencies.
	includePaths, err := bpkg.recursiveIncludePaths(b)
	if err != nil {
		return nil, err
	}
	ci.Includes = append(bpkg.privateIncludeDirs(b), includePaths...)
	bpkg.ci = ci
	return bpkg.ci, nil
}
// findSdkIncludes lists every directory under <pkg>/src/ext/, for use as
// SDK include paths. Any stat or walk error yields an empty list.
func (bpkg *BuildPackage) findSdkIncludes() []string {
	sdkDir := bpkg.rpkg.Lpkg.BasePath() + "/src/ext/"
	if _, err := os.Stat(sdkDir); err != nil {
		return []string{}
	}
	sdkPathList := []string{}
	err := filepath.Walk(sdkDir,
		func(path string, info os.FileInfo, err error) error {
			// BUG FIX: on a walk error info may be nil; propagate the
			// error instead of dereferencing info and panicking.
			if err != nil {
				return err
			}
			if !info.IsDir() {
				return nil
			}
			sdkPathList = append(sdkPathList, path)
			return nil
		})
	if err != nil {
		return []string{}
	}
	return sdkPathList
}
// publicIncludeDirs calculates the include paths this package exports to
// its dependents: <pkg>/include, the BSP-arch-specific subdirectory, and —
// for SDK packages — the BSP include dir plus everything under src/ext/.
func (bpkg *BuildPackage) publicIncludeDirs(bspPkg *pkg.BspPackage) []string {
	pkgBase := filepath.Base(bpkg.rpkg.Lpkg.Name())
	bp := bpkg.rpkg.Lpkg.BasePath()
	incls := []string{
		bp + "/include",
		bp + "/include/" + pkgBase + "/arch/" + bspPkg.Arch,
	}
	if bpkg.rpkg.Lpkg.Type() == pkg.PACKAGE_TYPE_SDK {
		incls = append(incls, bspPkg.BasePath()+"/include/bsp/")
		sdkIncls := bpkg.findSdkIncludes()
		incls = append(incls, sdkIncls...)
	}
	return incls
}
// privateIncludeDirs calculates the include paths visible only while
// compiling this package itself: its src/ dir and arch subdirectory, plus
// SDK or unittest extras depending on the package type.
func (bpkg *BuildPackage) privateIncludeDirs(b *Builder) []string {
	srcDir := bpkg.rpkg.Lpkg.BasePath() + "/src/"
	incls := []string{}
	incls = append(incls, srcDir)
	incls = append(incls, srcDir+"/arch/"+b.targetBuilder.bspPkg.Arch)
	switch bpkg.rpkg.Lpkg.Type() {
	case pkg.PACKAGE_TYPE_SDK:
		// If pkgType == SDK, include all the items in "ext" directly into the
		// include path
		incls = append(incls, b.bspPkg.rpkg.Lpkg.BasePath()+"/include/bsp/")
		sdkIncls := bpkg.findSdkIncludes()
		incls = append(incls, sdkIncls...)
	case pkg.PACKAGE_TYPE_UNITTEST:
		// A unittest package gets access to its parent package's private
		// includes.
		parentPkg := b.testOwner(bpkg)
		if parentPkg != nil {
			parentIncls := parentPkg.privateIncludeDirs(b)
			incls = append(incls, parentIncls...)
		}
	default:
		// No extra include dirs for other package types.
	}
	return incls
}
Add include_dirs setting to syscfg
Allows specifying directories to include in an external SDK
that does not follow the convention used by Newt of putting
everything under "src/ext/".
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package builder
import (
"os"
"path/filepath"
"regexp"
"mynewt.apache.org/newt/newt/newtutil"
"mynewt.apache.org/newt/newt/pkg"
"mynewt.apache.org/newt/newt/resolve"
"mynewt.apache.org/newt/newt/syscfg"
"mynewt.apache.org/newt/newt/toolchain"
"mynewt.apache.org/newt/util"
)
// BuildPackage wraps a resolved package with its build-time state.
type BuildPackage struct {
	rpkg *resolve.ResolvePackage
	// SourceDirectories comes from "pkg.src_dirs"; populated by CompilerInfo.
	SourceDirectories []string
	// ci caches the result of CompilerInfo.
	ci *toolchain.CompilerInfo
}

// NewBuildPackage creates a BuildPackage for the given resolved package.
func NewBuildPackage(rpkg *resolve.ResolvePackage) *BuildPackage {
	bpkg := &BuildPackage{
		rpkg: rpkg,
	}
	return bpkg
}
// collectDepsAux recursively iterates through a pkg's dependencies, adding
// each pkg encountered to the supplied set. The set doubles as the visited
// marker, so dependency cycles terminate.
func (bpkg *BuildPackage) collectDepsAux(b *Builder,
	set *map[*BuildPackage]bool) error {
	if (*set)[bpkg] {
		return nil
	}
	(*set)[bpkg] = true
	for _, dep := range bpkg.rpkg.Deps {
		dbpkg := b.PkgMap[dep.Rpkg]
		if dbpkg == nil {
			return util.FmtNewtError("Package not found %s; required by %s",
				dep.Rpkg.Lpkg.Name(), bpkg.rpkg.Lpkg.Name())
		}
		if err := dbpkg.collectDepsAux(b, set); err != nil {
			return err
		}
	}
	return nil
}
// collectDeps recursively iterates through a pkg's dependencies. The
// resulting array contains a pointer to each encountered pkg, in no
// particular order (map iteration).
func (bpkg *BuildPackage) collectDeps(b *Builder) ([]*BuildPackage, error) {
	set := map[*BuildPackage]bool{}
	err := bpkg.collectDepsAux(b, &set)
	if err != nil {
		return nil, err
	}
	// Idiom fix: range over keys directly ("for p := range", not
	// "for p, _ :="), and pre-size the result slice.
	arr := make([]*BuildPackage, 0, len(set))
	for p := range set {
		arr = append(arr, p)
	}
	return arr, nil
}
// recursiveIncludePaths calculates the include paths exported by the
// specified pkg and all of its recursive dependencies.
func (bpkg *BuildPackage) recursiveIncludePaths(
	b *Builder) ([]string, error) {
	deps, err := bpkg.collectDeps(b)
	if err != nil {
		return nil, err
	}
	incls := []string{}
	for _, p := range deps {
		incls = append(incls, p.publicIncludeDirs(b)...)
	}
	return incls, nil
}
// expandFlags replaces instances of "@<repo-name>" in each flag with repo
// paths, modifying the slice in place.
func expandFlags(flags []string) {
	for i, f := range flags {
		newFlag, changed := newtutil.ReplaceRepoDesignators(f)
		if changed {
			flags[i] = newFlag
		}
	}
}
// BuildProfile retrieves the build package's build profile override, as
// specified in its `pkg.yml` file ("pkg.build_profile"). If the package
// does not override the build profile, "" is returned.
func (bpkg *BuildPackage) BuildProfile(b *Builder) string {
	settings := b.cfg.AllSettingsForLpkg(bpkg.rpkg.Lpkg)
	profile, err := bpkg.rpkg.Lpkg.PkgY.GetValString("pkg.build_profile", settings)
	util.OneTimeWarningError(err)
	return profile
}
// CompilerInfo returns the compiler settings for this package — flags,
// ignore patterns, source dirs, and include paths — read from the package's
// pkg.yml with the target's settings applied. The result is computed once
// and cached on the BuildPackage.
func (bpkg *BuildPackage) CompilerInfo(
	b *Builder) (*toolchain.CompilerInfo, error) {
	// If this package's compiler info has already been generated, return the
	// cached copy.
	if bpkg.ci != nil {
		return bpkg.ci, nil
	}
	ci := toolchain.NewCompilerInfo()
	settings := b.cfg.AllSettingsForLpkg(bpkg.rpkg.Lpkg)
	var err error
	// Read each set of flags and expand repo designators ("@<repo-name>") into
	// paths.
	ci.Cflags, err = bpkg.rpkg.Lpkg.PkgY.GetValStringSlice("pkg.cflags", settings)
	util.OneTimeWarningError(err)
	expandFlags(ci.Cflags)
	ci.CXXflags, err = bpkg.rpkg.Lpkg.PkgY.GetValStringSlice("pkg.cxxflags", settings)
	util.OneTimeWarningError(err)
	expandFlags(ci.CXXflags)
	ci.Lflags, err = bpkg.rpkg.Lpkg.PkgY.GetValStringSlice("pkg.lflags", settings)
	util.OneTimeWarningError(err)
	expandFlags(ci.Lflags)
	ci.Aflags, err = bpkg.rpkg.Lpkg.PkgY.GetValStringSlice("pkg.aflags", settings)
	util.OneTimeWarningError(err)
	expandFlags(ci.Aflags)
	// Package-specific injected settings get specified as C flags on the
	// command line. (Idiom fix: "for k := range" instead of "for k, _ :=".)
	for k := range bpkg.rpkg.Lpkg.InjectedSettings() {
		ci.Cflags = append(ci.Cflags, syscfg.FeatureToCflag(k))
	}
	ci.IgnoreFiles = []*regexp.Regexp{}
	ignPats, err := bpkg.rpkg.Lpkg.PkgY.GetValStringSlice(
		"pkg.ign_files", settings)
	util.OneTimeWarningError(err)
	for _, str := range ignPats {
		re, err := regexp.Compile(str)
		if err != nil {
			return nil, util.NewNewtError(
				"Ignore files, unable to compile re: " + err.Error())
		}
		ci.IgnoreFiles = append(ci.IgnoreFiles, re)
	}
	ci.IgnoreDirs = []*regexp.Regexp{}
	ignPats, err = bpkg.rpkg.Lpkg.PkgY.GetValStringSlice(
		"pkg.ign_dirs", settings)
	util.OneTimeWarningError(err)
	for _, str := range ignPats {
		re, err := regexp.Compile(str)
		if err != nil {
			return nil, util.NewNewtError(
				"Ignore dirs, unable to compile re: " + err.Error())
		}
		ci.IgnoreDirs = append(ci.IgnoreDirs, re)
	}
	bpkg.SourceDirectories, err = bpkg.rpkg.Lpkg.PkgY.GetValStringSlice(
		"pkg.src_dirs", settings)
	util.OneTimeWarningError(err)
	// Private include dirs first, then everything exported by dependencies.
	includePaths, err := bpkg.recursiveIncludePaths(b)
	if err != nil {
		return nil, err
	}
	ci.Includes = append(bpkg.privateIncludeDirs(b), includePaths...)
	bpkg.ci = ci
	return bpkg.ci, nil
}
// findSdkIncludes walks the package's "src/ext" directory and returns
// every subdirectory found, so that SDK headers can be added to the
// include path. A missing ext directory or a failed walk yields an
// empty (non-nil) slice.
func (bpkg *BuildPackage) findSdkIncludes() []string {
	sdkDir := bpkg.rpkg.Lpkg.BasePath() + "/src/ext/"

	// No ext directory simply means no SDK includes; not an error.
	if _, err := os.Stat(sdkDir); err != nil {
		return []string{}
	}

	sdkPathList := []string{}
	err := filepath.Walk(sdkDir,
		func(path string, info os.FileInfo, err error) error {
			// Propagate walk errors first: when err != nil, info is
			// nil, so the original info.IsDir() call would panic on
			// any unreadable directory entry.
			if err != nil {
				return err
			}
			if !info.IsDir() {
				return nil
			}
			sdkPathList = append(sdkPathList, path)
			return nil
		})
	if err != nil {
		return []string{}
	}

	return sdkPathList
}
// publicIncludeDirs returns the include directories this package
// exposes to packages that depend on it.
func (bpkg *BuildPackage) publicIncludeDirs(b *Builder) []string {
	bspPkg := b.targetBuilder.bspPkg
	pkgBase := filepath.Base(bpkg.rpkg.Lpkg.Name())
	bp := bpkg.rpkg.Lpkg.BasePath()

	// Every package exports its "include" dir plus the arch-specific
	// subdirectory matching the target BSP's architecture.
	incls := []string{
		bp + "/include",
		bp + "/include/" + pkgBase + "/arch/" + bspPkg.Arch,
	}

	// Non-SDK packages export nothing further.
	if bpkg.rpkg.Lpkg.Type() != pkg.PACKAGE_TYPE_SDK {
		return incls
	}

	// SDK packages additionally export the BSP headers, everything
	// under src/ext, and any directories listed in pkg.include_dirs.
	incls = append(incls, bspPkg.BasePath()+"/include/bsp/")
	incls = append(incls, bpkg.findSdkIncludes()...)

	settings := b.cfg.AllSettingsForLpkg(bpkg.rpkg.Lpkg)
	inclDirs, err := bpkg.rpkg.Lpkg.PkgY.GetValStringSlice(
		"pkg.include_dirs", settings)
	util.OneTimeWarningError(err)
	for _, dir := range inclDirs {
		incls = append(incls, bp+"/"+dir)
	}

	return incls
}
// privateIncludeDirs returns the include paths visible only while
// compiling this package itself: its own src tree plus extras that
// depend on the package type.
func (bpkg *BuildPackage) privateIncludeDirs(b *Builder) []string {
	srcDir := bpkg.rpkg.Lpkg.BasePath() + "/src/"

	// The package's source directory and its arch-specific
	// subdirectory are always private includes.
	incls := []string{
		srcDir,
		srcDir + "/arch/" + b.targetBuilder.bspPkg.Arch,
	}

	switch bpkg.rpkg.Lpkg.Type() {
	case pkg.PACKAGE_TYPE_SDK:
		// SDK packages see the BSP headers and everything under
		// src/ext directly on the include path.
		incls = append(incls, b.bspPkg.rpkg.Lpkg.BasePath()+"/include/bsp/")
		incls = append(incls, bpkg.findSdkIncludes()...)
	case pkg.PACKAGE_TYPE_UNITTEST:
		// A unittest package inherits the private includes of the
		// package it tests.
		if parent := b.testOwner(bpkg); parent != nil {
			incls = append(incls, parent.privateIncludeDirs(b)...)
		}
	}

	return incls
}
|
package main
import (
"database/sql"
"encoding/json"
"fmt"
"log"
"strconv"
"strings"
"time"
"github.com/satori/go.uuid"
)
// treeRequest is a unit of work sent to a treeKeeper over its input
// channel. RequestType and Action select the operation; exactly one of
// the entity sections (Repository, Bucket, Group, Cluster, Node,
// CheckConfig) carries the payload for the targeted entity, as
// dispatched on Action in treeKeeper.process.
type treeRequest struct {
	RequestType string
	Action      string
	User        string
	JobId       uuid.UUID // job identifier, recorded via start_job and echoed to the jobDelay handler
	reply       chan somaResult
	Repository  somaRepositoryRequest
	Bucket      somaBucketRequest
	Group       somaGroupRequest
	Cluster     somaClusterRequest
	Node        somaNodeRequest
	CheckConfig somaCheckConfigRequest
}
// treeResult describes the outcome of a processed treeRequest.
type treeResult struct {
	ResultType  string
	ResultError error
	JobId       uuid.UUID
	Repository  somaRepositoryResult
	// NOTE(review): Bucket is typed somaRepositoryRequest while
	// Repository uses a *Result type — this looks like a copy/paste
	// slip; confirm the intended type against consumers of treeResult.
	Bucket somaRepositoryRequest
}
// treeKeeper owns the in-memory object tree for a single repository
// and serializes all mutations to it via the input channel (see run).
type treeKeeper struct {
	repoId     string
	repoName   string
	team       string
	broken     bool // set when startupLoad failed; run only idles until shutdown
	ready      bool // set once statements are prepared and jobs are accepted
	frozen     bool // while set, process skips deployment-detail generation
	input      chan treeRequest
	shutdown   chan bool
	conn       *sql.DB
	tree       *tree.Tree
	errChan    chan *tree.Error
	actionChan chan *tree.Action
	// prepared statements, created in run and closed on its return
	start_job *sql.Stmt
	get_view  *sql.Stmt
}
// run is the treeKeeper's main loop: it loads the repository state,
// prepares its SQL statements, and then serializes incoming
// treeRequests until shutdown. A broken repository only logs a
// holding-pattern message until shutdown; observer mode accepts no
// jobs at all.
//
// Fix: the original ended with `goto exit` targeting a label placed
// immediately before the closing brace. Go requires a statement after
// a label, so that did not compile ("missing statement after label").
// A plain return in the observer branch has identical semantics,
// including running the deferred Close calls.
func (tk *treeKeeper) run() {
	log.Printf("Starting TreeKeeper for Repo %s (%s)", tk.repoName, tk.repoId)
	tk.startupLoad()

	// A repository that failed to load is never serviced; log
	// periodically so the operator notices, until shutdown.
	if tk.broken {
		tickTack := time.NewTicker(time.Second * 10).C
	hoverloop:
		for {
			select {
			case <-tickTack:
				log.Printf("TK[%s]: BROKEN REPOSITORY %s flying holding patterns!\n",
					tk.repoName, tk.repoId)
			case <-tk.shutdown:
				break hoverloop
			}
		}
		return
	}

	var err error
	if tk.start_job, err = tk.conn.Prepare(tkStmtStartJob); err != nil {
		log.Fatal("treekeeper/start-job: ", err)
	}
	defer tk.start_job.Close()

	if tk.get_view, err = tk.conn.Prepare(tkStmtGetViewFromCapability); err != nil {
		log.Fatal("treekeeper/get-view-by-capability: ", err)
	}
	defer tk.get_view.Close()

	log.Printf("TK[%s]: ready for service!\n", tk.repoName)
	tk.ready = true

	// Observer mode: accept no jobs, just wait for shutdown.
	if SomaCfg.Observer {
		fmt.Printf("TreeKeeper [%s] entered observer mode\n", tk.repoName)
		<-tk.shutdown
		return
	}

runloop:
	for {
		select {
		case <-tk.shutdown:
			break runloop
		case req := <-tk.input:
			tk.process(&req)
			// Notify the jobDelay handler that this job has completed.
			handlerMap[`jobDelay`].(jobDelay).notify <- req.JobId.String()
			if !tk.frozen {
				tk.buildDeploymentDetails()
				tk.orderDeploymentDetails()
			}
		}
	}
}
// isReady reports whether startup completed and the keeper accepts jobs.
func (tk *treeKeeper) isReady() bool {
	return tk.ready
}
// isBroken reports whether startup loading failed for this repository.
func (tk *treeKeeper) isBroken() bool {
	return tk.broken
}
func (tk *treeKeeper) process(q *treeRequest) {
var (
err error
hasErrors bool
tx *sql.Tx
treeCheck *tree.Check
nullBucket sql.NullString
txStmtPropertyInstanceCreate *sql.Stmt
txStmtRepositoryPropertyServiceCreate *sql.Stmt
txStmtRepositoryPropertySystemCreate *sql.Stmt
txStmtRepositoryPropertyOncallCreate *sql.Stmt
txStmtRepositoryPropertyCustomCreate *sql.Stmt
txStmtCreateBucket *sql.Stmt
txStmtBucketPropertyServiceCreate *sql.Stmt
txStmtBucketPropertySystemCreate *sql.Stmt
txStmtBucketPropertyOncallCreate *sql.Stmt
txStmtBucketPropertyCustomCreate *sql.Stmt
txStmtGroupCreate *sql.Stmt
txStmtGroupUpdate *sql.Stmt
txStmtGroupDelete *sql.Stmt
txStmtGroupMemberNewNode *sql.Stmt
txStmtGroupMemberNewCluster *sql.Stmt
txStmtGroupMemberNewGroup *sql.Stmt
txStmtGroupMemberRemoveNode *sql.Stmt
txStmtGroupMemberRemoveCluster *sql.Stmt
txStmtGroupMemberRemoveGroup *sql.Stmt
txStmtGroupPropertyServiceCreate *sql.Stmt
txStmtGroupPropertySystemCreate *sql.Stmt
txStmtGroupPropertyOncallCreate *sql.Stmt
txStmtGroupPropertyCustomCreate *sql.Stmt
txStmtClusterCreate *sql.Stmt
txStmtClusterUpdate *sql.Stmt
txStmtClusterDelete *sql.Stmt
txStmtClusterMemberNew *sql.Stmt
txStmtClusterMemberRemove *sql.Stmt
txStmtClusterPropertyServiceCreate *sql.Stmt
txStmtClusterPropertySystemCreate *sql.Stmt
txStmtClusterPropertyOncallCreate *sql.Stmt
txStmtClusterPropertyCustomCreate *sql.Stmt
txStmtBucketAssignNode *sql.Stmt
txStmtUpdateNodeState *sql.Stmt
txStmtNodeUnassignFromBucket *sql.Stmt
txStmtNodePropertyServiceCreate *sql.Stmt
txStmtNodePropertySystemCreate *sql.Stmt
txStmtNodePropertyOncallCreate *sql.Stmt
txStmtNodePropertyCustomCreate *sql.Stmt
txStmtCreateCheckConfigurationBase *sql.Stmt
txStmtCreateCheckConfigurationThreshold *sql.Stmt
txStmtCreateCheckConfigurationConstraintSystem *sql.Stmt
txStmtCreateCheckConfigurationConstraintNative *sql.Stmt
txStmtCreateCheckConfigurationConstraintOncall *sql.Stmt
txStmtCreateCheckConfigurationConstraintCustom *sql.Stmt
txStmtCreateCheckConfigurationConstraintService *sql.Stmt
txStmtCreateCheckConfigurationConstraintAttribute *sql.Stmt
txStmtCreateCheck *sql.Stmt
txStmtCreateCheckInstance *sql.Stmt
txStmtCreateCheckInstanceConfiguration *sql.Stmt
txStmtDeleteCheck *sql.Stmt
txStmtDeleteCheckInstance *sql.Stmt
)
_, err = tk.start_job.Exec(q.JobId.String(), time.Now().UTC())
if err != nil {
log.Println(err)
}
log.Printf("Processing job: %s\n", q.JobId.String())
tk.tree.Begin()
switch q.Action {
//
// REPOSITORY MANIPULATION REQUESTS
case "add_system_property_to_repository":
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
Id: uuid.NewV4(),
Inheritance: (*q.Repository.Repository.Properties)[0].Inheritance,
ChildrenOnly: (*q.Repository.Repository.Properties)[0].ChildrenOnly,
View: (*q.Repository.Repository.Properties)[0].View,
Key: (*q.Repository.Repository.Properties)[0].System.Name,
Value: (*q.Repository.Repository.Properties)[0].System.Value,
})
case `delete_system_property_from_repository`:
srcUUID, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `repository`,
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
SourceId: srcUUID,
View: (*q.Repository.Repository.Properties)[0].View,
Key: (*q.Repository.Repository.Properties)[0].System.Name,
Value: (*q.Repository.Repository.Properties)[0].System.Value,
})
case "add_service_property_to_repository":
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
Id: uuid.NewV4(),
Inheritance: (*q.Repository.Repository.Properties)[0].Inheritance,
ChildrenOnly: (*q.Repository.Repository.Properties)[0].ChildrenOnly,
View: (*q.Repository.Repository.Properties)[0].View,
Service: (*q.Repository.Repository.Properties)[0].Service.Name,
Attributes: (*q.Repository.Repository.Properties)[0].Service.Attributes,
})
case `delete_service_property_from_repository`:
srcUUID, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
SourceId: srcUUID,
View: (*q.Repository.Repository.Properties)[0].View,
Service: (*q.Repository.Repository.Properties)[0].Service.Name,
})
case "add_oncall_property_to_repository":
oncallId, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
Id: uuid.NewV4(),
OncallId: oncallId,
Inheritance: (*q.Repository.Repository.Properties)[0].Inheritance,
ChildrenOnly: (*q.Repository.Repository.Properties)[0].ChildrenOnly,
View: (*q.Repository.Repository.Properties)[0].View,
Name: (*q.Repository.Repository.Properties)[0].Oncall.Name,
Number: (*q.Repository.Repository.Properties)[0].Oncall.Number,
})
case `delete_oncall_property_from_repository`:
srcUUID, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].SourceInstanceId)
oncallId, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
SourceId: srcUUID,
OncallId: oncallId,
View: (*q.Repository.Repository.Properties)[0].View,
Name: (*q.Repository.Repository.Properties)[0].Oncall.Name,
Number: (*q.Repository.Repository.Properties)[0].Oncall.Number,
})
case "add_custom_property_to_repository":
customId, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
Id: uuid.NewV4(),
CustomId: customId,
Inheritance: (*q.Repository.Repository.Properties)[0].Inheritance,
ChildrenOnly: (*q.Repository.Repository.Properties)[0].ChildrenOnly,
View: (*q.Repository.Repository.Properties)[0].View,
Key: (*q.Repository.Repository.Properties)[0].Custom.Name,
Value: (*q.Repository.Repository.Properties)[0].Custom.Value,
})
case `delete_custom_property_from_repository`:
srcUUID, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].SourceInstanceId)
customId, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
SourceId: srcUUID,
CustomId: customId,
View: (*q.Repository.Repository.Properties)[0].View,
Key: (*q.Repository.Repository.Properties)[0].Custom.Name,
Value: (*q.Repository.Repository.Properties)[0].Custom.Value,
})
//
// BUCKET MANIPULATION REQUESTS
case "create_bucket":
tree.NewBucket(tree.BucketSpec{
Id: uuid.NewV4().String(),
Name: q.Bucket.Bucket.Name,
Environment: q.Bucket.Bucket.Environment,
Team: tk.team,
Deleted: q.Bucket.Bucket.IsDeleted,
Frozen: q.Bucket.Bucket.IsFrozen,
Repository: q.Bucket.Bucket.RepositoryId,
}).Attach(tree.AttachRequest{
Root: tk.tree,
ParentType: "repository",
ParentId: tk.repoId,
ParentName: tk.repoName,
})
case "add_system_property_to_bucket":
tk.tree.Find(tree.FindRequest{
ElementType: "bucket",
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
Id: uuid.NewV4(),
Inheritance: (*q.Bucket.Bucket.Properties)[0].Inheritance,
ChildrenOnly: (*q.Bucket.Bucket.Properties)[0].ChildrenOnly,
View: (*q.Bucket.Bucket.Properties)[0].View,
Key: (*q.Bucket.Bucket.Properties)[0].System.Name,
Value: (*q.Bucket.Bucket.Properties)[0].System.Value,
})
case `delete_system_property_from_bucket`:
srcUUID, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `bucket`,
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
SourceId: srcUUID,
View: (*q.Bucket.Bucket.Properties)[0].View,
Key: (*q.Bucket.Bucket.Properties)[0].System.Name,
Value: (*q.Bucket.Bucket.Properties)[0].System.Value,
})
case "add_service_property_to_bucket":
tk.tree.Find(tree.FindRequest{
ElementType: "bucket",
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
Id: uuid.NewV4(),
Inheritance: (*q.Bucket.Bucket.Properties)[0].Inheritance,
ChildrenOnly: (*q.Bucket.Bucket.Properties)[0].ChildrenOnly,
View: (*q.Bucket.Bucket.Properties)[0].View,
Service: (*q.Bucket.Bucket.Properties)[0].Service.Name,
Attributes: (*q.Bucket.Bucket.Properties)[0].Service.Attributes,
})
case `delete_service_property_from_bucket`:
srcUUID, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `bucket`,
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
SourceId: srcUUID,
View: (*q.Bucket.Bucket.Properties)[0].View,
Service: (*q.Bucket.Bucket.Properties)[0].Service.Name,
})
case "add_oncall_property_to_bucket":
oncallId, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "bucket",
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
Id: uuid.NewV4(),
OncallId: oncallId,
Inheritance: (*q.Bucket.Bucket.Properties)[0].Inheritance,
ChildrenOnly: (*q.Bucket.Bucket.Properties)[0].ChildrenOnly,
View: (*q.Bucket.Bucket.Properties)[0].View,
Name: (*q.Bucket.Bucket.Properties)[0].Oncall.Name,
Number: (*q.Bucket.Bucket.Properties)[0].Oncall.Number,
})
case `delete_oncall_property_from_bucket`:
srcUUID, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].SourceInstanceId)
oncallId, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `bucket`,
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
SourceId: srcUUID,
OncallId: oncallId,
View: (*q.Bucket.Bucket.Properties)[0].View,
Name: (*q.Bucket.Bucket.Properties)[0].Oncall.Name,
Number: (*q.Bucket.Bucket.Properties)[0].Oncall.Number,
})
case "add_custom_property_to_bucket":
customId, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "bucket",
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
Id: uuid.NewV4(),
CustomId: customId,
Inheritance: (*q.Bucket.Bucket.Properties)[0].Inheritance,
ChildrenOnly: (*q.Bucket.Bucket.Properties)[0].ChildrenOnly,
View: (*q.Bucket.Bucket.Properties)[0].View,
Key: (*q.Bucket.Bucket.Properties)[0].Custom.Name,
Value: (*q.Bucket.Bucket.Properties)[0].Custom.Value,
})
case `delete_custom_property_from_bucket`:
srcUUID, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].SourceInstanceId)
customId, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `bucket`,
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
SourceId: srcUUID,
CustomId: customId,
View: (*q.Bucket.Bucket.Properties)[0].View,
Key: (*q.Bucket.Bucket.Properties)[0].Custom.Name,
Value: (*q.Bucket.Bucket.Properties)[0].Custom.Value,
})
//
// GROUP MANIPULATION REQUESTS
case "create_group":
tree.NewGroup(tree.GroupSpec{
Id: uuid.NewV4().String(),
Name: q.Group.Group.Name,
Team: tk.team,
}).Attach(tree.AttachRequest{
Root: tk.tree,
ParentType: "bucket",
ParentId: q.Group.Group.BucketId,
})
case "delete_group":
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: q.Group.Group.Id,
}, true).(tree.BucketAttacher).Destroy()
case "reset_group_to_bucket":
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: q.Group.Group.Id,
}, true).(tree.BucketAttacher).Detach()
case "add_group_to_group":
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: (*q.Group.Group.MemberGroups)[0].Id,
}, true).(tree.BucketAttacher).ReAttach(tree.AttachRequest{
Root: tk.tree,
ParentType: "group",
ParentId: q.Group.Group.Id,
})
case "add_system_property_to_group":
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
Id: uuid.NewV4(),
Inheritance: (*q.Group.Group.Properties)[0].Inheritance,
ChildrenOnly: (*q.Group.Group.Properties)[0].ChildrenOnly,
View: (*q.Group.Group.Properties)[0].View,
Key: (*q.Group.Group.Properties)[0].System.Name,
Value: (*q.Group.Group.Properties)[0].System.Value,
})
case `delete_system_property_from_group`:
srcUUID, _ := uuid.FromString((*q.Group.Group.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `group`,
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
SourceId: srcUUID,
View: (*q.Group.Group.Properties)[0].View,
Key: (*q.Group.Group.Properties)[0].System.Name,
Value: (*q.Group.Group.Properties)[0].System.Value,
})
case "add_service_property_to_group":
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
Id: uuid.NewV4(),
Inheritance: (*q.Group.Group.Properties)[0].Inheritance,
ChildrenOnly: (*q.Group.Group.Properties)[0].ChildrenOnly,
View: (*q.Group.Group.Properties)[0].View,
Service: (*q.Group.Group.Properties)[0].Service.Name,
Attributes: (*q.Group.Group.Properties)[0].Service.Attributes,
})
case `delete_service_property_from_group`:
srcUUID, _ := uuid.FromString((*q.Group.Group.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `group`,
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
SourceId: srcUUID,
View: (*q.Group.Group.Properties)[0].View,
Service: (*q.Group.Group.Properties)[0].Service.Name,
})
case "add_oncall_property_to_group":
oncallId, _ := uuid.FromString((*q.Group.Group.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
Id: uuid.NewV4(),
OncallId: oncallId,
Inheritance: (*q.Group.Group.Properties)[0].Inheritance,
ChildrenOnly: (*q.Group.Group.Properties)[0].ChildrenOnly,
View: (*q.Group.Group.Properties)[0].View,
Name: (*q.Group.Group.Properties)[0].Oncall.Name,
Number: (*q.Group.Group.Properties)[0].Oncall.Number,
})
case `delete_oncall_property_from_group`:
srcUUID, _ := uuid.FromString((*q.Group.Group.Properties)[0].SourceInstanceId)
oncallId, _ := uuid.FromString((*q.Group.Group.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `group`,
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
SourceId: srcUUID,
OncallId: oncallId,
View: (*q.Group.Group.Properties)[0].View,
Name: (*q.Group.Group.Properties)[0].Oncall.Name,
Number: (*q.Group.Group.Properties)[0].Oncall.Number,
})
case "add_custom_property_to_group":
customId, _ := uuid.FromString((*q.Group.Group.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
Id: uuid.NewV4(),
CustomId: customId,
Inheritance: (*q.Group.Group.Properties)[0].Inheritance,
ChildrenOnly: (*q.Group.Group.Properties)[0].ChildrenOnly,
View: (*q.Group.Group.Properties)[0].View,
Key: (*q.Group.Group.Properties)[0].Custom.Name,
Value: (*q.Group.Group.Properties)[0].Custom.Value,
})
case `delete_custom_property_from_group`:
srcUUID, _ := uuid.FromString((*q.Group.Group.Properties)[0].SourceInstanceId)
customId, _ := uuid.FromString((*q.Group.Group.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `group`,
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
SourceId: srcUUID,
CustomId: customId,
View: (*q.Group.Group.Properties)[0].View,
Key: (*q.Group.Group.Properties)[0].Custom.Name,
Value: (*q.Group.Group.Properties)[0].Custom.Value,
})
//
// CLUSTER MANIPULATION REQUESTS
case "create_cluster":
tree.NewCluster(tree.ClusterSpec{
Id: uuid.NewV4().String(),
Name: q.Cluster.Cluster.Name,
Team: tk.team,
}).Attach(tree.AttachRequest{
Root: tk.tree,
ParentType: "bucket",
ParentId: q.Cluster.Cluster.BucketId,
})
case "delete_cluster":
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.BucketAttacher).Destroy()
case "reset_cluster_to_bucket":
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.BucketAttacher).Detach()
case "add_cluster_to_group":
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: (*q.Group.Group.MemberClusters)[0].Id,
}, true).(tree.BucketAttacher).ReAttach(tree.AttachRequest{
Root: tk.tree,
ParentType: "group",
ParentId: q.Group.Group.Id,
})
case "add_system_property_to_cluster":
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
Id: uuid.NewV4(),
Inheritance: (*q.Cluster.Cluster.Properties)[0].Inheritance,
ChildrenOnly: (*q.Cluster.Cluster.Properties)[0].ChildrenOnly,
View: (*q.Cluster.Cluster.Properties)[0].View,
Key: (*q.Cluster.Cluster.Properties)[0].System.Name,
Value: (*q.Cluster.Cluster.Properties)[0].System.Value,
})
case `delete_system_property_from_cluster`:
srcUUID, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `cluster`,
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
SourceId: srcUUID,
View: (*q.Cluster.Cluster.Properties)[0].View,
Key: (*q.Cluster.Cluster.Properties)[0].System.Name,
Value: (*q.Cluster.Cluster.Properties)[0].System.Value,
})
case "add_service_property_to_cluster":
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
Id: uuid.NewV4(),
Inheritance: (*q.Cluster.Cluster.Properties)[0].Inheritance,
ChildrenOnly: (*q.Cluster.Cluster.Properties)[0].ChildrenOnly,
View: (*q.Cluster.Cluster.Properties)[0].View,
Service: (*q.Cluster.Cluster.Properties)[0].Service.Name,
Attributes: (*q.Cluster.Cluster.Properties)[0].Service.Attributes,
})
case `delete_service_property_from_cluster`:
srcUUID, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `cluster`,
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
SourceId: srcUUID,
View: (*q.Cluster.Cluster.Properties)[0].View,
Service: (*q.Cluster.Cluster.Properties)[0].Service.Name,
})
case "add_oncall_property_to_cluster":
oncallId, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
Id: uuid.NewV4(),
OncallId: oncallId,
Inheritance: (*q.Cluster.Cluster.Properties)[0].Inheritance,
ChildrenOnly: (*q.Cluster.Cluster.Properties)[0].ChildrenOnly,
View: (*q.Cluster.Cluster.Properties)[0].View,
Name: (*q.Cluster.Cluster.Properties)[0].Oncall.Name,
Number: (*q.Cluster.Cluster.Properties)[0].Oncall.Number,
})
case `delete_oncall_property_from_cluster`:
srcUUID, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].SourceInstanceId)
oncallId, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `cluster`,
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
SourceId: srcUUID,
OncallId: oncallId,
View: (*q.Cluster.Cluster.Properties)[0].View,
Name: (*q.Cluster.Cluster.Properties)[0].Oncall.Name,
Number: (*q.Cluster.Cluster.Properties)[0].Oncall.Number,
})
case "add_custom_property_to_cluster":
customId, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
Id: uuid.NewV4(),
CustomId: customId,
Inheritance: (*q.Cluster.Cluster.Properties)[0].Inheritance,
ChildrenOnly: (*q.Cluster.Cluster.Properties)[0].ChildrenOnly,
View: (*q.Cluster.Cluster.Properties)[0].View,
Key: (*q.Cluster.Cluster.Properties)[0].Custom.Name,
Value: (*q.Cluster.Cluster.Properties)[0].Custom.Value,
})
case `delete_custom_property_from_cluster`:
srcUUID, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].SourceInstanceId)
customId, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `cluster`,
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
SourceId: srcUUID,
CustomId: customId,
View: (*q.Cluster.Cluster.Properties)[0].View,
Key: (*q.Cluster.Cluster.Properties)[0].Custom.Name,
Value: (*q.Cluster.Cluster.Properties)[0].Custom.Value,
})
//
// NODE MANIPULATION REQUESTS
case "assign_node":
tree.NewNode(tree.NodeSpec{
Id: q.Node.Node.Id,
AssetId: q.Node.Node.AssetId,
Name: q.Node.Node.Name,
Team: q.Node.Node.TeamId,
ServerId: q.Node.Node.ServerId,
Online: q.Node.Node.IsOnline,
Deleted: q.Node.Node.IsDeleted,
}).Attach(tree.AttachRequest{
Root: tk.tree,
ParentType: "bucket",
ParentId: q.Node.Node.Config.BucketId,
})
case "delete_node":
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: q.Node.Node.Id,
}, true).(tree.BucketAttacher).Destroy()
case "reset_node_to_bucket":
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: q.Node.Node.Id,
}, true).(tree.BucketAttacher).Detach()
case "add_node_to_group":
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: (*q.Group.Group.MemberNodes)[0].Id,
}, true).(tree.BucketAttacher).ReAttach(tree.AttachRequest{
Root: tk.tree,
ParentType: "group",
ParentId: q.Group.Group.Id,
})
case "add_node_to_cluster":
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: (*q.Cluster.Cluster.Members)[0].Id,
}, true).(tree.BucketAttacher).ReAttach(tree.AttachRequest{
Root: tk.tree,
ParentType: "cluster",
ParentId: q.Cluster.Cluster.Id,
})
case "add_system_property_to_node":
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
Id: uuid.NewV4(),
Inheritance: (*q.Node.Node.Properties)[0].Inheritance,
ChildrenOnly: (*q.Node.Node.Properties)[0].ChildrenOnly,
View: (*q.Node.Node.Properties)[0].View,
Key: (*q.Node.Node.Properties)[0].System.Name,
Value: (*q.Node.Node.Properties)[0].System.Value,
})
case `delete_system_property_from_node`:
srcUUID, _ := uuid.FromString((*q.Node.Node.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `node`,
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
SourceId: srcUUID,
View: (*q.Node.Node.Properties)[0].View,
Key: (*q.Node.Node.Properties)[0].System.Name,
Value: (*q.Node.Node.Properties)[0].System.Value,
})
case "add_service_property_to_node":
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
Id: uuid.NewV4(),
Inheritance: (*q.Node.Node.Properties)[0].Inheritance,
ChildrenOnly: (*q.Node.Node.Properties)[0].ChildrenOnly,
View: (*q.Node.Node.Properties)[0].View,
Service: (*q.Node.Node.Properties)[0].Service.Name,
Attributes: (*q.Node.Node.Properties)[0].Service.Attributes,
})
case `delete_service_property_from_node`:
srcUUID, _ := uuid.FromString((*q.Node.Node.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `node`,
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
SourceId: srcUUID,
View: (*q.Node.Node.Properties)[0].View,
Service: (*q.Node.Node.Properties)[0].Service.Name,
})
case "add_oncall_property_to_node":
oncallId, _ := uuid.FromString((*q.Node.Node.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
Id: uuid.NewV4(),
OncallId: oncallId,
Inheritance: (*q.Node.Node.Properties)[0].Inheritance,
ChildrenOnly: (*q.Node.Node.Properties)[0].ChildrenOnly,
View: (*q.Node.Node.Properties)[0].View,
Name: (*q.Node.Node.Properties)[0].Oncall.Name,
Number: (*q.Node.Node.Properties)[0].Oncall.Number,
})
case `delete_oncall_property_from_node`:
srcUUID, _ := uuid.FromString((*q.Node.Node.Properties)[0].SourceInstanceId)
oncallId, _ := uuid.FromString((*q.Node.Node.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `node`,
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
SourceId: srcUUID,
OncallId: oncallId,
View: (*q.Node.Node.Properties)[0].View,
Name: (*q.Node.Node.Properties)[0].Oncall.Name,
Number: (*q.Node.Node.Properties)[0].Oncall.Number,
})
case "add_custom_property_to_node":
customId, _ := uuid.FromString((*q.Node.Node.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
Id: uuid.NewV4(),
CustomId: customId,
Inheritance: (*q.Node.Node.Properties)[0].Inheritance,
ChildrenOnly: (*q.Node.Node.Properties)[0].ChildrenOnly,
View: (*q.Node.Node.Properties)[0].View,
Key: (*q.Node.Node.Properties)[0].Custom.Name,
Value: (*q.Node.Node.Properties)[0].Custom.Value,
})
case `delete_custom_property_from_node`:
srcUUID, _ := uuid.FromString((*q.Node.Node.Properties)[0].SourceInstanceId)
customId, _ := uuid.FromString((*q.Node.Node.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `node`,
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
SourceId: srcUUID,
CustomId: customId,
View: (*q.Node.Node.Properties)[0].View,
Key: (*q.Node.Node.Properties)[0].Custom.Name,
Value: (*q.Node.Node.Properties)[0].Custom.Value,
})
//
// CHECK MANIPULATION REQUESTS
case `add_check_to_repository`:
fallthrough
case `add_check_to_bucket`:
fallthrough
case `add_check_to_group`:
fallthrough
case `add_check_to_cluster`:
fallthrough
case `add_check_to_node`:
if treeCheck, err = tk.convertCheck(&q.CheckConfig.CheckConfig); err == nil {
tk.tree.Find(tree.FindRequest{
ElementType: q.CheckConfig.CheckConfig.ObjectType,
ElementId: q.CheckConfig.CheckConfig.ObjectId,
}, true).SetCheck(*treeCheck)
}
case `remove_check_from_repository`:
fallthrough
case `remove_check_from_bucket`:
fallthrough
case `remove_check_from_group`:
fallthrough
case `remove_check_from_cluster`:
fallthrough
case `remove_check_from_node`:
if treeCheck, err = tk.convertCheckForDelete(&q.CheckConfig.CheckConfig); err == nil {
tk.tree.Find(tree.FindRequest{
ElementType: q.CheckConfig.CheckConfig.ObjectType,
ElementId: q.CheckConfig.CheckConfig.ObjectId,
}, true).DeleteCheck(*treeCheck)
}
}
// check if we accumulated an error in one of the switch cases
if err != nil {
goto bailout
}
// recalculate check instances
tk.tree.ComputeCheckInstances()
// open multi-statement transaction
if tx, err = tk.conn.Begin(); err != nil {
goto bailout
}
defer tx.Rollback()
// prepare statements within tx context
if txStmtPropertyInstanceCreate, err = tx.Prepare(tkStmtPropertyInstanceCreate); err != nil {
log.Println("Failed to prepare: tkStmtPropertyInstanceCreate")
goto bailout
}
defer txStmtPropertyInstanceCreate.Close()
if txStmtCreateCheckConfigurationBase, err = tx.Prepare(tkStmtCreateCheckConfigurationBase); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationBase")
goto bailout
}
defer txStmtCreateCheckConfigurationBase.Close()
if txStmtCreateCheckConfigurationThreshold, err = tx.Prepare(tkStmtCreateCheckConfigurationThreshold); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationThreshold")
goto bailout
}
defer txStmtCreateCheckConfigurationThreshold.Close()
if txStmtCreateCheckConfigurationConstraintSystem, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintSystem); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintSystem")
goto bailout
}
defer txStmtCreateCheckConfigurationConstraintSystem.Close()
if txStmtCreateCheckConfigurationConstraintNative, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintNative); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintNative")
goto bailout
}
defer txStmtCreateCheckConfigurationConstraintNative.Close()
if txStmtCreateCheckConfigurationConstraintOncall, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintOncall); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintOncall")
goto bailout
}
defer txStmtCreateCheckConfigurationConstraintOncall.Close()
if txStmtCreateCheckConfigurationConstraintCustom, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintCustom); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintCustom")
goto bailout
}
defer txStmtCreateCheckConfigurationConstraintCustom.Close()
if txStmtCreateCheckConfigurationConstraintService, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintService); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintService")
goto bailout
}
defer txStmtCreateCheckConfigurationConstraintService.Close()
if txStmtCreateCheckConfigurationConstraintAttribute, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintAttribute); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintAttribute")
goto bailout
}
defer txStmtCreateCheckConfigurationConstraintAttribute.Close()
if txStmtCreateCheck, err = tx.Prepare(tkStmtCreateCheck); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheck")
goto bailout
}
defer txStmtCreateCheck.Close()
if txStmtCreateCheckInstance, err = tx.Prepare(tkStmtCreateCheckInstance); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckInstance")
goto bailout
}
defer txStmtCreateCheckInstance.Close()
if txStmtCreateCheckInstanceConfiguration, err = tx.Prepare(tkStmtCreateCheckInstanceConfiguration); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckInstanceConfiguration")
goto bailout
}
defer txStmtCreateCheckInstanceConfiguration.Close()
if txStmtDeleteCheck, err = tx.Prepare(stmt.TxMarkCheckDeleted); err != nil {
log.Println("Failed to prepare: txStmtDeleteCheck")
goto bailout
}
if txStmtDeleteCheckInstance, err = tx.Prepare(stmt.TxMarkCheckInstanceDeleted); err != nil {
log.Println("Failed to prepare: txStmtDeleteCheckInstance")
goto bailout
}
//
// REPOSITORY
if txStmtRepositoryPropertyOncallCreate, err = tx.Prepare(tkStmtRepositoryPropertyOncallCreate); err != nil {
log.Println("Failed to prepare: tkStmtRepositoryPropertyOncallCreate")
goto bailout
}
defer txStmtRepositoryPropertyOncallCreate.Close()
if txStmtRepositoryPropertyServiceCreate, err = tx.Prepare(tkStmtRepositoryPropertyServiceCreate); err != nil {
log.Println("Failed to prepare: tkStmtRepositoryPropertyServiceCreate")
goto bailout
}
defer txStmtRepositoryPropertyServiceCreate.Close()
if txStmtRepositoryPropertySystemCreate, err = tx.Prepare(tkStmtRepositoryPropertySystemCreate); err != nil {
log.Println("Failed to prepare: tkStmtRepositoryPropertySystemCreate")
goto bailout
}
defer txStmtRepositoryPropertySystemCreate.Close()
if txStmtRepositoryPropertyCustomCreate, err = tx.Prepare(tkStmtRepositoryPropertyCustomCreate); err != nil {
log.Println("Failed to prepare: tkStmtRepositoryPropertyCustomCreate")
goto bailout
}
defer txStmtRepositoryPropertyCustomCreate.Close()
//
// BUCKET
if txStmtCreateBucket, err = tx.Prepare(tkStmtCreateBucket); err != nil {
log.Println("Failed to prepare: tkStmtCreateBucket")
goto bailout
}
defer txStmtCreateBucket.Close()
if txStmtBucketPropertyOncallCreate, err = tx.Prepare(tkStmtBucketPropertyOncallCreate); err != nil {
log.Println("Failed to prepare: tkStmtBucketPropertyOncallCreate")
goto bailout
}
defer txStmtBucketPropertyOncallCreate.Close()
if txStmtBucketPropertyServiceCreate, err = tx.Prepare(tkStmtBucketPropertyServiceCreate); err != nil {
log.Println("Failed to prepare: tkStmtBucketPropertyServiceCreate")
goto bailout
}
defer txStmtBucketPropertyServiceCreate.Close()
if txStmtBucketPropertySystemCreate, err = tx.Prepare(tkStmtBucketPropertySystemCreate); err != nil {
log.Println("Failed to prepare: tkStmtBucketPropertySystemCreate")
goto bailout
}
defer txStmtBucketPropertySystemCreate.Close()
if txStmtBucketPropertyCustomCreate, err = tx.Prepare(tkStmtBucketPropertyCustomCreate); err != nil {
log.Println("Failed to prepare: tkStmtBucketPropertyCustomCreate")
goto bailout
}
defer txStmtBucketPropertyCustomCreate.Close()
//
// GROUP
if txStmtGroupCreate, err = tx.Prepare(tkStmtGroupCreate); err != nil {
log.Println("Failed to prepare: tkStmtGroupCreate")
goto bailout
}
defer txStmtGroupCreate.Close()
if txStmtGroupUpdate, err = tx.Prepare(tkStmtGroupUpdate); err != nil {
log.Println("Failed to prepare: tkStmtGroupUpdate")
goto bailout
}
defer txStmtGroupUpdate.Close()
if txStmtGroupDelete, err = tx.Prepare(tkStmtGroupDelete); err != nil {
log.Println("Failed to prepare: tkStmtGroupDelete")
goto bailout
}
defer txStmtGroupDelete.Close()
if txStmtGroupMemberNewNode, err = tx.Prepare(tkStmtGroupMemberNewNode); err != nil {
log.Println("Failed to prepare: tkStmtGroupMemberNewNode")
goto bailout
}
defer txStmtGroupMemberNewNode.Close()
if txStmtGroupMemberNewCluster, err = tx.Prepare(tkStmtGroupMemberNewCluster); err != nil {
log.Println("Failed to prepare: tkStmtGroupMemberNewCluster")
goto bailout
}
defer txStmtGroupMemberNewCluster.Close()
if txStmtGroupMemberNewGroup, err = tx.Prepare(tkStmtGroupMemberNewGroup); err != nil {
log.Println("Failed to prepare: tkStmtGroupMemberNewGroup")
goto bailout
}
defer txStmtGroupMemberNewGroup.Close()
if txStmtGroupMemberRemoveNode, err = tx.Prepare(tkStmtGroupMemberRemoveNode); err != nil {
log.Println("Failed to prepare: tkStmtGroupMemberRemoveNode")
goto bailout
}
defer txStmtGroupMemberRemoveNode.Close()
if txStmtGroupMemberRemoveCluster, err = tx.Prepare(tkStmtGroupMemberRemoveCluster); err != nil {
log.Println("Failed to prepare: tkStmtGroupMemberRemoveCluster")
goto bailout
}
defer txStmtGroupMemberRemoveCluster.Close()
if txStmtGroupMemberRemoveGroup, err = tx.Prepare(tkStmtGroupMemberRemoveGroup); err != nil {
log.Println("Failed to prepare: tkStmtGroupMemberRemoveGroup")
goto bailout
}
defer txStmtGroupMemberRemoveGroup.Close()
if txStmtGroupPropertyOncallCreate, err = tx.Prepare(tkStmtGroupPropertyOncallCreate); err != nil {
log.Println("Failed to prepare: tkStmtGroupPropertyOncallCreate")
goto bailout
}
defer txStmtGroupPropertyOncallCreate.Close()
if txStmtGroupPropertyServiceCreate, err = tx.Prepare(tkStmtGroupPropertyServiceCreate); err != nil {
log.Println("Failed to prepare: tkStmtGroupPropertyServiceCreate")
goto bailout
}
defer txStmtGroupPropertyServiceCreate.Close()
if txStmtGroupPropertySystemCreate, err = tx.Prepare(tkStmtGroupPropertySystemCreate); err != nil {
log.Println("Failed to prepare: tkStmtGroupPropertySystemCreate")
goto bailout
}
defer txStmtGroupPropertySystemCreate.Close()
if txStmtGroupPropertyCustomCreate, err = tx.Prepare(tkStmtGroupPropertyCustomCreate); err != nil {
log.Println("Failed to prepare: tkStmtGroupPropertyCustomCreate")
goto bailout
}
defer txStmtGroupPropertyCustomCreate.Close()
//
// CLUSTER
if txStmtClusterCreate, err = tx.Prepare(tkStmtClusterCreate); err != nil {
log.Println("Failed to prepare: tkStmtClusterCreate")
goto bailout
}
defer txStmtClusterCreate.Close()
if txStmtClusterUpdate, err = tx.Prepare(tkStmtClusterUpdate); err != nil {
log.Println("Failed to prepare: tkStmtClusterUpdate")
goto bailout
}
defer txStmtClusterUpdate.Close()
if txStmtClusterDelete, err = tx.Prepare(tkStmtClusterDelete); err != nil {
log.Println("Failed to prepare: tkStmtClusterDelete")
goto bailout
}
defer txStmtClusterDelete.Close()
if txStmtClusterMemberNew, err = tx.Prepare(tkStmtClusterMemberNew); err != nil {
log.Println("Failed to prepare: tkStmtClusterMemberNew")
goto bailout
}
defer txStmtClusterMemberNew.Close()
if txStmtClusterMemberRemove, err = tx.Prepare(tkStmtClusterMemberRemove); err != nil {
log.Println("Failed to prepare: tkStmtClusterMemberRemove")
goto bailout
}
defer txStmtClusterMemberRemove.Close()
if txStmtClusterPropertyOncallCreate, err = tx.Prepare(tkStmtClusterPropertyOncallCreate); err != nil {
log.Println("Failed to prepare: tkStmtClusterPropertyOncallCreate")
goto bailout
}
defer txStmtClusterPropertyOncallCreate.Close()
if txStmtClusterPropertyServiceCreate, err = tx.Prepare(tkStmtClusterPropertyServiceCreate); err != nil {
log.Println("Failed to prepare: tkStmtClusterPropertyServiceCreate")
goto bailout
}
defer txStmtClusterPropertyServiceCreate.Close()
if txStmtClusterPropertySystemCreate, err = tx.Prepare(tkStmtClusterPropertySystemCreate); err != nil {
log.Println("Failed to prepare: tkStmtClusterPropertySystemCreate")
goto bailout
}
defer txStmtClusterPropertySystemCreate.Close()
if txStmtClusterPropertyCustomCreate, err = tx.Prepare(tkStmtClusterPropertyCustomCreate); err != nil {
log.Println("Failed to prepare: tkStmtClusterPropertyCustomCreate")
goto bailout
}
defer txStmtClusterPropertyCustomCreate.Close()
//
// NODE
if txStmtBucketAssignNode, err = tx.Prepare(tkStmtBucketAssignNode); err != nil {
log.Println("Failed to prepare: tkStmtBucketAssignNode")
goto bailout
}
defer txStmtBucketAssignNode.Close()
if txStmtUpdateNodeState, err = tx.Prepare(tkStmtUpdateNodeState); err != nil {
log.Println("Failed to prepare: tkStmtUpdateNodeState")
goto bailout
}
defer txStmtUpdateNodeState.Close()
if txStmtNodeUnassignFromBucket, err = tx.Prepare(tkStmtNodeUnassignFromBucket); err != nil {
log.Println("Failed to prepare: tkStmtNodeUnassignFromBucket")
goto bailout
}
defer txStmtNodeUnassignFromBucket.Close()
if txStmtNodePropertyOncallCreate, err = tx.Prepare(tkStmtNodePropertyOncallCreate); err != nil {
log.Println("Failed to prepare: tkStmtNodePropertyOncallCreate")
goto bailout
}
defer txStmtNodePropertyOncallCreate.Close()
if txStmtNodePropertyServiceCreate, err = tx.Prepare(tkStmtNodePropertyServiceCreate); err != nil {
log.Println("Failed to prepare: tkStmtNodePropertyServiceCreate")
goto bailout
}
defer txStmtNodePropertyServiceCreate.Close()
if txStmtNodePropertySystemCreate, err = tx.Prepare(tkStmtNodePropertySystemCreate); err != nil {
log.Println("Failed to prepare: tkStmtNodePropertySystemCreate")
goto bailout
}
defer txStmtNodePropertySystemCreate.Close()
if txStmtNodePropertyCustomCreate, err = tx.Prepare(tkStmtNodePropertyCustomCreate); err != nil {
log.Println("Failed to prepare: tkStmtNodePropertyCustomCreate")
goto bailout
}
defer txStmtNodePropertyCustomCreate.Close()
// defer constraint checks
if _, err = tx.Exec(tkStmtDeferAllConstraints); err != nil {
log.Println("Failed to exec: tkStmtDeferAllConstraints")
goto bailout
}
// save the check configuration as part of the transaction before
// processing the action channel
if strings.Contains(q.Action, "add_check_to_") {
if q.CheckConfig.CheckConfig.BucketId != "" {
nullBucket = sql.NullString{
String: q.CheckConfig.CheckConfig.BucketId,
Valid: true,
}
} else {
nullBucket = sql.NullString{String: "", Valid: false}
}
if _, err = txStmtCreateCheckConfigurationBase.Exec(
q.CheckConfig.CheckConfig.Id,
q.CheckConfig.CheckConfig.Name,
int64(q.CheckConfig.CheckConfig.Interval),
q.CheckConfig.CheckConfig.RepositoryId,
nullBucket,
q.CheckConfig.CheckConfig.CapabilityId,
q.CheckConfig.CheckConfig.ObjectId,
q.CheckConfig.CheckConfig.ObjectType,
q.CheckConfig.CheckConfig.IsActive,
q.CheckConfig.CheckConfig.IsEnabled,
q.CheckConfig.CheckConfig.Inheritance,
q.CheckConfig.CheckConfig.ChildrenOnly,
q.CheckConfig.CheckConfig.ExternalId,
); err != nil {
goto bailout
}
threshloop:
for _, thr := range q.CheckConfig.CheckConfig.Thresholds {
if _, err = txStmtCreateCheckConfigurationThreshold.Exec(
q.CheckConfig.CheckConfig.Id,
thr.Predicate.Symbol,
strconv.FormatInt(thr.Value, 10),
thr.Level.Name,
); err != nil {
break threshloop
}
}
if err != nil {
goto bailout
}
constrloop:
for _, constr := range q.CheckConfig.CheckConfig.Constraints {
switch constr.ConstraintType {
case "native":
if _, err = txStmtCreateCheckConfigurationConstraintNative.Exec(
q.CheckConfig.CheckConfig.Id,
constr.Native.Name,
constr.Native.Value,
); err != nil {
break constrloop
}
case "oncall":
if _, err = txStmtCreateCheckConfigurationConstraintOncall.Exec(
q.CheckConfig.CheckConfig.Id,
constr.Oncall.Id,
); err != nil {
break constrloop
}
case "custom":
if _, err = txStmtCreateCheckConfigurationConstraintCustom.Exec(
q.CheckConfig.CheckConfig.Id,
constr.Custom.Id,
constr.Custom.RepositoryId,
constr.Custom.Value,
); err != nil {
break constrloop
}
case "system":
if _, err = txStmtCreateCheckConfigurationConstraintSystem.Exec(
q.CheckConfig.CheckConfig.Id,
constr.System.Name,
constr.System.Value,
); err != nil {
break constrloop
}
case "service":
if constr.Service.TeamId != tk.team {
err = fmt.Errorf("Service constraint has mismatched TeamID values: %s/%s",
tk.team, constr.Service.TeamId)
fmt.Println(err)
break constrloop
}
log.Printf(`SQL: tkStmtCreateCheckConfigurationConstraintService:
CheckConfig ID: %s
Team ID: %s
Service Name: %s%s`,
q.CheckConfig.CheckConfig.Id,
tk.team,
constr.Service.Name, "\n")
if _, err = txStmtCreateCheckConfigurationConstraintService.Exec(
q.CheckConfig.CheckConfig.Id,
tk.team,
constr.Service.Name,
); err != nil {
break constrloop
}
case "attribute":
if _, err = txStmtCreateCheckConfigurationConstraintAttribute.Exec(
q.CheckConfig.CheckConfig.Id,
constr.Attribute.Name,
constr.Attribute.Value,
); err != nil {
break constrloop
}
}
}
if err != nil {
goto bailout
}
}
// mark the check configuration as deleted
if strings.HasPrefix(q.Action, `remove_check_from_`) {
if _, err = tx.Exec(stmt.TxMarkCheckConfigDeleted, q.CheckConfig.CheckConfig.Id); err != nil {
goto bailout
}
}
// if the error channel has entries, we can fully ignore the
// action channel
for i := len(tk.errChan); i > 0; i-- {
e := <-tk.errChan
b, _ := json.Marshal(e)
log.Println(string(b))
hasErrors = true
if err == nil {
err = fmt.Errorf(e.Action)
}
}
if hasErrors {
goto bailout
}
actionloop:
for i := len(tk.actionChan); i > 0; i-- {
a := <-tk.actionChan
// we need all messages to figure out why for example a deferred
// constraint later failed
//jBxX, _ := json.Marshal(a)
//log.Printf("%s - Processing: %s\n", q.JobId.String(), string(jBxX))
switch a.Type {
// REPOSITORY
case "repository":
switch a.Action {
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtRepositoryPropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Property.Custom.RepositoryId,
a.Property.View,
a.Property.Custom.Id,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
if _, err = txStmtRepositoryPropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Repository.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
if _, err = txStmtRepositoryPropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Repository.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtRepositoryPropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Repository.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtRepositoryPropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtRepositoryPropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtRepositoryPropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtRepositoryPropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
sql.NullString{String: "", Valid: false},
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Repository.Id,
"repository",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = txStmtDeleteCheck.Exec(
a.Check.CheckId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
// BUCKET
case "bucket":
switch a.Action {
case "create":
if _, err = txStmtCreateBucket.Exec(
a.Bucket.Id,
a.Bucket.Name,
a.Bucket.IsFrozen,
a.Bucket.IsDeleted,
a.Bucket.RepositoryId,
a.Bucket.Environment,
a.Bucket.TeamId,
); err != nil {
break actionloop
}
case "node_assignment":
if _, err = txStmtBucketAssignNode.Exec(
a.ChildNode.Id,
a.Bucket.Id,
a.Bucket.TeamId,
); err != nil {
break actionloop
}
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtBucketPropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.Custom.Id,
a.Property.Custom.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
if _, err = txStmtBucketPropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
log.Printf(`SQL: tkStmtBucketPropertyServiceCreate:
Instance ID: %s
Source Instance ID: %s
Bucket ID: %s
View: %s
Service Name: %s
Service TeamId: %s
Repository ID: %s
Inheritance Enabled: %t
Children Only: %t%s`,
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly, "\n")
if _, err = txStmtBucketPropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtBucketPropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtBucketPropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtBucketPropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtBucketPropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtBucketPropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Bucket.Id,
"bucket",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = txStmtDeleteCheck.Exec(
a.Check.CheckId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
// GROUP
case "group":
switch a.Action {
case "create":
if _, err = txStmtGroupCreate.Exec(
a.Group.Id,
a.Group.BucketId,
a.Group.Name,
a.Group.ObjectState,
a.Group.TeamId,
); err != nil {
break actionloop
}
case "update":
if _, err = txStmtGroupUpdate.Exec(
a.Group.Id,
a.Group.ObjectState,
); err != nil {
break actionloop
}
case "delete":
if _, err = txStmtGroupDelete.Exec(
a.Group.Id,
); err != nil {
break actionloop
}
case "member_new":
switch a.ChildType {
case "group":
log.Println("==> group/new membergroup")
if _, err = txStmtGroupMemberNewGroup.Exec(
a.Group.Id,
a.ChildGroup.Id,
a.Group.BucketId,
); err != nil {
break actionloop
}
case "cluster":
log.Println("==> group/new membercluster")
if _, err = txStmtGroupMemberNewCluster.Exec(
a.Group.Id,
a.ChildCluster.Id,
a.Group.BucketId,
); err != nil {
break actionloop
}
case "node":
log.Println("==> group/new membernode")
if _, err = txStmtGroupMemberNewNode.Exec(
a.Group.Id,
a.ChildNode.Id,
a.Group.BucketId,
); err != nil {
break actionloop
}
}
case "member_removed":
switch a.ChildType {
case "group":
if _, err = txStmtGroupMemberRemoveGroup.Exec(
a.Group.Id,
a.ChildGroup.Id,
); err != nil {
break actionloop
}
case "cluster":
if _, err = txStmtGroupMemberRemoveCluster.Exec(
a.Group.Id,
a.ChildCluster.Id,
); err != nil {
break actionloop
}
case "node":
if _, err = txStmtGroupMemberRemoveNode.Exec(
a.Group.Id,
a.ChildNode.Id,
); err != nil {
break actionloop
}
}
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtGroupPropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Group.Id,
a.Property.View,
a.Property.Custom.Id,
a.Property.BucketId,
a.Property.Custom.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
if _, err = txStmtGroupPropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Group.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
if _, err = txStmtGroupPropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Group.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtGroupPropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Group.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtGroupPropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtGroupPropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtGroupPropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtGroupPropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Group.Id,
"group",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = txStmtDeleteCheck.Exec(
a.Check.CheckId,
); err != nil {
break actionloop
}
case "check_instance_create":
if _, err = txStmtCreateCheckInstance.Exec(
a.CheckInstance.InstanceId,
a.CheckInstance.CheckId,
a.CheckInstance.ConfigId,
"00000000-0000-0000-0000-000000000000",
time.Now().UTC(),
); err != nil {
break actionloop
}
fallthrough
case "check_instance_update":
if _, err = txStmtCreateCheckInstanceConfiguration.Exec(
a.CheckInstance.InstanceConfigId,
a.CheckInstance.Version,
a.CheckInstance.InstanceId,
a.CheckInstance.ConstraintHash,
a.CheckInstance.ConstraintValHash,
a.CheckInstance.InstanceService,
a.CheckInstance.InstanceSvcCfgHash,
a.CheckInstance.InstanceServiceConfig,
time.Now().UTC(),
"awaiting_computation",
"none",
false,
"{}",
); err != nil {
fmt.Println(`Failed CreateCheckInstanceConfiguration`, a.CheckInstance.InstanceConfigId)
break actionloop
}
case "check_instance_delete":
if _, err = txStmtDeleteCheckInstance.Exec(
a.CheckInstance.InstanceId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
// CLUSTER
case "cluster":
switch a.Action {
case "create":
if _, err = txStmtClusterCreate.Exec(
a.Cluster.Id,
a.Cluster.Name,
a.Cluster.BucketId,
a.Cluster.ObjectState,
a.Cluster.TeamId,
); err != nil {
break actionloop
}
case "update":
if _, err = txStmtClusterUpdate.Exec(
a.Cluster.Id,
a.Cluster.ObjectState,
); err != nil {
break actionloop
}
case "delete":
if _, err = txStmtClusterDelete.Exec(
a.Cluster.Id,
); err != nil {
break actionloop
}
case "member_new":
log.Println("==> cluster/new membernode")
if _, err = txStmtClusterMemberNew.Exec(
a.Cluster.Id,
a.ChildNode.Id,
a.Cluster.BucketId,
); err != nil {
break actionloop
}
case "member_removed":
log.Println("==> cluster/new membernode")
if _, err = txStmtClusterMemberRemove.Exec(
a.Cluster.Id,
a.ChildNode.Id,
); err != nil {
break actionloop
}
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtClusterPropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.Custom.Id,
a.Property.BucketId,
a.Property.Custom.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
if _, err = txStmtClusterPropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
log.Printf(`SQL: tkStmtClusterPropertyServiceCreate:
Instance ID: %s
Source Instance ID: %s
Cluster ID: %s
View: %s
Service Name: %s
Service TeamId: %s
Repository ID: %s
Inheritance Enabled: %t
Children Only: %t%s`,
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly, "\n")
if _, err = txStmtClusterPropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtClusterPropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtClusterPropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtClusterPropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtClusterPropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtClusterPropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Cluster.Id,
"cluster",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = txStmtDeleteCheck.Exec(
a.Check.CheckId,
); err != nil {
break actionloop
}
case "check_instance_create":
if _, err = txStmtCreateCheckInstance.Exec(
a.CheckInstance.InstanceId,
a.CheckInstance.CheckId,
a.CheckInstance.ConfigId,
"00000000-0000-0000-0000-000000000000",
time.Now().UTC(),
); err != nil {
break actionloop
}
fallthrough
case "check_instance_update":
if _, err = txStmtCreateCheckInstanceConfiguration.Exec(
a.CheckInstance.InstanceConfigId,
a.CheckInstance.Version,
a.CheckInstance.InstanceId,
a.CheckInstance.ConstraintHash,
a.CheckInstance.ConstraintValHash,
a.CheckInstance.InstanceService,
a.CheckInstance.InstanceSvcCfgHash,
a.CheckInstance.InstanceServiceConfig,
time.Now().UTC(),
"awaiting_computation",
"none",
false,
"{}",
); err != nil {
fmt.Println(`Failed CreateCheckInstanceConfiguration`, a.CheckInstance.InstanceConfigId)
break actionloop
}
case "check_instance_delete":
if _, err = txStmtDeleteCheckInstance.Exec(
a.CheckInstance.InstanceId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
// NODE
case "node":
switch a.Action {
case "delete":
if _, err = txStmtNodeUnassignFromBucket.Exec(
a.Node.Id,
a.Node.Config.BucketId,
a.Node.TeamId,
); err != nil {
break actionloop
}
fallthrough // need to call txStmtUpdateNodeState for delete as well
case "update":
log.Println("==> node/update")
if _, err = txStmtUpdateNodeState.Exec(
a.Node.Id,
a.Node.State,
); err != nil {
break actionloop
}
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtNodePropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.Custom.Id,
a.Property.BucketId,
a.Property.Custom.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
log.Printf(`SQL: tkStmtNodePropertySystemCreate:
Instance ID: %s
Source Instance ID: %s
Node ID: %s
View: %s
SystemProperty: %s
Object Type: %s
Repository ID: %s
Inheritance Enabled: %t
Children Only: %t
System Property Value: %s
Is Inherited: %t%s`,
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited, "\n")
if _, err = txStmtNodePropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
if _, err = txStmtNodePropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtNodePropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtNodePropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtNodePropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtNodePropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtNodePropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
log.Printf(`SQL: tkStmtCreateCheck:
Check ID: %s
Repository ID: %s
Bucket ID: %s
Source Check ID: %s
Source Type: %s
Inherited From: %s
Check Config ID: %s
Check Capability ID: %s
Node ID: %s%s`,
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Node.Id, "\n")
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Node.Id,
"node",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = tx.Exec(stmt.TxMarkCheckDeleted,
a.Check.CheckId,
); err != nil {
break actionloop
}
case "check_instance_create":
if _, err = txStmtCreateCheckInstance.Exec(
a.CheckInstance.InstanceId,
a.CheckInstance.CheckId,
a.CheckInstance.ConfigId,
"00000000-0000-0000-0000-000000000000",
time.Now().UTC(),
); err != nil {
break actionloop
}
fallthrough
case "check_instance_update":
if _, err = txStmtCreateCheckInstanceConfiguration.Exec(
a.CheckInstance.InstanceConfigId,
a.CheckInstance.Version,
a.CheckInstance.InstanceId,
a.CheckInstance.ConstraintHash,
a.CheckInstance.ConstraintValHash,
a.CheckInstance.InstanceService,
a.CheckInstance.InstanceSvcCfgHash,
a.CheckInstance.InstanceServiceConfig,
time.Now().UTC(),
"awaiting_computation",
"none",
false,
"{}",
); err != nil {
fmt.Println(`Failed CreateCheckInstanceConfiguration`, a.CheckInstance.InstanceConfigId)
break actionloop
}
case "check_instance_delete":
if _, err = txStmtDeleteCheckInstance.Exec(
a.CheckInstance.InstanceId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
case "errorchannel":
continue actionloop
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
}
if err != nil {
goto bailout
}
// mark job as finished
if _, err = tx.Exec(
tkStmtFinishJob,
q.JobId.String(),
time.Now().UTC(),
"success",
``, // empty error field
); err != nil {
goto bailout
}
// commit transaction
if err = tx.Commit(); err != nil {
goto bailout
}
log.Printf("SUCCESS - Finished job: %s\n", q.JobId.String())
// accept tree changes
tk.tree.Commit()
return
bailout:
log.Printf("FAILED - Finished job: %s\n", q.JobId.String())
log.Println(err)
tk.tree.Rollback()
tx.Rollback()
tk.conn.Exec(
tkStmtFinishJob,
q.JobId.String(),
time.Now().UTC(),
"failed",
err.Error(),
)
for i := len(tk.actionChan); i > 0; i-- {
a := <-tk.actionChan
jB, _ := json.Marshal(a)
log.Printf("Cleaned message: %s\n", string(jB))
}
return
}
// convertCheckForDelete builds the minimal tree.Check required to remove a
// check: only SourceId (from the config's ExternalId) and ConfigId are
// populated; Id and InheritedFrom stay uuid.Nil.
func (tk *treeKeeper) convertCheckForDelete(conf *proto.CheckConfig) (*tree.Check, error) {
	chk := &tree.Check{
		Id:            uuid.Nil,
		InheritedFrom: uuid.Nil,
	}
	srcId, err := uuid.FromString(conf.ExternalId)
	if err != nil {
		return nil, err
	}
	cfgId, err := uuid.FromString(conf.Id)
	if err != nil {
		return nil, err
	}
	chk.SourceId = srcId
	chk.ConfigId = cfgId
	return chk, nil
}
// convertCheck translates a protocol-level check configuration into the
// tree's internal check representation. The check's view is resolved from
// the capability via the prepared get_view statement; on lookup failure an
// empty (non-nil) check and the error are returned.
func (tk *treeKeeper) convertCheck(conf *proto.CheckConfig) (*tree.Check, error) {
	chk := &tree.Check{
		Id:            uuid.Nil,
		SourceId:      uuid.Nil,
		InheritedFrom: uuid.Nil,
		Inheritance:   conf.Inheritance,
		ChildrenOnly:  conf.ChildrenOnly,
		Interval:      conf.Interval,
	}
	// parse errors are deliberately ignored here, leaving uuid.Nil in place
	chk.CapabilityId, _ = uuid.FromString(conf.CapabilityId)
	chk.ConfigId, _ = uuid.FromString(conf.Id)
	if err := tk.get_view.QueryRow(conf.CapabilityId).Scan(&chk.View); err != nil {
		return &tree.Check{}, err
	}
	chk.Thresholds = make([]tree.CheckThreshold, 0, len(conf.Thresholds))
	for _, thr := range conf.Thresholds {
		chk.Thresholds = append(chk.Thresholds, tree.CheckThreshold{
			Predicate: thr.Predicate.Symbol,
			Level:     uint8(thr.Level.Numeric),
			Value:     thr.Value,
		})
	}
	chk.Constraints = make([]tree.CheckConstraint, 0, len(conf.Constraints))
	for _, constr := range conf.Constraints {
		// Key/Value depend on the constraint type; unknown types keep
		// empty Key/Value, matching the original switch fall-through.
		c := tree.CheckConstraint{
			Type: constr.ConstraintType,
		}
		switch c.Type {
		case "native":
			c.Key, c.Value = constr.Native.Name, constr.Native.Value
		case "oncall":
			c.Key, c.Value = "OncallId", constr.Oncall.Id
		case "custom":
			c.Key, c.Value = constr.Custom.Id, constr.Custom.Value
		case "system":
			c.Key, c.Value = constr.System.Name, constr.System.Value
		case "service":
			c.Key, c.Value = "name", constr.Service.Name
		case "attribute":
			c.Key, c.Value = constr.Attribute.Name, constr.Attribute.Value
		}
		chk.Constraints = append(chk.Constraints, c)
	}
	return chk, nil
}
// vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix
TreeKeeper: add stopped attribute
package main
import (
"database/sql"
"encoding/json"
"fmt"
"log"
"strconv"
"strings"
"time"
"github.com/satori/go.uuid"
)
// treeRequest is the unit of work delivered to a treeKeeper via its input
// channel; treeKeeper.process dispatches on the Action field.
type treeRequest struct {
	RequestType string // request category — not inspected in the visible treeKeeper code; TODO confirm semantics
	Action      string // operation name (e.g. "create_bucket"); switched on by treeKeeper.process
	User        string // requesting user — presumably for auditing; verify against callers
	JobId       uuid.UUID // job identifier, recorded via the start-job/finish-job statements
	reply       chan somaResult // result channel — NOTE(review): not used in the visible code; confirm who reads it
	// Exactly the payload matching Action is expected to be populated.
	Repository  somaRepositoryRequest
	Bucket      somaBucketRequest
	Group       somaGroupRequest
	Cluster     somaClusterRequest
	Node        somaNodeRequest
	CheckConfig somaCheckConfigRequest
}
// treeResult carries the outcome of a processed tree request.
type treeResult struct {
	ResultType  string
	ResultError error
	JobId       uuid.UUID
	Repository  somaRepositoryResult
	Bucket      somaRepositoryRequest // NOTE(review): field named Bucket but typed somaRepositoryRequest — looks like a copy/paste slip; confirm intended type
}
// treeKeeper owns a single repository's configuration tree and serializes
// all mutations to it. One goroutine per repository executes run().
type treeKeeper struct {
	repoId     string // id of the repository this keeper manages
	repoName   string // human-readable repository name, used in log output
	team       string // owning team; applied to newly created buckets/groups/clusters
	broken     bool   // when set, run() only idles in a holding pattern until shutdown
	ready      bool   // set by run() once prepared statements are ready; exposed via isReady()
	stopped    bool   // set by stop()
	frozen     bool   // when set, process() results do not trigger deployment-detail rebuilds
	input      chan treeRequest // work queue consumed by run()
	shutdown   chan bool        // signals run() (and the broken-repo holding loop) to exit
	conn       *sql.DB
	tree       *tree.Tree
	errChan    chan *tree.Error  // errors emitted by the tree — TODO confirm consumer
	actionChan chan *tree.Action // actions emitted by the tree; drained during job processing/cleanup
	start_job  *sql.Stmt // prepared from tkStmtStartJob in run()
	get_view   *sql.Stmt // prepared from tkStmtGetViewFromCapability; used by convertCheck
}
// run is the treeKeeper's main loop. It loads the tree, and then either
// idles (broken repository), waits for shutdown (observer mode), or serves
// requests from the input channel until shutdown.
//
// Fixes vs. the previous version:
//   - the trailing `exit:` label sat directly before the closing brace,
//     which gc rejects ("missing statement after label"); the goto/label
//     pair is replaced by a plain return (deferred Close calls still run).
//   - time.NewTicker(...).C discarded the *time.Ticker, so the ticker
//     could never be stopped; it is now stopped via defer.
func (tk *treeKeeper) run() {
	log.Printf("Starting TreeKeeper for Repo %s (%s)", tk.repoName, tk.repoId)
	tk.startupLoad()
	if tk.broken {
		// broken repository: do nothing but log periodically until shutdown
		tickTack := time.NewTicker(time.Second * 10)
		defer tickTack.Stop()
	hoverloop:
		for {
			select {
			case <-tickTack.C:
				log.Printf("TK[%s]: BROKEN REPOSITORY %s flying holding patterns!\n",
					tk.repoName, tk.repoId)
			case <-tk.shutdown:
				break hoverloop
			}
		}
		return
	}
	var err error
	if tk.start_job, err = tk.conn.Prepare(tkStmtStartJob); err != nil {
		log.Fatal("treekeeper/start-job: ", err)
	}
	defer tk.start_job.Close()
	if tk.get_view, err = tk.conn.Prepare(tkStmtGetViewFromCapability); err != nil {
		log.Fatal("treekeeper/get-view-by-capability: ", err)
	}
	defer tk.get_view.Close()
	log.Printf("TK[%s]: ready for service!\n", tk.repoName)
	tk.ready = true
	if SomaCfg.Observer {
		// observer mode: never process requests, just wait for shutdown
		fmt.Printf("TreeKeeper [%s] entered observer mode\n", tk.repoName)
		<-tk.shutdown
		return
	}
runloop:
	for {
		select {
		case <-tk.shutdown:
			break runloop
		case req := <-tk.input:
			tk.process(&req)
			handlerMap[`jobDelay`].(jobDelay).notify <- req.JobId.String()
			if !tk.frozen {
				tk.buildDeploymentDetails()
				tk.orderDeploymentDetails()
			}
		}
	}
}
// isReady reports whether the keeper has finished startup and is serving.
func (tk *treeKeeper) isReady() bool {
	ready := tk.ready
	return ready
}
// isBroken reports whether the keeper's repository failed to load.
func (tk *treeKeeper) isBroken() bool {
	broken := tk.broken
	return broken
}
// stop marks the keeper as stopped and clears the ready/broken flags.
func (tk *treeKeeper) stop() {
	tk.stopped = true
	tk.ready = false
	tk.broken = false
}
func (tk *treeKeeper) process(q *treeRequest) {
var (
err error
hasErrors bool
tx *sql.Tx
treeCheck *tree.Check
nullBucket sql.NullString
txStmtPropertyInstanceCreate *sql.Stmt
txStmtRepositoryPropertyServiceCreate *sql.Stmt
txStmtRepositoryPropertySystemCreate *sql.Stmt
txStmtRepositoryPropertyOncallCreate *sql.Stmt
txStmtRepositoryPropertyCustomCreate *sql.Stmt
txStmtCreateBucket *sql.Stmt
txStmtBucketPropertyServiceCreate *sql.Stmt
txStmtBucketPropertySystemCreate *sql.Stmt
txStmtBucketPropertyOncallCreate *sql.Stmt
txStmtBucketPropertyCustomCreate *sql.Stmt
txStmtGroupCreate *sql.Stmt
txStmtGroupUpdate *sql.Stmt
txStmtGroupDelete *sql.Stmt
txStmtGroupMemberNewNode *sql.Stmt
txStmtGroupMemberNewCluster *sql.Stmt
txStmtGroupMemberNewGroup *sql.Stmt
txStmtGroupMemberRemoveNode *sql.Stmt
txStmtGroupMemberRemoveCluster *sql.Stmt
txStmtGroupMemberRemoveGroup *sql.Stmt
txStmtGroupPropertyServiceCreate *sql.Stmt
txStmtGroupPropertySystemCreate *sql.Stmt
txStmtGroupPropertyOncallCreate *sql.Stmt
txStmtGroupPropertyCustomCreate *sql.Stmt
txStmtClusterCreate *sql.Stmt
txStmtClusterUpdate *sql.Stmt
txStmtClusterDelete *sql.Stmt
txStmtClusterMemberNew *sql.Stmt
txStmtClusterMemberRemove *sql.Stmt
txStmtClusterPropertyServiceCreate *sql.Stmt
txStmtClusterPropertySystemCreate *sql.Stmt
txStmtClusterPropertyOncallCreate *sql.Stmt
txStmtClusterPropertyCustomCreate *sql.Stmt
txStmtBucketAssignNode *sql.Stmt
txStmtUpdateNodeState *sql.Stmt
txStmtNodeUnassignFromBucket *sql.Stmt
txStmtNodePropertyServiceCreate *sql.Stmt
txStmtNodePropertySystemCreate *sql.Stmt
txStmtNodePropertyOncallCreate *sql.Stmt
txStmtNodePropertyCustomCreate *sql.Stmt
txStmtCreateCheckConfigurationBase *sql.Stmt
txStmtCreateCheckConfigurationThreshold *sql.Stmt
txStmtCreateCheckConfigurationConstraintSystem *sql.Stmt
txStmtCreateCheckConfigurationConstraintNative *sql.Stmt
txStmtCreateCheckConfigurationConstraintOncall *sql.Stmt
txStmtCreateCheckConfigurationConstraintCustom *sql.Stmt
txStmtCreateCheckConfigurationConstraintService *sql.Stmt
txStmtCreateCheckConfigurationConstraintAttribute *sql.Stmt
txStmtCreateCheck *sql.Stmt
txStmtCreateCheckInstance *sql.Stmt
txStmtCreateCheckInstanceConfiguration *sql.Stmt
txStmtDeleteCheck *sql.Stmt
txStmtDeleteCheckInstance *sql.Stmt
)
_, err = tk.start_job.Exec(q.JobId.String(), time.Now().UTC())
if err != nil {
log.Println(err)
}
log.Printf("Processing job: %s\n", q.JobId.String())
tk.tree.Begin()
switch q.Action {
//
// REPOSITORY MANIPULATION REQUESTS
case "add_system_property_to_repository":
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
Id: uuid.NewV4(),
Inheritance: (*q.Repository.Repository.Properties)[0].Inheritance,
ChildrenOnly: (*q.Repository.Repository.Properties)[0].ChildrenOnly,
View: (*q.Repository.Repository.Properties)[0].View,
Key: (*q.Repository.Repository.Properties)[0].System.Name,
Value: (*q.Repository.Repository.Properties)[0].System.Value,
})
case `delete_system_property_from_repository`:
srcUUID, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `repository`,
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
SourceId: srcUUID,
View: (*q.Repository.Repository.Properties)[0].View,
Key: (*q.Repository.Repository.Properties)[0].System.Name,
Value: (*q.Repository.Repository.Properties)[0].System.Value,
})
case "add_service_property_to_repository":
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
Id: uuid.NewV4(),
Inheritance: (*q.Repository.Repository.Properties)[0].Inheritance,
ChildrenOnly: (*q.Repository.Repository.Properties)[0].ChildrenOnly,
View: (*q.Repository.Repository.Properties)[0].View,
Service: (*q.Repository.Repository.Properties)[0].Service.Name,
Attributes: (*q.Repository.Repository.Properties)[0].Service.Attributes,
})
case `delete_service_property_from_repository`:
srcUUID, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
SourceId: srcUUID,
View: (*q.Repository.Repository.Properties)[0].View,
Service: (*q.Repository.Repository.Properties)[0].Service.Name,
})
case "add_oncall_property_to_repository":
oncallId, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
Id: uuid.NewV4(),
OncallId: oncallId,
Inheritance: (*q.Repository.Repository.Properties)[0].Inheritance,
ChildrenOnly: (*q.Repository.Repository.Properties)[0].ChildrenOnly,
View: (*q.Repository.Repository.Properties)[0].View,
Name: (*q.Repository.Repository.Properties)[0].Oncall.Name,
Number: (*q.Repository.Repository.Properties)[0].Oncall.Number,
})
case `delete_oncall_property_from_repository`:
srcUUID, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].SourceInstanceId)
oncallId, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
SourceId: srcUUID,
OncallId: oncallId,
View: (*q.Repository.Repository.Properties)[0].View,
Name: (*q.Repository.Repository.Properties)[0].Oncall.Name,
Number: (*q.Repository.Repository.Properties)[0].Oncall.Number,
})
case "add_custom_property_to_repository":
customId, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
Id: uuid.NewV4(),
CustomId: customId,
Inheritance: (*q.Repository.Repository.Properties)[0].Inheritance,
ChildrenOnly: (*q.Repository.Repository.Properties)[0].ChildrenOnly,
View: (*q.Repository.Repository.Properties)[0].View,
Key: (*q.Repository.Repository.Properties)[0].Custom.Name,
Value: (*q.Repository.Repository.Properties)[0].Custom.Value,
})
case `delete_custom_property_from_repository`:
srcUUID, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].SourceInstanceId)
customId, _ := uuid.FromString((*q.Repository.Repository.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "repository",
ElementId: q.Repository.Repository.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
SourceId: srcUUID,
CustomId: customId,
View: (*q.Repository.Repository.Properties)[0].View,
Key: (*q.Repository.Repository.Properties)[0].Custom.Name,
Value: (*q.Repository.Repository.Properties)[0].Custom.Value,
})
//
// BUCKET MANIPULATION REQUESTS
case "create_bucket":
tree.NewBucket(tree.BucketSpec{
Id: uuid.NewV4().String(),
Name: q.Bucket.Bucket.Name,
Environment: q.Bucket.Bucket.Environment,
Team: tk.team,
Deleted: q.Bucket.Bucket.IsDeleted,
Frozen: q.Bucket.Bucket.IsFrozen,
Repository: q.Bucket.Bucket.RepositoryId,
}).Attach(tree.AttachRequest{
Root: tk.tree,
ParentType: "repository",
ParentId: tk.repoId,
ParentName: tk.repoName,
})
case "add_system_property_to_bucket":
tk.tree.Find(tree.FindRequest{
ElementType: "bucket",
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
Id: uuid.NewV4(),
Inheritance: (*q.Bucket.Bucket.Properties)[0].Inheritance,
ChildrenOnly: (*q.Bucket.Bucket.Properties)[0].ChildrenOnly,
View: (*q.Bucket.Bucket.Properties)[0].View,
Key: (*q.Bucket.Bucket.Properties)[0].System.Name,
Value: (*q.Bucket.Bucket.Properties)[0].System.Value,
})
case `delete_system_property_from_bucket`:
srcUUID, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `bucket`,
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
SourceId: srcUUID,
View: (*q.Bucket.Bucket.Properties)[0].View,
Key: (*q.Bucket.Bucket.Properties)[0].System.Name,
Value: (*q.Bucket.Bucket.Properties)[0].System.Value,
})
case "add_service_property_to_bucket":
tk.tree.Find(tree.FindRequest{
ElementType: "bucket",
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
Id: uuid.NewV4(),
Inheritance: (*q.Bucket.Bucket.Properties)[0].Inheritance,
ChildrenOnly: (*q.Bucket.Bucket.Properties)[0].ChildrenOnly,
View: (*q.Bucket.Bucket.Properties)[0].View,
Service: (*q.Bucket.Bucket.Properties)[0].Service.Name,
Attributes: (*q.Bucket.Bucket.Properties)[0].Service.Attributes,
})
case `delete_service_property_from_bucket`:
srcUUID, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `bucket`,
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
SourceId: srcUUID,
View: (*q.Bucket.Bucket.Properties)[0].View,
Service: (*q.Bucket.Bucket.Properties)[0].Service.Name,
})
case "add_oncall_property_to_bucket":
oncallId, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "bucket",
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
Id: uuid.NewV4(),
OncallId: oncallId,
Inheritance: (*q.Bucket.Bucket.Properties)[0].Inheritance,
ChildrenOnly: (*q.Bucket.Bucket.Properties)[0].ChildrenOnly,
View: (*q.Bucket.Bucket.Properties)[0].View,
Name: (*q.Bucket.Bucket.Properties)[0].Oncall.Name,
Number: (*q.Bucket.Bucket.Properties)[0].Oncall.Number,
})
case `delete_oncall_property_from_bucket`:
srcUUID, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].SourceInstanceId)
oncallId, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `bucket`,
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
SourceId: srcUUID,
OncallId: oncallId,
View: (*q.Bucket.Bucket.Properties)[0].View,
Name: (*q.Bucket.Bucket.Properties)[0].Oncall.Name,
Number: (*q.Bucket.Bucket.Properties)[0].Oncall.Number,
})
case "add_custom_property_to_bucket":
customId, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "bucket",
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
Id: uuid.NewV4(),
CustomId: customId,
Inheritance: (*q.Bucket.Bucket.Properties)[0].Inheritance,
ChildrenOnly: (*q.Bucket.Bucket.Properties)[0].ChildrenOnly,
View: (*q.Bucket.Bucket.Properties)[0].View,
Key: (*q.Bucket.Bucket.Properties)[0].Custom.Name,
Value: (*q.Bucket.Bucket.Properties)[0].Custom.Value,
})
case `delete_custom_property_from_bucket`:
srcUUID, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].SourceInstanceId)
customId, _ := uuid.FromString((*q.Bucket.Bucket.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `bucket`,
ElementId: q.Bucket.Bucket.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
SourceId: srcUUID,
CustomId: customId,
View: (*q.Bucket.Bucket.Properties)[0].View,
Key: (*q.Bucket.Bucket.Properties)[0].Custom.Name,
Value: (*q.Bucket.Bucket.Properties)[0].Custom.Value,
})
//
// GROUP MANIPULATION REQUESTS
case "create_group":
tree.NewGroup(tree.GroupSpec{
Id: uuid.NewV4().String(),
Name: q.Group.Group.Name,
Team: tk.team,
}).Attach(tree.AttachRequest{
Root: tk.tree,
ParentType: "bucket",
ParentId: q.Group.Group.BucketId,
})
case "delete_group":
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: q.Group.Group.Id,
}, true).(tree.BucketAttacher).Destroy()
case "reset_group_to_bucket":
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: q.Group.Group.Id,
}, true).(tree.BucketAttacher).Detach()
case "add_group_to_group":
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: (*q.Group.Group.MemberGroups)[0].Id,
}, true).(tree.BucketAttacher).ReAttach(tree.AttachRequest{
Root: tk.tree,
ParentType: "group",
ParentId: q.Group.Group.Id,
})
case "add_system_property_to_group":
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
Id: uuid.NewV4(),
Inheritance: (*q.Group.Group.Properties)[0].Inheritance,
ChildrenOnly: (*q.Group.Group.Properties)[0].ChildrenOnly,
View: (*q.Group.Group.Properties)[0].View,
Key: (*q.Group.Group.Properties)[0].System.Name,
Value: (*q.Group.Group.Properties)[0].System.Value,
})
case `delete_system_property_from_group`:
srcUUID, _ := uuid.FromString((*q.Group.Group.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `group`,
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
SourceId: srcUUID,
View: (*q.Group.Group.Properties)[0].View,
Key: (*q.Group.Group.Properties)[0].System.Name,
Value: (*q.Group.Group.Properties)[0].System.Value,
})
case "add_service_property_to_group":
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
Id: uuid.NewV4(),
Inheritance: (*q.Group.Group.Properties)[0].Inheritance,
ChildrenOnly: (*q.Group.Group.Properties)[0].ChildrenOnly,
View: (*q.Group.Group.Properties)[0].View,
Service: (*q.Group.Group.Properties)[0].Service.Name,
Attributes: (*q.Group.Group.Properties)[0].Service.Attributes,
})
case `delete_service_property_from_group`:
srcUUID, _ := uuid.FromString((*q.Group.Group.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `group`,
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
SourceId: srcUUID,
View: (*q.Group.Group.Properties)[0].View,
Service: (*q.Group.Group.Properties)[0].Service.Name,
})
case "add_oncall_property_to_group":
oncallId, _ := uuid.FromString((*q.Group.Group.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
Id: uuid.NewV4(),
OncallId: oncallId,
Inheritance: (*q.Group.Group.Properties)[0].Inheritance,
ChildrenOnly: (*q.Group.Group.Properties)[0].ChildrenOnly,
View: (*q.Group.Group.Properties)[0].View,
Name: (*q.Group.Group.Properties)[0].Oncall.Name,
Number: (*q.Group.Group.Properties)[0].Oncall.Number,
})
case `delete_oncall_property_from_group`:
srcUUID, _ := uuid.FromString((*q.Group.Group.Properties)[0].SourceInstanceId)
oncallId, _ := uuid.FromString((*q.Group.Group.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `group`,
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
SourceId: srcUUID,
OncallId: oncallId,
View: (*q.Group.Group.Properties)[0].View,
Name: (*q.Group.Group.Properties)[0].Oncall.Name,
Number: (*q.Group.Group.Properties)[0].Oncall.Number,
})
case "add_custom_property_to_group":
customId, _ := uuid.FromString((*q.Group.Group.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "group",
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
Id: uuid.NewV4(),
CustomId: customId,
Inheritance: (*q.Group.Group.Properties)[0].Inheritance,
ChildrenOnly: (*q.Group.Group.Properties)[0].ChildrenOnly,
View: (*q.Group.Group.Properties)[0].View,
Key: (*q.Group.Group.Properties)[0].Custom.Name,
Value: (*q.Group.Group.Properties)[0].Custom.Value,
})
case `delete_custom_property_from_group`:
srcUUID, _ := uuid.FromString((*q.Group.Group.Properties)[0].SourceInstanceId)
customId, _ := uuid.FromString((*q.Group.Group.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `group`,
ElementId: q.Group.Group.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
SourceId: srcUUID,
CustomId: customId,
View: (*q.Group.Group.Properties)[0].View,
Key: (*q.Group.Group.Properties)[0].Custom.Name,
Value: (*q.Group.Group.Properties)[0].Custom.Value,
})
//
// CLUSTER MANIPULATION REQUESTS
case "create_cluster":
tree.NewCluster(tree.ClusterSpec{
Id: uuid.NewV4().String(),
Name: q.Cluster.Cluster.Name,
Team: tk.team,
}).Attach(tree.AttachRequest{
Root: tk.tree,
ParentType: "bucket",
ParentId: q.Cluster.Cluster.BucketId,
})
case "delete_cluster":
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.BucketAttacher).Destroy()
case "reset_cluster_to_bucket":
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.BucketAttacher).Detach()
case "add_cluster_to_group":
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: (*q.Group.Group.MemberClusters)[0].Id,
}, true).(tree.BucketAttacher).ReAttach(tree.AttachRequest{
Root: tk.tree,
ParentType: "group",
ParentId: q.Group.Group.Id,
})
case "add_system_property_to_cluster":
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
Id: uuid.NewV4(),
Inheritance: (*q.Cluster.Cluster.Properties)[0].Inheritance,
ChildrenOnly: (*q.Cluster.Cluster.Properties)[0].ChildrenOnly,
View: (*q.Cluster.Cluster.Properties)[0].View,
Key: (*q.Cluster.Cluster.Properties)[0].System.Name,
Value: (*q.Cluster.Cluster.Properties)[0].System.Value,
})
case `delete_system_property_from_cluster`:
srcUUID, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `cluster`,
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
SourceId: srcUUID,
View: (*q.Cluster.Cluster.Properties)[0].View,
Key: (*q.Cluster.Cluster.Properties)[0].System.Name,
Value: (*q.Cluster.Cluster.Properties)[0].System.Value,
})
case "add_service_property_to_cluster":
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
Id: uuid.NewV4(),
Inheritance: (*q.Cluster.Cluster.Properties)[0].Inheritance,
ChildrenOnly: (*q.Cluster.Cluster.Properties)[0].ChildrenOnly,
View: (*q.Cluster.Cluster.Properties)[0].View,
Service: (*q.Cluster.Cluster.Properties)[0].Service.Name,
Attributes: (*q.Cluster.Cluster.Properties)[0].Service.Attributes,
})
case `delete_service_property_from_cluster`:
srcUUID, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `cluster`,
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
SourceId: srcUUID,
View: (*q.Cluster.Cluster.Properties)[0].View,
Service: (*q.Cluster.Cluster.Properties)[0].Service.Name,
})
case "add_oncall_property_to_cluster":
oncallId, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
Id: uuid.NewV4(),
OncallId: oncallId,
Inheritance: (*q.Cluster.Cluster.Properties)[0].Inheritance,
ChildrenOnly: (*q.Cluster.Cluster.Properties)[0].ChildrenOnly,
View: (*q.Cluster.Cluster.Properties)[0].View,
Name: (*q.Cluster.Cluster.Properties)[0].Oncall.Name,
Number: (*q.Cluster.Cluster.Properties)[0].Oncall.Number,
})
case `delete_oncall_property_from_cluster`:
srcUUID, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].SourceInstanceId)
oncallId, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `cluster`,
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
SourceId: srcUUID,
OncallId: oncallId,
View: (*q.Cluster.Cluster.Properties)[0].View,
Name: (*q.Cluster.Cluster.Properties)[0].Oncall.Name,
Number: (*q.Cluster.Cluster.Properties)[0].Oncall.Number,
})
case "add_custom_property_to_cluster":
customId, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "cluster",
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
Id: uuid.NewV4(),
CustomId: customId,
Inheritance: (*q.Cluster.Cluster.Properties)[0].Inheritance,
ChildrenOnly: (*q.Cluster.Cluster.Properties)[0].ChildrenOnly,
View: (*q.Cluster.Cluster.Properties)[0].View,
Key: (*q.Cluster.Cluster.Properties)[0].Custom.Name,
Value: (*q.Cluster.Cluster.Properties)[0].Custom.Value,
})
case `delete_custom_property_from_cluster`:
srcUUID, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].SourceInstanceId)
customId, _ := uuid.FromString((*q.Cluster.Cluster.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `cluster`,
ElementId: q.Cluster.Cluster.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
SourceId: srcUUID,
CustomId: customId,
View: (*q.Cluster.Cluster.Properties)[0].View,
Key: (*q.Cluster.Cluster.Properties)[0].Custom.Name,
Value: (*q.Cluster.Cluster.Properties)[0].Custom.Value,
})
//
// NODE MANIPULATION REQUESTS
case "assign_node":
tree.NewNode(tree.NodeSpec{
Id: q.Node.Node.Id,
AssetId: q.Node.Node.AssetId,
Name: q.Node.Node.Name,
Team: q.Node.Node.TeamId,
ServerId: q.Node.Node.ServerId,
Online: q.Node.Node.IsOnline,
Deleted: q.Node.Node.IsDeleted,
}).Attach(tree.AttachRequest{
Root: tk.tree,
ParentType: "bucket",
ParentId: q.Node.Node.Config.BucketId,
})
case "delete_node":
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: q.Node.Node.Id,
}, true).(tree.BucketAttacher).Destroy()
case "reset_node_to_bucket":
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: q.Node.Node.Id,
}, true).(tree.BucketAttacher).Detach()
case "add_node_to_group":
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: (*q.Group.Group.MemberNodes)[0].Id,
}, true).(tree.BucketAttacher).ReAttach(tree.AttachRequest{
Root: tk.tree,
ParentType: "group",
ParentId: q.Group.Group.Id,
})
case "add_node_to_cluster":
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: (*q.Cluster.Cluster.Members)[0].Id,
}, true).(tree.BucketAttacher).ReAttach(tree.AttachRequest{
Root: tk.tree,
ParentType: "cluster",
ParentId: q.Cluster.Cluster.Id,
})
case "add_system_property_to_node":
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertySystem{
Id: uuid.NewV4(),
Inheritance: (*q.Node.Node.Properties)[0].Inheritance,
ChildrenOnly: (*q.Node.Node.Properties)[0].ChildrenOnly,
View: (*q.Node.Node.Properties)[0].View,
Key: (*q.Node.Node.Properties)[0].System.Name,
Value: (*q.Node.Node.Properties)[0].System.Value,
})
case `delete_system_property_from_node`:
srcUUID, _ := uuid.FromString((*q.Node.Node.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `node`,
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertySystem{
SourceId: srcUUID,
View: (*q.Node.Node.Properties)[0].View,
Key: (*q.Node.Node.Properties)[0].System.Name,
Value: (*q.Node.Node.Properties)[0].System.Value,
})
case "add_service_property_to_node":
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyService{
Id: uuid.NewV4(),
Inheritance: (*q.Node.Node.Properties)[0].Inheritance,
ChildrenOnly: (*q.Node.Node.Properties)[0].ChildrenOnly,
View: (*q.Node.Node.Properties)[0].View,
Service: (*q.Node.Node.Properties)[0].Service.Name,
Attributes: (*q.Node.Node.Properties)[0].Service.Attributes,
})
case `delete_service_property_from_node`:
srcUUID, _ := uuid.FromString((*q.Node.Node.Properties)[0].SourceInstanceId)
tk.tree.Find(tree.FindRequest{
ElementType: `node`,
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyService{
SourceId: srcUUID,
View: (*q.Node.Node.Properties)[0].View,
Service: (*q.Node.Node.Properties)[0].Service.Name,
})
case "add_oncall_property_to_node":
oncallId, _ := uuid.FromString((*q.Node.Node.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyOncall{
Id: uuid.NewV4(),
OncallId: oncallId,
Inheritance: (*q.Node.Node.Properties)[0].Inheritance,
ChildrenOnly: (*q.Node.Node.Properties)[0].ChildrenOnly,
View: (*q.Node.Node.Properties)[0].View,
Name: (*q.Node.Node.Properties)[0].Oncall.Name,
Number: (*q.Node.Node.Properties)[0].Oncall.Number,
})
case `delete_oncall_property_from_node`:
srcUUID, _ := uuid.FromString((*q.Node.Node.Properties)[0].SourceInstanceId)
oncallId, _ := uuid.FromString((*q.Node.Node.Properties)[0].Oncall.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `node`,
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyOncall{
SourceId: srcUUID,
OncallId: oncallId,
View: (*q.Node.Node.Properties)[0].View,
Name: (*q.Node.Node.Properties)[0].Oncall.Name,
Number: (*q.Node.Node.Properties)[0].Oncall.Number,
})
case "add_custom_property_to_node":
customId, _ := uuid.FromString((*q.Node.Node.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: "node",
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).SetProperty(&tree.PropertyCustom{
Id: uuid.NewV4(),
CustomId: customId,
Inheritance: (*q.Node.Node.Properties)[0].Inheritance,
ChildrenOnly: (*q.Node.Node.Properties)[0].ChildrenOnly,
View: (*q.Node.Node.Properties)[0].View,
Key: (*q.Node.Node.Properties)[0].Custom.Name,
Value: (*q.Node.Node.Properties)[0].Custom.Value,
})
case `delete_custom_property_from_node`:
srcUUID, _ := uuid.FromString((*q.Node.Node.Properties)[0].SourceInstanceId)
customId, _ := uuid.FromString((*q.Node.Node.Properties)[0].Custom.Id)
tk.tree.Find(tree.FindRequest{
ElementType: `node`,
ElementId: q.Node.Node.Id,
}, true).(tree.Propertier).DeleteProperty(&tree.PropertyCustom{
SourceId: srcUUID,
CustomId: customId,
View: (*q.Node.Node.Properties)[0].View,
Key: (*q.Node.Node.Properties)[0].Custom.Name,
Value: (*q.Node.Node.Properties)[0].Custom.Value,
})
//
// CHECK MANIPULATION REQUESTS
case `add_check_to_repository`:
fallthrough
case `add_check_to_bucket`:
fallthrough
case `add_check_to_group`:
fallthrough
case `add_check_to_cluster`:
fallthrough
case `add_check_to_node`:
if treeCheck, err = tk.convertCheck(&q.CheckConfig.CheckConfig); err == nil {
tk.tree.Find(tree.FindRequest{
ElementType: q.CheckConfig.CheckConfig.ObjectType,
ElementId: q.CheckConfig.CheckConfig.ObjectId,
}, true).SetCheck(*treeCheck)
}
case `remove_check_from_repository`:
fallthrough
case `remove_check_from_bucket`:
fallthrough
case `remove_check_from_group`:
fallthrough
case `remove_check_from_cluster`:
fallthrough
case `remove_check_from_node`:
if treeCheck, err = tk.convertCheckForDelete(&q.CheckConfig.CheckConfig); err == nil {
tk.tree.Find(tree.FindRequest{
ElementType: q.CheckConfig.CheckConfig.ObjectType,
ElementId: q.CheckConfig.CheckConfig.ObjectId,
}, true).DeleteCheck(*treeCheck)
}
}
// check if we accumulated an error in one of the switch cases
if err != nil {
goto bailout
}
// recalculate check instances
tk.tree.ComputeCheckInstances()
// open multi-statement transaction
if tx, err = tk.conn.Begin(); err != nil {
goto bailout
}
defer tx.Rollback()
// prepare statements within tx context
if txStmtPropertyInstanceCreate, err = tx.Prepare(tkStmtPropertyInstanceCreate); err != nil {
log.Println("Failed to prepare: tkStmtPropertyInstanceCreate")
goto bailout
}
defer txStmtPropertyInstanceCreate.Close()
if txStmtCreateCheckConfigurationBase, err = tx.Prepare(tkStmtCreateCheckConfigurationBase); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationBase")
goto bailout
}
defer txStmtCreateCheckConfigurationBase.Close()
if txStmtCreateCheckConfigurationThreshold, err = tx.Prepare(tkStmtCreateCheckConfigurationThreshold); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationThreshold")
goto bailout
}
defer txStmtCreateCheckConfigurationThreshold.Close()
if txStmtCreateCheckConfigurationConstraintSystem, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintSystem); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintSystem")
goto bailout
}
defer txStmtCreateCheckConfigurationConstraintSystem.Close()
if txStmtCreateCheckConfigurationConstraintNative, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintNative); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintNative")
goto bailout
}
defer txStmtCreateCheckConfigurationConstraintNative.Close()
if txStmtCreateCheckConfigurationConstraintOncall, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintOncall); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintOncall")
goto bailout
}
defer txStmtCreateCheckConfigurationConstraintOncall.Close()
if txStmtCreateCheckConfigurationConstraintCustom, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintCustom); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintCustom")
goto bailout
}
defer txStmtCreateCheckConfigurationConstraintCustom.Close()
if txStmtCreateCheckConfigurationConstraintService, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintService); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintService")
goto bailout
}
defer txStmtCreateCheckConfigurationConstraintService.Close()
if txStmtCreateCheckConfigurationConstraintAttribute, err = tx.Prepare(tkStmtCreateCheckConfigurationConstraintAttribute); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckConfigurationConstraintAttribute")
goto bailout
}
defer txStmtCreateCheckConfigurationConstraintAttribute.Close()
if txStmtCreateCheck, err = tx.Prepare(tkStmtCreateCheck); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheck")
goto bailout
}
defer txStmtCreateCheck.Close()
if txStmtCreateCheckInstance, err = tx.Prepare(tkStmtCreateCheckInstance); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckInstance")
goto bailout
}
defer txStmtCreateCheckInstance.Close()
if txStmtCreateCheckInstanceConfiguration, err = tx.Prepare(tkStmtCreateCheckInstanceConfiguration); err != nil {
log.Println("Failed to prepare: tkStmtCreateCheckInstanceConfiguration")
goto bailout
}
defer txStmtCreateCheckInstanceConfiguration.Close()
if txStmtDeleteCheck, err = tx.Prepare(stmt.TxMarkCheckDeleted); err != nil {
log.Println("Failed to prepare: txStmtDeleteCheck")
goto bailout
}
if txStmtDeleteCheckInstance, err = tx.Prepare(stmt.TxMarkCheckInstanceDeleted); err != nil {
log.Println("Failed to prepare: txStmtDeleteCheckInstance")
goto bailout
}
//
// REPOSITORY
if txStmtRepositoryPropertyOncallCreate, err = tx.Prepare(tkStmtRepositoryPropertyOncallCreate); err != nil {
log.Println("Failed to prepare: tkStmtRepositoryPropertyOncallCreate")
goto bailout
}
defer txStmtRepositoryPropertyOncallCreate.Close()
if txStmtRepositoryPropertyServiceCreate, err = tx.Prepare(tkStmtRepositoryPropertyServiceCreate); err != nil {
log.Println("Failed to prepare: tkStmtRepositoryPropertyServiceCreate")
goto bailout
}
defer txStmtRepositoryPropertyServiceCreate.Close()
if txStmtRepositoryPropertySystemCreate, err = tx.Prepare(tkStmtRepositoryPropertySystemCreate); err != nil {
log.Println("Failed to prepare: tkStmtRepositoryPropertySystemCreate")
goto bailout
}
defer txStmtRepositoryPropertySystemCreate.Close()
if txStmtRepositoryPropertyCustomCreate, err = tx.Prepare(tkStmtRepositoryPropertyCustomCreate); err != nil {
log.Println("Failed to prepare: tkStmtRepositoryPropertyCustomCreate")
goto bailout
}
defer txStmtRepositoryPropertyCustomCreate.Close()
//
// BUCKET
if txStmtCreateBucket, err = tx.Prepare(tkStmtCreateBucket); err != nil {
log.Println("Failed to prepare: tkStmtCreateBucket")
goto bailout
}
defer txStmtCreateBucket.Close()
if txStmtBucketPropertyOncallCreate, err = tx.Prepare(tkStmtBucketPropertyOncallCreate); err != nil {
log.Println("Failed to prepare: tkStmtBucketPropertyOncallCreate")
goto bailout
}
defer txStmtBucketPropertyOncallCreate.Close()
if txStmtBucketPropertyServiceCreate, err = tx.Prepare(tkStmtBucketPropertyServiceCreate); err != nil {
log.Println("Failed to prepare: tkStmtBucketPropertyServiceCreate")
goto bailout
}
defer txStmtBucketPropertyServiceCreate.Close()
if txStmtBucketPropertySystemCreate, err = tx.Prepare(tkStmtBucketPropertySystemCreate); err != nil {
log.Println("Failed to prepare: tkStmtBucketPropertySystemCreate")
goto bailout
}
defer txStmtBucketPropertySystemCreate.Close()
if txStmtBucketPropertyCustomCreate, err = tx.Prepare(tkStmtBucketPropertyCustomCreate); err != nil {
log.Println("Failed to prepare: tkStmtBucketPropertyCustomCreate")
goto bailout
}
defer txStmtBucketPropertyCustomCreate.Close()
//
// GROUP
if txStmtGroupCreate, err = tx.Prepare(tkStmtGroupCreate); err != nil {
log.Println("Failed to prepare: tkStmtGroupCreate")
goto bailout
}
defer txStmtGroupCreate.Close()
if txStmtGroupUpdate, err = tx.Prepare(tkStmtGroupUpdate); err != nil {
log.Println("Failed to prepare: tkStmtGroupUpdate")
goto bailout
}
defer txStmtGroupUpdate.Close()
if txStmtGroupDelete, err = tx.Prepare(tkStmtGroupDelete); err != nil {
log.Println("Failed to prepare: tkStmtGroupDelete")
goto bailout
}
defer txStmtGroupDelete.Close()
if txStmtGroupMemberNewNode, err = tx.Prepare(tkStmtGroupMemberNewNode); err != nil {
log.Println("Failed to prepare: tkStmtGroupMemberNewNode")
goto bailout
}
defer txStmtGroupMemberNewNode.Close()
if txStmtGroupMemberNewCluster, err = tx.Prepare(tkStmtGroupMemberNewCluster); err != nil {
log.Println("Failed to prepare: tkStmtGroupMemberNewCluster")
goto bailout
}
defer txStmtGroupMemberNewCluster.Close()
if txStmtGroupMemberNewGroup, err = tx.Prepare(tkStmtGroupMemberNewGroup); err != nil {
log.Println("Failed to prepare: tkStmtGroupMemberNewGroup")
goto bailout
}
defer txStmtGroupMemberNewGroup.Close()
if txStmtGroupMemberRemoveNode, err = tx.Prepare(tkStmtGroupMemberRemoveNode); err != nil {
log.Println("Failed to prepare: tkStmtGroupMemberRemoveNode")
goto bailout
}
defer txStmtGroupMemberRemoveNode.Close()
if txStmtGroupMemberRemoveCluster, err = tx.Prepare(tkStmtGroupMemberRemoveCluster); err != nil {
log.Println("Failed to prepare: tkStmtGroupMemberRemoveCluster")
goto bailout
}
defer txStmtGroupMemberRemoveCluster.Close()
if txStmtGroupMemberRemoveGroup, err = tx.Prepare(tkStmtGroupMemberRemoveGroup); err != nil {
log.Println("Failed to prepare: tkStmtGroupMemberRemoveGroup")
goto bailout
}
defer txStmtGroupMemberRemoveGroup.Close()
if txStmtGroupPropertyOncallCreate, err = tx.Prepare(tkStmtGroupPropertyOncallCreate); err != nil {
log.Println("Failed to prepare: tkStmtGroupPropertyOncallCreate")
goto bailout
}
defer txStmtGroupPropertyOncallCreate.Close()
if txStmtGroupPropertyServiceCreate, err = tx.Prepare(tkStmtGroupPropertyServiceCreate); err != nil {
log.Println("Failed to prepare: tkStmtGroupPropertyServiceCreate")
goto bailout
}
defer txStmtGroupPropertyServiceCreate.Close()
if txStmtGroupPropertySystemCreate, err = tx.Prepare(tkStmtGroupPropertySystemCreate); err != nil {
log.Println("Failed to prepare: tkStmtGroupPropertySystemCreate")
goto bailout
}
defer txStmtGroupPropertySystemCreate.Close()
if txStmtGroupPropertyCustomCreate, err = tx.Prepare(tkStmtGroupPropertyCustomCreate); err != nil {
log.Println("Failed to prepare: tkStmtGroupPropertyCustomCreate")
goto bailout
}
defer txStmtGroupPropertyCustomCreate.Close()
//
// CLUSTER
if txStmtClusterCreate, err = tx.Prepare(tkStmtClusterCreate); err != nil {
log.Println("Failed to prepare: tkStmtClusterCreate")
goto bailout
}
defer txStmtClusterCreate.Close()
if txStmtClusterUpdate, err = tx.Prepare(tkStmtClusterUpdate); err != nil {
log.Println("Failed to prepare: tkStmtClusterUpdate")
goto bailout
}
defer txStmtClusterUpdate.Close()
if txStmtClusterDelete, err = tx.Prepare(tkStmtClusterDelete); err != nil {
log.Println("Failed to prepare: tkStmtClusterDelete")
goto bailout
}
defer txStmtClusterDelete.Close()
if txStmtClusterMemberNew, err = tx.Prepare(tkStmtClusterMemberNew); err != nil {
log.Println("Failed to prepare: tkStmtClusterMemberNew")
goto bailout
}
defer txStmtClusterMemberNew.Close()
if txStmtClusterMemberRemove, err = tx.Prepare(tkStmtClusterMemberRemove); err != nil {
log.Println("Failed to prepare: tkStmtClusterMemberRemove")
goto bailout
}
defer txStmtClusterMemberRemove.Close()
if txStmtClusterPropertyOncallCreate, err = tx.Prepare(tkStmtClusterPropertyOncallCreate); err != nil {
log.Println("Failed to prepare: tkStmtClusterPropertyOncallCreate")
goto bailout
}
defer txStmtClusterPropertyOncallCreate.Close()
if txStmtClusterPropertyServiceCreate, err = tx.Prepare(tkStmtClusterPropertyServiceCreate); err != nil {
log.Println("Failed to prepare: tkStmtClusterPropertyServiceCreate")
goto bailout
}
defer txStmtClusterPropertyServiceCreate.Close()
if txStmtClusterPropertySystemCreate, err = tx.Prepare(tkStmtClusterPropertySystemCreate); err != nil {
log.Println("Failed to prepare: tkStmtClusterPropertySystemCreate")
goto bailout
}
defer txStmtClusterPropertySystemCreate.Close()
if txStmtClusterPropertyCustomCreate, err = tx.Prepare(tkStmtClusterPropertyCustomCreate); err != nil {
log.Println("Failed to prepare: tkStmtClusterPropertyCustomCreate")
goto bailout
}
defer txStmtClusterPropertyCustomCreate.Close()
//
// NODE
if txStmtBucketAssignNode, err = tx.Prepare(tkStmtBucketAssignNode); err != nil {
log.Println("Failed to prepare: tkStmtBucketAssignNode")
goto bailout
}
defer txStmtBucketAssignNode.Close()
if txStmtUpdateNodeState, err = tx.Prepare(tkStmtUpdateNodeState); err != nil {
log.Println("Failed to prepare: tkStmtUpdateNodeState")
goto bailout
}
defer txStmtUpdateNodeState.Close()
if txStmtNodeUnassignFromBucket, err = tx.Prepare(tkStmtNodeUnassignFromBucket); err != nil {
log.Println("Failed to prepare: tkStmtNodeUnassignFromBucket")
goto bailout
}
defer txStmtNodeUnassignFromBucket.Close()
if txStmtNodePropertyOncallCreate, err = tx.Prepare(tkStmtNodePropertyOncallCreate); err != nil {
log.Println("Failed to prepare: tkStmtNodePropertyOncallCreate")
goto bailout
}
defer txStmtNodePropertyOncallCreate.Close()
if txStmtNodePropertyServiceCreate, err = tx.Prepare(tkStmtNodePropertyServiceCreate); err != nil {
log.Println("Failed to prepare: tkStmtNodePropertyServiceCreate")
goto bailout
}
defer txStmtNodePropertyServiceCreate.Close()
if txStmtNodePropertySystemCreate, err = tx.Prepare(tkStmtNodePropertySystemCreate); err != nil {
log.Println("Failed to prepare: tkStmtNodePropertySystemCreate")
goto bailout
}
defer txStmtNodePropertySystemCreate.Close()
if txStmtNodePropertyCustomCreate, err = tx.Prepare(tkStmtNodePropertyCustomCreate); err != nil {
log.Println("Failed to prepare: tkStmtNodePropertyCustomCreate")
goto bailout
}
defer txStmtNodePropertyCustomCreate.Close()
// defer constraint checks
if _, err = tx.Exec(tkStmtDeferAllConstraints); err != nil {
log.Println("Failed to exec: tkStmtDeferAllConstraints")
goto bailout
}
// save the check configuration as part of the transaction before
// processing the action channel
if strings.Contains(q.Action, "add_check_to_") {
if q.CheckConfig.CheckConfig.BucketId != "" {
nullBucket = sql.NullString{
String: q.CheckConfig.CheckConfig.BucketId,
Valid: true,
}
} else {
nullBucket = sql.NullString{String: "", Valid: false}
}
if _, err = txStmtCreateCheckConfigurationBase.Exec(
q.CheckConfig.CheckConfig.Id,
q.CheckConfig.CheckConfig.Name,
int64(q.CheckConfig.CheckConfig.Interval),
q.CheckConfig.CheckConfig.RepositoryId,
nullBucket,
q.CheckConfig.CheckConfig.CapabilityId,
q.CheckConfig.CheckConfig.ObjectId,
q.CheckConfig.CheckConfig.ObjectType,
q.CheckConfig.CheckConfig.IsActive,
q.CheckConfig.CheckConfig.IsEnabled,
q.CheckConfig.CheckConfig.Inheritance,
q.CheckConfig.CheckConfig.ChildrenOnly,
q.CheckConfig.CheckConfig.ExternalId,
); err != nil {
goto bailout
}
threshloop:
for _, thr := range q.CheckConfig.CheckConfig.Thresholds {
if _, err = txStmtCreateCheckConfigurationThreshold.Exec(
q.CheckConfig.CheckConfig.Id,
thr.Predicate.Symbol,
strconv.FormatInt(thr.Value, 10),
thr.Level.Name,
); err != nil {
break threshloop
}
}
if err != nil {
goto bailout
}
constrloop:
for _, constr := range q.CheckConfig.CheckConfig.Constraints {
switch constr.ConstraintType {
case "native":
if _, err = txStmtCreateCheckConfigurationConstraintNative.Exec(
q.CheckConfig.CheckConfig.Id,
constr.Native.Name,
constr.Native.Value,
); err != nil {
break constrloop
}
case "oncall":
if _, err = txStmtCreateCheckConfigurationConstraintOncall.Exec(
q.CheckConfig.CheckConfig.Id,
constr.Oncall.Id,
); err != nil {
break constrloop
}
case "custom":
if _, err = txStmtCreateCheckConfigurationConstraintCustom.Exec(
q.CheckConfig.CheckConfig.Id,
constr.Custom.Id,
constr.Custom.RepositoryId,
constr.Custom.Value,
); err != nil {
break constrloop
}
case "system":
if _, err = txStmtCreateCheckConfigurationConstraintSystem.Exec(
q.CheckConfig.CheckConfig.Id,
constr.System.Name,
constr.System.Value,
); err != nil {
break constrloop
}
case "service":
if constr.Service.TeamId != tk.team {
err = fmt.Errorf("Service constraint has mismatched TeamID values: %s/%s",
tk.team, constr.Service.TeamId)
fmt.Println(err)
break constrloop
}
log.Printf(`SQL: tkStmtCreateCheckConfigurationConstraintService:
CheckConfig ID: %s
Team ID: %s
Service Name: %s%s`,
q.CheckConfig.CheckConfig.Id,
tk.team,
constr.Service.Name, "\n")
if _, err = txStmtCreateCheckConfigurationConstraintService.Exec(
q.CheckConfig.CheckConfig.Id,
tk.team,
constr.Service.Name,
); err != nil {
break constrloop
}
case "attribute":
if _, err = txStmtCreateCheckConfigurationConstraintAttribute.Exec(
q.CheckConfig.CheckConfig.Id,
constr.Attribute.Name,
constr.Attribute.Value,
); err != nil {
break constrloop
}
}
}
if err != nil {
goto bailout
}
}
// mark the check configuration as deleted
if strings.HasPrefix(q.Action, `remove_check_from_`) {
if _, err = tx.Exec(stmt.TxMarkCheckConfigDeleted, q.CheckConfig.CheckConfig.Id); err != nil {
goto bailout
}
}
// if the error channel has entries, we can fully ignore the
// action channel
for i := len(tk.errChan); i > 0; i-- {
e := <-tk.errChan
b, _ := json.Marshal(e)
log.Println(string(b))
hasErrors = true
if err == nil {
err = fmt.Errorf(e.Action)
}
}
if hasErrors {
goto bailout
}
actionloop:
for i := len(tk.actionChan); i > 0; i-- {
a := <-tk.actionChan
// we need all messages to figure out why for example a deferred
// constraint later failed
//jBxX, _ := json.Marshal(a)
//log.Printf("%s - Processing: %s\n", q.JobId.String(), string(jBxX))
switch a.Type {
// REPOSITORY
case "repository":
switch a.Action {
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtRepositoryPropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Property.Custom.RepositoryId,
a.Property.View,
a.Property.Custom.Id,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
if _, err = txStmtRepositoryPropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Repository.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
if _, err = txStmtRepositoryPropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Repository.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtRepositoryPropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Repository.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtRepositoryPropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtRepositoryPropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtRepositoryPropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtRepositoryPropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
sql.NullString{String: "", Valid: false},
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Repository.Id,
"repository",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = txStmtDeleteCheck.Exec(
a.Check.CheckId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
// BUCKET
case "bucket":
switch a.Action {
case "create":
if _, err = txStmtCreateBucket.Exec(
a.Bucket.Id,
a.Bucket.Name,
a.Bucket.IsFrozen,
a.Bucket.IsDeleted,
a.Bucket.RepositoryId,
a.Bucket.Environment,
a.Bucket.TeamId,
); err != nil {
break actionloop
}
case "node_assignment":
if _, err = txStmtBucketAssignNode.Exec(
a.ChildNode.Id,
a.Bucket.Id,
a.Bucket.TeamId,
); err != nil {
break actionloop
}
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtBucketPropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.Custom.Id,
a.Property.Custom.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
if _, err = txStmtBucketPropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
log.Printf(`SQL: tkStmtBucketPropertyServiceCreate:
Instance ID: %s
Source Instance ID: %s
Bucket ID: %s
View: %s
Service Name: %s
Service TeamId: %s
Repository ID: %s
Inheritance Enabled: %t
Children Only: %t%s`,
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly, "\n")
if _, err = txStmtBucketPropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtBucketPropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Bucket.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtBucketPropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtBucketPropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtBucketPropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtBucketPropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Bucket.Id,
"bucket",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = txStmtDeleteCheck.Exec(
a.Check.CheckId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
// GROUP
case "group":
switch a.Action {
case "create":
if _, err = txStmtGroupCreate.Exec(
a.Group.Id,
a.Group.BucketId,
a.Group.Name,
a.Group.ObjectState,
a.Group.TeamId,
); err != nil {
break actionloop
}
case "update":
if _, err = txStmtGroupUpdate.Exec(
a.Group.Id,
a.Group.ObjectState,
); err != nil {
break actionloop
}
case "delete":
if _, err = txStmtGroupDelete.Exec(
a.Group.Id,
); err != nil {
break actionloop
}
case "member_new":
switch a.ChildType {
case "group":
log.Println("==> group/new membergroup")
if _, err = txStmtGroupMemberNewGroup.Exec(
a.Group.Id,
a.ChildGroup.Id,
a.Group.BucketId,
); err != nil {
break actionloop
}
case "cluster":
log.Println("==> group/new membercluster")
if _, err = txStmtGroupMemberNewCluster.Exec(
a.Group.Id,
a.ChildCluster.Id,
a.Group.BucketId,
); err != nil {
break actionloop
}
case "node":
log.Println("==> group/new membernode")
if _, err = txStmtGroupMemberNewNode.Exec(
a.Group.Id,
a.ChildNode.Id,
a.Group.BucketId,
); err != nil {
break actionloop
}
}
case "member_removed":
switch a.ChildType {
case "group":
if _, err = txStmtGroupMemberRemoveGroup.Exec(
a.Group.Id,
a.ChildGroup.Id,
); err != nil {
break actionloop
}
case "cluster":
if _, err = txStmtGroupMemberRemoveCluster.Exec(
a.Group.Id,
a.ChildCluster.Id,
); err != nil {
break actionloop
}
case "node":
if _, err = txStmtGroupMemberRemoveNode.Exec(
a.Group.Id,
a.ChildNode.Id,
); err != nil {
break actionloop
}
}
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtGroupPropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Group.Id,
a.Property.View,
a.Property.Custom.Id,
a.Property.BucketId,
a.Property.Custom.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
if _, err = txStmtGroupPropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Group.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
if _, err = txStmtGroupPropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Group.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtGroupPropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Group.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtGroupPropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtGroupPropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtGroupPropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtGroupPropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Group.Id,
"group",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = txStmtDeleteCheck.Exec(
a.Check.CheckId,
); err != nil {
break actionloop
}
case "check_instance_create":
if _, err = txStmtCreateCheckInstance.Exec(
a.CheckInstance.InstanceId,
a.CheckInstance.CheckId,
a.CheckInstance.ConfigId,
"00000000-0000-0000-0000-000000000000",
time.Now().UTC(),
); err != nil {
break actionloop
}
fallthrough
case "check_instance_update":
if _, err = txStmtCreateCheckInstanceConfiguration.Exec(
a.CheckInstance.InstanceConfigId,
a.CheckInstance.Version,
a.CheckInstance.InstanceId,
a.CheckInstance.ConstraintHash,
a.CheckInstance.ConstraintValHash,
a.CheckInstance.InstanceService,
a.CheckInstance.InstanceSvcCfgHash,
a.CheckInstance.InstanceServiceConfig,
time.Now().UTC(),
"awaiting_computation",
"none",
false,
"{}",
); err != nil {
fmt.Println(`Failed CreateCheckInstanceConfiguration`, a.CheckInstance.InstanceConfigId)
break actionloop
}
case "check_instance_delete":
if _, err = txStmtDeleteCheckInstance.Exec(
a.CheckInstance.InstanceId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
// CLUSTER
case "cluster":
switch a.Action {
case "create":
if _, err = txStmtClusterCreate.Exec(
a.Cluster.Id,
a.Cluster.Name,
a.Cluster.BucketId,
a.Cluster.ObjectState,
a.Cluster.TeamId,
); err != nil {
break actionloop
}
case "update":
if _, err = txStmtClusterUpdate.Exec(
a.Cluster.Id,
a.Cluster.ObjectState,
); err != nil {
break actionloop
}
case "delete":
if _, err = txStmtClusterDelete.Exec(
a.Cluster.Id,
); err != nil {
break actionloop
}
case "member_new":
log.Println("==> cluster/new membernode")
if _, err = txStmtClusterMemberNew.Exec(
a.Cluster.Id,
a.ChildNode.Id,
a.Cluster.BucketId,
); err != nil {
break actionloop
}
case "member_removed":
log.Println("==> cluster/new membernode")
if _, err = txStmtClusterMemberRemove.Exec(
a.Cluster.Id,
a.ChildNode.Id,
); err != nil {
break actionloop
}
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtClusterPropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.Custom.Id,
a.Property.BucketId,
a.Property.Custom.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
if _, err = txStmtClusterPropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
log.Printf(`SQL: tkStmtClusterPropertyServiceCreate:
Instance ID: %s
Source Instance ID: %s
Cluster ID: %s
View: %s
Service Name: %s
Service TeamId: %s
Repository ID: %s
Inheritance Enabled: %t
Children Only: %t%s`,
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly, "\n")
if _, err = txStmtClusterPropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtClusterPropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Cluster.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtClusterPropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtClusterPropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtClusterPropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtClusterPropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Cluster.Id,
"cluster",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = txStmtDeleteCheck.Exec(
a.Check.CheckId,
); err != nil {
break actionloop
}
case "check_instance_create":
if _, err = txStmtCreateCheckInstance.Exec(
a.CheckInstance.InstanceId,
a.CheckInstance.CheckId,
a.CheckInstance.ConfigId,
"00000000-0000-0000-0000-000000000000",
time.Now().UTC(),
); err != nil {
break actionloop
}
fallthrough
case "check_instance_update":
if _, err = txStmtCreateCheckInstanceConfiguration.Exec(
a.CheckInstance.InstanceConfigId,
a.CheckInstance.Version,
a.CheckInstance.InstanceId,
a.CheckInstance.ConstraintHash,
a.CheckInstance.ConstraintValHash,
a.CheckInstance.InstanceService,
a.CheckInstance.InstanceSvcCfgHash,
a.CheckInstance.InstanceServiceConfig,
time.Now().UTC(),
"awaiting_computation",
"none",
false,
"{}",
); err != nil {
fmt.Println(`Failed CreateCheckInstanceConfiguration`, a.CheckInstance.InstanceConfigId)
break actionloop
}
case "check_instance_delete":
if _, err = txStmtDeleteCheckInstance.Exec(
a.CheckInstance.InstanceId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
// NODE
case "node":
switch a.Action {
case "delete":
if _, err = txStmtNodeUnassignFromBucket.Exec(
a.Node.Id,
a.Node.Config.BucketId,
a.Node.TeamId,
); err != nil {
break actionloop
}
fallthrough // need to call txStmtUpdateNodeState for delete as well
case "update":
log.Println("==> node/update")
if _, err = txStmtUpdateNodeState.Exec(
a.Node.Id,
a.Node.State,
); err != nil {
break actionloop
}
case "property_new":
if _, err = txStmtPropertyInstanceCreate.Exec(
a.Property.InstanceId,
a.Property.RepositoryId,
a.Property.SourceInstanceId,
a.Property.SourceType,
a.Property.InheritedFrom,
); err != nil {
break actionloop
}
switch a.Property.Type {
case "custom":
if _, err = txStmtNodePropertyCustomCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.Custom.Id,
a.Property.BucketId,
a.Property.Custom.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.Custom.Value,
); err != nil {
break actionloop
}
case "system":
log.Printf(`SQL: tkStmtNodePropertySystemCreate:
Instance ID: %s
Source Instance ID: %s
Node ID: %s
View: %s
SystemProperty: %s
Object Type: %s
Repository ID: %s
Inheritance Enabled: %t
Children Only: %t
System Property Value: %s
Is Inherited: %t%s`,
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited, "\n")
if _, err = txStmtNodePropertySystemCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.System.Name,
a.Property.SourceType,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
a.Property.System.Value,
a.Property.IsInherited,
); err != nil {
break actionloop
}
case "service":
if _, err = txStmtNodePropertyServiceCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.Service.Name,
a.Property.Service.TeamId,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
case "oncall":
if _, err = txStmtNodePropertyOncallCreate.Exec(
a.Property.InstanceId,
a.Property.SourceInstanceId,
a.Node.Id,
a.Property.View,
a.Property.Oncall.Id,
a.Property.RepositoryId,
a.Property.Inheritance,
a.Property.ChildrenOnly,
); err != nil {
break actionloop
}
}
case `property_delete`:
if _, err = tx.Exec(tkStmtPropertyInstanceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
switch a.Property.Type {
case `custom`:
if _, err = tx.Exec(tkStmtNodePropertyCustomDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `system`:
if _, err = tx.Exec(tkStmtNodePropertySystemDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `service`:
if _, err = tx.Exec(tkStmtNodePropertyServiceDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
case `oncall`:
if _, err = tx.Exec(tkStmtNodePropertyOncallDelete,
a.Property.InstanceId,
); err != nil {
break actionloop
}
}
case "check_new":
log.Printf(`SQL: tkStmtCreateCheck:
Check ID: %s
Repository ID: %s
Bucket ID: %s
Source Check ID: %s
Source Type: %s
Inherited From: %s
Check Config ID: %s
Check Capability ID: %s
Node ID: %s%s`,
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Node.Id, "\n")
if _, err = txStmtCreateCheck.Exec(
a.Check.CheckId,
a.Check.RepositoryId,
a.Check.BucketId,
a.Check.SourceCheckId,
a.Check.SourceType,
a.Check.InheritedFrom,
a.Check.CheckConfigId,
a.Check.CapabilityId,
a.Node.Id,
"node",
); err != nil {
break actionloop
}
case `check_removed`:
if _, err = tx.Exec(stmt.TxMarkCheckDeleted,
a.Check.CheckId,
); err != nil {
break actionloop
}
case "check_instance_create":
if _, err = txStmtCreateCheckInstance.Exec(
a.CheckInstance.InstanceId,
a.CheckInstance.CheckId,
a.CheckInstance.ConfigId,
"00000000-0000-0000-0000-000000000000",
time.Now().UTC(),
); err != nil {
break actionloop
}
fallthrough
case "check_instance_update":
if _, err = txStmtCreateCheckInstanceConfiguration.Exec(
a.CheckInstance.InstanceConfigId,
a.CheckInstance.Version,
a.CheckInstance.InstanceId,
a.CheckInstance.ConstraintHash,
a.CheckInstance.ConstraintValHash,
a.CheckInstance.InstanceService,
a.CheckInstance.InstanceSvcCfgHash,
a.CheckInstance.InstanceServiceConfig,
time.Now().UTC(),
"awaiting_computation",
"none",
false,
"{}",
); err != nil {
fmt.Println(`Failed CreateCheckInstanceConfiguration`, a.CheckInstance.InstanceConfigId)
break actionloop
}
case "check_instance_delete":
if _, err = txStmtDeleteCheckInstance.Exec(
a.CheckInstance.InstanceId,
); err != nil {
break actionloop
}
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
case "errorchannel":
continue actionloop
default:
jB, _ := json.Marshal(a)
log.Printf("Unhandled message: %s\n", string(jB))
}
}
if err != nil {
goto bailout
}
// mark job as finished
if _, err = tx.Exec(
tkStmtFinishJob,
q.JobId.String(),
time.Now().UTC(),
"success",
``, // empty error field
); err != nil {
goto bailout
}
// commit transaction
if err = tx.Commit(); err != nil {
goto bailout
}
log.Printf("SUCCESS - Finished job: %s\n", q.JobId.String())
// accept tree changes
tk.tree.Commit()
return
bailout:
log.Printf("FAILED - Finished job: %s\n", q.JobId.String())
log.Println(err)
tk.tree.Rollback()
tx.Rollback()
tk.conn.Exec(
tkStmtFinishJob,
q.JobId.String(),
time.Now().UTC(),
"failed",
err.Error(),
)
for i := len(tk.actionChan); i > 0; i-- {
a := <-tk.actionChan
jB, _ := json.Marshal(a)
log.Printf("Cleaned message: %s\n", string(jB))
}
return
}
// convertCheckForDelete builds a minimal tree.Check carrying only the
// identifiers required to remove a check from the tree: the source check
// ID (parsed from conf.ExternalId) and the check configuration ID (parsed
// from conf.Id). Id and InheritedFrom are deliberately left as uuid.Nil.
// Returns nil and the parse error if either ID is not a valid UUID.
func (tk *treeKeeper) convertCheckForDelete(conf *proto.CheckConfig) (*tree.Check, error) {
	chk := &tree.Check{
		Id:            uuid.Nil,
		InheritedFrom: uuid.Nil,
	}
	srcID, err := uuid.FromString(conf.ExternalId)
	if err != nil {
		return nil, err
	}
	cfgID, err := uuid.FromString(conf.Id)
	if err != nil {
		return nil, err
	}
	chk.SourceId = srcID
	chk.ConfigId = cfgID
	return chk, nil
}
// convertCheck translates a wire-format check configuration into the
// internal tree.Check representation used by the treekeeper.
//
// Id, SourceId and InheritedFrom are initialised to uuid.Nil; they are
// assigned later when the check is attached to the tree. The check's view
// is resolved from the database through the prepared tk.get_view statement.
// Returns nil and the database error if the view lookup fails.
func (tk *treeKeeper) convertCheck(conf *proto.CheckConfig) (*tree.Check, error) {
	treechk := &tree.Check{
		Id:            uuid.Nil,
		SourceId:      uuid.Nil,
		InheritedFrom: uuid.Nil,
		Inheritance:   conf.Inheritance,
		ChildrenOnly:  conf.ChildrenOnly,
		Interval:      conf.Interval,
	}
	// NOTE(review): parse failures are deliberately tolerated here and
	// leave the field as uuid.Nil — confirm this leniency is intended.
	treechk.CapabilityId, _ = uuid.FromString(conf.CapabilityId)
	treechk.ConfigId, _ = uuid.FromString(conf.Id)
	if err := tk.get_view.QueryRow(conf.CapabilityId).Scan(&treechk.View); err != nil {
		// Return nil on error for consistency with convertCheckForDelete;
		// the partially built check is meaningless when err != nil.
		// (Previously returned &tree.Check{}, which invited accidental use.)
		return nil, err
	}
	// Copy threshold definitions verbatim.
	treechk.Thresholds = make([]tree.CheckThreshold, len(conf.Thresholds))
	for i, thr := range conf.Thresholds {
		treechk.Thresholds[i] = tree.CheckThreshold{
			Predicate: thr.Predicate.Symbol,
			Level:     uint8(thr.Level.Numeric),
			Value:     thr.Value,
		}
	}
	// Copy constraints; which fields feed Key/Value depends on the
	// constraint type. Unknown types keep only the Type field set.
	treechk.Constraints = make([]tree.CheckConstraint, len(conf.Constraints))
	for i, constr := range conf.Constraints {
		ncon := tree.CheckConstraint{
			Type: constr.ConstraintType,
		}
		switch constr.ConstraintType {
		case "native":
			ncon.Key = constr.Native.Name
			ncon.Value = constr.Native.Value
		case "oncall":
			ncon.Key = "OncallId"
			ncon.Value = constr.Oncall.Id
		case "custom":
			ncon.Key = constr.Custom.Id
			ncon.Value = constr.Custom.Value
		case "system":
			ncon.Key = constr.System.Name
			ncon.Value = constr.System.Value
		case "service":
			ncon.Key = "name"
			ncon.Value = constr.Service.Name
		case "attribute":
			ncon.Key = constr.Attribute.Name
			ncon.Value = constr.Attribute.Value
		}
		treechk.Constraints[i] = ncon
	}
	return treechk, nil
}
// vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix
|
// Copyright ©2018 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dual_test
import (
"fmt"
"gonum.org/v1/gonum/num/dual"
)
// ExampleNumber_fike evaluates f(x) = e^x / sqrt(sin(x)^3 + cos(x)^3) and
// its first derivative at x = 1.5 using dual-number arithmetic.
//
// The original name Example_Number_fike is a malformed example identifier:
// go vet rejects it and godoc cannot associate it with Number. The
// ExampleNumber_fike form attaches the example to the Number type with
// suffix "fike".
func ExampleNumber_fike() {
	// Calculate the value and derivative of the function
	// e^x/(sqrt(sin(x)^3 + cos(x)^3)).
	fn := func(x dual.Number) dual.Number {
		return dual.Mul(
			dual.Exp(x),
			dual.Inv(dual.Sqrt(
				dual.Add(
					dual.PowReal(dual.Sin(x), 3),
					dual.PowReal(dual.Cos(x), 3)))))
	}
	// Seeding Emag with 1 makes the result's Emag carry the derivative.
	v := fn(dual.Number{Real: 1.5, Emag: 1})
	fmt.Printf("v=%.4f\n", v)
	fmt.Printf("fn(1.5)=%.4f\nfn'(1.5)=%.4f\n", v.Real, v.Emag)
	// Output:
	//
	// v=(4.4978+4.0534ϵ)
	// fn(1.5)=4.4978
	// fn'(1.5)=4.0534
}
num/dual: fix Number example
// Copyright ©2018 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dual_test
import (
"fmt"
"gonum.org/v1/gonum/num/dual"
)
// ExampleNumber_fike evaluates f(x) = e^x / sqrt(sin(x)^3 + cos(x)^3) and
// its first derivative at x = 1.5 using dual numbers: seeding Emag with 1
// makes the result's Emag carry f'(1.5), as the printed labels show.
func ExampleNumber_fike() {
	// Calculate the value and derivative of the function
	// e^x/(sqrt(sin(x)^3 + cos(x)^3)).
	fn := func(x dual.Number) dual.Number {
		return dual.Mul(
			dual.Exp(x),
			dual.Inv(dual.Sqrt(
				dual.Add(
					dual.PowReal(dual.Sin(x), 3),
					dual.PowReal(dual.Cos(x), 3)))))
	}
	v := fn(dual.Number{Real: 1.5, Emag: 1})
	fmt.Printf("v=%.4f\n", v)
	fmt.Printf("fn(1.5)=%.4f\nfn'(1.5)=%.4f\n", v.Real, v.Emag)
	// Output:
	//
	// v=(4.4978+4.0534ϵ)
	// fn(1.5)=4.4978
	// fn'(1.5)=4.0534
}
|
package ast
import (
"fmt"
"strconv"
"strings"
)
type addressNode struct {
Expr IExprNode
}
type arefNode struct {
Expr IExprNode
Index IExprNode
}
func (self arefNode) String() string {
return fmt.Sprintf("(vector-ref %s %s)", self.Expr, self.Index)
}
type assignNode struct {
Lhs IExprNode
Rhs IExprNode
}
func AssignNode(lhs IExprNode, rhs IExprNode) assignNode {
return assignNode { lhs, rhs }
}
func (self assignNode) String() string {
return fmt.Sprintf("(%s %s)", self.Lhs, self.Rhs)
}
type binaryOpNode struct {
Operator string
Left IExprNode
Right IExprNode
}
func BinaryOpNode(operator string, left IExprNode, right IExprNode) binaryOpNode {
return binaryOpNode { operator, left, right }
}
func (self binaryOpNode) String() string {
switch self.Operator {
case "&&": return fmt.Sprintf("(and %s %s)", self.Left, self.Right)
case "||": return fmt.Sprintf("(or %s %s)", self.Left, self.Right)
case "==": return fmt.Sprintf("(= %s %s)", self.Left, self.Right)
case "!=": return fmt.Sprintf("(not (= %s %s))", self.Left, self.Right)
default: return fmt.Sprintf("(%s %s %s)", self.Operator, self.Left, self.Right)
}
}
type castNode struct {
Type ITypeNode
Expr IExprNode
}
type condExprNode struct {
Cond IExprNode
ThenExpr IExprNode
ElseExpr IExprNode
}
func CondExprNode(cond IExprNode, thenExpr IExprNode, elseExpr IExprNode) condExprNode {
return condExprNode { cond, thenExpr, elseExpr }
}
func (self condExprNode) String() string {
return fmt.Sprintf("(if %s %s %s)", self.Cond, self.ThenExpr, self.ElseExpr)
}
type dereferenceNode struct {
Expr IExprNode
}
type funcallNode struct {
Expr IExprNode
Args []IExprNode
}
func FuncallNode(_expr INode, _args []INode) funcallNode {
expr := _expr.(IExprNode)
args := make([]IExprNode, len(_args))
for i := range _args {
args[i] = _args[i].(IExprNode)
}
return funcallNode { expr, args }
}
func (self funcallNode) String() string {
sArgs := make([]string, len(self.Args))
for i := range self.Args {
sArgs[i] = fmt.Sprintf("%s", self.Args[i])
}
if len(sArgs) == 0 {
return fmt.Sprintf("(%s)", self.Expr)
} else {
return fmt.Sprintf("(%s %s)", self.Expr, strings.Join(sArgs, " "))
}
}
// integerLiteralNode represents an integer literal in the AST.
type integerLiteralNode struct {
	Value int
}

// IntegerLiteralNode parses literal as a base-10 integer and wraps it in
// a node. It panics when literal is not a valid integer.
func IntegerLiteralNode(literal string) integerLiteralNode {
	n, err := strconv.Atoi(literal)
	if err != nil {
		panic(err)
	}
	return integerLiteralNode{Value: n}
}

// String renders the literal back in decimal form.
func (self integerLiteralNode) String() string {
	return strconv.Itoa(self.Value)
}
type logicalAndNode struct {
Left IExprNode
Right IExprNode
}
func LogicalAndNode(left IExprNode, right IExprNode) logicalAndNode {
return logicalAndNode { left, right }
}
func (self logicalAndNode) String() string {
return fmt.Sprintf("(and %s %s)", self.Left, self.Right)
}
type logicalOrNode struct {
Left IExprNode
Right IExprNode
}
func LogicalOrNode(left IExprNode, right IExprNode) logicalOrNode {
return logicalOrNode { left, right }
}
func (self logicalOrNode) String() string {
return fmt.Sprintf("(or %s %s)", self.Left, self.Right)
}
type memberNode struct {
Expr IExprNode
Member string
}
func (self memberNode) String() string {
return fmt.Sprintf("(slot-ref %s '%s)", self.Expr, self.Member)
}
// opAssignNode represents a compound assignment such as "a += b".
type opAssignNode struct {
	Operator string
	Lhs      IExprNode
	Rhs      IExprNode
}

// OpAssignNode builds an op-assign node from an operator and its operands.
func OpAssignNode(operator string, lhs IExprNode, rhs IExprNode) opAssignNode {
	return opAssignNode{operator, lhs, rhs}
}

// String renders "lhs op= rhs" as "(lhs (op lhs rhs))".
// The closing parenthesis was previously missing, producing an
// unbalanced s-expression.
func (self opAssignNode) String() string {
	return fmt.Sprintf("(%s (%s %s %s))", self.Lhs, self.Operator, self.Lhs, self.Rhs)
}
// prefixOpNode represents a prefix operator expression such as "++x" or "!x".
type prefixOpNode struct {
	Operator string
	Expr     IExprNode
}

// PrefixOpNode builds a prefix-operator node.
func PrefixOpNode(operator string, expr IExprNode) prefixOpNode {
	return prefixOpNode{operator, expr}
}

// String renders the prefix expression as an s-expression.
// Pre-decrement previously rendered as "(- 1 x)", which computes 1-x
// instead of x-1; both ++ and -- now place the operand first, matching
// suffixOpNode. (For ++ the operand order is mathematically irrelevant.)
func (self prefixOpNode) String() string {
	switch self.Operator {
	case "++":
		return fmt.Sprintf("(+ %s 1)", self.Expr)
	case "--":
		return fmt.Sprintf("(- %s 1)", self.Expr)
	default:
		return fmt.Sprintf("(%s %s)", self.Operator, self.Expr)
	}
}
type ptrMemberNode struct {
Expr IExprNode
Member string
}
func (self ptrMemberNode) String() string {
return fmt.Sprintf("(slot-ref %s '%s)", self.Expr, self.Member)
}
type sizeofExprNode struct {
Expr IExprNode
Type ITypeNode
}
type sizeofTypeNode struct {
Type ITypeNode
Operand ITypeNode
}
type stringLiteralNode struct {
Value string
}
func StringLiteralNode(literal string) stringLiteralNode {
return stringLiteralNode { literal }
}
func (self stringLiteralNode) String() string {
return self.Value
}
type suffixOpNode struct {
Operator string
Expr IExprNode
}
func SuffixOpNode(operator string, expr IExprNode) suffixOpNode {
return suffixOpNode { operator, expr }
}
func (self suffixOpNode) String() string {
switch self.Operator {
case "++": return fmt.Sprintf("(+ %s 1)", self.Expr)
case "--": return fmt.Sprintf("(- %s 1)", self.Expr)
default: return fmt.Sprintf("(%s %s)", self.Operator, self.Expr)
}
}
type unaryOpNode struct {
Operator string
Expr IExprNode
}
func UnaryOpNode(operator string, expr IExprNode) unaryOpNode {
return unaryOpNode { operator, expr }
}
func (self unaryOpNode) String() string {
switch self.Operator {
case "!": return fmt.Sprintf("(not %s)", self.Expr)
default: return fmt.Sprintf("(%s %s)", self.Operator, self.Expr)
}
}
// variableNode represents a reference to a named variable.
type variableNode struct {
	Name string
}

// VariableNode wraps a variable name in an AST node.
func VariableNode(name string) variableNode {
	return variableNode{Name: name}
}

// String renders the variable as its bare name.
func (self variableNode) String() string {
	return self.Name
}
Use Scheme functions
package ast
import (
"fmt"
"strconv"
"strings"
)
type addressNode struct {
Expr IExprNode
}
type arefNode struct {
Expr IExprNode
Index IExprNode
}
func (self arefNode) String() string {
return fmt.Sprintf("(vector-ref %s %s)", self.Expr, self.Index)
}
type assignNode struct {
Lhs IExprNode
Rhs IExprNode
}
func AssignNode(lhs IExprNode, rhs IExprNode) assignNode {
return assignNode { lhs, rhs }
}
func (self assignNode) String() string {
return fmt.Sprintf("(%s %s)", self.Lhs, self.Rhs)
}
type binaryOpNode struct {
Operator string
Left IExprNode
Right IExprNode
}
func BinaryOpNode(operator string, left IExprNode, right IExprNode) binaryOpNode {
return binaryOpNode { operator, left, right }
}
func (self binaryOpNode) String() string {
switch self.Operator {
case "&&": return fmt.Sprintf("(and %s %s)", self.Left, self.Right)
case "||": return fmt.Sprintf("(or %s %s)", self.Left, self.Right)
case "==": return fmt.Sprintf("(= %s %s)", self.Left, self.Right)
case "!=": return fmt.Sprintf("(not (= %s %s))", self.Left, self.Right)
case "<<": return fmt.Sprintf("(bitwise-arithmetic-left %s %s)", self.Left, self.Right)
case ">>": return fmt.Sprintf("(bitwise-arithmetic-right %s %s)", self.Left, self.Right)
case "%": return fmt.Sprintf("(modulo %s %s)", self.Left, self.Right)
default: return fmt.Sprintf("(%s %s %s)", self.Operator, self.Left, self.Right)
}
}
type castNode struct {
Type ITypeNode
Expr IExprNode
}
type condExprNode struct {
Cond IExprNode
ThenExpr IExprNode
ElseExpr IExprNode
}
func CondExprNode(cond IExprNode, thenExpr IExprNode, elseExpr IExprNode) condExprNode {
return condExprNode { cond, thenExpr, elseExpr }
}
func (self condExprNode) String() string {
return fmt.Sprintf("(if %s %s %s)", self.Cond, self.ThenExpr, self.ElseExpr)
}
type dereferenceNode struct {
Expr IExprNode
}
type funcallNode struct {
Expr IExprNode
Args []IExprNode
}
func FuncallNode(_expr INode, _args []INode) funcallNode {
expr := _expr.(IExprNode)
args := make([]IExprNode, len(_args))
for i := range _args {
args[i] = _args[i].(IExprNode)
}
return funcallNode { expr, args }
}
func (self funcallNode) String() string {
sArgs := make([]string, len(self.Args))
for i := range self.Args {
sArgs[i] = fmt.Sprintf("%s", self.Args[i])
}
if len(sArgs) == 0 {
return fmt.Sprintf("(%s)", self.Expr)
} else {
return fmt.Sprintf("(%s %s)", self.Expr, strings.Join(sArgs, " "))
}
}
type integerLiteralNode struct {
Value int
}
func IntegerLiteralNode(literal string) integerLiteralNode {
value, err := strconv.Atoi(literal)
if err != nil { panic(err) }
return integerLiteralNode { value }
}
func (self integerLiteralNode) String() string {
return strconv.Itoa(self.Value)
}
type logicalAndNode struct {
Left IExprNode
Right IExprNode
}
func LogicalAndNode(left IExprNode, right IExprNode) logicalAndNode {
return logicalAndNode { left, right }
}
func (self logicalAndNode) String() string {
return fmt.Sprintf("(and %s %s)", self.Left, self.Right)
}
type logicalOrNode struct {
Left IExprNode
Right IExprNode
}
func LogicalOrNode(left IExprNode, right IExprNode) logicalOrNode {
return logicalOrNode { left, right }
}
func (self logicalOrNode) String() string {
return fmt.Sprintf("(or %s %s)", self.Left, self.Right)
}
type memberNode struct {
Expr IExprNode
Member string
}
func (self memberNode) String() string {
return fmt.Sprintf("(slot-ref %s '%s)", self.Expr, self.Member)
}
// opAssignNode represents a compound assignment such as "a += b".
type opAssignNode struct {
	Operator string
	Lhs      IExprNode
	Rhs      IExprNode
}

// OpAssignNode builds an op-assign node from an operator and its operands.
func OpAssignNode(operator string, lhs IExprNode, rhs IExprNode) opAssignNode {
	return opAssignNode{operator, lhs, rhs}
}

// String renders "lhs op= rhs" as "(lhs (op lhs rhs))".
// The closing parenthesis was previously missing, producing an
// unbalanced s-expression.
func (self opAssignNode) String() string {
	return fmt.Sprintf("(%s (%s %s %s))", self.Lhs, self.Operator, self.Lhs, self.Rhs)
}
// prefixOpNode represents a prefix operator expression such as "++x" or "!x".
type prefixOpNode struct {
	Operator string
	Expr     IExprNode
}

// PrefixOpNode builds a prefix-operator node.
func PrefixOpNode(operator string, expr IExprNode) prefixOpNode {
	return prefixOpNode{operator, expr}
}

// String renders the prefix expression as an s-expression.
// Pre-decrement previously rendered as "(- 1 x)", which computes 1-x
// instead of x-1; both ++ and -- now place the operand first, matching
// suffixOpNode. (For ++ the operand order is mathematically irrelevant.)
func (self prefixOpNode) String() string {
	switch self.Operator {
	case "++":
		return fmt.Sprintf("(+ %s 1)", self.Expr)
	case "--":
		return fmt.Sprintf("(- %s 1)", self.Expr)
	default:
		return fmt.Sprintf("(%s %s)", self.Operator, self.Expr)
	}
}
type ptrMemberNode struct {
Expr IExprNode
Member string
}
func (self ptrMemberNode) String() string {
return fmt.Sprintf("(slot-ref %s '%s)", self.Expr, self.Member)
}
type sizeofExprNode struct {
Expr IExprNode
Type ITypeNode
}
type sizeofTypeNode struct {
Type ITypeNode
Operand ITypeNode
}
type stringLiteralNode struct {
Value string
}
func StringLiteralNode(literal string) stringLiteralNode {
return stringLiteralNode { literal }
}
func (self stringLiteralNode) String() string {
return self.Value
}
type suffixOpNode struct {
Operator string
Expr IExprNode
}
func SuffixOpNode(operator string, expr IExprNode) suffixOpNode {
return suffixOpNode { operator, expr }
}
func (self suffixOpNode) String() string {
switch self.Operator {
case "++": return fmt.Sprintf("(+ %s 1)", self.Expr)
case "--": return fmt.Sprintf("(- %s 1)", self.Expr)
default: return fmt.Sprintf("(%s %s)", self.Operator, self.Expr)
}
}
type unaryOpNode struct {
Operator string
Expr IExprNode
}
func UnaryOpNode(operator string, expr IExprNode) unaryOpNode {
return unaryOpNode { operator, expr }
}
func (self unaryOpNode) String() string {
switch self.Operator {
case "!": return fmt.Sprintf("(not %s)", self.Expr)
default: return fmt.Sprintf("(%s %s)", self.Operator, self.Expr)
}
}
type variableNode struct {
Name string
}
func VariableNode(name string) variableNode {
return variableNode { name }
}
func (self variableNode) String() string {
return self.Name
}
|
package astquery
import (
"go/ast"
"go/build"
"go/parser"
"go/token"
"reflect"
"regexp"
"testing"
)
type nodeInfo struct {
Name string
Type reflect.Type
}
func TestSetFilter(t *testing.T) {
servicePkg := getTestPkg(t)
serviceTypes := Find([]ast.Node{servicePkg}, SetFilter{
Names: []string{"ServiceOne", "ServiceTwo"},
Type: reflect.TypeOf((*ast.TypeSpec)(nil)),
})
expServiceTypes := []nodeInfo{
{Name: "ServiceOne", Type: reflect.TypeOf((*ast.TypeSpec)(nil))},
{Name: "ServiceTwo", Type: reflect.TypeOf((*ast.TypeSpec)(nil))},
}
checkNodesExpected(t, expServiceTypes, serviceTypes)
}
func TestRegexpFilter(t *testing.T) {
servicePkg := getTestPkg(t)
serviceTypes := Find([]ast.Node{servicePkg}, RegexpFilter{
Pattern: regexp.MustCompile(`^Service[A-Za-z]*$`),
Type: reflect.TypeOf((*ast.TypeSpec)(nil)),
})
expServiceTypes := []nodeInfo{
{Name: "ServiceOne", Type: reflect.TypeOf((*ast.TypeSpec)(nil))},
{Name: "ServiceTwo", Type: reflect.TypeOf((*ast.TypeSpec)(nil))},
}
checkNodesExpected(t, expServiceTypes, serviceTypes)
}
func TestFilterChildren(t *testing.T) {
servicePkg := getTestPkg(t)
type expMethodInfo struct {
method nodeInfo
calls []nodeInfo
}
type expServiceInfo struct {
service nodeInfo
methods []expMethodInfo
}
testcases := []expServiceInfo{{
service: nodeInfo{Name: "ServiceOne", Type: reflect.TypeOf((*ast.TypeSpec)(nil))},
methods: []expMethodInfo{{
method: nodeInfo{Name: "Get", Type: reflect.TypeOf((*ast.FuncDecl)(nil))},
calls: []nodeInfo{{Name: "Check", Type: reflect.TypeOf((*ast.SelectorExpr)(nil))}},
}, {
method: nodeInfo{Name: "List", Type: reflect.TypeOf((*ast.FuncDecl)(nil))},
calls: []nodeInfo{{Name: "Check", Type: reflect.TypeOf((*ast.SelectorExpr)(nil))}},
}},
}, {
service: nodeInfo{Name: "ServiceTwo", Type: reflect.TypeOf((*ast.TypeSpec)(nil))},
methods: []expMethodInfo{{
method: nodeInfo{Name: "Get", Type: reflect.TypeOf((*ast.FuncDecl)(nil))},
calls: []nodeInfo{{Name: "Check", Type: reflect.TypeOf((*ast.SelectorExpr)(nil))}},
}, {
method: nodeInfo{Name: "List", Type: reflect.TypeOf((*ast.FuncDecl)(nil))},
calls: []nodeInfo{{Name: "Check", Type: reflect.TypeOf((*ast.SelectorExpr)(nil))}},
}, {
method: nodeInfo{Name: "UncheckedMeth", Type: reflect.TypeOf((*ast.FuncDecl)(nil))},
calls: []nodeInfo{},
}},
}}
for _, test := range testcases {
service_ := Find([]ast.Node{servicePkg},
SetFilter{Names: []string{test.service.Name}, Type: reflect.TypeOf((*ast.TypeSpec)(nil))})
if len(service_) != 1 {
t.Fatalf("expected to get 1 AST node back, but got %d: %v", len(service_), service_)
}
service := service_[0]
serviceName, _ := getName(service)
expMethods := make([]nodeInfo, len(test.methods))
for i, m := range test.methods {
expMethods[i] = m.method
}
actMethods := Find([]ast.Node{servicePkg}, MethodFilter{
ReceiverType: serviceName,
ExportedOnly: true,
})
checkNodesExpected(t, expMethods, actMethods)
for _, method := range actMethods {
methodInfo := nodeInfoFromNode(method)
var expCalls []nodeInfo
for _, expMethodInfo := range test.methods {
if expMethodInfo.method == methodInfo {
expCalls = expMethodInfo.calls
}
}
calls := Find([]ast.Node{method}, RegexpFilter{Pattern: regexp.MustCompile(`.*`), Type: reflect.TypeOf((*ast.SelectorExpr)(nil))})
checkNodesExpected(t, expCalls, calls)
}
}
}
//
// Helpers
//
func nodeInfoFromNode(node ast.Node) nodeInfo {
var info nodeInfo
if name, nameExists := getName(node); nameExists {
info.Name = name
}
info.Type = reflect.TypeOf(node)
return info
}
// checkNodesExpected asserts that actual matches exp when both are viewed
// as unordered sets of nodeInfo (name + dynamic type) descriptors.
// Duplicates collapse because both sides are reduced to sets.
func checkNodesExpected(t *testing.T, exp []nodeInfo, actual []ast.Node) {
	// Reduce the expected descriptors to a set.
	exp_ := make(map[nodeInfo]bool)
	for _, e := range exp {
		exp_[e] = true
	}
	// Reduce the actual AST nodes to the corresponding descriptor set.
	actual_ := make(map[nodeInfo]bool)
	for _, node := range actual {
		actual_[nodeInfoFromNode(node)] = true
	}
	if !reflect.DeepEqual(exp_, actual_) {
		t.Errorf("expected nodes %+v, but got %+v", exp_, actual_)
	}
}
// getTestPkg locates the on-disk fixture package
// "github.com/beyang/go-astquery/testpkg", parses its directory, and
// returns the AST of its "service" package. Any resolution or parse
// failure aborts the calling test via t.Fatal.
func getTestPkg(t *testing.T) *ast.Package {
	// Resolve only the directory location; FindOnly skips source scanning.
	pkg, err := build.Import("github.com/beyang/go-astquery/testpkg", "", build.FindOnly)
	if err != nil {
		t.Fatal(err)
	}
	pkgs, err := parser.ParseDir(token.NewFileSet(), pkg.Dir, nil, parser.AllErrors)
	if err != nil {
		t.Fatal(err)
	}
	servicePkg, in := pkgs["service"]
	if !in {
		t.Fatal("service package not found")
	}
	return servicePkg
}
rename test
package astquery
import (
"go/ast"
"go/build"
"go/parser"
"go/token"
"reflect"
"regexp"
"testing"
)
type nodeInfo struct {
Name string
Type reflect.Type
}
func TestSetFilter(t *testing.T) {
servicePkg := getTestPkg(t)
serviceTypes := Find([]ast.Node{servicePkg}, SetFilter{
Names: []string{"ServiceOne", "ServiceTwo"},
Type: reflect.TypeOf((*ast.TypeSpec)(nil)),
})
expServiceTypes := []nodeInfo{
{Name: "ServiceOne", Type: reflect.TypeOf((*ast.TypeSpec)(nil))},
{Name: "ServiceTwo", Type: reflect.TypeOf((*ast.TypeSpec)(nil))},
}
checkNodesExpected(t, expServiceTypes, serviceTypes)
}
func TestRegexpFilter(t *testing.T) {
servicePkg := getTestPkg(t)
serviceTypes := Find([]ast.Node{servicePkg}, RegexpFilter{
Pattern: regexp.MustCompile(`^Service[A-Za-z]*$`),
Type: reflect.TypeOf((*ast.TypeSpec)(nil)),
})
expServiceTypes := []nodeInfo{
{Name: "ServiceOne", Type: reflect.TypeOf((*ast.TypeSpec)(nil))},
{Name: "ServiceTwo", Type: reflect.TypeOf((*ast.TypeSpec)(nil))},
}
checkNodesExpected(t, expServiceTypes, serviceTypes)
}
// TestNestedFilters exercises Find with progressively narrower filters:
// first it locates a service TypeSpec by name, then that service's exported
// methods, and finally the selector expressions (call targets) inside each
// method body, checking each stage against an expected-node table.
func TestNestedFilters(t *testing.T) {
	servicePkg := getTestPkg(t)
	// expMethodInfo pairs a method node with the selector expressions
	// expected inside its body.
	type expMethodInfo struct {
		method nodeInfo
		calls  []nodeInfo
	}
	// expServiceInfo pairs a service type node with its expected exported
	// methods.
	type expServiceInfo struct {
		service nodeInfo
		methods []expMethodInfo
	}
	testcases := []expServiceInfo{{
		service: nodeInfo{Name: "ServiceOne", Type: reflect.TypeOf((*ast.TypeSpec)(nil))},
		methods: []expMethodInfo{{
			method: nodeInfo{Name: "Get", Type: reflect.TypeOf((*ast.FuncDecl)(nil))},
			calls:  []nodeInfo{{Name: "Check", Type: reflect.TypeOf((*ast.SelectorExpr)(nil))}},
		}, {
			method: nodeInfo{Name: "List", Type: reflect.TypeOf((*ast.FuncDecl)(nil))},
			calls:  []nodeInfo{{Name: "Check", Type: reflect.TypeOf((*ast.SelectorExpr)(nil))}},
		}},
	}, {
		service: nodeInfo{Name: "ServiceTwo", Type: reflect.TypeOf((*ast.TypeSpec)(nil))},
		methods: []expMethodInfo{{
			method: nodeInfo{Name: "Get", Type: reflect.TypeOf((*ast.FuncDecl)(nil))},
			calls:  []nodeInfo{{Name: "Check", Type: reflect.TypeOf((*ast.SelectorExpr)(nil))}},
		}, {
			method: nodeInfo{Name: "List", Type: reflect.TypeOf((*ast.FuncDecl)(nil))},
			calls:  []nodeInfo{{Name: "Check", Type: reflect.TypeOf((*ast.SelectorExpr)(nil))}},
		}, {
			// A method with no calls should yield an empty result set.
			method: nodeInfo{Name: "UncheckedMeth", Type: reflect.TypeOf((*ast.FuncDecl)(nil))},
			calls:  []nodeInfo{},
		}},
	}}
	for _, test := range testcases {
		// Stage 1: resolve the service type declaration by name.
		service_ := Find([]ast.Node{servicePkg},
			SetFilter{Names: []string{test.service.Name}, Type: reflect.TypeOf((*ast.TypeSpec)(nil))})
		if len(service_) != 1 {
			t.Fatalf("expected to get 1 AST node back, but got %d: %v", len(service_), service_)
		}
		service := service_[0]
		serviceName, _ := getName(service)
		// Stage 2: find the service's exported methods and compare against
		// the expected set.
		expMethods := make([]nodeInfo, len(test.methods))
		for i, m := range test.methods {
			expMethods[i] = m.method
		}
		actMethods := Find([]ast.Node{servicePkg}, MethodFilter{
			ReceiverType: serviceName,
			ExportedOnly: true,
		})
		checkNodesExpected(t, expMethods, actMethods)
		// Stage 3: within each found method, collect all selector
		// expressions and compare against the expected calls.
		for _, method := range actMethods {
			methodInfo := nodeInfoFromNode(method)
			var expCalls []nodeInfo
			for _, expMethodInfo := range test.methods {
				if expMethodInfo.method == methodInfo {
					expCalls = expMethodInfo.calls
				}
			}
			calls := Find([]ast.Node{method}, RegexpFilter{Pattern: regexp.MustCompile(`.*`), Type: reflect.TypeOf((*ast.SelectorExpr)(nil))})
			checkNodesExpected(t, expCalls, calls)
		}
	}
}
//
// Helpers
//
// nodeInfoFromNode builds a nodeInfo describing node: its dynamic type and,
// when getName can extract one, its name.
func nodeInfoFromNode(node ast.Node) nodeInfo {
	info := nodeInfo{Type: reflect.TypeOf(node)}
	if name, ok := getName(node); ok {
		info.Name = name
	}
	return info
}
// checkNodesExpected asserts that actual contains exactly the nodes described
// by exp. Both sides are reduced to sets of nodeInfo, so order and duplicates
// are ignored.
func checkNodesExpected(t *testing.T, exp []nodeInfo, actual []ast.Node) {
	want := map[nodeInfo]bool{}
	for _, e := range exp {
		want[e] = true
	}
	got := map[nodeInfo]bool{}
	for _, n := range actual {
		got[nodeInfoFromNode(n)] = true
	}
	if !reflect.DeepEqual(want, got) {
		t.Errorf("expected nodes %+v, but got %+v", want, got)
	}
}
// getTestPkg locates the go-astquery test package on disk, parses every Go
// file in its directory, and returns the resulting "service" package AST.
// Any lookup or parse failure aborts the test.
func getTestPkg(t *testing.T) *ast.Package {
	// FindOnly: we only need the package directory; parsing happens below.
	pkg, err := build.Import("github.com/beyang/go-astquery/testpkg", "", build.FindOnly)
	if err != nil {
		t.Fatal(err)
	}
	pkgs, err := parser.ParseDir(token.NewFileSet(), pkg.Dir, nil, parser.AllErrors)
	if err != nil {
		t.Fatal(err)
	}
	servicePkg, in := pkgs["service"]
	if !in {
		t.Fatal("service package not found")
	}
	return servicePkg
}
|
// Copyright 2016 The Serviced Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
	"bytes"
	"errors"
	"io"
	"net"
)
/*
When establishing a connection to the mux, in addition to the address of the receiver,
the sender sends an authentication token and signs the whole message. The token determines
if the sender is authorized to send data to the receiver or not
---------------------------------------------------------------------------------------------------------
| Auth Token length (4 bytes) | Auth Token (N bytes) | Address (6 bytes) | Signature (256 bytes) |
---------------------------------------------------------------------------------------------------------
*/
const (
	// ADDRESS_BYTES is the fixed size of the receiver address field in the
	// mux header (see the layout diagram above).
	ADDRESS_BYTES = 6
)

var (
	// ErrBadMuxAddress indicates an address of the wrong length was supplied.
	ErrBadMuxAddress = errors.New("Bad mux address")
	// ErrBadMuxHeader indicates a malformed or truncated mux header.
	ErrBadMuxHeader = errors.New("Bad mux header")
)
// BuildAuthMuxHeader serializes a mux connection header for the given
// receiver address: token length (4 bytes) | token (N bytes) |
// address (6 bytes) | delegate signature over everything preceding it.
//
// Returns ErrBadMuxAddress if address is not exactly ADDRESS_BYTES long, or
// the signing error if SignAsDelegate fails.
func BuildAuthMuxHeader(address []byte, token string) ([]byte, error) {
	if len(address) != ADDRESS_BYTES {
		return nil, ErrBadMuxAddress
	}
	headerBuf := new(bytes.Buffer)
	// Length prefix tells the reader how many bytes the variable-length
	// token occupies. TOKEN_LEN_BYTES is used (instead of a literal 4) for
	// consistency with ExtractMuxHeader.
	tokenLenBuf := make([]byte, TOKEN_LEN_BYTES)
	endian.PutUint32(tokenLenBuf, uint32(len(token)))
	headerBuf.Write(tokenLenBuf)
	// add token
	headerBuf.Write([]byte(token))
	// add address (already a []byte; no conversion needed)
	headerBuf.Write(address)
	// Sign what we have so far
	signature, err := SignAsDelegate(headerBuf.Bytes())
	if err != nil {
		return nil, err
	}
	// add signature to header
	headerBuf.Write(signature)
	return headerBuf.Bytes(), nil
}
// errorExtractingHeader is a convenience wrapper that turns err into the
// three-value failure return of ExtractMuxHeader.
func errorExtractingHeader(err error) ([]byte, Identity, error) {
	return nil, nil, err
}
// ExtractMuxHeader parses a raw mux header, verifies the embedded signature
// against the sender's claimed identity, and returns the destination address
// and the sender's Identity. Layout: token length (4) | token (N) |
// address (6) | signature (remainder).
func ExtractMuxHeader(rawHeader []byte) ([]byte, Identity, error) {
	// Must at least hold the length prefix and the address.
	if len(rawHeader) <= TOKEN_LEN_BYTES+ADDRESS_BYTES {
		return errorExtractingHeader(ErrBadMuxHeader)
	}
	var offset uint32 = 0
	// First four bytes represent the token length.
	tokenLen := endian.Uint32(rawHeader[offset : offset+TOKEN_LEN_BYTES])
	offset += TOKEN_LEN_BYTES
	// Re-check the total length now that the variable-size token is known.
	if len(rawHeader) <= TOKEN_LEN_BYTES+int(tokenLen)+ADDRESS_BYTES {
		return errorExtractingHeader(ErrBadMuxHeader)
	}
	// Next tokenLen bytes contain the token.
	token := string(rawHeader[offset : offset+tokenLen])
	offset += tokenLen
	// Validate the token can be parsed.
	senderIdentity, err := ParseJWTIdentity(token)
	if err != nil {
		return errorExtractingHeader(err)
	}
	if senderIdentity == nil {
		return errorExtractingHeader(ErrBadToken)
	}
	// Next six bytes hold the receiver address.
	address := rawHeader[offset : offset+ADDRESS_BYTES]
	offset += ADDRESS_BYTES
	// Everything up to this point is covered by the signature.
	signed_message := rawHeader[:offset]
	// Whatever is left is the signature.
	signature := rawHeader[offset:]
	// Verify that the signature matches the sender's claimed identity.
	senderVerifier, err := senderIdentity.Verifier()
	if err != nil {
		return errorExtractingHeader(err)
	}
	err = senderVerifier.Verify(signed_message, signature)
	if err != nil {
		return errorExtractingHeader(err)
	}
	return address, senderIdentity, nil
}
// ReadMuxHeader reads one complete auth mux header from conn and returns the
// raw bytes (length prefix included), suitable for ExtractMuxHeader.
func ReadMuxHeader(conn net.Conn) ([]byte, error) {
	// Read the fixed-size token length prefix. io.ReadFull is required
	// because a bare conn.Read may legally return fewer bytes than
	// requested, which would corrupt all subsequent parsing.
	tokenLenBuff := make([]byte, TOKEN_LEN_BYTES)
	if _, err := io.ReadFull(conn, tokenLenBuff); err != nil {
		return nil, err
	}
	tokenLen := endian.Uint32(tokenLenBuff)
	// The remainder is the token itself, the address and the signature.
	remainderBuff := make([]byte, tokenLen+ADDRESS_BYTES+SIGNATURE_BYTES)
	if _, err := io.ReadFull(conn, remainderBuff); err != nil {
		return nil, err
	}
	return append(tokenLenBuff, remainderBuff...), nil
}
Force read to fill the buffer.
// Copyright 2016 The Serviced Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"bytes"
"errors"
"io"
"net"
)
/*
When establishing a connection to the mux, in addition to the address of the receiver,
the sender sends an authentication token and signs the whole message. The token determines
if the sender is authorized to send data to the receiver or not
---------------------------------------------------------------------------------------------------------
| Auth Token length (4 bytes) | Auth Token (N bytes) | Address (6 bytes) | Signature (256 bytes) |
---------------------------------------------------------------------------------------------------------
*/
const (
	// ADDRESS_BYTES is the fixed size of the receiver address field in the
	// mux header (see the layout diagram above).
	ADDRESS_BYTES = 6
)

var (
	// ErrBadMuxAddress indicates an address of the wrong length was supplied.
	ErrBadMuxAddress = errors.New("Bad mux address")
	// ErrBadMuxHeader indicates a malformed or truncated mux header.
	ErrBadMuxHeader = errors.New("Bad mux header")
)
// BuildAuthMuxHeader serializes a mux connection header for the given
// receiver address: token length (4 bytes) | token (N bytes) |
// address (6 bytes) | delegate signature over everything preceding it.
//
// Returns ErrBadMuxAddress if address is not exactly ADDRESS_BYTES long, or
// the signing error if SignAsDelegate fails.
func BuildAuthMuxHeader(address []byte, token string) ([]byte, error) {
	if len(address) != ADDRESS_BYTES {
		return nil, ErrBadMuxAddress
	}
	headerBuf := new(bytes.Buffer)
	// Length prefix tells the reader how many bytes the variable-length
	// token occupies. TOKEN_LEN_BYTES is used (instead of a literal 4) for
	// consistency with ExtractMuxHeader.
	tokenLenBuf := make([]byte, TOKEN_LEN_BYTES)
	endian.PutUint32(tokenLenBuf, uint32(len(token)))
	headerBuf.Write(tokenLenBuf)
	// add token
	headerBuf.Write([]byte(token))
	// add address (already a []byte; no conversion needed)
	headerBuf.Write(address)
	// Sign what we have so far
	signature, err := SignAsDelegate(headerBuf.Bytes())
	if err != nil {
		return nil, err
	}
	// add signature to header
	headerBuf.Write(signature)
	return headerBuf.Bytes(), nil
}
// errorExtractingHeader is a convenience wrapper that turns err into the
// three-value failure return of ExtractMuxHeader.
func errorExtractingHeader(err error) ([]byte, Identity, error) {
	return nil, nil, err
}
// ExtractMuxHeader parses a raw mux header, verifies the embedded signature
// against the sender's claimed identity, and returns the destination address
// and the sender's Identity. Layout: token length (4) | token (N) |
// address (6) | signature (remainder).
func ExtractMuxHeader(rawHeader []byte) ([]byte, Identity, error) {
	// Must at least hold the length prefix and the address.
	if len(rawHeader) <= TOKEN_LEN_BYTES+ADDRESS_BYTES {
		return errorExtractingHeader(ErrBadMuxHeader)
	}
	// The header starts with a 4-byte length prefix for the token.
	tokenLen := endian.Uint32(rawHeader[:TOKEN_LEN_BYTES])
	// Re-check the total length now that the variable-size token is known.
	if len(rawHeader) <= TOKEN_LEN_BYTES+int(tokenLen)+ADDRESS_BYTES {
		return errorExtractingHeader(ErrBadMuxHeader)
	}
	tokenStart := uint32(TOKEN_LEN_BYTES)
	tokenEnd := tokenStart + tokenLen
	token := string(rawHeader[tokenStart:tokenEnd])
	// The token must parse into a usable identity.
	senderIdentity, err := ParseJWTIdentity(token)
	if err != nil {
		return errorExtractingHeader(err)
	}
	if senderIdentity == nil {
		return errorExtractingHeader(ErrBadToken)
	}
	// Six bytes of receiver address follow the token.
	addrEnd := tokenEnd + ADDRESS_BYTES
	address := rawHeader[tokenEnd:addrEnd]
	// Everything before the signature is what the sender signed; the
	// remainder is the signature itself.
	signedMessage := rawHeader[:addrEnd]
	signature := rawHeader[addrEnd:]
	senderVerifier, err := senderIdentity.Verifier()
	if err != nil {
		return errorExtractingHeader(err)
	}
	if err := senderVerifier.Verify(signedMessage, signature); err != nil {
		return errorExtractingHeader(err)
	}
	return address, senderIdentity, nil
}
// ReadMuxHeader reads one complete auth mux header off conn, returning the
// raw header bytes (length prefix, token, address and signature).
func ReadMuxHeader(conn net.Conn) ([]byte, error) {
	// The fixed-size prefix tells us how long the variable-size token is.
	prefix := make([]byte, TOKEN_LEN_BYTES)
	if _, err := io.ReadFull(conn, prefix); err != nil {
		return nil, err
	}
	// The rest of the header has a size fully determined by the prefix.
	rest := make([]byte, endian.Uint32(prefix)+ADDRESS_BYTES+SIGNATURE_BYTES)
	if _, err := io.ReadFull(conn, rest); err != nil {
		return nil, err
	}
	return append(prefix, rest...), nil
}
|
package main
import (
"fmt"
"log"
"time"
"github.com/icholy/killable"
)
// Worker is an example job: a producer goroutine feeds int64 values into ch
// while a consumer drains them. Lifecycle control (kill/dying/err) comes from
// the embedded killable.Killable.
type Worker struct {
	name string     // tag used in log output
	ch   chan int64 // stream of values from producer to consumer
	killable.Killable
}
// NewWorker builds a Worker with the given display name, an unbuffered value
// channel and a fresh killable lifecycle.
func NewWorker(name string) *Worker {
	worker := &Worker{
		Killable: killable.New(),
		name:     name,
		ch:       make(chan int64),
	}
	return worker
}
// startProducer launches, via killable.Go, a goroutine that feeds
// monotonically increasing int64 values into w.ch. It is non-blocking for
// the caller, closes w.ch on exit, and stops either when the worker starts
// dying or once the counter passes 100.
func (w *Worker) startProducer() {
	// producer (non-blocking)
	killable.Go(w, func() error {
		defer close(w.ch)
		var i int64
		for {
			select {
			case w.ch <- i:
				i++
			case <-w.Dying():
				// A requested stop is signalled with ErrDying.
				return killable.ErrDying
			}
			if i > 100 {
				return fmt.Errorf("worker: %s: limit reached", w.name)
			}
		}
		// NOTE: the unreachable "return nil" that used to follow the
		// infinite loop has been removed (flagged by go vet).
	})
}
// consumer drains w.ch under killable.Do, printing each value after a short
// delay. It returns an error on value 123 or when the worker is killed
// during the sleep; it returns nil once the channel is closed.
func (w *Worker) consumer() error {
	return killable.Do(w, func() error {
		for i := range w.ch {
			// NOTE(review): the producer stops after 100, so this branch
			// looks unreachable - confirm it is intentional demo code.
			if i == 123 {
				return fmt.Errorf("worker: %s: I don't like 123", w.name)
			}
			// killable.Sleep returns early with an error if w is killed
			// while sleeping.
			if err := killable.Sleep(w, 100*time.Millisecond); err != nil {
				return err
			}
			fmt.Printf("worker: %s: %d\n", w.name, i)
		}
		return nil
	})
}
// Start registers a cleanup callback that runs when all of w's processes
// complete, then runs the producer and consumer under w's lifecycle.
func (w *Worker) Start() {
	killable.Defer(w, func() {
		// Added the previously missing trailing newline, for consistency
		// with the consumer's log output.
		fmt.Printf("worker: %s: all processes complete, cleaning up\n", w.name)
	})
	killable.Go(w, func() error {
		w.startProducer()
		return w.consumer()
	})
}
// main runs three workers concurrently, kills the whole group after two
// seconds, and reports the group's final error.
func main() {
	var (
		w1 = NewWorker("Worker 1")
		w2 = NewWorker("Worker 2")
		w3 = NewWorker("Worker 3")
		g  = killable.NewGroup(w1, w2, w3)
	)
	w1.Start()
	w2.Start()
	w3.Start()
	go func() {
		time.Sleep(2 * time.Second)
		fmt.Println("Killing the worker group")
		g.Kill(fmt.Errorf("time to die!"))
	}()
	// Err blocks until every member of the group has died.
	if err := g.Err(); err != nil {
		log.Fatal(err)
	}
}
Add defer to group.
package main
import (
"fmt"
"log"
"time"
"github.com/icholy/killable"
)
// Worker is an example job: a producer goroutine feeds int64 values into ch
// while a consumer drains them. Lifecycle control (kill/dying/err) comes from
// the embedded killable.Killable.
type Worker struct {
	name string     // tag used in log output
	ch   chan int64 // stream of values from producer to consumer
	killable.Killable
}
// NewWorker builds a Worker with the given display name, an unbuffered value
// channel and a fresh killable lifecycle.
func NewWorker(name string) *Worker {
	worker := &Worker{
		Killable: killable.New(),
		name:     name,
		ch:       make(chan int64),
	}
	return worker
}
// startProducer launches, via killable.Go, a goroutine that feeds
// monotonically increasing int64 values into w.ch. It is non-blocking for
// the caller, closes w.ch on exit, and stops either when the worker starts
// dying or once the counter passes 100.
func (w *Worker) startProducer() {
	// producer (non-blocking)
	killable.Go(w, func() error {
		defer close(w.ch)
		var i int64
		for {
			select {
			case w.ch <- i:
				i++
			case <-w.Dying():
				// A requested stop is signalled with ErrDying.
				return killable.ErrDying
			}
			if i > 100 {
				return fmt.Errorf("worker: %s: limit reached", w.name)
			}
		}
		// NOTE: the unreachable "return nil" that used to follow the
		// infinite loop has been removed (flagged by go vet).
	})
}
// consumer drains w.ch under killable.Do, printing each value after a short
// delay. It returns an error on value 123 or when the worker is killed
// during the sleep; it returns nil once the channel is closed.
func (w *Worker) consumer() error {
	return killable.Do(w, func() error {
		for i := range w.ch {
			// NOTE(review): the producer stops after 100, so this branch
			// looks unreachable - confirm it is intentional demo code.
			if i == 123 {
				return fmt.Errorf("worker: %s: I don't like 123", w.name)
			}
			// killable.Sleep returns early with an error if w is killed
			// while sleeping.
			if err := killable.Sleep(w, 100*time.Millisecond); err != nil {
				return err
			}
			fmt.Printf("worker: %s: %d\n", w.name, i)
		}
		return nil
	})
}
// Start registers a cleanup callback that runs when all of w's processes
// complete, then runs the producer and consumer under w's lifecycle.
func (w *Worker) Start() {
	killable.Defer(w, func() {
		// Added the previously missing trailing newline, for consistency
		// with the consumer's log output.
		fmt.Printf("worker: %s: all processes complete, cleaning up\n", w.name)
	})
	killable.Go(w, func() error {
		w.startProducer()
		return w.consumer()
	})
}
// main runs three workers under a group, registers a cleanup callback for
// when the whole group is dead, kills the group after two seconds, and
// reports the group's final error.
func main() {
	var (
		w1 = NewWorker("Worker 1")
		w2 = NewWorker("Worker 2")
		w3 = NewWorker("Worker 3")
		g  = killable.NewGroup(w1, w2, w3)
	)
	// Runs once every worker in the group has fully stopped.
	killable.Defer(g, func() {
		fmt.Println("All workers are dead")
	})
	w1.Start()
	w2.Start()
	w3.Start()
	go func() {
		time.Sleep(2 * time.Second)
		fmt.Println("Killing the worker group")
		g.Kill(fmt.Errorf("time to die!"))
	}()
	// Err blocks until every member of the group has died.
	if err := g.Err(); err != nil {
		log.Fatal(err)
	}
}
|
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package execution provides an abstraction for a single execution of a command
// with the context of daemonservice.
package execution
import (
"context"
"errors"
"flag"
"fmt"
"os"
"os/exec"
"sync"
"sync/atomic"
"time"
log "github.com/golang/glog"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/google/fleetspeak/fleetspeak/src/client/channel"
"github.com/google/fleetspeak/fleetspeak/src/client/daemonservice/command"
"github.com/google/fleetspeak/fleetspeak/src/client/internal/monitoring"
"github.com/google/fleetspeak/fleetspeak/src/client/service"
fcpb "github.com/google/fleetspeak/fleetspeak/src/client/channel/proto/fleetspeak_channel"
dspb "github.com/google/fleetspeak/fleetspeak/src/client/daemonservice/proto/fleetspeak_daemonservice"
fspb "github.com/google/fleetspeak/fleetspeak/src/common/proto/fleetspeak"
mpb "github.com/google/fleetspeak/fleetspeak/src/common/proto/fleetspeak_monitoring"
)
// We flush the output when either of these thresholds are met.
const (
	maxFlushBytes           = 1 << 20   // 1MB of buffered stdout/stderr triggers a flush.
	defaultFlushTimeSeconds = int32(60) // Flush at least once a minute by default.
)

var (
	// ErrShuttingDown is returned once an execution has started shutting down and
	// is no longer accepting messages.
	ErrShuttingDown = errors.New("shutting down")

	stdForward = flag.Bool("std_forward", false,
		"If set attaches the dependent service to the client's stdin, stdout, stderr. Meant for testing individual daemonservice integrations.")

	// How long to wait for the daemon service to send startup data before starting to
	// monitor resource-usage.
	startupDataTimeout = 10 * time.Second
)
// atomicString is a string value that may be written and read concurrently.
// Its zero value is ready to use.
type atomicString struct {
	v atomic.Value
}

// Set stores val, replacing any previously stored string.
func (s *atomicString) Set(val string) {
	s.v.Store(val)
}

// Get returns a pointer to a copy of the stored string, or nil if Set has
// never been called.
func (s *atomicString) Get() *string {
	if stored, ok := s.v.Load().(string); ok {
		return &stored
	}
	return nil
}
// An Execution represents a specific execution of a daemonservice: one child
// process plus the goroutines that shuttle its messages, flush its output and
// monitor its resource usage.
type Execution struct {
	daemonServiceName string
	memoryLimit       int64
	samplePeriod      time.Duration
	sampleSize        int

	Done chan struct{}        // Closed when this execution is dead or dying - essentially when Shutdown has been called.
	Out  chan<- *fspb.Message // Messages to send to the process go here. User should close when finished.

	sc      service.Context
	channel *channel.Channel
	cmd     *command.Command

	StartTime time.Time

	outData        *dspb.StdOutputData // The next output data to send, once full.
	lastOut        time.Time           // Time of most recent output of outData.
	outLock        sync.Mutex          // Protects outData, lastOut
	outFlushBytes  int                 // How many bytes trigger an output. Constant.
	outServiceName string              // The service to send StdOutput messages to. Constant.

	shutdown   sync.Once
	lastActive int64 // Time of the last message input or output in seconds since epoch (UTC), atomic access only.

	dead       chan struct{} // closed when the underlying process has died.
	waitResult error         // result of Wait call - should only be read after dead is closed

	inProcess sync.WaitGroup // count of active goroutines

	startupData chan *fcpb.StartupData // Startup data sent by the daemon process.

	heartbeat                        int64         // Time of the last input message in seconds since epoch (UTC), atomic access only.
	monitorHeartbeats                bool          // Whether to monitor the daemon process's heartbeats and kill unresponsive processes.
	heartbeatUnresponsiveGracePeriod time.Duration // How long to wait for initial heartbeat.
	heartbeatUnresponsiveKillPeriod  time.Duration // How long to wait for subsequent heartbeats.

	sending        int32        // Non-zero when we are sending a message into FS. atomic access only.
	serviceVersion atomicString // Version reported by the daemon process.
}
// New creates and starts an execution of the command described in cfg. Messages
// received from the resulting process are passed to sc, as are StdOutput and
// ResourceUsage messages.
func New(daemonServiceName string, cfg *dspb.Config, sc service.Context) (*Execution, error) {
	// Work on a private copy so mutations below (e.g. clearing StdParams)
	// do not leak back to the caller.
	cfg = proto.Clone(cfg).(*dspb.Config)
	ret := Execution{
		daemonServiceName: daemonServiceName,
		memoryLimit:       cfg.MemoryLimit,
		sampleSize:        int(cfg.ResourceMonitoringSampleSize),
		samplePeriod:      time.Duration(cfg.ResourceMonitoringSamplePeriodSeconds) * time.Second,
		Done:              make(chan struct{}),
		sc:                sc,
		// NOTE(review): cfg.Argv[0] panics if Argv is empty - presumably
		// validated upstream; confirm.
		cmd:                              &command.Command{Cmd: *exec.Command(cfg.Argv[0], cfg.Argv[1:]...)},
		StartTime:                        time.Now(),
		outData:                          &dspb.StdOutputData{},
		lastOut:                          time.Now(),
		dead:                             make(chan struct{}),
		startupData:                      make(chan *fcpb.StartupData, 1),
		monitorHeartbeats:                cfg.MonitorHeartbeats,
		heartbeatUnresponsiveGracePeriod: time.Duration(cfg.HeartbeatUnresponsiveGracePeriodSeconds) * time.Second,
		heartbeatUnresponsiveKillPeriod:  time.Duration(cfg.HeartbeatUnresponsiveKillPeriodSeconds) * time.Second,
	}
	var err error
	ret.channel, err = ret.cmd.SetupCommsChannel()
	if err != nil {
		return nil, fmt.Errorf("failed to setup a comms channel: %v", err)
	}
	ret.Out = ret.channel.Out
	// StdParams without a destination service is useless; drop it.
	if cfg.StdParams != nil && cfg.StdParams.ServiceName == "" {
		log.Errorf("std_params is set, but service_name is empty. Ignoring std_params: %v", cfg.StdParams)
		cfg.StdParams = nil
	}
	if *stdForward {
		// Test mode: wire the child directly to our own std streams.
		log.Warningf("std_forward is set, connecting std... to %s", cfg.Argv[0])
		ret.cmd.Stdin = os.Stdin
		ret.cmd.Stdout = os.Stdout
		ret.cmd.Stderr = os.Stderr
	} else if cfg.StdParams != nil {
		// Clamp flush parameters to sane values.
		if cfg.StdParams.FlushBytes <= 0 || cfg.StdParams.FlushBytes > maxFlushBytes {
			cfg.StdParams.FlushBytes = maxFlushBytes
		}
		if cfg.StdParams.FlushTimeSeconds <= 0 {
			cfg.StdParams.FlushTimeSeconds = defaultFlushTimeSeconds
		}
		ret.outServiceName = cfg.StdParams.ServiceName
		ret.outFlushBytes = int(cfg.StdParams.FlushBytes)
		ret.cmd.Stdout = stdoutWriter{&ret}
		ret.cmd.Stderr = stderrWriter{&ret}
	} else {
		ret.cmd.Stdout = nil
		ret.cmd.Stderr = nil
	}
	if err := ret.cmd.Start(); err != nil {
		close(ret.Done)
		return nil, err
	}
	if cfg.StdParams != nil {
		ret.inProcess.Add(1)
		go ret.stdFlushRoutine(time.Second * time.Duration(cfg.StdParams.FlushTimeSeconds))
	}
	// Two goroutines accounted for here: inRoutine and the waiter below.
	ret.inProcess.Add(2)
	go ret.inRoutine()
	if !cfg.DisableResourceMonitoring {
		ret.inProcess.Add(1)
		go ret.statsRoutine()
	}
	go func() {
		defer func() {
			ret.Shutdown()
			ret.inProcess.Done()
		}()
		// Reap the child and record its exit status for later readers.
		ret.waitResult = ret.cmd.Wait()
		close(ret.dead)
		if ret.waitResult != nil {
			log.Warningf("subprocess ended with error: %v", ret.waitResult)
		}
		startTime, err := ptypes.TimestampProto(ret.StartTime)
		if err != nil {
			log.Errorf("Failed to convert process start time: %v", err)
			return
		}
		if !cfg.DisableResourceMonitoring {
			// Send a final, empty resource-usage report marking termination.
			rud := &mpb.ResourceUsageData{
				Scope:             ret.daemonServiceName,
				Pid:               int64(ret.cmd.Process.Pid),
				ProcessStartTime:  startTime,
				DataTimestamp:     ptypes.TimestampNow(),
				ResourceUsage:     &mpb.AggregatedResourceUsage{},
				ProcessTerminated: true,
			}
			if err := monitoring.SendProtoToServer(rud, "ResourceUsage", ret.sc); err != nil {
				log.Errorf("Failed to send final resource-usage proto: %v", err)
			}
		}
	}()
	return &ret, nil
}
// Wait waits for all aspects of this execution to finish. This should happen
// soon after shutdown is called.
//
// Note that while it is a bug for this to take more than some seconds, the
// method isn't needed in normal operation - it exists primarily for tests to
// ensure that resources are not leaked.
func (e *Execution) Wait() {
	<-e.Done
	e.channel.Wait()
	e.inProcess.Wait()
}

// LastActive returns the last time that a message was sent or received, to the
// nearest second.
func (e *Execution) LastActive() time.Time {
	return time.Unix(atomic.LoadInt64(&e.lastActive), 0).UTC()
}

// setLastActive records t (truncated to the second) as the most recent
// activity time.
func (e *Execution) setLastActive(t time.Time) {
	atomic.StoreInt64(&e.lastActive, t.Unix())
}

// getHeartbeat returns the last time that a message was received, to the nearest
// second.
func (e *Execution) getHeartbeat() time.Time {
	return time.Unix(atomic.LoadInt64(&e.heartbeat), 0).UTC()
}

// recordHeartbeat stamps the heartbeat tracker with the current time.
func (e *Execution) recordHeartbeat() {
	atomic.StoreInt64(&e.heartbeat, time.Now().Unix())
}

// dataSize returns the combined size of buffered stdout and stderr bytes in o.
func dataSize(o *dspb.StdOutputData) int {
	return len(o.Stdout) + len(o.Stderr)
}
// flushOut flushes e.outData as a StdOutput message to the configured
// service, then starts a fresh buffer with an incremented MessageIndex.
// It assumes that e.outLock is already held.
func (e *Execution) flushOut() {
	// set lastOut before the blocking call to sc.Send, so the next flush
	// has an accurate sense of how stale the data might be.
	n := time.Now()
	e.lastOut = n
	if dataSize(e.outData) == 0 {
		return
	}
	e.setLastActive(n)
	e.outData.Pid = int64(e.cmd.Process.Pid)
	d, err := ptypes.MarshalAny(e.outData)
	if err != nil {
		log.Errorf("unable to marshal StdOutputData: %v", err)
	} else {
		// NOTE(review): the Send error is deliberately ignored here -
		// output delivery is best-effort.
		e.sc.Send(context.Background(), service.AckMessage{
			M: &fspb.Message{
				Destination: &fspb.Address{ServiceName: e.outServiceName},
				MessageType: "StdOutput",
				Data:        d,
			}})
	}
	// MessageIndex lets the receiver order the output stream.
	e.outData = &dspb.StdOutputData{
		MessageIndex: e.outData.MessageIndex + 1,
	}
}
// writeToOut appends p to the buffered stdout (isErr=false) or stderr
// (isErr=true) data, flushing a full message every time the buffer reaches
// e.outFlushBytes. Large writes are split across multiple messages.
func (e *Execution) writeToOut(p []byte, isErr bool) {
	e.outLock.Lock()
	defer e.outLock.Unlock()
	for {
		currSize := dataSize(e.outData)
		// If it all fits, write it and return.
		if currSize+len(p) <= e.outFlushBytes {
			if isErr {
				e.outData.Stderr = append(e.outData.Stderr, p...)
			} else {
				e.outData.Stdout = append(e.outData.Stdout, p...)
			}
			return
		}
		// Write what does fit, flush, continue.
		toWrite := e.outFlushBytes - currSize
		if isErr {
			e.outData.Stderr = append(e.outData.Stderr, p[:toWrite]...)
		} else {
			e.outData.Stdout = append(e.outData.Stdout, p[:toWrite]...)
		}
		p = p[toWrite:]
		e.flushOut()
	}
}
// stdoutWriter adapts an Execution into an io.Writer for the subprocess's
// stdout stream.
type stdoutWriter struct {
	e *Execution
}

// Write buffers p as stdout data; it always reports full success.
func (w stdoutWriter) Write(p []byte) (int, error) {
	w.e.writeToOut(p, false)
	return len(p), nil
}

// stderrWriter adapts an Execution into an io.Writer for the subprocess's
// stderr stream.
type stderrWriter struct {
	e *Execution
}

// Write buffers p as stderr data; it always reports full success.
func (w stderrWriter) Write(p []byte) (int, error) {
	w.e.writeToOut(p, true)
	return len(p), nil
}
// stdFlushRoutine periodically flushes buffered subprocess output so data
// older than flushTime is not held back, and performs one final flush when
// the process dies.
func (e *Execution) stdFlushRoutine(flushTime time.Duration) {
	defer e.inProcess.Done()
	ticker := time.NewTicker(flushTime)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			e.outLock.Lock()
			// Only flush if nothing has been flushed for a full period.
			if time.Now().After(e.lastOut.Add(flushTime)) {
				e.flushOut()
			}
			e.outLock.Unlock()
		case <-e.dead:
			// Final flush of whatever is still buffered.
			e.outLock.Lock()
			e.flushOut()
			e.outLock.Unlock()
			return
		}
	}
}
// waitForDeath waits up to d for the subprocess to die, reporting whether
// death was observed within the window.
func (e *Execution) waitForDeath(d time.Duration) bool {
	timer := time.NewTimer(d)
	defer timer.Stop()
	select {
	case <-timer.C:
		return false
	case <-e.dead:
		return true
	}
}
// Shutdown shuts down this execution. It first requests a gentle shutdown,
// then escalates to SoftKill and finally Kill, waiting one second between
// attempts. Safe to call multiple times; only the first call acts.
func (e *Execution) Shutdown() {
	e.shutdown.Do(func() {
		// First we attempt a gentle shutdown. Closing e.Done tells our
		// user not to give us any more data, in response they should
		// close e.Out a.k.a e.channel.Out. Closing e.channel.Out will
		// cause channel to close the pipe to the dependent process,
		// which then causes it to clean up nicely.
		close(e.Done)
		if e.waitForDeath(time.Second) {
			return
		}
		// This pattern is technically racy - the process could end and the process
		// id could be recycled since the end of waitForDeath and before we SoftKill
		// or Kill using the process id.
		//
		// A formally correct way to implement this is to spawn a wrapper process
		// which does not die in response to SIGTERM but forwards the signal to the
		// wrapped process - its child. This would ensure that the process is still
		// around all the way to the SIGKILL.
		if err := e.cmd.SoftKill(); err != nil {
			log.Errorf("SoftKill [%d] returned error: %v", e.cmd.Process.Pid, err)
		}
		if e.waitForDeath(time.Second) {
			return
		}
		if err := e.cmd.Kill(); err != nil {
			log.Errorf("Kill [%d] returned error: %v", e.cmd.Process.Pid, err)
		}
		if e.waitForDeath(time.Second) {
			return
		}
		// It is hard to imagine how we might end up here - maybe the process is
		// somehow stuck in a system call or there is some other OS level weirdness.
		// One possibility is that cmd is a zombie process now.
		log.Errorf("Subprocess [%d] appears to have survived SIGKILL.", e.cmd.Process.Pid)
	})
}
// inRoutine reads messages from the dependent process and passes them to
// fleetspeak. Messages addressed to the "system" service are control traffic
// consumed here (startup data, heartbeats); everything else is forwarded to
// the server. It shuts the execution down when the channel closes or errors.
func (e *Execution) inRoutine() {
	defer func() {
		e.Shutdown()
		e.inProcess.Done()
	}()
	var startupDone bool
	for {
		m := e.readMsg()
		if m == nil {
			// Channel closed or produced an error; stop.
			return
		}
		// Every received message counts as activity and as a heartbeat.
		e.setLastActive(time.Now())
		e.recordHeartbeat()
		if m.Destination != nil && m.Destination.ServiceName == "system" {
			switch m.MessageType {
			case "StartupData":
				if startupDone {
					log.Warning("Received spurious startup message, ignoring.")
					break
				}
				startupDone = true
				sd := &fcpb.StartupData{}
				if err := ptypes.UnmarshalAny(m.Data, sd); err != nil {
					log.Warningf("Failed to parse startup data from initial message: %v", err)
				} else {
					if sd.Version != "" {
						e.serviceVersion.Set(sd.Version)
					}
					e.startupData <- sd
				}
				close(e.startupData) // No more values to send through the channel.
			case "Heartbeat":
				// Pass, handled above.
			default:
				log.Warningf("Unknown system message type: %s", m.MessageType)
			}
		} else {
			// Mark that a (possibly blocking) send is in flight so the
			// heartbeat monitor does not mistake it for unresponsiveness.
			atomic.StoreInt32(&e.sending, 1)
			if err := e.sc.Send(context.Background(), service.AckMessage{M: m}); err != nil {
				log.Errorf("error sending message to server: %v", err)
			}
			atomic.StoreInt32(&e.sending, 0)
		}
	}
}
// readMsg blocks until a message is available from the channel, returning
// nil when the channel closes or produces an error.
func (e *Execution) readMsg() *fspb.Message {
	select {
	case err := <-e.channel.Err:
		log.Errorf("channel produced error: %v", err)
		return nil
	case m, ok := <-e.channel.In:
		if !ok {
			return nil
		}
		return m
	}
}
// statsRoutine monitors the daemon process's resource usage, sending reports to the server
// at regular intervals. It first waits (bounded by startupDataTimeout) for the
// process's self-reported PID and version, then starts the heartbeat monitor
// (if enabled) and the resource-usage monitor.
func (e *Execution) statsRoutine() {
	defer e.inProcess.Done()
	pid := e.cmd.Process.Pid
	var version string
	select {
	case sd, ok := <-e.startupData:
		if ok {
			// Prefer the self-reported PID, e.g. when the launched binary
			// re-executes itself.
			if int(sd.Pid) != pid {
				log.Infof("%s's self-reported PID (%d) is different from that of the process launched by Fleetspeak (%d)", e.daemonServiceName, sd.Pid, pid)
				pid = int(sd.Pid)
			}
			version = sd.Version
		} else {
			log.Warningf("%s startup data not received", e.daemonServiceName)
		}
	case <-time.After(startupDataTimeout):
		log.Warningf("%s startup data not received after %v", e.daemonServiceName, startupDataTimeout)
	case <-e.Done:
		return
	}
	if e.monitorHeartbeats {
		e.inProcess.Add(1)
		go e.heartbeatMonitorRoutine(pid)
	}
	rum, err := monitoring.New(e.sc, monitoring.ResourceUsageMonitorParams{
		Scope:            e.daemonServiceName,
		Pid:              pid,
		MemoryLimit:      e.memoryLimit,
		ProcessStartTime: e.StartTime,
		Version:          version,
		MaxSamplePeriod:  e.samplePeriod,
		SampleSize:       e.sampleSize,
		Done:             e.Done,
	})
	if err != nil {
		log.Errorf("Failed to create resource-usage monitor: %v", err)
		return
	}
	// This blocks until the daemon process terminates.
	rum.Run()
}
// heartbeatMonitorRoutine monitors the daemon process's heartbeats and kills
// unresponsive processes. A process is unresponsive when no message has been
// received within heartbeatUnresponsiveKillPeriod, except while we are
// blocked sending to the server (e.g. no network).
func (e *Execution) heartbeatMonitorRoutine(pid int) {
	defer e.inProcess.Done()
	// Give the child process some time to start up. During boot it sometimes
	// takes significantly more time than the unresponsive_kill_period to start
	// the child so we disable checking for heartbeats for a while.
	e.recordHeartbeat()
	sleepTime := e.heartbeatUnresponsiveGracePeriod
	for {
		select {
		case <-time.After(sleepTime):
		case <-e.dead:
			return
		}
		now := time.Now()
		var heartbeat time.Time
		if atomic.LoadInt32(&e.sending) != 0 {
			// We are blocked waiting for Send to complete, e.g. because there is no
			// network connection. Treat this as if we got a heartbeat 'now'.
			heartbeat = now
		} else {
			heartbeat = e.getHeartbeat()
		}
		// Sleep only as long as needed until the next possible deadline.
		sleepTime = e.heartbeatUnresponsiveKillPeriod - now.Sub(heartbeat)
		if now.Sub(heartbeat) > e.heartbeatUnresponsiveKillPeriod {
			// There is a very unlikely race condition if the machine gets suspended
			// for longer than unresponsive_kill_period seconds so we give the client
			// some time to catch up.
			select {
			case <-time.After(2 * time.Second):
			case <-e.dead:
				return
			}
			// Re-check after the grace window before deciding to kill.
			heartbeat = e.getHeartbeat()
			if now.Sub(heartbeat) > e.heartbeatUnresponsiveKillPeriod && atomic.LoadInt32(&e.sending) == 0 {
				// We have not received a heartbeat in a while, kill the child.
				log.Warningf("No heartbeat received from %s (pid %d), killing.", e.daemonServiceName, pid)
				// For consistency with MEMORY_EXCEEDED kills, send a notification before attempting to
				// kill the process.
				startTime, err := ptypes.TimestampProto(e.StartTime)
				if err != nil {
					log.Errorf("Failed to convert process start time: %v", err)
					startTime = nil
				}
				kn := &mpb.KillNotification{
					Service:          e.daemonServiceName,
					Pid:              int64(pid),
					ProcessStartTime: startTime,
					KilledWhen:       ptypes.TimestampNow(),
					Reason:           mpb.KillNotification_HEARTBEAT_FAILURE,
				}
				if version := e.serviceVersion.Get(); version != nil {
					kn.Version = *version
				}
				if err := monitoring.SendProtoToServer(kn, "KillNotification", e.sc); err != nil {
					log.Errorf("Failed to send kill notification to server: %v", err)
				}
				process := os.Process{Pid: pid}
				if err := process.Kill(); err != nil {
					log.Errorf("Error while killing a process that doesn't heartbeat - %s (pid %d): %v", e.daemonServiceName, pid, err)
					continue // Keep retrying.
				}
				return
			}
		}
	}
}
Anticipate system suspend when monitoring heartbeats.
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package execution provides an abstraction for a single execution of a command
// with the context of daemonservice.
package execution
import (
"context"
"errors"
"flag"
"fmt"
"os"
"os/exec"
"sync"
"sync/atomic"
"time"
log "github.com/golang/glog"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/google/fleetspeak/fleetspeak/src/client/channel"
"github.com/google/fleetspeak/fleetspeak/src/client/daemonservice/command"
"github.com/google/fleetspeak/fleetspeak/src/client/internal/monitoring"
"github.com/google/fleetspeak/fleetspeak/src/client/service"
fcpb "github.com/google/fleetspeak/fleetspeak/src/client/channel/proto/fleetspeak_channel"
dspb "github.com/google/fleetspeak/fleetspeak/src/client/daemonservice/proto/fleetspeak_daemonservice"
fspb "github.com/google/fleetspeak/fleetspeak/src/common/proto/fleetspeak"
mpb "github.com/google/fleetspeak/fleetspeak/src/common/proto/fleetspeak_monitoring"
)
// We flush the output when either of these thresholds are met.
const (
	maxFlushBytes           = 1 << 20   // 1MB of buffered stdout/stderr triggers a flush.
	defaultFlushTimeSeconds = int32(60) // Flush at least once a minute by default.
)

var (
	// ErrShuttingDown is returned once an execution has started shutting down and
	// is no longer accepting messages.
	ErrShuttingDown = errors.New("shutting down")

	stdForward = flag.Bool("std_forward", false,
		"If set attaches the dependent service to the client's stdin, stdout, stderr. Meant for testing individual daemonservice integrations.")

	// How long to wait for the daemon service to send startup data before starting to
	// monitor resource-usage.
	startupDataTimeout = 10 * time.Second
)
// atomicString is a string value that may be written and read concurrently.
// Its zero value is ready to use; Get reports "" until the first Set.
type atomicString struct {
	v atomic.Value
}

// Set stores val, replacing any previously stored string.
func (s *atomicString) Set(val string) {
	s.v.Store(val)
}

// Get returns the most recently stored string, or "" if Set has never been
// called.
func (s *atomicString) Get() string {
	if v, ok := s.v.Load().(string); ok {
		return v
	}
	return ""
}
// atomicBool is a bool that may be written and read concurrently. Its zero
// value is ready to use; Get reports false until the first Set.
type atomicBool struct {
	v atomic.Value
}

// Set stores val, replacing any previously stored value.
func (b *atomicBool) Set(val bool) {
	b.v.Store(val)
}

// Get returns the most recently stored value, or false if Set has never
// been called.
func (b *atomicBool) Get() bool {
	if v, ok := b.v.Load().(bool); ok {
		return v
	}
	return false
}
type atomicTime struct {
v atomic.Value
}
func (t *atomicTime) Set(val time.Time) {
t.v.Store(val)
}
func (t *atomicTime) Get() time.Time {
stored := t.v.Load()
if stored == nil {
return time.Unix(0, 0).UTC()
}
return stored.(time.Time)
}
// An Execution represents a specific execution of a daemonservice: one child
// process plus the goroutines that shuttle its messages, flush its output and
// monitor its resource usage.
type Execution struct {
	daemonServiceName string
	memoryLimit       int64
	samplePeriod      time.Duration
	sampleSize        int

	Done chan struct{}        // Closed when this execution is dead or dying - essentially when Shutdown has been called.
	Out  chan<- *fspb.Message // Messages to send to the process go here. User should close when finished.

	sc      service.Context
	channel *channel.Channel
	cmd     *command.Command

	StartTime time.Time

	outData        *dspb.StdOutputData // The next output data to send, once full.
	lastOut        time.Time           // Time of most recent output of outData.
	outLock        sync.Mutex          // Protects outData, lastOut
	outFlushBytes  int                 // How many bytes trigger an output. Constant.
	outServiceName string              // The service to send StdOutput messages to. Constant.

	shutdown   sync.Once
	lastActive int64 // Time of the last message input or output in seconds since epoch (UTC), atomic access only.

	dead       chan struct{} // closed when the underlying process has died.
	waitResult error         // result of Wait call - should only be read after dead is closed

	inProcess sync.WaitGroup // count of active goroutines

	startupData chan *fcpb.StartupData // Startup data sent by the daemon process.

	heartbeat                    atomicTime // Time when the last message was received from the daemon process.
	monitorHeartbeats            bool       // Whether to monitor the daemon process's heartbeats, killing it if it doesn't heartbeat often enough.
	initialHeartbeatDeadlineSecs int        // How long to wait for the initial heartbeat.
	heartbeatDeadlineSecs        int        // How long to wait for subsequent heartbeats.

	sending        atomicBool   // Indicates whether a message-send operation is in progress.
	serviceVersion atomicString // Version reported by the daemon process.
}
// New creates and starts an execution of the command described in cfg. Messages
// received from the resulting process are passed to sc, as are StdOutput and
// ResourceUsage messages.
//
// It returns an error (rather than panicking) if cfg does not name a command
// to run, or if the comms channel or process cannot be started.
func New(daemonServiceName string, cfg *dspb.Config, sc service.Context) (*Execution, error) {
	// Guard against a nil or empty Argv; without this, cfg.Argv[0] below
	// would panic with an index-out-of-range error.
	if cfg == nil || len(cfg.Argv) == 0 {
		return nil, fmt.Errorf("daemonservice [%s] config must include a command to execute (argv)", daemonServiceName)
	}
	// Clone so our normalization of StdParams below can't affect the caller.
	cfg = proto.Clone(cfg).(*dspb.Config)

	ret := Execution{
		daemonServiceName: daemonServiceName,
		memoryLimit:       cfg.MemoryLimit,
		sampleSize:        int(cfg.ResourceMonitoringSampleSize),
		samplePeriod:      time.Duration(cfg.ResourceMonitoringSamplePeriodSeconds) * time.Second,

		Done: make(chan struct{}),

		sc:        sc,
		cmd:       &command.Command{Cmd: *exec.Command(cfg.Argv[0], cfg.Argv[1:]...)},
		StartTime: time.Now(),

		outData: &dspb.StdOutputData{},
		lastOut: time.Now(),

		dead:        make(chan struct{}),
		startupData: make(chan *fcpb.StartupData, 1),

		monitorHeartbeats:            cfg.MonitorHeartbeats,
		initialHeartbeatDeadlineSecs: int(cfg.HeartbeatUnresponsiveGracePeriodSeconds),
		heartbeatDeadlineSecs:        int(cfg.HeartbeatUnresponsiveKillPeriodSeconds),
	}

	var err error
	ret.channel, err = ret.cmd.SetupCommsChannel()
	if err != nil {
		return nil, fmt.Errorf("failed to setup a comms channel: %v", err)
	}
	ret.Out = ret.channel.Out

	// StdParams without a destination service is unusable; drop it.
	if cfg.StdParams != nil && cfg.StdParams.ServiceName == "" {
		log.Errorf("std_params is set, but service_name is empty. Ignoring std_params: %v", cfg.StdParams)
		cfg.StdParams = nil
	}

	// Wire up the subprocess's std streams: forward to our own std streams
	// (testing), buffer into StdOutput messages, or discard.
	if *stdForward {
		log.Warningf("std_forward is set, connecting std... to %s", cfg.Argv[0])
		ret.cmd.Stdin = os.Stdin
		ret.cmd.Stdout = os.Stdout
		ret.cmd.Stderr = os.Stderr
	} else if cfg.StdParams != nil {
		// Clamp the flush parameters into sane ranges.
		if cfg.StdParams.FlushBytes <= 0 || cfg.StdParams.FlushBytes > maxFlushBytes {
			cfg.StdParams.FlushBytes = maxFlushBytes
		}
		if cfg.StdParams.FlushTimeSeconds <= 0 {
			cfg.StdParams.FlushTimeSeconds = defaultFlushTimeSeconds
		}
		ret.outServiceName = cfg.StdParams.ServiceName
		ret.outFlushBytes = int(cfg.StdParams.FlushBytes)
		ret.cmd.Stdout = stdoutWriter{&ret}
		ret.cmd.Stderr = stderrWriter{&ret}
	} else {
		ret.cmd.Stdout = nil
		ret.cmd.Stderr = nil
	}

	if err := ret.cmd.Start(); err != nil {
		close(ret.Done)
		return nil, err
	}

	if cfg.StdParams != nil {
		ret.inProcess.Add(1)
		go ret.stdFlushRoutine(time.Second * time.Duration(cfg.StdParams.FlushTimeSeconds))
	}

	// One WaitGroup token for inRoutine, one for the reaper goroutine below.
	ret.inProcess.Add(2)
	go ret.inRoutine()
	if !cfg.DisableResourceMonitoring {
		ret.inProcess.Add(1)
		go ret.statsRoutine()
	}
	// Reaper goroutine: waits for the subprocess to exit, then triggers
	// shutdown and emits a final (terminated) resource-usage report.
	go func() {
		defer func() {
			ret.Shutdown()
			ret.inProcess.Done()
		}()
		ret.waitResult = ret.cmd.Wait()
		close(ret.dead)
		if ret.waitResult != nil {
			log.Warningf("subprocess ended with error: %v", ret.waitResult)
		}
		startTime, err := ptypes.TimestampProto(ret.StartTime)
		if err != nil {
			log.Errorf("Failed to convert process start time: %v", err)
			return
		}
		if !cfg.DisableResourceMonitoring {
			rud := &mpb.ResourceUsageData{
				Scope:             ret.daemonServiceName,
				Pid:               int64(ret.cmd.Process.Pid),
				ProcessStartTime:  startTime,
				DataTimestamp:     ptypes.TimestampNow(),
				ResourceUsage:     &mpb.AggregatedResourceUsage{},
				ProcessTerminated: true,
			}
			if err := monitoring.SendProtoToServer(rud, "ResourceUsage", ret.sc); err != nil {
				log.Errorf("Failed to send final resource-usage proto: %v", err)
			}
		}
	}()
	return &ret, nil
}
// Wait waits for all aspects of this execution to finish. This should happen
// soon after shutdown is called.
//
// Note that while it is a bug for this to take more than some seconds, the
// method isn't needed in normal operation - it exists primarily for tests to
// ensure that resources are not leaked.
func (e *Execution) Wait() {
	<-e.Done           // Shutdown has been initiated.
	e.channel.Wait()   // The comms channel has finished shutting down.
	e.inProcess.Wait() // All of our goroutines have returned.
}
// LastActive returns the last time that a message was sent or received, to the
// nearest second.
func (e *Execution) LastActive() time.Time {
	secs := atomic.LoadInt64(&e.lastActive)
	return time.Unix(secs, 0).UTC()
}

// setLastActive records t, truncated to the second, as the most recent
// activity time.
func (e *Execution) setLastActive(t time.Time) {
	atomic.StoreInt64(&e.lastActive, t.Unix())
}
// dataSize returns the total number of buffered stdout and stderr bytes in o.
func dataSize(o *dspb.StdOutputData) int {
	n := len(o.Stdout)
	n += len(o.Stderr)
	return n
}
// flushOut flushes e.outData. It assumes that e.outLock is already held.
func (e *Execution) flushOut() {
	// set lastOut before the blocking call to sc.Send, so the next flush
	// has an accurate sense of how stale the data might be.
	n := time.Now()
	e.lastOut = n
	// Nothing buffered - sending an empty StdOutput would be pointless.
	if dataSize(e.outData) == 0 {
		return
	}
	e.setLastActive(n)

	e.outData.Pid = int64(e.cmd.Process.Pid)
	d, err := ptypes.MarshalAny(e.outData)
	if err != nil {
		log.Errorf("unable to marshal StdOutputData: %v", err)
	} else {
		e.sc.Send(context.Background(), service.AckMessage{
			M: &fspb.Message{
				Destination: &fspb.Address{ServiceName: e.outServiceName},
				MessageType: "StdOutput",
				Data:        d,
			}})
	}
	// Start a fresh buffer; the incremented MessageIndex lets the receiver
	// order the chunks.
	e.outData = &dspb.StdOutputData{
		MessageIndex: e.outData.MessageIndex + 1,
	}
}
// writeToOut appends p to the buffered output, flushing whenever the buffer
// reaches e.outFlushBytes. isErr selects the stderr stream over stdout.
// It takes e.outLock, so it is safe for concurrent use by the stdout and
// stderr writers.
func (e *Execution) writeToOut(p []byte, isErr bool) {
	e.outLock.Lock()
	defer e.outLock.Unlock()

	for {
		currSize := dataSize(e.outData)
		// If it all fits, write it and return.
		if currSize+len(p) <= e.outFlushBytes {
			if isErr {
				e.outData.Stderr = append(e.outData.Stderr, p...)
			} else {
				e.outData.Stdout = append(e.outData.Stdout, p...)
			}
			return
		}
		// Write what does fit, flush, continue.
		toWrite := e.outFlushBytes - currSize
		if isErr {
			e.outData.Stderr = append(e.outData.Stderr, p[:toWrite]...)
		} else {
			e.outData.Stdout = append(e.outData.Stdout, p[:toWrite]...)
		}
		p = p[toWrite:]
		e.flushOut()
	}
}
// stdoutWriter is an io.Writer that buffers the subprocess's stdout bytes
// on an Execution, to be flushed as StdOutput messages.
type stdoutWriter struct {
	e *Execution
}

// Write buffers p as stdout data; it always reports the full length as
// written and never fails.
func (w stdoutWriter) Write(p []byte) (int, error) {
	n := len(p)
	w.e.writeToOut(p, false)
	return n, nil
}
// stderrWriter is an io.Writer that buffers the subprocess's stderr bytes
// on an Execution, to be flushed as StdOutput messages.
type stderrWriter struct {
	e *Execution
}

// Write buffers p as stderr data; it always reports the full length as
// written and never fails.
func (w stderrWriter) Write(p []byte) (int, error) {
	n := len(p)
	w.e.writeToOut(p, true)
	return n, nil
}
// stdFlushRoutine periodically flushes buffered std output so that data
// reaches the server even when the byte threshold is never hit. It performs
// a final flush and exits once the subprocess dies.
func (e *Execution) stdFlushRoutine(flushTime time.Duration) {
	defer e.inProcess.Done()
	t := time.NewTicker(flushTime)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			e.outLock.Lock()
			// Only flush if nothing else flushed within the last period.
			if e.lastOut.Add(flushTime).Before(time.Now()) {
				e.flushOut()
			}
			e.outLock.Unlock()
		case <-e.dead:
			// Process ended; flush whatever remains and stop.
			e.outLock.Lock()
			e.flushOut()
			e.outLock.Unlock()
			return
		}
	}
}
// waitForDeath blocks until the subprocess has died, or until d has elapsed.
// It reports whether the subprocess died within the deadline.
func (e *Execution) waitForDeath(d time.Duration) bool {
	deadline := time.NewTimer(d)
	defer deadline.Stop()
	select {
	case <-deadline.C:
		return false
	case <-e.dead:
		return true
	}
}
// Shutdown shuts down this execution. It escalates: graceful close, then
// SoftKill, then Kill, waiting one second between steps. Safe to call from
// multiple goroutines; only the first call acts.
func (e *Execution) Shutdown() {
	e.shutdown.Do(func() {
		// First we attempt a gentle shutdown. Closing e.Done tells our
		// user not to give us any more data, in response they should
		// close e.Out a.k.a e.channel.Out. Closing e.channel.Out will
		// cause channel to close the pipe to the dependent process,
		// which then causes it to clean up nicely.
		close(e.Done)
		if e.waitForDeath(time.Second) {
			return
		}
		// This pattern is technically racy - the process could end and the process
		// id could be recycled since the end of waitForDeath and before we SoftKill
		// or Kill using the process id.
		//
		// A formally correct way to implement this is to spawn a wrapper process
		// which does not die in response to SIGTERM but forwards the signal to the
		// wrapped process - its child. This would ensure that the process is still
		// around all the way to the SIGKILL.
		if err := e.cmd.SoftKill(); err != nil {
			log.Errorf("SoftKill [%d] returned error: %v", e.cmd.Process.Pid, err)
		}
		if e.waitForDeath(time.Second) {
			return
		}
		if err := e.cmd.Kill(); err != nil {
			log.Errorf("Kill [%d] returned error: %v", e.cmd.Process.Pid, err)
		}
		if e.waitForDeath(time.Second) {
			return
		}
		// It is hard to imagine how we might end up here - maybe the process is
		// somehow stuck in a system call or there is some other OS level weirdness.
		// One possibility is that cmd is a zombie process now.
		log.Errorf("Subprocess [%d] appears to have survived SIGKILL.", e.cmd.Process.Pid)
	})
}
// inRoutine reads messages from the dependent process and passes them to
// fleetspeak. Messages addressed to the "system" service (StartupData,
// Heartbeat) are consumed here; everything else is forwarded via e.sc.
func (e *Execution) inRoutine() {
	defer func() {
		e.Shutdown()
		e.inProcess.Done()
	}()

	var startupDone bool

	for {
		m := e.readMsg()
		if m == nil {
			// Channel closed or errored - shut down.
			return
		}
		// Any message from the daemon counts as activity and a heartbeat.
		e.setLastActive(time.Now())
		e.heartbeat.Set(time.Now())

		if m.Destination != nil && m.Destination.ServiceName == "system" {
			switch m.MessageType {
			case "StartupData":
				if startupDone {
					log.Warning("Received spurious startup message, ignoring.")
					break
				}
				startupDone = true

				sd := &fcpb.StartupData{}
				if err := ptypes.UnmarshalAny(m.Data, sd); err != nil {
					log.Warningf("Failed to parse startup data from initial message: %v", err)
				} else {
					if sd.Version != "" {
						e.serviceVersion.Set(sd.Version)
					}
					e.startupData <- sd
				}
				close(e.startupData) // No more values to send through the channel.
			case "Heartbeat":
				// Pass, handled above.
			default:
				log.Warningf("Unknown system message type: %s", m.MessageType)
			}
		} else {
			// Mark the send in progress so the heartbeat monitor doesn't blame
			// the daemon for time spent blocked on a full local buffer.
			e.sending.Set(true)
			// sc.Send() buffers the message locally for sending to the Fleetspeak server. It will
			// block if the buffer is full.
			if err := e.sc.Send(context.Background(), service.AckMessage{M: m}); err != nil {
				log.Errorf("error sending message to server: %v", err)
			}
			e.sending.Set(false)
		}
	}
}
// readMsg blocks until a message is available from the channel. It returns
// nil when the channel has been closed or has reported an error.
func (e *Execution) readMsg() *fspb.Message {
	select {
	case err := <-e.channel.Err:
		log.Errorf("channel produced error: %v", err)
		return nil
	case m, ok := <-e.channel.In:
		if !ok {
			return nil
		}
		return m
	}
}
// statsRoutine monitors the daemon process's resource usage, sending reports to the server
// at regular intervals.
func (e *Execution) statsRoutine() {
	defer e.inProcess.Done()
	pid := e.cmd.Process.Pid
	var version string

	// Wait for startup data, which may correct the pid (e.g. if the launched
	// binary is a wrapper) and supply a version string.
	select {
	case sd, ok := <-e.startupData:
		if ok {
			if int(sd.Pid) != pid {
				log.Infof("%s's self-reported PID (%d) is different from that of the process launched by Fleetspeak (%d)", e.daemonServiceName, sd.Pid, pid)
				pid = int(sd.Pid)
			}
			version = sd.Version
		} else {
			// Channel was closed without a value (startup message missing
			// or unparseable).
			log.Warningf("%s startup data not received", e.daemonServiceName)
		}
	case <-time.After(startupDataTimeout):
		log.Warningf("%s startup data not received after %v", e.daemonServiceName, startupDataTimeout)
	case <-e.Done:
		return
	}

	if e.monitorHeartbeats {
		e.inProcess.Add(1)
		go e.heartbeatMonitorRoutine(pid)
	}

	rum, err := monitoring.New(e.sc, monitoring.ResourceUsageMonitorParams{
		Scope:            e.daemonServiceName,
		Pid:              pid,
		MemoryLimit:      e.memoryLimit,
		ProcessStartTime: e.StartTime,
		Version:          version,
		MaxSamplePeriod:  e.samplePeriod,
		SampleSize:       e.sampleSize,
		Done:             e.Done,
	})
	if err != nil {
		log.Errorf("Failed to create resource-usage monitor: %v", err)
		return
	}

	// This blocks until the daemon process terminates.
	rum.Run()
}
// busySleep sleeps for a given number of seconds, not counting the time
// when the Fleetspeak process is suspended. Returns true if execution
// should continue (i.e if the daemon process is still alive).
func (e *Execution) busySleep(sleepSecs int) bool {
	for remaining := sleepSecs; remaining > 0; remaining-- {
		select {
		// With very high probability, if the system gets suspended, it will
		// occur while waiting on this one-second timer.
		case <-time.After(time.Second):
		case <-e.dead:
			return false
		}
	}
	return true
}
// heartbeatMonitorRoutine monitors the daemon process's heartbeats and kills
// unresponsive processes. It exits as soon as the subprocess dies. pid is
// the process id to check and, if necessary, kill.
func (e *Execution) heartbeatMonitorRoutine(pid int) {
	defer e.inProcess.Done()

	// Give the child process some time to start up. During boot it sometimes
	// takes significantly more time than the unresponsive_kill_period to start
	// the child so we disable checking for heartbeats for a while.
	e.heartbeat.Set(time.Now())
	if !e.busySleep(e.initialHeartbeatDeadlineSecs) {
		return
	}

	for {
		if e.sending.Get() { // Blocked trying to buffer a message for sending to the FS server.
			if e.busySleep(e.heartbeatDeadlineSecs) {
				continue
			} else {
				return
			}
		}
		secsSinceLastHB := int(time.Since(e.heartbeat.Get()).Seconds())
		if secsSinceLastHB > e.heartbeatDeadlineSecs {
			// There is a very unlikely race condition if the machine gets suspended
			// for longer than unresponsive_kill_period seconds so we give the client
			// some time to catch up.
			if !e.busySleep(2) {
				return
			}
			secsSinceLastHB = int(time.Since(e.heartbeat.Get()).Seconds())
			if secsSinceLastHB > e.heartbeatDeadlineSecs && !e.sending.Get() {
				// We have not received a heartbeat in a while, kill the child.
				log.Warningf("No heartbeat received from %s (pid %d), killing.", e.daemonServiceName, pid)

				// For consistency with MEMORY_EXCEEDED kills, send a notification before attempting to
				// kill the process.
				startTime, err := ptypes.TimestampProto(e.StartTime)
				if err != nil {
					log.Errorf("Failed to convert process start time: %v", err)
					startTime = nil
				}
				kn := &mpb.KillNotification{
					Service:          e.daemonServiceName,
					Pid:              int64(pid),
					ProcessStartTime: startTime,
					KilledWhen:       ptypes.TimestampNow(),
					Reason:           mpb.KillNotification_HEARTBEAT_FAILURE,
				}
				if version := e.serviceVersion.Get(); version != "" {
					kn.Version = version
				}
				if err := monitoring.SendProtoToServer(kn, "KillNotification", e.sc); err != nil {
					log.Errorf("Failed to send kill notification to server: %v", err)
				}

				process := os.Process{Pid: pid}
				if err := process.Kill(); err != nil {
					log.Errorf("Error while killing a process that doesn't heartbeat - %s (pid %d): %v", e.daemonServiceName, pid, err)
					continue // Keep retrying.
				}
				return
			}
		}
		// Sleep until when the next heartbeat is due.
		if !e.busySleep(e.heartbeatDeadlineSecs - secsSinceLastHB) {
			return
		}
	}
}
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package lzw implements the Lempel-Ziv-Welch compressed data format,
// described in T. A. Welch, ``A Technique for High-Performance Data
// Compression'', Computer, 17(6) (June 1984), pp 8-19.
//
// In particular, it implements LZW as used by the GIF, TIFF and PDF file
// formats, which means variable-width codes up to 12 bits and the first
// two non-literal codes are a clear code and an EOF code.
package lzw
// TODO(nigeltao): check that TIFF and PDF use LZW in the same way as GIF,
// modulo LSB/MSB packing order.
import (
"bufio"
"fmt"
"io"
"os"
)
// Order specifies the bit ordering in an LZW data stream.
type Order int

const (
	// LSB means Least Significant Bits first, as used in the GIF file format.
	LSB Order = iota
	// MSB means Most Significant Bits first, as used in the TIFF and PDF
	// file formats.
	MSB
)

const (
	// maxWidth is the maximum code width, in bits, allowed by the format.
	maxWidth = 12
	// decoderInvalidCode is a sentinel meaning "no previously seen code".
	decoderInvalidCode = 0xffff
	// flushBuffer is the output fill level that triggers a flush to Read.
	flushBuffer = 1 << maxWidth
)
// decoder is the state from which the readXxx method converts a byte
// stream into a code stream.
type decoder struct {
	r        io.ByteReader                     // compressed input, read one byte at a time
	bits     uint32                            // accumulator of input bits not yet assembled into codes
	nBits    uint                              // number of valid bits in bits
	width    uint                              // current code width in bits
	read     func(*decoder) (uint16, os.Error) // readLSB or readMSB
	litWidth int                               // width in bits of literal codes
	err      os.Error                          // sticky error; once set, Reads fail with it
	// The first 1<<litWidth codes are literal codes.
	// The next two codes mean clear and EOF.
	// Other valid codes are in the range [lo, hi] where lo := clear + 2,
	// with the upper bound incrementing on each code seen.
	// overflow is the code at which hi overflows the code width.
	// last is the most recently seen code, or decoderInvalidCode.
	clear, eof, hi, overflow, last uint16
	// Each code c in [lo, hi] expands to two or more bytes. For c != hi:
	// suffix[c] is the last of these bytes.
	// prefix[c] is the code for all but the last byte.
	// This code can either be a literal code or another code in [lo, c).
	// The c == hi case is a special case.
	suffix [1 << maxWidth]uint8
	prefix [1 << maxWidth]uint16
	// buf is a scratch buffer for reconstituting the bytes that a code expands to.
	// Code suffixes are written right-to-left from the end of the buffer.
	buf [1 << maxWidth]byte
	// output is the temporary output buffer.
	// It is flushed when it contains >= 1<<maxWidth bytes,
	// so that there is always room to copy buf into it while decoding.
	output [2 * 1 << maxWidth]byte
	o      int    // write index into output
	toRead []byte // bytes to return from Read
}
// readLSB returns the next code for "Least Significant Bits first" data.
func (d *decoder) readLSB() (uint16, os.Error) {
	// Top up the accumulator until it holds at least one full code.
	for d.nBits < d.width {
		x, err := d.r.ReadByte()
		if err != nil {
			return 0, err
		}
		d.bits |= uint32(x) << d.nBits
		d.nBits += 8
	}
	// Extract the low d.width bits, then shift them out.
	code := uint16(d.bits & (1<<d.width - 1))
	d.bits >>= d.width
	d.nBits -= d.width
	return code, nil
}
// readMSB returns the next code for "Most Significant Bits first" data.
func (d *decoder) readMSB() (uint16, os.Error) {
	// New bytes fill the accumulator from the high end downward.
	for d.nBits < d.width {
		x, err := d.r.ReadByte()
		if err != nil {
			return 0, err
		}
		d.bits |= uint32(x) << (24 - d.nBits)
		d.nBits += 8
	}
	// Extract the top d.width bits, then shift them out.
	code := uint16(d.bits >> (32 - d.width))
	d.bits <<= d.width
	d.nBits -= d.width
	return code, nil
}
// Read implements io.Reader, filling b with decompressed bytes.
func (d *decoder) Read(b []byte) (int, os.Error) {
	for {
		// Serve previously decoded bytes first.
		if len(d.toRead) > 0 {
			n := copy(b, d.toRead)
			d.toRead = d.toRead[n:]
			return n, nil
		}
		if d.err != nil {
			return 0, d.err
		}
		// Decode more input into d.toRead, then loop to serve it.
		d.decode()
	}
	panic("unreachable")
}
// decode decompresses bytes from r and leaves them in d.toRead.
// read specifies how to decode bytes into codes.
// litWidth is the width in bits of literal codes.
func (d *decoder) decode() {
	// Loop over the code stream, converting codes into decompressed bytes.
	for {
		code, err := d.read(d)
		if err != nil {
			if err == os.EOF {
				// A valid stream must end with an explicit EOF code.
				err = io.ErrUnexpectedEOF
			}
			d.err = err
			return
		}
		switch {
		case code < d.clear:
			// We have a literal code.
			d.output[d.o] = uint8(code)
			d.o++
			if d.last != decoderInvalidCode {
				// Save what the hi code expands to.
				d.suffix[d.hi] = uint8(code)
				d.prefix[d.hi] = d.last
			}
		case code == d.clear:
			// Clear code: reset the dictionary to its initial state.
			d.width = 1 + uint(d.litWidth)
			d.hi = d.eof
			d.overflow = 1 << d.width
			d.last = decoderInvalidCode
			continue
		case code == d.eof:
			d.flush()
			d.err = os.EOF
			return
		case code <= d.hi:
			c, i := code, len(d.buf)-1
			if code == d.hi {
				// code == hi is a special case which expands to the last expansion
				// followed by the head of the last expansion. To find the head, we walk
				// the prefix chain until we find a literal code.
				c = d.last
				for c >= d.clear {
					c = d.prefix[c]
				}
				d.buf[i] = uint8(c)
				i--
				c = d.last
			}
			// Copy the suffix chain into buf and then write that to w.
			for c >= d.clear {
				d.buf[i] = d.suffix[c]
				i--
				c = d.prefix[c]
			}
			d.buf[i] = uint8(c)
			d.o += copy(d.output[d.o:], d.buf[i:])
			if d.last != decoderInvalidCode {
				// Save what the hi code expands to.
				d.suffix[d.hi] = uint8(c)
				d.prefix[d.hi] = d.last
			}
		default:
			d.err = os.NewError("lzw: invalid code")
			return
		}
		// Record the new dictionary entry, widening codes once hi no longer
		// fits in the current width (capped at maxWidth).
		d.last, d.hi = code, d.hi+1
		if d.hi >= d.overflow {
			if d.width == maxWidth {
				d.last = decoderInvalidCode
			} else {
				d.width++
				d.overflow <<= 1
			}
		}
		if d.o >= flushBuffer {
			// Enough output buffered; hand it to Read.
			d.flush()
			return
		}
	}
	panic("unreachable")
}
// flush makes the buffered output available to Read and resets the write
// index.
func (d *decoder) flush() {
	d.toRead = d.output[:d.o]
	d.o = 0
}
// Close closes the decoder; subsequent Reads fail with os.EINVAL.
func (d *decoder) Close() os.Error {
	d.err = os.EINVAL // in case any Reads come along
	return nil
}
// NewReader creates a new io.ReadCloser that satisfies reads by decompressing
// the data read from r.
// It is the caller's responsibility to call Close on the ReadCloser when
// finished reading.
// The number of bits to use for literal codes, litWidth, must be in the
// range [2,8] and is typically 8.
func NewReader(r io.Reader, order Order, litWidth int) io.ReadCloser {
	d := new(decoder)
	switch order {
	case LSB:
		d.read = (*decoder).readLSB
	case MSB:
		d.read = (*decoder).readMSB
	default:
		// Invalid arguments surface as a sticky error on the first Read.
		d.err = os.NewError("lzw: unknown order")
		return d
	}
	if litWidth < 2 || 8 < litWidth {
		d.err = fmt.Errorf("lzw: litWidth %d out of range", litWidth)
		return d
	}
	// Codes are read one byte at a time; wrap r if it can't do that itself.
	if br, ok := r.(io.ByteReader); ok {
		d.r = br
	} else {
		d.r = bufio.NewReader(r)
	}
	d.litWidth = litWidth
	d.width = 1 + uint(litWidth)
	d.clear = uint16(1) << uint(litWidth)
	d.eof, d.hi = d.clear+1, d.clear+1
	d.overflow = uint16(1) << d.width
	d.last = decoderInvalidCode
	return d
}
compress/lzw: reduce decoder buffer size from 3*4096 to 2*4096.
This happens to speed up the decoder benchmarks by 50% on my computer
(GOARCH=amd64 GOOS=linux), but I don't have a good intuition as to why.
For example, just adding an unused [4096]byte field to the decoder
struct doesn't significantly change the numbers.
Before:
lzw.BenchmarkDecoder1e4 5000 488057 ns/op 20.49 MB/s
lzw.BenchmarkDecoder1e5 500 4613638 ns/op 21.67 MB/s
lzw.BenchmarkDecoder1e6 50 45672260 ns/op 21.90 MB/s
lzw.BenchmarkEncoder1e4 5000 353563 ns/op 28.28 MB/s
lzw.BenchmarkEncoder1e5 500 3431618 ns/op 29.14 MB/s
lzw.BenchmarkEncoder1e6 50 34009640 ns/op 29.40 MB/s
After:
lzw.BenchmarkDecoder1e4 5000 339725 ns/op 29.44 MB/s
lzw.BenchmarkDecoder1e5 500 3166894 ns/op 31.58 MB/s
lzw.BenchmarkDecoder1e6 50 31317260 ns/op 31.93 MB/s
lzw.BenchmarkEncoder1e4 5000 354909 ns/op 28.18 MB/s
lzw.BenchmarkEncoder1e5 500 3432710 ns/op 29.13 MB/s
lzw.BenchmarkEncoder1e6 50 34010500 ns/op 29.40 MB/s
R=rsc, r
CC=golang-dev
http://codereview.appspot.com/4535123
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package lzw implements the Lempel-Ziv-Welch compressed data format,
// described in T. A. Welch, ``A Technique for High-Performance Data
// Compression'', Computer, 17(6) (June 1984), pp 8-19.
//
// In particular, it implements LZW as used by the GIF, TIFF and PDF file
// formats, which means variable-width codes up to 12 bits and the first
// two non-literal codes are a clear code and an EOF code.
package lzw
// TODO(nigeltao): check that TIFF and PDF use LZW in the same way as GIF,
// modulo LSB/MSB packing order.
import (
"bufio"
"fmt"
"io"
"os"
)
// Order specifies the bit ordering in an LZW data stream.
type Order int

const (
	// LSB means Least Significant Bits first, as used in the GIF file format.
	LSB Order = iota
	// MSB means Most Significant Bits first, as used in the TIFF and PDF
	// file formats.
	MSB
)

const (
	// maxWidth is the maximum code width, in bits, allowed by the format.
	maxWidth = 12
	// decoderInvalidCode is a sentinel meaning "no previously seen code".
	decoderInvalidCode = 0xffff
	// flushBuffer is the output fill level that triggers a flush to Read.
	flushBuffer = 1 << maxWidth
)
// decoder is the state from which the readXxx method converts a byte
// stream into a code stream.
type decoder struct {
	r        io.ByteReader                     // compressed input, read one byte at a time
	bits     uint32                            // accumulator of input bits not yet assembled into codes
	nBits    uint                              // number of valid bits in bits
	width    uint                              // current code width in bits
	read     func(*decoder) (uint16, os.Error) // readLSB or readMSB
	litWidth int                               // width in bits of literal codes
	err      os.Error                          // sticky error; once set, Reads fail with it
	// The first 1<<litWidth codes are literal codes.
	// The next two codes mean clear and EOF.
	// Other valid codes are in the range [lo, hi] where lo := clear + 2,
	// with the upper bound incrementing on each code seen.
	// overflow is the code at which hi overflows the code width.
	// last is the most recently seen code, or decoderInvalidCode.
	clear, eof, hi, overflow, last uint16
	// Each code c in [lo, hi] expands to two or more bytes. For c != hi:
	// suffix[c] is the last of these bytes.
	// prefix[c] is the code for all but the last byte.
	// This code can either be a literal code or another code in [lo, c).
	// The c == hi case is a special case.
	suffix [1 << maxWidth]uint8
	prefix [1 << maxWidth]uint16
	// output is the temporary output buffer.
	// Literal codes are accumulated from the start of the buffer.
	// Non-literal codes decode to a sequence of suffixes that are first
	// written right-to-left from the end of the buffer before being copied
	// to the start of the buffer.
	// It is flushed when it contains >= 1<<maxWidth bytes,
	// so that there is always room to decode an entire code.
	output [2 * 1 << maxWidth]byte
	o      int    // write index into output
	toRead []byte // bytes to return from Read
}
// readLSB returns the next code for "Least Significant Bits first" data.
func (d *decoder) readLSB() (uint16, os.Error) {
	// Top up the accumulator until it holds at least one full code.
	for d.nBits < d.width {
		x, err := d.r.ReadByte()
		if err != nil {
			return 0, err
		}
		d.bits |= uint32(x) << d.nBits
		d.nBits += 8
	}
	// Extract the low d.width bits, then shift them out.
	code := uint16(d.bits & (1<<d.width - 1))
	d.bits >>= d.width
	d.nBits -= d.width
	return code, nil
}
// readMSB returns the next code for "Most Significant Bits first" data.
func (d *decoder) readMSB() (uint16, os.Error) {
	// New bytes fill the accumulator from the high end downward.
	for d.nBits < d.width {
		x, err := d.r.ReadByte()
		if err != nil {
			return 0, err
		}
		d.bits |= uint32(x) << (24 - d.nBits)
		d.nBits += 8
	}
	// Extract the top d.width bits, then shift them out.
	code := uint16(d.bits >> (32 - d.width))
	d.bits <<= d.width
	d.nBits -= d.width
	return code, nil
}
// Read implements io.Reader, filling b with decompressed bytes.
func (d *decoder) Read(b []byte) (int, os.Error) {
	for {
		// Serve previously decoded bytes first.
		if len(d.toRead) > 0 {
			n := copy(b, d.toRead)
			d.toRead = d.toRead[n:]
			return n, nil
		}
		if d.err != nil {
			return 0, d.err
		}
		// Decode more input into d.toRead, then loop to serve it.
		d.decode()
	}
	panic("unreachable")
}
// decode decompresses bytes from r and leaves them in d.toRead.
// read specifies how to decode bytes into codes.
// litWidth is the width in bits of literal codes.
func (d *decoder) decode() {
	// Loop over the code stream, converting codes into decompressed bytes.
	for {
		code, err := d.read(d)
		if err != nil {
			if err == os.EOF {
				// A valid stream must end with an explicit EOF code.
				err = io.ErrUnexpectedEOF
			}
			d.err = err
			return
		}
		switch {
		case code < d.clear:
			// We have a literal code.
			d.output[d.o] = uint8(code)
			d.o++
			if d.last != decoderInvalidCode {
				// Save what the hi code expands to.
				d.suffix[d.hi] = uint8(code)
				d.prefix[d.hi] = d.last
			}
		case code == d.clear:
			// Clear code: reset the dictionary to its initial state.
			d.width = 1 + uint(d.litWidth)
			d.hi = d.eof
			d.overflow = 1 << d.width
			d.last = decoderInvalidCode
			continue
		case code == d.eof:
			d.flush()
			d.err = os.EOF
			return
		case code <= d.hi:
			// Expand suffixes right-to-left from the end of output; the
			// expansion is then copied to the front at d.o.
			c, i := code, len(d.output)-1
			if code == d.hi {
				// code == hi is a special case which expands to the last expansion
				// followed by the head of the last expansion. To find the head, we walk
				// the prefix chain until we find a literal code.
				c = d.last
				for c >= d.clear {
					c = d.prefix[c]
				}
				d.output[i] = uint8(c)
				i--
				c = d.last
			}
			// Copy the suffix chain into output and then write that to w.
			for c >= d.clear {
				d.output[i] = d.suffix[c]
				i--
				c = d.prefix[c]
			}
			d.output[i] = uint8(c)
			d.o += copy(d.output[d.o:], d.output[i:])
			if d.last != decoderInvalidCode {
				// Save what the hi code expands to.
				d.suffix[d.hi] = uint8(c)
				d.prefix[d.hi] = d.last
			}
		default:
			d.err = os.NewError("lzw: invalid code")
			return
		}
		// Record the new dictionary entry, widening codes once hi no longer
		// fits in the current width (capped at maxWidth).
		d.last, d.hi = code, d.hi+1
		if d.hi >= d.overflow {
			if d.width == maxWidth {
				d.last = decoderInvalidCode
			} else {
				d.width++
				d.overflow <<= 1
			}
		}
		if d.o >= flushBuffer {
			// Enough output buffered; hand it to Read.
			d.flush()
			return
		}
	}
	panic("unreachable")
}
// flush makes the buffered output available to Read and resets the write
// index.
func (d *decoder) flush() {
	d.toRead = d.output[:d.o]
	d.o = 0
}
// Close closes the decoder; subsequent Reads fail with os.EINVAL.
func (d *decoder) Close() os.Error {
	d.err = os.EINVAL // in case any Reads come along
	return nil
}
// NewReader creates a new io.ReadCloser that satisfies reads by decompressing
// the data read from r.
// It is the caller's responsibility to call Close on the ReadCloser when
// finished reading.
// The number of bits to use for literal codes, litWidth, must be in the
// range [2,8] and is typically 8.
func NewReader(r io.Reader, order Order, litWidth int) io.ReadCloser {
	d := new(decoder)
	switch order {
	case LSB:
		d.read = (*decoder).readLSB
	case MSB:
		d.read = (*decoder).readMSB
	default:
		// Invalid arguments surface as a sticky error on the first Read.
		d.err = os.NewError("lzw: unknown order")
		return d
	}
	if litWidth < 2 || 8 < litWidth {
		d.err = fmt.Errorf("lzw: litWidth %d out of range", litWidth)
		return d
	}
	// Codes are read one byte at a time; wrap r if it can't do that itself.
	if br, ok := r.(io.ByteReader); ok {
		d.r = br
	} else {
		d.r = bufio.NewReader(r)
	}
	d.litWidth = litWidth
	d.width = 1 + uint(litWidth)
	d.clear = uint16(1) << uint(litWidth)
	d.eof, d.hi = d.clear+1, d.clear+1
	d.overflow = uint16(1) << d.width
	d.last = decoderInvalidCode
	return d
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains the exported entry points for invoking the parser.
package parser
import (
"bytes"
"go/ast"
"go/scanner"
"go/token"
"io"
"io/ioutil"
"os"
pathutil "path"
)
// If src != nil, readSource converts src to a []byte if possible;
// otherwise it returns an error. If src == nil, readSource returns
// the result of reading the file specified by filename.
//
func readSource(filename string, src interface{}) ([]byte, os.Error) {
	if src != nil {
		switch s := src.(type) {
		case string:
			return []byte(s), nil
		case []byte:
			return s, nil
		case *bytes.Buffer:
			// is io.Reader, but src is already available in []byte form
			if s != nil {
				return s.Bytes(), nil
			}
		case io.Reader:
			// Generic reader: drain it fully into memory.
			var buf bytes.Buffer
			_, err := io.Copy(&buf, s)
			if err != nil {
				return nil, err
			}
			return buf.Bytes(), nil
		default:
			return nil, os.ErrorString("invalid source")
		}
	}
	// src == nil (or a nil *bytes.Buffer fell through): read from the file.
	return ioutil.ReadFile(filename)
}
// parseEOF verifies that the parser consumed all input and returns any
// accumulated errors, sorted by position.
func (p *parser) parseEOF() os.Error {
	p.expect(token.EOF)
	return p.GetError(scanner.Sorted)
}
// ParseExpr parses a Go expression and returns the corresponding
// AST node. The filename and src arguments have the same interpretation
// as for ParseFile. If there is an error, the result expression
// may be nil or contain a partial AST.
//
func ParseExpr(filename string, src interface{}) (ast.Expr, os.Error) {
	data, err := readSource(filename, src)
	if err != nil {
		return nil, err
	}

	var p parser
	p.init(filename, data, 0)
	x := p.parseExpr()
	// The scanner automatically inserts a semicolon at the end of the
	// source; consume it here so parseEOF doesn't report it as a spurious
	// syntax error (issue 1170).
	if p.tok == token.SEMICOLON {
		p.next()
	}
	return x, p.parseEOF()
}
// ParseStmtList parses a list of Go statements and returns the list
// of corresponding AST nodes. The filename and src arguments have the same
// interpretation as for ParseFile. If there is an error, the node
// list may be nil or contain partial ASTs.
//
func ParseStmtList(filename string, src interface{}) ([]ast.Stmt, os.Error) {
	data, err := readSource(filename, src)
	if err != nil {
		return nil, err
	}

	var p parser
	p.init(filename, data, 0)
	return p.parseStmtList(), p.parseEOF()
}
// ParseDeclList parses a list of Go declarations and returns the list
// of corresponding AST nodes. The filename and src arguments have the same
// interpretation as for ParseFile. If there is an error, the node
// list may be nil or contain partial ASTs.
//
func ParseDeclList(filename string, src interface{}) ([]ast.Decl, os.Error) {
	data, err := readSource(filename, src)
	if err != nil {
		return nil, err
	}

	var p parser
	p.init(filename, data, 0)
	return p.parseDeclList(), p.parseEOF()
}
// ParseFile parses a Go source file and returns a File node.
//
// If src != nil, ParseFile parses the file source from src. src may
// be provided in a variety of formats. At the moment the following types
// are supported: string, []byte, and io.Reader. In this case, filename is
// only used for source position information and error messages.
//
// If src == nil, ParseFile parses the file specified by filename.
//
// The mode parameter controls the amount of source text parsed and other
// optional parser functionality.
//
// If the source couldn't be read, the returned AST is nil and the error
// indicates the specific failure. If the source was read but syntax
// errors were found, the result is a partial AST (with ast.BadX nodes
// representing the fragments of erroneous source code). Multiple errors
// are returned via a scanner.ErrorList which is sorted by file position.
//
func ParseFile(filename string, src interface{}, mode uint) (*ast.File, os.Error) {
	data, err := readSource(filename, src)
	if err != nil {
		return nil, err
	}

	var p parser
	p.init(filename, data, mode)
	return p.parseFile(), p.GetError(scanner.NoMultiples) // parseFile() reads to EOF
}
// ParseFiles calls ParseFile for each file in the filenames list and returns
// a map of package name -> package AST with all the packages found. The mode
// bits are passed to ParseFile unchanged.
//
// Files with parse errors are ignored. In this case the map of packages may
// be incomplete (missing packages and/or incomplete packages) and the first
// error encountered is returned.
//
func ParseFiles(filenames []string, mode uint) (pkgs map[string]*ast.Package, first os.Error) {
	pkgs = make(map[string]*ast.Package)
	for _, filename := range filenames {
		if src, err := ParseFile(filename, nil, mode); err == nil {
			name := src.Name.Name
			pkg, found := pkgs[name]
			if !found {
				// First file seen for this package; create its entry.
				pkg = &ast.Package{name, nil, make(map[string]*ast.File)}
				pkgs[name] = pkg
			}
			pkg.Files[filename] = src
		} else if first == nil {
			// Remember only the first parse error encountered.
			first = err
		}
	}
	return
}
// ParseDir calls ParseFile for the files in the directory specified by path and
// returns a map of package name -> package AST with all the packages found. If
// filter != nil, only the files with os.FileInfo entries passing through the filter
// are considered. The mode bits are passed to ParseFile unchanged.
//
// If the directory couldn't be read, a nil map and the respective error are
// returned. If a parse error occurred, a non-nil but incomplete map and the
// error are returned.
//
func ParseDir(path string, filter func(*os.FileInfo) bool, mode uint) (map[string]*ast.Package, os.Error) {
	fd, err := os.Open(path, os.O_RDONLY, 0)
	if err != nil {
		return nil, err
	}
	defer fd.Close()
	// Readdir(-1) returns all directory entries in a single slice.
	list, err := fd.Readdir(-1)
	if err != nil {
		return nil, err
	}
	// Collect the names of all entries accepted by the filter, then
	// trim the slice to the number actually kept.
	filenames := make([]string, len(list))
	n := 0
	for i := 0; i < len(list); i++ {
		d := &list[i]
		if filter == nil || filter(d) {
			filenames[n] = pathutil.Join(path, d.Name)
			n++
		}
	}
	filenames = filenames[0:n]
	return ParseFiles(filenames, mode)
}
go/parser: consume auto-inserted semi when calling ParseExpr()
Fixes issue 1170.
R=rsc
CC=golang-dev
http://codereview.appspot.com/2622041
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains the exported entry points for invoking the parser.
package parser
import (
"bytes"
"go/ast"
"go/scanner"
"go/token"
"io"
"io/ioutil"
"os"
pathutil "path"
)
// If src != nil, readSource converts src to a []byte if possible;
// otherwise it returns an error. If src == nil, readSource returns
// the result of reading the file specified by filename.
//
func readSource(filename string, src interface{}) ([]byte, os.Error) {
	if src != nil {
		switch s := src.(type) {
		case string:
			return []byte(s), nil
		case []byte:
			return s, nil
		case *bytes.Buffer:
			// is io.Reader, but src is already available in []byte form
			if s != nil {
				return s.Bytes(), nil
			}
			// A nil *bytes.Buffer falls through to the io.Reader /
			// default handling below? No — it falls out of the switch
			// and reads the file instead; a typed-nil buffer is treated
			// like src == nil.
		case io.Reader:
			var buf bytes.Buffer
			_, err := io.Copy(&buf, s)
			if err != nil {
				return nil, err
			}
			return buf.Bytes(), nil
		default:
			return nil, os.ErrorString("invalid source")
		}
	}
	return ioutil.ReadFile(filename)
}
// parseEOF requires that the next token is EOF and returns the
// parser's accumulated errors, sorted by file position.
func (p *parser) parseEOF() os.Error {
	p.expect(token.EOF)
	return p.GetError(scanner.Sorted)
}
// ParseExpr parses a Go expression and returns the corresponding
// AST node. The filename and src arguments have the same interpretation
// as for ParseFile. If there is an error, the result expression
// may be nil or contain a partial AST.
//
func ParseExpr(filename string, src interface{}) (ast.Expr, os.Error) {
	data, err := readSource(filename, src)
	if err != nil {
		return nil, err
	}
	var p parser
	p.init(filename, data, 0)
	x := p.parseExpr()
	// The scanner may have auto-inserted a semicolon after the
	// expression; skip it so parseEOF doesn't report a spurious error.
	if p.tok == token.SEMICOLON {
		p.next() // consume automatically inserted semicolon, if any
	}
	return x, p.parseEOF()
}
// ParseStmtList parses a list of Go statements and returns the list
// of corresponding AST nodes. The filename and src arguments have the same
// interpretation as for ParseFile. If there is an error, the node
// list may be nil or contain partial ASTs.
//
func ParseStmtList(filename string, src interface{}) ([]ast.Stmt, os.Error) {
	data, err := readSource(filename, src)
	if err != nil {
		return nil, err
	}
	var p parser
	p.init(filename, data, 0)
	// Parse the statements, then demand EOF; parseEOF also reports
	// any errors accumulated during parsing.
	return p.parseStmtList(), p.parseEOF()
}
// ParseDeclList parses a list of Go declarations and returns the list
// of corresponding AST nodes. The filename and src arguments have the same
// interpretation as for ParseFile. If there is an error, the node
// list may be nil or contain partial ASTs.
//
func ParseDeclList(filename string, src interface{}) ([]ast.Decl, os.Error) {
	data, err := readSource(filename, src)
	if err != nil {
		return nil, err
	}
	var p parser
	p.init(filename, data, 0)
	// Parse the declarations, then demand EOF; parseEOF also reports
	// any errors accumulated during parsing.
	return p.parseDeclList(), p.parseEOF()
}
// ParseFile parses a Go source file and returns a File node.
//
// If src != nil, ParseFile parses the file source from src. src may
// be provided in a variety of formats. At the moment the following types
// are supported: string, []byte, and io.Reader. In this case, filename is
// only used for source position information and error messages.
//
// If src == nil, ParseFile parses the file specified by filename.
//
// The mode parameter controls the amount of source text parsed and other
// optional parser functionality.
//
// If the source couldn't be read, the returned AST is nil and the error
// indicates the specific failure. If the source was read but syntax
// errors were found, the result is a partial AST (with ast.BadX nodes
// representing the fragments of erroneous source code). Multiple errors
// are returned via a scanner.ErrorList which is sorted by file position.
//
func ParseFile(filename string, src interface{}, mode uint) (*ast.File, os.Error) {
	data, err := readSource(filename, src)
	if err != nil {
		return nil, err
	}
	var p parser
	p.init(filename, data, mode)
	// No explicit parseEOF here: parseFile consumes the entire input,
	// so only the collected error list needs to be returned.
	return p.parseFile(), p.GetError(scanner.NoMultiples) // parseFile() reads to EOF
}
// ParseFiles calls ParseFile for each file in the filenames list and returns
// a map of package name -> package AST with all the packages found. The mode
// bits are passed to ParseFile unchanged.
//
// Files with parse errors are ignored. In this case the map of packages may
// be incomplete (missing packages and/or incomplete packages) and the first
// error encountered is returned.
//
func ParseFiles(filenames []string, mode uint) (pkgs map[string]*ast.Package, first os.Error) {
	pkgs = make(map[string]*ast.Package)
	for _, filename := range filenames {
		if src, err := ParseFile(filename, nil, mode); err == nil {
			name := src.Name.Name
			pkg, found := pkgs[name]
			if !found {
				// Unkeyed literal; positional fields presumably
				// Name, Scope (nil), Files — matches the ast.Package
				// of this vintage. TODO(review): confirm field order.
				pkg = &ast.Package{name, nil, make(map[string]*ast.File)}
				pkgs[name] = pkg
			}
			pkg.Files[filename] = src
		} else if first == nil {
			// Remember only the first error; later failures are dropped.
			first = err
		}
	}
	return
}
// ParseDir calls ParseFile for the files in the directory specified by path and
// returns a map of package name -> package AST with all the packages found. If
// filter != nil, only the files with os.FileInfo entries passing through the filter
// are considered. The mode bits are passed to ParseFile unchanged.
//
// If the directory couldn't be read, a nil map and the respective error are
// returned. If a parse error occurred, a non-nil but incomplete map and the
// error are returned.
//
func ParseDir(path string, filter func(*os.FileInfo) bool, mode uint) (map[string]*ast.Package, os.Error) {
	fd, err := os.Open(path, os.O_RDONLY, 0)
	if err != nil {
		return nil, err
	}
	defer fd.Close()
	// Readdir(-1) returns all directory entries in a single slice.
	list, err := fd.Readdir(-1)
	if err != nil {
		return nil, err
	}
	// Collect the names of all entries accepted by the filter, then
	// trim the slice to the number actually kept.
	filenames := make([]string, len(list))
	n := 0
	for i := 0; i < len(list); i++ {
		d := &list[i]
		if filter == nil || filter(d) {
			filenames[n] = pathutil.Join(path, d.Name)
			n++
		}
	}
	filenames = filenames[0:n]
	return ParseFiles(filenames, mode)
}
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Tests for transport.go
package http_test
import (
"bytes"
"compress/gzip"
"crypto/rand"
"fmt"
. "http"
"http/httptest"
"io"
"io/ioutil"
"os"
"strconv"
"testing"
"time"
)
// TODO: test 5 pipelined requests with responses: 1) OK, 2) OK, Connection: Close
// and then verify that the final 2 responses get errors back.
// hostPortHandler writes back the client's "host:port".
// If the request carries ?close=true it also asks the client to close
// the connection, which lets tests force a fresh connection per request.
var hostPortHandler = HandlerFunc(func(w ResponseWriter, r *Request) {
	if r.FormValue("close") == "true" {
		w.Header().Set("Connection", "close")
	}
	w.Write([]byte(r.RemoteAddr))
})
// Two subsequent requests and verify their response is the same.
// The response from the server is our own IP:port
// With keep-alives on, both requests should reuse one connection (same
// remote addr); with keep-alives disabled the addresses must differ.
func TestTransportKeepAlives(t *testing.T) {
	ts := httptest.NewServer(hostPortHandler)
	defer ts.Close()
	for _, disableKeepAlive := range []bool{false, true} {
		tr := &Transport{DisableKeepAlives: disableKeepAlive}
		c := &Client{Transport: tr}
		// fetch returns the response body (the client's host:port as
		// seen by the server) for request number n.
		fetch := func(n int) string {
			res, _, err := c.Get(ts.URL)
			if err != nil {
				t.Fatalf("error in disableKeepAlive=%v, req #%d, GET: %v", disableKeepAlive, n, err)
			}
			body, err := ioutil.ReadAll(res.Body)
			if err != nil {
				t.Fatalf("error in disableKeepAlive=%v, req #%d, ReadAll: %v", disableKeepAlive, n, err)
			}
			return string(body)
		}
		body1 := fetch(1)
		body2 := fetch(2)
		bodiesDiffer := body1 != body2
		if bodiesDiffer != disableKeepAlive {
			t.Errorf("error in disableKeepAlive=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q",
				disableKeepAlive, bodiesDiffer, body1, body2)
		}
	}
}
// TestTransportConnectionCloseOnResponse verifies that a server-sent
// "Connection: close" (triggered via ?close=true) forces the transport to
// open a new connection for the next request, while its absence allows reuse.
func TestTransportConnectionCloseOnResponse(t *testing.T) {
	ts := httptest.NewServer(hostPortHandler)
	defer ts.Close()
	for _, connectionClose := range []bool{false, true} {
		tr := &Transport{}
		c := &Client{Transport: tr}
		// fetch builds the request by hand so the URL query controls
		// whether the server responds with Connection: close.
		fetch := func(n int) string {
			req := new(Request)
			var err os.Error
			req.URL, err = ParseURL(ts.URL + fmt.Sprintf("?close=%v", connectionClose))
			if err != nil {
				t.Fatalf("URL parse error: %v", err)
			}
			req.Method = "GET"
			req.Proto = "HTTP/1.1"
			req.ProtoMajor = 1
			req.ProtoMinor = 1
			res, err := c.Do(req)
			if err != nil {
				t.Fatalf("error in connectionClose=%v, req #%d, Do: %v", connectionClose, n, err)
			}
			body, err := ioutil.ReadAll(res.Body)
			defer res.Body.Close()
			if err != nil {
				t.Fatalf("error in connectionClose=%v, req #%d, ReadAll: %v", connectionClose, n, err)
			}
			return string(body)
		}
		body1 := fetch(1)
		body2 := fetch(2)
		bodiesDiffer := body1 != body2
		if bodiesDiffer != connectionClose {
			t.Errorf("error in connectionClose=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q",
				connectionClose, bodiesDiffer, body1, body2)
		}
	}
}
// TestTransportConnectionCloseOnRequest is the client-side counterpart of
// TestTransportConnectionCloseOnResponse: setting req.Close should prevent
// the transport from reusing the connection for the next request.
func TestTransportConnectionCloseOnRequest(t *testing.T) {
	ts := httptest.NewServer(hostPortHandler)
	defer ts.Close()
	for _, connectionClose := range []bool{false, true} {
		tr := &Transport{}
		c := &Client{Transport: tr}
		fetch := func(n int) string {
			req := new(Request)
			var err os.Error
			req.URL, err = ParseURL(ts.URL)
			if err != nil {
				t.Fatalf("URL parse error: %v", err)
			}
			req.Method = "GET"
			req.Proto = "HTTP/1.1"
			req.ProtoMajor = 1
			req.ProtoMinor = 1
			// The knob under test: request-side Connection: close.
			req.Close = connectionClose
			res, err := c.Do(req)
			if err != nil {
				t.Fatalf("error in connectionClose=%v, req #%d, Do: %v", connectionClose, n, err)
			}
			body, err := ioutil.ReadAll(res.Body)
			if err != nil {
				t.Fatalf("error in connectionClose=%v, req #%d, ReadAll: %v", connectionClose, n, err)
			}
			return string(body)
		}
		body1 := fetch(1)
		body2 := fetch(2)
		bodiesDiffer := body1 != body2
		if bodiesDiffer != connectionClose {
			t.Errorf("error in connectionClose=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q",
				connectionClose, bodiesDiffer, body1, body2)
		}
	}
}
// TestTransportIdleCacheKeys verifies that the transport tracks exactly one
// idle-connection cache key per server after a completed request, that the
// key has the expected "|http|host:port" form, and that CloseIdleConnections
// empties the cache again.
func TestTransportIdleCacheKeys(t *testing.T) {
	ts := httptest.NewServer(hostPortHandler)
	defer ts.Close()
	tr := &Transport{DisableKeepAlives: false}
	c := &Client{Transport: tr}
	if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g {
		t.Errorf("After CloseIdleConnections expected %d idle conn cache keys; got %d", e, g)
	}
	resp, _, err := c.Get(ts.URL)
	if err != nil {
		// Fatal, not Error: on failure resp is nil and the ReadAll
		// below would panic with a nil pointer dereference.
		t.Fatal(err)
	}
	// Drain the body so the connection is returned to the idle pool.
	ioutil.ReadAll(resp.Body)
	keys := tr.IdleConnKeysForTesting()
	if e, g := 1, len(keys); e != g {
		t.Fatalf("After Get expected %d idle conn cache keys; got %d", e, g)
	}
	if e := "|http|" + ts.Listener.Addr().String(); keys[0] != e {
		t.Errorf("Expected idle cache key %q; got %q", e, keys[0])
	}
	tr.CloseIdleConnections()
	if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g {
		t.Errorf("After CloseIdleConnections expected %d idle conn cache keys; got %d", e, g)
	}
}
// TestTransportMaxPerHostIdleConns verifies that the idle pool never holds
// more than MaxIdleConnsPerHost connections for one host: three concurrent
// requests complete one at a time, and the idle count grows 1, 2, then stays
// capped at maxIdleConns after the third.
func TestTransportMaxPerHostIdleConns(t *testing.T) {
	resch := make(chan string)
	gotReq := make(chan bool)
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		gotReq <- true
		msg := <-resch
		_, err := w.Write([]byte(msg))
		if err != nil {
			t.Fatalf("Write: %v", err)
		}
	}))
	defer ts.Close()
	maxIdleConns := 2
	tr := &Transport{DisableKeepAlives: false, MaxIdleConnsPerHost: maxIdleConns}
	c := &Client{Transport: tr}
	// Start 3 outstanding requests and wait for the server to get them.
	// Their responses will hang until we write to resch, though.
	donech := make(chan bool)
	doReq := func() {
		resp, _, err := c.Get(ts.URL)
		if err != nil {
			t.Error(err)
		}
		_, err = ioutil.ReadAll(resp.Body)
		if err != nil {
			t.Fatalf("ReadAll: %v", err)
		}
		donech <- true
	}
	go doReq()
	<-gotReq
	go doReq()
	<-gotReq
	go doReq()
	<-gotReq
	// All three connections are busy, so nothing is idle yet.
	if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g {
		t.Fatalf("Before writes, expected %d idle conn cache keys; got %d", e, g)
	}
	// Release the responses one at a time and watch the idle count grow.
	resch <- "res1"
	<-donech
	keys := tr.IdleConnKeysForTesting()
	if e, g := 1, len(keys); e != g {
		t.Fatalf("after first response, expected %d idle conn cache keys; got %d", e, g)
	}
	cacheKey := "|http|" + ts.Listener.Addr().String()
	if keys[0] != cacheKey {
		t.Fatalf("Expected idle cache key %q; got %q", cacheKey, keys[0])
	}
	if e, g := 1, tr.IdleConnCountForTesting(cacheKey); e != g {
		t.Errorf("after first response, expected %d idle conns; got %d", e, g)
	}
	resch <- "res2"
	<-donech
	if e, g := 2, tr.IdleConnCountForTesting(cacheKey); e != g {
		t.Errorf("after second response, expected %d idle conns; got %d", e, g)
	}
	resch <- "res3"
	<-donech
	// The third idle connection must be dropped, not pooled.
	if e, g := maxIdleConns, tr.IdleConnCountForTesting(cacheKey); e != g {
		t.Errorf("after third response, still expected %d idle conns; got %d", e, g)
	}
}
// TestTransportServerClosingUnexpectedly verifies that the transport
// recovers when the server drops all client connections: the next request
// must succeed on a fresh connection (different remote port).
func TestTransportServerClosingUnexpectedly(t *testing.T) {
	ts := httptest.NewServer(hostPortHandler)
	defer ts.Close()
	tr := &Transport{}
	c := &Client{Transport: tr}
	fetch := func(n int) string {
		res, _, err := c.Get(ts.URL)
		if err != nil {
			t.Fatalf("error in req #%d, GET: %v", n, err)
		}
		body, err := ioutil.ReadAll(res.Body)
		if err != nil {
			t.Fatalf("error in req #%d, ReadAll: %v", n, err)
		}
		res.Body.Close()
		return string(body)
	}
	body1 := fetch(1)
	body2 := fetch(2)
	ts.CloseClientConnections() // surprise!
	time.Sleep(25e6)            // idle for a bit (test is inherently racy, but expectedly)
	body3 := fetch(3)
	// body1 == body2: keep-alive reused the connection.
	if body1 != body2 {
		t.Errorf("expected body1 and body2 to be equal")
	}
	// body3 differs: the dropped connection forced a new one.
	if body2 == body3 {
		t.Errorf("expected body2 and body3 to be different")
	}
}
// TestTransportHeadResponses verifies that we deal with Content-Lengths
// with no bodies properly
// (the header must be passed through while res.ContentLength stays 0,
// and the bodiless response must not poison the reused connection).
func TestTransportHeadResponses(t *testing.T) {
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		if r.Method != "HEAD" {
			panic("expected HEAD; got " + r.Method)
		}
		w.Header().Set("Content-Length", "123")
		w.WriteHeader(200)
	}))
	defer ts.Close()
	tr := &Transport{DisableKeepAlives: false}
	c := &Client{Transport: tr}
	// Two iterations so the second request exercises connection reuse.
	for i := 0; i < 2; i++ {
		res, err := c.Head(ts.URL)
		if err != nil {
			t.Errorf("error on loop %d: %v", i, err)
		}
		if e, g := "123", res.Header.Get("Content-Length"); e != g {
			t.Errorf("loop %d: expected Content-Length header of %q, got %q", i, e, g)
		}
		if e, g := int64(0), res.ContentLength; e != g {
			t.Errorf("loop %d: expected res.ContentLength of %v, got %v", i, e, g)
		}
	}
}
// TestTransportHeadChunkedResponse verifies that we ignore chunked transfer-encoding
// on responses to HEAD requests.
// Matching x-client-ipport headers across the two requests shows the
// connection was reused, i.e. the bogus chunked header didn't break it.
func TestTransportHeadChunkedResponse(t *testing.T) {
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		if r.Method != "HEAD" {
			panic("expected HEAD; got " + r.Method)
		}
		w.Header().Set("Transfer-Encoding", "chunked") // client should ignore
		w.Header().Set("x-client-ipport", r.RemoteAddr)
		w.WriteHeader(200)
	}))
	defer ts.Close()
	tr := &Transport{DisableKeepAlives: false}
	c := &Client{Transport: tr}
	res1, err := c.Head(ts.URL)
	if err != nil {
		t.Fatalf("request 1 error: %v", err)
	}
	res2, err := c.Head(ts.URL)
	if err != nil {
		t.Fatalf("request 2 error: %v", err)
	}
	if v1, v2 := res1.Header.Get("x-client-ipport"), res2.Header.Get("x-client-ipport"); v1 != v2 {
		t.Errorf("ip/ports differed between head requests: %q vs %q", v1, v2)
	}
}
// TestTransportNilURL verifies that RoundTrip falls back to req.RawURL
// when req.URL is nil and still performs the request successfully.
func TestTransportNilURL(t *testing.T) {
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		fmt.Fprintf(w, "Hi")
	}))
	defer ts.Close()
	req := new(Request)
	req.URL = nil // what we're actually testing
	req.Method = "GET"
	req.RawURL = ts.URL
	req.Proto = "HTTP/1.1"
	req.ProtoMajor = 1
	req.ProtoMinor = 1
	req.Header = make(Header)
	tr := &Transport{}
	res, err := tr.RoundTrip(req)
	if err != nil {
		t.Fatalf("unexpected RoundTrip error: %v", err)
	}
	body, err := ioutil.ReadAll(res.Body)
	// Previously this error was silently dropped; a failed read would
	// have surfaced as a confusing body-mismatch failure instead.
	if err != nil {
		t.Fatalf("unexpected ReadAll error: %v", err)
	}
	res.Body.Close()
	if g, e := string(body), "Hi"; g != e {
		t.Fatalf("Expected response body of %q; got %q", e, g)
	}
}
// TestTransportGzip verifies transparent response decompression: the
// transport advertises Accept-Encoding: gzip, decodes the gzipped body,
// strips the Content-Encoding header, and behaves sanely when the body is
// only partially read or read after Close. Both chunked and
// Content-Length responses are exercised.
func TestTransportGzip(t *testing.T) {
	const testString = "The test string aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
	const nRandBytes = 1024 * 1024
	ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
		if g, e := req.Header.Get("Accept-Encoding"), "gzip"; g != e {
			t.Errorf("Accept-Encoding = %q, want %q", g, e)
		}
		rw.Header().Set("Content-Encoding", "gzip")
		var w io.Writer = rw
		var buf bytes.Buffer
		if req.FormValue("chunked") == "0" {
			// Buffer the whole response so a Content-Length can be
			// set before anything is written to the wire. The defers
			// run in LIFO order: set the header, then copy the body.
			w = &buf
			defer io.Copy(rw, &buf)
			defer func() {
				rw.Header().Set("Content-Length", strconv.Itoa(buf.Len()))
			}()
		}
		gz, _ := gzip.NewWriter(w)
		gz.Write([]byte(testString))
		if req.FormValue("body") == "large" {
			io.Copyn(gz, rand.Reader, nRandBytes)
		}
		gz.Close()
	}))
	defer ts.Close()
	for _, chunked := range []string{"1", "0"} {
		c := &Client{Transport: &Transport{}}
		// First fetch something large, but only read some of it.
		res, _, err := c.Get(ts.URL + "?body=large&chunked=" + chunked)
		if err != nil {
			t.Fatalf("large get: %v", err)
		}
		buf := make([]byte, len(testString))
		n, err := io.ReadFull(res.Body, buf)
		if err != nil {
			t.Fatalf("partial read of large response: size=%d, %v", n, err)
		}
		if e, g := testString, string(buf); e != g {
			t.Errorf("partial read got %q, expected %q", g, e)
		}
		res.Body.Close()
		// Read on the body, even though it's closed
		n, err = res.Body.Read(buf)
		if n != 0 || err == nil {
			t.Errorf("expected error post-closed large Read; got = %d, %v", n, err)
		}
		// Then something small.
		res, _, err = c.Get(ts.URL + "?chunked=" + chunked)
		if err != nil {
			t.Fatal(err)
		}
		body, err := ioutil.ReadAll(res.Body)
		if err != nil {
			t.Fatal(err)
		}
		if g, e := string(body), testString; g != e {
			t.Fatalf("body = %q; want %q", g, e)
		}
		if g, e := res.Header.Get("Content-Encoding"), ""; g != e {
			t.Fatalf("Content-Encoding = %q; want %q", g, e)
		}
		// Read on the body after it's been fully read:
		n, err = res.Body.Read(buf)
		if n != 0 || err == nil {
			t.Errorf("expected Read error after exhausted reads; got %d, %v", n, err)
		}
		res.Body.Close()
		n, err = res.Body.Read(buf)
		if n != 0 || err == nil {
			t.Errorf("expected Read error after Close; got %d, %v", n, err)
		}
	}
}
// TestTransportGzipRecursive sends a gzip quine and checks that the
// client gets the same value back. This is more cute than anything,
// but checks that we don't recurse forever, and checks that
// Content-Encoding is removed.
func TestTransportGzipRecursive(t *testing.T) {
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		w.Header().Set("Content-Encoding", "gzip")
		w.Write(rgz)
	}))
	defer ts.Close()
	c := &Client{Transport: &Transport{}}
	res, _, err := c.Get(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		t.Fatal(err)
	}
	// One round of decompression must yield the quine itself, byte for byte.
	if !bytes.Equal(body, rgz) {
		t.Fatalf("Incorrect result from recursive gz:\nhave=%x\nwant=%x",
			body, rgz)
	}
	if g, e := res.Header.Get("Content-Encoding"), ""; g != e {
		t.Fatalf("Content-Encoding = %q; want %q", g, e)
	}
}
// rgz is a gzip quine that uncompresses to itself.
// Used by TestTransportGzipRecursive; do not modify these bytes — the
// quine property depends on the exact byte sequence.
var rgz = []byte{
	0x1f, 0x8b, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73,
	0x69, 0x76, 0x65, 0x00, 0x92, 0xef, 0xe6, 0xe0,
	0x60, 0x00, 0x83, 0xa2, 0xd4, 0xe4, 0xd2, 0xa2,
	0xe2, 0xcc, 0xb2, 0x54, 0x06, 0x00, 0x00, 0x17,
	0x00, 0xe8, 0xff, 0x92, 0xef, 0xe6, 0xe0, 0x60,
	0x00, 0x83, 0xa2, 0xd4, 0xe4, 0xd2, 0xa2, 0xe2,
	0xcc, 0xb2, 0x54, 0x06, 0x00, 0x00, 0x17, 0x00,
	0xe8, 0xff, 0x42, 0x12, 0x46, 0x16, 0x06, 0x00,
	0x05, 0x00, 0xfa, 0xff, 0x42, 0x12, 0x46, 0x16,
	0x06, 0x00, 0x05, 0x00, 0xfa, 0xff, 0x00, 0x05,
	0x00, 0xfa, 0xff, 0x00, 0x14, 0x00, 0xeb, 0xff,
	0x42, 0x12, 0x46, 0x16, 0x06, 0x00, 0x05, 0x00,
	0xfa, 0xff, 0x00, 0x05, 0x00, 0xfa, 0xff, 0x00,
	0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4,
	0x00, 0x00, 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88,
	0x21, 0xc4, 0x00, 0x00, 0x14, 0x00, 0xeb, 0xff,
	0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, 0x14, 0x00,
	0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00,
	0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00,
	0x00, 0xff, 0xff, 0x00, 0x17, 0x00, 0xe8, 0xff,
	0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, 0x00, 0x00,
	0xff, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00,
	0x17, 0x00, 0xe8, 0xff, 0x42, 0x12, 0x46, 0x16,
	0x06, 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x08,
	0x00, 0xf7, 0xff, 0x3d, 0xb1, 0x20, 0x85, 0xfa,
	0x00, 0x00, 0x00, 0x42, 0x12, 0x46, 0x16, 0x06,
	0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x08, 0x00,
	0xf7, 0xff, 0x3d, 0xb1, 0x20, 0x85, 0xfa, 0x00,
	0x00, 0x00, 0x3d, 0xb1, 0x20, 0x85, 0xfa, 0x00,
	0x00, 0x00,
}
http: improve a test
Should prevent failures on slow machines, such as:
http://godashboard.appspot.com/log/47b5cae591b7ad8908704e327f3b9b41945d7d5fecfc0c8c945d5545ece1a813
Verified the change (on a fast machine) by removing the
existing sleep, in which case the race happens ~50% of the
time with GOMAXPROCS > 1, but recovers gracefully with
retries.
R=rsc
CC=golang-dev
https://golang.org/cl/4441089
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Tests for transport.go
package http_test
import (
"bytes"
"compress/gzip"
"crypto/rand"
"fmt"
. "http"
"http/httptest"
"io"
"io/ioutil"
"os"
"strconv"
"testing"
"time"
)
// TODO: test 5 pipelined requests with responses: 1) OK, 2) OK, Connection: Close
// and then verify that the final 2 responses get errors back.
// hostPortHandler writes back the client's "host:port".
// If the request carries ?close=true it also asks the client to close
// the connection, which lets tests force a fresh connection per request.
var hostPortHandler = HandlerFunc(func(w ResponseWriter, r *Request) {
	if r.FormValue("close") == "true" {
		w.Header().Set("Connection", "close")
	}
	w.Write([]byte(r.RemoteAddr))
})

// Two subsequent requests and verify their response is the same.
// The response from the server is our own IP:port
// With keep-alives on, both requests should reuse one connection (same
// remote addr); with keep-alives disabled the addresses must differ.
func TestTransportKeepAlives(t *testing.T) {
	ts := httptest.NewServer(hostPortHandler)
	defer ts.Close()
	for _, disableKeepAlive := range []bool{false, true} {
		tr := &Transport{DisableKeepAlives: disableKeepAlive}
		c := &Client{Transport: tr}
		fetch := func(n int) string {
			res, _, err := c.Get(ts.URL)
			if err != nil {
				t.Fatalf("error in disableKeepAlive=%v, req #%d, GET: %v", disableKeepAlive, n, err)
			}
			body, err := ioutil.ReadAll(res.Body)
			if err != nil {
				t.Fatalf("error in disableKeepAlive=%v, req #%d, ReadAll: %v", disableKeepAlive, n, err)
			}
			return string(body)
		}
		body1 := fetch(1)
		body2 := fetch(2)
		bodiesDiffer := body1 != body2
		if bodiesDiffer != disableKeepAlive {
			t.Errorf("error in disableKeepAlive=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q",
				disableKeepAlive, bodiesDiffer, body1, body2)
		}
	}
}

// TestTransportConnectionCloseOnResponse verifies that a server-sent
// "Connection: close" (triggered via ?close=true) forces a new connection
// for the next request, while its absence allows reuse.
func TestTransportConnectionCloseOnResponse(t *testing.T) {
	ts := httptest.NewServer(hostPortHandler)
	defer ts.Close()
	for _, connectionClose := range []bool{false, true} {
		tr := &Transport{}
		c := &Client{Transport: tr}
		fetch := func(n int) string {
			req := new(Request)
			var err os.Error
			req.URL, err = ParseURL(ts.URL + fmt.Sprintf("?close=%v", connectionClose))
			if err != nil {
				t.Fatalf("URL parse error: %v", err)
			}
			req.Method = "GET"
			req.Proto = "HTTP/1.1"
			req.ProtoMajor = 1
			req.ProtoMinor = 1
			res, err := c.Do(req)
			if err != nil {
				t.Fatalf("error in connectionClose=%v, req #%d, Do: %v", connectionClose, n, err)
			}
			body, err := ioutil.ReadAll(res.Body)
			defer res.Body.Close()
			if err != nil {
				t.Fatalf("error in connectionClose=%v, req #%d, ReadAll: %v", connectionClose, n, err)
			}
			return string(body)
		}
		body1 := fetch(1)
		body2 := fetch(2)
		bodiesDiffer := body1 != body2
		if bodiesDiffer != connectionClose {
			t.Errorf("error in connectionClose=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q",
				connectionClose, bodiesDiffer, body1, body2)
		}
	}
}

// TestTransportConnectionCloseOnRequest is the client-side counterpart:
// setting req.Close should prevent connection reuse for the next request.
func TestTransportConnectionCloseOnRequest(t *testing.T) {
	ts := httptest.NewServer(hostPortHandler)
	defer ts.Close()
	for _, connectionClose := range []bool{false, true} {
		tr := &Transport{}
		c := &Client{Transport: tr}
		fetch := func(n int) string {
			req := new(Request)
			var err os.Error
			req.URL, err = ParseURL(ts.URL)
			if err != nil {
				t.Fatalf("URL parse error: %v", err)
			}
			req.Method = "GET"
			req.Proto = "HTTP/1.1"
			req.ProtoMajor = 1
			req.ProtoMinor = 1
			// The knob under test: request-side Connection: close.
			req.Close = connectionClose
			res, err := c.Do(req)
			if err != nil {
				t.Fatalf("error in connectionClose=%v, req #%d, Do: %v", connectionClose, n, err)
			}
			body, err := ioutil.ReadAll(res.Body)
			if err != nil {
				t.Fatalf("error in connectionClose=%v, req #%d, ReadAll: %v", connectionClose, n, err)
			}
			return string(body)
		}
		body1 := fetch(1)
		body2 := fetch(2)
		bodiesDiffer := body1 != body2
		if bodiesDiffer != connectionClose {
			t.Errorf("error in connectionClose=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q",
				connectionClose, bodiesDiffer, body1, body2)
		}
	}
}
// TestTransportIdleCacheKeys verifies that the transport tracks exactly one
// idle-connection cache key per server after a completed request, that the
// key has the expected "|http|host:port" form, and that CloseIdleConnections
// empties the cache again.
func TestTransportIdleCacheKeys(t *testing.T) {
	ts := httptest.NewServer(hostPortHandler)
	defer ts.Close()
	tr := &Transport{DisableKeepAlives: false}
	c := &Client{Transport: tr}
	if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g {
		t.Errorf("After CloseIdleConnections expected %d idle conn cache keys; got %d", e, g)
	}
	resp, _, err := c.Get(ts.URL)
	if err != nil {
		// Fatal, not Error: on failure resp is nil and the ReadAll
		// below would panic with a nil pointer dereference.
		t.Fatal(err)
	}
	// Drain the body so the connection is returned to the idle pool.
	ioutil.ReadAll(resp.Body)
	keys := tr.IdleConnKeysForTesting()
	if e, g := 1, len(keys); e != g {
		t.Fatalf("After Get expected %d idle conn cache keys; got %d", e, g)
	}
	if e := "|http|" + ts.Listener.Addr().String(); keys[0] != e {
		t.Errorf("Expected idle cache key %q; got %q", e, keys[0])
	}
	tr.CloseIdleConnections()
	if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g {
		t.Errorf("After CloseIdleConnections expected %d idle conn cache keys; got %d", e, g)
	}
}
// TestTransportMaxPerHostIdleConns verifies that the idle pool never holds
// more than MaxIdleConnsPerHost connections for one host: three concurrent
// requests complete one at a time, and the idle count grows 1, 2, then stays
// capped at maxIdleConns after the third.
func TestTransportMaxPerHostIdleConns(t *testing.T) {
	resch := make(chan string)
	gotReq := make(chan bool)
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		gotReq <- true
		msg := <-resch
		_, err := w.Write([]byte(msg))
		if err != nil {
			t.Fatalf("Write: %v", err)
		}
	}))
	defer ts.Close()
	maxIdleConns := 2
	tr := &Transport{DisableKeepAlives: false, MaxIdleConnsPerHost: maxIdleConns}
	c := &Client{Transport: tr}
	// Start 3 outstanding requests and wait for the server to get them.
	// Their responses will hang until we write to resch, though.
	donech := make(chan bool)
	doReq := func() {
		resp, _, err := c.Get(ts.URL)
		if err != nil {
			t.Error(err)
		}
		_, err = ioutil.ReadAll(resp.Body)
		if err != nil {
			t.Fatalf("ReadAll: %v", err)
		}
		donech <- true
	}
	go doReq()
	<-gotReq
	go doReq()
	<-gotReq
	go doReq()
	<-gotReq
	// All three connections are busy, so nothing is idle yet.
	if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g {
		t.Fatalf("Before writes, expected %d idle conn cache keys; got %d", e, g)
	}
	// Release the responses one at a time and watch the idle count grow.
	resch <- "res1"
	<-donech
	keys := tr.IdleConnKeysForTesting()
	if e, g := 1, len(keys); e != g {
		t.Fatalf("after first response, expected %d idle conn cache keys; got %d", e, g)
	}
	cacheKey := "|http|" + ts.Listener.Addr().String()
	if keys[0] != cacheKey {
		t.Fatalf("Expected idle cache key %q; got %q", cacheKey, keys[0])
	}
	if e, g := 1, tr.IdleConnCountForTesting(cacheKey); e != g {
		t.Errorf("after first response, expected %d idle conns; got %d", e, g)
	}
	resch <- "res2"
	<-donech
	if e, g := 2, tr.IdleConnCountForTesting(cacheKey); e != g {
		t.Errorf("after second response, expected %d idle conns; got %d", e, g)
	}
	resch <- "res3"
	<-donech
	// The third idle connection must be dropped, not pooled.
	if e, g := maxIdleConns, tr.IdleConnCountForTesting(cacheKey); e != g {
		t.Errorf("after third response, still expected %d idle conns; got %d", e, g)
	}
}
// TestTransportServerClosingUnexpectedly verifies that the transport
// recovers when the server drops all client connections: a later request
// must eventually succeed on a fresh connection (different remote port).
// fetch takes a retry budget because the post-close request is racy.
func TestTransportServerClosingUnexpectedly(t *testing.T) {
	ts := httptest.NewServer(hostPortHandler)
	defer ts.Close()
	tr := &Transport{}
	c := &Client{Transport: tr}
	fetch := func(n, retries int) string {
		// condFatalf fails hard only once the retry budget is spent;
		// otherwise it logs and backs off briefly.
		condFatalf := func(format string, arg ...interface{}) {
			if retries <= 0 {
				t.Fatalf(format, arg...)
			}
			t.Logf("retrying shortly after expected error: "+format, arg...)
			time.Sleep(1e9 / int64(retries))
		}
		for retries >= 0 {
			retries--
			res, _, err := c.Get(ts.URL)
			if err != nil {
				condFatalf("error in req #%d, GET: %v", n, err)
				continue
			}
			body, err := ioutil.ReadAll(res.Body)
			if err != nil {
				condFatalf("error in req #%d, ReadAll: %v", n, err)
				continue
			}
			res.Body.Close()
			return string(body)
		}
		panic("unreachable")
	}
	body1 := fetch(1, 0)
	body2 := fetch(2, 0)
	ts.CloseClientConnections() // surprise!
	// This test has an expected race. Sleeping for 25 ms prevents
	// it on most fast machines, causing the next fetch() call to
	// succeed quickly. But if we do get errors, fetch() will retry 5
	// times with some delays between.
	time.Sleep(25e6)
	body3 := fetch(3, 5)
	// body1 == body2: keep-alive reused the connection.
	if body1 != body2 {
		t.Errorf("expected body1 and body2 to be equal")
	}
	// body3 differs: the dropped connection forced a new one.
	if body2 == body3 {
		t.Errorf("expected body2 and body3 to be different")
	}
}
// TestTransportHeadResponses verifies that we deal with Content-Lengths
// with no bodies properly
// (the header must be passed through while res.ContentLength stays 0,
// and the bodiless response must not poison the reused connection).
func TestTransportHeadResponses(t *testing.T) {
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		if r.Method != "HEAD" {
			panic("expected HEAD; got " + r.Method)
		}
		w.Header().Set("Content-Length", "123")
		w.WriteHeader(200)
	}))
	defer ts.Close()
	tr := &Transport{DisableKeepAlives: false}
	c := &Client{Transport: tr}
	// Two iterations so the second request exercises connection reuse.
	for i := 0; i < 2; i++ {
		res, err := c.Head(ts.URL)
		if err != nil {
			t.Errorf("error on loop %d: %v", i, err)
		}
		if e, g := "123", res.Header.Get("Content-Length"); e != g {
			t.Errorf("loop %d: expected Content-Length header of %q, got %q", i, e, g)
		}
		if e, g := int64(0), res.ContentLength; e != g {
			t.Errorf("loop %d: expected res.ContentLength of %v, got %v", i, e, g)
		}
	}
}

// TestTransportHeadChunkedResponse verifies that we ignore chunked transfer-encoding
// on responses to HEAD requests.
// Matching x-client-ipport headers across the two requests shows the
// connection was reused, i.e. the bogus chunked header didn't break it.
func TestTransportHeadChunkedResponse(t *testing.T) {
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		if r.Method != "HEAD" {
			panic("expected HEAD; got " + r.Method)
		}
		w.Header().Set("Transfer-Encoding", "chunked") // client should ignore
		w.Header().Set("x-client-ipport", r.RemoteAddr)
		w.WriteHeader(200)
	}))
	defer ts.Close()
	tr := &Transport{DisableKeepAlives: false}
	c := &Client{Transport: tr}
	res1, err := c.Head(ts.URL)
	if err != nil {
		t.Fatalf("request 1 error: %v", err)
	}
	res2, err := c.Head(ts.URL)
	if err != nil {
		t.Fatalf("request 2 error: %v", err)
	}
	if v1, v2 := res1.Header.Get("x-client-ipport"), res2.Header.Get("x-client-ipport"); v1 != v2 {
		t.Errorf("ip/ports differed between head requests: %q vs %q", v1, v2)
	}
}
// TestTransportNilURL verifies that RoundTrip falls back to req.RawURL
// when req.URL is nil and still performs the request successfully.
func TestTransportNilURL(t *testing.T) {
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		fmt.Fprintf(w, "Hi")
	}))
	defer ts.Close()
	req := new(Request)
	req.URL = nil // what we're actually testing
	req.Method = "GET"
	req.RawURL = ts.URL
	req.Proto = "HTTP/1.1"
	req.ProtoMajor = 1
	req.ProtoMinor = 1
	req.Header = make(Header)
	tr := &Transport{}
	res, err := tr.RoundTrip(req)
	if err != nil {
		t.Fatalf("unexpected RoundTrip error: %v", err)
	}
	body, err := ioutil.ReadAll(res.Body)
	// Previously this error was silently dropped; a failed read would
	// have surfaced as a confusing body-mismatch failure instead.
	if err != nil {
		t.Fatalf("unexpected ReadAll error: %v", err)
	}
	res.Body.Close()
	if g, e := string(body), "Hi"; g != e {
		t.Fatalf("Expected response body of %q; got %q", e, g)
	}
}
// TestTransportGzip verifies transparent response decompression: the
// transport advertises Accept-Encoding: gzip, decodes the gzipped body,
// strips the Content-Encoding header, and behaves sanely when the body is
// only partially read or read after Close. Both chunked and
// Content-Length responses are exercised.
func TestTransportGzip(t *testing.T) {
	const testString = "The test string aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
	const nRandBytes = 1024 * 1024
	ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
		if g, e := req.Header.Get("Accept-Encoding"), "gzip"; g != e {
			t.Errorf("Accept-Encoding = %q, want %q", g, e)
		}
		rw.Header().Set("Content-Encoding", "gzip")
		var w io.Writer = rw
		var buf bytes.Buffer
		if req.FormValue("chunked") == "0" {
			// Buffer the whole response so a Content-Length can be
			// set before anything is written to the wire. The defers
			// run in LIFO order: set the header, then copy the body.
			w = &buf
			defer io.Copy(rw, &buf)
			defer func() {
				rw.Header().Set("Content-Length", strconv.Itoa(buf.Len()))
			}()
		}
		gz, _ := gzip.NewWriter(w)
		gz.Write([]byte(testString))
		if req.FormValue("body") == "large" {
			io.Copyn(gz, rand.Reader, nRandBytes)
		}
		gz.Close()
	}))
	defer ts.Close()
	for _, chunked := range []string{"1", "0"} {
		c := &Client{Transport: &Transport{}}
		// First fetch something large, but only read some of it.
		res, _, err := c.Get(ts.URL + "?body=large&chunked=" + chunked)
		if err != nil {
			t.Fatalf("large get: %v", err)
		}
		buf := make([]byte, len(testString))
		n, err := io.ReadFull(res.Body, buf)
		if err != nil {
			t.Fatalf("partial read of large response: size=%d, %v", n, err)
		}
		if e, g := testString, string(buf); e != g {
			t.Errorf("partial read got %q, expected %q", g, e)
		}
		res.Body.Close()
		// Read on the body, even though it's closed
		n, err = res.Body.Read(buf)
		if n != 0 || err == nil {
			t.Errorf("expected error post-closed large Read; got = %d, %v", n, err)
		}
		// Then something small.
		res, _, err = c.Get(ts.URL + "?chunked=" + chunked)
		if err != nil {
			t.Fatal(err)
		}
		body, err := ioutil.ReadAll(res.Body)
		if err != nil {
			t.Fatal(err)
		}
		if g, e := string(body), testString; g != e {
			t.Fatalf("body = %q; want %q", g, e)
		}
		if g, e := res.Header.Get("Content-Encoding"), ""; g != e {
			t.Fatalf("Content-Encoding = %q; want %q", g, e)
		}
		// Read on the body after it's been fully read:
		n, err = res.Body.Read(buf)
		if n != 0 || err == nil {
			t.Errorf("expected Read error after exhausted reads; got %d, %v", n, err)
		}
		res.Body.Close()
		n, err = res.Body.Read(buf)
		if n != 0 || err == nil {
			t.Errorf("expected Read error after Close; got %d, %v", n, err)
		}
	}
}
// TestTransportGzipRecursive sends a gzip quine and checks that the
// client gets the same value back. This is more cute than anything,
// but checks that we don't recurse forever, and checks that
// Content-Encoding is removed.
func TestTransportGzipRecursive(t *testing.T) {
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		w.Header().Set("Content-Encoding", "gzip")
		w.Write(rgz)
	}))
	defer ts.Close()
	c := &Client{Transport: &Transport{}}
	res, _, err := c.Get(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	got, err := ioutil.ReadAll(res.Body)
	if err != nil {
		t.Fatal(err)
	}
	// One decode of the quine must reproduce the quine itself.
	if !bytes.Equal(got, rgz) {
		t.Fatalf("Incorrect result from recursive gz:\nhave=%x\nwant=%x",
			got, rgz)
	}
	if ce := res.Header.Get("Content-Encoding"); ce != "" {
		t.Fatalf("Content-Encoding = %q; want %q", ce, "")
	}
}
// rgz is a gzip quine that uncompresses to itself.
// NOTE: opaque test fixture — the bytes must be kept exactly as-is.
var rgz = []byte{
0x1f, 0x8b, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73,
0x69, 0x76, 0x65, 0x00, 0x92, 0xef, 0xe6, 0xe0,
0x60, 0x00, 0x83, 0xa2, 0xd4, 0xe4, 0xd2, 0xa2,
0xe2, 0xcc, 0xb2, 0x54, 0x06, 0x00, 0x00, 0x17,
0x00, 0xe8, 0xff, 0x92, 0xef, 0xe6, 0xe0, 0x60,
0x00, 0x83, 0xa2, 0xd4, 0xe4, 0xd2, 0xa2, 0xe2,
0xcc, 0xb2, 0x54, 0x06, 0x00, 0x00, 0x17, 0x00,
0xe8, 0xff, 0x42, 0x12, 0x46, 0x16, 0x06, 0x00,
0x05, 0x00, 0xfa, 0xff, 0x42, 0x12, 0x46, 0x16,
0x06, 0x00, 0x05, 0x00, 0xfa, 0xff, 0x00, 0x05,
0x00, 0xfa, 0xff, 0x00, 0x14, 0x00, 0xeb, 0xff,
0x42, 0x12, 0x46, 0x16, 0x06, 0x00, 0x05, 0x00,
0xfa, 0xff, 0x00, 0x05, 0x00, 0xfa, 0xff, 0x00,
0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4,
0x00, 0x00, 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88,
0x21, 0xc4, 0x00, 0x00, 0x14, 0x00, 0xeb, 0xff,
0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, 0x14, 0x00,
0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00,
0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00,
0x00, 0xff, 0xff, 0x00, 0x17, 0x00, 0xe8, 0xff,
0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00,
0x17, 0x00, 0xe8, 0xff, 0x42, 0x12, 0x46, 0x16,
0x06, 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x08,
0x00, 0xf7, 0xff, 0x3d, 0xb1, 0x20, 0x85, 0xfa,
0x00, 0x00, 0x00, 0x42, 0x12, 0x46, 0x16, 0x06,
0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x08, 0x00,
0xf7, 0xff, 0x3d, 0xb1, 0x20, 0x85, 0xfa, 0x00,
0x00, 0x00, 0x3d, 0xb1, 0x20, 0x85, 0xfa, 0x00,
0x00, 0x00,
}
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fcgi
// This file implements FastCGI from the perspective of a child process.
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/cgi"
"os"
"strings"
"time"
)
// request holds the state for an in-progress request. As soon as it's complete,
// it's converted to an http.Request.
type request struct {
pw *io.PipeWriter // write side of the pipe feeding the handler's Request.Body
reqId uint16 // FastCGI request ID this state belongs to
params map[string]string // decoded FCGI_PARAMS key/value pairs
buf [1024]byte // scratch storage backing rawParams for small parameter sets
rawParams []byte // accumulated, not-yet-parsed FCGI_PARAMS payload
keepConn bool // host set FCGI_KEEP_CONN: leave the socket open afterwards
}
// newRequest returns request state for the given FastCGI request ID,
// recording whether the host asked to keep the connection open.
func newRequest(reqId uint16, flags uint8) *request {
r := &request{
reqId: reqId,
params: map[string]string{},
keepConn: flags&flagKeepConn != 0,
}
// Accumulate raw params in the in-struct buffer first, avoiding a
// separate allocation for typical parameter sizes.
r.rawParams = r.buf[:0]
return r
}
// parseParams reads an encoded []byte into Params.
// Pairs are laid out as <keyLen><valLen><key><val>; readSize reporting zero
// consumed bytes indicates a truncated length, which ends the parse early.
func (r *request) parseParams() {
text := r.rawParams
r.rawParams = nil
for len(text) > 0 {
keyLen, n := readSize(text)
if n == 0 {
return
}
text = text[n:]
valLen, n := readSize(text)
if n == 0 {
return
}
text = text[n:]
key := readString(text, keyLen)
text = text[keyLen:]
val := readString(text, valLen)
text = text[valLen:]
r.params[key] = val
}
}
// response implements http.ResponseWriter.
type response struct {
req *request // the request this response answers
header http.Header // headers to send; flushed on first Write/WriteHeader
w *bufWriter // buffered writer emitting typeStdout records
wroteHeader bool // whether the status line and headers were already sent
}
// newResponse creates a response whose output is framed as FastCGI stdout
// records for req's ID on the child's connection.
func newResponse(c *child, req *request) *response {
return &response{
req: req,
header: http.Header{},
w: newWriter(c.conn, typeStdout, req.reqId),
}
}
// Header returns the response header map, per http.ResponseWriter.
func (r *response) Header() http.Header {
return r.header
}
// Write sends body data, emitting the headers first if not yet written.
func (r *response) Write(data []byte) (int, error) {
if !r.wroteHeader {
r.WriteHeader(http.StatusOK)
}
return r.w.Write(data)
}
// WriteHeader emits the CGI-style "Status:" line and headers exactly once;
// later calls are no-ops.
func (r *response) WriteHeader(code int) {
if r.wroteHeader {
return
}
r.wroteHeader = true
if code == http.StatusNotModified {
// Must not have body.
r.header.Del("Content-Type")
r.header.Del("Content-Length")
r.header.Del("Transfer-Encoding")
} else if r.header.Get("Content-Type") == "" {
r.header.Set("Content-Type", "text/html; charset=utf-8")
}
if r.header.Get("Date") == "" {
r.header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
}
fmt.Fprintf(r.w, "Status: %d %s\r\n", code, http.StatusText(code))
r.header.Write(r.w)
r.w.WriteString("\r\n")
}
// Flush pushes buffered response data out, sending headers if necessary.
func (r *response) Flush() {
if !r.wroteHeader {
r.WriteHeader(http.StatusOK)
}
r.w.Flush()
}
// Close flushes remaining data and closes the stdout record stream.
func (r *response) Close() error {
r.Flush()
return r.w.Close()
}
// child handles one FastCGI connection, multiplexing concurrent requests
// over it.
type child struct {
conn *conn // record-framed connection to the host web server
handler http.Handler // handler invoked for each complete request
requests map[uint16]*request // keyed by request ID
}
// newChild wraps rwc in a FastCGI conn and prepares the request table.
func newChild(rwc io.ReadWriteCloser, handler http.Handler) *child {
return &child{
conn: newConn(rwc),
handler: handler,
requests: make(map[uint16]*request),
}
}
// serve reads records from the connection until a read or handling error
// occurs, then closes the connection.
func (c *child) serve() {
defer c.conn.Close()
var rec record
for {
if err := rec.read(c.conn.rwc); err != nil {
return
}
if err := c.handleRecord(&rec); err != nil {
return
}
}
}
// errCloseConn signals that the current connection must be torn down.
var errCloseConn = errors.New("fcgi: connection should be closed")
// emptyBody is a shared, reusable empty request body.
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// handleRecord processes one FastCGI record. It returns errCloseConn when
// the connection should be torn down, any other non-nil error to abort the
// serve loop, or nil to continue reading records.
func (c *child) handleRecord(rec *record) error {
req, ok := c.requests[rec.h.Id]
if !ok && rec.h.Type != typeBeginRequest && rec.h.Type != typeGetValues {
// The spec says to ignore unknown request IDs.
return nil
}
if ok && rec.h.Type == typeBeginRequest {
// The server is trying to begin a request with the same ID
// as an in-progress request. This is an error.
return errors.New("fcgi: received ID that is already in-flight")
}
switch rec.h.Type {
case typeBeginRequest:
var br beginRequest
if err := br.read(rec.content()); err != nil {
return err
}
if br.role != roleResponder {
// Only the responder role is supported; decline other roles.
c.conn.writeEndRequest(rec.h.Id, 0, statusUnknownRole)
return nil
}
c.requests[rec.h.Id] = newRequest(rec.h.Id, br.flags)
case typeParams:
// NOTE(eds): Technically a key-value pair can straddle the boundary
// between two packets. We buffer until we've received all parameters.
if len(rec.content()) > 0 {
req.rawParams = append(req.rawParams, rec.content()...)
return nil
}
req.parseParams()
case typeStdin:
content := rec.content()
if req.pw == nil {
var body io.ReadCloser
if len(content) > 0 {
// body could be an io.LimitReader, but it shouldn't matter
// as long as both sides are behaving.
body, req.pw = io.Pipe()
} else {
body = emptyBody
}
// Params are complete and stdin has started: hand off to the handler.
go c.serveRequest(req, body)
}
if len(content) > 0 {
// TODO(eds): This blocks until the handler reads from the pipe.
// If the handler takes a long time, it might be a problem.
req.pw.Write(content)
} else if req.pw != nil {
// A zero-length stdin record marks the end of the request body.
req.pw.Close()
}
case typeGetValues:
values := map[string]string{"FCGI_MPXS_CONNS": "1"}
c.conn.writePairs(typeGetValuesResult, 0, values)
case typeData:
// If the filter role is implemented, read the data stream here.
case typeAbortRequest:
delete(c.requests, rec.h.Id)
c.conn.writeEndRequest(rec.h.Id, 0, statusRequestComplete)
if !req.keepConn {
// connection will close upon return
return errCloseConn
}
default:
// Unknown record type: report it back to the host.
b := make([]byte, 8)
b[0] = byte(rec.h.Type)
c.conn.writeRecord(typeUnknownType, 0, b)
}
return nil
}
// serveRequest runs the handler for req and completes the FastCGI
// request/response exchange.
//
// The end-request record is written to the host BEFORE the request body is
// drained and closed. Previously body and socket were closed while the host
// could still be writing request data, so the child sent a TCP RST and the
// host (at least nginx) returned an empty response body
// (golang.org/issue/4183).
func (c *child) serveRequest(req *request, body io.ReadCloser) {
	r := newResponse(c, req)
	httpReq, err := cgi.RequestFromMap(req.params)
	if err != nil {
		// there was an error reading the request
		r.WriteHeader(http.StatusInternalServerError)
		c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error()))
	} else {
		httpReq.Body = body
		c.handler.ServeHTTP(r, httpReq)
	}
	// Tell the host we're done first; that increases the chance it stops
	// sending request data we would otherwise have to drain below.
	r.Close()
	c.conn.writeEndRequest(req.reqId, 0, statusRequestComplete)
	// Drain (bounded to 100MB) whatever body the host is still sending so
	// closing the socket below doesn't RST in-flight writes.
	io.CopyN(ioutil.Discard, body, 100<<20)
	body.Close()
	if !req.keepConn {
		c.conn.Close()
	}
}
// Serve accepts incoming FastCGI connections on the listener l, creating a new
// goroutine for each. The goroutine reads requests and then calls handler
// to reply to them.
// If l is nil, Serve accepts connections from os.Stdin.
// If handler is nil, http.DefaultServeMux is used.
func Serve(l net.Listener, handler http.Handler) error {
	if handler == nil {
		handler = http.DefaultServeMux
	}
	if l == nil {
		// Spawned by the web server: inherit the listening socket on
		// stdin, per the FastCGI convention.
		fl, err := net.FileListener(os.Stdin)
		if err != nil {
			return err
		}
		l = fl
		defer l.Close()
	}
	// Accept loop: one child goroutine per connection. Serve only returns
	// on an Accept error.
	for {
		rw, err := l.Accept()
		if err != nil {
			return err
		}
		go newChild(rw, handler).serve()
	}
}
net/http/fcgi: fix a shutdown race
If a handler didn't consume all its Request.Body, child.go was
closing the socket while the host was still writing to it,
causing the child to send a RST and the host (at least nginx)
to send an empty response body.
Now, we tell the host we're done with the request/response
first, and then close our input pipe after consuming a bit of
it. Consuming the body fixes the problem, and flushing to the
host first to tell it that we're done increases the chance
that the host cuts off further data to us, meaning we won't
have much to consume.
No new tests, because this package is lacking in tests.
Tested by hand with nginx. See issue for testing details.
Fixes issue 4183
R=golang-dev, rsc
CC=golang-dev
https://codereview.appspot.com/7939045
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fcgi
// This file implements FastCGI from the perspective of a child process.
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/cgi"
"os"
"strings"
"time"
)
// request holds the state for an in-progress request. As soon as it's complete,
// it's converted to an http.Request.
type request struct {
pw *io.PipeWriter // write side of the pipe feeding the handler's Request.Body
reqId uint16 // FastCGI request ID this state belongs to
params map[string]string // decoded FCGI_PARAMS key/value pairs
buf [1024]byte // scratch storage backing rawParams for small parameter sets
rawParams []byte // accumulated, not-yet-parsed FCGI_PARAMS payload
keepConn bool // host set FCGI_KEEP_CONN: leave the socket open afterwards
}
// newRequest returns request state for the given FastCGI request ID,
// recording whether the host asked to keep the connection open.
func newRequest(reqId uint16, flags uint8) *request {
r := &request{
reqId: reqId,
params: map[string]string{},
keepConn: flags&flagKeepConn != 0,
}
// Accumulate raw params in the in-struct buffer first, avoiding a
// separate allocation for typical parameter sizes.
r.rawParams = r.buf[:0]
return r
}
// parseParams reads an encoded []byte into Params.
// Pairs are laid out as <keyLen><valLen><key><val>; readSize reporting zero
// consumed bytes indicates a truncated length, which ends the parse early.
func (r *request) parseParams() {
text := r.rawParams
r.rawParams = nil
for len(text) > 0 {
keyLen, n := readSize(text)
if n == 0 {
return
}
text = text[n:]
valLen, n := readSize(text)
if n == 0 {
return
}
text = text[n:]
key := readString(text, keyLen)
text = text[keyLen:]
val := readString(text, valLen)
text = text[valLen:]
r.params[key] = val
}
}
// response implements http.ResponseWriter.
type response struct {
req *request // the request this response answers
header http.Header // headers to send; flushed on first Write/WriteHeader
w *bufWriter // buffered writer emitting typeStdout records
wroteHeader bool // whether the status line and headers were already sent
}
// newResponse creates a response whose output is framed as FastCGI stdout
// records for req's ID on the child's connection.
func newResponse(c *child, req *request) *response {
return &response{
req: req,
header: http.Header{},
w: newWriter(c.conn, typeStdout, req.reqId),
}
}
// Header returns the response header map, per http.ResponseWriter.
func (r *response) Header() http.Header {
return r.header
}
// Write sends body data, emitting the headers first if not yet written.
func (r *response) Write(data []byte) (int, error) {
if !r.wroteHeader {
r.WriteHeader(http.StatusOK)
}
return r.w.Write(data)
}
// WriteHeader emits the CGI-style "Status:" line and headers exactly once;
// later calls are no-ops.
func (r *response) WriteHeader(code int) {
if r.wroteHeader {
return
}
r.wroteHeader = true
if code == http.StatusNotModified {
// Must not have body.
r.header.Del("Content-Type")
r.header.Del("Content-Length")
r.header.Del("Transfer-Encoding")
} else if r.header.Get("Content-Type") == "" {
r.header.Set("Content-Type", "text/html; charset=utf-8")
}
if r.header.Get("Date") == "" {
r.header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
}
fmt.Fprintf(r.w, "Status: %d %s\r\n", code, http.StatusText(code))
r.header.Write(r.w)
r.w.WriteString("\r\n")
}
// Flush pushes buffered response data out, sending headers if necessary.
func (r *response) Flush() {
if !r.wroteHeader {
r.WriteHeader(http.StatusOK)
}
r.w.Flush()
}
// Close flushes remaining data and closes the stdout record stream.
func (r *response) Close() error {
r.Flush()
return r.w.Close()
}
// child handles one FastCGI connection, multiplexing concurrent requests
// over it.
type child struct {
conn *conn // record-framed connection to the host web server
handler http.Handler // handler invoked for each complete request
requests map[uint16]*request // keyed by request ID
}
// newChild wraps rwc in a FastCGI conn and prepares the request table.
func newChild(rwc io.ReadWriteCloser, handler http.Handler) *child {
return &child{
conn: newConn(rwc),
handler: handler,
requests: make(map[uint16]*request),
}
}
// serve reads records from the connection until a read or handling error
// occurs, then closes the connection.
func (c *child) serve() {
defer c.conn.Close()
var rec record
for {
if err := rec.read(c.conn.rwc); err != nil {
return
}
if err := c.handleRecord(&rec); err != nil {
return
}
}
}
// errCloseConn signals that the current connection must be torn down.
var errCloseConn = errors.New("fcgi: connection should be closed")
// emptyBody is a shared, reusable empty request body.
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// handleRecord processes one FastCGI record, dispatching on its type.
// It returns errCloseConn when the connection should be torn down, any other
// non-nil error to abort the serve loop, or nil to continue reading records.
func (c *child) handleRecord(rec *record) error {
	req, ok := c.requests[rec.h.Id]
	if !ok && rec.h.Type != typeBeginRequest && rec.h.Type != typeGetValues {
		// The spec says to ignore unknown request IDs.
		return nil
	}
	switch rec.h.Type {
	case typeBeginRequest:
		if req != nil {
			// The server is trying to begin a request with the same ID
			// as an in-progress request. This is an error.
			return errors.New("fcgi: received ID that is already in-flight")
		}
		var br beginRequest
		if err := br.read(rec.content()); err != nil {
			return err
		}
		if br.role != roleResponder {
			// Only the responder role is supported; decline other roles.
			c.conn.writeEndRequest(rec.h.Id, 0, statusUnknownRole)
			return nil
		}
		c.requests[rec.h.Id] = newRequest(rec.h.Id, br.flags)
		return nil
	case typeParams:
		// NOTE(eds): Technically a key-value pair can straddle the boundary
		// between two packets. We buffer until we've received all parameters.
		if len(rec.content()) > 0 {
			req.rawParams = append(req.rawParams, rec.content()...)
			return nil
		}
		req.parseParams()
		return nil
	case typeStdin:
		content := rec.content()
		if req.pw == nil {
			var body io.ReadCloser
			if len(content) > 0 {
				// body could be an io.LimitReader, but it shouldn't matter
				// as long as both sides are behaving.
				body, req.pw = io.Pipe()
			} else {
				body = emptyBody
			}
			// Params are complete and stdin has started: hand off to the handler.
			go c.serveRequest(req, body)
		}
		if len(content) > 0 {
			// TODO(eds): This blocks until the handler reads from the pipe.
			// If the handler takes a long time, it might be a problem.
			req.pw.Write(content)
		} else if req.pw != nil {
			// A zero-length stdin record marks the end of the request body.
			req.pw.Close()
		}
		return nil
	case typeGetValues:
		values := map[string]string{"FCGI_MPXS_CONNS": "1"}
		c.conn.writePairs(typeGetValuesResult, 0, values)
		return nil
	case typeData:
		// If the filter role is implemented, read the data stream here.
		return nil
	case typeAbortRequest:
		// (A stray debug `println("abort")` was removed here: library code
		// must not write to the process's stdout.)
		delete(c.requests, rec.h.Id)
		c.conn.writeEndRequest(rec.h.Id, 0, statusRequestComplete)
		if !req.keepConn {
			// connection will close upon return
			return errCloseConn
		}
		return nil
	default:
		// Unknown record type: report it back to the host.
		b := make([]byte, 8)
		b[0] = byte(rec.h.Type)
		c.conn.writeRecord(typeUnknownType, 0, b)
		return nil
	}
}
// serveRequest dispatches a parsed request to the handler and finishes the
// FastCGI request/response exchange once the handler returns.
func (c *child) serveRequest(req *request, body io.ReadCloser) {
r := newResponse(c, req)
httpReq, err := cgi.RequestFromMap(req.params)
if err != nil {
// there was an error reading the request
r.WriteHeader(http.StatusInternalServerError)
c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error()))
} else {
httpReq.Body = body
c.handler.ServeHTTP(r, httpReq)
}
r.Close()
c.conn.writeEndRequest(req.reqId, 0, statusRequestComplete)
// Consume the entire body, so the host isn't still writing to
// us when we close the socket below in the !keepConn case,
// otherwise we'd send a RST. (golang.org/issue/4183)
// TODO(bradfitz): also bound this copy in time. Or send
// some sort of abort request to the host, so the host
// can properly cut off the client sending all the data.
// For now just bound it a little, draining at most 100MB.
io.CopyN(ioutil.Discard, body, 100<<20)
body.Close()
if !req.keepConn {
c.conn.Close()
}
}
// Serve accepts incoming FastCGI connections on the listener l, creating a new
// goroutine for each. The goroutine reads requests and then calls handler
// to reply to them.
// If l is nil, Serve accepts connections from os.Stdin.
// If handler is nil, http.DefaultServeMux is used.
func Serve(l net.Listener, handler http.Handler) error {
if l == nil {
var err error
// Spawned by the web server: inherit the listening socket on stdin,
// per the FastCGI convention.
l, err = net.FileListener(os.Stdin)
if err != nil {
return err
}
defer l.Close()
}
if handler == nil {
handler = http.DefaultServeMux
}
// Accept loop: one child goroutine per connection. Serve only returns
// on an Accept error.
for {
rw, err := l.Accept()
if err != nil {
return err
}
c := newChild(rw, handler)
go c.serve()
}
}
|
package main
import (
"fmt"
"github.com/hajimehoshi/ebiten"
"github.com/hajimehoshi/ebiten/ebitenutil"
"log"
"runtime"
)
const (
screenWidth = 320 // logical screen width in pixels
screenHeight = 240 // logical screen height in pixels
)
// Game holds the demo's state: an offscreen canvas render target that is
// created lazily and drawn to the screen every frame.
type Game struct {
canvasRenderTarget ebiten.RenderTargetID
}
// Update advances the game state by one tick. All rendering happens in Draw.
func (g *Game) Update() error {
// TODO: Implement
return nil
}
// Draw renders one frame: on first use it creates the offscreen canvas and
// clears it to white, then blits the canvas to the screen and overlays the
// current cursor position.
func (g *Game) Draw(gr ebiten.GraphicsContext) error {
if g.canvasRenderTarget.IsNil() {
// Lazy init: the render target is created on the first Draw call,
// presumably because a graphics context must exist first — confirm
// against the ebiten version in use.
var err error
g.canvasRenderTarget, err = ebiten.NewRenderTargetID(screenWidth, screenHeight, ebiten.FilterNearest)
if err != nil {
return err
}
gr.PushRenderTarget(g.canvasRenderTarget)
gr.Fill(0xff, 0xff, 0xff)
gr.PopRenderTarget()
}
ebiten.DrawWhole(gr.RenderTarget(g.canvasRenderTarget), screenWidth, screenHeight, ebiten.GeometryMatrixI(), ebiten.ColorMatrixI())
mx, my := ebiten.CursorPosition()
ebitenutil.DebugPrint(gr, fmt.Sprintf("(%d, %d)", mx, my))
return nil
}
// init pins the main goroutine to one OS thread — NOTE(review): presumably
// required by the graphics backend; confirm against ebiten's docs.
func init() {
runtime.LockOSThread()
}
// main runs the demo at 2x window scale, 60 frames per second.
func main() {
game := new(Game)
if err := ebiten.Run(game, screenWidth, screenHeight, 2, "Paint (Ebiten Demo)", 60); err != nil {
log.Fatal(err)
}
}
Implement paint.go
package main
import (
"fmt"
"github.com/hajimehoshi/ebiten"
"github.com/hajimehoshi/ebiten/ebitenutil"
"log"
"runtime"
)
const (
screenWidth = 320 // logical screen width in pixels
screenHeight = 240 // logical screen height in pixels
)
// Game holds the demo's state: a 1x1 brush render target used as the paint
// stamp, and the offscreen canvas that accumulates the painting.
type Game struct {
brushRenderTarget ebiten.RenderTargetID
canvasRenderTarget ebiten.RenderTargetID
}
// Update advances the game state by one tick; all work happens in Draw.
func (g *Game) Update() error {
return nil
}
// Draw renders one frame: lazily creates the brush (1x1 black) and canvas
// (white) render targets, stamps the brush onto the canvas at the cursor
// while the left mouse button is held, then blits the canvas to the screen
// with the cursor position overlaid.
func (g *Game) Draw(gr ebiten.GraphicsContext) error {
if g.brushRenderTarget.IsNil() {
// First frame: create the 1x1 brush and fill it black.
var err error
g.brushRenderTarget, err = ebiten.NewRenderTargetID(1, 1, ebiten.FilterNearest)
if err != nil {
return err
}
gr.PushRenderTarget(g.brushRenderTarget)
gr.Fill(0, 0, 0)
gr.PopRenderTarget()
}
if g.canvasRenderTarget.IsNil() {
// First frame: create the screen-sized canvas and clear it to white.
var err error
g.canvasRenderTarget, err = ebiten.NewRenderTargetID(screenWidth, screenHeight, ebiten.FilterNearest)
if err != nil {
return err
}
gr.PushRenderTarget(g.canvasRenderTarget)
gr.Fill(0xff, 0xff, 0xff)
gr.PopRenderTarget()
}
mx, my := ebiten.CursorPosition()
if ebiten.IsMouseButtonPressed(ebiten.MouseButtonLeft) {
// Paint: translate the brush to the cursor and stamp it on the canvas.
gr.PushRenderTarget(g.canvasRenderTarget)
geo := ebiten.GeometryMatrixI()
geo.Translate(float64(mx), float64(my))
ebiten.DrawWhole(gr.RenderTarget(g.brushRenderTarget), 1, 1, geo, ebiten.ColorMatrixI())
gr.PopRenderTarget()
}
ebiten.DrawWhole(gr.RenderTarget(g.canvasRenderTarget), screenWidth, screenHeight, ebiten.GeometryMatrixI(), ebiten.ColorMatrixI())
ebitenutil.DebugPrint(gr, fmt.Sprintf("(%d, %d)", mx, my))
return nil
}
// init pins the main goroutine to one OS thread — NOTE(review): presumably
// required by the graphics backend; confirm against ebiten's docs.
func init() {
runtime.LockOSThread()
}
// main runs the demo at 2x window scale, 60 frames per second.
func main() {
game := new(Game)
if err := ebiten.Run(game, screenWidth, screenHeight, 2, "Paint (Ebiten Demo)", 60); err != nil {
log.Fatal(err)
}
}
|
package statistics
import (
"math/big"
"sync"
"time"
"github.com/edinnen/Thanksgiving_Intranet/analyzer/models"
"github.com/sirupsen/logrus"
"gopkg.in/oleiade/reflections.v1"
"github.com/jmoiron/sqlx"
"github.com/sec51/goanomaly"
)
// Engine holds necessary values for our statistics engine
type Engine struct {
db *sqlx.DB // readings database handle
mutex *sync.Mutex // serializes access to db across goroutines
readings []models.CabinReading // readings loaded for the current timeRange
timeRange TimeRange // window the current readings were queried over
}
// TimeRange defines a to/from range for our queries
type TimeRange struct {
From time.Time
To time.Time
}
// NewClient creates a new statistics engine
func NewClient(db *sqlx.DB, mutex *sync.Mutex) Engine {
return Engine{
db: db,
mutex: mutex,
}
}
// SetReadingsData loads every reading whose unix timestamp falls inside the
// engine's current timeRange into stats.readings. stats.readings is replaced
// (possibly with nil) whether or not the query succeeds; any query error is
// logged and returned.
func (stats *Engine) SetReadingsData() error {
	var readings []models.CabinReading
	from := stats.timeRange.From.Unix()
	to := stats.timeRange.To.Unix()
	statement := `SELECT * FROM readings WHERE unix >= ? AND unix <= ?;`
	// Serialize DB access; the handle is shared with other goroutines.
	stats.mutex.Lock()
	err := stats.db.Select(&readings, statement, from, to)
	stats.mutex.Unlock()
	stats.readings = readings
	if err != nil {
		// Previously a failed query still logged "Found 0 readings in
		// range", hiding the error from the log entirely.
		logrus.Errorf("querying readings in range: %v", err)
		return err
	}
	logrus.Infof("Found %d readings in range", len(readings))
	return nil
}
// DetectStreamAnomalies checks for anomalous data over a three minute time
// range into the past, forever, persisting any anomalies found.
func (stats *Engine) DetectStreamAnomalies() {
	logrus.Info("Monitoring data stream for anomalies")
	for now := range time.Tick(3 * time.Minute) {
		from := now.Add(-3 * time.Minute)
		stats.timeRange = TimeRange{
			To:   now,
			From: from,
		}
		logrus.Infof("Searching for battery voltages between %s and %s", from.String(), now.String())
		// Previously the error from SetReadingsData was silently dropped,
		// making a failed query indistinguishable from an empty window.
		if err := stats.SetReadingsData(); err != nil {
			logrus.Errorf("loading readings for anomaly detection: %v", err)
			continue
		}
		if len(stats.readings) == 0 {
			continue
		}
		bvAnomalies := batteryVoltageAnomalies(stats.readings, stats.timeRange)
		svAnomalies := solarVoltageAnomalies(stats.readings, stats.timeRange)
		baAnomalies := batteryAmperageAnomalies(stats.readings, stats.timeRange)
		saAnomalies := solarAmperageAnomalies(stats.readings, stats.timeRange)
		laAnomalies := loadAmperageAnomalies(stats.readings, stats.timeRange)
		btAnomalies := batteryTempAnomalies(stats.readings, stats.timeRange)
		bvAnomalies.SendToDB(stats.db, stats.mutex)
		svAnomalies.SendToDB(stats.db, stats.mutex)
		baAnomalies.SendToDB(stats.db, stats.mutex)
		saAnomalies.SendToDB(stats.db, stats.mutex)
		laAnomalies.SendToDB(stats.db, stats.mutex)
		btAnomalies.SendToDB(stats.db, stats.mutex)
	}
}
// computeAnomalous detects data anomalies for a particular field of each
// CabinReading inside tsRange. Anomalous data is computed as `p(x) < k` via
// the Gaussian normal distribution formula where k is 0.001.
func computeAnomalous(field string, readings []models.CabinReading, values []big.Float, tsRange TimeRange) models.Anomalies {
	anomalyDetector := goanomaly.NewAnomalyDetection(values...)
	var anomalous []models.CabinReading
	for _, reading := range readings {
		if reading.Timestamp.Before(tsRange.From) {
			continue
		}
		if reading.Timestamp.After(tsRange.To) {
			// Past the window: stop scanning but keep what was found.
			// (Previously this returned the zero Anomalies value,
			// discarding every anomaly already collected.)
			break
		}
		value, err := reflections.GetField(reading, field)
		if err != nil {
			logrus.Error(err)
			return models.Anomalies{}
		}
		// big.Float values cannot be compared with == (the struct contains
		// a slice); test the raw float64 instead. Zero readings are skipped,
		// mirroring appendIfNotZero, as are non-float fields.
		raw, ok := value.(float64)
		if !ok || raw == 0 {
			continue
		}
		event := *big.NewFloat(raw)
		anomaly, _ := anomalyDetector.EventIsAnomalous(event, big.NewFloat(0.001))
		if anomaly {
			anomalous = append(anomalous, reading)
			logrus.Infof("Found anomalous value %v for %s", raw, field)
		}
	}
	return models.Anomalies{
		Name:     field,
		Readings: anomalous,
	}
}
// appendIfNotZero appends new to values as a big.Float, unless it is exactly
// zero, in which case values is returned unchanged.
func appendIfNotZero(values []big.Float, new float64) []big.Float {
	if new != 0 {
		values = append(values, *big.NewFloat(new))
	}
	return values
}
// batteryVoltageAnomalies flags anomalous BatteryVoltage readings in tsRange.
func batteryVoltageAnomalies(readings []models.CabinReading, tsRange TimeRange) (anomalies models.Anomalies) {
var values []big.Float
for _, reading := range readings {
values = appendIfNotZero(values, reading.BatteryVoltage)
}
anomalies = computeAnomalous("BatteryVoltage", readings, values, tsRange)
return
}
// solarVoltageAnomalies flags anomalous SolarVoltage readings in tsRange.
func solarVoltageAnomalies(readings []models.CabinReading, tsRange TimeRange) (anomalies models.Anomalies) {
var values []big.Float
for _, reading := range readings {
values = appendIfNotZero(values, reading.SolarVoltage)
}
anomalies = computeAnomalous("SolarVoltage", readings, values, tsRange)
return
}
// batteryAmperageAnomalies flags anomalous BatteryAmperage readings in tsRange.
func batteryAmperageAnomalies(readings []models.CabinReading, tsRange TimeRange) (anomalies models.Anomalies) {
var values []big.Float
for _, reading := range readings {
values = appendIfNotZero(values, reading.BatteryAmperage)
}
anomalies = computeAnomalous("BatteryAmperage", readings, values, tsRange)
return
}
// solarAmperageAnomalies flags anomalous SolarAmperage readings in tsRange.
func solarAmperageAnomalies(readings []models.CabinReading, tsRange TimeRange) (anomalies models.Anomalies) {
var values []big.Float
for _, reading := range readings {
values = appendIfNotZero(values, reading.SolarAmperage)
}
anomalies = computeAnomalous("SolarAmperage", readings, values, tsRange)
return
}
// loadAmperageAnomalies flags anomalous LoadAmperage readings in tsRange.
func loadAmperageAnomalies(readings []models.CabinReading, tsRange TimeRange) (anomalies models.Anomalies) {
var values []big.Float
for _, reading := range readings {
values = appendIfNotZero(values, reading.LoadAmperage)
}
anomalies = computeAnomalous("LoadAmperage", readings, values, tsRange)
return
}
// batteryTempAnomalies flags anomalous BatteryTemp readings in tsRange.
func batteryTempAnomalies(readings []models.CabinReading, tsRange TimeRange) (anomalies models.Anomalies) {
var values []big.Float
for _, reading := range readings {
values = appendIfNotZero(values, reading.BatteryTemp)
}
anomalies = computeAnomalous("BatteryTemp", readings, values, tsRange)
return
}
Fix invalid operation
package statistics
import (
"math/big"
"sync"
"time"
"github.com/edinnen/Thanksgiving_Intranet/analyzer/models"
"github.com/sirupsen/logrus"
"gopkg.in/oleiade/reflections.v1"
"github.com/jmoiron/sqlx"
"github.com/sec51/goanomaly"
)
// Engine holds necessary values for our statistics engine
type Engine struct {
db *sqlx.DB // readings database handle
mutex *sync.Mutex // serializes access to db across goroutines
readings []models.CabinReading // readings loaded for the current timeRange
timeRange TimeRange // window the current readings were queried over
}
// TimeRange defines a to/from range for our queries
type TimeRange struct {
From time.Time
To time.Time
}
// NewClient creates a new statistics engine
func NewClient(db *sqlx.DB, mutex *sync.Mutex) Engine {
return Engine{
db: db,
mutex: mutex,
}
}
// SetReadingsData loads every reading whose unix timestamp falls inside the
// engine's current timeRange into stats.readings. stats.readings is replaced
// (possibly with nil) whether or not the query succeeds; any query error is
// logged and returned.
func (stats *Engine) SetReadingsData() error {
	var readings []models.CabinReading
	from := stats.timeRange.From.Unix()
	to := stats.timeRange.To.Unix()
	statement := `SELECT * FROM readings WHERE unix >= ? AND unix <= ?;`
	// Serialize DB access; the handle is shared with other goroutines.
	stats.mutex.Lock()
	err := stats.db.Select(&readings, statement, from, to)
	stats.mutex.Unlock()
	stats.readings = readings
	if err != nil {
		// Previously a failed query still logged "Found 0 readings in
		// range", hiding the error from the log entirely.
		logrus.Errorf("querying readings in range: %v", err)
		return err
	}
	logrus.Infof("Found %d readings in range", len(readings))
	return nil
}
// DetectStreamAnomalies checks for anomalous data over a three minute time
// range into the past, forever, persisting any anomalies found.
func (stats *Engine) DetectStreamAnomalies() {
	logrus.Info("Monitoring data stream for anomalies")
	for now := range time.Tick(3 * time.Minute) {
		from := now.Add(-3 * time.Minute)
		stats.timeRange = TimeRange{
			To:   now,
			From: from,
		}
		logrus.Infof("Searching for battery voltages between %s and %s", from.String(), now.String())
		// Previously the error from SetReadingsData was silently dropped,
		// making a failed query indistinguishable from an empty window.
		if err := stats.SetReadingsData(); err != nil {
			logrus.Errorf("loading readings for anomaly detection: %v", err)
			continue
		}
		if len(stats.readings) == 0 {
			continue
		}
		bvAnomalies := batteryVoltageAnomalies(stats.readings, stats.timeRange)
		svAnomalies := solarVoltageAnomalies(stats.readings, stats.timeRange)
		baAnomalies := batteryAmperageAnomalies(stats.readings, stats.timeRange)
		saAnomalies := solarAmperageAnomalies(stats.readings, stats.timeRange)
		laAnomalies := loadAmperageAnomalies(stats.readings, stats.timeRange)
		btAnomalies := batteryTempAnomalies(stats.readings, stats.timeRange)
		bvAnomalies.SendToDB(stats.db, stats.mutex)
		svAnomalies.SendToDB(stats.db, stats.mutex)
		baAnomalies.SendToDB(stats.db, stats.mutex)
		saAnomalies.SendToDB(stats.db, stats.mutex)
		laAnomalies.SendToDB(stats.db, stats.mutex)
		btAnomalies.SendToDB(stats.db, stats.mutex)
	}
}
// computeAnomalous detects data anomalies for a particular field of each
// CabinReading inside tsRange. Anomalous data is computed as `p(x) < k` via
// the Gaussian normal distribution formula where k is 0.001.
func computeAnomalous(field string, readings []models.CabinReading, values []big.Float, tsRange TimeRange) models.Anomalies {
	anomalyDetector := goanomaly.NewAnomalyDetection(values...)
	var anomalous []models.CabinReading
	for _, reading := range readings {
		if reading.Timestamp.Before(tsRange.From) {
			continue
		}
		if reading.Timestamp.After(tsRange.To) {
			// Past the window: stop scanning but keep what was found.
			// (Previously this returned the zero Anomalies value,
			// discarding every anomaly already collected.)
			break
		}
		value, err := reflections.GetField(reading, field)
		if err != nil {
			logrus.Error(err)
			return models.Anomalies{}
		}
		// `value == 0` compared an interface{} holding a float64 against an
		// untyped int, which is never true; assert to float64 so the
		// zero-skip (mirroring appendIfNotZero) actually takes effect.
		raw, ok := value.(float64)
		if !ok || raw == 0 {
			continue
		}
		event := *big.NewFloat(raw)
		anomaly, _ := anomalyDetector.EventIsAnomalous(event, big.NewFloat(0.001))
		if anomaly {
			anomalous = append(anomalous, reading)
			logrus.Infof("Found anomalous value %v for %s", raw, field)
		}
	}
	return models.Anomalies{
		Name:     field,
		Readings: anomalous,
	}
}
// appendIfNotZero appends new to values as a big.Float, unless it is exactly
// zero, in which case values is returned unchanged.
func appendIfNotZero(values []big.Float, new float64) []big.Float {
	if new != 0 {
		values = append(values, *big.NewFloat(new))
	}
	return values
}
// batteryVoltageAnomalies flags anomalous BatteryVoltage readings in tsRange.
func batteryVoltageAnomalies(readings []models.CabinReading, tsRange TimeRange) (anomalies models.Anomalies) {
var values []big.Float
for _, reading := range readings {
values = appendIfNotZero(values, reading.BatteryVoltage)
}
anomalies = computeAnomalous("BatteryVoltage", readings, values, tsRange)
return
}
// solarVoltageAnomalies flags anomalous SolarVoltage readings in tsRange.
func solarVoltageAnomalies(readings []models.CabinReading, tsRange TimeRange) (anomalies models.Anomalies) {
var values []big.Float
for _, reading := range readings {
values = appendIfNotZero(values, reading.SolarVoltage)
}
anomalies = computeAnomalous("SolarVoltage", readings, values, tsRange)
return
}
// batteryAmperageAnomalies flags anomalous BatteryAmperage readings in tsRange.
func batteryAmperageAnomalies(readings []models.CabinReading, tsRange TimeRange) (anomalies models.Anomalies) {
var values []big.Float
for _, reading := range readings {
values = appendIfNotZero(values, reading.BatteryAmperage)
}
anomalies = computeAnomalous("BatteryAmperage", readings, values, tsRange)
return
}
// solarAmperageAnomalies flags anomalous SolarAmperage readings in tsRange.
func solarAmperageAnomalies(readings []models.CabinReading, tsRange TimeRange) (anomalies models.Anomalies) {
var values []big.Float
for _, reading := range readings {
values = appendIfNotZero(values, reading.SolarAmperage)
}
anomalies = computeAnomalous("SolarAmperage", readings, values, tsRange)
return
}
// loadAmperageAnomalies flags anomalous LoadAmperage readings in tsRange.
func loadAmperageAnomalies(readings []models.CabinReading, tsRange TimeRange) (anomalies models.Anomalies) {
var values []big.Float
for _, reading := range readings {
values = appendIfNotZero(values, reading.LoadAmperage)
}
anomalies = computeAnomalous("LoadAmperage", readings, values, tsRange)
return
}
// batteryTempAnomalies flags anomalous BatteryTemp readings in tsRange.
func batteryTempAnomalies(readings []models.CabinReading, tsRange TimeRange) (anomalies models.Anomalies) {
var values []big.Float
for _, reading := range readings {
values = appendIfNotZero(values, reading.BatteryTemp)
}
anomalies = computeAnomalous("BatteryTemp", readings, values, tsRange)
return
}
|
package estafette
import (
"fmt"
"regexp"
"strconv"
"strings"
"time"
"github.com/estafette/estafette-ci-api/cockroach"
contracts "github.com/estafette/estafette-ci-contracts"
manifest "github.com/estafette/estafette-ci-manifest"
"github.com/rs/zerolog/log"
)
// BuildService encapsulates build and release creation and re-triggering
type BuildService interface {
CreateBuild(build contracts.Build, waitForJobToStart bool) (*contracts.Build, error)
FinishBuild(repoSource, repoOwner, repoName string, buildID int, buildStatus string) error
CreateRelease(release contracts.Release, mft manifest.EstafetteManifest, repoBranch, repoRevision string, waitForJobToStart bool) (*contracts.Release, error)
FinishRelease(repoSource, repoOwner, repoName string, releaseID int, releaseStatus string) error
FirePipelineTriggers(build contracts.Build, event string) error
FireReleaseTriggers(release contracts.Release, event string) error
FireCronTriggers() error
}
// buildServiceImpl is the default BuildService backed by CockroachDB and the
// CI builder client.
type buildServiceImpl struct {
cockroachDBClient cockroach.DBClient // persistence for builds/releases
ciBuilderClient CiBuilderClient // launches builder jobs
githubJobVarsFunc func(string, string, string) (string, string, error) // resolves job variables for GitHub repos
bitbucketJobVarsFunc func(string, string, string) (string, string, error) // resolves job variables for Bitbucket repos
}
// NewBuildService returns a new estafette.BuildService
func NewBuildService(cockroachDBClient cockroach.DBClient, ciBuilderClient CiBuilderClient, githubJobVarsFunc func(string, string, string) (string, string, error), bitbucketJobVarsFunc func(string, string, string) (string, string, error)) (buildService BuildService) {
	// Wire all collaborators into the concrete implementation.
	return &buildServiceImpl{
		cockroachDBClient:    cockroachDBClient,
		ciBuilderClient:      ciBuilderClient,
		githubJobVarsFunc:    githubJobVarsFunc,
		bitbucketJobVarsFunc: bitbucketJobVarsFunc,
	}
}
// CreateBuild validates the manifest carried by the build, stores the build in
// the database and - when the manifest is valid - starts a ci builder job and
// fires 'started' pipeline triggers. When the manifest is invalid the build is
// stored with status failed together with a build log containing the manifest
// error, so the developer still gets feedback.
func (s *buildServiceImpl) CreateBuild(build contracts.Build, waitForJobToStart bool) (createdBuild *contracts.Build, err error) {

	// validate manifest; an unreadable manifest is not fatal here
	hasValidManifest := false
	mft, manifestError := manifest.ReadManifest(build.Manifest)
	if manifestError != nil {
		log.Warn().Err(manifestError).Str("manifest", build.Manifest).Msgf("Deserializing Estafette manifest for pipeline %v/%v/%v and revision %v failed, continuing though so developer gets useful feedback", build.RepoSource, build.RepoOwner, build.RepoName, build.RepoRevision)
	} else {
		hasValidManifest = true
	}

	// set builder track; default to stable when the manifest was unreadable
	builderTrack := "stable"
	if hasValidManifest {
		builderTrack = mft.Builder.Track
	}

	// get short version of repo source
	shortRepoSource := s.getShortRepoSource(build.RepoSource)

	// set build status; an invalid manifest is stored as a failed build
	buildStatus := "failed"
	if hasValidManifest {
		buildStatus = "running"
	}

	// inject build stages
	if hasValidManifest {
		mft, err = InjectSteps(mft, builderTrack, shortRepoSource)
		if err != nil {
			// BUGFIX: was Msg with unexpanded %v verbs and no arguments; use Msgf with the values
			log.Error().Err(err).
				Msgf("Failed injecting build stages for pipeline %v/%v/%v and revision %v", build.RepoSource, build.RepoOwner, build.RepoName, build.RepoRevision)
			return
		}
	}

	// get or set autoincrement and build version
	autoincrement := 0
	if build.BuildVersion == "" {
		// get autoincrement number
		autoincrement, err = s.cockroachDBClient.GetAutoIncrement(shortRepoSource, build.RepoOwner, build.RepoName)
		if err != nil {
			return
		}

		// set build version number
		if hasValidManifest {
			build.BuildVersion = mft.Version.Version(manifest.EstafetteVersionParams{
				AutoIncrement: autoincrement,
				Branch:        build.RepoBranch,
				Revision:      build.RepoRevision,
			})
		} else {
			// set build version to autoincrement so there's at least a version in the db and gui
			build.BuildVersion = strconv.Itoa(autoincrement)
		}
	} else {
		// get autoincrement from build version; for semver versions the patch number is the autoincrement
		autoincrementCandidate := build.BuildVersion
		if hasValidManifest && mft.Version.SemVer != nil {
			re := regexp.MustCompile(`^[0-9]+\.[0-9]+\.([0-9]+)(-[0-9a-z-]+)?$`)
			match := re.FindStringSubmatch(build.BuildVersion)
			if len(match) > 1 {
				autoincrementCandidate = match[1]
			}
		}

		autoincrement, err = strconv.Atoi(autoincrementCandidate)
		if err != nil {
			// non-fatal: autoincrement stays 0 when the version does not parse
			log.Warn().Err(err).Str("buildversion", build.BuildVersion).Msgf("Failed extracting autoincrement from build version %v for pipeline %v/%v/%v revision %v", build.BuildVersion, build.RepoSource, build.RepoOwner, build.RepoName, build.RepoRevision)
		}
	}

	// copy labels from the manifest when the caller did not set any
	if len(build.Labels) == 0 {
		var labels []contracts.Label
		if hasValidManifest {
			for k, v := range mft.Labels {
				labels = append(labels, contracts.Label{
					Key:   k,
					Value: v,
				})
			}
		}
		build.Labels = labels
	}

	// copy release targets from the manifest when the caller did not set any
	if len(build.ReleaseTargets) == 0 {
		var releaseTargets []contracts.ReleaseTarget
		if hasValidManifest {
			for _, r := range mft.Releases {
				releaseTarget := contracts.ReleaseTarget{
					Name:    r.Name,
					Actions: make([]manifest.EstafetteReleaseAction, 0),
				}
				// len() on a nil slice is 0, so the separate nil check was redundant
				if len(r.Actions) > 0 {
					for _, a := range r.Actions {
						releaseTarget.Actions = append(releaseTarget.Actions, *a)
					}
				}
				releaseTargets = append(releaseTargets, releaseTarget)
			}
		}
		build.ReleaseTargets = releaseTargets
	}

	// get authenticated url
	authenticatedRepositoryURL, environmentVariableWithToken, err := s.getAuthenticatedRepositoryURL(build.RepoSource, build.RepoOwner, build.RepoName)
	if err != nil {
		return
	}

	// store build in db
	// NOTE(review): for an invalid manifest mft is the zero value; assumes GetAllTriggers is safe on it - confirm
	createdBuild, err = s.cockroachDBClient.InsertBuild(contracts.Build{
		RepoSource:     build.RepoSource,
		RepoOwner:      build.RepoOwner,
		RepoName:       build.RepoName,
		RepoBranch:     build.RepoBranch,
		RepoRevision:   build.RepoRevision,
		BuildVersion:   build.BuildVersion,
		BuildStatus:    buildStatus,
		Labels:         build.Labels,
		ReleaseTargets: build.ReleaseTargets,
		Manifest:       build.Manifest,
		Commits:        build.Commits,
		Triggers:       mft.GetAllTriggers(),
	})
	if err != nil {
		return
	}

	buildID, err := strconv.Atoi(createdBuild.ID)
	if err != nil {
		return
	}

	// define ci builder params
	ciBuilderParams := CiBuilderParams{
		JobType:              "build",
		RepoSource:           build.RepoSource,
		RepoOwner:            build.RepoOwner,
		RepoName:             build.RepoName,
		RepoURL:              authenticatedRepositoryURL,
		RepoBranch:           build.RepoBranch,
		RepoRevision:         build.RepoRevision,
		EnvironmentVariables: environmentVariableWithToken,
		Track:                builderTrack,
		AutoIncrement:        autoincrement,
		VersionNumber:        build.BuildVersion,
		Manifest:             mft,
		BuildID:              buildID,
	}

	// create ci builder job
	if hasValidManifest {
		log.Debug().Msgf("Pipeline %v/%v/%v revision %v has valid manifest, creating build job...", build.RepoSource, build.RepoOwner, build.RepoName, build.RepoRevision)

		// create ci builder job
		if waitForJobToStart {
			_, err = s.ciBuilderClient.CreateCiBuilderJob(ciBuilderParams)
			if err != nil {
				return
			}
		} else {
			go func(ciBuilderParams CiBuilderParams) {
				// BUGFIX: use a goroutine-local error instead of racing on the outer named return value
				_, innerErr := s.ciBuilderClient.CreateCiBuilderJob(ciBuilderParams)
				if innerErr != nil {
					log.Warn().Err(innerErr).Msgf("Failed creating async build job")
				}
			}(ciBuilderParams)
		}

		// handle triggers asynchronously; failures are logged inside FirePipelineTriggers
		go func() {
			s.FirePipelineTriggers(build, "started")
		}()
	} else if manifestError != nil {
		log.Debug().Msgf("Pipeline %v/%v/%v revision %v with build id %v has invalid manifest, storing log...", build.RepoSource, build.RepoOwner, build.RepoName, build.RepoRevision, build.ID)

		// store log with manifest unmarshalling error
		buildLog := contracts.BuildLog{
			BuildID:      createdBuild.ID,
			RepoSource:   createdBuild.RepoSource,
			RepoOwner:    createdBuild.RepoOwner,
			RepoName:     createdBuild.RepoName,
			RepoBranch:   createdBuild.RepoBranch,
			RepoRevision: createdBuild.RepoRevision,
			Steps: []contracts.BuildLogStep{
				{
					Step: "validate-manifest",
					Image: &contracts.BuildLogStepDockerImage{
						Name:         "estafette/estafette-ci-builder",
						Tag:          "stable",
						IsPulled:     true,
						ImageSize:    0,
						PullDuration: time.Duration(0),
						Error:        "",
						IsTrusted:    true,
					},
					ExitCode:     1,
					Status:       "failed",
					AutoInjected: true,
					RunIndex:     0,
					LogLines: []contracts.BuildLogLine{
						{
							LineNumber: 1,
							Timestamp:  time.Now().UTC(),
							StreamType: "stderr",
							Text:       manifestError.Error(),
						},
					},
				},
			},
		}

		err = s.cockroachDBClient.InsertBuildLog(buildLog)
		if err != nil {
			// non-fatal: the build itself is stored, only the explanatory log is lost
			log.Warn().Err(err).Msgf("Failed inserting build log for invalid manifest")
		}
	}

	return
}
// FinishBuild persists the final build status and then fires 'finished'
// pipeline triggers asynchronously.
func (s *buildServiceImpl) FinishBuild(repoSource, repoOwner, repoName string, buildID int, buildStatus string) error {
	// Persist the status first; triggers only fire if that succeeds.
	if err := s.cockroachDBClient.UpdateBuildStatus(repoSource, repoOwner, repoName, buildID, buildStatus); err != nil {
		return err
	}

	// handle triggers in the background, best-effort
	go func() {
		finishedBuild, err := s.cockroachDBClient.GetPipelineBuildByID(repoSource, repoOwner, repoName, buildID, false)
		if err != nil || finishedBuild == nil {
			return
		}
		s.FirePipelineTriggers(*finishedBuild, "finished")
	}()

	return nil
}
// CreateRelease injects release stages into the manifest, stores the release
// in the database, starts a ci builder release job and fires 'started'
// release triggers.
func (s *buildServiceImpl) CreateRelease(release contracts.Release, mft manifest.EstafetteManifest, repoBranch, repoRevision string, waitForJobToStart bool) (createdRelease *contracts.Release, err error) {

	// set builder track
	builderTrack := mft.Builder.Track

	// get short version of repo source
	shortRepoSource := s.getShortRepoSource(release.RepoSource)

	// set release status
	releaseStatus := "running"

	// inject build stages
	mft, err = InjectSteps(mft, builderTrack, shortRepoSource)
	if err != nil {
		log.Error().Err(err).
			Msgf("Failed injecting build stages for release to %v of pipeline %v/%v/%v version %v", release.Name, release.RepoSource, release.RepoOwner, release.RepoName, release.ReleaseVersion)
		return
	}

	// get autoincrement from release version; for semver versions the patch number is the autoincrement
	autoincrementCandidate := release.ReleaseVersion
	if mft.Version.SemVer != nil {
		re := regexp.MustCompile(`^[0-9]+\.[0-9]+\.([0-9]+)(-[0-9a-zA-Z-/]+)?$`)
		match := re.FindStringSubmatch(release.ReleaseVersion)
		if len(match) > 1 {
			autoincrementCandidate = match[1]
		}
	}

	autoincrement, err := strconv.Atoi(autoincrementCandidate)
	if err != nil {
		// non-fatal: autoincrement stays 0 when the version does not parse
		// BUGFIX: message said "build version" but this is a release version
		log.Warn().Err(err).Str("releaseversion", release.ReleaseVersion).Msgf("Failed extracting autoincrement from release version %v for pipeline %v/%v/%v", release.ReleaseVersion, release.RepoSource, release.RepoOwner, release.RepoName)
	}

	// get authenticated url
	authenticatedRepositoryURL, environmentVariableWithToken, err := s.getAuthenticatedRepositoryURL(release.RepoSource, release.RepoOwner, release.RepoName)
	if err != nil {
		return
	}

	// create release in database
	createdRelease, err = s.cockroachDBClient.InsertRelease(contracts.Release{
		Name:           release.Name,
		Action:         release.Action,
		RepoSource:     release.RepoSource,
		RepoOwner:      release.RepoOwner,
		RepoName:       release.RepoName,
		ReleaseVersion: release.ReleaseVersion,
		ReleaseStatus:  releaseStatus,
		TriggeredBy:    release.TriggeredBy,
	})
	if err != nil {
		return
	}

	insertedReleaseID, err := strconv.Atoi(createdRelease.ID)
	if err != nil {
		return
	}

	// define ci builder params
	ciBuilderParams := CiBuilderParams{
		JobType:              "release",
		RepoSource:           release.RepoSource,
		RepoOwner:            release.RepoOwner,
		RepoName:             release.RepoName,
		RepoURL:              authenticatedRepositoryURL,
		RepoBranch:           repoBranch,
		RepoRevision:         repoRevision,
		EnvironmentVariables: environmentVariableWithToken,
		Track:                builderTrack,
		AutoIncrement:        autoincrement,
		VersionNumber:        release.ReleaseVersion,
		Manifest:             mft,
		ReleaseID:            insertedReleaseID,
		ReleaseName:          release.Name,
		ReleaseAction:        release.Action,
		ReleaseTriggeredBy:   release.TriggeredBy,
	}

	// create ci release job
	if waitForJobToStart {
		_, err = s.ciBuilderClient.CreateCiBuilderJob(ciBuilderParams)
		if err != nil {
			return
		}
	} else {
		go func(ciBuilderParams CiBuilderParams) {
			// BUGFIX: use a goroutine-local error instead of racing on the outer named return value
			_, innerErr := s.ciBuilderClient.CreateCiBuilderJob(ciBuilderParams)
			if innerErr != nil {
				log.Warn().Err(innerErr).Msgf("Failed creating async release job")
			}
		}(ciBuilderParams)
	}

	// handle triggers asynchronously; failures are logged inside FireReleaseTriggers
	go func() {
		s.FireReleaseTriggers(release, "started")
	}()

	return
}
// FinishRelease persists the final release status and then fires 'finished'
// release triggers asynchronously.
func (s *buildServiceImpl) FinishRelease(repoSource, repoOwner, repoName string, releaseID int, releaseStatus string) error {
	// Persist the status first; triggers only fire if that succeeds.
	if err := s.cockroachDBClient.UpdateReleaseStatus(repoSource, repoOwner, repoName, releaseID, releaseStatus); err != nil {
		return err
	}

	// handle triggers in the background, best-effort
	go func() {
		finishedRelease, err := s.cockroachDBClient.GetPipelineRelease(repoSource, repoOwner, repoName, releaseID)
		if err != nil || finishedRelease == nil {
			return
		}
		s.FireReleaseTriggers(*finishedRelease, "finished")
	}()

	return nil
}
// FirePipelineTriggers checks all stored pipeline triggers against the given
// build and event and fires the configured build or release action of each
// trigger that matches. Individual action failures are logged, not returned.
func (s *buildServiceImpl) FirePipelineTriggers(build contracts.Build, event string) error {

	log.Info().Msgf("[trigger:pipeline(%v/%v/%v:%v)] Checking if triggers need to be fired...", build.RepoSource, build.RepoOwner, build.RepoName, event)

	// retrieve all pipeline triggers
	pipelines, err := s.cockroachDBClient.GetPipelineTriggers(build, event)
	if err != nil {
		return err
	}

	// create event object
	pe := manifest.EstafettePipelineEvent{
		RepoSource: build.RepoSource,
		RepoOwner:  build.RepoOwner,
		RepoName:   build.RepoName,
		Branch:     build.RepoBranch,
		Status:     build.BuildStatus,
		Event:      event,
	}

	// check for each whether it should fire
	for _, p := range pipelines {
		for _, t := range p.Triggers {
			log.Debug().Interface("event", pe).Interface("trigger", t).Msgf("[trigger:pipeline(%v/%v/%v:%v)] Checking if pipeline '%v/%v/%v' trigger should fire...", build.RepoSource, build.RepoOwner, build.RepoName, event, p.RepoSource, p.RepoOwner, p.RepoName)
			if t.Pipeline == nil {
				continue
			}
			if t.Pipeline.Fires(&pe) {
				// create new build for t.Run
				if t.BuildAction != nil {
					log.Info().Msgf("[trigger:pipeline(%v/%v/%v:%v)] Firing build action '%v/%v/%v', branch '%v'...", build.RepoSource, build.RepoOwner, build.RepoName, event, p.RepoSource, p.RepoOwner, p.RepoName, t.BuildAction.Branch)
					err := s.fireBuild(*p, t)
					if err != nil {
						// BUGFIX: message was missing the space after 'action'; also attach the error to the log event
						log.Info().Err(err).Msgf("[trigger:pipeline(%v/%v/%v:%v)] Failed starting build action '%v/%v/%v', branch '%v'", build.RepoSource, build.RepoOwner, build.RepoName, event, p.RepoSource, p.RepoOwner, p.RepoName, t.BuildAction.Branch)
					}
				} else if t.ReleaseAction != nil {
					log.Info().Msgf("[trigger:pipeline(%v/%v/%v:%v)] Firing release action '%v/%v/%v', target '%v', action '%v'...", build.RepoSource, build.RepoOwner, build.RepoName, event, p.RepoSource, p.RepoOwner, p.RepoName, t.ReleaseAction.Target, t.ReleaseAction.Action)
					err := s.fireRelease(*p, t, fmt.Sprintf("trigger.pipeline { name: %v/%v/%v, event: %v }", build.RepoSource, build.RepoOwner, build.RepoName, event))
					if err != nil {
						log.Info().Err(err).Msgf("[trigger:pipeline(%v/%v/%v:%v)] Failed starting release action '%v/%v/%v', target '%v', action '%v'", build.RepoSource, build.RepoOwner, build.RepoName, event, p.RepoSource, p.RepoOwner, p.RepoName, t.ReleaseAction.Target, t.ReleaseAction.Action)
					}
				}
			}
		}
	}

	return nil
}
// FireReleaseTriggers checks all stored release triggers against the given
// release and event and fires the configured build or release action of each
// trigger that matches. Individual action failures are logged, not returned.
func (s *buildServiceImpl) FireReleaseTriggers(release contracts.Release, event string) error {

	// BUGFIX: header log was missing the closing parenthesis after the event
	log.Info().Msgf("[trigger:release(%v/%v/%v-%v:%v)] Checking if triggers need to be fired...", release.RepoSource, release.RepoOwner, release.RepoName, release.Name, event)

	pipelines, err := s.cockroachDBClient.GetReleaseTriggers(release, event)
	if err != nil {
		return err
	}

	// create event object
	re := manifest.EstafetteReleaseEvent{
		RepoSource: release.RepoSource,
		RepoOwner:  release.RepoOwner,
		RepoName:   release.RepoName,
		Target:     release.Name,
		Status:     release.ReleaseStatus,
		Event:      event,
	}

	// check for each whether it should fire
	for _, p := range pipelines {
		for _, t := range p.Triggers {
			log.Debug().Interface("event", re).Interface("trigger", t).Msgf("[trigger:release(%v/%v/%v-%v:%v)] Checking if pipeline '%v/%v/%v' trigger should fire...", release.RepoSource, release.RepoOwner, release.RepoName, release.Name, event, p.RepoSource, p.RepoOwner, p.RepoName)
			if t.Release == nil {
				continue
			}
			if t.Release.Fires(&re) {
				if t.BuildAction != nil {
					log.Info().Msgf("[trigger:release(%v/%v/%v-%v:%v)] Firing build action '%v/%v/%v', branch '%v'...", release.RepoSource, release.RepoOwner, release.RepoName, release.Name, event, p.RepoSource, p.RepoOwner, p.RepoName, t.BuildAction.Branch)
					err := s.fireBuild(*p, t)
					if err != nil {
						// attach the error to the log event for diagnosability
						log.Info().Err(err).Msgf("[trigger:release(%v/%v/%v-%v:%v)] Failed starting build action '%v/%v/%v', branch '%v'", release.RepoSource, release.RepoOwner, release.RepoName, release.Name, event, p.RepoSource, p.RepoOwner, p.RepoName, t.BuildAction.Branch)
					}
				} else if t.ReleaseAction != nil {
					log.Info().Msgf("[trigger:release(%v/%v/%v-%v:%v)] Firing release action '%v/%v/%v', target '%v', action '%v'...", release.RepoSource, release.RepoOwner, release.RepoName, release.Name, event, p.RepoSource, p.RepoOwner, p.RepoName, t.ReleaseAction.Target, t.ReleaseAction.Action)
					err := s.fireRelease(*p, t, fmt.Sprintf("trigger.release { name: %v/%v/%v, target: %v, event: %v }", release.RepoSource, release.RepoOwner, release.RepoName, release.Name, event))
					if err != nil {
						log.Info().Err(err).Msgf("[trigger:release(%v/%v/%v-%v:%v)] Failed starting release action '%v/%v/%v', target '%v', action '%v'", release.RepoSource, release.RepoOwner, release.RepoName, release.Name, event, p.RepoSource, p.RepoOwner, p.RepoName, t.ReleaseAction.Target, t.ReleaseAction.Action)
					}
				}
			}
		}
	}

	return nil
}
// fireBuild re-runs the last build of the branch configured in the trigger's
// 'builds' action. It errors when the trigger has no build action, the lookup
// fails, or the branch has never been built.
func (s *buildServiceImpl) fireBuild(p contracts.Pipeline, t manifest.EstafetteTrigger) error {
	if t.BuildAction == nil {
		return fmt.Errorf("Trigger to fire does not have a 'builds' property, shouldn't get to here")
	}

	// get last build for branch defined in 'builds' section
	lastBuildForBranch, err := s.cockroachDBClient.GetLastPipelineBuildForBranch(p.RepoSource, p.RepoOwner, p.RepoName, t.BuildAction.Branch)
	if err != nil {
		// BUGFIX: this error was silently ignored before the nil check
		return err
	}
	if lastBuildForBranch == nil {
		return fmt.Errorf("There's no build for pipeline '%v/%v/%v' branch '%v', cannot trigger one", p.RepoSource, p.RepoOwner, p.RepoName, t.BuildAction.Branch)
	}

	// empty the build version so a new one gets created
	lastBuildForBranch.BuildVersion = ""

	_, err = s.CreateBuild(*lastBuildForBranch, true)
	return err
}
// fireRelease starts the release configured in the trigger's 'releases'
// action, releasing the pipeline's current version from the branch/revision
// it was built from.
func (s *buildServiceImpl) fireRelease(p contracts.Pipeline, t manifest.EstafetteTrigger, triggeredBy string) error {
	if t.ReleaseAction == nil {
		return fmt.Errorf("Trigger to fire does not have a 'releases' property, shouldn't get to here")
	}

	releaseToCreate := contracts.Release{
		Name:           t.ReleaseAction.Target,
		Action:         t.ReleaseAction.Action,
		RepoSource:     p.RepoSource,
		RepoOwner:      p.RepoOwner,
		RepoName:       p.RepoName,
		ReleaseVersion: p.BuildVersion,
		TriggeredBy:    triggeredBy,
	}
	_, err := s.CreateRelease(releaseToCreate, *p.ManifestObject, p.RepoBranch, p.RepoRevision, true)
	return err
}
// FireCronTriggers checks all stored cron triggers against an empty cron
// event and logs the ones that fire.
func (s *buildServiceImpl) FireCronTriggers() error {

	log.Info().Msgf("Checking if triggers for cron need to be fired...")

	pipelines, err := s.cockroachDBClient.GetCronTriggers()
	if err != nil {
		return err
	}

	// a cron event carries no payload; matching is purely schedule-based
	ce := manifest.EstafetteCronEvent{}

	for _, p := range pipelines {
		for _, t := range p.Triggers {
			if t.Cron == nil || !t.Cron.Fires(&ce) {
				continue
			}
			// create new release for t.Run
			log.Info().Msgf("Firing %v because of cron", ce)
		}
	}

	return nil
}
// getShortRepoSource returns the first dot-separated segment of the repo
// source, e.g. "github.com" becomes "github".
func (s *buildServiceImpl) getShortRepoSource(repoSource string) string {
	segments := strings.SplitN(repoSource, ".", 2)
	if len(segments) == 0 {
		return repoSource
	}
	return segments[0]
}
// getAuthenticatedRepositoryURL resolves a clone url with embedded credentials
// plus the environment variable carrying the api token for the repo source;
// sources other than github.com and bitbucket.org are rejected.
func (s *buildServiceImpl) getAuthenticatedRepositoryURL(repoSource, repoOwner, repoName string) (authenticatedRepositoryURL string, environmentVariableWithToken map[string]string, err error) {

	var jobVarsFunc func(string, string, string) (string, string, error)
	var tokenVariableName string

	switch repoSource {
	case "github.com":
		jobVarsFunc = s.githubJobVarsFunc
		tokenVariableName = "ESTAFETTE_GITHUB_API_TOKEN"
	case "bitbucket.org":
		jobVarsFunc = s.bitbucketJobVarsFunc
		tokenVariableName = "ESTAFETTE_BITBUCKET_API_TOKEN"
	default:
		return authenticatedRepositoryURL, environmentVariableWithToken, fmt.Errorf("Source %v not supported for generating authenticated repository url", repoSource)
	}

	var accessToken string
	accessToken, authenticatedRepositoryURL, err = jobVarsFunc(repoSource, repoOwner, repoName)
	if err != nil {
		return
	}
	environmentVariableWithToken = map[string]string{tokenVariableName: accessToken}
	return
}
// fix manifest failure log status casing
package estafette
import (
"fmt"
"regexp"
"strconv"
"strings"
"time"
"github.com/estafette/estafette-ci-api/cockroach"
contracts "github.com/estafette/estafette-ci-contracts"
manifest "github.com/estafette/estafette-ci-manifest"
"github.com/rs/zerolog/log"
)
// BuildService encapsulates build and release creation and re-triggering
type BuildService interface {
	// CreateBuild stores a new build in the database and starts a ci builder job for it.
	CreateBuild(build contracts.Build, waitForJobToStart bool) (*contracts.Build, error)
	// FinishBuild updates the build's status and fires 'finished' pipeline triggers.
	FinishBuild(repoSource, repoOwner, repoName string, buildID int, buildStatus string) error
	// CreateRelease stores a new release in the database and starts a ci builder release job for it.
	CreateRelease(release contracts.Release, mft manifest.EstafetteManifest, repoBranch, repoRevision string, waitForJobToStart bool) (*contracts.Release, error)
	// FinishRelease updates the release's status and fires 'finished' release triggers.
	FinishRelease(repoSource, repoOwner, repoName string, releaseID int, releaseStatus string) error
	// FirePipelineTriggers fires any stored pipeline triggers that match the build and event.
	FirePipelineTriggers(build contracts.Build, event string) error
	// FireReleaseTriggers fires any stored release triggers that match the release and event.
	FireReleaseTriggers(release contracts.Release, event string) error
	// FireCronTriggers fires any stored cron triggers whose schedule matches.
	FireCronTriggers() error
}
// buildServiceImpl is the default BuildService implementation, backed by a
// CockroachDB client for persistence and a ci builder client for running jobs.
type buildServiceImpl struct {
	cockroachDBClient cockroach.DBClient // persistence for builds, releases and triggers
	ciBuilderClient   CiBuilderClient    // creates the ci builder (build/release) jobs
	// githubJobVarsFunc resolves (access token, authenticated repository url) for github repositories
	githubJobVarsFunc func(string, string, string) (string, string, error)
	// bitbucketJobVarsFunc resolves (access token, authenticated repository url) for bitbucket repositories
	bitbucketJobVarsFunc func(string, string, string) (string, string, error)
}
// NewBuildService returns a new estafette.BuildService
func NewBuildService(cockroachDBClient cockroach.DBClient, ciBuilderClient CiBuilderClient, githubJobVarsFunc func(string, string, string) (string, string, error), bitbucketJobVarsFunc func(string, string, string) (string, string, error)) (buildService BuildService) {
	// Wire all collaborators into the concrete implementation.
	return &buildServiceImpl{
		cockroachDBClient:    cockroachDBClient,
		ciBuilderClient:      ciBuilderClient,
		githubJobVarsFunc:    githubJobVarsFunc,
		bitbucketJobVarsFunc: bitbucketJobVarsFunc,
	}
}
// CreateBuild validates the manifest carried by the build, stores the build in
// the database and - when the manifest is valid - starts a ci builder job and
// fires 'started' pipeline triggers. When the manifest is invalid the build is
// stored with status failed together with a build log containing the manifest
// error, so the developer still gets feedback.
func (s *buildServiceImpl) CreateBuild(build contracts.Build, waitForJobToStart bool) (createdBuild *contracts.Build, err error) {

	// validate manifest; an unreadable manifest is not fatal here
	hasValidManifest := false
	mft, manifestError := manifest.ReadManifest(build.Manifest)
	if manifestError != nil {
		log.Warn().Err(manifestError).Str("manifest", build.Manifest).Msgf("Deserializing Estafette manifest for pipeline %v/%v/%v and revision %v failed, continuing though so developer gets useful feedback", build.RepoSource, build.RepoOwner, build.RepoName, build.RepoRevision)
	} else {
		hasValidManifest = true
	}

	// set builder track; default to stable when the manifest was unreadable
	builderTrack := "stable"
	if hasValidManifest {
		builderTrack = mft.Builder.Track
	}

	// get short version of repo source
	shortRepoSource := s.getShortRepoSource(build.RepoSource)

	// set build status; an invalid manifest is stored as a failed build
	buildStatus := "failed"
	if hasValidManifest {
		buildStatus = "running"
	}

	// inject build stages
	if hasValidManifest {
		mft, err = InjectSteps(mft, builderTrack, shortRepoSource)
		if err != nil {
			// BUGFIX: was Msg with unexpanded %v verbs and no arguments; use Msgf with the values
			log.Error().Err(err).
				Msgf("Failed injecting build stages for pipeline %v/%v/%v and revision %v", build.RepoSource, build.RepoOwner, build.RepoName, build.RepoRevision)
			return
		}
	}

	// get or set autoincrement and build version
	autoincrement := 0
	if build.BuildVersion == "" {
		// get autoincrement number
		autoincrement, err = s.cockroachDBClient.GetAutoIncrement(shortRepoSource, build.RepoOwner, build.RepoName)
		if err != nil {
			return
		}

		// set build version number
		if hasValidManifest {
			build.BuildVersion = mft.Version.Version(manifest.EstafetteVersionParams{
				AutoIncrement: autoincrement,
				Branch:        build.RepoBranch,
				Revision:      build.RepoRevision,
			})
		} else {
			// set build version to autoincrement so there's at least a version in the db and gui
			build.BuildVersion = strconv.Itoa(autoincrement)
		}
	} else {
		// get autoincrement from build version; for semver versions the patch number is the autoincrement
		autoincrementCandidate := build.BuildVersion
		if hasValidManifest && mft.Version.SemVer != nil {
			re := regexp.MustCompile(`^[0-9]+\.[0-9]+\.([0-9]+)(-[0-9a-z-]+)?$`)
			match := re.FindStringSubmatch(build.BuildVersion)
			if len(match) > 1 {
				autoincrementCandidate = match[1]
			}
		}

		autoincrement, err = strconv.Atoi(autoincrementCandidate)
		if err != nil {
			// non-fatal: autoincrement stays 0 when the version does not parse
			log.Warn().Err(err).Str("buildversion", build.BuildVersion).Msgf("Failed extracting autoincrement from build version %v for pipeline %v/%v/%v revision %v", build.BuildVersion, build.RepoSource, build.RepoOwner, build.RepoName, build.RepoRevision)
		}
	}

	// copy labels from the manifest when the caller did not set any
	if len(build.Labels) == 0 {
		var labels []contracts.Label
		if hasValidManifest {
			for k, v := range mft.Labels {
				labels = append(labels, contracts.Label{
					Key:   k,
					Value: v,
				})
			}
		}
		build.Labels = labels
	}

	// copy release targets from the manifest when the caller did not set any
	if len(build.ReleaseTargets) == 0 {
		var releaseTargets []contracts.ReleaseTarget
		if hasValidManifest {
			for _, r := range mft.Releases {
				releaseTarget := contracts.ReleaseTarget{
					Name:    r.Name,
					Actions: make([]manifest.EstafetteReleaseAction, 0),
				}
				// len() on a nil slice is 0, so the separate nil check was redundant
				if len(r.Actions) > 0 {
					for _, a := range r.Actions {
						releaseTarget.Actions = append(releaseTarget.Actions, *a)
					}
				}
				releaseTargets = append(releaseTargets, releaseTarget)
			}
		}
		build.ReleaseTargets = releaseTargets
	}

	// get authenticated url
	authenticatedRepositoryURL, environmentVariableWithToken, err := s.getAuthenticatedRepositoryURL(build.RepoSource, build.RepoOwner, build.RepoName)
	if err != nil {
		return
	}

	// store build in db
	// NOTE(review): for an invalid manifest mft is the zero value; assumes GetAllTriggers is safe on it - confirm
	createdBuild, err = s.cockroachDBClient.InsertBuild(contracts.Build{
		RepoSource:     build.RepoSource,
		RepoOwner:      build.RepoOwner,
		RepoName:       build.RepoName,
		RepoBranch:     build.RepoBranch,
		RepoRevision:   build.RepoRevision,
		BuildVersion:   build.BuildVersion,
		BuildStatus:    buildStatus,
		Labels:         build.Labels,
		ReleaseTargets: build.ReleaseTargets,
		Manifest:       build.Manifest,
		Commits:        build.Commits,
		Triggers:       mft.GetAllTriggers(),
	})
	if err != nil {
		return
	}

	buildID, err := strconv.Atoi(createdBuild.ID)
	if err != nil {
		return
	}

	// define ci builder params
	ciBuilderParams := CiBuilderParams{
		JobType:              "build",
		RepoSource:           build.RepoSource,
		RepoOwner:            build.RepoOwner,
		RepoName:             build.RepoName,
		RepoURL:              authenticatedRepositoryURL,
		RepoBranch:           build.RepoBranch,
		RepoRevision:         build.RepoRevision,
		EnvironmentVariables: environmentVariableWithToken,
		Track:                builderTrack,
		AutoIncrement:        autoincrement,
		VersionNumber:        build.BuildVersion,
		Manifest:             mft,
		BuildID:              buildID,
	}

	// create ci builder job
	if hasValidManifest {
		log.Debug().Msgf("Pipeline %v/%v/%v revision %v has valid manifest, creating build job...", build.RepoSource, build.RepoOwner, build.RepoName, build.RepoRevision)

		// create ci builder job
		if waitForJobToStart {
			_, err = s.ciBuilderClient.CreateCiBuilderJob(ciBuilderParams)
			if err != nil {
				return
			}
		} else {
			go func(ciBuilderParams CiBuilderParams) {
				// BUGFIX: use a goroutine-local error instead of racing on the outer named return value
				_, innerErr := s.ciBuilderClient.CreateCiBuilderJob(ciBuilderParams)
				if innerErr != nil {
					log.Warn().Err(innerErr).Msgf("Failed creating async build job")
				}
			}(ciBuilderParams)
		}

		// handle triggers asynchronously; failures are logged inside FirePipelineTriggers
		go func() {
			s.FirePipelineTriggers(build, "started")
		}()
	} else if manifestError != nil {
		log.Debug().Msgf("Pipeline %v/%v/%v revision %v with build id %v has invalid manifest, storing log...", build.RepoSource, build.RepoOwner, build.RepoName, build.RepoRevision, build.ID)

		// store log with manifest unmarshalling error
		buildLog := contracts.BuildLog{
			BuildID:      createdBuild.ID,
			RepoSource:   createdBuild.RepoSource,
			RepoOwner:    createdBuild.RepoOwner,
			RepoName:     createdBuild.RepoName,
			RepoBranch:   createdBuild.RepoBranch,
			RepoRevision: createdBuild.RepoRevision,
			Steps: []contracts.BuildLogStep{
				{
					Step: "validate-manifest",
					Image: &contracts.BuildLogStepDockerImage{
						Name:         "estafette/estafette-ci-builder",
						Tag:          "stable",
						IsPulled:     true,
						ImageSize:    0,
						PullDuration: time.Duration(0),
						Error:        "",
						IsTrusted:    true,
					},
					ExitCode:     1,
					Status:       "FAILED",
					AutoInjected: true,
					RunIndex:     0,
					LogLines: []contracts.BuildLogLine{
						{
							LineNumber: 1,
							Timestamp:  time.Now().UTC(),
							StreamType: "stderr",
							Text:       manifestError.Error(),
						},
					},
				},
			},
		}

		err = s.cockroachDBClient.InsertBuildLog(buildLog)
		if err != nil {
			// non-fatal: the build itself is stored, only the explanatory log is lost
			log.Warn().Err(err).Msgf("Failed inserting build log for invalid manifest")
		}
	}

	return
}
// FinishBuild persists the final build status and then fires 'finished'
// pipeline triggers asynchronously.
func (s *buildServiceImpl) FinishBuild(repoSource, repoOwner, repoName string, buildID int, buildStatus string) error {
	// Persist the status first; triggers only fire if that succeeds.
	if err := s.cockroachDBClient.UpdateBuildStatus(repoSource, repoOwner, repoName, buildID, buildStatus); err != nil {
		return err
	}

	// handle triggers in the background, best-effort
	go func() {
		finishedBuild, err := s.cockroachDBClient.GetPipelineBuildByID(repoSource, repoOwner, repoName, buildID, false)
		if err != nil || finishedBuild == nil {
			return
		}
		s.FirePipelineTriggers(*finishedBuild, "finished")
	}()

	return nil
}
// CreateRelease injects release stages into the manifest, stores the release
// in the database, starts a ci builder release job and fires 'started'
// release triggers.
func (s *buildServiceImpl) CreateRelease(release contracts.Release, mft manifest.EstafetteManifest, repoBranch, repoRevision string, waitForJobToStart bool) (createdRelease *contracts.Release, err error) {

	// set builder track
	builderTrack := mft.Builder.Track

	// get short version of repo source
	shortRepoSource := s.getShortRepoSource(release.RepoSource)

	// set release status
	releaseStatus := "running"

	// inject build stages
	mft, err = InjectSteps(mft, builderTrack, shortRepoSource)
	if err != nil {
		log.Error().Err(err).
			Msgf("Failed injecting build stages for release to %v of pipeline %v/%v/%v version %v", release.Name, release.RepoSource, release.RepoOwner, release.RepoName, release.ReleaseVersion)
		return
	}

	// get autoincrement from release version; for semver versions the patch number is the autoincrement
	autoincrementCandidate := release.ReleaseVersion
	if mft.Version.SemVer != nil {
		re := regexp.MustCompile(`^[0-9]+\.[0-9]+\.([0-9]+)(-[0-9a-zA-Z-/]+)?$`)
		match := re.FindStringSubmatch(release.ReleaseVersion)
		if len(match) > 1 {
			autoincrementCandidate = match[1]
		}
	}

	autoincrement, err := strconv.Atoi(autoincrementCandidate)
	if err != nil {
		// non-fatal: autoincrement stays 0 when the version does not parse
		// BUGFIX: message said "build version" but this is a release version
		log.Warn().Err(err).Str("releaseversion", release.ReleaseVersion).Msgf("Failed extracting autoincrement from release version %v for pipeline %v/%v/%v", release.ReleaseVersion, release.RepoSource, release.RepoOwner, release.RepoName)
	}

	// get authenticated url
	authenticatedRepositoryURL, environmentVariableWithToken, err := s.getAuthenticatedRepositoryURL(release.RepoSource, release.RepoOwner, release.RepoName)
	if err != nil {
		return
	}

	// create release in database
	createdRelease, err = s.cockroachDBClient.InsertRelease(contracts.Release{
		Name:           release.Name,
		Action:         release.Action,
		RepoSource:     release.RepoSource,
		RepoOwner:      release.RepoOwner,
		RepoName:       release.RepoName,
		ReleaseVersion: release.ReleaseVersion,
		ReleaseStatus:  releaseStatus,
		TriggeredBy:    release.TriggeredBy,
	})
	if err != nil {
		return
	}

	insertedReleaseID, err := strconv.Atoi(createdRelease.ID)
	if err != nil {
		return
	}

	// define ci builder params
	ciBuilderParams := CiBuilderParams{
		JobType:              "release",
		RepoSource:           release.RepoSource,
		RepoOwner:            release.RepoOwner,
		RepoName:             release.RepoName,
		RepoURL:              authenticatedRepositoryURL,
		RepoBranch:           repoBranch,
		RepoRevision:         repoRevision,
		EnvironmentVariables: environmentVariableWithToken,
		Track:                builderTrack,
		AutoIncrement:        autoincrement,
		VersionNumber:        release.ReleaseVersion,
		Manifest:             mft,
		ReleaseID:            insertedReleaseID,
		ReleaseName:          release.Name,
		ReleaseAction:        release.Action,
		ReleaseTriggeredBy:   release.TriggeredBy,
	}

	// create ci release job
	if waitForJobToStart {
		_, err = s.ciBuilderClient.CreateCiBuilderJob(ciBuilderParams)
		if err != nil {
			return
		}
	} else {
		go func(ciBuilderParams CiBuilderParams) {
			// BUGFIX: use a goroutine-local error instead of racing on the outer named return value
			_, innerErr := s.ciBuilderClient.CreateCiBuilderJob(ciBuilderParams)
			if innerErr != nil {
				log.Warn().Err(innerErr).Msgf("Failed creating async release job")
			}
		}(ciBuilderParams)
	}

	// handle triggers asynchronously; failures are logged inside FireReleaseTriggers
	go func() {
		s.FireReleaseTriggers(release, "started")
	}()

	return
}
// FinishRelease persists the final release status and then fires 'finished'
// release triggers asynchronously.
func (s *buildServiceImpl) FinishRelease(repoSource, repoOwner, repoName string, releaseID int, releaseStatus string) error {
	// Persist the status first; triggers only fire if that succeeds.
	if err := s.cockroachDBClient.UpdateReleaseStatus(repoSource, repoOwner, repoName, releaseID, releaseStatus); err != nil {
		return err
	}

	// handle triggers in the background, best-effort
	go func() {
		finishedRelease, err := s.cockroachDBClient.GetPipelineRelease(repoSource, repoOwner, repoName, releaseID)
		if err != nil || finishedRelease == nil {
			return
		}
		s.FireReleaseTriggers(*finishedRelease, "finished")
	}()

	return nil
}
// FirePipelineTriggers checks all stored pipeline triggers against the given
// build and event and fires the configured build or release action of each
// trigger that matches. Individual action failures are logged, not returned.
func (s *buildServiceImpl) FirePipelineTriggers(build contracts.Build, event string) error {

	log.Info().Msgf("[trigger:pipeline(%v/%v/%v:%v)] Checking if triggers need to be fired...", build.RepoSource, build.RepoOwner, build.RepoName, event)

	// retrieve all pipeline triggers
	pipelines, err := s.cockroachDBClient.GetPipelineTriggers(build, event)
	if err != nil {
		return err
	}

	// create event object
	pe := manifest.EstafettePipelineEvent{
		RepoSource: build.RepoSource,
		RepoOwner:  build.RepoOwner,
		RepoName:   build.RepoName,
		Branch:     build.RepoBranch,
		Status:     build.BuildStatus,
		Event:      event,
	}

	// check for each whether it should fire
	for _, p := range pipelines {
		for _, t := range p.Triggers {
			log.Debug().Interface("event", pe).Interface("trigger", t).Msgf("[trigger:pipeline(%v/%v/%v:%v)] Checking if pipeline '%v/%v/%v' trigger should fire...", build.RepoSource, build.RepoOwner, build.RepoName, event, p.RepoSource, p.RepoOwner, p.RepoName)
			if t.Pipeline == nil {
				continue
			}
			if t.Pipeline.Fires(&pe) {
				// create new build for t.Run
				if t.BuildAction != nil {
					log.Info().Msgf("[trigger:pipeline(%v/%v/%v:%v)] Firing build action '%v/%v/%v', branch '%v'...", build.RepoSource, build.RepoOwner, build.RepoName, event, p.RepoSource, p.RepoOwner, p.RepoName, t.BuildAction.Branch)
					err := s.fireBuild(*p, t)
					if err != nil {
						// BUGFIX: message was missing the space after 'action'; also attach the error to the log event
						log.Info().Err(err).Msgf("[trigger:pipeline(%v/%v/%v:%v)] Failed starting build action '%v/%v/%v', branch '%v'", build.RepoSource, build.RepoOwner, build.RepoName, event, p.RepoSource, p.RepoOwner, p.RepoName, t.BuildAction.Branch)
					}
				} else if t.ReleaseAction != nil {
					log.Info().Msgf("[trigger:pipeline(%v/%v/%v:%v)] Firing release action '%v/%v/%v', target '%v', action '%v'...", build.RepoSource, build.RepoOwner, build.RepoName, event, p.RepoSource, p.RepoOwner, p.RepoName, t.ReleaseAction.Target, t.ReleaseAction.Action)
					err := s.fireRelease(*p, t, fmt.Sprintf("trigger.pipeline { name: %v/%v/%v, event: %v }", build.RepoSource, build.RepoOwner, build.RepoName, event))
					if err != nil {
						log.Info().Err(err).Msgf("[trigger:pipeline(%v/%v/%v:%v)] Failed starting release action '%v/%v/%v', target '%v', action '%v'", build.RepoSource, build.RepoOwner, build.RepoName, event, p.RepoSource, p.RepoOwner, p.RepoName, t.ReleaseAction.Target, t.ReleaseAction.Action)
					}
				}
			}
		}
	}

	return nil
}
// FireReleaseTriggers looks up all pipelines with a release trigger matching
// the given release/event combination and fires the configured build or
// release action for each trigger that matches.
//
// Errors from firing individual actions are logged but do not abort the loop,
// so one failing trigger cannot block the others.
func (s *buildServiceImpl) FireReleaseTriggers(release contracts.Release, event string) error {
	// NOTE: fixed unbalanced bracket in the log format; every other message in
	// this function uses the `(...:%v)]` form.
	log.Info().Msgf("[trigger:release(%v/%v/%v-%v:%v)] Checking if triggers need to be fired...", release.RepoSource, release.RepoOwner, release.RepoName, release.Name, event)

	pipelines, err := s.cockroachDBClient.GetReleaseTriggers(release, event)
	if err != nil {
		return err
	}

	// create event object the triggers are evaluated against
	re := manifest.EstafetteReleaseEvent{
		RepoSource: release.RepoSource,
		RepoOwner:  release.RepoOwner,
		RepoName:   release.RepoName,
		Target:     release.Name,
		Status:     release.ReleaseStatus,
		Event:      event,
	}

	// check for each whether it should fire
	for _, p := range pipelines {
		for _, t := range p.Triggers {
			log.Debug().Interface("event", re).Interface("trigger", t).Msgf("[trigger:release(%v/%v/%v-%v:%v)] Checking if pipeline '%v/%v/%v' trigger should fire...", release.RepoSource, release.RepoOwner, release.RepoName, release.Name, event, p.RepoSource, p.RepoOwner, p.RepoName)
			if t.Release == nil {
				continue
			}
			if t.Release.Fires(&re) {
				if t.BuildAction != nil {
					log.Info().Msgf("[trigger:release(%v/%v/%v-%v:%v)] Firing build action '%v/%v/%v', branch '%v'...", release.RepoSource, release.RepoOwner, release.RepoName, release.Name, event, p.RepoSource, p.RepoOwner, p.RepoName, t.BuildAction.Branch)
					err := s.fireBuild(*p, t)
					if err != nil {
						log.Info().Msgf("[trigger:release(%v/%v/%v-%v:%v)] Failed starting build action '%v/%v/%v', branch '%v'", release.RepoSource, release.RepoOwner, release.RepoName, release.Name, event, p.RepoSource, p.RepoOwner, p.RepoName, t.BuildAction.Branch)
					}
				} else if t.ReleaseAction != nil {
					log.Info().Msgf("[trigger:release(%v/%v/%v-%v:%v)] Firing release action '%v/%v/%v', target '%v', action '%v'...", release.RepoSource, release.RepoOwner, release.RepoName, release.Name, event, p.RepoSource, p.RepoOwner, p.RepoName, t.ReleaseAction.Target, t.ReleaseAction.Action)
					err := s.fireRelease(*p, t, fmt.Sprintf("trigger.release { name: %v/%v/%v, target: %v, event: %v }", release.RepoSource, release.RepoOwner, release.RepoName, release.Name, event))
					if err != nil {
						log.Info().Msgf("[trigger:release(%v/%v/%v-%v:%v)] Failed starting release action '%v/%v/%v', target '%v', action '%v'", release.RepoSource, release.RepoOwner, release.RepoName, release.Name, event, p.RepoSource, p.RepoOwner, p.RepoName, t.ReleaseAction.Target, t.ReleaseAction.Action)
					}
				}
			}
		}
	}

	return nil
}
// fireBuild starts a new build for the branch configured in the trigger's
// 'builds' action. It clones the most recent build for that branch, clears
// its version so a fresh one gets generated, and submits it via CreateBuild.
func (s *buildServiceImpl) fireBuild(p contracts.Pipeline, t manifest.EstafetteTrigger) error {
	if t.BuildAction == nil {
		return fmt.Errorf("Trigger to fire does not have a 'builds' property, shouldn't get to here")
	}

	// get last build for branch defined in 'builds' section
	lastBuildForBranch, err := s.cockroachDBClient.GetLastPipelineBuildForBranch(p.RepoSource, p.RepoOwner, p.RepoName, t.BuildAction.Branch)
	// BUGFIX: the error from the db call was previously ignored; surface it so
	// a database failure is distinguishable from a genuinely missing build.
	if err != nil {
		return err
	}
	if lastBuildForBranch == nil {
		return fmt.Errorf("There's no build for pipeline '%v/%v/%v' branch '%v', cannot trigger one", p.RepoSource, p.RepoOwner, p.RepoName, t.BuildAction.Branch)
	}

	// empty the build version so a new one gets created
	lastBuildForBranch.BuildVersion = ""

	_, err = s.CreateBuild(*lastBuildForBranch, true)
	if err != nil {
		return err
	}
	return nil
}
// fireRelease starts a release for the given pipeline as configured in the
// trigger's 'releases' action; triggeredBy records what caused it to fire.
func (s *buildServiceImpl) fireRelease(p contracts.Pipeline, t manifest.EstafetteTrigger, triggeredBy string) error {
	if t.ReleaseAction == nil {
		return fmt.Errorf("Trigger to fire does not have a 'releases' property, shouldn't get to here")
	}

	// release the version of the pipeline's latest build
	release := contracts.Release{
		Name:           t.ReleaseAction.Target,
		Action:         t.ReleaseAction.Action,
		RepoSource:     p.RepoSource,
		RepoOwner:      p.RepoOwner,
		RepoName:       p.RepoName,
		ReleaseVersion: p.BuildVersion,
		TriggeredBy:    triggeredBy,
	}

	_, err := s.CreateRelease(release, *p.ManifestObject, p.RepoBranch, p.RepoRevision, true)
	return err
}
// FireCronTriggers walks all pipelines with cron triggers and logs those whose
// cron expression fires for the current point in time.
func (s *buildServiceImpl) FireCronTriggers() error {
	log.Info().Msgf("Checking if triggers for cron need to be fired...")

	pipelines, err := s.cockroachDBClient.GetCronTriggers()
	if err != nil {
		return err
	}

	// event object the cron triggers are evaluated against
	ce := manifest.EstafetteCronEvent{}

	for _, p := range pipelines {
		for _, t := range p.Triggers {
			if t.Cron == nil {
				continue
			}
			if !t.Cron.Fires(&ce) {
				continue
			}
			// create new release for t.Run
			log.Info().Msgf("Firing %v because of cron", ce)
		}
	}

	return nil
}
// getShortRepoSource returns the repo source truncated at the first dot,
// e.g. "github.com" becomes "github".
//
// strings.Split always returns at least one element (the whole string when
// the separator is absent), so the previous `len(...) <= 0` guard was dead
// code and indexing element 0 is always safe.
func (s *buildServiceImpl) getShortRepoSource(repoSource string) string {
	return strings.Split(repoSource, ".")[0]
}
// getAuthenticatedRepositoryURL resolves a clone URL with embedded credentials
// for the given repository, plus an environment variable map carrying the API
// token for the source system. Only github.com and bitbucket.org are supported.
func (s *buildServiceImpl) getAuthenticatedRepositoryURL(repoSource, repoOwner, repoName string) (authenticatedRepositoryURL string, environmentVariableWithToken map[string]string, err error) {
	switch repoSource {
	case "github.com":
		accessToken, repositoryURL, jobVarsErr := s.githubJobVarsFunc(repoSource, repoOwner, repoName)
		if jobVarsErr != nil {
			return repositoryURL, nil, jobVarsErr
		}
		return repositoryURL, map[string]string{"ESTAFETTE_GITHUB_API_TOKEN": accessToken}, nil

	case "bitbucket.org":
		accessToken, repositoryURL, jobVarsErr := s.bitbucketJobVarsFunc(repoSource, repoOwner, repoName)
		if jobVarsErr != nil {
			return repositoryURL, nil, jobVarsErr
		}
		return repositoryURL, map[string]string{"ESTAFETTE_BITBUCKET_API_TOKEN": accessToken}, nil
	}

	return "", nil, fmt.Errorf("Source %v not supported for generating authenticated repository url", repoSource)
}
|
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3rpc
import (
"context"
"io"
"math/rand"
"sync"
"time"
"go.etcd.io/etcd/auth"
"go.etcd.io/etcd/etcdserver"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/mvcc"
"go.etcd.io/etcd/mvcc/mvccpb"
"go.uber.org/zap"
)
// watchServer implements pb.WatchServer on top of an mvcc.WatchableKV.
type watchServer struct {
	lg *zap.Logger

	// identity copied into every response header
	clusterID int64
	memberID  int64

	// upper bound on an outgoing response before it must be fragmented
	maxRequestBytes int

	sg        etcdserver.RaftStatusGetter
	watchable mvcc.WatchableKV
	ag        AuthGetter
}
// NewWatchServer returns a new watch server backed by the given etcd server.
func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
	// fall back to a no-op logger when none is configured
	lg := s.Cfg.Logger
	if lg == nil {
		lg = zap.NewNop()
	}
	return &watchServer{
		lg:              lg,
		clusterID:       int64(s.Cluster().ID()),
		memberID:        int64(s.ID()),
		maxRequestBytes: int(s.Cfg.MaxRequestBytes + grpcOverheadBytes),
		sg:              s,
		watchable:       s.Watchable(),
		ag:              s,
	}
}
var (
	// progressReportInterval is how often progress notifications are sent to
	// watchers that requested them.
	// External test can read this with GetProgressReportInterval()
	// and change this to a small value to finish fast with
	// SetProgressReportInterval().
	progressReportInterval = 10 * time.Minute
	// progressReportIntervalMu guards progressReportInterval.
	progressReportIntervalMu sync.RWMutex
)
// GetProgressReportInterval returns the current progress report interval (for testing).
func GetProgressReportInterval() time.Duration {
	progressReportIntervalMu.RLock()
	defer progressReportIntervalMu.RUnlock()

	// Jitter the interval by up to a tenth so that etcdserver does not send
	// progress notifications to all watchers at the same moment, even when
	// the watchers were created around the same time (which is common when a
	// client restarts itself).
	jitter := time.Duration(rand.Int63n(int64(progressReportInterval) / 10))
	return progressReportInterval + jitter
}
// SetProgressReportInterval updates the current progress report interval (for testing).
func SetProgressReportInterval(newTimeout time.Duration) {
	progressReportIntervalMu.Lock()
	defer progressReportIntervalMu.Unlock()
	progressReportInterval = newTimeout
}
// We send ctrl response inside the read loop. We do not want
// send to block read, but we still want ctrl response we sent to
// be serialized. Thus we use a buffered chan to solve the problem.
// A small buffer should be OK for most cases, since we expect the
// ctrl requests are infrequent.
const ctrlStreamBufLen = 16
// serverWatchStream is an etcd server side stream. It receives requests
// from client side gRPC stream. It receives watch events from mvcc.WatchStream,
// and creates responses that forwarded to gRPC stream.
// It also forwards control message like watch created and canceled.
type serverWatchStream struct {
	lg *zap.Logger

	clusterID int64
	memberID  int64

	maxRequestBytes int

	sg        etcdserver.RaftStatusGetter
	watchable mvcc.WatchableKV
	ag        AuthGetter

	gRPCStream  pb.Watch_WatchServer
	watchStream mvcc.WatchStream
	// ctrlStream carries control responses (created/canceled) from recvLoop
	// to sendLoop so all sends happen on one goroutine.
	ctrlStream chan *pb.WatchResponse

	// mu protects progress, prevKV, fragment
	mu sync.RWMutex
	// tracks the watchID that stream might need to send progress to
	// TODO: combine progress and prevKV into a single struct?
	progress map[mvcc.WatchID]bool
	// record watch IDs that need return previous key-value pair
	prevKV map[mvcc.WatchID]bool
	// records fragmented watch IDs
	fragment map[mvcc.WatchID]bool

	// closec indicates the stream is closed.
	closec chan struct{}

	// wg waits for the send loop to complete
	wg sync.WaitGroup
}
// Watch implements the Watch RPC: it starts a send loop and a receive loop
// over the bidirectional gRPC stream and blocks until the receive side fails
// or the stream context is done, then tears the stream down via sws.close().
func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
	sws := serverWatchStream{
		lg: ws.lg,

		clusterID: ws.clusterID,
		memberID:  ws.memberID,

		maxRequestBytes: ws.maxRequestBytes,

		sg:        ws.sg,
		watchable: ws.watchable,
		ag:        ws.ag,

		gRPCStream:  stream,
		watchStream: ws.watchable.NewWatchStream(),
		// chan for sending control response like watcher created and canceled.
		ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen),

		progress: make(map[mvcc.WatchID]bool),
		prevKV:   make(map[mvcc.WatchID]bool),
		fragment: make(map[mvcc.WatchID]bool),

		closec: make(chan struct{}),
	}

	sws.wg.Add(1)
	go func() {
		sws.sendLoop()
		sws.wg.Done()
	}()

	errc := make(chan error, 1)
	// Ideally recvLoop would also use sws.wg to signal its completion
	// but when stream.Context().Done() is closed, the stream's recv
	// may continue to block since it uses a different context, leading to
	// deadlock when calling sws.close().
	go func() {
		if rerr := sws.recvLoop(); rerr != nil {
			if isClientCtxErr(stream.Context().Err(), rerr) {
				sws.lg.Debug("failed to receive watch request from gRPC stream", zap.Error(rerr))
			} else {
				sws.lg.Warn("failed to receive watch request from gRPC stream", zap.Error(rerr))
				streamFailures.WithLabelValues("receive", "watch").Inc()
			}
			errc <- rerr
		}
	}()

	select {
	case err = <-errc:
		// recvLoop failed; closing ctrlStream lets sendLoop drain and exit.
		close(sws.ctrlStream)
	case <-stream.Context().Done():
		err = stream.Context().Err()
		// the only server-side cancellation is noleader for now.
		if err == context.Canceled {
			err = rpctypes.ErrGRPCNoLeader
		}
	}

	sws.close()
	return err
}
// isWatchPermitted reports whether the stream's authenticated identity is
// allowed to watch the key range requested in wcr.
func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool {
	authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context())
	if err != nil {
		return false
	}
	if authInfo == nil {
		// If auth is enabled, passing a nil identity to IsRangePermitted()
		// can cause an error; substitute an empty AuthInfo instead.
		authInfo = &auth.AuthInfo{}
	}
	permErr := sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd)
	return permErr == nil
}
// recvLoop reads watch requests from the gRPC stream and dispatches them:
// create requests register a new watcher, cancel requests tear one down, and
// progress requests ask for an immediate progress notification. All control
// responses are queued on sws.ctrlStream for sendLoop to deliver.
func (sws *serverWatchStream) recvLoop() error {
	for {
		req, err := sws.gRPCStream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}

		switch uv := req.RequestUnion.(type) {
		case *pb.WatchRequest_CreateRequest:
			if uv.CreateRequest == nil {
				break
			}

			creq := uv.CreateRequest
			if len(creq.Key) == 0 {
				// \x00 is the smallest key
				creq.Key = []byte{0}
			}
			if len(creq.RangeEnd) == 0 {
				// force nil since watchstream.Watch distinguishes
				// between nil and []byte{} for single key / >=
				creq.RangeEnd = nil
			}
			if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 {
				// support >= key queries
				creq.RangeEnd = []byte{}
			}

			if !sws.isWatchPermitted(creq) {
				wr := &pb.WatchResponse{
					Header:       sws.newResponseHeader(sws.watchStream.Rev()),
					WatchId:      creq.WatchId,
					Canceled:     true,
					Created:      true,
					CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(),
				}

				select {
				case sws.ctrlStream <- wr:
					// BUGFIX (etcd #11708): previously the loop returned nil
					// here, closing the entire watch stream when a single
					// create request was not permitted. Only reject this
					// request and keep serving the stream's other watchers.
					continue
				case <-sws.closec:
					return nil
				}
			}

			filters := FiltersFromRequest(creq)

			wsrev := sws.watchStream.Rev()
			rev := creq.StartRevision
			if rev == 0 {
				rev = wsrev + 1
			}
			id, err := sws.watchStream.Watch(mvcc.WatchID(creq.WatchId), creq.Key, creq.RangeEnd, rev, filters...)
			if err == nil {
				sws.mu.Lock()
				if creq.ProgressNotify {
					sws.progress[id] = true
				}
				if creq.PrevKv {
					sws.prevKV[id] = true
				}
				if creq.Fragment {
					sws.fragment[id] = true
				}
				sws.mu.Unlock()
			}
			wr := &pb.WatchResponse{
				Header:   sws.newResponseHeader(wsrev),
				WatchId:  int64(id),
				Created:  true,
				Canceled: err != nil,
			}
			if err != nil {
				wr.CancelReason = err.Error()
			}
			select {
			case sws.ctrlStream <- wr:
			case <-sws.closec:
				return nil
			}

		case *pb.WatchRequest_CancelRequest:
			if uv.CancelRequest != nil {
				id := uv.CancelRequest.WatchId
				err := sws.watchStream.Cancel(mvcc.WatchID(id))
				if err == nil {
					sws.ctrlStream <- &pb.WatchResponse{
						Header:   sws.newResponseHeader(sws.watchStream.Rev()),
						WatchId:  id,
						Canceled: true,
					}
					sws.mu.Lock()
					delete(sws.progress, mvcc.WatchID(id))
					delete(sws.prevKV, mvcc.WatchID(id))
					delete(sws.fragment, mvcc.WatchID(id))
					sws.mu.Unlock()
				}
			}

		case *pb.WatchRequest_ProgressRequest:
			if uv.ProgressRequest != nil {
				sws.ctrlStream <- &pb.WatchResponse{
					Header:  sws.newResponseHeader(sws.watchStream.Rev()),
					WatchId: -1, // response is not associated with any WatchId and will be broadcast to all watch channels
				}
			}

		default:
			// we probably should not shutdown the entire stream when
			// receive an valid command.
			// so just do nothing instead.
			continue
		}
	}
}
// sendLoop is the single writer to the gRPC stream: it forwards watch events
// from the mvcc watch stream, control responses from recvLoop, and periodic
// progress notifications, until the stream or event channel closes.
func (sws *serverWatchStream) sendLoop() {
	// watch ids that are currently active
	ids := make(map[mvcc.WatchID]struct{})
	// watch responses pending on a watch id creation message
	pending := make(map[mvcc.WatchID][]*pb.WatchResponse)

	interval := GetProgressReportInterval()
	progressTicker := time.NewTicker(interval)

	defer func() {
		progressTicker.Stop()
		// drain the chan to clean up pending events
		for ws := range sws.watchStream.Chan() {
			mvcc.ReportEventReceived(len(ws.Events))
		}
		for _, wrs := range pending {
			for _, ws := range wrs {
				mvcc.ReportEventReceived(len(ws.Events))
			}
		}
	}()

	for {
		select {
		case wresp, ok := <-sws.watchStream.Chan():
			if !ok {
				return
			}

			// TODO: evs is []mvccpb.Event type
			// either return []*mvccpb.Event from the mvcc package
			// or define protocol buffer with []mvccpb.Event.
			evs := wresp.Events
			events := make([]*mvccpb.Event, len(evs))
			sws.mu.RLock()
			needPrevKV := sws.prevKV[wresp.WatchID]
			sws.mu.RUnlock()
			for i := range evs {
				events[i] = &evs[i]
				if needPrevKV {
					// look up the value just before this modification
					opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1}
					r, err := sws.watchable.Range(evs[i].Kv.Key, nil, opt)
					if err == nil && len(r.KVs) != 0 {
						events[i].PrevKv = &(r.KVs[0])
					}
				}
			}

			canceled := wresp.CompactRevision != 0
			wr := &pb.WatchResponse{
				Header:          sws.newResponseHeader(wresp.Revision),
				WatchId:         int64(wresp.WatchID),
				Events:          events,
				CompactRevision: wresp.CompactRevision,
				Canceled:        canceled,
			}

			if _, okID := ids[wresp.WatchID]; !okID {
				// buffer if id not yet announced
				wrs := append(pending[wresp.WatchID], wr)
				pending[wresp.WatchID] = wrs
				continue
			}

			mvcc.ReportEventReceived(len(evs))

			sws.mu.RLock()
			fragmented, ok := sws.fragment[wresp.WatchID]
			sws.mu.RUnlock()

			var serr error
			// fragment the response only when the watcher opted in
			if !fragmented && !ok {
				serr = sws.gRPCStream.Send(wr)
			} else {
				serr = sendFragments(wr, sws.maxRequestBytes, sws.gRPCStream.Send)
			}

			if serr != nil {
				if isClientCtxErr(sws.gRPCStream.Context().Err(), serr) {
					sws.lg.Debug("failed to send watch response to gRPC stream", zap.Error(serr))
				} else {
					sws.lg.Warn("failed to send watch response to gRPC stream", zap.Error(serr))
					streamFailures.WithLabelValues("send", "watch").Inc()
				}
				return
			}

			sws.mu.Lock()
			if len(evs) > 0 && sws.progress[wresp.WatchID] {
				// elide next progress update if sent a key update
				sws.progress[wresp.WatchID] = false
			}
			sws.mu.Unlock()

		case c, ok := <-sws.ctrlStream:
			if !ok {
				return
			}

			if err := sws.gRPCStream.Send(c); err != nil {
				if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
					sws.lg.Debug("failed to send watch control response to gRPC stream", zap.Error(err))
				} else {
					sws.lg.Warn("failed to send watch control response to gRPC stream", zap.Error(err))
					streamFailures.WithLabelValues("send", "watch").Inc()
				}
				return
			}

			// track id creation
			wid := mvcc.WatchID(c.WatchId)
			if c.Canceled {
				delete(ids, wid)
				continue
			}
			if c.Created {
				// flush buffered events
				ids[wid] = struct{}{}
				for _, v := range pending[wid] {
					mvcc.ReportEventReceived(len(v.Events))
					if err := sws.gRPCStream.Send(v); err != nil {
						if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
							sws.lg.Debug("failed to send pending watch response to gRPC stream", zap.Error(err))
						} else {
							sws.lg.Warn("failed to send pending watch response to gRPC stream", zap.Error(err))
							streamFailures.WithLabelValues("send", "watch").Inc()
						}
						return
					}
				}
				delete(pending, wid)
			}

		case <-progressTicker.C:
			sws.mu.Lock()
			for id, ok := range sws.progress {
				if ok {
					sws.watchStream.RequestProgress(id)
				}
				// re-arm progress for the next tick
				sws.progress[id] = true
			}
			sws.mu.Unlock()

		case <-sws.closec:
			return
		}
	}
}
// sendFragments splits wr into multiple responses, each under maxRequestBytes
// (except that every fragment carries at least one event), and sends them via
// sendFunc. All fragments but the last have Fragment set.
func sendFragments(
	wr *pb.WatchResponse,
	maxRequestBytes int,
	sendFunc func(*pb.WatchResponse) error) error {
	// no need to fragment if total request size is smaller
	// than max request limit or response contains only one event
	if wr.Size() < maxRequestBytes || len(wr.Events) < 2 {
		return sendFunc(wr)
	}

	// template response sharing wr's header/ids but with its own event slice
	ow := *wr
	ow.Events = make([]*mvccpb.Event, 0)
	ow.Fragment = true

	var idx int
	for {
		cur := ow
		for _, ev := range wr.Events[idx:] {
			cur.Events = append(cur.Events, ev)
			if len(cur.Events) > 1 && cur.Size() >= maxRequestBytes {
				// drop the event that pushed us over the limit;
				// it starts the next fragment
				cur.Events = cur.Events[:len(cur.Events)-1]
				break
			}
			idx++
		}
		if idx == len(wr.Events) {
			// last response has no more fragment
			cur.Fragment = false
		}
		if err := sendFunc(&cur); err != nil {
			return err
		}
		if !cur.Fragment {
			break
		}
	}
	return nil
}
// close shuts down the watch stream and blocks until sendLoop has exited.
func (sws *serverWatchStream) close() {
	sws.watchStream.Close()
	close(sws.closec)
	sws.wg.Wait()
}
// newResponseHeader builds a response header for the given revision, stamped
// with this member's cluster/member ids and the current raft term.
func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {
	return &pb.ResponseHeader{
		ClusterId: uint64(sws.clusterID),
		MemberId:  uint64(sws.memberID),
		Revision:  rev,
		RaftTerm:  sws.sg.Term(),
	}
}
// filterNoDelete matches DELETE events; used by the NODELETE filter so that
// delete events get filtered out of watch responses.
func filterNoDelete(e mvccpb.Event) bool {
	return e.Type == mvccpb.DELETE
}

// filterNoPut matches PUT events; used by the NOPUT filter so that put events
// get filtered out of watch responses.
func filterNoPut(e mvccpb.Event) bool {
	return e.Type == mvccpb.PUT
}
// FiltersFromRequest returns "mvcc.FilterFunc" from a given watch create request.
func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc {
	filters := make([]mvcc.FilterFunc, 0, len(creq.Filters))
	for _, ft := range creq.Filters {
		if ft == pb.WatchCreateRequest_NOPUT {
			filters = append(filters, filterNoPut)
		} else if ft == pb.WatchCreateRequest_NODELETE {
			filters = append(filters, filterNoDelete)
		}
		// unrecognized filter types are silently ignored
	}
	return filters
}
etcdserver: watch stream got closed once one request is not permitted (#11708)
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3rpc
import (
"context"
"io"
"math/rand"
"sync"
"time"
"go.etcd.io/etcd/auth"
"go.etcd.io/etcd/etcdserver"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/mvcc"
"go.etcd.io/etcd/mvcc/mvccpb"
"go.uber.org/zap"
)
// watchServer implements pb.WatchServer on top of an mvcc.WatchableKV.
type watchServer struct {
	lg *zap.Logger

	// identity copied into every response header
	clusterID int64
	memberID  int64

	// upper bound on an outgoing response before it must be fragmented
	maxRequestBytes int

	sg        etcdserver.RaftStatusGetter
	watchable mvcc.WatchableKV
	ag        AuthGetter
}
// NewWatchServer returns a new watch server backed by the given etcd server.
func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
	// fall back to a no-op logger when none is configured
	lg := s.Cfg.Logger
	if lg == nil {
		lg = zap.NewNop()
	}
	return &watchServer{
		lg:              lg,
		clusterID:       int64(s.Cluster().ID()),
		memberID:        int64(s.ID()),
		maxRequestBytes: int(s.Cfg.MaxRequestBytes + grpcOverheadBytes),
		sg:              s,
		watchable:       s.Watchable(),
		ag:              s,
	}
}
var (
	// progressReportInterval is how often progress notifications are sent to
	// watchers that requested them.
	// External test can read this with GetProgressReportInterval()
	// and change this to a small value to finish fast with
	// SetProgressReportInterval().
	progressReportInterval = 10 * time.Minute
	// progressReportIntervalMu guards progressReportInterval.
	progressReportIntervalMu sync.RWMutex
)
// GetProgressReportInterval returns the current progress report interval (for testing).
func GetProgressReportInterval() time.Duration {
	progressReportIntervalMu.RLock()
	defer progressReportIntervalMu.RUnlock()

	// Jitter the interval by up to a tenth so that etcdserver does not send
	// progress notifications to all watchers at the same moment, even when
	// the watchers were created around the same time (which is common when a
	// client restarts itself).
	jitter := time.Duration(rand.Int63n(int64(progressReportInterval) / 10))
	return progressReportInterval + jitter
}
// SetProgressReportInterval updates the current progress report interval (for testing).
func SetProgressReportInterval(newTimeout time.Duration) {
	progressReportIntervalMu.Lock()
	defer progressReportIntervalMu.Unlock()
	progressReportInterval = newTimeout
}
// We send ctrl response inside the read loop. We do not want
// send to block read, but we still want ctrl response we sent to
// be serialized. Thus we use a buffered chan to solve the problem.
// A small buffer should be OK for most cases, since we expect the
// ctrl requests are infrequent.
const ctrlStreamBufLen = 16
// serverWatchStream is an etcd server side stream. It receives requests
// from client side gRPC stream. It receives watch events from mvcc.WatchStream,
// and creates responses that forwarded to gRPC stream.
// It also forwards control message like watch created and canceled.
type serverWatchStream struct {
	lg *zap.Logger

	clusterID int64
	memberID  int64

	maxRequestBytes int

	sg        etcdserver.RaftStatusGetter
	watchable mvcc.WatchableKV
	ag        AuthGetter

	gRPCStream  pb.Watch_WatchServer
	watchStream mvcc.WatchStream
	// ctrlStream carries control responses (created/canceled) from recvLoop
	// to sendLoop so all sends happen on one goroutine.
	ctrlStream chan *pb.WatchResponse

	// mu protects progress, prevKV, fragment
	mu sync.RWMutex
	// tracks the watchID that stream might need to send progress to
	// TODO: combine progress and prevKV into a single struct?
	progress map[mvcc.WatchID]bool
	// record watch IDs that need return previous key-value pair
	prevKV map[mvcc.WatchID]bool
	// records fragmented watch IDs
	fragment map[mvcc.WatchID]bool

	// closec indicates the stream is closed.
	closec chan struct{}

	// wg waits for the send loop to complete
	wg sync.WaitGroup
}
// Watch implements the Watch RPC: it starts a send loop and a receive loop
// over the bidirectional gRPC stream and blocks until the receive side fails
// or the stream context is done, then tears the stream down via sws.close().
func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
	sws := serverWatchStream{
		lg: ws.lg,

		clusterID: ws.clusterID,
		memberID:  ws.memberID,

		maxRequestBytes: ws.maxRequestBytes,

		sg:        ws.sg,
		watchable: ws.watchable,
		ag:        ws.ag,

		gRPCStream:  stream,
		watchStream: ws.watchable.NewWatchStream(),
		// chan for sending control response like watcher created and canceled.
		ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen),

		progress: make(map[mvcc.WatchID]bool),
		prevKV:   make(map[mvcc.WatchID]bool),
		fragment: make(map[mvcc.WatchID]bool),

		closec: make(chan struct{}),
	}

	sws.wg.Add(1)
	go func() {
		sws.sendLoop()
		sws.wg.Done()
	}()

	errc := make(chan error, 1)
	// Ideally recvLoop would also use sws.wg to signal its completion
	// but when stream.Context().Done() is closed, the stream's recv
	// may continue to block since it uses a different context, leading to
	// deadlock when calling sws.close().
	go func() {
		if rerr := sws.recvLoop(); rerr != nil {
			if isClientCtxErr(stream.Context().Err(), rerr) {
				sws.lg.Debug("failed to receive watch request from gRPC stream", zap.Error(rerr))
			} else {
				sws.lg.Warn("failed to receive watch request from gRPC stream", zap.Error(rerr))
				streamFailures.WithLabelValues("receive", "watch").Inc()
			}
			errc <- rerr
		}
	}()

	select {
	case err = <-errc:
		// recvLoop failed; closing ctrlStream lets sendLoop drain and exit.
		close(sws.ctrlStream)
	case <-stream.Context().Done():
		err = stream.Context().Err()
		// the only server-side cancellation is noleader for now.
		if err == context.Canceled {
			err = rpctypes.ErrGRPCNoLeader
		}
	}

	sws.close()
	return err
}
// isWatchPermitted reports whether the stream's authenticated identity is
// allowed to watch the key range requested in wcr.
func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool {
	authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context())
	if err != nil {
		return false
	}
	if authInfo == nil {
		// If auth is enabled, passing a nil identity to IsRangePermitted()
		// can cause an error; substitute an empty AuthInfo instead.
		authInfo = &auth.AuthInfo{}
	}
	permErr := sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd)
	return permErr == nil
}
// recvLoop reads watch requests from the gRPC stream and dispatches them:
// create requests register a new watcher, cancel requests tear one down, and
// progress requests ask for an immediate progress notification. All control
// responses are queued on sws.ctrlStream for sendLoop to deliver.
func (sws *serverWatchStream) recvLoop() error {
	for {
		req, err := sws.gRPCStream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}

		switch uv := req.RequestUnion.(type) {
		case *pb.WatchRequest_CreateRequest:
			if uv.CreateRequest == nil {
				break
			}

			creq := uv.CreateRequest
			if len(creq.Key) == 0 {
				// \x00 is the smallest key
				creq.Key = []byte{0}
			}
			if len(creq.RangeEnd) == 0 {
				// force nil since watchstream.Watch distinguishes
				// between nil and []byte{} for single key / >=
				creq.RangeEnd = nil
			}
			if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 {
				// support >= key queries
				creq.RangeEnd = []byte{}
			}

			if !sws.isWatchPermitted(creq) {
				wr := &pb.WatchResponse{
					Header:       sws.newResponseHeader(sws.watchStream.Rev()),
					WatchId:      creq.WatchId,
					Canceled:     true,
					Created:      true,
					CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(),
				}

				select {
				case sws.ctrlStream <- wr:
					// only this watch request is rejected; the stream and
					// its other watchers stay alive (etcd #11708)
					continue
				case <-sws.closec:
					return nil
				}
			}

			filters := FiltersFromRequest(creq)

			wsrev := sws.watchStream.Rev()
			rev := creq.StartRevision
			if rev == 0 {
				// zero start revision means "from the next revision"
				rev = wsrev + 1
			}
			id, err := sws.watchStream.Watch(mvcc.WatchID(creq.WatchId), creq.Key, creq.RangeEnd, rev, filters...)
			if err == nil {
				sws.mu.Lock()
				if creq.ProgressNotify {
					sws.progress[id] = true
				}
				if creq.PrevKv {
					sws.prevKV[id] = true
				}
				if creq.Fragment {
					sws.fragment[id] = true
				}
				sws.mu.Unlock()
			}
			wr := &pb.WatchResponse{
				Header:   sws.newResponseHeader(wsrev),
				WatchId:  int64(id),
				Created:  true,
				Canceled: err != nil,
			}
			if err != nil {
				wr.CancelReason = err.Error()
			}
			select {
			case sws.ctrlStream <- wr:
			case <-sws.closec:
				return nil
			}

		case *pb.WatchRequest_CancelRequest:
			if uv.CancelRequest != nil {
				id := uv.CancelRequest.WatchId
				err := sws.watchStream.Cancel(mvcc.WatchID(id))
				if err == nil {
					sws.ctrlStream <- &pb.WatchResponse{
						Header:   sws.newResponseHeader(sws.watchStream.Rev()),
						WatchId:  id,
						Canceled: true,
					}
					sws.mu.Lock()
					delete(sws.progress, mvcc.WatchID(id))
					delete(sws.prevKV, mvcc.WatchID(id))
					delete(sws.fragment, mvcc.WatchID(id))
					sws.mu.Unlock()
				}
			}

		case *pb.WatchRequest_ProgressRequest:
			if uv.ProgressRequest != nil {
				sws.ctrlStream <- &pb.WatchResponse{
					Header:  sws.newResponseHeader(sws.watchStream.Rev()),
					WatchId: -1, // response is not associated with any WatchId and will be broadcast to all watch channels
				}
			}

		default:
			// we probably should not shutdown the entire stream when
			// receive an valid command.
			// so just do nothing instead.
			continue
		}
	}
}
// sendLoop is the single writer to the gRPC stream: it forwards watch events
// from the mvcc watch stream, control responses from recvLoop, and periodic
// progress notifications, until the stream or event channel closes.
func (sws *serverWatchStream) sendLoop() {
	// watch ids that are currently active
	ids := make(map[mvcc.WatchID]struct{})
	// watch responses pending on a watch id creation message
	pending := make(map[mvcc.WatchID][]*pb.WatchResponse)

	interval := GetProgressReportInterval()
	progressTicker := time.NewTicker(interval)

	defer func() {
		progressTicker.Stop()
		// drain the chan to clean up pending events
		for ws := range sws.watchStream.Chan() {
			mvcc.ReportEventReceived(len(ws.Events))
		}
		for _, wrs := range pending {
			for _, ws := range wrs {
				mvcc.ReportEventReceived(len(ws.Events))
			}
		}
	}()

	for {
		select {
		case wresp, ok := <-sws.watchStream.Chan():
			if !ok {
				return
			}

			// TODO: evs is []mvccpb.Event type
			// either return []*mvccpb.Event from the mvcc package
			// or define protocol buffer with []mvccpb.Event.
			evs := wresp.Events
			events := make([]*mvccpb.Event, len(evs))
			sws.mu.RLock()
			needPrevKV := sws.prevKV[wresp.WatchID]
			sws.mu.RUnlock()
			for i := range evs {
				events[i] = &evs[i]
				if needPrevKV {
					// look up the value just before this modification
					opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1}
					r, err := sws.watchable.Range(evs[i].Kv.Key, nil, opt)
					if err == nil && len(r.KVs) != 0 {
						events[i].PrevKv = &(r.KVs[0])
					}
				}
			}

			canceled := wresp.CompactRevision != 0
			wr := &pb.WatchResponse{
				Header:          sws.newResponseHeader(wresp.Revision),
				WatchId:         int64(wresp.WatchID),
				Events:          events,
				CompactRevision: wresp.CompactRevision,
				Canceled:        canceled,
			}

			if _, okID := ids[wresp.WatchID]; !okID {
				// buffer if id not yet announced
				wrs := append(pending[wresp.WatchID], wr)
				pending[wresp.WatchID] = wrs
				continue
			}

			mvcc.ReportEventReceived(len(evs))

			sws.mu.RLock()
			fragmented, ok := sws.fragment[wresp.WatchID]
			sws.mu.RUnlock()

			var serr error
			// fragment the response only when the watcher opted in
			if !fragmented && !ok {
				serr = sws.gRPCStream.Send(wr)
			} else {
				serr = sendFragments(wr, sws.maxRequestBytes, sws.gRPCStream.Send)
			}

			if serr != nil {
				if isClientCtxErr(sws.gRPCStream.Context().Err(), serr) {
					sws.lg.Debug("failed to send watch response to gRPC stream", zap.Error(serr))
				} else {
					sws.lg.Warn("failed to send watch response to gRPC stream", zap.Error(serr))
					streamFailures.WithLabelValues("send", "watch").Inc()
				}
				return
			}

			sws.mu.Lock()
			if len(evs) > 0 && sws.progress[wresp.WatchID] {
				// elide next progress update if sent a key update
				sws.progress[wresp.WatchID] = false
			}
			sws.mu.Unlock()

		case c, ok := <-sws.ctrlStream:
			if !ok {
				return
			}

			if err := sws.gRPCStream.Send(c); err != nil {
				if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
					sws.lg.Debug("failed to send watch control response to gRPC stream", zap.Error(err))
				} else {
					sws.lg.Warn("failed to send watch control response to gRPC stream", zap.Error(err))
					streamFailures.WithLabelValues("send", "watch").Inc()
				}
				return
			}

			// track id creation
			wid := mvcc.WatchID(c.WatchId)
			if c.Canceled {
				delete(ids, wid)
				continue
			}
			if c.Created {
				// flush buffered events
				ids[wid] = struct{}{}
				for _, v := range pending[wid] {
					mvcc.ReportEventReceived(len(v.Events))
					if err := sws.gRPCStream.Send(v); err != nil {
						if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
							sws.lg.Debug("failed to send pending watch response to gRPC stream", zap.Error(err))
						} else {
							sws.lg.Warn("failed to send pending watch response to gRPC stream", zap.Error(err))
							streamFailures.WithLabelValues("send", "watch").Inc()
						}
						return
					}
				}
				delete(pending, wid)
			}

		case <-progressTicker.C:
			sws.mu.Lock()
			for id, ok := range sws.progress {
				if ok {
					sws.watchStream.RequestProgress(id)
				}
				// re-arm progress for the next tick
				sws.progress[id] = true
			}
			sws.mu.Unlock()

		case <-sws.closec:
			return
		}
	}
}
// sendFragments splits wr into multiple responses, each under maxRequestBytes
// (except that every fragment carries at least one event), and sends them via
// sendFunc. All fragments but the last have Fragment set.
func sendFragments(
	wr *pb.WatchResponse,
	maxRequestBytes int,
	sendFunc func(*pb.WatchResponse) error) error {
	// no need to fragment if total request size is smaller
	// than max request limit or response contains only one event
	if wr.Size() < maxRequestBytes || len(wr.Events) < 2 {
		return sendFunc(wr)
	}

	// template response sharing wr's header/ids but with its own event slice
	ow := *wr
	ow.Events = make([]*mvccpb.Event, 0)
	ow.Fragment = true

	var idx int
	for {
		cur := ow
		for _, ev := range wr.Events[idx:] {
			cur.Events = append(cur.Events, ev)
			if len(cur.Events) > 1 && cur.Size() >= maxRequestBytes {
				// drop the event that pushed us over the limit;
				// it starts the next fragment
				cur.Events = cur.Events[:len(cur.Events)-1]
				break
			}
			idx++
		}
		if idx == len(wr.Events) {
			// last response has no more fragment
			cur.Fragment = false
		}
		if err := sendFunc(&cur); err != nil {
			return err
		}
		if !cur.Fragment {
			break
		}
	}
	return nil
}
// close shuts down the watch stream and blocks until sendLoop has exited.
func (sws *serverWatchStream) close() {
	sws.watchStream.Close()
	close(sws.closec)
	sws.wg.Wait()
}
// newResponseHeader builds a response header for the given revision, stamped
// with this member's cluster/member ids and the current raft term.
func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {
	return &pb.ResponseHeader{
		ClusterId: uint64(sws.clusterID),
		MemberId:  uint64(sws.memberID),
		Revision:  rev,
		RaftTerm:  sws.sg.Term(),
	}
}
// filterNoDelete matches DELETE events; used by the NODELETE filter so that
// delete events get filtered out of watch responses.
func filterNoDelete(e mvccpb.Event) bool {
	return e.Type == mvccpb.DELETE
}

// filterNoPut matches PUT events; used by the NOPUT filter so that put events
// get filtered out of watch responses.
func filterNoPut(e mvccpb.Event) bool {
	return e.Type == mvccpb.PUT
}
// FiltersFromRequest returns "mvcc.FilterFunc" from a given watch create request.
func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc {
	filters := make([]mvcc.FilterFunc, 0, len(creq.Filters))
	for _, ft := range creq.Filters {
		if ft == pb.WatchCreateRequest_NOPUT {
			filters = append(filters, filterNoPut)
		} else if ft == pb.WatchCreateRequest_NODELETE {
			filters = append(filters, filterNoDelete)
		}
		// unrecognized filter types are silently ignored
	}
	return filters
}
|
package notifier
import (
"fmt"
"sort"
"strings"
"sync"
"time"
"github.com/jinzhu/gorm"
"github.com/oinume/lekcije/server/config"
"github.com/oinume/lekcije/server/emailer"
"github.com/oinume/lekcije/server/errors"
"github.com/oinume/lekcije/server/fetcher"
"github.com/oinume/lekcije/server/logger"
"github.com/oinume/lekcije/server/model"
"github.com/oinume/lekcije/server/stopwatch"
"github.com/oinume/lekcije/server/util"
"github.com/uber-go/zap"
)
// Notifier fetches the lesson schedules of teachers that users follow and
// notifies each user by email of lessons that newly became available.
type Notifier struct {
	db              *gorm.DB
	fetcher         *fetcher.TeacherLessonFetcher
	dryRun          bool // when true, fetched lessons are not persisted on Close
	lessonService   *model.LessonService
	teachers        map[uint32]*model.Teacher  // teacherID -> teacher, filled during SendNotification
	fetchedLessons  map[uint32][]*model.Lesson // teacherID -> lessons fetched so far
	sender          emailer.Sender
	senderWaitGroup *sync.WaitGroup // tracks in-flight email-sending goroutines
	stopwatch       stopwatch.Stopwatch
	sync.Mutex      // guards teachers and fetchedLessons
}
// NewNotifier returns a Notifier that fetches via fetcher and sends mail via
// sender. The stopwatch is started immediately so later marks are measured
// from construction time.
func NewNotifier(db *gorm.DB, fetcher *fetcher.TeacherLessonFetcher, dryRun bool, sender emailer.Sender) *Notifier {
	return &Notifier{
		db:              db,
		fetcher:         fetcher,
		dryRun:          dryRun,
		teachers:        make(map[uint32]*model.Teacher, 1000),
		fetchedLessons:  make(map[uint32][]*model.Lesson, 1000),
		sender:          sender,
		senderWaitGroup: &sync.WaitGroup{},
		stopwatch:       stopwatch.NewSync().Start(),
	}
}
// SendNotification looks up the teachers user follows, fetches each teacher's
// latest schedule concurrently (one goroutine per teacher), and emails the
// user about lessons that newly became available since the previous fetch.
func (n *Notifier) SendNotification(user *model.User) error {
	followingTeacherService := model.NewFollowingTeacherService(n.db)
	n.lessonService = model.NewLessonService(n.db)
	// Teachers whose fetch has failed this many times are excluded.
	const maxFetchErrorCount = 5
	teacherIDs, err := followingTeacherService.FindTeacherIDsByUserID(user.ID, maxFetchErrorCount)
	if err != nil {
		return errors.Wrapperf(err, "Failed to FindTeacherIDsByUserID(): userID=%v", user.ID)
	}
	n.stopwatch.Mark(fmt.Sprintf("FindTeacherIDsByUserID:%d", user.ID))
	if len(teacherIDs) == 0 {
		return nil
	}
	logger.App.Info(
		"Target teachers",
		zap.Uint("userID", uint(user.ID)),
		zap.String("teacherIDs", strings.Join(util.Uint32ToStringSlice(teacherIDs...), ",")),
	)

	// Written only while holding n.Lock inside the fetch goroutines.
	availableLessonsPerTeacher := make(map[uint32][]*model.Lesson, 1000)
	wg := &sync.WaitGroup{}
	for _, teacherID := range teacherIDs {
		wg.Add(1)
		go func(teacherID uint32) {
			defer n.stopwatch.Mark(fmt.Sprintf("fetchAndExtractNewAvailableLessons:%d", teacherID))
			defer wg.Done()
			teacher, fetchedLessons, newAvailableLessons, err := n.fetchAndExtractNewAvailableLessons(teacherID)
			if err != nil {
				switch err.(type) {
				case *errors.NotFound:
					if err := model.NewTeacherService(n.db).IncrementFetchErrorCount(teacherID, 1); err != nil {
						logger.App.Error(
							"IncrementFetchErrorCount failed",
							zap.Uint("teacherID", uint(teacherID)), zap.Error(err),
						)
					}
					logger.App.Warn("Cannot find teacher", zap.Uint("teacherID", uint(teacherID)))
					// TODO: Handle a case eikaiwa.dmm.com is down
				default:
					logger.App.Error("Cannot fetch teacher", zap.Uint("teacherID", uint(teacherID)), zap.Error(err))
				}
				return
			}
			n.Lock()
			defer n.Unlock()
			n.teachers[teacherID] = teacher
			if _, ok := n.fetchedLessons[teacherID]; !ok {
				n.fetchedLessons[teacherID] = make([]*model.Lesson, 0, 5000)
			}
			n.fetchedLessons[teacherID] = append(n.fetchedLessons[teacherID], fetchedLessons...)
			if len(newAvailableLessons) > 0 {
				availableLessonsPerTeacher[teacherID] = newAvailableLessons
			}
		}(teacherID)
		// NOTE: a dead `if err != nil { return err }` was removed here — err is
		// the FindTeacherIDsByUserID error, already checked above and never
		// reassigned inside this loop (the goroutine declares its own err).
	}
	wg.Wait()
	if err := n.sendNotificationToUser(user, availableLessonsPerTeacher); err != nil {
		return err
	}
	// Pause between users — presumably to throttle load on the fetched
	// site; TODO confirm intent.
	time.Sleep(200 * time.Millisecond)
	n.stopwatch.Mark("sleep")
	return nil
}
// fetchAndExtractNewAvailableLessons fetches the teacher's current schedule
// and diffs it against the lessons stored from the previous fetch.
// Returns teacher, fetchedLessons, newAvailableLessons, error
func (n *Notifier) fetchAndExtractNewAvailableLessons(teacherID uint32) (
	*model.Teacher, []*model.Lesson, []*model.Lesson, error,
) {
	teacher, fetchedLessons, err := n.fetcher.Fetch(teacherID)
	if err != nil {
		return nil, nil, nil, err
	}
	logger.App.Debug(
		"fetcher.Fetch",
		zap.Uint("teacherID", uint(teacher.ID)),
		zap.Int("lessons", len(fetchedLessons)),
	)
	//fmt.Printf("fetchedLessons ---\n")
	//for _, l := range fetchedLessons {
	//	fmt.Printf("teacherID=%v, datetime=%v, status=%v\n", l.TeacherId, l.Datetime, l.Status)
	//}
	// Compare only lessons from the start of today through six days ahead,
	// in the configured local timezone.
	now := time.Now().In(config.LocalTimezone())
	fromDate := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, config.LocalTimezone())
	toDate := fromDate.Add(24 * 6 * time.Hour)
	lastFetchedLessons, err := n.lessonService.FindLessons(teacher.ID, fromDate, toDate)
	if err != nil {
		return nil, nil, nil, err
	}
	//fmt.Printf("lastFetchedLessons ---\n")
	//for _, l := range lastFetchedLessons {
	//	fmt.Printf("teacherID=%v, datetime=%v, status=%v\n", l.TeacherId, l.Datetime, l.Status)
	//}
	newAvailableLessons := n.lessonService.GetNewAvailableLessons(lastFetchedLessons, fetchedLessons)
	//fmt.Printf("newAvailableLessons ---\n")
	//for _, l := range newAvailableLessons {
	//	fmt.Printf("teacherID=%v, datetime=%v, status=%v\n", l.TeacherId, l.Datetime, l.Status)
	//}
	return teacher, fetchedLessons, newAvailableLessons, nil
}
// sendNotificationToUser renders the notification email listing the newly
// available lessons per teacher and sends it asynchronously. Returns nil
// without sending anything when there are no lessons to report.
func (n *Notifier) sendNotificationToUser(
	user *model.User,
	lessonsPerTeacher map[uint32][]*model.Lesson,
) error {
	lessonsCount := 0
	var teacherIDs []int
	for teacherID, lessons := range lessonsPerTeacher {
		teacherIDs = append(teacherIDs, int(teacherID))
		lessonsCount += len(lessons)
	}
	if lessonsCount == 0 {
		// Don't send notification
		return nil
	}
	// Sort IDs so teachers appear in the email in a deterministic order.
	sort.Ints(teacherIDs)
	var teacherIDs2 []uint32
	var teacherNames []string
	for _, id := range teacherIDs {
		teacherIDs2 = append(teacherIDs2, uint32(id))
		teacherNames = append(teacherNames, n.teachers[uint32(id)].Name)
	}
	// TODO: getEmailTemplate as a static file
	t := emailer.NewTemplate("notifier", getEmailTemplateJP())
	data := struct {
		To                string
		TeacherNames      string
		TeacherIDs        []uint32
		Teachers          map[uint32]*model.Teacher
		LessonsPerTeacher map[uint32][]*model.Lesson
		WebURL            string
	}{
		To:                user.Email,
		TeacherNames:      strings.Join(teacherNames, ", "),
		TeacherIDs:        teacherIDs2,
		Teachers:          n.teachers,
		LessonsPerTeacher: lessonsPerTeacher,
		WebURL:            config.WebURL(),
	}
	email, err := emailer.NewEmailFromTemplate(t, data)
	if err != nil {
		return errors.InternalWrapf(err, "Failed to create emailer.Email from template: to=%v", user.Email)
	}
	email.SetCustomArg("user_id", fmt.Sprint(user.ID))
	email.SetCustomArg("teacher_ids", strings.Join(util.Uint32ToStringSlice(teacherIDs2...), ","))
	//fmt.Printf("--- mail ---\n%s", email.BodyString())
	n.stopwatch.Mark("emailer.NewEmailFromTemplate")
	logger.App.Info("sendNotificationToUser", zap.String("email", user.Email))
	// Send asynchronously; Close() waits on senderWaitGroup before returning.
	n.senderWaitGroup.Add(1)
	go func(email *emailer.Email) {
		defer n.stopwatch.Mark(fmt.Sprintf("sender.Send:%d", user.ID))
		defer n.senderWaitGroup.Done()
		if err := n.sender.Send(email); err != nil {
			logger.App.Error(
				"Failed to sendNotificationToUser",
				zap.String("email", user.Email), zap.Error(err),
			)
		}
	}(email)
	return nil
	// return n.sender.Send(email)
}
// getEmailTemplateJP returns the Japanese notification email template:
// mail-header-style lines (From/To/Subject/Body) followed by an HTML body
// listing each teacher's newly available lessons with reservation links.
// TODO: load this from a static file instead of embedding it in code.
func getEmailTemplateJP() string {
	return strings.TrimSpace(`
From: lekcije <lekcije@lekcije.com>
To: {{ .To }}
Subject: {{ .TeacherNames }}の空きレッスンがあります
Body: text/html
{{ range $teacherID := .TeacherIDs }}
{{- $teacher := index $.Teachers $teacherID -}}
--- {{ $teacher.Name }} ---
{{- $lessons := index $.LessonsPerTeacher $teacherID }}
{{- range $lesson := $lessons }}
{{ $lesson.Datetime.Format "2006-01-02 15:04" }}
{{- end }}
レッスンの予約はこちらから:
<a href="http://eikaiwa.dmm.com/teacher/index/{{ $teacherID }}/">PC</a>
<a href="http://eikaiwa.dmm.com/teacher/schedule/{{ $teacherID }}/">Mobile</a>
{{ end }}
空きレッスンの通知の解除は<a href="{{ .WebURL }}/me">こちら</a>
<a href="https://goo.gl/forms/CIGO3kpiQCGjtFD42">お問い合わせ</a>
`)
}
//func getEmailTemplateEN() string {
// return strings.TrimSpace(`
//{{- range $teacherID := .TeacherIDs }}
//{{- $teacher := index $.Teachers $teacherID -}}
//--- {{ $teacher.Name }} ---
// {{- $lessons := index $.LessonsPerTeacher $teacherID }}
// {{- range $lesson := $lessons }}
//{{ $lesson.Datetime.Format "2006-01-02 15:04" }}
// {{- end }}
//
//Reserve here:
//<a href="http://eikaiwa.dmm.com/teacher/index/{{ $teacherID }}/">PC</a>
//<a href="http://eikaiwa.dmm.com/teacher/schedule/{{ $teacherID }}/">Mobile</a>
//{{ end }}
//Click <a href="{{ .WebURL }}/me">here</a> if you want to stop notification of the teacher.
// `)
//}
// Close finalizes a notification run: it waits for all in-flight emails,
// then — via the deferred calls below, which run in reverse order of
// registration — stops the stopwatch, persists fetched lessons (unless
// dry-run), and finally closes the fetcher.
func (n *Notifier) Close() {
	n.senderWaitGroup.Wait()
	defer n.fetcher.Close() // runs last
	defer func() {
		if n.dryRun {
			return
		}
		for teacherID, lessons := range n.fetchedLessons {
			if _, err := n.lessonService.UpdateLessons(lessons); err != nil {
				// fixed typo in the log message: "ocurred" -> "occurred"
				logger.App.Error(
					"An error occurred in Notifier.Close",
					zap.Error(err), zap.Uint("teacherID", uint(teacherID)),
				)
			}
		}
	}()
	defer func() { // runs first
		n.stopwatch.Stop()
		//logger.App.Info("Stopwatch report", zap.String("report", watch.Report()))
		//fmt.Println("--- stopwatch ---")
		//fmt.Println(n.stopwatch.Report())
	}()
}
Reduce the sleep time between user notifications from 200 ms to 150 ms.
package notifier
import (
"fmt"
"sort"
"strings"
"sync"
"time"
"github.com/jinzhu/gorm"
"github.com/oinume/lekcije/server/config"
"github.com/oinume/lekcije/server/emailer"
"github.com/oinume/lekcije/server/errors"
"github.com/oinume/lekcije/server/fetcher"
"github.com/oinume/lekcije/server/logger"
"github.com/oinume/lekcije/server/model"
"github.com/oinume/lekcije/server/stopwatch"
"github.com/oinume/lekcije/server/util"
"github.com/uber-go/zap"
)
// Notifier fetches the lesson schedules of teachers that users follow and
// notifies each user by email of lessons that newly became available.
type Notifier struct {
	db              *gorm.DB
	fetcher         *fetcher.TeacherLessonFetcher
	dryRun          bool // when true, fetched lessons are not persisted on Close
	lessonService   *model.LessonService
	teachers        map[uint32]*model.Teacher  // teacherID -> teacher, filled during SendNotification
	fetchedLessons  map[uint32][]*model.Lesson // teacherID -> lessons fetched so far
	sender          emailer.Sender
	senderWaitGroup *sync.WaitGroup // tracks in-flight email-sending goroutines
	stopwatch       stopwatch.Stopwatch
	sync.Mutex      // guards teachers and fetchedLessons
}
// NewNotifier returns a Notifier that fetches via fetcher and sends mail via
// sender. The stopwatch is started immediately so later marks are measured
// from construction time.
func NewNotifier(db *gorm.DB, fetcher *fetcher.TeacherLessonFetcher, dryRun bool, sender emailer.Sender) *Notifier {
	return &Notifier{
		db:              db,
		fetcher:         fetcher,
		dryRun:          dryRun,
		teachers:        make(map[uint32]*model.Teacher, 1000),
		fetchedLessons:  make(map[uint32][]*model.Lesson, 1000),
		sender:          sender,
		senderWaitGroup: &sync.WaitGroup{},
		stopwatch:       stopwatch.NewSync().Start(),
	}
}
// SendNotification looks up the teachers user follows, fetches each teacher's
// latest schedule concurrently (one goroutine per teacher), and emails the
// user about lessons that newly became available since the previous fetch.
func (n *Notifier) SendNotification(user *model.User) error {
	followingTeacherService := model.NewFollowingTeacherService(n.db)
	n.lessonService = model.NewLessonService(n.db)
	// Teachers whose fetch has failed this many times are excluded.
	const maxFetchErrorCount = 5
	teacherIDs, err := followingTeacherService.FindTeacherIDsByUserID(user.ID, maxFetchErrorCount)
	if err != nil {
		return errors.Wrapperf(err, "Failed to FindTeacherIDsByUserID(): userID=%v", user.ID)
	}
	n.stopwatch.Mark(fmt.Sprintf("FindTeacherIDsByUserID:%d", user.ID))
	if len(teacherIDs) == 0 {
		return nil
	}
	logger.App.Info(
		"Target teachers",
		zap.Uint("userID", uint(user.ID)),
		zap.String("teacherIDs", strings.Join(util.Uint32ToStringSlice(teacherIDs...), ",")),
	)

	// Written only while holding n.Lock inside the fetch goroutines.
	availableLessonsPerTeacher := make(map[uint32][]*model.Lesson, 1000)
	wg := &sync.WaitGroup{}
	for _, teacherID := range teacherIDs {
		wg.Add(1)
		go func(teacherID uint32) {
			defer n.stopwatch.Mark(fmt.Sprintf("fetchAndExtractNewAvailableLessons:%d", teacherID))
			defer wg.Done()
			teacher, fetchedLessons, newAvailableLessons, err := n.fetchAndExtractNewAvailableLessons(teacherID)
			if err != nil {
				switch err.(type) {
				case *errors.NotFound:
					if err := model.NewTeacherService(n.db).IncrementFetchErrorCount(teacherID, 1); err != nil {
						logger.App.Error(
							"IncrementFetchErrorCount failed",
							zap.Uint("teacherID", uint(teacherID)), zap.Error(err),
						)
					}
					logger.App.Warn("Cannot find teacher", zap.Uint("teacherID", uint(teacherID)))
					// TODO: Handle a case eikaiwa.dmm.com is down
				default:
					logger.App.Error("Cannot fetch teacher", zap.Uint("teacherID", uint(teacherID)), zap.Error(err))
				}
				return
			}
			n.Lock()
			defer n.Unlock()
			n.teachers[teacherID] = teacher
			if _, ok := n.fetchedLessons[teacherID]; !ok {
				n.fetchedLessons[teacherID] = make([]*model.Lesson, 0, 5000)
			}
			n.fetchedLessons[teacherID] = append(n.fetchedLessons[teacherID], fetchedLessons...)
			if len(newAvailableLessons) > 0 {
				availableLessonsPerTeacher[teacherID] = newAvailableLessons
			}
		}(teacherID)
		// NOTE: a dead `if err != nil { return err }` was removed here — err is
		// the FindTeacherIDsByUserID error, already checked above and never
		// reassigned inside this loop (the goroutine declares its own err).
	}
	wg.Wait()
	if err := n.sendNotificationToUser(user, availableLessonsPerTeacher); err != nil {
		return err
	}
	// Pause between users — presumably to throttle load on the fetched
	// site; TODO confirm intent.
	time.Sleep(150 * time.Millisecond)
	n.stopwatch.Mark("sleep")
	return nil
}
// fetchAndExtractNewAvailableLessons fetches the teacher's current schedule
// and diffs it against the lessons stored from the previous fetch.
// Returns teacher, fetchedLessons, newAvailableLessons, error
func (n *Notifier) fetchAndExtractNewAvailableLessons(teacherID uint32) (
	*model.Teacher, []*model.Lesson, []*model.Lesson, error,
) {
	teacher, fetchedLessons, err := n.fetcher.Fetch(teacherID)
	if err != nil {
		return nil, nil, nil, err
	}
	logger.App.Debug(
		"fetcher.Fetch",
		zap.Uint("teacherID", uint(teacher.ID)),
		zap.Int("lessons", len(fetchedLessons)),
	)
	//fmt.Printf("fetchedLessons ---\n")
	//for _, l := range fetchedLessons {
	//	fmt.Printf("teacherID=%v, datetime=%v, status=%v\n", l.TeacherId, l.Datetime, l.Status)
	//}
	// Compare only lessons from the start of today through six days ahead,
	// in the configured local timezone.
	now := time.Now().In(config.LocalTimezone())
	fromDate := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, config.LocalTimezone())
	toDate := fromDate.Add(24 * 6 * time.Hour)
	lastFetchedLessons, err := n.lessonService.FindLessons(teacher.ID, fromDate, toDate)
	if err != nil {
		return nil, nil, nil, err
	}
	//fmt.Printf("lastFetchedLessons ---\n")
	//for _, l := range lastFetchedLessons {
	//	fmt.Printf("teacherID=%v, datetime=%v, status=%v\n", l.TeacherId, l.Datetime, l.Status)
	//}
	newAvailableLessons := n.lessonService.GetNewAvailableLessons(lastFetchedLessons, fetchedLessons)
	//fmt.Printf("newAvailableLessons ---\n")
	//for _, l := range newAvailableLessons {
	//	fmt.Printf("teacherID=%v, datetime=%v, status=%v\n", l.TeacherId, l.Datetime, l.Status)
	//}
	return teacher, fetchedLessons, newAvailableLessons, nil
}
// sendNotificationToUser renders the notification email listing the newly
// available lessons per teacher and sends it asynchronously. Returns nil
// without sending anything when there are no lessons to report.
func (n *Notifier) sendNotificationToUser(
	user *model.User,
	lessonsPerTeacher map[uint32][]*model.Lesson,
) error {
	lessonsCount := 0
	var teacherIDs []int
	for teacherID, lessons := range lessonsPerTeacher {
		teacherIDs = append(teacherIDs, int(teacherID))
		lessonsCount += len(lessons)
	}
	if lessonsCount == 0 {
		// Don't send notification
		return nil
	}
	// Sort IDs so teachers appear in the email in a deterministic order.
	sort.Ints(teacherIDs)
	var teacherIDs2 []uint32
	var teacherNames []string
	for _, id := range teacherIDs {
		teacherIDs2 = append(teacherIDs2, uint32(id))
		teacherNames = append(teacherNames, n.teachers[uint32(id)].Name)
	}
	// TODO: getEmailTemplate as a static file
	t := emailer.NewTemplate("notifier", getEmailTemplateJP())
	data := struct {
		To                string
		TeacherNames      string
		TeacherIDs        []uint32
		Teachers          map[uint32]*model.Teacher
		LessonsPerTeacher map[uint32][]*model.Lesson
		WebURL            string
	}{
		To:                user.Email,
		TeacherNames:      strings.Join(teacherNames, ", "),
		TeacherIDs:        teacherIDs2,
		Teachers:          n.teachers,
		LessonsPerTeacher: lessonsPerTeacher,
		WebURL:            config.WebURL(),
	}
	email, err := emailer.NewEmailFromTemplate(t, data)
	if err != nil {
		return errors.InternalWrapf(err, "Failed to create emailer.Email from template: to=%v", user.Email)
	}
	email.SetCustomArg("user_id", fmt.Sprint(user.ID))
	email.SetCustomArg("teacher_ids", strings.Join(util.Uint32ToStringSlice(teacherIDs2...), ","))
	//fmt.Printf("--- mail ---\n%s", email.BodyString())
	n.stopwatch.Mark("emailer.NewEmailFromTemplate")
	logger.App.Info("sendNotificationToUser", zap.String("email", user.Email))
	// Send asynchronously; Close() waits on senderWaitGroup before returning.
	n.senderWaitGroup.Add(1)
	go func(email *emailer.Email) {
		defer n.stopwatch.Mark(fmt.Sprintf("sender.Send:%d", user.ID))
		defer n.senderWaitGroup.Done()
		if err := n.sender.Send(email); err != nil {
			logger.App.Error(
				"Failed to sendNotificationToUser",
				zap.String("email", user.Email), zap.Error(err),
			)
		}
	}(email)
	return nil
	// return n.sender.Send(email)
}
// getEmailTemplateJP returns the Japanese notification email template:
// mail-header-style lines (From/To/Subject/Body) followed by an HTML body
// listing each teacher's newly available lessons with reservation links.
// TODO: load this from a static file instead of embedding it in code.
func getEmailTemplateJP() string {
	return strings.TrimSpace(`
From: lekcije <lekcije@lekcije.com>
To: {{ .To }}
Subject: {{ .TeacherNames }}の空きレッスンがあります
Body: text/html
{{ range $teacherID := .TeacherIDs }}
{{- $teacher := index $.Teachers $teacherID -}}
--- {{ $teacher.Name }} ---
{{- $lessons := index $.LessonsPerTeacher $teacherID }}
{{- range $lesson := $lessons }}
{{ $lesson.Datetime.Format "2006-01-02 15:04" }}
{{- end }}
レッスンの予約はこちらから:
<a href="http://eikaiwa.dmm.com/teacher/index/{{ $teacherID }}/">PC</a>
<a href="http://eikaiwa.dmm.com/teacher/schedule/{{ $teacherID }}/">Mobile</a>
{{ end }}
空きレッスンの通知の解除は<a href="{{ .WebURL }}/me">こちら</a>
<a href="https://goo.gl/forms/CIGO3kpiQCGjtFD42">お問い合わせ</a>
`)
}
//func getEmailTemplateEN() string {
// return strings.TrimSpace(`
//{{- range $teacherID := .TeacherIDs }}
//{{- $teacher := index $.Teachers $teacherID -}}
//--- {{ $teacher.Name }} ---
// {{- $lessons := index $.LessonsPerTeacher $teacherID }}
// {{- range $lesson := $lessons }}
//{{ $lesson.Datetime.Format "2006-01-02 15:04" }}
// {{- end }}
//
//Reserve here:
//<a href="http://eikaiwa.dmm.com/teacher/index/{{ $teacherID }}/">PC</a>
//<a href="http://eikaiwa.dmm.com/teacher/schedule/{{ $teacherID }}/">Mobile</a>
//{{ end }}
//Click <a href="{{ .WebURL }}/me">here</a> if you want to stop notification of the teacher.
// `)
//}
// Close finalizes a notification run: it waits for all in-flight emails,
// then — via the deferred calls below, which run in reverse order of
// registration — stops the stopwatch, persists fetched lessons (unless
// dry-run), and finally closes the fetcher.
func (n *Notifier) Close() {
	n.senderWaitGroup.Wait()
	defer n.fetcher.Close() // runs last
	defer func() {
		if n.dryRun {
			return
		}
		for teacherID, lessons := range n.fetchedLessons {
			if _, err := n.lessonService.UpdateLessons(lessons); err != nil {
				// fixed typo in the log message: "ocurred" -> "occurred"
				logger.App.Error(
					"An error occurred in Notifier.Close",
					zap.Error(err), zap.Uint("teacherID", uint(teacherID)),
				)
			}
		}
	}()
	defer func() { // runs first
		n.stopwatch.Stop()
		//logger.App.Info("Stopwatch report", zap.String("report", watch.Report()))
		//fmt.Println("--- stopwatch ---")
		//fmt.Println(n.stopwatch.Report())
	}()
}
|
package ipam
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"math/rand"
"net"
"strconv"
"sync"
"testing"
"time"
"github.com/docker/libkv/store"
"github.com/docker/libkv/store/boltdb"
"github.com/docker/libnetwork/bitseq"
"github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/ipamapi"
"github.com/docker/libnetwork/ipamutils"
_ "github.com/docker/libnetwork/testutils"
"github.com/docker/libnetwork/types"
"github.com/stretchr/testify/assert"
)
const (
defaultPrefix = "/tmp/libnetwork/test/ipam"
)
// init registers the boltdb backend with libkv so the tests can create
// local-scope datastores.
func init() {
	boltdb.Register()
}
// OptionBoltdbWithRandomDBFile function returns a random dir for local store backend
func randomLocalStore(needStore bool) (datastore.DataStore, error) {
if !needStore {
return nil, nil
}
tmp, err := ioutil.TempFile("", "libnetwork-")
if err != nil {
return nil, fmt.Errorf("Error creating temp file: %v", err)
}
if err := tmp.Close(); err != nil {
return nil, fmt.Errorf("Error closing temp file: %v", err)
}
return datastore.NewDataStore(datastore.LocalScope, &datastore.ScopeCfg{
Client: datastore.ScopeClientCfg{
Provider: "boltdb",
Address: defaultPrefix + tmp.Name(),
Config: &store.Config{
Bucket: "libnetwork",
ConnectionTimeout: 3 * time.Second,
},
},
})
}
// getAllocator returns a fresh Allocator over the default networks, backed
// by a random local boltdb store when store is true and storeless otherwise.
func getAllocator(store bool) (*Allocator, error) {
	ipamutils.InitNetworks(nil)
	ds, err := randomLocalStore(store)
	if err != nil {
		return nil, err
	}
	return NewAllocator(ds, nil)
}
// TestInt2IP2IntConversion round-trips every ordinal in the 24-bit range
// through addIntToIP and ipToUint64 and checks the value is preserved.
// NOTE(review): 256^3 iterations — deliberately exhaustive and slow.
func TestInt2IP2IntConversion(t *testing.T) {
	for i := uint64(0); i < 256*256*256; i++ {
		var array [4]byte // new array at each cycle
		addIntToIP(array[:], i)
		j := ipToUint64(array[:])
		if j != i {
			t.Fatalf("Failed to convert ordinal %d to IP % x and back to ordinal. Got %d", i, array, j)
		}
	}
}
// TestGetAddressVersion verifies that getAddressVersion classifies IPv4 and
// IPv6 address literals correctly.
func TestGetAddressVersion(t *testing.T) {
	cases := []struct {
		addr string
		isV6 bool
		msg  string
	}{
		{"172.28.30.112", false, "Failed to detect IPv4 version"},
		{"0.0.0.1", false, "Failed to detect IPv4 version"},
		{"ff01::1", true, "Failed to detect IPv6 version"},
		{"2001:db8::76:51", true, "Failed to detect IPv6 version"},
	}
	for _, c := range cases {
		want := v4
		if c.isV6 {
			want = v6
		}
		if want != getAddressVersion(net.ParseIP(c.addr)) {
			t.Fatal(c.msg)
		}
	}
}
// TestKeyString checks SubnetKey's String/FromString round-trip, both
// without and with a child subnet segment.
func TestKeyString(t *testing.T) {
	k := &SubnetKey{AddressSpace: "default", Subnet: "172.27.0.0/16"}
	expected := "default/172.27.0.0/16"
	if expected != k.String() {
		t.Fatalf("Unexpected key string: %s", k.String())
	}
	k2 := &SubnetKey{}
	err := k2.FromString(expected)
	if err != nil {
		t.Fatal(err)
	}
	if k2.AddressSpace != k.AddressSpace || k2.Subnet != k.Subnet {
		t.Fatalf("SubnetKey.FromString() failed. Expected %v. Got %v", k, k2)
	}
	// Now with a child subnet appended as a third path segment.
	expected = fmt.Sprintf("%s/%s", expected, "172.27.3.0/24")
	k.ChildSubnet = "172.27.3.0/24"
	if expected != k.String() {
		t.Fatalf("Unexpected key string: %s", k.String())
	}
	err = k2.FromString(expected)
	if err != nil {
		t.Fatal(err)
	}
	if k2.AddressSpace != k.AddressSpace || k2.Subnet != k.Subnet || k2.ChildSubnet != k.ChildSubnet {
		t.Fatalf("SubnetKey.FromString() failed. Expected %v. Got %v", k, k2)
	}
}
// TestPoolDataMarshal checks PoolData's JSON round-trip, first with a Range
// set and then without one (Range must stay nil after unmarshal).
func TestPoolDataMarshal(t *testing.T) {
	_, nw, err := net.ParseCIDR("172.28.30.1/24")
	if err != nil {
		t.Fatal(err)
	}
	p := &PoolData{
		ParentKey: SubnetKey{AddressSpace: "Blue", Subnet: "172.28.0.0/16"},
		Pool:      nw,
		Range:     &AddressRange{Sub: &net.IPNet{IP: net.IP{172, 28, 20, 0}, Mask: net.IPMask{255, 255, 255, 0}}, Start: 0, End: 255},
		RefCount:  4,
	}
	ba, err := json.Marshal(p)
	if err != nil {
		t.Fatal(err)
	}
	var q PoolData
	err = json.Unmarshal(ba, &q)
	if err != nil {
		t.Fatal(err)
	}
	if p.ParentKey != q.ParentKey || !types.CompareIPNet(p.Range.Sub, q.Range.Sub) ||
		p.Range.Start != q.Range.Start || p.Range.End != q.Range.End || p.RefCount != q.RefCount ||
		!types.CompareIPNet(p.Pool, q.Pool) {
		t.Fatalf("\n%#v\n%#v", p, &q)
	}
	// Same round-trip with Range omitted.
	p = &PoolData{
		ParentKey: SubnetKey{AddressSpace: "Blue", Subnet: "172.28.0.0/16"},
		Pool:      nw,
		RefCount:  4,
	}
	ba, err = json.Marshal(p)
	if err != nil {
		t.Fatal(err)
	}
	err = json.Unmarshal(ba, &q)
	if err != nil {
		t.Fatal(err)
	}
	if q.Range != nil {
		t.Fatal("Unexpected Range")
	}
}
// TestSubnetsMarshal checks that serializing and restoring an address-space
// config (Value/SetValue) preserves allocation state: addresses handed out
// after the restore continue from where the pools left off.
func TestSubnetsMarshal(t *testing.T) {
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		if err != nil {
			t.Fatal(err)
		}
		pid0, _, _, err := a.RequestPool(localAddressSpace, "192.168.0.0/16", "", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		pid1, _, _, err := a.RequestPool(localAddressSpace, "192.169.0.0/16", "", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		// Allocate one address from pool 0 before snapshotting.
		_, _, err = a.RequestAddress(pid0, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
		cfg, err := a.getAddrSpace(localAddressSpace)
		if err != nil {
			t.Fatal(err)
		}
		// Round-trip the serialized config.
		ba := cfg.Value()
		if err := cfg.SetValue(ba); err != nil {
			t.Fatal(err)
		}
		// Pool 0 already handed out .1, so the next address must be .2.
		expIP := &net.IPNet{IP: net.IP{192, 168, 0, 2}, Mask: net.IPMask{255, 255, 0, 0}}
		ip, _, err := a.RequestAddress(pid0, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(expIP, ip) {
			t.Fatalf("Got unexpected ip after pool config restore: %s", ip)
		}
		// Pool 1 is untouched, so its first address must be .1.
		expIP = &net.IPNet{IP: net.IP{192, 169, 0, 1}, Mask: net.IPMask{255, 255, 0, 0}}
		ip, _, err = a.RequestAddress(pid1, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(expIP, ip) {
			t.Fatalf("Got unexpected ip after pool config restore: %s", ip)
		}
	}
}
// TestAddSubnets exercises pool requests across address spaces: identical
// subnets in different spaces get distinct pool IDs, repeated requests are
// idempotent, and overlapping subnets (v4 and v6) are rejected.
func TestAddSubnets(t *testing.T) {
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		if err != nil {
			t.Fatal(err)
		}
		// "abc" aliases the local address space's backing state.
		a.addrSpaces["abc"] = a.addrSpaces[localAddressSpace]
		pid0, _, _, err := a.RequestPool(localAddressSpace, "10.0.0.0/8", "", nil, false)
		if err != nil {
			t.Fatal("Unexpected failure in adding subnet")
		}
		pid1, _, _, err := a.RequestPool("abc", "10.0.0.0/8", "", nil, false)
		if err != nil {
			t.Fatalf("Unexpected failure in adding overlapping subnets to different address spaces: %v", err)
		}
		if pid0 == pid1 {
			t.Fatal("returned same pool id for same subnets in different namespaces")
		}
		// Re-requesting an existing subnet must return the same pool ID.
		pid, _, _, err := a.RequestPool("abc", "10.0.0.0/8", "", nil, false)
		if err != nil {
			t.Fatalf("Unexpected failure requesting existing subnet: %v", err)
		}
		if pid != pid1 {
			t.Fatal("returned different pool id for same subnet requests")
		}
		_, _, _, err = a.RequestPool("abc", "10.128.0.0/9", "", nil, false)
		if err == nil {
			t.Fatal("Expected failure on adding overlapping base subnet")
		}
		// Sub-pool requests: repeats must be idempotent as well.
		pid2, _, _, err := a.RequestPool("abc", "10.0.0.0/8", "10.128.0.0/9", nil, false)
		if err != nil {
			t.Fatalf("Unexpected failure on adding sub pool: %v", err)
		}
		pid3, _, _, err := a.RequestPool("abc", "10.0.0.0/8", "10.128.0.0/9", nil, false)
		if err != nil {
			t.Fatalf("Unexpected failure on adding overlapping sub pool: %v", err)
		}
		if pid2 != pid3 {
			t.Fatal("returned different pool id for same sub pool requests")
		}
		// Overlap detection within the same address space.
		_, _, _, err = a.RequestPool(localAddressSpace, "10.20.2.0/24", "", nil, false)
		if err == nil {
			t.Fatal("Failed to detect overlapping subnets")
		}
		_, _, _, err = a.RequestPool(localAddressSpace, "10.128.0.0/9", "", nil, false)
		if err == nil {
			t.Fatal("Failed to detect overlapping subnets")
		}
		// v6 subnets: adding a smaller one first, then an overlapping larger one.
		_, _, _, err = a.RequestPool(localAddressSpace, "1003:1:2:3:4:5:6::/112", "", nil, false)
		if err != nil {
			t.Fatalf("Failed to add v6 subnet: %s", err.Error())
		}
		_, _, _, err = a.RequestPool(localAddressSpace, "1003:1:2:3::/64", "", nil, false)
		if err == nil {
			t.Fatal("Failed to detect overlapping v6 subnet")
		}
	}
}
// TestDoublePoolRelease tests that releasing a pool which has already
// been released raises an error.
func TestDoublePoolRelease(t *testing.T) {
	for _, store := range []bool{false, true} {
		for _, repeats := range []int{0, 1, 10} {
			a, err := getAllocator(store)
			assert.NoError(t, err)
			// Request initial pool allocation
			pid0, _, _, err := a.RequestPool(localAddressSpace, "10.0.0.0/8", "", nil, false)
			assert.NoError(t, err)
			// Re-request the same pool; each repeat bumps the ref count and
			// must return the same pool ID.
			for i := 0; i < repeats; i++ {
				pidN, _, _, err := a.RequestPool(localAddressSpace, "10.0.0.0/8", "", nil, false)
				assert.NoError(t, err)
				assert.Equal(t, pid0, pidN)
			}
			// Release the repeats
			for i := 0; i < repeats; i++ {
				err = a.ReleasePool(pid0)
				assert.NoError(t, err)
			}
			// Release the initial request
			err = a.ReleasePool(pid0)
			assert.NoError(t, err)
			// Releasing again fails
			err = a.ReleasePool(pid0)
			assert.Error(t, err)
		}
	}
}
// TestAddReleasePoolID tracks the per-subnet RefCount bookkeeping as pools
// and sub-pools over 10.0.0.0/8 are requested and released: each sub-pool
// also bumps its parent's count, and the base pool disappears only when
// every reference is gone.
func TestAddReleasePoolID(t *testing.T) {
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)
		var k0, k1, k2 SubnetKey
		aSpace, err := a.getAddrSpace(localAddressSpace)
		if err != nil {
			t.Fatal(err)
		}
		// Base pool: RefCount 1.
		pid0, _, _, err := a.RequestPool(localAddressSpace, "10.0.0.0/8", "", nil, false)
		if err != nil {
			t.Fatal("Unexpected failure in adding pool")
		}
		if err := k0.FromString(pid0); err != nil {
			t.Fatal(err)
		}
		aSpace, err = a.getAddrSpace(localAddressSpace)
		if err != nil {
			t.Fatal(err)
		}
		subnets := aSpace.subnets
		if subnets[k0].RefCount != 1 {
			t.Fatalf("Unexpected ref count for %s: %d", k0, subnets[k0].RefCount)
		}
		// First sub-pool: its own count is 1, parent goes to 2.
		pid1, _, _, err := a.RequestPool(localAddressSpace, "10.0.0.0/8", "10.0.0.0/16", nil, false)
		if err != nil {
			t.Fatal("Unexpected failure in adding sub pool")
		}
		if err := k1.FromString(pid1); err != nil {
			t.Fatal(err)
		}
		aSpace, err = a.getAddrSpace(localAddressSpace)
		if err != nil {
			t.Fatal(err)
		}
		subnets = aSpace.subnets
		if subnets[k1].RefCount != 1 {
			t.Fatalf("Unexpected ref count for %s: %d", k1, subnets[k1].RefCount)
		}
		// Identical sub-pool request: same ID as pid1, counts bump again.
		pid2, _, _, err := a.RequestPool(localAddressSpace, "10.0.0.0/8", "10.0.0.0/16", nil, false)
		if err != nil {
			t.Fatal("Unexpected failure in adding sub pool")
		}
		if pid0 == pid1 || pid0 == pid2 || pid1 != pid2 {
			t.Fatalf("Incorrect poolIDs returned %s, %s, %s", pid0, pid1, pid2)
		}
		if err := k2.FromString(pid2); err != nil {
			t.Fatal(err)
		}
		aSpace, err = a.getAddrSpace(localAddressSpace)
		if err != nil {
			t.Fatal(err)
		}
		subnets = aSpace.subnets
		if subnets[k2].RefCount != 2 {
			t.Fatalf("Unexpected ref count for %s: %d", k2, subnets[k2].RefCount)
		}
		if subnets[k0].RefCount != 3 {
			t.Fatalf("Unexpected ref count for %s: %d", k0, subnets[k0].RefCount)
		}
		// Releases decrement the parent's count one step at a time.
		if err := a.ReleasePool(pid1); err != nil {
			t.Fatal(err)
		}
		aSpace, err = a.getAddrSpace(localAddressSpace)
		if err != nil {
			t.Fatal(err)
		}
		subnets = aSpace.subnets
		if subnets[k0].RefCount != 2 {
			t.Fatalf("Unexpected ref count for %s: %d", k0, subnets[k0].RefCount)
		}
		if err := a.ReleasePool(pid0); err != nil {
			t.Fatal(err)
		}
		aSpace, err = a.getAddrSpace(localAddressSpace)
		if err != nil {
			t.Fatal(err)
		}
		subnets = aSpace.subnets
		if subnets[k0].RefCount != 1 {
			t.Fatalf("Unexpected ref count for %s: %d", k0, subnets[k0].RefCount)
		}
		// Re-requesting the base pool while the sub-pool still references it
		// must yield the original pool ID.
		pid00, _, _, err := a.RequestPool(localAddressSpace, "10.0.0.0/8", "", nil, false)
		if err != nil {
			t.Fatal("Unexpected failure in adding pool")
		}
		if pid00 != pid0 {
			t.Fatal("main pool should still exist")
		}
		aSpace, err = a.getAddrSpace(localAddressSpace)
		if err != nil {
			t.Fatal(err)
		}
		subnets = aSpace.subnets
		if subnets[k0].RefCount != 2 {
			t.Fatalf("Unexpected ref count for %s: %d", k0, subnets[k0].RefCount)
		}
		if err := a.ReleasePool(pid2); err != nil {
			t.Fatal(err)
		}
		aSpace, err = a.getAddrSpace(localAddressSpace)
		if err != nil {
			t.Fatal(err)
		}
		subnets = aSpace.subnets
		if subnets[k0].RefCount != 1 {
			t.Fatalf("Unexpected ref count for %s: %d", k0, subnets[k0].RefCount)
		}
		// Final release removes the base pool entirely.
		if err := a.ReleasePool(pid00); err != nil {
			t.Fatal(err)
		}
		aSpace, err = a.getAddrSpace(localAddressSpace)
		if err != nil {
			t.Fatal(err)
		}
		subnets = aSpace.subnets
		if bp, ok := subnets[k0]; ok {
			t.Fatalf("Base pool %s is still present: %v", k0, bp)
		}
		// A fresh request starts the ref count over at 1.
		_, _, _, err = a.RequestPool(localAddressSpace, "10.0.0.0/8", "", nil, false)
		if err != nil {
			t.Fatal("Unexpected failure in adding pool")
		}
		aSpace, err = a.getAddrSpace(localAddressSpace)
		if err != nil {
			t.Fatal(err)
		}
		subnets = aSpace.subnets
		if subnets[k0].RefCount != 1 {
			t.Fatalf("Unexpected ref count for %s: %d", k0, subnets[k0].RefCount)
		}
	}
}
// TestPredefinedPool checks that getPredefinedPool rejects unknown address
// spaces and never hands back a network that has already been allocated
// from the local space.
func TestPredefinedPool(t *testing.T) {
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)
		if _, err := a.getPredefinedPool("blue", false); err == nil {
			t.Fatal("Expected failure for non default addr space")
		}
		pid, nw, _, err := a.RequestPool(localAddressSpace, "", "", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		nw2, err := a.getPredefinedPool(localAddressSpace, false)
		if err != nil {
			t.Fatal(err)
		}
		// CompareIPNet returning true means the predefined pool collides
		// with the network just handed out — that is the failure case.
		if types.CompareIPNet(nw, nw2) {
			t.Fatalf("Unexpected default network returned: %s = %s", nw2, nw)
		}
		if err := a.ReleasePool(pid); err != nil {
			t.Fatal(err)
		}
	}
}
// TestRemoveSubnet requests a mix of v4 and v6 pools across two address
// spaces and verifies every one of them can be released without error.
func TestRemoveSubnet(t *testing.T) {
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)
		// "splane" is a second address space sharing the local space's backend.
		a.addrSpaces["splane"] = &addrSpace{
			id:      dsConfigKey + "/" + "splane",
			ds:      a.addrSpaces[localAddressSpace].ds,
			alloc:   a.addrSpaces[localAddressSpace].alloc,
			scope:   a.addrSpaces[localAddressSpace].scope,
			subnets: map[SubnetKey]*PoolData{},
		}
		input := []struct {
			addrSpace string
			subnet    string
			v6        bool
		}{
			{localAddressSpace, "192.168.0.0/16", false},
			{localAddressSpace, "172.17.0.0/16", false},
			{localAddressSpace, "10.0.0.0/8", false},
			// NOTE(review): a v6 subnet requested with v6=false — looks
			// inconsistent with the "splane" entries below; confirm intended.
			{localAddressSpace, "2001:db8:1:2:3:4:ffff::/112", false},
			{"splane", "172.17.0.0/16", false},
			{"splane", "10.0.0.0/8", false},
			{"splane", "2001:db8:1:2:3:4:5::/112", true},
			{"splane", "2001:db8:1:2:3:4:ffff::/112", true},
		}
		poolIDs := make([]string, len(input))
		for ind, i := range input {
			if poolIDs[ind], _, _, err = a.RequestPool(i.addrSpace, i.subnet, "", nil, i.v6); err != nil {
				t.Fatalf("Failed to apply input. Can't proceed: %s", err.Error())
			}
		}
		for ind, id := range poolIDs {
			if err := a.ReleasePool(id); err != nil {
				t.Fatalf("Failed to release poolID %s (%d)", id, ind)
			}
		}
	}
}
// TestGetSameAddress checks that explicitly requesting an address that has
// already been allocated from the pool fails.
func TestGetSameAddress(t *testing.T) {
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)
		// "giallo" is a second address space sharing the local space's backend.
		a.addrSpaces["giallo"] = &addrSpace{
			id:      dsConfigKey + "/" + "giallo",
			ds:      a.addrSpaces[localAddressSpace].ds,
			alloc:   a.addrSpaces[localAddressSpace].alloc,
			scope:   a.addrSpaces[localAddressSpace].scope,
			subnets: map[SubnetKey]*PoolData{},
		}
		pid, _, _, err := a.RequestPool("giallo", "192.168.100.0/24", "", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		ip := net.ParseIP("192.168.100.250")
		_, _, err = a.RequestAddress(pid, ip, nil)
		if err != nil {
			t.Fatal(err)
		}
		// Second request for the same address must fail. The original code
		// called t.Fatal(err) here, which only ever printed "<nil>".
		_, _, err = a.RequestAddress(pid, ip, nil)
		if err == nil {
			t.Fatal("Expected failure requesting already-allocated address")
		}
	}
}
// TestGetAddressSubPoolEqualPool checks allocation still works when the
// requested sub-pool is exactly the same range as the parent pool.
func TestGetAddressSubPoolEqualPool(t *testing.T) {
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)
		// Requesting a subpool of same size of the master pool should not cause any problem on ip allocation
		pid, _, _, err := a.RequestPool(localAddressSpace, "172.18.0.0/16", "172.18.0.0/16", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		_, _, err = a.RequestAddress(pid, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
	}
}
// TestRequestReleaseAddressFromSubPool exercises allocation and release when
// a pool is restricted to a sub pool: allocation must stay inside the
// sub-pool range, exhaust it, hand back released addresses on the next
// request, and honor explicit address requests before sequential ones.
func TestRequestReleaseAddressFromSubPool(t *testing.T) {
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)
		// Dedicated address space sharing the local space's backing store.
		a.addrSpaces["rosso"] = &addrSpace{
			id:      dsConfigKey + "/" + "rosso",
			ds:      a.addrSpaces[localAddressSpace].ds,
			alloc:   a.addrSpaces[localAddressSpace].alloc,
			scope:   a.addrSpaces[localAddressSpace].scope,
			subnets: map[SubnetKey]*PoolData{},
		}
		poolID, _, _, err := a.RequestPool("rosso", "172.28.0.0/16", "172.28.30.0/24", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		var ip *net.IPNet
		// Drain the /24 sub pool; the last address handed out must be .255,
		// carrying the parent pool's /16 mask.
		expected := &net.IPNet{IP: net.IP{172, 28, 30, 255}, Mask: net.IPMask{255, 255, 0, 0}}
		for err == nil {
			var c *net.IPNet
			if c, _, err = a.RequestAddress(poolID, nil, nil); err == nil {
				ip = c
			}
		}
		if err != ipamapi.ErrNoAvailableIPs {
			t.Fatal(err)
		}
		if !types.CompareIPNet(expected, ip) {
			t.Fatalf("Unexpected last IP from subpool. Expected: %s. Got: %v.", expected, ip)
		}
		// Release one address and verify it is the next one handed out.
		rp := &net.IPNet{IP: net.IP{172, 28, 30, 97}, Mask: net.IPMask{255, 255, 0, 0}}
		if err = a.ReleaseAddress(poolID, rp.IP); err != nil {
			t.Fatal(err)
		}
		if ip, _, err = a.RequestAddress(poolID, nil, nil); err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(rp, ip) {
			t.Fatalf("Unexpected IP from subpool. Expected: %s. Got: %v.", rp, ip)
		}
		// Repeat the drain/release/re-request cycle on a second pool hierarchy.
		_, _, _, err = a.RequestPool("rosso", "10.0.0.0/8", "10.0.0.0/16", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		poolID, _, _, err = a.RequestPool("rosso", "10.0.0.0/16", "10.0.0.0/24", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		expected = &net.IPNet{IP: net.IP{10, 0, 0, 255}, Mask: net.IPMask{255, 255, 0, 0}}
		for err == nil {
			var c *net.IPNet
			if c, _, err = a.RequestAddress(poolID, nil, nil); err == nil {
				ip = c
			}
		}
		if err != ipamapi.ErrNoAvailableIPs {
			t.Fatal(err)
		}
		if !types.CompareIPNet(expected, ip) {
			t.Fatalf("Unexpected last IP from subpool. Expected: %s. Got: %v.", expected, ip)
		}
		rp = &net.IPNet{IP: net.IP{10, 0, 0, 79}, Mask: net.IPMask{255, 255, 0, 0}}
		if err = a.ReleaseAddress(poolID, rp.IP); err != nil {
			t.Fatal(err)
		}
		if ip, _, err = a.RequestAddress(poolID, nil, nil); err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(rp, ip) {
			t.Fatalf("Unexpected IP from subpool. Expected: %s. Got: %v.", rp, ip)
		}
		// Request any addresses from subpool after explicit address request
		unoExp, _ := types.ParseCIDR("10.2.2.0/16")
		dueExp, _ := types.ParseCIDR("10.2.2.2/16")
		treExp, _ := types.ParseCIDR("10.2.2.1/16")
		if poolID, _, _, err = a.RequestPool("rosso", "10.2.0.0/16", "10.2.2.0/24", nil, false); err != nil {
			t.Fatal(err)
		}
		// Explicitly take .1 first; sequential requests then yield .0 and .2.
		tre, _, err := a.RequestAddress(poolID, treExp.IP, nil)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(tre, treExp) {
			t.Fatalf("Unexpected address: %v", tre)
		}
		uno, _, err := a.RequestAddress(poolID, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(uno, unoExp) {
			t.Fatalf("Unexpected address: %v", uno)
		}
		due, _, err := a.RequestAddress(poolID, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(due, dueExp) {
			t.Fatalf("Unexpected address: %v", due)
		}
		// Without serial allocation, released addresses are reused immediately.
		if err = a.ReleaseAddress(poolID, uno.IP); err != nil {
			t.Fatal(err)
		}
		uno, _, err = a.RequestAddress(poolID, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(uno, unoExp) {
			t.Fatalf("Unexpected address: %v", uno)
		}
		if err = a.ReleaseAddress(poolID, tre.IP); err != nil {
			t.Fatal(err)
		}
		tre, _, err = a.RequestAddress(poolID, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(tre, treExp) {
			t.Fatalf("Unexpected address: %v", tre)
		}
	}
}
// TestSerializeRequestReleaseAddressFromSubPool mirrors
// TestRequestReleaseAddressFromSubPool but passes the AllocSerialPrefix
// option: with serial allocation, released addresses are NOT reused
// immediately — allocation keeps advancing past them.
func TestSerializeRequestReleaseAddressFromSubPool(t *testing.T) {
	opts := map[string]string{
		ipamapi.AllocSerialPrefix: "true"}
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)
		// Dedicated address space sharing the local space's backing store.
		a.addrSpaces["rosso"] = &addrSpace{
			id:      dsConfigKey + "/" + "rosso",
			ds:      a.addrSpaces[localAddressSpace].ds,
			alloc:   a.addrSpaces[localAddressSpace].alloc,
			scope:   a.addrSpaces[localAddressSpace].scope,
			subnets: map[SubnetKey]*PoolData{},
		}
		poolID, _, _, err := a.RequestPool("rosso", "172.28.0.0/16", "172.28.30.0/24", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		var ip *net.IPNet
		// Drain the /24 sub pool; last address must be .255 with the /16 mask.
		expected := &net.IPNet{IP: net.IP{172, 28, 30, 255}, Mask: net.IPMask{255, 255, 0, 0}}
		for err == nil {
			var c *net.IPNet
			if c, _, err = a.RequestAddress(poolID, nil, opts); err == nil {
				ip = c
			}
		}
		if err != ipamapi.ErrNoAvailableIPs {
			t.Fatal(err)
		}
		if !types.CompareIPNet(expected, ip) {
			t.Fatalf("Unexpected last IP from subpool. Expected: %s. Got: %v.", expected, ip)
		}
		// On a fully drained pool a released address is handed out again.
		rp := &net.IPNet{IP: net.IP{172, 28, 30, 97}, Mask: net.IPMask{255, 255, 0, 0}}
		if err = a.ReleaseAddress(poolID, rp.IP); err != nil {
			t.Fatal(err)
		}
		if ip, _, err = a.RequestAddress(poolID, nil, opts); err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(rp, ip) {
			t.Fatalf("Unexpected IP from subpool. Expected: %s. Got: %v.", rp, ip)
		}
		// Repeat on a second pool hierarchy.
		_, _, _, err = a.RequestPool("rosso", "10.0.0.0/8", "10.0.0.0/16", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		poolID, _, _, err = a.RequestPool("rosso", "10.0.0.0/16", "10.0.0.0/24", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		expected = &net.IPNet{IP: net.IP{10, 0, 0, 255}, Mask: net.IPMask{255, 255, 0, 0}}
		for err == nil {
			var c *net.IPNet
			if c, _, err = a.RequestAddress(poolID, nil, opts); err == nil {
				ip = c
			}
		}
		if err != ipamapi.ErrNoAvailableIPs {
			t.Fatal(err)
		}
		if !types.CompareIPNet(expected, ip) {
			t.Fatalf("Unexpected last IP from subpool. Expected: %s. Got: %v.", expected, ip)
		}
		rp = &net.IPNet{IP: net.IP{10, 0, 0, 79}, Mask: net.IPMask{255, 255, 0, 0}}
		if err = a.ReleaseAddress(poolID, rp.IP); err != nil {
			t.Fatal(err)
		}
		if ip, _, err = a.RequestAddress(poolID, nil, opts); err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(rp, ip) {
			t.Fatalf("Unexpected IP from subpool. Expected: %s. Got: %v.", rp, ip)
		}
		// Request any addresses from subpool after explicit address request
		unoExp, _ := types.ParseCIDR("10.2.2.0/16")
		dueExp, _ := types.ParseCIDR("10.2.2.2/16")
		treExp, _ := types.ParseCIDR("10.2.2.1/16")
		quaExp, _ := types.ParseCIDR("10.2.2.3/16")
		fivExp, _ := types.ParseCIDR("10.2.2.4/16")
		if poolID, _, _, err = a.RequestPool("rosso", "10.2.0.0/16", "10.2.2.0/24", nil, false); err != nil {
			t.Fatal(err)
		}
		// Explicitly take .1 first; sequential requests then yield .0 and .2.
		tre, _, err := a.RequestAddress(poolID, treExp.IP, opts)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(tre, treExp) {
			t.Fatalf("Unexpected address: %v", tre)
		}
		uno, _, err := a.RequestAddress(poolID, nil, opts)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(uno, unoExp) {
			t.Fatalf("Unexpected address: %v", uno)
		}
		due, _, err := a.RequestAddress(poolID, nil, opts)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(due, dueExp) {
			t.Fatalf("Unexpected address: %v", due)
		}
		// With serial allocation a released address is skipped: the next
		// request returns .3 rather than the just-released .0.
		if err = a.ReleaseAddress(poolID, uno.IP); err != nil {
			t.Fatal(err)
		}
		uno, _, err = a.RequestAddress(poolID, nil, opts)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(uno, quaExp) {
			t.Fatalf("Unexpected address: %v", uno)
		}
		if err = a.ReleaseAddress(poolID, tre.IP); err != nil {
			t.Fatal(err)
		}
		tre, _, err = a.RequestAddress(poolID, nil, opts)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(tre, fivExp) {
			t.Fatalf("Unexpected address: %v", tre)
		}
	}
}
// TestGetAddress exhausts address allocation across a range of subnet sizes
// from /11 down to /31 (larger subnets are commented out for speed).
func TestGetAddress(t *testing.T) {
	subnets := []string{
		/*"10.0.0.0/8", "10.0.0.0/9", "10.0.0.0/10",*/ "10.0.0.0/11", "10.0.0.0/12", "10.0.0.0/13", "10.0.0.0/14",
		"10.0.0.0/15", "10.0.0.0/16", "10.0.0.0/17", "10.0.0.0/18", "10.0.0.0/19", "10.0.0.0/20", "10.0.0.0/21",
		"10.0.0.0/22", "10.0.0.0/23", "10.0.0.0/24", "10.0.0.0/25", "10.0.0.0/26", "10.0.0.0/27", "10.0.0.0/28",
		"10.0.0.0/29", "10.0.0.0/30", "10.0.0.0/31",
	}

	for _, s := range subnets {
		assertGetAddress(t, s)
	}
}
// TestRequestSyntaxCheck verifies that malformed RequestPool, RequestAddress
// and ReleaseAddress calls (empty address space, missing pool, nil address)
// are rejected, while well-formed calls succeed.
func TestRequestSyntaxCheck(t *testing.T) {
	var (
		pool    = "192.168.0.0/16"
		subPool = "192.168.0.0/24"
		as      = "green"
	)

	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)

		// Dedicated address space sharing the local space's backing store.
		a.addrSpaces[as] = &addrSpace{
			id:      dsConfigKey + "/" + as,
			ds:      a.addrSpaces[localAddressSpace].ds,
			alloc:   a.addrSpaces[localAddressSpace].alloc,
			scope:   a.addrSpaces[localAddressSpace].scope,
			subnets: map[SubnetKey]*PoolData{},
		}

		_, _, _, err = a.RequestPool("", pool, "", nil, false)
		if err == nil {
			t.Fatal("Failed to detect wrong request: empty address space")
		}

		_, _, _, err = a.RequestPool("", pool, subPool, nil, false)
		if err == nil {
			t.Fatal("Failed to detect wrong request: empty address space")
		}

		_, _, _, err = a.RequestPool(as, "", subPool, nil, false)
		if err == nil {
			t.Fatal("Failed to detect wrong request: subPool specified and no pool")
		}

		pid, _, _, err := a.RequestPool(as, pool, subPool, nil, false)
		if err != nil {
			t.Fatalf("Unexpected failure: %v", err)
		}

		_, _, err = a.RequestAddress("", nil, nil)
		if err == nil {
			t.Fatal("Failed to detect wrong request: no pool id specified")
		}

		// An address outside the pool's subnet must be rejected.
		ip := net.ParseIP("172.17.0.23")
		_, _, err = a.RequestAddress(pid, ip, nil)
		if err == nil {
			t.Fatal("Failed to detect wrong request: requested IP from different subnet")
		}

		ip = net.ParseIP("192.168.0.50")
		_, _, err = a.RequestAddress(pid, ip, nil)
		if err != nil {
			t.Fatalf("Unexpected failure: %v", err)
		}

		err = a.ReleaseAddress("", ip)
		if err == nil {
			t.Fatal("Failed to detect wrong request: no pool id specified")
		}

		err = a.ReleaseAddress(pid, nil)
		if err == nil {
			// BUG fix: this message previously claimed "no pool id specified",
			// but the invalid argument here is the nil address.
			t.Fatal("Failed to detect wrong request: nil address specified")
		}

		err = a.ReleaseAddress(pid, ip)
		if err != nil {
			t.Fatalf("Unexpected failure: %v: %s, %s", err, pid, ip)
		}
	}
}
// TestRequest allocates N addresses from subnets of various sizes and checks
// the last allocation matches the expected address.
func TestRequest(t *testing.T) {
	// Request N addresses from different size subnets, verifying last request
	// returns expected address. Internal subnet host size is Allocator's default, 16
	testCases := []struct {
		subnet string
		numReq int
		lastIP string
	}{
		{"192.168.59.0/24", 254, "192.168.59.254"},
		{"192.168.240.0/20", 255, "192.168.240.255"},
		{"192.168.0.0/16", 255, "192.168.0.255"},
		{"192.168.0.0/16", 256, "192.168.1.0"},
		{"10.16.0.0/16", 255, "10.16.0.255"},
		{"10.128.0.0/12", 255, "10.128.0.255"},
		{"10.0.0.0/8", 256, "10.0.1.0"},
		{"192.168.128.0/18", 4*256 - 1, "192.168.131.255"},
		/*
			{"192.168.240.0/20", 16*256 - 2, "192.168.255.254"},
			{"192.168.0.0/16", 256*256 - 2, "192.168.255.254"},
			{"10.0.0.0/8", 2 * 256, "10.0.2.0"},
			{"10.0.0.0/8", 5 * 256, "10.0.5.0"},
			{"10.0.0.0/8", 100 * 256 * 254, "10.99.255.254"},
		*/
	}

	for _, tc := range testCases {
		assertNRequests(t, tc.subnet, tc.numReq, tc.lastIP)
	}
}
// TestRelease exhausts a /23 pool, then releases selected addresses one at a
// time, verifying the free count and that each released address is handed
// back on the next request.
func TestRelease(t *testing.T) {
	var (
		subnet = "192.168.0.0/23"
	)

	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)
		pid, _, _, err := a.RequestPool(localAddressSpace, subnet, "", nil, false)
		if err != nil {
			t.Fatal(err)
		}

		bm := a.addresses[SubnetKey{localAddressSpace, subnet, ""}]

		// Allocate all addresses
		for err != ipamapi.ErrNoAvailableIPs {
			_, _, err = a.RequestAddress(pid, nil, nil)
		}

		toRelease := []struct {
			address string
		}{
			{"192.168.0.1"},
			{"192.168.0.2"},
			{"192.168.0.3"},
			{"192.168.0.4"},
			{"192.168.0.5"},
			{"192.168.0.6"},
			{"192.168.0.7"},
			{"192.168.0.8"},
			{"192.168.0.9"},
			{"192.168.0.10"},
			{"192.168.0.30"},
			{"192.168.0.31"},
			{"192.168.1.32"},
			{"192.168.0.254"},
			{"192.168.1.1"},
			{"192.168.1.2"},
			{"192.168.1.3"},
			{"192.168.1.253"},
			{"192.168.1.254"},
		}

		// One by one, release the address and request again. We should get the same IP
		for _, inp := range toRelease {
			ip0 := net.ParseIP(inp.address)
			a.ReleaseAddress(pid, ip0)
			bm = a.addresses[SubnetKey{localAddressSpace, subnet, ""}]
			if bm.Unselected() != 1 {
				// BUG fix: the message previously reported i+1 as the expected
				// count, but exactly one address is free after each single
				// release (the previous one was re-requested below).
				t.Fatalf("Failed to update free address count after release. Expected %d, Found: %d", 1, bm.Unselected())
			}

			nw, _, err := a.RequestAddress(pid, nil, nil)
			if err != nil {
				t.Fatalf("Failed to obtain the address: %s", err.Error())
			}
			ip := nw.IP
			if !ip0.Equal(ip) {
				t.Fatalf("Failed to obtain the same address. Expected: %s, Got: %s", ip0, ip)
			}
		}
	}
}
// assertGetAddress reserves every address in subnet directly through
// Allocator.getAddress on a fresh bitmask and verifies the bitmask reports
// zero free addresses afterwards.
func assertGetAddress(t *testing.T, subnet string) {
	var (
		err       error
		printTime = false // flip on locally to print timing info
		a         = &Allocator{}
	)

	_, sub, _ := net.ParseCIDR(subnet)
	ones, bits := sub.Mask.Size()
	zeroes := bits - ones
	numAddresses := 1 << uint(zeroes)

	bm, err := bitseq.NewHandle("ipam_test", nil, "default/"+subnet, uint64(numAddresses))
	if err != nil {
		t.Fatal(err)
	}

	start := time.Now()
	run := 0
	for err != ipamapi.ErrNoAvailableIPs {
		_, err = a.getAddress(sub, bm, nil, nil, false)
		run++
	}
	if printTime {
		// BUG fix: corrected "nemAddresses" typo in the debug output.
		fmt.Printf("\nTaken %v, to allocate all addresses on %s. (numAddresses: %d. Runs: %d)", time.Since(start), subnet, numAddresses, run)
	}

	if bm.Unselected() != 0 {
		t.Fatalf("Unexpected free count after reserving all addresses: %d", bm.Unselected())
	}
	/*
		if bm.Head.Block != expectedMax || bm.Head.Count != numBlocks {
			t.Fatalf("Failed to effectively reserve all addresses on %s. Expected (0x%x, %d) as first sequence. Found (0x%x,%d)",
				subnet, expectedMax, numBlocks, bm.Head.Block, bm.Head.Count)
		}
	*/
}
// assertNRequests allocates numReq addresses from subnet, with and without a
// backing store, and verifies the final allocation equals lastExpectedIP.
func assertNRequests(t *testing.T, subnet string, numReq int, lastExpectedIP string) {
	var (
		nw        *net.IPNet
		printTime = false // flip on locally to print timing info
	)

	lastIP := net.ParseIP(lastExpectedIP)
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)

		poolID, _, _, err := a.RequestPool(localAddressSpace, subnet, "", nil, false)
		if err != nil {
			t.Fatal(err)
		}

		var i int
		start := time.Now()
		for i = 0; i < numReq; i++ {
			nw, _, err = a.RequestAddress(poolID, nil, nil)
		}
		if printTime {
			fmt.Printf("\nTaken %v, to allocate %d addresses on %s\n", time.Since(start), numReq, subnet)
		}

		if !lastIP.Equal(nw.IP) {
			t.Fatalf("Wrong last IP. Expected %s. Got: %s (err: %v, ind: %d)", lastExpectedIP, nw.IP.String(), err, i)
		}
	}
}
// benchmarkRequest keeps allocating addresses from subnet until the pool is
// exhausted (ErrNoAvailableIPs).
func benchmarkRequest(b *testing.B, a *Allocator, subnet string) {
	poolID, _, _, err := a.RequestPool(localAddressSpace, subnet, "", nil, false)
	for {
		if err == ipamapi.ErrNoAvailableIPs {
			break
		}
		_, _, err = a.RequestAddress(poolID, nil, nil)
	}
}
// benchMarkRequest repeats the pool-exhaustion benchmark b.N times on a
// single store-backed allocator.
func benchMarkRequest(subnet string, b *testing.B) {
	a, _ := getAllocator(true)
	for i := 0; i < b.N; i++ {
		benchmarkRequest(b, a, subnet)
	}
}
// BenchmarkRequest_24 measures exhausting a /24 pool.
func BenchmarkRequest_24(b *testing.B) {
	a, err := getAllocator(true)
	if err != nil {
		// Fail fast instead of dereferencing a nil allocator.
		b.Fatal(err)
	}
	benchmarkRequest(b, a, "10.0.0.0/24")
}
// BenchmarkRequest_16 measures exhausting a /16 pool.
func BenchmarkRequest_16(b *testing.B) {
	a, err := getAllocator(true)
	if err != nil {
		// Fail fast instead of dereferencing a nil allocator.
		b.Fatal(err)
	}
	benchmarkRequest(b, a, "10.0.0.0/16")
}
// BenchmarkRequest_8 measures exhausting a /8 pool.
func BenchmarkRequest_8(b *testing.B) {
	a, err := getAllocator(true)
	if err != nil {
		// Fail fast instead of dereferencing a nil allocator.
		b.Fatal(err)
	}
	benchmarkRequest(b, a, "10.0.0.0/8")
}
// TestAllocateRandomDeallocate runs the random allocate/release test against
// a plain pool and a sub pool, with and without a backing store.
func TestAllocateRandomDeallocate(t *testing.T) {
	for _, store := range []bool{false, true} {
		for _, subPool := range []string{"", "172.25.252.0/22"} {
			testAllocateRandomDeallocate(t, "172.25.0.0/16", subPool, 384, store)
		}
	}
}
// testAllocateRandomDeallocate allocates num addresses, releases half of them
// in a random order, re-requests that many, and verifies no address is ever
// handed out twice. The random seed is logged on failure for reproduction.
func testAllocateRandomDeallocate(t *testing.T, pool, subPool string, num int, store bool) {
	ds, err := randomLocalStore(store)
	assert.NoError(t, err)

	a, err := NewAllocator(ds, nil)
	if err != nil {
		t.Fatal(err)
	}

	pid, _, _, err := a.RequestPool(localAddressSpace, pool, subPool, nil, false)
	if err != nil {
		t.Fatal(err)
	}

	// Allocate num ip addresses
	indices := make(map[int]*net.IPNet, num)
	allocated := make(map[string]bool, num)
	for i := 0; i < num; i++ {
		ip, _, err := a.RequestAddress(pid, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
		ips := ip.String()
		if _, ok := allocated[ips]; ok {
			t.Fatalf("Address %s is already allocated", ips)
		}
		allocated[ips] = true
		indices[i] = ip
	}
	if len(indices) != len(allocated) || len(indices) != num {
		t.Fatalf("Unexpected number of allocated addresses: (%d,%d).", len(indices), len(allocated))
	}

	seed := time.Now().Unix()
	rand.Seed(seed)

	// Deallocate half of the allocated addresses following a random pattern
	pattern := rand.Perm(num)
	for i := 0; i < num/2; i++ {
		idx := pattern[i]
		ip := indices[idx]
		err := a.ReleaseAddress(pid, ip.IP)
		if err != nil {
			t.Fatalf("Unexpected failure on deallocation of %s: %v.\nSeed: %d.", ip, err, seed)
		}
		delete(indices, idx)
		delete(allocated, ip.String())
	}

	// Re-request half of the addresses (as many as were just released);
	// none of them may collide with one still allocated.
	for i := 0; i < num/2; i++ {
		ip, _, err := a.RequestAddress(pid, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
		ips := ip.String()
		if _, ok := allocated[ips]; ok {
			t.Fatalf("\nAddress %s is already allocated.\nSeed: %d.", ips, seed)
		}
		allocated[ips] = true
	}
	if len(allocated) != num {
		t.Fatalf("Unexpected number of allocated addresses: %d.\nSeed: %d.", len(allocated), seed)
	}
}
// TestRetrieveFromStore repeatedly creates fresh allocators on the same
// datastore, refreshes their state, and verifies the restored in-memory
// database matches the previous allocator's (via checkDBEquality) while
// further pool/address requests still succeed.
func TestRetrieveFromStore(t *testing.T) {
	num := 200
	ds, err := randomLocalStore(true)
	if err != nil {
		t.Fatal(err)
	}
	a, err := NewAllocator(ds, nil)
	if err != nil {
		t.Fatal(err)
	}
	pid, _, _, err := a.RequestPool(localAddressSpace, "172.25.0.0/16", "", nil, false)
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < num; i++ {
		if _, _, err := a.RequestAddress(pid, nil, nil); err != nil {
			t.Fatal(err)
		}
	}

	// Restore
	a1, err := NewAllocator(ds, nil)
	if err != nil {
		t.Fatal(err)
	}
	a1.refresh(localAddressSpace)
	db := a.DumpDatabase()
	db1 := a1.DumpDatabase()
	if db != db1 {
		t.Fatalf("Unexpected db change.\nExpected:%s\nGot:%s", db, db1)
	}
	checkDBEquality(a, a1, t)
	pid, _, _, err = a1.RequestPool(localAddressSpace, "172.25.0.0/16", "172.25.1.0/24", nil, false)
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < num/2; i++ {
		if _, _, err := a1.RequestAddress(pid, nil, nil); err != nil {
			t.Fatal(err)
		}
	}

	// Restore
	a2, err := NewAllocator(ds, nil)
	if err != nil {
		t.Fatal(err)
	}
	a2.refresh(localAddressSpace)
	checkDBEquality(a1, a2, t)
	pid, _, _, err = a2.RequestPool(localAddressSpace, "172.25.0.0/16", "172.25.2.0/24", nil, false)
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < num/2; i++ {
		if _, _, err := a2.RequestAddress(pid, nil, nil); err != nil {
			t.Fatal(err)
		}
	}

	// Restore
	a3, err := NewAllocator(ds, nil)
	if err != nil {
		t.Fatal(err)
	}
	a3.refresh(localAddressSpace)
	checkDBEquality(a2, a3, t)
	pid, _, _, err = a3.RequestPool(localAddressSpace, "172.26.0.0/16", "", nil, false)
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < num/2; i++ {
		if _, _, err := a3.RequestAddress(pid, nil, nil); err != nil {
			t.Fatal(err)
		}
	}

	// Restore
	a4, err := NewAllocator(ds, nil)
	if err != nil {
		t.Fatal(err)
	}
	a4.refresh(localAddressSpace)
	checkDBEquality(a3, a4, t)
}
// checkDBEquality verifies that two allocators hold equivalent subnet
// configurations and address bitmasks; used after restoring state from the
// datastore.
func checkDBEquality(a1, a2 *Allocator, t *testing.T) {
	for k, cnf1 := range a1.addrSpaces[localAddressSpace].subnets {
		cnf2 := a2.addrSpaces[localAddressSpace].subnets[k]
		if cnf1.String() != cnf2.String() {
			t.Fatalf("%s\n%s", cnf1, cnf2)
		}
		if cnf1.Range == nil {
			// NOTE(review): return values discarded — presumably invoked so a2
			// loads the bitmask for the comparison below; confirm.
			a2.retrieveBitmask(k, cnf1.Pool)
		}
	}
	for k, bm1 := range a1.addresses {
		bm2 := a2.addresses[k]
		if bm1.String() != bm2.String() {
			t.Fatalf("%s\n%s", bm1, bm2)
		}
	}
}
// Coordination state for the TestParallelPredefinedRequestN tests:
// numInstances test functions run in parallel and synchronize through the
// channels below (see runParallelTests).
const (
	numInstances = 5
	first        = 0
	last         = numInstances - 1
)

var (
	allocator *Allocator                                 // shared by all parallel instances; created by instance `first`
	start     = make(chan struct{})                      // closed by the first instance once the allocator is ready
	done      = make(chan chan struct{}, numInstances-1) // one completion channel per non-first instance
	pools     = make([]*net.IPNet, numInstances)         // pool handed to each instance
)
// runParallelTests coordinates numInstances parallel test functions that all
// request a predefined pool from one shared allocator; the first instance
// creates the allocator and finally verifies every instance got a distinct
// pool. Requires the test.parallel flag to be at least numInstances.
func runParallelTests(t *testing.T, instance int) {
	var err error

	t.Parallel()

	pTest := flag.Lookup("test.parallel")
	if pTest == nil {
		t.Skip("Skipped because test.parallel flag not set;")
	}
	numParallel, err := strconv.Atoi(pTest.Value.String())
	if err != nil {
		t.Fatal(err)
	}
	if numParallel < numInstances {
		t.Skip("Skipped because t.parallel was less than ", numInstances)
	}

	// The first instance creates the allocator, gives the start
	// and finally checks the pools each instance was assigned
	if instance == first {
		allocator, err = getAllocator(true)
		if err != nil {
			t.Fatal(err)
		}
		close(start)
	} else {
		// Idiom fix: a single-case select is just a blocking receive.
		<-start
		instDone := make(chan struct{})
		done <- instDone
		defer close(instDone)

		if instance == last {
			defer close(done)
		}
	}

	_, pools[instance], _, err = allocator.RequestPool(localAddressSpace, "", "", nil, false)
	if err != nil {
		t.Fatal(err)
	}

	if instance == first {
		// Wait for every other instance to finish its request.
		for instDone := range done {
			<-instDone
		}
		// Now check each instance got a different pool
		for i := 0; i < numInstances; i++ {
			for j := i + 1; j < numInstances; j++ {
				if types.CompareIPNet(pools[i], pools[j]) {
					t.Fatalf("Instance %d and %d were given the same predefined pool: %v", i, j, pools)
				}
			}
		}
	}
}
// TestRequestReleaseAddressDuplicate serially allocates addresses while
// concurrently releasing random ones, then checks via per-address reference
// counts that no address was ever double-allocated or double-released.
func TestRequestReleaseAddressDuplicate(t *testing.T) {
	a, err := getAllocator(false)
	if err != nil {
		t.Fatal(err)
	}
	// IP pairs an allocated/released address with a +1/-1 reference delta.
	type IP struct {
		ip  *net.IPNet
		ref int
	}
	ips := []IP{}
	allocatedIPs := []*net.IPNet{}
	a.addrSpaces["rosso"] = &addrSpace{
		id:      dsConfigKey + "/" + "rosso",
		ds:      a.addrSpaces[localAddressSpace].ds,
		alloc:   a.addrSpaces[localAddressSpace].alloc,
		scope:   a.addrSpaces[localAddressSpace].scope,
		subnets: map[SubnetKey]*PoolData{},
	}
	var wg sync.WaitGroup
	opts := map[string]string{
		ipamapi.AllocSerialPrefix: "true",
	}
	var l sync.Mutex

	poolID, _, _, err := a.RequestPool("rosso", "198.168.0.0/23", "", nil, false)
	if err != nil {
		t.Fatal(err)
	}

	// Allocate until the pool is exhausted; once more than 500 addresses are
	// held, release a random one concurrently.
	for err == nil {
		var c *net.IPNet
		if c, _, err = a.RequestAddress(poolID, nil, opts); err == nil {
			l.Lock()
			ips = append(ips, IP{c, 1})
			l.Unlock()
			allocatedIPs = append(allocatedIPs, c)
			if len(allocatedIPs) > 500 {
				i := rand.Intn(len(allocatedIPs) - 1)
				wg.Add(1)
				go func(ip *net.IPNet) {
					defer wg.Done()
					// BUG fix: use a goroutine-local error instead of writing
					// the shared err (which raced with the loop condition),
					// and report with t.Errorf — t.Fatal must not be called
					// from a goroutine other than the test's own.
					if relErr := a.ReleaseAddress(poolID, ip.IP); relErr != nil {
						t.Errorf("Failed to release %s: %v", ip.IP, relErr)
						return
					}
					l.Lock()
					ips = append(ips, IP{ip, -1})
					l.Unlock()
				}(allocatedIPs[i])
				allocatedIPs = append(allocatedIPs[:i], allocatedIPs[i+1:]...)
			}
		}
	}

	wg.Wait()

	// Replay the event log: a running refcount below 0 means a double
	// release, above 1 a double allocation.
	refMap := make(map[string]int)
	for _, ip := range ips {
		refMap[ip.ip.String()] = refMap[ip.ip.String()] + ip.ref
		if refMap[ip.ip.String()] < 0 {
			t.Fatalf("IP %s was previously released", ip.ip.String())
		}
		if refMap[ip.ip.String()] > 1 {
			t.Fatalf("IP %s was previously allocated", ip.ip.String())
		}
	}
}
// TestParallelPredefinedRequest1 runs instance 0 (the coordinator) of the
// parallel predefined-pool test; see runParallelTests.
func TestParallelPredefinedRequest1(t *testing.T) {
	runParallelTests(t, 0)
}

// TestParallelPredefinedRequest2 runs instance 1 of the parallel test.
func TestParallelPredefinedRequest2(t *testing.T) {
	runParallelTests(t, 1)
}

// TestParallelPredefinedRequest3 runs instance 2 of the parallel test.
func TestParallelPredefinedRequest3(t *testing.T) {
	runParallelTests(t, 2)
}

// TestParallelPredefinedRequest4 runs instance 3 of the parallel test.
func TestParallelPredefinedRequest4(t *testing.T) {
	runParallelTests(t, 3)
}

// TestParallelPredefinedRequest5 runs instance 4 (the last) of the parallel test.
func TestParallelPredefinedRequest5(t *testing.T) {
	runParallelTests(t, 4)
}
test: update tests to use sub-benchmarks
Go 1.7 added the subtest feature which can make table-driven tests much easier to run and debug. Some tests are not using this feature.
Signed-off-by: Yang Li <555a477e2c9676b20f2a56eb1bbab876ee42139f@gmail.com>
package ipam
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"math/rand"
"net"
"strconv"
"sync"
"testing"
"time"
"github.com/docker/libkv/store"
"github.com/docker/libkv/store/boltdb"
"github.com/docker/libnetwork/bitseq"
"github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/ipamapi"
"github.com/docker/libnetwork/ipamutils"
_ "github.com/docker/libnetwork/testutils"
"github.com/docker/libnetwork/types"
"github.com/stretchr/testify/assert"
)
const (
	// defaultPrefix is the base path under which test datastore files live.
	defaultPrefix = "/tmp/libnetwork/test/ipam"
)
// init registers the boltdb backend with libkv so the tests can create
// local datastores.
func init() {
	boltdb.Register()
}
// randomLocalStore returns a boltdb-backed local datastore rooted at a
// random temp file, or (nil, nil) when needStore is false.
func randomLocalStore(needStore bool) (datastore.DataStore, error) {
	if !needStore {
		return nil, nil
	}
	// Create (and immediately close) a temp file just to obtain a unique path.
	tmp, err := ioutil.TempFile("", "libnetwork-")
	if err != nil {
		return nil, fmt.Errorf("Error creating temp file: %v", err)
	}
	if err := tmp.Close(); err != nil {
		return nil, fmt.Errorf("Error closing temp file: %v", err)
	}
	return datastore.NewDataStore(datastore.LocalScope, &datastore.ScopeCfg{
		Client: datastore.ScopeClientCfg{
			Provider: "boltdb",
			// NOTE(review): the absolute temp path is concatenated onto
			// defaultPrefix, yielding e.g. /tmp/libnetwork/test/ipam/tmp/… —
			// looks intentional for uniqueness, confirm.
			Address: defaultPrefix + tmp.Name(),
			Config: &store.Config{
				Bucket:            "libnetwork",
				ConnectionTimeout: 3 * time.Second,
			},
		},
	})
}
// getAllocator builds a fresh Allocator for tests, wiring in a random local
// datastore when store is true.
func getAllocator(store bool) (*Allocator, error) {
	ipamutils.InitNetworks(nil)
	ds, err := randomLocalStore(store)
	if err == nil {
		return NewAllocator(ds, nil)
	}
	return nil, err
}
// TestInt2IP2IntConversion round-trips every ordinal below 2^24 through
// addIntToIP/ipToUint64 and checks the value survives the conversion.
func TestInt2IP2IntConversion(t *testing.T) {
	for i := uint64(0); i < 256*256*256; i++ {
		var array [4]byte // new array at each cycle
		addIntToIP(array[:], i)
		j := ipToUint64(array[:])
		if j != i {
			t.Fatalf("Failed to convert ordinal %d to IP % x and back to ordinal. Got %d", i, array, j)
		}
	}
}
// TestGetAddressVersion checks that getAddressVersion classifies v4 and v6
// addresses correctly.
func TestGetAddressVersion(t *testing.T) {
	for _, addr := range []string{"172.28.30.112", "0.0.0.1"} {
		if v4 != getAddressVersion(net.ParseIP(addr)) {
			t.Fatal("Failed to detect IPv4 version")
		}
	}
	for _, addr := range []string{"ff01::1", "2001:db8::76:51"} {
		if v6 != getAddressVersion(net.ParseIP(addr)) {
			t.Fatal("Failed to detect IPv6 version")
		}
	}
}
// TestKeyString round-trips SubnetKey through String()/FromString(), both
// with and without a child subnet component.
func TestKeyString(t *testing.T) {
	k := &SubnetKey{AddressSpace: "default", Subnet: "172.27.0.0/16"}
	expected := "default/172.27.0.0/16"
	if expected != k.String() {
		t.Fatalf("Unexpected key string: %s", k.String())
	}

	k2 := &SubnetKey{}
	err := k2.FromString(expected)
	if err != nil {
		t.Fatal(err)
	}
	if k2.AddressSpace != k.AddressSpace || k2.Subnet != k.Subnet {
		t.Fatalf("SubnetKey.FromString() failed. Expected %v. Got %v", k, k2)
	}

	// Now with a child (sub pool) component appended to the key.
	expected = fmt.Sprintf("%s/%s", expected, "172.27.3.0/24")
	k.ChildSubnet = "172.27.3.0/24"
	if expected != k.String() {
		t.Fatalf("Unexpected key string: %s", k.String())
	}

	err = k2.FromString(expected)
	if err != nil {
		t.Fatal(err)
	}
	if k2.AddressSpace != k.AddressSpace || k2.Subnet != k.Subnet || k2.ChildSubnet != k.ChildSubnet {
		t.Fatalf("SubnetKey.FromString() failed. Expected %v. Got %v", k, k2)
	}
}
// TestPoolDataMarshal round-trips PoolData through JSON, both with a
// populated Range and with a nil Range, and checks all fields survive.
func TestPoolDataMarshal(t *testing.T) {
	_, nw, err := net.ParseCIDR("172.28.30.1/24")
	if err != nil {
		t.Fatal(err)
	}

	p := &PoolData{
		ParentKey: SubnetKey{AddressSpace: "Blue", Subnet: "172.28.0.0/16"},
		Pool:      nw,
		Range:     &AddressRange{Sub: &net.IPNet{IP: net.IP{172, 28, 20, 0}, Mask: net.IPMask{255, 255, 255, 0}}, Start: 0, End: 255},
		RefCount:  4,
	}

	ba, err := json.Marshal(p)
	if err != nil {
		t.Fatal(err)
	}
	var q PoolData
	err = json.Unmarshal(ba, &q)
	if err != nil {
		t.Fatal(err)
	}

	if p.ParentKey != q.ParentKey || !types.CompareIPNet(p.Range.Sub, q.Range.Sub) ||
		p.Range.Start != q.Range.Start || p.Range.End != q.Range.End || p.RefCount != q.RefCount ||
		!types.CompareIPNet(p.Pool, q.Pool) {
		t.Fatalf("\n%#v\n%#v", p, &q)
	}

	// A nil Range must stay nil across the round trip.
	p = &PoolData{
		ParentKey: SubnetKey{AddressSpace: "Blue", Subnet: "172.28.0.0/16"},
		Pool:      nw,
		RefCount:  4,
	}

	ba, err = json.Marshal(p)
	if err != nil {
		t.Fatal(err)
	}
	err = json.Unmarshal(ba, &q)
	if err != nil {
		t.Fatal(err)
	}

	if q.Range != nil {
		t.Fatal("Unexpected Range")
	}
}
// TestSubnetsMarshal serializes the local address space's config via
// Value()/SetValue() and verifies allocation state is preserved: the next
// addresses handed out after the restore are the expected ones.
func TestSubnetsMarshal(t *testing.T) {
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		if err != nil {
			t.Fatal(err)
		}
		pid0, _, _, err := a.RequestPool(localAddressSpace, "192.168.0.0/16", "", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		pid1, _, _, err := a.RequestPool(localAddressSpace, "192.169.0.0/16", "", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		// One address from pool 0 allocated before the round trip.
		_, _, err = a.RequestAddress(pid0, nil, nil)
		if err != nil {
			t.Fatal(err)
		}

		cfg, err := a.getAddrSpace(localAddressSpace)
		if err != nil {
			t.Fatal(err)
		}

		// Serialize and immediately restore the address space config.
		ba := cfg.Value()
		if err := cfg.SetValue(ba); err != nil {
			t.Fatal(err)
		}

		// Pool 0 already handed out .1, so the next address is .2.
		expIP := &net.IPNet{IP: net.IP{192, 168, 0, 2}, Mask: net.IPMask{255, 255, 0, 0}}
		ip, _, err := a.RequestAddress(pid0, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(expIP, ip) {
			t.Fatalf("Got unexpected ip after pool config restore: %s", ip)
		}

		// Pool 1 is untouched, so its first address is .1.
		expIP = &net.IPNet{IP: net.IP{192, 169, 0, 1}, Mask: net.IPMask{255, 255, 0, 0}}
		ip, _, err = a.RequestAddress(pid1, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(expIP, ip) {
			t.Fatalf("Got unexpected ip after pool config restore: %s", ip)
		}
	}
}
// TestAddSubnets verifies pool-request semantics across address spaces:
// overlapping subnets are allowed in different spaces but rejected within
// one, repeated requests return the same pool ID, and sub pools of an
// existing pool are accepted.
func TestAddSubnets(t *testing.T) {
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		if err != nil {
			t.Fatal(err)
		}
		// NOTE(review): "abc" aliases the very same addrSpace value as the
		// local space (no copy) — pool IDs still differ by space name.
		a.addrSpaces["abc"] = a.addrSpaces[localAddressSpace]

		pid0, _, _, err := a.RequestPool(localAddressSpace, "10.0.0.0/8", "", nil, false)
		if err != nil {
			t.Fatal("Unexpected failure in adding subnet")
		}

		pid1, _, _, err := a.RequestPool("abc", "10.0.0.0/8", "", nil, false)
		if err != nil {
			t.Fatalf("Unexpected failure in adding overlapping subnets to different address spaces: %v", err)
		}

		if pid0 == pid1 {
			t.Fatal("returned same pool id for same subnets in different namespaces")
		}

		// Re-requesting the same subnet must return the existing pool ID.
		pid, _, _, err := a.RequestPool("abc", "10.0.0.0/8", "", nil, false)
		if err != nil {
			t.Fatalf("Unexpected failure requesting existing subnet: %v", err)
		}
		if pid != pid1 {
			t.Fatal("returned different pool id for same subnet requests")
		}

		_, _, _, err = a.RequestPool("abc", "10.128.0.0/9", "", nil, false)
		if err == nil {
			t.Fatal("Expected failure on adding overlapping base subnet")
		}

		pid2, _, _, err := a.RequestPool("abc", "10.0.0.0/8", "10.128.0.0/9", nil, false)
		if err != nil {
			t.Fatalf("Unexpected failure on adding sub pool: %v", err)
		}
		pid3, _, _, err := a.RequestPool("abc", "10.0.0.0/8", "10.128.0.0/9", nil, false)
		if err != nil {
			t.Fatalf("Unexpected failure on adding overlapping sub pool: %v", err)
		}
		if pid2 != pid3 {
			t.Fatal("returned different pool id for same sub pool requests")
		}

		_, _, _, err = a.RequestPool(localAddressSpace, "10.20.2.0/24", "", nil, false)
		if err == nil {
			t.Fatal("Failed to detect overlapping subnets")
		}

		_, _, _, err = a.RequestPool(localAddressSpace, "10.128.0.0/9", "", nil, false)
		if err == nil {
			t.Fatal("Failed to detect overlapping subnets")
		}

		// Same checks for v6 subnets.
		_, _, _, err = a.RequestPool(localAddressSpace, "1003:1:2:3:4:5:6::/112", "", nil, false)
		if err != nil {
			t.Fatalf("Failed to add v6 subnet: %s", err.Error())
		}

		_, _, _, err = a.RequestPool(localAddressSpace, "1003:1:2:3::/64", "", nil, false)
		if err == nil {
			t.Fatal("Failed to detect overlapping v6 subnet")
		}
	}
}
// TestDoublePoolRelease tests that releasing a pool which has already
// been released raises an error.
func TestDoublePoolRelease(t *testing.T) {
	for _, store := range []bool{false, true} {
		for _, extra := range []int{0, 1, 10} {
			a, err := getAllocator(store)
			assert.NoError(t, err)

			// Initial allocation of the pool.
			pid, _, _, err := a.RequestPool(localAddressSpace, "10.0.0.0/8", "", nil, false)
			assert.NoError(t, err)

			// Duplicate requests must hand back the same pool ID.
			for i := 0; i < extra; i++ {
				dup, _, _, err := a.RequestPool(localAddressSpace, "10.0.0.0/8", "", nil, false)
				assert.NoError(t, err)
				assert.Equal(t, pid, dup)
			}

			// Balance out the duplicate requests.
			for i := 0; i < extra; i++ {
				assert.NoError(t, a.ReleasePool(pid))
			}

			// Release matching the initial request succeeds...
			assert.NoError(t, a.ReleasePool(pid))

			// ...but releasing once more must fail.
			assert.Error(t, a.ReleasePool(pid))
		}
	}
}
// TestAddReleasePoolID verifies pool reference counting across repeated
// RequestPool/ReleasePool calls on a base pool and its sub pools: each
// sub-pool request bumps the base pool's refcount, releases decrement it,
// and the base pool disappears only when the count reaches zero.
func TestAddReleasePoolID(t *testing.T) {
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)

		var k0, k1, k2 SubnetKey
		_, err = a.getAddrSpace(localAddressSpace)
		if err != nil {
			t.Fatal(err)
		}

		// checkRefCount re-fetches the address space and validates the
		// reference count recorded for subnet key k. Extracted helper: the
		// original repeated this four-line sequence nine times.
		checkRefCount := func(k SubnetKey, want int) {
			t.Helper()
			aSpace, err := a.getAddrSpace(localAddressSpace)
			if err != nil {
				t.Fatal(err)
			}
			if aSpace.subnets[k].RefCount != want {
				t.Fatalf("Unexpected ref count for %s: %d", k, aSpace.subnets[k].RefCount)
			}
		}

		pid0, _, _, err := a.RequestPool(localAddressSpace, "10.0.0.0/8", "", nil, false)
		if err != nil {
			t.Fatal("Unexpected failure in adding pool")
		}
		if err := k0.FromString(pid0); err != nil {
			t.Fatal(err)
		}
		checkRefCount(k0, 1)

		pid1, _, _, err := a.RequestPool(localAddressSpace, "10.0.0.0/8", "10.0.0.0/16", nil, false)
		if err != nil {
			t.Fatal("Unexpected failure in adding sub pool")
		}
		if err := k1.FromString(pid1); err != nil {
			t.Fatal(err)
		}
		checkRefCount(k1, 1)

		pid2, _, _, err := a.RequestPool(localAddressSpace, "10.0.0.0/8", "10.0.0.0/16", nil, false)
		if err != nil {
			t.Fatal("Unexpected failure in adding sub pool")
		}
		if pid0 == pid1 || pid0 == pid2 || pid1 != pid2 {
			t.Fatalf("Incorrect poolIDs returned %s, %s, %s", pid0, pid1, pid2)
		}
		if err := k2.FromString(pid2); err != nil {
			t.Fatal(err)
		}
		// The duplicate sub-pool request bumped its own count to 2 and the
		// base pool's to 3 (base + two sub-pool references).
		checkRefCount(k2, 2)
		checkRefCount(k0, 3)

		if err := a.ReleasePool(pid1); err != nil {
			t.Fatal(err)
		}
		checkRefCount(k0, 2)
		if err := a.ReleasePool(pid0); err != nil {
			t.Fatal(err)
		}
		checkRefCount(k0, 1)

		// The base pool is still referenced by the sub pool, so re-requesting
		// it must return the original ID.
		pid00, _, _, err := a.RequestPool(localAddressSpace, "10.0.0.0/8", "", nil, false)
		if err != nil {
			t.Fatal("Unexpected failure in adding pool")
		}
		if pid00 != pid0 {
			t.Fatal("main pool should still exist")
		}
		checkRefCount(k0, 2)

		if err := a.ReleasePool(pid2); err != nil {
			t.Fatal(err)
		}
		checkRefCount(k0, 1)
		if err := a.ReleasePool(pid00); err != nil {
			t.Fatal(err)
		}
		// After the last release the base pool must be gone entirely.
		aSpace, err := a.getAddrSpace(localAddressSpace)
		if err != nil {
			t.Fatal(err)
		}
		if bp, ok := aSpace.subnets[k0]; ok {
			t.Fatalf("Base pool %s is still present: %v", k0, bp)
		}

		// A fresh request recreates the pool with a refcount of 1.
		_, _, _, err = a.RequestPool(localAddressSpace, "10.0.0.0/8", "", nil, false)
		if err != nil {
			t.Fatal("Unexpected failure in adding pool")
		}
		checkRefCount(k0, 1)
	}
}
// TestPredefinedPool checks predefined (auto-selected) pool requests:
//   - they must fail for an unknown address space, and
//   - the next predefined network must differ from one already handed out.
func TestPredefinedPool(t *testing.T) {
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)

		// "blue" is not a registered address space, so this must fail.
		if _, err := a.getPredefinedPool("blue", false); err == nil {
			t.Fatal("Expected failure for non default addr space")
		}

		// Reserve an automatic pool, then ask for the next predefined one.
		pid, nw, _, err := a.RequestPool(localAddressSpace, "", "", nil, false)
		if err != nil {
			t.Fatal(err)
		}

		nw2, err := a.getPredefinedPool(localAddressSpace, false)
		if err != nil {
			t.Fatal(err)
		}
		// The predefined pool must not collide with the pool in use.
		if types.CompareIPNet(nw, nw2) {
			t.Fatalf("Unexpected default network returned: %s = %s", nw2, nw)
		}

		if err := a.ReleasePool(pid); err != nil {
			t.Fatal(err)
		}
	}
}
// TestRemoveSubnet requests a mix of v4 and v6 pools across two address
// spaces and verifies every one of them can be released cleanly.
func TestRemoveSubnet(t *testing.T) {
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)

		// Register a second address space backed by the same store/allocator.
		a.addrSpaces["splane"] = &addrSpace{
			id:      dsConfigKey + "/" + "splane",
			ds:      a.addrSpaces[localAddressSpace].ds,
			alloc:   a.addrSpaces[localAddressSpace].alloc,
			scope:   a.addrSpaces[localAddressSpace].scope,
			subnets: map[SubnetKey]*PoolData{},
		}

		cases := []struct {
			addrSpace string
			subnet    string
			v6        bool
		}{
			{localAddressSpace, "192.168.0.0/16", false},
			{localAddressSpace, "172.17.0.0/16", false},
			{localAddressSpace, "10.0.0.0/8", false},
			{localAddressSpace, "2001:db8:1:2:3:4:ffff::/112", false},
			{"splane", "172.17.0.0/16", false},
			{"splane", "10.0.0.0/8", false},
			{"splane", "2001:db8:1:2:3:4:5::/112", true},
			{"splane", "2001:db8:1:2:3:4:ffff::/112", true},
		}

		var poolIDs []string
		for _, c := range cases {
			pid, _, _, err := a.RequestPool(c.addrSpace, c.subnet, "", nil, c.v6)
			if err != nil {
				t.Fatalf("Failed to apply input. Can't proceed: %s", err.Error())
			}
			poolIDs = append(poolIDs, pid)
		}

		for i, id := range poolIDs {
			if err := a.ReleasePool(id); err != nil {
				t.Fatalf("Failed to release poolID %s (%d)", id, i)
			}
		}
	}
}
// TestGetSameAddress verifies that an explicit address can be allocated only
// once: a second request for the same IP must fail.
func TestGetSameAddress(t *testing.T) {
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)

		a.addrSpaces["giallo"] = &addrSpace{
			id:      dsConfigKey + "/" + "giallo",
			ds:      a.addrSpaces[localAddressSpace].ds,
			alloc:   a.addrSpaces[localAddressSpace].alloc,
			scope:   a.addrSpaces[localAddressSpace].scope,
			subnets: map[SubnetKey]*PoolData{},
		}

		pid, _, _, err := a.RequestPool("giallo", "192.168.100.0/24", "", nil, false)
		if err != nil {
			t.Fatal(err)
		}

		ip := net.ParseIP("192.168.100.250")
		_, _, err = a.RequestAddress(pid, ip, nil)
		if err != nil {
			t.Fatal(err)
		}

		// A second request for the same explicit address must fail.
		// (The original code called t.Fatal(err) here, which printed a nil
		// error and gave no indication of what went wrong.)
		_, _, err = a.RequestAddress(pid, ip, nil)
		if err == nil {
			t.Fatal("Expected failure requesting already allocated address")
		}
	}
}
// TestGetAddressSubPoolEqualPool makes sure address allocation still works
// when the requested sub pool covers the entire parent pool.
func TestGetAddressSubPoolEqualPool(t *testing.T) {
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)

		// Requesting a subpool of same size of the master pool should not cause any problem on ip allocation
		pid, _, _, err := a.RequestPool(localAddressSpace, "172.18.0.0/16", "172.18.0.0/16", nil, false)
		if err != nil {
			t.Fatal(err)
		}

		if _, _, err = a.RequestAddress(pid, nil, nil); err != nil {
			t.Fatal(err)
		}
	}
}
// TestRequestReleaseAddressFromSubPool walks through address allocation out
// of sub pools: exhausting a sub pool, releasing and re-acquiring a specific
// address, and mixing explicit with automatic requests.
func TestRequestReleaseAddressFromSubPool(t *testing.T) {
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)

		// Dedicated address space sharing the local space's backing store.
		a.addrSpaces["rosso"] = &addrSpace{
			id:      dsConfigKey + "/" + "rosso",
			ds:      a.addrSpaces[localAddressSpace].ds,
			alloc:   a.addrSpaces[localAddressSpace].alloc,
			scope:   a.addrSpaces[localAddressSpace].scope,
			subnets: map[SubnetKey]*PoolData{},
		}

		poolID, _, _, err := a.RequestPool("rosso", "172.28.0.0/16", "172.28.30.0/24", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		var ip *net.IPNet
		// Addresses carry the parent pool's mask (/16), not the sub pool's.
		expected := &net.IPNet{IP: net.IP{172, 28, 30, 255}, Mask: net.IPMask{255, 255, 0, 0}}
		// Drain the sub pool; the last address handed out should be .255.
		for err == nil {
			var c *net.IPNet
			if c, _, err = a.RequestAddress(poolID, nil, nil); err == nil {
				ip = c
			}
		}
		if err != ipamapi.ErrNoAvailableIPs {
			t.Fatal(err)
		}
		if !types.CompareIPNet(expected, ip) {
			t.Fatalf("Unexpected last IP from subpool. Expected: %s. Got: %v.", expected, ip)
		}
		// Release one address and verify it is the next one handed out.
		rp := &net.IPNet{IP: net.IP{172, 28, 30, 97}, Mask: net.IPMask{255, 255, 0, 0}}
		if err = a.ReleaseAddress(poolID, rp.IP); err != nil {
			t.Fatal(err)
		}
		if ip, _, err = a.RequestAddress(poolID, nil, nil); err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(rp, ip) {
			t.Fatalf("Unexpected IP from subpool. Expected: %s. Got: %v.", rp, ip)
		}

		// Repeat the exercise with a sub pool carved out of a nested pool.
		_, _, _, err = a.RequestPool("rosso", "10.0.0.0/8", "10.0.0.0/16", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		poolID, _, _, err = a.RequestPool("rosso", "10.0.0.0/16", "10.0.0.0/24", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		expected = &net.IPNet{IP: net.IP{10, 0, 0, 255}, Mask: net.IPMask{255, 255, 0, 0}}
		for err == nil {
			var c *net.IPNet
			if c, _, err = a.RequestAddress(poolID, nil, nil); err == nil {
				ip = c
			}
		}
		if err != ipamapi.ErrNoAvailableIPs {
			t.Fatal(err)
		}
		if !types.CompareIPNet(expected, ip) {
			t.Fatalf("Unexpected last IP from subpool. Expected: %s. Got: %v.", expected, ip)
		}
		rp = &net.IPNet{IP: net.IP{10, 0, 0, 79}, Mask: net.IPMask{255, 255, 0, 0}}
		if err = a.ReleaseAddress(poolID, rp.IP); err != nil {
			t.Fatal(err)
		}
		if ip, _, err = a.RequestAddress(poolID, nil, nil); err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(rp, ip) {
			t.Fatalf("Unexpected IP from subpool. Expected: %s. Got: %v.", rp, ip)
		}

		// Request any addresses from subpool after explicit address request
		unoExp, _ := types.ParseCIDR("10.2.2.0/16")
		dueExp, _ := types.ParseCIDR("10.2.2.2/16")
		treExp, _ := types.ParseCIDR("10.2.2.1/16")
		if poolID, _, _, err = a.RequestPool("rosso", "10.2.0.0/16", "10.2.2.0/24", nil, false); err != nil {
			t.Fatal(err)
		}
		// Explicitly grab .1 first; automatic requests then return .0 and .2.
		tre, _, err := a.RequestAddress(poolID, treExp.IP, nil)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(tre, treExp) {
			t.Fatalf("Unexpected address: %v", tre)
		}
		uno, _, err := a.RequestAddress(poolID, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(uno, unoExp) {
			t.Fatalf("Unexpected address: %v", uno)
		}
		due, _, err := a.RequestAddress(poolID, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(due, dueExp) {
			t.Fatalf("Unexpected address: %v", due)
		}
		// Without serial allocation, a released address is reused right away.
		if err = a.ReleaseAddress(poolID, uno.IP); err != nil {
			t.Fatal(err)
		}
		uno, _, err = a.RequestAddress(poolID, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(uno, unoExp) {
			t.Fatalf("Unexpected address: %v", uno)
		}
		if err = a.ReleaseAddress(poolID, tre.IP); err != nil {
			t.Fatal(err)
		}
		tre, _, err = a.RequestAddress(poolID, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(tre, treExp) {
			t.Fatalf("Unexpected address: %v", tre)
		}
	}
}
// TestSerializeRequestReleaseAddressFromSubPool mirrors
// TestRequestReleaseAddressFromSubPool but with the serial-allocation option
// enabled: released addresses are NOT reused immediately, so the trailing
// requests receive the next fresh addresses (.3 and .4) instead.
func TestSerializeRequestReleaseAddressFromSubPool(t *testing.T) {
	opts := map[string]string{
		ipamapi.AllocSerialPrefix: "true"}
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)

		// Dedicated address space sharing the local space's backing store.
		a.addrSpaces["rosso"] = &addrSpace{
			id:      dsConfigKey + "/" + "rosso",
			ds:      a.addrSpaces[localAddressSpace].ds,
			alloc:   a.addrSpaces[localAddressSpace].alloc,
			scope:   a.addrSpaces[localAddressSpace].scope,
			subnets: map[SubnetKey]*PoolData{},
		}

		poolID, _, _, err := a.RequestPool("rosso", "172.28.0.0/16", "172.28.30.0/24", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		var ip *net.IPNet
		// Addresses carry the parent pool's mask (/16), not the sub pool's.
		expected := &net.IPNet{IP: net.IP{172, 28, 30, 255}, Mask: net.IPMask{255, 255, 0, 0}}
		// Drain the sub pool; the last address handed out should be .255.
		for err == nil {
			var c *net.IPNet
			if c, _, err = a.RequestAddress(poolID, nil, opts); err == nil {
				ip = c
			}
		}
		if err != ipamapi.ErrNoAvailableIPs {
			t.Fatal(err)
		}
		if !types.CompareIPNet(expected, ip) {
			t.Fatalf("Unexpected last IP from subpool. Expected: %s. Got: %v.", expected, ip)
		}
		// Once the pool is exhausted, a released address is handed out again.
		rp := &net.IPNet{IP: net.IP{172, 28, 30, 97}, Mask: net.IPMask{255, 255, 0, 0}}
		if err = a.ReleaseAddress(poolID, rp.IP); err != nil {
			t.Fatal(err)
		}
		if ip, _, err = a.RequestAddress(poolID, nil, opts); err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(rp, ip) {
			t.Fatalf("Unexpected IP from subpool. Expected: %s. Got: %v.", rp, ip)
		}

		// Repeat the exercise with a sub pool carved out of a nested pool.
		_, _, _, err = a.RequestPool("rosso", "10.0.0.0/8", "10.0.0.0/16", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		poolID, _, _, err = a.RequestPool("rosso", "10.0.0.0/16", "10.0.0.0/24", nil, false)
		if err != nil {
			t.Fatal(err)
		}
		expected = &net.IPNet{IP: net.IP{10, 0, 0, 255}, Mask: net.IPMask{255, 255, 0, 0}}
		for err == nil {
			var c *net.IPNet
			if c, _, err = a.RequestAddress(poolID, nil, opts); err == nil {
				ip = c
			}
		}
		if err != ipamapi.ErrNoAvailableIPs {
			t.Fatal(err)
		}
		if !types.CompareIPNet(expected, ip) {
			t.Fatalf("Unexpected last IP from subpool. Expected: %s. Got: %v.", expected, ip)
		}
		rp = &net.IPNet{IP: net.IP{10, 0, 0, 79}, Mask: net.IPMask{255, 255, 0, 0}}
		if err = a.ReleaseAddress(poolID, rp.IP); err != nil {
			t.Fatal(err)
		}
		if ip, _, err = a.RequestAddress(poolID, nil, opts); err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(rp, ip) {
			t.Fatalf("Unexpected IP from subpool. Expected: %s. Got: %v.", rp, ip)
		}

		// Request any addresses from subpool after explicit address request
		unoExp, _ := types.ParseCIDR("10.2.2.0/16")
		dueExp, _ := types.ParseCIDR("10.2.2.2/16")
		treExp, _ := types.ParseCIDR("10.2.2.1/16")
		quaExp, _ := types.ParseCIDR("10.2.2.3/16")
		fivExp, _ := types.ParseCIDR("10.2.2.4/16")
		if poolID, _, _, err = a.RequestPool("rosso", "10.2.0.0/16", "10.2.2.0/24", nil, false); err != nil {
			t.Fatal(err)
		}
		// Explicitly grab .1 first; automatic requests then return .0 and .2.
		tre, _, err := a.RequestAddress(poolID, treExp.IP, opts)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(tre, treExp) {
			t.Fatalf("Unexpected address: %v", tre)
		}
		uno, _, err := a.RequestAddress(poolID, nil, opts)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(uno, unoExp) {
			t.Fatalf("Unexpected address: %v", uno)
		}
		due, _, err := a.RequestAddress(poolID, nil, opts)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(due, dueExp) {
			t.Fatalf("Unexpected address: %v", due)
		}
		// With serial allocation the released .0 is skipped; .3 comes next.
		if err = a.ReleaseAddress(poolID, uno.IP); err != nil {
			t.Fatal(err)
		}
		uno, _, err = a.RequestAddress(poolID, nil, opts)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(uno, quaExp) {
			t.Fatalf("Unexpected address: %v", uno)
		}
		// Likewise the released .1 is skipped; .4 comes next.
		if err = a.ReleaseAddress(poolID, tre.IP); err != nil {
			t.Fatal(err)
		}
		tre, _, err = a.RequestAddress(poolID, nil, opts)
		if err != nil {
			t.Fatal(err)
		}
		if !types.CompareIPNet(tre, fivExp) {
			t.Fatalf("Unexpected address: %v", tre)
		}
	}
}
// TestGetAddress exhausts subnets of every size from /11 through /31 and
// verifies the bitmask bookkeeping via assertGetAddress.
func TestGetAddress(t *testing.T) {
	// /8 through /10 are skipped, as in the original commented-out entries,
	// presumably to keep the test fast.
	for ones := 11; ones <= 31; ones++ {
		assertGetAddress(t, fmt.Sprintf("10.0.0.0/%d", ones))
	}
}
// TestRequestSyntaxCheck walks the argument-validation paths of RequestPool,
// RequestAddress and ReleaseAddress: empty address space, sub pool without a
// parent pool, missing pool ID, and an explicit IP outside the pool's subnet.
func TestRequestSyntaxCheck(t *testing.T) {
	var (
		pool    = "192.168.0.0/16"
		subPool = "192.168.0.0/24"
		as      = "green"
	)

	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)

		// Dedicated address space sharing the local space's backing store.
		a.addrSpaces[as] = &addrSpace{
			id:      dsConfigKey + "/" + as,
			ds:      a.addrSpaces[localAddressSpace].ds,
			alloc:   a.addrSpaces[localAddressSpace].alloc,
			scope:   a.addrSpaces[localAddressSpace].scope,
			subnets: map[SubnetKey]*PoolData{},
		}

		_, _, _, err = a.RequestPool("", pool, "", nil, false)
		if err == nil {
			t.Fatal("Failed to detect wrong request: empty address space")
		}

		_, _, _, err = a.RequestPool("", pool, subPool, nil, false)
		if err == nil {
			t.Fatal("Failed to detect wrong request: empty address space")
		}

		_, _, _, err = a.RequestPool(as, "", subPool, nil, false)
		if err == nil {
			t.Fatal("Failed to detect wrong request: subPool specified and no pool")
		}

		pid, _, _, err := a.RequestPool(as, pool, subPool, nil, false)
		if err != nil {
			t.Fatalf("Unexpected failure: %v", err)
		}

		_, _, err = a.RequestAddress("", nil, nil)
		if err == nil {
			t.Fatal("Failed to detect wrong request: no pool id specified")
		}

		// 172.17.0.23 lies outside 192.168.0.0/16 and must be rejected.
		ip := net.ParseIP("172.17.0.23")
		_, _, err = a.RequestAddress(pid, ip, nil)
		if err == nil {
			t.Fatal("Failed to detect wrong request: requested IP from different subnet")
		}

		ip = net.ParseIP("192.168.0.50")
		_, _, err = a.RequestAddress(pid, ip, nil)
		if err != nil {
			t.Fatalf("Unexpected failure: %v", err)
		}

		err = a.ReleaseAddress("", ip)
		if err == nil {
			t.Fatal("Failed to detect wrong request: no pool id specified")
		}

		err = a.ReleaseAddress(pid, nil)
		if err == nil {
			t.Fatal("Failed to detect wrong request: no pool id specified")
		}

		err = a.ReleaseAddress(pid, ip)
		if err != nil {
			t.Fatalf("Unexpected failure: %v: %s, %s", err, pid, ip)
		}
	}
}
// TestRequest allocates numReq addresses from subnets of various sizes and
// checks the address returned by the final request.
func TestRequest(t *testing.T) {
	// Request N addresses from different size subnets, verifying last request
	// returns expected address. Internal subnet host size is Allocator's default, 16
	for _, tc := range []struct {
		subnet string
		numReq int
		lastIP string
	}{
		{"192.168.59.0/24", 254, "192.168.59.254"},
		{"192.168.240.0/20", 255, "192.168.240.255"},
		{"192.168.0.0/16", 255, "192.168.0.255"},
		{"192.168.0.0/16", 256, "192.168.1.0"},
		{"10.16.0.0/16", 255, "10.16.0.255"},
		{"10.128.0.0/12", 255, "10.128.0.255"},
		{"10.0.0.0/8", 256, "10.0.1.0"},
		{"192.168.128.0/18", 4*256 - 1, "192.168.131.255"},
		/*
			{"192.168.240.0/20", 16*256 - 2, "192.168.255.254"},
			{"192.168.0.0/16", 256*256 - 2, "192.168.255.254"},
			{"10.0.0.0/8", 2 * 256, "10.0.2.0"},
			{"10.0.0.0/8", 5 * 256, "10.0.5.0"},
			{"10.0.0.0/8", 100 * 256 * 254, "10.99.255.254"},
		*/
	} {
		assertNRequests(t, tc.subnet, tc.numReq, tc.lastIP)
	}
}
// TestRelease fully allocates a /23, then releases selected addresses one at
// a time, verifying each release frees exactly one slot and that the freed
// address is the next one handed back out.
func TestRelease(t *testing.T) {
	var (
		subnet = "192.168.0.0/23"
	)

	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)

		pid, _, _, err := a.RequestPool(localAddressSpace, subnet, "", nil, false)
		if err != nil {
			t.Fatal(err)
		}

		bm := a.addresses[SubnetKey{localAddressSpace, subnet, ""}]

		// Allocate all addresses
		for err != ipamapi.ErrNoAvailableIPs {
			_, _, err = a.RequestAddress(pid, nil, nil)
		}

		toRelease := []struct {
			address string
		}{
			{"192.168.0.1"},
			{"192.168.0.2"},
			{"192.168.0.3"},
			{"192.168.0.4"},
			{"192.168.0.5"},
			{"192.168.0.6"},
			{"192.168.0.7"},
			{"192.168.0.8"},
			{"192.168.0.9"},
			{"192.168.0.10"},
			{"192.168.0.30"},
			{"192.168.0.31"},
			{"192.168.1.32"},
			{"192.168.0.254"},
			{"192.168.1.1"},
			{"192.168.1.2"},
			{"192.168.1.3"},
			{"192.168.1.253"},
			{"192.168.1.254"},
		}

		// One by one, release the address and request again. We should get the same IP
		for _, inp := range toRelease {
			ip0 := net.ParseIP(inp.address)
			// The original code ignored this error; a failed release would
			// surface later as a confusing allocation mismatch.
			if err := a.ReleaseAddress(pid, ip0); err != nil {
				t.Fatalf("Failed to release address %s: %v", ip0, err)
			}
			bm = a.addresses[SubnetKey{localAddressSpace, subnet, ""}]
			// Exactly one address must be free after a single release. (The
			// original message printed the loop index instead of 1, which
			// contradicted the condition being checked.)
			if bm.Unselected() != 1 {
				t.Fatalf("Failed to update free address count after release. Expected: 1, Found: %d", bm.Unselected())
			}

			nw, _, err := a.RequestAddress(pid, nil, nil)
			if err != nil {
				t.Fatalf("Failed to obtain the address: %s", err.Error())
			}
			ip := nw.IP
			if !ip0.Equal(ip) {
				t.Fatalf("Failed to obtain the same address. Expected: %s, Got: %s", ip0, ip)
			}
		}
	}
}
// assertGetAddress allocates every address of the given subnet through a raw
// bitseq handle and verifies the bitmask ends with zero free addresses.
// Set printTime to true locally to print allocation timing.
func assertGetAddress(t *testing.T, subnet string) {
	var (
		err       error
		printTime = false
		a         = &Allocator{}
	)

	_, sub, _ := net.ParseCIDR(subnet)
	ones, bits := sub.Mask.Size()
	zeroes := bits - ones
	numAddresses := 1 << uint(zeroes)

	bm, err := bitseq.NewHandle("ipam_test", nil, "default/"+subnet, uint64(numAddresses))
	if err != nil {
		t.Fatal(err)
	}

	start := time.Now()
	run := 0
	// Keep requesting until the allocator reports exhaustion.
	for err != ipamapi.ErrNoAvailableIPs {
		_, err = a.getAddress(sub, bm, nil, nil, false)
		run++
	}
	if printTime {
		// Fixed "nemAddresses" typo in the original debug output.
		fmt.Printf("\nTaken %v, to allocate all addresses on %s. (numAddresses: %d. Runs: %d)", time.Since(start), subnet, numAddresses, run)
	}

	if bm.Unselected() != 0 {
		t.Fatalf("Unexpected free count after reserving all addresses: %d", bm.Unselected())
	}
	/*
		if bm.Head.Block != expectedMax || bm.Head.Count != numBlocks {
			t.Fatalf("Failed to effectively reserve all addresses on %s. Expected (0x%x, %d) as first sequence. Found (0x%x,%d)",
				subnet, expectedMax, numBlocks, bm.Head.Block, bm.Head.Count)
		}
	*/
}
// assertNRequests requests numReq addresses from a fresh pool over subnet and
// verifies the last address returned equals lastExpectedIP. Intermediate
// request errors are deliberately not checked; only the final address (and
// the final err, echoed in the failure message) matter.
func assertNRequests(t *testing.T, subnet string, numReq int, lastExpectedIP string) {
	var (
		nw        *net.IPNet
		printTime = false // flip on locally to print allocation timing
	)

	lastIP := net.ParseIP(lastExpectedIP)
	for _, store := range []bool{false, true} {
		a, err := getAllocator(store)
		assert.NoError(t, err)

		pid, _, _, err := a.RequestPool(localAddressSpace, subnet, "", nil, false)
		if err != nil {
			t.Fatal(err)
		}

		i := 0
		start := time.Now()
		for ; i < numReq; i++ {
			nw, _, err = a.RequestAddress(pid, nil, nil)
		}
		if printTime {
			fmt.Printf("\nTaken %v, to allocate %d addresses on %s\n", time.Since(start), numReq, subnet)
		}

		if !lastIP.Equal(nw.IP) {
			t.Fatalf("Wrong last IP. Expected %s. Got: %s (err: %v, ind: %d)", lastExpectedIP, nw.IP.String(), err, i)
		}
	}
}
// benchmarkRequest drains the given subnet: it requests addresses until the
// allocator reports ErrNoAvailableIPs. The *testing.B parameter is accepted
// for uniformity with the benchmark callers but is not used in the body.
func benchmarkRequest(b *testing.B, a *Allocator, subnet string) {
	pid, _, _, err := a.RequestPool(localAddressSpace, subnet, "", nil, false)
	for err != ipamapi.ErrNoAvailableIPs {
		_, _, err = a.RequestAddress(pid, nil, nil)
	}
}
// benchMarkRequest drains subnet b.N times against a single store-backed
// allocator.
// NOTE(review): this helper appears unused within this view (BenchmarkRequest
// below builds its own sub-benchmarks), and its mixed-caps name is
// unconventional; kept as-is in case of callers outside this view.
func benchMarkRequest(subnet string, b *testing.B) {
	a, _ := getAllocator(true)
	for n := 0; n < b.N; n++ {
		benchmarkRequest(b, a, subnet)
	}
}
// BenchmarkRequest measures full-subnet allocation for several subnet sizes,
// one sub-benchmark per subnet.
func BenchmarkRequest(b *testing.B) {
	for _, subnet := range []string{"10.0.0.0/24", "10.0.0.0/16", "10.0.0.0/8"} {
		b.Run(fmt.Sprintf("%vSubnet", subnet), func(b *testing.B) {
			a, _ := getAllocator(true)
			benchmarkRequest(b, a, subnet)
		})
	}
}
// TestAllocateRandomDeallocate runs the random allocate/release exercise both
// with and without a sub pool, in-memory and store-backed.
func TestAllocateRandomDeallocate(t *testing.T) {
	for _, store := range []bool{false, true} {
		for _, subPool := range []string{"", "172.25.252.0/22"} {
			testAllocateRandomDeallocate(t, "172.25.0.0/16", subPool, 384, store)
		}
	}
}
// testAllocateRandomDeallocate allocates num addresses, releases a random
// half of them, re-allocates that half, and checks no address is ever handed
// out twice. The random seed is included in failure messages so a failing
// run can be reproduced.
func testAllocateRandomDeallocate(t *testing.T, pool, subPool string, num int, store bool) {
	ds, err := randomLocalStore(store)
	assert.NoError(t, err)

	a, err := NewAllocator(ds, nil)
	if err != nil {
		t.Fatal(err)
	}

	pid, _, _, err := a.RequestPool(localAddressSpace, pool, subPool, nil, false)
	if err != nil {
		t.Fatal(err)
	}

	// Allocate num ip addresses
	indices := make(map[int]*net.IPNet, num)
	allocated := make(map[string]bool, num)
	for i := 0; i < num; i++ {
		ip, _, err := a.RequestAddress(pid, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
		ips := ip.String()
		if _, ok := allocated[ips]; ok {
			t.Fatalf("Address %s is already allocated", ips)
		}
		allocated[ips] = true
		indices[i] = ip
	}
	if len(indices) != len(allocated) || len(indices) != num {
		t.Fatalf("Unexpected number of allocated addresses: (%d,%d).", len(indices), len(allocated))
	}

	seed := time.Now().Unix()
	rand.Seed(seed)

	// Deallocate half of the allocated addresses following a random pattern
	pattern := rand.Perm(num)
	for i := 0; i < num/2; i++ {
		idx := pattern[i]
		ip := indices[idx]
		err := a.ReleaseAddress(pid, ip.IP)
		if err != nil {
			t.Fatalf("Unexpected failure on deallocation of %s: %v.\nSeed: %d.", ip, err, seed)
		}
		delete(indices, idx)
		delete(allocated, ip.String())
	}

	// Re-request the released half (num/2 addresses; the original comment
	// incorrectly said "a quarter"). None of the returned addresses may
	// still be marked as allocated.
	for i := 0; i < num/2; i++ {
		ip, _, err := a.RequestAddress(pid, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
		ips := ip.String()
		if _, ok := allocated[ips]; ok {
			t.Fatalf("\nAddress %s is already allocated.\nSeed: %d.", ips, seed)
		}
		allocated[ips] = true
	}
	if len(allocated) != num {
		t.Fatalf("Unexpected number of allocated addresses: %d.\nSeed: %d.", len(allocated), seed)
	}
}
// TestRetrieveFromStore persists allocator state, re-creates the allocator
// from the same datastore several times — adding pools and addresses between
// rounds — and verifies each restored instance matches the previous one via
// checkDBEquality.
func TestRetrieveFromStore(t *testing.T) {
	num := 200
	ds, err := randomLocalStore(true)
	if err != nil {
		t.Fatal(err)
	}
	a, err := NewAllocator(ds, nil)
	if err != nil {
		t.Fatal(err)
	}
	pid, _, _, err := a.RequestPool(localAddressSpace, "172.25.0.0/16", "", nil, false)
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < num; i++ {
		if _, _, err := a.RequestAddress(pid, nil, nil); err != nil {
			t.Fatal(err)
		}
	}

	// Restore a fresh allocator from the same store and compare.
	// NOTE(review): the refresh return value is ignored here and below.
	a1, err := NewAllocator(ds, nil)
	if err != nil {
		t.Fatal(err)
	}
	a1.refresh(localAddressSpace)
	db := a.DumpDatabase()
	db1 := a1.DumpDatabase()
	if db != db1 {
		t.Fatalf("Unexpected db change.\nExpected:%s\nGot:%s", db, db1)
	}
	checkDBEquality(a, a1, t)

	// Add a sub pool plus more addresses, then restore again.
	pid, _, _, err = a1.RequestPool(localAddressSpace, "172.25.0.0/16", "172.25.1.0/24", nil, false)
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < num/2; i++ {
		if _, _, err := a1.RequestAddress(pid, nil, nil); err != nil {
			t.Fatal(err)
		}
	}

	// Restore
	a2, err := NewAllocator(ds, nil)
	if err != nil {
		t.Fatal(err)
	}
	a2.refresh(localAddressSpace)
	checkDBEquality(a1, a2, t)

	// A second sub pool on the same parent, then restore.
	pid, _, _, err = a2.RequestPool(localAddressSpace, "172.25.0.0/16", "172.25.2.0/24", nil, false)
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < num/2; i++ {
		if _, _, err := a2.RequestAddress(pid, nil, nil); err != nil {
			t.Fatal(err)
		}
	}

	// Restore
	a3, err := NewAllocator(ds, nil)
	if err != nil {
		t.Fatal(err)
	}
	a3.refresh(localAddressSpace)
	checkDBEquality(a2, a3, t)

	// A brand-new parent pool, then a final restore.
	pid, _, _, err = a3.RequestPool(localAddressSpace, "172.26.0.0/16", "", nil, false)
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < num/2; i++ {
		if _, _, err := a3.RequestAddress(pid, nil, nil); err != nil {
			t.Fatal(err)
		}
	}

	// Restore
	a4, err := NewAllocator(ds, nil)
	if err != nil {
		t.Fatal(err)
	}
	a4.refresh(localAddressSpace)
	checkDBEquality(a3, a4, t)
}
// checkDBEquality asserts that two allocators (typically one freshly restored
// from the store) expose identical pool configurations and identical address
// bitmasks for the local address space.
func checkDBEquality(a1, a2 *Allocator, t *testing.T) {
	for k, cnf1 := range a1.addrSpaces[localAddressSpace].subnets {
		cnf2 := a2.addrSpaces[localAddressSpace].subnets[k]
		if cnf1.String() != cnf2.String() {
			t.Fatalf("%s\n%s", cnf1, cnf2)
		}
		if cnf1.Range == nil {
			// For base pools (no sub-pool range), trigger a2's bitmask load.
			// NOTE(review): the return values are discarded — presumably only
			// the loading side effect matters; confirm the error can be
			// safely ignored here.
			a2.retrieveBitmask(k, cnf1.Pool)
		}
	}
	for k, bm1 := range a1.addresses {
		bm2 := a2.addresses[k]
		if bm1.String() != bm2.String() {
			t.Fatalf("%s\n%s", bm1, bm2)
		}
	}
}
// Shared state for the TestParallelPredefinedRequest* tests, which run as
// parallel instances of the same binary via runParallelTests.
const (
	numInstances = 5
	first        = 0
	last         = numInstances - 1
)

var (
	// allocator is created by the first instance and shared by all.
	allocator *Allocator
	// start is closed by the first instance once the allocator exists.
	start = make(chan struct{})
	// done carries one per-instance completion channel per non-first instance.
	done = make(chan chan struct{}, numInstances-1)
	// pools records the predefined pool each instance was assigned.
	pools = make([]*net.IPNet, numInstances)
)
// runParallelTests is the body shared by the five
// TestParallelPredefinedRequest* functions. Instance 0 creates the shared
// allocator and signals the others via the start channel; every instance
// then requests a predefined pool concurrently, and instance 0 finally
// verifies all assigned pools are distinct.
func runParallelTests(t *testing.T, instance int) {
	var err error

	t.Parallel()

	pTest := flag.Lookup("test.parallel")
	if pTest == nil {
		t.Skip("Skipped because test.parallel flag not set;")
	}
	numParallel, err := strconv.Atoi(pTest.Value.String())
	if err != nil {
		t.Fatal(err)
	}
	if numParallel < numInstances {
		// The original message referred to a non-existent "t.parallel" flag.
		t.Skip("Skipped because test.parallel was less than ", numInstances)
	}

	// The first instance creates the allocator, gives the start
	// and finally checks the pools each instance was assigned
	if instance == first {
		allocator, err = getAllocator(true)
		if err != nil {
			t.Fatal(err)
		}
		close(start)
	} else {
		// A single-case select is just a receive; the original wrapped this
		// (and the drain below) in needless select statements.
		<-start
		instDone := make(chan struct{})
		done <- instDone
		defer close(instDone)

		if instance == last {
			defer close(done)
		}
	}

	_, pools[instance], _, err = allocator.RequestPool(localAddressSpace, "", "", nil, false)
	if err != nil {
		t.Fatal(err)
	}

	if instance == first {
		// Wait for every other instance to check in.
		for instDone := range done {
			<-instDone
		}
		// Now check each instance got a different pool
		for i := 0; i < numInstances; i++ {
			for j := i + 1; j < numInstances; j++ {
				if types.CompareIPNet(pools[i], pools[j]) {
					t.Fatalf("Instance %d and %d were given the same predefined pool: %v", i, j, pools)
				}
			}
		}
	}
}
// TestRequestReleaseAddressDuplicate interleaves serial address allocation
// with concurrent releases, then verifies via a per-IP reference count that
// no address was ever double-allocated or double-released.
//
// Fixes over the original: the release goroutine assigned the enclosing err
// variable — a data race with the allocation loop's condition — and called
// t.Fatal, which must only run in the test goroutine. The goroutine now uses
// a local error and t.Errorf.
func TestRequestReleaseAddressDuplicate(t *testing.T) {
	a, err := getAllocator(false)
	if err != nil {
		t.Fatal(err)
	}
	type IP struct {
		ip  *net.IPNet
		ref int
	}
	ips := []IP{}
	allocatedIPs := []*net.IPNet{}

	a.addrSpaces["rosso"] = &addrSpace{
		id:      dsConfigKey + "/" + "rosso",
		ds:      a.addrSpaces[localAddressSpace].ds,
		alloc:   a.addrSpaces[localAddressSpace].alloc,
		scope:   a.addrSpaces[localAddressSpace].scope,
		subnets: map[SubnetKey]*PoolData{},
	}

	var wg sync.WaitGroup
	opts := map[string]string{
		ipamapi.AllocSerialPrefix: "true",
	}
	var l sync.Mutex

	poolID, _, _, err := a.RequestPool("rosso", "198.168.0.0/23", "", nil, false)
	if err != nil {
		t.Fatal(err)
	}

	// Allocate until the pool is exhausted; once more than 500 addresses are
	// outstanding, release a random one concurrently with the allocations.
	for err == nil {
		var c *net.IPNet
		if c, _, err = a.RequestAddress(poolID, nil, opts); err == nil {
			l.Lock()
			ips = append(ips, IP{c, 1})
			l.Unlock()
			allocatedIPs = append(allocatedIPs, c)
			if len(allocatedIPs) > 500 {
				i := rand.Intn(len(allocatedIPs) - 1)
				wg.Add(1)
				go func(ip *net.IPNet) {
					defer wg.Done()
					// Goroutine-local error: do not touch the outer err.
					if err := a.ReleaseAddress(poolID, ip.IP); err != nil {
						t.Errorf("failed to release address %s: %v", ip.IP, err)
						return
					}
					l.Lock()
					ips = append(ips, IP{ip, -1})
					l.Unlock()
				}(allocatedIPs[i])
				allocatedIPs = append(allocatedIPs[:i], allocatedIPs[i+1:]...)
			}
		}
	}

	wg.Wait()

	// Replay the event log: the running ref count per IP must stay in [0,1].
	refMap := make(map[string]int)
	for _, ip := range ips {
		refMap[ip.ip.String()] = refMap[ip.ip.String()] + ip.ref
		if refMap[ip.ip.String()] < 0 {
			t.Fatalf("IP %s was previously released", ip.ip.String())
		}
		if refMap[ip.ip.String()] > 1 {
			t.Fatalf("IP %s was previously allocated", ip.ip.String())
		}
	}
}
// TestParallelPredefinedRequest1 through 5 are five identical entry points
// that the test runner executes in parallel (each calls t.Parallel via
// runParallelTests). Together they form the five instances that verify
// concurrent predefined-pool requests return distinct pools.
func TestParallelPredefinedRequest1(t *testing.T) {
	runParallelTests(t, 0)
}

func TestParallelPredefinedRequest2(t *testing.T) {
	runParallelTests(t, 1)
}

func TestParallelPredefinedRequest3(t *testing.T) {
	runParallelTests(t, 2)
}

func TestParallelPredefinedRequest4(t *testing.T) {
	runParallelTests(t, 3)
}

func TestParallelPredefinedRequest5(t *testing.T) {
	runParallelTests(t, 4)
}
|
package main
import (
"fmt"
"github.com/glendc/cgreader"
"strings"
)
// Vector is a 2D integer coordinate.
type Vector struct {
	x, y int
}

// Ragnarok holds the puzzle state: Thor's position, the target position,
// the map dimensions, and the remaining energy (allowed moves).
type Ragnarok struct {
	thor, target, dimensions Vector
	energy                   int
}
// GetDirection compares x and y and delivers exactly one token on the
// returned channel: a when x < y, b when x > y, and "" when they are equal.
// The channel is closed after the single send.
//
// The v parameter is unused; it is kept to preserve existing call sites.
func GetDirection(a, b string, x, y, v int) <-chan string {
	// A buffered channel lets the send complete inline, so no goroutine per
	// call is needed (the original spawned one for every invocation).
	ch := make(chan string, 1)
	switch diff := x - y; {
	case diff < 0:
		ch <- a
	case diff > 0:
		ch <- b
	default:
		ch <- ""
	}
	close(ch)
	return ch
}
// ParseInitialData reads the single initialization line from ch: map width
// and height, Thor's starting position, the target position, and the energy
// budget, in that order.
// NOTE(review): the Sscanf error and matched-item count are ignored;
// malformed input silently leaves fields at their zero values.
func (ragnarok *Ragnarok) ParseInitialData(ch <-chan string) {
	fmt.Sscanf(
		<-ch,
		"%d %d %d %d %d %d %d \n",
		&ragnarok.dimensions.x,
		&ragnarok.dimensions.y,
		&ragnarok.thor.x,
		&ragnarok.thor.y,
		&ragnarok.target.x,
		&ragnarok.target.y,
		&ragnarok.energy)
}
// GetInput exposes the remaining energy as the per-turn input line,
// delivered asynchronously on the returned channel.
// NOTE(review): the goroutine blocks until the value is received; if the
// caller never reads the channel, the goroutine leaks.
func (ragnarok *Ragnarok) GetInput() (ch chan string) {
	ch = make(chan string)
	go func() {
		ch <- fmt.Sprintf("%d", ragnarok.energy)
	}()
	return
}
// Update computes Thor's next move as a compass string ("N", "S", "E", "W",
// a diagonal such as "NS"+"EW" combination, or "" when aligned). The input
// channel is not consumed: the decision uses only the stored positions.
func (ragnarok *Ragnarok) Update(ch <-chan string) string {
	// Note the argument order differs between the two axes in the original:
	// N/S compares target.y vs thor.y, E/W compares thor.x vs target.x.
	channelNS := GetDirection("N", "S", ragnarok.target.y, ragnarok.thor.y, ragnarok.thor.y)
	channelEW := GetDirection("E", "W", ragnarok.thor.x, ragnarok.target.x, ragnarok.thor.x)
	// fmt.Sprint on an already-concatenated string was redundant; return the
	// concatenation directly.
	return <-channelNS + <-channelEW
}
// SetOutput applies the chosen move to Thor's position, burns one unit of
// energy, and returns a human-readable status line.
func (ragnarok *Ragnarok) SetOutput(output string) string {
	// Vertical component: "N" wins over "S" if both were present.
	switch {
	case strings.Contains(output, "N"):
		ragnarok.thor.y--
	case strings.Contains(output, "S"):
		ragnarok.thor.y++
	}
	// Horizontal component: "E" wins over "W" if both were present.
	switch {
	case strings.Contains(output, "E"):
		ragnarok.thor.x++
	case strings.Contains(output, "W"):
		ragnarok.thor.x--
	}
	ragnarok.energy--

	return fmt.Sprintf(
		"Target = (%d,%d)\nThor = (%d,%d)\nEnergy = %d",
		ragnarok.target.x,
		ragnarok.target.y,
		ragnarok.thor.x,
		ragnarok.thor.y,
		ragnarok.energy)
}
// LoseConditionCheck reports whether the game is lost: Thor ran out of
// energy or stepped outside the map bounds.
func (ragnarok *Ragnarok) LoseConditionCheck() bool {
	if ragnarok.energy <= 0 {
		return true
	}
	pos, dim := ragnarok.thor, ragnarok.dimensions
	return pos.x < 0 || pos.x >= dim.x || pos.y < 0 || pos.y >= dim.y
}
// WinConditionCheck reports whether Thor has reached the target square.
func (ragnarok *Ragnarok) WinConditionCheck() bool {
	return ragnarok.target == ragnarok.thor
}
// main wires the Ragnarok program into the cgreader test harness, replaying
// the recorded input file with trace output enabled.
func main() {
	cgreader.RunTargetProgram("../../input/ragnarok_1.txt", true, &Ragnarok{})
}
Solution now draws map
package main
import (
"fmt"
"github.com/glendc/cgreader"
"strings"
)
// Vector is a 2D integer coordinate plus the icon used to render it on the
// ASCII map.
type Vector struct {
	x, y int
	icon string
}

// GetMapCoordinates returns the position encoded as "x;y", the format the
// cgreader map renderer expects.
func (v Vector) GetMapCoordinates() string {
	return fmt.Sprintf("%d;%d", v.x, v.y)
}

// GetMapIcon returns the single-character icon drawn at this position.
func (v Vector) GetMapIcon() string {
	return v.icon
}

// Ragnarok holds the puzzle state: Thor's position, the target, the map
// dimensions, the remaining energy, and the trail of visited squares.
type Ragnarok struct {
	thor, target, dimensions Vector
	energy                   int
	trail                    []Vector // gofmt: was "[] Vector"
}
// GetDirection compares x and y and delivers exactly one token on the
// returned channel: a when x < y, b when x > y, and "" when they are equal.
// The channel is closed after the single send.
//
// The v parameter is unused; it is kept to preserve existing call sites.
func GetDirection(a, b string, x, y, v int) <-chan string {
	// A buffered channel lets the send complete inline, so no goroutine per
	// call is needed (the original spawned one for every invocation).
	ch := make(chan string, 1)
	switch diff := x - y; {
	case diff < 0:
		ch <- a
	case diff > 0:
		ch <- b
	default:
		ch <- ""
	}
	close(ch)
	return ch
}
// ParseInitialData reads the single initialization line (map size, Thor's
// start, target, energy budget), then assigns the map icons for Thor ("H")
// and the target ("T") and pre-sizes the trail to the energy budget.
// NOTE(review): the Sscanf error and matched-item count are ignored;
// malformed input silently leaves fields at their zero values.
func (ragnarok *Ragnarok) ParseInitialData(ch <-chan string) {
	fmt.Sscanf(
		<-ch,
		"%d %d %d %d %d %d %d \n",
		&ragnarok.dimensions.x,
		&ragnarok.dimensions.y,
		&ragnarok.thor.x,
		&ragnarok.thor.y,
		&ragnarok.target.x,
		&ragnarok.target.y,
		&ragnarok.energy)

	ragnarok.thor.icon, ragnarok.target.icon = "H", "T"
	ragnarok.trail = make([]Vector, 0, ragnarok.energy)
}
// GetInput exposes the remaining energy as the per-turn input line,
// delivered asynchronously on the returned channel.
// NOTE(review): the goroutine blocks until the value is received; if the
// caller never reads the channel, the goroutine leaks.
func (ragnarok *Ragnarok) GetInput() (ch chan string) {
	ch = make(chan string)
	go func() {
		ch <- fmt.Sprintf("%d", ragnarok.energy)
	}()
	return
}
// Update draws the current map (trail plus the Thor and target markers)
// through cgreader, then computes Thor's next move as a compass string.
// The input channel is not consumed: the decision uses stored positions.
func (ragnarok *Ragnarok) Update(ch <-chan string) string {
	// Render the trail with the two live markers appended.
	trail := append(ragnarok.trail, ragnarok.thor, ragnarok.target)
	mapObjects := make([]cgreader.MapObject, len(trail))
	for i, v := range trail {
		mapObjects[i] = cgreader.MapObject(v)
	}
	cgreader.DrawMap(
		ragnarok.dimensions.x,
		ragnarok.dimensions.y,
		".",
		mapObjects...)

	// Note the argument order differs between the two axes in the original:
	// N/S compares target.y vs thor.y, E/W compares thor.x vs target.x.
	channelNS := GetDirection("N", "S", ragnarok.target.y, ragnarok.thor.y, ragnarok.thor.y)
	channelEW := GetDirection("E", "W", ragnarok.thor.x, ragnarok.target.x, ragnarok.thor.x)
	// fmt.Sprint on an already-concatenated string was redundant; locals also
	// renamed from snake_case to Go-conventional mixedCaps.
	return <-channelNS + <-channelEW
}
// SetOutput records Thor's previous square in the trail, applies the chosen
// move to his position, burns one unit of energy, and returns a status line.
// (Original Vector literal lacked gofmt spacing; x -= 1 style replaced with
// the idiomatic -- / ++.)
func (ragnarok *Ragnarok) SetOutput(output string) string {
	// Mark the square Thor is leaving with a "+".
	ragnarok.trail = append(ragnarok.trail, Vector{ragnarok.thor.x, ragnarok.thor.y, "+"})

	if strings.Contains(output, "N") {
		ragnarok.thor.y--
	} else if strings.Contains(output, "S") {
		ragnarok.thor.y++
	}
	if strings.Contains(output, "E") {
		ragnarok.thor.x++
	} else if strings.Contains(output, "W") {
		ragnarok.thor.x--
	}
	ragnarok.energy--

	return fmt.Sprintf(
		"Target = (%d,%d)\nThor = (%d,%d)\nEnergy = %d",
		ragnarok.target.x,
		ragnarok.target.y,
		ragnarok.thor.x,
		ragnarok.thor.y,
		ragnarok.energy)
}
// LoseConditionCheck reports whether the game is lost: Thor ran out of
// energy or stepped outside the map bounds.
func (ragnarok *Ragnarok) LoseConditionCheck() bool {
	if ragnarok.energy <= 0 {
		return true
	}
	pos, dim := ragnarok.thor, ragnarok.dimensions
	return pos.x < 0 || pos.x >= dim.x || pos.y < 0 || pos.y >= dim.y
}
// WinConditionCheck reports whether Thor has reached the target square.
// (Fields are compared individually because Vector now carries an icon,
// whose value differs between thor and target.)
func (ragnarok *Ragnarok) WinConditionCheck() bool {
	return ragnarok.target.x == ragnarok.thor.x &&
		ragnarok.target.y == ragnarok.thor.y
}
// main wires the Ragnarok program into the cgreader test harness, replaying
// the recorded input file with trace output enabled.
func main() {
	cgreader.RunTargetProgram("../../input/ragnarok_3.txt", true, &Ragnarok{})
}
|
// Copyright 2015 realglobe, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package account
import (
"encoding/json"
"github.com/realglobe-Inc/edo-id-provider/database/account"
"github.com/realglobe-Inc/edo-id-provider/database/pairwise"
"github.com/realglobe-Inc/edo-id-provider/database/sector"
"github.com/realglobe-Inc/edo-id-provider/database/token"
tadb "github.com/realglobe-Inc/edo-idp-selector/database/ta"
"github.com/realglobe-Inc/edo-lib/rand"
"github.com/realglobe-Inc/edo-lib/server"
"github.com/realglobe-Inc/edo-lib/strset/strsetutil"
"net/http"
"net/http/httptest"
"testing"
"time"
)
// newTestHandler builds an account-info handler backed entirely by in-memory
// databases, pre-populated with the given accounts and TAs.
// NOTE(review): the meaning of the literal 20 is not visible here — see the
// signature of New for what this parameter controls.
func newTestHandler(acnts []account.Element, tas []tadb.Element) *handler {
	return New(
		server.NewStopper(),
		20,
		account.NewMemoryDb(acnts),
		tadb.NewMemoryDb(tas),
		sector.NewMemoryDb(),
		pairwise.NewMemoryDb(),
		token.NewMemoryDb(),
		rand.New(time.Second),
		true,
	).(*handler)
}
// GET と POST でのアカウント情報リクエストに対応するか。
func TestNormal(t *testing.T) {
for _, meth := range []string{"GET", "POST"} {
acnt := newTestAccount()
hndl := newTestHandler([]account.Element{acnt}, []tadb.Element{test_ta})
now := time.Now()
tok := token.New(test_tokId, now.Add(time.Minute), acnt.Id(), strsetutil.New("openid"), strsetutil.New("email"), test_ta.Id())
hndl.tokDb.Save(tok, now.Add(time.Minute))
r, err := http.NewRequest(meth, "https://idp.example.org/userinfo", nil)
if err != nil {
t.Fatal(err)
}
r.Header.Set("Authorization", "Bearer "+tok.Id())
w := httptest.NewRecorder()
hndl.ServeHTTP(w, r)
if w.Code != http.StatusOK {
t.Error(w.Code)
t.Fatal(http.StatusOK)
} else if contType, contType2 := "application/json", w.HeaderMap.Get("Content-Type"); contType2 != contType {
t.Error(contType2)
t.Fatal(contType)
}
var buff struct{ Sub, Email string }
if err := json.NewDecoder(w.Body).Decode(&buff); err != nil {
t.Fatal(err)
} else if buff.Sub != acnt.Id() {
t.Fatal(buff.Sub)
} else if buff.Email != test_email {
t.Error(buff.Email)
t.Fatal(test_email)
}
}
}
アカウント情報エンドポイントの細かいテストを追加
// Copyright 2015 realglobe, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package account
import (
"encoding/json"
"github.com/realglobe-Inc/edo-id-provider/database/account"
"github.com/realglobe-Inc/edo-id-provider/database/pairwise"
"github.com/realglobe-Inc/edo-id-provider/database/sector"
"github.com/realglobe-Inc/edo-id-provider/database/token"
tadb "github.com/realglobe-Inc/edo-idp-selector/database/ta"
"github.com/realglobe-Inc/edo-lib/rand"
"github.com/realglobe-Inc/edo-lib/server"
"github.com/realglobe-Inc/edo-lib/strset/strsetutil"
"net/http"
"net/http/httptest"
"testing"
"time"
)
// newTestHandler builds a userinfo handler backed entirely by in-memory
// databases, preloaded with the given accounts and TAs.
func newTestHandler(acnts []account.Element, tas []tadb.Element) *handler {
	return New(
		server.NewStopper(),
		20, // numeric parameter to New — meaning not visible here; TODO confirm against New's signature
		account.NewMemoryDb(acnts),
		tadb.NewMemoryDb(tas),
		sector.NewMemoryDb(),
		pairwise.NewMemoryDb(),
		token.NewMemoryDb(),
		rand.New(time.Second),
		true, // boolean flag to New — presumably debug/test mode; confirm against New
	).(*handler)
}
// TestNormal verifies that the userinfo endpoint handles account-information
// requests via both GET and POST.
func TestNormal(t *testing.T) {
	for _, meth := range []string{"GET", "POST"} {
		acnt := newTestAccount()
		hndl := newTestHandler([]account.Element{acnt}, []tadb.Element{test_ta})

		// Register a token scoped to "openid" with the "email" attribute so
		// the handler can resolve the bearer token sent below.
		now := time.Now()
		tok := token.New(test_tokId, now.Add(time.Minute), acnt.Id(), strsetutil.New("openid"), strsetutil.New("email"), test_ta.Id())
		hndl.tokDb.Save(tok, now.Add(time.Minute))

		r, err := http.NewRequest(meth, "https://idp.example.org/userinfo", nil)
		if err != nil {
			t.Fatal(err)
		}
		r.Header.Set("Authorization", "Bearer "+tok.Id())

		w := httptest.NewRecorder()
		hndl.ServeHTTP(w, r)

		if w.Code != http.StatusOK {
			t.Error(w.Code)
			t.Fatal(http.StatusOK)
			// w.Header() returns the same map as the deprecated HeaderMap field.
		} else if contType, contType2 := "application/json", w.Header().Get("Content-Type"); contType2 != contType {
			t.Error(contType2)
			t.Fatal(contType)
		}

		// The response body must carry the raw account ID and email claim.
		var buff struct{ Sub, Email string }
		if err := json.NewDecoder(w.Body).Decode(&buff); err != nil {
			t.Fatal(err)
		} else if buff.Sub != acnt.Id() {
			t.Fatal(buff.Sub)
		} else if buff.Email != test_email {
			t.Error(buff.Email)
			t.Fatal(test_email)
		}
	}
}
// TestPairwise verifies that a TA configured for TA-specific (pairwise)
// account IDs receives a sub claim that differs from the raw account ID.
func TestPairwise(t *testing.T) {
	acnt := newTestAccount()
	// The "true" argument presumably marks the TA as pairwise — TODO confirm
	// against tadb.New's signature.
	ta := tadb.New("https://ta.example.org", nil, nil, nil, true, "")
	hndl := newTestHandler([]account.Element{acnt}, []tadb.Element{ta})

	now := time.Now()
	tok := token.New(test_tokId, now.Add(time.Minute), acnt.Id(), strsetutil.New("openid"), strsetutil.New("email"), ta.Id())
	hndl.tokDb.Save(tok, now.Add(time.Minute))

	r, err := http.NewRequest("GET", "https://idp.example.org/userinfo", nil)
	if err != nil {
		t.Fatal(err)
	}
	r.Header.Set("Authorization", "Bearer "+tok.Id())

	w := httptest.NewRecorder()
	hndl.ServeHTTP(w, r)

	if w.Code != http.StatusOK {
		t.Error(w.Code)
		t.Fatal(http.StatusOK)
		// w.Header() returns the same map as the deprecated HeaderMap field.
	} else if contType, contType2 := "application/json", w.Header().Get("Content-Type"); contType2 != contType {
		t.Error(contType2)
		t.Fatal(contType)
	}

	var buff struct{ Sub, Email string }
	if err := json.NewDecoder(w.Body).Decode(&buff); err != nil {
		t.Fatal(err)
	} else if buff.Sub == acnt.Id() {
		// A pairwise sub must NOT equal the raw account ID.
		t.Error("not pairwise")
		t.Fatal(buff.Sub)
	} else if buff.Email != test_email {
		t.Error(buff.Email)
		t.Fatal(test_email)
	}
}
// TestNotUseScopeAttribute verifies that scope values are not expanded into
// attributes: a token with the "email" scope but no explicit "email"
// attribute must not yield an email claim.
func TestNotUseScopeAttribute(t *testing.T) {
	acnt := newTestAccount()
	hndl := newTestHandler([]account.Element{acnt}, []tadb.Element{test_ta})

	// Scopes include "email" but the attribute set (4th argument) is nil.
	now := time.Now()
	tok := token.New(test_tokId, now.Add(time.Minute), acnt.Id(), strsetutil.New("openid", "email"), nil, test_ta.Id())
	hndl.tokDb.Save(tok, now.Add(time.Minute))

	r, err := http.NewRequest("GET", "https://idp.example.org/userinfo", nil)
	if err != nil {
		t.Fatal(err)
	}
	r.Header.Set("Authorization", "Bearer "+tok.Id())

	w := httptest.NewRecorder()
	hndl.ServeHTTP(w, r)

	if w.Code != http.StatusOK {
		t.Error(w.Code)
		t.Fatal(http.StatusOK)
		// w.Header() returns the same map as the deprecated HeaderMap field.
	} else if contType, contType2 := "application/json", w.Header().Get("Content-Type"); contType2 != contType {
		t.Error(contType2)
		t.Fatal(contType)
	}

	var buff struct{ Sub, Email string }
	if err := json.NewDecoder(w.Body).Decode(&buff); err != nil {
		t.Fatal(err)
	} else if buff.Sub != acnt.Id() {
		t.Error(buff.Sub)
		t.Fatal(acnt.Id())
	} else if buff.Email != "" {
		t.Error("got scope attribute")
		t.Fatal(buff.Email)
	}
}
|
package rpcd
import (
"time"
"github.com/Symantec/Dominator/lib/image"
"github.com/Symantec/Dominator/lib/srpc"
"github.com/Symantec/Dominator/proto/imageserver"
)
// GetImage replies with the named image. If the image does not yet exist and
// the request specifies a non-zero Timeout, the call blocks until either the
// image is added to the database or the timeout expires; on timeout the
// already-stored (nil-image) response is returned with no error.
func (t *srpcType) GetImage(conn *srpc.Conn,
	request imageserver.GetImageRequest,
	reply *imageserver.GetImageResponse) error {
	var response imageserver.GetImageResponse
	response.Image = t.getImageNow(request)
	*reply = response
	if response.Image != nil || request.Timeout == 0 {
		return nil
	}
	// Image not found yet and willing to wait.
	addCh := t.imageDataBase.RegisterAddNotifier()
	defer func() {
		t.imageDataBase.UnregisterAddNotifier(addCh)
		// Drain a notification that may have raced with unregistration so a
		// notifier send cannot block.
		select {
		case <-addCh:
		default:
		}
	}()
	timer := time.NewTimer(request.Timeout)
	for {
		select {
		case imageName := <-addCh:
			if imageName == request.ImageName {
				// Stop the timer; if it already fired, consume the pending
				// tick so the channel is left empty.
				if !timer.Stop() {
					<-timer.C
				}
				response.Image = t.getImageNow(request)
				*reply = response
				return nil
			}
		case <-timer.C:
			// Timed out waiting; *reply still holds the nil-image response.
			return nil
		}
	}
}
// getImageNow returns the named image immediately, or nil if it does not
// exist. When the request asks to ignore filesystem data, a shallow copy
// with FileSystem cleared is returned so the database entry is not mutated.
func (t *srpcType) getImageNow(
	request imageserver.GetImageRequest) *image.Image {
	originalImage := t.imageDataBase.GetImage(request.ImageName)
	if originalImage == nil {
		// Guard against dereferencing a nil result when the image is absent.
		return nil
	}
	img := *originalImage
	if request.IgnoreFilesystem {
		img.FileSystem = nil
	}
	return &img
}
Fix nil pointer dereference in GetImage() handler when image does not exist.
package rpcd
import (
"time"
"github.com/Symantec/Dominator/lib/image"
"github.com/Symantec/Dominator/lib/srpc"
"github.com/Symantec/Dominator/proto/imageserver"
)
// GetImage replies with the named image. If the image does not yet exist and
// the request specifies a non-zero Timeout, the call blocks until either the
// image is added to the database or the timeout expires; on timeout the
// already-stored (nil-image) response is returned with no error.
func (t *srpcType) GetImage(conn *srpc.Conn,
	request imageserver.GetImageRequest,
	reply *imageserver.GetImageResponse) error {
	var response imageserver.GetImageResponse
	response.Image = t.getImageNow(request)
	*reply = response
	if response.Image != nil || request.Timeout == 0 {
		return nil
	}
	// Image not found yet and willing to wait.
	addCh := t.imageDataBase.RegisterAddNotifier()
	defer func() {
		t.imageDataBase.UnregisterAddNotifier(addCh)
		// Drain a notification that may have raced with unregistration so a
		// notifier send cannot block.
		select {
		case <-addCh:
		default:
		}
	}()
	timer := time.NewTimer(request.Timeout)
	for {
		select {
		case imageName := <-addCh:
			if imageName == request.ImageName {
				// Stop the timer; if it already fired, consume the pending
				// tick so the channel is left empty.
				if !timer.Stop() {
					<-timer.C
				}
				response.Image = t.getImageNow(request)
				*reply = response
				return nil
			}
		case <-timer.C:
			// Timed out waiting; *reply still holds the nil-image response.
			return nil
		}
	}
}
// getImageNow fetches the named image without waiting, returning nil when it
// is absent. When the caller asked to skip filesystem data, a shallow copy
// with FileSystem cleared is returned so the stored entry stays intact.
func (t *srpcType) getImageNow(
	request imageserver.GetImageRequest) *image.Image {
	stored := t.imageDataBase.GetImage(request.ImageName)
	if stored == nil {
		return nil
	}
	imageCopy := *stored
	if request.IgnoreFilesystem {
		imageCopy.FileSystem = nil
	}
	return &imageCopy
}
|
// Copyright 2016, RadiantBlue Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"time"
"github.com/venicegeo/pzsvc-exec/pzse"
"github.com/venicegeo/pzsvc-exec/pzsvc"
)
// main reads a pzse config file (path supplied as the first command-line
// argument), validates and normalizes the settings needed to work tasks,
// locates the target Piazza service (retrying while it registers), then
// starts NumProcs worker goroutines and blocks forever.
func main() {
	s := pzsvc.Session{AppName: "pzsvc-taskworker", SessionID: "startup", LogRootDir: "pzsvc-exec"}
	pzsvc.LogAudit(s, s.AppName, "startup", s.AppName)
	if len(os.Args) < 2 {
		pzsvc.LogSimpleErr(s, "error: Insufficient parameters. You must specify a config file.", nil)
		return
	}
	// First argument after the base call should be the path to the config file.
	// ReadFile returns the contents of the file as a byte buffer.
	configBuf, err := ioutil.ReadFile(os.Args[1])
	if err != nil {
		pzsvc.LogSimpleErr(s, "pzsvc-taskworker error in reading config: ", err)
		return
	}
	var configObj pzse.ConfigType
	err = json.Unmarshal(configBuf, &configObj)
	if err != nil {
		pzsvc.LogSimpleErr(s, "pzsvc-taskworker error in unmarshalling config: ", err)
		return
	}
	s.LogAudit = configObj.LogAudit
	if configObj.LogAudit {
		pzsvc.LogInfo(s, "Config: Audit logging enabled.")
	} else {
		pzsvc.LogInfo(s, "Config: Audit logging disabled.")
	}
	// The Piazza address may be overridden via the environment variable named
	// by PzAddrEnVar; a non-empty env value wins over the config value.
	s.PzAddr = configObj.PzAddr
	if configObj.PzAddrEnVar != "" {
		newAddr := os.Getenv(configObj.PzAddrEnVar)
		if newAddr != "" {
			s.PzAddr = newAddr
		}
	}
	if s.PzAddr == "" {
		pzsvc.LogSimpleErr(s, "Config: Cannot work tasks. Must have either a valid PzAddr, or a valid and populated PzAddrEnVar.", nil)
		return
	}
	if configObj.SvcName == "" {
		pzsvc.LogSimpleErr(s, "Config: Cannot work tasks without service name.", nil)
		return
	}
	if configObj.APIKeyEnVar == "" {
		pzsvc.LogSimpleErr(s, "Config: Cannot work tasks without valid APIKeyEnVar.", nil)
		return
	}
	apiKey := os.Getenv(configObj.APIKeyEnVar)
	if apiKey == "" {
		pzsvc.LogSimpleErr(s, "No API key at APIKeyEnVar. Cannot work.", nil)
		return
	}
	// HTTP basic auth with the API key as the user name and empty password.
	s.PzAuth = "Basic " + base64.StdEncoding.EncodeToString([]byte(apiKey+":"))
	if configObj.NumProcs == 0 {
		pzsvc.LogInfo(s, "Config: No Proc number specified. Defaulting to one at a time.")
		configObj.NumProcs = 1
	}
	if configObj.Port == 0 {
		pzsvc.LogInfo(s, "Config: No target Port specified. Defaulting to 8080.")
		configObj.Port = 8080
	}
	// Look up the service ID by name, retrying up to 10 times at 15-second
	// intervals in case the service has not been registered yet.
	svcID := ""
	for i := 0; svcID == "" && i < 10; i++ {
		svcID, err = pzsvc.FindMySvc(s, configObj.SvcName, configObj.PzAddr, s.PzAuth)
		if err != nil {
			pzsvc.LogSimpleErr(s, "Taskworker could not find Pz Service ID. Initial Error: ", err)
			return
		}
		if svcID == "" && i < 9 {
			pzsvc.LogInfo(s, "Could not find service. Will sleep and wait.")
			time.Sleep(15 * time.Second)
		}
	}
	if svcID == "" {
		pzsvc.LogSimpleErr(s, "Taskworker could not find Pz Service ID. No error, just no service.", err)
		return
	}
	pzsvc.LogInfo(s, "Found target service. ServiceID: "+svcID)
	// Each worker polls independently; main then parks forever.
	for i := 0; i < configObj.NumProcs; i++ {
		go workerThread(s, configObj, svcID)
	}
	select {} //blocks forever
}
// WorkBody exists as part of the response format of the Piazza job manager task request endpoint.
// Specifically, it's one layer of the bit we care about: the raw task content string.
type WorkBody struct {
	Content string `json:"content"`
}

// WorkDataInputs exists as part of the response format of the Piazza job manager task request endpoint.
// Specifically, it's one layer of the bit we care about.
type WorkDataInputs struct {
	Body WorkBody `json:"body"`
}

// WorkInData exists as part of the response format of the Piazza job manager task request endpoint.
// Specifically, it's one layer of the bit we care about.
type WorkInData struct {
	DataInputs WorkDataInputs `json:"dataInputs"`
}

// WorkSvcData exists as part of the response format of the Piazza job manager task request endpoint.
// Specifically, it's one layer of the bit we care about: the job ID plus its input data.
type WorkSvcData struct {
	Data  WorkInData `json:"data"`
	JobID string     `json:"jobId"`
}

// WorkOutData exists as part of the response format of the Piazza job manager task request endpoint.
// Specifically, it's the outermost layer of the bit we care about.
type WorkOutData struct {
	SvcData WorkSvcData `json:"serviceData"`
}
// workerThread is one polling loop: it repeatedly asks the Piazza job
// manager for a task assigned to svcID, forwards the task body to the local
// pzsvc-exec instance, and posts the result back via sendExecResult. It
// never returns and is intended to run as a goroutine. Task-fetch failures
// back off linearly (10s * consecutive-failure count).
func workerThread(s pzsvc.Session, configObj pzse.ConfigType, svcID string) {
	var (
		err       error
		failCount int // consecutive task-fetch failures; drives the backoff
	)
	workAddr := fmt.Sprintf("http://localhost:%d/execute", configObj.Port)
	// Each worker gets its own session ID for log correlation.
	s.SessionID, err = pzsvc.PsuUUID()
	if err != nil {
		s.SessionID = "FailedSessionInit"
		pzsvc.LogSimpleErr(s, "psuUUID error: ", err)
		panic("Worker thread failed on uid generation. Something is very wrong: " + err.Error())
	}
	pzsvc.LogInfo(s, "Worker thread initiated.")
	for {
		var pzJobObj struct {
			Data WorkOutData `json:"data"`
		}
		pzJobObj.Data = WorkOutData{SvcData: WorkSvcData{JobID: "", Data: WorkInData{DataInputs: WorkDataInputs{Body: WorkBody{Content: ""}}}}}
		byts, pErr := pzsvc.RequestKnownJSON("POST", "", configObj.PzAddr+"/service/"+svcID+"/task", s.PzAuth, &pzJobObj)
		if pErr != nil {
			pErr.Log(s, "Taskworker worker thread: error getting new task:")
			failCount++
			time.Sleep(time.Duration(10*failCount) * time.Second)
			continue
		}
		inpStr := pzJobObj.Data.SvcData.Data.DataInputs.Body.Content
		jobID := pzJobObj.Data.SvcData.JobID
		// An empty content string means no task was available.
		if inpStr != "" {
			pzsvc.LogInfo(s, "New Task Grabbed. JobID: "+jobID)
			failCount = 0
			var outpByts []byte
			if configObj.JwtSecAuthURL != "" {
				// TODO: once JWT conversion exists as an option, handle it here.
				// jwtBody = content
				// call JwtSecAuthURL. send jwtBody. get response
				// outBody = response (more or less)
			}
			var respObj pzse.OutStruct
			pzsvc.LogAuditBuf(s, s.UserID, "http request - calling pzsvc-exec", inpStr, workAddr)
			// NOTE(review): ":=" reuses outpByts (same scope) but shadows the
			// outer pErr; the shadowed pErr is checked immediately, so no harm.
			outpByts, pErr := pzsvc.RequestKnownJSON("POST", inpStr, workAddr, "", &respObj)
			if pErr != nil {
				pErr.Log(s, "Error calling pzsvc-exec")
				sendExecResult(s, configObj.PzAddr, s.PzAuth, svcID, jobID, "Fail", nil)
			} else {
				pzsvc.LogAuditBuf(s, workAddr, "http response from pzsvc-exec", string(outpByts), s.UserID)
				sendExecResult(s, configObj.PzAddr, s.PzAuth, svcID, jobID, "Success", outpByts)
			}
		} else {
			pzsvc.LogInfo(s, "No Task. Sleeping now. input: "+string(byts))
			time.Sleep(60 * time.Second)
		}
	}
}
// sendExecResult reports the result of a job execution back to the Piazza
// job manager at pzAddr. When a result payload is supplied, it is first
// ingested into Piazza and referenced by dataId in the status report; if
// ingest fails, the status is downgraded to "Fail" and reported without a
// result.
func sendExecResult(s pzsvc.Session, pzAddr, pzAuth, svcID, jobID, status string, resJSON []byte) {
	outAddr := pzAddr + `/service/` + svcID + `/task/` + jobID
	pzsvc.LogInfo(s, "Sending Exec Results. Status: "+status+".")
	if resJSON != nil {
		dataID, err := pzsvc.Ingest(s, "Output", "text", "pzsvc-taskworker", "", resJSON, nil)
		if err == nil {
			outStr := `{ "status" : "` + status + `", "result" : { "type" : "data", "dataId" : "` + dataID + `" } }`
			// Use the pzAuth parameter (previously s.PzAuth was used, silently
			// ignoring the argument; all visible callers pass s.PzAuth, so
			// this is behavior-identical for them).
			pzsvc.SubmitSinglePart("POST", outStr, outAddr, pzAuth)
			return
		}
		pzsvc.LogInfo(s, "Send Exec Results: Ingest failed.")
		status = "Fail"
	}
	outStr := `{ "status" : "` + status + `" }`
	pzsvc.SubmitSinglePart("POST", outStr, outAddr, pzAuth)
}
just in case
// Copyright 2016, RadiantBlue Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"time"
"github.com/venicegeo/pzsvc-exec/pzse"
"github.com/venicegeo/pzsvc-exec/pzsvc"
)
// main reads a pzse config file (path supplied as the first command-line
// argument), validates and normalizes the settings needed to work tasks,
// locates the target Piazza service (retrying while it registers), then
// starts NumProcs worker goroutines and blocks forever.
func main() {
	s := pzsvc.Session{AppName: "pzsvc-taskworker", SessionID: "startup", LogRootDir: "pzsvc-exec"}
	pzsvc.LogAudit(s, s.AppName, "startup", s.AppName)
	if len(os.Args) < 2 {
		pzsvc.LogSimpleErr(s, "error: Insufficient parameters. You must specify a config file.", nil)
		return
	}
	// First argument after the base call should be the path to the config file.
	// ReadFile returns the contents of the file as a byte buffer.
	configBuf, err := ioutil.ReadFile(os.Args[1])
	if err != nil {
		pzsvc.LogSimpleErr(s, "pzsvc-taskworker error in reading config: ", err)
		return
	}
	var configObj pzse.ConfigType
	err = json.Unmarshal(configBuf, &configObj)
	if err != nil {
		pzsvc.LogSimpleErr(s, "pzsvc-taskworker error in unmarshalling config: ", err)
		return
	}
	s.LogAudit = configObj.LogAudit
	if configObj.LogAudit {
		pzsvc.LogInfo(s, "Config: Audit logging enabled.")
	} else {
		pzsvc.LogInfo(s, "Config: Audit logging disabled.")
	}
	// The Piazza address may be overridden via the environment variable named
	// by PzAddrEnVar; a non-empty env value wins over the config value.
	s.PzAddr = configObj.PzAddr
	if configObj.PzAddrEnVar != "" {
		newAddr := os.Getenv(configObj.PzAddrEnVar)
		if newAddr != "" {
			s.PzAddr = newAddr
		}
	}
	if s.PzAddr == "" {
		pzsvc.LogSimpleErr(s, "Config: Cannot work tasks. Must have either a valid PzAddr, or a valid and populated PzAddrEnVar.", nil)
		return
	}
	if configObj.SvcName == "" {
		pzsvc.LogSimpleErr(s, "Config: Cannot work tasks without service name.", nil)
		return
	}
	if configObj.APIKeyEnVar == "" {
		pzsvc.LogSimpleErr(s, "Config: Cannot work tasks without valid APIKeyEnVar.", nil)
		return
	}
	apiKey := os.Getenv(configObj.APIKeyEnVar)
	if apiKey == "" {
		pzsvc.LogSimpleErr(s, "No API key at APIKeyEnVar. Cannot work.", nil)
		return
	}
	// HTTP basic auth with the API key as the user name and empty password.
	s.PzAuth = "Basic " + base64.StdEncoding.EncodeToString([]byte(apiKey+":"))
	if configObj.NumProcs == 0 {
		pzsvc.LogInfo(s, "Config: No Proc number specified. Defaulting to one at a time.")
		configObj.NumProcs = 1
	}
	if configObj.Port == 0 {
		pzsvc.LogInfo(s, "Config: No target Port specified. Defaulting to 8080.")
		configObj.Port = 8080
	}
	// Look up the service ID by name, retrying up to 10 times at 15-second
	// intervals in case the service has not been registered yet.
	svcID := ""
	for i := 0; svcID == "" && i < 10; i++ {
		svcID, err = pzsvc.FindMySvc(s, configObj.SvcName, configObj.PzAddr, s.PzAuth)
		if err != nil {
			pzsvc.LogSimpleErr(s, "Taskworker could not find Pz Service ID. Initial Error: ", err)
			return
		}
		if svcID == "" && i < 9 {
			pzsvc.LogInfo(s, "Could not find service. Will sleep and wait.")
			time.Sleep(15 * time.Second)
		}
	}
	if svcID == "" {
		pzsvc.LogSimpleErr(s, "Taskworker could not find Pz Service ID. No error, just no service.", err)
		return
	}
	pzsvc.LogInfo(s, "Found target service. ServiceID: "+svcID)
	// Each worker polls independently; main then parks forever.
	for i := 0; i < configObj.NumProcs; i++ {
		go workerThread(s, configObj, svcID)
	}
	select {} //blocks forever
}
// WorkBody exists as part of the response format of the Piazza job manager task request endpoint.
// Specifically, it's one layer of the bit we care about: the raw task content string.
type WorkBody struct {
	Content string `json:"content"`
}

// WorkDataInputs exists as part of the response format of the Piazza job manager task request endpoint.
// Specifically, it's one layer of the bit we care about.
type WorkDataInputs struct {
	Body WorkBody `json:"body"`
}

// WorkInData exists as part of the response format of the Piazza job manager task request endpoint.
// Specifically, it's one layer of the bit we care about.
type WorkInData struct {
	DataInputs WorkDataInputs `json:"dataInputs"`
}

// WorkSvcData exists as part of the response format of the Piazza job manager task request endpoint.
// Specifically, it's one layer of the bit we care about: the job ID plus its input data.
type WorkSvcData struct {
	Data  WorkInData `json:"data"`
	JobID string     `json:"jobId"`
}

// WorkOutData exists as part of the response format of the Piazza job manager task request endpoint.
// Specifically, it's the outermost layer of the bit we care about.
type WorkOutData struct {
	SvcData WorkSvcData `json:"serviceData"`
}
// workerThread is one polling loop: it repeatedly asks the Piazza job
// manager for a task assigned to svcID, forwards the task body to the local
// pzsvc-exec instance, and posts the result back via sendExecResult. It
// never returns and is intended to run as a goroutine. Task-fetch failures
// back off linearly (10s * consecutive-failure count).
func workerThread(s pzsvc.Session, configObj pzse.ConfigType, svcID string) {
	var (
		err       error
		failCount int // consecutive task-fetch failures; drives the backoff
	)
	workAddr := fmt.Sprintf("http://localhost:%d/execute", configObj.Port)
	// Each worker gets its own session ID for log correlation.
	s.SessionID, err = pzsvc.PsuUUID()
	if err != nil {
		s.SessionID = "FailedSessionInit"
		pzsvc.LogSimpleErr(s, "psuUUID error: ", err)
		panic("Worker thread failed on uid generation. Something is very wrong: " + err.Error())
	}
	pzsvc.LogInfo(s, "Worker thread initiated.")
	for {
		var pzJobObj struct {
			Data WorkOutData `json:"data"`
		}
		pzJobObj.Data = WorkOutData{SvcData: WorkSvcData{JobID: "", Data: WorkInData{DataInputs: WorkDataInputs{Body: WorkBody{Content: ""}}}}}
		byts, pErr := pzsvc.RequestKnownJSON("POST", "", configObj.PzAddr+"/service/"+svcID+"/task", s.PzAuth, &pzJobObj)
		if pErr != nil {
			pErr.Log(s, "Taskworker worker thread: error getting new task:")
			failCount++
			time.Sleep(time.Duration(10*failCount) * time.Second)
			continue
		}
		inpStr := pzJobObj.Data.SvcData.Data.DataInputs.Body.Content
		jobID := pzJobObj.Data.SvcData.JobID
		// An empty content string means no task was available.
		if inpStr != "" {
			pzsvc.LogInfo(s, "New Task Grabbed. JobID: "+jobID)
			failCount = 0
			var outpByts []byte
			if configObj.JwtSecAuthURL != "" {
				// TODO: once JWT conversion exists as an option, handle it here.
				// jwtBody = content
				// call JwtSecAuthURL. send jwtBody. get response
				// outBody = response (more or less)
			}
			var respObj pzse.OutStruct
			pzsvc.LogAuditBuf(s, s.UserID, "http request - calling pzsvc-exec", inpStr, workAddr)
			// NOTE(review): ":=" reuses outpByts (same scope) but shadows the
			// outer pErr; the shadowed pErr is checked immediately, so no harm.
			outpByts, pErr := pzsvc.RequestKnownJSON("POST", inpStr, workAddr, "", &respObj)
			if pErr != nil {
				pErr.Log(s, "Error calling pzsvc-exec")
				sendExecResult(s, configObj.PzAddr, s.PzAuth, svcID, jobID, "Fail", nil)
			} else {
				pzsvc.LogAuditBuf(s, workAddr, "http response from pzsvc-exec", string(outpByts), s.UserID)
				sendExecResult(s, configObj.PzAddr, s.PzAuth, svcID, jobID, "Success", outpByts)
			}
			// Short pause between tasks — presumably to avoid hammering the
			// job manager; confirm intent with the original author.
			time.Sleep(10 * time.Second)
		} else {
			pzsvc.LogInfo(s, "No Task. Sleeping now. input: "+string(byts))
			time.Sleep(60 * time.Second)
		}
	}
}
// sendExecResult reports the result of a job execution back to the Piazza
// job manager at pzAddr. When a result payload is supplied, it is first
// ingested into Piazza and referenced by dataId in the status report; if
// ingest fails, the status is downgraded to "Fail" and reported without a
// result.
func sendExecResult(s pzsvc.Session, pzAddr, pzAuth, svcID, jobID, status string, resJSON []byte) {
	outAddr := pzAddr + `/service/` + svcID + `/task/` + jobID
	pzsvc.LogInfo(s, "Sending Exec Results. Status: "+status+".")
	if resJSON != nil {
		dataID, err := pzsvc.Ingest(s, "Output", "text", "pzsvc-taskworker", "", resJSON, nil)
		if err == nil {
			outStr := `{ "status" : "` + status + `", "result" : { "type" : "data", "dataId" : "` + dataID + `" } }`
			// Use the pzAuth parameter (previously s.PzAuth was used, silently
			// ignoring the argument; all visible callers pass s.PzAuth, so
			// this is behavior-identical for them).
			pzsvc.SubmitSinglePart("POST", outStr, outAddr, pzAuth)
			return
		}
		pzsvc.LogInfo(s, "Send Exec Results: Ingest failed.")
		status = "Fail"
	}
	outStr := `{ "status" : "` + status + `" }`
	pzsvc.SubmitSinglePart("POST", outStr, outAddr, pzAuth)
}
|
// +build linux
package server
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"time"
cnitypes "github.com/containernetworking/cni/pkg/types"
current "github.com/containernetworking/cni/pkg/types/current"
"github.com/containers/libpod/pkg/annotations"
selinux "github.com/containers/libpod/pkg/selinux"
"github.com/containers/storage"
"github.com/cri-o/cri-o/internal/config/node"
"github.com/cri-o/cri-o/internal/lib"
libsandbox "github.com/cri-o/cri-o/internal/lib/sandbox"
"github.com/cri-o/cri-o/internal/log"
oci "github.com/cri-o/cri-o/internal/oci"
libconfig "github.com/cri-o/cri-o/pkg/config"
"github.com/cri-o/cri-o/pkg/sandbox"
"github.com/cri-o/cri-o/utils"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"golang.org/x/net/context"
"golang.org/x/sys/unix"
pb "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/leaky"
"k8s.io/kubernetes/pkg/kubelet/types"
)
func (s *Server) runPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest) (resp *pb.RunPodSandboxResponse, retErr error) {
s.updateLock.RLock()
defer s.updateLock.RUnlock()
sbox := sandbox.New(ctx)
if err := sbox.SetConfig(req.GetConfig()); err != nil {
return nil, errors.Wrap(err, "setting sandbox config")
}
pathsToChown := []string{}
// we need to fill in the container name, as it is not present in the request. Luckily, it is a constant.
log.Infof(ctx, "Running pod sandbox: %s%s", translateLabelsToDescription(sbox.Config().GetLabels()), leaky.PodInfraContainerName)
kubeName := sbox.Config().GetMetadata().GetName()
namespace := sbox.Config().GetMetadata().GetNamespace()
attempt := sbox.Config().GetMetadata().GetAttempt()
if err := sbox.SetNameAndID(); err != nil {
return nil, errors.Wrap(err, "setting pod sandbox name and id")
}
if _, err := s.ReservePodName(sbox.ID(), sbox.Name()); err != nil {
return nil, errors.Wrap(err, "reserving pod sandbox name")
}
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: releasing pod sandbox name: %s", sbox.Name())
s.ReleasePodName(sbox.Name())
}
}()
containerName, err := s.ReserveSandboxContainerIDAndName(sbox.Config())
if err != nil {
return nil, err
}
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: releasing container name: %s", containerName)
s.ReleaseContainerName(containerName)
}
}()
var labelOptions []string
securityContext := sbox.Config().GetLinux().GetSecurityContext()
selinuxConfig := securityContext.GetSelinuxOptions()
if selinuxConfig != nil {
labelOptions = utils.GetLabelOptions(selinuxConfig)
}
privileged := s.privilegedSandbox(req)
podContainer, err := s.StorageRuntimeServer().CreatePodSandbox(s.config.SystemContext,
sbox.Name(), sbox.ID(),
s.config.PauseImage,
s.config.PauseImageAuthFile,
"",
containerName,
kubeName,
sbox.Config().GetMetadata().GetUid(),
namespace,
attempt,
s.defaultIDMappings,
labelOptions,
privileged,
)
mountLabel := podContainer.MountLabel
processLabel := podContainer.ProcessLabel
if errors.Cause(err) == storage.ErrDuplicateName {
return nil, fmt.Errorf("pod sandbox with name %q already exists", sbox.Name())
}
if err != nil {
return nil, fmt.Errorf("error creating pod sandbox with name %q: %v", sbox.Name(), err)
}
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: removing pod sandbox from storage: %s", sbox.ID())
if err2 := s.StorageRuntimeServer().RemovePodSandbox(sbox.ID()); err2 != nil {
log.Warnf(ctx, "couldn't cleanup pod sandbox %q: %v", sbox.ID(), err2)
}
}
}()
// set log directory
logDir := sbox.Config().GetLogDirectory()
if logDir == "" {
logDir = filepath.Join(s.config.LogDir, sbox.ID())
}
// This should always be absolute from k8s.
if !filepath.IsAbs(logDir) {
return nil, fmt.Errorf("requested logDir for sbox id %s is a relative path: %s", sbox.ID(), logDir)
}
if err := os.MkdirAll(logDir, 0o700); err != nil {
return nil, err
}
// TODO: factor generating/updating the spec into something other projects can vendor
// creates a spec Generator with the default spec.
g, err := generate.New("linux")
if err != nil {
return nil, err
}
g.HostSpecific = true
g.ClearProcessRlimits()
for _, u := range s.config.Ulimits() {
g.AddProcessRlimits(u.Name, u.Hard, u.Soft)
}
// setup defaults for the pod sandbox
g.SetRootReadonly(true)
pauseCommand, err := PauseCommand(s.Config(), podContainer.Config)
if err != nil {
return nil, err
}
g.SetProcessArgs(pauseCommand)
// set DNS options
var resolvPath string
if sbox.Config().GetDnsConfig() != nil {
dnsServers := sbox.Config().GetDnsConfig().Servers
dnsSearches := sbox.Config().GetDnsConfig().Searches
dnsOptions := sbox.Config().GetDnsConfig().Options
resolvPath = fmt.Sprintf("%s/resolv.conf", podContainer.RunDir)
err = parseDNSOptions(dnsServers, dnsSearches, dnsOptions, resolvPath)
if err != nil {
err1 := removeFile(resolvPath)
if err1 != nil {
err = err1
return nil, fmt.Errorf("%v; failed to remove %s: %v", err, resolvPath, err1)
}
return nil, err
}
if err := label.Relabel(resolvPath, mountLabel, false); err != nil && errors.Cause(err) != unix.ENOTSUP {
if err1 := removeFile(resolvPath); err1 != nil {
return nil, fmt.Errorf("%v; failed to remove %s: %v", err, resolvPath, err1)
}
return nil, err
}
mnt := spec.Mount{
Type: "bind",
Source: resolvPath,
Destination: "/etc/resolv.conf",
Options: []string{"ro", "bind", "nodev", "nosuid", "noexec"},
}
pathsToChown = append(pathsToChown, resolvPath)
g.AddMount(mnt)
}
// add metadata
metadata := sbox.Config().GetMetadata()
metadataJSON, err := json.Marshal(metadata)
if err != nil {
return nil, err
}
// add labels
labels := sbox.Config().GetLabels()
if err := validateLabels(labels); err != nil {
return nil, err
}
// Add special container name label for the infra container
if labels != nil {
labels[types.KubernetesContainerNameLabel] = leaky.PodInfraContainerName
}
labelsJSON, err := json.Marshal(labels)
if err != nil {
return nil, err
}
// add annotations
kubeAnnotations := sbox.Config().GetAnnotations()
kubeAnnotationsJSON, err := json.Marshal(kubeAnnotations)
if err != nil {
return nil, err
}
// Add capabilities from crio.conf if default_capabilities is defined
capabilities := &pb.Capability{}
if s.config.DefaultCapabilities != nil {
g.ClearProcessCapabilities()
capabilities.AddCapabilities = append(capabilities.AddCapabilities, s.config.DefaultCapabilities...)
}
if err := setupCapabilities(&g, capabilities); err != nil {
return nil, err
}
nsOptsJSON, err := json.Marshal(securityContext.GetNamespaceOptions())
if err != nil {
return nil, err
}
hostIPC := securityContext.GetNamespaceOptions().GetIpc() == pb.NamespaceMode_NODE
hostPID := securityContext.GetNamespaceOptions().GetPid() == pb.NamespaceMode_NODE
// Don't use SELinux separation with Host Pid or IPC Namespace or privileged.
if hostPID || hostIPC {
processLabel, mountLabel = "", ""
}
g.SetProcessSelinuxLabel(processLabel)
g.SetLinuxMountLabel(mountLabel)
// Remove the default /dev/shm mount to ensure we overwrite it
g.RemoveMount(libsandbox.DevShmPath)
// create shm mount for the pod containers.
var shmPath string
if hostIPC {
shmPath = libsandbox.DevShmPath
} else {
shmPath, err = setupShm(podContainer.RunDir, mountLabel)
if err != nil {
return nil, err
}
pathsToChown = append(pathsToChown, shmPath)
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: unmounting shmPath for sandbox %s", sbox.ID())
if err2 := unix.Unmount(shmPath, unix.MNT_DETACH); err2 != nil {
log.Warnf(ctx, "failed to unmount shm for pod: %v", err2)
}
}
}()
}
mnt := spec.Mount{
Type: "bind",
Source: shmPath,
Destination: libsandbox.DevShmPath,
Options: []string{"rw", "bind"},
}
// bind mount the pod shm
g.AddMount(mnt)
err = s.setPodSandboxMountLabel(sbox.ID(), mountLabel)
if err != nil {
return nil, err
}
if err := s.CtrIDIndex().Add(sbox.ID()); err != nil {
return nil, err
}
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: deleting container ID from idIndex for sandbox %s", sbox.ID())
if err2 := s.CtrIDIndex().Delete(sbox.ID()); err2 != nil {
log.Warnf(ctx, "couldn't delete ctr id %s from idIndex", sbox.ID())
}
}
}()
// set log path inside log directory
logPath := filepath.Join(logDir, sbox.ID()+".log")
// Handle https://issues.k8s.io/44043
if err := utils.EnsureSaneLogPath(logPath); err != nil {
return nil, err
}
hostNetwork := securityContext.GetNamespaceOptions().GetNetwork() == pb.NamespaceMode_NODE
hostname, err := getHostname(sbox.ID(), sbox.Config().Hostname, hostNetwork)
if err != nil {
return nil, err
}
g.SetHostname(hostname)
// validate the runtime handler
runtimeHandler, err := s.runtimeHandler(req)
if err != nil {
return nil, err
}
g.AddAnnotation(annotations.Metadata, string(metadataJSON))
g.AddAnnotation(annotations.Labels, string(labelsJSON))
g.AddAnnotation(annotations.Annotations, string(kubeAnnotationsJSON))
g.AddAnnotation(annotations.LogPath, logPath)
g.AddAnnotation(annotations.Name, sbox.Name())
g.AddAnnotation(annotations.Namespace, namespace)
g.AddAnnotation(annotations.ContainerType, annotations.ContainerTypeSandbox)
g.AddAnnotation(annotations.SandboxID, sbox.ID())
g.AddAnnotation(annotations.Image, s.config.PauseImage)
g.AddAnnotation(annotations.ContainerName, containerName)
g.AddAnnotation(annotations.ContainerID, sbox.ID())
g.AddAnnotation(annotations.ShmPath, shmPath)
g.AddAnnotation(annotations.PrivilegedRuntime, fmt.Sprintf("%v", privileged))
g.AddAnnotation(annotations.RuntimeHandler, runtimeHandler)
g.AddAnnotation(annotations.ResolvPath, resolvPath)
g.AddAnnotation(annotations.HostName, hostname)
g.AddAnnotation(annotations.NamespaceOptions, string(nsOptsJSON))
g.AddAnnotation(annotations.KubeName, kubeName)
g.AddAnnotation(annotations.HostNetwork, fmt.Sprintf("%v", hostNetwork))
g.AddAnnotation(annotations.ContainerManager, lib.ContainerManagerCRIO)
if podContainer.Config.Config.StopSignal != "" {
// this key is defined in image-spec conversion document at https://github.com/opencontainers/image-spec/pull/492/files#diff-8aafbe2c3690162540381b8cdb157112R57
g.AddAnnotation("org.opencontainers.image.stopSignal", podContainer.Config.Config.StopSignal)
}
if s.config.CgroupManager().IsSystemd() && node.SystemdHasCollectMode() {
g.AddAnnotation("org.systemd.property.CollectMode", "'inactive-or-failed'")
}
created := time.Now()
g.AddAnnotation(annotations.Created, created.Format(time.RFC3339Nano))
portMappings := convertPortMappings(sbox.Config().GetPortMappings())
portMappingsJSON, err := json.Marshal(portMappings)
if err != nil {
return nil, err
}
g.AddAnnotation(annotations.PortMappings, string(portMappingsJSON))
cgroupParent, cgroupPath, err := s.config.CgroupManager().SandboxCgroupPath(sbox.Config().GetLinux().GetCgroupParent(), sbox.ID())
if err != nil {
return nil, err
}
if cgroupPath != "" {
g.SetLinuxCgroupsPath(cgroupPath)
}
g.AddAnnotation(annotations.CgroupParent, cgroupParent)
if s.defaultIDMappings != nil && !s.defaultIDMappings.Empty() {
if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), ""); err != nil {
return nil, errors.Wrap(err, "add or replace linux namespace")
}
for _, uidmap := range s.defaultIDMappings.UIDs() {
g.AddLinuxUIDMapping(uint32(uidmap.HostID), uint32(uidmap.ContainerID), uint32(uidmap.Size))
}
for _, gidmap := range s.defaultIDMappings.GIDs() {
g.AddLinuxGIDMapping(uint32(gidmap.HostID), uint32(gidmap.ContainerID), uint32(gidmap.Size))
}
}
sb, err := libsandbox.New(sbox.ID(), namespace, sbox.Name(), kubeName, logDir, labels, kubeAnnotations, processLabel, mountLabel, metadata, shmPath, cgroupParent, privileged, runtimeHandler, resolvPath, hostname, portMappings, hostNetwork, created)
if err != nil {
return nil, err
}
if err := s.addSandbox(sb); err != nil {
return nil, err
}
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: removing pod sandbox %s", sbox.ID())
if err := s.removeSandbox(sbox.ID()); err != nil {
log.Warnf(ctx, "could not remove pod sandbox: %v", err)
}
}
}()
if err := s.PodIDIndex().Add(sbox.ID()); err != nil {
return nil, err
}
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: deleting pod ID %s from idIndex", sbox.ID())
if err := s.PodIDIndex().Delete(sbox.ID()); err != nil {
log.Warnf(ctx, "couldn't delete pod id %s from idIndex", sbox.ID())
}
}
}()
for k, v := range kubeAnnotations {
g.AddAnnotation(k, v)
}
for k, v := range labels {
g.AddAnnotation(k, v)
}
// Add default sysctls given in crio.conf
s.configureGeneratorForSysctls(ctx, g, hostNetwork, hostIPC)
// extract linux sysctls from annotations and pass down to oci runtime
// Will override any duplicate default systcl from crio.conf
for key, value := range sbox.Config().GetLinux().GetSysctls() {
g.AddLinuxSysctl(key, value)
}
// Set OOM score adjust of the infra container to be very low
// so it doesn't get killed.
g.SetProcessOOMScoreAdj(PodInfraOOMAdj)
g.SetLinuxResourcesCPUShares(PodInfraCPUshares)
// set up namespaces
cleanupFuncs, err := s.configureGeneratorForSandboxNamespaces(hostNetwork, hostIPC, hostPID, sb, g)
// We want to cleanup after ourselves if we are managing any namespaces and fail in this function.
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: cleaning up namespaces after failing to run sandbox %s", sbox.ID())
for idx := range cleanupFuncs {
if err2 := cleanupFuncs[idx](); err2 != nil {
log.Debugf(ctx, err2.Error())
}
}
}
}()
if err != nil {
return nil, err
}
if s.Config().Seccomp().IsDisabled() {
g.Config.Linux.Seccomp = nil
}
saveOptions := generate.ExportOptions{}
mountPoint, err := s.StorageRuntimeServer().StartContainer(sbox.ID())
if err != nil {
return nil, fmt.Errorf("failed to mount container %s in pod sandbox %s(%s): %v", containerName, sb.Name(), sbox.ID(), err)
}
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: stopping storage container for sandbox %s", sbox.ID())
if err2 := s.StorageRuntimeServer().StopContainer(sbox.ID()); err2 != nil {
log.Warnf(ctx, "couldn't stop storage container: %v: %v", sbox.ID(), err2)
}
}
}()
g.AddAnnotation(annotations.MountPoint, mountPoint)
hostnamePath := fmt.Sprintf("%s/hostname", podContainer.RunDir)
if err := ioutil.WriteFile(hostnamePath, []byte(hostname+"\n"), 0o644); err != nil {
return nil, err
}
if err := label.Relabel(hostnamePath, mountLabel, false); err != nil && errors.Cause(err) != unix.ENOTSUP {
return nil, err
}
mnt = spec.Mount{
Type: "bind",
Source: hostnamePath,
Destination: "/etc/hostname",
Options: []string{"ro", "bind", "nodev", "nosuid", "noexec"},
}
pathsToChown = append(pathsToChown, hostnamePath)
g.AddMount(mnt)
g.AddAnnotation(annotations.HostnamePath, hostnamePath)
sb.AddHostnamePath(hostnamePath)
container, err := oci.NewContainer(sbox.ID(), containerName, podContainer.RunDir, logPath, labels, g.Config.Annotations, kubeAnnotations, s.config.PauseImage, "", "", nil, sbox.ID(), false, false, false, runtimeHandler, podContainer.Dir, created, podContainer.Config.Config.StopSignal)
if err != nil {
return nil, err
}
runtimeType, err := s.Runtime().ContainerRuntimeType(container)
if err != nil {
return nil, err
}
// If using kata runtime, the process label should be set to container_kvm_t
// Keep in mind that kata does *not* apply any process label to containers within the VM
// Note: the requirement here is that the name used for the runtime class has "kata" in it
// or the runtime_type is set to "vm"
if runtimeType == libconfig.RuntimeTypeVM || strings.Contains(strings.ToLower(runtimeHandler), "kata") {
processLabel, err = selinux.SELinuxKVMLabel(processLabel)
if err != nil {
return nil, err
}
g.SetProcessSelinuxLabel(processLabel)
}
container.SetMountPoint(mountPoint)
container.SetIDMappings(s.defaultIDMappings)
if s.defaultIDMappings != nil && !s.defaultIDMappings.Empty() {
if securityContext.GetNamespaceOptions().GetIpc() == pb.NamespaceMode_NODE {
g.RemoveMount("/dev/mqueue")
mqueue := spec.Mount{
Type: "bind",
Source: "/dev/mqueue",
Destination: "/dev/mqueue",
Options: []string{"rw", "rbind", "nodev", "nosuid", "noexec"},
}
g.AddMount(mqueue)
}
if hostNetwork {
g.RemoveMount("/sys")
g.RemoveMount("/sys/cgroup")
sysMnt := spec.Mount{
Destination: "/sys",
Type: "bind",
Source: "/sys",
Options: []string{"nosuid", "noexec", "nodev", "ro", "rbind"},
}
g.AddMount(sysMnt)
}
if securityContext.GetNamespaceOptions().GetPid() == pb.NamespaceMode_NODE {
g.RemoveMount("/proc")
proc := spec.Mount{
Type: "bind",
Source: "/proc",
Destination: "/proc",
Options: []string{"rw", "rbind", "nodev", "nosuid", "noexec"},
}
g.AddMount(proc)
}
}
g.SetRootPath(mountPoint)
if os.Getenv(rootlessEnvName) != "" {
makeOCIConfigurationRootless(&g)
}
container.SetSpec(g.Config)
if err := sb.SetInfraContainer(container); err != nil {
return nil, err
}
var ips []string
var result cnitypes.Result
if s.config.ManageNSLifecycle {
ips, result, err = s.networkStart(ctx, sb)
if err != nil {
return nil, err
}
if result != nil {
resultCurrent, err := current.NewResultFromResult(result)
if err != nil {
return nil, err
}
cniResultJSON, err := json.Marshal(resultCurrent)
if err != nil {
return nil, err
}
g.AddAnnotation(annotations.CNIResult, string(cniResultJSON))
}
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: in manageNSLifecycle, stopping network for sandbox %s", sb.ID())
if err2 := s.networkStop(ctx, sb); err2 != nil {
log.Errorf(ctx, "error stopping network on cleanup: %v", err2)
}
}
}()
}
for idx, ip := range ips {
g.AddAnnotation(fmt.Sprintf("%s.%d", annotations.IP, idx), ip)
}
sb.AddIPs(ips)
sb.SetNamespaceOptions(securityContext.GetNamespaceOptions())
spp := securityContext.GetSeccompProfilePath()
g.AddAnnotation(annotations.SeccompProfilePath, spp)
sb.SetSeccompProfilePath(spp)
if !privileged {
if err := s.setupSeccomp(ctx, &g, spp); err != nil {
return nil, err
}
}
err = g.SaveToFile(filepath.Join(podContainer.Dir, "config.json"), saveOptions)
if err != nil {
return nil, fmt.Errorf("failed to save template configuration for pod sandbox %s(%s): %v", sb.Name(), sbox.ID(), err)
}
if err = g.SaveToFile(filepath.Join(podContainer.RunDir, "config.json"), saveOptions); err != nil {
return nil, fmt.Errorf("failed to write runtime configuration for pod sandbox %s(%s): %v", sb.Name(), sbox.ID(), err)
}
s.addInfraContainer(container)
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: removing infra container %s", container.ID())
s.removeInfraContainer(container)
}
}()
if s.defaultIDMappings != nil && !s.defaultIDMappings.Empty() {
rootPair := s.defaultIDMappings.RootPair()
for _, path := range pathsToChown {
if err := os.Chown(path, rootPair.UID, rootPair.GID); err != nil {
return nil, errors.Wrapf(err, "cannot chown %s to %d:%d", path, rootPair.UID, rootPair.GID)
}
}
}
if err := s.createContainerPlatform(container, sb.CgroupParent()); err != nil {
return nil, err
}
if err := s.Runtime().StartContainer(container); err != nil {
return nil, err
}
defer func() {
if retErr != nil {
// Clean-up steps from RemovePodSanbox
log.Infof(ctx, "runSandbox: stopping container %s", container.ID())
if err2 := s.Runtime().StopContainer(ctx, container, int64(10)); err2 != nil {
log.Warnf(ctx, "failed to stop container %s: %v", container.Name(), err2)
}
if err2 := s.Runtime().WaitContainerStateStopped(ctx, container); err2 != nil {
log.Warnf(ctx, "failed to get container 'stopped' status %s in pod sandbox %s: %v", container.Name(), sb.ID(), err2)
}
log.Infof(ctx, "runSandbox: deleting container %s", container.ID())
if err2 := s.Runtime().DeleteContainer(container); err2 != nil {
log.Warnf(ctx, "failed to delete container %s in pod sandbox %s: %v", container.Name(), sb.ID(), err2)
}
log.Infof(ctx, "runSandbox: writing container %s state to disk", container.ID())
if err2 := s.ContainerStateToDisk(container); err2 != nil {
log.Warnf(ctx, "failed to write container state %s in pod sandbox %s: %v", container.Name(), sb.ID(), err2)
}
}
}()
if err := s.ContainerStateToDisk(container); err != nil {
log.Warnf(ctx, "unable to write containers %s state to disk: %v", container.ID(), err)
}
if !s.config.ManageNSLifecycle {
ips, _, err = s.networkStart(ctx, sb)
if err != nil {
return nil, err
}
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: in not manageNSLifecycle, stopping network for sandbox %s", sb.ID())
if err2 := s.networkStop(ctx, sb); err2 != nil {
log.Errorf(ctx, "error stopping network on cleanup: %v", err2)
}
}
}()
}
sb.AddIPs(ips)
sb.SetCreated()
if ctx.Err() == context.Canceled || ctx.Err() == context.DeadlineExceeded {
log.Infof(ctx, "runSandbox: context was either canceled or the deadline was exceeded: %v", ctx.Err())
return nil, ctx.Err()
}
log.Infof(ctx, "Ran pod sandbox %s with infra container: %s", container.ID(), container.Description())
resp = &pb.RunPodSandboxResponse{PodSandboxId: sbox.ID()}
return resp, nil
}
// setupShm creates a "shm" directory under the pod sandbox run directory and
// mounts a size-limited tmpfs on it, formatted with the provided SELinux
// mount label. It returns the path to the mounted shm directory.
//
// If the mount fails, the just-created directory is removed (best effort) so
// that no empty, unmounted shm directory is left behind in the run dir.
func setupShm(podSandboxRunDir, mountLabel string) (shmPath string, _ error) {
	shmPath = filepath.Join(podSandboxRunDir, "shm")
	if err := os.Mkdir(shmPath, 0o700); err != nil {
		return "", err
	}
	// mode=1777: world-writable with the sticky bit, matching the kernel's
	// default /dev/shm semantics; size is capped at the sandbox default.
	shmOptions := "mode=1777,size=" + strconv.Itoa(libsandbox.DefaultShmSize)
	if err := unix.Mount("shm", shmPath, "tmpfs", unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV,
		label.FormatMountLabel(shmOptions, mountLabel)); err != nil {
		// The mount never happened, so clean up the directory we created.
		// Best effort: there is nothing more useful to do with a failure here.
		_ = os.Remove(shmPath)
		return "", fmt.Errorf("failed to mount shm tmpfs for pod: %v", err)
	}
	return shmPath, nil
}
// PauseCommand returns the pause command for the provided image configuration.
//
// If the configuration specifies a pause command (the configuration default
// is `/pause`), it takes precedence. Only when the configured command is
// empty is the command derived from the image's Entrypoint and Cmd; if the
// image provides neither, an error is returned.
func PauseCommand(cfg *libconfig.Config, image *v1.Image) ([]string, error) {
	if cfg == nil {
		return nil, fmt.Errorf("provided configuration is nil")
	}
	if cfg.PauseCommand != "" {
		// The configured command wins over whatever the image declares.
		return []string{cfg.PauseCommand}, nil
	}
	// No pause command configured: fall back to the image configuration.
	if image == nil ||
		(len(image.Config.Entrypoint) == 0 && len(image.Config.Cmd) == 0) {
		return nil, fmt.Errorf(
			"unable to run pause image %q: %s",
			cfg.PauseImage,
			"neither Cmd nor Entrypoint specified",
		)
	}
	cmd := make([]string, 0, len(image.Config.Entrypoint)+len(image.Config.Cmd))
	cmd = append(cmd, image.Config.Entrypoint...)
	cmd = append(cmd, image.Config.Cmd...)
	return cmd, nil
}
// configureGeneratorForSysctls adds the default sysctls from crio.conf to the
// spec generator. Invalid sysctls are logged and skipped rather than failing
// sandbox creation.
func (s *Server) configureGeneratorForSysctls(ctx context.Context, g generate.Generator, hostNetwork, hostIPC bool) {
	defaultSysctls, err := s.config.RuntimeConfig.Sysctls()
	if err != nil {
		// Warn but keep going with whatever was returned.
		log.Warnf(ctx, "sysctls invalid: %v", err)
	}
	for _, sc := range defaultSysctls {
		if validationErr := sc.Validate(hostNetwork, hostIPC); validationErr != nil {
			log.Warnf(ctx, "skipping invalid sysctl %s: %v", sc, validationErr)
		} else {
			g.AddLinuxSysctl(sc.Key(), sc.Value())
		}
	}
}
// configureGeneratorForSandboxNamespaces sets the linux namespaces for the generator,
// based on whether the pod is sharing namespaces with the host, as well as whether
// CRI-O should be managing the namespace lifecycle.
// It returns a slice of cleanup funcs, all of which are the respective NamespaceRemove() for the sandbox.
// The caller should defer the cleanup funcs if there is an error, to make sure each
// namespace we are managing is properly cleaned up.
func (s *Server) configureGeneratorForSandboxNamespaces(hostNetwork, hostIPC, hostPID bool, sb *libsandbox.Sandbox, g generate.Generator) (cleanupFuncs []func() error, retErr error) {
	toManage := make([]libsandbox.NSType, 0, 3)

	// Network: drop the namespace entirely when sharing with the host;
	// otherwise let CRI-O manage it when configured to do so.
	if hostNetwork {
		if err := g.RemoveLinuxNamespace(string(spec.NetworkNamespace)); err != nil {
			return nil, err
		}
	} else if s.config.ManageNSLifecycle {
		toManage = append(toManage, libsandbox.NETNS)
	}

	// IPC: same treatment as the network namespace.
	if hostIPC {
		if err := g.RemoveLinuxNamespace(string(spec.IPCNamespace)); err != nil {
			return nil, err
		}
	} else if s.config.ManageNSLifecycle {
		toManage = append(toManage, libsandbox.IPCNS)
	}

	// Since we need a process to hold open the PID namespace, CRI-O can't
	// manage the NS lifecycle, so host PID only ever removes the namespace.
	if hostPID {
		if err := g.RemoveLinuxNamespace(string(spec.PIDNamespace)); err != nil {
			return nil, err
		}
	}

	// There's no option to set hostUTS.
	if s.config.ManageNSLifecycle {
		toManage = append(toManage, libsandbox.UTSNS)
		// Now that we've decided which namespaces we're sharing, tell the
		// sandbox to create and configure them.
		createdNamespaces, err := sb.CreateManagedNamespaces(toManage, &s.config)
		if err != nil {
			return nil, err
		}
		cleanupFuncs = append(cleanupFuncs, sb.RemoveManagedNamespaces)
		if err := configureGeneratorGivenNamespacePaths(createdNamespaces, g); err != nil {
			return cleanupFuncs, err
		}
	}
	return cleanupFuncs, nil
}
// configureGeneratorGivenNamespacePaths takes a list of managed namespaces
// (each carrying an nsType and an nsPath) and configures the generator to add
// or replace the default namespaces with those paths.
func configureGeneratorGivenNamespacePaths(managedNamespaces []*libsandbox.ManagedNamespace, g generate.Generator) error {
	typeToSpec := map[libsandbox.NSType]spec.LinuxNamespaceType{
		libsandbox.IPCNS:  spec.IPCNamespace,
		libsandbox.NETNS:  spec.NetworkNamespace,
		libsandbox.UTSNS:  spec.UTSNamespace,
		libsandbox.USERNS: spec.UserNamespace,
	}
	for _, ns := range managedNamespaces {
		// allow for empty paths, as this namespace just shouldn't be configured
		if ns.Path() == "" {
			continue
		}
		nsForSpec := typeToSpec[ns.Type()]
		if nsForSpec == "" {
			// Report the actual offending type: nsForSpec is necessarily the
			// empty string in this branch, so printing it would be useless.
			return errors.Errorf("Invalid namespace type %s", ns.Type())
		}
		err := g.AddOrReplaceLinuxNamespace(string(nsForSpec), ns.Path())
		if err != nil {
			return err
		}
	}
	return nil
}
Remove an unnecessary `err` assignment.
Signed-off-by: Ted Yu <22f3e725b314afeca4d45e12683c560674029d21@gmail.com>
// +build linux
package server
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"time"
cnitypes "github.com/containernetworking/cni/pkg/types"
current "github.com/containernetworking/cni/pkg/types/current"
"github.com/containers/libpod/pkg/annotations"
selinux "github.com/containers/libpod/pkg/selinux"
"github.com/containers/storage"
"github.com/cri-o/cri-o/internal/config/node"
"github.com/cri-o/cri-o/internal/lib"
libsandbox "github.com/cri-o/cri-o/internal/lib/sandbox"
"github.com/cri-o/cri-o/internal/log"
oci "github.com/cri-o/cri-o/internal/oci"
libconfig "github.com/cri-o/cri-o/pkg/config"
"github.com/cri-o/cri-o/pkg/sandbox"
"github.com/cri-o/cri-o/utils"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"golang.org/x/net/context"
"golang.org/x/sys/unix"
pb "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/leaky"
"k8s.io/kubernetes/pkg/kubelet/types"
)
func (s *Server) runPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest) (resp *pb.RunPodSandboxResponse, retErr error) {
s.updateLock.RLock()
defer s.updateLock.RUnlock()
sbox := sandbox.New(ctx)
if err := sbox.SetConfig(req.GetConfig()); err != nil {
return nil, errors.Wrap(err, "setting sandbox config")
}
pathsToChown := []string{}
// we need to fill in the container name, as it is not present in the request. Luckily, it is a constant.
log.Infof(ctx, "Running pod sandbox: %s%s", translateLabelsToDescription(sbox.Config().GetLabels()), leaky.PodInfraContainerName)
kubeName := sbox.Config().GetMetadata().GetName()
namespace := sbox.Config().GetMetadata().GetNamespace()
attempt := sbox.Config().GetMetadata().GetAttempt()
if err := sbox.SetNameAndID(); err != nil {
return nil, errors.Wrap(err, "setting pod sandbox name and id")
}
if _, err := s.ReservePodName(sbox.ID(), sbox.Name()); err != nil {
return nil, errors.Wrap(err, "reserving pod sandbox name")
}
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: releasing pod sandbox name: %s", sbox.Name())
s.ReleasePodName(sbox.Name())
}
}()
containerName, err := s.ReserveSandboxContainerIDAndName(sbox.Config())
if err != nil {
return nil, err
}
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: releasing container name: %s", containerName)
s.ReleaseContainerName(containerName)
}
}()
var labelOptions []string
securityContext := sbox.Config().GetLinux().GetSecurityContext()
selinuxConfig := securityContext.GetSelinuxOptions()
if selinuxConfig != nil {
labelOptions = utils.GetLabelOptions(selinuxConfig)
}
privileged := s.privilegedSandbox(req)
podContainer, err := s.StorageRuntimeServer().CreatePodSandbox(s.config.SystemContext,
sbox.Name(), sbox.ID(),
s.config.PauseImage,
s.config.PauseImageAuthFile,
"",
containerName,
kubeName,
sbox.Config().GetMetadata().GetUid(),
namespace,
attempt,
s.defaultIDMappings,
labelOptions,
privileged,
)
mountLabel := podContainer.MountLabel
processLabel := podContainer.ProcessLabel
if errors.Cause(err) == storage.ErrDuplicateName {
return nil, fmt.Errorf("pod sandbox with name %q already exists", sbox.Name())
}
if err != nil {
return nil, fmt.Errorf("error creating pod sandbox with name %q: %v", sbox.Name(), err)
}
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: removing pod sandbox from storage: %s", sbox.ID())
if err2 := s.StorageRuntimeServer().RemovePodSandbox(sbox.ID()); err2 != nil {
log.Warnf(ctx, "couldn't cleanup pod sandbox %q: %v", sbox.ID(), err2)
}
}
}()
// set log directory
logDir := sbox.Config().GetLogDirectory()
if logDir == "" {
logDir = filepath.Join(s.config.LogDir, sbox.ID())
}
// This should always be absolute from k8s.
if !filepath.IsAbs(logDir) {
return nil, fmt.Errorf("requested logDir for sbox id %s is a relative path: %s", sbox.ID(), logDir)
}
if err := os.MkdirAll(logDir, 0o700); err != nil {
return nil, err
}
// TODO: factor generating/updating the spec into something other projects can vendor
// creates a spec Generator with the default spec.
g, err := generate.New("linux")
if err != nil {
return nil, err
}
g.HostSpecific = true
g.ClearProcessRlimits()
for _, u := range s.config.Ulimits() {
g.AddProcessRlimits(u.Name, u.Hard, u.Soft)
}
// setup defaults for the pod sandbox
g.SetRootReadonly(true)
pauseCommand, err := PauseCommand(s.Config(), podContainer.Config)
if err != nil {
return nil, err
}
g.SetProcessArgs(pauseCommand)
// set DNS options
var resolvPath string
if sbox.Config().GetDnsConfig() != nil {
dnsServers := sbox.Config().GetDnsConfig().Servers
dnsSearches := sbox.Config().GetDnsConfig().Searches
dnsOptions := sbox.Config().GetDnsConfig().Options
resolvPath = fmt.Sprintf("%s/resolv.conf", podContainer.RunDir)
err = parseDNSOptions(dnsServers, dnsSearches, dnsOptions, resolvPath)
if err != nil {
err1 := removeFile(resolvPath)
if err1 != nil {
return nil, fmt.Errorf("%v; failed to remove %s: %v", err, resolvPath, err1)
}
return nil, err
}
if err := label.Relabel(resolvPath, mountLabel, false); err != nil && errors.Cause(err) != unix.ENOTSUP {
if err1 := removeFile(resolvPath); err1 != nil {
return nil, fmt.Errorf("%v; failed to remove %s: %v", err, resolvPath, err1)
}
return nil, err
}
mnt := spec.Mount{
Type: "bind",
Source: resolvPath,
Destination: "/etc/resolv.conf",
Options: []string{"ro", "bind", "nodev", "nosuid", "noexec"},
}
pathsToChown = append(pathsToChown, resolvPath)
g.AddMount(mnt)
}
// add metadata
metadata := sbox.Config().GetMetadata()
metadataJSON, err := json.Marshal(metadata)
if err != nil {
return nil, err
}
// add labels
labels := sbox.Config().GetLabels()
if err := validateLabels(labels); err != nil {
return nil, err
}
// Add special container name label for the infra container
if labels != nil {
labels[types.KubernetesContainerNameLabel] = leaky.PodInfraContainerName
}
labelsJSON, err := json.Marshal(labels)
if err != nil {
return nil, err
}
// add annotations
kubeAnnotations := sbox.Config().GetAnnotations()
kubeAnnotationsJSON, err := json.Marshal(kubeAnnotations)
if err != nil {
return nil, err
}
// Add capabilities from crio.conf if default_capabilities is defined
capabilities := &pb.Capability{}
if s.config.DefaultCapabilities != nil {
g.ClearProcessCapabilities()
capabilities.AddCapabilities = append(capabilities.AddCapabilities, s.config.DefaultCapabilities...)
}
if err := setupCapabilities(&g, capabilities); err != nil {
return nil, err
}
nsOptsJSON, err := json.Marshal(securityContext.GetNamespaceOptions())
if err != nil {
return nil, err
}
hostIPC := securityContext.GetNamespaceOptions().GetIpc() == pb.NamespaceMode_NODE
hostPID := securityContext.GetNamespaceOptions().GetPid() == pb.NamespaceMode_NODE
// Don't use SELinux separation with Host Pid or IPC Namespace or privileged.
if hostPID || hostIPC {
processLabel, mountLabel = "", ""
}
g.SetProcessSelinuxLabel(processLabel)
g.SetLinuxMountLabel(mountLabel)
// Remove the default /dev/shm mount to ensure we overwrite it
g.RemoveMount(libsandbox.DevShmPath)
// create shm mount for the pod containers.
var shmPath string
if hostIPC {
shmPath = libsandbox.DevShmPath
} else {
shmPath, err = setupShm(podContainer.RunDir, mountLabel)
if err != nil {
return nil, err
}
pathsToChown = append(pathsToChown, shmPath)
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: unmounting shmPath for sandbox %s", sbox.ID())
if err2 := unix.Unmount(shmPath, unix.MNT_DETACH); err2 != nil {
log.Warnf(ctx, "failed to unmount shm for pod: %v", err2)
}
}
}()
}
mnt := spec.Mount{
Type: "bind",
Source: shmPath,
Destination: libsandbox.DevShmPath,
Options: []string{"rw", "bind"},
}
// bind mount the pod shm
g.AddMount(mnt)
err = s.setPodSandboxMountLabel(sbox.ID(), mountLabel)
if err != nil {
return nil, err
}
if err := s.CtrIDIndex().Add(sbox.ID()); err != nil {
return nil, err
}
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: deleting container ID from idIndex for sandbox %s", sbox.ID())
if err2 := s.CtrIDIndex().Delete(sbox.ID()); err2 != nil {
log.Warnf(ctx, "couldn't delete ctr id %s from idIndex", sbox.ID())
}
}
}()
// set log path inside log directory
logPath := filepath.Join(logDir, sbox.ID()+".log")
// Handle https://issues.k8s.io/44043
if err := utils.EnsureSaneLogPath(logPath); err != nil {
return nil, err
}
hostNetwork := securityContext.GetNamespaceOptions().GetNetwork() == pb.NamespaceMode_NODE
hostname, err := getHostname(sbox.ID(), sbox.Config().Hostname, hostNetwork)
if err != nil {
return nil, err
}
g.SetHostname(hostname)
// validate the runtime handler
runtimeHandler, err := s.runtimeHandler(req)
if err != nil {
return nil, err
}
g.AddAnnotation(annotations.Metadata, string(metadataJSON))
g.AddAnnotation(annotations.Labels, string(labelsJSON))
g.AddAnnotation(annotations.Annotations, string(kubeAnnotationsJSON))
g.AddAnnotation(annotations.LogPath, logPath)
g.AddAnnotation(annotations.Name, sbox.Name())
g.AddAnnotation(annotations.Namespace, namespace)
g.AddAnnotation(annotations.ContainerType, annotations.ContainerTypeSandbox)
g.AddAnnotation(annotations.SandboxID, sbox.ID())
g.AddAnnotation(annotations.Image, s.config.PauseImage)
g.AddAnnotation(annotations.ContainerName, containerName)
g.AddAnnotation(annotations.ContainerID, sbox.ID())
g.AddAnnotation(annotations.ShmPath, shmPath)
g.AddAnnotation(annotations.PrivilegedRuntime, fmt.Sprintf("%v", privileged))
g.AddAnnotation(annotations.RuntimeHandler, runtimeHandler)
g.AddAnnotation(annotations.ResolvPath, resolvPath)
g.AddAnnotation(annotations.HostName, hostname)
g.AddAnnotation(annotations.NamespaceOptions, string(nsOptsJSON))
g.AddAnnotation(annotations.KubeName, kubeName)
g.AddAnnotation(annotations.HostNetwork, fmt.Sprintf("%v", hostNetwork))
g.AddAnnotation(annotations.ContainerManager, lib.ContainerManagerCRIO)
if podContainer.Config.Config.StopSignal != "" {
// this key is defined in image-spec conversion document at https://github.com/opencontainers/image-spec/pull/492/files#diff-8aafbe2c3690162540381b8cdb157112R57
g.AddAnnotation("org.opencontainers.image.stopSignal", podContainer.Config.Config.StopSignal)
}
if s.config.CgroupManager().IsSystemd() && node.SystemdHasCollectMode() {
g.AddAnnotation("org.systemd.property.CollectMode", "'inactive-or-failed'")
}
created := time.Now()
g.AddAnnotation(annotations.Created, created.Format(time.RFC3339Nano))
portMappings := convertPortMappings(sbox.Config().GetPortMappings())
portMappingsJSON, err := json.Marshal(portMappings)
if err != nil {
return nil, err
}
g.AddAnnotation(annotations.PortMappings, string(portMappingsJSON))
cgroupParent, cgroupPath, err := s.config.CgroupManager().SandboxCgroupPath(sbox.Config().GetLinux().GetCgroupParent(), sbox.ID())
if err != nil {
return nil, err
}
if cgroupPath != "" {
g.SetLinuxCgroupsPath(cgroupPath)
}
g.AddAnnotation(annotations.CgroupParent, cgroupParent)
if s.defaultIDMappings != nil && !s.defaultIDMappings.Empty() {
if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), ""); err != nil {
return nil, errors.Wrap(err, "add or replace linux namespace")
}
for _, uidmap := range s.defaultIDMappings.UIDs() {
g.AddLinuxUIDMapping(uint32(uidmap.HostID), uint32(uidmap.ContainerID), uint32(uidmap.Size))
}
for _, gidmap := range s.defaultIDMappings.GIDs() {
g.AddLinuxGIDMapping(uint32(gidmap.HostID), uint32(gidmap.ContainerID), uint32(gidmap.Size))
}
}
sb, err := libsandbox.New(sbox.ID(), namespace, sbox.Name(), kubeName, logDir, labels, kubeAnnotations, processLabel, mountLabel, metadata, shmPath, cgroupParent, privileged, runtimeHandler, resolvPath, hostname, portMappings, hostNetwork, created)
if err != nil {
return nil, err
}
if err := s.addSandbox(sb); err != nil {
return nil, err
}
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: removing pod sandbox %s", sbox.ID())
if err := s.removeSandbox(sbox.ID()); err != nil {
log.Warnf(ctx, "could not remove pod sandbox: %v", err)
}
}
}()
if err := s.PodIDIndex().Add(sbox.ID()); err != nil {
return nil, err
}
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: deleting pod ID %s from idIndex", sbox.ID())
if err := s.PodIDIndex().Delete(sbox.ID()); err != nil {
log.Warnf(ctx, "couldn't delete pod id %s from idIndex", sbox.ID())
}
}
}()
for k, v := range kubeAnnotations {
g.AddAnnotation(k, v)
}
for k, v := range labels {
g.AddAnnotation(k, v)
}
// Add default sysctls given in crio.conf
s.configureGeneratorForSysctls(ctx, g, hostNetwork, hostIPC)
// extract linux sysctls from annotations and pass down to oci runtime
// Will override any duplicate default systcl from crio.conf
for key, value := range sbox.Config().GetLinux().GetSysctls() {
g.AddLinuxSysctl(key, value)
}
// Set OOM score adjust of the infra container to be very low
// so it doesn't get killed.
g.SetProcessOOMScoreAdj(PodInfraOOMAdj)
g.SetLinuxResourcesCPUShares(PodInfraCPUshares)
// set up namespaces
cleanupFuncs, err := s.configureGeneratorForSandboxNamespaces(hostNetwork, hostIPC, hostPID, sb, g)
// We want to cleanup after ourselves if we are managing any namespaces and fail in this function.
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: cleaning up namespaces after failing to run sandbox %s", sbox.ID())
for idx := range cleanupFuncs {
if err2 := cleanupFuncs[idx](); err2 != nil {
log.Debugf(ctx, err2.Error())
}
}
}
}()
if err != nil {
return nil, err
}
if s.Config().Seccomp().IsDisabled() {
g.Config.Linux.Seccomp = nil
}
saveOptions := generate.ExportOptions{}
mountPoint, err := s.StorageRuntimeServer().StartContainer(sbox.ID())
if err != nil {
return nil, fmt.Errorf("failed to mount container %s in pod sandbox %s(%s): %v", containerName, sb.Name(), sbox.ID(), err)
}
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: stopping storage container for sandbox %s", sbox.ID())
if err2 := s.StorageRuntimeServer().StopContainer(sbox.ID()); err2 != nil {
log.Warnf(ctx, "couldn't stop storage container: %v: %v", sbox.ID(), err2)
}
}
}()
g.AddAnnotation(annotations.MountPoint, mountPoint)
hostnamePath := fmt.Sprintf("%s/hostname", podContainer.RunDir)
if err := ioutil.WriteFile(hostnamePath, []byte(hostname+"\n"), 0o644); err != nil {
return nil, err
}
if err := label.Relabel(hostnamePath, mountLabel, false); err != nil && errors.Cause(err) != unix.ENOTSUP {
return nil, err
}
mnt = spec.Mount{
Type: "bind",
Source: hostnamePath,
Destination: "/etc/hostname",
Options: []string{"ro", "bind", "nodev", "nosuid", "noexec"},
}
pathsToChown = append(pathsToChown, hostnamePath)
g.AddMount(mnt)
g.AddAnnotation(annotations.HostnamePath, hostnamePath)
sb.AddHostnamePath(hostnamePath)
container, err := oci.NewContainer(sbox.ID(), containerName, podContainer.RunDir, logPath, labels, g.Config.Annotations, kubeAnnotations, s.config.PauseImage, "", "", nil, sbox.ID(), false, false, false, runtimeHandler, podContainer.Dir, created, podContainer.Config.Config.StopSignal)
if err != nil {
return nil, err
}
runtimeType, err := s.Runtime().ContainerRuntimeType(container)
if err != nil {
return nil, err
}
// If using kata runtime, the process label should be set to container_kvm_t
// Keep in mind that kata does *not* apply any process label to containers within the VM
// Note: the requirement here is that the name used for the runtime class has "kata" in it
// or the runtime_type is set to "vm"
if runtimeType == libconfig.RuntimeTypeVM || strings.Contains(strings.ToLower(runtimeHandler), "kata") {
processLabel, err = selinux.SELinuxKVMLabel(processLabel)
if err != nil {
return nil, err
}
g.SetProcessSelinuxLabel(processLabel)
}
container.SetMountPoint(mountPoint)
container.SetIDMappings(s.defaultIDMappings)
if s.defaultIDMappings != nil && !s.defaultIDMappings.Empty() {
if securityContext.GetNamespaceOptions().GetIpc() == pb.NamespaceMode_NODE {
g.RemoveMount("/dev/mqueue")
mqueue := spec.Mount{
Type: "bind",
Source: "/dev/mqueue",
Destination: "/dev/mqueue",
Options: []string{"rw", "rbind", "nodev", "nosuid", "noexec"},
}
g.AddMount(mqueue)
}
if hostNetwork {
g.RemoveMount("/sys")
g.RemoveMount("/sys/cgroup")
sysMnt := spec.Mount{
Destination: "/sys",
Type: "bind",
Source: "/sys",
Options: []string{"nosuid", "noexec", "nodev", "ro", "rbind"},
}
g.AddMount(sysMnt)
}
if securityContext.GetNamespaceOptions().GetPid() == pb.NamespaceMode_NODE {
g.RemoveMount("/proc")
proc := spec.Mount{
Type: "bind",
Source: "/proc",
Destination: "/proc",
Options: []string{"rw", "rbind", "nodev", "nosuid", "noexec"},
}
g.AddMount(proc)
}
}
g.SetRootPath(mountPoint)
if os.Getenv(rootlessEnvName) != "" {
makeOCIConfigurationRootless(&g)
}
container.SetSpec(g.Config)
if err := sb.SetInfraContainer(container); err != nil {
return nil, err
}
var ips []string
var result cnitypes.Result
if s.config.ManageNSLifecycle {
ips, result, err = s.networkStart(ctx, sb)
if err != nil {
return nil, err
}
if result != nil {
resultCurrent, err := current.NewResultFromResult(result)
if err != nil {
return nil, err
}
cniResultJSON, err := json.Marshal(resultCurrent)
if err != nil {
return nil, err
}
g.AddAnnotation(annotations.CNIResult, string(cniResultJSON))
}
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: in manageNSLifecycle, stopping network for sandbox %s", sb.ID())
if err2 := s.networkStop(ctx, sb); err2 != nil {
log.Errorf(ctx, "error stopping network on cleanup: %v", err2)
}
}
}()
}
for idx, ip := range ips {
g.AddAnnotation(fmt.Sprintf("%s.%d", annotations.IP, idx), ip)
}
sb.AddIPs(ips)
sb.SetNamespaceOptions(securityContext.GetNamespaceOptions())
spp := securityContext.GetSeccompProfilePath()
g.AddAnnotation(annotations.SeccompProfilePath, spp)
sb.SetSeccompProfilePath(spp)
if !privileged {
if err := s.setupSeccomp(ctx, &g, spp); err != nil {
return nil, err
}
}
err = g.SaveToFile(filepath.Join(podContainer.Dir, "config.json"), saveOptions)
if err != nil {
return nil, fmt.Errorf("failed to save template configuration for pod sandbox %s(%s): %v", sb.Name(), sbox.ID(), err)
}
if err = g.SaveToFile(filepath.Join(podContainer.RunDir, "config.json"), saveOptions); err != nil {
return nil, fmt.Errorf("failed to write runtime configuration for pod sandbox %s(%s): %v", sb.Name(), sbox.ID(), err)
}
s.addInfraContainer(container)
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: removing infra container %s", container.ID())
s.removeInfraContainer(container)
}
}()
if s.defaultIDMappings != nil && !s.defaultIDMappings.Empty() {
rootPair := s.defaultIDMappings.RootPair()
for _, path := range pathsToChown {
if err := os.Chown(path, rootPair.UID, rootPair.GID); err != nil {
return nil, errors.Wrapf(err, "cannot chown %s to %d:%d", path, rootPair.UID, rootPair.GID)
}
}
}
if err := s.createContainerPlatform(container, sb.CgroupParent()); err != nil {
return nil, err
}
if err := s.Runtime().StartContainer(container); err != nil {
return nil, err
}
defer func() {
if retErr != nil {
// Clean-up steps from RemovePodSanbox
log.Infof(ctx, "runSandbox: stopping container %s", container.ID())
if err2 := s.Runtime().StopContainer(ctx, container, int64(10)); err2 != nil {
log.Warnf(ctx, "failed to stop container %s: %v", container.Name(), err2)
}
if err2 := s.Runtime().WaitContainerStateStopped(ctx, container); err2 != nil {
log.Warnf(ctx, "failed to get container 'stopped' status %s in pod sandbox %s: %v", container.Name(), sb.ID(), err2)
}
log.Infof(ctx, "runSandbox: deleting container %s", container.ID())
if err2 := s.Runtime().DeleteContainer(container); err2 != nil {
log.Warnf(ctx, "failed to delete container %s in pod sandbox %s: %v", container.Name(), sb.ID(), err2)
}
log.Infof(ctx, "runSandbox: writing container %s state to disk", container.ID())
if err2 := s.ContainerStateToDisk(container); err2 != nil {
log.Warnf(ctx, "failed to write container state %s in pod sandbox %s: %v", container.Name(), sb.ID(), err2)
}
}
}()
if err := s.ContainerStateToDisk(container); err != nil {
log.Warnf(ctx, "unable to write containers %s state to disk: %v", container.ID(), err)
}
if !s.config.ManageNSLifecycle {
ips, _, err = s.networkStart(ctx, sb)
if err != nil {
return nil, err
}
defer func() {
if retErr != nil {
log.Infof(ctx, "runSandbox: in not manageNSLifecycle, stopping network for sandbox %s", sb.ID())
if err2 := s.networkStop(ctx, sb); err2 != nil {
log.Errorf(ctx, "error stopping network on cleanup: %v", err2)
}
}
}()
}
sb.AddIPs(ips)
sb.SetCreated()
if ctx.Err() == context.Canceled || ctx.Err() == context.DeadlineExceeded {
log.Infof(ctx, "runSandbox: context was either canceled or the deadline was exceeded: %v", ctx.Err())
return nil, ctx.Err()
}
log.Infof(ctx, "Ran pod sandbox %s with infra container: %s", container.ID(), container.Description())
resp = &pb.RunPodSandboxResponse{PodSandboxId: sbox.ID()}
return resp, nil
}
func setupShm(podSandboxRunDir, mountLabel string) (shmPath string, _ error) {
shmPath = filepath.Join(podSandboxRunDir, "shm")
if err := os.Mkdir(shmPath, 0o700); err != nil {
return "", err
}
shmOptions := "mode=1777,size=" + strconv.Itoa(libsandbox.DefaultShmSize)
if err := unix.Mount("shm", shmPath, "tmpfs", unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV,
label.FormatMountLabel(shmOptions, mountLabel)); err != nil {
return "", fmt.Errorf("failed to mount shm tmpfs for pod: %v", err)
}
return shmPath, nil
}
// PauseCommand returns the command to execute in the pause (infra)
// container. A non-empty cfg.PauseCommand (explicitly set by the user,
// since the configuration default is `/pause`) wins; otherwise the command
// is derived from the pause image's Entrypoint and Cmd. It is an error if
// neither source provides a command, or if cfg is nil.
func PauseCommand(cfg *libconfig.Config, image *v1.Image) ([]string, error) {
	if cfg == nil {
		return nil, fmt.Errorf("provided configuration is nil")
	}
	if cfg.PauseCommand != "" {
		// The administrator configured an explicit pause command.
		return []string{cfg.PauseCommand}, nil
	}
	// No configured command: fall back to the image's own entrypoint/cmd.
	if image == nil ||
		(len(image.Config.Entrypoint) == 0 && len(image.Config.Cmd) == 0) {
		return nil, fmt.Errorf(
			"unable to run pause image %q: %s",
			cfg.PauseImage,
			"neither Cmd nor Entrypoint specified",
		)
	}
	command := make([]string, 0, len(image.Config.Entrypoint)+len(image.Config.Cmd))
	command = append(command, image.Config.Entrypoint...)
	command = append(command, image.Config.Cmd...)
	return command, nil
}
// configureGeneratorForSysctls copies the runtime-configured sysctls into
// the spec generator, warning about and skipping any sysctl that is not
// valid under the sandbox's host-network / host-IPC configuration.
func (s *Server) configureGeneratorForSysctls(ctx context.Context, g generate.Generator, hostNetwork, hostIPC bool) {
	configured, err := s.config.RuntimeConfig.Sysctls()
	if err != nil {
		log.Warnf(ctx, "sysctls invalid: %v", err)
	}
	for _, sc := range configured {
		if validateErr := sc.Validate(hostNetwork, hostIPC); validateErr != nil {
			log.Warnf(ctx, "skipping invalid sysctl %s: %v", sc, validateErr)
			continue
		}
		g.AddLinuxSysctl(sc.Key(), sc.Value())
	}
}
// configureGeneratorForSandboxNamespaces set the linux namespaces for the generator, based on whether the pod is sharing namespaces with the host,
// as well as whether CRI-O should be managing the namespace lifecycle.
// it returns a slice of cleanup funcs, all of which are the respective NamespaceRemove() for the sandbox.
// The caller should defer the cleanup funcs if there is an error, to make sure each namespace we are managing is properly cleaned up.
func (s *Server) configureGeneratorForSandboxNamespaces(hostNetwork, hostIPC, hostPID bool, sb *libsandbox.Sandbox, g generate.Generator) (cleanupFuncs []func() error, retErr error) {
	// Namespace types CRI-O itself will create and manage: at most NET, IPC
	// and UTS (PID can never be managed — see the comment below).
	managedNamespaces := make([]libsandbox.NSType, 0, 3)
	if hostNetwork {
		// Sharing the host network: drop the NET namespace from the spec.
		if err := g.RemoveLinuxNamespace(string(spec.NetworkNamespace)); err != nil {
			return nil, err
		}
	} else if s.config.ManageNSLifecycle {
		managedNamespaces = append(managedNamespaces, libsandbox.NETNS)
	}
	if hostIPC {
		// Sharing the host IPC namespace: drop IPC from the spec.
		if err := g.RemoveLinuxNamespace(string(spec.IPCNamespace)); err != nil {
			return nil, err
		}
	} else if s.config.ManageNSLifecycle {
		managedNamespaces = append(managedNamespaces, libsandbox.IPCNS)
	}
	// Since we need a process to hold open the PID namespace, CRI-O can't manage the NS lifecycle
	if hostPID {
		if err := g.RemoveLinuxNamespace(string(spec.PIDNamespace)); err != nil {
			return nil, err
		}
	}
	// There's no option to set hostUTS
	if s.config.ManageNSLifecycle {
		managedNamespaces = append(managedNamespaces, libsandbox.UTSNS)
		// now that we've configured the namespaces we're sharing, tell sandbox to configure them
		// NOTE: this inner managedNamespaces ([]*ManagedNamespace) shadows the
		// outer NSType slice above; only the created namespaces are used below.
		managedNamespaces, err := sb.CreateManagedNamespaces(managedNamespaces, &s.config)
		if err != nil {
			return nil, err
		}
		// Register the remover even if the generator step below fails, so the
		// caller can clean up namespaces that were already created.
		cleanupFuncs = append(cleanupFuncs, sb.RemoveManagedNamespaces)
		if err := configureGeneratorGivenNamespacePaths(managedNamespaces, g); err != nil {
			return cleanupFuncs, err
		}
	}
	return cleanupFuncs, nil
}
// configureGeneratorGivenNamespacePaths takes a map of nsType -> nsPath. It configures the generator
// to add or replace the defaults to these paths.
// Namespaces with an empty path are skipped; a namespace type with no
// corresponding OCI spec namespace type is an error.
func configureGeneratorGivenNamespacePaths(managedNamespaces []*libsandbox.ManagedNamespace, g generate.Generator) error {
	typeToSpec := map[libsandbox.NSType]spec.LinuxNamespaceType{
		libsandbox.IPCNS:  spec.IPCNamespace,
		libsandbox.NETNS:  spec.NetworkNamespace,
		libsandbox.UTSNS:  spec.UTSNamespace,
		libsandbox.USERNS: spec.UserNamespace,
	}
	for _, ns := range managedNamespaces {
		// allow for empty paths, as this namespace just shouldn't be configured
		if ns.Path() == "" {
			continue
		}
		nsForSpec := typeToSpec[ns.Type()]
		if nsForSpec == "" {
			// BUGFIX: report the offending namespace type. The previous code
			// printed nsForSpec, which is provably empty in this branch.
			return errors.Errorf("invalid namespace type %s", ns.Type())
		}
		if err := g.AddOrReplaceLinuxNamespace(string(nsForSpec), ns.Path()); err != nil {
			return err
		}
	}
	return nil
}
|
package nodename
import (
"errors"
"net"
"os"
"strings"
)
// Get returns the name of the current machine as a short hostname, domain
// name, full name (host.domain) and an error.
//
// If the OS-reported hostname already contains a domain it is split
// directly; otherwise the domain is resolved over the network. When that
// network resolution fails, the plain hostname is preserved (previously it
// was clobbered by splitting the empty lookup result) and the resolution
// error is returned alongside it.
func Get() (host string, domain string, full string, err error) {
	host, err = os.Hostname()
	if err != nil {
		return
	}
	host = removeTrailingDot(host)
	host, domain = split2(host, '.')
	if domain != "" {
		full = host + "." + domain
	} else {
		full, err = resolveNetFullname(host)
		// Only derive host/domain from the lookup result on success;
		// on failure full is "" and splitting it would erase host.
		if err == nil {
			full = removeTrailingDot(full)
			host, domain = split2(full, '.')
		}
	}
	return
}
// removeTrailingDot returns s with at most one trailing '.' removed.
func removeTrailingDot(s string) string {
	return strings.TrimSuffix(s, ".")
}
// resolveNetFullname determines the fully-qualified name for host via the
// network: it resolves host's addresses, reverse-resolves each address, and
// returns the first PTR name that begins with "host.". An error is returned
// when the forward lookup fails or no matching PTR name is found.
func resolveNetFullname(host string) (string, error) {
	addrs, err := net.LookupHost(host)
	if err != nil {
		return "", err
	}
	wantPrefix := host + "."
	for _, addr := range addrs {
		names, lookupErr := net.LookupAddr(addr)
		if lookupErr != nil {
			// Try the next address; one unresolvable address is not fatal.
			continue
		}
		for _, name := range names {
			if strings.HasPrefix(name, wantPrefix) {
				return name, nil
			}
		}
	}
	return "", errors.New("network resolution of host domain failed")
}
// split2 splits s at the first occurrence of char, returning the parts
// before and after it. When char is absent, s is returned with an empty tail.
func split2(s string, char byte) (string, string) {
	if idx := strings.IndexRune(s, rune(char)); idx >= 0 {
		return s[:idx], s[idx+1:]
	}
	return s, ""
}
Remember plain hostname even if network lookup fails
package nodename
import (
"errors"
"net"
"os"
"strings"
)
// Get returns the name of the current machine as a short hostname, domain
// name, full name (host.domain) and an error.
//
// If the OS-reported hostname already contains a domain it is split
// directly; otherwise the domain is resolved over the network. Per the
// intent of "Remember plain hostname even if network lookup fails", the
// plain hostname must survive a failed lookup — so the lookup result is
// post-processed only on success. (The previous condition was inverted:
// `if err != nil` split the empty failed result, erasing host.)
func Get() (host string, domain string, full string, err error) {
	host, err = os.Hostname()
	if err != nil {
		return
	}
	host = removeTrailingDot(host)
	host, domain = split2(host, '.')
	if domain != "" {
		full = host + "." + domain
	} else {
		full, err = resolveNetFullname(host)
		if err == nil {
			full = removeTrailingDot(full)
			host, domain = split2(full, '.')
		}
	}
	return
}
// removeTrailingDot returns s with at most one trailing '.' removed.
func removeTrailingDot(s string) string {
	if n := len(s); n > 0 && s[n-1] == '.' {
		return s[:n-1]
	}
	return s
}
// resolveNetFullname determines the fully-qualified name for host via the
// network: it resolves host's addresses, reverse-resolves each address, and
// returns the first PTR name that begins with "host.". An error is returned
// when the forward lookup fails or no matching PTR name is found.
func resolveNetFullname(host string) (string, error) {
	addrs, err := net.LookupHost(host)
	if err != nil {
		return "", err
	}
	wantPrefix := host + "."
	for _, addr := range addrs {
		names, lookupErr := net.LookupAddr(addr)
		if lookupErr != nil {
			// Try the next address; one unresolvable address is not fatal.
			continue
		}
		for _, name := range names {
			if strings.HasPrefix(name, wantPrefix) {
				return name, nil
			}
		}
	}
	return "", errors.New("network resolution of host domain failed")
}
// split2 splits s at the first occurrence of char, returning the parts
// before and after it. When char is absent, s is returned with an empty tail.
func split2(s string, char byte) (string, string) {
	idx := strings.IndexRune(s, rune(char))
	if idx < 0 {
		return s, ""
	}
	return s[:idx], s[idx+1:]
}
|
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package examples_test
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/golang/glog"
)
// validateObject dispatches obj to the validation routine matching its
// concrete API type, recursing element-wise into the list types. It returns
// all accumulated validation errors, or a single error for an unsupported type.
func validateObject(obj runtime.Object) (errors []error) {
	switch t := obj.(type) {
	case *api.ReplicationController:
		// v1beta1-era object: validate the embedded pod manifest.
		errors = validation.ValidateManifest(&t.DesiredState.PodTemplate.DesiredState.Manifest)
	case *api.ReplicationControllerList:
		for i := range t.Items {
			errors = append(errors, validateObject(&t.Items[i])...)
		}
	case *api.Service:
		errors = validation.ValidateService(t)
	case *api.ServiceList:
		for i := range t.Items {
			errors = append(errors, validateObject(&t.Items[i])...)
		}
	case *api.Pod:
		errors = validation.ValidateManifest(&t.DesiredState.Manifest)
	case *api.PodList:
		for i := range t.Items {
			errors = append(errors, validateObject(&t.Items[i])...)
		}
	default:
		return []error{fmt.Errorf("no validation defined for %#v", obj)}
	}
	return errors
}
// walkJSONFiles walks the top level of inDir (subdirectories are skipped)
// and invokes fn for every .json file, passing the extension-less file
// name, the full path, and the raw file contents.
func walkJSONFiles(inDir string, fn func(name, path string, data []byte)) error {
	return filepath.Walk(inDir, func(path string, info os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		// Only examine entries directly under inDir.
		if info.IsDir() && path != inDir {
			return filepath.SkipDir
		}
		base := filepath.Base(path)
		ext := filepath.Ext(base)
		if ext != ".json" {
			return nil
		}
		glog.Infof("Testing %s", path)
		data, readErr := ioutil.ReadFile(path)
		if readErr != nil {
			return readErr
		}
		fn(base[:len(base)-len(ext)], path, data)
		return nil
	})
}
// TestApiExamples decodes every JSON example under ../api/examples into the
// API type registered for it and runs the matching validation. Every
// expected example must be present and no unexpected file may appear.
func TestApiExamples(t *testing.T) {
	expected := map[string]runtime.Object{
		"controller":       &api.ReplicationController{},
		"controller-list":  &api.ReplicationControllerList{},
		"pod":              &api.Pod{},
		"pod-list":         &api.PodList{},
		"service":          &api.Service{},
		"external-service": &api.Service{},
		"service-list":     &api.ServiceList{},
	}
	tested := 0
	err := walkJSONFiles("../api/examples", func(name, path string, data []byte) {
		expectedType, ok := expected[name]
		if !ok {
			t.Errorf("%s does not have a test case defined", path)
			return
		}
		tested++
		if decodeErr := runtime.DefaultCodec.DecodeInto(data, expectedType); decodeErr != nil {
			t.Errorf("%s did not decode correctly: %v\n%s", path, decodeErr, string(data))
			return
		}
		if validationErrs := validateObject(expectedType); len(validationErrs) > 0 {
			t.Errorf("%s did not validate correctly: %v", path, validationErrs)
		}
	})
	if err != nil {
		t.Errorf("Expected no error, Got %v", err)
	}
	if tested != len(expected) {
		t.Errorf("Expected %d examples, Got %d", len(expected), tested)
	}
}
// TestExamples decodes each guestbook example JSON file into its expected
// API type and runs validation; all expected examples must be present.
func TestExamples(t *testing.T) {
	expected := map[string]runtime.Object{
		"frontend-controller":    &api.ReplicationController{},
		"redis-slave-controller": &api.ReplicationController{},
		"redis-master":           &api.Pod{},
		"frontend-service":       &api.Service{},
		"redis-master-service":   &api.Service{},
		"redis-slave-service":    &api.Service{},
	}
	tested := 0
	err := walkJSONFiles("../examples/guestbook", func(name, path string, data []byte) {
		expectedType, ok := expected[name]
		if !ok {
			t.Errorf("%s does not have a test case defined", path)
			return
		}
		tested++
		if decodeErr := runtime.DefaultCodec.DecodeInto(data, expectedType); decodeErr != nil {
			t.Errorf("%s did not decode correctly: %v\n%s", path, decodeErr, string(data))
			return
		}
		if validationErrs := validateObject(expectedType); len(validationErrs) > 0 {
			t.Errorf("%s did not validate correctly: %v", path, validationErrs)
		}
	})
	if err != nil {
		t.Errorf("Expected no error, Got %v", err)
	}
	if tested != len(expected) {
		t.Errorf("Expected %d examples, Got %d", len(expected), tested)
	}
}
// jsonRegexp extracts a fenced markdown code block whose body is a JSON
// object: (?ms) lets ^ match at line starts and . span newlines; the single
// capture group is the {...} payload between the ``` fences.
var jsonRegexp = regexp.MustCompile("(?ms)^```\\w*\\n(\\{.+?\\})\\w*\\n^```")
// TestReadme extracts the first fenced JSON code block from ../README.md
// (if any) and checks that it decodes into an api.Pod, validates, and can
// be re-encoded.
func TestReadme(t *testing.T) {
	path := "../README.md"
	data, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatalf("Unable to read file: %v", err)
	}
	// NOTE(review): FindStringSubmatch returns only the first code block;
	// match[1:] holds its captured JSON payload.
	match := jsonRegexp.FindStringSubmatch(string(data))
	if match == nil {
		// No JSON sample in the README; nothing to verify.
		return
	}
	for _, json := range match[1:] {
		expectedType := &api.Pod{}
		if err := runtime.DefaultCodec.DecodeInto([]byte(json), expectedType); err != nil {
			t.Errorf("%s did not decode correctly: %v\n%s", path, err, string(data))
			return
		}
		if errors := validateObject(expectedType); len(errors) > 0 {
			t.Errorf("%s did not validate correctly: %v", path, errors)
		}
		// Round-trip: the decoded object must encode cleanly again.
		encoded, err := runtime.DefaultCodec.Encode(expectedType)
		if err != nil {
			t.Errorf("Could not encode object: %v", err)
			continue
		}
		t.Logf("Found pod %s\n%s", json, encoded)
	}
}
Simple refactor for ease of readability
runtime.DefaultCodec -> latest.Codec
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package examples_test
import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"testing"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
	_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
	"github.com/golang/glog"
)
// validateObject dispatches obj to the validation routine matching its
// concrete API type, recursing element-wise into the list types. It returns
// all accumulated validation errors, or a single error for an unsupported type.
func validateObject(obj runtime.Object) (errors []error) {
	switch t := obj.(type) {
	case *api.ReplicationController:
		// v1beta1-era object: validate the embedded pod manifest.
		errors = validation.ValidateManifest(&t.DesiredState.PodTemplate.DesiredState.Manifest)
	case *api.ReplicationControllerList:
		for i := range t.Items {
			errors = append(errors, validateObject(&t.Items[i])...)
		}
	case *api.Service:
		errors = validation.ValidateService(t)
	case *api.ServiceList:
		for i := range t.Items {
			errors = append(errors, validateObject(&t.Items[i])...)
		}
	case *api.Pod:
		errors = validation.ValidateManifest(&t.DesiredState.Manifest)
	case *api.PodList:
		for i := range t.Items {
			errors = append(errors, validateObject(&t.Items[i])...)
		}
	default:
		return []error{fmt.Errorf("no validation defined for %#v", obj)}
	}
	return errors
}
// walkJSONFiles walks the top level of inDir (subdirectories are skipped)
// and invokes fn for every .json file, passing the extension-less file
// name, the full path, and the raw file contents.
func walkJSONFiles(inDir string, fn func(name, path string, data []byte)) error {
	return filepath.Walk(inDir, func(path string, info os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		// Only examine entries directly under inDir.
		if info.IsDir() && path != inDir {
			return filepath.SkipDir
		}
		base := filepath.Base(path)
		ext := filepath.Ext(base)
		if ext != ".json" {
			return nil
		}
		glog.Infof("Testing %s", path)
		data, readErr := ioutil.ReadFile(path)
		if readErr != nil {
			return readErr
		}
		fn(base[:len(base)-len(ext)], path, data)
		return nil
	})
}
// TestApiExamples decodes every JSON example under ../api/examples into the
// API type registered for it and runs the matching validation. Every
// expected example must be present and no unexpected file may appear.
func TestApiExamples(t *testing.T) {
	expected := map[string]runtime.Object{
		"controller":       &api.ReplicationController{},
		"controller-list":  &api.ReplicationControllerList{},
		"pod":              &api.Pod{},
		"pod-list":         &api.PodList{},
		"service":          &api.Service{},
		"external-service": &api.Service{},
		"service-list":     &api.ServiceList{},
	}
	tested := 0
	err := walkJSONFiles("../api/examples", func(name, path string, data []byte) {
		expectedType, ok := expected[name]
		if !ok {
			t.Errorf("%s does not have a test case defined", path)
			return
		}
		tested++
		if decodeErr := latest.Codec.DecodeInto(data, expectedType); decodeErr != nil {
			t.Errorf("%s did not decode correctly: %v\n%s", path, decodeErr, string(data))
			return
		}
		if validationErrs := validateObject(expectedType); len(validationErrs) > 0 {
			t.Errorf("%s did not validate correctly: %v", path, validationErrs)
		}
	})
	if err != nil {
		t.Errorf("Expected no error, Got %v", err)
	}
	if tested != len(expected) {
		t.Errorf("Expected %d examples, Got %d", len(expected), tested)
	}
}
// TestExamples decodes each guestbook example JSON file into its expected
// API type and runs validation; all expected examples must be present.
func TestExamples(t *testing.T) {
	expected := map[string]runtime.Object{
		"frontend-controller":    &api.ReplicationController{},
		"redis-slave-controller": &api.ReplicationController{},
		"redis-master":           &api.Pod{},
		"frontend-service":       &api.Service{},
		"redis-master-service":   &api.Service{},
		"redis-slave-service":    &api.Service{},
	}
	tested := 0
	err := walkJSONFiles("../examples/guestbook", func(name, path string, data []byte) {
		expectedType, ok := expected[name]
		if !ok {
			t.Errorf("%s does not have a test case defined", path)
			return
		}
		tested++
		if decodeErr := latest.Codec.DecodeInto(data, expectedType); decodeErr != nil {
			t.Errorf("%s did not decode correctly: %v\n%s", path, decodeErr, string(data))
			return
		}
		if validationErrs := validateObject(expectedType); len(validationErrs) > 0 {
			t.Errorf("%s did not validate correctly: %v", path, validationErrs)
		}
	})
	if err != nil {
		t.Errorf("Expected no error, Got %v", err)
	}
	if tested != len(expected) {
		t.Errorf("Expected %d examples, Got %d", len(expected), tested)
	}
}
// jsonRegexp extracts a fenced markdown code block whose body is a JSON
// object: (?ms) lets ^ match at line starts and . span newlines; the single
// capture group is the {...} payload between the ``` fences.
var jsonRegexp = regexp.MustCompile("(?ms)^```\\w*\\n(\\{.+?\\})\\w*\\n^```")
// TestReadme extracts the first fenced JSON code block from ../README.md
// (if any) and checks that it decodes into an api.Pod, validates, and can
// be re-encoded.
func TestReadme(t *testing.T) {
	path := "../README.md"
	data, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatalf("Unable to read file: %v", err)
	}
	// NOTE(review): FindStringSubmatch returns only the first code block;
	// match[1:] holds its captured JSON payload.
	match := jsonRegexp.FindStringSubmatch(string(data))
	if match == nil {
		// No JSON sample in the README; nothing to verify.
		return
	}
	for _, json := range match[1:] {
		expectedType := &api.Pod{}
		if err := latest.Codec.DecodeInto([]byte(json), expectedType); err != nil {
			t.Errorf("%s did not decode correctly: %v\n%s", path, err, string(data))
			return
		}
		if errors := validateObject(expectedType); len(errors) > 0 {
			t.Errorf("%s did not validate correctly: %v", path, errors)
		}
		// Round-trip: the decoded object must encode cleanly again.
		encoded, err := latest.Codec.Encode(expectedType)
		if err != nil {
			t.Errorf("Could not encode object: %v", err)
			continue
		}
		t.Logf("Found pod %s\n%s", json, encoded)
	}
}
|
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package examples_test
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation"
"github.com/GoogleCloudPlatform/kubernetes/pkg/capabilities"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/yaml"
schedulerapi "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/api"
schedulerapilatest "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/api/latest"
"github.com/golang/glog"
)
// validateObject dispatches obj to the validation routine matching its
// concrete API type, recursing element-wise into the list types. For
// namespaced kinds an empty namespace is defaulted to api.NamespaceDefault
// first (mirroring server-side defaulting) so validation does not reject
// otherwise-valid example files. Returns all accumulated validation errors,
// or a single error for an unsupported type.
func validateObject(obj runtime.Object) (errors []error) {
	switch t := obj.(type) {
	case *api.ReplicationController:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidateReplicationController(t)
	case *api.ReplicationControllerList:
		for i := range t.Items {
			errors = append(errors, validateObject(&t.Items[i])...)
		}
	case *api.Service:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidateService(t)
	case *api.ServiceList:
		for i := range t.Items {
			errors = append(errors, validateObject(&t.Items[i])...)
		}
	case *api.Pod:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidatePod(t)
	case *api.PodList:
		for i := range t.Items {
			errors = append(errors, validateObject(&t.Items[i])...)
		}
	case *api.PersistentVolume:
		// PersistentVolume is cluster-scoped; no namespace defaulting.
		errors = validation.ValidatePersistentVolume(t)
	case *api.PersistentVolumeClaim:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidatePersistentVolumeClaim(t)
	case *api.PodTemplate:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidatePodTemplate(t)
	case *api.Endpoints:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidateEndpoints(t)
	case *api.Namespace:
		// Namespace is cluster-scoped; no namespace defaulting.
		errors = validation.ValidateNamespace(t)
	case *api.Secret:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidateSecret(t)
	case *api.LimitRange:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidateLimitRange(t)
	case *api.ResourceQuota:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidateResourceQuota(t)
	default:
		return []error{fmt.Errorf("no validation defined for %#v", obj)}
	}
	return errors
}
// walkJSONFiles walks the top level of inDir (subdirectories are skipped)
// and invokes fn for every .json or .yaml file. YAML content is converted
// to JSON first; fn receives the extension-less file name, the full path,
// and the (JSON) contents.
func walkJSONFiles(inDir string, fn func(name, path string, data []byte)) error {
	return filepath.Walk(inDir, func(path string, info os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		// Only examine entries directly under inDir.
		if info.IsDir() && path != inDir {
			return filepath.SkipDir
		}
		file := filepath.Base(path)
		ext := filepath.Ext(file)
		if ext != ".json" && ext != ".yaml" {
			return nil
		}
		glog.Infof("Testing %s", path)
		data, readErr := ioutil.ReadFile(path)
		if readErr != nil {
			return readErr
		}
		if ext == ".yaml" {
			converted, convErr := yaml.ToJSON(data)
			if convErr != nil {
				return fmt.Errorf("%s: %v", path, convErr)
			}
			data = converted
		}
		fn(strings.TrimSuffix(file, ext), path, data)
		return nil
	})
}
// TestExampleObjectSchemas walks each listed directory, decodes every
// .json/.yaml file into the API type registered for it in `cases`, and runs
// the matching validation. A nil expected type marks a file that is known
// but deliberately skipped. Every expected file must be present, and no
// unexpected file may appear in a covered directory.
func TestExampleObjectSchemas(t *testing.T) {
	// Directory -> (extension-less file name -> expected decoded type).
	cases := map[string]map[string]runtime.Object{
		"../cmd/integration": {
			"v1beta3-controller": &api.ReplicationController{},
			"v1-controller":      &api.ReplicationController{},
		},
		"../examples/guestbook": {
			"frontend-controller":     &api.ReplicationController{},
			"redis-slave-controller":  &api.ReplicationController{},
			"redis-master-controller": &api.ReplicationController{},
			"frontend-service":        &api.Service{},
			"redis-master-service":    &api.Service{},
			"redis-slave-service":     &api.Service{},
		},
		"../examples/guestbook-go": {
			"guestbook-controller":    &api.ReplicationController{},
			"redis-slave-controller":  &api.ReplicationController{},
			"redis-master-controller": &api.ReplicationController{},
			"guestbook-service":       &api.Service{},
			"redis-master-service":    &api.Service{},
			"redis-slave-service":     &api.Service{},
		},
		"../docs/user-guide/walkthrough": {
			"pod-nginx":                 &api.Pod{},
			"pod-nginx-with-label":      &api.Pod{},
			"pod-redis":                 &api.Pod{},
			"pod-with-http-healthcheck": &api.Pod{},
			"service":                   &api.Service{},
			"replication-controller":    &api.ReplicationController{},
			"podtemplate":               &api.PodTemplate{},
		},
		"../docs/user-guide/update-demo": {
			"kitten-rc":   &api.ReplicationController{},
			"nautilus-rc": &api.ReplicationController{},
		},
		"../docs/user-guide/persistent-volumes/volumes": {
			"local-01": &api.PersistentVolume{},
			"local-02": &api.PersistentVolume{},
			"gce":      &api.PersistentVolume{},
			"nfs":      &api.PersistentVolume{},
		},
		"../docs/user-guide/persistent-volumes/claims": {
			"claim-01": &api.PersistentVolumeClaim{},
			"claim-02": &api.PersistentVolumeClaim{},
			"claim-03": &api.PersistentVolumeClaim{},
		},
		"../docs/user-guide/persistent-volumes/simpletest": {
			"namespace": &api.Namespace{},
			"pod":       &api.Pod{},
			"service":   &api.Service{},
		},
		"../examples/iscsi": {
			"iscsi": &api.Pod{},
		},
		"../examples/glusterfs": {
			"glusterfs-pod":       &api.Pod{},
			"glusterfs-endpoints": &api.Endpoints{},
		},
		"../docs/user-guide/liveness": {
			"exec-liveness": &api.Pod{},
			"http-liveness": &api.Pod{},
		},
		"../docs/user-guide": {
			"multi-pod":   nil,
			"pod":         &api.Pod{},
			"replication": &api.ReplicationController{},
		},
		"../examples": {
			"scheduler-policy-config": &schedulerapi.Policy{},
		},
		"../examples/rbd/secret": {
			"ceph-secret": &api.Secret{},
		},
		"../examples/rbd": {
			"rbd":             &api.Pod{},
			"rbd-with-secret": &api.Pod{},
		},
		"../examples/cassandra": {
			"cassandra-controller": &api.ReplicationController{},
			"cassandra-service":    &api.Service{},
			"cassandra":            &api.Pod{},
		},
		"../examples/celery-rabbitmq": {
			"celery-controller":   &api.ReplicationController{},
			"flower-controller":   &api.ReplicationController{},
			"flower-service":      &api.Service{},
			"rabbitmq-controller": &api.ReplicationController{},
			"rabbitmq-service":    &api.Service{},
		},
		"../examples/cluster-dns": {
			"dns-backend-rc":      &api.ReplicationController{},
			"dns-backend-service": &api.Service{},
			"dns-frontend-pod":    &api.Pod{},
			"namespace-dev":       &api.Namespace{},
			"namespace-prod":      &api.Namespace{},
		},
		"../docs/user-guide/downward-api": {
			"dapi-pod": &api.Pod{},
		},
		"../examples/elasticsearch": {
			"apiserver-secret": nil,
			"music-rc":         &api.ReplicationController{},
			"music-service":    &api.Service{},
		},
		"../examples/explorer": {
			"pod": &api.Pod{},
		},
		"../examples/hazelcast": {
			"hazelcast-controller": &api.ReplicationController{},
			"hazelcast-service":    &api.Service{},
		},
		"../docs/admin/namespaces": {
			"namespace-dev":  &api.Namespace{},
			"namespace-prod": &api.Namespace{},
		},
		"../docs/user-guide/limitrange": {
			"invalid-pod": &api.Pod{},
			"limits":      &api.LimitRange{},
			"namespace":   &api.Namespace{},
			"valid-pod":   &api.Pod{},
		},
		"../docs/user-guide/logging-demo": {
			"synthetic_0_25lps": &api.Pod{},
			"synthetic_10lps":   &api.Pod{},
		},
		"../examples/meteor": {
			"meteor-controller": &api.ReplicationController{},
			"meteor-service":    &api.Service{},
			"mongo-pod":         &api.Pod{},
			"mongo-service":     &api.Service{},
		},
		"../examples/mysql-wordpress-pd": {
			"mysql-service":     &api.Service{},
			"mysql":             &api.Pod{},
			"wordpress-service": &api.Service{},
			"wordpress":         &api.Pod{},
		},
		"../examples/nfs": {
			"nfs-server-pod":     &api.Pod{},
			"nfs-server-service": &api.Service{},
			"nfs-web-pod":        &api.Pod{},
		},
		"../docs/user-guide/node-selection": {
			"pod": &api.Pod{},
		},
		"../examples/openshift-origin": {
			"openshift-controller": &api.ReplicationController{},
			"openshift-service":    &api.Service{},
		},
		"../examples/phabricator": {
			"authenticator-controller": &api.ReplicationController{},
			"phabricator-controller":   &api.ReplicationController{},
			"phabricator-service":      &api.Service{},
		},
		"../examples/redis": {
			"redis-controller":          &api.ReplicationController{},
			"redis-master":              &api.Pod{},
			"redis-proxy":               &api.Pod{},
			"redis-sentinel-controller": &api.ReplicationController{},
			"redis-sentinel-service":    &api.Service{},
		},
		"../docs/user-guide/resourcequota": {
			"namespace": &api.Namespace{},
			"limits":    &api.LimitRange{},
			"quota":     &api.ResourceQuota{},
		},
		"../examples/rethinkdb": {
			"admin-pod":      &api.Pod{},
			"admin-service":  &api.Service{},
			"driver-service": &api.Service{},
			"rc":             &api.ReplicationController{},
		},
		"../docs/user-guide/secrets": {
			"secret-pod": &api.Pod{},
			"secret":     &api.Secret{},
		},
		"../examples/spark": {
			"spark-master-service":    &api.Service{},
			"spark-master":            &api.Pod{},
			"spark-worker-controller": &api.ReplicationController{},
		},
		"../examples/storm": {
			"storm-nimbus-service":    &api.Service{},
			"storm-nimbus":            &api.Pod{},
			"storm-worker-controller": &api.ReplicationController{},
			"zookeeper-service":       &api.Service{},
			"zookeeper":               &api.Pod{},
		},
	}
	// Some examples use privileged containers; allow them during validation.
	capabilities.SetForTests(capabilities.Capabilities{
		AllowPrivileged: true,
	})
	for path, expected := range cases {
		tested := 0
		// NOTE(review): the closure parameter `path` (the file path) shadows
		// the loop variable `path` (the directory) inside the callback.
		err := walkJSONFiles(path, func(name, path string, data []byte) {
			expectedType, found := expected[name]
			if !found {
				t.Errorf("%s: %s does not have a test case defined", path, name)
				return
			}
			tested++
			if expectedType == nil {
				// nil marks a known file that is deliberately not validated.
				t.Logf("skipping : %s/%s\n", path, name)
				return
			}
			if name == "scheduler-policy-config" {
				// Scheduler policy uses its own codec, not the API codec.
				if err := schedulerapilatest.Codec.DecodeInto(data, expectedType); err != nil {
					t.Errorf("%s did not decode correctly: %v\n%s", path, err, string(data))
					return
				}
				//TODO: Add validate method for &schedulerapi.Policy
			} else {
				if err := latest.Codec.DecodeInto(data, expectedType); err != nil {
					t.Errorf("%s did not decode correctly: %v\n%s", path, err, string(data))
					return
				}
				if errors := validateObject(expectedType); len(errors) > 0 {
					t.Errorf("%s did not validate correctly: %v", path, errors)
				}
			}
		})
		if err != nil {
			t.Errorf("Expected no error, Got %v", err)
		}
		if tested != len(expected) {
			t.Logf("failing path: %q", path)
			t.Errorf("Expected %d examples, Got %d", len(expected), tested)
		}
	}
}
// This regex is tricky, but it works. For future me, here is the decode:
//
// Flags: (?ms) = multiline match, allow . to match \n
// 1) Look for a line that starts with ``` (a markdown code block)
// 2) (?: ... ) = non-capturing group
// 3) (?P<name>...) = capture group named "name"
// 4) Look for #1 followed by either:
// 4a) "yaml" followed by any word-characters followed by a newline (e.g. ```yamlfoo\n)
// 4b) any word-characters followed by a newline (e.g. ```json\n)
// 5) Look for either:
// 5a) #4a followed by one or more characters (non-greedy)
// 5b) #4b followed by { followed by one or more characters (non-greedy) followed by }
// 6) Look for #5 followed by a newline followed by ``` (end of the code block)
//
// Note: both branches name their payload group "content"; Go's regexp
// permits duplicate group names, and the reader checks which one matched.
//
// This could probably be simplified, but is already too delicate. Before any
// real changes, we should have a test case that just tests this regex.
var sampleRegexp = regexp.MustCompile("(?ms)^```(?:(?P<type>yaml)\\w*\\n(?P<content>.+?)|\\w*\\n(?P<content>\\{.+?\\}))\\n^```")

// subsetRegexp detects an ellipsis ("...") in a sample, which marks a
// deliberately truncated snippet that cannot be decoded.
var subsetRegexp = regexp.MustCompile("(?ms)\\.{3}")
// TestReadme decodes and validates every YAML/JSON sample embedded in the
// markdown files listed below. Each file declares the object types its
// samples should decode into, in order of appearance; a single declared
// type is reused for every sample in that file.
func TestReadme(t *testing.T) {
	paths := []struct {
		file         string
		expectedType []runtime.Object
	}{
		{"../README.md", []runtime.Object{&api.Pod{}}},
		{"../docs/user-guide/walkthrough/README.md", []runtime.Object{&api.Pod{}}},
		{"../examples/iscsi/README.md", []runtime.Object{&api.Pod{}}},
		{"../docs/user-guide/simple-yaml.md", []runtime.Object{&api.Pod{}, &api.ReplicationController{}}},
	}

	for _, path := range paths {
		data, err := ioutil.ReadFile(path.file)
		if err != nil {
			// Report path.file, not the whole struct, for a readable message.
			t.Errorf("Unable to read file %s: %v", path.file, err)
			continue
		}
		matches := sampleRegexp.FindAllStringSubmatch(string(data), -1)
		if matches == nil {
			continue
		}
		ix := 0
		for _, match := range matches {
			var content, subtype string
			for i, name := range sampleRegexp.SubexpNames() {
				if name == "type" {
					subtype = match[i]
				}
				if name == "content" && match[i] != "" {
					content = match[i]
				}
			}
			// Skip YAML samples that are deliberately elided with "...".
			if subtype == "yaml" && subsetRegexp.FindString(content) != "" {
				t.Logf("skipping (%s): \n%s", subtype, content)
				continue
			}
			var expectedType runtime.Object
			if len(path.expectedType) == 1 {
				expectedType = path.expectedType[0]
			} else {
				// Guard against more samples than declared types, which
				// would otherwise panic with an index out of range.
				if ix >= len(path.expectedType) {
					t.Errorf("%s: more samples than expected types (%d)", path.file, len(path.expectedType))
					break
				}
				expectedType = path.expectedType[ix]
				ix++
			}
			json, err := yaml.ToJSON([]byte(content))
			if err != nil {
				t.Errorf("%s could not be converted to JSON: %v\n%s", path.file, err, string(content))
				// The sample is not valid; decoding it would only add noise.
				continue
			}
			if err := latest.Codec.DecodeInto(json, expectedType); err != nil {
				t.Errorf("%s did not decode correctly: %v\n%s", path.file, err, string(content))
				continue
			}
			if errors := validateObject(expectedType); len(errors) > 0 {
				t.Errorf("%s did not validate correctly: %v", path.file, errors)
			}
			if _, err := latest.Codec.Encode(expectedType); err != nil {
				t.Errorf("Could not encode object: %v", err)
				continue
			}
		}
	}
}
Make examples test easier to debug
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package examples_test
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation"
"github.com/GoogleCloudPlatform/kubernetes/pkg/capabilities"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/yaml"
schedulerapi "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/api"
schedulerapilatest "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/api/latest"
"github.com/golang/glog"
)
// validateObject runs the API validation routine matching obj's concrete
// type, defaulting any empty Namespace to api.NamespaceDefault first for
// namespaced kinds. List types are validated item by item. Unknown types
// yield a single "no validation defined" error.
func validateObject(obj runtime.Object) (errors []error) {
	switch t := obj.(type) {
	case *api.ReplicationController:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidateReplicationController(t)
	case *api.ReplicationControllerList:
		for i := range t.Items {
			errors = append(errors, validateObject(&t.Items[i])...)
		}
	case *api.Service:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidateService(t)
	case *api.ServiceList:
		for i := range t.Items {
			errors = append(errors, validateObject(&t.Items[i])...)
		}
	case *api.Pod:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidatePod(t)
	case *api.PodList:
		for i := range t.Items {
			errors = append(errors, validateObject(&t.Items[i])...)
		}
	case *api.PersistentVolume:
		// PersistentVolumes are cluster-scoped; no namespace defaulting.
		errors = validation.ValidatePersistentVolume(t)
	case *api.PersistentVolumeClaim:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidatePersistentVolumeClaim(t)
	case *api.PodTemplate:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidatePodTemplate(t)
	case *api.Endpoints:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidateEndpoints(t)
	case *api.Namespace:
		errors = validation.ValidateNamespace(t)
	case *api.Secret:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidateSecret(t)
	case *api.LimitRange:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidateLimitRange(t)
	case *api.ResourceQuota:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidateResourceQuota(t)
	default:
		return []error{fmt.Errorf("no validation defined for %#v", obj)}
	}
	return errors
}
// walkJSONFiles invokes fn for every .json or .yaml file directly inside
// inDir (subdirectories are not descended into). YAML contents are
// converted to JSON before fn sees them; the name passed to fn is the
// base filename without its extension.
func walkJSONFiles(inDir string, fn func(name, path string, data []byte)) error {
	return filepath.Walk(inDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Stay at the top level of inDir.
		if info.IsDir() && path != inDir {
			return filepath.SkipDir
		}
		base := filepath.Base(path)
		ext := filepath.Ext(base)
		if ext != ".json" && ext != ".yaml" {
			return nil
		}
		glog.Infof("Testing %s", path)
		data, err := ioutil.ReadFile(path)
		if err != nil {
			return err
		}
		if ext == ".yaml" {
			converted, err := yaml.ToJSON(data)
			if err != nil {
				return fmt.Errorf("%s: %v", path, err)
			}
			data = converted
		}
		fn(strings.TrimSuffix(base, ext), path, data)
		return nil
	})
}
// TestExampleObjectSchemas decodes and validates every example manifest
// under the listed directories. Each directory maps file basenames (no
// extension) to the object type each file must decode into; a nil value
// means the file is known but deliberately skipped. Files with no entry,
// and entries with no file, both fail the test.
func TestExampleObjectSchemas(t *testing.T) {
	cases := map[string]map[string]runtime.Object{
		"../cmd/integration": {
			"v1beta3-controller": &api.ReplicationController{},
			"v1-controller":      &api.ReplicationController{},
		},
		"../examples/guestbook": {
			"frontend-controller":     &api.ReplicationController{},
			"redis-slave-controller":  &api.ReplicationController{},
			"redis-master-controller": &api.ReplicationController{},
			"frontend-service":        &api.Service{},
			"redis-master-service":    &api.Service{},
			"redis-slave-service":     &api.Service{},
		},
		"../examples/guestbook-go": {
			"guestbook-controller":    &api.ReplicationController{},
			"redis-slave-controller":  &api.ReplicationController{},
			"redis-master-controller": &api.ReplicationController{},
			"guestbook-service":       &api.Service{},
			"redis-master-service":    &api.Service{},
			"redis-slave-service":     &api.Service{},
		},
		"../docs/user-guide/walkthrough": {
			"pod-nginx":                 &api.Pod{},
			"pod-nginx-with-label":      &api.Pod{},
			"pod-redis":                 &api.Pod{},
			"pod-with-http-healthcheck": &api.Pod{},
			"service":                   &api.Service{},
			"replication-controller":    &api.ReplicationController{},
			"podtemplate":               &api.PodTemplate{},
		},
		"../docs/user-guide/update-demo": {
			"kitten-rc":   &api.ReplicationController{},
			"nautilus-rc": &api.ReplicationController{},
		},
		"../docs/user-guide/persistent-volumes/volumes": {
			"local-01": &api.PersistentVolume{},
			"local-02": &api.PersistentVolume{},
			"gce":      &api.PersistentVolume{},
			"nfs":      &api.PersistentVolume{},
		},
		"../docs/user-guide/persistent-volumes/claims": {
			"claim-01": &api.PersistentVolumeClaim{},
			"claim-02": &api.PersistentVolumeClaim{},
			"claim-03": &api.PersistentVolumeClaim{},
		},
		"../docs/user-guide/persistent-volumes/simpletest": {
			"namespace": &api.Namespace{},
			"pod":       &api.Pod{},
			"service":   &api.Service{},
		},
		"../examples/iscsi": {
			"iscsi": &api.Pod{},
		},
		"../examples/glusterfs": {
			"glusterfs-pod":       &api.Pod{},
			"glusterfs-endpoints": &api.Endpoints{},
		},
		"../docs/user-guide/liveness": {
			"exec-liveness": &api.Pod{},
			"http-liveness": &api.Pod{},
		},
		"../docs/user-guide": {
			// multi-pod contains several documents; skipped (nil).
			"multi-pod":   nil,
			"pod":         &api.Pod{},
			"replication": &api.ReplicationController{},
		},
		"../examples": {
			"scheduler-policy-config": &schedulerapi.Policy{},
		},
		"../examples/rbd/secret": {
			"ceph-secret": &api.Secret{},
		},
		"../examples/rbd": {
			"rbd":             &api.Pod{},
			"rbd-with-secret": &api.Pod{},
		},
		"../examples/cassandra": {
			"cassandra-controller": &api.ReplicationController{},
			"cassandra-service":    &api.Service{},
			"cassandra":            &api.Pod{},
		},
		"../examples/celery-rabbitmq": {
			"celery-controller":   &api.ReplicationController{},
			"flower-controller":   &api.ReplicationController{},
			"flower-service":      &api.Service{},
			"rabbitmq-controller": &api.ReplicationController{},
			"rabbitmq-service":    &api.Service{},
		},
		"../examples/cluster-dns": {
			"dns-backend-rc":      &api.ReplicationController{},
			"dns-backend-service": &api.Service{},
			"dns-frontend-pod":    &api.Pod{},
			"namespace-dev":       &api.Namespace{},
			"namespace-prod":      &api.Namespace{},
		},
		"../docs/user-guide/downward-api": {
			"dapi-pod": &api.Pod{},
		},
		"../examples/elasticsearch": {
			// apiserver-secret is a template, not a complete object; skipped.
			"apiserver-secret": nil,
			"music-rc":         &api.ReplicationController{},
			"music-service":    &api.Service{},
		},
		"../examples/explorer": {
			"pod": &api.Pod{},
		},
		"../examples/hazelcast": {
			"hazelcast-controller": &api.ReplicationController{},
			"hazelcast-service":    &api.Service{},
		},
		"../docs/admin/namespaces": {
			"namespace-dev":  &api.Namespace{},
			"namespace-prod": &api.Namespace{},
		},
		"../docs/user-guide/limitrange": {
			"invalid-pod": &api.Pod{},
			"limits":      &api.LimitRange{},
			"namespace":   &api.Namespace{},
			"valid-pod":   &api.Pod{},
		},
		"../docs/user-guide/logging-demo": {
			"synthetic_0_25lps": &api.Pod{},
			"synthetic_10lps":   &api.Pod{},
		},
		"../examples/meteor": {
			"meteor-controller": &api.ReplicationController{},
			"meteor-service":    &api.Service{},
			"mongo-pod":         &api.Pod{},
			"mongo-service":     &api.Service{},
		},
		"../examples/mysql-wordpress-pd": {
			"mysql-service":     &api.Service{},
			"mysql":             &api.Pod{},
			"wordpress-service": &api.Service{},
			"wordpress":         &api.Pod{},
		},
		"../examples/nfs": {
			"nfs-server-pod":     &api.Pod{},
			"nfs-server-service": &api.Service{},
			"nfs-web-pod":        &api.Pod{},
		},
		"../docs/user-guide/node-selection": {
			"pod": &api.Pod{},
		},
		"../examples/openshift-origin": {
			"openshift-controller": &api.ReplicationController{},
			"openshift-service":    &api.Service{},
		},
		"../examples/phabricator": {
			"authenticator-controller": &api.ReplicationController{},
			"phabricator-controller":   &api.ReplicationController{},
			"phabricator-service":      &api.Service{},
		},
		"../examples/redis": {
			"redis-controller":          &api.ReplicationController{},
			"redis-master":              &api.Pod{},
			"redis-proxy":               &api.Pod{},
			"redis-sentinel-controller": &api.ReplicationController{},
			"redis-sentinel-service":    &api.Service{},
		},
		"../docs/user-guide/resourcequota": {
			"namespace": &api.Namespace{},
			"limits":    &api.LimitRange{},
			"quota":     &api.ResourceQuota{},
		},
		"../examples/rethinkdb": {
			"admin-pod":      &api.Pod{},
			"admin-service":  &api.Service{},
			"driver-service": &api.Service{},
			"rc":             &api.ReplicationController{},
		},
		"../docs/user-guide/secrets": {
			"secret-pod": &api.Pod{},
			"secret":     &api.Secret{},
		},
		"../examples/spark": {
			"spark-master-service":    &api.Service{},
			"spark-master":            &api.Pod{},
			"spark-worker-controller": &api.ReplicationController{},
		},
		"../examples/storm": {
			"storm-nimbus-service":    &api.Service{},
			"storm-nimbus":            &api.Pod{},
			"storm-worker-controller": &api.ReplicationController{},
			"zookeeper-service":       &api.Service{},
			"zookeeper":               &api.Pod{},
		},
	}

	// Some examples declare privileged pods; allow them to validate.
	capabilities.SetForTests(capabilities.Capabilities{
		AllowPrivileged: true,
	})
	for path, expected := range cases {
		// tested counts files seen, so missing files are caught below.
		tested := 0
		err := walkJSONFiles(path, func(name, path string, data []byte) {
			expectedType, found := expected[name]
			if !found {
				t.Errorf("%s: %s does not have a test case defined", path, name)
				return
			}
			tested++
			if expectedType == nil {
				t.Logf("skipping : %s/%s\n", path, name)
				return
			}
			if name == "scheduler-policy-config" {
				// Scheduler policy uses its own codec, not the API codec.
				if err := schedulerapilatest.Codec.DecodeInto(data, expectedType); err != nil {
					t.Errorf("%s did not decode correctly: %v\n%s", path, err, string(data))
					return
				}
				//TODO: Add validate method for &schedulerapi.Policy
			} else {
				if err := latest.Codec.DecodeInto(data, expectedType); err != nil {
					t.Errorf("%s did not decode correctly: %v\n%s", path, err, string(data))
					return
				}
				if errors := validateObject(expectedType); len(errors) > 0 {
					t.Errorf("%s did not validate correctly: %v", path, errors)
				}
			}
		})
		if err != nil {
			t.Errorf("Expected no error, Got %v", err)
		}
		if tested != len(expected) {
			t.Errorf("Directory %v: Expected %d examples, Got %d", path, len(expected), tested)
		}
	}
}
// This regex is tricky, but it works. For future me, here is the decode:
//
// Flags: (?ms) = multiline match, allow . to match \n
// 1) Look for a line that starts with ``` (a markdown code block)
// 2) (?: ... ) = non-capturing group
// 3) (P<name>) = capture group as "name"
// 4) Look for #1 followed by either:
// 4a) "yaml" followed by any word-characters followed by a newline (e.g. ```yamlfoo\n)
// 4b) any word-characters followed by a newline (e.g. ```json\n)
// 5) Look for either:
// 5a) #4a followed by one or more characters (non-greedy)
// 5b) #4b followed by { followed by one or more characters (non-greedy) followed by }
// 6) Look for #5 followed by a newline followed by ``` (end of the code block)
//
// This could probably be simplified, but is already too delicate. Before any
// real changes, we should have a test case that just tests this regex.
var sampleRegexp = regexp.MustCompile("(?ms)^```(?:(?P<type>yaml)\\w*\\n(?P<content>.+?)|\\w*\\n(?P<content>\\{.+?\\}))\\n^```")

// subsetRegexp matches "..." anywhere in a sample; used to skip YAML
// snippets that are deliberately elided/partial in the docs.
var subsetRegexp = regexp.MustCompile("(?ms)\\.{3}")
// TestReadme decodes and validates every YAML/JSON sample embedded in the
// markdown files listed below. Each file declares the object types its
// samples should decode into, in order of appearance; a single declared
// type is reused for every sample in that file.
func TestReadme(t *testing.T) {
	paths := []struct {
		file         string
		expectedType []runtime.Object
	}{
		{"../README.md", []runtime.Object{&api.Pod{}}},
		{"../docs/user-guide/walkthrough/README.md", []runtime.Object{&api.Pod{}}},
		{"../examples/iscsi/README.md", []runtime.Object{&api.Pod{}}},
		{"../docs/user-guide/simple-yaml.md", []runtime.Object{&api.Pod{}, &api.ReplicationController{}}},
	}

	for _, path := range paths {
		data, err := ioutil.ReadFile(path.file)
		if err != nil {
			// Report path.file, not the whole struct, for a readable message.
			t.Errorf("Unable to read file %s: %v", path.file, err)
			continue
		}
		matches := sampleRegexp.FindAllStringSubmatch(string(data), -1)
		if matches == nil {
			continue
		}
		ix := 0
		for _, match := range matches {
			var content, subtype string
			for i, name := range sampleRegexp.SubexpNames() {
				if name == "type" {
					subtype = match[i]
				}
				if name == "content" && match[i] != "" {
					content = match[i]
				}
			}
			// Skip YAML samples that are deliberately elided with "...".
			if subtype == "yaml" && subsetRegexp.FindString(content) != "" {
				t.Logf("skipping (%s): \n%s", subtype, content)
				continue
			}
			var expectedType runtime.Object
			if len(path.expectedType) == 1 {
				expectedType = path.expectedType[0]
			} else {
				// Guard against more samples than declared types, which
				// would otherwise panic with an index out of range.
				if ix >= len(path.expectedType) {
					t.Errorf("%s: more samples than expected types (%d)", path.file, len(path.expectedType))
					break
				}
				expectedType = path.expectedType[ix]
				ix++
			}
			json, err := yaml.ToJSON([]byte(content))
			if err != nil {
				t.Errorf("%s could not be converted to JSON: %v\n%s", path.file, err, string(content))
				// The sample is not valid; decoding it would only add noise.
				continue
			}
			if err := latest.Codec.DecodeInto(json, expectedType); err != nil {
				t.Errorf("%s did not decode correctly: %v\n%s", path.file, err, string(content))
				continue
			}
			if errors := validateObject(expectedType); len(errors) > 0 {
				t.Errorf("%s did not validate correctly: %v", path.file, errors)
			}
			if _, err := latest.Codec.Encode(expectedType); err != nil {
				t.Errorf("Could not encode object: %v", err)
				continue
			}
		}
	}
}
|
package main
import (
"bufio"
"flag"
"fmt"
"github.com/nu7hatch/gouuid"
"io"
"log"
"net"
"net/url"
"os"
"os/signal"
"syscall"
"time"
)
// exchangeid identifies a single exchange instance.
type exchangeid string

// Exchange accepts crawler connections, collects URLs from them, and
// routes each URL back out to the crawler responsible for it.
type Exchange struct {
	id     exchangeid   // this exchange's identity
	router *Router      // maps URLs to the crawler that should fetch them
	socket net.Listener // listener accepting crawler connections
	uchan  chan string  // queue of URLs awaiting distribution
}
// NewExchange builds an Exchange listening on socket, with a fresh router
// and an unbuffered URL queue.
func NewExchange(id string, socket net.Listener) *Exchange {
	return &Exchange{
		id:     exchangeid(id),
		router: NewRouter(),
		socket: socket,
		uchan:  make(chan string),
	}
}
// Start runs the exchange until either the caller signals quit or the
// accept loop stops on its own (e.g. the listener failed). It then closes
// the URL queue, waits for the distributor to drain, and reports
// completion on quitted.
func (e *Exchange) Start(quit <-chan bool, quitted chan<- bool) {
	squit := make(chan bool, 1)
	uquit := make(chan bool, 1)
	go e.waitClient(squit)
	go e.distributeUrl(uquit)
	select {
	case <-quit:
		// Closing the listener unblocks Accept, letting waitClient exit.
		e.socket.Close()
		<-squit
	case <-squit:
	}
	// No more URLs will be produced; let distributeUrl finish and exit.
	close(e.uchan)
	<-uquit
	quitted <- true
}
// waitClient accepts connections in a loop, spawning one handler
// goroutine per client, and signals quit once the listener stops
// accepting (e.g. after it is closed by Start).
func (e *Exchange) waitClient(quit chan<- bool) {
	for {
		log.Printf("Waiting client")
		conn, err := e.socket.Accept()
		log.Printf("Accepting client")
		if err != nil {
			log.Println(err)
			break
		}
		log.Printf("Connected from %s", conn.RemoteAddr().String())
		go e.handleConnection(conn)
	}
	quit <- true
}
// handleConnection registers the client as a crawler, reads
// newline-terminated URLs from it, and queues valid http(s) URLs for
// distribution. The crawler is deregistered and the connection closed
// when the client disconnects or a read error occurs.
func (e *Exchange) handleConnection(client net.Conn) {
	crawler := NewCrawler(e.id, client)
	e.router.Add(crawler)
	defer func() {
		e.router.Remove(crawler)
		client.Close()
	}()
	reader := bufio.NewReader(crawler.GetConn())
	for {
		rawurl, err := reader.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				log.Printf("Connection closed by %s", client.RemoteAddr().String())
			} else {
				log.Println(err)
			}
			break
		}
		// Strip the trailing newline BEFORE parsing: url.Parse rejects
		// control characters, so "http://x\n" would never validate.
		// err == nil guarantees the delimiter is present, so this is safe.
		rawurl = rawurl[:len(rawurl)-1]
		switch parsed, err := url.Parse(rawurl); {
		case err != nil:
			log.Printf("Invalid URL: %s (%v)", rawurl, err)
		case parsed.Scheme != "http" && parsed.Scheme != "https":
			log.Printf("Invalid URL: %s", rawurl)
		default:
			e.uchan <- rawurl
			log.Printf("Got a URL from %s: %s", client.RemoteAddr().String(), rawurl)
		}
	}
}
// distributeUrl drains e.uchan, routing each URL to a crawler. URLs that
// cannot be routed yet are re-queued. Locally-connected crawlers receive
// the URL over their connection; remote exchanges are not implemented.
func (e *Exchange) distributeUrl(quit chan<- bool) {
	for rawurl := range e.uchan {
		crawler, err := e.router.Route(rawurl)
		if err != nil {
			log.Println(err)
			// NOTE(review): re-queueing from the sole consumer of an
			// unbuffered channel can deadlock, and panics once Start has
			// closed uchan — worth revisiting.
			e.uchan <- rawurl
			continue
		}
		eid := crawler.GetExchangeId()
		if eid == e.id {
			// Never use the URL itself as a format string: a '%' in the
			// URL would be misinterpreted by Fprintf.
			fmt.Fprintf(crawler.GetConn(), "%s\n", rawurl)
		} else {
			// amqp.Publish(exchange), rawurl)
		}
	}
	quit <- true
}
// main runs the exchange server, restarting it after a panic and exiting
// cleanly on a termination signal.
func main() {
	newid, _ := uuid.NewV4()
	// Keep the flag pointers and dereference them only after flag.Parse();
	// dereferencing before Parse always yields the defaults and silently
	// ignores command-line arguments.
	idFlag := flag.String("id", newid.String(), "Exchange ID")
	ipFlag := flag.String("ip", "0.0.0.0", "IP address for listen")
	portFlag := flag.Int("port", 9000, "Port number for listen")
	flag.Parse()
	id, ip, port := *idFlag, *ipFlag, *portFlag

	isContinue := true
	for isContinue {
		func() {
			quit := make(chan bool, 1)
			quitted := make(chan bool, 1)
			defer func() {
				if err := recover(); err == nil {
					log.Println("Exiting...")
				} else {
					quit <- true
					log.Println(err)
					log.Println("Restarting...")
					time.Sleep(5 * time.Second)
					<-quitted
				}
			}()
			log.Printf("Listening %s:%d", ip, port)
			socket, err := net.Listen("tcp", fmt.Sprintf("%s:%d", ip, port))
			if err != nil {
				// Fatalln exits the process; no restart is attempted.
				log.Fatalln(err)
			}
			exchange := NewExchange(id, socket)
			go exchange.Start(quit, quitted)
			stop := make(chan os.Signal, 1)
			// Note: SIGKILL cannot be caught; listing it here is a no-op.
			signal.Notify(stop, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT, syscall.SIGTERM)
			select {
			case <-quitted:
				isContinue = false
			case <-stop:
				quit <- true
				<-quitted
				isContinue = false
			}
		}()
	}
}
Deleted extra implementation
package main
import (
"bufio"
"flag"
"fmt"
"github.com/nu7hatch/gouuid"
"io"
"log"
"net"
"net/url"
"os"
"os/signal"
"syscall"
"time"
)
// exchangeid identifies a single exchange instance.
type exchangeid string

// Exchange accepts crawler connections, collects URLs from them, and
// routes each URL back out to the crawler responsible for it.
type Exchange struct {
	id     exchangeid   // this exchange's identity
	router *Router      // maps URLs to the crawler that should fetch them
	socket net.Listener // listener accepting crawler connections
	uchan  chan string  // queue of URLs awaiting distribution
}
// NewExchange builds an Exchange listening on socket, with a fresh router
// and an unbuffered URL queue.
func NewExchange(id string, socket net.Listener) *Exchange {
	return &Exchange{
		id:     exchangeid(id),
		router: NewRouter(),
		socket: socket,
		uchan:  make(chan string),
	}
}
// Start runs the exchange until either the caller signals quit or the
// accept loop stops on its own (e.g. the listener failed). It then closes
// the URL queue, waits for the distributor to drain, and reports
// completion on quitted.
func (e *Exchange) Start(quit <-chan bool, quitted chan<- bool) {
	squit := make(chan bool, 1)
	uquit := make(chan bool, 1)
	go e.waitClient(squit)
	go e.distributeUrl(uquit)
	select {
	case <-quit:
		// Closing the listener unblocks Accept, letting waitClient exit.
		e.socket.Close()
		<-squit
	case <-squit:
	}
	// No more URLs will be produced; let distributeUrl finish and exit.
	close(e.uchan)
	<-uquit
	quitted <- true
}
// waitClient accepts connections in a loop, spawning one handler
// goroutine per client, and signals quit once the listener stops
// accepting (e.g. after it is closed by Start).
func (e *Exchange) waitClient(quit chan<- bool) {
	for {
		log.Printf("Waiting client")
		conn, err := e.socket.Accept()
		log.Printf("Accepting client")
		if err != nil {
			log.Println(err)
			break
		}
		log.Printf("Connected from %s", conn.RemoteAddr().String())
		go e.handleConnection(conn)
	}
	quit <- true
}
// handleConnection registers the client as a crawler, reads
// newline-terminated URLs from it, and queues valid http(s) URLs for
// distribution. The crawler is deregistered and the connection closed
// when the client disconnects or a read error occurs.
func (e *Exchange) handleConnection(client net.Conn) {
	crawler := NewCrawler(e.id, client)
	e.router.Add(crawler)
	defer func() {
		e.router.Remove(crawler)
		client.Close()
	}()
	reader := bufio.NewReader(crawler.GetConn())
	for {
		rawurl, err := reader.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				log.Printf("Connection closed by %s", client.RemoteAddr().String())
			} else {
				log.Println(err)
			}
			break
		}
		// Strip the trailing newline BEFORE parsing: url.Parse rejects
		// control characters, so "http://x\n" would never validate.
		// err == nil guarantees the delimiter is present, so this is safe.
		rawurl = rawurl[:len(rawurl)-1]
		switch parsed, err := url.Parse(rawurl); {
		case err != nil:
			log.Printf("Invalid URL: %s (%v)", rawurl, err)
		case parsed.Scheme != "http" && parsed.Scheme != "https":
			log.Printf("Invalid URL: %s", rawurl)
		default:
			e.uchan <- rawurl
			log.Printf("Got a URL from %s: %s", client.RemoteAddr().String(), rawurl)
		}
	}
}
// distributeUrl drains e.uchan, routing each URL to a crawler. URLs that
// cannot be routed yet are re-queued. Locally-connected crawlers receive
// the URL over their connection; remote exchanges are not implemented.
func (e *Exchange) distributeUrl(quit chan<- bool) {
	for rawurl := range e.uchan {
		crawler, err := e.router.Route(rawurl)
		if err != nil {
			log.Println(err)
			// NOTE(review): re-queueing from the sole consumer of an
			// unbuffered channel can deadlock, and panics once Start has
			// closed uchan — worth revisiting.
			e.uchan <- rawurl
			continue
		}
		eid := crawler.GetExchangeId()
		if eid == e.id {
			// Never use the URL itself as a format string: a '%' in the
			// URL would be misinterpreted by Fprintf.
			fmt.Fprintf(crawler.GetConn(), "%s\n", rawurl)
		} else {
			// amqp.Publish(exchange), rawurl)
		}
	}
	quit <- true
}
// main runs the exchange server, restarting it after a panic and exiting
// cleanly on a termination signal.
func main() {
	newid, _ := uuid.NewV4()
	// Keep the flag pointers and dereference them only after flag.Parse();
	// dereferencing before Parse always yields the defaults and silently
	// ignores command-line arguments.
	idFlag := flag.String("id", newid.String(), "Exchange ID")
	ipFlag := flag.String("ip", "0.0.0.0", "IP address for listen")
	portFlag := flag.Int("port", 9000, "Port number for listen")
	flag.Parse()
	id, ip, port := *idFlag, *ipFlag, *portFlag

	isContinue := true
	for isContinue {
		func() {
			quit := make(chan bool, 1)
			quitted := make(chan bool, 1)
			defer func() {
				if err := recover(); err == nil {
					log.Println("Exiting...")
				} else {
					quit <- true
					log.Println(err)
					log.Println("Restarting...")
					time.Sleep(5 * time.Second)
					<-quitted
				}
			}()
			log.Printf("Listening %s:%d", ip, port)
			socket, err := net.Listen("tcp", fmt.Sprintf("%s:%d", ip, port))
			if err != nil {
				// Fatalln exits the process; no restart is attempted.
				log.Fatalln(err)
			}
			exchange := NewExchange(id, socket)
			go exchange.Start(quit, quitted)
			stop := make(chan os.Signal, 1)
			// Note: SIGKILL cannot be caught; listing it here is a no-op.
			signal.Notify(stop, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT, syscall.SIGTERM)
			select {
			case <-quitted:
				isContinue = false
			case <-stop:
				quit <- true
				<-quitted
				isContinue = false
			}
		}()
	}
}
|
package exec
import (
"testing"
"github.com/frozzare/go-assert"
)
func TestCmd(t *testing.T) {
err := ExecCmd("ls", false)
assert.Nil(t, err)
err = ExecCmd("ls", true)
assert.Nil(t, err)
}
Fix failed tests
package exec
import (
"testing"
"github.com/frozzare/go-assert"
)
func TestCmd(t *testing.T) {
err := Cmd("ls", false)
assert.Nil(t, err)
err = Cmd("ls", true)
assert.Nil(t, err)
}
|
package main
import (
"os"
"fmt"
"log"
"flag"
"github.com/rwcarlsen/goexif/exif"
"github.com/rwcarlsen/goexif/tiff"
)
// main prints every EXIF field of the image file named by the first
// command-line argument.
func main() {
	flag.Parse()
	fname := flag.Arg(0)

	f, err := os.Open(fname)
	if err != nil {
		log.Fatal(err)
	}
	// Release the handle when done (log.Fatal bypasses the defer, but the
	// process exits then anyway).
	defer f.Close()

	x, err := exif.Decode(f)
	if err != nil {
		log.Fatal(err)
	}
	x.Walk(Walker{})
}
// Walker prints every EXIF field it is handed; it implements the walker
// callback used by x.Walk.
type Walker struct {}

// Walk prints one field as "name: json-value" and never stops the walk.
// The MarshalJSON error is ignored here; a failed marshal prints an
// empty value.
func (_ Walker) Walk(name exif.FieldName, tag *tiff.Tag) error {
	data, _ := tag.MarshalJSON()
	fmt.Printf("%v: %v\n", name, string(data))
	return nil
}
exifstat works on multiple files
package main
import (
"os"
"fmt"
"log"
"flag"
"github.com/rwcarlsen/goexif/exif"
"github.com/rwcarlsen/goexif/tiff"
)
// main prints the EXIF fields of every image file named on the command
// line; files that fail to open or decode are reported and skipped.
func main() {
	flag.Parse()
	fnames := flag.Args()
	for _, name := range fnames {
		f, err := os.Open(name)
		if err != nil {
			log.Printf("err on %v: %v", name, err)
			continue
		}
		x, err := exif.Decode(f)
		if err != nil {
			log.Printf("err on %v: %v", name, err)
			f.Close()
			continue
		}
		fmt.Printf("\n---- Image '%v' ----\n", name)
		x.Walk(Walker{})
		// Close each file as soon as we're done with it; the original
		// leaked one descriptor per argument, which a deferred close in a
		// loop would not fix either.
		f.Close()
	}
}
// Walker prints every EXIF field it is handed; it implements the walker
// callback used by x.Walk.
type Walker struct {}

// Walk prints one indented field as "name: json-value" and never stops
// the walk. The MarshalJSON error is ignored here; a failed marshal
// prints an empty value.
func (_ Walker) Walk(name exif.FieldName, tag *tiff.Tag) error {
	data, _ := tag.MarshalJSON()
	fmt.Printf("  %v: %v\n", name, string(data))
	return nil
}
|
// Copyright (c) 2017, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package expand
import (
"bytes"
"context"
"fmt"
"io"
"os"
"os/user"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"mvdan.cc/sh/syntax"
)
// Context holds the configuration and scratch state used to perform
// shell expansions.
type Context struct {
	// Env is used to get and set environment variables when performing
	// shell expansions. Some special parameters are also expanded via this
	// interface, such as:
	//
	//   * "#", "@", "*", "0"-"9" for the shell's parameters
	//   * "?", "$", "PPID" for the shell's status and process
	//   * "HOME foo" to retrieve user foo's home directory
	//
	// If "HOME foo" is returned as unset, os/user.Lookup will be used.
	Env Environ

	// NoGlob disables pathname globbing entirely.
	NoGlob bool
	// GlobStar enables the "**" pattern, which matches any number of
	// directory levels.
	GlobStar bool

	// CmdSubst, if set, performs a command substitution, writing the
	// command's output to the given writer. If nil, command
	// substitutions are reported via UnexpectedCommandError.
	CmdSubst func(context.Context, io.Writer, *syntax.CmdSubst)

	// TODO: rethink this interface

	// Readdirnames is used for file path globbing. If nil, globbing is
	// disabled. Use Context.SystemReaddirnames to use the filesystem
	// directly.
	Readdirnames func(string) []string

	// OnError is called when an error is encountered. If nil, errors cause
	// a panic.
	OnError func(error)

	// Reusable scratch allocations; see strBuilder and wordFields.
	bufferAlloc bytes.Buffer
	fieldAlloc  [4]fieldPart
	fieldsAlloc [4][]fieldPart

	// ifs caches the value of $IFS; set by prepareIFS.
	ifs string
	// A pointer to a parameter expansion node, if we're inside one.
	// Necessary for ${LINENO}.
	curParam *syntax.ParamExp
}
// UnexpectedCommandError is returned if a command substitution is encountered
// when Context.CmdSubst is nil.
type UnexpectedCommandError struct {
	// Node is the offending command substitution node.
	Node *syntax.CmdSubst
}

// Error reports the position of the unexpected command substitution.
func (u UnexpectedCommandError) Error() string {
	return fmt.Sprintf("unexpected command substitution at %s", u.Node.Pos())
}
// prepareIFS caches the field-separator set from $IFS, falling back to
// space, tab, and newline when IFS is unset.
func (c *Context) prepareIFS() {
	if vr := c.Env.Get("IFS"); vr.IsSet() {
		c.ifs = vr.String()
	} else {
		c.ifs = " \t\n"
	}
}
// ifsRune reports whether r is one of the cached IFS separator runes.
func (c *Context) ifsRune(r rune) bool {
	// strings.ContainsRune replaces the hand-rolled rune loop.
	return strings.ContainsRune(c.ifs, r)
}
// ifsJoin joins strs with the first IFS character, or with no separator
// at all when IFS is empty.
func (c *Context) ifsJoin(strs []string) string {
	if c.ifs == "" {
		return strings.Join(strs, "")
	}
	return strings.Join(strs, c.ifs[:1])
}
// err reports an expansion error through OnError, or panics when no
// handler is configured.
func (c *Context) err(err error) {
	if c.OnError != nil {
		c.OnError(err)
		return
	}
	panic(err)
}
// strBuilder returns the context's shared scratch buffer, reset and ready
// for reuse. Callers must be done with it before the next strBuilder
// call, since every caller shares the same allocation.
func (c *Context) strBuilder() *bytes.Buffer {
	b := &c.bufferAlloc
	b.Reset()
	return b
}
// envGet fetches an environment variable as a plain string.
func (c *Context) envGet(name string) string {
	vr := c.Env.Get(name)
	return vr.String()
}
// envSet assigns a string variable through the environment, silently
// doing nothing when Env does not support writes.
func (c *Context) envSet(name, value string) {
	wenv, ok := c.Env.(WriteEnviron)
	if !ok {
		// TODO: we should probably error here
		return
	}
	wenv.Set(name, Variable{Value: value})
}
// ExpandLiteral expands word into a single unsplit string, treating it as
// if double-quoted. A nil word expands to the empty string.
func (c *Context) ExpandLiteral(ctx context.Context, word *syntax.Word) string {
	if word == nil {
		return ""
	}
	field := c.wordField(ctx, word.Parts, quoteDouble)
	return c.fieldJoin(field)
}
// ExpandFormat interprets format as a printf-style string: backslash
// escapes are translated, and % directives consume values from args
// (when args is nil, % directives are disabled and only escapes apply,
// as used for $'...' strings). It returns the formatted string and the
// number of arguments consumed, or an error for a malformed directive.
func (c *Context) ExpandFormat(format string, args []string) (string, int, error) {
	buf := c.strBuilder()
	esc := false
	// fmts accumulates the runes of a partially-read % directive.
	var fmts []rune
	initialArgs := len(args)

	// Note: the loop variable c shadows the method receiver from here on.
	for _, c := range format {
		switch {
		case esc:
			// The previous rune was a backslash: translate the escape.
			esc = false
			switch c {
			case 'n':
				buf.WriteRune('\n')
			case 'r':
				buf.WriteRune('\r')
			case 't':
				buf.WriteRune('\t')
			case '\\':
				buf.WriteRune('\\')
			default:
				// Unknown escapes are kept literally, backslash included.
				buf.WriteRune('\\')
				buf.WriteRune(c)
			}
		case len(fmts) > 0:
			// We are in the middle of a % directive.
			switch c {
			case '%':
				// "%%" emits a literal percent sign.
				buf.WriteByte('%')
				fmts = nil
			case 'c':
				// %c prints the first byte of the next argument (or NUL).
				var b byte
				if len(args) > 0 {
					arg := ""
					arg, args = args[0], args[1:]
					if len(arg) > 0 {
						b = arg[0]
					}
				}
				buf.WriteByte(b)
				fmts = nil
			case '+', '-', ' ':
				// Only one flag character is allowed, right after '%'.
				if len(fmts) > 1 {
					return "", 0, fmt.Errorf("invalid format char: %c", c)
				}
				fmts = append(fmts, c)
			case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
				// Width digits.
				fmts = append(fmts, c)
			case 's', 'd', 'i', 'u', 'o', 'x':
				// Verb: consume one argument, converting to a number for
				// the numeric verbs ('i'/'u' are mapped to Go's 'd').
				arg := ""
				if len(args) > 0 {
					arg, args = args[0], args[1:]
				}
				var farg interface{} = arg
				if c != 's' {
					n, _ := strconv.ParseInt(arg, 0, 0)
					if c == 'i' || c == 'd' {
						farg = int(n)
					} else {
						farg = uint(n)
					}
					if c == 'i' || c == 'u' {
						c = 'd'
					}
				}
				fmts = append(fmts, c)
				// Delegate the actual formatting to fmt.
				fmt.Fprintf(buf, string(fmts), farg)
				fmts = nil
			default:
				return "", 0, fmt.Errorf("invalid format char: %c", c)
			}
		case c == '\\':
			esc = true
		case args != nil && c == '%':
			// if args == nil, we are not doing format
			// arguments
			fmts = []rune{c}
		default:
			buf.WriteRune(c)
		}
	}
	if len(fmts) > 0 {
		// The format string ended inside a % directive.
		return "", 0, fmt.Errorf("missing format char")
	}
	return buf.String(), initialArgs - len(args), nil
}
// fieldJoin concatenates the values of parts into one string, avoiding
// any copy for the zero- and one-part cases.
func (c *Context) fieldJoin(parts []fieldPart) string {
	if len(parts) == 0 {
		return ""
	}
	if len(parts) == 1 {
		// Short-cut without a string copy.
		return parts[0].val
	}
	buf := c.strBuilder()
	for _, p := range parts {
		buf.WriteString(p.val)
	}
	return buf.String()
}
// escapedGlobField renders a field as a glob pattern: quoted parts have
// their metacharacters escaped so only unquoted parts can glob. glob is
// true when some unquoted part contains a pattern; the escaped string is
// only materialized in that case.
func (c *Context) escapedGlobField(parts []fieldPart) (escaped string, glob bool) {
	buf := c.strBuilder()
	for _, part := range parts {
		if part.quote > quoteNone {
			buf.WriteString(syntax.QuotePattern(part.val))
			continue
		}
		buf.WriteString(part.val)
		if syntax.HasPattern(part.val) {
			glob = true
		}
	}
	if glob { // only copy the string if it will be used
		escaped = buf.String()
	}
	return escaped, glob
}
// ExpandFields expands words into the final fields of a command line:
// brace expansion, word splitting, and (unless NoGlob) pathname globbing,
// with glob matches converted back to paths relative to $PWD.
func (c *Context) ExpandFields(ctx context.Context, words ...*syntax.Word) []string {
	c.prepareIFS()
	fields := make([]string, 0, len(words))
	dir := c.envGet("PWD")
	// Quote $PWD so its own characters are never treated as a pattern.
	baseDir := syntax.QuotePattern(dir)
	for _, expWord := range Braces(words...) {
		for _, field := range c.wordFields(ctx, expWord.Parts) {
			path, doGlob := c.escapedGlobField(field)
			var matches []string
			abs := filepath.IsAbs(path)
			if doGlob && !c.NoGlob {
				if !abs {
					// Glob relative patterns starting from $PWD.
					path = filepath.Join(baseDir, path)
				}
				matches = c.glob(path)
			}
			if len(matches) == 0 {
				// No matches (or no globbing): keep the literal field.
				fields = append(fields, c.fieldJoin(field))
				continue
			}
			for _, match := range matches {
				if !abs {
					// Make the match relative to $PWD again, preserving
					// the trailing separator that Rel would strip.
					endSeparator := strings.HasSuffix(match, string(filepath.Separator))
					match, _ = filepath.Rel(dir, match)
					if endSeparator {
						match += string(filepath.Separator)
					}
				}
				fields = append(fields, match)
			}
		}
	}
	return fields
}
// ExpandPattern expands word for use as a matching pattern: parts that
// were quoted have their glob metacharacters escaped, while unquoted
// parts keep theirs intact.
func (c *Context) ExpandPattern(ctx context.Context, word *syntax.Word) string {
	buf := c.strBuilder()
	for _, part := range c.wordField(ctx, word.Parts, quoteSingle) {
		s := part.val
		if part.quote > quoteNone {
			s = syntax.QuotePattern(s)
		}
		buf.WriteString(s)
	}
	return buf.String()
}
// fieldPart is one contiguous piece of a field, together with how it was
// quoted; quoting determines later field splitting and glob escaping.
type fieldPart struct {
	val   string
	quote quoteLevel
}

// quoteLevel records the kind of quoting applied to a field part.
type quoteLevel uint

const (
	quoteNone   quoteLevel = iota // unquoted
	quoteDouble                   // within "..."
	quoteSingle                   // within '...'
)
// wordField expands word parts into a single field, with no field
// splitting. ql controls backslash handling in literals: under
// quoteDouble only the double-quote escape set ("\\", "\"", "$", "`",
// and line continuations) is interpreted.
func (c *Context) wordField(ctx context.Context, wps []syntax.WordPart, ql quoteLevel) []fieldPart {
	var field []fieldPart
	for i, wp := range wps {
		switch x := wp.(type) {
		case *syntax.Lit:
			s := x.Value
			if i == 0 {
				// Tilde expansion only applies at the start of a word.
				s = c.expandUser(s)
			}
			if ql == quoteDouble && strings.Contains(s, "\\") {
				buf := c.strBuilder()
				for i := 0; i < len(s); i++ {
					b := s[i]
					if b == '\\' && i+1 < len(s) {
						switch s[i+1] {
						case '\n': // remove \\\n
							i++
							continue
						case '"', '\\', '$', '`': // special chars
							// Drop the backslash; the next iteration
							// writes the escaped character itself.
							continue
						}
					}
					buf.WriteByte(b)
				}
				s = buf.String()
			}
			field = append(field, fieldPart{val: s})
		case *syntax.SglQuoted:
			fp := fieldPart{quote: quoteSingle, val: x.Value}
			if x.Dollar {
				// $'...' strings honor backslash escapes.
				fp.val, _, _ = c.ExpandFormat(fp.val, nil)
			}
			field = append(field, fp)
		case *syntax.DblQuoted:
			// Recurse, marking the nested parts as double-quoted.
			for _, part := range c.wordField(ctx, x.Parts, quoteDouble) {
				part.quote = quoteDouble
				field = append(field, part)
			}
		case *syntax.ParamExp:
			field = append(field, fieldPart{val: c.paramExp(ctx, x)})
		case *syntax.CmdSubst:
			field = append(field, fieldPart{val: c.cmdSubst(ctx, x)})
		case *syntax.ArithmExp:
			field = append(field, fieldPart{
				val: strconv.Itoa(c.ExpandArithm(ctx, x.X)),
			})
		default:
			panic(fmt.Sprintf("unhandled word part: %T", x))
		}
	}
	return field
}
// cmdSubst runs a command substitution through the configured callback
// and returns its output with trailing newlines stripped; when no
// callback is set, the error is reported and the result is empty.
func (c *Context) cmdSubst(ctx context.Context, cs *syntax.CmdSubst) string {
	if c.CmdSubst == nil {
		c.err(UnexpectedCommandError{Node: cs})
		return ""
	}
	out := c.strBuilder()
	c.CmdSubst(ctx, out, cs)
	return strings.TrimRight(out.String(), "\n")
}
// wordFields expands word parts into zero or more fields, applying IFS
// field splitting to parameter expansions and command substitutions.
// Quoted parts are never split; a word consisting only of quoted (even
// empty) parts still yields one field.
func (c *Context) wordFields(ctx context.Context, wps []syntax.WordPart) [][]fieldPart {
	fields := c.fieldsAlloc[:0]
	curField := c.fieldAlloc[:0]
	allowEmpty := false
	// flush finishes the field being accumulated, if any.
	flush := func() {
		if len(curField) == 0 {
			return
		}
		fields = append(fields, curField)
		curField = nil
	}
	// splitAdd appends val, starting a new field at each IFS separator.
	splitAdd := func(val string) {
		for i, field := range strings.FieldsFunc(val, c.ifsRune) {
			if i > 0 {
				flush()
			}
			curField = append(curField, fieldPart{val: field})
		}
	}
	for i, wp := range wps {
		switch x := wp.(type) {
		case *syntax.Lit:
			s := x.Value
			if i == 0 {
				// Tilde expansion only applies at the start of a word.
				s = c.expandUser(s)
			}
			if strings.Contains(s, "\\") {
				buf := c.strBuilder()
				for i := 0; i < len(s); i++ {
					b := s[i]
					// An unquoted backslash escapes the next byte. Guard
					// against a trailing lone backslash, which previously
					// indexed past the end of s and panicked; keep it
					// literally instead, matching wordField's handling.
					if b == '\\' && i+1 < len(s) {
						i++
						b = s[i]
					}
					buf.WriteByte(b)
				}
				s = buf.String()
			}
			curField = append(curField, fieldPart{val: s})
		case *syntax.SglQuoted:
			allowEmpty = true
			fp := fieldPart{quote: quoteSingle, val: x.Value}
			if x.Dollar {
				// $'...' strings honor backslash escapes.
				fp.val, _, _ = c.ExpandFormat(fp.val, nil)
			}
			curField = append(curField, fp)
		case *syntax.DblQuoted:
			allowEmpty = true
			// Special case: "$@" or "${arr[@]}" expands to one field per
			// element, with no joining or re-splitting.
			if len(x.Parts) == 1 {
				pe, _ := x.Parts[0].(*syntax.ParamExp)
				if elems := c.quotedElems(pe); elems != nil {
					for i, elem := range elems {
						if i > 0 {
							flush()
						}
						curField = append(curField, fieldPart{
							quote: quoteDouble,
							val:   elem,
						})
					}
					continue
				}
			}
			for _, part := range c.wordField(ctx, x.Parts, quoteDouble) {
				part.quote = quoteDouble
				curField = append(curField, part)
			}
		case *syntax.ParamExp:
			splitAdd(c.paramExp(ctx, x))
		case *syntax.CmdSubst:
			splitAdd(c.cmdSubst(ctx, x))
		case *syntax.ArithmExp:
			curField = append(curField, fieldPart{
				val: strconv.Itoa(c.ExpandArithm(ctx, x.X)),
			})
		default:
			panic(fmt.Sprintf("unhandled word part: %T", x))
		}
	}
	flush()
	// An entirely-quoted empty word still produces one empty field.
	if allowEmpty && len(fields) == 0 {
		fields = append(fields, curField)
	}
	return fields
}
// quotedElems checks if a parameter expansion is exactly ${@} or ${foo[@]}
// and, if so, returns the elements to be expanded one per field;
// otherwise it returns nil.
func (c *Context) quotedElems(pe *syntax.ParamExp) []string {
	if pe == nil || pe.Excl || pe.Length || pe.Width {
		return nil
	}
	if pe.Param.Value == "@" {
		// NOTE(review): this assumes "@" is always stored as []string;
		// any other dynamic type would panic here — confirm against the
		// Environ implementations.
		return c.Env.Get("@").Value.([]string)
	}
	if nodeLit(pe.Index) != "@" {
		return nil
	}
	val := c.Env.Get(pe.Param.Value).Value
	if x, ok := val.([]string); ok {
		return x
	}
	return nil
}
// expandUser performs tilde expansion on field. "~" alone expands to HOME,
// and "~name" expands to that user's home directory, looked up first via
// the Environ ("HOME name") and then via os/user. If the lookup fails, the
// field is returned unchanged.
func (c *Context) expandUser(field string) string {
	if !strings.HasPrefix(field, "~") {
		return field
	}
	name, rest := field[1:], ""
	if slash := strings.Index(name, "/"); slash >= 0 {
		name, rest = name[:slash], name[slash:]
	}
	if name == "" {
		return c.Env.Get("HOME").String() + rest
	}
	if vr := c.Env.Get("HOME " + name); vr.IsSet() {
		return vr.String() + rest
	}
	u, err := user.Lookup(name)
	if err != nil {
		return field
	}
	return u.HomeDir + rest
}
// findAllIndex translates a shell pattern into a regular expression and
// returns up to n matches within name, in regexp's FindAllStringIndex
// form. An untranslatable pattern yields nil.
func findAllIndex(pattern, name string, n int) [][]int {
	expr, err := syntax.TranslatePattern(pattern, true)
	if err != nil {
		return nil
	}
	return regexp.MustCompile(expr).FindAllStringIndex(name, n)
}
// TODO: use this again to optimize globbing; see
// https://github.com/mvdan/sh/issues/213

// hasGlob reports whether path contains any globbing metacharacters.
// Outside of Windows, a backslash is also special, since it can escape
// other characters.
func hasGlob(path string) bool {
	if runtime.GOOS == "windows" {
		return strings.ContainsAny(path, `*?[`)
	}
	return strings.ContainsAny(path, `*?[\`)
}
// rxGlobStar matches any entry name; used to expand each level of "**".
var rxGlobStar = regexp.MustCompile(".*")

// glob expands pattern into the list of existing paths that match it,
// walking one path component at a time via globDir. Relative patterns
// start from ".". An untranslatable pattern component yields nil.
func (c *Context) glob(pattern string) []string {
	parts := strings.Split(pattern, string(filepath.Separator))
	matches := []string{"."}
	if filepath.IsAbs(pattern) {
		if parts[0] == "" {
			// unix-like
			matches[0] = string(filepath.Separator)
		} else {
			// windows (for some reason it won't work without the
			// trailing separator)
			matches[0] = parts[0] + string(filepath.Separator)
		}
		parts = parts[1:]
	}
	for _, part := range parts {
		if part == "**" && c.GlobStar {
			for i := range matches {
				// "a/**" should match "a/ a/b a/b/c ..."; note
				// how the zero-match case has a trailing
				// separator.
				matches[i] += string(filepath.Separator)
			}
			// expand all the possible levels of **
			latest := matches
			for {
				var newMatches []string
				for _, dir := range latest {
					newMatches = c.globDir(dir, rxGlobStar, newMatches)
				}
				if len(newMatches) == 0 {
					// not another level of directories to
					// try; stop
					break
				}
				matches = append(matches, newMatches...)
				latest = newMatches
			}
			continue
		}
		expr, err := syntax.TranslatePattern(part, true)
		if err != nil {
			// an invalid pattern never matches anything
			return nil
		}
		// anchor the pattern so it must match the whole entry name
		rx := regexp.MustCompile("^" + expr + "$")
		var newMatches []string
		for _, dir := range matches {
			newMatches = c.globDir(dir, rx, newMatches)
		}
		matches = newMatches
	}
	return matches
}
// SystemReaddirnames uses os.Open and File.Readdirnames to retrieve the
// sorted names of the files within a directory on the system's
// filesystem. Any error is reported via Context.OnError.
func (c *Context) SystemReaddirnames(dir string) []string {
	f, err := os.Open(dir)
	if err != nil {
		c.err(err)
		return nil
	}
	defer f.Close()
	names, err := f.Readdirnames(-1)
	if err != nil {
		c.err(err)
		return nil
	}
	sort.Strings(names)
	return names
}
// globDir appends to matches the entries of dir whose names match rx.
// Dot entries are skipped unless the pattern itself starts with a literal
// dot. If Readdirnames is nil, globbing is disabled and nil is returned.
func (c *Context) globDir(dir string, rx *regexp.Regexp, matches []string) []string {
	if c.Readdirnames == nil {
		return nil
	}
	matchHidden := strings.HasPrefix(rx.String(), `^\.`)
	for _, name := range c.Readdirnames(dir) {
		if !matchHidden && name[0] == '.' {
			continue
		}
		if rx.MatchString(name) {
			matches = append(matches, filepath.Join(dir, name))
		}
	}
	return matches
}
// ReadFields splits s into at most n fields (all fields if n is -1) using
// the runes in IFS as separators. If raw, backslashes are kept literally;
// otherwise a backslash escapes the following rune, preventing it from
// acting as a separator.
func (c *Context) ReadFields(s string, n int, raw bool) []string {
	c.prepareIFS()
	// pos records a field's rune offsets within the unescaped text
	type pos struct {
		start, end int
	}
	var fpos []pos
	runes := make([]rune, 0, len(s))
	infield := false
	esc := false
	for _, r := range s {
		if infield {
			if c.ifsRune(r) && (raw || !esc) {
				// unescaped separator: the field ends here
				fpos[len(fpos)-1].end = len(runes)
				infield = false
			}
		} else {
			if !c.ifsRune(r) && (raw || !esc) {
				// a new field starts; end is filled in later
				fpos = append(fpos, pos{start: len(runes), end: -1})
				infield = true
			}
		}
		if r == '\\' {
			// in raw mode keep every backslash; otherwise only
			// keep one that was itself escaped
			if raw || esc {
				runes = append(runes, r)
			}
			esc = !esc
			continue
		}
		runes = append(runes, r)
		esc = false
	}
	if len(fpos) == 0 {
		return nil
	}
	if infield {
		// the last field runs to the end of the input
		fpos[len(fpos)-1].end = len(runes)
	}
	switch {
	case n == 1:
		// include heading/trailing IFSs
		fpos[0].start, fpos[0].end = 0, len(runes)
		fpos = fpos[:1]
	case n != -1 && n < len(fpos):
		// combine to max n fields
		fpos[n-1].end = fpos[len(fpos)-1].end
		fpos = fpos[:n]
	}
	var fields = make([]string, len(fpos))
	for i, p := range fpos {
		fields[i] = string(runes[p.start:p.end])
	}
	return fields
}
expand: improve the Context field godocs
// Copyright (c) 2017, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package expand
import (
"bytes"
"context"
"fmt"
"io"
"os"
"os/user"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"mvdan.cc/sh/syntax"
)
// Context holds the options and callbacks used to perform shell
// expansions, from plain variable expansion to globbing and command
// substitution.
type Context struct {
	// Env is used to get and set environment variables when performing
	// shell expansions. Some special parameters are also expanded via this
	// interface, such as:
	//
	//   * "#", "@", "*", "0"-"9" for the shell's parameters
	//   * "?", "$", "PPID" for the shell's status and process
	//   * "HOME foo" to retrieve user foo's home directory (if unset,
	//     os/user.Lookup will be used)
	Env Environ

	// NoGlob corresponds to the shell option that disables globbing.
	NoGlob bool
	// GlobStar corresponds to the shell option that allows globbing with
	// "**".
	GlobStar bool

	// CmdSubst is used to expand command substitutions. Output should be
	// written to the provided io.Writer.
	//
	// If nil, expanding a syntax.CmdSubst node will result in an
	// UnexpectedCommandError error.
	CmdSubst func(context.Context, io.Writer, *syntax.CmdSubst)

	// TODO: rethink this interface
	// Readdirnames is used for file path globbing. If nil, globbing is
	// disabled. Use Context.SystemReaddirnames to use the filesystem
	// directly.
	Readdirnames func(string) []string

	// OnError is called when an error is encountered. If nil, errors cause
	// a panic.
	OnError func(error)

	// reusable allocations, to avoid heap churn on small expansions
	bufferAlloc bytes.Buffer
	fieldAlloc  [4]fieldPart
	fieldsAlloc [4][]fieldPart

	// ifs caches the IFS separator runes; see prepareIFS.
	ifs string

	// A pointer to a parameter expansion node, if we're inside one.
	// Necessary for ${LINENO}.
	curParam *syntax.ParamExp
}
// UnexpectedCommandError is returned if a command substitution is encountered
// when Context.CmdSubst is nil.
type UnexpectedCommandError struct {
	Node *syntax.CmdSubst
}

// Error implements the error interface, reporting the position of the
// offending command substitution node.
func (u UnexpectedCommandError) Error() string {
	return fmt.Sprintf("unexpected command substitution at %s", u.Node.Pos())
}
// prepareIFS caches the current IFS value, falling back to the default of
// space, tab and newline when the variable is unset.
func (c *Context) prepareIFS() {
	c.ifs = " \t\n"
	if vr := c.Env.Get("IFS"); vr.IsSet() {
		c.ifs = vr.String()
	}
}
// ifsRune reports whether r is one of the cached IFS separator runes.
func (c *Context) ifsRune(r rune) bool {
	return strings.ContainsRune(c.ifs, r)
}
// ifsJoin joins strs with the first character of IFS, or with no
// separator at all when IFS is empty.
func (c *Context) ifsJoin(strs []string) string {
	if c.ifs == "" {
		return strings.Join(strs, "")
	}
	return strings.Join(strs, c.ifs[:1])
}
// err reports an expansion error via the OnError callback, panicking when
// no callback has been set.
func (c *Context) err(err error) {
	if c.OnError != nil {
		c.OnError(err)
		return
	}
	panic(err)
}
// strBuilder returns the context's single scratch buffer, reset and ready
// for use. Since the buffer is shared, callers must finish with it before
// calling anything else that may use it.
func (c *Context) strBuilder() *bytes.Buffer {
	c.bufferAlloc.Reset()
	return &c.bufferAlloc
}
// envGet returns the string value of the named environment variable.
func (c *Context) envGet(name string) string {
	vr := c.Env.Get(name)
	return vr.String()
}
// envSet sets name to value, doing nothing if the Environ does not
// support writes.
func (c *Context) envSet(name, value string) {
	if wenv, ok := c.Env.(WriteEnviron); ok {
		wenv.Set(name, Variable{Value: value})
		return
	}
	// TODO: we should probably error here
}
// ExpandLiteral expands a single shell word as if it were double-quoted:
// no brace expansion, no globbing, and no field splitting. A nil word
// expands to the empty string.
func (c *Context) ExpandLiteral(ctx context.Context, word *syntax.Word) string {
	if word == nil {
		return ""
	}
	field := c.wordField(ctx, word.Parts, quoteDouble)
	return c.fieldJoin(field)
}
// ExpandFormat expands a printf-style format string, consuming arguments
// from args as directed by the format verbs. It returns the formatted
// string, how many arguments were consumed, and an error for malformed
// formats. If args is nil, '%' is not treated as a verb introducer and
// only backslash escapes are expanded (used for $'...' strings).
func (c *Context) ExpandFormat(format string, args []string) (string, int, error) {
	buf := c.strBuilder()
	esc := false
	var fmts []rune // pending format directive, beginning with '%'
	initialArgs := len(args)
	for _, c := range format { // NOTE: c shadows the method receiver here
		switch {
		case esc:
			// previous rune was a backslash: expand the escape
			esc = false
			switch c {
			case 'n':
				buf.WriteRune('\n')
			case 'r':
				buf.WriteRune('\r')
			case 't':
				buf.WriteRune('\t')
			case '\\':
				buf.WriteRune('\\')
			default:
				// unknown escape; keep the backslash literally
				buf.WriteRune('\\')
				buf.WriteRune(c)
			}
		case len(fmts) > 0:
			// we are inside a % directive
			switch c {
			case '%':
				buf.WriteByte('%')
				fmts = nil
			case 'c':
				// %c prints the first byte of the next argument
				var b byte
				if len(args) > 0 {
					arg := ""
					arg, args = args[0], args[1:]
					if len(arg) > 0 {
						b = arg[0]
					}
				}
				buf.WriteByte(b)
				fmts = nil
			case '+', '-', ' ':
				// only a single flag directly after '%' is accepted
				if len(fmts) > 1 {
					return "", 0, fmt.Errorf("invalid format char: %c", c)
				}
				fmts = append(fmts, c)
			case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
				// width digits
				fmts = append(fmts, c)
			case 's', 'd', 'i', 'u', 'o', 'x':
				// conversion verb: consume one argument
				arg := ""
				if len(args) > 0 {
					arg, args = args[0], args[1:]
				}
				var farg interface{} = arg
				if c != 's' {
					// numeric verbs parse the argument as an integer
					n, _ := strconv.ParseInt(arg, 0, 0)
					if c == 'i' || c == 'd' {
						farg = int(n)
					} else {
						farg = uint(n)
					}
					if c == 'i' || c == 'u' {
						// Go's fmt has no %i/%u; print via %d
						c = 'd'
					}
				}
				fmts = append(fmts, c)
				fmt.Fprintf(buf, string(fmts), farg)
				fmts = nil
			default:
				return "", 0, fmt.Errorf("invalid format char: %c", c)
			}
		case c == '\\':
			esc = true
		case args != nil && c == '%':
			// if args == nil, we are not doing format
			// arguments
			fmts = []rune{c}
		default:
			buf.WriteRune(c)
		}
	}
	if len(fmts) > 0 {
		// the format string ended mid-directive
		return "", 0, fmt.Errorf("missing format char")
	}
	return buf.String(), initialArgs - len(args), nil
}
// fieldJoin concatenates the string values of all parts in a field.
func (c *Context) fieldJoin(parts []fieldPart) string {
	if len(parts) == 0 {
		return ""
	}
	if len(parts) == 1 { // short-cut without a string copy
		return parts[0].val
	}
	buf := c.strBuilder()
	for _, p := range parts {
		buf.WriteString(p.val)
	}
	return buf.String()
}
// escapedGlobField rebuilds a field as a pattern string, quoting the
// quoted parts so they can never glob. glob reports whether any unquoted
// part contains pattern metacharacters; escaped is only populated when
// glob is true, to avoid a needless string copy.
func (c *Context) escapedGlobField(parts []fieldPart) (escaped string, glob bool) {
	buf := c.strBuilder()
	for _, p := range parts {
		if p.quote > quoteNone {
			// quoted text must match literally
			buf.WriteString(syntax.QuotePattern(p.val))
			continue
		}
		buf.WriteString(p.val)
		glob = glob || syntax.HasPattern(p.val)
	}
	if glob { // only copy the string if it will be used
		escaped = buf.String()
	}
	return escaped, glob
}
// ExpandFields expands a list of words into fields: brace expansion, word
// expansion, field splitting, and finally path globbing (unless NoGlob is
// set). Glob matches are made relative to PWD again before being
// returned.
func (c *Context) ExpandFields(ctx context.Context, words ...*syntax.Word) []string {
	c.prepareIFS()
	fields := make([]string, 0, len(words))
	dir := c.envGet("PWD")
	// quote PWD so that its own characters never glob
	baseDir := syntax.QuotePattern(dir)
	for _, expWord := range Braces(words...) {
		for _, field := range c.wordFields(ctx, expWord.Parts) {
			path, doGlob := c.escapedGlobField(field)
			var matches []string
			abs := filepath.IsAbs(path)
			if doGlob && !c.NoGlob {
				if !abs {
					// glob relative to the current directory
					path = filepath.Join(baseDir, path)
				}
				matches = c.glob(path)
			}
			if len(matches) == 0 {
				// no globbing, or no matches: keep the field as-is
				fields = append(fields, c.fieldJoin(field))
				continue
			}
			for _, match := range matches {
				if !abs {
					// make the match relative again, restoring any
					// trailing separator that filepath.Rel drops
					endSeparator := strings.HasSuffix(match, string(filepath.Separator))
					match, _ = filepath.Rel(dir, match)
					if endSeparator {
						match += string(filepath.Separator)
					}
				}
				fields = append(fields, match)
			}
		}
	}
	return fields
}
// ExpandPattern expands a word into a pattern string, escaping the quoted
// parts so that they always match literally.
func (c *Context) ExpandPattern(ctx context.Context, word *syntax.Word) string {
	buf := c.strBuilder()
	for _, part := range c.wordField(ctx, word.Parts, quoteSingle) {
		if part.quote > quoteNone {
			buf.WriteString(syntax.QuotePattern(part.val))
		} else {
			buf.WriteString(part.val)
		}
	}
	return buf.String()
}
// fieldPart is a fragment of a field, remembering how it was quoted so
// that later steps like globbing can tell quoted and unquoted text apart.
type fieldPart struct {
	val   string
	quote quoteLevel
}

// quoteLevel records the kind of quoting that applied to a field part.
type quoteLevel uint

const (
	quoteNone quoteLevel = iota
	quoteDouble
	quoteSingle
)
// wordField expands a series of word parts into a single field, without
// any field splitting or globbing. ql indicates the surrounding quoting
// level, which affects how backslashes in literals are interpreted.
func (c *Context) wordField(ctx context.Context, wps []syntax.WordPart, ql quoteLevel) []fieldPart {
	var field []fieldPart
	for i, wp := range wps {
		switch x := wp.(type) {
		case *syntax.Lit:
			s := x.Value
			if i == 0 {
				// a leading "~" may expand to a home directory
				s = c.expandUser(s)
			}
			if ql == quoteDouble && strings.Contains(s, "\\") {
				// within double quotes, a backslash only escapes a
				// handful of special characters
				buf := c.strBuilder()
				for i := 0; i < len(s); i++ {
					b := s[i]
					if b == '\\' && i+1 < len(s) {
						switch s[i+1] {
						case '\n': // remove \\\n
							i++
							continue
						case '"', '\\', '$', '`': // special chars
							continue
						}
					}
					buf.WriteByte(b)
				}
				s = buf.String()
			}
			field = append(field, fieldPart{val: s})
		case *syntax.SglQuoted:
			fp := fieldPart{quote: quoteSingle, val: x.Value}
			if x.Dollar {
				// $'...' strings expand escape sequences
				fp.val, _, _ = c.ExpandFormat(fp.val, nil)
			}
			field = append(field, fp)
		case *syntax.DblQuoted:
			// inner parts inherit the double-quote level
			for _, part := range c.wordField(ctx, x.Parts, quoteDouble) {
				part.quote = quoteDouble
				field = append(field, part)
			}
		case *syntax.ParamExp:
			field = append(field, fieldPart{val: c.paramExp(ctx, x)})
		case *syntax.CmdSubst:
			field = append(field, fieldPart{val: c.cmdSubst(ctx, x)})
		case *syntax.ArithmExp:
			field = append(field, fieldPart{
				val: strconv.Itoa(c.ExpandArithm(ctx, x.X)),
			})
		default:
			panic(fmt.Sprintf("unhandled word part: %T", x))
		}
	}
	return field
}
// cmdSubst expands a command substitution via the CmdSubst callback and
// returns its output with trailing newlines stripped. Without a callback,
// an UnexpectedCommandError is reported and the result is empty.
func (c *Context) cmdSubst(ctx context.Context, cs *syntax.CmdSubst) string {
	if c.CmdSubst == nil {
		c.err(UnexpectedCommandError{Node: cs})
		return ""
	}
	buf := c.strBuilder()
	c.CmdSubst(ctx, buf, cs)
	out := buf.String()
	return strings.TrimRight(out, "\n")
}
// wordFields expands a word's parts into any number of fields, applying
// IFS-based field splitting to unquoted expansions. A word made up only
// of quoted empty strings still produces a single empty field.
func (c *Context) wordFields(ctx context.Context, wps []syntax.WordPart) [][]fieldPart {
	fields := c.fieldsAlloc[:0]
	curField := c.fieldAlloc[:0]
	allowEmpty := false
	// flush ends the current field, if it is non-empty
	flush := func() {
		if len(curField) == 0 {
			return
		}
		fields = append(fields, curField)
		curField = nil
	}
	// splitAdd appends val, splitting it into fields on IFS runes
	splitAdd := func(val string) {
		for i, field := range strings.FieldsFunc(val, c.ifsRune) {
			if i > 0 {
				flush()
			}
			curField = append(curField, fieldPart{val: field})
		}
	}
	for i, wp := range wps {
		switch x := wp.(type) {
		case *syntax.Lit:
			s := x.Value
			if i == 0 {
				// a leading "~" may expand to a home directory
				s = c.expandUser(s)
			}
			if strings.Contains(s, "\\") {
				buf := c.strBuilder()
				for i := 0; i < len(s); i++ {
					b := s[i]
					// An unquoted backslash escapes the next byte.
					// Guard against a trailing backslash, which
					// previously indexed one past the end of s and
					// panicked; this matches wordField's guard.
					if b == '\\' && i+1 < len(s) {
						i++
						b = s[i]
					}
					buf.WriteByte(b)
				}
				s = buf.String()
			}
			curField = append(curField, fieldPart{val: s})
		case *syntax.SglQuoted:
			allowEmpty = true
			fp := fieldPart{quote: quoteSingle, val: x.Value}
			if x.Dollar {
				// $'...' strings expand escape sequences
				fp.val, _, _ = c.ExpandFormat(fp.val, nil)
			}
			curField = append(curField, fp)
		case *syntax.DblQuoted:
			allowEmpty = true
			if len(x.Parts) == 1 {
				pe, _ := x.Parts[0].(*syntax.ParamExp)
				// "$@" and "${arr[@]}" expand to one field per element
				if elems := c.quotedElems(pe); elems != nil {
					for i, elem := range elems {
						if i > 0 {
							flush()
						}
						curField = append(curField, fieldPart{
							quote: quoteDouble,
							val:   elem,
						})
					}
					continue
				}
			}
			for _, part := range c.wordField(ctx, x.Parts, quoteDouble) {
				part.quote = quoteDouble
				curField = append(curField, part)
			}
		case *syntax.ParamExp:
			splitAdd(c.paramExp(ctx, x))
		case *syntax.CmdSubst:
			splitAdd(c.cmdSubst(ctx, x))
		case *syntax.ArithmExp:
			curField = append(curField, fieldPart{
				val: strconv.Itoa(c.ExpandArithm(ctx, x.X)),
			})
		default:
			panic(fmt.Sprintf("unhandled word part: %T", x))
		}
	}
	flush()
	if allowEmpty && len(fields) == 0 {
		// e.g. "" still produces one empty field
		fields = append(fields, curField)
	}
	return fields
}
// quotedElems returns the elements of a parameter expansion that is exactly
// ${@} or ${foo[@]}, or nil for any other form of expansion. It is used to
// expand "$@" and "${foo[@]}" into one field per element.
func (c *Context) quotedElems(pe *syntax.ParamExp) []string {
	if pe == nil || pe.Excl || pe.Length || pe.Width {
		return nil
	}
	if pe.Param.Value == "@" {
		// Guard the type assertion; a misbehaving Environ could store
		// "@" with a non-[]string value, which previously panicked.
		elems, _ := c.Env.Get("@").Value.([]string)
		return elems
	}
	if nodeLit(pe.Index) != "@" {
		return nil
	}
	// only array-valued variables expand to multiple elements
	if x, ok := c.Env.Get(pe.Param.Value).Value.([]string); ok {
		return x
	}
	return nil
}
// expandUser performs tilde expansion on field. "~" alone expands to HOME,
// and "~name" expands to that user's home directory, looked up first via
// the Environ ("HOME name") and then via os/user. If the lookup fails, the
// field is returned unchanged.
func (c *Context) expandUser(field string) string {
	if !strings.HasPrefix(field, "~") {
		return field
	}
	name, rest := field[1:], ""
	if slash := strings.Index(name, "/"); slash >= 0 {
		name, rest = name[:slash], name[slash:]
	}
	if name == "" {
		return c.Env.Get("HOME").String() + rest
	}
	if vr := c.Env.Get("HOME " + name); vr.IsSet() {
		return vr.String() + rest
	}
	u, err := user.Lookup(name)
	if err != nil {
		return field
	}
	return u.HomeDir + rest
}
// findAllIndex translates a shell pattern into a regular expression and
// returns up to n matches within name, in regexp's FindAllStringIndex
// form. An untranslatable pattern yields nil.
func findAllIndex(pattern, name string, n int) [][]int {
	expr, err := syntax.TranslatePattern(pattern, true)
	if err != nil {
		return nil
	}
	return regexp.MustCompile(expr).FindAllStringIndex(name, n)
}
// TODO: use this again to optimize globbing; see
// https://github.com/mvdan/sh/issues/213

// hasGlob reports whether path contains any globbing metacharacters.
// Outside of Windows, a backslash is also special, since it can escape
// other characters.
func hasGlob(path string) bool {
	if runtime.GOOS == "windows" {
		return strings.ContainsAny(path, `*?[`)
	}
	return strings.ContainsAny(path, `*?[\`)
}
// rxGlobStar matches any entry name; used to expand each level of "**".
var rxGlobStar = regexp.MustCompile(".*")

// glob expands pattern into the list of existing paths that match it,
// walking one path component at a time via globDir. Relative patterns
// start from ".". An untranslatable pattern component yields nil.
func (c *Context) glob(pattern string) []string {
	parts := strings.Split(pattern, string(filepath.Separator))
	matches := []string{"."}
	if filepath.IsAbs(pattern) {
		if parts[0] == "" {
			// unix-like
			matches[0] = string(filepath.Separator)
		} else {
			// windows (for some reason it won't work without the
			// trailing separator)
			matches[0] = parts[0] + string(filepath.Separator)
		}
		parts = parts[1:]
	}
	for _, part := range parts {
		if part == "**" && c.GlobStar {
			for i := range matches {
				// "a/**" should match "a/ a/b a/b/c ..."; note
				// how the zero-match case has a trailing
				// separator.
				matches[i] += string(filepath.Separator)
			}
			// expand all the possible levels of **
			latest := matches
			for {
				var newMatches []string
				for _, dir := range latest {
					newMatches = c.globDir(dir, rxGlobStar, newMatches)
				}
				if len(newMatches) == 0 {
					// not another level of directories to
					// try; stop
					break
				}
				matches = append(matches, newMatches...)
				latest = newMatches
			}
			continue
		}
		expr, err := syntax.TranslatePattern(part, true)
		if err != nil {
			// an invalid pattern never matches anything
			return nil
		}
		// anchor the pattern so it must match the whole entry name
		rx := regexp.MustCompile("^" + expr + "$")
		var newMatches []string
		for _, dir := range matches {
			newMatches = c.globDir(dir, rx, newMatches)
		}
		matches = newMatches
	}
	return matches
}
// SystemReaddirnames uses os.Open and File.Readdirnames to retrieve the
// sorted names of the files within a directory on the system's
// filesystem. Any error is reported via Context.OnError.
func (c *Context) SystemReaddirnames(dir string) []string {
	f, err := os.Open(dir)
	if err != nil {
		c.err(err)
		return nil
	}
	defer f.Close()
	names, err := f.Readdirnames(-1)
	if err != nil {
		c.err(err)
		return nil
	}
	sort.Strings(names)
	return names
}
// globDir appends to matches the entries of dir whose names match rx.
// Dot entries are skipped unless the pattern itself starts with a literal
// dot. If Readdirnames is nil, globbing is disabled and nil is returned.
func (c *Context) globDir(dir string, rx *regexp.Regexp, matches []string) []string {
	if c.Readdirnames == nil {
		return nil
	}
	matchHidden := strings.HasPrefix(rx.String(), `^\.`)
	for _, name := range c.Readdirnames(dir) {
		if !matchHidden && name[0] == '.' {
			continue
		}
		if rx.MatchString(name) {
			matches = append(matches, filepath.Join(dir, name))
		}
	}
	return matches
}
// ReadFields splits s into at most n fields (all fields if n is -1) using
// the runes in IFS as separators. If raw, backslashes are kept literally;
// otherwise a backslash escapes the following rune, preventing it from
// acting as a separator.
func (c *Context) ReadFields(s string, n int, raw bool) []string {
	c.prepareIFS()
	// pos records a field's rune offsets within the unescaped text
	type pos struct {
		start, end int
	}
	var fpos []pos
	runes := make([]rune, 0, len(s))
	infield := false
	esc := false
	for _, r := range s {
		if infield {
			if c.ifsRune(r) && (raw || !esc) {
				// unescaped separator: the field ends here
				fpos[len(fpos)-1].end = len(runes)
				infield = false
			}
		} else {
			if !c.ifsRune(r) && (raw || !esc) {
				// a new field starts; end is filled in later
				fpos = append(fpos, pos{start: len(runes), end: -1})
				infield = true
			}
		}
		if r == '\\' {
			// in raw mode keep every backslash; otherwise only
			// keep one that was itself escaped
			if raw || esc {
				runes = append(runes, r)
			}
			esc = !esc
			continue
		}
		runes = append(runes, r)
		esc = false
	}
	if len(fpos) == 0 {
		return nil
	}
	if infield {
		// the last field runs to the end of the input
		fpos[len(fpos)-1].end = len(runes)
	}
	switch {
	case n == 1:
		// include heading/trailing IFSs
		fpos[0].start, fpos[0].end = 0, len(runes)
		fpos = fpos[:1]
	case n != -1 && n < len(fpos):
		// combine to max n fields
		fpos[n-1].end = fpos[len(fpos)-1].end
		fpos = fpos[:n]
	}
	var fields = make([]string, len(fpos))
	for i, p := range fpos {
		fields[i] = string(runes[p.start:p.end])
	}
	return fields
}
|
package services
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
"jbrodriguez/unbalance/server/src/algorithm"
"jbrodriguez/unbalance/server/src/dto"
"jbrodriguez/unbalance/server/src/lib"
"jbrodriguez/unbalance/server/src/model"
"github.com/jbrodriguez/actor"
"github.com/jbrodriguez/mlog"
"github.com/jbrodriguez/pubsub"
)
const (
	// mailCmd is the unRAID web GUI notification script used to send alerts.
	mailCmd = "/usr/local/emhttp/webGui/scripts/notify"
	// timeFormat is the layout used when reporting operation timestamps.
	timeFormat = "Jan _2, 2006 15:04:05"
)
// Core service: orchestrates calculate/move/copy operations over the
// unRAID storage model, driven by messages on the pubsub bus.
type Core struct {
	bus      *pubsub.PubSub
	storage  *model.Unraid
	settings *lib.Settings

	// this holds the state of any operation
	operation model.Operation

	actor *actor.Actor

	// precompiled regexps for parsing command output; see NewCore
	reFreeSpace *regexp.Regexp
	reItems     *regexp.Regexp
	reRsync     *regexp.Regexp
	reStat      *regexp.Regexp
	reProgress  *regexp.Regexp

	// rsyncErrors maps rsync exit codes to human-readable descriptions
	rsyncErrors map[int]string
}
// NewCore builds the Core service with its message bus and settings, an
// idle operation state, and the precompiled regexps used elsewhere to
// parse command output.
func NewCore(bus *pubsub.PubSub, settings *lib.Settings) *Core {
	core := &Core{
		bus:      bus,
		settings: settings,
		// opState: stateIdle,
		storage: &model.Unraid{},
		actor:   actor.NewActor(bus),
		operation: model.Operation{
			OpState:   model.StateIdle,
			PrevState: model.StateIdle,
			DryRun:    settings.DryRun,
		},
	}
	core.reFreeSpace = regexp.MustCompile(`(.*?)\s+(\d+)\s+(\d+)\s+(\d+)\s+(.*?)\s+(.*?)$`)
	core.reItems = regexp.MustCompile(`(\d+)\s+(.*?)$`)
	core.reRsync = regexp.MustCompile(`exit status (\d+)`)
	core.reProgress = regexp.MustCompile(`(?s)^([\d,]+).*?\(.*?\)$|^([\d,]+).*?$`)
	core.reStat = regexp.MustCompile(`[-dclpsbD]([-rwxsS]{3})([-rwxsS]{3})([-rwxtT]{3})\|(.*?)\:(.*?)\|(.*?)\|(.*)`)
	// rsync exit codes mapped to readable descriptions, as listed in rsync(1)
	core.rsyncErrors = map[int]string{
		0:  "Success",
		1:  "Syntax or usage error",
		2:  "Protocol incompatibility",
		3:  "Errors selecting input/output files, dirs",
		4:  "Requested action not supported: an attempt was made to manipulate 64-bit files on a platform that cannot support them, or an option was specified that is supported by the client and not by the server.",
		5:  "Error starting client-server protocol",
		6:  "Daemon unable to append to log-file",
		10: "Error in socket I/O",
		11: "Error in file I/O",
		12: "Error in rsync protocol data stream",
		13: "Errors with program diagnostics",
		14: "Error in IPC code",
		20: "Received SIGUSR1 or SIGINT",
		21: "Some error returned by waitpid()",
		22: "Error allocating core memory buffers",
		23: "Partial transfer due to error",
		24: "Partial transfer due to vanished source files",
		25: "The --max-delete limit stopped deletions",
		30: "Timeout in data send/receive",
		35: "Timeout waiting for daemon connection",
	}
	// core.ownerPerms = map[int]bool{
	// 	644
	// }
	return core
}
// Start registers all actor message routes, sanity-checks the configured
// storage folders, and launches the actor loop in its own goroutine.
func (c *Core) Start() (err error) {
	mlog.Info("starting service Core ...")
	// config/status routes
	c.actor.Register("/get/config", c.getConfig)
	c.actor.Register("/get/status", c.getStatus)
	c.actor.Register("/config/set/notifyCalc", c.setNotifyCalc)
	c.actor.Register("/config/set/notifyMove", c.setNotifyMove)
	c.actor.Register("/config/set/reservedSpace", c.setReservedSpace)
	c.actor.Register("/config/set/verbosity", c.setVerbosity)
	c.actor.Register("/get/storage", c.getStorage)
	c.actor.Register("/config/toggle/dryRun", c.toggleDryRun)
	c.actor.Register("/get/tree", c.getTree)
	c.actor.Register("/disks/locate", c.locate)
	c.actor.Register("/config/set/rsyncFlags", c.setRsyncFlags)
	// operation routes
	c.actor.Register("calculate", c.calc)
	c.actor.Register("move", c.move)
	c.actor.Register("copy", c.copy)
	c.actor.Register("validate", c.validate)
	c.actor.Register("getLog", c.getLog)
	c.actor.Register("findTargets", c.findTargets)
	c.actor.Register("gather", c.gather)
	// make sure the folders the app relies on are present
	err = c.storage.SanityCheck(c.settings.APIFolders)
	if err != nil {
		return err
	}
	go c.actor.React()
	return nil
}
// Stop logs service shutdown. Note that the actor goroutine is not
// explicitly stopped here.
func (c *Core) Stop() {
	mlog.Info("stopped service Core ...")
}
// SetStorage replaces the cached unRAID storage model.
func (c *Core) SetStorage(unraid *model.Unraid) {
	c.storage = unraid
}
// getConfig replies with the current configuration, migrating the legacy
// rsync flag presets to "-avPRX" on the fly.
func (c *Core) getConfig(msg *pubsub.Message) {
	mlog.Info("Sending config")
	switch strings.Join(c.settings.RsyncFlags, " ") {
	case "-avX --partial", "-avRX --partial":
		c.settings.RsyncFlags = []string{"-avPRX"}
		c.settings.Save()
	}
	msg.Reply <- &c.settings.Config
}
// getStatus replies with the current operation state.
func (c *Core) getStatus(msg *pubsub.Message) {
	mlog.Info("Sending status")
	msg.Reply <- c.operation.OpState
}
// setNotifyCalc updates the NotifyCalc setting from the websocket payload
// (a JSON number, hence the float64 assertion) and replies with the
// updated config.
func (c *Core) setNotifyCalc(msg *pubsub.Message) {
	fnotify := msg.Payload.(float64)
	notify := int(fnotify)
	mlog.Info("Setting notifyCalc to (%d)", notify)
	c.settings.NotifyCalc = notify
	// log a failed save instead of silently dropping the error, as
	// setVerbosity already does
	if err := c.settings.Save(); err != nil {
		mlog.Warning("unable to save settings: %s", err)
	}
	msg.Reply <- &c.settings.Config
}
// setNotifyMove updates the NotifyMove setting from the websocket payload
// (a JSON number, hence the float64 assertion) and replies with the
// updated config.
func (c *Core) setNotifyMove(msg *pubsub.Message) {
	fnotify := msg.Payload.(float64)
	notify := int(fnotify)
	mlog.Info("Setting notifyMove to (%d)", notify)
	c.settings.NotifyMove = notify
	// log a failed save instead of silently dropping the error, as
	// setVerbosity already does
	if err := c.settings.Save(); err != nil {
		mlog.Warning("unable to save settings: %s", err)
	}
	msg.Reply <- &c.settings.Config
}
// setVerbosity updates the logging verbosity setting, logging any save
// failure, and replies with the updated config.
func (c *Core) setVerbosity(msg *pubsub.Message) {
	verbosity := int(msg.Payload.(float64))
	mlog.Info("Setting verbosity to (%d)", verbosity)
	c.settings.Verbosity = verbosity
	if err := c.settings.Save(); err != nil {
		mlog.Warning("not right %s", err)
	}
	msg.Reply <- &c.settings.Config
}
// setReservedSpace parses the reserved-space JSON payload and persists the
// new amount/unit, always replying with the resulting config.
func (c *Core) setReservedSpace(msg *pubsub.Message) {
	mlog.Warning("payload: %+v", msg.Payload)
	payload, ok := msg.Payload.(string)
	if !ok {
		mlog.Warning("Unable to convert Reserved Space parameters")
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to convert Reserved Space parameters"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		msg.Reply <- &c.settings.Config
		return
	}
	var reserved dto.Reserved
	err := json.Unmarshal([]byte(payload), &reserved)
	if err != nil {
		mlog.Warning("Unable to bind reservedSpace parameters: %s", err)
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to bind reservedSpace parameters"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		// reply so the requester isn't left blocked on the channel,
		// matching the conversion-failure path above
		msg.Reply <- &c.settings.Config
		return
	}
	amount := int64(reserved.Amount)
	unit := reserved.Unit
	mlog.Info("Setting reservedAmount to (%d)", amount)
	mlog.Info("Setting reservedUnit to (%s)", unit)
	c.settings.ReservedAmount = amount
	c.settings.ReservedUnit = unit
	c.settings.Save()
	msg.Reply <- &c.settings.Config
}
// getStorage refreshes the storage stats when idle, or computes transfer
// progress while a move/copy/gather operation is running, then replies
// with the storage snapshot.
func (c *Core) getStorage(msg *pubsub.Message) {
	var stats string
	if c.operation.OpState == model.StateIdle {
		c.storage.Refresh()
	} else if c.operation.OpState == model.StateMove || c.operation.OpState == model.StateCopy || c.operation.OpState == model.StateGather {
		// progress yields percent done, time remaining, and MB/s speed
		percent, left, speed := progress(c.operation.BytesToTransfer, c.operation.BytesTransferred, time.Since(c.operation.Started))
		stats = fmt.Sprintf("%.2f%% done ~ %s left (%.2f MB/s)", percent, left, speed)
	}
	c.storage.Stats = stats
	c.storage.OpState = c.operation.OpState
	c.storage.PrevState = c.operation.PrevState
	c.storage.BytesToTransfer = c.operation.BytesToTransfer
	msg.Reply <- c.storage
}
// toggleDryRun flips the dry-run setting, persists it, and replies with
// the updated config.
func (c *Core) toggleDryRun(msg *pubsub.Message) {
	mlog.Info("Toggling dryRun from (%t)", c.settings.DryRun)
	c.settings.ToggleDryRun()
	c.settings.Save()
	msg.Reply <- &c.settings.Config
}
// getTree replies with the result of storage.GetTree for the given path.
func (c *Core) getTree(msg *pubsub.Message) {
	path := msg.Payload.(string)
	msg.Reply <- c.storage.GetTree(path)
}
// locate replies with the result of storage.Locate for the chosen paths.
func (c *Core) locate(msg *pubsub.Message) {
	chosen := msg.Payload.([]string)
	msg.Reply <- c.storage.Locate(chosen)
}
// setRsyncFlags parses the rsync-flags JSON payload, stores the new flags,
// and always replies with the resulting config.
func (c *Core) setRsyncFlags(msg *pubsub.Message) {
	// mlog.Warning("payload: %+v", msg.Payload)
	payload, ok := msg.Payload.(string)
	if !ok {
		mlog.Warning("Unable to convert Rsync Flags parameters")
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to convert Rsync Flags parameters"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		msg.Reply <- &c.settings.Config
		return
	}
	var rsync dto.Rsync
	err := json.Unmarshal([]byte(payload), &rsync)
	if err != nil {
		mlog.Warning("Unable to bind rsyncFlags parameters: %s", err)
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to bind rsyncFlags parameters"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		// reply so the requester isn't left blocked on the channel,
		// matching the conversion-failure path above
		msg.Reply <- &c.settings.Config
		return
	}
	mlog.Info("Setting rsyncFlags to (%s)", strings.Join(rsync.Flags, " "))
	c.settings.RsyncFlags = rsync.Flags
	c.settings.Save()
	msg.Reply <- &c.settings.Config
}
// calc resets the operation state to calculating and runs the actual
// calculation asynchronously so the actor loop isn't blocked.
func (c *Core) calc(msg *pubsub.Message) {
	c.operation = model.Operation{OpState: model.StateCalc, PrevState: model.StateIdle}
	go c._calc(msg)
}
func (c *Core) _calc(msg *pubsub.Message) {
defer func() { c.operation.OpState = model.StateIdle }()
payload, ok := msg.Payload.(string)
if !ok {
mlog.Warning("Unable to convert calculate parameters")
outbound := &dto.Packet{Topic: "opError", Payload: "Unable to convert calculate parameters"}
c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
return
}
var dtoCalc dto.Calculate
err := json.Unmarshal([]byte(payload), &dtoCalc)
if err != nil {
mlog.Warning("Unable to bind calculate parameters: %s", err)
outbound := &dto.Packet{Topic: "opError", Payload: "Unable to bind calculate parameters"}
c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
return
// mlog.Fatalf(err.Error())
}
mlog.Info("Running calculate operation ...")
c.operation.Started = time.Now()
outbound := &dto.Packet{Topic: "calcStarted", Payload: "Operation started"}
c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
disks := make([]*model.Disk, 0)
// create array of destination disks
var srcDisk *model.Disk
for _, disk := range c.storage.Disks {
// reset disk
disk.NewFree = disk.Free
disk.Bin = nil
disk.Src = false
disk.Dst = dtoCalc.DestDisks[disk.Path]
if disk.Path == dtoCalc.SourceDisk {
disk.Src = true
srcDisk = disk
} else {
// add it to the target disk list, only if the user selected it
if val, ok := dtoCalc.DestDisks[disk.Path]; ok && val {
// double check, if it's a cache disk, make sure it's the main cache disk
if disk.Type == "Cache" && len(disk.Name) > 5 {
continue
}
disks = append(disks, disk)
}
}
}
mlog.Info("_calc:Begin:srcDisk(%s); dstDisks(%d)", srcDisk.Path, len(disks))
for _, disk := range disks {
mlog.Info("_calc:elegibleDestDisk(%s)", disk.Path)
}
sort.Sort(model.ByFree(disks))
srcDiskWithoutMnt := srcDisk.Path[5:]
owner := ""
lib.Shell("id -un", mlog.Warning, "owner", "", func(line string) {
owner = line
})
group := ""
lib.Shell("id -gn", mlog.Warning, "group", "", func(line string) {
group = line
})
c.operation.OwnerIssue = 0
c.operation.GroupIssue = 0
c.operation.FolderIssue = 0
c.operation.FileIssue = 0
// Check permission and gather folders to be transferred from
// source disk
folders := make([]*model.Item, 0)
for _, path := range dtoCalc.Folders {
msg := fmt.Sprintf("Scanning %s on %s", path, srcDiskWithoutMnt)
outbound = &dto.Packet{Topic: "calcProgress", Payload: msg}
c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
mlog.Info("_calc:%s", msg)
c.checkOwnerAndPermissions(&c.operation, dtoCalc.SourceDisk, path, owner, group)
msg = "Checked permissions ..."
outbound := &dto.Packet{Topic: "calcProgress", Payload: msg}
c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
mlog.Info("_calc:%s", msg)
list := c.getFolders(dtoCalc.SourceDisk, path)
if list != nil {
folders = append(folders, list...)
}
}
mlog.Info("_calc:foldersToBeTransferredTotal(%d)", len(folders))
for _, v := range folders {
mlog.Info("_calc:toBeTransferred:Path(%s); Size(%s)", v.Path, lib.ByteSize(v.Size))
}
willBeTransferred := make([]*model.Item, 0)
if len(folders) > 0 {
// Initialize fields
// c.storage.BytesToTransfer = 0
// c.storage.SourceDiskName = srcDisk.Path
c.operation.BytesToTransfer = 0
c.operation.SourceDiskName = srcDisk.Path
for _, disk := range disks {
diskWithoutMnt := disk.Path[5:]
msg := fmt.Sprintf("Trying to allocate folders to %s ...", diskWithoutMnt)
outbound = &dto.Packet{Topic: "calcProgress", Payload: msg}
c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
mlog.Info("_calc:%s", msg)
// time.Sleep(2 * time.Second)
if disk.Path != srcDisk.Path {
// disk.NewFree = disk.Free
var reserved int64
switch c.settings.ReservedUnit {
case "%":
fcalc := disk.Size * c.settings.ReservedAmount / 100
reserved = int64(fcalc)
break
case "Mb":
reserved = c.settings.ReservedAmount * 1000 * 1000
break
case "Gb":
reserved = c.settings.ReservedAmount * 1000 * 1000 * 1000
break
default:
reserved = lib.ReservedSpace
}
ceil := lib.Max(lib.ReservedSpace, reserved)
mlog.Info("_calc:FoldersLeft(%d):ReservedSpace(%d)", len(folders), ceil)
packer := algorithm.NewKnapsack(disk, folders, ceil)
bin := packer.BestFit()
if bin != nil {
srcDisk.NewFree += bin.Size
disk.NewFree -= bin.Size
c.operation.BytesToTransfer += bin.Size
// c.storage.BytesToTransfer += bin.Size
willBeTransferred = append(willBeTransferred, bin.Items...)
folders = c.removeFolders(folders, bin.Items)
mlog.Info("_calc:BinAllocated=[Disk(%s); Items(%d)];Freespace=[original(%s); final(%s)]", disk.Path, len(bin.Items), lib.ByteSize(srcDisk.Free), lib.ByteSize(srcDisk.NewFree))
} else {
mlog.Info("_calc:NoBinAllocated=Disk(%s)", disk.Path)
}
}
}
}
c.operation.Finished = time.Now()
elapsed := lib.Round(time.Since(c.operation.Started), time.Millisecond)
fstarted := c.operation.Started.Format(timeFormat)
ffinished := c.operation.Finished.Format(timeFormat)
// Send to frontend console started/ended/elapsed times
outbound = &dto.Packet{Topic: "calcProgress", Payload: fmt.Sprintf("Started: %s", fstarted)}
c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
outbound = &dto.Packet{Topic: "calcProgress", Payload: fmt.Sprintf("Ended: %s", ffinished)}
c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
outbound = &dto.Packet{Topic: "calcProgress", Payload: fmt.Sprintf("Elapsed: %s", elapsed)}
c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
if len(willBeTransferred) == 0 {
mlog.Info("_calc:No folders can be transferred.")
} else {
mlog.Info("_calc:%d folders will be transferred.", len(willBeTransferred))
for _, folder := range willBeTransferred {
mlog.Info("_calc:willBeTransferred(%s)", folder.Path)
}
}
// send to frontend the folders that will not be transferred, if any
// notTransferred holds a string representation of all the folders, separated by a '\n'
c.operation.FoldersNotTransferred = make([]string, 0)
notTransferred := ""
if len(folders) > 0 {
outbound = &dto.Packet{Topic: "calcProgress", Payload: "The following folders will not be transferred, because there's not enough space in the target disks:\n"}
c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
mlog.Info("_calc:%d folders will NOT be transferred.", len(folders))
for _, folder := range folders {
c.operation.FoldersNotTransferred = append(c.operation.FoldersNotTransferred, folder.Path)
notTransferred += folder.Path + "\n"
outbound = &dto.Packet{Topic: "calcProgress", Payload: folder.Path}
c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
mlog.Info("_calc:notTransferred(%s)", folder.Path)
}
}
// send mail according to user preferences
subject := "unBALANCE - CALCULATE operation completed"
message := fmt.Sprintf("\n\nStarted: %s\nEnded: %s\n\nElapsed: %s", fstarted, ffinished, elapsed)
if notTransferred != "" {
switch c.settings.NotifyCalc {
case 1:
message += "\n\nSome folders will not be transferred because there's not enough space for them in any of the destination disks."
case 2:
message += "\n\nThe following folders will not be transferred because there's not enough space for them in any of the destination disks:\n\n" + notTransferred
}
}
if c.operation.OwnerIssue > 0 || c.operation.GroupIssue > 0 || c.operation.FolderIssue > 0 || c.operation.FileIssue > 0 {
message += fmt.Sprintf(`
\n\nThere are some permission issues:
\n\n%d file(s)/folder(s) with an owner other than 'nobody'
\n%d file(s)/folder(s) with a group other than 'users'
\n%d folder(s) with a permission other than 'drwxrwxrwx'
\n%d files(s) with a permission other than '-rw-rw-rw-' or '-r--r--r--'
\n\nCheck the log file (/boot/logs/unbalance.log) for additional information
\n\nIt's strongly suggested to install the Fix Common Plugins and run the Docker Safe New Permissions command
`, c.operation.OwnerIssue, c.operation.GroupIssue, c.operation.FolderIssue, c.operation.FileIssue)
}
if sendErr := c.sendmail(c.settings.NotifyCalc, subject, message, false); sendErr != nil {
mlog.Error(sendErr)
}
// some local logging
mlog.Info("_calc:FoldersLeft(%d)", len(folders))
mlog.Info("_calc:src(%s):Listing (%d) disks ...", srcDisk.Path, len(c.storage.Disks))
for _, disk := range c.storage.Disks {
// mlog.Info("the mystery of the year(%s)", disk.Path)
disk.Print()
}
mlog.Info("=========================================================")
mlog.Info("Results for %s", srcDisk.Path)
mlog.Info("Original Free Space: %s", lib.ByteSize(srcDisk.Free))
mlog.Info("Final Free Space: %s", lib.ByteSize(srcDisk.NewFree))
mlog.Info("Gained Space: %s", lib.ByteSize(srcDisk.NewFree-srcDisk.Free))
mlog.Info("Bytes To Move: %s", lib.ByteSize(c.operation.BytesToTransfer))
mlog.Info("---------------------------------------------------------")
c.storage.Print()
// msg.Reply <- c.storage
mlog.Info("_calc:End:srcDisk(%s)", srcDisk.Path)
outbound = &dto.Packet{Topic: "calcProgress", Payload: "Operation Finished"}
c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
c.storage.BytesToTransfer = c.operation.BytesToTransfer
c.storage.OpState = c.operation.OpState
c.storage.PrevState = c.operation.PrevState
// send to front end the signal of operation finished
outbound = &dto.Packet{Topic: "calcFinished", Payload: c.storage}
c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
// only send the perm issue msg if there's actually some work to do (BytesToTransfer > 0)
// and there actually perm issues
if c.operation.BytesToTransfer > 0 && (c.operation.OwnerIssue+c.operation.GroupIssue+c.operation.FolderIssue+c.operation.FileIssue > 0) {
outbound = &dto.Packet{Topic: "calcPermIssue", Payload: fmt.Sprintf("%d|%d|%d|%d", c.operation.OwnerIssue, c.operation.GroupIssue, c.operation.FolderIssue, c.operation.FileIssue)}
c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
}
}
// getFolders scans the folder named `folder` on the disk mounted at `src` and
// returns the transfer candidates found there: the entry itself if it is a
// plain file, otherwise one item per immediate child (sized via `du -bs`).
// A "calcProgress" packet is broadcast for every item found.
// Returns nil if the folder is missing, unreadable or empty.
func (c *Core) getFolders(src string, folder string) (items []*model.Item) {
	srcFolder := filepath.Join(src, folder)

	mlog.Info("getFolders:Scanning source-disk(%s):folder(%s)", src, folder)

	fi, err := os.Stat(srcFolder)
	if os.IsNotExist(err) {
		mlog.Warning("getFolders:Folder does not exist: %s", srcFolder)
		return nil
	}
	if err != nil {
		// Previously any non-NotExist stat failure left fi nil and crashed on
		// fi.IsDir() below; bail out instead.
		mlog.Warning("getFolders:Unable to stat %s: %s", srcFolder, err)
		return nil
	}

	// A plain file is a single transfer item of its own size.
	if !fi.IsDir() {
		mlog.Info("getFolder-found(%s)-size(%d)", srcFolder, fi.Size())

		item := &model.Item{Name: folder, Size: fi.Size(), Path: folder, Location: src}
		items = append(items, item)

		msg := fmt.Sprintf("Found %s (%s)", item.Name, lib.ByteSize(item.Size))
		outbound := &dto.Packet{Topic: "calcProgress", Payload: msg}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

		return
	}

	dirs, err := ioutil.ReadDir(srcFolder)
	if err != nil {
		mlog.Warning("getFolders:Unable to readdir: %s", err)
	}

	mlog.Info("getFolders:Readdir(%d)", len(dirs))

	if len(dirs) == 0 {
		mlog.Info("getFolders:No subdirectories under %s", srcFolder)
		return nil
	}

	// Size every immediate child in one shot: find each entry (but not the
	// folder itself, hence `! -name . -prune`) and let du report the bytes.
	scanFolder := srcFolder + "/."
	cmdText := fmt.Sprintf("find \"%s\" ! -name . -prune -exec du -bs {} +", scanFolder)

	mlog.Info("getFolders:Executing %s", cmdText)

	lib.Shell(cmdText, mlog.Warning, "getFolders:find/du:", "", func(line string) {
		mlog.Info("getFolders:find(%s): %s", scanFolder, line)

		result := c.reItems.FindStringSubmatch(line)
		if result == nil {
			// Previously an unparseable du line caused a nil-slice index panic.
			mlog.Warning("getFolders:Unable to parse (%s)", line)
			return
		}

		size, _ := strconv.ParseInt(result[1], 10, 64)

		item := &model.Item{Name: result[2], Size: size, Path: filepath.Join(folder, filepath.Base(result[2])), Location: src}
		items = append(items, item)

		msg := fmt.Sprintf("Found %s (%s)", filepath.Base(item.Name), lib.ByteSize(size))
		outbound := &dto.Packet{Topic: "calcProgress", Payload: msg}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
	})

	return
}
// checkOwnerAndPermissions walks src/folder with find+stat and tallies, on
// the given operation, every entry whose owner isn't 'nobody', group isn't
// 'users', or whose mode differs from the expected defaults (rwxrwxrwx for
// directories, rw-rw-rw- or r--r--r-- for files).
// NOTE(review): ownerName and groupName are currently unused — the checks
// hardcode "nobody"/"users"; presumably they were meant to be compared
// against these parameters. Confirm before relying on them.
func (c *Core) checkOwnerAndPermissions(operation *model.Operation, src, folder, ownerName, groupName string) {
	srcFolder := filepath.Join(src, folder)

	// outbound := &dto.Packet{Topic: "calcProgress", Payload: "Checking permissions ..."}
	// c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	mlog.Info("perms:Scanning disk(%s):folder(%s)", src, folder)

	if _, err := os.Stat(srcFolder); os.IsNotExist(err) {
		mlog.Warning("perms:Folder does not exist: %s", srcFolder)
		return
	}

	// stat prints "perms|user:group|kind|name" for every entry under the folder.
	scanFolder := srcFolder + "/."
	cmdText := fmt.Sprintf(`find "%s" -exec stat --format "%%A|%%U:%%G|%%F|%%n" {} \;`, scanFolder)

	mlog.Info("perms:Executing %s", cmdText)

	lib.Shell(cmdText, mlog.Warning, "perms:find/stat:", "", func(line string) {
		result := c.reStat.FindStringSubmatch(line)
		if result == nil {
			mlog.Warning("perms:Unable to parse (%s)", line)
			return
		}

		// reStat captures: the owner/group/other permission triads, then the
		// user, group, file kind and name fields.
		u := result[1]
		g := result[2]
		o := result[3]
		user := result[4]
		group := result[5]
		kind := result[6]
		name := result[7]

		perms := u + g + o

		if user != "nobody" {
			mlog.Info("perms:User != nobody: [%s]: %s", user, name)
			operation.OwnerIssue++
		}

		if group != "users" {
			mlog.Info("perms:Group != users: [%s]: %s", group, name)
			operation.GroupIssue++
		}

		if kind == "directory" {
			if perms != "rwxrwxrwx" {
				mlog.Info("perms:Folder perms != rwxrwxrwx: [%s]: %s", perms, name)
				operation.FolderIssue++
			}
		} else {
			match := strings.Compare(perms, "r--r--r--") == 0 || strings.Compare(perms, "rw-rw-rw-") == 0
			if !match {
				mlog.Info("perms:File perms != rw-rw-rw- or r--r--r--: [%s]: %s", perms, name)
				operation.FileIssue++
			}
		}
	})

	return
}
// removeFolders returns folders with every entry whose Name appears in list
// removed. Order is preserved and the input's backing array is reused.
func (c *Core) removeFolders(folders []*model.Item, list []*model.Item) []*model.Item {
	// Index the names slated for removal for O(1) membership checks.
	remove := make(map[string]struct{}, len(list))
	for _, itm := range list {
		remove[itm.Name] = struct{}{}
	}

	// In-place filter: keep only entries not present in the removal set.
	kept := folders[:0]
	for _, fld := range folders {
		if _, found := remove[fld.Name]; !found {
			kept = append(kept, fld)
		}
	}
	return kept
}
// move kicks off an asynchronous Move transfer of the calculated bins.
func (c *Core) move(msg *pubsub.Message) {
	c.operation.OpState, c.operation.PrevState = model.StateMove, model.StateMove
	go c.transfer("Move", false, msg)
}
// copy kicks off an asynchronous Copy transfer of the calculated bins.
func (c *Core) copy(msg *pubsub.Message) {
	c.operation.OpState, c.operation.PrevState = model.StateCopy, model.StateCopy
	go c.transfer("Copy", false, msg)
}
// validate kicks off an asynchronous checksum validation of the last transfer.
func (c *Core) validate(msg *pubsub.Message) {
	c.operation.OpState, c.operation.PrevState = model.StateValidate, model.StateValidate
	go c.checksum(msg)
}
// transfer performs the Move/Copy/Gather heavy lifting: it (re)builds the
// rsync command list from the bins computed during the calculate phase and
// hands them to runOperation. multiSource is true for gather operations,
// where each item carries its own source disk in item.Location.
func (c *Core) transfer(opName string, multiSource bool, msg *pubsub.Message) {
	// Always reset transient operation state when the transfer finishes.
	defer func() {
		c.operation.OpState = model.StateIdle
		c.operation.Started = time.Time{}
		c.operation.BytesTransferred = 0
		c.operation.Target = ""
	}()

	mlog.Info("Running %s operation ...", opName)
	c.operation.Started = time.Now()

	outbound := &dto.Packet{Topic: "transferStarted", Payload: "Operation started"}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	outbound = &dto.Packet{Topic: "progressStats", Payload: "Waiting to collect stats ..."}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	// user may have changed rsync flags or dry-run setting, adjust for it
	c.operation.RsyncFlags = c.settings.RsyncFlags
	c.operation.DryRun = c.settings.DryRun
	if c.operation.DryRun {
		c.operation.RsyncFlags = append(c.operation.RsyncFlags, "--dry-run")
	}
	c.operation.RsyncStrFlags = strings.Join(c.operation.RsyncFlags, " ")

	workdir := c.operation.SourceDiskName

	c.operation.Commands = make([]model.Command, 0)

	// One rsync command per item allocated to each non-source disk.
	for _, disk := range c.storage.Disks {
		if disk.Bin == nil || disk.Src {
			continue
		}

		for _, item := range disk.Bin.Items {
			var src, dst string

			if strings.Contains(c.operation.RsyncStrFlags, "R") {
				// Relative mode (-R): rsync recreates the path under dst, so
				// the source must be relative (strip a leading separator).
				// NOTE(review): assumes item.Path is non-empty — confirm.
				if item.Path[0] == filepath.Separator {
					src = item.Path[1:]
				} else {
					src = item.Path
				}

				dst = disk.Path + string(filepath.Separator)
			} else {
				src = filepath.Join(c.operation.SourceDiskName, item.Path)
				dst = filepath.Join(disk.Path, filepath.Dir(item.Path)) + string(filepath.Separator)
			}

			// Gather: each item remembers which disk it came from.
			if multiSource {
				workdir = item.Location
			}

			c.operation.Commands = append(c.operation.Commands, model.Command{
				Src:     src,
				Dst:     dst,
				Path:    item.Path,
				Size:    item.Size,
				WorkDir: workdir,
			})
		}
	}

	if c.settings.NotifyMove == 2 {
		c.notifyCommandsToRun(opName)
	}

	// execute each rsync command created in the step above
	c.runOperation(opName, c.operation.RsyncFlags, c.operation.RsyncStrFlags, multiSource)
}
// checksum implements the Validate operation: it re-runs the rsync commands
// from the previous transfer with -a replaced by -rc so rsync compares
// checksums instead of copying. It aborts early (and notifies the user) if
// the stored flags don't start with -a, since the substitution would then be
// meaningless.
func (c *Core) checksum(msg *pubsub.Message) {
	// Always return the operation to idle on exit.
	defer func() {
		c.operation.OpState = model.StateIdle
		c.operation.PrevState = model.StateIdle
		c.operation.Started = time.Time{}
		c.operation.BytesTransferred = 0
	}()

	opName := "Validate"

	mlog.Info("Running %s operation ...", opName)
	c.operation.Started = time.Now()

	outbound := &dto.Packet{Topic: "transferStarted", Payload: "Operation started"}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	multiSource := false

	// Guard: validation only works when the transfer flags begin with -a.
	if !strings.HasPrefix(c.operation.RsyncStrFlags, "-a") {
		finished := time.Now()
		elapsed := time.Since(c.operation.Started)

		subject := fmt.Sprintf("unBALANCE - %s operation INTERRUPTED", strings.ToUpper(opName))
		headline := fmt.Sprintf("For proper %s operation, rsync flags MUST begin with -a", opName)

		mlog.Warning(headline)

		outbound := &dto.Packet{Topic: "opError", Payload: fmt.Sprintf("%s operation was interrupted. Check log (/boot/logs/unbalance.log) for details.", opName)}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

		_, _, speed := progress(c.operation.BytesToTransfer, 0, elapsed)
		c.finishTransferOperation(subject, headline, make([]string, 0), c.operation.Started, finished, elapsed, 0, speed, multiSource)

		return
	}

	// Initialize local variables
	// we use the rsync flags that were created by the transfer operation,
	// but replace -a with -rc, to perform the validation
	checkRsyncFlags := make([]string, 0)
	for _, flag := range c.operation.RsyncFlags {
		checkRsyncFlags = append(checkRsyncFlags, strings.Replace(flag, "-a", "-rc", -1))
	}

	checkRsyncStrFlags := strings.Join(checkRsyncFlags, " ")

	// execute each rsync command created in the transfer phase
	c.runOperation(opName, checkRsyncFlags, checkRsyncStrFlags, multiSource)
}
// runOperation executes every rsync command prepared for the current
// operation, streaming rsync's output to the frontend (throttled),
// accumulating progress stats, and — for Move/Gather on a real run —
// deleting each source folder once its transfer succeeds. Any rsync failure
// aborts the remaining commands and reports an interrupted operation.
func (c *Core) runOperation(opName string, rsyncFlags []string, rsyncStrFlags string, multiSource bool) {
	// Initialize local variables
	var calls int64
	var callsPerDelta int64

	var finished time.Time
	var elapsed time.Duration

	commandsExecuted := make([]string, 0)

	c.operation.BytesTransferred = 0

	for _, command := range c.operation.Commands {
		// NOTE(review): appending to the shared rsyncFlags slice may reuse its
		// backing array across iterations; harmless here because args isn't
		// retained past the ShellEx call, but fragile if that ever changes.
		args := append(
			rsyncFlags,
			command.Src,
			command.Dst,
		)

		// Printable form of the command, for logging/notification.
		cmd := fmt.Sprintf(`rsync %s %s %s`, rsyncStrFlags, strconv.Quote(command.Src), strconv.Quote(command.Dst))
		mlog.Info("Command Started: (src: %s) %s ", command.WorkDir, cmd)

		outbound := &dto.Packet{Topic: "transferProgress", Payload: cmd}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

		// Bytes completed before this command started, plus the delta moved
		// by the file currently in flight.
		bytesTransferred := c.operation.BytesTransferred
		var deltaMoved int64

		// actual shell execution
		err := lib.ShellEx(func(text string) {
			line := strings.TrimSpace(text)
			if len(line) <= 0 {
				return
			}

			// Throttle frontend updates to roughly 50 lines per elapsed second.
			if callsPerDelta <= 50 {
				calls++
			}

			delta := int64(time.Since(c.operation.Started) / time.Second)
			if delta == 0 {
				delta = 1
			}
			callsPerDelta = calls / delta

			match := c.reProgress.FindStringSubmatch(line)
			if match == nil {
				// this is a regular output line from rsync, print it
				// according to verbosity settings
				if c.settings.Verbosity == 1 {
					mlog.Info("%s", line)
				}

				if callsPerDelta <= 50 {
					outbound := &dto.Packet{Topic: "transferProgress", Payload: line}
					c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
				}

				return
			}

			// this is a file transfer progress output line
			if match[1] == "" {
				// this happens when the file hasn't finished transferring
				moved := strings.Replace(match[2], ",", "", -1)
				deltaMoved, _ = strconv.ParseInt(moved, 10, 64)
			} else {
				// the file has finished transferring
				moved := strings.Replace(match[1], ",", "", -1)
				deltaMoved, _ = strconv.ParseInt(moved, 10, 64)
				bytesTransferred += deltaMoved
			}

			percent, left, speed := progress(c.operation.BytesToTransfer, bytesTransferred+deltaMoved, time.Since(c.operation.Started))
			msg := fmt.Sprintf("%.2f%% done ~ %s left (%.2f MB/s)", percent, left, speed)

			if callsPerDelta <= 50 {
				outbound := &dto.Packet{Topic: "progressStats", Payload: msg}
				c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
			}
		}, command.WorkDir, "rsync", args...)

		finished = time.Now()
		elapsed = time.Since(c.operation.Started)

		if err != nil {
			// rsync failed: translate its exit code, tell the user, and stop
			// processing the remaining commands.
			subject := fmt.Sprintf("unBALANCE - %s operation INTERRUPTED", strings.ToUpper(opName))
			headline := fmt.Sprintf("Command Interrupted: %s (%s)", cmd, err.Error()+" : "+getError(err.Error(), c.reRsync, c.rsyncErrors))

			mlog.Warning(headline)
			outbound := &dto.Packet{Topic: "opError", Payload: fmt.Sprintf("%s operation was interrupted. Check log (/boot/logs/unbalance.log) for details.", opName)}
			c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

			_, _, speed := progress(c.operation.BytesToTransfer, bytesTransferred+deltaMoved, elapsed)
			c.finishTransferOperation(subject, headline, commandsExecuted, c.operation.Started, finished, elapsed, bytesTransferred+deltaMoved, speed, multiSource)

			return
		}

		mlog.Info("Command Finished")

		c.operation.BytesTransferred = c.operation.BytesTransferred + command.Size

		percent, left, speed := progress(c.operation.BytesToTransfer, c.operation.BytesTransferred, elapsed)
		msg := fmt.Sprintf("%.2f%% done ~ %s left (%.2f MB/s)", percent, left, speed)
		mlog.Info("Current progress: %s", msg)

		outbound = &dto.Packet{Topic: "progressStats", Payload: msg}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

		commandsExecuted = append(commandsExecuted, cmd)

		// if it isn't a dry-run and the operation is Move or Gather, delete the source folder
		if !c.operation.DryRun && (c.operation.OpState == model.StateMove || c.operation.OpState == model.StateGather) {
			// Only delete the source when the destination actually exists.
			exists, _ := lib.Exists(filepath.Join(command.Dst, command.Src))
			if exists {
				rmrf := fmt.Sprintf("rm -rf \"%s\"", filepath.Join(c.operation.SourceDiskName, command.Path))
				mlog.Info("Removing: %s", rmrf)
				err = lib.Shell(rmrf, mlog.Warning, "transferProgress:", "", func(line string) {
					mlog.Info(line)
				})

				if err != nil {
					msg := fmt.Sprintf("Unable to remove source folder (%s): %s", filepath.Join(c.operation.SourceDiskName, command.Path), err)

					outbound := &dto.Packet{Topic: "transferProgress", Payload: msg}
					c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

					mlog.Warning(msg)
				}
			} else {
				mlog.Warning("Skipping deletion (file/folder not present in destination): %s", filepath.Join(command.Dst, command.Src))
			}
		}
	}

	subject := fmt.Sprintf("unBALANCE - %s operation completed", strings.ToUpper(opName))
	headline := fmt.Sprintf("%s operation has finished", opName)

	_, _, speed := progress(c.operation.BytesToTransfer, c.operation.BytesTransferred, elapsed)
	c.finishTransferOperation(subject, headline, commandsExecuted, c.operation.Started, finished, elapsed, c.operation.BytesTransferred, speed, multiSource)
}
// finishTransferOperation wraps up a transfer/validate run: it broadcasts the
// timing summary, the executed commands and the finished signal to the
// frontend, then emails the user according to the NotifyMove preference.
// multiSource selects the gather-flavored finished topic over the plain
// transfer one.
func (c *Core) finishTransferOperation(subject, headline string, commands []string, started, finished time.Time, elapsed time.Duration, transferred int64, speed float64, multiSource bool) {
	fstarted := started.Format(timeFormat)
	ffinished := finished.Format(timeFormat)

	// Derive elapsed from the actual start/finish pair. The previous code
	// discarded the incoming value and used time.Since(started), which could
	// disagree with the reported finish timestamp by however long the caller
	// took to get here.
	elapsed = lib.Round(finished.Sub(started), time.Millisecond)

	outbound := &dto.Packet{Topic: "transferProgress", Payload: fmt.Sprintf("Started: %s", fstarted)}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	outbound = &dto.Packet{Topic: "transferProgress", Payload: fmt.Sprintf("Ended: %s", ffinished)}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	outbound = &dto.Packet{Topic: "transferProgress", Payload: fmt.Sprintf("Elapsed: %s", elapsed)}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	outbound = &dto.Packet{Topic: "transferProgress", Payload: fmt.Sprintf("Transferred %s at ~ %.2f MB/s", lib.ByteSize(transferred), speed)}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	outbound = &dto.Packet{Topic: "transferProgress", Payload: headline}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	outbound = &dto.Packet{Topic: "transferProgress", Payload: "These are the commands that were executed:"}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	// Collect the commands both for the frontend and for the email body.
	printedCommands := ""
	for _, command := range commands {
		printedCommands += command + "\n"
		outbound = &dto.Packet{Topic: "transferProgress", Payload: command}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
	}

	outbound = &dto.Packet{Topic: "transferProgress", Payload: "Operation Finished"}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	// send to front end the signal of operation finished
	if c.settings.DryRun {
		outbound = &dto.Packet{Topic: "transferProgress", Payload: "--- IT WAS A DRY RUN ---"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
	}

	finishMsg := "transferFinished"
	if multiSource {
		finishMsg = "gatherFinished"
	}

	outbound = &dto.Packet{Topic: finishMsg, Payload: ""}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	message := fmt.Sprintf("\n\nStarted: %s\nEnded: %s\n\nElapsed: %s\n\n%s\n\nTransferred %s at ~ %.2f MB/s", fstarted, ffinished, elapsed, headline, lib.ByteSize(transferred), speed)
	switch c.settings.NotifyMove {
	case 1:
		message += fmt.Sprintf("\n\n%d commands were executed.", len(commands))
	case 2:
		message += "\n\nThese are the commands that were executed:\n\n" + printedCommands
	}

	// Email in the background so a slow notify script doesn't block cleanup.
	go func() {
		if sendErr := c.sendmail(c.settings.NotifyMove, subject, message, c.settings.DryRun); sendErr != nil {
			mlog.Error(sendErr)
		}
	}()

	mlog.Info("\n%s\n%s", subject, message)
}
// findTargets resets the operation to a fresh find-targets state and runs
// the actual scan on a separate goroutine.
func (c *Core) findTargets(msg *pubsub.Message) {
	c.operation = model.Operation{
		OpState:   model.StateFindTargets,
		PrevState: model.StateIdle,
	}
	go c._findTargets(msg)
}
// _findTargets is the worker behind the findTargets handler: it decodes the
// user's chosen folder list, scans every disk for those folders (tallying
// permission issues along the way), greedily checks which disks could hold
// the combined set, then reports timings, results and any permission issues
// to the frontend and via email.
func (c *Core) _findTargets(msg *pubsub.Message) {
	// Whatever happens, the operation goes back to idle on exit.
	defer func() { c.operation.OpState = model.StateIdle }()

	data, ok := msg.Payload.(string)
	if !ok {
		mlog.Warning("Unable to convert findTargets parameters")
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to convert findTargets parameters"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		return
	}

	// The payload is a JSON array of folder paths selected by the user.
	var chosen []string
	err := json.Unmarshal([]byte(data), &chosen)
	if err != nil {
		mlog.Warning("Unable to bind findTargets parameters: %s", err)
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to bind findTargets parameters"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		return
		// mlog.Fatalf(err.Error())
	}

	mlog.Info("Running findTargets operation ...")
	c.operation.Started = time.Now()

	outbound := &dto.Packet{Topic: "calcStarted", Payload: "Operation started"}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	// disks := make([]*model.Disk, 0)

	// Refresh the storage snapshot before scanning.
	c.storage.Refresh()

	// Resolve the current user/group names (passed to the permission check).
	owner := ""
	lib.Shell("id -un", mlog.Warning, "owner", "", func(line string) {
		owner = line
	})

	group := ""
	lib.Shell("id -gn", mlog.Warning, "group", "", func(line string) {
		group = line
	})

	// Reset the permission-issue counters for this run.
	c.operation.OwnerIssue = 0
	c.operation.GroupIssue = 0
	c.operation.FolderIssue = 0
	c.operation.FileIssue = 0

	entries := make([]*model.Item, 0)

	// Check permission and look for the chosen folders on every disk
	for _, disk := range c.storage.Disks {
		for _, path := range chosen {
			msg := fmt.Sprintf("Scanning %s on %s", path, disk.Path)
			outbound = &dto.Packet{Topic: "calcProgress", Payload: msg}
			c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
			mlog.Info("_find:%s", msg)

			c.checkOwnerAndPermissions(&c.operation, disk.Path, path, owner, group)

			msg = "Checked permissions ..."
			// Note: := here intentionally shadows the outer outbound.
			outbound := &dto.Packet{Topic: "calcProgress", Payload: msg}
			c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
			mlog.Info("_find:%s", msg)

			list := c.getFolders(disk.Path, path)
			if list != nil {
				entries = append(entries, list...)
			}
		}
	}

	mlog.Info("_find:elegibleFolders(%d)", len(entries))

	// Total the candidate bytes for the greedy packer below.
	var totalSize int64
	for _, entry := range entries {
		totalSize += entry.Size
		mlog.Info("_find:elegibleFolder:Location(%s); Size(%s)", filepath.Join(entry.Location, entry.Path), lib.ByteSize(entry.Size))
	}
	mlog.Info("_find:potentialSizeToBeTransferred(%s)", lib.ByteSize(totalSize))

	if len(entries) > 0 {
		// Initialize fields
		// c.storage.BytesToTransfer = 0
		// c.storage.SourceDiskName = srcDisk.Path
		c.operation.BytesToTransfer = 0
		// c.operation.SourceDiskName = mntUser

		// Ask every disk whether it could hold the whole candidate set.
		for _, disk := range c.storage.Disks {
			// NOTE(review): assumes disk paths look like "/mnt/diskN" —
			// [5:] strips the "/mnt/" prefix. Confirm.
			diskWithoutMnt := disk.Path[5:]

			msg := fmt.Sprintf("Trying to allocate folders to %s ...", diskWithoutMnt)
			outbound = &dto.Packet{Topic: "calcProgress", Payload: msg}
			c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
			mlog.Info("_find:%s", msg)
			// time.Sleep(2 * time.Second)

			// Translate the user's reserved-space setting into bytes.
			var reserved int64
			switch c.settings.ReservedUnit {
			case "%":
				fcalc := disk.Size * c.settings.ReservedAmount / 100
				reserved = int64(fcalc)
				break
			case "Mb":
				reserved = c.settings.ReservedAmount * 1000 * 1000
				break
			case "Gb":
				reserved = c.settings.ReservedAmount * 1000 * 1000 * 1000
				break
			default:
				reserved = lib.ReservedSpace
			}

			// Never reserve less than the built-in floor.
			ceil := lib.Max(lib.ReservedSpace, reserved)
			mlog.Info("_find:FoldersLeft(%d):ReservedSpace(%d)", len(entries), ceil)

			packer := algorithm.NewGreedy(disk, entries, totalSize, ceil)
			bin := packer.FitAll()
			if bin != nil {
				disk.NewFree -= bin.Size
				disk.Src = false
				disk.Dst = false
				c.operation.BytesToTransfer += bin.Size

				mlog.Info("_find:BinAllocated=[Disk(%s); Items(%d)]", disk.Path, len(bin.Items))
			} else {
				mlog.Info("_find:NoBinAllocated=Disk(%s)", disk.Path)
			}
		}
	}

	c.operation.Finished = time.Now()
	elapsed := lib.Round(time.Since(c.operation.Started), time.Millisecond)

	fstarted := c.operation.Started.Format(timeFormat)
	ffinished := c.operation.Finished.Format(timeFormat)

	// Send to frontend console started/ended/elapsed times
	outbound = &dto.Packet{Topic: "calcProgress", Payload: fmt.Sprintf("Started: %s", fstarted)}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	outbound = &dto.Packet{Topic: "calcProgress", Payload: fmt.Sprintf("Ended: %s", ffinished)}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	outbound = &dto.Packet{Topic: "calcProgress", Payload: fmt.Sprintf("Elapsed: %s", elapsed)}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	// send to frontend the folders that will not be transferred, if any
	// notTransferred holds a string representation of all the folders, separated by a '\n'
	c.operation.FoldersNotTransferred = make([]string, 0)

	// send mail according to user preferences
	subject := "unBALANCE - CALCULATE operation completed"
	message := fmt.Sprintf("\n\nStarted: %s\nEnded: %s\n\nElapsed: %s", fstarted, ffinished, elapsed)

	if c.operation.OwnerIssue > 0 || c.operation.GroupIssue > 0 || c.operation.FolderIssue > 0 || c.operation.FileIssue > 0 {
		// NOTE(review): this is a raw string, so the \n sequences below are
		// sent literally — presumably real newlines were intended; confirm.
		message += fmt.Sprintf(`
\n\nThere are some permission issues:
\n\n%d file(s)/folder(s) with an owner other than 'nobody'
\n%d file(s)/folder(s) with a group other than 'users'
\n%d folder(s) with a permission other than 'drwxrwxrwx'
\n%d files(s) with a permission other than '-rw-rw-rw-' or '-r--r--r--'
\n\nCheck the log file (/boot/logs/unbalance.log) for additional information
\n\nIt's strongly suggested to install the Fix Common Plugins and run the Docker Safe New Permissions command
`, c.operation.OwnerIssue, c.operation.GroupIssue, c.operation.FolderIssue, c.operation.FileIssue)
	}

	if sendErr := c.sendmail(c.settings.NotifyCalc, subject, message, false); sendErr != nil {
		mlog.Error(sendErr)
	}

	// some local logging
	mlog.Info("_find:Listing (%d) disks ...", len(c.storage.Disks))

	for _, disk := range c.storage.Disks {
		// mlog.Info("the mystery of the year(%s)", disk.Path)
		disk.Print()
	}

	c.storage.Print()

	outbound = &dto.Packet{Topic: "calcProgress", Payload: "Operation Finished"}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	// Mirror the operation results onto the storage snapshot for the reply.
	c.storage.BytesToTransfer = c.operation.BytesToTransfer
	c.storage.OpState = c.operation.OpState
	c.storage.PrevState = c.operation.PrevState

	// send to front end the signal of operation finished
	outbound = &dto.Packet{Topic: "findFinished", Payload: c.storage}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	// only send the perm issue msg if there's actually some work to do (BytesToTransfer > 0)
	// and there actually perm issues
	if c.operation.BytesToTransfer > 0 && (c.operation.OwnerIssue+c.operation.GroupIssue+c.operation.FolderIssue+c.operation.FileIssue > 0) {
		outbound = &dto.Packet{Topic: "calcPermIssue", Payload: fmt.Sprintf("%d|%d|%d|%d", c.operation.OwnerIssue, c.operation.GroupIssue, c.operation.FolderIssue, c.operation.FileIssue)}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
	}
}
// gather handles a "gather" request: the user picked a target disk, so strip
// the allocation bins from every other disk and launch a multi-source Move.
func (c *Core) gather(msg *pubsub.Message) {
	mlog.Info("%+v", msg.Payload)

	payload, ok := msg.Payload.(string)
	if !ok {
		mlog.Warning("Unable to convert gather parameters")
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to convert gather parameters"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		return
	}

	var target model.Disk
	if err := json.Unmarshal([]byte(payload), &target); err != nil {
		mlog.Warning("Unable to bind gather parameters: %s", err)
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to bind gather parameters"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		return
	}

	// Only the chosen target keeps its bin: the other disks have no work.
	for _, disk := range c.storage.Disks {
		if disk.Path != target.Path {
			disk.Bin = nil
		}
	}

	c.operation.OpState, c.operation.PrevState = model.StateGather, model.StateGather
	go c.transfer("Move", true, msg)
}
// getLog broadcasts the current application log to the frontend.
func (c *Core) getLog(msg *pubsub.Message) {
	packet := &dto.Packet{Topic: "gotLog", Payload: c.storage.GetLog()}
	c.bus.Pub(&pubsub.Message{Payload: packet}, "socket:broadcast")
}
// sendmail shells out to the unRAID notify script with the given subject and
// message. A notify level of 0 means notifications are disabled; dryRun
// prepends a DRY RUN banner to the body.
func (c *Core) sendmail(notify int, subject, message string, dryRun bool) (err error) {
	if notify == 0 {
		return nil
	}

	body := message
	if dryRun {
		body = "-------\nDRY RUN\n-------\n" + message
	}

	// strCmd := fmt.Sprintf("-s \"%s\" -m \"%s\"", mailCmd, subject, msg)
	return exec.Command(mailCmd, "-e", "unBALANCE operation update", "-s", subject, "-m", body).Run()
}
func progress(bytesToTransfer, bytesTransferred int64, elapsed time.Duration) (percent float64, left time.Duration, speed float64) {
bytesPerSec := float64(bytesTransferred) / elapsed.Seconds()
speed = bytesPerSec / 1024 / 1024 // MB/s
percent = (float64(bytesTransferred) / float64(bytesToTransfer)) * 100 // %
left = time.Duration(float64(bytesToTransfer-bytesTransferred)/bytesPerSec) * time.Second
return
}
func getError(line string, re *regexp.Regexp, errors map[int]string) string {
result := re.FindStringSubmatch(line)
status, _ := strconv.Atoi(result[1])
msg, ok := errors[status]
if !ok {
msg = "unknown error"
}
return msg
}
// notifyCommandsToRun emails the user the list of rsync commands that are
// about to run for the given operation, in the background.
func (c *Core) notifyCommandsToRun(opName string) {
	var body strings.Builder
	body.WriteString("\n\nThe following commands will be executed:\n\n")
	for _, command := range c.operation.Commands {
		fmt.Fprintf(&body, "(src: %s) rsync %s %s %s\n", command.WorkDir, c.operation.RsyncStrFlags, strconv.Quote(command.Src), strconv.Quote(command.Dst))
	}

	subject := fmt.Sprintf("unBALANCE - %s operation STARTING", strings.ToUpper(opName))

	// Send asynchronously so a slow notify script doesn't delay the transfer.
	go func() {
		if sendErr := c.sendmail(c.settings.NotifyMove, subject, body.String(), c.settings.DryRun); sendErr != nil {
			mlog.Error(sendErr)
		}
	}()
}
Remove duplicated message
Related to #69
package services
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
"jbrodriguez/unbalance/server/src/algorithm"
"jbrodriguez/unbalance/server/src/dto"
"jbrodriguez/unbalance/server/src/lib"
"jbrodriguez/unbalance/server/src/model"
"github.com/jbrodriguez/actor"
"github.com/jbrodriguez/mlog"
"github.com/jbrodriguez/pubsub"
)
const (
	// mailCmd is the unRAID webGui notification script invoked by sendmail.
	mailCmd = "/usr/local/emhttp/webGui/scripts/notify"
	// timeFormat is the layout used for user-facing timestamps (time.Format).
	timeFormat = "Jan _2, 2006 15:04:05"
)
// Core service: orchestrates the calculate/move/copy/validate/gather
// operations and brokers their progress to the frontend over the pubsub bus.
type Core struct {
	bus      *pubsub.PubSub // message bus used to broadcast to the socket layer
	storage  *model.Unraid  // current view of the unRAID array
	settings *lib.Settings  // user configuration (flags, notification prefs, ...)

	// this holds the state of any operation
	operation model.Operation

	actor *actor.Actor // topic -> handler dispatcher

	reFreeSpace *regexp.Regexp // presumably parses df-style free-space output — confirm against caller
	reItems     *regexp.Regexp // parses `du -bs` lines: "<size> <path>"
	reRsync     *regexp.Regexp // extracts the numeric code from "exit status N"
	reStat      *regexp.Regexp // parses "perms|user:group|kind|name" stat lines
	reProgress  *regexp.Regexp // parses rsync --progress byte counters

	rsyncErrors map[int]string // rsync exit code -> human-readable description
}
// NewCore builds the Core service: it wires the bus and settings, precompiles
// the regexes used to parse shell/rsync output, and seeds the rsync exit-code
// description table.
func NewCore(bus *pubsub.PubSub, settings *lib.Settings) *Core {
	core := &Core{
		bus:      bus,
		settings: settings,
		// opState: stateIdle,
		storage: &model.Unraid{},
		actor:   actor.NewActor(bus),
		operation: model.Operation{
			OpState:   model.StateIdle,
			PrevState: model.StateIdle,
			DryRun:    settings.DryRun,
		},
	}

	// Six whitespace-separated columns; presumably df-style output — confirm.
	core.reFreeSpace = regexp.MustCompile(`(.*?)\s+(\d+)\s+(\d+)\s+(\d+)\s+(.*?)\s+(.*?)$`)
	// `du -bs` output: "<size> <path>" (used by getFolders).
	core.reItems = regexp.MustCompile(`(\d+)\s+(.*?)$`)
	// rsync failure as surfaced by os/exec: "exit status N" (used by getError).
	core.reRsync = regexp.MustCompile(`exit status (\d+)`)
	// rsync --progress byte counter, with or without the trailing "(...)" stats.
	core.reProgress = regexp.MustCompile(`(?s)^([\d,]+).*?\(.*?\)$|^([\d,]+).*?$`)
	// stat line "perms|user:group|kind|name" as emitted by checkOwnerAndPermissions.
	core.reStat = regexp.MustCompile(`[-dclpsbD]([-rwxsS]{3})([-rwxsS]{3})([-rwxtT]{3})\|(.*?)\:(.*?)\|(.*?)\|(.*)`)

	// Human-readable descriptions for rsync exit codes (see rsync(1), EXIT VALUES).
	core.rsyncErrors = map[int]string{
		0:  "Success",
		1:  "Syntax or usage error",
		2:  "Protocol incompatibility",
		3:  "Errors selecting input/output files, dirs",
		4:  "Requested action not supported: an attempt was made to manipulate 64-bit files on a platform that cannot support them, or an option was specified that is supported by the client and not by the server.",
		5:  "Error starting client-server protocol",
		6:  "Daemon unable to append to log-file",
		10: "Error in socket I/O",
		11: "Error in file I/O",
		12: "Error in rsync protocol data stream",
		13: "Errors with program diagnostics",
		14: "Error in IPC code",
		20: "Received SIGUSR1 or SIGINT",
		21: "Some error returned by waitpid()",
		22: "Error allocating core memory buffers",
		23: "Partial transfer due to error",
		24: "Partial transfer due to vanished source files",
		25: "The --max-delete limit stopped deletions",
		30: "Timeout in data send/receive",
		35: "Timeout waiting for daemon connection",
	}
	// core.ownerPerms = map[int]bool{
	// 644
	// }

	return core
}
// Start registers every topic the Core service handles, sanity-checks the
// configured storage folders, and launches the actor dispatch loop.
func (c *Core) Start() (err error) {
	mlog.Info("starting service Core ...")

	// Table-driven registration; order matches the original call sequence.
	registrations := []struct {
		topic   string
		handler func(*pubsub.Message)
	}{
		{"/get/config", c.getConfig},
		{"/get/status", c.getStatus},
		{"/config/set/notifyCalc", c.setNotifyCalc},
		{"/config/set/notifyMove", c.setNotifyMove},
		{"/config/set/reservedSpace", c.setReservedSpace},
		{"/config/set/verbosity", c.setVerbosity},
		{"/get/storage", c.getStorage},
		{"/config/toggle/dryRun", c.toggleDryRun},
		{"/get/tree", c.getTree},
		{"/disks/locate", c.locate},
		{"/config/set/rsyncFlags", c.setRsyncFlags},
		{"calculate", c.calc},
		{"move", c.move},
		{"copy", c.copy},
		{"validate", c.validate},
		{"getLog", c.getLog},
		{"findTargets", c.findTargets},
		{"gather", c.gather},
	}
	for _, r := range registrations {
		c.actor.Register(r.topic, r.handler)
	}

	if err = c.storage.SanityCheck(c.settings.APIFolders); err != nil {
		return err
	}

	go c.actor.React()

	return nil
}
// Stop shuts the Core service down. It only logs the shutdown; the actor
// goroutine started by Start has no explicit cancellation here.
func (c *Core) Stop() {
	mlog.Info("stopped service Core ...")
}
// SetStorage replaces the in-memory storage model the service operates on.
// Called externally (presumably at startup after the array is scanned —
// TODO confirm against caller).
func (c *Core) SetStorage(unraid *model.Unraid) {
	c.storage = unraid
}
// getConfig replies with the current configuration. As a side effect it
// migrates the two legacy default rsync flag sets to the current default
// ("-avPRX") and persists that change.
func (c *Core) getConfig(msg *pubsub.Message) {
	mlog.Info("Sending config")

	// one-shot migration of old default flags
	switch strings.Join(c.settings.RsyncFlags, " ") {
	case "-avX --partial", "-avRX --partial":
		c.settings.RsyncFlags = []string{"-avPRX"}
		c.settings.Save()
	}

	msg.Reply <- &c.settings.Config
}
// getStatus replies with the current operation state (idle, calc, move, ...).
func (c *Core) getStatus(msg *pubsub.Message) {
	mlog.Info("Sending status")
	msg.Reply <- c.operation.OpState
}
// setNotifyCalc updates the "notify on calculate" preference. The payload is
// a float64 (JSON numbers decode to float64) carrying the notification
// level; it is persisted and the updated config is sent back on the reply
// channel.
func (c *Core) setNotifyCalc(msg *pubsub.Message) {
	// comma-ok assertion: a malformed payload previously panicked the actor
	fnotify, ok := msg.Payload.(float64)
	if !ok {
		mlog.Warning("Unable to convert notifyCalc parameter")
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to convert notifyCalc parameter"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		msg.Reply <- &c.settings.Config
		return
	}
	notify := int(fnotify)

	mlog.Info("Setting notifyCalc to (%d)", notify)

	c.settings.NotifyCalc = notify
	if err := c.settings.Save(); err != nil {
		mlog.Warning("Unable to save settings: %s", err)
	}

	msg.Reply <- &c.settings.Config
}
// setNotifyMove updates the "notify on move" preference. The payload is a
// float64 (JSON numbers decode to float64) carrying the notification level;
// it is persisted and the updated config is sent back on the reply channel.
func (c *Core) setNotifyMove(msg *pubsub.Message) {
	// comma-ok assertion: a malformed payload previously panicked the actor
	fnotify, ok := msg.Payload.(float64)
	if !ok {
		mlog.Warning("Unable to convert notifyMove parameter")
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to convert notifyMove parameter"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		msg.Reply <- &c.settings.Config
		return
	}
	notify := int(fnotify)

	mlog.Info("Setting notifyMove to (%d)", notify)

	c.settings.NotifyMove = notify
	if err := c.settings.Save(); err != nil {
		mlog.Warning("Unable to save settings: %s", err)
	}

	msg.Reply <- &c.settings.Config
}
// setVerbosity updates the logging verbosity level (payload is a float64, as
// delivered by JSON decoding), persists it, and replies with the updated
// configuration.
func (c *Core) setVerbosity(msg *pubsub.Message) {
	level := int(msg.Payload.(float64))

	mlog.Info("Setting verbosity to (%d)", level)

	c.settings.Verbosity = level
	if err := c.settings.Save(); err != nil {
		mlog.Warning("not right %s", err)
	}

	msg.Reply <- &c.settings.Config
}
// setReservedSpace updates how much free space must be kept on each target
// disk. The payload is a JSON-encoded dto.Reserved {Amount, Unit}; the new
// values are persisted and the updated config is sent back to the requester.
func (c *Core) setReservedSpace(msg *pubsub.Message) {
	mlog.Warning("payload: %+v", msg.Payload)
	payload, ok := msg.Payload.(string)
	if !ok {
		mlog.Warning("Unable to convert Reserved Space parameters")
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to convert Reserved Space parameters"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		msg.Reply <- &c.settings.Config
		return
	}

	var reserved dto.Reserved
	err := json.Unmarshal([]byte(payload), &reserved)
	if err != nil {
		mlog.Warning("Unable to bind reservedSpace parameters: %s", err)
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to bind reservedSpace parameters"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		// reply with the (unchanged) config so the requester isn't left
		// waiting forever — the conversion-failure branch above already
		// replied; this branch previously returned without a reply
		msg.Reply <- &c.settings.Config
		return
	}

	amount := int64(reserved.Amount)
	unit := reserved.Unit

	mlog.Info("Setting reservedAmount to (%d)", amount)
	mlog.Info("Setting reservedUnit to (%s)", unit)

	c.settings.ReservedAmount = amount
	c.settings.ReservedUnit = unit
	if err := c.settings.Save(); err != nil {
		mlog.Warning("Unable to save settings: %s", err)
	}

	msg.Reply <- &c.settings.Config
}
// getStorage replies with a snapshot of the storage model. While idle it
// refreshes the disk stats first; while a transfer (move/copy/gather) is in
// flight it attaches a human-readable progress line instead.
func (c *Core) getStorage(msg *pubsub.Message) {
	stats := ""

	switch c.operation.OpState {
	case model.StateIdle:
		c.storage.Refresh()
	case model.StateMove, model.StateCopy, model.StateGather:
		percent, left, speed := progress(c.operation.BytesToTransfer, c.operation.BytesTransferred, time.Since(c.operation.Started))
		stats = fmt.Sprintf("%.2f%% done ~ %s left (%.2f MB/s)", percent, left, speed)
	}

	c.storage.Stats = stats
	c.storage.OpState = c.operation.OpState
	c.storage.PrevState = c.operation.PrevState
	c.storage.BytesToTransfer = c.operation.BytesToTransfer

	msg.Reply <- c.storage
}
// toggleDryRun flips the dry-run flag, persists the settings, and replies
// with the updated config. The Save error is ignored here — TODO confirm
// this is intentional (setVerbosity logs it).
func (c *Core) toggleDryRun(msg *pubsub.Message) {
	mlog.Info("Toggling dryRun from (%t)", c.settings.DryRun)
	c.settings.ToggleDryRun()
	c.settings.Save()
	msg.Reply <- &c.settings.Config
}
// getTree replies with the directory tree under the requested path.
// NOTE(review): the type assertion panics on a non-string payload — callers
// are trusted to send a string.
func (c *Core) getTree(msg *pubsub.Message) {
	path := msg.Payload.(string)
	msg.Reply <- c.storage.GetTree(path)
}
// locate replies with the physical location of the chosen disks.
// NOTE(review): the type assertion panics on a non-[]string payload —
// callers are trusted to send a []string.
func (c *Core) locate(msg *pubsub.Message) {
	chosen := msg.Payload.([]string)
	msg.Reply <- c.storage.Locate(chosen)
}
// setRsyncFlags replaces the rsync flags used for transfers. The payload is
// a JSON-encoded dto.Rsync {Flags}; the new flags are persisted and the
// updated config is sent back to the requester.
func (c *Core) setRsyncFlags(msg *pubsub.Message) {
	payload, ok := msg.Payload.(string)
	if !ok {
		mlog.Warning("Unable to convert Rsync Flags parameters")
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to convert Rsync Flags parameters"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		msg.Reply <- &c.settings.Config
		return
	}

	var rsync dto.Rsync
	err := json.Unmarshal([]byte(payload), &rsync)
	if err != nil {
		mlog.Warning("Unable to bind rsyncFlags parameters: %s", err)
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to bind rsyncFlags parameters"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		// reply with the (unchanged) config so the requester isn't left
		// waiting forever — the conversion-failure branch above already
		// replied; this branch previously returned without a reply
		msg.Reply <- &c.settings.Config
		return
	}

	mlog.Info("Setting rsyncFlags to (%s)", strings.Join(rsync.Flags, " "))

	c.settings.RsyncFlags = rsync.Flags
	if err := c.settings.Save(); err != nil {
		mlog.Warning("Unable to save settings: %s", err)
	}

	msg.Reply <- &c.settings.Config
}
// calc resets the operation to a fresh calculate run and performs the actual
// work asynchronously in _calc, so the actor loop is not blocked.
func (c *Core) calc(msg *pubsub.Message) {
	c.operation = model.Operation{OpState: model.StateCalc, PrevState: model.StateIdle}
	go c._calc(msg)
}
// _calc runs the CALCULATE operation in the background. It binds the user's
// selection (source disk, destination disks, folders), scans the chosen
// folders on the source disk, audits ownership/permissions, then bin-packs
// the folders onto the destination disks. Progress and results are broadcast
// on "socket:broadcast" and a summary mail is sent according to the user's
// notification preferences.
func (c *Core) _calc(msg *pubsub.Message) {
	// whatever happens below, the service returns to idle once we're done
	defer func() { c.operation.OpState = model.StateIdle }()

	payload, ok := msg.Payload.(string)
	if !ok {
		mlog.Warning("Unable to convert calculate parameters")
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to convert calculate parameters"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		return
	}

	var dtoCalc dto.Calculate
	err := json.Unmarshal([]byte(payload), &dtoCalc)
	if err != nil {
		mlog.Warning("Unable to bind calculate parameters: %s", err)
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to bind calculate parameters"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		return
	}

	mlog.Info("Running calculate operation ...")
	c.operation.Started = time.Now()

	outbound := &dto.Packet{Topic: "calcStarted", Payload: "Operation started"}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	disks := make([]*model.Disk, 0)

	// create array of destination disks and locate the source disk
	var srcDisk *model.Disk
	for _, disk := range c.storage.Disks {
		// reset disk state left over from a previous operation
		disk.NewFree = disk.Free
		disk.Bin = nil
		disk.Src = false
		disk.Dst = dtoCalc.DestDisks[disk.Path]

		if disk.Path == dtoCalc.SourceDisk {
			disk.Src = true
			srcDisk = disk
		} else {
			// add it to the target disk list, only if the user selected it
			if val, ok := dtoCalc.DestDisks[disk.Path]; ok && val {
				// double check, if it's a cache disk, make sure it's the main cache disk
				if disk.Type == "Cache" && len(disk.Name) > 5 {
					continue
				}
				disks = append(disks, disk)
			}
		}
	}

	// bail out if the source disk wasn't found: the code below dereferences
	// srcDisk unconditionally and would otherwise panic
	if srcDisk == nil {
		mlog.Warning("_calc:srcDiskNotFound(%s)", dtoCalc.SourceDisk)
		outbound = &dto.Packet{Topic: "opError", Payload: "Unable to find the source disk"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		return
	}

	mlog.Info("_calc:Begin:srcDisk(%s); dstDisks(%d)", srcDisk.Path, len(disks))
	for _, disk := range disks {
		mlog.Info("_calc:elegibleDestDisk(%s)", disk.Path)
	}

	// fill the emptiest destination disks first
	sort.Sort(model.ByFree(disks))

	srcDiskWithoutMnt := srcDisk.Path[5:] // drops leading "/mnt/" — assumes disk paths live under /mnt/

	// resolve the current user/group; permission checks compare against them
	owner := ""
	lib.Shell("id -un", mlog.Warning, "owner", "", func(line string) {
		owner = line
	})

	group := ""
	lib.Shell("id -gn", mlog.Warning, "group", "", func(line string) {
		group = line
	})

	c.operation.OwnerIssue = 0
	c.operation.GroupIssue = 0
	c.operation.FolderIssue = 0
	c.operation.FileIssue = 0

	// Check permission and gather folders to be transferred from
	// source disk
	folders := make([]*model.Item, 0)
	for _, path := range dtoCalc.Folders {
		// progressMsg renamed from msg: the original shadowed the msg parameter
		progressMsg := fmt.Sprintf("Scanning %s on %s", path, srcDiskWithoutMnt)
		outbound = &dto.Packet{Topic: "calcProgress", Payload: progressMsg}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

		c.checkOwnerAndPermissions(&c.operation, dtoCalc.SourceDisk, path, owner, group)

		progressMsg = "Checked permissions ..."
		outbound = &dto.Packet{Topic: "calcProgress", Payload: progressMsg}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		mlog.Info("_calc:%s", progressMsg)

		list := c.getFolders(dtoCalc.SourceDisk, path)
		if list != nil {
			folders = append(folders, list...)
		}
	}

	mlog.Info("_calc:foldersToBeTransferredTotal(%d)", len(folders))
	for _, v := range folders {
		mlog.Info("_calc:toBeTransferred:Path(%s); Size(%s)", v.Path, lib.ByteSize(v.Size))
	}

	willBeTransferred := make([]*model.Item, 0)
	if len(folders) > 0 {
		// Initialize fields
		c.operation.BytesToTransfer = 0
		c.operation.SourceDiskName = srcDisk.Path

		for _, disk := range disks {
			diskWithoutMnt := disk.Path[5:]
			progressMsg := fmt.Sprintf("Trying to allocate folders to %s ...", diskWithoutMnt)
			outbound = &dto.Packet{Topic: "calcProgress", Payload: progressMsg}
			c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
			mlog.Info("_calc:%s", progressMsg)

			if disk.Path != srcDisk.Path {
				// how much space to keep free on the target disk; never less
				// than the app-wide floor (lib.ReservedSpace)
				var reserved int64
				switch c.settings.ReservedUnit {
				case "%":
					reserved = disk.Size * c.settings.ReservedAmount / 100
				case "Mb":
					reserved = c.settings.ReservedAmount * 1000 * 1000
				case "Gb":
					reserved = c.settings.ReservedAmount * 1000 * 1000 * 1000
				default:
					reserved = lib.ReservedSpace
				}

				ceil := lib.Max(lib.ReservedSpace, reserved)
				mlog.Info("_calc:FoldersLeft(%d):ReservedSpace(%d)", len(folders), ceil)

				// bin-pack the remaining folders onto this disk
				packer := algorithm.NewKnapsack(disk, folders, ceil)
				bin := packer.BestFit()
				if bin != nil {
					srcDisk.NewFree += bin.Size
					disk.NewFree -= bin.Size
					c.operation.BytesToTransfer += bin.Size

					willBeTransferred = append(willBeTransferred, bin.Items...)
					folders = c.removeFolders(folders, bin.Items)

					mlog.Info("_calc:BinAllocated=[Disk(%s); Items(%d)];Freespace=[original(%s); final(%s)]", disk.Path, len(bin.Items), lib.ByteSize(srcDisk.Free), lib.ByteSize(srcDisk.NewFree))
				} else {
					mlog.Info("_calc:NoBinAllocated=Disk(%s)", disk.Path)
				}
			}
		}
	}

	c.operation.Finished = time.Now()
	elapsed := lib.Round(time.Since(c.operation.Started), time.Millisecond)

	fstarted := c.operation.Started.Format(timeFormat)
	ffinished := c.operation.Finished.Format(timeFormat)

	// Send to frontend console started/ended/elapsed times
	outbound = &dto.Packet{Topic: "calcProgress", Payload: fmt.Sprintf("Started: %s", fstarted)}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
	outbound = &dto.Packet{Topic: "calcProgress", Payload: fmt.Sprintf("Ended: %s", ffinished)}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
	outbound = &dto.Packet{Topic: "calcProgress", Payload: fmt.Sprintf("Elapsed: %s", elapsed)}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	if len(willBeTransferred) == 0 {
		mlog.Info("_calc:No folders can be transferred.")
	} else {
		mlog.Info("_calc:%d folders will be transferred.", len(willBeTransferred))
		for _, folder := range willBeTransferred {
			mlog.Info("_calc:willBeTransferred(%s)", folder.Path)
		}
	}

	// send to frontend the folders that will not be transferred, if any
	// notTransferred holds a string representation of all the folders, separated by a '\n'
	c.operation.FoldersNotTransferred = make([]string, 0)
	notTransferred := ""
	if len(folders) > 0 {
		outbound = &dto.Packet{Topic: "calcProgress", Payload: "The following folders will not be transferred, because there's not enough space in the target disks:\n"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

		mlog.Info("_calc:%d folders will NOT be transferred.", len(folders))
		for _, folder := range folders {
			c.operation.FoldersNotTransferred = append(c.operation.FoldersNotTransferred, folder.Path)

			notTransferred += folder.Path + "\n"

			outbound = &dto.Packet{Topic: "calcProgress", Payload: folder.Path}
			c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
			mlog.Info("_calc:notTransferred(%s)", folder.Path)
		}
	}

	// send mail according to user preferences
	subject := "unBALANCE - CALCULATE operation completed"
	message := fmt.Sprintf("\n\nStarted: %s\nEnded: %s\n\nElapsed: %s", fstarted, ffinished, elapsed)
	if notTransferred != "" {
		switch c.settings.NotifyCalc {
		case 1:
			message += "\n\nSome folders will not be transferred because there's not enough space for them in any of the destination disks."
		case 2:
			message += "\n\nThe following folders will not be transferred because there's not enough space for them in any of the destination disks:\n\n" + notTransferred
		}
	}

	if c.operation.OwnerIssue > 0 || c.operation.GroupIssue > 0 || c.operation.FolderIssue > 0 || c.operation.FileIssue > 0 {
		// This was a raw (backquoted) string before; inside backquotes "\n"
		// is two literal characters, so the mail contained stray "\n" text.
		// An interpreted string renders the intended newlines.
		message += fmt.Sprintf("\n\nThere are some permission issues:"+
			"\n\n%d file(s)/folder(s) with an owner other than 'nobody'"+
			"\n%d file(s)/folder(s) with a group other than 'users'"+
			"\n%d folder(s) with a permission other than 'drwxrwxrwx'"+
			"\n%d files(s) with a permission other than '-rw-rw-rw-' or '-r--r--r--'"+
			"\n\nCheck the log file (/boot/logs/unbalance.log) for additional information"+
			"\n\nIt's strongly suggested to install the Fix Common Plugins and run the Docker Safe New Permissions command",
			c.operation.OwnerIssue, c.operation.GroupIssue, c.operation.FolderIssue, c.operation.FileIssue)
	}

	if sendErr := c.sendmail(c.settings.NotifyCalc, subject, message, false); sendErr != nil {
		mlog.Error(sendErr)
	}

	// some local logging
	mlog.Info("_calc:FoldersLeft(%d)", len(folders))
	mlog.Info("_calc:src(%s):Listing (%d) disks ...", srcDisk.Path, len(c.storage.Disks))

	for _, disk := range c.storage.Disks {
		disk.Print()
	}

	mlog.Info("=========================================================")
	mlog.Info("Results for %s", srcDisk.Path)
	mlog.Info("Original Free Space: %s", lib.ByteSize(srcDisk.Free))
	mlog.Info("Final Free Space: %s", lib.ByteSize(srcDisk.NewFree))
	mlog.Info("Gained Space: %s", lib.ByteSize(srcDisk.NewFree-srcDisk.Free))
	mlog.Info("Bytes To Move: %s", lib.ByteSize(c.operation.BytesToTransfer))
	mlog.Info("---------------------------------------------------------")

	c.storage.Print()

	mlog.Info("_calc:End:srcDisk(%s)", srcDisk.Path)

	outbound = &dto.Packet{Topic: "calcProgress", Payload: "Operation Finished"}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	c.storage.BytesToTransfer = c.operation.BytesToTransfer
	c.storage.OpState = c.operation.OpState
	c.storage.PrevState = c.operation.PrevState

	// send to front end the signal of operation finished
	outbound = &dto.Packet{Topic: "calcFinished", Payload: c.storage}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	// only send the perm issue msg if there's actually some work to do (BytesToTransfer > 0)
	// and there actually perm issues
	if c.operation.BytesToTransfer > 0 && (c.operation.OwnerIssue+c.operation.GroupIssue+c.operation.FolderIssue+c.operation.FileIssue > 0) {
		outbound = &dto.Packet{Topic: "calcPermIssue", Payload: fmt.Sprintf("%d|%d|%d|%d", c.operation.OwnerIssue, c.operation.GroupIssue, c.operation.FolderIssue, c.operation.FileIssue)}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
	}
}
// getFolders returns the transferable items found directly under folder on
// the disk mounted at src. If the target is a plain file it is returned as a
// single item; otherwise each immediate child (as reported by find/du)
// becomes an item. Returns nil when the folder is missing or empty.
func (c *Core) getFolders(src string, folder string) (items []*model.Item) {
	srcFolder := filepath.Join(src, folder)

	mlog.Info("getFolders:Scanning source-disk(%s):folder(%s)", src, folder)

	var fi os.FileInfo
	var err error
	if fi, err = os.Stat(srcFolder); os.IsNotExist(err) {
		mlog.Warning("getFolders:Folder does not exist: %s", srcFolder)
		return nil
	}

	// a plain file is reported as a single item
	if !fi.IsDir() {
		mlog.Info("getFolder-found(%s)-size(%d)", srcFolder, fi.Size())

		item := &model.Item{Name: folder, Size: fi.Size(), Path: folder, Location: src}
		items = append(items, item)

		msg := fmt.Sprintf("Found %s (%s)", item.Name, lib.ByteSize(item.Size))
		outbound := &dto.Packet{Topic: "calcProgress", Payload: msg}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

		return
	}

	dirs, err := ioutil.ReadDir(srcFolder)
	if err != nil {
		// a read error falls through to the len==0 check below and returns nil
		mlog.Warning("getFolders:Unable to readdir: %s", err)
	}

	mlog.Info("getFolders:Readdir(%d)", len(dirs))

	if len(dirs) == 0 {
		mlog.Info("getFolders:No subdirectories under %s", srcFolder)
		return nil
	}

	// "du -bs" each immediate child to get its size in bytes
	scanFolder := srcFolder + "/."
	cmdText := fmt.Sprintf("find \"%s\" ! -name . -prune -exec du -bs {} +", scanFolder)

	mlog.Info("getFolders:Executing %s", cmdText)

	lib.Shell(cmdText, mlog.Warning, "getFolders:find/du:", "", func(line string) {
		mlog.Info("getFolders:find(%s): %s", scanFolder, line)

		result := c.reItems.FindStringSubmatch(line)
		if result == nil {
			// guard against unparseable du output: indexing a nil match
			// previously panicked inside the shell callback
			mlog.Warning("getFolders:Unable to parse line (%s)", line)
			return
		}

		size, _ := strconv.ParseInt(result[1], 10, 64)

		item := &model.Item{Name: result[2], Size: size, Path: filepath.Join(folder, filepath.Base(result[2])), Location: src}
		items = append(items, item)

		msg := fmt.Sprintf("Found %s (%s)", filepath.Base(item.Name), lib.ByteSize(size))
		outbound := &dto.Packet{Topic: "calcProgress", Payload: msg}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
	})

	return
}
// checkOwnerAndPermissions walks folder on the given disk with find/stat and
// tallies deviations from the Unraid defaults into operation's counters:
// owner != "nobody", group != "users", folders not rwxrwxrwx, and files not
// rw-rw-rw- or r--r--r--. ownerName/groupName are the current user/group —
// NOTE(review): they are accepted but not compared here; TODO confirm intent.
func (c *Core) checkOwnerAndPermissions(operation *model.Operation, src, folder, ownerName, groupName string) {
	srcFolder := filepath.Join(src, folder)

	mlog.Info("perms:Scanning disk(%s):folder(%s)", src, folder)

	if _, err := os.Stat(srcFolder); os.IsNotExist(err) {
		mlog.Warning("perms:Folder does not exist: %s", srcFolder)
		return
	}

	// emit one "perms|owner:group|kind|name" line per entry
	scanFolder := srcFolder + "/."
	cmdText := fmt.Sprintf(`find "%s" -exec stat --format "%%A|%%U:%%G|%%F|%%n" {} \;`, scanFolder)

	mlog.Info("perms:Executing %s", cmdText)

	lib.Shell(cmdText, mlog.Warning, "perms:find/stat:", "", func(line string) {
		result := c.reStat.FindStringSubmatch(line)
		if result == nil {
			mlog.Warning("perms:Unable to parse (%s)", line)
			return
		}

		u := result[1]     // user permission triad
		g := result[2]     // group permission triad
		o := result[3]     // other permission triad
		user := result[4]  // owner name
		group := result[5] // group name
		kind := result[6]  // "directory", "regular file", ...
		name := result[7]  // full path

		perms := u + g + o

		if user != "nobody" {
			mlog.Info("perms:User != nobody: [%s]: %s", user, name)
			operation.OwnerIssue++
		}

		if group != "users" {
			mlog.Info("perms:Group != users: [%s]: %s", group, name)
			operation.GroupIssue++
		}

		if kind == "directory" {
			if perms != "rwxrwxrwx" {
				mlog.Info("perms:Folder perms != rwxrwxrwx: [%s]: %s", perms, name)
				operation.FolderIssue++
			}
		} else {
			match := strings.Compare(perms, "r--r--r--") == 0 || strings.Compare(perms, "rw-rw-rw-") == 0
			if !match {
				mlog.Info("perms:File perms != rw-rw-rw- or r--r--r--: [%s]: %s", perms, name)
				operation.FileIssue++
			}
		}
	})
}
// removeFolders filters folders in place, dropping every item whose Name
// matches an entry in list, and returns the shortened slice (the backing
// array is reused).
func (c *Core) removeFolders(folders []*model.Item, list []*model.Item) []*model.Item {
	// build a lookup of names to drop
	drop := make(map[string]struct{}, len(list))
	for _, itm := range list {
		drop[itm.Name] = struct{}{}
	}

	kept := folders[:0]
	for _, fld := range folders {
		if _, found := drop[fld.Name]; !found {
			kept = append(kept, fld)
		}
	}
	return kept
}
// move transitions the service into the Move state and runs the transfer
// asynchronously, using the commands computed by the last calculate run.
func (c *Core) move(msg *pubsub.Message) {
	c.operation.OpState = model.StateMove
	c.operation.PrevState = model.StateMove
	go c.transfer("Move", false, msg)
}
// copy transitions the service into the Copy state and runs the transfer
// asynchronously, using the commands computed by the last calculate run.
func (c *Core) copy(msg *pubsub.Message) {
	c.operation.OpState = model.StateCopy
	c.operation.PrevState = model.StateCopy
	go c.transfer("Copy", false, msg)
}
// validate transitions the service into the Validate state and re-checks the
// last transfer with rsync checksums, asynchronously.
func (c *Core) validate(msg *pubsub.Message) {
	c.operation.OpState = model.StateValidate
	c.operation.PrevState = model.StateValidate
	go c.checksum(msg)
}
// transfer runs a Move or Copy operation in the background. It builds one
// rsync command per item allocated by the previous calculate phase and hands
// them to runOperation. multiSource is true for gather-style transfers,
// where each item carries its own source location.
func (c *Core) transfer(opName string, multiSource bool, msg *pubsub.Message) {
	defer func() {
		c.operation.OpState = model.StateIdle
		c.operation.Started = time.Time{}
		c.operation.BytesTransferred = 0
		c.operation.Target = ""
	}()

	mlog.Info("Running %s operation ...", opName)
	c.operation.Started = time.Now()

	outbound := &dto.Packet{Topic: "transferStarted", Payload: "Operation started"}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	outbound = &dto.Packet{Topic: "progressStats", Payload: "Waiting to collect stats ..."}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	// user may have changed rsync flags or dry-run setting, adjust for it.
	// Copy the flags rather than aliasing the settings slice: appending
	// "--dry-run" to a shared slice could write into the settings' backing
	// array if it has spare capacity.
	c.operation.RsyncFlags = make([]string, len(c.settings.RsyncFlags))
	copy(c.operation.RsyncFlags, c.settings.RsyncFlags)
	c.operation.DryRun = c.settings.DryRun

	if c.operation.DryRun {
		c.operation.RsyncFlags = append(c.operation.RsyncFlags, "--dry-run")
	}
	c.operation.RsyncStrFlags = strings.Join(c.operation.RsyncFlags, " ")

	workdir := c.operation.SourceDiskName

	c.operation.Commands = make([]model.Command, 0)
	for _, disk := range c.storage.Disks {
		// only disks that received a bin, and never the source disk
		if disk.Bin == nil || disk.Src {
			continue
		}

		for _, item := range disk.Bin.Items {
			var src, dst string
			if strings.Contains(c.operation.RsyncStrFlags, "R") {
				// with -R (relative), rsync recreates the path under dst,
				// so src must be relative to the working directory
				if item.Path[0] == filepath.Separator {
					src = item.Path[1:]
				} else {
					src = item.Path
				}

				dst = disk.Path + string(filepath.Separator)
			} else {
				src = filepath.Join(c.operation.SourceDiskName, item.Path)
				dst = filepath.Join(disk.Path, filepath.Dir(item.Path)) + string(filepath.Separator)
			}

			if multiSource {
				workdir = item.Location
			}

			c.operation.Commands = append(c.operation.Commands, model.Command{
				Src:     src,
				Dst:     dst,
				Path:    item.Path,
				Size:    item.Size,
				WorkDir: workdir,
			})
		}
	}

	if c.settings.NotifyMove == 2 {
		c.notifyCommandsToRun(opName)
	}

	// execute each rsync command created in the step above
	c.runOperation(opName, c.operation.RsyncFlags, c.operation.RsyncStrFlags, multiSource)
}
// checksum runs the VALIDATE operation: it re-executes the commands from the
// previous transfer with rsync's checksum flags (-rc) to verify the copies.
// It refuses to run unless the transfer flags began with -a, since the
// validation flags are derived from them.
func (c *Core) checksum(msg *pubsub.Message) {
	defer func() {
		c.operation.OpState = model.StateIdle
		c.operation.PrevState = model.StateIdle
		c.operation.Started = time.Time{}
		c.operation.BytesTransferred = 0
	}()

	opName := "Validate"

	mlog.Info("Running %s operation ...", opName)
	c.operation.Started = time.Now()

	outbound := &dto.Packet{Topic: "transferStarted", Payload: "Operation started"}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	multiSource := false

	if !strings.HasPrefix(c.operation.RsyncStrFlags, "-a") {
		finished := time.Now()
		elapsed := time.Since(c.operation.Started)

		subject := fmt.Sprintf("unBALANCE - %s operation INTERRUPTED", strings.ToUpper(opName))
		headline := fmt.Sprintf("For proper %s operation, rsync flags MUST begin with -a", opName)

		mlog.Warning(headline)
		outbound := &dto.Packet{Topic: "opError", Payload: fmt.Sprintf("%s operation was interrupted. Check log (/boot/logs/unbalance.log) for details.", opName)}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

		_, _, speed := progress(c.operation.BytesToTransfer, 0, elapsed)
		c.finishTransferOperation(subject, headline, make([]string, 0), c.operation.Started, finished, elapsed, 0, speed, multiSource)

		return
	}

	// we reuse the rsync flags from the transfer operation, but replace a
	// leading "-a" with "-rc" to force checksum comparison. Only the prefix
	// is rewritten: the previous blanket strings.Replace would also corrupt
	// any flag that merely contains "-a" (e.g. "--append").
	checkRsyncFlags := make([]string, 0, len(c.operation.RsyncFlags))
	for _, flag := range c.operation.RsyncFlags {
		if strings.HasPrefix(flag, "-a") {
			flag = "-rc" + flag[2:]
		}
		checkRsyncFlags = append(checkRsyncFlags, flag)
	}

	checkRsyncStrFlags := strings.Join(checkRsyncFlags, " ")

	// execute each rsync command created in the transfer phase
	c.runOperation(opName, checkRsyncFlags, checkRsyncStrFlags, multiSource)
}
// runOperation executes every rsync command prepared by the transfer or
// checksum phase, streaming progress to the frontend (throttled to roughly
// 50 messages per second of runtime). On rsync failure the whole operation
// is aborted and reported. For non-dry-run Move/Gather operations, each
// item's source folder is removed after it transfers successfully.
func (c *Core) runOperation(opName string, rsyncFlags []string, rsyncStrFlags string, multiSource bool) {
	// Initialize local variables
	var calls int64
	var callsPerDelta int64
	var finished time.Time
	var elapsed time.Duration

	commandsExecuted := make([]string, 0)

	c.operation.BytesTransferred = 0

	for _, command := range c.operation.Commands {
		// build a fresh argument slice per command: appending directly to
		// rsyncFlags would write into its backing array and could leak
		// src/dst arguments between iterations
		args := make([]string, 0, len(rsyncFlags)+2)
		args = append(args, rsyncFlags...)
		args = append(args, command.Src, command.Dst)

		cmd := fmt.Sprintf(`rsync %s %s %s`, rsyncStrFlags, strconv.Quote(command.Src), strconv.Quote(command.Dst))
		mlog.Info("Command Started: (src: %s) %s ", command.WorkDir, cmd)

		outbound := &dto.Packet{Topic: "transferProgress", Payload: cmd}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

		// running total before this command started
		bytesTransferred := c.operation.BytesTransferred

		var deltaMoved int64

		// actual shell execution
		err := lib.ShellEx(func(text string) {
			line := strings.TrimSpace(text)
			if len(line) <= 0 {
				return
			}

			if callsPerDelta <= 50 {
				calls++
			}

			delta := int64(time.Since(c.operation.Started) / time.Second)
			if delta == 0 {
				delta = 1
			}
			callsPerDelta = calls / delta

			match := c.reProgress.FindStringSubmatch(line)
			if match == nil {
				// this is a regular output line from rsync, print it
				// according to verbosity settings
				if c.settings.Verbosity == 1 {
					mlog.Info("%s", line)
				}

				if callsPerDelta <= 50 {
					outbound := &dto.Packet{Topic: "transferProgress", Payload: line}
					c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
				}

				return
			}

			// this is a file transfer progress output line
			if match[1] == "" {
				// this happens when the file hasn't finished transferring
				moved := strings.Replace(match[2], ",", "", -1)
				deltaMoved, _ = strconv.ParseInt(moved, 10, 64)
			} else {
				// the file has finished transferring
				moved := strings.Replace(match[1], ",", "", -1)
				deltaMoved, _ = strconv.ParseInt(moved, 10, 64)
				bytesTransferred += deltaMoved
			}

			percent, left, speed := progress(c.operation.BytesToTransfer, bytesTransferred+deltaMoved, time.Since(c.operation.Started))

			msg := fmt.Sprintf("%.2f%% done ~ %s left (%.2f MB/s)", percent, left, speed)

			if callsPerDelta <= 50 {
				outbound := &dto.Packet{Topic: "progressStats", Payload: msg}
				c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
			}
		}, command.WorkDir, "rsync", args...)

		finished = time.Now()
		elapsed = time.Since(c.operation.Started)

		if err != nil {
			// rsync exited non-zero: translate the exit status into a
			// human-readable cause, notify, and abort the whole operation
			subject := fmt.Sprintf("unBALANCE - %s operation INTERRUPTED", strings.ToUpper(opName))
			headline := fmt.Sprintf("Command Interrupted: %s (%s)", cmd, err.Error()+" : "+getError(err.Error(), c.reRsync, c.rsyncErrors))

			mlog.Warning(headline)
			outbound := &dto.Packet{Topic: "opError", Payload: fmt.Sprintf("%s operation was interrupted. Check log (/boot/logs/unbalance.log) for details.", opName)}
			c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

			_, _, speed := progress(c.operation.BytesToTransfer, bytesTransferred+deltaMoved, elapsed)
			c.finishTransferOperation(subject, headline, commandsExecuted, c.operation.Started, finished, elapsed, bytesTransferred+deltaMoved, speed, multiSource)

			return
		}

		mlog.Info("Command Finished")

		c.operation.BytesTransferred = c.operation.BytesTransferred + command.Size
		percent, left, speed := progress(c.operation.BytesToTransfer, c.operation.BytesTransferred, elapsed)

		msg := fmt.Sprintf("%.2f%% done ~ %s left (%.2f MB/s)", percent, left, speed)
		mlog.Info("Current progress: %s", msg)

		outbound = &dto.Packet{Topic: "progressStats", Payload: msg}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

		commandsExecuted = append(commandsExecuted, cmd)

		// if it isn't a dry-run and the operation is Move or Gather, delete the source folder
		if !c.operation.DryRun && (c.operation.OpState == model.StateMove || c.operation.OpState == model.StateGather) {
			exists, _ := lib.Exists(filepath.Join(command.Dst, command.Src))
			if exists {
				rmrf := fmt.Sprintf("rm -rf \"%s\"", filepath.Join(c.operation.SourceDiskName, command.Path))
				mlog.Info("Removing: %s", rmrf)
				err = lib.Shell(rmrf, mlog.Warning, "transferProgress:", "", func(line string) {
					mlog.Info(line)
				})

				if err != nil {
					msg := fmt.Sprintf("Unable to remove source folder (%s): %s", filepath.Join(c.operation.SourceDiskName, command.Path), err)

					outbound := &dto.Packet{Topic: "transferProgress", Payload: msg}
					c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

					mlog.Warning(msg)
				}
			} else {
				// destination missing: don't delete the source
				mlog.Warning("Skipping deletion (file/folder not present in destination): %s", filepath.Join(command.Dst, command.Src))
			}
		}
	}

	subject := fmt.Sprintf("unBALANCE - %s operation completed", strings.ToUpper(opName))
	headline := fmt.Sprintf("%s operation has finished", opName)

	_, _, speed := progress(c.operation.BytesToTransfer, c.operation.BytesTransferred, elapsed)
	c.finishTransferOperation(subject, headline, commandsExecuted, c.operation.Started, finished, elapsed, c.operation.BytesTransferred, speed, multiSource)
}
// finishTransferOperation reports the end of a transfer/validate run: it
// streams the timing, throughput and executed commands to the frontend,
// signals completion ("transferFinished" or "gatherFinished" for gather
// runs), and mails a summary according to the NotifyMove preference.
func (c *Core) finishTransferOperation(subject, headline string, commands []string, started, finished time.Time, elapsed time.Duration, transferred int64, speed float64, multiSource bool) {
	// broadcast publishes one packet to every connected websocket client
	broadcast := func(topic, payload string) {
		packet := &dto.Packet{Topic: topic, Payload: payload}
		c.bus.Pub(&pubsub.Message{Payload: packet}, "socket:broadcast")
	}

	fstarted := started.Format(timeFormat)
	ffinished := finished.Format(timeFormat)
	// recompute elapsed from wall clock, rounded for display
	elapsed = lib.Round(time.Since(started), time.Millisecond)

	broadcast("transferProgress", fmt.Sprintf("Started: %s", fstarted))
	broadcast("transferProgress", fmt.Sprintf("Ended: %s", ffinished))
	broadcast("transferProgress", fmt.Sprintf("Elapsed: %s", elapsed))
	broadcast("transferProgress", fmt.Sprintf("Transferred %s at ~ %.2f MB/s", lib.ByteSize(transferred), speed))
	broadcast("transferProgress", headline)
	broadcast("transferProgress", "These are the commands that were executed:")

	printedCommands := ""
	for _, command := range commands {
		printedCommands += command + "\n"
		broadcast("transferProgress", command)
	}

	broadcast("transferProgress", "Operation Finished")

	if c.settings.DryRun {
		broadcast("transferProgress", "--- IT WAS A DRY RUN ---")
	}

	// send to front end the signal of operation finished
	finishTopic := "transferFinished"
	if multiSource {
		finishTopic = "gatherFinished"
	}
	broadcast(finishTopic, "")

	message := fmt.Sprintf("\n\nStarted: %s\nEnded: %s\n\nElapsed: %s\n\n%s\n\nTransferred %s at ~ %.2f MB/s", fstarted, ffinished, elapsed, headline, lib.ByteSize(transferred), speed)
	switch c.settings.NotifyMove {
	case 1:
		message += fmt.Sprintf("\n\n%d commands were executed.", len(commands))
	case 2:
		message += "\n\nThese are the commands that were executed:\n\n" + printedCommands
	}

	// mail delivery runs in the background so the caller isn't blocked
	go func() {
		if sendErr := c.sendmail(c.settings.NotifyMove, subject, message, c.settings.DryRun); sendErr != nil {
			mlog.Error(sendErr)
		}
	}()

	mlog.Info("\n%s\n%s", subject, message)
}
// findTargets resets the operation for a gather "find targets" run and
// performs the search asynchronously in _findTargets.
func (c *Core) findTargets(msg *pubsub.Message) {
	c.operation = model.Operation{OpState: model.StateFindTargets, PrevState: model.StateIdle}
	go c._findTargets(msg)
}
// _findTargets runs the CALCULATE (find targets) operation: it scans the
// chosen folders on every disk, checks ownership/permissions against the
// current user/group, then tries to allocate the eligible folders across the
// disks with a greedy bin-packing pass. Progress and results are broadcast
// to the frontend over the pubsub bus, and a summary mail is sent according
// to the user's NotifyCalc preference.
//
// The payload must be a JSON-encoded []string of chosen folder paths.
// Runs in its own goroutine; resets OpState to idle on exit.
func (c *Core) _findTargets(msg *pubsub.Message) {
	defer func() { c.operation.OpState = model.StateIdle }()

	data, ok := msg.Payload.(string)
	if !ok {
		mlog.Warning("Unable to convert findTargets parameters")
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to convert findTargets parameters"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		return
	}

	var chosen []string
	err := json.Unmarshal([]byte(data), &chosen)
	if err != nil {
		mlog.Warning("Unable to bind findTargets parameters: %s", err)
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to bind findTargets parameters"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		return
	}

	mlog.Info("Running findTargets operation ...")
	c.operation.Started = time.Now()

	outbound := &dto.Packet{Topic: "calcStarted", Payload: "Operation started"}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	c.storage.Refresh()

	// Resolve the user and group the process runs as; these are the expected
	// owner/group for the permission checks below.
	owner := ""
	lib.Shell("id -un", mlog.Warning, "owner", "", func(line string) {
		owner = line
	})

	group := ""
	lib.Shell("id -gn", mlog.Warning, "group", "", func(line string) {
		group = line
	})

	c.operation.OwnerIssue = 0
	c.operation.GroupIssue = 0
	c.operation.FolderIssue = 0
	c.operation.FileIssue = 0

	entries := make([]*model.Item, 0)

	// Check permissions and look for the chosen folders on every disk.
	for _, disk := range c.storage.Disks {
		for _, path := range chosen {
			msg := fmt.Sprintf("Scanning %s on %s", path, disk.Path)
			outbound = &dto.Packet{Topic: "calcProgress", Payload: msg}
			c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
			mlog.Info("_find:%s", msg)

			c.checkOwnerAndPermissions(&c.operation, disk.Path, path, owner, group)

			msg = "Checked permissions ..."
			outbound := &dto.Packet{Topic: "calcProgress", Payload: msg}
			c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
			mlog.Info("_find:%s", msg)

			list := c.getFolders(disk.Path, path)
			if list != nil {
				entries = append(entries, list...)
			}
		}
	}

	mlog.Info("_find:elegibleFolders(%d)", len(entries))

	var totalSize int64
	for _, entry := range entries {
		totalSize += entry.Size
		mlog.Info("_find:elegibleFolder:Location(%s); Size(%s)", filepath.Join(entry.Location, entry.Path), lib.ByteSize(entry.Size))
	}

	mlog.Info("_find:potentialSizeToBeTransferred(%s)", lib.ByteSize(totalSize))

	if len(entries) > 0 {
		c.operation.BytesToTransfer = 0

		for _, disk := range c.storage.Disks {
			// Strip the leading "/mnt/" for the user-facing progress message.
			diskWithoutMnt := disk.Path[5:]

			msg := fmt.Sprintf("Trying to allocate folders to %s ...", diskWithoutMnt)
			outbound = &dto.Packet{Topic: "calcProgress", Payload: msg}
			c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
			mlog.Info("_find:%s", msg)

			// Work out how much space must stay free on this disk.
			// (Go switch cases don't fall through; the original redundant
			// break statements were removed.)
			var reserved int64
			switch c.settings.ReservedUnit {
			case "%":
				fcalc := disk.Size * c.settings.ReservedAmount / 100
				reserved = int64(fcalc)
			case "Mb":
				reserved = c.settings.ReservedAmount * 1000 * 1000
			case "Gb":
				reserved = c.settings.ReservedAmount * 1000 * 1000 * 1000
			default:
				reserved = lib.ReservedSpace
			}

			// Never allow less reserved space than the built-in floor.
			ceil := lib.Max(lib.ReservedSpace, reserved)
			mlog.Info("_find:FoldersLeft(%d):ReservedSpace(%d)", len(entries), ceil)

			packer := algorithm.NewGreedy(disk, entries, totalSize, ceil)
			bin := packer.FitAll()
			if bin != nil {
				disk.NewFree -= bin.Size
				disk.Src = false
				disk.Dst = false
				c.operation.BytesToTransfer += bin.Size
				mlog.Info("_find:BinAllocated=[Disk(%s); Items(%d)]", disk.Path, len(bin.Items))
			} else {
				mlog.Info("_find:NoBinAllocated=Disk(%s)", disk.Path)
			}
		}
	}

	c.operation.Finished = time.Now()
	elapsed := lib.Round(time.Since(c.operation.Started), time.Millisecond)

	fstarted := c.operation.Started.Format(timeFormat)
	ffinished := c.operation.Finished.Format(timeFormat)

	// Send started/ended/elapsed times to the frontend console.
	outbound = &dto.Packet{Topic: "calcProgress", Payload: fmt.Sprintf("Started: %s", fstarted)}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	outbound = &dto.Packet{Topic: "calcProgress", Payload: fmt.Sprintf("Ended: %s", ffinished)}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	outbound = &dto.Packet{Topic: "calcProgress", Payload: fmt.Sprintf("Elapsed: %s", elapsed)}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	c.operation.FoldersNotTransferred = make([]string, 0)

	// Send mail according to the user's preferences.
	subject := "unBALANCE - CALCULATE operation completed"
	message := fmt.Sprintf("\n\nStarted: %s\nEnded: %s\n\nElapsed: %s", fstarted, ffinished, elapsed)
	if c.operation.OwnerIssue > 0 || c.operation.GroupIssue > 0 || c.operation.FolderIssue > 0 || c.operation.FileIssue > 0 {
		message += fmt.Sprintf(`
\n\nThere are some permission issues:
\n\n%d file(s)/folder(s) with an owner other than 'nobody'
\n%d file(s)/folder(s) with a group other than 'users'
\n%d folder(s) with a permission other than 'drwxrwxrwx'
\n%d files(s) with a permission other than '-rw-rw-rw-' or '-r--r--r--'
\n\nCheck the log file (/boot/logs/unbalance.log) for additional information
\n\nIt's strongly suggested to install the Fix Common Plugins and run the Docker Safe New Permissions command
`, c.operation.OwnerIssue, c.operation.GroupIssue, c.operation.FolderIssue, c.operation.FileIssue)
	}

	if sendErr := c.sendmail(c.settings.NotifyCalc, subject, message, false); sendErr != nil {
		mlog.Error(sendErr)
	}

	// Local logging of the resulting disk layout.
	mlog.Info("_find:Listing (%d) disks ...", len(c.storage.Disks))

	for _, disk := range c.storage.Disks {
		disk.Print()
	}

	c.storage.Print()

	outbound = &dto.Packet{Topic: "calcProgress", Payload: "Operation Finished"}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	c.storage.BytesToTransfer = c.operation.BytesToTransfer
	c.storage.OpState = c.operation.OpState
	c.storage.PrevState = c.operation.PrevState

	// Signal the frontend that the operation has finished.
	outbound = &dto.Packet{Topic: "findFinished", Payload: c.storage}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")

	// Only raise the permission-issue dialog when there is actual work to do
	// (BytesToTransfer > 0) and at least one issue was found.
	if c.operation.BytesToTransfer > 0 && (c.operation.OwnerIssue+c.operation.GroupIssue+c.operation.FolderIssue+c.operation.FileIssue > 0) {
		outbound = &dto.Packet{Topic: "calcPermIssue", Payload: fmt.Sprintf("%d|%d|%d|%d", c.operation.OwnerIssue, c.operation.GroupIssue, c.operation.FolderIssue, c.operation.FileIssue)}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
	}
}
// gather handles a GATHER request from the frontend. The payload is a JSON
// model.Disk chosen as the consolidation target; bins on all other disks are
// cleared (only the target has work to do) and the transfer is started.
func (c *Core) gather(msg *pubsub.Message) {
	mlog.Info("%+v", msg.Payload)

	data, ok := msg.Payload.(string)
	if !ok {
		mlog.Warning("Unable to convert gather parameters")
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to convert gather parameters"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		return
	}

	var target model.Disk
	err := json.Unmarshal([]byte(data), &target)
	if err != nil {
		mlog.Warning("Unable to bind gather parameters: %s", err)
		outbound := &dto.Packet{Topic: "opError", Payload: "Unable to bind gather parameters"}
		c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
		return
		// mlog.Fatalf(err.Error())
	}

	// user chose a target disk, remove bin from all other disks, since only the target
	// will have work to do
	for _, disk := range c.storage.Disks {
		if disk.Path != target.Path {
			disk.Bin = nil
		}
	}

	c.operation.OpState = model.StateGather
	c.operation.PrevState = model.StateGather

	go c.transfer("Move", true, msg)
}
// getLog broadcasts the current log contents to connected clients.
// msg is unused but required by the bus handler signature.
// (The original ended with a redundant bare return, which is removed.)
func (c *Core) getLog(msg *pubsub.Message) {
	log := c.storage.GetLog()

	outbound := &dto.Packet{Topic: "gotLog", Payload: log}
	c.bus.Pub(&pubsub.Message{Payload: outbound}, "socket:broadcast")
}
// sendmail notifies the user of an operation's outcome via the external
// mail command. notify is the user's notification preference: 0 means mail
// is disabled and nothing is sent. When dryRun is true the message body is
// prefixed with a DRY RUN banner. Returns the error from running the mail
// command, if any.
//
// (Cleaned up: the named result with a naked return and the dead
// commented-out format line are gone; behavior is unchanged.)
func (c *Core) sendmail(notify int, subject, message string, dryRun bool) error {
	if notify == 0 {
		return nil
	}

	dry := ""
	if dryRun {
		dry = "-------\nDRY RUN\n-------\n"
	}

	msg := dry + message

	cmd := exec.Command(mailCmd, "-e", "unBALANCE operation update", "-s", subject, "-m", msg)
	return cmd.Run()
}
// progress derives transfer statistics from the byte counters:
// percent done, estimated time left, and speed in MB/s.
//
// Fixed: the original divided by elapsed.Seconds() and bytesToTransfer
// unconditionally, yielding NaN/Inf percent and an undefined Duration when
// called with a zero elapsed time or zero total (e.g. at the very start of
// an operation). Those cases now return zeros.
func progress(bytesToTransfer, bytesTransferred int64, elapsed time.Duration) (percent float64, left time.Duration, speed float64) {
	if bytesToTransfer <= 0 || elapsed <= 0 {
		return 0, 0, 0
	}

	bytesPerSec := float64(bytesTransferred) / elapsed.Seconds()
	speed = bytesPerSec / 1024 / 1024                                     // MB/s
	percent = (float64(bytesTransferred) / float64(bytesToTransfer)) * 100 // %

	if bytesPerSec > 0 {
		left = time.Duration(float64(bytesToTransfer-bytesTransferred)/bytesPerSec) * time.Second
	}

	return percent, left, speed
}
func getError(line string, re *regexp.Regexp, errors map[int]string) string {
result := re.FindStringSubmatch(line)
status, _ := strconv.Atoi(result[1])
msg, ok := errors[status]
if !ok {
msg = "unknown error"
}
return msg
}
// notifyCommandsToRun mails the user the list of rsync commands the upcoming
// operation will execute, honoring the NotifyMove preference and DryRun flag.
// The mail is sent on a goroutine so the operation start isn't delayed.
func (c *Core) notifyCommandsToRun(opName string) {
	message := "\n\nThe following commands will be executed:\n\n"

	for _, command := range c.operation.Commands {
		cmd := fmt.Sprintf(`(src: %s) rsync %s %s %s`, command.WorkDir, c.operation.RsyncStrFlags, strconv.Quote(command.Src), strconv.Quote(command.Dst))
		message += cmd + "\n"
	}

	subject := fmt.Sprintf("unBALANCE - %s operation STARTING", strings.ToUpper(opName))

	go func() {
		if sendErr := c.sendmail(c.settings.NotifyMove, subject, message, c.settings.DryRun); sendErr != nil {
			mlog.Error(sendErr)
		}
	}()
}
|
package server
import (
"log"
"net/http"
"strings"
"github.com/gorilla/websocket"
"github.com/kjk/betterguid"
)
// wsHandler owns a single client WebSocket connection: it pumps incoming
// requests off the connection and dispatches them to per-type handlers.
type wsHandler struct {
	ws       *wsConn
	state    *State                  // per-user session state (IRC + socket registries)
	addr     string                  // remote address; key into state's socket map
	handlers map[string]func([]byte) // request type -> handler, filled by initHandlers
}
// newWSHandler wires up a handler for a fresh WebSocket connection and
// registers it with the user's session state.
func newWSHandler(conn *websocket.Conn, state *State, r *http.Request) *wsHandler {
	h := &wsHandler{
		ws:    newWSConn(conn),
		state: state,
		addr:  conn.RemoteAddr().String(),
	}
	h.init(r)
	h.initHandlers()
	return h
}
// run starts the connection's read/write pumps and serves requests from the
// inbound channel until it is closed (the socket went away), at which point
// the handler is deregistered from the session state.
func (h *wsHandler) run() {
	defer h.ws.close()
	go h.ws.send()
	go h.ws.recv()

	for {
		req, ok := <-h.ws.in
		if !ok {
			if h.state != nil {
				h.state.deleteWS(h.addr)
			}
			return
		}

		h.dispatchRequest(req)
	}
}
// dispatchRequest routes an incoming WebSocket request to the handler
// registered for its type; requests with an unknown type are ignored.
func (h *wsHandler) dispatchRequest(req WSRequest) {
	handler, registered := h.handlers[req.Type]
	if !registered {
		return
	}
	handler(req.Data)
}
// init registers this socket with the session state and pushes the initial
// payload: userlists and recent messages for every joined channel, except
// the one already embedded in the index page the client loaded (identified
// via the request path or the tab cookie).
func (h *wsHandler) init(r *http.Request) {
	h.state.setWS(h.addr, h.ws)

	log.Println(h.addr, "[State] User ID:", h.state.user.ID, "|",
		h.state.numIRC(), "IRC connections |",
		h.state.numWS(), "WebSocket connections")

	channels, err := h.state.user.GetChannels()
	if err != nil {
		log.Println(err)
	}

	// NOTE(review): assumes the URL path always carries a 3-byte prefix
	// (e.g. "/ws") before the tab path — confirm against route registration.
	path := r.URL.EscapedPath()[3:]
	pathServer, pathChannel := getTabFromPath(path)
	cookieServer, cookieChannel := parseTabCookie(r, path)

	for _, channel := range channels {
		if (channel.Server == pathServer && channel.Name == pathChannel) ||
			(channel.Server == cookieServer && channel.Name == cookieChannel) {
			// Userlist and messages for this channel gets embedded in the index page
			continue
		}

		h.state.sendJSON("users", Userlist{
			Server:  channel.Server,
			Channel: channel.Name,
			Users:   channelStore.GetUsers(channel.Server, channel.Name),
		})

		h.state.sendLastMessages(channel.Server, channel.Name, 50)
	}
}
// connect adds a new IRC server for this user unless one with the same host
// is already registered.
func (h *wsHandler) connect(b []byte) {
	var data Server
	data.UnmarshalJSON(b)

	if _, exists := h.state.getIRC(data.Host); exists {
		log.Println(h.addr, "[IRC]", data.Host, "already added")
		return
	}

	log.Println(h.addr, "[IRC] Add server", data.Host)
	connectIRC(data.Server, h.state)
	go h.state.user.AddServer(data.Server)
}
// reconnect re-establishes a dropped IRC connection, optionally relaxing TLS
// certificate verification for this attempt. Connected servers are ignored.
func (h *wsHandler) reconnect(b []byte) {
	var data ReconnectSettings
	data.UnmarshalJSON(b)

	irc, ok := h.state.getIRC(data.Server)
	if !ok || irc.Connected() {
		return
	}

	if irc.TLS {
		irc.TLSConfig.InsecureSkipVerify = data.SkipVerify
	}
	irc.Reconnect()
}
// join joins one or more channels on the given server.
func (h *wsHandler) join(b []byte) {
	var data Join
	data.UnmarshalJSON(b)

	irc, ok := h.state.getIRC(data.Server)
	if !ok {
		return
	}
	irc.Join(data.Channels...)
}
// part leaves one or more channels on the given server.
func (h *wsHandler) part(b []byte) {
	var data Part
	data.UnmarshalJSON(b)

	irc, ok := h.state.getIRC(data.Server)
	if !ok {
		return
	}
	irc.Part(data.Channels...)
}
// quit disconnects from the server and removes it from the user's saved
// servers; the saved-server removal happens even without a live connection.
func (h *wsHandler) quit(b []byte) {
	var data Quit
	data.UnmarshalJSON(b)

	log.Println(h.addr, "[IRC] Remove server", data.Server)
	irc, connected := h.state.getIRC(data.Server)
	if connected {
		h.state.deleteIRC(data.Server)
		irc.Quit()
	}

	go h.state.user.RemoveServer(data.Server)
}
// message sends a PRIVMSG and logs it asynchronously under a fresh ID.
func (h *wsHandler) message(b []byte) {
	var data Message
	data.UnmarshalJSON(b)

	irc, ok := h.state.getIRC(data.Server)
	if !ok {
		return
	}

	irc.Privmsg(data.To, data.Content)
	go h.state.user.LogMessage(betterguid.New(),
		data.Server, irc.GetNick(), data.To, data.Content)
}
// nick requests a nickname change on the given server.
func (h *wsHandler) nick(b []byte) {
	var data Nick
	data.UnmarshalJSON(b)

	irc, ok := h.state.getIRC(data.Server)
	if !ok {
		return
	}
	irc.Nick(data.New)
}
// topic sets the topic of a channel on the given server.
func (h *wsHandler) topic(b []byte) {
	var data Topic
	data.UnmarshalJSON(b)

	irc, ok := h.state.getIRC(data.Server)
	if !ok {
		return
	}
	irc.Topic(data.Channel, data.Topic)
}
// invite invites a user to a channel on the given server.
func (h *wsHandler) invite(b []byte) {
	var data Invite
	data.UnmarshalJSON(b)

	irc, ok := h.state.getIRC(data.Server)
	if !ok {
		return
	}
	irc.Invite(data.User, data.Channel)
}
// kick removes a user from a channel. The payload shares the Invite shape
// (user + channel), so that type is reused for decoding.
func (h *wsHandler) kick(b []byte) {
	var data Invite
	data.UnmarshalJSON(b)

	irc, ok := h.state.getIRC(data.Server)
	if !ok {
		return
	}
	irc.Kick(data.Channel, data.User)
}
// whois issues a WHOIS query for a user on the given server.
func (h *wsHandler) whois(b []byte) {
	var data Whois
	data.UnmarshalJSON(b)

	irc, ok := h.state.getIRC(data.Server)
	if !ok {
		return
	}
	irc.Whois(data.User)
}
// away sets (or clears) the away message on the given server.
func (h *wsHandler) away(b []byte) {
	var data Away
	data.UnmarshalJSON(b)

	irc, ok := h.state.getIRC(data.Server)
	if !ok {
		return
	}
	irc.Away(data.Message)
}
// raw writes a raw IRC protocol line to the given server.
func (h *wsHandler) raw(b []byte) {
	var data Raw
	data.UnmarshalJSON(b)

	irc, ok := h.state.getIRC(data.Server)
	if !ok {
		return
	}
	irc.Write(data.Message)
}
// search runs a message search asynchronously and pushes the results back
// over the socket when done; lookup errors are only logged.
func (h *wsHandler) search(b []byte) {
	go func() {
		var req SearchRequest
		req.UnmarshalJSON(b)

		found, err := h.state.user.SearchMessages(req.Server, req.Channel, req.Phrase)
		if err != nil {
			log.Println(err)
			return
		}

		h.state.sendJSON("search", SearchResult{
			Server:  req.Server,
			Channel: req.Channel,
			Results: found,
		})
	}()
}
// cert stores a client TLS certificate/key pair for the user and reports
// success or failure back over the socket.
func (h *wsHandler) cert(b []byte) {
	var data ClientCert
	data.UnmarshalJSON(b)

	if err := h.state.user.SetCertificate(data.Cert, data.Key); err != nil {
		h.state.sendJSON("cert_fail", Error{Message: err.Error()})
		return
	}

	h.state.sendJSON("cert_success", nil)
}
// fetchMessages sends a backlog page of up to 200 messages starting at Next.
func (h *wsHandler) fetchMessages(b []byte) {
	var req FetchMessages
	req.UnmarshalJSON(b)
	h.state.sendMessages(req.Server, req.Channel, 200, req.Next)
}
// setServerName stores a user-supplied display name for a server; blank or
// whitespace-only names are rejected.
func (h *wsHandler) setServerName(b []byte) {
	var req ServerName
	req.UnmarshalJSON(b)

	if !isValidServerName(req.Name) {
		return
	}
	h.state.user.SetServerName(req.Name, req.Server)
}
// initHandlers registers the handler for each WebSocket request type; the
// keys must match the "type" field the frontend sends.
func (h *wsHandler) initHandlers() {
	h.handlers = map[string]func([]byte){
		"connect":         h.connect,
		"reconnect":       h.reconnect,
		"join":            h.join,
		"part":            h.part,
		"quit":            h.quit,
		"message":         h.message,
		"nick":            h.nick,
		"topic":           h.topic,
		"invite":          h.invite,
		"kick":            h.kick,
		"whois":           h.whois,
		"away":            h.away,
		"raw":             h.raw,
		"search":          h.search,
		"cert":            h.cert,
		"fetch_messages":  h.fetchMessages,
		"set_server_name": h.setServerName,
	}
}
// isValidServerName reports whether name contains any non-whitespace
// characters.
func isValidServerName(name string) bool {
	trimmed := strings.TrimSpace(name)
	return len(trimmed) > 0
}
Add support for X-Forwarded-For.
package server
import (
"log"
"net/http"
"strings"
"github.com/gorilla/websocket"
"github.com/kjk/betterguid"
)
// wsHandler owns a single client WebSocket connection: it pumps incoming
// requests off the connection and dispatches them to per-type handlers.
type wsHandler struct {
	ws       *wsConn
	state    *State                  // per-user session state (IRC + socket registries)
	addr     string                  // client address; key into state's socket map
	handlers map[string]func([]byte) // request type -> handler, filled by initHandlers
}
// newWSHandler wires up a handler for a fresh WebSocket connection and
// registers it with the user's session state. When the request arrived
// through a reverse proxy the client address is taken from X-Forwarded-For,
// otherwise from the socket's remote address.
//
// Fixed: X-Forwarded-For may carry a comma-separated chain of addresses
// ("client, proxy1, proxy2"); only the first entry identifies the client,
// so the header is split and trimmed instead of being used verbatim.
func newWSHandler(conn *websocket.Conn, state *State, r *http.Request) *wsHandler {
	address := conn.RemoteAddr().String()
	if forwarded := r.Header.Get("X-Forwarded-For"); forwarded != "" {
		address = strings.TrimSpace(strings.SplitN(forwarded, ",", 2)[0])
	}

	h := &wsHandler{
		ws:    newWSConn(conn),
		state: state,
		addr:  address,
	}
	h.init(r)
	h.initHandlers()
	return h
}
// run starts the connection's read/write pumps and serves requests from the
// inbound channel until it is closed (the socket went away), at which point
// the handler is deregistered from the session state.
func (h *wsHandler) run() {
	defer h.ws.close()
	go h.ws.send()
	go h.ws.recv()

	for {
		req, ok := <-h.ws.in
		if !ok {
			if h.state != nil {
				h.state.deleteWS(h.addr)
			}
			return
		}

		h.dispatchRequest(req)
	}
}
// dispatchRequest routes an incoming WebSocket request to the handler
// registered for its type; requests with an unknown type are ignored.
func (h *wsHandler) dispatchRequest(req WSRequest) {
	if handler, ok := h.handlers[req.Type]; ok {
		handler(req.Data)
	}
}
// init registers this socket with the session state and pushes the initial
// payload: userlists and recent messages for every joined channel, except
// the one already embedded in the index page the client loaded (identified
// via the request path or the tab cookie).
func (h *wsHandler) init(r *http.Request) {
	h.state.setWS(h.addr, h.ws)

	log.Println(h.addr, "[State] User ID:", h.state.user.ID, "|",
		h.state.numIRC(), "IRC connections |",
		h.state.numWS(), "WebSocket connections")

	channels, err := h.state.user.GetChannels()
	if err != nil {
		log.Println(err)
	}

	// NOTE(review): assumes the URL path always carries a 3-byte prefix
	// (e.g. "/ws") before the tab path — confirm against route registration.
	path := r.URL.EscapedPath()[3:]
	pathServer, pathChannel := getTabFromPath(path)
	cookieServer, cookieChannel := parseTabCookie(r, path)

	for _, channel := range channels {
		if (channel.Server == pathServer && channel.Name == pathChannel) ||
			(channel.Server == cookieServer && channel.Name == cookieChannel) {
			// Userlist and messages for this channel gets embedded in the index page
			continue
		}

		h.state.sendJSON("users", Userlist{
			Server:  channel.Server,
			Channel: channel.Name,
			Users:   channelStore.GetUsers(channel.Server, channel.Name),
		})

		h.state.sendLastMessages(channel.Server, channel.Name, 50)
	}
}
// connect adds a new IRC server for this user unless one with the same host
// is already registered.
func (h *wsHandler) connect(b []byte) {
	var data Server
	data.UnmarshalJSON(b)

	if _, ok := h.state.getIRC(data.Host); !ok {
		log.Println(h.addr, "[IRC] Add server", data.Host)

		connectIRC(data.Server, h.state)
		go h.state.user.AddServer(data.Server)
	} else {
		log.Println(h.addr, "[IRC]", data.Host, "already added")
	}
}
// reconnect re-establishes a dropped IRC connection, optionally relaxing TLS
// certificate verification for this attempt. Connected servers are ignored.
func (h *wsHandler) reconnect(b []byte) {
	var data ReconnectSettings
	data.UnmarshalJSON(b)

	if i, ok := h.state.getIRC(data.Server); ok && !i.Connected() {
		if i.TLS {
			i.TLSConfig.InsecureSkipVerify = data.SkipVerify
		}
		i.Reconnect()
	}
}
// join joins one or more channels on the given server.
func (h *wsHandler) join(b []byte) {
	var data Join
	data.UnmarshalJSON(b)

	if i, ok := h.state.getIRC(data.Server); ok {
		i.Join(data.Channels...)
	}
}
// part leaves one or more channels on the given server.
func (h *wsHandler) part(b []byte) {
	var data Part
	data.UnmarshalJSON(b)

	if i, ok := h.state.getIRC(data.Server); ok {
		i.Part(data.Channels...)
	}
}
// quit disconnects from the server and removes it from the user's saved
// servers; the saved-server removal happens even without a live connection.
func (h *wsHandler) quit(b []byte) {
	var data Quit
	data.UnmarshalJSON(b)

	log.Println(h.addr, "[IRC] Remove server", data.Server)
	if i, ok := h.state.getIRC(data.Server); ok {
		h.state.deleteIRC(data.Server)
		i.Quit()
	}

	go h.state.user.RemoveServer(data.Server)
}
// message sends a PRIVMSG and logs it asynchronously under a fresh ID.
func (h *wsHandler) message(b []byte) {
	var data Message
	data.UnmarshalJSON(b)

	if i, ok := h.state.getIRC(data.Server); ok {
		i.Privmsg(data.To, data.Content)
		go h.state.user.LogMessage(betterguid.New(),
			data.Server, i.GetNick(), data.To, data.Content)
	}
}
// nick requests a nickname change on the given server.
func (h *wsHandler) nick(b []byte) {
	var data Nick
	data.UnmarshalJSON(b)

	if i, ok := h.state.getIRC(data.Server); ok {
		i.Nick(data.New)
	}
}
// topic sets the topic of a channel on the given server.
func (h *wsHandler) topic(b []byte) {
	var data Topic
	data.UnmarshalJSON(b)

	if i, ok := h.state.getIRC(data.Server); ok {
		i.Topic(data.Channel, data.Topic)
	}
}
// invite invites a user to a channel on the given server.
func (h *wsHandler) invite(b []byte) {
	var data Invite
	data.UnmarshalJSON(b)

	if i, ok := h.state.getIRC(data.Server); ok {
		i.Invite(data.User, data.Channel)
	}
}
// kick removes a user from a channel. The payload shares the Invite shape
// (user + channel), so that type is reused for decoding.
func (h *wsHandler) kick(b []byte) {
	var data Invite
	data.UnmarshalJSON(b)

	if i, ok := h.state.getIRC(data.Server); ok {
		i.Kick(data.Channel, data.User)
	}
}
// whois issues a WHOIS query for a user on the given server.
func (h *wsHandler) whois(b []byte) {
	var data Whois
	data.UnmarshalJSON(b)

	if i, ok := h.state.getIRC(data.Server); ok {
		i.Whois(data.User)
	}
}
// away sets (or clears) the away message on the given server.
func (h *wsHandler) away(b []byte) {
	var data Away
	data.UnmarshalJSON(b)

	if i, ok := h.state.getIRC(data.Server); ok {
		i.Away(data.Message)
	}
}
// raw writes a raw IRC protocol line to the given server.
func (h *wsHandler) raw(b []byte) {
	var data Raw
	data.UnmarshalJSON(b)

	if i, ok := h.state.getIRC(data.Server); ok {
		i.Write(data.Message)
	}
}
// search runs a message search asynchronously and pushes the results back
// over the socket when done; lookup errors are only logged.
func (h *wsHandler) search(b []byte) {
	go func() {
		var data SearchRequest
		data.UnmarshalJSON(b)

		results, err := h.state.user.SearchMessages(data.Server, data.Channel, data.Phrase)
		if err != nil {
			log.Println(err)
			return
		}

		h.state.sendJSON("search", SearchResult{
			Server:  data.Server,
			Channel: data.Channel,
			Results: results,
		})
	}()
}
// cert stores a client TLS certificate/key pair for the user and reports
// success or failure back over the socket.
func (h *wsHandler) cert(b []byte) {
	var data ClientCert
	data.UnmarshalJSON(b)

	err := h.state.user.SetCertificate(data.Cert, data.Key)
	if err != nil {
		h.state.sendJSON("cert_fail", Error{Message: err.Error()})
		return
	}

	h.state.sendJSON("cert_success", nil)
}
// fetchMessages sends a backlog page of up to 200 messages starting at Next.
func (h *wsHandler) fetchMessages(b []byte) {
	var data FetchMessages
	data.UnmarshalJSON(b)

	h.state.sendMessages(data.Server, data.Channel, 200, data.Next)
}
// setServerName stores a user-supplied display name for a server; blank or
// whitespace-only names are rejected.
func (h *wsHandler) setServerName(b []byte) {
	var data ServerName
	data.UnmarshalJSON(b)

	if isValidServerName(data.Name) {
		h.state.user.SetServerName(data.Name, data.Server)
	}
}
// initHandlers registers the handler for each WebSocket request type; the
// keys must match the "type" field the frontend sends.
func (h *wsHandler) initHandlers() {
	h.handlers = map[string]func([]byte){
		"connect":         h.connect,
		"reconnect":       h.reconnect,
		"join":            h.join,
		"part":            h.part,
		"quit":            h.quit,
		"message":         h.message,
		"nick":            h.nick,
		"topic":           h.topic,
		"invite":          h.invite,
		"kick":            h.kick,
		"whois":           h.whois,
		"away":            h.away,
		"raw":             h.raw,
		"search":          h.search,
		"cert":            h.cert,
		"fetch_messages":  h.fetchMessages,
		"set_server_name": h.setServerName,
	}
}
// isValidServerName reports whether name contains any non-whitespace
// characters.
func isValidServerName(name string) bool {
	return strings.TrimSpace(name) != ""
}
|
// Copyright 2015 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package common_test
import (
"github.com/juju/testing"
jc "github.com/juju/testing/checkers"
gc "gopkg.in/check.v1"
"github.com/juju/juju/service/common"
)
// confSuite exercises common.Conf.Validate.
type confSuite struct {
	testing.IsolationSuite
}

// Register the suite with gocheck.
var _ = gc.Suite(&confSuite{})
// TestValidateOkay checks that a conf with both required fields passes.
func (*confSuite) TestValidateOkay(c *gc.C) {
	conf := common.Conf{
		Desc:      "some service",
		ExecStart: "/path/to/some-command a b c",
	}
	err := conf.Validate()

	c.Check(err, jc.ErrorIsNil)
}
// TestValidateMissingDesc checks that an empty Desc is rejected.
func (*confSuite) TestValidateMissingDesc(c *gc.C) {
	conf := common.Conf{
		ExecStart: "/path/to/some-command a b c",
	}
	err := conf.Validate()

	c.Check(err, gc.ErrorMatches, ".*missing Desc.*")
}
// TestValidateMissingExecStart checks that an empty ExecStart is rejected.
func (*confSuite) TestValidateMissingExecStart(c *gc.C) {
	conf := common.Conf{
		Desc: "some service",
	}
	err := conf.Validate()

	c.Check(err, gc.ErrorMatches, ".*missing ExecStart.*")
}
// TestValidateRelativeExecStart checks that a non-absolute ExecStart command
// path is rejected.
func (*confSuite) TestValidateRelativeExecStart(c *gc.C) {
	conf := common.Conf{
		Desc:      "some service",
		ExecStart: "some-command a b c",
	}
	err := conf.Validate()

	c.Check(err, gc.ErrorMatches, `.*relative path in ExecStart \(.*`)
}
// TestValidateRelativeExecStopPost checks that a non-absolute ExecStopPost
// command path is rejected.
func (*confSuite) TestValidateRelativeExecStopPost(c *gc.C) {
	conf := common.Conf{
		Desc:         "some service",
		ExecStart:    "/path/to/some-command a b c",
		ExecStopPost: "some-other-command a b c",
	}
	err := conf.Validate()

	c.Check(err, gc.ErrorMatches, `.*relative path in ExecStopPost \(.*`)
}
Add tests for quoted executables.
// Copyright 2015 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package common_test
import (
"github.com/juju/testing"
jc "github.com/juju/testing/checkers"
gc "gopkg.in/check.v1"
"github.com/juju/juju/service/common"
)
// confSuite exercises common.Conf.Validate.
type confSuite struct {
	testing.IsolationSuite
}

// Register the suite with gocheck.
var _ = gc.Suite(&confSuite{})
// TestValidateOkay checks that a conf with both required fields passes.
func (*confSuite) TestValidateOkay(c *gc.C) {
	conf := common.Conf{
		Desc:      "some service",
		ExecStart: "/path/to/some-command a b c",
	}
	err := conf.Validate()

	c.Check(err, jc.ErrorIsNil)
}
// TestValidateSingleQuotedExecutable checks that an absolute executable path
// wrapped in single quotes is still accepted.
func (*confSuite) TestValidateSingleQuotedExecutable(c *gc.C) {
	conf := common.Conf{
		Desc:      "some service",
		ExecStart: "'/path/to/some-command' a b c",
	}
	err := conf.Validate()

	c.Check(err, jc.ErrorIsNil)
}
// TestValidateDoubleQuotedExecutable checks that an absolute executable path
// wrapped in double quotes is still accepted.
func (*confSuite) TestValidateDoubleQuotedExecutable(c *gc.C) {
	conf := common.Conf{
		Desc:      "some service",
		ExecStart: `"/path/to/some-command" a b c`,
	}
	err := conf.Validate()

	c.Check(err, jc.ErrorIsNil)
}
// TestValidatePartiallyQuotedExecutable checks that quoting the whole
// command line (so the "executable" includes its arguments) is rejected as
// a relative path.
func (*confSuite) TestValidatePartiallyQuotedExecutable(c *gc.C) {
	conf := common.Conf{
		Desc:      "some service",
		ExecStart: "'/path/to/some-command a b c'",
	}
	err := conf.Validate()

	c.Check(err, gc.ErrorMatches, `.*relative path in ExecStart \(.*`)
}
// TestValidateMissingDesc checks that an empty Desc is rejected.
func (*confSuite) TestValidateMissingDesc(c *gc.C) {
	conf := common.Conf{
		ExecStart: "/path/to/some-command a b c",
	}
	err := conf.Validate()

	c.Check(err, gc.ErrorMatches, ".*missing Desc.*")
}
// TestValidateMissingExecStart checks that an empty ExecStart is rejected.
func (*confSuite) TestValidateMissingExecStart(c *gc.C) {
	conf := common.Conf{
		Desc: "some service",
	}
	err := conf.Validate()

	c.Check(err, gc.ErrorMatches, ".*missing ExecStart.*")
}
// TestValidateRelativeExecStart checks that a non-absolute ExecStart command
// path is rejected.
func (*confSuite) TestValidateRelativeExecStart(c *gc.C) {
	conf := common.Conf{
		Desc:      "some service",
		ExecStart: "some-command a b c",
	}
	err := conf.Validate()

	c.Check(err, gc.ErrorMatches, `.*relative path in ExecStart \(.*`)
}
// TestValidateRelativeExecStopPost checks that a non-absolute ExecStopPost
// command path is rejected.
func (*confSuite) TestValidateRelativeExecStopPost(c *gc.C) {
	conf := common.Conf{
		Desc:         "some service",
		ExecStart:    "/path/to/some-command a b c",
		ExecStopPost: "some-other-command a b c",
	}
	err := conf.Validate()

	c.Check(err, gc.ErrorMatches, `.*relative path in ExecStopPost \(.*`)
}
|
// Copyright 2012 The goauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The jwt package provides support for creating credentials for OAuth2 service
// account requests.
//
// For examples of the package usage please see jwt_test.go.
// Example usage (error handling omitted for brevity):
//
// // Craft the ClaimSet and JWT token.
// t := &jwt.Token{
// Key: pemKeyBytes,
// }
// t.ClaimSet = &jwt.ClaimSet{
// Iss: "XXXXXXXXXXXX@developer.gserviceaccount.com",
// Scope: "https://www.googleapis.com/auth/devstorage.read_only",
// }
//
// // We need to provide a client.
// c := &http.Client{}
//
// // Get the access token.
// o, _ := t.Assert(c)
//
// // Form the request to the service.
// req, _ := http.NewRequest("GET", "https://storage.googleapis.com/", nil)
// req.Header.Set("Authorization", "OAuth "+o.AccessToken)
// req.Header.Set("x-goog-api-version", "2")
// req.Header.Set("x-goog-project-id", "XXXXXXXXXXXX")
//
// // Make the request.
// result, _ := c.Do(req)
//
// For info on OAuth2 service accounts please see the online documentation.
// https://developers.google.com/accounts/docs/OAuth2ServiceAccount
//
package jwt
import (
"bytes"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"time"
"code.google.com/p/goauth2/oauth"
)
// These are the default/standard values for this to work for Google service accounts.
const (
	stdAlgorithm     = "RS256" // JWT signing algorithm
	stdType          = "JWT"
	stdAssertionType = "http://oauth.net/grant_type/jwt/1.0/bearer"
	stdGrantType     = "urn:ietf:params:oauth:grant-type:jwt-bearer"
	stdAud           = "https://accounts.google.com/o/oauth2/token" // default token endpoint / audience
)
var (
	// ErrInvalidKey is returned when Token.Key cannot be parsed as a
	// PEM-encoded RSA private key.
	ErrInvalidKey = errors.New("Invalid Key")
)
// base64Encode returns the base64url encoding of b with any trailing "="
// padding removed, as JWT segments require.
func base64Encode(b []byte) string {
	encoded := base64.URLEncoding.EncodeToString(b)
	return strings.TrimRight(encoded, "=")
}
// base64Decode decodes a base64url string, restoring any "=" padding that
// base64Encode stripped.
func base64Decode(s string) ([]byte, error) {
	if rem := len(s) % 4; rem == 2 || rem == 3 {
		s += strings.Repeat("=", 4-rem)
	}
	return base64.URLEncoding.DecodeString(s)
}
// The JWT claim set contains information about the JWT including the
// permissions being requested (scopes), the target of the token, the issuer,
// the time the token was issued, and the lifetime of the token.
//
// Aud is usually https://accounts.google.com/o/oauth2/token
type ClaimSet struct {
	Iss   string `json:"iss"`             // email address of the client_id of the application making the access token request
	Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
	Aud   string `json:"aud"`             // descriptor of the intended target of the assertion (Optional).
	Prn   string `json:"prn,omitempty"`   // email for which the application is requesting delegated access (Optional).
	Exp   int64  `json:"exp"`             // expiry as Unix seconds; derived from exp in encode()
	Iat   int64  `json:"iat"`             // issued-at as Unix seconds; derived from iat in encode()
	Typ   string `json:"typ,omitempty"`

	// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
	// This array is marshalled using custom code (see (c *ClaimSet) encode()).
	PrivateClaims map[string]interface{} `json:"-"`

	// Unexported wall-clock forms of Exp/Iat, set by setTimes.
	exp time.Time
	iat time.Time
}
// setTimes stamps the claim set as issued at t and expiring one hour later.
//
// Note that these times bound the lifetime of the encoded JWT itself, not of
// the access_token returned by the server. The access token is also good for
// one hour, so there is little point in re-using a JWT a second time.
func (c *ClaimSet) setTimes(t time.Time) {
	c.iat = t
	c.exp = t.Add(time.Hour)
}
// Delimiters used to splice the private-claims JSON object into the
// marshalled public claim set (see ClaimSet.encode).
var (
	jsonStart = []byte{'{'}
	jsonEnd   = []byte{'}'}
)
// encode returns the base64url-encoded JSON form of the claim set, lazily
// stamping the issue/expiry times and defaulting Aud. When private claims
// are present they are marshalled separately and spliced into the public
// claims object by replacing its closing brace with a comma — JSON-level
// byte surgery, hence the delimiter sanity checks.
// Panics on marshalling failure (programmer error in the claim data).
func (c *ClaimSet) encode() string {
	if c.exp.IsZero() || c.iat.IsZero() {
		c.setTimes(time.Now())
	}
	if c.Aud == "" {
		c.Aud = stdAud
	}
	c.Exp = c.exp.Unix()
	c.Iat = c.iat.Unix()

	b, err := json.Marshal(c)
	if err != nil {
		panic(err)
	}

	if len(c.PrivateClaims) == 0 {
		return base64Encode(b)
	}

	// Marshal private claim set and then append it to b.
	prv, err := json.Marshal(c.PrivateClaims)
	if err != nil {
		panic(fmt.Errorf("Invalid map of private claims %v", c.PrivateClaims))
	}

	// Concatenate public and private claim JSON objects.
	if !bytes.HasSuffix(b, jsonEnd) {
		panic(fmt.Errorf("Invalid JSON %s", b))
	}
	if !bytes.HasPrefix(prv, jsonStart) {
		panic(fmt.Errorf("Invalid JSON %s", prv))
	}
	b[len(b)-1] = ','          // Replace closing curly brace with a comma.
	b = append(b, prv[1:]...)  // Append private claims.
	return base64Encode(b)
}
// Header describes the algorithm and type of token being generated,
// and optionally a KeyID describing additional parameters for the
// signature.
type Header struct {
	Algorithm string `json:"alg"`
	Type      string `json:"typ"`
	KeyId     string `json:"kid,omitempty"`
}
// encode returns the base64url-encoded JSON form of the header.
// Panics on marshalling failure (programmer error).
func (h *Header) encode() string {
	b, err := json.Marshal(h)
	if err != nil {
		panic(err)
	}
	return base64Encode(b)
}
// A JWT is composed of three parts: a header, a claim set, and a signature.
// The well formed and encoded JWT can then be exchanged for an access token.
//
// The Token is not a JWT, but is is encoded to produce a well formed JWT.
//
// When obtaining a key from the Google API console it will be downloaded in a
// PKCS12 encoding. To use this key you will need to convert it to a PEM file.
// This can be achieved with openssl.
//
//	$ openssl pkcs12 -in <key.p12> -nocerts -passin pass:notasecret -nodes -out <key.pem>
//
// The contents of this file can then be used as the Key.
type Token struct {
	ClaimSet *ClaimSet // claim set used to construct the JWT
	Header   *Header   // header used to construct the JWT
	Key      []byte    // PEM printable encoding of the private key

	pKey *rsa.PrivateKey // lazily parsed from Key by parsePrivateKey

	// Cached encoded segments, filled in by encode()/sign().
	header string
	claim  string
	sig    string

	// When useExternalSigner is set, signer produces the signature instead
	// of the local RSA key (see NewSignerToken).
	useExternalSigner bool
	signer            Signer
}
// NewToken returns a filled in *Token based on the standard header,
// and sets the Iat and Exp times based on when the call to Assert is
// made.
func NewToken(iss, scope string, key []byte) *Token {
	return &Token{
		ClaimSet: &ClaimSet{
			Iss:   iss,
			Scope: scope,
			Aud:   stdAud,
		},
		Header: &Header{
			Algorithm: stdAlgorithm,
			Type:      stdType,
		},
		Key: key,
	}
}
// Signer is an interface that given a JWT token, returns the header &
// claim (serialized and urlEncoded to a byte slice), along with the
// signature and an error (if any occurred). It could modify any data
// to sign (typically the KeyID).
//
// Example usage where a SHA256 hash of the original url-encoded token
// with an added KeyID and secret data is used as a signature:
//
//	var privateData = "secret data added to hash, indexed by KeyID"
//
//	type SigningService struct{}
//
//	func (ss *SigningService) Sign(in *jwt.Token) (newTokenData, sig []byte, err error) {
//		in.Header.KeyID = "signing service"
//		newTokenData = in.EncodeWithoutSignature()
//		dataToSign := fmt.Sprintf("%s.%s", newTokenData, privateData)
//		h := sha256.New()
//		_, err := h.Write([]byte(dataToSign))
//		sig = h.Sum(nil)
//		return
//	}
type Signer interface {
	Sign(in *Token) (tokenData, signature []byte, err error)
}
// NewSignerToken returns a *Token, using an external signer function
// instead of a locally-held RSA private key.
func NewSignerToken(iss, scope string, signer Signer) *Token {
	t := NewToken(iss, scope, nil)
	t.useExternalSigner = true
	t.signer = signer
	return t
}
// Expired returns a boolean value letting us know if the token has expired.
// A token whose times were never set (zero exp) also reports as expired.
func (t *Token) Expired() bool {
	return t.ClaimSet.exp.Before(time.Now())
}
// encode constructs and signs the Token, returning the three-segment
// header.claim.signature JWT ready to exchange for an access token.
// On signing failure the JWT string is empty.
func (t *Token) encode() (string, error) {
	t.header = t.Header.encode()
	t.claim = t.ClaimSet.encode()

	if err := t.sign(); err != nil {
		return "", err
	}

	return fmt.Sprintf("%s.%s.%s", t.header, t.claim, t.sig), nil
}
// EncodeWithoutSignature returns the url-encoded value of the Token
// before signing has occurred (typically for use by external signers).
func (t *Token) EncodeWithoutSignature() string {
	t.header = t.Header.encode()
	t.claim = t.ClaimSet.encode()
	return fmt.Sprintf("%s.%s", t.header, t.claim)
}
// sign computes the signature for a Token. The details for this can be found
// in the OAuth2 Service Account documentation.
// https://developers.google.com/accounts/docs/OAuth2ServiceAccount#computingsignature
//
// With an external signer, the signer may rewrite the header/claim segments
// (e.g. to add a KeyID), so both are taken from its output. Otherwise the
// "header.claim" string is hashed with SHA-256 and signed with the parsed
// RSA key using PKCS#1 v1.5.
func (t *Token) sign() error {
	if t.useExternalSigner {
		fulldata, sig, err := t.signer.Sign(t)
		if err != nil {
			return err
		}

		// The signer returns "header.claim"; split it back into segments.
		split := strings.Split(string(fulldata), ".")
		if len(split) != 2 {
			return errors.New("no token returned")
		}
		t.header = split[0]
		t.claim = split[1]
		t.sig = base64Encode(sig)
		return err
	}

	ss := fmt.Sprintf("%s.%s", t.header, t.claim)

	// Lazily parse the PEM key on first use.
	if t.pKey == nil {
		err := t.parsePrivateKey()
		if err != nil {
			return err
		}
	}

	h := sha256.New()
	h.Write([]byte(ss))
	b, err := rsa.SignPKCS1v15(rand.Reader, t.pKey, crypto.SHA256, h.Sum(nil))
	t.sig = base64Encode(b)
	return err
}
// parsePrivateKey converts the Token's Key ([]byte) into a parsed
// rsa.PrivateKey. If the key is not well formed this method will return an
// ErrInvalidKey error.
func (t *Token) parsePrivateKey() error {
block, _ := pem.Decode(t.Key)
if block == nil {
return ErrInvalidKey
}
parsedKey, err := x509.ParsePKCS8PrivateKey(block.Bytes)
if err != nil {
parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
return err
}
}
var ok bool
t.pKey, ok = parsedKey.(*rsa.PrivateKey)
if !ok {
return ErrInvalidKey
}
return nil
}
// Assert obtains an *oauth.Token from the remote server by encoding and sending
// a JWT. The access_token will expire in one hour (3600 seconds) and cannot be
// refreshed (no refresh_token is returned with the response). Once this token
// expires call this method again to get a fresh one.
func (t *Token) Assert(c *http.Client) (*oauth.Token, error) {
var o *oauth.Token
u, v, err := t.buildRequest()
if err != nil {
return o, err
}
resp, err := c.PostForm(u, v)
if err != nil {
return o, err
}
o, err = handleResponse(resp)
return o, err
}
// buildRequest sets up the URL values and the proper URL string for making our
// access_token request.
func (t *Token) buildRequest() (string, url.Values, error) {
v := url.Values{}
j, err := t.encode()
if err != nil {
return t.ClaimSet.Aud, v, err
}
v.Set("grant_type", stdGrantType)
v.Set("assertion", j)
return t.ClaimSet.Aud, v, nil
}
// Used for decoding the response body.
type respBody struct {
IdToken string `json:"id_token"`
Access string `json:"access_token"`
Type string `json:"token_type"`
ExpiresIn time.Duration `json:"expires_in"`
}
// handleResponse returns a filled in *oauth.Token given the *http.Response from
// a *http.Request created by buildRequest.
func handleResponse(r *http.Response) (*oauth.Token, error) {
o := &oauth.Token{}
defer r.Body.Close()
if r.StatusCode != 200 {
return o, errors.New("invalid response: " + r.Status)
}
b := &respBody{}
err := json.NewDecoder(r.Body).Decode(b)
if err != nil {
return o, err
}
o.AccessToken = b.Access
if b.IdToken != "" {
// decode returned id token to get expiry
o.AccessToken = b.IdToken
s := strings.Split(b.IdToken, ".")
if len(s) < 2 {
return nil, errors.New("invalid token received")
}
d, err := base64Decode(s[1])
if err != nil {
return o, err
}
c := &ClaimSet{}
err = json.NewDecoder(bytes.NewBuffer(d)).Decode(c)
if err != nil {
return o, err
}
o.Expiry = time.Unix(c.Exp, 0)
return o, nil
}
o.Expiry = time.Now().Add(b.ExpiresIn * time.Second)
return o, nil
}
jwt: Add support for googleapi delegation with 'sub'
R=adg
CC=golang-dev
https://codereview.appspot.com/13336047
Committer: Andrew Gerrand <395a7d33bec8475c9b83b7d440f141bcbd994aa5@golang.org>
// Copyright 2012 The goauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The jwt package provides support for creating credentials for OAuth2 service
// account requests.
//
// For examples of the package usage please see jwt_test.go.
// Example usage (error handling omitted for brevity):
//
// // Craft the ClaimSet and JWT token.
// t := &jwt.Token{
// Key: pemKeyBytes,
// }
// t.ClaimSet = &jwt.ClaimSet{
// Iss: "XXXXXXXXXXXX@developer.gserviceaccount.com",
// Scope: "https://www.googleapis.com/auth/devstorage.read_only",
// }
//
// // We need to provide a client.
// c := &http.Client{}
//
// // Get the access token.
// o, _ := t.Assert(c)
//
// // Form the request to the service.
// req, _ := http.NewRequest("GET", "https://storage.googleapis.com/", nil)
// req.Header.Set("Authorization", "OAuth "+o.AccessToken)
// req.Header.Set("x-goog-api-version", "2")
// req.Header.Set("x-goog-project-id", "XXXXXXXXXXXX")
//
// // Make the request.
// result, _ := c.Do(req)
//
// For info on OAuth2 service accounts please see the online documentation.
// https://developers.google.com/accounts/docs/OAuth2ServiceAccount
//
package jwt
import (
"bytes"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"time"
"code.google.com/p/goauth2/oauth"
)
// These are the default/standard values for this to work for Google service accounts.
const (
	stdAlgorithm     = "RS256" // JWT signing algorithm (RSA with SHA-256)
	stdType          = "JWT"
	stdAssertionType = "http://oauth.net/grant_type/jwt/1.0/bearer"
	stdGrantType     = "urn:ietf:params:oauth:grant-type:jwt-bearer"
	stdAud           = "https://accounts.google.com/o/oauth2/token" // default token endpoint; ClaimSet.encode falls back to it when Aud is empty
)

// ErrInvalidKey is returned by parsePrivateKey when Token.Key cannot be
// decoded as PEM or is not an RSA private key.
var (
	ErrInvalidKey = errors.New("Invalid Key")
)
// base64Encode returns a base64url-encoded copy of b with any trailing
// padding ("=") stripped, as JWT segments require.
func base64Encode(b []byte) string {
	encoded := base64.URLEncoding.EncodeToString(b)
	return strings.TrimRight(encoded, "=")
}
// base64Decode decodes a base64url string whose trailing padding may have
// been stripped (as produced by base64Encode).
func base64Decode(s string) ([]byte, error) {
	// Restore the padding the encoder removed. A remainder of 1 is never
	// produced by valid base64; that case is left for DecodeString to reject.
	if m := len(s) % 4; m == 2 {
		s += "=="
	} else if m == 3 {
		s += "="
	}
	return base64.URLEncoding.DecodeString(s)
}
// The JWT claim set contains information about the JWT including the
// permissions being requested (scopes), the target of the token, the issuer,
// the time the token was issued, and the lifetime of the token.
//
// Aud is usually https://accounts.google.com/o/oauth2/token
type ClaimSet struct {
	Iss   string `json:"iss"`             // email address of the client_id of the application making the access token request
	Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
	Aud   string `json:"aud"`             // descriptor of the intended target of the assertion (Optional).
	Prn   string `json:"prn,omitempty"`   // email for which the application is requesting delegated access (Optional).
	Exp   int64  `json:"exp"`             // expiry as Unix seconds; derived from exp in encode()
	Iat   int64  `json:"iat"`             // issued-at as Unix seconds; derived from iat in encode()
	Typ   string `json:"typ,omitempty"`
	Sub   string `json:"sub,omitempty"` // Add support for googleapi delegation support
	// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
	// This array is marshalled using custom code (see (c *ClaimSet) encode()).
	PrivateClaims map[string]interface{} `json:"-"`
	// Wall-clock forms of Iat/Exp, stamped by setTimes.
	exp time.Time
	iat time.Time
}
// setTimes stamps the claim set as issued at t and expiring one hour later.
//
// Note that these times have nothing to do with the expiration time of the
// access_token returned by the server; they bound the lifetime of the
// encoded JWT itself. A JWT can be re-used for up to one hour after it was
// encoded, and the granted access token is also good for one hour, so there
// is little point in using the JWT a second time.
func (c *ClaimSet) setTimes(t time.Time) {
	c.iat, c.exp = t, t.Add(time.Hour)
}
// Byte markers used by ClaimSet.encode to splice the public and private
// claim JSON objects together.
var (
	jsonStart = []byte{'{'}
	jsonEnd   = []byte{'}'}
)
// encode returns the Base64url encoded form of the Signature.
//
// Zero iat/exp times are stamped with "now", and an empty Aud falls back to
// the standard Google token endpoint. Private claims, if present, are
// marshalled separately and spliced into the public-claim JSON object.
// Marshalling failures panic: the claim set is caller-constructed, so a
// failure indicates programmer error.
func (c *ClaimSet) encode() string {
	if c.exp.IsZero() || c.iat.IsZero() {
		c.setTimes(time.Now())
	}
	if c.Aud == "" {
		c.Aud = stdAud
	}
	// Mirror the time.Time fields into the Unix-seconds fields that carry
	// the json tags.
	c.Exp = c.exp.Unix()
	c.Iat = c.iat.Unix()
	b, err := json.Marshal(c)
	if err != nil {
		panic(err)
	}
	if len(c.PrivateClaims) == 0 {
		return base64Encode(b)
	}
	// Marshal private claim set and then append it to b.
	prv, err := json.Marshal(c.PrivateClaims)
	if err != nil {
		panic(fmt.Errorf("Invalid map of private claims %v", c.PrivateClaims))
	}
	// Concatenate public and private claim JSON objects.
	if !bytes.HasSuffix(b, jsonEnd) {
		panic(fmt.Errorf("Invalid JSON %s", b))
	}
	if !bytes.HasPrefix(prv, jsonStart) {
		panic(fmt.Errorf("Invalid JSON %s", prv))
	}
	b[len(b)-1] = ','         // Replace closing curly brace with a comma.
	b = append(b, prv[1:]...) // Append private claims.
	return base64Encode(b)
}
// Header describes the algorithm and type of token being generated,
// and optionally a KeyID describing additional parameters for the
// signature.
type Header struct {
	Algorithm string `json:"alg"`           // e.g. stdAlgorithm ("RS256")
	Type      string `json:"typ"`           // e.g. stdType ("JWT")
	KeyId     string `json:"kid,omitempty"` // identifies the signing key; typically set by an external Signer
}
// encode marshals the header to JSON and returns its unpadded base64url
// form. A marshal failure panics, since a Header is always marshallable.
func (h *Header) encode() string {
	data, err := json.Marshal(h)
	if err != nil {
		panic(err)
	}
	return base64Encode(data)
}
// A JWT is composed of three parts: a header, a claim set, and a signature.
// The well formed and encoded JWT can then be exchanged for an access token.
//
// The Token is not a JWT, but it is encoded to produce a well formed JWT.
//
// When obtaining a key from the Google API console it will be downloaded in a
// PKCS12 encoding. To use this key you will need to convert it to a PEM file.
// This can be achieved with openssl.
//
// $ openssl pkcs12 -in <key.p12> -nocerts -passin pass:notasecret -nodes -out <key.pem>
//
// The contents of this file can then be used as the Key.
type Token struct {
	ClaimSet *ClaimSet // claim set used to construct the JWT
	Header   *Header   // header used to construct the JWT
	Key      []byte    // PEM printable encoding of the private key
	pKey     *rsa.PrivateKey // lazily parsed from Key by parsePrivateKey
	header   string // cached base64url-encoded header (set by encode/EncodeWithoutSignature)
	claim    string // cached base64url-encoded claim set
	sig      string // cached base64url-encoded signature (set by sign)
	useExternalSigner bool   // when true, sign() delegates to signer
	signer            Signer // external signer; see NewSignerToken
}
// NewToken returns a *Token built from the standard JWT header and a claim
// set for the given issuer and scope. The Iat and Exp times are stamped
// later, when the token is encoded during the call to Assert.
func NewToken(iss, scope string, key []byte) *Token {
	return &Token{
		ClaimSet: &ClaimSet{
			Iss:   iss,
			Scope: scope,
			Aud:   stdAud,
		},
		Header: &Header{
			Algorithm: stdAlgorithm,
			Type:      stdType,
		},
		Key: key,
	}
}
// Signer is an interface that given a JWT token, returns the header &
// claim (serialized and urlEncoded to a byte slice), along with the
// signature and an error (if any occurred). It could modify any data
// to sign (typically the KeyId).
//
// Example usage where a SHA256 hash of the original url-encoded token
// with an added KeyId and secret data is used as a signature:
//
//	var privateData = "secret data added to hash, indexed by KeyId"
//
//	type SigningService struct{}
//
//	func (ss *SigningService) Sign(in *jwt.Token) (newTokenData, sig []byte, err error) {
//		in.Header.KeyId = "signing service"
//		newTokenData = in.EncodeWithoutSignature()
//		dataToSign := fmt.Sprintf("%s.%s", newTokenData, privateData)
//		h := sha256.New()
//		_, err = h.Write([]byte(dataToSign))
//		sig = h.Sum(nil)
//		return
//	}
type Signer interface {
	// Sign returns the serialized "<header>.<claim>" bytes (possibly
	// modified, e.g. with a KeyId set) together with the raw signature.
	Sign(in *Token) (tokenData, signature []byte, err error)
}
// NewSignerToken returns a *Token whose signature is produced by the given
// external Signer instead of a locally held private key.
func NewSignerToken(iss, scope string, signer Signer) *Token {
	tok := NewToken(iss, scope, nil)
	tok.signer = signer
	tok.useExternalSigner = true
	return tok
}
// Expired reports whether the claim set's lifetime has already passed.
func (t *Token) Expired() bool {
	now := time.Now()
	return now.After(t.ClaimSet.exp)
}
// encode constructs and signs a Token, returning a JWT in the form
// "<header>.<claim>.<signature>", ready to use for requesting an access
// token.
func (t *Token) encode() (string, error) {
	var tok string
	t.header = t.Header.encode()
	t.claim = t.ClaimSet.encode()
	err := t.sign()
	if err != nil {
		return tok, err
	}
	tok = fmt.Sprintf("%s.%s.%s", t.header, t.claim, t.sig)
	return tok, nil
}
// EncodeWithoutSignature returns the url-encoded value of the Token
// before signing has occurred (typically for use by external signers).
//
// Side effect: the encoded header and claim are cached on the Token so a
// subsequent sign() sees the same values.
func (t *Token) EncodeWithoutSignature() string {
	t.header = t.Header.encode()
	t.claim = t.ClaimSet.encode()
	return fmt.Sprintf("%s.%s", t.header, t.claim)
}
// sign computes the signature for a Token. The details for this can be found
// in the OAuth2 Service Account documentation.
// https://developers.google.com/accounts/docs/OAuth2ServiceAccount#computingsignature
//
// With an external signer, the data returned by the signer replaces the
// cached header/claim (the signer may have mutated them, e.g. by setting a
// KeyId). Otherwise the cached "<header>.<claim>" string is hashed with
// SHA-256 and signed with the parsed RSA key using PKCS#1 v1.5.
func (t *Token) sign() error {
	if t.useExternalSigner {
		fulldata, sig, err := t.signer.Sign(t)
		if err != nil {
			return err
		}
		// The signer returns the full "<header>.<claim>" string; split it
		// back into its two cached components.
		split := strings.Split(string(fulldata), ".")
		if len(split) != 2 {
			return errors.New("no token returned")
		}
		t.header = split[0]
		t.claim = split[1]
		t.sig = base64Encode(sig)
		return err // err is nil here (checked above)
	}
	ss := fmt.Sprintf("%s.%s", t.header, t.claim)
	if t.pKey == nil {
		// Lazily parse the PEM key on first use; the result is cached.
		err := t.parsePrivateKey()
		if err != nil {
			return err
		}
	}
	h := sha256.New()
	h.Write([]byte(ss))
	b, err := rsa.SignPKCS1v15(rand.Reader, t.pKey, crypto.SHA256, h.Sum(nil))
	t.sig = base64Encode(b)
	return err
}
// parsePrivateKey converts the Token's Key ([]byte) into a parsed
// rsa.PrivateKey. If the key is not well formed this method will return an
// ErrInvalidKey error.
//
// Both PKCS#8 and PKCS#1 encodings are accepted (PKCS#8 is tried first;
// the PKCS#1 parse error is returned if both fail). The parsed key is
// cached on t.pKey.
func (t *Token) parsePrivateKey() error {
	block, _ := pem.Decode(t.Key)
	if block == nil {
		return ErrInvalidKey
	}
	parsedKey, err := x509.ParsePKCS8PrivateKey(block.Bytes)
	if err != nil {
		parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes)
		if err != nil {
			return err
		}
	}
	var ok bool
	// A PKCS#8 blob may contain a non-RSA key; only RSA is supported here.
	t.pKey, ok = parsedKey.(*rsa.PrivateKey)
	if !ok {
		return ErrInvalidKey
	}
	return nil
}
// Assert obtains an *oauth.Token from the remote server by encoding and sending
// a JWT. The access_token will expire in one hour (3600 seconds) and cannot be
// refreshed (no refresh_token is returned with the response). Once this token
// expires call this method again to get a fresh one.
func (t *Token) Assert(c *http.Client) (*oauth.Token, error) {
	var o *oauth.Token
	u, v, err := t.buildRequest()
	if err != nil {
		return o, err
	}
	// POST the assertion form to the token endpoint (the claim set's Aud).
	resp, err := c.PostForm(u, v)
	if err != nil {
		return o, err
	}
	o, err = handleResponse(resp)
	return o, err
}
// buildRequest sets up the URL values and the proper URL string for making our
// access_token request. The target URL is the claim set's audience (the
// token endpoint); encoding the token also signs it.
func (t *Token) buildRequest() (string, url.Values, error) {
	v := url.Values{}
	j, err := t.encode()
	if err != nil {
		return t.ClaimSet.Aud, v, err
	}
	v.Set("grant_type", stdGrantType)
	v.Set("assertion", j)
	return t.ClaimSet.Aud, v, nil
}
// Used for decoding the response body.
type respBody struct {
	IdToken   string        `json:"id_token"`     // OpenID Connect JWT, when present
	Access    string        `json:"access_token"` // OAuth2 bearer token
	Type      string        `json:"token_type"`
	ExpiresIn time.Duration `json:"expires_in"` // bare count of seconds; multiplied by time.Second in handleResponse
}
// handleResponse returns a filled in *oauth.Token given the *http.Response from
// a *http.Request created by buildRequest.
//
// On a non-200 status or a decode failure the zero-value token is returned
// alongside the error. When the response carries an id_token, that JWT
// replaces the access token and its "exp" claim becomes the expiry;
// otherwise expires_in (seconds) is added to the current time.
func handleResponse(r *http.Response) (*oauth.Token, error) {
	o := &oauth.Token{}
	defer r.Body.Close()
	if r.StatusCode != 200 {
		return o, errors.New("invalid response: " + r.Status)
	}
	b := &respBody{}
	err := json.NewDecoder(r.Body).Decode(b)
	if err != nil {
		return o, err
	}
	o.AccessToken = b.Access
	if b.IdToken != "" {
		// decode returned id token to get expiry
		o.AccessToken = b.IdToken
		s := strings.Split(b.IdToken, ".")
		if len(s) < 2 {
			// NOTE(review): this path returns nil instead of o, unlike every
			// other error return in this function — confirm callers only
			// inspect the error.
			return nil, errors.New("invalid token received")
		}
		// The payload (second segment) holds the claims.
		d, err := base64Decode(s[1])
		if err != nil {
			return o, err
		}
		c := &ClaimSet{}
		err = json.NewDecoder(bytes.NewBuffer(d)).Decode(c)
		if err != nil {
			return o, err
		}
		o.Expiry = time.Unix(c.Exp, 0)
		return o, nil
	}
	// expires_in arrives as a plain number of seconds.
	o.Expiry = time.Now().Add(b.ExpiresIn * time.Second)
	return o, nil
}
|
package torrentlog
import (
"errors"
"fmt"
"os"
"time"
"code.uber.internal/infra/kraken/core"
"code.uber.internal/infra/kraken/utils/log"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// Validation errors for received-piece summaries.
// NOTE(review): neither value is referenced anywhere in this file — confirm
// they are used elsewhere before removing.
var (
	errEmptyReceivedPieces    = errors.New("empty received piece counts")
	errNegativeReceivedPieces = errors.New("negative value in received piece counts")
)
// Logger wraps structured log entries for important torrent events. These events
// are intended to be consumed at the cluster level via ELK, and are distinct from
// the verbose stdout logs of the agent. In particular, Logger bridges host-agnostic
// metrics to individual hostnames.
//
// For example, if there is a spike in download times, an engineer can cross-reference
// the spike with the torrent logs in ELK and zero-in on a single host. From there,
// the engineer can inspect the stdout logs of the host for more detailed information
// as to why the download took so long.
type Logger struct {
	zap *zap.Logger // sink; New enriches it with hostname/zone/cluster/peer_id fields
}
// New creates a new Logger. Every entry is annotated with the local
// hostname and the peer's zone, cluster, and peer ID so cluster-level
// queries can be joined back to a specific host.
func New(config log.Config, pctx core.PeerContext) (*Logger, error) {
	hostname, err := os.Hostname()
	if err != nil {
		return nil, fmt.Errorf("hostname: %s", err)
	}
	logger, err := log.New(config, map[string]interface{}{
		"hostname": hostname,
		"zone":     pctx.Zone,
		"cluster":  pctx.Cluster,
		"peer_id":  pctx.PeerID.String(),
	})
	if err != nil {
		return nil, fmt.Errorf("config: %s", err)
	}
	return &Logger{logger}, nil
}
// NewNopLogger returns a Logger whose entries are discarded, for use in
// tests that need a non-nil Logger.
func NewNopLogger() *Logger {
	nop := zap.NewNop()
	return &Logger{nop}
}
// OutgoingConnectionAccept logs an accepted outgoing connection.
// Debug level: connection events are per-peer and high volume.
func (l *Logger) OutgoingConnectionAccept(
	name string,
	infoHash core.InfoHash,
	remotePeerID core.PeerID) {
	l.zap.Debug(
		"Outgoing connection accept",
		zap.String("name", name),
		zap.String("info_hash", infoHash.String()),
		zap.String("remote_peer_id", remotePeerID.String()))
}

// OutgoingConnectionReject logs a rejected outgoing connection,
// including the rejection reason. Debug level.
func (l *Logger) OutgoingConnectionReject(name string,
	infoHash core.InfoHash,
	remotePeerID core.PeerID,
	err error) {
	l.zap.Debug(
		"Outgoing connection reject",
		zap.String("name", name),
		zap.String("info_hash", infoHash.String()),
		zap.String("remote_peer_id", remotePeerID.String()),
		zap.Error(err))
}

// IncomingConnectionAccept logs an accepted incoming connection. Debug level.
func (l *Logger) IncomingConnectionAccept(
	name string,
	infoHash core.InfoHash,
	remotePeerID core.PeerID) {
	l.zap.Debug(
		"Incoming connection accept",
		zap.String("name", name),
		zap.String("info_hash", infoHash.String()),
		zap.String("remote_peer_id", remotePeerID.String()))
}

// IncomingConnectionReject logs a rejected incoming connection,
// including the rejection reason. Debug level.
func (l *Logger) IncomingConnectionReject(
	name string,
	infoHash core.InfoHash,
	remotePeerID core.PeerID,
	err error) {
	l.zap.Debug(
		"Incoming connection reject",
		zap.String("name", name),
		zap.String("info_hash", infoHash.String()),
		zap.String("remote_peer_id", remotePeerID.String()),
		zap.Error(err))
}
// SeedTimeout logs a seeding torrent being torn down due to timeout.
// Debug level.
func (l *Logger) SeedTimeout(name string, infoHash core.InfoHash) {
	l.zap.Debug(
		"Seed timeout",
		zap.String("name", name),
		zap.String("info_hash", infoHash.String()))
}

// LeechTimeout logs a leeching torrent being torn down due to timeout.
// Debug level.
func (l *Logger) LeechTimeout(name string, infoHash core.InfoHash) {
	l.zap.Debug(
		"Leech timeout",
		zap.String("name", name),
		zap.String("info_hash", infoHash.String()))
}
// DownloadSuccess logs a successful download at info level — one entry per
// completed download (see the Logger doc for how these are consumed).
func (l *Logger) DownloadSuccess(namespace, name string, size int64, downloadTime time.Duration) {
	l.zap.Info(
		"Download success",
		zap.String("namespace", namespace),
		zap.String("name", name),
		zap.Int64("size", size),
		zap.Duration("download_time", downloadTime))
}
// DownloadFailure logs a failed download.
//
// Logged at error level (fixed from Info): a failed download is an error
// condition and should surface in error-level monitoring rather than being
// buried in info-level traffic. This also makes the level consistent with
// the severity implied by the zap.Error field.
func (l *Logger) DownloadFailure(namespace, name string, size int64, err error) {
	l.zap.Error(
		"Download failure",
		zap.String("namespace", namespace),
		zap.String("name", name),
		zap.Int64("size", size),
		zap.Error(err))
}
// SeederSummaries logs a summary of the pieces requested and received from peers for a torrent.
// Always returns nil.
// NOTE(review): the error return is never non-nil here — confirm whether an
// interface elsewhere requires this signature before simplifying.
func (l *Logger) SeederSummaries(
	name string,
	infoHash core.InfoHash,
	summaries SeederSummaries) error {
	l.zap.Debug(
		"Seeder summaries",
		zap.String("name", name),
		zap.String("info_hash", infoHash.String()),
		zap.Array("seeder_summaries", summaries))
	return nil
}

// LeecherSummaries logs a summary of the pieces requested by and sent to peers for a torrent.
// Always returns nil (see note on SeederSummaries).
func (l *Logger) LeecherSummaries(
	name string,
	infoHash core.InfoHash,
	summaries LeecherSummaries) error {
	l.zap.Debug(
		"Leecher summaries",
		zap.String("name", name),
		zap.String("info_hash", infoHash.String()),
		zap.Array("leecher_summaries", summaries))
	return nil
}
// Sync flushes the log.
func (l *Logger) Sync() {
	// NOTE(review): the error from zap's Sync is discarded — confirm callers
	// never need to observe flush failures.
	l.zap.Sync()
}
// SeederSummary contains information about piece requests to and pieces received from a peer.
type SeederSummary struct {
	PeerID         core.PeerID
	RequestsSent   int
	PiecesReceived int
}

// MarshalLogObject marshals a SeederSummary for logging. Always returns nil.
func (s SeederSummary) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("peer_id", s.PeerID.String())
	enc.AddInt("requests_sent", s.RequestsSent)
	enc.AddInt("pieces_received", s.PiecesReceived)
	return nil
}

// SeederSummaries represents a slice of type SeederSummary
// that can be marshalled for logging.
type SeederSummaries []SeederSummary

// MarshalLogArray marshals a SeederSummaries slice for logging.
func (ss SeederSummaries) MarshalLogArray(enc zapcore.ArrayEncoder) error {
	for _, summary := range ss {
		// AppendObject's error is discarded: MarshalLogObject above always
		// returns nil.
		enc.AppendObject(summary)
	}
	return nil
}
// LeecherSummary contains information about piece requests from and pieces sent to a peer.
type LeecherSummary struct {
	PeerID           core.PeerID
	RequestsReceived int
	PiecesSent       int
}

// MarshalLogObject marshals a LeecherSummary for logging. Always returns nil.
func (s LeecherSummary) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("peer_id", s.PeerID.String())
	enc.AddInt("requests_received", s.RequestsReceived)
	enc.AddInt("pieces_sent", s.PiecesSent)
	return nil
}

// LeecherSummaries represents a slice of type LeecherSummary
// that can be marshalled for logging.
type LeecherSummaries []LeecherSummary

// MarshalLogArray marshals a LeecherSummaries slice for logging.
func (ls LeecherSummaries) MarshalLogArray(enc zapcore.ArrayEncoder) error {
	for _, summary := range ls {
		// AppendObject's error is discarded: MarshalLogObject above always
		// returns nil.
		enc.AppendObject(summary)
	}
	return nil
}
Set download failures to level=error
Summary: ATG is being throttled by kafka, so let's only log errors for now.
Reviewers: #kraken, O1553 Project kraken: Add blocking reviewers, yiran
Reviewed By: #kraken, O1553 Project kraken: Add blocking reviewers, yiran
Differential Revision: https://code.uberinternal.com/D2112735
package torrentlog
import (
"errors"
"fmt"
"os"
"time"
"code.uber.internal/infra/kraken/core"
"code.uber.internal/infra/kraken/utils/log"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
var (
errEmptyReceivedPieces = errors.New("empty received piece counts")
errNegativeReceivedPieces = errors.New("negative value in received piece counts")
)
// Logger wraps structured log entries for important torrent events. These events
// are intended to be consumed at the cluster level via ELK, and are distinct from
// the verbose stdout logs of the agent. In particular, Logger bridges host-agnostic
// metrics to individual hostnames.
//
// For example, if there is a spike in download times, an engineer can cross-reference
// the spike with the torrent logs in ELK and zero-in on a single host. From there,
// the engineer can inspect the stdout logs of the host for more detailed information
// as to why the download took so long.
type Logger struct {
zap *zap.Logger
}
// New creates a new Logger.
func New(config log.Config, pctx core.PeerContext) (*Logger, error) {
hostname, err := os.Hostname()
if err != nil {
return nil, fmt.Errorf("hostname: %s", err)
}
logger, err := log.New(config, map[string]interface{}{
"hostname": hostname,
"zone": pctx.Zone,
"cluster": pctx.Cluster,
"peer_id": pctx.PeerID.String(),
})
if err != nil {
return nil, fmt.Errorf("config: %s", err)
}
return &Logger{logger}, nil
}
// NewNopLogger returns a Logger containing a no-op zap logger for testing purposes.
func NewNopLogger() *Logger {
return &Logger{zap.NewNop()}
}
// OutgoingConnectionAccept logs an accepted outgoing connection.
func (l *Logger) OutgoingConnectionAccept(
name string,
infoHash core.InfoHash,
remotePeerID core.PeerID) {
l.zap.Debug(
"Outgoing connection accept",
zap.String("name", name),
zap.String("info_hash", infoHash.String()),
zap.String("remote_peer_id", remotePeerID.String()))
}
// OutgoingConnectionReject logs a rejected outgoing connection.
func (l *Logger) OutgoingConnectionReject(name string,
infoHash core.InfoHash,
remotePeerID core.PeerID,
err error) {
l.zap.Debug(
"Outgoing connection reject",
zap.String("name", name),
zap.String("info_hash", infoHash.String()),
zap.String("remote_peer_id", remotePeerID.String()),
zap.Error(err))
}
// IncomingConnectionAccept logs an accepted incoming connection.
func (l *Logger) IncomingConnectionAccept(
name string,
infoHash core.InfoHash,
remotePeerID core.PeerID) {
l.zap.Debug(
"Incoming connection accept",
zap.String("name", name),
zap.String("info_hash", infoHash.String()),
zap.String("remote_peer_id", remotePeerID.String()))
}
// IncomingConnectionReject logs a rejected incoming connection.
func (l *Logger) IncomingConnectionReject(
name string,
infoHash core.InfoHash,
remotePeerID core.PeerID,
err error) {
l.zap.Debug(
"Incoming connection reject",
zap.String("name", name),
zap.String("info_hash", infoHash.String()),
zap.String("remote_peer_id", remotePeerID.String()),
zap.Error(err))
}
// SeedTimeout logs a seeding torrent being torn down due to timeout.
func (l *Logger) SeedTimeout(name string, infoHash core.InfoHash) {
l.zap.Debug(
"Seed timeout",
zap.String("name", name),
zap.String("info_hash", infoHash.String()))
}
// LeechTimeout logs a leeching torrent being torn down due to timeout.
func (l *Logger) LeechTimeout(name string, infoHash core.InfoHash) {
l.zap.Debug(
"Leech timeout",
zap.String("name", name),
zap.String("info_hash", infoHash.String()))
}
// DownloadSuccess logs a successful download.
func (l *Logger) DownloadSuccess(namespace, name string, size int64, downloadTime time.Duration) {
l.zap.Info(
"Download success",
zap.String("namespace", namespace),
zap.String("name", name),
zap.Int64("size", size),
zap.Duration("download_time", downloadTime))
}
// DownloadFailure logs a failed download.
// Error level (unlike DownloadSuccess's Info): failures should surface in
// error-level monitoring rather than info-level traffic.
func (l *Logger) DownloadFailure(namespace, name string, size int64, err error) {
	l.zap.Error(
		"Download failure",
		zap.String("namespace", namespace),
		zap.String("name", name),
		zap.Int64("size", size),
		zap.Error(err))
}
// SeederSummaries logs a summary of the pieces requested and received from peers for a torrent.
func (l *Logger) SeederSummaries(
name string,
infoHash core.InfoHash,
summaries SeederSummaries) error {
l.zap.Debug(
"Seeder summaries",
zap.String("name", name),
zap.String("info_hash", infoHash.String()),
zap.Array("seeder_summaries", summaries))
return nil
}
// LeecherSummaries logs a summary of the pieces requested by and sent to peers for a torrent.
func (l *Logger) LeecherSummaries(
name string,
infoHash core.InfoHash,
summaries LeecherSummaries) error {
l.zap.Debug(
"Leecher summaries",
zap.String("name", name),
zap.String("info_hash", infoHash.String()),
zap.Array("leecher_summaries", summaries))
return nil
}
// Sync flushes the log.
func (l *Logger) Sync() {
l.zap.Sync()
}
// SeederSummary contains information about piece requests to and pieces received from a peer.
type SeederSummary struct {
PeerID core.PeerID
RequestsSent int
PiecesReceived int
}
// MarshalLogObject marshals a SeederSummary for logging.
func (s SeederSummary) MarshalLogObject(enc zapcore.ObjectEncoder) error {
enc.AddString("peer_id", s.PeerID.String())
enc.AddInt("requests_sent", s.RequestsSent)
enc.AddInt("pieces_received", s.PiecesReceived)
return nil
}
// SeederSummaries represents a slice of type SeederSummary
// that can be marshalled for logging.
type SeederSummaries []SeederSummary
// MarshalLogArray marshals a SeederSummaries slice for logging.
func (ss SeederSummaries) MarshalLogArray(enc zapcore.ArrayEncoder) error {
for _, summary := range ss {
enc.AppendObject(summary)
}
return nil
}
// LeecherSummary contains information about piece requests from and pieces sent to a peer.
type LeecherSummary struct {
PeerID core.PeerID
RequestsReceived int
PiecesSent int
}
// MarshalLogObject marshals a LeecherSummary for logging.
func (s LeecherSummary) MarshalLogObject(enc zapcore.ObjectEncoder) error {
enc.AddString("peer_id", s.PeerID.String())
enc.AddInt("requests_received", s.RequestsReceived)
enc.AddInt("pieces_sent", s.PiecesSent)
return nil
}
// LeecherSummaries represents a slice of type LeecherSummary
// that can be marshalled for logging.
type LeecherSummaries []LeecherSummary
// MarshalLogArray marshals a LeecherSummaries slice for logging.
func (ls LeecherSummaries) MarshalLogArray(enc zapcore.ArrayEncoder) error {
for _, summary := range ls {
enc.AppendObject(summary)
}
return nil
}
|
/*
Package fetcher downloads and parses web pages into sets of links.
*/
package fetcher
import (
"errors"
"fmt"
"net/http"
"github.com/jackdanger/collectlinks"
)
// WebFetch encapsulates a Fetch operation and its result.
type WebFetch struct {
	url           string   // the URL passed to Fetch
	internalLinks []string // NOTE(review): never populated in this version — confirm stub status
	externalLinks []string // NOTE(review): never populated in this version
	resourceLinks []string // NOTE(review): never populated in this version
	err           error    // error from the last Fetch, if any
}
// New is a convenience function to return a new, zero-valued Fetch struct.
func New() *WebFetch {
	return &WebFetch{}
}
// Fetch takes a URL, downloads the page body
// and parses it into a structure of links,
// filtering out duplicate & self links.
//
// NOTE(review): this version is stub behavior — the collected links are
// printed to stdout rather than classified into the struct fields, and
// wf.err is unconditionally set to a "mock" error before returning.
// Confirm this placeholder is intentional.
func (wf *WebFetch) Fetch(url string) {
	wf.url = url
	resp, err := http.Get(url)
	if err != nil {
		wf.err = err
		return
	}
	defer resp.Body.Close()
	links := collectlinks.All(resp.Body)
	for _, link := range links {
		fmt.Println(link)
	}
	wf.err = errors.New("mock")
}
// URL returns the url of the fetch operation.
func (wf *WebFetch) URL() string {
	return wf.url
}

// InternalLinks gets the domain-internal link list
// from a successful fetch operation, nil otherwise.
func (wf *WebFetch) InternalLinks() []string {
	return wf.internalLinks
}

// ExternalLinks gets the domain-external link list
// from a successful fetch operation, nil otherwise.
func (wf *WebFetch) ExternalLinks() []string {
	return wf.externalLinks
}

// ResourceLinks gets the resource link list
// from a successful fetch operation, nil otherwise.
func (wf *WebFetch) ResourceLinks() []string {
	return wf.resourceLinks
}

// Err returns the error from a fetch operation, if there was one.
func (wf *WebFetch) Err() error {
	return wf.err
}
Added link classification rules
/*
Package fetcher downloads and parses web pages into sets of links.
*/
package fetcher
import (
"crypto/tls"
"io/ioutil"
"net/http"
pkgurl "net/url"
"os"
"fmt"
"strings"
"github.com/jackdanger/collectlinks"
)
// WebFetch encapsulates a Fetch operation and its result.
type WebFetch struct {
	url           string   // the URL passed to Fetch
	internalLinks []string // same-host links that serve HTML
	externalLinks []string // links whose host differs from the fetched page
	resourceLinks []string // non-http(s) links and same-host non-HTML links
	err           error    // error fetching/parsing the page itself, if any
}
// New is a convenience function to return a new, zero-valued Fetch struct.
func New() *WebFetch {
	return &WebFetch{}
}
// Fetch takes a URL, downloads the page body
// and parses it into a structure of links,
// filtering out duplicate & self links.
//
// Links are classified into three buckets:
//   - externalLinks: host differs from the fetched page's host
//   - resourceLinks: non-http(s) schemes, or same-host links that do not
//     serve "text/html"
//   - internalLinks: same-host links serving "text/html"
//
// An error fetching the page itself is recorded in wf.err; per-link
// failures are reported as warnings on stdout and the link is skipped.
func (wf *WebFetch) Fetch(url string) {
	// validate parent
	parent, err := pkgurl.Parse(url)
	if err != nil {
		wf.err = err
		return
	}
	wf.url = url
	// disable security since this is just a test app
	tlsConfig := &tls.Config{
		InsecureSkipVerify: true,
	}
	transport := &http.Transport{
		TLSClientConfig: tlsConfig,
	}
	client := http.Client{Transport: transport}
	// download the html
	resp, err := client.Get(url)
	if err != nil {
		wf.err = err
		return
	}
	defer resp.Body.Close()
	// use an open-source library to parse it for links
	links := collectlinks.All(resp.Body)
	// distribute the links into desired categories
	for _, link := range links {
		child, err := pkgurl.Parse(link)
		if err != nil {
			fmt.Fprintf(os.Stdout, "\nWarning: failed to validate a link as a proper url:\n%v\n", err)
			continue
		}
		if child.Host != parent.Host {
			wf.externalLinks = append(wf.externalLinks, link)
			continue
		}
		if child.Scheme != "http" && child.Scheme != "https" {
			wf.resourceLinks = append(wf.resourceLinks, link)
			continue
		}
		hdResp, err := client.Head(link)
		if err != nil {
			fmt.Fprintf(os.Stdout, "\nWarning: failed to make a HEAD request to a link:\n%v\n", err)
			continue
		}
		// BUG FIX: the Content-Type lives in the response headers, not the
		// body. A HEAD response body is empty, so the previous code — which
		// read the body and searched it for "Content-Type: text/html" —
		// classified every same-host link as a resource.
		contentType := hdResp.Header.Get("Content-Type")
		// Drain and close the (empty) body immediately instead of deferring
		// inside the loop, so connections can be reused and descriptors are
		// not held open until Fetch returns.
		_, _ = ioutil.ReadAll(hdResp.Body)
		hdResp.Body.Close()
		// Contains (not ==) because Content-Type may carry parameters,
		// e.g. "text/html; charset=utf-8".
		if strings.Contains(contentType, "text/html") {
			wf.internalLinks = append(wf.internalLinks, link)
		} else {
			wf.resourceLinks = append(wf.resourceLinks, link)
		}
	}
}
// URL returns the url of the fetch operation.
func (wf *WebFetch) URL() string {
	return wf.url
}

// InternalLinks gets the domain-internal link list
// from a successful fetch operation, nil otherwise.
func (wf *WebFetch) InternalLinks() []string {
	return wf.internalLinks
}

// ExternalLinks gets the domain-external link list
// from a successful fetch operation, nil otherwise.
func (wf *WebFetch) ExternalLinks() []string {
	return wf.externalLinks
}

// ResourceLinks gets the resource link list
// from a successful fetch operation, nil otherwise.
func (wf *WebFetch) ResourceLinks() []string {
	return wf.resourceLinks
}

// Err returns the error from a fetch operation, if there was one.
func (wf *WebFetch) Err() error {
	return wf.err
}
|
package options
import "time"
// DEFAULT_MAX_RETRY_ATTEMPTS is how many times a terraform command is
// re-run when its output matches one of RETRYABLE_ERRORS.
const DEFAULT_MAX_RETRY_ATTEMPTS = 3
// DEFAULT_SLEEP is the pause between consecutive retry attempts.
const DEFAULT_SLEEP = 5 * time.Second
// List of recurring transient errors encountered when calling terraform
// If any of these match, we'll retry the command
// Each entry is a regular expression; "(?s)" lets ".*" span newlines in
// multi-line terraform output.
var RETRYABLE_ERRORS = []string{
	"(?s).*Failed to load state.*tcp.*timeout.*",
	"(?s).*Failed to load backend.*TLS handshake timeout.*",
	"(?s).*Creating metric alarm failed.*request to update this alarm is in progress.*",
	"(?s).*Error installing provider.*TLS handshake timeout.*",
	"(?s).*Error configuring the backend.*TLS handshake timeout.*",
	"(?s).*Error installing provider.*tcp.*timeout.*",
	"(?s).*Error installing provider.*tcp.*connection reset by peer.*",
	"NoSuchBucket: The specified bucket does not exist",
	"(?s).*Error creating SSM parameter: TooManyUpdates:.*",
	"(?s).*\"app.terraform.io/.*\": 429 Too Many Requests.*",
	"(?s).*app.terraform.io: error looking up module versions: 429 Too Many Requests.*",
}
Updated regex as per @kwilczynski's suggestion to cope with both scenarios
package options
import "time"
// DEFAULT_MAX_RETRY_ATTEMPTS is how many times a terraform command is
// re-run when its output matches one of RETRYABLE_ERRORS.
const DEFAULT_MAX_RETRY_ATTEMPTS = 3
// DEFAULT_SLEEP is the pause between consecutive retry attempts.
const DEFAULT_SLEEP = 5 * time.Second
// List of recurring transient errors encountered when calling terraform
// If any of these match, we'll retry the command
// Each entry is a regular expression; "(?s)" lets ".*" span newlines in
// multi-line terraform output.
var RETRYABLE_ERRORS = []string{
	"(?s).*Failed to load state.*tcp.*timeout.*",
	"(?s).*Failed to load backend.*TLS handshake timeout.*",
	"(?s).*Creating metric alarm failed.*request to update this alarm is in progress.*",
	"(?s).*Error installing provider.*TLS handshake timeout.*",
	"(?s).*Error configuring the backend.*TLS handshake timeout.*",
	"(?s).*Error installing provider.*tcp.*timeout.*",
	"(?s).*Error installing provider.*tcp.*connection reset by peer.*",
	"NoSuchBucket: The specified bucket does not exist",
	"(?s).*Error creating SSM parameter: TooManyUpdates:.*",
	"(?s).*app.terraform.io.*: 429 Too Many Requests.*",
}
|
package ics
import "errors"
// vCalendar is the component name bracketing a whole iCalendar object.
const vCalendar = "VCALENDAR"
// Calendar holds the top-level properties of a parsed VCALENDAR object.
type Calendar struct {
	// ProductID is the PRODID value; Method is the METHOD value.
	ProductID, Method string
}
// decode parses the body of a VCALENDAR component from d, filling in the
// calendar's properties and delegating each nested component (VEVENT,
// VTODO, ...) to its own decoder. It returns nil on a matching
// END:VCALENDAR, and an error for duplicate unique properties, an
// unsupported VERSION/CALSCALE, or a missing PRODID/VERSION.
func (c *Calendar) decode(d Decoder) error {
	// track which unique properties have already been seen
	var pID, ver, cs, m bool
	for {
		p, err := d.p.GetProperty()
		if err != nil {
			return err
		}
		switch p := p.(type) {
		case productID:
			if pID {
				return ErrMultipleUnique
			}
			pID = true
			c.ProductID = string(p)
		case version:
			if ver {
				return ErrMultipleUnique
			}
			ver = true
			// only ics version 2.0 is supported
			if p.Min != "2.0" && p.Max != "2.0" {
				return ErrUnsupportedVersion
			}
		case calscale:
			if cs {
				return ErrMultipleUnique
			}
			cs = true
			if p != "GREGORIAN" {
				return ErrUnsupportedCalendar
			}
		case method:
			if m {
				return ErrMultipleUnique
			}
			m = true
			// record the METHOD value; previously it was parsed but
			// never stored, leaving Calendar.Method always empty
			c.Method = string(p)
		case begin:
			// PRODID and VERSION must precede any component
			if !pID || !ver {
				return ErrRequiredMissing
			}
			switch p {
			case vEvent:
				if err = c.decodeEvent(d); err != nil {
					return err
				}
			case vTodo:
				if err = c.decodeTodo(d); err != nil {
					return err
				}
			case vJournal:
				if err = c.decodeJournal(d); err != nil {
					return err
				}
			case vFreeBusy:
				if err = c.decodeFreeBusy(d); err != nil {
					return err
				}
			case vTimezone:
				if err = c.decodeTimezone(d); err != nil {
					return err
				}
			default:
				// skip components this package does not model
				if err = d.readUnknownComponent(string(p)); err != nil {
					return err
				}
			}
		case end:
			if !pID || !ver {
				return ErrRequiredMissing
			}
			if p != vCalendar {
				return ErrInvalidEnd
			}
			return nil
		}
	}
}
// Errors
var (
	// ErrUnsupportedCalendar is returned for a CALSCALE other than GREGORIAN.
	ErrUnsupportedCalendar = errors.New("unsupported calendar")
	// ErrUnsupportedVersion is returned for a VERSION other than 2.0.
	ErrUnsupportedVersion = errors.New("unsupported ics version")
)
initial calendar decoding
package ics
import (
"errors"
"github.com/MJKWoolnough/bitmask"
)
// vCalendar is the component name bracketing a whole iCalendar object.
const vCalendar = "VCALENDAR"
// Calendar holds the top-level properties and the decoded components of
// a parsed VCALENDAR object.
type Calendar struct {
	// ProductID is the PRODID value; Method is the METHOD value.
	ProductID, Method string
	Events            []Event
	Todo              []Todo
	Journals          []Journal
	Timezones         []Timezone
}
// decode parses the body of a VCALENDAR component from d, filling in the
// calendar's properties and delegating each nested component (VEVENT,
// VTODO, ...) to its own decoder. It returns nil on a matching
// END:VCALENDAR, and an error for duplicate unique properties, an
// unsupported VERSION/CALSCALE, or a missing PRODID/VERSION.
func (c *Calendar) decode(d Decoder) error {
	// bm tracks which unique properties have been seen:
	// bit 0 = PRODID, 1 = VERSION, 2 = CALSCALE, 3 = METHOD.
	// SetIfNot is assumed to report a duplicate (bit already set) —
	// TODO confirm against the bitmask package documentation.
	bm := bitmask.New(4)
	for {
		p, err := d.p.GetProperty()
		if err != nil {
			return err
		}
		switch p := p.(type) {
		case productID:
			if bm.SetIfNot(0, true) {
				return ErrMultipleUnique
			}
			// NOTE: the leftover "pID = true" (and ver/cs/m below) from
			// the pre-bitmask version referenced undeclared variables
			// and did not compile; the bitmask already records this.
			c.ProductID = string(p)
		case version:
			if bm.SetIfNot(1, true) {
				return ErrMultipleUnique
			}
			// only ics version 2.0 is supported
			if p.Min != "2.0" && p.Max != "2.0" {
				return ErrUnsupportedVersion
			}
		case calscale:
			if bm.SetIfNot(2, true) {
				return ErrMultipleUnique
			}
			if p != "GREGORIAN" {
				return ErrUnsupportedCalendar
			}
		case method:
			if bm.SetIfNot(3, true) {
				return ErrMultipleUnique
			}
			c.Method = string(p)
		case begin:
			// PRODID and VERSION must precede any component
			if !bm.Get(0) || !bm.Get(1) {
				return ErrRequiredMissing
			}
			switch p {
			case vEvent:
				if err = c.decodeEvent(d); err != nil {
					return err
				}
			case vTodo:
				if err = c.decodeTodo(d); err != nil {
					return err
				}
			case vJournal:
				if err = c.decodeJournal(d); err != nil {
					return err
				}
			case vFreeBusy:
				if err = c.decodeFreeBusy(d); err != nil {
					return err
				}
			case vTimezone:
				if err = c.decodeTimezone(d); err != nil {
					return err
				}
			default:
				// skip components this package does not model
				if err = d.readUnknownComponent(string(p)); err != nil {
					return err
				}
			}
		case end:
			if !bm.Get(0) || !bm.Get(1) {
				return ErrRequiredMissing
			}
			if p != vCalendar {
				return ErrInvalidEnd
			}
			return nil
		}
	}
}
// Errors
var (
	// ErrUnsupportedCalendar is returned for a CALSCALE other than GREGORIAN.
	ErrUnsupportedCalendar = errors.New("unsupported calendar")
	// ErrUnsupportedVersion is returned for a VERSION other than 2.0.
	ErrUnsupportedVersion = errors.New("unsupported ics version")
)
|
package dockerfile2llb
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/url"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"github.com/containerd/containerd/platforms"
"github.com/docker/distribution/reference"
"github.com/docker/docker/pkg/signal"
"github.com/docker/go-connections/nat"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/client/llb/imagemetaresolver"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/moby/buildkit/frontend/dockerfile/parser"
"github.com/moby/buildkit/frontend/dockerfile/shell"
gw "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
"github.com/moby/buildkit/util/system"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
const (
	// emptyImageName is the reserved base name that starts from an empty rootfs.
	emptyImageName = "scratch"
	// localNameContext is the name of the local source holding the build context.
	localNameContext = "context"
	// historyComment tags image-history entries produced by this frontend.
	historyComment = "buildkit.dockerfile.v0"
	// DefaultCopyImage is the helper image used to implement COPY/ADD,
	// pinned by digest; overridable via ConvertOpt.OverrideCopyImage.
	DefaultCopyImage = "tonistiigi/copy:v0.1.7@sha256:9aab7d9ab369c6daf4831bf0653f7592110ab4b7e8a33fee2b9dca546e9d3089"
)
// ConvertOpt configures the Dockerfile-to-LLB conversion.
type ConvertOpt struct {
	// Target selects the stage to build; empty selects the last stage.
	Target string
	// MetaResolver resolves image configs; nil falls back to
	// imagemetaresolver.Default().
	MetaResolver llb.ImageMetaResolver
	BuildArgs    map[string]string
	Labels       map[string]string
	SessionID    string
	// BuildContext, when set, replaces the local "context" source.
	BuildContext *llb.State
	Excludes     []string
	// IgnoreCache contains names of the stages that should not use build cache.
	// Empty slice means ignore cache for all stages. Nil doesn't disable cache.
	IgnoreCache []string
	// CacheIDNamespace scopes the IDs for different cache mounts
	CacheIDNamespace string
	ImageResolveMode llb.ResolveMode
	TargetPlatform   *specs.Platform
	BuildPlatforms   []specs.Platform
	// PrefixPlatform prefixes progress messages with the platform.
	PrefixPlatform bool
	ExtraHosts     []llb.HostIP
	ForceNetMode   pb.NetMode
	// OverrideCopyImage replaces DefaultCopyImage when non-empty.
	OverrideCopyImage string
	LLBCaps           *apicaps.CapSet
}
// Dockerfile2LLB compiles Dockerfile bytes into the LLB state of the
// selected target stage together with the image config that stage would
// commit. It parses the Dockerfile, resolves base-image configs
// concurrently, dispatches every instruction of each reachable stage,
// and wires the local build context in as a mutable source.
func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, error) {
	if len(dt) == 0 {
		return nil, nil, errors.Errorf("the Dockerfile cannot be empty")
	}
	platformOpt := buildPlatformOpt(&opt)
	optMetaArgs := getPlatformArgs(platformOpt)
	for i, arg := range optMetaArgs {
		optMetaArgs[i] = setKVValue(arg, opt.BuildArgs)
	}
	dockerfile, err := parser.Parse(bytes.NewReader(dt))
	if err != nil {
		return nil, nil, err
	}
	proxyEnv := proxyEnvFromBuildArgs(opt.BuildArgs)
	stages, metaArgs, err := instructions.Parse(dockerfile.AST)
	if err != nil {
		return nil, nil, err
	}
	shlex := shell.NewLex(dockerfile.EscapeToken)
	// expand top-level ARG defaults against earlier meta args, then let
	// user-supplied build args override them
	for _, metaArg := range metaArgs {
		if metaArg.Value != nil {
			*metaArg.Value, _ = shlex.ProcessWordWithMap(*metaArg.Value, metaArgsToMap(optMetaArgs))
		}
		optMetaArgs = append(optMetaArgs, setKVValue(metaArg.KeyValuePairOptional, opt.BuildArgs))
	}
	metaResolver := opt.MetaResolver
	if metaResolver == nil {
		metaResolver = imagemetaresolver.Default()
	}
	allDispatchStates := newDispatchStates()
	// set base state for every image
	for i, st := range stages {
		name, err := shlex.ProcessWordWithMap(st.BaseName, metaArgsToMap(optMetaArgs))
		if err != nil {
			return nil, nil, err
		}
		if name == "" {
			return nil, nil, errors.Errorf("base name (%s) should not be blank", st.BaseName)
		}
		st.BaseName = name
		ds := &dispatchState{
			stage:          st,
			deps:           make(map[*dispatchState]struct{}),
			ctxPaths:       make(map[string]struct{}),
			stageName:      st.Name,
			prefixPlatform: opt.PrefixPlatform,
		}
		if st.Name == "" {
			// unnamed stages get a positional display name
			ds.stageName = fmt.Sprintf("stage-%d", i)
		}
		if v := st.Platform; v != "" {
			v, err := shlex.ProcessWordWithMap(v, metaArgsToMap(optMetaArgs))
			if err != nil {
				return nil, nil, errors.Wrapf(err, "failed to process arguments for platform %s", v)
			}
			p, err := platforms.Parse(v)
			if err != nil {
				return nil, nil, errors.Wrapf(err, "failed to parse platform %s", v)
			}
			ds.platform = &p
		}
		allDispatchStates.addState(ds)
		// count the commands that produce progress steps (base pull + ADD/COPY/RUN)
		total := 0
		if ds.stage.BaseName != emptyImageName && ds.base == nil {
			total = 1
		}
		for _, cmd := range ds.stage.Commands {
			switch cmd.(type) {
			case *instructions.AddCommand, *instructions.CopyCommand, *instructions.RunCommand:
				total++
			}
		}
		ds.cmdTotal = total
		if opt.IgnoreCache != nil {
			if len(opt.IgnoreCache) == 0 {
				ds.ignoreCache = true
			} else if st.Name != "" {
				for _, n := range opt.IgnoreCache {
					if strings.EqualFold(n, st.Name) {
						ds.ignoreCache = true
					}
				}
			}
		}
	}
	if len(allDispatchStates.states) == 1 {
		allDispatchStates.states[0].stageName = ""
	}
	var target *dispatchState
	if opt.Target == "" {
		target = allDispatchStates.lastTarget()
	} else {
		var ok bool
		target, ok = allDispatchStates.findStateByName(opt.Target)
		if !ok {
			return nil, nil, errors.Errorf("target stage %s could not be found", opt.Target)
		}
	}
	// fill dependencies to stages so unreachable ones can avoid loading image configs
	for _, d := range allDispatchStates.states {
		d.commands = make([]command, len(d.stage.Commands))
		for i, cmd := range d.stage.Commands {
			newCmd, err := toCommand(cmd, allDispatchStates)
			if err != nil {
				return nil, nil, err
			}
			d.commands[i] = newCmd
			for _, src := range newCmd.sources {
				if src != nil {
					d.deps[src] = struct{}{}
					if src.unregistered {
						allDispatchStates.addState(src)
					}
				}
			}
		}
	}
	// resolve base-image configs concurrently; failures surface at eg.Wait
	eg, ctx := errgroup.WithContext(ctx)
	for i, d := range allDispatchStates.states {
		reachable := isReachable(target, d)
		// resolve image config for every stage
		if d.base == nil {
			if d.stage.BaseName == emptyImageName {
				d.state = llb.Scratch()
				d.image = emptyImage(platformOpt.targetPlatform)
				continue
			}
			func(i int, d *dispatchState) {
				eg.Go(func() error {
					ref, err := reference.ParseNormalizedNamed(d.stage.BaseName)
					if err != nil {
						return errors.Wrapf(err, "failed to parse stage name %q", d.stage.BaseName)
					}
					platform := d.platform
					if platform == nil {
						platform = &platformOpt.targetPlatform
					}
					d.stage.BaseName = reference.TagNameOnly(ref).String()
					var isScratch bool
					if metaResolver != nil && reachable && !d.unregistered {
						prefix := "["
						if opt.PrefixPlatform && platform != nil {
							prefix += platforms.Format(*platform) + " "
						}
						prefix += "internal]"
						dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName, gw.ResolveImageConfigOpt{
							Platform:    platform,
							ResolveMode: opt.ImageResolveMode.String(),
							LogName:     fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName),
						})
						if err == nil { // handle the error while builder is actually running
							var img Image
							if err := json.Unmarshal(dt, &img); err != nil {
								return err
							}
							img.Created = nil
							// if there is no explicit target platform, try to match based on image config
							if d.platform == nil && platformOpt.implicitTarget {
								p := autoDetectPlatform(img, *platform, platformOpt.buildPlatforms)
								platform = &p
							}
							d.image = img
							if dgst != "" {
								// pin the base reference to the resolved digest
								ref, err = reference.WithDigest(ref, dgst)
								if err != nil {
									return err
								}
							}
							d.stage.BaseName = ref.String()
							_ = ref
							if len(img.RootFS.DiffIDs) == 0 {
								isScratch = true
								// schema1 images can't return diffIDs so double check :(
								for _, h := range img.History {
									if !h.EmptyLayer {
										isScratch = false
										break
									}
								}
							}
						}
					}
					if isScratch {
						d.state = llb.Scratch()
					} else {
						d.state = llb.Image(d.stage.BaseName, dfCmd(d.stage.SourceCode), llb.Platform(*platform), opt.ImageResolveMode, llb.WithCustomName(prefixCommand(d, "FROM "+d.stage.BaseName, opt.PrefixPlatform, platform)))
					}
					d.platform = platform
					return nil
				})
			}(i, d)
		}
	}
	if err := eg.Wait(); err != nil {
		return nil, nil, err
	}
	buildContext := &mutableOutput{}
	ctxPaths := map[string]struct{}{}
	// dispatch the commands of every reachable stage
	for _, d := range allDispatchStates.states {
		if !isReachable(target, d) {
			continue
		}
		if d.base != nil {
			d.state = d.base.state
			d.platform = d.base.platform
			d.image = clone(d.base.image)
		}
		// make sure that PATH is always set
		if _, ok := shell.BuildEnvs(d.image.Config.Env)["PATH"]; !ok {
			d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv)
		}
		// initialize base metadata from image conf
		for _, env := range d.image.Config.Env {
			k, v := parseKeyValue(env)
			d.state = d.state.AddEnv(k, v)
		}
		if d.image.Config.WorkingDir != "" {
			if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false); err != nil {
				return nil, nil, err
			}
		}
		if d.image.Config.User != "" {
			if err = dispatchUser(d, &instructions.UserCommand{User: d.image.Config.User}, false); err != nil {
				return nil, nil, err
			}
		}
		d.state = d.state.Network(opt.ForceNetMode)
		// note: this opt shadows the ConvertOpt for the rest of the iteration
		opt := dispatchOpt{
			allDispatchStates: allDispatchStates,
			metaArgs:          optMetaArgs,
			buildArgValues:    opt.BuildArgs,
			shlex:             shlex,
			sessionID:         opt.SessionID,
			buildContext:      llb.NewState(buildContext),
			proxyEnv:          proxyEnv,
			cacheIDNamespace:  opt.CacheIDNamespace,
			buildPlatforms:    platformOpt.buildPlatforms,
			targetPlatform:    platformOpt.targetPlatform,
			extraHosts:        opt.ExtraHosts,
			copyImage:         opt.OverrideCopyImage,
		}
		if opt.copyImage == "" {
			opt.copyImage = DefaultCopyImage
		}
		if err = dispatchOnBuild(d, d.image.Config.OnBuild, opt); err != nil {
			return nil, nil, err
		}
		for _, cmd := range d.commands {
			if err := dispatch(d, cmd, opt); err != nil {
				return nil, nil, err
			}
		}
		for p := range d.ctxPaths {
			ctxPaths[p] = struct{}{}
		}
	}
	// apply user-provided labels to the target image
	if len(opt.Labels) != 0 && target.image.Config.Labels == nil {
		target.image.Config.Labels = make(map[string]string, len(opt.Labels))
	}
	for k, v := range opt.Labels {
		target.image.Config.Labels[k] = v
	}
	// build the local-context source, narrowed to the paths actually used
	opts := []llb.LocalOption{
		llb.SessionID(opt.SessionID),
		llb.ExcludePatterns(opt.Excludes),
		llb.SharedKeyHint(localNameContext),
		WithInternalName("load build context"),
	}
	if includePatterns := normalizeContextPaths(ctxPaths); includePatterns != nil {
		opts = append(opts, llb.FollowPaths(includePatterns))
	}
	bc := llb.Local(localNameContext, opts...)
	if opt.BuildContext != nil {
		bc = *opt.BuildContext
	}
	buildContext.Output = bc.Output()
	defaults := []llb.ConstraintsOpt{
		llb.Platform(platformOpt.targetPlatform),
	}
	if opt.LLBCaps != nil {
		defaults = append(defaults, llb.WithCaps(*opt.LLBCaps))
	}
	st := target.state.SetMarshalDefaults(defaults...)
	if !platformOpt.implicitTarget {
		target.image.OS = platformOpt.targetPlatform.OS
		target.image.Architecture = platformOpt.targetPlatform.Architecture
	}
	return &st, &target.image, nil
}
// metaArgsToMap flattens ARG key/value pairs into a plain string map;
// a later entry with the same key overwrites an earlier one.
func metaArgsToMap(metaArgs []instructions.KeyValuePairOptional) map[string]string {
	out := make(map[string]string, len(metaArgs))
	for _, kv := range metaArgs {
		out[kv.Key] = kv.ValueString()
	}
	return out
}
// toCommand wraps an instruction into a command, resolving any
// dependency on another stage: COPY --from=<index|name> records the
// source stage in cmd.sources, creating an unregistered placeholder for
// names that refer to external images rather than declared stages.
// RUN --mount dependencies are filled in by detectRunMount.
func toCommand(ic instructions.Command, allDispatchStates *dispatchStates) (command, error) {
	cmd := command{Command: ic}
	if c, ok := ic.(*instructions.CopyCommand); ok {
		if c.From != "" {
			var stn *dispatchState
			index, err := strconv.Atoi(c.From)
			if err != nil {
				// non-numeric: stage name, or an external image reference
				stn, ok = allDispatchStates.findStateByName(c.From)
				if !ok {
					stn = &dispatchState{
						stage:        instructions.Stage{BaseName: c.From},
						deps:         make(map[*dispatchState]struct{}),
						unregistered: true,
					}
				}
			} else {
				stn, err = allDispatchStates.findStateByIndex(index)
				if err != nil {
					return command{}, err
				}
			}
			cmd.sources = []*dispatchState{stn}
		}
	}
	// detectRunMount mutates cmd in place when the instruction is a RUN
	// with mounts; the previous conditional returned (cmd, nil) on both
	// branches, so its result is deliberately ignored here.
	detectRunMount(&cmd, allDispatchStates)
	return cmd, nil
}
// dispatchOpt bundles the per-stage context handed to every dispatch*
// handler: stage registry, arg/env expansion state, and build settings.
type dispatchOpt struct {
	allDispatchStates *dispatchStates
	metaArgs          []instructions.KeyValuePairOptional
	buildArgValues    map[string]string
	shlex             *shell.Lex
	sessionID         string
	buildContext      llb.State
	proxyEnv          *llb.ProxyEnv
	cacheIDNamespace  string
	targetPlatform    specs.Platform
	buildPlatforms    []specs.Platform
	extraHosts        []llb.HostIP
	// copyImage is the helper image used to implement COPY/ADD.
	copyImage string
}
// dispatch routes one Dockerfile instruction to its handler, first
// expanding $VAR references against the stage's env and build args.
// Instruction types without a case fall through as a no-op.
func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
	if ex, ok := cmd.Command.(instructions.SupportsSingleWordExpansion); ok {
		err := ex.Expand(func(word string) (string, error) {
			return opt.shlex.ProcessWordWithMap(word, toEnvMap(d.buildArgs, d.image.Config.Env))
		})
		if err != nil {
			return err
		}
	}
	var err error
	switch c := cmd.Command.(type) {
	case *instructions.MaintainerCommand:
		err = dispatchMaintainer(d, c)
	case *instructions.EnvCommand:
		err = dispatchEnv(d, c)
	case *instructions.RunCommand:
		err = dispatchRun(d, c, opt.proxyEnv, cmd.sources, opt)
	case *instructions.WorkdirCommand:
		err = dispatchWorkdir(d, c, true)
	case *instructions.AddCommand:
		err = dispatchCopy(d, c.SourcesAndDest, opt.buildContext, true, c, "", opt)
		if err == nil {
			// record referenced context paths so the local source can be narrowed
			for _, src := range c.Sources() {
				d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{}
			}
		}
	case *instructions.LabelCommand:
		err = dispatchLabel(d, c)
	case *instructions.OnbuildCommand:
		err = dispatchOnbuild(d, c)
	case *instructions.CmdCommand:
		err = dispatchCmd(d, c)
	case *instructions.EntrypointCommand:
		err = dispatchEntrypoint(d, c)
	case *instructions.HealthCheckCommand:
		err = dispatchHealthcheck(d, c)
	case *instructions.ExposeCommand:
		err = dispatchExpose(d, c, opt.shlex)
	case *instructions.UserCommand:
		err = dispatchUser(d, c, true)
	case *instructions.VolumeCommand:
		err = dispatchVolume(d, c)
	case *instructions.StopSignalCommand:
		err = dispatchStopSignal(d, c)
	case *instructions.ShellCommand:
		err = dispatchShell(d, c)
	case *instructions.ArgCommand:
		err = dispatchArg(d, c, opt.metaArgs, opt.buildArgValues)
	case *instructions.CopyCommand:
		// COPY --from= copies out of another stage instead of the context
		l := opt.buildContext
		if len(cmd.sources) != 0 {
			l = cmd.sources[0].state
		}
		err = dispatchCopy(d, c.SourcesAndDest, l, false, c, c.Chown, opt)
		if err == nil && len(cmd.sources) == 0 {
			for _, src := range c.Sources() {
				d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{}
			}
		}
	default:
	}
	return err
}
// dispatchState is the evolving build state of a single stage: its LLB
// chain, image config so far, dependency links, and progress counters.
type dispatchState struct {
	state    llb.State
	image    Image
	platform *specs.Platform
	stage    instructions.Stage
	// base is the stage this one builds FROM, when that is itself a stage.
	base *dispatchState
	// deps are the stages this stage consumes (COPY --from, RUN mounts).
	deps      map[*dispatchState]struct{}
	buildArgs []instructions.KeyValuePairOptional
	commands  []command
	// ctxPaths are local build-context paths referenced by ADD/COPY.
	ctxPaths    map[string]struct{}
	ignoreCache bool
	// cmdSet marks that CMD was set explicitly, so ENTRYPOINT keeps it.
	cmdSet bool
	// unregistered marks a placeholder created for COPY --from=<image>.
	unregistered   bool
	stageName      string
	cmdIndex       int
	cmdTotal       int
	prefixPlatform bool
}
// dispatchStates indexes the build's stages by position and by
// lower-cased stage name.
type dispatchStates struct {
	states       []*dispatchState
	statesByName map[string]*dispatchState
}
// newDispatchStates returns an empty, ready-to-use stage index.
func newDispatchStates() *dispatchStates {
	return &dispatchStates{statesByName: map[string]*dispatchState{}}
}
// addState appends a stage and, when its base name matches an earlier
// named stage, links it to that stage via ds.base.
// NOTE(review): BaseName is looked up without lower-casing while the map
// is keyed lower-cased — confirm mixed-case FROM names are intended to
// miss here.
func (dss *dispatchStates) addState(ds *dispatchState) {
	dss.states = append(dss.states, ds)
	if d, ok := dss.statesByName[ds.stage.BaseName]; ok {
		ds.base = d
	}
	if ds.stage.Name != "" {
		dss.statesByName[strings.ToLower(ds.stage.Name)] = ds
	}
}
// findStateByName looks a stage up by its case-insensitive name.
func (dss *dispatchStates) findStateByName(name string) (*dispatchState, bool) {
	ds, ok := dss.statesByName[strings.ToLower(name)]
	return ds, ok
}
// findStateByIndex looks a stage up by its zero-based position.
func (dss *dispatchStates) findStateByIndex(index int) (*dispatchState, error) {
	if index < 0 || index >= len(dss.states) {
		return nil, errors.Errorf("invalid stage index %d", index)
	}
	return dss.states[index], nil
}
// lastTarget returns the final stage, the default build target.
// NOTE(review): assumes at least one stage exists — confirm callers only
// reach this after instructions.Parse returned stages.
func (dss *dispatchStates) lastTarget() *dispatchState {
	return dss.states[len(dss.states)-1]
}
// command pairs an instruction with the stages it reads from
// (COPY --from, RUN --mount).
type command struct {
	instructions.Command
	sources []*dispatchState
}
// dispatchOnBuild re-parses stored ONBUILD trigger lines and dispatches
// each of them in the context of the current stage.
func dispatchOnBuild(d *dispatchState, triggers []string, opt dispatchOpt) error {
	for _, trigger := range triggers {
		ast, err := parser.Parse(strings.NewReader(trigger))
		if err != nil {
			return err
		}
		if len(ast.AST.Children) != 1 {
			return errors.New("onbuild trigger should be a single expression")
		}
		ic, err := instructions.ParseCommand(ast.AST.Children[0])
		if err != nil {
			return err
		}
		cmd, err := toCommand(ic, opt.allDispatchStates)
		if err != nil {
			return err
		}
		if err := dispatch(d, cmd, opt); err != nil {
			return err
		}
	}
	return nil
}
// dispatchEnv applies ENV: it updates both the LLB state and the image
// config, and commits an empty-layer history entry.
func dispatchEnv(d *dispatchState, c *instructions.EnvCommand) error {
	commitMessage := bytes.NewBufferString("ENV")
	for _, e := range c.Env {
		commitMessage.WriteString(" " + e.String())
		d.state = d.state.AddEnv(e.Key, e.Value)
		d.image.Config.Env = addEnv(d.image.Config.Env, e.Key, e.Value)
	}
	return commitToHistory(&d.image, commitMessage.String(), false, nil)
}
// dispatchRun executes RUN: it assembles the exec options (build args as
// env, proxy settings, cache behavior, mounts, extra hosts) and chains a
// new exec op onto the stage's state.
func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyEnv, sources []*dispatchState, dopt dispatchOpt) error {
	var args []string = c.CmdLine
	if c.PrependShell {
		// shell form: wrap the command line in the configured shell
		args = withShell(d.image, args)
	}
	env := d.state.Env()
	opt := []llb.RunOption{llb.Args(args)}
	// declared build args are visible to RUN as environment variables
	for _, arg := range d.buildArgs {
		env = append(env, fmt.Sprintf("%s=%s", arg.Key, arg.ValueString()))
		opt = append(opt, llb.AddEnv(arg.Key, arg.ValueString()))
	}
	opt = append(opt, dfCmd(c))
	if d.ignoreCache {
		opt = append(opt, llb.IgnoreCache)
	}
	if proxy != nil {
		opt = append(opt, llb.WithProxy(*proxy))
	}
	runMounts, err := dispatchRunMounts(d, c, sources, dopt)
	if err != nil {
		return err
	}
	opt = append(opt, runMounts...)
	opt = append(opt, llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(dopt.shlex, c.String(), env)), d.prefixPlatform, d.state.GetPlatform())))
	for _, h := range dopt.extraHosts {
		opt = append(opt, llb.AddExtraHost(h.Host, h.IP))
	}
	d.state = d.state.Run(opt...).Root()
	return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs), true, &d.state)
}
// dispatchWorkdir applies WORKDIR, resolving relative paths against the
// image's current working directory; commit selects whether a history
// entry is recorded (false during base-image initialization).
func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bool) error {
	d.state = d.state.Dir(c.Path)
	wd := c.Path
	if !path.IsAbs(c.Path) {
		wd = path.Join("/", d.image.Config.WorkingDir, wd)
	}
	d.image.Config.WorkingDir = wd
	if commit {
		return commitToHistory(&d.image, "WORKDIR "+wd, false, nil)
	}
	return nil
}
// dispatchCopy implements COPY and ADD by running the helper copy image
// with the sources mounted read-only and the stage's rootfs mounted at
// /dest. ADD (isAddCommand) additionally supports remote URLs and
// archive unpacking; chown (COPY --chown) is forwarded to the helper.
// TODO: this should use CopyOp instead. Current implementation is inefficient
func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, opt dispatchOpt) error {
	img := llb.Image(opt.copyImage, llb.MarkImageInternal, llb.Platform(opt.buildPlatforms[0]), WithInternalName("helper image for file operations"))
	dest := path.Join(".", pathRelativeToWorkingDir(d.state, c.Dest()))
	if c.Dest() == "." || c.Dest() == "" || c.Dest()[len(c.Dest())-1] == filepath.Separator {
		// trailing separator tells the helper the destination is a directory
		dest += string(filepath.Separator)
	}
	args := []string{"copy"}
	unpack := isAddCommand
	mounts := make([]llb.RunOption, 0, len(c.Sources()))
	if chown != "" {
		args = append(args, fmt.Sprintf("--chown=%s", chown))
		_, _, err := parseUser(chown)
		if err != nil {
			// non-numeric user/group: the helper needs the stage's
			// passwd/group files to resolve the names
			mounts = append(mounts, llb.AddMount("/etc/passwd", d.state, llb.SourcePath("/etc/passwd"), llb.Readonly))
			mounts = append(mounts, llb.AddMount("/etc/group", d.state, llb.SourcePath("/etc/group"), llb.Readonly))
		}
	}
	commitMessage := bytes.NewBufferString("")
	if isAddCommand {
		commitMessage.WriteString("ADD")
	} else {
		commitMessage.WriteString("COPY")
	}
	for i, src := range c.Sources() {
		commitMessage.WriteString(" " + src)
		if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
			if !isAddCommand {
				return errors.New("source can't be a URL for COPY")
			}
			// Resources from remote URLs are not decompressed.
			// https://docs.docker.com/engine/reference/builder/#add
			//
			// Note: mixing up remote archives and local archives in a single ADD instruction
			// would result in undefined behavior: https://github.com/moby/buildkit/pull/387#discussion_r189494717
			unpack = false
			u, err := url.Parse(src)
			f := "__unnamed__"
			if err == nil {
				if base := path.Base(u.Path); base != "." && base != "/" {
					f = base
				}
			}
			target := path.Join(fmt.Sprintf("/src-%d", i), f)
			args = append(args, target)
			mounts = append(mounts, llb.AddMount(path.Dir(target), llb.HTTP(src, llb.Filename(f), dfCmd(c)), llb.Readonly))
		} else {
			// local source: mount the wildcard-free directory prefix and
			// pass the pattern to the helper
			d, f := splitWildcards(src)
			targetCmd := fmt.Sprintf("/src-%d", i)
			targetMount := targetCmd
			if f == "" {
				f = path.Base(src)
				targetMount = path.Join(targetMount, f)
			}
			targetCmd = path.Join(targetCmd, f)
			args = append(args, targetCmd)
			mounts = append(mounts, llb.AddMount(targetMount, sourceState, llb.SourcePath(d), llb.Readonly))
		}
	}
	commitMessage.WriteString(" " + c.Dest())
	args = append(args, dest)
	if unpack {
		args = append(args[:1], append([]string{"--unpack"}, args[1:]...)...)
	}
	platform := opt.targetPlatform
	if d.platform != nil {
		platform = *d.platform
	}
	runOpt := []llb.RunOption{llb.Args(args), llb.Dir("/dest"), llb.ReadonlyRootFS(), dfCmd(cmdToPrint), llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, cmdToPrint.String(), d.state.Env())), d.prefixPlatform, &platform))}
	if d.ignoreCache {
		runOpt = append(runOpt, llb.IgnoreCache)
	}
	run := img.Run(append(runOpt, mounts...)...)
	d.state = run.AddMount("/dest", d.state).Platform(platform)
	return commitToHistory(&d.image, commitMessage.String(), true, &d.state)
}
// dispatchMaintainer applies the deprecated MAINTAINER instruction.
func dispatchMaintainer(d *dispatchState, c *instructions.MaintainerCommand) error {
	d.image.Author = c.Maintainer
	return commitToHistory(&d.image, fmt.Sprintf("MAINTAINER %v", c.Maintainer), false, nil)
}
// dispatchLabel applies LABEL entries to the image config.
func dispatchLabel(d *dispatchState, c *instructions.LabelCommand) error {
	commitMessage := bytes.NewBufferString("LABEL")
	if d.image.Config.Labels == nil {
		d.image.Config.Labels = make(map[string]string, len(c.Labels))
	}
	for _, v := range c.Labels {
		d.image.Config.Labels[v.Key] = v.Value
		commitMessage.WriteString(" " + v.String())
	}
	return commitToHistory(&d.image, commitMessage.String(), false, nil)
}
// dispatchOnbuild stores an ONBUILD trigger for dependent builds.
func dispatchOnbuild(d *dispatchState, c *instructions.OnbuildCommand) error {
	d.image.Config.OnBuild = append(d.image.Config.OnBuild, c.Expression)
	return nil
}
// dispatchCmd applies CMD and remembers that it was set explicitly so a
// later ENTRYPOINT keeps it.
func dispatchCmd(d *dispatchState, c *instructions.CmdCommand) error {
	var args []string = c.CmdLine
	if c.PrependShell {
		args = withShell(d.image, args)
	}
	d.image.Config.Cmd = args
	d.image.Config.ArgsEscaped = true
	d.cmdSet = true
	return commitToHistory(&d.image, fmt.Sprintf("CMD %q", args), false, nil)
}
// dispatchEntrypoint applies ENTRYPOINT; an inherited CMD is reset
// unless CMD was set explicitly in this Dockerfile.
func dispatchEntrypoint(d *dispatchState, c *instructions.EntrypointCommand) error {
	var args []string = c.CmdLine
	if c.PrependShell {
		args = withShell(d.image, args)
	}
	d.image.Config.Entrypoint = args
	if !d.cmdSet {
		d.image.Config.Cmd = nil
	}
	return commitToHistory(&d.image, fmt.Sprintf("ENTRYPOINT %q", args), false, nil)
}
// dispatchHealthcheck applies HEALTHCHECK to the image config.
func dispatchHealthcheck(d *dispatchState, c *instructions.HealthCheckCommand) error {
	d.image.Config.Healthcheck = &HealthConfig{
		Test:        c.Health.Test,
		Interval:    c.Health.Interval,
		Timeout:     c.Health.Timeout,
		StartPeriod: c.Health.StartPeriod,
		Retries:     c.Health.Retries,
	}
	return commitToHistory(&d.image, fmt.Sprintf("HEALTHCHECK %q", d.image.Config.Healthcheck), false, nil)
}
// dispatchExpose applies EXPOSE, expanding env/arg references in each
// port spec before parsing it.
func dispatchExpose(d *dispatchState, c *instructions.ExposeCommand, shlex *shell.Lex) error {
	ports := []string{}
	for _, p := range c.Ports {
		ps, err := shlex.ProcessWordsWithMap(p, toEnvMap(d.buildArgs, d.image.Config.Env))
		if err != nil {
			return err
		}
		ports = append(ports, ps...)
	}
	c.Ports = ports
	ps, _, err := nat.ParsePortSpecs(c.Ports)
	if err != nil {
		return err
	}
	if d.image.Config.ExposedPorts == nil {
		d.image.Config.ExposedPorts = make(map[string]struct{})
	}
	for p := range ps {
		d.image.Config.ExposedPorts[string(p)] = struct{}{}
	}
	return commitToHistory(&d.image, fmt.Sprintf("EXPOSE %v", ps), false, nil)
}
// dispatchUser applies USER; commit selects whether a history entry is
// recorded (false during base-image initialization).
func dispatchUser(d *dispatchState, c *instructions.UserCommand, commit bool) error {
	d.state = d.state.User(c.User)
	d.image.Config.User = c.User
	if commit {
		return commitToHistory(&d.image, fmt.Sprintf("USER %v", c.User), false, nil)
	}
	return nil
}
// dispatchVolume applies VOLUME entries to the image config.
func dispatchVolume(d *dispatchState, c *instructions.VolumeCommand) error {
	if d.image.Config.Volumes == nil {
		d.image.Config.Volumes = map[string]struct{}{}
	}
	for _, v := range c.Volumes {
		if v == "" {
			return errors.New("VOLUME specified can not be an empty string")
		}
		d.image.Config.Volumes[v] = struct{}{}
	}
	return commitToHistory(&d.image, fmt.Sprintf("VOLUME %v", c.Volumes), false, nil)
}
// dispatchStopSignal validates and applies STOPSIGNAL.
func dispatchStopSignal(d *dispatchState, c *instructions.StopSignalCommand) error {
	if _, err := signal.ParseSignal(c.Signal); err != nil {
		return err
	}
	d.image.Config.StopSignal = c.Signal
	return commitToHistory(&d.image, fmt.Sprintf("STOPSIGNAL %v", c.Signal), false, nil)
}
// dispatchShell applies SHELL, used by later shell-form instructions.
func dispatchShell(d *dispatchState, c *instructions.ShellCommand) error {
	d.image.Config.Shell = c.Shell
	return commitToHistory(&d.image, fmt.Sprintf("SHELL %v", c.Shell), false, nil)
}
// dispatchArg declares a build arg for this stage; its value comes from
// the build-arg overrides or, failing that, a matching top-level ARG.
func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instructions.KeyValuePairOptional, buildArgValues map[string]string) error {
	commitStr := "ARG " + c.Key
	buildArg := setKVValue(c.KeyValuePairOptional, buildArgValues)
	if c.Value != nil {
		commitStr += "=" + *c.Value
	}
	if buildArg.Value == nil {
		for _, ma := range metaArgs {
			if ma.Key == buildArg.Key {
				buildArg.Value = ma.Value
			}
		}
	}
	d.buildArgs = append(d.buildArgs, buildArg)
	return commitToHistory(&d.image, commitStr, false, nil)
}
// pathRelativeToWorkingDir resolves p against the state's working
// directory unless p is already absolute.
func pathRelativeToWorkingDir(s llb.State, p string) string {
	if path.IsAbs(p) {
		return p
	}
	return path.Join(s.GetDir(), p)
}
// splitWildcards splits a COPY/ADD source into the longest wildcard-free
// directory prefix and the remaining glob pattern. A backslash escapes
// the following character. A name with no unescaped wildcard ('*', '?'
// or '[') is returned whole with an empty pattern.
func splitWildcards(name string) (string, string) {
	i := 0
	for ; i < len(name); i++ {
		ch := name[i]
		if ch == '\\' {
			// skip the escaped character
			i++
		} else if ch == '*' || ch == '?' || ch == '[' {
			break
		}
	}
	// When the name ends in an escape ("foo\"), i lands past len(name);
	// the previous "i == len(name)" check missed that case and name[:i]
	// panicked. Treat it like a name without wildcards.
	if i >= len(name) {
		return name, ""
	}
	base := path.Base(name[:i])
	if name[:i] == "" || strings.HasSuffix(name[:i], string(filepath.Separator)) {
		base = ""
	}
	return path.Dir(name[:i]), base + name[i:]
}
// addEnv sets k=v in env, replacing an existing entry whose key matches
// per shell.EqualEnvKeys, or appending a new one. The slice is updated
// in place when an entry is replaced.
func addEnv(env []string, k, v string) []string {
	gotOne := false
	for i, envVar := range env {
		key, _ := parseKeyValue(envVar)
		if shell.EqualEnvKeys(key, k) {
			env[i] = k + "=" + v
			gotOne = true
			break
		}
	}
	if !gotOne {
		env = append(env, k+"="+v)
	}
	return env
}
// parseKeyValue splits an environment entry of the form "KEY=VALUE" at
// the first '='; an entry without '=' yields an empty value.
func parseKeyValue(env string) (string, string) {
	kv := strings.SplitN(env, "=", 2)
	if len(kv) == 2 {
		return kv[0], kv[1]
	}
	return kv[0], ""
}
// setKVValue overrides kvpo's value with the one supplied in values,
// when present; otherwise kvpo is returned unchanged.
func setKVValue(kvpo instructions.KeyValuePairOptional, values map[string]string) instructions.KeyValuePairOptional {
	if v, ok := values[kvpo.Key]; ok {
		kvpo.Value = &v
	}
	return kvpo
}
// toEnvMap merges declared build args into the environment map; a real
// environment entry wins over a build arg with the same key.
func toEnvMap(args []instructions.KeyValuePairOptional, env []string) map[string]string {
	m := shell.BuildEnvs(env)
	for _, arg := range args {
		// If key already exists, keep previous value.
		if _, ok := m[arg.Key]; ok {
			continue
		}
		m[arg.Key] = arg.ValueString()
	}
	return m
}
// dfCmd records the originating Dockerfile command on an LLB vertex as
// description metadata, accepting either a fmt.Stringer or a raw string.
// TODO: add fmt.Stringer to instructions.Command to remove interface{}
func dfCmd(cmd interface{}) llb.ConstraintsOpt {
	var text string
	switch c := cmd.(type) {
	case fmt.Stringer:
		text = c.String()
	case string:
		text = c
	}
	return llb.WithDescription(map[string]string{
		"com.docker.dockerfile.v1.command": text,
	})
}
// runCommandString renders a RUN line for image history, prefixing the
// classic builder's "|N key=value ..." notation when build args are set.
func runCommandString(args []string, buildArgs []instructions.KeyValuePairOptional) string {
	var tmpBuildEnv []string
	for _, arg := range buildArgs {
		tmpBuildEnv = append(tmpBuildEnv, arg.Key+"="+arg.ValueString())
	}
	if len(tmpBuildEnv) > 0 {
		tmpBuildEnv = append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...)
	}
	return strings.Join(append(tmpBuildEnv, args...), " ")
}
// commitToHistory appends a history entry to the image config; entries
// backed by a real layer (st != nil) are suffixed with "# buildkit".
func commitToHistory(img *Image, msg string, withLayer bool, st *llb.State) error {
	if st != nil {
		msg += " # buildkit"
	}
	img.History = append(img.History, specs.History{
		CreatedBy:  msg,
		Comment:    historyComment,
		EmptyLayer: !withLayer,
	})
	return nil
}
// isReachable reports whether stage "to" is in the dependency closure of
// stage "from", following FROM bases and COPY/RUN-mount sources.
// NOTE(review): the recursion has no cycle guard — relies on stage
// dependencies forming a DAG.
func isReachable(from, to *dispatchState) (ret bool) {
	if from == nil {
		return false
	}
	if from == to || isReachable(from.base, to) {
		return true
	}
	for d := range from.deps {
		if isReachable(d, to) {
			return true
		}
	}
	return false
}
// parseUser parses a "uid[:gid]" chown specification into numeric ids.
// When no group is given, the gid mirrors the uid. An empty string
// yields (0, 0) with no error.
func parseUser(str string) (uid uint32, gid uint32, err error) {
	if str == "" {
		return 0, 0, nil
	}
	parts := strings.SplitN(str, ":", 2)
	uid, err = parseUID(parts[0])
	if err != nil {
		return 0, 0, err
	}
	if len(parts) == 1 {
		// no explicit group: group id defaults to the user id
		return uid, uid, nil
	}
	gid, err = parseUID(parts[1])
	if err != nil {
		return 0, 0, err
	}
	return uid, gid, nil
}
// parseUID converts a user or group token into a numeric id, accepting
// the literal "root" as id 0.
func parseUID(str string) (uint32, error) {
	if str == "root" {
		return 0, nil
	}
	id, err := strconv.ParseUint(str, 10, 32)
	if err != nil {
		return 0, err
	}
	return uint32(id), nil
}
// normalizeContextPaths converts the set of context paths collected from
// COPY/ADD sources into a sorted, minimal list of relative patterns for
// llb.FollowPaths. A root ("/") entry means the whole context is needed,
// signalled by returning nil. Paths nested under another requested path
// are dropped, since the parent transfer already covers them.
//
// Fix: the original declared toDelete but never populated it, and instead
// deleted entries from the caller's map (an accidental side effect). The
// covered paths are now tracked in toDelete and the input is left intact.
func normalizeContextPaths(paths map[string]struct{}) []string {
	pathSlice := make([]string, 0, len(paths))
	for p := range paths {
		if p == "/" {
			return nil
		}
		pathSlice = append(pathSlice, p)
	}
	// Mark every path that is nested under another requested path.
	toDelete := map[string]struct{}{}
	for i := range pathSlice {
		for j := range pathSlice {
			if i == j {
				continue
			}
			if strings.HasPrefix(pathSlice[j], pathSlice[i]+"/") {
				toDelete[pathSlice[j]] = struct{}{}
			}
		}
	}
	toSort := make([]string, 0, len(pathSlice))
	for _, p := range pathSlice {
		if _, ok := toDelete[p]; !ok {
			toSort = append(toSort, path.Join(".", p))
		}
	}
	sort.Strings(toSort)
	return toSort
}
// proxyEnvFromBuildArgs extracts the classic proxy variables from the
// build args (case-insensitively) into an llb.ProxyEnv. It returns nil
// when none of them are present so callers can skip proxy wiring.
func proxyEnvFromBuildArgs(args map[string]string) *llb.ProxyEnv {
	pe := &llb.ProxyEnv{}
	found := false
	for k, v := range args {
		switch strings.ToLower(k) {
		case "http_proxy":
			pe.HttpProxy = v
			found = true
		case "https_proxy":
			pe.HttpsProxy = v
			found = true
		case "ftp_proxy":
			pe.FtpProxy = v
			found = true
		case "no_proxy":
			pe.NoProxy = v
			found = true
		}
	}
	if !found {
		return nil
	}
	return pe
}
// mutableOutput wraps an llb.Output that can be swapped in after states
// referencing it have already been constructed (used for the build
// context, whose definition depends on paths collected while dispatching).
type mutableOutput struct {
	llb.Output
}
// withShell prepends the image's configured SHELL (or the platform
// default) to a shell-form command line, joining args into one argument.
func withShell(img Image, args []string) []string {
	var shell []string
	if len(img.Config.Shell) > 0 {
		shell = append([]string{}, img.Config.Shell...)
	} else {
		shell = defaultShell()
	}
	return append(shell, strings.Join(args, " "))
}
// autoDetectPlatform returns a platform matching the image's os/arch:
// the target itself when it already matches, otherwise the first
// supported build platform that does, falling back to target.
func autoDetectPlatform(img Image, target specs.Platform, supported []specs.Platform) specs.Platform {
	os := img.OS
	arch := img.Architecture
	if target.OS == os && target.Architecture == arch {
		return target
	}
	for _, p := range supported {
		if p.OS == os && p.Architecture == arch {
			return p
		}
	}
	return target
}
// WithInternalName labels an LLB vertex as an internal step
// ("[internal] ...") in progress output.
func WithInternalName(name string, a ...interface{}) llb.ConstraintsOpt {
	return llb.WithCustomName("[internal] "+name, a...)
}
// uppercaseCmd upper-cases the first word of a Dockerfile command line
// (e.g. "run echo hi" -> "RUN echo hi"), leaving the rest untouched.
func uppercaseCmd(str string) string {
	if idx := strings.IndexByte(str, ' '); idx != -1 {
		return strings.ToUpper(str[:idx]) + str[idx:]
	}
	return strings.ToUpper(str)
}
// processCmdEnv expands environment references in cmd for display
// purposes; on expansion failure the raw command is returned unchanged.
func processCmdEnv(shlex *shell.Lex, cmd string, env []string) string {
	w, err := shlex.ProcessWord(cmd, env)
	if err != nil {
		return cmd
	}
	return w
}
// prefixCommand prepends the "[platform stage idx/total]" progress prefix
// to a command description, advancing the stage's command counter as a
// side effect. A cmdTotal of zero disables prefixing entirely.
func prefixCommand(ds *dispatchState, str string, prefixPlatform bool, platform *specs.Platform) string {
	if ds.cmdTotal == 0 {
		return str
	}
	out := "["
	if prefixPlatform && platform != nil {
		out += platforms.Format(*platform) + " "
	}
	if ds.stageName != "" {
		out += ds.stageName + " "
	}
	ds.cmdIndex++
	out += fmt.Sprintf("%d/%d] ", ds.cmdIndex, ds.cmdTotal)
	return out + str
}
dockerfile: disable network for copy
Signed-off-by: Tonis Tiigi <c2470c48b2d3312d61f94f18d3a1cd113d1915ad@gmail.com>
package dockerfile2llb
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/url"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"github.com/containerd/containerd/platforms"
"github.com/docker/distribution/reference"
"github.com/docker/docker/pkg/signal"
"github.com/docker/go-connections/nat"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/client/llb/imagemetaresolver"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/moby/buildkit/frontend/dockerfile/parser"
"github.com/moby/buildkit/frontend/dockerfile/shell"
gw "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
"github.com/moby/buildkit/util/system"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
const (
	// emptyImageName is the reserved base name that yields an empty rootfs.
	emptyImageName = "scratch"
	// localNameContext names the local source used as the build context.
	localNameContext = "context"
	// historyComment tags image history entries created by this frontend.
	historyComment = "buildkit.dockerfile.v0"
	// DefaultCopyImage is the pinned helper image that implements COPY/ADD.
	DefaultCopyImage = "tonistiigi/copy:v0.1.7@sha256:9aab7d9ab369c6daf4831bf0653f7592110ab4b7e8a33fee2b9dca546e9d3089"
)
// ConvertOpt configures the Dockerfile-to-LLB conversion.
type ConvertOpt struct {
	// Target selects the stage to build; empty means the last stage.
	Target       string
	MetaResolver llb.ImageMetaResolver
	// BuildArgs provides values for ARG instructions.
	BuildArgs map[string]string
	// Labels are applied to the target image config.
	Labels    map[string]string
	SessionID string
	// BuildContext overrides the default local build-context source.
	BuildContext *llb.State
	Excludes     []string
	// IgnoreCache contains names of the stages that should not use build cache.
	// Empty slice means ignore cache for all stages. Nil doesn't disable cache.
	IgnoreCache []string
	// CacheIDNamespace scopes the IDs for different cache mounts
	CacheIDNamespace string
	ImageResolveMode llb.ResolveMode
	TargetPlatform   *specs.Platform
	BuildPlatforms   []specs.Platform
	// PrefixPlatform adds the platform to progress prefixes.
	PrefixPlatform bool
	ExtraHosts     []llb.HostIP
	// ForceNetMode applies a network mode to all execs.
	ForceNetMode pb.NetMode
	// OverrideCopyImage replaces DefaultCopyImage for COPY/ADD helpers.
	OverrideCopyImage string
	LLBCaps           *apicaps.CapSet
}
// Dockerfile2LLB converts Dockerfile bytes into an LLB state plus the
// resulting image config for the selected target stage. It parses the
// Dockerfile, builds a dispatchState per stage, resolves base-image
// configs concurrently (only for stages reachable from the target), then
// dispatches every instruction of every reachable stage.
func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, error) {
	if len(dt) == 0 {
		return nil, nil, errors.Errorf("the Dockerfile cannot be empty")
	}
	platformOpt := buildPlatformOpt(&opt)
	// Platform ARGs (TARGETOS etc.) seed the meta-arg list; explicit
	// build args override them.
	optMetaArgs := getPlatformArgs(platformOpt)
	for i, arg := range optMetaArgs {
		optMetaArgs[i] = setKVValue(arg, opt.BuildArgs)
	}
	dockerfile, err := parser.Parse(bytes.NewReader(dt))
	if err != nil {
		return nil, nil, err
	}
	proxyEnv := proxyEnvFromBuildArgs(opt.BuildArgs)
	stages, metaArgs, err := instructions.Parse(dockerfile.AST)
	if err != nil {
		return nil, nil, err
	}
	shlex := shell.NewLex(dockerfile.EscapeToken)
	// Top-level ARGs may reference previously defined meta args.
	for _, metaArg := range metaArgs {
		if metaArg.Value != nil {
			*metaArg.Value, _ = shlex.ProcessWordWithMap(*metaArg.Value, metaArgsToMap(optMetaArgs))
		}
		optMetaArgs = append(optMetaArgs, setKVValue(metaArg.KeyValuePairOptional, opt.BuildArgs))
	}
	metaResolver := opt.MetaResolver
	if metaResolver == nil {
		metaResolver = imagemetaresolver.Default()
	}
	allDispatchStates := newDispatchStates()
	// set base state for every image
	for i, st := range stages {
		name, err := shlex.ProcessWordWithMap(st.BaseName, metaArgsToMap(optMetaArgs))
		if err != nil {
			return nil, nil, err
		}
		if name == "" {
			return nil, nil, errors.Errorf("base name (%s) should not be blank", st.BaseName)
		}
		st.BaseName = name
		ds := &dispatchState{
			stage:          st,
			deps:           make(map[*dispatchState]struct{}),
			ctxPaths:       make(map[string]struct{}),
			stageName:      st.Name,
			prefixPlatform: opt.PrefixPlatform,
		}
		// Unnamed stages get a synthetic "stage-N" progress name.
		if st.Name == "" {
			ds.stageName = fmt.Sprintf("stage-%d", i)
		}
		if v := st.Platform; v != "" {
			v, err := shlex.ProcessWordWithMap(v, metaArgsToMap(optMetaArgs))
			if err != nil {
				return nil, nil, errors.Wrapf(err, "failed to process arguments for platform %s", v)
			}
			p, err := platforms.Parse(v)
			if err != nil {
				return nil, nil, errors.Wrapf(err, "failed to parse platform %s", v)
			}
			ds.platform = &p
		}
		allDispatchStates.addState(ds)
		// cmdTotal counts the steps shown in progress: the FROM (unless
		// scratch or another stage) plus every ADD/COPY/RUN.
		total := 0
		if ds.stage.BaseName != emptyImageName && ds.base == nil {
			total = 1
		}
		for _, cmd := range ds.stage.Commands {
			switch cmd.(type) {
			case *instructions.AddCommand, *instructions.CopyCommand, *instructions.RunCommand:
				total++
			}
		}
		ds.cmdTotal = total
		if opt.IgnoreCache != nil {
			if len(opt.IgnoreCache) == 0 {
				ds.ignoreCache = true
			} else if st.Name != "" {
				for _, n := range opt.IgnoreCache {
					if strings.EqualFold(n, st.Name) {
						ds.ignoreCache = true
					}
				}
			}
		}
	}
	// Single-stage builds drop the stage prefix from progress output.
	if len(allDispatchStates.states) == 1 {
		allDispatchStates.states[0].stageName = ""
	}
	var target *dispatchState
	if opt.Target == "" {
		target = allDispatchStates.lastTarget()
	} else {
		var ok bool
		target, ok = allDispatchStates.findStateByName(opt.Target)
		if !ok {
			return nil, nil, errors.Errorf("target stage %s could not be found", opt.Target)
		}
	}
	// fill dependencies to stages so unreachable ones can avoid loading image configs
	for _, d := range allDispatchStates.states {
		d.commands = make([]command, len(d.stage.Commands))
		for i, cmd := range d.stage.Commands {
			newCmd, err := toCommand(cmd, allDispatchStates)
			if err != nil {
				return nil, nil, err
			}
			d.commands[i] = newCmd
			for _, src := range newCmd.sources {
				if src != nil {
					d.deps[src] = struct{}{}
					if src.unregistered {
						allDispatchStates.addState(src)
					}
				}
			}
		}
	}
	// Resolve base image configs in parallel.
	eg, ctx := errgroup.WithContext(ctx)
	for i, d := range allDispatchStates.states {
		reachable := isReachable(target, d)
		// resolve image config for every stage
		if d.base == nil {
			if d.stage.BaseName == emptyImageName {
				d.state = llb.Scratch()
				d.image = emptyImage(platformOpt.targetPlatform)
				continue
			}
			func(i int, d *dispatchState) {
				eg.Go(func() error {
					ref, err := reference.ParseNormalizedNamed(d.stage.BaseName)
					if err != nil {
						return errors.Wrapf(err, "failed to parse stage name %q", d.stage.BaseName)
					}
					platform := d.platform
					if platform == nil {
						platform = &platformOpt.targetPlatform
					}
					d.stage.BaseName = reference.TagNameOnly(ref).String()
					var isScratch bool
					if metaResolver != nil && reachable && !d.unregistered {
						prefix := "["
						if opt.PrefixPlatform && platform != nil {
							prefix += platforms.Format(*platform) + " "
						}
						prefix += "internal]"
						dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName, gw.ResolveImageConfigOpt{
							Platform:    platform,
							ResolveMode: opt.ImageResolveMode.String(),
							LogName:     fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName),
						})
						if err == nil { // handle the error while builder is actually running
							var img Image
							if err := json.Unmarshal(dt, &img); err != nil {
								return err
							}
							img.Created = nil
							// if there is no explicit target platform, try to match based on image config
							if d.platform == nil && platformOpt.implicitTarget {
								p := autoDetectPlatform(img, *platform, platformOpt.buildPlatforms)
								platform = &p
							}
							d.image = img
							if dgst != "" {
								ref, err = reference.WithDigest(ref, dgst)
								if err != nil {
									return err
								}
							}
							d.stage.BaseName = ref.String()
							_ = ref
							if len(img.RootFS.DiffIDs) == 0 {
								isScratch = true
								// schema1 images can't return diffIDs so double check :(
								for _, h := range img.History {
									if !h.EmptyLayer {
										isScratch = false
										break
									}
								}
							}
						}
					}
					if isScratch {
						d.state = llb.Scratch()
					} else {
						d.state = llb.Image(d.stage.BaseName, dfCmd(d.stage.SourceCode), llb.Platform(*platform), opt.ImageResolveMode, llb.WithCustomName(prefixCommand(d, "FROM "+d.stage.BaseName, opt.PrefixPlatform, platform)))
					}
					d.platform = platform
					return nil
				})
			}(i, d)
		}
	}
	if err := eg.Wait(); err != nil {
		return nil, nil, err
	}
	// The real build-context output is filled in at the end once all
	// referenced context paths are known.
	buildContext := &mutableOutput{}
	ctxPaths := map[string]struct{}{}
	for _, d := range allDispatchStates.states {
		if !isReachable(target, d) {
			continue
		}
		if d.base != nil {
			d.state = d.base.state
			d.platform = d.base.platform
			d.image = clone(d.base.image)
		}
		// make sure that PATH is always set
		if _, ok := shell.BuildEnvs(d.image.Config.Env)["PATH"]; !ok {
			d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv)
		}
		// initialize base metadata from image conf
		for _, env := range d.image.Config.Env {
			k, v := parseKeyValue(env)
			d.state = d.state.AddEnv(k, v)
		}
		if d.image.Config.WorkingDir != "" {
			if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false); err != nil {
				return nil, nil, err
			}
		}
		if d.image.Config.User != "" {
			if err = dispatchUser(d, &instructions.UserCommand{User: d.image.Config.User}, false); err != nil {
				return nil, nil, err
			}
		}
		d.state = d.state.Network(opt.ForceNetMode)
		// NOTE: this shadows the outer ConvertOpt "opt" with a
		// per-stage dispatchOpt for the rest of the loop body.
		opt := dispatchOpt{
			allDispatchStates: allDispatchStates,
			metaArgs:          optMetaArgs,
			buildArgValues:    opt.BuildArgs,
			shlex:             shlex,
			sessionID:         opt.SessionID,
			buildContext:      llb.NewState(buildContext),
			proxyEnv:          proxyEnv,
			cacheIDNamespace:  opt.CacheIDNamespace,
			buildPlatforms:    platformOpt.buildPlatforms,
			targetPlatform:    platformOpt.targetPlatform,
			extraHosts:        opt.ExtraHosts,
			copyImage:         opt.OverrideCopyImage,
			llbCaps:           opt.LLBCaps,
		}
		if opt.copyImage == "" {
			opt.copyImage = DefaultCopyImage
		}
		if err = dispatchOnBuild(d, d.image.Config.OnBuild, opt); err != nil {
			return nil, nil, err
		}
		for _, cmd := range d.commands {
			if err := dispatch(d, cmd, opt); err != nil {
				return nil, nil, err
			}
		}
		for p := range d.ctxPaths {
			ctxPaths[p] = struct{}{}
		}
	}
	if len(opt.Labels) != 0 && target.image.Config.Labels == nil {
		target.image.Config.Labels = make(map[string]string, len(opt.Labels))
	}
	for k, v := range opt.Labels {
		target.image.Config.Labels[k] = v
	}
	// Build the local context source, limited to the paths that were
	// actually referenced when possible.
	opts := []llb.LocalOption{
		llb.SessionID(opt.SessionID),
		llb.ExcludePatterns(opt.Excludes),
		llb.SharedKeyHint(localNameContext),
		WithInternalName("load build context"),
	}
	if includePatterns := normalizeContextPaths(ctxPaths); includePatterns != nil {
		opts = append(opts, llb.FollowPaths(includePatterns))
	}
	bc := llb.Local(localNameContext, opts...)
	if opt.BuildContext != nil {
		bc = *opt.BuildContext
	}
	buildContext.Output = bc.Output()
	defaults := []llb.ConstraintsOpt{
		llb.Platform(platformOpt.targetPlatform),
	}
	if opt.LLBCaps != nil {
		defaults = append(defaults, llb.WithCaps(*opt.LLBCaps))
	}
	st := target.state.SetMarshalDefaults(defaults...)
	if !platformOpt.implicitTarget {
		target.image.OS = platformOpt.targetPlatform.OS
		target.image.Architecture = platformOpt.targetPlatform.Architecture
	}
	return &st, &target.image, nil
}
// metaArgsToMap flattens ARG key/value pairs into a lookup map; later
// duplicates overwrite earlier ones.
func metaArgsToMap(metaArgs []instructions.KeyValuePairOptional) map[string]string {
	out := make(map[string]string, len(metaArgs))
	for _, kv := range metaArgs {
		out[kv.Key] = kv.ValueString()
	}
	return out
}
// toCommand wraps a parsed instruction into a command, resolving the
// source stage referenced by COPY --from (by numeric index or by stage
// name). A --from naming something that is not a defined stage becomes a
// new unregistered state (e.g. an external image) so it can be loaded on
// demand.
//
// Fix: the trailing `if ok := detectRunMount(...); ok { return cmd, nil }`
// had identical behavior on both branches; the redundant conditional is
// removed (detectRunMount is still called for its side effects on cmd).
func toCommand(ic instructions.Command, allDispatchStates *dispatchStates) (command, error) {
	cmd := command{Command: ic}
	if c, ok := ic.(*instructions.CopyCommand); ok {
		if c.From != "" {
			var stn *dispatchState
			index, err := strconv.Atoi(c.From)
			if err != nil {
				// Not numeric: treat --from as a stage name, falling back
				// to an unregistered placeholder state.
				stn, ok = allDispatchStates.findStateByName(c.From)
				if !ok {
					stn = &dispatchState{
						stage:        instructions.Stage{BaseName: c.From},
						deps:         make(map[*dispatchState]struct{}),
						unregistered: true,
					}
				}
			} else {
				stn, err = allDispatchStates.findStateByIndex(index)
				if err != nil {
					return command{}, err
				}
			}
			cmd.sources = []*dispatchState{stn}
		}
	}
	// detectRunMount mutates cmd when the instruction carries RUN mounts;
	// its boolean result does not change the outcome here.
	detectRunMount(&cmd, allDispatchStates)
	return cmd, nil
}
// dispatchOpt carries the per-stage context shared by all dispatchX
// handlers: parsed meta args, shell lexer, build-context state, proxy
// settings and platform information.
type dispatchOpt struct {
	allDispatchStates *dispatchStates
	metaArgs          []instructions.KeyValuePairOptional
	buildArgValues    map[string]string
	shlex             *shell.Lex
	sessionID         string
	buildContext      llb.State
	proxyEnv          *llb.ProxyEnv
	cacheIDNamespace  string
	targetPlatform    specs.Platform
	buildPlatforms    []specs.Platform
	extraHosts        []llb.HostIP
	// copyImage is the helper image used to implement COPY/ADD.
	copyImage string
	llbCaps   *apicaps.CapSet
}
// dispatch routes a single Dockerfile instruction to its handler,
// after expanding environment/ARG references in instructions that
// support word expansion. Unknown instruction types are silently
// ignored (default case falls through with err == nil).
func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
	if ex, ok := cmd.Command.(instructions.SupportsSingleWordExpansion); ok {
		err := ex.Expand(func(word string) (string, error) {
			return opt.shlex.ProcessWordWithMap(word, toEnvMap(d.buildArgs, d.image.Config.Env))
		})
		if err != nil {
			return err
		}
	}
	var err error
	switch c := cmd.Command.(type) {
	case *instructions.MaintainerCommand:
		err = dispatchMaintainer(d, c)
	case *instructions.EnvCommand:
		err = dispatchEnv(d, c)
	case *instructions.RunCommand:
		err = dispatchRun(d, c, opt.proxyEnv, cmd.sources, opt)
	case *instructions.WorkdirCommand:
		err = dispatchWorkdir(d, c, true)
	case *instructions.AddCommand:
		err = dispatchCopy(d, c.SourcesAndDest, opt.buildContext, true, c, "", opt)
		// Record the context paths ADD touched so the local source can be
		// narrowed with FollowPaths.
		if err == nil {
			for _, src := range c.Sources() {
				d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{}
			}
		}
	case *instructions.LabelCommand:
		err = dispatchLabel(d, c)
	case *instructions.OnbuildCommand:
		err = dispatchOnbuild(d, c)
	case *instructions.CmdCommand:
		err = dispatchCmd(d, c)
	case *instructions.EntrypointCommand:
		err = dispatchEntrypoint(d, c)
	case *instructions.HealthCheckCommand:
		err = dispatchHealthcheck(d, c)
	case *instructions.ExposeCommand:
		err = dispatchExpose(d, c, opt.shlex)
	case *instructions.UserCommand:
		err = dispatchUser(d, c, true)
	case *instructions.VolumeCommand:
		err = dispatchVolume(d, c)
	case *instructions.StopSignalCommand:
		err = dispatchStopSignal(d, c)
	case *instructions.ShellCommand:
		err = dispatchShell(d, c)
	case *instructions.ArgCommand:
		err = dispatchArg(d, c, opt.metaArgs, opt.buildArgValues)
	case *instructions.CopyCommand:
		// COPY --from uses the referenced stage's state as source;
		// only plain COPY contributes to the local context paths.
		l := opt.buildContext
		if len(cmd.sources) != 0 {
			l = cmd.sources[0].state
		}
		err = dispatchCopy(d, c.SourcesAndDest, l, false, c, c.Chown, opt)
		if err == nil && len(cmd.sources) == 0 {
			for _, src := range c.Sources() {
				d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{}
			}
		}
	default:
	}
	return err
}
// dispatchState tracks the evolving build state of a single Dockerfile
// stage: its LLB state, image config, dependencies on other stages, and
// progress bookkeeping.
type dispatchState struct {
	state    llb.State
	image    Image
	platform *specs.Platform
	stage    instructions.Stage
	// base points to the dispatchState this stage builds FROM, when the
	// base is another stage in the same Dockerfile.
	base *dispatchState
	// deps are stages this one depends on (COPY --from, RUN mounts).
	deps      map[*dispatchState]struct{}
	buildArgs []instructions.KeyValuePairOptional
	commands  []command
	// ctxPaths collects local context paths referenced by ADD/COPY.
	ctxPaths    map[string]struct{}
	ignoreCache bool
	// cmdSet records whether CMD was set explicitly (ENTRYPOINT then
	// keeps it instead of clearing it).
	cmdSet bool
	// unregistered marks placeholder states created for COPY --from of
	// images not defined as stages.
	unregistered   bool
	stageName      string
	cmdIndex       int
	cmdTotal       int
	prefixPlatform bool
}
// dispatchStates holds all stage states in definition order plus a
// lower-cased name index for lookup by stage name.
type dispatchStates struct {
	states       []*dispatchState
	statesByName map[string]*dispatchState
}
// newDispatchStates returns an empty, ready-to-use stage registry.
func newDispatchStates() *dispatchStates {
	return &dispatchStates{statesByName: map[string]*dispatchState{}}
}
// addState registers a stage. If its base name matches a previously
// registered stage name, that stage becomes its base; named stages are
// indexed case-insensitively (stored lower-cased).
func (dss *dispatchStates) addState(ds *dispatchState) {
	dss.states = append(dss.states, ds)
	if d, ok := dss.statesByName[ds.stage.BaseName]; ok {
		ds.base = d
	}
	if ds.stage.Name != "" {
		dss.statesByName[strings.ToLower(ds.stage.Name)] = ds
	}
}
// findStateByName looks a stage up by name, case-insensitively.
func (dss *dispatchStates) findStateByName(name string) (*dispatchState, bool) {
	ds, ok := dss.statesByName[strings.ToLower(name)]
	return ds, ok
}
// findStateByIndex looks a stage up by its zero-based definition index,
// returning an error for out-of-range indices.
func (dss *dispatchStates) findStateByIndex(index int) (*dispatchState, error) {
	if index < 0 || index >= len(dss.states) {
		return nil, errors.Errorf("invalid stage index %d", index)
	}
	return dss.states[index], nil
}
// lastTarget returns the final stage, the default build target.
// Callers must ensure at least one stage exists (instructions.Parse
// guarantees this for a valid Dockerfile).
func (dss *dispatchStates) lastTarget() *dispatchState {
	return dss.states[len(dss.states)-1]
}
// command pairs a parsed instruction with the stages it reads from
// (COPY --from sources, RUN mount sources).
type command struct {
	instructions.Command
	sources []*dispatchState
}
// dispatchOnBuild replays inherited ONBUILD triggers: each trigger line
// is parsed as a single Dockerfile instruction and dispatched against
// the current stage.
func dispatchOnBuild(d *dispatchState, triggers []string, opt dispatchOpt) error {
	for _, trigger := range triggers {
		ast, err := parser.Parse(strings.NewReader(trigger))
		if err != nil {
			return err
		}
		if len(ast.AST.Children) != 1 {
			return errors.New("onbuild trigger should be a single expression")
		}
		ic, err := instructions.ParseCommand(ast.AST.Children[0])
		if err != nil {
			return err
		}
		cmd, err := toCommand(ic, opt.allDispatchStates)
		if err != nil {
			return err
		}
		if err := dispatch(d, cmd, opt); err != nil {
			return err
		}
	}
	return nil
}
// dispatchEnv applies ENV to both the LLB state and the image config,
// and commits a metadata-only history entry.
func dispatchEnv(d *dispatchState, c *instructions.EnvCommand) error {
	commitMessage := bytes.NewBufferString("ENV")
	for _, e := range c.Env {
		commitMessage.WriteString(" " + e.String())
		d.state = d.state.AddEnv(e.Key, e.Value)
		d.image.Config.Env = addEnv(d.image.Config.Env, e.Key, e.Value)
	}
	return commitToHistory(&d.image, commitMessage.String(), false, nil)
}
// dispatchRun converts RUN into an LLB exec: shell-form commands are
// wrapped with the image's SHELL, build args are injected into the exec
// environment, and proxy settings, cache-ignore, RUN mounts and extra
// hosts are applied as run options.
func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyEnv, sources []*dispatchState, dopt dispatchOpt) error {
	var args []string = c.CmdLine
	if c.PrependShell {
		args = withShell(d.image, args)
	}
	env := d.state.Env()
	opt := []llb.RunOption{llb.Args(args)}
	// Stage-level ARGs are visible to RUN as environment variables.
	for _, arg := range d.buildArgs {
		env = append(env, fmt.Sprintf("%s=%s", arg.Key, arg.ValueString()))
		opt = append(opt, llb.AddEnv(arg.Key, arg.ValueString()))
	}
	opt = append(opt, dfCmd(c))
	if d.ignoreCache {
		opt = append(opt, llb.IgnoreCache)
	}
	if proxy != nil {
		opt = append(opt, llb.WithProxy(*proxy))
	}
	runMounts, err := dispatchRunMounts(d, c, sources, dopt)
	if err != nil {
		return err
	}
	opt = append(opt, runMounts...)
	opt = append(opt, llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(dopt.shlex, c.String(), env)), d.prefixPlatform, d.state.GetPlatform())))
	for _, h := range dopt.extraHosts {
		opt = append(opt, llb.AddExtraHost(h.Host, h.IP))
	}
	d.state = d.state.Run(opt...).Root()
	return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs), true, &d.state)
}
// dispatchWorkdir applies WORKDIR; relative paths are resolved against
// the image config's current working directory. commit=false is used
// when replaying the base image's own WorkingDir (no history entry).
func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bool) error {
	d.state = d.state.Dir(c.Path)
	wd := c.Path
	if !path.IsAbs(c.Path) {
		wd = path.Join("/", d.image.Config.WorkingDir, wd)
	}
	d.image.Config.WorkingDir = wd
	if commit {
		return commitToHistory(&d.image, "WORKDIR "+wd, false, nil)
	}
	return nil
}
// dispatchCopy implements COPY and ADD by running a helper copy image
// with each source mounted read-only at /src-N and the destination
// rootfs mounted at /dest. ADD (isAddCommand) additionally unpacks
// local archives and supports http(s) URL sources.
func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, opt dispatchOpt) error {
	// TODO: this should use CopyOp instead. Current implementation is inefficient
	img := llb.Image(opt.copyImage, llb.MarkImageInternal, llb.Platform(opt.buildPlatforms[0]), WithInternalName("helper image for file operations"))
	dest := path.Join(".", pathRelativeToWorkingDir(d.state, c.Dest()))
	// A trailing separator tells the helper the destination is a directory.
	if c.Dest() == "." || c.Dest() == "" || c.Dest()[len(c.Dest())-1] == filepath.Separator {
		dest += string(filepath.Separator)
	}
	args := []string{"copy"}
	unpack := isAddCommand
	mounts := make([]llb.RunOption, 0, len(c.Sources()))
	if chown != "" {
		args = append(args, fmt.Sprintf("--chown=%s", chown))
		_, _, err := parseUser(chown)
		if err != nil {
			// Non-numeric user/group: expose /etc/passwd and /etc/group
			// so the helper can resolve the names.
			mounts = append(mounts, llb.AddMount("/etc/passwd", d.state, llb.SourcePath("/etc/passwd"), llb.Readonly))
			mounts = append(mounts, llb.AddMount("/etc/group", d.state, llb.SourcePath("/etc/group"), llb.Readonly))
		}
	}
	commitMessage := bytes.NewBufferString("")
	if isAddCommand {
		commitMessage.WriteString("ADD")
	} else {
		commitMessage.WriteString("COPY")
	}
	for i, src := range c.Sources() {
		commitMessage.WriteString(" " + src)
		if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
			if !isAddCommand {
				return errors.New("source can't be a URL for COPY")
			}
			// Resources from remote URLs are not decompressed.
			// https://docs.docker.com/engine/reference/builder/#add
			//
			// Note: mixing up remote archives and local archives in a single ADD instruction
			// would result in undefined behavior: https://github.com/moby/buildkit/pull/387#discussion_r189494717
			unpack = false
			u, err := url.Parse(src)
			f := "__unnamed__"
			if err == nil {
				if base := path.Base(u.Path); base != "." && base != "/" {
					f = base
				}
			}
			target := path.Join(fmt.Sprintf("/src-%d", i), f)
			args = append(args, target)
			mounts = append(mounts, llb.AddMount(path.Dir(target), llb.HTTP(src, llb.Filename(f), dfCmd(c)), llb.Readonly))
		} else {
			// Mount only the wildcard-free prefix; the helper resolves
			// the remaining pattern itself.
			d, f := splitWildcards(src)
			targetCmd := fmt.Sprintf("/src-%d", i)
			targetMount := targetCmd
			if f == "" {
				f = path.Base(src)
				targetMount = path.Join(targetMount, f)
			}
			targetCmd = path.Join(targetCmd, f)
			args = append(args, targetCmd)
			mounts = append(mounts, llb.AddMount(targetMount, sourceState, llb.SourcePath(d), llb.Readonly))
		}
	}
	commitMessage.WriteString(" " + c.Dest())
	args = append(args, dest)
	if unpack {
		// Insert --unpack right after the "copy" subcommand.
		args = append(args[:1], append([]string{"--unpack"}, args[1:]...)...)
	}
	platform := opt.targetPlatform
	if d.platform != nil {
		platform = *d.platform
	}
	runOpt := []llb.RunOption{llb.Args(args), llb.Dir("/dest"), llb.ReadonlyRootFS(), dfCmd(cmdToPrint), llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, cmdToPrint.String(), d.state.Env())), d.prefixPlatform, &platform))}
	if d.ignoreCache {
		runOpt = append(runOpt, llb.IgnoreCache)
	}
	if opt.llbCaps != nil {
		if err := opt.llbCaps.Supports(pb.CapExecMetaNetwork); err == nil {
			// The copy helper needs no network; disable it when supported.
			runOpt = append(runOpt, llb.Network(llb.NetModeNone))
		}
	}
	run := img.Run(append(runOpt, mounts...)...)
	d.state = run.AddMount("/dest", d.state).Platform(platform)
	return commitToHistory(&d.image, commitMessage.String(), true, &d.state)
}
// dispatchMaintainer records the deprecated MAINTAINER value as the
// image author.
func dispatchMaintainer(d *dispatchState, c *instructions.MaintainerCommand) error {
	d.image.Author = c.Maintainer
	return commitToHistory(&d.image, fmt.Sprintf("MAINTAINER %v", c.Maintainer), false, nil)
}
// dispatchLabel merges LABEL pairs into the image config labels.
func dispatchLabel(d *dispatchState, c *instructions.LabelCommand) error {
	commitMessage := bytes.NewBufferString("LABEL")
	if d.image.Config.Labels == nil {
		d.image.Config.Labels = make(map[string]string, len(c.Labels))
	}
	for _, v := range c.Labels {
		d.image.Config.Labels[v.Key] = v.Value
		commitMessage.WriteString(" " + v.String())
	}
	return commitToHistory(&d.image, commitMessage.String(), false, nil)
}
// dispatchOnbuild stores an ONBUILD trigger in the image config; it is
// executed later by children via dispatchOnBuild. No history entry.
func dispatchOnbuild(d *dispatchState, c *instructions.OnbuildCommand) error {
	d.image.Config.OnBuild = append(d.image.Config.OnBuild, c.Expression)
	return nil
}
// dispatchCmd sets the image CMD (shell-wrapped for shell form) and
// marks cmdSet so a later ENTRYPOINT keeps it.
func dispatchCmd(d *dispatchState, c *instructions.CmdCommand) error {
	var args []string = c.CmdLine
	if c.PrependShell {
		args = withShell(d.image, args)
	}
	d.image.Config.Cmd = args
	d.image.Config.ArgsEscaped = true
	d.cmdSet = true
	return commitToHistory(&d.image, fmt.Sprintf("CMD %q", args), false, nil)
}
// dispatchEntrypoint sets the image ENTRYPOINT; per Dockerfile semantics
// it clears an inherited CMD unless CMD was set in this Dockerfile.
func dispatchEntrypoint(d *dispatchState, c *instructions.EntrypointCommand) error {
	var args []string = c.CmdLine
	if c.PrependShell {
		args = withShell(d.image, args)
	}
	d.image.Config.Entrypoint = args
	if !d.cmdSet {
		d.image.Config.Cmd = nil
	}
	return commitToHistory(&d.image, fmt.Sprintf("ENTRYPOINT %q", args), false, nil)
}
// dispatchHealthcheck copies HEALTHCHECK parameters into the image
// config, replacing any inherited health check.
func dispatchHealthcheck(d *dispatchState, c *instructions.HealthCheckCommand) error {
	d.image.Config.Healthcheck = &HealthConfig{
		Test:        c.Health.Test,
		Interval:    c.Health.Interval,
		Timeout:     c.Health.Timeout,
		StartPeriod: c.Health.StartPeriod,
		Retries:     c.Health.Retries,
	}
	return commitToHistory(&d.image, fmt.Sprintf("HEALTHCHECK %q", d.image.Config.Healthcheck), false, nil)
}
// dispatchExpose expands env/ARG references in EXPOSE port specs, parses
// them, and merges the resulting ports into the image config.
func dispatchExpose(d *dispatchState, c *instructions.ExposeCommand, shlex *shell.Lex) error {
	ports := []string{}
	for _, p := range c.Ports {
		ps, err := shlex.ProcessWordsWithMap(p, toEnvMap(d.buildArgs, d.image.Config.Env))
		if err != nil {
			return err
		}
		ports = append(ports, ps...)
	}
	c.Ports = ports
	ps, _, err := nat.ParsePortSpecs(c.Ports)
	if err != nil {
		return err
	}
	if d.image.Config.ExposedPorts == nil {
		d.image.Config.ExposedPorts = make(map[string]struct{})
	}
	for p := range ps {
		d.image.Config.ExposedPorts[string(p)] = struct{}{}
	}
	return commitToHistory(&d.image, fmt.Sprintf("EXPOSE %v", ps), false, nil)
}
// dispatchUser applies USER to state and image config. commit=false is
// used when replaying the base image's own User (no history entry).
func dispatchUser(d *dispatchState, c *instructions.UserCommand, commit bool) error {
	d.state = d.state.User(c.User)
	d.image.Config.User = c.User
	if commit {
		return commitToHistory(&d.image, fmt.Sprintf("USER %v", c.User), false, nil)
	}
	return nil
}
// dispatchVolume records VOLUME mount points in the image config;
// empty volume names are rejected.
func dispatchVolume(d *dispatchState, c *instructions.VolumeCommand) error {
	if d.image.Config.Volumes == nil {
		d.image.Config.Volumes = map[string]struct{}{}
	}
	for _, v := range c.Volumes {
		if v == "" {
			return errors.New("VOLUME specified can not be an empty string")
		}
		d.image.Config.Volumes[v] = struct{}{}
	}
	return commitToHistory(&d.image, fmt.Sprintf("VOLUME %v", c.Volumes), false, nil)
}
// dispatchStopSignal validates and records STOPSIGNAL in the image config.
func dispatchStopSignal(d *dispatchState, c *instructions.StopSignalCommand) error {
	if _, err := signal.ParseSignal(c.Signal); err != nil {
		return err
	}
	d.image.Config.StopSignal = c.Signal
	return commitToHistory(&d.image, fmt.Sprintf("STOPSIGNAL %v", c.Signal), false, nil)
}
// dispatchShell sets the SHELL used for subsequent shell-form commands.
func dispatchShell(d *dispatchState, c *instructions.ShellCommand) error {
	d.image.Config.Shell = c.Shell
	return commitToHistory(&d.image, fmt.Sprintf("SHELL %v", c.Shell), false, nil)
}
// dispatchArg records a stage-level ARG: the build-arg value wins,
// otherwise a matching top-level (meta) ARG provides the default.
func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instructions.KeyValuePairOptional, buildArgValues map[string]string) error {
	commitStr := "ARG " + c.Key
	buildArg := setKVValue(c.KeyValuePairOptional, buildArgValues)
	if c.Value != nil {
		commitStr += "=" + *c.Value
	}
	if buildArg.Value == nil {
		for _, ma := range metaArgs {
			if ma.Key == buildArg.Key {
				buildArg.Value = ma.Value
			}
		}
	}
	d.buildArgs = append(d.buildArgs, buildArg)
	return commitToHistory(&d.image, commitStr, false, nil)
}
// pathRelativeToWorkingDir resolves p against the state's working
// directory; absolute paths pass through unchanged.
func pathRelativeToWorkingDir(s llb.State, p string) string {
	if path.IsAbs(p) {
		return p
	}
	return path.Join(s.GetDir(), p)
}
// splitWildcards splits a COPY/ADD source into its longest wildcard-free
// directory prefix and the remaining pattern. A backslash escapes the
// following character. When the name contains no wildcard characters
// (*, ?, [) the whole name is returned with an empty pattern.
//
// Fix: a trailing backslash advanced i one past len(name), so the
// name[:i] slicing below panicked; i is now clamped.
func splitWildcards(name string) (string, string) {
	i := 0
	for ; i < len(name); i++ {
		ch := name[i]
		if ch == '\\' {
			// Skip the escaped character.
			i++
		} else if ch == '*' || ch == '?' || ch == '[' {
			break
		}
	}
	if i > len(name) {
		i = len(name)
	}
	if i == len(name) {
		return name, ""
	}
	base := path.Base(name[:i])
	if name[:i] == "" || strings.HasSuffix(name[:i], string(filepath.Separator)) {
		base = ""
	}
	return path.Dir(name[:i]), base + name[i:]
}
// addEnv sets k=v in a KEY=VALUE env slice, replacing the first entry
// whose key matches (per shell.EqualEnvKeys) or appending a new one.
func addEnv(env []string, k, v string) []string {
	gotOne := false
	for i, envVar := range env {
		key, _ := parseKeyValue(envVar)
		if shell.EqualEnvKeys(key, k) {
			env[i] = k + "=" + v
			gotOne = true
			break
		}
	}
	if !gotOne {
		env = append(env, k+"="+v)
	}
	return env
}
// parseKeyValue splits "KEY=VALUE" into its two halves; a missing "="
// yields the whole input as the key with an empty value.
func parseKeyValue(env string) (string, string) {
	kv := strings.SplitN(env, "=", 2)
	if len(kv) == 1 {
		return kv[0], ""
	}
	return kv[0], kv[1]
}
// setKVValue overrides kvpo's value with the one supplied in values (by
// key) when present; v is a copy, so the pointer does not alias the map.
func setKVValue(kvpo instructions.KeyValuePairOptional, values map[string]string) instructions.KeyValuePairOptional {
	if v, ok := values[kvpo.Key]; ok {
		kvpo.Value = &v
	}
	return kvpo
}
// toEnvMap merges build args into the environment map derived from env.
// Entries already present in the environment take precedence over
// build-arg values of the same key.
func toEnvMap(args []instructions.KeyValuePairOptional, env []string) map[string]string {
	merged := shell.BuildEnvs(env)
	for _, kv := range args {
		if _, taken := merged[kv.Key]; !taken {
			merged[kv.Key] = kv.ValueString()
		}
	}
	return merged
}
// dfCmd records the originating Dockerfile command text as an LLB
// description ("com.docker.dockerfile.v1.command") for progress/debug
// output. It accepts either a fmt.Stringer or a plain string; anything
// else yields an empty description value.
func dfCmd(cmd interface{}) llb.ConstraintsOpt {
	// TODO: add fmt.Stringer to instructions.Command to remove interface{}
	var cmdStr string
	if cmd, ok := cmd.(fmt.Stringer); ok {
		cmdStr = cmd.String()
	}
	if cmd, ok := cmd.(string); ok {
		cmdStr = cmd
	}
	return llb.WithDescription(map[string]string{
		"com.docker.dockerfile.v1.command": cmdStr,
	})
}
// runCommandString renders a RUN command line for image history, with the
// build args that were in effect prefixed in the classic builder's
// "|N key=val ..." format.
func runCommandString(args []string, buildArgs []instructions.KeyValuePairOptional) string {
	var tmpBuildEnv []string
	for _, arg := range buildArgs {
		tmpBuildEnv = append(tmpBuildEnv, arg.Key+"="+arg.ValueString())
	}
	if len(tmpBuildEnv) > 0 {
		tmpBuildEnv = append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...)
	}
	return strings.Join(append(tmpBuildEnv, args...), " ")
}
// commitToHistory appends a history entry to the image config. A non-nil
// st marks the entry as produced by buildkit ("# buildkit" suffix);
// withLayer=false records it as a metadata-only (empty) layer.
func commitToHistory(img *Image, msg string, withLayer bool, st *llb.State) error {
	if st != nil {
		msg += " # buildkit"
	}
	img.History = append(img.History, specs.History{
		CreatedBy:  msg,
		Comment:    historyComment,
		EmptyLayer: !withLayer,
	})
	return nil
}
// isReachable reports whether stage "to" is needed to build stage "from",
// following both the base-image chain and the recorded stage
// dependencies (e.g. COPY --from sources). from == to counts as
// reachable.
func isReachable(from, to *dispatchState) (ret bool) {
	if from == nil {
		return false
	}
	if from == to || isReachable(from.base, to) {
		return true
	}
	for d := range from.deps {
		if isReachable(d, to) {
			return true
		}
	}
	return false
}
// parseUser resolves a "uid[:gid]" chown-style spec into numeric ids.
// An empty string maps to 0:0; when no group part is given, the gid
// defaults to the uid.
func parseUser(str string) (uid uint32, gid uint32, err error) {
	if str == "" {
		return 0, 0, nil
	}
	userPart, groupPart := str, ""
	hasGroup := false
	if idx := strings.IndexByte(str, ':'); idx != -1 {
		userPart, groupPart = str[:idx], str[idx+1:]
		hasGroup = true
	}
	if uid, err = parseUID(userPart); err != nil {
		return 0, 0, err
	}
	if !hasGroup {
		// No explicit group: mirror the uid.
		return uid, uid, nil
	}
	if gid, err = parseUID(groupPart); err != nil {
		return 0, 0, err
	}
	return uid, gid, nil
}
// parseUID converts a user/group token to a numeric id. "root" is
// special-cased to 0; everything else must parse as a base-10 uint32.
func parseUID(str string) (uint32, error) {
	if str == "root" {
		return 0, nil
	}
	parsed, err := strconv.ParseUint(str, 10, 32)
	if err != nil {
		return 0, err
	}
	return uint32(parsed), nil
}
// normalizeContextPaths converts the set of context paths collected from
// COPY/ADD sources into a sorted, minimal list of relative patterns for
// llb.FollowPaths. A root ("/") entry means the whole context is needed,
// signalled by returning nil. Paths nested under another requested path
// are dropped, since the parent transfer already covers them.
//
// Fix: the original declared toDelete but never populated it, and instead
// deleted entries from the caller's map (an accidental side effect). The
// covered paths are now tracked in toDelete and the input is left intact.
func normalizeContextPaths(paths map[string]struct{}) []string {
	pathSlice := make([]string, 0, len(paths))
	for p := range paths {
		if p == "/" {
			return nil
		}
		pathSlice = append(pathSlice, p)
	}
	// Mark every path that is nested under another requested path.
	toDelete := map[string]struct{}{}
	for i := range pathSlice {
		for j := range pathSlice {
			if i == j {
				continue
			}
			if strings.HasPrefix(pathSlice[j], pathSlice[i]+"/") {
				toDelete[pathSlice[j]] = struct{}{}
			}
		}
	}
	toSort := make([]string, 0, len(pathSlice))
	for _, p := range pathSlice {
		if _, ok := toDelete[p]; !ok {
			toSort = append(toSort, path.Join(".", p))
		}
	}
	sort.Strings(toSort)
	return toSort
}
// proxyEnvFromBuildArgs extracts the classic proxy variables from the
// build args (case-insensitively) into an llb.ProxyEnv. It returns nil
// when none of them are present so callers can skip proxy wiring.
func proxyEnvFromBuildArgs(args map[string]string) *llb.ProxyEnv {
	pe := &llb.ProxyEnv{}
	found := false
	for k, v := range args {
		switch strings.ToLower(k) {
		case "http_proxy":
			pe.HttpProxy = v
			found = true
		case "https_proxy":
			pe.HttpsProxy = v
			found = true
		case "ftp_proxy":
			pe.FtpProxy = v
			found = true
		case "no_proxy":
			pe.NoProxy = v
			found = true
		}
	}
	if !found {
		return nil
	}
	return pe
}
// mutableOutput wraps an llb.Output that can be swapped in after states
// referencing it have already been constructed (used for the build
// context, whose definition depends on paths collected while dispatching).
type mutableOutput struct {
	llb.Output
}
// withShell prepends the image's configured SHELL (or the platform
// default) to a shell-form command line, joining args into one argument.
func withShell(img Image, args []string) []string {
	var shell []string
	if len(img.Config.Shell) > 0 {
		shell = append([]string{}, img.Config.Shell...)
	} else {
		shell = defaultShell()
	}
	return append(shell, strings.Join(args, " "))
}
// autoDetectPlatform returns a platform matching the image's os/arch:
// the target itself when it already matches, otherwise the first
// supported build platform that does, falling back to target.
func autoDetectPlatform(img Image, target specs.Platform, supported []specs.Platform) specs.Platform {
	os := img.OS
	arch := img.Architecture
	if target.OS == os && target.Architecture == arch {
		return target
	}
	for _, p := range supported {
		if p.OS == os && p.Architecture == arch {
			return p
		}
	}
	return target
}
// WithInternalName labels an LLB vertex as an internal step
// ("[internal] ...") in progress output.
func WithInternalName(name string, a ...interface{}) llb.ConstraintsOpt {
	return llb.WithCustomName("[internal] "+name, a...)
}
// uppercaseCmd upper-cases the first word of a Dockerfile command line
// (e.g. "run echo hi" -> "RUN echo hi"), leaving the rest untouched.
func uppercaseCmd(str string) string {
	if idx := strings.IndexByte(str, ' '); idx != -1 {
		return strings.ToUpper(str[:idx]) + str[idx:]
	}
	return strings.ToUpper(str)
}
// processCmdEnv expands environment references in cmd for display
// purposes; on expansion failure the raw command is returned unchanged.
func processCmdEnv(shlex *shell.Lex, cmd string, env []string) string {
	w, err := shlex.ProcessWord(cmd, env)
	if err != nil {
		return cmd
	}
	return w
}
// prefixCommand prepends the "[platform stage idx/total]" progress prefix
// to a command description, advancing the stage's command counter as a
// side effect. A cmdTotal of zero disables prefixing entirely.
func prefixCommand(ds *dispatchState, str string, prefixPlatform bool, platform *specs.Platform) string {
	if ds.cmdTotal == 0 {
		return str
	}
	out := "["
	if prefixPlatform && platform != nil {
		out += platforms.Format(*platform) + " "
	}
	if ds.stageName != "" {
		out += ds.stageName + " "
	}
	ds.cmdIndex++
	out += fmt.Sprintf("%d/%d] ", ds.cmdIndex, ds.cmdTotal)
	return out + str
}
|
package oembed
import (
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"strings"
)
// Oembed contains list of available oembed items (official endpoints).
// Items are populated by ParseProviders and matched by FindItem.
type Oembed struct {
items []*Item
}
// Endpoint contains single endpoint to check against.
// Schemes holds URL patterns (with '*' wildcards) this endpoint serves;
// when empty, a wildcard derived from the provider URL is used instead.
type Endpoint struct {
URL string `json:"url"`
Discovery bool `json:"discovery,omitempty"`
Schemes []string `json:"schemes,omitempty"`
}
// Provider contains a single provider which can have multiple endpoints.
// The field layout matches the official oembed providers.json format.
type Provider struct {
Name string `json:"provider_name"`
URL string `json:"provider_url"`
Endpoints []Endpoint `json:"endpoints"`
}
// Item contains data for a schema.
// EndpointURL is the query URL (already suffixed with "...url=" unless
// IsEndpointURLComplete is set), and regex is the compiled form of the
// provider's scheme pattern used by MatchURL.
type Item struct {
IsEndpointURLComplete bool
EndpointURL string
ProviderName string
ProviderURL string
regex *regexp.Regexp
}
// ComposeURL returns url of oembed resource ready to be queried.
// When the endpoint URL is not already complete, the target URL is
// appended query-escaped.
func (item *Item) ComposeURL(u string) string {
    if !item.IsEndpointURLComplete {
        return item.EndpointURL + url.QueryEscape(u)
    }
    return item.EndpointURL
}
// FetchOembed return oembed info from an url containing it.
// The endpoint is queried with the given client, falling back to the
// package-level http.Get when client is nil. Any status code above 200
// yields an Info carrying only Status (NOTE(review): 1xx responses would
// pass through to decoding — confirm intended). The body is capped at
// 40 KB before JSON decoding, and empty URL/provider fields in the
// response are backfilled from the request and the item.
func (item *Item) FetchOembed(u string, client *http.Client) (*Info, error) {
resURL := item.ComposeURL(u)
var resp *http.Response
var err error
if client != nil {
resp, err = client.Get(resURL)
} else {
resp, err = http.Get(resURL)
}
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode > 200 {
return &Info{Status: resp.StatusCode}, nil
}
// cap the body so a misbehaving provider cannot make us buffer arbitrarily
reader := io.LimitReader(resp.Body, 40000) // 40 KB max
info := NewInfo()
err = info.FillFromJSON(reader)
if err != nil {
return nil, err
}
// backfill fields the provider's JSON left empty
if len(info.URL) == 0 {
info.URL = u
}
if len(info.ProviderURL) == 0 {
info.ProviderURL = item.ProviderURL
}
if len(info.ProviderName) == 0 {
info.ProviderName = item.ProviderName
}
return info, nil
}
// MatchURL tests if given url applies to the endpoint.
// Stray CR/LF characters around the URL are stripped before matching.
func (item *Item) MatchURL(url string) bool {
    trimmed := strings.Trim(url, "\r\n")
    return item.regex.MatchString(trimmed)
}
// NewOembed creates Oembed instance with an empty item list.
func NewOembed() *Oembed {
    return new(Oembed)
}
// ParseProviders build oembed endpoint list based on provided json stream.
// Every (endpoint, scheme) pair becomes one Item; endpoints without schemes
// get a wildcard scheme derived from the provider URL. On success the
// previous item list is replaced wholesale.
func (o *Oembed) ParseProviders(buf io.Reader) error {
    raw, err := ioutil.ReadAll(buf)
    if err != nil {
        return err
    }
    var providers []Provider
    if err = json.Unmarshal(raw, &providers); err != nil {
        return err
    }
    var parsed []*Item
    for _, p := range providers {
        for _, ep := range p.Endpoints {
            schemes := ep.Schemes
            if len(schemes) == 0 {
                // no explicit schemes: match anything under the provider URL
                schemes = []string{strings.TrimRight(p.URL, "/") + "/*"}
            }
            for _, s := range schemes {
                parsed = append(parsed, &Item{
                    ProviderName: p.Name,
                    ProviderURL:  p.URL,
                    EndpointURL:  o.prepareEndpointURL(ep.URL),
                    regex:        o.convertSchemaURL2Regexp(s),
                })
            }
        }
    }
    o.items = parsed
    return nil
}
// FindItem returns Oembed item based on provided url.
// The first item whose scheme regex matches wins; nil when nothing matches.
func (o *Oembed) FindItem(url string) *Item {
    for i := range o.items {
        if o.items[i].MatchURL(url) {
            return o.items[i]
        }
    }
    return nil
}
// TODO: add more intelligent parameters parsing
// prepareEndpointURL normalizes an endpoint URL so that only the target URL
// needs to be appended: "{format}" is fixed to json, stray "/*" removed,
// and the format/url query parameters attached with the right separator.
func (o *Oembed) prepareEndpointURL(url string) string {
    url = strings.Replace(url, "{format}", "json", -1)
    url = strings.Replace(url, "/*", "", -1) // hack for Ora TV.. wtf they put in?
    sep := "?"
    if strings.ContainsRune(url, '?') {
        sep = "&"
    }
    return url + sep + "format=json&url="
}
// Wildcard-rewrite regexes used by convertSchemaURL2Regexp, compiled once at
// package init instead of on every call (this function runs once per scheme
// on every ParseProviders invocation).
var (
    schemaHostWildcardRe = regexp.MustCompile("^(https?://[^/]*?)\\*(.+)$")
    schemaPathWildcardRe = regexp.MustCompile("^(https?://[^/]*?/.*?)\\*(.+)$")
    schemaTailWildcardRe = regexp.MustCompile("^(https?://.*?)\\*$")
    schemaHTTPSchemeRe   = regexp.MustCompile("^http://")
)

// convertSchemaURL2Regexp converts a provider scheme pattern (with '*'
// wildcards) into an anchored regexp. '%' is used as a temporary
// placeholder so already-rewritten wildcards are not rewritten again,
// then swapped back to the regexp '*' quantifier at the end.
// Panics when the resulting pattern does not compile.
func (o *Oembed) convertSchemaURL2Regexp(url string) *regexp.Regexp {
    // domain replacements
    url = strings.Replace(url, "?", "\\?", -1)
    url = schemaHostWildcardRe.ReplaceAllString(url, "${1}[^/]%?${2}")
    url = schemaPathWildcardRe.ReplaceAllString(url, "${1}.%?${2}")
    url = schemaTailWildcardRe.ReplaceAllString(url, "${1}.%")
    url = schemaHTTPSchemeRe.ReplaceAllString(url, "https?://")
    url = strings.Replace(url, "%", "*", -1)
    res, err := regexp.Compile("^" + url + "$")
    if err != nil {
        panic(err)
    }
    return res
}
moved url parsing regexps out to module level
package oembed
import (
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"strings"
)
// replacements to convert patterns to regexes,
// precompiled once so convertSchemaURL2Regexp does not recompile per call:
// su2re1 — '*' in the host part, su2re2 — '*' in the path part,
// su2re3 — trailing '*', su2re4 — http scheme widened to https?.
var (
su2re1 = regexp.MustCompile("^(https?://[^/]*?)\\*(.+)$")
su2re2 = regexp.MustCompile("^(https?://[^/]*?/.*?)\\*(.+)$")
su2re3 = regexp.MustCompile("^(https?://.*?)\\*$")
su2re4 = regexp.MustCompile("^http://")
)
// Oembed contains list of available oembed items (official endpoints).
// Items are populated by ParseProviders and matched by FindItem.
type Oembed struct {
items []*Item
}
// Endpoint contains single endpoint to check against.
// Schemes holds URL patterns (with '*' wildcards) this endpoint serves;
// when empty, a wildcard derived from the provider URL is used instead.
type Endpoint struct {
URL string `json:"url"`
Discovery bool `json:"discovery,omitempty"`
Schemes []string `json:"schemes,omitempty"`
}
// Provider contains a single provider which can have multiple endpoints.
// The field layout matches the official oembed providers.json format.
type Provider struct {
Name string `json:"provider_name"`
URL string `json:"provider_url"`
Endpoints []Endpoint `json:"endpoints"`
}
// Item contains data for a schema.
// EndpointURL is the query URL (already suffixed with "...url=" unless
// IsEndpointURLComplete is set), and regex is the compiled form of the
// provider's scheme pattern used by MatchURL.
type Item struct {
IsEndpointURLComplete bool
EndpointURL string
ProviderName string
ProviderURL string
regex *regexp.Regexp
}
// ComposeURL returns url of oembed resource ready to be queried.
// A complete endpoint URL is returned verbatim; otherwise the target URL
// is appended query-escaped.
func (item *Item) ComposeURL(u string) string {
    base := item.EndpointURL
    if item.IsEndpointURLComplete {
        return base
    }
    return base + url.QueryEscape(u)
}
// FetchOembed return oembed info from an url containing it.
// The endpoint is queried with the given client, falling back to the
// package-level http.Get when client is nil. Any status code above 200
// yields an Info carrying only Status (NOTE(review): 1xx responses would
// pass through to decoding — confirm intended). The body is capped at
// 40 KB before JSON decoding, and empty URL/provider fields in the
// response are backfilled from the request and the item.
func (item *Item) FetchOembed(u string, client *http.Client) (*Info, error) {
resURL := item.ComposeURL(u)
var resp *http.Response
var err error
if client != nil {
resp, err = client.Get(resURL)
} else {
resp, err = http.Get(resURL)
}
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode > 200 {
return &Info{Status: resp.StatusCode}, nil
}
// cap the body so a misbehaving provider cannot make us buffer arbitrarily
reader := io.LimitReader(resp.Body, 40000) // 40 KB max
info := NewInfo()
err = info.FillFromJSON(reader)
if err != nil {
return nil, err
}
// backfill fields the provider's JSON left empty
if len(info.URL) == 0 {
info.URL = u
}
if len(info.ProviderURL) == 0 {
info.ProviderURL = item.ProviderURL
}
if len(info.ProviderName) == 0 {
info.ProviderName = item.ProviderName
}
return info, nil
}
// MatchURL tests if given url applies to the endpoint.
// Stray CR/LF characters around the URL are stripped before matching.
func (item *Item) MatchURL(url string) bool {
    candidate := strings.Trim(url, "\r\n")
    return item.regex.MatchString(candidate)
}
// NewOembed creates Oembed instance with an empty item list.
func NewOembed() *Oembed {
    return new(Oembed)
}
// ParseProviders build oembed endpoint list based on provided json stream.
// Every (endpoint, scheme) pair becomes one Item; endpoints without schemes
// get a wildcard scheme derived from the provider URL. On success the
// previous item list is replaced wholesale.
func (o *Oembed) ParseProviders(buf io.Reader) error {
    payload, err := ioutil.ReadAll(buf)
    if err != nil {
        return err
    }
    var providerList []Provider
    if err = json.Unmarshal(payload, &providerList); err != nil {
        return err
    }
    var collected []*Item
    for _, prov := range providerList {
        for _, ep := range prov.Endpoints {
            schemes := ep.Schemes
            if len(schemes) == 0 {
                // no explicit schemes: match anything under the provider URL
                schemes = []string{strings.TrimRight(prov.URL, "/") + "/*"}
            }
            for _, scheme := range schemes {
                entry := &Item{ProviderName: prov.Name, ProviderURL: prov.URL}
                entry.EndpointURL = o.prepareEndpointURL(ep.URL)
                entry.regex = o.convertSchemaURL2Regexp(scheme)
                collected = append(collected, entry)
            }
        }
    }
    o.items = collected
    return nil
}
// FindItem returns Oembed item based on provided url.
// The first item whose scheme regex matches wins; nil when nothing matches.
func (o *Oembed) FindItem(url string) *Item {
    for _, entry := range o.items {
        if entry.MatchURL(url) {
            return entry
        }
    }
    return nil
}
// TODO: add more intelligent parameters parsing
// prepareEndpointURL normalizes an endpoint URL so that only the target URL
// needs to be appended: "{format}" is fixed to json, stray "/*" removed,
// and the format/url query parameters attached with the right separator.
func (o *Oembed) prepareEndpointURL(url string) string {
    endpoint := strings.Replace(url, "{format}", "json", -1)
    endpoint = strings.Replace(endpoint, "/*", "", -1) // hack for Ora TV.. wtf they put in?
    if strings.Contains(endpoint, "?") {
        return endpoint + "&format=json&url="
    }
    return endpoint + "?format=json&url="
}
// convertSchemaURL2Regexp converts a provider scheme pattern (with '*'
// wildcards) into an anchored regexp. '%' is used as a temporary
// placeholder so already-rewritten wildcards are not rewritten again,
// then swapped back to the regexp '*' quantifier at the end.
// Panics when the resulting pattern does not compile.
func (o *Oembed) convertSchemaURL2Regexp(url string) *regexp.Regexp {
    // domain replacements
    pattern := strings.Replace(url, "?", "\\?", -1)
    rewrites := []struct {
        re   *regexp.Regexp
        repl string
    }{
        {su2re1, "${1}[^/]%?${2}"},
        {su2re2, "${1}.%?${2}"},
        {su2re3, "${1}.%"},
        {su2re4, "https?://"},
    }
    for _, rw := range rewrites {
        pattern = rw.re.ReplaceAllString(pattern, rw.repl)
    }
    pattern = strings.Replace(pattern, "%", "*", -1)
    res, err := regexp.Compile("^" + pattern + "$")
    if err != nil {
        panic(err)
    }
    return res
}
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package factory
import (
"context"
"fmt"
"net"
"net/url"
"path"
"strings"
"sync"
"sync/atomic"
"time"
grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
"go.etcd.io/etcd/client/pkg/v3/transport"
clientv3 "go.etcd.io/etcd/client/v3"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
"k8s.io/apimachinery/pkg/runtime"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
genericfeatures "k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/server/egressselector"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/etcd3"
"k8s.io/apiserver/pkg/storage/etcd3/metrics"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/apiserver/pkg/storage/value"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/component-base/traces"
"k8s.io/klog/v2"
)
const (
// The short keepalive timeout and interval have been chosen to aggressively
// detect a failed etcd server without introducing much overhead.
keepaliveTime = 30 * time.Second
keepaliveTimeout = 10 * time.Second
// dialTimeout is the timeout for failing to establish a connection.
// It is set to 20 seconds as times shorter than that will cause TLS connections to fail
// on heavily loaded arm64 CPUs (issue #64649)
dialTimeout = 20 * time.Second
// dbMetricsMonitorJitter is the jitter factor applied to the db-size
// polling interval in startDBSizeMonitorPerEndpoint.
dbMetricsMonitorJitter = 0.5
)
func init() {
// grpcprom auto-registers (via an init function) their client metrics, since we are opting out of
// using the global prometheus registry and using our own wrapped global registry,
// we need to explicitly register these metrics to our global registry here.
// For reference: https://github.com/kubernetes/kubernetes/pull/81387
legacyregistry.RawMustRegister(grpcprom.DefaultClientMetrics)
// initialize the endpoint->monitor set used by startDBSizeMonitorPerEndpoint
dbMetricsMonitors = make(map[string]struct{})
}
// newETCD3HealthCheck returns a healthcheck function for the etcd backend.
// The etcd client is constructed asynchronously (constructing it blocks and
// times out when etcd is unavailable); until it exists the healthcheck
// reports the last construction error.
func newETCD3HealthCheck(c storagebackend.Config) (func() error, error) {
    // constructing the etcd v3 client blocks and times out if etcd is not available.
    // retry in a loop in the background until we successfully create the client, storing the client or error encountered
    clientValue := &atomic.Value{}
    clientErrMsg := &atomic.Value{}
    clientErrMsg.Store("etcd client connection not yet established")
    go wait.PollUntil(time.Second, func() (bool, error) {
        client, err := newETCD3Client(c.Transport)
        if err != nil {
            clientErrMsg.Store(err.Error())
            return false, nil
        }
        clientValue.Store(client)
        clientErrMsg.Store("")
        return true, nil
    }, wait.NeverStop)
    return func() error {
        if errMsg := clientErrMsg.Load().(string); len(errMsg) > 0 {
            // Use a constant format string: errMsg may itself contain '%'
            // (e.g. escaped endpoint URLs), which fmt.Errorf(errMsg) would
            // misinterpret as verbs (flagged by `go vet` printf check).
            return fmt.Errorf("%s", errMsg)
        }
        client := clientValue.Load().(*clientv3.Client)
        healthcheckTimeout := storagebackend.DefaultHealthcheckTimeout
        if c.HealthcheckTimeout != time.Duration(0) {
            healthcheckTimeout = c.HealthcheckTimeout
        }
        ctx, cancel := context.WithTimeout(context.Background(), healthcheckTimeout)
        defer cancel()
        // See https://github.com/etcd-io/etcd/blob/c57f8b3af865d1b531b979889c602ba14377420e/etcdctl/ctlv3/command/ep_command.go#L118
        _, err := client.Get(ctx, path.Join("/", c.Prefix, "health"))
        if err == nil {
            return nil
        }
        return fmt.Errorf("error getting data from etcd: %v", err)
    }, nil
}
// newETCD3Client constructs an etcd v3 client from the transport config.
// TLS is derived from the Cert/Key/TrustedCA files; with none configured the
// connection is plain (nil TLS config). Dial options always include the
// prometheus interceptors and WithBlock; OpenTelemetry interceptors are added
// behind the APIServerTracing feature gate, and an egress-selector dialer is
// installed when an egress lookup is configured.
func newETCD3Client(c storagebackend.TransportConfig) (*clientv3.Client, error) {
tlsInfo := transport.TLSInfo{
CertFile: c.CertFile,
KeyFile: c.KeyFile,
TrustedCAFile: c.TrustedCAFile,
}
tlsConfig, err := tlsInfo.ClientConfig()
if err != nil {
return nil, err
}
// NOTE: Client relies on nil tlsConfig
// for non-secure connections, update the implicit variable
if len(c.CertFile) == 0 && len(c.KeyFile) == 0 && len(c.TrustedCAFile) == 0 {
tlsConfig = nil
}
networkContext := egressselector.Etcd.AsNetworkContext()
var egressDialer utilnet.DialFunc
if c.EgressLookup != nil {
egressDialer, err = c.EgressLookup(networkContext)
if err != nil {
return nil, err
}
}
dialOptions := []grpc.DialOption{
grpc.WithBlock(), // block until the underlying connection is up
grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),
}
if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerTracing) {
tracingOpts := []otelgrpc.Option{
otelgrpc.WithPropagators(traces.Propagators()),
}
if c.TracerProvider != nil {
tracingOpts = append(tracingOpts, otelgrpc.WithTracerProvider(*c.TracerProvider))
}
// Even if there is no TracerProvider, the otelgrpc still handles context propagation.
// See https://github.com/open-telemetry/opentelemetry-go/tree/main/example/passthrough
dialOptions = append(dialOptions,
grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor(tracingOpts...)),
grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor(tracingOpts...)))
}
if egressDialer != nil {
dialer := func(ctx context.Context, addr string) (net.Conn, error) {
if strings.Contains(addr, "//") {
// etcd client prior to 3.5 passed URLs to dialer, normalize to address
u, err := url.Parse(addr)
if err != nil {
return nil, err
}
addr = u.Host
}
return egressDialer(ctx, "tcp", addr)
}
dialOptions = append(dialOptions, grpc.WithContextDialer(dialer))
}
cfg := clientv3.Config{
DialTimeout: dialTimeout,
DialKeepAliveTime: keepaliveTime,
DialKeepAliveTimeout: keepaliveTimeout,
// send keepalive pings even when there is no active RPC stream
PermitWithoutStream: true,
DialOptions: dialOptions,
Endpoints: c.ServerList,
TLS: tlsConfig,
}
return clientv3.New(cfg)
}
// runningCompactor tracks one shared compactor per transport config:
// its polling interval, a cancel func for its goroutine, the etcd client it
// owns, and a refcount of callers still using it.
type runningCompactor struct {
interval time.Duration
cancel context.CancelFunc
client *clientv3.Client
refs int
}
var (
// compactorsMu guards access to compactors map
compactorsMu sync.Mutex
// compactors maps a stringified transport config to its shared compactor
compactors = map[string]*runningCompactor{}
// dbMetricsMonitorsMu guards access to dbMetricsMonitors map
dbMetricsMonitorsMu sync.Mutex
// dbMetricsMonitors records endpoints that already have a db-size monitor
dbMetricsMonitors map[string]struct{}
)
// startCompactorOnce start one compactor per transport. If the interval get smaller on repeated calls, the
// compactor is replaced. A destroy func is returned. If all destroy funcs with the same transport are called,
// the compactor is stopped.
func startCompactorOnce(c storagebackend.TransportConfig, interval time.Duration) (func(), error) {
compactorsMu.Lock()
defer compactorsMu.Unlock()
// the stringified config identifies a transport for sharing purposes
key := fmt.Sprintf("%v", c) // gives: {[server1 server2] keyFile certFile caFile}
if compactor, foundBefore := compactors[key]; !foundBefore || compactor.interval > interval {
compactorClient, err := newETCD3Client(c)
if err != nil {
return nil, err
}
if foundBefore {
// replace compactor: stop the old goroutine and close its client,
// but keep the struct (and its refcount) in place
compactor.cancel()
compactor.client.Close()
} else {
// start new compactor
compactor = &runningCompactor{}
compactors[key] = compactor
}
ctx, cancel := context.WithCancel(context.Background())
compactor.interval = interval
compactor.cancel = cancel
compactor.client = compactorClient
etcd3.StartCompactor(ctx, compactorClient, interval)
}
compactors[key].refs++
// the returned destroy func decrements the refcount and tears the
// compactor down once the last user releases it
return func() {
compactorsMu.Lock()
defer compactorsMu.Unlock()
compactor := compactors[key]
compactor.refs--
if compactor.refs == 0 {
compactor.cancel()
compactor.client.Close()
delete(compactors, key)
}
}, nil
}
// newETCD3Storage builds an etcd3-backed storage.Interface plus a destroy
// func that releases the compactor reference, the db-size monitor, and the
// client. Each acquired resource is released if a later setup step fails.
func newETCD3Storage(c storagebackend.Config, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) {
    stopCompactor, err := startCompactorOnce(c.Transport, c.CompactionInterval)
    if err != nil {
        return nil, nil, err
    }
    client, err := newETCD3Client(c.Transport)
    if err != nil {
        stopCompactor()
        return nil, nil, err
    }
    stopDBSizeMonitor, err := startDBSizeMonitorPerEndpoint(client, c.DBMetricPollInterval)
    if err != nil {
        // release what was acquired above; the original code leaked the
        // compactor reference and the client on this path
        stopCompactor()
        client.Close()
        return nil, nil, err
    }
    var once sync.Once
    destroyFunc := func() {
        // we know that storage destroy funcs are called multiple times (due to reuse in subresources).
        // Hence, we only destroy once.
        // TODO: fix duplicated storage destroy calls higher level
        once.Do(func() {
            stopCompactor()
            stopDBSizeMonitor()
            client.Close()
        })
    }
    transformer := c.Transformer
    if transformer == nil {
        transformer = value.IdentityTransformer
    }
    return etcd3.New(client, c.Codec, newFunc, c.Prefix, transformer, c.Paging, c.LeaseManagerConfig), destroyFunc, nil
}
// startDBSizeMonitorPerEndpoint starts a loop to monitor etcd database size and update the
// corresponding metric etcd_db_total_size_in_bytes for each etcd server endpoint.
// A zero interval disables monitoring. The returned func cancels the polling
// goroutines. NOTE(review): endpoints are never removed from
// dbMetricsMonitors, so after cancellation the same endpoint will not be
// re-monitored by a later call — confirm this is intended.
func startDBSizeMonitorPerEndpoint(client *clientv3.Client, interval time.Duration) (func(), error) {
if interval == 0 {
return func() {}, nil
}
dbMetricsMonitorsMu.Lock()
defer dbMetricsMonitorsMu.Unlock()
ctx, cancel := context.WithCancel(context.Background())
for _, ep := range client.Endpoints() {
if _, found := dbMetricsMonitors[ep]; found {
continue
}
dbMetricsMonitors[ep] = struct{}{}
// capture the loop variable for the goroutine below
endpoint := ep
klog.V(4).Infof("Start monitoring storage db size metric for endpoint %s with polling interval %v", endpoint, interval)
go wait.JitterUntilWithContext(ctx, func(context.Context) {
epStatus, err := client.Maintenance.Status(ctx, endpoint)
if err != nil {
klog.V(4).Infof("Failed to get storage db size for ep %s: %v", endpoint, err)
// -1 signals "size unknown" to the metric consumer
metrics.UpdateEtcdDbSize(endpoint, -1)
} else {
metrics.UpdateEtcdDbSize(endpoint, epStatus.DbSize)
}
}, interval, dbMetricsMonitorJitter, true)
}
return func() {
cancel()
}, nil
}
Revert "use PermitWithoutStream=true for etcd: send pings even without active stream"
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package factory
import (
"context"
"fmt"
"net"
"net/url"
"path"
"strings"
"sync"
"sync/atomic"
"time"
grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
"go.etcd.io/etcd/client/pkg/v3/transport"
clientv3 "go.etcd.io/etcd/client/v3"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
"k8s.io/apimachinery/pkg/runtime"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
genericfeatures "k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/server/egressselector"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/etcd3"
"k8s.io/apiserver/pkg/storage/etcd3/metrics"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/apiserver/pkg/storage/value"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/component-base/traces"
"k8s.io/klog/v2"
)
const (
// The short keepalive timeout and interval have been chosen to aggressively
// detect a failed etcd server without introducing much overhead.
keepaliveTime = 30 * time.Second
keepaliveTimeout = 10 * time.Second
// dialTimeout is the timeout for failing to establish a connection.
// It is set to 20 seconds as times shorter than that will cause TLS connections to fail
// on heavily loaded arm64 CPUs (issue #64649)
dialTimeout = 20 * time.Second
// dbMetricsMonitorJitter is the jitter factor applied to the db-size
// polling interval in startDBSizeMonitorPerEndpoint.
dbMetricsMonitorJitter = 0.5
)
func init() {
// grpcprom auto-registers (via an init function) their client metrics, since we are opting out of
// using the global prometheus registry and using our own wrapped global registry,
// we need to explicitly register these metrics to our global registry here.
// For reference: https://github.com/kubernetes/kubernetes/pull/81387
legacyregistry.RawMustRegister(grpcprom.DefaultClientMetrics)
// initialize the endpoint->monitor set used by startDBSizeMonitorPerEndpoint
dbMetricsMonitors = make(map[string]struct{})
}
// newETCD3HealthCheck returns a healthcheck function for the etcd backend.
// The etcd client is constructed asynchronously (constructing it blocks and
// times out when etcd is unavailable); until it exists the healthcheck
// reports the last construction error.
func newETCD3HealthCheck(c storagebackend.Config) (func() error, error) {
    // constructing the etcd v3 client blocks and times out if etcd is not available.
    // retry in a loop in the background until we successfully create the client, storing the client or error encountered
    clientValue := &atomic.Value{}
    clientErrMsg := &atomic.Value{}
    clientErrMsg.Store("etcd client connection not yet established")
    go wait.PollUntil(time.Second, func() (bool, error) {
        client, err := newETCD3Client(c.Transport)
        if err != nil {
            clientErrMsg.Store(err.Error())
            return false, nil
        }
        clientValue.Store(client)
        clientErrMsg.Store("")
        return true, nil
    }, wait.NeverStop)
    return func() error {
        if errMsg := clientErrMsg.Load().(string); len(errMsg) > 0 {
            // Use a constant format string: errMsg may itself contain '%'
            // (e.g. escaped endpoint URLs), which fmt.Errorf(errMsg) would
            // misinterpret as verbs (flagged by `go vet` printf check).
            return fmt.Errorf("%s", errMsg)
        }
        client := clientValue.Load().(*clientv3.Client)
        healthcheckTimeout := storagebackend.DefaultHealthcheckTimeout
        if c.HealthcheckTimeout != time.Duration(0) {
            healthcheckTimeout = c.HealthcheckTimeout
        }
        ctx, cancel := context.WithTimeout(context.Background(), healthcheckTimeout)
        defer cancel()
        // See https://github.com/etcd-io/etcd/blob/c57f8b3af865d1b531b979889c602ba14377420e/etcdctl/ctlv3/command/ep_command.go#L118
        _, err := client.Get(ctx, path.Join("/", c.Prefix, "health"))
        if err == nil {
            return nil
        }
        return fmt.Errorf("error getting data from etcd: %v", err)
    }, nil
}
// newETCD3Client constructs an etcd v3 client from the transport config.
// TLS is derived from the Cert/Key/TrustedCA files; with none configured the
// connection is plain (nil TLS config). Dial options always include the
// prometheus interceptors and WithBlock; OpenTelemetry interceptors are added
// behind the APIServerTracing feature gate, and an egress-selector dialer is
// installed when an egress lookup is configured.
func newETCD3Client(c storagebackend.TransportConfig) (*clientv3.Client, error) {
tlsInfo := transport.TLSInfo{
CertFile: c.CertFile,
KeyFile: c.KeyFile,
TrustedCAFile: c.TrustedCAFile,
}
tlsConfig, err := tlsInfo.ClientConfig()
if err != nil {
return nil, err
}
// NOTE: Client relies on nil tlsConfig
// for non-secure connections, update the implicit variable
if len(c.CertFile) == 0 && len(c.KeyFile) == 0 && len(c.TrustedCAFile) == 0 {
tlsConfig = nil
}
networkContext := egressselector.Etcd.AsNetworkContext()
var egressDialer utilnet.DialFunc
if c.EgressLookup != nil {
egressDialer, err = c.EgressLookup(networkContext)
if err != nil {
return nil, err
}
}
dialOptions := []grpc.DialOption{
grpc.WithBlock(), // block until the underlying connection is up
grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),
}
if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerTracing) {
tracingOpts := []otelgrpc.Option{
otelgrpc.WithPropagators(traces.Propagators()),
}
if c.TracerProvider != nil {
tracingOpts = append(tracingOpts, otelgrpc.WithTracerProvider(*c.TracerProvider))
}
// Even if there is no TracerProvider, the otelgrpc still handles context propagation.
// See https://github.com/open-telemetry/opentelemetry-go/tree/main/example/passthrough
dialOptions = append(dialOptions,
grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor(tracingOpts...)),
grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor(tracingOpts...)))
}
if egressDialer != nil {
dialer := func(ctx context.Context, addr string) (net.Conn, error) {
if strings.Contains(addr, "//") {
// etcd client prior to 3.5 passed URLs to dialer, normalize to address
u, err := url.Parse(addr)
if err != nil {
return nil, err
}
addr = u.Host
}
return egressDialer(ctx, "tcp", addr)
}
dialOptions = append(dialOptions, grpc.WithContextDialer(dialer))
}
cfg := clientv3.Config{
DialTimeout: dialTimeout,
DialKeepAliveTime: keepaliveTime,
DialKeepAliveTimeout: keepaliveTimeout,
DialOptions: dialOptions,
Endpoints: c.ServerList,
TLS: tlsConfig,
}
return clientv3.New(cfg)
}
// runningCompactor tracks one shared compactor per transport config:
// its polling interval, a cancel func for its goroutine, the etcd client it
// owns, and a refcount of callers still using it.
type runningCompactor struct {
interval time.Duration
cancel context.CancelFunc
client *clientv3.Client
refs int
}
var (
// compactorsMu guards access to compactors map
compactorsMu sync.Mutex
// compactors maps a stringified transport config to its shared compactor
compactors = map[string]*runningCompactor{}
// dbMetricsMonitorsMu guards access to dbMetricsMonitors map
dbMetricsMonitorsMu sync.Mutex
// dbMetricsMonitors records endpoints that already have a db-size monitor
dbMetricsMonitors map[string]struct{}
)
// startCompactorOnce start one compactor per transport. If the interval get smaller on repeated calls, the
// compactor is replaced. A destroy func is returned. If all destroy funcs with the same transport are called,
// the compactor is stopped.
func startCompactorOnce(c storagebackend.TransportConfig, interval time.Duration) (func(), error) {
compactorsMu.Lock()
defer compactorsMu.Unlock()
// the stringified config identifies a transport for sharing purposes
key := fmt.Sprintf("%v", c) // gives: {[server1 server2] keyFile certFile caFile}
if compactor, foundBefore := compactors[key]; !foundBefore || compactor.interval > interval {
compactorClient, err := newETCD3Client(c)
if err != nil {
return nil, err
}
if foundBefore {
// replace compactor: stop the old goroutine and close its client,
// but keep the struct (and its refcount) in place
compactor.cancel()
compactor.client.Close()
} else {
// start new compactor
compactor = &runningCompactor{}
compactors[key] = compactor
}
ctx, cancel := context.WithCancel(context.Background())
compactor.interval = interval
compactor.cancel = cancel
compactor.client = compactorClient
etcd3.StartCompactor(ctx, compactorClient, interval)
}
compactors[key].refs++
// the returned destroy func decrements the refcount and tears the
// compactor down once the last user releases it
return func() {
compactorsMu.Lock()
defer compactorsMu.Unlock()
compactor := compactors[key]
compactor.refs--
if compactor.refs == 0 {
compactor.cancel()
compactor.client.Close()
delete(compactors, key)
}
}, nil
}
// newETCD3Storage builds an etcd3-backed storage.Interface plus a destroy
// func that releases the compactor reference, the db-size monitor, and the
// client. Each acquired resource is released if a later setup step fails.
func newETCD3Storage(c storagebackend.Config, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) {
    stopCompactor, err := startCompactorOnce(c.Transport, c.CompactionInterval)
    if err != nil {
        return nil, nil, err
    }
    client, err := newETCD3Client(c.Transport)
    if err != nil {
        stopCompactor()
        return nil, nil, err
    }
    stopDBSizeMonitor, err := startDBSizeMonitorPerEndpoint(client, c.DBMetricPollInterval)
    if err != nil {
        // release what was acquired above; the original code leaked the
        // compactor reference and the client on this path
        stopCompactor()
        client.Close()
        return nil, nil, err
    }
    var once sync.Once
    destroyFunc := func() {
        // we know that storage destroy funcs are called multiple times (due to reuse in subresources).
        // Hence, we only destroy once.
        // TODO: fix duplicated storage destroy calls higher level
        once.Do(func() {
            stopCompactor()
            stopDBSizeMonitor()
            client.Close()
        })
    }
    transformer := c.Transformer
    if transformer == nil {
        transformer = value.IdentityTransformer
    }
    return etcd3.New(client, c.Codec, newFunc, c.Prefix, transformer, c.Paging, c.LeaseManagerConfig), destroyFunc, nil
}
// startDBSizeMonitorPerEndpoint starts a loop to monitor etcd database size and update the
// corresponding metric etcd_db_total_size_in_bytes for each etcd server endpoint.
// A zero interval disables monitoring. The returned func cancels the polling
// goroutines. NOTE(review): endpoints are never removed from
// dbMetricsMonitors, so after cancellation the same endpoint will not be
// re-monitored by a later call — confirm this is intended.
func startDBSizeMonitorPerEndpoint(client *clientv3.Client, interval time.Duration) (func(), error) {
if interval == 0 {
return func() {}, nil
}
dbMetricsMonitorsMu.Lock()
defer dbMetricsMonitorsMu.Unlock()
ctx, cancel := context.WithCancel(context.Background())
for _, ep := range client.Endpoints() {
if _, found := dbMetricsMonitors[ep]; found {
continue
}
dbMetricsMonitors[ep] = struct{}{}
// capture the loop variable for the goroutine below
endpoint := ep
klog.V(4).Infof("Start monitoring storage db size metric for endpoint %s with polling interval %v", endpoint, interval)
go wait.JitterUntilWithContext(ctx, func(context.Context) {
epStatus, err := client.Maintenance.Status(ctx, endpoint)
if err != nil {
klog.V(4).Infof("Failed to get storage db size for ep %s: %v", endpoint, err)
// -1 signals "size unknown" to the metric consumer
metrics.UpdateEtcdDbSize(endpoint, -1)
} else {
metrics.UpdateEtcdDbSize(endpoint, epStatus.DbSize)
}
}, interval, dbMetricsMonitorJitter, true)
}
return func() {
cancel()
}, nil
}
|
// +build !providerless
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openstack
import (
"context"
"errors"
"fmt"
"io/ioutil"
"path"
"path/filepath"
"strings"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
cloudprovider "k8s.io/cloud-provider"
cloudvolume "k8s.io/cloud-provider/volume"
volerr "k8s.io/cloud-provider/volume/errors"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
"k8s.io/component-base/metrics"
"github.com/gophercloud/gophercloud"
volumeexpand "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions"
volumes_v1 "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes"
volumes_v2 "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
volumes_v3 "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes"
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach"
"k8s.io/klog/v2"
)
// volumeService abstracts the cinder block-storage API so v1/v2/v3
// implementations can be used interchangeably.
type volumeService interface {
createVolume(opts volumeCreateOpts) (string, string, error)
getVolume(volumeID string) (Volume, error)
deleteVolume(volumeName string) error
expandVolume(volumeID string, newSize int) error
}
// VolumesV1 is a Volumes implementation for cinder v1.
// blockstorage is the gophercloud service client used for all API calls.
type VolumesV1 struct {
blockstorage *gophercloud.ServiceClient
opts BlockStorageOpts
}
// VolumesV2 is a Volumes implementation for cinder v2.
// blockstorage is the gophercloud service client used for all API calls.
type VolumesV2 struct {
blockstorage *gophercloud.ServiceClient
opts BlockStorageOpts
}
// VolumesV3 is a Volumes implementation for cinder v3.
// blockstorage is the gophercloud service client used for all API calls.
type VolumesV3 struct {
blockstorage *gophercloud.ServiceClient
opts BlockStorageOpts
}
// Volume stores information about a single volume, normalized across the
// cinder v1/v2/v3 responses.
type Volume struct {
// ID of the instance, to which this volume is attached. "" if not attached
AttachedServerID string
// Device file path
AttachedDevice string
// availabilityZone is which availability zone the volume is in
AvailabilityZone string
// Unique identifier for the volume.
ID string
// Human-readable display name for the volume.
Name string
// Current status of the volume.
Status string
// Volume size in GB
Size int
}
// volumeCreateOpts is the version-independent set of parameters accepted by
// createVolume; each implementation maps it onto its API's CreateOpts.
type volumeCreateOpts struct {
Size int
Availability string
Name string
VolumeType string
Metadata map[string]string
}
// Compile-time assertion that *OpenStack implements cloudprovider.PVLabeler.
var _ cloudprovider.PVLabeler = (*OpenStack)(nil)
const (
// cinder volume status strings as reported by the API
volumeAvailableStatus = "available"
volumeInUseStatus = "in-use"
volumeDeletedStatus = "deleted"
volumeErrorStatus = "error"
// On some environments, we need to query the metadata service in order
// to locate disks. We'll use the Newton version, which includes device
// metadata.
newtonMetadataVersion = "2016-06-30"
)
// createVolume creates a cinder v1 volume and returns its ID and
// availability zone. The call duration is always recorded, success or not.
func (volumes *VolumesV1) createVolume(opts volumeCreateOpts) (string, string, error) {
    begin := time.Now()
    vol, err := volumes_v1.Create(volumes.blockstorage, volumes_v1.CreateOpts{
        Name:             opts.Name,
        Size:             opts.Size,
        VolumeType:       opts.VolumeType,
        AvailabilityZone: opts.Availability,
        Metadata:         opts.Metadata,
    }).Extract()
    recordOpenstackOperationMetric("create_v1_volume", time.Since(begin).Seconds(), err)
    if err != nil {
        return "", "", err
    }
    return vol.ID, vol.AvailabilityZone, nil
}
// createVolume creates a cinder v2 volume and returns its ID and
// availability zone. The call duration is always recorded, success or not.
func (volumes *VolumesV2) createVolume(opts volumeCreateOpts) (string, string, error) {
    begin := time.Now()
    vol, err := volumes_v2.Create(volumes.blockstorage, volumes_v2.CreateOpts{
        Name:             opts.Name,
        Size:             opts.Size,
        VolumeType:       opts.VolumeType,
        AvailabilityZone: opts.Availability,
        Metadata:         opts.Metadata,
    }).Extract()
    recordOpenstackOperationMetric("create_v2_volume", time.Since(begin).Seconds(), err)
    if err != nil {
        return "", "", err
    }
    return vol.ID, vol.AvailabilityZone, nil
}
// createVolume creates a cinder v3 volume and returns its ID and
// availability zone. The call duration is always recorded, success or not.
func (volumes *VolumesV3) createVolume(opts volumeCreateOpts) (string, string, error) {
    begin := time.Now()
    vol, err := volumes_v3.Create(volumes.blockstorage, volumes_v3.CreateOpts{
        Name:             opts.Name,
        Size:             opts.Size,
        VolumeType:       opts.VolumeType,
        AvailabilityZone: opts.Availability,
        Metadata:         opts.Metadata,
    }).Extract()
    recordOpenstackOperationMetric("create_v3_volume", time.Since(begin).Seconds(), err)
    if err != nil {
        return "", "", err
    }
    return vol.ID, vol.AvailabilityZone, nil
}
// getVolume fetches a cinder v1 volume by ID and converts it to the generic
// Volume type. Returns ErrNotFound when the volume does not exist.
func (volumes *VolumesV1) getVolume(volumeID string) (Volume, error) {
	startTime := time.Now()
	volumeV1, err := volumes_v1.Get(volumes.blockstorage, volumeID).Extract()
	timeTaken := time.Since(startTime).Seconds()
	recordOpenstackOperationMetric("get_v1_volume", timeTaken, err)
	if err != nil {
		if isNotFound(err) {
			return Volume{}, ErrNotFound
		}
		return Volume{}, fmt.Errorf("error occurred getting volume by ID: %s, err: %v", volumeID, err)
	}
	volume := Volume{
		AvailabilityZone: volumeV1.AvailabilityZone,
		ID:               volumeV1.ID,
		Name:             volumeV1.Name,
		Status:           volumeV1.Status,
		Size:             volumeV1.Size,
	}
	if len(volumeV1.Attachments) > 0 && volumeV1.Attachments[0]["server_id"] != nil {
		// The v1 attachment payload is an untyped map; use comma-ok type
		// assertions so an unexpected value (e.g. a nil "device") cannot
		// panic the caller — the previous bare .(string) assertions could.
		if serverID, ok := volumeV1.Attachments[0]["server_id"].(string); ok {
			volume.AttachedServerID = serverID
		}
		if device, ok := volumeV1.Attachments[0]["device"].(string); ok {
			volume.AttachedDevice = device
		}
	}
	return volume, nil
}
// getVolume fetches a cinder v2 volume by ID and converts it to the generic
// Volume type. Returns ErrNotFound when the volume does not exist.
func (volumes *VolumesV2) getVolume(volumeID string) (Volume, error) {
	begin := time.Now()
	v, err := volumes_v2.Get(volumes.blockstorage, volumeID).Extract()
	recordOpenstackOperationMetric("get_v2_volume", time.Since(begin).Seconds(), err)
	if err != nil {
		if isNotFound(err) {
			return Volume{}, ErrNotFound
		}
		return Volume{}, fmt.Errorf("error occurred getting volume by ID: %s, err: %v", volumeID, err)
	}
	result := Volume{
		AvailabilityZone: v.AvailabilityZone,
		ID:               v.ID,
		Name:             v.Name,
		Status:           v.Status,
		Size:             v.Size,
	}
	// Only the first attachment is surfaced; kube volumes attach to one node.
	if len(v.Attachments) != 0 {
		result.AttachedServerID = v.Attachments[0].ServerID
		result.AttachedDevice = v.Attachments[0].Device
	}
	return result, nil
}
// getVolume fetches a cinder v3 volume by ID and converts it to the generic
// Volume type. Returns ErrNotFound when the volume does not exist.
func (volumes *VolumesV3) getVolume(volumeID string) (Volume, error) {
	begin := time.Now()
	v, err := volumes_v3.Get(volumes.blockstorage, volumeID).Extract()
	recordOpenstackOperationMetric("get_v3_volume", time.Since(begin).Seconds(), err)
	if err != nil {
		if isNotFound(err) {
			return Volume{}, ErrNotFound
		}
		return Volume{}, fmt.Errorf("error occurred getting volume by ID: %s, err: %v", volumeID, err)
	}
	result := Volume{
		AvailabilityZone: v.AvailabilityZone,
		ID:               v.ID,
		Name:             v.Name,
		Status:           v.Status,
		Size:             v.Size,
	}
	// Only the first attachment is surfaced; kube volumes attach to one node.
	if len(v.Attachments) != 0 {
		result.AttachedServerID = v.Attachments[0].ServerID
		result.AttachedDevice = v.Attachments[0].Device
	}
	return result, nil
}
// deleteVolume removes a cinder v1 volume by ID, recording the operation metric.
func (volumes *VolumesV1) deleteVolume(volumeID string) error {
	begin := time.Now()
	err := volumes_v1.Delete(volumes.blockstorage, volumeID).ExtractErr()
	recordOpenstackOperationMetric("delete_v1_volume", time.Since(begin).Seconds(), err)
	return err
}
// deleteVolume removes a cinder v2 volume by ID, recording the operation metric.
func (volumes *VolumesV2) deleteVolume(volumeID string) error {
	begin := time.Now()
	err := volumes_v2.Delete(volumes.blockstorage, volumeID, nil).ExtractErr()
	recordOpenstackOperationMetric("delete_v2_volume", time.Since(begin).Seconds(), err)
	return err
}
// deleteVolume removes a cinder v3 volume by ID, recording the operation metric.
func (volumes *VolumesV3) deleteVolume(volumeID string) error {
	begin := time.Now()
	err := volumes_v3.Delete(volumes.blockstorage, volumeID, nil).ExtractErr()
	recordOpenstackOperationMetric("delete_v3_volume", time.Since(begin).Seconds(), err)
	return err
}
// expandVolume grows a cinder v1 volume to newSize (GiB) via the
// volumeactions extension, recording the operation metric.
func (volumes *VolumesV1) expandVolume(volumeID string, newSize int) error {
	begin := time.Now()
	err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, volumeexpand.ExtendSizeOpts{
		NewSize: newSize,
	}).ExtractErr()
	recordOpenstackOperationMetric("expand_volume", time.Since(begin).Seconds(), err)
	return err
}
// expandVolume grows a cinder v2 volume to newSize (GiB) via the
// volumeactions extension, recording the operation metric.
func (volumes *VolumesV2) expandVolume(volumeID string, newSize int) error {
	begin := time.Now()
	err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, volumeexpand.ExtendSizeOpts{
		NewSize: newSize,
	}).ExtractErr()
	recordOpenstackOperationMetric("expand_volume", time.Since(begin).Seconds(), err)
	return err
}
// expandVolume grows a cinder v3 volume to newSize (GiB) via the
// volumeactions extension, recording the operation metric.
func (volumes *VolumesV3) expandVolume(volumeID string, newSize int) error {
	begin := time.Now()
	err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, volumeexpand.ExtendSizeOpts{
		NewSize: newSize,
	}).ExtractErr()
	recordOpenstackOperationMetric("expand_volume", time.Since(begin).Seconds(), err)
	return err
}
// OperationPending checks if there is an operation pending on a volume.
// It returns (pending, status, error): any status other than available,
// in-use, deleted, or error is treated as a pending operation.
func (os *OpenStack) OperationPending(diskName string) (bool, string, error) {
	volume, err := os.getVolume(diskName)
	if err != nil {
		return false, "", err
	}
	switch status := volume.Status; status {
	case volumeErrorStatus:
		return false, status, fmt.Errorf("status of volume %s is %s", diskName, status)
	case volumeAvailableStatus, volumeInUseStatus, volumeDeletedStatus:
		// Terminal states: nothing in flight.
		return false, status, nil
	default:
		return true, status, nil
	}
}
// AttachDisk attaches given cinder volume to the compute running kubelet.
// Returns the volume ID on success. If the volume is already attached to a
// different instance, a DanglingError is returned so the attach/detach
// controller can detach it from the stale node first.
func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) {
	volume, err := os.getVolume(volumeID)
	if err != nil {
		return "", err
	}
	cClient, err := os.NewComputeV2()
	if err != nil {
		return "", err
	}
	if volume.AttachedServerID != "" {
		if instanceID == volume.AttachedServerID {
			// Already attached to the requested instance: idempotent success.
			klog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID)
			return volume.ID, nil
		}
		nodeName, err := os.GetNodeNameByID(volume.AttachedServerID)
		attachErr := fmt.Sprintf("disk %s path %s is attached to a different instance (%s)", volumeID, volume.AttachedDevice, volume.AttachedServerID)
		if err != nil {
			// NOTE(review): the GetNodeNameByID error itself is discarded
			// here; only the generic attach message is surfaced.
			klog.Error(attachErr)
			return "", errors.New(attachErr)
		}
		// using volume.AttachedDevice may cause problems because cinder does not report device path correctly see issue #33128
		devicePath := volume.AttachedDevice
		danglingErr := volerr.NewDanglingError(attachErr, nodeName, devicePath)
		klog.V(2).Infof("Found dangling volume %s attached to node %s", volumeID, nodeName)
		return "", danglingErr
	}
	startTime := time.Now()
	// add read only flag here if possible spothanis
	_, err = volumeattach.Create(cClient, instanceID, &volumeattach.CreateOpts{
		VolumeID: volume.ID,
	}).Extract()
	timeTaken := time.Since(startTime).Seconds()
	recordOpenstackOperationMetric("attach_disk", timeTaken, err)
	if err != nil {
		return "", fmt.Errorf("failed to attach %s volume to %s compute: %v", volumeID, instanceID, err)
	}
	klog.V(2).Infof("Successfully attached %s volume to %s compute", volumeID, instanceID)
	return volume.ID, nil
}
// DetachDisk detaches given cinder volume from the compute running kubelet.
// A volume that is already "available" is treated as successfully detached.
func (os *OpenStack) DetachDisk(instanceID, volumeID string) error {
	volume, err := os.getVolume(volumeID)
	if err != nil {
		return err
	}
	switch {
	case volume.Status == volumeAvailableStatus:
		// "available" is fine since that means the volume is detached from instance already.
		klog.V(2).Infof("volume: %s has been detached from compute: %s ", volume.ID, instanceID)
		return nil
	case volume.Status != volumeInUseStatus:
		return fmt.Errorf("can not detach volume %s, its status is %s", volume.Name, volume.Status)
	}
	cClient, err := os.NewComputeV2()
	if err != nil {
		return err
	}
	if volume.AttachedServerID != instanceID {
		return fmt.Errorf("disk: %s has no attachments or is not attached to compute: %s", volume.Name, instanceID)
	}
	// This is a blocking call and effects kubelet's performance directly.
	// We should consider kicking it out into a separate routine, if it is bad.
	begin := time.Now()
	err = volumeattach.Delete(cClient, instanceID, volume.ID).ExtractErr()
	recordOpenstackOperationMetric("detach_disk", time.Since(begin).Seconds(), err)
	if err != nil {
		return fmt.Errorf("failed to delete volume %s from compute %s attached %v", volume.ID, instanceID, err)
	}
	klog.V(2).Infof("Successfully detached volume: %s from compute: %s", volume.ID, instanceID)
	return nil
}
// ExpandVolume expands the size of specific cinder volume (in GiB).
// On success it returns the rounded-up requested size; on any failure it
// returns oldSize unchanged along with the error.
func (os *OpenStack) ExpandVolume(volumeID string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
	volume, err := os.getVolume(volumeID)
	if err != nil {
		return oldSize, err
	}
	if volume.Status != volumeAvailableStatus {
		// cinder volume can not be expanded if its status is not available
		if volume.Status == volumeInUseStatus {
			// Send a nice event when the volume is used
			return oldSize, fmt.Errorf("PVC used by a Pod can not be expanded, please ensure the PVC is not used by any Pod and is fully detached from a node")
		}
		// Send not so nice event when the volume is in any other state (deleted, error)
		return oldSize, fmt.Errorf("volume in state %q can not be expanded, it must be \"available\"", volume.Status)
	}
	// Cinder works with gigabytes, convert to GiB with rounding up
	volSizeGiB, err := volumehelpers.RoundUpToGiBInt(newSize)
	if err != nil {
		return oldSize, err
	}
	newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", volSizeGiB))
	// if volume size equals to or greater than the newSize, return nil
	// (expansion is a no-op; report the rounded size as already satisfied)
	if volume.Size >= volSizeGiB {
		return newSizeQuant, nil
	}
	volumes, err := os.volumeService("")
	if err != nil {
		return oldSize, err
	}
	err = volumes.expandVolume(volumeID, volSizeGiB)
	if err != nil {
		return oldSize, err
	}
	return newSizeQuant, nil
}
// getVolume retrieves Volume by its ID.
func (os *OpenStack) getVolume(volumeID string) (Volume, error) {
	// Resolve the versioned cinder backend ("" selects the configured default).
	svc, err := os.volumeService("")
	if err != nil {
		return Volume{}, fmt.Errorf("unable to initialize cinder client for region: %s, err: %v", os.region, err)
	}
	return svc.getVolume(volumeID)
}
// CreateVolume creates a volume of given size (in GiB).
// It returns (volumeID, availabilityZone, region, ignoreVolumeAZ, error).
func (os *OpenStack) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, string, bool, error) {
	ignoreAZ := os.bsOpts.IgnoreVolumeAZ
	svc, err := os.volumeService("")
	if err != nil {
		return "", "", "", ignoreAZ, fmt.Errorf("unable to initialize cinder client for region: %s, err: %v", os.region, err)
	}
	opts := volumeCreateOpts{
		Name:         name,
		Size:         size,
		VolumeType:   vtype,
		Availability: availability,
	}
	if tags != nil {
		opts.Metadata = *tags
	}
	volumeID, volumeAZ, err := svc.createVolume(opts)
	if err != nil {
		return "", "", "", ignoreAZ, fmt.Errorf("failed to create a %d GB volume: %v", size, err)
	}
	klog.Infof("Created volume %v in Availability Zone: %v Region: %v Ignore volume AZ: %v", volumeID, volumeAZ, os.region, ignoreAZ)
	return volumeID, volumeAZ, os.region, ignoreAZ, nil
}
// GetDevicePathBySerialID returns the path of an attached block storage volume, specified by its id.
func (os *OpenStack) GetDevicePathBySerialID(volumeID string) string {
	// Build a list of candidate device paths under /dev/disk/by-id/.
	// Certain Nova drivers will set the disk serial ID, including the Cinder volume id.
	//
	// Older hypervisors truncate the serial to 20 characters, but newer
	// OpenStack (observed on Train) exposes the full volume id, e.g.
	// /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_<full-uuid>, so both the
	// truncated and the untruncated forms must be matched.
	truncatedID := volumeID
	if len(truncatedID) > 20 {
		// Guard the slice so IDs shorter than 20 characters cannot panic.
		truncatedID = truncatedID[:20]
	}
	candidateDeviceNodes := []string{
		// KVM
		fmt.Sprintf("virtio-%s", truncatedID),
		fmt.Sprintf("virtio-%s", volumeID),
		// KVM virtio-scsi
		fmt.Sprintf("scsi-0QEMU_QEMU_HARDDISK_%s", truncatedID),
		fmt.Sprintf("scsi-0QEMU_QEMU_HARDDISK_%s", volumeID),
		// ESXi
		fmt.Sprintf("wwn-0x%s", strings.Replace(volumeID, "-", "", -1)),
	}
	files, _ := ioutil.ReadDir("/dev/disk/by-id/")
	for _, f := range files {
		for _, c := range candidateDeviceNodes {
			if c == f.Name() {
				klog.V(4).Infof("Found disk attached as %q; full devicepath: %s\n", f.Name(), path.Join("/dev/disk/by-id/", f.Name()))
				return path.Join("/dev/disk/by-id/", f.Name())
			}
		}
	}
	klog.V(4).Infof("Failed to find device for the volumeID: %q by serial ID", volumeID)
	return ""
}
// getDevicePathFromInstanceMetadata resolves the local device path for the
// given Cinder volume by querying the Nova metadata service (Newton version,
// which includes per-device metadata). Returns "" when the device cannot be
// determined unambiguously.
func (os *OpenStack) getDevicePathFromInstanceMetadata(volumeID string) string {
	// Nova Hyper-V hosts cannot override disk SCSI IDs. In order to locate
	// volumes, we're querying the metadata service. Note that the Hyper-V
	// driver will include device metadata for untagged volumes as well.
	//
	// We're avoiding using cached metadata (or the configdrive),
	// relying on the metadata service.
	instanceMetadata, err := getMetadataFromMetadataService(
		newtonMetadataVersion)
	if err != nil {
		klog.V(4).Infof(
			"Could not retrieve instance metadata. Error: %v", err)
		return ""
	}
	for _, device := range instanceMetadata.Devices {
		if device.Type == "disk" && device.Serial == volumeID {
			klog.V(4).Infof(
				"Found disk metadata for volumeID %q. Bus: %q, Address: %q",
				volumeID, device.Bus, device.Address)
			// Match the bus/address reported by Nova against the kernel's
			// by-path symlinks.
			diskPattern := fmt.Sprintf(
				"/dev/disk/by-path/*-%s-%s",
				device.Bus, device.Address)
			diskPaths, err := filepath.Glob(diskPattern)
			if err != nil {
				klog.Errorf(
					"could not retrieve disk path for volumeID: %q. Error filepath.Glob(%q): %v",
					volumeID, diskPattern, err)
				return ""
			}
			// Exactly one match is required; anything else is ambiguous.
			if len(diskPaths) == 1 {
				return diskPaths[0]
			}
			klog.Errorf(
				"expecting to find one disk path for volumeID %q, found %d: %v",
				volumeID, len(diskPaths), diskPaths)
			return ""
		}
	}
	klog.V(4).Infof(
		"Could not retrieve device metadata for volumeID: %q", volumeID)
	return ""
}
// GetDevicePath returns the path of an attached block storage volume, specified by its id.
func (os *OpenStack) GetDevicePath(volumeID string) string {
	// Prefer lookup by disk serial ID; fall back to the metadata service.
	if devicePath := os.GetDevicePathBySerialID(volumeID); devicePath != "" {
		return devicePath
	}
	devicePath := os.getDevicePathFromInstanceMetadata(volumeID)
	if devicePath == "" {
		klog.Warningf("Failed to find device for the volumeID: %q", volumeID)
	}
	return devicePath
}
// DeleteVolume deletes a volume given volume name.
// Deleting a volume that is still attached yields DeletedVolumeInUseError.
func (os *OpenStack) DeleteVolume(volumeID string) error {
	used, err := os.diskIsUsed(volumeID)
	if err != nil {
		return err
	}
	if used {
		return volerr.NewDeletedVolumeInUseError(fmt.Sprintf("Cannot delete the volume %q, it's still attached to a node", volumeID))
	}
	svc, err := os.volumeService("")
	if err != nil {
		return fmt.Errorf("unable to initialize cinder client for region: %s, err: %v", os.region, err)
	}
	return svc.deleteVolume(volumeID)
}
// GetAttachmentDiskPath gets device path of attached volume to the compute running kubelet, as known by cinder
func (os *OpenStack) GetAttachmentDiskPath(instanceID, volumeID string) (string, error) {
	// See issue #33128 - Cinder does not always tell you the right device path, as such
	// we must only use this value as a last resort.
	volume, err := os.getVolume(volumeID)
	if err != nil {
		return "", err
	}
	if volume.Status != volumeInUseStatus {
		return "", fmt.Errorf("can not get device path of volume %s, its status is %s ", volume.Name, volume.Status)
	}
	switch {
	case volume.AttachedServerID == "":
		return "", fmt.Errorf("volume %s has no ServerId", volumeID)
	case volume.AttachedServerID != instanceID:
		return "", fmt.Errorf("disk %q is attached to a different compute: %q, should be detached before proceeding", volumeID, volume.AttachedServerID)
	default:
		// Attachment[0]["device"] points to the device path
		// see http://developer.openstack.org/api-ref-blockstorage-v1.html
		return volume.AttachedDevice, nil
	}
}
// DiskIsAttached queries if a volume is attached to a compute instance
func (os *OpenStack) DiskIsAttached(instanceID, volumeID string) (bool, error) {
	if instanceID == "" {
		klog.Warningf("calling DiskIsAttached with empty instanceid: %s %s", instanceID, volumeID)
	}
	volume, err := os.getVolume(volumeID)
	switch {
	case err == ErrNotFound:
		// Volume does not exists, it can't be attached.
		return false, nil
	case err != nil:
		return false, err
	}
	return instanceID == volume.AttachedServerID, nil
}
// DiskIsAttachedByName queries if a volume is attached to a compute instance by name
func (os *OpenStack) DiskIsAttachedByName(nodeName types.NodeName, volumeID string) (bool, string, error) {
	cClient, err := os.NewComputeV2()
	if err != nil {
		return false, "", err
	}
	srv, err := getServerByName(cClient, nodeName)
	switch {
	case err == ErrNotFound:
		// instance not found anymore in cloudprovider, assume that cinder is detached
		return false, "", nil
	case err != nil:
		return false, "", err
	}
	// Normalize to the bare server ID (keep only the part after the last '/').
	instanceID := "/" + srv.ID
	if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
		instanceID = instanceID[ind+1:]
	}
	attached, err := os.DiskIsAttached(instanceID, volumeID)
	return attached, instanceID, err
}
// DisksAreAttached queries if a list of volumes are attached to a compute instance
func (os *OpenStack) DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error) {
	attached := make(map[string]bool, len(volumeIDs))
	for _, volumeID := range volumeIDs {
		isAttached, err := os.DiskIsAttached(instanceID, volumeID)
		if err != nil && err != ErrNotFound {
			// Conservatively report the volume as attached when the check fails.
			attached[volumeID] = true
			continue
		}
		attached[volumeID] = isAttached
	}
	return attached, nil
}
// DisksAreAttachedByName queries if a list of volumes are attached to a compute instance by name
func (os *OpenStack) DisksAreAttachedByName(nodeName types.NodeName, volumeIDs []string) (map[string]bool, error) {
	attached := make(map[string]bool, len(volumeIDs))
	cClient, err := os.NewComputeV2()
	if err != nil {
		return attached, err
	}
	srv, err := getServerByName(cClient, nodeName)
	if err != nil {
		if err != ErrNotFound {
			return attached, err
		}
		// instance not found anymore, mark all volumes as detached
		for _, volumeID := range volumeIDs {
			attached[volumeID] = false
		}
		return attached, nil
	}
	// Normalize to the bare server ID (keep only the part after the last '/').
	instanceID := "/" + srv.ID
	if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
		instanceID = instanceID[ind+1:]
	}
	return os.DisksAreAttached(instanceID, volumeIDs)
}
// diskIsUsed reports whether the disk is attached to any node.
func (os *OpenStack) diskIsUsed(volumeID string) (bool, error) {
	vol, err := os.getVolume(volumeID)
	if err != nil {
		return false, err
	}
	return vol.AttachedServerID != "", nil
}
// ShouldTrustDevicePath queries if we should trust the cinder provide deviceName, See issue #33128.
// Controlled by the TrustDevicePath block-storage option.
func (os *OpenStack) ShouldTrustDevicePath() bool {
	return os.bsOpts.TrustDevicePath
}
// NodeVolumeAttachLimit specifies number of cinder volumes that can be attached to this node.
// The value comes straight from the configured block-storage options.
func (os *OpenStack) NodeVolumeAttachLimit() int {
	return os.bsOpts.NodeVolumeAttachLimit
}
// GetLabelsForVolume implements PVLabeler.GetLabelsForVolume
func (os *OpenStack) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) {
	// Ignore if not Cinder.
	if pv.Spec.Cinder == nil {
		return nil, nil
	}
	// Ignore any volumes that are being provisioned
	if pv.Spec.Cinder.VolumeID == cloudvolume.ProvisionedVolumeName {
		return nil, nil
	}
	// if volume az is to be ignored we should return nil from here
	if os.bsOpts.IgnoreVolumeAZ {
		return nil, nil
	}
	// Get Volume
	volume, err := os.getVolume(pv.Spec.Cinder.VolumeID)
	if err != nil {
		return nil, err
	}
	// Construct Volume Labels
	volumeLabels := map[string]string{}
	if zone := volume.AvailabilityZone; zone != "" {
		volumeLabels[v1.LabelFailureDomainBetaZone] = zone
	}
	if region := os.region; region != "" {
		volumeLabels[v1.LabelFailureDomainBetaRegion] = region
	}
	klog.V(4).Infof("The Volume %s has labels %v", pv.Spec.Cinder.VolumeID, volumeLabels)
	return volumeLabels, nil
}
// recordOpenstackOperationMetric records openstack operation metrics
func recordOpenstackOperationMetric(operation string, timeTaken float64, err error) {
if err != nil {
openstackAPIRequestErrors.With(metrics.Labels{"request": operation}).Inc()
} else {
openstackOperationsLatency.With(metrics.Labels{"request": operation}).Observe(timeTaken)
}
}
Fix Cinder volume detection on OpenStack Train.
Newer OpenStack releases no longer truncate the Cinder volume ID to 20 characters when exposing the disk serial; a device link such as
/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_033fa19a-a5e3-445a-8631-3e9349e540e5
was observed on an OpenStack Train node, so the full-length ID must also be matched.
// +build !providerless
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openstack
import (
"context"
"errors"
"fmt"
"io/ioutil"
"path"
"path/filepath"
"strings"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
cloudprovider "k8s.io/cloud-provider"
cloudvolume "k8s.io/cloud-provider/volume"
volerr "k8s.io/cloud-provider/volume/errors"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
"k8s.io/component-base/metrics"
"github.com/gophercloud/gophercloud"
volumeexpand "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions"
volumes_v1 "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes"
volumes_v2 "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
volumes_v3 "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes"
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach"
"k8s.io/klog/v2"
)
// volumeService abstracts the cinder API version (v1/v2/v3) used for
// volume CRUD operations.
type volumeService interface {
	// createVolume provisions a volume and returns (volumeID, availabilityZone, error).
	createVolume(opts volumeCreateOpts) (string, string, error)
	// getVolume retrieves a volume by its ID.
	getVolume(volumeID string) (Volume, error)
	// deleteVolume removes a volume by its ID.
	deleteVolume(volumeName string) error
	// expandVolume grows a volume to newSize (GiB).
	expandVolume(volumeID string, newSize int) error
}
// VolumesV1 is a Volumes implementation for cinder v1.
type VolumesV1 struct {
	// blockstorage is the gophercloud client for the cinder v1 endpoint.
	blockstorage *gophercloud.ServiceClient
	// opts holds the configured block-storage options.
	opts BlockStorageOpts
}
// VolumesV2 is a Volumes implementation for cinder v2.
type VolumesV2 struct {
	// blockstorage is the gophercloud client for the cinder v2 endpoint.
	blockstorage *gophercloud.ServiceClient
	// opts holds the configured block-storage options.
	opts BlockStorageOpts
}
// VolumesV3 is a Volumes implementation for cinder v3.
type VolumesV3 struct {
	// blockstorage is the gophercloud client for the cinder v3 endpoint.
	blockstorage *gophercloud.ServiceClient
	// opts holds the configured block-storage options.
	opts BlockStorageOpts
}
// Volume stores information about a single volume, normalized across the
// cinder v1/v2/v3 API representations.
type Volume struct {
	// ID of the instance, to which this volume is attached. "" if not attached
	AttachedServerID string
	// Device file path
	AttachedDevice string
	// availabilityZone is which availability zone the volume is in
	AvailabilityZone string
	// Unique identifier for the volume.
	ID string
	// Human-readable display name for the volume.
	Name string
	// Current status of the volume.
	Status string
	// Volume size in GB
	Size int
}
// volumeCreateOpts carries the version-independent parameters for creating
// a cinder volume; each volumeService maps it to its API's CreateOpts.
type volumeCreateOpts struct {
	Size         int
	Availability string
	Name         string
	VolumeType   string
	Metadata     map[string]string
}
// implements PVLabeler (compile-time interface check).
var _ cloudprovider.PVLabeler = (*OpenStack)(nil)

const (
	// Cinder volume status values used to gate attach/detach/expand operations.
	volumeAvailableStatus = "available"
	volumeInUseStatus     = "in-use"
	volumeDeletedStatus   = "deleted"
	volumeErrorStatus     = "error"

	// On some environments, we need to query the metadata service in order
	// to locate disks. We'll use the Newton version, which includes device
	// metadata.
	newtonMetadataVersion = "2016-06-30"
)
// createVolume provisions a cinder v1 volume and returns its ID and
// availability zone. The operation latency/error is recorded as a metric.
func (volumes *VolumesV1) createVolume(opts volumeCreateOpts) (string, string, error) {
	begin := time.Now()
	vol, err := volumes_v1.Create(volumes.blockstorage, volumes_v1.CreateOpts{
		Name:             opts.Name,
		Size:             opts.Size,
		VolumeType:       opts.VolumeType,
		AvailabilityZone: opts.Availability,
		Metadata:         opts.Metadata,
	}).Extract()
	recordOpenstackOperationMetric("create_v1_volume", time.Since(begin).Seconds(), err)
	if err != nil {
		return "", "", err
	}
	return vol.ID, vol.AvailabilityZone, nil
}
// createVolume provisions a cinder v2 volume and returns its ID and
// availability zone. The operation latency/error is recorded as a metric.
func (volumes *VolumesV2) createVolume(opts volumeCreateOpts) (string, string, error) {
	begin := time.Now()
	vol, err := volumes_v2.Create(volumes.blockstorage, volumes_v2.CreateOpts{
		Name:             opts.Name,
		Size:             opts.Size,
		VolumeType:       opts.VolumeType,
		AvailabilityZone: opts.Availability,
		Metadata:         opts.Metadata,
	}).Extract()
	recordOpenstackOperationMetric("create_v2_volume", time.Since(begin).Seconds(), err)
	if err != nil {
		return "", "", err
	}
	return vol.ID, vol.AvailabilityZone, nil
}
// createVolume provisions a cinder v3 volume and returns its ID and
// availability zone. The operation latency/error is recorded as a metric.
func (volumes *VolumesV3) createVolume(opts volumeCreateOpts) (string, string, error) {
	begin := time.Now()
	vol, err := volumes_v3.Create(volumes.blockstorage, volumes_v3.CreateOpts{
		Name:             opts.Name,
		Size:             opts.Size,
		VolumeType:       opts.VolumeType,
		AvailabilityZone: opts.Availability,
		Metadata:         opts.Metadata,
	}).Extract()
	recordOpenstackOperationMetric("create_v3_volume", time.Since(begin).Seconds(), err)
	if err != nil {
		return "", "", err
	}
	return vol.ID, vol.AvailabilityZone, nil
}
// getVolume fetches a cinder v1 volume by ID and converts it to the generic
// Volume type. Returns ErrNotFound when the volume does not exist.
func (volumes *VolumesV1) getVolume(volumeID string) (Volume, error) {
	startTime := time.Now()
	volumeV1, err := volumes_v1.Get(volumes.blockstorage, volumeID).Extract()
	timeTaken := time.Since(startTime).Seconds()
	recordOpenstackOperationMetric("get_v1_volume", timeTaken, err)
	if err != nil {
		if isNotFound(err) {
			return Volume{}, ErrNotFound
		}
		return Volume{}, fmt.Errorf("error occurred getting volume by ID: %s, err: %v", volumeID, err)
	}
	volume := Volume{
		AvailabilityZone: volumeV1.AvailabilityZone,
		ID:               volumeV1.ID,
		Name:             volumeV1.Name,
		Status:           volumeV1.Status,
		Size:             volumeV1.Size,
	}
	if len(volumeV1.Attachments) > 0 && volumeV1.Attachments[0]["server_id"] != nil {
		// The v1 attachment payload is an untyped map; use comma-ok type
		// assertions so an unexpected value (e.g. a nil "device") cannot
		// panic the caller — the previous bare .(string) assertions could.
		if serverID, ok := volumeV1.Attachments[0]["server_id"].(string); ok {
			volume.AttachedServerID = serverID
		}
		if device, ok := volumeV1.Attachments[0]["device"].(string); ok {
			volume.AttachedDevice = device
		}
	}
	return volume, nil
}
// getVolume fetches a cinder v2 volume by ID and converts it to the generic
// Volume type. Returns ErrNotFound when the volume does not exist.
func (volumes *VolumesV2) getVolume(volumeID string) (Volume, error) {
	begin := time.Now()
	v, err := volumes_v2.Get(volumes.blockstorage, volumeID).Extract()
	recordOpenstackOperationMetric("get_v2_volume", time.Since(begin).Seconds(), err)
	if err != nil {
		if isNotFound(err) {
			return Volume{}, ErrNotFound
		}
		return Volume{}, fmt.Errorf("error occurred getting volume by ID: %s, err: %v", volumeID, err)
	}
	result := Volume{
		AvailabilityZone: v.AvailabilityZone,
		ID:               v.ID,
		Name:             v.Name,
		Status:           v.Status,
		Size:             v.Size,
	}
	// Only the first attachment is surfaced; kube volumes attach to one node.
	if len(v.Attachments) != 0 {
		result.AttachedServerID = v.Attachments[0].ServerID
		result.AttachedDevice = v.Attachments[0].Device
	}
	return result, nil
}
// getVolume fetches a cinder v3 volume by ID and converts it to the generic
// Volume type. Returns ErrNotFound when the volume does not exist.
func (volumes *VolumesV3) getVolume(volumeID string) (Volume, error) {
	begin := time.Now()
	v, err := volumes_v3.Get(volumes.blockstorage, volumeID).Extract()
	recordOpenstackOperationMetric("get_v3_volume", time.Since(begin).Seconds(), err)
	if err != nil {
		if isNotFound(err) {
			return Volume{}, ErrNotFound
		}
		return Volume{}, fmt.Errorf("error occurred getting volume by ID: %s, err: %v", volumeID, err)
	}
	result := Volume{
		AvailabilityZone: v.AvailabilityZone,
		ID:               v.ID,
		Name:             v.Name,
		Status:           v.Status,
		Size:             v.Size,
	}
	// Only the first attachment is surfaced; kube volumes attach to one node.
	if len(v.Attachments) != 0 {
		result.AttachedServerID = v.Attachments[0].ServerID
		result.AttachedDevice = v.Attachments[0].Device
	}
	return result, nil
}
// deleteVolume removes a cinder v1 volume by ID, recording the operation metric.
func (volumes *VolumesV1) deleteVolume(volumeID string) error {
	begin := time.Now()
	err := volumes_v1.Delete(volumes.blockstorage, volumeID).ExtractErr()
	recordOpenstackOperationMetric("delete_v1_volume", time.Since(begin).Seconds(), err)
	return err
}
// deleteVolume removes a cinder v2 volume by ID, recording the operation metric.
func (volumes *VolumesV2) deleteVolume(volumeID string) error {
	begin := time.Now()
	err := volumes_v2.Delete(volumes.blockstorage, volumeID, nil).ExtractErr()
	recordOpenstackOperationMetric("delete_v2_volume", time.Since(begin).Seconds(), err)
	return err
}
// deleteVolume removes a cinder v3 volume by ID, recording the operation metric.
func (volumes *VolumesV3) deleteVolume(volumeID string) error {
	begin := time.Now()
	err := volumes_v3.Delete(volumes.blockstorage, volumeID, nil).ExtractErr()
	recordOpenstackOperationMetric("delete_v3_volume", time.Since(begin).Seconds(), err)
	return err
}
// expandVolume grows a cinder v1 volume to newSize (GiB) via the
// volumeactions extension, recording the operation metric.
func (volumes *VolumesV1) expandVolume(volumeID string, newSize int) error {
	begin := time.Now()
	err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, volumeexpand.ExtendSizeOpts{
		NewSize: newSize,
	}).ExtractErr()
	recordOpenstackOperationMetric("expand_volume", time.Since(begin).Seconds(), err)
	return err
}
// expandVolume grows a cinder v2 volume to newSize (GiB) via the
// volumeactions extension, recording the operation metric.
func (volumes *VolumesV2) expandVolume(volumeID string, newSize int) error {
	begin := time.Now()
	err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, volumeexpand.ExtendSizeOpts{
		NewSize: newSize,
	}).ExtractErr()
	recordOpenstackOperationMetric("expand_volume", time.Since(begin).Seconds(), err)
	return err
}
// expandVolume grows a cinder v3 volume to newSize (GiB) via the
// volumeactions extension, recording the operation metric.
func (volumes *VolumesV3) expandVolume(volumeID string, newSize int) error {
	begin := time.Now()
	err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, volumeexpand.ExtendSizeOpts{
		NewSize: newSize,
	}).ExtractErr()
	recordOpenstackOperationMetric("expand_volume", time.Since(begin).Seconds(), err)
	return err
}
// OperationPending checks if there is an operation pending on a volume.
// It returns (pending, status, error): any status other than available,
// in-use, deleted, or error is treated as a pending operation.
func (os *OpenStack) OperationPending(diskName string) (bool, string, error) {
	volume, err := os.getVolume(diskName)
	if err != nil {
		return false, "", err
	}
	switch status := volume.Status; status {
	case volumeErrorStatus:
		return false, status, fmt.Errorf("status of volume %s is %s", diskName, status)
	case volumeAvailableStatus, volumeInUseStatus, volumeDeletedStatus:
		// Terminal states: nothing in flight.
		return false, status, nil
	default:
		return true, status, nil
	}
}
// AttachDisk attaches given cinder volume to the compute running kubelet.
// Returns the volume ID on success. If the volume is already attached to a
// different instance, a DanglingError is returned so the attach/detach
// controller can detach it from the stale node first.
func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) {
	volume, err := os.getVolume(volumeID)
	if err != nil {
		return "", err
	}
	cClient, err := os.NewComputeV2()
	if err != nil {
		return "", err
	}
	if volume.AttachedServerID != "" {
		if instanceID == volume.AttachedServerID {
			// Already attached to the requested instance: idempotent success.
			klog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID)
			return volume.ID, nil
		}
		nodeName, err := os.GetNodeNameByID(volume.AttachedServerID)
		attachErr := fmt.Sprintf("disk %s path %s is attached to a different instance (%s)", volumeID, volume.AttachedDevice, volume.AttachedServerID)
		if err != nil {
			// NOTE(review): the GetNodeNameByID error itself is discarded
			// here; only the generic attach message is surfaced.
			klog.Error(attachErr)
			return "", errors.New(attachErr)
		}
		// using volume.AttachedDevice may cause problems because cinder does not report device path correctly see issue #33128
		devicePath := volume.AttachedDevice
		danglingErr := volerr.NewDanglingError(attachErr, nodeName, devicePath)
		klog.V(2).Infof("Found dangling volume %s attached to node %s", volumeID, nodeName)
		return "", danglingErr
	}
	startTime := time.Now()
	// add read only flag here if possible spothanis
	_, err = volumeattach.Create(cClient, instanceID, &volumeattach.CreateOpts{
		VolumeID: volume.ID,
	}).Extract()
	timeTaken := time.Since(startTime).Seconds()
	recordOpenstackOperationMetric("attach_disk", timeTaken, err)
	if err != nil {
		return "", fmt.Errorf("failed to attach %s volume to %s compute: %v", volumeID, instanceID, err)
	}
	klog.V(2).Infof("Successfully attached %s volume to %s compute", volumeID, instanceID)
	return volume.ID, nil
}
// DetachDisk detaches given cinder volume from the compute running kubelet.
// A volume that is already "available" is treated as successfully detached.
func (os *OpenStack) DetachDisk(instanceID, volumeID string) error {
	volume, err := os.getVolume(volumeID)
	if err != nil {
		return err
	}
	switch {
	case volume.Status == volumeAvailableStatus:
		// "available" is fine since that means the volume is detached from instance already.
		klog.V(2).Infof("volume: %s has been detached from compute: %s ", volume.ID, instanceID)
		return nil
	case volume.Status != volumeInUseStatus:
		return fmt.Errorf("can not detach volume %s, its status is %s", volume.Name, volume.Status)
	}
	cClient, err := os.NewComputeV2()
	if err != nil {
		return err
	}
	if volume.AttachedServerID != instanceID {
		return fmt.Errorf("disk: %s has no attachments or is not attached to compute: %s", volume.Name, instanceID)
	}
	// This is a blocking call and effects kubelet's performance directly.
	// We should consider kicking it out into a separate routine, if it is bad.
	begin := time.Now()
	err = volumeattach.Delete(cClient, instanceID, volume.ID).ExtractErr()
	recordOpenstackOperationMetric("detach_disk", time.Since(begin).Seconds(), err)
	if err != nil {
		return fmt.Errorf("failed to delete volume %s from compute %s attached %v", volume.ID, instanceID, err)
	}
	klog.V(2).Infof("Successfully detached volume: %s from compute: %s", volume.ID, instanceID)
	return nil
}
// ExpandVolume expands the size of specific cinder volume (in GiB).
// On success it returns the rounded-up requested size; on any failure it
// returns oldSize unchanged along with the error.
func (os *OpenStack) ExpandVolume(volumeID string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
	volume, err := os.getVolume(volumeID)
	if err != nil {
		return oldSize, err
	}
	if volume.Status != volumeAvailableStatus {
		// cinder volume can not be expanded if its status is not available
		if volume.Status == volumeInUseStatus {
			// Send a nice event when the volume is used
			return oldSize, fmt.Errorf("PVC used by a Pod can not be expanded, please ensure the PVC is not used by any Pod and is fully detached from a node")
		}
		// Send not so nice event when the volume is in any other state (deleted, error)
		return oldSize, fmt.Errorf("volume in state %q can not be expanded, it must be \"available\"", volume.Status)
	}
	// Cinder works with gigabytes, convert to GiB with rounding up
	volSizeGiB, err := volumehelpers.RoundUpToGiBInt(newSize)
	if err != nil {
		return oldSize, err
	}
	newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", volSizeGiB))
	// if volume size equals to or greater than the newSize, return nil
	// (expansion is a no-op; report the rounded size as already satisfied)
	if volume.Size >= volSizeGiB {
		return newSizeQuant, nil
	}
	volumes, err := os.volumeService("")
	if err != nil {
		return oldSize, err
	}
	err = volumes.expandVolume(volumeID, volSizeGiB)
	if err != nil {
		return oldSize, err
	}
	return newSizeQuant, nil
}
// getVolume retrieves Volume by its ID.
// It initializes a cinder client for the configured region on every call.
func (os *OpenStack) getVolume(volumeID string) (Volume, error) {
volumes, err := os.volumeService("")
if err != nil {
return Volume{}, fmt.Errorf("unable to initialize cinder client for region: %s, err: %v", os.region, err)
}
return volumes.getVolume(volumeID)
}
// CreateVolume creates a volume of given size (in GiB).
// It returns the new volume's ID, its availability zone, the region it was
// created in, the IgnoreVolumeAZ setting, and any error encountered.
func (os *OpenStack) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, string, bool, error) {
volumes, err := os.volumeService("")
if err != nil {
return "", "", "", os.bsOpts.IgnoreVolumeAZ, fmt.Errorf("unable to initialize cinder client for region: %s, err: %v", os.region, err)
}
opts := volumeCreateOpts{
Name: name,
Size: size,
VolumeType: vtype,
Availability: availability,
}
// Optional metadata tags are attached to the volume at creation time.
if tags != nil {
opts.Metadata = *tags
}
volumeID, volumeAZ, err := volumes.createVolume(opts)
if err != nil {
return "", "", "", os.bsOpts.IgnoreVolumeAZ, fmt.Errorf("failed to create a %d GB volume: %v", size, err)
}
klog.Infof("Created volume %v in Availability Zone: %v Region: %v Ignore volume AZ: %v", volumeID, volumeAZ, os.region, os.bsOpts.IgnoreVolumeAZ)
return volumeID, volumeAZ, os.region, os.bsOpts.IgnoreVolumeAZ, nil
}
// GetDevicePathBySerialID returns the path of an attached block storage
// volume, specified by its id, by scanning /dev/disk/by-id for a symlink
// whose name embeds the volume ID as a disk serial. Returns "" when no
// candidate matches.
func (os *OpenStack) GetDevicePathBySerialID(volumeID string) string {
	// Build a list of candidate device paths.
	// Certain Nova drivers will set the disk serial ID, including the Cinder volume id.
	// Newer OpenStacks may not truncate the volumeID to 20 chars.
	candidateDeviceNodes := []string{
		// KVM
		fmt.Sprintf("virtio-%s", volumeID),
		// KVM virtio-scsi
		fmt.Sprintf("scsi-0QEMU_QEMU_HARDDISK_%s", volumeID),
		// ESXi
		fmt.Sprintf("wwn-0x%s", strings.Replace(volumeID, "-", "", -1)),
	}
	// Older hypervisors truncate the serial to 20 characters. Only add the
	// truncated candidates when the ID is actually longer than that;
	// volumeID[:20] on a shorter string would panic.
	if len(volumeID) > 20 {
		candidateDeviceNodes = append(candidateDeviceNodes,
			// KVM
			fmt.Sprintf("virtio-%s", volumeID[:20]),
			// KVM virtio-scsi
			fmt.Sprintf("scsi-0QEMU_QEMU_HARDDISK_%s", volumeID[:20]),
		)
	}
	// ReadDir errors are deliberately ignored: a missing directory simply
	// yields no files and we fall through to the not-found return.
	files, _ := ioutil.ReadDir("/dev/disk/by-id/")
	for _, f := range files {
		for _, c := range candidateDeviceNodes {
			if c == f.Name() {
				klog.V(4).Infof("Found disk attached as %q; full devicepath: %s\n", f.Name(), path.Join("/dev/disk/by-id/", f.Name()))
				return path.Join("/dev/disk/by-id/", f.Name())
			}
		}
	}
	klog.V(4).Infof("Failed to find device for the volumeID: %q by serial ID", volumeID)
	return ""
}
// getDevicePathFromInstanceMetadata locates an attached volume's device path
// by matching the volume ID against disk serials reported by the Nova
// metadata service, then globbing /dev/disk/by-path by bus and address.
// Returns "" when the metadata is unavailable or no unique path is found.
func (os *OpenStack) getDevicePathFromInstanceMetadata(volumeID string) string {
// Nova Hyper-V hosts cannot override disk SCSI IDs. In order to locate
// volumes, we're querying the metadata service. Note that the Hyper-V
// driver will include device metadata for untagged volumes as well.
//
// We're avoiding using cached metadata (or the configdrive),
// relying on the metadata service.
instanceMetadata, err := getMetadataFromMetadataService(
newtonMetadataVersion)
if err != nil {
klog.V(4).Infof(
"Could not retrieve instance metadata. Error: %v", err)
return ""
}
for _, device := range instanceMetadata.Devices {
if device.Type == "disk" && device.Serial == volumeID {
klog.V(4).Infof(
"Found disk metadata for volumeID %q. Bus: %q, Address: %q",
volumeID, device.Bus, device.Address)
diskPattern := fmt.Sprintf(
"/dev/disk/by-path/*-%s-%s",
device.Bus, device.Address)
diskPaths, err := filepath.Glob(diskPattern)
if err != nil {
klog.Errorf(
"could not retrieve disk path for volumeID: %q. Error filepath.Glob(%q): %v",
volumeID, diskPattern, err)
return ""
}
// Require exactly one match; anything else is ambiguous.
if len(diskPaths) == 1 {
return diskPaths[0]
}
klog.Errorf(
"expecting to find one disk path for volumeID %q, found %d: %v",
volumeID, len(diskPaths), diskPaths)
return ""
}
}
klog.V(4).Infof(
"Could not retrieve device metadata for volumeID: %q", volumeID)
return ""
}
// GetDevicePath returns the path of an attached block storage volume, specified by its id.
// It prefers the disk-serial lookup and falls back to the instance metadata
// service; an empty string is returned when neither finds the device.
func (os *OpenStack) GetDevicePath(volumeID string) string {
	if devicePath := os.GetDevicePathBySerialID(volumeID); devicePath != "" {
		return devicePath
	}
	devicePath := os.getDevicePathFromInstanceMetadata(volumeID)
	if devicePath == "" {
		klog.Warningf("Failed to find device for the volumeID: %q", volumeID)
	}
	return devicePath
}
// DeleteVolume deletes a volume given volume name.
// A volume that is still attached to a node is refused with a
// DeletedVolumeInUseError so callers can retry later.
func (os *OpenStack) DeleteVolume(volumeID string) error {
used, err := os.diskIsUsed(volumeID)
if err != nil {
return err
}
if used {
msg := fmt.Sprintf("Cannot delete the volume %q, it's still attached to a node", volumeID)
return volerr.NewDeletedVolumeInUseError(msg)
}
volumes, err := os.volumeService("")
if err != nil {
return fmt.Errorf("unable to initialize cinder client for region: %s, err: %v", os.region, err)
}
err = volumes.deleteVolume(volumeID)
return err
}
// GetAttachmentDiskPath gets device path of attached volume to the compute running kubelet, as known by cinder
func (os *OpenStack) GetAttachmentDiskPath(instanceID, volumeID string) (string, error) {
// See issue #33128 - Cinder does not always tell you the right device path, as such
// we must only use this value as a last resort.
volume, err := os.getVolume(volumeID)
if err != nil {
return "", err
}
// Only an in-use volume has a meaningful attachment device path.
if volume.Status != volumeInUseStatus {
return "", fmt.Errorf("can not get device path of volume %s, its status is %s ", volume.Name, volume.Status)
}
if volume.AttachedServerID != "" {
if instanceID == volume.AttachedServerID {
// Attachment[0]["device"] points to the device path
// see http://developer.openstack.org/api-ref-blockstorage-v1.html
return volume.AttachedDevice, nil
}
return "", fmt.Errorf("disk %q is attached to a different compute: %q, should be detached before proceeding", volumeID, volume.AttachedServerID)
}
return "", fmt.Errorf("volume %s has no ServerId", volumeID)
}
// DiskIsAttached queries if a volume is attached to a compute instance.
// A volume that no longer exists is reported as not attached, not as an error.
func (os *OpenStack) DiskIsAttached(instanceID, volumeID string) (bool, error) {
if instanceID == "" {
klog.Warningf("calling DiskIsAttached with empty instanceid: %s %s", instanceID, volumeID)
}
volume, err := os.getVolume(volumeID)
if err != nil {
if err == ErrNotFound {
// Volume does not exists, it can't be attached.
return false, nil
}
return false, err
}
return instanceID == volume.AttachedServerID, nil
}
// DiskIsAttachedByName queries if a volume is attached to a compute instance by name.
// It resolves the node name to a server ID first; a missing server is treated
// as "not attached". The instance ID is also returned for the caller's use.
func (os *OpenStack) DiskIsAttachedByName(nodeName types.NodeName, volumeID string) (bool, string, error) {
cClient, err := os.NewComputeV2()
if err != nil {
return false, "", err
}
srv, err := getServerByName(cClient, nodeName)
if err != nil {
if err == ErrNotFound {
// instance not found anymore in cloudprovider, assume that cinder is detached
return false, "", nil
}
return false, "", err
}
// Normalize the server ID by keeping only the segment after the last "/".
instanceID := "/" + srv.ID
if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
instanceID = instanceID[(ind + 1):]
}
attached, err := os.DiskIsAttached(instanceID, volumeID)
return attached, instanceID, err
}
// DisksAreAttached queries if a list of volumes are attached to a compute instance.
// On a lookup error (other than ErrNotFound) the volume is conservatively
// reported as attached; the function itself never returns a non-nil error.
func (os *OpenStack) DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error) {
attached := make(map[string]bool)
for _, volumeID := range volumeIDs {
isAttached, err := os.DiskIsAttached(instanceID, volumeID)
if err != nil && err != ErrNotFound {
attached[volumeID] = true
continue
}
attached[volumeID] = isAttached
}
return attached, nil
}
// DisksAreAttachedByName queries if a list of volumes are attached to a compute instance by name.
// When the server no longer exists every volume is reported as detached.
func (os *OpenStack) DisksAreAttachedByName(nodeName types.NodeName, volumeIDs []string) (map[string]bool, error) {
attached := make(map[string]bool)
cClient, err := os.NewComputeV2()
if err != nil {
return attached, err
}
srv, err := getServerByName(cClient, nodeName)
if err != nil {
if err == ErrNotFound {
// instance not found anymore, mark all volumes as detached
for _, volumeID := range volumeIDs {
attached[volumeID] = false
}
return attached, nil
}
return attached, err
}
// Normalize the server ID by keeping only the segment after the last "/".
instanceID := "/" + srv.ID
if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
instanceID = instanceID[(ind + 1):]
}
return os.DisksAreAttached(instanceID, volumeIDs)
}
// diskIsUsed reports whether a disk is attached to any node,
// i.e. whether cinder records a server ID for the volume.
func (os *OpenStack) diskIsUsed(volumeID string) (bool, error) {
volume, err := os.getVolume(volumeID)
if err != nil {
return false, err
}
return volume.AttachedServerID != "", nil
}
// ShouldTrustDevicePath queries if we should trust the cinder provide deviceName, See issue #33128
func (os *OpenStack) ShouldTrustDevicePath() bool {
return os.bsOpts.TrustDevicePath
}
// NodeVolumeAttachLimit specifies number of cinder volumes that can be attached to this node.
// The value comes straight from the block-storage options in the cloud config.
func (os *OpenStack) NodeVolumeAttachLimit() int {
return os.bsOpts.NodeVolumeAttachLimit
}
// GetLabelsForVolume implements PVLabeler.GetLabelsForVolume.
// It returns failure-domain zone/region labels for a Cinder-backed PV, or
// nil for non-Cinder volumes, volumes still being provisioned, and when
// IgnoreVolumeAZ is set.
func (os *OpenStack) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) {
// Ignore if not Cinder.
if pv.Spec.Cinder == nil {
return nil, nil
}
// Ignore any volumes that are being provisioned
if pv.Spec.Cinder.VolumeID == cloudvolume.ProvisionedVolumeName {
return nil, nil
}
// if volume az is to be ignored we should return nil from here
if os.bsOpts.IgnoreVolumeAZ {
return nil, nil
}
// Get Volume
volume, err := os.getVolume(pv.Spec.Cinder.VolumeID)
if err != nil {
return nil, err
}
// Construct Volume Labels
labels := make(map[string]string)
if volume.AvailabilityZone != "" {
labels[v1.LabelFailureDomainBetaZone] = volume.AvailabilityZone
}
if os.region != "" {
labels[v1.LabelFailureDomainBetaRegion] = os.region
}
klog.V(4).Infof("The Volume %s has labels %v", pv.Spec.Cinder.VolumeID, labels)
return labels, nil
}
// recordOpenstackOperationMetric records openstack operation metrics:
// a latency observation on success, or an error-count increment on failure.
func recordOpenstackOperationMetric(operation string, timeTaken float64, err error) {
if err != nil {
openstackAPIRequestErrors.With(metrics.Labels{"request": operation}).Inc()
} else {
openstackOperationsLatency.With(metrics.Labels{"request": operation}).Observe(timeTaken)
}
}
|
package main
import (
"image"
"image/color"
"io"
"os"
"time"
"github.com/lucasb-eyer/go-colorful"
"github.com/ninjasphere/go-ninja/api"
"github.com/ninjasphere/go-ninja/config"
"github.com/ninjasphere/go-ninja/logger"
"github.com/ninjasphere/go-ninja/model"
ledmodel "github.com/ninjasphere/sphere-go-led-controller/model"
"github.com/ninjasphere/sphere-go-led-controller/ui"
"github.com/ninjasphere/sphere-go-led-controller/util"
)
// Package-level logger for the LED controller.
var log = logger.GetLogger("sphere-go-led-controller")
// fps counts rendered frames per second for debug logging.
var fps Tick = Tick{
name: "Pane FPS",
}
// LedController drives the 16x16 LED matrix, switching between the pairing
// layout and the interactive control layout.
type LedController struct {
controlEnabled bool // control layout is rendered when true
controlRequested bool // caller asked for control mode
controlRendering bool // control layout is being built asynchronously
controlLayout *ui.PaneLayout
pairingLayout *ui.PairingLayout
conn *ninja.Connection
serial io.ReadWriteCloser // connection to the LED matrix hardware
waiting chan bool // signals an incoming RPC command to the render loop
}
// NewLedController opens the LED matrix connection, blanks the display,
// and exports the controller as a ninja service on the node-scoped topic.
// Note: a failed LED connection calls log.Fatalf and never returns.
func NewLedController(conn *ninja.Connection) (*LedController, error) {
s, err := util.GetLEDConnection()
if err != nil {
log.Fatalf("Failed to get connection to LED matrix: %s", err)
}
// Send a blank image to the led matrix
util.WriteLEDMatrix(image.NewRGBA(image.Rect(0, 0, 16, 16)), s)
controller := &LedController{
conn: conn,
pairingLayout: ui.NewPairingLayout(),
serial: s,
waiting: make(chan bool),
}
conn.MustExportService(controller, "$node/"+config.Serial()+"/led-controller", &model.ServiceAnnouncement{
Schema: "/service/led-controller",
})
return controller, nil
}
// start launches the render loop in a goroutine. Each iteration renders
// either the control layout (when enabled) or the pairing layout, and
// writes the frame to the matrix. A write that takes longer than 10s is
// treated as a hung device and the process exits.
func (c *LedController) start(enableControl bool) {
c.controlRequested = enableControl
frameWritten := make(chan bool)
go func() {
fps.start()
for {
fps.tick()
if c.controlEnabled {
// Good to go
image, wake, err := c.controlLayout.Render()
if err != nil {
log.Fatalf("Unable to render()", err)
}
// Write the frame asynchronously so we can time out on a stuck device.
go func() {
util.WriteLEDMatrix(image, c.serial)
frameWritten <- true
}()
select {
case <-frameWritten:
// All good.
case <-time.After(10 * time.Second):
log.Infof("Timeout writing to LED matrix. Quitting.")
os.Exit(1)
}
if wake != nil {
log.Infof("Waiting as the UI is asleep")
// Block until the UI wakes itself or an RPC command arrives.
select {
case <-wake:
log.Infof("UI woke up!")
case <-c.waiting:
log.Infof("Got a command from rpc...")
}
}
} else if c.controlRequested && !c.controlRendering {
// We want to display controls, so lets render the pane
c.controlRendering = true
go func() {
log.Infof("Starting control layout")
c.controlLayout = getPaneLayout(c.conn)
c.controlRendering = false
c.controlEnabled = true
log.Infof("Finished control layout")
}()
}
if c.controlRendering || !c.controlEnabled {
// We're either already controlling, or waiting for the pane to render
image, err := c.pairingLayout.Render()
if err != nil {
log.Fatalf("Unable to render()", err)
}
util.WriteLEDMatrix(image, c.serial)
}
}
}()
}
// EnableControl requests the interactive control layout; the render loop
// builds and switches to it on its next iteration.
func (c *LedController) EnableControl() error {
c.controlRequested = true
c.gotCommand()
return nil
}
// DisableControl switches the display back to the pairing layout and
// clears any pending control request.
func (c *LedController) DisableControl() error {
c.controlEnabled = false
c.controlRequested = false
c.gotCommand()
return nil
}
// PairingCodeRequest is the RPC payload for DisplayPairingCode.
type PairingCodeRequest struct {
Code string `json:"code"`
DisplayTime int `json:"displayTime"`
}
// DisplayPairingCode disables control mode and shows the pairing code
// on the matrix.
func (c *LedController) DisplayPairingCode(req *PairingCodeRequest) error {
c.DisableControl()
c.pairingLayout.ShowCode(req.Code)
c.gotCommand()
return nil
}
// ColorRequest is the RPC payload for DisplayColor; Color is a hex string.
type ColorRequest struct {
Color string `json:"color"`
DisplayTime int `json:"displayTime"`
}
// DisplayColor disables control mode and fills the matrix with the given
// hex color (e.g. "#FF0000"). An unparsable color string returns an error
// and leaves the display unchanged.
func (c *LedController) DisplayColor(req *ColorRequest) error {
	c.DisableControl()
	parsed, err := colorful.Hex(req.Color)
	if err != nil {
		return err
	}
	c.pairingLayout.ShowColor(parsed)
	c.gotCommand()
	return nil
}
// DisplayIcon disables control mode and shows the requested icon.
func (c *LedController) DisplayIcon(req *ledmodel.IconRequest) error {
c.DisableControl()
log.Infof("Displaying icon: %v", req)
c.pairingLayout.ShowIcon(req.Icon)
c.gotCommand()
return nil
}
// DisplayDrawing disables control mode and switches the pairing layout
// into free-drawing mode (pixels are set via Draw).
func (c *LedController) DisplayDrawing() error {
c.DisableControl()
c.pairingLayout.ShowDrawing()
return nil
}
// Draw disables control mode and forwards pixel updates to the pairing layout.
func (c *LedController) Draw(updates *[][]uint8) error {
c.DisableControl()
c.pairingLayout.Draw(updates)
return nil
}
// DisplayResetMode shows visual feedback for a factory-reset gesture:
// a mode-specific color (solid, or fading/shrinking while the gesture is
// held without Hold), or a loading animation for unknown modes.
func (c *LedController) DisplayResetMode(m *ledmodel.ResetMode) error {
c.DisableControl()
// Fade only when a duration is given and the user is not holding.
fade := m.Duration > 0 && !m.Hold
loading := false
var col color.Color
switch m.Mode {
case "reboot":
col, _ = colorful.Hex("#00FF00")
case "reset-userdata":
col, _ = colorful.Hex("#FFFF00")
case "reset-root":
col, _ = colorful.Hex("#FF0000")
default:
// Unrecognized mode: fall back to the loading animation.
loading = true
}
if loading {
c.pairingLayout.ShowIcon("loading.gif")
} else if fade {
c.pairingLayout.ShowFadingShrinkingColor(col, m.Duration)
} else {
c.pairingLayout.ShowColor(col)
}
c.gotCommand()
return nil
}
// DisplayUpdateProgress shows firmware/software update progress on the
// pairing layout. Unlike the other Display* methods it does not disable
// control mode or signal the render loop.
func (c *LedController) DisplayUpdateProgress(p *ledmodel.DisplayUpdateProgress) error {
c.pairingLayout.ShowUpdateProgress(p.Progress)
return nil
}
// gotCommand performs a non-blocking send on the waiting channel so a
// sleeping render loop wakes up; if nobody is listening it is a no-op.
func (c *LedController) gotCommand() {
select {
case c.waiting <- true:
default:
}
}
// getPaneLayout builds the hard-coded control pane layout (clock, weather,
// gestures, game of life, media, cert, heater/light/fan controls) and wakes it.
// Load from a config file instead...
func getPaneLayout(conn *ninja.Connection) *ui.PaneLayout {
layout, wake := ui.NewPaneLayout(false, conn)
layout.AddPane(ui.NewClockPane())
layout.AddPane(ui.NewWeatherPane(conn))
layout.AddPane(ui.NewGesturePane())
layout.AddPane(ui.NewGameOfLifePane())
layout.AddPane(ui.NewMediaPane(conn))
layout.AddPane(ui.NewCertPane(conn.GetMqttClient()))
//layout.AddPane(ui.NewTextScrollPane("Exit Music (For A Film)"))
heaterPane := ui.NewOnOffPane(util.ResolveImagePath("heater-off.png"), util.ResolveImagePath("heater-on.gif"), func(state bool) {
log.Debugf("Heater state: %t", state)
}, conn, "heater")
layout.AddPane(heaterPane)
brightnessPane := ui.NewLightPane(false, util.ResolveImagePath("light-off.png"), util.ResolveImagePath("light-on.png"), conn)
layout.AddPane(brightnessPane)
colorPane := ui.NewLightPane(true, util.ResolveImagePath("light-off.png"), util.ResolveImagePath("light-on.png"), conn)
layout.AddPane(colorPane)
fanPane := ui.NewOnOffPane(util.ResolveImagePath("fan-off.png"), util.ResolveImagePath("fan-on.gif"), func(state bool) {
log.Debugf("Fan state: %t", state)
}, conn, "fan")
layout.AddPane(fanPane)
// Drain the initial wake signal so the sender does not block.
go func() {
<-wake
}()
go layout.Wake()
return layout
}
// Tick is a simple per-second counter used for FPS-style debug logging.
// NOTE(review): count is incremented and reset from different goroutines
// without synchronization — likely a benign race for debug stats; confirm.
type Tick struct {
count int
name string
}
// tick increments the counter for the current one-second window.
func (t *Tick) tick() {
t.count++
}
// start launches a goroutine that logs and resets the counter every second.
func (t *Tick) start() {
go func() {
for {
time.Sleep(time.Second)
log.Debugf("%s - %d", t.name, t.count)
t.count = 0
}
}()
}
Export the LED service to $home/led-controller as well as to the local node path.
package main
import (
"image"
"image/color"
"io"
"os"
"time"
"github.com/lucasb-eyer/go-colorful"
"github.com/ninjasphere/go-ninja/api"
"github.com/ninjasphere/go-ninja/config"
"github.com/ninjasphere/go-ninja/logger"
"github.com/ninjasphere/go-ninja/model"
ledmodel "github.com/ninjasphere/sphere-go-led-controller/model"
"github.com/ninjasphere/sphere-go-led-controller/ui"
"github.com/ninjasphere/sphere-go-led-controller/util"
)
// Package-level logger for the LED controller.
var log = logger.GetLogger("sphere-go-led-controller")
// fps counts rendered frames per second for debug logging.
var fps Tick = Tick{
name: "Pane FPS",
}
// LedController drives the 16x16 LED matrix, switching between the pairing
// layout and the interactive control layout.
type LedController struct {
controlEnabled bool // control layout is rendered when true
controlRequested bool // caller asked for control mode
controlRendering bool // control layout is being built asynchronously
controlLayout *ui.PaneLayout
pairingLayout *ui.PairingLayout
conn *ninja.Connection
serial io.ReadWriteCloser // connection to the LED matrix hardware
waiting chan bool // signals an incoming RPC command to the render loop
}
// NewLedController opens the LED matrix connection, blanks the display,
// and exports the controller as a ninja service on both the node-scoped
// and home-scoped topics.
// Note: a failed LED connection calls log.Fatalf and never returns.
func NewLedController(conn *ninja.Connection) (*LedController, error) {
s, err := util.GetLEDConnection()
if err != nil {
log.Fatalf("Failed to get connection to LED matrix: %s", err)
}
// Send a blank image to the led matrix
util.WriteLEDMatrix(image.NewRGBA(image.Rect(0, 0, 16, 16)), s)
controller := &LedController{
conn: conn,
pairingLayout: ui.NewPairingLayout(),
serial: s,
waiting: make(chan bool),
}
conn.MustExportService(controller, "$node/"+config.Serial()+"/led-controller", &model.ServiceAnnouncement{
Schema: "/service/led-controller",
})
// Also export on the home-scoped path so site-wide clients can reach it.
conn.MustExportService(controller, "$home/led-controller", &model.ServiceAnnouncement{
Schema: "/service/led-controller",
})
return controller, nil
}
// start launches the render loop in a goroutine. Each iteration renders
// either the control layout (when enabled) or the pairing layout, and
// writes the frame to the matrix. A write that takes longer than 10s is
// treated as a hung device and the process exits.
func (c *LedController) start(enableControl bool) {
c.controlRequested = enableControl
frameWritten := make(chan bool)
go func() {
fps.start()
for {
fps.tick()
if c.controlEnabled {
// Good to go
image, wake, err := c.controlLayout.Render()
if err != nil {
log.Fatalf("Unable to render()", err)
}
// Write the frame asynchronously so we can time out on a stuck device.
go func() {
util.WriteLEDMatrix(image, c.serial)
frameWritten <- true
}()
select {
case <-frameWritten:
// All good.
case <-time.After(10 * time.Second):
log.Infof("Timeout writing to LED matrix. Quitting.")
os.Exit(1)
}
if wake != nil {
log.Infof("Waiting as the UI is asleep")
// Block until the UI wakes itself or an RPC command arrives.
select {
case <-wake:
log.Infof("UI woke up!")
case <-c.waiting:
log.Infof("Got a command from rpc...")
}
}
} else if c.controlRequested && !c.controlRendering {
// We want to display controls, so lets render the pane
c.controlRendering = true
go func() {
log.Infof("Starting control layout")
c.controlLayout = getPaneLayout(c.conn)
c.controlRendering = false
c.controlEnabled = true
log.Infof("Finished control layout")
}()
}
if c.controlRendering || !c.controlEnabled {
// We're either already controlling, or waiting for the pane to render
image, err := c.pairingLayout.Render()
if err != nil {
log.Fatalf("Unable to render()", err)
}
util.WriteLEDMatrix(image, c.serial)
}
}
}()
}
// EnableControl requests the interactive control layout; the render loop
// builds and switches to it on its next iteration.
func (c *LedController) EnableControl() error {
c.controlRequested = true
c.gotCommand()
return nil
}
// DisableControl switches the display back to the pairing layout and
// clears any pending control request.
func (c *LedController) DisableControl() error {
c.controlEnabled = false
c.controlRequested = false
c.gotCommand()
return nil
}
// PairingCodeRequest is the RPC payload for DisplayPairingCode.
type PairingCodeRequest struct {
Code string `json:"code"`
DisplayTime int `json:"displayTime"`
}
// DisplayPairingCode disables control mode and shows the pairing code
// on the matrix.
func (c *LedController) DisplayPairingCode(req *PairingCodeRequest) error {
c.DisableControl()
c.pairingLayout.ShowCode(req.Code)
c.gotCommand()
return nil
}
// ColorRequest is the RPC payload for DisplayColor; Color is a hex string.
type ColorRequest struct {
Color string `json:"color"`
DisplayTime int `json:"displayTime"`
}
// DisplayColor disables control mode and fills the matrix with the given
// hex color (e.g. "#FF0000"). An unparsable color string returns an error
// and leaves the display unchanged.
func (c *LedController) DisplayColor(req *ColorRequest) error {
	c.DisableControl()
	parsed, err := colorful.Hex(req.Color)
	if err != nil {
		return err
	}
	c.pairingLayout.ShowColor(parsed)
	c.gotCommand()
	return nil
}
// DisplayIcon disables control mode and shows the requested icon.
func (c *LedController) DisplayIcon(req *ledmodel.IconRequest) error {
c.DisableControl()
log.Infof("Displaying icon: %v", req)
c.pairingLayout.ShowIcon(req.Icon)
c.gotCommand()
return nil
}
// DisplayDrawing disables control mode and switches the pairing layout
// into free-drawing mode (pixels are set via Draw).
func (c *LedController) DisplayDrawing() error {
c.DisableControl()
c.pairingLayout.ShowDrawing()
return nil
}
// Draw disables control mode and forwards pixel updates to the pairing layout.
func (c *LedController) Draw(updates *[][]uint8) error {
c.DisableControl()
c.pairingLayout.Draw(updates)
return nil
}
// DisplayResetMode shows visual feedback for a factory-reset gesture:
// a mode-specific color (solid, or fading/shrinking while the gesture is
// held without Hold), or a loading animation for unknown modes.
func (c *LedController) DisplayResetMode(m *ledmodel.ResetMode) error {
c.DisableControl()
// Fade only when a duration is given and the user is not holding.
fade := m.Duration > 0 && !m.Hold
loading := false
var col color.Color
switch m.Mode {
case "reboot":
col, _ = colorful.Hex("#00FF00")
case "reset-userdata":
col, _ = colorful.Hex("#FFFF00")
case "reset-root":
col, _ = colorful.Hex("#FF0000")
default:
// Unrecognized mode: fall back to the loading animation.
loading = true
}
if loading {
c.pairingLayout.ShowIcon("loading.gif")
} else if fade {
c.pairingLayout.ShowFadingShrinkingColor(col, m.Duration)
} else {
c.pairingLayout.ShowColor(col)
}
c.gotCommand()
return nil
}
// DisplayUpdateProgress shows firmware/software update progress on the
// pairing layout. Unlike the other Display* methods it does not disable
// control mode or signal the render loop.
func (c *LedController) DisplayUpdateProgress(p *ledmodel.DisplayUpdateProgress) error {
c.pairingLayout.ShowUpdateProgress(p.Progress)
return nil
}
// gotCommand performs a non-blocking send on the waiting channel so a
// sleeping render loop wakes up; if nobody is listening it is a no-op.
func (c *LedController) gotCommand() {
select {
case c.waiting <- true:
default:
}
}
// getPaneLayout builds the hard-coded control pane layout (clock, weather,
// gestures, game of life, media, cert, heater/light/fan controls) and wakes it.
// Load from a config file instead...
func getPaneLayout(conn *ninja.Connection) *ui.PaneLayout {
layout, wake := ui.NewPaneLayout(false, conn)
layout.AddPane(ui.NewClockPane())
layout.AddPane(ui.NewWeatherPane(conn))
layout.AddPane(ui.NewGesturePane())
layout.AddPane(ui.NewGameOfLifePane())
layout.AddPane(ui.NewMediaPane(conn))
layout.AddPane(ui.NewCertPane(conn.GetMqttClient()))
//layout.AddPane(ui.NewTextScrollPane("Exit Music (For A Film)"))
heaterPane := ui.NewOnOffPane(util.ResolveImagePath("heater-off.png"), util.ResolveImagePath("heater-on.gif"), func(state bool) {
log.Debugf("Heater state: %t", state)
}, conn, "heater")
layout.AddPane(heaterPane)
brightnessPane := ui.NewLightPane(false, util.ResolveImagePath("light-off.png"), util.ResolveImagePath("light-on.png"), conn)
layout.AddPane(brightnessPane)
colorPane := ui.NewLightPane(true, util.ResolveImagePath("light-off.png"), util.ResolveImagePath("light-on.png"), conn)
layout.AddPane(colorPane)
fanPane := ui.NewOnOffPane(util.ResolveImagePath("fan-off.png"), util.ResolveImagePath("fan-on.gif"), func(state bool) {
log.Debugf("Fan state: %t", state)
}, conn, "fan")
layout.AddPane(fanPane)
// Drain the initial wake signal so the sender does not block.
go func() {
<-wake
}()
go layout.Wake()
return layout
}
// Tick is a simple per-second counter used for FPS-style debug logging.
// NOTE(review): count is incremented and reset from different goroutines
// without synchronization — likely a benign race for debug stats; confirm.
type Tick struct {
count int
name string
}
// tick increments the counter for the current one-second window.
func (t *Tick) tick() {
t.count++
}
// start launches a goroutine that logs and resets the counter every second.
func (t *Tick) start() {
go func() {
for {
time.Sleep(time.Second)
log.Debugf("%s - %d", t.name, t.count)
t.count = 0
}
}()
}
|
package lex_test
import (
"testing"
"github.com/lxc/lxd/lxd/db/generate/lex"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestParse checks that lex.Parse loads the lex package and exposes the
// Parse symbol in its scope.
// NOTE(review): the relative path only resolves when the test runs from the
// repository root — the test is cwd-dependent; confirm intended.
func TestParse(t *testing.T) {
pkg, err := lex.Parse("./lxd/db/generate/lex")
require.NoError(t, err)
obj := pkg.Scope.Lookup("Parse")
assert.NotNil(t, obj)
}
lxd/db/generate/lxd/parse/test: Updates TestParse
Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com>
package lex_test
import (
"path/filepath"
"runtime"
"testing"
"github.com/lxc/lxd/lxd/db/generate/lex"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestParse checks that lex.Parse loads the lex package and exposes the
// Parse symbol in its scope. The package directory is derived from this
// test file's own location via runtime.Caller, so the test is independent
// of the working directory.
func TestParse(t *testing.T) {
_, filename, _, _ := runtime.Caller(0)
pkg, err := lex.Parse(filepath.Dir(filename))
require.NoError(t, err)
obj := pkg.Scope.Lookup("Parse")
assert.NotNil(t, obj)
}
|
package applyspec
import (
models "bosh/agent/applier/models"
)
// V1ApplySpec is the v1 apply message sent by the BOSH Director,
// describing the jobs, packages, networks and logging configuration
// that should be applied to this VM.
type V1ApplySpec struct {
PropertiesSpec PropertiesSpec `json:"properties"`
JobSpec JobSpec `json:"job"`
PackageSpecs map[string]PackageSpec `json:"packages"`
ConfigurationHash string `json:"configuration_hash"`
NetworkSpecs map[string]interface{} `json:"networks"`
ResourcePoolSpecs map[string]interface{} `json:"resource_pool"`
Deployment string `json:"deployment"`
Index int `json:"index"`
RenderedTemplatesArchiveSpec RenderedTemplatesArchiveSpec `json:"rendered_templates_archive"`
}
// PropertiesSpec holds the "properties" section of the apply message.
type PropertiesSpec struct {
LoggingSpec LoggingSpec `json:"logging"`
}
// LoggingSpec holds logging configuration; MaxLogFileSize is a size
// string such as "50M".
type LoggingSpec struct {
MaxLogFileSize string `json:"max_log_file_size"`
}
// BOSH Director provides a single tarball with all job templates pre-rendered
// Jobs converts the job template specs into models.Job values, attaching
// the rendered-templates archive as each job's source.
func (s V1ApplySpec) Jobs() []models.Job {
jobsWithSource := []models.Job{}
for _, j := range s.JobSpec.JobTemplateSpecsAsJobs() {
// Every job's payload comes from the single pre-rendered archive.
j.Source = s.RenderedTemplatesArchiveSpec.AsSource(j)
jobsWithSource = append(jobsWithSource, j)
}
return jobsWithSource
}
// Packages flattens the spec's package map into a slice of models.Package.
// Map iteration order is unspecified, so the result order is too.
func (s V1ApplySpec) Packages() []models.Package {
	packages := []models.Package{}
	for _, spec := range s.PackageSpecs {
		packages = append(packages, spec.AsPackage())
	}
	return packages
}
// MaxLogFileSize returns the configured maximum log file size,
// defaulting to "50M" when the spec leaves it empty.
func (s V1ApplySpec) MaxLogFileSize() string {
	if size := s.PropertiesSpec.LoggingSpec.MaxLogFileSize; size != "" {
		return size
	}
	return "50M"
}
Ignore the ResourcePoolSpec structure in the apply message, but pass it through unchanged.
The resource_pool section of the apply message differs between compile VMs and job VMs.
Signed-off-by: Matthew Boedicker <bd646ec310948bf2f36146b5501a0018bf548d0d@pivotallabs.com>
package applyspec
import (
models "bosh/agent/applier/models"
)
// V1ApplySpec is the v1 apply message sent by the BOSH Director,
// describing the jobs, packages, networks and logging configuration
// that should be applied to this VM. ResourcePoolSpecs is deliberately
// an opaque interface{} so its shape is passed through without validation.
type V1ApplySpec struct {
PropertiesSpec PropertiesSpec `json:"properties"`
JobSpec JobSpec `json:"job"`
PackageSpecs map[string]PackageSpec `json:"packages"`
ConfigurationHash string `json:"configuration_hash"`
NetworkSpecs map[string]interface{} `json:"networks"`
ResourcePoolSpecs interface{} `json:"resource_pool"`
Deployment string `json:"deployment"`
Index int `json:"index"`
RenderedTemplatesArchiveSpec RenderedTemplatesArchiveSpec `json:"rendered_templates_archive"`
}
// PropertiesSpec holds the "properties" section of the apply message.
type PropertiesSpec struct {
LoggingSpec LoggingSpec `json:"logging"`
}
// LoggingSpec holds logging configuration; MaxLogFileSize is a size
// string such as "50M".
type LoggingSpec struct {
MaxLogFileSize string `json:"max_log_file_size"`
}
// BOSH Director provides a single tarball with all job templates pre-rendered
// Jobs converts the job template specs into models.Job values, attaching
// the rendered-templates archive as each job's source.
func (s V1ApplySpec) Jobs() []models.Job {
jobsWithSource := []models.Job{}
for _, j := range s.JobSpec.JobTemplateSpecsAsJobs() {
// Every job's payload comes from the single pre-rendered archive.
j.Source = s.RenderedTemplatesArchiveSpec.AsSource(j)
jobsWithSource = append(jobsWithSource, j)
}
return jobsWithSource
}
// Packages flattens the spec's package map into a slice of models.Package.
// Map iteration order is unspecified, so the result order is too.
func (s V1ApplySpec) Packages() []models.Package {
	packages := []models.Package{}
	for _, spec := range s.PackageSpecs {
		packages = append(packages, spec.AsPackage())
	}
	return packages
}
// MaxLogFileSize returns the configured maximum log file size,
// defaulting to "50M" when the spec leaves it empty.
func (s V1ApplySpec) MaxLogFileSize() string {
	if size := s.PropertiesSpec.LoggingSpec.MaxLogFileSize; size != "" {
		return size
	}
	return "50M"
}
|
package lytics
import (
"strings"
"time"
)
// REST endpoint templates for the campaign/variation API; ":id" segments
// are substituted via parseLyticsURL.
const (
campaignEndpoint = "program/campaign/:id"
campaignListEndpoint = "program/campaign" //status
variationEndpoint = "program/campaign/variation/:id"
variationListEndpoint = "program/campaign/variation"
)
// Campaign describes a personalization campaign as returned by the API.
// Pointer time fields distinguish "unset" (nil) from a real timestamp.
type Campaign struct {
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Status string `json:"status,omitempty"`
PublishedAt *time.Time `json:"published_at,omitempty"`
CreatedAt time.Time `json:"created_at,omitempty"`
UpdatedAt time.Time `json:"updated_at,omitempty"`
StartAt *time.Time `json:"start_at,omitempty"`
EndAt *time.Time `json:"end_at,omitempty"`
Segments []string `json:"segments,omitempty"`
Aid int `json:"aid,omitempty"`
AccountId string `json:"account_id,omitempty"`
UserId string `json:"user_id,omitempty"`
}
// Variation describes one variation of a campaign as returned by the API.
type Variation struct {
Id string `json:"id,omitempty"`
Variation int `json:"variation"`
CampaignId string `json:"campaign_id,omitempty"`
Vehicle string `json:"vehicle,omitempty"`
Reach string `json:"reach,omitempty"`
Conversion string `json:"conversion,omitempty"`
Detail map[string]interface{} `json:"detail,omitempty"`
CreatedAt time.Time `json:"created_at,omitempty"`
UpdatedAt time.Time `json:"updated_at,omitempty"`
Preview bool `json:"preview,omitempty"`
Aid int `json:"aid,omitempty"`
AccountId string `json:"account_id,omitempty" bson:"account_id"`
UserId string `json:"user_id,omitempty" bson:"user_id"`
}
// GetCampaign returns the details for a single personalization campaign
// identified by id. On error the zero-value Campaign is returned with it.
func (l *Client) GetCampaign(id string) (Campaign, error) {
res := ApiResp{}
data := Campaign{}
// make the request
err := l.Get(parseLyticsURL(campaignEndpoint, map[string]string{"id": id}), nil, nil, &res, &data)
if err != nil {
return data, err
}
return data, nil
}
// GetCampaignList returns the details for all campaigns in an account.
// The optional status slice filters results; it is joined into a single
// comma-separated "status" query parameter when non-empty.
func (l *Client) GetCampaignList(status []string) ([]Campaign, error) {
	res := ApiResp{}
	data := []Campaign{}
	var params map[string]string
	if len(status) > 0 {
		params = map[string]string{"status": strings.Join(status, ",")}
	}
	// make the request
	if err := l.Get(campaignListEndpoint, params, nil, &res, &data); err != nil {
		return data, err
	}
	return data, nil
}
// GetVariation returns the details for a single campaign variation
// identified by id.
func (l *Client) GetVariation(id string) (Variation, error) {
res := ApiResp{}
data := Variation{}
// make the request
err := l.Get(parseLyticsURL(variationEndpoint, map[string]string{"id": id}), nil, nil, &res, &data)
if err != nil {
return data, err
}
return data, nil
}
// GetVariationList returns the details for all variations of all campaigns in the account.
func (l *Client) GetVariationList() ([]Variation, error) {
res := ApiResp{}
data := []Variation{}
// make the request
err := l.Get(variationListEndpoint, nil, nil, &res, &data)
if err != nil {
return data, err
}
return data, nil
}
Campaign times should not be pointers.
package lytics
import (
"strings"
"time"
)
// REST endpoint templates for the campaign/variation API; ":id" segments
// are substituted via parseLyticsURL.
const (
campaignEndpoint = "program/campaign/:id"
campaignListEndpoint = "program/campaign" //status
variationEndpoint = "program/campaign/variation/:id"
variationListEndpoint = "program/campaign/variation"
)
// Campaign describes a personalization campaign as returned by the API.
// Time fields are plain values; an unset time decodes to the zero time.
type Campaign struct {
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Status string `json:"status,omitempty"`
PublishedAt time.Time `json:"published_at,omitempty"`
CreatedAt time.Time `json:"created_at,omitempty"`
UpdatedAt time.Time `json:"updated_at,omitempty"`
StartAt time.Time `json:"start_at,omitempty"`
EndAt time.Time `json:"end_at,omitempty"`
Segments []string `json:"segments,omitempty"`
Aid int `json:"aid,omitempty"`
AccountId string `json:"account_id,omitempty"`
UserId string `json:"user_id,omitempty"`
}
// Variation describes one variation of a campaign as returned by the API.
type Variation struct {
Id string `json:"id,omitempty"`
Variation int `json:"variation"`
CampaignId string `json:"campaign_id,omitempty"`
Vehicle string `json:"vehicle,omitempty"`
Reach string `json:"reach,omitempty"`
Conversion string `json:"conversion,omitempty"`
Detail map[string]interface{} `json:"detail,omitempty"`
CreatedAt time.Time `json:"created_at,omitempty"`
UpdatedAt time.Time `json:"updated_at,omitempty"`
Preview bool `json:"preview,omitempty"`
Aid int `json:"aid,omitempty"`
AccountId string `json:"account_id,omitempty" bson:"account_id"`
UserId string `json:"user_id,omitempty" bson:"user_id"`
}
// GetCampaign returns the details for a single personalization campaign
// identified by id. On error the zero-value Campaign is returned with it.
func (l *Client) GetCampaign(id string) (Campaign, error) {
res := ApiResp{}
data := Campaign{}
// make the request
err := l.Get(parseLyticsURL(campaignEndpoint, map[string]string{"id": id}), nil, nil, &res, &data)
if err != nil {
return data, err
}
return data, nil
}
// GetCampaignList returns the details for all campaigns in an account.
// The optional status slice filters results; it is joined into a single
// comma-separated "status" query parameter when non-empty.
func (l *Client) GetCampaignList(status []string) ([]Campaign, error) {
	res := ApiResp{}
	data := []Campaign{}
	var params map[string]string
	if len(status) > 0 {
		params = map[string]string{"status": strings.Join(status, ",")}
	}
	// make the request
	if err := l.Get(campaignListEndpoint, params, nil, &res, &data); err != nil {
		return data, err
	}
	return data, nil
}
// GetVariation returns the details for a single campaign variation
// identified by id.
func (l *Client) GetVariation(id string) (Variation, error) {
	var (
		res  ApiResp
		data Variation
	)
	endpoint := parseLyticsURL(variationEndpoint, map[string]string{"id": id})
	if err := l.Get(endpoint, nil, nil, &res, &data); err != nil {
		return data, err
	}
	return data, nil
}
// GetVariationList returns the details for all variations of all campaigns
// in the account.
func (l *Client) GetVariationList() ([]Variation, error) {
	var res ApiResp
	data := []Variation{}
	if err := l.Get(variationListEndpoint, nil, nil, &res, &data); err != nil {
		return data, err
	}
	return data, nil
}
|
// Test rclone by doing real transactions to a storage provider to and
// from the local disk
package main
import (
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"path"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ogier/pflag"
// Active file systems
_ "github.com/ncw/rclone/drive"
_ "github.com/ncw/rclone/local"
_ "github.com/ncw/rclone/s3"
_ "github.com/ncw/rclone/swift"
)
// Globals
var (
localName, remoteName string
version = pflag.BoolP("version", "V", false, "Print the version number")
)
// Item represents one expected file for checking: its remote path,
// md5sum, modification time, and size in bytes.
type Item struct {
	Path    string
	Md5sum  string
	ModTime time.Time
	Size    int64
}
// Items represents all expected items for checking. byName indexes the
// items slice by Path; entries are deleted as they are checked off.
type Items struct {
	byName map[string]*Item
	items  []Item
}
// NewItems builds an Items checker from the expected items, indexing
// each entry by its Path.
func NewItems(items []Item) *Items {
	byName := make(map[string]*Item)
	for i := range items {
		// Take the address of the slice element, not of the loop copy.
		byName[items[i].Path] = &items[i]
	}
	return &Items{byName: byName, items: items}
}
// Find checks off an item: it looks up obj in the expected set by its
// remote path and aborts the run if the object is unexpected or its
// md5sum, size, or modification time (within fs.Config.ModifyWindow)
// does not match the expectation.
func (is *Items) Find(obj fs.Object) {
	i, ok := is.byName[obj.Remote()]
	if !ok {
		log.Fatalf("Unexpected file %q", obj.Remote())
	}
	// Each expected item may only be matched once.
	delete(is.byName, obj.Remote())
	// Check attributes
	Md5sum, err := obj.Md5sum()
	if err != nil {
		log.Fatalf("Failed to read md5sum for %q: %v", obj.Remote(), err)
	}
	if i.Md5sum != Md5sum {
		log.Fatalf("%s: Md5sum incorrect - expecting %q got %q", obj.Remote(), i.Md5sum, Md5sum)
	}
	if i.Size != obj.Size() {
		log.Fatalf("%s: Size incorrect - expecting %d got %d", obj.Remote(), i.Size, obj.Size())
	}
	// check the mod time to the given precision
	modTime := obj.ModTime()
	dt := modTime.Sub(i.ModTime)
	if dt >= fs.Config.ModifyWindow || dt <= -fs.Config.ModifyWindow {
		log.Fatalf("%s: Modification time difference too big |%s| > %s (%s vs %s)", obj.Remote(), dt, fs.Config.ModifyWindow, modTime, i.ModTime)
	}
}
// Done verifies that every expected item was checked off by Find; any
// leftovers are listed and the run is aborted.
func (is *Items) Done() {
	if len(is.byName) == 0 {
		return
	}
	for name := range is.byName {
		log.Printf("Not found %q", name)
	}
	log.Fatalf("%d objects not found", len(is.byName))
}
// CheckListing checks that the fs contains exactly the expected items:
// every listed object must match an expectation (Find) and every
// expectation must be consumed (Done).
func CheckListing(f fs.Fs, items []Item) {
	is := NewItems(items)
	for obj := range f.List() {
		is.Find(obj)
	}
	is.Done()
}
// Parse a time string or explode
func Time(timeString string) time.Time {
t, err := time.Parse(time.RFC3339Nano, timeString)
if err != nil {
log.Fatalf("Failed to parse time %q: %v", timeString, err)
}
return t
}
// WriteFile writes content to filePath (relative to localName), creating
// any missing parent directories first, then sets both atime and mtime
// to t. Any failure aborts the run.
func WriteFile(filePath, content string, t time.Time) {
	filePath = path.Join(localName, filePath)
	// Create parent directories so tests can write files in sub
	// directories (previously a FIXME: such writes failed outright).
	dirPath := path.Dir(filePath)
	if err := os.MkdirAll(dirPath, 0770); err != nil {
		log.Fatalf("Failed to make directories %q: %v", dirPath, err)
	}
	err := ioutil.WriteFile(filePath, []byte(content), 0600)
	if err != nil {
		log.Fatalf("Failed to write file %q: %v", filePath, err)
	}
	err = os.Chtimes(filePath, t, t)
	if err != nil {
		log.Fatalf("Failed to chtimes file %q: %v", filePath, err)
	}
}
// Create a random string
func RandomString(n int) string {
source := "abcdefghijklmnopqrstuvwxyz0123456789"
out := make([]byte, n)
for i := range out {
out[i] = source[rand.Intn(len(source))]
}
return string(out)
}
// TestMkdir creates the remote container/directory and checks that both
// local and remote start out empty.
func TestMkdir(flocal, fremote fs.Fs) {
	err := fs.Mkdir(fremote)
	if err != nil {
		log.Fatalf("Mkdir failed: %v", err)
	}
	items := []Item{}
	CheckListing(flocal, items)
	CheckListing(fremote, items)
}
// Fixed reference timestamps (RFC3339Nano, UTC) used by the tests below.
var t1 = Time("2001-02-03T04:05:06.499999999Z")
var t2 = Time("2011-12-25T12:59:59.123456789Z")
var t3 = Time("2011-12-30T12:59:59.000000000Z")
// TestCopy checks copying one empty file to the remote: first that
// --dry-run copies nothing, then that a real run transfers it.
// NOTE(review): third fs.Sync argument is false here and true in TestSync —
// presumably it enables deletions; confirm against fs.Sync.
func TestCopy(flocal, fremote fs.Fs) {
	WriteFile("empty space", "", t1)
	// Check dry run is working
	log.Printf("Copy with --dry-run")
	fs.Config.DryRun = true
	err := fs.Sync(fremote, flocal, false)
	fs.Config.DryRun = false
	if err != nil {
		log.Fatalf("Copy failed: %v", err)
	}
	// After the dry run the file must exist locally only.
	items := []Item{
		{Path: "empty space", Size: 0, ModTime: t1, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
	}
	CheckListing(flocal, items)
	CheckListing(fremote, []Item{})
	// Now without dry run
	log.Printf("Copy")
	err = fs.Sync(fremote, flocal, false)
	if err != nil {
		log.Fatalf("Copy failed: %v", err)
	}
	CheckListing(flocal, items)
	CheckListing(fremote, items)
}
// TestSync exercises fs.Sync (third argument true) through four scenarios:
// a modtime-only change, adding a file, a size-only change with identical
// date, and removing+adding a file — the last first under --dry-run
// (remote must be unchanged) and then for real.
func TestSync(flocal, fremote fs.Fs) {
	log.Printf("Sync after changing file modtime only")
	err := os.Chtimes(localName+"/empty space", t2, t2)
	if err != nil {
		log.Fatalf("Chtimes failed: %v", err)
	}
	err = fs.Sync(fremote, flocal, true)
	if err != nil {
		log.Fatalf("Sync failed: %v", err)
	}
	items := []Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
	}
	CheckListing(flocal, items)
	CheckListing(fremote, items)
	// ------------------------------------------------------------
	log.Printf("Sync after adding a file")
	WriteFile("potato", "------------------------------------------------------------", t3)
	err = fs.Sync(fremote, flocal, true)
	if err != nil {
		log.Fatalf("Sync failed: %v", err)
	}
	items = []Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
		{Path: "potato", Size: 60, ModTime: t3, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
	}
	CheckListing(flocal, items)
	CheckListing(fremote, items)
	// ------------------------------------------------------------
	log.Printf("Sync after changing a file's size only")
	WriteFile("potato", "smaller but same date", t3)
	err = fs.Sync(fremote, flocal, true)
	if err != nil {
		log.Fatalf("Sync failed: %v", err)
	}
	items = []Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
		{Path: "potato", Size: 21, ModTime: t3, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
	}
	CheckListing(flocal, items)
	CheckListing(fremote, items)
	// ------------------------------------------------------------
	log.Printf("Sync after removing a file and adding a file --dry-run")
	WriteFile("potato2", "------------------------------------------------------------", t1)
	err = os.Remove(localName + "/potato")
	if err != nil {
		log.Fatalf("Remove failed: %v", err)
	}
	fs.Config.DryRun = true
	err = fs.Sync(fremote, flocal, true)
	fs.Config.DryRun = false
	if err != nil {
		log.Fatalf("Sync failed: %v", err)
	}
	// Dry run: remote keeps its previous contents, local has the new set.
	before := []Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
		{Path: "potato", Size: 21, ModTime: t3, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
	}
	items = []Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
		{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
	}
	CheckListing(flocal, items)
	CheckListing(fremote, before)
	log.Printf("Sync after removing a file and adding a file")
	err = fs.Sync(fremote, flocal, true)
	if err != nil {
		log.Fatalf("Sync failed: %v", err)
	}
	CheckListing(flocal, items)
	CheckListing(fremote, items)
}
// TestLs just makes sure fs.List runs without error; listing contents
// are verified by CheckListing elsewhere.
func TestLs(flocal, fremote fs.Fs) {
	// Underlying List has been tested above, so we just make sure it runs
	err := fs.List(fremote)
	if err != nil {
		log.Fatalf("List failed: %v", err)
	}
}
// TestLsd is an intentionally empty stub — not implemented yet.
func TestLsd(flocal, fremote fs.Fs) {
}

// TestCheck is an intentionally empty stub — not implemented yet.
func TestCheck(flocal, fremote fs.Fs) {
}
// TestPurge deletes the remote container/directory and all its contents.
func TestPurge(flocal, fremote fs.Fs) {
	err := fs.Purge(fremote)
	if err != nil {
		log.Fatalf("Purge failed: %v", err)
	}
}
// TestRmdir removes the (empty) remote directory. Currently disabled in
// main (see the commented-out call there).
func TestRmdir(flocal, fremote fs.Fs) {
	err := fs.Rmdir(fremote)
	if err != nil {
		log.Fatalf("Rmdir failed: %v", err)
	}
}
// syntaxError prints the usage message and flag defaults to stderr.
// Installed as pflag.Usage in main.
func syntaxError() {
	fmt.Fprintf(os.Stderr, `Test rclone with a remote to find bugs in either - %s.

Syntax: [options] remote:

Need a remote: as argument.  This will create a random container or
directory under it and perform tests on it, deleting it at the end.

Options:

`, fs.Version)
	pflag.PrintDefaults()
}
// cleanTempDir removes the local temporary directory (localName) and
// everything in it; failure is logged but not fatal.
func cleanTempDir() {
	log.Printf("Cleaning temporary directory: %q", localName)
	err := os.RemoveAll(localName)
	if err != nil {
		log.Printf("Failed to remove %q: %v", localName, err)
	}
}
// main runs the integration-test sequence against the remote given on the
// command line, using a random container/directory which is purged at the
// end. The local side is a fresh temporary directory.
func main() {
	pflag.Usage = syntaxError
	pflag.Parse()
	if *version {
		fmt.Printf("rclonetest %s\n", fs.Version)
		os.Exit(0)
	}
	fs.LoadConfig()
	rand.Seed(time.Now().UnixNano())
	args := pflag.Args()
	if len(args) != 1 {
		syntaxError()
		os.Exit(1)
	}
	remoteName = args[0]
	// Remote names end in ":"; a plain path needs a separator before the
	// random suffix.
	if !strings.HasSuffix(remoteName, ":") {
		remoteName += "/"
	}
	remoteName += RandomString(32)
	log.Printf("Testing with remote %q", remoteName)
	var err error
	localName, err = ioutil.TempDir("", "rclone")
	if err != nil {
		log.Fatalf("Failed to create temp dir: %v", err)
	}
	log.Printf("Testing with local %q", localName)
	fremote, err := fs.NewFs(remoteName)
	if err != nil {
		log.Fatalf("Failed to make %q: %v", remoteName, err)
	}
	flocal, err := fs.NewFs(localName)
	if err != nil {
		// Bug fix: this failure is about the local path, so report
		// localName rather than remoteName.
		log.Fatalf("Failed to make %q: %v", localName, err)
	}
	fs.CalculateModifyWindow(fremote, flocal)
	TestMkdir(flocal, fremote)
	TestCopy(flocal, fremote)
	TestSync(flocal, fremote)
	TestLs(flocal, fremote)
	TestLsd(flocal, fremote)
	TestCheck(flocal, fremote)
	TestPurge(flocal, fremote)
	//TestRmdir(flocal, fremote)
	cleanTempDir()
	log.Printf("Tests OK")
}
rclonetest: check sub directory and downloads
// Test rclone by doing real transactions to a storage provider to and
// from the local disk
package main
import (
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"path"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ogier/pflag"
// Active file systems
_ "github.com/ncw/rclone/drive"
_ "github.com/ncw/rclone/local"
_ "github.com/ncw/rclone/s3"
_ "github.com/ncw/rclone/swift"
)
// Globals
var (
localName, remoteName string
version = pflag.BoolP("version", "V", false, "Print the version number")
)
// Item represents one expected file for checking: its remote path,
// md5sum, modification time, and size in bytes.
type Item struct {
	Path    string
	Md5sum  string
	ModTime time.Time
	Size    int64
}
// Items represents all expected items for checking. byName indexes the
// items slice by Path; entries are deleted as they are checked off.
type Items struct {
	byName map[string]*Item
	items  []Item
}
// NewItems builds an Items checker from the expected items, keyed by Path.
func NewItems(items []Item) *Items {
	is := &Items{
		byName: make(map[string]*Item),
		items:  items,
	}
	for i := range items {
		// Address of the slice element, not of a loop copy.
		item := &items[i]
		is.byName[item.Path] = item
	}
	return is
}
// Find checks off an item: it looks up obj in the expected set by its
// remote path and aborts the run if the object is unexpected or its
// md5sum, size, or modification time (within fs.Config.ModifyWindow)
// does not match the expectation.
func (is *Items) Find(obj fs.Object) {
	i, ok := is.byName[obj.Remote()]
	if !ok {
		log.Fatalf("Unexpected file %q", obj.Remote())
	}
	// Each expected item may only be matched once.
	delete(is.byName, obj.Remote())
	// Check attributes
	Md5sum, err := obj.Md5sum()
	if err != nil {
		log.Fatalf("Failed to read md5sum for %q: %v", obj.Remote(), err)
	}
	if i.Md5sum != Md5sum {
		log.Fatalf("%s: Md5sum incorrect - expecting %q got %q", obj.Remote(), i.Md5sum, Md5sum)
	}
	if i.Size != obj.Size() {
		log.Fatalf("%s: Size incorrect - expecting %d got %d", obj.Remote(), i.Size, obj.Size())
	}
	// check the mod time to the given precision
	modTime := obj.ModTime()
	dt := modTime.Sub(i.ModTime)
	if dt >= fs.Config.ModifyWindow || dt <= -fs.Config.ModifyWindow {
		log.Fatalf("%s: Modification time difference too big |%s| > %s (%s vs %s)", obj.Remote(), dt, fs.Config.ModifyWindow, modTime, i.ModTime)
	}
}
// Done verifies every expected item was checked off by Find; leftovers
// are listed and the run is aborted.
func (is *Items) Done() {
	missing := len(is.byName)
	if missing != 0 {
		for name := range is.byName {
			log.Printf("Not found %q", name)
		}
		log.Fatalf("%d objects not found", missing)
	}
}
// CheckListing checks that the fs contains exactly the expected items:
// every listed object must match an expectation (Find) and every
// expectation must be consumed (Done).
func CheckListing(f fs.Fs, items []Item) {
	is := NewItems(items)
	for obj := range f.List() {
		is.Find(obj)
	}
	is.Done()
}
// Parse a time string or explode
func Time(timeString string) time.Time {
t, err := time.Parse(time.RFC3339Nano, timeString)
if err != nil {
log.Fatalf("Failed to parse time %q: %v", timeString, err)
}
return t
}
// WriteFile writes content to filePath (relative to localName), creating
// any missing parent directories first, then sets both atime and mtime
// to t. Any failure aborts the run.
func WriteFile(filePath, content string, t time.Time) {
	filePath = path.Join(localName, filePath)
	// Make parent directories so files in sub directories can be written.
	dirPath := path.Dir(filePath)
	err := os.MkdirAll(dirPath, 0770)
	if err != nil {
		log.Fatalf("Failed to make directories %q: %v", dirPath, err)
	}
	err = ioutil.WriteFile(filePath, []byte(content), 0600)
	if err != nil {
		log.Fatalf("Failed to write file %q: %v", filePath, err)
	}
	err = os.Chtimes(filePath, t, t)
	if err != nil {
		log.Fatalf("Failed to chtimes file %q: %v", filePath, err)
	}
}
// Create a random string
func RandomString(n int) string {
source := "abcdefghijklmnopqrstuvwxyz0123456789"
out := make([]byte, n)
for i := range out {
out[i] = source[rand.Intn(len(source))]
}
return string(out)
}
// TestMkdir creates the remote container/directory and checks that both
// local and remote start out empty.
func TestMkdir(flocal, fremote fs.Fs) {
	err := fs.Mkdir(fremote)
	if err != nil {
		log.Fatalf("Mkdir failed: %v", err)
	}
	items := []Item{}
	CheckListing(flocal, items)
	CheckListing(fremote, items)
}
// Fixed reference timestamps (RFC3339Nano, UTC) used by the tests below.
var t1 = Time("2001-02-03T04:05:06.499999999Z")
var t2 = Time("2011-12-25T12:59:59.123456789Z")
var t3 = Time("2011-12-30T12:59:59.000000000Z")
// TestCopy checks copying a file in a sub directory to the remote: first
// that --dry-run copies nothing, then a real upload, then a re-download
// after deleting the local copy. The temp dir is cleaned at the end so
// TestSync starts fresh.
func TestCopy(flocal, fremote fs.Fs) {
	WriteFile("sub dir/hello world", "hello world", t1)
	// Check dry run is working
	log.Printf("Copy with --dry-run")
	fs.Config.DryRun = true
	err := fs.Sync(fremote, flocal, false)
	fs.Config.DryRun = false
	if err != nil {
		log.Fatalf("Copy failed: %v", err)
	}
	// After the dry run the file must exist locally only.
	items := []Item{
		{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
	}
	CheckListing(flocal, items)
	CheckListing(fremote, []Item{})
	// Now without dry run
	log.Printf("Copy")
	err = fs.Sync(fremote, flocal, false)
	if err != nil {
		log.Fatalf("Copy failed: %v", err)
	}
	CheckListing(flocal, items)
	CheckListing(fremote, items)
	// Now delete the local file and download it
	err = os.Remove(localName + "/sub dir/hello world")
	if err != nil {
		log.Fatalf("Remove failed: %v", err)
	}
	CheckListing(flocal, []Item{})
	CheckListing(fremote, items)
	log.Printf("Copy - redownload")
	err = fs.Sync(flocal, fremote, false)
	if err != nil {
		log.Fatalf("Copy failed: %v", err)
	}
	CheckListing(flocal, items)
	CheckListing(fremote, items)
	// Clean the directory
	cleanTempDir()
}
// TestSync exercises fs.Sync (third argument true) through four scenarios:
// a modtime-only change, adding a file, a size-only change with identical
// date, and removing+adding a file — the last first under --dry-run
// (remote must be unchanged) and then for real.
func TestSync(flocal, fremote fs.Fs) {
	WriteFile("empty space", "", t1)
	log.Printf("Sync after changing file modtime only")
	err := os.Chtimes(localName+"/empty space", t2, t2)
	if err != nil {
		log.Fatalf("Chtimes failed: %v", err)
	}
	err = fs.Sync(fremote, flocal, true)
	if err != nil {
		log.Fatalf("Sync failed: %v", err)
	}
	items := []Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
	}
	CheckListing(flocal, items)
	CheckListing(fremote, items)
	// ------------------------------------------------------------
	log.Printf("Sync after adding a file")
	WriteFile("potato", "------------------------------------------------------------", t3)
	err = fs.Sync(fremote, flocal, true)
	if err != nil {
		log.Fatalf("Sync failed: %v", err)
	}
	items = []Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
		{Path: "potato", Size: 60, ModTime: t3, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
	}
	CheckListing(flocal, items)
	CheckListing(fremote, items)
	// ------------------------------------------------------------
	log.Printf("Sync after changing a file's size only")
	WriteFile("potato", "smaller but same date", t3)
	err = fs.Sync(fremote, flocal, true)
	if err != nil {
		log.Fatalf("Sync failed: %v", err)
	}
	items = []Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
		{Path: "potato", Size: 21, ModTime: t3, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
	}
	CheckListing(flocal, items)
	CheckListing(fremote, items)
	// ------------------------------------------------------------
	log.Printf("Sync after removing a file and adding a file --dry-run")
	WriteFile("potato2", "------------------------------------------------------------", t1)
	err = os.Remove(localName + "/potato")
	if err != nil {
		log.Fatalf("Remove failed: %v", err)
	}
	fs.Config.DryRun = true
	err = fs.Sync(fremote, flocal, true)
	fs.Config.DryRun = false
	if err != nil {
		log.Fatalf("Sync failed: %v", err)
	}
	// Dry run: remote keeps its previous contents, local has the new set.
	before := []Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
		{Path: "potato", Size: 21, ModTime: t3, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
	}
	items = []Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
		{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
	}
	CheckListing(flocal, items)
	CheckListing(fremote, before)
	log.Printf("Sync after removing a file and adding a file")
	err = fs.Sync(fremote, flocal, true)
	if err != nil {
		log.Fatalf("Sync failed: %v", err)
	}
	CheckListing(flocal, items)
	CheckListing(fremote, items)
}
// TestLs just makes sure fs.List runs without error; listing contents
// are verified by CheckListing elsewhere.
func TestLs(flocal, fremote fs.Fs) {
	// Underlying List has been tested above, so we just make sure it runs
	err := fs.List(fremote)
	if err != nil {
		log.Fatalf("List failed: %v", err)
	}
}
// TestLsd is an intentionally empty stub — not implemented yet.
func TestLsd(flocal, fremote fs.Fs) {
}

// TestCheck is an intentionally empty stub — not implemented yet.
func TestCheck(flocal, fremote fs.Fs) {
}
// TestPurge deletes the remote container/directory and all its contents.
func TestPurge(flocal, fremote fs.Fs) {
	err := fs.Purge(fremote)
	if err != nil {
		log.Fatalf("Purge failed: %v", err)
	}
}
// TestRmdir removes the (empty) remote directory. Currently disabled in
// main (see the commented-out call there).
func TestRmdir(flocal, fremote fs.Fs) {
	err := fs.Rmdir(fremote)
	if err != nil {
		log.Fatalf("Rmdir failed: %v", err)
	}
}
// syntaxError prints the usage message and flag defaults to stderr.
// Installed as pflag.Usage in main.
func syntaxError() {
	fmt.Fprintf(os.Stderr, `Test rclone with a remote to find bugs in either - %s.

Syntax: [options] remote:

Need a remote: as argument.  This will create a random container or
directory under it and perform tests on it, deleting it at the end.

Options:

`, fs.Version)
	pflag.PrintDefaults()
}
// cleanTempDir removes the local temporary directory (localName) and
// everything in it; failure is logged but not fatal.
func cleanTempDir() {
	log.Printf("Cleaning temporary directory: %q", localName)
	err := os.RemoveAll(localName)
	if err != nil {
		log.Printf("Failed to remove %q: %v", localName, err)
	}
}
// main runs the integration-test sequence against the remote given on the
// command line, using a random container/directory which is purged at the
// end. The local side is a fresh temporary directory.
func main() {
	pflag.Usage = syntaxError
	pflag.Parse()
	if *version {
		fmt.Printf("rclonetest %s\n", fs.Version)
		os.Exit(0)
	}
	fs.LoadConfig()
	rand.Seed(time.Now().UnixNano())
	args := pflag.Args()
	if len(args) != 1 {
		syntaxError()
		os.Exit(1)
	}
	remoteName = args[0]
	// Remote names end in ":"; a plain path needs a separator before the
	// random suffix.
	if !strings.HasSuffix(remoteName, ":") {
		remoteName += "/"
	}
	remoteName += RandomString(32)
	log.Printf("Testing with remote %q", remoteName)
	var err error
	localName, err = ioutil.TempDir("", "rclone")
	if err != nil {
		log.Fatalf("Failed to create temp dir: %v", err)
	}
	log.Printf("Testing with local %q", localName)
	fremote, err := fs.NewFs(remoteName)
	if err != nil {
		log.Fatalf("Failed to make %q: %v", remoteName, err)
	}
	flocal, err := fs.NewFs(localName)
	if err != nil {
		// Bug fix: this failure is about the local path, so report
		// localName rather than remoteName.
		log.Fatalf("Failed to make %q: %v", localName, err)
	}
	fs.CalculateModifyWindow(fremote, flocal)
	TestMkdir(flocal, fremote)
	TestCopy(flocal, fremote)
	TestSync(flocal, fremote)
	TestLs(flocal, fremote)
	TestLsd(flocal, fremote)
	TestCheck(flocal, fremote)
	TestPurge(flocal, fremote)
	//TestRmdir(flocal, fremote)
	cleanTempDir()
	log.Printf("Tests OK")
}
|
// Copyright 2016-2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bufio"
"context"
"fmt"
"net"
"os"
"path/filepath"
"reflect"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/cilium/cilium/api/v1/models"
. "github.com/cilium/cilium/api/v1/server/restapi/daemon"
health "github.com/cilium/cilium/cilium-health/launch"
"github.com/cilium/cilium/common"
monitorLaunch "github.com/cilium/cilium/monitor/launch"
"github.com/cilium/cilium/pkg/api"
"github.com/cilium/cilium/pkg/bpf"
"github.com/cilium/cilium/pkg/byteorder"
"github.com/cilium/cilium/pkg/clustermesh"
"github.com/cilium/cilium/pkg/command/exec"
"github.com/cilium/cilium/pkg/completion"
"github.com/cilium/cilium/pkg/controller"
"github.com/cilium/cilium/pkg/counter"
bpfIPCache "github.com/cilium/cilium/pkg/datapath/ipcache"
"github.com/cilium/cilium/pkg/datapath/iptables"
"github.com/cilium/cilium/pkg/datapath/prefilter"
"github.com/cilium/cilium/pkg/debug"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/endpoint"
"github.com/cilium/cilium/pkg/endpointmanager"
"github.com/cilium/cilium/pkg/envoy"
"github.com/cilium/cilium/pkg/fqdn"
"github.com/cilium/cilium/pkg/fqdn/dnsproxy"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/identity/cache"
"github.com/cilium/cilium/pkg/ipam"
"github.com/cilium/cilium/pkg/ipcache"
"github.com/cilium/cilium/pkg/k8s"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/loadbalancer"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/maps/ctmap"
"github.com/cilium/cilium/pkg/maps/eppolicymap"
ipcachemap "github.com/cilium/cilium/pkg/maps/ipcache"
"github.com/cilium/cilium/pkg/maps/lbmap"
"github.com/cilium/cilium/pkg/maps/lxcmap"
"github.com/cilium/cilium/pkg/maps/metricsmap"
"github.com/cilium/cilium/pkg/maps/policymap"
"github.com/cilium/cilium/pkg/maps/proxymap"
"github.com/cilium/cilium/pkg/maps/sockmap"
"github.com/cilium/cilium/pkg/maps/tunnel"
monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
"github.com/cilium/cilium/pkg/mtu"
"github.com/cilium/cilium/pkg/node"
nodeStore "github.com/cilium/cilium/pkg/node/store"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy"
policyApi "github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/proxy"
"github.com/cilium/cilium/pkg/proxy/accesslog"
"github.com/cilium/cilium/pkg/proxy/logger"
"github.com/cilium/cilium/pkg/revert"
"github.com/cilium/cilium/pkg/sockops"
"github.com/cilium/cilium/pkg/status"
"github.com/cilium/cilium/pkg/trigger"
"github.com/cilium/cilium/pkg/u8proto"
"github.com/cilium/cilium/pkg/workloads"
"github.com/go-openapi/runtime/middleware"
"github.com/miekg/dns"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.org/x/sync/semaphore"
)
const (
	// AutoCIDR indicates that a CIDR should be allocated
	AutoCIDR = "auto"
)

// Positional argument indices for the BPF init script invocation; the
// args slice built in compileBase is sized by initArgMax.
const (
	initArgLib int = iota
	initArgRundir
	initArgIPv4NodeIP
	initArgIPv6NodeIP
	initArgMode
	initArgDevice
	initArgDevicePreFilter
	initArgModePreFilter
	initArgMTU
	initArgMax
)
// Daemon is the cilium daemon that is in charge of performing all necessary
// plumbing and monitoring when a LXC starts.
type Daemon struct {
	// buildEndpointSem bounds how many endpoint builds run concurrently
	// (see QueueEndpointBuild).
	buildEndpointSem *semaphore.Weighted
	l7Proxy          *proxy.Proxy
	loadBalancer     *loadbalancer.LoadBalancer
	policy           *policy.Repository
	preFilter        *prefilter.PreFilter
	// Only used for CRI-O since it does not support events.
	workloadsEventsCh chan<- *workloads.EventMessage

	statusCollectMutex lock.RWMutex
	statusResponse     models.StatusResponse
	statusCollector    *status.Collector

	// uniqueIDMU guards uniqueID, the map of per-endpoint cancel
	// functions used by QueueEndpointBuild/RemoveFromEndpointQueue.
	uniqueIDMU lock.Mutex
	uniqueID   map[uint64]context.CancelFunc

	nodeMonitor  *monitorLaunch.NodeMonitor
	ciliumHealth *health.CiliumHealth

	// dnsRuleGen manages toFQDNs rules
	dnsRuleGen *fqdn.RuleGen

	// dnsPoller polls DNS names and sends them to dnsRuleGen
	dnsPoller *fqdn.DNSPoller

	// k8sAPIs is a set of k8s API in use. They are setup in EnableK8sWatcher,
	// and may be disabled while the agent runs.
	// This is on this object, instead of a global, because EnableK8sWatcher is
	// on Daemon.
	k8sAPIGroups k8sAPIGroupsUsed

	// Used to synchronize generation of daemon's BPF programs and endpoint BPF
	// programs.
	compilationMutex *lock.RWMutex

	// prefixLengths tracks a mapping from CIDR prefix length to the count
	// of rules that refer to that prefix length.
	prefixLengths *counter.PrefixLengthCounter

	clustermesh *clustermesh.ClusterMesh

	// k8sResourceSyncWaitGroup is used to block the starting of the daemon,
	// including regenerating restored endpoints (if specified) until all
	// policies, services, ingresses, and endpoints stored in Kubernetes at the
	// time of bootstrapping of the agent are consumed by Cilium.
	// This prevents regeneration of endpoints, restoring of loadbalancer BPF
	// maps, etc. being performed without crucial information in securing said
	// components. See GH-5038 and GH-4457.
	k8sResourceSyncWaitGroup sync.WaitGroup

	// k8sSvcCache is a cache of all Kubernetes services and endpoints
	k8sSvcCache k8s.ServiceCache

	mtuConfig     mtu.Configuration
	policyTrigger *trigger.Trigger
}
// UpdateProxyRedirect updates the redirect rules in the proxy for a particular
// endpoint using the provided L4 filter. Returns the allocated proxy port
// (0 on failure) plus finalize/revert functions for the caller to commit or
// roll back the change. Fails if the L7 proxy is disabled.
func (d *Daemon) UpdateProxyRedirect(e *endpoint.Endpoint, l4 *policy.L4Filter, proxyWaitGroup *completion.WaitGroup) (uint16, error, revert.FinalizeFunc, revert.RevertFunc) {
	if d.l7Proxy == nil {
		return 0, fmt.Errorf("can't redirect, proxy disabled"), nil, nil
	}

	r, err, finalizeFunc, revertFunc := d.l7Proxy.CreateOrUpdateRedirect(l4, e.ProxyID(l4), e, proxyWaitGroup)
	if err != nil {
		return 0, err, nil, nil
	}

	return r.ProxyPort, nil, finalizeFunc, revertFunc
}
// RemoveProxyRedirect removes a previously installed proxy redirect for an
// endpoint. A disabled L7 proxy makes this a no-op (all nil returns).
func (d *Daemon) RemoveProxyRedirect(e *endpoint.Endpoint, id string, proxyWaitGroup *completion.WaitGroup) (error, revert.FinalizeFunc, revert.RevertFunc) {
	if d.l7Proxy == nil {
		return nil, nil, nil
	}

	log.WithFields(logrus.Fields{
		logfields.EndpointID: e.ID,
		logfields.L4PolicyID: id,
	}).Debug("Removing redirect to endpoint")
	return d.l7Proxy.RemoveRedirect(id, proxyWaitGroup)
}
// UpdateNetworkPolicy adds or updates a network policy in the set
// published to L7 proxies. Fails if the L7 proxy is disabled. The returned
// RevertFunc undoes the update.
func (d *Daemon) UpdateNetworkPolicy(e *endpoint.Endpoint, policy *policy.L4Policy,
	labelsMap, deniedIngressIdentities, deniedEgressIdentities cache.IdentityCache, proxyWaitGroup *completion.WaitGroup) (error, revert.RevertFunc) {
	if d.l7Proxy == nil {
		return fmt.Errorf("can't update network policy, proxy disabled"), nil
	}
	err, revertFunc := d.l7Proxy.UpdateNetworkPolicy(e, policy, e.GetIngressPolicyEnabledLocked(), e.GetEgressPolicyEnabledLocked(),
		labelsMap, deniedIngressIdentities, deniedEgressIdentities, proxyWaitGroup)
	return err, revert.RevertFunc(revertFunc)
}
// RemoveNetworkPolicy removes a network policy from the set published to
// L7 proxies. It is a no-op when the L7 proxy is disabled.
func (d *Daemon) RemoveNetworkPolicy(e *endpoint.Endpoint) {
	if d.l7Proxy != nil {
		d.l7Proxy.RemoveNetworkPolicy(e)
	}
}
// QueueEndpointBuild waits for a "build permit" for the endpoint
// identified by 'epID'. This function blocks until the endpoint can
// start building. The returned function must then be called to
// release the "build permit" when the most resource intensive parts
// of the build are done. The returned function is idempotent, so it
// may be called more than once. Returns nil if the caller should NOT
// start building the endpoint. This may happen due to a build being
// queued for the endpoint already, or due to the wait for the build
// permit being canceled. The latter case happens when the endpoint is
// being deleted.
func (d *Daemon) QueueEndpointBuild(epID uint64) func() {
	d.uniqueIDMU.Lock()
	// Skip new build requests if the endpoint is already in the queue
	// waiting. In this case the queued build will pick up any changes
	// made so far, so there is no need to queue another build now.
	if _, queued := d.uniqueID[epID]; queued {
		d.uniqueIDMU.Unlock()
		return nil
	}
	// Store a cancel function to the 'uniqueID' map so that we can
	// cancel the wait when the endpoint is being deleted.
	ctx, cancel := context.WithCancel(context.Background())
	d.uniqueID[epID] = cancel
	d.uniqueIDMU.Unlock()

	// Acquire build permit. This may block.
	err := d.buildEndpointSem.Acquire(ctx, 1)

	// Not queueing any more, so remove the cancel func from 'uniqueID' map.
	// The caller may still cancel the build by calling the cancel func
	// after we return it. After this point another build may be queued for
	// this endpoint.
	d.uniqueIDMU.Lock()
	delete(d.uniqueID, epID)
	d.uniqueIDMU.Unlock()

	if err != nil {
		return nil // Acquire failed
	}

	// Acquire succeeded, but the context was canceled after?
	if ctx.Err() == context.Canceled {
		d.buildEndpointSem.Release(1)
		return nil
	}

	// At this point the build permit has been acquired. It must
	// be released by the caller by calling the returned function
	// when the heavy lifting of the build is done.
	// Using sync.Once to make the returned function idempotent.
	var once sync.Once
	return func() {
		once.Do(func() {
			d.buildEndpointSem.Release(1)
		})
	}
}
// RemoveFromEndpointQueue removes the endpoint from the "build permit"
// queue, canceling the wait for the build permit if still waiting.
func (d *Daemon) RemoveFromEndpointQueue(epID uint64) {
	d.uniqueIDMU.Lock()
	defer d.uniqueIDMU.Unlock()
	cancel, queued := d.uniqueID[epID]
	if !queued || cancel == nil {
		return
	}
	delete(d.uniqueID, epID)
	cancel()
}
// GetPolicyRepository returns the policy repository of the daemon.
func (d *Daemon) GetPolicyRepository() *policy.Repository {
	return d.policy
}
// DebugEnabled returns if debug mode is enabled (the option.Debug flag in
// the daemon's option set).
func (d *Daemon) DebugEnabled() bool {
	return option.Config.Opts.IsEnabled(option.Debug)
}
// writeNetdevHeader writes the netdev BPF configuration header
// (common.NetdevHeaderFileName) into dir: the daemon's option list, the
// ingress/egress policy-enforcement #defines, and the IP cache prefix data.
func (d *Daemon) writeNetdevHeader(dir string) error {
	headerPath := filepath.Join(dir, common.NetdevHeaderFileName)
	log.WithField(logfields.Path, headerPath).Debug("writing configuration")

	f, err := os.Create(headerPath)
	if err != nil {
		return fmt.Errorf("failed to open file %s for writing: %s", headerPath, err)
	}
	defer f.Close()

	fw := bufio.NewWriter(f)
	// bufio.Writer latches the first write error; it is surfaced by Flush,
	// so the individual WriteString results need not be checked here.
	fw.WriteString(option.Config.Opts.GetFmtList())
	fw.WriteString(d.fmtPolicyEnforcementIngress())
	fw.WriteString(d.fmtPolicyEnforcementEgress())

	endpoint.WriteIPCachePrefixes(fw, d.prefixLengths.ToBPFData)

	return fw.Flush()
}
// fmtPolicyEnforcementIngress returns the #define/#undef line for the
// ingress policy-enforcement macro based on the daemon's configuration:
// #define when policy is always enforced, #undef otherwise.
func (d *Daemon) fmtPolicyEnforcementIngress() string {
	directive := "#undef"
	if policy.GetPolicyEnabled() == option.AlwaysEnforce {
		directive = "#define"
	}
	return fmt.Sprintf("%s %s\n", directive, option.IngressSpecPolicy.Define)
}
// fmtPolicyEnforcementEgress returns the #define/#undef line for the
// egress policy-enforcement macro based on the daemon's configuration:
// #define when policy is always enforced, #undef otherwise.
func (d *Daemon) fmtPolicyEnforcementEgress() string {
	directive := "#undef"
	if policy.GetPolicyEnabled() == option.AlwaysEnforce {
		directive = "#define"
	}
	return fmt.Sprintf("%s %s\n", directive, option.EgressSpecPolicy.Define)
}
// writePreFilterHeader writes the prefilter (XDP) configuration header
// (common.PreFilterHeaderFileName) into dir, with a banner naming the XDP
// device/mode followed by the prefilter's own config output.
// Must be called with option.Config.EnablePolicyMU locked.
func (d *Daemon) writePreFilterHeader(dir string) error {
	headerPath := filepath.Join(dir, common.PreFilterHeaderFileName)
	log.WithField(logfields.Path, headerPath).Debug("writing configuration")
	f, err := os.Create(headerPath)
	if err != nil {
		return fmt.Errorf("failed to open file %s for writing: %s", headerPath, err)
	}
	defer f.Close()
	fw := bufio.NewWriter(f)
	// Write errors are latched by bufio and surfaced by Flush.
	fmt.Fprint(fw, "/*\n")
	fmt.Fprintf(fw, " * XDP device: %s\n", option.Config.DevicePreFilter)
	fmt.Fprintf(fw, " * XDP mode: %s\n", option.Config.ModePreFilter)
	fmt.Fprint(fw, " */\n\n")
	d.preFilter.WriteConfig(fw)
	return fw.Flush()
}
// setHostAddresses looks up the configured LB interface and records its
// first universe-scoped IPv4/IPv6 addresses (for the enabled address
// families) in option.Config.HostV4Addr / HostV6Addr.
func (d *Daemon) setHostAddresses() error {
	l, err := netlink.LinkByName(option.Config.LBInterface)
	if err != nil {
		// Bug fix: the lookup above uses LBInterface, so report that
		// name instead of the unrelated option.Config.Device.
		return fmt.Errorf("unable to get network device %s: %s", option.Config.LBInterface, err)
	}

	// getAddr returns the first SCOPE_UNIVERSE address of the link for
	// the given family, or nil if there is none.
	getAddr := func(netLinkFamily int) (net.IP, error) {
		addrs, err := netlink.AddrList(l, netLinkFamily)
		if err != nil {
			return nil, fmt.Errorf("error while getting %s's addresses: %s", option.Config.LBInterface, err)
		}
		for _, possibleAddr := range addrs {
			if netlink.Scope(possibleAddr.Scope) == netlink.SCOPE_UNIVERSE {
				return possibleAddr.IP, nil
			}
		}
		return nil, nil
	}

	if option.Config.EnableIPv4 {
		hostV4Addr, err := getAddr(netlink.FAMILY_V4)
		if err != nil {
			return err
		}
		if hostV4Addr != nil {
			option.Config.HostV4Addr = hostV4Addr
			log.Infof("Using IPv4 host address: %s", option.Config.HostV4Addr)
		}
	}

	if option.Config.EnableIPv6 {
		hostV6Addr, err := getAddr(netlink.FAMILY_V6)
		if err != nil {
			return err
		}
		if hostV6Addr != nil {
			option.Config.HostV6Addr = hostV6Addr
			log.Infof("Using IPv6 host address: %s", option.Config.HostV6Addr)
		}
	}
	return nil
}
// GetCompilationLock returns the mutex responsible for synchronizing compilation
// of BPF programs. compileBase() acquires it for writing while base
// programs are being rebuilt.
func (d *Daemon) GetCompilationLock() *lock.RWMutex {
	return d.compilationMutex
}
// compileBase compiles and loads the base (non-endpoint) datapath: it
// writes the netdev and optional prefilter headers, assembles the argument
// vector for bpf/init.sh, runs that script, and then installs host routes
// and (for IPv4) iptables rules. It holds the compilation mutex for the
// whole duration so endpoint builds cannot run concurrently.
func (d *Daemon) compileBase() error {
	var args []string
	var mode string
	var ret error
	args = make([]string, initArgMax)
	// Lock so that endpoints cannot be built while we are compile base programs.
	d.compilationMutex.Lock()
	defer d.compilationMutex.Unlock()
	if err := d.writeNetdevHeader("./"); err != nil {
		log.WithError(err).Warn("Unable to write netdev header")
		return err
	}
	scopedLog := log.WithField(logfields.XDPDevice, option.Config.DevicePreFilter)
	if option.Config.DevicePreFilter != "undefined" {
		// If the XDP device/mode cannot be probed, fall back to running
		// without the prefilter rather than failing startup.
		if err := prefilter.ProbePreFilter(option.Config.DevicePreFilter, option.Config.ModePreFilter); err != nil {
			scopedLog.WithError(err).Warn("Turning off prefilter")
			option.Config.DevicePreFilter = "undefined"
		}
	}
	if option.Config.DevicePreFilter != "undefined" {
		if d.preFilter, ret = prefilter.NewPreFilter(); ret != nil {
			scopedLog.WithError(ret).Warn("Unable to init prefilter")
			return ret
		}
		if err := d.writePreFilterHeader("./"); err != nil {
			scopedLog.WithError(err).Warn("Unable to write prefilter header")
			return err
		}
		args[initArgDevicePreFilter] = option.Config.DevicePreFilter
		args[initArgModePreFilter] = option.Config.ModePreFilter
	}
	args[initArgLib] = option.Config.BpfDir
	args[initArgRundir] = option.Config.StateDir
	// Node IPs are passed as "<nil>" for disabled address families so the
	// script's positional arguments stay aligned.
	if option.Config.EnableIPv4 {
		args[initArgIPv4NodeIP] = node.GetInternalIPv4().String()
	} else {
		args[initArgIPv4NodeIP] = "<nil>"
	}
	if option.Config.EnableIPv6 {
		args[initArgIPv6NodeIP] = node.GetIPv6().String()
	} else {
		args[initArgIPv6NodeIP] = "<nil>"
	}
	args[initArgMTU] = fmt.Sprintf("%d", d.mtuConfig.GetDeviceMTU())
	if option.Config.Device != "undefined" {
		// Device mode: verify the link exists, then select lb, ipvlan or
		// direct routing mode.
		_, err := netlink.LinkByName(option.Config.Device)
		if err != nil {
			log.WithError(err).WithField("device", option.Config.Device).Warn("Link does not exist")
			return err
		}
		if option.Config.IsLBEnabled() {
			if option.Config.Device != option.Config.LBInterface {
				//FIXME: allow different interfaces
				return fmt.Errorf("Unable to have an interface for LB mode different than snooping interface")
			}
			if err := d.setHostAddresses(); err != nil {
				return err
			}
			mode = "lb"
		} else {
			if option.Config.DatapathMode == option.DatapathModeIpvlan {
				mode = "ipvlan"
			} else {
				mode = "direct"
			}
		}
		args[initArgMode] = mode
		args[initArgDevice] = option.Config.Device
		args = append(args, option.Config.Device)
	} else {
		if option.Config.IsLBEnabled() {
			//FIXME: allow LBMode in tunnel
			return fmt.Errorf("Unable to run LB mode with tunnel mode")
		}
		args[initArgMode] = option.Config.Tunnel
	}
	// Execute the datapath init script with a bounded run time.
	prog := filepath.Join(option.Config.BpfDir, "init.sh")
	ctx, cancel := context.WithTimeout(context.Background(), defaults.ExecTimeout)
	defer cancel()
	cmd := exec.CommandContext(ctx, prog, args...)
	cmd.Env = bpf.Environment()
	if _, err := cmd.CombinedOutput(log, true); err != nil {
		return err
	}
	ipam.ReserveLocalRoutes()
	node.InstallHostRoutes(d.mtuConfig)
	if option.Config.EnableIPv4 {
		// Always remove masquerade rule and then re-add it if required
		iptables.RemoveRules()
		if err := iptables.InstallRules(); err != nil {
			return err
		}
	}
	// NOTE(review): these lines only log the sysctl values; the settings
	// themselves appear to be applied by init.sh — confirm.
	log.Info("Setting sysctl net.core.bpf_jit_enable=1")
	log.Info("Setting sysctl net.ipv4.conf.all.rp_filter=0")
	log.Info("Setting sysctl net.ipv6.conf.all.disable_ipv6=0")
	return nil
}
// init prepares the runtime directory, writes the node configuration
// header and, unless running in dry mode, opens the global BPF maps,
// compiles the base programs, configures sockops, seeds the local host
// entries into the datapath maps, starts the periodic sync controllers,
// and optionally wipes stale load-balancer state.
func (d *Daemon) init() error {
	globalsDir := option.Config.GetGlobalsDir()
	if err := os.MkdirAll(globalsDir, defaults.RuntimePathRights); err != nil {
		log.WithError(err).WithField(logfields.Path, globalsDir).Fatal("Could not create runtime directory")
	}
	if err := os.Chdir(option.Config.StateDir); err != nil {
		log.WithError(err).WithField(logfields.Path, option.Config.StateDir).Fatal("Could not change to runtime directory")
	}
	if err := d.createNodeConfigHeaderfile(); err != nil {
		return err
	}
	if !option.Config.DryMode {
		// Open (or create) every global BPF map before compiling the
		// base programs that reference them.
		if _, err := lxcmap.LXCMap.OpenOrCreate(); err != nil {
			return err
		}
		if _, err := ipcachemap.IPCache.OpenOrCreate(); err != nil {
			return err
		}
		if _, err := metricsmap.Metrics.OpenOrCreate(); err != nil {
			return err
		}
		if _, err := tunnel.TunnelMap.OpenOrCreate(); err != nil {
			return err
		}
		if err := openServiceMaps(); err != nil {
			log.WithError(err).Fatal("Unable to open service maps")
		}
		if err := d.compileBase(); err != nil {
			return err
		}
		// Remove any old sockops and re-enable with _new_ programs if flag is set
		sockops.SockmapDisable()
		sockops.SkmsgDisable()
		if option.Config.SockopsEnable {
			eppolicymap.CreateEPPolicyMap()
			sockops.SockmapEnable()
			sockops.SkmsgEnable()
			sockmap.SockmapCreate()
		}
		// Set up the list of IPCache listeners in the daemon, to be
		// used by syncLXCMap().
		ipcache.IPIdentityCache.SetListeners([]ipcache.IPIdentityMappingListener{
			&envoy.NetworkPolicyHostsCache,
			bpfIPCache.NewListener(d),
		})
		// Insert local host entries to bpf maps
		if err := d.syncLXCMap(); err != nil {
			return err
		}
		// Start the controller for periodic sync
		// The purpose of the controller is to ensure that the host entries are
		// reinserted to the bpf maps if they are ever removed from them.
		// TODO: Determine if we can get rid of this when we have more rigorous
		// desired/realized state implementation for the bpf maps.
		controller.NewManager().UpdateController("lxcmap-bpf-host-sync",
			controller.ControllerParams{
				DoFunc:      func() error { return d.syncLXCMap() },
				RunInterval: 5 * time.Second,
			})
		// Start the controller for periodic sync of the metrics map with
		// the prometheus server.
		controller.NewManager().UpdateController("metricsmap-bpf-prom-sync",
			controller.ControllerParams{
				DoFunc:      metricsmap.SyncMetricsMap,
				RunInterval: 5 * time.Second,
			})
		// Clean all lb entries
		if !option.Config.RestoreState {
			log.Debug("cleaning up all BPF LB maps")
			// Note: the deferred unlock runs when init() returns, not at
			// the end of this block.
			d.loadBalancer.BPFMapMU.Lock()
			defer d.loadBalancer.BPFMapMU.Unlock()
			if option.Config.EnableIPv6 {
				if err := lbmap.Service6Map.DeleteAll(); err != nil {
					return err
				}
				if err := lbmap.RRSeq6Map.DeleteAll(); err != nil {
					return err
				}
			}
			if err := d.RevNATDeleteAll(); err != nil {
				return err
			}
			if option.Config.EnableIPv4 {
				if err := lbmap.Service4Map.DeleteAll(); err != nil {
					return err
				}
				if err := lbmap.RRSeq4Map.DeleteAll(); err != nil {
					return err
				}
			}
			// If we are not restoring state, all endpoints can be
			// deleted. Entries will be re-populated.
			lxcmap.LXCMap.DeleteAll()
		}
	}
	return nil
}
// createNodeConfigHeaderfile writes the node_config header consumed by the
// BPF datapath: node/router IPs, reserved identity IDs, map size constants
// and feature #defines derived from the daemon configuration.
//
// Unlike the previous version, write errors are no longer silently
// dropped: the buffered writer is flushed and the file closed with their
// errors propagated, so a short write (e.g. full disk) is detected instead
// of leaving a truncated header for the compiler.
func (d *Daemon) createNodeConfigHeaderfile() error {
	nodeConfigPath := option.Config.GetNodeConfigPath()
	f, err := os.Create(nodeConfigPath)
	if err != nil {
		log.WithError(err).WithField(logfields.Path, nodeConfigPath).Fatal("Failed to create node configuration file")
		return err
	}
	fw := bufio.NewWriter(f)
	routerIP := node.GetIPv6Router()
	hostIP := node.GetIPv6()
	// Human-readable banner summarizing the node addressing.
	fmt.Fprintf(fw, ""+
		"/*\n"+
		" * Node-IPv6: %s\n"+
		" * Router-IPv6: %s\n"+
		" * Host-IPv4: %s\n"+
		" */\n\n",
		hostIP.String(), routerIP.String(),
		node.GetInternalIPv4().String())
	fw.WriteString(common.FmtDefineComma("ROUTER_IP", routerIP))
	if option.Config.EnableIPv4 {
		ipv4GW := node.GetInternalIPv4()
		loopbackIPv4 := node.GetIPv4Loopback()
		fmt.Fprintf(fw, "#define IPV4_GATEWAY %#x\n", byteorder.HostSliceToNetwork(ipv4GW, reflect.Uint32).(uint32))
		fmt.Fprintf(fw, "#define IPV4_LOOPBACK %#x\n", byteorder.HostSliceToNetwork(loopbackIPv4, reflect.Uint32).(uint32))
	} else {
		// FIXME: Workaround so the bpf program compiles
		fmt.Fprintf(fw, "#define IPV4_GATEWAY %#x\n", 0)
		fmt.Fprintf(fw, "#define IPV4_LOOPBACK %#x\n", 0)
	}
	ipv4Range := node.GetIPv4AllocRange()
	fmt.Fprintf(fw, "#define IPV4_MASK %#x\n", byteorder.HostSliceToNetwork(ipv4Range.Mask, reflect.Uint32).(uint32))
	if nat46Range := option.Config.NAT46Prefix; nat46Range != nil {
		fw.WriteString(common.FmtDefineAddress("NAT46_PREFIX", nat46Range.IP))
	}
	fw.WriteString(common.FmtDefineComma("HOST_IP", hostIP))
	// Reserved security identities.
	fmt.Fprintf(fw, "#define HOST_ID %d\n", identity.GetReservedID(labels.IDNameHost))
	fmt.Fprintf(fw, "#define WORLD_ID %d\n", identity.GetReservedID(labels.IDNameWorld))
	fmt.Fprintf(fw, "#define HEALTH_ID %d\n", identity.GetReservedID(labels.IDNameHealth))
	fmt.Fprintf(fw, "#define UNMANAGED_ID %d\n", identity.GetReservedID(labels.IDNameUnmanaged))
	fmt.Fprintf(fw, "#define INIT_ID %d\n", identity.GetReservedID(labels.IDNameInit))
	// BPF map sizing constants.
	fmt.Fprintf(fw, "#define LB_RR_MAX_SEQ %d\n", lbmap.MaxSeq)
	fmt.Fprintf(fw, "#define CILIUM_LB_MAP_MAX_ENTRIES %d\n", lbmap.MaxEntries)
	fmt.Fprintf(fw, "#define TUNNEL_ENDPOINT_MAP_SIZE %d\n", tunnel.MaxEntries)
	fmt.Fprintf(fw, "#define PROXY_MAP_SIZE %d\n", proxymap.MaxEntries)
	fmt.Fprintf(fw, "#define ENDPOINTS_MAP_SIZE %d\n", lxcmap.MaxEntries)
	fmt.Fprintf(fw, "#define METRICS_MAP_SIZE %d\n", metricsmap.MaxEntries)
	fmt.Fprintf(fw, "#define POLICY_MAP_SIZE %d\n", policymap.MaxEntries)
	fmt.Fprintf(fw, "#define IPCACHE_MAP_SIZE %d\n", ipcachemap.MaxEntries)
	fmt.Fprintf(fw, "#define POLICY_PROG_MAP_SIZE %d\n", policymap.ProgArrayMaxEntries)
	fmt.Fprintf(fw, "#define SOCKOPS_MAP_SIZE %d\n", sockmap.MaxEntries)
	if option.Config.PreAllocateMaps {
		fmt.Fprintf(fw, "#define PREALLOCATE_MAPS\n")
	}
	fmt.Fprintf(fw, "#define TRACE_PAYLOAD_LEN %dULL\n", option.Config.TracePayloadlen)
	fmt.Fprintf(fw, "#define MTU %d\n", d.mtuConfig.GetDeviceMTU())
	if option.Config.EnableIPv4 {
		fmt.Fprintf(fw, "#define ENABLE_IPV4\n")
	}
	if option.Config.EnableIPv6 {
		fmt.Fprintf(fw, "#define ENABLE_IPV6\n")
	}
	// Surface flush/close failures instead of ignoring them.
	if err := fw.Flush(); err != nil {
		f.Close()
		return err
	}
	return f.Close()
}
// syncLXCMap adds local host entries to bpf lxcmap, as well as
// ipcache, if needed, and also notifies the daemon and network policy
// hosts cache if changes were made.
func (d *Daemon) syncLXCMap() error {
	// TODO: Update addresses first, in case node addressing has changed.
	// TODO: Once these start changing on runtime, figure out the locking strategy.
	// Build the set of IPs that must always be present in the datapath:
	// the node's own addresses (host identity) and the 0.0.0.0/0 / ::/0
	// world entries, per enabled address family.
	specialIdentities := []identity.IPIdentityPair{}
	if option.Config.EnableIPv4 {
		specialIdentities = append(specialIdentities,
			[]identity.IPIdentityPair{
				{
					IP: node.GetInternalIPv4(),
					ID: identity.ReservedIdentityHost,
				},
				{
					IP: node.GetExternalIPv4(),
					ID: identity.ReservedIdentityHost,
				},
				{
					IP:   net.IPv4zero,
					Mask: net.CIDRMask(0, net.IPv4len*8),
					ID:   identity.ReservedIdentityWorld,
				},
			}...)
	}
	if option.Config.EnableIPv6 {
		specialIdentities = append(specialIdentities,
			[]identity.IPIdentityPair{
				{
					IP: node.GetIPv6(),
					ID: identity.ReservedIdentityHost,
				},
				{
					IP: node.GetIPv6Router(),
					ID: identity.ReservedIdentityHost,
				},
				{
					IP:   net.IPv6zero,
					Mask: net.CIDRMask(0, net.IPv6len*8),
					ID:   identity.ReservedIdentityWorld,
				},
			}...)
	}
	// Snapshot the current map contents; entries still present in this
	// map after the sync loop below are stale.
	existingEndpoints, err := lxcmap.DumpToMap()
	if err != nil {
		return err
	}
	for _, ipIDPair := range specialIdentities {
		isHost := ipIDPair.ID == identity.ReservedIdentityHost
		if isHost {
			added, err := lxcmap.SyncHostEntry(ipIDPair.IP)
			if err != nil {
				return fmt.Errorf("Unable to add host entry to endpoint map: %s", err)
			}
			if added {
				log.WithField(logfields.IPAddr, ipIDPair.IP).Debugf("Added local ip to endpoint map")
			}
		}
		delete(existingEndpoints, ipIDPair.IP.String())
		// Upsert will not propagate (reserved:foo->ID) mappings across the cluster,
		// and we specifically don't want to do so.
		ipcache.IPIdentityCache.Upsert(ipIDPair.PrefixString(), nil, ipcache.Identity{
			ID:     ipIDPair.ID,
			Source: ipcache.FromAgentLocal,
		})
	}
	// Remove any host entries left over from a previous addressing
	// configuration.
	for hostIP, info := range existingEndpoints {
		if ip := net.ParseIP(hostIP); info.IsHost() && ip != nil {
			if err := lxcmap.DeleteEntry(ip); err != nil {
				log.WithError(err).WithFields(logrus.Fields{
					logfields.IPAddr: hostIP,
				}).Warn("Unable to delete obsolete host IP from BPF map")
			} else {
				log.Debugf("Removed outdated host ip %s from endpoint map", hostIP)
			}
		}
	}
	return nil
}
func createIPNet(ones, bits int) *net.IPNet {
return &net.IPNet{
Mask: net.CIDRMask(ones, bits),
}
}
// createPrefixLengthCounter wraps around the counter library, providing
// references to prefix lengths that will always be present.
func createPrefixLengthCounter() *counter.PrefixLengthCounter {
	max4 := ipcachemap.IPCache.GetMaxPrefixLengths(false)
	max6 := ipcachemap.IPCache.GetMaxPrefixLengths(true)
	c := counter.NewPrefixLengthCounter(max6, max4)

	// There is no clean way to build an IPNet without string parsing, so
	// assemble the always-present world/host prefixes directly.
	defaultPrefixes := []*net.IPNet{
		createIPNet(0, net.IPv4len*8),             // IPv4: world
		createIPNet(net.IPv4len*8, net.IPv4len*8), // IPv4: hosts
		createIPNet(0, net.IPv6len*8),             // IPv6: world
		createIPNet(net.IPv6len*8, net.IPv6len*8), // IPv6: hosts
	}
	if _, err := c.Add(defaultPrefixes); err != nil {
		log.WithError(err).Fatal("Failed to create default prefix lengths")
	}
	return c
}
// NewDaemon creates and returns a new Daemon with the parameters set in c.
// It also returns the endpoint restore state discovered on disk so the
// caller can finish endpoint regeneration. The bootstrap order matters:
// config validation, daemon construction, k8s init, node addressing, IPAM,
// endpoint restore, datapath init (d.init()), then proxy/FQDN setup.
func NewDaemon() (*Daemon, *endpointRestoreState, error) {
	// Validate the daemon-specific global options.
	if err := option.Config.Validate(); err != nil {
		return nil, nil, fmt.Errorf("invalid daemon configuration: %s", err)
	}
	ctmap.InitMapInfo(option.Config.CTMapEntriesGlobalTCP, option.Config.CTMapEntriesGlobalAny)
	if err := workloads.Setup(option.Config.Workloads, map[string]string{}); err != nil {
		return nil, nil, fmt.Errorf("unable to setup workload: %s", err)
	}
	// Construct the daemon and its core subsystems.
	d := Daemon{
		loadBalancer:     loadbalancer.NewLoadBalancer(),
		k8sSvcCache:      k8s.NewServiceCache(),
		policy:           policy.NewPolicyRepository(),
		uniqueID:         map[uint64]context.CancelFunc{},
		nodeMonitor:      monitorLaunch.NewNodeMonitor(option.Config.MonitorQueueSize),
		prefixLengths:    createPrefixLengthCounter(),
		buildEndpointSem: semaphore.NewWeighted(int64(numWorkerThreads())),
		compilationMutex: new(lock.RWMutex),
		mtuConfig:        mtu.NewConfiguration(option.Config.Tunnel != option.TunnelDisabled, option.Config.MTU),
	}
	// Rate-limited trigger used to batch policy recalculations.
	t, err := trigger.NewTrigger(trigger.Parameters{
		Name:              "policy_update",
		PrometheusMetrics: true,
		MinInterval:       time.Second,
		TriggerFunc:       d.policyUpdateTrigger,
	})
	if err != nil {
		return nil, nil, err
	}
	d.policyTrigger = t
	debug.RegisterStatusObject("k8s-service-cache", &d.k8sSvcCache)
	d.runK8sServiceHandler()
	policyApi.InitEntities(option.Config.ClusterName)
	workloads.Init(&d)
	// Clear previous leftovers before listening for new requests
	log.Info("Clearing leftover Cilium veths")
	err = d.clearCiliumVeths()
	if err != nil {
		log.WithError(err).Debug("Unable to clean leftover veths")
	}
	if k8s.IsEnabled() {
		if err := k8s.Init(); err != nil {
			log.WithError(err).Fatal("Unable to initialize Kubernetes subsystem")
		}
		log.Info("Kubernetes information:")
		log.Infof("  Namespace: %s", option.Config.K8sNamespace)
		// Kubernetes demands that the localhost can always reach local
		// pods. Therefore unless the AllowLocalhost policy is set to a
		// specific mode, always allow localhost to reach local
		// endpoints.
		if option.Config.AllowLocalhost == option.AllowLocalhostAuto {
			option.Config.AllowLocalhost = option.AllowLocalhostAlways
			log.Info("k8s mode: Allowing localhost to reach local endpoints")
		}
		// In Cilium 1.0, due to limitations on the data path, traffic
		// from the outside world on ingress was treated as though it
		// was from the host for policy purposes. In order to not break
		// existing policies, this option retains the behavior.
		if option.Config.K8sLegacyHostAllowsWorld != "false" {
			option.Config.HostAllowsWorld = true
			log.Warn("k8s mode: Configuring ingress policy for host to also allow from world. For more information, see https://cilium.link/host-vs-world")
		}
	}
	// If the device has been specified, the IPv4AllocPrefix and the
	// IPv6AllocPrefix were already allocated before the k8s.Init().
	//
	// If the device hasn't been specified, k8s.Init() allocated the
	// IPv4AllocPrefix and the IPv6AllocPrefix from k8s node annotations.
	//
	// If k8s.Init() failed to retrieve the IPv4AllocPrefix we can try to derive
	// it from an existing node_config.h file or from previous cilium_host
	// interfaces.
	//
	// Then, we will calculate the IPv4 or IPv6 alloc prefix based on the IPv6
	// or IPv4 alloc prefix, respectively, retrieved by k8s node annotations.
	log.Info("Initializing node addressing")
	// Inject BPF dependency, kvstore dependency into node package.
	node.TunnelDatapath = tunnel.TunnelMap
	node.NodeReg = &nodeStore.NodeRegistrar{}
	if err := node.AutoComplete(); err != nil {
		log.WithError(err).Fatal("Cannot autocomplete node addresses")
	}
	node.SetIPv4ClusterCidrMaskSize(option.Config.IPv4ClusterCIDRMaskSize)
	if option.Config.IPv4Range != AutoCIDR {
		// NOTE: "net" here shadows the net package for this scope.
		_, net, err := net.ParseCIDR(option.Config.IPv4Range)
		if err != nil {
			log.WithError(err).WithField(logfields.V4Prefix, option.Config.IPv4Range).Fatal("Invalid IPv4 allocation prefix")
		}
		node.SetIPv4AllocRange(net)
	}
	if option.Config.IPv4ServiceRange != AutoCIDR {
		_, ipnet, err := net.ParseCIDR(option.Config.IPv4ServiceRange)
		if err != nil {
			log.WithError(err).WithField(logfields.V4Prefix, option.Config.IPv4ServiceRange).Fatal("Invalid IPv4 service prefix")
		}
		node.AddAuxPrefix(ipnet)
	}
	if option.Config.IPv6Range != AutoCIDR {
		// NOTE: "net" here shadows the net package for this scope.
		_, net, err := net.ParseCIDR(option.Config.IPv6Range)
		if err != nil {
			log.WithError(err).WithField(logfields.V6Prefix, option.Config.IPv6Range).Fatal("Invalid IPv6 allocation prefix")
		}
		if err := node.SetIPv6NodeRange(net); err != nil {
			log.WithError(err).WithField(logfields.V6Prefix, net).Fatal("Invalid per node IPv6 allocation prefix")
		}
	}
	if option.Config.IPv6ServiceRange != AutoCIDR {
		_, ipnet, err := net.ParseCIDR(option.Config.IPv6ServiceRange)
		if err != nil {
			log.WithError(err).WithField(logfields.V6Prefix, option.Config.IPv6ServiceRange).Fatal("Invalid IPv6 service prefix")
		}
		node.AddAuxPrefix(ipnet)
	}
	// Set up ipam conf after init() because we might be running d.conf.KVStoreIPv4Registration
	log.Info("Initializing IPAM")
	ipam.Init()
	// restore endpoints before any IPs are allocated to avoid eventual IP
	// conflicts later on, otherwise any IP conflict will result in the
	// endpoint not being able to be restored.
	restoredEndpoints, err := d.restoreOldEndpoints(option.Config.StateDir, true)
	if err != nil {
		log.WithError(err).Error("Unable to restore existing endpoints")
	}
	switch err := ipam.AllocateInternalIPs(); err.(type) {
	case ipam.ErrAllocation:
		if option.Config.IPv4Range == AutoCIDR || option.Config.IPv6ServiceRange == AutoCIDR {
			log.WithError(err).Fatalf(
				"The allocation CIDR is different from the previous cilium instance. " +
					"This error is most likely caused by a temporary network disruption to the kube-apiserver " +
					"that prevent Cilium from retrieve the node's IPv4/IPv6 allocation range. " +
					"If you believe the allocation range is supposed to be different you need to clean " +
					"up all Cilium state with the `cilium cleanup` command on this node. Be aware " +
					"this will cause network disruption for all existing containers managed by Cilium " +
					"running on this node and you will have to restart them.")
		} else {
			log.WithError(err).Fatalf(
				"The allocation CIDR is different from the previous cilium instance. " +
					"If you believe the allocation range is supposed to be different you need to clean " +
					"up all Cilium state with the `cilium cleanup` command on this node. Be aware " +
					"this will cause network disruption for all existing containers managed by Cilium " +
					"running on this node and you will have to restart them.")
		}
	case error:
		log.WithError(err).Fatal("IPAM init failed")
	}
	log.Info("Validating configured node address ranges")
	if err := node.ValidatePostInit(); err != nil {
		log.WithError(err).Fatal("postinit failed")
	}
	if k8s.IsEnabled() {
		log.Info("Annotating k8s node with CIDR ranges")
		err := k8s.Client().AnnotateNode(node.GetName(),
			node.GetIPv4AllocRange(), node.GetIPv6NodeRange(),
			nil, nil, node.GetInternalIPv4())
		if err != nil {
			log.WithError(err).Warning("Cannot annotate k8s node with CIDR range")
		}
	}
	log.Info("Addressing information:")
	log.Infof("  Cluster-Name: %s", option.Config.ClusterName)
	log.Infof("  Cluster-ID: %d", option.Config.ClusterID)
	log.Infof("  Local node-name: %s", node.GetName())
	if option.Config.EnableIPv6 {
		log.Infof("  Node-IPv6: %s", node.GetIPv6())
		log.Infof("  IPv6 node prefix: %s", node.GetIPv6NodeRange())
		log.Infof("  IPv6 allocation prefix: %s", node.GetIPv6AllocRange())
		log.Infof("  IPv6 router address: %s", node.GetIPv6Router())
	}
	if option.Config.EnableIPv4 {
		log.Infof("  External-Node IPv4: %s", node.GetExternalIPv4())
		log.Infof("  Internal-Node IPv4: %s", node.GetInternalIPv4())
		log.Infof("  Cluster IPv4 prefix: %s", node.GetIPv4ClusterRange())
		log.Infof("  IPv4 allocation prefix: %s", node.GetIPv4AllocRange())
		// Allocate IPv4 service loopback IP
		loopbackIPv4, _, err := ipam.AllocateNext("ipv4")
		if err != nil {
			return nil, restoredEndpoints, fmt.Errorf("Unable to reserve IPv4 loopback address: %s", err)
		}
		node.SetIPv4Loopback(loopbackIPv4)
		log.Infof("  Loopback IPv4: %s", node.GetIPv4Loopback().String())
	}
	if err := node.ConfigureLocalNode(); err != nil {
		log.WithError(err).Fatal("Unable to initialize local node")
	}
	// This needs to be done after the node addressing has been configured
	// as the node address is required as suffix.
	cache.InitIdentityAllocator(&d)
	if path := option.Config.ClusterMeshConfig; path != "" {
		if option.Config.ClusterID == 0 {
			log.Info("Cluster-ID is not specified, skipping ClusterMesh initialization")
		} else {
			log.WithField("path", path).Info("Initializing ClusterMesh routing")
			clustermesh, err := clustermesh.NewClusterMesh(clustermesh.Configuration{
				Name:            "clustermesh",
				ConfigDirectory: path,
				NodeKeyCreator:  nodeStore.KeyCreator,
				ServiceMerger:   &d.k8sSvcCache,
			})
			if err != nil {
				log.WithError(err).Fatal("Unable to initialize ClusterMesh")
			}
			d.clustermesh = clustermesh
		}
	}
	if err = d.init(); err != nil {
		log.WithError(err).Error("Error while initializing daemon")
		return nil, restoredEndpoints, err
	}
	// Start watcher for endpoint IP --> identity mappings in key-value store.
	// this needs to be done *after* init() for the daemon in that function,
	// we populate the IPCache with the host's IP(s).
	ipcache.InitIPIdentityWatcher()
	// FIXME: Make the port range configurable.
	d.l7Proxy = proxy.StartProxySupport(10000, 20000, option.Config.RunDir,
		option.Config.AccessLog, &d, option.Config.AgentLabels)
	if err := fqdn.ConfigFromResolvConf(); err != nil {
		return nil, nil, err
	}
	err = d.bootstrapFQDN(restoredEndpoints)
	if err != nil {
		return nil, restoredEndpoints, err
	}
	return &d, restoredEndpoints, nil
}
// Close shuts down a daemon
func (d *Daemon) Close() {
	if d.policyTrigger == nil {
		return
	}
	d.policyTrigger.Shutdown()
}
// TriggerReloadWithoutCompile causes all BPF programs and maps to be reloaded,
// without recompiling the datapath logic for each endpoint. It first attempts
// to recompile the base programs, and if this fails returns an error. If base
// program load is successful, it subsequently triggers regeneration of all
// endpoints and returns a waitgroup that may be used by the caller to wait for
// all endpoint regeneration to complete.
//
// If an error is returned, then no regeneration was successful. If no error
// is returned, then the base programs were successfully regenerated, but
// endpoints may or may not have successfully regenerated.
func (d *Daemon) TriggerReloadWithoutCompile(reason string) (*sync.WaitGroup, error) {
	log.Debugf("BPF reload triggered from %s", reason)
	err := d.compileBase()
	if err != nil {
		return nil, fmt.Errorf("Unable to recompile base programs from %s: %s", reason, err)
	}
	// Reload (without recompiling) every endpoint's datapath.
	return endpointmanager.RegenerateAllEndpoints(d, &endpoint.ExternalRegenerationMetadata{
		Reason:         reason,
		ReloadDatapath: true,
	}), nil
}
// changedOption is the callback invoked for each daemon option toggled by
// ApplyValidated. Debug changes are propagated to the logger and proxies;
// every change bumps the policy revision to force recalculation.
func changedOption(key string, value option.OptionSetting, data interface{}) {
	daemon := data.(*Daemon)
	if key == option.Debug {
		// Set the debug toggle (this can be a no-op)
		logging.ToggleDebugLogs(daemon.DebugEnabled())
		// Reflect log level change to proxies
		proxy.ChangeLogLevel(logging.GetLevel(logging.DefaultLogger))
	}
	daemon.policy.BumpRevision() // force policy recalculation
}
// patchConfig implements the handler for PATCH /config API requests.
type patchConfig struct {
	// daemon is the daemon instance whose configuration is patched.
	daemon *Daemon
}

// NewPatchConfigHandler returns a PATCH /config handler bound to d.
func NewPatchConfigHandler(d *Daemon) PatchConfigHandler {
	return &patchConfig{daemon: d}
}
// Handle serves PATCH /config: it validates the requested option map,
// optionally restarts the node monitor when MonitorNumPages changes,
// applies policy-enforcement and option changes, and recompiles the base
// programs when anything actually changed.
func (h *patchConfig) Handle(params PatchConfigParams) middleware.Responder {
	log.WithField(logfields.Params, logfields.Repr(params)).Debug("PATCH /config request")
	d := h.daemon
	cfgSpec := params.Configuration
	om, err := option.Config.Opts.Library.ValidateConfigurationMap(cfgSpec.Options)
	if err != nil {
		msg := fmt.Errorf("Invalid configuration option %s", err)
		return api.Error(PatchConfigBadRequestCode, msg)
	}
	// Serialize configuration updates to the daemon.
	option.Config.ConfigPatchMutex.Lock()
	defer option.Config.ConfigPatchMutex.Unlock()
	nmArgs := d.nodeMonitor.GetArgs()
	if numPagesEntry, ok := cfgSpec.Options["MonitorNumPages"]; ok {
		// Restart the monitor only when the requested value differs from
		// the current first argument. The length check must come before
		// any indexing: the previous code dereferenced nmArgs[0] in the
		// outer condition and panicked when the argument list was empty.
		if len(nmArgs) == 0 || nmArgs[0] != numPagesEntry {
			// NOTE(review): the literal "%s" in this flag looks
			// unintended — confirm how the monitor launcher parses
			// "--num-pages".
			args := []string{"--num-pages %s", numPagesEntry}
			d.nodeMonitor.Restart(args)
		}
		if len(cfgSpec.Options) == 0 {
			return NewPatchConfigOK()
		}
		delete(cfgSpec.Options, "MonitorNumPages")
	}
	// Track changes to daemon's configuration
	var changes int
	// Only update if value provided for PolicyEnforcement.
	if enforcement := cfgSpec.PolicyEnforcement; enforcement != "" {
		switch enforcement {
		case option.NeverEnforce, option.DefaultEnforcement, option.AlwaysEnforce:
			// Update policy enforcement configuration if needed.
			oldEnforcementValue := policy.GetPolicyEnabled()
			// If the policy enforcement configuration has indeed changed, we have
			// to regenerate endpoints and update daemon's configuration.
			if enforcement != oldEnforcementValue {
				log.Debug("configuration request to change PolicyEnforcement for daemon")
				changes++
				policy.SetPolicyEnabled(enforcement)
			}
		default:
			msg := fmt.Errorf("Invalid option for PolicyEnforcement %s", enforcement)
			log.Warn(msg)
			return api.Error(PatchConfigFailureCode, msg)
		}
		log.Debug("finished configuring PolicyEnforcement for daemon")
	}
	changes += option.Config.Opts.ApplyValidated(om, changedOption, d)
	log.WithField("count", changes).Debug("Applied changes to daemon's configuration")
	if changes > 0 {
		// Only recompile if configuration has changed.
		log.Debug("daemon configuration has changed; recompiling base programs")
		if err := d.compileBase(); err != nil {
			msg := fmt.Errorf("Unable to recompile base programs: %s", err)
			return api.Error(PatchConfigFailureCode, msg)
		}
		d.TriggerPolicyUpdates(true, "agent configuration update")
	}
	return NewPatchConfigOK()
}
// getConfig implements the handler for GET /config API requests.
type getConfig struct {
	// daemon is the daemon instance whose configuration is reported.
	daemon *Daemon
}

// NewGetConfigHandler returns a GET /config handler bound to d.
func NewGetConfigHandler(d *Daemon) GetConfigHandler {
	return &getConfig{daemon: d}
}
// Handle serves GET /config by returning the daemon's mutable option spec
// together with a read-only status snapshot (addressing, k8s endpoint,
// kvstore settings, MTUs and datapath mode).
func (h *getConfig) Handle(params GetConfigParams) middleware.Responder {
	log.WithField(logfields.Params, logfields.Repr(params)).Debug("GET /config request")
	d := h.daemon
	spec := &models.DaemonConfigurationSpec{
		Options:           *option.Config.Opts.GetMutableModel(),
		PolicyEnforcement: policy.GetPolicyEnabled(),
	}
	status := &models.DaemonConfigurationStatus{
		Addressing:       node.GetNodeAddressing(),
		K8sConfiguration: k8s.GetKubeconfigPath(),
		K8sEndpoint:      k8s.GetAPIServer(),
		NodeMonitor:      d.nodeMonitor.State(),
		KvstoreConfiguration: &models.KVstoreConfiguration{
			Type:    option.Config.KVStore,
			Options: option.Config.KVStoreOpt,
		},
		// Realized mirrors the spec, as option application is synchronous.
		Realized:            spec,
		DeviceMTU:           int64(d.mtuConfig.GetDeviceMTU()),
		RouteMTU:            int64(d.mtuConfig.GetRouteMTU()),
		DatapathMode:        models.DatapathMode(option.Config.DatapathMode),
		IpvlanDeviceIfIndex: int64(option.Config.IpvlanDeviceIfIndex),
	}
	cfg := &models.DaemonConfiguration{
		Spec:   spec,
		Status: status,
	}
	return NewGetConfigOK().WithPayload(cfg)
}
// listFilterIfs returns a map of interfaces based on the given filter.
// The filter should take a link and, if found, return the index of that
// interface, if not found return -1.
func listFilterIfs(filter func(netlink.Link) int) (map[int]netlink.Link, error) {
	links, err := netlink.LinkList()
	if err != nil {
		return nil, err
	}
	matched := map[int]netlink.Link{}
	for _, link := range links {
		idx := filter(link)
		if idx == -1 {
			continue
		}
		matched[idx] = link
	}
	return matched, nil
}
// clearCiliumVeths checks all veths created by cilium and removes all that
// are considered a leftover from failed attempts to connect the container.
// A veth is considered leftover when its peer is also visible on the host
// and carries the "lxc" name prefix.
func (d *Daemon) clearCiliumVeths() error {
	leftVeths, err := listFilterIfs(func(intf netlink.Link) int {
		// Filter by veth and return the index of the interface.
		if intf.Type() == "veth" {
			return intf.Attrs().Index
		}
		return -1
	})
	if err != nil {
		return fmt.Errorf("unable to retrieve host network interfaces: %s", err)
	}
	for _, v := range leftVeths {
		peerIndex := v.Attrs().ParentIndex
		parentVeth, found := leftVeths[peerIndex]
		if found && peerIndex != 0 && strings.HasPrefix(parentVeth.Attrs().Name, "lxc") {
			err := netlink.LinkDel(v)
			if err != nil {
				// Use the structured logger (the previous code printed
				// to stdout via fmt.Printf, without a trailing newline).
				log.WithError(err).Warnf("Unable to delete leftover veth (%d, %s)",
					v.Attrs().Index, v.Attrs().Name)
			}
		}
	}
	return nil
}
// numWorkerThreads returns the number of worker threads with a minimum of 4.
func numWorkerThreads() int {
ncpu := runtime.NumCPU()
minWorkerThreads := 2
if ncpu < minWorkerThreads {
return minWorkerThreads
}
return ncpu
}
// GetServiceList returns list of services
func (d *Daemon) GetServiceList() []*models.Service {
	d.loadBalancer.BPFMapMU.RLock()
	defer d.loadBalancer.BPFMapMU.RUnlock()

	// Pre-size to the current service count; the result is non-nil even
	// when there are no services.
	services := make([]*models.Service, 0, len(d.loadBalancer.SVCMap))
	for _, svc := range d.loadBalancer.SVCMap {
		services = append(services, svc.GetModel())
	}
	return services
}
// SendNotification sends an agent notification to the monitor
func (d *Daemon) SendNotification(typ monitorAPI.AgentNotification, text string) error {
	return d.nodeMonitor.SendEvent(monitorAPI.MessageTypeAgent,
		monitorAPI.AgentNotify{Type: typ, Text: text})
}
// NewProxyLogRecord is invoked by the proxy accesslog on each new access log entry
// and forwards the embedded record to the node monitor as an access-log
// event.
func (d *Daemon) NewProxyLogRecord(l *logger.LogRecord) error {
	return d.nodeMonitor.SendEvent(monitorAPI.MessageTypeAccessLog, l.LogRecord)
}
// GetNodeSuffix returns the suffix to be appended to kvstore keys of this
// agent
func (d *Daemon) GetNodeSuffix() string {
	ip := node.GetExternalIPv4()
	if ip == nil {
		// Fatal is expected to terminate the process; the return below
		// only satisfies the compiler.
		log.Fatal("Node IP not available yet")
		return "<nil>"
	}
	return ip.String()
}
// bootstrapFQDN initializes the toFQDNs related subsystems: DNSPoller,
// d.dnsRuleGen, and the DNS proxy.
// dnsRuleGen and DNSPoller will use the default resolver and, implicitly, the
// default DNS cache. The proxy binds to all interfaces, and uses the
// configured DNS proxy port (this may be 0 and so OS-assigned).
func (d *Daemon) bootstrapFQDN(restoredEndpoints *endpointRestoreState) (err error) {
	cfg := fqdn.Config{
		MinTTL:         option.Config.ToFQDNsMinTTL,
		Cache:          fqdn.DefaultDNSCache,
		LookupDNSNames: fqdn.DNSLookupDefaultResolver,
		AddGeneratedRules: func(generatedRules []*policyApi.Rule) error {
			// Insert the new rules into the policy repository. We need them to
			// replace the previous set. This requires the labels to match (including
			// the ToFQDN-UUID one).
			_, err := d.PolicyAdd(generatedRules, &AddOptions{Replace: true, Generated: true})
			return err
		},
		// PollerResponseNotify emits a monitor access-log record for each DNS
		// response observed by the poller (when enabled via
		// --tofqdns-enable-poller-events).
		PollerResponseNotify: func(lookupTime time.Time, qname string, response *fqdn.DNSIPRecords) {
			// Do nothing if this option is off
			if !option.Config.ToFQDNsEnablePollerEvents {
				return
			}

			// FIXME: Not always true but we don't have the protocol information here
			protocol := accesslog.TransportProtocol(u8proto.ProtoIDs["udp"])

			record := logger.LogRecord{
				LogRecord: accesslog.LogRecord{
					Type:              accesslog.TypeResponse,
					ObservationPoint:  accesslog.Ingress,
					IPVersion:         accesslog.VersionIPv4,
					TransportProtocol: protocol,
					Timestamp:         time.Now().UTC().Format(time.RFC3339Nano),
					NodeAddressInfo: accesslog.NodeAddressInfo{
						IPv4: node.GetExternalIPv4().String(),
						IPv6: node.GetIPv6().String(),
					},
				},
			}

			logger.LogTags.Verdict(accesslog.VerdictForwarded, "DNSPoller")(&record)
			logger.LogTags.DNS(&accesslog.LogRecordDNS{
				Query:             qname,
				IPs:               response.IPs,
				TTL:               uint32(response.TTL),
				CNAMEs:            nil,
				ObservationSource: accesslog.DNSSourceAgentPoller,
			})(&record)
			record.Log()
		}}

	d.dnsRuleGen = fqdn.NewRuleGen(cfg)
	d.dnsPoller = fqdn.NewDNSPoller(cfg, d.dnsRuleGen)
	if option.Config.ToFQDNsEnablePoller {
		fqdn.StartDNSPoller(d.dnsPoller)
	}

	// Prefill the cache with DNS lookups from restored endpoints. This is needed
	// to maintain continuity of which IPs are allowed.
	// Note: This is TTL aware, and expired data will not be used (e.g. when
	// restoring after a long delay).
	for _, restoredEP := range restoredEndpoints.restored {
		// Upgrades from old ciliums have this nil
		if restoredEP.DNSHistory != nil {
			fqdn.DefaultDNSCache.UpdateFromCache(restoredEP.DNSHistory)
		}
	}

	// Once we stop returning errors from StartDNSProxy this should live in
	// StartProxySupport
	proxy.DefaultDNSProxy, err = dnsproxy.StartDNSProxy("", uint16(option.Config.ToFQDNsProxyPort),
		// LookupEPByIP maps a local endpoint IP to its string ID, used by the
		// proxy to attribute DNS requests to an endpoint.
		func(endpointIP net.IP) (endpointID string, err error) {
			e := endpointmanager.LookupIPv4(endpointIP.String())
			if e == nil {
				return "", fmt.Errorf("Cannot find endpoint with IP %s", endpointIP.String())
			}

			return e.StringID(), nil
		},
		// NotifyOnDNSMsg handles DNS data in the daemon by emitting monitor
		// events, proxy metrics and storing DNS data in the DNS cache. This may
		// result in rule generation.
		// It will:
		// - Report a monitor error event and proxy metrics when the proxy sees an
		//   error, and when it can't process something in this function
		// - Report the verdict in a monitor event and emit proxy metrics
		// - Insert the DNS data into the cache when msg is a DNS response and we
		//   can lookup the endpoint related to it
		func(lookupTime time.Time, srcAddr, dstAddr string, msg *dns.Msg, protocol string, allowed bool, proxyErr error) error {
			var protoID = u8proto.ProtoIDs[strings.ToLower(protocol)]

			// Derive the access-log verdict from the proxy outcome.
			var verdict accesslog.FlowVerdict
			var reason string
			switch {
			case proxyErr != nil:
				verdict = accesslog.VerdictError
				reason = "Error: " + proxyErr.Error()
			case allowed:
				verdict = accesslog.VerdictForwarded
				reason = "Allowed by policy"
			case !allowed:
				verdict = accesslog.VerdictDenied
				reason = "Denied by policy"
			}

			// DNS responses are ingress traffic from the endpoint's perspective.
			var ingress = msg.Response
			var flowType accesslog.FlowType
			if ingress {
				flowType = accesslog.TypeResponse
			} else {
				flowType = accesslog.TypeRequest
			}

			// Try the source address first; for requests this is the endpoint
			// that issued the DNS query.
			var ep *endpoint.Endpoint
			srcID := uint32(0)
			srcIP, _, err := net.SplitHostPort(srcAddr)
			if err != nil {
				// dstAddr is used instead, below
				log.WithError(err).Error("cannot extract endpoint IP from DNS request")
			} else {
				ep = endpointmanager.LookupIPv4(srcIP)
				if ep != nil {
					srcID = ep.GetIdentity().Uint32()
				}
			}

			var dstPort int
			dstIPStr, dstPortStr, err := net.SplitHostPort(dstAddr)
			if err != nil {
				// This may be fatal, but we handle that case below, if ep == nil
				log.WithError(err).Error("cannot extract endpoint IP from DNS request")
			} else {
				if dstPort, err = strconv.Atoi(dstPortStr); err != nil {
					log.WithError(err).WithField(logfields.Port, dstPortStr).Error("cannot parse destination port")
				}
			}
			if ep == nil {
				// ep needs to be non-nil for the access log. One of the ends of this connection will be an endpoint.
				ep = endpointmanager.LookupIPv4(dstIPStr)
			}
			if ep == nil {
				// This is a hard fail. We cannot proceed because record.Log requires a
				// non-nil ep, and we also don't want to insert this data into the
				// cache if we don't know that an endpoint asked for it (this is
				// asserted via ep != nil here and msg.Response && msg.Rcode ==
				// dns.RcodeSuccess below).
				// Bug fix: the original code called ep.UpdateProxyStatistics here,
				// which dereferences the nil ep and panics. There is no endpoint
				// to attribute statistics to, so only log and return the error.
				err := fmt.Errorf("Cannot find matching endpoint for IPs %s or %s", srcAddr, dstAddr)
				log.WithError(err).Error("cannot find matching endpoint")
				return err
			}

			qname, responseIPs, TTL, CNAMEs, err := dnsproxy.ExtractMsgDetails(msg)
			if err != nil {
				// This error is ok because all these values are used for reporting, or filling in the cache.
				log.WithError(err).Error("cannot extract DNS message details")
			}

			ep.UpdateProxyStatistics("dns", uint16(dstPort), ingress, !ingress, verdict)
			record := logger.NewLogRecord(proxy.DefaultEndpointInfoRegistry, ep, flowType, ingress,
				func(lr *logger.LogRecord) { lr.LogRecord.TransportProtocol = accesslog.TransportProtocol(protoID) },
				logger.LogTags.Verdict(verdict, reason),
				logger.LogTags.Addressing(logger.AddressingInfo{
					SrcIPPort:   srcAddr,
					DstIPPort:   dstAddr,
					SrcIdentity: srcID,
				}),
				logger.LogTags.DNS(&accesslog.LogRecordDNS{
					Query:             qname,
					IPs:               responseIPs,
					TTL:               TTL,
					CNAMEs:            CNAMEs,
					ObservationSource: accesslog.DNSSourceProxy,
				}),
			)
			record.Log()

			if msg.Response && msg.Rcode == dns.RcodeSuccess {
				// This must happen before the ruleGen update below, to ensure that
				// this data is included in the serialized Endpoint object.
				// Note: We need to fixup minTTL to be consistent with how we insert it
				// elsewhere i.e. we don't want to lose the lower bound for DNS data
				// TTL if we reboot twice.
				log.WithField(logfields.EndpointID, ep.ID).Debug("Recording DNS lookup in endpoint specific cache")
				effectiveTTL := int(TTL)
				if effectiveTTL < option.Config.ToFQDNsMinTTL {
					effectiveTTL = option.Config.ToFQDNsMinTTL
				}
				ep.DNSHistory.Update(lookupTime, qname, responseIPs, effectiveTTL)

				// Bug fix: debug message previously read "response to to query".
				log.Debug("Updating DNS name in cache from response to query")
				err = d.dnsRuleGen.UpdateGenerateDNS(lookupTime, map[string]*fqdn.DNSIPRecords{
					qname: {
						IPs: responseIPs,
						TTL: int(effectiveTTL),
					}})
				if err != nil {
					log.WithError(err).Error("error updating internal DNS cache for rule generation")
				}
			}

			return nil
		})

	proxy.DefaultDNSProxy.SetRejectReply(option.Config.FQDNRejectResponse)
	return err // filled by StartDNSProxy
}
fqdn/proxy: More reliable notify endpoint lookups
When we report monitor events for an endpoint, we need to find the
endpoint that made the request. We previously tried one address, then
the other, hoping to find one that worked. This behaved incorrectly for
responses from other pods, as we would use the source address to look up
the "server", incorrectly recording DNS history in that endpoint and
confusing the monitor event generation.
Furthermore, we passed the endpoint ID to the addressing utility
function and this resulted in incorrect data. Passing 0 forces a lookup
by IP, which seems to bypass whatever error we introduced doing the
lookup ourselves.
Separately, this also fixes the port determination when reporting
metrics. We previously used the destination port in either direction but
it is expected to be the port that is being proxied (53 in the DNS
case).
Fixes 12ea234cdb2740d743774fb4901af4db49798fa2
Signed-off-by: Ray Bejjani <7f525154e857f3d57a641a83dddc584f42addc6a@covalent.io>
// Copyright 2016-2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bufio"
"context"
"fmt"
"net"
"os"
"path/filepath"
"reflect"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/cilium/cilium/api/v1/models"
. "github.com/cilium/cilium/api/v1/server/restapi/daemon"
health "github.com/cilium/cilium/cilium-health/launch"
"github.com/cilium/cilium/common"
monitorLaunch "github.com/cilium/cilium/monitor/launch"
"github.com/cilium/cilium/pkg/api"
"github.com/cilium/cilium/pkg/bpf"
"github.com/cilium/cilium/pkg/byteorder"
"github.com/cilium/cilium/pkg/clustermesh"
"github.com/cilium/cilium/pkg/command/exec"
"github.com/cilium/cilium/pkg/completion"
"github.com/cilium/cilium/pkg/controller"
"github.com/cilium/cilium/pkg/counter"
bpfIPCache "github.com/cilium/cilium/pkg/datapath/ipcache"
"github.com/cilium/cilium/pkg/datapath/iptables"
"github.com/cilium/cilium/pkg/datapath/prefilter"
"github.com/cilium/cilium/pkg/debug"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/endpoint"
"github.com/cilium/cilium/pkg/endpointmanager"
"github.com/cilium/cilium/pkg/envoy"
"github.com/cilium/cilium/pkg/fqdn"
"github.com/cilium/cilium/pkg/fqdn/dnsproxy"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/identity/cache"
"github.com/cilium/cilium/pkg/ipam"
"github.com/cilium/cilium/pkg/ipcache"
"github.com/cilium/cilium/pkg/k8s"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/loadbalancer"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/maps/ctmap"
"github.com/cilium/cilium/pkg/maps/eppolicymap"
ipcachemap "github.com/cilium/cilium/pkg/maps/ipcache"
"github.com/cilium/cilium/pkg/maps/lbmap"
"github.com/cilium/cilium/pkg/maps/lxcmap"
"github.com/cilium/cilium/pkg/maps/metricsmap"
"github.com/cilium/cilium/pkg/maps/policymap"
"github.com/cilium/cilium/pkg/maps/proxymap"
"github.com/cilium/cilium/pkg/maps/sockmap"
"github.com/cilium/cilium/pkg/maps/tunnel"
monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
"github.com/cilium/cilium/pkg/mtu"
"github.com/cilium/cilium/pkg/node"
nodeStore "github.com/cilium/cilium/pkg/node/store"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy"
policyApi "github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/proxy"
"github.com/cilium/cilium/pkg/proxy/accesslog"
"github.com/cilium/cilium/pkg/proxy/logger"
"github.com/cilium/cilium/pkg/revert"
"github.com/cilium/cilium/pkg/sockops"
"github.com/cilium/cilium/pkg/status"
"github.com/cilium/cilium/pkg/trigger"
"github.com/cilium/cilium/pkg/u8proto"
"github.com/cilium/cilium/pkg/workloads"
"github.com/go-openapi/runtime/middleware"
"github.com/miekg/dns"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.org/x/sync/semaphore"
)
// AutoCIDR indicates that a CIDR should be allocated
const AutoCIDR = "auto"
// Positional indices of the arguments passed to the datapath init.sh script.
// The argument slice is assembled in compileBase; initArgMax is the number of
// fixed positions (extra arguments may be appended after it).
const (
	initArgLib int = iota // BPF template/library directory (option.Config.BpfDir)
	initArgRundir         // runtime state directory (option.Config.StateDir)
	initArgIPv4NodeIP     // node internal IPv4, or "<nil>" when IPv4 is disabled
	initArgIPv6NodeIP     // node IPv6, or "<nil>" when IPv6 is disabled
	initArgMode           // datapath mode: "lb", "ipvlan", "direct" or the tunnel mode
	initArgDevice         // native device name when option.Config.Device is set
	initArgDevicePreFilter
	initArgModePreFilter
	initArgMTU
	initArgMax // count of fixed arguments; used to size the slice
)
// Daemon is the cilium daemon that is in charge of performing all necessary
// plumbing and monitoring when a LXC starts.
type Daemon struct {
	// buildEndpointSem bounds the number of concurrent endpoint builds; see
	// QueueEndpointBuild.
	buildEndpointSem *semaphore.Weighted
	// l7Proxy is the L7 proxy integration; nil when the proxy is disabled
	// (the proxy-related methods below guard against this).
	l7Proxy      *proxy.Proxy
	loadBalancer *loadbalancer.LoadBalancer
	// policy is the rule repository returned by GetPolicyRepository.
	policy    *policy.Repository
	preFilter *prefilter.PreFilter
	// Only used for CRI-O since it does not support events.
	workloadsEventsCh chan<- *workloads.EventMessage

	// statusCollectMutex presumably guards statusResponse/statusCollector —
	// TODO confirm against the status-collection code (not in this file section).
	statusCollectMutex lock.RWMutex
	statusResponse     models.StatusResponse
	statusCollector    *status.Collector

	// uniqueIDMU guards uniqueID.
	uniqueIDMU lock.Mutex
	// uniqueID maps an endpoint ID waiting for a build permit to the cancel
	// function of its wait; see QueueEndpointBuild / RemoveFromEndpointQueue.
	uniqueID map[uint64]context.CancelFunc

	nodeMonitor  *monitorLaunch.NodeMonitor
	ciliumHealth *health.CiliumHealth

	// dnsRuleGen manages toFQDNs rules
	dnsRuleGen *fqdn.RuleGen

	// dnsPoller polls DNS names and sends them to dnsRuleGen
	dnsPoller *fqdn.DNSPoller

	// k8sAPIs is a set of k8s API in use. They are setup in EnableK8sWatcher,
	// and may be disabled while the agent runs.
	// This is on this object, instead of a global, because EnableK8sWatcher is
	// on Daemon.
	k8sAPIGroups k8sAPIGroupsUsed

	// Used to synchronize generation of daemon's BPF programs and endpoint BPF
	// programs.
	compilationMutex *lock.RWMutex

	// prefixLengths tracks a mapping from CIDR prefix length to the count
	// of rules that refer to that prefix length.
	prefixLengths *counter.PrefixLengthCounter

	clustermesh *clustermesh.ClusterMesh

	// k8sResourceSyncWaitGroup is used to block the starting of the daemon,
	// including regenerating restored endpoints (if specified) until all
	// policies, services, ingresses, and endpoints stored in Kubernetes at the
	// time of bootstrapping of the agent are consumed by Cilium.
	// This prevents regeneration of endpoints, restoring of loadbalancer BPF
	// maps, etc. being performed without crucial information in securing said
	// components. See GH-5038 and GH-4457.
	k8sResourceSyncWaitGroup sync.WaitGroup

	// k8sSvcCache is a cache of all Kubernetes services and endpoints
	k8sSvcCache k8s.ServiceCache

	// mtuConfig supplies the device MTU used when compiling the datapath.
	mtuConfig mtu.Configuration

	// policyTrigger serializes/rate-limits policy updates; created in
	// NewDaemon with a one-second minimum interval.
	policyTrigger *trigger.Trigger
}
// UpdateProxyRedirect updates the redirect rules in the proxy for a particular
// endpoint using the provided L4 filter. Returns the allocated proxy port
func (d *Daemon) UpdateProxyRedirect(e *endpoint.Endpoint, l4 *policy.L4Filter, proxyWaitGroup *completion.WaitGroup) (uint16, error, revert.FinalizeFunc, revert.RevertFunc) {
	// Without an L7 proxy there is nothing to redirect to.
	if d.l7Proxy == nil {
		return 0, fmt.Errorf("can't redirect, proxy disabled"), nil, nil
	}

	redirect, err, finalize, undo := d.l7Proxy.CreateOrUpdateRedirect(l4, e.ProxyID(l4), e, proxyWaitGroup)
	if err != nil {
		return 0, err, nil, nil
	}

	return redirect.ProxyPort, nil, finalize, undo
}
// RemoveProxyRedirect removes a previously installed proxy redirect for an
// endpoint
func (d *Daemon) RemoveProxyRedirect(e *endpoint.Endpoint, id string, proxyWaitGroup *completion.WaitGroup) (error, revert.FinalizeFunc, revert.RevertFunc) {
	// Nothing to tear down when the proxy is disabled.
	if d.l7Proxy == nil {
		return nil, nil, nil
	}

	scopedLog := log.WithFields(logrus.Fields{
		logfields.EndpointID: e.ID,
		logfields.L4PolicyID: id,
	})
	scopedLog.Debug("Removing redirect to endpoint")
	return d.l7Proxy.RemoveRedirect(id, proxyWaitGroup)
}
// UpdateNetworkPolicy adds or updates a network policy in the set
// published to L7 proxies.
func (d *Daemon) UpdateNetworkPolicy(e *endpoint.Endpoint, policy *policy.L4Policy,
	labelsMap, deniedIngressIdentities, deniedEgressIdentities cache.IdentityCache, proxyWaitGroup *completion.WaitGroup) (error, revert.RevertFunc) {
	// No proxy, no policy to publish.
	if d.l7Proxy == nil {
		return fmt.Errorf("can't update network policy, proxy disabled"), nil
	}

	err, undo := d.l7Proxy.UpdateNetworkPolicy(e, policy, e.GetIngressPolicyEnabledLocked(), e.GetEgressPolicyEnabledLocked(),
		labelsMap, deniedIngressIdentities, deniedEgressIdentities, proxyWaitGroup)
	return err, revert.RevertFunc(undo)
}
// RemoveNetworkPolicy removes a network policy from the set published to
// L7 proxies.
func (d *Daemon) RemoveNetworkPolicy(e *endpoint.Endpoint) {
	// Silently ignore the call when the proxy is disabled.
	if d.l7Proxy != nil {
		d.l7Proxy.RemoveNetworkPolicy(e)
	}
}
// QueueEndpointBuild waits for a "build permit" for the endpoint
// identified by 'epID'. This function blocks until the endpoint can
// start building. The returned function must then be called to
// release the "build permit" when the most resource intensive parts
// of the build are done. The returned function is idempotent, so it
// may be called more than once. Returns nil if the caller should NOT
// start building the endpoint. This may happen due to a build being
// queued for the endpoint already, or due to the wait for the build
// permit being canceled. The latter case happens when the endpoint is
// being deleted.
func (d *Daemon) QueueEndpointBuild(epID uint64) func() {
	d.uniqueIDMU.Lock()
	// Skip new build requests if the endpoint is already in the queue
	// waiting. In this case the queued build will pick up any changes
	// made so far, so there is no need to queue another build now.
	if _, queued := d.uniqueID[epID]; queued {
		d.uniqueIDMU.Unlock()
		return nil
	}
	// Store a cancel function to the 'uniqueID' map so that we can
	// cancel the wait when the endpoint is being deleted.
	ctx, cancel := context.WithCancel(context.Background())
	d.uniqueID[epID] = cancel
	d.uniqueIDMU.Unlock()

	// Acquire build permit. This may block. The mutex must NOT be held here,
	// or RemoveFromEndpointQueue could never cancel the wait.
	err := d.buildEndpointSem.Acquire(ctx, 1)

	// Not queueing any more, so remove the cancel func from 'uniqueID' map.
	// The caller may still cancel the build by calling the cancel func
	// after we return it. After this point another build may be queued for
	// this endpoint.
	d.uniqueIDMU.Lock()
	delete(d.uniqueID, epID)
	d.uniqueIDMU.Unlock()

	if err != nil {
		return nil // Acquire failed
	}

	// Acquire succeeded, but the context was canceled after?
	// (Acquire can succeed in a race with cancellation; release the permit
	// and report "do not build" in that case.)
	if ctx.Err() == context.Canceled {
		d.buildEndpointSem.Release(1)
		return nil
	}

	// At this point the build permit has been acquired. It must
	// be released by the caller by calling the returned function
	// when the heavy lifting of the build is done.
	// Using sync.Once to make the returned function idempotent.
	var once sync.Once
	return func() {
		once.Do(func() {
			d.buildEndpointSem.Release(1)
		})
	}
}
// RemoveFromEndpointQueue removes the endpoint from the "build permit" queue,
// canceling the wait for the build permit if still waiting.
func (d *Daemon) RemoveFromEndpointQueue(epID uint64) {
	d.uniqueIDMU.Lock()
	defer d.uniqueIDMU.Unlock()

	cancel, queued := d.uniqueID[epID]
	if queued && cancel != nil {
		delete(d.uniqueID, epID)
		cancel()
	}
}
// GetPolicyRepository exposes the daemon's policy rule repository.
func (d *Daemon) GetPolicyRepository() *policy.Repository {
	return d.policy
}
// DebugEnabled reports whether the runtime debug option is turned on.
func (d *Daemon) DebugEnabled() bool {
	return option.Config.Opts.IsEnabled(option.Debug)
}
// writeNetdevHeader writes the netdev configuration header into dir: the
// current option list, the policy-enforcement #defines and the ipcache prefix
// lengths consumed by the datapath.
func (d *Daemon) writeNetdevHeader(dir string) error {
	headerPath := filepath.Join(dir, common.NetdevHeaderFileName)
	log.WithField(logfields.Path, headerPath).Debug("writing configuration")

	f, err := os.Create(headerPath)
	if err != nil {
		return fmt.Errorf("failed to open file %s for writing: %s", headerPath, err)
	}
	defer f.Close()

	w := bufio.NewWriter(f)
	w.WriteString(option.Config.Opts.GetFmtList())
	w.WriteString(d.fmtPolicyEnforcementIngress())
	w.WriteString(d.fmtPolicyEnforcementEgress())
	endpoint.WriteIPCachePrefixes(w, d.prefixLengths.ToBPFData)

	// Any buffered write error surfaces here.
	return w.Flush()
}
// returns #define for PolicyIngress based on the configuration of the daemon.
func (d *Daemon) fmtPolicyEnforcementIngress() string {
	directive := "#undef"
	if policy.GetPolicyEnabled() == option.AlwaysEnforce {
		directive = "#define"
	}
	return fmt.Sprintf("%s %s\n", directive, option.IngressSpecPolicy.Define)
}
// returns #define for PolicyEgress based on the configuration of the daemon.
func (d *Daemon) fmtPolicyEnforcementEgress() string {
	directive := "#undef"
	if policy.GetPolicyEnabled() == option.AlwaysEnforce {
		directive = "#define"
	}
	return fmt.Sprintf("%s %s\n", directive, option.EgressSpecPolicy.Define)
}
// Must be called with option.Config.EnablePolicyMU locked.
//
// writePreFilterHeader writes the XDP prefilter configuration header into dir.
func (d *Daemon) writePreFilterHeader(dir string) error {
	headerPath := filepath.Join(dir, common.PreFilterHeaderFileName)
	log.WithField(logfields.Path, headerPath).Debug("writing configuration")

	f, err := os.Create(headerPath)
	if err != nil {
		return fmt.Errorf("failed to open file %s for writing: %s", headerPath, err)
	}
	defer f.Close()

	w := bufio.NewWriter(f)
	// Emit the same banner as before, in a single write.
	fmt.Fprintf(w, "/*\n * XDP device: %s\n * XDP mode: %s\n */\n\n",
		option.Config.DevicePreFilter, option.Config.ModePreFilter)
	d.preFilter.WriteConfig(w)
	return w.Flush()
}
// setHostAddresses discovers the first universe-scoped IPv4 and IPv6 address
// on the configured LB interface and records them in the global configuration
// (option.Config.HostV4Addr / HostV6Addr). Disabled families and families
// without a suitable address are skipped silently.
func (d *Daemon) setHostAddresses() error {
	l, err := netlink.LinkByName(option.Config.LBInterface)
	if err != nil {
		// Bug fix: report the interface that was actually looked up
		// (LBInterface); the previous message referenced option.Config.Device.
		return fmt.Errorf("unable to get network device %s: %s", option.Config.LBInterface, err)
	}

	// getAddr returns the first SCOPE_UNIVERSE address of the requested
	// family on l, or (nil, nil) when none exists.
	getAddr := func(netLinkFamily int) (net.IP, error) {
		addrs, err := netlink.AddrList(l, netLinkFamily)
		if err != nil {
			// Bug fix: same as above — name the LB interface, not Device.
			return nil, fmt.Errorf("error while getting %s's addresses: %s", option.Config.LBInterface, err)
		}
		for _, possibleAddr := range addrs {
			if netlink.Scope(possibleAddr.Scope) == netlink.SCOPE_UNIVERSE {
				return possibleAddr.IP, nil
			}
		}
		return nil, nil
	}

	if option.Config.EnableIPv4 {
		hostV4Addr, err := getAddr(netlink.FAMILY_V4)
		if err != nil {
			return err
		}
		if hostV4Addr != nil {
			option.Config.HostV4Addr = hostV4Addr
			log.Infof("Using IPv4 host address: %s", option.Config.HostV4Addr)
		}
	}

	if option.Config.EnableIPv6 {
		hostV6Addr, err := getAddr(netlink.FAMILY_V6)
		if err != nil {
			return err
		}
		if hostV6Addr != nil {
			option.Config.HostV6Addr = hostV6Addr
			log.Infof("Using IPv6 host address: %s", option.Config.HostV6Addr)
		}
	}
	return nil
}
// GetCompilationLock returns the mutex that serializes compilation of BPF
// programs (shared between the base datapath and endpoint builds).
func (d *Daemon) GetCompilationLock() *lock.RWMutex {
	return d.compilationMutex
}
// compileBase writes the datapath configuration headers and runs the init.sh
// script to (re)compile and attach the base BPF programs, then installs host
// routes and iptables rules. It holds the compilation mutex for its entire
// duration so endpoint builds cannot run concurrently.
func (d *Daemon) compileBase() error {
	var args []string
	var mode string
	var ret error

	// Fixed-size positional argument vector for init.sh; see the initArg*
	// constants for the meaning of each slot.
	args = make([]string, initArgMax)

	// Lock so that endpoints cannot be built while we are compile base programs.
	d.compilationMutex.Lock()
	defer d.compilationMutex.Unlock()

	if err := d.writeNetdevHeader("./"); err != nil {
		log.WithError(err).Warn("Unable to write netdev header")
		return err
	}

	scopedLog := log.WithField(logfields.XDPDevice, option.Config.DevicePreFilter)
	if option.Config.DevicePreFilter != "undefined" {
		// Probe whether the XDP prefilter can run on this device; on failure
		// fall back to disabled ("undefined").
		if err := prefilter.ProbePreFilter(option.Config.DevicePreFilter, option.Config.ModePreFilter); err != nil {
			scopedLog.WithError(err).Warn("Turning off prefilter")
			option.Config.DevicePreFilter = "undefined"
		}
	}
	if option.Config.DevicePreFilter != "undefined" {
		if d.preFilter, ret = prefilter.NewPreFilter(); ret != nil {
			scopedLog.WithError(ret).Warn("Unable to init prefilter")
			return ret
		}

		if err := d.writePreFilterHeader("./"); err != nil {
			scopedLog.WithError(err).Warn("Unable to write prefilter header")
			return err
		}

		args[initArgDevicePreFilter] = option.Config.DevicePreFilter
		args[initArgModePreFilter] = option.Config.ModePreFilter
	}

	args[initArgLib] = option.Config.BpfDir
	args[initArgRundir] = option.Config.StateDir

	// "<nil>" marks a disabled address family for init.sh.
	if option.Config.EnableIPv4 {
		args[initArgIPv4NodeIP] = node.GetInternalIPv4().String()
	} else {
		args[initArgIPv4NodeIP] = "<nil>"
	}
	if option.Config.EnableIPv6 {
		args[initArgIPv6NodeIP] = node.GetIPv6().String()
	} else {
		args[initArgIPv6NodeIP] = "<nil>"
	}

	args[initArgMTU] = fmt.Sprintf("%d", d.mtuConfig.GetDeviceMTU())

	if option.Config.Device != "undefined" {
		// Native-device mode: verify the device exists before invoking init.sh.
		_, err := netlink.LinkByName(option.Config.Device)
		if err != nil {
			log.WithError(err).WithField("device", option.Config.Device).Warn("Link does not exist")
			return err
		}

		if option.Config.IsLBEnabled() {
			if option.Config.Device != option.Config.LBInterface {
				//FIXME: allow different interfaces
				return fmt.Errorf("Unable to have an interface for LB mode different than snooping interface")
			}
			if err := d.setHostAddresses(); err != nil {
				return err
			}
			mode = "lb"
		} else {
			if option.Config.DatapathMode == option.DatapathModeIpvlan {
				mode = "ipvlan"
			} else {
				mode = "direct"
			}
		}

		args[initArgMode] = mode
		args[initArgDevice] = option.Config.Device

		// The device name is also appended as an extra trailing argument.
		args = append(args, option.Config.Device)
	} else {
		// Tunnel mode; LB cannot be combined with it.
		if option.Config.IsLBEnabled() {
			//FIXME: allow LBMode in tunnel
			return fmt.Errorf("Unable to run LB mode with tunnel mode")
		}

		args[initArgMode] = option.Config.Tunnel
	}

	// Run init.sh with a bounded execution time.
	prog := filepath.Join(option.Config.BpfDir, "init.sh")
	ctx, cancel := context.WithTimeout(context.Background(), defaults.ExecTimeout)
	defer cancel()
	cmd := exec.CommandContext(ctx, prog, args...)
	cmd.Env = bpf.Environment()
	if _, err := cmd.CombinedOutput(log, true); err != nil {
		return err
	}

	ipam.ReserveLocalRoutes()
	node.InstallHostRoutes(d.mtuConfig)

	if option.Config.EnableIPv4 {
		// Always remove masquerade rule and then re-add it if required
		iptables.RemoveRules()
		if err := iptables.InstallRules(); err != nil {
			return err
		}
	}

	// NOTE(review): these messages describe sysctl settings that appear to be
	// applied by init.sh rather than by this function — confirm.
	log.Info("Setting sysctl net.core.bpf_jit_enable=1")
	log.Info("Setting sysctl net.ipv4.conf.all.rp_filter=0")
	log.Info("Setting sysctl net.ipv6.conf.all.disable_ipv6=0")

	return nil
}
// init prepares the daemon's runtime environment: it creates the globals
// directory, chdirs into the state directory, writes the node configuration
// header and — unless running in dry mode — opens the global BPF maps,
// compiles the base datapath, starts the periodic map-sync controllers and
// optionally cleans up stale load-balancer state.
func (d *Daemon) init() error {
	globalsDir := option.Config.GetGlobalsDir()
	if err := os.MkdirAll(globalsDir, defaults.RuntimePathRights); err != nil {
		log.WithError(err).WithField(logfields.Path, globalsDir).Fatal("Could not create runtime directory")
	}

	if err := os.Chdir(option.Config.StateDir); err != nil {
		log.WithError(err).WithField(logfields.Path, option.Config.StateDir).Fatal("Could not change to runtime directory")
	}

	if err := d.createNodeConfigHeaderfile(); err != nil {
		return err
	}

	if !option.Config.DryMode {
		// Open (or create) the global BPF maps before compiling the datapath
		// that references them.
		if _, err := lxcmap.LXCMap.OpenOrCreate(); err != nil {
			return err
		}
		if _, err := ipcachemap.IPCache.OpenOrCreate(); err != nil {
			return err
		}
		if _, err := metricsmap.Metrics.OpenOrCreate(); err != nil {
			return err
		}
		if _, err := tunnel.TunnelMap.OpenOrCreate(); err != nil {
			return err
		}

		if err := openServiceMaps(); err != nil {
			log.WithError(err).Fatal("Unable to open service maps")
		}

		if err := d.compileBase(); err != nil {
			return err
		}

		// Remove any old sockops and re-enable with _new_ programs if flag is set
		sockops.SockmapDisable()
		sockops.SkmsgDisable()

		if option.Config.SockopsEnable {
			eppolicymap.CreateEPPolicyMap()
			sockops.SockmapEnable()
			sockops.SkmsgEnable()
			sockmap.SockmapCreate()
		}

		// Set up the list of IPCache listeners in the daemon, to be
		// used by syncLXCMap().
		ipcache.IPIdentityCache.SetListeners([]ipcache.IPIdentityMappingListener{
			&envoy.NetworkPolicyHostsCache,
			bpfIPCache.NewListener(d),
		})

		// Insert local host entries to bpf maps
		if err := d.syncLXCMap(); err != nil {
			return err
		}

		// Start the controller for periodic sync
		// The purpose of the controller is to ensure that the host entries are
		// reinserted to the bpf maps if they are ever removed from them.
		// TODO: Determine if we can get rid of this when we have more rigorous
		// desired/realized state implementation for the bpf maps.
		controller.NewManager().UpdateController("lxcmap-bpf-host-sync",
			controller.ControllerParams{
				DoFunc:      func() error { return d.syncLXCMap() },
				RunInterval: 5 * time.Second,
			})

		// Start the controller for periodic sync of the metrics map with
		// the prometheus server.
		controller.NewManager().UpdateController("metricsmap-bpf-prom-sync",
			controller.ControllerParams{
				DoFunc:      metricsmap.SyncMetricsMap,
				RunInterval: 5 * time.Second,
			})

		// Clean all lb entries
		if !option.Config.RestoreState {
			log.Debug("cleaning up all BPF LB maps")

			d.loadBalancer.BPFMapMU.Lock()
			// NOTE: this defer releases the lock at function return, not at
			// the end of this if-block. No later code in this function takes
			// the lock, so that is safe today.
			defer d.loadBalancer.BPFMapMU.Unlock()

			if option.Config.EnableIPv6 {
				if err := lbmap.Service6Map.DeleteAll(); err != nil {
					return err
				}
				if err := lbmap.RRSeq6Map.DeleteAll(); err != nil {
					return err
				}
			}
			if err := d.RevNATDeleteAll(); err != nil {
				return err
			}
			if option.Config.EnableIPv4 {
				if err := lbmap.Service4Map.DeleteAll(); err != nil {
					return err
				}
				if err := lbmap.RRSeq4Map.DeleteAll(); err != nil {
					return err
				}
			}

			// If we are not restoring state, all endpoints can be
			// deleted. Entries will be re-populated.
			lxcmap.LXCMap.DeleteAll()
		}
	}

	return nil
}
// createNodeConfigHeaderfile writes the node configuration header consumed by
// the BPF datapath: node/router addresses, reserved identity IDs and the map
// size #defines. Returns an error if the file cannot be created or fully
// written.
func (d *Daemon) createNodeConfigHeaderfile() error {
	nodeConfigPath := option.Config.GetNodeConfigPath()
	f, err := os.Create(nodeConfigPath)
	if err != nil {
		log.WithError(err).WithField(logfields.Path, nodeConfigPath).Fatal("Failed to create node configuration file")
		return err
	}

	fw := bufio.NewWriter(f)

	routerIP := node.GetIPv6Router()
	hostIP := node.GetIPv6()

	fmt.Fprintf(fw, ""+
		"/*\n"+
		" * Node-IPv6: %s\n"+
		" * Router-IPv6: %s\n"+
		" * Host-IPv4: %s\n"+
		" */\n\n",
		hostIP.String(), routerIP.String(),
		node.GetInternalIPv4().String())

	fw.WriteString(common.FmtDefineComma("ROUTER_IP", routerIP))

	if option.Config.EnableIPv4 {
		ipv4GW := node.GetInternalIPv4()
		loopbackIPv4 := node.GetIPv4Loopback()
		fmt.Fprintf(fw, "#define IPV4_GATEWAY %#x\n", byteorder.HostSliceToNetwork(ipv4GW, reflect.Uint32).(uint32))
		fmt.Fprintf(fw, "#define IPV4_LOOPBACK %#x\n", byteorder.HostSliceToNetwork(loopbackIPv4, reflect.Uint32).(uint32))
	} else {
		// FIXME: Workaround so the bpf program compiles
		fmt.Fprintf(fw, "#define IPV4_GATEWAY %#x\n", 0)
		fmt.Fprintf(fw, "#define IPV4_LOOPBACK %#x\n", 0)
	}

	ipv4Range := node.GetIPv4AllocRange()
	fmt.Fprintf(fw, "#define IPV4_MASK %#x\n", byteorder.HostSliceToNetwork(ipv4Range.Mask, reflect.Uint32).(uint32))

	if nat46Range := option.Config.NAT46Prefix; nat46Range != nil {
		fw.WriteString(common.FmtDefineAddress("NAT46_PREFIX", nat46Range.IP))
	}

	fw.WriteString(common.FmtDefineComma("HOST_IP", hostIP))
	fmt.Fprintf(fw, "#define HOST_ID %d\n", identity.GetReservedID(labels.IDNameHost))
	fmt.Fprintf(fw, "#define WORLD_ID %d\n", identity.GetReservedID(labels.IDNameWorld))
	fmt.Fprintf(fw, "#define HEALTH_ID %d\n", identity.GetReservedID(labels.IDNameHealth))
	fmt.Fprintf(fw, "#define UNMANAGED_ID %d\n", identity.GetReservedID(labels.IDNameUnmanaged))
	fmt.Fprintf(fw, "#define INIT_ID %d\n", identity.GetReservedID(labels.IDNameInit))
	fmt.Fprintf(fw, "#define LB_RR_MAX_SEQ %d\n", lbmap.MaxSeq)
	fmt.Fprintf(fw, "#define CILIUM_LB_MAP_MAX_ENTRIES %d\n", lbmap.MaxEntries)
	fmt.Fprintf(fw, "#define TUNNEL_ENDPOINT_MAP_SIZE %d\n", tunnel.MaxEntries)
	fmt.Fprintf(fw, "#define PROXY_MAP_SIZE %d\n", proxymap.MaxEntries)
	fmt.Fprintf(fw, "#define ENDPOINTS_MAP_SIZE %d\n", lxcmap.MaxEntries)
	fmt.Fprintf(fw, "#define METRICS_MAP_SIZE %d\n", metricsmap.MaxEntries)
	fmt.Fprintf(fw, "#define POLICY_MAP_SIZE %d\n", policymap.MaxEntries)
	fmt.Fprintf(fw, "#define IPCACHE_MAP_SIZE %d\n", ipcachemap.MaxEntries)
	fmt.Fprintf(fw, "#define POLICY_PROG_MAP_SIZE %d\n", policymap.ProgArrayMaxEntries)
	fmt.Fprintf(fw, "#define SOCKOPS_MAP_SIZE %d\n", sockmap.MaxEntries)

	if option.Config.PreAllocateMaps {
		fmt.Fprintf(fw, "#define PREALLOCATE_MAPS\n")
	}

	fmt.Fprintf(fw, "#define TRACE_PAYLOAD_LEN %dULL\n", option.Config.TracePayloadlen)
	fmt.Fprintf(fw, "#define MTU %d\n", d.mtuConfig.GetDeviceMTU())

	if option.Config.EnableIPv4 {
		fmt.Fprintf(fw, "#define ENABLE_IPV4\n")
	}
	if option.Config.EnableIPv6 {
		fmt.Fprintf(fw, "#define ENABLE_IPV6\n")
	}

	// Bug fix: the original ignored the errors from Flush and Close, so a
	// short write could silently leave a truncated node config on disk.
	if err := fw.Flush(); err != nil {
		f.Close()
		return err
	}
	return f.Close()
}
// syncLXCMap adds local host entries to bpf lxcmap, as well as
// ipcache, if needed, and also notifies the daemon and network policy
// hosts cache if changes were made.
func (d *Daemon) syncLXCMap() error {
	// TODO: Update addresses first, in case node addressing has changed.
	// TODO: Once these start changing on runtime, figure out the locking strategy.
	specialIdentities := []identity.IPIdentityPair{}

	// Collect the node's own addresses (reserved "host" identity) and the
	// catch-all 0.0.0.0/0 / ::/0 prefixes (reserved "world" identity) for
	// each enabled address family.
	if option.Config.EnableIPv4 {
		specialIdentities = append(specialIdentities,
			[]identity.IPIdentityPair{
				{
					IP: node.GetInternalIPv4(),
					ID: identity.ReservedIdentityHost,
				},
				{
					IP: node.GetExternalIPv4(),
					ID: identity.ReservedIdentityHost,
				},
				{
					IP:   net.IPv4zero,
					Mask: net.CIDRMask(0, net.IPv4len*8),
					ID:   identity.ReservedIdentityWorld,
				},
			}...)
	}

	if option.Config.EnableIPv6 {
		specialIdentities = append(specialIdentities,
			[]identity.IPIdentityPair{
				{
					IP: node.GetIPv6(),
					ID: identity.ReservedIdentityHost,
				},
				{
					IP: node.GetIPv6Router(),
					ID: identity.ReservedIdentityHost,
				},
				{
					IP:   net.IPv6zero,
					Mask: net.CIDRMask(0, net.IPv6len*8),
					ID:   identity.ReservedIdentityWorld,
				},
			}...)
	}

	// Snapshot the current lxcmap so we can detect stale host entries below.
	existingEndpoints, err := lxcmap.DumpToMap()
	if err != nil {
		return err
	}

	for _, ipIDPair := range specialIdentities {
		isHost := ipIDPair.ID == identity.ReservedIdentityHost
		if isHost {
			added, err := lxcmap.SyncHostEntry(ipIDPair.IP)
			if err != nil {
				return fmt.Errorf("Unable to add host entry to endpoint map: %s", err)
			}
			if added {
				log.WithField(logfields.IPAddr, ipIDPair.IP).Debugf("Added local ip to endpoint map")
			}
		}

		// Still present, so not stale: remove from the deletion candidates.
		delete(existingEndpoints, ipIDPair.IP.String())

		// Upsert will not propagate (reserved:foo->ID) mappings across the cluster,
		// and we specifically don't want to do so.
		ipcache.IPIdentityCache.Upsert(ipIDPair.PrefixString(), nil, ipcache.Identity{
			ID:     ipIDPair.ID,
			Source: ipcache.FromAgentLocal,
		})
	}

	// Anything left in the snapshot that is marked as a host entry is an
	// obsolete address (e.g. after node readdressing) and must be removed.
	for hostIP, info := range existingEndpoints {
		if ip := net.ParseIP(hostIP); info.IsHost() && ip != nil {
			if err := lxcmap.DeleteEntry(ip); err != nil {
				log.WithError(err).WithFields(logrus.Fields{
					logfields.IPAddr: hostIP,
				}).Warn("Unable to delete obsolete host IP from BPF map")
			} else {
				log.Debugf("Removed outdated host ip %s from endpoint map", hostIP)
			}
		}
	}

	return nil
}
func createIPNet(ones, bits int) *net.IPNet {
return &net.IPNet{
Mask: net.CIDRMask(ones, bits),
}
}
// createPrefixLengthCounter wraps around the counter library, providing
// references to prefix lengths that will always be present.
func createPrefixLengthCounter() *counter.PrefixLengthCounter {
	v4MaxLengths := ipcachemap.IPCache.GetMaxPrefixLengths(false)
	v6MaxLengths := ipcachemap.IPCache.GetMaxPrefixLengths(true)

	// Note: local name chosen so it does not shadow the 'counter' package.
	plc := counter.NewPrefixLengthCounter(v6MaxLengths, v4MaxLengths)

	// This is a bit ugly, but there's not a great way to define an IPNet
	// without parsing strings, etc.
	defaultPrefixes := []*net.IPNet{
		// IPv4
		createIPNet(0, net.IPv4len*8),             // world
		createIPNet(net.IPv4len*8, net.IPv4len*8), // hosts
		// IPv6
		createIPNet(0, net.IPv6len*8),             // world
		createIPNet(net.IPv6len*8, net.IPv6len*8), // hosts
	}
	if _, err := plc.Add(defaultPrefixes); err != nil {
		log.WithError(err).Fatal("Failed to create default prefix lengths")
	}
	return plc
}
// NewDaemon creates and returns a new Daemon with the parameters set in c.
//
// It performs the full agent bootstrap: option validation, workload/runtime
// setup, node addressing, IPAM, endpoint restoration, ClusterMesh and proxy
// initialization. On success it returns the daemon together with the set of
// endpoints restored from the previous run; most unrecoverable conditions
// call log.Fatal directly rather than returning an error.
func NewDaemon() (*Daemon, *endpointRestoreState, error) {
// Validate the daemon-specific global options.
if err := option.Config.Validate(); err != nil {
return nil, nil, fmt.Errorf("invalid daemon configuration: %s", err)
}
ctmap.InitMapInfo(option.Config.CTMapEntriesGlobalTCP, option.Config.CTMapEntriesGlobalAny)
if err := workloads.Setup(option.Config.Workloads, map[string]string{}); err != nil {
return nil, nil, fmt.Errorf("unable to setup workload: %s", err)
}
d := Daemon{
loadBalancer: loadbalancer.NewLoadBalancer(),
k8sSvcCache: k8s.NewServiceCache(),
policy: policy.NewPolicyRepository(),
uniqueID: map[uint64]context.CancelFunc{},
nodeMonitor: monitorLaunch.NewNodeMonitor(option.Config.MonitorQueueSize),
prefixLengths: createPrefixLengthCounter(),
buildEndpointSem: semaphore.NewWeighted(int64(numWorkerThreads())),
compilationMutex: new(lock.RWMutex),
mtuConfig: mtu.NewConfiguration(option.Config.Tunnel != option.TunnelDisabled, option.Config.MTU),
}
// Policy updates are rate-limited through a trigger so that bursts of
// updates collapse into a single recalculation per MinInterval.
t, err := trigger.NewTrigger(trigger.Parameters{
Name: "policy_update",
PrometheusMetrics: true,
MinInterval: time.Second,
TriggerFunc: d.policyUpdateTrigger,
})
if err != nil {
return nil, nil, err
}
d.policyTrigger = t
debug.RegisterStatusObject("k8s-service-cache", &d.k8sSvcCache)
d.runK8sServiceHandler()
policyApi.InitEntities(option.Config.ClusterName)
workloads.Init(&d)
// Clear previous leftovers before listening for new requests
log.Info("Clearing leftover Cilium veths")
err = d.clearCiliumVeths()
if err != nil {
log.WithError(err).Debug("Unable to clean leftover veths")
}
if k8s.IsEnabled() {
if err := k8s.Init(); err != nil {
log.WithError(err).Fatal("Unable to initialize Kubernetes subsystem")
}
log.Info("Kubernetes information:")
log.Infof("  Namespace: %s", option.Config.K8sNamespace)
// Kubernetes demands that the localhost can always reach local
// pods. Therefore unless the AllowLocalhost policy is set to a
// specific mode, always allow localhost to reach local
// endpoints.
if option.Config.AllowLocalhost == option.AllowLocalhostAuto {
option.Config.AllowLocalhost = option.AllowLocalhostAlways
log.Info("k8s mode: Allowing localhost to reach local endpoints")
}
// In Cilium 1.0, due to limitations on the data path, traffic
// from the outside world on ingress was treated as though it
// was from the host for policy purposes. In order to not break
// existing policies, this option retains the behavior.
if option.Config.K8sLegacyHostAllowsWorld != "false" {
option.Config.HostAllowsWorld = true
log.Warn("k8s mode: Configuring ingress policy for host to also allow from world. For more information, see https://cilium.link/host-vs-world")
}
}
// If the device has been specified, the IPv4AllocPrefix and the
// IPv6AllocPrefix were already allocated before the k8s.Init().
//
// If the device hasn't been specified, k8s.Init() allocated the
// IPv4AllocPrefix and the IPv6AllocPrefix from k8s node annotations.
//
// If k8s.Init() failed to retrieve the IPv4AllocPrefix we can try to derive
// it from an existing node_config.h file or from previous cilium_host
// interfaces.
//
// Then, we will calculate the IPv4 or IPv6 alloc prefix based on the IPv6
// or IPv4 alloc prefix, respectively, retrieved by k8s node annotations.
log.Info("Initializing node addressing")
// Inject BPF dependency, kvstore dependency into node package.
node.TunnelDatapath = tunnel.TunnelMap
node.NodeReg = &nodeStore.NodeRegistrar{}
if err := node.AutoComplete(); err != nil {
log.WithError(err).Fatal("Cannot autocomplete node addresses")
}
node.SetIPv4ClusterCidrMaskSize(option.Config.IPv4ClusterCIDRMaskSize)
// NOTE(review): the local `net` returned by ParseCIDR below shadows the
// imported net package inside these if-bodies; harmless here, but easy to
// trip over when editing.
if option.Config.IPv4Range != AutoCIDR {
_, net, err := net.ParseCIDR(option.Config.IPv4Range)
if err != nil {
log.WithError(err).WithField(logfields.V4Prefix, option.Config.IPv4Range).Fatal("Invalid IPv4 allocation prefix")
}
node.SetIPv4AllocRange(net)
}
if option.Config.IPv4ServiceRange != AutoCIDR {
_, ipnet, err := net.ParseCIDR(option.Config.IPv4ServiceRange)
if err != nil {
log.WithError(err).WithField(logfields.V4Prefix, option.Config.IPv4ServiceRange).Fatal("Invalid IPv4 service prefix")
}
node.AddAuxPrefix(ipnet)
}
if option.Config.IPv6Range != AutoCIDR {
_, net, err := net.ParseCIDR(option.Config.IPv6Range)
if err != nil {
log.WithError(err).WithField(logfields.V6Prefix, option.Config.IPv6Range).Fatal("Invalid IPv6 allocation prefix")
}
if err := node.SetIPv6NodeRange(net); err != nil {
log.WithError(err).WithField(logfields.V6Prefix, net).Fatal("Invalid per node IPv6 allocation prefix")
}
}
if option.Config.IPv6ServiceRange != AutoCIDR {
_, ipnet, err := net.ParseCIDR(option.Config.IPv6ServiceRange)
if err != nil {
log.WithError(err).WithField(logfields.V6Prefix, option.Config.IPv6ServiceRange).Fatal("Invalid IPv6 service prefix")
}
node.AddAuxPrefix(ipnet)
}
// Set up ipam conf after init() because we might be running d.conf.KVStoreIPv4Registration
log.Info("Initializing IPAM")
ipam.Init()
// restore endpoints before any IPs are allocated to avoid eventual IP
// conflicts later on, otherwise any IP conflict will result in the
// endpoint not being able to be restored.
restoredEndpoints, err := d.restoreOldEndpoints(option.Config.StateDir, true)
if err != nil {
log.WithError(err).Error("Unable to restore existing endpoints")
}
// A typed ErrAllocation means the CIDR changed relative to the previous
// agent run; any other non-nil error is a hard IPAM failure.
switch err := ipam.AllocateInternalIPs(); err.(type) {
case ipam.ErrAllocation:
// NOTE(review): this condition checks IPv6ServiceRange where the
// symmetric IPv4 check uses IPv4Range — IPv6Range may have been
// intended. Confirm before relying on the message selection.
if option.Config.IPv4Range == AutoCIDR || option.Config.IPv6ServiceRange == AutoCIDR {
log.WithError(err).Fatalf(
"The allocation CIDR is different from the previous cilium instance. " +
"This error is most likely caused by a temporary network disruption to the kube-apiserver " +
"that prevent Cilium from retrieve the node's IPv4/IPv6 allocation range. " +
"If you believe the allocation range is supposed to be different you need to clean " +
"up all Cilium state with the `cilium cleanup` command on this node. Be aware " +
"this will cause network disruption for all existing containers managed by Cilium " +
"running on this node and you will have to restart them.")
} else {
log.WithError(err).Fatalf(
"The allocation CIDR is different from the previous cilium instance. " +
"If you believe the allocation range is supposed to be different you need to clean " +
"up all Cilium state with the `cilium cleanup` command on this node. Be aware " +
"this will cause network disruption for all existing containers managed by Cilium " +
"running on this node and you will have to restart them.")
}
case error:
log.WithError(err).Fatal("IPAM init failed")
}
log.Info("Validating configured node address ranges")
if err := node.ValidatePostInit(); err != nil {
log.WithError(err).Fatal("postinit failed")
}
if k8s.IsEnabled() {
log.Info("Annotating k8s node with CIDR ranges")
err := k8s.Client().AnnotateNode(node.GetName(),
node.GetIPv4AllocRange(), node.GetIPv6NodeRange(),
nil, nil, node.GetInternalIPv4())
if err != nil {
log.WithError(err).Warning("Cannot annotate k8s node with CIDR range")
}
}
log.Info("Addressing information:")
log.Infof("  Cluster-Name: %s", option.Config.ClusterName)
log.Infof("  Cluster-ID: %d", option.Config.ClusterID)
log.Infof("  Local node-name: %s", node.GetName())
if option.Config.EnableIPv6 {
log.Infof("  Node-IPv6: %s", node.GetIPv6())
log.Infof("  IPv6 node prefix: %s", node.GetIPv6NodeRange())
log.Infof("  IPv6 allocation prefix: %s", node.GetIPv6AllocRange())
log.Infof("  IPv6 router address: %s", node.GetIPv6Router())
}
if option.Config.EnableIPv4 {
log.Infof("  External-Node IPv4: %s", node.GetExternalIPv4())
log.Infof("  Internal-Node IPv4: %s", node.GetInternalIPv4())
log.Infof("  Cluster IPv4 prefix: %s", node.GetIPv4ClusterRange())
log.Infof("  IPv4 allocation prefix: %s", node.GetIPv4AllocRange())
// Allocate IPv4 service loopback IP
loopbackIPv4, _, err := ipam.AllocateNext("ipv4")
if err != nil {
return nil, restoredEndpoints, fmt.Errorf("Unable to reserve IPv4 loopback address: %s", err)
}
node.SetIPv4Loopback(loopbackIPv4)
log.Infof("  Loopback IPv4: %s", node.GetIPv4Loopback().String())
}
if err := node.ConfigureLocalNode(); err != nil {
log.WithError(err).Fatal("Unable to initialize local node")
}
// This needs to be done after the node addressing has been configured
// as the node address is required as suffix.
cache.InitIdentityAllocator(&d)
if path := option.Config.ClusterMeshConfig; path != "" {
if option.Config.ClusterID == 0 {
log.Info("Cluster-ID is not specified, skipping ClusterMesh initialization")
} else {
log.WithField("path", path).Info("Initializing ClusterMesh routing")
clustermesh, err := clustermesh.NewClusterMesh(clustermesh.Configuration{
Name: "clustermesh",
ConfigDirectory: path,
NodeKeyCreator: nodeStore.KeyCreator,
ServiceMerger: &d.k8sSvcCache,
})
if err != nil {
log.WithError(err).Fatal("Unable to initialize ClusterMesh")
}
d.clustermesh = clustermesh
}
}
if err = d.init(); err != nil {
log.WithError(err).Error("Error while initializing daemon")
return nil, restoredEndpoints, err
}
// Start watcher for endpoint IP --> identity mappings in key-value store.
// this needs to be done *after* init() for the daemon in that function,
// we populate the IPCache with the host's IP(s).
ipcache.InitIPIdentityWatcher()
// FIXME: Make the port range configurable.
d.l7Proxy = proxy.StartProxySupport(10000, 20000, option.Config.RunDir,
option.Config.AccessLog, &d, option.Config.AgentLabels)
if err := fqdn.ConfigFromResolvConf(); err != nil {
return nil, nil, err
}
err = d.bootstrapFQDN(restoredEndpoints)
if err != nil {
return nil, restoredEndpoints, err
}
return &d, restoredEndpoints, nil
}
// Close shuts down a daemon
func (d *Daemon) Close() {
// policyTrigger is nil when NewDaemon failed before the trigger was
// created; guard so Close is safe on a partially-constructed daemon.
if d.policyTrigger != nil {
d.policyTrigger.Shutdown()
}
}
// TriggerReloadWithoutCompile causes all BPF programs and maps to be reloaded,
// without recompiling the datapath logic for each endpoint. It first attempts
// to recompile the base programs, and if this fails returns an error. If base
// program load is successful, it subsequently triggers regeneration of all
// endpoints and returns a waitgroup that may be used by the caller to wait for
// all endpoint regeneration to complete.
//
// If an error is returned, then no regeneration was successful. If no error
// is returned, then the base programs were successfully regenerated, but
// endpoints may or may not have successfully regenerated.
func (d *Daemon) TriggerReloadWithoutCompile(reason string) (*sync.WaitGroup, error) {
	log.Debugf("BPF reload triggered from %s", reason)

	// Base programs must load before any endpoint is asked to reload.
	if err := d.compileBase(); err != nil {
		return nil, fmt.Errorf("Unable to recompile base programs from %s: %s", reason, err)
	}

	metadata := &endpoint.ExternalRegenerationMetadata{
		Reason:         reason,
		ReloadDatapath: true,
	}
	wg := endpointmanager.RegenerateAllEndpoints(d, metadata)
	return wg, nil
}
// changedOption is the callback applied for each daemon option modified via
// the configuration API. It keeps the logging subsystem in sync with the
// Debug toggle and forces a policy recalculation for any change.
func changedOption(key string, value option.OptionSetting, data interface{}) {
	daemon := data.(*Daemon)

	if key == option.Debug {
		// Set the debug toggle (this can be a no-op), then reflect the new
		// log level to the proxies.
		logging.ToggleDebugLogs(daemon.DebugEnabled())
		proxy.ChangeLogLevel(logging.GetLevel(logging.DefaultLogger))
	}

	daemon.policy.BumpRevision() // force policy recalculation
}
// patchConfig implements the PATCH /config API handler.
type patchConfig struct {
daemon *Daemon
}

// NewPatchConfigHandler returns a PATCH /config handler bound to d.
func NewPatchConfigHandler(d *Daemon) PatchConfigHandler {
return &patchConfig{daemon: d}
}
// Handle applies a PATCH /config request: it validates the requested option
// map, optionally restarts the node monitor when MonitorNumPages changes,
// applies the remaining option and policy-enforcement changes, and recompiles
// the base datapath if anything actually changed.
func (h *patchConfig) Handle(params PatchConfigParams) middleware.Responder {
	log.WithField(logfields.Params, logfields.Repr(params)).Debug("PATCH /config request")

	d := h.daemon
	cfgSpec := params.Configuration

	om, err := option.Config.Opts.Library.ValidateConfigurationMap(cfgSpec.Options)
	if err != nil {
		msg := fmt.Errorf("Invalid configuration option %s", err)
		return api.Error(PatchConfigBadRequestCode, msg)
	}

	// Serialize configuration updates to the daemon.
	option.Config.ConfigPatchMutex.Lock()
	defer option.Config.ConfigPatchMutex.Unlock()

	nmArgs := d.nodeMonitor.GetArgs()
	// BUGFIX: the previous version indexed nmArgs[0] in this condition before
	// the len(nmArgs) check ran, panicking with index-out-of-range whenever
	// the monitor had no arguments. The length-guarded comparison below is
	// the only safe place to look at nmArgs[0].
	if numPagesEntry, ok := cfgSpec.Options["MonitorNumPages"]; ok {
		if len(nmArgs) == 0 || nmArgs[0] != numPagesEntry {
			// NOTE(review): the "%s" in this literal looks like a leftover
			// format specifier — confirm against NodeMonitor.Restart's
			// expected argument format.
			args := []string{"--num-pages %s", numPagesEntry}
			d.nodeMonitor.Restart(args)
		}
		if len(cfgSpec.Options) == 0 {
			return NewPatchConfigOK()
		}
		delete(cfgSpec.Options, "MonitorNumPages")
	}

	// Track changes to daemon's configuration
	var changes int

	// Only update if value provided for PolicyEnforcement.
	if enforcement := cfgSpec.PolicyEnforcement; enforcement != "" {
		switch enforcement {
		case option.NeverEnforce, option.DefaultEnforcement, option.AlwaysEnforce:
			// Update policy enforcement configuration if needed.
			oldEnforcementValue := policy.GetPolicyEnabled()

			// If the policy enforcement configuration has indeed changed, we have
			// to regenerate endpoints and update daemon's configuration.
			if enforcement != oldEnforcementValue {
				log.Debug("configuration request to change PolicyEnforcement for daemon")
				changes++
				policy.SetPolicyEnabled(enforcement)
			}
		default:
			msg := fmt.Errorf("Invalid option for PolicyEnforcement %s", enforcement)
			log.Warn(msg)
			return api.Error(PatchConfigFailureCode, msg)
		}
		log.Debug("finished configuring PolicyEnforcement for daemon")
	}

	changes += option.Config.Opts.ApplyValidated(om, changedOption, d)
	log.WithField("count", changes).Debug("Applied changes to daemon's configuration")

	if changes > 0 {
		// Only recompile if configuration has changed.
		log.Debug("daemon configuration has changed; recompiling base programs")
		if err := d.compileBase(); err != nil {
			msg := fmt.Errorf("Unable to recompile base programs: %s", err)
			return api.Error(PatchConfigFailureCode, msg)
		}
		d.TriggerPolicyUpdates(true, "agent configuration update")
	}

	return NewPatchConfigOK()
}
// getConfig implements the GET /config API handler.
type getConfig struct {
daemon *Daemon
}

// NewGetConfigHandler returns a GET /config handler bound to d.
func NewGetConfigHandler(d *Daemon) GetConfigHandler {
return &getConfig{daemon: d}
}
// Handle serves GET /config: it snapshots the daemon's mutable option set and
// policy-enforcement mode (the "spec") alongside read-only runtime status
// (addressing, k8s endpoints, MTU, kvstore settings) and returns both.
func (h *getConfig) Handle(params GetConfigParams) middleware.Responder {
log.WithField(logfields.Params, logfields.Repr(params)).Debug("GET /config request")
d := h.daemon
// The spec reflects what a PATCH /config may change.
spec := &models.DaemonConfigurationSpec{
Options: *option.Config.Opts.GetMutableModel(),
PolicyEnforcement: policy.GetPolicyEnabled(),
}
// Status embeds the spec as "Realized" plus read-only runtime values.
status := &models.DaemonConfigurationStatus{
Addressing: node.GetNodeAddressing(),
K8sConfiguration: k8s.GetKubeconfigPath(),
K8sEndpoint: k8s.GetAPIServer(),
NodeMonitor: d.nodeMonitor.State(),
KvstoreConfiguration: &models.KVstoreConfiguration{
Type: option.Config.KVStore,
Options: option.Config.KVStoreOpt,
},
Realized: spec,
DeviceMTU: int64(d.mtuConfig.GetDeviceMTU()),
RouteMTU: int64(d.mtuConfig.GetRouteMTU()),
DatapathMode: models.DatapathMode(option.Config.DatapathMode),
IpvlanDeviceIfIndex: int64(option.Config.IpvlanDeviceIfIndex),
}
cfg := &models.DaemonConfiguration{
Spec: spec,
Status: status,
}
return NewGetConfigOK().WithPayload(cfg)
}
// listFilterIfs returns the subset of host network interfaces selected by
// filter, keyed by the index the filter returns for each link. The filter
// signals "skip this link" by returning -1.
func listFilterIfs(filter func(netlink.Link) int) (map[int]netlink.Link, error) {
	links, err := netlink.LinkList()
	if err != nil {
		return nil, err
	}

	selected := map[int]netlink.Link{}
	for _, link := range links {
		idx := filter(link)
		if idx == -1 {
			continue
		}
		selected[idx] = link
	}
	return selected, nil
}
// clearCiliumVeths checks all veths created by cilium and removes all that
// are considered a leftover from failed attempts to connect the container.
//
// A veth is considered a leftover when its peer is also visible on the host
// and carries the "lxc" prefix: a successfully connected container would have
// moved its end into the container's namespace.
func (d *Daemon) clearCiliumVeths() error {
	leftVeths, err := listFilterIfs(func(intf netlink.Link) int {
		// Filter by veth and return the index of the interface.
		if intf.Type() == "veth" {
			return intf.Attrs().Index
		}
		return -1
	})
	if err != nil {
		return fmt.Errorf("unable to retrieve host network interfaces: %s", err)
	}

	for _, v := range leftVeths {
		peerIndex := v.Attrs().ParentIndex
		parentVeth, found := leftVeths[peerIndex]
		if found && peerIndex != 0 && strings.HasPrefix(parentVeth.Attrs().Name, "lxc") {
			if err := netlink.LinkDel(v); err != nil {
				// CONSISTENCY FIX: report through the structured logger like
				// the rest of this file, instead of the previous bare
				// fmt.Printf (which also lacked a trailing newline).
				log.WithError(err).Warningf(`CleanVeths: Unable to delete leftover veth "%d %s"`,
					v.Attrs().Index, v.Attrs().Name)
			}
		}
	}
	return nil
}
// numWorkerThreads returns the number of worker threads with a minimum of 4.
func numWorkerThreads() int {
ncpu := runtime.NumCPU()
minWorkerThreads := 2
if ncpu < minWorkerThreads {
return minWorkerThreads
}
return ncpu
}
// GetServiceList returns the models for every load-balancer service currently
// known to the daemon.
func (d *Daemon) GetServiceList() []*models.Service {
	d.loadBalancer.BPFMapMU.RLock()
	defer d.loadBalancer.BPFMapMU.RUnlock()

	services := make([]*models.Service, 0, len(d.loadBalancer.SVCMap))
	for _, svc := range d.loadBalancer.SVCMap {
		services = append(services, svc.GetModel())
	}
	return services
}
// SendNotification sends an agent notification to the monitor
func (d *Daemon) SendNotification(typ monitorAPI.AgentNotification, text string) error {
event := monitorAPI.AgentNotify{Type: typ, Text: text}
return d.nodeMonitor.SendEvent(monitorAPI.MessageTypeAgent, event)
}

// NewProxyLogRecord is invoked by the proxy accesslog on each new access log entry
// and forwards the record to the monitor as an access-log event.
func (d *Daemon) NewProxyLogRecord(l *logger.LogRecord) error {
return d.nodeMonitor.SendEvent(monitorAPI.MessageTypeAccessLog, l.LogRecord)
}
// GetNodeSuffix returns the suffix to be appended to kvstore keys of this
// agent: the node's external IPv4 address. Logs fatally (exiting the process)
// when the address is not yet known.
func (d *Daemon) GetNodeSuffix() string {
	ip := node.GetExternalIPv4()
	if ip == nil {
		// Fatal terminates the process; the return below only satisfies the
		// compiler.
		log.Fatal("Node IP not available yet")
		return "<nil>"
	}
	return ip.String()
}
// bootstrapFQDN initializes the toFQDNs related subsystems: DNSPoller,
// d.dnsRuleGen, and the DNS proxy.
// dnsRuleGen and DNSPoller will use the default resolver and, implicitly, the
// default DNS cache. The proxy binds to all interfaces, and uses the
// configured DNS proxy port (this may be 0 and so OS-assigned).
func (d *Daemon) bootstrapFQDN(restoredEndpoints *endpointRestoreState) (err error) {
	cfg := fqdn.Config{
		MinTTL:         option.Config.ToFQDNsMinTTL,
		Cache:          fqdn.DefaultDNSCache,
		LookupDNSNames: fqdn.DNSLookupDefaultResolver,
		AddGeneratedRules: func(generatedRules []*policyApi.Rule) error {
			// Insert the new rules into the policy repository. We need them to
			// replace the previous set. This requires the labels to match (including
			// the ToFQDN-UUID one).
			_, err := d.PolicyAdd(generatedRules, &AddOptions{Replace: true, Generated: true})
			return err
		},
		PollerResponseNotify: func(lookupTime time.Time, qname string, response *fqdn.DNSIPRecords) {
			// Do nothing if this option is off
			if !option.Config.ToFQDNsEnablePollerEvents {
				return
			}
			// FIXME: Not always true but we don't have the protocol information here
			protocol := accesslog.TransportProtocol(u8proto.ProtoIDs["udp"])
			record := logger.LogRecord{
				LogRecord: accesslog.LogRecord{
					Type:              accesslog.TypeResponse,
					ObservationPoint:  accesslog.Ingress,
					IPVersion:         accesslog.VersionIPv4,
					TransportProtocol: protocol,
					Timestamp:         time.Now().UTC().Format(time.RFC3339Nano),
					NodeAddressInfo: accesslog.NodeAddressInfo{
						IPv4: node.GetExternalIPv4().String(),
						IPv6: node.GetIPv6().String(),
					},
				},
			}
			logger.LogTags.Verdict(accesslog.VerdictForwarded, "DNSPoller")(&record)
			logger.LogTags.DNS(&accesslog.LogRecordDNS{
				Query:             qname,
				IPs:               response.IPs,
				TTL:               uint32(response.TTL),
				CNAMEs:            nil,
				ObservationSource: accesslog.DNSSourceAgentPoller,
			})(&record)
			record.Log()
		}}

	d.dnsRuleGen = fqdn.NewRuleGen(cfg)
	d.dnsPoller = fqdn.NewDNSPoller(cfg, d.dnsRuleGen)
	if option.Config.ToFQDNsEnablePoller {
		fqdn.StartDNSPoller(d.dnsPoller)
	}

	// Prefill the cache with DNS lookups from restored endpoints. This is needed
	// to maintain continuity of which IPs are allowed.
	// Note: This is TTL aware, and expired data will not be used (e.g. when
	// restoring after a long delay).
	for _, restoredEP := range restoredEndpoints.restored {
		// Upgrades from old ciliums have this nil
		if restoredEP.DNSHistory != nil {
			fqdn.DefaultDNSCache.UpdateFromCache(restoredEP.DNSHistory)
		}
	}

	// Once we stop returning errors from StartDNSProxy this should live in
	// StartProxySupport
	proxy.DefaultDNSProxy, err = dnsproxy.StartDNSProxy("", uint16(option.Config.ToFQDNsProxyPort),
		// LookupEPByIP
		func(endpointIP net.IP) (endpointID string, err error) {
			e := endpointmanager.LookupIPv4(endpointIP.String())
			if e == nil {
				return "", fmt.Errorf("Cannot find endpoint with IP %s", endpointIP.String())
			}
			return e.StringID(), nil
		},
		// NotifyOnDNSMsg handles DNS data in the daemon by emitting monitor
		// events, proxy metrics and storing DNS data in the DNS cache. This may
		// result in rule generation.
		// It will:
		// - Report a monitor error event and proxy metrics when the proxy sees an
		//   error, and when it can't process something in this function
		// - Report the verdict in a monitor event and emit proxy metrics
		// - Insert the DNS data into the cache when msg is a DNS response and we
		//   can lookup the endpoint related to it
		// srcAddr and dstAddr should match the packet reported on (i.e. the
		// endpoint is srcAddr for requests, and dstAddr for responses).
		func(lookupTime time.Time, srcAddr, dstAddr string, msg *dns.Msg, protocol string, allowed bool, proxyErr error) error {
			var protoID = u8proto.ProtoIDs[strings.ToLower(protocol)]

			var verdict accesslog.FlowVerdict
			var reason string
			switch {
			case proxyErr != nil:
				verdict = accesslog.VerdictError
				reason = "Error: " + proxyErr.Error()
			case allowed:
				verdict = accesslog.VerdictForwarded
				reason = "Allowed by policy"
			case !allowed:
				verdict = accesslog.VerdictDenied
				reason = "Denied by policy"
			}

			var epAddr string     // the address of the endpoint that originated the request
			var serverAddr string // the address of the DNS target
			var ingress = msg.Response
			var flowType accesslog.FlowType
			if ingress {
				flowType = accesslog.TypeResponse
				epAddr = dstAddr
				serverAddr = srcAddr
			} else {
				flowType = accesslog.TypeRequest
				epAddr = srcAddr
				serverAddr = dstAddr
			}

			var serverPort int
			_, serverPortStr, err := net.SplitHostPort(serverAddr)
			if err != nil {
				log.WithError(err).Error("cannot extract endpoint IP from DNS request")
			} else {
				if serverPort, err = strconv.Atoi(serverPortStr); err != nil {
					log.WithError(err).WithField(logfields.Port, serverPortStr).Error("cannot parse destination port")
				}
			}

			var ep *endpoint.Endpoint
			epIP, _, err := net.SplitHostPort(epAddr)
			if err != nil {
				// BUGFIX: the previous version called ep.UpdateProxyStatistics
				// here while ep was still nil, which guaranteed a nil-pointer
				// panic on this error path. No endpoint is known at this
				// point, so there are no per-endpoint statistics to update.
				log.WithError(err).Error("cannot extract endpoint IP from DNS request")
				return err
			}
			ep = endpointmanager.LookupIPv4(epIP)
			if ep == nil {
				// This is a hard fail. We cannot proceed because record.Log requires a
				// non-nil ep, and we also don't want to insert this data into the
				// cache if we don't know that an endpoint asked for it (this is
				// asserted via ep != nil here and msg.Response && msg.Rcode ==
				// dns.RcodeSuccess below).
				// BUGFIX: likewise, the previous version dereferenced the nil
				// ep to update proxy statistics in this branch.
				err := fmt.Errorf("Cannot find matching endpoint for IPs %s or %s", srcAddr, dstAddr)
				log.WithError(err).Error("cannot find matching endpoint")
				return err
			}

			qname, responseIPs, TTL, CNAMEs, err := dnsproxy.ExtractMsgDetails(msg)
			if err != nil {
				// This error is ok because all these values are used for reporting, or filling in the cache.
				log.WithError(err).Error("cannot extract DNS message details")
			}

			ep.UpdateProxyStatistics("dns", uint16(serverPort), ingress, !ingress, verdict)
			record := logger.NewLogRecord(proxy.DefaultEndpointInfoRegistry, ep, flowType, ingress,
				func(lr *logger.LogRecord) { lr.LogRecord.TransportProtocol = accesslog.TransportProtocol(protoID) },
				logger.LogTags.Verdict(verdict, reason),
				logger.LogTags.Addressing(logger.AddressingInfo{
					SrcIPPort:   srcAddr,
					DstIPPort:   dstAddr,
					SrcIdentity: 0, // 0 more correctly finds src and dst EP data
				}),
				logger.LogTags.DNS(&accesslog.LogRecordDNS{
					Query:             qname,
					IPs:               responseIPs,
					TTL:               TTL,
					CNAMEs:            CNAMEs,
					ObservationSource: accesslog.DNSSourceProxy,
				}),
			)
			record.Log()

			if msg.Response && msg.Rcode == dns.RcodeSuccess {
				// This must happen before the ruleGen update below, to ensure that
				// this data is included in the serialized Endpoint object.
				// Note: We need to fixup minTTL to be consistent with how we insert it
				// elsewhere i.e. we don't want to lose the lower bound for DNS data
				// TTL if we reboot twice.
				log.WithField(logfields.EndpointID, ep.ID).Debug("Recording DNS lookup in endpoint specific cache")
				effectiveTTL := int(TTL)
				if effectiveTTL < option.Config.ToFQDNsMinTTL {
					effectiveTTL = option.Config.ToFQDNsMinTTL
				}
				ep.DNSHistory.Update(lookupTime, qname, responseIPs, effectiveTTL)

				log.Debug("Updating DNS name in cache from response to to query")
				err = d.dnsRuleGen.UpdateGenerateDNS(lookupTime, map[string]*fqdn.DNSIPRecords{
					qname: {
						IPs: responseIPs,
						TTL: int(effectiveTTL),
					}})
				if err != nil {
					log.WithError(err).Error("error updating internal DNS cache for rule generation")
				}
			}
			return nil
		})

	proxy.DefaultDNSProxy.SetRejectReply(option.Config.FQDNRejectResponse)
	return err // filled by StartDNSProxy
}
|
package daemon
import (
"github.com/sevlyar/go-daemon"
"github.com/Sirupsen/logrus"
"github.com/BluePecker/JwtAuth/server"
"github.com/BluePecker/JwtAuth/server/router/jwt"
"github.com/BluePecker/JwtAuth/storage"
"reflect"
"github.com/kataras/iris/core/router"
)
var (
// Redis/Mongodb connection pool size
MaxPoolSize int = 50
)

// Storage describes how to reach the token storage backend (redis, mongodb,
// or a file path based driver).
type Storage struct {
Driver string // backend driver name, passed to storage.New
Path string
Host string
Port int
Username string
Password string
PoolSize int
}

// Security holds the optional TLS configuration for the HTTP server.
type Security struct {
TLS bool
Key string // path to the TLS private key
Cert string // path to the TLS certificate
}

// Option is the full start-up configuration consumed by NewStart.
type Option struct {
PidFile string
LogFile string
Port int
Host string
Daemon bool // when true the process re-forks into the background
Security Security
Storage Storage
}

// Daemon wires the configuration to the HTTP server and storage backend.
type Daemon struct {
opt *Option
server *server.Server
storage *storage.Driver
}
// storageConf copies every identically-named, identically-typed field from
// the daemon's Storage configuration into the storage option struct p2.
func (d *Daemon) storageConf(p2 *storage.Option) {
	p1 := d.opt.Storage
	// BUGFIX: the previous version called reflect.ValueOf(p1).Elem(), but p1
	// is a struct value (not a pointer or interface), and Value.Elem panics
	// for any other kind. The struct Value can be used directly.
	u1 := reflect.ValueOf(p1)
	u2 := reflect.ValueOf(p2).Elem()
	for seq := 0; seq < u2.NumField(); seq++ {
		item := u2.Type().Field(seq)
		v1 := u1.FieldByName(item.Name)
		v2 := u2.FieldByName(item.Name)
		// Only copy when the source field exists and the types line up.
		if v1.IsValid() {
			if v2.Type() == v1.Type() {
				v2.Set(v1)
			}
		}
	}
}
// initStorage instantiates the configured storage backend and attaches it to
// the daemon. On failure the error is logged and d.storage remains nil.
func (d *Daemon) initStorage() {
	option := &storage.Option{}
	// BUGFIX: the previous version reused the string variable `driver` to
	// receive the *storage.Driver result of storage.New (a type clash), and
	// passed the option pointer instead of the value. This matches the
	// corrected revision of this file.
	name := d.opt.Storage.Driver
	d.storageConf(option)
	driver, err := storage.New(name, *option)
	if err != nil {
		logrus.Error(err)
		return
	}
	d.storage = driver
}
// initServer lazily constructs the underlying HTTP server.
func (d *Daemon) initServer() {
d.server = &server.Server{}
}

// listen starts serving on the configured host/port. The TLS key/cert paths
// are always passed through; they may be empty when TLS is disabled.
func (d *Daemon) listen() {
if d.server == nil {
d.initServer()
}
d.server.Accept(server.Options{
Host: d.opt.Host,
Port: d.opt.Port,
Tls: &server.TLS{
Key: d.opt.Security.Key,
Cert: d.opt.Security.Cert,
},
})
}

// addRouter registers the given routers with the server, creating the server
// first if necessary.
// NOTE(review): the variadic slice is passed to AddRouter as a single value;
// confirm AddRouter's signature — `routers...` may have been intended.
func (d *Daemon) addRouter(routers... router.Router) {
if d.server == nil {
d.initServer()
}
d.server.AddRouter(routers)
}
// NewStart boots the JWT daemon described by opt. When opt.Daemon is set the
// process first re-forks itself into the background: the parent returns
// immediately while the detached child initializes storage, registers the JWT
// routes and starts listening.
func NewStart(opt Option) {
	// IDIOM: `if (opt.Daemon == true)` replaced with the direct boolean test.
	if opt.Daemon {
		dCtx := daemon.Context{
			PidFileName: opt.PidFile,
			PidFilePerm: 0644,
			LogFilePerm: 0640,
			Umask:       027,
			WorkDir:     "/",
			LogFileName: opt.LogFile,
		}
		defer dCtx.Release()
		if child, err := dCtx.Reborn(); err != nil {
			logrus.Fatal(err)
		} else if child != nil {
			// Parent process: the child carries on with start-up.
			return
		}
	}
	jwtPro := &Daemon{
		opt: &opt,
	}
	jwtPro.initStorage()
	jwtPro.addRouter(jwt.NewRouter(nil))
	jwtPro.listen()
}
fix bug
package daemon
import (
"github.com/sevlyar/go-daemon"
"github.com/Sirupsen/logrus"
"github.com/BluePecker/JwtAuth/server"
"github.com/BluePecker/JwtAuth/server/router/jwt"
"github.com/BluePecker/JwtAuth/storage"
"reflect"
"github.com/kataras/iris/core/router"
)
var (
// Redis/Mongodb connection pool size
MaxPoolSize int = 50
)

// Storage describes how to reach the token storage backend (redis, mongodb,
// or a file path based driver).
type Storage struct {
Driver string // backend driver name, passed to storage.New
Path string
Host string
Port int
Username string
Password string
PoolSize int
}

// Security holds the optional TLS configuration for the HTTP server.
type Security struct {
TLS bool
Key string // path to the TLS private key
Cert string // path to the TLS certificate
}

// Option is the full start-up configuration consumed by NewStart.
type Option struct {
PidFile string
LogFile string
Port int
Host string
Daemon bool // when true the process re-forks into the background
Security Security
Storage Storage
}

// Daemon wires the configuration to the HTTP server and storage backend.
type Daemon struct {
opt *Option
server *server.Server
storage *storage.Driver
}
// storageConf copies every identically-named, identically-typed field from
// the daemon's Storage configuration into the storage option struct p2.
func (d *Daemon) storageConf(p2 *storage.Option) {
	p1 := d.opt.Storage
	// BUGFIX: the previous version called reflect.ValueOf(p1).Elem(), but p1
	// is a struct value (not a pointer or interface), and Value.Elem panics
	// for any other kind. The struct Value can be used directly.
	u1 := reflect.ValueOf(p1)
	u2 := reflect.ValueOf(p2).Elem()
	for seq := 0; seq < u2.NumField(); seq++ {
		item := u2.Type().Field(seq)
		v1 := u1.FieldByName(item.Name)
		v2 := u2.FieldByName(item.Name)
		// Only copy when the source field exists and the types line up.
		if v1.IsValid() {
			if v2.Type() == v1.Type() {
				v2.Set(v1)
			}
		}
	}
}
// initStorage instantiates the configured storage backend and attaches it to
// the daemon. On failure the error is logged and d.storage remains nil.
func (d *Daemon) initStorage() {
	option := &storage.Option{}
	name := d.opt.Storage.Driver
	d.storageConf(option)

	backend, err := storage.New(name, *option)
	if err != nil {
		logrus.Error(err)
		return
	}
	d.storage = backend
}
// initServer lazily constructs the underlying HTTP server.
func (d *Daemon) initServer() {
d.server = &server.Server{}
}

// listen starts serving on the configured host/port. The TLS key/cert paths
// are always passed through; they may be empty when TLS is disabled.
func (d *Daemon) listen() {
if d.server == nil {
d.initServer()
}
d.server.Accept(server.Options{
Host: d.opt.Host,
Port: d.opt.Port,
Tls: &server.TLS{
Key: d.opt.Security.Key,
Cert: d.opt.Security.Cert,
},
})
}

// addRouter registers the given routers with the server, creating the server
// first if necessary.
// NOTE(review): the variadic slice is passed to AddRouter as a single value;
// confirm AddRouter's signature — `routers...` may have been intended.
func (d *Daemon) addRouter(routers... router.Router) {
if d.server == nil {
d.initServer()
}
d.server.AddRouter(routers)
}
func NewStart(opt Option) {
if (opt.Daemon == true) {
dCtx := daemon.Context{
PidFileName: opt.PidFile,
PidFilePerm: 0644,
LogFilePerm: 0640,
Umask: 027,
WorkDir: "/",
LogFileName: opt.LogFile,
}
defer dCtx.Release()
if child, err := dCtx.Reborn(); err != nil {
logrus.Fatal(err)
} else if child != nil {
return
}
}
jwtPro := &Daemon{
opt: &opt,
}
jwtPro.initStorage()
jwtPro.addRouter(jwt.NewRouter(nil))
jwtPro.listen()
} |
package daemon
import (
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
"io/ioutil"
"log"
"net/http"
)
// Channel-operation identifiers. Not referenced within this file.
const (
ADD_CHAN = iota
DEL_CHAN
)

// logStream fans daemon log messages out to every attached websocket client.
var logStream = hub{
Broadcast: make(chan []byte),
register: make(chan *connection),
unregister: make(chan *connection),
connections: make(map[*connection]bool),
}

// uiStream fans UI update messages out to every attached websocket client.
var uiStream = hub{
Broadcast: make(chan []byte),
register: make(chan *connection),
unregister: make(chan *connection),
connections: make(map[*connection]bool),
}

// Daemon ties the block manager to its HTTP API and websocket streams.
type Daemon struct {
manager *BlockManager // owns all blocks and their connections
log chan *LogMsg // messages destined for the log stream
ui chan *LogMsg // messages destined for the UI stream
Port string
}
// NewDaemon constructs a Daemon with a fresh block manager and unbuffered
// message channels. Port is left empty; callers must set it before serving.
func NewDaemon() *Daemon {
return &Daemon{
manager: NewBlockManager(),
log: make(chan *LogMsg),
ui: make(chan *LogMsg),
}
}
func addDefaultHeaders(fn http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
fn(w, r)
}
}
// rootHandler answers the API root with a placeholder greeting.
func (d *Daemon) rootHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello!")
}

// staticHandler is a placeholder; it does not serve static assets yet.
func (d *Daemon) staticHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello!")
}
// serveStream upgrades an incoming GET request to a websocket and attaches
// the resulting connection to the hub h. It blocks, pumping messages, until
// the peer disconnects. Shared implementation extracted from the previously
// duplicated serveLogStream/serveUIStream bodies.
func (d *Daemon) serveStream(h hub, w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.Error(w, "Method not allowed", 405)
		return
	}
	w.Header().Set("Access-Control-Allow-Origin", "*")
	/*if r.Header.Get("Origin") != "http://"+r.Host {
		http.Error(w, "Origin not allowed", 403)
		return
	}*/
	ws, err := websocket.Upgrade(w, r, nil, 1024, 1024)
	if _, ok := err.(websocket.HandshakeError); ok {
		http.Error(w, "Not a websocket handshake", 400)
		return
	} else if err != nil {
		log.Println(err)
		return
	}
	c := &connection{send: make(chan []byte, 256), ws: ws, Hub: h}
	c.Hub.register <- c
	go c.writePump()
	c.readPump()
}

// serveLogStream streams daemon log messages to a websocket client.
func (d *Daemon) serveLogStream(w http.ResponseWriter, r *http.Request) {
	d.serveStream(logStream, w, r)
}

// serveUIStream streams UI update messages to a websocket client.
func (d *Daemon) serveUIStream(w http.ResponseWriter, r *http.Request) {
	d.serveStream(uiStream, w, r)
}
// importHandler ingests a JSON export of blocks and connections (the format
// produced by exportHandler), remapping any IDs that collide with existing
// ones before creating the blocks and wiring the connections.
func (d *Daemon) importHandler(w http.ResponseWriter, r *http.Request) {
var export struct {
Blocks []*BlockInfo
Connections []*ConnectionInfo
}
// corrected maps each imported ID to a collision-free replacement
// (identity mapping when there is no collision).
corrected := make(map[string]string)
body, err := ioutil.ReadAll(r.Body)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
err = json.Unmarshal(body, &export)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
// Reserve new IDs for blocks whose IDs are already taken, suffixing a
// fresh manager ID until the result is unused.
for _, block := range export.Blocks {
corrected[block.Id] = block.Id
for d.manager.IdExists(corrected[block.Id]) {
corrected[block.Id] = block.Id + "_" + d.manager.GetId()
}
}
// Same remapping for connection IDs.
for _, conn := range export.Connections {
corrected[conn.Id] = conn.Id
for d.manager.IdExists(corrected[conn.Id]) {
corrected[conn.Id] = conn.Id + "_" + d.manager.GetId()
}
}
// Create blocks first so connections can reference the remapped IDs.
for _, block := range export.Blocks {
block.Id = corrected[block.Id]
_, err := d.manager.Create(block)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
}
for _, conn := range export.Connections {
conn.Id = corrected[conn.Id]
conn.FromId = corrected[conn.FromId]
conn.ToId = corrected[conn.ToId]
_, err := d.manager.Connect(conn)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
}
d.apiWrap(w, 200, d.response("OK"))
}
func (d *Daemon) exportHandler(w http.ResponseWriter, r *http.Request) {
export := struct {
Blocks []*BlockInfo
Connections []*ConnectionInfo
}{
d.manager.ListBlocks(),
d.manager.ListConnections(),
}
jex, err := json.Marshal(export)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
d.apiWrap(w, 200, jex)
}
func (d *Daemon) listBlockHandler(w http.ResponseWriter, r *http.Request) {
blocks, err := json.Marshal(d.manager.ListBlocks())
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
d.apiWrap(w, 200, blocks)
}
func (d *Daemon) createBlockHandler(w http.ResponseWriter, r *http.Request) {
var block *BlockInfo
body, err := ioutil.ReadAll(r.Body)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
err = json.Unmarshal(body, &block)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
mblock, err := d.manager.Create(block)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
jblock, err := json.Marshal(mblock)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
d.log <- &LogMsg{
Type: "INFO",
Data: "BLOCK CREATED",
Id: "DAEMON",
}
d.ui <- &LogMsg{
Type: "CREATE",
Data: jblock,
Id: "DAEMON",
}
d.apiWrap(w, 200, jblock)
}
func (d *Daemon) updateBlockHandler(w http.ResponseWriter, r *http.Request) {
var coord *Coords
vars := mux.Vars(r)
body, err := ioutil.ReadAll(r.Body)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
err = json.Unmarshal(body, &coord)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
mblock, err := d.manager.UpdateBlock(vars["id"], coord)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
jblock, err := json.Marshal(mblock)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
d.apiWrap(w, 200, jblock)
}
func (d *Daemon) blockInfoHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
conn, err := d.manager.GetBlock(vars["id"])
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
jconn, err := json.Marshal(conn)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
d.apiWrap(w, 200, jconn)
}
func (d *Daemon) deleteBlockHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
err := d.manager.DeleteBlock(vars["id"])
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
d.ui <- &LogMsg{
Type: "DELETE",
Data: struct{
Id string
}{
vars["id"],
},
Id: "DAEMON",
}
d.apiWrap(w, 200, d.response("OK"))
}
func (d *Daemon) sendRouteHandler(w http.ResponseWriter, r *http.Request) {
var msg interface{}
vars := mux.Vars(r)
body, err := ioutil.ReadAll(r.Body)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
err = json.Unmarshal(body, &msg)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
err = d.manager.Send(vars["id"], vars["route"], msg)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
d.apiWrap(w, 200, d.response("OK"))
}
func (d *Daemon) queryRouteHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
msg, err := d.manager.Query(vars["id"], vars["route"])
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
jmsg, err := json.Marshal(msg)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
d.apiWrap(w, 200, jmsg)
}
func (d *Daemon) listConnectionHandler(w http.ResponseWriter, r *http.Request) {
conns, err := json.Marshal(d.manager.ListConnections())
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
d.apiWrap(w, 200, conns)
}
func (d *Daemon) createConnectionHandler(w http.ResponseWriter, r *http.Request) {
var conn *ConnectionInfo
body, err := ioutil.ReadAll(r.Body)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
err = json.Unmarshal(body, &conn)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
mconn, err := d.manager.Connect(conn)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
jconn, err := json.Marshal(mconn)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
d.apiWrap(w, 200, jconn)
}
func (d *Daemon) connectionInfoHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
conn, err := d.manager.GetConnection(vars["id"])
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
jconn, err := json.Marshal(conn)
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
d.apiWrap(w, 200, jconn)
}
// response encodes statusTxt as the daemon's standard status payload,
// e.g. {"daemon":"OK"}. If marshaling fails, a hand-built JSON object
// carrying the marshal error text is returned instead.
func (d *Daemon) response(statusTxt string) []byte {
	response, err := json.Marshal(struct {
		StatusTxt string `json:"daemon"`
	}{
		statusTxt,
	})
	if err != nil {
		// BUGFIX: %s interpolation produced invalid JSON whenever the
		// error text contained a double quote or backslash; %q escapes it.
		response = []byte(fmt.Sprintf(`{"daemon":%q}`, err.Error()))
	}
	return response
}
// apiWrap writes data as the body of a JSON HTTP response with the
// given status code. data is assumed to already be valid JSON (it is
// built by response() or json.Marshal at every call site in this file).
func (d *Daemon) apiWrap(w http.ResponseWriter, statusCode int, data []byte) {
	w.Header().Set("Content-Type", "application/json")
	// WriteHeader must follow all header mutations and precede the body.
	w.WriteHeader(statusCode)
	w.Write(data)
}
func (d *Daemon) deleteConnectionHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
err := d.manager.DeleteConnection(vars["id"])
if err != nil {
d.apiWrap(w, 500, d.response(err.Error()))
return
}
d.apiWrap(w, 200, d.response("OK"))
}
// Run starts the websocket hubs and message fan-out, registers the HTTP
// API routes, and blocks serving on d.Port. It only returns the process
// via log.Fatal when the listener fails.
func (d *Daemon) Run() {
	// Pump log/ui messages out to the two websocket hubs.
	go BroadcastStream(d.ui, d.log)
	go logStream.run()
	go uiStream.run()
	r := mux.NewRouter()
	r.HandleFunc("/", d.rootHandler)
	r.HandleFunc("/static/{file}", d.staticHandler)
	r.HandleFunc("/log", d.serveLogStream)
	r.HandleFunc("/ui", d.serveUIStream)
	r.HandleFunc("/import", d.importHandler).Methods("POST")
	r.HandleFunc("/export", d.exportHandler).Methods("GET")
	r.HandleFunc("/blocks", d.listBlockHandler).Methods("GET")                       // list all blocks
	r.HandleFunc("/blocks", d.createBlockHandler).Methods("POST")                    // create block w/o id
	r.HandleFunc("/blocks/{id}", d.blockInfoHandler).Methods("GET")                  // get block info
	r.HandleFunc("/blocks/{id}", d.updateBlockHandler).Methods("PUT")                // update block
	r.HandleFunc("/blocks/{id}", d.deleteBlockHandler).Methods("DELETE")             // delete block
	r.HandleFunc("/blocks/{id}/{route}", d.sendRouteHandler).Methods("POST")         // send to block route
	r.HandleFunc("/blocks/{id}/{route}", d.queryRouteHandler).Methods("GET")         // get from block route
	r.HandleFunc("/connections", d.createConnectionHandler).Methods("POST")          // create connection
	r.HandleFunc("/connections", d.listConnectionHandler).Methods("GET")             // list connections
	r.HandleFunc("/connections/{id}", d.connectionInfoHandler).Methods("GET")        // get info for connection
	r.HandleFunc("/connections/{id}", d.deleteConnectionHandler).Methods("DELETE")   // delete connection
	r.HandleFunc("/connections/{id}/{route}", d.queryRouteHandler).Methods("GET")    // get from block route
	http.Handle("/", r)
	err := http.ListenAndServe(":"+d.Port, nil)
	if err != nil {
		// BUGFIX: err.Error() was passed to log.Fatalf as the format
		// string; a '%' in the error would be misinterpreted as a verb.
		log.Fatal(err)
	}
}
// LogMsg is the unit of traffic on the daemon's log and ui channels;
// it is JSON-marshaled before being broadcast to websocket clients.
type LogMsg struct {
	Type string      // message kind, e.g. "INFO", "CREATE", "DELETE"
	Data interface{} // free-form payload; must be JSON-marshalable
	Id   string      // originator, typically "DAEMON"
}
// BroadcastStream pumps messages from the logger and ui channels to
// their respective websocket hubs forever. Logger messages are also
// echoed to stdout via the standard logger.
func BroadcastStream(ui chan *LogMsg, logger chan *LogMsg) {
	for {
		select {
		case l := <-logger:
			j, err := json.Marshal(l)
			if err != nil {
				// NOTE: break leaves only the select, not the for loop,
				// so an unmarshalable message is dropped and pumping continues.
				log.Println("could not broadcast")
				break
			}
			log.Println(string(j))
			logStream.Broadcast <- j
		case l := <-ui:
			j, err := json.Marshal(l)
			if err != nil {
				log.Println("could not broadcast")
				break
			}
			uiStream.Broadcast <- j
		}
	}
}
log fix
package daemon
import (
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
"io/ioutil"
"log"
"net/http"
)
const (
ADD_CHAN = iota
DEL_CHAN
)
const (
VERSION = ".2.1"
)
// logStream fans daemon log messages out to /log websocket clients.
var logStream = hub{
	Broadcast:   make(chan []byte),
	register:    make(chan *connection),
	unregister:  make(chan *connection),
	connections: make(map[*connection]bool),
}

// uiStream fans UI state-change events out to /ui websocket clients.
var uiStream = hub{
	Broadcast:   make(chan []byte),
	register:    make(chan *connection),
	unregister:  make(chan *connection),
	connections: make(map[*connection]bool),
}
// Daemon ties the block manager to the HTTP/websocket front end.
type Daemon struct {
	manager *BlockManager // owns all blocks and connections
	log     chan *LogMsg  // operational log messages, drained by BroadcastStream
	ui      chan *LogMsg  // UI state-change events, drained by BroadcastStream
	Port    string        // HTTP listen port; must be set before Run
}
// NewDaemon builds a Daemon with a fresh block manager and buffered
// log/ui channels. Port is left at its zero value; the caller is
// expected to set it before calling Run.
func NewDaemon() *Daemon {
	return &Daemon{
		manager: NewBlockManager(),
		// Buffers of 10 let handlers emit messages without blocking
		// briefly while BroadcastStream catches up.
		log: make(chan *LogMsg, 10),
		ui:  make(chan *LogMsg, 10),
	}
}
func addDefaultHeaders(fn http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
fn(w, r)
}
}
// rootHandler is a placeholder for the daemon's index page.
func (d *Daemon) rootHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "Hello!")
}

// staticHandler is a stub: despite the /static/{file} route it does not
// yet serve files. NOTE(review): confirm whether static asset serving
// is still planned or the route should be removed.
func (d *Daemon) staticHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "Hello!")
}
// serveLogStream upgrades a GET request to a websocket and registers
// the client with the global log hub until the connection closes.
func (d *Daemon) serveLogStream(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.Error(w, "Method not allowed", 405)
		return
	}
	w.Header().Set("Access-Control-Allow-Origin", "*")
	// Origin checking is intentionally disabled (CORS is wide open above).
	/*if r.Header.Get("Origin") != "http://"+r.Host {
	http.Error(w, "Origin not allowed", 403)
	return
	}*/
	ws, err := websocket.Upgrade(w, r, nil, 1024, 1024)
	if _, ok := err.(websocket.HandshakeError); ok {
		http.Error(w, "Not a websocket handshake", 400)
		return
	} else if err != nil {
		log.Println(err)
		return
	}
	c := &connection{send: make(chan []byte, 256), ws: ws, Hub: logStream}
	c.Hub.register <- c
	go c.writePump()
	// readPump blocks for the lifetime of the connection, keeping this
	// handler goroutine alive until the client disconnects.
	c.readPump()
}
// serveUIStream upgrades a GET request to a websocket and registers the
// client with the UI hub. It mirrors serveLogStream except for the hub.
func (d *Daemon) serveUIStream(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.Error(w, "Method not allowed", 405)
		return
	}
	w.Header().Set("Access-Control-Allow-Origin", "*")
	// Origin checking is intentionally disabled (CORS is wide open above).
	/*if r.Header.Get("Origin") != "http://"+r.Host {
	http.Error(w, "Origin not allowed", 403)
	return
	}*/
	ws, err := websocket.Upgrade(w, r, nil, 1024, 1024)
	if _, ok := err.(websocket.HandshakeError); ok {
		http.Error(w, "Not a websocket handshake", 400)
		return
	} else if err != nil {
		log.Println(err)
		return
	}
	c := &connection{send: make(chan []byte, 256), ws: ws, Hub: uiStream}
	c.Hub.register <- c
	go c.writePump()
	// readPump blocks until the client disconnects.
	c.readPump()
}
// importHandler recreates a previously exported set of blocks and
// connections from the JSON request body. Ids that collide with
// existing ones are rewritten (suffixed with a fresh manager id), and
// connection endpoints are remapped accordingly. Each created entity
// is announced on the UI channel.
//
// NOTE(review): creation errors mid-import return 500 but leave the
// already-created blocks/connections in place — verify whether partial
// imports are acceptable to callers.
func (d *Daemon) importHandler(w http.ResponseWriter, r *http.Request) {
	var export struct {
		Blocks      []*BlockInfo
		Connections []*ConnectionInfo
	}
	// corrected maps each imported id to the id actually used.
	corrected := make(map[string]string)
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		d.apiWrap(w, r, 500, d.response(err.Error()))
		return
	}
	err = json.Unmarshal(body, &export)
	if err != nil {
		d.apiWrap(w, r, 500, d.response(err.Error()))
		return
	}
	// First pass: reserve non-colliding ids for every block...
	for _, block := range export.Blocks {
		corrected[block.Id] = block.Id
		for d.manager.IdExists(corrected[block.Id]) {
			corrected[block.Id] = block.Id + "_" + d.manager.GetId()
		}
	}
	// ...and every connection.
	for _, conn := range export.Connections {
		corrected[conn.Id] = conn.Id
		for d.manager.IdExists(corrected[conn.Id]) {
			corrected[conn.Id] = conn.Id + "_" + d.manager.GetId()
		}
	}
	// Second pass: create blocks under their corrected ids.
	for _, block := range export.Blocks {
		block.Id = corrected[block.Id]
		eblock, err := d.manager.Create(block)
		if err != nil {
			d.apiWrap(w, r, 500, d.response(err.Error()))
			return
		}
		d.ui <- &LogMsg{
			Type: "CREATE",
			Data: eblock,
			Id:   "DAEMON",
		}
	}
	// Then wire connections, remapping endpoints to the corrected ids.
	for _, conn := range export.Connections {
		conn.Id = corrected[conn.Id]
		conn.FromId = corrected[conn.FromId]
		conn.ToId = corrected[conn.ToId]
		econn, err := d.manager.Connect(conn)
		if err != nil {
			d.apiWrap(w, r, 500, d.response(err.Error()))
			return
		}
		d.ui <- &LogMsg{
			Type: "CREATE",
			Data: econn,
			Id:   "DAEMON",
		}
	}
	d.log <- &LogMsg{
		Type: "INFO",
		Data: "Import OK",
		Id:   "DAEMON",
	}
	d.apiWrap(w, r, 200, d.response("OK"))
}
func (d *Daemon) exportHandler(w http.ResponseWriter, r *http.Request) {
export := struct {
Blocks []*BlockInfo
Connections []*ConnectionInfo
}{
d.manager.ListBlocks(),
d.manager.ListConnections(),
}
jex, err := json.Marshal(export)
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
d.apiWrap(w, r, 200, jex)
}
func (d *Daemon) listBlockHandler(w http.ResponseWriter, r *http.Request) {
blocks, err := json.Marshal(d.manager.ListBlocks())
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
d.apiWrap(w, r, 200, blocks)
}
func (d *Daemon) createBlockHandler(w http.ResponseWriter, r *http.Request) {
var block *BlockInfo
body, err := ioutil.ReadAll(r.Body)
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
err = json.Unmarshal(body, &block)
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
mblock, err := d.manager.Create(block)
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
d.ui <- &LogMsg{
Type: "CREATE",
Data: mblock,
Id: "DAEMON",
}
jblock, err := json.Marshal(mblock)
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
d.apiWrap(w, r, 200, jblock)
}
func (d *Daemon) updateBlockHandler(w http.ResponseWriter, r *http.Request) {
var coord *Coords
vars := mux.Vars(r)
body, err := ioutil.ReadAll(r.Body)
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
err = json.Unmarshal(body, &coord)
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
mblock, err := d.manager.UpdateBlock(vars["id"], coord)
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
jblock, err := json.Marshal(mblock)
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
d.ui <- &LogMsg{
Type: "UPDATE",
Data: struct{
Id string
}{
vars["id"],
},
Id: "DAEMON",
}
d.apiWrap(w, r, 200, jblock)
}
// blockInfoHandler responds with the JSON description of a single
// block, looked up by the {id} path variable.
func (d *Daemon) blockInfoHandler(w http.ResponseWriter, r *http.Request) {
	id := mux.Vars(r)["id"]
	block, err := d.manager.GetBlock(id)
	if err != nil {
		d.apiWrap(w, r, 500, d.response(err.Error()))
		return
	}
	payload, err := json.Marshal(block)
	if err != nil {
		d.apiWrap(w, r, 500, d.response(err.Error()))
		return
	}
	d.apiWrap(w, r, 200, payload)
}
func (d *Daemon) deleteBlockHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
err := d.manager.DeleteBlock(vars["id"])
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
d.ui <- &LogMsg{
Type: "DELETE",
Data: struct{
Id string
}{
vars["id"],
},
Id: "DAEMON",
}
d.apiWrap(w, r, 200, d.response("OK"))
}
func (d *Daemon) sendRouteHandler(w http.ResponseWriter, r *http.Request) {
var msg interface{}
vars := mux.Vars(r)
body, err := ioutil.ReadAll(r.Body)
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
err = json.Unmarshal(body, &msg)
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
err = d.manager.Send(vars["id"], vars["route"], msg)
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
d.ui <- &LogMsg{
Type: "UPDATE",
Data: struct{
Id string
}{
vars["id"],
},
Id: "DAEMON",
}
d.apiWrap(w, r, 200, d.response("OK"))
}
func (d *Daemon) queryRouteHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
msg, err := d.manager.Query(vars["id"], vars["route"])
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
jmsg, err := json.Marshal(msg)
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
d.ui <- &LogMsg{
Type: "QUERY",
Data: struct{
Id string
}{
vars["id"],
},
Id: "DAEMON",
}
d.apiWrap(w, r, 200, jmsg)
}
func (d *Daemon) listConnectionHandler(w http.ResponseWriter, r *http.Request) {
conns, err := json.Marshal(d.manager.ListConnections())
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
d.apiWrap(w, r, 200, conns)
}
func (d *Daemon) createConnectionHandler(w http.ResponseWriter, r *http.Request) {
var conn *ConnectionInfo
body, err := ioutil.ReadAll(r.Body)
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
err = json.Unmarshal(body, &conn)
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
mconn, err := d.manager.Connect(conn)
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
d.ui <- &LogMsg{
Type: "CREATE",
Data: mconn,
Id: "DAEMON",
}
jconn, err := json.Marshal(mconn)
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
d.apiWrap(w, r, 200, jconn)
}
func (d *Daemon) connectionInfoHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
conn, err := d.manager.GetConnection(vars["id"])
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
jconn, err := json.Marshal(conn)
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
d.apiWrap(w, r, 200, jconn)
}
// response encodes statusTxt as the daemon's standard status payload,
// e.g. {"daemon":"OK"}. If marshaling fails, a hand-built JSON object
// carrying the marshal error text is returned instead.
func (d *Daemon) response(statusTxt string) []byte {
	response, err := json.Marshal(struct {
		StatusTxt string `json:"daemon"`
	}{
		statusTxt,
	})
	if err != nil {
		// BUGFIX: %s interpolation produced invalid JSON whenever the
		// error text contained a double quote or backslash; %q escapes
		// it. The key is lowercased to match the json:"daemon" tag used
		// on the success path (apiWrap's unmarshal is case-insensitive,
		// so existing consumers are unaffected).
		response = []byte(fmt.Sprintf(`{"daemon":%q}`, err.Error()))
	}
	return response
}
// apiWrap writes data as a JSON response with statusCode and mirrors
// the outcome onto the daemon's log channel: a 200 logs the request
// path, anything else re-parses the payload to recover the error text.
func (d *Daemon) apiWrap(w http.ResponseWriter, r *http.Request, statusCode int, data []byte) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(statusCode)
	w.Write(data)
	if statusCode == 200 {
		d.log <- &LogMsg{
			Type: "INFO",
			Data: fmt.Sprintf("%d", statusCode) + ": " + r.URL.Path,
			Id:   "DAEMON",
		}
	} else {
		// Best effort: pull the "daemon" status text back out of the
		// payload built by response(). Go's JSON field matching is
		// case-insensitive, so the DAEMON field matches the "daemon"
		// key; the unmarshal error is deliberately ignored.
		var err struct {
			DAEMON string
		}
		_ = json.Unmarshal(data, &err)
		d.log <- &LogMsg{
			Type: "ERROR",
			Data: err.DAEMON,
			Id:   "DAEMON",
		}
	}
}
func (d *Daemon) deleteConnectionHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
err := d.manager.DeleteConnection(vars["id"])
if err != nil {
d.apiWrap(w, r, 500, d.response(err.Error()))
return
}
d.apiWrap(w, r, 200, d.response("OK"))
}
// Run starts the websocket hubs and message fan-out, registers the HTTP
// API routes, announces startup on the log channel, and blocks serving
// on d.Port. A listen failure terminates the process.
func (d *Daemon) Run() {
	// Fan-out must start before anything sends on d.log/d.ui below.
	go BroadcastStream(d.ui, d.log)
	go logStream.run()
	go uiStream.run()
	r := mux.NewRouter()
	r.HandleFunc("/", d.rootHandler)
	r.HandleFunc("/static/{file}", d.staticHandler)
	r.HandleFunc("/log", d.serveLogStream)
	r.HandleFunc("/ui", d.serveUIStream)
	r.HandleFunc("/import", d.importHandler).Methods("POST")
	r.HandleFunc("/export", d.exportHandler).Methods("GET")
	r.HandleFunc("/blocks", d.listBlockHandler).Methods("GET")                       // list all blocks
	r.HandleFunc("/blocks", d.createBlockHandler).Methods("POST")                    // create block w/o id
	r.HandleFunc("/blocks/{id}", d.blockInfoHandler).Methods("GET")                  // get block info
	r.HandleFunc("/blocks/{id}", d.updateBlockHandler).Methods("PUT")                // update block
	r.HandleFunc("/blocks/{id}", d.deleteBlockHandler).Methods("DELETE")             // delete block
	r.HandleFunc("/blocks/{id}/{route}", d.sendRouteHandler).Methods("POST")         // send to block route
	r.HandleFunc("/blocks/{id}/{route}", d.queryRouteHandler).Methods("GET")         // get from block route
	r.HandleFunc("/connections", d.createConnectionHandler).Methods("POST")          // create connection
	r.HandleFunc("/connections", d.listConnectionHandler).Methods("GET")             // list connections
	r.HandleFunc("/connections/{id}", d.connectionInfoHandler).Methods("GET")        // get info for connection
	r.HandleFunc("/connections/{id}", d.deleteConnectionHandler).Methods("DELETE")   // delete connection
	r.HandleFunc("/connections/{id}/{route}", d.queryRouteHandler).Methods("GET")    // get from block route
	http.Handle("/", r)
	d.log <- &LogMsg{
		Type: "INFO",
		Data: fmt.Sprintf("Starting Streamtools %s on port %s", VERSION, d.Port),
		Id:   "DAEMON",
	}
	err := http.ListenAndServe(":"+d.Port, nil)
	if err != nil {
		// BUGFIX: err.Error() was passed to log.Fatalf as the format
		// string; a '%' in the error would be misinterpreted as a verb.
		log.Fatal(err)
	}
}
// LogMsg is the unit of traffic on the daemon's log and ui channels;
// it is JSON-marshaled before being broadcast to websocket clients.
type LogMsg struct {
	Type string      // message kind, e.g. "INFO", "CREATE", "UPDATE", "ERROR"
	Data interface{} // free-form payload; must be JSON-marshalable
	Id   string      // originator, typically "DAEMON"
}
// BroadcastStream pumps messages from the logger and ui channels to
// their respective websocket hubs forever. Logger messages are echoed
// to stdout; ui messages are additionally forwarded onto the logger
// channel so that UI events also show up in the log stream.
func BroadcastStream(ui chan *LogMsg, logger chan *LogMsg) {
	for {
		select {
		case l := <-logger:
			j, err := json.Marshal(l)
			if err != nil {
				// break leaves only the select; the loop keeps running.
				log.Println("could not broadcast")
				break
			}
			log.Println(string(j))
			logStream.Broadcast <- j
		case l := <-ui:
			j, err := json.Marshal(l)
			if err != nil {
				log.Println("could not broadcast")
				break
			}
			uiStream.Broadcast <- j
			// NOTE(review): this send is performed by the same goroutine
			// that drains logger; if the logger channel's buffer is full
			// it would block forever. Looks safe only while traffic stays
			// below the channel buffer size — confirm.
			logger <- l
		}
	}
}
|
package daemon
import (
"os"
"fmt"
"github.com/sevlyar/go-daemon"
"github.com/Sirupsen/logrus"
"github.com/BluePecker/JwtAuth/pkg/storage"
"github.com/BluePecker/JwtAuth/daemon/server"
"syscall"
)
const (
TOKEN_TTL = 2 * 3600
VERSION = "1.0.0"
ALLOW_LOGIN_NUM = 3
)
type Storage struct {
Driver string
Opts string
}
type TLS struct {
Key string
Cert string
}
type Options struct {
PidFile string
LogFile string
LogLevel string
Port int
Host string
SockFile string
Daemon bool
Version bool
TLS TLS
Storage Storage
Secret string
}
type Daemon struct {
Options *Options
shadow *server.Shadow
rosiness *server.Rosiness
StorageE *storage.Engine
}
func Logger(level string) {
logrus.SetFormatter(&logrus.TextFormatter{
TimestampFormat: "2006-01-02 15:04:05",
})
Level, err := logrus.ParseLevel(level)
if err != nil {
logrus.Error(err)
os.Exit(0)
}
logrus.SetLevel(Level)
}
// Version prints the daemon version and exits when the version flag was
// given; otherwise it is a no-op.
func Version(version bool) {
	// Idiom fix: compare booleans implicitly, not with "== true".
	if version {
		fmt.Printf("JwtAuth version %s.\n", VERSION)
		os.Exit(0)
	}
}
// NewDaemon wraps args in a Daemon, optionally forking into the
// background first via go-daemon. In the parent process of a successful
// fork it returns nil, which callers treat as "nothing more to do";
// fork failures terminate the process.
func NewDaemon(background bool, args Options) *Daemon {
	if background {
		ctx := daemon.Context{
			PidFileName: args.PidFile,
			PidFilePerm: 0644,
			LogFilePerm: 0640,
			Umask:       027,
			WorkDir:     "/",
			LogFileName: args.LogFile,
		}
		if process, err := ctx.Reborn(); err == nil {
			defer ctx.Release()
			if process != nil {
				// Parent side of the fork: the child carries on.
				return nil
			}
		} else {
			if err == daemon.ErrWouldBlock {
				logrus.Error("daemon already exists.")
			} else {
				// BUGFIX: the format string had no verb, so err was
				// silently dropped from the message.
				logrus.Errorf("Unable to run: %v", err)
			}
			os.Exit(0)
		}
	}
	return &Daemon{Options: &args}
}
// NewStart validates args, optionally daemonizes, initializes storage,
// then runs the Rosiness and Shadow servers until SIGTERM/SIGQUIT.
func NewStart(args Options) {
	Logger(args.LogLevel)
	Version(args.Version)
	if args.Secret == "" {
		fmt.Println("please specify the key.")
		os.Exit(0)
	}
	// Flattened: the original nested the body in an else branch and
	// re-checked proc == nil inside it, which was unreachable dead code.
	proc := NewDaemon(args.Daemon, args)
	if proc == nil {
		// Parent process after a successful fork; the child continues.
		return
	}
	if err := proc.Storage(); err != nil {
		logrus.Error(err)
		os.Exit(0)
	}
	var (
		StopRosiness = make(chan struct{})
		StopShadow   = make(chan struct{})
	)
	daemon.SetSigHandler(func(sig os.Signal) error {
		StopRosiness <- struct{}{}
		StopShadow <- struct{}{}
		return daemon.ErrStop
	}, syscall.SIGTERM, syscall.SIGQUIT)
	go proc.Rosiness(StopRosiness)
	go proc.Shadow(StopShadow)
	if err := daemon.ServeSignals(); err != nil {
		logrus.Error(err)
	}
	logrus.Error("jwt daemon terminated.")
}
debug
package daemon
import (
"os"
"fmt"
"github.com/sevlyar/go-daemon"
"github.com/Sirupsen/logrus"
"github.com/BluePecker/JwtAuth/pkg/storage"
"github.com/BluePecker/JwtAuth/daemon/server"
"syscall"
)
const (
TOKEN_TTL = 2 * 3600
VERSION = "1.0.0"
ALLOW_LOGIN_NUM = 3
)
// Storage selects the token-storage backend and its options.
type Storage struct {
	Driver string // backend name, passed to the storage engine
	Opts   string // driver-specific option string
}

// TLS holds the key/certificate pair for serving TLS.
type TLS struct {
	Key  string // path to the private key file
	Cert string // path to the certificate file
}

// Options carries every command-line/configuration setting the daemon
// understands.
type Options struct {
	PidFile  string
	LogFile  string
	LogLevel string
	Port     int
	Host     string
	SockFile string
	Daemon   bool // run in the background when true
	Version  bool // print the version and exit when true
	TLS      TLS
	Storage  Storage
	Secret   string // signing key; required (see NewStart)
}

// Daemon bundles the runtime options with the servers and storage
// engine that NewStart wires up.
type Daemon struct {
	Options  *Options
	shadow   *server.Shadow
	rosiness *server.Rosiness
	StorageE *storage.Engine
}
// Logger configures logrus with a fixed timestamp format and the given
// level name; an unparsable level logs the error and exits.
// NOTE(review): exiting with status 0 on a configuration error looks
// unintentional — confirm whether a non-zero code was meant.
func Logger(level string) {
	logrus.SetFormatter(&logrus.TextFormatter{
		TimestampFormat: "2006-01-02 15:04:05",
	})
	Level, err := logrus.ParseLevel(level)
	if err != nil {
		logrus.Error(err)
		os.Exit(0)
	}
	logrus.SetLevel(Level)
}
// Version prints the daemon version and exits when the version flag was
// given; otherwise it is a no-op.
func Version(version bool) {
	// Idiom fix: compare booleans implicitly, not with "== true".
	if version {
		fmt.Printf("JwtAuth version %s.\n", VERSION)
		os.Exit(0)
	}
}
// NewDaemon wraps args in a Daemon, optionally forking into the
// background first via go-daemon. In the parent process of a successful
// fork it returns nil, which callers treat as "nothing more to do";
// fork failures terminate the process.
func NewDaemon(background bool, args Options) *Daemon {
	if background {
		ctx := daemon.Context{
			PidFileName: args.PidFile,
			PidFilePerm: 0644,
			LogFilePerm: 0640,
			Umask:       027,
			WorkDir:     "/",
			LogFileName: args.LogFile,
		}
		if process, err := ctx.Reborn(); err == nil {
			defer ctx.Release()
			if process != nil {
				// Parent side of the fork: the child carries on.
				return nil
			}
		} else {
			if err == daemon.ErrWouldBlock {
				logrus.Error("daemon already exists.")
			} else {
				// BUGFIX: the format string had no verb, so err was
				// silently dropped from the message.
				logrus.Errorf("Unable to run: %v", err)
			}
			os.Exit(0)
		}
	}
	return &Daemon{Options: &args}
}
func NewStart(args Options) {
Logger(args.LogLevel)
Version(args.Version)
if args.Secret == "" {
fmt.Println("please specify the key.")
os.Exit(0)
}
if proc := NewDaemon(args.Daemon, args); proc == nil {
return
} else {
if proc == nil {
return
}
if err := proc.Storage(); err != nil {
logrus.Error(err)
os.Exit(0)
}
var (
StopS = make(chan struct{})
StopR = make(chan struct{})
)
daemon.SetSigHandler(func(sig os.Signal) error {
StopR <- struct{}{}
StopS <- struct{}{}
return daemon.ErrStop
}, syscall.SIGTERM, syscall.SIGQUIT)
go proc.Shadow(StopS)
go proc.Rosiness(StopR)
if err := daemon.ServeSignals(); err != nil {
logrus.Error(err)
}
logrus.Error("jwt daemon terminated.")
}
} |
package daemon
import (
"encoding/json"
"fmt"
"github.com/rancher/convoy/api"
"github.com/rancher/convoy/util"
"net/http"
)
type pluginInfo struct {
Implements []string
}
type pluginResponse struct {
Mountpoint string `json:",omitempty"`
Err string `json:",omitempty"`
}
type pluginRequest struct {
Name string
Opts map[string]string
}
func (s *daemon) dockerActivate(w http.ResponseWriter, r *http.Request) {
log.Debugf("Handle plugin activate: %v %v", r.Method, r.RequestURI)
info := pluginInfo{
Implements: []string{"VolumeDriver"},
}
writeResponseOutput(w, info)
}
func getDockerVolumeName(r *http.Request) (string, error) {
request, err := getDockerVolumeRequest(r)
if err != nil {
return "", err
}
return request.Name, nil
}
func getDockerVolumeRequest(r *http.Request) (*pluginRequest, error) {
request := &pluginRequest{}
if err := json.NewDecoder(r.Body).Decode(request); err != nil {
return nil, err
}
log.Debugf("Request from docker: %v", request)
return request, nil
}
func (s *daemon) getDockerVolume(r *http.Request, create bool) (*Volume, error) {
request, err := getDockerVolumeRequest(r)
if err != nil {
return nil, err
}
name := request.Name
var (
volume *Volume
volumeName string
)
if util.ValidateName(name) {
volumeName = name
volume = s.loadVolumeByName(name)
} else {
// Not valid UUID or name
return nil, fmt.Errorf("Invalid volume name. Must be only contains 0-9, a-z, dash(-), underscore(_) and dot(.)")
}
if volume == nil {
if create {
log.Debugf("Create a new volume %v for docker", name)
size, err := util.ParseSize(request.Opts["size"])
if err != nil {
return nil, err
}
request := &api.VolumeCreateRequest{
Name: volumeName,
DriverName: request.Opts["driver"],
Size: size,
BackupURL: request.Opts["backup"],
}
volume, err = s.processVolumeCreate(request)
if err != nil {
return nil, err
}
} else {
return nil, fmt.Errorf("Cannot find volume %v", name)
}
}
return volume, nil
}
func dockerResponse(w http.ResponseWriter, mountPoint string, err error) {
e := pluginResponse{
Mountpoint: mountPoint,
}
if err != nil {
e.Err = err.Error()
}
writeResponseOutput(w, e)
}
func (s *daemon) dockerCreateVolume(w http.ResponseWriter, r *http.Request) {
s.GlobalLock.Lock()
defer s.GlobalLock.Unlock()
log.Debugf("Handle plugin create volume: %v %v", r.Method, r.RequestURI)
volume, err := s.getDockerVolume(r, true)
if err != nil {
dockerResponse(w, "", err)
return
}
log.Debugf("Found volume %v (name %v) for docker", volume.UUID, volume.Name)
dockerResponse(w, "", nil)
}
func (s *daemon) dockerRemoveVolume(w http.ResponseWriter, r *http.Request) {
s.GlobalLock.Lock()
defer s.GlobalLock.Unlock()
log.Debugf("Handle plugin remove volume: %v %v", r.Method, r.RequestURI)
volume, err := s.getDockerVolume(r, false)
if err != nil {
dockerResponse(w, "", err)
return
}
request := &api.VolumeDeleteRequest{
VolumeUUID: volume.UUID,
// By default we don't want to remove the volume because probably we're using NFS
ReferenceOnly: true,
}
if err := s.processVolumeDelete(request); err != nil {
dockerResponse(w, "", err)
return
}
log.Debugf("Removed volume %v (name %v) for docker", volume.UUID, volume.Name)
dockerResponse(w, "", nil)
}
func (s *daemon) dockerMountVolume(w http.ResponseWriter, r *http.Request) {
s.GlobalLock.Lock()
defer s.GlobalLock.Unlock()
log.Debugf("Handle plugin mount volume: %v %v", r.Method, r.RequestURI)
volume, err := s.getDockerVolume(r, false)
if err != nil {
dockerResponse(w, "", err)
return
}
log.Debugf("Mount volume: %v (name %v) for docker", volume.UUID, volume.Name)
mountPoint, err := s.processVolumeMount(volume, &api.VolumeMountRequest{})
if err != nil {
dockerResponse(w, "", err)
return
}
dockerResponse(w, mountPoint, nil)
}
func (s *daemon) dockerUnmountVolume(w http.ResponseWriter, r *http.Request) {
s.GlobalLock.Lock()
defer s.GlobalLock.Unlock()
log.Debugf("Handle plugin unmount volume: %v %v", r.Method, r.RequestURI)
volume, err := s.getDockerVolume(r, false)
if err != nil {
dockerResponse(w, "", err)
return
}
log.Debugf("Unmount volume: %v (name %v) for docker", volume.UUID, volume.Name)
if err := s.processVolumeUmount(volume); err != nil {
dockerResponse(w, "", err)
return
}
dockerResponse(w, "", nil)
}
func (s *daemon) dockerVolumePath(w http.ResponseWriter, r *http.Request) {
s.GlobalLock.RLock()
defer s.GlobalLock.RUnlock()
log.Debugf("Handle plugin volume path: %v %v", r.Method, r.RequestURI)
volume, err := s.getDockerVolume(r, false)
if err != nil {
dockerResponse(w, "", err)
return
}
mountPoint, err := s.getVolumeMountPoint(volume)
if err != nil {
dockerResponse(w, "", err)
return
}
log.Debugf("Volume: %v (name %v) is mounted at %v for docker", volume.UUID, volume.Name, mountPoint)
dockerResponse(w, mountPoint, nil)
}
ebs: Add docker volume command support
package daemon
import (
"encoding/json"
"fmt"
"github.com/rancher/convoy/api"
"github.com/rancher/convoy/util"
"net/http"
"strconv"
)
// pluginInfo is the /Plugin.Activate payload listing the docker plugin
// interfaces this daemon implements.
type pluginInfo struct {
	Implements []string
}

// pluginResponse is the generic reply for docker volume plugin calls.
type pluginResponse struct {
	Mountpoint string `json:",omitempty"` // set by Mount/Path on success
	Err        string `json:",omitempty"` // non-empty signals failure to docker
}

// pluginRequest is the generic body docker sends to volume plugin
// endpoints.
type pluginRequest struct {
	Name string            // volume name
	Opts map[string]string // driver options (size, iops, type, ...)
}
// dockerActivate handles the docker plugin handshake (/Plugin.Activate)
// by declaring this daemon as a VolumeDriver implementation.
func (s *daemon) dockerActivate(w http.ResponseWriter, r *http.Request) {
	log.Debugf("Handle plugin activate: %v %v", r.Method, r.RequestURI)
	info := pluginInfo{
		Implements: []string{"VolumeDriver"},
	}
	writeResponseOutput(w, info)
}
// getDockerVolumeName extracts just the volume name from a docker
// plugin request body.
func getDockerVolumeName(r *http.Request) (string, error) {
	var name string
	req, err := getDockerVolumeRequest(r)
	if err == nil {
		name = req.Name
	}
	return name, err
}
// getDockerVolumeRequest decodes the JSON plugin request from the body
// of a docker volume API call.
func getDockerVolumeRequest(r *http.Request) (*pluginRequest, error) {
	request := &pluginRequest{}
	if err := json.NewDecoder(r.Body).Decode(request); err != nil {
		return nil, err
	}
	log.Debugf("Request from docker: %v", request)
	return request, nil
}
// getDockerVolume resolves the volume named in a docker plugin request.
// When the volume does not exist and create is true, it is created from
// the request Opts (size, driver, backup, id, type, iops); otherwise a
// missing volume is an error. An invalid name is always an error.
func (s *daemon) getDockerVolume(r *http.Request, create bool) (*Volume, error) {
	request, err := getDockerVolumeRequest(r)
	if err != nil {
		return nil, err
	}
	name := request.Name
	var (
		volume     *Volume
		volumeName string
	)
	if util.ValidateName(name) {
		volumeName = name
		volume = s.loadVolumeByName(name)
	} else {
		// Not valid UUID or name
		return nil, fmt.Errorf("Invalid volume name. Must be only contains 0-9, a-z, dash(-), underscore(_) and dot(.)")
	}
	if volume == nil {
		if create {
			log.Debugf("Create a new volume %v for docker", name)
			size, err := util.ParseSize(request.Opts["size"])
			if err != nil {
				return nil, err
			}
			// iops defaults to 0 when the option is absent.
			iops := 0
			if request.Opts["iops"] != "" {
				iops, err = strconv.Atoi(request.Opts["iops"])
				if err != nil {
					return nil, err
				}
			}
			// This inner request shadows the plugin request above; from
			// here on "request" is the create request.
			request := &api.VolumeCreateRequest{
				Name:           volumeName,
				DriverName:     request.Opts["driver"],
				Size:           size,
				BackupURL:      request.Opts["backup"],
				DriverVolumeID: request.Opts["id"],
				Type:           request.Opts["type"],
				IOPS:           int64(iops),
			}
			volume, err = s.processVolumeCreate(request)
			if err != nil {
				return nil, err
			}
		} else {
			return nil, fmt.Errorf("Cannot find volume %v", name)
		}
	}
	return volume, nil
}
// dockerResponse writes the standard plugin reply: mountPoint on
// success, or the error text in Err when err is non-nil.
func dockerResponse(w http.ResponseWriter, mountPoint string, err error) {
	e := pluginResponse{
		Mountpoint: mountPoint,
	}
	if err != nil {
		e.Err = err.Error()
	}
	writeResponseOutput(w, e)
}
// dockerCreateVolume handles /VolumeDriver.Create: it looks up the
// named volume, creating it on demand, under the global lock. Docker
// expects no mountpoint in the create reply, hence the empty string.
func (s *daemon) dockerCreateVolume(w http.ResponseWriter, r *http.Request) {
	s.GlobalLock.Lock()
	defer s.GlobalLock.Unlock()
	log.Debugf("Handle plugin create volume: %v %v", r.Method, r.RequestURI)
	volume, err := s.getDockerVolume(r, true)
	if err != nil {
		dockerResponse(w, "", err)
		return
	}
	log.Debugf("Found volume %v (name %v) for docker", volume.UUID, volume.Name)
	dockerResponse(w, "", nil)
}
func (s *daemon) dockerRemoveVolume(w http.ResponseWriter, r *http.Request) {
s.GlobalLock.Lock()
defer s.GlobalLock.Unlock()
log.Debugf("Handle plugin remove volume: %v %v", r.Method, r.RequestURI)
volume, err := s.getDockerVolume(r, false)
if err != nil {
dockerResponse(w, "", err)
return
}
request := &api.VolumeDeleteRequest{
VolumeUUID: volume.UUID,
// By default we don't want to remove the volume because probably we're using NFS
ReferenceOnly: true,
}
if err := s.processVolumeDelete(request); err != nil {
dockerResponse(w, "", err)
return
}
log.Debugf("Removed volume %v (name %v) for docker", volume.UUID, volume.Name)
dockerResponse(w, "", nil)
}
func (s *daemon) dockerMountVolume(w http.ResponseWriter, r *http.Request) {
s.GlobalLock.Lock()
defer s.GlobalLock.Unlock()
log.Debugf("Handle plugin mount volume: %v %v", r.Method, r.RequestURI)
volume, err := s.getDockerVolume(r, false)
if err != nil {
dockerResponse(w, "", err)
return
}
log.Debugf("Mount volume: %v (name %v) for docker", volume.UUID, volume.Name)
mountPoint, err := s.processVolumeMount(volume, &api.VolumeMountRequest{})
if err != nil {
dockerResponse(w, "", err)
return
}
dockerResponse(w, mountPoint, nil)
}
// dockerUnmountVolume handles the Docker plugin Unmount request for the
// named volume.
func (s *daemon) dockerUnmountVolume(w http.ResponseWriter, r *http.Request) {
	s.GlobalLock.Lock()
	defer s.GlobalLock.Unlock()

	log.Debugf("Handle plugin unmount volume: %v %v", r.Method, r.RequestURI)
	vol, err := s.getDockerVolume(r, false)
	if err != nil {
		dockerResponse(w, "", err)
		return
	}
	log.Debugf("Unmount volume: %v (name %v) for docker", vol.UUID, vol.Name)
	// The response carries the unmount error (or nil on success).
	err = s.processVolumeUmount(vol)
	dockerResponse(w, "", err)
}
// dockerVolumePath handles the Docker plugin Path request: it reports
// the current mount point of the named volume. Only a read lock is
// taken, since nothing is modified.
func (s *daemon) dockerVolumePath(w http.ResponseWriter, r *http.Request) {
	s.GlobalLock.RLock()
	defer s.GlobalLock.RUnlock()
	log.Debugf("Handle plugin volume path: %v %v", r.Method, r.RequestURI)
	volume, err := s.getDockerVolume(r, false)
	if err != nil {
		dockerResponse(w, "", err)
		return
	}
	mountPoint, err := s.getVolumeMountPoint(volume)
	if err != nil {
		dockerResponse(w, "", err)
		return
	}
	log.Debugf("Volume: %v (name %v) is mounted at %v for docker", volume.UUID, volume.Name, mountPoint)
	dockerResponse(w, mountPoint, nil)
}
|
// Copyright 2018-2020 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"fmt"
"os"
"os/signal"
operatorMetrics "github.com/cilium/cilium/operator/metrics"
operatorOption "github.com/cilium/cilium/operator/option"
"github.com/cilium/cilium/pkg/components"
"github.com/cilium/cilium/pkg/ipam"
"github.com/cilium/cilium/pkg/k8s"
clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
"github.com/cilium/cilium/pkg/k8s/types"
k8sversion "github.com/cilium/cilium/pkg/k8s/version"
"github.com/cilium/cilium/pkg/kvstore"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/version"
gops "github.com/google/gops/agent"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"golang.org/x/sys/unix"
"google.golang.org/grpc"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var (
	// log is the operator-scoped logger.
	log = logging.DefaultLogger.WithField(logfields.LogSubsys, "cilium-operator")

	// rootCmd is the cobra entry point: it optionally generates the
	// command reference markdown and exits, otherwise it initializes the
	// environment and runs the operator.
	rootCmd = &cobra.Command{
		Use:   "cilium-operator",
		Short: "Run the cilium-operator",
		Run: func(cmd *cobra.Command, args []string) {
			cmdRefDir := viper.GetString(option.CMDRef)
			if cmdRefDir != "" {
				// Only generate the command reference, then exit.
				genMarkdown(cmd, cmdRefDir)
				os.Exit(0)
			}
			initEnv()
			runOperator(cmd)
		},
	}

	// Deprecated: remove in 1.9
	apiServerPort uint16

	// shutdownSignal is closed on SIGINT/SIGTERM to trigger shutdown.
	shutdownSignal = make(chan struct{})

	// ciliumK8sClient talks to the Cilium CRD API; set in runOperator.
	ciliumK8sClient clientset.Interface
)
// initEnv populates the configuration from the CLI flags and bootstraps
// logging for the operator.
func initEnv() {
	// Prepopulate option.Config with options from CLI.
	option.Config.Populate()
	// Also populate the operator-only configuration. Without this the
	// operator-specific flags (operatorOption.Config) silently stay at
	// their zero values even when set on the command line.
	operatorOption.Config.Populate()
	// add hooks after setting up metrics in the option.Config
	logging.DefaultLogger.Hooks.Add(metrics.NewLoggingHook(components.CiliumOperatortName))
	// Logging should always be bootstrapped first. Do not add any code above this!
	logging.SetupLogging(option.Config.LogDriver, option.Config.LogOpt, "cilium-operator", option.Config.Debug)
	option.LogRegisteredOptions(log)
}
// main installs SIGINT/SIGTERM handling (closing shutdownSignal), starts
// the gops agent so live stack traces can be pulled, and executes the
// cilium-operator cobra command.
func main() {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, unix.SIGINT, unix.SIGTERM)
	go func() {
		<-sigs
		close(shutdownSignal)
	}()

	// Open socket for using gops to get stacktraces of the agent.
	if err := gops.Listen(gops.Options{}); err != nil {
		fmt.Println(fmt.Sprintf("unable to start gops: %s", err))
		os.Exit(-1)
	}

	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(-1)
	}
}
// kvstoreEnabled reports whether any enabled feature requires a kvstore
// connection: kvstore-based identity allocation, or k8s service/node
// synchronization. A kvstore must be configured at all.
func kvstoreEnabled() bool {
	if option.Config.KVStore == "" {
		return false
	}
	switch {
	case option.Config.IdentityAllocationMode == option.IdentityAllocationModeKVstore:
		return true
	case operatorOption.Config.SyncK8sServices:
		return true
	case operatorOption.Config.SyncK8sNodes:
		return true
	}
	return false
}
// getAPIServerAddr returns the listen addresses for the operator API
// server. With no explicit address configured it serves on IPv4 and
// IPv6 loopback using the (deprecated) apiServerPort.
func getAPIServerAddr() []string {
	if addr := operatorOption.Config.OperatorAPIServeAddr; addr != "" {
		return []string{addr}
	}
	return []string{
		fmt.Sprintf("127.0.0.1:%d", apiServerPort),
		fmt.Sprintf("[::1]:%d", apiServerPort),
	}
}
// runOperator is the operator's main sequence: it starts the API server,
// connects to Kubernetes, wires up the cloud IPAM allocator, the kvstore,
// identity GC and the CNP/CCNP watchers, then blocks until shutdown.
// The statement order is load-bearing — do not reorder.
func runOperator(cmd *cobra.Command) {
	log.Infof("Cilium Operator %s", version.Version)
	// The API server starts before k8s is connected so health endpoints
	// can report progress; k8sInitDone is closed once k8s.Init succeeds.
	k8sInitDone := make(chan struct{})
	go startServer(shutdownSignal, k8sInitDone, getAPIServerAddr()...)
	if operatorOption.Config.EnableMetrics {
		operatorMetrics.Register()
	}
	k8s.Configure(
		option.Config.K8sAPIServer,
		option.Config.K8sKubeConfigPath,
		float32(option.Config.K8sClientQPSLimit),
		option.Config.K8sClientBurst,
	)
	if err := k8s.Init(); err != nil {
		log.WithError(err).Fatal("Unable to connect to Kubernetes apiserver")
	}
	close(k8sInitDone)
	ciliumK8sClient = k8s.CiliumClient()
	k8sversion.Update(k8s.Client())
	if !k8sversion.Capabilities().MinimalVersionMet {
		log.Fatalf("Minimal kubernetes version not met: %s < %s",
			k8sversion.Version(), k8sversion.MinimalVersionConstraint)
	}
	// Restart kube-dns as soon as possible since it helps etcd-operator to be
	// properly setup. If kube-dns is not managed by Cilium it can prevent
	// etcd from reaching out kube-dns in EKS.
	if option.Config.DisableCiliumEndpointCRD {
		log.Infof("KubeDNS unmanaged pods controller disabled as %q option is set to 'disabled' in Cilium ConfigMap", option.DisableCiliumEndpointCRDName)
	} else if operatorOption.Config.UnmanagedPodWatcherInterval != 0 {
		enableUnmanagedKubeDNSController()
	}
	var (
		nodeManager *ipam.NodeManager
		err         error
	)
	// Start the cloud-specific IPAM allocator if one is configured and
	// compiled into this binary (allocatorProviders is populated per cloud).
	switch option.Config.IPAM {
	case option.IPAMENI:
		ipamAllocatorAWS, providerBuiltin := allocatorProviders["aws"]
		if !providerBuiltin {
			// NOTE(review): err is still nil here, so WithError adds nothing.
			log.WithError(err).Fatal("AWS ENI allocator is not supported by this version of cilium-operator")
		}
		if err := ipamAllocatorAWS.Init(); err != nil {
			log.WithError(err).Fatal("Unable to init AWS ENI allocator")
		}
		nodeManager, err = ipamAllocatorAWS.Start(&ciliumNodeUpdateImplementation{})
		if err != nil {
			log.WithError(err).Fatal("Unable to start AWS ENI allocator")
		}
		startSynchronizingCiliumNodes(nodeManager)
	case option.IPAMAzure:
		ipamAllocatorAzure, providerBuiltin := allocatorProviders["azure"]
		if !providerBuiltin {
			log.WithError(err).Fatal("Azure allocator is not supported by this version of cilium-operator")
		}
		if err := ipamAllocatorAzure.Init(); err != nil {
			log.WithError(err).Fatal("Unable to init Azure allocator")
		}
		nodeManager, err = ipamAllocatorAzure.Start(&ciliumNodeUpdateImplementation{})
		if err != nil {
			log.WithError(err).Fatal("Unable to start Azure allocator")
		}
		startSynchronizingCiliumNodes(nodeManager)
	}
	if kvstoreEnabled() {
		if operatorOption.Config.SyncK8sServices {
			startSynchronizingServices()
		}
		var goopts *kvstore.ExtraOptions
		scopedLog := log.WithFields(logrus.Fields{
			"kvstore": option.Config.KVStore,
			"address": option.Config.KVStoreOpt[fmt.Sprintf("%s.address", option.Config.KVStore)],
		})
		if operatorOption.Config.SyncK8sServices {
			// If K8s is enabled we can do the service translation automagically by
			// looking at services from k8s and retrieve the service IP from that.
			// This makes cilium to not depend on kube dns to interact with etcd
			if k8s.IsEnabled() {
				svcURL, isETCDOperator := kvstore.IsEtcdOperator(option.Config.KVStore, option.Config.KVStoreOpt, option.Config.K8sNamespace)
				if isETCDOperator {
					scopedLog.Info("cilium-operator running with service synchronization: automatic etcd service translation enabled")
					svcGetter := k8s.ServiceIPGetter(&k8sSvcCache)
					name, namespace, err := kvstore.SplitK8sServiceURL(svcURL)
					if err != nil {
						// If we couldn't derive the name/namespace for the given
						// svcURL log the error so the user can see it.
						// k8s.CreateCustomDialer won't be able to derive
						// the name/namespace as well so it does not matter that
						// we wait for all services to be synchronized with k8s.
						scopedLog.WithError(err).WithFields(logrus.Fields{
							"url": svcURL,
						}).Error("Unable to derive service name from given url")
					} else {
						scopedLog.WithFields(logrus.Fields{
							logfields.ServiceName:      name,
							logfields.ServiceNamespace: namespace,
						}).Info("Retrieving service spec from k8s to perform automatic etcd service translation")
						k8sSvc, err := k8s.Client().CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{})
						switch {
						case err == nil:
							// Create another service cache that contains the
							// k8s service for etcd. As soon the k8s caches are
							// synced, this hijack will stop happening.
							sc := k8s.NewServiceCache(nil)
							sc.UpdateService(&types.Service{Service: k8sSvc}, nil)
							svcGetter = &serviceGetter{
								shortCutK8sCache: &sc,
								k8sCache:         &k8sSvcCache,
							}
							// NOTE(review): this break is redundant — Go switch
							// cases never fall through.
							break
						case errors.IsNotFound(err):
							scopedLog.Error("Service not found in k8s")
						default:
							scopedLog.Warning("Unable to get service spec from k8s, this might cause network disruptions with etcd")
						}
					}
					// Shadow the package-level logger with an etcd-scoped one
					// for the custom dialer only.
					log := log.WithField(logfields.LogSubsys, "etcd")
					goopts = &kvstore.ExtraOptions{
						DialOption: []grpc.DialOption{
							grpc.WithDialer(k8s.CreateCustomDialer(svcGetter, log)),
						},
					}
				}
			}
		} else {
			scopedLog.Info("cilium-operator running without service synchronization: automatic etcd service translation disabled")
		}
		scopedLog.Info("Connecting to kvstore...")
		if err := kvstore.Setup(context.TODO(), option.Config.KVStore, option.Config.KVStoreOpt, goopts); err != nil {
			scopedLog.WithError(err).Fatal("Unable to setup kvstore")
		}
		if operatorOption.Config.SyncK8sNodes {
			if err := runNodeWatcher(nodeManager); err != nil {
				log.WithError(err).Error("Unable to setup node watcher")
			}
		}
		startKvstoreWatchdog()
	}
	// Identity garbage collection, per allocation mode.
	switch option.Config.IdentityAllocationMode {
	case option.IdentityAllocationModeCRD:
		if !k8s.IsEnabled() {
			log.Fatal("CRD Identity allocation mode requires k8s to be configured.")
		}
		startManagingK8sIdentities()
		if operatorOption.Config.IdentityGCInterval != 0 {
			go startCRDIdentityGC()
		}
	case option.IdentityAllocationModeKVstore:
		if operatorOption.Config.IdentityGCInterval != 0 {
			startKvstoreIdentityGC()
		}
	}
	if operatorOption.Config.EnableCEPGC && operatorOption.Config.EndpointGCInterval != 0 {
		enableCiliumEndpointSyncGC()
	}
	err = enableCNPWatcher()
	if err != nil {
		log.WithError(err).WithField("subsys", "CNPWatcher").Fatal(
			"Cannot connect to Kubernetes apiserver ")
	}
	err = enableCCNPWatcher()
	if err != nil {
		log.WithError(err).WithField("subsys", "CCNPWatcher").Fatal(
			"Cannot connect to Kubernetes apiserver ")
	}
	log.Info("Initialization complete")
	// Block until a termination signal closes shutdownSignal.
	<-shutdownSignal
	// graceful exit
	log.Info("Received termination signal. Shutting down")
	return
}
operator: Fix operator flags
The operator-specific flags were not populated.
Fixes: 588f8352a ("operator: split operator-only options into separate package")
Signed-off-by: Thomas Graf <5f50a84c1fa3bcff146405017f36aec1a10a9e38@cilium.io>
// Copyright 2018-2020 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"fmt"
"os"
"os/signal"
operatorMetrics "github.com/cilium/cilium/operator/metrics"
operatorOption "github.com/cilium/cilium/operator/option"
"github.com/cilium/cilium/pkg/components"
"github.com/cilium/cilium/pkg/ipam"
"github.com/cilium/cilium/pkg/k8s"
clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
"github.com/cilium/cilium/pkg/k8s/types"
k8sversion "github.com/cilium/cilium/pkg/k8s/version"
"github.com/cilium/cilium/pkg/kvstore"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/version"
gops "github.com/google/gops/agent"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"golang.org/x/sys/unix"
"google.golang.org/grpc"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var (
	// log is the operator-scoped logger.
	log = logging.DefaultLogger.WithField(logfields.LogSubsys, "cilium-operator")

	// rootCmd is the cobra entry point: it optionally generates the
	// command reference markdown and exits, otherwise it initializes the
	// environment and runs the operator.
	rootCmd = &cobra.Command{
		Use:   "cilium-operator",
		Short: "Run the cilium-operator",
		Run: func(cmd *cobra.Command, args []string) {
			cmdRefDir := viper.GetString(option.CMDRef)
			if cmdRefDir != "" {
				// Only generate the command reference, then exit.
				genMarkdown(cmd, cmdRefDir)
				os.Exit(0)
			}
			initEnv()
			runOperator(cmd)
		},
	}

	// Deprecated: remove in 1.9
	apiServerPort uint16

	// shutdownSignal is closed on SIGINT/SIGTERM to trigger shutdown.
	shutdownSignal = make(chan struct{})

	// ciliumK8sClient talks to the Cilium CRD API; set in runOperator.
	ciliumK8sClient clientset.Interface
)
// initEnv populates the shared and operator-only configuration from the
// CLI flags and bootstraps logging for the operator.
func initEnv() {
	// Prepopulate option.Config with options from CLI.
	option.Config.Populate()
	// Populate the operator-only flags as well (operatorOption.Config).
	operatorOption.Config.Populate()
	// add hooks after setting up metrics in the option.Config
	logging.DefaultLogger.Hooks.Add(metrics.NewLoggingHook(components.CiliumOperatortName))
	// Logging should always be bootstrapped first. Do not add any code above this!
	logging.SetupLogging(option.Config.LogDriver, option.Config.LogOpt, "cilium-operator", option.Config.Debug)
	option.LogRegisteredOptions(log)
}
// main installs SIGINT/SIGTERM handling (closing shutdownSignal), starts
// the gops agent so live stack traces can be pulled, and executes the
// cilium-operator cobra command.
func main() {
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, unix.SIGINT, unix.SIGTERM)
	go func() {
		<-signals
		close(shutdownSignal)
	}()
	// Open socket for using gops to get stacktraces of the agent.
	if err := gops.Listen(gops.Options{}); err != nil {
		errorString := fmt.Sprintf("unable to start gops: %s", err)
		fmt.Println(errorString)
		os.Exit(-1)
	}
	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(-1)
	}
}
// kvstoreEnabled reports whether any enabled feature requires a kvstore
// connection. A kvstore must be configured at all; then either
// kvstore-based identity allocation or k8s service/node sync needs it.
func kvstoreEnabled() bool {
	if option.Config.KVStore == "" {
		return false
	}
	if option.Config.IdentityAllocationMode == option.IdentityAllocationModeKVstore {
		return true
	}
	return operatorOption.Config.SyncK8sServices || operatorOption.Config.SyncK8sNodes
}
// getAPIServerAddr returns the listen addresses for the operator API
// server. With no explicit address configured it serves on both IPv4
// and IPv6 loopback using the (deprecated) apiServerPort.
func getAPIServerAddr() []string {
	addr := operatorOption.Config.OperatorAPIServeAddr
	if addr != "" {
		return []string{addr}
	}
	v4 := fmt.Sprintf("127.0.0.1:%d", apiServerPort)
	v6 := fmt.Sprintf("[::1]:%d", apiServerPort)
	return []string{v4, v6}
}
// runOperator is the operator's main sequence: it starts the API server,
// connects to Kubernetes, wires up the cloud IPAM allocator, the kvstore,
// identity GC and the CNP/CCNP watchers, then blocks until shutdown.
// The statement order is load-bearing — do not reorder.
func runOperator(cmd *cobra.Command) {
	log.Infof("Cilium Operator %s", version.Version)
	// The API server starts before k8s is connected so health endpoints
	// can report progress; k8sInitDone is closed once k8s.Init succeeds.
	k8sInitDone := make(chan struct{})
	go startServer(shutdownSignal, k8sInitDone, getAPIServerAddr()...)
	if operatorOption.Config.EnableMetrics {
		operatorMetrics.Register()
	}
	k8s.Configure(
		option.Config.K8sAPIServer,
		option.Config.K8sKubeConfigPath,
		float32(option.Config.K8sClientQPSLimit),
		option.Config.K8sClientBurst,
	)
	if err := k8s.Init(); err != nil {
		log.WithError(err).Fatal("Unable to connect to Kubernetes apiserver")
	}
	close(k8sInitDone)
	ciliumK8sClient = k8s.CiliumClient()
	k8sversion.Update(k8s.Client())
	if !k8sversion.Capabilities().MinimalVersionMet {
		log.Fatalf("Minimal kubernetes version not met: %s < %s",
			k8sversion.Version(), k8sversion.MinimalVersionConstraint)
	}
	// Restart kube-dns as soon as possible since it helps etcd-operator to be
	// properly setup. If kube-dns is not managed by Cilium it can prevent
	// etcd from reaching out kube-dns in EKS.
	if option.Config.DisableCiliumEndpointCRD {
		log.Infof("KubeDNS unmanaged pods controller disabled as %q option is set to 'disabled' in Cilium ConfigMap", option.DisableCiliumEndpointCRDName)
	} else if operatorOption.Config.UnmanagedPodWatcherInterval != 0 {
		enableUnmanagedKubeDNSController()
	}
	var (
		nodeManager *ipam.NodeManager
		err         error
	)
	// Start the cloud-specific IPAM allocator if one is configured and
	// compiled into this binary (allocatorProviders is populated per cloud).
	switch option.Config.IPAM {
	case option.IPAMENI:
		ipamAllocatorAWS, providerBuiltin := allocatorProviders["aws"]
		if !providerBuiltin {
			// NOTE(review): err is still nil here, so WithError adds nothing.
			log.WithError(err).Fatal("AWS ENI allocator is not supported by this version of cilium-operator")
		}
		if err := ipamAllocatorAWS.Init(); err != nil {
			log.WithError(err).Fatal("Unable to init AWS ENI allocator")
		}
		nodeManager, err = ipamAllocatorAWS.Start(&ciliumNodeUpdateImplementation{})
		if err != nil {
			log.WithError(err).Fatal("Unable to start AWS ENI allocator")
		}
		startSynchronizingCiliumNodes(nodeManager)
	case option.IPAMAzure:
		ipamAllocatorAzure, providerBuiltin := allocatorProviders["azure"]
		if !providerBuiltin {
			log.WithError(err).Fatal("Azure allocator is not supported by this version of cilium-operator")
		}
		if err := ipamAllocatorAzure.Init(); err != nil {
			log.WithError(err).Fatal("Unable to init Azure allocator")
		}
		nodeManager, err = ipamAllocatorAzure.Start(&ciliumNodeUpdateImplementation{})
		if err != nil {
			log.WithError(err).Fatal("Unable to start Azure allocator")
		}
		startSynchronizingCiliumNodes(nodeManager)
	}
	if kvstoreEnabled() {
		if operatorOption.Config.SyncK8sServices {
			startSynchronizingServices()
		}
		var goopts *kvstore.ExtraOptions
		scopedLog := log.WithFields(logrus.Fields{
			"kvstore": option.Config.KVStore,
			"address": option.Config.KVStoreOpt[fmt.Sprintf("%s.address", option.Config.KVStore)],
		})
		if operatorOption.Config.SyncK8sServices {
			// If K8s is enabled we can do the service translation automagically by
			// looking at services from k8s and retrieve the service IP from that.
			// This makes cilium to not depend on kube dns to interact with etcd
			if k8s.IsEnabled() {
				svcURL, isETCDOperator := kvstore.IsEtcdOperator(option.Config.KVStore, option.Config.KVStoreOpt, option.Config.K8sNamespace)
				if isETCDOperator {
					scopedLog.Info("cilium-operator running with service synchronization: automatic etcd service translation enabled")
					svcGetter := k8s.ServiceIPGetter(&k8sSvcCache)
					name, namespace, err := kvstore.SplitK8sServiceURL(svcURL)
					if err != nil {
						// If we couldn't derive the name/namespace for the given
						// svcURL log the error so the user can see it.
						// k8s.CreateCustomDialer won't be able to derive
						// the name/namespace as well so it does not matter that
						// we wait for all services to be synchronized with k8s.
						scopedLog.WithError(err).WithFields(logrus.Fields{
							"url": svcURL,
						}).Error("Unable to derive service name from given url")
					} else {
						scopedLog.WithFields(logrus.Fields{
							logfields.ServiceName:      name,
							logfields.ServiceNamespace: namespace,
						}).Info("Retrieving service spec from k8s to perform automatic etcd service translation")
						k8sSvc, err := k8s.Client().CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{})
						switch {
						case err == nil:
							// Create another service cache that contains the
							// k8s service for etcd. As soon the k8s caches are
							// synced, this hijack will stop happening.
							sc := k8s.NewServiceCache(nil)
							sc.UpdateService(&types.Service{Service: k8sSvc}, nil)
							svcGetter = &serviceGetter{
								shortCutK8sCache: &sc,
								k8sCache:         &k8sSvcCache,
							}
							// NOTE(review): this break is redundant — Go switch
							// cases never fall through.
							break
						case errors.IsNotFound(err):
							scopedLog.Error("Service not found in k8s")
						default:
							scopedLog.Warning("Unable to get service spec from k8s, this might cause network disruptions with etcd")
						}
					}
					// Shadow the package-level logger with an etcd-scoped one
					// for the custom dialer only.
					log := log.WithField(logfields.LogSubsys, "etcd")
					goopts = &kvstore.ExtraOptions{
						DialOption: []grpc.DialOption{
							grpc.WithDialer(k8s.CreateCustomDialer(svcGetter, log)),
						},
					}
				}
			}
		} else {
			scopedLog.Info("cilium-operator running without service synchronization: automatic etcd service translation disabled")
		}
		scopedLog.Info("Connecting to kvstore...")
		if err := kvstore.Setup(context.TODO(), option.Config.KVStore, option.Config.KVStoreOpt, goopts); err != nil {
			scopedLog.WithError(err).Fatal("Unable to setup kvstore")
		}
		if operatorOption.Config.SyncK8sNodes {
			if err := runNodeWatcher(nodeManager); err != nil {
				log.WithError(err).Error("Unable to setup node watcher")
			}
		}
		startKvstoreWatchdog()
	}
	// Identity garbage collection, per allocation mode.
	switch option.Config.IdentityAllocationMode {
	case option.IdentityAllocationModeCRD:
		if !k8s.IsEnabled() {
			log.Fatal("CRD Identity allocation mode requires k8s to be configured.")
		}
		startManagingK8sIdentities()
		if operatorOption.Config.IdentityGCInterval != 0 {
			go startCRDIdentityGC()
		}
	case option.IdentityAllocationModeKVstore:
		if operatorOption.Config.IdentityGCInterval != 0 {
			startKvstoreIdentityGC()
		}
	}
	if operatorOption.Config.EnableCEPGC && operatorOption.Config.EndpointGCInterval != 0 {
		enableCiliumEndpointSyncGC()
	}
	err = enableCNPWatcher()
	if err != nil {
		log.WithError(err).WithField("subsys", "CNPWatcher").Fatal(
			"Cannot connect to Kubernetes apiserver ")
	}
	err = enableCCNPWatcher()
	if err != nil {
		log.WithError(err).WithField("subsys", "CCNPWatcher").Fatal(
			"Cannot connect to Kubernetes apiserver ")
	}
	log.Info("Initialization complete")
	// Block until a termination signal closes shutdownSignal.
	<-shutdownSignal
	// graceful exit
	log.Info("Received termination signal. Shutting down")
	return
}
|
package opts
import (
"fmt"
"os"
"runtime"
"testing"
)
// TestValidateEnv checks that ValidateEnv accepts well-formed environment
// variable specifications and expands bare variable names from the
// current process environment.
func TestValidateEnv(t *testing.T) {
	cases := map[string]string{
		"a":                   "a",
		"something":           "something",
		"_=a":                 "_=a",
		"env1=value1":         "env1=value1",
		"_env1=value1":        "_env1=value1",
		"env2=value2=value3":  "env2=value2=value3",
		"env3=abc!qwe":        "env3=abc!qwe",
		"env_4=value 4":       "env_4=value 4",
		"PATH":                fmt.Sprintf("PATH=%v", os.Getenv("PATH")),
		"PATH=something":      "PATH=something",
		"asd!qwe":             "asd!qwe",
		"1asd":                "1asd",
		"123":                 "123",
		"some space":          "some space",
		" some space before":  " some space before",
		"some space after ":   "some space after ",
	}
	// Environment variables are case in-sensitive on Windows
	if runtime.GOOS == "windows" {
		cases["PaTh"] = fmt.Sprintf("PaTh=%v", os.Getenv("PATH"))
	}
	for input, want := range cases {
		got, err := ValidateEnv(input)
		if err != nil {
			t.Fatal(err)
		}
		if got != want {
			t.Fatalf("Expected [%v], got [%v]", want, got)
		}
	}
}
add an exception case and change the map to a struct with an expected error
Signed-off-by: chchliang <dd9f3e7b4f81c9a8683381fac1edadd0f748268e@zte.com.cn>
add an exception case and change the map to a struct with an expected error
Signed-off-by: chchliang <dd9f3e7b4f81c9a8683381fac1edadd0f748268e@zte.com.cn>
package opts
import (
"fmt"
"os"
"runtime"
"testing"
)
// TestValidateEnv exercises ValidateEnv with both valid variable
// specifications (bare names are expanded from the process environment)
// and malformed ones such as "=a", which must return an error.
func TestValidateEnv(t *testing.T) {
	// Name the case type once so the Windows-only append below can reuse
	// it instead of repeating an anonymous struct literal.
	type testCase struct {
		value    string
		expected string
		err      error
	}
	testcases := []testCase{
		{
			value:    "a",
			expected: "a",
		},
		{
			value:    "something",
			expected: "something",
		},
		{
			value:    "_=a",
			expected: "_=a",
		},
		{
			value:    "env1=value1",
			expected: "env1=value1",
		},
		{
			value:    "_env1=value1",
			expected: "_env1=value1",
		},
		{
			value:    "env2=value2=value3",
			expected: "env2=value2=value3",
		},
		{
			value:    "env3=abc!qwe",
			expected: "env3=abc!qwe",
		},
		{
			value:    "env_4=value 4",
			expected: "env_4=value 4",
		},
		{
			value:    "PATH",
			expected: fmt.Sprintf("PATH=%v", os.Getenv("PATH")),
		},
		{
			// fmt.Errorf formats directly; no need to wrap fmt.Sprintf.
			value: "=a",
			err:   fmt.Errorf("invalid environment variable: %s", "=a"),
		},
		{
			value:    "PATH=something",
			expected: "PATH=something",
		},
		{
			value:    "asd!qwe",
			expected: "asd!qwe",
		},
		{
			value:    "1asd",
			expected: "1asd",
		},
		{
			value:    "123",
			expected: "123",
		},
		{
			value:    "some space",
			expected: "some space",
		},
		{
			value:    " some space before",
			expected: " some space before",
		},
		{
			value:    "some space after ",
			expected: "some space after ",
		},
		{
			value: "=",
			err:   fmt.Errorf("invalid environment variable: %s", "="),
		},
	}
	// Environment variables are case in-sensitive on Windows
	if runtime.GOOS == "windows" {
		testcases = append(testcases, testCase{
			value:    "PaTh",
			expected: fmt.Sprintf("PaTh=%v", os.Getenv("PATH")),
		})
	}
	for _, tc := range testcases {
		actual, err := ValidateEnv(tc.value)
		if err != nil {
			if tc.err == nil {
				t.Fatalf("Expected err is nil, got err[%v]", err)
			}
			if err.Error() != tc.err.Error() {
				t.Fatalf("Expected err[%v], got err[%v]", tc.err, err)
			}
			// Error matched the expectation; there is no output to compare.
			continue
		}
		if tc.err != nil {
			t.Fatalf("Expected err[%v], but err is nil", tc.err)
		}
		if actual != tc.expected {
			t.Fatalf("Expected [%v], got [%v]", tc.expected, actual)
		}
	}
}
|
package isolated
import (
"code.cloudfoundry.org/cli/integration/helpers"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gbytes"
. "github.com/onsi/gomega/gexec"
)
// Integration specs for the "cf set-space-role" command: help output and
// the happy-path role assignment.
var _ = Describe("set-space-role command", func() {
	Describe("help", func() {
		When("--help flag is set", func() {
			It("Displays command usage to output", func() {
				session := helpers.CF("set-space-role", "--help")
				Eventually(session).Should(Say("NAME:"))
				Eventually(session).Should(Say("set-space-role - Assign a space role to a user"))
				Eventually(session).Should(Say("USAGE:"))
				Eventually(session).Should(Say("cf set-space-role USERNAME ORG SPACE ROLE"))
				Eventually(session).Should(Say("ROLES:"))
				Eventually(session).Should(Say("'SpaceManager' - Invite and manage users, and enable features for a given space"))
				Eventually(session).Should(Say("'SpaceDeveloper' - Create and manage apps and services, and see logs and reports"))
				Eventually(session).Should(Say("'SpaceAuditor' - View logs, reports, and settings on this space"))
				Eventually(session).Should(Say("SEE ALSO:"))
				Eventually(session).Should(Say("space-users"))
				Eventually(session).Should(Exit(0))
			})
		})
	})
	When("the org, space, and user all exist", func() {
		var (
			username  string
			orgName   string
			spaceName string
		)
		// Log in and create a fresh org, space and user for each spec.
		BeforeEach(func() {
			helpers.LoginCF()
			orgName = helpers.NewOrgName()
			spaceName = helpers.NewSpaceName()
			helpers.CreateOrgAndSpace(orgName, spaceName)
			username, _ = helpers.CreateUser()
		})
		It("sets the space role for the user", func() {
			session := helpers.CF("set-space-role", username, orgName, spaceName, "SpaceAuditor")
			Eventually(session).Should(Say("Assigning role RoleSpaceAuditor to user %s in org %s / space %s as admin...", username, orgName, spaceName))
			Eventually(session).Should(Say("OK"))
			Eventually(session).Should(Exit(0))
		})
	})
})
Add test that set-space-role is idempotent
[Finishes #164022856]
Signed-off-by: Will Murphy <a0e419adf8e2224ae039ba5f1877fb7f62d300e5@pivotal.io>
package isolated
import (
"code.cloudfoundry.org/cli/integration/helpers"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gbytes"
. "github.com/onsi/gomega/gexec"
)
// Integration specs for the "cf set-space-role" command: help output,
// the happy-path role assignment, and idempotency when the role is
// already assigned.
var _ = Describe("set-space-role command", func() {
	Describe("help", func() {
		When("--help flag is set", func() {
			It("Displays command usage to output", func() {
				session := helpers.CF("set-space-role", "--help")
				Eventually(session).Should(Say("NAME:"))
				Eventually(session).Should(Say("set-space-role - Assign a space role to a user"))
				Eventually(session).Should(Say("USAGE:"))
				Eventually(session).Should(Say("cf set-space-role USERNAME ORG SPACE ROLE"))
				Eventually(session).Should(Say("ROLES:"))
				Eventually(session).Should(Say("'SpaceManager' - Invite and manage users, and enable features for a given space"))
				Eventually(session).Should(Say("'SpaceDeveloper' - Create and manage apps and services, and see logs and reports"))
				Eventually(session).Should(Say("'SpaceAuditor' - View logs, reports, and settings on this space"))
				Eventually(session).Should(Say("SEE ALSO:"))
				Eventually(session).Should(Say("space-users"))
				Eventually(session).Should(Exit(0))
			})
		})
	})
	When("the org, space, and user all exist", func() {
		var (
			username  string
			orgName   string
			spaceName string
		)
		// Log in and create a fresh org, space and user for each spec.
		BeforeEach(func() {
			helpers.LoginCF()
			orgName = helpers.NewOrgName()
			spaceName = helpers.NewSpaceName()
			helpers.CreateOrgAndSpace(orgName, spaceName)
			username, _ = helpers.CreateUser()
		})
		It("sets the space role for the user", func() {
			session := helpers.CF("set-space-role", username, orgName, spaceName, "SpaceAuditor")
			Eventually(session).Should(Say("Assigning role RoleSpaceAuditor to user %s in org %s / space %s as admin...", username, orgName, spaceName))
			Eventually(session).Should(Say("OK"))
			Eventually(session).Should(Exit(0))
		})
		When("the user already has the desired role", func() {
			// Assign the role once up front so the spec re-assigns it.
			BeforeEach(func() {
				session := helpers.CF("set-space-role", username, orgName, spaceName, "SpaceAuditor")
				Eventually(session).Should(Say("Assigning role RoleSpaceAuditor to user %s in org %s / space %s as admin...", username, orgName, spaceName))
				Eventually(session).Should(Exit(0))
			})
			It("is idempotent", func() {
				session := helpers.CF("set-space-role", username, orgName, spaceName, "SpaceAuditor")
				Eventually(session).Should(Say("Assigning role RoleSpaceAuditor to user %s in org %s / space %s as admin...", username, orgName, spaceName))
				Eventually(session).Should(Exit(0))
			})
		})
	})
})
|
package main
import (
"bytes"
"fmt"
"os"
"os/exec"
"syscall"
"github.com/codegangsta/cli"
)
// main configures the fail-notifier CLI application and dispatches to
// action when the app runs.
func main() {
	app := cli.NewApp()
	app.Name = "fail-notifier"
	app.Version = "0.0.1"
	app.Author = "Manabu Inoue"
	app.Email = ""
	app.HideVersion = true
	app.EnableBashCompletion = true
	app.Usage = "Send notifications when a given command fails"
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:  "command, c",
			Usage: "execute given command",
		},
		cli.BoolFlag{
			Name:  "version, v",
			Usage: "print the version",
		},
	}
	// action's signature matches the cli Action type; assign it directly.
	app.Action = action
	app.Run(os.Args)
}
// action runs the command given via the -c flag, capturing its stdout
// and stderr. On failure it prints a diagnostic line (including the exit
// status when available); on success it prints the captured stdout.
func action(c *cli.Context) {
	command := c.String("c")
	cmd := exec.Command(command)
	var stdout bytes.Buffer
	var stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		if err2, ok := err.(*exec.ExitError); ok {
			if s, ok := err2.Sys().(syscall.WaitStatus); ok {
				println(fmt.Sprintf("command failed. exitStatus=%v stdout=%v stderr=%v", s.ExitStatus(), stdout.String(), stderr.String()))
				return
			} else {
				// On operating systems where exec.ExitError.Sys() is not a
				// syscall.WaitStatus (unlike Unix or Windows).
				println(fmt.Sprintf("command failed. stdout=%v stderr=%v", stdout.String(), stderr.String()))
				return
			}
		} else {
			// may be returned for I/O problems.
			println(fmt.Sprintf("command can't execute. err=%v", err))
			return
		}
	}
	println(stdout.String())
}
Add support for command args
package main
import (
"bytes"
"fmt"
"os"
"os/exec"
"syscall"
"github.com/codegangsta/cli"
)
// main configures the fail-notifier CLI application. The command to run
// is taken from positional arguments (see action); built-in help is
// hidden and replaced by explicit help/version flags.
func main() {
	app := cli.NewApp()
	app.Name = "fail-notifier"
	app.Version = "0.0.1"
	app.Author = "Manabu Inoue"
	app.Email = ""
	app.HideVersion = true
	app.EnableBashCompletion = true
	app.Flags = []cli.Flag{
		cli.BoolFlag{
			Name:  "help, h",
			Usage: "show help",
		},
		cli.BoolFlag{
			Name:  "version, v",
			Usage: "show the version",
		},
	}
	app.HideHelp = true
	app.ArgsUsage = "command"
	app.Usage = "Send notifications when a given command fails"
	app.Action = func(c *cli.Context) {
		action(c)
	}
	app.Run(os.Args)
}
// action runs the command given as positional CLI arguments, capturing
// its stdout and stderr. With no arguments it prints the application
// help. On failure it prints a diagnostic line (including the exit
// status when available); on success it prints the captured stdout.
func action(c *cli.Context) {
	if !(c.Args().Present()) {
		cli.ShowAppHelp(c)
		return
	}
	// exec.Command accepts an empty variadic argument list, so a single
	// call covers both the one-argument and multi-argument cases — the
	// previous len()==1 special case was redundant.
	cmd := exec.Command(c.Args().First(), c.Args().Tail()...)
	var stdout bytes.Buffer
	var stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		if exitErr, ok := err.(*exec.ExitError); ok {
			if s, ok := exitErr.Sys().(syscall.WaitStatus); ok {
				println(fmt.Sprintf("command failed. exitStatus=%v stdout=%v stderr=%v", s.ExitStatus(), stdout.String(), stderr.String()))
				return
			}
			// On operating systems where exec.ExitError.Sys() is not a
			// syscall.WaitStatus (unlike Unix or Windows).
			println(fmt.Sprintf("command failed. stdout=%v stderr=%v", stdout.String(), stderr.String()))
			return
		}
		// may be returned for I/O problems.
		println(fmt.Sprintf("command can't execute. err=%v", err))
		return
	}
	println(stdout.String())
}
|
package main
import (
"bytes"
"fmt"
"github.com/codegangsta/cli"
"github.com/gr4y/fitbit-graphite/lib/fitbit"
"github.com/gr4y/fitbit-graphite/lib/processor"
"net"
"os"
)
// main wires up the fitbit-graphite CLI: it authenticates against the
// FitBit API via OAuth 2.0, fetches activity/body/sleep data through the
// configured processors and writes the resulting metric lines to a
// Carbon (graphite) instance over TCP.
func main() {
	app := cli.NewApp()
	app.Name = "fitbit-graphite"
	app.Usage = "Exports your FitBit Data into your very own graphite instance"
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:  "ClientID,CID",
			Value: "229G69",
			Usage: "OAuth 2.0 Client ID",
		},
		// NOTE(review): shipping a default OAuth client secret in source
		// is a credential leak; it should come from the environment.
		cli.StringFlag{
			Name:  "ClientSecret,CS",
			Value: "cbe3e9792c1c495db76506b2204a834d",
			Usage: "OAuth 2.0 Client Secret",
		},
		cli.StringFlag{
			Name:  "CarbonPrefix",
			Value: "fitbit",
			Usage: "Prefix for Carbon",
		},
		cli.StringFlag{
			Name:  "CarbonHost,CH",
			Usage: "Hostname of Carbon instance",
		},
		cli.IntFlag{
			Name:  "CarbonPort,CP",
			Value: 2003,
			Usage: "Port of Carbon Instance",
		},
	}
	app.Action = func(c *cli.Context) {
		clientConfig := fitbit.ClientConfig{
			ClientID:     c.String("ClientID"),
			ClientSecret: c.String("ClientSecret"),
			Scopes:       []string{"activity", "heartrate", "location", "nutrition", "profile", "settings", "sleep", "social", "weight"},
		}
		// callbackFunc completes the OAuth flow interactively: the user
		// opens the URL and pastes the resulting code back on stdin.
		callbackFunc := func(url string) string {
			fmt.Println("Open the following URL in your browser: ", url)
			var code string
			_, err := fmt.Scan(&code)
			if err != nil {
				return ""
			}
			return code
		}
		// Connect to FitBit
		client, err := fitbit.Connect(clientConfig, callbackFunc)
		if err != nil {
			panic(err)
		}
		processors := []processor.Processor{
			processor.ActivitiesProcessor{Activities: client.Activities},
			processor.BodyProcessor{Body: client.Body},
			processor.SleepProcessor{Sleep: client.Sleep},
		}
		// Use the FitBit user id as part of the metric path; fall back
		// to "-" when the profile cannot be fetched.
		var userId string
		profileClient := client.Profile
		profile, err := profileClient.GetProfile()
		if err != nil {
			userId = "-"
		} else {
			userId = profile.User.ID
		}
		var lines []string
		for _, proc := range processors {
			items, err := proc.FetchData("today", "max")
			// TODO Maybe there should be some better error handling.
			// In any cases where the Rate Limit is exceeded all data we already fetched is purged and not sent into carbon
			// Which is not that great...
			if err == nil {
				lines = append(lines, items...)
			} else {
				panic(err)
			}
		}
		conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", c.String("CarbonHost"), c.Int("CarbonPort")))
		if err != nil {
			panic(err)
		}
		buf := bytes.NewBufferString("")
		for _, line := range lines {
			// NOTE(review): "\n\r" looks like a typo — the Carbon plaintext
			// protocol expects newline-terminated lines ("\n"); confirm.
			buf.WriteString(fmt.Sprintf("%s.%s.%s\n\r", c.String("CarbonPrefix"), userId, line))
		}
		fmt.Print(buf.String())
		_, err = conn.Write(buf.Bytes())
		if err != nil {
			panic(err)
		}
		conn.Close()
	}
	app.Run(os.Args)
}
Added command-line parameter error handling
package main
import (
"bytes"
"errors"
"fmt"
"github.com/codegangsta/cli"
"github.com/gr4y/fitbit-graphite/lib/fitbit"
"github.com/gr4y/fitbit-graphite/lib/processor"
"net"
"os"
)
// main wires up the fitbit-graphite CLI: it validates the required
// command-line parameters, authenticates against the FitBit API via
// OAuth 2.0, pulls activity, body and sleep data and ships the resulting
// metric lines to a Carbon (Graphite) instance over plain TCP.
func main() {
	app := cli.NewApp()
	app.Name = "fitbit-graphite"
	app.Usage = "Exports your FitBit Data into your very own graphite instance"
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:  "ClientID,CID",
			Value: "229G69",
			Usage: "OAuth 2.0 Client ID",
		},
		cli.StringFlag{
			Name: "ClientSecret,CS",
			// NOTE(review): shipping a real OAuth client secret as a hard-coded
			// default is a security risk — consider reading it from the
			// environment instead of the source.
			Value: "cbe3e9792c1c495db76506b2204a834d",
			Usage: "OAuth 2.0 Client Secret",
		},
		cli.StringFlag{
			Name:  "CarbonPrefix",
			Value: "fitbit",
			Usage: "Prefix for Carbon",
		},
		cli.StringFlag{
			Name:  "CarbonHost,CH",
			Usage: "Hostname of Carbon instance",
		},
		cli.IntFlag{
			Name:  "CarbonPort,CP",
			Value: 2003,
			Usage: "Port of Carbon Instance",
		},
	}
	app.Action = func(c *cli.Context) {
		// Fail fast on missing parameters. Note: ClientID and ClientSecret
		// have non-empty defaults, so those two checks only fire when the
		// user explicitly passes an empty value.
		if c.String("CarbonHost") == "" {
			panic(errors.New("Command Line Parameter CarbonHost is required. Aborting."))
		}
		if c.String("ClientSecret") == "" {
			panic(errors.New("Command Line Parameter ClientSecret is required. Aborting."))
		}
		if c.String("ClientID") == "" {
			panic(errors.New("Command Line Parameter ClientID is required. Aborting."))
		}
		clientConfig := fitbit.ClientConfig{
			ClientID:     c.String("ClientID"),
			ClientSecret: c.String("ClientSecret"),
			Scopes:       []string{"activity", "heartrate", "location", "nutrition", "profile", "settings", "sleep", "social", "weight"},
		}
		// Ask the user to complete the OAuth flow in a browser and paste the
		// resulting authorization code back on stdin.
		callbackFunc := func(url string) string {
			fmt.Println("Open the following URL in your browser: ", url)
			var code string
			_, err := fmt.Scan(&code)
			if err != nil {
				return ""
			}
			return code
		}
		// Connect to FitBit
		client, err := fitbit.Connect(clientConfig, callbackFunc)
		if err != nil {
			panic(err)
		}
		processors := []processor.Processor{
			processor.ActivitiesProcessor{Activities: client.Activities},
			processor.BodyProcessor{Body: client.Body},
			processor.SleepProcessor{Sleep: client.Sleep},
		}
		// Use the FitBit user ID as part of the metric path; fall back to "-"
		// when the profile cannot be fetched.
		var userID string
		profileClient := client.Profile
		profile, err := profileClient.GetProfile()
		if err != nil {
			userID = "-"
		} else {
			userID = profile.User.ID
		}
		var lines []string
		for _, proc := range processors {
			items, err := proc.FetchData("today", "max")
			// TODO Maybe there should be some better error handling.
			// In any cases where the Rate Limit is exceeded all data we already fetched is purged and not sent into carbon
			// Which is not that great...
			if err != nil {
				panic(err)
			}
			lines = append(lines, items...)
		}
		conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", c.String("CarbonHost"), c.Int("CarbonPort")))
		if err != nil {
			panic(err)
		}
		// Bug fix: close the connection even when a later panic unwinds the
		// stack; previously it was only closed on the success path.
		defer conn.Close()
		buf := bytes.NewBufferString("")
		for _, line := range lines {
			// Bug fix: Carbon's plaintext protocol terminates each metric line
			// with "\n"; the previous "\n\r" injected a stray CR into the start
			// of the following metric name.
			buf.WriteString(fmt.Sprintf("%s.%s.%s\n", c.String("CarbonPrefix"), userID, line))
		}
		fmt.Print(buf.String())
		_, err = conn.Write(buf.Bytes())
		if err != nil {
			panic(err)
		}
	}
	app.Run(os.Args)
}
|
package outputs
import (
"fmt"
"sort"
"github.com/hexbotio/hex/models"
"github.com/nlopes/slack"
)
// Slack is an output backend that delivers pipeline results to a Slack
// channel via the Slack web API. It is stateless; all configuration is
// passed into Write.
type Slack struct {
}
// Write posts the collected rule outputs in message to Slack using the
// token, bot name and icon from config. Outputs marked with
// hex.rule.format == "true" are sent as colored attachments (green on
// success, red on failure); all other outputs are concatenated into a
// single plain message. When message.Debug is set, a sorted dump of all
// message attributes is appended.
func (x Slack) Write(message models.Message, config models.Config) {
	api := slack.New(config.SlackToken)
	msg := ""
	params := slack.NewPostMessageParameters()
	params.Username = config.BotName
	image := config.SlackIcon
	if image == "" {
		image = ":nut_and_bolt:"
	}
	params.IconEmoji = image
	for _, output := range message.Outputs {
		if message.Attributes["hex.rule.format"] == "true" {
			// Red by default, green when the rule succeeded. (The previous
			// "grey" initial value was dead — it was always overwritten.)
			color := "danger"
			if output.Success {
				color = "good"
			}
			attachment := slack.Attachment{
				// Bug fix: the title was a hard-coded "TBD" placeholder; use
				// the rule name attribute instead.
				Title:      message.Attributes["hex.rule.name"],
				Text:       "```" + output.Response + "```",
				Color:      color,
				MarkdownIn: []string{"text"},
			}
			params.Attachments = []slack.Attachment{attachment}
		} else {
			msg = msg + output.Response + "\n"
		}
	}
	if message.Debug {
		// Sort attribute keys for a deterministic debug dump.
		keys := make([]string, 0, len(message.Attributes))
		for key := range message.Attributes {
			keys = append(keys, key)
		}
		sort.Strings(keys)
		msg = msg + fmt.Sprintf("\n```MESSAGE DEBUG (%d sec to complete)\n", message.EndTime-message.StartTime)
		for _, key := range keys {
			msg = msg + fmt.Sprintf(" %s: '%s'\n", key, message.Attributes[key])
		}
		msg = msg + "```"
	}
	// NOTE(review): PostMessage's error return is discarded — delivery
	// failures are currently silent. Confirm whether that is intentional.
	api.PostMessage(message.Attributes["hex.channel"], msg, params)
}
Fix Slack formatted-attachment title
package outputs
import (
"fmt"
"sort"
"github.com/hexbotio/hex/models"
"github.com/nlopes/slack"
)
// Slack is an output backend that delivers pipeline results to a Slack
// channel via the Slack web API. It is stateless; all configuration is
// passed into Write.
type Slack struct {
}
// Write posts the collected rule outputs in message to Slack using the
// token, bot name and icon from config. Outputs marked with
// hex.rule.format == "true" are sent as colored attachments (green on
// success, red on failure) titled with the rule name; all other outputs
// are concatenated into a single plain message. When message.Debug is
// set, a sorted dump of all message attributes is appended.
func (x Slack) Write(message models.Message, config models.Config) {
	api := slack.New(config.SlackToken)
	msg := ""
	params := slack.NewPostMessageParameters()
	params.Username = config.BotName
	image := config.SlackIcon
	if image == "" {
		image = ":nut_and_bolt:"
	}
	params.IconEmoji = image
	for _, output := range message.Outputs {
		if message.Attributes["hex.rule.format"] == "true" {
			// Red by default, green when the rule succeeded. (The previous
			// "grey" initial value was dead — it was always overwritten.)
			color := "danger"
			if output.Success {
				color = "good"
			}
			attachment := slack.Attachment{
				Title:      message.Attributes["hex.rule.name"],
				Text:       "```" + output.Response + "```",
				Color:      color,
				MarkdownIn: []string{"text"},
			}
			params.Attachments = []slack.Attachment{attachment}
		} else {
			msg = msg + output.Response + "\n"
		}
	}
	if message.Debug {
		// Sort attribute keys for a deterministic debug dump.
		keys := make([]string, 0, len(message.Attributes))
		for key := range message.Attributes {
			keys = append(keys, key)
		}
		sort.Strings(keys)
		msg = msg + fmt.Sprintf("\n```MESSAGE DEBUG (%d sec to complete)\n", message.EndTime-message.StartTime)
		for _, key := range keys {
			msg = msg + fmt.Sprintf(" %s: '%s'\n", key, message.Attributes[key])
		}
		msg = msg + "```"
	}
	// NOTE(review): PostMessage's error return is discarded — delivery
	// failures are currently silent. Confirm whether that is intentional.
	api.PostMessage(message.Attributes["hex.channel"], msg, params)
}
|
package drivers
import (
"encoding/json"
"fmt"
"io"
"os"
"strings"
"github.com/pborman/uuid"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/migration"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/instancewriter"
"github.com/lxc/lxd/shared/ioprogress"
"github.com/lxc/lxd/shared/units"
)
// CreateVolume creates an empty volume and can optionally fill it by executing the supplied
// filler function. For image volumes a previously deleted ("zombie") image is
// resurrected by renaming it back instead of creating a new one, and a
// protected "readonly" snapshot is left behind as the clone source for
// future volumes.
func (d *ceph) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Operation) error {
	// Revert handling: every successful step registers an undo action; the
	// deferred Fail() runs them all unless Success() is reached.
	revert := revert.New()
	defer revert.Fail()

	if vol.contentType == ContentTypeFS {
		// Create mountpoint.
		err := vol.EnsureMountPath()
		if err != nil {
			return err
		}

		revert.Add(func() { os.Remove(vol.MountPath()) })
	}

	// Figure out the potential zombie volume (a deleted image kept around so
	// it can be cheaply resurrected). Block-content VM/image volumes use a
	// ".block" suffix in the name.
	zombieImageVol := NewVolume(d, d.name, VolumeType("zombie_image"), vol.contentType,
		fmt.Sprintf("%s_%s", vol.name, d.getRBDFilesystem(vol)), nil, nil)

	if (vol.volType == VolumeTypeVM || vol.volType == VolumeTypeImage) && vol.contentType == ContentTypeBlock {
		zombieImageVol = NewVolume(d, d.name, VolumeType("zombie_image"), vol.contentType,
			fmt.Sprintf("%s_%s.block", vol.name, d.getRBDFilesystem(vol)), nil, nil)
	}

	// Check if we have a zombie image. If so, restore it otherwise
	// create a new image volume.
	if vol.volType == VolumeTypeImage && d.HasVolume(zombieImageVol) {
		// Figure out the names.
		oldName := d.getRBDVolumeName(zombieImageVol, "", false, true)
		newName := d.getRBDVolumeName(vol, "", false, true)

		// Rename back to active.
		_, err := shared.RunCommand(
			"rbd",
			"--id", d.config["ceph.user.name"],
			"--cluster", d.config["ceph.cluster_name"],
			"mv",
			oldName,
			newName)
		if err != nil {
			return err
		}

		// For VMs, also create the filesystem volume.
		if vol.IsVMBlock() {
			fsVol := vol.NewVMBlockFilesystemVolume()
			err := d.CreateVolume(fsVol, nil, op)
			if err != nil {
				return err
			}

			revert.Add(func() { d.DeleteVolume(fsVol, op) })
		}

		revert.Success()
		return nil
	}

	// Get size.
	RBDSize, err := d.getRBDSize(vol)
	if err != nil {
		return err
	}

	// Create volume.
	err = d.rbdCreateVolume(vol, RBDSize)
	if err != nil {
		return err
	}

	revert.Add(func() { d.DeleteVolume(vol, op) })

	// Map the new image to a local block device so it can be formatted/filled.
	RBDDevPath, err := d.rbdMapVolume(vol)
	if err != nil {
		return err
	}

	revert.Add(func() { d.rbdUnmapVolume(vol, true) })

	// Get filesystem.
	RBDFilesystem := d.getRBDFilesystem(vol)

	if vol.contentType == ContentTypeFS {
		_, err = makeFSType(RBDDevPath, RBDFilesystem, nil)
		if err != nil {
			return err
		}
	}

	// For VMs, also create the filesystem volume.
	if vol.IsVMBlock() {
		fsVol := vol.NewVMBlockFilesystemVolume()
		err := d.CreateVolume(fsVol, nil, op)
		if err != nil {
			return err
		}

		revert.Add(func() { d.DeleteVolume(fsVol, op) })
	}

	// Run the volume filler function if supplied. FS volumes are filled via
	// their mount path; block volumes additionally pass the device path.
	if filler != nil && filler.Fill != nil {
		err := vol.MountTask(func(mountPath string, op *operations.Operation) error {
			if vol.contentType == ContentTypeFS {
				return filler.Fill(mountPath, "")
			}

			devPath, err := d.GetVolumeDiskPath(vol)
			if err != nil {
				return err
			}

			err = filler.Fill(mountPath, devPath)
			if err != nil {
				return err
			}

			// Move the GPT alt header to end of disk if needed.
			if vol.IsVMBlock() {
				err = d.moveGPTAltHeader(devPath)
				if err != nil {
					return err
				}
			}

			return err
		}, op)
		if err != nil {
			return err
		}
	}

	// Create a readonly snapshot of the image volume which will be used a the
	// clone source for future non-image volumes.
	if vol.volType == VolumeTypeImage {
		err = d.rbdUnmapVolume(vol, true)
		if err != nil {
			return err
		}

		err = d.rbdCreateVolumeSnapshot(vol, "readonly")
		if err != nil {
			return err
		}

		revert.Add(func() { d.deleteVolumeSnapshot(vol, "readonly") })

		// Protect the snapshot so RBD allows cloning from it.
		err = d.rbdProtectVolumeSnapshot(vol, "readonly")
		if err != nil {
			return err
		}
	}

	revert.Success()
	return nil
}
// CreateVolumeFromBackup re-creates a volume from its exported state by
// delegating to the generic VFS unpack helper. The optimizedStorage flag
// is ignored here — only the generic unpack path is implemented for this
// driver. The returned functions are whatever hooks the generic helper
// provides (post-unpack and revert).
func (d *ceph) CreateVolumeFromBackup(vol Volume, snapshots []string, srcData io.ReadSeeker, optimizedStorage bool, op *operations.Operation) (func(vol Volume) error, func(), error) {
	return genericVFSBackupUnpack(d, vol, snapshots, srcData, op)
}
// CreateVolumeFromCopy provides same-pool volume copying functionality.
// Without snapshots the copy is done either as a full "rbd cp" (when
// ceph.rbd.clone_copy is explicitly disabled and the source is not an
// image) or as a lightweight clone from a protected snapshot. With
// snapshots, an empty dummy volume is created and each snapshot plus the
// final volume state is transferred incrementally via copyWithSnapshots.
func (d *ceph) CreateVolumeFromCopy(vol Volume, srcVol Volume, copySnapshots bool, op *operations.Operation) error {
	var err error
	snapshots := []string{}

	revert := revert.New()
	defer revert.Fail()

	if !srcVol.IsSnapshot() && copySnapshots {
		snapshots, err = d.VolumeSnapshots(srcVol, op)
		if err != nil {
			return err
		}
	}

	// Copy without snapshots.
	if !copySnapshots || len(snapshots) == 0 {
		// Full copy only when clone copy is explicitly disabled and the
		// source is not an image (images always clone from "readonly").
		if d.config["ceph.rbd.clone_copy"] != "" &&
			!shared.IsTrue(d.config["ceph.rbd.clone_copy"]) &&
			srcVol.volType != VolumeTypeImage {
			_, err = shared.RunCommand(
				"rbd",
				"--id", d.config["ceph.user.name"],
				"--cluster", d.config["ceph.cluster_name"],
				"cp",
				d.getRBDVolumeName(srcVol, "", false, true),
				d.getRBDVolumeName(vol, "", false, true))
			if err != nil {
				return err
			}

			revert.Add(func() { d.DeleteVolume(vol, op) })

			_, err = d.rbdMapVolume(vol)
			if err != nil {
				return err
			}

			revert.Add(func() { d.rbdUnmapVolume(vol, true) })
		} else {
			// Lightweight copy: clone from a (possibly temporary) protected snapshot.
			parentVol := srcVol
			snapshotName := "readonly"

			if srcVol.volType != VolumeTypeImage {
				snapshotName = fmt.Sprintf("zombie_snapshot_%s", uuid.NewRandom().String())

				if srcVol.IsSnapshot() {
					// Clone directly from the existing snapshot on the parent.
					srcParentName, srcSnapOnlyName, _ :=
						shared.InstanceGetParentAndSnapshotName(srcVol.name)
					snapshotName = fmt.Sprintf("snapshot_%s", srcSnapOnlyName)
					parentVol = NewVolume(d, d.name, srcVol.volType, srcVol.contentType, srcParentName, nil, nil)
				} else {
					// Create snapshot.
					err := d.rbdCreateVolumeSnapshot(srcVol, snapshotName)
					if err != nil {
						return err
					}
				}

				// Protect volume so we can create clones of it.
				err = d.rbdProtectVolumeSnapshot(parentVol, snapshotName)
				if err != nil {
					return err
				}

				revert.Add(func() { d.rbdUnprotectVolumeSnapshot(parentVol, snapshotName) })
			}

			err = d.rbdCreateClone(parentVol, snapshotName, vol)
			if err != nil {
				return err
			}

			revert.Add(func() { d.DeleteVolume(vol, op) })
		}

		if vol.contentType == ContentTypeFS {
			// Re-generate the UUID.
			err = d.generateUUID(vol)
			if err != nil {
				return err
			}
		}

		// For VMs, also copy the filesystem volume.
		if vol.IsVMBlock() {
			srcFSVol := srcVol.NewVMBlockFilesystemVolume()
			fsVol := vol.NewVMBlockFilesystemVolume()
			err := d.CreateVolumeFromCopy(fsVol, srcFSVol, false, op)
			if err != nil {
				return err
			}
		}

		revert.Success()
		return nil
	}

	// Copy with snapshots.

	// Create empty dummy volume
	err = d.rbdCreateVolume(vol, "0")
	if err != nil {
		return err
	}

	revert.Add(func() { d.rbdDeleteVolume(vol) })

	// Receive over the dummy volume we created above.
	targetVolumeName := d.getRBDVolumeName(vol, "", false, true)

	lastSnap := ""

	if len(snapshots) > 0 {
		err := createParentSnapshotDirIfMissing(d.name, vol.volType, vol.name)
		if err != nil {
			return err
		}
	}

	// Transfer each snapshot incrementally on top of the previous one.
	for i, snap := range snapshots {
		// Bug fix: shadow the loop variable so that each revert closure
		// deletes its own snapshot. Before Go 1.22 all closures captured the
		// single shared range variable, so every revert would have deleted
		// only the last snapshot.
		snap := snap

		prev := ""
		if i > 0 {
			prev = fmt.Sprintf("snapshot_%s", snapshots[i-1])
		}

		lastSnap = fmt.Sprintf("snapshot_%s", snap)
		sourceVolumeName := d.getRBDVolumeName(srcVol, lastSnap, false, true)

		err = d.copyWithSnapshots(
			sourceVolumeName,
			targetVolumeName,
			prev)
		if err != nil {
			return err
		}

		revert.Add(func() { d.rbdDeleteVolumeSnapshot(vol, snap) })

		snapVol, err := vol.NewSnapshot(snap)
		if err != nil {
			return err
		}

		err = snapVol.EnsureMountPath()
		if err != nil {
			return err
		}
	}

	// Copy snapshot: transfer the final volume state as a delta from the
	// last snapshot sent above.
	sourceVolumeName := d.getRBDVolumeName(srcVol, "", false, true)
	err = d.copyWithSnapshots(
		sourceVolumeName,
		targetVolumeName,
		lastSnap)
	if err != nil {
		return err
	}

	// Re-generate the UUID.
	err = d.generateUUID(vol)
	if err != nil {
		return err
	}

	ourMount, err := d.MountVolume(vol, op)
	if err != nil {
		return err
	}

	if ourMount {
		defer d.UnmountVolume(vol, op)
	}

	revert.Success()
	return nil
}
// CreateVolumeFromMigration creates a volume being sent via a migration.
// RSYNC-style transfers are delegated to the generic VFS helper; only the
// RBD native transfer type is handled below, by receiving one stream per
// snapshot followed by the main volume (twice when doing a live final sync).
func (d *ceph) CreateVolumeFromMigration(vol Volume, conn io.ReadWriteCloser, volTargetArgs migration.VolumeTargetArgs, preFiller *VolumeFiller, op *operations.Operation) error {
	// Handle simple rsync and block_and_rsync through generic.
	if volTargetArgs.MigrationType.FSType == migration.MigrationFSType_RSYNC || volTargetArgs.MigrationType.FSType == migration.MigrationFSType_BLOCK_AND_RSYNC {
		return genericVFSCreateVolumeFromMigration(d, nil, vol, conn, volTargetArgs, preFiller, op)
	} else if volTargetArgs.MigrationType.FSType != migration.MigrationFSType_RBD {
		return ErrNotSupported
	}

	// For VMs, receive the filesystem volume first (the sender transmits it first too).
	if vol.IsVMBlock() {
		fsVol := vol.NewVMBlockFilesystemVolume()
		err := d.CreateVolumeFromMigration(fsVol, conn, volTargetArgs, preFiller, op)
		if err != nil {
			return err
		}
	}

	recvName := d.getRBDVolumeName(vol, "", false, true)

	// Create an empty image to receive into if one doesn't exist yet.
	if !d.HasVolume(vol) {
		err := d.rbdCreateVolume(vol, "0")
		if err != nil {
			return err
		}
	}

	err := vol.EnsureMountPath()
	if err != nil {
		return err
	}

	// Handle rbd export-diff/import-diff style send/receive of snapshots.
	if len(volTargetArgs.Snapshots) > 0 {
		// Create the parent directory.
		err := createParentSnapshotDirIfMissing(d.name, vol.volType, vol.name)
		if err != nil {
			return err
		}

		// Transfer the snapshots.
		for _, snapName := range volTargetArgs.Snapshots {
			fullSnapshotName := d.getRBDVolumeName(vol, snapName, false, true)
			wrapper := migration.ProgressWriter(op, "fs_progress", fullSnapshotName)

			err = d.receiveVolume(recvName, conn, wrapper)
			if err != nil {
				return err
			}

			snapVol, err := vol.NewSnapshot(snapName)
			if err != nil {
				return err
			}

			err = snapVol.EnsureMountPath()
			if err != nil {
				return err
			}
		}
	}

	defer func() {
		// Delete all migration-send-* snapshots (temporary snapshots the
		// sender creates to capture the running state).
		snaps, err := d.rbdListVolumeSnapshots(vol)
		if err != nil {
			return
		}

		for _, snap := range snaps {
			if !strings.HasPrefix(snap, "migration-send") {
				continue
			}

			d.rbdDeleteVolumeSnapshot(vol, snap)
		}
	}()

	// Receive the main volume.
	wrapper := migration.ProgressWriter(op, "fs_progress", vol.name)
	err = d.receiveVolume(recvName, conn, wrapper)
	if err != nil {
		return err
	}

	// Live migrations send one extra final-sync stream after the instance is paused.
	if volTargetArgs.Live {
		err = d.receiveVolume(recvName, conn, wrapper)
		if err != nil {
			return err
		}
	}

	// Re-generate the filesystem UUID so the copy can coexist with the source.
	err = d.generateUUID(vol)
	if err != nil {
		return err
	}

	return nil
}
// RefreshVolume updates an existing volume to match the state of another.
// Delegates to the generic VFS copy helper; the literal true argument
// presumably selects refresh (in-place sync) mode — confirm against
// genericVFSCopyVolume's signature.
func (d *ceph) RefreshVolume(vol Volume, srcVol Volume, srcSnapshots []Volume, op *operations.Operation) error {
	return genericVFSCopyVolume(d, nil, vol, srcVol, srcSnapshots, true, op)
}
// DeleteVolume deletes a volume of the storage device. If any snapshots of the volume remain then
// this function will return an error. Image volumes are only physically
// deleted when no clones depend on their "readonly" snapshot; otherwise
// they are unmapped and marked as zombies so dependants keep working.
func (d *ceph) DeleteVolume(vol Volume, op *operations.Operation) error {
	if vol.volType == VolumeTypeImage {
		// Try to umount but don't fail.
		d.UnmountVolume(vol, op)

		// Check if image has dependant snapshots.
		// Note: the listing error doubles as the signal here — ErrNoSuchObject
		// means "no clones" (safe to fully delete), nil means clones exist.
		_, err := d.rbdListSnapshotClones(vol, "readonly")
		if err != nil {
			if err != db.ErrNoSuchObject {
				return err
			}

			// No clones: fully remove the image. Unprotect snapshot.
			err = d.rbdUnprotectVolumeSnapshot(vol, "readonly")
			if err != nil {
				return err
			}

			// Delete snapshots.
			_, err = shared.RunCommand(
				"rbd",
				"--id", d.config["ceph.user.name"],
				"--cluster", d.config["ceph.cluster_name"],
				"--pool", d.config["ceph.osd.pool_name"],
				"snap",
				"purge",
				d.getRBDVolumeName(vol, "", false, false))
			if err != nil {
				return err
			}

			// Unmap image.
			err = d.rbdUnmapVolume(vol, true)
			if err != nil {
				return err
			}

			// Delete image.
			err = d.rbdDeleteVolume(vol)
		} else {
			// Clones still depend on this image: unmap it and mark it as a
			// zombie so CreateVolume can resurrect it later.
			err = d.rbdUnmapVolume(vol, true)
			if err != nil {
				return err
			}

			err = d.rbdMarkVolumeDeleted(vol, vol.name)
		}
		if err != nil {
			return err
		}
	} else {
		// Non-image volumes: nothing to do if already gone.
		if !d.HasVolume(vol) {
			return nil
		}

		_, err := d.UnmountVolume(vol, op)
		if err != nil {
			return err
		}

		_, err = d.deleteVolume(vol)
		if err != nil {
			return errors.Wrap(err, "Failed to delete volume")
		}
	}

	// For VMs, also delete the associated filesystem volume.
	if vol.IsVMBlock() {
		fsVol := vol.NewVMBlockFilesystemVolume()
		err := d.DeleteVolume(fsVol, op)
		if err != nil {
			return err
		}
	}

	// Clean up the mountpoint directory.
	mountPath := vol.MountPath()

	if vol.contentType == ContentTypeFS && shared.PathExists(mountPath) {
		err := wipeDirectory(mountPath)
		if err != nil {
			return err
		}

		err = os.Remove(mountPath)
		if err != nil && !os.IsNotExist(err) {
			return errors.Wrapf(err, "Failed to remove '%s'", mountPath)
		}
	}

	return nil
}
// HasVolume indicates whether a specific volume exists on the storage pool.
// It probes the image with "rbd image-meta list" and treats any command
// failure as "the volume does not exist".
func (d *ceph) HasVolume(vol Volume) bool {
	_, err := shared.RunCommand(
		"rbd",
		"--id", d.config["ceph.user.name"],
		"--cluster", d.config["ceph.cluster_name"],
		"--pool", d.config["ceph.osd.pool_name"],
		"image-meta",
		"list",
		d.getRBDVolumeName(vol, "", false, false))

	return err == nil
}
// ValidateVolume validates the supplied volume config. Both recognised
// block options accept any value; the heavy lifting (including stripping
// unknown keys when requested) is delegated to the shared helper.
func (d *ceph) ValidateVolume(vol Volume, removeUnknownKeys bool) error {
	volRules := map[string]func(value string) error{
		"block.filesystem":    shared.IsAny,
		"block.mount_options": shared.IsAny,
	}

	return d.validateVolume(vol, volRules, removeUnknownKeys)
}
// UpdateVolume applies config changes to the volume. Only custom volumes
// may be updated, and "size" is the only key acted upon — any other
// changed key is a no-op here.
func (d *ceph) UpdateVolume(vol Volume, changedConfig map[string]string) error {
	if vol.volType != VolumeTypeCustom {
		return ErrNotSupported
	}

	newSize, ok := changedConfig["size"]
	if !ok {
		return nil
	}

	return d.SetVolumeQuota(vol, newSize, nil)
}
// GetVolumeUsage returns the disk space used by the volume. Mounted
// filesystem volumes are measured via statfs; everything else falls back
// to parsing "rbd du" JSON output for the image.
func (d *ceph) GetVolumeUsage(vol Volume) (int64, error) {
	// Fast path: used bytes from the mounted filesystem.
	if vol.contentType == ContentTypeFS && shared.IsMountPoint(vol.MountPath()) {
		var stat unix.Statfs_t

		err := unix.Statfs(vol.MountPath(), &stat)
		if err != nil {
			return -1, err
		}

		return int64(stat.Blocks-stat.Bfree) * int64(stat.Bsize), nil
	}

	// Shapes matching the relevant fields of "rbd du --format json".
	type cephDuLine struct {
		Name            string `json:"name"`
		Snapshot        string `json:"snapshot"`
		ProvisionedSize int64  `json:"provisioned_size"`
		UsedSize        int64  `json:"used_size"`
	}

	type cephDuInfo struct {
		Images []cephDuLine `json:"images"`
	}

	jsonInfo, err := shared.TryRunCommand(
		"rbd",
		"du",
		"--format", "json",
		"--id", d.config["ceph.user.name"],
		"--cluster", d.config["ceph.cluster_name"],
		"--pool", d.config["ceph.osd.pool_name"],
		d.getRBDVolumeName(vol, "", false, false))
	if err != nil {
		return -1, err
	}

	var usedSize int64
	var result cephDuInfo

	err = json.Unmarshal([]byte(jsonInfo), &result)
	if err != nil {
		return -1, err
	}

	// rbd du gives the output of all related rbd images, snapshots included
	// to get the total size of the image we use the result that does not include
	// a snapshot name, this is the total image size.
	for _, image := range result.Images {
		if image.Snapshot == "" {
			usedSize = image.UsedSize
		}
	}

	return usedSize, nil
}
// SetVolumeQuota applies a size limit on volume. Filesystem volumes are
// shrunk filesystem-first / grown device-first so the filesystem never
// extends past the underlying RBD device; plain block volumes can only
// grow.
func (d *ceph) SetVolumeQuota(vol Volume, size string, op *operations.Operation) error {
	fsType := d.getRBDFilesystem(vol)

	RBDDevPath, err := d.getRBDMappedDevPath(vol)
	if err != nil {
		return err
	}

	// The grow/shrink functions use Mount/Unmount which may cause an unmap, so make sure to keep a reference.
	oldKeepDevice := vol.keepDevice
	vol.keepDevice = true
	defer func() {
		vol.keepDevice = oldKeepDevice
	}()

	oldSizeBytes, err := blockDevSizeBytes(RBDDevPath)
	if err != nil {
		return errors.Wrapf(err, "Error getting current size")
	}

	newSizeBytes, err := units.ParseByteSizeString(size)
	if err != nil {
		return err
	}

	// The right disjunct just means that someone unset the size property in the instance's config.
	// We obviously cannot resize to 0.
	if oldSizeBytes == newSizeBytes || newSizeBytes == 0 {
		return nil
	}

	// Resize filesystem if needed.
	if vol.contentType == ContentTypeFS {
		if newSizeBytes < oldSizeBytes {
			// Shrink: filesystem first, then the image (with --allow-shrink).
			err = shrinkFileSystem(fsType, RBDDevPath, vol, newSizeBytes)
			if err != nil {
				return err
			}

			_, err = shared.TryRunCommand(
				"rbd",
				"resize",
				"--allow-shrink",
				"--id", d.config["ceph.user.name"],
				"--cluster", d.config["ceph.cluster_name"],
				"--pool", d.config["ceph.osd.pool_name"],
				"--size", fmt.Sprintf("%dB", newSizeBytes),
				d.getRBDVolumeName(vol, "", false, false))
			if err != nil {
				return err
			}
		} else {
			// Grow the block device.
			_, err = shared.TryRunCommand(
				"rbd",
				"resize",
				"--id", d.config["ceph.user.name"],
				"--cluster", d.config["ceph.cluster_name"],
				"--pool", d.config["ceph.osd.pool_name"],
				"--size", fmt.Sprintf("%dB", newSizeBytes),
				d.getRBDVolumeName(vol, "", false, false))
			if err != nil {
				return err
			}

			// Grow the filesystem.
			err = growFileSystem(fsType, RBDDevPath, vol)
			if err != nil {
				return err
			}
		}
	} else {
		if newSizeBytes < oldSizeBytes {
			return fmt.Errorf("You cannot shrink block volumes")
		}

		// Grow the block device.
		_, err = shared.TryRunCommand(
			"rbd",
			"resize",
			"--id", d.config["ceph.user.name"],
			"--cluster", d.config["ceph.cluster_name"],
			"--pool", d.config["ceph.osd.pool_name"],
			"--size", fmt.Sprintf("%dB", newSizeBytes),
			d.getRBDVolumeName(vol, "", false, false))
		if err != nil {
			return err
		}

		// Move the GPT alt header to end of disk if needed.
		if vol.IsVMBlock() {
			err = d.moveGPTAltHeader(RBDDevPath)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// GetVolumeDiskPath returns the location of a root disk block device.
// Only VM block volumes expose a mapped RBD device path; everything else
// is unsupported.
func (d *ceph) GetVolumeDiskPath(vol Volume) (string, error) {
	if !vol.IsVMBlock() {
		return "", ErrNotSupported
	}

	return d.getRBDMappedDevPath(vol)
}
// MountVolume simulates mounting a volume. The bool return reports
// whether this call actually performed a mount (false when the volume was
// already mounted or there is nothing to mount).
func (d *ceph) MountVolume(vol Volume, op *operations.Operation) (bool, error) {
	mountPath := vol.MountPath()

	// Filesystem volumes: map the RBD device and mount it if not already mounted.
	if vol.contentType == ContentTypeFS && !shared.IsMountPoint(mountPath) {
		RBDFilesystem := d.getRBDFilesystem(vol)

		err := vol.EnsureMountPath()
		if err != nil {
			return false, err
		}

		RBDDevPath, err := d.getRBDMappedDevPath(vol)
		if err != nil {
			return false, err
		}

		mountFlags, mountOptions := resolveMountOptions(d.getRBDMountOptions(vol))
		err = TryMount(RBDDevPath, mountPath, RBDFilesystem, mountFlags, mountOptions)
		if err != nil {
			return false, err
		}

		return true, nil
	}

	// For VMs, mount the filesystem volume.
	if vol.IsVMBlock() {
		fsVol := vol.NewVMBlockFilesystemVolume()
		return d.MountVolume(fsVol, op)
	}

	return false, nil
}
// UnmountVolume simulates unmounting a volume: for VMs the associated
// filesystem volume is unmounted first, then the volume's own mountpoint
// is detached and the RBD device unmapped (unless keepDevice is set, as
// done by SetVolumeQuota).
func (d *ceph) UnmountVolume(vol Volume, op *operations.Operation) (bool, error) {
	// For VMs, also unmount the filesystem dataset.
	if vol.IsVMBlock() {
		fsVol := vol.NewVMBlockFilesystemVolume()
		_, err := d.UnmountVolume(fsVol, op)
		if err != nil {
			return false, err
		}
	}

	// Attempt to unmount the volume.
	mountPath := vol.MountPath()
	if shared.IsMountPoint(mountPath) {
		err := TryUnmount(mountPath, unix.MNT_DETACH)
		if err != nil {
			return false, err
		}
	}

	// Attempt to unmap.
	if !vol.keepDevice {
		err := d.rbdUnmapVolume(vol, true)
		if err != nil {
			return true, err
		}
	}

	// NOTE(review): this returns true even when nothing was mounted to begin
	// with — confirm callers only interpret it as "volume is now unmounted".
	return true, nil
}
// RenameVolume renames a volume and its snapshots: the volume is
// unmounted and unmapped, the RBD image renamed, the new image re-mapped,
// and finally the on-disk mount paths renamed via the generic VFS helper.
// Each step registers a revert action so a partial rename is undone.
func (d *ceph) RenameVolume(vol Volume, newName string, op *operations.Operation) error {
	revert := revert.New()
	defer revert.Fail()

	_, err := d.UnmountVolume(vol, op)
	if err != nil {
		return err
	}

	err = d.rbdUnmapVolume(vol, true)
	if err != nil {
		// Bug fix: previously "return nil", silently swallowing the failure
		// and reporting a rename that never happened.
		return err
	}

	revert.Add(func() { d.rbdMapVolume(vol) })

	err = d.rbdRenameVolume(vol, newName)
	if err != nil {
		return err
	}

	newVol := NewVolume(d, d.name, vol.volType, vol.contentType, newName, nil, nil)
	revert.Add(func() { d.rbdRenameVolume(newVol, vol.name) })

	_, err = d.rbdMapVolume(newVol)
	if err != nil {
		return err
	}

	err = genericVFSRenameVolume(d, vol, newName, op)
	if err != nil {
		// Bug fix: previously "return nil" — the mount-path rename failure
		// was silently ignored, leaving image and paths out of sync.
		return err
	}

	revert.Success()
	return nil
}
// MigrateVolume sends a volume for migration. Intra-cluster moves (Data
// set) just unmap and rename the image; rsync transfer types are handled
// generically; native RBD transfers stream each snapshot incrementally
// and finally the running state captured in a temporary
// "migration-send-*" snapshot.
func (d *ceph) MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs *migration.VolumeSourceArgs, op *operations.Operation) error {
	// If data is set, this request is coming from the clustering code.
	// In this case, we only need to unmap and rename the rbd image.
	if volSrcArgs.Data != nil {
		data, ok := volSrcArgs.Data.(string)
		if ok {
			err := d.rbdUnmapVolume(vol, true)
			if err != nil {
				return err
			}

			// Rename volume.
			if vol.name != data {
				err = d.rbdRenameVolume(vol, data)
				if err != nil {
					return err
				}
			}

			return nil
		}
	}

	// Handle simple rsync and block_and_rsync through generic.
	if volSrcArgs.MigrationType.FSType == migration.MigrationFSType_RSYNC || volSrcArgs.MigrationType.FSType == migration.MigrationFSType_BLOCK_AND_RSYNC {
		return genericVFSMigrateVolume(d, d.state, vol, conn, volSrcArgs, op)
	} else if volSrcArgs.MigrationType.FSType != migration.MigrationFSType_RBD {
		return ErrNotSupported
	}

	// For VMs, send the filesystem volume first.
	if vol.IsVMBlock() {
		fsVol := vol.NewVMBlockFilesystemVolume()
		err := d.MigrateVolume(fsVol, conn, volSrcArgs, op)
		if err != nil {
			return err
		}
	}

	// Sending a single snapshot: mount it (which creates the start clone)
	// and stream that clone in full.
	if vol.IsSnapshot() {
		parentName, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(vol.name)
		sendName := fmt.Sprintf("%s/snapshots_%s_%s_start_clone", d.name, parentName, snapOnlyName)
		cloneVol := NewVolume(d, d.name, vol.volType, vol.contentType, vol.name, nil, nil)

		// Mounting the volume snapshot will create the clone "snapshots_<parent>_<snap>_start_clone".
		_, err := d.MountVolumeSnapshot(cloneVol, op)
		if err != nil {
			return err
		}
		defer d.UnmountVolumeSnapshot(cloneVol, op)

		// Setup progress tracking.
		var wrapper *ioprogress.ProgressTracker
		if volSrcArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "fs_progress", vol.name)
		}

		err = d.sendVolume(conn, sendName, "", wrapper)
		if err != nil {
			return err
		}

		return nil
	}

	// Send each requested snapshot as a diff on top of the previous one.
	// Skipped entirely on the final sync of a live migration.
	lastSnap := ""

	if !volSrcArgs.FinalSync {
		for i, snapName := range volSrcArgs.Snapshots {
			snapshot, _ := vol.NewSnapshot(snapName)

			prev := ""
			if i > 0 {
				prev = fmt.Sprintf("snapshot_%s", volSrcArgs.Snapshots[i-1])
			}

			lastSnap = fmt.Sprintf("snapshot_%s", snapName)
			sendSnapName := d.getRBDVolumeName(vol, lastSnap, false, true)

			// Setup progress tracking.
			var wrapper *ioprogress.ProgressTracker
			if volSrcArgs.TrackProgress {
				wrapper = migration.ProgressTracker(op, "fs_progress", snapshot.name)
			}

			err := d.sendVolume(conn, sendSnapName, prev, wrapper)
			if err != nil {
				return err
			}
		}
	}

	// Setup progress tracking.
	var wrapper *ioprogress.ProgressTracker
	if volSrcArgs.TrackProgress {
		wrapper = migration.ProgressTracker(op, "fs_progress", vol.name)
	}

	// Capture the running state in a uniquely named temporary snapshot and
	// stream it relative to the last snapshot sent above.
	runningSnapName := fmt.Sprintf("migration-send-%s", uuid.NewRandom().String())

	err := d.rbdCreateVolumeSnapshot(vol, runningSnapName)
	if err != nil {
		return err
	}
	defer d.rbdDeleteVolumeSnapshot(vol, runningSnapName)

	cur := d.getRBDVolumeName(vol, runningSnapName, false, true)

	err = d.sendVolume(conn, cur, lastSnap, wrapper)
	if err != nil {
		return err
	}

	return nil
}
// BackupVolume creates an exported version of a volume by delegating to
// the generic VFS tarball exporter. The optimized flag is ignored — only
// the generic export path is implemented for this driver.
func (d *ceph) BackupVolume(vol Volume, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error {
	return genericVFSBackupVolume(d, vol, tarWriter, snapshots, op)
}
// CreateVolumeSnapshot creates a snapshot of a volume by taking an RBD
// snapshot named "snapshot_<name>" on the parent image. If the parent is
// mounted, the filesystem is synced and (best-effort) frozen for the
// duration of the snapshot to keep it consistent.
func (d *ceph) CreateVolumeSnapshot(snapVol Volume, op *operations.Operation) error {
	revert := revert.New()
	defer revert.Fail()

	parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
	sourcePath := GetVolumeMountPath(d.name, snapVol.volType, parentName)
	snapshotName := fmt.Sprintf("snapshot_%s", snapshotOnlyName)

	if shared.IsMountPoint(sourcePath) {
		// This is costly but we need to ensure that all cached data has
		// been committed to disk. If we don't then the rbd snapshot of
		// the underlying filesystem can be inconsistent or - worst case
		// - empty.
		unix.Sync()

		// Best-effort freeze; the unfreeze is only deferred when the freeze
		// itself succeeded.
		_, err := shared.TryRunCommand("fsfreeze", "--freeze", sourcePath)
		if err == nil {
			defer shared.TryRunCommand("fsfreeze", "--unfreeze", sourcePath)
		}
	}

	// Create the parent directory.
	err := createParentSnapshotDirIfMissing(d.name, snapVol.volType, parentName)
	if err != nil {
		return err
	}

	err = snapVol.EnsureMountPath()
	if err != nil {
		return err
	}

	// Take the RBD snapshot on the parent volume.
	parentVol := NewVolume(d, d.name, snapVol.volType, snapVol.contentType, parentName, nil, nil)
	err = d.rbdCreateVolumeSnapshot(parentVol, snapshotName)
	if err != nil {
		return err
	}

	revert.Add(func() { d.DeleteVolumeSnapshot(snapVol, op) })

	// For VM images, create a filesystem volume too.
	if snapVol.IsVMBlock() {
		fsVol := snapVol.NewVMBlockFilesystemVolume()
		err := d.CreateVolumeSnapshot(fsVol, op)
		if err != nil {
			return err
		}

		revert.Add(func() { d.DeleteVolumeSnapshot(fsVol, op) })
	}

	revert.Success()
	return nil
}
// DeleteVolumeSnapshot removes a snapshot from the storage device: the
// RBD snapshot on the parent image is deleted, the snapshot mountpoint is
// cleaned up, and the parent snapshot directory removed if now empty.
// A non-existent snapshot is treated as success.
func (d *ceph) DeleteVolumeSnapshot(snapVol Volume, op *operations.Operation) error {
	// Check if snapshot exists, and return if not.
	_, err := shared.RunCommand(
		"rbd",
		"--id", d.config["ceph.user.name"],
		"--cluster", d.config["ceph.cluster_name"],
		"--pool", d.config["ceph.osd.pool_name"],
		"info",
		d.getRBDVolumeName(snapVol, "", false, false))
	if err != nil {
		return nil
	}

	parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
	snapshotName := fmt.Sprintf("snapshot_%s", snapshotOnlyName)

	// Delete the RBD snapshot on the parent image.
	parentVol := NewVolume(d, d.name, snapVol.volType, snapVol.contentType, parentName, nil, nil)
	_, err = d.deleteVolumeSnapshot(parentVol, snapshotName)
	if err != nil {
		return errors.Wrap(err, "Failed to delete volume snapshot")
	}

	// Clean up the snapshot's mountpoint directory.
	mountPath := snapVol.MountPath()

	if snapVol.contentType == ContentTypeFS && shared.PathExists(mountPath) {
		err = wipeDirectory(mountPath)
		if err != nil {
			return err
		}

		err = os.Remove(mountPath)
		if err != nil && !os.IsNotExist(err) {
			return errors.Wrapf(err, "Failed to remove '%s'", mountPath)
		}
	}

	// Remove the parent snapshot directory if this is the last snapshot being removed.
	err = deleteParentSnapshotDirIfEmpty(d.name, snapVol.volType, parentName)
	if err != nil {
		return err
	}

	// For VM images, delete the filesystem volume too.
	if snapVol.IsVMBlock() {
		fsVol := snapVol.NewVMBlockFilesystemVolume()
		err := d.DeleteVolumeSnapshot(fsVol, op)
		if err != nil {
			return err
		}
	}

	return nil
}
// MountVolumeSnapshot simulates mounting a volume snapshot.
// RBD snapshots cannot be mounted directly, so for filesystem content the
// snapshot is protected, cloned to a temporary "<parent>_<snap>_start_clone"
// image, mapped and mounted. UnmountVolumeSnapshot undoes all of this.
// Returns true when this call performed the mount.
func (d *ceph) MountVolumeSnapshot(snapVol Volume, op *operations.Operation) (bool, error) {
	mountPath := snapVol.MountPath()

	if snapVol.contentType == ContentTypeFS && !shared.IsMountPoint(mountPath) {
		revert := revert.New()
		defer revert.Fail()

		parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
		prefixedSnapOnlyName := fmt.Sprintf("snapshot_%s", snapshotOnlyName)
		parentVol := NewVolume(d, d.name, snapVol.volType, snapVol.contentType, parentName, nil, nil)

		// Protect snapshot to prevent data loss.
		err := d.rbdProtectVolumeSnapshot(parentVol, prefixedSnapOnlyName)
		if err != nil {
			return false, err
		}

		revert.Add(func() { d.rbdUnprotectVolumeSnapshot(parentVol, prefixedSnapOnlyName) })

		// Clone snapshot.
		cloneName := fmt.Sprintf("%s_%s_start_clone", parentName, snapshotOnlyName)
		cloneVol := NewVolume(d, d.name, VolumeType("snapshots"), ContentTypeFS, cloneName, nil, nil)

		err = d.rbdCreateClone(parentVol, prefixedSnapOnlyName, cloneVol)
		if err != nil {
			return false, err
		}

		revert.Add(func() { d.rbdDeleteVolume(cloneVol) })

		// Map volume.
		rbdDevPath, err := d.rbdMapVolume(cloneVol)
		if err != nil {
			return false, err
		}

		revert.Add(func() { d.rbdUnmapVolume(cloneVol, true) })

		// Re-check in case the mount appeared while we were cloning.
		if shared.IsMountPoint(mountPath) {
			return false, nil
		}

		err = snapVol.EnsureMountPath()
		if err != nil {
			return false, err
		}

		RBDFilesystem := d.getRBDFilesystem(snapVol)
		mountFlags, mountOptions := resolveMountOptions(d.getRBDMountOptions(snapVol))

		// Force nouuid for XFS — presumably because the clone shares the
		// origin's filesystem UUID; confirm against the xfs mount docs.
		if RBDFilesystem == "xfs" {
			idx := strings.Index(mountOptions, "nouuid")
			if idx < 0 {
				mountOptions += ",nouuid"
			}
		}

		err = TryMount(rbdDevPath, mountPath, RBDFilesystem, mountFlags, mountOptions)
		if err != nil {
			return false, err
		}

		revert.Success()
		return true, nil
	}

	// For VMs, mount the filesystem volume.
	if snapVol.IsVMBlock() {
		fsVol := snapVol.NewVMBlockFilesystemVolume()
		return d.MountVolumeSnapshot(fsVol, op)
	}

	return false, nil
}
// UnmountVolumeSnapshot simulates unmounting a volume snapshot.
// It unmounts the snapshot's mount path, then unmaps and deletes the
// temporary "<parent>_<snap>_start_clone" image created by
// MountVolumeSnapshot. Returns true when an unmount was performed.
func (d *ceph) UnmountVolumeSnapshot(snapVol Volume, op *operations.Operation) (bool, error) {
	mountPath := snapVol.MountPath()

	if !shared.IsMountPoint(mountPath) {
		return false, nil
	}

	err := TryUnmount(mountPath, unix.MNT_DETACH)
	if err != nil {
		return false, err
	}

	parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
	cloneName := fmt.Sprintf("%s_%s_start_clone", parentName, snapshotOnlyName)
	cloneVol := NewVolume(d, d.name, VolumeType("snapshots"), ContentTypeFS, cloneName, nil, nil)

	err = d.rbdUnmapVolume(cloneVol, true)
	if err != nil {
		return false, err
	}

	// Nothing more to clean up if the temporary clone no longer exists.
	if !d.HasVolume(cloneVol) {
		return true, nil
	}

	// Delete the temporary RBD volume.
	err = d.rbdDeleteVolume(cloneVol)
	if err != nil {
		return false, err
	}

	// For VM block volumes, also unmount the associated filesystem volume.
	if snapVol.IsVMBlock() {
		fsVol := snapVol.NewVMBlockFilesystemVolume()
		return d.UnmountVolumeSnapshot(fsVol, op)
	}

	return true, nil
}
// VolumeSnapshots returns a list of snapshots for the volume.
// Internal-only snapshots (zombies and in-flight migration markers) are
// filtered out and the "snapshot_" prefix is stripped from the rest.
func (d *ceph) VolumeSnapshots(vol Volume, op *operations.Operation) ([]string, error) {
	rbdSnaps, err := d.rbdListVolumeSnapshots(vol)
	if err != nil {
		if err == db.ErrNoSuchObject {
			// No snapshots at all is not an error.
			return nil, nil
		}

		return nil, err
	}

	var names []string
	for _, rbdSnap := range rbdSnaps {
		// Skip snapshots that are only used internally and not
		// relevant for users.
		isInternal := strings.HasPrefix(rbdSnap, "zombie_") || strings.HasPrefix(rbdSnap, "migration-send-")
		if isInternal {
			continue
		}

		names = append(names, strings.TrimPrefix(rbdSnap, "snapshot_"))
	}

	return names, nil
}
// RestoreVolume restores a volume from a snapshot using "rbd snap rollback".
// The volume is unmounted for the rollback (and remounted afterwards when
// this call did the unmount).
func (d *ceph) RestoreVolume(vol Volume, snapshotName string, op *operations.Operation) error {
	ourUmount, err := d.UnmountVolume(vol, op)
	if err != nil {
		return err
	}

	if ourUmount {
		// Only remount if we were the ones who unmounted.
		defer d.MountVolume(vol, op)
	}

	_, err = shared.RunCommand(
		"rbd",
		"--id", d.config["ceph.user.name"],
		"--cluster", d.config["ceph.cluster_name"],
		"--pool", d.config["ceph.osd.pool_name"],
		"snap",
		"rollback",
		"--snap", fmt.Sprintf("snapshot_%s", snapshotName),
		d.getRBDVolumeName(vol, "", false, false))
	if err != nil {
		return err
	}

	snapVol, err := vol.NewSnapshot(snapshotName)
	if err != nil {
		return err
	}

	// Re-generate the filesystem UUID.
	// NOTE(review): the UUID is regenerated on the snapshot volume, not the
	// restored parent — confirm this is intentional.
	err = d.generateUUID(snapVol)
	if err != nil {
		return err
	}

	return nil
}
// RenameVolumeSnapshot renames a volume snapshot.
// Renames the RBD snapshot on the parent volume, then the on-disk mount path
// (filesystem volumes) and, for VM block volumes, the associated filesystem
// volume's snapshot.
func (d *ceph) RenameVolumeSnapshot(snapVol Volume, newSnapshotName string, op *operations.Operation) error {
	revert := revert.New()
	defer revert.Fail()

	parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
	oldSnapOnlyName := fmt.Sprintf("snapshot_%s", snapshotOnlyName)
	newSnapOnlyName := fmt.Sprintf("snapshot_%s", newSnapshotName)
	parentVol := NewVolume(d, d.name, snapVol.volType, snapVol.contentType, parentName, nil, nil)

	err := d.rbdRenameVolumeSnapshot(parentVol, oldSnapOnlyName, newSnapOnlyName)
	if err != nil {
		return err
	}

	revert.Add(func() { d.rbdRenameVolumeSnapshot(parentVol, newSnapOnlyName, oldSnapOnlyName) })

	// Rename the snapshot's mount path.
	if snapVol.contentType == ContentTypeFS {
		err = genericVFSRenameVolumeSnapshot(d, snapVol, newSnapshotName, op)
		if err != nil {
			return err
		}
	}

	// For VM block volumes, rename the filesystem volume's snapshot too.
	if snapVol.IsVMBlock() {
		fsVol := snapVol.NewVMBlockFilesystemVolume()
		err := d.RenameVolumeSnapshot(fsVol, newSnapshotName, op)
		if err != nil {
			return err
		}

		revert.Add(func() {
			newFsVol := NewVolume(d, d.name, snapVol.volType, ContentTypeFS, fmt.Sprintf("%s/%s", parentName, newSnapshotName), snapVol.config, snapVol.poolConfig)
			d.RenameVolumeSnapshot(newFsVol, snapVol.name, op)
		})
	}

	revert.Success()
	return nil
}
lxd/storage/drivers/ceph: Re-create image snapshot
Signed-off-by: Thomas Hipp <5f82c492b3b00e427412d216ce820707a10c51ce@canonical.com>
package drivers
import (
"encoding/json"
"fmt"
"io"
"os"
"strings"
"github.com/pborman/uuid"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/migration"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/instancewriter"
"github.com/lxc/lxd/shared/ioprogress"
"github.com/lxc/lxd/shared/units"
)
// CreateVolume creates an empty volume and can optionally fill it by executing the supplied
// filler function.
// Deleted image volumes are kept as "zombie_image" images; when re-creating
// an image whose zombie still exists it is simply renamed back to active.
func (d *ceph) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Operation) error {
	// Revert handling.
	revert := revert.New()
	defer revert.Fail()

	if vol.contentType == ContentTypeFS {
		// Create mountpoint.
		err := vol.EnsureMountPath()
		if err != nil {
			return err
		}

		revert.Add(func() { os.Remove(vol.MountPath()) })
	}

	// Figure out the potential zombie volume. Zombie image names embed the
	// filesystem they were formatted with.
	zombieImageVol := NewVolume(d, d.name, VolumeType("zombie_image"), vol.contentType,
		fmt.Sprintf("%s_%s", vol.name, d.getRBDFilesystem(vol)), nil, nil)

	// Block-content image/VM volumes use a ".block" suffixed zombie name.
	if (vol.volType == VolumeTypeVM || vol.volType == VolumeTypeImage) && vol.contentType == ContentTypeBlock {
		zombieImageVol = NewVolume(d, d.name, VolumeType("zombie_image"), vol.contentType,
			fmt.Sprintf("%s_%s.block", vol.name, d.getRBDFilesystem(vol)), nil, nil)
	}

	// Check if we have a zombie image. If so, restore it otherwise
	// create a new image volume.
	if vol.volType == VolumeTypeImage && d.HasVolume(zombieImageVol) {
		// Figure out the names.
		oldName := d.getRBDVolumeName(zombieImageVol, "", false, true)
		newName := d.getRBDVolumeName(vol, "", false, true)

		// Rename back to active.
		_, err := shared.RunCommand(
			"rbd",
			"--id", d.config["ceph.user.name"],
			"--cluster", d.config["ceph.cluster_name"],
			"mv",
			oldName,
			newName)
		if err != nil {
			return err
		}

		// For VMs, also create the filesystem volume.
		if vol.IsVMBlock() {
			fsVol := vol.NewVMBlockFilesystemVolume()
			err := d.CreateVolume(fsVol, nil, op)
			if err != nil {
				return err
			}

			revert.Add(func() { d.DeleteVolume(fsVol, op) })
		}

		revert.Success()
		return nil
	}

	// Get size.
	RBDSize, err := d.getRBDSize(vol)
	if err != nil {
		return err
	}

	// Create volume.
	err = d.rbdCreateVolume(vol, RBDSize)
	if err != nil {
		return err
	}

	revert.Add(func() { d.DeleteVolume(vol, op) })

	// Map the new image so it can be formatted and filled.
	RBDDevPath, err := d.rbdMapVolume(vol)
	if err != nil {
		return err
	}

	revert.Add(func() { d.rbdUnmapVolume(vol, true) })

	// Get filesystem.
	RBDFilesystem := d.getRBDFilesystem(vol)

	if vol.contentType == ContentTypeFS {
		_, err = makeFSType(RBDDevPath, RBDFilesystem, nil)
		if err != nil {
			return err
		}
	}

	// For VMs, also create the filesystem volume.
	if vol.IsVMBlock() {
		fsVol := vol.NewVMBlockFilesystemVolume()
		err := d.CreateVolume(fsVol, nil, op)
		if err != nil {
			return err
		}

		revert.Add(func() { d.DeleteVolume(fsVol, op) })
	}

	// Run the volume filler function if supplied.
	if filler != nil && filler.Fill != nil {
		err := vol.MountTask(func(mountPath string, op *operations.Operation) error {
			if vol.contentType == ContentTypeFS {
				// Filesystem fillers only receive the mount path.
				return filler.Fill(mountPath, "")
			}

			// Block fillers also receive the mapped device path.
			devPath, err := d.GetVolumeDiskPath(vol)
			if err != nil {
				return err
			}

			err = filler.Fill(mountPath, devPath)
			if err != nil {
				return err
			}

			// Move the GPT alt header to end of disk if needed.
			if vol.IsVMBlock() {
				err = d.moveGPTAltHeader(devPath)
				if err != nil {
					return err
				}
			}

			return err
		}, op)
		if err != nil {
			return err
		}
	}

	// Create a readonly snapshot of the image volume which will be used as the
	// clone source for future non-image volumes.
	if vol.volType == VolumeTypeImage {
		err = d.rbdUnmapVolume(vol, true)
		if err != nil {
			return err
		}

		err = d.rbdCreateVolumeSnapshot(vol, "readonly")
		if err != nil {
			return err
		}

		revert.Add(func() { d.deleteVolumeSnapshot(vol, "readonly") })

		err = d.rbdProtectVolumeSnapshot(vol, "readonly")
		if err != nil {
			return err
		}

		if vol.contentType == ContentTypeBlock {
			// Re-create the readonly snapshot on the image's associated
			// filesystem volume, post-filling, so it reflects the filled state.
			fsVol := NewVolume(d, d.name, vol.volType, ContentTypeFS, vol.name, vol.config, vol.poolConfig)

			err := d.rbdUnprotectVolumeSnapshot(fsVol, "readonly")
			if err != nil {
				return err
			}

			_, err = d.deleteVolumeSnapshot(fsVol, "readonly")
			if err != nil {
				return err
			}

			err = d.rbdCreateVolumeSnapshot(fsVol, "readonly")
			if err != nil {
				return err
			}

			revert.Add(func() { d.deleteVolumeSnapshot(fsVol, "readonly") })

			err = d.rbdProtectVolumeSnapshot(fsVol, "readonly")
			if err != nil {
				return err
			}
		}
	}

	revert.Success()
	return nil
}
// CreateVolumeFromBackup re-creates a volume from its exported state.
// Delegates to the generic VFS backup unpacker, returning its post-hook and
// revert functions.
// NOTE(review): the "optimizedStorage" argument is not forwarded — confirm
// optimized restore is intentionally unsupported for ceph.
func (d *ceph) CreateVolumeFromBackup(vol Volume, snapshots []string, srcData io.ReadSeeker, optimizedStorage bool, op *operations.Operation) (func(vol Volume) error, func(), error) {
	return genericVFSBackupUnpack(d, vol, snapshots, srcData, op)
}
// CreateVolumeFromCopy provides same-pool volume copying functionality.
// Without snapshots the copy is either a full "rbd cp" (when clone copy is
// explicitly disabled and the source is not an image) or a lightweight RBD
// clone from a protected snapshot. With snapshots, the source and each
// snapshot are transferred incrementally onto an empty dummy volume.
func (d *ceph) CreateVolumeFromCopy(vol Volume, srcVol Volume, copySnapshots bool, op *operations.Operation) error {
	var err error
	snapshots := []string{}

	revert := revert.New()
	defer revert.Fail()

	// Only enumerate snapshots when copying them from a non-snapshot source.
	if !srcVol.IsSnapshot() && copySnapshots {
		snapshots, err = d.VolumeSnapshots(srcVol, op)
		if err != nil {
			return err
		}
	}

	// Copy without snapshots.
	if !copySnapshots || len(snapshots) == 0 {
		if d.config["ceph.rbd.clone_copy"] != "" &&
			!shared.IsTrue(d.config["ceph.rbd.clone_copy"]) &&
			srcVol.volType != VolumeTypeImage {
			// Full copy requested via config.
			_, err = shared.RunCommand(
				"rbd",
				"--id", d.config["ceph.user.name"],
				"--cluster", d.config["ceph.cluster_name"],
				"cp",
				d.getRBDVolumeName(srcVol, "", false, true),
				d.getRBDVolumeName(vol, "", false, true))
			if err != nil {
				return err
			}

			revert.Add(func() { d.DeleteVolume(vol, op) })

			_, err = d.rbdMapVolume(vol)
			if err != nil {
				return err
			}

			revert.Add(func() { d.rbdUnmapVolume(vol, true) })
		} else {
			// Lightweight copy: clone from a (possibly temporary) snapshot.
			parentVol := srcVol
			snapshotName := "readonly"

			if srcVol.volType != VolumeTypeImage {
				snapshotName = fmt.Sprintf("zombie_snapshot_%s", uuid.NewRandom().String())

				if srcVol.IsSnapshot() {
					srcParentName, srcSnapOnlyName, _ :=
						shared.InstanceGetParentAndSnapshotName(srcVol.name)
					snapshotName = fmt.Sprintf("snapshot_%s", srcSnapOnlyName)
					parentVol = NewVolume(d, d.name, srcVol.volType, srcVol.contentType, srcParentName, nil, nil)
				} else {
					// Create snapshot.
					err := d.rbdCreateVolumeSnapshot(srcVol, snapshotName)
					if err != nil {
						return err
					}
				}

				// Protect volume so we can create clones of it.
				err = d.rbdProtectVolumeSnapshot(parentVol, snapshotName)
				if err != nil {
					return err
				}

				revert.Add(func() { d.rbdUnprotectVolumeSnapshot(parentVol, snapshotName) })
			}

			err = d.rbdCreateClone(parentVol, snapshotName, vol)
			if err != nil {
				return err
			}

			revert.Add(func() { d.DeleteVolume(vol, op) })
		}

		if vol.contentType == ContentTypeFS {
			// Re-generate the UUID.
			err = d.generateUUID(vol)
			if err != nil {
				return err
			}
		}

		// For VMs, also copy the filesystem volume.
		if vol.IsVMBlock() {
			srcFSVol := srcVol.NewVMBlockFilesystemVolume()
			fsVol := vol.NewVMBlockFilesystemVolume()

			err := d.CreateVolumeFromCopy(fsVol, srcFSVol, false, op)
			if err != nil {
				return err
			}
		}

		revert.Success()
		return nil
	}

	// Copy with snapshots.

	// Create empty dummy volume
	err = d.rbdCreateVolume(vol, "0")
	if err != nil {
		return err
	}

	revert.Add(func() { d.rbdDeleteVolume(vol) })

	// Receive over the dummy volume we created above.
	targetVolumeName := d.getRBDVolumeName(vol, "", false, true)

	lastSnap := ""

	if len(snapshots) > 0 {
		err := createParentSnapshotDirIfMissing(d.name, vol.volType, vol.name)
		if err != nil {
			return err
		}
	}

	for i, snap := range snapshots {
		// Copy the loop variable so each revert closure deletes its own
		// snapshot; without this, every queued revert would reference the
		// final iteration's value (pre-Go 1.22 capture semantics).
		snap := snap

		prev := ""
		if i > 0 {
			prev = fmt.Sprintf("snapshot_%s", snapshots[i-1])
		}

		lastSnap = fmt.Sprintf("snapshot_%s", snap)
		sourceVolumeName := d.getRBDVolumeName(srcVol, lastSnap, false, true)

		// Transfer this snapshot as a delta against the previous one.
		err = d.copyWithSnapshots(
			sourceVolumeName,
			targetVolumeName,
			prev)
		if err != nil {
			return err
		}

		revert.Add(func() { d.rbdDeleteVolumeSnapshot(vol, snap) })

		snapVol, err := vol.NewSnapshot(snap)
		if err != nil {
			return err
		}

		err = snapVol.EnsureMountPath()
		if err != nil {
			return err
		}
	}

	// Copy snapshot.
	sourceVolumeName := d.getRBDVolumeName(srcVol, "", false, true)

	err = d.copyWithSnapshots(
		sourceVolumeName,
		targetVolumeName,
		lastSnap)
	if err != nil {
		return err
	}

	// Re-generate the UUID.
	err = d.generateUUID(vol)
	if err != nil {
		return err
	}

	ourMount, err := d.MountVolume(vol, op)
	if err != nil {
		return err
	}

	if ourMount {
		defer d.UnmountVolume(vol, op)
	}

	revert.Success()
	return nil
}
// CreateVolumeFromMigration creates a volume being sent via a migration.
// RSYNC-based migration types are delegated to the generic VFS handler; the
// RBD type receives the volume (and its snapshots) over conn via
// d.receiveVolume.
func (d *ceph) CreateVolumeFromMigration(vol Volume, conn io.ReadWriteCloser, volTargetArgs migration.VolumeTargetArgs, preFiller *VolumeFiller, op *operations.Operation) error {
	// Handle simple rsync and block_and_rsync through generic.
	if volTargetArgs.MigrationType.FSType == migration.MigrationFSType_RSYNC || volTargetArgs.MigrationType.FSType == migration.MigrationFSType_BLOCK_AND_RSYNC {
		return genericVFSCreateVolumeFromMigration(d, nil, vol, conn, volTargetArgs, preFiller, op)
	} else if volTargetArgs.MigrationType.FSType != migration.MigrationFSType_RBD {
		return ErrNotSupported
	}

	// For VM block volumes, receive the associated filesystem volume first.
	if vol.IsVMBlock() {
		fsVol := vol.NewVMBlockFilesystemVolume()
		err := d.CreateVolumeFromMigration(fsVol, conn, volTargetArgs, preFiller, op)
		if err != nil {
			return err
		}
	}

	recvName := d.getRBDVolumeName(vol, "", false, true)

	// Create a zero-sized target image to receive into, if needed.
	if !d.HasVolume(vol) {
		err := d.rbdCreateVolume(vol, "0")
		if err != nil {
			return err
		}
	}

	err := vol.EnsureMountPath()
	if err != nil {
		return err
	}

	// Receive any snapshots first.
	if len(volTargetArgs.Snapshots) > 0 {
		// Create the parent directory.
		err := createParentSnapshotDirIfMissing(d.name, vol.volType, vol.name)
		if err != nil {
			return err
		}

		// Transfer the snapshots.
		for _, snapName := range volTargetArgs.Snapshots {
			fullSnapshotName := d.getRBDVolumeName(vol, snapName, false, true)
			wrapper := migration.ProgressWriter(op, "fs_progress", fullSnapshotName)

			err = d.receiveVolume(recvName, conn, wrapper)
			if err != nil {
				return err
			}

			snapVol, err := vol.NewSnapshot(snapName)
			if err != nil {
				return err
			}

			err = snapVol.EnsureMountPath()
			if err != nil {
				return err
			}
		}
	}

	defer func() {
		// Delete all migration-send-* snapshots (best effort; errors ignored).
		snaps, err := d.rbdListVolumeSnapshots(vol)
		if err != nil {
			return
		}

		for _, snap := range snaps {
			if !strings.HasPrefix(snap, "migration-send") {
				continue
			}

			d.rbdDeleteVolumeSnapshot(vol, snap)
		}
	}()

	// Receive the main volume.
	wrapper := migration.ProgressWriter(op, "fs_progress", vol.name)

	err = d.receiveVolume(recvName, conn, wrapper)
	if err != nil {
		return err
	}

	// For live migration an additional final delta is received.
	if volTargetArgs.Live {
		err = d.receiveVolume(recvName, conn, wrapper)
		if err != nil {
			return err
		}
	}

	// Re-generate the filesystem UUID.
	err = d.generateUUID(vol)
	if err != nil {
		return err
	}

	return nil
}
// RefreshVolume updates an existing volume to match the state of another.
// Delegates to the generic VFS copy with the refresh flag enabled.
func (d *ceph) RefreshVolume(vol Volume, srcVol Volume, srcSnapshots []Volume, op *operations.Operation) error {
	return genericVFSCopyVolume(d, nil, vol, srcVol, srcSnapshots, true, op)
}
// DeleteVolume deletes a volume of the storage device. If any snapshots of the volume remain then
// this function will return an error.
// Image volumes that still have dependent clones are not destroyed; they are
// marked as "zombie" volumes instead so the clones keep a valid parent.
func (d *ceph) DeleteVolume(vol Volume, op *operations.Operation) error {
	if vol.volType == VolumeTypeImage {
		// Try to umount but don't fail.
		d.UnmountVolume(vol, op)

		// Check if image has dependant snapshots.
		_, err := d.rbdListSnapshotClones(vol, "readonly")
		if err != nil {
			if err != db.ErrNoSuchObject {
				return err
			}

			// No clones depend on this image, so it can be fully removed.

			// Unprotect snapshot.
			err = d.rbdUnprotectVolumeSnapshot(vol, "readonly")
			if err != nil {
				return err
			}

			// Delete snapshots.
			_, err = shared.RunCommand(
				"rbd",
				"--id", d.config["ceph.user.name"],
				"--cluster", d.config["ceph.cluster_name"],
				"--pool", d.config["ceph.osd.pool_name"],
				"snap",
				"purge",
				d.getRBDVolumeName(vol, "", false, false))
			if err != nil {
				return err
			}

			// Unmap image.
			err = d.rbdUnmapVolume(vol, true)
			if err != nil {
				return err
			}

			// Delete image.
			err = d.rbdDeleteVolume(vol)
		} else {
			// Clones still depend on this image: keep it around as a zombie.
			err = d.rbdUnmapVolume(vol, true)
			if err != nil {
				return err
			}

			err = d.rbdMarkVolumeDeleted(vol, vol.name)
		}
		if err != nil {
			return err
		}
	} else {
		if !d.HasVolume(vol) {
			return nil
		}

		_, err := d.UnmountVolume(vol, op)
		if err != nil {
			return err
		}

		_, err = d.deleteVolume(vol)
		if err != nil {
			return errors.Wrap(err, "Failed to delete volume")
		}
	}

	// For VM block volumes, delete the associated filesystem volume too.
	if vol.IsVMBlock() {
		fsVol := vol.NewVMBlockFilesystemVolume()
		err := d.DeleteVolume(fsVol, op)
		if err != nil {
			return err
		}
	}

	// Clean up the mount point.
	mountPath := vol.MountPath()

	if vol.contentType == ContentTypeFS && shared.PathExists(mountPath) {
		err := wipeDirectory(mountPath)
		if err != nil {
			return err
		}

		err = os.Remove(mountPath)
		if err != nil && !os.IsNotExist(err) {
			return errors.Wrapf(err, "Failed to remove '%s'", mountPath)
		}
	}

	return nil
}
// HasVolume indicates whether a specific volume exists on the storage pool.
// The image is probed by listing its metadata; any error is treated as
// "not present".
func (d *ceph) HasVolume(vol Volume) bool {
	args := []string{
		"--id", d.config["ceph.user.name"],
		"--cluster", d.config["ceph.cluster_name"],
		"--pool", d.config["ceph.osd.pool_name"],
		"image-meta",
		"list",
		d.getRBDVolumeName(vol, "", false, false),
	}

	_, err := shared.RunCommand("rbd", args...)

	return err == nil
}
// ValidateVolume validates the supplied volume config.
// Both ceph-specific keys accept any value; unknown keys are optionally
// stripped by the shared validator.
func (d *ceph) ValidateVolume(vol Volume, removeUnknownKeys bool) error {
	volumeRules := make(map[string]func(value string) error, 2)
	volumeRules["block.filesystem"] = shared.IsAny
	volumeRules["block.mount_options"] = shared.IsAny

	return d.validateVolume(vol, volumeRules, removeUnknownKeys)
}
// UpdateVolume applies config changes to the volume.
// Only custom volumes support updates, and only the "size" key has any
// effect (it is applied via SetVolumeQuota).
func (d *ceph) UpdateVolume(vol Volume, changedConfig map[string]string) error {
	if vol.volType != VolumeTypeCustom {
		return ErrNotSupported
	}

	if newSize, ok := changedConfig["size"]; ok {
		return d.SetVolumeQuota(vol, newSize, nil)
	}

	return nil
}
// GetVolumeUsage returns the disk space used by the volume, in bytes.
// Mounted filesystem volumes are measured with statfs; otherwise the JSON
// output of "rbd du" is parsed and the entry without a snapshot name (the
// image itself) is used.
func (d *ceph) GetVolumeUsage(vol Volume) (int64, error) {
	// Fast path: query the mounted filesystem directly.
	if vol.contentType == ContentTypeFS && shared.IsMountPoint(vol.MountPath()) {
		var stat unix.Statfs_t

		err := unix.Statfs(vol.MountPath(), &stat)
		if err != nil {
			return -1, err
		}

		return int64(stat.Blocks-stat.Bfree) * int64(stat.Bsize), nil
	}

	// Subset of the fields emitted by "rbd du --format json".
	type cephDuLine struct {
		Name            string `json:"name"`
		Snapshot        string `json:"snapshot"`
		ProvisionedSize int64  `json:"provisioned_size"`
		UsedSize        int64  `json:"used_size"`
	}

	type cephDuInfo struct {
		Images []cephDuLine `json:"images"`
	}

	jsonInfo, err := shared.TryRunCommand(
		"rbd",
		"du",
		"--format", "json",
		"--id", d.config["ceph.user.name"],
		"--cluster", d.config["ceph.cluster_name"],
		"--pool", d.config["ceph.osd.pool_name"],
		d.getRBDVolumeName(vol, "", false, false))
	if err != nil {
		return -1, err
	}

	var usedSize int64
	var result cephDuInfo

	err = json.Unmarshal([]byte(jsonInfo), &result)
	if err != nil {
		return -1, err
	}

	// rbd du gives the output of all related rbd images, snapshots included
	// to get the total size of the image we use the result that does not include
	// a snapshot name, this is the total image size.
	for _, image := range result.Images {
		if image.Snapshot == "" {
			usedSize = image.UsedSize
		}
	}

	return usedSize, nil
}
// SetVolumeQuota applies a size limit on volume.
// Resizes the RBD image and, for filesystem volumes, grows or shrinks the
// filesystem to match (shrink: filesystem first, then image; grow: image
// first, then filesystem). Block volumes can only be grown.
func (d *ceph) SetVolumeQuota(vol Volume, size string, op *operations.Operation) error {
	fsType := d.getRBDFilesystem(vol)

	RBDDevPath, err := d.getRBDMappedDevPath(vol)
	if err != nil {
		return err
	}

	// The grow/shrink functions use Mount/Unmount which may cause an unmap, so make sure to keep a reference.
	oldKeepDevice := vol.keepDevice
	vol.keepDevice = true
	defer func() {
		vol.keepDevice = oldKeepDevice
	}()

	oldSizeBytes, err := blockDevSizeBytes(RBDDevPath)
	if err != nil {
		return errors.Wrapf(err, "Error getting current size")
	}

	newSizeBytes, err := units.ParseByteSizeString(size)
	if err != nil {
		return err
	}

	// The right disjunct just means that someone unset the size property in the instance's config.
	// We obviously cannot resize to 0.
	if oldSizeBytes == newSizeBytes || newSizeBytes == 0 {
		return nil
	}

	// Resize filesystem if needed.
	if vol.contentType == ContentTypeFS {
		if newSizeBytes < oldSizeBytes {
			// Shrink the filesystem before the image so no data is cut off.
			err = shrinkFileSystem(fsType, RBDDevPath, vol, newSizeBytes)
			if err != nil {
				return err
			}

			_, err = shared.TryRunCommand(
				"rbd",
				"resize",
				"--allow-shrink",
				"--id", d.config["ceph.user.name"],
				"--cluster", d.config["ceph.cluster_name"],
				"--pool", d.config["ceph.osd.pool_name"],
				"--size", fmt.Sprintf("%dB", newSizeBytes),
				d.getRBDVolumeName(vol, "", false, false))
			if err != nil {
				return err
			}
		} else {
			// Grow the block device.
			_, err = shared.TryRunCommand(
				"rbd",
				"resize",
				"--id", d.config["ceph.user.name"],
				"--cluster", d.config["ceph.cluster_name"],
				"--pool", d.config["ceph.osd.pool_name"],
				"--size", fmt.Sprintf("%dB", newSizeBytes),
				d.getRBDVolumeName(vol, "", false, false))
			if err != nil {
				return err
			}

			// Grow the filesystem.
			err = growFileSystem(fsType, RBDDevPath, vol)
			if err != nil {
				return err
			}
		}
	} else {
		if newSizeBytes < oldSizeBytes {
			return fmt.Errorf("You cannot shrink block volumes")
		}

		// Grow the block device.
		_, err = shared.TryRunCommand(
			"rbd",
			"resize",
			"--id", d.config["ceph.user.name"],
			"--cluster", d.config["ceph.cluster_name"],
			"--pool", d.config["ceph.osd.pool_name"],
			"--size", fmt.Sprintf("%dB", newSizeBytes),
			d.getRBDVolumeName(vol, "", false, false))
		if err != nil {
			return err
		}

		// Move the GPT alt header to end of disk if needed.
		if vol.IsVMBlock() {
			err = d.moveGPTAltHeader(RBDDevPath)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// GetVolumeDiskPath returns the location of a root disk block device.
// Only VM block volumes have a disk path; anything else is unsupported.
func (d *ceph) GetVolumeDiskPath(vol Volume) (string, error) {
	if !vol.IsVMBlock() {
		return "", ErrNotSupported
	}

	return d.getRBDMappedDevPath(vol)
}
// MountVolume simulates mounting a volume.
// Filesystem volumes are mounted from their mapped RBD device; VM block
// volumes delegate to their associated filesystem volume. Returns true when
// this call performed the mount.
func (d *ceph) MountVolume(vol Volume, op *operations.Operation) (bool, error) {
	mountPath := vol.MountPath()

	if vol.contentType == ContentTypeFS && !shared.IsMountPoint(mountPath) {
		RBDFilesystem := d.getRBDFilesystem(vol)

		err := vol.EnsureMountPath()
		if err != nil {
			return false, err
		}

		// Get the mapped RBD device path.
		RBDDevPath, err := d.getRBDMappedDevPath(vol)
		if err != nil {
			return false, err
		}

		mountFlags, mountOptions := resolveMountOptions(d.getRBDMountOptions(vol))
		err = TryMount(RBDDevPath, mountPath, RBDFilesystem, mountFlags, mountOptions)
		if err != nil {
			return false, err
		}

		return true, nil
	}

	// For VMs, mount the filesystem volume.
	if vol.IsVMBlock() {
		fsVol := vol.NewVMBlockFilesystemVolume()
		return d.MountVolume(fsVol, op)
	}

	return false, nil
}
// UnmountVolume simulates unmounting a volume.
// It unmounts the mount path (when mounted) and unmaps the RBD device unless
// vol.keepDevice is set. For VM block volumes the associated filesystem
// volume is unmounted first.
func (d *ceph) UnmountVolume(vol Volume, op *operations.Operation) (bool, error) {
	// For VMs, also unmount the filesystem dataset.
	if vol.IsVMBlock() {
		fsVol := vol.NewVMBlockFilesystemVolume()
		_, err := d.UnmountVolume(fsVol, op)
		if err != nil {
			return false, err
		}
	}

	// Attempt to unmount the volume.
	mountPath := vol.MountPath()
	if shared.IsMountPoint(mountPath) {
		err := TryUnmount(mountPath, unix.MNT_DETACH)
		if err != nil {
			return false, err
		}
	}

	// Attempt to unmap.
	if !vol.keepDevice {
		err := d.rbdUnmapVolume(vol, true)
		if err != nil {
			return true, err
		}
	}

	return true, nil
}
// RenameVolume renames a volume and its snapshots.
// The volume is unmounted and unmapped, the RBD image is renamed, the new
// image is re-mapped and finally the on-disk paths are renamed via the
// generic VFS helper. Failures roll back the completed steps.
func (d *ceph) RenameVolume(vol Volume, newName string, op *operations.Operation) error {
	revert := revert.New()
	defer revert.Fail()

	_, err := d.UnmountVolume(vol, op)
	if err != nil {
		return err
	}

	err = d.rbdUnmapVolume(vol, true)
	if err != nil {
		// Propagate the failure (previously this returned nil, silently
		// aborting the rename without telling the caller).
		return err
	}

	revert.Add(func() { d.rbdMapVolume(vol) })

	err = d.rbdRenameVolume(vol, newName)
	if err != nil {
		return err
	}

	newVol := NewVolume(d, d.name, vol.volType, vol.contentType, newName, nil, nil)

	revert.Add(func() { d.rbdRenameVolume(newVol, vol.name) })

	_, err = d.rbdMapVolume(newVol)
	if err != nil {
		return err
	}

	// Rename the on-disk mount paths.
	err = genericVFSRenameVolume(d, vol, newName, op)
	if err != nil {
		// Propagate the failure so the queued reverts restore the old name
		// (previously this returned nil and left the rename half-done).
		return err
	}

	revert.Success()
	return nil
}
// MigrateVolume sends a volume for migration.
// Cluster-internal requests (volSrcArgs.Data set) only unmap and rename the
// image. RSYNC types are delegated to the generic VFS handler; the RBD type
// streams snapshots incrementally, then a final delta from a temporary
// "migration-send-*" snapshot of the running volume.
func (d *ceph) MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs *migration.VolumeSourceArgs, op *operations.Operation) error {
	// If data is set, this request is coming from the clustering code.
	// In this case, we only need to unmap and rename the rbd image.
	if volSrcArgs.Data != nil {
		data, ok := volSrcArgs.Data.(string)
		if ok {
			err := d.rbdUnmapVolume(vol, true)
			if err != nil {
				return err
			}

			// Rename volume.
			if vol.name != data {
				err = d.rbdRenameVolume(vol, data)
				if err != nil {
					return err
				}
			}

			return nil
		}
	}

	// Handle simple rsync and block_and_rsync through generic.
	if volSrcArgs.MigrationType.FSType == migration.MigrationFSType_RSYNC || volSrcArgs.MigrationType.FSType == migration.MigrationFSType_BLOCK_AND_RSYNC {
		return genericVFSMigrateVolume(d, d.state, vol, conn, volSrcArgs, op)
	} else if volSrcArgs.MigrationType.FSType != migration.MigrationFSType_RBD {
		return ErrNotSupported
	}

	// For VM block volumes, send the associated filesystem volume first.
	if vol.IsVMBlock() {
		fsVol := vol.NewVMBlockFilesystemVolume()
		err := d.MigrateVolume(fsVol, conn, volSrcArgs, op)
		if err != nil {
			return err
		}
	}

	// Sending a single snapshot: mount it (which creates the temporary
	// "..._start_clone" image) and stream that clone.
	if vol.IsSnapshot() {
		parentName, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(vol.name)
		sendName := fmt.Sprintf("%s/snapshots_%s_%s_start_clone", d.name, parentName, snapOnlyName)
		cloneVol := NewVolume(d, d.name, vol.volType, vol.contentType, vol.name, nil, nil)

		// Mounting the volume snapshot will create the clone "snapshots_<parent>_<snap>_start_clone".
		_, err := d.MountVolumeSnapshot(cloneVol, op)
		if err != nil {
			return err
		}

		defer d.UnmountVolumeSnapshot(cloneVol, op)

		// Setup progress tracking.
		var wrapper *ioprogress.ProgressTracker
		if volSrcArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "fs_progress", vol.name)
		}

		err = d.sendVolume(conn, sendName, "", wrapper)
		if err != nil {
			return err
		}

		return nil
	}

	// Send all snapshots as incremental deltas. Skipped on the final sync of
	// a live migration.
	lastSnap := ""

	if !volSrcArgs.FinalSync {
		for i, snapName := range volSrcArgs.Snapshots {
			snapshot, _ := vol.NewSnapshot(snapName) // NOTE(review): error ignored — confirm NewSnapshot cannot fail here.
			prev := ""
			if i > 0 {
				prev = fmt.Sprintf("snapshot_%s", volSrcArgs.Snapshots[i-1])
			}

			lastSnap = fmt.Sprintf("snapshot_%s", snapName)
			sendSnapName := d.getRBDVolumeName(vol, lastSnap, false, true)

			// Setup progress tracking.
			var wrapper *ioprogress.ProgressTracker
			if volSrcArgs.TrackProgress {
				wrapper = migration.ProgressTracker(op, "fs_progress", snapshot.name)
			}

			err := d.sendVolume(conn, sendSnapName, prev, wrapper)
			if err != nil {
				return err
			}
		}
	}

	// Setup progress tracking.
	var wrapper *ioprogress.ProgressTracker
	if volSrcArgs.TrackProgress {
		wrapper = migration.ProgressTracker(op, "fs_progress", vol.name)
	}

	// Snapshot the running volume and send the delta since the last snapshot.
	runningSnapName := fmt.Sprintf("migration-send-%s", uuid.NewRandom().String())

	err := d.rbdCreateVolumeSnapshot(vol, runningSnapName)
	if err != nil {
		return err
	}

	defer d.rbdDeleteVolumeSnapshot(vol, runningSnapName)

	cur := d.getRBDVolumeName(vol, runningSnapName, false, true)

	err = d.sendVolume(conn, cur, lastSnap, wrapper)
	if err != nil {
		return err
	}

	return nil
}
// BackupVolume creates an exported version of a volume.
// Delegates to the generic VFS backup writer.
// NOTE(review): the "optimized" argument is not forwarded — confirm
// optimized ceph backups are intentionally unsupported here.
func (d *ceph) BackupVolume(vol Volume, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error {
	return genericVFSBackupVolume(d, vol, tarWriter, snapshots, op)
}
// CreateVolumeSnapshot creates a snapshot of a volume.
// The RBD snapshot "snapshot_<name>" is taken on the parent volume. If the
// parent is mounted, its filesystem is synced and (best effort) frozen first
// so the snapshot is consistent.
func (d *ceph) CreateVolumeSnapshot(snapVol Volume, op *operations.Operation) error {
	revert := revert.New()
	defer revert.Fail()

	parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
	sourcePath := GetVolumeMountPath(d.name, snapVol.volType, parentName)
	snapshotName := fmt.Sprintf("snapshot_%s", snapshotOnlyName)

	if shared.IsMountPoint(sourcePath) {
		// This is costly but we need to ensure that all cached data has
		// been committed to disk. If we don't then the rbd snapshot of
		// the underlying filesystem can be inconsistent or - worst case
		// - empty.
		unix.Sync()

		// Freeze the filesystem for the duration of the snapshot; a freeze
		// failure is tolerated and the snapshot is taken anyway.
		_, err := shared.TryRunCommand("fsfreeze", "--freeze", sourcePath)
		if err == nil {
			defer shared.TryRunCommand("fsfreeze", "--unfreeze", sourcePath)
		}
	}

	// Create the parent directory.
	err := createParentSnapshotDirIfMissing(d.name, snapVol.volType, parentName)
	if err != nil {
		return err
	}

	err = snapVol.EnsureMountPath()
	if err != nil {
		return err
	}

	// Take the RBD snapshot on the parent volume.
	parentVol := NewVolume(d, d.name, snapVol.volType, snapVol.contentType, parentName, nil, nil)

	err = d.rbdCreateVolumeSnapshot(parentVol, snapshotName)
	if err != nil {
		return err
	}

	revert.Add(func() { d.DeleteVolumeSnapshot(snapVol, op) })

	// For VM block volumes, snapshot the associated filesystem volume too.
	if snapVol.IsVMBlock() {
		fsVol := snapVol.NewVMBlockFilesystemVolume()
		err := d.CreateVolumeSnapshot(fsVol, op)
		if err != nil {
			return err
		}

		revert.Add(func() { d.DeleteVolumeSnapshot(fsVol, op) })
	}

	revert.Success()
	return nil
}
// DeleteVolumeSnapshot removes a snapshot from the storage device.
// The RBD snapshot "snapshot_<name>" is deleted from the parent volume, the
// snapshot mount path is cleaned up, and for VM block volumes the associated
// filesystem volume's snapshot is deleted as well.
func (d *ceph) DeleteVolumeSnapshot(snapVol Volume, op *operations.Operation) error {
	// Check if snapshot exists, and return if not.
	_, err := shared.RunCommand(
		"rbd",
		"--id", d.config["ceph.user.name"],
		"--cluster", d.config["ceph.cluster_name"],
		"--pool", d.config["ceph.osd.pool_name"],
		"info",
		d.getRBDVolumeName(snapVol, "", false, false))
	if err != nil {
		// A failed "rbd info" is treated as "snapshot already gone".
		return nil
	}

	parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
	snapshotName := fmt.Sprintf("snapshot_%s", snapshotOnlyName)

	// Delete the RBD snapshot on the parent volume.
	parentVol := NewVolume(d, d.name, snapVol.volType, snapVol.contentType, parentName, nil, nil)
	_, err = d.deleteVolumeSnapshot(parentVol, snapshotName)
	if err != nil {
		return errors.Wrap(err, "Failed to delete volume snapshot")
	}

	// Clean up the snapshot's mount point (filesystem volumes only).
	mountPath := snapVol.MountPath()

	if snapVol.contentType == ContentTypeFS && shared.PathExists(mountPath) {
		err = wipeDirectory(mountPath)
		if err != nil {
			return err
		}

		err = os.Remove(mountPath)
		if err != nil && !os.IsNotExist(err) {
			return errors.Wrapf(err, "Failed to remove '%s'", mountPath)
		}
	}

	// Remove the parent snapshot directory if this is the last snapshot being removed.
	err = deleteParentSnapshotDirIfEmpty(d.name, snapVol.volType, parentName)
	if err != nil {
		return err
	}

	// For VM block volumes, delete the filesystem volume's snapshot too.
	if snapVol.IsVMBlock() {
		fsVol := snapVol.NewVMBlockFilesystemVolume()
		err := d.DeleteVolumeSnapshot(fsVol, op)
		if err != nil {
			return err
		}
	}

	return nil
}
// MountVolumeSnapshot simulates mounting a volume snapshot.
// For filesystem content the snapshot is protected, cloned to a temporary
// "<parent>_<snap>_start_clone" image, mapped and mounted; the clone is torn
// down again by UnmountVolumeSnapshot. Returns true when this call mounted.
func (d *ceph) MountVolumeSnapshot(snapVol Volume, op *operations.Operation) (bool, error) {
	mountPath := snapVol.MountPath()

	if snapVol.contentType == ContentTypeFS && !shared.IsMountPoint(mountPath) {
		revert := revert.New()
		defer revert.Fail()

		parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
		prefixedSnapOnlyName := fmt.Sprintf("snapshot_%s", snapshotOnlyName)
		parentVol := NewVolume(d, d.name, snapVol.volType, snapVol.contentType, parentName, nil, nil)

		// Protect snapshot to prevent data loss.
		err := d.rbdProtectVolumeSnapshot(parentVol, prefixedSnapOnlyName)
		if err != nil {
			return false, err
		}

		revert.Add(func() { d.rbdUnprotectVolumeSnapshot(parentVol, prefixedSnapOnlyName) })

		// Clone snapshot.
		cloneName := fmt.Sprintf("%s_%s_start_clone", parentName, snapshotOnlyName)
		cloneVol := NewVolume(d, d.name, VolumeType("snapshots"), ContentTypeFS, cloneName, nil, nil)

		err = d.rbdCreateClone(parentVol, prefixedSnapOnlyName, cloneVol)
		if err != nil {
			return false, err
		}

		revert.Add(func() { d.rbdDeleteVolume(cloneVol) })

		// Map volume.
		rbdDevPath, err := d.rbdMapVolume(cloneVol)
		if err != nil {
			return false, err
		}

		revert.Add(func() { d.rbdUnmapVolume(cloneVol, true) })

		// Re-check in case the mount appeared while we were cloning.
		if shared.IsMountPoint(mountPath) {
			return false, nil
		}

		err = snapVol.EnsureMountPath()
		if err != nil {
			return false, err
		}

		RBDFilesystem := d.getRBDFilesystem(snapVol)
		mountFlags, mountOptions := resolveMountOptions(d.getRBDMountOptions(snapVol))

		// Force nouuid for XFS — presumably because the clone shares the
		// origin's filesystem UUID; confirm against the xfs mount docs.
		if RBDFilesystem == "xfs" {
			idx := strings.Index(mountOptions, "nouuid")
			if idx < 0 {
				mountOptions += ",nouuid"
			}
		}

		err = TryMount(rbdDevPath, mountPath, RBDFilesystem, mountFlags, mountOptions)
		if err != nil {
			return false, err
		}

		revert.Success()
		return true, nil
	}

	// For VMs, mount the filesystem volume.
	if snapVol.IsVMBlock() {
		fsVol := snapVol.NewVMBlockFilesystemVolume()
		return d.MountVolumeSnapshot(fsVol, op)
	}

	return false, nil
}
// UnmountVolumeSnapshot simulates unmounting a volume snapshot.
func (d *ceph) UnmountVolumeSnapshot(snapVol Volume, op *operations.Operation) (bool, error) {
	mountPath := snapVol.MountPath()

	// Nothing mounted, nothing to do.
	if !shared.IsMountPoint(mountPath) {
		return false, nil
	}

	err := TryUnmount(mountPath, unix.MNT_DETACH)
	if err != nil {
		return false, err
	}

	// The mount was backed by the temporary clone
	// "<parent>_<snap>_start_clone" created by MountVolumeSnapshot; unmap
	// and delete it now.
	parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
	cloneName := fmt.Sprintf("%s_%s_start_clone", parentName, snapshotOnlyName)
	cloneVol := NewVolume(d, d.name, VolumeType("snapshots"), ContentTypeFS, cloneName, nil, nil)

	err = d.rbdUnmapVolume(cloneVol, true)
	if err != nil {
		return false, err
	}

	// If the clone is already gone there is nothing more to clean up.
	if !d.HasVolume(cloneVol) {
		return true, nil
	}

	// Delete the temporary RBD volume.
	err = d.rbdDeleteVolume(cloneVol)
	if err != nil {
		return false, err
	}

	// For VMs, also unmount the filesystem volume.
	if snapVol.IsVMBlock() {
		fsVol := snapVol.NewVMBlockFilesystemVolume()
		return d.UnmountVolumeSnapshot(fsVol, op)
	}

	return true, nil
}
// VolumeSnapshots returns a list of snapshots for the volume.
// Internal-only snapshots (zombies and in-flight migration snapshots) are
// filtered out, and the "snapshot_" storage prefix is stripped.
func (d *ceph) VolumeSnapshots(vol Volume, op *operations.Operation) ([]string, error) {
	rbdSnaps, err := d.rbdListVolumeSnapshots(vol)
	if err != nil {
		// A missing volume simply has no snapshots.
		if err == db.ErrNoSuchObject {
			return nil, nil
		}

		return nil, err
	}

	var names []string
	for _, rbdSnap := range rbdSnaps {
		// Ignore zombie snapshots as these are only used internally and
		// not relevant for users.
		isInternal := strings.HasPrefix(rbdSnap, "zombie_") || strings.HasPrefix(rbdSnap, "migration-send-")
		if isInternal {
			continue
		}

		names = append(names, strings.TrimPrefix(rbdSnap, "snapshot_"))
	}

	return names, nil
}
// RestoreVolume restores a volume from a snapshot.
func (d *ceph) RestoreVolume(vol Volume, snapshotName string, op *operations.Operation) error {
	// Unmount the volume first; remount it on the way out if we unmounted it.
	ourUmount, err := d.UnmountVolume(vol, op)
	if err != nil {
		return err
	}

	if ourUmount {
		defer d.MountVolume(vol, op)
	}

	// Roll the RBD image back to the requested snapshot
	// ("snapshot_<name>" on storage).
	_, err = shared.RunCommand(
		"rbd",
		"--id", d.config["ceph.user.name"],
		"--cluster", d.config["ceph.cluster_name"],
		"--pool", d.config["ceph.osd.pool_name"],
		"snap",
		"rollback",
		"--snap", fmt.Sprintf("snapshot_%s", snapshotName),
		d.getRBDVolumeName(vol, "", false, false))
	if err != nil {
		return err
	}

	snapVol, err := vol.NewSnapshot(snapshotName)
	if err != nil {
		return err
	}

	// Re-generate the UUID on the snapshot volume.
	// NOTE(review): presumably this regenerates the filesystem UUID so clones
	// can coexist — confirm against generateUUID's implementation.
	err = d.generateUUID(snapVol)
	if err != nil {
		return err
	}

	return nil
}
// RenameVolumeSnapshot renames a volume snapshot.
func (d *ceph) RenameVolumeSnapshot(snapVol Volume, newSnapshotName string, op *operations.Operation) error {
	revert := revert.New()
	defer revert.Fail()

	parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
	oldSnapOnlyName := fmt.Sprintf("snapshot_%s", snapshotOnlyName)
	newSnapOnlyName := fmt.Sprintf("snapshot_%s", newSnapshotName)
	parentVol := NewVolume(d, d.name, snapVol.volType, snapVol.contentType, parentName, nil, nil)

	// Rename the RBD snapshot on the parent image.
	err := d.rbdRenameVolumeSnapshot(parentVol, oldSnapOnlyName, newSnapOnlyName)
	if err != nil {
		return err
	}

	revert.Add(func() { d.rbdRenameVolumeSnapshot(parentVol, newSnapOnlyName, oldSnapOnlyName) })

	// Rename the on-disk mount path for filesystem snapshots.
	if snapVol.contentType == ContentTypeFS {
		err = genericVFSRenameVolumeSnapshot(d, snapVol, newSnapshotName, op)
		if err != nil {
			return err
		}
	}

	// For VM volumes, rename the associated filesystem volume snapshot too.
	if snapVol.IsVMBlock() {
		fsVol := snapVol.NewVMBlockFilesystemVolume()
		err := d.RenameVolumeSnapshot(fsVol, newSnapshotName, op)
		if err != nil {
			return err
		}

		revert.Add(func() {
			newFsVol := NewVolume(d, d.name, snapVol.volType, ContentTypeFS, fmt.Sprintf("%s/%s", parentName, newSnapshotName), snapVol.config, snapVol.poolConfig)
			d.RenameVolumeSnapshot(newFsVol, snapVol.name, op)
		})
	}

	revert.Success()
	return nil
}
|
package drivers
import (
"encoding/json"
"fmt"
"io"
"os"
"strings"
"github.com/pborman/uuid"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/migration"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/instancewriter"
"github.com/lxc/lxd/shared/ioprogress"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/units"
)
// CreateVolume creates an empty volume and can optionally fill it by executing the supplied
// filler function.
func (d *ceph) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Operation) error {
	// Revert handling.
	revert := revert.New()
	defer revert.Fail()

	if vol.contentType == ContentTypeFS {
		// Create mountpoint.
		err := vol.EnsureMountPath()
		if err != nil {
			return err
		}

		revert.Add(func() { os.Remove(vol.MountPath()) })
	}

	// Figure out the potential zombie volume.
	// Deleted image volumes are kept around under the "zombie_image" type
	// (named with their filesystem suffix) so they can be resurrected cheaply.
	zombieImageVol := NewVolume(d, d.name, VolumeType("zombie_image"), vol.contentType,
		fmt.Sprintf("%s_%s", vol.name, d.getRBDFilesystem(vol)), nil, nil)

	if (vol.volType == VolumeTypeVM || vol.volType == VolumeTypeImage) && vol.contentType == ContentTypeBlock {
		// Block content zombies carry an extra ".block" suffix.
		zombieImageVol = NewVolume(d, d.name, VolumeType("zombie_image"), vol.contentType,
			fmt.Sprintf("%s_%s.block", vol.name, d.getRBDFilesystem(vol)), nil, nil)
	}

	// Check if we have a zombie image. If so, restore it otherwise
	// create a new image volume.
	if vol.volType == VolumeTypeImage && d.HasVolume(zombieImageVol) {
		// Figure out the names.
		oldName := d.getRBDVolumeName(zombieImageVol, "", false, true)
		newName := d.getRBDVolumeName(vol, "", false, true)

		// Rename back to active.
		_, err := shared.RunCommand(
			"rbd",
			"--id", d.config["ceph.user.name"],
			"--cluster", d.config["ceph.cluster_name"],
			"mv",
			oldName,
			newName)
		if err != nil {
			return err
		}

		// For VMs, also create the filesystem volume.
		if vol.IsVMBlock() {
			fsVol := vol.NewVMBlockFilesystemVolume()
			err := d.CreateVolume(fsVol, nil, op)
			if err != nil {
				return err
			}

			revert.Add(func() { d.DeleteVolume(fsVol, op) })
		}

		revert.Success()
		return nil
	}

	// Get size.
	RBDSize, err := d.getRBDSize(vol)
	if err != nil {
		return err
	}

	// Create volume.
	err = d.rbdCreateVolume(vol, RBDSize)
	if err != nil {
		return err
	}

	revert.Add(func() { d.DeleteVolume(vol, op) })

	// Map the new RBD image to a local block device so it can be formatted/filled.
	RBDDevPath, err := d.rbdMapVolume(vol)
	if err != nil {
		return err
	}

	revert.Add(func() { d.rbdUnmapVolume(vol, true) })

	// Get filesystem.
	RBDFilesystem := d.getRBDFilesystem(vol)
	if vol.contentType == ContentTypeFS {
		_, err = makeFSType(RBDDevPath, RBDFilesystem, nil)
		if err != nil {
			return err
		}
	}

	// For VMs, also create the filesystem volume.
	if vol.IsVMBlock() {
		fsVol := vol.NewVMBlockFilesystemVolume()
		err := d.CreateVolume(fsVol, nil, op)
		if err != nil {
			return err
		}

		revert.Add(func() { d.DeleteVolume(fsVol, op) })
	}

	// Run the volume filler function if supplied.
	if filler != nil && filler.Fill != nil {
		err := vol.MountTask(func(mountPath string, op *operations.Operation) error {
			if vol.contentType == ContentTypeFS {
				// Filesystem volumes are filled via their mount path.
				return filler.Fill(mountPath, "")
			}

			// Block volumes are filled via their block device path.
			devPath, err := d.GetVolumeDiskPath(vol)
			if err != nil {
				return err
			}

			err = filler.Fill(mountPath, devPath)
			if err != nil {
				return err
			}

			// Move the GPT alt header to end of disk if needed.
			if vol.IsVMBlock() {
				err = d.moveGPTAltHeader(devPath)
				if err != nil {
					return err
				}
			}

			return err
		}, op)
		if err != nil {
			return err
		}
	}

	// Create a readonly snapshot of the image volume which will be used as the
	// clone source for future non-image volumes.
	if vol.volType == VolumeTypeImage {
		err = d.rbdUnmapVolume(vol, true)
		if err != nil {
			return err
		}

		err = d.rbdCreateVolumeSnapshot(vol, "readonly")
		if err != nil {
			return err
		}

		revert.Add(func() { d.deleteVolumeSnapshot(vol, "readonly") })

		// Protect the snapshot so it can be cloned from.
		err = d.rbdProtectVolumeSnapshot(vol, "readonly")
		if err != nil {
			return err
		}

		if vol.contentType == ContentTypeBlock {
			// Re-create the FS config volume's readonly snapshot now that the filler function has run and unpacked into both config and block volumes.
			fsVol := NewVolume(d, d.name, vol.volType, ContentTypeFS, vol.name, vol.config, vol.poolConfig)

			err := d.rbdUnprotectVolumeSnapshot(fsVol, "readonly")
			if err != nil {
				return err
			}

			_, err = d.deleteVolumeSnapshot(fsVol, "readonly")
			if err != nil {
				return err
			}

			err = d.rbdCreateVolumeSnapshot(fsVol, "readonly")
			if err != nil {
				return err
			}

			revert.Add(func() { d.deleteVolumeSnapshot(fsVol, "readonly") })

			err = d.rbdProtectVolumeSnapshot(fsVol, "readonly")
			if err != nil {
				return err
			}
		}
	}

	revert.Success()
	return nil
}
// CreateVolumeFromBackup re-creates a volume from its exported state.
// The optimizedStorage flag is not consulted here; unpacking always goes
// through the generic VFS helper.
func (d *ceph) CreateVolumeFromBackup(vol Volume, snapshots []string, srcData io.ReadSeeker, optimizedStorage bool, op *operations.Operation) (func(vol Volume) error, func(), error) {
	postHook, revertHook, err := genericVFSBackupUnpack(d, vol, snapshots, srcData, op)
	return postHook, revertHook, err
}
// CreateVolumeFromCopy provides same-pool volume copying functionality.
func (d *ceph) CreateVolumeFromCopy(vol Volume, srcVol Volume, copySnapshots bool, op *operations.Operation) error {
	var err error
	snapshots := []string{}

	revert := revert.New()
	defer revert.Fail()

	// Collect the source's snapshot list when snapshots should be copied too.
	if !srcVol.IsSnapshot() && copySnapshots {
		snapshots, err = d.VolumeSnapshots(srcVol, op)
		if err != nil {
			return err
		}
	}

	// Copy without snapshots.
	if !copySnapshots || len(snapshots) == 0 {
		// A full "rbd cp" is only used when clone copy has been explicitly
		// disabled and the source is not an image (images always use clones).
		if d.config["ceph.rbd.clone_copy"] != "" &&
			!shared.IsTrue(d.config["ceph.rbd.clone_copy"]) &&
			srcVol.volType != VolumeTypeImage {
			_, err = shared.RunCommand(
				"rbd",
				"--id", d.config["ceph.user.name"],
				"--cluster", d.config["ceph.cluster_name"],
				"cp",
				d.getRBDVolumeName(srcVol, "", false, true),
				d.getRBDVolumeName(vol, "", false, true))
			if err != nil {
				return err
			}

			revert.Add(func() { d.DeleteVolume(vol, op) })

			_, err = d.rbdMapVolume(vol)
			if err != nil {
				return err
			}

			revert.Add(func() { d.rbdUnmapVolume(vol, true) })
		} else {
			// Clone-based copy: snapshot the source (or reuse its existing
			// snapshot) and clone the new volume from that snapshot.
			parentVol := srcVol
			snapshotName := "readonly"

			if srcVol.volType != VolumeTypeImage {
				snapshotName = fmt.Sprintf("zombie_snapshot_%s", uuid.NewRandom().String())

				if srcVol.IsSnapshot() {
					srcParentName, srcSnapOnlyName, _ :=
						shared.InstanceGetParentAndSnapshotName(srcVol.name)
					snapshotName = fmt.Sprintf("snapshot_%s", srcSnapOnlyName)
					parentVol = NewVolume(d, d.name, srcVol.volType, srcVol.contentType, srcParentName, nil, nil)
				} else {
					// Create snapshot.
					err := d.rbdCreateVolumeSnapshot(srcVol, snapshotName)
					if err != nil {
						return err
					}
				}

				// Protect volume so we can create clones of it.
				err = d.rbdProtectVolumeSnapshot(parentVol, snapshotName)
				if err != nil {
					return err
				}

				revert.Add(func() { d.rbdUnprotectVolumeSnapshot(parentVol, snapshotName) })
			}

			err = d.rbdCreateClone(parentVol, snapshotName, vol)
			if err != nil {
				return err
			}

			revert.Add(func() { d.DeleteVolume(vol, op) })
		}

		if vol.contentType == ContentTypeFS {
			// Re-generate the UUID.
			err = d.generateUUID(vol)
			if err != nil {
				return err
			}
		}

		// For VMs, also copy the filesystem volume.
		if vol.IsVMBlock() {
			srcFSVol := srcVol.NewVMBlockFilesystemVolume()
			fsVol := vol.NewVMBlockFilesystemVolume()

			err := d.CreateVolumeFromCopy(fsVol, srcFSVol, false, op)
			if err != nil {
				return err
			}
		}

		revert.Success()
		return nil
	}

	// Copy with snapshots.
	// Create empty dummy volume to receive the incremental diffs into.
	err = d.rbdCreateVolume(vol, "0")
	if err != nil {
		return err
	}

	revert.Add(func() { d.rbdDeleteVolume(vol) })

	// Receive over the dummy volume we created above.
	targetVolumeName := d.getRBDVolumeName(vol, "", false, true)

	lastSnap := ""

	if len(snapshots) > 0 {
		err := createParentSnapshotDirIfMissing(d.name, vol.volType, vol.name)
		if err != nil {
			return err
		}
	}

	// Transfer each snapshot as an incremental diff on top of the previous one.
	for i, snap := range snapshots {
		// Fix: shadow the loop variable so each revert closure deletes its own
		// snapshot. Previously all closures captured the shared range variable
		// and would all delete only the last snapshot on failure.
		snap := snap

		prev := ""
		if i > 0 {
			prev = fmt.Sprintf("snapshot_%s", snapshots[i-1])
		}

		lastSnap = fmt.Sprintf("snapshot_%s", snap)
		sourceVolumeName := d.getRBDVolumeName(srcVol, lastSnap, false, true)

		err = d.copyWithSnapshots(
			sourceVolumeName,
			targetVolumeName,
			prev)
		if err != nil {
			return err
		}

		revert.Add(func() { d.rbdDeleteVolumeSnapshot(vol, snap) })

		snapVol, err := vol.NewSnapshot(snap)
		if err != nil {
			return err
		}

		err = snapVol.EnsureMountPath()
		if err != nil {
			return err
		}
	}

	// Copy snapshot.
	// Finally transfer the head of the volume on top of the last snapshot.
	sourceVolumeName := d.getRBDVolumeName(srcVol, "", false, true)

	err = d.copyWithSnapshots(
		sourceVolumeName,
		targetVolumeName,
		lastSnap)
	if err != nil {
		return err
	}

	// Re-generate the UUID.
	err = d.generateUUID(vol)
	if err != nil {
		return err
	}

	ourMount, err := d.MountVolume(vol, op)
	if err != nil {
		return err
	}

	if ourMount {
		defer d.UnmountVolume(vol, op)
	}

	revert.Success()
	return nil
}
// CreateVolumeFromMigration creates a volume being sent via a migration.
func (d *ceph) CreateVolumeFromMigration(vol Volume, conn io.ReadWriteCloser, volTargetArgs migration.VolumeTargetArgs, preFiller *VolumeFiller, op *operations.Operation) error {
	// Handle simple rsync and block_and_rsync through generic.
	if volTargetArgs.MigrationType.FSType == migration.MigrationFSType_RSYNC || volTargetArgs.MigrationType.FSType == migration.MigrationFSType_BLOCK_AND_RSYNC {
		return genericVFSCreateVolumeFromMigration(d, nil, vol, conn, volTargetArgs, preFiller, op)
	} else if volTargetArgs.MigrationType.FSType != migration.MigrationFSType_RBD {
		return ErrNotSupported
	}

	// For VMs, receive the filesystem volume first.
	if vol.IsVMBlock() {
		fsVol := vol.NewVMBlockFilesystemVolume()
		err := d.CreateVolumeFromMigration(fsVol, conn, volTargetArgs, preFiller, op)
		if err != nil {
			return err
		}
	}

	recvName := d.getRBDVolumeName(vol, "", false, true)

	// Create a placeholder volume to receive into if one doesn't exist yet.
	if !d.HasVolume(vol) {
		err := d.rbdCreateVolume(vol, "0")
		if err != nil {
			return err
		}
	}

	err := vol.EnsureMountPath()
	if err != nil {
		return err
	}

	// Receive any snapshots before the main volume.
	if len(volTargetArgs.Snapshots) > 0 {
		// Create the parent directory.
		err := createParentSnapshotDirIfMissing(d.name, vol.volType, vol.name)
		if err != nil {
			return err
		}

		// Transfer the snapshots.
		for _, snapName := range volTargetArgs.Snapshots {
			fullSnapshotName := d.getRBDVolumeName(vol, snapName, false, true)
			wrapper := migration.ProgressWriter(op, "fs_progress", fullSnapshotName)

			err = d.receiveVolume(recvName, conn, wrapper)
			if err != nil {
				return err
			}

			snapVol, err := vol.NewSnapshot(snapName)
			if err != nil {
				return err
			}

			err = snapVol.EnsureMountPath()
			if err != nil {
				return err
			}
		}
	}

	// Clean up transient migration snapshots on the way out, whether the
	// transfer succeeded or not.
	defer func() {
		// Delete all migration-send-* snapshots.
		snaps, err := d.rbdListVolumeSnapshots(vol)
		if err != nil {
			return
		}

		for _, snap := range snaps {
			if !strings.HasPrefix(snap, "migration-send") {
				continue
			}

			d.rbdDeleteVolumeSnapshot(vol, snap)
		}
	}()

	// Receive the main volume.
	wrapper := migration.ProgressWriter(op, "fs_progress", vol.name)
	err = d.receiveVolume(recvName, conn, wrapper)
	if err != nil {
		return err
	}

	// For live migration a second, final delta is sent after the source
	// instance has been paused.
	if volTargetArgs.Live {
		err = d.receiveVolume(recvName, conn, wrapper)
		if err != nil {
			return err
		}
	}

	// Re-generate the UUID.
	err = d.generateUUID(vol)
	if err != nil {
		return err
	}

	return nil
}
// RefreshVolume updates an existing volume to match the state of another.
func (d *ceph) RefreshVolume(vol Volume, srcVol Volume, srcSnapshots []Volume, op *operations.Operation) error {
	// Delegate to the generic VFS copy helper in refresh mode (true).
	err := genericVFSCopyVolume(d, nil, vol, srcVol, srcSnapshots, true, op)
	return err
}
// DeleteVolume deletes a volume of the storage device. If any snapshots of the volume remain then
// this function will return an error.
func (d *ceph) DeleteVolume(vol Volume, op *operations.Operation) error {
	if vol.volType == VolumeTypeImage {
		// Try to umount but don't fail.
		d.UnmountVolume(vol, op)

		// Check if image has dependant snapshots.
		// A db.ErrNoSuchObject error is treated as "no dependant clones",
		// in which case the image can be fully removed; a nil error means
		// clones still exist and the image is only marked deleted (zombie).
		_, err := d.rbdListSnapshotClones(vol, "readonly")
		if err != nil {
			if err != db.ErrNoSuchObject {
				return err
			}

			// Unprotect snapshot.
			err = d.rbdUnprotectVolumeSnapshot(vol, "readonly")
			if err != nil {
				return err
			}

			// Delete snapshots.
			_, err = shared.RunCommand(
				"rbd",
				"--id", d.config["ceph.user.name"],
				"--cluster", d.config["ceph.cluster_name"],
				"--pool", d.config["ceph.osd.pool_name"],
				"snap",
				"purge",
				d.getRBDVolumeName(vol, "", false, false))
			if err != nil {
				return err
			}

			// Unmap image.
			err = d.rbdUnmapVolume(vol, true)
			if err != nil {
				return err
			}

			// Delete image.
			err = d.rbdDeleteVolume(vol)
		} else {
			// Clones still depend on this image: keep it as a zombie.
			err = d.rbdUnmapVolume(vol, true)
			if err != nil {
				return err
			}

			err = d.rbdMarkVolumeDeleted(vol, vol.name)
		}
		// Check the err from whichever branch ran above.
		if err != nil {
			return err
		}
	} else {
		if !d.HasVolume(vol) {
			return nil
		}

		_, err := d.UnmountVolume(vol, op)
		if err != nil {
			return err
		}

		_, err = d.deleteVolume(vol)
		if err != nil {
			return errors.Wrap(err, "Failed to delete volume")
		}
	}

	// For VMs, also delete the filesystem volume.
	if vol.IsVMBlock() {
		fsVol := vol.NewVMBlockFilesystemVolume()
		err := d.DeleteVolume(fsVol, op)
		if err != nil {
			return err
		}
	}

	// Clean up the mount point on the host filesystem.
	mountPath := vol.MountPath()

	if vol.contentType == ContentTypeFS && shared.PathExists(mountPath) {
		err := wipeDirectory(mountPath)
		if err != nil {
			return err
		}

		err = os.Remove(mountPath)
		if err != nil && !os.IsNotExist(err) {
			return errors.Wrapf(err, "Failed to remove '%s'", mountPath)
		}
	}

	return nil
}
// HasVolume indicates whether a specific volume exists on the storage pool.
// The volume is considered present when "rbd image-meta list" succeeds for it.
func (d *ceph) HasVolume(vol Volume) bool {
	args := []string{
		"--id", d.config["ceph.user.name"],
		"--cluster", d.config["ceph.cluster_name"],
		"--pool", d.config["ceph.osd.pool_name"],
		"image-meta",
		"list",
		d.getRBDVolumeName(vol, "", false, false),
	}

	_, err := shared.RunCommand("rbd", args...)
	return err == nil
}
// ValidateVolume validates the supplied volume config.
// Both block-related keys are free-form strings.
func (d *ceph) ValidateVolume(vol Volume, removeUnknownKeys bool) error {
	volumeRules := map[string]func(value string) error{
		"block.mount_options": shared.IsAny,
		"block.filesystem":    shared.IsAny,
	}

	return d.validateVolume(vol, volumeRules, removeUnknownKeys)
}
// UpdateVolume applies config changes to the volume.
// Only custom volumes may be updated; only the "size" key is acted upon.
func (d *ceph) UpdateVolume(vol Volume, changedConfig map[string]string) error {
	if vol.volType != VolumeTypeCustom {
		return ErrNotSupported
	}

	// Apply a size change, if one was requested.
	if newSize, ok := changedConfig["size"]; ok {
		return d.SetVolumeQuota(vol, newSize, nil)
	}

	return nil
}
// GetVolumeUsage returns the disk space used by the volume.
func (d *ceph) GetVolumeUsage(vol Volume) (int64, error) {
	// For mounted filesystem volumes, report usage from the filesystem itself.
	if vol.contentType == ContentTypeFS && shared.IsMountPoint(vol.MountPath()) {
		var stat unix.Statfs_t

		err := unix.Statfs(vol.MountPath(), &stat)
		if err != nil {
			return -1, err
		}

		return int64(stat.Blocks-stat.Bfree) * int64(stat.Bsize), nil
	}

	// Otherwise ask Ceph via "rbd du" in JSON format.
	type cephDuLine struct {
		Name            string `json:"name"`
		Snapshot        string `json:"snapshot"`
		ProvisionedSize int64  `json:"provisioned_size"`
		UsedSize        int64  `json:"used_size"`
	}

	type cephDuInfo struct {
		Images []cephDuLine `json:"images"`
	}

	jsonInfo, err := shared.TryRunCommand(
		"rbd",
		"du",
		"--format", "json",
		"--id", d.config["ceph.user.name"],
		"--cluster", d.config["ceph.cluster_name"],
		"--pool", d.config["ceph.osd.pool_name"],
		d.getRBDVolumeName(vol, "", false, false))
	if err != nil {
		return -1, err
	}

	var usedSize int64
	var result cephDuInfo

	err = json.Unmarshal([]byte(jsonInfo), &result)
	if err != nil {
		return -1, err
	}

	// rbd du gives the output of all related rbd images, snapshots included.
	// To get the total size of the image we use the entry that does not carry
	// a snapshot name: that is the total image size.
	for _, image := range result.Images {
		if image.Snapshot == "" {
			usedSize = image.UsedSize
		}
	}

	return usedSize, nil
}
// SetVolumeQuota applies a size limit on volume.
func (d *ceph) SetVolumeQuota(vol Volume, size string, op *operations.Operation) error {
	fsType := d.getRBDFilesystem(vol)

	RBDDevPath, err := d.getRBDMappedDevPath(vol)
	if err != nil {
		return err
	}

	// The grow/shrink functions use Mount/Unmount which may cause an unmap, so make sure to keep a reference.
	oldKeepDevice := vol.keepDevice
	vol.keepDevice = true
	defer func() {
		vol.keepDevice = oldKeepDevice
	}()

	oldSizeBytes, err := blockDevSizeBytes(RBDDevPath)
	if err != nil {
		return errors.Wrapf(err, "Error getting current size")
	}

	newSizeBytes, err := units.ParseByteSizeString(size)
	if err != nil {
		return err
	}

	// The right disjunct just means that someone unset the size property in the instance's config.
	// We obviously cannot resize to 0.
	if oldSizeBytes == newSizeBytes || newSizeBytes == 0 {
		return nil
	}

	// Resize filesystem if needed.
	if vol.contentType == ContentTypeFS {
		if newSizeBytes < oldSizeBytes {
			// Shrink the filesystem first, then the block device.
			err = shrinkFileSystem(fsType, RBDDevPath, vol, newSizeBytes)
			if err != nil {
				return err
			}

			_, err = shared.TryRunCommand(
				"rbd",
				"resize",
				"--allow-shrink",
				"--id", d.config["ceph.user.name"],
				"--cluster", d.config["ceph.cluster_name"],
				"--pool", d.config["ceph.osd.pool_name"],
				"--size", fmt.Sprintf("%dB", newSizeBytes),
				d.getRBDVolumeName(vol, "", false, false))
			if err != nil {
				return err
			}
		} else {
			// Grow the block device.
			_, err = shared.TryRunCommand(
				"rbd",
				"resize",
				"--id", d.config["ceph.user.name"],
				"--cluster", d.config["ceph.cluster_name"],
				"--pool", d.config["ceph.osd.pool_name"],
				"--size", fmt.Sprintf("%dB", newSizeBytes),
				d.getRBDVolumeName(vol, "", false, false))
			if err != nil {
				return err
			}

			// Grow the filesystem.
			err = growFileSystem(fsType, RBDDevPath, vol)
			if err != nil {
				return err
			}
		}
	} else {
		// Block volumes cannot be shrunk safely.
		if newSizeBytes < oldSizeBytes {
			return fmt.Errorf("You cannot shrink block volumes")
		}

		// Grow the block device.
		_, err = shared.TryRunCommand(
			"rbd",
			"resize",
			"--id", d.config["ceph.user.name"],
			"--cluster", d.config["ceph.cluster_name"],
			"--pool", d.config["ceph.osd.pool_name"],
			"--size", fmt.Sprintf("%dB", newSizeBytes),
			d.getRBDVolumeName(vol, "", false, false))
		if err != nil {
			return err
		}

		// Move the GPT alt header to end of disk if needed.
		if vol.IsVMBlock() {
			err = d.moveGPTAltHeader(RBDDevPath)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// GetVolumeDiskPath returns the location of a root disk block device.
// Only VM block volumes have a block device path; anything else is unsupported.
func (d *ceph) GetVolumeDiskPath(vol Volume) (string, error) {
	if !vol.IsVMBlock() {
		return "", ErrNotSupported
	}

	return d.getRBDMappedDevPath(vol)
}
// MountVolume simulates mounting a volume.
// Returns true when this call performed the mount.
func (d *ceph) MountVolume(vol Volume, op *operations.Operation) (bool, error) {
	mountPath := vol.MountPath()

	// Filesystem volumes: map the RBD image and mount it if not already mounted.
	if vol.contentType == ContentTypeFS && !shared.IsMountPoint(mountPath) {
		RBDFilesystem := d.getRBDFilesystem(vol)

		err := vol.EnsureMountPath()
		if err != nil {
			return false, err
		}

		RBDDevPath, err := d.getRBDMappedDevPath(vol)
		if err != nil {
			return false, err
		}

		mountFlags, mountOptions := resolveMountOptions(d.getRBDMountOptions(vol))
		err = TryMount(RBDDevPath, mountPath, RBDFilesystem, mountFlags, mountOptions)
		if err != nil {
			return false, err
		}

		d.logger.Debug("Mounted RBD volume", log.Ctx{"dev": RBDDevPath, "path": mountPath, "options": mountOptions})
		return true, nil
	}

	// For VMs, mount the filesystem volume.
	if vol.IsVMBlock() {
		fsVol := vol.NewVMBlockFilesystemVolume()
		return d.MountVolume(fsVol, op)
	}

	return false, nil
}
// UnmountVolume simulates unmounting a volume.
// Returns true when the volume ends up unmounted (including when nothing was
// mounted to begin with).
func (d *ceph) UnmountVolume(vol Volume, op *operations.Operation) (bool, error) {
	// For VMs, also unmount the filesystem dataset.
	if vol.IsVMBlock() {
		fsVol := vol.NewVMBlockFilesystemVolume()
		_, err := d.UnmountVolume(fsVol, op)
		if err != nil {
			return false, err
		}
	}

	// Attempt to unmount the volume.
	mountPath := vol.MountPath()
	if shared.IsMountPoint(mountPath) {
		err := TryUnmount(mountPath, unix.MNT_DETACH)
		if err != nil {
			return false, err
		}

		d.logger.Debug("Unmounted RBD volume", log.Ctx{"path": mountPath})
	}

	// Attempt to unmap.
	// keepDevice is set by callers (e.g. SetVolumeQuota) that still need the
	// mapped block device after the unmount.
	if !vol.keepDevice {
		err := d.rbdUnmapVolume(vol, true)
		if err != nil {
			return true, err
		}
	}

	return true, nil
}
// RenameVolume renames a volume and its snapshots.
func (d *ceph) RenameVolume(vol Volume, newName string, op *operations.Operation) error {
	revert := revert.New()
	defer revert.Fail()

	// Unmount the volume before renaming the underlying RBD image.
	_, err := d.UnmountVolume(vol, op)
	if err != nil {
		return err
	}

	err = d.rbdUnmapVolume(vol, true)
	if err != nil {
		// Fix: previously returned nil here, silently reporting success
		// when the unmap failed.
		return err
	}

	revert.Add(func() { d.rbdMapVolume(vol) })

	// Rename the RBD image.
	err = d.rbdRenameVolume(vol, newName)
	if err != nil {
		return err
	}

	newVol := NewVolume(d, d.name, vol.volType, vol.contentType, newName, nil, nil)
	revert.Add(func() { d.rbdRenameVolume(newVol, vol.name) })

	_, err = d.rbdMapVolume(newVol)
	if err != nil {
		return err
	}

	// Rename the on-disk mount paths (and snapshot directories).
	err = genericVFSRenameVolume(d, vol, newName, op)
	if err != nil {
		// Fix: previously returned nil here too, masking rename failures.
		return err
	}

	revert.Success()
	return nil
}
// MigrateVolume sends a volume for migration.
func (d *ceph) MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs *migration.VolumeSourceArgs, op *operations.Operation) error {
	// If data is set, this request is coming from the clustering code.
	// In this case, we only need to unmap and rename the rbd image.
	if volSrcArgs.Data != nil {
		data, ok := volSrcArgs.Data.(string)
		if ok {
			err := d.rbdUnmapVolume(vol, true)
			if err != nil {
				return err
			}

			// Rename volume.
			if vol.name != data {
				err = d.rbdRenameVolume(vol, data)
				if err != nil {
					return err
				}
			}

			return nil
		}
	}

	// Handle simple rsync and block_and_rsync through generic.
	if volSrcArgs.MigrationType.FSType == migration.MigrationFSType_RSYNC || volSrcArgs.MigrationType.FSType == migration.MigrationFSType_BLOCK_AND_RSYNC {
		return genericVFSMigrateVolume(d, d.state, vol, conn, volSrcArgs, op)
	} else if volSrcArgs.MigrationType.FSType != migration.MigrationFSType_RBD {
		return ErrNotSupported
	}

	// For VMs, send the filesystem volume first.
	if vol.IsVMBlock() {
		fsVol := vol.NewVMBlockFilesystemVolume()
		err := d.MigrateVolume(fsVol, conn, volSrcArgs, op)
		if err != nil {
			return err
		}
	}

	// Sending a single snapshot: send from its temporary mounted clone.
	if vol.IsSnapshot() {
		parentName, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(vol.name)
		sendName := fmt.Sprintf("%s/snapshots_%s_%s_start_clone", d.name, parentName, snapOnlyName)
		cloneVol := NewVolume(d, d.name, vol.volType, vol.contentType, vol.name, nil, nil)

		// Mounting the volume snapshot will create the clone "snapshots_<parent>_<snap>_start_clone".
		_, err := d.MountVolumeSnapshot(cloneVol, op)
		if err != nil {
			return err
		}
		defer d.UnmountVolumeSnapshot(cloneVol, op)

		// Setup progress tracking.
		var wrapper *ioprogress.ProgressTracker
		if volSrcArgs.TrackProgress {
			wrapper = migration.ProgressTracker(op, "fs_progress", vol.name)
		}

		err = d.sendVolume(conn, sendName, "", wrapper)
		if err != nil {
			return err
		}

		return nil
	}

	// Send all requested snapshots as incremental deltas. This is skipped on
	// the final sync of a live migration, where they were already sent.
	lastSnap := ""
	if !volSrcArgs.FinalSync {
		for i, snapName := range volSrcArgs.Snapshots {
			snapshot, _ := vol.NewSnapshot(snapName)

			prev := ""
			if i > 0 {
				prev = fmt.Sprintf("snapshot_%s", volSrcArgs.Snapshots[i-1])
			}

			lastSnap = fmt.Sprintf("snapshot_%s", snapName)
			sendSnapName := d.getRBDVolumeName(vol, lastSnap, false, true)

			// Setup progress tracking.
			var wrapper *ioprogress.ProgressTracker
			if volSrcArgs.TrackProgress {
				wrapper = migration.ProgressTracker(op, "fs_progress", snapshot.name)
			}

			err := d.sendVolume(conn, sendSnapName, prev, wrapper)
			if err != nil {
				return err
			}
		}
	}

	// Setup progress tracking.
	var wrapper *ioprogress.ProgressTracker
	if volSrcArgs.TrackProgress {
		wrapper = migration.ProgressTracker(op, "fs_progress", vol.name)
	}

	// Snapshot the running volume so the send is point-in-time consistent;
	// the snapshot is removed again once the send completes.
	runningSnapName := fmt.Sprintf("migration-send-%s", uuid.NewRandom().String())

	err := d.rbdCreateVolumeSnapshot(vol, runningSnapName)
	if err != nil {
		return err
	}
	defer d.rbdDeleteVolumeSnapshot(vol, runningSnapName)

	cur := d.getRBDVolumeName(vol, runningSnapName, false, true)
	err = d.sendVolume(conn, cur, lastSnap, wrapper)
	if err != nil {
		return err
	}

	return nil
}
// BackupVolume creates an exported version of a volume.
// NOTE(review): the "optimized" argument is not passed through; backups always
// use the generic VFS export path.
func (d *ceph) BackupVolume(vol Volume, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error {
	return genericVFSBackupVolume(d, vol, tarWriter, snapshots, op)
}
// CreateVolumeSnapshot creates a snapshot of a volume.
func (d *ceph) CreateVolumeSnapshot(snapVol Volume, op *operations.Operation) error {
	revert := revert.New()
	defer revert.Fail()

	parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
	sourcePath := GetVolumeMountPath(d.name, snapVol.volType, parentName)
	snapshotName := fmt.Sprintf("snapshot_%s", snapshotOnlyName)

	if shared.IsMountPoint(sourcePath) {
		// This is costly but we need to ensure that all cached data has
		// been committed to disk. If we don't then the rbd snapshot of
		// the underlying filesystem can be inconsistent or - worst case
		// - empty.
		unix.Sync()

		// Freeze the filesystem for the duration of the snapshot; a failed
		// freeze is tolerated (best effort only).
		_, err := shared.TryRunCommand("fsfreeze", "--freeze", sourcePath)
		if err == nil {
			defer shared.TryRunCommand("fsfreeze", "--unfreeze", sourcePath)
		}
	}

	// Create the parent directory.
	err := createParentSnapshotDirIfMissing(d.name, snapVol.volType, parentName)
	if err != nil {
		return err
	}

	err = snapVol.EnsureMountPath()
	if err != nil {
		return err
	}

	// The RBD snapshot is taken on the parent image, named "snapshot_<name>".
	parentVol := NewVolume(d, d.name, snapVol.volType, snapVol.contentType, parentName, nil, nil)
	err = d.rbdCreateVolumeSnapshot(parentVol, snapshotName)
	if err != nil {
		return err
	}

	revert.Add(func() { d.DeleteVolumeSnapshot(snapVol, op) })

	// For VM volumes, snapshot the filesystem volume too.
	if snapVol.IsVMBlock() {
		fsVol := snapVol.NewVMBlockFilesystemVolume()
		err := d.CreateVolumeSnapshot(fsVol, op)
		if err != nil {
			return err
		}

		revert.Add(func() { d.DeleteVolumeSnapshot(fsVol, op) })
	}

	revert.Success()
	return nil
}
// DeleteVolumeSnapshot removes a snapshot from the storage device.
func (d *ceph) DeleteVolumeSnapshot(snapVol Volume, op *operations.Operation) error {
	// Check if snapshot exists, and return if not.
	// NOTE(review): any "rbd info" failure (including cluster errors) is
	// treated as "snapshot absent" and reported as success.
	_, err := shared.RunCommand(
		"rbd",
		"--id", d.config["ceph.user.name"],
		"--cluster", d.config["ceph.cluster_name"],
		"--pool", d.config["ceph.osd.pool_name"],
		"info",
		d.getRBDVolumeName(snapVol, "", false, false))
	if err != nil {
		return nil
	}

	// The RBD snapshot lives on the parent image, named "snapshot_<name>".
	parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
	snapshotName := fmt.Sprintf("snapshot_%s", snapshotOnlyName)

	parentVol := NewVolume(d, d.name, snapVol.volType, snapVol.contentType, parentName, nil, nil)
	_, err = d.deleteVolumeSnapshot(parentVol, snapshotName)
	if err != nil {
		return errors.Wrap(err, "Failed to delete volume snapshot")
	}

	// Clean up the snapshot's mount point on the host filesystem.
	mountPath := snapVol.MountPath()

	if snapVol.contentType == ContentTypeFS && shared.PathExists(mountPath) {
		err = wipeDirectory(mountPath)
		if err != nil {
			return err
		}

		err = os.Remove(mountPath)
		if err != nil && !os.IsNotExist(err) {
			return errors.Wrapf(err, "Failed to remove '%s'", mountPath)
		}
	}

	// Remove the parent snapshot directory if this is the last snapshot being removed.
	err = deleteParentSnapshotDirIfEmpty(d.name, snapVol.volType, parentName)
	if err != nil {
		return err
	}

	// For VM images, delete the filesystem volume too.
	if snapVol.IsVMBlock() {
		fsVol := snapVol.NewVMBlockFilesystemVolume()
		err := d.DeleteVolumeSnapshot(fsVol, op)
		if err != nil {
			return err
		}
	}

	return nil
}
// MountVolumeSnapshot simulates mounting a volume snapshot.
// RBD snapshots are not mounted directly: the snapshot is protected, cloned to
// a temporary "<parent>_<snap>_start_clone" image, and that clone is mapped and
// mounted at the snapshot's mount path. Returns true if this call performed the
// mount, false if nothing was mounted here.
func (d *ceph) MountVolumeSnapshot(snapVol Volume, op *operations.Operation) (bool, error) {
    mountPath := snapVol.MountPath()
    if snapVol.contentType == ContentTypeFS && !shared.IsMountPoint(mountPath) {
        revert := revert.New()
        defer revert.Fail()
        parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
        prefixedSnapOnlyName := fmt.Sprintf("snapshot_%s", snapshotOnlyName)
        parentVol := NewVolume(d, d.name, snapVol.volType, snapVol.contentType, parentName, nil, nil)
        // Protect snapshot to prevent data loss.
        err := d.rbdProtectVolumeSnapshot(parentVol, prefixedSnapOnlyName)
        if err != nil {
            return false, err
        }
        revert.Add(func() { d.rbdUnprotectVolumeSnapshot(parentVol, prefixedSnapOnlyName) })
        // Clone snapshot.
        cloneName := fmt.Sprintf("%s_%s_start_clone", parentName, snapshotOnlyName)
        cloneVol := NewVolume(d, d.name, VolumeType("snapshots"), ContentTypeFS, cloneName, nil, nil)
        err = d.rbdCreateClone(parentVol, prefixedSnapOnlyName, cloneVol)
        if err != nil {
            return false, err
        }
        revert.Add(func() { d.rbdDeleteVolume(cloneVol) })
        // Map volume.
        rbdDevPath, err := d.rbdMapVolume(cloneVol)
        if err != nil {
            return false, err
        }
        revert.Add(func() { d.rbdUnmapVolume(cloneVol, true) })
        // NOTE(review): mountPath was already checked as unmounted above. If it
        // became a mount point in the meantime, returning false here lets the
        // deferred revert undo the protect/clone/map done so far — confirm intended.
        if shared.IsMountPoint(mountPath) {
            return false, nil
        }
        err = snapVol.EnsureMountPath()
        if err != nil {
            return false, err
        }
        RBDFilesystem := d.getRBDFilesystem(snapVol)
        mountFlags, mountOptions := resolveMountOptions(d.getRBDMountOptions(snapVol))
        // XFS needs "nouuid" here; presumably the clone carries the same
        // filesystem UUID as the parent and XFS refuses duplicates — verify.
        if RBDFilesystem == "xfs" {
            idx := strings.Index(mountOptions, "nouuid")
            if idx < 0 {
                mountOptions += ",nouuid"
            }
        }
        err = TryMount(rbdDevPath, mountPath, RBDFilesystem, mountFlags, mountOptions)
        if err != nil {
            return false, err
        }
        d.logger.Debug("Mounted RBD volume snapshot", log.Ctx{"dev": rbdDevPath, "path": mountPath, "options": mountOptions})
        revert.Success()
        return true, nil
    }
    // For VMs, mount the filesystem volume.
    if snapVol.IsVMBlock() {
        fsVol := snapVol.NewVMBlockFilesystemVolume()
        return d.MountVolumeSnapshot(fsVol, op)
    }
    return false, nil
}
// UnmountVolumeSnapshot simulates unmounting a volume snapshot.
// It unmounts the snapshot's mount path, then unmaps and deletes the temporary
// "<parent>_<snap>_start_clone" image created by MountVolumeSnapshot.
// Returns true if this call performed the unmount.
func (d *ceph) UnmountVolumeSnapshot(snapVol Volume, op *operations.Operation) (bool, error) {
    mountPath := snapVol.MountPath()
    if !shared.IsMountPoint(mountPath) {
        // Nothing mounted here; nothing to do.
        return false, nil
    }
    err := TryUnmount(mountPath, unix.MNT_DETACH)
    if err != nil {
        return false, err
    }
    d.logger.Debug("Unmounted RBD volume snapshot", log.Ctx{"path": mountPath})
    // Reconstruct the temporary clone volume used for mounting.
    parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
    cloneName := fmt.Sprintf("%s_%s_start_clone", parentName, snapshotOnlyName)
    cloneVol := NewVolume(d, d.name, VolumeType("snapshots"), ContentTypeFS, cloneName, nil, nil)
    // Unmap the clone's RBD device.
    err = d.rbdUnmapVolume(cloneVol, true)
    if err != nil {
        return false, err
    }
    if !d.HasVolume(cloneVol) {
        // Clone already gone; the unmount itself succeeded.
        return true, nil
    }
    // Delete the temporary RBD volume.
    err = d.rbdDeleteVolume(cloneVol)
    if err != nil {
        return false, err
    }
    // For VMs, also unmount the associated filesystem volume.
    if snapVol.IsVMBlock() {
        fsVol := snapVol.NewVMBlockFilesystemVolume()
        return d.UnmountVolumeSnapshot(fsVol, op)
    }
    return true, nil
}
// VolumeSnapshots returns a list of snapshots for the volume.
// Internal snapshots (zombies and in-flight migration snapshots) are omitted,
// and the "snapshot_" storage prefix is stripped from the returned names.
func (d *ceph) VolumeSnapshots(vol Volume, op *operations.Operation) ([]string, error) {
    rbdSnaps, err := d.rbdListVolumeSnapshots(vol)
    if err == db.ErrNoSuchObject {
        // Volume has no snapshots; report an empty list rather than an error.
        return nil, nil
    }
    if err != nil {
        return nil, err
    }
    var names []string
    for _, rbdSnap := range rbdSnaps {
        // Ignore zombie snapshots as these are only used internally and
        // not relevant for users.
        isInternal := strings.HasPrefix(rbdSnap, "zombie_") || strings.HasPrefix(rbdSnap, "migration-send-")
        if isInternal {
            continue
        }
        names = append(names, strings.TrimPrefix(rbdSnap, "snapshot_"))
    }
    return names, nil
}
// RestoreVolume restores a volume from a snapshot.
// The volume is unmounted, the RBD image is rolled back to
// "snapshot_<snapshotName>", and the volume is remounted on the way out if we
// were the ones who unmounted it.
func (d *ceph) RestoreVolume(vol Volume, snapshotName string, op *operations.Operation) error {
    // Unmount so the rollback operates on a quiesced image.
    ourUmount, err := d.UnmountVolume(vol, op)
    if err != nil {
        return err
    }
    if ourUmount {
        // Best-effort remount after the rollback completes.
        defer d.MountVolume(vol, op)
    }
    // Roll the RBD image back to the requested snapshot.
    _, err = shared.RunCommand(
        "rbd",
        "--id", d.config["ceph.user.name"],
        "--cluster", d.config["ceph.cluster_name"],
        "--pool", d.config["ceph.osd.pool_name"],
        "snap",
        "rollback",
        "--snap", fmt.Sprintf("snapshot_%s", snapshotName),
        d.getRBDVolumeName(vol, "", false, false))
    if err != nil {
        return err
    }
    snapVol, err := vol.NewSnapshot(snapshotName)
    if err != nil {
        return err
    }
    // Re-generate the UUID for the snapshot volume.
    err = d.generateUUID(snapVol)
    if err != nil {
        return err
    }
    return nil
}
// RenameVolumeSnapshot renames a volume snapshot.
// The RBD snapshot "snapshot_<old>" on the parent image is renamed to
// "snapshot_<new>", and for filesystem volumes the on-disk mount path is
// renamed too. All steps are reverted if a later step fails.
func (d *ceph) RenameVolumeSnapshot(snapVol Volume, newSnapshotName string, op *operations.Operation) error {
    revert := revert.New()
    defer revert.Fail()
    parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
    oldSnapOnlyName := fmt.Sprintf("snapshot_%s", snapshotOnlyName)
    newSnapOnlyName := fmt.Sprintf("snapshot_%s", newSnapshotName)
    parentVol := NewVolume(d, d.name, snapVol.volType, snapVol.contentType, parentName, nil, nil)
    // Rename the RBD snapshot on the parent image.
    err := d.rbdRenameVolumeSnapshot(parentVol, oldSnapOnlyName, newSnapOnlyName)
    if err != nil {
        return err
    }
    revert.Add(func() { d.rbdRenameVolumeSnapshot(parentVol, newSnapOnlyName, oldSnapOnlyName) })
    // Rename the on-disk mount path for filesystem volumes.
    if snapVol.contentType == ContentTypeFS {
        err = genericVFSRenameVolumeSnapshot(d, snapVol, newSnapshotName, op)
        if err != nil {
            return err
        }
    }
    // For VM images, rename the filesystem volume's snapshot too.
    if snapVol.IsVMBlock() {
        fsVol := snapVol.NewVMBlockFilesystemVolume()
        err := d.RenameVolumeSnapshot(fsVol, newSnapshotName, op)
        if err != nil {
            return err
        }
        revert.Add(func() {
            newFsVol := NewVolume(d, d.name, snapVol.volType, ContentTypeFS, fmt.Sprintf("%s/%s", parentName, newSnapshotName), snapVol.config, snapVol.poolConfig)
            d.RenameVolumeSnapshot(newFsVol, snapVol.name, op)
        })
    }
    revert.Success()
    return nil
}
lxd/storage/drivers/driver/ceph/volumes: Update d.generateUUID usage for its new signature
Map volumes outside of d.generateUUID so the caller stays in control of what gets mapped and unmapped.
Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com>
package drivers
import (
"encoding/json"
"fmt"
"io"
"os"
"strings"
"github.com/pborman/uuid"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/migration"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/instancewriter"
"github.com/lxc/lxd/shared/ioprogress"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/units"
)
// CreateVolume creates an empty volume and can optionally fill it by executing the supplied
// filler function.
// If a matching "zombie_image" volume exists it is renamed back to active instead
// of being re-created. For image volumes a protected "readonly" snapshot is
// created afterwards to act as the clone source for future volumes.
func (d *ceph) CreateVolume(vol Volume, filler *VolumeFiller, op *operations.Operation) error {
    // Revert handling.
    revert := revert.New()
    defer revert.Fail()
    if vol.contentType == ContentTypeFS {
        // Create mountpoint.
        err := vol.EnsureMountPath()
        if err != nil {
            return err
        }
        revert.Add(func() { os.Remove(vol.MountPath()) })
    }
    // Figure out the potential zombie volume. The zombie name embeds the
    // volume's filesystem (and a ".block" suffix for VM/image block content).
    zombieImageVol := NewVolume(d, d.name, VolumeType("zombie_image"), vol.contentType,
        fmt.Sprintf("%s_%s", vol.name, d.getRBDFilesystem(vol)), nil, nil)
    if (vol.volType == VolumeTypeVM || vol.volType == VolumeTypeImage) && vol.contentType == ContentTypeBlock {
        zombieImageVol = NewVolume(d, d.name, VolumeType("zombie_image"), vol.contentType,
            fmt.Sprintf("%s_%s.block", vol.name, d.getRBDFilesystem(vol)), nil, nil)
    }
    // Check if we have a zombie image. If so, restore it otherwise
    // create a new image volume.
    if vol.volType == VolumeTypeImage && d.HasVolume(zombieImageVol) {
        // Figure out the names.
        oldName := d.getRBDVolumeName(zombieImageVol, "", false, true)
        newName := d.getRBDVolumeName(vol, "", false, true)
        // Rename back to active.
        _, err := shared.RunCommand(
            "rbd",
            "--id", d.config["ceph.user.name"],
            "--cluster", d.config["ceph.cluster_name"],
            "mv",
            oldName,
            newName)
        if err != nil {
            return err
        }
        // For VMs, also create the filesystem volume.
        if vol.IsVMBlock() {
            fsVol := vol.NewVMBlockFilesystemVolume()
            err := d.CreateVolume(fsVol, nil, op)
            if err != nil {
                return err
            }
            revert.Add(func() { d.DeleteVolume(fsVol, op) })
        }
        revert.Success()
        return nil
    }
    // Get size.
    RBDSize, err := d.getRBDSize(vol)
    if err != nil {
        return err
    }
    // Create volume.
    err = d.rbdCreateVolume(vol, RBDSize)
    if err != nil {
        return err
    }
    revert.Add(func() { d.DeleteVolume(vol, op) })
    // Map the new image so it can be formatted/filled.
    RBDDevPath, err := d.rbdMapVolume(vol)
    if err != nil {
        return err
    }
    revert.Add(func() { d.rbdUnmapVolume(vol, true) })
    // Get filesystem.
    RBDFilesystem := d.getRBDFilesystem(vol)
    if vol.contentType == ContentTypeFS {
        _, err = makeFSType(RBDDevPath, RBDFilesystem, nil)
        if err != nil {
            return err
        }
    }
    // For VMs, also create the filesystem volume.
    if vol.IsVMBlock() {
        fsVol := vol.NewVMBlockFilesystemVolume()
        err := d.CreateVolume(fsVol, nil, op)
        if err != nil {
            return err
        }
        revert.Add(func() { d.DeleteVolume(fsVol, op) })
    }
    // Run the volume filler function if supplied.
    if filler != nil && filler.Fill != nil {
        err := vol.MountTask(func(mountPath string, op *operations.Operation) error {
            if vol.contentType == ContentTypeFS {
                // Filesystem volumes get only the mount path.
                return filler.Fill(mountPath, "")
            }
            // Block volumes also get their block device path.
            devPath, err := d.GetVolumeDiskPath(vol)
            if err != nil {
                return err
            }
            err = filler.Fill(mountPath, devPath)
            if err != nil {
                return err
            }
            // Move the GPT alt header to end of disk if needed.
            if vol.IsVMBlock() {
                err = d.moveGPTAltHeader(devPath)
                if err != nil {
                    return err
                }
            }
            return err
        }, op)
        if err != nil {
            return err
        }
    }
    // Create a readonly snapshot of the image volume which will be used as the
    // clone source for future non-image volumes.
    if vol.volType == VolumeTypeImage {
        err = d.rbdUnmapVolume(vol, true)
        if err != nil {
            return err
        }
        err = d.rbdCreateVolumeSnapshot(vol, "readonly")
        if err != nil {
            return err
        }
        revert.Add(func() { d.deleteVolumeSnapshot(vol, "readonly") })
        err = d.rbdProtectVolumeSnapshot(vol, "readonly")
        if err != nil {
            return err
        }
        if vol.contentType == ContentTypeBlock {
            // Re-create the FS config volume's readonly snapshot now that the filler function has run and unpacked into both config and block volumes.
            fsVol := NewVolume(d, d.name, vol.volType, ContentTypeFS, vol.name, vol.config, vol.poolConfig)
            err := d.rbdUnprotectVolumeSnapshot(fsVol, "readonly")
            if err != nil {
                return err
            }
            _, err = d.deleteVolumeSnapshot(fsVol, "readonly")
            if err != nil {
                return err
            }
            err = d.rbdCreateVolumeSnapshot(fsVol, "readonly")
            if err != nil {
                return err
            }
            revert.Add(func() { d.deleteVolumeSnapshot(fsVol, "readonly") })
            err = d.rbdProtectVolumeSnapshot(fsVol, "readonly")
            if err != nil {
                return err
            }
        }
    }
    revert.Success()
    return nil
}
// CreateVolumeFromBackup re-creates a volume from its exported state.
// Delegates to the generic VFS backup unpacker; returns a post-process hook,
// a revert function and any error, as produced by genericVFSBackupUnpack.
func (d *ceph) CreateVolumeFromBackup(vol Volume, snapshots []string, srcData io.ReadSeeker, optimizedStorage bool, op *operations.Operation) (func(vol Volume) error, func(), error) {
    return genericVFSBackupUnpack(d, vol, snapshots, srcData, op)
}
// CreateVolumeFromCopy provides same-pool volume copying functionality.
// Without snapshots the copy is either a full "rbd cp" (when
// ceph.rbd.clone_copy is explicitly disabled) or a lightweight clone from a
// (possibly temporary) protected snapshot. With snapshots the volume is
// rebuilt via incremental export-diff/import-diff transfers per snapshot.
func (d *ceph) CreateVolumeFromCopy(vol Volume, srcVol Volume, copySnapshots bool, op *operations.Operation) error {
    var err error
    snapshots := []string{}
    revert := revert.New()
    defer revert.Fail()
    if !srcVol.IsSnapshot() && copySnapshots {
        // Get the list of snapshots to transfer.
        snapshots, err = d.VolumeSnapshots(srcVol, op)
        if err != nil {
            return err
        }
    }
    // Copy without snapshots.
    if !copySnapshots || len(snapshots) == 0 {
        if d.config["ceph.rbd.clone_copy"] != "" &&
            !shared.IsTrue(d.config["ceph.rbd.clone_copy"]) &&
            srcVol.volType != VolumeTypeImage {
            // Full copy requested: duplicate the whole image with "rbd cp".
            _, err = shared.RunCommand(
                "rbd",
                "--id", d.config["ceph.user.name"],
                "--cluster", d.config["ceph.cluster_name"],
                "cp",
                d.getRBDVolumeName(srcVol, "", false, true),
                d.getRBDVolumeName(vol, "", false, true))
            if err != nil {
                return err
            }
            revert.Add(func() { d.DeleteVolume(vol, op) })
            _, err = d.rbdMapVolume(vol)
            if err != nil {
                return err
            }
            revert.Add(func() { d.rbdUnmapVolume(vol, true) })
        } else {
            // Lightweight clone from a snapshot of the source.
            parentVol := srcVol
            snapshotName := "readonly"
            if srcVol.volType != VolumeTypeImage {
                snapshotName = fmt.Sprintf("zombie_snapshot_%s", uuid.NewRandom().String())
                if srcVol.IsSnapshot() {
                    // Clone directly from the existing snapshot.
                    srcParentName, srcSnapOnlyName, _ :=
                        shared.InstanceGetParentAndSnapshotName(srcVol.name)
                    snapshotName = fmt.Sprintf("snapshot_%s", srcSnapOnlyName)
                    parentVol = NewVolume(d, d.name, srcVol.volType, srcVol.contentType, srcParentName, nil, nil)
                } else {
                    // Create snapshot.
                    err := d.rbdCreateVolumeSnapshot(srcVol, snapshotName)
                    if err != nil {
                        return err
                    }
                }
                // Protect volume so we can create clones of it.
                err = d.rbdProtectVolumeSnapshot(parentVol, snapshotName)
                if err != nil {
                    return err
                }
                revert.Add(func() { d.rbdUnprotectVolumeSnapshot(parentVol, snapshotName) })
            }
            err = d.rbdCreateClone(parentVol, snapshotName, vol)
            if err != nil {
                return err
            }
            revert.Add(func() { d.DeleteVolume(vol, op) })
        }
        if vol.contentType == ContentTypeFS {
            // Map the RBD volume.
            RBDDevPath, err := d.rbdMapVolume(vol)
            if err != nil {
                return err
            }
            defer d.rbdUnmapVolume(vol, true)
            // Re-generate the UUID.
            err = d.generateUUID(d.getRBDFilesystem(vol), RBDDevPath)
            if err != nil {
                return err
            }
        }
        // For VMs, also copy the filesystem volume.
        if vol.IsVMBlock() {
            srcFSVol := srcVol.NewVMBlockFilesystemVolume()
            fsVol := vol.NewVMBlockFilesystemVolume()
            err := d.CreateVolumeFromCopy(fsVol, srcFSVol, false, op)
            if err != nil {
                return err
            }
        }
        revert.Success()
        return nil
    }
    // Copy with snapshots.
    // Create empty dummy volume
    err = d.rbdCreateVolume(vol, "0")
    if err != nil {
        return err
    }
    revert.Add(func() { d.rbdDeleteVolume(vol) })
    // Receive over the dummy volume we created above.
    targetVolumeName := d.getRBDVolumeName(vol, "", false, true)
    lastSnap := ""
    if len(snapshots) > 0 {
        err := createParentSnapshotDirIfMissing(d.name, vol.volType, vol.name)
        if err != nil {
            return err
        }
    }
    for i, snap := range snapshots {
        // Shadow the loop variable: the revert closure below runs after the
        // loop ends, and without this every closure would capture the shared
        // variable and delete only the final snapshot.
        snap := snap
        prev := ""
        if i > 0 {
            prev = fmt.Sprintf("snapshot_%s", snapshots[i-1])
        }
        lastSnap = fmt.Sprintf("snapshot_%s", snap)
        sourceVolumeName := d.getRBDVolumeName(srcVol, lastSnap, false, true)
        // Transfer this snapshot as a delta against the previous one.
        err = d.copyWithSnapshots(
            sourceVolumeName,
            targetVolumeName,
            prev)
        if err != nil {
            return err
        }
        revert.Add(func() { d.rbdDeleteVolumeSnapshot(vol, snap) })
        snapVol, err := vol.NewSnapshot(snap)
        if err != nil {
            return err
        }
        err = snapVol.EnsureMountPath()
        if err != nil {
            return err
        }
    }
    // Copy snapshot. Transfer the current state as a delta against the last snapshot.
    sourceVolumeName := d.getRBDVolumeName(srcVol, "", false, true)
    err = d.copyWithSnapshots(
        sourceVolumeName,
        targetVolumeName,
        lastSnap)
    if err != nil {
        return err
    }
    // Map the RBD volume.
    RBDDevPath, err := d.rbdMapVolume(vol)
    if err != nil {
        return err
    }
    defer d.rbdUnmapVolume(vol, true)
    // Re-generate the UUID.
    err = d.generateUUID(d.getRBDFilesystem(vol), RBDDevPath)
    if err != nil {
        return err
    }
    ourMount, err := d.MountVolume(vol, op)
    if err != nil {
        return err
    }
    if ourMount {
        defer d.UnmountVolume(vol, op)
    }
    revert.Success()
    return nil
}
// CreateVolumeFromMigration creates a volume being sent via a migration.
// RSYNC-style transfers are delegated to the generic VFS implementation; the
// RBD type receives one stream per snapshot, then the main volume, and an
// extra final stream when the migration is live.
func (d *ceph) CreateVolumeFromMigration(vol Volume, conn io.ReadWriteCloser, volTargetArgs migration.VolumeTargetArgs, preFiller *VolumeFiller, op *operations.Operation) error {
    // Handle simple rsync and block_and_rsync through generic.
    if volTargetArgs.MigrationType.FSType == migration.MigrationFSType_RSYNC || volTargetArgs.MigrationType.FSType == migration.MigrationFSType_BLOCK_AND_RSYNC {
        return genericVFSCreateVolumeFromMigration(d, nil, vol, conn, volTargetArgs, preFiller, op)
    } else if volTargetArgs.MigrationType.FSType != migration.MigrationFSType_RBD {
        return ErrNotSupported
    }
    // For VMs, receive the filesystem volume first.
    if vol.IsVMBlock() {
        fsVol := vol.NewVMBlockFilesystemVolume()
        err := d.CreateVolumeFromMigration(fsVol, conn, volTargetArgs, preFiller, op)
        if err != nil {
            return err
        }
    }
    recvName := d.getRBDVolumeName(vol, "", false, true)
    // Create an empty placeholder image to receive into, if needed.
    if !d.HasVolume(vol) {
        err := d.rbdCreateVolume(vol, "0")
        if err != nil {
            return err
        }
    }
    err := vol.EnsureMountPath()
    if err != nil {
        return err
    }
    // Handle the optimized RBD stream migration of snapshots.
    if len(volTargetArgs.Snapshots) > 0 {
        // Create the parent directory.
        err := createParentSnapshotDirIfMissing(d.name, vol.volType, vol.name)
        if err != nil {
            return err
        }
        // Transfer the snapshots.
        for _, snapName := range volTargetArgs.Snapshots {
            fullSnapshotName := d.getRBDVolumeName(vol, snapName, false, true)
            wrapper := migration.ProgressWriter(op, "fs_progress", fullSnapshotName)
            err = d.receiveVolume(recvName, conn, wrapper)
            if err != nil {
                return err
            }
            snapVol, err := vol.NewSnapshot(snapName)
            if err != nil {
                return err
            }
            err = snapVol.EnsureMountPath()
            if err != nil {
                return err
            }
        }
    }
    defer func() {
        // Delete all migration-send-* snapshots.
        snaps, err := d.rbdListVolumeSnapshots(vol)
        if err != nil {
            return
        }
        for _, snap := range snaps {
            if !strings.HasPrefix(snap, "migration-send") {
                continue
            }
            d.rbdDeleteVolumeSnapshot(vol, snap)
        }
    }()
    // Receive the main volume data.
    wrapper := migration.ProgressWriter(op, "fs_progress", vol.name)
    err = d.receiveVolume(recvName, conn, wrapper)
    if err != nil {
        return err
    }
    // Live migrations send one additional delta after the source is paused.
    if volTargetArgs.Live {
        err = d.receiveVolume(recvName, conn, wrapper)
        if err != nil {
            return err
        }
    }
    // Map the RBD volume.
    RBDDevPath, err := d.rbdMapVolume(vol)
    if err != nil {
        return err
    }
    defer d.rbdUnmapVolume(vol, true)
    // Re-generate the UUID.
    err = d.generateUUID(d.getRBDFilesystem(vol), RBDDevPath)
    if err != nil {
        return err
    }
    return nil
}
// RefreshVolume updates an existing volume to match the state of another.
// Delegates to the generic VFS copy with refresh enabled (final true argument).
func (d *ceph) RefreshVolume(vol Volume, srcVol Volume, srcSnapshots []Volume, op *operations.Operation) error {
    return genericVFSCopyVolume(d, nil, vol, srcVol, srcSnapshots, true, op)
}
// DeleteVolume deletes a volume of the storage device. If any snapshots of the volume remain then
// this function will return an error.
// Image volumes with dependent clones cannot be removed outright and are
// instead renamed to a "zombie" so their clones keep a valid parent.
func (d *ceph) DeleteVolume(vol Volume, op *operations.Operation) error {
    if vol.volType == VolumeTypeImage {
        // Try to umount but don't fail.
        d.UnmountVolume(vol, op)
        // Check if image has dependent snapshot clones.
        // db.ErrNoSuchObject means no clones exist, so the image can be fully purged.
        _, err := d.rbdListSnapshotClones(vol, "readonly")
        if err != nil {
            if err != db.ErrNoSuchObject {
                return err
            }
            // No clones: unprotect, purge snapshots, unmap and delete the image.
            // Unprotect snapshot.
            err = d.rbdUnprotectVolumeSnapshot(vol, "readonly")
            if err != nil {
                return err
            }
            // Delete snapshots.
            _, err = shared.RunCommand(
                "rbd",
                "--id", d.config["ceph.user.name"],
                "--cluster", d.config["ceph.cluster_name"],
                "--pool", d.config["ceph.osd.pool_name"],
                "snap",
                "purge",
                d.getRBDVolumeName(vol, "", false, false))
            if err != nil {
                return err
            }
            // Unmap image.
            err = d.rbdUnmapVolume(vol, true)
            if err != nil {
                return err
            }
            // Delete image.
            err = d.rbdDeleteVolume(vol)
        } else {
            // Clones exist: keep the image around as a zombie instead of deleting it.
            err = d.rbdUnmapVolume(vol, true)
            if err != nil {
                return err
            }
            err = d.rbdMarkVolumeDeleted(vol, vol.name)
        }
        // Check the outcome of rbdDeleteVolume / rbdMarkVolumeDeleted above.
        if err != nil {
            return err
        }
    } else {
        // Non-image volumes: deletion of a missing volume is a no-op.
        if !d.HasVolume(vol) {
            return nil
        }
        _, err := d.UnmountVolume(vol, op)
        if err != nil {
            return err
        }
        _, err = d.deleteVolume(vol)
        if err != nil {
            return errors.Wrap(err, "Failed to delete volume")
        }
    }
    // For VMs, delete the associated filesystem volume too.
    if vol.IsVMBlock() {
        fsVol := vol.NewVMBlockFilesystemVolume()
        err := d.DeleteVolume(fsVol, op)
        if err != nil {
            return err
        }
    }
    // Clean up the on-disk mount path for filesystem volumes.
    mountPath := vol.MountPath()
    if vol.contentType == ContentTypeFS && shared.PathExists(mountPath) {
        err := wipeDirectory(mountPath)
        if err != nil {
            return err
        }
        err = os.Remove(mountPath)
        if err != nil && !os.IsNotExist(err) {
            return errors.Wrapf(err, "Failed to remove '%s'", mountPath)
        }
    }
    return nil
}
// HasVolume indicates whether a specific volume exists on the storage pool.
// It probes the image's metadata; the command succeeds only when the RBD
// image is present in the pool.
func (d *ceph) HasVolume(vol Volume) bool {
    args := []string{
        "--id", d.config["ceph.user.name"],
        "--cluster", d.config["ceph.cluster_name"],
        "--pool", d.config["ceph.osd.pool_name"],
        "image-meta",
        "list",
        d.getRBDVolumeName(vol, "", false, false),
    }
    _, err := shared.RunCommand("rbd", args...)
    return err == nil
}
// ValidateVolume validates the supplied volume config.
// Both block filesystem keys accept any value; validation of everything else
// is delegated to the common validateVolume helper.
func (d *ceph) ValidateVolume(vol Volume, removeUnknownKeys bool) error {
    return d.validateVolume(vol, map[string]func(string) error{
        "block.filesystem":    shared.IsAny,
        "block.mount_options": shared.IsAny,
    }, removeUnknownKeys)
}
// UpdateVolume applies config changes to the volume.
// Only custom volumes may be updated; a changed "size" key triggers a quota
// resize, all other keys require no action here.
func (d *ceph) UpdateVolume(vol Volume, changedConfig map[string]string) error {
    if vol.volType != VolumeTypeCustom {
        return ErrNotSupported
    }
    if newSize, ok := changedConfig["size"]; ok {
        return d.SetVolumeQuota(vol, newSize, nil)
    }
    return nil
}
// GetVolumeUsage returns the disk space used by the volume.
// Mounted filesystem volumes are measured via statfs; otherwise "rbd du" is
// queried and the entry without a snapshot name (the image itself) is used.
func (d *ceph) GetVolumeUsage(vol Volume) (int64, error) {
    // Fast path: for a mounted filesystem volume, ask the kernel directly.
    if vol.contentType == ContentTypeFS && shared.IsMountPoint(vol.MountPath()) {
        var stat unix.Statfs_t
        err := unix.Statfs(vol.MountPath(), &stat)
        if err != nil {
            return -1, err
        }
        // Used bytes = (total blocks - free blocks) * block size.
        return int64(stat.Blocks-stat.Bfree) * int64(stat.Bsize), nil
    }
    // Shapes matching the JSON output of "rbd du --format json".
    type cephDuLine struct {
        Name            string `json:"name"`
        Snapshot        string `json:"snapshot"`
        ProvisionedSize int64  `json:"provisioned_size"`
        UsedSize        int64  `json:"used_size"`
    }
    type cephDuInfo struct {
        Images []cephDuLine `json:"images"`
    }
    jsonInfo, err := shared.TryRunCommand(
        "rbd",
        "du",
        "--format", "json",
        "--id", d.config["ceph.user.name"],
        "--cluster", d.config["ceph.cluster_name"],
        "--pool", d.config["ceph.osd.pool_name"],
        d.getRBDVolumeName(vol, "", false, false))
    if err != nil {
        return -1, err
    }
    var usedSize int64
    var result cephDuInfo
    err = json.Unmarshal([]byte(jsonInfo), &result)
    if err != nil {
        return -1, err
    }
    // rbd du gives the output of all related rbd images, snapshots included
    // to get the total size of the image we use the result that does not include
    // a snapshot name, this is the total image size.
    for _, image := range result.Images {
        if image.Snapshot == "" {
            usedSize = image.UsedSize
        }
    }
    return usedSize, nil
}
// SetVolumeQuota applies a size limit on volume.
// Filesystem volumes are shrunk (filesystem first, then image) or grown
// (image first, then filesystem) as appropriate; block volumes may only grow.
func (d *ceph) SetVolumeQuota(vol Volume, size string, op *operations.Operation) error {
    fsType := d.getRBDFilesystem(vol)
    RBDDevPath, err := d.getRBDMappedDevPath(vol)
    if err != nil {
        return err
    }
    // The grow/shrink functions use Mount/Unmount which may cause an unmap, so make sure to keep a reference.
    oldKeepDevice := vol.keepDevice
    vol.keepDevice = true
    defer func() {
        vol.keepDevice = oldKeepDevice
    }()
    oldSizeBytes, err := blockDevSizeBytes(RBDDevPath)
    if err != nil {
        return errors.Wrapf(err, "Error getting current size")
    }
    newSizeBytes, err := units.ParseByteSizeString(size)
    if err != nil {
        return err
    }
    // The right disjunct just means that someone unset the size property in the instance's config.
    // We obviously cannot resize to 0.
    if oldSizeBytes == newSizeBytes || newSizeBytes == 0 {
        return nil
    }
    // Resize filesystem if needed.
    if vol.contentType == ContentTypeFS {
        if newSizeBytes < oldSizeBytes {
            // Shrink: reduce the filesystem first, then the RBD image.
            err = shrinkFileSystem(fsType, RBDDevPath, vol, newSizeBytes)
            if err != nil {
                return err
            }
            _, err = shared.TryRunCommand(
                "rbd",
                "resize",
                "--allow-shrink",
                "--id", d.config["ceph.user.name"],
                "--cluster", d.config["ceph.cluster_name"],
                "--pool", d.config["ceph.osd.pool_name"],
                "--size", fmt.Sprintf("%dB", newSizeBytes),
                d.getRBDVolumeName(vol, "", false, false))
            if err != nil {
                return err
            }
        } else {
            // Grow the block device.
            _, err = shared.TryRunCommand(
                "rbd",
                "resize",
                "--id", d.config["ceph.user.name"],
                "--cluster", d.config["ceph.cluster_name"],
                "--pool", d.config["ceph.osd.pool_name"],
                "--size", fmt.Sprintf("%dB", newSizeBytes),
                d.getRBDVolumeName(vol, "", false, false))
            if err != nil {
                return err
            }
            // Grow the filesystem.
            err = growFileSystem(fsType, RBDDevPath, vol)
            if err != nil {
                return err
            }
        }
    } else {
        // Block content: shrinking could truncate live data, so refuse it.
        if newSizeBytes < oldSizeBytes {
            return fmt.Errorf("You cannot shrink block volumes")
        }
        // Grow the block device.
        _, err = shared.TryRunCommand(
            "rbd",
            "resize",
            "--id", d.config["ceph.user.name"],
            "--cluster", d.config["ceph.cluster_name"],
            "--pool", d.config["ceph.osd.pool_name"],
            "--size", fmt.Sprintf("%dB", newSizeBytes),
            d.getRBDVolumeName(vol, "", false, false))
        if err != nil {
            return err
        }
        // Move the GPT alt header to end of disk if needed.
        if vol.IsVMBlock() {
            err = d.moveGPTAltHeader(RBDDevPath)
            if err != nil {
                return err
            }
        }
    }
    return nil
}
// GetVolumeDiskPath returns the location of a root disk block device.
// Only VM block volumes expose a mapped RBD device; anything else is
// unsupported by this driver.
func (d *ceph) GetVolumeDiskPath(vol Volume) (string, error) {
    if !vol.IsVMBlock() {
        return "", ErrNotSupported
    }
    return d.getRBDMappedDevPath(vol)
}
// MountVolume simulates mounting a volume.
// Filesystem volumes are mounted from their mapped RBD device; VM block
// volumes recurse to mount their associated filesystem volume instead.
// Returns true if this call performed the mount.
func (d *ceph) MountVolume(vol Volume, op *operations.Operation) (bool, error) {
    mountPath := vol.MountPath()
    if vol.contentType == ContentTypeFS && !shared.IsMountPoint(mountPath) {
        RBDFilesystem := d.getRBDFilesystem(vol)
        err := vol.EnsureMountPath()
        if err != nil {
            return false, err
        }
        // getRBDMappedDevPath resolves (and if needed maps) the RBD device.
        RBDDevPath, err := d.getRBDMappedDevPath(vol)
        if err != nil {
            return false, err
        }
        mountFlags, mountOptions := resolveMountOptions(d.getRBDMountOptions(vol))
        err = TryMount(RBDDevPath, mountPath, RBDFilesystem, mountFlags, mountOptions)
        if err != nil {
            return false, err
        }
        d.logger.Debug("Mounted RBD volume", log.Ctx{"dev": RBDDevPath, "path": mountPath, "options": mountOptions})
        return true, nil
    }
    // For VMs, mount the filesystem volume.
    if vol.IsVMBlock() {
        fsVol := vol.NewVMBlockFilesystemVolume()
        return d.MountVolume(fsVol, op)
    }
    return false, nil
}
// UnmountVolume simulates unmounting a volume.
// For VMs the associated filesystem volume is unmounted first, then the
// volume's own mount (if any) is detached and the RBD device unmapped, unless
// a caller asked to keep the device mapped via vol.keepDevice.
func (d *ceph) UnmountVolume(vol Volume, op *operations.Operation) (bool, error) {
    // For VMs, also unmount the filesystem dataset.
    if vol.IsVMBlock() {
        fsVol := vol.NewVMBlockFilesystemVolume()
        _, err := d.UnmountVolume(fsVol, op)
        if err != nil {
            return false, err
        }
    }
    // Attempt to unmount the volume.
    mountPath := vol.MountPath()
    if shared.IsMountPoint(mountPath) {
        err := TryUnmount(mountPath, unix.MNT_DETACH)
        if err != nil {
            return false, err
        }
        d.logger.Debug("Unmounted RBD volume", log.Ctx{"path": mountPath})
    }
    // Attempt to unmap.
    if !vol.keepDevice {
        err := d.rbdUnmapVolume(vol, true)
        if err != nil {
            return true, err
        }
    }
    return true, nil
}
// RenameVolume renames a volume and its snapshots.
// The volume is unmounted and unmapped, the RBD image renamed, the renamed
// image remapped, and finally the on-disk mount paths renamed. Every step is
// reverted if a later one fails.
func (d *ceph) RenameVolume(vol Volume, newName string, op *operations.Operation) error {
    revert := revert.New()
    defer revert.Fail()
    // Unmount the volume so nothing holds it open during the rename.
    _, err := d.UnmountVolume(vol, op)
    if err != nil {
        return err
    }
    // Unmap the RBD device so the image can be renamed.
    err = d.rbdUnmapVolume(vol, true)
    if err != nil {
        // Fixed: previously returned nil here, silently swallowing the error
        // and reporting a successful rename that never happened.
        return err
    }
    revert.Add(func() { d.rbdMapVolume(vol) })
    // Rename the RBD image.
    err = d.rbdRenameVolume(vol, newName)
    if err != nil {
        return err
    }
    newVol := NewVolume(d, d.name, vol.volType, vol.contentType, newName, nil, nil)
    revert.Add(func() { d.rbdRenameVolume(newVol, vol.name) })
    // Map the renamed image back.
    _, err = d.rbdMapVolume(newVol)
    if err != nil {
        return err
    }
    // Rename the on-disk mount paths (volume and snapshot directories).
    err = genericVFSRenameVolume(d, vol, newName, op)
    if err != nil {
        // Fixed: previously returned nil here, masking the failure.
        return err
    }
    revert.Success()
    return nil
}
// MigrateVolume sends a volume for migration.
// Cluster-internal requests (volSrcArgs.Data set) only unmap and rename the
// image. RSYNC-style transfers go through the generic VFS path; the RBD type
// streams each snapshot incrementally, then a temporary "migration-send-*"
// snapshot of the running state.
func (d *ceph) MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs *migration.VolumeSourceArgs, op *operations.Operation) error {
    // If data is set, this request is coming from the clustering code.
    // In this case, we only need to unmap and rename the rbd image.
    if volSrcArgs.Data != nil {
        data, ok := volSrcArgs.Data.(string)
        if ok {
            err := d.rbdUnmapVolume(vol, true)
            if err != nil {
                return err
            }
            // Rename volume.
            if vol.name != data {
                err = d.rbdRenameVolume(vol, data)
                if err != nil {
                    return err
                }
            }
            return nil
        }
    }
    // Handle simple rsync and block_and_rsync through generic.
    if volSrcArgs.MigrationType.FSType == migration.MigrationFSType_RSYNC || volSrcArgs.MigrationType.FSType == migration.MigrationFSType_BLOCK_AND_RSYNC {
        return genericVFSMigrateVolume(d, d.state, vol, conn, volSrcArgs, op)
    } else if volSrcArgs.MigrationType.FSType != migration.MigrationFSType_RBD {
        return ErrNotSupported
    }
    // For VMs, send the filesystem volume first.
    if vol.IsVMBlock() {
        fsVol := vol.NewVMBlockFilesystemVolume()
        err := d.MigrateVolume(fsVol, conn, volSrcArgs, op)
        if err != nil {
            return err
        }
    }
    // Migrating a single snapshot: mount it (which creates the temporary
    // "<parent>_<snap>_start_clone" image) and stream that clone.
    if vol.IsSnapshot() {
        parentName, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(vol.name)
        sendName := fmt.Sprintf("%s/snapshots_%s_%s_start_clone", d.name, parentName, snapOnlyName)
        cloneVol := NewVolume(d, d.name, vol.volType, vol.contentType, vol.name, nil, nil)
        // Mounting the volume snapshot will create the clone "snapshots_<parent>_<snap>_start_clone".
        _, err := d.MountVolumeSnapshot(cloneVol, op)
        if err != nil {
            return err
        }
        defer d.UnmountVolumeSnapshot(cloneVol, op)
        // Setup progress tracking.
        var wrapper *ioprogress.ProgressTracker
        if volSrcArgs.TrackProgress {
            wrapper = migration.ProgressTracker(op, "fs_progress", vol.name)
        }
        err = d.sendVolume(conn, sendName, "", wrapper)
        if err != nil {
            return err
        }
        return nil
    }
    // Send each requested snapshot as a delta against the previous one.
    lastSnap := ""
    if !volSrcArgs.FinalSync {
        for i, snapName := range volSrcArgs.Snapshots {
            snapshot, _ := vol.NewSnapshot(snapName)
            prev := ""
            if i > 0 {
                prev = fmt.Sprintf("snapshot_%s", volSrcArgs.Snapshots[i-1])
            }
            lastSnap = fmt.Sprintf("snapshot_%s", snapName)
            sendSnapName := d.getRBDVolumeName(vol, lastSnap, false, true)
            // Setup progress tracking.
            var wrapper *ioprogress.ProgressTracker
            if volSrcArgs.TrackProgress {
                wrapper = migration.ProgressTracker(op, "fs_progress", snapshot.name)
            }
            err := d.sendVolume(conn, sendSnapName, prev, wrapper)
            if err != nil {
                return err
            }
        }
    }
    // Setup progress tracking.
    var wrapper *ioprogress.ProgressTracker
    if volSrcArgs.TrackProgress {
        wrapper = migration.ProgressTracker(op, "fs_progress", vol.name)
    }
    // Snapshot the running state and send it as a delta against the last snapshot.
    runningSnapName := fmt.Sprintf("migration-send-%s", uuid.NewRandom().String())
    err := d.rbdCreateVolumeSnapshot(vol, runningSnapName)
    if err != nil {
        return err
    }
    defer d.rbdDeleteVolumeSnapshot(vol, runningSnapName)
    cur := d.getRBDVolumeName(vol, runningSnapName, false, true)
    err = d.sendVolume(conn, cur, lastSnap, wrapper)
    if err != nil {
        return err
    }
    return nil
}
// BackupVolume creates an exported version of a volume.
// Delegates to the generic VFS backup writer; the optimized flag is not used
// by this driver.
func (d *ceph) BackupVolume(vol Volume, tarWriter *instancewriter.InstanceTarWriter, optimized bool, snapshots bool, op *operations.Operation) error {
    return genericVFSBackupVolume(d, vol, tarWriter, snapshots, op)
}
// CreateVolumeSnapshot creates a snapshot of a volume.
// The source filesystem is synced and frozen (when mounted) so the RBD
// snapshot "snapshot_<name>" taken of the parent image is consistent.
func (d *ceph) CreateVolumeSnapshot(snapVol Volume, op *operations.Operation) error {
    revert := revert.New()
    defer revert.Fail()
    parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
    sourcePath := GetVolumeMountPath(d.name, snapVol.volType, parentName)
    snapshotName := fmt.Sprintf("snapshot_%s", snapshotOnlyName)
    if shared.IsMountPoint(sourcePath) {
        // This is costly but we need to ensure that all cached data has
        // been committed to disk. If we don't then the rbd snapshot of
        // the underlying filesystem can be inconsistent or - worst case
        // - empty.
        unix.Sync()
        // Freeze is best-effort; only schedule the unfreeze if the freeze worked.
        _, err := shared.TryRunCommand("fsfreeze", "--freeze", sourcePath)
        if err == nil {
            defer shared.TryRunCommand("fsfreeze", "--unfreeze", sourcePath)
        }
    }
    // Create the parent directory.
    err := createParentSnapshotDirIfMissing(d.name, snapVol.volType, parentName)
    if err != nil {
        return err
    }
    err = snapVol.EnsureMountPath()
    if err != nil {
        return err
    }
    // Take the RBD snapshot on the parent image.
    parentVol := NewVolume(d, d.name, snapVol.volType, snapVol.contentType, parentName, nil, nil)
    err = d.rbdCreateVolumeSnapshot(parentVol, snapshotName)
    if err != nil {
        return err
    }
    revert.Add(func() { d.DeleteVolumeSnapshot(snapVol, op) })
    // For VM images, create a filesystem volume too.
    if snapVol.IsVMBlock() {
        fsVol := snapVol.NewVMBlockFilesystemVolume()
        err := d.CreateVolumeSnapshot(fsVol, op)
        if err != nil {
            return err
        }
        revert.Add(func() { d.DeleteVolumeSnapshot(fsVol, op) })
    }
    revert.Success()
    return nil
}
// DeleteVolumeSnapshot removes a snapshot from the storage device.
// The RBD snapshot itself is named "snapshot_<snapName>" on the parent image.
// Deletion of a missing snapshot is treated as a success (no-op).
func (d *ceph) DeleteVolumeSnapshot(snapVol Volume, op *operations.Operation) error {
    // Check if snapshot exists, and return if not.
    _, err := shared.RunCommand(
        "rbd",
        "--id", d.config["ceph.user.name"],
        "--cluster", d.config["ceph.cluster_name"],
        "--pool", d.config["ceph.osd.pool_name"],
        "info",
        d.getRBDVolumeName(snapVol, "", false, false))
    if err != nil {
        // "rbd info" failed, so the snapshot is assumed absent; nothing to do.
        return nil
    }
    // Resolve the parent volume and the on-RBD snapshot name ("snapshot_" prefix).
    parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
    snapshotName := fmt.Sprintf("snapshot_%s", snapshotOnlyName)
    parentVol := NewVolume(d, d.name, snapVol.volType, snapVol.contentType, parentName, nil, nil)
    // Delete the RBD snapshot from the parent image.
    _, err = d.deleteVolumeSnapshot(parentVol, snapshotName)
    if err != nil {
        return errors.Wrap(err, "Failed to delete volume snapshot")
    }
    // Clean up the snapshot's mount path on disk (filesystem volumes only).
    mountPath := snapVol.MountPath()
    if snapVol.contentType == ContentTypeFS && shared.PathExists(mountPath) {
        err = wipeDirectory(mountPath)
        if err != nil {
            return err
        }
        err = os.Remove(mountPath)
        if err != nil && !os.IsNotExist(err) {
            return errors.Wrapf(err, "Failed to remove '%s'", mountPath)
        }
    }
    // Remove the parent snapshot directory if this is the last snapshot being removed.
    err = deleteParentSnapshotDirIfEmpty(d.name, snapVol.volType, parentName)
    if err != nil {
        return err
    }
    // For VM images, delete the filesystem volume too.
    if snapVol.IsVMBlock() {
        fsVol := snapVol.NewVMBlockFilesystemVolume()
        err := d.DeleteVolumeSnapshot(fsVol, op)
        if err != nil {
            return err
        }
    }
    return nil
}
// MountVolumeSnapshot simulates mounting a volume snapshot.
//
// RBD snapshots cannot be mounted directly, so for filesystem volumes this
// protects the snapshot, clones it into a temporary
// "<parent>_<snap>_start_clone" volume, maps the clone and mounts that at the
// snapshot's mount path. UnmountVolumeSnapshot tears the clone back down.
// Returns true when this call performed the mount.
func (d *ceph) MountVolumeSnapshot(snapVol Volume, op *operations.Operation) (bool, error) {
	mountPath := snapVol.MountPath()

	if snapVol.contentType == ContentTypeFS && !shared.IsMountPoint(mountPath) {
		revert := revert.New()
		defer revert.Fail()

		parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
		prefixedSnapOnlyName := fmt.Sprintf("snapshot_%s", snapshotOnlyName)
		parentVol := NewVolume(d, d.name, snapVol.volType, snapVol.contentType, parentName, nil, nil)

		// Protect snapshot to prevent data loss.
		err := d.rbdProtectVolumeSnapshot(parentVol, prefixedSnapOnlyName)
		if err != nil {
			return false, err
		}

		revert.Add(func() { d.rbdUnprotectVolumeSnapshot(parentVol, prefixedSnapOnlyName) })

		// Clone the snapshot into a temporary volume in the "snapshots" type namespace.
		cloneName := fmt.Sprintf("%s_%s_start_clone", parentName, snapshotOnlyName)
		cloneVol := NewVolume(d, d.name, VolumeType("snapshots"), ContentTypeFS, cloneName, nil, nil)

		err = d.rbdCreateClone(parentVol, prefixedSnapOnlyName, cloneVol)
		if err != nil {
			return false, err
		}

		revert.Add(func() { d.rbdDeleteVolume(cloneVol) })

		// Map the clone to a local block device.
		rbdDevPath, err := d.rbdMapVolume(cloneVol)
		if err != nil {
			return false, err
		}

		revert.Add(func() { d.rbdUnmapVolume(cloneVol, true) })

		// NOTE(review): this re-check looks redundant — the enclosing branch
		// already established that mountPath is not a mount point.
		if shared.IsMountPoint(mountPath) {
			return false, nil
		}

		err = snapVol.EnsureMountPath()
		if err != nil {
			return false, err
		}

		RBDFilesystem := d.getRBDFilesystem(snapVol)
		mountFlags, mountOptions := resolveMountOptions(d.getRBDMountOptions(snapVol))
		if RBDFilesystem == "xfs" {
			// The clone carries the same filesystem UUID as its parent, so
			// mount XFS with nouuid to avoid a duplicate-UUID mount refusal.
			idx := strings.Index(mountOptions, "nouuid")
			if idx < 0 {
				mountOptions += ",nouuid"
			}
		}

		err = TryMount(rbdDevPath, mountPath, RBDFilesystem, mountFlags, mountOptions)
		if err != nil {
			return false, err
		}

		d.logger.Debug("Mounted RBD volume snapshot", log.Ctx{"dev": rbdDevPath, "path": mountPath, "options": mountOptions})
		revert.Success()
		return true, nil
	}

	// For VMs, mount the filesystem volume.
	if snapVol.IsVMBlock() {
		fsVol := snapVol.NewVMBlockFilesystemVolume()
		return d.MountVolumeSnapshot(fsVol, op)
	}

	return false, nil
}
// UnmountVolumeSnapshot simulates unmounting a volume snapshot, undoing the
// temporary clone created by MountVolumeSnapshot. Returns true when this call
// performed (or had already completed) the teardown.
func (d *ceph) UnmountVolumeSnapshot(snapVol Volume, op *operations.Operation) (bool, error) {
	mountPath := snapVol.MountPath()

	// Not mounted; nothing to do.
	if !shared.IsMountPoint(mountPath) {
		return false, nil
	}

	err := TryUnmount(mountPath, unix.MNT_DETACH)
	if err != nil {
		return false, err
	}
	d.logger.Debug("Unmounted RBD volume snapshot", log.Ctx{"path": mountPath})

	// Unmap the temporary "_start_clone" volume set up at mount time.
	parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
	cloneName := fmt.Sprintf("%s_%s_start_clone", parentName, snapshotOnlyName)
	cloneVol := NewVolume(d, d.name, VolumeType("snapshots"), ContentTypeFS, cloneName, nil, nil)

	err = d.rbdUnmapVolume(cloneVol, true)
	if err != nil {
		return false, err
	}

	// Clone already gone; unmount is complete.
	if !d.HasVolume(cloneVol) {
		return true, nil
	}

	// Delete the temporary RBD volume.
	err = d.rbdDeleteVolume(cloneVol)
	if err != nil {
		return false, err
	}

	// For VMs, also unmount the associated filesystem volume's snapshot.
	if snapVol.IsVMBlock() {
		fsVol := snapVol.NewVMBlockFilesystemVolume()
		return d.UnmountVolumeSnapshot(fsVol, op)
	}

	return true, nil
}
// VolumeSnapshots returns a list of snapshots for the volume.
// Internal-only snapshots (zombies and in-flight migration sends) are
// filtered out and the "snapshot_" storage prefix is stripped.
func (d *ceph) VolumeSnapshots(vol Volume, op *operations.Operation) ([]string, error) {
	snapshots, err := d.rbdListVolumeSnapshots(vol)
	if err != nil {
		// A volume that doesn't exist simply has no snapshots.
		if err == db.ErrNoSuchObject {
			return nil, nil
		}

		return nil, err
	}

	var names []string
	for _, snapName := range snapshots {
		internal := strings.HasPrefix(snapName, "zombie_") ||
			strings.HasPrefix(snapName, "migration-send-")
		if internal {
			continue
		}

		names = append(names, strings.TrimPrefix(snapName, "snapshot_"))
	}

	return names, nil
}
// RestoreVolume restores a volume from a snapshot.
func (d *ceph) RestoreVolume(vol Volume, snapshotName string, op *operations.Operation) error {
	// The volume must be unmounted while RBD rolls it back. If we performed
	// the unmount ourselves, remount on return.
	ourUmount, err := d.UnmountVolume(vol, op)
	if err != nil {
		return err
	}
	if ourUmount {
		defer d.MountVolume(vol, op)
	}

	// Roll the RBD volume back to the named snapshot.
	_, err = shared.RunCommand(
		"rbd",
		"--id", d.config["ceph.user.name"],
		"--cluster", d.config["ceph.cluster_name"],
		"--pool", d.config["ceph.osd.pool_name"],
		"snap",
		"rollback",
		"--snap", fmt.Sprintf("snapshot_%s", snapshotName),
		d.getRBDVolumeName(vol, "", false, false))
	if err != nil {
		return err
	}

	snapVol, err := vol.NewSnapshot(snapshotName)
	if err != nil {
		return err
	}

	// Map the RBD volume so the filesystem on it can be modified.
	RBDDevPath, err := d.rbdMapVolume(snapVol)
	if err != nil {
		return err
	}
	defer d.rbdUnmapVolume(snapVol, true)

	// Re-generate the filesystem UUID.
	err = d.generateUUID(d.getRBDFilesystem(snapVol), RBDDevPath)
	if err != nil {
		return err
	}

	return nil
}
// RenameVolumeSnapshot renames a volume snapshot.
func (d *ceph) RenameVolumeSnapshot(snapVol Volume, newSnapshotName string, op *operations.Operation) error {
	revert := revert.New()
	defer revert.Fail()

	// Snapshots live on the parent RBD volume with a "snapshot_" name prefix.
	parentName, snapshotOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snapVol.name)
	oldSnapOnlyName := fmt.Sprintf("snapshot_%s", snapshotOnlyName)
	newSnapOnlyName := fmt.Sprintf("snapshot_%s", newSnapshotName)
	parentVol := NewVolume(d, d.name, snapVol.volType, snapVol.contentType, parentName, nil, nil)

	err := d.rbdRenameVolumeSnapshot(parentVol, oldSnapOnlyName, newSnapOnlyName)
	if err != nil {
		return err
	}

	revert.Add(func() { d.rbdRenameVolumeSnapshot(parentVol, newSnapOnlyName, oldSnapOnlyName) })

	// Keep the on-disk mount path in sync for filesystem volumes.
	if snapVol.contentType == ContentTypeFS {
		err = genericVFSRenameVolumeSnapshot(d, snapVol, newSnapshotName, op)
		if err != nil {
			return err
		}
	}

	// For VM volumes, rename the snapshot of the associated filesystem volume too.
	if snapVol.IsVMBlock() {
		fsVol := snapVol.NewVMBlockFilesystemVolume()
		err := d.RenameVolumeSnapshot(fsVol, newSnapshotName, op)
		if err != nil {
			return err
		}

		revert.Add(func() {
			newFsVol := NewVolume(d, d.name, snapVol.volType, ContentTypeFS, fmt.Sprintf("%s/%s", parentName, newSnapshotName), snapVol.config, snapVol.poolConfig)
			d.RenameVolumeSnapshot(newFsVol, snapVol.name, op)
		})
	}

	revert.Success()
	return nil
}
|
package mpmulticore
import (
"bufio"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"time"
mp "github.com/mackerelio/go-mackerel-plugin-helper"
)
// graphDef declares the plugin's graph schema: a stacked per-CPU usage graph
// (one sub-graph per CPU via the "#" wildcard) and a loadavg5-per-core graph.
var graphDef = map[string]mp.Graphs{
	"multicore.cpu.#": {
		Label: "MultiCore CPU",
		Unit:  "percentage",
		Metrics: []mp.Metrics{
			{Name: "user", Label: "user", Diff: false, Stacked: true},
			{Name: "nice", Label: "nice", Diff: false, Stacked: true},
			{Name: "system", Label: "system", Diff: false, Stacked: true},
			{Name: "idle", Label: "idle", Diff: false, Stacked: true},
			{Name: "iowait", Label: "ioWait", Diff: false, Stacked: true},
			{Name: "irq", Label: "irq", Diff: false, Stacked: true},
			{Name: "softirq", Label: "softirq", Diff: false, Stacked: true},
			{Name: "steal", Label: "steal", Diff: false, Stacked: true},
			{Name: "guest", Label: "guest", Diff: false, Stacked: true},
			{Name: "guest_nice", Label: "guest_nice", Diff: false, Stacked: true},
		},
	},
	"multicore.loadavg_per_core": {
		Label: "MultiCore loadavg5 per core",
		Unit:  "float",
		Metrics: []mp.Metrics{
			{Name: "loadavg5", Label: "loadavg5", Diff: false, Stacked: false},
		},
	},
}

// saveItem is the JSON document persisted between plugin runs so the next
// invocation can compute counter deltas.
type saveItem struct {
	LastTime       time.Time            // when the counters were sampled
	ProcStatsByCPU map[string]procStats // raw counters keyed by "cpuN"
}

// procStats holds the raw jiffy counters of a single /proc/stat CPU row.
// Pointer fields are nil when the running kernel does not report that column
// (the parser pads rows with fewer than 10 fields).
type procStats struct {
	User      *uint64 `json:"user"`
	Nice      *uint64 `json:"nice"`
	System    *uint64 `json:"system"`
	Idle      *uint64 `json:"idle"`
	IoWait    *uint64 `json:"iowait"`
	Irq       *uint64 `json:"irq"`
	SoftIrq   *uint64 `json:"softirq"`
	Steal     *uint64 `json:"steal"`
	Guest     *uint64 `json:"guest"`
	GuestNice *uint64 `json:"guest_nice"`
	Total     uint64  `json:"total"` // sum of all reported counters
}

// cpuPercentages holds one CPU's usage shares (percent of total jiffies)
// computed between two samples; nil means the counter was unavailable.
type cpuPercentages struct {
	GroupName string // CPU identifier, e.g. "cpu0"
	User      *float64
	Nice      *float64
	System    *float64
	Idle      *float64
	IoWait    *float64
	Irq       *float64
	SoftIrq   *float64
	Steal     *float64
	Guest     *float64
	GuestNice *float64
}
// parseCounters converts the textual jiffy counters of one /proc/stat row
// into unsigned integers, failing on the first value that does not parse.
func parseCounters(values []string) ([]uint64, error) {
	var parsed []uint64
	for _, raw := range values {
		n, err := strconv.ParseUint(raw, 10, 64)
		if err != nil {
			return nil, err
		}
		parsed = append(parsed, n)
	}
	return parsed, nil
}
// fill converts arr into a slice of pointers, padding with nils up to
// elementCount so callers can distinguish absent counters from zero values.
//
// Bug fix: the original body contained the mojibake token "©" where
// "&copy" (address of the loop-local copy) belonged, which does not compile;
// the copy is restored under a name that does not shadow the builtin copy.
func fill(arr []uint64, elementCount int) []*uint64 {
	var filled []*uint64
	for _, v := range arr {
		v := v // take a distinct copy so each pointer refers to its own value
		filled = append(filled, &v)
	}
	if len(arr) < elementCount {
		// Pad with nil pointers for counters the kernel did not report.
		emptyArray := make([]*uint64, elementCount-len(arr))
		filled = append(filled, emptyArray...)
	}
	return filled
}
// parseProcStat parses /proc/stat-formatted data and returns the raw jiffy
// counters per CPU, keyed by row name ("cpu0", "cpu1", ...).
// The aggregate "cpu" row is skipped.
func parseProcStat(out io.Reader) (map[string]procStats, error) {
	scanner := bufio.NewScanner(out)
	var result = make(map[string]procStats)
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.HasPrefix(line, "cpu") {
			continue
		}
		fields := strings.Fields(line)
		key := fields[0]
		values := fields[1:]
		// skip total cpu usage
		if key == "cpu" {
			continue
		}
		counterValues, err := parseCounters(values)
		if err != nil {
			return nil, err
		}
		// Total jiffies across all columns; used later as the delta base.
		var total uint64
		for _, v := range counterValues {
			total += v
		}
		// Kernels may report fewer than 10 columns; pad with nils so the
		// positional field mapping below stays fixed.
		filledValues := fill(counterValues, 10)
		result[key] = procStats{
			User:      filledValues[0],
			Nice:      filledValues[1],
			System:    filledValues[2],
			Idle:      filledValues[3],
			IoWait:    filledValues[4],
			Irq:       filledValues[5],
			SoftIrq:   filledValues[6],
			Steal:     filledValues[7],
			Guest:     filledValues[8],
			GuestNice: filledValues[9],
			Total:     total,
		}
	}
	// NOTE(review): scanner.Err() is not checked; a read error would be
	// silently treated as end of input.
	return result, nil
}
// collectProcStatValues reads the current per-CPU counters from /proc/stat.
func collectProcStatValues() (map[string]procStats, error) {
	file, err := os.Open("/proc/stat")
	if err != nil {
		return nil, err
	}
	defer file.Close()

	return parseProcStat(file)
}
// saveValues persists the sampled per-CPU counters together with the sample
// time as JSON, so the next plugin run can compute deltas against them.
func saveValues(tempFileName string, values map[string]procStats, now time.Time) error {
	f, err := os.Create(tempFileName)
	if err != nil {
		return err
	}
	defer f.Close()

	item := saveItem{
		LastTime:       now,
		ProcStatsByCPU: values,
	}
	return json.NewEncoder(f).Encode(item)
}
// fetchSavedItem loads the sample saved by the previous run. A missing state
// file is not an error: it returns (nil, nil) so the caller can treat the
// invocation as a first run.
func fetchSavedItem(tempFileName string) (*saveItem, error) {
	f, err := os.Open(tempFileName)
	if os.IsNotExist(err) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	defer f.Close()

	item := &saveItem{}
	if err := json.NewDecoder(f).Decode(item); err != nil {
		return nil, err
	}
	return item, nil
}
// calcCPUUsage computes per-CPU usage percentages from the delta between the
// current counters and the previously saved sample. It returns an error when
// the saved sample is older than 10 minutes (too stale to be meaningful) or
// when a total counter went backwards (counter reset, e.g. after a reboot).
func calcCPUUsage(currentValues map[string]procStats, now time.Time, savedItem *saveItem) ([]cpuPercentages, error) {
	if now.Sub(savedItem.LastTime).Seconds() > 600 {
		return nil, errors.New("Too long duration")
	}

	var result []cpuPercentages
	for name, current := range currentValues {
		last, ok := savedItem.ProcStatsByCPU[name]
		// A CPU with no saved baseline (e.g. newly appeared) cannot be diffed.
		if !ok {
			continue
		}

		if last.Total > current.Total {
			return nil, errors.New("cpu counter has been reset")
		}

		user := calculatePercentage(current.User, last.User, current.Total, last.Total)
		nice := calculatePercentage(current.Nice, last.Nice, current.Total, last.Total)
		system := calculatePercentage(current.System, last.System, current.Total, last.Total)
		idle := calculatePercentage(current.Idle, last.Idle, current.Total, last.Total)
		iowait := calculatePercentage(current.IoWait, last.IoWait, current.Total, last.Total)
		irq := calculatePercentage(current.Irq, last.Irq, current.Total, last.Total)
		softirq := calculatePercentage(current.SoftIrq, last.SoftIrq, current.Total, last.Total)
		steal := calculatePercentage(current.Steal, last.Steal, current.Total, last.Total)
		guest := calculatePercentage(current.Guest, last.Guest, current.Total, last.Total)
		// guest_nice available since Linux 2.6.33 (ref: man proc)
		guestNice := calculatePercentage(current.GuestNice, last.GuestNice, current.Total, last.Total)

		result = append(result, cpuPercentages{
			GroupName: name,
			User:      user,
			Nice:      nice,
			System:    system,
			Idle:      idle,
			IoWait:    iowait,
			Irq:       irq,
			SoftIrq:   softirq,
			Steal:     steal,
			Guest:     guest,
			GuestNice: guestNice,
		})
	}

	return result, nil
}
// calculatePercentage returns the counter delta as a percentage of the total
// jiffies delta between two samples.
// It returns nil when either sample is missing (the kernel did not expose the
// field) or when no jiffies elapsed between samples — the latter previously
// divided by zero and produced NaN/Inf metric values.
func calculatePercentage(currentValue *uint64, lastValue *uint64, currentTotal uint64, lastTotal uint64) *float64 {
	if currentValue == nil || lastValue == nil {
		return nil
	}
	if currentTotal == lastTotal {
		// Zero elapsed jiffies: no meaningful percentage can be computed.
		return nil
	}
	ret := float64(*currentValue-*lastValue) / float64(currentTotal-lastTotal) * 100.0
	return &ret
}
// fetchLoadavg5 reads the 5-minute load average (the second column) from
// /proc/loadavg.
func fetchLoadavg5() (float64, error) {
	raw, err := ioutil.ReadFile("/proc/loadavg")
	if err != nil {
		return 0.0, err
	}

	cols := strings.Fields(string(raw))
	if len(cols) <= 2 {
		return 0.0, fmt.Errorf("cannot fetch loadavg5")
	}
	return strconv.ParseFloat(cols[1], 64)
}
// printValue emits one metric line in mackerel's tab-separated sensor format
// ("name<TAB>value<TAB>epoch"). Nil values (unavailable counters) are skipped.
func printValue(key string, value *float64, time time.Time) {
	if value == nil {
		return
	}
	fmt.Printf("%s\t%f\t%d\n", key, *value, time.Unix())
}
// outputCPUUsage prints one metric line per CPU per counter; nil percentages
// (counters the kernel does not report) are skipped by printValue.
func outputCPUUsage(cpuUsage []cpuPercentages, now time.Time) {
	for _, u := range cpuUsage {
		printValue(fmt.Sprintf("multicore.cpu.%s.user", u.GroupName), u.User, now)
		printValue(fmt.Sprintf("multicore.cpu.%s.nice", u.GroupName), u.Nice, now)
		printValue(fmt.Sprintf("multicore.cpu.%s.system", u.GroupName), u.System, now)
		printValue(fmt.Sprintf("multicore.cpu.%s.idle", u.GroupName), u.Idle, now)
		printValue(fmt.Sprintf("multicore.cpu.%s.iowait", u.GroupName), u.IoWait, now)
		printValue(fmt.Sprintf("multicore.cpu.%s.irq", u.GroupName), u.Irq, now)
		printValue(fmt.Sprintf("multicore.cpu.%s.softirq", u.GroupName), u.SoftIrq, now)
		printValue(fmt.Sprintf("multicore.cpu.%s.steal", u.GroupName), u.Steal, now)
		printValue(fmt.Sprintf("multicore.cpu.%s.guest", u.GroupName), u.Guest, now)
		printValue(fmt.Sprintf("multicore.cpu.%s.guest_nice", u.GroupName), u.GuestNice, now)
	}
}
// outputLoadavgPerCore prints the 5-minute load average normalized by the
// number of CPUs.
func outputLoadavgPerCore(loadavgPerCore float64, now time.Time) {
	printValue("multicore.loadavg_per_core.loadavg5", &loadavgPerCore, now)
}
// outputDefinitions prints the "# mackerel-agent-plugin" marker followed by
// the graph definition JSON that mackerel-agent consumes.
func outputDefinitions() {
	fmt.Println("# mackerel-agent-plugin")
	var graphs mp.GraphDef
	graphs.Graphs = graphDef
	b, err := json.Marshal(graphs)
	if err != nil {
		log.Fatalln("OutputDefinitions: ", err)
	}
	fmt.Println(string(b))
}
// outputMulticore samples /proc/stat, diffs it against the values saved by
// the previous run and prints per-CPU usage percentages plus loadavg5 per
// core. Any failure aborts the plugin via log.Fatalln.
//
// Fixes over the original: the saveValues error is no longer silently
// ignored, the fetchSavedItem failure message no longer claims
// "fetchLastValues", and an empty CPU set no longer divides by zero.
func outputMulticore(tempFileName string) {
	now := time.Now()

	currentValues, err := collectProcStatValues()
	if err != nil {
		log.Fatalln("collectProcStatValues: ", err)
	}

	savedItem, err := fetchSavedItem(tempFileName)
	if err != nil {
		log.Fatalln("fetchSavedItem: ", err)
	}

	// Always persist the current sample so the next run has a fresh baseline.
	if err := saveValues(tempFileName, currentValues, now); err != nil {
		log.Fatalln("saveValues: ", err)
	}

	// maybe first time run
	if savedItem == nil {
		return
	}

	cpuUsage, err := calcCPUUsage(currentValues, now, savedItem)
	if err != nil {
		log.Fatalln("calcCPUUsage: ", err)
	}

	loadavg5, err := fetchLoadavg5()
	if err != nil {
		log.Fatalln("fetchLoadavg5: ", err)
	}

	// Guard against an empty usage set (no diffable CPUs) to avoid a
	// division by zero in the per-core normalization.
	if len(cpuUsage) == 0 {
		return
	}
	loadPerCPUCount := loadavg5 / float64(len(cpuUsage))

	outputCPUUsage(cpuUsage, now)
	outputLoadavgPerCore(loadPerCPUCount, now)
}
func generateTempfilePath() string {
dir := os.Getenv("MACKEREL_PLUGIN_WORKDIR")
if dir == "" {
dir = os.TempDir()
}
return filepath.Join(dir, "mackerel-plugin-multicore")
}
// Do the plugin: parse the -tempfile flag, then either print the graph
// definitions (when mackerel-agent sets MACKEREL_AGENT_PLUGIN_META) or emit
// the current metric values.
func Do() {
	var tempFileName string
	optTempfile := flag.String("tempfile", "", "Temp file name")
	flag.Parse()

	tempFileName = *optTempfile
	if tempFileName == "" {
		tempFileName = generateTempfilePath()
	}

	if os.Getenv("MACKEREL_AGENT_PLUGIN_META") != "" {
		outputDefinitions()
	} else {
		outputMulticore(tempFileName)
	}
}
Rename `GroupName` to `CPUName`.
package mpmulticore
import (
"bufio"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"time"
mp "github.com/mackerelio/go-mackerel-plugin-helper"
)
// graphDef declares the plugin's graph schema: a stacked per-CPU usage graph
// (one sub-graph per CPU via the "#" wildcard) and a loadavg5-per-core graph.
var graphDef = map[string]mp.Graphs{
	"multicore.cpu.#": {
		Label: "MultiCore CPU",
		Unit:  "percentage",
		Metrics: []mp.Metrics{
			{Name: "user", Label: "user", Diff: false, Stacked: true},
			{Name: "nice", Label: "nice", Diff: false, Stacked: true},
			{Name: "system", Label: "system", Diff: false, Stacked: true},
			{Name: "idle", Label: "idle", Diff: false, Stacked: true},
			{Name: "iowait", Label: "ioWait", Diff: false, Stacked: true},
			{Name: "irq", Label: "irq", Diff: false, Stacked: true},
			{Name: "softirq", Label: "softirq", Diff: false, Stacked: true},
			{Name: "steal", Label: "steal", Diff: false, Stacked: true},
			{Name: "guest", Label: "guest", Diff: false, Stacked: true},
			{Name: "guest_nice", Label: "guest_nice", Diff: false, Stacked: true},
		},
	},
	"multicore.loadavg_per_core": {
		Label: "MultiCore loadavg5 per core",
		Unit:  "float",
		Metrics: []mp.Metrics{
			{Name: "loadavg5", Label: "loadavg5", Diff: false, Stacked: false},
		},
	},
}

// saveItem is the JSON document persisted between plugin runs so the next
// invocation can compute counter deltas.
type saveItem struct {
	LastTime       time.Time            // when the counters were sampled
	ProcStatsByCPU map[string]procStats // raw counters keyed by "cpuN"
}

// procStats holds the raw jiffy counters of a single /proc/stat CPU row.
// Pointer fields are nil when the running kernel does not report that column
// (the parser pads rows with fewer than 10 fields).
type procStats struct {
	User      *uint64 `json:"user"`
	Nice      *uint64 `json:"nice"`
	System    *uint64 `json:"system"`
	Idle      *uint64 `json:"idle"`
	IoWait    *uint64 `json:"iowait"`
	Irq       *uint64 `json:"irq"`
	SoftIrq   *uint64 `json:"softirq"`
	Steal     *uint64 `json:"steal"`
	Guest     *uint64 `json:"guest"`
	GuestNice *uint64 `json:"guest_nice"`
	Total     uint64  `json:"total"` // sum of all reported counters
}

// cpuPercentages holds one CPU's usage shares (percent of total jiffies)
// computed between two samples; nil means the counter was unavailable.
type cpuPercentages struct {
	CPUName   string // CPU identifier, e.g. "cpu0"
	User      *float64
	Nice      *float64
	System    *float64
	Idle      *float64
	IoWait    *float64
	Irq       *float64
	SoftIrq   *float64
	Steal     *float64
	Guest     *float64
	GuestNice *float64
}
// parseCounters converts the textual jiffy counters of one /proc/stat row
// into unsigned integers, failing on the first value that does not parse.
func parseCounters(values []string) ([]uint64, error) {
	var parsed []uint64
	for _, raw := range values {
		n, err := strconv.ParseUint(raw, 10, 64)
		if err != nil {
			return nil, err
		}
		parsed = append(parsed, n)
	}
	return parsed, nil
}
// fill converts arr into a slice of pointers, padding with nils up to
// elementCount so callers can distinguish absent counters from zero values.
//
// Bug fix: the original body contained the mojibake token "©" where
// "&copy" (address of the loop-local copy) belonged, which does not compile;
// the copy is restored under a name that does not shadow the builtin copy.
func fill(arr []uint64, elementCount int) []*uint64 {
	var filled []*uint64
	for _, v := range arr {
		v := v // take a distinct copy so each pointer refers to its own value
		filled = append(filled, &v)
	}
	if len(arr) < elementCount {
		// Pad with nil pointers for counters the kernel did not report.
		emptyArray := make([]*uint64, elementCount-len(arr))
		filled = append(filled, emptyArray...)
	}
	return filled
}
// parseProcStat parses /proc/stat-formatted data and returns the raw jiffy
// counters per CPU, keyed by row name ("cpu0", "cpu1", ...).
// The aggregate "cpu" row is skipped.
func parseProcStat(out io.Reader) (map[string]procStats, error) {
	scanner := bufio.NewScanner(out)
	var result = make(map[string]procStats)
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.HasPrefix(line, "cpu") {
			continue
		}
		fields := strings.Fields(line)
		key := fields[0]
		values := fields[1:]
		// skip total cpu usage
		if key == "cpu" {
			continue
		}
		counterValues, err := parseCounters(values)
		if err != nil {
			return nil, err
		}
		// Total jiffies across all columns; used later as the delta base.
		var total uint64
		for _, v := range counterValues {
			total += v
		}
		// Kernels may report fewer than 10 columns; pad with nils so the
		// positional field mapping below stays fixed.
		filledValues := fill(counterValues, 10)
		result[key] = procStats{
			User:      filledValues[0],
			Nice:      filledValues[1],
			System:    filledValues[2],
			Idle:      filledValues[3],
			IoWait:    filledValues[4],
			Irq:       filledValues[5],
			SoftIrq:   filledValues[6],
			Steal:     filledValues[7],
			Guest:     filledValues[8],
			GuestNice: filledValues[9],
			Total:     total,
		}
	}
	// NOTE(review): scanner.Err() is not checked; a read error would be
	// silently treated as end of input.
	return result, nil
}
// collectProcStatValues reads the current per-CPU counters from /proc/stat.
func collectProcStatValues() (map[string]procStats, error) {
	file, err := os.Open("/proc/stat")
	if err != nil {
		return nil, err
	}
	defer file.Close()

	return parseProcStat(file)
}
// saveValues persists the sampled per-CPU counters together with the sample
// time as JSON, so the next plugin run can compute deltas against them.
func saveValues(tempFileName string, values map[string]procStats, now time.Time) error {
	f, err := os.Create(tempFileName)
	if err != nil {
		return err
	}
	defer f.Close()

	item := saveItem{
		LastTime:       now,
		ProcStatsByCPU: values,
	}
	return json.NewEncoder(f).Encode(item)
}
// fetchSavedItem loads the sample saved by the previous run. A missing state
// file is not an error: it returns (nil, nil) so the caller can treat the
// invocation as a first run.
func fetchSavedItem(tempFileName string) (*saveItem, error) {
	f, err := os.Open(tempFileName)
	if os.IsNotExist(err) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	defer f.Close()

	item := &saveItem{}
	if err := json.NewDecoder(f).Decode(item); err != nil {
		return nil, err
	}
	return item, nil
}
// calcCPUUsage computes per-CPU usage percentages from the delta between the
// current counters and the previously saved sample. It returns an error when
// the saved sample is older than 10 minutes (too stale to be meaningful) or
// when a total counter went backwards (counter reset, e.g. after a reboot).
func calcCPUUsage(currentValues map[string]procStats, now time.Time, savedItem *saveItem) ([]cpuPercentages, error) {
	if now.Sub(savedItem.LastTime).Seconds() > 600 {
		return nil, errors.New("Too long duration")
	}

	var result []cpuPercentages
	for name, current := range currentValues {
		last, ok := savedItem.ProcStatsByCPU[name]
		// A CPU with no saved baseline (e.g. newly appeared) cannot be diffed.
		if !ok {
			continue
		}

		if last.Total > current.Total {
			return nil, errors.New("cpu counter has been reset")
		}

		user := calculatePercentage(current.User, last.User, current.Total, last.Total)
		nice := calculatePercentage(current.Nice, last.Nice, current.Total, last.Total)
		system := calculatePercentage(current.System, last.System, current.Total, last.Total)
		idle := calculatePercentage(current.Idle, last.Idle, current.Total, last.Total)
		iowait := calculatePercentage(current.IoWait, last.IoWait, current.Total, last.Total)
		irq := calculatePercentage(current.Irq, last.Irq, current.Total, last.Total)
		softirq := calculatePercentage(current.SoftIrq, last.SoftIrq, current.Total, last.Total)
		steal := calculatePercentage(current.Steal, last.Steal, current.Total, last.Total)
		guest := calculatePercentage(current.Guest, last.Guest, current.Total, last.Total)
		// guest_nice available since Linux 2.6.33 (ref: man proc)
		guestNice := calculatePercentage(current.GuestNice, last.GuestNice, current.Total, last.Total)

		result = append(result, cpuPercentages{
			CPUName:   name,
			User:      user,
			Nice:      nice,
			System:    system,
			Idle:      idle,
			IoWait:    iowait,
			Irq:       irq,
			SoftIrq:   softirq,
			Steal:     steal,
			Guest:     guest,
			GuestNice: guestNice,
		})
	}

	return result, nil
}
// calculatePercentage returns the counter delta as a percentage of the total
// jiffies delta between two samples.
// It returns nil when either sample is missing (the kernel did not expose the
// field) or when no jiffies elapsed between samples — the latter previously
// divided by zero and produced NaN/Inf metric values.
func calculatePercentage(currentValue *uint64, lastValue *uint64, currentTotal uint64, lastTotal uint64) *float64 {
	if currentValue == nil || lastValue == nil {
		return nil
	}
	if currentTotal == lastTotal {
		// Zero elapsed jiffies: no meaningful percentage can be computed.
		return nil
	}
	ret := float64(*currentValue-*lastValue) / float64(currentTotal-lastTotal) * 100.0
	return &ret
}
// fetchLoadavg5 reads the 5-minute load average (the second column) from
// /proc/loadavg.
func fetchLoadavg5() (float64, error) {
	raw, err := ioutil.ReadFile("/proc/loadavg")
	if err != nil {
		return 0.0, err
	}

	cols := strings.Fields(string(raw))
	if len(cols) <= 2 {
		return 0.0, fmt.Errorf("cannot fetch loadavg5")
	}
	return strconv.ParseFloat(cols[1], 64)
}
// printValue emits one metric line in mackerel's tab-separated sensor format
// ("name<TAB>value<TAB>epoch"). Nil values (unavailable counters) are skipped.
func printValue(key string, value *float64, time time.Time) {
	if value == nil {
		return
	}
	fmt.Printf("%s\t%f\t%d\n", key, *value, time.Unix())
}
// outputCPUUsage prints one metric line per CPU per counter; nil percentages
// (counters the kernel does not report) are skipped by printValue.
func outputCPUUsage(cpuUsage []cpuPercentages, now time.Time) {
	for _, u := range cpuUsage {
		printValue(fmt.Sprintf("multicore.cpu.%s.user", u.CPUName), u.User, now)
		printValue(fmt.Sprintf("multicore.cpu.%s.nice", u.CPUName), u.Nice, now)
		printValue(fmt.Sprintf("multicore.cpu.%s.system", u.CPUName), u.System, now)
		printValue(fmt.Sprintf("multicore.cpu.%s.idle", u.CPUName), u.Idle, now)
		printValue(fmt.Sprintf("multicore.cpu.%s.iowait", u.CPUName), u.IoWait, now)
		printValue(fmt.Sprintf("multicore.cpu.%s.irq", u.CPUName), u.Irq, now)
		printValue(fmt.Sprintf("multicore.cpu.%s.softirq", u.CPUName), u.SoftIrq, now)
		printValue(fmt.Sprintf("multicore.cpu.%s.steal", u.CPUName), u.Steal, now)
		printValue(fmt.Sprintf("multicore.cpu.%s.guest", u.CPUName), u.Guest, now)
		printValue(fmt.Sprintf("multicore.cpu.%s.guest_nice", u.CPUName), u.GuestNice, now)
	}
}
// outputLoadavgPerCore prints the 5-minute load average normalized by the
// number of CPUs.
func outputLoadavgPerCore(loadavgPerCore float64, now time.Time) {
	printValue("multicore.loadavg_per_core.loadavg5", &loadavgPerCore, now)
}
// outputDefinitions prints the "# mackerel-agent-plugin" marker followed by
// the graph definition JSON that mackerel-agent consumes.
func outputDefinitions() {
	fmt.Println("# mackerel-agent-plugin")
	var graphs mp.GraphDef
	graphs.Graphs = graphDef
	b, err := json.Marshal(graphs)
	if err != nil {
		log.Fatalln("OutputDefinitions: ", err)
	}
	fmt.Println(string(b))
}
// outputMulticore samples /proc/stat, diffs it against the values saved by
// the previous run and prints per-CPU usage percentages plus loadavg5 per
// core. Any failure aborts the plugin via log.Fatalln.
//
// Fixes over the original: the saveValues error is no longer silently
// ignored, the fetchSavedItem failure message no longer claims
// "fetchLastValues", and an empty CPU set no longer divides by zero.
func outputMulticore(tempFileName string) {
	now := time.Now()

	currentValues, err := collectProcStatValues()
	if err != nil {
		log.Fatalln("collectProcStatValues: ", err)
	}

	savedItem, err := fetchSavedItem(tempFileName)
	if err != nil {
		log.Fatalln("fetchSavedItem: ", err)
	}

	// Always persist the current sample so the next run has a fresh baseline.
	if err := saveValues(tempFileName, currentValues, now); err != nil {
		log.Fatalln("saveValues: ", err)
	}

	// maybe first time run
	if savedItem == nil {
		return
	}

	cpuUsage, err := calcCPUUsage(currentValues, now, savedItem)
	if err != nil {
		log.Fatalln("calcCPUUsage: ", err)
	}

	loadavg5, err := fetchLoadavg5()
	if err != nil {
		log.Fatalln("fetchLoadavg5: ", err)
	}

	// Guard against an empty usage set (no diffable CPUs) to avoid a
	// division by zero in the per-core normalization.
	if len(cpuUsage) == 0 {
		return
	}
	loadPerCPUCount := loadavg5 / float64(len(cpuUsage))

	outputCPUUsage(cpuUsage, now)
	outputLoadavgPerCore(loadPerCPUCount, now)
}
func generateTempfilePath() string {
dir := os.Getenv("MACKEREL_PLUGIN_WORKDIR")
if dir == "" {
dir = os.TempDir()
}
return filepath.Join(dir, "mackerel-plugin-multicore")
}
// Do the plugin: parse the -tempfile flag, then either print the graph
// definitions (when mackerel-agent sets MACKEREL_AGENT_PLUGIN_META) or emit
// the current metric values.
func Do() {
	var tempFileName string
	optTempfile := flag.String("tempfile", "", "Temp file name")
	flag.Parse()

	tempFileName = *optTempfile
	if tempFileName == "" {
		tempFileName = generateTempfilePath()
	}

	if os.Getenv("MACKEREL_AGENT_PLUGIN_META") != "" {
		outputDefinitions()
	} else {
		outputMulticore(tempFileName)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.